text stringlengths 38 1.54M |
|---|
from keras.layers import Layer, InputSpec
from keras import initializers, regularizers, constraints
import keras.backend as K
from keras_contrib.utils.test_utils import to_tuple
class PELU(Layer):
    """Parametric Exponential Linear Unit.

    It follows:
    `f(x) = alpha * (exp(x / beta) - 1) for x < 0`,
    `f(x) = (alpha / beta) * x for x >= 0`,
    where `alpha` & `beta` are learned arrays with the same shape as x
    (optionally broadcast along `shared_axes`).

    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    # Output shape
        Same shape as the input.

    # Arguments
        alpha_initializer: initialization function for the alpha variable weights.
        alpha_regularizer: regularizer for the alpha variable weights.
        alpha_constraint: constraint for the alpha variable weights.
        beta_initializer: initialization function for the beta variable weights.
        beta_regularizer: regularizer for the beta variable weights.
        beta_constraint: constraint for the beta variable weights.
        shared_axes: the axes along which to share learnable
            parameters for the activation function.
            For example, if the incoming feature maps
            are from a 2D convolution
            with output shape `(batch, height, width, channels)`,
            and you wish to share parameters across space
            so that each filter only has one set of parameters,
            set `shared_axes=[1, 2]`.

    # References
        - [Parametric exponential linear unit for deep convolutional neural networks](
          https://arxiv.org/abs/1605.09332v3)
    """

    def __init__(self, alpha_initializer='ones',
                 alpha_regularizer=None,
                 alpha_constraint=None,
                 beta_initializer='ones',
                 beta_regularizer=None,
                 beta_constraint=None,
                 shared_axes=None,
                 **kwargs):
        super(PELU, self).__init__(**kwargs)
        self.supports_masking = True
        self.alpha_initializer = initializers.get(alpha_initializer)
        self.alpha_regularizer = regularizers.get(alpha_regularizer)
        self.alpha_constraint = constraints.get(alpha_constraint)
        self.beta_initializer = initializers.get(beta_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        # Normalize `shared_axes` to None or a list of ints.
        if shared_axes is None:
            self.shared_axes = None
        elif not isinstance(shared_axes, (list, tuple)):
            self.shared_axes = [shared_axes]
        else:
            self.shared_axes = list(shared_axes)

    def build(self, input_shape):
        """Create the `alpha` and `beta` weights, one value per non-shared axis."""
        input_shape = to_tuple(input_shape)
        param_shape = list(input_shape[1:])
        self.param_broadcast = [False] * len(param_shape)
        if self.shared_axes is not None:
            # Shared axes get size-1 parameters that broadcast over that axis
            # (axis numbers are 1-based; axis 0 is the batch dimension).
            for i in self.shared_axes:
                param_shape[i - 1] = 1
                self.param_broadcast[i - 1] = True
        param_shape = tuple(param_shape)
        # Initialised as ones to emulate the default ELU
        self.alpha = self.add_weight(shape=param_shape,
                                     name='alpha',
                                     initializer=self.alpha_initializer,
                                     regularizer=self.alpha_regularizer,
                                     constraint=self.alpha_constraint)
        self.beta = self.add_weight(shape=param_shape,
                                    name='beta',
                                    initializer=self.beta_initializer,
                                    regularizer=self.beta_regularizer,
                                    constraint=self.beta_constraint)
        # Set input spec: pin every non-shared axis to its build-time size.
        axes = {}
        if self.shared_axes:
            for i in range(1, len(input_shape)):
                if i not in self.shared_axes:
                    axes[i] = input_shape[i]
        self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
        self.built = True

    def call(self, x, mask=None):
        if K.backend() == 'theano':
            # Theano needs explicit broadcast patterns for the size-1 axes.
            pos = K.relu(x) * (K.pattern_broadcast(self.alpha, self.param_broadcast) /
                               K.pattern_broadcast(self.beta, self.param_broadcast))
            neg = (K.pattern_broadcast(self.alpha, self.param_broadcast) *
                   (K.exp((-K.relu(-x))
                          / K.pattern_broadcast(self.beta, self.param_broadcast)) - 1))
        else:
            # -relu(-x) == min(x, 0): `neg` vanishes for x >= 0 and `pos`
            # vanishes for x <= 0, so their sum implements both branches.
            pos = K.relu(x) * self.alpha / self.beta
            neg = self.alpha * (K.exp((-K.relu(-x)) / self.beta) - 1)
        return neg + pos

    def get_config(self):
        """Serialize constructor arguments so the layer can be re-created."""
        config = {
            'alpha_initializer': initializers.serialize(self.alpha_initializer),
            'alpha_regularizer': regularizers.serialize(self.alpha_regularizer),
            'alpha_constraint': constraints.serialize(self.alpha_constraint),
            'beta_initializer': initializers.serialize(self.beta_initializer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'shared_axes': self.shared_axes
        }
        base_config = super(PELU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape
|
'''
Function for extending pytorch.
'''
import torch
import torch.utils.data as data
import torch.multiprocessing as multiprocessing
from torch.utils.data.sampler import SequentialSampler, RandomSampler, BatchSampler
from torch.utils.data.dataloader import ExceptionWrapper
import collections
import sys
import traceback
import threading
# Python 2/3 compatibility shims: unify the queue module name and the set of
# string types (used for isinstance checks on batch elements).
if sys.version_info[0] == 2:
    import Queue as queue
    string_classes = basestring
else:
    import queue
    string_classes = (str, bytes)
def _worker_loop(dataset, index_queue, data_queue, collate_fn):
'''As torch.utils.data.dataloader._worker_loop but works on
full batches instead of iterating through the batch.
'''
global _use_shared_memory
_use_shared_memory = True
torch.set_num_threads(1)
while True:
r = index_queue.get()
if r is None:
data_queue.put(None)
break
idx, batch_indices = r
try:
# samples = collate_fn([dataset[i] for i in batch_indices])
samples = collate_fn(dataset[batch_indices])
except Exception:
data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
else:
data_queue.put((idx, samples))
class DataLoaderIterBatch(data.dataloader.DataLoaderIter):
    '''Change DataLoaderIter (which is used by DataLoader) to make
    it process batches instead of single elements.

    NOTE(review): this mirrors the __init__ of an old (pre-0.4) PyTorch
    DataLoaderIter. `_pin_memory_loop` and `pin_memory_batch` are not defined
    in the visible portion of this file -- presumably they come from
    torch.utils.data.dataloader; confirm they are in scope.
    '''

    def __init__(self, loader):
        # Copy the parent loader's configuration.
        self.dataset = loader.dataset
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.pin_memory = loader.pin_memory
        self.done_event = threading.Event()
        self.sample_iter = iter(self.batch_sampler)
        if self.num_workers > 0:
            self.index_queue = multiprocessing.SimpleQueue()
            self.data_queue = multiprocessing.SimpleQueue()
            self.batches_outstanding = 0  # batches sent to workers but not yet returned
            self.shutdown = False
            self.send_idx = 0  # index of the next batch handed to a worker
            self.rcvd_idx = 0  # index of the next batch to return to the caller
            self.reorder_dict = {}  # batches that arrived out of order
            # Workers run the batch-aware _worker_loop defined above.
            self.workers = [
                multiprocessing.Process(
                    target=_worker_loop,
                    args=(self.dataset, self.index_queue, self.data_queue, self.collate_fn))
                for _ in range(self.num_workers)]
            for w in self.workers:
                w.daemon = True  # ensure that the worker exits on process exit
                w.start()
            if self.pin_memory:
                # Route worker output through a thread that pins the tensors.
                in_data = self.data_queue
                self.data_queue = queue.Queue()
                self.pin_thread = threading.Thread(
                    target=_pin_memory_loop,
                    args=(in_data, self.data_queue, self.done_event))
                self.pin_thread.daemon = True
                self.pin_thread.start()
            # prime the prefetch loop
            for _ in range(2 * self.num_workers):
                self._put_indices()

    def __next__(self):
        if self.num_workers == 0:  # same-process loading
            indices = next(self.sample_iter)  # may raise StopIteration
            # The full index list is given to the dataset at once, instead of
            # collate_fn([self.dataset[i] for i in indices]).
            batch = self.collate_fn(self.dataset[indices])
            if self.pin_memory:
                batch = pin_memory_batch(batch)
            return batch
        # check if the next sample has already been generated
        if self.rcvd_idx in self.reorder_dict:
            batch = self.reorder_dict.pop(self.rcvd_idx)
            return self._process_next_batch(batch)
        if self.batches_outstanding == 0:
            self._shutdown_workers()
            raise StopIteration
        while True:
            assert (not self.shutdown and self.batches_outstanding > 0)
            idx, batch = self.data_queue.get()
            self.batches_outstanding -= 1
            if idx != self.rcvd_idx:
                # store out-of-order samples
                self.reorder_dict[idx] = batch
                continue
            return self._process_next_batch(batch)

    next = __next__  # Python 2 compatibility
class DataLoaderBatch(data.DataLoader):
    '''Like DataLoader but works on batches instead of iterating
    through the batch: the dataset is indexed with the whole list of batch
    indices at once (``dataset[indices]``).
    '''

    def __iter__(self):
        # Swap in the batch-aware iterator; everything else is inherited.
        return DataLoaderIterBatch(self)
class RandomSamplerContinuous(data.sampler.RandomSampler):
    """Samples elements randomly, without replacement, and continues forever:
    a fresh random permutation of the indices is yielded each epoch, so the
    iterator never raises StopIteration.

    Arguments:
        data_source (Dataset): dataset to sample from
    """

    def __iter__(self):
        while True:
            order = torch.randperm(len(self.data_source)).long()
            for index in order:
                yield index
|
import sys

# Reads "N M" from stdin and prints f(N) modulo a prime.
# The visible recurrence is a(n) = M*(M-1)^(n-1) - a(n-1) with
# a(2) = M*(M-1) -- evaluated bottom-up below.
N, M = (int(x) for x in input().split())
MOD = 998244353


def f(n):
    """Return the recurrence value for n (n >= 2), modulo MOD.

    The original implementation recursed from n down to 2 and needed
    sys.setrecursionlimit(10**8), which risks a stack overflow / interpreter
    crash for large n; this iterative form is O(n log n) time, O(1) space.
    """
    acc = (M * (M - 1)) % MOD
    for k in range(3, n + 1):
        acc = (M * pow(M - 1, k - 1, MOD) - acc + MOD) % MOD
    return acc


print(f(N))
#
# xcPROJECTNAMEASIDENTIFIERxcAppDelegate.py
# xcPROJECTNAMExc
#
# Created by xcFULLUSERNAMExc on xcDATExc.
# Copyright xcORGANIZATIONNAMExc xcYEARxc. All rights reserved.
#
from Foundation import *
from AppKit import *
from threading import Thread
from twisted.internet import reactor
import sys
from aether.server.main import Service
from aether.client.main import browse, send
import FoundComputer
from PyObjCTools import AppHelper
from threading import Lock
# Optional Growl desktop notifications: register an 'aether' notifier with a
# single 'done' notification type; `n` stays None when Growl is unavailable.
try:
    import Growl
    n = Growl.GrowlNotifier('aether', ['done'])
    n.register()
except ImportError, e:
    n = None
class ReceiveHandler(object):
    """Tracks incoming transfer progress and fires a Growl notification when a
    transfer completes."""

    def __init__(self):
        self.last_received = 0
        self.transfers = {}

    def cb(self, client, name, received, total):
        """Progress callback: throttles updates to ~0.1% of the total size
        (at least 4096 bytes) and notifies on completion."""
        step = total / 1000
        step = max(step, 4096)
        if self.last_received + step < received:
            self.last_received = received
        if received == total and n:
            n.notify('done', name, '%d kb received' % total)
# Module-level receive handler plus the aether service (listening starts once
# the reactor thread runs). Identity and download directory are hard-coded.
rh = ReceiveHandler()
service = Service('finui@finkbook', '/Users/fin/Downloads', rh.cb)
class ReactorLoop(Thread):
    """Background thread that runs the Twisted reactor (and starts the aether
    listening service) alongside the Cocoa main loop."""

    def __init__(self):
        # Each thread that touches Cocoa objects needs its own autorelease pool.
        self.pool = NSAutoreleasePool.alloc().init()
        Thread.__init__(self)

    def run(self):
        service.listen()
        print('listen')
        # Signal handlers can only be installed on the main thread.
        reactor.run(installSignalHandlers=0)

    def stop(self):
        print('stopping')
        # Fix: was `pool.release()`, which raised NameError because no global
        # `pool` exists -- release this thread's own autorelease pool.
        self.pool.release()
        reactor.stop()
class BrowseLoop(Thread):
    """Background thread that browses Bonjour for aether peers and keeps the
    controller's computer list in sync via the add/remove callbacks."""

    def __init__(self, controller):
        # Each Cocoa-facing thread needs its own autorelease pool.
        self.pool = NSAutoreleasePool.alloc().init()
        Thread.__init__(self)
        self.controller = controller
        self.status = []        # appending 'stop' signals browse() to exit
        self.indexes = {}       # service name -> row index in the UI list
        self.add_lock = Lock()  # guards indexes/UI mutations from callbacks

    def run(self):
        browse('_at_nomin_aether._tcp', self.added, self.removed, lambda: self.status)

    def added(self, serviceName, *args, **kwargs):
        """Bonjour callback: a new service appeared on the network."""
        # Fix: the original acquired the lock and returned early for known
        # services WITHOUT releasing it, deadlocking every later callback.
        # `with` guarantees release on every exit path.
        with self.add_lock:
            if serviceName in self.indexes:
                return
            index = self.controller.computers.content().count()
            fc = FoundComputer.FoundComputer.alloc().init()
            fc.name = serviceName
            # UI mutations must happen on the main thread.
            AppHelper.callAfter(self.controller.computers.addObject_, fc)
            self.indexes[serviceName] = index

    def removed(self, serviceName, *args, **kwargs):
        """Bonjour callback: a service disappeared from the network."""
        # NOTE(review): indexes of entries added after the removed one are not
        # shifted down, so subsequent removals may target the wrong row -- confirm.
        with self.add_lock:
            AppHelper.callAfter(self.controller.computers.remove_, self.indexes[serviceName])
            del self.indexes[serviceName]

    def stop(self):
        self.status.append('stop')
        # Fix: was `pool.release()` (NameError on undefined global).
        self.pool.release()
class xcAppDelegate(NSObject):
    """Cocoa application delegate: starts the reactor and Bonjour-browse
    threads on launch and stops them on termination."""
    # Interface Builder outlet bound to the window controller.
    # NOTE(review): `objc` is not among this file's visible imports -- it is
    # presumably re-exported by the Foundation/AppKit star imports; confirm.
    controller = objc.IBOutlet()

    def applicationDidFinishLaunching_(self, sender):
        NSLog("Application did finish launching.")
        # Twisted reactor (network) thread and Bonjour browse thread.
        self.reactorthread = ReactorLoop()
        self.reactorthread.start()
        self.browsethread = BrowseLoop(self.controller)
        self.browsethread.start()

    def applicationWillTerminate_(self, sender):
        self.reactorthread.stop()
        self.browsethread.stop()
        print 'stopped'
|
from django.apps import AppConfig as DjangoAppConfig
class AppConfig(DjangoAppConfig):
    """Django application config for the API service sub-app."""
    name = "unplugged.services.api"
    verbose_name = "API Service"
    label = "services_api"

    def ready(self):
        # Imported for its side effect: registering APIServicePlugin once the
        # app registry is fully loaded. NOQA silences the unused-import lint.
        from .handler import APIServicePlugin  # NOQA
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 10 15:26:43 2019
@author: Administrator
"""
import math
class Jsteg:
    """JSteg-style steganography over a sequence of (quantized) DCT
    coefficients: payload bits are hidden in the least significant bit of
    every coefficient whose value is not -1, 0 or 1.
    """

    def __init__(self):
        # Coefficient sequence currently loaded for embedding/extraction.
        self.sequence_after_dct = None

    def set_sequence_after_dct(self, sequence_after_dct):
        """Load a coefficient sequence and report its approximate capacity."""
        self.sequence_after_dct = sequence_after_dct
        # Coefficients equal to -1/0/1 cannot carry a bit; the remaining count
        # is an upper bound on capacity (not absolutely reliable, per original).
        usable = [c for c in self.sequence_after_dct if c not in (-1, 1, 0)]
        self.available_info_len = len(usable)
        print("Load>> 可嵌入", self.available_info_len, 'bits')

    def get_sequence_after_dct(self):
        return self.sequence_after_dct

    def write(self, info):
        """Embed the payload-length header first, then the payload bits."""
        bits = self._set_info_len(info)
        seq_pos = 0
        for bit in bits:
            # Skip coefficients that cannot carry data.
            while not self._write(seq_pos, bit):
                seq_pos += 1
            seq_pos += 1

    def read(self):
        """Read the length header, then extract that many payload bits."""
        payload_len, seq_pos = self._get_info_len()
        payload = []
        while len(payload) < payload_len:
            bit = self._read(seq_pos)
            if bit is not None:
                payload.append(bit)
            seq_pos += 1
        return payload

    # ===============================================================#

    def _set_info_len(self, info):
        """Prepend a fixed-width, right-aligned binary header holding len(info)."""
        width = int(math.log(self.available_info_len, 2)) + 1
        header = [0] * width
        # Right-align the length bits inside the fixed-width header.
        header[-len(bin(len(info))) + 2:] = [int(b) for b in bin(len(info))[2:]]
        return header + info

    def _get_info_len(self):
        """Decode the fixed-width length header; return (length, next_index)."""
        width = int(math.log(self.available_info_len, 2)) + 1
        collected = []
        seq_pos = 0
        while len(collected) < width:
            bit = self._read(seq_pos)
            if bit is not None:
                collected.append(str(bit))
            seq_pos += 1
        return int(''.join(collected), 2), seq_pos

    # DCT coefficients may be negative, so the LSB embedding differs slightly
    # from spatial-domain LSB: the magnitude is nudged away from zero on one
    # side and toward it on the other so that value % 2 matches the data bit.
    def _write(self, index, data):
        """Try to embed one bit at `index`; return False if the slot is unusable."""
        value = self.sequence_after_dct[index]
        if value in (-1, 1, 0):
            return False
        if value % 2 != data:
            step = 1 if data == 1 else -1
            if value < 0:
                step = -step
            self.sequence_after_dct[index] = value + step
        return True

    def _read(self, index):
        """Return the embedded bit at `index`, or None if the slot is unusable."""
        value = self.sequence_after_dct[index]
        if value in (-1, 1, 0):
            return None
        return value % 2
'''
import cv2
import numpy as np
def dct(m):
m = np.float32(m)/255.0
return cv2.dct(m)*255
'''
if __name__=="__main__":
jsteg=Jsteg()
# 写
sequence_after_dct=[-1,0,1]*100+[i for i in range(-7,500)]
#print(sequence_after_dct)
jsteg.set_sequence_after_dct(sequence_after_dct)
info1=[0,1,0,1,1,0,1,0]
jsteg.write(info1)
sequence_after_dct2=jsteg.get_sequence_after_dct()
# 读
jsteg.set_sequence_after_dct(sequence_after_dct2)
info2=jsteg.read()
print (info2) |
# ---------------------------------------------------------------------------
# Auto-generated paritybench preamble: aliases the project's own submodules to
# this module and mocks out I/O and CLI plumbing so the classes below can be
# imported standalone.
# ---------------------------------------------------------------------------
import sys
_module = sys.modules[__name__]
del sys
# Every project-local module name resolves to this module itself.
config = _module
eval_tvqa_plus = _module
maskrcnn_voc = _module
bounding_box = _module
boxlist_ops = _module
voc_eval = _module
utils = _module
inference = _module
main = _module
model = _module
cnn = _module
context_query_attention = _module
encoder = _module
model_utils = _module
position_encoding = _module
self_attention = _module
stage = _module
tvqa_dataset = _module
utils = _module
from _paritybench_helpers import _mock_config, patch_functional
from unittest.mock import mock_open, MagicMock
from torch.autograd import Function
from torch.nn import Module
import abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings
import numpy as np
from torch import Tensor
patch_functional()
# Replace file/CLI access with mocks so import-time side effects are inert.
open = mock_open()
yaml = logging = sys = argparse = MagicMock()
ArgumentParser = argparse.ArgumentParser
_global_config = args = argv = cfg = config = params = _mock_config()
argparse.ArgumentParser.return_value.parse_args.return_value = _global_config
yaml.load.return_value = _global_config
sys.argv = _global_config
__version__ = '1.0.0'
xrange = range
wraps = functools.wraps
import copy
import math
import pickle
import re
import time
from collections import defaultdict

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
class DepthwiseSeparableConv(nn.Module):
    """
    Depthwise-separable convolution: a per-channel (grouped) conv followed by
    a 1x1 pointwise conv, using fewer parameters than a dense convolution.

    :Examples:

    >>> m = DepthwiseSeparableConv(300, 200, 5, dim=1)
    >>> input_tensor = torch.randn(32, 300, 20)
    >>> output = m(input_tensor)
    """

    def __init__(self, in_ch, out_ch, k, dim=1, relu=True):
        """
        :param in_ch: input hidden dimension size
        :param out_ch: output hidden dimension size
        :param k: kernel size
        :param dim: default 1. 1D conv or 2D conv
        :param relu: apply an in-place ReLU after the pointwise conv
        """
        super(DepthwiseSeparableConv, self).__init__()
        self.relu = relu
        if dim == 1:
            conv_cls = nn.Conv1d
        elif dim == 2:
            conv_cls = nn.Conv2d
        else:
            raise Exception('Incorrect dimension!')
        # groups=in_ch makes the first conv operate on each channel separately.
        self.depthwise_conv = conv_cls(in_channels=in_ch, out_channels=in_ch,
                                       kernel_size=k, groups=in_ch, padding=k // 2)
        self.pointwise_conv = conv_cls(in_channels=in_ch, out_channels=out_ch,
                                       kernel_size=1, padding=0)

    def forward(self, x):
        """
        :Input: (N, L_in, D)
        :Output: (N, L_out, D)
        """
        # Convs expect channels-first, the block's API is channels-last.
        out = self.pointwise_conv(self.depthwise_conv(x.transpose(1, 2)))
        if self.relu:
            out = F.relu(out, inplace=True)
        return out.transpose(1, 2)
class ConvRelu(nn.Module):
    """Dropout -> (1D or 2D) convolution -> optional in-place ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, dim=1, stride=1, padding=0, relu=True, dropout=0.1):
        """
        :param in_channels: input hidden dimension size
        :param out_channels: output hidden dimension size
        :param kernel_size: kernel size
        :param dim: default 1. 1D conv or 2D conv
        :param relu: apply ReLU to the conv output
        :param dropout: dropout prob applied to the input (training only)
        """
        super(ConvRelu, self).__init__()
        self.relu = relu
        self.dropout = dropout
        if dim == 1:
            conv_cls = nn.Conv1d
        elif dim == 2:
            conv_cls = nn.Conv2d
        else:
            raise Exception('Incorrect dimension!')
        self.conv = conv_cls(in_channels=in_channels, out_channels=out_channels,
                             kernel_size=kernel_size, stride=stride, padding=padding)

    def forward(self, x):
        """
        :Input: (batch_num, in_ch, seq_length)
        :Output: (batch_num, out_ch, seq_length)
        """
        dropped = F.dropout(x, training=self.training, p=self.dropout)
        out = self.conv(dropped)
        return F.relu(out, inplace=True) if self.relu else out
class DepthwiseSeparableConv1d(nn.Module):
    """Fixed-width 1D depthwise-separable convolution (channels-first API):
    a grouped per-channel conv followed by a 1x1 pointwise conv."""

    def __init__(self, n_filters=128, kernel_size=7, padding=3):
        super(DepthwiseSeparableConv1d, self).__init__()
        self.depthwise = nn.Conv1d(n_filters, n_filters, kernel_size=kernel_size,
                                   padding=padding, groups=n_filters)
        self.separable = nn.Conv1d(n_filters, n_filters, kernel_size=1)

    def forward(self, x):
        return self.separable(self.depthwise(x))
class StructuredAttention(nn.Module):
    """Use each word in context to attend to words in query.

    In my case, context is question-answer, query is the object-level
    features in an image.

    Note the values in S are cosine similarity scores, and are in [-1, 1].
    They are scaled before softmax to make sure the maximum value could
    get very high probability:
        S_ = F.softmax(S * self.scale, dim=-1)
    Consider softmax function f(m) = exp(m) / [24 * exp(-m) + exp(m)].
    If not scaled, S in [-1, 1], the weight the maximum value could only get is
        exp(1) / [24 * exp(-1) + exp(1)] = 0.04 .
    When set the scale = 100, S * scale in [-100, 100],
        exp(100) / [24 * exp(-100) + exp(100)] = 0.9976
    """

    def __init__(self, dropout=0.1, scale=100, add_void=False):
        """
        Args:
            dropout: dropout prob applied to the normalized inputs in `similarity`.
            scale: multiplier on the cosine scores before softmax (see class doc).
            add_void: if True, keep the attention mass assigned to the appended
                "void" (non-visual) vectors when aggregating.
        """
        super(StructuredAttention, self).__init__()
        self.dropout = dropout
        self.scale = scale
        self.add_void = add_void

    def forward(self, C, Q, c_mask, q_mask, noun_mask=None, void_vector=None):
        """
        match the dim of '*', singleton is allowed
        Args:
            C: (N, 5, Li, Lqa, D)
            Q: (N, 1, Li, Lr, D)
            c_mask: (N, 5, Li, Lqa)
            q_mask: (N, 1, Li, Lr)
            noun_mask: (N, 5, Lqa), where 1 indicates the current position is a noun,
                or (N, 5, Li, Lqa), where each entry is the probability of the current
                image being a positive bag for the word
            void_vector: (D, )
        Returns:
            A: attended query features, (N, 5, Li, Lqa, D)
            S: masked raw similarity scores
            S_mask: validity mask for S
            S_: masked softmax weights actually used for the aggregation
        """
        bsz, _, num_img, num_region, hsz = Q.shape
        if void_vector is not None:
            # Append shared "void" region(s) to every image so that words with
            # no visual grounding have somewhere to attend.
            num_void = len(void_vector)
            Q_void = void_vector.view(1, 1, 1, num_void, hsz).repeat(bsz, 1, num_img, 1, 1)
            Q = torch.cat([Q, Q_void], dim=-2)
            q_mask_void = q_mask.new_ones(bsz, 1, num_img, num_void)
            q_mask = torch.cat([q_mask, q_mask_void], dim=-1)
        S, S_mask = self.similarity(C, Q, c_mask, q_mask)
        # Sharpen the cosine scores (see class docstring), then zero out
        # padded positions after the softmax.
        S_ = F.softmax(S * self.scale, dim=-1)
        S_ = S_ * S_mask
        if noun_mask is not None:
            # Keep only rows belonging to nouns (3D mask) or weight rows by
            # the per-image positive-bag probability (4D mask).
            if len(noun_mask.shape) == 3:
                bsz, num_qa, lqa = noun_mask.shape
                S_ = S_ * noun_mask.view(bsz, num_qa, 1, lqa, 1)
            elif len(noun_mask.shape) == 4:
                S_ = S_ * noun_mask.unsqueeze(-1)
            else:
                raise NotImplementedError
        if void_vector is not None:
            if self.add_void:
                # Aggregate including the void regions, but strip the void
                # columns from the returned score tensors.
                A = torch.matmul(S_, Q)
                S, S_mask, S_ = S[:, :, :, :, :-num_void], S_mask[:, :, :, :, :-num_void], S_[:, :, :, :, :-num_void]
            else:
                # Drop the void columns before aggregating.
                S, S_mask, S_ = S[:, :, :, :, :-num_void], S_mask[:, :, :, :, :-num_void], S_[:, :, :, :, :-num_void]
                Q = Q[:, :, :, :-num_void, :]
                A = torch.matmul(S_, Q)
        else:
            A = torch.matmul(S_, Q)
        return A, S, S_mask, S_

    def similarity(self, C, Q, c_mask, q_mask):
        """
        Cosine (L2-normalized dot-product) word-to-region similarity.
        Args:
            C: (N, 5, Li, Lqa, D)
            Q: (N, 1, Li, Lr, D)
            c_mask: (N, 5, Li, Lqa)
            q_mask: (N, 1, Li, Lr)
        Returns:
            masked_S: (N, 5, Li, Lqa, Lr), padded pairs pushed to -1e10
            S_mask: (N, 5, Li, Lqa, Lr), outer product of the two masks
        """
        C = F.dropout(F.normalize(C, p=2, dim=-1), p=self.dropout, training=self.training)
        Q = F.dropout(F.normalize(Q, p=2, dim=-1), p=self.dropout, training=self.training)
        S_mask = torch.matmul(c_mask.unsqueeze(-1), q_mask.unsqueeze(-2))
        S = torch.matmul(C, Q.transpose(-2, -1))
        masked_S = S - 10000000000.0 * (1 - S_mask)
        return masked_S, S_mask
class ContextQueryAttention(nn.Module):
    """Context-to-query ("sub-a") attention: each context position attends
    over all query positions and aggregates the query content."""

    def __init__(self):
        super(ContextQueryAttention, self).__init__()

    def forward(self, C, Q, c_mask, q_mask):
        """
        match the dim of '*', singleton is allowed
        :param C: (N, *, Lc, D)
        :param Q: (N, *, Lq, D)
        :param c_mask: (N, *, Lc)
        :param q_mask: (N, *, Lq)
        :return: (N, *, Lc, D) aggregated query features per context position
        """
        scores = self.similarity(C, Q, c_mask, q_mask)
        weights = F.softmax(scores, dim=-1)
        return torch.matmul(weights, Q)

    def similarity(self, C, Q, c_mask, q_mask):
        """
        Scaled dot-product word-to-word similarity.
        :param C: (N, *, Lc, D)
        :param Q: (N, *, Lq, D)
        :param c_mask: (N, *, Lc)
        :param q_mask: (N, *, Lq)
        :return: (N, *, Lc, Lq) with padded pairs pushed to -1e10
        """
        C = F.dropout(C, p=0.1, training=self.training)
        Q = F.dropout(Q, p=0.1, training=self.training)
        scale = math.sqrt(C.shape[-1])
        pair_mask = torch.matmul(c_mask.unsqueeze(-1), q_mask.unsqueeze(-2))
        scores = torch.matmul(C, Q.transpose(-2, -1)) / scale
        return scores - 10000000000.0 * (1 - pair_mask)
def clones(module, n):
    """Return an nn.ModuleList holding n deep copies of *module*."""
    copies = (copy.deepcopy(module) for _ in range(n))
    return nn.ModuleList(copies)
class MultiHeadedAttention(nn.Module):
    """Multi-head scaled-dot-product self-attention over a single sequence."""

    def __init__(self, nh, d_model, dropout=0.1):
        """
        Args:
            nh (int): number of heads
            d_model (int): input hidden size
            dropout: attention-weight dropout prob
        """
        super(MultiHeadedAttention, self).__init__()
        assert d_model % nh == 0
        self.d_k = d_model // nh
        self.nh = nh
        # Four projections (query, key, value, output). Deep-copying a single
        # Linear reproduces the original `clones` helper: all four start with
        # identical weights.
        base = nn.Linear(d_model, d_model)
        self.linears = nn.ModuleList([copy.deepcopy(base) for _ in range(4)])
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, mask=None):
        """
        Args:
            x: (N, L, D)
            mask: (N, L)
        """
        bsz = x.size(0)
        if mask is not None:
            mask = mask.view(bsz, 1, -1, 1)

        def split_heads(t):
            # (N, L, D) -> (N, nh, L, d_k)
            return t.view(bsz, -1, self.nh, self.d_k).transpose(1, 2)

        query = split_heads(self.linears[0](x))
        key = split_heads(self.linears[1](x))
        value = split_heads(self.linears[2](x))
        ctx, _ = self.attention(query, key, value, mask=mask, dropout=self.dropout)
        ctx = ctx.transpose(1, 2).contiguous().view(bsz, -1, self.nh * self.d_k)
        return self.linears[-1](ctx)

    def attention(self, query, key, value, mask=None, dropout=None):
        """Compute 'Scaled Dot Product Attention'.

        Args:
            query: (N, nh, L, d_k)
            key: (N, nh, L, d_k)
            value: (N, nh, L, d_k)
            mask: (N, 1, L, 1)
            dropout: nn.Dropout module applied to the attention weights
        """
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.d_k)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1000000000.0)
        weights = torch.softmax(scores, dim=-1)
        if dropout is not None:
            weights = dropout(weights)
        return torch.matmul(weights, value), weights
class PositionEncoding(nn.Module):
    """
    Add sinusoidal positional information to the input tensor
    (sin on even channels, cos on odd channels).

    :Examples:

    >>> model = PositionEncoding(n_filters=6, max_len=10)
    >>> test_input1 = torch.zeros(3, 10, 6)
    >>> output1 = model(test_input1)
    >>> output1.size()
    >>> test_input2 = torch.zeros(5, 3, 9, 6)
    >>> output2 = model(test_input2)
    >>> output2.size()
    """

    def __init__(self, n_filters=128, max_len=500):
        """
        :param n_filters: same with input hidden size
        :param max_len: maximum sequence length
        """
        super(PositionEncoding, self).__init__()
        position = torch.arange(0, max_len).float().unsqueeze(1)
        # Geometric frequency ladder: 10000^(-2i/n_filters) for channel pair i.
        freq = torch.exp(torch.arange(0, n_filters, 2).float() * -(math.log(10000.0) / n_filters))
        table = torch.zeros(max_len, n_filters)
        table[:, 0::2] = torch.sin(position * freq)
        table[:, 1::2] = torch.cos(position * freq)
        # Registered as a buffer: saved with the model, but not a parameter.
        self.register_buffer('pe', table)

    def forward(self, x):
        """
        :Input: (*, L, D)
        :Output: (*, L, D) the same size as input
        """
        table = self.pe.data[:x.size(-2), :]
        # Prepend singleton dims so the table broadcasts over leading axes.
        lead = len(x.size()) - 2
        return x + table.view((1,) * lead + tuple(table.shape))
class EncoderBlock(nn.Module):
    """QANet-style encoder block: positional encoding, then `n_conv`
    depthwise-separable convolutions with pre-layer-norm residuals, then an
    optional multi-head self-attention sub-layer with its own residual."""

    def __init__(self, n_conv, kernel_size=7, n_filters=128, dropout=0.1, num_heads=4):
        super(EncoderBlock, self).__init__()
        self.dropout = dropout
        self.n_conv = n_conv
        self.num_heads = num_heads
        self.position_encoding = PositionEncoding(n_filters=n_filters)
        self.layer_norm = nn.ModuleList([nn.LayerNorm(n_filters) for _ in range(n_conv)])
        self.final_layer_norm = nn.LayerNorm(n_filters)
        self.conv = nn.ModuleList([
            DepthwiseSeparableConv(in_ch=n_filters, out_ch=n_filters, k=kernel_size, relu=True)
            for _ in range(n_conv)])
        if self.num_heads != 0:
            self.multi_head_attn = MultiHeadedAttention(nh=num_heads, d_model=n_filters)
            self.attn_layer_norm = nn.LayerNorm(n_filters)

    def forward(self, x, mask):
        """
        :param x: (N, L, D)
        :param mask: (N, L)
        :return: (N, L, D)
        """
        out = self.position_encoding(x)
        for i, (norm, conv) in enumerate(zip(self.layer_norm, self.conv)):
            shortcut = out
            out = norm(out)
            # Dropout only on every other conv sub-layer, as in the original.
            if i % 2 == 0:
                out = F.dropout(out, p=self.dropout, training=self.training)
            out = conv(out) + shortcut
        if self.num_heads != 0:
            shortcut = out
            out = self.attn_layer_norm(out)
            out = self.multi_head_attn(out, mask=mask) + shortcut
        return self.final_layer_norm(out)
class StackedEncoder(nn.Module):
    """A stack of `n_blocks` identically-configured EncoderBlocks applied in
    sequence."""

    def __init__(self, n_blocks=7, n_conv=2, kernel_size=7, hidden_size=128, dropout=0.1, num_heads=4):
        super(StackedEncoder, self).__init__()
        self.n_blocks = n_blocks
        self.stacked_encoderBlocks = nn.ModuleList([
            EncoderBlock(n_conv=n_conv,
                         kernel_size=kernel_size,
                         n_filters=hidden_size,
                         dropout=dropout,
                         num_heads=num_heads)
            for _ in range(n_blocks)])

    def forward(self, x, mask):
        """
        :param x: (N, L, D)
        :param mask: (N, L)
        :return: (N, L, D)
        """
        for block in self.stacked_encoderBlocks:
            x = block(x, mask)
        return x
class NormalizeScale(nn.Module):
    """L2-normalize the input along dim 1, then rescale it with a learned
    per-channel weight initialized to `init_norm`."""

    def __init__(self, dim, num_additional_dims=1, init_norm=20):
        super(NormalizeScale, self).__init__()
        self.init_norm = init_norm
        # Weight shape (1,)*num_additional_dims + (dim,) so it broadcasts
        # against the normalized input.
        weight_shape = [1] * num_additional_dims + [dim]
        self.weight = nn.Parameter(init_norm * torch.ones(weight_shape))

    def forward(self, bottom):
        unit = nn.functional.normalize(bottom, p=2, dim=1)
        return unit * self.weight
class LinearWrapper(nn.Module):
    """Position-wise linear layer: optional LayerNorm, then Dropout, then
    Linear, with an optional in-place ReLU on the output."""

    def __init__(self, in_hsz, out_hsz, layer_norm=True, dropout=0.1, relu=True):
        super(LinearWrapper, self).__init__()
        self.relu = relu
        modules = []
        if layer_norm:
            modules.append(nn.LayerNorm(in_hsz))
        modules.append(nn.Dropout(dropout))
        modules.append(nn.Linear(in_hsz, out_hsz))
        self.conv = nn.Sequential(*modules)

    def forward(self, x):
        """(N, L, D) -> (N, L, out_hsz)"""
        out = self.conv(x)
        return F.relu(out, inplace=True) if self.relu else out
class ConvLinear(nn.Module):
    """Position-wise conv layer: optional LayerNorm, then Dropout, then a
    depthwise-separable 1D convolution."""

    def __init__(self, in_hsz, out_hsz, kernel_size=3, layer_norm=True, dropout=0.1, relu=True):
        super(ConvLinear, self).__init__()
        modules = []
        if layer_norm:
            modules.append(nn.LayerNorm(in_hsz))
        modules.append(nn.Dropout(dropout))
        modules.append(DepthwiseSeparableConv(in_ch=in_hsz, out_ch=out_hsz,
                                              k=kernel_size, dim=1, relu=relu))
        self.conv = nn.Sequential(*modules)

    def forward(self, x):
        """(N, L, D) -> (N, L, out_hsz)"""
        return self.conv(x)
def expand_span(span, expand_length=2):
    """Widen a [st, ed] span by `expand_length` on each side.

    Args:
        span (list): [st, ed]
        expand_length (int): length to add on the two sides
    Returns:
        expanded_span (list): [max(0, st-expand_length), ed + expand_length]

    The span is only used for indexing, so there is no need to clamp the
    upper end against a maximum length.
    """
    start, end = span
    return [max(0, start - expand_length), end + expand_length]
def topN_array_2d(array_2d, topN=None):
    """Get the topN (row, column, value) triples of a 2D array, ranked by
    value in descending order; topN=None returns all entries."""
    # Sort the flattened array ascending, reverse for descending, then map
    # flat positions back to 2D coordinates.
    flat_order = np.argsort(array_2d, axis=None)[::-1][:topN]
    rows, cols = np.unravel_index(flat_order, array_2d.shape)
    values = array_2d[rows, cols]
    return zip(rows, cols, values)
def find_max_triples(p1, p2, topN=5, prob_thd=None):
    """Find (k1, k2) index pairs with k1 <= k2 maximizing p1[k1] * p2[k2].

    Args:
        p1 (torch.Tensor): (N, L) batched start_idx probabilities
        p2 (torch.Tensor): (N, L) batched end_idx probabilities
        topN (int): return topN pairs with highest values
        prob_thd (float): optional minimum confidence to keep a pair
    Returns:
        batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]
    """
    # Outer product of start/end probabilities per batch element; the upper
    # triangle keeps only pairs with st <= ed.
    pair_scores = torch.bmm(p1.unsqueeze(2), p2.unsqueeze(1))
    pair_scores = torch.stack([torch.triu(m) for m in pair_scores]).data.cpu().numpy()
    batched_sorted_triple = []
    for scores in pair_scores:
        triples = topN_array_2d(scores, topN=topN)
        if prob_thd is not None:
            triples = [t for t in triples if t[2] >= prob_thd]
        batched_sorted_triple.append(triples)
    return batched_sorted_triple
def flat_list_of_lists(l):
    """Flatten one level of nesting: [[1,2], [3,4]] -> [1,2,3,4]."""
    flat = []
    for sub in l:
        flat.extend(sub)
    return flat
def compute_temporal_iou(pred, gt):
    """Compute intersection-over-union along the temporal axis.

    Ref: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py
    Args:
        pred: [st (float), ed (float)]
        gt: [st (float), ed (float)]
    Returns:
        iou (float): 0 when the union is empty (degenerate spans).
    """
    # Union here is the full hull of both spans, matching the reference impl.
    union = max(pred[1], gt[1]) - min(pred[0], gt[0])
    if union == 0:
        return 0
    overlap = max(0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    return 1.0 * overlap / union
def get_high_iou_sapns(gt_ts_list, pred_ts_list, iou_thd=0.5, add_gt=True):
    """Collect predicted spans whose temporal IoU with the ground-truth span
    is at least `iou_thd`; optionally seed each result list with the GT span.

    Args:
        gt_ts_list: N * (st, ed)
        pred_ts_list: N * [(st_idx, ed_idx, confidence), ...]
        iou_thd (float): minimum IoU to keep a prediction
        add_gt (bool): prepend the ground-truth span to each list
    Returns:
        N * [(st, ed), ...]
    """
    spans = []
    for gt_ts, pred_sublist in zip(gt_ts_list, pred_ts_list):
        kept = [gt_ts] if add_gt else []
        for pred in pred_sublist:
            candidate = pred[:2]  # drop the confidence entry
            if compute_temporal_iou(candidate, gt_ts) >= iou_thd:
                kept.append(candidate)
        spans.append(kept)
    return spans
def mask_logits(target, mask):
    """Keep logits where mask==1 and push masked positions to -1e10 so they
    vanish under a subsequent softmax."""
    return target * mask - 10000000000.0 * (1 - mask)
def save_pickle(data, data_path, highest=False):
    """Serialize *data* to *data_path* with pickle.

    Args:
        data: any picklable object.
        data_path (str): destination file path.
        highest (bool): if True use protocol 2, else protocol 0
            (values preserved from the original; NOTE(review): the name
            suggests pickle.HIGHEST_PROTOCOL was intended -- confirm).

    Fix: the file was opened in text mode ('w'), which raises TypeError on
    Python 3 because pickle writes bytes; it must be opened in binary mode.
    The context manager also guarantees the handle is closed.
    """
    protocol = 2 if highest else 0
    with open(data_path, 'wb') as f:
        pickle.dump(data, f, protocol=protocol)
class STAGE(nn.Module):
def __init__(self, opt):
super(STAGE, self).__init__()
self.opt = opt
self.inference_mode = False
self.sub_flag = opt.sub_flag
self.vfeat_flag = opt.vfeat_flag
self.vfeat_size = opt.vfeat_size
self.t_iter = opt.t_iter
self.extra_span_length = opt.extra_span_length
self.add_local = opt.add_local
self.use_sup_att = opt.use_sup_att
self.num_negatives = opt.num_negatives
self.negative_pool_size = opt.negative_pool_size
self.num_hard = opt.num_hard
self.drop_topk = opt.drop_topk
self.margin = opt.margin
self.att_loss_type = opt.att_loss_type
self.scale = opt.scale
self.alpha = opt.alpha
self.dropout = opt.dropout
self.hsz = opt.hsz
self.bsz = None
self.num_seg = None
self.num_a = 5
self.flag_cnt = self.sub_flag + self.vfeat_flag
self.wd_size = opt.embedding_size
self.bridge_hsz = 300
self.bert_word_encoding_fc = nn.Sequential(nn.LayerNorm(self.wd_size), nn.Dropout(self.dropout), nn.Linear(self.wd_size, self.bridge_hsz), nn.ReLU(True), nn.LayerNorm(self.bridge_hsz))
if self.sub_flag:
None
if self.vfeat_flag:
None
self.vid_fc = nn.Sequential(nn.LayerNorm(self.vfeat_size), nn.Dropout(self.dropout), nn.Linear(self.vfeat_size, self.bridge_hsz), nn.ReLU(True), nn.LayerNorm(self.bridge_hsz))
if self.flag_cnt == 2:
self.concat_fc = nn.Sequential(nn.LayerNorm(3 * self.hsz), nn.Dropout(self.dropout), nn.Linear(3 * self.hsz, self.hsz), nn.ReLU(True), nn.LayerNorm(self.hsz))
self.input_embedding = nn.Sequential(nn.Dropout(self.dropout), nn.Linear(self.bridge_hsz, self.hsz), nn.ReLU(True), nn.LayerNorm(self.hsz))
self.input_encoder = StackedEncoder(n_blocks=opt.input_encoder_n_blocks, n_conv=opt.input_encoder_n_conv, kernel_size=opt.input_encoder_kernel_size, num_heads=opt.input_encoder_n_heads, hidden_size=self.hsz, dropout=self.dropout)
self.str_attn = StructuredAttention(dropout=self.dropout, scale=opt.scale, add_void=opt.add_non_visual)
self.c2q_down_projection = nn.Sequential(nn.LayerNorm(3 * self.hsz), nn.Dropout(self.dropout), nn.Linear(3 * self.hsz, self.hsz), nn.ReLU(True))
self.cls_encoder = StackedEncoder(n_blocks=opt.cls_encoder_n_blocks, n_conv=opt.cls_encoder_n_conv, kernel_size=opt.cls_encoder_kernel_size, num_heads=opt.cls_encoder_n_heads, hidden_size=self.hsz, dropout=self.dropout)
self.cls_projection_layers = nn.ModuleList([LinearWrapper(in_hsz=self.hsz, out_hsz=self.hsz, layer_norm=True, dropout=self.dropout, relu=True)] + [ConvLinear(in_hsz=self.hsz, out_hsz=self.hsz, kernel_size=3, layer_norm=True, dropout=self.dropout, relu=True) for _ in range(self.t_iter)])
self.temporal_scoring_st_layers = nn.ModuleList([LinearWrapper(in_hsz=self.hsz, out_hsz=1, layer_norm=True, dropout=self.dropout, relu=False) for _ in range(self.t_iter + 1)])
self.temporal_scoring_ed_layers = nn.ModuleList([LinearWrapper(in_hsz=self.hsz, out_hsz=1, layer_norm=True, dropout=self.dropout, relu=False) for _ in range(self.t_iter + 1)])
self.temporal_criterion = nn.CrossEntropyLoss(reduction='sum')
self.classifier = LinearWrapper(in_hsz=self.hsz * 2 if self.add_local else self.hsz, out_hsz=1, layer_norm=True, dropout=self.dropout, relu=False)
def load_word_embedding(self, pretrained_embedding, requires_grad=False):
self.word_embedding.weight.data.copy_(torch.from_numpy(pretrained_embedding))
self.word_embedding.weight.requires_grad = requires_grad
def forward(self, batch):
if self.inference_mode:
return self.forward_main(batch)
else:
out, att_loss, att_predictions, temporal_loss, temporal_predictions, other_outputs = self.forward_main(batch)
return out, att_loss, att_predictions, temporal_loss, temporal_predictions
    def forward_main(self, batch):
        """Run the full QA pipeline on one batch and score the 5 answer candidates.
        Args:
            batch: edict, keys = qas, qas_mask, qa_noun_masks, sub, sub_mask, vcpt, vcpt_mask, vid, vid_mask,
                att_labels, att_labels_mask, qid, target, vid_name, ts_label
                qas, qas_mask, qa_noun_masks: (N, 5, Lqa)
                sub, sub_mask: (N, #imgs, Ls)
                vcpt, vcpt_mask: (N, #imgs, #regions)
                vid, vid_mask: (N, #imgs, #regions, D), (N, #imgs, #regions)
                att_labels, att_labels_mask: A list of N (#imgs, #qa-words, #regions)
                qid: list(int)
                vid_name: list(str)
                target: torch.LongTensor
                use_hard_negatives: bool, true to sample hard negatives
                q_l: int, length of the tokenized question
                anno_st_idx (list of int): each element is an index (at 0.5fps) of the first image
                    with spatial annotation.
                ts_label: {"st": (N, ), "ed": (N, )} for 'st_ed'. (N, L) for 'frm'
                ts_label_mask: (N, L) for both 'st_ed' and 'frm'
        Returns:
            inference mode: dict with keys answer, t_scores (softmaxed), att_predictions.
            training: ([out, target], att_loss, att_predictions, temporal_loss, t_scores, other_outputs)
            eval: (out, att_loss, att_predictions, temporal_loss, softmaxed t_scores, other_outputs)
        """
        self.bsz = len(batch.qid)
        bsz = self.bsz
        num_a = self.num_a
        hsz = self.hsz
        # Encode the five QA candidates: (N*5, Lqa, wd_size) -> (N, 5, 1, Lqa, hsz).
        # The singleton dim later broadcasts against the #imgs dim of the context.
        a_embed = self.base_encoder(batch.qas_bert.view(bsz * num_a, -1, self.wd_size), batch.qas_mask.view(bsz * num_a, -1), self.bert_word_encoding_fc, self.input_embedding, self.input_encoder)
        a_embed = a_embed.view(bsz, num_a, 1, -1, hsz)
        a_mask = batch.qas_mask.view(bsz, num_a, 1, -1)
        attended_sub, attended_vid, attended_vid_mask, attended_sub_mask = (None,) * 4
        other_outputs = {}
        if self.sub_flag:
            # Subtitle stream: encode per-image subtitle words, then attend QA <-> subtitle.
            num_imgs, num_words = batch.sub_bert.shape[1:3]
            sub_embed = self.base_encoder(batch.sub_bert.view(bsz * num_imgs, num_words, -1), batch.sub_mask.view(bsz * num_imgs, num_words), self.bert_word_encoding_fc, self.input_embedding, self.input_encoder)
            sub_embed = sub_embed.contiguous().view(bsz, 1, num_imgs, num_words, -1)
            sub_mask = batch.sub_mask.view(bsz, 1, num_imgs, num_words)
            attended_sub, attended_sub_mask, sub_raw_s, sub_normalized_s = self.qa_ctx_attention(a_embed, sub_embed, a_mask, sub_mask, noun_mask=None, non_visual_vectors=None)
            other_outputs['sub_normalized_s'] = sub_normalized_s
            other_outputs['sub_raw_s'] = sub_raw_s
        if self.vfeat_flag:
            # Video stream: L2-normalize region features, encode, then attend QA <-> regions.
            num_imgs, num_regions = batch.vid.shape[1:3]
            vid_embed = F.normalize(batch.vid, p=2, dim=-1)
            vid_embed = self.base_encoder(vid_embed.view(bsz * num_imgs, num_regions, -1), batch.vid_mask.view(bsz * num_imgs, num_regions), self.vid_fc, self.input_embedding, self.input_encoder)
            vid_embed = vid_embed.contiguous().view(bsz, 1, num_imgs, num_regions, -1)
            vid_mask = batch.vid_mask.view(bsz, 1, num_imgs, num_regions)
            attended_vid, attended_vid_mask, vid_raw_s, vid_normalized_s = self.qa_ctx_attention(a_embed, vid_embed, a_mask, vid_mask, noun_mask=None, non_visual_vectors=None)
            other_outputs['vid_normalized_s'] = vid_normalized_s
            other_outputs['vid_raw_s'] = vid_raw_s
        if self.flag_cnt == 2:
            # Both streams available: fuse with the [a; b; a*b] trick, project to hsz.
            visual_text_embedding = torch.cat([attended_sub, attended_vid, attended_sub * attended_vid], dim=-1)
            visual_text_embedding = self.concat_fc(visual_text_embedding)
            out, target, t_scores = self.classfier_head_multi_proposal(visual_text_embedding, attended_vid_mask, batch.target, batch.ts_label, batch.ts_label_mask, extra_span_length=self.extra_span_length)
        elif self.sub_flag:
            out, target, t_scores = self.classfier_head_multi_proposal(attended_sub, attended_sub_mask, batch.target, batch.ts_label, batch.ts_label_mask, extra_span_length=self.extra_span_length)
        elif self.vfeat_flag:
            out, target, t_scores = self.classfier_head_multi_proposal(attended_vid, attended_vid_mask, batch.target, batch.ts_label, batch.ts_label_mask, extra_span_length=self.extra_span_length)
        else:
            raise NotImplementedError
        # targets may have been expanded to one entry per span proposal inside
        # classfier_head_multi_proposal; out must stay aligned with them.
        assert len(out) == len(target)
        other_outputs['temporal_scores'] = t_scores
        if self.inference_mode:
            inference_outputs = {'answer': out, 't_scores': F.softmax(t_scores, dim=2), 'att_predictions': self.get_att_prediction(scores=other_outputs['vid_raw_s'], object_vocab=batch.eval_object_word_ids, words=batch.qas, vid_names=batch.vid_name, qids=batch.qid, img_indices=batch.image_indices, boxes=batch.boxes, start_indices=batch.anno_st_idx) if self.vfeat_flag else None}
            return inference_outputs
        att_loss = 0
        att_predictions = None
        # Supervised attention loss only applies while training with the video stream.
        if self.use_sup_att and self.training and self.vfeat_flag:
            start_indices = batch.anno_st_idx
            try:
                cur_att_loss, cur_att_predictions = self.get_att_loss(other_outputs['vid_raw_s'], batch.att_labels, batch.target, batch.qas, qids=batch.qid, q_lens=batch.q_l, vid_names=batch.vid_name, img_indices=batch.image_indices, boxes=batch.boxes, start_indices=start_indices, num_negatives=self.num_negatives, use_hard_negatives=batch.use_hard_negatives, drop_topk=self.drop_topk)
            except AssertionError as e:
                # Dump the offending batch for post-mortem debugging, then abort the run.
                save_pickle({'batch': batch, 'start_indices': start_indices, 'vid_raw_s': other_outputs['vid_raw_s']}, 'err_dict.pickle')
                sys.exit(1)
            att_loss += cur_att_loss
            att_predictions = cur_att_predictions
        temporal_loss = self.get_ts_loss(temporal_scores=t_scores, ts_labels=batch.ts_label, answer_indices=batch.target)
        if self.training:
            return [out, target], att_loss, att_predictions, temporal_loss, t_scores, other_outputs
        else:
            return out, att_loss, att_predictions, temporal_loss, F.softmax(t_scores, dim=2), other_outputs
@classmethod
def base_encoder(cls, data, data_mask, init_encoder, downsize_encoder, input_encoder):
""" Raw data --> higher-level embedding
Args:
data: (N, L) for text, (N, L, D) for video
data_mask: (N, L)
init_encoder: word_embedding layer for text, MLP (downsize) for video
downsize_encoder: MLP, down project to hsz
input_encoder: multiple layer of encoder block, with residual connection, CNN, layernorm, etc
Returns:
encoded_data: (N, L, D)
"""
data = downsize_encoder(init_encoder(data))
return input_encoder(data, data_mask)
    def qa_ctx_attention(self, qa_embed, ctx_embed, qa_mask, ctx_mask, noun_mask, non_visual_vectors):
        """ Align image regions with QA words
        Args:
            qa_embed: (N, 5, 1, Lqa, D)
            qa_mask: (N, 5, 1, Lqa)
            ctx_embed: (N, 1, Li, Lr, D)
            ctx_mask: (N, 1, Li, Lr)
            noun_mask: (N, 5, Lqa)
            non_visual_vectors: (m, D), m is a tunable parameter
        Returns:
            mixed: fused QA/context representation, one copy per image (N, 5, Li, Lqa, D)
            mixed_mask: (N, 5, Li, Lqa) float mask, 1.0 where at least one region attended the word
            raw_s: unnormalized attention scores from the structured attention
            s_normalized: normalized attention scores
        """
        num_img, num_region = ctx_mask.shape[2:]
        # u_a: per-word context summaries; raw_s / s_normalized are the raw and
        # normalized attention score tensors; s_mask marks valid score entries.
        u_a, raw_s, s_mask, s_normalized = self.str_attn(qa_embed, ctx_embed, qa_mask, ctx_mask, noun_mask=noun_mask, void_vector=non_visual_vectors)
        # broadcast the single QA copy across all Li images
        qa_embed = qa_embed.repeat(1, 1, num_img, 1, 1)
        # standard [a; b; a*b] fusion, then project 3*hsz back down to hsz
        mixed = torch.cat([qa_embed, u_a, qa_embed * u_a], dim=-1)
        mixed = self.c2q_down_projection(mixed)
        # a QA word is considered valid if any context region contributed to it
        mixed_mask = (s_mask.sum(-1) != 0).float()
        return mixed, mixed_mask, raw_s, s_normalized
    def get_proposals(self, max_statement, max_statement_mask, temporal_scores, targets, ts_labels, max_num_proposal=1, iou_thd=0.5, ce_prob_thd=0.01, extra_span_length=3):
        """Build local (span-pooled) + global (whole-video) statement vectors.
        Args:
            max_statement: (N, 5, Li, D)
            max_statement_mask: (N, 5, Li, 1)
            temporal_scores: (N, 5, Li, 2)
            targets: (N, )
            ts_labels: (N, Li) for frm or N * (st, ed) for st_ed
            max_num_proposal:
            iou_thd:
            ce_prob_thd:
            extra_span_length:
        Returns:
            max_max_statement: concat of span-local and global max-pooled vectors (last dim 2*D)
            targets: during training, expanded to one entry per kept span proposal;
                otherwise returned unchanged
        """
        bsz, num_a, num_img, _ = max_statement_mask.shape
        if self.training:
            # Use only the correct answer's temporal scores to propose spans.
            ca_temporal_scores_st_ed = temporal_scores[torch.arange(bsz, dtype=torch.long), targets].data
            ca_temporal_scores_st_ed = F.softmax(ca_temporal_scores_st_ed, dim=1)
            # Top spans from the (start, end) probability product, above ce_prob_thd.
            ca_pred_spans = find_max_triples(ca_temporal_scores_st_ed[:, :, 0], ca_temporal_scores_st_ed[:, :, 1], topN=max_num_proposal, prob_thd=ce_prob_thd)
            # Make the end index exclusive: [st, ed] -> [st, ed+1, score].
            ca_pred_spans = [[[sub_e[0], sub_e[1] + 1, sub_e[2]] for sub_e in e] for e in ca_pred_spans]
            # Keep only proposals overlapping the ground truth (IoU >= iou_thd); GT itself is always added.
            spans = get_high_iou_sapns(zip(ts_labels['st'].tolist(), (ts_labels['ed'] + 1).tolist()), ca_pred_spans, iou_thd=iou_thd, add_gt=True)
            local_max_max_statement_list = []
            global_max_max_statement_list = []
            span_targets = []
            for idx, (t, span_sublist) in enumerate(zip(targets, spans)):
                # Each kept span becomes its own training example with the same label.
                span_targets.extend([t] * len(span_sublist))
                # Global vector: max-pool over all images (shared by every span of this sample).
                cur_global_max_max_statement = torch.max(mask_logits(max_statement[idx], max_statement_mask[idx]), 1)[0]
                global_max_max_statement_list.extend([cur_global_max_max_statement] * len(span_sublist))
                for span in span_sublist:
                    # Widen the span slightly for extra temporal context.
                    span = expand_span(span, expand_length=extra_span_length)
                    # Local vector: max-pool over only the images inside the span.
                    cur_span_max_statement = mask_logits(max_statement[idx, :, span[0]:span[1]], max_statement_mask[idx, :, span[0]:span[1]])
                    local_max_max_statement_list.append(torch.max(cur_span_max_statement, 1)[0])
            local_max_max_statement = torch.stack(local_max_max_statement_list)
            global_max_max_statement = torch.stack(global_max_max_statement_list)
            max_max_statement = torch.cat([local_max_max_statement, global_max_max_statement], dim=-1)
            return max_max_statement, targets.new_tensor(span_targets)
        else:
            # Inference: take the single best span per (sample, answer) pair.
            temporal_scores_st_ed = F.softmax(temporal_scores, dim=2)
            temporal_scores_st_ed_reshaped = temporal_scores_st_ed.view(bsz * num_a, -1, 2)
            pred_spans = find_max_triples(temporal_scores_st_ed_reshaped[:, :, 0], temporal_scores_st_ed_reshaped[:, :, 1], topN=1, prob_thd=None)
            pred_spans = flat_list_of_lists(pred_spans)
            pred_spans = torch.FloatTensor(pred_spans)
            pred_spans, pred_scores = pred_spans[:, :2].long(), pred_spans[:, 2]
            # End index made exclusive, matching the training branch.
            pred_spans = [[e[0], e[1] + 1] for e in pred_spans]
            max_statement = max_statement.view(bsz * num_a, num_img, -1)
            max_statement_mask = max_statement_mask.view(bsz * num_a, num_img, -1)
            local_max_max_statement_list = []
            global_max_max_statement_list = []
            for idx, span in enumerate(pred_spans):
                span = expand_span(span, expand_length=extra_span_length)
                # Global vector: max-pool over all images of this (sample, answer).
                cur_global_max_max_statement = torch.max(mask_logits(max_statement[idx], max_statement_mask[idx]), 0)[0]
                global_max_max_statement_list.append(cur_global_max_max_statement)
                # Local vector: max-pool over only the predicted (expanded) span.
                cur_span_max_statement = mask_logits(max_statement[idx, span[0]:span[1]], max_statement_mask[idx, span[0]:span[1]])
                local_max_max_statement_list.append(torch.max(cur_span_max_statement, 0)[0])
            local_max_max_statement = torch.stack(local_max_max_statement_list)
            global_max_max_statement = torch.stack(global_max_max_statement_list)
            max_max_statement = torch.cat([local_max_max_statement, global_max_max_statement], dim=-1)
            return max_max_statement.view(bsz, num_a, -1), targets
def residual_temporal_predictor(self, layer_idx, input_tensor):
"""
Args:
layer_idx (int):
input_tensor: (N, L, D)
Returns:
temporal_score
"""
input_tensor = input_tensor + self.cls_projection_layers[layer_idx](input_tensor)
t_score_st = self.temporal_scoring_st_layers[layer_idx](input_tensor)
t_score_ed = self.temporal_scoring_ed_layers[layer_idx](input_tensor)
t_score = torch.cat([t_score_st, t_score_ed], dim=2)
return input_tensor, t_score
def classfier_head_multi_proposal(self, statement, statement_mask, targets, ts_labels, ts_labels_mask, max_num_proposal=1, ce_prob_thd=0.01, iou_thd=0.5, extra_span_length=3):
"""Predict the probabilities of each statements being true. Statements = QA + Context.
Args:
statement: (N, 5, Li, Lqa, D)
statement_mask: (N, 5, Li, Lqa)
targets: (N, )
ts_labels: (N, Li) for frm or N * (st, ed) for st_ed
ts_labels_mask: (N, Li)
max_num_proposal (int):
ce_prob_thd (float): threshold for p1*p2 (st, ed)
iou_thd (float): threshold for temporal iou
extra_span_length (int): expand the localized span to give a little bit extra context
Returns:
"""
bsz, num_a, num_img, num_words = statement_mask.shape
statement = statement.view(bsz * num_a * num_img, num_words, -1)
statement_mask = statement_mask.view(bsz * num_a * num_img, num_words)
statement = self.cls_encoder(statement, statement_mask)
max_statement = torch.max(mask_logits(statement, statement_mask.unsqueeze(2)), 1)[0]
max_statement_mask = (statement_mask.sum(1) != 0).float().view(bsz, num_a, num_img, 1)
max_statement = max_statement.view(bsz * num_a, num_img, -1)
t_score_container = []
encoded_max_statement_container = []
encoded_max_statement = max_statement
for layer_idx in range(self.t_iter + 1):
encoded_max_statement, prev_t_score = self.residual_temporal_predictor(layer_idx, encoded_max_statement)
t_score_container.append(prev_t_score.view(bsz, num_a, num_img, 2))
encoded_max_statement_container.append(encoded_max_statement)
if self.t_iter > 0:
temporal_scores_st_ed = 0.5 * (t_score_container[0] + torch.stack(t_score_container[:1]).mean(0))
else:
temporal_scores_st_ed = t_score_container[0]
temporal_scores_st_ed = mask_logits(temporal_scores_st_ed, ts_labels_mask.view(bsz, 1, num_img, 1))
stacked_max_statement = encoded_max_statement_container[0].view(bsz, num_a, num_img, -1)
if self.add_local:
max_max_statement, targets = self.get_proposals(stacked_max_statement, max_statement_mask, temporal_scores_st_ed, targets, ts_labels, max_num_proposal=max_num_proposal, iou_thd=iou_thd, ce_prob_thd=ce_prob_thd, extra_span_length=extra_span_length)
else:
max_max_statement = torch.max(mask_logits(stacked_max_statement, max_statement_mask), 2)[0]
answer_scores = self.classifier(max_max_statement).squeeze(2)
return answer_scores, targets, temporal_scores_st_ed
def get_ts_loss(self, temporal_scores, ts_labels, answer_indices):
"""
Args:
temporal_scores: (N, 5, Li, 2)
ts_labels: dict(st=(N, ), ed=(N, ))
answer_indices: (N, )
Returns:
"""
bsz = len(answer_indices)
ca_temporal_scores_st_ed = temporal_scores[torch.arange(bsz, dtype=torch.long), answer_indices]
loss_st = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 0], ts_labels['st'])
loss_ed = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 1], ts_labels['ed'])
return (loss_st + loss_ed) / 2.0
    @classmethod
    def sample_negatives(cls, pred_score, pos_indices, neg_indices, num_negatives=2, use_hard_negatives=False, negative_pool_size=0, num_hard=2, drop_topk=0):
        """ Sample negatives from a set of indices. Several sampling strategies are supported:
        1, random; 2, hard negatives; 3, drop_topk hard negatives; 4, mix easy and hard negatives
        5, sampling within a pool of hard negatives; 6, sample across images of the same video.
        Args:
            pred_score: (num_img, num_words, num_region)
            pos_indices: (N_pos, 3) all positive region indices for the same word, not necessarily the same image.
            neg_indices: (N_neg, 3) ...
            num_negatives (int): number of negatives per positive
            use_hard_negatives (bool): rank negatives by predicted score instead of sampling uniformly
            negative_pool_size (int): size of the high-score window to sample hard negatives from
            num_hard (int): how many of the num_negatives should be hard (rest are easy)
            drop_topk (int): skip the top-k scored negatives (they may be unlabeled positives)
        Returns:
            sampled_pos_indices, sampled_neg_indices: aligned 1:1, each (num_negatives * N_pos, 3)
        """
        num_unique_pos = len(pos_indices)
        # Repeat each positive num_negatives times so positives and negatives pair 1:1.
        sampled_pos_indices = torch.cat([pos_indices] * num_negatives, dim=0)
        if use_hard_negatives:
            # Rank all negatives by their predicted score, highest first.
            neg_scores = pred_score[neg_indices[:, 0], neg_indices[:, 1], neg_indices[:, 2]]
            max_indices = torch.sort(neg_scores, descending=True)[1].tolist()
            if negative_pool_size > num_negatives:
                # Sample from a fixed-size window of high scorers, after dropping
                # the very top-k (likely mislabeled positives).
                hard_pool = max_indices[drop_topk:drop_topk + negative_pool_size]
                hard_pool_indices = neg_indices[hard_pool]
                num_hard_negs = num_negatives
                sampled_easy_neg_indices = []
                if num_hard < num_negatives:
                    # Mix in easy negatives drawn from below the hard pool.
                    easy_pool = max_indices[drop_topk + negative_pool_size:]
                    easy_pool_indices = neg_indices[easy_pool]
                    num_hard_negs = num_hard
                    num_easy_negs = num_negatives - num_hard_negs
                    sampled_easy_neg_indices = easy_pool_indices[torch.randint(low=0, high=len(easy_pool_indices), size=(num_easy_negs * num_unique_pos,), dtype=torch.long)]
                sampled_hard_neg_indices = hard_pool_indices[torch.randint(low=0, high=len(hard_pool_indices), size=(num_hard_negs * num_unique_pos,), dtype=torch.long)]
                if len(sampled_easy_neg_indices) != 0:
                    sampled_neg_indices = torch.cat([sampled_hard_neg_indices, sampled_easy_neg_indices], dim=0)
                else:
                    sampled_neg_indices = sampled_hard_neg_indices
            else:
                # No pool configured: deterministically take the top scorers after drop_topk.
                sampled_neg_indices = neg_indices[max_indices[drop_topk:drop_topk + len(sampled_pos_indices)]]
        else:
            # Uniform random sampling (with replacement) over all negatives.
            sampled_neg_indices = neg_indices[torch.randint(low=0, high=len(neg_indices), size=(len(sampled_pos_indices),), dtype=torch.long)]
        return sampled_pos_indices, sampled_neg_indices
def get_att_loss(self, scores, att_labels, target, words, vid_names, qids, q_lens, img_indices, boxes, start_indices, num_negatives=2, use_hard_negatives=False, drop_topk=0):
""" compute ranking loss, use for loop to find the indices,
use advanced indexing to perform the real calculation
Build a list contains a quaduple
Args:
scores: cosine similarity scores (N, 5, Li, Lqa, Lr), in the range [-1, 1]
att_labels: list(tensor), each has dimension (#num_imgs, #num_words, #regions), not batched
target: 1D tensor (N, )
words: LongTensor (N, 5, Lqa)
vid_names: list(str) (N,)
qids: list(int), (N, )
q_lens: list(int), (N, )
img_indices: list(list(int)), (N, Li), or None
boxes: list(list(box)) of length N, each sublist represent an image,
each box contains the coordinates of xyxy, or None
num_negatives: number of negatives for each positive region
use_hard_negatives: use hard negatives, uselect negatives with high scores
drop_topk: drop topk highest negatives (since the top negatives might be correct, they are just not labeled)
start_indices (list of int): each element is an index (at 0.5fps) of the first image
with spatial annotation. If with_ts, set to zero
Returns:
att_loss: loss value for the batch
att_predictions: (list) [{"gt": gt_scores, "pred": pred_scores}, ], used to calculate att. accuracy
"""
pos_container = []
neg_container = []
for batch_idx in range(len(target)):
ca_idx = target[batch_idx].cpu().item()
gt_score = att_labels[batch_idx]
start_idx = start_indices[batch_idx]
num_img = len(gt_score)
sen_l, _ = gt_score[0].shape
pred_score = scores[batch_idx, ca_idx, :num_img, :sen_l]
batch_pos_indices = []
batch_neg_indices = []
for img_idx, img_gt_score in enumerate(gt_score):
img_idx = start_idx + img_idx
img_pos_indices = torch.nonzero(img_gt_score)
if len(img_pos_indices) == 0:
continue
img_pos_indices = torch.cat([img_pos_indices.new_full([len(img_pos_indices), 1], img_idx), img_pos_indices], dim=1)
img_neg_indices = torch.nonzero(img_gt_score == 0)
img_neg_indices = torch.cat([img_neg_indices.new_full([len(img_neg_indices), 1], img_idx), img_neg_indices], dim=1)
batch_pos_indices.append(img_pos_indices)
batch_neg_indices.append(img_neg_indices)
if len(batch_pos_indices) == 0:
continue
batch_pos_indices = torch.cat(batch_pos_indices, dim=0)
batch_neg_indices = torch.cat(batch_neg_indices, dim=0)
available_img_indices = batch_pos_indices[:, 0].unique().tolist()
for img_idx in available_img_indices:
img_idx_pos_indices = batch_pos_indices[batch_pos_indices[:, 0] == img_idx]
img_idx_neg_indices = batch_neg_indices[batch_neg_indices[:, 0] == img_idx]
available_word_indices = img_idx_pos_indices[:, 1].unique().tolist()
for word_idx in available_word_indices:
img_idx_word_idx_pos_indices = img_idx_pos_indices[img_idx_pos_indices[:, 1] == word_idx]
img_idx_word_idx_neg_indices = img_idx_neg_indices[img_idx_neg_indices[:, 1] == word_idx]
sampled_pos_indices, sampled_neg_indices = self.sample_negatives(pred_score, img_idx_word_idx_pos_indices, img_idx_word_idx_neg_indices, num_negatives=num_negatives, use_hard_negatives=use_hard_negatives, negative_pool_size=self.negative_pool_size, num_hard=self.num_hard, drop_topk=drop_topk)
base_indices = torch.LongTensor([[batch_idx, ca_idx]] * len(sampled_pos_indices))
pos_container.append(torch.cat([base_indices, sampled_pos_indices], dim=1))
neg_container.append(torch.cat([base_indices, sampled_neg_indices], dim=1))
pos_container = torch.cat(pos_container, dim=0)
neg_container = torch.cat(neg_container, dim=0)
att_predictions = None
if not self.training and self.vfeat_flag:
att_predictions = dict(det_q=[], det_ca=[])
unique_pos_container = np.unique(pos_container.cpu().numpy(), axis=0)
for row in unique_pos_container:
batch_idx, ca_idx, img_idx, word_idx, region_idx = row
start_idx = start_indices[batch_idx]
cur_q_len = q_lens[batch_idx]
num_region = att_labels[batch_idx][img_idx - start_idx].shape[1]
if len(scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu()) != len(boxes[batch_idx][img_idx - start_idx]):
None
None
None
None
raise AssertionError
cur_det_data = {'pred': scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu(), 'word': words[batch_idx, ca_idx, word_idx], 'qid': qids[batch_idx], 'vid_name': vid_names[batch_idx], 'img_idx': img_indices[batch_idx][img_idx], 'boxes': boxes[batch_idx][img_idx - start_idx]}
if word_idx < cur_q_len:
att_predictions['det_q'].append(cur_det_data)
else:
att_predictions['det_ca'].append(cur_det_data)
pos_scores = scores[pos_container[:, 0], pos_container[:, 1], pos_container[:, 2], pos_container[:, 3], pos_container[:, 4]]
neg_scores = scores[neg_container[:, 0], neg_container[:, 1], neg_container[:, 2], neg_container[:, 3], neg_container[:, 4]]
if self.att_loss_type == 'hinge':
att_loss = torch.clamp(self.margin + neg_scores - pos_scores, min=0).sum()
elif self.att_loss_type == 'lse':
att_loss = torch.log1p(torch.exp(self.alpha * (neg_scores - pos_scores))).sum()
else:
raise NotImplementedError('Only support hinge and lse')
return att_loss, att_predictions
    def get_att_prediction(self, scores, object_vocab, words, vid_names, qids, img_indices, boxes, start_indices, score_thd=0.2):
        """Collect, for every answer candidate, the boxes whose attention score
        clears score_thd for each object word in the QA text.
        Args:
            scores: cosine similarity scores (N, 5, Li, Lqa, Lr), in the range [-1, 1]
            object_vocab: list, object word ids in the vocabulary
            words: LongTensor (N, 5, Lqa)
            vid_names: list(str) (N,)
            qids: list(int), (N, )
            img_indices: list(list(int)), (N, Li), or None
            boxes: list(list(box)) of length N, each sublist represent an image,
                each box contains the coordinates of xyxy, or None
            start_indices (list of int): each element is an index (at 0.5fps) of the first image
                with spatial annotation. If with_ts, set to zero
            score_thd: only keep boxes with score higher than this value
        Returns:
            att_predictions: list of N dicts (one per sample), each mapping answer
                index (0-4) to a list of per-(image, word) detection dicts; None
                when the video-feature stream is disabled
        """
        att_predictions = None
        if self.vfeat_flag:
            att_predictions = []
            for batch_idx in range(len(scores)):
                start_idx = start_indices[batch_idx]
                q_att_predictions = dict()
                for ans_idx in range(5):
                    q_att_predictions[ans_idx] = []
                    for img_idx_local in range(len(boxes[batch_idx])):
                        # boxes are indexed from the first annotated image; shift to global.
                        img_idx_global = img_idx_local + start_idx
                        cur_img_scores = scores[batch_idx, ans_idx, img_idx_global]
                        cur_words = words[batch_idx, ans_idx].tolist()
                        cur_img_boxes = boxes[batch_idx][img_idx_local]
                        for word_idx, w in enumerate(cur_words):
                            # Only object words get grounded to regions.
                            if w in object_vocab:
                                cur_word_region_scores = cur_img_scores[word_idx].data.cpu().numpy()
                                # Keep regions above the threshold, sorted by ascending score.
                                accepted_region_ids = np.nonzero(cur_word_region_scores >= score_thd)[0].tolist()
                                accepted_region_scores = [float(cur_word_region_scores[i]) for i in accepted_region_ids]
                                accepted_region_boxes = [cur_img_boxes[i] for i in accepted_region_ids]
                                sorted_indices = np.argsort(accepted_region_scores)
                                accepted_region_scores = [accepted_region_scores[i] for i in sorted_indices]
                                accepted_region_boxes = [accepted_region_boxes[i] for i in sorted_indices]
                                cur_det_data = {'pred': accepted_region_scores, 'bbox': accepted_region_boxes, 'word': int(words[batch_idx, ans_idx, word_idx]), 'qid': int(qids[batch_idx]), 'vid_name': vid_names[batch_idx], 'img_idx': img_indices[batch_idx][img_idx_global]}
                                q_att_predictions[ans_idx].append(cur_det_data)
                att_predictions.append(q_att_predictions)
        return att_predictions
import torch
from torch.nn import MSELoss, ReLU
from _paritybench_helpers import _mock_config, _mock_layer, _paritybench_base, _fails_compile
# Auto-generated paritybench test matrix. Each entry pairs a module class with
# zero-argument factories for its constructor args and forward args, plus a
# flag for whether torch.jit compilation is expected to succeed.
TESTCASES = [
    # (nn.Module, init_args, forward_args, jit_compiles)
    (ContextQueryAttention,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
     True),
    (ConvLinear,
     lambda: ([], {'in_hsz': 4, 'out_hsz': 4}),
     lambda: ([torch.rand([4, 4, 4])], {}),
     True),
    (ConvRelu,
     lambda: ([], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}),
     lambda: ([torch.rand([4, 4])], {}),
     True),
    (DepthwiseSeparableConv,
     lambda: ([], {'in_ch': 4, 'out_ch': 4, 'k': 4}),
     lambda: ([torch.rand([4, 4, 4])], {}),
     True),
    (LinearWrapper,
     lambda: ([], {'in_hsz': 4, 'out_hsz': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (MultiHeadedAttention,
     lambda: ([], {'nh': 4, 'd_model': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     False),
    (NormalizeScale,
     lambda: ([], {'dim': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     False),
    (PositionEncoding,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 128])], {}),
     True),
    (StructuredAttention,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
     False),
]
class Test_jayleicn_TVQAplus(_paritybench_base):
    """Auto-generated test suite: each test checks one TESTCASES entry via the
    paritybench harness (`self._check`), one method per entry so failures are
    reported per module."""
    def test_000(self):
        self._check(*TESTCASES[0])
    def test_001(self):
        self._check(*TESTCASES[1])
    def test_002(self):
        self._check(*TESTCASES[2])
    def test_003(self):
        self._check(*TESTCASES[3])
    def test_004(self):
        self._check(*TESTCASES[4])
    def test_005(self):
        self._check(*TESTCASES[5])
    def test_006(self):
        self._check(*TESTCASES[6])
    def test_007(self):
        self._check(*TESTCASES[7])
    def test_008(self):
        self._check(*TESTCASES[8])
|
import os
import subprocess
import sys
# Repository root: two directories above this script.
SOURCE_ROOT = os.path.dirname(os.path.dirname(__file__))
def main():
    """Run the node script named by argv[1], forwarding the remaining args."""
    script_path = os.path.join(SOURCE_ROOT, sys.argv[1])
    cmd = ['node', script_path]
    cmd.extend(str(arg) for arg in sys.argv[2:])
    # check_call raises CalledProcessError (non-zero exit) on failure
    subprocess.check_call(cmd)
if __name__ == '__main__':
    sys.exit(main())
|
"""
用递归方法绘制谢尔宾斯基三角形
"""
import turtle
def sierpinski(degree, points):  # degree is the recursion depth
    """Recursively draw a Sierpinski triangle.

    Args:
        degree: recursion depth; 0 draws a single filled triangle.
        points: dict with 'left', 'right', 'top' corner coordinates.
    """
    colormap = ['red', 'blue', 'green', 'yellow', 'orange', 'white']
    # Cycle through the palette so any depth works
    # (plain colormap[degree] raised IndexError for degree > 5).
    drawTriangle(points, colormap[degree % len(colormap)])  # fill this degree's triangle
    if degree > 0:
        sierpinski(degree-1, {'left':points['left'],
                              'top':getMid(points['left'], points['top']),
                              'right':getMid(points['right'], points['left'])})  # lower-left triangle
        sierpinski(degree-1, {'left':getMid(points['left'], points['top']),
                              'top': points['top'],
                              'right':getMid(points['right'], points['top'])})  # top triangle
        sierpinski(degree-1, {'left':getMid(points['left'], points['right']),
                              'top': getMid(points['top'], points['right']),
                              'right':points['right']})  # lower-right triangle
def drawTriangle(points, color):
    """Draw one filled triangle through the three corner points."""
    t.fillcolor(color)
    t.penup()
    t.goto(points['top'])
    t.pendown()
    t.begin_fill()
    # trace left -> right -> back to top, closing the outline
    for corner in ('left', 'right', 'top'):
        t.goto(points[corner])
    t.end_fill()
def getMid(p1, p2):
    """Return the midpoint of the segment between points p1 and p2."""
    mid_x = (p1[0] + p2[0]) / 2
    mid_y = (p1[1] + p2[1]) / 2
    return (mid_x, mid_y)
# Shared turtle used by drawTriangle.
t = turtle.Turtle()
# Corners of the outermost triangle.
points = {
    'left':(-200, -100),
    'right':(200, -100),
    'top':(0, 200)
}
# Draw a depth-5 Sierpinski triangle, then keep the window open.
sierpinski(5, points)
turtle.done()
|
# file openemory/accounts/backends.py
#
# Copyright 2010 Emory University General Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.contrib.auth.models import User
from django.db.models import Q
from django.contrib.auth import get_user_model
from django_auth_ldap.backend import LDAPBackend
from openemory.accounts.models import EsdPerson, UserProfile
class FacultyOrLocalAdminBackend(LDAPBackend):
    '''Customized authentication backend based on
    :class:`~eullocal.django.emory_ldap.backends.EmoryLDAPBackend`.
    Only users who are designated as Faculty, in ESD or local users who
    are designated as superusers, Site Admins, or non-faculty with the
    nonfaculty_profile flag set are allowed to log in.
    '''
    def authenticate(self, username=None, password=None):
        '''Authenticate via LDAP, but only for permitted accounts.

        Returns None (authentication refused) when no username is supplied
        or the account is neither ESD faculty nor a privileged local user.
        '''
        # Django may probe backends without a username; the ESD lookup below
        # calls username.upper(), so bail out early to avoid AttributeError.
        if username is None:
            return None
        # Only authenticate users who are flagged as faculty in ESD
        # or local accounts with superuser permission, 'Site Admin' role
        # or nonfaculty_flag set
        if User.objects.filter(username=username)\
                       .filter(Q(is_superuser=True) | Q(groups__name='Site Admin') | \
                               Q(userprofile__nonfaculty_profile=True))\
                       .exists() or \
                       EsdPerson.faculty.filter(netid=username.upper()).exists():
            return super(FacultyOrLocalAdminBackend, self).authenticate(username=username,
                                                                        password=password)
        # Explicitly refuse everyone else (previously an implicit None return).
        return None
# TODO: Django backends can optionally support per-object
# permissions, which would probably make author-specific
# permissions checks cleaner
#
# supports_object_permissions = True
#
# def has_perm(self, user, perm, obj=None):
# ...
# if obj is set and is an Article;
# check if user.username is in obj.owner list for author permissions
# (how to determine author permissions?)
#
    # NOTE: adding this may also require a small template filter
    # to allow passing an object to the has_perm method
|
"""Small demo of pandas plotting for Series and DataFrame data."""
import pandas as pd
import numpy as np
# BUGFIX: this previously did `import matplotlib as plt` and called
# `plt.pyplot.show()`, which only works because pandas imports the pyplot
# submodule as a side effect. Import pyplot directly, the supported idiom.
import matplotlib.pyplot as plt
from pandas import DataFrame, Series

# --- Series: random walk via cumulative sum (overwritten below) ---
data = Series(np.random.randn(1000), index=np.arange(1000))
data = data.cumsum()

# --- DataFrame: four random-walk columns ---
data = DataFrame(np.random.randn(1000, 4), index=np.arange(1000), columns=list('ABCD'))
data = data.cumsum()
print(data.head())
# data.plot()
ax = data.plot.scatter(x='A', y='B', color='DarkBlue', label='Class 1')
# draw the second class on the same axes
data.plot.scatter(x='A', y='C', color='DarkGreen', label='Class 2', ax=ax)
plt.show()

# Other plot methods:
# 'bar', 'hist', 'box', 'kde', 'area', 'scatter', 'hexbin', 'pie'
|
import inspect
import logging
import os
import re
import subprocess
from datetime import timedelta, datetime
from typing import List, Optional, Dict, Tuple, Union
from pyhttpd.certs import CertificateSpec
from pyhttpd.env import HttpdTestEnv, HttpdTestSetup
from pyhttpd.result import ExecResult
log = logging.getLogger(__name__)
class TlsTestSetup(HttpdTestSetup):
    """Test setup for the mod_tls suite: registers this file's directory as a
    source dir and loads the httpd modules the TLS tests rely on."""
    def __init__(self, env: 'HttpdTestEnv'):
        super().__init__(env=env)
        self.add_source_dir(os.path.dirname(inspect.getfile(TlsTestSetup)))
        self.add_modules(["tls", "http2", "cgid", "watchdog", "proxy_http2"])
class TlsCipher:
    """A TLS cipher suite: IANA-style name plus its OpenSSL equivalent.

    When no explicit `openssl` name is given, it is derived from `name`:
    TLS 1.3 suites keep the underscored IANA name (only the TLS13_ prefix
    changes), while TLS 1.2 suites are rewritten to OpenSSL's abbreviated
    hyphenated form.
    """

    def __init__(self, id: int, name: str, flavour: str,
                 min_version: float, max_version: float = None,
                 openssl: str = None):
        self.id = id
        self.name = name
        self.flavour = flavour
        self.min_version = min_version
        # no explicit max: the suite is valid only for its minimum version
        self.max_version = max_version if max_version is not None else self.min_version
        if openssl is None:
            if name.startswith('TLS13_'):
                # OpenSSL keeps TLS 1.3 names underscored, e.g.
                # TLS13_AES_128_GCM_SHA256 -> TLS_AES_128_GCM_SHA256.
                openssl = re.sub(r'^TLS13_', 'TLS_', name)
            else:
                # BUGFIX: these rewrites previously ran for BOTH branches,
                # mangling TLS 1.3 names (e.g. TLS-AES128-GCM-SHA256).
                # They implement OpenSSL's abbreviated TLS 1.2 naming, e.g.
                # TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 -> ECDHE-RSA-AES256-GCM-SHA384.
                openssl = re.sub(r'^TLS_', '', name)
                openssl = re.sub(r'_WITH_([^_]+)_', r'_\1_', openssl)
                openssl = re.sub(r'_AES_(\d+)', r'_AES\1', openssl)
                openssl = re.sub(r'(_POLY1305)_\S+$', r'\1', openssl)
                openssl = re.sub(r'_', '-', openssl)
        self.openssl_name = openssl
        self.id_name = "TLS_CIPHER_0x{0:04x}".format(self.id)

    def __repr__(self):
        return self.name

    def __str__(self):
        return self.name
class TlsTestEnv(HttpdTestEnv):
CURL_SUPPORTS_TLS_1_3 = None
    @classmethod
    def curl_supports_tls_1_3(cls) -> bool:
        """Report (cached in CURL_SUPPORTS_TLS_1_3) whether the installed curl
        can be asked for TLSv1.3. Currently hard-wired to False, see below."""
        if cls.CURL_SUPPORTS_TLS_1_3 is None:
            # Unfortunately, there is no reliable, platform-independent
            # way to verify that TLSv1.3 is properly supported by curl.
            #
            # p = subprocess.run(['curl', '--tlsv1.3', 'https://shouldneverexistreally'],
            #                    stderr=subprocess.PIPE, stdout=subprocess.PIPE)
            # return code 6 means the site could not be resolved, but the
            # tls parameter was recognized
            cls.CURL_SUPPORTS_TLS_1_3 = False
        return cls.CURL_SUPPORTS_TLS_1_3
# current rustls supported ciphers in their order of preference
# used to test cipher selection, see test_06_ciphers.py
RUSTLS_CIPHERS = [
TlsCipher(0x1303, "TLS13_CHACHA20_POLY1305_SHA256", "CHACHA", 1.3),
TlsCipher(0x1302, "TLS13_AES_256_GCM_SHA384", "AES", 1.3),
TlsCipher(0x1301, "TLS13_AES_128_GCM_SHA256", "AES", 1.3),
TlsCipher(0xcca9, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "ECDSA", 1.2),
TlsCipher(0xcca8, "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "RSA", 1.2),
TlsCipher(0xc02c, "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "ECDSA", 1.2),
TlsCipher(0xc02b, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "ECDSA", 1.2),
TlsCipher(0xc030, "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "RSA", 1.2),
TlsCipher(0xc02f, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "RSA", 1.2),
]
def __init__(self, pytestconfig=None):
super().__init__(pytestconfig=pytestconfig)
self._domain_a = "a.mod-tls.test"
self._domain_b = "b.mod-tls.test"
self.add_httpd_conf([
f'<Directory "{self.server_dir}/htdocs/{self.domain_a}">',
' AllowOverride None',
' Require all granted',
' AddHandler cgi-script .py',
' Options +ExecCGI',
'</Directory>',
f'<Directory "{self.server_dir}/htdocs/{self.domain_b}">',
' AllowOverride None',
' Require all granted',
' AddHandler cgi-script .py',
' Options +ExecCGI',
'</Directory>',
f'<VirtualHost *:{self.http_port}>',
' ServerName localhost',
' DocumentRoot "htdocs"',
'</VirtualHost>',
f'<VirtualHost *:{self.http_port}>',
f' ServerName {self.domain_a}',
' DocumentRoot "htdocs/a.mod-tls.test"',
'</VirtualHost>',
f'<VirtualHost *:{self.http_port}>',
f' ServerName {self.domain_b}',
' DocumentRoot "htdocs/b.mod-tls.test"',
'</VirtualHost>',
])
self.add_cert_specs([
CertificateSpec(domains=[self.domain_a]),
CertificateSpec(domains=[self.domain_b], key_type='secp256r1', single_file=True),
CertificateSpec(domains=[self.domain_b], key_type='rsa4096'),
CertificateSpec(name="clientsX", sub_specs=[
CertificateSpec(name="user1", client=True, single_file=True),
CertificateSpec(name="user2", client=True, single_file=True),
CertificateSpec(name="user_expired", client=True,
single_file=True, valid_from=timedelta(days=-91),
valid_to=timedelta(days=-1)),
]),
CertificateSpec(name="clientsY", sub_specs=[
CertificateSpec(name="user1", client=True, single_file=True),
]),
CertificateSpec(name="user1", client=True, single_file=True),
])
self.add_httpd_log_modules(['tls'])
def setup_httpd(self, setup: TlsTestSetup = None):
if setup is None:
setup = TlsTestSetup(env=self)
super().setup_httpd(setup=setup)
@property
def domain_a(self) -> str:
return self._domain_a
@property
def domain_b(self) -> str:
return self._domain_b
def tls_get(self, domain, paths: Union[str, List[str]], options: List[str] = None, no_stdout_list = False) -> ExecResult:
if isinstance(paths, str):
paths = [paths]
urls = [f"https://{domain}:{self.https_port}{path}" for path in paths]
return self.curl_raw(urls=urls, options=options, no_stdout_list=no_stdout_list)
def tls_get_json(self, domain: str, path: str, options=None):
r = self.tls_get(domain=domain, paths=path, options=options)
return r.json
def run_diff(self, fleft: str, fright: str) -> ExecResult:
return self.run(['diff', '-u', fleft, fright])
def openssl(self, args: List[str]) -> ExecResult:
return self.run(['openssl'] + args)
def openssl_client(self, domain, extra_args: List[str] = None) -> ExecResult:
args = ["s_client", "-CAfile", self.ca.cert_file, "-servername", domain,
"-connect", "localhost:{port}".format(
port=self.https_port
)]
if extra_args:
args.extend(extra_args)
args.extend([])
return self.openssl(args)
OPENSSL_SUPPORTED_PROTOCOLS = None
@staticmethod
def openssl_supports_tls_1_3() -> bool:
if TlsTestEnv.OPENSSL_SUPPORTED_PROTOCOLS is None:
env = TlsTestEnv()
r = env.openssl(args=["ciphers", "-v"])
protos = set()
ciphers = set()
for line in r.stdout.splitlines():
m = re.match(r'^(\S+)\s+(\S+)\s+(.*)$', line)
if m:
ciphers.add(m.group(1))
protos.add(m.group(2))
TlsTestEnv.OPENSSL_SUPPORTED_PROTOCOLS = protos
TlsTestEnv.OPENSSL_SUPPORTED_CIPHERS = ciphers
return "TLSv1.3" in TlsTestEnv.OPENSSL_SUPPORTED_PROTOCOLS
|
# Copyright (c) 2020 Hassan Abouelela
# Licensed under the MIT License
import asyncio
import json
import logging
import os
import aiohttp
import discord as discord
import math
from discord.ext import commands
from python import articlesearch
# Module logger: INFO level, written to galnet_discord.log with timestamped
# records. mode="w" truncates the log on every start.
logger = logging.getLogger("galnet_discord")
logger.setLevel(logging.INFO)
handler = logging.FileHandler(filename="galnet_discord.log", encoding="utf-8", mode="w")
handler.setFormatter(logging.Formatter("%(asctime)s:%(levelname)s:%(name)s: %(message)s"))
logger.addHandler(handler)
# Loading Settings
def download_settings():
    """Download the default BotSettings.json template from GitHub.

    Writes the fetched JSON (re-indented) to ./BotSettings.json. If the
    request does not return HTTP 200, no file is written — the caller
    detects the missing file and aborts with instructions.
    """
    async def fetch_settings():
        async with aiohttp.ClientSession() as settings_session:
            async with settings_session.get(
                    "https://raw.githubusercontent.com/HassanAbouelela/Galnet-Newsfeed/"
                    "4499a01e6b5a679b807e95697effafde02f8d5e0/discord/BotSettings.json") as response:
                if response.status == 200:
                    raw_json = json.loads(await response.read())
                    with open("BotSettings.json", "w+") as file:
                        json.dump(raw_json, file, indent=2)

    # asyncio.run (3.7+) creates and tears down its own event loop;
    # asyncio.get_event_loop().run_until_complete is deprecated for this use.
    asyncio.run(fetch_settings())
# Bootstrap: make sure a settings file exists before anything else runs.
if not os.path.exists("BotSettings.json"):
    download_settings()
    raise RuntimeError("Please fill in bot settings file: `BotSettings.json`")
with open("BotSettings.json") as settings_file:
    settings = json.load(settings_file)
# NOTE(review): missing keys are only printed as a warning here; later code
# indexes settings["PREFIX"] / ["TOKEN"] directly and would raise KeyError.
if not all(key in settings.keys() for key in ("Maintainer-ID", "TOKEN", "PREFIX")):
    print(RuntimeWarning("Error reading bot settings file."))
# "_mention" selects mention-prefix mode; otherwise a comma-separated prefix list.
if settings["PREFIX"] == "_mention":
    settings["PREFIX"] = commands.when_mentioned
else:
    settings["PREFIX"] = settings["PREFIX"].split(",")
bot = commands.Bot(command_prefix=settings["PREFIX"], case_insensitive=True, help_command=None)
@bot.event
async def on_command_error(ctx, error):
    """Global command error handler.

    Maps common command errors to user-friendly messages; unexpected
    errors are logged and re-raised so they surface in the console.
    """
    if isinstance(error, commands.BadArgument):
        if ctx.invoked_with == "read":
            await ctx.send("That ID is not valid. The ID must be a number.")
        else:
            await ctx.send(f"{error.args}. Please send this to the developer.")
        return
    elif isinstance(error, commands.CommandNotFound):
        if settings["PREFIX"] == commands.when_mentioned:
            await ctx.send(f"That is not a valid command. Try: {bot.get_user(bot.user.id).mention} help")
        else:
            await ctx.send(f"That is not a valid command. Try: {settings['PREFIX']}help")
        return
    # MissingPermissions is a subclass of CheckFailure; both previously had
    # identical, duplicated branches — merged into one.
    elif isinstance(error, (commands.CheckFailure, commands.MissingPermissions)):
        await ctx.send("You don't have permission to use this command.")
        return
    elif isinstance(error, commands.CommandInvokeError):
        # isinstance instead of `type(...) ==` so Forbidden subclasses match too.
        if isinstance(error.original, discord.errors.Forbidden):
            try:
                await ctx.send("The bot doesn't have permission to send embeds here.")
            except discord.errors.Forbidden:
                logger.warning(error)
                raise error
        else:
            logger.warning(error)
            raise error
        return
    elif isinstance(error, commands.MissingRequiredArgument):
        if ctx.invoked_with.lower() == "search":
            if settings["PREFIX"] == commands.when_mentioned:
                await ctx.send(f"That is an invalid query. Try: {bot.get_user(bot.user.id).mention} help search")
            else:
                await ctx.send(f"That is an invalid query. Try: {settings['PREFIX']}help search")
            return
        elif ctx.invoked_with.lower() == "count":
            if settings["PREFIX"] == commands.when_mentioned:
                await ctx.send(f"Count requires at least one search term."
                               f" Try: {bot.get_user(bot.user.id).mention} help count")
            else:
                await ctx.send(f"Count requires at least one search term."
                               f" Try: {settings['PREFIX']}help count")
            return
        else:
            if settings["PREFIX"] == commands.when_mentioned:
                await ctx.send(f"A required argument is missing."
                               f" Try: {bot.get_user(bot.user.id).mention} help {ctx.invoked_with.lower()}")
            else:
                await ctx.send(f"A required argument is missing."
                               f" Try: {settings['PREFIX']}help {ctx.invoked_with.lower()}")
            return
    # Anything unhandled: record it and let it propagate.
    logger.error(error)
    raise error
@bot.event
async def on_ready():
    """Announce startup and set the presence from the prefix configuration."""
    print("(Re)Started")
    mention_mode = settings["PREFIX"] == commands.when_mentioned
    if mention_mode:
        presence_name = f"@{bot.user.name} help"
    else:
        presence_name = f"{settings['PREFIX']}help"
    await bot.change_presence(activity=discord.Game(name=presence_name))
@bot.command()
async def ping(ctx):
    """Reply with the current websocket latency in milliseconds."""
    latency_ms = round(bot.latency * 1000)
    await ctx.send(f"Pong `{latency_ms} ms`")
@bot.command()
@commands.is_owner()
async def stop(ctx):
    """Owner-only shutdown: notify console, log and maintainer, then close."""
    try:
        notice = "Bot has been turned off by: {}".format(ctx.author)
        print(notice)
        logger.warning(notice)
        maintainer = bot.get_user(int(settings["Maintainer-ID"]))
        await maintainer.send(notice)
    finally:
        await bot.close()
@bot.command()
async def search(ctx, *, terms):
    """Search the article database and page through results via reactions.

    Results are shown 8 per embed page. The command author can react with
    a keycap number to open that result, or with the rewind/fast-forward
    arrows to change page. The interaction times out after 120 seconds.
    """
    temp_msg = await ctx.send("Searching")
    results = await articlesearch.search(terms)
    # final maps the option number shown on the current page -> article ID
    final = {}
    # Page count: 8 results per page, rounded up; at least one page when the
    # backend reports matches.
    embeds = math.floor(len(results[0]) / 8)
    if len(results[0]) % 8:
        embeds += 1
    if embeds == 0 and results[1] > 0:
        embeds = 1
    current_embed = 1
    # Keycap emoji "1"-"8" (digit + U+20E3 combining enclosing keycap)
    numbers = ["\u0031\u20E3",
               "\u0032\u20E3",
               "\u0033\u20E3",
               "\u0034\u20E3",
               "\u0035\u20E3",
               "\u0036\u20E3",
               "\u0037\u20E3",
               "\u0038\u20E3"]
    # Slice bounds of the page currently displayed
    start = 0
    end = 8
    await temp_msg.delete()
    if results[1] == 0:
        await ctx.send("No results match your query")
        return
    cont = True
    while cont:
        embed = discord.Embed(
            title=f"Here are your search results | Page {current_embed} / {embeds}",
            color=discord.Color.orange()
        )
        embed.set_footer(text=f"{results[1]} Results Found")
        embed.add_field(name="Key", value="ID | Title | Date Released", inline=False)
        i = 1
        for row in results[0][start:end]:
            embed.add_field(name=f"Option {i}", value=f"{row['ID']} | {row['Title']} | "
                                                      f"{row['dateReleased'].strftime('%d %b %Y')}", inline=False)
            final[i] = row['ID']
            i += 1
        message = await ctx.send(embed=embed)
        # Remember which results message belongs to this invocation so the
        # reaction check below only accepts reactions on our own message.
        ids = {ctx.message.id: message.id}
        number = 0
        if current_embed > 1:
            await message.add_reaction("\u23EA")  # rewind: previous page
        while number < len(results[0][start:end]):
            await message.add_reaction(numbers[number])
            number += 1
        if current_embed < embeds:
            await message.add_reaction("\u23E9")  # fast-forward: next page
        def check(payload):
            # Accept only the author's reactions on our message, limited to
            # keycap numbers and the paging arrows.
            if payload.user_id != ctx.author.id:
                return False
            if payload.message_id != ids[ctx.message.id]:
                return False
            if payload.emoji.name in numbers:
                pass
            elif payload.emoji.name == "\u23E9" or payload.emoji.name == "\u23EA":
                pass
            else:
                return False
            return True
        try:
            reaction = await bot.wait_for("raw_reaction_add", timeout=120.0, check=check)
            if reaction.emoji.name == "\u23E9":
                # Next page: drop the old message and shift the slice window.
                await message.delete()
                current_embed += 1
                start += 8
                end += 8
            elif reaction.emoji.name == "\u23EA":
                # Previous page.
                await message.delete()
                current_embed -= 1
                start -= 8
                end -= 8
            elif reaction.emoji.name in numbers:
                # A result was picked: send the article embed and stop paging.
                result = await command_read(final[numbers.index(reaction.emoji.name) + 1])
                await ctx.send(embed=result[0])
                await message.delete()
                cont = False
        except asyncio.TimeoutError:
            try:
                await ctx.send(f"Are you still there {ctx.author.mention}? Your search timed out, please start over.")
                await message.clear_reactions()
                cont = False
            except discord.Forbidden:
                cont = False
                pass
@search.error
async def search_error(ctx, error):
    """Swallow invoke errors raised by the search command."""
    if isinstance(error, commands.CommandInvokeError):
        return ctx
@bot.command()
async def count(ctx, *, terms):
    """Report how many articles match the given search terms."""
    total = await articlesearch.count(terms)
    await ctx.send(f"{total} results found.")
@bot.command()
async def update(ctx):
    """Manually trigger an article update, then acknowledge."""
    await command_update()
    await ctx.send("Done")
async def command_update():
    """Fetch new articles and announce them in every registered news channel.

    Channels the bot can no longer access (get_channel returns None ->
    AttributeError on .send) are pruned from newschannels.txt.
    """
    def prune_channel(channelid):
        # Drop an unreachable channel from the persisted news list.
        with open("newschannels.txt", "r") as newslist:
            lines = newslist.readlines()
        with open("newschannels.txt", "w") as newslist:
            for line in lines:
                if line != channelid:
                    newslist.write(line)

    result = await articlesearch.update()
    # Guard early: without this, a falsy result could fall through to use an
    # undefined article_number below.
    if not result:
        return
    article_number = result[0]
    article_uids = result[1]
    if not os.path.exists("newschannels.txt"):
        return
    with open("newschannels.txt", "r") as file:
        if int(article_number) == 1:
            announcement = "1 new article added"
        else:
            announcement = f"{article_number} new articles added"
        for channelid in file.readlines():
            try:
                await bot.get_channel(int(channelid)).send(announcement)
            except AttributeError:
                prune_channel(channelid)
        for article in article_uids:
            row = await articlesearch.read(uid=article)
            file.seek(0)  # re-read the channel list for each article
            for channelid in file.readlines():
                try:
                    embed = await command_read(0, row)
                    try:
                        await bot.get_channel(int(channelid)).send(embed=embed[0])
                    except discord.HTTPException as e:
                        import datetime
                        # Bug fix: .format() was applied to the coroutine
                        # returned by send(), not to the message string,
                        # which raised AttributeError before the await.
                        await bot.get_user(int(settings["Maintainer-ID"])).send(
                            "Error updating news base. Message too long, could not fix `CS{}-{}`"
                            " .\nText: {}. \nCode: {}.".format(
                                datetime.datetime.now().strftime('%d%m%y%H%M'),
                                article, e.text, e.code))
                except AttributeError:
                    pass
@bot.command()
async def read(ctx, articleid: int):
    """Send the article with the given ID as an embed.

    Falls back to a website link plus an error code when Discord rejects
    the embed (HTTPException, e.g. message too long).
    """
    result = await command_read(articleid)
    if result == "Nothing Found":
        await ctx.send(result)
    else:
        try:
            await ctx.send(embed=result[0])
        except discord.HTTPException:
            import datetime
            # result[1] is the article UID used to build the fallback link.
            await ctx.send("An error was encountered. For now, feel free to read the article on the official website:"
                           f"\nhttps://community.elitedangerous.com/galnet/uid/{result[1]}"
                           "\n\nPlease submit a report at the issues page, and include this error code"
                           f" `CS{datetime.datetime.now().strftime('%d%m%y%H%M')}-{articleid}` and a brief description"
                           " of what happened:"
                           "\n<https://github.com/HassanAbouelela/Galnet-Newsfeed/issues/new?assignees=&labels"
                           "=&template=bug_report.md&title=Bug>")
        return
async def command_read(articleid: int, command_up: tuple = False):
    """Build the Discord embed for an article.

    Args:
        articleid: ID to look up when command_up is falsy.
        command_up: optional pre-fetched row set (as returned by
            articlesearch.read); when given, the DB lookup is skipped.

    Returns:
        [embed, UID] on success, or the string "Nothing Found".
    """
    if not command_up:
        row = await articlesearch.read(articleid)
    else:
        row = command_up
    if not row:
        return "Nothing Found"
    row = row[0]
    # Making sure the message fits — Discord limits: 6000 chars per embed
    # overall, 256 for the title, 2048 for the description.
    remaining = 6000
    sixk = False
    title = row["Title"]
    description = row["Text"].replace("\n", "\n\n")
    footer = (f"ID: {row['ID']}"
              f" | Date Released: {row['dateReleased'].strftime('%d %b %Y')}"
              f" | Date Indexed: {row['dateAdded'].strftime('%d %b %Y')}")
    if len(title) + len(description) + len(footer) > 6000:
        # Trim the title at a word boundary when possible.
        if len(title) > 256:
            if title[:250].rfind(" ") != -1:
                title = title[:title[:250].rfind(" ")] + "..."
            else:
                title = title[:256]
        remaining -= (len(title) + len(footer))
        sixk = True
    # NOTE(review): duplicates the title trimming above; also covers the case
    # where the total fits but the title alone exceeds 256 chars.
    if len(title) > 256:
        if title[:250].rfind(" ") != -1:
            title = title[:title[:250].rfind(" ")] + "..."
        else:
            title = title[:256]
    if len(description) + len(footer) > 2048 or sixk:
        remaining_len = 2048 - len(footer)
        # Use the tighter of the two budgets (overall vs description limit).
        if remaining < remaining_len:
            pass
        else:
            remaining = remaining_len
        # Prefer cutting at a sentence boundary, then append a "[...]" link.
        # NOTE(review): the search window uses remaining - 10 but the cut uses
        # remaining — looks inconsistent; confirm intended.
        if description[:remaining - 10].rfind(".") != -1:
            description = description[:description[:remaining].rfind(".")] +\
                f" [[...]](http://community.elitedangerous.com/galnet/uid/{row['UID']})"
        else:
            description = description[:remaining - 5] +\
                f" [[...]](http://community.elitedangerous.com/galnet/uid/{row['UID']})"
    embed = discord.Embed(
        title=title,
        url=f"http://community.elitedangerous.com/galnet/uid/{row['UID']}",
        description=description,
        color=discord.Color.orange()
    )
    embed.set_footer(text=footer)
    return [embed, row["UID"]]
@bot.command()
async def newschannel(ctx):
    """Toggle the current channel's membership in the news channel list.

    Requires the Manage Channels permission (always allowed in DMs, where
    guild is None). Channel IDs are persisted one per line in
    newschannels.txt.
    """
    if ctx.message.guild is None or ctx.author.guild_permissions.manage_channels:
        keep = True  # True -> channel not listed yet, so this call adds it
        with open("newschannels.txt", "a+") as newslist:
            newslist.seek(0)  # "a+" opens positioned at EOF; rewind to read
            for line in newslist.readlines():
                if str(ctx.channel.id) in str(line):
                    keep = False
        if keep:
            with open("newschannels.txt", "a+") as newslist:
                newslist.write(f"{str(ctx.channel.id)}\n")
            await ctx.send("Channel added to newslist.")
        else:
            # Remove: rewrite the list into a temp file without this channel,
            # then swap it into place.
            with open("newschannels.txt", "r") as old:
                with open("tempchannelslist.txt", "w") as new:
                    for line in old:
                        if str(ctx.channel.id) not in str(line):
                            new.write(line)
            os.remove("newschannels.txt")
            os.rename("tempchannelslist.txt", "newschannels.txt")
            await ctx.send("Channel removed from newslist.")
    else:
        await ctx.send("You don't have permission to use this command.")
@bot.command()
async def help(ctx, command: str = None):
    """Send the command overview embed, or detailed help for one command.

    The generic overview embed is always built first and is replaced when
    a known command name is given; unknown names fall through to the
    overview.
    """
    embed = discord.Embed(
        title="Galnet Commands",
        description="These are the available commands. To learn more about any command, type: `help command`",
        color=discord.Color.orange()
    )
    embed.add_field(name="Ping", value="Checks if the bot is online.", inline=False)
    embed.add_field(name="Search",
                    value="Searches the database based on the given options. Format: search --options keywords",
                    inline=False)
    embed.add_field(name="Count",
                    value="Counts the amount of the given input. Format: count --options keywords",
                    inline=False)
    embed.add_field(name="Update", value="Checks for new articles", inline=False)
    embed.add_field(name="Read", value="Opens an article for reading. Format: read (id)", inline=False)
    embed.add_field(name="NewsChannel", value="Marks the channel where this command is run as a news channel",
                    inline=False)
    embed.add_field(name="Source", value="Links to [github page](https://github.com/HassanAbouelela/Galnet-Newsfeed/"
                                         "wiki), and [bot invite link](https://discordapp.com/oauth2/authorize?client"
                                         "_id=624620325090361354&permissions=379968&scope=bot)", inline=False)
    embed.add_field(name="Bugs", value="[Link to submit bugs/feedback.](https://github.com/"
                                       "HassanAbouelela/Galnet-Newsfeed/issues/new)", inline=False)
    embed.add_field(name="Help", value="This menu. Format: help (command)", inline=False)
    if command:
        # Normalize the requested command name, then swap in a detail embed.
        command = command.lower().strip()
        if command == "ping":
            embed = discord.Embed(
                title="Ping",
                description="Check if the bot is online",
                color=discord.Color.orange()
            )
        elif command == "search":
            embed = discord.Embed(
                title="Search",
                description="Searches the database based on the given options. "
                            "You can read any result by clicking the matching ID below the result, "
                            "or by using the `read` command.",
                color=discord.Color.orange()
            )
            embed.add_field(name="Format", value="search --options keywords", inline=False)
            embed.add_field(name="Options", value="""
            All options must be preceded by (--).
            A full list is [available here.](https://github.com/HassanAbouelela/Galnet-Newsfeed/wiki/Usage#search)
            - title: Searches only in the titles of the articles (default search mode)
            - content: Searches only in the content of an article, and ignores the title
            - searchall: Searches both title and content of an article
            - searchreverse: Searches the DB from the oldest article
            - limit: Returns only the latest results up to number given (default 5). Format: limit=XYZ
            - limitall: Returns all results found
            - before: Looks for articles that were written before a given date. Format: YYYY-MM-DD
            - after: Looks for articles that were written after a given date. Format: YYYY-MM-DD
            (If both the --after & --before tags are given, the search is limited to the dates between both options.)
            """, inline=False)
        elif command == "count":
            embed = discord.Embed(
                title="Count",
                description="Counts the amount of articles that fit the given conditions.",
                color=discord.Color.orange()
            )
            embed.add_field(name="Format", value="count --options keywords", inline=False)
            embed.add_field(name="Options", value="""
            All options must be preceded by (--).
            A full list is [available here.](https://github.com/HassanAbouelela/Galnet-Newsfeed/wiki/Usage#count)
            - title: Counts the amount of articles that contain a certain term in the title.
            - content: Counts the amount of articles that contain a certain term only in their content.
            - all: Counts the amount of articles that contain a certain term in either the title or the content.
            - before: Counts the amount of articles before a given date. Format: YYYY-MM-DD
            - after: Counts the amount of articles after a given date. Format: YYYY-MM-DD
            (If both the --after & --before tags are given, the search is limited to the dates between both options.)
            """, inline=False)
        elif command == "update":
            embed = discord.Embed(
                title="Update",
                description="Checks for new articles",
                color=discord.Color.orange()
            )
        elif command == "read":
            embed = discord.Embed(
                title="Read",
                description="Sends an article to read. (Embeds must be enabled)",
                color=discord.Color.orange()
            )
            embed.add_field(name="Format", value="read ID", inline=False)
        elif command == "newschannel":
            embed = discord.Embed(
                title="NewsChannel",
                description="Marks the channel where this command is run as a news channel",
                color=discord.Color.orange()
            )
            embed.add_field(name="Extra Info.", value="The bot must have message access, and embed permissions"
                                                      " in this channel. The \"Manage Channel\" permission is required"
                                                      " to use this command.", inline=False)
        elif command == "source":
            embed = discord.Embed(
                title="Source",
                description="Links to [github page](https://github.com/HassanAbouelela/Galnet-Newsfeed/wiki),"
                            " and [bot invite link](https://discordapp.com/oauth2/authorize?client"
                            "_id=624620325090361354&permissions=379968&scope=bot)",
                color=discord.Color.orange()
            )
        elif command == "bugs":
            embed = discord.Embed(
                title="Bugs",
                description="[Link to submit bugs/feedback.](https://github.com/"
                            "HassanAbouelela/Galnet-Newsfeed/issues/new)",
                color=discord.Color.orange()
            )
        elif command == "help":
            embed = discord.Embed(
                title="Help",
                description="Sends help for a command.",
                color=discord.Color.orange()
            )
            embed.add_field(name="Format", value="help (command)", inline=False)
    await ctx.send(embed=embed)
async def sync():
    """Background task: refresh presence and poll for new articles every 15 min."""
    await bot.wait_until_ready()
    while not bot.is_closed():
        # Consistency fix: mirror on_ready() — only advertise "@bot help"
        # when the bot is actually mention-prefixed; previously this loop
        # unconditionally overwrote the presence with the mention form.
        if settings["PREFIX"] == commands.when_mentioned:
            await bot.change_presence(activity=discord.Game(name=f"@{bot.user.name} help"))
        else:
            await bot.change_presence(activity=discord.Game(name=f"{settings['PREFIX']}help"))
        await command_update()
        await asyncio.sleep(900)
# Schedule the periodic update task on the bot's loop, then start the bot
# (blocking until shutdown).
bg_task = bot.loop.create_task(sync())
bot.run(settings["TOKEN"])
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 12 14:08:16 2019
@author: Manuel Camargo
"""
import itertools
from support_modules import support as sup
import os
import random
import time
import pandas as pd
import numpy as np
# =============================================================================
# Support
# =============================================================================
def create_file_list(path):
    """Return the names of all files found anywhere under *path*."""
    return [name
            for _, _, names in os.walk(path)
            for name in names]
# =============================================================================
# Experiments definition
# =============================================================================
def configs_creation(parms):
    """Build the experiment configurations requested by parms['config_type']."""
    build = _get_configurer(parms['config_type'])
    return build(parms)
def _get_configurer(config_type):
    """Map a config_type keyword to its configurer function."""
    configurers = {
        'single': _configure_single,
        'random': _configure_random,
        'load': _configure_load,
    }
    try:
        return configurers[config_type]
    except KeyError:
        raise ValueError(config_type)
def _configure_single(parms):
    """Return one fixed configuration per model in the global model_type."""
    base = dict(
        lstm_act='relu',
        dense_act=None,
        optimizers='Adam',
        norm_method='lognorm',
        n_sizes=15,
        l_sizes=100)
    return [{**{'model_type': model}, **base} for model in model_type]
def _configure_random(parms):
    """Randomly sample parms['num_choice'] configurations from the search space.

    Relies on the module-level globals ``arch`` ('sh' adds the n_sizes
    dimension) and ``model_type`` (models paired with each sampled config).
    """
    configs = list()
    # Search space definition
    dense_act = [None]
    norm_method = ['max', 'lognorm']
    l_sizes = [50, 100, 200]
    optimizers = ['Nadam', 'Adam']
    lstm_act = ['tanh', 'sigmoid', 'relu']
    if arch == 'sh':
        n_sizes = [5, 10, 15]
        listOLists = [lstm_act, dense_act,
                      norm_method, n_sizes,
                      l_sizes, optimizers]
    else:
        listOLists = [lstm_act, dense_act,
                      norm_method, l_sizes,
                      optimizers]
    # selection method definition
    choice = 'random'
    preconfigs = list()
    # Materialize the full cartesian product of the search space.
    for lists in itertools.product(*listOLists):
        if arch == 'sh':
            preconfigs.append(dict(lstm_act=lists[0],
                                   dense_act=lists[1],
                                   norm_method=lists[2],
                                   n_sizes=lists[3],
                                   l_sizes=lists[4],
                                   optimizers=lists[5]))
        else:
            preconfigs.append(dict(lstm_act=lists[0],
                                   dense_act=lists[1],
                                   norm_method=lists[2],
                                   l_sizes=lists[3],
                                   optimizers=lists[4]))
    # configurations definition: sample, then expand per model type
    if choice == 'random':
        preconfigs = random.sample(preconfigs, parms['num_choice'])
    for preconfig in preconfigs:
        for model in model_type:
            config = {'model_type': model}
            config = {**config, **preconfig}
            configs.append(config)
    return configs
def _configure_load(parms):
    """Load experiment configurations from input_files/configs.csv."""
    table = pd.read_csv(os.path.join('input_files', 'configs.csv'))
    table.fillna('nan', inplace=True)
    table = table.rename(columns={'n_size': 'n_sizes',
                                  'l_size': 'l_sizes',
                                  'optim': 'optimizers'})
    records = table.to_dict('records')
    return [{**{'model_type': model}, **record}
            for record in records
            for model in model_type]
# =============================================================================
# Sbatch files creator
# =============================================================================
def sbatch_creator(configs):
    """Write one sbatch job file per configuration into output_folder.

    Relies on the module-level globals ``log``, ``arch``, ``imp`` and
    ``output_folder``.
    """
    def format_option(config, short, parm):
        # Emit ' -<short> None' for missing values so lstm.py always
        # receives every flag. (Hoisted out of the loop; it used to be
        # redefined on every iteration.)
        return (' -' + short + ' None'
                if config[parm] in [None, 'nan', '', np.nan]
                else ' -' + short + ' ' + str(config[parm]))

    # Experiment-name suffix per model family, appended to the 4-char
    # log prefix plus the architecture tag.
    suffixes = {
        'shared_cat': '', 'seq2seq': '',
        'shared_cat_inter': 'i', 'seq2seq_inter': 'i',
        'shared_cat_inter_full': 'i',
        'shared_cat_snap': 's',
        'shared_cat_city': 'c',
    }
    for config in configs:
        model = config['model_type']
        if model not in suffixes:
            # Previously an unknown model type crashed later with a
            # NameError on exp_name; fail early with a clear message.
            raise ValueError('unknown model_type: {}'.format(model))
        exp_name = (os.path.splitext(log)[0]
                    .lower()
                    .split(' ')[0][:4] + arch + suffixes[model])
        if imp == 2:
            default = ['#!/bin/bash',
                       '#SBATCH --partition=gpu',
                       '#SBATCH --gres=gpu:tesla:1',
                       '#SBATCH -J ' + exp_name,
                       '#SBATCH -N 1',
                       '#SBATCH --mem=14000',
                       '#SBATCH -t 72:00:00',
                       'module load cuda/10.0',
                       'module load python/3.6.3/virtenv',
                       'source activate lstm_pip'
                       ]
        else:
            default = ['#!/bin/bash',
                       '#SBATCH --partition=amd',
                       '#SBATCH -J ' + exp_name,
                       '#SBATCH -N 1',
                       '#SBATCH --mem=14000',
                       '#SBATCH -t 72:00:00',
                       'module load cuda/10.0',
                       'module load python/3.6.3/virtenv',
                       'source activate lstm_pip'
                       ]
        # Command line for the training run itself.
        options = 'python lstm.py -f ' + log + ' -i ' + str(imp)
        options += ' -a training'
        options += ' -o True'
        options += format_option(config, 'l', 'lstm_act')
        options += format_option(config, 'y', 'l_sizes')
        options += format_option(config, 'd', 'dense_act')
        options += format_option(config, 'n', 'norm_method')
        options += format_option(config, 'm', 'model_type')
        options += format_option(config, 'p', 'optimizers')
        if arch == 'sh':
            options += format_option(config, 'z', 'n_sizes')
        default.append(options)
        file_name = sup.folder_id()
        sup.create_text_file(default, os.path.join(output_folder, file_name))
# =============================================================================
# Sbatch files submission
# =============================================================================
def sbatch_submit(in_batch, bsize=20):
    """Submit every generated job file via sbatch.

    In batch mode, pause 20 seconds at the start of every group of
    *bsize* submissions (including the first).
    """
    file_list = create_file_list(output_folder)
    print('Number of experiments:', len(file_list), sep=' ')
    for index, job_file in enumerate(file_list):
        if in_batch and (index % bsize) == 0:
            time.sleep(20)
        os.system('sbatch ' + os.path.join(output_folder, job_file))
# =============================================================================
# Kernel
# =============================================================================
# create output folder
output_folder = 'jobs_files'
if not os.path.exists(output_folder):
    os.makedirs(output_folder)
# clean folder: remove job files left over from previous runs
for _, _, files in os.walk(output_folder):
    for file in files:
        os.unlink(os.path.join(output_folder, file))
# parameters definition
# s2, sh
arch = 'sh'  # architecture selector: 'sh' (shared) or 's2' (seq2seq)
log = 'BPI_Challenge_2013_closed_problems.xes'  # event log to train on
imp = 1  # keras lstm implementation 1 cpu, 2 gpu
# Same experiment for both models
if arch == 'sh':
    model_type = ['shared_cat_city', 'shared_cat_snap']
else:
    model_type = ['seq2seq_inter', 'seq2seq']
# configs definition
configs = configs_creation({'config_type': 'load', 'num_choice': 30})
# sbatch creation
sbatch_creator(configs)
# submission
sbatch_submit(True)
|
from bs4 import BeautifulSoup
import requests

# Live-score page for the ENG vs IRE only test (2019) on Cricbuzz.
SCORE_URL = 'https://www.cricbuzz.com/live-cricket-scores/20714/eng-vs-ire-only-test-ireland-tour-of-england-only-test-2019'

# Fetch the page; fail fast on network hangs (timeout) and HTTP errors
# instead of silently printing nothing.
response = requests.get(SCORE_URL, timeout=10)
response.raise_for_status()

# parse - take html functions
soup = BeautifulSoup(response.text, 'html.parser')
# Print the text of every scorecard wrapper block on the page.
for score_block in soup.find_all('div', {'class': "cb-col cb-col-67 cb-scrs-wrp"}):
    print(score_block.text)
|
from django.shortcuts import render_to_response, render, get_object_or_404
from django.http import HttpResponseRedirect
from django.contrib.auth import login,logout,authenticate
from django.core.context_processors import csrf
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.auth.forms import UserCreationForm
# Create your views here.
def log_in(request):
    """Render the login page with a CSRF token in the context."""
    context = {}
    context.update(csrf(request))
    return render(request, 'login.html', context)
def invalid(request):
    """Render the 'invalid login' page."""
    return render_to_response('invalid.html', {})
def profile(request, username):
    """Render the profile page for *username* (404 if no such user).

    Bug fix: the looked-up user was fetched and then ignored — the page
    always showed the currently logged-in user's name instead of the
    requested profile's.
    """
    user = get_object_or_404(User, username=username)
    return render_to_response('profile.html', {'full_name': user.username})
def log_out(request):
    """Log the current user out, then show the logout page."""
    logout(request)
    return render_to_response('logout.html', {})
def auth(request):
    """Validate posted credentials and log the user in.

    Redirects to the profile page on success; otherwise re-renders the
    auth page with the matching failure message.
    """
    username = request.POST.get('username', False)
    password = request.POST.get('password', False)
    user = authenticate(username=username, password=password)
    # Guard clauses: unknown credentials first, then deactivated accounts.
    if user is None:
        return render(request, 'auth.html', {'inactive_message': None,
                                             'invalid_message': 'Invalid username or password!'})
    if not user.is_active:
        return render(request, 'auth.html',
                      {'inactive_message': 'This user is no longer active!',
                       'invalid_message': None})
    login(request, user)
    user_name = get_object_or_404(User, username=request.user.username)
    return HttpResponseRedirect(reverse('profile', args=(user_name.username,)))
def register_user(request):
    """Show the registration form; create the user on a valid POST.

    Bug fix: on an invalid POST the *bound* form is re-rendered so its
    validation errors are shown — previously the unbound form class was
    passed, silently discarding the errors.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/register_success/')
    else:
        form = UserCreationForm()
    args = {}
    args.update(csrf(request))
    args['form'] = form
    return render_to_response('register.html', args)
def register_success(request):
    """Registration landing page.

    Bug fix: this view returned None, which makes Django raise
    "The view didn't return an HttpResponse object"; render a template
    instead. NOTE(review): template name assumed — confirm it exists.
    """
    return render_to_response('register_success.html', {})
|
from pygame import mixer
from datetime import datetime
from time import time
def speak(text):
    """Say *text* out loud via the Windows SAPI voice.

    Fixes: the parameter was named ``str`` (shadowing the builtin) and the
    local Dispatch object was named ``speak`` (shadowing this function).
    All in-file callers pass the argument positionally, so the rename is
    backward-compatible.
    """
    from win32com.client import Dispatch
    voice = Dispatch("SAPI.SpVoice")
    voice.Speak(text)
if __name__ == '__main__':
    # Greet the user and collect their name (reused in later prompts).
    speak("Welcome to Stay Fit Management ...Please enter your name:")
    aa = input("Please enter your name:")
    # Spoken introduction. NOTE: the first f-string below has no
    # placeholders; text is spoken verbatim.
    speak(
        f"........Having a good health not only help you relive your life and work to full, but also you can turn out as a good individual in a society, where others can look up to you.")
    speak(
        f"Let me tell you about this software {aa}.......This software is basically for people who continuosly works for 8 to 10 hours on laptop.......This software will make sure that you are drinking water on proper interval of time.. and will also tell you to take 5 minute eye rest by closing your eye because people shows negelence towards their health specially..when on work")
def musiconloop(file, stopper):
    """Start playing *file* and block until the user types *stopper*."""
    mixer.init()
    mixer.music.load(file)
    mixer.music.play()
    while True:
        typed = input()
        if typed == stopper:
            mixer.music.stop()
            return
def log_now(msg):
    """Append *msg* followed by the current timestamp to the log file."""
    entry = f"{msg} {datetime.now()}\n"
    with open("forhealth.logs.txt", "a") as log_file:
        log_file.write(entry)
if __name__ == '__main__':
    # Timers for the two reminders, re-armed after each alert fires.
    init_water = time()
    init_eyes = time()
    watersecs = 60*60  # water reminder interval: 1 hour
    eyessecs = 40*60  # eye-rest reminder interval: 40 minutes
    # Busy-poll loop: checks both timers forever.
    while True:
        if time() - init_water > watersecs:
            print(f"Water Drinking time....Please drink a glass of water{aa}... Enter 'Drank' to stop the alarm.")
            speak(f"This you your water drinking time...Please drink a glass of water{aa}")
            # musiconloop('enter song.mp3', 'Drank')
            init_water = time()
            log_now("Drank Water at")
        if time() - init_eyes > eyessecs:
            print("Eye rest time. Enter 'Done' to stop the alarm.")
            speak(f"This you your eye rest time...Please close your eyes and take rest for 5 minutes {aa}")
            # musiconloop('enter song.mp3', 'Done')
            init_eyes = time()
            log_now("Eyes Relaxed at")
|
from Communication import Message
class BaseModule:
    """Default module implementation: logs lifecycle and message events."""

    def power_up(self):
        """Announce that this module is starting."""
        print('Module {} powering up'.format(self.get_name()))

    def power_down(self):
        """Announce that this module is stopping."""
        print('Module {} powering down'.format(self.get_name()))

    def process(self, message: Message):
        """Announce that *message* is being handled by this module."""
        print('Module {} processing message {}'.format(self.get_name(), message))

    def get_name(self):
        """Name shown in log output; subclasses override this."""
        return "Base module"
|
"""
Helpers for interacting with DC/OS Docker.
"""
import subprocess
import uuid
from ipaddress import IPv4Address
from pathlib import Path
from shutil import copyfile, copytree, ignore_patterns, rmtree
from typing import Any, Dict, Optional, Set
import docker
import yaml
from retry import retry
from ._common import Node, run_subprocess
class _ConflictingContainerError(Exception):
    """
    Raised when an existing Docker container conflicts with a container
    which is about to be created (e.g. a leftover container holds the
    name or resources the new one needs).
    """
class DCOS_Docker: # pylint: disable=invalid-name
    """
    A record of a DC/OS Docker cluster.
    """

    def __init__(
        self,
        masters: int,
        agents: int,
        public_agents: int,
        extra_config: Dict[str, Any],
        generate_config_path: Path,
        dcos_docker_path: Path,
        custom_ca_key: Optional[Path],
        log_output_live: bool,
        files_to_copy_to_installer: Dict[Path, Path],
    ) -> None:
        """
        Create a DC/OS Docker cluster.

        Args:
            masters: The number of master nodes to create.
            agents: The number of agent nodes to create.
            public_agents: The number of public agent nodes to create.
            extra_config: DC/OS Docker comes with a "base" configuration.
                This dictionary can contain extra installation configuration
                variables.
            generate_config_path: The path to a build artifact to install.
            dcos_docker_path: The path to a clone of DC/OS Docker.
            custom_ca_key: A CA key to use as the cluster's root CA key.
            log_output_live: If `True`, log output of subprocesses live.
                If `True`, stderr is merged into stdout in the return value.
            files_to_copy_to_installer: A mapping of host paths to paths on
                the installer node. These are files to copy from the host to
                the installer node before installing DC/OS. Currently on DC/OS
                Docker the only supported paths on the installer are in the
                `/genconf` directory.
        """
        self.log_output_live = log_output_live

        # To avoid conflicts, we use random container names.
        # We use the same random string for each container in a cluster so
        # that they can be associated easily.
        random = uuid.uuid4()

        # We create a new instance of DC/OS Docker and we work in this
        # directory.
        # This reduces the chance of conflicts.
        # We put this in the `/tmp` directory because that is writable on
        # the Vagrant VM.
        tmp = Path('/tmp')
        self._path = tmp / 'dcos-docker-{random}'.format(random=random)

        copytree(
            src=str(dcos_docker_path),
            dst=str(self._path),
            # If there is already a config, we do not copy it as it will be
            # overwritten and therefore copying it is wasteful.
            ignore=ignore_patterns('dcos_generate_config.sh'),
        )

        copyfile(
            src=str(generate_config_path),
            dst=str(self._path / 'dcos_generate_config.sh'),
        )

        # Files in the DC/OS Docker directory's genconf directory are mounted
        # to the installer at `/genconf`.
        # Therefore, every file which we want to copy to `/genconf` on the
        # installer is put into the genconf directory in DC/OS Docker.
        for host_path, installer_path in files_to_copy_to_installer.items():
            relative_installer_path = installer_path.relative_to('/genconf')
            destination_path = self._path / 'genconf' / relative_installer_path
            copyfile(src=str(host_path), dst=str(destination_path))

        # Base names for containers; `_create_containers` appends 1-based
        # node numbers to these via the Makefile variables below.
        master_ctr = 'dcos-master-{random}-'.format(random=random)
        agent_ctr = 'dcos-agent-{random}-'.format(random=random)
        public_agent_ctr = 'dcos-public-agent-{random}-'.format(random=random)

        # Only overlay and aufs storage drivers are supported.
        # This chooses the aufs driver so the host's driver is not used.
        #
        # This means that the tests will run even if the storage driver on
        # the host is not one of these two.
        #
        # aufs was chosen as it is supported on the version of Docker on
        # Travis CI.
        client = docker.from_env()
        host_storage_driver = client.info()['Driver']
        supported_storage_drivers = ('overlay', 'aufs')
        if host_storage_driver in supported_storage_drivers:
            docker_storage_driver = host_storage_driver
        else:
            docker_storage_driver = 'aufs'

        # These become `make` variables (see `_make`).
        self._variables = {
            'DOCKER_STORAGEDRIVER': docker_storage_driver,
            # Some platforms support systemd and some do not.
            # Disabling support makes all platforms consistent in this aspect.
            'MESOS_SYSTEMD_ENABLE_SUPPORT': 'false',
            # Number of nodes.
            'MASTERS': str(masters),
            'AGENTS': str(agents),
            'PUBLIC_AGENTS': str(public_agents),
            # Container names.
            'MASTER_CTR': master_ctr,
            'AGENT_CTR': agent_ctr,
            'PUBLIC_AGENT_CTR': public_agent_ctr,
        }  # type: Dict[str, str]

        if extra_config:
            self._variables['EXTRA_GENCONF_CONFIG'] = yaml.dump(
                data=extra_config,
                default_flow_style=False,
            )

        if custom_ca_key is not None:
            # Mounted on the master so DC/OS uses it as the root CA key.
            master_mount = '-v {custom_ca_key}:{path}'.format(
                custom_ca_key=custom_ca_key,
                path=Path('/var/lib/dcos/pki/tls/CA/private/custom_ca.key'),
            )
            self._variables['MASTER_MOUNTS'] = master_mount

        self._create_containers()

    @retry(exceptions=_ConflictingContainerError, delay=10, tries=30)
    def _create_containers(self) -> None:
        """
        Create containers for the cluster.

        Creating clusters involves creating temporary installer containers.
        These containers can conflict in name.
        If a conflict occurs, retry.
        """
        # The error substring differs on different versions of Docker.
        conflict_error_substring = 'Conflict. The container name'
        other_conflict_error_substring = 'Conflict. The name'
        try:
            self._make(target='all')
        except subprocess.CalledProcessError as exc:
            # Handle error in stderr or stdout.
            # This is because if we log output live, stderr is redirected to
            # stdout.
            stderr = str(exc.stderr) + str(exc.stdout)
            conflict = conflict_error_substring in stderr
            conflict = conflict or other_conflict_error_substring in stderr
            if conflict:
                print(exc.stderr)
                # Converted so the @retry decorator above retries the build.
                raise _ConflictingContainerError()
            # Any other build failure propagates unchanged.
            raise

    def _make(self, target: str) -> None:
        """
        Run `make` in the DC/OS Docker directory using variables associated
        with this instance.

        Args:
            target: `make` target to run.

        Raises:
            CalledProcessError: The process exited with a non-zero code.
        """
        # Each instance variable is passed as a `KEY=value` make argument.
        args = ['make'] + [
            '{key}={value}'.format(key=key, value=value)
            for key, value in self._variables.items()
        ] + [target]
        run_subprocess(
            args=args,
            cwd=str(self._path),
            log_output_live=self.log_output_live
        )

    def postflight(self) -> None:
        """
        Wait for nodes to be ready to run tests against.
        """
        self._make(target='postflight')

    def destroy(self) -> None:
        """
        Destroy all nodes in the cluster.
        """
        self._make(target='clean')
        rmtree(
            path=str(self._path),
            # Some files may be created in the container that we cannot clean
            # up.
            ignore_errors=True,
        )

    def _nodes(self, container_base_name: str, num_nodes: int) -> Set[Node]:
        """
        Args:
            container_base_name: The start of the container names.
            num_nodes: The number of nodes.

        Returns: ``Node``s corresponding to containers with names starting
            with ``container_base_name``.
        """
        client = docker.from_env()
        nodes = set([])  # type: Set[Node]

        # Containers are named `<base><1..num_nodes>` (see __init__), so we
        # can look each one up directly by its sequential name.
        while len(nodes) < num_nodes:
            container_name = '{container_base_name}{number}'.format(
                container_base_name=container_base_name,
                number=len(nodes) + 1,
            )
            container = client.containers.get(container_name)
            ip_address = container.attrs['NetworkSettings']['IPAddress']
            node = Node(
                ip_address=IPv4Address(ip_address),
                ssh_key_path=self._path / 'include' / 'ssh' / 'id_rsa',
            )
            nodes.add(node)

        return nodes

    @property
    def masters(self) -> Set[Node]:
        """
        Return all DC/OS master ``Node``s.
        """
        return self._nodes(
            container_base_name=self._variables['MASTER_CTR'],
            num_nodes=int(self._variables['MASTERS']),
        )

    @property
    def agents(self) -> Set[Node]:
        """
        Return all DC/OS agent ``Node``s.
        """
        return self._nodes(
            container_base_name=self._variables['AGENT_CTR'],
            num_nodes=int(self._variables['AGENTS']),
        )

    @property
    def public_agents(self) -> Set[Node]:
        """
        Return all DC/OS public agent ``Node``s.
        """
        return self._nodes(
            container_base_name=self._variables['PUBLIC_AGENT_CTR'],
            num_nodes=int(self._variables['PUBLIC_AGENTS']),
        )
|
import pandas as pd
import time
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.optimizers import SGD
def prep_data(filename, has_y=True):
    """Load a digits CSV and return the pixel matrix (and one-hot labels).

    Args:
        filename: path to a CSV with pixel0..pixel783 columns and, when
            `has_y` is true, is_0..is_9 one-hot label columns.
        has_y: when True, also return the label matrix.

    Returns:
        X, or (X, y) when has_y is True, as numpy arrays.
    """
    t0 = time.time()
    data = pd.read_csv(filename)
    # `.values` replaces the long-deprecated DataFrame.as_matrix (removed in
    # pandas 1.0); `range` and `print(...)` replace the Python-2-only
    # `xrange` / print statement while remaining valid Python 2.
    X = data[["pixel%d" % x for x in range(784)]].values
    if has_y:
        y = data[["is_%d" % x for x in range(10)]].values
    print("prep data %s: %.2fs" % (filename, time.time() - t0))
    if has_y:
        return X, y
    else:
        return X
def build_model(layer_sizes):
    """Build a dense tanh/dropout classifier over 784 inputs and 10 classes.

    Args:
        layer_sizes: hidden-layer widths, e.g. [1000, 1600, 400].

    Returns:
        A compiled Keras Sequential model (MSE loss, SGD optimizer).
    """
    t0 = time.time()
    model = Sequential()
    layer_sizes = [784] + layer_sizes
    # Old Keras 0.x Dense API: Dense(input_dim, output_dim, init=...).
    # `range`/`print(...)` replace Python-2-only constructs and also run on
    # Python 2.
    for i in range(len(layer_sizes) - 1):
        model.add(Dense(layer_sizes[i], layer_sizes[i + 1], init="uniform"))
        model.add(Activation("tanh"))
        model.add(Dropout(0.5))
    model.add(Dense(layer_sizes[-1], 10, init="uniform"))
    model.add(Activation("softmax"))
    sgd = SGD(lr=0.1, decay=1e-6)
    model.compile(loss="mse", optimizer=sgd)
    print("prep model: %.2fs" % (time.time() - t0))
    return model
def evaluate_accuracy(model, X, y):
    """Return the fraction of rows whose predicted digit matches the labels.

    Args:
        model: anything with a Keras-style .predict(X, batch_size, verbose).
        X: input matrix, passed straight through to model.predict.
        y: one-hot labels; y[i][digit] == 1 marks the correct class.

    Returns:
        Accuracy in [0, 1] as a float.
    """
    t0 = time.time()
    predictions = model.predict(X, batch_size=16, verbose=2)
    # argmax over the 10 class scores for each predicted row
    predictions = [max(range(10), key=lambda x: p[x]) for p in predictions]
    correct = 0
    for i in range(len(predictions)):
        if y[i][predictions[i]] == 1:
            correct += 1
    print("evaluate: %.2fs" % (time.time() - t0))
    # 1.0 * forces float division on Python 2 as well
    return 1.0 * correct / len(predictions)
def write_predictions(model, X, filename):
    """Predict digits for X and write a Kaggle-style "ImageId,Label" CSV.

    Args:
        model: anything with a Keras-style .predict(X, batch_size, verbose).
        X: input matrix for prediction.
        filename: output CSV path (overwritten).
    """
    predictions = model.predict(X, batch_size=16, verbose=2)
    predictions = [max(range(10), key=lambda x: p[x]) for p in predictions]
    # context manager guarantees the handle is closed even on write errors
    with open(filename, "w") as f:
        f.write("ImageId,Label\n")
        for i in range(len(predictions)):
            f.write("%d,%d\n" % (i + 1, predictions[i]))
def timed_run(model, time_limit):
    """Repeatedly fit *model* until *time_limit* minutes have elapsed.

    Args:
        model: Keras model with .fit(...).
        time_limit: wall-clock budget in minutes.

    Returns:
        (times, accuracies): elapsed minutes and test accuracy after each
        5-epoch fit round.

    NOTE(review): reads module-level X_train/y_train/X_test/y_test and the
    sibling evaluate_accuracy — confirm those are defined before calling.
    """
    t_start = time.time()
    times = []
    accuracies = []
    while time.time() < t_start + time_limit * 60:
        t0 = time.time()
        model.fit(X_train, y_train, nb_epoch=5, batch_size=16, verbose=2)
        t = time.time() - t0
        # print(...) instead of the Python-2-only print statement
        print("fit: %d:%02d" % (int(t / 60), int(t % 60)))
        accuracy = evaluate_accuracy(model, X_test, y_test)
        times.append((time.time() - t_start) / 60)
        accuracies.append(accuracy)
        print("accuracy: %.3f%%" % (100 * accuracy))
    return times, accuracies
# Load pre-binarized train/test splits (one-hot label columns included).
X_train, y_train = prep_data("data/train_bin.csv")
X_test, y_test = prep_data("data/test_bin.csv")
#X_target = prep_data("data/target_adj.csv", has_y=False)
# Resume a 1-hour checkpoint of the [1000, 1600, 400] network and train for
# roughly 9 more hours, then save the 10-hour checkpoint.
model = build_model([1000, 1600, 400])
model.load_weights("data/1000-1600-400-1hrs.hdf5")
times, accuracies = timed_run(model, 9*60)
model.save_weights("data/1000-1600-400-10hrs.hdf5", overwrite=True)
plt.plot(times, accuracies, label="[1000, 1600, 400]")
'''
model = build_model([500, 200])
model.load_weights("data/500-200-8hrs.hdf5")
write_predictions(model, X_target, "data/500-200-8hrs_out.csv")
'''
plt.legend(loc="lower right")
plt.show()
|
import logging
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.generic.base import TemplateView
from django.views.generic import CreateView
from django.contrib.auth.views import LoginView
from . import forms, decorators
# Module-level logger named after this Django app module.
logger = logging.getLogger(__name__)
class IndexView(TemplateView):
    """Public landing page (no authentication required)."""
    template_name = 'index.html'
@method_decorator([login_required], 'dispatch')
class DashboardView(TemplateView):
    """Dashboard page; login is enforced on dispatch."""
    template_name = 'dashboard.html'
@method_decorator([login_required, decorators.subscriber_required], 'dispatch')
class PaidContentView(TemplateView):
    """Subscriber-only content; requires both login and an active subscription."""
    template_name = 'paid-content.html'
@method_decorator([login_required], 'dispatch')
class FreeContentView(TemplateView):
    """Content available to any logged-in user (no subscription needed)."""
    template_name = 'free-content.html'
class RegistrationView(CreateView):
    """Sign-up form; on success, redirect the new user to the login page."""
    form_class = forms.RegistrationForm
    success_url = reverse_lazy('app:login')
    template_name = 'registration.html'
class InheritedLoginView(LoginView):
    """Login view using the project template; lands on the dashboard by default."""
    template_name = 'login.html'

    def get_success_url(self) -> str:
        # Prefer an explicit ?next= redirect target when one was supplied.
        redirect_target = self.get_redirect_url()
        if redirect_target:
            return redirect_target
        return reverse_lazy('app:dashboard')
|
#plot general class
#author: Juan Pablo Duarte, jpduarte@berkeley.edu
#BSIM Group, UC Berkeley
#import supportfunctions as sf
import matplotlib.pyplot as plt
from numpy import loadtxt
import numpy as np
from numpy.matlib import repmat
from scipy.misc import factorial
from scipy import sparse
from scipy.sparse import lil_matrix
from scipy.sparse.linalg import spsolve
from numpy.linalg import solve, norm
#########################################################################
#Derivative Support functions
def mkfdstencil(x, xbar, k):
    """Return finite-difference weights for the k-th derivative at `xbar`.

    Builds the Taylor matrix T[i, j] = (x[j] - xbar)**i / i! and solves
    T u = e_k, so that sum(u[j] * f(x[j])) approximates f^(k)(xbar).

    Args:
        x: grid points (any 1-D sequence of numbers).
        xbar: evaluation point of the derivative.
        k: derivative order (0 <= k < len(x)).

    Returns:
        1-D numpy array of stencil weights, one per grid point.
    """
    # scipy.special.factorial replaces scipy.misc.factorial, which was
    # removed in SciPy 1.3; broadcasting replaces the deprecated
    # numpy.matlib.repmat construction (same matrix, no tiling needed).
    from scipy.special import factorial
    maxorder = len(x)
    offsets = np.asarray(x, dtype=float) - xbar
    powers = np.arange(maxorder).reshape(-1, 1)
    taylormatrix = offsets ** powers / factorial(powers)
    derivativeindex = np.zeros(maxorder)
    derivativeindex[k] = 1
    return np.linalg.solve(taylormatrix, derivativeindex)
def K_generator(x,order):
    #this return matrix to find the derivative, x is the variable to be derived and order is the derivative order
    # Builds a sparse N x N operator K such that K @ f approximates the
    # `order`-th derivative of f sampled on the (possibly non-uniform) grid x.
    N=len(x);
    K = lil_matrix((N, N))
    # First three rows: one-sided stencils built from the leading six points.
    K[0,:6]=mkfdstencil(x[0:6],x[0],order)
    K[1,:6]=mkfdstencil(x[0:6],x[1],order)
    K[2,:6]=mkfdstencil(x[0:6],x[2],order)
    i=3
    # Interior rows: centered six-point stencil around each xbar.
    for xbar in x[3:-3]:
      #print i
      K[i,i-3:i+3]=mkfdstencil(x[i-3:i+3],xbar,order)
      i+=1
    #print i
    # Last three rows: one-sided stencils from the trailing points.
    # NOTE(review): x[-7:-1] / K[i,-7:-1] exclude the final grid point from
    # these boundary stencils — possibly intended to be x[-6:] — verify.
    K[i,-7:-1]=mkfdstencil(x[-7:-1],x[-3],order)
    i+=1
    K[i,-7:-1]=mkfdstencil(x[-7:-1],x[-2],order)
    i+=1
    K[i,-7:-1]=mkfdstencil(x[-7:-1],x[-1],order)
    # CSR format multiplies efficiently when applied as K * f.
    return K.tocsr()
#########################################################################
#arrange X and Y matrices
def rearrangearray(arrayXa, elementpercylce, numberelement):
    """Reshape a flat sweep array into per-curve rows for plotting.

    Args:
        arrayXa: flat 1-D numpy array of sampled values.
        elementpercylce: number of values per sweep cycle; must divide
            len(arrayXa).
        numberelement: number of distinct curves; must divide len(arrayXa).

    Returns:
        2-D numpy array with one plotting curve per column.
    """
    # // (integer division): ndarray.reshape requires integer dimensions, so
    # the original true division broke under Python 3.
    arrayXb = arrayXa.reshape((elementpercylce, len(arrayXa) // elementpercylce))
    arrayXc = np.transpose(arrayXb)
    arrayXd = arrayXc.reshape((len(arrayXa) // numberelement, numberelement))
    arrayXe = np.transpose(arrayXd)
    return arrayXe
def findelementpercylce(arrayaux):
    """Return how many times the leading value-run sequence repeats in the array.

    First measures the run of elements equal to the first one, then counts the
    number of windows matching that leading run.

    Args:
        arrayaux: 1-D numpy array of sampled sweep values.

    Returns:
        Number of occurrences of the leading run (>= 1).
    """
    firstelement = arrayaux[0]
    lengthaux = len(arrayaux)
    flag = 1
    i = 1
    elementpercylce = 0
    # Bound is lengthaux (not lengthaux+1): the original read
    # arrayaux[lengthaux], raising IndexError whenever every element equals
    # the first one.
    while (i < lengthaux) and flag:
        elementpercylce = i - 1
        if abs(arrayaux[i] - firstelement) > 0:  # TODO: check abs condition
            flag = 0
        i = i + 1
    elementpercylce = elementpercylce + 1
    # Count every window equal (within tolerance) to the leading run.
    indexes = []
    b = arrayaux[0:elementpercylce]
    for i in range(len(arrayaux) - len(b) + 1):
        if sum(abs(arrayaux[i:i + len(b)] - b)) < 1e-15:
            indexes.append((i, i + len(b)))
    return len(indexes)
#########################################################################
#plotgeneral class definition
class plotgeneral:
    """Generic file-data plotter: loads columnar data files and plots a
    column (or its numerical derivative) against another with matplotlib."""

    def __init__(self):#, model):
        self.version = 'v1'
        #self.model = model
        # default plotting parameters
        self.symbol = 'o'
        self.color = ''
        self.markerfacecolor = (1, 1, 1, 1)
        self.lw = 1
        self.ylogflag = 0
        self.xlogflag = 0
        self.derivativeorder = 0
        self.markersize = 10
        self.filetype = ''

    def updateparameter(self, name, value):
        """Set plotting attribute `name` to `value`.

        Uses setattr instead of the original exec-built assignment: same
        effect, but no code injection risk and no breakage when a string
        value contains a quote character.
        """
        setattr(self, name, value)

    def plotfiledata(self, pathandfile, xstring, ystring, fignumber):
        """Open `pathandfile` and plot the column headed `ystring` (or its
        derivative, per self.derivativeorder) against the column `xstring`
        in matplotlib figure `fignumber`."""
        flagprint = 0
        # Default (whitespace-separated, lowercase headers) format.
        if self.filetype=='':
            target = open( pathandfile, 'r')
            header = str.split(target.readline())
            if len(header)>0:
                flagprint = 1
                print(xstring.lower())
                xindex = header.index(xstring.lower())
                yindex = header.index(ystring.lower())
                datalist = loadtxt(pathandfile,skiprows = 1)
                xarray = datalist[:,xindex]
                yarray = datalist[:,yindex]
                #this is to identify index how to re-shape matrix for right plotting
                numberelement = 0
                numberelementaux = len(np.unique(xarray))
                numberelementmaxpossible = len(xarray)
                if( (np.mod(len(xarray),numberelementaux)==0) and ((numberelementmaxpossible-numberelementaux)>0) ):
                    numberelement = numberelementaux;
                    elementpercylce = findelementpercylce(xarray)*numberelement
                if (numberelement==0):
                    numberelement = numberelementmaxpossible
                    elementpercylce = numberelement
                #reshape matrix to plot lines
                xarray = rearrangearray(xarray,elementpercylce,numberelement)
                yarray = rearrangearray(yarray,elementpercylce,numberelement)
            target.close()
        # SAS format (comma-separated, 'noData' markers), for internship.
        if self.filetype=='SAS':
            flagprint = 1
            target = open(pathandfile, 'r')
            header = str.split(target.readline(),',')
            xindex = header.index(xstring)
            yindex = header.index(ystring)
            xarray = []
            yarray = []
            for line in target:
                linesplit = str.split(line,',')
                # skip rows where either requested column is missing
                if (linesplit[xindex+1]!='noData') and (linesplit[yindex+1]!='noData'):
                    xarray.append(float(linesplit[xindex+1]))
                    yarray.append(float(linesplit[yindex+1]))
            target.close()
        if flagprint==1:
            #plot
            plt.figure(fignumber)
            #log scale check
            if self.ylogflag==1:
                yarray = abs(yarray)
            #plot variable or its derivatives: TODO: it plot derivate with respect to x-axis, update derivative with respect to any variable
            if (self.derivativeorder<1):
                if self.color=='':
                    plt.plot( xarray, yarray, self.symbol, lw=self.lw,markersize=self.markersize )
                else:
                    plt.plot( xarray, yarray, self.symbol, lw=self.lw,markersize=self.markersize, color=self.color )
            else :
                # derivative operator built from the first x column
                K = K_generator(xarray[:,0],self.derivativeorder)
                if self.color=='':
                    plt.plot( xarray, K*yarray, self.symbol, lw=self.lw,markersize=self.markersize)
                else:
                    plt.plot( xarray, K*yarray, self.symbol, lw=self.lw,markersize=self.markersize, color=self.color )
            #log scale check
            if self.ylogflag==1:
                ax = plt.gca()
                ax.set_yscale('log')
            if self.xlogflag==1:
                ax = plt.gca()
                ax.set_xscale('log')
            #x and y axis label
            ax = plt.gca()
            ax.set_xlabel(xstring)
            if (self.derivativeorder<1):
                ax.set_ylabel(ystring)
            else:
                ax.set_ylabel('d^'+str(self.derivativeorder)+' '+ystring+'/d'+xstring+'^'+str(self.derivativeorder))
|
#Embedded file name: ACEStream\Core\DecentralizedTracking\pymdht\core\logging_conf.pyo
import logging
import os
# Shared log-record layout for every handler configured in this module.
FORMAT = '%(asctime)s %(levelname)s %(filename)s:%(lineno)s - %(funcName)s()\n%(message)s\n'
# Route the root logger's output into a null sink so only the 'dht' logger's
# file handlers (added below) produce visible output.
try:
    devnullstream = open('/dev/null', 'w')
except:
    # NOTE(review): bare except — presumably to cover platforms without
    # /dev/null (e.g. Windows), falling back to the bundled NullFile; verify.
    from ACEStream.Utilities.NullFile import *
    devnullstream = NullFile()

logging.basicConfig(level=logging.CRITICAL, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', stream=devnullstream)
def testing_setup(module_name):
    """Send DEBUG output of the 'dht' logger to test_logs/<module_name>.log.

    The log file is truncated on each call; records use the module-level
    FORMAT layout.
    """
    log_file = os.path.join('test_logs', str(module_name) + '.log')
    handler = logging.FileHandler(log_file, 'w')
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(FORMAT))
    dht_logger = logging.getLogger('dht')
    dht_logger.setLevel(logging.DEBUG)
    dht_logger.addHandler(handler)
def setup(logs_path, logs_level):
    """Attach a truncating file handler for <logs_path>/dht.log to the 'dht'
    logger, with both logger and handler set to `logs_level`."""
    handler = logging.FileHandler(os.path.join(logs_path, 'dht.log'), 'w')
    handler.setLevel(logs_level)
    handler.setFormatter(logging.Formatter(FORMAT))
    dht_logger = logging.getLogger('dht')
    dht_logger.setLevel(logs_level)
    dht_logger.addHandler(handler)
|
# -*- coding: utf-8 -*-
'''
@author: kebo
@contact: kebo0912@outlook.com
@version: 1.0
@file: keras_metric.py
@time: 2021/04/21 23:59:34
这一行开始写关于本文件的说明与解释
'''
import tensorflow as tf
from cybo.metrics.metric import Metric
class KerasMetric(tf.keras.metrics.Metric, Metric):
    """Adapter base class bridging the project's Metric interface with the
    tf.keras.metrics.Metric API.

    Subclasses implement update_state/result-style logic plus the project's
    compute_metrics contract.
    """

    def __init__(
            self, name, dtype=None, support_tf_function: bool = True, **kwargs):
        super().__init__(name=name, dtype=dtype, **kwargs)
        # Whether this metric may be driven from inside a tf.function graph.
        self.support_tf_function = support_tf_function

    def _zero_wt_init(self, name, init_shape=[], dtype=tf.int32):
        # Convenience for counters: a zero-initialized metric weight
        # (scalar by default).
        return self.add_weight(
            name=name, shape=init_shape, initializer="zeros", dtype=dtype
        )

    def reset_states(self):
        # Delegates to the Keras base implementation (zeroes all weights).
        return super().reset_states()

    def update_state(self, y_true, y_pred):
        # Delegates to the Keras base implementation; subclasses override.
        return super().update_state(y_true, y_pred)

    def compute_metrics(self):
        # Project-side hook: subclasses must return their metric values.
        raise NotImplementedError
|
from rest_framework import permissions
from rest_framework import serializers
from rest_framework import viewsets
from application.api import router
from core.api import UserSerializer
from like.models import Like
from ugc.api import PostSerializer
from ugc.models import Post
class ContentObjectRelatedField(serializers.RelatedField):
    """Serialize a generic-relation target by dispatching on its model type."""

    def to_representation(self, value):
        if not isinstance(value, Post):
            raise Exception("Unexpected type of object")
        return PostSerializer(value).data
class LikeSerializer(serializers.ModelSerializer):
    """Read serializer for Like objects: nested author plus the liked target."""
    # Declared fields must be *instances*, not classes: assigning the bare
    # class meant DRF never registered these as serializer fields.
    # read_only is required for a RelatedField declared without a queryset.
    author = UserSerializer(read_only=True)
    target = ContentObjectRelatedField(read_only=True)

    class Meta:
        model = Like
        fields = ['author', 'target']
class LikeViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for likes; filterable with ?like=<object_id>."""
    queryset = Like.objects.all()
    serializer_class = LikeSerializer
    permission_classes = (permissions.IsAuthenticated,)

    def get_queryset(self):
        object_id = self.request.query_params.get('like')
        queryset = super().get_queryset()
        if object_id:
            queryset = queryset.filter(object_id=object_id)
        return queryset
# Expose the likes endpoint on the project-wide API router under /likes/.
router.register(r'likes', LikeViewSet, 'likes')
|
import re
from concurrent import futures
from concurrent.futures import ThreadPoolExecutor
from typing import List, Optional, Tuple, Iterator, Dict
from typing_extensions import Protocol
from ._sources.base_classes import FinancialSymbolsSource
from ._sources.micex_stocks_source import MicexStocksSource
from ._sources.mutru_funds_source import MutualFundsRuSource
from ._sources.us_data_source import UsDataSource
from ._sources.registries import FinancialSymbolsRegistry
from .common.financial_symbol import FinancialSymbol
from .common.financial_symbol_id import FinancialSymbolId
from .common.financial_symbol_info import FinancialSymbolInfo
class SymbolSourcesSearchable(Protocol):
    """Structural type: any container exposing the three symbol sources that
    _Search indexes (US data, MICEX stocks, RU mutual funds)."""
    us_data_source: UsDataSource
    micex_stocks_source: MicexStocksSource
    mutual_funds_ru_source: MutualFundsRuSource
class _Search:
    """Free-text search over financial symbols.

    At construction, builds (in background threads) one lowercase searchable
    line per symbol from each source and a line -> FinancialSymbol map;
    `perform` then does substring matching over those lines.
    """

    def __handle_us_data_info(self) -> List[str]:
        # Index US symbols as "name exchange short_name" lines.
        def func(x: FinancialSymbolInfo, src: FinancialSymbolsSource) -> str:
            fin_sym = src.fetch_financial_symbol(x.fin_sym_id.name)
            if fin_sym is None:
                return ''
            line = '{} {} {}'.format(fin_sym.name, fin_sym.exchange, fin_sym.short_name)
            line = re.sub(r'\s+', ' ', line.lower())
            self.id2sym.update({line: fin_sym})
            return line
        lines = [func(x, self.symbol_sources.us_data_source)
                 for x in self.symbol_sources.us_data_source.get_all_infos()]
        return lines

    def __handle_micex_stocks(self) -> List[str]:
        # Index MICEX stocks as "name exchange isin long_name" lines.
        def func(x: FinancialSymbolInfo, src: FinancialSymbolsSource) -> str:
            fin_sym = src.fetch_financial_symbol(x.fin_sym_id.name)
            if fin_sym is None:
                return ''
            line = '{} {} {} {}'.format(fin_sym.name, fin_sym.exchange, fin_sym.isin, fin_sym.long_name)
            line = re.sub(r'\s+', ' ', line.lower())
            self.id2sym.update({line: fin_sym})
            return line
        lines = [func(x, self.symbol_sources.micex_stocks_source)
                 for x in self.symbol_sources.micex_stocks_source.get_all_infos()]
        return lines

    def __handle_mutru(self) -> List[str]:
        # Index RU mutual funds as "name short_name" lines.
        def func(x: FinancialSymbolInfo, src: FinancialSymbolsSource) -> str:
            fin_sym = src.fetch_financial_symbol(x.fin_sym_id.name)
            if fin_sym is None:
                return ''
            line = '{} {}'.format(fin_sym.name, fin_sym.short_name)
            line = re.sub(r'\s+', ' ', line.lower())
            self.id2sym.update({line: fin_sym})
            return line
        lines = [func(x, self.symbol_sources.mutual_funds_ru_source)
                 for x in self.symbol_sources.mutual_funds_ru_source.get_all_infos()]
        return lines

    def __init__(self,
                 symbol_sources: SymbolSourcesSearchable,
                 financial_symbols_registry: FinancialSymbolsRegistry):
        self.symbol_sources = symbol_sources
        self.financial_symbols_registry = financial_symbols_registry
        # Maps each searchable line back to its symbol (filled by the
        # __handle_* workers above).
        self.id2sym: Dict[str, FinancialSymbol] = {}

        # Kick off the three indexing jobs concurrently; a fourth task joins
        # their results so `perform` can block on a single future.
        pool = ThreadPoolExecutor(3)
        us_data_source_lines_fut = pool.submit(self.__handle_us_data_info)
        micex_stocks_lines_fut = pool.submit(self.__handle_micex_stocks)
        mutru_lines_fut = pool.submit(self.__handle_mutru)

        def handle_all_lines() -> List[str]:
            result = us_data_source_lines_fut.result() + micex_stocks_lines_fut.result() + mutru_lines_fut.result()
            return result

        self.lines_future: futures.Future[List[str]] = pool.submit(handle_all_lines)

    def _check_finsym_access(self, query: str) -> Optional[FinancialSymbol]:
        # Treat "namespace/ticker" queries as exact registry lookups.
        namespaces = self.financial_symbols_registry.namespaces()
        starts_with_namespace = False
        for ns in namespaces:
            if query.startswith(ns + '/'):
                starts_with_namespace = True
                break
        if not starts_with_namespace:
            return None

        fsid = FinancialSymbolId.parse(query)
        return self.financial_symbols_registry.get(fsid)

    def perform(self, query: str, top: int) -> List[FinancialSymbol]:
        """Return up to `top` symbols matching `query` (exact ID match first,
        otherwise substring search ranked by match position, then line)."""
        try:
            fs = self._check_finsym_access(query=query)
        except Exception:
            fs = None

        if fs is not None:
            return [fs]

        if not isinstance(query, str):
            raise ValueError('`query` should be string')
        if not isinstance(top, int):
            raise ValueError('`top` should be int')

        top = max(0, top)
        if not query or top == 0:
            return []
        # Normalize whitespace and case to match the indexed lines.
        query = re.sub(r'\s+', ' ', query.strip().lower())
        if len(query) == 0:
            return []

        lines: List[str] = self.lines_future.result()
        r: Iterator[Tuple[int, str]] = ((l.find(query), l) for l in lines)
        r = filter(lambda x: x[0] != -1, r)
        # Sort key: zero-padded match offset, then the line itself.
        r_list = sorted(r, key=lambda x: '{:4d} {}'.format(x[0], x[1]))
        symbols: List[FinancialSymbol] = [self.id2sym[x[1]] for x in r_list[:top]]
        return symbols
|
from urllib import urlopen
from file_handler import *
from parse_html import *
from bs4 import BeautifulSoup
from directed_graph import Directed_Graph
import urlparse
class Crawler:
    """Wikipedia-restricted crawler that records crawled pages and builds a
    directed link graph. Python 2 code (urllib.urlopen, urlparse).

    NOTE(review): all state lives in *class* attributes, so every instance
    shares (and mutates) the same queues, counters and files.
    """
    base_url=''
    domain_name=''
    queue_file=''
    crawled_file=''
    queue = []
    crawled = []
    temp= []
    graphq=[]
    graph_finalq=[]
    link_number=0
    doc_name = ''
    dg=Directed_Graph()
    directed_graph="directed_graph.txt"
    #finalq=set()
    def __init__(self, base_url,domain_name):
        Crawler.base_url = base_url
        Crawler.domain_name = domain_name
        #Crawler.queue_file = 'queue.txt'
        Crawler.crawled_file = 'crawled.txt'
        #Crawler.corpus_file = 'corpus.txt'
        # Helpers from the wildcard imports (file_handler/parse_html).
        initialise_files(Crawler.base_url)
        Crawler.graphq=file_graphlist("crawled_final.txt")
        #dg=Directed_Graph()
        #self.boot()
        #self.crawl_page(Crawler.base_url)

    # Creates directory and files for project on first run and starts the Crawler
    @staticmethod
    def boot():
        #create_project_dir(Crawler.project_name)
        # Move pending links into temp and persist what has been crawled.
        Crawler.temp = Crawler.temp + Crawler.queue
        del Crawler.queue[:]
        list_file(Crawler.crawled,Crawler.crawled_file)

    # Updates user display, fills queue and updates files
    @staticmethod
    def crawl_page(url):
        # Skip pages that were already visited.
        if url not in Crawler.crawled:
            Crawler.gather_links(url)
            #Crawler.temp.remove(url)
            Crawler.crawled.append(url)
           # Crawler.link_number+=1
           # Crawler.doc_name="Doc_" + str(Crawler.link_number)+ ".txt"
            #print "Doc_name is "+ Crawler.doc_name
            #if not os.path.isfile(Crawler.doc_name):

    @staticmethod
    def gather_links(url):
        # Fetch `url`, extract in-domain Wikipedia article links from the
        # bodyContent div, enqueue them, and record graph edges.
        try:
            response = urlopen(url)
            html_bytes = response.read()
            html_string = html_bytes.decode("utf-8")
            soup = BeautifulSoup(html_string,'html.parser')
            #append_to_file(Crawler.doc_name, html_string)
            bodyContent = soup.find("div", {"id": "bodyContent"})
            for link in bodyContent.find_all('a'):
                value = str(link.get('href'))
                # hrefs containing ':' are special pages (File:, Help:, ...)
                if ':' in value:
                    continue
                # NOTE(review): urljoin is not imported by name here —
                # presumably provided by one of the wildcard imports; verify.
                link = urljoin(Crawler.base_url, value)
                if not link.startswith("https://en.wikipedia.org/wiki"):
                    continue
                # skip in-page anchors
                if '#' in link:
                    continue
                Crawler.queue.append(link)
                # record the edge url -> link in the directed graph
                Crawler.dg.draw(link,url)
        except Exception as e:
            print(str(e))
            return []
        # NOTE(review): bare expression — has no effect; possibly a leftover
        # from an intended `return Crawler.queue`.
        Crawler.queue

    @staticmethod
    def add_corpus(url,html):
        # Append the raw page behind its URL to the shared corpus file.
        append_to_file('corpus.txt',url)
        append_to_file('corpus.txt',html)

    @staticmethod
    def update_files():
        #set_file(Crawler.queue, Crawler.queue_file)
        #set_file(Crawler.crawled, Crawler.crawled_file)
        list_file(Crawler.crawled,Crawler.crawled_file)

    def save_graph(self):
        # Serialize each crawled node with its adjacency data and persist.
        for link in Crawler.graphq:
            value=Directed_Graph.dic.get(link,"")
            data=link + value
            Crawler.graph_finalq.append(data)
        list_file(Crawler.graph_finalq,Crawler.directed_graph)
|
"""
Function for calculating the simplest faction from a float.
An alternative approach is to use:
>>> from fractions import Fraction
>>> x = .12345
>>> y = Fraction(x)
>>> y.limit_denominator(10)
Fraction(1, 8)
>>> y.limit_denominator(100)
Fraction(10, 81)
But usually, we are interested in a fraction that matches within some tolerance
and the max denominator that gives a particular tolerance is not obvious.
"""
from fractions import Fraction
from math import modf
__all__ = (
'approximate_fraction',
)
def simplest_fraction_in_interval(x, y):
    """
    Return the fraction with the lowest denominator in the interval [x, y].
    """
    # http://stackoverflow.com/questions/4266741/check-if-a-number-is-rational-in-python
    if x == y:
        # Without distinct endpoints the continued-fraction walk never stops.
        raise ValueError("Equal arguments.")
    if x < 0 and y < 0:
        # Mirror a fully negative interval onto the positive axis.
        return -simplest_fraction_in_interval(-y, -x)
    if x <= 0 or y <= 0:
        # Zero lies inside (or on the edge of) the interval, and 0/1 is the
        # simplest possible fraction.
        return Fraction(0)
    # Continued-fraction step: split each reciprocal into its integer
    # coefficient and fractional remainder.
    x_rem, x_coef = modf(1 / x)
    y_rem, y_coef = modf(1 / y)
    if x_coef != y_coef:
        # Coefficients differ: a unit fraction fits between the endpoints.
        return Fraction(1, int(min(x_coef, y_coef)) + 1)
    # Same coefficient: recurse on the remainders and fold the result back.
    return 1 / (int(x_coef) + simplest_fraction_in_interval(x_rem, y_rem))
def approximate_fraction(x, e):
    """
    Return an approximate rational fraction of x.

    The returned Fraction instance is the fraction with the lowest denominator
    that differs from `x` by no more than `e`.

    Examples
    --------
    >>> x = 1/3
    >>> print(approximate_fraction(x, 1e-9))
    1/3
    """
    # Delegate to the interval search over [x - e, x + e].
    return simplest_fraction_in_interval(x - e, x + e)
|
# -*- coding: utf-8 -*-
import re
some_str = """
小说(5298252) 外国文学(1937844) 文学(1611318) 随笔(1132086)
中国文学(1049945) 经典(944029) 日本文学(826644) 散文(686198)
村上春树(437578) 诗歌(328662) 童话(292853) 儿童文学(238582)
古典文学(234851) 王小波(223395) 名著(222805) 杂文(216391)
余华(205153) 张爱玲(190301) 当代文学(149029) 钱钟书(105148)
外国名著(95686) 鲁迅(90101) 诗词(81175) 茨威格(63079)
米兰·昆德拉(53417) 杜拉斯(44135) 港台(7292)
流行 · · · · · ·
漫画(1277390) 推理(938118) 绘本(898125) 青春(655992)
东野圭吾(564932) 科幻(528843) 言情(506635) 悬疑(497968)
奇幻(323915) 武侠(315619) 日本漫画(295640) 韩寒(263675)
推理小说(263256) 耽美(258073) 亦舒(236304) 网络小说(211996)
三毛(208039) 安妮宝贝(173473) 阿加莎·克里斯蒂(155557) 郭敬明(154037)
穿越(153755) 金庸(151437) 科幻小说(147529) 轻小说(144440)
青春文学(120334) 魔幻(116896) 几米(115526) 幾米(98912)
张小娴(97828) J.K.罗琳(86065) 古龙(76118) 高木直子(72811)
沧月(65929) 校园(61718) 落落(58556) 张悦然(57819)
文化 · · · · · ·
历史(2079060) 心理学(1354906) 哲学(1135198) 传记(777902)
文化(719762) 社会学(698889) 艺术(505350) 设计(402957)
社会(400202) 政治(367214) 建筑(271228) 宗教(254225)
电影(245141) 政治学(226978) 数学(221753) 中国历史(176946)
回忆录(174927) 思想(157530) 国学(146873) 人物传记(124938)
人文(124877) 音乐(122100) 艺术史(118404) 绘画(113165)
戏剧(107581) 西方哲学(74968) 二战(73957) 军事(72746)
佛教(72416) 近代史(69063) 考古(51436) 自由主义(44987)
美术(38667)
生活 · · · · · ·
爱情(885736) 旅行(566180) 成长(530228) 生活(529061)
心理(403092) 励志(395325) 女性(316339) 摄影(295338)
职场(211530) 教育(211515) 美食(193885) 游记(153387)
灵修(124162) 健康(82911) 情感(82319) 两性(44269)
人际关系(42911) 手工(40793) 养生(36684) 家居(23525)
自助游(2698)
经管 · · · · · ·
经济学(425242) 管理(417275) 经济(344719) 商业(306218)
金融(280068) 投资(228450) 营销(155852) 理财(112911)
创业(112591) 广告(66249) 股票(64738) 企业史(21329)
策划(8608)
科技 · · · · · ·
科普(582611) 互联网(239059) 编程(158429) 科学(135179)
交互设计(68624) 用户体验(55275) 算法(52351) 科技(26743)
web(21887) UE(5172) 交互(5012) 通信(4873)
UCD(3573) 神经网络(2535) 程序(1300)"""
def some(text=None):
    """Sum every parenthesised count found in *text* and print the total.

    Args:
        text: string containing "tag(count)" entries; defaults to the
            module-level `some_str` dump when omitted (original behaviour).

    Returns:
        The summed count (also printed), so callers can use the value.
    """
    if text is None:
        text = some_str
    # non-greedy match of the shortest (...) spans
    p2 = re.compile(r'[(](.*?)[)]', re.S)
    some_list = re.findall(p2, text)
    final_num = 0
    for item in some_list:
        final_num += int(item)
    # print(...) runs identically on Python 2 and 3, unlike the original
    # Python-2-only print statement
    print(final_num)
    return final_num
if __name__ == "__main__":
some()
|
# Load modules
from inferelator import inferelator_workflow, inferelator_verbose_level, MPControl, CrossValidationManager
# Set verbosity level to "Talky"
inferelator_verbose_level(1)
# Set the location of the input data and the desired location of the output files
DATA_DIR = '../data/bsubtilis'
OUTPUT_DIR = '~/bsubtilis_inference/'
EXPRESSION_FILE_NAME = 'expression.tsv.gz'
PRIORS_FILE_NAME = 'gold_standard.tsv.gz'
GOLD_STANDARD_FILE_NAME = 'gold_standard.tsv.gz'
META_DATA_FILE_NAME = 'meta_data.tsv'
TF_LIST_FILE_NAME = 'tf_names.tsv'
CV_SEEDS = list(range(42, 52))
# Multiprocessing uses the pathos implementation of multiprocessing (with dill instead of cPickle)
# This is suited for a single computer but will not work on a distributed cluster
n_cores_local = 10
local_engine = True
# Multiprocessing needs to be protected with the if __name__ == 'main' pragma
if __name__ == '__main__' and local_engine:
MPControl.set_multiprocess_engine("multiprocessing")
MPControl.client.set_processes(n_cores_local)
MPControl.connect()
# Define the general run parameters
def set_up_workflow(wkf):
    """Apply the shared file paths and run settings to an inferelator workflow.

    :param wkf: a workflow object returned by inferelator_workflow()
    :return: the same workflow, configured in place
    """
    wkf.set_file_paths(input_dir=DATA_DIR,
                       output_dir=OUTPUT_DIR,
                       tf_names_file=TF_LIST_FILE_NAME,
                       meta_data_file=META_DATA_FILE_NAME,
                       priors_file=PRIORS_FILE_NAME,
                       gold_standard_file=GOLD_STANDARD_FILE_NAME)
    wkf.set_expression_file(tsv=EXPRESSION_FILE_NAME)
    # Expression matrix is genes x samples, not samples x genes.
    wkf.set_file_properties(expression_matrix_columns_are_genes=False)
    wkf.set_run_parameters(num_bootstraps=5)
    # Hold 20% of the gold standard out of the priors for scoring.
    wkf.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True, cv_split_ratio=0.2)
    return wkf
# Inference with BBSR (crossvalidation)
# Using the crossvalidation wrapper
# Run the regression 10 times and hold 20% of the gold standard out of the priors for testing each time
# Each run is seeded differently (and therefore has different holdouts)
# Create a worker
worker = inferelator_workflow(regression="bbsr", workflow="tfa")
worker = set_up_workflow(worker)
# Write this run's results into a "bbsr" subdirectory.
worker.append_to_path("output_dir", "bbsr")
# Create a crossvalidation wrapper
cv_wrap = CrossValidationManager(worker)
# Assign variables for grid search
cv_wrap.add_gridsearch_parameter('random_seed', CV_SEEDS)
# Run
cv_wrap.run()
# Inference with Elastic Net (crossvalidation)
# Using the crossvalidation wrapper
# Run the regression 10 times and hold 20% of the gold standard out of the priors for testing each time
# Each run is seeded differently (and therefore has different holdouts)
# Create a worker
worker = inferelator_workflow(regression="elasticnet", workflow="tfa")
worker = set_up_workflow(worker)
# Write this run's results into an "elastic_net" subdirectory.
worker.append_to_path("output_dir", "elastic_net")
# Set L1 ratio to 1 (This is now LASSO regression instead of Elastic Net)
# Parameters set with this function are passed to sklearn.linear_model.ElasticNetCV
worker.set_regression_parameters(l1_ratio=1, max_iter=2000)
# Create a crossvalidation wrapper and pass it the worker during __init__
cv_wrap = CrossValidationManager(worker)
# Assign variables for grid search
cv_wrap.add_gridsearch_parameter('random_seed', CV_SEEDS)
# Run
cv_wrap.run()
# Final network
# Build the production network on the full gold standard (no CV split),
# with 2 bootstraps and a fixed seed for reproducibility.
worker = inferelator_workflow(regression="bbsr", workflow="tfa")
worker = set_up_workflow(worker)
worker.append_to_path('output_dir', 'final')
worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=False, cv_split_ratio=None)
worker.set_run_parameters(num_bootstraps=2, random_seed=100)
final_network = worker.run()
|
# from os import listdir
# from os.path import isfile
import os
import json
class TBlock:
    """A node in the block tree: a hash, its parent hash, and weighted children.

    Childs is a list of (child_hash, count) pairs, where count is how many
    times the child edge was observed in the input files.
    """

    def __init__(self, blockHash, parentHash):
        self.Hash = blockHash
        self.Parent = parentHash
        self.Childs = []

    def SetParent(self, parent):
        self.Parent = parent

    def AddChild(self, childHash):
        """Increment the edge count for childHash, appending it if unseen."""
        # was: `for i in xrange(len(self.Childs))` -- xrange does not exist
        # on Python 3; enumerate works on both Python 2 and 3.
        for i, (child, count) in enumerate(self.Childs):
            if child == childHash:
                self.Childs[i] = (child, count + 1)
                return
        self.Childs.append((childHash, 1))

    def SortChilds(self):
        # Most frequently observed children first.
        self.Childs.sort(key=lambda x: -x[1])

    def __str__(self):
        return '\n' + self.Hash + '\n' + self.Parent + '\n' + str(self.Childs) + '\n'

    def ToDict(self):
        """Return a JSON-serializable representation of this node."""
        return {'Hash': self.Hash, 'Parent': self.Parent, 'Childs': self.Childs}
def buildTree():
    """Read edge files ./data/data_* and write the block tree to result.json.

    Each input line is "<prev_hash> <next_hash> <time>"; nodes are created
    on demand with parent "-1" until a parent edge is seen.
    """
    mypath = './data'
    onlyfiles = [f for f in os.listdir(mypath)
                 if os.path.isfile(os.path.join(mypath, f)) and f.startswith("data_")]
    blocksTree = dict()
    for input_file in onlyfiles:
        with open(os.path.join(mypath, input_file), 'r') as f:
            for line in f:
                # The edge timestamp is parsed but not used here.
                prevBlockHash, nextBlockHash, timeEdge = line.split()
                if prevBlockHash not in blocksTree:
                    blocksTree[prevBlockHash] = TBlock(prevBlockHash, str(-1))
                if nextBlockHash not in blocksTree:
                    blocksTree[nextBlockHash] = TBlock(nextBlockHash, str(-1))
                blocksTree[nextBlockHash].SetParent(prevBlockHash)
                blocksTree[prevBlockHash].AddChild(nextBlockHash)
    result = []
    # was: blocksTree.iteritems() -- Python 2 only; .items() works on 2 and 3.
    for blockHash, block in blocksTree.items():
        block.SortChilds()
        result.append(block.ToDict())
    with open('result.json', 'w') as g:
        json.dump(result, g, indent=4, sort_keys=True)
if __name__ == "__main__":
    # Script entry point: build the ./data block tree into result.json.
    buildTree()
|
#!/usr/bin/python3.4
#-*- coding:utf8 -*-
import myConf
import logging
import os
import time
import qrcode
import qrcode.image.svg
from PIL import Image
import re
import readline
import pack8583
from ctypes import *
import Security
def GetSerial():
    """Return the next 6-digit transaction serial number as a string.

    The counter is persisted in the sequence file named by the config
    (app_env/CfgDir + usr_var/seqFile) and wraps back to 1 after 999999.
    On a missing/malformed file the counter restarts from 1.
    """
    initSeq = '1'
    # Default result.  BUG FIX: previously `seq` was left unbound on the
    # "file format error" and FileNotFoundError paths, so the final
    # `return seq` raised UnboundLocalError.
    seq = '%06d' % int(initSeq)
    # Read the serial-number file path from the configuration.
    cfgFile = myConf.GetCombination('app_env', 'CfgDir', 'usr_var', 'seqFile')
    logging.info('seq file = [%s]' % cfgFile)
    try:
        with open(cfgFile, 'a+') as seqFile:
            seqFile.seek(0)
            allLines = seqFile.readlines()
            if len(allLines) != 1:
                # Malformed file: reset it to the initial counter value.
                # NOTE(review): this path returns the initial serial while
                # writing '1' back, so the next call reuses it -- confirm.
                logging.info('seq file format error')
                seqFile.seek(0)
                seqFile.truncate()
                seqFile.writelines(initSeq)
            else:
                try:
                    if int(allLines[0]) + 1 > 999999:
                        # Wrap around once the counter would exceed 6 digits.
                        seq = '%06d' % int(initSeq)
                    else:
                        seq = '%06d' % int(allLines[0])
                    seqFile.seek(0)
                    seqFile.truncate()
                    seqFile.writelines(str(int(seq) + 1))
                except ValueError:
                    # Non-numeric content: restart from the initial counter.
                    seq = '%06d' % int(initSeq)
                    seqFile.seek(0)
                    seqFile.truncate()
                    seqFile.writelines(str(int(seq) + 1))
    except FileNotFoundError:
        logging.info("can\'t open file [%s] " % cfgFile)
    return seq
def GetLocalDate():
    """Return the local date as 'MMDD' (zero-padded month and day)."""
    localDate = time.strftime('%m%d', time.localtime())
    logging.info('localDate = [%s] ' % localDate)
    return localDate
def GetLocalTime():
    """Return the local time as 'HHMMSS' (zero-padded 24-hour clock)."""
    localTime = time.strftime('%H%M%S', time.localtime())
    logging.info('localTime = [%s] ' % localTime)
    return localTime
def CallCustomFun(functionName):
    """Look functionName up in the OperatorOfFun table and invoke it."""
    logging.info('in CallCustomFun index = [%s]' % functionName)
    handler = OperatorOfFun.get(functionName)
    return handler()
def SetDefultValue(value):
    """Return value unchanged (handler for the 'Def' dispatch entry)."""
    logging.info('in SetDefultValue = [%s]' % value)
    return value
def CallInputFun(txt=''):
    """Prompt the operator for a value (label txt) and return it verbatim."""
    logging.info('in InPutFun!!!')
    return input('请输入' + txt + ':\n')
def CallInputQRFun(txt=''):
    """Prompt for a value and return it prefixed with 'QRnumber='."""
    logging.info('in InPutFun!!!')
    return 'QRnumber=' + input('请输入' + txt + ':\n')
def AutoSetFld(packSource=None):
    """Resolve a field-config pair [op_name, arg] via the myOperator table.

    :param packSource: two-element sequence: dispatch keyword and its argument
    :return: the operator's result, or None when the config is unusable
    """
    # BUG FIX: the original used a mutable default ([]) and indexed
    # packSource[1] without a length check, so a default or short call
    # raised IndexError instead of being logged as a bad config.
    if packSource is None:
        packSource = []
    if len(packSource) < 2 or not isinstance(packSource[1], str):
        logging.error('this cfg %s is error' % packSource)
        return None
    try:
        value = myOperator.get(packSource[0])(packSource[1])
    except TypeError:
        # Unknown keyword: .get() returned None, which is not callable.
        logging.error('not support this cfg %s' % packSource)
        return None
    return value
def CreateQrcode(sourceMsg='alipaySeq=&QRlink='):
    """Extract the QR link from an 'alipaySeq=...&QRlink=...' message and
    render it in the terminal via the `qr` command-line tool.

    :param sourceMsg: the host message carrying the sequence and link
    :return: 0 on parse failure; otherwise None after the operator confirms
    """
    # BUG FIX: the original guard used `and`, so a non-string argument
    # fell into `len(sourceMsg)` and raised TypeError instead of logging.
    if (not isinstance(sourceMsg, str)) or len(sourceMsg) <= 0:
        logging.error('can\'t create qrcode!')
        return 0
    sorceData = re.findall(r'alipaySeq=(\d{0,20})&', sourceMsg)
    sorceData += re.findall(r'QRlink=(.{0,128})$', sourceMsg)
    # Need exactly the sequence and the link.
    if len(sorceData) != 2:
        logging.error('can\'t create qrcode!')
        return 0
    cmd = 'qr %s' % (sorceData[1])
    # NOTE(review): shell command built from message content -- only safe
    # if the QR link source is trusted.
    os.system(cmd)
    input("press <enter> to continue")
def GenTermMac():
    """Pack the final 8583 message and compute the terminal MAC over it.

    Field 64 is zero-filled first so the MAC placeholder is included in the
    packed message; the last 16 characters are dropped before the MAC is
    computed (presumably the MAC field itself -- confirm).
    """
    logging.info('in GenTermMac')
    pack8583.setPackageFlf(64,'00000000')
    tmpStr = create_string_buffer(1024)
    Len = pack8583.libtest.packageFinal(tmpStr)
    #logging.info('len = [%d] after pack = [%s]' %(Len ,tmpStr.value))
    MAC = Security.GenerateTermMac(tmpStr.value.decode()[:-16])
    logging.info(MAC)
    return MAC
def GetCardNoFromPackage():
    """Extract the card number from the current 8583 package.

    Only field 35 (track 2) is actually parsed: if field 2 is present, or
    neither field yields data, None is returned.
    NOTE(review): returning None when field 2 IS present looks inverted --
    confirm whether field 2 was meant to be parsed too.
    """
    tmpStr = create_string_buffer(128)
    length = pack8583.libtest.getFldValue(2,tmpStr,sizeof(tmpStr))
    if length == 0:
        length = pack8583.libtest.getFldValue(35,tmpStr,sizeof(tmpStr))
        if length > 0:
            # NOTE(review): the class [D,=] also matches a comma; probably
            # [D=] (the track-2 separators) was intended -- confirm.
            cardno = re.findall(r'(\d{15,21})[D,=]',tmpStr.value.decode())[0]
        else:
            return None
    else:
        return None
    logging.info('cardno = %s ' % cardno)
    return cardno
def InPutPW(flag=''):
    """Get an encrypted PIN block from a preset PIN or by prompting the user.

    :param flag: a 6-12 digit PIN to use directly, or 'withcardno' to
                 prompt and bind the PIN block to the card number
    :return: the PIN block string, or None on invalid input
    """
    logging.info('in input passwd fun!!')
    if not isinstance(flag,str):
        logging.error('input passwd error!')
        return None
    flagLen = len(flag)
    # PIN supplied directly from the configuration file.
    if flagLen >= 6 and flagLen <= 12 and flag.isdigit():
        passwd = flag
        withcardno = False  # NOTE(review): assigned but never used
        # NOTE(review): this branch omits the card number, unlike the
        # sibling functions -- confirm that is intended.
        pinblock = Security.GetPinblock3Des(passwd,)
    elif flag == 'withcardno':
        withcardno = True
        inputPasswd = input('请输入您的密码:\n')
        if len(inputPasswd) >= 6 and len(inputPasswd) <= 12 and inputPasswd.isdigit():
            passwd = inputPasswd
            cardNo = GetCardNoFromPackage()
        else:
            logging.error('you input passwd error')
            return None
        pinblock = Security.GetPinblock3Des(passwd,1,cardNo)
    else:
        return None
    logging.info('pinblock = [%s]' % pinblock)
    return pinblock
def InPutPWWithCard(flag=''):
    """Get a card-bound encrypted PIN block from a preset PIN or by prompting.

    :param flag: a 6-12 digit PIN to use directly, or '' to prompt the user
    :return: the PIN block string, or None on invalid input
    """
    logging.info('in input passwd fun!!')
    if not isinstance(flag,str):
        logging.error('input passwd error!')
        return None
    flagLen = len(flag)
    # The card number is always bound into the PIN block in this variant.
    cardNo = GetCardNoFromPackage()
    # PIN supplied directly from the configuration file.
    if flagLen >= 6 and flagLen <= 12 and flag.isdigit():
        passwd = flag
        withcardno = False  # NOTE(review): assigned but never used
        pinblock = Security.GetPinblock3Des(passwd,1,cardNo)
    elif flag == '':
        withcardno = True
        inputPasswd = input('请输入您的密码:\n')
        if len(inputPasswd) >= 6 and len(inputPasswd) <= 12 and inputPasswd.isdigit():
            passwd = inputPasswd
        else:
            logging.error('you input passwd error')
            return None
        pinblock = Security.GetPinblock3Des(passwd,1,cardNo)
    else:
        return None
    logging.info('pinblock = [%s]' % pinblock)
    return pinblock
def InPutPWNoCard(flag=''):
    """Get an encrypted PIN block (no card binding) from a preset PIN or prompt.

    :param flag: a 6-12 digit PIN to use directly, or '' to prompt the user
    :return: the PIN block string, or None on invalid input
    """
    logging.info('in input passwd fun!!')
    if not isinstance(flag,str):
        logging.error('input passwd error!')
        return None
    flagLen = len(flag)
    # PIN supplied directly from the configuration file.
    if flagLen >= 6 and flagLen <= 12 and flag.isdigit():
        passwd = flag
        withcardno = False  # NOTE(review): assigned but never used
        pinblock = Security.GetPinblock3Des(passwd)
    elif flag == '':
        withcardno = True
        inputPasswd = input('请输入您的密码:\n')
        if len(inputPasswd) >= 6 and len(inputPasswd) <= 12 and inputPasswd.isdigit():
            passwd = inputPasswd
        else:
            logging.error('you input passwd error')
            return None
        pinblock = Security.GetPinblock3Des(passwd)
    else:
        return None
    logging.info('pinblock = [%s]' % pinblock)
    return pinblock
def SaveWorkKey(fld62):
    """Split field 62 into the PIN key (TPK) and MAC key (TAK) and persist both.

    The field is split in half: the first half is the PIN-key block and the
    second half the MAC-key block.  Each block is trimmed to 16 or 32 hex
    characters depending on its length.  Returns None on any validation
    failure; otherwise updates the config file and myConf's in-memory keys.
    """
    # Accepted half-lengths (in characters) of the combined key field.
    rightLen = [24,40,44,60,84]
    logging.info('work key = [%s]' % fld62)
    if not isinstance(fld62,str) or len(fld62) == 0:
        logging.error('get work key error')
        return None
    lenFld62 = int(len(fld62) / 2)
    if lenFld62 not in rightLen:
        logging.error('get work key error')
        return None
    PINKey = fld62[0:lenFld62]
    MACKey = fld62[lenFld62:]
    logging.info('PINKey = [%s] ,MACKey = [%s]' % (PINKey,MACKey))
    # Trim the PIN-key block down to the raw key hex.
    if len(PINKey)== 40 or len(PINKey)== 44:
        PINKey = PINKey[0:32]
        #SetConf('termInfo','tpk',PINKey)
    elif len(PINKey)== 24:
        PINKey = PINKey[0:16]
    else:
        return None
    # Trim the MAC-key block; a 44-char block skips a 4-char prefix.
    # NOTE(review): offsets inferred from the slicing only -- confirm
    # against the host's key-download specification.
    if len(MACKey)== 40:
        MACKey = MACKey[0:16]
        #SetConf('termInfo','tak',MACKey)
    elif len(MACKey)== 44:
        MACKey = MACKey[4:20]
    elif len(MACKey)== 24:
        MACKey = MACKey[0:16]
    else:
        return None
    logging.info('PINKey = [%s] ,MACKey = [%s]' % (PINKey,MACKey))
    # Persist to the config file and the in-memory copies.
    myConf.SetConf('termInfo','tpk',PINKey)
    myConf.SetConf('termInfo','tak',MACKey)
    myConf.tpk = PINKey
    myConf.tak = MACKey
def GetLogin60():
    """Return the field-60 sign-on code, chosen by the TMK's length.

    Single-length TMK (16 hex chars) -> '00000001001'; double-length
    (32) -> '00000001003'; any other length falls back to the default.
    """
    codes = {16: '00000001001', 32: '00000001003'}
    return codes.get(len(myConf.tmk), '00000001003')
def GetMid():
    # Merchant ID from the configuration.
    return myConf.mid
def GetTid():
    # Terminal ID from the configuration.
    return myConf.termid
def GetInsNo():
    # Institution number from the configuration.
    return myConf.InsNo
# Dispatch table used by AutoSetFld: maps a field-source keyword from the
# packing configuration to the function that produces the field value.
myOperator = {
    'Def':SetDefultValue,
    'Fun':CallCustomFun,
    'InPut':CallInputFun,
    'InPutqr':CallInputQRFun,
    'InPutPWWithCard':InPutPWWithCard,
    'InPutPWNoCard':InPutPWNoCard,
}
# Dispatch table for the 'Fun' source: zero-argument helpers looked up by name.
OperatorOfFun = {
    'GetSerial':GetSerial,
    'GetDate':GetLocalDate,
    'GetTime':GetLocalTime,
    'GenMac':GenTermMac,
    'GetLogin60':GetLogin60,
    'GetMid':GetMid,
    'GetTid':GetTid,
    'GetInsNo':GetInsNo,
}
# Ad-hoc manual tests, kept for reference:
#logging.info(AutoSetFld(['Def','234']))
#logging.info(AutoSetFld([1,'Fun','GetSerial']))
#logging.info(AutoSetFld(['Fun','GetDate']))
#GetLocalDate()
#GetLocalTime()
|
# Copyright (c) 2017 lululemon athletica Canada inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import re
import tight
from tight.providers.aws.controllers.lambda_proxy_event import LambdaProxyController
from tight.providers.aws.controllers.lambda_proxy_event import LambdaProxySingleton
from tight.providers.aws.controllers.lambda_proxy_event import set_default_headers
def test_prepare_args_no_boom():
    """Empty event/context are prepared cleanly, with no principal."""
    instance = LambdaProxyController()
    prepared_args = instance.prepare_args('', {}, {})
    assert prepared_args == {'event': {}, 'context': {}, 'principal_id': None}
def test_prepare_args_json_loads_body():
    """A JSON string body is parsed into a dict."""
    instance = LambdaProxyController()
    prepared_args = instance.prepare_args('', {'body': '{"name":"banana"}'}, {})
    assert prepared_args == {'event': {'body': {'name': 'banana'}}, 'context': {}, 'principal_id': None}
def test_prepare_args_json_loads_body_unparsable():
    """A non-JSON body degrades to an empty dict instead of raising."""
    instance = LambdaProxyController()
    prepared_args = instance.prepare_args('', {'body': 'I am just a string'}, {})
    assert prepared_args == {'event': {'body': {}}, 'context': {}, 'principal_id': None}
def test_prepare_response_passthrough():
    """A passthrough value is returned verbatim, bypassing the proxy shape."""
    instance = LambdaProxyController()
    prepared_response = instance.prepare_response(passthrough='Banana')
    assert prepared_response == 'Banana'
def test_prepare_response_default():
    """The default response carries CORS headers and a 200 status."""
    instance = LambdaProxyController()
    prepared_response = instance.prepare_response()
    assert prepared_response == {'body': {}, 'headers': {'Access-Control-Allow-Origin': '*'}, 'statusCode': 200}
def test_set_headers():
    """set_default_headers replaces the headers used by the shared singleton."""
    set_default_headers({})
    prepared_response = LambdaProxySingleton.prepare_response()
    assert prepared_response == {'body': {}, 'headers': {}, 'statusCode': 200}
    set_default_headers({'Content-Type': 'text/html'})
    prepared_response = LambdaProxySingleton.prepare_response()
    assert prepared_response == {'body': {}, 'headers': {'Content-Type': 'text/html'}, 'statusCode': 200}
def test_proxy_controller_run_error_handling(monkeypatch):
    """ This looks like one big regex literal, however there are some control characters sprinkled in.
    Hopefully, this will be robust enough to not break frequently. However, if it does look for
    consider: Did the `lambda_proxy_event.py` module get moved? Did line numbers change enough that
    pattern to match them is no longer valid?
    """
    # BUG FIX: the continuation strings below were separate, discarded
    # expression statements, so only the first line ever reached
    # `escaped_text` and the pattern silently matched on "Traceback..."
    # alone.  Parenthesizing makes them one implicitly-concatenated
    # literal.  The stray unescaped ')' and lone '\n' in the final
    # fragment are also corrected so the full pattern compiles.
    escaped_text = (
        r"Traceback\ \(most\ recent\ call\ last\)\:\\n"
        r" File\ \"\.*\/tight\/tight\/providers\/aws\/controllers\/lambda_proxy_event\.py\"\,\ line\ \d+,\ in\ run\\n"
        r" method_response\ \=\ method_handler\(\*args,\ \*\*method_handler_args\)\\n"
        r" File \".*\/tight\/tests\/unit\/providers\/aws\/controllers\/test_lambda_proxy_event_unit\.py\"\,\ line\ 80\,\ in\ controller_stub\\n"
        r" raise\ Exception\(\'I\ am\ an\ error\.\'\)\\nException\:\ I\ am\ an\ error\.\\n"
    )
    traceback_assertion_pattern = re.compile(escaped_text)
    instance = LambdaProxyController()
    def controller_stub(*args, **kwargs):
        raise Exception('I am an error.')
    instance.methods['test_controller:GET'] = controller_stub
    def error_spy(*args, **kwargs):
        # The logged message must contain the formatted stack trace.
        match = re.search(traceback_assertion_pattern, kwargs['message'])
        assert match is not None, 'Error is logged with formatted stacktrace.'
    monkeypatch.setattr(tight.providers.aws.controllers.lambda_proxy_event, 'error', error_spy)
    with pytest.raises(Exception) as ex:
        instance.run('test_controller', {'httpMethod': 'GET'}, {})
    assert str(ex.value) == 'There was an error.'
|
from weatherapp.core.abstract.command import Command
class Configurate(Command):
    """ Helps to configure weatherapp providers.
    """
    name = 'configurate'
    def get_parser(self):
        # Extend the base Command parser with the provider to configure.
        parser = super().get_parser()
        parser.add_argument('provider', help='Provider name')
        return parser
    def run(self, argv):
        """ Runs command.

        Looks the provider up in the application's provider manager and,
        if registered, delegates to that provider's configurate().
        Unknown provider names are silently ignored.
        """
        parsed_args = self.get_parser().parse_args(argv)
        if parsed_args.provider:
            provider_name = parsed_args.provider
            if provider_name in self.app.providermanager:
                provider_factory = self.app.providermanager.get(provider_name)
                provider_factory(self.app).configurate()
#!/usr/bin/env python
import sys
from datetime import datetime as dt
def reducer():
    """Hadoop-streaming reducer template: consume records from stdin.

    BUG FIX: the original `try:` body contained only comments, which is a
    SyntaxError (a suite needs at least one statement), and used the
    Python 2 `print` statement.  `pass` placeholders keep this a runnable
    skeleton; fill in the marked sections.
    """
    top_n = []  # collect results here in the loop below
    for record in sys.stdin:
        try:
            # your code goes here
            # dt.strptime(date, '%Y/%m/%d') returns a datetime object
            pass
        except Exception:
            # skip malformed records
            pass
    for x in top_n:
        print(x)
if __name__ == '__main__':
    # Hadoop-streaming entry point.
    reducer()
|
import unittest
from n_gram import to_n_gram
class TestNGram(unittest.TestCase):
    """Unit tests for to_n_gram (character n-gram splitting)."""
    def test_n_gram_2(self):
        # A string exactly n characters long yields a single n-gram.
        test_str = '札幌'
        expected = ['札幌']
        actual = to_n_gram(test_str, 2)
        self.assertEqual(expected, actual)
    def test_n_gram_14(self):
        # A 14-character string yields 13 overlapping bigrams.
        test_str = '北海道札幌市北区あいの里四条'
        expected = ['北海','海道','道札','札幌','幌市','市北','北区','区あ','あい','いの','の里','里四','四条']
        actual = to_n_gram(test_str, 2)
        self.assertEqual(expected, actual)
|
from os import listdir
from os.path import isfile, join
from itertools import combinations
from nltk.corpus.reader import WordListCorpusReader
import re
from collections import Counter
# Directories holding this tagger's output and the reference (test) tagging.
my_tagged_path = 'tagged_emails'
test_tagged_path = 'test_tagged'
# One capture pattern per tag type being scored.
list_of_patterns = ['<speaker>(.*?)</speaker>',
                    '<stime>(.*?)</stime>',
                    '<etime>(.*?)</etime>',
                    '<location>(.*?)</location>',
                    '<sentence>(.*?)</sentence>']
def get_file_paths(path=''):
    """Return names of files directly under `path` whose name contains 'txt'."""
    return [name for name in listdir(path)
            if isfile(join(path, name)) and "txt" in name]
my_files = get_file_paths(my_tagged_path)
test_files = get_file_paths(test_tagged_path)
# Per-tag confusion-matrix counts.  NOTE(review): every count starts at 1
# rather than 0 -- presumably add-one smoothing to avoid division by zero
# in the metrics below; confirm this is intended.
true_positive = {'speaker' : 1,
                 'stime' : 1,
                 'etime' : 1,
                 'location' : 1,
                 'sentence' : 1}
true_negative = {'speaker' : 1,
                 'stime' : 1,
                 'etime' : 1,
                 'location' : 1,
                 'sentence' : 1}
false_positive = {'speaker' : 1,
                  'stime' : 1,
                  'etime' : 1,
                  'location' : 1,
                  'sentence' : 1}
false_negative = {'speaker' : 1,
                  'stime' : 1,
                  'etime' : 1,
                  'location' : 1,
                  'sentence' : 1}
# Score this tagger's output against the reference tagging, file by file.
for f in test_files:
    data_test = open(test_tagged_path + "/" + f, "r").read()
    my_data = open(my_tagged_path + "/" + f, "r").read()
    for p in list_of_patterns:
        # Tag name of this pattern, e.g. 'speaker'.
        what = re.compile('<(.*?)>').findall(p)[0]
        comp = re.compile(p, re.DOTALL)
        found_test = comp.findall(data_test)
        my_found = comp.findall(my_data)
        # Strip any nested tags from the captured text.
        # BUG FIX: the original reused the loop variable `f` (shadowing the
        # filename) and located items with list.index(), which returns the
        # FIRST match and therefore rewrites the wrong slot when the list
        # contains duplicate captures.
        pat2 = '<.*?>'
        found_test = [re.sub(pat2, "", item) for item in found_test]
        my_found = [re.sub(pat2, "", item) for item in my_found]
        my_counter = Counter(my_found)
        ideal_counter = Counter(found_test)
        # NOTE(review): counting a true negative when both sides found
        # exactly one distinct value looks odd -- confirm the intent.
        if len(my_counter) == 1 and len(ideal_counter) == 1:
            true_negative[what] = true_negative[what] + 1
        for m in ideal_counter:
            if m in my_counter:
                # Matched value: my occurrences count as true positives;
                # any shortfall versus the reference is a false negative.
                true_positive[what] = true_positive[what] + my_counter[m]
                false_negative[what] = false_negative[what] + (ideal_counter[m] - my_counter[m])
            else:
                false_negative[what] = false_negative[what] + ideal_counter[m]
        for m in my_counter:
            # Values only I found are false positives.
            if m not in ideal_counter:
                false_positive[what] = false_positive[what] + my_counter[m]
print('Writing results in seminar_tagger_results.txt')
# BUG FIX: the output file was opened without ever being closed (and the
# name `file` shadowed the builtin); a with-block guarantees the flush.
# NOTE(review): the message above says ".txt" but the file written is
# "seminar_tagger_results" -- confirm which name is intended.
with open("seminar_tagger_results", 'w') as out:
    out.write("\n==========Precision==========\n")
    for g in true_positive:
        precision = true_positive[g] / float(true_positive[g] + false_positive[g])
        out.write(g + ": " + str(precision) + "\n")
    out.write("\n==========Recall==========\n")
    for g in true_positive:
        recall = true_positive[g] / (float(true_positive[g]) + false_negative[g])
        out.write(g + ": " + str(recall) + "\n")
    out.write("\n==========F1 Measure==========\n")
    for g in true_positive:
        precision = true_positive[g] / float(true_positive[g] + false_positive[g])
        recall = true_positive[g] / (float(true_positive[g]) + false_negative[g])
        f1_measure = 2 * precision * recall / float(precision + recall)
        out.write(g + ": " + str(f1_measure) + "\n")
    out.write("\n==========Accuracy==========\n")
    for g in true_positive:
        accuracy = (true_positive[g] + true_negative[g]) / float(true_positive[g] + true_negative[g] + false_positive[g] + false_negative[g])
        out.write(g + ": " + str(accuracy) + "\n")
print('Operation done.')
|
import matplotlib.pyplot as plt
import sys
# Congestion-window samples for the L4S flow: parallel (time, cwnd) series.
plttimeL = []
pltcwndL = []
with open("cwndL4S", "r") as file:
    # Skip the first 40 lines.
    for i in range(0, 40):
        file.readline()
    for line in file:
        # First column (after a 3-character prefix) is a tick count;
        # 250 ticks per second.  NOTE(review): prefix width and tick rate
        # are inferred from the parsing only -- confirm the trace format.
        plttimeL.append(int(line[3:].split(" ")[0])/250.0)
        pltcwndL.append(int(line.split(" ")[2]))
# Same parsing for the classic (non-L4S) flow.
plttimeC = []
pltcwndC = []
with open("cwndClassic", "r") as file:
    for i in range(0, 40):
        file.readline()
    for line in file:
        plttimeC.append(int(line[3:].split(" ")[0])/250.0)
        pltcwndC.append(int(line.split(" ")[2]))
# Common x-axis limit: the later of the two traces' last timestamps.
time = max(max(plttimeL), max(plttimeC))
# Combined plot: both flows' congestion windows over time.
plt.title("Congestion Window")
plt.ylim([0,500])
plt.xlim([0,time])
plt.plot(plttimeL, pltcwndL, label='L4S')
plt.plot(plttimeC, pltcwndC, label='Classic')
legend = plt.legend(loc='upper right')
plt.ylabel('Congestion Window')
plt.xlabel('Time [s]')
plt.grid()
# Output directory is the first command-line argument.
plt.savefig(str(sys.argv[1])+"/cwnd-both.png")
plt.clf()
# L4S-only plot.
plt.title("Congestion Window L4S Sender")
plt.ylim([0,500])
plt.xlim([0,time])
plt.plot(plttimeL, pltcwndL)
plt.ylabel('Congestion Window')
plt.xlabel('Time [s]')
plt.grid()
plt.savefig(str(sys.argv[1])+"/cwnd-l4s.png")
plt.clf()
# Classic-only plot.
plt.title("Congestion Window Classic Sender")
plt.ylim([0,500])
plt.xlim([0,time])
plt.plot(plttimeC, pltcwndC, color='g')
plt.ylabel('Congestion Window')
plt.xlabel('Time [s]')
plt.grid()
plt.savefig(str(sys.argv[1])+"/cwnd-classic.png")
|
# Read a price and a weight, then print the amount due in several formats.
cena = input("Podaj cenę")
waga = input("Podaj wagę")
należność = float(cena) * float(waga)
print("Cena: " + cena)
print("waga: " + waga)
print("Należność: " + (str(należność)))
# BUG FIX: the original line had two extra closing parentheses (SyntaxError).
print("Należność:", należność)
# BUG FIX: `waga={waga}` sat outside the f-string, turning it into a bogus
# `waga={...}` keyword argument to print(); it belongs inside the literal.
print(f"Należność: {należność}, wyliczone na podstawie zmiennych: cena={cena}, waga={waga}")
import jsl
from snactor.registry.schemas import registered_schema
@registered_schema('1.0')
class DockerInfo(jsl.Document):
    """JSON schema (registered as version 1.0) for docker information.

    Each field is a positional two-element array of an int and a string.
    NOTE(review): the meaning of the int/string pair is not visible here --
    confirm against the actor that emits this document.
    """
    path = jsl.ArrayField([
        jsl.IntField(),
        jsl.StringField()
    ])
    systemd_state = jsl.ArrayField([
        jsl.IntField(),
        jsl.StringField()
    ])
    info = jsl.ArrayField([
        jsl.IntField(),
        jsl.StringField()
    ])
|
# print() is a function
print("Hello World")
print(25)
# variables don't have a dollar sign!
# when naming we use snake_case, so no uppercase letters nor symbols other than underscore
first_name = "John"
last_name = "Smith"
# we can print variables, or text and variables together
print(first_name)
# BUG FIX: was `firstname` (undefined name) -- the variable is `first_name`.
print("Hello", first_name)
|
'''
README:
Project Name: Reading Calculator
Description: Given the user's reading speed and word count, this calculator will return the amount of minutes it will take to read a book.
Language: Python 3.7.9
'''
# Greet the user.
print("Howdy, and welcome to the reading calculator! 🤠 ")
# Ask for the user's average reading speed (words per minute).
reading_speed = int(input("What is your average reading speed per minute? "))
# Ask for the word count of the book being read.
word_count = int(input("What is the word count of your book? "))
# Minutes needed = total words divided by words-per-minute.
time_needed = word_count / reading_speed
# Report the result.
print(f"The amount of minutes needed to read: {time_needed} minutes")
import os
import sys
def _get_paths():
# Get the path to jedi.
_d = os.path.dirname
_jedi_path = _d(_d(_d(_d(_d(__file__)))))
_parso_path = sys.argv[1]
# The paths are the directory that jedi and parso lie in.
return {'jedi': _jedi_path, 'parso': _parso_path}
# Remove the first entry, because it's simply a directory entry that equals
# this directory.
del sys.path[0]
if sys.version_info > (3, 4):
    from importlib.machinery import PathFinder
    class _ExactImporter(object):
        """Meta-path finder resolving only the exact names in path_dct."""
        def __init__(self, path_dct):
            self._path_dct = path_dct
        def find_module(self, fullname, path=None):
            # Only handle top-level imports of the names we know about.
            if path is None and fullname in self._path_dct:
                p = self._path_dct[fullname]
                loader = PathFinder.find_module(fullname, path=[p])
                return loader
            return None
    # Try to import jedi/parso.
    sys.meta_path.insert(0, _ExactImporter(_get_paths()))
    from jedi.evaluate.compiled import subprocess  # NOQA
    sys.meta_path.pop(0)
else:
    # Python 2 / <=3.4 fallback: load the packages via the imp module.
    import imp
    def load(name):
        paths = list(_get_paths().values())
        fp, pathname, description = imp.find_module(name, paths)
        return imp.load_module(name, fp, pathname, description)
    load('parso')
    load('jedi')
    from jedi.evaluate.compiled import subprocess  # NOQA
from jedi._compatibility import highest_pickle_protocol  # noqa: E402
# Retrieve the pickle protocol.
# sys.argv[2] is the host interpreter's version string (e.g. "3.6.4");
# pick the highest pickle protocol both interpreters understand.
host_sys_version = [int(x) for x in sys.argv[2].split('.')]
pickle_protocol = highest_pickle_protocol([sys.version_info, host_sys_version])
# And finally start the client.
subprocess.Listener(pickle_protocol=pickle_protocol).listen()
|
#coding:utf-8
import MySQLdb,requests,time,re
import sys,datetime
# Python 2: force UTF-8 as the default string encoding.
reload(sys)
sys.setdefaultencoding('utf-8')
# Pull URLs out of the sitemap XML files and keep only the articles whose
# entry contains today's date (the original comment said "yesterday").
url_item_list = []  # text between <url> and </url>
yesterday_url_list = []  # URLs of the freshly published articles only
today = time.strftime("%Y-%m-%d",time.localtime(time.time()))
#today = '2018-02-28'
print today
for i in range(1,30):  # sitemap file index
    url = 'http://heziliang.cn/xml/%s.xml'%i
    r = requests.get(url)
    # The '?' after '*' (non-greedy) is the important part here.
    url_item_list = re.findall(r'<url>([\s\S]*?)</url>',r.content)
    if len(url_item_list) == 0:
        pass
    else:
        print '------------%s------------'%i
        for item in url_item_list:
            # NOTE(review): despite the variable names this filters on
            # *today's* date, not yesterday's -- confirm which is intended.
            if today in item:
                yesterday_url = re.findall(r'<loc>(.*?)</loc>',item)
                yesterday_url_list.append(yesterday_url[0])
f_ytd = open('yesterday_0.txt',r'w+')  # URL batch file (r'w+' is just 'w+')
num = 0
txt_index = 0
for link in yesterday_url_list:
    f_ytd.write(link+'\n')  # store the day's URLs in standalone files
    # Roll over to a new batch file every 2000 links.
    if num % 2000 == 1999:
        f_ytd.close()
        txt_index += 1
        f_ytd = open('yesterday_%s.txt'%txt_index,r'w+')
    num += 1
f_ytd.close()
time.sleep(1)
time.sleep(1)
# Start pushing the collected URLs to Baidu's submission API.
print 'push begin'
for i in range(0,txt_index+1):
    headers = {'Content-Type':'text/plain'}
    url = 'http://data.zz.baidu.com/urls'
    time.sleep(1)
    # Active (zhudong) push
    # NOTE(review): every token field below is empty -- pushes will be
    # rejected without real credentials.
    for link in open('yesterday_%s.txt'%i):
        params_zd = {'site':'heziliang.cn','token':''}
        r_zd = requests.post(url,params=params_zd,headers=headers,data=link.strip())
        # MIP push
        params = {'site':'heziliang.cn','token':'','type':'mip'}
        r = requests.post(url,params=params,headers=headers,data=link.strip())
        # AMP push
        params_m = {'site':'heziliang.cn','token':'','type':'amp'}
        r_m = requests.post(url,params=params_m,headers=headers,data=link.strip())
        print 'zd_push:'+r_zd.content
        print 'mip_push:'+r.content
        print 'amp_push:'+r_m.content
    time.sleep(10)
|
from dio.share.entity import Entity
from dio.delegate.core.option.option_http import OptionHttp
from dio.delegate.abstract.aiohttp_delegate import AioHttpDelegate, AioHttpRequest
from dio.delegate.core.source.source_base import SourceBase
from dio.schemable.schemable_python.compiler import compile_schema
class Foo(Entity):
    # Single integer payload field.
    foo: int
# Maps the wire field 'foo' onto the entity attribute 'foo'.
schema = compile_schema({
    'foo': 'foo',
})
class SourceLocalhost7390(SourceBase):
    """Source for the service at localhost:7390; decodes JSON payloads."""
    def content(self, schemable_clz, data, schema):
        # Deserialize the raw response body into the schemable class.
        return schemable_clz.from_json(data, schema)
class ReadOptionJavapAioHttp(OptionHttp):
    """Read-option preset: fetch /foo from localhost:7390 with no headers."""
    def __init__(self):
        super().__init__(
            source=SourceLocalhost7390('http://localhost:7390'),
            schema=schema,
            path='/foo',
            header=None,
        )
class DelegateJavapAioHttp(AioHttpDelegate):
    """Read-only aiohttp delegate for Foo entities; writes are unsupported."""
    Entity = Foo
    ReadOption = ReadOptionJavapAioHttp
    async def _do_read(self, request: AioHttpRequest, ro: ReadOptionJavapAioHttp):
        # Reads are issued as POSTs with no body and no params.
        return await request.post(None, None)
    async def _do_write(self, request: AioHttpRequest, wo):
        raise NotImplementedError
|
# 0416 파이썬
# P. 251
#coffe = 0
#def coffee_machine (button) :
# print()
# if button == 1 :
# print ("아메리카노 준비중")
# elif button == 2 :
# print ("라떼 준비중")
# elif button == 3 :
# print ("아이스티 준비중")
# elif button == 4 :
# print ("핫초코 준비중")
# else :
# print ("1~4 번 중 번호를 입력해주세요")
# print("주문하신 음료를 받아주세요")
# print()
##메인 코드
#coffee = int (input("어떤 음료를 주문하시겠습니까? / 1. 아메리카노 / 2. 라떼 / 3. 아이스티 / 4. 핫초코 : "))
#coffee_machine (coffee)
#print ("감사합니다.")
# P.254
#def multiple(x1, x2) :
# result = 0
# result = x1 * x2
# return result
#calculate = 0
##메인 코드
#calculate = multiple(9, 8)
#print("9와 8의 multiple() 함수의 결과는 %d" % calculate)
# 계산기 만들기 / P.256 참고
#def plus (p1, p2) :
# result = 0
# result = p1 + p2
# return result
#def minus (m1, m2) :
# result = 0
# result = m1 - m2
# return result
#def multiply (mul1, mul2) :
# result = 0
# result = mul1 * mul2
# return result
#def divide (d1, d2) :
# result = 0
# result = d1/d2
# return result
#pl = 0
#mi = 0
#mt = 0
#di = 0
#pl = plus(int(input("")))
#
#def calculator(x1, x2, o1) :
# result = 0
# if o1 == "+" :
# result = x1 + x2
# elif o1 == "-" :
# result = x1 - x2
# elif o1 == "*" :
# result = x1 * x2
# elif o1 == "/" :
# result = x1 / x2
# else :
# result = "정확한 연산자를 입력해주세요"
# return result
#calculate = 0
#a1, a2, op = 0, 0, ""
## 메인 코드
#op = str(input("연산자를 입력해 주세요 :"))
#a1 = int(input("첫번째 숫자 : "))
#a2 = int (input("두번째 숫자 :"))
#calculate = calculator(a1, a2, op)
#print("계산기 : %d, %s, %d = %d" %(a1, op, a2, calculate))
#--------------------------------------------------------------
#import random
#a = random.randrange(1, 10)
#print (a)
#a, b = input
#().split(), input()
#print (a, b)
# Loop-variable shadowing demo: the same name `a` is rebound by both loops,
# so the initial `a = 10` is overwritten immediately.
a = 10
for a in range (0, 10, 1) :
    for a in range (0, 5, 1) :
        print (a)
    # After the inner loop, `a` is 4 (the inner loop's last value).
    print (a)
'''
Created on Jul 23, 2018
@author: ftd
'''
from pathlib import Path
class User_default_file_processor(object):
    """Create, read, and update a simple key=value defaults file holding
    the two properties `workspace` and `project`."""

    @staticmethod
    def create_default_file(filename):
        '''
        create the default file
        @param filename: the default file full path
        '''
        # open(..., 'w') creates the file itself, so the previous
        # Path(filename).touch() call was redundant; the with-block also
        # guarantees the handle is closed even on a write error.
        with open(filename, 'w') as file:
            file.write('workspace=\n')
            file.write('project=\n')

    @staticmethod
    def read_default_file(filename):
        '''
        read the default file
        @param filename: the default file full path
        @return: the default info
        '''
        info = {}
        with open(filename, 'r') as file:
            for eachline in file.readlines():
                # Everything after the first '=' is the value.
                if 'workspace=' in eachline:
                    info['workspace'] = eachline.replace('\n', '')[eachline.index('=')+1:]
                if 'project=' in eachline:
                    info['project'] = eachline.replace('\n', '')[eachline.index('=')+1:]
        return info

    @staticmethod
    def update_default_file(filename, prop_name, prop_value):
        '''
        update the default file
        @param filename: the default file full path
        @param prop_name: the property name
        @param prop_value: the property value
        '''
        with open(filename, 'r') as file:
            lines = file.readlines()
        # Replace the first line carrying the property; keep the rest.
        for idx, line in enumerate(lines):
            if (prop_name + '=') in line:
                lines[idx] = prop_name + '=' + prop_value + '\n'
                break
        with open(filename, 'w') as newfile:
            newfile.writelines(lines)
        # (The redundant close() calls after the with-blocks were removed;
        # the context managers already closed both files.)
#Python program to count the number of lines in a text file.
file = open('text1.txt')
c = 0
for line in file:
    c = c + 1
    print(line)
print("Total number of lines: ", c)
file.close()  # closing the file

# Program to count the frequency of words in a file.
file = open('text1.txt')
d = dict()
for line in file:
    line = line.strip()  # Remove the leading spaces and newline character
    line = line.lower()
    # BUG FIX: str.replace returns a new string; the original discarded the
    # result, so comma-separated words were never actually split apart.
    line = line.replace(",", " ")  # If words are separated by commas
    words = line.split(" ")  # to get words
    for w in words:
        if w in d:
            d[w] = d[w] + 1  # counting frequency
        else:
            d[w] = 1
print("Frequency:")
file.close()
for key in list(d.keys()):
    print(key, " : ", d[key])

# Copy contents of one file to another file.
firstfile = open('text1.txt', 'r')
secondfile = open('text2.txt', 'a')  # text2.txt is created if it does not exist
for line in firstfile:  # reading content from first file
    secondfile.write(line)  # appending content to second file
firstfile.close()
secondfile.close()

# To add a new line in text1.txt and overwrite the content of text2.txt.
f = open("text1.txt", "a")  # opening the file in append mode to add content
f.write("\nNew content!")
# BUG FIX: the append handle was never closed before reopening for read,
# so the new content could still sit unflushed in the write buffer.
f.close()
f = open("text1.txt", "r")
print(f.read())
f.close()
f1 = open("text2.txt", "w")  # opening the file in write mode to overwrite content
f1.write("The content has been over written!")
f1.close()
f1 = open("text2.txt", "r")
print(f1.read())
f1.close()

# To sort the numbers in file 'text3.txt' in ascending order.
with open('text3.txt', 'r') as file:
    lines = file.readlines()
l_num = [int(num.strip()) for num in lines]
l_num.sort()
print("Ascending order: ", l_num)

# To delete the file --
# import os
# if os.path.exists("text1.txt"):
#     os.remove("text1.txt")
# else:
#     print("The file does not exist")
import numpy as np
import six
import time
import six.moves.cPickle as pickle
import pylab
# Network layout: 784 inputs, one 100-unit hidden layer, 784 outputs.
# NOTE(review): these two lists are unused in this script -- presumably
# kept for documentation only; confirm.
input_layer_n = [784]
hidden_layers_n = [100, 784]
# Trained weight and bias arrays (pickled by the training script).
W = pickle.load(open("W1.dump", "rb"))
B = pickle.load(open("B1.dump", "rb"))
if __name__ == '__main__':
    # Number of hidden units = rows of the first weight matrix.
    hidden_n = W[0].shape[0]
    # x = np.zeros((hidden_n, hidden_n), dtype=np.float64)
    #
    # for i in range(hidden_n):
    #     x[i, i] = 1.
    #
    # z = f[1](W[1].dot(x) + B[1])
    #
    #
    #
    # outputs = z.transpose(1, 0)
    # for i in range(len(outputs)):
    #     pylab.subplot(10, 10, i + 1)
    #     pylab.axis('off')
    #     pylab.imshow(outputs[i, :].reshape(28, 28), cmap=pylab.cm.gray_r, interpolation='nearest')
    #
    # pylab.show()
    # Visualize each hidden unit's incoming weights as a 28x28 image
    # on a 10x10 grid of subplots.
    for i in range(hidden_n):
        w = W[0][i, :]
        pylab.subplot(10, 10, i + 1)
        pylab.axis('off')
        pylab.imshow(w.reshape(28, 28), cmap=pylab.cm.gray_r, interpolation='nearest')
    pylab.show()
|
'''
Description:
version:
Author: Jim Huang
Date: 2021-02-19 21:32:13
LastEditors: CoderXZ
LastEditTime: 2021-05-03 15:17:55
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Jim Huang
@contact:Jim_Huang@trendmicr.com
@version: 1.0.0
@license: Apache Licence
@file: 3-15.py
@time: 2020/12/9 10:24
Selection-sort demos (original title in Chinese: 排序, "sorting")
"""
# --- Variant 1: selection sort into descending order, counting work done ---
nums = [ 1,9,8,5,6,7,4,3,2]
lenght = len(nums)  # NOTE(review): typo for "length"; kept to avoid renaming
print(nums)
count_swap = 0  # number of element swaps performed
count_iter = 0  # number of inner-loop comparisons
for i in range(lenght):
    maxindex = i
    for j in range(i+1, lenght):
        count_iter +=1
        # swaps eagerly on every larger element instead of tracking the
        # max index and swapping once per pass
        if nums[maxindex] < nums[j]:
            nums[maxindex],nums[j] = nums[j],nums[maxindex]
            count_swap +=1
print( nums)
print("counter_iter is {0}, count_swap is {1}".format( count_iter,count_swap))
# --- Variant 2: bidirectional pass placing max (front) and min (back) ---
# Translated: the max may already have been compared and then displaced by
# the min swap; skipping the re-check caused wrong results.
#nums = [ 1,2,3,4,5,6,7,8,9]
#nums = [ 9,8,7,6,5,4,3,2,1]
#nums = [1,9,8,5,6,7,4,3,2]
nums = [1,1,1,1,1,1,1,1,1]
count_swap = 0
count_iter = 0
for i in range(lenght//2):
    #print(nums)
    maxindex = i     # destination slot for this pass's maximum (front)
    minindex = -1-i  # destination slot for this pass's minimum (back)
    for j in range(i+1,lenght):
        count_iter+=1
        if nums[maxindex] < nums[j]:
            nums[maxindex],nums[j] = nums[j],nums[maxindex]
            count_swap +=1
        if nums[minindex] > nums[-1-j]:
            nums[minindex],nums[-1-j] = nums[-1-j],nums[minindex]
            count_swap +=1
    # Translated: only odd-length middles hit the problem above; compare
    # once more — the minimum side cannot go wrong.
    if nums[i] < nums[i+1]:
        nums[i],nums[i+1] = nums[i+1],nums[i]
        count_swap +=1
print(nums)
print("counter_iter is {0}, count_swap is {1}".format( count_iter,count_swap) )
|
#!/usr/bin/env python3
import random
import sys
# Set up some global variables. TODO: Add ability to play multiple games and move this into the Main function
with open('hangmanwords.txt', mode='rt', encoding='utf-8') as w:
    word_list = [line.strip() for line in w]
# random.choice replaces the manual word_list[random.randint(0, len-1)] indexing
play_word = random.choice(word_list)
puzzle = ['- '] * len(play_word)  # one '- ' placeholder per letter of the answer
letters_played = []  # guesses so far (not yet used; see TODO in check_for_match)
def validate_input(a):
    """
    Check a user's guess: it must be exactly one alphabetic character.
    :param a: letter input
    :return: 1 when the input is invalid (a message is printed), else 0
    """
    if len(a) != 1:
        print("You must enter a single letter")
        return 1
    if not a.isalpha():
        print("You must enter a letter")
        return 1
    return 0
def check_for_match(l):
    """
    Check the guessed letter against play_word and report the outcome.
    :param l: letter input
    :return: 1 when the letter occurs in the word, else 0
    """
    # TODO: Track if a letter has already been played and return a message to the user (don't decrement bad guess)
    # if l in letters_played:
    #     print("You've already played {}".format(l.upper()))
    if l not in play_word:
        print("Nope. {0} does not appear in the word".format(l.upper()))
        return 0
    occurrences = play_word.count(l)
    print("You got a letter! {0} appears {1} times".format(l.upper(), occurrences))
    print(' '.join(remaining_puzzle(l, occurrences)).upper())
    return 1
def remaining_puzzle(p, count):
    """
    Reveal every occurrence of a matched letter in the puzzle visual.
    :param p: letter input
    :param count: number of times the letter appears in the word
    :return: the updated puzzle list
    """
    search_from = 0
    for _ in range(count):
        # str.index with a start offset walks successive occurrences,
        # replacing the original slice-then-reindex arithmetic.
        hit = play_word.index(p, search_from)
        puzzle[hit] = p
        search_from = hit + 1
    return puzzle
def main(test=0, bad_guesses=5):
    """Run one game of hangman against the module-level play_word/puzzle.
    :param test: when truthy, print the answer up front (debug mode)
    :param bad_guesses: number of wrong guesses allowed
    """
    remaining = bad_guesses
    if test:
        print(play_word)
    print("Your word has {0} letters".format(len(play_word)))
    print(''.join(puzzle))
    print("You have {0} bad guesses remaining".format(remaining))
    while remaining:
        letter = input("Guess a letter: ")
        if validate_input(letter):
            continue  # invalid input costs nothing
        hit = check_for_match(letter)
        if '- ' not in puzzle:  # Player has guessed all letters
            break
        if not hit:
            remaining -= 1
            print("You have {0} bad guesses remaining".format(remaining))
    if remaining:
        print("Congratulations! You guessed the word: {0}".format(play_word.upper()))
    else:
        print("Better luck next time! Your word was {0}".format(play_word.upper()))
if __name__ == '__main__':
    # CLI: hangman.py [test_flag] [bad_guesses]
    # Fix: missing arguments no longer raise IndexError — fall back to the
    # same defaults main() itself declares.
    test_flag = int(sys.argv[1]) if len(sys.argv) > 1 else 0
    guesses = int(sys.argv[2]) if len(sys.argv) > 2 else 5
    main(test=test_flag, bad_guesses=guesses)
|
1. Evaluate |DecimalEscape| to obtain an EscapeValue _E_.
1. If _E_ is a character, then
1. Let _ch_ be _E_'s character.
1. Let _A_ be a one-element CharSet containing the character _ch_.
1. Call CharacterSetMatcher(_A_, *false*) and return its Matcher result.
1. Assert: _E_ must be an integer.
1. Let _n_ be that integer.
1. If _n_=0 or _n_>_NcapturingParens_, throw a *SyntaxError* exception.
1. Return an internal Matcher closure that takes two arguments, a State _x_ and a Continuation _c_, and performs the following steps:
1. Let _cap_ be _x_'s _captures_ List.
1. Let _s_ be _cap_[_n_].
1. If _s_ is *undefined*, return _c_(_x_).
1. Let _e_ be _x_'s _endIndex_.
1. Let _len_ be _s_'s length.
1. Let _f_ be _e_+_len_.
1. If _f_>_InputLength_, return ~failure~.
1. If there exists an integer _i_ between 0 (inclusive) and _len_ (exclusive) such that Canonicalize(_s_[_i_]) is not the same character value as Canonicalize(_Input_[_e_+_i_]), return ~failure~.
1. Let _y_ be the State (_f_, _cap_).
1. Call _c_(_y_) and return its result. |
# only hold the first click per user per company per day
import datetime
# inclusive date window of daily log files to de-duplicate
start_date = datetime.date(2016,9,1)
end_date = datetime.date(2016,9,30)
# NOTE(review): hard-coded local paths — adjust per machine
source_path = '/Users/Miao/Documents/code/sec/fundamental/'
dest_path = '/Users/Miao/Documents/code/sec/final/'
def file_name_generate(start, end):
    """Return daily log file names ('logYYYYMMDD.csv') for every date in
    [start, end], in chronological order.

    Fix: `range` replaces the Python-2-only `xrange`, so the helper works
    on both Python 2 and 3 (identical behavior in the comprehension).

    :param start: first datetime.date (inclusive)
    :param end: last datetime.date (inclusive)
    :return: list of file name strings
    """
    n_days = (end - start).days + 1
    return ['log' + (start + datetime.timedelta(days=x)).strftime('%Y%m%d') + '.csv'
            for x in range(n_days)]
file_name = file_name_generate(start_date, end_date)
user_dict = {}  # NOTE(review): never populated below — possibly leftover
# For each daily log, copy only the first line per (IP, cik) pair.
# ip_dict is a 4-level nested dict keyed by the IP's four octets; each
# leaf holds the set of cik values already seen for that IP today.
for f_n in file_name:
    log_f = open(source_path+f_n, 'r')
    dest = open(dest_path+f_n, 'w')
    ip_dict = {}  # reset per day: "first click per day"
    print f_n
    while(1):
        try:
            l = log_f.readline().strip()
            if len(l) == 0:
                break  # EOF (also stops at any blank line)
            data = l.split(',')
            cik = data[4]           # company identifier column
            ip = data[0].split('.') # requester IP, split into octets
            # Walk/extend the nested dict; skip the line when this
            # (IP, cik) pair was already recorded today.
            if ip[0] in ip_dict.keys():
                if ip[1] in ip_dict[ip[0]].keys():
                    if ip[2] in ip_dict[ip[0]][ip[1]].keys():
                        if ip[3] in ip_dict[ip[0]][ip[1]][ip[2]].keys():
                            if cik in ip_dict[ip[0]][ip[1]][ip[2]][ip[3]]:
                                continue  # duplicate click — drop it
                            else:
                                ip_dict[ip[0]][ip[1]][ip[2]][ip[3]].add(cik)
                        else:
                            ip_dict[ip[0]][ip[1]][ip[2]][ip[3]] = set([cik])
                    else:
                        ip_dict[ip[0]][ip[1]][ip[2]] = dict()
                        ip_dict[ip[0]][ip[1]][ip[2]][ip[3]] = set([cik])
                else:
                    ip_dict[ip[0]][ip[1]] = dict()
                    ip_dict[ip[0]][ip[1]][ip[2]] = dict()
                    ip_dict[ip[0]][ip[1]][ip[2]][ip[3]] = set([cik])
            else:
                ip_dict[ip[0]] = dict()
                ip_dict[ip[0]][ip[1]] = dict()
                ip_dict[ip[0]][ip[1]][ip[2]] = dict()
                ip_dict[ip[0]][ip[1]][ip[2]][ip[3]] = set([cik])
            dest.write(l+'\r\n')
        # NOTE(review): bare except silently drops malformed lines (e.g.
        # fewer than 5 columns) — confirm this best-effort skip is intended.
        except:
            pass
    log_f.close()
    dest.close()
|
# Encoding and decoding methods.
CHARTIME = 0.2   # default duration (seconds) each character's tone is held
min_freq = 100   # tone (Hz) assigned to symbol index 0 ('a')
step = 50        # spacing (Hz) between adjacent symbol tones
def char_to_ind(char):
    """Map a character to its symbol index: 0-25 for a-z, 26 for a space,
    27 for anything else."""
    if char == ' ':
        return 26
    code = ord(char)
    if 97 <= code <= 122:  # lowercase a-z
        return code - 97
    return 27
def ind_to_char(ind):
    """Inverse of char_to_ind: 0-25 -> a-z, 27 -> '.', and anything else
    (including 26) -> space."""
    if ind == 27:
        return '.'
    if 0 <= ind < 26:
        return chr(ind + 97)
    return ' '
# Tone table: each of the 28 symbols (a-z, space, '.') gets its own
# frequency, starting at min_freq Hz and spaced `step` Hz apart.
mappings = {ind_to_char(i): min_freq + i * step for i in range(28)}
def clean(message):
    '''
    Normalize a message so it contains only a-z, spaces, and periods:
    letters are lowercased, periods kept, everything else becomes a space.
    '''
    def normalize(c):
        if c.isalpha():
            return c.lower()
        if c == '.':
            return '.'
        return ' '
    return ''.join(normalize(c) for c in message)
def encode(message, charlength=CHARTIME):
    '''
    Encodes a message (str) according to a currently very primitive scheme.
    Returns a list of tuples (freq, duration) to be passed into 'transmit'.
    Currently all durations are 0.1 seconds even though that doesn't meet the bitrate.
    '''
    tones = []
    for ch in clean(message):
        tones.append((mappings[ch], charlength))
    return tones
def decode(f):
    '''
    Takes in a frequency and returns the best (MAP/MLE eventually?) guess of what the corresponding character is.
    '''
    # NOTE(review): int() truncates before the floor division, so the bin is
    # found by flooring relative to min_freq rather than rounding to the
    # nearest tone — confirm that is intended.
    offset = int(f - min_freq)
    return ind_to_char(offset // step)
|
#
# code to parse the pla status page for the flow
#
#
# Distributed under MIT License
#
# Copyright (c) 2020 Greg Brougham
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from lxml import html
import requests
# Fetch the PLA traffic widget and print any warning-title text found.
# Fix: the original printed "Success" under the status check but then parsed
# the body unconditionally, and indexed text1[0] without checking the xpath
# matched anything — both now guarded.
page = requests.get('http://www.pla.co.uk/templates/widgets/trafficWidget.cfm')
#page = requests.get('https://thamestides.org.uk/dailytides2.php?statcode=PUT&startdate=0')
if (page.status_code == 200):
    print ("Success")
    tree = html.fromstring(page.content)
    #//*[@id="content"]/table/tbody/tr[1]
    tab = tree.xpath('//*[@id="content"]/table')
    # table header
    tab1 = tree.xpath('//table[@class="first"]//tr[1]//text()')
    text1 = tree.xpath('//span[@class="warningTitle"]//text()')
    print (type(text1))
    if text1:
        print (text1[0])
        print (" > ", text1[0], " ", text1)
    else:
        print ("no warningTitle text found")
else:
    print ("request failed with status code", page.status_code)
# end of file
|
import shutil
import subprocess
import traceback
import qimage2ndarray as qimage2ndarray
from PyQt5 import uic, QtCore, Qt, QtGui
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import sys
import zmq
import os
import lyosha.first_task as graphics
from datetime import datetime
import time
def resource_path(relative_path):
    """Resolve a bundled resource: under a PyInstaller onefile build the
    files live in sys._MEIPASS; otherwise use the current directory."""
    base = getattr(sys, '_MEIPASS', None)
    if base is None:
        base = os.path.abspath(".")
    return os.path.join(base, relative_path)
# try:
# print(resource_path('_cnames.json'))
# os.mkdir(resource_path('branca'))
# shutil.copy(resource_path('_cnames.json'),
# resource_path('_cnames.json').strip('_cnames.json') + 'branca\\_cnames.json')
#
# shutil.copy(resource_path('_schemes.json'),
# resource_path('_schemes.json').strip('_schemes.json') + 'branca\\_schemes.json')
#
# shutil.copy(resource_path('scheme_base_codes.json'),
# resource_path('scheme_base_codes.json').strip(
# 'scheme_base_codes.json') + 'branca\\scheme_base_codes.json')
#
# shutil.copy(resource_path('scheme_info.json'),
# resource_path('scheme_info.json').strip('scheme_info.json') + 'branca\\scheme_info.json')
#
# shutil.copy(resource_path('__init__.py'),
# resource_path('__init__.py').strip('__init__.py') + 'branca\\__init__.py')
# except Exception as ex:
# print(ex)
# Optional module used by the map button (MainUI.map_button_click); an
# import failure is tolerated and only logged.
try:
    import main
except Exception as ex:
    print(ex)
# connecting to server
context = zmq.Context()
socket = context.socket(zmq.REQ)  # REQ socket: strict send/recv alternation
result = socket.connect("tcp://46.173.215.129:9000")
messages = ""  # local chat history
update = False  # True while the chat window's polling thread should run
# role codes: 1 - employee, 2 - department head, 3 - division head, 4 - company director
current_user = "none", "none", 0  # email, full name, position code
positions = ["Сотрудник", "Руководитель отдела", "Руководитель подразделения", "Руководитель предприятия"]
# loading ui's
form_main, base_main = uic.loadUiType(resource_path('mainForm.ui'))
form_chat, base_chat = uic.loadUiType(resource_path('chat.ui'))
form_login, base_login = uic.loadUiType(resource_path('login.ui'))
form_reg, base_reg = uic.loadUiType(resource_path('registration.ui'))
form_tele, base_tele = uic.loadUiType(resource_path('telemetry.ui'))
def send(message):
    """Send one request to the server and poll for the reply.

    :param message: protocol string, e.g. "history" or "in_mes|<email>|<text>"
    :return: the server's reply string, or None when nothing arrived within
        10 polls of 0.1 s (~1 s total).
    """
    global socket
    socket.send_string(message)
    print(message + " waiting answer from server")
    for _ in range(10):
        try:
            answer = socket.recv_string(zmq.NOBLOCK)
        except zmq.ZMQError:
            # Reply not ready yet — wait and poll again. Fix: the original
            # slept in a `finally`, adding a pointless 0.1 s delay to every
            # successful call as well.
            time.sleep(0.1)
        else:
            print("got answer")
            return answer
    print("no answer")
    return None
def log_uncaught_exceptions(ex_cls, ex, tb):
    """Global excepthook: print the formatted traceback, show it in a
    critical message box, then terminate the application."""
    text = '{}: {}:\n'.format(ex_cls.__name__, ex)
    # import traceback
    text += ''.join(traceback.format_tb(tb))
    print(text)
    Qt.QMessageBox.critical(None, 'Error', text)
    quit()
# route every uncaught exception through the dialog above
sys.excepthook = log_uncaught_exceptions
class AThread(QThread):
    """Background poller: while the module-level `update` flag is set,
    download the chat history from the server once a second and emit it
    through `threadSignalAThread`."""
    threadSignalAThread = pyqtSignal(str)
    def __init__(self):
        super().__init__()
    def run(self):
        global messages, update
        while update:
            # socket.send_string("history")
            messages = send("history")
            # print("waiting answer from server", datetime.now())
            # messages = str(socket.recv_string()) # download chat history
            # print(messages)
            # NOTE(review): send() returns None on timeout; emitting None
            # through a str-typed signal may fail — confirm.
            self.threadSignalAThread.emit(messages)
            QThread.msleep(1000)
class PandasModel(QtCore.QAbstractTableModel):
    """Read-only Qt table model over a DataFrame-like object (anything
    exposing `.values` and `.columns`)."""
    def __init__(self, data, parent=None):
        QtCore.QAbstractTableModel.__init__(self, parent)
        self._data = data  # backing frame; not copied
    def rowCount(self, parent=None):
        return len(self._data.values)
    def columnCount(self, parent=None):
        return self._data.columns.size
    def data(self, index, role=Qt.DisplayRole):
        # Only the display role is served, as stringified cell values;
        # every other request gets an invalid QVariant.
        if index.isValid():
            if role == Qt.DisplayRole:
                return QtCore.QVariant(str(
                    self._data.values[index.row()][index.column()]))
        return QtCore.QVariant()
class LoginUI(base_login, form_login):
    """Login window: checks credentials against the server ("usercheck"
    message) and opens MainUI on success."""
    def __init__(self):
        super(base_login, self).__init__()
        self.setupUi(self)
        self.main = None  # next window; kept referenced so it isn't collected
        self.signin_button = self.findChild(QPushButton, 'signin_button')
        self.signin_button.clicked.connect(self.signin_button_click)
        self.signup_button = self.findChild(QPushButton, 'signup_button')
        self.signup_button.clicked.connect(self.signup_button_click)
        self.email_input = self.findChild(QLineEdit, 'mail_login_input')
        self.password_input = self.findChild(QLineEdit, 'pw_login_input')
        self.password_input.setEchoMode(QLineEdit.Password)  # mask the password
        self.password_input.returnPressed.connect(self.signin_button_click)
        # error messages are rendered in red
        self.info_label = self.findChild(QLabel, 'info')
        pal = self.info_label.palette()
        pal.setColor(QtGui.QPalette.WindowText, QtGui.QColor("red"))
        self.info_label.setPalette(pal)
    def signin_button_click(self):
        """Validate the form, ask the server to check the credentials, and
        switch to the main window when they match."""
        global current_user
        email = self.email_input.text().strip(" ")
        password = self.password_input.text().strip(" ")
        # email = "terleckii"
        # password = "1234" # #######################################################################
        if len(email) == 0:
            self.info_label.setText("email is empty")
        elif len(password) == 0:
            self.info_label.setText("password is empty")
        else:
            result = send("usercheck|" + email + "|" + password)
            if result is None:
                self.info_label.setText("connection error. try again")
                return 0
            # server reply format: status $#$ name $#$ position
            result = result.split("$#$")
            print(result)
            if result[0] == "exist":
                name = result[1]
                position = int(result[2])
                current_user = email, name, position
                self.main = MainUI()
                self.main.show()
                self.close()
            else:
                self.info_label.setText("login or password incorrect")
    def signup_button_click(self):
        """Switch to the registration window."""
        self.main = RegUI()
        self.main.show()
        self.close()
class RegUI(base_reg, form_reg):
    """Registration window: creates a new account on the server ("reg"
    message) and opens MainUI on success."""
    def __init__(self):
        super(base_reg, self).__init__()
        self.setupUi(self)
        self.main = None
        self.signup_button = self.findChild(QPushButton, 'reg_button')
        self.signup_button.clicked.connect(self.signup_button_click)
        self.cancel_button = self.findChild(QPushButton, 'cancel_reg_button')
        self.cancel_button.clicked.connect(self.cancel_button_click)
        self.name_input = self.findChild(QLineEdit, 'name_reg_input')
        self.email_input = self.findChild(QLineEdit, 'email_reg_input')
        self.age_input = self.findChild(QLineEdit, 'age_reg_input')
        self.password_input = self.findChild(QLineEdit, 'pw_reg_input')
        self.password_input.setEchoMode(QLineEdit.Password)  # mask the password
        # position choices (Russian labels; list index + 1 = server role code)
        self.position_input = self.findChild(QComboBox, 'position_box_input')
        self.position_input.addItem("Сотрудник")
        self.position_input.addItem("Руководитель отдела")
        self.position_input.addItem("Руководитель подразделения")
        self.position_input.addItem("Руководитель предприятия")
        # error messages are rendered in red
        self.info_label = self.findChild(QLabel, 'info')
        pal = self.info_label.palette()
        pal.setColor(QtGui.QPalette.WindowText, QtGui.QColor("red"))
        self.info_label.setPalette(pal)
    def signup_button_click(self):
        """Validate the form and submit the registration request."""
        global current_user, positions
        name = self.name_input.text().strip(" ")
        email = self.email_input.text().strip(" ")
        position = positions.index(self.position_input.currentText()) + 1
        password = self.password_input.text().strip(" ")
        if len(name) == 0:
            self.info_label.setText("name is empty")
        elif len(email) == 0:
            self.info_label.setText("email is empty")
        elif len(password) == 0:
            self.info_label.setText("password is empty")
        else:
            result = send("reg|" + email + "|" + password + "|" + name + "|" + str(position))
            if result is None:
                self.info_label.setText("connection error. try again")
            elif result == "reg_ok":
                current_user = email, name, int(position)
                print(current_user)
                self.main = MainUI()
                self.main.show()
                self.close()
            else:
                self.info_label.setText("this email already registered")
    def cancel_button_click(self):
        """Go back to the login window."""
        self.main = LoginUI()
        self.main.show()
        self.close()
class MainUI(base_main, form_main):
    """Main menu window: routes to the CRM/map/equipment/staff/store/chat
    screens and shows the signed-in user's name and position."""
    def __init__(self):
        global current_user, positions
        super(base_main, self).__init__()
        self.setupUi(self)
        self.main = None
        # buttons binding
        self.crm_button = self.findChild(QPushButton, 'crmButton')
        self.crm_button.clicked.connect(self.crm_button_click)
        self.map_button = self.findChild(QPushButton, 'mapsButton')
        self.map_button.clicked.connect(self.map_button_click)
        self.equipment_button = self.findChild(QPushButton, 'equipmentStateButton')
        self.equipment_button.clicked.connect(self.equipment_button_click)
        self.staff_button = self.findChild(QPushButton, 'staffButton')
        self.staff_button.clicked.connect(self.staff_button_click)
        self.store_button = self.findChild(QPushButton, 'storeButton')
        self.store_button.clicked.connect(self.store_button_click)
        self.chat_button = self.findChild(QPushButton, 'chatButton')
        self.chat_button.clicked.connect(self.chat_button_click)
        self.logout_button = self.findChild(QPushButton, 'log_out_button')
        self.logout_button.clicked.connect(self.logout_button_click)
        # header: who is signed in (position codes are 1-based)
        self.username = self.findChild(QLabel, 'current_user_label')
        self.username.setText(current_user[1])
        self.user_position = self.findChild(QLabel, 'user_position_label')
        self.user_position.setText(positions[current_user[2] - 1])
    def crm_button_click(self):
        # This is executed when the button is pressed (screen not implemented)
        print('crm_button_click')
    def map_button_click(self):
        # This is executed when the button is pressed
        print('map_button_click')
        try:
            main.work()  # map screen lives in the optional `main` module
        except Exception as ex:
            print(ex)
        # subprocess.call("python " + resource_path("main.py"), shell=True)
    def equipment_button_click(self):
        # This is executed when the button is pressed
        print('equipment_button_click')
        self.main = TelemetryUI()
        self.main.show()
        self.close()
    def staff_button_click(self):
        # This is executed when the button is pressed (screen not implemented)
        print('staff_button_click')
    def store_button_click(self):
        # This is executed when the button is pressed (screen not implemented)
        print('store_button_click')
    def chat_button_click(self):
        # close current window and open chat window
        print('chat_button_click')
        self.main = ChatUI()
        self.main.show()
        self.close()
    def logout_button_click(self):
        print('logout_button_click')
        self.main = LoginUI()
        self.main.show()
        self.close()
class ChatUI(base_chat, form_chat):
    """Chat window: polls the server for history via AThread and sends
    messages with the "in_mes" command."""
    def __init__(self):
        global update, messages
        super(base_chat, self).__init__()
        self.setupUi(self)
        self.main = None
        self.thread = None  # background history poller (AThread)
        # widgets binding
        self.back_button = self.findChild(QPushButton, 'backFromChatButton')
        self.back_button.clicked.connect(self.back_from_chat_click)
        self.send_button = self.findChild(QPushButton, 'sendButton')
        self.send_button.clicked.connect(self.send_button_click)
        self.message_field = self.findChild(QLineEdit, 'inputMessage')
        self.message_field.returnPressed.connect(self.send_button_click)
        self.chat_field = self.findChild(QTextEdit, 'chatField')
        self.chat_field.setHtml(messages) # first updating chat field
        self.chat_field.verticalScrollBar().setValue(self.chat_field.verticalScrollBar().maximum()) # scroll to end
        # error messages are rendered in red
        self.info_label = self.findChild(QLabel, 'info')
        pal = self.info_label.palette()
        pal.setColor(QtGui.QPalette.WindowText, QtGui.QColor("red"))
        self.info_label.setPalette(pal)
        update = True  # enables AThread's polling loop
        # starting new thread
        self.using_q_thread()
    def using_q_thread(self):
        """Start the polling thread, or stop it if one is already running."""
        if self.thread is None:
            self.thread = AThread()
            self.thread.threadSignalAThread.connect(self.on_threadSignalAThread)
            self.thread.finished.connect(self.finishedAThread)
            self.thread.start()
            # self.btnA.setText("Stop AThread(QThread)")
        else:
            self.thread.terminate()
            self.thread = None
            # self.btnA.setText("Start AThread(QThread)")
    def finishedAThread(self):
        self.thread = None
        # self.btnA.setText("Start AThread(QThread)")
    def on_threadSignalAThread(self, value):
        """Receive the freshly downloaded history and refresh the view."""
        # self.msg.label.setText(str(value))
        # print(value)
        if value is None:
            self.info_label.setText("connection error. try again")
        else:
            self.info_label.setText("")
            self.chat_field.setHtml(str(value)) # updating chat field
            self.chat_field.verticalScrollBar().setValue(self.chat_field.verticalScrollBar().maximum()) # scroll to end
        # print("thread started")
    def back_from_chat_click(self):
        global update
        # go back to main window
        update = False  # lets AThread.run() fall out of its polling loop
        # self.thread.terminate()
        self.thread = None
        self.main = MainUI()
        self.main.show()
        self.close()
    def send_button_click(self):
        # send message button clicked
        global messages, current_user
        text = self.message_field.text().strip(" ")
        if len(text) > 0: # if message is not empty
            messages += text + '<br/>' # appending message to local history
            answer = send("in_mes|" + current_user[0] + "|" + text)
            # socket.send_string("in_mes|" + text)
            # print(str(socket.recv_string()))
            print(answer)
            self.chat_field.setHtml(messages) # updating chat field
            self.chat_field.verticalScrollBar().setValue(self.chat_field.verticalScrollBar().maximum()) # scroll to end
            self.message_field.clear()
class TelemetryUI(base_tele, form_tele):
    """Telemetry window: the user picks equipment, metric, date range and an
    output style, then renders all/warning/critical data as a table, graphic
    or diagram.

    Fix: the three click handlers previously duplicated ~40 lines each; they
    now share _show_telemetry and differ only in which graphics.masks rows
    they OR together.
    """
    def __init__(self):
        super(base_tele, self).__init__()
        self.setupUi(self)
        self.main = None
        # navigation / action buttons
        self.back_button = self.findChild(QPushButton, 'back_tele_button')
        self.back_button.clicked.connect(self.back_click)
        self.all_data_button = self.findChild(QPushButton, 'all_devices_data')
        self.all_data_button.clicked.connect(self.all_data_click)
        self.warning_button = self.findChild(QPushButton, 'warning_tele_button')
        self.warning_button.clicked.connect(self.warning_click)
        self.critical_button = self.findChild(QPushButton, 'critical_tele_button')
        self.critical_button.clicked.connect(self.critical_click)
        # output style selection
        self.graphic_radiobutton = self.findChild(QRadioButton, 'graphic_rbutton')
        self.table_radiobutton = self.findChild(QRadioButton, 'table_rbutton')
        self.diagram_radiobutton = self.findChild(QRadioButton, 'diagram_rbutton')
        self.date_begin = self.findChild(QDateTimeEdit, 'dateBegin')
        self.date_end = self.findChild(QDateTimeEdit, 'dateEnd')
        self.equipment_input = self.findChild(QComboBox, 'equipment_box')
        self.equipment_input.addItem("Все")
        # UI label -> column code understood by the graphics backend
        self.box_dict = {"Номер": 'num', "Дата": 'date', "Температура": "oC", "Уровень вибраций": 'vsu',
                         "Загруженность": 'congestion', "Мощность": 'W', "Время": 'Hours'}
        for i in range(1, 13):
            self.equipment_input.addItem(str(i))
        self.telemetry_input = self.findChild(QComboBox, 'telemetry_type')
        self.telemetry_input.addItem("Номер")
        self.telemetry_input.addItem("Температура")
        self.telemetry_input.addItem("Уровень вибраций")
        self.telemetry_input.addItem("Загруженность")
        self.telemetry_input.addItem("Мощность")
        self.telemetry_input.addItem("Время")
        self.table = self.findChild(QTableView, 'tableView')
        self.image_view = self.findChild(QGraphicsView, 'graphicsView')
    def back_click(self):
        """Return to the main menu."""
        self.main = MainUI()
        self.main.show()
        self.close()
    def all_data_click(self):
        # unfiltered data
        return self._show_telemetry(None)
    def warning_click(self):
        # even-index mask rows — warning-level conditions
        return self._show_telemetry((0, 2, 4, 6, 8))
    def critical_click(self):
        # odd-index mask rows — critical-level conditions
        return self._show_telemetry((1, 3, 5, 7, 9))
    def _selected_style(self):
        """Return 0/1/2 for table/graphic/diagram, or None when no radio
        button is checked."""
        if self.table_radiobutton.isChecked():
            return 0
        if self.graphic_radiobutton.isChecked():
            return 1
        if self.diagram_radiobutton.isChecked():
            return 2
        return None
    @staticmethod
    def _to_datetime(qdt):
        """Convert a QDateTime to a datetime truncated to whole hours."""
        d, t = qdt.date(), qdt.time()
        return datetime(d.year(), d.month(), d.day(), t.hour())
    def _show_telemetry(self, mask_indices):
        """Fetch telemetry for the current filter selection and render it.

        :param mask_indices: indices into graphics.masks whose first columns
            are OR-ed into a filter string, or None for unfiltered data.
        :return: 0 when no output style is selected, else None.
        """
        style = self._selected_style()
        if style is None:
            return 0  # matches the original handlers' early return
        telemetry_type = self.box_dict.get(self.telemetry_input.currentText())
        number = self.equipment_input.currentText()
        if number == "Все":  # "All" sentinel -> backend code 0
            number = 0
        date1 = self._to_datetime(self.date_begin.dateTime())
        date2 = self._to_datetime(self.date_end.dateTime())
        print("mask", graphics.masks[3][0])
        if mask_indices is None:
            result = graphics.create_graphic(int(number), date1, date2, telemetry_type, out=style)
        else:
            mask = ' OR '.join(graphics.masks[i][0] for i in mask_indices)
            result = graphics.create_graphic(int(number), date1, date2, telemetry_type, mask=mask, out=style)
        if style == 0:
            # tabular output goes through the DataFrame model
            model = PandasModel(result)
            self.table.setModel(model)
            self.table.show()
        else:
            # image output (graphic/diagram) rendered into the graphics view
            scene = QGraphicsScene(self)
            scene.addPixmap(QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(result)))
            self.image_view.setScene(scene)
            self.table.hide()
            self.image_view.show()
# Application entry point: start Qt and show the login window first.
app = QApplication(sys.argv)
window = LoginUI()
window.show()
sys.exit(app.exec())
|
import pablo as arm
import UARTModule as uart
import time
def command2():
    """Interactive mode: read integer commands from stdin forever and
    forward each one to the arm over UART."""
    uart.initUART()
    while True:
        cmd = int(input("Enter Command"))
        uart.writeCommand(cmd)
def createCommand( angle ,servoI):
    """Pack a servo angle and servo index into one integer command.

    The angle, truncated toward zero, occupies the hundreds place and
    above; the servo index is folded into the low digits — added for
    non-negative angles, subtracted for negative ones, so the command's
    sign follows the angle's sign.
    """
    base = int(angle) * 100
    return base - servoI if angle < 0 else base + servoI
def _send_sweep_point(x, y, z, step_delay):
    """Solve inverse kinematics for (x, y, z) and stream the three servo
    commands over UART, pacing each write by step_delay; print a notice
    when no solution exists. Always sleeps step_delay once at the end.
    """
    try:
        print("Trying for :", x, ",", y, ",", z)
        t1, t2, t3 = arm.getInverse(x, y, z)
        commands = [createCommand(t1, 0), createCommand(t2, 1), createCommand(t3, 2)]
        print("To Write Angles :", t1, t2, t3)
    # narrowed from the original bare `except:` so Ctrl-C still works
    except Exception:
        commands = None
    if commands is not None:
        uart.writeCommand(commands[0])
        time.sleep(step_delay)
        uart.writeCommand(commands[1])
        time.sleep(step_delay)
        uart.writeCommand(commands[2])
    else:
        print("NO Soultion")  # message kept verbatim from the original
    time.sleep(step_delay)


def loopCommand():
    """Sweep the arm back and forth along the Y axis forever at fixed X/Z.

    Fix: the forward and backward loop bodies were exact duplicates; both
    now delegate to _send_sweep_point.
    """
    uart.initUART()
    x = 6
    y = 2.5
    z = -8
    Y_MAX = 8.5
    Y_MIN = -3.5
    STEP_SIZE = 0.2
    STEP_DELAY = 0.02
    while True:
        # Loop Forward
        while y < Y_MAX:
            _send_sweep_point(x, y, z, STEP_DELAY)
            y += STEP_SIZE
        # Loop BACKWARD
        while y > Y_MIN:
            _send_sweep_point(x, y, z, STEP_DELAY)
            y -= STEP_SIZE
        print("Loop Ended")
if __name__=="__main__":
    # command2()  # manual UART command mode (disabled)
    loopCommand()
|
# -*- coding:utf-8 -*-
import heapq as p
import random
class MyHeap(object):
    """Array-backed binary max-heap built in place over the given list.

    build_heap/heapify implement the classic bottom-up sift-down
    construction; heap_sort delegates to the heapq module (a min-heap)
    to produce an ascending list, consuming self.list in the process.
    """
    def __init__(self, array):
        self.n = len(array)  # logical size, fixed at construction
        self.list = array    # backing storage, mutated in place
    def build_heap(self):
        """Heapify every non-leaf node bottom-up; return the array."""
        for i in range(self.n // 2 - 1, -1, -1):
            self.heapify(i)
        return self.list
    def heapify(self, i):
        """Sift list[i] down until the max-heap property holds below it."""
        j = i * 2 + 1  # left child in the 0-based array layout
        while j < self.n:
            # pick the larger child (j+1 < n means the right child exists)
            if j + 1 < self.n and self.list[j] < self.list[j + 1]:
                j += 1
            if self.list[i] > self.list[j]:
                break
            self.list[i], self.list[j] = self.list[j], self.list[i]
            i = j
            j = i * 2 + 1
    def heap_sort(self):
        """Return the elements in ascending order (empties self.list).

        Fixes: the Python-2-only `xrange` is replaced by `range`, and
        heapq.heapify is called once instead of once per pop — heappop
        already preserves the heap invariant, so re-heapifying every
        iteration was redundant O(n) work per element.
        """
        res = []
        self.build_heap()       # kept for parity with the original flow
        p.heapify(self.list)    # convert to the min-heap heapq expects
        for _ in range(self.n):
            res.append(p.heappop(self.list))
        return res
if __name__ == '__main__':
    # Python 2 demo: build a max-heap in place, then sort ascending.
    a1=[13,14,2,7,3,10,6,9]
    print a1
    myheap=MyHeap(a1)
    heap = myheap.build_heap()  # max-heap order; aliases a1
    # heap_sort pops everything off the backing list, so `heap` (the same
    # list object) prints as [] on the next line
    sorted_heap = myheap.heap_sort()
    print heap,sorted_heap
|
# coding: utf-8
# In[1]:
import pandas as pd
from pandas import DataFrame, Series
import numpy as np
import random
from keras.models import load_model, Sequential, Model
from keras.layers import Cropping2D
import cv2
import os
import socket
import scipy
from sklearn import preprocessing
import shutil
import skimage.transform as sktransform
# In[2]:
# Any host that is not the local Macbook is assumed to be the AWS box.
is_AWS = False if 'Macbook' in socket.gethostname() else True
# In[3]:
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.advanced_activations import ELU
from keras.callbacks import Callback
def Model(weights_path=None):
    """Build the steering CNN: 32x128 RGB input -> single scalar output.

    NOTE(review): this function shadows keras.models.Model imported at the
    top of the file — renaming would change the public name, so it is left
    as-is; confirm no caller relies on the keras class under this name.

    :param weights_path: optional HDF5 file; weights are loaded by layer name.
    :return: an uncompiled Sequential model.
    """
    model = Sequential()
    #Normalize to be between -1 and 1
    model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=(32, 128, 3)))
    # input_shape here is redundant (the Lambda above already fixes it)
    model.add(Convolution2D(16, 3, 3, input_shape=(32, 128, 3))) #(30, 126, 16)
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(2, 2))) #(15, 68, 16)
    model.add(Convolution2D(32, 3, 3)) #(13, 66, 32)
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(2, 2))) #(6, 33, 32)
    model.add(Convolution2D(64, 3, 3)) #(4, 31, 64)
    model.add(MaxPooling2D(pool_size=(2, 2))) #(2, 15, 64)
    model.add(Flatten()) #1920
    model.add(Dense(500)) #500
    model.add(ELU())
    model.add(Dropout(.5))
    model.add(Dense(100)) #100
    model.add(ELU())
    model.add(Dropout(.25))
    model.add(Dense(20))
    model.add(ELU())
    model.add(Dense(1)) # TODO: Try with Tanh
    if weights_path:
        model.load_weights(weights_path, by_name=True)
    return model
# In[4]:
# Resolve the dataset root depending on the machine we are running on.
if is_AWS:
    track1_dir = '/home/carnd/Dropbox/udacity-data/track1'
else:
    track1_dir = '/Users/macbook/Development/personal/udacity-car/CarND-Behavioral-Cloning-P3/track1'
# Resume support: reuse the exclusion list and a previously trained model
# when present; otherwise start from scratch.
# NOTE(review): bare `except:` also swallows KeyboardInterrupt and genuine
# errors (corrupt model.h5, malformed CSV) — consider narrowing it.
try:
    folders_to_exclude = pd.read_csv('to_exclude.csv', header=None, names=['Index', 'Name'])['Name'].tolist()
    model = load_model('model.h5')
except:
    folders_to_exclude = []
    model = None
folders_to_exclude += ['.DS_Store']
track1_data_dirs = [x for x in os.listdir(track1_dir) if x not in folders_to_exclude]
print(track1_data_dirs)
#############
# Hard-coded override: train only on the downloaded sample data set.
track1_data_dirs = ['data_download']
#############
track1_data_dirs = [track1_dir + '/' + x for x in track1_data_dirs]
driving_log_df = None
# Concatenate every folder's driving_log.csv, rewriting the recorded image
# paths to absolute paths under that folder's IMG/ directory.
for data_dir in track1_data_dirs:
    df = pd.read_csv(data_dir + "/driving_log.csv", header=None, names=["center","left","right","steering","throttle","brake","speed"])
    cols = ['center', 'left', 'right']
    for col in cols:
        df[col] = df[col].str.strip()
        # Keep only the file name from whatever path the simulator recorded.
        df[col] = df[col].str.split("/").apply(lambda x: x[-1])
    df[['center', 'left', 'right']] = data_dir + "/IMG/" + df[['center', 'left', 'right']]
    if driving_log_df is None:
        driving_log_df = df
    else:
        driving_log_df = pd.concat([driving_log_df, df])
print("Length: ", len(driving_log_df))
driving_log_df.head()
# In[5]:
def get_next_image_generator(df, position = 'center', offset = 0.2):
    """Yield BGR images (via cv2.imread) for every path in df[position].

    `offset` is accepted for signature compatibility but not used here.
    """
    for image_path in df[position]:
        yield cv2.imread(image_path)
# In[6]:
# Quick sanity check on the loaded log: peek at one image and print ranges.
tempgen = get_next_image_generator(driving_log_df)
sample = next(tempgen)
print("Dimension of image: H x W X D = ", sample.shape)
print("# of images: ", len(driving_log_df))
print("Steering range: Min=", np.min(driving_log_df['steering']), " , Max=", np.max(driving_log_df['steering']))
print("Throttle range: Min=", np.min(driving_log_df['throttle']), " , Max=", np.max(driving_log_df['throttle']))
print("Brake range: Min=", np.min(driving_log_df['brake']), " , Max=", np.max(driving_log_df['brake']))
print("Speed range: Min=", np.min(driving_log_df['speed']), " , Max=", np.max(driving_log_df['speed']))
print("image Min: ", np.min(sample))
print("image Max: ", np.max(sample))
#sample
# In[10]:
def preprocess(image, top_offset=.375, bottom_offset=.125):
    """Crop top/bottom fractions of the frame, then resize to 32x128x3.

    Offsets are fractions of the image height.  NOTE: skimage's resize
    returns a float image.  Assumes bottom_offset * height >= 1, otherwise
    the `[-0]` slice would be empty — matches how callers use it here.
    """
    height = image.shape[0]
    crop_top = int(top_offset * height)
    crop_bottom = int(bottom_offset * height)
    cropped = image[crop_top:-crop_bottom, :]
    return sktransform.resize(cropped, (32, 128, 3))
def add_random_shadow(image):
    """Darken a random vertical wedge of the image in place (and return it).

    Two distinct column positions are drawn at random; the straight line
    through (x1, 0) and (x2, h) splits each row, and every pixel left of the
    line is halved in intensity.
    """
    height, width = image.shape[0], image.shape[1]
    [x1, x2] = np.random.choice(width, 2, replace=False)
    slope = height / (x2 - x1)
    intercept = - slope * x1
    for row in range(height):
        # Column where the shadow boundary crosses this row.
        boundary = int((row - intercept) / slope)
        image[row, :boundary, :] = (image[row, :boundary, :] * .5).astype(np.int32)
    return image
def adjust_brightness(): #TODO
    # Placeholder for a brightness-jitter augmentation; not implemented yet.
    pass
def offset_steering(df, offset):
    """Shift steering for side-camera rows: +offset for 'left', -offset for 'right'.

    BUG FIX: the original assigned through chained indexing
    (``df[mask]['steering'] = ...``), which writes to a temporary copy and
    silently leaves ``df`` unchanged.  ``.loc`` performs the real in-place
    update.  Mutates and returns ``df``.
    """
    df.loc[df['target'] == 'left', 'steering'] += offset
    df.loc[df['target'] == 'right', 'steering'] -= offset
    return df
def filter_by_steering(df, min_angle):
    """Drop rows whose absolute steering angle is below min_angle."""
    keep = df['steering'].abs() >= min_angle
    return df[keep]
def append_mirrored_data(df):
    """Return df with a horizontally-mirrored duplicate of each row appended.

    Mirrored rows are flagged mirror=True and carry a negated steering
    angle; the actual pixel flip happens later, at image-load time.
    """
    mirrored = df.copy(deep=True)
    mirrored['mirror'] = True
    mirrored['steering'] = -mirrored['steering']
    return pd.concat([df, mirrored])
def append_shadowed_data(df):
    """Return df with a duplicate of every row, flagged shadow=True, appended."""
    shadowed = df.copy(deep=True)
    shadowed['shadow'] = True
    return pd.concat([df, shadowed])
def append_v_shift_noise_data(df):
    """Return df with a duplicate of every row, flagged v_shift_noise=True, appended."""
    jittered = df.copy(deep=True)
    jittered['v_shift_noise'] = True
    return pd.concat([df, jittered])
def set_position_targets(df, position):
    """Assign a 'target' camera column ('left' / 'center' / 'right').

    With position='all', every row is tripled — one copy per camera — so
    each camera image becomes its own training sample.
    """
    if position != 'all':
        df['target'] = position
        return df
    copies = []
    for camera in ('left', 'center', 'right'):
        tagged = df.copy(deep=True)
        tagged['target'] = camera
        copies.append(tagged)
    return pd.concat(copies)
# DF Columns: steering, mirror, <position>, target, type
# type is the kind of augmentation performed: center, left, right, vshifted, shadowed, brightnened
def get_next_feature(df, batch_size = 10, mode = 'train', position = 'center',
                     offset = 0.2, val_portion = 0.2, include_mirror=True,
                     include_shadow = True, include_v_shift_noise=True, min_angle=0.02):
    """Build an endless batch generator over the driving log.

    Splits df into train/validation by row order (no shuffling before the
    split), applies the requested augmentations by duplicating rows with
    boolean flags, and returns (generator, number_of_rows).  The generator
    yields (inputs, targets) arrays of shape (batch_size, 32, 128, 3) and
    (batch_size,).
    """
    total_len = len(df)
    val_len = int(val_portion * total_len)
    train_len = total_len - val_len
    if mode == "train":
        df = df[:train_len]
    else: #Validation set
        df = df[train_len:]
        position = 'center' #Only use center data
    df = set_position_targets(df, position)
    df = offset_steering(df, offset)
    df = filter_by_steering(df, min_angle)
    # Each augmentation duplicates the remaining rows with a flag column;
    # the flag is consumed per-row inside the generator below.
    df['mirror'] = False
    if include_mirror:
        df = append_mirrored_data(df)
    df['shadow'] = False
    if include_shadow:
        df = append_shadowed_data(df)
    df['v_shift_noise'] = False
    if include_v_shift_noise:
        df = append_v_shift_noise_data(df)
    image_size = (32, 128, 3)
    inputs = np.zeros([batch_size, *image_size]) #length of prediction output
    targets = np.zeros([batch_size])
    def generator(df, inputs, targets):
        # Infinite generator: reshuffles the frame each full pass and emits
        # fixed-size batches; a trailing partial batch is carried over into
        # the next pass rather than yielded short.
        count = 0
        while(True):
            #Shuffle
            df = df.sample(frac=1).reset_index(drop=True)
            for idx in range(len(df)):
                row = df.iloc[idx]
                # row['target'] names the camera column holding the image path.
                image_path = row[row['target']]
                img = cv2.imread(image_path)
                if row['mirror']:
                    img = img[:,::-1,:]
                if row['shadow']:
                    img = add_random_shadow(img)
                # Random vertical crop jitter (+/- 5% of height) when enabled.
                v_delta = .05 if row['v_shift_noise'] else 0
                img = preprocess(img,
                                 top_offset=random.uniform(.375 - v_delta, .375 + v_delta),
                                 bottom_offset=random.uniform(.125 - v_delta, .125 + v_delta))
                img = img[np.newaxis, :, :, :]
                inputs[count] = img
                targets[count] = row['steering']
                count += 1
                if count == batch_size:
                    yield inputs, targets
                    # Fresh arrays so the consumer's reference is not
                    # overwritten while it still holds the previous batch.
                    inputs = np.zeros([batch_size, *image_size])
                    targets = np.zeros([batch_size])
                    count = 0
    return generator(df, inputs, targets), len(df)
# In[11]:
# Callbacks function in model, use for save best model in each epoch, but it is not neccesary
class weight_logger(Callback):
    """Keras callback that snapshots model weights after every epoch.

    Recreates an empty weights/ directory on construction, then writes
    weights/model_epoch_<n>.h5 (1-based epoch number) at each epoch end.
    """

    def __init__(self):
        super(weight_logger, self).__init__()
        # Start every training run from a clean snapshot directory.
        self.weight_path = os.path.join('weights/')
        shutil.rmtree(self.weight_path, ignore_errors=True)
        os.makedirs(self.weight_path, exist_ok=True)

    def on_epoch_end(self, epoch, logs={}):
        """Save the model weights for the just-finished epoch."""
        filename = 'model_epoch_{}.h5'.format(epoch + 1)
        self.model.save_weights(os.path.join(self.weight_path, filename))
# Define the model
#model = Model(dropout=0.7, dropout_level=1, orig = False, discrete=False)
# Reuse the model restored from model.h5 when it loaded; otherwise build fresh.
if model is None:
    model = Model()
# In[12]:
# NOTE(review): `lr` is not a compile() parameter — the learning rate belongs
# on the optimizer instance; confirm this kwarg is not silently ignored.
model.compile(optimizer='Nadam', loss='mean_squared_error', lr=0.0001)
# train model
EPOCHS = 20
OFFSET = 0.2 # 0.08-0.25 from forums. 4 degrees = 0.16
VAL_PORTION = 0.1
# Augmentations are enabled for training only; validation stays unaugmented.
INCLUDE_MIRROR_TRAIN = True
INCLUDE_MIRROR_VAL = False
INCLUDE_SHADOW_TRAIN = True
INCLUDE_SHADOW_VAL = False
INCLUDE_V_SHIFT_NOISE_TRAIN = True
INCLUDE_V_SHIFT_NOISE_VAL = False
MIN_ANGLE_TRAIN=0.02 #1.0
MIN_ANGLE_VAL=0.0
# Train on all the data
position = 'all'
train_generator_all, train_len = get_next_feature(driving_log_df, 10, 'train', position,
                                                  OFFSET, VAL_PORTION, INCLUDE_MIRROR_TRAIN,
                                                  INCLUDE_SHADOW_TRAIN, INCLUDE_V_SHIFT_NOISE_TRAIN,
                                                  MIN_ANGLE_TRAIN)
validation_generator_all, val_len = get_next_feature(driving_log_df, 10, 'val', position,
                                                     OFFSET, VAL_PORTION, INCLUDE_MIRROR_VAL,
                                                     INCLUDE_SHADOW_VAL, INCLUDE_V_SHIFT_NOISE_VAL,
                                                     MIN_ANGLE_VAL)
# Keras 1-style fit_generator arguments (samples_per_epoch / nb_epoch).
model.fit_generator(train_generator_all,
                    samples_per_epoch=train_len,
                    nb_epoch=EPOCHS,
                    validation_data=validation_generator_all,
                    nb_val_samples=val_len,
                    callbacks=[weight_logger()], # Add a callbacks to save best model in each epoch, but it is not neccesary
                    verbose=1) # If verbose=1 or none, will show processbar, keep it if run without GPU
# In[ ]:
# In[ ]:
model.save('modeltest.h5')
# In[ ]:
#model.save('model.h5')
# In[ ]:
#Series([x.split("/")[-1] for x in track1_data_dirs], name="done_to_exclude").to_csv("to_exclude.csv")
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
|
import pymysql
# Connection
# SECURITY NOTE(review): database host and credentials are hard-coded in
# source — move them to environment variables / a secrets store and rotate
# this password.
connection = pymysql.connect(host='rds.chzi0a331csg.ap-south-1.rds.amazonaws.com',
                             user='admin',
                             password='rootroot',
                             database='rds_sql')
def handler():
    """Print every row of the user_data table (assumes at least 6 columns).

    FIX: the cursor is now closed deterministically via a context manager;
    the original never closed it.
    """
    with connection.cursor() as cursor:
        cursor.execute('SELECT * from user_data')
        rows = cursor.fetchall()
    for row in rows:
        print("{0} {1} {2} {3} {4} {5} ".format(row[0], row[1], row[2], row[3], row[4], row[5]))
handler()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'my_subjects.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_mwindow_my_subjects(object):
    """UI definition for the "My Subjects" window.

    Behaviour-identical rewrite of the pyuic5 output: the ~150 repeated
    QPalette setBrush lines are expressed as data tables instead.
    """

    def setupUi(self, mwindow_my_subjects):
        mwindow_my_subjects.setObjectName("mwindow_my_subjects")
        mwindow_my_subjects.resize(537, 400)
        size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        size_policy.setHorizontalStretch(0)
        size_policy.setVerticalStretch(0)
        size_policy.setHeightForWidth(mwindow_my_subjects.sizePolicy().hasHeightForWidth())
        mwindow_my_subjects.setSizePolicy(size_policy)
        window_font = QtGui.QFont()
        window_font.setFamily("Arial")
        window_font.setPointSize(10)
        mwindow_my_subjects.setFont(window_font)
        self.central_widget = QtWidgets.QWidget(mwindow_my_subjects)
        self.central_widget.setObjectName("central_widget")
        self.gridLayout = QtWidgets.QGridLayout(self.central_widget)
        self.gridLayout.setObjectName("gridLayout")
        self.vert_layout_my_subjects = QtWidgets.QVBoxLayout()
        self.vert_layout_my_subjects.setObjectName("vert_layout_my_subjects")
        # Heading label.
        self.lbl_my_subjects = QtWidgets.QLabel(self.central_widget)
        heading_font = QtGui.QFont()
        heading_font.setPointSize(16)
        self.lbl_my_subjects.setFont(heading_font)
        self.lbl_my_subjects.setObjectName("lbl_my_subjects")
        self.vert_layout_my_subjects.addWidget(self.lbl_my_subjects)
        # Separator below the heading.
        self.hori_line_my_subjects = QtWidgets.QFrame(self.central_widget)
        self.hori_line_my_subjects.setFrameShape(QtWidgets.QFrame.HLine)
        self.hori_line_my_subjects.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.hori_line_my_subjects.setObjectName("hori_line_my_subjects")
        self.vert_layout_my_subjects.addWidget(self.hori_line_my_subjects)
        # Add / Delete buttons, pushed left by an expanding spacer.
        self.hori_layout_buttons = QtWidgets.QHBoxLayout()
        self.hori_layout_buttons.setObjectName("hori_layout_buttons")
        self.btn_add_subject = QtWidgets.QPushButton(self.central_widget)
        self.btn_add_subject.setObjectName("btn_add_subject")
        self.hori_layout_buttons.addWidget(self.btn_add_subject, 0, QtCore.Qt.AlignLeft)
        self.btn_delete_subject = QtWidgets.QPushButton(self.central_widget)
        self.btn_delete_subject.setAutoDefault(False)
        self.btn_delete_subject.setDefault(False)
        self.btn_delete_subject.setFlat(False)
        self.btn_delete_subject.setObjectName("btn_delete_subject")
        self.hori_layout_buttons.addWidget(self.btn_delete_subject, 0, QtCore.Qt.AlignLeft)
        spacer = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.hori_layout_buttons.addItem(spacer)
        self.vert_layout_my_subjects.addLayout(self.hori_layout_buttons)
        # Italic instruction line.
        self.lbl_instruction = QtWidgets.QLabel(self.central_widget)
        instruction_font = QtGui.QFont()
        instruction_font.setItalic(True)
        self.lbl_instruction.setFont(instruction_font)
        self.lbl_instruction.setObjectName("lbl_instruction")
        self.vert_layout_my_subjects.addWidget(self.lbl_instruction)
        self.hori_line_add_subject = QtWidgets.QFrame(self.central_widget)
        self.hori_line_add_subject.setFrameShape(QtWidgets.QFrame.HLine)
        self.hori_line_add_subject.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.hori_line_add_subject.setObjectName("hori_line_add_subject")
        self.vert_layout_my_subjects.addWidget(self.hori_line_add_subject)
        # Subject list with a mostly-transparent palette (alpha 0 colours).
        self.list_widget_my_subjects = QtWidgets.QListWidget(self.central_widget)
        self.list_widget_my_subjects.setPalette(self._build_list_palette())
        list_font = QtGui.QFont()
        list_font.setFamily("Arial")
        list_font.setPointSize(10)
        self.list_widget_my_subjects.setFont(list_font)
        self.list_widget_my_subjects.setAutoFillBackground(False)
        self.list_widget_my_subjects.setStyleSheet("background-color: rgba(255, 255, 255, 0)")
        self.list_widget_my_subjects.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.list_widget_my_subjects.setFrameShadow(QtWidgets.QFrame.Plain)
        self.list_widget_my_subjects.setObjectName("list_widget_my_subjects")
        self.vert_layout_my_subjects.addWidget(self.list_widget_my_subjects)
        self.gridLayout.addLayout(self.vert_layout_my_subjects, 0, 0, 1, 1)
        mwindow_my_subjects.setCentralWidget(self.central_widget)
        self.actionAgenda = QtWidgets.QAction(mwindow_my_subjects)
        self.actionAgenda.setObjectName("actionAgenda")
        self.retranslateUi(mwindow_my_subjects)
        QtCore.QMetaObject.connectSlotsByName(mwindow_my_subjects)

    @staticmethod
    def _build_list_palette():
        """Recreate the generated QPalette for the subject list widget.

        Each tuple is (role name, (r, g, b, a), brush style) — one row per
        setBrush call in the original pyuic5 output.  Active and Inactive
        groups share the same table; Disabled differs in the greyed roles
        and the AlternateBase colour.
        """
        shared = [
            ('WindowText', (0, 0, 0, 255), 'SolidPattern'),
            ('Button', (255, 255, 255, 0), 'SolidPattern'),
            ('Light', (255, 255, 255, 0), 'SolidPattern'),
            ('Midlight', (247, 247, 247, 0), 'SolidPattern'),
            ('Dark', (120, 120, 120, 0), 'SolidPattern'),
            ('Mid', (160, 160, 160, 0), 'SolidPattern'),
            ('Text', (0, 0, 0, 255), 'SolidPattern'),
            ('BrightText', (255, 255, 255, 255), 'SolidPattern'),
            ('ButtonText', (0, 0, 0, 255), 'SolidPattern'),
            ('Base', (0, 0, 0, 255), 'NoBrush'),
            ('Window', (255, 255, 255, 0), 'SolidPattern'),
            ('Shadow', (0, 0, 0, 255), 'SolidPattern'),
            ('AlternateBase', (247, 247, 247, 127), 'SolidPattern'),
            ('ToolTipBase', (255, 255, 220, 255), 'SolidPattern'),
            ('ToolTipText', (0, 0, 0, 255), 'SolidPattern'),
            ('PlaceholderText', (0, 0, 0, 128), 'SolidPattern'),
        ]
        disabled = [
            ('WindowText', (120, 120, 120, 0), 'SolidPattern'),
            ('Button', (255, 255, 255, 0), 'SolidPattern'),
            ('Light', (255, 255, 255, 0), 'SolidPattern'),
            ('Midlight', (247, 247, 247, 0), 'SolidPattern'),
            ('Dark', (120, 120, 120, 0), 'SolidPattern'),
            ('Mid', (160, 160, 160, 0), 'SolidPattern'),
            ('Text', (120, 120, 120, 0), 'SolidPattern'),
            ('BrightText', (255, 255, 255, 255), 'SolidPattern'),
            ('ButtonText', (120, 120, 120, 0), 'SolidPattern'),
            ('Base', (0, 0, 0, 255), 'NoBrush'),
            ('Window', (255, 255, 255, 0), 'SolidPattern'),
            ('Shadow', (0, 0, 0, 255), 'SolidPattern'),
            ('AlternateBase', (240, 240, 240, 0), 'SolidPattern'),
            ('ToolTipBase', (255, 255, 220, 255), 'SolidPattern'),
            ('ToolTipText', (0, 0, 0, 255), 'SolidPattern'),
            ('PlaceholderText', (0, 0, 0, 128), 'SolidPattern'),
        ]
        palette = QtGui.QPalette()
        groups = [
            (QtGui.QPalette.Active, shared),
            (QtGui.QPalette.Inactive, shared),
            (QtGui.QPalette.Disabled, disabled),
        ]
        for group, rows in groups:
            for role_name, rgba, style_name in rows:
                brush = QtGui.QBrush(QtGui.QColor(*rgba))
                brush.setStyle(getattr(QtCore.Qt, style_name))
                palette.setBrush(group, getattr(QtGui.QPalette, role_name), brush)
        return palette

    def retranslateUi(self, mwindow_my_subjects):
        """Apply all user-visible strings (texts kept verbatim)."""
        _translate = QtCore.QCoreApplication.translate
        mwindow_my_subjects.setWindowTitle(_translate("mwindow_my_subjects", "My Subjects"))
        self.lbl_my_subjects.setText(_translate("mwindow_my_subjects", "My Subjects"))
        self.btn_add_subject.setText(_translate("mwindow_my_subjects", "Add Subject"))
        self.btn_delete_subject.setText(_translate("mwindow_my_subjects", "Delete Subject"))
        self.lbl_instruction.setText(_translate("mwindow_my_subjects", "To delete a subject, left-click on it and press the delete button above."))
        self.actionAgenda.setText(_translate("mwindow_my_subjects", "Agenda"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
mwindow_my_subjects = QtWidgets.QMainWindow()
ui = Ui_mwindow_my_subjects()
ui.setupUi(mwindow_my_subjects)
mwindow_my_subjects.show()
sys.exit(app.exec_())
|
import pandas as pd
import numpy as np
from functools import partial
from janitor import clean_names
#Download Data
# ------------------------------------------------------------
# Adelie penguin data from: https://doi.org/10.6073/pasta/abc50eed9138b75f54eaada0841b9b86
uri_adelie = "https://portal.edirepository.org/nis/dataviewer?packageid=knb-lter-pal.219.3&entityid=002f3893385f710df69eeebe893144ff"
# Gentoo penguin data from: https://doi.org/10.6073/pasta/2b1cff60f81640f182433d23e68541ce
uri_gentoo = "https://portal.edirepository.org/nis/dataviewer?packageid=knb-lter-pal.220.3&entityid=e03b43c924f226486f2f0ab6709d2381"
# Chinstrap penguin data from: https://doi.org/10.6073/pasta/409c808f8fc9899d02401bdb04580af7
uri_chinstrap = "https://portal.edirepository.org/nis/dataviewer?packageid=knb-lter-pal.221.2&entityid=fe853aa8f7a59aa84cdd3197619ef462"
uris = [uri_adelie, uri_gentoo, uri_chinstrap]
# Fetch all three CSVs with shared NA handling ("", "NA" and "." -> NaN).
dfs = list(map(partial(pd.read_csv,na_values = ["", "NA", "."]),uris))
# NOTE(review): concat without ignore_index=True keeps each file's own
# 0-based index, so the combined frame has duplicate index labels —
# confirm that is acceptable downstream.
penguins_raw_df = pd.concat(dfs)
# Clean Data
#--------------------------------------------------------------
# pyjanitor's clean_names standardises column names (lowercase, underscores).
penguins = (
    penguins_raw_df
    .pipe(clean_names)
)
|
# Generated by Django 2.2.4 on 2019-08-22 09:32
import django.db.models.deletion
import wagtail.blocks
import wagtail.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the "apply" call-to-action fields and a StreamField of
    application steps to ProgrammePage.

    Auto-generated migration — operation contents must stay stable once
    applied; do not edit retroactively.
    """

    dependencies = [
        ("images", "0002_customimage_file_hash"),
        ("programmes", "0007_add_fees_section_fields"),
    ]
    operations = [
        # preserve_default=False: the empty default only back-fills existing
        # rows during this migration and is not kept on the model.
        migrations.AddField(
            model_name="programmepage",
            name="apply_cta_link",
            field=models.URLField(default=""),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="programmepage",
            name="apply_cta_text",
            field=models.CharField(default="", max_length=125),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="programmepage",
            name="apply_image",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="+",
                to="images.CustomImage",
            ),
        ),
        migrations.AddField(
            model_name="programmepage",
            name="apply_image_sub_title",
            field=models.CharField(blank=True, max_length=250),
        ),
        migrations.AddField(
            model_name="programmepage",
            name="apply_image_title",
            field=models.CharField(blank=True, max_length=125),
        ),
        migrations.AddField(
            model_name="programmepage",
            name="apply_title",
            field=models.CharField(default="Start your application", max_length=125),
        ),
        # "steps": a stream of StructBlocks, each with a heading and an
        # optional (title, url) link pair.
        migrations.AddField(
            model_name="programmepage",
            name="steps",
            field=wagtail.fields.StreamField(
                [
                    (
                        "step",
                        wagtail.blocks.StructBlock(
                            [
                                ("heading", wagtail.blocks.CharBlock()),
                                (
                                    "link",
                                    wagtail.blocks.StructBlock(
                                        [
                                            (
                                                "title",
                                                wagtail.blocks.CharBlock(
                                                    required=False
                                                ),
                                            ),
                                            (
                                                "url",
                                                wagtail.blocks.URLBlock(
                                                    required=False
                                                ),
                                            ),
                                        ]
                                    ),
                                ),
                            ]
                        ),
                    )
                ],
                blank=True,
            ),
        ),
    ]
|
import pandas as pd
data = pd.read_csv("Twitter_SearchAPI_cleaned.csv")
polarity = []
pwordsfreq = {}
nwordsfreq = {}
match = []
for x in data['Tweet']:
matchwords = ""
polar = 0
x = str(x).lower()
words = x.split()
bagofwords = {}
for word in words:
if word in bagofwords:
bagofwords[word] += 1
else:
bagofwords[word] = 1
pwordlist = []
with open('Positive words.txt') as fp:
for i in fp:
pwordlist.append(i.strip())
nwordlist = []
with open('Negative words.txt') as fn:
for i in fn:
nwordlist.append(i.strip())
for key,value in bagofwords.items():
if(key.lower() in pwordlist):
if(matchwords == ""):
matchwords = str(key)
else:
matchwords = matchwords+','+key
polar += value
if key.lower() in pwordsfreq:
pwordsfreq[key.lower()] += value
else:
pwordsfreq[key.lower()] = 1
if(key.lower() in nwordlist):
if(matchwords == ""):
matchwords = str(key)
else:
matchwords = matchwords+','+key
polar -= value
if key.lower() in nwordsfreq:
nwordsfreq[key.lower()] += value
else:
nwordsfreq[key.lower()] = 1
match.append(matchwords)
if(polar > 0):
polarity.append("Positive")
elif(polar < 0):
polarity.append("Negative")
else:
polarity.append("Nutral")
data['Polarity'] = polarity
data['Match'] = match
data.to_csv('Twitter_SearchAPI_cleaned.csv',index=False)
word = []
count = []
prity = []
for key,value in pwordsfreq.items():
word.append(key);
count.append(value);
prity.append("Positive")
for key,value in nwordsfreq.items():
word.append(key);
count.append(value);
prity.append("Negative")
df = pd.DataFrame({'Word':word,'Frequency':count,'Polarity':prity})
df.to_csv('Words_Freq.csv',index=False, encoding='utf-8')
|
from django.db import models
class UsdRate(models.Model):
    """Snapshot of exchange rates against USD, timestamped on every save."""
    btc_price = models.FloatField()
    eth_price = models.FloatField()
    usdc_price = models.FloatField(default=1)
    usd_price = models.FloatField(default=1)
    eur_price = models.FloatField()
    gbp_price = models.FloatField()
    chf_price = models.FloatField()
    duc_price = models.FloatField()
    ducx_price = models.FloatField()
    # auto_now refreshes the timestamp each time the row is saved.
    datetime = models.DateTimeField(auto_now=True)

    def update_rates(self, BTC, ETH, USDC, USD, EUR, GBP, CHF, DUC, DUCX):
        """Overwrite every *_price field in memory (does not call save())."""
        currencies = ('btc', 'eth', 'usdc', 'usd', 'eur', 'gbp', 'chf', 'duc', 'ducx')
        new_rates = (BTC, ETH, USDC, USD, EUR, GBP, CHF, DUC, DUCX)
        for currency, rate in zip(currencies, new_rates):
            setattr(self, currency + '_price', rate)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Hospital(models.Model):
    """A hospital account with login credentials and location metadata."""
    HospitalId = models.AutoField(primary_key=True)
    Name = models.CharField(max_length=250)
    UserName = models.CharField(max_length=250)
    # SECURITY NOTE(review): password stored in plain text — hash it.
    Password = models.CharField(max_length=250)
    Contact = models.IntegerField(null=True)
    Specialization = models.CharField(max_length=250, null=True)
    Address = models.TextField(null=True)
    Distance = models.IntegerField(null=True)
    # Return the object type of the data
    def __str__(self):
        # Primary key doubles as the display string.
        return str(self.HospitalId)
class Patient(models.Model):
    """A patient account keyed by a unique numeric UID."""
    # NOTE(review): max_length has no effect on BigIntegerField — the range
    # is fixed by the column type; consider removing the kwarg.
    UID = models.BigIntegerField(max_length=12, unique=True)
    Name = models.CharField(max_length=250)
    Age = models.IntegerField(null=True)
    AgeGroup = models.CharField(max_length=20,null=True,blank=True)
    Gender = models.CharField(max_length=250, null=True)
    # Free-text field — presumably holds associated Hospital id(s); confirm format.
    HospitalId = models.TextField(null=True,blank=True)
    UserName = models.CharField(max_length=250, unique=True)
    # SECURITY NOTE(review): password stored in plain text — hash it.
    Password = models.CharField(max_length=250)
    def __str__(self):
        return str(self.UID)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
from auth_log.models import User
class Tags(models.Model):
    """Category/tag table.  (Comments translated from Chinese.)"""
    type=models.CharField(max_length=20) # tag text
    addtime=models.IntegerField(default=0) # creation time (integer; presumably a unix timestamp)
    status = models.IntegerField(default=1) # 1 = active, 0 = deleted (soft delete)
    class Meta:
        db_table = 'forum_tags'
        verbose_name = u'分类表'
class Article(models.Model):
    """
    Forum post main table.  (Comments translated from Chinese.)
    """
    # NOTE(review): positional ForeignKey without on_delete implies Django < 2.0.
    user = models.ForeignKey(User) # author
    tag = models.ForeignKey(Tags)  # category tag
    title = models.CharField(max_length=100) # title
    content = models.CharField(max_length=5000) # body text
    praise = models.IntegerField(default=0) # like count
    coll = models.IntegerField(default=0) # favourite/bookmark count
    zf_num = models.IntegerField(default=0) # repost count
    pl_num = models.IntegerField(default=0) # comment count
    add_time = models.IntegerField(default=0) # publish time (integer; presumably a unix timestamp)
    status = models.IntegerField(default=1) # 1 = active, 0 = deleted (soft delete)
    class Meta:
        db_table = 'forum_article'
        verbose_name = u'文章主表'
class ArticleTalk(models.Model):
    """Comment table.  (Comments translated from Chinese.)"""
    aid = models.ForeignKey(Article) # the post commented on
    content = models.CharField(max_length=1000) # comment text
    uid = models.IntegerField(default=0) # commenter id
    level = models.IntegerField(default=0) # comment depth — supports threaded replies
    l_id = models.IntegerField(default=0) # parent comment id
    praise = models.IntegerField(default=0) # like count
    add_time = models.IntegerField(default=0) # comment time
    class Meta:
        db_table = 'forum_article_talk'
        verbose_name = u'评论表'
class Praise(models.Model):
    """Like/upvote table.  (Comments translated from Chinese.)"""
    type = models.IntegerField(default=0) # target type: 0 default, 1 article, 2 comment, 3 message
    uid = models.IntegerField(default=0) # user id
    # NOTE(review): field name is a typo for "article"; renaming needs a migration.
    artcile = models.IntegerField(default=0) # post id
    num = models.IntegerField(default=0) # like count
    class Meta:
        db_table = 'forum_article_praise'
        verbose_name = u'点赞表'
class LiuYan(models.Model):
    """Guestbook/message table ("liuyan" = message)."""
    user = models.ForeignKey(User)
    content = models.CharField(max_length=500)
    add_time = models.IntegerField(default=0)
    praise = models.IntegerField(default=0) # like count
    class Meta:
        db_table = 'forum_ly'
        verbose_name = u'留言表'
class Coll(models.Model):
    """Favourites/bookmark table (maps a user id to an article id).

    FIX: verbose_name previously read u'留言表' ("message table"), clearly
    copied from LiuYan; it now matches this model's own docstring
    (收藏表, "favourites table") and its db_table 'forum_coll'.
    """
    uid = models.IntegerField(default=0)  # user id
    aid = models.IntegerField(default=0)  # article id
    add_time = models.IntegerField(default=0)  # time the article was favourited
    class Meta:
        db_table = 'forum_coll'
        verbose_name = u'收藏表'
|
# -*- coding: utf-8 -*-
# Author :Yang Ming
# Create at :2021/6/8
# tool :PyCharm
from lxml import etree
from loguru import logger
from schema.crawler_abc import *
class Crawler(CrawlerABC):
    """apartments.com listing crawler.

    FIX: get_url() used to read the kwargs keys 'cat' and 'washer', but the
    __main__ block calls run() with is_cat=/is_washer=, so both filters were
    silently dropped.  get_url now accepts either spelling.
    """

    def html_parser(self, html):
        """Extract one dict per listing, keyed by its address string."""
        ehtml = etree.HTML(html)
        rooms = ehtml.xpath('//li[@class="mortar-wrapper"]')
        out = {}
        for room in rooms:
            room_out = {}
            room_out['url'] = room.xpath('.//a[@class="property-link"]/@href')
            room_out['price'] = room.xpath('.//div[@class="price-range"]/text()')
            # Address = title line + street line, joined with ' | '.
            address_1 = room.xpath('.//div[@class="property-title"]//text()')[0]
            address_2 = room.xpath('.//div[@class="property-address js-url"]/text()')[0]
            room_out['address'] = address_1 + ' | ' + address_2
            room_out['phone'] = room.xpath('.//div[@class="phone-wrapper"]//span/text()')
            room_out['update_at'] = room.xpath('.//span[@class="lastUpdated"]/span/text()')
            out[room_out['address']] = room_out
        return out

    def get_url(self, **kwargs):
        """Build a search URL such as
        https://www.apartments.com/queens-ny/3-bedrooms-3000-to-4200-pet-friendly-cat/washer-dryer/
        """
        location = kwargs.get('location')
        beds_num = kwargs.get('beds_num')
        price_low = kwargs.get('price_low')
        price_high = kwargs.get('price_high')
        # Accept both key spellings for backward compatibility; callers in
        # this repo pass is_cat/is_washer.
        is_cat = kwargs.get('cat', kwargs.get('is_cat'))
        is_washer = kwargs.get('washer', kwargs.get('is_washer'))
        url = f'https://www.apartments.com/{location}/{beds_num}-bedrooms-{price_low}-to-{price_high}'
        if is_cat:
            url = url + '-pet-friendly-cat'
        if is_washer:
            url = url + '/washer-dryer/'
        return url

    def get_save_path(self, json_save_path):
        """Fill the '__token__' placeholder in the save path with this
        crawler's name (this file's name up to the first underscore)."""
        name = os.path.split(__file__)[-1].split('_')[0]
        # name = os.path.split(os.path.abspath(sys.argv[0]))[-1].split('_')[0]
        path = json_save_path.replace('__token__',name)
        return path
if __name__ == '__main__':
    c = Crawler()
    # run by conditions
    # NOTE(review): the keyword names passed here must match what get_url()
    # reads ('cat'/'washer' vs 'is_cat'/'is_washer') — verify they line up.
    c.run(location='brooklyn-ny', beds_num=3, price_low=3000, price_high=4500, is_cat=1, is_washer=1)
    # run by existed url to save what you see
    # c.run_by_url(url='https://www.apartments.com/3-bedrooms-3000-to-4200-pet-friendly-cat/washer-dryer/?bb=3mm6-t99vHw98oooB')
|
# Ask for the 3 sides of a triangle; report whether they can form a
# triangle and, if so, whether it is equilateral, isosceles or scalene.
l1 = float(input('Informe o primeiro lado do triangulo: '))
l2 = float(input('Informe o segundo lado do triangulo: '))
l3 = float(input('Informe o terceiro lado do triangulo: '))
# BUG FIX: the original isosceles branch mixed `and`/`or` without
# parentheses, so e.g. sides (1, 5, 1) were reported as a triangle, and
# isosceles cases with l2 == l3 (e.g. 2, 3, 3) fell through to
# "not a triangle".  Test the triangle inequality once, then classify by
# the number of distinct side lengths.
if l1 < l2 + l3 and l2 < l1 + l3 and l3 < l1 + l2:
    distinct_sides = len({l1, l2, l3})
    if distinct_sides == 1:
        print('Ok, suas medidas formam um triangulo EQUILATERO')
    elif distinct_sides == 3:
        print('Ok, suas medidas formam um triangulo ESCALENO.')
    else:
        print('Ok, suas medidas formam um triangulo ISOCELES.')
else:
    print("Suas medidas nao formam um triangulo.")
from linked_list import LinkedList
from stack import Stack
from queue import Queue
def linked_list_example():
    """Demonstrate LinkedList append/prepend/remove and a manual node walk."""
    print('------------------->> Linked List Example <<-------------------')
    llist = LinkedList()
    for value in (0, 1, 2, 3):
        llist.append(value)
    for value in (-1, -2, -3):
        llist.prepend(value)
    print(llist)
    llist.remove(2)
    print(llist)
    llist.remove(-3)
    print(llist)
    # Walk the list by hand, printing values pipe-separated.
    node = llist.head
    while node:
        print(f'| {node.value} ', end='')
        node = node.next
    print('|')
    print(llist.head)
    print('-------------------^^ Linked List Example ^^-------------------')
    print()
def stack_example():
    """Demonstrate Stack push/pop and peeking at the top element."""
    print('------------------->> Stack Example <<-------------------')
    stack = Stack()
    for value in (0, 1, 2, 3, -1, -2, -3):
        stack.push(value)
    print(stack)
    for _ in range(2):
        stack.pop()
    print(stack)
    # NOTE(review): `peek` is accessed without parentheses — presumably a
    # property on Stack; confirm it is not meant to be called.
    print(stack.peek)
    print('-------------------^^ Stack Example ^^-------------------')
    print()
def queue_example():
    """Demonstrate Queue add/remove and head/tail access."""
    print('------------------->> Queue Example <<-------------------')
    queue = Queue()
    for value in (3, 2, 1, 0, -1):
        queue.add(value)
    print(queue)
    queue.remove()
    print(queue)
    queue.remove()
    print(queue)
    print(queue.head)
    print(queue.tail)
    print('-------------------^^ Queue Example ^^-------------------')
    print()
if __name__ == '__main__':
    # Run each data-structure demo in turn.
    linked_list_example()
    stack_example()
    queue_example()
#_*_coding:utf-8_*_
import unittest
import shutil
from testScenario import testScenarioCase
from testInterface import testActivity,testCharge,\
testCoupon,testCredit,testDeal,testGrade,testManage,testProduct,\
testSearch,testTag,testUser
import os,time,json
from globalVar import gl
from library import HTMLTESTRunnerCN
from library import scripts
from library.emailstmp import EmailClass
class run_test_case(object):
    """Build the interface-test suite, run it, and publish the HTML report
    (copy into the web templates dir, DingTalk notification, e-mail)."""

    @classmethod
    def load_tests_list(cls):
        """Load test cases in a fixed module order.

        Scenario cases run first, then each interface-test module.
        :return: list of TestSuite objects, one per module
        """
        # renamed from `list` to avoid shadowing the builtin
        modules = [testUser, testTag, testSearch, testProduct, testManage, testGrade, testDeal, testCredit,
                   testCoupon, testCharge, testActivity]
        tests = [unittest.TestLoader().loadTestsFromModule(testScenarioCase)]
        for module in modules:
            tests.append(unittest.TestLoader().loadTestsFromModule(module))
        return tests

    @classmethod
    def create_report_file(cls):
        """Create the dated report directory and return the report file path."""
        # report file name, e.g. Report_20200101_120000.html
        time_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        report_dir = time_str.split('_')[0]
        cls.file_name = 'Report_{}.html'.format(time_str)
        portdir = os.path.join(gl.reportPath, report_dir)
        # one report folder per day
        if not os.path.exists(portdir):
            os.mkdir(portdir)
        cls.filePath = os.path.join(portdir, cls.file_name)  # absolute path of the generated report
        return cls.filePath

    @staticmethod
    def copy_report(filePath, file_name):
        """Copy the report folder/file under templates/report/ for the web UI.

        :return: the date-named sub-directory (last path component)
        """
        # NOTE(review): splitting on "\\" assumes Windows-style paths — confirm.
        split_path = os.path.dirname(filePath).split("\\")
        low_path = split_path[-1]  # simplified from split_path[split_path.__len__() - 1]
        web_path = os.path.join(gl.templatesReportPath, low_path)
        if not os.path.exists(web_path):
            shutil.copytree(os.path.dirname(filePath), web_path)
        else:
            shutil.copy(filePath, os.path.join(web_path, file_name))
        return low_path

    @staticmethod
    def tmpl_msg(low_path, file_name):
        """Build the DingTalk result message: counts, verdict and report URL."""
        result_str = """共{}个用例, 通过{}, 失败{}, 错误{}, 通过率{}""".format(
            gl.get_value('sum'),
            gl.get_value('passed'),
            gl.get_value('failed'),
            gl.get_value('error'),
            gl.get_value('passrate')
        )
        # verdict: pass only at a 100% pass rate
        if '100' in str(gl.get_value('passrate')):
            msg_1 = '本次测试★通过★'
        else:
            msg_1 = '本次测试★不通过★'
        # DingTalk message with a link to the published report
        msg = """预发布接口自动化测试已完成:{},{}\n测试报告地址:http://60.205.217.8:5000/report/{}/{}"""
        msg = msg.format(result_str, msg_1, low_path, file_name)
        return msg

    @staticmethod
    def run(filePath):
        """Run the unittest suite and write the HTML report.

        :param filePath: absolute path of report.html
        :return: None
        """
        suite = unittest.TestSuite()
        suite.addTests(run_test_case.load_tests_list())
        # BUG FIX: `file()` is a Python-2-only builtin (removed in Python 3);
        # use open() instead.
        with open(filePath, 'wb') as fp:
            runner = HTMLTESTRunnerCN.HTMLTestRunner(
                stream=fp,
                title=u'预发布-接口自动化测试报告',
                description=u'详细测试用例结果',  # defaults to empty when omitted
                tester=u"yhleng"  # tester name
            )
            # run the test cases
            runner.run(suite)

    @staticmethod
    def invoke():
        """Entry point: run the tests and publish the report + notifications."""
        time_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        filePath = run_test_case.create_report_file()
        print(filePath)
        # announce the start on DingTalk
        scripts.send_msg_dding('{}:★开始预发布API接口自动化测试★'.format(time_str))
        # run the suite and generate the report file
        run_test_case.run(filePath)
        # copy the report folder under templates/report/
        low_path = run_test_case.copy_report(filePath, run_test_case.file_name)
        # build and send the DingTalk result message
        msg = run_test_case.tmpl_msg(low_path, run_test_case.file_name)
        print(msg)
        scripts.send_msg_dding(msg)
        # e-mail the report
        email = EmailClass()
        email.send(filePath)
if __name__=="__main__":
    # Kick off the full pre-release API regression run.
    run_test_case.invoke()
import sys
from itertools import chain
from itertools import product
# Puzzle input is a "low-high" range string, e.g. "137683-596253"
# (Advent of Code 2019, day 4).
input_string = sys.argv[1]
input_tuple = tuple(map(int, input_string.split("-")))
def rule_1(number):
    """True if the digit string contains at least one pair of equal adjacent digits."""
    return any(a == b for a, b in zip(number, number[1:]))
def rule_2(number):
    """True if the digits never decrease from left to right."""
    previous_digit = number[0]  # IndexError on empty input, as before
    for digit in number[1:]:
        if digit < previous_digit:
            return False
        previous_digit = digit
    return True
#Not needed? At least not for my input
# -----------
def rule_3(number):
    """True if the candidate has exactly six digits."""
    return len(number) == 6
def rule_4(number):
    # True if the numeric value lies inside the puzzle's inclusive range.
    # Depends on the module-level input_tuple parsed from sys.argv.
    return input_tuple[0] <= int(number) <= input_tuple[1]
#-----------
# Part 1: count candidates in the range satisfying all four rules.
counter = 0
for number in map(str, range(input_tuple[0], input_tuple[1] + 1)):
    if all(map(lambda x: x(number), [rule_1, rule_2, rule_3, rule_4])):
        counter += 1
print(counter)
def n_matching(n, number):
    """Return the index tuples of every sliding window of n identical digits."""
    matching = []
    start = 0
    while start + n <= len(number):
        window = number[start:start + n]
        if window == window[0] * n and window[0] in "0123456789":
            matching.append(tuple(range(start, start + n)))
        start += 1
    return matching
def altered_rule_1(number):
    """Part-2 pairing rule: True iff some adjacent equal pair is NOT part
    of a longer (3+) run of the same digit.

    BUG FIX: the original fell off the end and implicitly returned None
    when every pair belonged to a longer run; the falsy None happened to
    work inside all(), but the False result is now explicit.
    """
    # collect every adjacent equal pair
    two_matching_positions = []
    for i_position, i in enumerate(number[:-1]):
        j_position = i_position + 1
        j = number[j_position]
        if i == j:
            two_matching_positions.append((i_position, j_position))
    # no repeated digits at all
    if len(two_matching_positions) == 0:
        return False
    # every run of length 3..6 of a single digit
    n_matching_positions = list(chain(*map(lambda x: n_matching(x, number), [3, 4, 5, 6])))
    # pairs exist and there are no longer runs: accept
    if len(n_matching_positions) == 0:
        return True
    # accept if at least one pair is not contained in a longer run
    for two_pos in two_matching_positions:
        allowed = True
        for n_pos in n_matching_positions:
            coincides = set(two_pos) <= set(n_pos)
            if coincides:
                allowed = False
                break
        if allowed:
            return True
    return False
# Part 2: same count with the stricter pairing rule.
counter = 0
for number in map(str, range(input_tuple[0], input_tuple[1] + 1)):
    if all(map(lambda x: x(number), [altered_rule_1, rule_2, rule_3, rule_4])):
        counter += 1
print(counter)
# Reverse the digits of a non-negative integer read from stdin.
n = int(input("enter a number"))
r = 0
# BUG FIX: the original used int(n / 10); true division goes through float,
# which silently corrupts digits for integers larger than 2**53.  divmod
# keeps everything in exact integer arithmetic.
while n > 0:
    n, rem = divmod(n, 10)
    r = (r * 10) + rem
print(r)
import graphene
from account.mutations import AuthMutation
from product.queries import ProductQuery
from product.mutiaion import ProductMutations
from graphql_auth.schema import UserQuery, MeQuery
class Query(UserQuery, MeQuery, ProductQuery, graphene.ObjectType):
    # Root query type: combines auth user queries with product queries.
    pass
class Mutation(ProductMutations, AuthMutation, graphene.ObjectType):
    # Root mutation type: product mutations plus authentication mutations.
    pass
# Schema object exposed to the GraphQL view.
schema = graphene.Schema(query=Query, mutation=Mutation)
|
from selenium.webdriver.common.by import By
from pages.page import BasePage
class LoginPage(BasePage):
    """Page object for the two-step login form (e-mail first, then password)."""
    url = BasePage.baseurl
    email_address_textbox = (By.XPATH, '//INPUT[@placeholder="Email address"]')
    email_right_arrow = (By.XPATH, "//DIV[@class='next-step ready']")
    password_textbox = (By.XPATH, '//INPUT[@name="password"]')
    login = (By.XPATH, '//BUTTON[text()="login"]')

    def enter_email_address(self, email_address):
        """Type the e-mail address and advance to the password step."""
        if not self.wait_for_element(self.email_address_textbox):
            return
        self.send_keys(email_address, self.email_address_textbox)
        self.click(self.email_right_arrow)

    def enter_password(self, password):
        """Type the password and submit the login form."""
        if not self.wait_for_element(self.password_textbox):
            return
        self.send_keys(password, self.password_textbox)
        self.click(self.login)
|
#! /usr/bin/python
'''
Text Justification
Given an array of words and a length L, format the text such that each line has exactly L characters and is fully (left and right) justified.
You should pack your words in a greedy approach; that is, pack as many words as you can in each line. Pad extra spaces ' ' when necessary so that each line has exactly L characters.
Extra spaces between words should be distributed as evenly as possible. If the number of spaces on a line do not divide evenly between words, the empty slots on the left will be assigned more spaces than the slots on the right.
For the last line of text, it should be left justified and no extra space is inserted between words.
For example,
words: ["This", "is", "an", "example", "of", "text", "justification."]
L: 16.
Return the formatted lines as:
[
"This is an",
"example of text",
"justification. "
]
Note: Each word is guaranteed not to exceed L in length.
Corner Cases:
A line other than the last line might contain only one word. What should you do in this case?
In this case, that line should be left-justified.
'''
# Greedy algorithm should do it.
# 1. Determine the words to put into a line.
# 2. Determine the space between words
# 3. If it's the last line, left justifiy the line.
class Solution:
    # @param words, a list of strings
    # @param L, an integer
    # @return a list of strings
    def fullJustify(self, words, L):
        """Greedily pack words into lines of width L and fully justify them.

        Fixes over the original:
          * `i < range(len(words))` compared an int to a list (it only
            "worked" by accident under Python 2's mixed-type ordering);
          * the extra left-side padding spaces were computed
            (extra_paddings_num) but never applied, so uneven gaps came
            out wrong;
          * the last line was fully justified instead of left-justified.
        """
        # 1. Greedy packing: a line holding k words needs k-1 single spaces,
        #    so a new word fits while chars + gaps + len(word) <= L.
        lines = []
        current = []
        current_len = 0  # total word characters on the current line
        for word in words:
            if current and current_len + len(current) + len(word) > L:
                lines.append(current)
                current = []
                current_len = 0
            current.append(word)
            current_len += len(word)
        if current:
            lines.append(current)
        # 2. Justify each packed line.
        output = []
        for index, line_words in enumerate(lines):
            is_last = (index == len(lines) - 1)
            if is_last or len(line_words) == 1:
                # Last line and single-word lines: left-justified,
                # padded with spaces on the right.
                line = " ".join(line_words)
                line += " " * (L - len(line))
            else:
                chars = 0
                for w in line_words:
                    chars += len(w)
                gaps = len(line_words) - 1
                base = (L - chars) // gaps   # spaces every gap gets
                extra = (L - chars) % gaps   # leftmost `extra` gaps get one more
                pieces = []
                for j in range(gaps):
                    pieces.append(line_words[j])
                    pieces.append(" " * (base + (1 if j < extra else 0)))
                pieces.append(line_words[-1])
                line = "".join(pieces)
            output.append(line)
        return output
if __name__ == "__main__":
    s = Solution()
    # Parenthesized print: valid under both Python 2 (single expression)
    # and Python 3 (function call), unlike the bare print statement.
    print(s.fullJustify(["This", "is", "an", "example", "of", "text", "justification."], 16))
|
# Custom error status codes and their messages.
USER_ALREADY_EXISTS = 20001        # user already exists
PARAMETERS_INVALID = 20002         # invalid parameters
PARAMETERS_NOTENOUGH = 20003       # required parameters missing
REQUEST_METHOD_ERROR = 20004       # wrong HTTP request method
INTER_NOT_IMPLEMENT_ERROR = 20005  # not implemented
INTER_MACHINE_ERROR = 20006        # internal error
NOT_FOUND_ERROR = 20404            # not found
# Mapping from custom status code to message.
# BUG FIX: corrected the misspelled messages
# 'Not Implemention' -> 'Not Implemented' and 'Inter Error' -> 'Internal Error'.
J_MSG = {USER_ALREADY_EXISTS: 'User already exists',
         PARAMETERS_INVALID: 'Error parameters',
         REQUEST_METHOD_ERROR: 'Error request method',
         PARAMETERS_NOTENOUGH: 'Not enough parameters',
         INTER_NOT_IMPLEMENT_ERROR: 'Not Implemented',
         INTER_MACHINE_ERROR: 'Internal Error',
         NOT_FOUND_ERROR: 'Not Found Error'}
# Custom application error class.
class CustomFlaskErr(Exception):
    """Application error carrying a fine-grained return_code plus an HTTP status."""

    # default HTTP status code returned to the client
    status_code = 400

    def __init__(self, return_code=None, status_code=None, payload=None):
        """`return_code` indexes J_MSG; `status_code` overrides the class default."""
        Exception.__init__(self)
        self.return_code = return_code
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Build the response dict: payload plus return_code and its message."""
        body = dict(self.payload or ())
        # add dict key: return code
        body['return_code'] = self.return_code
        # add dict key: message, looked up from return_code in J_MSG
        body['message'] = J_MSG[self.return_code]
        # log the message
        print(J_MSG[self.return_code])
        return body
#!/usr/bin/env python
############################################################################
#
# endTrimmer.py
# 2015 James Stapleton
#
# Trims a specified number of nucleotides from each end of each
# synthetic long read in a list, prints a new file
#
############################################################################
import argparse
def main(contig_file, trim_length, outfile):
    """Trim trim_length nucleotides from both ends of each contig in
    contig_file, writing the result to outfile.

    Lines starting with '>Bar' (barcodes) and '>NODE' (contig headers) are
    passed through unchanged; sequence lines are accumulated and the trimmed
    sequence is flushed when the next header or a blank line ends the record.
    Returns 0 on success.
    """
    TRIM_LENGTH = int(trim_length)

    def trimmed(sequence):
        # BUG FIX: the original sliced sequence[TRIM_LENGTH:-TRIM_LENGTH],
        # which returns '' when TRIM_LENGTH == 0; use an explicit end index.
        return sequence[TRIM_LENGTH:len(sequence) - TRIM_LENGTH]

    with open(contig_file, 'r') as contigs:
        # renamed the file handle: the original shadowed the `outfile` parameter
        with open(outfile, 'w') as out:
            sequence = ''
            for line in contigs:
                # barcode line: pass through
                if line[:4] == ">Bar":
                    out.write(line)
                # contig header: flush the previous trimmed sequence, then
                # pass the header through
                elif line[:5] == '>NODE':
                    if sequence:
                        out.write(trimmed(sequence))
                        out.write('\n')
                        sequence = ''
                    out.write(line)
                # blank line: end of barcode group, flush trimmed sequence
                elif line[0] == '\n':
                    out.write(trimmed(sequence))
                    out.write('\n\n')
                    sequence = ''
                else:
                    sequence = sequence + line.rstrip()
    return 0
if __name__ == '__main__':
    # Command line: contig_file trim_length [--outfile PATH]
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('contig_file')
    arg_parser.add_argument('trim_length')
    arg_parser.add_argument('--outfile', default='trimmed_contigs.txt')
    cli_args = arg_parser.parse_args()
    main(cli_args.contig_file, cli_args.trim_length, cli_args.outfile)
|
# Generated by Django 3.1.2 on 2020-10-19 16:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: give Auction.available and Auction.price
    concrete defaults (True / 20).  preserve_default=False is the Django
    generator's way of saying the default is applied once during the
    migration and not kept on the model field."""

    dependencies = [
        ('auctions', '0039_auto_20201019_1833'),
    ]
    operations = [
        migrations.AlterField(
            model_name='auction',
            name='available',
            field=models.BooleanField(default=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='auction',
            name='price',
            field=models.IntegerField(default=20),
            preserve_default=False,
        ),
    ]
|
import sys
sys.path.append("./")
import setting
import xmlrpclib
import SimpleXMLRPCServer
import socket
import select
import time
import random
class Sensor:
    '''A generic sensor client: registers with the gateway over XML-RPC,
    serves queries on a local XML-RPC endpoint, and can push state changes
    to its assigned server.  (Python 2 code: xmlrpclib / print statement.)'''
    def __init__(self,name,serveradd,localadd):
        '''Initialize a sensor and create an RPC client to the gateway.'''
        self.name = name
        self.ctype = 'sensor'
        self.localadd = localadd
        self.c0 = xmlrpclib.ServerProxy("http://"+serveradd[0]+":"+str(serveradd[1]),verbose=0)#rpc client to the gateway
        self.state = '0'
    def query_state(self):
        '''Return the current state (polled by the gateway).'''
        return self.state
    def register_to_server(self):
        '''Register with the gateway (type, name, listen address); the reply
        assigns this client an id and the server it should talk to.'''
        load_balance_info = self.c0.register(self.ctype,self.name,self.localadd)
        self.cid = load_balance_info['id']
        self.server_to_connect= load_balance_info['assignedServer']
        # connect an RPC client to the assigned server
        self.c = xmlrpclib.ServerProxy("http://"+self.server_to_connect[0]+":"+str(self.server_to_connect[1]))
        return 1
    def start_listen(self):
        '''Serve queries/instructions from the gateway on the local address
        (blocks forever).'''
        self.s = SimpleXMLRPCServer.SimpleXMLRPCServer(self.localadd,logRequests=False)#zerorpc.Server(self)
        self.s.register_instance(self)
        self.s.serve_forever()
    def change_server(self, new_server_add):
        '''Re-point this sensor's RPC client at a different server.'''
        self.c = xmlrpclib.ServerProxy("http://"+new_server_add[0]+":"+str(new_server_add[1]))
        print self.name,"change to new server","http://"+new_server_add[0]+":"+str(new_server_add[1])
        return 1
    def set_state(self,state):
        '''Set the state from a test case (no push to the gateway).'''
        self.state = state
        return 1
    def set_state_push(self,state):
        '''Set the state from a test case; push to the gateway only when it changed.'''
        if self.state != state:
            self.state = state
            self.report_to_server()
        return 1
    def report_to_server(self):
        '''Push the current state to the assigned server.'''
        self.c.report_state(self.cid, self.state)
        return 1
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import scipy
from scipy import signal
# Select the pgf backend so figures can be embedded in LaTeX documents.
# NOTE(review): matplotlib.use() is called after pyplot was imported above;
# the conventional order is use() before importing pyplot — confirm the
# backend switch actually takes effect here.
matplotlib.use("pgf")
plt.rcParams.update({
    "pgf.texsystem": "pdflatex",
    "font.family": "serif",
    "font.size": 6,
    "legend.fontsize": 5,
    "text.usetex": True,
    "pgf.rcfonts": False
});
# Small figure sized for a paper column.
plt.figure(figsize=(2.65, 1.5))
data_path = "data/pursuit/"
# A2C curve, Savitzky-Golay smoothed (window derived from series length).
df = pd.read_csv(os.path.join(data_path,'sa_a2c.csv'))
df = df[['episodes_total', "episode_reward_mean"]]
data = df.to_numpy()
filtered = scipy.signal.savgol_filter(data[:, 1], int(len(data[:, 1])/110)+2, 5)
plt.plot(data[:, 0], filtered, label='A2C', linewidth=0.6, color='tab:purple', linestyle=(0, (3, 3)))
# ApeX DQN curve (plotted unsmoothed).
df = pd.read_csv(os.path.join(data_path, 'sa_apex_second_best.csv'))
df = df[['episodes_total', "episode_reward_mean"]]
data = df.to_numpy()
# filtered = scipy.signal.savgol_filter(data[:, 1], int(len(data[:, 1])/110)+1, 5)
plt.plot(data[:, 0], data[:,1], '--', label='ApeX DQN', linewidth=0.6, color='tab:brown', linestyle=(0, (1, 1)))
# Rainbow DQN curve, reward rescaled by 1/8.
# NOTE(review): the /8.0 rescaling is undocumented — presumably compensates
# for a different reward aggregation in that run; confirm before reuse.
df = pd.read_csv(os.path.join(data_path, 'sa_rainbow_dqn.csv'))
df = df[['episodes_total', "episode_reward_mean"]]
data = df.to_numpy()
plt.plot(data[:, 0], data[:, 1]/8.0, '--', label='Rainbow DQN', linewidth=0.6, color='tab:blue', linestyle=(0, (3, 3)))
# DQN curve, smoothed.
df = pd.read_csv(os.path.join(data_path, 'sa_dqn.csv'))
df = df[['episodes_total', "episode_reward_mean"]]
data = df.to_numpy()
filtered = scipy.signal.savgol_filter(data[:, 1], int(len(data[:, 1])/110)+2, 5)
plt.plot(data[:, 0], filtered, '--', label='DQN', linewidth=0.6, color='tab:cyan', linestyle=(0, (3, 3)))
# IMPALA curve (unsmoothed).
df = pd.read_csv(os.path.join(data_path,'sa_impala.csv'))
df = df[['episodes_total', "episode_reward_mean"]]
data = df.to_numpy()
plt.plot(data[:, 0], data[:, 1], label='IMPALA', linewidth=0.6, color='tab:green', linestyle='solid')
# PPO curve, smoothed.
df = pd.read_csv(os.path.join(data_path, 'sa_ppo.csv'))
df = df[['episodes_total', "episode_reward_mean"]]
data = df.to_numpy()
filtered = scipy.signal.savgol_filter(data[:, 1], int(len(data[:, 1])/110)+1, 5)
plt.plot(data[:, 0], filtered, label='PPO', linewidth=0.6, color='tab:orange', linestyle=(0, (5, 2, 1, 2)))
# Random-policy baseline: constant 31.03 across the x range.
plt.plot(np.array([0,60000]),np.array([31.03,31.03]), label='Random', linewidth=0.6, color='red', linestyle=(0, (1, 1)))
# Axis labels, ticks and limits tuned for the camera-ready figure.
plt.xlabel('Episode', labelpad=1)
plt.ylabel('Average Total Reward', labelpad=1)
plt.title('Pursuit')
plt.xticks(ticks=[10000,20000,30000,40000,50000],labels=['10k','20k','30k','40k','50k'])
plt.xlim(0, 60000)
plt.yticks(ticks=[0,150,300,450],labels=['0','150','300','450'])
plt.ylim(-150, 600)
plt.tight_layout()
# plt.legend(loc='lower right', ncol=1, labelspacing=.2, columnspacing=.25, borderpad=.25)
plt.margins(x=0)
# Emit both pgf (for LaTeX inclusion) and a high-resolution png preview.
plt.savefig("SAPursuitGraph_camera.pgf", bbox_inches = 'tight',pad_inches = .025)
plt.savefig("SAPursuitGraph_camera.png", bbox_inches = 'tight',pad_inches = .025, dpi = 600)
|
from google_trans_new import google_translator
import streamlit as st
# Minimal Streamlit app: translate user input to French via google_trans_new.
translator = google_translator()
st.title("Language Translator")
text = st.text_input("Enter a text")
# lang_tgt='fr' — target language is French.
translate = translator.translate(text,lang_tgt='fr')
st.write(translate)
# Generated by Django 2.0 on 2018-08-15 12:04
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename AttitudeCount.attitude_applause_num
    to attitude_flower_num."""

    dependencies = [
        ('Attitude', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='attitudecount',
            old_name='attitude_applause_num',
            new_name='attitude_flower_num',
        ),
    ]
|
import datetime
from flask_peewee.auth import BaseUser
from peewee import *
from app import db
class User(db.Model, BaseUser):
    """Application user; BaseUser mixes in flask-peewee's auth helpers."""
    username = CharField()
    password = CharField()
    email = CharField()
    join_date = DateTimeField(default=datetime.datetime.now)
    active = BooleanField(default=True)
    admin = BooleanField(default=False)
    def __str__(self):
        # Display users by username in admin/listing views.
        return self.username
class Note(db.Model):
    """A note belonging to a user (one-to-many via `user`)."""
    user = ForeignKeyField(User, backref='note')
    content = TextField()
    created_date = DateTimeField(default=datetime.datetime.now)
if __name__ == '__main__':
    # Create the database tables on first run.
    User.create_table()
    Note.create_table()
|
#!/usr/bin/python
#_*_coding:utf-8_*_
from __future__ import absolute_import
from django.shortcuts import render,HttpResponseRedirect,render_to_response,HttpResponse
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response,RequestContext
from django.contrib.auth.decorators import login_required
from django.contrib import auth
from inc import tasks
from cmdb import settings
from manager.views.permission import *
from hosts.models import *
from manager.myauth import *
from inc.models import *
import os,sys,commands
@login_required
@PermissionVerify()
def game_deploy(request):
    """New-server installation page: list deployable hosts for the chosen
    area/platform.

    NOTE(review): the template receives locals(), so every local name here
    is part of the template contract — do not rename locals.
    """
    header_title,nav,tip,zone= "新服安装","游戏项目管理","新服安装","国内"
    project = "inc"
    area_name = request.GET.get("area")
    platform = request.GET.get("platform")
    date = request.POST.get("date")
    subcheck = request.POST.getlist("subcheck")
    crt_backup = request.POST.get("crt_backup")
    # default filters: domestic area ("国内"), android platform
    if area_name is None:
        area_name = "国内"
    if platform is None:
        platform = "android"
    inc_area_info = []
    inc_area = host.objects.filter(project__mini_name=project)
    for i in inc_area:
        all = area_type.objects.get(area_name=i.area).area_name
        inc_area_info.append(all)
    # current project's area list, deduplicated
    inc_area_info = list(set(inc_area_info))
    publisher_info = publisher.objects.all()
    # battlefield servers have no DB; mark their DB as already initialised
    bf_server = host.objects.filter(use__use_name="战场服")
    bf_server.update(db_init=1)
    inc_server = host.objects.filter(project__mini_name=project,area__area_name=area_name).exclude(
        #use__use_name="数据服").exclude(
        #use__use_name="登陆服").exclude(
        #use__use_name="GM服").exclude(
        use__use_name="SDK服").exclude(
        use__use_name="监控服").exclude(
        use__use_name="备份服").exclude(
        use__use_name="区服列表服").order_by("-host_init").order_by("-db_init").order_by("game_count")
    return render_to_response('inc/game_deploy.html',locals(),context_instance=RequestContext(request))
@login_required
@PermissionVerify()
def game_deploy_check(request):
    """Validate the new-server form (server id uniqueness, derived app
    directory, game port) before installation.

    NOTE(review): the template receives locals(); do not rename locals.
    """
    header_title,nav,tip= "新服安装信息确认","游戏项目管理","新服安装信息检查"
    choice_host = request.POST.get("choice_host")
    game_publisher = request.POST.get("game_publisher")
    server_name = request.POST.get("server_name")
    server_id = request.POST.get("server_id")
    zone_id = request.POST.get("zone_id")
    openserver_date = request.POST.get("date")
    game_type = request.POST.get("game_type")
    # choice_host packs six host fields joined with '+'
    project,p_ip,s_ip,area_mini_name,plat_mini_name,db_ip = choice_host.split('+')
    if game_type == "online" or game_type == "test":
        # one port per game instance already on this host, starting at 10100
        game_id = gameinfo.objects.filter(project__mini_name=project,p_ip=p_ip).count()
        if game_id >= 0:
            game_port = 10100 + int(game_id)
        else:
            game_port = 10100
        app_dir = project + "_" + game_publisher + "_" + area_mini_name + "_" + plat_mini_name + "_" + server_id
    elif game_type == "fight":
        app_dir = project + "_" + game_publisher + "_" + area_mini_name + "_" + plat_mini_name + "_" + "bf"
    elif game_type == "log":
        app_dir = project + "_" + game_publisher + "_" + area_mini_name + "_" + plat_mini_name + "_" + "log"
    elif game_type == "gm":
        app_dir = project + "_" + game_publisher + "_" + area_mini_name + "_" + plat_mini_name + "_" + "gm"
    elif game_type == "login":
        app_dir = project + "_" + game_publisher + "_" + area_mini_name + "_" + plat_mini_name + "_" + "login"
    area_name = host.objects.get(project__mini_name=project,p_ip=p_ip).area.area_name
    # server id and zone id are mandatory
    if len(zone_id) == 0 or len(server_id) == 0:
        err_data = "服务器ID和大区ID不能为空"
    else:
        # server_id must be unique within the area/platform
        check_server_id = gameinfo.objects.filter(server_id=int(server_id),area__mini_name=area_mini_name,platform__mini_name=plat_mini_name)
        if len(check_server_id) != 0:
            err_data = "服务器ID: %s 在%s平台下重复了!" % (server_id,plat_mini_name)
    return render_to_response('inc/game_deploy_check.html',locals(),context_instance=RequestContext(request))
@login_required
@PermissionVerify()
def inc_add_gameinfo(request):
    """Record a new game server in gameinfo (and, for online servers, in the
    public ServerList) without running the install tasks.  (Python 2 code.)

    NOTE(review): locals() is handed to the template; do not rename locals.
    """
    project = request.GET.get("project")
    p_ip = request.GET.get("p_ip")
    s_ip = request.GET.get("s_ip")
    app_dir = request.GET.get("app_dir")
    server_id = request.GET.get("server_id")
    db_ip = request.GET.get("db_ip")
    open_time = request.GET.get("date")
    area_name = request.GET.get("area_name")
    game_type = request.GET.get("game_type")
    server_name = request.GET.get("server_name")
    zone_id = request.GET.get("zone_id")
    platform = host.objects.get(project__mini_name=project,p_ip=p_ip).platform.plat_name
    plat_mini_name = host.objects.get(project__mini_name=project,p_ip=p_ip).platform.mini_name
    use = host.objects.get(project__mini_name=project,p_ip=p_ip).use.all()
    # a host must have exactly one use/role
    if len(use) == 1:
        for i in use: use_name = i
    else:
        err_data = "游戏服用途必须唯一!"
    # port allocation per game type.
    # NOTE(review): game_port stays undefined when game_type matches none of
    # these branches — the gameinfo insert below would then raise NameError
    # (caught by the broad except).  Confirm game_type is always one of these.
    if game_type == "online" or game_type == "test":
        game_id = gameinfo.objects.filter(project__mini_name=project,p_ip=p_ip).count()
        if game_id >= 0:
            game_port = 10100 + int(game_id)
        else:
            game_port = 10100
    elif game_type == "fight":
        game_id = gameinfo.objects.filter(project__mini_name=project,p_ip=p_ip).count()
        game_port = 10300 + int(game_id)*10
        db_ip = "0.0.0.0"
    elif game_type == "log":
        game_port = 9121
        db_ip = "0.0.0.0"
    elif game_type == "gm":
        game_port = 9121
    elif game_type == "login":
        game_port = 10002
    try:
        area_id = area_type.objects.get(area_name=area_name)
        use_id = server_use.objects.get(use_name=use_name)
        project_id = Project.objects.get(mini_name=project)
        platform_id = platform_type.objects.get(plat_name=platform)
        game_count = gameinfo.objects.filter(project__mini_name=project,p_ip=p_ip).count() + 1
        if game_type == "online":
            # online servers are also published to the client server list
            start_time = open_time.replace("-","/")
            plat_mini_name = host.objects.get(project__mini_name=project,p_ip=p_ip).platform.mini_name
            add_serverlist = ServerList(area_name=area_name,platform=plat_mini_name,server_name=server_name,server_id=server_id,
                            p_ip=p_ip,isopen="false",status=0,port=game_port,open_time=start_time,zone_id=zone_id)
            add_serverlist.save()
        add_game = gameinfo(project=project_id,area=area_id,platform=platform_id,use=use_id,p_ip=p_ip,
                            s_ip=s_ip,db_ip=db_ip,game_port=int(game_port),app_dir=app_dir,open_time=open_time,
                            server_id=int(server_id),server_name=server_name)
        add_game.save()
        # update the host's running-game count
        upcount = host.objects.filter(p_ip=p_ip)
        upcount.update(game_count=game_count)
        return HttpResponseRedirect("/inc/game_info/?project=%s&area=%s&platform=%s"%(project,area_name,plat_mini_name))
    except Exception,e:
        err_data = "%s,添加失败,Error:%s" % (p_ip,e)
    return render_to_response('inc/game_deploy_check.html',locals(),context_instance=RequestContext(request))
@login_required
@PermissionVerify()
def inc_install_process(request):
    """Launch the asynchronous install task for a new game/battlefield
    server and persist its gameinfo record.  (Python 2 code.)

    NOTE(review): locals() is handed to the template; do not rename locals.
    """
    project = request.GET.get("project")
    p_ip = request.GET.get("p_ip")
    s_ip = request.GET.get("s_ip")
    app_dir = request.GET.get("app_dir")
    server_id = request.GET.get("server_id")
    db_ip = request.GET.get("db_ip")
    open_time = request.GET.get("date")
    area_name = request.GET.get("area_name")
    game_type = request.GET.get("game_type")
    server_name = request.GET.get("server_name")
    zone_id = request.GET.get("zone_id")
    plat_name = request.GET.get("platform")
    use = host.objects.get(project__mini_name=project,p_ip=p_ip).use.all()
    ssh_port = host.objects.get(project__mini_name=project,p_ip=p_ip).port
    area_mini_name = area_type.objects.get(area_name=area_name).mini_name
    # a host must have exactly one use/role
    if len(use) == 1:
        for i in use: use_name = i
    else:
        err_data = "游戏服用途必须唯一!"
    #print project,p_ip,s_ip,db_ip,app_dir,server_id,db_ip,open_time,area_mini_name,game_type,server_name,zone_id,use_name,plat_name
    # installation-info dict handed to the async task
    gameinfodic = {}
    # monitoring hook (intentionally left empty)
    gameinfodic.setdefault('p_ip',p_ip)
    gameinfodic.setdefault('server_id',server_id)
    gameinfodic.setdefault('db_ip',db_ip)
    gameinfodic.setdefault('app_dir',app_dir)
    gameinfodic.setdefault('plat_name',plat_name)
    gameinfodic.setdefault('server_name',server_name)
    gameinfodic.setdefault('area_mini_name',area_mini_name)
    gameinfodic.setdefault('ssh_port',ssh_port)
    # game-server installation
    if game_type == "online":
        # template server: the first (lowest server_id) game server of the area
        template_app_dir = gameinfo.objects.filter(area__area_name=area_name,platform__mini_name=plat_name,use__use_name="游戏服").order_by("server_id")[0].app_dir
        template_p_ip = gameinfo.objects.filter(area__area_name=area_name,platform__mini_name=plat_name,use__use_name="游戏服").order_by("server_id")[0].p_ip
        template_db_ip = gameinfo.objects.filter(area__area_name=area_name,platform__mini_name=plat_name,use__use_name="游戏服").order_by("server_id")[0].db_ip
        gameinfodic.setdefault("template_app_dir",template_app_dir)
        gameinfodic.setdefault("template_p_ip",template_p_ip)
        gameinfodic.setdefault("template_db_ip",template_db_ip)
        # port depends on how many instances already run on this host
        game_id = gameinfo.objects.filter(project__mini_name=project,p_ip=p_ip).count()
        if game_id >= 0:
            game_port = 10100 + int(game_id)
        else:
            game_port = 10100
        gameinfodic.setdefault('game_port',game_port)
        # publish to the client server list
        add_serverlist = ServerList(area_name=area_name,platform=plat_name,server_name=server_name,server_id=server_id,
                        p_ip=p_ip,isopen="false",status=0,port=game_port,open_time=open_time,zone_id=zone_id)
        add_serverlist.save()
        # launch the async install task
        tasks.game_install_func.delay(gameinfodic)
    # battlefield-server installation
    elif game_type == "fight":
        template_app_dir = gameinfo.objects.get(area__area_name=area_name,platform__mini_name=plat_name,use__use_name="战场服",server_id=0).app_dir
        template_p_ip = gameinfo.objects.get(area__area_name=area_name,platform__mini_name=plat_name,use__use_name="战场服",server_id=0).p_ip
        gameinfodic.setdefault("template_app_dir",template_app_dir)
        gameinfodic.setdefault("template_p_ip",template_p_ip)
        check_bf_count = gameinfo.objects.filter(p_ip=p_ip).count()
        game_port = 10300 + check_bf_count * 10
        bf_client_port = game_port + 1
        gameinfodic.setdefault('game_port1',game_port)
        gameinfodic.setdefault('game_port2',bf_client_port)
        tasks.bfserver_install_func.delay(gameinfodic)
    # persist the game-server record.
    # NOTE(review): game_port is undefined for game_type values other than
    # "online"/"fight" — the insert would raise NameError, silently caught
    # by the broad except below.  Confirm only those two types reach here.
    try:
        area_id = area_type.objects.get(area_name=area_name)
        use_id = server_use.objects.get(use_name=use_name)
        project_id = Project.objects.get(mini_name=project)
        platform_id = platform_type.objects.get(mini_name=plat_name)
        game_count = gameinfo.objects.filter(project__mini_name=project,p_ip=p_ip).count() + 1
        add_game = gameinfo(project=project_id,area=area_id,platform=platform_id,use=use_id,p_ip=p_ip,
                            s_ip=s_ip,db_ip=db_ip,game_port=int(game_port),app_dir=app_dir,open_time=open_time,
                            server_id=int(server_id),server_name=server_name)
        add_game.save()
        # update the host's running-game count
        upcount = host.objects.filter(p_ip=p_ip)
        upcount.update(game_count=game_count)
    except Exception,e:
        err_data = "%s,添加游戏信息到GameInfo失败,Error:%s" % (p_ip,e)
    return HttpResponseRedirect("/inc/inc_process_status/?gameserver=%s"% app_dir)
@login_required
@PermissionVerify()
def inc_process_status(request):
    """Show the install log of a game server by reading
    /tmp/inc/<gameserver>_install.log via a shell command."""
    header_title,nav,tip= "装服执行过程","游戏项目管理","装服执行过程"
    gameserver = request.GET.get("gameserver")
    action = request.GET.get("action")
    # NOTE(review): gameserver is user-controlled and interpolated into a
    # shell command — shell-injection risk; prefer reading the file directly
    # or a subprocess argument list.
    cmd = "cat /tmp/inc/%s_install.log" % gameserver
    cmd_status,cmd_out = commands.getstatusoutput(cmd)
    # action == "1" renders the bare status fragment (for polling)
    if action == "1":
        return render_to_response('inc/inc_install_process_status.html',locals(),context_instance=RequestContext(request))
    else:
        return render_to_response('inc/inc_install_process.html',locals(),context_instance=RequestContext(request))
#!/usr/bin/python
""" Parse and relay a notification """
import sys
import re
import os
def message(title, message, urgency='NORMAL'):
    """Show a desktop notification via notify-send.

    Args:
        title: notification summary line.
        message: notification body (may contain Pango markup such as <i>).
        urgency: notify-send urgency level (low/normal/critical).
    """
    # BUG FIX: the old os.system('notify-send "{}" "{}" ...') broke -- and
    # allowed shell injection -- whenever title/message contained quotes,
    # backticks or $(...).  Passing an argument list avoids the shell entirely.
    import subprocess
    subprocess.call(['notify-send', title, message, '--urgency={}'.format(urgency)])
# Substring -> replacement tables used to clean up the notification text.
summary_replace = {' (Magazino)': '',
                   'Direct Message': ' '}
body_replace = {'magazino.hipchat.com': '',
                'mattermost.magazino.eu': '',
                '@': ''}

# The notification daemon invokes this script with exactly five arguments.
appname, summary, body, icon, urgency = sys.argv[1:]

try:
    # Split "<summary> - <room name>" and turn the room name into
    # lower-case initials with a hashtag prefix (e.g. "Dev Team" -> "#dt").
    summary, rn = summary.split(" - ")
    rn = '#' + ''.join([r[0] for r in rn.split(' ')]).lower()
    rn = rn.rstrip('(')
    # Room name in italics
    summary = summary + " <i>" + rn + "</i>"
except (ValueError, IndexError):
    # No " - " separator (or an empty word): leave the summary untouched.
    pass

try:
    # BUG FIX: dict.iteritems() only exists on Python 2; under Python 3 it
    # raised AttributeError which the bare `except` silently swallowed, so
    # none of the replacements ever happened.  .items() works on both.
    for keyword, replaceword in summary_replace.items():
        summary = summary.replace(keyword, replaceword)
    for keyword, replaceword in body_replace.items():
        body = body.replace(keyword, replaceword)
    # Strip anything in square brackets (raw string avoids the invalid
    # "\[" escape-sequence warning).
    body = re.sub(r'\[.*\]', '', body)
except Exception:
    pass

# For mattermost, the sender's name is the part of the body before ":".
summary, body = body.split(':', 1)
message(summary, body, urgency)
|
#-*-coding:utf-8-*-
'''
对角线遍历
https://blog.csdn.net/zzz_cming/article/details/81035354
https://leetcode-cn.com/explore/learn/card/array-and-string/199/introduction-to-2d-array/774/
'''
if __name__ == '__main__':
    # Walk the anti-diagonals of a square matrix, printing each (row, col)
    # index pair together with the stored value.
    A = [[1, 2, 3],
         [4, 5, 6],
         [7, 8, 9]]
    n = len(A)
    # Cells on anti-diagonal `s` are exactly those whose indices sum to `s`.
    for s in range(2 * n - 1):
        # Clamp the row index so both row and column stay inside the matrix,
        # instead of testing bounds on every candidate pair.
        for row in range(max(0, s - n + 1), min(s, n - 1) + 1):
            col = s - row
            print("对应索引j,k,i:", row, col, s, " 对应值:", A[row][col])
|
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
from moviepy.editor import VideoFileClip
# UKULELE CLIP, OBTAINED BY CUTTING AND CROPPING
# RAW FOOTAGE
#class VideoManagerClass():
# listTitleAndFullVideo = []
# def __init__(self, ):
# print("VideoManager Created")
# return None
# def test():
# print("test")
#
#videoManager = VideoManagerClass()
#videoManager.test()
class Main():
    """Entry object for the video-composition experiment.

    BUG FIX: ``test`` previously contained only comment lines, which is a
    SyntaxError in Python (a ``def`` needs at least one statement), so the
    module could not even be imported.  A real body fixes that while keeping
    the draft experiments for reference.
    """

    def __init__(self,):
        self.test();

    def test(self):
        """Placeholder for the clip-composition experiments below."""
        # Kept from the original draft (disabled):
        #   w, h = Avideo.size
        #   Bvideo = VideoFileClip("D:/VideoMakerProject/VideosForderName/B.mp4", audio=False)
        #   txt_mov = txt_col.set_pos(lambda t: (max(w/30, int(w-0.5*w*t)),
        #                                        max(5*h/6, int(100*t))))
        #   final = CompositeVideoClip([Avideo, txt_mov, Bvideo])
        #   final.subclip(0, 5).write_videofile("D:/VideoMakerProject/AB.mp4", fps=24, codec='libx264')
        pass
# NOTE(review): Main() is instantiated before Avideo exists; that only works
# today because Main.test() references Avideo solely in commented-out code.
main = Main()
# Load the first five seconds of the raw footage, video only (hard-coded local path).
Avideo = VideoFileClip("D:/VideoMakerProject/VideosForderName/A.mp4", audio=False).subclip(0,5);
# logging.py
# implements Server Logger class
import os
from datetime import datetime
class Logger:
    """Simple caching file logger.

    Messages are echoed to stdout and buffered in memory; once the buffer
    exceeds ``maxCacheSize`` entries it is appended to
    ``<defaultFolder>/<caller>.log``.  Call ``close()`` to flush the rest.
    """

    def __init__(self, caller):
        # Sanitise the caller id (e.g. "host.name:port") into a safe file name.
        self.caller = caller.replace(".", "_").replace(":", "__")
        self.cache = []          # buffered, not-yet-persisted log lines
        self.maxCacheSize = 100  # flush threshold
        self.defaultFolder = ".log_entry"
        if not os.path.exists(self.defaultFolder):
            os.makedirs(self.defaultFolder)

    def info(self, message):
        """Log information level message."""
        self._log("INFO", message)

    def warn(self, message):
        """Log warning level message."""
        self._log("WARN", message)

    def error(self, message):
        """Log error level message."""
        self._log("ERROR", message)

    def close(self):
        """Close logger by saving cached messages to the log file."""
        self._save()

    def _log(self, level, message):
        """Shared implementation for info/warn/error: format, echo, buffer, flush.

        (The three public methods previously triplicated this exact code.)
        """
        logMessage = "[{} {} {}] {}".format(level, datetime.now().strftime("%d/%m/%Y %H:%M:%S"), self.caller, message)
        print(logMessage)
        self.cache.append(logMessage)
        self._update()

    def _update(self):
        """Flush the cache to disk once it exceeds the configured size."""
        if len(self.cache) > self.maxCacheSize:
            self._save()
            self.cache = []

    def _save(self):
        """Append all cached messages to this caller's log file."""
        if len(self.cache) <= 0: return
        with open(os.path.join(self.defaultFolder, "{}.log".format(self.caller)), "a") as logFile:
            for message in self.cache:
                print(message, file=logFile)
# Josephus-style counting game: every `rhyme_num`-th person leaves the circle
# until only one remains.
people_am = int(input('Количество человек: '))
rhyme_num = int(input('Какое число в считалке? '))
print('Значит выбывает каждый', rhyme_num, 'человек.')
guys_list = list(range(1, people_am + 1))
person_left = 0  # index in guys_list where the next count starts
while len(guys_list) > 1:
    print('\nТекущий круг людей:', guys_list)
    # Wrap the starting index when it ran past the (now shorter) list.
    if person_left > len(guys_list) - 1:
        print('Начало счета с номера', guys_list[person_left % len(guys_list)])
    else:
        print('Начало счета с номера', guys_list[person_left])
    person_left = (rhyme_num + person_left - 1) % len(guys_list)
    print('Выбывает человек под номером', guys_list[person_left])
    # BUG FIX: remove by index, not by value.  list.remove() searches for the
    # first equal element and only happened to be correct here because every
    # number in the circle is unique.
    guys_list.pop(person_left)
print('\nОстался человек под номером:', guys_list[0])
#!/usr/bin/python3
from pathlib import Path
import shelve
def coordconv(x,y):
    """Convert a point from a bottom-left origin to SVG's top-left origin."""
    # Only the vertical axis flips; relies on the module-global canvas height.
    return (x, ysize - y)
def line(x1,y1,x2,y2):
    """Append an SVG line between two points given in user coordinates."""
    (x1, y1), (x2, y2) = coordconv(x1, y1), coordconv(x2, y2)
    dfile.write(f'<line x1="{x1}" y1="{y1}" x2="{x2}" y2="{y2}" stroke="black"/>\n')
def circle(x,y,r):
    """Append an SVG circle of radius r centred at (x, y) in user coordinates."""
    cx, cy = coordconv(x, y)
    dfile.write(f'<circle cx="{cx}" cy="{cy}" r="{r}" stroke="black"/>\n')
def rect(x,y,w,h):
    """Append an SVG axis-aligned rectangle; (x, y) is in user coordinates."""
    px, py = coordconv(x, y)
    dfile.write(f'<rect x="{px}" y="{py}" width="{w}" height="{h}" stroke="black"/>\n')
def start(name,x,y):
    """Open (or reopen) `name`.svg for drawing and set the canvas size.

    If the file already exists, the closing ``</svg>`` line written by end()
    is stripped so further shapes can be appended; otherwise a fresh SVG
    header with a white background rectangle is written.
    """
    global dfile
    global xsize
    global ysize
    xsize =x
    ysize =y
    currentDir = Path.cwd()/Path(name+'.svg')
    if currentDir.exists():
        # BUG FIX: the original `open(currentDir,'r').readlines()` leaked the
        # read handle.  Context managers close both passes deterministically.
        with open(currentDir, 'r') as existing:
            tmp = existing.readlines()
        del tmp[-1]  # drop the trailing '</svg>'
        with open(currentDir, 'w') as rewritten:
            rewritten.writelines(tmp)
        dfile = open(currentDir, 'a')
    else:
        # The redundant second `if not currentDir.exists()` check is now a
        # plain else: after the rewrite above the file always exists.
        dfile = open(currentDir, 'a')
        dfile.write(f'<svg height="{y}" width="{x}">\n<rect width="100%" height="100%" fill="white"/>\n')
'''
Example usage:
start('drawii', 100, 100)
line(0,0,50,50)
circle(25,25,10)
rect(10,10,20,20)
end()
'''
def end():
    """Write the closing SVG tag and release the drawing file handle."""
    closing_tag = '\n\n</svg>'
    dfile.write(closing_tag)
    dfile.close()
|
# Minimal interactive four-function calculator.
print("Calculator(simple)")
x = int(input("Enter first number"))
y = int(input("Enter second number"))
# BUG FIX: the prompt advertises capitalised words ("Add", "Subtract") but the
# comparisons only matched lower case, so typing what the prompt showed did
# nothing.  Normalise the answer so both forms work.
operator = input("please enter: Add, Subtract, divide or multiply").strip().lower()
if operator == "add":
    total = x + y
    print ("x + y =", total)
elif operator == "subtract":
    total = x - y
    print ("x - y =", total)
elif operator == "divide":
    # Robustness: report division by zero instead of crashing.
    if y == 0:
        print ("cannot divide by zero")
    else:
        total = x / y
        print ("x ÷ y =", total)
elif operator == "multiply":
    total = x * y
    print ("x*y =", total)
else:
    # Previously an unrecognised operator was silently ignored.
    print ("unknown operator:", operator)
|
from ..common import WQXException
from .MeasureCompact import MeasureCompact
from .SimpleContent import (
DetectionQuantitationLimitCommentText,
DetectionQuantitationLimitTypeName
)
from yattag import Doc
class DetectionQuantitationLimit:
    """Information that describes one of a variety of detection or quantitation limits determined in a laboratory."""

    __detectionQuantitationLimitTypeName: DetectionQuantitationLimitTypeName
    __detectionQuantitationLimitMeasure: MeasureCompact
    __detectionQuantitationLimitCommentText: DetectionQuantitationLimitCommentText

    def __init__(self, o=None, *,
        detectionQuantitationLimitTypeName:DetectionQuantitationLimitTypeName = None,
        detectionQuantitationLimitMeasure:MeasureCompact = None,
        detectionQuantitationLimitCommentText:DetectionQuantitationLimitCommentText = None
    ):
        """Build from a copy (``o`` instance), a dict (``o`` mapping), or keywords."""
        if isinstance(o, DetectionQuantitationLimit):
            # Assign attributes from object without typechecking
            self.__detectionQuantitationLimitTypeName = o.detectionQuantitationLimitTypeName
            self.__detectionQuantitationLimitMeasure = o.detectionQuantitationLimitMeasure
            self.__detectionQuantitationLimitCommentText = o.detectionQuantitationLimitCommentText
        elif isinstance(o, dict):
            # Assign attributes from dictionary with typechecking.
            # BUG FIX: dict.get() takes its fallback positionally; the former
            # `o.get(key, default=None)` raised "get() takes no keyword
            # arguments" on every dict-based construction.  None is already
            # the default fallback, so no second argument is needed.
            self.detectionQuantitationLimitTypeName = o.get('detectionQuantitationLimitTypeName')
            self.detectionQuantitationLimitMeasure = o.get('detectionQuantitationLimitMeasure')
            self.detectionQuantitationLimitCommentText = o.get('detectionQuantitationLimitCommentText')
        else:
            # Assign attributes from named keywords with typechecking
            self.detectionQuantitationLimitTypeName = detectionQuantitationLimitTypeName
            self.detectionQuantitationLimitMeasure = detectionQuantitationLimitMeasure
            self.detectionQuantitationLimitCommentText = detectionQuantitationLimitCommentText

    @property
    def detectionQuantitationLimitTypeName(self) -> DetectionQuantitationLimitTypeName:
        return self.__detectionQuantitationLimitTypeName
    @detectionQuantitationLimitTypeName.setter
    def detectionQuantitationLimitTypeName(self, val:DetectionQuantitationLimitTypeName) -> None:
        # Setter always coerces; the SimpleContent type performs validation.
        self.__detectionQuantitationLimitTypeName = DetectionQuantitationLimitTypeName(val)

    @property
    def detectionQuantitationLimitMeasure(self) -> MeasureCompact:
        return self.__detectionQuantitationLimitMeasure
    @detectionQuantitationLimitMeasure.setter
    def detectionQuantitationLimitMeasure(self, val:MeasureCompact) -> None:
        self.__detectionQuantitationLimitMeasure = MeasureCompact(val)

    @property
    def detectionQuantitationLimitCommentText(self) -> DetectionQuantitationLimitCommentText:
        return self.__detectionQuantitationLimitCommentText
    @detectionQuantitationLimitCommentText.setter
    def detectionQuantitationLimitCommentText(self, val:DetectionQuantitationLimitCommentText) -> None:
        # Comment text is optional; preserve None instead of coercing it.
        self.__detectionQuantitationLimitCommentText = None if val is None else DetectionQuantitationLimitCommentText(val)

    def generateXML(self, name:str = 'DetectionQuantitationLimit') -> str:
        """Serialize to a WQX XML fragment.

        Raises WQXException when a required attribute (type name, measure)
        is missing; the optional comment is emitted only when present.
        """
        doc, tag, text, line = Doc().ttl()
        with tag(name):
            if self.__detectionQuantitationLimitTypeName is None:
                raise WQXException("Attribute 'detectionQuantitationLimitTypeName' is required.")
            line('DetectionQuantitationLimitTypeName', self.__detectionQuantitationLimitTypeName)
            if self.__detectionQuantitationLimitMeasure is None:
                raise WQXException("Attribute 'detectionQuantitationLimitMeasure' is required.")
            doc.asis(self.__detectionQuantitationLimitMeasure.generateXML('DetectionQuantitationLimitMeasure'))
            if self.__detectionQuantitationLimitCommentText is not None:
                line('DetectionQuantitationLimitCommentText', self.__detectionQuantitationLimitCommentText)
        return doc.getvalue()
|
#!/usr/bin/env python3
from glob import glob
import PyPDF2, os
import argparse
import sys
def add_file(pdf_file):
    """Append every page of ``pdf_file`` to the module-global pdfWriter."""
    # BUG FIX: `print "..."` is Python 2 statement syntax; under the python3
    # shebang this script declares it is a SyntaxError.
    print("Adding %s..." % (pdf_file))
    pdfFileObj = open(pdf_file, 'rb')
    # NOTE(review): the handle is left open on purpose -- PyPDF2 page objects
    # keep reading from the source stream until pdfWriter.write() runs later.
    pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
    for pageNum in range(0, pdfReader.numPages):
        pageObj = pdfReader.getPage(pageNum)
        pdfWriter.addPage(pageObj)
#Create the PdfFileWriter object that accumulates pages from all inputs
pdfWriter = PyPDF2.PdfFileWriter()
# CLI: first positional argument is the output file, the rest are input files
# (each may itself be a glob pattern, e.g. when quoted in the shell).
parser = argparse.ArgumentParser()
parser.add_argument('outfile')
parser.add_argument('inFile', nargs='+')
args = parser.parse_args()
for pdfFile in args.inFile:
    # Non-.pdf arguments are silently skipped.
    if pdfFile.endswith('.pdf'):
        file_list=glob(pdfFile)
        for file in file_list:
            add_file(file)
print("Writing output to %s..." %(args.outfile))
# Write the merged document and close the output handle explicitly.
pdfOutput = open(args.outfile, 'wb')
pdfWriter.write(pdfOutput)
pdfOutput.close()
|
'''
# 1. problem.txt 파일을 생성 후, 다음과 같은 내용을 작성
0
1
2
3
# 2. problem.txt의 파일 내용을 다음과 같이 변경
3
2
1
0
# 3. reverse.txt의 파일 내용에 problem.txt의 파일 내용을 반대로 넣기
'''
'''
# 1
with open( 'problem.txt', 'w' ) as f:
for i in range (0,4):
f.write (str(3-i) + '\n' )
'''
'''
#2
textContents = []
with open( 'problem.txt', 'r') as f:
for i in range(4):
# line = f.readline()
# while((textContents[i] = f.readline()) != '' ): 이런 문법으로 진행하고 싶은데 왜 문제인건지?
textContents.append(f.readline())
# EOF error 의 경우 try catch문으로도 잡을 수 있다.
print(textContents)
with open( 'problem.txt', 'w') as f:
for i in range(4):
f.write ( textContents[(3-i)] )
'''
#3: write the first four lines of problem.txt into reverse.txt in reverse order.
textContents = []
with open( 'problem.txt', 'r') as f:
    for i in range(4):
        # Each element keeps its trailing newline, so it can be written back as-is.
        textContents.append(f.readline())
print('before: ')
print(textContents)
with open( 'reverse.txt', 'w') as f:
    for i in range(4):
        # Index from the end (3-i) to reverse the line order.
        f.write ( textContents[(3-i)] )
print('after: ')
#print(textContents)
# with open( 'problem.txt', 'w') as f:
#     for i range(3):
#         f.write(str(textContext[lineNum-i]) + '\n' )
#         # f1.write(content + "\n")
# error code kept for reference: assignment inside a while-condition is not
# valid Python syntax (use the walrus operator `:=` on Python 3.8+ instead).
'''
lineNum = 0
textContents = []
with open( 'problem.txt', 'r') as f:
    while ( (textContents[lineNum] = f.readline()) != '' ): # readliines() 사용한 뒤 리스트로 받아와서 len(list~)
        lineNum = lineNum + 1
print (lineNum)
'''
# list1 = []
# for i in range(5):
#     list1.append(i)
# print(list1)
|
#!/usr/bin/env python
#!-*- coding:utf-8 -*-
import jieba
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import Input,Model
from tensorflow.keras import preprocessing
from tensorflow.keras.layers import Layer
from tensorflow.keras import initializers, regularizers, constraints
import pandas as pd
class Attention(Layer):
    """Additive attention pooling layer.

    Scores every timestep of a (batch, step_dim, features) sequence with a
    learned per-feature weight vector W (optionally a per-step bias b),
    softmax-normalises the scores, and returns the weighted sum: a
    (batch, features) vector.
    """
    def __init__(self, step_dim, name="Attention", W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None, bias=None, **kwargs):
        # step_dim: number of timesteps the layer attends over (also needed
        # by get_config so that save/load_model can rebuild the layer).
        print('attention __init__, step_dim: ', step_dim)
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = W_regularizer
        self.b_regularizer = b_regularizer
        self.W_constraint = W_constraint
        self.b_constraint = b_constraint
        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0  # filled in by build() from the actual input shape
        super(Attention, self).__init__(**kwargs)
    def build(self, input_shape):
        # Expects a 3-D input (batch, steps, features); W has one weight per feature.
        print('attention build input_shape:\n', len(input_shape), input_shape)
        print('input_shape[-1]: ', input_shape[-1])
        #assert len(input_shape) == 3
        self.W = self.add_weight(shape=(input_shape[-1], ),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.features_dim = input_shape[-1]
        if self.bias:
            # One bias per timestep (shape = number of steps).
            self.b = self.add_weight(shape=(input_shape[1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True
    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None
    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        print('attention call x.shape: ', x.shape, 'features_dim: ', features_dim, 'step_dim: ', step_dim)
        print('self.W.shape: ', self.W.shape, 'keras.backend.reshape(self.W, (features_dim, 1)).shape: ', keras.backend.reshape(self.W, (features_dim, 1)).shape)
        print('keras.backend.reshape(x,(-1,features_dim).shape): ', keras.backend.reshape(x, (-1, features_dim)).shape)
        # Flatten to 2-D, project onto W, reshape back to (batch, steps):
        # one unnormalised score per timestep.
        e = keras.backend.reshape(keras.backend.dot(keras.backend.reshape(x, (-1, features_dim)), keras.backend.reshape(self.W, (features_dim, 1))), (-1, step_dim)) # e = K.dot(x, self.W)
        if self.bias:
            e += self.b
        e = keras.backend.tanh(e)
        # Manual softmax over the step axis (exp / masked sum).
        a = keras.backend.exp(e)
        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # cast the mask to floatX to avoid float64 upcasting in theano
            a *= keras.backend.cast(mask, keras.backend.floatx())
        # in some cases especially in the early stages of training the sum may be almost zero
        # and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
        a /= keras.backend.cast(keras.backend.sum(a, axis=1, keepdims=True) + keras.backend.epsilon(), keras.backend.floatx())
        a = keras.backend.expand_dims(a)
        # Weighted sum over timesteps -> (batch, features).
        c = keras.backend.sum(a * x, axis=1)
        return c
    def compute_output_shape(self, input_shape):
        print('attention compute_output_shape: input_shape', input_shape, 'feature_dim: ', self.features_dim)
        return (input_shape[0], self.features_dim)
    # If a saved model contains a custom layer, get_config must be overridden
    # to add the layer's required constructor arguments (here step_dim, the
    # first non-default __init__ parameter) to the config dict; without this
    # override, model.save()/load_model() fails.
    def get_config(self):
        config = {"step_dim":self.step_dim}
        base_config = super(Attention, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class DenseEmbeddingTag:
    """Text classifier pipeline: jieba segmentation -> word indexing ->
    padded sequences -> BiLSTM + Attention model -> prediction dump.

    Records are lines of the form ``<label>@@@@@@@@@@<description>``.
    """

    def __init__(self, user_name):
        # user_name: free-form tag for this pipeline instance.
        self.user_name = user_name

    def load_jiedai_data(self, file_name):
        """Read a labelled corpus; return (black_count, white_count, word->count dict).

        label 1 counts as "black" (positive), label 0 as "white" (negative).
        """
        wordcnt_dict = {}
        black_num = 0
        white_num = 0
        with open(file_name) as fp:
            lines = fp.readlines()
            for line in lines:
                label,desc=line.split("@@@@@@@@@@")[0],line.split("@@@@@@@@@@")[1]
                seg_list = self.cut_word(desc)
                wordcnt_dict = self.generate_wordcnt_dict(wordcnt_dict, seg_list)
                if int(label) == 1:
                    black_num += 1
                elif int(label) == 0:
                    white_num += 1
        return black_num,white_num,wordcnt_dict

    def cut_word(self, line):
        """Segment a line of Chinese text (jieba full mode, HMM enabled)."""
        seg_list = jieba.cut(line, cut_all=True, HMM=True)
        return seg_list

    def generate_wordcnt_dict(self, wordcnt_dict, seg_list):
        """Fold the segments of one document into the running word-count dict."""
        for seg in seg_list:
            # Skip empty tokens and bare newlines produced by the tokenizer.
            if len(seg)>=1 and seg != '\n':
                if not seg in wordcnt_dict.keys():
                    wordcnt_dict[seg] = 1
                else:
                    wordcnt_dict[seg] += 1
        return wordcnt_dict

    def encode_word(self, wordcnt_dict):
        """Map words to integer ids by descending frequency, starting at 3.

        Ids 0-2 are implicitly reserved (2 = out-of-vocabulary marker used by
        the encoders below).
        """
        word_index_dict = {}
        wordcnt_list = sorted(wordcnt_dict.items(),key = lambda x:x[1], reverse=True)
        word_index = 3
        for item in wordcnt_list:
            word_index_dict[item[0]] = word_index
            word_index += 1
        return word_index_dict

    def encode_train_data(self, file_name, sample_num, word_index_dict, word_num, max_len):
        """Re-read the corpus and return [raw id lists, labels, padded sequences]."""
        train_data = [0]*sample_num
        train_labels = [0]*sample_num
        idx = 0
        with open(file_name) as fp:
            lines = fp.readlines()
            for line in lines:
                label,desc=line.split("@@@@@@@@@@")[0],line.split("@@@@@@@@@@")[1]
                train_labels[idx] = int(label)
                data = []
                seq_list = self.cut_word(desc)
                for seq in seq_list:
                    if not seq in word_index_dict.keys():
                        data.append(2)  # unknown word
                    else:
                        if word_index_dict[seq] < word_num:
                            data.append(word_index_dict[seq])
                        else:
                            data.append(3)  # known word but outside the kept vocabulary
                train_data[idx] = data
                idx += 1
        train_sequences = preprocessing.sequence.pad_sequences(train_data, max_len)
        return ([train_data,train_labels, train_sequences])

    def load_need_pred_data(self, file_name, word_index_dict, word_num, max_len):
        """Read unlabelled ``apk@@@@@@@@@@desc`` records; return
        [apk list, apk->desc dict, padded sequences] for prediction."""
        lenp = 100000  # generous upper bound; trimmed to `idx` below
        need_pred_data = [0]*lenp
        need_pred_apk = [0]*lenp
        need_pred_desc = {}
        idx = 0
        with open(file_name) as fp:
            lines = fp.readlines()
            for line in lines:
                if len(line.split("@@@@@@@@@@")) != 2:
                    # Malformed record: log and skip.
                    print('lines: ', lines)
                else:
                    apk,desc = line.split("@@@@@@@@@@")[0], line.split("@@@@@@@@@@")[1]
                    need_pred_desc[apk] = desc
                    need_pred_apk[idx] = apk
                    data = []
                    seq_list = self.cut_word(desc)
                    for seq in seq_list:
                        if not seq in word_index_dict.keys():
                            data.append(2)
                        else:
                            if word_index_dict[seq] < word_num:
                                data.append(word_index_dict[seq])
                            else:
                                data.append(3)
                    need_pred_data[idx] = data
                    idx += 1
        need_pred_apk = need_pred_apk[0:idx]
        need_pred_sequences = preprocessing.sequence.pad_sequences(need_pred_data[0:idx], max_len)
        print('pred_data len: ', len(need_pred_sequences))
        return([need_pred_apk, need_pred_desc, need_pred_sequences])

    def text_bi_lstm_attention_model(self, train_sequences, train_labels, word_num, embedding_dim, max_len, model_file):
        """Build, train and save the BiLSTM+Attention classifier; return the model."""
        input = Input((max_len,))
        # BUG FIX: the Embedding layer used input_length=max_line_len, a
        # global defined only in __main__ (1,000,000) that contradicts the
        # Input((max_len,)) shape above; it must be this method's max_len.
        embedding = layers.Embedding(word_num, embedding_dim, input_length=max_len)(input)
        # Bidirectional LSTM; a plain layers.LSTM() would also work.
        # Attention expects a 3-D input such as (None, max_len, 128), so
        # return_sequences=True is required: it keeps the hidden vector of
        # every timestep instead of only the final state (which would be 2-D,
        # e.g. (None, 128)).
        x = layers.Bidirectional(layers.LSTM(128, return_sequences=True))(embedding)
        #x = layers.LSTM(128, return_sequences=True)(embedding)
        x = Attention(max_len)(x)
        output = layers.Dense(2, activation='softmax')(x)
        model = Model(inputs = input, outputs = output)
        print(model.summary())
        model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        model.fit(train_sequences, train_labels, batch_size = 512, epochs = 5)
        # Saving works only because the custom Attention layer overrides get_config.
        model.save(model_file)
        return(model)

    def predict_with_model_file(self, model_file, need_pred_sequences):
        """Load a saved model from disk and predict on the given sequences."""
        # The model contains the custom Attention layer, so load_model must
        # receive it through the custom_objects mapping.
        model = tf.keras.models.load_model(model_file, custom_objects={"Attention":Attention})
        pred_result = model.predict(need_pred_sequences)
        print('predict_result.shape: ', pred_result.shape)
        return(pred_result)

    def predict_new(self, model, need_pred_sequences):
        """Predict with an already-loaded model."""
        pred_result = model.predict(need_pred_sequences)
        print('predict_result.shape: ', pred_result.shape)
        return(pred_result)

    def save_predict_result(self, file_name, need_pred_apk, need_pred_desc, predict_result):
        """Write '<black_score>\\t<apk>\\t<desc>' lines for every predicted apk."""
        with open(file_name, "w") as fp:
            for idx in range(0,len(need_pred_apk)):
                apk = need_pred_apk[idx]
                if apk in need_pred_desc.keys():
                    desc = need_pred_desc[apk]
                    # Column 1 of predict_result is the "black" class probability.
                    black_pred_score = predict_result[idx][1]
                    fp.write("%.3f\t%s\t%s" % (black_pred_score, apk, desc))
if __name__ == '__main__':
    app_name_tag = DenseEmbeddingTag('app')
    print('load train_data file')
    black_num,white_num,wordcnt_dict = app_name_tag.load_jiedai_data("../train_data.txt")
    print("black_num: ", black_num, "white_num: ", white_num, "word_cnt: ", len(wordcnt_dict))
    word_index_dict = app_name_tag.encode_word(wordcnt_dict)
    # Hyper-parameters.
    word_num = 10000        # vocabulary size kept in the embedding
    embedding_dim = 100
    max_len = 256           # padded sequence length
    max_line_len = 1000000
    model_file = 'MODEL_FILE/bilstm_attention.model'
    sample_num = black_num + white_num
    train_data,train_labels,train_sequences = app_name_tag.encode_train_data("../train_data.txt", sample_num, word_index_dict, word_num, max_len)
    # BUG FIX: the original called app_name_tag.print_data(...), a method that
    # does not exist on DenseEmbeddingTag, so the script always crashed here
    # with AttributeError before training could start.
    #train_labels = tf.keras.utils.to_categorical(train_labels)
    #train_labels = pd.get_dummies(train_labels)
    model = app_name_tag.text_bi_lstm_attention_model(train_sequences, train_labels,word_num, embedding_dim, max_len, model_file)
    need_pred_apk,need_pred_desc,need_pred_sequences = app_name_tag.load_need_pred_data("../need_pred_data.txt", word_index_dict, word_num, max_len)
    predict_result = app_name_tag.predict_new(model, need_pred_sequences)
    #predict_result = app_name_tag.predict_with_model_file(model_file, need_pred_sequences)
    app_name_tag.save_predict_result("predict_result.txt", need_pred_apk, need_pred_desc, predict_result)
|
# -----------------------------------------------------------------------------
# Copyright (C) 2019-2020 The python-ndn authors
#
# This file is part of python-ndn.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# TODO: Change these names
from .schema_tree import Node
from .util import norm_pattern
from ..encoding import Name, Component, TlvModel, NameField, ContentType
from ..types import InterestTimeout
from ..utils import timestamp
class LocalResource(Node):
    """
    LocalResource is a custom node that preloads some data.
    When need() is called, it returns the loaded data directly.
    This node type does not interact with the network.
    """
    def __init__(self, parent=None, data=None):
        super().__init__(parent)
        # Preloaded content served to every need() call (may be None until provide()).
        self.data = data
    async def on_register(self, root, app, prefix, cached: bool):
        # Remember where this node is mounted and register the prefix with the
        # NDN app; interests are dispatched through the tree root's handler.
        self.prefix = prefix
        self.app = app
        return await app.register(prefix, root._on_interest_root, root._int_validator, True)
    async def need(self, match, **kwargs):
        # Serve straight from memory; never expresses an Interest.
        return self.data
    async def provide(self, match, content, **kwargs):
        # Replace the preloaded content in place.
        self.data = content
class SegmentedNode(Node):
    """
    SegmentedNode represents a segmented object.
    The segmented object is composed with multiple Data packets,
    whose name have a suffix "/seg=seg_no" attached to the object's name.
    The ``provide`` function handles segmentation, and the ``need`` function handles reassembly.
    .. note::
        Currently, the fetching pipeline is a simple one-by-one pipeline.
        where only one Interest will be in-flight at one time.
    """
    SEGMENT_PATTERN = norm_pattern('<seg:seg_no>')[0]
    SEGMENT_SIZE = 4400  # payload bytes per segment Data packet
    def __init__(self, parent=None, timeout=4000, retry_times=3, segment_size=SEGMENT_SIZE):
        # timeout: Interest lifetime passed to need() -- presumably milliseconds
        # (TODO confirm against the InterestParam convention).
        super().__init__(parent)
        self._set(self.SEGMENT_PATTERN, Node())
        self.timeout = timeout
        self.retry_times = retry_times
        self.segment_size = segment_size
    async def retry(self, submatch, must_be_fresh):
        """Fetch one segment, re-expressing the Interest up to retry_times on timeout."""
        trial_times = 0
        while True:
            try:
                return await submatch.need(must_be_fresh=must_be_fresh, lifetime=self.timeout, can_be_prefix=False)
            except InterestTimeout:
                trial_times += 1
                if trial_times >= self.retry_times:
                    # Give up: propagate the timeout to the caller.
                    raise
    async def need(self, match, **kwargs):
        """Fetch segments 0,1,2,... until final_block_id and reassemble the object."""
        if match.pos < len(match.name):
            raise ValueError(f'{Name.to_str(match.name)} does not match with the structure')
        # Placeholder last component, overwritten with each segment number below.
        subname = match.name + [None]
        must_be_fresh = kwargs.get('must_be_fresh', True)
        contents = []
        cur = 0
        while True:
            subname[-1] = Component.from_segment(cur)
            submatch = match.finer_match(subname)
            segment, meta_data = await self.retry(submatch, must_be_fresh)
            contents.append(segment)
            # The producer marks the last segment via FinalBlockId.
            if meta_data['final_block_id'] == subname[-1]:
                break
            cur += 1
        ret = b''.join(contents)
        # Metadata of the reassembled whole; content_type/freshness come from
        # the last segment received.
        meta_data_ret = {
            **match.env,
            'content_type': meta_data['content_type'],
            'block_count': cur + 1,
            'freshness_period': meta_data['freshness_period']
        }
        return ret, meta_data_ret
    async def provide(self, match, content, **kwargs):
        """Split `content` into segment_size chunks and publish each as a Data packet."""
        # Ceiling division: number of segments needed.
        seg_cnt = (len(content) + self.segment_size - 1) // self.segment_size
        subname = match.name + [None]
        final_block_id = Component.from_segment(seg_cnt - 1)
        for i in range(seg_cnt):
            subname[-1] = Component.from_segment(i)
            submatch = match.finer_match(subname)
            # Every segment carries the same FinalBlockId so consumers can stop.
            kwargs['final_block_id'] = final_block_id
            await submatch.provide(content[i*self.segment_size:(i+1)*self.segment_size], **kwargs)
    async def process_int(self, match, param, app_param, raw_packet):
        # An Interest for the bare object name is answered with segment 0.
        if match.pos == len(match.name):
            submatch = match.finer_match(match.name + [Component.from_segment(0)])
            return await submatch.on_interest(param, None, raw_packet)
class RDRNode(Node):
    """
    RDRNode represents a versioned and segmented object whose encoding follows the RDR protocol.
    Its ``provide`` function generates the metadata packet, and ``need`` function handles version discovery.
    """
    class MetaDataValue(TlvModel):
        # RDR metadata payload: the versioned name of the latest object.
        name = NameField()
    class MetaData(Node):
        VERSION_PATTERN = norm_pattern('<v:timestamp>')[0]
        FRESHNESS_PERIOD = 10  # keep metadata barely cacheable so discovery stays fresh
        def __init__(self, parent=None):
            super().__init__(parent)
            self._set(self.VERSION_PATTERN, Node(self))
        def make_metadata(self, match):
            # Encode the parent's current version into an RDR metadata value.
            metadata = RDRNode.MetaDataValue()
            metadata.name = match.name[:-1] + [Component.from_version(self.parent.timestamp)]
            return metadata.encode()
        async def process_int(self, match, param, app_param, raw_packet):
            # Discovery Interest (CanBePrefix on .../32=metadata): answer with a
            # freshly-versioned metadata Data only when content exists.
            if match.pos == len(match.name) and self.parent.timestamp is not None and param.can_be_prefix:
                # NOTE(review): the metadata Data is named with the *current*
                # timestamp() while its payload points at the parent's stored
                # version -- confirm this divergence is intended by the protocol.
                metaname = match.name + [Component.from_version(timestamp())]
                submatch = match.finer_match(metaname)
                await submatch.put_data(self.make_metadata(match), send_packet=True,
                                        freshness_period=self.FRESHNESS_PERIOD)
        async def need(self, match, **kwargs):
            if self.parent.timestamp is None:
                # Nothing produced locally: fall back to fetching from the network.
                return await super().need(match, **kwargs)
            else:
                # Serve the metadata straight from memory.
                meta_info = {
                    **match.env,
                    'content_type': ContentType.BLOB,
                    'freshness_period': self.FRESHNESS_PERIOD,
                    'final_block_id': None
                }
                return self.make_metadata(match), meta_info
    def __init__(self, parent=None, **kwargs):
        super().__init__(parent)
        # /32=metadata handles discovery; /<version> holds the segmented payload.
        self['/32=metadata'] = RDRNode.MetaData(self)
        self['<v:timestamp>'] = SegmentedNode(self, **kwargs)
        self.timestamp = None  # version of the latest locally provided content
    async def need(self, match, **kwargs):
        """Discover the latest version via metadata, then fetch that version."""
        submatch = match.finer_match(match.name + [Component.from_str('32=metadata')])
        lifetime = kwargs.get('lifetime', None)
        meta_int_param = {'lifetime': lifetime} if lifetime else {}
        metadata_val, _ = await submatch.need(must_be_fresh=True, can_be_prefix=True, **meta_int_param)
        metadata = RDRNode.MetaDataValue.parse(metadata_val, ignore_critical=True)
        # Follow the versioned name announced in the metadata.
        submatch = match.finer_match(metadata.name)
        return await submatch.need(**kwargs)
    async def provide(self, match, content, **kwargs):
        """Publish `content` under a new version and remember it as the latest."""
        self.timestamp = timestamp()
        submatch = match.finer_match(match.name + [Component.from_version(self.timestamp)])
        await submatch.provide(content, **kwargs)
|
from django.db import models
from django.utils import timezone
# Create your models here.
class User(models.Model):
    # Registered account of the recipe site.
    username = models.CharField('username',unique=True, max_length=64)
    email = models.EmailField('email',unique=True)
    # NOTE(review): the password is stored as a plain CharField -- storing raw
    # passwords is unsafe; Django's auth user / make_password should be used.
    password = models.CharField('password', max_length=16, help_text="密码长度为8到16位字符串, 需包含大小写字母和数字")
    # Non-symmetrical self M2M: "A follows B" does not imply "B follows A".
    following = models.ManyToManyField("self", symmetrical=False)
    # TODO: adding user detailed profile, like birthday/location/avatar/bio
class Recipe(models.Model):
    # A cooking recipe authored by a user; deleted with its author.
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    created_at = models.DateTimeField('created_at', default=timezone.now)
    # NOTE(review): modified_at has neither a default nor auto_now, so
    # migrations will prompt for a value and it is never updated automatically
    # -- confirm whether auto_now=True was intended.
    modified_at =models.DateTimeField('modified_at')
    content = models.TextField('content')
    # TODO: the content of the recipe may be divided into several text/form/img
class Dish(models.Model):
    # A concrete dish a user cooked, optionally linked to the recipe used.
    creator = models.ForeignKey(User, on_delete=models.CASCADE)
    # SET_NULL keeps the dish when its recipe is deleted.
    recipe = models.ForeignKey(Recipe, on_delete=models.SET_NULL, null=True)
    created_at = models.DateTimeField('created_at', default=timezone.now)
    content = models.TextField('content')
    # TODO: content also need img src path
    # TODO: for future extension
class Question(models.Model):
    # A question a user asks about a recipe; removed with either side.
    creator = models.ForeignKey(User, on_delete=models.CASCADE)
    recipe = models.ForeignKey(Recipe, on_delete=models.CASCADE)
    # NOTE(review): unlike Recipe/Dish, created_at has no default here -- confirm.
    created_at = models.DateTimeField('created_at')
class Comment(models.Model):
    # A user's comment on a dish; removed with either side.
    creator = models.ForeignKey(User, on_delete=models.CASCADE)
    dish = models.ForeignKey(Dish, on_delete=models.CASCADE)
    # NOTE(review): no default on created_at, unlike Recipe/Dish -- confirm.
    created_at = models.DateTimeField('created_at')
class Category(models.Model):
    # Placeholder: recipe category (see the many-to-many TODO below).
    pass
class Event(models.Model):
    # Placeholder: cooking event (see the many-to-many TODO below).
    pass
'''
TODO:
one-to-many mappings: user->recipe, user->dish, recipe->question, dish->comment
many-to-many mappings: user<->user, recipe<->category, dish<->event
[?] Since Django already provides an admin dashboard, it is unclear whether a
separate Role table and a role->user mapping are still needed.
'''
|
import unittest
from src.compound_interest import CompoundInterest
class CompoundInterestTest(unittest.TestCase):
    """Unit tests for CompoundInterest: annual compounding plus the
    monthly-contribution extension. Expected values are rounded to 2 dp."""
    # Tests
    def test_100_10_20(self):
        compound_interest = CompoundInterest(100, 10, 20)
        self.assertEqual(732.81, compound_interest.compound_interest_calc())
    # Should return 732.81 given 100 principal, 10 percent, 20 years
    def test_100_6_10(self):
        compound_interest = CompoundInterest(100, 6, 10)
        self.assertEqual(181.94, compound_interest.compound_interest_calc())
    # Should return 181.94 given 100 principal, 6 percent, 10 years
    def test_100000_5_8(self):
        compound_interest = CompoundInterest(100000, 5, 8)
        self.assertEqual(149058.55, compound_interest.compound_interest_calc())
    # Should return 149,058.55 given 100000 principal, 5 percent, 8 years
    # NOTE(review): the method name says 19 years but the case uses 1 year --
    # consider renaming to test_0_10_1.
    def test_0_10_19(self):
        compound_interest = CompoundInterest(0, 10, 1)
        self.assertEqual(0.00, compound_interest.compound_interest_calc())
    # Should return 0.00 given 0 principal, 10 percent, 1 year
    def test_100_0_10(self):
        compound_interest = CompoundInterest(100, 0, 10)
        self.assertEqual(100.00, compound_interest.compound_interest_calc())
    # Should return 100.00 given 100 principal, 0 percent, 10 years
    # Extention tests
    # Should return 118,380.16 given 100 principal, 5 percent, 8 years, 1000 per month
    def test_100_5_8_1000(self):
        monthly_interest = CompoundInterest(100, 5, 8, 1000)
        self.assertEqual(118380.16, monthly_interest.monthly_contribution_calc())
    # Should return 156,093.99 given 100 principal, 5 percent, 10 years, 1000 per month
    def test_100_5_10_1000(self):
        monthly_interest = CompoundInterest(100, 5, 10, 1000)
        self.assertEqual(156093.99, monthly_interest.monthly_contribution_calc())
    # Should return 475,442.59 given 116028.86, 7.5 percent, 8 years, 2006 per month
    def test_7_8_2006(self):
        monthly_interest = CompoundInterest(116028.86, 7.5, 8, 2006)
        self.assertEqual(475442.59, monthly_interest.monthly_contribution_calc())
    # Should return 718,335.97 given 116028.86 principal, 9 percent, 12 years, 1456 per month
    # (comment previously said .96, contradicting the assertion below)
    def test_116028_9_12_1456(self):
        monthly_interest = CompoundInterest(116028.86, 9, 12, 1456)
        self.assertEqual(718335.97, monthly_interest.monthly_contribution_calc())
|
# Drop every N'th element from a list
def drop(l, n):
    """Return a copy of *l* without every n-th element (1-based positions)."""
    kept = []
    for position, element in enumerate(l, start=1):
        if position % n:
            kept.append(element)
    return kept
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.