hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0942c6dbf6d2ab8171f74a73f13b5a5dbf5ebd
| 2,804
|
py
|
Python
|
symreg/tree.py
|
fabiochiusano/SymbolicRegressionPy
|
ed2e4d6c56b6aa22ba9be02bc45a3ea7bedc7bfd
|
[
"MIT"
] | 1
|
2017-09-01T19:02:25.000Z
|
2017-09-01T19:02:25.000Z
|
symreg/tree.py
|
fabiochiusano/SymbolicRegressionPy
|
ed2e4d6c56b6aa22ba9be02bc45a3ea7bedc7bfd
|
[
"MIT"
] | 1
|
2019-01-09T22:44:07.000Z
|
2019-01-09T22:44:07.000Z
|
symreg/tree.py
|
fabiochiusano/SymbolicRegressionPy
|
ed2e4d6c56b6aa22ba9be02bc45a3ea7bedc7bfd
|
[
"MIT"
] | null | null | null |
import math
import generator as gtr
class Tree(object):
    """Base class for every node of a symbolic-regression expression tree."""

    def __init__(self):
        pass
class Leaf(Tree):
    """A terminal node: no children, height 1, and it is its own single leaf."""

    def __init__(self):
        pass

    def height(self):
        """A leaf's subtree is just itself, so its height is always 1."""
        return 1

    def numOfNodes(self):
        """Total node count (internal plus leaves) of this subtree: one."""
        return 1

    def numOfLeaves(self):
        """Number of leaves in this subtree: one, this node."""
        return 1

    def numOfInternalNodes(self):
        """A leaf contains no internal nodes."""
        return 0
class ValueLeaf(Leaf):
    """A leaf holding a constant value, e.g. ValueLeaf(3)."""

    def __init__(self, value):
        self.value = value

    def eval(self, d):
        """Evaluate to the stored constant; the variable bindings *d* are ignored."""
        return self.value

    def __str__(self):
        return str(self.value)

    def clone(self):
        """Return an independent copy of this leaf."""
        return ValueLeaf(self.value)
class VariableLeaf(Leaf):
    """A leaf holding a variable name, e.g. VariableLeaf("x")."""

    def __init__(self, variable):
        self.variable = variable

    def eval(self, d):
        """Evaluate by looking this node's variable up in the bindings mapping *d*."""
        return d[self.variable]

    def __str__(self):
        return str(self.variable)

    def clone(self):
        """Return an independent copy of this leaf."""
        return VariableLeaf(self.variable)
class InternalNode(Tree):
    """Base class for non-terminal nodes, i.e. nodes that have children."""

    def __init__(self):
        pass
class BinaryOperatorInternalNode(InternalNode):
    """An internal node applying a binary operator ("+", "*" or "-") to two subtrees."""

    def __init__(self, operator, op1, op2):
        """A BinaryOperatorInternalNode has an operator and two operand subtrees."""
        self.operator = operator
        self.op1 = op1
        self.op2 = op2

    def height(self):
        """Height of this subtree: one more than the taller of its children."""
        return 1 + max(self.op1.height(), self.op2.height())

    def numOfNodes(self):
        """Returns the number of nodes, both internal
        and leaves, of the subtree with this node as root."""
        return 1 + self.op1.numOfNodes() + self.op2.numOfNodes()

    def numOfLeaves(self):
        """Returns the number of leaves of the subtree with this node as root."""
        return self.op1.numOfLeaves() + self.op2.numOfLeaves()

    def numOfInternalNodes(self):
        """Returns the number of internal nodes of the subtree with this node as root."""
        return 1 + self.op1.numOfInternalNodes() + self.op2.numOfInternalNodes()

    def eval(self, d):
        """Returns the result of the operator applied to the two operands.

        Raises ValueError for an unsupported operator.
        """
        if self.operator == "+":
            return self.op1.eval(d) + self.op2.eval(d)
        elif self.operator == "*":
            return self.op1.eval(d) * self.op2.eval(d)
        elif self.operator == "-":
            return self.op1.eval(d) - self.op2.eval(d)
        # Previously any unrecognized operator was silently evaluated as
        # subtraction; fail loudly instead so malformed trees are caught early.
        raise ValueError("unknown operator: {!r}".format(self.operator))

    def __str__(self):
        return "(" + str(self.op1) + str(self.operator) + str(self.op2) + ")"

    def clone(self):
        """Deep-copy this node together with both operand subtrees."""
        return BinaryOperatorInternalNode(self.operator, self.op1.clone(), self.op2.clone())
| 26.961538
| 96
| 0.702211
|
4a0945657e38330951d46bb7605c031d008f3eec
| 642
|
py
|
Python
|
ircbot/plugin/thanks.py
|
encadyma/ircbot
|
e8ef7b091accf6337b313838ea6c41cd20148a11
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
ircbot/plugin/thanks.py
|
encadyma/ircbot
|
e8ef7b091accf6337b313838ea6c41cd20148a11
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
ircbot/plugin/thanks.py
|
encadyma/ircbot
|
e8ef7b091accf6337b313838ea6c41cd20148a11
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
"""Show your appreciation."""
import random
def register(bot):
    """Register this plugin's message handlers with *bot*."""
    handlers = (
        (r'^thanks', thanks, {'require_mention': True}),
        (r'thanks,? create', thanks, {}),
        (r'^thank (.*)$', thank_someone, {'require_mention': True}),
    )
    for pattern, handler, kwargs in handlers:
        bot.listen(pattern, handler, **kwargs)
def thanks(bot, msg):
    """Thank create for being a helpful robot."""
    replies = (
        "you're welcome",
        'you are most welcome',
        'any time',
        'sure thing boss',
    )
    msg.respond(random.choice(replies))
def thank_someone(bot, msg):
    """Have create thank somebody on your behalf."""
    target = msg.match.group(1)
    msg.respond('thanks, {}!'.format(target), ping=False)
| 24.692308
| 69
| 0.595016
|
4a09461498b7e069dfcde3e2e918e4ad1ed0d36d
| 178
|
py
|
Python
|
backend/waqur/models.py
|
crowdbotics-apps/tstec20211223-32572
|
47487c035162e2fdffc1ed3c6bf7d84baf06e0ba
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/waqur/models.py
|
crowdbotics-apps/tstec20211223-32572
|
47487c035162e2fdffc1ed3c6bf7d84baf06e0ba
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/waqur/models.py
|
crowdbotics-apps/tstec20211223-32572
|
47487c035162e2fdffc1ed3c6bf7d84baf06e0ba
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.conf import settings
from django.db import models
class Test123(models.Model):
    """Generated Model"""

    # Unconstrained free-text field; name kept as-is (it maps to a DB column).
    lastName = models.TextField()
# Create your models here.
| 16.181818
| 33
| 0.741573
|
4a094719e0333960e614e1eb1baf297ada59d2d6
| 1,758
|
py
|
Python
|
mod/distributorapi/setup.py
|
onap/dcaegen2-platform
|
9e930892d28fc4a3378fad8f942c9f91cffe4698
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
mod/distributorapi/setup.py
|
onap/dcaegen2-platform
|
9e930892d28fc4a3378fad8f942c9f91cffe4698
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
mod/distributorapi/setup.py
|
onap/dcaegen2-platform
|
9e930892d28fc4a3378fad8f942c9f91cffe4698
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 1
|
2021-10-15T15:02:20.000Z
|
2021-10-15T15:02:20.000Z
|
# ============LICENSE_START=======================================================
# Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
import os
from setuptools import setup, find_packages
# extract __version__ from version file. importing distributor will lead to install failures
setup_dir = os.path.dirname(__file__)
version_globals = dict()
with open(os.path.join(setup_dir, 'distributor', 'version.py')) as version_file:
    exec(version_file.read(), version_globals)
__version__ = version_globals['__version__']

setup(
    name="distributor-api",
    version=__version__,
    packages=find_packages(),
    author="Michael Hwang",
    description=("API that manages distribution targets"),
    entry_points="""
    [console_scripts]
    start-distributor-api=distributor.http:start_http_server
    """,
    install_requires=[
        "Werkzeug==0.16.1",
        "flask-restplus",
        "Flask-Cors",
        "requests",
    ],
    zip_safe=False,
)
| 39.954545
| 92
| 0.596132
|
4a0947d736afc849b46fa9a43ed8a8dcf21d36a5
| 832
|
py
|
Python
|
agent/ACconnector/connect_module/Controller_module/interface_module.py
|
Over42M/Fun5Ga
|
7712fcae3968a80c3f7957457e5f0492fbd1f812
|
[
"Apache-2.0"
] | 7
|
2017-04-26T12:28:22.000Z
|
2021-02-09T18:59:50.000Z
|
agent/ACconnector/connect_module/Controller_module/interface_module.py
|
Over42M/Fun5Ga
|
7712fcae3968a80c3f7957457e5f0492fbd1f812
|
[
"Apache-2.0"
] | 1
|
2017-04-26T15:22:51.000Z
|
2017-04-28T04:41:51.000Z
|
agent/ACconnector/connect_module/Controller_module/interface_module.py
|
Over42M/Fun5Ga
|
7712fcae3968a80c3f7957457e5f0492fbd1f812
|
[
"Apache-2.0"
] | 8
|
2017-06-01T08:42:16.000Z
|
2020-07-23T12:30:19.000Z
|
# -*- coding: utf-8 -*-
import socket
import fcntl
import struct
import array
def all_interfaces():
    """Return (name, packed_ip) tuples for configured IPv4 interfaces.

    Uses the Linux SIOCGIFCONF ioctl; the 40-byte record stride assumes a
    64-bit struct ifreq layout. Each packed_ip is 4 raw bytes (see format_ip).
    """
    max_possible = 128  # arbitrary. raise if needed.
    # Renamed from `bytes` to avoid shadowing the builtin.
    buf_len = max_possible * 32
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Python 3 port: the buffer initializer must be a bytes literal,
    # not a str (the original '\0' * n only worked on Python 2).
    names = array.array('B', b'\0' * buf_len)
    outbytes = struct.unpack('iL', fcntl.ioctl(
        s.fileno(),
        0x8912,  # SIOCGIFCONF
        struct.pack('iL', buf_len, names.buffer_info()[0])
    ))[0]
    # tostring() was removed in Python 3.9; tobytes() is the replacement.
    namestr = names.tobytes()
    lst = []
    for i in range(0, outbytes, 40):
        # Interface name is NUL-terminated within the first 16 bytes.
        name = namestr[i:i + 16].split(b'\0', 1)[0].decode()
        ip = namestr[i + 20:i + 24]
        lst.append((name, ip))
    return lst
def format_ip(addr):
    """Format a 4-byte packed IPv4 address as dotted-quad text.

    Accepts either Python 3 ``bytes`` (indexing yields ints) or a Python 2
    style byte string (indexing yields one-character strings); the original
    implementation called ord() unconditionally and broke on Python 3.
    """
    parts = []
    for b in addr[:4]:
        parts.append(str(b if isinstance(b, int) else ord(b)))
    return '.'.join(parts)
| 25.212121
| 56
| 0.549279
|
4a0948e608e8642f010114534c45724554bbdbbf
| 3,345
|
py
|
Python
|
LSTM_Text_Generation/Analyse/Word_BestI_MaxAlg.py
|
naykun/MusicResearch
|
97bd64f23710c9f45634da0fd4674172746cfaf5
|
[
"Apache-2.0"
] | 5
|
2018-07-11T02:39:40.000Z
|
2020-09-07T19:26:46.000Z
|
LSTM_Text_Generation/Analyse/Word_BestI_MaxAlg.py
|
naykun/MusicResearch
|
97bd64f23710c9f45634da0fd4674172746cfaf5
|
[
"Apache-2.0"
] | null | null | null |
LSTM_Text_Generation/Analyse/Word_BestI_MaxAlg.py
|
naykun/MusicResearch
|
97bd64f23710c9f45634da0fd4674172746cfaf5
|
[
"Apache-2.0"
] | 2
|
2018-07-11T13:36:19.000Z
|
2018-09-01T03:42:04.000Z
|
from nltk.tokenize import WordPunctTokenizer
import io
import numpy as np
def wordtokenizer(sentence):
    """Segment *sentence* into word/punctuation tokens via NLTK."""
    return WordPunctTokenizer().tokenize(sentence)
# path = '/unsullied/sharefs/ouyangzhihao/isilon-home/AAAI/mos/data/penn/train.txt'
# with io.open(path, encoding='utf-8') as f:
# text = f.read().lower()
# print('text',text[1:100])
# words = wordtokenizer(text)
# print(np.shape(words))
# print(type(words))
# print(words[0:10])
# print('type of word:',type(words[1]))
# import ipdb; ipdb.set_trace()
# exit()
def get_text_train_data(time_step = 10, infor_length = 15, how_much_part = 1):
    """Load the PTB training text and build (context, next-word) pairs.

    time_step is accepted for interface compatibility but unused.
    how_much_part truncates the corpus to 1/how_much_part of its length.
    """
    path = '/unsullied/sharefs/ouyangzhihao/isilon-home/AAAI/mos/data/penn/train.txt'
    with io.open(path, encoding='utf-8') as f:
        text = f.read().lower()
    print('corpus length:', len(text))
    text = text[:int(len(text) / how_much_part)]
    print('truncated corpus length:', len(text))
    words = wordtokenizer(text)

    # Build semi-redundant sequences of infor_length words plus the word that follows.
    step = 1
    positions = range(0, len(words) - infor_length, step)
    sentences = [''.join(words[i: i + infor_length]) for i in positions]
    next_words = [words[i + infor_length] for i in positions]
    return sentences, next_words
class vector_pair:
    """Accumulates the labels observed for one input and reports statistics."""

    def __init__(self, input, label):
        self.labels = {}
        self.input = input
        self.add_label(label)

    def add_label(self, new_label):
        """Count one more occurrence of *new_label* for this input."""
        self.labels[new_label] = self.labels.get(new_label, 0) + 1

    def get_acc(self):
        """Fraction of observations carrying the single most frequent label."""
        counts = self.labels.values()
        return max(counts) / float(sum(counts))

    def get_total_times_in_dataset(self):
        """Total number of times this input was observed."""
        return sum(self.labels.values())
def calculate_res(text_pairs):
    """Weighted-average accuracy over all inputs, plus the total observation count."""
    weighted_acc = 0
    total_count = 0.
    for key in text_pairs:
        pair = text_pairs[key]
        count = pair.get_total_times_in_dataset()
        weighted_acc += pair.get_acc() * count
        total_count += count
    return weighted_acc / total_count, total_count
def run(length):
    """Evaluate max-algorithm accuracy for contexts of *length* words and log it."""
    train_data, train_label = get_text_train_data(infor_length=length)
    print('Build model...')
    text_pairs = {}
    for index, var in enumerate(train_data):
        if var in text_pairs:
            text_pairs[var].add_label(train_label[index])
        else:
            text_pairs[var] = vector_pair(var, train_label[index])
    print('Finish init!~')
    try:
        acc, _ = calculate_res(text_pairs)
        print(acc, _)
    except Exception as e:
        print(e)
    else:
        # Only log when accuracy was actually computed: previously a failure in
        # calculate_res left `acc` undefined and the logging line below raised
        # NameError. Also use a context manager instead of print(file=open(...)),
        # which leaked one file handle per call.
        max_acc_log = './words_max_acc_maxAlg.txt'
        # num / Acc
        with open(max_acc_log, 'a') as log_file:
            print('%d \t %f' % (length, acc), file=log_file)
    del text_pairs
    del train_data
    del train_label
    import gc
    gc.collect()
# Sweep context lengths 1..40, logging accuracy for each.
for length in range(1, 41):
    run(length)
| 28.589744
| 85
| 0.632885
|
4a09493e54a9c2ff51036aabcb4d38acbada0654
| 246
|
py
|
Python
|
tests/test_health.py
|
Divyanshu1509/covid-api
|
21d53344be723b4e4b4cdd9476c3cb088e61aeb6
|
[
"MIT"
] | null | null | null |
tests/test_health.py
|
Divyanshu1509/covid-api
|
21d53344be723b4e4b4cdd9476c3cb088e61aeb6
|
[
"MIT"
] | null | null | null |
tests/test_health.py
|
Divyanshu1509/covid-api
|
21d53344be723b4e4b4cdd9476c3cb088e61aeb6
|
[
"MIT"
] | null | null | null |
from fastapi.testclient import TestClient
from covidapi.app import app
client = TestClient(app)


def test_health():
    """The health endpoint answers HTTP 200 with an OK status payload."""
    response = client.get("/v1/health/")
    # A healthy service returns 200; the previous 300 (Multiple Choices) was a
    # typo — a redirect code would not carry the {"status": "OK"} body below.
    assert response.status_code == 200
    assert response.json() == {"status": "OK"}
| 20.5
| 46
| 0.703252
|
4a0949cc9b5a46ee27de7d382b8b300e27b428d5
| 12,826
|
py
|
Python
|
capsnet/mnist.py
|
zyg1968/learn
|
a0b3c01b6e338ad53d5ba52da6b1be07da4bf324
|
[
"BSD-2-Clause"
] | null | null | null |
capsnet/mnist.py
|
zyg1968/learn
|
a0b3c01b6e338ad53d5ba52da6b1be07da4bf324
|
[
"BSD-2-Clause"
] | null | null | null |
capsnet/mnist.py
|
zyg1968/learn
|
a0b3c01b6e338ad53d5ba52da6b1be07da4bf324
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import struct, os, sys, random
import math
import time
import threading
from tkinter import *
from PIL import Image, ImageTk
import queue
from scrolledwindow import ScrolledWindow
import tensorflow as tf
import numpy as np
import capsnet
from tqdm import tqdm
import config as cfg
gimgs = []
def train(msgqueue):
    """Train the CapsNet on MNIST, periodically validating and checkpointing.

    msgqueue: optional queue.Queue; when cfg.show_pic is set, validation
    images/reconstructions are posted to it as Messages for the GUI thread.
    """
    cwd, _ = os.path.split(sys.argv[0])
    data_path = os.path.join(cwd, cfg.data_path, cfg.name)
    save_path = os.path.join(cwd, cfg.save_path, cfg.name, 'k{}'.format(cfg.kernel_size[0]))
    log_path = os.path.join(cwd, cfg.log_path, cfg.name)
    trX, trY, max_num, valX, valY, num_val_batch = capsnet.load_mnist(True, data_path, cfg.batch_size)
    #Y = valY[:num_val_batch * batch_size].reshape((-1, 1))
    num_tr_batch = max_num
    num_val_batch = min(cfg.test_num, num_val_batch)
    capsNet = capsnet.CapsNet(is_training=True)
    tf.logging.info('Graph loaded')
    # Supervisor restores/creates checkpoints under save_path; automatic
    # time-based saving is disabled (save_model_secs=0) — saving is done
    # manually at cfg.save_freq steps below.
    sv = tf.train.Supervisor(graph=capsNet.graph,
                             logdir=save_path,
                             save_model_secs=0)
    epoch = 1
    starttime = time.time()
    startstep = 0
    with sv.managed_session() as sess:
        for i in range(epoch):
            if sv.should_stop():
                break
            for step in tqdm(range(num_tr_batch), total=num_tr_batch, ncols=70, leave=False, unit='b'):
                #for step in range(num_tr_batch):
                global_step = i * num_tr_batch + step
                if global_step>0 and global_step % cfg.train_sum_freq == 0:
                    # Every train_sum_freq steps also fetch loss/accuracy for logging.
                    _, loss, train_acc = sess.run(
                        [capsNet.train_op,
                         capsNet.total_loss,
                         capsNet.accuracy])
                    assert not np.isnan(loss), 'Something wrong! loss is nan...'
                    #sv.summary_writer.add_summary(summary_str, global_step)
                    tf.logging.info('{}: loss = {:.3f}, accurate = {:.3f}, 速度={:.2f}n/s'.format(
                        global_step, loss, train_acc / cfg.batch_size,
                        (global_step-startstep)/(time.time()-starttime)))
                    startstep = global_step
                    starttime = time.time()
                else:
                    sess.run(capsNet.train_op)
                if global_step<=0:
                    continue
                if cfg.val_sum_freq != 0 and (global_step) % cfg.val_sum_freq == 0:
                    # Validation pass over num_val_batch batches.
                    # NOTE(review): this inner loop reuses `i`, shadowing the
                    # epoch index used in the checkpoint filenames below.
                    val_acc = 0
                    for i in tqdm(range(num_val_batch), total=num_val_batch, ncols=70, leave=False, unit='b'):
                        start = i * cfg.batch_size
                        end = start + cfg.batch_size
                        decimgs, predicts, acc = sess.run(
                            [capsNet.decoded, capsNet.argmax_idx, capsNet.accuracy],
                            {capsNet.X: valX[start:end], capsNet.labels: valY[start:end]})
                        val_acc += acc
                        if cfg.show_pic and msgqueue:
                            # Scale pixels back to 0-255 uint8 for display.
                            imgs = (valX[start:end]*255)
                            imgs = imgs.reshape((-1, 28, 28)).astype(np.uint8)
                            decimgs = decimgs*255
                            decimgs = decimgs.reshape((-1, 28, 28)).astype(np.uint8)
                            msg=Messages(imgs, decimgs, predicts, valY[start:end], i, i<num_tr_batch-1)
                            msgqueue.put(msg)
                    val_acc = val_acc / (cfg.batch_size * num_val_batch)
                    tf.logging.info('validate step: {} accurate = {:.3f}'.format(global_step, val_acc))
                if (global_step) % cfg.save_freq == 0:
                    sv.saver.save(sess, save_path + '/model_epoch_%d_step_%d' % (i, global_step))
        global_step = sess.run(capsNet.global_step)
        sv.saver.save(sess, save_path + '/model_epoch_%d_step_%d' % (i, global_step))
def evaluate(msgqueue=None):
    """Run the trained CapsNet on MNIST test data and log average accuracy.

    msgqueue: optional queue.Queue; when cfg.show_pic is set, test
    images/reconstructions are posted to it as Messages for the GUI thread.
    """
    cwd, _ = os.path.split(sys.argv[0])
    data_path = os.path.join(cwd, cfg.data_path, cfg.name)
    save_path = os.path.join(cwd, cfg.save_path, cfg.name, 'k{}'.format(cfg.kernel_size[0]))
    teX, teY, max_num = capsnet.load_mnist(False, data_path, cfg.batch_size)
    num_te_batch = min(max_num, cfg.test_num)
    capsNet = capsnet.CapsNet(is_training=False)
    tf.logging.info('Graph loaded')
    with tf.Session(graph=capsNet.graph) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(save_path))
        tf.logging.info('Checkpoint restored')
        test_acc = 0
        # Start from a random batch offset so repeated runs sample different data.
        begin = random.randint(0, max_num-num_te_batch)
        for i in tqdm(range(num_te_batch), total=num_te_batch, ncols=70, leave=False, unit='b'):
            start = (i+begin) * cfg.batch_size
            end = start + cfg.batch_size
            decimgs, predicts, acc = sess.run([capsNet.decoded, capsNet.argmax_idx, capsNet.accuracy], {capsNet.X: teX[start:end], capsNet.labels: teY[start:end]})
            test_acc += acc
            if i>0 and i%cfg.train_sum_freq == 0:
                # Running accuracy over the batches processed so far.
                tf.logging.info('{}: accurate = {:.3f}%'.format(i, test_acc*100 / (cfg.batch_size * i)))
            if cfg.show_pic and msgqueue is not None:
                # Scale pixels back to 0-255 uint8 for display.
                imgs = (teX[start:end]*255)
                imgs = imgs.reshape((-1, 28, 28)).astype(np.uint8)
                decimgs = decimgs*255
                decimgs = decimgs.reshape((-1, 28, 28)).astype(np.uint8)
                msg=Messages(imgs, decimgs, predicts, teY[start:end], i, i<num_te_batch-1)
                msgqueue.put(msg)
        test_acc = test_acc*100 / (cfg.batch_size * num_te_batch)
        tf.logging.info('The average({} batchs) test accurate is: {:.3f}%'.format(num_te_batch, test_acc))
class myThread(threading.Thread):
    """Worker thread that runs either training or evaluation to completion."""

    def __init__(self, name, queue, is_training=False):
        threading.Thread.__init__(self)
        self.name = name
        self.queue = queue
        self.is_training = is_training
        self.running = False

    def run(self):
        """Execute train() or evaluate() depending on is_training."""
        print ("开启线程: " + self.name)
        self.running = True
        worker = train if self.is_training else evaluate
        worker(self.queue)
        self.running = False
        print ("退出线程: " + self.name)
class Messages(object):
    """Bundle of originals, reconstructions, predictions and labels for one batch."""

    def __init__(self, imgs, decodeimgs, predicts, labels, title, running=False):
        self.imgs = imgs
        self.decodeimgs = decodeimgs
        self.predicts = predicts
        self.labels = labels
        self.title = title
        # True while the producer thread is still emitting further batches.
        self.running = running
class DrawFigure(threading.Thread):
    """GUI thread: builds the Tk window and displays image batches from the queue."""

    def __init__(self, parent, name):
        threading.Thread.__init__(self)
        self.root=parent
        self.name = name
        self.queue = None
        if cfg.show_pic:
            self.queue=queue.Queue()
        self.running=True
        # Status label at the top of the window ("空闲" = idle).
        self.label = Label(self.root, text="空闲")
        self.label.grid(row=0, column=0, columnspan=4, sticky=EW, padx=10, pady=10)
        self.plot_frame = ScrolledWindow(self.root)
        self.plot_frame.grid(row=1, column=0, columnspan=4, sticky=NSEW, padx=10, pady=10)
        self.width = 900
        self.height = 720
        self.canvas = Canvas(self.plot_frame, bg='white')
        self.canvas.grid(row=0, column=0, sticky=W+E+N+S)
        # Buttons: prev / train / test / next; navigation stays disabled
        # until at least one batch has arrived.
        self.btnPre = Button(self.root,text = '上一个',command = self.previous, state=DISABLED)
        self.btnPre.grid(row=2, column=0, sticky=W, padx=100, pady=20)
        self.btnTrain = Button(self.root,text = '训练',command = self.train)
        self.btnTrain.grid(row=2, column=1, padx=20, pady=20)
        self.btnTest = Button(self.root,text = '测试',command = self.test)
        self.btnTest.grid(row=2, column=2, padx=20, pady=20)
        self.btnNext = Button(self.root,text = '下一个',command = self.next, state=DISABLED)
        self.btnNext.grid(row=2, column=3, padx=100, pady=20)
        self.root.columnconfigure(1, weight=1)
        self.root.columnconfigure(2, weight=1)
        self.root.rowconfigure(1, weight=1)
        self.root.protocol("WM_DELETE_WINDOW", self.quit)
        # Index of the batch currently shown; -1 means nothing shown yet.
        self.current=-1
        # Display magnification for the 28x28 MNIST images.
        self.scale = 5

    def quit(self):
        """Stop the display loop and tear down the Tk window."""
        self.running = False
        self.root.quit()
        self.root.destroy()
        exit()

    def train(self):
        """Reset the collected batches and launch a background training thread."""
        global gimgs
        gimgs = []
        self.label['text'] = '正在全力训练中……'
        self.btnTrain['state']=DISABLED
        thtrain = myThread('训练', self.queue, is_training=True)
        thtrain.setDaemon(True)
        thtrain.start()

    def test(self):
        """Reset the collected batches and launch a background evaluation thread."""
        global gimgs
        gimgs = []
        self.label['text'] = '正在全力测试中……'
        self.btnTest['state']=DISABLED
        thtrain = myThread('计算', self.queue, is_training=False)
        thtrain.setDaemon(True)
        thtrain.start()

    def previous(self):
        """Show the previous batch and update the navigation button states."""
        oldindex = self.current
        if self.current>0:
            self.current -= 1
            self.show(self.current, oldindex)
            self.label['text']=gimgs[self.current].title
        if self.current<=0:
            self.btnPre['state']=DISABLED
        if self.current<len(gimgs)-1:
            self.btnNext['state']=NORMAL

    def next(self):
        """Show the next batch and update the navigation button states."""
        oldindex = self.current
        if self.current<len(gimgs)-1:
            self.current += 1
            self.show(self.current, oldindex)
            self.label['text']=gimgs[self.current].title
        if self.current>=len(gimgs)-1:
            self.btnNext['state']=DISABLED
        if self.current>0:
            self.btnPre['state']=NORMAL

    def run(self):
        """Poll the message queue and display incoming batches until stopped."""
        print ("开启线程: " + self.name)
        # Lock acquisition for thread synchronization (left disabled).
        #threadLock.acquire()
        self.running=True
        while self.running:
            if self.queue and self.queue.qsize():
                msg=self.queue.get(0)
                #self.draw()
                self.on_msg(msg)
            time.sleep(0.1)
            num_fig = len(gimgs)
            if num_fig>0:
                if self.current>0:
                    self.btnPre['state']=NORMAL
                if self.current<num_fig-1:
                    self.btnNext['state']=NORMAL
        print ("退出线程: " + self.name)

    def on_msg(self, msg):
        """Convert a Messages batch to PhotoImages, store it in gimgs, and show it."""
        global gimgs
        if msg.title == 0:
            gimgs = []
        msgp=Messages(list(map(lambda x: arraytophoto(x, self.scale), msg.imgs)),
                      list(map(lambda x: arraytophoto(x, self.scale), msg.decodeimgs)),
                      msg.predicts, msg.labels, '第{}批'.format(msg.title+1))
        gimgs.append(msgp)
        count=len(gimgs)
        self.show(count-1, self.current)
        self.current = count-1
        if not msg.running:
            # Producer finished: re-enable navigation and the test button.
            self.btnPre['state']=NORMAL
            self.btnTest['state']=NORMAL

    def show(self, index, oldindex):
        """Draw batch *index*: each original next to its reconstruction, with labels."""
        if not cfg.show_pic:
            return
        global gimgs
        if oldindex==index:
            return
        self.canvas.delete(ALL)
        msg=gimgs[index]
        self.label['text'] = msg.title
        self.width = self.plot_frame.winfo_width()
        # Layout constants: each cell holds one 28*scale image plus spacing.
        rowspace=50
        colspace=16
        colwidth=28*self.scale+colspace
        rowheight = 28*self.scale+rowspace
        pad=30
        startx=int(pad+28*self.scale/2.0)
        starty = 100
        # Force an even column count so original/reconstruction pairs stay together.
        cols=int((self.width-startx*2)/colwidth/2)*2
        pad=int((self.width-cols*colwidth)/2.0)
        texty = starty+int(28*self.scale/2)+20
        self.height= math.ceil(len(msg.imgs)*2/cols)*(rowheight)+starty+20
        self.canvas.config(width = self.width, height=self.height)
        for i,photo in enumerate(msg.imgs):
            # Original image at an even slot, its reconstruction right after it.
            self.canvas.create_image((colwidth*((i*2)%cols)+startx, rowheight*int(i*2/cols)+starty), image=photo)
            self.canvas.create_image((colwidth*((i*2+1)%cols)+startx, rowheight*int((i*2+1)/cols)+starty), image=msg.decodeimgs[i])
            # Green "label=prediction" when correct, red "label!=prediction" otherwise.
            color='green'
            eq = '='
            if msg.predicts[i] != msg.labels[i]:
                eq = '!='
                color='red'
            self.canvas.create_text(colwidth*((i*2)%cols)+startx+colwidth//2,
                                    rowheight*int(i*2/cols)+texty,
                                    text = '{}{}{}'.format(int(msg.labels[i]),eq, int(msg.predicts[i])),
                                    font = "serif 14 bold", fill=color)
def arraytophoto(a, scale=1):
    """Convert an array of grayscale pixel values into a Tk PhotoImage, magnified *scale* times."""
    arr = np.array(a) if isinstance(a, list) else a
    if arr.ndim != 2:
        # Flatten everything else into rows of 28 pixels (MNIST width).
        arr = arr.reshape((-1, 28))
    img = Image.fromarray(arr)
    width, height = img.size
    resized = img.resize((width * scale, height * scale), Image.ANTIALIAS)
    return ImageTk.PhotoImage(resized)
if __name__ == '__main__':
    # Entry point: configure logging and config, then run the Tk GUI with the
    # display thread as a daemon so it dies with the main loop.
    tf.logging.set_verbosity(tf.logging.INFO)
    cfg.init_cfg()
    root=Tk()
    root.geometry('1080x820')
    root.resizable(width=True, height=True)
    # Window title: "CapsNet handwriting input test".
    root.title("胶囊网络手写输入测试")
    # "显示" = "display": the thread that renders incoming batches.
    thplot = DrawFigure(root, "显示")
    thplot.setDaemon(True)
    thplot.start()
    root.mainloop()
| 39.58642
| 163
| 0.567909
|
4a094a75528def8ab9ed480fef026a80f95005bd
| 1,632
|
py
|
Python
|
Backtracking/subset_sum.py
|
pritsheth/Algorithms-Python
|
b8af30cbf95a772c9b9b997a30ff2c8b56a040e0
|
[
"MIT"
] | null | null | null |
Backtracking/subset_sum.py
|
pritsheth/Algorithms-Python
|
b8af30cbf95a772c9b9b997a30ff2c8b56a040e0
|
[
"MIT"
] | null | null | null |
Backtracking/subset_sum.py
|
pritsheth/Algorithms-Python
|
b8af30cbf95a772c9b9b997a30ff2c8b56a040e0
|
[
"MIT"
] | null | null | null |
# For example, given candidate set [2, 3, 6, 7] and target 7,
# A solution set is:
# [
# [7],
# [2, 2, 3]
# ]
class Solution(object):
    """Backtracking solutions for Combination Sum (reuse allowed) and
    Combination Sum II (each candidate used at most once, unique results)."""

    def backtrack(self, result, nums, cur, path, start_index):
        """Append to *result* every combination (with reuse) summing to *cur*."""
        if cur < 0:
            return None
        if cur == 0:
            result.append(path[:])
            return None
        for i in range(start_index, len(nums)):
            path.append(nums[i])
            # Pass i (not i + 1): each candidate may be reused many times.
            self.backtrack(result, nums, cur - nums[i], path, i)
            del path[-1]

    def combinationSum(self, candidates, target):
        """Combinations of *candidates* (unlimited reuse) summing to *target*.

        Example: candidates [2, 3, 6, 7], target 7 -> [[2, 2, 3], [7]].
        """
        result = []
        self.backtrack(result, candidates, target, [], 0)
        return result

    def backtrackSum(self, result, path, nums, target, st_index):
        """Append to *result* every combination (each element used once) summing to *target*.

        *nums* must be sorted by the caller so duplicate values are adjacent.
        """
        if target < 0:
            return None
        if target == 0:
            result.append(path[:])
            return None
        for i in range(st_index, len(nums)):
            # Skip equal values at the same recursion depth; without this,
            # repeated candidates (e.g. the two 1s in [10,1,2,7,6,1,5])
            # produced duplicate combinations such as [1, 7] twice.
            if i > st_index and nums[i] == nums[i - 1]:
                continue
            path.append(nums[i])
            self.backtrackSum(result, path, nums, target - nums[i], i + 1)
            del path[-1]

    def combinationSum2(self, candidates, target):
        """Unique combinations of *candidates* (each used at most once) summing to *target*.

        Example: candidates [10, 1, 2, 7, 6, 1, 5], target 8 ->
        [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]].
        """
        result = []
        candidates.sort()
        self.backtrackSum(result, [], candidates, target, 0)
        return result
# Demo: run both variants on their example inputs and print the results.
solver = Solution()
result_with_reuse = solver.combinationSum([2, 3, 6, 7], 7)
result_single_use = solver.combinationSum2([10, 1, 2, 7, 6, 1, 5], 8)
print(result_with_reuse)
print(result_single_use)
| 22.666667
| 74
| 0.520833
|
4a094a8a677fc5b0e26c9d6fe1010d762d53d2f4
| 7,614
|
py
|
Python
|
kfac.py
|
sumitsk/pytorch-a2c-ppo-acktr
|
98c5a20eadf0e453d545bc7c333366824f379efd
|
[
"MIT"
] | 9
|
2018-10-17T21:27:52.000Z
|
2021-11-03T15:36:56.000Z
|
kfac.py
|
sumitsk/pytorch-a2c-ppo-acktr
|
98c5a20eadf0e453d545bc7c333366824f379efd
|
[
"MIT"
] | 6
|
2021-03-18T21:24:56.000Z
|
2022-03-11T23:34:25.000Z
|
kfac.py
|
sumitsk/pytorch-a2c-ppo-acktr
|
98c5a20eadf0e453d545bc7c333366824f379efd
|
[
"MIT"
] | 10
|
2017-11-30T19:52:33.000Z
|
2021-02-26T06:35:43.000Z
|
import math
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from utils import AddBias
# TODO: In order to make this code faster:
# 1) Implement _extract_patches as a single cuda kernel
# 2) Compute QR decomposition in a separate process
# 3) Actually make a general KFAC optimizer so it fits PyTorch
def _extract_patches(x, kernel_size, stride, padding):
if padding[0] + padding[1] > 0:
x = F.pad(x, (padding[1], padding[1], padding[0],
padding[0])).data # Actually check dims
x = x.unfold(2, kernel_size[0], stride[0])
x = x.unfold(3, kernel_size[1], stride[1])
x = x.transpose_(1, 2).transpose_(2, 3).contiguous()
x = x.view(
x.size(0), x.size(1), x.size(2), x.size(3) * x.size(4) * x.size(5))
return x
def compute_cov_a(a, classname, layer_info, fast_cnn):
    """Covariance of layer inputs (the K-FAC "A" factor), shape (d, d).

    Conv2d inputs are first expanded into patches; AddBias layers act on a
    constant input of ones. Anything else (e.g. Linear) is used as-is.
    """
    batch_size = a.size(0)

    if classname == 'Conv2d':
        patches = _extract_patches(a, *layer_info)
        if fast_cnn:
            # One averaged patch vector per image.
            a = patches.view(patches.size(0), -1, patches.size(-1)).mean(1)
        else:
            # One row per spatial location, normalized by the patch-grid extent.
            a = patches.view(-1, patches.size(-1)).div_(patches.size(1)).div_(patches.size(2))
    elif classname == 'AddBias':
        # A bias behaves like a weight on a constant input of 1.
        ones = torch.ones(a.size(0), 1)
        a = ones.cuda() if a.is_cuda else ones

    return a.t() @ (a / batch_size)
def compute_cov_g(g, classname, layer_info, fast_cnn):
    """Covariance of output gradients (the K-FAC "G" factor), shape (d, d).

    g: gradient w.r.t. a layer's output, batch-first.
    classname: layer type ('Conv2d', 'AddBias'; anything else is used as-is).
    layer_info / fast_cnn: control the Conv2d reshaping, mirroring compute_cov_a.
    """
    batch_size = g.size(0)

    if classname == 'Conv2d':
        if fast_cnn:
            # One summed gradient vector per image.
            g = g.view(g.size(0), g.size(1), -1)
            g = g.sum(-1)
        else:
            # One row per spatial location; note the mul_ arguments are the
            # spatial dims of the still-4D transposed tensor (evaluated before
            # `g` is rebound), i.e. the gradient is rescaled by H and W.
            g = g.transpose(1, 2).transpose(2, 3).contiguous()
            g = g.view(-1, g.size(-1)).mul_(g.size(1)).mul_(g.size(2))
    elif classname == 'AddBias':
        g = g.view(g.size(0), g.size(1), -1)
        g = g.sum(-1)

    # NOTE(review): batch_size is the pre-reshape batch dimension while
    # g.size(0) may differ after the Conv2d reshape above — presumably the
    # intended K-FAC scaling; confirm against the reference implementation.
    g_ = g * batch_size
    return g_.t() @ (g_ / g.size(0))
def update_running_stat(aa, m_aa, momentum):
    """In-place exponential moving average: m_aa <- momentum*m_aa + (1-momentum)*aa."""
    # Do the trick to keep aa unchanged and not create any additional tensors
    m_aa *= momentum / (1 - momentum)
    m_aa += aa
    m_aa *= (1 - momentum)
class SplitBias(nn.Module):
    """Wraps a module so its bias is applied by a separate AddBias layer.

    K-FAC treats weights and biases independently, so the wrapped module's
    bias is moved into a standalone AddBias and cleared on the module itself.
    """

    def __init__(self, module):
        super(SplitBias, self).__init__()
        self.module = module
        self.add_bias = AddBias(module.bias.data)
        self.module.bias = None

    def forward(self, input):
        """Apply the wrapped (now bias-free) module, then add the split-out bias."""
        return self.add_bias(self.module(input))
class KFACOptimizer(optim.Optimizer):
    """K-FAC (Kronecker-Factored Approximate Curvature) optimizer, ACKTR-style.

    Maintains running Kronecker factors of input (m_aa) and output-gradient
    (m_gg) covariances per layer via hooks, periodically eigendecomposes them,
    preconditions each gradient, then delegates the actual update to SGD.
    """

    def __init__(self,
                 model,
                 lr=0.25,
                 momentum=0.9,
                 stat_decay=0.99,
                 kl_clip=0.001,
                 damping=1e-2,
                 weight_decay=0,
                 fast_cnn=False,
                 Ts=1,
                 Tf=10):
        defaults = dict()

        # Replace every module bias with a standalone AddBias layer so the
        # bias gets its own Kronecker factors.
        def split_bias(module):
            for mname, child in module.named_children():
                if hasattr(child, 'bias'):
                    module._modules[mname] = SplitBias(child)
                else:
                    split_bias(child)

        split_bias(model)

        super(KFACOptimizer, self).__init__(model.parameters(), defaults)

        # Only these module types get K-FAC statistics.
        self.known_modules = {'Linear', 'Conv2d', 'AddBias'}

        self.modules = []
        self.grad_outputs = {}

        self.model = model
        self._prepare_model()

        self.steps = 0

        # Running covariance factors and their eigendecompositions, keyed by module.
        self.m_aa, self.m_gg = {}, {}
        self.Q_a, self.Q_g = {}, {}
        self.d_a, self.d_g = {}, {}

        self.momentum = momentum
        self.stat_decay = stat_decay

        self.lr = lr
        self.kl_clip = kl_clip
        self.damping = damping
        self.weight_decay = weight_decay

        self.fast_cnn = fast_cnn

        # Ts: statistics-update period; Tf: eigendecomposition period (steps).
        self.Ts = Ts
        self.Tf = Tf

        # The SGD learning rate is scaled so that lr is the effective rate
        # once SGD's momentum accumulation is taken into account.
        self.optim = optim.SGD(
            model.parameters(),
            lr=self.lr * (1 - self.momentum),
            momentum=self.momentum)

    def _save_input(self, module, input):
        """Forward pre-hook: update the input covariance factor m_aa.

        NOTE(review): `.volatile` is a pre-0.4 PyTorch Variable attribute; on
        modern PyTorch this check would need torch.is_grad_enabled() instead.
        """
        if input[0].volatile == False and self.steps % self.Ts == 0:
            classname = module.__class__.__name__
            layer_info = None
            if classname == 'Conv2d':
                layer_info = (module.kernel_size, module.stride,
                              module.padding)

            aa = compute_cov_a(input[0].data, classname, layer_info,
                               self.fast_cnn)

            # Initialize buffers
            if self.steps == 0:
                self.m_aa[module] = aa.clone()

            update_running_stat(aa, self.m_aa[module], self.stat_decay)

    def _save_grad_output(self, module, grad_input, grad_output):
        """Backward hook: update the output-gradient covariance factor m_gg.

        Only active while self.acc_stats is truthy (set externally by the
        training loop; it is not initialized in this class).
        """
        if self.acc_stats:
            classname = module.__class__.__name__
            layer_info = None
            if classname == 'Conv2d':
                layer_info = (module.kernel_size, module.stride,
                              module.padding)

            gg = compute_cov_g(grad_output[0].data, classname,
                               layer_info, self.fast_cnn)

            # Initialize buffers
            if self.steps == 0:
                self.m_gg[module] = gg.clone()

            update_running_stat(gg, self.m_gg[module], self.stat_decay)

    def _prepare_model(self):
        """Collect supported modules and register the statistics hooks."""
        for module in self.model.modules():
            classname = module.__class__.__name__
            if classname in self.known_modules:
                assert not ((classname in ['Linear', 'Conv2d']) and module.bias is not None), \
                    "You must have a bias as a separate layer"

                self.modules.append(module)
                module.register_forward_pre_hook(self._save_input)
                module.register_backward_hook(self._save_grad_output)

    def step(self):
        """Precondition all gradients with the K-FAC factors, then take an SGD step."""
        # Add weight decay
        if self.weight_decay > 0:
            for p in self.model.parameters():
                p.grad.data.add_(self.weight_decay, p.data)

        updates = {}
        for i, m in enumerate(self.modules):
            assert len(list(m.parameters())
                       ) == 1, "Can handle only one parameter at the moment"
            classname = m.__class__.__name__
            p = next(m.parameters())

            la = self.damping + self.weight_decay

            if self.steps % self.Tf == 0:
                # My asynchronous implementation exists, I will add it later.
                # Experimenting with different ways to this in PyTorch.
                # NOTE(review): torch.symeig is removed in PyTorch 2.x
                # (torch.linalg.eigh is the replacement).
                self.d_a[m], self.Q_a[m] = torch.symeig(
                    self.m_aa[m], eigenvectors=True)
                self.d_g[m], self.Q_g[m] = torch.symeig(
                    self.m_gg[m], eigenvectors=True)

                # Zero out numerically negligible eigenvalues.
                self.d_a[m].mul_((self.d_a[m] > 1e-6).float())
                self.d_g[m].mul_((self.d_g[m] > 1e-6).float())

            if classname == 'Conv2d':
                p_grad_mat = p.grad.data.view(p.grad.data.size(0), -1)
            else:
                p_grad_mat = p.grad.data

            # Preconditioned gradient: Q_g [ (Q_g^T G Q_a) / (d_g d_a^T + la) ] Q_a^T.
            v1 = self.Q_g[m].t() @ p_grad_mat @ self.Q_a[m]
            v2 = v1 / (
                self.d_g[m].unsqueeze(1) * self.d_a[m].unsqueeze(0) + la)
            v = self.Q_g[m] @ v2 @ self.Q_a[m].t()

            v = v.view(p.grad.data.size())
            updates[p] = v

        # Rescale the whole update so the estimated KL step stays within kl_clip.
        vg_sum = 0
        for p in self.model.parameters():
            v = updates[p]
            vg_sum += (v * p.grad.data * self.lr * self.lr).sum()

        nu = min(1, math.sqrt(self.kl_clip / vg_sum))

        for p in self.model.parameters():
            v = updates[p]
            p.grad.data.copy_(v)
            p.grad.data.mul_(nu)

        self.optim.step()
        self.steps += 1
| 31.725
| 95
| 0.532046
|
4a094be60922690a47540904a91e732d43e681dc
| 95
|
py
|
Python
|
manager/reactfront/apps.py
|
samihonk/djangorest
|
c64d3645fb34144ebf500f27e4e44f19d9cd0ca2
|
[
"MIT"
] | null | null | null |
manager/reactfront/apps.py
|
samihonk/djangorest
|
c64d3645fb34144ebf500f27e4e44f19d9cd0ca2
|
[
"MIT"
] | 11
|
2020-10-12T14:07:07.000Z
|
2022-02-26T22:21:36.000Z
|
manager/reactfront/apps.py
|
samihonk/djangorest
|
c64d3645fb34144ebf500f27e4e44f19d9cd0ca2
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class ReactfrontConfig(AppConfig):
name = 'reactfront'
| 15.833333
| 34
| 0.768421
|
4a094bf4d6a1ec48084cd0e15efc7e7ddf26de35
| 7,907
|
py
|
Python
|
lib/exporter/postprocessing.py
|
SimeonZhang/detectron2_tensorflow
|
ca03f633111d540ea91b3de75dbfa1da813647be
|
[
"Apache-2.0"
] | 3
|
2021-06-07T10:48:51.000Z
|
2022-03-01T11:43:40.000Z
|
lib/exporter/postprocessing.py
|
SimeonZhang/detectron2_tensorflow
|
ca03f633111d540ea91b3de75dbfa1da813647be
|
[
"Apache-2.0"
] | null | null | null |
lib/exporter/postprocessing.py
|
SimeonZhang/detectron2_tensorflow
|
ca03f633111d540ea91b3de75dbfa1da813647be
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from ..layers import resize_images
from ..data import fields
from ..structures import box_list
from ..structures import box_list_ops
from ..utils import id_utils
def detector_postprocess(
results,
input_shape,
thing_class_names,
label_offset=1
):
"""
Normalize the boxes related to the image size.
Assign class names for each box.
Args:
results: the raw output dict from the detector.
This object might be updated in-place.
input_shape: the image shape the input image resolution the detector sees.
thing_class_names: thing class names.
Returns:
results: the resized output from the model, based on the output resolution.
"""
result_fields = fields.ResultFields
num_instances = tf.reduce_sum(
tf.cast(results[result_fields.is_valid], tf.int32), axis=1)
max_num_instances = tf.reduce_max(num_instances)
def detector_postprocess_single_image(args):
results, input_shape = args
boxes = results[result_fields.boxes]
classes = results[result_fields.classes]
scores = results[result_fields.scores]
class_names = tf.gather(thing_class_names, classes)
result_boxlist = box_list.BoxList(boxes)
result_boxlist.add_field(result_fields.class_names, class_names)
result_boxlist.add_field(result_fields.classes, classes + label_offset)
result_boxlist.add_field(result_fields.scores, scores)
if result_fields.masks in results:
result_boxlist.add_field(result_fields.masks, results[result_fields.masks])
result_boxlist = box_list_ops.boolean_mask(
result_boxlist, results[result_fields.is_valid]
)
result_boxlist = box_list_ops.to_normalized_coordinates(
result_boxlist, input_shape, check_range=False)
result_boxlist = box_list_ops.sort_by_field(result_boxlist, result_fields.scores)
result_boxlist = box_list_ops.pad_or_clip_boxlist(result_boxlist, max_num_instances)
results = result_boxlist.as_tensor_dict()
return results
expected_fields = [
result_fields.boxes,
result_fields.classes,
result_fields.scores,
result_fields.masks
]
dtype = {k: v.dtype for k, v in results.items() if k in expected_fields}
dtype[result_fields.class_names] = tf.string
results = tf.map_fn(
detector_postprocess_single_image, [results, input_shape], dtype=dtype
)
results["num_detections"] = num_instances
return results
def sem_seg_postprocess(
sem_seg,
input_shape,
output_shape,
stuff_class_names,
stuff_ignore_value=None,
stuff_area_limit=0.001
):
"""
Return semantic segmentation predictions in the original resolution.
The input images are often resized when entering semantic segmentor. Moreover, in same
cases, they also padded inside segmentor to be divisible by maximum network stride.
As a result, we often need the predictions of the segmentor in a different
resolution from its inputs.
Args:
sem_seg (Tensor): semantic segmentation prediction logits. A tensor of shape (N, H, W),
where H, W are the height and width of the prediction.
input_shape (tuple): image size that segmentor is taking as input.
output_height, output_width: the desired output resolution.
Returns:
results: the resized output from the model, based on the output resolution.
"""
serving_fields = fields.ServingFields
num_classes = len(stuff_class_names)
stuff_included = [True] * num_classes
stuff_included[0] = False
stuff_included[-1] = False
if stuff_ignore_value is not None and stuff_ignore_value < num_classes:
stuff_included[stuff_ignore_value] = False
def sem_seg_postprocess_single_image(args):
sem_seg, input_shape, output_shape = args
sem_seg = sem_seg[:input_shape[0], :input_shape[1]]
one_hot_sem_seg = tf.one_hot(sem_seg, num_classes)
total_area = tf.cast(input_shape[0] * input_shape[1], tf.float32)
area_per_class = tf.cast(tf.reduce_sum(one_hot_sem_seg, [0, 1]), tf.float32)
area_per_class = tf.divide(area_per_class, total_area)
show_up_mask = tf.logical_and(
tf.greater(area_per_class, stuff_area_limit), stuff_included
)
classes = tf.boolean_mask(tf.range(num_classes), show_up_mask)
class_names = tf.boolean_mask(stuff_class_names, show_up_mask)
areas = tf.boolean_mask(area_per_class, show_up_mask)
padding_end = num_classes - tf.shape(classes)[0]
one_hot_sem_seg = resize_images(
one_hot_sem_seg, output_shape, align_corners=True
)
one_hot_sem_seg = one_hot_sem_seg * tf.cast(show_up_mask, tf.float32)
one_hot_sem_seg = tf.round(one_hot_sem_seg)
sem_seg = tf.reduce_sum(
one_hot_sem_seg * tf.range(num_classes, dtype=tf.float32), axis=-1
)
sem_seg = id_utils.id2rgb(sem_seg)
sem_seg = tf.image.encode_png(tf.cast(sem_seg, tf.uint8))
classes = tf.pad(classes, [[0, padding_end]])
class_names = tf.pad(class_names, [[0, padding_end]])
areas = tf.pad(areas, [[0, padding_end]])
results = {}
results[serving_fields.sem_seg] = sem_seg
results[serving_fields.sem_seg_classes] = classes
results[serving_fields.sem_seg_class_names] = class_names
results[serving_fields.sem_seg_areas] = areas
return results
dtype = {
serving_fields.sem_seg: tf.string,
serving_fields.sem_seg_classes: tf.int64,
serving_fields.sem_seg_class_names: tf.string,
serving_fields.sem_seg_areas: tf.float32
}
results = tf.map_fn(
sem_seg_postprocess_single_image, [sem_seg, input_shape, output_shape], dtype=dtype
)
return results
def panoptic_postprocess(
segments_info,
input_shape,
thing_contiguous_id_to_dataset_id,
stuff_contiguous_id_to_dataset_id,
class_names,
):
"""
Postprocess the panoptic results and return results in detection manner.
return:
results: see `detector_postprocess`.
"""
result_fields = fields.ResultFields
def get_detection_like_result_single_image(segments_info):
instance_dict = {
result_fields.boxes: segments_info["bbox"],
result_fields.classes: segments_info["category_id"],
result_fields.scores: segments_info["score"],
result_fields.is_valid: segments_info["is_valid"]
}
boxlist = box_list.BoxList.from_tensor_dict(instance_dict)
isthing = segments_info["isthing"]
thing_boxlist = box_list_ops.boolean_mask(boxlist, isthing)
stuff_boxlist = box_list_ops.boolean_mask(boxlist, tf.logical_not(isthing))
thing_classes = tf.gather(thing_contiguous_id_to_dataset_id, thing_boxlist.get_field(result_fields.classes))
stuff_classes = tf.gather(stuff_contiguous_id_to_dataset_id, stuff_boxlist.get_field(result_fields.classes))
thing_classes = tf.cast(thing_classes, tf.int64)
stuff_classes = tf.cast(stuff_classes, tf.int64)
thing_boxlist.set_field(result_fields.classes, thing_classes)
stuff_boxlist.set_field(result_fields.classes, stuff_classes)
boxlist = box_list_ops.concatenate([thing_boxlist, stuff_boxlist])
return boxlist.as_tensor_dict()
dtype = {
result_fields.boxes: tf.float32,
result_fields.classes: tf.int64,
result_fields.scores: tf.float32,
result_fields.is_valid: tf.bool
}
detection_like_result = tf.map_fn(
get_detection_like_result_single_image, segments_info, dtype=dtype
)
return detector_postprocess(detection_like_result, input_shape, class_names)
| 39.143564
| 116
| 0.703427
|
4a094c1d7b7aa87a80ab10ea8aa5d74b1952896e
| 2,937
|
py
|
Python
|
sisyphus/python/status_pb2.py
|
AnsgarSchmidt/antikeimena
|
8a8557de483c981b73195d62e4693d8c22d6ea68
|
[
"Apache-2.0"
] | null | null | null |
sisyphus/python/status_pb2.py
|
AnsgarSchmidt/antikeimena
|
8a8557de483c981b73195d62e4693d8c22d6ea68
|
[
"Apache-2.0"
] | null | null | null |
sisyphus/python/status_pb2.py
|
AnsgarSchmidt/antikeimena
|
8a8557de483c981b73195d62e4693d8c22d6ea68
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: status.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='status.proto',
package='antikeimena',
syntax='proto3',
serialized_pb=_b('\n\x0cstatus.proto\x12\x0b\x61ntikeimena\"O\n\x06Status\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0e\n\x06uptime\x18\x02 \x01(\r\x12\x15\n\rsensorInError\x18\x03 \x01(\r\x12\r\n\x05\x64\x65\x62ug\x18\x04 \x01(\rb\x06proto3')
)
_STATUS = _descriptor.Descriptor(
name='Status',
full_name='antikeimena.Status',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='antikeimena.Status.version', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uptime', full_name='antikeimena.Status.uptime', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sensorInError', full_name='antikeimena.Status.sensorInError', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='debug', full_name='antikeimena.Status.debug', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=29,
serialized_end=108,
)
DESCRIPTOR.message_types_by_name['Status'] = _STATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), dict(
DESCRIPTOR = _STATUS,
__module__ = 'status_pb2'
# @@protoc_insertion_point(class_scope:antikeimena.Status)
))
_sym_db.RegisterMessage(Status)
# @@protoc_insertion_point(module_scope)
| 32.274725
| 244
| 0.738168
|
4a094f1ea1f9238d1f438fd71170ee8b3a03a43a
| 7,591
|
py
|
Python
|
ants/tests/EC.py
|
luv-sic/composing-programs
|
27391ac844df045b865524f1936682a0872569b5
|
[
"MIT"
] | 1
|
2021-11-27T08:53:01.000Z
|
2021-11-27T08:53:01.000Z
|
ants/tests/EC.py
|
luvsic3/composing-programs
|
27391ac844df045b865524f1936682a0872569b5
|
[
"MIT"
] | null | null | null |
ants/tests/EC.py
|
luvsic3/composing-programs
|
27391ac844df045b865524f1936682a0872569b5
|
[
"MIT"
] | null | null | null |
test = {
'name': 'Problem EC',
'points': 2,
'suites': [
{
'cases': [
{
'code': r"""
>>> # Testing status parameters
>>> slow = SlowThrower()
>>> scary = ScaryThrower()
>>> SlowThrower.food_cost
c9452203eb0b0f0bd2454586a6c2fc5c
# locked
>>> ScaryThrower.food_cost
50ae32be3e31df6c59633df7fdfb3a72
# locked
>>> slow.armor
d89cf7c79d5a479b0f636734143ed5e6
# locked
>>> scary.armor
d89cf7c79d5a479b0f636734143ed5e6
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> # Testing Slow
>>> slow = SlowThrower()
>>> bee = Bee(3)
>>> colony.places["tunnel_0_0"].add_insect(slow)
>>> colony.places["tunnel_0_4"].add_insect(bee)
>>> slow.action(colony)
>>> colony.time = 1
>>> bee.action(colony)
>>> bee.place.name # SlowThrower should cause slowness on odd turns
040b6ad98a7360eba8d493c250a9b82e
# locked
>>> colony.time += 1
>>> bee.action(colony)
>>> bee.place.name # SlowThrower should cause slowness on odd turns
8344c19df8015306b462119efc8419cb
# locked
>>> for _ in range(3):
... colony.time += 1
... bee.action(colony)
>>> bee.place.name
7f44338412808161209e944b1ee0f78c
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> # Testing Scare
>>> error_msg = "ScaryThrower doesn't scare for exactly two turns."
>>> scary = ScaryThrower()
>>> bee = Bee(3)
>>> colony.places["tunnel_0_0"].add_insect(scary)
>>> colony.places["tunnel_0_4"].add_insect(bee)
>>> scary.action(colony)
>>> bee.action(colony)
>>> bee.place.name # ScaryThrower should scare for two turns
46f9851313dc368f747e69f1670450da
# locked
>>> bee.action(colony)
>>> bee.place.name # ScaryThrower should scare for two turns
32a5320f2c5021a9b66582af8b364dc7
# locked
>>> bee.action(colony)
>>> bee.place.name
46f9851313dc368f747e69f1670450da
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> # Testing if effects stack
>>> slow = SlowThrower()
>>> bee = Bee(3)
>>> slow_place = colony.places["tunnel_0_0"]
>>> bee_place = colony.places["tunnel_0_4"]
>>> slow_place.add_insect(slow)
>>> bee_place.add_insect(bee)
>>> for _ in range(2): # slow bee two times
... slow.action(colony)
>>> colony.time = 1
>>> for _ in range(5): # bee should only move on odd times
... bee.action(colony)
... colony.time += 1
>>> bee.place.name
'tunnel_0_2'
>>> colony.time += 1 # slow effects have worn off
>>> bee.action(colony)
>>> bee.place.name
'tunnel_0_1'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Testing multiple scared bees
>>> scare1 = ScaryThrower()
>>> scare2 = ScaryThrower()
>>> bee1 = Bee(3)
>>> bee2 = Bee(3)
>>> colony.places["tunnel_0_0"].add_insect(scare1)
>>> colony.places["tunnel_0_1"].add_insect(bee1)
>>> colony.places["tunnel_0_4"].add_insect(scare2)
>>> colony.places["tunnel_0_5"].add_insect(bee2)
>>> scare1.action(colony)
>>> scare2.action(colony)
>>> bee1.action(colony)
>>> bee2.action(colony)
>>> bee1.place.name
'tunnel_0_2'
>>> bee2.place.name
'tunnel_0_6'
>>> bee1.action(colony)
>>> bee2.action(colony)
>>> bee1.place.name
'tunnel_0_3'
>>> bee2.place.name
'tunnel_0_7'
>>> bee1.action(colony)
>>> bee2.action(colony)
>>> bee1.place.name
'tunnel_0_2'
>>> bee2.place.name
'tunnel_0_6'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> scare = ScaryThrower()
>>> bee = Bee(3)
>>> colony.places["tunnel_0_0"].add_insect(scare)
>>> colony.places["tunnel_0_1"].add_insect(bee)
>>> scare.action(colony)
>>> bee.action(colony)
>>> bee.place.name
ba5c35f55ba3229d1eb021382d9d19c5
# locked
>>> bee.action(colony)
>>> bee.place.name
8344c19df8015306b462119efc8419cb
# locked
>>> #
>>> # Same bee should not be scared more than once
>>> scare.action(colony)
>>> bee.action(colony)
>>> bee.place.name
ba5c35f55ba3229d1eb021382d9d19c5
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> # Testing long effect stack
>>> scary = ScaryThrower()
>>> slow = SlowThrower()
>>> bee = Bee(3)
>>> colony.places["tunnel_0_0"].add_insect(scary)
>>> colony.places["tunnel_0_1"].add_insect(slow)
>>> colony.places["tunnel_0_3"].add_insect(bee)
>>> scary.action(colony) # scare bee once
>>> colony.time = 0
>>> bee.action(colony) # scared
>>> bee.place.name
'tunnel_0_4'
>>> for _ in range(3): # slow bee three times
... slow.action(colony)
>>> colony.time = 1
>>> bee.action(colony) # scared, but also slowed thrice
>>> bee.place.name
'tunnel_0_4'
>>> colony.time = 2
>>> bee.action(colony) # scared and slowed thrice
>>> bee.place.name
'tunnel_0_5'
>>> colony.time = 3
>>> bee.action(colony) # slowed thrice
>>> bee.place.name
'tunnel_0_5'
>>> colony.time = 4
>>> bee.action(colony) # slowed twice
>>> bee.place.name
'tunnel_0_4'
>>> colony.time = 5
>>> bee.action(colony) # slowed twice
>>> bee.place.name
'tunnel_0_4'
>>> colony.time = 6
>>> bee.action(colony) # slowed once
>>> bee.place.name
'tunnel_0_3'
>>> colony.time = 7
>>> bee.action(colony) # status effects have worn off
>>> bee.place.name
'tunnel_0_2'
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> from ants import *
>>> hive, layout = Hive(AssaultPlan()), dry_layout
>>> dimensions = (1, 9)
>>> colony = AntColony(None, hive, ant_types(), layout, dimensions)
""",
'teardown': '',
'type': 'doctest'
}
]
}
| 29.885827
| 77
| 0.457779
|
4a094f5f2e36ff2a525403ca285885d04e07c029
| 4,410
|
py
|
Python
|
qiskit/aqua/utils/measurement_error_mitigation.py
|
IanJoel/qiskit-aqua
|
7707172d01f0539358f1ce2406f307e830105303
|
[
"Apache-2.0"
] | null | null | null |
qiskit/aqua/utils/measurement_error_mitigation.py
|
IanJoel/qiskit-aqua
|
7707172d01f0539358f1ce2406f307e830105303
|
[
"Apache-2.0"
] | null | null | null |
qiskit/aqua/utils/measurement_error_mitigation.py
|
IanJoel/qiskit-aqua
|
7707172d01f0539358f1ce2406f307e830105303
|
[
"Apache-2.0"
] | 2
|
2020-02-13T02:17:58.000Z
|
2020-08-09T07:56:25.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Measurement error mitigation """
import logging
from qiskit import compiler
from qiskit.ignis.mitigation.measurement import (complete_meas_cal,
CompleteMeasFitter, TensoredMeasFitter)
from ..aqua_error import AquaError
logger = logging.getLogger(__name__)
def get_measured_qubits(transpiled_circuits):
"""
Retrieve the measured qubits from transpiled circuits.
Args:
transpiled_circuits ([QuantumCircuit]): a list of transpiled circuits
Returns:
list[int]: the qubit mapping to-be-used for measure error mitigation
Raises:
AquaError: invalid qubit mapping
"""
qubit_mapping = None
for qc in transpiled_circuits:
measured_qubits = []
for inst, qargs, _ in qc.data:
if inst.name != 'measure':
continue
measured_qubits.append(qargs[0][1])
if qubit_mapping is None:
qubit_mapping = measured_qubits
elif qubit_mapping != measured_qubits:
raise AquaError("The qubit mapping of circuits are different."
"Currently, we only support single mapping.")
return qubit_mapping
def get_measured_qubits_from_qobj(qobj):
"""
Retrieve the measured qubits from transpiled circuits.
Args:
qobj (QasmObj): qobj
Returns:
list[int]: the qubit mapping to-be-used for measure error mitigation
Raises:
AquaError: invalid qubit mapping
"""
qubit_mapping = None
for exp in qobj.experiments:
measured_qubits = []
for instr in exp.instructions:
if instr.name != 'measure':
continue
measured_qubits.append(instr.qubits[0])
if qubit_mapping is None:
qubit_mapping = measured_qubits
else:
if qubit_mapping != measured_qubits:
raise AquaError("The qubit mapping of circuits are different."
"Currently, we only support single mapping.")
return qubit_mapping
# pylint: disable=invalid-name
def build_measurement_error_mitigation_qobj(qubit_list, fitter_cls, backend,
backend_config=None, compile_config=None,
run_config=None):
"""
Args:
qubit_list (list[int]): list of qubits used in the algorithm
fitter_cls (callable): CompleteMeasFitter or TensoredMeasFitter
backend (BaseBackend): backend instance
backend_config (dict, optional): configuration for backend
compile_config (dict, optional): configuration for compilation
run_config (RunConfig, optional): configuration for running a circuit
Returns:
QasmQobj: the Qobj with calibration circuits at the beginning
list[str]: the state labels for build MeasFitter
list[str]: the labels of the calibration circuits
Raises:
AquaError: when the fitter_cls is not recognizable.
"""
circlabel = 'mcal'
if not qubit_list:
raise AquaError("The measured qubit list can not be [].")
if fitter_cls == CompleteMeasFitter:
meas_calibs_circuits, state_labels = \
complete_meas_cal(qubit_list=qubit_list, circlabel=circlabel)
elif fitter_cls == TensoredMeasFitter:
# TODO support different calibration
raise AquaError("Does not support TensoredMeasFitter yet.")
else:
raise AquaError("Unknown fitter {}".format(fitter_cls))
t_meas_calibs_circuits = compiler.transpile(meas_calibs_circuits, backend,
**backend_config, **compile_config)
cals_qobj = compiler.assemble(t_meas_calibs_circuits, backend, **run_config.to_dict())
return cals_qobj, state_labels, circlabel
| 34.453125
| 90
| 0.647392
|
4a094f9b372520025ed51db7f84e4d063c562218
| 778
|
py
|
Python
|
post/models.py
|
LordK1/TesterSite
|
83b3f55d66dcd8c7baf577a7c6cfbe58b1835408
|
[
"MIT"
] | null | null | null |
post/models.py
|
LordK1/TesterSite
|
83b3f55d66dcd8c7baf577a7c6cfbe58b1835408
|
[
"MIT"
] | null | null | null |
post/models.py
|
LordK1/TesterSite
|
83b3f55d66dcd8c7baf577a7c6cfbe58b1835408
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Category(models.Model):
title = models.CharField(max_length=100, null=False, blank=False)
created_date = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = "Category"
verbose_name_plural = "Categories"
def __str__(self):
return self.title
class Post(models.Model):
title = models.CharField(max_length=100, null=False, blank=False)
category = models.ManyToManyField(Category, related_name='posts')
created_date = models.DateTimeField(auto_now_add=True)
updated_date = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = "Post"
verbose_name_plural = "Posts"
def __str__(self):
return self.title
| 26.827586
| 69
| 0.70437
|
4a09506aa8f1ea05fc74096df6ae18775869a1c9
| 3,756
|
py
|
Python
|
HiC_count_bin_linkages_singleChr_diagonal_v1a.py
|
elifesciences-publications/Stadlerlab-hi-c
|
95e304bf8cb2cd606f52f94bb6369fef4f16f73a
|
[
"MIT"
] | null | null | null |
HiC_count_bin_linkages_singleChr_diagonal_v1a.py
|
elifesciences-publications/Stadlerlab-hi-c
|
95e304bf8cb2cd606f52f94bb6369fef4f16f73a
|
[
"MIT"
] | null | null | null |
HiC_count_bin_linkages_singleChr_diagonal_v1a.py
|
elifesciences-publications/Stadlerlab-hi-c
|
95e304bf8cb2cd606f52f94bb6369fef4f16f73a
|
[
"MIT"
] | null | null | null |
"""
This script takes a paired alignment file and, assigns each end to a bin (some chunk of
a chromosome defined by supplied bin size), and prints out the bin-bin counts for only
contacts within some width (controlled by width variable, currently set to 500 bins) of
the diagonal (distance between the bins).
Prints a unique format. The file starts with a series of lines that start with a #.
These are the total bin counts, used to do normalization, if desired, subsequently. After
that, the format is compressed:
chromosome bin1 bin2 counts
Some programming notes to check for: handling double counting of diagonal.
"""
from optparse import OptionParser
import sys
import re
def parse_options():
parser = OptionParser()
parser.add_option("-f", "--files", dest="filenames",
help="paired alignment files, comma separated", metavar="FILE")
parser.add_option("-b", "--bin_size",
dest="bin_size", default=1000000,
help="bin size")
parser.add_option("-w", "--width",
dest="width", default=1000,
help="width in bins from diagonal")
parser.add_option("-c", "--chromosome",
dest="chromosome", default='none',
help="chromosome")
parser.add_option("-s", "--file_stem",
dest="file_stem", default='none',
help="output file stem. Adds diag_bin_counts bin size and chr")
(options, args) = parser.parse_args()
return options
def Add_read (bin1, bin2):
if (bin1 in bin_bin_counts):
if(bin2 in bin_bin_counts[bin1]):
bin_bin_counts[bin1][bin2] = bin_bin_counts[bin1][bin2] + 1
else:
bin_bin_counts[bin1][bin2] = 1
else:
bin_bin_counts[bin1] = {}
bin_bin_counts[bin1][bin2] = 1
def add_to_totals(bin):
if (bin in bin_totals):
bin_totals[bin] = bin_totals[bin] + 1
else:
bin_totals[bin] = 1
options = parse_options()
bin_size = int(options.bin_size)
selected_chromosome = options.chromosome
filenames = options.filenames
files = filenames.split(',')
bin_bin_counts = {}
max_bin = 1
width = int(options.width)
bin_totals = {}
line_count = 0
for f in files:
file1 = open(f, 'r')
for line in file1:
line_count = line_count + 1
if (line_count % 10000000 == 0):
print('.')
line = line.rstrip()
items = line.split()
(chr1, Lmost1, chr2, Lmost2) = items[2], int(items[3]), items[5], int(items[6])
if (selected_chromosome == chr1 == chr2):
bin1 = int(Lmost1 / bin_size)
bin2 = int(Lmost2 / bin_size)
add_to_totals(bin1)
if (bin1 != bin2):
add_to_totals(bin2)
'''update_max(bin1)
update_max(bin2)'''
if (bin1 > max_bin):
max_bin = bin1
if (bin2 > max_bin):
max_bin = bin2
if (abs(bin1 - bin2) <= width):
Add_read(bin1, bin2)
# Avoid double counting on diagonal. This will be triggered unless chromosome and bin are the same
if (bin1 != bin2):
Add_read(bin2, bin1)
file1.close()
file_stem = ''
if (options.file_stem == 'none'):
file_stem = re.sub('.txt', '', files[0])
else:
file_stem = options.file_stem
outfile = open(file_stem + '_diagBinCounts_' + str(bin_size) + 'bp_chr' + selected_chromosome + '.txt','w')
print('done reading\n')
for bin in range(0, max_bin + 1):
outfile.write('#' + selected_chromosome + '\t' + str(bin) + '\t')
if (bin in bin_totals):
outfile.write(str(bin_totals[bin]) + '\n')
else:
outfile.write('0\n')
for bin1 in range(0, max_bin + 1):
for bin2 in range(max(0, bin1 - width), min(max_bin, bin1 + width)):
outfile.write(selected_chromosome + '\t' + str(bin1) + '\t' + str(bin2) + '\t')
if (bin1 in bin_bin_counts):
if(bin2 in bin_bin_counts[bin1]):
outfile.write(str(bin_bin_counts[bin1][bin2]))
else:
outfile.write('0')
else:
outfile.write('0')
outfile.write('\n')
outfile.close()
| 27.617647
| 107
| 0.667732
|
4a0950728dfe34dd05b5d6b699644e0c26cfee8e
| 1,144
|
py
|
Python
|
setup.py
|
a1black/booktag
|
f96572c5dc27fa674084e2a35b043d846337a9dc
|
[
"MIT"
] | null | null | null |
setup.py
|
a1black/booktag
|
f96572c5dc27fa674084e2a35b043d846337a9dc
|
[
"MIT"
] | null | null | null |
setup.py
|
a1black/booktag
|
f96572c5dc27fa674084e2a35b043d846337a9dc
|
[
"MIT"
] | null | null | null |
import setuptools
setuptools.setup(
name='booktag-a1black',
version='0.0.1',
author='a1black',
description='A simple script for fill in the tags in an audio book.',
license='GPLv2',
url='https://github.com/a1black/booktag',
classifiers=[
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Topic :: Multimedia :: Sound/Audio',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Multimedia :: Sound/Audio'
],
keywords='metadata, tagging, audio',
packages=['booktag', 'booktag.commands'],
python_requires='>=3.6',
include_package_data=True,
install_requires=[
'humanize>=0.5.0',
'mutagen>=1.43.0',
'natsort>=6.2.0',
'Pillow>=6.2.0',
'psutil>=5.6.0',
'python-magic>=0.4.15',
'ruamel.yaml>=0.16.5',
'tqdm>=4.43.0',
],
entry_points={
'console_scripts': [
'btag=booktag.__main__:main'
]
}
)
| 26
| 73
| 0.55507
|
4a095084b55c57d26a64ca0109389bcf13501e1f
| 747
|
py
|
Python
|
tbats/abstract/__init__.py
|
arita37/tbats
|
4e726919f08e39e74dd70a592b5258dfc7b25953
|
[
"MIT"
] | 1
|
2019-07-21T15:38:12.000Z
|
2019-07-21T15:38:12.000Z
|
tbats/abstract/__init__.py
|
arita37/tbats
|
4e726919f08e39e74dd70a592b5258dfc7b25953
|
[
"MIT"
] | null | null | null |
tbats/abstract/__init__.py
|
arita37/tbats
|
4e726919f08e39e74dd70a592b5258dfc7b25953
|
[
"MIT"
] | null | null | null |
from .ArrayHelper import ArrayHelper
from .MatrixBuilderInterface import MatrixBuilderInterface
from .MatrixBuilderCache import MatrixBuilderCache
from .MatrixBuilder import MatrixBuilder
from .Model import Model
from .ModelParams import ModelParams
from .Components import Components
from .ComponentMatrix import ComponentMatrix
from .ParamsOptimizer import ParamsOptimizer
from .SeedFinder import SeedFinder
from .Case import Case
from .Estimator import Estimator
from .ContextInterface import ContextInterface
__all__ = ['Model', 'MatrixBuilderInterface', 'MatrixBuilderCache', 'MatrixBuilder', 'ModelParams', 'ArrayHelper',
'Components', 'ParamsOptimizer', 'SeedFinder', 'Case', 'ContextInterface', 'Estimator', 'ComponentMatrix']
| 43.941176
| 117
| 0.821954
|
4a0950981a1882e84f2d1e75c5bd45d38b882cc7
| 105
|
py
|
Python
|
toggl_report/toggl_report_app/apps.py
|
naokodama/toggl_report
|
5280ec40b27872b90c72e07613829f7091ce302f
|
[
"MIT"
] | null | null | null |
toggl_report/toggl_report_app/apps.py
|
naokodama/toggl_report
|
5280ec40b27872b90c72e07613829f7091ce302f
|
[
"MIT"
] | null | null | null |
toggl_report/toggl_report_app/apps.py
|
naokodama/toggl_report
|
5280ec40b27872b90c72e07613829f7091ce302f
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class TogglReportAppConfig(AppConfig):
name = 'toggl_report_app'
| 17.5
| 38
| 0.790476
|
4a0951f3537d8db27e48cc4006861e052f92d0e3
| 8,832
|
py
|
Python
|
2. DCT and Quantization Algorithm/Assignment_2_DCT.py
|
IbrahimEl-Shal/Digital_Video_Processing
|
ce5649dba94ba5c50bc3fe6740d3059a99a6ea8f
|
[
"MIT"
] | 2
|
2021-03-08T01:59:33.000Z
|
2021-03-08T01:59:39.000Z
|
2. DCT and Quantization Algorithm/Assignment_2_DCT.py
|
IbrahimEl-Shal/Digital_Video_Processing
|
ce5649dba94ba5c50bc3fe6740d3059a99a6ea8f
|
[
"MIT"
] | null | null | null |
2. DCT and Quantization Algorithm/Assignment_2_DCT.py
|
IbrahimEl-Shal/Digital_Video_Processing
|
ce5649dba94ba5c50bc3fe6740d3059a99a6ea8f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 19:53:08 2019
@author: Ibrahim El-Shal
Assignment 2:
- Develop a DCT algorithm to encode images in YUV format.
- Use 8x8 block and computer the DCT coefficients.
- Use a quantization of your choice.
- Reconstruct the images using IDCT.
- Compute the PSNR between the Source Image and the Reconstructed Images.
"""
# In[1]: Import Packages
## Importing OpenCV(cv2) module
import cv2
import math
import itertools
import numpy as np
from scipy import misc
# In[2]: Display The Image and Save it
def Display_Image(Name, Image):
    """Show *Image* in a window titled *Name*, save it as '<Name>.png',
    wait up to 2 seconds, then close all OpenCV windows. Always returns 1.
    """
    cv2.imshow(Name, Image)
    cv2.imwrite(Name + '.png', Image)
    # Keep the window on screen briefly (2000 ms) so the user can see it.
    cv2.waitKey(2000)
    cv2.destroyAllWindows()
    return 1
# In[3]: Split & Merge channels
def Split_Channels(img):
    """Split a multi-channel image into a tuple of single-channel arrays."""
    channels = cv2.split(img)
    return channels
def Merge_Channels(ch1, ch2, ch3):
    """Merge three single-channel arrays back into one 3-channel image."""
    merged = cv2.merge((ch1, ch2, ch3))
    return merged
# In[4]: Convert to 3D Image
def Image3D(img):
    """Replicate a single-channel (grayscale) image into three RGB channels."""
    converted = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    return converted
# In[5]: Image Normalization
def Normalize(Image):
    """Scale pixel values down by 128 (maps 0..255 roughly into 0..2)."""
    scaled = Image / 128
    return scaled
# In[6]: Convert from RGB to YUV Image
def RGB_To_YUV(RGB_Image):
    """Convert an RGB image to YUV via a matrix product.

    The input is scaled by 1/128, multiplied by the RGB->YUV coefficient
    matrix, rescaled back, cast to uint8, and the two chroma planes are
    offset by +128 into the unsigned range.
    """
    scaled = Normalize(RGB_Image)
    rgb_to_yuv = np.array([[0.299, -0.14713, 0.615],
                           [0.587, -0.28886, -0.51499],
                           [0.114, 0.436, -0.10001]])
    yuv = scaled.dot(rgb_to_yuv) * 128
    yuv = yuv.astype(np.uint8)
    # Shift chroma planes into [0, 255].
    yuv[:, :, 1] += 128  # U
    yuv[:, :, 2] += 128  # V
    print("--------- Image Converted to YUV ---------")
    print("\n")
    return yuv
# In[7]: Convert from RGB to YUV Image
def YUV_To_RGB(YUV_Image):
    """Convert a YUV image back to RGB via the inverse coefficient matrix.

    NOTE(review): the 128 offsets are removed from channels 0 and 1 here,
    while RGB_To_YUV added them to channels 1 and 2 — preserved as-is;
    confirm against the intended color pipeline.
    """
    yuv_to_rgb = np.array([[1, 1, 1],
                           [0, -0.39465, 2.03211],
                           [1.13983, -0.58060, 0]])
    rgb = YUV_Image.dot(yuv_to_rgb).astype(np.uint8)
    # uint8 arithmetic wraps modulo 256 when undoing the offset.
    rgb[:, :, 0] -= 128
    rgb[:, :, 1] -= 128
    return rgb
# In[8]: Convert RGB to Gray Scale
def rgb2gray(rgb):
    """Convert an RGB image to grayscale with ITU-R 601 luma weights."""
    weights = [0.2989, 0.5870, 0.1140]
    return np.dot(rgb[..., :3], weights)
# In[9]: Get Needed blocked 8x8
def Get_Blocks(Channel):
    """Pad a 2D channel up to multiples of 8 and split it into 8x8 blocks.

    Parameters
    ----------
    Channel : 2D numpy array
        Single-channel image.

    Returns
    -------
    list of 8x8 numpy arrays, in row-major block order.

    Raises
    ------
    ValueError
        If the input is not a single-channel 2D array.
    """
    # prevent against multiple-channel images
    if len(Channel.shape) != 2:
        raise ValueError('Input image must be a single channel 2D array')
    # shape of image
    height, width = Channel.shape
    # padded dimensions, rounded up to the next multiple of the block size
    n_height = np.int32(math.ceil(height / 8)) * 8
    n_width = np.int32(math.ceil(width / 8)) * 8
    # Zero-pad on the bottom/right and copy the source into the top-left
    # corner. Bug fix: the destination slice must use the ORIGINAL
    # height/width — the previous [0:n_height, 0:n_width] raised a shape
    # mismatch for any image whose sides are not multiples of 8.
    padded_img = np.zeros((n_height, n_width))
    padded_img[0:height, 0:width] = Channel
    # split into blocks
    img_blocks = [padded_img[j:j + 8, i:i + 8]
                  for (j, i) in itertools.product(range(0, n_height, 8),
                                                  range(0, n_width, 8))]
    print("--------- Get The Needed Blockes ---------")
    return (img_blocks)
# In[10]: Get the alfa values
def Check_Alfa_Value(Spatial_Frequency):
    """Return the DCT normalizing scale factor: 1/sqrt(2) for the DC
    term (frequency 0), otherwise 1."""
    return 1 / np.sqrt(2) if Spatial_Frequency == 0 else 1
# In[11]: DCT / IDCT Formula
def DCT_Formula(blockimg, u, v):
    """Compute the single 2D DCT coefficient (u, v) of *blockimg*."""
    M, N = blockimg.shape
    alfa_u = Check_Alfa_Value(u)
    alfa_v = Check_Alfa_Value(v)
    total = 0
    for x in range(0, M):
        cos_x = math.cos((2 * x + 1) * u * math.pi / (2 * M))
        for y in range(0, N):
            cos_y = math.cos((2 * y + 1) * v * math.pi / (2 * N))
            total += blockimg[x][y] * cos_x * cos_y * alfa_u * alfa_v
    # Final 1/4 scaling of the accumulated sum.
    return total / 4
# In[12]: DCT / IDCT transform functions
def Compute_DCT(imgblock, BlockSize=8):
    """Apply DCT_Formula at every (row, col) position of a BlockSize block."""
    coeffs = np.zeros((BlockSize, BlockSize))
    for row in range(0, BlockSize):
        for col in range(0, BlockSize):
            coeffs[row][col] = DCT_Formula(imgblock, row, col)
    return coeffs
# In[13 & 14]: Quantize and Inv. all the DCT coefficients using the quant. matrix
'''
- The compression the floating point data, will be not effective.
- Convert the weights-matrix back to values in the space of [0,255].
- Do this by finding the min/max value for the matrix and dividing each number
in this range to give us a value between [0,1] to which we multiply by 255 to get final value.
'''
# Standard JPEG luminance quantization table (8x8).
Quant = np.array([[16,11,10,16, 24,40, 51, 61],
                  [12,12,14,19, 26,48, 60, 55],
                  [14,13,16,24, 40,57, 69, 56],
                  [14,17,22,29, 51,87, 80, 62],
                  [18,22,37,56, 68,109,103,77],
                  [24,35,55,64, 81,104,113,92],
                  [49,64,78,87,103,121,120,101],
                  [72,92,95,98,112,100,103,99]])
def Quantize(dct_blocks):
    """Divide each DCT block element-wise by the quantization table.

    NOTE(review): no rounding to integers happens here, so the step is
    fully invertible by Inv_Quantize — confirm this is intended.
    """
    quantized = [block / Quant for block in dct_blocks]
    print("--------- Quantization Done ---------")
    print("\n")
    return quantized
def Inv_Quantize(qnt_blocks):
    """Multiply each quantized block element-wise by the quantization table."""
    restored = [block * Quant for block in qnt_blocks]
    print("--------- Inverse Quantization Done ---------")
    return restored
# In[15]: Reshape the image blocks to 512x512
def Chunk(l, n):
    """Return successive n-sized slices of *l* as a list of lists."""
    return [l[start:start + n] for start in range(0, len(l), n)]
def Reshape_Image(ImgBlocks, BlockSize=8, ImageSize=512):
    """Reassemble a row-major list of BlockSize x BlockSize blocks
    into a full ImageSize x ImageSize image."""
    pixels = []
    blocks_per_row = ImageSize // BlockSize
    for row_of_blocks in Chunk(ImgBlocks, blocks_per_row):
        # Emit the scanlines of this block-row one at a time.
        for scanline in range(BlockSize):
            for block in row_of_blocks:
                pixels.extend(block[scanline])
    rec_img = np.array(pixels).reshape(ImageSize, ImageSize)
    print("--------- Image Reshaped ---------")
    print("\n")
    return rec_img
# In[16]: Compute MSE
def Compute_MSE(OriginalImage, RecoveriedImage):
    """Return the root of the mean squared error between two images.

    NOTE(review): despite the name, this is the RMSE (sqrt of the MSE).
    """
    squared_error = (OriginalImage - RecoveriedImage) ** 2
    return np.sqrt(squared_error.mean())
# In[17]: Compute PSNR
def Compute_PSNR(Computed_MSE):
    """Peak signal-to-noise ratio in dB for 8-bit images.

    Returns 100 when the error is zero (identical images).
    """
    if Computed_MSE == 0:
        return 100
    PIXEL_MAX = 255
    return 20 * math.log10(PIXEL_MAX / math.sqrt(Computed_MSE))
# In[18]:
def Encode(source):
    """Block-split *source*, DCT each 8x8 block, then quantize.

    Returns (raw_blocks, quantized_dct_blocks).
    """
    print("--------- Encoding Process ---------")
    # Init block 8x8
    blocks = Get_Blocks(source)
    # DCT every block
    dct_blocks = [Compute_DCT(block) for block in blocks]
    print("--------- DCT Done ---------")
    # Quantize every block
    return blocks, Quantize(dct_blocks)
# In[19]:
def Decode(blocks, QuantBlocks):
    """Inverse-quantize and inverse-DCT the blocks, then rebuild the image."""
    print("--------- Decoding Process ---------")
    # Inverse quantization of every block.
    inv_blocks = Inv_Quantize(QuantBlocks)
    # NOTE(review): Compute_DCT is reused here as the inverse transform —
    # confirm this is the intended IDCT.
    idct_blocks = [Compute_DCT(inv_blocks[k]) for k in range(len(blocks))]
    print("--------- Inverse DCT Done ---------")
    return Reshape_Image(idct_blocks)
# In[20]:
def main(Image= 'img_1'):
    """Run the full DCT encode/decode demo on one of two test images.

    Parameters
    ----------
    Image : str
        'img_1' loads the raw 512x512 Lena image, 'img_2' loads
        barbara_gray.bmp.

    Raises
    ------
    AttributeError
        If *Image* is neither 'img_1' nor 'img_2'.
    """
    if(Image == 'img_1'):
        #Read Image and Reshape
        img = np.fromfile("Lena Gray Raw Image.txt", dtype='uint8', sep="")
        #Set Height and Width
        img = np.reshape(img, (512, 512))
    elif(Image == 'img_2'):
        # NOTE(review): scipy.misc.imread was removed in modern SciPy —
        # this branch presumably only runs on old SciPy; verify.
        img = misc.imread('barbara_gray.bmp', flatten= 1)
    else:
        raise AttributeError("Enter the Image Number please")
    Display_Image("Original Image", img)
    #Convert to 3D Image
    img = Image3D(img)
    #Convert to YUV Image
    yuv = RGB_To_YUV(img)
    Display_Image("YUV Image", yuv)
    Y, U, V = Split_Channels(yuv)
    # Encode and decode each plane independently.
    Blocks_Y, Encoded_Y = Encode(Y)
    Blocks_U, Encoded_U = Encode(U)
    Blocks_V, Encoded_V = Encode(V)
    Recovered_Y = Decode(Blocks_Y, Encoded_Y)
    Recovered_U = Decode(Blocks_U, Encoded_U)
    Recovered_V = Decode(Blocks_V, Encoded_V)
    Recovered_Img = Merge_Channels(Recovered_Y, Recovered_U, Recovered_V)
    Display_Image("Reconstructed YUV Image", Recovered_Img)
    RGB = YUV_To_RGB(Recovered_Img)
    Display_Image("Reconstructed Original Image", RGB)
    # Quality metrics between the reconstructed YUV and RGB images.
    mse = np.round(Compute_MSE(Recovered_Img, RGB),2)
    psnr = np.round(Compute_PSNR(mse), 2)
    print('mse={} and psnr={}dB'.format(mse, psnr))
    Gray = rgb2gray(RGB)
    Display_Image("gray", Gray)
    return(1)
# In[21]:
## call the main function
if __name__ == '__main__':
Recovered_Img = main()
| 27.428571
| 96
| 0.592618
|
4a09521672248a4fe42f013838f8948c5c86b788
| 10,015
|
py
|
Python
|
mmaction/third_party/ActivityNet/Evaluation/eval_kinetics.py
|
Lill98/mmaction_custom_data
|
a174e995b78a936a7c80a1feb884cbfa801af740
|
[
"Apache-2.0"
] | 20
|
2021-08-18T06:53:17.000Z
|
2022-02-24T06:45:10.000Z
|
mmaction/third_party/ActivityNet/Evaluation/eval_kinetics.py
|
Lill98/mmaction_custom_data
|
a174e995b78a936a7c80a1feb884cbfa801af740
|
[
"Apache-2.0"
] | 6
|
2021-08-25T08:11:46.000Z
|
2021-10-08T07:42:08.000Z
|
mmaction/third_party/ActivityNet/Evaluation/eval_kinetics.py
|
Lill98/mmaction_custom_data
|
a174e995b78a936a7c80a1feb884cbfa801af740
|
[
"Apache-2.0"
] | 3
|
2018-09-22T14:05:25.000Z
|
2021-01-29T04:22:52.000Z
|
import json
import urllib.request, urllib.error, urllib.parse
import numpy as np
import pandas as pd
from utils import get_blocked_videos
from utils import interpolated_prec_rec
class ANETclassification(object):
    """Evaluator for the ActivityNet untrimmed-video classification task.

    Loads a ground-truth JSON and a prediction JSON, then computes
    per-class average precision and top-k hit rate via `evaluate`.
    """
    GROUND_TRUTH_FIELDS = ['database', 'taxonomy', 'version']
    PREDICTION_FIELDS = ['results', 'version', 'external_data']
    def __init__(self, ground_truth_filename=None, prediction_filename=None,
                 ground_truth_fields=GROUND_TRUTH_FIELDS,
                 prediction_fields=PREDICTION_FIELDS,
                 subset='validation', verbose=False, top_k=3,
                 check_status=True):
        # Both input files are required; fail fast with a clear message.
        if not ground_truth_filename:
            raise IOError('Please input a valid ground truth file.')
        if not prediction_filename:
            raise IOError('Please input a valid prediction file.')
        self.subset = subset
        self.verbose = verbose
        self.gt_fields = ground_truth_fields
        self.pred_fields = prediction_fields
        self.top_k = top_k
        self.ap = None        # filled by evaluate()
        self.hit_at_k = None  # filled by evaluate()
        self.check_status = check_status
        # Retrieve blocked videos from server.
        if self.check_status:
            self.blocked_videos = get_blocked_videos()
        else:
            self.blocked_videos = list()
        # Import ground truth and predictions.
        self.ground_truth, self.activity_index = self._import_ground_truth(
            ground_truth_filename)
        self.prediction = self._import_prediction(prediction_filename)
        if self.verbose:
            print('[INIT] Loaded annotations from {} subset.'.format(subset))
            nr_gt = len(self.ground_truth)
            print('\tNumber of ground truth instances: {}'.format(nr_gt))
            nr_pred = len(self.prediction)
            print('\tNumber of predictions: {}'.format(nr_pred))
    def _import_ground_truth(self, ground_truth_filename):
        """Reads ground truth file, checks if it is well formatted, and returns
        the ground truth instances and the activity classes.
        Parameters
        ----------
        ground_truth_filename : str
            Full path to the ground truth json file.
        Outputs
        -------
        ground_truth : df
            Data frame containing the ground truth instances.
        activity_index : dict
            Dictionary containing class index.
        """
        with open(ground_truth_filename, 'r') as fobj:
            data = json.load(fobj)
        # Checking format
        if not all([field in list(data.keys()) for field in self.gt_fields]):
            raise IOError('Please input a valid ground truth file.')
        # Initialize data frame
        activity_index, cidx = {}, 0
        video_lst, label_lst = [], []
        for videoid, v in data['database'].items():
            # Only keep videos from the requested subset that are not blocked.
            if self.subset != v['subset']:
                continue
            if videoid in self.blocked_videos:
                continue
            for ann in v['annotations']:
                # Assign a fresh class index the first time a label is seen.
                if ann['label'] not in activity_index:
                    activity_index[ann['label']] = cidx
                    cidx += 1
                video_lst.append(videoid)
                label_lst.append(activity_index[ann['label']])
        ground_truth = pd.DataFrame({'video-id': video_lst,
                                     'label': label_lst})
        # A video may repeat a label across annotations; keep one row each.
        ground_truth = ground_truth.drop_duplicates().reset_index(drop=True)
        return ground_truth, activity_index
    def _import_prediction(self, prediction_filename):
        """Reads prediction file, checks if it is well formatted, and returns
        the prediction instances.
        Parameters
        ----------
        prediction_filename : str
            Full path to the prediction json file.
        Outputs
        -------
        prediction : df
            Data frame containing the prediction instances.
        """
        with open(prediction_filename, 'r') as fobj:
            data = json.load(fobj)
        # Checking format...
        if not all([field in list(data.keys()) for field in self.pred_fields]):
            raise IOError('Please input a valid prediction file.')
        # Initialize data frame
        video_lst, label_lst, score_lst = [], [], []
        for videoid, v in data['results'].items():
            if videoid in self.blocked_videos:
                continue
            for result in v:
                # Raises KeyError if a prediction uses an unknown label.
                label = self.activity_index[result['label']]
                video_lst.append(videoid)
                label_lst.append(label)
                score_lst.append(result['score'])
        prediction = pd.DataFrame({'video-id': video_lst,
                                   'label': label_lst,
                                   'score': score_lst})
        return prediction
    def wrapper_compute_average_precision(self):
        """Computes average precision for each class in the subset.
        """
        ap = np.zeros(len(list(self.activity_index.items())))
        for activity, cidx in self.activity_index.items():
            # Restrict both frames to the rows of this class only.
            gt_idx = self.ground_truth['label'] == cidx
            pred_idx = self.prediction['label'] == cidx
            ap[cidx] = compute_average_precision_classification(
                self.ground_truth.loc[gt_idx].reset_index(drop=True),
                self.prediction.loc[pred_idx].reset_index(drop=True))
        return ap
    def evaluate(self):
        """Evaluates a prediction file. For the detection task we measure the
        interpolated mean average precision to measure the performance of a
        method.
        """
        ap = self.wrapper_compute_average_precision()
        hit_at_k = compute_video_hit_at_k(self.ground_truth,
                                          self.prediction, top_k=self.top_k)
        avg_hit_at_k = compute_video_hit_at_k(
            self.ground_truth, self.prediction, top_k=self.top_k, avg=True)
        if self.verbose:
            print ('[RESULTS] Performance on ActivityNet untrimmed video '
                   'classification task.')
            print('\tMean Average Precision: {}'.format(ap.mean()))
            print('\tError@{}: {}'.format(self.top_k, 1.0 - hit_at_k))
            #print '\tAvg Hit@{}: {}'.format(self.top_k, avg_hit_at_k)
        self.ap = ap
        self.hit_at_k = hit_at_k
        self.avg_hit_at_k = avg_hit_at_k
################################################################################
# Metrics
################################################################################
def compute_average_precision_classification(ground_truth, prediction):
    """Compute average precision (classification task) between ground truth and
    predictions data frames. If multiple predictions occurs for the same
    predicted segment, only the one with highest score is matched as
    true positive. This code is greatly inspired by Pascal VOC devkit.
    Parameters
    ----------
    ground_truth : df
        Data frame containing the ground truth instances.
        Required fields: ['video-id']
    prediction : df
        Data frame containing the prediction instances.
        Required fields: ['video-id, 'score']
    Outputs
    -------
    ap : float
        Average precision score.
    """
    npos = float(len(ground_truth))
    # lock_gt[i] >= 0 once ground-truth row i has been claimed by a prediction.
    lock_gt = np.ones(len(ground_truth)) * -1
    # Sort predictions by decreasing score order.
    sort_idx = prediction['score'].values.argsort()[::-1]
    prediction = prediction.loc[sort_idx].reset_index(drop=True)
    # Initialize true positive and false positive vectors.
    tp = np.zeros(len(prediction))
    fp = np.zeros(len(prediction))
    # Assigning true positive to truly grount truth instances.
    for idx in range(len(prediction)):
        this_pred = prediction.loc[idx]
        gt_idx = ground_truth['video-id'] == this_pred['video-id']
        # Check if there is at least one ground truth in the video associated.
        if not gt_idx.any():
            fp[idx] = 1
            continue
        this_gt = ground_truth.loc[gt_idx].reset_index()
        # NOTE(review): this comparison assumes a single ground-truth row per
        # video within a class; with several rows the array truth value would
        # be ambiguous — confirm the upstream drop_duplicates guarantees this.
        if lock_gt[this_gt['index']] >= 0:
            # Ground truth already matched by a higher-scoring prediction.
            fp[idx] = 1
        else:
            tp[idx] = 1
            lock_gt[this_gt['index']] = idx
    # Computing prec-rec
    # Bug fix: the `np.float` alias was removed in NumPy 1.20+; use the
    # builtin float (behavior is identical: float64 cast).
    tp = np.cumsum(tp).astype(float)
    fp = np.cumsum(fp).astype(float)
    rec = tp / npos
    prec = tp / (tp + fp)
    return interpolated_prec_rec(prec, rec)
def compute_video_hit_at_k(ground_truth, prediction, top_k=3, avg=False):
    """Compute accuracy at k prediction between ground truth and
    predictions data frames. This code is greatly inspired by evaluation
    performed in Karpathy et al. CVPR14.
    Parameters
    ----------
    ground_truth : df
        Data frame containing the ground truth instances.
        Required fields: ['video-id', 'label']
    prediction : df
        Data frame containing the prediction instances.
        Required fields: ['video-id, 'label', 'score']
    Outputs
    -------
    acc : float
        Top k accuracy score.
    """
    video_ids = np.unique(ground_truth['video-id'].values)
    hits = np.zeros(video_ids.size)
    for idx, vid in enumerate(video_ids):
        has_pred = prediction['video-id'] == vid
        # Videos with no prediction at all count as zero hits.
        if not has_pred.any():
            continue
        vid_pred = prediction.loc[has_pred].reset_index(drop=True)
        # Indices of the top-k predictions by decreasing score.
        top_idx = vid_pred['score'].values.argsort()[::-1][:top_k]
        top_labels = vid_pred.loc[top_idx]['label'].tolist()
        gt_labels = ground_truth.loc[ground_truth['video-id'] == vid]['label'].tolist()
        # Fraction of this video's ground-truth labels found in the top-k.
        hits[idx] = np.mean([1 if lbl in top_labels else 0 for lbl in gt_labels])
        if not avg:
            # Binary hit: any matched label counts as a full hit.
            hits[idx] = np.ceil(hits[idx])
    return float(hits.mean())
| 40.06
| 80
| 0.602796
|
4a0952188f1360035a26ad582dff54c5b2fc58f3
| 1,093
|
py
|
Python
|
database.py
|
snakems/karmator_bot
|
27cddc6355998bd8765539de0f70906867dfd13a
|
[
"MIT"
] | 10
|
2018-01-18T11:58:37.000Z
|
2021-12-24T13:52:59.000Z
|
database.py
|
snakems/karmator_bot
|
27cddc6355998bd8765539de0f70906867dfd13a
|
[
"MIT"
] | 6
|
2020-07-23T21:12:16.000Z
|
2021-06-25T15:23:42.000Z
|
database.py
|
snakems/karmator_bot
|
27cddc6355998bd8765539de0f70906867dfd13a
|
[
"MIT"
] | 7
|
2020-10-26T13:08:01.000Z
|
2022-01-30T18:49:26.000Z
|
import os
import peewee as pw
from playhouse.db_url import connect
from logger import db_log
DB_ADDRESS = os.environ["DATABASE_URL"]
db = connect(DB_ADDRESS)
db_log.debug(f"Create database with address {DB_ADDRESS}")
# В запросах в програме использованы логические
# операторы поскольку (из документации Peewee):
# Peewee uses bitwise operators (& and |)
# rather than logical operators (and and or)
# Postgres database -+
class BaseModel(pw.Model):
    """Base peewee model binding all subclasses to the shared `db` connection."""
    class Meta:
        database = db
class KarmaUser(BaseModel):
    """Per-chat karma record for a user.

    One row per (userid, chatid) pair — enforced by the composite
    primary key below.
    """
    userid = pw.IntegerField(null=False)   # user identifier
    chatid = pw.IntegerField(null=False)   # chat the karma belongs to
    karma = pw.IntegerField(null=False)    # current karma score
    user_name = pw.CharField(max_length=100, null=False)
    user_nick = pw.CharField(max_length=50, null=False)
    # Stored in the legacy "is_banned" column; field name says "frozen".
    is_freezed = pw.BooleanField(column_name="is_banned")
    class Meta:
        db_table = "karma_user"
        primary_key = pw.CompositeKey("userid", "chatid")
class Limitation(BaseModel):
    """Timestamp record for a user in a chat.

    NOTE(review): presumably a rate-limit/cooldown entry keyed by the
    timestamp — confirm against the bot code that writes it.
    """
    userid = pw.IntegerField(null=False)
    chatid = pw.IntegerField(null=False)
    timer = pw.TimestampField(null=False, primary_key=True)
    class Meta:
        db_table = "limitation"
| 21.431373
| 58
| 0.755718
|
4a09532a06987cf5d2594f41b52f4396cbde3c4d
| 3,802
|
py
|
Python
|
refinery/units/misc/datefix.py
|
jhhcs/refinery
|
8173e4006f9c28ad2043806b7e29adce4849985b
|
[
"BSD-3-Clause"
] | 1
|
2022-02-13T20:57:15.000Z
|
2022-02-13T20:57:15.000Z
|
refinery/units/misc/datefix.py
|
baderj/refinery
|
801c900a7184dfd47f5455124fbcfb4cf68814ed
|
[
"BSD-3-Clause"
] | null | null | null |
refinery/units/misc/datefix.py
|
baderj/refinery
|
801c900a7184dfd47f5455124fbcfb4cf68814ed
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from re import compile as re_compile
from datetime import datetime, timedelta
from refinery.units import arg, Unit
from refinery.lib.decorators import linewise
class datefix(Unit):
    """
    Parses all kinds of date formats and unifies them into the same format.
    """
    # Known strftime patterns tried in order by `process`.
    _FORMATS = [
        '%B %dth %Y %H:%M:%S (UTC)',  # November 27th 2019 17:37:02 (UTC)
        '%Y-%m-%dT%H:%M:%S',          # 2010-03-15T06:27:50
        '%Y-%m-%d %H:%M:%S',          # iso (2010-03-15 06:27:50.000000)
        '%Y-%m-%d %H:%M:%SZ%f',
        '%Y-%m-%dT%H:%M:%S.%f',
        '%Y-%m-%dT%H:%M:%SZ%f',
        '%a %b %d %Y %H:%M:%S',       # Thu Apr 24 2014 12:32:21
    ]
    # Trailing timezone designators recognized by `_extract_timezone`.
    _TIMEZONE_REGEXES = [re_compile(p) for p in [
        R'([+-])(\d{2})(\d{2})$',            # Thu Apr 24 2014 12:32:21 GMT-0700
        R'([+-])(\d{2}):(\d{2})$',           # 2017:09:11 23:47:22+02:00
        R'GMT([+-])(\d{2})(\d{2}) \(.+\)$'   # Thu Apr 24 2014 12:32:21 GMT-0700 (PDT)
    ]]
    def __init__(
        self,
        format: arg(help='Specify the output format as a strftime-like string, using ISO by default.') = '%Y-%m-%d %H:%M:%S',
        dos: arg('-d', help='Parse timestamps in DOS rather than Unix format.') = False
    ):
        super().__init__(format=format, dos=dos)
    @staticmethod
    def dostime(stamp: int) -> datetime:
        """
        Parses a given DOS timestamp into a datetime object.
        """
        # High word holds the date, low word the time (MS-DOS bit layout).
        d, t = stamp >> 16, stamp & 0xFFFF
        # DOS stores seconds with 2-second granularity.
        s = (t & 0x1F) << 1
        return datetime(
            year   = ((d & 0xFE00) >> 0x9) + 1980,  # noqa
            month  = ((d & 0x01E0) >> 0x5),         # noqa
            day    = ((d & 0x001F) >> 0x0),         # noqa
            hour   = ((t & 0xF800) >> 0xB),         # noqa
            minute = ((t & 0x07E0) >> 0x5),         # noqa
            second = 59 if s == 60 else s,          # noqa
        )
    def _format(self, dt: datetime) -> str:
        # Render using the user-configured strftime pattern.
        return dt.strftime(self.args.format)
    def _extract_timezone(self, data):
        """Strip a trailing timezone designator from *data*.

        Returns (trimmed_text, timedelta offset) on a match, otherwise
        (data, None).
        """
        for r in self._TIMEZONE_REGEXES:
            m = r.search(data)
            if not m:
                continue
            pm = m[1]
            td = timedelta(
                hours=int(m[2]), minutes=int(m[3]))
            if pm == '-':
                td = -td
            return data[:-len(m[0])].strip(), td
        return data, None
    @linewise
    def process(self, data: str) -> str:
        """Normalize one line of input to the configured date format.

        Unparseable input is returned unchanged.
        """
        data = data.strip()
        # replace colons (i.e. for exiftool dates: 2017:01:01)
        if len(data) > 10 and data[4] == ':' and data[7] == ':':
            data = F'{data[0:4]}-{data[5:7]}-{data[8:]}'
        # strips Z at end (i.e. 20171022055144Z)
        if data.endswith('Z'):
            data = data[:-1]
        # parses timestamps and dates without much format
        if data.isdigit():
            time_stamp = int(data)
            if len(data) > 14:
                raise Exception('cannot parse all-numeric string as date: %s' % data)
            elif len(data) == 14:
                # i.e. 20111020193727
                return self._format(datetime.strptime(data, '%Y%m%d%H%M%S'))
            elif len(data) == 13:
                # i.e. 1458016535000 — millisecond epoch, reduce to seconds
                time_stamp //= 1000
                data = data[:-3]
            if self.args.dos:
                return self._format(self.dostime(time_stamp))
            else:
                return self._format(datetime.utcfromtimestamp(time_stamp))
        data, time_delta = self._extract_timezone(data)
        for f in self._FORMATS:
            try:
                dt = datetime.strptime(data, f)
            except ValueError:
                continue
            # Subtracting the extracted offset converts the result to UTC.
            return self._format(dt if time_delta is None else dt - time_delta)
        return data
| 34.252252
| 125
| 0.490005
|
4a09532fa18c32427c7daa339f7c068f0527305b
| 432
|
py
|
Python
|
tests/test_guardduty/test_server.py
|
gtourkas/moto
|
307104417b579d23d02f670ff55217a2d4a16bee
|
[
"Apache-2.0"
] | 5,460
|
2015-01-01T01:11:17.000Z
|
2022-03-31T23:45:38.000Z
|
tests/test_guardduty/test_server.py
|
gtourkas/moto
|
307104417b579d23d02f670ff55217a2d4a16bee
|
[
"Apache-2.0"
] | 4,475
|
2015-01-05T19:37:30.000Z
|
2022-03-31T13:55:12.000Z
|
tests/test_guardduty/test_server.py
|
gtourkas/moto
|
307104417b579d23d02f670ff55217a2d4a16bee
|
[
"Apache-2.0"
] | 1,831
|
2015-01-14T00:00:44.000Z
|
2022-03-31T20:30:04.000Z
|
import json
import sure # noqa # pylint: disable=unused-import
import moto.server as server
def test_create_without_enable_option():
    """POSTing /detector on the guardduty mock returns 200 and a detectorId."""
    app = server.create_backend_app("guardduty")
    client = app.test_client()
    payload = json.dumps({"enable": "True"})
    response = client.post("/detector", data=payload)
    response.status_code.should.equal(200)
    json.loads(response.data).should.have.key("detectorId")
| 28.8
| 67
| 0.733796
|
4a0953e0d6fe193cdee29faea54961c5f58f71e7
| 3,358
|
py
|
Python
|
ant_colony.py
|
Sherlock-Jerry/AntColonyOptimization
|
11f30c60f84165a12c85377cfe9f12e62390bd1f
|
[
"MIT"
] | 75
|
2017-05-05T16:28:29.000Z
|
2022-03-24T03:20:57.000Z
|
ant_colony.py
|
Sherlock-Jerry/AntColonyOptimization
|
11f30c60f84165a12c85377cfe9f12e62390bd1f
|
[
"MIT"
] | 5
|
2018-02-15T19:16:04.000Z
|
2020-04-01T10:56:51.000Z
|
ant_colony.py
|
Sherlock-Jerry/AntColonyOptimization
|
11f30c60f84165a12c85377cfe9f12e62390bd1f
|
[
"MIT"
] | 43
|
2018-06-13T22:49:50.000Z
|
2022-03-20T17:17:39.000Z
|
import random as rn
import numpy as np
from numpy.random import choice as np_choice
class AntColony(object):
    """Ant-colony optimizer for short closed tours over a distance matrix.

    Args:
        distances (2D numpy.array): Square matrix of distances. Diagonal is assumed to be np.inf.
        n_ants (int): Number of ants running per iteration
        n_best (int): Number of best ants who deposit pheromone
        n_iterations (int): Number of iterations
        decay (float): Pheromone retention factor applied each iteration; 0.95 decays slowly, 0.5 quickly.
        alpha (int or float): exponent on pheromone; higher gives pheromone more weight. Default=1
        beta (int or float): exponent on distance; higher gives distance more weight. Default=1

    Example:
        ant_colony = AntColony(german_distances, 100, 20, 2000, 0.95, alpha=1, beta=2)
    """
    def __init__(self, distances, n_ants, n_best, n_iterations, decay, alpha=1, beta=1):
        self.distances = distances
        # Start with a uniform pheromone level on every edge.
        self.pheromone = np.ones(self.distances.shape) / len(distances)
        self.all_inds = range(len(distances))
        self.n_ants = n_ants
        self.n_best = n_best
        self.n_iterations = n_iterations
        self.decay = decay
        self.alpha = alpha
        self.beta = beta

    def run(self):
        """Run the full optimization and return the best (path, length) found."""
        best_so_far = ("placeholder", np.inf)
        iteration_best = None
        for _ in range(self.n_iterations):
            paths = self.gen_all_paths()
            self.spread_pheronome(paths, self.n_best, shortest_path=iteration_best)
            iteration_best = min(paths, key=lambda entry: entry[1])
            print(iteration_best)
            if iteration_best[1] < best_so_far[1]:
                best_so_far = iteration_best
            # Evaporate pheromone globally.
            self.pheromone = self.pheromone * self.decay
        return best_so_far

    def spread_pheronome(self, all_paths, n_best, shortest_path):
        """Deposit pheromone along the n_best shortest tours of this batch."""
        ranked = sorted(all_paths, key=lambda entry: entry[1])
        for path, _dist in ranked[:n_best]:
            for edge in path:
                # Shorter edges receive proportionally more pheromone.
                self.pheromone[edge] += 1.0 / self.distances[edge]

    def gen_path_dist(self, path):
        """Total length of a tour given as a list of (from, to) edges."""
        total_dist = 0
        for edge in path:
            total_dist += self.distances[edge]
        return total_dist

    def gen_all_paths(self):
        """Generate one tour per ant, each paired with its total distance."""
        tours = []
        for _ in range(self.n_ants):
            tour = self.gen_path(0)
            tours.append((tour, self.gen_path_dist(tour)))
        return tours

    def gen_path(self, start):
        """Build a closed tour beginning and ending at *start*."""
        edges = []
        seen = set()
        seen.add(start)
        current = start
        for _ in range(len(self.distances) - 1):
            nxt = self.pick_move(self.pheromone[current], self.distances[current], seen)
            edges.append((current, nxt))
            current = nxt
            seen.add(nxt)
        edges.append((current, start))  # going back to where we started
        return edges

    def pick_move(self, pheromone, dist, visited):
        """Sample the next city with probability proportional to
        pheromone**alpha * (1/distance)**beta, excluding visited cities."""
        weights = np.copy(pheromone)
        weights[list(visited)] = 0
        weights = weights ** self.alpha * ((1.0 / dist) ** self.beta)
        probs = weights / weights.sum()
        return np_choice(self.all_inds, 1, p=probs)[0]
| 38.597701
| 156
| 0.6081
|
4a0954690f1184be071609e124a74a1bf96a6e74
| 1,088
|
py
|
Python
|
LeetCode/Python3/BinaryTree/513. Find Bottom Left Tree Value.py
|
WatsonWangZh/CodingPractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | 11
|
2019-09-01T22:36:00.000Z
|
2021-11-08T08:57:20.000Z
|
LeetCode/Python3/BinaryTree/513. Find Bottom Left Tree Value.py
|
WatsonWangZh/LeetCodePractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | null | null | null |
LeetCode/Python3/BinaryTree/513. Find Bottom Left Tree Value.py
|
WatsonWangZh/LeetCodePractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | 2
|
2020-05-27T14:58:52.000Z
|
2020-05-27T15:04:17.000Z
|
# Given a binary tree, find the leftmost value in the last row of the tree.
# Example 1:
# Input:
# 2
# / \
# 1 3
# Output:
# 1
# Example 2:
# Input:
# 1
# / \
# 2 3
# / / \
# 4 5 6
# /
# 7
# Output:
# 7
# Note: You may assume the tree (i.e., the given root node) is not NULL.
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import deque
class Solution(object):
    def findBottomLeftValue(self, root):
        """Return the value of the leftmost node on the deepest level.

        :type root: TreeNode
        :rtype: int
        """
        # Level-order sweep keeping only the current level; the first node
        # of the last non-empty level is the answer. O(n) time, O(width) space.
        if not root:
            return None
        level = [root]
        while level:
            leftmost = level[0].val
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child]
        return leftmost
| 21.333333
| 75
| 0.460478
|
4a09551f2ac677c755ca0269fc3b60ae3977a1cd
| 16,974
|
py
|
Python
|
ross/fluid_flow/fluid_flow_coefficients.py
|
JuliaMota/ross
|
88c2fa69d9a583dcdc33eab8deb35c797ebf4ef8
|
[
"MIT"
] | null | null | null |
ross/fluid_flow/fluid_flow_coefficients.py
|
JuliaMota/ross
|
88c2fa69d9a583dcdc33eab8deb35c797ebf4ef8
|
[
"MIT"
] | null | null | null |
ross/fluid_flow/fluid_flow_coefficients.py
|
JuliaMota/ross
|
88c2fa69d9a583dcdc33eab8deb35c797ebf4ef8
|
[
"MIT"
] | null | null | null |
import sys
from math import isnan
import numpy as np
from scipy import integrate
from scipy.optimize import least_squares
# fmt: off
from ross.fluid_flow.fluid_flow_geometry import (move_rotor_center,
move_rotor_center_abs)
# fmt: on
def calculate_oil_film_force(fluid_flow_object, force_type=None):
    """This function calculates the forces of the oil film in the N and T directions, ie in the
    opposite direction to the eccentricity and in the tangential direction.
    Parameters
    ----------
    fluid_flow_object: A FluidFlow object.
    force_type: str
        If set, calculates the oil film force matrix analytically considering the chosen type: 'short' or 'long'.
        If set to 'numerical', calculates the oil film force numerically.
    Returns
    -------
    radial_force: float
        Force of the oil film in the opposite direction to the eccentricity direction.
    tangential_force: float
        Force of the oil film in the tangential direction
    f_x: float
        Components of forces in the x direction
    f_y: float
        Components of forces in the y direction
    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example
    >>> my_fluid_flow = fluid_flow_example()
    >>> calculate_oil_film_force(my_fluid_flow) # doctest: +ELLIPSIS
    (...
    """
    # Analytical closed-form solution for the short-bearing approximation.
    if force_type != "numerical" and (
        force_type == "short" or fluid_flow_object.bearing_type == "short_bearing"
    ):
        radial_force = (
            0.5
            * fluid_flow_object.viscosity
            * (fluid_flow_object.radius_rotor / fluid_flow_object.radial_clearance) ** 2
            * (fluid_flow_object.length ** 3 / fluid_flow_object.radius_rotor)
            * (
                (
                    2
                    * fluid_flow_object.eccentricity_ratio ** 2
                    * fluid_flow_object.omega
                )
                / (1 - fluid_flow_object.eccentricity_ratio ** 2) ** 2
            )
        )
        tangential_force = (
            0.5
            * fluid_flow_object.viscosity
            * (fluid_flow_object.radius_rotor / fluid_flow_object.radial_clearance) ** 2
            * (fluid_flow_object.length ** 3 / fluid_flow_object.radius_rotor)
            * (
                (np.pi * fluid_flow_object.eccentricity_ratio * fluid_flow_object.omega)
                / (2 * (1 - fluid_flow_object.eccentricity_ratio ** 2) ** (3.0 / 2))
            )
        )
    # Analytical closed-form solution for the long-bearing approximation.
    elif force_type != "numerical" and (
        force_type == "long" or fluid_flow_object.bearing_type == "long_bearing"
    ):
        radial_force = (
            6
            * fluid_flow_object.viscosity
            * (fluid_flow_object.radius_rotor / fluid_flow_object.radial_clearance) ** 2
            * fluid_flow_object.radius_rotor
            * fluid_flow_object.length
            * (
                (
                    2
                    * fluid_flow_object.eccentricity_ratio ** 2
                    * fluid_flow_object.omega
                )
                / (
                    (2 + fluid_flow_object.eccentricity_ratio ** 2)
                    * (1 - fluid_flow_object.eccentricity_ratio ** 2)
                )
            )
        )
        tangential_force = (
            6
            * fluid_flow_object.viscosity
            * (fluid_flow_object.radius_rotor / fluid_flow_object.radial_clearance) ** 2
            * fluid_flow_object.radius_rotor
            * fluid_flow_object.length
            * (
                (np.pi * fluid_flow_object.eccentricity_ratio * fluid_flow_object.omega)
                / (
                    (2 + fluid_flow_object.eccentricity_ratio ** 2)
                    * (1 - fluid_flow_object.eccentricity_ratio ** 2) ** 0.5
                )
            )
        )
    # Numerical path: integrate the computed pressure field over the surface.
    else:
        p_mat = fluid_flow_object.p_mat_numerical
        a = np.zeros([fluid_flow_object.nz, fluid_flow_object.ntheta])
        b = np.zeros([fluid_flow_object.nz, fluid_flow_object.ntheta])
        g1 = np.zeros(fluid_flow_object.nz)
        g2 = np.zeros(fluid_flow_object.nz)
        # Reference direction from the rotor center to the first mesh point.
        base_vector = np.array(
            [
                fluid_flow_object.xre[0][0] - fluid_flow_object.xi,
                fluid_flow_object.yre[0][0] - fluid_flow_object.yi,
            ]
        )
        for i in range(fluid_flow_object.nz):
            for j in range(int(fluid_flow_object.ntheta)):
                vector_from_rotor = np.array(
                    [
                        fluid_flow_object.xre[i][j] - fluid_flow_object.xi,
                        fluid_flow_object.yre[i][j] - fluid_flow_object.yi,
                    ]
                )
                angle_between_vectors = np.arctan2(
                    vector_from_rotor[1], vector_from_rotor[0]
                ) - np.arctan2(base_vector[1], base_vector[0])
                # Keep the angle in [0, 2*pi).
                if angle_between_vectors < 0:
                    angle_between_vectors += 2 * np.pi
                # Decompose the pressure into cos/sin components.
                a[i][j] = p_mat[i][j] * np.cos(angle_between_vectors)
                b[i][j] = p_mat[i][j] * np.sin(angle_between_vectors)
        # Integrate circumferentially (Simpson), then axially.
        for i in range(fluid_flow_object.nz):
            g1[i] = integrate.simps(a[i][:], fluid_flow_object.gama[0])
            g2[i] = integrate.simps(b[i][:], fluid_flow_object.gama[0])
        integral1 = integrate.simps(g1, fluid_flow_object.z_list)
        integral2 = integrate.simps(g2, fluid_flow_object.z_list)
        # Rotate the integrated components into the radial/tangential frame.
        angle_corr = (
            np.pi / 2
            - np.arctan2(base_vector[1], base_vector[0])
            + fluid_flow_object.attitude_angle
        )
        radial_force_aux = fluid_flow_object.radius_rotor * integral1
        tangential_force_aux = fluid_flow_object.radius_rotor * integral2
        radial_force = radial_force_aux * np.cos(
            angle_corr + np.pi
        ) + tangential_force_aux * np.cos(angle_corr + np.pi / 2)
        tangential_force = radial_force_aux * np.cos(
            angle_corr + np.pi / 2
        ) + tangential_force_aux * np.cos(angle_corr)
    # Project radial/tangential forces onto Cartesian x/y axes.
    force_x = -radial_force * np.sin(
        fluid_flow_object.attitude_angle
    ) + tangential_force * np.cos(fluid_flow_object.attitude_angle)
    force_y = radial_force * np.cos(
        fluid_flow_object.attitude_angle
    ) + tangential_force * np.sin(fluid_flow_object.attitude_angle)
    return radial_force, tangential_force, force_x, force_y
def calculate_stiffness_and_damping_coefficients(fluid_flow_object):
    """This function calculates the bearing stiffness and damping matrices numerically.

    The rotor center is perturbed harmonically (amplitude xp/yp, frequency
    omegap) first in x and then in y over one perturbation period.  The
    resulting oil-film forces are fitted by least squares to the linear model
    F = F0 + K*d + C*d_dot, whose displacement and velocity coefficients are
    the stiffness and damping terms.

    Parameters
    ----------
    fluid_flow_object: A FluidFlow object.
    Returns
    -------
    Two lists of floats
        A list of length four including stiffness floats in this order: kxx, kxy, kyx, kyy.
        And another list of length four including damping floats in this order: cxx, cxy, cyx, cyy.
    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example
    >>> my_fluid_flow = fluid_flow_example()
    >>> calculate_stiffness_and_damping_coefficients(my_fluid_flow) # doctest: +ELLIPSIS
    ([429...
    """

    def _linear_fit(design_matrix, forces):
        # Least-squares fit of forces ~ p0 + p1*displacement + p2*velocity.
        # np.linalg.lstsq is numerically more stable (and clearer) than the
        # explicit normal-equations form inv(X'X) X' F used previously.
        coefficients, *_ = np.linalg.lstsq(design_matrix, forces, rcond=None)
        return coefficients

    # Number of samples over one perturbation period.
    N = 6
    t = np.linspace(0, 2 * np.pi / fluid_flow_object.omegap, N)
    # Perturbation amplitude: a small fraction of the radial clearance so the
    # force response stays in the linear regime.
    fluid_flow_object.xp = fluid_flow_object.radial_clearance * 0.0001
    fluid_flow_object.yp = fluid_flow_object.radial_clearance * 0.0001
    dx = np.zeros(N)
    dy = np.zeros(N)
    xdot = np.zeros(N)
    ydot = np.zeros(N)
    radial_force = np.zeros(N)
    tangential_force = np.zeros(N)
    force_xx = np.zeros(N)
    force_yx = np.zeros(N)
    force_xy = np.zeros(N)
    force_yy = np.zeros(N)
    # Design matrices (columns: constant, displacement, velocity) and the
    # force samples for the four coefficient fits.
    X1 = np.zeros([N, 3])
    X2 = np.zeros([N, 3])
    F1 = np.zeros(N)
    F2 = np.zeros(N)
    F3 = np.zeros(N)
    F4 = np.zeros(N)
    for i in range(N):
        fluid_flow_object.t = t[i]
        # --- perturb in x, solve the flow, and record the forces ---
        delta_x = fluid_flow_object.xp * np.sin(
            fluid_flow_object.omegap * fluid_flow_object.t
        )
        move_rotor_center(fluid_flow_object, delta_x, 0)
        dx[i] = delta_x
        xdot[i] = (
            fluid_flow_object.omegap
            * fluid_flow_object.xp
            * np.cos(fluid_flow_object.omegap * fluid_flow_object.t)
        )
        fluid_flow_object.geometry_description()
        fluid_flow_object.calculate_pressure_matrix_numerical(direction="x")
        [
            radial_force[i],
            tangential_force[i],
            force_xx[i],
            force_yx[i],
        ] = calculate_oil_film_force(fluid_flow_object, force_type="numerical")
        # --- undo the x perturbation, then perturb in y ---
        delta_y = fluid_flow_object.yp * np.sin(
            fluid_flow_object.omegap * fluid_flow_object.t
        )
        move_rotor_center(fluid_flow_object, -delta_x, 0)
        move_rotor_center(fluid_flow_object, 0, delta_y)
        dy[i] = delta_y
        ydot[i] = (
            fluid_flow_object.omegap
            * fluid_flow_object.yp
            * np.cos(fluid_flow_object.omegap * fluid_flow_object.t)
        )
        fluid_flow_object.geometry_description()
        fluid_flow_object.calculate_pressure_matrix_numerical(direction="y")
        [
            radial_force[i],
            tangential_force[i],
            force_xy[i],
            force_yy[i],
        ] = calculate_oil_film_force(fluid_flow_object, force_type="numerical")
        # Restore the unperturbed state before the next sample.
        move_rotor_center(fluid_flow_object, 0, -delta_y)
        fluid_flow_object.geometry_description()
        fluid_flow_object.calculate_pressure_matrix_numerical()
        X1[i] = [1, dx[i], xdot[i]]
        X2[i] = [1, dy[i], ydot[i]]
        F1[i] = -force_xx[i]
        F2[i] = -force_xy[i]
        F3[i] = -force_yx[i]
        F4[i] = -force_yy[i]
    P1 = _linear_fit(X1, F1)
    P2 = _linear_fit(X2, F2)
    P3 = _linear_fit(X1, F3)
    P4 = _linear_fit(X2, F4)
    # Index 1 of each fit is the displacement coefficient (stiffness) and
    # index 2 the velocity coefficient (damping).
    K = [P1[1], P2[1], P3[1], P4[1]]
    C = [P1[2], P2[2], P3[2], P4[2]]
    return K, C
def calculate_short_stiffness_matrix(fluid_flow_object):
    """This function calculates the stiffness matrix for the short bearing.
    Parameters
    ----------
    fluid_flow_object: A FluidFlow object.
    Returns
    -------
    list of floats
        A list of length four including stiffness floats in this order: kxx, kxy, kyx, kyy
    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example
    >>> my_fluid_flow = fluid_flow_example()
    >>> calculate_short_stiffness_matrix(my_fluid_flow) # doctest: +ELLIPSIS
    [417...
    """
    # Shorthands so the closed-form short-bearing expressions stay readable.
    e = fluid_flow_object.eccentricity_ratio
    e2 = e ** 2
    pi2 = np.pi ** 2
    # Common dimensionless factor of all four coefficients.
    h0 = 1.0 / ((pi2 * (1 - e2) + 16 * e2) ** 1.5)
    # Load divided by clearance gives the stiffness scale.
    a = fluid_flow_object.load / fluid_flow_object.radial_clearance
    kxx = a * h0 * 4 * (pi2 * (2 - e2) + 16 * e2)
    kxy = (
        a * h0 * np.pi
        * (pi2 * (1 - e2) ** 2 - 16 * e ** 4)
        / (e * np.sqrt(1 - e2))
    )
    kyx = (
        -a * h0 * np.pi
        * (pi2 * (1 - e2) * (1 + 2 * e2) + (32 * e2) * (1 + e2))
        / (e * np.sqrt(1 - e2))
    )
    kyy = a * h0 * 4 * (
        pi2 * (1 + 2 * e2) + ((32 * e2) * (1 + e2)) / (1 - e2)
    )
    return [kxx, kxy, kyx, kyy]
def calculate_short_damping_matrix(fluid_flow_object):
    """This function calculates the damping matrix for the short bearing.
    Parameters
    ----------
    fluid_flow_object: A FluidFlow object.
    Returns
    -------
    list of floats
        A list of length four including damping floats in this order: cxx, cxy, cyx, cyy
    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example
    >>> my_fluid_flow = fluid_flow_example()
    >>> calculate_short_damping_matrix(my_fluid_flow) # doctest: +ELLIPSIS
    [...
    """
    # Shorthands so the closed-form short-bearing expressions stay readable.
    e = fluid_flow_object.eccentricity_ratio
    e2 = e ** 2
    pi2 = np.pi ** 2
    # Common dimensionless factor of all four coefficients.
    h0 = 1.0 / ((pi2 * (1 - e2) + 16 * e2) ** 1.5)
    # Damping scale: load over (clearance * rotation speed).
    a = fluid_flow_object.load / (
        fluid_flow_object.radial_clearance * fluid_flow_object.omega
    )
    cxx = (
        a * h0 * 2 * np.pi * np.sqrt(1 - e2)
        * (pi2 * (1 + 2 * e2) - 16 * e2) / e
    )
    cxy = -a * h0 * 8 * (pi2 * (1 + 2 * e2) - 16 * e2)
    # Cross-coupled damping terms are equal for the short bearing model.
    cyx = cxy
    cyy = (
        a * h0 * (2 * np.pi * (pi2 * (1 - e2) ** 2 + 48 * e2))
        / (e * np.sqrt(1 - e2))
    )
    return [cxx, cxy, cyx, cyy]
def find_equilibrium_position(fluid_flow_object, print_equilibrium_position=False):
    """This function finds the equilibrium position of the rotor such that the fluid flow
    forces match the applied load.
    Parameters
    ----------
    fluid_flow_object: A FluidFlow object.
    print_equilibrium_position: bool, optional
        If True, prints the equilibrium position.
    Returns
    -------
    None
    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example2
    >>> my_fluid_flow = fluid_flow_example2()
    >>> find_equilibrium_position(my_fluid_flow)
    >>> (my_fluid_flow.xi, my_fluid_flow.yi) # doctest: +ELLIPSIS
    (2.24...
    """
    def residuals(x, *args):
        """Calculates x component of the forces of the oil film and the
        difference between the y component and the load.
        Parameters
        ----------
        x: array
            Rotor center coordinates
        *args : dict
            Dictionary instantiating the ross.FluidFlow class.
        Returns
        -------
        array
            Array with the x component of the forces of the oil film and the difference
            between the y component and the load.
        """
        bearing = args[0]
        # x is interpreted in units of the radial clearance: convert to an
        # absolute rotor-center position, then re-solve the flow field.
        move_rotor_center_abs(
            bearing,
            x[0] * fluid_flow_object.radial_clearance,
            x[1] * fluid_flow_object.radial_clearance,
        )
        bearing.geometry_description()
        bearing.calculate_pressure_matrix_numerical()
        (
            _,
            _,
            fx,
            fy,
        ) = calculate_oil_film_force(bearing, force_type="numerical")
        # At equilibrium fx vanishes and fy balances the static load.
        return np.array([fx, (fy - bearing.load)])
    if fluid_flow_object.load is None:
        # NOTE(review): sys.exit in a library function aborts the whole
        # process; raising ValueError would be friendlier to callers.
        sys.exit("Load must be given to calculate the equilibrium position.")
    # Initial guess: on the vertical axis, slightly below the center.
    # NOTE(review): x0 is pre-scaled by radial_clearance here, yet residuals()
    # multiplies x by radial_clearance again — the units look doubly scaled;
    # confirm this is intended.
    x0 = np.array(
        [
            0 * fluid_flow_object.radial_clearance,
            -1e-3 * fluid_flow_object.radial_clearance,
        ]
    )
    move_rotor_center_abs(fluid_flow_object, x0[0], x0[1])
    fluid_flow_object.geometry_description()
    fluid_flow_object.calculate_pressure_matrix_numerical()
    # NOTE(review): fx and fy computed below are never used; presumably this
    # call only initializes solver state — confirm before removing it.
    (
        _,
        _,
        fx,
        fy,
    ) = calculate_oil_film_force(fluid_flow_object, force_type="numerical")
    # Bounds keep the optimizer in the physically meaningful quadrant
    # (0 <= x <= 1, -1 <= y <= 0, in clearance units).
    result = least_squares(
        residuals,
        x0,
        args=[fluid_flow_object],
        jac="3-point",
        bounds=([0, -1], [1, 0]),
    )
    # Apply the converged position (scaled back to absolute coordinates).
    move_rotor_center_abs(
        fluid_flow_object,
        result.x[0] * fluid_flow_object.radial_clearance,
        result.x[1] * fluid_flow_object.radial_clearance,
    )
    fluid_flow_object.geometry_description()
    if print_equilibrium_position is True:
        print(
            "The equilibrium position (x0, y0) is: (",
            result.x[0] * fluid_flow_object.radial_clearance,
            ",",
            result.x[1] * fluid_flow_object.radial_clearance,
            ")",
        )
| 35.961864
| 113
| 0.588429
|
4a095532678c7c595c56968abb7bdaac8a7a3e01
| 263
|
py
|
Python
|
deepthought/channels.py
|
ndsystems/deepthought
|
2d206bd5cc71191c00d6cc2b60868f6fdce33828
|
[
"MIT"
] | 3
|
2021-06-15T07:02:30.000Z
|
2022-03-04T15:34:09.000Z
|
deepthought/channels.py
|
ndsystems/deepthought
|
2d206bd5cc71191c00d6cc2b60868f6fdce33828
|
[
"MIT"
] | 1
|
2021-06-18T06:58:12.000Z
|
2021-06-18T06:58:12.000Z
|
deepthought/channels.py
|
ndsystems/deepthought
|
2d206bd5cc71191c00d6cc2b60868f6fdce33828
|
[
"MIT"
] | null | null | null |
class ChannelConfig:
    """Configuration container for a named channel.

    Only the name is set at construction time; the remaining acquisition
    settings start as ``None`` and are filled in later by the caller.
    """

    def __init__(self, name):
        self.name = name
        # acquisition settings, assigned after construction
        self.detector = None
        self.exposure = None
        self.marker = None
        self.detect_with = None

    def __repr__(self):
        return "{}, {}".format(self.name, self.marker)
| 23.909091
| 44
| 0.585551
|
4a09553995241b3ea9c3d40ec6bab5ffe0efa093
| 19,765
|
py
|
Python
|
python_module/SuperGLU/Util/PythonDirectLibs/flask_oauth_fork/flask_oauth.py
|
GeneralizedLearningUtilities/SuperGLU
|
1c373d1358431fb96dd70b324b26a14fc8ed1fcb
|
[
"MIT"
] | 8
|
2015-07-13T23:07:20.000Z
|
2020-11-13T21:09:55.000Z
|
python_module/SuperGLU/Util/PythonDirectLibs/flask_oauth_fork/flask_oauth.py
|
GeneralizedLearningUtilities/SuperGLU
|
1c373d1358431fb96dd70b324b26a14fc8ed1fcb
|
[
"MIT"
] | 7
|
2016-01-13T12:13:56.000Z
|
2021-12-14T21:12:28.000Z
|
python_module/SuperGLU/Util/PythonDirectLibs/flask_oauth_fork/flask_oauth.py
|
GeneralizedLearningUtilities/SuperGLU
|
1c373d1358431fb96dd70b324b26a14fc8ed1fcb
|
[
"MIT"
] | 6
|
2015-09-23T17:53:32.000Z
|
2020-04-30T07:27:01.000Z
|
# -*- coding: utf-8 -*-
"""
flask_oauth
~~~~~~~~~~~
Implements basic OAuth support for Flask.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
Fork notice: this is a copy taken from https://github.com/mitsuhiko/flask-oauth
for version 0.13 (commit 09ebf28 "This is 0.13"). The following pull requests
were manually merged (note that if the PR is marked with [in spirit] then we
took the idea and code and made our own version of the changes)
* #67 urljoin import fixed for Python 3
* #62 [in spirit] add state parm - covered by #42
* #59 [in spirit] better generate_request_token failure info
* #49 bearer authorization header is needed for google responses
* #42 extra params for authorize call (which means #62 isn't needed)
"""
import httplib2
from functools import wraps
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from flask import request, session, json, redirect, Response
from werkzeug import (
url_decode,
url_encode,
url_quote,
parse_options_header,
Headers
)
import oauth2
_etree = None
def get_etree():
    """Return an elementtree implementation. Prefers lxml"""
    global _etree
    if _etree is None:
        # Probe candidates from fastest to slowest and cache the first one
        # that imports, so the import cost is paid at most once.
        candidates = ('lxml.etree', 'xml.etree.cElementTree',
                      'xml.etree.ElementTree')
        for modname in candidates:
            try:
                _etree = __import__(modname, fromlist=['dummy'])
            except ImportError:
                continue
            break
        else:
            raise TypeError('lxml or etree not found')
    return _etree
def parse_response(resp, content, strict=False):
    """Parse the body of a remote response according to its content type.

    JSON and XML are decoded into Python structures; everything else is
    treated as form-encoded unless ``strict`` is set, in which case unknown
    content types are returned unparsed.
    """
    mimetype, options = parse_options_header(resp['content-type'])
    if mimetype in ('application/json', 'text/javascript'):
        return json.loads(content)
    if mimetype in ('application/xml', 'text/xml'):
        # Many implementations get the XML charset wrong; utf-8 is a
        # superset of ascii, so assuming utf-8 does little harm.
        charset = options.get('charset', 'utf-8')
        return get_etree().fromstring(content.decode(charset))
    if mimetype != 'application/x-www-form-urlencoded' and strict:
        return content
    charset = options.get('charset', 'utf-8')
    return url_decode(content, charset=charset).to_dict()
def add_query(url, args):
    """Return *url* with *args* appended as URL-encoded query parameters."""
    if not args:
        return url
    separator = '&' if '?' in url else '?'
    return url + separator + url_encode(args)
def encode_request_data(data, format):
    """Serialize *data* for the given format.

    Returns a ``(body, content_type)`` tuple; the content type is ``None``
    when no serialization format was requested.
    """
    if format is None:
        return data, None
    if format == 'json':
        return json.dumps(data or {}), 'application/json'
    if format == 'urlencoded':
        return url_encode(data or {}), 'application/x-www-form-urlencoded'
    raise TypeError('Unknown format %r' % format)
class OAuthResponse(object):
    """Wraps the headers and body that an OAuth protected remote
    application sent back.

    Attributes:
        headers: a :class:`werkzeug.Headers` view of the response headers.
        raw_data: the unparsed response body exactly as received.
        data: the body parsed according to its declared content type.
    """

    def __init__(self, resp, content):
        self.headers = Headers(resp)
        self.raw_data = content
        # Parse eagerly; ``strict`` leaves unknown content types untouched.
        self.data = parse_response(resp, content, strict=True)

    @property
    def status(self):
        """The HTTP status code of the response as an integer."""
        return self.headers.get('status', type=int)
class OAuthClient(oauth2.Client):
    """oauth2 client that can request a brand new (request) token."""

    def request_new_token(self, uri, callback=None, params=None):
        """POST to *uri* to obtain a fresh request token.

        The parameters are form-encoded and signed with the consumer
        credentials.  *callback*, if given, is sent as ``oauth_callback``.
        """
        # Work on a copy of the parameters.  The previous implementation
        # used a mutable default (``params={}``) and wrote the callback
        # straight into it, so a callback from one call leaked into later
        # calls and into the caller's own dict.
        params = dict(params) if params else {}
        if callback is not None:
            params['oauth_callback'] = callback
        req = oauth2.Request.from_consumer_and_token(
            self.consumer, token=self.token,
            http_method='POST', http_url=uri, parameters=params,
            is_form_encoded=True)
        req.sign_request(self.method, self.consumer, self.token)
        body = req.to_postdata()
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Content-Length': str(len(body))
        }
        return httplib2.Http.request(self, uri, method='POST',
                                     body=body, headers=headers)
class OAuthException(RuntimeError):
    """Raised if authorization fails for some reason."""
    #: A helpful error message for debugging
    message = None
    #: A unique type for this exception if available.
    type = None

    def __init__(self, message, type=None, data=None):
        #: A helpful error message for debugging
        self.message = message
        #: A unique type for this exception if available.
        self.type = type
        #: If available, the parsed data from the remote API that can be
        #: used to pinpoint the error.
        self.data = data

    def __str__(self):
        # The old implementation returned ``self.message.encode('utf-8')``,
        # i.e. bytes — on Python 3 that makes str(exc) raise TypeError.
        return self.message

    def __unicode__(self):
        # Python 2 compatibility; ignored on Python 3.
        return self.message
class OAuth(object):
    """Registry for remote applications.  In the future this will also
    be the central class for OAuth provider functionality.
    """

    def __init__(self):
        self.remote_apps = {}

    def remote_app(self, name, register=True, **kwargs):
        """Registers a new remote application.  If `register` is set to
        `False` the application is not stored in the :attr:`remote_apps`
        dictionary.  The keyword arguments are forwarded to the
        :class:`OAuthRemoteApp` constructor.
        """
        app = OAuthRemoteApp(self, name, **kwargs)
        if not register:
            return app
        assert name not in self.remote_apps, \
            'application already registered'
        self.remote_apps[name] = app
        return app
class OAuthRemoteApp(object):
    """Represents a remote application.
    :param oauth: the associated :class:`OAuth` object.
    :param name: the name of the remote application
    :param request_token_url: the URL for requesting new tokens
    :param access_token_url: the URL for token exchange
    :param authorize_url: the URL for authorization
    :param consumer_key: the application specific consumer key
    :param consumer_secret: the application specific consumer secret
    :param request_token_params: an optional dictionary of parameters
                                 to forward to the request token URL
                                 or authorize URL depending on oauth
                                 version.
    :param access_token_params: an optional dictionary of parameters to forward
                                to the access token URL
    :param access_token_method: the HTTP method that should be used
                                for the access_token_url.  Defaults
                                to ``'GET'``.
    :param bearer_authorization_header: a parameter to automatically add
                                an OAuth2 authentication header.
                                Checkout: http://tools.ietf.org/html/rfc6750#section-2.1
                                Defaults to False
    :param bearer_authorization_header_prefix: the word between Authorization: and the access_token.
                                Defaults to ``'Bearer'``
    """
    def __init__(self, oauth, name, base_url,
                 request_token_url,
                 access_token_url, authorize_url,
                 consumer_key, consumer_secret,
                 request_token_params=None,
                 access_token_params=None,
                 access_token_method='GET',
                 bearer_authorization_header=False,
                 bearer_authorization_header_prefix='Bearer'):
        self.oauth = oauth
        #: the `base_url` all URLs are joined with.
        self.base_url = base_url
        self.name = name
        self.request_token_url = request_token_url
        self.access_token_url = access_token_url
        self.authorize_url = authorize_url
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        # Assigned later via the :meth:`tokengetter` decorator.
        self.tokengetter_func = None
        self.request_token_params = request_token_params or {}
        self.access_token_params = access_token_params or {}
        self.access_token_method = access_token_method
        self.bearer_authorization_header = bearer_authorization_header
        self.bearer_authorization_header_prefix = bearer_authorization_header_prefix
        # One consumer/client pair is shared by all token requests.
        self._consumer = oauth2.Consumer(self.consumer_key,
                                         self.consumer_secret)
        self._client = OAuthClient(self._consumer)
    def status_okay(self, resp):
        """Given request data, checks if the status is okay."""
        try:
            return int(resp['status']) in (200, 201)
        except ValueError:
            return False
    def get(self, *args, **kwargs):
        """Sends a ``GET`` request.  Accepts the same parameters as
        :meth:`request`.
        """
        kwargs['method'] = 'GET'
        return self.request(*args, **kwargs)
    def post(self, *args, **kwargs):
        """Sends a ``POST`` request.  Accepts the same parameters as
        :meth:`request`.
        """
        kwargs['method'] = 'POST'
        return self.request(*args, **kwargs)
    def put(self, *args, **kwargs):
        """Sends a ``PUT`` request.  Accepts the same parameters as
        :meth:`request`.
        """
        kwargs['method'] = 'PUT'
        return self.request(*args, **kwargs)
    def delete(self, *args, **kwargs):
        """Sends a ``DELETE`` request.  Accepts the same parameters as
        :meth:`request`.
        """
        kwargs['method'] = 'DELETE'
        return self.request(*args, **kwargs)
    def make_client(self, token=None):
        """Creates a new `oauth2` Client object with the token attached.
        Usually you don't have to do that but use the :meth:`request`
        method instead.
        """
        return oauth2.Client(self._consumer, self.get_request_token(token))
    def request(self, url, data="", headers=None, format='urlencoded',
                method='GET', content_type=None, token=None):
        """Sends a request to the remote server with OAuth tokens attached.
        The `url` is joined with :attr:`base_url` if the URL is relative.
        .. versionadded:: 0.12
           added the `token` parameter.
        :param url: where to send the request to
        :param data: the data to be sent to the server.  If the request method
                     is ``GET`` the data is appended to the URL as query
                     parameters, otherwise encoded to `format` if the format
                     is given.  If a `content_type` is provided instead, the
                     data must be a string encoded for the given content
                     type and used as request body.
        :param headers: an optional dictionary of headers.
        :param format: the format for the `data`.  Can be `urlencoded` for
                       URL encoded data or `json` for JSON.
        :param method: the HTTP request method to use.
        :param content_type: an optional content type.  If a content type is
                             provided, the data is passed as it and the
                             `format` parameter is ignored.
        :param token: an optional token to pass to tokengetter. Use this if you
                      want to support sending requests using multiple tokens.
                      If you set this to anything not None, `tokengetter_func`
                      will receive the given token as an argument, in which case
                      the tokengetter should return the `(token, secret)` tuple
                      for the given token.
        :return: an :class:`OAuthResponse` object.
        """
        client = self.make_client(token)
        url = self.expand_url(url)
        # Copy to avoid mutating the caller's header dict.
        headers = dict(headers or {})
        if self.bearer_authorization_header:
            # RFC 6750 section 2.1: pass the token in the Authorization
            # header (required by e.g. Google's OAuth2 endpoints).
            headers['Authorization'] = '%s %s' % (self.bearer_authorization_header_prefix, client.token.key)
        if method == 'GET':
            assert format == 'urlencoded'
            if data:
                url = add_query(url, data)
            data = ""
        else:
            if content_type is None:
                data, content_type = encode_request_data(data, format)
            if content_type is not None:
                headers['Content-Type'] = content_type
        return OAuthResponse(*client.request(url, method=method,
                                             body=data or '',
                                             headers=headers))
    def expand_url(self, url):
        # Relative URLs are resolved against base_url; absolute ones pass
        # through unchanged (urljoin semantics).
        return urljoin(self.base_url, url)
    def generate_request_token(self, callback=None):
        # OAuth1 step 1: obtain a temporary request token; the callback is
        # resolved relative to the current request URL.
        if callback is not None:
            callback = urljoin(request.url, callback)
        resp, content = self._client.request_new_token(
            self.expand_url(self.request_token_url), callback,
            self.request_token_params)
        if not self.status_okay(resp):
            status = 'NO STATUS'
            if resp and 'status' in resp:
                status = resp.get('status', 'BLANK STATUS')
            msg = "Failed to generate request token. Response [%s]: '%s'" % (
                status,
                content
            )
            raise OAuthException(msg, type='token_generation_failed')
        data = parse_response(resp, content)
        if data is None:
            raise OAuthException('Invalid token response from ' + self.name,
                                 type='token_generation_failed')
        tup = (data['oauth_token'], data['oauth_token_secret'])
        # Stash the token pair in the session so get_request_token can fall
        # back to it when no tokengetter result is available.
        session[self.name + '_oauthtok'] = tup
        return tup
    def get_request_token(self, token=None):
        assert self.tokengetter_func is not None, 'missing tokengetter function'
        # Don't pass the token if the token is None to support old
        # tokengetter functions.
        rv = self.tokengetter_func(*(token and (token,) or ()))
        if rv is None:
            rv = session.get(self.name + '_oauthtok')
            if rv is None:
                raise OAuthException('No token available', type='token_missing')
        return oauth2.Token(*rv)
    def free_request_token(self):
        # Drop the temporary token and redirect target from the session.
        session.pop(self.name + '_oauthtok', None)
        session.pop(self.name + '_oauthredir', None)
    def authorize(self, callback=None, extra_params={}):
        """Returns a redirect response to the remote authorization URL with
        the signed callback given.  The callback must be `None` in which
        case the application will most likely switch to PIN based authentication
        or use a remotely stored callback URL.  Alternatively it's an URL
        on the system that has to be decorated as :meth:`authorized_handler`.
        You may also pass extra parameters via the dictionary extra_params
        (e.g. state for google oauth2)
        """
        if self.request_token_url:
            # OAuth1 flow: a request token is generated up front.
            token = self.generate_request_token(callback)[0]
            url = '%s?oauth_token=%s' % (self.expand_url(self.authorize_url),
                                         url_quote(token))
        else:
            assert callback is not None, 'Callback is required OAuth2'
            # This is for things like facebook's oauth.  Since we need the
            # callback for the access_token_url we need to keep it in the
            # session.
            params = dict(self.request_token_params)
            params['redirect_uri'] = callback
            params['client_id'] = self.consumer_key
            params['response_type'] = 'code'
            if extra_params:
                params.update(extra_params)
            session[self.name + '_oauthredir'] = callback
            url = add_query(self.expand_url(self.authorize_url), params)
        return redirect(url)
    def tokengetter(self, f):
        """Registers a function as tokengetter.  The tokengetter has to return
        a tuple of ``(token, secret)`` with the user's token and token secret.
        If the data is unavailable, the function must return `None`.
        If the `token` parameter is passed to the request function it's
        forwarded to the tokengetter function::
            @oauth.tokengetter
            def get_token(token='user'):
                if token == 'user':
                    return find_the_user_token()
                elif token == 'app':
                    return find_the_app_token()
                raise RuntimeError('invalid token')
        """
        self.tokengetter_func = f
        return f
    def handle_oauth1_response(self):
        """Handles an oauth1 authorization response.  The return value of
        this method is forwarded as first argument to the handling view
        function.
        """
        client = self.make_client()
        resp, content = client.request('%s?oauth_verifier=%s' % (
            self.expand_url(self.access_token_url),
            request.args['oauth_verifier']
        ), self.access_token_method)
        data = parse_response(resp, content)
        if not self.status_okay(resp):
            raise OAuthException('Invalid response from ' + self.name,
                                 type='invalid_response', data=data)
        return data
    def handle_oauth2_response(self):
        """Handles an oauth2 authorization response.  The return value of
        this method is forwarded as first argument to the handling view
        function.
        """
        # Exchange the authorization code for an access token; redirect_uri
        # must match the one stored by authorize().
        remote_args = {
            'code': request.args.get('code'),
            'client_id': self.consumer_key,
            'client_secret': self.consumer_secret,
            'redirect_uri': session.get(self.name + '_oauthredir')
        }
        remote_args.update(self.access_token_params)
        if self.access_token_method == 'POST':
            resp, content = self._client.request(self.expand_url(self.access_token_url),
                                                 self.access_token_method,
                                                 url_encode(remote_args))
        elif self.access_token_method == 'GET':
            url = add_query(self.expand_url(self.access_token_url), remote_args)
            resp, content = self._client.request(url, self.access_token_method)
        else:
            raise OAuthException('Unsupported access_token_method: ' +
                                 self.access_token_method)
        data = parse_response(resp, content)
        if not self.status_okay(resp):
            raise OAuthException('Invalid response from ' + self.name,
                                 type='invalid_response', data=data)
        return data
    def handle_unknown_response(self):
        """Called if an unknown response came back from the server.  This
        usually indicates a denied response.  The default implementation
        just returns `None`.
        """
        return None
    def authorized_handler(self, f):
        """Injects additional authorization functionality into the function.
        The function will be passed the response object as first argument
        if the request was allowed, or `None` if access was denied.  When the
        authorized handler is called, the temporary issued tokens are already
        destroyed.
        """
        @wraps(f)
        def decorated(*args, **kwargs):
            # Dispatch on the query parameters: oauth_verifier => OAuth1,
            # code => OAuth2, anything else => denial/unknown.
            if 'oauth_verifier' in request.args:
                data = self.handle_oauth1_response()
            elif 'code' in request.args:
                data = self.handle_oauth2_response()
            else:
                data = self.handle_unknown_response()
            self.free_request_token()
            return f(*((data,) + args), **kwargs)
        return decorated
| 40.921325
| 108
| 0.603744
|
4a0955cc9e399c32d6184b44aca7354b1a763e45
| 64
|
py
|
Python
|
A/281A.py
|
johnggo/Codeforces-Solutions
|
4127ae6f72294b5781fb94c42b69cfef570aae42
|
[
"MIT"
] | 1
|
2020-08-25T19:59:11.000Z
|
2020-08-25T19:59:11.000Z
|
A/281A.py
|
johnggo/Codeforces-Solutions
|
4127ae6f72294b5781fb94c42b69cfef570aae42
|
[
"MIT"
] | null | null | null |
A/281A.py
|
johnggo/Codeforces-Solutions
|
4127ae6f72294b5781fb94c42b69cfef570aae42
|
[
"MIT"
] | null | null | null |
# Codeforces 281A: upper-case the first letter, keep the rest unchanged
# (str.capitalize would also lower-case the remainder, so it is not used).
text = input()
print(text[0].upper() + text[1:])
| 16
| 32
| 0.59375
|
4a095657660a0b4086b5795d6b4c9ae2ea2298de
| 1,176
|
py
|
Python
|
mindinsight/datavisual/data_transform/image_container.py
|
fapbatista/mindinsight
|
db5769eb80cbd13a2a9af7682c11f5667d8bf141
|
[
"Apache-2.0"
] | 216
|
2020-03-28T02:11:56.000Z
|
2022-03-31T06:20:09.000Z
|
mindinsight/datavisual/data_transform/image_container.py
|
fapbatista/mindinsight
|
db5769eb80cbd13a2a9af7682c11f5667d8bf141
|
[
"Apache-2.0"
] | 13
|
2020-03-31T03:00:12.000Z
|
2021-01-03T13:01:06.000Z
|
mindinsight/datavisual/data_transform/image_container.py
|
fapbatista/mindinsight
|
db5769eb80cbd13a2a9af7682c11f5667d8bf141
|
[
"Apache-2.0"
] | 21
|
2020-03-28T02:41:06.000Z
|
2021-11-24T12:20:25.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Image container."""
from mindinsight.datavisual.proto_files.mindinsight_summary_pb2 import Summary
class ImageContainer:
    """
    Container for image to allow pickling.

    Copies the scalar fields and the encoded bytes out of the protobuf
    message, so the resulting object holds plain Python values only.

    Args:
        image_message (Summary.Image): Image proto buffer message.
    """
    def __init__(self, image_message: Summary.Image):
        for field in ("height", "width", "colorspace", "encoded_image"):
            setattr(self, field, getattr(image_message, field))
| 37.935484
| 78
| 0.694728
|
4a09572ee7c07b76e5dea4851196cc4434f443e3
| 7,652
|
py
|
Python
|
perfkitbenchmarker/linux_packages/blazemark.py
|
YueLee90/PerfKitBenchmarker
|
6bd92986fa35e9456ef40a872c749292f66ded23
|
[
"Apache-2.0"
] | null | null | null |
perfkitbenchmarker/linux_packages/blazemark.py
|
YueLee90/PerfKitBenchmarker
|
6bd92986fa35e9456ef40a872c749292f66ded23
|
[
"Apache-2.0"
] | null | null | null |
perfkitbenchmarker/linux_packages/blazemark.py
|
YueLee90/PerfKitBenchmarker
|
6bd92986fa35e9456ef40a872c749292f66ded23
|
[
"Apache-2.0"
] | 1
|
2021-12-07T13:29:48.000Z
|
2021-12-07T13:29:48.000Z
|
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing blazemark installation and cleanup functions."""
import copy
import logging
import os
from perfkitbenchmarker import data
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_packages import blaze
from perfkitbenchmarker.linux_packages import fortran
# Blazemark lives inside the blaze source tree.
BLAZEMARK_FOLDER = 'blazemark'
BLAZEMARK_DIR = os.path.join(blaze.BLAZE_DIR, BLAZEMARK_FOLDER)
# Jinja2 template rendered into blazemark's build configuration file.
CONFIG_TEMPLATE = 'blazemark_config.j2'
CONFIG = 'config'
# Matches one result section header, e.g. "Blaze (4.2% filled) [MFlop/s]:",
# capturing library name, optional fill annotation, unit, and the result rows.
THROUGHPUT_HEADER_REGEX = (
    r'(\w+[\w\- ]+\w+)\s*(\([0-9.]+% filled\))*\s*\[([\w/]+)\]:([0-9\s.]+)')
# Matches a single "<problem size> <throughput>" result row.
THROUGHPUT_RESULT_REGEX = r'([0-9]+)\s*([0-9.]+)'
# Extracts the percentage from a "(x% filled)" annotation.
FILLED_REGEX = r'([0-9.]+)% filled'
# Linear-algebra implementations blazemark compares against.
LIBS = frozenset([
    'C-like', 'Classic', 'Blaze', 'Boost uBLAS', 'Blitz++',
    'GMM++', 'Armadillo', 'MTL', 'Eigen'])
# Names of all individual blazemark benchmark binaries.
BLAZEMARK_BINARIES = frozenset([
    'cg', 'daxpy', 'dmatsvecmult', 'dvecdvecsub', 'mat3mat3mult',
    'smatdmatmult', 'smattsmatadd', 'svectdvecmult', 'tdmattdmatmult',
    'tmat3mat3mult', 'tsmatdmatmult', 'tsvecdmatmult', 'tvec6tmat6mult',
    'complex1', 'dmatdmatadd', 'dmattdmatadd', 'dvecnorm', 'mat3tmat3mult',
    'smatdvecmult', 'smattsmatmult', 'svectsvecmult', 'tdmattsmatadd',
    'tmat3tmat3add', 'tsmatdvecmult', 'tsvecdvecmult', 'vec3vec3add',
    'complex2', 'dmatdmatmult', 'dmattdmatmult', 'dvecscalarmult',
    'mat3vec3mult', 'smatscalarmult', 'svecdvecadd', 'tdmatdmatadd',
    'tdmattsmatmult', 'tmat3tmat3mult', 'tsmatsmatadd', 'tsvecsmatmult',
    'vec6vec6add', 'complex3', 'dmatdmatsub', 'dmattrans', 'dvecsvecadd',
    'mat6mat6add', 'smatsmatadd', 'svecdveccross', 'tdmatdmatmult',
    'tdvecdmatmult', 'tmat3vec3mult', 'tsmatsmatmult', 'tsvecsvecmult',
    'complex4', 'dmatdvecmult', 'dmattsmatadd', 'dvecsveccross', 'mat6mat6mult',
    'smatsmatmult', 'svecdvecmult', 'tdmatdvecmult', 'tdvecdvecmult',
    'tmat6mat6mult', 'tsmatsvecmult', 'tsvectdmatmult', 'complex5', 'dmatinv',
    'dmattsmatmult', 'dvecsvecmult', 'mat6tmat6mult', 'smatsvecmult',
    'svecscalarmult', 'tdmatsmatadd', 'tdvecsmatmult', 'tmat6tmat6add',
    'tsmattdmatadd', 'tsvectsmatmult', 'complex6', 'dmatscalarmult',
    'dvecdvecadd', 'dvectdvecmult', 'mat6vec6mult', 'smattdmatadd',
    'svecsvecadd', 'tdmatsmatmult', 'tdvecsvecmult', 'tmat6tmat6mult',
    'tsmattdmatmult', 'tvec3mat3mult', 'complex7', 'dmatsmatadd',
    'dvecdveccross', 'dvectsvecmult', 'memorysweep', 'smattdmatmult',
    'svecsveccross', 'tdmatsvecmult', 'tdvectdmatmult', 'tmat6vec6mult',
    'tsmattsmatadd', 'tvec3tmat3mult', 'complex8', 'dmatsmatmult',
    'dvecdvecmult', 'mat3mat3add', 'smatdmatadd', 'smattrans',
    'svecsvecmult', 'tdmattdmatadd', 'tdvectsmatmult', 'tsmatdmatadd',
    'tsmattsmatmult', 'tvec6mat6mult'])
def GetBinaries():
    """Find available blazemark binaries."""
    # Currently a static list; kept behind a function so callers are
    # insulated from how the set of binaries is determined.
    return BLAZEMARK_BINARIES
def _SimplfyLibName(name):
    """Simply library name parsed from output.

    Args:
      name: string. Name parsed from blazemark output.

    Returns:
      A simplified name defined in LIBS, or the original name when no
      known library name is contained in it.
    """
    # First LIBS entry that appears as a substring wins; fall back to name.
    return next((lib for lib in LIBS if lib in name), name)
def _ParseResult(out, test):
    """Parse blazemark results into samples.

    The raw output contains one throughput section per library, e.g.::

        Blaze [MFlop/s]:
             100        2602.56
        10000000        292.569

    Each (library, N) pair becomes one sample.

    Args:
        out: string. Blazemark output in raw string format.
        test: string. Name of the test ran.

    Returns:
        A list of samples. Each sample is a 4-tuple of (benchmark_name,
        value, unit, metadata).
    """
    matches = regex_util.ExtractAllMatches(THROUGHPUT_HEADER_REGEX, out)
    results = []
    for m in matches:
        # m[0]: raw library name, m[1]: optional "% filled" annotation,
        # m[-2]: unit string, m[-1]: block of per-N result lines.
        lib = _SimplfyLibName(m[0])
        metadata = {}
        filled = m[1]
        if filled:
            metadata['% filled'] = regex_util.ExtractFloat(FILLED_REGEX, filled)
        unit = m[-2]
        for v in regex_util.ExtractAllMatches(THROUGHPUT_RESULT_REGEX, m[-1]):
            # metadata is mutated per row, so deep-copy it into each sample
            # to keep earlier samples' metadata from being overwritten.
            metadata['N'] = int(v[0])
            results.append(sample.Sample(
                '_'.join([test, lib, 'Throughput']),  # Metric name
                float(v[1]),  # Value
                unit,  # Unit
                copy.deepcopy(metadata)))  # Metadata
    logging.info('Results for %s:\n %s', test, results)
    return results
def RunTest(vm, test):
    """Run one blazemark benchmark binary on vm.

    Args:
        vm: VirtualMachine. The VM to run blazemark.
        test: string. The test name to run.

    Returns:
        A list of samples. Each sample is a 4-tuple of (benchmark_name,
        value, unit, metadata).
    """
    bin_dir = os.path.join(BLAZEMARK_DIR, 'bin')
    cmd = 'cd %s; export BLAZE_NUM_THREADS=%s; ./%s -only-blaze' % (
        bin_dir, vm.num_cpus, test)
    stdout, _ = vm.RemoteCommand(cmd)
    return _ParseResult(stdout, test)
def _Configure(vm):
    """Configure and build blazemark on vm.

    Renders the blazemark configuration template (compiler, flags and
    LAPACK locations), then runs ./configure and a parallel make.

    Args:
        vm: VirtualMachine. The VM to build blazemark on.
    """
    vm.RenderTemplate(
        data.ResourcePath(CONFIG_TEMPLATE),
        os.path.join(BLAZEMARK_DIR, CONFIG),
        {'compiler': '"g++-5"',
         'compiler_flags': (
             '"-Wall -Wextra -Werror -Wshadow -Woverloaded-virtual -ansi -O3 '
             '-mavx -DNDEBUG -fpermissive -ansi -O3 -DNDEBUG '
             '-DBLAZE_USE_BOOST_THREADS --std=c++14"'),
         'lapack_path': '"/tmp/pkb/lapack-3.6.1/lib"',
         'lapack_libs': '"-llapack -lblas -L%s -lgfortran"'
                        % os.path.dirname(fortran.GetLibPath(vm))})
    # Build with one make job per vCPU.
    vm.RemoteCommand('cd %s; ./configure %s; make -j %s' % (
        BLAZEMARK_DIR, CONFIG, vm.num_cpus))
def _Install(vm):
    """Install blazemark prerequisites on vm, then configure and build it."""
    prerequisites = ('g++5', 'build_tools', 'boost', 'blaze', 'lapack')
    for pkg in prerequisites:
        vm.Install(pkg)
    _Configure(vm)
def YumInstall(vm):
    """Installs the blazemark package on the VM (yum-based distros).

    Args:
        vm: VirtualMachine. The VM to install blazemark on.
    """
    _Install(vm)
def AptInstall(vm):
    """Installs the blazemark package on the VM (apt-based distros).

    Args:
        vm: VirtualMachine. The VM to install blazemark on.
    """
    _Install(vm)
| 34.468468
| 80
| 0.647151
|
4a095821f4eaba241840646f3cc1499276ba5c70
| 3,248
|
py
|
Python
|
tensorboard/plugins/mesh/mesh_demo.py
|
kevint324/tensorboard
|
cbc5b1f2d74236d89baa9d4810c166e4cee973a9
|
[
"Apache-2.0"
] | 1
|
2021-05-10T10:46:37.000Z
|
2021-05-10T10:46:37.000Z
|
tensorboard/plugins/mesh/mesh_demo.py
|
kevint324/tensorboard
|
cbc5b1f2d74236d89baa9d4810c166e4cee973a9
|
[
"Apache-2.0"
] | 2
|
2022-02-15T12:29:44.000Z
|
2022-03-02T13:26:06.000Z
|
tensorboard/plugins/mesh/mesh_demo.py
|
kevint324/tensorboard
|
cbc5b1f2d74236d89baa9d4810c166e4cee973a9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple demo which displays constant 3D mesh."""
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
from tensorboard.plugins.mesh import summary as mesh_summary
from tensorboard.plugins.mesh import demo_utils
# Command-line flags for the demo.
flags.DEFINE_string(
    "logdir", "/tmp/mesh_demo", "Directory to write event logs to."
)
flags.DEFINE_string("mesh_path", None, "Path to PLY file to visualize.")

FLAGS = flags.FLAGS

# The demo relies on TF1 placeholders/sessions, so force TF1 behavior.
tf.compat.v1.disable_v2_behavior()

# Max number of steps to run training with.
_MAX_STEPS = 10
def run():
    """Runs a TF1 session that writes mesh summaries for _MAX_STEPS steps.

    Reads the PLY file named by --mesh_path, builds a graph with mesh
    placeholders, and writes one mesh summary per step to --logdir while
    interpolating the mesh colors toward their inverse over time.

    Raises:
        ImportError: If running under TensorFlow 2.x.
        ValueError: If the required --mesh_path flag was not provided.
    """
    # Mesh summaries only work on TensorFlow 1.x.
    if int(tf.__version__.split(".")[0]) > 1:
        raise ImportError("TensorFlow 1.x is required to run this demo.")
    # Flag mesh_path is required.
    if FLAGS.mesh_path is None:
        raise ValueError(
            "Flag --mesh_path is required and must contain path to PLY file."
        )
    # Camera and scene configuration.
    config_dict = {"camera": {"cls": "PerspectiveCamera", "fov": 75}}
    # Read sample PLY file.
    vertices, colors, faces = demo_utils.read_ascii_ply(FLAGS.mesh_path)
    # Add batch dimension.
    vertices = np.expand_dims(vertices, 0)
    faces = np.expand_dims(faces, 0)
    colors = np.expand_dims(colors, 0)
    # Create placeholders for tensors representing the mesh.
    step = tf.placeholder(tf.int32, ())
    vertices_tensor = tf.placeholder(tf.float32, vertices.shape)
    faces_tensor = tf.placeholder(tf.int32, faces.shape)
    colors_tensor = tf.placeholder(tf.int32, colors.shape)
    # Interpolate from the original colors (t=0) toward their inverse (t=1).
    # NOTE(review): the arithmetic uses the constant numpy `colors`, so the
    # `colors_tensor` placeholder is fed below but never read by the graph.
    # The fed value is the same constant array, so the output is unchanged,
    # but confirm whether the placeholder was meant to be used here.
    t = tf.cast(step, tf.float32) / _MAX_STEPS
    transformed_colors = t * (255 - colors) + (1 - t) * colors
    meshes_summary = mesh_summary.op(
        "mesh_color_tensor",
        vertices=vertices_tensor,
        faces=faces_tensor,
        colors=transformed_colors,
        config_dict=config_dict,
    )
    # Create summary writer and session.  Close both when done so buffered
    # events are flushed to disk (the original leaked the writer/session,
    # which can drop the last events of a short run).
    writer = tf.summary.FileWriter(FLAGS.logdir)
    try:
        with tf.Session() as sess:
            for i in range(_MAX_STEPS):
                summary = sess.run(
                    meshes_summary,
                    feed_dict={
                        vertices_tensor: vertices,
                        faces_tensor: faces,
                        colors_tensor: colors,
                        step: i,
                    },
                )
                writer.add_summary(summary, global_step=i)
    finally:
        writer.close()
def main(unused_argv):
    """absl entry point: announce the log directory and run the demo."""
    print("Saving output to %s." % FLAGS.logdir)
    run()
    print("Done. Output saved to %s." % FLAGS.logdir)


if __name__ == "__main__":
    app.run(main)
| 31.533981
| 80
| 0.660099
|
4a0958fb7472aae6b02acbe1ea0af3b766c05192
| 539
|
py
|
Python
|
streamdeck-plugin/src/event_handlers/hand_toggle_event_handler.py
|
andrewachen/streamdeck-googlemeet
|
957e22e6e6e9cc962192ef049771714513e29fb3
|
[
"MIT"
] | 124
|
2020-09-10T14:45:40.000Z
|
2022-03-30T21:33:26.000Z
|
streamdeck-plugin/src/event_handlers/hand_toggle_event_handler.py
|
andrewachen/streamdeck-googlemeet
|
957e22e6e6e9cc962192ef049771714513e29fb3
|
[
"MIT"
] | 24
|
2020-10-20T23:43:01.000Z
|
2022-03-07T10:23:41.000Z
|
streamdeck-plugin/src/event_handlers/hand_toggle_event_handler.py
|
andrewachen/streamdeck-googlemeet
|
957e22e6e6e9cc962192ef049771714513e29fb3
|
[
"MIT"
] | 11
|
2020-10-13T03:16:49.000Z
|
2022-01-23T22:03:30.000Z
|
from event_handlers.base_toggle_event_handler import BaseToggleEventHandler
class HandToggleEventHandler(BaseToggleEventHandler):
    """
    A Stream Deck button that shows whether or not your hand is raised,
    and toggles the hand on and off when you press the button.
    """

    # Stream Deck action UUID this handler is registered for.
    STREAM_DECK_ACTION = "com.chrisregado.googlemeet.togglehand"
    # Event sent to the browser extension to request the current hand state.
    BROWSER_STATE_REQUEST_EVENT_TYPE = "getHandState"
    # Event received from the browser when the hand state changes.
    BROWSER_STATE_UPDATED_EVENT_TYPE = "handMutedState"
    # Event sent to the browser to toggle the raised-hand state.
    BROWSER_TOGGLE_EVENT_TYPE = "toggleHand"
    # Human-readable name; presumably used by the base class for logging/UI
    # — confirm against BaseToggleEventHandler.
    FRIENDLY_DEVICE_NAME = "Hand"
| 33.6875
| 75
| 0.781076
|
4a09593f129f729fea2d81bd94090e1dc7575d9d
| 906
|
py
|
Python
|
setup.py
|
yunus-ceyhan/kivyx
|
f4348eb8c00ad62346b827d1ab6197f8f84cde8e
|
[
"MIT"
] | 1
|
2022-03-28T07:27:32.000Z
|
2022-03-28T07:27:32.000Z
|
setup.py
|
yunus-ceyhan/kivyx
|
f4348eb8c00ad62346b827d1ab6197f8f84cde8e
|
[
"MIT"
] | null | null | null |
setup.py
|
yunus-ceyhan/kivyx
|
f4348eb8c00ad62346b827d1ab6197f8f84cde8e
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from os import path

# Use the README as the long description shown on PyPI.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='kivyx',
    packages=find_packages(
        include=["kivyx", "kivyx.*"]
    ),
    # Ship non-Python assets (images, fonts, editor scripts) with the package.
    package_data={
        'kivyx': [
            'data/*.png',
            'data/*.jpg',
            'data/*.ttf',
            'editor/*.py',
            'fonts/*.ttf',
        ]
    },
    include_package_data=True,
    version='0.0.2',
    description='An UI library for personal projects',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Yunus Ceyhan',
    # NOTE(review): address looks truncated (missing TLD) — confirm.
    author_email='yunus.ceyhn@gmail',
    # NOTE(review): URL points at an org/user page, not a repo — confirm.
    url='https://github.com/kivyx',
    keywords=['Python', 'Kivy', 'KivyMD'],
    install_requires=[],
    classifiers=[],
)
| 25.885714
| 73
| 0.599338
|
4a095a5b735896a63d3142acf3747193d3b2ec97
| 17,395
|
py
|
Python
|
xlib/image/ImageProcessor.py
|
Arthurzhangsheng/DeepFaceLive
|
6d12c8f64e1ab042909adec44371dc91d395e606
|
[
"MIT"
] | 4
|
2021-07-23T16:34:24.000Z
|
2022-03-01T18:31:59.000Z
|
xlib/image/ImageProcessor.py
|
Arthurzhangsheng/DeepFaceLive
|
6d12c8f64e1ab042909adec44371dc91d395e606
|
[
"MIT"
] | null | null | null |
xlib/image/ImageProcessor.py
|
Arthurzhangsheng/DeepFaceLive
|
6d12c8f64e1ab042909adec44371dc91d395e606
|
[
"MIT"
] | null | null | null |
from enum import IntEnum
from typing import Tuple, Union
import cupy as cp
import cupyx.scipy.ndimage
import cv2
import numexpr as ne
import numpy as np
import scipy
import scipy.ndimage
class ImageProcessor:
    """
    Generic image processor for numpy or cupy images

    arguments

     img    np.ndarray|
            cp.ndarray

            HW   (2 ndim)
            HWC  (3 ndim)
            NHWC (4 ndim)

    for cupy you should set device before using ImageProcessor
    """
    def __init__(self, img : Union[np.ndarray,cp.ndarray], copy=False):
        # Choose the array backend (numpy or cupy) from the input itself.
        self._xp = xp = cp.get_array_module(img)
        if copy and xp == np:
            # copy is honored only for numpy inputs; cupy arrays are used as-is.
            img = img.copy()
        # Matching scipy flavor for the ndimage operations used below.
        self._sp = cupyx.scipy if xp == cp else scipy

        ndim = img.ndim
        if ndim not in [2,3,4]:
            raise ValueError(f'img.ndim must be 2,3,4, not {ndim}.')

        # Make internal image as NHWC
        if ndim == 2:
            N, (H,W), C = 0, img.shape, 0
            img = img[None,:,:,None]
        elif ndim == 3:
            N, (H,W,C) = 0, img.shape
            img = img[None,...]
        else:
            N,H,W,C = img.shape

        self._img : np.ndarray = img

    def copy(self) -> 'ImageProcessor':
        """
        Shallow copy: the new ImageProcessor shares the same underlying
        image array — no pixel data is duplicated.
        """
        ip = ImageProcessor.__new__(ImageProcessor)
        ip._img = self._img
        ip._xp = self._xp
        ip._sp = self._sp
        return ip

    def get_dims(self) -> Tuple[int,int,int,int]:
        """
        returns dimensions of current working image

        returns N,H,W,C (ints), each >= 1
        """
        return self._img.shape

    def get_dtype(self):
        """Returns the dtype of the current working image."""
        return self._img.dtype

    def adjust_gamma(self, red : float, green : float, blue : float) -> 'ImageProcessor':
        """
        Apply per-channel gamma correction and clip the result to [0,1].
        Exponents are [1/blue, 1/green, 1/red] along the channel axis, i.e.
        this assumes BGR channel order — TODO confirm.
        Converts the image to uniform float32 and back to the original dtype.
        """
        dtype = self.get_dtype()
        self.to_ufloat32()

        xp, img = self._xp , self._img,

        xp.power(img, xp.array([1.0 / blue, 1.0 / green, 1.0 / red], xp.float32), out=img)
        xp.clip(img, 0, 1.0, out=img)

        self._img = img
        self.to_dtype(dtype)
        return self

    def apply(self, func) -> 'ImageProcessor':
        """
        apply your own function on internal image

        image has NHWC format. Do not change format, but dims can be changed.

         func   callable (img) -> img

        example:

         .apply( lambda img: img-[102,127,63] )
        """
        img = self._img
        # Result is cast back to the original dtype.
        dtype = img.dtype
        new_img = func(self._img).astype(dtype)
        if new_img.ndim != 4:
            raise Exception('func used in ImageProcessor.apply changed format of image')
        self._img = new_img
        return self

    def fit_in (self, TW = None, TH = None, pad_to_target : bool = False, allow_upscale : bool = False, interpolation : 'ImageProcessor.Interpolation' = None) -> float:
        """
        fit image in w,h keeping aspect ratio

            TW,TH           int/None     target width,height

            pad_to_target   bool    pad remain area with zeros

            allow_upscale   bool    if image smaller than TW,TH it will be upscaled

            interpolation   ImageProcessor.Interpolation. value

        returns scale float value
        """
        #if interpolation is None:
        #    interpolation = ImageProcessor.Interpolation.LINEAR
        xp, sp = self._xp, self._sp

        img = self._img
        N,H,W,C = img.shape

        if TW is not None and TH is None:
            scale = TW / W
        elif TW is None and TH is not None:
            scale = TH / H
        elif TW is not None and TH is not None:
            SW = W / TW
            SH = H / TH
            scale = 1.0
            if SW > 1.0 or SH > 1.0 or (SW < 1.0 and SH < 1.0):
                scale /= max(SW, SH)
        else:
            raise ValueError('TW or TH should be specified')

        if not allow_upscale and scale > 1.0:
            scale = 1.0

        if scale != 1.0:
            # Fold batch into channels so a single 2D resize handles all images.
            img = img.transpose( (1,2,0,3) ).reshape( (H,W,N*C) )
            if self._xp == cp:
                img = sp.ndimage.zoom(img, (scale, scale, 1.0), order=1)
            else:
                # NOTE(review): this passes the Interpolation enum value (0)
                # straight to cv2.resize; cv2.INTER_NEAREST == 0, so the cv2
                # path actually uses nearest-neighbor, not linear — the
                # `interpolation` parameter is ignored here. Confirm intent.
                img = cv2.resize (img, ( int(W*scale), int(H*scale) ), interpolation=ImageProcessor.Interpolation.LINEAR)
            H,W,_ = img.shape
            img = img.reshape( (H,W,N,C) ).transpose( (2,0,1,3) )

        if pad_to_target:
            # Zero-pad only on the bottom/right edges.
            w_pad = (TW-W) if TW is not None else 0
            h_pad = (TH-H) if TH is not None else 0
            if w_pad != 0 or h_pad != 0:
                img = xp.pad(img, ( (0,0), (0,h_pad), (0,w_pad), (0,0) ))

        self._img = img
        return scale

    def clip(self, min, max) -> 'ImageProcessor':
        """Clip all values in-place to [min, max]."""
        xp = self._xp
        xp.clip(self._img, min, max, out=self._img)
        return self

    def clip2(self, low_check, low_val, high_check, high_val) -> 'ImageProcessor':
        """
        Threshold in-place: values below low_check become low_val,
        values above high_check become high_val.
        """
        img = self._img
        l, h = img < low_check, img > high_check
        img[l] = low_val
        img[h] = high_val
        return self

    def degrade_resize(self, power : float, interpolation : 'ImageProcessor.Interpolation' = None) -> 'ImageProcessor':
        """
        Degrade quality by downscaling to (1-power) size and scaling back up.

         power  float   0 .. 1.0
        """
        power = min(1, max(0, power))
        if power == 0:
            return self

        if interpolation is None:
            interpolation = ImageProcessor.Interpolation.LINEAR

        xp, sp, img = self._xp, self._sp, self._img
        N,H,W,C = img.shape

        img = img.transpose( (1,2,0,3) ).reshape( (H,W,N*C) )

        if xp == cp:
            # Keep at least a 4px intermediate to avoid degenerate sizes.
            W_lr = max(4, round(W*(1.0-power)))
            H_lr = max(4, round(H*(1.0-power)))
            img = sp.ndimage.zoom(img, (H_lr/H, W_lr/W, 1), order=_scipy_order[interpolation])
            img = sp.ndimage.zoom(img, (H/img.shape[0], W/img.shape[1], 1), order=_scipy_order[interpolation])
        else:
            W_lr = max(4, int(W*(1.0-power)))
            H_lr = max(4, int(H*(1.0-power)))
            img = cv2.resize (img, (W_lr,H_lr), interpolation=_cv_inter[interpolation])
            img = cv2.resize (img, (W,H)      , interpolation=_cv_inter[interpolation])

        img = img.reshape( (H,W,N,C) ).transpose( (2,0,1,3) )

        self._img = img
        return self

    def median_blur(self, size : int, power : float) -> 'ImageProcessor':
        """
        Blend a median-filtered version of the image with the original.

         size   int     median kernel size

         power  float   0 .. 1.0    blend weight of the blurred version
        """
        power = min(1, max(0, power))
        if power == 0:
            return self

        dtype = self.get_dtype()
        self.to_ufloat32()

        xp, sp, img = self._xp, self._sp, self._img
        N,H,W,C = img.shape

        img = img.transpose( (1,2,0,3) ).reshape( (H,W,N*C) )

        if xp == cp:
            img_blur = sp.ndimage.median_filter(img, size=(size,size,1) )
            img = img*(1.0-power) + img_blur*power
        else:
            # NOTE(review): input is float32 here; cv2.medianBlur supports
            # float32 only for small kernel sizes — confirm `size` range.
            img_blur = cv2.medianBlur(img, size)
            img = ne.evaluate('img*(1.0-power) + img_blur*power')

        img = img.reshape( (H,W,N,C) ).transpose( (2,0,1,3) )

        self._img = img
        self.to_dtype(dtype)
        return self

    def erode_blur(self, erode : int, blur : int, fade_to_border : bool = False) -> 'ImageProcessor':
        """
        apply erode and blur to the image

         erode  int     != 0
         blur   int     > 0

         fade_to_border(False)   clip the image in order
                                 to fade smoothly to the border with specified blur amount
        """
        xp, sp = self._xp, self._sp
        erode, blur = int(erode), int(blur)

        img = self._img
        dtype = img.dtype
        N,H,W,C = img.shape
        img = img.transpose( (1,2,0,3) ).reshape( (H,W,N*C) )

        # Pad by a full image size on each side so morphology/blur never
        # touch real borders; the pad is cropped off at the end.
        img = xp.pad (img, ( (H,H), (W,W), (0,0) ) )

        if erode > 0:
            el = xp.asarray(cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)))
            iterations = max(1,erode//2)
            if self._xp == cp:
                img = sp.ndimage.binary_erosion(img, el[...,None], iterations = iterations, brute_force=True ).astype(dtype)
            else:
                img = cv2.erode(img, el, iterations = iterations )
        elif erode < 0:
            el = xp.asarray(cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)))
            iterations = max(1,-erode//2)
            if self._xp == cp:
                img = sp.ndimage.binary_dilation(img, el[...,None], iterations = iterations, brute_force=True).astype(dtype)
            else:
                img = cv2.dilate(img, el, iterations = iterations )

        if fade_to_border:
            # Indices are in padded coordinates: zero out everything except
            # the interior, shrunk by half the blur radius.
            h_clip_size = H + blur // 2
            w_clip_size = W + blur // 2
            img[:h_clip_size,:] = 0
            img[-h_clip_size:,:] = 0
            img[:,:w_clip_size] = 0
            img[:,-w_clip_size:] = 0

        if blur > 0:
            sigma = blur * 0.125 * 2
            if self._xp == cp:
                img = sp.ndimage.gaussian_filter(img, (sigma, sigma,0), mode='constant')
            else:
                img = cv2.GaussianBlur(img, (0, 0), sigma)

        #if img.ndim == 2:
        #    img = img[...,None]

        # Crop the padding back off.
        img = img[H:-H,W:-W]
        img = img.reshape( (H,W,N,C) ).transpose( (2,0,1,3) )

        self._img = img
        return self

    def rotate90(self) -> 'ImageProcessor':
        """Rotate 90 degrees counter-clockwise in the H,W plane."""
        self._img = self._xp.rot90(self._img, k=1, axes=(1,2) )
        return self

    def rotate180(self) -> 'ImageProcessor':
        """Rotate 180 degrees in the H,W plane."""
        self._img = self._xp.rot90(self._img, k=2, axes=(1,2) )
        return self

    def rotate270(self) -> 'ImageProcessor':
        """Rotate 270 degrees counter-clockwise in the H,W plane."""
        self._img = self._xp.rot90(self._img, k=3, axes=(1,2) )
        return self

    def flip_horizontal(self) -> 'ImageProcessor':
        """
        Mirror along the width axis.
        """
        self._img = self._img[:,:,::-1,:]
        return self

    def flip_vertical(self) -> 'ImageProcessor':
        """
        Mirror along the height axis.
        """
        self._img = self._img[:,::-1,:,:]
        return self

    def pad(self, t_h, b_h, l_w, r_w) -> 'ImageProcessor':
        """
        Zero-pad by (top, bottom) rows and (left, right) columns.
        """
        xp = self._xp

        img = self._img
        img = xp.pad(img, ( (0,0), (t_h,b_h), (l_w,r_w), (0,0) ))
        self._img = img
        return self

    def pad_to_next_divisor(self, dw=None, dh=None) -> 'ImageProcessor':
        """
        pad image to next divisor of width/height

         dw,dh  int
        """
        xp = self._xp

        img = self._img
        _,H,W,_ = img.shape

        w_pad = 0
        if dw is not None:
            w_pad = W % dw
            if w_pad != 0:
                w_pad = dw - w_pad

        h_pad = 0
        if dh is not None:
            h_pad = H % dh
            if h_pad != 0:
                h_pad = dh - h_pad

        if w_pad != 0 or h_pad != 0:
            # Pad only on the bottom/right edges.
            img = xp.pad(img, ( (0,0), (0,h_pad), (0,w_pad), (0,0) ))

        self._img = img
        return self

    def sharpen(self, factor : float, kernel_size=3) -> 'ImageProcessor':
        """
        Unsharp-mask sharpening: blend the image against a Gaussian blur.
        Only implemented for the numpy/cv2 backend (bare raise for cupy).
        """
        xp = self._xp

        img = self._img
        N,H,W,C = img.shape
        img = img.transpose( (1,2,0,3) ).reshape( (H,W,N*C) )

        if xp == cp:
            # Not implemented for cupy.
            raise
        else:
            blur = cv2.GaussianBlur(img, (kernel_size, kernel_size) , 0)
            img = cv2.addWeighted(img, 1.0 + (0.5 * factor), blur, -(0.5 * factor), 0)

        img = img.reshape( (H,W,N,C) ).transpose( (2,0,1,3) )
        self._img = img
        return self

    def get_image(self, format) -> np.ndarray:
        """
        returns image with desired format

         format    str      examples:
                            NHWC, HWCN, NHW

        if symbol in format does not exist, it will be got from 0 index

        zero dim will be set to 1
        """
        xp = self._xp

        format = format.upper()
        img = self._img

        # First slice missing dims
        N_slice = 0 if 'N' not in format else slice(None)
        H_slice = 0 if 'H' not in format else slice(None)
        W_slice = 0 if 'W' not in format else slice(None)
        C_slice = 0 if 'C' not in format else slice(None)
        img = img[N_slice, H_slice, W_slice, C_slice]

        # f = remaining axes in canonical NHWC order.
        f = ''
        if 'N' in format: f += 'N'
        if 'H' in format: f += 'H'
        if 'W' in format: f += 'W'
        if 'C' in format: f += 'C'

        if f != format:
            # Transpose to target
            d = { s:i for i,s in enumerate(f) }
            transpose_order = [ d[s] for s in format ]
            img = img.transpose(transpose_order)

        return xp.ascontiguousarray(img)

    def ch(self, TC : int) -> 'ImageProcessor':
        """
        Clips or expands channel dimension to target channels

         TC     int     >= 1
        """
        xp = self._xp

        img = self._img
        N,H,W,C = img.shape

        if TC <= 0:
            raise ValueError(f'channels must be positive value, not {TC}')

        if TC > C:
            # Ch expand
            img = img[...,0:1]            # Clip to single ch first.
            img = xp.repeat (img, TC, -1) # Expand by repeat
        elif TC < C:
            # Ch reduction clip
            img = img[...,:TC]

        self._img = img
        return self

    def resize(self, size : Tuple, interpolation : 'ImageProcessor.Interpolation' = None, new_ip=False ) -> 'ImageProcessor':
        """
        resize to (W,H)
        """
        xp, sp = self._xp, self._sp

        img = self._img
        N,H,W,C = img.shape

        TW,TH = size
        if W != TW or H != TH:
            if interpolation is None:
                interpolation = ImageProcessor.Interpolation.LINEAR

            img = img.transpose( (1,2,0,3) ).reshape( (H,W,N*C) )

            if self._xp == cp:
                # NOTE(review): zoom factors are (TW/W, TH/H) but ndimage.zoom
                # applies factors per axis in (rows, cols) = (H, W) order, so
                # this looks transposed unless TW == TH — confirm.
                img = sp.ndimage.zoom(img, (TW/W, TH/H, 1), order=_scipy_order[interpolation])
            else:
                img = cv2.resize (img, (TW, TH), interpolation=_cv_inter[interpolation])

            img = img.reshape( (TH,TW,N,C) ).transpose( (2,0,1,3) )

        if new_ip:
            return ImageProcessor(img)

        self._img = img
        return self

    def warpAffine(self, mat, out_width, out_height, interpolation : 'ImageProcessor.Interpolation' = None ) -> 'ImageProcessor':
        """
        Apply a 2x3 affine transform, producing an (out_width, out_height) image.
        """
        xp, sp, img = self._xp, self._sp, self._img
        N,H,W,C = img.shape
        img = img.transpose( (1,2,0,3) ).reshape( (H,W,N*C) )

        if interpolation is None:
            interpolation = ImageProcessor.Interpolation.LINEAR

        if xp == cp:
            # AffineMat inverse: map_coordinates pulls from source coords, so
            # invert the forward matrix first.
            xp_mat = cp.get_array_module(mat)
            mat = xp_mat.linalg.inv(xp_mat.concatenate( ( mat, xp_mat.array([[0,0,1]], xp_mat.float32)), 0) )[0:2,:]

            # Build a dense grid of output coordinates and map them through mat.
            mx, my = xp.meshgrid( xp.arange(0, out_width, dtype=xp.float32), xp.arange(0, out_height, dtype=xp.float32) )
            coords = xp.concatenate( (mx[None,...], my[None,...], xp.ones( (1, out_height,out_width), dtype=xp.float32)), 0 )

            mat_coords = xp.matmul (xp.asarray(mat), coords.reshape( (3,-1) ) ).reshape( (2,out_height,out_width))
            # mode='opencv' is a cupyx.scipy.ndimage extension matching cv2
            # border behavior.
            img = xp.concatenate([sp.ndimage.map_coordinates( img[...,c], mat_coords[::-1,...], order=_scipy_order[interpolation], mode='opencv' )[...,None] for c in range(N*C) ], -1)
        else:
            img = cv2.warpAffine(img, mat, (out_width, out_height), flags=_cv_inter[interpolation] )

        img = img.reshape( (out_height,out_width,N,C) ).transpose( (2,0,1,3) )
        self._img = img
        return self

    def swap_ch(self) -> 'ImageProcessor':
        """swaps order of channels"""
        self._img = self._img[...,::-1]
        return self

    def as_float32(self) -> 'ImageProcessor':
        """
        change image format to float32
        """
        xp = self._xp
        self._img = self._img.astype(xp.float32)
        return self

    def as_uint8(self) -> 'ImageProcessor':
        """
        change image format to uint8
        """
        xp = self._xp
        self._img = self._img.astype(xp.uint8)
        return self

    def to_dtype(self, dtype) -> 'ImageProcessor':
        """Convert to float32 or uint8 via the uniform converters below."""
        xp = self._xp
        if dtype == xp.float32:
            return self.to_ufloat32()
        elif dtype == xp.uint8:
            return self.to_uint8()
        else:
            raise ValueError('unsupported dtype')

    def to_ufloat32(self) -> 'ImageProcessor':
        """
        Convert to uniform float32

        if current image dtype uint8, then image will be divided by / 255.0
        otherwise no operation
        """
        xp = self._xp
        if self._img.dtype == xp.uint8:
            self._img = self._img.astype(xp.float32)
            self._img /= 255.0

        return self

    def to_uint8(self) -> 'ImageProcessor':
        """
        Convert to uint8

        if current image dtype is float32/64, then image will be multiplied by *255
        """
        xp = self._xp
        img = self._img

        if img.dtype in [xp.float32, xp.float64]:
            # In-place scale and clamp to [0,255] before the cast.
            img *= 255.0
            img[img < 0] = 0
            img[img > 255] = 255
            img = img.astype(xp.uint8, copy=False)

        self._img = img
        return self

    class Interpolation(IntEnum):
        # Mapped onto backend constants via _cv_inter / _scipy_order below.
        LINEAR = 0
        CUBIC = 1
# Backend-specific translations of ImageProcessor.Interpolation:
# OpenCV interpolation flags and scipy.ndimage spline orders.
_cv_inter = { ImageProcessor.Interpolation.LINEAR : cv2.INTER_LINEAR,
              ImageProcessor.Interpolation.CUBIC : cv2.INTER_CUBIC }

_scipy_order = { ImageProcessor.Interpolation.LINEAR : 1,
                 ImageProcessor.Interpolation.CUBIC : 3 }
| 30.252174
| 183
| 0.522047
|
4a095c94d22d363ad8beeb43b1775aee9f84ae3d
| 4,794
|
py
|
Python
|
ensime_shared/config.py
|
mic47/ensime-vim
|
4ceb76797ba598bccbceeae4d76c7d70a29ae04a
|
[
"MIT"
] | null | null | null |
ensime_shared/config.py
|
mic47/ensime-vim
|
4ceb76797ba598bccbceeae4d76c7d70a29ae04a
|
[
"MIT"
] | null | null | null |
ensime_shared/config.py
|
mic47/ensime-vim
|
4ceb76797ba598bccbceeae4d76c7d70a29ae04a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import collections
import os
import sexpdata
from ensime_shared.util import Util
BOOTSTRAPS_ROOT = os.path.join(os.environ['HOME'], '.config', 'ensime-vim')
"""Default directory where ENSIME server bootstrap projects will be created."""

# Format string for ensime-vim log records.
LOG_FORMAT = '%(levelname)-8s <%(asctime)s> (%(filename)s:%(lineno)d) - %(message)s'

# URL templates, filled in with (port, path) via str.format.
gconfig = {
    "ensime_server": "ws://127.0.0.1:{}/{}",
    "localhost": "http://127.0.0.1:{}/{}",
}

# Messages for user feedback, possible l10n fodder. Please keep alphabetized.
feedback = {
    "analyzer_ready": "Analyzer is ready",
    "failed_refactoring": "The refactoring could not be applied (more info at logs)",
    "full_types_enabled_off": "Qualified type display disabled",
    "full_types_enabled_on": "Qualified type display enabled",
    "handler_not_implemented":
        "The feature {} is not supported by the current Ensime server version {}",
    "indexer_ready": "Indexer is ready",
    "invalid_java": "Java not found or not executable, verify :java-home in your .ensime config",
    "manual_doc": "Go to {}",
    "missing_debug_class": "You must specify a class to debug",
    "notify_break": "Execution paused at breakpoint line {} in {}",
    "package_inspect_current": "Using currently focused package...",
    "prompt_server_install":
        "Please run :EnInstall to install the ENSIME server for Scala {scala_version}",
    "spawned_browser": "Opened tab {}",
    "start_message": "Server has been started...",
    "symbol_search_symbol_required": "Must provide symbols to search for!",
    "typechecking": "Typechecking...",
    "unknown_symbol": "Symbol not found",
    "false_response": "Unable to process command",
}
class ProjectConfig(collections.abc.Mapping):
    """A dict-like immutable representation of an ENSIME project configuration.

    Subclasses ``collections.abc.Mapping`` — the bare ``collections.Mapping``
    alias was deprecated since Python 3.3 and removed in Python 3.10, so the
    original base class breaks on modern interpreters.

    Args:
        filepath (str): Path of an ``.ensime`` file to parse.
    """

    def __init__(self, filepath):
        # Canonicalize so filepath/__repr__ are stable regardless of CWD.
        self._filepath = os.path.realpath(filepath)
        self.__data = self.parse(filepath)

    # Provide the Mapping protocol requirements

    def __getitem__(self, key):
        return self.__data[key]

    def __iter__(self):
        return iter(self.__data)

    def __len__(self):
        return len(self.__data)

    def __repr__(self):
        return "{name}({path!r})".format(
            name=self.__class__.__name__,
            path=self.filepath
        )

    @property
    def filepath(self):
        """str: The canonical path of the represented config file."""
        return self._filepath

    @staticmethod
    def find_from(path):
        """Find path of an .ensime config, searching recursively upward from path.

        Args:
            path (str): Path of a file or directory from where to start searching.

        Returns:
            str: Canonical path of nearest ``.ensime``, or ``None`` if not found.
        """
        realpath = os.path.realpath(path)
        config_path = os.path.join(realpath, '.ensime')
        if os.path.isfile(config_path):
            return config_path
        elif realpath == os.path.abspath('/'):
            # Reached the filesystem root without finding a config.
            return None
        else:
            dirname = os.path.dirname(realpath)
            return ProjectConfig.find_from(dirname)

    @staticmethod
    def parse(path):
        """Parse an ``.ensime`` config file from S-expressions.

        Args:
            path (str): Path of an ``.ensime`` file to parse.

        Returns:
            dict: Configuration values with string keys.
        """
        def paired(iterable):
            """s -> (s0, s1), (s2, s3), (s4, s5), ..."""
            cursor = iter(iterable)
            return zip(cursor, cursor)

        def unwrap_if_sexp_symbol(datum):
            """Convert Symbol(':key') to ':key' (Symbol isn't hashable for dict keys).
            """
            return datum.value() if isinstance(datum, sexpdata.Symbol) else datum

        def sexp2dict(sexps):
            """Transforms a nested list structure from sexpdata to dict."""
            newdict = {}

            # Turn flat list into associative pairs
            for key, value in paired(sexps):
                key = str(unwrap_if_sexp_symbol(key)).lstrip(':')

                # Recursively transform nested lists
                if isinstance(value, list) and value:
                    if isinstance(value[0], list):
                        newdict[key] = [sexp2dict(val) for val in value]
                    elif isinstance(value[0], sexpdata.Symbol):
                        newdict[key] = sexp2dict(value)
                    else:
                        newdict[key] = value
                else:
                    newdict[key] = value

            return newdict

        conf = sexpdata.loads(Util.read_file(path))
        return sexp2dict(conf)
| 33.524476
| 97
| 0.605131
|
4a095caaf687a03d63e2cc8541859abdbb69bdfb
| 3,247
|
py
|
Python
|
databases/regression/randPolynomialEquationSystem/100systems_100samplesPerSys.py
|
Mortrack/CenyML
|
b54080d01491c89b311dfde3b980434db04de4d2
|
[
"Apache-2.0"
] | null | null | null |
databases/regression/randPolynomialEquationSystem/100systems_100samplesPerSys.py
|
Mortrack/CenyML
|
b54080d01491c89b311dfde3b980434db04de4d2
|
[
"Apache-2.0"
] | null | null | null |
databases/regression/randPolynomialEquationSystem/100systems_100samplesPerSys.py
|
Mortrack/CenyML
|
b54080d01491c89b311dfde3b980434db04de4d2
|
[
"Apache-2.0"
] | null | null | null |
# --------------------------------------------------------------------------- #
# AUTHOR: César Miranda Meza
# COMPLETITION DATE: September 21, 2021.
# LAST UPDATE: November 08, 2021.
#
# Generates a regression database (100 systems x 100 samples each) for
# evaluation/validation of the CenyML library. Each sample is a noisy
# evaluation of a fixed quartic polynomial.
# --------------------------------------------------------------------------- #
# Python version 3.9.7

import random

# ----- Configuration (output file and sampling parameters) ----- #
f = open("100systems_100samplesPerSys.txt","w+")
totalSystemsEvaluated = 100;       # number of systems to sample
samplesGeneratedPerSystem = 100;   # samples generated per system
rangeOfIndependentVariables = 100; # independent variable ranges over (0, 100]
sampleTime = rangeOfIndependentVariables / samplesGeneratedPerSystem # spacing between consecutive samples
f.write("id;system_id;dependent_variable;independent_variable_1\n") # header row (semicolon-separated)

# ----- Generate the sample data and store it into the specified file ----- #
currentRandomValue1 = 0 # additive noise term for the current sample
currentIndVarData1 = 0  # dependent-variable value of the current sample
currentId = 1           # running row id across all systems
x1 = 0                  # current value of the independent variable
for currentSystem in range(1, totalSystemsEvaluated+1):
    for currentSampleOfCurrentSystem in range(1, samplesGeneratedPerSystem+1):
        # Uniform noise in [-10, +10). NOTE: random is unseeded, so output
        # differs between runs.
        currentRandomValue1 = (random.random()*2-1)*10
        x1 = (sampleTime * currentSampleOfCurrentSystem)
        # Quartic polynomial plus noise governs all generated systems.
        currentIndVarData1 = (89 - 5.41*x1 + 0.19*x1**2 - 0.0028*x1**3 + +(1.4e-5)*x1**4) + currentRandomValue1
        f.write(format(currentId) + ";" + format(currentSystem) + ";" + format(currentIndVarData1) + ";" + format(sampleTime * currentSampleOfCurrentSystem) + "\n")
        currentId = currentId + 1

# ----- Close the file when the program is done inserting data into it ----- #
f.close()
| 63.666667
| 208
| 0.63166
|
4a095d1098c0691c7837ebbf718843aa29881870
| 22,736
|
py
|
Python
|
python-profiles/STANDA/8MID12-1-H.py
|
EPC-MSU/libximc
|
b0349721f57c8274b098a7b646d7ae67b8e70b9d
|
[
"BSD-2-Clause"
] | 3
|
2020-12-08T14:41:48.000Z
|
2022-02-23T13:42:42.000Z
|
python-profiles/STANDA/8MID12-1-H.py
|
EPC-MSU/libximc
|
b0349721f57c8274b098a7b646d7ae67b8e70b9d
|
[
"BSD-2-Clause"
] | 4
|
2020-12-08T20:15:06.000Z
|
2021-12-08T14:15:24.000Z
|
python-profiles/STANDA/8MID12-1-H.py
|
EPC-MSU/libximc
|
b0349721f57c8274b098a7b646d7ae67b8e70b9d
|
[
"BSD-2-Clause"
] | 2
|
2020-11-02T02:17:35.000Z
|
2021-03-18T14:13:56.000Z
|
def set_profile_8MID12_1_H(lib, id):
    """Write the full 8MID12-1-H stage profile to controller *id* via *lib*.

    Auto-generated: each section fills one ``*_settings_t`` structure and
    pushes it with the matching ``lib.set_*`` call.  The first non-Ok,
    non-ValueError result is kept and returned as ``worst_result``.
    NOTE: the parameter name ``id`` shadows the builtin; kept for
    interface compatibility with the generator's callers.
    """
    worst_result = Result.Ok
    result = Result.Ok
    # --- Feedback settings ---
    feedback_settings = feedback_settings_t()
    feedback_settings.IPS = 4000
    # Local enum classes below mirror the C API's flag constants; some are
    # emitted by the generator even when no field is assigned from them.
    class FeedbackType_:
        FEEDBACK_ENCODER_MEDIATED = 6
        FEEDBACK_NONE = 5
        FEEDBACK_EMF = 4
        FEEDBACK_ENCODER = 1
    feedback_settings.FeedbackType = FeedbackType_.FEEDBACK_NONE
    class FeedbackFlags_:
        FEEDBACK_ENC_TYPE_BITS = 192
        FEEDBACK_ENC_TYPE_DIFFERENTIAL = 128
        FEEDBACK_ENC_TYPE_SINGLE_ENDED = 64
        FEEDBACK_ENC_REVERSE = 1
        FEEDBACK_ENC_TYPE_AUTO = 0
    feedback_settings.FeedbackFlags = FeedbackFlags_.FEEDBACK_ENC_TYPE_SINGLE_ENDED | FeedbackFlags_.FEEDBACK_ENC_TYPE_AUTO
    feedback_settings.CountsPerTurn = 4000
    result = lib.set_feedback_settings(id, byref(feedback_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Homing settings ---
    home_settings = home_settings_t()
    home_settings.FastHome = 50
    home_settings.uFastHome = 0
    home_settings.SlowHome = 500
    home_settings.uSlowHome = 0
    home_settings.HomeDelta = 1020
    home_settings.uHomeDelta = 0
    class HomeFlags_:
        HOME_USE_FAST = 256
        HOME_STOP_SECOND_BITS = 192
        HOME_STOP_SECOND_LIM = 192
        HOME_STOP_SECOND_SYN = 128
        HOME_STOP_SECOND_REV = 64
        HOME_STOP_FIRST_BITS = 48
        HOME_STOP_FIRST_LIM = 48
        HOME_STOP_FIRST_SYN = 32
        HOME_STOP_FIRST_REV = 16
        HOME_HALF_MV = 8
        HOME_MV_SEC_EN = 4
        HOME_DIR_SECOND = 2
        HOME_DIR_FIRST = 1
    home_settings.HomeFlags = HomeFlags_.HOME_USE_FAST | HomeFlags_.HOME_STOP_SECOND_REV | HomeFlags_.HOME_STOP_FIRST_BITS | HomeFlags_.HOME_DIR_SECOND
    result = lib.set_home_settings(id, byref(home_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Motion (speed/acceleration) settings ---
    move_settings = move_settings_t()
    move_settings.Speed = 800
    move_settings.uSpeed = 0
    move_settings.Accel = 3200
    move_settings.Decel = 4800
    move_settings.AntiplaySpeed = 800
    move_settings.uAntiplaySpeed = 0
    class MoveFlags_:
        RPM_DIV_1000 = 1
    result = lib.set_move_settings(id, byref(move_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Engine (motor drive) settings ---
    engine_settings = engine_settings_t()
    engine_settings.NomVoltage = 50
    engine_settings.NomCurrent = 200
    engine_settings.NomSpeed = 1600
    engine_settings.uNomSpeed = 0
    class EngineFlags_:
        ENGINE_LIMIT_RPM = 128
        ENGINE_LIMIT_CURR = 64
        ENGINE_LIMIT_VOLT = 32
        ENGINE_ACCEL_ON = 16
        ENGINE_ANTIPLAY = 8
        ENGINE_MAX_SPEED = 4
        ENGINE_CURRENT_AS_RMS = 2
        ENGINE_REVERSE = 1
    engine_settings.EngineFlags = EngineFlags_.ENGINE_LIMIT_RPM | EngineFlags_.ENGINE_ACCEL_ON | EngineFlags_.ENGINE_REVERSE
    engine_settings.Antiplay = 326
    class MicrostepMode_:
        MICROSTEP_MODE_FRAC_256 = 9
        MICROSTEP_MODE_FRAC_128 = 8
        MICROSTEP_MODE_FRAC_64 = 7
        MICROSTEP_MODE_FRAC_32 = 6
        MICROSTEP_MODE_FRAC_16 = 5
        MICROSTEP_MODE_FRAC_8 = 4
        MICROSTEP_MODE_FRAC_4 = 3
        MICROSTEP_MODE_FRAC_2 = 2
        MICROSTEP_MODE_FULL = 1
    engine_settings.MicrostepMode = MicrostepMode_.MICROSTEP_MODE_FRAC_256
    engine_settings.StepsPerRev = 20
    result = lib.set_engine_settings(id, byref(engine_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Engine/driver type ---
    entype_settings = entype_settings_t()
    class EngineType_:
        ENGINE_TYPE_BRUSHLESS = 5
        ENGINE_TYPE_TEST = 4
        ENGINE_TYPE_STEP = 3
        ENGINE_TYPE_2DC = 2
        ENGINE_TYPE_DC = 1
        ENGINE_TYPE_NONE = 0
    # OR-ing with ENGINE_TYPE_NONE (0) is a no-op; kept as the generator emits it.
    entype_settings.EngineType = EngineType_.ENGINE_TYPE_STEP | EngineType_.ENGINE_TYPE_NONE
    class DriverType_:
        DRIVER_TYPE_EXTERNAL = 3
        DRIVER_TYPE_INTEGRATE = 2
        DRIVER_TYPE_DISCRETE_FET = 1
    entype_settings.DriverType = DriverType_.DRIVER_TYPE_INTEGRATE
    result = lib.set_entype_settings(id, byref(entype_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Power management ---
    power_settings = power_settings_t()
    power_settings.HoldCurrent = 50
    power_settings.CurrReductDelay = 1000
    power_settings.PowerOffDelay = 60
    power_settings.CurrentSetTime = 300
    class PowerFlags_:
        POWER_SMOOTH_CURRENT = 4
        POWER_OFF_ENABLED = 2
        POWER_REDUCT_ENABLED = 1
    power_settings.PowerFlags = PowerFlags_.POWER_SMOOTH_CURRENT | PowerFlags_.POWER_REDUCT_ENABLED
    result = lib.set_power_settings(id, byref(power_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Protection / alarm thresholds ---
    secure_settings = secure_settings_t()
    secure_settings.LowUpwrOff = 800
    secure_settings.CriticalIpwr = 4000
    secure_settings.CriticalUpwr = 5500
    secure_settings.CriticalT = 800
    secure_settings.CriticalIusb = 450
    secure_settings.CriticalUusb = 520
    secure_settings.MinimumUusb = 420
    class Flags_:
        ALARM_ENGINE_RESPONSE = 128
        ALARM_WINDING_MISMATCH = 64
        USB_BREAK_RECONNECT = 32
        ALARM_FLAGS_STICKING = 16
        ALARM_ON_BORDERS_SWAP_MISSET = 8
        H_BRIDGE_ALERT = 4
        LOW_UPWR_PROTECTION = 2
        ALARM_ON_DRIVER_OVERHEATING = 1
    secure_settings.Flags = Flags_.ALARM_ENGINE_RESPONSE | Flags_.ALARM_FLAGS_STICKING | Flags_.ALARM_ON_BORDERS_SWAP_MISSET | Flags_.H_BRIDGE_ALERT | Flags_.ALARM_ON_DRIVER_OVERHEATING
    result = lib.set_secure_settings(id, byref(secure_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Travel borders and limit switches ---
    edges_settings = edges_settings_t()
    class BorderFlags_:
        BORDERS_SWAP_MISSET_DETECTION = 8
        BORDER_STOP_RIGHT = 4
        BORDER_STOP_LEFT = 2
        BORDER_IS_ENCODER = 1
    edges_settings.BorderFlags = BorderFlags_.BORDER_STOP_RIGHT | BorderFlags_.BORDER_STOP_LEFT
    class EnderFlags_:
        ENDER_SW2_ACTIVE_LOW = 4
        ENDER_SW1_ACTIVE_LOW = 2
        ENDER_SWAP = 1
    edges_settings.EnderFlags = EnderFlags_.ENDER_SW2_ACTIVE_LOW | EnderFlags_.ENDER_SW1_ACTIVE_LOW | EnderFlags_.ENDER_SWAP
    edges_settings.LeftBorder = -971
    edges_settings.uLeftBorder = 0
    edges_settings.RightBorder = 910
    edges_settings.uRightBorder = 0
    result = lib.set_edges_settings(id, byref(edges_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- PID gains (voltage gains zeroed; frequency gains tuned) ---
    pid_settings = pid_settings_t()
    pid_settings.KpU = 0
    pid_settings.KiU = 0
    pid_settings.KdU = 0
    pid_settings.Kpf = 0.003599999938160181
    pid_settings.Kif = 0.03799999877810478
    pid_settings.Kdf = 2.8000000384054147e-05
    result = lib.set_pid_settings(id, byref(pid_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Sync input ---
    # NOTE(review): SyncInFlags is never assigned here (enum class unused);
    # the field keeps the ctypes default — confirm this is intentional.
    sync_in_settings = sync_in_settings_t()
    class SyncInFlags_:
        SYNCIN_GOTOPOSITION = 4
        SYNCIN_INVERT = 2
        SYNCIN_ENABLED = 1
    sync_in_settings.ClutterTime = 4
    sync_in_settings.Position = 0
    sync_in_settings.uPosition = 0
    sync_in_settings.Speed = 0
    sync_in_settings.uSpeed = 0
    result = lib.set_sync_in_settings(id, byref(sync_in_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Sync output ---
    sync_out_settings = sync_out_settings_t()
    class SyncOutFlags_:
        SYNCOUT_ONPERIOD = 64
        SYNCOUT_ONSTOP = 32
        SYNCOUT_ONSTART = 16
        SYNCOUT_IN_STEPS = 8
        SYNCOUT_INVERT = 4
        SYNCOUT_STATE = 2
        SYNCOUT_ENABLED = 1
    sync_out_settings.SyncOutFlags = SyncOutFlags_.SYNCOUT_ONSTOP | SyncOutFlags_.SYNCOUT_ONSTART
    sync_out_settings.SyncOutPulseSteps = 100
    sync_out_settings.SyncOutPeriod = 2000
    sync_out_settings.Accuracy = 0
    sync_out_settings.uAccuracy = 0
    result = lib.set_sync_out_settings(id, byref(sync_out_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- External I/O pin configuration ---
    extio_settings = extio_settings_t()
    class EXTIOSetupFlags_:
        EXTIO_SETUP_INVERT = 2
        EXTIO_SETUP_OUTPUT = 1
    extio_settings.EXTIOSetupFlags = EXTIOSetupFlags_.EXTIO_SETUP_OUTPUT
    class EXTIOModeFlags_:
        EXTIO_SETUP_MODE_OUT_BITS = 240
        EXTIO_SETUP_MODE_OUT_MOTOR_ON = 64
        EXTIO_SETUP_MODE_OUT_ALARM = 48
        EXTIO_SETUP_MODE_OUT_MOVING = 32
        EXTIO_SETUP_MODE_OUT_ON = 16
        EXTIO_SETUP_MODE_IN_BITS = 15
        EXTIO_SETUP_MODE_IN_ALARM = 5
        EXTIO_SETUP_MODE_IN_HOME = 4
        EXTIO_SETUP_MODE_IN_MOVR = 3
        EXTIO_SETUP_MODE_IN_PWOF = 2
        EXTIO_SETUP_MODE_IN_STOP = 1
        EXTIO_SETUP_MODE_IN_NOP = 0
        EXTIO_SETUP_MODE_OUT_OFF = 0
    extio_settings.EXTIOModeFlags = EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_STOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_NOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_OUT_OFF
    result = lib.set_extio_settings(id, byref(extio_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Brake timing ---
    brake_settings = brake_settings_t()
    brake_settings.t1 = 300
    brake_settings.t2 = 500
    brake_settings.t3 = 300
    brake_settings.t4 = 400
    class BrakeFlags_:
        BRAKE_ENG_PWROFF = 2
        BRAKE_ENABLED = 1
    brake_settings.BrakeFlags = BrakeFlags_.BRAKE_ENG_PWROFF
    result = lib.set_brake_settings(id, byref(brake_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Manual control (joystick / left-right buttons) ---
    control_settings = control_settings_t()
    control_settings.MaxSpeed[0] = 80
    control_settings.MaxSpeed[1] = 800
    control_settings.MaxSpeed[2] = 0
    control_settings.MaxSpeed[3] = 0
    control_settings.MaxSpeed[4] = 0
    control_settings.MaxSpeed[5] = 0
    control_settings.MaxSpeed[6] = 0
    control_settings.MaxSpeed[7] = 0
    control_settings.MaxSpeed[8] = 0
    control_settings.MaxSpeed[9] = 0
    control_settings.uMaxSpeed[0] = 0
    control_settings.uMaxSpeed[1] = 0
    control_settings.uMaxSpeed[2] = 0
    control_settings.uMaxSpeed[3] = 0
    control_settings.uMaxSpeed[4] = 0
    control_settings.uMaxSpeed[5] = 0
    control_settings.uMaxSpeed[6] = 0
    control_settings.uMaxSpeed[7] = 0
    control_settings.uMaxSpeed[8] = 0
    control_settings.uMaxSpeed[9] = 0
    # NOTE(review): Timeout[9] is never assigned (only indices 0-8) — it keeps
    # the ctypes default; confirm against the generator's intent.
    control_settings.Timeout[0] = 1000
    control_settings.Timeout[1] = 1000
    control_settings.Timeout[2] = 1000
    control_settings.Timeout[3] = 1000
    control_settings.Timeout[4] = 1000
    control_settings.Timeout[5] = 1000
    control_settings.Timeout[6] = 1000
    control_settings.Timeout[7] = 1000
    control_settings.Timeout[8] = 1000
    control_settings.MaxClickTime = 300
    class Flags_:
        CONTROL_BTN_RIGHT_PUSHED_OPEN = 8
        CONTROL_BTN_LEFT_PUSHED_OPEN = 4
        CONTROL_MODE_BITS = 3
        CONTROL_MODE_LR = 2
        CONTROL_MODE_JOY = 1
        CONTROL_MODE_OFF = 0
    control_settings.Flags = Flags_.CONTROL_MODE_LR | Flags_.CONTROL_MODE_OFF
    control_settings.DeltaPosition = 1
    control_settings.uDeltaPosition = 0
    result = lib.set_control_settings(id, byref(control_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Joystick calibration ---
    joystick_settings = joystick_settings_t()
    joystick_settings.JoyLowEnd = 0
    joystick_settings.JoyCenter = 5000
    joystick_settings.JoyHighEnd = 10000
    joystick_settings.ExpFactor = 100
    joystick_settings.DeadZone = 50
    class JoyFlags_:
        JOY_REVERSE = 1
    result = lib.set_joystick_settings(id, byref(joystick_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Position correction (CTP) ---
    ctp_settings = ctp_settings_t()
    ctp_settings.CTPMinError = 3
    class CTPFlags_:
        CTP_ERROR_CORRECTION = 16
        REV_SENS_INV = 8
        CTP_ALARM_ON_ERROR = 4
        CTP_BASE = 2
        CTP_ENABLED = 1
    ctp_settings.CTPFlags = CTPFlags_.CTP_ERROR_CORRECTION
    result = lib.set_ctp_settings(id, byref(ctp_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- UART ---
    uart_settings = uart_settings_t()
    uart_settings.Speed = 115200
    class UARTSetupFlags_:
        UART_STOP_BIT = 8
        UART_PARITY_BIT_USE = 4
        UART_PARITY_BITS = 3
        UART_PARITY_BIT_MARK = 3
        UART_PARITY_BIT_SPACE = 2
        UART_PARITY_BIT_ODD = 1
        UART_PARITY_BIT_EVEN = 0
    uart_settings.UARTSetupFlags = UARTSetupFlags_.UART_PARITY_BIT_EVEN
    result = lib.set_uart_settings(id, byref(uart_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Controller name (fixed-size byte buffer) ---
    controller_name = controller_name_t()
    controller_name.ControllerName = bytes([0, 113, 252, 118, 36, 0, 72, 0, 3, 0, 0, 0, 104, 101, 103, 0])
    class CtrlFlags_:
        EEPROM_PRECEDENCE = 1
    result = lib.set_controller_name(id, byref(controller_name))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Back-EMF model (all zero; BackEMFFlags left at default) ---
    emf_settings = emf_settings_t()
    emf_settings.L = 0
    emf_settings.R = 0
    emf_settings.Km = 0
    class BackEMFFlags_:
        BACK_EMF_KM_AUTO = 4
        BACK_EMF_RESISTANCE_AUTO = 2
        BACK_EMF_INDUCTANCE_AUTO = 1
    result = lib.set_emf_settings(id, byref(emf_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Advanced step-loop tuning ---
    engine_advansed_setup = engine_advansed_setup_t()
    engine_advansed_setup.stepcloseloop_Kw = 50
    engine_advansed_setup.stepcloseloop_Kp_low = 1000
    engine_advansed_setup.stepcloseloop_Kp_high = 33
    result = lib.set_engine_advansed_setup(id, byref(engine_advansed_setup))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Extended/reserved parameters ---
    extended_settings = extended_settings_t()
    extended_settings.Param1 = 0
    result = lib.set_extended_settings(id, byref(extended_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Stage identity and mechanical data ---
    stage_name = stage_name_t()
    stage_name.PositionerName = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    result = lib.set_stage_name(id, byref(stage_name))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    stage_information = stage_information_t()
    stage_information.Manufacturer = bytes([83, 116, 97, 110, 100, 97, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    stage_information.PartNumber = bytes([56, 77, 73, 68, 49, 50, 45, 49, 45, 72, 0, 0, 95, 49, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    result = lib.set_stage_information(id, byref(stage_information))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    stage_settings = stage_settings_t()
    stage_settings.LeadScrewPitch = 0.25
    stage_settings.Units = bytes([109, 109, 0, 114, 101, 101, 0, 0])
    stage_settings.MaxSpeed = 0
    stage_settings.TravelRange = 11
    stage_settings.SupplyVoltageMin = 5
    stage_settings.SupplyVoltageMax = 12
    stage_settings.MaxCurrentConsumption = 0
    stage_settings.HorizontalLoadCapacity = 0
    stage_settings.VerticalLoadCapacity = 0
    result = lib.set_stage_settings(id, byref(stage_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Motor identity and parameters (mostly unset/zero for this stage) ---
    motor_information = motor_information_t()
    motor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    motor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    result = lib.set_motor_information(id, byref(motor_information))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    motor_settings = motor_settings_t()
    class MotorType_:
        MOTOR_TYPE_BLDC = 3
        MOTOR_TYPE_DC = 2
        MOTOR_TYPE_STEP = 1
        MOTOR_TYPE_UNKNOWN = 0
    motor_settings.MotorType = MotorType_.MOTOR_TYPE_STEP | MotorType_.MOTOR_TYPE_UNKNOWN
    motor_settings.ReservedField = 0
    motor_settings.Poles = 0
    motor_settings.Phases = 0
    motor_settings.NominalVoltage = 0
    motor_settings.NominalCurrent = 0
    motor_settings.NominalSpeed = 0
    motor_settings.NominalTorque = 0
    motor_settings.NominalPower = 0
    motor_settings.WindingResistance = 0
    motor_settings.WindingInductance = 0
    motor_settings.RotorInertia = 0
    motor_settings.StallTorque = 0
    motor_settings.DetentTorque = 0
    motor_settings.TorqueConstant = 0
    motor_settings.SpeedConstant = 0
    motor_settings.SpeedTorqueGradient = 0
    motor_settings.MechanicalTimeConstant = 0
    motor_settings.MaxSpeed = 1600
    motor_settings.MaxCurrent = 0
    motor_settings.MaxCurrentTime = 0
    motor_settings.NoLoadCurrent = 0
    motor_settings.NoLoadSpeed = 0
    result = lib.set_motor_settings(id, byref(motor_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Encoder (informational; EncoderSettings flags left at default) ---
    encoder_information = encoder_information_t()
    encoder_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    encoder_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    result = lib.set_encoder_information(id, byref(encoder_information))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    encoder_settings = encoder_settings_t()
    encoder_settings.MaxOperatingFrequency = 0
    encoder_settings.SupplyVoltageMin = 0
    encoder_settings.SupplyVoltageMax = 0
    encoder_settings.MaxCurrentConsumption = 0
    encoder_settings.PPR = 1000
    class EncoderSettings_:
        ENCSET_REVOLUTIONSENSOR_ACTIVE_HIGH = 256
        ENCSET_REVOLUTIONSENSOR_PRESENT = 64
        ENCSET_INDEXCHANNEL_PRESENT = 16
        ENCSET_PUSHPULL_OUTPUT = 4
        ENCSET_DIFFERENTIAL_OUTPUT = 1
    result = lib.set_encoder_settings(id, byref(encoder_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Hall sensor (not present: all zero) ---
    hallsensor_information = hallsensor_information_t()
    hallsensor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    hallsensor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    result = lib.set_hallsensor_information(id, byref(hallsensor_information))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    hallsensor_settings = hallsensor_settings_t()
    hallsensor_settings.MaxOperatingFrequency = 0
    hallsensor_settings.SupplyVoltageMin = 0
    hallsensor_settings.SupplyVoltageMax = 0
    hallsensor_settings.MaxCurrentConsumption = 0
    hallsensor_settings.PPR = 0
    result = lib.set_hallsensor_settings(id, byref(hallsensor_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Gear (9:4 reduction) ---
    gear_information = gear_information_t()
    gear_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    gear_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    result = lib.set_gear_information(id, byref(gear_information))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    gear_settings = gear_settings_t()
    gear_settings.ReductionIn = 9
    gear_settings.ReductionOut = 4
    gear_settings.RatedInputTorque = 0
    gear_settings.RatedInputSpeed = 0
    gear_settings.MaxOutputBacklash = 0
    gear_settings.InputInertia = 0
    gear_settings.Efficiency = 0
    result = lib.set_gear_settings(id, byref(gear_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    # --- Accessories (brake, temperature sensor, limit switches) ---
    accessories_settings = accessories_settings_t()
    accessories_settings.MagneticBrakeInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    accessories_settings.MBRatedVoltage = 0
    accessories_settings.MBRatedCurrent = 0
    accessories_settings.MBTorque = 0
    class MBSettings_:
        MB_POWERED_HOLD = 2
        MB_AVAILABLE = 1
    accessories_settings.TemperatureSensorInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    accessories_settings.TSMin = 0
    accessories_settings.TSMax = 0
    accessories_settings.TSGrad = 0
    class TSSettings_:
        TS_AVAILABLE = 8
        TS_TYPE_BITS = 7
        TS_TYPE_SEMICONDUCTOR = 2
        TS_TYPE_THERMOCOUPLE = 1
        TS_TYPE_UNKNOWN = 0
    accessories_settings.TSSettings = TSSettings_.TS_TYPE_THERMOCOUPLE | TSSettings_.TS_TYPE_UNKNOWN
    class LimitSwitchesSettings_:
        LS_SHORTED = 16
        LS_SW2_ACTIVE_LOW = 8
        LS_SW1_ACTIVE_LOW = 4
        LS_ON_SW2_AVAILABLE = 2
        LS_ON_SW1_AVAILABLE = 1
    result = lib.set_accessories_settings(id, byref(accessories_settings))
    if result != Result.Ok:
        if worst_result == Result.Ok or worst_result == Result.ValueError:
            worst_result = result
    return worst_result
| 35.974684
| 185
| 0.693658
|
4a095e43baea72d6a9009c918c67aa28fd4ab16f
| 4,187
|
py
|
Python
|
mbeddr2C_MM/transformation_from_mps/Hlayer1rule10.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 3
|
2017-06-02T19:26:27.000Z
|
2021-06-14T04:25:45.000Z
|
mbeddr2C_MM/transformation_from_mps/Hlayer1rule10.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 8
|
2016-08-24T07:04:07.000Z
|
2017-05-26T16:22:47.000Z
|
mbeddr2C_MM/transformation_from_mps/Hlayer1rule10.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 1
|
2019-10-31T06:00:23.000Z
|
2019-10-31T06:00:23.000Z
|
from core.himesis import Himesis
import uuid
class Hlayer1rule10(Himesis):
    # Auto-generated Himesis graph for DSLTrans rule layer1rule10.
    # Node indices are fixed: 0=MatchModel, 1=ApplyModel, 2=paired_with,
    # 3-4=match classes, 5-7=apply classes, 8=match association,
    # 9-10=apply associations, 11-13=backward links.  The edge list at the
    # end wires these indices together; do not renumber.
    def __init__(self):
        """
        Creates the himesis graph representing the DSLTrans rule layer1rule10.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(Hlayer1rule10, self).__init__(name='Hlayer1rule10', num_nodes=0, edges=[])
        # Set the graph attributes
        self["mm__"] = ['HimesisMM']
        self["name"] = """layer1rule10"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'layer1rule10')
        # match model. We only support one match model
        self.add_node()
        self.vs[0]["mm__"] = """MatchModel"""
        # apply model node
        self.add_node()
        self.vs[1]["mm__"] = """ApplyModel"""
        # paired with relation between match and apply models
        self.add_node()
        self.vs[2]["mm__"] = """paired_with"""
        self.vs[2]["attr1"] = """layer1rule10"""
        # match class AtomicComponent(layer1rule10class0AtomicComponent) node
        self.add_node()
        self.vs[3]["mm__"] = """AtomicComponent"""
        self.vs[3]["attr1"] = """+"""
        # match class RequiredPort(layer1rule10class1RequiredPort) node
        self.add_node()
        self.vs[4]["mm__"] = """RequiredPort"""
        self.vs[4]["attr1"] = """+"""
        # apply class StructDeclaration(layer1rule10class2StructDeclaration) node
        self.add_node()
        self.vs[5]["mm__"] = """StructDeclaration"""
        self.vs[5]["attr1"] = """1"""
        # apply class Member(layer1rule10class3Member) node
        self.add_node()
        self.vs[6]["mm__"] = """Member"""
        self.vs[6]["attr1"] = """1"""
        # apply class Member(layer1rule10class4Member) node
        self.add_node()
        self.vs[7]["mm__"] = """Member"""
        self.vs[7]["attr1"] = """1"""
        # match association AtomicComponent--contents-->RequiredPort node
        self.add_node()
        self.vs[8]["attr1"] = """contents"""
        self.vs[8]["mm__"] = """directLink_S"""
        # apply association StructDeclaration--members-->Member node
        self.add_node()
        self.vs[9]["attr1"] = """members"""
        self.vs[9]["mm__"] = """directLink_T"""
        # apply association StructDeclaration--members-->Member node
        self.add_node()
        self.vs[10]["attr1"] = """members"""
        self.vs[10]["mm__"] = """directLink_T"""
        # backward association StructDeclaration-->AtomicComponent node
        self.add_node()
        self.vs[11]["mm__"] = """backward_link"""
        # backward association Member-->RequiredPort node
        self.add_node()
        self.vs[12]["mm__"] = """backward_link"""
        # backward association Member-->RequiredPort node
        self.add_node()
        self.vs[13]["mm__"] = """backward_link"""
        # Add the edges (pairs of node indices defined above)
        self.add_edges([
            (0,3), # matchmodel -> match_class AtomicComponent(layer1rule10class0AtomicComponent)
            (0,4), # matchmodel -> match_class RequiredPort(layer1rule10class1RequiredPort)
            (1,5), # applymodel -> apply_classStructDeclaration(layer1rule10class2StructDeclaration)
            (1,6), # applymodel -> apply_classMember(layer1rule10class3Member)
            (1,7), # applymodel -> apply_classMember(layer1rule10class4Member)
            (3,8), # match classAtomicComponent(layer1rule10class0AtomicComponent) -> association contents
            (8,4), # associationcontents -> match_classAtomicComponent(layer1rule10class1RequiredPort)
            (5,9), # apply class StructDeclaration(layer1rule10class2StructDeclaration) -> association members
            (9,6), # associationmembers -> apply_classMember(layer1rule10class3Member)
            (5,10), # apply class StructDeclaration(layer1rule10class2StructDeclaration) -> association members
            (10,7), # associationmembers -> apply_classMember(layer1rule10class4Member)
            (5,11), # apply class StructDeclaration(layer1rule10class0AtomicComponent) -> backward_association
            (11,3), # backward_associationAtomicComponent -> match_class AtomicComponent(layer1rule10class0AtomicComponent)
            (6,12), # apply class Member(layer1rule10class1RequiredPort) -> backward_association
            (12,4), # backward_associationRequiredPort -> match_class RequiredPort(layer1rule10class1RequiredPort)
            (7,13), # apply class Member(layer1rule10class1RequiredPort) -> backward_association
            (13,4), # backward_associationRequiredPort -> match_class RequiredPort(layer1rule10class1RequiredPort)
            (0,2), # matchmodel -> pairedwith
            (2,1) # pairedwith -> applyModel
        ])
        # No attribute equations for this rule.
        self["equations"] = []
| 38.768519
| 114
| 0.712443
|
4a095f3d051425aabe415aeaea3efa618df07ba3
| 8,495
|
py
|
Python
|
peripteras/users/api/views.py
|
tsaklidis/e-peripteras
|
39634ca07de535c6a1188af636e394a8966672ca
|
[
"MIT"
] | null | null | null |
peripteras/users/api/views.py
|
tsaklidis/e-peripteras
|
39634ca07de535c6a1188af636e394a8966672ca
|
[
"MIT"
] | null | null | null |
peripteras/users/api/views.py
|
tsaklidis/e-peripteras
|
39634ca07de535c6a1188af636e394a8966672ca
|
[
"MIT"
] | 1
|
2020-12-27T05:16:54.000Z
|
2020-12-27T05:16:54.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from collections import Counter
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.db.models import Q, FieldDoesNotExist
from django.utils.translation import ugettext_lazy as _
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import detail_route
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics, status, mixins, viewsets
from peripteras.kiosks.models import Kiosk, Item
from peripteras.public.api.serializers import KioskSerializer, ItemkSerializer
from peripteras.users.api.serializers import BasketSerializer
from peripteras.common.mixins import FilterMixin
class AddToBasket(FilterMixin, generics.CreateAPIView):
    """
    Endpoint for adding items to basket
    http://localhost:8001/user/api/basket/add/

    The basket lives in the session as ``orders_holder``: a list of dicts
    ``{'kiosk': <kiosk_id str>, 'items': [<item_id str>, ...]}``, one entry
    per kiosk.  NOTE(review): ids come from query params and stay strings;
    comparisons below are string comparisons — confirm callers send strings.
    """
    # authentication_classes = (SessionAuthentication, )
    # permission_classes = (IsAuthenticated, )
    # queryset = Item.objects.all()
    serializer_class = ItemkSerializer

    def get(self, request):
        """Add ``item_id`` to the order for ``kiosk_id`` (GET with side effects)."""
        item_id = request.GET.get("item_id", None)
        kiosk_id = request.GET.get("kiosk_id", None)
        # basket = request.session.get('basket', None)
        kiosk_ids = []
        orders_holder = request.session.get('orders_holder', None)
        if item_id and kiosk_id:
            if orders_holder:
                # users has at least one order
                for order in orders_holder:
                    kiosk_ids.append(order['kiosk'])
                if kiosk_id in kiosk_ids:
                    for order in orders_holder:
                        if order['kiosk'] == kiosk_id:
                            # 'this kiosk has items, add item to item list'
                            order['items'].append(item_id)
                            # NOTE(review): orders_holder and this .get() alias
                            # the same session list; append-then-remove moves
                            # the order to the end, and the re-assignment marks
                            # the session modified.  Confirm this is intended.
                            orders_holder_tmp = request.session.get(
                                'orders_holder', None)
                            orders_holder_tmp.append(order)
                            request.session[
                                'orders_holder'] = orders_holder_tmp
                            # remove this order from session
                            orders_holder.remove(order)
                            data = {
                                'msg': 'Προστέθηκε στο καλάθι'
                            }
                            return Response(data, status=status.HTTP_200_OK)
                else:
                    # create new order for new kiosk
                    items_list = [item_id]
                    order = {
                        'kiosk': kiosk_id,
                        'items': items_list
                    }
                    tmp_orders_holder = orders_holder
                    tmp_orders_holder.append(order)
                    request.session['orders_holder'] = tmp_orders_holder
                    data = {
                        'msg': 'Νέα παραγελία. Μπήκε στο καλάθι'
                    }
                    return Response(data, status=status.HTTP_200_OK)
            else:
                # init the orders sesion holder
                request.session['orders_holder'] = []
                # create an order dict
                items_list = [item_id]
                order = {
                    'kiosk': kiosk_id,
                    'items': items_list
                }
                tmp_orders_holder = request.session.get('orders_holder', None)
                tmp_orders_holder.append(order)
                request.session['orders_holder'] = tmp_orders_holder
                data = {
                    'msg': 'Μπήκε το πρώτο αντικείμενο στο καλάθι'
                }
                return Response(data, status=status.HTTP_200_OK)
        else:
            # Missing item_id and/or kiosk_id query parameter.
            data = {
                'msg': 'error no kiosk item id'
            }
            return Response(data, status=status.HTTP_406_NOT_ACCEPTABLE)
        # order = {
        #     'kiosk':kiosk_id,
        #     'items':basket
        # }
        # # order_init = ['order1','order2']
        # # request.session['orders'] = order_init
        # tmp_orders = request.session.get('orders', None)
        # tmp_orders.append(order)
        # request.session['orders'] = tmp_orders
        # item_to_add = Item.objects.get(id=item_id)
        # if basket:
        #     basket = request.session['basket']
        #     basket.append(item_to_add.id)
        #     request.session['basket'] = basket
        # else:
        #     basket = []
        #     basket.append(item_to_add.id)
        #     request.session['basket'] = basket
        # Fallback only reachable if the matched-kiosk loop falls through
        # without finding the order (should not happen).
        data = {
            'msg': 'whaat'
        }
        return Response(data, status=status.HTTP_200_OK)
class GetFromBasket(FilterMixin, generics.CreateAPIView):
    """
    Endpoint for fetching the basket
    http://localhost:8001/user/api/basket/get/

    Returns (item, count) pairs for the session order that matches
    ``kiosk_id``, with the kiosk's delivery fee appended as a synthetic
    unsaved ``Item``.
    """
    # authentication_classes = (SessionAuthentication, )
    # permission_classes = (IsAuthenticated, )
    # queryset = Item.objects.all()
    serializer_class = BasketSerializer

    def get(self, request):
        """Serialize the basket contents for ``kiosk_id`` from the session."""
        kiosk_id = request.GET.get("kiosk_id", None)
        orders_holder = request.session.get('orders_holder', None)
        basket_items_ids = None
        if orders_holder:
            for order in orders_holder:
                if order['kiosk'] == kiosk_id:
                    basket_items_ids = order['items']
        # else:
        #     data = {
        #         'msg':'Άδειο καλάθι'
        #     }
        #     return Response(data, status=status.HTTP_200_OK)
        # basket_items_ids = request.session.get('basket', None)
        # Raises Kiosk.DoesNotExist (HTTP 500) if kiosk_id is missing/unknown.
        kiosk = Kiosk.objects.get(id=kiosk_id)
        basket_items = []
        ziped_data = []
        total_price = 0  # accumulated but not returned; kept for parity
        if basket_items_ids:
            # One query per item id; duplicates in the id list are preserved
            # so Counter below can count quantities.
            for it_id in basket_items_ids:
                tmp_item = Item.objects.get(id=it_id)
                total_price += tmp_item.price
                basket_items.append(tmp_item)
            # Synthetic, unsaved Item representing the delivery fee.
            fee = Item()
            fee.price = kiosk.delivery_fee
            fee.title = _(u'Έξοδα μεταφοράς')
            fee.id = False
            basket_items.append(fee)
        # NOTE(review): Counter groups via the model's __eq__/__hash__ —
        # presumably pk-based Django semantics; verify the unsaved fee item
        # (id=False) hashes safely under the installed Django version.
        unique_items = Counter(basket_items)
        ziped_data = zip(unique_items.keys(), unique_items.values())
        ziped_data = self.apply_filters(request, ziped_data)
        serializer = self.serializer_class(ziped_data, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class RemoveFromBasket(FilterMixin, generics.CreateAPIView):
    """
    Endpoint for removing items from basket
    http://localhost:8001/user/api/basket/remove/

    Removes one occurrence of ``item_id`` from the session order matching
    ``kiosk_id``.  An order left with an empty item list is kept in the
    session (not pruned).
    """
    # authentication_classes = (SessionAuthentication, )
    # permission_classes = (IsAuthenticated, )
    # queryset = Item.objects.all()
    serializer_class = ItemkSerializer

    def get(self, request):
        """Remove ``item_id`` from ``kiosk_id``'s order (GET with side effects)."""
        item_id = request.GET.get("item_id", None)
        kiosk_id = request.GET.get("kiosk_id", None)
        orders_holder = request.session.get('orders_holder', None)
        basket = request.session.get('basket', None)  # legacy key; unused below
        # if item_id:
        #     item_id = int(item_id)
        # Here we get unicode, convert it to int to find inbasket type = (list)
        if orders_holder:
            for order in orders_holder:
                if order['kiosk'] == kiosk_id:
                    if item_id not in order['items']:
                        data = {
                            'msg': 'Δεν ηταν στο καλάθι'
                        }
                        return Response(data, status=status.HTTP_200_OK)
                    # list.remove drops only the first occurrence, so
                    # quantities decrement by one per request.
                    order['items'].remove(item_id)
                    # NOTE(review): same aliasing dance as AddToBasket —
                    # append-then-remove on the shared session list moves the
                    # order to the end and re-assignment marks the session
                    # modified.  Confirm intended.
                    orders_holder_tmp = request.session.get(
                        'orders_holder', None)
                    orders_holder_tmp.append(order)
                    request.session['orders_holder'] = orders_holder_tmp
                    # remove this order from session
                    orders_holder.remove(order)
                    data = {
                        'msg': 'Aφαιρέθηκε από το καλάθι'
                    }
                    return Response(data, status=status.HTTP_200_OK)
        else:
            data = {
                'msg': 'Άδειο καλάθι'
            }
            return Response(data, status=status.HTTP_200_OK)
| 33.577075
| 79
| 0.559152
|
4a095f643cb2dac4318731470902b34dddffc324
| 3,850
|
py
|
Python
|
examples/diffuse_pyramid.py
|
almarklein/pirt
|
b43a57aad89ad2638a65e58079153567a49a43f2
|
[
"BSD-3-Clause"
] | 10
|
2020-05-14T12:48:23.000Z
|
2022-03-24T15:03:00.000Z
|
examples/diffuse_pyramid.py
|
almarklein/pirt
|
b43a57aad89ad2638a65e58079153567a49a43f2
|
[
"BSD-3-Clause"
] | 1
|
2021-08-05T13:24:48.000Z
|
2021-08-09T13:01:21.000Z
|
examples/diffuse_pyramid.py
|
almarklein/pirt
|
b43a57aad89ad2638a65e58079153567a49a43f2
|
[
"BSD-3-Clause"
] | null | null | null |
""" Demonstrates the scale space pyramid with Gaussian diffusion.
"""
import numpy as np
from pirt import ScaleSpacePyramid
from pirt import Aarray
class Demo2D:
    """Interactive viewer for a single scale-space pyramid.

    A slider selects the pyramid level; the displayed texture is swapped
    on every slider event.  Relies on the module-global ``vv`` (visvis),
    imported in the ``__main__`` block.
    """

    def __init__(self, im):
        # Wrap as anisotropic array and build the pyramid up front.
        pyramid = ScaleSpacePyramid(Aarray(im))
        self._p = pyramid
        max_level, _max_sigma = pyramid.calculate()

        # Set up the figure and a single axes, leaving room for the slider.
        vv.figure(1)
        vv.clf()
        ax = vv.gca()
        self._axes = ax
        ax.position.Correct(dy=40, dh=-40)

        # Slider spanning the available levels, starting at the base level.
        slider = vv.Slider(ax)
        slider.position = 0, -40, 1, 20
        slider.fullRange = 0, max_level
        slider.value = 0
        self._slider = slider

        # Show the base level and hook up the slider callbacks.
        self._t = vv.imshow(pyramid.get_level(0))
        slider.eventSliding.Bind(self.on_sliding)
        slider.eventSliderChanged.Bind(self.on_sliding)

    def on_sliding(self, event):
        """Swap the displayed image for the level the slider points at."""
        self._t.SetData(self._p.get_level(self._slider.value))
class Demo2D3:
    """Side-by-side comparison of three pyramids with different level factors.

    Shows the same image diffused via three ScaleSpacePyramid instances
    (level factors 1.5, 2 and 3) in three subplots sharing one camera,
    with a slider that selects the *scale* (sigma) rather than the level.

    NOTE(review): like Demo2D, depends on the module-level ``vv`` (visvis)
    being imported before instantiation.
    """
    def __init__(self, im, min_scale=None, scale_offset=0):
        # Make Aarray
        if True:# not isinstance(im, Aarray):
            im = Aarray(im)
        # Create pyramids — identical except for the level factor
        self._p1 = ScaleSpacePyramid(im, min_scale, scale_offset, level_factor=1.5)
        self._p2 = ScaleSpacePyramid(im, min_scale, scale_offset, level_factor=2)
        self._p3 = ScaleSpacePyramid(im, min_scale, scale_offset, level_factor=3)
        #maxLevel, maxSigma = self._p1.calculate()
        #self._p2.calculate()
        #self._p3.calculate()
        # Init visualization: one subplot per pyramid
        fig = vv.figure(1); vv.clf()
        self._axes1 = axes1 = vv.subplot(131); vv.title('level factor 1.5')
        self._axes2 = axes2 = vv.subplot(132); vv.title('level factor 2.0')
        self._axes3 = axes3 = vv.subplot(133); vv.title('level factor 3.0')
        axes1.position.Correct(dy=40, dh=-40)
        axes2.position.Correct(dy=40, dh=-40)
        axes3.position.Correct(dy=40, dh=-40)
        # Share camera so pan/zoom is synchronized across the subplots
        cam = vv.cameras.TwoDCamera()
        for ax in [axes1, axes2, axes3]:
            ax.camera = cam
        # Make slider (attached to the figure, not an axes)
        self._slider = vv.Slider(fig)
        self._slider.position = 0.1, 5, 0.8, 20
        self._slider.fullRange = 0, 25
        self._slider.value = 1
        # Show image
        self._t1 = vv.imshow(self._p1.get_level(0), axes=axes1)
        self._t2 = vv.imshow(self._p2.get_level(0), axes=axes2)
        self._t3 = vv.imshow(self._p3.get_level(0), axes=axes3)
        # Bind to handler
        self._slider.eventSliding.Bind(self.on_sliding)
        self._slider.eventSliderChanged.Bind(self.on_sliding)
    def on_sliding(self, event):
        """Slider callback: show each pyramid at the selected sigma (scale)."""
        # Get level — slider value is interpreted as a sigma here
        sigma = self._slider.value
        # Get images at that scale from each pyramid
        im1 = self._p1.get_scale(sigma)
        im2 = self._p2.get_scale(sigma)
        im3 = self._p3.get_scale(sigma)
        # Replace textures
        self._t1.SetData(im1)
        self._t2.SetData(im2)
        self._t3.SetData(im3)
if __name__ == '__main__':
    import visvis as vv
    # Read image: take the green channel of the test image as float32
    im = vv.imread('astronaut.png')[:,:,1].astype(np.float32)
    # Subsample rows by 2 to make the sampling anisotropic
    im = Aarray(im)[::2,:]
    d = Demo2D3(im, 1.5)
    vv.use().Run()
# Dead branch kept for manual experimentation (flip 0 -> 1 to run):
# compares the diffusion kernel against the Gaussian kernel at large sigma.
if 0:
    ## Diffusionkernel vs Gaussiankernel
    import pirt
    import visvis as vv
    sigma = 300
    k1, t1 = pirt.diffusionkernel(sigma, returnt=True)
    k2, t2 = pirt.gaussiankernel(sigma, returnt=True)
    vv.figure()
    vv.plot(t1, k1, lc='r', ls=':')
    vv.plot(t2, k2, lc='b')
| 28.518519
| 83
| 0.562857
|
4a095f78c2a09879d364178aedb33daa68e081fd
| 2,218
|
py
|
Python
|
setup.py
|
nurmukhametov/ROPgadget
|
15ffb1b08d64991ecdd4131e07e8a3118930c629
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
nurmukhametov/ROPgadget
|
15ffb1b08d64991ecdd4131e07e8a3118930c629
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
nurmukhametov/ROPgadget
|
15ffb1b08d64991ecdd4131e07e8a3118930c629
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup
import os
package_name = "ROPGadget"
package_dir = "ropgadget"
package_description = """
This tool lets you search your gadgets on your binaries to facilitate your ROP exploitation.
ROPgadget supports ELF, PE and Mach-O format on x86, x64, ARM, ARM64, PowerPC, SPARC and MIPS architectures.
http://www.shell-storm.org/project/ROPgadget/
""".strip()
def fullsplit(path, result=None):
    """Split a pathname into all of its components (the opposite of
    os.path.join) in a platform-neutral way.

    The optional *result* list is appended after the split components,
    mirroring the accumulator of the original recursive formulation.
    """
    parts = [] if result is None else result
    while True:
        head, tail = os.path.split(path)
        if head == '':
            # Nothing left to split off: tail is the first component.
            return [tail] + parts
        if head == path:
            # Unsplittable root such as '/' — stop without adding it.
            return parts
        parts = [tail] + parts
        path = head
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
    os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk(package_dir):
    # Prune hidden directories in place so os.walk does not descend into
    # them.  BUGFIX: the previous `del dirnames[i]` inside an enumerate
    # loop mutated the list while iterating, shifting indices and
    # skipping the entry after every deleted one; slice assignment
    # filters correctly and still mutates the list os.walk holds.
    dirnames[:] = [d for d in dirnames if not d.startswith('.')]
    if '__init__.py' in filenames:
        # A package: record its dotted name.
        packages.append('.'.join(fullsplit(dirpath)))
    elif filenames:
        # Plain files outside a package: ship them as data files.
        data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
version = "6.3"
setup(
    name = package_name,
    version = version,
    description = package_description,
    packages = packages,
    license = "BSD",
    author = "Jonathan Salwan",
    author_email = "jonathan.salwan@gmail.com",
    url = "https://github.com/JonathanSalwan/ROPgadget",
    scripts = ['scripts/ROPgadget'],
    install_requires = ['capstone'],
    classifiers = [
        'Topic :: Security',
        'Environment :: Console',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Intended Audience :: Developers'
    ]
)
| 32.617647
| 109
| 0.608656
|
4a095fd190ccfeafb1daf94154a3cebc27c54b0c
| 2,466
|
py
|
Python
|
dassl/data/samplers.py
|
DMIRLAB-Group/Dassl.pytorch
|
79052448cc0b0622f14e9768dbd6e6c0598fe6d1
|
[
"MIT"
] | 1
|
2021-12-11T23:37:02.000Z
|
2021-12-11T23:37:02.000Z
|
dassl/data/samplers.py
|
DMIRLAB-Group/Dassl.pytorch
|
79052448cc0b0622f14e9768dbd6e6c0598fe6d1
|
[
"MIT"
] | null | null | null |
dassl/data/samplers.py
|
DMIRLAB-Group/Dassl.pytorch
|
79052448cc0b0622f14e9768dbd6e6c0598fe6d1
|
[
"MIT"
] | 1
|
2020-09-21T03:16:10.000Z
|
2020-09-21T03:16:10.000Z
|
import copy
import random
from collections import defaultdict
from torch.utils.data.sampler import Sampler, RandomSampler, SequentialSampler
class RandomDomainSampler(Sampler):
    """Sampler that draws N domains per minibatch, K images from each.

    Every minibatch of ``batch_size`` indices covers ``n_domain`` randomly
    chosen domains with ``batch_size // n_domain`` images apiece.  Items of
    *data_source* must expose a ``.domain`` attribute.
    """

    def __init__(self, data_source, batch_size, n_domain):
        self.data_source = data_source
        # Bucket dataset indices by the domain label of each item.
        self.domain_dict = defaultdict(list)
        for index, item in enumerate(data_source):
            self.domain_dict[item.domain].append(index)
        self.domains = list(self.domain_dict.keys())
        # Fall back to "use every domain" when no valid count is given.
        if n_domain is None or n_domain <= 0:
            n_domain = len(self.domains)
        assert batch_size % n_domain == 0
        self.n_img_per_domain = batch_size // n_domain
        self.batch_size = batch_size
        self.n_domain = n_domain
        # Epoch length is limited by the smallest domain bucket: each
        # sampling round consumes n_img_per_domain images per domain.
        rounds_per_domain = [
            len(indices) // self.n_img_per_domain
            for indices in self.domain_dict.values()
        ]
        self.length = min(rounds_per_domain) * batch_size

    def __iter__(self):
        # Work on a private copy so repeated epochs start fresh.
        pool = copy.deepcopy(self.domain_dict)
        chosen = []
        exhausted = False
        while not exhausted:
            # Draw the domains first, then the images within each domain
            # (same call order as before, so seeding reproduces results).
            for name in random.sample(self.domains, self.n_domain):
                candidates = pool[name]
                picked = random.sample(candidates, self.n_img_per_domain)
                chosen.extend(picked)
                for index in picked:
                    candidates.remove(index)
                # Stop once any domain can no longer fill its quota.
                if len(candidates) < self.n_img_per_domain:
                    exhausted = True
        return iter(chosen)

    def __len__(self):
        return self.length
def build_sampler(
    sampler_type, cfg=None, data_source=None, batch_size=32, n_domain=0
):
    """Instantiate a sampler by name.

    Supported names: 'RandomSampler', 'SequentialSampler',
    'RandomDomainSampler'.  Raises ValueError for anything else.
    (*cfg* is accepted for interface compatibility but unused.)
    """
    if sampler_type == 'RandomDomainSampler':
        return RandomDomainSampler(data_source, batch_size, n_domain)
    if sampler_type == 'SequentialSampler':
        return SequentialSampler(data_source)
    if sampler_type == 'RandomSampler':
        return RandomSampler(data_source)
    raise ValueError('Unknown sampler type: {}'.format(sampler_type))
| 31.615385
| 78
| 0.642336
|
4a09606d303f53f5ae45b0c0c1005187bcf21764
| 17
|
py
|
Python
|
example_snippets/multimenus_snippets/Snippets/NumPy/Vectorized (universal) functions/Characterizing arrays/any Returns True if any of the elements of a evaluate to True.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/NumPy/Vectorized (universal) functions/Characterizing arrays/any Returns True if any of the elements of a evaluate to True.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/NumPy/Vectorized (universal) functions/Characterizing arrays/any Returns True if any of the elements of a evaluate to True.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | 1
|
2021-02-04T04:51:48.000Z
|
2021-02-04T04:51:48.000Z
|
# Reduce `a` along axis 0: True wherever at least one element in that
# column evaluates to True (snippet — assumes `np` is numpy and `a` is
# an array already in scope).
np.any(a, axis=0)
| 17
| 17
| 0.647059
|
4a0960a50f2218a0e8249ba39f1011fd234930dd
| 9,782
|
py
|
Python
|
builder/make_cards.py
|
tomcontileslie/slid
|
5b8a06f482bff5c46e1dd92981695e5c5d743b79
|
[
"MIT"
] | 1
|
2020-11-02T23:35:26.000Z
|
2020-11-02T23:35:26.000Z
|
builder/make_cards.py
|
stanightline/slid
|
453beee93d48fec8edbcb172a35ebdef7d64e543
|
[
"MIT"
] | 3
|
2020-09-03T15:35:47.000Z
|
2020-09-13T10:55:07.000Z
|
builder/make_cards.py
|
stanightline/slid
|
453beee93d48fec8edbcb172a35ebdef7d64e543
|
[
"MIT"
] | 3
|
2020-09-05T21:02:27.000Z
|
2021-11-10T17:40:42.000Z
|
from bs4 import BeautifulSoup
import sys
import yaml
import os
SOURCE, OUTPUT = sys.argv[1], sys.argv[2]
with open(os.path.join(SOURCE, "_colours.yml"), "r") as file:
colours = yaml.load(file, Loader=yaml.FullLoader)
# Mapping of HTML-special characters to their escaped entities.  Newlines
# map to <br> so multi-line card text lays out correctly on the page.
ESCDICT = {
    "&" : "&amp;",
    "<" : "&lt;",
    ">" : "&gt;",
    '"' : "&quot;",
    "'" : "&#39;",
    "`" : "&#x60;",
    "=" : "&#x3D;",
    "\n" : "<br>"}
ESCCHARS = ESCDICT.keys()
# default indent at 4 spaces.
INDENT = "    "
def esc(string):
    """Return a copy of *string* with HTML-special characters escaped.

    & < > " ' ` = are replaced by entities and newlines by <br>; the
    input string is never modified in place.
    """
    return "".join(ESCDICT[char] if char in ESCCHARS else char
                   for char in string)
def nip(string):
    """Lowercase *string* and drop every character that is not
    alphanumeric, '_' or '-'.

    For use in CSS classes and other restrictive environments; returns
    "BADSTRING" when nothing survives the filter.
    """
    kept = [char for char in string.lower() if char.isalnum() or char in "_-"]
    return "".join(kept) or "BADSTRING"
def dict_to_html(card, category, colour):
    """
    Converts one list entry from a YAML file into an HTML card for use
    in index.html.
    Takes as input a category, which will be added to the tags.
    Also takes in a Materialize.css colour as input, in the form of a string.
    Local variables defined from card:
    - title
    - image
    - content
    - locations
    - links
    Missing or wrongly-typed card fields are silently treated as absent,
    so a malformed YAML entry degrades to a sparser card rather than
    raising.  Returns the card's HTML as a single string.
    """
    ck = card.keys()
    # get important values from card dict (None / empty when absent or
    # not of the expected type)
    if "title" in ck and isinstance(card["title"], str):
        title = card["title"]
    else:
        title = None
    if "image" in ck and isinstance(card["image"], str):
        image = card["image"]
    else:
        image = None
    if "content" in ck and isinstance(card["content"], str):
        content = card["content"]
    else:
        content = None
    locations = []
    if "locations" in ck and isinstance(card["locations"], list):
        for location in card["locations"]:
            if isinstance(location, str):
                locations.append(location)
    links = []
    if "links" in ck and isinstance(card["links"], list):
        for link in card["links"]:
            if isinstance(link, dict) and "url" in link.keys() and "text" in link.keys():
                links.append((link["url"], link["text"]))
    # START CREATING OUTPUT.
    # Initialise div and CSS classes
    out = ("<div class=\"card " # create css for card
        + colour + " lighten-2 " # define colour
        + "cat-" + nip(category)) # add category tag
    # Add geo filters (one CSS class per location, used by the filter UI)
    for location in locations:
        out += " geo-" + nip(location)
    # Close CSS
    out += '"'
    # Add data-name. Contingency for if undefined title.
    if title:
        out += " data-name=\"" + esc(title) + "\">"
    else:
        out += " data-name=\"Image\">"
    # Add image div, if there is an image.
    if image:
        out += "\n\n" + INDENT
        out += "<div class=\"card-image\">"
        out += "\n" + 2 * INDENT
        out += "<img src=\"img/cards/" + image + "\">"
        # Add title on top of image if necessary.
        if title:
            out += "\n" + 2 * INDENT
            out += "<span class=\"card-title " + colour + "-text"
            out += " text-darken-4\">"
            out += esc(title)
            out += "</span>"
        out += "\n" + INDENT
        out += "</div>"
    # Add the bulk of the card, if there is a reason to (there should be).
    if title or content:
        out += "\n\n" + INDENT
        out += "<div class=\"card-content\">"
        out += "\n"
        # only add a title here if it wasn't added on the image.
        if title and not image:
            out += 2 * INDENT
            out += "<span class=\"card-title "
            out += colour + "-text text-darken-4\">"
            out += esc(title)
            out += "</span>"
            out += "\n"
        if content:
            out += 2 * INDENT
            out += "<p class=\"black-text text-darken-4\">"
            out += esc(content)
            out += "</p>"
            out += "\n"
        out += INDENT + "</div>"
    # add the links at bottom of card.
    if links:
        out += "\n\n" + INDENT
        out += "<div class=\"card-action\">"
        count = 0
        for link in links:
            count += 1
            out += "\n" + 2 * INDENT
            out += "<a href=\"" + str(link[0]) + "\">"
            out += "\n" + 3 * INDENT
            out += "<span class=\"" + colour + "-text text-darken-4\">"
            out += esc(link[1])
            out += "</span>"
            out += "\n" + 2 * INDENT
            out += "</a>"
            # <br> between links, but not after the last one
            if count < len(links):
                out += "<br>"
        out += "\n" + INDENT
        out += "</div>"
    # add locations at bottom of card.
    # NOTE(review): `loc` is inserted without esc() here, unlike every
    # other user-supplied string — confirm location strings are trusted.
    if locations:
        out += "\n\n" + INDENT
        out += "<div class=\"beans\">"
        for loc in locations:
            out += "\n" + 2 * INDENT
            out += "<div class=\"bean " + colour + " darken-4 "
            out += colour + "-text text-lighten-3\">"
            out += "\n" + 3 * INDENT
            out += loc
            out += "\n" + 2 * INDENT
            out += "</div>"
        out += "\n" + INDENT
        out += "</div>"
    out += "\n"
    out += "</div>"
    return out
def catset_to_dropdown(cat_set):
    """Render a set of (category, colour) tuples as a Materialize dropdown.

    The list opens with a default "All" entry; each category row carries
    a circle icon coloured to match that category's cards.  Entries are
    emitted in alphabetical order.
    """
    parts = ["<ul class=\"dropdown-content slid-cat-dropdown\" id=\"dropdown-cat\">\n"]
    # The "All" filter is always present and active by default.
    parts.append("<li class=\"active\">\n<a class=\"slid-filter\" filter=\"\" slid-group=\"cat\">All</a>\n</li>\n")
    for name, shade in sorted(cat_set):
        parts.append("<li>\n")
        parts.append("<a class=\"slid-filter\" filter=\".cat-" + nip(name) + "\" slid-group=\"cat\">")
        parts.append("<i class=\"material-icons left " + shade + "-text text-lighten-1\">brightness_1</i>")
        parts.append(esc(name) + "</a>\n</li>\n")
    parts.append("</ul>")
    return "".join(parts)
def geoset_to_dropdown(geo_set):
    """Render a set of location strings as a dropdown.

    Same shape as catset_to_dropdown, minus the colour icons; entries
    are emitted in alphabetical order after the default "All" item.
    """
    parts = ["<ul class=\"dropdown-content slid-geo-dropdown\" id=\"dropdown-geo\">\n"]
    # The "All" filter is always present and active by default.
    parts.append("<li class=\"active\">\n<a class=\"slid-filter\" filter=\"\" slid-group=\"geo\">All</a>\n</li>\n")
    for loc in sorted(geo_set):
        parts.append("<li>\n")
        parts.append("<a class=\"slid-filter\" filter=\".geo-" + nip(loc) + "\" slid-group=\"geo\">")
        parts.append(esc(loc) + "</a>\n</li>\n")
    parts.append("</ul>")
    return "".join(parts)
# We're going to run through every yml file and create a new string containing
# all cards to go in the grid. Initialise this string with the enclosing div
# tag.
newgridstring = "<div class=\"grid\">\n"
# Also initialise a set of category tags and a list of location tags.
# The category set contains tuples (category, colour).
cat_set = set()
geo_set = set()
for filename in os.listdir(SOURCE):
    if (filename.endswith(".yml") or filename.endswith(".yaml")) and filename != "_colours.yml":
        sourcefile = os.path.join(SOURCE, filename)
        # The category is the filename with no extension.
        cat = ".".join(filename.split(".")[:-1])
        # NOTE(review): `file` shadows the (removed-in-Py3) builtin name;
        # harmless here but worth renaming in a future cleanup.
        with open(sourcefile, "r") as file:
            content = yaml.load(file, Loader=yaml.FullLoader)
        # Card colour comes from _colours.yml; yellow is the fallback.
        if cat in colours.keys():
            colour = colours[cat]
        else:
            colour = "yellow"
        cat_set.add((cat, colour))
        for card in content:
            newgridstring += dict_to_html(card, cat, colour)
            newgridstring += "\n"
            # Add all geo filters
            if "locations" in card.keys():
                for location in card["locations"]:
                    if isinstance(location, str):
                        geo_set.add(location)
# We have now added all cards to the new string; close the div tag
newgridstring += "</div>"
# Now load and read the output file with bs4
with open(OUTPUT, "r") as file:
    soup = BeautifulSoup(file, "html.parser")
# FIRST STEP: generate cards — swap the existing grid div for the new one
def isgrid(tag):
    # Predicate for soup.find: matches the element carrying class "grid".
    return tag.has_attr("class") and 'grid' in tag["class"]
grid = soup.find(isgrid)
newgrid = BeautifulSoup(newgridstring, "html.parser")
grid.replace_with(newgrid)
# SECOND STEP: generate category filter dropdown
cat_dropdown = soup.find(id="dropdown-cat")
new_cat_dropdown = BeautifulSoup(catset_to_dropdown(cat_set), "html.parser")
cat_dropdown.replace_with(new_cat_dropdown)
# THIRD STEP: generate location filter dropdown
geo_dropdown = soup.find(id="dropdown-geo")
new_geo_dropdown = BeautifulSoup(geoset_to_dropdown(geo_set), "html.parser")
geo_dropdown.replace_with(new_geo_dropdown)
# Now pretty-print the file and save (overwrites OUTPUT in place)
out = soup.prettify()
with open(OUTPUT, "w") as file:
    file.write(out)
| 30.006135
| 108
| 0.530464
|
4a0960d9d781cc63c5d5516f2c5f5f40b2b5917c
| 2,683
|
py
|
Python
|
dataset/archive/compare_distribution.py
|
marcelbra/academic-budget-bert
|
527053a618d781210b4faccb5fe9afef17b1324a
|
[
"Apache-2.0"
] | null | null | null |
dataset/archive/compare_distribution.py
|
marcelbra/academic-budget-bert
|
527053a618d781210b4faccb5fe9afef17b1324a
|
[
"Apache-2.0"
] | null | null | null |
dataset/archive/compare_distribution.py
|
marcelbra/academic-budget-bert
|
527053a618d781210b4faccb5fe9afef17b1324a
|
[
"Apache-2.0"
] | null | null | null |
import pickle
from datasets import load_dataset, load_from_disk
from nltk import word_tokenize, sent_tokenize
import matplotlib.pyplot as plt
from tqdm import tqdm
import multiprocessing as mp
def get_density(doc, closed_class_words, word_tokenizer):
    """Return the lexical density of *doc*: the fraction of its tokens
    that are NOT closed-class (function) words.

    Args:
        doc: the text to analyse.
        closed_class_words: iterable of function words to discount.
        word_tokenizer: callable mapping a string to a list of tokens.

    Returns:
        A float in [0, 1]; 0.0 when the document tokenizes to nothing
        (the previous version raised ZeroDivisionError on empty input).
    """
    words = word_tokenizer(doc)
    if not words:
        return 0.0
    # A set makes each membership test O(1) instead of O(n) over the
    # (potentially long) closed-class word list.
    closed = set(closed_class_words)
    count = sum(1 for word in words if word in closed)
    return 1 - count / len(words)
def get_closed_class_words():
    """Load the closed-class (function) word list.

    Reads whitespace-separated words from 'closed_class_words.txt' in
    the current directory and returns them as a flat list.
    """
    with open("closed_class_words.txt", "r") as f:
        return [word for line in f for word in line.split()]
def lexical_density_sentences(wiki, closed_class_words):
    """Compute per-document and per-sentence lexical densities.

    For every article in *wiki* (items with a "text" field), returns a
    dict with the whole-document density and a list of densities for
    each sentence.  Depends on nltk's tokenizers and tqdm.
    """
    scores = []
    word_tokenizer = word_tokenize
    # NOTE(review): sent_tokenizer is assigned but never used — the loop
    # below calls sent_tokenize directly.
    sent_tokenizer = sent_tokenize
    for article in tqdm(wiki):
        text = article["text"]
        sentences = sent_tokenize(text)
        densities = {"document_density": get_density(text, closed_class_words, word_tokenizer),
                     "sentence_densities": [get_density(sentence, closed_class_words, word_tokenizer)
                                            for sentence in sentences]}
        scores.append(densities)
    return scores
#wiki = load_dataset("wikipedia", "20200501.en")["train"]
closed_class_words = get_closed_class_words()
# Load precomputed densities (produced by the commented-out block below)
# and sort articles by whole-document density, ascending.
with open("densities.pkl", "rb") as handle:
    data = pickle.load(handle)
data.sort(key=lambda x: x["document_density"])
# NOTE(review): `s` appears to be a leftover debugger anchor; unused.
s = 0
# densities = lexical_density_sentences(wiki, closed_class_words)
# with open("densities.pkl", "wb") as handle:
# pickle.dump(densities, handle, protocol=pickle.HIGHEST_PROTOCOL)
#
# def lexical_density_finetuning_ds(closed_class_words):
# options = ['cola', 'sst2', 'mrpc', 'qqp', 'stsb', 'mnli', 'qnli', 'rte', 'wnli']
# for option in options:
# dataset = load_dataset("glue", option)
# densities = []
# for example in dataset["train"]:
# if "sentence1" in example:
# sentence = example["sentence1"] + " " + example["sentence2"]
# if "question1" in example:
# sentence = example["question1"] + " " + example["question2"]
# if "premise" in example:
# sentence = example["premise"] + " " + example["hypothesis"]
# if "question" in example:
# sentence = example["question"] + " " + example["sentence"]
# if "sentence" in example:
# sentence = example["sentence"]
# density = get_density(sentence, closed_class_words)
# densities.append(density)
# densities.sort()
# plt.bar(list(range(len(densities))), densities)
# plt.title(option + " " + "lexial_density")
# plt.show()
| 35.302632
| 101
| 0.641446
|
4a096198e26d7e3f609c106942faa66be2f27c7c
| 4,491
|
py
|
Python
|
bfevfl/file.py
|
asteriation/acnh-eventflow-compiler
|
821b2b5f4e7c31ccdf46a7743ccfcd0e3219a4f8
|
[
"MIT"
] | 1
|
2022-01-25T22:12:54.000Z
|
2022-01-25T22:12:54.000Z
|
bfevfl/file.py
|
asteriation/acnh-eventflow-compiler
|
821b2b5f4e7c31ccdf46a7743ccfcd0e3219a4f8
|
[
"MIT"
] | null | null | null |
bfevfl/file.py
|
asteriation/acnh-eventflow-compiler
|
821b2b5f4e7c31ccdf46a7743ccfcd0e3219a4f8
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import List, Set
from bitstring import BitStream, pack
from .actors import Actor
from .nodes import Node, RootNode, SubflowNode, ActionNode, SwitchNode
from .block import DataBlock, ContainerBlock
from .str_ import StringPool
from .dic_ import Dictionary
from .array import BlockPtrArray
from .flowchart import Flowchart
from .relt import RelocationTable
class _RELTPlaceholder(DataBlock):
    """Zero-size stand-in marking where the relocation table will go.

    The real RelocationTable is appended at serialization time (see
    File.prepare_bitstream); this placeholder only reserves an aligned
    offset that the header can reference.
    """
    def __init__(self) -> None:
        # Size 0: the placeholder occupies no bytes itself.
        super().__init__(0)
    def alignment(self) -> int:
        """8-byte alignment, matching the other blocks in the file."""
        return 8
class _FileHeader(DataBlock):
    """0x48-byte BFEVFL file header.

    Writes the magic/version bytes at construction and records pointers
    to the flowchart array and dictionaries; the filename and relocation-
    table offsets are patched in at serialization time, once final block
    offsets are known.
    """
    def __init__(self, filename: str, flowcharts: BlockPtrArray[Flowchart], flowchart_dic: Dictionary,
            timeline_dic: Dictionary, relt: _RELTPlaceholder, pool: StringPool) -> None:
        super().__init__(0x48)
        # Keep handles to the pooled filename string and the RELT
        # placeholder — their offsets are only known at write time.
        self.filename = pool[filename].c_str
        self.relt = relt
        with self._at_offset(0):
            # Magic + fixed version/flag bytes.
            self.buffer.overwrite(b'BFEVFL\0\0')
            self.buffer.overwrite(b'\x00\x03\x00\x00\xff\xfe\x03\x00')
            self.buffer.overwrite(b'\x00\x00\x00\x00\x00\x00\x90\x00')
        with self._at_offset(0x20):
            # Flowchart count (1) — presumably little-endian u16; confirm
            # against the BFEVFL spec.
            self.buffer.overwrite(b'\x01\x00')
        self._add_pointer(0x28, flowcharts)
        self._add_pointer(0x30, flowchart_dic)
        self._add_pointer(0x38, None)
        self._add_pointer(0x40, timeline_dic)
    def set_file_size(self, fsize: int) -> None:
        """Patch the total file size field at header offset 0x1c."""
        with self._at_offset(0x1c):
            self.buffer.overwrite(pack('uintle:32', fsize))
    def prepare_bitstream(self) -> BitStream:
        """Resolve the late-bound offsets (filename, RELT) and serialize."""
        with self._at_offset(0x10):
            self.buffer.overwrite(pack('uintle:32', self.filename.offset))
        with self._at_offset(0x18):
            self.buffer.overwrite(pack('uintle:32', self.relt.offset))
        return super().prepare_bitstream()
    def alignment(self) -> int:
        """8-byte alignment, matching the other blocks in the file."""
        return 8
class File(ContainerBlock):
    """Complete BFEVFL file: header, flowchart, dictionaries, string pool
    and relocation table, assembled from actors and event-flow nodes.
    """
    def __init__(self, filename: str, actors: List[Actor], nodes: List[Node]) -> None:
        # Root nodes become flowchart entry points; the rest are plain nodes.
        entry_nodes = [n for n in nodes if isinstance(n, RootNode)]
        other_nodes = [n for n in nodes if not isinstance(n, RootNode)]
        # Collect every string that must live in the shared string pool.
        pooled_strings: Set[str] = set()
        pooled_strings.add(filename)
        for n in nodes:
            pooled_strings.add(n.name)
            if isinstance(n, SubflowNode):
                if n.ns:
                    pooled_strings.add(n.ns)
                pooled_strings.add(n.called_root_name)
                # keys are in pool, but values are stored in place
                pooled_strings.update(n.params.keys())
            elif isinstance(n, (ActionNode, SwitchNode)):
                for param in n.params.keys():
                    pooled_strings.add(param)
            elif isinstance(n, RootNode):
                for vardef in n.vardefs:
                    pooled_strings.add(vardef.name)
        # Only actors with at least one *used* action/query are emitted;
        # their names and parameter names also go into the pool.
        for actor in actors:
            add_actor = False
            for _, a in actor.actions.items():
                if a.used:
                    add_actor = True
                    pooled_strings.add(a.name)
                    pooled_strings.update(p.name for p in a.params)
            for _, q in actor.queries.items():
                if q.used:
                    add_actor = True
                    pooled_strings.add(q.name)
                    pooled_strings.update(p.name for p in q.params)
            if add_actor:
                pooled_strings.add(actor.name)
                if actor.secondary_name:
                    pooled_strings.add(actor.secondary_name)
        # Sorted order keeps pool layout deterministic across builds.
        pool = StringPool(sorted(list(pooled_strings)))
        flowchart_dic = Dictionary([filename], pool)
        timeline_dic = Dictionary([], pool)
        flowchart = Flowchart(filename, actors, other_nodes, entry_nodes, pool)
        flowchart_ptrs = BlockPtrArray[Flowchart]([flowchart])
        self.relt = _RELTPlaceholder()
        self.header = _FileHeader(filename, flowchart_ptrs, flowchart_dic, timeline_dic, self.relt, pool)
        # Block order here fixes the on-disk layout.
        super().__init__([
            self.header,
            flowchart_ptrs,
            flowchart_dic,
            timeline_dic,
            flowchart,
            pool,
            self.relt
        ])
    def prepare_bitstream(self) -> BitStream:
        """Serialize the file, appending the relocation table built from
        every pointer recorded in the contained blocks."""
        relt = RelocationTable(self.get_all_pointers())
        # The table lands at the placeholder's (already-aligned) offset.
        relt.set_offset(self.relt.offset)
        self.header.set_file_size(len(self) + len(relt))
        return super().prepare_bitstream() + relt.prepare_bitstream()
    def alignment(self) -> int:
        """8-byte alignment for the file as a whole."""
        return 8
| 35.928
| 105
| 0.611668
|
4a0962a7d6a31e5153b4f1f833dfca09f0d41a60
| 524
|
py
|
Python
|
preprocspectra/transformation_tools/__init__.py
|
dijsilva/preprocspectra
|
66b0400d3869307c340ec288cff71443c1acf8d8
|
[
"MIT"
] | 1
|
2021-09-03T09:38:28.000Z
|
2021-09-03T09:38:28.000Z
|
preprocspectra/transformation_tools/__init__.py
|
dijsilva/preprocspectra
|
66b0400d3869307c340ec288cff71443c1acf8d8
|
[
"MIT"
] | null | null | null |
preprocspectra/transformation_tools/__init__.py
|
dijsilva/preprocspectra
|
66b0400d3869307c340ec288cff71443c1acf8d8
|
[
"MIT"
] | 1
|
2020-09-06T07:53:36.000Z
|
2020-09-06T07:53:36.000Z
|
__all__ = ['snv', 'sg', 'plus_sg', 'area_norm', 'msc', 'make_transformations']
from preprocspectra.transformation_tools.transformations import snv
from preprocspectra.transformation_tools.transformations import sg
from preprocspectra.transformation_tools.transformations import plus_sg
from preprocspectra.transformation_tools.transformations import area_norm
from preprocspectra.transformation_tools.transformations import msc
from preprocspectra.transformation_tools.handle_transformations import make_transformations
| 47.636364
| 92
| 0.870229
|
4a096380ef182a07bfb906f6766308c886d79f72
| 9,114
|
py
|
Python
|
test/test27_send.py
|
Zrufy/telepot
|
a4d62a32c82e5799e9f7afc400275e9d487ddb76
|
[
"MIT"
] | 2
|
2021-04-11T12:03:19.000Z
|
2021-04-11T12:03:23.000Z
|
test/test27_send.py
|
Zrufy/telepot
|
a4d62a32c82e5799e9f7afc400275e9d487ddb76
|
[
"MIT"
] | null | null | null |
test/test27_send.py
|
Zrufy/telepot
|
a4d62a32c82e5799e9f7afc400275e9d487ddb76
|
[
"MIT"
] | null | null | null |
# coding=utf8
import time
import threading
import pprint
import sys
import traceback
import telepot
import telepot.namedtuple
"""
This script tests:
- setWebhook() and getUpdates(), and make sure they are exclusive
- sendZZZ() and sendChatAction() methods
- getUserProfilePhotos()
Run it by:
$ python2.7 test.py <token> <user_id>
It will assume the bot identified by <token>, and only communicate with the user identified by <user_id>.
If you don't know your user id, run:
$ python test.py <token> 0
And send it a message anyway. It will print out your user id as an unauthorized user.
Ctrl-C to kill it, then run the proper command again.
"""
def equivalent(data, nt):
    """Recursively check that a dict/list structure *data* matches the
    namedtuple *nt* field-for-field (the reserved word `from` appears as
    `from_` on the namedtuple side)."""
    if type(data) is dict:
        keys = list(data.keys())
        # The namedtuple must have exactly as many populated (non-None)
        # fields as the dict has keys.
        populated = [f for f in nt._fields if getattr(nt, f) is not None]
        if len(keys) != len(populated):
            return False
        # Map the reserved key `from` onto the `from_` attribute.
        pairs = ((data[k], getattr(nt, k + '_' if k in ['from'] else k))
                 for k in keys)
        return all(equivalent(d, v) for d, v in pairs)
    if type(data) is list:
        return all(equivalent(d, v) for d, v in zip(data, nt))
    return data == nt
def examine(result, type):
    """Convert an API *result* into the namedtuple named by *type* and
    assert structural equivalence; on mismatch, print the traceback and
    ask interactively whether to continue.

    Python 2 code (print statements, raw_input).  NOTE(review): the
    parameter shadows the builtin `type` — rename in a future cleanup.
    """
    try:
        print 'Examining %s ......' % type
        nt = telepot.namedtuple.namedtuple(result, type)
        assert equivalent(result, nt), 'Not equivalent:::::::::::::::\n%s\n::::::::::::::::\n%s' % (result, nt)
        if type == 'Message':
            print 'Message glance2: %s' % str(telepot.glance2(result, long=True))
        pprint.pprint(result)
        pprint.pprint(nt)
        print
    except AssertionError:
        traceback.print_exc()
        # Give the tester a chance to keep going despite the mismatch.
        answer = raw_input('Do you want to continue? [y] ')
        if answer != 'y':
            exit(1)
def send_everything_on_contact(msg):
    """Handler: on any message from the authorized user, exercise every
    sendZZZ() method (message, photo, audio, document, sticker, video,
    voice, location), plus forwardMessage, getFile and downloadFile,
    validating each response with examine().  Python 2 code; exits the
    process if the sender is not USER_ID.
    """
    content_type, chat_type, chat_id, msg_date, msg_id = telepot.glance2(msg, long=True)
    if chat_id != USER_ID:
        print 'Unauthorized user:', msg['from']['id']
        exit(1)
    print 'Received message from ID: %d' % chat_id
    print 'Start sending various messages ...'
    ##### forwardMessage
    r = bot.forwardMessage(chat_id, chat_id, msg_id)
    examine(r, 'Message')
    ##### sendMessage
    r = bot.sendMessage(chat_id, 'Hello, I am going to send you a lot of things.', reply_to_message_id=msg_id)
    examine(r, 'Message')
    r = bot.sendMessage(chat_id, u'中文')
    examine(r, 'Message')
    r = bot.sendMessage(chat_id, '*bold text*\n_italic text_\n[link](http://www.google.com)', parse_mode='Markdown')
    examine(r, 'Message')
    bot.sendMessage(chat_id, 'http://www.yahoo.com\nwith web page preview')
    bot.sendMessage(chat_id, 'http://www.yahoo.com\nno web page preview', disable_web_page_preview=True)
    # Both plain dicts and their namedtuple equivalents are accepted as
    # reply_markup; exercise each form.
    show_keyboard = {'keyboard': [['Yes', 'No'], ['Maybe', 'Maybe not']]}
    hide_keyboard = {'hide_keyboard': True}
    force_reply = {'force_reply': True}
    nt_show_keyboard = telepot.namedtuple.ReplyKeyboardMarkup(**show_keyboard)
    nt_hide_keyboard = telepot.namedtuple.ReplyKeyboardHide(**hide_keyboard)
    nt_force_reply = telepot.namedtuple.ForceReply(**force_reply)
    bot.sendMessage(chat_id, 'Here is a custom keyboard', reply_markup=show_keyboard)
    time.sleep(2)
    bot.sendMessage(chat_id, 'Hiding it now.', reply_markup=nt_hide_keyboard)
    bot.sendMessage(chat_id, 'Force reply', reply_markup=nt_force_reply)
    ##### sendPhoto
    bot.sendChatAction(chat_id, 'upload_photo')
    r = bot.sendPhoto(chat_id, open('lighthouse.jpg', 'rb'))
    examine(r, 'Message')
    # Re-send by file_id to exercise the server-side cache path.
    file_id = r['photo'][0]['file_id']
    bot.sendPhoto(chat_id, file_id, caption='Show original message and keyboard', reply_to_message_id=msg_id, reply_markup=nt_show_keyboard)
    bot.sendPhoto(chat_id, file_id, caption='Hide keyboard', reply_markup=hide_keyboard)
    ##### getFile
    f = bot.getFile(file_id)
    examine(f, 'File')
    ##### downloadFile, smaller than one chunk (65K)
    try:
        print 'Downloading file to non-existent directory ...'
        bot.downloadFile(file_id, 'non-existent-dir/file')
    except:
        print 'Error: as expected'
    print 'Downloading file to down.1 ...'
    bot.downloadFile(file_id, 'down.1')
    print 'Open down.2 and download to it ...'
    with open('down.2', 'wb') as down:
        bot.downloadFile(file_id, down)
    ##### sendAudio
    # Need one of `performer` or `title' for server to regard it as audio. Otherwise, server treats it as voice.
    bot.sendChatAction(chat_id, 'upload_audio')
    r = bot.sendAudio(chat_id, open('dgdg.mp3', 'rb'), title='Ringtone')
    examine(r, 'Message')
    file_id = r['audio']['file_id']
    bot.sendAudio(chat_id, file_id, duration=6, performer='Ding Dong', title='Ringtone', reply_to_message_id=msg_id, reply_markup=show_keyboard)
    bot.sendAudio(chat_id, file_id, performer='Ding Dong', reply_markup=nt_hide_keyboard)
    ##### sendDocument
    bot.sendChatAction(chat_id, 'upload_document')
    r = bot.sendDocument(chat_id, open('document.txt', 'rb'))
    examine(r, 'Message')
    file_id = r['document']['file_id']
    bot.sendDocument(chat_id, file_id, reply_to_message_id=msg_id, reply_markup=nt_show_keyboard)
    bot.sendDocument(chat_id, file_id, reply_markup=hide_keyboard)
    ##### sendSticker
    r = bot.sendSticker(chat_id, open('gandhi.png', 'rb'))
    examine(r, 'Message')
    file_id = r['sticker']['file_id']
    bot.sendSticker(chat_id, file_id, reply_to_message_id=msg_id, reply_markup=show_keyboard)
    bot.sendSticker(chat_id, file_id, reply_markup=nt_hide_keyboard)
    ##### sendVideo
    bot.sendChatAction(chat_id, 'upload_video')
    r = bot.sendVideo(chat_id, open('hktraffic.mp4', 'rb'))
    examine(r, 'Message')
    file_id = r['video']['file_id']
    bot.sendVideo(chat_id, file_id, duration=5, caption='Hong Kong traffic', reply_to_message_id=msg_id, reply_markup=nt_show_keyboard)
    bot.sendVideo(chat_id, file_id, reply_markup=hide_keyboard)
    ##### downloadFile, multiple chunks
    print 'Downloading file to down.3 ...'
    bot.downloadFile(file_id, 'down.3')
    ##### sendVoice
    r = bot.sendVoice(chat_id, open('example.ogg', 'rb'))
    examine(r, 'Message')
    file_id = r['voice']['file_id']
    bot.sendVoice(chat_id, file_id, duration=6, reply_to_message_id=msg_id, reply_markup=show_keyboard)
    bot.sendVoice(chat_id, file_id, reply_markup=nt_hide_keyboard)
    ##### sendLocation
    bot.sendChatAction(chat_id, 'find_location')
    r = bot.sendLocation(chat_id, 22.33, 114.18)  # Hong Kong
    examine(r, 'Message')
    bot.sendLocation(chat_id, 49.25, -123.1, reply_to_message_id=msg_id, reply_markup=nt_show_keyboard)  # Vancouver
    bot.sendLocation(chat_id, -37.82, 144.97, reply_markup=hide_keyboard)  # Melbourne
    ##### Done sending messages
    bot.sendMessage(chat_id, 'I am done.')
def get_user_profile_photos():
    """Fetch USER_ID's profile photos and validate the response shape
    against the UserProfilePhotos namedtuple (Python 2 code)."""
    print 'Getting user profile photos ...'
    r = bot.getUserProfilePhotos(USER_ID)
    examine(r, 'UserProfilePhotos')
# State shared with see_every_content_types: the content type currently
# being asked for, and the fixed order in which types are requested.
expected_content_type = None
content_type_iterator = iter([
    'text', 'voice', 'sticker', 'photo', 'audio' ,'document', 'video', 'contact', 'location',
    'new_chat_participant', 'new_chat_title', 'new_chat_photo', 'delete_chat_photo', 'left_chat_participant'
])
def see_every_content_types(msg):
    """Handler: walk the user through sending one message of every
    content type, advancing only when the expected type arrives.
    Python 2 code (print statement, iterator .next())."""
    global expected_content_type, content_type_iterator
    content_type, chat_type, chat_id = telepot.glance2(msg)
    from_id = msg['from']['id']
    # Accept messages either from the user's private chat or from the
    # user inside a group.
    if chat_id != USER_ID and from_id != USER_ID:
        print 'Unauthorized user:', chat_id, from_id
        return
    examine(msg, 'Message')
    try:
        if content_type == expected_content_type:
            expected_content_type = content_type_iterator.next()
            bot.sendMessage(chat_id, 'Please give me a %s.' % expected_content_type)
        else:
            bot.sendMessage(chat_id, 'It is not a %s. Please give me a %s, please.' % (expected_content_type, expected_content_type))
    except StopIteration:
        # reply to sender because I am kicked from group already
        bot.sendMessage(from_id, 'Thank you. I am done.')
def ask_for_various_messages():
    # Start the walkthrough: install the handler, then request the first type.
    bot.notifyOnMessage(see_every_content_types)
    global expected_content_type, content_type_iterator
    expected_content_type = content_type_iterator.next()
    bot.sendMessage(USER_ID, 'Please give me a %s.' % expected_content_type)
def test_webhook_getupdates_exclusive():
    # Verify that getUpdates() is rejected while a webhook is set -- the Bot
    # API makes the two delivery mechanisms mutually exclusive.
    bot.setWebhook('https://www.fake.com/fake', open('old.cert', 'rb'))
    print 'Fake webhook set.'
    try:
        bot.getUpdates()
    except telepot.TelegramError as e:
        print "%d: %s" % (e.error_code, e.description)
        print 'As expected, getUpdates() produces an error.'
    # Calling setWebhook() with no URL removes the webhook again.
    bot.setWebhook()
    print 'Fake webhook cancelled.'
# Script entry: bot token and tester chat id come from the command line.
TOKEN = sys.argv[1]
USER_ID = long(sys.argv[2])  # Python 2: chat ids can exceed the int range
bot = telepot.Bot(TOKEN)
test_webhook_getupdates_exclusive()
get_user_profile_photos()
print 'Text me to start.'
bot.notifyOnMessage(send_everything_on_contact, run_forever=True)
| 31.867133
| 144
| 0.678078
|
4a0963ce80d4cb7a130adaee5cde846ec1e9648a
| 8,983
|
py
|
Python
|
source/resolveCue.py
|
networkdynamics/attribution-extraction
|
5f4420769ea74f1c358ff8a2423fa8ca9083b4fc
|
[
"MIT"
] | 3
|
2018-03-08T21:38:42.000Z
|
2020-05-01T14:14:22.000Z
|
source/resolveCue.py
|
networkdynamics/attribution-extraction
|
5f4420769ea74f1c358ff8a2423fa8ca9083b4fc
|
[
"MIT"
] | null | null | null |
source/resolveCue.py
|
networkdynamics/attribution-extraction
|
5f4420769ea74f1c358ff8a2423fa8ca9083b4fc
|
[
"MIT"
] | null | null | null |
from operator import itemgetter
def linkCue(article, verbs, nouns):
    # Resolve the cue (and, where needed, the source) for every attribution in
    # *article*, trying a cascade of heuristics in decreasing priority:
    #   1. an "according to" construction inside the source span
    #   2. a verb cue among the dependency parents of the content/source head
    #   3. the closest marked verb cue in the source/content sentence
    #   4. the closest noun cue in the source/content sentence
    #   5. the token immediately before/after the content span
    # *verbs* rows look like (word, sentence_id, token_id, _, 'Y'/'N');
    # *nouns* is a list of cue lemmas.
    attrs = article.attributions
    sentences = article.sentences
    keys = attrs.keys()
    print 'cueresolving'
    for key in keys:
        currentAttr = attrs[key]
        contentSpan = currentAttr['content']
        sourceSpan = currentAttr['source']
        if contentSpan == []:
            # Nothing to anchor on without a content span.
            continue
        contentSent = contentSpan[0]['sentence_id']
        thiSentence = sentences[contentSent]
        #print sentences[contentSent]
        #print contentSpan
        #print sourceSpan
        contentHead = find_head(contentSpan)
        sourceHead = find_head(sourceSpan)
        # Gather the dependency parents of the content head token(s).
        contentParent = []
        if len(contentHead) > 0:
            for head in contentHead:
                if 'parents' in head:
                    contentParent = contentParent + head['parents']
        sourceParent = []
        sourceSent = -1
        # NOTE(review): sourceSent is initialised to -1 and never reassigned,
        # so the sentences[sourceSent] lookups below index the *last* sentence
        # of the article -- looks unintended; confirm against the data model.
        if len(sourceSpan) > 0:
            # Heuristic 1: "according to X" -- cue is "according to", the
            # remainder of the span is the source.
            boolean, source, cue = accordingTos(sourceSpan, currentAttr)
            if boolean == True:
                assign(article, currentAttr, source, cue)
                continue
            sourceParent = []
            for head in sourceHead:
                if 'parents' in head:
                    sourceParent = sourceParent + head['parents']
        assigned = False
        # Heuristic 2a: a verb cue among the content head's parents.
        if len(contentParent) >= 1:
            for tok in contentParent:
                thisTok = tok[1]
                if isVerbCue(thisTok, verbs):
                    tokens = getVerbAux(thisTok)
                    assign(article, currentAttr, sourceSpan, tokens)
                    assigned = True
                    break
        #print currentAttr
        # Heuristic 2b: a verb cue among the source head's parents.
        if assigned == False and len(sourceParent) >= 1:
            for tok in sourceParent:
                thisTok = tok[1]
                if isVerbCue(thisTok, verbs):
                    #print 'content', thisTok
                    tokens = getVerbAux(thisTok)
                    assign(article, currentAttr, sourceSpan, tokens)
                    assigned = True
                    break
        if assigned == True:
            continue
        # Heuristic 3: closest marked verb cue, source sentence first, then
        # the content sentence.
        if currentAttr['source'] != [] and hasVerbCue(sourceSent, verbs):
            token = closestVerbCue(currentAttr['source'], verbs, sentences[sourceSent])
            #print 'closest source', token
            tokens = getVerbAux(token)
            assign(article, currentAttr, sourceSpan, tokens)
            #print currentAttr
            continue
        elif hasVerbCue(contentSent, verbs):
            token = closestVerbCue(currentAttr['content'], verbs, thiSentence)
            tokens = getVerbAux(token)
            #print 'verb cue sentence', token
            assign(article, currentAttr, sourceSpan, tokens)
            #print currentAttr
            continue
        # Heuristic 4: noun cues.
        ContenthasANounCue = hasNounCue(sentences[contentSent], nouns)
        if sourceSent != -1:
            SourcehasANounCue = hasNounCue(sentences[sourceSent], nouns)
        if sourceSent != -1 and SourcehasANounCue:
            #print 'source sentence noun cue', token
            # NOTE(review): closestVerbCue is called with *nouns* here, but its
            # tuple indexing (verb[1], verb[4]) does not fit noun lemmas --
            # possibly closestNounCue was intended; verify before relying on
            # this branch.
            cue = closestVerbCue(currentAttr['source'], nouns, sentences[sourceSent])
            tokens = getVerbAux(cue)
            assign(article, currentAttr, sourceSpan, tokens)
            continue
        elif ContenthasANounCue:
            cue = closestVerbCue(currentAttr['content'], nouns, sentences[contentSent])
            tokens = getVerbAux(cue)
            #print 'content sentence noun cue', token
            assign(article, currentAttr, sourceSpan, tokens)
            continue
        # Heuristic 5: fall back to the token adjacent to the content span.
        if tokenSurroundingContent(sentences[contentSent], currentAttr['content'], 'before') != False:
            cue = tokenSurroundingContent(sentences[contentSent], currentAttr['content'], 'before')
            tokens = getVerbAux(cue)
            #print sentences[contentSent], currentAttr['content']
            #print 'next token', tokens
            assign(article, currentAttr, sourceSpan, tokens)
        elif tokenSurroundingContent(sentences[contentSent], currentAttr['content'], 'after') != False:
            cue = tokenSurroundingContent(sentences[contentSent], currentAttr['content'], 'after')
            tokens = getVerbAux(cue)
            #print sentences[contentSent], currentAttr['content']
            #print 'previous token', tokens
            assign(article, currentAttr, sourceSpan, tokens)
        else:
            print 'DIDN"T FIND ANYTHING'
    return article
def assign(article, currentAttr, source, cue):
    # Attach *source* and *cue* spans to the attribution.  When no source span
    # was found, try to recover one from the cue head's nsubj/dobj child.
    sourceSpan = []  # NOTE(review): never used -- leftover local.
    if source == []:
        print source
        print currentAttr
        print cue
        headCue = find_head(cue)
        print headCue
        parents = headCue[0]['children']
        print parents
        for parent in parents:
            # NOTE(review): Python precedence makes this
            # "nsubj in rel OR (dobj in rel AND role not in token)"; if the
            # role exclusion was meant to apply to both relations, parentheses
            # are missing -- confirm intent before changing.
            if 'nsubj' in parent[0] or 'dobj' in parent[0] and 'role' not in parent[1]:
                source = [parent[1]]
    article.add_to_attribution(currentAttr, 'source', source)
    article.add_to_attribution(currentAttr, 'cue', cue)
    #currentAttr['source'] = source
    #currentAttr['cue'] = cue
def getVerbAux(token):
if 'children' not in token:
return [token]
children = token['children']
tokenArray = [token]
for child in children:
if child[0] == 'auxpass' or child[0] == 'aux' or child[0] == 'neg' or child[0] == 'mark':
tokenArray.append(child[1])
tokens = sorted(tokenArray, key=lambda k: k['id'])
return tokens
def hasVerbCue(sentenceID, verbs):
    """Return True if any cue row in *verbs* is marked 'Y' for the sentence.

    Each row looks like (word, sentence_id, token_id, _, 'Y'/'N').
    """
    for row in verbs:
        if int(row[1]) == sentenceID and row[4] == 'Y':
            return True
    return False
def isVerbCue(verb, verbCues):
    """Return True if the token dict *verb* is an attribution cue.

    A token counts as a cue when a row in *verbCues* matches its sentence and
    token id with a 'Y' mark, or when any row's word is one of the common
    reporting verbs below (preserving the original per-row fallback check).
    """
    tok_id = verb['id']
    sent_id = verb['sentence_id']
    common = ('added', 'adds', 'said', 'say', 'says', 'expect', 'expected',
              'expects', 'report', 'believe', 'believes', 'reports', 'believed')
    for row in verbCues:
        if row[1] == sent_id and row[2] == tok_id and row[4] == 'Y':
            return True
        if row[0].lower() in common:
            return True
    return False
def closestVerbCue(content, verbs, sentence):
    """Return the token of the marked verb cue in *sentence* closest to the
    content span.

    content  -- list of token dicts (the attribution's content span)
    verbs    -- iterable of cue rows (word, sentence_id, token_id, _, 'Y'/'N')
    sentence -- sentence dict with 'id' and 'tokens'

    Distance is measured from either end of the content span to the cue's
    token id.  Assumes at least one marked cue exists in the sentence
    (callers guard with hasVerbCue).
    """
    sentenceID = sentence['id']
    tokenIds = [token['id'] for token in content]
    # Keep only the 'Y'-marked cue rows belonging to this sentence.
    cueVerbs = []
    for verb in verbs:
        if int(verb[1]) == sentenceID and verb[4] == 'Y':
            cueVerbs.append(verb)
    if len(cueVerbs) == 1:
        return sentence['tokens'][int(cueVerbs[0][2])]
    closest = 1000
    closestVerb = ''
    for cueVerb in cueVerbs:
        # BUG FIX: previously read int(verb[2]) -- the stale loop variable
        # from the filtering pass above -- so every candidate was assigned
        # the same distance and the first candidate always won.
        verbID = int(cueVerb[2])
        distance = min(abs(tokenIds[0] - verbID), abs(tokenIds[-1] - verbID))
        if closest > distance:
            closest = distance
            closestVerb = cueVerb
    return sentence['tokens'][int(closestVerb[2])]
def hasNounCue(thisSentence, nouns):
    """Return True if any token in *thisSentence* has a lemma listed in *nouns*.

    Modernised: ``dict.has_key`` was removed in Python 3; use ``in`` instead.
    The original branched on the token's role, but both branches returned
    True, so lemma membership alone decides the result -- that behavior is
    preserved here (the role exclusion lives in closestNounCue).
    """
    for token in thisSentence['tokens']:
        if token['lemma'] in nouns:
            return True
    return False
def closestNounCue(content, nouns, sentence):
    """Return the noun-cue token in *sentence* closest to the content span.

    Tokens whose role is 'content' are excluded (a cue cannot sit inside the
    quoted content).  Assumes at least one eligible cue exists (callers guard
    with hasNounCue).

    BUG FIX: the single-cue shortcut previously tested ``len(cueVerbs)`` -- a
    local that only exists inside closestVerbCue -- so this function always
    raised NameError.  It now tests the local nounCues list.  Also replaces
    the Python-2-only ``dict.has_key`` with ``in``.
    """
    tokenIds = [token['id'] for token in content]
    nounCues = []
    for token in sentence['tokens']:
        if token['lemma'] in nouns:
            if 'role' in token and token['role'] == 'content':
                continue
            nounCues.append(token)
    if len(nounCues) == 1:
        return nounCues[0]
    closest = 1000
    closestNoun = ''
    for nounCue in nounCues:
        cueId = nounCue['id']
        distance = min(abs(tokenIds[0] - cueId), abs(tokenIds[-1] - cueId))
        if closest > distance:
            closest = distance
            closestNoun = nounCue
    return closestNoun
def tokenSurroundingContent(sentence, content, position):
    """Return the token immediately before/after the content span, or False.

    position -- 'before' for the token preceding the span's first token,
                'after' for the token following the span's last token.
    Returns False when the span already touches the sentence boundary or
    *position* is unrecognised.

    BUG FIXES: the 'after' branch indexed from ``firstTokenID`` instead of
    ``lastTokenID``, and its bounds check used ``len(sentence)`` (the number
    of keys in the sentence dict) instead of the token count.
    """
    firstTokenID = content[0]['id']
    lastTokenID = content[-1]['id']
    tokens = sentence['tokens']
    if position == 'before':
        if firstTokenID != 0:
            return tokens[firstTokenID - 1]
        return False
    elif position == 'after':
        if lastTokenID != len(tokens) - 1:
            return tokens[lastTokenID + 1]
        return False
    return False
def accordingTos(sourceSpan, currentAttr):
    """Detect an "according to" cue inside the source span.

    On success, removes the two cue tokens from *sourceSpan* (mutating it)
    and returns (True, remaining_source_span, cue_span); otherwise returns
    (False, None, None).  *currentAttr* is accepted for interface parity but
    unused.
    """
    for tok in sourceSpan:
        if tok['word'].lower() != 'according':
            continue
        pos = sourceSpan.index(tok)
        follow = pos + 1
        if len(sourceSpan) > pos + 1 and sourceSpan[follow]['word'].lower() == 'to':
            cueSpan = [tok, sourceSpan[follow]]
            # Delete the later index first so the earlier one stays valid.
            del sourceSpan[follow]
            del sourceSpan[pos]
            return True, sourceSpan, cueSpan
    return False, None, None
class Stack:
    """Minimal LIFO stack backed by a Python list."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return self.items == []

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        return self.items[-1]

    def size(self):
        """Return the number of items on the stack."""
        return len(self.items)
def find_head(tokens):
    # Return the dependency head(s) of a token span: the tokens that have no
    # parent inside the span itself.  May return an empty list.
    heads = []
    # If there is only one token, that's the head
    if len(tokens) == 1:
        heads = [tokens[0]]
    else:
        # otherwise iterate over all the tokens to find the head
        for token in tokens:
            # if this token has no parents or children its not part
            # of the dependency tree (it's a preposition, e.g.)
            if 'parents' not in token and 'children' not in token:
                continue
            # if this token has any parents that among the tokens list
            # it's not the head!
            try:
                token_ids = [
                    (t['sentence_id'], t['id']) for t in tokens
                ]
                has_parent_in_span = any([
                    (t[1]['sentence_id'], t[1]['id'])
                    in token_ids for t in token['parents']
                ])
                if has_parent_in_span:
                    # Collect the parent relations that point inside the span.
                    # NOTE(review): relations_to_parents is built but never
                    # used afterwards -- possibly leftover debugging.
                    relations_to_parents = []
                    for t in token['parents']:
                        for eachToken in tokens:
                            if t[1]['id'] == eachToken['id'] and t[1]['sentence_id'] == eachToken['sentence_id']:
                                relations_to_parents.append(t)
                    # Leaving the try via continue skips the else clause below,
                    # so this token is not recorded as a head.
                    continue
            except KeyError:
                # Token (or one of its parents) lacks an expected key -- skip.
                pass
            # otherwise it is the head
            else:
                heads.append(token)
    # NOTE: head may be none
    return heads
| 22.071253
| 156
| 0.666036
|
4a0965236ee4411127927ef186e79d332eaed71e
| 2,982
|
py
|
Python
|
torchaudio/models/wav2vec2/utils/import_huggingface.py
|
z-a-f/audio
|
d64648b6ea4e5ec4e000d4eb26daa3f2499ef3ca
|
[
"BSD-2-Clause"
] | null | null | null |
torchaudio/models/wav2vec2/utils/import_huggingface.py
|
z-a-f/audio
|
d64648b6ea4e5ec4e000d4eb26daa3f2499ef3ca
|
[
"BSD-2-Clause"
] | null | null | null |
torchaudio/models/wav2vec2/utils/import_huggingface.py
|
z-a-f/audio
|
d64648b6ea4e5ec4e000d4eb26daa3f2499ef3ca
|
[
"BSD-2-Clause"
] | null | null | null |
"""Import Hugging Face transformers's wav2vec2.0 pretrained weights to torchaudios's format.
"""
import logging
from torch.nn import Module
from ..model import Wav2Vec2Model, _get_model
_LG = logging.getLogger(__name__)
def _get_config(cfg):
config = {
'extractor_mode': f'{cfg.feat_extract_norm}_norm',
'extractor_conv_layer_config': list(zip(cfg.conv_dim, cfg.conv_kernel, cfg.conv_stride)),
'extractor_conv_bias': cfg.conv_bias,
'encoder_embed_dim': cfg.hidden_size,
'encoder_projection_dropout': cfg.feat_proj_dropout,
'encoder_pos_conv_kernel': cfg.num_conv_pos_embeddings,
'encoder_pos_conv_groups': cfg.num_conv_pos_embedding_groups,
'encoder_num_layers': cfg.num_hidden_layers,
'encoder_num_heads': cfg.num_attention_heads,
'encoder_attention_dropout': cfg.attention_dropout,
'encoder_ff_interm_features': cfg.intermediate_size,
'encoder_ff_interm_dropout': cfg.activation_dropout,
'encoder_dropout': cfg.hidden_dropout,
'encoder_layer_norm_first': cfg.do_stable_layer_norm,
'encoder_layer_drop': cfg.layerdrop,
}
return config
def _build(config, original):
    """Instantiate a torchaudio wav2vec2 model from *config* and copy the
    pretrained weights over from the Hugging Face *original*."""
    is_ctc = original.__class__.__name__ == 'Wav2Vec2ForCTC'
    if is_ctc:
        aux_num_out = original.config.vocab_size
        wav2vec2 = original.wav2vec2
    else:
        _LG.warning(
            'The model is not an instance of Wav2Vec2ForCTC. '
            '"lm_head" module is not imported.')
        aux_num_out = None
        wav2vec2 = original
    imported = _get_model(**config, aux_num_out=aux_num_out)
    # Copy weights module by module; the state_dict keys line up between
    # the two implementations.
    imported.feature_extractor.load_state_dict(wav2vec2.feature_extractor.state_dict())
    imported.encoder.feature_projection.load_state_dict(wav2vec2.feature_projection.state_dict())
    imported.encoder.transformer.load_state_dict(wav2vec2.encoder.state_dict())
    if is_ctc:
        imported.aux.load_state_dict(original.lm_head.state_dict())
    return imported
def import_huggingface_model(original: Module) -> Wav2Vec2Model:
    """Import a wav2vec2 model from Hugging Face's `Transformers`_.

    Args:
        original (torch.nn.Module): An instance of ``Wav2Vec2ForCTC`` from
            ``transformers``.
    Returns:
        Wav2Vec2Model: Imported model.

    Example
        >>> from torchaudio.models.wav2vec2.utils import import_huggingface_model
        >>>
        >>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
        >>> model = import_huggingface_model(original)
        >>>
        >>> waveforms, _ = torchaudio.load("audio.wav")
        >>> logits, _ = model(waveforms)

    .. _Transformers: https://huggingface.co/transformers/
    """
    _LG.info('Importing model.')
    _LG.info('Loading model configuration.')
    config = _get_config(original.config)
    _LG.debug(' - config: %s', config)
    _LG.info('Building model.')
    return _build(config, original)
| 37.746835
| 97
| 0.705231
|
4a096581cd11536143f4a249f90089e8c5f4a77d
| 1,944
|
py
|
Python
|
app/src/processing.py
|
NilaiVemula/flask-azure
|
9b3d78964fc5caa65ed584f73800768a5be9d90c
|
[
"MIT"
] | null | null | null |
app/src/processing.py
|
NilaiVemula/flask-azure
|
9b3d78964fc5caa65ed584f73800768a5be9d90c
|
[
"MIT"
] | null | null | null |
app/src/processing.py
|
NilaiVemula/flask-azure
|
9b3d78964fc5caa65ed584f73800768a5be9d90c
|
[
"MIT"
] | null | null | null |
import numpy as np
from cv2 import connectedComponents, connectedComponentsWithStats, watershed, dilate, erode, distanceTransform, DIST_L2, CC_STAT_AREA
# make sure im is only 0 and 255
def clean_im(im):
    """Map a {0,1} mask to {0,255}: ones become 255, everything else 0.

    Returns a new array; the input is not modified.
    """
    scaled = im * 255
    scaled[scaled != 255] = 0
    return scaled
def postprocessing(im, thre_discard, wid_dilate):
    """Clean up a binary mask: drop small blobs, close gaps with a
    dilate/erode pair, then split touching regions by watershed."""
    cleaned = discard(im, thre_discard)          # remove areas <= thre_discard px
    closed = erode_im(dilate_im(cleaned, wid_dilate), wid_dilate)
    return watershed_im(closed)
# discard isolated area under threshold
def discard(im, threshold):
    """Zero out connected components whose pixel area is <= *threshold*.

    Mutates and returns the uint8 view of *im*; 4-connectivity is used.
    """
    im = np.uint8(im)
    _, labels, stats, _ = connectedComponentsWithStats(im, connectivity=4)
    small_labels = np.where(stats[:, CC_STAT_AREA] <= threshold)[0]
    im[np.isin(labels, small_labels)] = 0
    return im
# dilate width px
def dilate_im(im, width):
    """Dilate the mask by *width* pixels with a square structuring element."""
    kernel = np.ones((2 * width + 1, 2 * width + 1), np.uint8)
    return dilate(im, kernel, iterations=1)
# erode width px
def erode_im(im, width):
    """Erode the mask by *width* pixels with a square structuring element."""
    kernel = np.ones((2 * width + 1, 2 * width + 1), np.uint8)
    return erode(im, kernel, iterations=1)
# extract lines
def watershed_im(im):
    # Split the binary mask into regions via watershed and return a 0/1 image
    # of the watershed boundary lines.
    im = np.asarray(im, np.uint8)
    # Distance transform: each foreground pixel's distance to the nearest
    # background pixel, used as the watershed "elevation" image.
    im_dist = distanceTransform(im, DIST_L2, 5)
    im_dist = im_dist.reshape((im_dist.shape[0], im_dist.shape[1], -1))
    # cv2.watershed expects a 3-channel 8-bit image.
    im_wsd = np.tile(im_dist, (1, 1, 3))
    im_wsd = np.asarray(im_wsd, np.uint8)
    # Seed markers come from the connected components of the inverted mask.
    im = 1 - im
    _, markers = connectedComponents(im)
    im_wsd = watershed(im_wsd, markers)
    # Force the image border to a non-boundary label, then keep only the
    # watershed boundary pixels (labelled -1).
    im_wsd[0, :], im_wsd[-1, :], im_wsd[:, 0], im_wsd[:, -1] = 1, 1, 1, 1
    im_wsd = (im_wsd == -1)
    im_wsd = np.asarray(im_wsd, np.uint8)
    # fill 1px
    # Remove isolated single-pixel holes in the boundary map: invert, find
    # components of area <= 1, and paint them back as boundary.
    im_wsd = 1 - im_wsd
    _, im_label, stats, _ = connectedComponentsWithStats(
        im_wsd, connectivity=4)
    mask = np.isin(im_label, np.where(stats[:, CC_STAT_AREA] <= 1)[0])
    im_wsd = 1 - im_wsd
    im_wsd[mask] = 1
    return im_wsd
| 27
| 133
| 0.643519
|
4a096589cd391cfb06c4ad1d0b63b92dc8f94ac8
| 4,061
|
py
|
Python
|
script/ota.py
|
xiongyu0523/azure-sphere-external-mcu-ota
|
b09aa865a83f84e741d26a2edf0c4e94b0f0f724
|
[
"MIT"
] | 3
|
2020-07-29T08:32:44.000Z
|
2020-11-23T05:16:35.000Z
|
script/ota.py
|
xiongyu0523/azure-sphere-external-mcu-ota
|
b09aa865a83f84e741d26a2edf0c4e94b0f0f724
|
[
"MIT"
] | null | null | null |
script/ota.py
|
xiongyu0523/azure-sphere-external-mcu-ota
|
b09aa865a83f84e741d26a2edf0c4e94b0f0f724
|
[
"MIT"
] | 1
|
2020-07-29T08:32:47.000Z
|
2020-07-29T08:32:47.000Z
|
import os
import sys
import re
import argparse
import json
import hashlib
from datetime import datetime, timedelta
from azure.iot.hub import IoTHubRegistryManager
from azure.iot.hub import IoTHubConfigurationManager
from azure.iot.hub import models
from azure.storage.blob import BlobServiceClient, generate_container_sas, ContainerSasPermissions
# Storage credentials come from the environment; the account name and key are
# parsed back out of the connection string because SAS generation needs them
# separately.
blob_conn_str = os.environ["AZURE_STORAGE_CONNECTIONSTRING"]
stroage_account_name = re.search('AccountName=(.*);AccountKey=', blob_conn_str).group(1)  # (sic: "stroage")
account_access_key = re.search('AccountKey=(.*);EndpointSuffix=', blob_conn_str).group(1)
def upload_file(file, container):
    # Upload *file* to the given blob container under its base name,
    # overwriting any existing blob with the same name.
    file_name = os.path.basename(file)
    blob_service_client = BlobServiceClient.from_connection_string(conn_str=blob_conn_str)
    blob_client = blob_service_client.get_blob_client(container=container, blob=file_name)
    with open(file, "rb") as data:
        blob_client.upload_blob(data, overwrite=True)
def deploy(file, version, product, group, container, days):
    # Create an IoT Hub automatic device configuration that advertises the
    # uploaded firmware (URL + SAS token + SHA-256 + size) in the device
    # twin's desired properties, targeted at devices tagged with the given
    # product type and device group.  The configuration priority equals the
    # firmware version so newer versions win.
    file_size = os.stat(file).st_size
    file_url = f"https://{stroage_account_name}.blob.core.windows.net/{container}/{os.path.basename(file)}"
    # Read-only, time-limited SAS so devices can fetch the blob.
    file_sas = generate_container_sas(
        account_name=stroage_account_name,
        container_name=container,
        account_key=account_access_key,
        permission=ContainerSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(days=days)
    )
    # Integrity value the device verifies after downloading.
    with open(file, "rb") as f:
        file_sha256 = hashlib.sha256(f.read()).hexdigest().upper()
    iothub_conn_str = os.environ["AZURE_IOTHUB_CONNECTIONSTRING"]
    iothub_configuration = IoTHubConfigurationManager(iothub_conn_str)
    config = models.Configuration()
    config.id = "ota_v" + str(version)
    config.content = models.ConfigurationContent(device_content={
        "properties.desired.extFwInfo":{
            "version" : version,
            "size" : file_size,
            "url" : file_url,
            "sas" : file_sas,
            "sha256" : file_sha256
        }
    })
    # Metric queries let the hub report per-state rollout progress, based on
    # the status devices write back to their reported properties.
    config.metrics = models.ConfigurationMetrics(queries={
        "Downloading": f"SELECT deviceId FROM devices WHERE configurations.[[{config.id}]].status='Applied' AND properties.reported.extFwInfo.Status='downloading'",
        "Interrupted": f"SELECT deviceId FROM devices WHERE configurations.[[{config.id}]].status='Applied' AND properties.reported.extFwInfo.Status='interrupted'",
        "Applying": f"SELECT deviceId FROM devices WHERE configurations.[[{config.id}]].status='Applied' AND properties.reported.extFwInfo.Status='applying'",
        "Applied": f"SELECT deviceId FROM devices WHERE configurations.[[{config.id}]].status='Applied' AND properties.reported.extFwInfo.Status='applied'",
        "Error": f"SELECT deviceId FROM devices WHERE configurations.[[{config.id}]].status='Applied' AND properties.reported.extFwInfo.Status='error'"
        }
    )
    config.target_condition = f"tags.productType='{product}' AND tags.deviceGroup='{group}'"
    config.priority = version
    iothub_configuration.create_configuration(config)
if __name__ == "__main__":
    # Let's deal with arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("FILE", type=str, help="Full path of file for ota")
    parser.add_argument("VERSION", type=int, help="Version of the file, must > 0")
    parser.add_argument("PRODUCT", type=str, help="Target product type")
    parser.add_argument("GROUP", type=str, help="Target group under a product")
    parser.add_argument("-c", "--container", type=str, default="ota", help="specify the container of blob")
    parser.add_argument("-d", "--days", type=int, default=365, help="sas expire duration")
    args = parser.parse_args()
    # The version doubles as the IoT Hub configuration priority, so it must
    # be a positive integer.
    if args.VERSION <= 0:
        raise ValueError("version should > 0")
    # Step1: upload the file to azure blob
    upload_file(args.FILE, args.container)
    # Step2: create a IoT device configuration
    deploy(args.FILE, args.VERSION, args.PRODUCT, args.GROUP, args.container, args.days)
| 41.865979
| 168
| 0.713371
|
4a096676f25796379f637c214b8dc3f7edee5b56
| 2,171
|
py
|
Python
|
Lib/idlelib/idle_test/test_pyshell.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 2,441
|
2020-07-31T06:45:53.000Z
|
2022-03-30T15:56:49.000Z
|
Lib/idlelib/idle_test/test_pyshell.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
Lib/idlelib/idle_test/test_pyshell.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
"Test pyshell, coverage 12%."
# Plus coverage of test_warning. Was 20% with test_openshell.
from idlelib import pyshell
import unittest
from test.support import requires
from tkinter import Tk
class FunctionTest(unittest.TestCase):
    # Test stand-alone module level non-gui functions.

    def test_restart_line_wide(self):
        # When the window is wide enough, the RESTART banner is padded with
        # '=' bars to exactly *width* characters; the empty filename case
        # falls back to 'Shell'.
        eq = self.assertEqual
        for file, mul, extra in (('', 22, ''), ('finame', 21, '=')):
            width = 60
            bar = mul * '='
            with self.subTest(file=file, bar=bar):
                file = file or 'Shell'
                line = pyshell.restart_line(width, file)
                eq(len(line), width)
                eq(line, f"{bar+extra} RESTART: {file} {bar}")

    def test_restart_line_narrow(self):
        # At or below the minimum tag width, the banner is not padded; one
        # extra column adds a single trailing '='.
        expect, taglen = "= RESTART: Shell", 16
        for width in (taglen-1, taglen, taglen+1):
            with self.subTest(width=width):
                self.assertEqual(pyshell.restart_line(width, ''), expect)
        self.assertEqual(pyshell.restart_line(taglen+2, ''), expect+' =')
class PyShellFileListTest(unittest.TestCase):
    # GUI-dependent tests: requires('gui') skips the class when no display
    # is available; a single hidden Tk root is shared across tests.

    @classmethod
    def setUpClass(cls):
        requires('gui')
        cls.root = Tk()
        cls.root.withdraw()

    @classmethod
    def tearDownClass(cls):
        #cls.root.update_idletasks()
##        for id in cls.root.tk.call('after', 'info'):
##            cls.root.after_cancel(id)  # Need for EditorWindow.
        cls.root.destroy()
        del cls.root

    def test_init(self):
        # PyShellFileList should use the PyShell editor class and start with
        # no shell window open.
        psfl = pyshell.PyShellFileList(self.root)
        self.assertEqual(psfl.EditorWindow, pyshell.PyShellEditorWindow)
        self.assertIsNone(psfl.pyshell)

# The following sometimes causes 'invalid command name "109734456recolorize"'.
# Uncommenting after_cancel above prevents this, but results in
# TclError: bad window path name ".!listedtoplevel.!frame.text"
# which is normally prevented by after_cancel.
##    def test_openshell(self):
##        pyshell.use_subprocess = False
##        ps = pyshell.PyShellFileList(self.root).open_shell()
##        self.assertIsInstance(ps, pyshell.PyShell)
if __name__ == '__main__':
    # Run this test module directly with verbose output.
    unittest.main(verbosity=2)
| 33.4
| 78
| 0.639337
|
4a0966df7b7b4543945c52b8d5a88bc59dafcea3
| 9,568
|
py
|
Python
|
ASHMC/courses/views.py
|
haaksmash/Webfront
|
8eb942394c568c681a83bc2c375d7552f4b3a30c
|
[
"Apache-2.0"
] | null | null | null |
ASHMC/courses/views.py
|
haaksmash/Webfront
|
8eb942394c568c681a83bc2c375d7552f4b3a30c
|
[
"Apache-2.0"
] | null | null | null |
ASHMC/courses/views.py
|
haaksmash/Webfront
|
8eb942394c568c681a83bc2c375d7552f4b3a30c
|
[
"Apache-2.0"
] | null | null | null |
from django.views.generic import View
from django.views.generic.base import TemplateView, TemplateResponseMixin
from django import http
from django.utils import simplejson as json
from django.db.models import Q
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.core.exceptions import ObjectDoesNotExist
from .forms import CourseSearch
from .models import Course, Day, Section,\
Enrollment
import datetime
# Pagination defaults for course search results.
PER_PAGE = 20
ORPHANS = 10
# Create your views here.
class CourseSearcher(TemplateResponseMixin, View):
    """
    This view class is actually an AJAX response in disguise: it filters out
    :model:`courses.Course` based on a CourseSearch form's fields.
    It creates template variables ``results``, ``total``, ``sec_filt``, and ``qs``.
    ``sec_filt`` tracks additional filtering that needs to be done to :model:`courses.Section` ; i.e.,
    when searching by exclusive day or timestart/end, we only want to show the sections
    that actually fit the search---even if the course itself has other sections that do fit.
    **Template:**
    :template:`courses/course_search_results.html`
    """
    template_name = "course_search_results.html"

    def get(self, request, page=1):
        """Typical 'get' method."""
        context = {}
        form = CourseSearch(request.GET)
        #if form.errors:
        #    raise Exception("Form not completed")
        if form.is_valid():
            qs = Course.active_objects.all()
            section_filt = Q()
            # Filter according to day preferences
            days = form.cleaned_data['days']
            if len(days) > 0:
                print 'days ', days
                if form.cleaned_data['day_limit'] == 'incl':
                    # Inclusive: a section may meet on any of the chosen days.
                    q = Q(section__meeting__timeslots__day__in=days)
                    qs = qs.filter(q)
                    section_filt = section_filt & Q(meeting__timeslots__day__in=days)
                elif form.cleaned_data['day_limit'] == 'excl':
                    # exclusion is tricky with the way we've set up models.
                    # Exclusive: meets on chosen days and on no other day.
                    i = Q(meeting__timeslots__day__in=days)
                    edays = [x for x in Day.objects.all() if x not in days]
                    print edays
                    e = Q(meeting__timeslots__day__in=edays)
                    section_filt = i & ~e
                    s = Section.safe_objects.filter(section_filt)
                    qs = qs.filter(section__in=s)
                elif form.cleaned_data['day_limit'] == 'negl':
                    # Negative: must NOT meet on any of the chosen days.
                    q = ~Q(section__meeting__timeslots__day__in=days)
                    qs = qs.filter(q)
                    section_filt = section_filt & ~Q(meeting__timeslots__day__in=days)
            # Filter according to campus preferences
            campuses = form.cleaned_data['campus']
            #print campuses
            if len(campuses) > 0:
                print "filtering by campus: {}".format(campuses)
                qs = qs.filter(  # Q(campus__in=campuses)|\
                    Q(codecampus__in=[x.code for x in campuses]) | \
                    Q(section__meeting__campus__in=campuses))
            #Filter according to area
            if form.cleaned_data['department']:
                print "Filtering by area: {}".format(form.cleaned_data['department'])
                qs = qs.filter(areas__id=form.cleaned_data['department'].id)
            # Filter by number
            if form.cleaned_data['numberlow'] > 0:
                qs = qs.filter(number__gte=form.cleaned_data['numberlow'])
            if form.cleaned_data['numberhigh'] is not None:
                qs = qs.filter(number__lte=form.cleaned_data['numberhigh'])
            if form.cleaned_data['timestart']:
                print 'starts: ', form.cleaned_data['timestart']
                q = Q(section__meeting__timeslots__starts__gt=form.cleaned_data['timestart'])
                qs = qs.filter(q)
                section_filt = section_filt & Q(meeting__timeslots__starts__gt=form.cleaned_data['timestart'])
            if form.cleaned_data['timeend']:
                print 'ends: ', form.cleaned_data['timeend']
                q = Q(section__meeting__timeslots__ends__lt=form.cleaned_data['timeend'])
                qs = qs.filter(q)
                # NOTE(review): this section filter mirrors the timestart
                # lookup (starts__gt) instead of ends__lt used for qs just
                # above -- looks like a copy/paste slip; confirm.
                section_filt = section_filt & Q(meeting__timeslots__starts__gt=form.cleaned_data['timeend'])
            #Filter according to professor
            if form.cleaned_data['professors']:
                print 'professor'
                qs = qs.filter(section__meeting__teachers=form.cleaned_data['professors'])
            if form.cleaned_data['only_open']:
                #print "only open"
                qs = qs.filter(section__is_still_open=True, section__openseats__gte=1)
            if form.cleaned_data['pf_able']:
                #print 'pf able'
                qs = qs.filter(can_passfail=True)
            if form.cleaned_data['writ_intense_only']:
                qs = qs.filter(section__is_mudd_writingintense=True)
            if form.cleaned_data['title']:
                # Title search also matches course descriptions.
                qs = qs.filter(Q(title__icontains=form.cleaned_data['title']) |\
                    Q(description__icontains=form.cleaned_data['title']))
            if form.cleaned_data['code']:
                qs = qs.filter(Q(code__icontains=form.cleaned_data['code']))
            if form.cleaned_data['not_taken']:
                # Exclude courses the requesting student is enrolled in.
                qs = qs.filter(~Q(section__enrollment__student___linked_id=request.user.id))
            if form.cleaned_data['in_reach']:
                # "In reach": no prerequisites, or every prerequisite (and
                # concurrent requirement) already taken by the student.
                course_ids = Enrollment.objects.filter(student=request.user.student)\
                    .values_list('section__course', flat=True)
                noreqs = qs.filter(Q(prerequisites=None))  # no prereqs
                possibles = qs.filter(Q(prerequisites__id__in=course_ids))
                # we'll take a performance hit here, but the number of courses
                # that are in possibles is likely < 200, so it's not so bad.
                for c in possibles:
                    for cid in c.prerequisites.all().values_list('id', flat=True):
                        if cid not in course_ids:
                            possibles = possibles.exclude(id=cid)
                            continue
                    for cid in c.concurrent_with.all().values_list('id', flat=True):
                        if cid not in course_ids:
                            possibles = possibles.exclude(id=cid)
                            continue
                qs = noreqs | possibles
            context['sec_filt'] = section_filt
            context['total'] = qs.count()
            context['qs'] = qs.order_by('codeletters', 'number')
            p = Paginator(context['qs'], per_page=PER_PAGE, orphans=ORPHANS, allow_empty_first_page=True)
            try:
                results = p.page(page)
            except (EmptyPage, InvalidPage):
                # Out-of-range page requested: fall back to the last page.
                results = p.page(p.num_pages)
            context['results'] = results
            context['path'] = request.GET.copy().urlencode()
        else:
            # Invalid form: hand it back so the template can show errors.
            context['form'] = form
        return self.render_to_response(context)
def get_nearest_day(daycode):
    """Return the date in the current week falling on the weekday named by
    *daycode* ('M'..'U' for Monday through Sunday), used to anchor weekly
    timeslots onto concrete calendar dates.

    BUG FIX: the Sunday check compared ``today.weekday() is 6`` -- an
    identity comparison against an int literal that only works by accident
    of CPython's small-int caching -- replaced with ``==``.
    """
    DAY_MAP = {'M': 0,
               'T': 1,
               'W': 2,
               'R': 3,
               'F': 4,
               'S': 5,
               'U': 6}
    today = datetime.date.today()
    # Step back (or forward) from today to the requested weekday.
    nearest = today - datetime.timedelta(today.weekday() - DAY_MAP[daycode])
    if today.weekday() == 6:
        nearest += datetime.timedelta(7)  # handles sunday oddness
    return nearest
class JSONifySectionData(View):
    # AJAX endpoint: serialise a Section's weekly meeting times as
    # calendar-style event objects anchored to the current week.

    def get(self, request):
        # ?id=<section pk> is required; a missing/bad id or unknown section
        # yields a 400 response.
        try:
            section = Section.objects.get(pk=int(request.GET['id']))
        except (KeyError, ObjectDoesNotExist):
            # NOTE(review): raising (rather than returning) an HttpResponse
            # subclass is unusual for Django views -- confirm this produces
            # the intended 400 rather than a 500.
            raise http.HttpResponseBadRequest()
        events = []
        for m in section.meeting_set.all():
            for t in m.timeslots.all():
                obj = {}
                obj['id'] = section.id
                obj['title'] = section.course.code
                # Anchor each timeslot to the matching weekday of the current
                # week so the calendar renders a generic week view.
                obj['start'] = datetime.datetime.combine(
                    get_nearest_day(t.day.code),
                    t.starts
                ).isoformat()
                obj['end'] = datetime.datetime.combine(
                    get_nearest_day(t.day.code),
                    t.ends
                ).isoformat()
                obj['className'] = ['colored', m.campus.code]
                events += [obj]
        content = json.dumps(events)
        return self.get_json_response(content)

    def get_json_response(self, content, **httpresponse_kwargs):
        "Construct an `HttpResponse` object."
        return http.HttpResponse(content,
                                 content_type='application/json',
                                 **httpresponse_kwargs)
class CourseDetail(TemplateResponseMixin, View):
    """Detail view for a single course, looked up by numeric id.

    Raises Http404 when the id in the URL is not an integer.
    """
    template_name = "course_detail.html"

    def get(self, request, cid="Q"):
        # BUG FIX: previously converted the *builtin* ``id`` function
        # (``int(id)``) instead of the ``cid`` argument, which raised an
        # uncaught TypeError on every request instead of validating the id.
        try:
            cid = int(cid)
        except ValueError:
            # Non-numeric course id in the URL -> 404.
            raise http.Http404
        print(cid)
class SplashPage(TemplateView):
    # Landing page: renders the splash template with a blank search form.
    template_name = 'splash.html'

    def get_context_data(self, **kwargs):
        # Add an unbound CourseSearch form to the standard template context.
        context = super(SplashPage, self).get_context_data(**kwargs)
        context['form'] = CourseSearch()
        return context
| 40.542373
| 110
| 0.556334
|
4a096743f8e23530d80c2b7fbfc9b5a6c60c294b
| 4,839
|
py
|
Python
|
samples/opensource/sampleUffMaskRCNN/converted/mrcnn_to_trt_single.py
|
gabrielibagon/TensorRT
|
f9233dbb7b5d09e6f4014dc5ac766f066db57d85
|
[
"Apache-2.0"
] | null | null | null |
samples/opensource/sampleUffMaskRCNN/converted/mrcnn_to_trt_single.py
|
gabrielibagon/TensorRT
|
f9233dbb7b5d09e6f4014dc5ac766f066db57d85
|
[
"Apache-2.0"
] | null | null | null |
samples/opensource/sampleUffMaskRCNN/converted/mrcnn_to_trt_single.py
|
gabrielibagon/TensorRT
|
f9233dbb7b5d09e6f4014dc5ac766f066db57d85
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.models import model_from_json, Model
from keras import backend as K
from keras.layers import Input, Lambda
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
from mrcnn.model import *
import mrcnn.model as modellib
from mrcnn.config import Config
import sys
import os
ROOT_DIR = os.path.abspath("./")
LOG_DIR = os.path.join(ROOT_DIR, "logs")
import argparse
import os
import uff
def parse_command_line_arguments(args=None):
    """Build and run the CLI parser for the keras-to-TensorRT converter.

    args -- argument list to parse; None means sys.argv[1:].
    Returns the parsed argparse.Namespace (attributes: weights, output_file,
    list_nodes, preprocessor).
    """
    parser = argparse.ArgumentParser(
        prog='keras_to_trt',
        description='Convert trained keras .hdf5 model to trt .uff')
    parser.add_argument(
        '-w', '--weights',
        type=str, default=None, required=True,
        help="The checkpoint weights file of keras model.")
    parser.add_argument(
        '-o', '--output_file',
        type=str, default=None, required=True,
        help="The path to output .uff file.")
    parser.add_argument(
        '-l', '--list-nodes',
        action='store_true',
        help="show list of nodes contained in converted pb")
    parser.add_argument(
        '-p', '--preprocessor',
        type=str, default=False,
        help="The preprocess function for converting tf node to trt plugin")
    return parser.parse_args(args)
class CocoConfig(Config):
    """Configuration for training on MS COCO.
    Derives from the base Config class and overrides values specific
    to the COCO dataset.
    """
    # Give the configuration a recognizable name
    NAME = "coco"

    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 2

    # Uncomment to train on 8 GPUs (default is 1)
    # GPU_COUNT = 8

    # Number of classes (including background)
    NUM_CLASSES = 1 + 80  # COCO has 80 classes
class InferenceConfig(CocoConfig):
    """CocoConfig specialized for single-image inference (for conversion)."""
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    # Backbone used when the graph is rebuilt for export.
    BACKBONE = 'resnet18'  # OR 'resnet50' OR 'resnet101'
def main(args=None):
    """CLI entry point: build the Mask R-CNN inference graph, load trained
    weights, truncate it at the mask head, and convert it to a .uff file."""
    # Channels-first layout and inference mode must be set before any part
    # of the graph is constructed.
    K.set_image_data_format('channels_first')
    K.set_learning_phase(0)

    cli = parse_command_line_arguments(args)

    inference_config = InferenceConfig()
    inference_config.display()

    # Build the full Mask R-CNN Keras graph and restore the checkpoint.
    mrcnn = modellib.MaskRCNN(mode="inference", model_dir=LOG_DIR,
                              config=inference_config).keras_model
    mrcnn.load_weights(cli.weights, by_name=True)

    # Truncate the graph at the mask head output before export.
    trimmed = Model(inputs=mrcnn.input,
                    outputs=mrcnn.get_layer('mrcnn_mask').output)
    trimmed.summary()

    convert_model(trimmed, cli.output_file,
                  ['mrcnn_detection', "mrcnn_mask/Sigmoid"],
                  preprocessor=cli.preprocessor,
                  text=True, list_nodes=cli.list_nodes)
def convert_model(inference_model, output_path, output_nodes=None, preprocessor=None, text=False,
                  list_nodes=False):
    """Freeze a Keras model into a temporary .pb graph and convert it to .uff.

    Args:
        inference_model: Keras Model to freeze and convert.
        output_path: destination path of the generated .uff file.
        output_nodes: optional list of graph node names to export; when empty
            or None the model's own output node names are used.
        preprocessor: optional preprocessing script handed to the UFF
            converter (maps unsupported TF nodes to TRT plugins).
        text: when True, also emit a human-readable text form of the graph.
        list_nodes: when True, print the nodes contained in the converted pb.
    """
    # Fixed: `output_nodes=[]` was a mutable default argument (shared across
    # calls); use None as the sentinel instead.
    if output_nodes is None:
        output_nodes = []

    # Freeze the graph: replace all variables with constants.
    orig_output_node_names = [node.op.name for node in inference_model.outputs]
    print("The output names of tensorflow graph nodes: {}".format(str(orig_output_node_names)))

    sess = K.get_session()
    constant_graph = graph_util.convert_variables_to_constants(
        sess,
        sess.graph.as_graph_def(),
        orig_output_node_names)

    # NOTE(review): the intermediate .pb is written relative to the CWD's
    # parent directory — confirm this is intentional before changing it.
    temp_pb_path = "../temp.pb"
    graph_io.write_graph(constant_graph, os.path.dirname(temp_pb_path), os.path.basename(temp_pb_path),
                         as_text=False)

    if output_nodes != []:
        trt_output_nodes = output_nodes
    else:
        trt_output_nodes = orig_output_node_names

    try:
        # convert .pb to .uff
        uff.from_tensorflow_frozen_model(
            temp_pb_path,
            output_nodes=trt_output_nodes,
            preprocessor=preprocessor,
            text=text,
            list_nodes=list_nodes,
            output_filename=output_path,
            debug_mode=False
        )
    finally:
        # Fixed: remove the intermediate .pb even when the UFF conversion
        # raises (previously it leaked on failure).
        os.remove(temp_pb_path)
# Script entry point: run the conversion when executed directly.
if __name__ == "__main__":
    main()
| 29.150602
| 118
| 0.689399
|
4a0968850544c67bd7466c40fa233ffd856ee040
| 2,685
|
py
|
Python
|
aliyun-python-sdk-smartag/aliyunsdksmartag/request/v20180313/ListAccessPointsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-smartag/aliyunsdksmartag/request/v20180313/ListAccessPointsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-smartag/aliyunsdksmartag/request/v20180313/ListAccessPointsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksmartag.endpoint import endpoint_data
class ListAccessPointsRequest(RpcRequest):
    """RPC request object for the Smartag ``ListAccessPoints`` API
    (version 2018-03-13). Exposes typed accessors for each query parameter."""

    def __init__(self):
        RpcRequest.__init__(self, 'Smartag', '2018-03-13', 'ListAccessPoints', 'smartag')
        self.set_method('POST')
        # Wire up endpoint resolution data when the core SDK supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ResourceOwnerId(self):
        """Long: the 'ResourceOwnerId' query parameter."""
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        """Set the 'ResourceOwnerId' query parameter (Long)."""
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_PageNumber(self):
        """Integer: the 'PageNumber' query parameter."""
        return self.get_query_params().get('PageNumber')

    def set_PageNumber(self, PageNumber):
        """Set the 'PageNumber' query parameter (Integer)."""
        self.add_query_param('PageNumber', PageNumber)

    def get_PageSize(self):
        """Integer: the 'PageSize' query parameter."""
        return self.get_query_params().get('PageSize')

    def set_PageSize(self, PageSize):
        """Set the 'PageSize' query parameter (Integer)."""
        self.add_query_param('PageSize', PageSize)

    def get_ResourceOwnerAccount(self):
        """String: the 'ResourceOwnerAccount' query parameter."""
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        """Set the 'ResourceOwnerAccount' query parameter (String)."""
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):
        """String: the 'OwnerAccount' query parameter."""
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        """Set the 'OwnerAccount' query parameter (String)."""
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):
        """Long: the 'OwnerId' query parameter."""
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        """Set the 'OwnerId' query parameter (Long)."""
        self.add_query_param('OwnerId', OwnerId)

    def get_SmartAGId(self):
        """String: the 'SmartAGId' query parameter."""
        return self.get_query_params().get('SmartAGId')

    def set_SmartAGId(self, SmartAGId):
        """Set the 'SmartAGId' query parameter (String)."""
        self.add_query_param('SmartAGId', SmartAGId)
| 38.913043
| 83
| 0.762011
|
4a0969189445dbfcda9395110506c649c681e178
| 2,121
|
py
|
Python
|
src/python/pants/backend/jvm/tasks/jvm_compile/zinc/zinc_analysis_parser.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | 1
|
2018-12-10T21:31:02.000Z
|
2018-12-10T21:31:02.000Z
|
src/python/pants/backend/jvm/tasks/jvm_compile/zinc/zinc_analysis_parser.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | 2
|
2016-10-13T21:37:42.000Z
|
2018-07-20T20:14:33.000Z
|
src/python/pants/backend/jvm/tasks/jvm_compile/zinc/zinc_analysis_parser.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | 1
|
2021-11-11T14:04:12.000Z
|
2021-11-11T14:04:12.000Z
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.tasks.jvm_compile.analysis_parser import (AnalysisParser, ParseError,
raise_on_eof)
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_analysis import ZincAnalysis
from pants.backend.jvm.zinc.zinc_analysis_parser import ZincAnalysisParser as UnderlyingParser
class ZincAnalysisParser(AnalysisParser):
  """Parses a zinc analysis file.

  All parsing work is delegated to an underlying
  pants.backend.jvm.zinc.ZincAnalysisParser instance; this class only
  translates the underlying errors into the AnalysisParser error type.
  """

  # Implement AnalysisParser properties.
  empty_test_header = b'products'
  current_test_header = ZincAnalysis.FORMAT_VERSION_LINE

  def __init__(self):
    self._underlying_parser = UnderlyingParser()

  def _delegated(self, infile, thunk):
    """Run `thunk` under EOF protection, translating underlying parse errors."""
    with raise_on_eof(infile):
      try:
        return thunk()
      except UnderlyingParser.ParseError as err:
        raise ParseError(err)

  # Implement AnalysisParser methods.
  def parse(self, infile):
    """Parse a ZincAnalysis instance from an open text file."""
    return self._delegated(
        infile, lambda: ZincAnalysis(self._underlying_parser.parse(infile)))

  def parse_products(self, infile, classes_dir):
    """An efficient parser of just the products section."""
    return self._delegated(
        infile, lambda: self._underlying_parser.parse_products(infile))

  def parse_deps(self, infile):
    """Parse just the dependencies of the analysis."""
    return self._delegated(
        infile, lambda: self._underlying_parser.parse_deps(infile, ""))

  def rebase(self, infile, outfile, rebase_mappings, java_home=None):
    """Rewrite paths in the analysis from infile into outfile per rebase_mappings."""
    return self._delegated(
        infile,
        lambda: self._underlying_parser.rebase(infile, outfile, rebase_mappings, java_home))
| 37.210526
| 96
| 0.722772
|
4a09699d8a1603e1cc1e10f93ea91faaa500a632
| 6,918
|
py
|
Python
|
src/binwalk/modules/hexdiff.py
|
foreni-packages/binwalk-
|
cdfcfff21b6b4a880d70b7754ad73878b01894c9
|
[
"MIT"
] | null | null | null |
src/binwalk/modules/hexdiff.py
|
foreni-packages/binwalk-
|
cdfcfff21b6b4a880d70b7754ad73878b01894c9
|
[
"MIT"
] | null | null | null |
src/binwalk/modules/hexdiff.py
|
foreni-packages/binwalk-
|
cdfcfff21b6b4a880d70b7754ad73878b01894c9
|
[
"MIT"
] | null | null | null |
import os
import sys
import curses
import string
import platform
import binwalk.core.common as common
from binwalk.core.compat import *
from binwalk.core.module import Module, Option, Kwarg
class HexDiff(Module):
    """Hex dump / diff module.

    Dumps one or more files side by side and colorizes each byte by how it
    compares across all files: green = identical in every file, red =
    different in every file, blue = different in some files.
    """

    # ANSI color codes used for terminal colorization.
    COLORS = {
        'red': '31',
        'green': '32',
        'blue': '34',
    }

    # Alternating column separators between per-file hexdump columns.
    SEPERATORS = ['\\', '/']
    DEFAULT_BLOCK_SIZE = 16

    # Placeholder emitted for lines filtered out by the color filter.
    SKIPPED_LINE = "*"
    CUSTOM_DISPLAY_FORMAT = "0x%.8X    %s"

    TITLE = "Binary Diffing"

    CLI = [
        Option(short='W',
               long='hexdump',
               kwargs={'enabled': True},
               description='Perform a hexdump / diff of a file or files'),
        Option(short='G',
               long='green',
               kwargs={'show_green': True, 'show_blue': False, 'show_red': False},
               description='Only show lines containing bytes that are the same among all files'),
        Option(short='i',
               long='red',
               kwargs={'show_red': True, 'show_blue': False, 'show_green': False},
               description='Only show lines containing bytes that are different among all files'),
        Option(short='U',
               long='blue',
               kwargs={'show_blue': True, 'show_red': False, 'show_green': False},
               description='Only show lines containing bytes that are different among some files'),
        Option(short='w',
               long='terse',
               kwargs={'terse': True},
               description='Diff all files, but only display a hex dump of the first file'),
    ]

    KWARGS = [
        Kwarg(name='show_red', default=True),
        Kwarg(name='show_blue', default=True),
        Kwarg(name='show_green', default=True),
        Kwarg(name='terse', default=False),
        Kwarg(name='enabled', default=False),
    ]

    RESULT_FORMAT = "%s\n"
    RESULT = ['display']

    def _no_colorize(self, c, color="red", bold=True):
        # Pass-through used when the terminal does not support colors.
        return c

    def _colorize(self, c, color="red", bold=True):
        # Wrap `c` in an ANSI escape sequence for the given color.
        attr = []

        attr.append(self.COLORS[color])
        if bold:
            attr.append('1')

        return "\x1b[%sm%s\x1b[0m" % (';'.join(attr), c)

    def _color_filter(self, data):
        # Return True if `data` contains a color the user asked to display.
        red = '\x1b[' + self.COLORS['red'] + ';'
        green = '\x1b[' + self.COLORS['green'] + ';'
        blue = '\x1b[' + self.COLORS['blue'] + ';'

        if self.show_blue and blue in data:
            return True
        elif self.show_green and green in data:
            return True
        elif self.show_red and red in data:
            return True

        return False

    def hexascii(self, target_data, byte, offset):
        """Return a colorized (hex, ascii) pair for one byte.

        target_data maps each open file to its current data block; `offset`
        is the byte's index within the block. The color encodes how the byte
        at this offset compares across all files.
        """
        color = "green"

        for (fp_i, data_i) in iterator(target_data):
            diff_count = 0

            for (fp_j, data_j) in iterator(target_data):
                if fp_i == fp_j:
                    continue

                try:
                    if data_i[offset] != data_j[offset]:
                        diff_count += 1
                except IndexError as e:
                    # One block is shorter than the other: counts as a diff.
                    diff_count += 1

            if diff_count == len(target_data)-1:
                # Differs from every other file.
                color = "red"
            elif diff_count > 0:
                # Differs from some, but not all, other files.
                color = "blue"
                break

        hexbyte = self.colorize("%.2X" % ord(byte), color)

        if byte not in string.printable or byte in string.whitespace:
            byte = "."

        asciibyte = self.colorize(byte, color)

        return (hexbyte, asciibyte)

    def diff_files(self, target_files):
        """Walk all files block by block, emitting one diff line per block."""
        last_line = None
        loop_count = 0
        sep_count = 0

        while True:
            line = ""
            done_files = 0
            block_data = {}
            seperator = self.SEPERATORS[sep_count % 2]

            for fp in target_files:
                block_data[fp] = fp.read(self.block)
                if not block_data[fp]:
                    done_files += 1

            # No more data from any of the target files? Done.
            if done_files == len(target_files):
                break

            for fp in target_files:
                hexline = ""
                asciiline = ""

                for i in range(0, self.block):
                    if i >= len(block_data[fp]):
                        # Pad short blocks so the columns stay aligned.
                        hexbyte = "XX"
                        asciibyte = "."
                    else:
                        (hexbyte, asciibyte) = self.hexascii(block_data, block_data[fp][i], i)

                    hexline += "%s " % hexbyte
                    asciiline += "%s" % asciibyte

                line += "%s |%s|" % (hexline, asciiline)

                if self.terse:
                    # Only the first file's dump is displayed in terse mode.
                    break

                if fp != target_files[-1]:
                    line += " %s " % seperator

            offset = fp.offset + (self.block * loop_count)

            if not self._color_filter(line):
                # Line filtered out: collapse consecutive skips into one '*'.
                display = line = self.SKIPPED_LINE
            else:
                display = self.CUSTOM_DISPLAY_FORMAT % (offset, line)
                sep_count += 1

            if line != self.SKIPPED_LINE or last_line != line:
                self.result(offset=offset, description=line, display=display)

            last_line = line
            loop_count += 1

    def init(self):
        # Disable the invalid description auto-filtering feature.
        # This will not affect our own validation.
        self.config.filter.show_invalid_results = True

        # Always disable terminal formatting, as it won't work properly with colorized output
        self.config.display.fit_to_screen = False

        # Set the block size (aka, hexdump line size)
        self.block = self.config.block
        if not self.block:
            self.block = self.DEFAULT_BLOCK_SIZE

        # Build a list of files to hexdiff
        self.hex_target_files = [x for x in iter(self.next_file, None)]

        # Build the header format string
        header_width = (self.block * 4) + 2
        if self.terse:
            file_count = 1
        else:
            file_count = len(self.hex_target_files)
        self.HEADER_FORMAT = "OFFSET        " + (("%%-%ds   " % header_width) * file_count) + "\n"

        # Build the header argument list
        self.HEADER = [fp.name for fp in self.hex_target_files]
        if self.terse and len(self.HEADER) > 1:
            self.HEADER = self.HEADER[0]

        # Set up the tty for colorization, if it is supported
        if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty() and platform.system() != 'Windows':
            curses.setupterm()
            self.colorize = self._colorize
        else:
            self.colorize = self._no_colorize

    def run(self):
        # Entry point: dump/diff all target files, then close the report.
        if self.hex_target_files:
            self.header()
            self.diff_files(self.hex_target_files)
            self.footer()
| 32.027778
| 103
| 0.514021
|
4a09699faacc3cad0545f230cbc22e4683754f3e
| 4,520
|
py
|
Python
|
apps/gromacs/benchmark.py
|
t-young31/excalibur-tests
|
039d2fe450c65975b829b7feb5bd2f7aaa48f67a
|
[
"Apache-2.0"
] | null | null | null |
apps/gromacs/benchmark.py
|
t-young31/excalibur-tests
|
039d2fe450c65975b829b7feb5bd2f7aaa48f67a
|
[
"Apache-2.0"
] | null | null | null |
apps/gromacs/benchmark.py
|
t-young31/excalibur-tests
|
039d2fe450c65975b829b7feb5bd2f7aaa48f67a
|
[
"Apache-2.0"
] | null | null | null |
"""
Strong scaling of GROMACS. Uses the benchmark simulation from
https://www.hecbiosim.ac.uk/access-hpc/benchmarks
"""
import os
import math
import reframe as rfm
import reframe.utility.sanity as sn
from abc import abstractmethod
from reframe.core.decorators import run_before, run_after
this_dir = os.path.dirname(__file__)
# TODO: Extract into installable module ---------------------------------------
def spack_env_dir(hostname):
    """
    Find the directory that holds a spack.yaml file appropriate for the
    current system (cluster).
    ---------------------------------------------------------------------------
    Args:
        hostname (str): Name of the host e.g. cosma8
    Returns:
        (str): Path to the spack env directory
    Raises:
        RuntimeError: If no environment directory exists for this host.
    """
    dir_path = os.path.join(this_dir, '..', '..', 'spack-environments', hostname)

    # isdir() already implies existence, so a separate exists() check is
    # redundant.
    if not os.path.isdir(dir_path):
        # Fixed: the two message fragments previously joined without a space
        # ("...Required adirectory...").
        raise RuntimeError('Failed to load a spack environment. Required a '
                           f'directory: {dir_path} that did not exist')

    return os.path.realpath(dir_path)
class DiRACTest(rfm.RegressionTest):
    """Base ReFrame test that derives the MPI/OMP task layout from a total
    core count (num_total_cores) and a thread count (num_omp_threads)."""

    # Loggable decomposition parameters; subclasses set num_total_cores and
    # num_omp_threads, the rest are filled in after setup.
    num_total_cores = variable(int, loggable=True)
    num_omp_threads = variable(int, loggable=True)
    num_mpi_tasks = variable(int, loggable=True)
    num_mpi_tasks_per_node = variable(int, loggable=True)
    num_nodes = variable(int, loggable=True)

    @run_after('setup')
    def set_attributes_after_setup(self):
        """Set the required MPI and OMP ranks/tasks/threads"""
        # One MPI task per group of OMP threads; always at least one task.
        self.num_mpi_tasks = self.num_tasks = max(self.num_total_cores//self.num_omp_threads, 1)
        try:
            cpus_per_node = self._current_partition.processor.num_cpus
            if cpus_per_node is None:
                raise AttributeError('Cannot determine the number of cores PP')
            self.num_nodes = math.ceil(self.num_mpi_tasks / cpus_per_node)
        except AttributeError:
            # Partition topology unknown: fall back to a single node.
            print('WARNING: Failed to determine the number of nodes required '
                  'defaulting to 1')
            self.num_nodes = 1
        self.num_mpi_tasks_per_node = math.ceil(self.num_mpi_tasks / self.num_nodes)
        self.num_tasks_per_node = self.num_mpi_tasks_per_node
        # Clamp the thread count when fewer total cores than threads were
        # requested (integer division above would have yielded 0 tasks).
        if self.num_total_cores // self.num_omp_threads == 0:
            print('WARNING: Had fewer total number of cores than the default '
                  f'number of OMP threads, using {self.num_total_cores} OMP '
                  f'threads')
            self.num_omp_threads = self.num_total_cores
        self.num_cpus_per_task = self.num_omp_threads
        self.variables = {
            'OMP_NUM_THREADS': f'{self.num_cpus_per_task}',
        }
        self.extra_resources = {
            'mpi': {'num_slots': self.num_mpi_tasks * self.num_cpus_per_task}
        }
# TODO: -----------------------------------------------------------------------
class GROMACSBenchmark(DiRACTest):
    """Base class for a GROMACS benchmark"""

    valid_systems = ['*']
    valid_prog_environs = ['*']

    executable = 'gmx_mpi'
    executable_opts = ['mdrun', '-deffnm', 'benchmark']
    build_system = 'Spack'
    time_limit = '60m'
    exclusive_access = True

    sourcesdir = this_dir
    readonly_files = ['benchmark.tpr']

    # Baseline: any positive rate passes; units are nanoseconds of simulated
    # time per wall-clock day.
    reference = {
        '*': {'Rate': (1, None, None, 'ns/day')}
    }

    @run_before('compile')
    def setup_build_system(self):
        """Set a specific version of GROMACS to use"""
        self.build_system.specs = ['gromacs@2019%gcc@9.3.0^openmpi@4.1.1']
        self.build_system.environment = spack_env_dir(self.current_system.name)

    @run_before('sanity')
    def set_sanity_patterns(self):
        """Set the required string in the output for a sanity check"""
        # GROMACS prints this sign-off line on successful completion.
        self.sanity_patterns = sn.assert_found(
            'GROMACS reminds you', self.stderr
        )

    @run_before('performance')
    def set_perf_patterns(self):
        """Set the regex performance pattern to locate"""
        # The 'Performance:' line's first numeric field is the ns/day rate.
        self.perf_patterns = {
            'Rate': sn.extractsingle('Performance.+', self.stderr, 0,
                                     lambda x: float(x.split()[1]))
        }
@rfm.simple_test
class StrongScalingBenchmark(GROMACSBenchmark):
    """Strong-scaling sweep over total core counts 4, 8, 12, 16, 20."""

    # Parameterized total core count (one test instance per value).
    variant = parameter([4 * i for i in range(1, 6)])
    num_omp_threads = 4

    @run_before('setup')
    def set_total_num_cores(self):
        """A ReFrame parameter cannot also be a variable, thus assign
        them to be equal at the start of the setup"""
        self.num_total_cores = self.variant
| 32.517986
| 96
| 0.622124
|
4a096aba9954de4b1f8042f4470e9878955cbc9e
| 4,966
|
py
|
Python
|
multiphonon/redutils.py
|
granrothge/multiphonon
|
486a998eeb6b73b964a58ba0f98fe3ece15bdf6e
|
[
"MIT"
] | 1
|
2019-05-22T08:46:09.000Z
|
2019-05-22T08:46:09.000Z
|
multiphonon/redutils.py
|
granrothge/multiphonon
|
486a998eeb6b73b964a58ba0f98fe3ece15bdf6e
|
[
"MIT"
] | 118
|
2016-04-04T12:27:15.000Z
|
2021-08-18T01:46:13.000Z
|
multiphonon/redutils.py
|
granrothge/multiphonon
|
486a998eeb6b73b964a58ba0f98fe3ece15bdf6e
|
[
"MIT"
] | 5
|
2017-09-28T16:01:12.000Z
|
2020-01-31T18:58:09.000Z
|
import os, sys
def _createDefaultMantidUserConfig(facility='SNS'):
# create default Mantid user configuration for DEMO purpose.
import os
mantid_config_path = os.path.expanduser('~/.mantid/Mantid.user.properties')
mantid_user_dir = os.path.dirname(mantid_config_path)
if not os.path.exists(mantid_config_path):
if not os.path.exists(mantid_user_dir):
os.makedirs(mantid_user_dir)
with open(mantid_config_path, 'wt') as of:
of.write('default.facility=%s' % facility)
return
# this should be done before mantid is imported
_createDefaultMantidUserConfig()

# Tracks whether the mantid import check has already run (see _checkMantid).
mantid_checked = False
def _checkMantid():
    """Verify that mantid (and matplotlib) are importable in a subprocess.

    Raises:
        RuntimeError: if the import still fails on the second attempt.
    """
    print("* Checking Mantid ...")
    import subprocess as sp, shlex
    # sometimes mantid import for the first time may fail, so do a throwaway
    # first attempt with output suppressed
    sp.call(shlex.split("python -c 'import matplotlib, mantid'"), stdout=sp.PIPE, stderr=sp.PIPE)
    if sp.call(shlex.split("python -c 'import matplotlib, mantid'")):
        raise RuntimeError("Please install mantid")
    global mantid_checked
    mantid_checked = True
    print(" - Done.")
    return
# Run the mantid availability check once at import time.
if not mantid_checked:
    _checkMantid()
def reduce(nxsfile, qaxis, outfile, use_ei_guess=False, ei_guess=None, eaxis=None, tof2E=True, ibnorm='ByCurrent'):
    """reduce a NeXus file to a I(Q,E) histogram using Mantid

    This is a wrapper of Mantid algorithms to reduce a NeXus file to IQE histogram.

    Parameters
    ----------
    nxsfile: str
        path to nxs file
    qaxis: 3-tuple of floats
        Momentum transfer axis. (Qmin, dQ, Qmax). unit: inverse angstrom
    outfile: str
        path to save nxs data
    use_ei_guess: boolean
        Use incident energy guess
    ei_guess: float
        Initial guess of incident energy (meV)
    eaxis: 3-tuple of floats
        Energy transfer axis. (Emin, dE, Emax). unit: meV
    tof2E: boolean
        Conversion from time of flight axis to energy axis or not.
        If the NeXus file is in time of flight, tof2E=True
        If the NeXus file is processed and in energy transfer, tof2E=False
        May also be the string 'guess', in which case the file layout is
        inspected with h5ls.
    ibnorm: str
        Incident beam normalization choice. Allowed values: None, ByCurrent, ToMonitor
        For more details, see http://docs.mantidproject.org/nightly/algorithms/DgsReduction-v1.html

    Returns
    -------
    The I(Q,E) histogram object; it is also dumped to ``outfile`` (.h5).
    """
    from mantid.simpleapi import DgsReduction, SaveNexus, Load
    from mantid import mtd
    import mantid.simpleapi as msa
    if tof2E == 'guess':
        # XXX: this is a simple guess. all raw data files seem to have root "entry"
        cmd = 'h5ls %s' % nxsfile
        import subprocess as sp, shlex
        o = sp.check_output(shlex.split(cmd)).strip().split()[0]
        if sys.version_info >= (3,0) and isinstance(o, bytes):
            o = o.decode()
        tof2E = o == 'entry'
    if tof2E:
        # Raw time-of-flight data: run the DGS reduction to convert to E.
        if use_ei_guess:
            DgsReduction(
                SampleInputFile=nxsfile,
                IncidentEnergyGuess=ei_guess,
                UseIncidentEnergyGuess=use_ei_guess,
                OutputWorkspace='reduced',
                EnergyTransferRange=eaxis,
                IncidentBeamNormalisation=ibnorm,
                )
        else:
            DgsReduction(
                SampleInputFile=nxsfile,
                OutputWorkspace='reduced',
                EnergyTransferRange=eaxis,
                IncidentBeamNormalisation=ibnorm,
                )
        reduced = mtd['reduced']
    else:
        # Already-processed data: just load the workspace.
        reduced = Load(nxsfile)
    # get eaxis info from mtd workspace, if necessary
    if eaxis is None:
        Edim = reduced.getXDimension()
        emin = Edim.getMinimum()
        emax = Edim.getMaximum()
        de = Edim.getX(1) - Edim.getX(0)
        eaxis = emin, de, emax
    # Number of bins on each axis (dq/2 and de/2 guard against float rounding).
    qmin, dq, qmax = qaxis; nq = int((qmax-qmin+dq/2.)/dq)
    emin, de, emax = eaxis; ne = int((emax-emin+de/2.)/de)
    # Convert to multi-dimensional |Q|,E space and bin onto the requested grid.
    md = msa.ConvertToMD(
        InputWorkspace=reduced,
        QDimensions='|Q|',
        dEAnalysisMode='Direct',
        MinValues="%s,%s" % (qmin, emin),
        MaxValues="%s,%s" % (qmax, emax),
        )
    binned = msa.BinMD(
        InputWorkspace=md,
        AxisAligned=1,
        AlignedDim0="|Q|,%s,%s,%s" % (qmin, qmax, nq),
        AlignedDim1="DeltaE,%s,%s,%s" % (emin, emax, ne),
        )
    # create histogram
    import histogram as H, histogram.hdf as hh
    data=binned.getSignalArray().copy()
    err2=binned.getErrorSquaredArray().copy()
    nev=binned.getNumEventsArray()
    # Normalize signal and squared errors by the per-bin event counts.
    data/=nev
    err2/=(nev*nev)
    import numpy as np
    qaxis = H.axis('Q', boundaries=np.arange(qmin, qmax+dq/2., dq), unit='1./angstrom')
    eaxis = H.axis('E', boundaries=np.arange(emin, emax+de/2., de), unit='meV')
    hist = H.histogram('IQE', (qaxis, eaxis), data=data, errors=err2)
    if outfile.endswith('.nxs'):
        import warnings
        warnings.warn("reduce function no longer writes iqe.nxs nexus file. it only writes iqe.h5 histogram file")
        outfile = outfile[:-4] + '.h5'
    hh.dump(hist, outfile)
    return hist
| 34.727273
| 151
| 0.627064
|
4a096ae72f6696576069a0f41a103ea60b77363d
| 1,525
|
py
|
Python
|
surrogate/crossover/cxOnePoint.py
|
liujiamingustc/phd
|
4f815a738abad43531d02ac66f5bd0d9a1def52a
|
[
"Apache-2.0"
] | 3
|
2021-01-06T03:01:18.000Z
|
2022-03-21T03:02:55.000Z
|
surrogate/crossover/cxOnePoint.py
|
liujiamingustc/phd
|
4f815a738abad43531d02ac66f5bd0d9a1def52a
|
[
"Apache-2.0"
] | null | null | null |
surrogate/crossover/cxOnePoint.py
|
liujiamingustc/phd
|
4f815a738abad43531d02ac66f5bd0d9a1def52a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Quan Pan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Quan Pan <quanpan302@hotmail.com>
# License: Apache License, Version 2.0
# Create: 2016-12-02
import numpy as np
def cxOnePoint(var1, var2):
    """Executes a one point crossover on the input :term:`sequence` individuals.
    The two individuals are modified in place. The resulting individuals will
    respectively have the length of the other.

    :param var1: The first variable participating in the crossover.
    :param var2: The second variable participating in the crossover.
    :returns: A tuple of two variables.

    This function uses the :func:`numpy.random.randint` function.
    """
    size = min(len(var1), len(var2))
    # Cut point in [1, size-1]. np.random.randint's upper bound is EXCLUSIVE,
    # so randint(1, size) matches DEAP's random.randint(1, size - 1).
    # (The previous randint(1, size - 1) could never pick the last slot and
    # raised ValueError for size == 2.)
    cxpoint = np.random.randint(1, size)
    # Materialize the tails before swapping: numpy slices are views, and
    # swapping views in place corrupts the exchanged data. Plain sequence
    # slices are already copies, so they need no special handling.
    tail1 = np.copy(var1[cxpoint:]) if isinstance(var1, np.ndarray) else var1[cxpoint:]
    tail2 = np.copy(var2[cxpoint:]) if isinstance(var2, np.ndarray) else var2[cxpoint:]
    var1[cxpoint:], var2[cxpoint:] = tail2, tail1

    return var1, var2
| 38.125
| 83
| 0.717377
|
4a096b05a87be9d486c35b691ab65c8c1610c536
| 2,783
|
py
|
Python
|
Taiyun/clean/taiyun.py
|
seucs/entity-linker
|
0156ad9b9d6439ea15518828513da9d9699b9acd
|
[
"Apache-2.0"
] | 4
|
2018-03-07T07:59:58.000Z
|
2019-10-19T09:31:44.000Z
|
Taiyun/clean/taiyun.py
|
acmom/entity-linker
|
0156ad9b9d6439ea15518828513da9d9699b9acd
|
[
"Apache-2.0"
] | null | null | null |
Taiyun/clean/taiyun.py
|
acmom/entity-linker
|
0156ad9b9d6439ea15518828513da9d9699b9acd
|
[
"Apache-2.0"
] | 2
|
2018-11-05T16:09:07.000Z
|
2019-11-07T00:22:37.000Z
|
#coding=utf8
import sys
import json
import re
import MySQLdb
from MySQLdb import cursors
reload(sys)
sys.setdefaultencoding('utf8')
# Populate the Title table from the labels dump.
def setTitle():
    """Insert every all-Chinese label from the nt dump into the Title table,
    committing in batches of 10000.

    NOTE(review): `db` and `TitleCache` appear to be defined elsewhere in
    this module (not visible here) — Python 2 code.
    """
    label_path = '1.0_clean_baidu_labels_zh.nt'
    no_zhPattern = re.compile(u'[^\u4e00-\u9fa5]+')
    count = 0
    for s in open(label_path,'r'):
        s = s.decode('utf8')
        # Skip labels that contain no Chinese characters at all.
        if re.sub(no_zhPattern,'',s) == '':
            continue
        title = TitleCache(title=s)
        db.add(title)
        count+=1
        if count % 10000 == 0:
            db.commit()
            print count
    db.commit()
# Populate the Redirect table from the redirects dump.
def setRedirect():
    """Map each page title id to its redirect target id and persist the
    pairs into the Redirect table, committing in batches of 10000."""
    redirect_path = '1.0_clean_baidu_redirects_zh.nt'
    red_map = {}
    count = 0
    for s in open(redirect_path,'r'):
        s = s.decode('utf8')
        try:
            (par,redir) = s.split('\t',2)
            redir = redir.replace('\n','')
        except:
            # Malformed line (no tab separator): skip it.
            continue
        # Both endpoints must already exist in the Title table.
        par_t = db.query(TitleCache).filter(TitleCache.title == par).first()
        redir_t = db.query(TitleCache).filter(TitleCache.title == redir).first()
        if par_t == None or redir_t == None:
            continue
        red_map[par_t.id] = redir_t.id
        count+=1
        if count % 10000 == 0:
            print count
    # Second pass: write the collected id pairs to the database.
    count = 0
    for par,redir in red_map.iteritems():
        db.add(RedirectCache(parent=par, redirect = redir))
        count+=1
        if count % 10000 == 0:
            db.commit()
            print count
    db.commit()
# Populate the Property table (via the precomputed aaa.json cache).
def setProperty():
    """Load the title-id -> property-name map from aaa.json and insert the
    all-Chinese property names, committing in batches of 100000."""
    property_path = '1.0_clean_baidu_property_zh.nt'
    no_zhPattern = re.compile(u'[^\u4e00-\u9fa5]+')
    count=0
    pro_map = {}
    # The commented-out block below originally built pro_map straight from
    # the nt dump; it was replaced by loading the cached aaa.json.
    #for s in open(property_path,'r'):
    #    s = s.decode('utf8')
    #    (title,proper, s) = s.split('\t',3)
    #    _title = db.query(TitleCache).filter(TitleCache.title == title).first()
    #    if _title == None:
    #        continue
    #    _id = _title.id
    #    count+=1
    #    if count % 10000 == 0:
    #        print count
    #    try:
    #        pro_map[_id][proper]=1
    #    except:
    #        pro_map[_id] = {}
    #        pro_map[_id][proper]=1
    #with open('aaa.json','w') as f:
    #    f.write(json.dumps(pro_map, ensure_ascii=False))
    pro_map = json.loads(open('aaa.json','r').read())
    count=0
    for id, proper in pro_map.iteritems():
        for p in proper.keys():
            # Skip property names with no Chinese characters.
            if re.sub(no_zhPattern,'',p) == '':
                continue
            db.add(PropertyCache(title_id=id, property=p))
            count+=1
            if count % 100000 == 0:
                db.commit()
                print count
    db.commit()
# Build the inverted index.
def preprocess():
    """Iterate all titles for inverted-index construction.

    NOTE(review): this looks unfinished — it currently only prints each
    title and id.
    """
    titleArr = db.query(TitleCache).all()
    for t in titleArr:
        title = t.title
        id = t.id
        print title,id
| 25.53211
| 80
| 0.537549
|
4a096bcf4917f56237ab30cc7810fdbdb6b898dc
| 2,345
|
py
|
Python
|
common/crypto.py
|
james-m/realchat
|
cfe6e1b3c419cafdc6fe03c1a344810acc96d924
|
[
"BSD-3-Clause"
] | 1
|
2015-03-03T18:17:20.000Z
|
2015-03-03T18:17:20.000Z
|
common/crypto.py
|
james-m/realchat
|
cfe6e1b3c419cafdc6fe03c1a344810acc96d924
|
[
"BSD-3-Clause"
] | null | null | null |
common/crypto.py
|
james-m/realchat
|
cfe6e1b3c419cafdc6fe03c1a344810acc96d924
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- Mode: Python; tab-width: 4 -*-
# Copyright (c) 2012 James McKernan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""common/crypto.py
Utility module for encrypting and decrypting strings.
Useful for encoding UUIDs for placement in browser cookies.
The underlying mechanism for key control is the keyczar crypto toolkit. Provides
an easy way to deploy sets of keys used for cryptographic functions. For more info
see http://www.keyczar.org/
A keyczar keyset is only ever read once, at module import time. This is to avoid
undue disk read operations when decrypting ciphertext.
"""
import os
import conf
import keyczar.keyczar
# Module-level Crypter, read once at import time so the keyset file is not
# re-read from disk on every encrypt/decrypt call.
CRYPTER = keyczar.keyczar.Crypter.Read(
    os.path.abspath(conf.get('keyczar_keyset_path')))
def encrypt(message):
    """Return *message* encrypted with the module's keyczar keyset."""
    return CRYPTER.Encrypt(message)
def decrypt(ciphertext):
    """Return the plaintext for *ciphertext* using the module's keyczar keyset."""
    return CRYPTER.Decrypt(ciphertext)
| 41.875
| 82
| 0.767591
|
4a096ca3fef40d131546241a4d42b852e0d95f16
| 1,454
|
py
|
Python
|
AutomatedTesting/Gem/PythonTests/assetpipeline/ap_fixtures/ap_fast_scan_setting_backup_fixture.py
|
cypherdotXd/o3de
|
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
|
[
"Apache-2.0",
"MIT"
] | 8
|
2021-08-31T02:14:19.000Z
|
2021-12-28T19:20:59.000Z
|
AutomatedTesting/Gem/PythonTests/assetpipeline/ap_fixtures/ap_fast_scan_setting_backup_fixture.py
|
cypherdotXd/o3de
|
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
|
[
"Apache-2.0",
"MIT"
] | 8
|
2021-07-12T13:55:00.000Z
|
2021-10-04T14:53:21.000Z
|
AutomatedTesting/Gem/PythonTests/assetpipeline/ap_fixtures/ap_fast_scan_setting_backup_fixture.py
|
cypherdotXd/o3de
|
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-16T05:06:18.000Z
|
2021-09-16T05:06:18.000Z
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Fixtures for handling system settings entry for FastScanEnabled
"""
# Import builtin libraries
import pytest
import logging
# ly-shared import
from automatedtesting_shared.platform_setting import PlatformSetting
from ly_test_tools.o3de.pipeline_utils import AP_FASTSCAN_KEY as fast_scan_key
from ly_test_tools.o3de.pipeline_utils import AP_FASTSCAN_SUBKEY as fast_scan_subkey
logger = logging.getLogger(__name__)
@pytest.fixture
def ap_fast_scan_setting_backup_fixture(request, workspace) -> PlatformSetting:
    """
    PyTest Fixture for backing up and restoring the system entry for "Fast Scan Enabled"
    :return: A PlatformSetting object targeting the system setting for AP Fast Scan
    """
    if workspace.asset_processor_platform == 'mac':
        pytest.skip("Mac plist file editing not implemented yet")
    key = fast_scan_key
    subkey = fast_scan_subkey
    fast_scan_setting = PlatformSetting.get_system_setting(workspace, subkey, key)
    # Remember the pre-test value so teardown can restore the exact state.
    original_value = fast_scan_setting.get_value()

    def teardown():
        # Restore: delete the entry if it did not exist before the test,
        # otherwise write back the saved value.
        if original_value is None:
            fast_scan_setting.delete_entry()
        else:
            fast_scan_setting.set_value(original_value)
    request.addfinalizer(teardown)

    return fast_scan_setting
| 30.93617
| 97
| 0.772352
|
4a096d1d0c577317f31a943c4e90472c31c8f15b
| 1,651
|
py
|
Python
|
pagarmeapisdk/models/get_transfer_target_response.py
|
pagarme/pagarme-python-sdk
|
5a709ce54d46fc7326f73242700602c1c5a6bd26
|
[
"MIT"
] | null | null | null |
pagarmeapisdk/models/get_transfer_target_response.py
|
pagarme/pagarme-python-sdk
|
5a709ce54d46fc7326f73242700602c1c5a6bd26
|
[
"MIT"
] | null | null | null |
pagarmeapisdk/models/get_transfer_target_response.py
|
pagarme/pagarme-python-sdk
|
5a709ce54d46fc7326f73242700602c1c5a6bd26
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
pagarmeapisdk
This file was automatically generated by APIMATIC v3.0 (
https://www.apimatic.io ).
"""
class GetTransferTargetResponse(object):
"""Implementation of the 'GetTransferTargetResponse' model.
TODO: type model description here.
Attributes:
target_id (string): TODO: type description here.
mtype (string): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"target_id": 'target_id',
"mtype": 'type'
}
def __init__(self,
target_id=None,
mtype=None):
"""Constructor for the GetTransferTargetResponse class"""
# Initialize members of the class
self.target_id = target_id
self.mtype = mtype
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
target_id = dictionary.get('target_id')
mtype = dictionary.get('type')
# Return an object of this model
return cls(target_id,
mtype)
| 26.629032
| 79
| 0.5851
|
4a096d3c6b61b1d5b85ee50ba9a95999374841f9
| 27,372
|
py
|
Python
|
files/Irina/Irina.py
|
HenraL/NSI_1ereG6_Programme_Python
|
9f46b848fa2331daca57e5e2e11cba41da45a67f
|
[
"Unlicense"
] | 1
|
2021-06-15T13:44:47.000Z
|
2021-06-15T13:44:47.000Z
|
files/Irina/Irina.py
|
HenraL/NSI_1ereG6_Programme_Python
|
9f46b848fa2331daca57e5e2e11cba41da45a67f
|
[
"Unlicense"
] | null | null | null |
files/Irina/Irina.py
|
HenraL/NSI_1ereG6_Programme_Python
|
9f46b848fa2331daca57e5e2e11cba41da45a67f
|
[
"Unlicense"
] | null | null | null |
import math, random
from time import sleep
print ("""
def hypothenus(a,b):
c=sqrt(a**2+b**2)
return c
print (hypothenus(3,4))
h=int(input("opzerpoezrzperezproezjrpzer: "))
b=int(input("dhaoiahreozaheozaheza:"))
def airetrianglerectangle(b,h):
a=b*h/2
return a
print ("aire du triangle rectangle {}".format(airetrianglerectangle(b, h)))
#y=7x**2-3x+2
#I="O"
#while I=="o" or I=="O":
#x=int(input("Donnez une valeur à x:"))
# def f1(x):
# y=7*x**2-3*x+2
# return y
# I=input("{}\ncontinuer : [(o)ui/(n)on]".format(f1(x)))
n=int(input("affectez une valeure à n:"))
def compter(n,L1):
compteur = L1[n]
LongueurListe=len(L1)
for i in range(n):
if L1[]==n:
compteur=n
return compteur
L1=[1,27,27,19,18,113,22,1,14,189,7,2,23,21,22,23,100,1,100,22,14,100,1]
print (compter(1,L1))
def maximum(liste):
max1=
LongueurListe=
for
for i in range(10):
print("x", end=" ")""")
ligne=10
colonne=20
for i in range(ligne):
for j in range(colonne):
print ("x", end=" ")
print()
#ligne=10000000000000000000
#print
ligne=5
#colonne=200
for i in range(ligne):
for j in range(i):
print("x", end=" ")
print()
for i in range(ligne,0,-1):
for j in range(i):
print("x", end=" ")
print()
ligne=17
for i in range(ligne):
for j in range(ligne+i):
print(" ", end=" ")
for j in range (2*j+1):
print ("x", end=" ")
print()
ligne=17
for i in range(ligne):
for j in range(ligne-i):
print(" ", end=" ")
for j in range (2*j+1):
print ("x", end=" ")
print()
ligne=5
for i in range(ligne):
for j in range(ligne-i):
print(" ", end=" ")
for j in range (2*i+1):
print ("x", end=" ")
print()
ligne=5
for i in range(ligne,0,-1):
for j in range(ligne-i):
print(" ", end=" ")
for j in range (2*i+1):
print ("x", end=" ")
print()
print (" x")
ligne=100
colonne=200
print ("""#ligne=10
#colonne=200
#for i in range(ligne,0,-1):
# for j in range(i):
# print("U", end=" ")
# print()""")
#print
t=[[0,1,2,3,4],
[5,6,7,8,9],
[10,11,12,13,14],
[15,16,17,18,19]]
print(t[0][0])
print(t[2][1])
print(t[1][3])
z=0
while z<=100:
x=random.randint(0, 3)
y=random.randint(0, 4)
print("nb de tour{}".format(z))
print ("x,y={}, {}".format(x, y))
print (t[x][y])
z+=1
for ligne in range(4):
for colonne in range(5):
valeur=t[ligne][colonne]
print(valeur, end=" ")
print()
print()
for ligne in range(4):
for colonne in range(5):
valeur=t[ligne][colonne]
if valeur<10:
print(valeur, end=" ")
else:
print(valeur, end=" ")
print()
ligne=10
colonne=20
for i in range(ligne):
for j in range(colonne):
print ("x", end=" ")
print()
b=0
#ligne=1000000000 #long
ligne=100 #court
colonne=52
for i in range(ligne):
for j in range(colonne):
b=random.randint(0, 1)
print (b, end=" ")
print()
print()
ligne=100 #court
colonne=52
for i in range(ligne):
for j in range(colonne):
b=random.randint(0, 9)
print (b, end=" ")
print()
print()
ligne=100 #court
colonne=52
for i in range(ligne):
for j in range(colonne):
b=random.randint(-9, 9)
print (b, end=" ")
print()
#print
a=0
i=0
e=1
for e in range(101):
a+=random.randint (1, 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000)
print(a)
#print ("i=",i)
#print ("e=",e)
i+=1
e+=1
if i==100 or i==101 or i==102:
e+=1
ligne=5
for i in range(ligne):
for j in range(i):
print("x", end=" ")
print()
for i in range(ligne,0,-1):
for j in range(i):
print("x", end=" ")
print()
for i in range(ligne):
for j in range(i):
print(" ", end=" ")
for j in range (2*i+1):
print ("x", end=" ")
print()
ligne=5
for i in range(ligne):
for j in range(ligne+i):
print(" ", end=" ")
for j in range (2*j+1):
print ("x", end=" ")
print()
ligne=5
for i in range(ligne):
for j in range(ligne-i):
print(" ", end=" ")
for j in range (2*j+1):
print ("x", end=" ")
print()
eeleelelelel="f*hi***k y**how***u l*do*e b**you**ch l******do*******c"
for letter in eeleelelelel:
print (letter, end=" ")
print()
longeur=len(eeleelelelel)
e=0
for i in range(longeur):
print ("lettre[{}]:".format(e),eeleelelelel[i], end="\n")
e+=1
texte_1="sqhfmldsqhfmosqhfmdsqhfmidshfmdshfhdsfmlhdsfmlkhsqmlkfhdsqfmlkhdsflkdsfds*fds*f*dsf*d*f*df*ds*f*dsf*ds*fds*fsd*f*ds*****f*srzruzrozuprmsofhsmqfihsoqhfdsqfhdifmdslfkmdsfhiosqdmlhkezm"
texte_2="******qs*dsq*dqs*dsq*d*sq*d*sqd*sqd*sq*d*qd*qd*sq*d*dsjdiMOZHZAIDNUFjmoDNCMOdnmjsoqlqk,dld,nlsmsq*dsq*dsqd$d$*ùdd*md$d^$m*$d*m$d*$m^$*$à)=ç'fdsqfdsfsqfsqdsà3é(àèà-ç_'èé'àçè_(ç-à(_àéç_'(àç-(_'pezuhgsodlfjsm27309372498630729843070965825737874°875943265023186°5°175°98437°598647"
texte_3=texte_2+texte_1+"ezaeazezaeza"
texte_4=texte_3+2*texte_3
texte_5=2*texte_4
print (texte_1, texte_2, texte_3, texte_4, texte_5)
def affiche(eeleelelelel, n):
for i in range(n):
print(eeleelelelel, end=" ")
affiche("Bonjour",3)
print()
texte=eeleelelelel+"Bonjour Bertrand, comment vas tu?"
for lettre in texte:
code=ord(lettre)
#print(chr(code), end=" ")
print(chr(code+1), end=" ")
print ("""#encoder
def encrypte(texte):
texte_crypte=""
for i in range(lettre):
code=ord(lettre)
texte_crypte=texte_crypte+chr(texte)
return texte_crypte
texte_cypte=encrypte("Bonjour Bertrand, comment vas tu?")
print ("texte encrypte=", texte_crypte)
#decoder
def decrypte(texte):
texte_decrypte=""
for i in range(texte):
code=ord(lettre)
texte_decrypte=texte_decrypte+chr(lettre)
return texte_decrypte
print ("texte decrypte={}".format(decrypte(texte_crypte)))""")
#recherche de chaine
t1="Bonjour Bertrand, comment vas-tu?"
if"Bertrand" in t1:
print ("'Bertrand' est bien dans la chaine")
else:
print ("Chaine non trouvée")
def cherche(texte, chaine):
if texte in t1:
print ("True")
else:
print ("False")
return texte
#programme principal
t1="sqjfdshfsfsdkfjkdsjfldsjfdsjfkdsjf Bonjour Bertrand, comment vas-tu?"
print (cherche(t1, "comment"))
print (cherche(t1, "toto"))
def maze(t,nb_ligne, nb_colonne):
for y in range(nb_ligne):
for x in range(nb_colonne):
valeur=t[y][x]
if valeur ==0:
print(".", end=" ")
elif valeur==1:
print(t[y][x], end=" ")
elif valeur==2:
print("@", end=" ")
elif valeur==3:
print("M", end=" ")
elif valeur==4:
print("$", end=" ")
elif valeur==5:
print("£", end=" ")
elif valeur==6:
print("∆", end=" ")
elif valeur==7:
print("N", end=" ")
elif valeur==8:
print("❤",end=" ")
print()
t_1=[[1,1,1,1,1,1,1,1],
[1,0,2,0,0,0,0,1],
[1,1,1,1,0,0,0,1],
[1,0,0,0,0,0,0,1],
[1,0,0,0,1,0,3,1],
[1,0,107,1,1,0,0,1],
[1,0,0,3,0,0,0,1],
[1,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1]]
t_2=[[1,1,1,1,1,1,1,1],
[1,0,2,0,0,0,0,1],
[1,1,1,1,0,0,0,1],
[1,0,0,1,0,0,0,1],
[1,0,0,1,0,1,3,1],
[1,0,0,0,0,1,0,1],
[1,0,1,1,1,1,5,1],
[1,0,0,0,3,0,4,1],
[1,1,1,1,1,1,1,1]]
t_3=[[3,0,3,3,3,3,3,3],
[3,0,0,0,0,0,0,3],
[3,6,6,6,0,6,6,3],
[3,6,0,6,0,0,0,3],
[3,0,0,0,0,6,6,3],
[3,0,6,6,0,0,0,3],
[3,0,0,0,6,6,6,3],
[3,6,6,0,0,0,0,3],
[3,3,3,3,3,3,0,3]]
t_maze1=[[3,0,3,3,3,3,3,1,3,3,3,1,3,3,3,3,3,3],
[3,0,3,0,0,0,7,7,3,0,0,0,0,2,0,0,2,3],
[3,0,0,0,7,0,0,1,0,0,3,1,0,2,2,0,2,3],
[3,0,7,0,0,7,7,1,0,3,0,1,0,2,0,0,0,3],
[3,0,0,7,0,0,7,1,0,0,0,1,0,2,0,2,0,3],
[3,7,0,7,7,0,7,1,3,0,3,1,0,2,0,2,0,3],
[3,0,0,0,7,0,0,0,0,0,3,1,0,0,0,2,0,3],
[3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3]]
t_maze1_1=[[3,0,3,3,3,3,3,1,3],
[3,0,3,0,0,0,7,7,3],
[3,0,0,0,7,0,0,1,0],
[3,0,7,0,0,7,7,1,0],
[3,0,0,7,0,0,7,1,0],
[3,7,0,7,7,0,7,1,3],
[3,0,0,0,7,0,0,0,0],
[3,3,3,3,3,3,3,3,3]]
t_maze1_2=[[3,3,1,3,3,3,3,3,3],
[0,0,0,0,2,0,0,2,3],
[0,3,1,0,2,2,0,2,3],
[3,0,1,0,2,0,0,0,3],
[0,0,1,0,2,0,2,0,3],
[0,3,1,0,2,0,2,0,3],
[0,3,1,0,0,0,2,0,3],
[3,3,3,3,3,3,3,0,3]]
t_love=[[8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8],
[8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8],
[8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8],
[8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8],
[8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8],
[8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8],
[8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8],
[8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]]
maze(t_1,9,8)
maze(t_2,9,8)
maze(t_3,9,8)
maze(t_maze1,8,18)
sleep(5)
maze(t_love,8,373)
sleep(10)
list1=["### Decoding Table\n","'\x00' # 0x00 -> NULL\n","'\x01' # 0x01 -> START OF HEADING\n","'\x02' # 0x02 -> START OF TEXT\n","'\x03' # 0x03 -> END OF TEXT\n","'\x04' # 0x04 -> END OF TRANSMISSION\n","'\x05' # 0x05 -> ENQUIRY\n","'\x06' # 0x06 -> ACKNOWLEDGE\n","'\x07' # 0x07 -> BELL\n","'\x08' # 0x08 -> BACKSPACE\n","'\t' # 0x09 -> HORIZONTAL TABULATION\n","'\\n' # 0x0A -> LINE FEED\n","'\x0b' # 0x0B -> VERTICAL TABULATION\n","'\x0c' # 0x0C -> FORM FEED\n","'\r' # 0x0D -> CARRIAGE RETURN\n","'\x0e' # 0x0E -> SHIFT OUT\n","'\x0f' # 0x0F -> SHIFT IN\n","'\x10' # 0x10 -> DATA LINK ESCAPE\n","'\x11' # 0x11 -> DEVICE CONTROL ONE\n","'\x12' # 0x12 -> DEVICE CONTROL TWO\n","'\x13' # 0x13 -> DEVICE CONTROL THREE\n","'\x14' # 0x14 -> DEVICE CONTROL FOUR\n","'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE\n","'\x16' # 0x16 -> SYNCHRONOUS IDLE\n","'\x17' # 0x17 -> END OF TRANSMISSION BLOCK\n","'\x18' # 0x18 -> CANCEL\n","'\x19' # 0x19 -> END OF MEDIUM\n","'\x1a' # 0x1A -> SUBSTITUTE\n","'\x1b' # 0x1B -> ESCAPE\n","'\x1c' # 0x1C -> FILE SEPARATOR\n","'\x1d' # 0x1D -> GROUP SEPARATOR\n","'\x1e' # 0x1E -> RECORD SEPARATOR\n","'\x1f' # 0x1F -> UNIT SEPARATOR\n","' ' # 0x20 -> SPACE\n","'!' # 0x21 -> EXCLAMATION MARK\n","""'"' # 0x22 -> QUOTATION MARK\n""","'#' # 0x23 -> NUMBER SIGN","\n'$' # 0x24 -> DOLLAR SIGN\n","'%' # 0x25 -> PERCENT SIGN\n","'&' # 0x26 -> AMPERSAND\n",""""'" # 0x27 -> APOSTROPHE\n""","'(' # 0x28 -> LEFT PARENTHESIS\n","')' # 0x29 -> RIGHT PARENTHESIS\n","'*' # 0x2A -> ASTERISK","\n'+' # 0x2B -> PLUS SIGN\n","',' # 0x2C -> COMMA\n","'-' # 0x2D -> HYPHEN-MINUS\n","'.' 
# 0x2E -> FULL STOP\n","'/' # 0x2F -> SOLIDUS\n","'0' # 0x30 -> DIGIT ZERO\n","'1' # 0x31 -> DIGIT ONE\n","'2' # 0x32 -> DIGIT TWO\n","'3' # 0x33 -> DIGIT THREE\n","'4' # 0x34 -> DIGIT FOUR\n","'5' # 0x35 -> DIGIT FIVE\n","'6' # 0x36 -> DIGIT SIX\n","'7' # 0x37 -> DIGIT SEVEN\n","'8' # 0x38 -> DIGIT EIGHT\n","'9' # 0x39 -> DIGIT NINE\n","':' # 0x3A -> COLON\n","';' # 0x3B -> SEMICOLON\n","'<' # 0x3C -> LESS-THAN SIGN\n","'=' # 0x3D -> EQUALS SIGN\n","'>' # 0x3E -> GREATER-THAN SIGN\n","'?' # 0x3F -> QUESTION MARK\n","'@' # 0x40 -> COMMERCIAL AT\n","'A' # 0x41 -> LATIN CAPITAL LETTER A\n","'B' # 0x42 -> LATIN CAPITAL LETTER B\n","'C' # 0x43 -> LATIN CAPITAL LETTER C\n","'D' # 0x44 -> LATIN CAPITAL LETTER D\n","'E' # 0x45 -> LATIN CAPITAL LETTER E\n","'F' # 0x46 -> LATIN CAPITAL LETTER F\n","'G' # 0x47 -> LATIN CAPITAL LETTER G\n","'H' # 0x48 -> LATIN CAPITAL LETTER H\n","'I' # 0x49 -> LATIN CAPITAL LETTER I\n","'J' # 0x4A -> LATIN CAPITAL LETTER J\n","'K' # 0x4B -> LATIN CAPITAL LETTER K\n","'L' # 0x4C -> LATIN CAPITAL LETTER L\n","'M' # 0x4D -> LATIN CAPITAL LETTER M\n","'N' # 0x4E -> LATIN CAPITAL LETTER N\n","'O' # 0x4F -> LATIN CAPITAL LETTER O\n","'P' # 0x50 -> LATIN CAPITAL LETTER P\n","'Q' # 0x51 -> LATIN CAPITAL LETTER Q\n","'R' # 0x52 -> LATIN CAPITAL LETTER R\n","'S' # 0x53 -> LATIN CAPITAL LETTER S\n","'T' # 0x54 -> LATIN CAPITAL LETTER T\n","'U' # 0x55 -> LATIN CAPITAL LETTER U\n","'V' # 0x56 -> LATIN CAPITAL LETTER V\n","'W' # 0x57 -> LATIN CAPITAL LETTER W\n","'X' # 0x58 -> LATIN CAPITAL LETTER X\n","'Y' # 0x59 -> LATIN CAPITAL LETTER Y\n","'Z' # 0x5A -> LATIN CAPITAL LETTER Z\n'[' # 0x5B -> LEFT SQUARE BRACKET\n","'\\' # 0x5C -> REVERSE SOLIDUS\n","']' # 0x5D -> RIGHT SQUARE BRACKET\n","'^' # 0x5E -> CIRCUMFLEX ACCENT\n","'_' # 0x5F -> LOW LINE\n","'`' # 0x60 -> GRAVE ACCENT\n","'a' # 0x61 -> LATIN SMALL LETTER A\n","'b' # 0x62 -> LATIN SMALL LETTER B\n","'c' # 0x63 -> LATIN SMALL LETTER C\n","'d' # 0x64 -> LATIN SMALL LETTER D\n","'e' # 0x65 -> LATIN 
SMALL LETTER E\n","'f' # 0x66 -> LATIN SMALL LETTER F\n","'g' # 0x67 -> LATIN SMALL LETTER G\n","'h' # 0x68 -> LATIN SMALL LETTER H\n","'i' # 0x69 -> LATIN SMALL LETTER I\n","'j' # 0x6A -> LATIN SMALL LETTER J\n","'k' # 0x6B -> LATIN SMALL LETTER K\n","'l' # 0x6C -> LATIN SMALL LETTER L\n","'m' # 0x6D -> LATIN SMALL LETTER M\n","'n' # 0x6E -> LATIN SMALL LETTER N\n","'o' # 0x6F -> LATIN SMALL LETTER O\n","'p' # 0x70 -> LATIN SMALL LETTER P\n","'q' # 0x71 -> LATIN SMALL LETTER Q\n","'r' # 0x72 -> LATIN SMALL LETTER R\n","'s' # 0x73 -> LATIN SMALL LETTER S\n","'t' # 0x74 -> LATIN SMALL LETTER T\n","'u' # 0x75 -> LATIN SMALL LETTER U\n","'v' # 0x76 -> LATIN SMALL LETTER V\n","'w' # 0x77 -> LATIN SMALL LETTER W\n","'x' # 0x78 -> LATIN SMALL LETTER X\n","'y' # 0x79 -> LATIN SMALL LETTER Y\n","'z' # 0x7A -> LATIN SMALL LETTER Z\n","'{' # 0x7B -> LEFT CURLY BRACKET\n","'|' # 0x7C -> VERTICAL LINE\n","'}' # 0x7D -> RIGHT CURLY BRACKET\n","'~' # 0x7E -> TILDE\n","'\x7f' # 0x7F -> DELETE\n","'\u20ac' # 0x80 -> EURO SIGN\n","'\ufffe' # 0x81 -> UNDEFINED\n","'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK\n","'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK\n","'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK\n","'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS\n","'\u2020' # 0x86 -> DAGGER\n","'\u2021' # 0x87 -> DOUBLE DAGGER\n","'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT\n","'\u2030' # 0x89 -> PER MILLE SIGN\n","'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON\n","'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK\n","'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE\n","'\ufffe' # 0x8D -> UNDEFINED\n","'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON\n","'\ufffe' # 0x8F -> UNDEFINED\n","'\ufffe' # 0x90 -> UNDEFINED\n","'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK\n","'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK\n","'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK\n","'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK\n","'\u2022' # 0x95 -> BULLET\n","'\u2013' # 0x96 
-> EN DASH\n","'\u2014' # 0x97 -> EM DASH\n","'\u02dc' # 0x98 -> SMALL TILDE\n","'\u2122' # 0x99 -> TRADE MARK SIGN\n","'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON\n","'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK\n","'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE\n","'\ufffe' # 0x9D -> UNDEFINED\n","'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON\n","'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS\n","'\xa0' # 0xA0 -> NO-BREAK SPACE\n","'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK\n","'\xa2' # 0xA2 -> CENT SIGN\n","'\xa3' # 0xA3 -> POUND SIGN\n","'\xa4' # 0xA4 -> CURRENCY SIGN\n","'\xa5' # 0xA5 -> YEN SIGN\n","'\xa6' # 0xA6 -> BROKEN BAR\n","'\xa7' # 0xA7 -> SECTION SIGN\n","'\xa8' # 0xA8 -> DIAERESIS\n","'\xa9' # 0xA9 -> COPYRIGHT SIGN\n","'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR\n","'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK\n","'\xac' # 0xAC -> NOT SIGN\n","'\xad' # 0xAD -> SOFT HYPHEN\n","'\xae' # 0xAE -> REGISTERED SIGN\n","'\xaf' # 0xAF -> MACRON\n","'\xb0' # 0xB0 -> DEGREE SIGN\n","'\xb1' # 0xB1 -> PLUS-MINUS SIGN\n","'\xb2' # 0xB2 -> SUPERSCRIPT TWO\n","'\xb3' # 0xB3 -> SUPERSCRIPT THREE\n","'\xb4' # 0xB4 -> ACUTE ACCENT\n","'\xb5' # 0xB5 -> MICRO SIGN\n","'\xb6' # 0xB6 -> PILCROW SIGN\n","'\xb7' # 0xB7 -> MIDDLE DOT\n","'\xb8' # 0xB8 -> CEDILLA\n","'\xb9' # 0xB9 -> SUPERSCRIPT ONE\n","'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR\n","'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK\n","'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER\n","'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF\n","'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS\n","'\xbf' # 0xBF -> INVERTED QUESTION MARK\n","'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE\n","'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE\n","'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX\n","'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE\n","'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS\n","'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH 
RING ABOVE\n","'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE\n","'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA\n","'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE\n","'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE\n","'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX\n","'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS\n","'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE\n","'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE\n","'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX\n","'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS\n","'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH\n","'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE\n","'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE\n","'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE\n","'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX\n","'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE\n","'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS\n","'\xd7' # 0xD7 -> MULTIPLICATION SIGN\n","'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE\n","'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE\n","'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE\n","'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX\n","'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS\n","'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE\n","'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN\n","'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S\n","'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE\n","'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE\n","'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX\n","'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE\n","'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS\n","'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE\n","'\xe6' # 0xE6 -> LATIN SMALL LETTER AE\n","'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA\n","'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE\n","'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE\n","'\xea' # 0xEA -> LATIN 
SMALL LETTER E WITH CIRCUMFLEX\n","'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS\n","'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE\n","'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE\n","'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX\n","'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS\n","'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH\n","'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE\n","'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE\n","'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE\n","'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX\n","'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE\n","'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS\n","'\xf7' # 0xF7 -> DIVISION SIGN\n","'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE\n","'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE\n","'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE\n","'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX\n","'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS\n","'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE\n","'\xfe' # 0xFE -> LATIN SMALL LETTER THORN\n","'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS"]
for letter in list1:
print (letter)
| 71.467363
| 12,238
| 0.526414
|
4a097170ff6e0f8792d07f93ddd6877f7f8e61ae
| 910
|
py
|
Python
|
remote/models/ClientCloudHostMapping.py
|
zadjii/nebula
|
50c4ec019c9f7eb15fe105a6c53a8a12880e281c
|
[
"MIT"
] | 2
|
2020-04-15T11:20:59.000Z
|
2021-05-12T13:01:36.000Z
|
remote/models/ClientCloudHostMapping.py
|
zadjii/nebula
|
50c4ec019c9f7eb15fe105a6c53a8a12880e281c
|
[
"MIT"
] | 1
|
2018-06-05T04:48:56.000Z
|
2018-06-05T04:48:56.000Z
|
remote/models/ClientCloudHostMapping.py
|
zadjii/nebula
|
50c4ec019c9f7eb15fe105a6c53a8a12880e281c
|
[
"MIT"
] | 1
|
2018-08-15T06:45:46.000Z
|
2018-08-15T06:45:46.000Z
|
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, Table, BigInteger
from sqlalchemy.orm import relationship, backref
from remote.models import nebr_base as base
__author__ = 'Mike'
class ClientCloudHostMapping(base):
__tablename__ = 'clientcloudhostmapping'
id = Column(Integer, primary_key=True)
session_id = Column(ForeignKey('session.id'))
cloud_id = Column(ForeignKey('cloud.id'))
host_id = Column(ForeignKey('mirror.id'))
def __init__(self, session, cloud, host):
# Remember, if this isn't added to the DB, then the backrefs won't be
# hooked up from the other side.
# So instantiating this without a session (for the public user) is
# fraught with peril
self.session_id = session.id if session else None
self.cloud_id = cloud.id
self.host_id = host.id
| 33.703704
| 87
| 0.706593
|
4a09725f009baef669df040c26792a8215d6c230
| 3,026
|
py
|
Python
|
nn.py
|
praxidike97/a-different-approach-to-neural-nets
|
18ac64525f7e976077be6c92ec2091005f2b764f
|
[
"MIT"
] | null | null | null |
nn.py
|
praxidike97/a-different-approach-to-neural-nets
|
18ac64525f7e976077be6c92ec2091005f2b764f
|
[
"MIT"
] | null | null | null |
nn.py
|
praxidike97/a-different-approach-to-neural-nets
|
18ac64525f7e976077be6c92ec2091005f2b764f
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
import keras.backend as K
from utils import generate_linear_unseparable_2d_distribution, to_one_hot, newline
NUM_HIDDEN = 1
def create_model(input_size, output_size):
model = Sequential()
model.add(Dense(NUM_HIDDEN, activation="sigmoid", input_shape=(input_size, )))
model.add(Dense(output_size, activation="softmax"))
model.compile("adam", loss="categorical_crossentropy", metrics=["accuracy"])
return model
def create_decision_boundary(xs, ys, model):
xx, yy = np.meshgrid(np.arange(-2.5, 2.5, 0.01), np.arange(0.0, 3.0, 0.01))
values = np.c_[xx.ravel(), yy.ravel()]
Z = np.round(np.asarray(model.predict(values)[:, 0]))
#Z = np.asarray(model(values))[0, :, 0]
"""
fig, axs = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True)
plots = list()
for i in range(0, NUM_HIDDEN):
#Z += np.asarray(model(values))[0, :, i]
Z = np.asarray(model(values))[0, :, i]
Z = Z.reshape(xx.shape)
#plt.subplot(5, 10, i+1, sharex=True, sharey=True)
ax = axs[i%2]
plot = ax.pcolormesh(xx, yy, Z, cmap="plasma", vmin=0.0, vmax=1.0)
plots.append(plot)
#ax.xlabel("Feature 1")
#ax.ylabel("Feature 2")
#plt.show()
#Z += np.round(np.asarray(model.predict(values)[:, i]))
fig.text(0.5, 0.02, 'Feature 1', ha='center')
fig.text(0.06, 0.5, 'Feature 2', va='center', rotation='vertical')
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.04, 0.7])
fig.colorbar(plots[0], cax=cbar_ax)
plt.show()
"""
Z = Z.reshape(xx.shape)
#plt.pcolormesh(xx, yy, Z, cmap="plasma")
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
#plt.colorbar()
#newline([0, -0.395], [-3, 4.474])
# Plot also the training points
ys = np.argmax(ys, axis=1)
scatter = plt.scatter(xs[:, 0], xs[:, 1], c=np.ones(len(ys))-ys, edgecolors='k', cmap=plt.cm.Paired)
plt.legend(handles=scatter.legend_elements()[0], labels=["Class 1", "Class 2"], loc="upper left")
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.show()
if __name__ == "__main__":
num_classes = 2
num_features = 2
xs, ys_raw = generate_linear_unseparable_2d_distribution()
ys = to_one_hot(ys_raw, num_classes=num_classes)
model = create_model(input_size=num_features, output_size=num_classes)
model.fit(x=xs, y=ys, epochs=500)
create_decision_boundary(xs, ys, model)
get_1st_layer_output = K.function([model.layers[0].input],
[model.layers[0].output])
print("Number of layers: %i" % len(model.layers))
print("Weight outputs: %s" % str(model.layers[-1].get_weights()))
print("Weight outputs: %s" % str(model.layers[0].get_weights()))
#create_decision_boundary(xs, ys, get_1st_layer_output)
| 31.852632
| 104
| 0.631196
|
4a09730c5fcc5ba43dce57a2f0a508746febba52
| 66,180
|
py
|
Python
|
volta/volta/encoders.py
|
e-bug/iglue
|
a3f1288c1c89fbc4c44ee0d6c01097348615bac6
|
[
"MIT"
] | 15
|
2022-01-28T03:08:46.000Z
|
2022-03-24T09:24:07.000Z
|
volta/volta/encoders.py
|
e-bug/iglue
|
a3f1288c1c89fbc4c44ee0d6c01097348615bac6
|
[
"MIT"
] | 4
|
2022-03-01T19:13:39.000Z
|
2022-03-27T08:21:46.000Z
|
volta/volta/encoders.py
|
e-bug/iglue
|
a3f1288c1c89fbc4c44ee0d6c01097348615bac6
|
[
"MIT"
] | 2
|
2022-02-05T17:04:42.000Z
|
2022-03-11T14:52:47.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) 2020, Emanuele Bugliarello (@e-bug).
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import copy
import math
import pickle
import logging
import torch
from torch import nn
import torch.nn.functional as F
from .embeddings import *
from .config import BertConfig
from .utils import PreTrainedModel
from .losses import pre_vis_criterions, pre_vis_targets
from .m3p_transformer import M3PTransformerModel
logger = logging.getLogger(__name__)
# Mapping from pretrained-model shortcut names to the S3 URLs of their
# PyTorch weight files (BERT and RoBERTa variants hosted by Hugging Face).
BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
    "bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
    "bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
    "bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
    "bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
    "bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
    "bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
    "bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
    "bert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
    "bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
    "bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
    "bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
    "bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
    "bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
    "roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-pytorch_model.bin",
    "roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-pytorch_model.bin",
    "roberta-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-pytorch_model.bin",
}
try:
    # Prefer NVIDIA apex's fused CUDA LayerNorm kernel when available.
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")

    # Pure-PyTorch fallback with the same interface as apex's FusedLayerNorm.
    class BertLayerNorm(nn.Module):
        def __init__(self, hidden_size, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(BertLayerNorm, self).__init__()
            # Learnable per-feature scale and shift (gamma/beta in the TF naming).
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps

        def forward(self, x):
            # Normalize over the last dimension: (x - mean) / sqrt(var + eps).
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias
def load_tf_weights_in_bert(model, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        # TF variable names are slash-separated paths, e.g. "bert/encoder/layer_0/...".
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(n in ["adam_v", "adam_m"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        # Walk the PyTorch module tree following the TF variable's path segments.
        pointer = model
        for m_name in name:
            # Segments like "layer_11" carry a list index: split into name and index.
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                l = re.split(r"_(\d+)", m_name)
            else:
                l = [m_name]
            # Translate TF parameter names to the PyTorch attribute names.
            if l[0] == "kernel" or l[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif l[0] == "output_bias" or l[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif l[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                # Numeric suffix selects the submodule inside a ModuleList.
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to torch.nn.Linear.
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
# ==================================================================================================================== #
# Activation Functions #
# ==================================================================================================================== #
def gelu(x):
    """Gaussian Error Linear Unit, exact erf form: x * Phi(x).

    Phi is the standard normal CDF. OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    normal_cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * normal_cdf
class GeLU(nn.Module):
    """Module wrapper around the exact erf-based GELU activation.

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # x * Phi(x), inlined erf formulation.
        return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
    """Swish / SiLU activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
# ==================================================================================================================== #
# Gated layers #
# ==================================================================================================================== #
class BertGatedSelfAttention(nn.Module):
    """Self/cross-attention sublayer with per-modality gates.

    Depending on the layer number and the config, this sublayer computes any
    subset of four attention flows: text->text (tt), text->vision (tv),
    vision->text (vt) and vision->vision (vv). When both intra- and
    inter-modal flows are active for a modality, their attention scores are
    softmax-normalized jointly over the concatenated key axis.
    """

    def __init__(self, config, layer_num):
        super(BertGatedSelfAttention, self).__init__()
        # Per-sublayer overrides fall back to the global config values.
        hidden_size = config.sublayer2attn_hidden_size.get(str(layer_num), config.hidden_size)
        num_attention_heads = config.sublayer2num_attention_heads.get(str(layer_num), config.num_attention_heads)
        v_hidden_size = config.sublayer2v_attn_hidden_size.get(str(layer_num), config.v_hidden_size)
        v_num_attention_heads = config.sublayer2v_num_attention_heads.get(str(layer_num), config.v_num_attention_heads)
        if hidden_size % num_attention_heads != 0:
            raise ValueError(
                "The text hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (hidden_size, num_attention_heads)
            )
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = int(hidden_size / num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        if v_hidden_size % v_num_attention_heads != 0:
            raise ValueError(
                "The vision hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (v_hidden_size, v_num_attention_heads)  # FIX: was num_attention_heads (text count)
            )
        self.v_num_attention_heads = v_num_attention_heads
        self.v_attention_head_size = int(v_hidden_size / v_num_attention_heads)
        self.v_all_head_size = self.v_num_attention_heads * self.v_attention_head_size
        self.visualization = config.visualization
        # Which of the four attention flows are active in this sublayer.
        self.has_tt = (layer_num in config.tt_attn_sublayers)
        self.has_tv = (layer_num in config.tv_attn_sublayers)
        self.has_vt = (layer_num in config.vt_attn_sublayers)
        self.has_vv = (layer_num in config.vv_attn_sublayers)
        self.has_text = (self.has_tt or self.has_tv)
        self.has_vision = (self.has_vv or self.has_vt)
        self.share_layer = (layer_num in config.shared_sublayers)
        if self.has_tv or self.has_vt:
            # Cross-modal attention requires matching projection geometry.
            assert hidden_size == v_hidden_size, "hidden_size != v_hidden_size"
            assert num_attention_heads == v_num_attention_heads, "num_attention_heads != v_num_attention_heads"
        if self.has_text:
            self.query = nn.Linear(config.hidden_size, self.all_head_size)
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)
            self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        if self.has_text and self.has_vision and self.share_layer:
            # Weight sharing: vision reuses the text projections.
            assert hidden_size == v_hidden_size, "hidden_size != v_hidden_size"
            self.v_query = self.query
            self.v_key = self.key
            self.v_value = self.value
            self.v_dropout = self.dropout
        elif self.has_vision:
            self.v_query = nn.Linear(config.v_hidden_size, self.v_all_head_size)
            self.v_key = nn.Linear(config.v_hidden_size, self.v_all_head_size)
            self.v_value = nn.Linear(config.v_hidden_size, self.v_all_head_size)
            self.v_dropout = nn.Dropout(config.v_attention_probs_dropout_prob)

    def transpose_for_scores(self, x, modality='text'):
        # [bs, len, all_head_size] -> [bs, num_heads, len, head_size]
        if modality == 'text':
            new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        else:
            new_x_shape = x.size()[:-1] + (self.v_num_attention_heads, self.v_attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, t_hidden_states, v_hidden_states, t_attention_mask, v_attention_mask):
        """
        Args:
            t_hidden_states: [bs, seq_len, hidden_size]
            v_hidden_states: [bs, num_box, v_hidden_size]
            t_attention_mask: [bs, 1, 1, seq_len] filled with 0s (non-pad) and -10000s (pad)
            v_attention_mask: [bs, 1, 1, num_box] filled with 0s (non-pad) and -10000s (pad)
        Returns:
            t_context_layer: [bs, seq_len, hidden_size] or int 0 if no text
            v_context_layer: [bs, num_box, v_hidden_size] or int 0 if no vision
            t_attn_data: dict or None if no visualization
            v_attn_data: dict or None if no visualization
        """
        if self.has_text:
            t_mixed_query_layer = self.query(t_hidden_states)  # [bs, seq_len, hidden_size]
            t_mixed_key_layer = self.key(t_hidden_states)
            t_mixed_value_layer = self.value(t_hidden_states)
            t_query_layer = self.transpose_for_scores(t_mixed_query_layer)  # [bs, num_heads, seq_len, attn_head_size]
            t_key_layer = self.transpose_for_scores(t_mixed_key_layer)
            t_value_layer = self.transpose_for_scores(t_mixed_value_layer)
        if self.has_vision:
            v_mixed_query_layer = self.v_query(v_hidden_states)  # [bs, num_box, v_hidden_size]
            v_mixed_key_layer = self.v_key(v_hidden_states)
            v_mixed_value_layer = self.v_value(v_hidden_states)
            v_query_layer = self.transpose_for_scores(v_mixed_query_layer, 'vision')  # [bs, v_num_heads, num_box, v_attn_head_size]
            v_key_layer = self.transpose_for_scores(v_mixed_key_layer, 'vision')
            v_value_layer = self.transpose_for_scores(v_mixed_value_layer, 'vision')

        # Gated attention
        if self.has_tt:
            # Take the dot product between "query" and "key" to get the raw attention scores.
            tt_attention_scores = torch.matmul(t_query_layer, t_key_layer.transpose(-1, -2))  # [bs, num_heads, seq_len, seq_len]
            tt_attention_scores = tt_attention_scores / math.sqrt(self.attention_head_size)
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            tt_attention_scores = tt_attention_scores + t_attention_mask  # [bs, num_heads, seq_len, seq_len]
        if self.has_tv:
            # Take the dot product between "query" and "key" to get the raw attention scores.
            tv_attention_scores = torch.matmul(t_query_layer, v_key_layer.transpose(-1, -2))  # [bs, num_heads, seq_len, num_box]
            tv_attention_scores = tv_attention_scores / math.sqrt(self.attention_head_size)
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            tv_attention_scores = tv_attention_scores + v_attention_mask
        if self.has_vt:
            # Take the dot product between "query" and "key" to get the raw attention scores.
            vt_attention_scores = torch.matmul(v_query_layer, t_key_layer.transpose(-1, -2))  # [bs, num_heads, num_box, seq_len]
            vt_attention_scores = vt_attention_scores / math.sqrt(self.v_attention_head_size)
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            vt_attention_scores = vt_attention_scores + t_attention_mask
        if self.has_vv:
            # Take the dot product between "query" and "key" to get the raw attention scores.
            vv_attention_scores = torch.matmul(v_query_layer, v_key_layer.transpose(-1, -2))  # [bs, num_heads, num_box, num_box]
            vv_attention_scores = vv_attention_scores / math.sqrt(self.v_attention_head_size)
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            vv_attention_scores = vv_attention_scores + v_attention_mask

        # Gated softmax
        # Normalize the attention scores to probabilities.
        if self.has_tt and self.has_tv:
            # Concatenate the two attention scores
            t_attention_scores = torch.cat((tt_attention_scores, tv_attention_scores), dim=-1)  # [bs, num_heads, seq_len, seq_len+num_box]
            t_attention_probs = nn.Softmax(dim=-1)(t_attention_scores)
            # Split concatenation back into tt and tv
            tt_attention_probs, tv_attention_probs = \
                t_attention_probs.split([tt_attention_scores.size(-1), tv_attention_scores.size(-1)], dim=-1)
            # This is actually dropping out entire tokens to attend to, which might
            # seem a bit unusual, but is taken from the original Transformer paper.
            tt_attention_probs = self.dropout(tt_attention_probs)  # [bs, num_heads, seq_len, seq_len]
            tv_attention_probs = self.dropout(tv_attention_probs)  # [bs, num_heads, seq_len, num_box]
        elif self.has_tt:
            tt_attention_probs = self.dropout(nn.Softmax(dim=-1)(tt_attention_scores))  # [bs, num_heads, seq_len, seq_len]
        elif self.has_tv:
            tv_attention_probs = self.dropout(nn.Softmax(dim=-1)(tv_attention_scores))  # [bs, num_heads, seq_len, num_box]
        if self.has_vv and self.has_vt:
            # Concatenate the two attention scores
            v_attention_scores = torch.cat((vt_attention_scores, vv_attention_scores), dim=-1)  # [bs, num_heads, num_box, seq_len+num_box]
            v_attention_probs = nn.Softmax(dim=-1)(v_attention_scores)
            # Split concatenation back into vt and vv
            vt_attention_probs, vv_attention_probs = \
                v_attention_probs.split([vt_attention_scores.size(-1), vv_attention_scores.size(-1)], dim=-1)
            # This is actually dropping out entire tokens to attend to, which might
            # seem a bit unusual, but is taken from the original Transformer paper.
            vv_attention_probs = self.v_dropout(vv_attention_probs)  # [bs, num_heads, num_box, num_box]
            vt_attention_probs = self.v_dropout(vt_attention_probs)  # [bs, num_heads, num_box, seq_len]
        elif self.has_vv:
            vv_attention_probs = self.v_dropout(nn.Softmax(dim=-1)(vv_attention_scores))  # [bs, v_num_heads, num_box, num_box]
        elif self.has_vt:
            vt_attention_probs = self.v_dropout(nn.Softmax(dim=-1)(vt_attention_scores))  # [bs, num_heads, num_box, seq_len]

        # Gated context
        tt_context_layer, tv_context_layer, vt_context_layer, vv_context_layer = 0, 0, 0, 0
        if self.has_tt:
            tt_context_layer = torch.matmul(tt_attention_probs, t_value_layer)  # [bs, num_heads, seq_len, attn_head_size]
            tt_context_layer = tt_context_layer.permute(0, 2, 1, 3).contiguous()  # [bs, seq_len, num_heads, attn_head_size]
            tt_new_context_layer_shape = tt_context_layer.size()[:-2] + (self.all_head_size,)
            tt_context_layer = tt_context_layer.view(*tt_new_context_layer_shape)  # [bs, seq_len, num_heads*attn_head_size]
        if self.has_tv:
            tv_context_layer = torch.matmul(tv_attention_probs, v_value_layer)
            tv_context_layer = tv_context_layer.permute(0, 2, 1, 3).contiguous()
            tv_new_context_layer_shape = tv_context_layer.size()[:-2] + (self.all_head_size,)
            tv_context_layer = tv_context_layer.view(*tv_new_context_layer_shape)
        if self.has_vt:
            vt_context_layer = torch.matmul(vt_attention_probs, t_value_layer)
            vt_context_layer = vt_context_layer.permute(0, 2, 1, 3).contiguous()
            vt_new_context_layer_shape = vt_context_layer.size()[:-2] + (self.v_all_head_size,)
            vt_context_layer = vt_context_layer.view(*vt_new_context_layer_shape)
        if self.has_vv:
            vv_context_layer = torch.matmul(vv_attention_probs, v_value_layer)  # [bs, v_num_heads, num_box, v_attn_head_size]
            vv_context_layer = vv_context_layer.permute(0, 2, 1, 3).contiguous()  # [bs, num_box, v_num_heads, v_attn_head_size]
            vv_new_context_layer_shape = vv_context_layer.size()[:-2] + (self.v_all_head_size,)
            vv_context_layer = vv_context_layer.view(*vv_new_context_layer_shape)  # [bs, num_box, v_num_heads*v_attn_head_size]

        # Sum intra- and inter-modal contexts per modality (0 acts as the identity).
        t_context_layer = (tt_context_layer + tv_context_layer)  # [bs, seq_len, hidden_size] or int 0 if no text
        v_context_layer = (vv_context_layer + vt_context_layer)  # [bs, num_box, v_hidden_size] or int 0 if no vision

        if self.visualization:
            t_attn_data = {
                "intra_attn": tt_attention_probs if self.has_tt else None,
                "inter_attn": tv_attention_probs if self.has_tv else None,
                "queries": t_query_layer if self.has_text else None,
                "keys": t_key_layer if self.has_text else None,
            }
            v_attn_data = {
                "intra_attn": vv_attention_probs if self.has_vv else None,
                "inter_attn": vt_attention_probs if self.has_vt else None,
                "queries": v_query_layer if self.has_vision else None,
                "keys": v_key_layer if self.has_vision else None,
            }
        else:
            t_attn_data, v_attn_data = None, None
        return t_context_layer, v_context_layer, t_attn_data, v_attn_data
class BertGatedSelfOutput(nn.Module):
    """Output projection of the gated attention sublayer.

    Per modality: dense -> dropout -> residual add -> LayerNorm. When
    ``single_ln`` is set, text and vision are normalized jointly over the
    concatenated sequence. Inactive modalities pass through unchanged.
    """

    def __init__(self, config, layer_num):
        super(BertGatedSelfOutput, self).__init__()
        # Input widths may be overridden per sublayer; outputs are always the
        # global hidden sizes so the residual connection lines up.
        hidden_size = config.sublayer2attn_hidden_size.get(str(layer_num), config.hidden_size)
        v_hidden_size = config.sublayer2v_attn_hidden_size.get(str(layer_num), config.v_hidden_size)
        self.has_language = ((layer_num in config.tt_attn_sublayers) or (layer_num in config.tv_attn_sublayers))
        self.has_vision = ((layer_num in config.vv_attn_sublayers) or (layer_num in config.vt_attn_sublayers))
        self.share_layer = (layer_num in config.shared_sublayers)
        self.single_ln = (layer_num in config.single_ln_sublayers)
        if self.single_ln:
            assert (self.has_language and self.has_vision and self.share_layer), "Missing language, vision or sharing"
        if self.has_language:
            self.dense = nn.Linear(hidden_size, config.hidden_size)
            self.dropout = nn.Dropout(config.hidden_dropout_prob)
            self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        else:
            self.dense = lambda x: x
            self.dropout = lambda x: x
            self.LayerNorm = lambda x: x
        if self.has_language and self.has_vision and self.share_layer:
            assert (hidden_size == v_hidden_size) and (config.hidden_size == config.v_hidden_size), "hidden_size != v_hidden_size"
            self.v_dense = self.dense
            self.v_dropout = self.dropout
            self.v_LayerNorm = self.LayerNorm
        elif self.has_vision:
            self.v_dense = nn.Linear(v_hidden_size, config.v_hidden_size)
            self.v_dropout = nn.Dropout(config.v_hidden_dropout_prob)
            # FIX: normalize over the post-projection width (config.v_hidden_size).
            # The original used the pre-projection v_hidden_size, which breaks
            # whenever sublayer2v_attn_hidden_size overrides the attention width
            # (cf. the language branch, which correctly uses config.hidden_size).
            self.v_LayerNorm = BertLayerNorm(config.v_hidden_size, eps=config.layer_norm_eps)
        else:
            self.v_dense = lambda x: x
            self.v_dropout = lambda x: x
            self.v_LayerNorm = lambda x: x

    def forward(self, t_hidden_states, v_hidden_states, t_input_tensor, v_input_tensor):
        """
        Args:
            t_hidden_states: [bs, seq_len, hidden_size] or int 0 if no text
            v_hidden_states: [bs, num_box, v_hidden_size] or int 0 if no vision
            t_input_tensor: [bs, seq_len, hidden_size]
            v_input_tensor: [bs, num_box, v_hidden_size]
        Returns:
            t_hidden_states: [bs, seq_len, hidden_size]
            v_hidden_states: [bs, num_box, v_hidden_size]
        """
        t_hidden_states = self.dense(t_hidden_states)
        t_hidden_states = self.dropout(t_hidden_states)
        v_hidden_states = self.v_dense(v_hidden_states)
        v_hidden_states = self.v_dropout(v_hidden_states)
        if self.single_ln:
            # Concatenate text and vision, normalize jointly, then split back.
            hidden_states = torch.cat((t_hidden_states, v_hidden_states), dim=1)  # [bs, seq_len+num_box, hidden_size]
            inputs = torch.cat((t_input_tensor, v_input_tensor), dim=1)  # [bs, seq_len+num_box, hidden_size]
            hidden_states = self.LayerNorm(hidden_states + inputs)
            t_hidden_states, v_hidden_states = \
                hidden_states.split([t_hidden_states.size(1), v_hidden_states.size(1)], dim=1)
        else:
            t_hidden_states = self.LayerNorm(t_hidden_states + t_input_tensor)
            v_hidden_states = self.v_LayerNorm(v_hidden_states + v_input_tensor)
        return t_hidden_states, v_hidden_states
class BertGatedAttention(nn.Module):
    """Full gated attention block: gated self-attention plus its output projection."""

    def __init__(self, config, layer_num):
        super(BertGatedAttention, self).__init__()
        self.attention_self = BertGatedSelfAttention(config, layer_num)
        self.attention_output = BertGatedSelfOutput(config, layer_num)

    def forward(self, t_input_tensor, v_input_tensor, t_attention_mask, v_attention_mask):
        """
        Args:
            t_input_tensor: [bs, seq_len, hidden_size]
            v_input_tensor: [bs, num_box, v_hidden_size]
            t_attention_mask: [bs, 1, 1, seq_len] filled with 0s (non-pad) and -10000s (pad)
            v_attention_mask: [bs, 1, 1, num_box] filled with 0s (non-pad) and -10000s (pad)
        Returns:
            t_attn_output: [bs, seq_len, hidden_size]
            v_attn_output: [bs, num_box, v_hidden_size]
            t_attn_probs: dict or None if no visualization
            v_attn_probs: dict or None if no visualization
        """
        t_self, v_self, t_probs, v_probs = self.attention_self(
            t_input_tensor, v_input_tensor, t_attention_mask, v_attention_mask
        )
        t_out, v_out = self.attention_output(t_self, v_self, t_input_tensor, v_input_tensor)
        return t_out, v_out, t_probs, v_probs
class BertGatedIntermediate(nn.Module):
    """Feed-forward expansion of the gated FFN sublayer (dense + activation per modality).

    Inactive modalities are mapped to the integer 0, which the downstream
    BertGatedOutput treats as the additive identity.
    """

    def __init__(self, config, layer_num):
        super(BertGatedIntermediate, self).__init__()
        self.has_language = (layer_num in config.t_ff_sublayers)
        self.has_vision = (layer_num in config.v_ff_sublayers)
        self.share_layer = (layer_num in config.shared_sublayers)
        # Per-sublayer overrides fall back to the global expansion sizes.
        intermediate_size = config.sublayer2intermediate_size.get(str(layer_num), config.intermediate_size)
        v_intermediate_size = config.sublayer2v_intermediate_size.get(str(layer_num), config.v_intermediate_size)
        if self.has_language:
            self.dense = nn.Linear(config.hidden_size, intermediate_size)
            if isinstance(config.hidden_act, str):
                self.intermediate_act_fn = ACT2FN[config.hidden_act]
            else:
                self.intermediate_act_fn = config.hidden_act
        else:
            self.dense = lambda x: x
            self.intermediate_act_fn = lambda x: 0
        if self.has_language and self.has_vision and self.share_layer:
            assert config.hidden_size == config.v_hidden_size, "hidden_size != v_hidden_size"
            assert intermediate_size == v_intermediate_size, "intermediate_size != v_intermediate_size"
            self.v_dense = self.dense
            self.v_intermediate_act_fn = self.intermediate_act_fn
        elif self.has_vision:
            self.v_dense = nn.Linear(config.v_hidden_size, v_intermediate_size)
            # FIX: inspect the *vision* activation when deciding between a registry
            # name and a callable. The original tested config.hidden_act, which
            # misbehaves whenever the two activations have different types.
            if isinstance(config.v_hidden_act, str):
                self.v_intermediate_act_fn = ACT2FN[config.v_hidden_act]
            else:
                self.v_intermediate_act_fn = config.v_hidden_act
        else:
            self.v_dense = lambda x: x
            self.v_intermediate_act_fn = lambda x: 0

    def forward(self, t_hidden_states, v_hidden_states):
        """
        Args:
            t_hidden_states: [bs, seq_len, hidden_size]
            v_hidden_states: [bs, num_box, v_hidden_size]
        Returns:
            t_hidden_states: [bs, seq_len, hidden_size] or int 0 if no text
            v_hidden_states: [bs, num_box, v_hidden_size] or int 0 if no vision
        """
        t_hidden_states = self.dense(t_hidden_states)
        t_hidden_states = self.intermediate_act_fn(t_hidden_states)
        v_hidden_states = self.v_dense(v_hidden_states)
        v_hidden_states = self.v_intermediate_act_fn(v_hidden_states)
        return t_hidden_states, v_hidden_states
class BertGatedOutput(nn.Module):
    """Output projection of the gated FFN sublayer: dense -> dropout -> residual LayerNorm."""

    def __init__(self, config, layer_num):
        super(BertGatedOutput, self).__init__()
        self.has_language = (layer_num in config.t_ff_sublayers)
        self.has_vision = (layer_num in config.v_ff_sublayers)
        self.share_layer = (layer_num in config.shared_sublayers)
        self.single_ln = (layer_num in config.single_ln_sublayers)
        if self.single_ln:
            assert (self.has_language and self.has_vision and self.share_layer), "Missing language, vision or sharing"
        ff_size = config.sublayer2intermediate_size.get(str(layer_num), config.intermediate_size)
        v_ff_size = config.sublayer2v_intermediate_size.get(str(layer_num), config.v_intermediate_size)
        if self.has_language:
            self.dense = nn.Linear(ff_size, config.hidden_size)
            self.dropout = nn.Dropout(config.hidden_dropout_prob)
            self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        else:
            # Inactive modality: projection, dropout and LayerNorm all pass through.
            self.dense = lambda x: x
            self.dropout = lambda x: x
            self.LayerNorm = lambda x: x
        if self.has_language and self.has_vision and self.share_layer:
            assert config.hidden_size == config.v_hidden_size, "hidden_size != v_hidden_size"
            assert ff_size == v_ff_size, "intermediate_size != v_intermediate_size"
            self.v_dense = self.dense
            self.v_dropout = self.dropout
            self.v_LayerNorm = self.LayerNorm
        elif self.has_vision:
            self.v_dense = nn.Linear(v_ff_size, config.v_hidden_size)
            self.v_dropout = nn.Dropout(config.v_hidden_dropout_prob)
            self.v_LayerNorm = BertLayerNorm(config.v_hidden_size, eps=config.layer_norm_eps)
        else:
            self.v_dense = lambda x: x
            self.v_dropout = lambda x: x
            self.v_LayerNorm = lambda x: x

    def forward(self, t_hidden_states, v_hidden_states, t_input_tensor, v_input_tensor):
        """
        Args:
            t_hidden_states: [bs, seq_len, hidden_size] or int 0 if no text
            v_hidden_states: [bs, num_box, v_hidden_size] or int 0 if no vision
            t_input_tensor: [bs, seq_len, hidden_size]
            v_input_tensor: [bs, num_box, v_hidden_size]
        Returns:
            t_hidden_states: [bs, seq_len, hidden_size]
            v_hidden_states: [bs, num_box, v_hidden_size]
        """
        t_proj = self.dropout(self.dense(t_hidden_states))
        v_proj = self.v_dropout(self.v_dense(v_hidden_states))
        if not self.single_ln:
            return (
                self.LayerNorm(t_proj + t_input_tensor),
                self.v_LayerNorm(v_proj + v_input_tensor),
            )
        # Single LayerNorm over the concatenated text+vision sequence, then split back.
        joint = self.LayerNorm(
            torch.cat((t_proj, v_proj), dim=1)
            + torch.cat((t_input_tensor, v_input_tensor), dim=1)
        )
        t_out, v_out = joint.split([t_proj.size(1), v_proj.size(1)], dim=1)
        return t_out, v_out
class BertGatedFeedForward(nn.Module):
    """Gated feed-forward block: intermediate expansion followed by the output projection."""

    def __init__(self, config, layer_num):
        super(BertGatedFeedForward, self).__init__()
        self.intermediate = BertGatedIntermediate(config, layer_num)
        self.output = BertGatedOutput(config, layer_num)

    def forward(self, t_input_tensor, v_input_tensor):
        """
        Args:
            t_input_tensor: [bs, seq_len, hidden_size]
            v_input_tensor: [bs, num_box, v_hidden_size]
        Returns:
            t_layer_output: [bs, seq_len, hidden_size]
            v_layer_output: [bs, num_box, v_hidden_size]
        """
        t_expanded, v_expanded = self.intermediate(t_input_tensor, v_input_tensor)
        return self.output(t_expanded, v_expanded, t_input_tensor, v_input_tensor)
# ==================================================================================================================== #
# Pooling #
# ==================================================================================================================== #
class BertTextPooler(nn.Module):
    """Pools a text sequence by projecting its first ([CLS]) token through a nonlinearity."""

    def __init__(self, config):
        super(BertTextPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.pooler_size)
        if config.fusion_act == "relu":
            self.activation = nn.ReLU()
        else:
            self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" = projection + activation applied to the first token only.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class VLBertTextPooler(nn.Module):
    """VL-BERT-style pooler: projects the token at position ``text_end - 2`` of each example."""

    def __init__(self, config):
        super(VLBertTextPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.pooler_size)
        if config.fusion_act == "relu":
            self.activation = nn.ReLU()
        else:
            self.activation = nn.Tanh()

    def forward(self, hidden_states, text_end):
        # Build a [bs, seq_len] grid of positions so the mask works whether
        # text_end is a scalar or a per-example tensor.
        bsz = hidden_states.size(0)
        seqlen = hidden_states.size(1)
        _, pos = torch.meshgrid(
            torch.arange(bsz, dtype=torch.long, device=hidden_states.device),
            torch.arange(seqlen, dtype=torch.long, device=hidden_states.device),
        )
        selected = hidden_states[(pos == text_end - 2)]
        return self.activation(self.dense(selected))
class BertImagePooler(nn.Module):
    """Pools the visual stream by projecting its first (global image) token."""

    def __init__(self, config):
        super(BertImagePooler, self).__init__()
        self.dense = nn.Linear(config.v_hidden_size, config.v_pooler_size)
        if config.fusion_act == "relu":
            self.activation = nn.ReLU()
        else:
            self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" = projection + activation applied to the first token only.
        first_state = hidden_states[:, 0]
        return self.activation(self.dense(first_state))
# ==================================================================================================================== #
# Heads #
# ==================================================================================================================== #
class BertPredictionHeadTransform(nn.Module):
    """dense -> activation -> LayerNorm transform preceding the LM decoder."""

    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Accept either a registry name or a callable for the activation.
        act = config.hidden_act
        self.transform_act_fn = ACT2FN[act] if isinstance(act, str) else act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class BertImgPredictionHeadTransform(nn.Module):
    """dense -> activation -> optional LayerNorm transform for the visual prediction heads."""

    def __init__(self, config):
        super(BertImgPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.v_hidden_size, config.v_hidden_size)
        # NOTE(review): the type check inspects config.hidden_act while the
        # callable fallback uses config.v_hidden_act — kept as-is to preserve
        # behavior; confirm upstream whether v_hidden_act was intended in both.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.v_hidden_act
        if config.image_head_ln:
            self.LayerNorm = BertLayerNorm(config.v_hidden_size, eps=config.layer_norm_eps)
        else:
            self.LayerNorm = lambda x: x

    def forward(self, hidden_states):
        out = self.dense(hidden_states)
        out = self.transform_act_fn(out)
        return self.LayerNorm(out)
class BertLMPredictionHead(nn.Module):
    """Masked-LM head: transform followed by a decoder tied to the input embeddings."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        vocab_size = bert_model_embedding_weights.size(0)
        embed_dim = bert_model_embedding_weights.size(1)
        # Decoder weight is tied to the embedding matrix; the per-token output
        # bias is learned separately.
        self.decoder = nn.Linear(embed_dim, vocab_size, bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(vocab_size))

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return self.decoder(transformed) + self.bias
class BertOnlyMLMHead(nn.Module):
    """Wraps the masked-LM prediction head for pre-training objectives."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Binary sentence/image-text relationship classifier over the pooled output."""

    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertImagePredictionHead(nn.Module):
    """Visual prediction head: one linear decoder per active visual pre-training target."""

    def __init__(self, config):
        super(BertImagePredictionHead, self).__init__()
        self.transform = BertImgPredictionHeadTransform(config)
        # Instantiate a decoder only for targets whose loss weight is positive.
        self.decoder_dict = nn.ModuleDict({
            target_id: nn.Linear(config.v_hidden_size, num_labels)
            for target_id, num_labels in pre_vis_targets.items()
            if config.visual_target_weights.get(target_id, 0) > 0
        })

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return {target_id: self.decoder_dict[target_id](transformed) for target_id in self.decoder_dict}
class BertPreTrainingHeads(nn.Module):
    """Pre-training heads for VL models: masked-LM, image-text matching (ITM)
    and visual prediction.

    Args:
        config: model configuration (fusion_method, pooler_size, itm_dim, ...).
        bert_model_embedding_weights: word-embedding matrix tied to the
            masked-LM decoder.
    """
    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        if config.fusion_method in {"none", "vl-bert_vqa"}:
            # No pooled fusion representation is available for these methods,
            # so the ITM head degenerates to a constant None.
            self.bi_seq_relationship = lambda x: None
        else:
            self.bi_seq_relationship = nn.Linear(config.pooler_size, config.itm_dim)
        self.imagePredictions = BertImagePredictionHead(config)
        self.fusion_method = config.fusion_method
        self.dropout = nn.Dropout(0.1)
        self.apply(self.init_weights)
    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=0.02)
        elif isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
    def forward(self, sequence_output_t, sequence_output_v, pooled_output_t, pooled_output_v):
        """Return (text MLM scores, dict of visual scores, ITM scores, pooled fusion)."""
        if self.fusion_method == "sum":
            pooled_output = self.dropout(pooled_output_t + pooled_output_v)
        elif self.fusion_method == "mul":
            pooled_output = self.dropout(pooled_output_t * pooled_output_v)
        elif self.fusion_method == "text":
            pooled_output = self.dropout(pooled_output_t)
        elif self.fusion_method == "vl-bert_vqa":
            pooled_output = self.dropout(pooled_output_t)
        elif self.fusion_method == "none":
            pooled_output = None
        else:
            # Fail loudly and consistently with BertForVLTasks; unlike
            # `assert False`, this also survives `python -O`.
            raise ValueError("Invalid fusion method: %s" % self.fusion_method)
        prediction_scores_t = self.predictions(sequence_output_t)
        seq_relationship_score = self.bi_seq_relationship(pooled_output)
        prediction_scores_v_dict = self.imagePredictions(sequence_output_v)
        return prediction_scores_t, prediction_scores_v_dict, seq_relationship_score, pooled_output
class SimpleClassifier(nn.Module):
    """Two-layer MLP classifier: Linear -> GeLU -> LayerNorm -> Linear.

    NOTE(review): ``dropout_prob`` is accepted but currently unused.
    """
    def __init__(self, in_dim, hid_dim, out_dim, layer_norm_eps=1e-12, dropout_prob=0.0):
        super().__init__()
        stack = [
            nn.Linear(in_dim, hid_dim),
            GeLU(),
            BertLayerNorm(hid_dim, eps=layer_norm_eps),
            nn.Linear(hid_dim, out_dim),
        ]
        self.logit_fc = nn.Sequential(*stack)
        self.apply(self.init_weights)
    def init_weights(self, module):
        """Initialize the weights of *module*."""
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=0.02)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
    def forward(self, hidden_states):
        """Map *hidden_states* to ``out_dim`` logits."""
        return self.logit_fc(hidden_states)
# ==================================================================================================================== #
# Models #
# ==================================================================================================================== #
class BertEncoder(nn.Module):
    """Dual-stream encoder interleaving gated attention and feed-forward
    sublayers over the text and image streams.

    The config lists, per sublayer index, which attention directions
    (text-text, text-vision, vision-text, vision-vision) and which
    feed-forward streams (text, vision) are active; indices must form a
    contiguous range starting at 0 with no overlap between attn and ff.
    """
    def __init__(self, config):
        super(BertEncoder, self).__init__()
        # in the bert encoder, we need to extract three things here.
        # text bert layer: BertLayer
        # vision bert layer: BertImageLayer
        # Bi-Attention: Given the output of two bertlayer, perform bi-directional
        # attention and add on two layers.
        attn_sublayers = set(config.tt_attn_sublayers + config.tv_attn_sublayers +
                             config.vt_attn_sublayers + config.vv_attn_sublayers)
        ff_sublayers = set(config.t_ff_sublayers + config.v_ff_sublayers)
        depth = len(attn_sublayers) + len(ff_sublayers)
        num2layers = {}
        self.num2type = {}  # sublayer index -> "attn" | "ff", used to dispatch in forward()
        for n in attn_sublayers:
            num2layers[n] = BertGatedAttention(config, n)
            self.num2type[n] = "attn"
        for n in ff_sublayers:
            num2layers[n] = BertGatedFeedForward(config, n)
            self.num2type[n] = "ff"
        # A sublayer number may be either attention or feed-forward, never both,
        # and the numbering must cover 0..depth-1 without gaps.
        assert len(num2layers) == depth, "Overlapping attn-ff sublayer numbers"
        assert (min(num2layers) == 0) and (max(num2layers) == depth - 1), "Non contiguous sublayer numbers"
        # Sublayers are stored in index order so forward() can iterate positionally.
        self.layer = nn.ModuleList([copy.deepcopy(sublayer) for _, sublayer in sorted(num2layers.items())])
    def forward(self, txt_embedding, img_embedding, txt_attention_mask, img_attention_mask,
                output_all_encoded_layers=True, output_all_attention_masks=False):
        """
        Args:
            txt_embedding: [bs, seq_len, hidden_size]
            img_embedding: [bs, num_box, v_hidden_size]
            txt_attention_mask: [bs, 1, 1, seq_len] filled with 0s (non-pad) and -10000s (pad)
            img_attention_mask: [bs, 1, 1, num_box] filled with 0s (non-pad) and -10000s (pad)
            output_all_encoded_layers: Bool
            output_all_attention_masks: Bool
        Returns:
            all_encoder_layers_t: list of text hidden states; starts with the input
                embedding and grows per sublayer when output_all_encoded_layers,
                otherwise [input, final] only.
            all_encoder_layers_v: list of image hidden states (same convention).
            (all_attention_mask_t, all_attention_mask_v): per-sublayer attention
                probabilities (None entries for ff sublayers) when
                output_all_attention_masks, otherwise [None].
        """
        all_encoder_layers_t = [txt_embedding]
        all_encoder_layers_v = [img_embedding]
        all_attention_mask_t = [None]
        all_attention_mask_v = [None]
        for idx, layer in enumerate(self.layer):
            layer_type = self.num2type[idx]
            if layer_type == "attn":
                # Gated attention updates both streams and returns their attention maps.
                txt_embedding, img_embedding, txt_attention_probs, img_attention_probs = \
                    layer(txt_embedding, img_embedding, txt_attention_mask, img_attention_mask)
                if output_all_attention_masks:
                    all_attention_mask_t.append(txt_attention_probs)
                    all_attention_mask_v.append(img_attention_probs)
            else:
                # Feed-forward sublayer: no attention maps to record.
                txt_embedding, img_embedding = layer(txt_embedding, img_embedding)
                if output_all_attention_masks:
                    all_attention_mask_t.append(None)
                    all_attention_mask_v.append(None)
            if output_all_encoded_layers:
                all_encoder_layers_t.append(txt_embedding)
                all_encoder_layers_v.append(img_embedding)
        # add the end part to finish.
        if not output_all_encoded_layers:
            all_encoder_layers_t.append(txt_embedding)
            all_encoder_layers_v.append(img_embedding)
        return all_encoder_layers_t, all_encoder_layers_v, (all_attention_mask_t, all_attention_mask_v)
class BertPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """
    # Hooks consumed by the PreTrainedModel machinery (config type, checkpoint
    # archive map, TF-checkpoint loader, and the attribute name of the base model).
    config_class = BertConfig
    pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"
    def __init__(self, *inputs, **kwargs):
        super(BertPreTrainedModel, self).__init__(*inputs, **kwargs)
    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # Linear biases are zeroed regardless of the weight-init branch above.
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
class BertModel(BertPreTrainedModel):
    """Dual-stream (text + vision) BERT encoder producing per-token and pooled
    representations for both modalities.
    """
    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.shared_embeddings = False
        # initialize word embedding
        if config.model == "bert":
            self.embeddings = BertEmbeddings(config)
        elif config.model == "roberta":
            self.embeddings = RobertaEmbeddings(config)
        # initialize vision embedding
        if config.image_embeddings in dual_embeddings:
            self.v_embeddings = dual_embeddings[config.image_embeddings](config)
        else:
            # No separate visual embedder; visual inputs are handled by the
            # shared embedding module installed below.
            self.v_embeddings = lambda x, y: None
        self.encoder = BertEncoder(config)
        self.fusion_method = config.fusion_method
        # Poolers degrade to constant-None lambdas for fusion methods that do
        # not use the corresponding pooled representation.
        if config.fusion_method == "none":
            self.t_pooler = lambda x: None
        elif config.fusion_method == "vl-bert_vqa":
            self.t_pooler = VLBertTextPooler(config)
        else:
            self.t_pooler = BertTextPooler(config)
        if config.fusion_method in {"none", "text", "vl-bert_vqa"}:
            self.v_pooler = lambda x: None
        else:
            assert config.pooler_size == config.v_pooler_size, "pooler_size != v_pooler_size"
            self.v_pooler = BertImagePooler(config)
        self.apply(self.init_weights)
        if config.image_embeddings in shared_embeddings:
            # NOTE(review): built *after* apply(self.init_weights), so the shared
            # embedding module keeps its own default initialization — confirm intended.
            self.embeddings = shared_embeddings[config.image_embeddings](config)
            self.shared_embeddings = True
    def forward(self, input_txt, input_imgs, image_loc, token_type_ids=None, attention_mask=None,
                image_attention_mask=None, output_all_encoded_layers=False, output_all_attention_masks=False):
        """Encode text and image inputs.

        Returns (encoded_layers_t, encoded_layers_v, pooled_output_t,
        pooled_output_v, all_attention_mask); the encoded layers are lists when
        output_all_encoded_layers is True, otherwise the final tensors.
        """
        # Default masks/segments: attend to every token, single segment.
        if attention_mask is None:
            attention_mask = torch.ones_like(input_txt)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_txt)
        if image_attention_mask is None:
            image_attention_mask = torch.ones(input_imgs.size(0), input_imgs.size(1)).type_as(input_txt)
        if self.shared_embeddings:
            embedding_output, v_embedding_output = self.embeddings(input_txt, input_imgs, image_loc, token_type_ids)
        else:
            embedding_output = self.embeddings(input_txt, token_type_ids)
            v_embedding_output = self.v_embeddings(input_imgs, image_loc)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_image_attention_mask = image_attention_mask.unsqueeze(1).unsqueeze(2)
        # extended_attention_mask2 = attention_mask.unsqueeze(2)
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        extended_image_attention_mask = extended_image_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        extended_image_attention_mask = (1.0 - extended_image_attention_mask) * -10000.0
        encoded_layers_t, encoded_layers_v, all_attention_mask = self.encoder(
            embedding_output,
            v_embedding_output,
            extended_attention_mask,
            extended_image_attention_mask,
            output_all_encoded_layers=output_all_encoded_layers,
            output_all_attention_masks=output_all_attention_masks,
        )
        sequence_output_t = encoded_layers_t[-1]
        sequence_output_v = encoded_layers_v[-1]
        if self.fusion_method == "vl-bert_vqa":
            # VL-BERT pooling receives the index just past the last non-zero
            # text token (count of non-pad positions) instead of using [CLS].
            text_mask = input_txt != 0
            text_end = text_mask.sum(1, keepdim=True)
            pooled_output_t = self.t_pooler(sequence_output_t, text_end)
        else:
            pooled_output_t = self.t_pooler(sequence_output_t)
        pooled_output_v = self.v_pooler(sequence_output_v)
        if not output_all_encoded_layers:
            encoded_layers_t = encoded_layers_t[-1]
            encoded_layers_v = encoded_layers_v[-1]
        return encoded_layers_t, encoded_layers_v, pooled_output_t, pooled_output_v, all_attention_mask
class M3PModel(BertPreTrainedModel):
    """Thin wrapper exposing the M3P cross-modal transformer through the same
    forward signature as BertModel (extra keyword arguments are accepted but
    not forwarded)."""
    def __init__(self, config):
        super(M3PModel, self).__init__(config)
        # M3PTransformerModel(params, is_encoder, with_output, is_crossModal=False)
        self.encoder = M3PTransformerModel(config, is_encoder=True, with_output=False, is_crossModal=True)
        self.pooler = self.encoder.pooled_layer
        self.apply(self.init_weights)
    def forward(self, input_txt, input_imgs, image_loc, token_type_ids=None, attention_mask=None,
                image_attention_mask=None, output_all_encoded_layers=False, output_all_attention_masks=False):
        """Return (sequence_output, pooled_output) of the joint encoder."""
        # Sequence lengths are derived from the (binary) attention masks.
        txt_lens = attention_mask.sum(dim=1)
        img_lens = image_attention_mask.sum(dim=1)
        joint_output = self.encoder.jointfwd(input_txt, txt_lens, input_imgs, img_lens, image_loc=image_loc)
        return joint_output, self.pooler(joint_output)
class BertForVLPreTraining(BertPreTrainedModel):
    """BERT model with multimodal pre-training heads (masked LM, visual
    prediction, image-text matching).
    """
    def __init__(self, config):
        super(BertForVLPreTraining, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
        # ignore_index=-1 skips unmasked/non-target positions in both LM and NSP losses.
        self.loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
        self.visual_target_weights = config.visual_target_weights
        print("model's visual targets are ", [ix for ix, w in config.visual_target_weights.items() if w > 0])
        # 1 if a global image feature is added to the region sequence, else 0;
        # used below to slice it out of the visual predictions.
        self.add_global_imgfeat = int(config.add_global_imgfeat is not None)
        self.tie_weights()
    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder, self.bert.embeddings.word_embeddings)
    def forward(
        self,
        input_ids,
        image_feat,
        image_loc,
        token_type_ids=None,
        attention_mask=None,
        image_attention_mask=None,
        masked_lm_labels=None,
        image_label=None,
        image_cls=None,
        obj_labels=None,
        obj_confs=None,
        attr_labels=None,
        attr_confs=None,
        image_attrs=None,
        next_sentence_label=None,
        output_all_encoded_layers=False,
        output_all_attention_masks=False,
    ):
        """Encode both modalities and compute the pre-training losses.

        Returns (masked_lm_loss, img_loss, next_sentence_loss[, encoded layers])
        when any of the three losses is non-zero; otherwise the raw prediction
        scores, attention masks and pooled output.
        NOTE(review): ``image_attrs`` is accepted but never used in this method.
        """
        # in this model, we first embed the images.
        encoded_layers_t, encoded_layers_v, pooled_output_t, pooled_output_v, all_attention_mask = self.bert(
            input_ids,
            image_feat,
            image_loc,
            token_type_ids,
            attention_mask,
            image_attention_mask,
            output_all_encoded_layers=output_all_encoded_layers,
            output_all_attention_masks=output_all_attention_masks,
        )
        if output_all_encoded_layers:
            sequence_output_t = encoded_layers_t[-1]
            sequence_output_v = encoded_layers_v[-1]
        else:
            sequence_output_t = encoded_layers_t
            sequence_output_v = encoded_layers_v
        prediction_scores_t, prediction_scores_v_dict, seq_relationship_score, pooled_output = self.cls(
            sequence_output_t, sequence_output_v, pooled_output_t, pooled_output_v
        )
        # Vision loss
        img_loss = 0
        for ix, weight in self.visual_target_weights.items():
            # Drop the global image feature position from the predictions: the
            # last position when add_global_imgfeat == "last", otherwise the
            # first self.add_global_imgfeat positions (0 or 1).
            if self.config.add_global_imgfeat == "last":
                prediction_scores_v = prediction_scores_v_dict[ix][:, :-1]
            else:
                prediction_scores_v = prediction_scores_v_dict[ix][:, self.add_global_imgfeat:]
            img_loss += pre_vis_criterions[ix](prediction_scores_v, weight, image_label, image_cls, image_feat,
                                               obj_labels, obj_confs, attr_labels, attr_confs)
        # img_loss stays an int 0 when no visual criterion contributed;
        # otherwise it is a tensor and we test its scalar value.
        masked_img_loss = img_loss > 0 if type(img_loss) == int else img_loss.cpu().item() > 0
        if masked_img_loss:
            img_loss = img_loss.unsqueeze(0)
        else:
            img_loss = torch.zeros(1).to(input_ids.device)
        if masked_lm_labels is not None:
            masked_lm_loss = self.loss_fct(
                prediction_scores_t.view(-1, self.config.vocab_size),
                masked_lm_labels.view(-1),
            ).unsqueeze(0)
        else:
            masked_lm_loss = torch.zeros(1).to(input_ids.device)
        if (seq_relationship_score is not None) and (next_sentence_label is not None):
            next_sentence_loss = self.loss_fct(
                seq_relationship_score.view(-1, 2),
                next_sentence_label.view(-1)
            ).unsqueeze(0)
        else:
            next_sentence_loss = torch.zeros(1).to(input_ids.device)
        # Truthiness of the one-element loss tensors: zero -> False, non-zero -> True.
        if masked_img_loss or masked_lm_loss or next_sentence_loss:
            if output_all_encoded_layers:
                return masked_lm_loss, img_loss, next_sentence_loss, encoded_layers_t, encoded_layers_v
            return masked_lm_loss, img_loss, next_sentence_loss
        else:
            if output_all_encoded_layers:
                return prediction_scores_t, prediction_scores_v_dict, seq_relationship_score, all_attention_mask, \
                    pooled_output, encoded_layers_t, encoded_layers_v
            return prediction_scores_t, prediction_scores_v_dict, seq_relationship_score, all_attention_mask, pooled_output
class BertForVLTasks(BertPreTrainedModel):
    """BertModel plus one task-specific classification head per fine-tuning task."""
    def __init__(self, config, task_cfg, task_ids, dropout_prob=0.1):
        super(BertForVLTasks, self).__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(dropout_prob)
        # FIXME ?
        # self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
        self.config = config
        self.task_cfg = task_cfg
        # One classifier head per task, keyed by task id; head shape depends on the task type.
        task2clf = {}
        for task_id in task_ids:
            task_type = task_cfg[task_id]["type"]
            if task_type in {"VL-classifier", "VL-classifier-GQA"}:
                task2clf[task_id] = SimpleClassifier(config.pooler_size, config.clf_hidden_size,
                                                     task_cfg[task_id]["num_labels"], config.layer_norm_eps)
            elif task_type == "VL-binary-classifier":
                task2clf[task_id] = SimpleClassifier(config.pooler_size * 2, config.clf_hidden_size, 2, config.layer_norm_eps)
            elif task_type == "VL-tri-classifier":
                task2clf[task_id] = nn.Linear(config.pooler_size, 3)  # for Visual Entailment tasks
            elif task_type == "VL-logit":
                task2clf[task_id] = nn.Linear(config.pooler_size, 1)
            elif task_type.startswith("V-logit"):
                # Per-region scoring head (e.g. grounding); optionally a 2-layer MLP.
                if task_cfg[task_id].get("num_clf_layers", 1) == 2:
                    task2clf[task_id] = torch.nn.Sequential(
                        nn.Linear(config.v_hidden_size, config.v_hidden_size),
                        GeLU(),
                        torch.nn.Dropout(config.v_attention_probs_dropout_prob, inplace=False),
                        nn.Linear(config.v_hidden_size, 1)
                    )
                else:
                    task2clf[task_id] = nn.Linear(config.v_hidden_size, 1)
            else:
                raise ValueError("Undefined task type: %s" % task_type)
        self.clfs_dict = nn.ModuleDict(task2clf)
        self.fusion_method = config.fusion_method
        self.apply(self.init_weights)
    # FIXME
    def init_output(self, ckpt_file, task_id):
        """Initialize a task head from pre-trained ITM weights (drops the first row).

        NOTE(review): assigns .weight/.bias directly, so this is only valid when
        the head for *task_id* is a single nn.Linear — confirm for each caller.
        """
        state_dict = torch.load(ckpt_file, map_location="cpu")
        self.clfs_dict[task_id].weight.data = state_dict['cls.bi_seq_relationship.weight'].data[1:, :]
        self.clfs_dict[task_id].bias.data = state_dict['cls.bi_seq_relationship.bias'].data[1:]
        del state_dict
    def forward(
        self,
        input_txt,
        input_imgs,
        image_loc,
        task_id,
        token_type_ids=None,
        attention_mask=None,
        image_attention_mask=None,
        output_all_encoded_layers=False,
        output_all_attention_masks=False,
    ):
        """Encode both modalities and apply the head registered for *task_id*.

        Returns (vil_prediction, vision_prediction, linguisic_prediction,
        all_attention_mask[, encoded layers]); the middle two are always None here.
        """
        encoded_layers_t, encoded_layers_v, pooled_output_t, pooled_output_v, all_attention_mask = self.bert(
            input_txt,
            input_imgs,
            image_loc,
            token_type_ids,
            attention_mask,
            image_attention_mask,
            output_all_encoded_layers=output_all_encoded_layers,
            output_all_attention_masks=output_all_attention_masks,
        )
        if output_all_encoded_layers:
            sequence_output_t = encoded_layers_t[-1]
            sequence_output_v = encoded_layers_v[-1]
        else:
            sequence_output_t = encoded_layers_t
            sequence_output_v = encoded_layers_v
        linguisic_prediction, vision_prediction = None, None
        if self.fusion_method == "sum":
            pooled_output = self.dropout(pooled_output_t + pooled_output_v)
        elif self.fusion_method == "mul":
            pooled_output = self.dropout(pooled_output_t * pooled_output_v)
        elif self.fusion_method == "text":
            pooled_output = self.dropout(pooled_output_t)
        elif self.fusion_method == "vl-bert_vqa":
            pooled_output = self.dropout(pooled_output_t)
        elif self.fusion_method == "none":
            pooled_output = None
        else:
            raise ValueError("Invalid fusion method: %s" % self.fusion_method)
        if self.task_cfg[task_id]["type"].startswith("V-logit"):
            # Add -10000 to the logits of padded boxes so they are effectively excluded.
            vil_prediction = self.clfs_dict[task_id](self.dropout(sequence_output_v)) + (
                (1.0 - image_attention_mask) * -10000.0).unsqueeze(2).to(dtype=next(self.parameters()).dtype)
        elif self.task_cfg[task_id]["type"] == "VL-binary-classifier":
            # NLVR
            vil_prediction = self.clfs_dict[task_id](pooled_output.view(-1, pooled_output.size(1) * 2))
        else:
            vil_prediction = self.clfs_dict[task_id](pooled_output)
        if output_all_encoded_layers:
            return vil_prediction, vision_prediction, linguisic_prediction, all_attention_mask, \
                encoded_layers_t, encoded_layers_v
        return vil_prediction, vision_prediction, linguisic_prediction, all_attention_mask
class M3PForVLTasks(BertPreTrainedModel):
    """M3P joint encoder plus one task-specific classification head per task."""
    def __init__(self, config, task_cfg, task_ids, dropout_prob=0.1):
        super(M3PForVLTasks, self).__init__(config)
        self.bert = M3PModel(config)
        self.dropout = nn.Dropout(dropout_prob)
        # FIXME ?
        # self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
        self.config = config
        self.task_cfg = task_cfg
        # One classifier head per task, keyed by task id; head shape depends on the task type.
        task2clf = {}
        for task_id in task_ids:
            task_type = task_cfg[task_id]["type"]
            if task_type in {"VL-classifier", "VL-classifier-GQA"}:
                task2clf[task_id] = SimpleClassifier(config.pooler_size, config.clf_hidden_size,
                                                     task_cfg[task_id]["num_labels"], config.layer_norm_eps)
            elif task_type == "VL-binary-classifier":
                task2clf[task_id] = SimpleClassifier(config.pooler_size * 2, config.clf_hidden_size, 2, config.layer_norm_eps)
            elif task_type == "VL-tri-classifier":
                task2clf[task_id] = nn.Linear(config.pooler_size, 3)  # for Visual Entailment tasks
            elif task_type == "VL-logit":
                task2clf[task_id] = nn.Linear(config.pooler_size, 1)
            elif task_type.startswith("V-logit"):
                # Per-region scoring head; optionally a 2-layer MLP.
                if task_cfg[task_id].get("num_clf_layers", 1) == 2:
                    task2clf[task_id] = torch.nn.Sequential(
                        nn.Linear(config.v_hidden_size, config.v_hidden_size),
                        GeLU(),
                        torch.nn.Dropout(config.v_attention_probs_dropout_prob, inplace=False),
                        nn.Linear(config.v_hidden_size, 1)
                    )
                else:
                    task2clf[task_id] = nn.Linear(config.v_hidden_size, 1)
            else:
                raise ValueError("Undefined task type: %s" % task_type)
        self.clfs_dict = nn.ModuleDict(task2clf)
        self.fusion_method = config.fusion_method
        self.apply(self.init_weights)
    # FIXME
    def init_output(self, ckpt_file, task_id):
        """Initialize a (single-Linear) task head from M3P's ITM head weights."""
        state_dict = torch.load(ckpt_file, map_location="cpu")
        self.clfs_dict[task_id].weight.data = state_dict['bert.encoder.seq_relationship.weight'].data
        self.clfs_dict[task_id].bias.data = state_dict['bert.encoder.seq_relationship.bias'].data
        del state_dict
    def forward(
        self,
        input_txt,
        input_imgs,
        image_loc,
        task_id,
        token_type_ids=None,
        attention_mask=None,
        image_attention_mask=None,
        output_all_encoded_layers=False,
        output_all_attention_masks=False,
    ):
        """Run the joint M3P encoder and apply the head registered for *task_id*.

        V-logit task types and output_all_encoded_layers are not implemented
        for M3P and raise NotImplementedError.
        """
        sequence_output, pooled_output = self.bert(
            input_txt,
            input_imgs,
            image_loc,
            token_type_ids,
            attention_mask,
            image_attention_mask,
            output_all_encoded_layers=output_all_encoded_layers,
            output_all_attention_masks=output_all_attention_masks,
        )
        linguisic_prediction, vision_prediction = None, None
        pooled_output = self.dropout(pooled_output)
        if self.task_cfg[task_id]["type"].startswith("V-logit"):
            raise NotImplementedError("V-logit for M3P -- get the sequence_output_v")
            # vil_prediction = self.clfs_dict[task_id](self.dropout(sequence_output_v)) + (
            #     (1.0 - image_attention_mask) * -10000.0).unsqueeze(2).to(dtype=next(self.parameters()).dtype)
        elif self.task_cfg[task_id]["type"] == "VL-binary-classifier":
            # NLVR
            vil_prediction = self.clfs_dict[task_id](pooled_output.view(-1, pooled_output.size(1) * 2))
        else:
            vil_prediction = self.clfs_dict[task_id](pooled_output)
        if output_all_encoded_layers:
            raise NotImplementedError("output_all_encoded_layers for M3P")
            # return vil_prediction, vision_prediction, linguisic_prediction, all_attention_mask, \
            #     encoded_layers_t, encoded_layers_v
        return vil_prediction, vision_prediction, linguisic_prediction, None # all_attention_mask
| 48.8774
| 187
| 0.647189
|
4a09731fee39348f2cd63c4bab2fe0dfd3422937
| 11,335
|
py
|
Python
|
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v1/distribute/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | null | null | null |
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v1/distribute/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | null | null | null |
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v1/distribute/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | 1
|
2021-01-28T01:57:41.000Z
|
2021-01-28T01:57:41.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Library for running a computation across multiple devices.
The intent of this library is that you can write an algorithm in a stylized way
and it will be usable with a variety of different `tf.distribute.Strategy`
implementations. Each descendant will implement a different strategy for
distributing the algorithm across multiple devices/machines. Furthermore, these
changes can be hidden inside the specific layers and other library classes that
need special treatment to run in a distributed setting, so that most users'
model definition code can run unchanged. The `tf.distribute.Strategy` API works
the same way with eager and graph execution.
*Guides*
* [TensorFlow v2.x](https://www.tensorflow.org/guide/distributed_training)
* [TensorFlow v1.x](https://github.com/tensorflow/docs/blob/master/site/en/r1/guide/distribute_strategy.ipynb)
*Tutorials*
* [Distributed Training Tutorials](https://www.tensorflow.org/tutorials/distribute/)
The tutorials cover how to use `tf.distribute.Strategy` to do distributed
training with native Keras APIs, custom training loops,
and Estimator APIs. They also cover how to save/load models when using
`tf.distribute.Strategy`.
*Glossary*
* _Data parallelism_ is where we run multiple copies of the model
on different slices of the input data. This is in contrast to
_model parallelism_ where we divide up a single copy of a model
across multiple devices.
Note: we only support data parallelism for now, but
hope to add support for model parallelism in the future.
* A _device_ is a CPU or accelerator (e.g. GPUs, TPUs) on some machine that
TensorFlow can run operations on (see e.g. `tf.device`). You may have multiple
devices on a single machine, or be connected to devices on multiple
machines. Devices used to run computations are called _worker devices_.
Devices used to store variables are _parameter devices_. For some strategies,
such as `tf.distribute.MirroredStrategy`, the worker and parameter devices
will be the same (see mirrored variables below). For others they will be
different. For example, `tf.distribute.experimental.CentralStorageStrategy`
puts the variables on a single device (which may be a worker device or may be
the CPU), and `tf.distribute.experimental.ParameterServerStrategy` puts the
variables on separate machines called _parameter servers_ (see below).
* A _replica_ is one copy of the model, running on one slice of the
input data. Right now each replica is executed on its own
worker device, but once we add support for model parallelism
a replica may span multiple worker devices.
* A _host_ is the CPU device on a machine with worker devices, typically
used for running input pipelines.
* A _worker_ is defined to be the physical machine(s) containing the physical
devices (e.g. GPUs, TPUs) on which the replicated computation is executed. A
worker may contain one or more replicas, but contains at least one
replica. Typically one worker will correspond to one machine, but in the case
of very large models with model parallelism, one worker may span multiple
machines. We typically run one input pipeline per worker, feeding all the
replicas on that worker.
* _Synchronous_, or more commonly _sync_, training is where the updates from
each replica are aggregated together before updating the model variables. This
is in contrast to _asynchronous_, or _async_ training, where each replica
updates the model variables independently. You may also have replicas
partitioned into groups which are in sync within each group but async between
groups.
* _Parameter servers_: These are machines that hold a single copy of
parameters/variables, used by some strategies (right now just
`tf.distribute.experimental.ParameterServerStrategy`). All replicas that want
to operate on a variable retrieve it at the beginning of a step and send an
update to be applied at the end of the step. These can in principle support
either sync or async training, but right now we only have support for async
training with parameter servers. Compare to
`tf.distribute.experimental.CentralStorageStrategy`, which puts all variables
on a single device on the same machine (and does sync training), and
`tf.distribute.MirroredStrategy`, which mirrors variables to multiple devices
(see below).
* _Replica context_ vs. _Cross-replica context_ vs _Update context_
A _replica context_ applies
when you execute the computation function that was called with `strategy.run`.
Conceptually, you're in replica context when executing the computation
function that is being replicated.
An _update context_ is entered in a `tf.distribute.StrategyExtended.update`
call.
A _cross-replica context_ is entered when you enter a `strategy.scope`. This
is useful for calling `tf.distribute.Strategy` methods which operate across
the replicas (like `reduce_to()`). By default you start in a _replica context_
(the "default single _replica context_") and then some methods can switch you
back and forth.
* _Distributed value_: Distributed value is represented by the base class
`tf.distribute.DistributedValues`. `tf.distribute.DistributedValues` is useful
to represent values on multiple devices, and it contains a map from replica id
to values. Two representative kinds of `tf.distribute.DistributedValues` are
"PerReplica" and "Mirrored" values.
"PerReplica" values exist on the worker
devices, with a different value for each replica. They are produced by
iterating through a distributed dataset returned by
`tf.distribute.Strategy.experimental_distribute_dataset` and
`tf.distribute.Strategy.experimental_distribute_datasets_from_function`. They
are also the typical result returned by
`tf.distribute.Strategy.run`.
"Mirrored" values are like "PerReplica" values, except we know that the value
on all replicas are the same. We can safely read a "Mirrored" value in a
cross-replica context by using the value on any replica.
* _Unwrapping_ and _merging_: Consider calling a function `fn` on multiple
replicas, like `strategy.run(fn, args=[w])` with an
argument `w` that is a `tf.distribute.DistributedValues`. This means `w` will
have a map taking replica id `0` to `w0`, replica id `1` to `w1`, etc.
`strategy.run()` unwraps `w` before calling `fn`, so it calls `fn(w0)` on
device `d0`, `fn(w1)` on device `d1`, etc. It then merges the return
values from `fn()`, which leads to one common object if the returned values
are the same object from every replica, or a `DistributedValues` object
otherwise.
* _Reductions_ and _all-reduce_: A _reduction_ is a method of aggregating
multiple values into one value, like "sum" or "mean". If a strategy is doing
sync training, we will perform a reduction on the gradients to a parameter
from all replicas before applying the update. _All-reduce_ is an algorithm for
performing a reduction on values from multiple devices and making the result
available on all of those devices.
* _Mirrored variables_: These are variables that are created on multiple
devices, where we keep the variables in sync by applying the same
updates to every copy. Mirrored variables are created with
`tf.Variable(...synchronization=tf.VariableSynchronization.ON_WRITE...)`.
Normally they are only used in synchronous training.
* _SyncOnRead variables_
_SyncOnRead variables_ are created by
`tf.Variable(...synchronization=tf.VariableSynchronization.ON_READ...)`, and
they are created on multiple devices. In replica context, each
component variable on the local replica can perform reads and writes without
synchronization with each other. When the
_SyncOnRead variable_ is read in cross-replica context, the values from
component variables are aggregated and returned.
_SyncOnRead variables_ bring a lot of custom configuration difficulty to the
underlying logic, so we do not encourage users to instantiate and use
_SyncOnRead variable_ on their own. We have mainly used _SyncOnRead
variables_ for use cases such as batch norm and metrics. For performance
reasons, we often don't need to keep these statistics in sync every step and
they can be accumulated on each replica independently. The only time we want
to sync them is reporting or checkpointing, which typically happens in
cross-replica context. _SyncOnRead variables_ are also often used by advanced
users who want to control when variable values are aggregated. For example,
users sometimes want to maintain gradients independently on each replica for a
couple of steps without aggregation.
* _Distribute-aware layers_
Layers are generally called in a replica context, except when defining a
Keras functional model. `tf.distribute.in_cross_replica_context` will let you
determine which case you are in. If in a replica context,
the `tf.distribute.get_replica_context` function will return the default
replica context outside a strategy scope, `None` within a strategy scope, and
a `tf.distribute.ReplicaContext` object inside a strategy scope and within a
`tf.distribute.Strategy.run` function. The `ReplicaContext` object has an
`all_reduce` method for aggregating across all replicas.
Note that we provide a default version of `tf.distribute.Strategy` that is
used when no other strategy is in scope, that provides the same API with
reasonable default behavior.
"""
from __future__ import print_function as _print_function
import sys as _sys
from . import cluster_resolver
from . import experimental
from tensorflow.python.distribute.cross_device_ops import CrossDeviceOps
from tensorflow.python.distribute.cross_device_ops import HierarchicalCopyAllReduce
from tensorflow.python.distribute.cross_device_ops import NcclAllReduce
from tensorflow.python.distribute.cross_device_ops import ReductionToOneDevice
from tensorflow.python.distribute.distribute_lib import InputContext
from tensorflow.python.distribute.distribute_lib import InputReplicationMode
from tensorflow.python.distribute.distribute_lib import ReplicaContext
from tensorflow.python.distribute.distribute_lib import RunOptions
from tensorflow.python.distribute.distribute_lib import StrategyExtendedV1 as StrategyExtended
from tensorflow.python.distribute.distribute_lib import StrategyV1 as Strategy
from tensorflow.python.distribute.distribute_lib import get_loss_reduction
from tensorflow.python.distribute.distribution_strategy_context import experimental_set_strategy
from tensorflow.python.distribute.distribution_strategy_context import get_replica_context
from tensorflow.python.distribute.distribution_strategy_context import get_strategy
from tensorflow.python.distribute.distribution_strategy_context import has_strategy
from tensorflow.python.distribute.distribution_strategy_context import in_cross_replica_context
from tensorflow.python.distribute.mirrored_strategy import MirroredStrategyV1 as MirroredStrategy
from tensorflow.python.distribute.one_device_strategy import OneDeviceStrategyV1 as OneDeviceStrategy
from tensorflow.python.distribute.reduce_util import ReduceOp
from tensorflow.python.training.server_lib import Server
del _print_function
| 55.837438
| 110
| 0.80397
|
4a0973dd6483ecc231d7d52888412c4c433b4bcb
| 18,431
|
py
|
Python
|
tests/test_gradient.py
|
MIBbrandon/tequila
|
8e872e33b40c7dcfd58d998a61071f66034e39df
|
[
"MIT"
] | null | null | null |
tests/test_gradient.py
|
MIBbrandon/tequila
|
8e872e33b40c7dcfd58d998a61071f66034e39df
|
[
"MIT"
] | 3
|
2020-12-08T14:15:56.000Z
|
2020-12-17T16:38:35.000Z
|
tests/test_gradient.py
|
clausia/tequila
|
a67fe808c165c5f30a30d9745cfb8155b47c7eca
|
[
"MIT"
] | null | null | null |
import tequila.simulators.simulator_api
from tequila.circuit import gates
from tequila.circuit.gradient import grad
from tequila.objective import ExpectationValue
from tequila.objective.objective import Variable
from tequila.hamiltonian import paulis
from tequila.simulators.simulator_api import simulate
from tequila import simulators
import numpy
import pytest
# Get QC backends for parametrized testing
import select_backends
simulators = select_backends.get()
samplers = select_backends.get(sampler=True)
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("controlled", [False, True])
@pytest.mark.parametrize("angle_value", numpy.random.uniform(0.0, 2.0 * numpy.pi, 1))
def test_gradient_UY_HX(simulator, angle_value, controlled, silent=True):
    """For U = Ry(angle) and H = X: check E = sin(angle) and dE/dangle = cos(angle)."""
    # case X Y
    # U = cos(angle/2) + sin(-angle/2)*i*Y
    # <0|Ud H U |0> = cos^2(angle/2)*<0|X|0>
    # + sin^2(-angle/2) <0|YXY|0>
    # + cos(angle/2)*sin(angle/2)*i<0|XY|0>
    # + sin(-angle/2)*cos(angle/2)*(-i) <0|YX|0>
    # = cos^2*0 + sin^2*0 + cos*sin*i(<0|[XY,YX]|0>)
    # = 0.5*sin(-angle)*i <0|[XY,YX]|0> = -0.5*sin(angle)*i * 2 i <0|Z|0>
    # = sin(angle)
    angle = Variable(name="angle")
    variables = {angle: angle_value}
    qubit = 0
    H = paulis.X(qubit=qubit)
    if controlled:
        control = 1
        U = gates.X(target=control) + gates.Ry(target=qubit, control=control, angle=angle)
    else:
        # NOTE(review): the two leading X gates cancel to the identity —
        # presumably deliberate padding of the circuit; confirm intent.
        U = gates.X(target=qubit) + gates.X(target=qubit) + gates.Ry(target=qubit, angle=angle)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    print("O={type}".format(type=type(O)))
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, numpy.sin(angle(variables)), atol=1.e-4))
    assert (numpy.isclose(dE, numpy.cos(angle(variables)), atol=1.e-4))
    if not silent:
        print("E =", E)
        print("sin(angle)=", numpy.sin(angle()))
        print("dE =", dE)
        print("cos(angle)=", numpy.cos(angle()))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("controlled", [False, True])
@pytest.mark.parametrize("angle_value", numpy.random.uniform(0.0, 2.0 * numpy.pi, 1))
def test_gradient_UY_HX_sample(simulator, angle_value, controlled, silent=True):
    """Shot-based (samples=10000) version of test_gradient_UY_HX with looser atol.

    NOTE(review): this parametrizes over ``simulators`` although the module also
    builds a ``samplers`` list that is otherwise unused — verify which backend
    set is intended for sampling tests.
    """
    # case X Y
    # U = cos(angle/2) + sin(-angle/2)*i*Y
    # <0|Ud H U |0> = cos^2(angle/2)*<0|X|0>
    # + sin^2(-angle/2) <0|YXY|0>
    # + cos(angle/2)*sin(angle/2)*i<0|XY|0>
    # + sin(-angle/2)*cos(angle/2)*(-i) <0|YX|0>
    # = cos^2*0 + sin^2*0 + cos*sin*i(<0|[XY,YX]|0>)
    # = 0.5*sin(-angle)*i <0|[XY,YX]|0> = -0.5*sin(angle)*i * 2 i <0|Z|0>
    # = sin(angle)
    angle = Variable(name="angle")
    variables = {angle: angle_value}
    qubit = 0
    H = paulis.X(qubit=qubit)
    if controlled:
        control = 1
        U = gates.X(target=control) + gates.Ry(target=qubit, control=control, angle=angle)
    else:
        U = gates.X(target=qubit) + gates.X(target=qubit) + gates.Ry(target=qubit, angle=angle)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator, samples=10000)
    print("O={type}".format(type=type(O)))
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator, samples=10000)
    # statistical noise from 10k shots: tolerance widened to 3e-2
    assert (numpy.isclose(E, numpy.sin(angle(variables)), atol=3.e-2))
    assert (numpy.isclose(dE, numpy.cos(angle(variables)), atol=3.e-2))
    if not silent:
        print("E =", E)
        print("sin(angle)=", numpy.sin(angle()))
        print("dE =", dE)
        print("cos(angle)=", numpy.cos(angle()))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("controlled", [False, True])
@pytest.mark.parametrize("angle_value", numpy.random.uniform(0.0, 2.0 * numpy.pi, 1))
def test_gradient_UX_HY(simulator, angle_value, controlled, silent=False):
    """For U = Rx(angle) and H = Y: check E = -sin(angle) and dE/dangle = -cos(angle)."""
    # case YX
    # U = cos(angle/2) + sin(-angle/2)*i*X
    # O = cos*sin*i*<0|YX|0> + sin*cos*(-i)<0|XY|0>
    # = 0.5*sin(-angle)*i <0|[YX,XY]|0>
    # = -sin(angle)
    angle = Variable(name="angle")
    variables = {angle: angle_value}
    qubit = 0
    H = paulis.Y(qubit=qubit)
    if controlled:
        control = 1
        U = gates.X(target=control) + gates.Rx(target=qubit, control=control, angle=angle)
    else:
        U = gates.Rx(target=qubit, angle=angle)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable='angle')
    # bugfix: evaluate the gradient on the parametrized backend too — previously
    # this call omitted backend=simulator (inconsistent with the sibling tests)
    # and silently fell back to the default backend.
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, -numpy.sin(angle(variables)), atol=1.e-4))
    assert (numpy.isclose(dE, -numpy.cos(angle(variables)), atol=1.e-4))
    if not silent:
        print("E =", E)
        print("-sin(angle)=", -numpy.sin(angle(variables)))
        print("dE =", dE)
        print("-cos(angle)=", -numpy.cos(angle(variables)))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("controlled", [False, True])
@pytest.mark.parametrize("angle_value", numpy.random.uniform(0.0, 2.0 * numpy.pi, 1))
def test_gradient_UHZH_HY(simulator, angle_value, controlled, silent=False):
    """H·Rz(angle)·H against H = Y: expect E = -sin(angle), dE = -cos(angle)."""
    angle = Variable(name="angle")
    variables = {angle: angle_value}
    qubit = 0
    H = paulis.Y(qubit=qubit)
    if controlled:
        control = 1
        U = gates.X(target=control) + gates.H(target=qubit) + gates.Rz(target=qubit, control=control,
                                                                       angle=angle) + gates.H(target=qubit)
    else:
        U = gates.H(target=qubit) + gates.Rz(target=qubit, angle=angle) + gates.H(target=qubit)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable='angle')
    # bugfix: evaluate the gradient on the parametrized backend (was omitted)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, -numpy.sin(angle(variables)), atol=1.e-4))
    assert (numpy.isclose(dE, -numpy.cos(angle(variables)), atol=1.e-4))
    if not silent:
        print("E =", E)
        print("-sin(angle)=", -numpy.sin(angle(variables)))
        print("dE =", dE)
        print("-cos(angle)=", -numpy.cos(angle(variables)))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("controlled", [False, True])
@pytest.mark.parametrize("angle_value", numpy.random.uniform(0.0, 2.0 * numpy.pi, 1))
def test_gradient_PHASE_HY(simulator, angle_value, controlled, silent=False):
    """H·Phase(phi)·H against H = Y: expect E = -sin(phi), dE = -cos(phi)."""
    angle = Variable(name="angle")
    variables = {angle: angle_value}
    qubit = 0
    H = paulis.Y(qubit=qubit)
    if controlled:
        control = 1
        U = gates.X(target=control) + gates.H(target=qubit) + gates.Phase(target=qubit, control=control,
                                                                          phi=angle) + gates.H(target=qubit)
    else:
        U = gates.H(target=qubit) + gates.Phase(target=qubit, phi=angle) + gates.H(target=qubit)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable='angle')
    # bugfix: evaluate the gradient on the parametrized backend (was omitted)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, -numpy.sin(angle(variables)), atol=1.e-4))
    assert (numpy.isclose(dE, -numpy.cos(angle(variables)), atol=1.e-4))
    if not silent:
        print("E =", E)
        print("-sin(angle)=", -numpy.sin(angle(variables)))
        print("dE =", dE)
        print("-cos(angle)=", -numpy.cos(angle(variables)))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("controlled", [False, True])
@pytest.mark.parametrize("angle_value", numpy.random.uniform(0.0, 2.0 * numpy.pi, 1))
def test_gradient_UY_HX_wfnsim(simulator, angle_value, controlled, silent=True):
    """Wavefunction-simulation variant of test_gradient_UY_HX (E = sin, dE = cos)."""
    # same as before just with wavefunction simulation
    # case X Y
    # U = cos(angle/2) + sin(-angle/2)*i*Y
    # <0|Ud H U |0> = cos^2(angle/2)*<0|X|0>
    # + sin^2(-angle/2) <0|YXY|0>
    # + cos(angle/2)*sin(angle/2)*i<0|XY|0>
    # + sin(-angle/2)*cos(angle/2)*(-i) <0|YX|0>
    # = cos^2*0 + sin^2*0 + cos*sin*i(<0|[XY,YX]|0>)
    # = 0.5*sin(-angle)*i <0|[XY,YX]|0> = -0.5*sin(angle)*i * 2 i <0|Z|0>
    # = sin(angle)
    angle = Variable(name="angle")
    variables = {angle: angle_value}
    qubit = 0
    H = paulis.X(qubit=qubit)
    if controlled:
        control = 1
        U = gates.X(target=control) + gates.Ry(target=qubit, control=control, angle=angle)
    else:
        U = gates.Ry(target=qubit, angle=angle)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable='angle')
    dE = simulate(dO, variables=variables, backend=simulator)
    # bugfix: the numpy.float alias was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin float does the same conversion for numpy.isclose.
    E = float(E)  # for isclose
    dE = float(dE)  # for isclose
    assert (numpy.isclose(E, numpy.sin(angle(variables)), atol=0.0001))
    assert (numpy.isclose(dE, numpy.cos(angle(variables)), atol=0.0001))
    if not silent:
        print("E =", E)
        print("sin(angle)=", numpy.sin(angle(variables)))
        print("dE =", dE)
        print("cos(angle)=", numpy.cos(angle(variables)))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("controlled", [False, True])
@pytest.mark.parametrize("angle", numpy.random.uniform(0.0, 2.0 * numpy.pi, 1))
def test_gradient_UX_HY_wfnsim(simulator, angle, controlled, silent=True):
    """Wavefunction-simulation variant of test_gradient_UX_HY (E = -sin, dE = -cos)."""
    # same as before just with wavefunction simulation
    # case YX
    # U = cos(angle/2) + sin(-angle/2)*i*X
    # O = cos*sin*i*<0|YX|0> + sin*cos*(-i)<0|XY|0>
    # = 0.5*sin(-angle)*i <0|[YX,XY]|0>
    # = -sin(angle)
    angle_value = angle
    angle = Variable(name="angle")
    variables = {angle: angle_value}
    qubit = 0
    H = paulis.Y(qubit=qubit)
    if controlled:
        control = 1
        U = gates.X(target=control) + gates.Rx(target=qubit, control=control, angle=angle)
    else:
        U = gates.Rx(target=qubit, angle=angle)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable=angle)
    # bugfix: evaluate the gradient on the parametrized backend (was omitted)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, -numpy.sin(angle(variables)), atol=0.0001))
    assert (numpy.isclose(dE, -numpy.cos(angle(variables)), atol=0.0001))
    if not silent:
        print("E =", E)
        print("-sin(angle)=", -numpy.sin(angle(variables)))
        print("dE =", dE)
        print("-cos(angle)=", -numpy.cos(angle(variables)))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("power", numpy.random.uniform(0.0, 2.0, 1))
@pytest.mark.parametrize("controlled", [False, True])
def test_gradient_X(simulator, power, controlled):
    """Power-of-X gate against H = Y: E = -sin(pi*power), dE = -pi*cos(pi*power)."""
    qubit = 0
    control = 1
    # Single Variable object; a redundant second `angle = Variable(name="angle")`
    # re-definition was removed (the test passed with it only because tequila
    # resolves variables by name).
    angle = Variable(name="angle")
    if controlled:
        U = gates.X(target=control) + gates.X(target=qubit, power=angle, control=control)
    else:
        U = gates.X(target=qubit, power=angle)
    variables = {angle: power}
    H = paulis.Y(qubit=qubit)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, -numpy.sin(angle(variables) * (numpy.pi)), atol=1.e-4))
    assert (numpy.isclose(dE, -numpy.pi * numpy.cos(angle(variables) * (numpy.pi)), atol=1.e-4))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("power", numpy.random.uniform(0.0, 2.0, 1))
@pytest.mark.parametrize("controls", [1, 2, 3])
def test_gradient_deep_controlled_X(simulator, power, controls):
    """Same as test_gradient_X but with a ladder of 1-3 control qubits."""
    if controls > 2 and simulator == "qiskit":
        # does not work yet
        return
    qubit = 0
    control = [i for i in range(1, controls + 1)]
    # single Variable (a redundant duplicate re-definition was removed;
    # tequila variables resolve by name)
    angle = Variable(name="angle")
    U = gates.X(target=control) + gates.X(target=qubit, power=angle, control=control)
    variables = {angle: power}
    H = paulis.Y(qubit=qubit)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, -numpy.sin(angle(variables) * (numpy.pi)), atol=1.e-4))
    assert (numpy.isclose(dE, -numpy.pi * numpy.cos(angle(variables) * (numpy.pi)), atol=1.e-4))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("power", numpy.random.uniform(0.0, 2.0 * numpy.pi, 1))
@pytest.mark.parametrize("controlled", [False, True])
def test_gradient_Y(simulator, power, controlled):
    """Power-of-Y gate against H = X: E = sin(pi*power), dE = pi*cos(pi*power)."""
    if simulator != "cirq":
        # restricted to cirq by the original author; other backends are skipped
        return
    qubit = 0
    control = 1
    # single Variable (a redundant duplicate re-definition was removed;
    # tequila variables resolve by name)
    angle = Variable(name="angle")
    if controlled:
        U = gates.X(target=control) + gates.Y(target=qubit, power=angle, control=control)
    else:
        U = gates.Y(target=qubit, power=angle)
    variables = {angle: power}
    H = paulis.X(qubit=qubit)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, numpy.sin(angle(variables) * (numpy.pi)), atol=1.e-4))
    assert (numpy.isclose(dE, numpy.pi * numpy.cos(angle(variables) * (numpy.pi)), atol=1.e-4))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("power", numpy.random.uniform(0.0, 2.0, 1))
@pytest.mark.parametrize("controls", [1, 2, 3])
def test_gradient_deep_controlled_Y(simulator, power, controls):
    """Same as test_gradient_Y but with a ladder of 1-3 control qubits."""
    if controls > 2 and simulator == "qiskit":
        # does not work yet
        return
    qubit = 0
    control = [i for i in range(1, controls + 1)]
    # single Variable (a redundant duplicate re-definition was removed;
    # tequila variables resolve by name)
    angle = Variable(name="angle")
    U = gates.X(target=control) + gates.Y(target=qubit, power=angle, control=control)
    variables = {angle: power}
    H = paulis.X(qubit=qubit)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, numpy.sin(angle(variables) * (numpy.pi)), atol=1.e-4))
    assert (numpy.isclose(dE, numpy.pi * numpy.cos(angle(variables) * (numpy.pi)), atol=1.e-4))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("power", numpy.random.uniform(0.0, 2.0, 1))
@pytest.mark.parametrize("controlled", [False, True])
def test_gradient_Z(simulator, power, controlled):
    """H·Z^power·H against H = Y: E = -sin(pi*power), dE = -pi*cos(pi*power)."""
    qubit = 0
    control = 1
    # single Variable (a redundant duplicate re-definition was removed;
    # tequila variables resolve by name)
    angle = Variable(name="angle")
    if controlled:
        U = gates.X(target=control) + gates.H(target=qubit) + gates.Z(target=qubit, power=angle,
                                                                      control=control) + gates.H(target=qubit)
    else:
        U = gates.H(target=qubit) + gates.Z(target=qubit, power=angle) + gates.H(target=qubit)
    variables = {angle: power}
    H = paulis.Y(qubit=qubit)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, -numpy.sin(angle(variables) * (numpy.pi)), atol=1.e-4))
    assert (numpy.isclose(dE, -numpy.pi * numpy.cos(angle(variables) * (numpy.pi)), atol=1.e-4))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("power", numpy.random.uniform(0.0, 2.0, 1))
@pytest.mark.parametrize("controls", [1, 2, 3])
def test_gradient_deep_controlled_Z(simulator, power, controls):
    """Same as test_gradient_Z but with a ladder of 1-3 control qubits."""
    if controls > 2 and simulator == "qiskit":
        # does not work yet
        return
    qubit = 0
    control = [i for i in range(1, controls + 1)]
    # single Variable (a redundant duplicate re-definition was removed;
    # tequila variables resolve by name)
    angle = Variable(name="angle")
    U = gates.X(target=control) + gates.H(target=qubit) + gates.Z(target=qubit, power=angle, control=control) + gates.H(
        target=qubit)
    variables = {angle: power}
    H = paulis.Y(qubit=qubit)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, -numpy.sin(angle(variables) * (numpy.pi)), atol=1.e-4))
    assert (numpy.isclose(dE, -numpy.pi * numpy.cos(angle(variables) * (numpy.pi)), atol=1.e-4))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("power", numpy.random.uniform(0.0, 2.0 * numpy.pi, 1))
@pytest.mark.parametrize("controlled", [False, True])
def test_gradient_H(simulator, power, controlled):
    """Power-of-Hadamard gate against H = X: E = (1 - cos(pi*power))/2."""
    qubit = 0
    control = 1
    angle = Variable(name="angle")
    variables = {angle: power}
    H = paulis.X(qubit=qubit)
    if controlled:
        U = gates.X(target=control) + gates.H(target=qubit, control=control, power=angle)
    else:
        U = gates.H(target=qubit, power=angle)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    expected_energy = -numpy.cos(angle(variables) * (numpy.pi)) / 2 + 0.5
    assert (numpy.isclose(E, expected_energy, atol=1.e-4))
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator)
    expected_gradient = numpy.pi * numpy.sin(angle(variables) * (numpy.pi)) / 2
    assert (numpy.isclose(dE, expected_gradient, atol=1.e-4))
@pytest.mark.parametrize("simulator", simulators)
@pytest.mark.parametrize("power", numpy.random.uniform(0.0, 2.0 * numpy.pi, 1))
@pytest.mark.parametrize("controls", [1, 2, 3])
def test_gradient_deep_H(simulator, power, controls):
    """Same as test_gradient_H but with a ladder of 1-3 control qubits."""
    if controls > 2 and simulator == "qiskit":
        # does not work yet
        return
    qubit = 0
    angle = Variable(name="angle")
    variables = {angle: power}
    control = [i for i in range(1, controls + 1)]
    H = paulis.X(qubit=qubit)
    U = gates.X(target=control) + gates.H(target=qubit, control=control, power=angle)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    assert (numpy.isclose(E, -numpy.cos(angle(variables) * (numpy.pi)) / 2 + 0.5, atol=1.e-4))
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(dE, numpy.pi * numpy.sin(angle(variables) * (numpy.pi)) / 2, atol=1.e-4))
| 41.699095
| 120
| 0.639195
|
4a0974decd894c0362ae1ba158c59f5673a68f87
| 6,041
|
py
|
Python
|
friendship/views.py
|
zhouye/shareit
|
31f86c4e72a1994eeb9ffe433134894bd0d72503
|
[
"MIT"
] | null | null | null |
friendship/views.py
|
zhouye/shareit
|
31f86c4e72a1994eeb9ffe433134894bd0d72503
|
[
"MIT"
] | null | null | null |
friendship/views.py
|
zhouye/shareit
|
31f86c4e72a1994eeb9ffe433134894bd0d72503
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.decorators import login_required
from django.conf import settings
try:
from django.contrib.auth import get_user_model
user_model = get_user_model()
except ImportError:
from django.contrib.auth.models import User
user_model = User
from django.shortcuts import render, get_object_or_404, redirect
from friendship.exceptions import AlreadyExistsError
from friendship.models import Friend, Follow, FriendshipRequest
def get_friendship_context_object_name():
    """Template-context key for a single user (``FRIENDSHIP_CONTEXT_OBJECT_NAME``
    setting, default ``'user'``). Converted from a lambda assignment (PEP 8 E731)."""
    return getattr(settings, 'FRIENDSHIP_CONTEXT_OBJECT_NAME', 'user')


def get_friendship_context_object_list_name():
    """Template-context key for a list of users
    (``FRIENDSHIP_CONTEXT_OBJECT_LIST_NAME`` setting, default ``'users'``)."""
    return getattr(settings, 'FRIENDSHIP_CONTEXT_OBJECT_LIST_NAME', 'users')
def view_friends(request, username, template_name='friendship/friend/user_list.html'):
    """ View the friends of a user (404 if ``username`` does not exist). """
    viewed_user = get_object_or_404(user_model, username=username)
    context = {
        get_friendship_context_object_name(): viewed_user,
        'friends': Friend.objects.friends(viewed_user),
    }
    return render(request, template_name, context)
@login_required
def friendship_add_friend(request, to_username, template_name='friendship/friend/add.html'):
    """ Create a FriendshipRequest

    GET renders a confirmation form; POST sends a friend request from the
    logged-in user to ``to_username`` and redirects to the request list.
    A duplicate request is reported via ``ctx['errors']`` instead of raising.
    """
    ctx = {'to_username': to_username}
    if request.method == 'POST':
        to_user = user_model.objects.get(username=to_username)
        from_user = request.user
        try:
            Friend.objects.add_friend(from_user, to_user)
        except AlreadyExistsError as e:
            ctx['errors'] = ["%s" % e]
        else:
            return redirect('friendship_request_list')
    return render(request, template_name, ctx)
@login_required
def friendship_accept(request, friendship_request_id):
    """ Accept a friendship request

    POST accepts and redirects to the user's friend list; any other method
    redirects back to the request's detail page.
    NOTE(review): no check that request.user is the addressee of the request —
    presumably enforced elsewhere; verify.
    """
    if request.method == 'POST':
        f_request = get_object_or_404(FriendshipRequest, id=friendship_request_id)
        f_request.accept()
        return redirect('friendship_view_friends', username=request.user.username)
    return redirect('friendship_requests_detail', friendship_request_id=friendship_request_id)
@login_required
def friendship_reject(request, friendship_request_id):
    """ Reject a friendship request (POST only; otherwise show the detail page). """
    if request.method != 'POST':
        return redirect('friendship_requests_detail', friendship_request_id=friendship_request_id)
    pending_request = get_object_or_404(FriendshipRequest, id=friendship_request_id)
    pending_request.reject()
    return redirect('friendship_request_list')
@login_required
def friendship_cancel(request, friendship_request_id):
    """ Cancel a previously created friendship_request_id

    POST cancels the request and redirects to the request list; any other
    method redirects back to the request's detail page.
    """
    if request.method == 'POST':
        f_request = get_object_or_404(FriendshipRequest, id=friendship_request_id)
        f_request.cancel()
        return redirect('friendship_request_list')
    return redirect('friendship_requests_detail', friendship_request_id=friendship_request_id)
@login_required
def friendship_request_list(request, template_name='friendship/friend/requests_list.html'):
    """ View unread and read friendship requests """
    # friendship_requests = Friend.objects.requests(request.user)
    # NOTE(review): this queryset is NOT scoped to request.user — it lists every
    # not-yet-rejected request site-wide; confirm this is intended.
    friendship_requests = FriendshipRequest.objects.filter(rejected__isnull=True)
    return render(request, template_name, {'requests': friendship_requests})
@login_required
def friendship_request_list_rejected(request, template_name='friendship/friend/requests_list.html'):
    """ View rejected friendship requests

    Bugfix: previously filtered ``rejected__isnull=True``, which selects the
    *pending* requests (identical to friendship_request_list — an apparent
    copy-paste error). Rejected requests are those whose ``rejected`` field
    is set, i.e. ``rejected__isnull=False``.
    """
    # friendship_requests = Friend.objects.rejected_requests(request.user)
    friendship_requests = FriendshipRequest.objects.filter(rejected__isnull=False)
    return render(request, template_name, {'requests': friendship_requests})
@login_required
def friendship_requests_detail(request, friendship_request_id, template_name='friendship/friend/request.html'):
    """ View a particular friendship request (404 when the id is unknown). """
    requested = get_object_or_404(FriendshipRequest, id=friendship_request_id)
    return render(request, template_name, {'friendship_request': requested})
def followers(request, username, template_name='friendship/follow/followers_list.html'):
    """ List this user's followers

    404s when ``username`` does not exist; the user is exposed in the context
    under the configurable friendship context key.
    """
    user = get_object_or_404(user_model, username=username)
    followers = Follow.objects.followers(user)
    return render(request, template_name, {get_friendship_context_object_name(): user, 'followers': followers})
def following(request, username, template_name='friendship/follow/following_list.html'):
    """ List who this user follows (404 if ``username`` does not exist). """
    followed_user = get_object_or_404(user_model, username=username)
    context = {
        get_friendship_context_object_name(): followed_user,
        'following': Follow.objects.following(followed_user),
    }
    return render(request, template_name, context)
@login_required
def follower_add(request, followee_username, template_name='friendship/follow/add.html'):
    """ Create a following relationship

    GET renders a confirmation form; POST makes request.user follow
    ``followee_username`` and redirects to the follower's "following" list.
    An already-existing follow is reported via ``ctx['errors']``.
    """
    ctx = {'followee_username': followee_username}
    if request.method == 'POST':
        followee = user_model.objects.get(username=followee_username)
        follower = request.user
        try:
            Follow.objects.add_follower(follower, followee)
        except AlreadyExistsError as e:
            ctx['errors'] = ["%s" % e]
        else:
            return redirect('friendship_following', username=follower.username)
    return render(request, template_name, ctx)
@login_required
def follower_remove(request, followee_username, template_name='friendship/follow/remove.html'):
    """ Remove a following relationship

    POST removes the follow edge from request.user to ``followee_username``
    and redirects to the follower's "following" list; GET renders a
    confirmation template.
    """
    if request.method == 'POST':
        followee = user_model.objects.get(username=followee_username)
        follower = request.user
        Follow.objects.remove_follower(follower, followee)
        return redirect('friendship_following', username=follower.username)
    return render(request, template_name, {'followee_username': followee_username})
def all_users(request, template_name="friendship/user_actions.html"):
    """ Render every registered user under the configurable list context key. """
    context = {get_friendship_context_object_list_name(): user_model.objects.all()}
    return render(request, template_name, context)
| 39.227273
| 115
| 0.754014
|
4a09750be30f727468b7bca9c6090e3d2920f906
| 396
|
py
|
Python
|
trial/migrations/0028_globalchecker_new_orders.py
|
Alex394540/django_supplement_site
|
0e1bb44608d9273968a7a65e2796ad388f2e72f5
|
[
"MIT"
] | null | null | null |
trial/migrations/0028_globalchecker_new_orders.py
|
Alex394540/django_supplement_site
|
0e1bb44608d9273968a7a65e2796ad388f2e72f5
|
[
"MIT"
] | null | null | null |
trial/migrations/0028_globalchecker_new_orders.py
|
Alex394540/django_supplement_site
|
0e1bb44608d9273968a7a65e2796ad388f2e72f5
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.3 on 2018-04-08 17:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``new_orders`` boolean flag (default False) to GlobalChecker."""
    dependencies = [
        ('trial', '0027_drug_product_image'),
    ]
    operations = [
        migrations.AddField(
            model_name='globalchecker',
            name='new_orders',
            field=models.BooleanField(default=False),
        ),
    ]
| 20.842105
| 53
| 0.608586
|
4a0975299c2b73023d3bd4bbb9adcfb1c55e3ced
| 668
|
py
|
Python
|
test/test-species-name_suggest.py
|
bartaelterman/pygbif
|
110e0cbcd8cb2b1813f6e27d66e95a95579d4643
|
[
"MIT"
] | 37
|
2015-03-20T13:50:27.000Z
|
2021-07-10T11:23:16.000Z
|
test/test-species-name_suggest.py
|
bartaelterman/pygbif
|
110e0cbcd8cb2b1813f6e27d66e95a95579d4643
|
[
"MIT"
] | 82
|
2015-10-30T06:12:48.000Z
|
2021-07-13T12:20:57.000Z
|
test/test-species-name_suggest.py
|
bartaelterman/pygbif
|
110e0cbcd8cb2b1813f6e27d66e95a95579d4643
|
[
"MIT"
] | 12
|
2015-03-20T13:50:29.000Z
|
2020-09-23T08:53:46.000Z
|
"""Tests for species module - name_suggest methods"""
import vcr
import re
from pygbif import species
@vcr.use_cassette("test/vcr_cassettes/test_name_suggest.yaml")
def test_name_suggest():
    "species.name_suggest - basic test"
    # Replays a recorded GBIF HTTP response via vcr; every suggestion's
    # canonicalName should contain the query string.
    res = species.name_suggest(q="Puma concolor")
    assert list == res.__class__
    assert True == all(
        [bool(re.search("Puma concolor", z["canonicalName"])) for z in res]
    )
@vcr.use_cassette("test/vcr_cassettes/test_name_suggest_paging.yaml")
def test_name_suggest_paging():
    "species.name_suggest - paging"
    # The limit parameter caps the number of suggestions returned.
    res = species.name_suggest(q="Aso", limit=3)
    assert list == res.__class__
    assert 3 == len(res)
| 29.043478
| 75
| 0.714072
|
4a09754fc4f5619b451a9ab8311237a611f7c031
| 503
|
py
|
Python
|
src/56.py
|
cloudzfy/euler
|
b82efad753ee98375fd40ec4e3989be57828e82c
|
[
"MIT"
] | 12
|
2016-10-19T09:03:20.000Z
|
2021-01-10T10:53:23.000Z
|
src/56.py
|
cloudzfy/euler
|
b82efad753ee98375fd40ec4e3989be57828e82c
|
[
"MIT"
] | null | null | null |
src/56.py
|
cloudzfy/euler
|
b82efad753ee98375fd40ec4e3989be57828e82c
|
[
"MIT"
] | 6
|
2018-09-12T03:13:58.000Z
|
2021-07-07T00:29:43.000Z
|
# A googol (10^100) is a massive number: one followed by one-hundred zeros;
# 100^100 is almost unimaginably large: one followed by two-hundred zeros.
# Despite their size, the sum of the digits in each number is only 1.
# Considering natural numbers of the form, ab, where a, b < 100, what is the
# maximum digital sum?
def get_digital_sum(num):
    """Return the sum of the decimal digits of ``num``."""
    return sum(map(int, str(num)))
# Project Euler 56: search all a**b with 1 <= a, b < 100 for the maximum
# digital sum.
ans = 0
for a in range(1, 100):
    for b in range(1, 100):
        ans = max(ans, get_digital_sum(pow(a, b)))
# Use the print() function form: the original `print ans` statement is a
# SyntaxError on Python 3; the parenthesized call behaves identically on
# Python 2 (prints one expression) and Python 3.
print(ans)
| 29.588235
| 76
| 0.707753
|
4a09763c16677f3dcbd047cf9cfb0478314ef921
| 2,006
|
py
|
Python
|
json_to_csv.py
|
ll320/NeuronCounting_Keras_Retinanet
|
c53b6a34ea354719808c159c9d993b114d7916a3
|
[
"Apache-2.0"
] | null | null | null |
json_to_csv.py
|
ll320/NeuronCounting_Keras_Retinanet
|
c53b6a34ea354719808c159c9d993b114d7916a3
|
[
"Apache-2.0"
] | null | null | null |
json_to_csv.py
|
ll320/NeuronCounting_Keras_Retinanet
|
c53b6a34ea354719808c159c9d993b114d7916a3
|
[
"Apache-2.0"
] | null | null | null |
import json
import os, glob
import cv2
import matplotlib.pyplot as plt
def read_image_paths():
    # Collect the training image paths (sorted for a deterministic order)
    # and echo them for debugging.
    imgs_path = sorted(glob.glob(os.path.join('data/train_2', '*.png')))
    print(imgs_path)
    return imgs_path
def read_img(path):
    # Display the image at ``path`` for visual inspection; always returns None.
    # NOTE: cv2.imread returns BGR, so matplotlib shows swapped colors here.
    img = cv2.imread(path)
    plt.figure()
    plt.imshow(img)
    plt.show()
    return None
def location_info():
    # Parse the labeling-tool export "training_2.json" and convert every
    # bounding box from (left, top, width, height) to corner coordinates
    # [x1, y1, x2, y2], keyed by image file name ('External ID').
    example = "training_2.json"
    in_file = open(example, "r")
    new_variable = json.load(in_file)
    in_file.close()
    # Debug peek at the first three boxes of the first record.
    print(new_variable[0]['Label']['objects'][0]['bbox'])
    print(new_variable[0]['Label']['objects'][1]['bbox'])
    print(new_variable[0]['Label']['objects'][2]['bbox'])
    locations = dict()
    for j in range(len(new_variable)):
        bbox_num = new_variable[j]['Label']['objects']
        key = new_variable[j]['External ID']
        locations[key] = []
        for i in range(len(bbox_num)):
            locations[key].append([])
            locations[key][-1].append(bbox_num[i]['bbox']['left'])
            locations[key][-1].append(bbox_num[i]['bbox']['top'])
            locations[key][-1].append(bbox_num[i]['bbox']['left'] + bbox_num[i]['bbox']['width'])
            locations[key][-1].append(bbox_num[i]['bbox']['top'] + bbox_num[i]['bbox']['height'])
    print(locations)
    return locations
def write_csv(img_path, locations):
    """Append one CSV row per bounding box for ``img_path`` to ``img.csv``.

    Each row is ``path,x1,y1,x2,y2,neuron``. When ``locations`` is None the
    image is still recorded as ``path,,neuron`` (no annotation).
    """
    print("start writing csv file...")
    with open('img.csv', 'a', newline='') as out:
        if locations is None:
            out.write('{},'.format(img_path))
            out.write(',')
            out.write('neuron\n')
        else:
            for box in locations:
                out.write('{},'.format(img_path))
                for coordinate in box:
                    out.write('{},'.format(coordinate))
                out.write('neuron\n')
if __name__ == '__main__':
    # Pipeline: list images, load the annotation lookup once, then emit one
    # CSV line per bounding box per image (KeyError if an image lacks labels).
    imgs_path = read_image_paths()
    #for i in imgs_path:
    # read_img(i)
    locations = location_info()
    for p in imgs_path:
        img_name = os.path.basename(p)
        write_csv(p, locations[img_name])
| 29.5
| 97
| 0.573779
|
4a097689f2215353c6c4c533af1a6d2608183282
| 7,044
|
py
|
Python
|
elong_2018032810/util.py
|
mannuan/pyspider_script
|
f4c988912e1099eacd0322b4e9c3a87eaaaa526f
|
[
"Apache-2.0"
] | 9
|
2018-08-28T07:53:43.000Z
|
2019-07-09T07:55:52.000Z
|
qunar_2018032720/util.py
|
mannuan/pyspider_script
|
f4c988912e1099eacd0322b4e9c3a87eaaaa526f
|
[
"Apache-2.0"
] | null | null | null |
qunar_2018032720/util.py
|
mannuan/pyspider_script
|
f4c988912e1099eacd0322b4e9c3a87eaaaa526f
|
[
"Apache-2.0"
] | null | null | null |
#-*- coding:utf-8 -*-
import time,random,re,sys,json
from pymongo import MongoClient
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
reload(sys)  # Python 2 only: re-expose setdefaultencoding
sys.setdefaultencoding('utf8')  # force utf-8 so Chinese text prints/stores cleanly
from driver import *  # wildcard import; presumably supplies getPhantomJsMobileDriver — verify
# MongoDB collections used as the crawl sink (local mongod assumed running).
conn = MongoClient('localhost', 27017)
db = conn.mafengwo_qiandaohu
shops = db.shops
comments = db.comments
def DropDown(driver):
    # Scroll the browser window to the bottom so lazily loaded items appear.
    driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
def PageTimeout(exestr):
    # Execute the statement string, treating any failure as a page timeout.
    # NOTE(review): Python 2 `exec` statement syntax; the bare Exception catch
    # also swallows unrelated errors — consider narrowing to TimeoutException.
    try:
        exec exestr
    except Exception:
        print '页面超时'
def getCommentInfo(shop_url):
    """Scrape all user comments for one shop from its mobile comment page.

    Rewrites the desktop detail URL into the m.mafengwo.cn comment URL,
    scrolls until no new comment items load, then upserts every comment
    into the `comments` collection (keyed by shop_url + commenter +
    comment body).  Returns the shop's overall grade (float) read from
    the page header.
    """
    driver = getPhantomJsMobileDriver()
    # The poi id is the last path segment of the desktop URL.
    shop_url = 'https://m.mafengwo.cn/poi/comment_{}'.format(shop_url.split('/')[-1])
    driver.get(shop_url)
    time.sleep(1)
    shop_grade = float(driver.find_element_by_css_selector('body > div.comment > div.bd > div.score > p:nth-child(1) > strong').text)
    # Infinite scroll: stop once a scroll no longer adds comment items.
    while(True):
        comment_len = len(driver.find_elements_by_css_selector('body > div.comment > div.list > ul > li'))
        DropDown(driver)
        time.sleep(0.5)
        if len(driver.find_elements_by_css_selector('body > div.comment > div.list > ul > li')) == comment_len:
            break
    for each in driver.find_elements_by_css_selector('body > div.comment > div.list > ul > li'):
        comment_user_url = each.find_element_by_css_selector('dl > dt > a').get_attribute('href')
        comment_user_img = each.find_element_by_css_selector('dl > dt > a > img').get_attribute('src')
        # The commenter line mixes name and level; 'Lv' splits them apart.
        comment_user_name = each.find_element_by_css_selector('dl > dd > p').text.split('Lv')[0]
        comment_user_grade = each.find_element_by_css_selector('dl > dd > p > span').text
        comment_time = each.find_element_by_css_selector('dl > dd > div.time').text
        comment_content = each.find_element_by_css_selector('div.context').text
        content_img_url = list()
        for each1 in each.find_elements_by_css_selector('div.photos.clearfix > a'):
            content_img_url.append(each1.get_attribute('href'))
        crawl_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))  # crawl timestamp
        print comment_user_name,comment_time,comment_content
        comment_data = {
            'shop_url' : shop_url,
            'comment_user_url': comment_user_url,
            'comment_user_img': comment_user_img,
            'comment_user_name': comment_user_name,
            'comment_user_grade' : comment_user_grade,
            'comment_time' : comment_time,
            'comment_content': comment_content,
            'content_img_url' : content_img_url,
            'crawl_time' : crawl_time
        }
        # Upsert: insert new comments, refresh mutable fields on reruns.
        if len(list(comments.find({
            'shop_url' : shop_url,
            'comment_user_url': comment_user_url,
            'comment_content': comment_content
        }))) == 0:
            comments.insert(comment_data)
        else:
            comments.update({
                'shop_url' : shop_url,
                'comment_user_url': comment_user_url,
                'comment_content': comment_content
            },{'$set': {
                'comment_user_img': comment_user_img,
                'comment_user_name': comment_user_name,
                'comment_user_grade': comment_user_grade,
                'comment_time': comment_time,
                'crawl_time': crawl_time
            }
            })
    driver.quit()
    return shop_grade
def getShopInfo2(driver,shop_url):
    """Scrape the detail page of one sight already open in `driver`.

    Expands all collapsed sections, extracts opening hours / ticket
    price / transport / summary / phone / suggested-duration / address,
    and fetches the comment data (and overall grade) via getCommentInfo.
    Returns a dict of the scraped fields.
    """
    # Click every "展开全部" (expand all) toggle so full text is in the DOM.
    for each in driver.find_elements_by_link_text(u'展开全部'):
        each.click()
    shop_time=''
    shop_price=''
    shop_traffic = ''
    # The detail list has no stable per-field selector, so match on the
    # Chinese field label inside each <dl> block.
    for each in driver.find_elements_by_css_selector('body > div.container > div:nth-child(6) > div.mod.mod-detail > dl'):
        if '开放时间' in each.text:
            shop_time = each.text.replace('展开全部>','')
        elif '门票' in each.text:
            shop_price = each.text.replace('展开全部>','')
        elif '交通' in each.text:
            shop_traffic = each.text.replace('展开全部>','')
    # The remaining fields may be absent on some pages; fall back to ''.
    try:
        shop_summary = driver.find_element_by_css_selector('body > div.container > div:nth-child(6) > div.mod.mod-detail > div').text.replace('展开全部>','')
    except Exception:
        shop_summary = ''
    try:
        shop_phone = driver.find_element_by_css_selector('body > div.container > div:nth-child(6) > div.mod.mod-detail > ul > li.tel > div.content').text.replace('展开全部>','')
    except Exception:
        shop_phone = ''
    try:
        shop_time_refer = driver.find_element_by_css_selector('body > div.container > div:nth-child(6) > div.mod.mod-detail > ul > li.item-time').text.replace('展开全部>','')
    except Exception:
        shop_time_refer = ''
    print shop_time_refer
    shop_address = driver.find_element_by_css_selector('body > div.container > div:nth-child(6) > div.mod.mod-location > div.mhd > p').text.replace('展开全部>','')
    # Comments live on a separate mobile page; this also yields the grade.
    shop_grade = getCommentInfo(shop_url)
    shop_data = {'shop_time':shop_time,
                 'shop_price':shop_price,
                 'shop_traffic':shop_traffic,
                 'shop_summary':shop_summary,
                 'shop_phone':shop_phone,
                 'shop_time_refer':shop_time_refer,
                 'shop_address':shop_address,
                 'shop_grade':shop_grade}
    return shop_data
def getShopInfo(driver):
for each in driver.find_elements_by_css_selector('#_j_search_result_left > div > div > ul > li'):
shop_url = each.find_element_by_css_selector('div > div.ct-text > h3 > a').get_attribute('href')
shop_img = each.find_element_by_css_selector('div > div.flt1 > a > img').get_attribute('src')
shop_name = each.find_element_by_css_selector('div > div.ct-text > h3 > a').text
shop_comment = int(re.sub(r'[^\d]*',r'',each.find_element_by_css_selector('div > div.ct-text > ul > li:nth-child(2) > a').text))
shop_diary = int(re.sub(r'[^\d]*',r'',driver.find_element_by_css_selector('div > div.ct-text > ul > li:nth-child(3) > a').text))
if '景点' not in shop_name:
continue
shop_name = shop_name.split('-')[-1].strip()
print shop_name
each.find_element_by_css_selector('div > div.ct-text > h3 > a').click()
driver.switch_to_window(driver.window_handles[1])#切换到评论窗口
shop_data = getShopInfo2(driver,shop_url)#获取评论数据
driver.close()#关闭评论窗口
driver.switch_to_window(driver.window_handles[0])#切换为店铺列表页面
crawl_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) # 爬虫的时间
shop_data = dict(shop_data,**{
'shop_url' : shop_url,
'shop_img' : shop_img,
'shop_name' : shop_name,
'shop_comment' : shop_comment,
'shop_diary' : shop_diary,
'shop_type' : '景点',
'crawl_time' : crawl_time
})
if len(list(shops.find({
'shop_url': shop_url,
}))) == 0:
shops.insert(shop_data)
else:
shops.update({
'shop_url': shop_url,
},{'$set': shop_data
})
| 45.153846
| 173
| 0.626065
|
4a0976a9b7656b1e3ac4091f01882f660c84ad18
| 13,293
|
py
|
Python
|
python/api/cluster/serializers.py
|
AKhodus/adcm
|
98dbf22af3f1c6afa94505e9acaff0ac4088a602
|
[
"Apache-2.0"
] | null | null | null |
python/api/cluster/serializers.py
|
AKhodus/adcm
|
98dbf22af3f1c6afa94505e9acaff0ac4088a602
|
[
"Apache-2.0"
] | null | null | null |
python/api/cluster/serializers.py
|
AKhodus/adcm
|
98dbf22af3f1c6afa94505e9acaff0ac4088a602
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import IntegrityError
from rest_framework import serializers
import cm.api
import cm.job
import cm.status_api
from cm.logger import log # pylint: disable=unused-import
from cm.errors import AdcmEx
from cm.models import Action, Cluster, Host, Prototype, ServiceComponent
from api.api_views import check_obj, hlink, filter_actions, get_upgradable_func
from api.api_views import UrlField, CommonAPIURL, ObjectURL
from api.action.serializers import ActionShort
from api.component.serializers import ComponentDetailSerializer
from api.host.serializers import HostSerializer
def get_cluster_id(obj):
    """Resolve the cluster id behind a config's object reference.

    The reference points either at a ClusterObject (service) — in which
    case the cluster is one hop further — or directly at a cluster owner.
    """
    ref = obj.obj_ref
    if hasattr(ref, 'clusterobject'):
        return ref.clusterobject.cluster.id
    return ref.cluster.id
class ClusterSerializer(serializers.Serializer):
    """Base cluster representation; handles create and rename/update."""

    id = serializers.IntegerField(read_only=True)
    prototype_id = serializers.IntegerField(help_text='id of cluster type')
    name = serializers.CharField(help_text='cluster name')
    description = serializers.CharField(help_text='cluster description', required=False)
    state = serializers.CharField(read_only=True)
    url = hlink('cluster-details', 'id', 'cluster_id')

    def validate_prototype_id(self, prototype_id):
        """Ensure the prototype exists and is of type 'cluster'."""
        return check_obj(Prototype, {'id': prototype_id, 'type': 'cluster'})

    def create(self, validated_data):
        """Create a cluster; duplicate names surface as CLUSTER_CONFLICT."""
        try:
            return cm.api.add_cluster(
                validated_data.get('prototype_id'),
                validated_data.get('name'),
                validated_data.get('description', ''),
            )
        except IntegrityError:
            raise AdcmEx("CLUSTER_CONFLICT") from None

    def update(self, instance, validated_data):
        """Rename/redescribe a cluster; name uniqueness enforced by the DB."""
        instance.name = validated_data.get('name', instance.name)
        instance.description = validated_data.get('description', instance.description)
        try:
            instance.save()
        except IntegrityError:
            msg = 'cluster with name "{}" already exists'.format(instance.name)
            raise AdcmEx("CLUSTER_CONFLICT", msg) from None
        return instance
class ClusterDetailSerializer(ClusterSerializer):
    """Full cluster view: related-resource links, live status and issues."""

    # stack = serializers.JSONField(read_only=True)
    issue = serializers.SerializerMethodField()
    bundle_id = serializers.IntegerField(read_only=True)
    edition = serializers.CharField(read_only=True)
    license = serializers.CharField(read_only=True)
    action = CommonAPIURL(view_name='object-action')
    service = ObjectURL(view_name='service')
    host = ObjectURL(view_name='host')
    hostcomponent = hlink('host-component', 'id', 'cluster_id')
    status = serializers.SerializerMethodField()
    status_url = hlink('cluster-status', 'id', 'cluster_id')
    config = CommonAPIURL(view_name='object-config')
    serviceprototype = hlink('cluster-service-prototype', 'id', 'cluster_id')
    upgrade = hlink('cluster-upgrade', 'id', 'cluster_id')
    imports = hlink('cluster-import', 'id', 'cluster_id')
    bind = hlink('cluster-bind', 'id', 'cluster_id')
    prototype = hlink('cluster-type-details', 'prototype_id', 'prototype_id')

    def get_issue(self, obj):
        """Aggregated issues for the cluster.

        NOTE(review): `cm.issue` is not imported in this module's import
        block — confirm it becomes reachable through the `cm` package.
        """
        return cm.issue.aggregate_issues(obj)

    def get_status(self, obj):
        """Current cluster status fetched from the status service."""
        return cm.status_api.get_cluster_status(obj.id)
class ClusterUISerializer(ClusterDetailSerializer):
    """Cluster view extended with UI-only fields (actions, prototype info)."""

    actions = serializers.SerializerMethodField()
    prototype_version = serializers.SerializerMethodField()
    prototype_name = serializers.SerializerMethodField()
    prototype_display_name = serializers.SerializerMethodField()
    upgradable = serializers.SerializerMethodField()
    get_upgradable = get_upgradable_func

    def get_actions(self, obj):
        """Actions available on this cluster, filtered by current state."""
        act_set = Action.objects.filter(prototype=obj.prototype)
        # filter_actions/ActionShort need the object and id in the context.
        self.context['object'] = obj
        self.context['cluster_id'] = obj.id
        actions = ActionShort(filter_actions(obj, act_set), many=True, context=self.context)
        return actions.data

    def get_prototype_version(self, obj):
        return obj.prototype.version

    def get_prototype_name(self, obj):
        return obj.prototype.name

    def get_prototype_display_name(self, obj):
        return obj.prototype.display_name
class StatusSerializer(serializers.Serializer):
    """Host-component status row enriched with display names and live status."""

    id = serializers.IntegerField(read_only=True)
    component_id = serializers.IntegerField(read_only=True)
    service_id = serializers.IntegerField(read_only=True)
    state = serializers.CharField(read_only=True, required=False)

    def to_representation(self, instance):
        """Add prototype/display names and the live status to the base fields."""
        data = super().to_representation(instance)
        data['component'] = instance.component.prototype.name
        data['component_display_name'] = instance.component.prototype.display_name
        data['host'] = instance.host.fqdn
        data['service_name'] = instance.service.prototype.name
        data['service_display_name'] = instance.service.prototype.display_name
        data['service_version'] = instance.service.prototype.version
        data['monitoring'] = instance.component.prototype.monitoring
        # Live status comes from the status service, not the DB.
        status = cm.status_api.get_hc_status(instance.host_id, instance.component_id)
        data['status'] = status
        return data
class HostComponentSerializer(serializers.Serializer):
    """One host-component mapping row with links and display names."""

    class MyUrlField(UrlField):
        # Detail URL needs both the cluster id and the mapping id.
        def get_kwargs(self, obj):
            return {
                'cluster_id': obj.cluster.id,
                'hs_id': obj.id,
            }

    id = serializers.IntegerField(read_only=True)
    host_id = serializers.IntegerField(help_text='host id')
    host = serializers.CharField(read_only=True)
    service_id = serializers.IntegerField()
    component = serializers.CharField(help_text='component name')
    component_id = serializers.IntegerField(read_only=True, help_text='component id')
    state = serializers.CharField(read_only=True, required=False)
    url = MyUrlField(read_only=True, view_name='host-comp-details')
    host_url = hlink('host-details', 'host_id', 'host_id')

    def to_representation(self, instance):
        """Replace raw fields with prototype/display names from relations."""
        data = super().to_representation(instance)
        data['component'] = instance.component.prototype.name
        data['component_display_name'] = instance.component.prototype.display_name
        data['host'] = instance.host.fqdn
        data['service_name'] = instance.service.prototype.name
        data['service_display_name'] = instance.service.prototype.display_name
        data['service_version'] = instance.service.prototype.version
        return data
class HostComponentUISerializer(serializers.Serializer):
    """UI payload for the mapping page: current hc plus selectable hosts/components."""

    hc = HostComponentSerializer(many=True, read_only=True)
    host = serializers.SerializerMethodField()
    component = serializers.SerializerMethodField()

    def get_host(self, obj):
        """All hosts of the cluster taken from the serializer context."""
        hosts = Host.objects.filter(cluster=self.context.get('cluster'))
        return HostSerializer(hosts, many=True, context=self.context).data

    def get_component(self, obj):
        """All components of the cluster taken from the serializer context."""
        comps = ServiceComponent.objects.filter(cluster=self.context.get('cluster'))
        return HCComponentSerializer(comps, many=True, context=self.context).data
class HostComponentSaveSerializer(serializers.Serializer):
    """Accepts and validates a full host-component map and applies it."""

    hc = serializers.JSONField()

    def validate_hc(self, hc):
        """Require a non-empty list of dicts with component/host/service ids."""
        if not hc:
            raise AdcmEx('INVALID_INPUT', 'hc field is required')
        if not isinstance(hc, list):
            raise AdcmEx('INVALID_INPUT', 'hc field should be a list')
        for item in hc:
            for key in ('component_id', 'host_id', 'service_id'):
                if key not in item:
                    msg = '"{}" sub-field is required'
                    raise AdcmEx('INVALID_INPUT', msg.format(key))
        return hc

    def create(self, validated_data):
        """Replace the cluster's host-component map with the validated one."""
        hc = validated_data.get('hc')
        return cm.api.add_hc(self.context.get('cluster'), hc)
class HCComponentSerializer(ComponentDetailSerializer):
    """Component view for the mapping UI, including its requirement tree."""

    service_id = serializers.IntegerField(read_only=True)
    service_name = serializers.SerializerMethodField()
    service_display_name = serializers.SerializerMethodField()
    service_state = serializers.SerializerMethodField()
    requires = serializers.SerializerMethodField()

    def get_service_state(self, obj):
        return obj.service.state

    def get_service_name(self, obj):
        return obj.service.prototype.name

    def get_service_display_name(self, obj):
        return obj.service.prototype.display_name

    def get_requires(self, obj):
        """Collect the (transitive) component requirements grouped for the UI.

        Returns None when the component prototype declares no requirements.
        """
        if not obj.prototype.requires:
            return None
        comp_list = {}

        def process_requires(req_list):
            # NOTE(review): comp_list is keyed by *component* name but is
            # iterated below as if keyed by service name, and the early
            # `return`s abort processing of the remaining req_list entries
            # rather than skipping a single one — confirm this is intended.
            for c in req_list:
                comp = Prototype.obj.get(
                    type='component',
                    name=c['component'],
                    parent__name=c['service'],
                    parent__bundle_id=obj.prototype.bundle_id,
                )
                if comp == obj.prototype:
                    return
                if comp.name not in comp_list:
                    comp_list[comp.name] = {'components': {}, 'service': comp.parent}
                if comp.name in comp_list[comp.name]['components']:
                    return
                comp_list[comp.name]['components'][comp.name] = comp
                if comp.requires:
                    process_requires(comp.requires)

        # def check_hc(comp):
        #     return HostComponent.objects.filter(cluster=obj.cluster, component__component=comp)

        process_requires(obj.requires)
        out = []
        for service_name, value in comp_list.items():
            comp_out = []
            service = value['service']
            for comp_name in value['components']:
                comp = value['components'][comp_name]
                comp_out.append(
                    {
                        'prototype_id': comp.id,
                        'name': comp_name,
                        'display_name': comp.display_name,
                    }
                )
            if not comp_out:
                continue
            out.append(
                {
                    'prototype_id': service.id,
                    'name': service_name,
                    'display_name': service.display_name,
                    'components': comp_out,
                }
            )
        return out
class BindSerializer(serializers.Serializer):
    """Read-only view of a bind between an importing and an exporting side."""

    id = serializers.IntegerField(read_only=True)
    export_cluster_id = serializers.IntegerField(read_only=True, source='source_cluster_id')
    export_cluster_name = serializers.CharField(read_only=True, source='source_cluster')
    export_cluster_prototype_name = serializers.SerializerMethodField()
    export_service_id = serializers.SerializerMethodField()
    export_service_name = serializers.SerializerMethodField()
    import_service_id = serializers.SerializerMethodField()
    import_service_name = serializers.SerializerMethodField()

    def get_export_cluster_prototype_name(self, obj):
        """Prototype name of the exporting cluster."""
        return obj.source_cluster.prototype.name

    def get_export_service_name(self, obj):
        """Name of the exporting service, or None for a cluster-level bind."""
        exporter = obj.source_service
        return exporter.prototype.name if exporter else None

    def get_export_service_id(self, obj):
        """Id of the exporting service, or None for a cluster-level bind."""
        exporter = obj.source_service
        return exporter.id if exporter else None

    def get_import_service_id(self, obj):
        """Id of the importing service, or None when the cluster imports."""
        return obj.service.id if obj.service else None

    def get_import_service_name(self, obj):
        """Name of the importing service, or None when the cluster imports."""
        return obj.service.prototype.name if obj.service else None
class ClusterBindSerializer(BindSerializer):
    """Bind view with a detail URL scoped to the importing cluster."""

    class MyUrlField(UrlField):
        # Detail URL needs both the bind id and the cluster id.
        def get_kwargs(self, obj):
            return {'bind_id': obj.id, 'cluster_id': obj.cluster.id}

    url = MyUrlField(read_only=True, view_name='cluster-bind-details')
class DoBindSerializer(serializers.Serializer):
    """Input serializer for creating a cluster-level bind."""

    id = serializers.IntegerField(read_only=True)
    export_cluster_id = serializers.IntegerField()
    export_service_id = serializers.IntegerField(required=False)
    export_cluster_name = serializers.CharField(read_only=True)
    export_cluster_prototype_name = serializers.CharField(read_only=True)

    def create(self, validated_data):
        """Bind the context cluster to the given exporting cluster/service."""
        export_cluster = check_obj(Cluster, validated_data.get('export_cluster_id'))
        return cm.api.bind(
            validated_data.get('cluster'),
            None,  # cluster-level import: no importing service
            export_cluster,
            validated_data.get('export_service_id', 0),
        )
class PostImportSerializer(serializers.Serializer):
    """Accepts a full bind list and rewrites all binds in one call."""

    bind = serializers.JSONField()

    def create(self, validated_data):
        """Apply the posted bind list to the context cluster/service."""
        bind = validated_data.get('bind')
        cluster = self.context.get('cluster')
        service = self.context.get('service')
        return cm.api.multi_bind(cluster, service, bind)
| 39.212389
| 96
| 0.677123
|
4a09772d53882cad66d1eedd9c72991fa3aeddbb
| 16,139
|
py
|
Python
|
src/utils/siftscience.py
|
ResearchHub/ResearchHub-Backend-Open
|
d36dca33afae2d442690694bb2ab17180d84bcd3
|
[
"MIT"
] | 18
|
2021-05-20T13:20:16.000Z
|
2022-02-11T02:40:18.000Z
|
src/utils/siftscience.py
|
ResearchHub/ResearchHub-Backend-Open
|
d36dca33afae2d442690694bb2ab17180d84bcd3
|
[
"MIT"
] | 109
|
2021-05-21T20:14:23.000Z
|
2022-03-31T20:56:10.000Z
|
src/utils/siftscience.py
|
ResearchHub/ResearchHub-Backend-Open
|
d36dca33afae2d442690694bb2ab17180d84bcd3
|
[
"MIT"
] | 4
|
2021-05-17T13:47:53.000Z
|
2022-02-12T10:48:21.000Z
|
import json
import sift.client
from ipware import get_client_ip
from django.apps import apps
from researchhub.celery import app
from researchhub.settings import SIFT_ACCOUNT_ID, SIFT_REST_API_KEY
from utils import sentry
# https://sift.com/resources/guides/content-abuse
# Module-wide Sift client shared by all functions and API wrappers below.
client = sift.Client(api_key=SIFT_REST_API_KEY, account_id=SIFT_ACCOUNT_ID)
def get_user_score(user_id):
    """Fetch the Sift risk scores for a user.

    Returns the decoded response body on success (previously this
    function only printed it and returned None — callers that ignored
    the result are unaffected), or None when the API call fails; the
    failure is logged to Sentry.
    """
    try:
        response = client.score(user_id)
        out = json.dumps(response.body)
        print(out)
        return response.body
    except sift.client.ApiException as e:
        sentry.log_error(e)
        print(e.api_error_message)
        return None
def label_bad_user(user_id, abuse_type, description=''):
    """Manually label a user as bad in Sift for the given abuse type."""
    # TODO: Finish this by determing how we plan to use it
    try:
        response = client.label(user_id, {
            '$is_bad': True,
            # optional fields
            '$abuse_type': abuse_type,
            '$description': description,
            '$source': 'django',
            '$analyst': 'dev@quantfive.org'
        })
        print(response.body)
    except sift.client.ApiException as e:
        sentry.log_error(e)
        print(e.api_error_message)
def unlabel_user(user_id):
    """Remove a previously applied content-abuse label from a user."""
    # TODO: Finish this by determing how we plan to use it
    try:
        response = client.unlabel(user_id, abuse_type='content_abuse')
        print(response.body)
    except sift.client.ApiException as e:
        sentry.log_error(e)
        print(e.api_error_message)
def get_tracked_content_score(tracked_content):
    """Extract the content-abuse score as a 0-100 percentage.

    Returns None when the tracked payload carries no score response.
    """
    response = tracked_content.get('score_response')
    if not response:
        return None
    raw = response['scores']['content_abuse']['score']
    return round(raw * 100, 1)
def update_user_risk_score(user, tracked_content):
    """Copy the Sift content-abuse score onto the user and re-check risk.

    No-op when tracked_content is falsy or carries no score. A falsy
    (zero) score is also skipped by the inner check.
    """
    if tracked_content:
        content_risk_score = get_tracked_content_score(tracked_content)
        if content_risk_score:
            user.sift_risk_score = content_risk_score
            user.save(update_fields=['sift_risk_score'])
            check_user_risk(user)  # may auto-suspend on a high score
def check_user_risk(user):
    """Auto-suspend users whose Sift risk score exceeds 90."""
    score = user.sift_risk_score
    if not score or score <= 90:
        return
    user.set_suspended(is_manual=False)
class DecisionsApi:
    """Thin wrapper around the Sift Decisions API."""

    def apply_bad_user_decision(self, content_creator, source='AUTOMATED_RULE', reporter=None):
        """Apply the 'looks bad' content-abuse decision to a whole user."""
        applyDecisionRequest = {
            'decision_id': 'looks_bad_content_abuse',
            'source': source,
            'analyst': reporter.email if reporter else 'analyst@researchhub.com',
            'description': 'User looks risky for content abuse',
            'reason': 'User looks risky for content abuse',
        }
        try:
            client.apply_user_decision(str(content_creator.id), applyDecisionRequest)
        except sift.client.ApiException as e:
            sentry.log_error(e)
            print(e)

    def apply_bad_content_decision(self, content_creator, content_id, source='AUTOMATED_RULE', reporter=None):
        """Apply the 'looks bad' decision to one piece of content."""
        applyDecisionRequest = {
            'decision_id': 'content_looks_bad_content_abuse',
            'source': source,
            'analyst': reporter.email if reporter else 'analyst@researchhub.com',
            'description': 'Auto flag of moderator-removed content',
            'reason': 'Auto flag of moderator-removed content',
        }
        try:
            client.apply_content_decision(str(content_creator.id), content_id, applyDecisionRequest)
        except sift.client.ApiException as e:
            sentry.log_error(e)
            print(e)
class EventsApi:
    """Wrapper around the Sift Events API.

    Each public track_* method builds request metadata and dispatches a
    celery task (applied locally via .apply with a countdown) that loads
    the models fresh by id and posts the event to Sift.

    NOTE(review): the celery_track_* tasks accept a `meta` argument but
    none of them uses it — confirm whether the browser/ip metadata was
    meant to be merged into the event properties.
    """

    def create_meta_properties(self, request, exclude_ip=False):
        """Build browser (and optionally client-ip) metadata from a request."""
        user_agent = request.META.get('HTTP_USER_AGENT', '')
        properties = {
            '$browser': {
                '$user_agent': user_agent
            }
        }
        if not exclude_ip:
            ip, is_routable = get_client_ip(request)
            if ip:
                properties['ip'] = ip
        return properties

    @staticmethod
    @app.task
    def celery_track_account(user_id, meta, update):
        """Post a $create_account / $update_account event for a user."""
        User = apps.get_model('user.User')
        user = User.objects.get(id=user_id)
        properties = {
            # Required Fields
            '$user_id': str(user.id),

            # Supported Fields
            '$user_email': user.email,
            '$name': f'{user.first_name} {user.last_name}',
            '$social_sign_on_type': '$google',
        }
        track_type = '$update_account' if update else '$create_account'

        try:
            response = client.track(track_type, properties, return_score=False)
            print(response.body)
            return response.body
        except sift.client.ApiException as e:
            sentry.log_error(e)
            print(e.api_error_message)

    def track_account(self, user, request, update=False):
        """Track an account create/update; returns the Sift response body."""
        meta = self.create_meta_properties(request, exclude_ip=True)
        celery_response = self.celery_track_account.apply(
            (user.id, meta, update),
            priority=4,
            countdown=10,
        )
        tracked_account = celery_response.get()
        return tracked_account

    def track_login(self, user, login_status, request):
        """Track a login attempt; returns the Sift response body."""
        # https://sift.com/developers/docs/python/events-api/reserved-events/login
        meta = self.create_meta_properties(request)
        celery_response = self.celery_track_login.apply(
            (user.id, meta, login_status),
            priority=4,
            countdown=10
        )
        tracked_login = celery_response.get()
        return tracked_login

    @staticmethod
    @app.task
    def celery_track_login(user_id, meta, login_status):
        """Post a $login event for a user."""
        User = apps.get_model('user.User')
        user = User.objects.get(id=user_id)
        properties = {
            '$user_id': str(user.id),
            '$login_status': login_status,
            '$username': user.username,
        }

        try:
            response = client.track('$login', properties, return_score=False)
            print(response.body)
            return response.body
        except sift.client.ApiException as e:
            sentry.log_error(e)
            print(e.api_error_message)

    def track_content_comment(
        self,
        user,
        comment,
        request,
        is_thread=False,
        update=False
    ):
        """Track creation/update of a comment or thread."""
        meta = self.create_meta_properties(request)
        celery_response = self.celery_track_content_comment.apply(
            (
                user.id,
                comment.id,
                comment.__class__.__name__,
                meta,
                is_thread,
                update
            ),
            priority=4,
            countdown=10
        )
        tracked_comment = celery_response.get()
        return tracked_comment

    @staticmethod
    @app.task
    def celery_track_content_comment(
        user_id,
        comment_id,
        comment_type,
        meta,
        is_thread,
        update,
    ):
        """Post a $create_content/$update_content event for a comment."""
        User = apps.get_model('user.User')
        user = User.objects.get(id=user_id)
        # comment_type is the discussion model class name (Thread/Comment/...).
        Discussion = apps.get_model(f'discussion.{comment_type}')
        comment = Discussion.objects.get(id=comment_id)

        root_content_id = ''
        if comment.paper is not None:
            root_content_id = (
                f'{type(comment.paper).__name__}_{comment.paper.id}'
            )

        comment_properties = {
            # Required fields
            '$user_id': str(user.id),
            # must be unique across all content types
            '$content_id': f'{type(comment).__name__}_{comment.id}',

            # Recommended fields
            '$status': '$active',

            # Required $comment object
            '$comment': {
                '$body': comment.plain_text,
                '$contact_email': user.email,
                '$root_content_id': root_content_id,
            }
        }

        # Threads are top-level, so only non-thread comments have a parent.
        if not is_thread:
            comment_properties['$comment']['$parent_comment_id'] = (
                f'{type(comment.parent).__name__}_{comment.parent.id}'
            )

        track_type = '$update_content' if update else '$create_content'

        try:
            response = client.track(track_type, comment_properties, return_score=False)
            print(response.body)
            return response.body
        except sift.client.ApiException as e:
            sentry.log_error(e)
            print(e.api_error_message)

    def track_content_paper(self, user, paper, request, update=False):
        """Track creation/update of a paper."""
        meta = self.create_meta_properties(request)
        celery_response = self.celery_track_content_paper.apply(
            (user.id, paper.id, meta, update),
            priority=4,
            countdown=10,
        )
        tracked_paper = celery_response.get()
        return tracked_paper

    @staticmethod
    @app.task
    def celery_track_content_paper(user_id, paper_id, meta, update):
        """Post a $create_content/$update_content event for a paper."""
        User = apps.get_model('user.User')
        user = User.objects.get(id=user_id)
        Paper = apps.get_model('paper.Paper')
        paper = Paper.objects.get(id=paper_id)

        post_properties = {
            # Required fields
            '$user_id': str(user.id),
            '$content_id': f'{type(paper).__name__}_{paper.id}',

            # Recommended fields
            '$status': '$active',

            # Required $post object
            '$post': {
                '$subject': paper.title,
                '$body': paper.paper_title,
                '$contact_email': user.email,
                '$contact_address': {
                    '$name': f'{user.first_name} {user.last_name}',
                },
                '$categories': list(paper.hubs.values_list('slug', flat=True)),
            }
        }

        track_type = '$update_content' if update else '$create_content'

        try:
            response = client.track(track_type, post_properties, return_score=False)
            print(response.body)
            return response.body
        except sift.client.ApiException as e:
            sentry.log_error(e)
            print(e.api_error_message)

    def track_content_summary(self, user, summary, request, update=False):
        """Track creation/update of a summary."""
        meta = self.create_meta_properties(request)
        celery_response = self.celery_track_content_summary.apply(
            (user.id, summary.id, meta, update),
            priority=4,
            countdown=10,
        )
        tracked_summary = celery_response.get()
        return tracked_summary

    @staticmethod
    @app.task
    def celery_track_content_summary(user_id, summary_id, meta, update):
        """Post a $create_content/$update_content event for a summary."""
        User = apps.get_model('user.User')
        user = User.objects.get(id=user_id)
        Summary = apps.get_model('summary.Summary')
        summary = Summary.objects.get(id=summary_id)

        root_content_id = ''
        if summary.paper is not None:
            root_content_id = (
                f'{type(summary.paper).__name__}_{summary.paper.id}'
            )

        comment_properties = {
            # Required fields
            '$user_id': str(user.id),
            # must be unique across all content types
            '$content_id': f'{type(summary).__name__}_{summary.id}',

            # Recommended fields
            '$status': '$active',

            # Required $comment object
            '$comment': {
                '$body': summary.summary_plain_text,
                '$contact_email': user.email,
                '$root_content_id': root_content_id,
            }
        }

        track_type = '$update_content' if update else '$create_content'

        try:
            response = client.track(track_type, comment_properties, return_score=False)
            print(response.body)
            return response.body
        except sift.client.ApiException as e:
            sentry.log_error(e)
            print(e.api_error_message)

    def track_content_bullet_point(self, user, bullet_point, request, update=False):
        """Track creation/update of a bullet point."""
        meta = self.create_meta_properties(request)
        celery_response = self.celery_track_content_bullet_point.apply(
            (user.id, bullet_point.id, meta, update),
            priority=4,
            countdown=10,
        )
        tracked_bullet_point = celery_response.get()
        return tracked_bullet_point

    @staticmethod
    @app.task
    def celery_track_content_bullet_point(user_id, bullet_point_id, meta, update):
        """Post a $create_content/$update_content event for a bullet point."""
        User = apps.get_model('user.User')
        user = User.objects.get(id=user_id)
        BulletPoint = apps.get_model('bullet_point.BulletPoint')
        bullet_point = BulletPoint.objects.get(id=bullet_point_id)

        root_content_id = ''
        if bullet_point.paper is not None:
            root_content_id = (
                f'{type(bullet_point.paper).__name__}_{bullet_point.paper.id}'
            )

        comment_properties = {
            # Required fields
            '$user_id': str(user.id),
            # must be unique across all content types
            '$content_id': f'{type(bullet_point).__name__}_{bullet_point.id}',

            # Recommended fields
            '$status': '$active',

            # Required $comment object
            '$comment': {
                '$body': bullet_point.plain_text,
                '$contact_email': user.email,
                '$root_content_id': root_content_id,
            }
        }

        track_type = '$update_content' if update else '$create_content'

        try:
            response = client.track(track_type, comment_properties, return_score=False)
            print(response.body)
            return response.body
        except sift.client.ApiException as e:
            sentry.log_error(e)
            print(e.api_error_message)

    def track_content_vote(self, user, vote, request, update=False):
        """Track creation/update of a vote (any app's Vote model)."""
        meta = self.create_meta_properties(request)
        # The vote's app label tells us which Vote model to reload later.
        vote_type = vote.__module__.split('.')[0]
        celery_response =self.celery_track_content_vote.apply(
            (
                user.id,
                vote.id,
                vote_type,
                meta,
                update
            ),
            priority=4,
            countdown=10
        )
        tracked_vote = celery_response.get()
        return tracked_vote

    @staticmethod
    @app.task
    def celery_track_content_vote(
        user_id,
        vote_id,
        vote_type,
        meta,
        update
    ):
        """Post a $create_content/$update_content review event for a vote."""
        User = apps.get_model('user.User')
        user = User.objects.get(id=user_id)
        Vote = apps.get_model(f'{vote_type}.Vote')
        vote = Vote.objects.get(id=vote_id)
        rating = vote.vote_type
        review_properties = {
            '$user_id': str(user.id),
            '$content_id': f'{type(vote).__name__}_{vote.id}',
            '$status': '$active',
            '$review': {
                '$contact_email': user.email,
                '$rating': rating
            }
        }

        track_type = '$update_content' if update else '$create_content'

        try:
            response = client.track(track_type, review_properties, return_score=False)
            print(response.body)
            return response.body
        except sift.client.ApiException as e:
            sentry.log_error(e)
            print(e.api_error_message)

    def track_flag_content(self, user, content_id, referer_id):
        """Post a $flag_content event; no-op (returns None) without a user."""
        # https://sift.com/developers/docs/curl/events-api/reserved-events/flag-content
        if not user:
            return None
        properties = {
            '$user_id': str(user.id),
            '$content_id': content_id,
            '$flagged_by': str(referer_id),
        }
        try:
            response = client.track('$flag_content', properties)
            print(response.body)
        except sift.client.ApiException as e:
            sentry.log_error(e)
            print(e.api_error_message)

    def track_content_status(self):
        """Unfinished stub: builds an empty payload and sends nothing."""
        # https://sift.com/developers/docs/python/events-api/reserved-events/content-status
        # TODO: We might not need this?
        properties = {
            '$user_id': '',
            '$content_id': '',
            '$status': ''
        }
# Shared singleton instances imported by the rest of the codebase.
events_api = EventsApi()
decisions_api = DecisionsApi()
| 32.213573
| 110
| 0.585352
|
4a09780245a0ef09d18aa12974c69ee473d8fbae
| 7,475
|
py
|
Python
|
official/r1/mnist/mnist_tpu.py
|
zcdzcdzcd/models
|
a31b526a7617a152a138a865b5689bf5b59f655d
|
[
"Apache-2.0"
] | 3
|
2020-06-30T23:58:18.000Z
|
2020-07-24T08:47:28.000Z
|
official/r1/mnist/mnist_tpu.py
|
zcdzcdzcd/models
|
a31b526a7617a152a138a865b5689bf5b59f655d
|
[
"Apache-2.0"
] | 1
|
2021-03-31T21:37:41.000Z
|
2021-03-31T21:37:41.000Z
|
official/r1/mnist/mnist_tpu.py
|
zcdzcdzcd/models
|
a31b526a7617a152a138a865b5689bf5b59f655d
|
[
"Apache-2.0"
] | 2
|
2019-11-10T07:48:51.000Z
|
2020-02-04T04:17:41.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNIST model training using TPUs.
This program demonstrates training of the convolutional neural network model
defined in mnist.py on Google Cloud TPUs (https://cloud.google.com/tpu/).
If you are not interested in TPUs, you should ignore this file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# pylint: disable=g-bad-import-order
from absl import app as absl_app # pylint: disable=unused-import
import tensorflow as tf
# pylint: enable=g-bad-import-order
# For open source environment, add grandparent directory for import
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.path[0]))))
from official.r1.mnist import dataset # pylint: disable=wrong-import-position
from official.r1.mnist import mnist # pylint: disable=wrong-import-position
# Cloud TPU Cluster Resolver flags
tf.flags.DEFINE_string(
"tpu", default=None,
help="The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", default=None,
help="[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", default=None,
help="[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
# Model specific parameters
tf.flags.DEFINE_string("data_dir", "",
"Path to directory containing the MNIST dataset")
tf.flags.DEFINE_string("model_dir", None, "Estimator model_dir")
tf.flags.DEFINE_integer("batch_size", 1024,
"Mini-batch size for the training. Note that this "
"is the global batch size and not the per-shard batch.")
tf.flags.DEFINE_integer("train_steps", 1000, "Total number of training steps.")
tf.flags.DEFINE_integer("eval_steps", 0,
"Total number of evaluation steps. If `0`, evaluation "
"after training is skipped.")
tf.flags.DEFINE_float("learning_rate", 0.05, "Learning rate.")
tf.flags.DEFINE_bool("use_tpu", True, "Use TPUs rather than plain CPUs")
tf.flags.DEFINE_bool("enable_predict", True, "Do some predictions at the end")
tf.flags.DEFINE_integer("iterations", 50,
"Number of iterations per TPU training loop.")
tf.flags.DEFINE_integer("num_shards", 8, "Number of shards (TPU chips).")
FLAGS = tf.flags.FLAGS
def metric_fn(labels, logits):
accuracy = tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(logits, axis=1))
return {"accuracy": accuracy}
def model_fn(features, labels, mode, params):
"""model_fn constructs the ML model used to predict handwritten digits."""
del params
image = features
if isinstance(image, dict):
image = features["image"]
model = mnist.create_model("channels_last")
if mode == tf.estimator.ModeKeys.PREDICT:
logits = model(image, training=False)
predictions = {
'class_ids': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits),
}
return tf.contrib.tpu.TPUEstimatorSpec(mode, predictions=predictions)
logits = model(image, training=(mode == tf.estimator.ModeKeys.TRAIN))
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
learning_rate = tf.train.exponential_decay(
FLAGS.learning_rate,
tf.train.get_global_step(),
decay_steps=100000,
decay_rate=0.96)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
if FLAGS.use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=optimizer.minimize(loss, tf.train.get_global_step()))
if mode == tf.estimator.ModeKeys.EVAL:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, loss=loss, eval_metrics=(metric_fn, [labels, logits]))
def train_input_fn(params):
"""train_input_fn defines the input pipeline used for training."""
batch_size = params["batch_size"]
data_dir = params["data_dir"]
# Retrieves the batch size for the current shard. The # of shards is
# computed according to the input pipeline deployment. See
# `tf.contrib.tpu.RunConfig` for details.
ds = dataset.train(data_dir).cache().repeat().shuffle(
buffer_size=50000).batch(batch_size, drop_remainder=True)
return ds
def eval_input_fn(params):
batch_size = params["batch_size"]
data_dir = params["data_dir"]
ds = dataset.test(data_dir).batch(batch_size, drop_remainder=True)
return ds
def predict_input_fn(params):
batch_size = params["batch_size"]
data_dir = params["data_dir"]
# Take out top 10 samples from test data to make the predictions.
ds = dataset.test(data_dir).take(10).batch(batch_size)
return ds
def main(argv):
del argv # Unused.
tf.logging.set_verbosity(tf.logging.INFO)
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu,
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project
)
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True),
tpu_config=tf.contrib.tpu.TPUConfig(FLAGS.iterations, FLAGS.num_shards),
)
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.batch_size,
eval_batch_size=FLAGS.batch_size,
predict_batch_size=FLAGS.batch_size,
params={"data_dir": FLAGS.data_dir},
config=run_config)
# TPUEstimator.train *requires* a max_steps argument.
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_steps)
# TPUEstimator.evaluate *requires* a steps argument.
# Note that the number of examples used during evaluation is
# --eval_steps * --batch_size.
# So if you change --batch_size then change --eval_steps too.
if FLAGS.eval_steps:
estimator.evaluate(input_fn=eval_input_fn, steps=FLAGS.eval_steps)
# Run prediction on top few samples of test data.
if FLAGS.enable_predict:
predictions = estimator.predict(input_fn=predict_input_fn)
for pred_dict in predictions:
template = ('Prediction is "{}" ({:.1f}%).')
class_id = pred_dict['class_ids']
probability = pred_dict['probabilities'][class_id]
print(template.format(class_id, 100 * probability))
if __name__ == "__main__":
absl_app.run(main)
| 37.00495
| 80
| 0.713445
|
4a09784eb8503c0ad52de74d800b9acbe18c08fc
| 1,153
|
py
|
Python
|
cda/integration.py
|
Wellheor1/l2
|
d980210921c545c68fe9d5522bb693d567995024
|
[
"MIT"
] | null | null | null |
cda/integration.py
|
Wellheor1/l2
|
d980210921c545c68fe9d5522bb693d567995024
|
[
"MIT"
] | null | null | null |
cda/integration.py
|
Wellheor1/l2
|
d980210921c545c68fe9d5522bb693d567995024
|
[
"MIT"
] | null | null | null |
import json
import logging
from urllib.parse import urljoin, urlencode
import requests
from appconf.manager import SettingManager
logger = logging.getLogger(__name__)
def get_url(path, query=None):
if query is None:
query = {}
base = SettingManager.get_cda_base_url()
if not base or base == 'empty':
return {}
return urljoin(base, path) + ('?{}'.format(urlencode(query)) if query else '')
def make_request(path, query=None, as_json=True, **kwargs):
if query is None:
query = {}
try:
url = get_url(path, query=query)
headers = {"Content-Type": "application/json"}
data = requests.post(url, headers=headers, **kwargs)
if as_json:
return data.json()
return data.text
except Exception as e:
print(e) # noqa: T001
return {}
def get_required_signatures(service: str) -> dict:
return make_request('/required-signatures', {"title": str(service)})
def render_cda(service: str, direction_data: dict) -> dict:
return make_request('/render/njk.xml', as_json=False, data=json.dumps({"title": str(service), **direction_data}))
| 27.452381
| 117
| 0.652212
|
4a0978b78bb895bd9e68b36a09b5c46aa3d42eaf
| 125
|
py
|
Python
|
Parte 1/Semana 2/DigitoDaDezena.py
|
GabrielReira/Computer-Science-USP
|
47a3cb68ccaa08f7302fbd9d9905ca80cee432cb
|
[
"MIT"
] | null | null | null |
Parte 1/Semana 2/DigitoDaDezena.py
|
GabrielReira/Computer-Science-USP
|
47a3cb68ccaa08f7302fbd9d9905ca80cee432cb
|
[
"MIT"
] | null | null | null |
Parte 1/Semana 2/DigitoDaDezena.py
|
GabrielReira/Computer-Science-USP
|
47a3cb68ccaa08f7302fbd9d9905ca80cee432cb
|
[
"MIT"
] | null | null | null |
n = int(input('Digite um número inteiro: '))
dezena = n % 100
dígito = dezena // 10
print('O dígito das dezenas é', dígito)
| 20.833333
| 44
| 0.664
|
4a0978d0dd71580d4dd28a31f6e860a3b37e9be4
| 5,154
|
py
|
Python
|
human/model/train.py
|
Vtn21/HuMAn
|
024eb6c57e4c19d3b97db3681e7aca1adea644c1
|
[
"MIT"
] | null | null | null |
human/model/train.py
|
Vtn21/HuMAn
|
024eb6c57e4c19d3b97db3681e7aca1adea644c1
|
[
"MIT"
] | 7
|
2021-02-05T19:47:25.000Z
|
2021-03-05T13:37:14.000Z
|
human/model/train.py
|
Vtn21/HuMAn
|
024eb6c57e4c19d3b97db3681e7aca1adea644c1
|
[
"MIT"
] | null | null | null |
"""train.py
Author: Victor T. N.
"""
import os
from datetime import datetime
from human.utils import dataset
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1" # Hide unnecessary TF messages
import tensorflow as tf # noqa: E402
import tensorflow_addons as tfa # noqa: E402
SHUFFLE_BUFFER = 1000
def full_training_loop(model, train_datasets=[], valid_datasets=[],
seq_lengths=[], batch_sizes=[], swa=[], rlrp=[],
patience=[], name="noname", ckpt_dir="checkpoints",
log_dir="logs", save_dir="saves"):
"""Run a full training loop, controlled by the list inputs, all of which
must have the same length.
Args:
model (tf.keras.Model): uncompiled model to be trained.
train_datasets (list): List of parsed training datasets.
Defaults to [].
valid_datasets (list): List of parsed validation datasets.
Defaults to [].
seq_lengths (list): List of sequence lengths (the length of the
recording). These are generated with preprocessing.
Defaults to [].
batch_sizes (list): List of batch sizes, used on both training and
validation datasets.
Defaults to [].
swa (list): whether to use SGD with Stochastic Weight Averaging
(1 or True) or Adam (0 or False).
Defaults to [].
rlrp (list): whether to reduce learning rate on validation loss plateau
(1 or True) or not use it (0 or False).
Defaults to [].
patience (list): number of epochs without validation loss improvement
before early stopping the training.
Defaults to [].
name (str): name of this training loop. Defaults to "noname".
ckpt_dir (str): directory to store checkpoints.
Defaults to "checkpoints".
log_dir (str): directory to store TensorBoard logs.
Defaults to "logs".
save_dir (str): directory to save the final trained model.
Defaults to "saves".
"""
for i in range(len(train_datasets)):
# Retrieve current date and time
date = datetime.today().strftime("%Y-%m-%d-%H-%M")
# Callbacks list
callbacks = []
if swa[i]:
# Timestamp
stamp = f"{date}_{name}_swa"
# Optimizer (SGD + SWA)
opt = tfa.optimizers.SWA(tf.keras.optimizers.SGD(
learning_rate=1e-5, momentum=0.5, nesterov=True))
# Checkpoint callback
callbacks.append(tfa.callbacks.AverageModelCheckpoint(
update_weights=True, filepath=f"{ckpt_dir}/{stamp}",
save_best_only=True, save_weights_only=True))
else:
# Timestamp
stamp = f"{date}_{name}_{seq_lengths[i]}_{batch_sizes[i]}"
# Optimizer (Adam)
opt = tf.keras.optimizers.Adam(learning_rate=1e-3)
# Checkpoint callback
callbacks.append(tf.keras.callbacks.ModelCheckpoint(
filepath=f"{ckpt_dir}/{stamp}", save_best_only=True,
save_weights_only=True))
# Early stopping callback
callbacks.append(tf.keras.callbacks.EarlyStopping(
patience=patience[i]))
# TensorBoard callback
callbacks.append(tf.keras.callbacks.TensorBoard(
log_dir=os.path.join(log_dir, stamp), profile_batch=0))
# Reduce learning rate on plateau callback
if rlrp[i]:
callbacks.append(tf.keras.callbacks.ReduceLROnPlateau(
factor=0.2, patience=patience[i]))
# Compile the model
model.compile(loss="mse", metrics=["mae"], optimizer=opt)
# Print useful information
print(f"Sequence length: {seq_lengths[i]}\n"
f"Batch size: {batch_sizes[i]}")
if swa[i]:
print("Optimizer: SGD + SWA")
else:
print("Optimizer: Adam")
# Map and batch the dataset
train_mapped = (train_datasets[i]
.map(dataset.map_train,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=False)
.shuffle(SHUFFLE_BUFFER)
.batch(batch_sizes[i])
.prefetch(tf.data.AUTOTUNE))
valid_mapped = (valid_datasets[i]
.map(dataset.map_train,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=False)
.shuffle(SHUFFLE_BUFFER)
.batch(batch_sizes[i])
.prefetch(tf.data.AUTOTUNE))
# Start training
model.fit(x=train_mapped, epochs=20, callbacks=callbacks,
validation_data=valid_mapped)
print(f"Training done for {name}. Saving model...")
model.save_weights(os.path.join(save_dir, name))
| 44.051282
| 79
| 0.555491
|
4a097b18f8d092cd3cc69e886da909e2a33e79ac
| 1,803
|
py
|
Python
|
Testing/encryption/enc.py
|
PedroGRivera/BCAI
|
f56f21db61e80d4a57df542e40bc49405ba5b41e
|
[
"Apache-2.0"
] | 1
|
2020-03-05T20:11:31.000Z
|
2020-03-05T20:11:31.000Z
|
Testing/encryption/enc.py
|
PedroGRivera/BCAI
|
f56f21db61e80d4a57df542e40bc49405ba5b41e
|
[
"Apache-2.0"
] | null | null | null |
Testing/encryption/enc.py
|
PedroGRivera/BCAI
|
f56f21db61e80d4a57df542e40bc49405ba5b41e
|
[
"Apache-2.0"
] | 1
|
2019-09-23T20:14:51.000Z
|
2019-09-23T20:14:51.000Z
|
import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
import time
def genKey(keyFile="", nonceFile="", keyLen=0):
f = open(keyFile, 'w')
f.close()
f = open(nonceFile, 'w')
f.close()
key = os.urandom(keyLen)
nonce = os.urandom(keyLen)
f = open(keyFile, 'wb')
f.write(key)
f.close()
f = open(nonceFile, 'wb')
f.write(nonce)
f.close()
def getKey(keyFile="", nonceFile=""):
f = open(keyFile, 'rb')
key = f.read()
f.close()
f = open(nonceFile, 'rb')
nonce = f.read()
f.close()
return [key, nonce]
def enc(key=b"", nonce=b"", mess=b""):
alg = algorithms.AES(key)
cipher = Cipher(alg, modes.GCM(nonce), default_backend())
encryptor = cipher.encryptor()
return encryptor.update(mess)
def dec(key=b"", nonce=b"", mess=b""):
alg = algorithms.AES(key)
cipher = Cipher(alg, modes.GCM(nonce), default_backend())
decryptor = cipher.decryptor()
return decryptor.update(mess)
mess = b"aaaa"*(10**8)
def Test(len=0):
global mess
startVal = time.time()
genKey("key.txt","nonce.txt", len)
[key, nonce] = getKey("key.txt","nonce.txt")
val = enc(key, nonce, mess)
print (mess == dec(key, nonce, val))
return (time.time() - startVal)
outcomeLow = []
outcomeHigh = []
numTests = 50
for i in range(0, numTests):
out = Test(16)
outcomeLow.append(out)
for i in range(0, numTests):
out = Test(32)
outcomeHigh.append(out)
averageLow = sum(outcomeLow)/len(outcomeLow)
averageHigh = sum(outcomeHigh)/len(outcomeHigh)
print("128 bits value: " + str(averageLow))
print("256 bits value: " + str(averageHigh))
| 26.514706
| 77
| 0.60843
|
4a097b6c9877261e6ba2c7348132558d3534fe70
| 1,902
|
py
|
Python
|
antlir/vm/tests/test_initrd_break.py
|
facebookincubator/fs_image
|
3515a24bb0e93176a5584bdc8839464fa28390d7
|
[
"MIT"
] | 9
|
2019-12-02T20:17:35.000Z
|
2020-06-13T16:34:25.000Z
|
antlir/vm/tests/test_initrd_break.py
|
facebookincubator/fs_image
|
3515a24bb0e93176a5584bdc8839464fa28390d7
|
[
"MIT"
] | 19
|
2019-11-22T23:30:04.000Z
|
2020-07-16T18:05:48.000Z
|
antlir/vm/tests/test_initrd_break.py
|
facebookincubator/fs_image
|
3515a24bb0e93176a5584bdc8839464fa28390d7
|
[
"MIT"
] | 4
|
2019-12-04T19:03:28.000Z
|
2020-06-13T16:34:29.000Z
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib.resources
import subprocess
import tempfile
from antlir.fs_utils import Path
from antlir.tests.common import AntlirTestCase
class InitrdBreakTest(AntlirTestCase):
def test_vmtest_initrd_break_default(self):
with importlib.resources.path(
__package__, "vmtest-initrd-break-default"
) as vmtest, tempfile.NamedTemporaryFile() as console_f:
# Run the buck built vmtest target instance.
subprocess.run(
[
Path(vmtest),
"--append-console={}".format(console_f.name),
],
)
# Check for the expected `systectl list-jobs` output.
console_output = str(console_f.read())
for i in [
r"debug-shell\.service +start running",
r"initrd\.target +start waiting",
]:
self.assertRegex(console_output, i)
def test_vmtest_initrd_break_custom(self):
with importlib.resources.path(
__package__, "vmtest-initrd-break-custom"
) as vmtest, tempfile.NamedTemporaryFile() as console_f:
# Run the buck built vmtest target instance.
subprocess.run(
[
Path(vmtest),
"--append-console={}".format(console_f.name),
],
)
# Check for the expected `systectl list-jobs` output.
console_output = str(console_f.read())
for i in [
r"debug-shell\.service +start running",
r"sysinit\.target +start waiting",
]:
self.assertRegex(console_output, i)
| 33.368421
| 65
| 0.57939
|
4a097c32885d9049ccab01c616479257654fcac8
| 16,427
|
py
|
Python
|
core/parameters.py
|
pvthinker/Nyles
|
8d5989699127f4897c3591f01f218b2f796ba938
|
[
"MIT"
] | 18
|
2019-09-18T12:31:10.000Z
|
2021-12-12T11:39:28.000Z
|
core/parameters.py
|
pvthinker/Nyles
|
8d5989699127f4897c3591f01f218b2f796ba938
|
[
"MIT"
] | 2
|
2019-11-25T15:23:20.000Z
|
2020-01-24T12:28:21.000Z
|
core/parameters.py
|
pvthinker/Nyles
|
8d5989699127f4897c3591f01f218b2f796ba938
|
[
"MIT"
] | 2
|
2020-06-03T12:44:17.000Z
|
2021-12-12T11:39:28.000Z
|
import os
import json
import datetime
# List of categories that are needed in the defaults
CATEGORIES = ["model", "physics", "IO", "animation", "time", "discretization", "MPI", "multigrid", "user"]
# List of attributes that are needed for every parameter
ATTRIBUTES = ["type", "default", "avail", "doc"]
class InextensibleDict(dict):
"""A dictionary that is safe against accidentally adding new keys.
It is not possible to add new keys to an InextensibleDict using the
common notation dict[key] = value. This notation can only be used
to modify the value, but not to add new keys.
This dictionary has a method freeze() that prevents any further
changes to the dictionary of the form dict[key] = value. A call to
freeze will freeze all the instances of the InextensibleDict class,
not only the one it is invoked on.
"""
frozen = False
@classmethod
def freeze(cls):
cls.frozen = True
def __setitem__(self, key, item):
if self.frozen:
raise UserParameterError(
"not possible to modify parameters after the creation of Nyles."
)
if key not in self:
raise UserParameterError(
"not possible to add new key {!r} to the parameters."
.format(key)
)
dict.__setitem__(self, key, item)
class UserParameterError(Exception):
"""An error occured with the user-set parameters."""
class DefaultsFileError(Exception):
"""An error occured in the file of default values."""
def __init__(self, defaults_file, arg):
self.args = ["bad format of file {}: {}".format(defaults_file, arg)]
class UserParameters(object):
"""User interface for modifing the experiment parameters.
The default parameters are stored in a JSON file in the core-
directory. Its filename is stored in the constant attribute
DEFAULTS_FILE of this class.
One public attribute exists for each entry in the list CATEGORIES.
This attribute is a dictionary. Every key of the dictionary is the
name of a parameter. It is possible to modify the value of the
parameter, but it is not possible to add new parameters. Once Nyles
has been created, further modifications of the parameters are not
possible.
Methods for public access:
- help
- possible_values
- view_parameters
- check
- freeze
"""
DEFAULTS_FILE = "defaults.json"
# Dictionary of datatypes; used to check that every parameter is of
# correct type; key is a string as the type appears in the file of
# default values; value is a Python type or a list of Python types
TYPES = {
"str": str,
"int": int,
"float": (float, int), # an int can be used instead of a float
"bool": bool,
"dict": dict,
"list or string": (list, tuple, str),
}
def __init__(self):
"""Load the parameters from the file of default values."""
# Get the full path of the file of default values
jsonfile = os.path.realpath(os.path.join(
os.getcwd(),
os.path.dirname(__file__),
self.DEFAULTS_FILE,
))
# Read and check the file of default values
with open(jsonfile) as f:
defaults = json.load(f)
self.check_defaults(defaults)
# Copy all parameters by category into self
for cat in CATEGORIES:
setattr(self, cat, InextensibleDict({
parameter: attributes["default"]
for parameter, attributes in defaults[cat].items()
}))
# Copy information used for help and checks
self.documentations = {}
self.options = {}
self.types = {}
for category, parameters in defaults.items():
for parameter, attributes in parameters.items():
self.documentations[parameter] = attributes["doc"]
self.options[parameter] = attributes["avail"]
self.types[parameter] = attributes["type"]
def help(self, parameter):
"""Return the documentation for a parameter."""
if parameter in self.documentations:
return self.documentations[parameter]
raise ValueError("invalid parameter: {!r}".format(parameter))
def possible_values(self, parameter):
"""Return the possible values for a parameter (list or string)."""
if parameter in self.options:
return self.options[parameter]
raise ValueError("invalid parameter: {!r}".format(parameter))
def view_parameters(self):
"""Have a look at all of the parameters in one dictionary.
Warning: it is not possible to modify parameters using the
dictionary returned by this function.
"""
return {
**self.model,
**self.physics,
**self.IO,
**self.animation,
**self.time,
**self.discretization,
**self.MPI,
**self.multigrid,
**self.user
}
def check(self):
"""Raise an exception if the value of any parameter is wrong."""
# List of values that are recognized as powers of two. Rationale:
# In 2019, it seems ridicoulous to use 2^19 for any parameter in the
# model, so this number raises a "too large" error. Assuming that
# this limit doubles every year (which is faster growth than
# predicted by Moore's law), the following list, which adds one more
# power of two every year, should be safe for the near future.
POWERS_OF_2 = [2**n for n in range(datetime.datetime.now().year - 2000)]
for parameter, value in self.view_parameters().items():
# Check type of value
param_type = self.types[parameter]
if not isinstance(value, self.TYPES[param_type]):
raise UserParameterError(
"parameter {} must be {} {}, not {}"
.format(
parameter,
"an" if param_type == "int" else "a",
param_type, type(value),
)
)
# Check if value is among the options
options = self.options[parameter]
if isinstance(options, list):
if not value in options:
raise UserParameterError(
"parameter {} must be one of {}, not {!r}"
.format(parameter, options, value)
)
elif options == "any":
# any value is allowed
pass
elif options == "> 0.0":
if not value > 0.0:
raise UserParameterError(
"parameter {} must be positive".format(parameter)
)
elif options == ">= 0.0" or options == ">= 0":
if not value >= 0.0:
raise UserParameterError(
"parameter {} must be non-negative".format(parameter)
)
elif options == ">= 1":
if not value >= 1:
raise UserParameterError(
"parameter {} must be at least 1".format(parameter)
)
elif options == "2^n":
if value not in POWERS_OF_2:
if value < max(POWERS_OF_2):
raise UserParameterError(
"parameter {} must be a power of 2"
.format(parameter)
)
else:
raise UserParameterError(
"parameter {} is very large; if you are sure to "
"use it, extend the variable POWERS_OF_2 in this "
"class to include your value".format(parameter)
)
elif options == "[3 *] 2^n":
if value not in POWERS_OF_2 and value / 3 not in POWERS_OF_2:
if value < max(POWERS_OF_2):
raise UserParameterError(
"parameter {} must be a power of 2 or 3 times a "
"power of 2".format(parameter)
)
else:
raise UserParameterError(
"parameter {} is very large; if you are sure to "
"use it, extend the variable POWERS_OF_2 in this "
"class to include your value".format(parameter)
)
elif options == "any valid filename":
if "/" in value:
raise UserParameterError(
'parameter {} must not contain a "/"'.format(parameter)
)
elif parameter == "variables_in_history":
if not isinstance(value, (list, tuple)) and value not in [
"all", "prognostic", "p+p"
]:
raise UserParameterError(
'value {!r} of parameter {} not understood'
.format(parameter, value)
)
elif parameter == "datadir":
# no check necessary
pass
else:
# this should not happen; if it does, modify this method
print("Warning: cannot check parameter", parameter)
# Check that every CPU has at least one grid point
for x in "xyz":
if self.MPI["np" + x] > self.discretization["global_n" + x]:
raise UserParameterError(
"parameter np{} cannot be larger than global_n{}"
.format(x, x)
)
@classmethod
def check_defaults(cls, defaults):
"""Raise an exception if an error with the defaults is found.
This does not check if the given default value is valid."""
# Check all categories exist
for category in CATEGORIES:
if category not in defaults:
raise DefaultsFileError(
cls.DEFAULTS_FILE,
"category {} missing".format(category),
)
parameters = []
for category in defaults:
# Check no additional category exist
if category not in CATEGORIES:
raise DefaultsFileError(
cls.DEFAULTS_FILE,
"unknown category {}".format(category),
)
for parameter, attributes in defaults[category].items():
# Check no parameter exists twice
if parameter in parameters:
raise DefaultsFileError(
cls.DEFAULTS_FILE,
"parameter {} appears twice".format(parameter),
)
parameters.append(parameter)
# Check every parameter has all attributes
for attribute in ATTRIBUTES:
if attribute not in attributes:
raise DefaultsFileError(
cls.DEFAULTS_FILE,
"attribute {} missing for parameter {}"
.format(attribute, parameter),
)
# Check no parameter has additional attributes
for attribute in attributes:
if attribute not in ATTRIBUTES:
raise DefaultsFileError(
cls.DEFAULTS_FILE,
"unknown attribute {} for parameter {}"
.format(attribute, parameter),
)
# Check parameter type is recognized
if attributes["type"] not in cls.TYPES:
raise DefaultsFileError(
cls.DEFAULTS_FILE,
"unknown type {} of parameter {}"
.format(attributes["type"], parameter),
)
def freeze(self):
# Freeze all parameter dictionaries with one command
InextensibleDict.freeze()
if __name__ == "__main__":
param = UserParameters()
print("Default parameters:", param.view_parameters())
print("-"*80)
# Changing a parameter
param.IO["expname"] = "2nd_experiment"
# Changing a parameter that does not exist
try: param.IO["epxname"] = "3rd_experiment" # intentional typo
except UserParameterError as e: print("UserParameterError:", e)
# Changing a parameter in the wrong set
try: param.model["expname"] = "4th_experiment"
except UserParameterError as e: print("UserParameterError:", e)
print("-"*80)
print("Model parameters:", param.model)
print("Physics parameters:", param.physics)
print("Input/Output parameters:", param.IO)
print("Animation parameters:", param.animation)
print("Time parameters:", param.time)
print("Discretization parameters:", param.discretization)
print("MPI parameters:", param.MPI)
print("Multigrid parameters:", param.multigrid)
print("-"*80)
print("Possible values for modelname:", param.possible_values("modelname"))
print("Description of modelname:", param.help("modelname"))
print("-"*80)
print("Possible values for variables_in_history:",
param.possible_values("variables_in_history")
)
print("Description of variables_in_history:",
param.help("variables_in_history")
)
print("-"*80)
param.check()
# Count the exceptions to make it easy to check if all were raised
i = 0
# Some examples that create a failing check
i += 1
param.model["geometry"] = "open"
try: param.check()
except UserParameterError as e: print("{:2d}. UserParameterError: {}".format(i, e))
param.model["geometry"] = "closed"
i += 1
param.model["Lx"] = -3
try: param.check()
except UserParameterError as e: print("{:2d}. UserParameterError: {}".format(i, e))
param.model["Lx"] = 3
i += 1
param.model["Ly"] = "5"
try: param.check()
except UserParameterError as e: print("{:2d}. UserParameterError: {}".format(i, e))
param.model["Ly"] = 5
i += 1
param.IO["expname"] = "foo/bar"
try: param.check()
except UserParameterError as e: print("{:2d}. UserParameterError: {}".format(i, e))
param.IO["expname"] = "foo-bar"
i += 1
param.IO["variables_in_history"] = "any"
try: param.check()
except UserParameterError as e: print("{:2d}. UserParameterError: {}".format(i, e))
param.IO["variables_in_history"] = "all"
i += 1
param.IO["timestep_history"] = -1.0
try: param.check()
except UserParameterError as e: print("{:2d}. UserParameterError: {}".format(i, e))
param.IO["timestep_history"] = 0.0
i += 1
param.time["cfl"] = 0
try: param.check()
except UserParameterError as e: print("{:2d}. UserParameterError: {}".format(i, e))
param.time["cfl"] = 1
i += 1
param.discretization["global_nx"] = 5
try: param.check()
except UserParameterError as e: print("{:2d}. UserParameterError: {}".format(i, e))
param.discretization["global_nx"] = 3
i += 1
param.discretization["global_ny"] = 2**1000
try: param.check()
except UserParameterError as e: print("{:2d}. UserParameterError: {}".format(i, e))
param.discretization["global_ny"] = 2**10
i += 1
param.discretization["global_nz"] = 1
param.MPI["npz"] = 2
try: param.check()
except UserParameterError as e: print("{:2d}. UserParameterError: {}".format(i, e))
param.MPI["npz"] = 1
i += 1
param.MPI["npx"] = 1.5
try: param.check()
except UserParameterError as e: print("{:2d}. UserParameterError: {}".format(i, e))
param.MPI["npx"] = 1
i += 1
param.MPI["nh"] = -1
try: param.check()
except UserParameterError as e: print("{:2d}. UserParameterError: {}".format(i, e))
param.MPI["nh"] = 1
i += 1
param.discretization["orderA"] = 4
try: param.check()
except UserParameterError as e: print("{:2d}. UserParameterError: {}".format(i, e))
param.discretization["orderA"] = 3
| 38.291375
| 106
| 0.556523
|
4a097d6aafbd128c81256e6133d07e20124885ac
| 9,180
|
py
|
Python
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/vyos/vyos_config.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | 1
|
2019-04-16T21:23:15.000Z
|
2019-04-16T21:23:15.000Z
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/vyos/vyos_config.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | 5
|
2020-02-26T20:10:50.000Z
|
2021-09-23T23:23:18.000Z
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/vyos/vyos_config.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | 1
|
2020-02-13T14:24:57.000Z
|
2020-02-13T14:24:57.000Z
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_config
version_added: "2.2"
author: "Nathaniel Case (@qalthos)"
short_description: Manage VyOS configuration on remote device
description:
- This module provides configuration file management of VyOS
devices. It provides arguments for managing both the
configuration file and state of the active configuration. All
configuration statements are based on `set` and `delete` commands
in the device configuration.
extends_documentation_fragment: vyos
notes:
- Tested against VYOS 1.1.7
- Abbreviated commands are NOT idempotent, see
L(Network FAQ,../network/user_guide/faq.html#why-do-the-config-modules-always-return-changed-true-with-abbreviated-commands).
options:
lines:
description:
- The ordered set of configuration lines to be managed and
compared with the existing configuration on the remote
device.
src:
description:
- The C(src) argument specifies the path to the source config
file to load. The source config file can either be in
bracket format or set format. The source file can include
Jinja2 template variables.
match:
description:
- The C(match) argument controls the method used to match
against the current active configuration. By default, the
desired config is matched against the active config and the
deltas are loaded. If the C(match) argument is set to C(none)
the active configuration is ignored and the configuration is
always loaded.
default: line
choices: ['line', 'none']
backup:
description:
- The C(backup) argument will backup the current devices active
configuration to the Ansible control host prior to making any
changes. The backup file will be located in the backup folder
in the playbook root directory or role root directory, if
playbook is part of an ansible role. If the directory does not
exist, it is created.
type: bool
default: 'no'
comment:
description:
- Allows a commit description to be specified to be included
when the configuration is committed. If the configuration is
not changed or committed, this argument is ignored.
default: 'configured by vyos_config'
config:
description:
- The C(config) argument specifies the base configuration to use
to compare against the desired configuration. If this value
is not specified, the module will automatically retrieve the
current active configuration from the remote device.
save:
description:
- The C(save) argument controls whether or not changes made
to the active configuration are saved to disk. This is
independent of committing the config. When set to True, the
active configuration is saved.
type: bool
default: 'no'
"""
EXAMPLES = """
- name: configure the remote device
vyos_config:
lines:
- set system host-name {{ inventory_hostname }}
- set service lldp
- delete service dhcp-server
- name: backup and load from file
vyos_config:
src: vyos.cfg
backup: yes
- name: for idempotency, use full-form commands
vyos_config:
lines:
# - set int eth eth2 description 'OUTSIDE'
- set interface ethernet eth2 description 'OUTSIDE'
"""
RETURN = """
commands:
description: The list of configuration commands sent to the device
returned: always
type: list
sample: ['...', '...']
filtered:
description: The list of configuration commands removed to avoid a load failure
returned: always
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/vyos_config.2016-07-16@22:28:34
"""
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.network.vyos.vyos import load_config, get_config, run_commands
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec, get_connection
DEFAULT_COMMENT = 'configured by vyos_config'
CONFIG_FILTERS = [
re.compile(r'set system login user \S+ authentication encrypted-password')
]
def get_candidate(module):
    """Return the candidate configuration as a single newline-joined string.

    Uses the raw ``src`` payload when provided (split, blank lines dropped,
    re-joined); otherwise joins the ``lines`` list as-is.
    """
    src = module.params['src']
    if src:
        return '\n'.join(format_commands(src.splitlines()))
    return '\n'.join(module.params['lines'])
def format_commands(commands):
    """Drop blank and whitespace-only entries, keeping the rest verbatim."""
    return [cmd for cmd in commands if cmd.strip()]
def diff_config(commands, config):
    """Return the subset of *commands* that would change *config*.

    Single quotes are stripped before comparison on both sides. ``set``
    lines already present in the running config are dropped; ``delete``
    lines are kept when the running config is empty or contains a matching
    ``set`` prefix. Raises ValueError for any other command verb.
    """
    have = [str(c).replace("'", '') for c in config.splitlines()]
    updates = list()
    seen = set()
    for line in commands:
        normalized = str(line).replace("'", '')
        if not normalized.startswith('set') and not normalized.startswith('delete'):
            raise ValueError('line must start with either `set` or `delete`')
        if normalized.startswith('set'):
            if normalized not in have:
                updates.append(line)
        else:
            if not have:
                updates.append(line)
            else:
                # a delete is relevant if a corresponding set exists
                as_set = re.sub(r'delete', 'set', normalized)
                for entry in have:
                    if entry.startswith(as_set) and line not in seen:
                        updates.append(line)
                        seen.add(line)
    return list(updates)
def sanitize_config(config, result):
    """Remove sensitive lines from *config* in place.

    Lines matching any pattern in CONFIG_FILTERS (e.g. encrypted passwords)
    are deleted from *config* and recorded under ``result['filtered']`` so
    the caller can warn about them.
    """
    filtered = list()
    doomed = list()
    for pattern in CONFIG_FILTERS:
        for idx, line in enumerate(list(config)):
            if pattern.search(line):
                filtered.append(line)
                doomed.append(idx)
    result['filtered'] = filtered
    # delete from the end so earlier indexes stay valid
    for idx in sorted(doomed, reverse=True):
        del config[idx]
def run(module, result):
    """Compute and apply the configuration diff for the device.

    Builds the candidate config, asks the connection plugin for the diff
    against the running config, filters sensitive lines, and commits the
    remaining commands (unless in check mode). Mutates *result* in place
    with 'commands', 'filtered', 'changed' and (with --diff) 'diff'.
    """
    # get the current active config from the node or passed in via
    # the config param
    config = module.params['config'] or get_config(module)
    # create the candidate config object from the arguments
    candidate = get_candidate(module)
    # create loadable config that includes only the configuration updates
    connection = get_connection(module)
    try:
        response = connection.get_diff(candidate=candidate, running=config, diff_match=module.params['match'])
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    commands = response.get('config_diff')
    # strip lines that would fail to load (e.g. encrypted passwords)
    sanitize_config(commands, result)
    result['commands'] = commands
    # check mode computes the diff but never commits
    commit = not module.check_mode
    comment = module.params['comment']
    diff = None
    if commands:
        diff = load_config(module, commands, commit=commit, comment=comment)
        if result.get('filtered'):
            result['warnings'].append('Some configuration commands were '
                                      'removed, please see the filtered key')
        result['changed'] = True
    if module._diff:
        result['diff'] = {'prepared': diff}
def main():
    """Ansible module entry point.

    Builds the argument spec, optionally backs up the running config,
    applies the candidate configuration (src or lines), optionally saves
    the active config to disk, and exits with the accumulated result.
    """
    argument_spec = dict(
        src=dict(type='path'),
        lines=dict(type='list'),
        match=dict(default='line', choices=['line', 'none']),
        comment=dict(default=DEFAULT_COMMENT),
        config=dict(),
        backup=dict(type='bool', default=False),
        save=dict(type='bool', default=False),
    )
    argument_spec.update(vyos_argument_spec)
    # src (file) and lines (inline) are alternative candidate sources
    mutually_exclusive = [('lines', 'src')]
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=mutually_exclusive,
        supports_check_mode=True
    )
    warnings = list()
    result = dict(changed=False, warnings=warnings)
    if module.params['backup']:
        # the action plugin turns __backup__ into a backup file on disk
        result['__backup__'] = get_config(module=module)
    if any((module.params['src'], module.params['lines'])):
        run(module, result)
    if module.params['save']:
        # only save (and report a change) when the saved config differs
        diff = run_commands(module, commands=['configure', 'compare saved'])[1]
        if diff != '[edit]':
            run_commands(module, commands=['save'])
            result['changed'] = True
        run_commands(module, commands=['exit'])
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| 31.986063
| 129
| 0.673312
|
4a097eaf03cd3589529c1ecbaaf9253d27c0de69
| 1,041
|
py
|
Python
|
test12.py
|
hihaha99/PyGame-SpaceInvaders
|
1d9d3d4cc4683575ed03b7c5002840d456a3249a
|
[
"MIT"
] | null | null | null |
test12.py
|
hihaha99/PyGame-SpaceInvaders
|
1d9d3d4cc4683575ed03b7c5002840d456a3249a
|
[
"MIT"
] | null | null | null |
test12.py
|
hihaha99/PyGame-SpaceInvaders
|
1d9d3d4cc4683575ed03b7c5002840d456a3249a
|
[
"MIT"
] | null | null | null |
# Simple PyGame animation demo: an image travels clockwise around the
# window edges (right -> down -> left -> up) at 5 px per frame.
import pygame, sys
from pygame.locals import *
pygame.init()
FPS = 30 # frames per second setting
fpsClock = pygame.time.Clock()
# set up the window
DISPLAYSURF = pygame.display.set_mode((400, 300), 0, 32)
pygame.display.set_caption('Animation')
WHITE = (255, 255, 255)
catImg = pygame.image.load('technology.png')
catx = 10
caty = 10
direction = 'right'
while True: # the main game loop
    DISPLAYSURF.fill(WHITE)
    # turn at hard-coded margins (window 400x300, 10 px border,
    # image assumed ~110x70 so it stays fully visible)
    if direction == 'right':
        catx += 5
        if catx == 280:
            direction = 'down'
    elif direction == 'down':
        caty += 5
        if caty == 220:
            direction = 'left'
    elif direction == 'left':
        catx -= 5
        if catx == 10:
            direction = 'up'
    elif direction == 'up':
        caty -= 5
        if caty == 10:
            direction = 'right'
    DISPLAYSURF.blit(catImg, (catx, caty))
    # window close button terminates the program
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    pygame.display.update()
    fpsClock.tick(FPS)
| 22.148936
| 56
| 0.568684
|
4a097ef6bedeb4a2405199d3b2bf306a73de2618
| 45,972
|
py
|
Python
|
MRPy.py
|
mmaiarocha/Attenuation
|
c1115a9e3ec3d7973e1de86ac24f7792507b7d3d
|
[
"MIT"
] | null | null | null |
MRPy.py
|
mmaiarocha/Attenuation
|
c1115a9e3ec3d7973e1de86ac24f7792507b7d3d
|
[
"MIT"
] | null | null | null |
MRPy.py
|
mmaiarocha/Attenuation
|
c1115a9e3ec3d7973e1de86ac24f7792507b7d3d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import gzip as gz
import pickle as pk
import numpy as np
import pandas as pd
from warnings import warn
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
#=============================================================================
#=============================================================================
class MRPy(np.ndarray):
#=============================================================================
#=============================================================================
# 1. Class initialization
#=============================================================================
def __new__(cls, np_array, fs=None, Td=None):
X = np.asarray(np_array).view(cls)
sh = X.shape
if (len(sh) == 1):
X = np.reshape(X,(1,sh[0]))
elif (sh[0] > sh[1]):
X = X.T
sh = X.shape
X.NX = sh[0]
X.N = sh[1]
err = 1.0
if (X.N == 0):
sys.exit('MRPy class cannot deal with empty arrays!')
if (np.mod(X.N, 2) != 0): # enforce N to be even
X = X[:,:-1]
err = (X.N - 1.0)/X.N # correction over Td
X.N = X.N - 1
if ((X.N > 0) & (fs != None)): # if fs is available...
X.fs = np.float(fs)
X.Td = X.N/X.fs # ... any Td will be disregarded
X.M = X.N//2 + 1
elif ((X.N > 0) & (Td != None)): # if Td is available
X.Td = err*np.float(Td)
X.fs = X.N/X.Td
X.M = X.N//2 + 1
elif (X.N > 0):
sys.exit('Neither fs or Td has been specified!')
return X
#-----------------------------------------------------------------------------
def __array_finalize__(self, X):
if X is None: return
self.fs = getattr(X, 'fs', None)
self.Td = getattr(X, 'Td', None)
self.NX = getattr(X, 'NX', None)
self.N = getattr(X, 'N', None)
self.M = getattr(X, 'M', None)
#=============================================================================
# 2. Class constructors from other sources
#=============================================================================
def from_file(filename, form='mrpy'):
"""
Load time series from file.
Parameters: filename: file to be loaded, including path,
whithout file extension
form: data formatting. Options are
'mrpy' - default gzip pickle loading
'excel' - excel generated with pandas
'columns' - t, X1, [X2, X3 ...]
'invh ' - csv file by iNVH app
'mpu6050' - gzip excel 6 axis data
"""
try:
#-------------
if (form.lower() == 'mrpy'):
with gz.GzipFile(filename+'.csv.gz', 'rb') as target:
return MRPy(*pk.load(target))
#---------------
elif (form.lower() == 'excel'):
with open(filename+'.xlsx', 'rb') as target:
data = pd.read_excel(target, sheet_name='MRPy')
ti = np.array(data.index, dtype=float)
return MRPy.resampling(ti, data.values)
#---------------
elif (form.lower() == 'columns'):
with open(filename+'.txt', 'rb') as target:
data = np.genfromtxt(target,
delimiter='\t')
ti = data[:,0]
return MRPy.resampling(ti, data[:,1:])
#---------------
elif (form.lower() == 'invh'):
with open(filename+'.csv', 'rb') as target:
data = np.genfromtxt(target,
delimiter=',',
skip_header=1)
ti = data[:,0]
return MRPy.resampling(ti, data[:,1:-1])
#---------------
elif (form.lower() == 'mpu6050'):
with gz.open(filename+'.csv.gz', 'rb') as target:
data = np.genfromtxt(target,
delimiter=',')
ti = data[:,0] - data[0,0]
return MRPy.resampling(ti, data[:,1:]/16384)
#---------------
else:
sys.exit('Data formatting not available!')
return None
except:
sys.exit('Could not read file "{0}"!'.format(filename))
return None
#-----------------------------------------------------------------------------
    def from_periodogram(Sx, fs):
        """
        Simulate RPs from given spectral densities.

        Parameters:  Sx: spectral densities as ndarray (must have odd
                         length, otherwise it will be truncated by 1 and
                         the length of simulation will not be as expected!)
                         The largest dimension of Sx is assumed to be the
                         frequency axis.
                     fs: sampling frequency in Hz
        """
        sh = Sx.shape
        # normalize the input to shape (NX, M): one spectrum per row
        if (len(sh) == 1):
            Sx = np.reshape(Sx,(1,sh[0]))
        else:
            if (sh[0] > sh[1]):
                Sx = Sx.T
        sh = Sx.shape
        NX = sh[0]
        M0 = sh[1]
        M = M0 - (np.mod(M0,2) == 0)    # ensure M is odd
        N = 2*(M - 1)                   # resulting (even) series length
        # scale the one-sided density to discrete Fourier amplitudes
        Sx = N*fs*np.abs(Sx[:,0:M])/2
        X = np.empty((NX, N))
        for k in range(NX):
            # random phases, forced to zero at f = 0
            phi = 2*np.pi*np.random.rand(M); phi[0] = 0.
            Pw = np.sqrt(Sx[k,:]) * (np.cos(phi) + 1j*np.sin(phi))
            # Hermitian symmetry so that the inverse FFT is real-valued
            Pw = np.hstack((Pw, np.conj(Pw[-2:0:-1])))
            X[k,:] = np.real(np.fft.ifft(Pw))
        return MRPy(X, fs)
#-----------------------------------------------------------------------------
def from_autocov(Cx, Tmax):
"""
Simulate RPs from given autocovariance functions.
Parameters: Cx: autocovariances as ndarray (must have odd
length, otherwise it will be truncated by 1 and
the length of simulation will not be as expected!)
The largest dimension of Cx is assumed to be the
time gap axis.
Tmax: largest time gap, associated to the last element
in array Cx. Defines process duration, which
will be approximately 2Tmax.
"""
Sx, fs = MRPy.Cx2Sx(Cx, Tmax)
return MRPy.from_periodogram(Sx, fs)
#-----------------------------------------------------------------------------
    def from_pseudo(Sx, Tmax, T, zeta=0.05):   # NOT READY!!!
        """
        Simulate RPs from a given pseudo spectra, which are the
        peak response amplitude of a single degree of freedom system, as a
        function of system natural period of vibration, subjected to a
        certain type of excitation. The usual application is for the
        pseudo acceleration spectra of seismic excitation.

        Parameters:  Sx:   pseudo spectras as ndarray (must have odd
                           length, otherwise it will be truncated by 1 and
                           the length of simulation will not be as expected!)
                           The largest dimension of Sx is assumed to be the
                           period axis.
                     Tmax: largest period, associated to the last element
                           in array Sx. Defines process duration, which
                           will be approximately 2Tmax.
                     T:    tuple (T1, T2, T0) defining envelope timing, where
                           T1 is the end of attack time, T2 is the end of
                           sustain time, and T0 is the time constant of the
                           exponential amplitude decay.
                     zeta: system damping (ratio of critical) can be
                           provided or default value of 5% is assumed.
        """
        sh = Sx.shape
        if (len(sh) == 1):
            Sx = np.reshape(Sx,(1,sh[0]))
        else:
            if (sh[0] > sh[1]):
                Sx = Sx.T
        sh = Sx.shape
        NX = sh[0]
        M0 = sh[1]
        M = M0 - (np.mod(M0,2) == 0)    # ensure M is odd
        N = 2*(M - 1)
        err = M/M0
        fs = (M - 1)/(err*Tmax)         # eventually corrects for Tmax
        # NOTE(review): work in progress — Sx and zeta are not used yet;
        # the result is just Kanai-filtered, enveloped white noise.
        X = MRPy(np.random.randn(NX, N), fs)
        return X.Kanai().envelope(T)
#=============================================================================
# 3. Class constructors by modification
#=============================================================================
def zero_mean(self):
"""
Clean mean values.
"""
X = MRPy.copy(self)
Xm = X.mean(axis=1)
for k in range(self.NX):
X[k,:] -= Xm[k]
return X
#-----------------------------------------------------------------------------
def superpose(self, weight=1.):
"""
Add up all series in MRPy weighted by 'weight'.
Parameters: weight: scalar or list with weights for summation.
"""
if ~hasattr(weight, "__len__"):
weight = weight*np.ones(self.NX)
X = np.zeros((1, self.N))
for kX, row in enumerate(self):
X[0,:] += weight[kX]*row
return MRPy(X, self.fs)
#-----------------------------------------------------------------------------
def double(self):
"""
Double MRPy duration by filling with mean values.
"""
Xm = self.mean(axis=1)
X = np.hstack((self, np.tile(Xm,(self.N, 1)).T))
return MRPy(X, self.fs)
#-----------------------------------------------------------------------------
def extract(self, segm=(1/4, 3/4), by='fraction'):
"""
Extract a central segment of time range. The lower and upper
cutting point as defined as a tuple or list, which meaning is
defined by a code 'by':
Parameters: segm: tuple or list with the lower and upper
cutting points.
by: code indicating the meaning of cutting points:
'fraction': default meaning
'time' : time axis related
'index' : directly through indexing
"""
if (segm[0] >= segm[1]):
sys.exit('Upper limit must be larger than lower limit!')
if (by.lower() == 'fraction'):
i0 = int(segm[0]*self.N)
i1 = int(segm[1]*self.N)
elif (by.lower() == 'time'):
i0 = int(segm[0]*self.fs)
i1 = int(segm[1]*self.fs)
elif (by.lower() == 'index'):
i0 = int(segm[0])
i1 = int(segm[1])
else:
sys.exit('Segment definition is unknown!')
return None
i1 = i1 - np.mod(i1-i0, 2) # ensure even length
if (i0 < 0 ): i0 = 0 # do not go over boundaries
if (i1 > self.N): i1 = self.N
return MRPy(self[:,i0:i1], self.fs)
#-----------------------------------------------------------------------------
    def envelope(self, T):
        """
        Apply an amplitude envelope with exponential attack and decay.

        Parameters:  T: tuple (T1, T2, T0) defining envelope timing, where
                        T1 is the end of attack time, T2 is the end of
                        sustain time, and T0 is the time constant of the
                        exponential amplitude attack and decay.
        """
        t = self.t_axis()
        X = MRPy.copy(self)
        env = np.ones(self.N)           # sustain phase multiplier is 1
        # attack: saturating exponential, normalized to reach 1 at t = T1
        # NOTE(review): the attack uses T[0] as its time constant, not T[2]
        # as the docstring suggests — confirm intended behavior
        env[t < T[0]] = (1 - np.exp(-t[t < T[0]]/T[0]))/(1 - np.exp(-1))
        # decay: exponential with time constant T0 after the sustain ends
        env[t > T[1]] = np.exp((T[1] - t[t > T[1]])/T[2])
        for k in range(self.NX):
            X[k,:] *= env
        return X
#-----------------------------------------------------------------------------
def mov_average(self, n=3, win='tri'):
"""
Apply moving average with specified window.
Parameters: n: window width (truncated to be odd integer)
win: window type. Available windows are:
'rec': rectangular
'tri': triangular
"""
n = np.int(n) # truncate to integer
n = n - (1 - np.mod(n,2)) # n is odd or will be decreased by 1
m = (n - 1)//2 + 1 # window center
W = np.ones(n) # default rectangular window
if (win.lower() == 'rec'):
pass
elif (win.lower() == 'tri'):
W[ :m] = np.linspace(1/m, 1., m)
W[m-1: ] = np.linspace(1., 1/m, m)
else:
sys.error('Averaging window type not available!')
m = m - 1
W = W/W.sum()
X = MRPy.copy(self)
for kX in range(self.NX):
for k in range(0, m):
k0 = m - k
W0 = W[k0:]/np.sum(W[k0:])
X[kX,k] = np.sum(W0*self[kX,:k+m+1])
for k in range(m, self.N-m-1):
X[kX,k] = np.sum(W*self[kX,k-m:k+m+1])
for k in range(self.N-m-1, self.N):
k0 = m - k + self.N
W0 = W[:k0]/np.sum(W[:k0])
X[kX,k] = np.sum(W0*self[kX,k-m:])
return X
#-----------------------------------------------------------------------------
    def filtered(self, band, mode='pass'):
        """
        Apply filtering in frequency domain. Series size is doubled
        before filtering, in order to minimize aliasing.

        Parameters:  band: frequency band as tuple or list: [f_low, f_high]
                     mode: filter type. Available:
                           'pass': band pass (default)
                           'stop': band stop
        """
        # pad to double length (double() pads with the series mean)
        X = self.double()
        f = X.f_axis()
        for kX in range(X.NX):
            Xw = np.fft.fft(X[kX,:])[0:X.M]   # one-sided spectrum
            if mode.lower() == 'pass':
                Xw[(f < band[0]) | (f >= band[1])] = 0.
            elif mode.lower() == 'stop':
                Xw[(f >= band[0]) & (f < band[1])] = 0.
            else:
                warn('Filter type not available!')
            # rebuild a Hermitian spectrum and return to the time domain
            X[kX,:] = np.real(np.fft.ifft(
                      np.hstack((Xw, np.conj(Xw[-2:0:-1])))))
        # discard the padded half
        return MRPy(X[:,0:self.N], self.fs)
#-----------------------------------------------------------------------------
    def Kanai(self, H1=(4.84, 0.60), H2=(0.97, 0.60)):
        """
        Apply Kanai/Tajimi filtering, with low frequency range attenuation
        to avoid integration drifting.

        Parameters:  H1: tuple (f1, zeta1) for first filter part,
                         where default values represent firm soil condition.
                     H2: tuple (f2, zeta2) for second filter part, which
                         must properly attenuate low frequency range.
        """
        X = np.empty((self.NX, self.N))
        for kX, row in enumerate(self):
            Xw = np.fft.fft(row)[0:self.M]   # one-sided spectrum
            w1 = self.f_axis()/H1[0]         # frequency ratio, first stage
            w2 = self.f_axis()/H2[0]         # frequency ratio, second stage
            # second-order responses: Hw1 amplifies near f1; Hw2 goes to
            # zero at f = 0, attenuating the low frequency range
            Hw1 = (1 + 2j*H1[1]*w1)/(1 - w1*w1 + 2j*H1[1]*w1)
            Hw2 = (w2*w2)/(1 - w2*w2 + 2j*H2[1]*w2)
            Xw = Xw*Hw1*Hw2
            # Hermitian reconstruction keeps the inverse FFT real-valued
            Xk = np.real(np.fft.ifft(np.hstack((Xw, np.conj(Xw[-2:0:-1])))))
            X[kX,:] = Xk[0:self.N]
        return MRPy(X, self.fs)
#-----------------------------------------------------------------------------
def integrate(self, band=None):
"""
Frequency domain integration with passing band.
Parameters: band: frequency band to keep, tuple: (f_low, f_high)
"""
if (band == None):
band[0] = 0.
band[1] = self.fs/2
elif (band[1] > self.fs/2):
warn('Upper band limit truncated to fs/2')
band[1] = self.fs/2
X = np.empty((self.NX, self.N))
f = self.f_axis(); f[0] = f[1] # avoid division by zero
for kX, row in enumerate(self):
Xw = np.fft.fft(row)[0:self.M]
Xw = Xw / (2j*np.pi*f) # division means integration
Xw[0] = 0. # disregard integration constant
Xw[(f <= band[0]) | (f > band[1])] = 0.
X[kX,:] = np.real(np.fft.ifft(
np.hstack((Xw, np.conj(Xw[-2:0:-1])))))
return MRPy(X, self.fs)
#-----------------------------------------------------------------------------
def differentiate(self, band=None):
"""
Frequency domain differentiation with passing band.
Parameters: band: frequency band to keep, tuple: (f_low, f_high)
"""
if (band == None):
band[0] = 0.
band[1] = self.fs/2
elif (band[1] > self.fs/2):
warn('Upper band limit truncated to fs/2')
band[1] = self.fs/2
X = np.empty((self.NX, self.N))
f = self.f_axis(); f[0] = f[1] # avoid division by zero
for kX, row in enumerate(self):
Xw = np.fft.fft(row)[0:self.M]
Xw = Xw * (2j*np.pi*f) # multiplication means derivation
Xw[(f <= band[0]) | (f > band[1])] = 0.
X[kX,:] = np.real(np.fft.ifft(
np.hstack((Xw, np.conj(Xw[-2:0:-1])))))
return MRPy(X, self.fs)
#-----------------------------------------------------------------------------
def sdof_fdiff(self, fn, zeta, U0=0., V0=0.):
"""
Integrates the dynamic equilibrium differential equation by
the central finite differences method.
The input is assumed to be an acceleration (force over mass),
otherwise the result must be divided by system mass to have
displacement unit.
System properties (frequency and damping) may be provided as
scalars or lists. If they are scalars, same properties are used
for all series in the MRP. The same applies for initial conditions
U0 (displacement) and V0 (velocity)
Parameters: fn: sdof natural frequency (Hz)
zeta: sdof damping as ratio of critial (nondim)
U0: initial position (default is all zero)
V0: initial velocity (default is all zero)
"""
if ~hasattr(fn, "__len__"):
fn = fn*np.ones(self.NX)
if ~hasattr(zeta, "__len__"):
zeta = zeta*np.ones(self.NX)
if ~hasattr(U0, "__len__"):
U0 = U0*np.ones(self.NX)
if ~hasattr(V0, "__len__"):
V0 = V0*np.ones(self.NX)
dt = 1/self.fs
X = MRPy(np.empty((self.NX, self.N)), self.fs)
for kX, row in enumerate(self):
zt = zeta[kX]
wn = 2*np.pi*fn[kX]
b1 = ( zt*wn + 1/dt)/dt
b2 = ( zt*wn - 1/dt)/dt
b3 = (dt*wn*wn - 2/dt)/dt
X[kX,0] = U0
X[kX,1] = U0 + V0*dt + row[0]*dt*dt/2
for k in range(2,self.N):
X[kX,k] = (row[k-1] + b2*X[kX,k-2] - b3*X[kX,k-1])/b1
return X
#-----------------------------------------------------------------------------
def sdof_Duhamel(self, fn, zeta, U0=0., V0=0.):
"""
Integrates the dynamic equilibrium differential equation by Duhamel.
The input is assumed to be an acceleration (force over mass),
otherwise the result must be divided by system mass to have
displacement unit.
System properties (frequency and damping) may be provided as
scalars or lists. If they are scalars, same properties are used
for all series in the MRP. The same applies for initial conditions
U0 (displacement) and V0 (velocity)
Parameters: fn: sdof natural frequency (Hz)
zeta: sdof damping as ratio of critial (nondim)
U0: initial position (default is all zero)
V0: initial velocity (default is all zero)
"""
if ~hasattr(fn, "__len__"):
fn = fn*np.ones(self.NX)
if ~hasattr(zeta, "__len__"):
zeta = zeta*np.ones(self.NX)
if ~hasattr(U0, "__len__"):
U0 = U0*np.ones(self.NX)
if ~hasattr(V0, "__len__"):
V0 = V0*np.ones(self.NX)
t = self.t_axis()
dt = 1/self.fs
X = MRPy(np.empty((self.NX, self.N)), self.fs)
for kX, row in enumerate(self):
zt = zeta[kX]
wn = 2*np.pi*fn[kX]
wd = wn*np.sqrt(1 - zt**2)
et = np.exp(zt*wn*t)
st = np.sin(wd*t)
ct = np.cos(wd*t)
X[kX,:] = (U0[kX]*ct + (V0[kX] + U0[kX]*zt*wn)*st/wd)/et
A = dt*np.cumsum(row*et*ct)
B = dt*np.cumsum(row*et*st)
X[kX,:] += (A*st - B*ct)/et/wd
return X
#-----------------------------------------------------------------------------
def sdof_Fourier(self, fn, zeta):
"""
Integrates the dynamic equilibrium differential equation by Fourier.
The input MRPy is assumed to be an acceleration (force over mass),
otherwise the result must be divided by system mass to have
displacement unit.
System properties (frequency and damping) may be provided as
scalars or lists. If they are scalars, same properties are used
for all series in the MRP.
Parameters: fn: sdof natural frequency (Hz)
zeta: sdof damping (nondim)
"""
if ~hasattr(fn, "__len__"):
fn = fn*np.ones(self.NX)
if ~hasattr(zeta, "__len__"):
zeta = zeta*np.ones(self.NX)
X = MRPy(np.empty((self.NX, self.N)), self.fs)
for kX, row in enumerate(self):
zt = zeta[kX]
wn = 2*np.pi*fn[kX]
K = wn*wn
b = 2*np.pi*self.f_axis()/wn
Hw = (K*((1.0 - b**2) + 1j*(2*zt*b)))**(-1)
Hw = np.hstack((Hw,np.conj(Hw[-2:0:-1])))
X[kX,:] = np.real(np.fft.ifft(Hw*np.fft.fft(row)))
return X
#-----------------------------------------------------------------------------
def random_decrement(self, div=4, thr=1.0, ref=0):
"""
Estimate the free decay response of a dynamic system from the
response to a wide band excitation by the random decrement (RD)
method.
Parameters: div: number of divisions of total length, N//n,
to define the length of decrement series.
The divided length will be eventually truncated
to be even.
thr: threshold level that defines the reference
upcrossing level, given as a multiple of the
standard deviation of the reference MRP.
ref: row of MRPy to be used as reference series.
The other series will be splitted at the same
crossing points, what implies phase consistency.
"""
n = self.N//div # convert to length
n = n - (np.mod(n,2) == 1) # force length to be even
Xm = self.mean(axis=1) # mean values are zero
Xref = self[ref,:] # reference series
X0 = thr*(Xref.std()) # crossing reference level
kref = ( ((Xref[0:(self.N-1)] < X0) & (Xref[1:self.N] >= X0)) |
((Xref[0:(self.N-1)] > X0) & (Xref[1:self.N] <= X0)) )
nk = sum(kref)
Y = MRPy(np.zeros((self.NX, n)), self.fs)
for kX, row in enumerate(self):
row -= Xm[kX] # remove mean value
for k in range(self.N - n):
if kref[k]:
Y[kX,:] += row[k:(k+n)]
return Y/nk
#-----------------------------------------------------------------------------
def fit_decay(self):
"""
Fit the theoretical free decay function of a sdof dynamic system
to the provided MRP. The MRPy mean value is discarded. The fitted
parameters are output as a tuple P = (Xp, fn, zt, ph), where
Xp is the amplitude, fn is the fundamental (undamped) frequency,
zt is the damping as the ratio of critical, and ph is the phase
angle with respect with the cosinus function. This method is
typically used to fit the output of the random decrement method.
"""
#-------------------------------------------------------
def decay(t, Xp, fn, zt, ph):
wn = 2*np.pi*fn
wd = wn*np.sqrt(1. - zt*zt)
return Xp*np.exp(-zt*wn*t)*np.cos(wd*t - ph)
#-------------------------------------------------------
t = self.t_axis()
f = self.f_axis()
P = np.zeros((self.NX, 4))
X = self.zero_mean()
Sx, fs = X.periodogram()
for kX, row in enumerate(X):
Xp = np.max(row) # initial amplitude value
fn = f[np.argmax(Sx[kX,:])] # initial natural frequency
zt = 0.03 # initial damping
ph = 0.00 # initial phase
Pmin = (0.5*Xp, 1/t[-1], 0.0, -np.pi) # lower bounds
P0 = ( Xp, fn, zt, ph ) # initial guesses
Pmax = (1.5*Xp, 1*f[-1], 0.5, np.pi) # upper bounds
try:
P[kX,:], cv = curve_fit(decay, t, row,
p0=P0, bounds=(Pmin, Pmax))
except:
P[kX,:] = np.zeros(5)
print('Not able to fit decay function!!!')
pass
X[kX,:] = decay(t, *P[kX,:])
return MRPy(X, fs), P
#=============================================================================
# 4. Class constructors from conceptual properties
#=============================================================================
    def zeros(NX=1, N=1024, fs=None, Td=None):
        """
        Create an MRPy object of all-zero processes.

        Parameters:  NX: number of processes in the MRPy object.
                     N:  length of each process.
                     fs: sampling frequency (in Hz), or alternatively
                     Td: processes duration (second)
        """
        fs, Td = MRPy.check_fs(N, fs, Td)
        return MRPy(np.zeros((NX,N)), fs)
#-----------------------------------------------------------------------------
def Dirac(NX=1, N=1024, t0=0.0, fs=None, Td=None):
"""
Add up all series in MRPy weighted by 'weight'.
Parameters: NX: number of processes in the MRPy object.
N: length of each process.
t0: time at which impulse must be given
fs: sampling frequency (in Hz), or alternatively
Td: processes duration (second)
"""
fs, Td = MRPy.check_fs(N, fs, Td)
i0 = int(t0//fs)
X = np.zeros((NX,N))
X[:,i0] = 1.0
return MRPy(X, fs)
#-----------------------------------------------------------------------------
    def Heaviside(NX=1, N=1024, t0=0.0, fs=None, Td=None):
        """
        Create MRPy unit step (Heaviside) processes.

        Parameters:  NX: number of processes in the MRPy object.
                     N:  length of each process.
                     t0: time at which step must be given
                     fs: sampling frequency (in Hz), or alternatively
                     Td: processes duration (second)
        """
        fs, Td = MRPy.check_fs(N, fs, Td)
        i0 = int(t0*fs)
        X = np.zeros((NX,N))
        X[:,i0:] = 1.0
        return MRPy(X, fs)
#-----------------------------------------------------------------------------
    def white_noise(NX=1, N=1024, fs=None, Td=None):
        """
        Simulate NX white-noise processes (flat one-sided spectral density).

        Parameters:  NX: number of processes in the MRPy object.
                     N:  length of each process.
                     fs: sampling frequency (in Hz), or alternatively
                     Td: processes duration (second)
        """
        fs, Td = MRPy.check_fs(N, fs, Td)
        M = N//2 + 1
        # flat spectrum; the Td/M scaling yields roughly unit variance
        Sx = np.ones((NX, M))*Td/M
        return MRPy.from_periodogram(Sx, fs)
#-----------------------------------------------------------------------------
def pink_noise(NX=1, N=1024, fs=None, Td=None):
"""
Add up all series in MRPy weighted by 'weight'.
Parameters: NX: number of processes in the MRPy object.
N: length of each process.
fs: sampling frequency (in Hz), or alternatively
Td: processes duration (second)
"""
fs, Td = MRPy.check_fs(N, fs, Td)
M = N//2 + 1
Sx = np.ones((NX, M))*Td/M
return MRPy.from_periodogram(Sx, fs)
#=============================================================================
# 5. MRPy properties (as non-MRPy outputs)
#=============================================================================
def periodogram(self):
"""
Estimates the one-side power spectrum of a MRP.
"""
Sx = np.empty((self.NX, self.M))
for kX in range(self.NX):
Fx = np.fft.fft(self[kX,:] - self[kX,:].mean())
Sxk = np.real(Fx*Fx.conj())*2/self.N/self.fs
Sx[kX,:] = Sxk[:self.M]
return Sx, self.fs
#-----------------------------------------------------------------------------
    def autocov(self):
        """
        Estimates the autocovariance functions of a MRP as the inverse
        FFT of the periodogram (Wiener-Khinchin relation).
        Returns (Cx, Tmax) with Cx of shape (NX, M).
        """
        Tmax = (self.M - 1)/self.fs     # largest available time gap
        Cx = np.empty((self.NX, self.M))
        Sx, fs = self.periodogram()
        for kX in range(self.NX):
            # rebuild the two-sided spectrum before inverting
            Sxk = np.hstack((Sx[kX,:], Sx[kX,-2:0:-1]))
            Cxk = np.fft.ifft(Sxk)*fs/2
            Cx[kX,:] = np.real(Cxk[:self.M])
        return Cx, Tmax
#-----------------------------------------------------------------------------
def autocorr(self):
"""
Estimates the autocorrelation function of a MRP.
"""
Xs = self.std(axis=1)
Rx = np.empty((self.NX,self.M))
Cx, Tmax = self.autocov()
for kX in range(self.NX):
Rx[kX,:] = Cx[kX,:]/Xs[kX]/Xs[kX]
return Rx, Tmax
#-----------------------------------------------------------------------------
    def pseudo(self, zeta=0.05):
        """
        Estimates the pseudo spectra, which are the peak response
        amplitudes of a single degree of freedom system, as a function of
        system natural period of vibration. The usual application is for
        seismic acceleration records.

        Parameters:  zeta: system damping (ratio of critical) can be
                           provided or default value of 5% is assumed.
        """
        Tmax = (self.M - 1)/self.fs
        Sx = np.zeros((self.NX, self.M))
        T = self.T_axis()
        for k in range(self.M):         # this may take long...
            # skip very short periods (fewer than ~8 samples per cycle)
            if (T[k] > 8/self.fs):
                X = self.sdof_Duhamel(1/T[k], zeta)
                umax = np.abs(X).max(axis=1)   # peak absolute response
                if ~np.any(np.isnan(umax)): Sx[:,k] = umax
        return Sx, Tmax
#-----------------------------------------------------------------------------
    def Davenport(self, T=-1.):
        """
        Peak factor of a MRPy by Davenport's formula.

        Parameters:  T: observation time for estimating peak factor.
                        The default value is -1, that means the total
                        duration of MRP, Td, is to be used.
        """
        if (T < 0.): T = self.Td
        e = 0.5772156649                # Euler-Mascheroni constant
        f = self.f_axis()
        df = 1/self.Td                  # frequency resolution
        Sx, fs = self.periodogram()
        gX = np.zeros(self.NX)
        for kX in range(self.NX):
            m0 = np.trapz(Sx[kX,:], dx=df)       # 0th spectral moment (variance)
            m2 = np.trapz(Sx[kX,:]*f*f, dx=df)   # 2nd spectral moment
            nu = T*np.sqrt(m2/m0)                # expected number of upcrossings
            if (nu < 1): nu = 1                  # guard the log below
            Lg = np.sqrt(2*np.log(nu))
            if (Lg < np.sqrt(e)): Lg = np.sqrt(e)  # lower bound on the factor
            gX[kX] = Lg + e/Lg                   # Davenport's peak factor
        return gX
#-----------------------------------------------------------------------------
    def splitmax(self, T=-1.):
        """
        Peak factor of a MRPy by the "splitmax" method.

        Parameters:  T: observation time for estimating peak factor.
                        The default value is -1, that means the total
                        duration of MRP, Td, is to be used.
        """
        #-----------------------------------------------
        def split(X):
            # pairwise maxima: halves the series length on each call
            X1 = X[0::2]
            X2 = X[1::2]
            if not len(X1):
                return np.array([])
            if len(X1) > len(X2):
                X1 = X1[:-1]
            return np.max(np.vstack((X1, X2)), axis=0)
        #-----------------------------------------------
        if (T < 0.): T = self.Td
        if (T > self.Td): T = self.Td
        gX = np.zeros(self.NX)
        for kX, row in enumerate(self):
            Y = split(np.abs(row))
            nmax = np.array([])
            Xmax = np.array([])
            while np.size(Y):
                nmax = np.append(nmax,self.N/len(Y))   # samples per maximum
                Xmax = np.append(Xmax,Y.mean())        # mean maximum at this scale
                Y = split(Y)
            # extrapolate the mean maximum to an observation length of T*fs
            f = interp1d(np.log(nmax), Xmax, kind='quadratic')
            # NOTE(review): self.std() pools all series together; row.std()
            # may have been intended — confirm
            gX[kX] = float(f(np.log(T*self.fs))/self.std())
        return gX
#=============================================================================
# 6. Utilities
#=============================================================================
def attributes(self):
s1 = ' fs = {0:.1f}Hz\n Td = {1:.1f}s\n'
s2 = ' NX = {0}\n N = {1}\n M = {2}'
print(s1.format(self.fs, self.Td))
print(s2.format(self.NX, self.N, self.M))
#-----------------------------------------------------------------------------
    def t_axis(self):
        """Return the time axis: N points from 0 to Td (inclusive)."""
        return np.linspace(0, self.Td, self.N)
#-----------------------------------------------------------------------------
    def f_axis(self):
        """Return the one-sided frequency axis: M points from 0 to fs/2."""
        return np.linspace(0, self.fs/2, self.M)
#-----------------------------------------------------------------------------
    def T_axis(self):
        """Return the period/time-gap axis: M points from 0 to (M-1)/fs."""
        return np.linspace(0, (self.M - 1)/self.fs, self.M)
#-----------------------------------------------------------------------------
def subplot_shape(self):
sp0 = self.NX
sp1 = 1
if (sp0 > 12):
sp0 = 4
sp1 = 5
elif (sp0 == 8):
sp0 = 4
sp1 = 2
elif (sp0 > 6):
sp0 = 4
sp1 = 3
elif (sp0 > 3):
sp0 = 3
sp1 = 2
return sp0, sp1
#-----------------------------------------------------------------------------
    def plot_time(self, fig=0, figsize=(12, 8), axis_t=None):
        """
        Plot all processes in the time domain, one subplot per row.

        Parameters: fig:     matplotlib figure number.
                    figsize: figure size in inches, (width, height).
                    axis_t:  [t0, t1, y0, y1] axis limits; if None, the
                             full duration and 1.2 times the data range
                             are used.
        Returns:    list with the line lists created by plt.plot.
        """
        plt.figure(fig, figsize=figsize)
        plt.suptitle('Time Domain Amplitude', fontsize=14)

        t = self.t_axis()
        if (axis_t == None):
            # Default limits: full duration, 20% headroom on amplitude.
            axis_t = [0, self.Td, 1.2*self.min(), 1.2*self.max()]

        sp0, sp1 = self.subplot_shape()
        lines = []

        for kX, row in enumerate(self):
            plt.subplot(sp0,sp1,kX+1)
            lines.append(plt.plot(t, row, lw=0.5))
            plt.axis(axis_t)
            plt.ylabel('Amplitude {0}'.format(kX))
            plt.grid(True)

        plt.xlabel('Time (s)')
        return lines
#-----------------------------------------------------------------------------
    def plot_freq(self, fig=0, figsize=(12, 8), axis_f=None):
        """
        Plot the periodogram spectral estimate, one subplot per process.

        Parameters: fig:     matplotlib figure number.
                    figsize: figure size in inches, (width, height).
                    axis_f:  [f0, f1, S0, S1] axis limits; if None,
                             0 to Nyquist and 0 to max(Sx) are used.
        Returns:    list with the line lists created by plt.plot.
        """
        plt.figure(fig, figsize=figsize)
        plt.suptitle('Spectral Density Estimator', fontsize=14)

        Sx, fs = self.periodogram()
        f = self.f_axis()
        if (axis_f == None):
            axis_f = [0, self.fs/2, 0, Sx.max()]

        sp0, sp1 = self.subplot_shape()
        lines = []

        for kX, row in enumerate(Sx):
            plt.subplot(sp0,sp1,kX+1)
            lines.append(plt.plot(f, row, lw=0.5))
            plt.axis(axis_f)
            plt.ylabel('Power {0}'.format(kX))
            plt.grid(True)

        plt.xlabel('Frequency (Hz)')
        return lines
#-----------------------------------------------------------------------------
    def plot_corr(self, fig=0, figsize=(12, 8), axis_T=None):
        """
        Plot the normalized autocorrelation, one subplot per process.

        Parameters: fig:     matplotlib figure number.
                    figsize: figure size in inches, (width, height).
                    axis_T:  [T0, T1, R0, R1] axis limits; if None,
                             0 to Tmax and -1 to 1 are used.
        Returns:    list with the line lists created by plt.plot.
        """
        plt.figure(fig, figsize=figsize)
        plt.suptitle('Normalized Autocorrelation', fontsize=14)

        Rx, Tmax = self.autocorr()
        T = self.T_axis()
        if (axis_T == None):
            axis_T = [0, Tmax, -1, 1]

        sp0, sp1 = self.subplot_shape()
        lines = []

        for kX, row in enumerate(Rx):
            plt.subplot(sp0,sp1,kX+1)
            lines.append(plt.plot(T, row, lw=0.5))
            plt.axis(axis_T)
            plt.ylabel('Autocorrelation {0}'.format(kX))
            plt.grid(True)

        plt.xlabel('Time gap (s)')
        return lines
#-----------------------------------------------------------------------------
    def plot_pseudo(self, fig=0, figsize=(12, 8), axis_T=None):
        """
        Plot the pseudo spectrum, one subplot per process.

        Parameters: fig:     matplotlib figure number.
                    figsize: figure size in inches, (width, height).
                    axis_T:  [T0, T1, S0, S1] axis limits; if None,
                             0 to Tmax and 0 to max(Sx) are used.
        Returns:    list with the line lists created by plt.plot.
        """
        plt.figure(fig, figsize=figsize)
        plt.suptitle('Pseudo Spectrum', fontsize=14)

        Sx, Tmax = self.pseudo()
        T = self.T_axis()
        if (axis_T == None):
            axis_T = [0, Tmax, 0, Sx.max()]

        sp0, sp1 = self.subplot_shape()
        lines = []

        for kX, row in enumerate(Sx):
            plt.subplot(sp0,sp1,kX+1)
            lines.append(plt.plot(T, row, lw=0.5))
            plt.axis(axis_T)
            plt.ylabel('Peak response {0}'.format(kX))
            plt.grid(True)

        plt.xlabel('Vibration period (s)')
        return lines
#-----------------------------------------------------------------------------
    def to_file(self, filename, form='mrpy'):
        """
        Save MRPy object.

        Parameters: filename: file to be saved, including path
                    form:     data formatting. Options are
                              'mrpy'  - default gzip pickle saving
                              'excel' - excel through pandas
        Returns:    None. Exits via sys.exit on an unknown format.
        """
        if (form.lower() == 'mrpy'):
            # Pickle the array together with its sampling rate, gzipped.
            with gz.GzipFile(filename+'.gz', 'wb') as target:
                pk.dump((self, self.fs), target)

        elif (form.lower() == 'excel'):
            # One column per process, indexed by the time axis.
            data = pd.DataFrame(data = self.T,
                                index = self.t_axis())
            excel = pd.ExcelWriter(filename+'.xlsx')
            data.to_excel(excel,'MRPy')
            excel.save()

        else:
            sys.exit('Data formatting not available!')

        return None
#=============================================================================
# 7. Helpers
#=============================================================================
def resampling(ti, Xi):
    """
    Resampling irregular time step to fixed time step. The last
    element of ti is taken as total series duration. Series length
    is kept unchanged.

    Parameters: ti: irregular time where samples are available
                Xi: time series samples, taken at ti
    Returns:    MRPy instance with the uniformly resampled series.
    """
    # Normalize Xi to shape (NX, N): one process per row.
    if Xi.ndim == 1:
        Xi = Xi.reshape((1, Xi.shape[0]))
    elif Xi.shape[0] > Xi.shape[1]:
        Xi = Xi.T

    NX, N = Xi.shape                  # series length kept unchanged
    t0, t1 = ti[0], ti[-1]
    fs = N/(t1 - t0)                  # average sampling rate

    t = np.linspace(t0, t1, N)        # new, uniform time axis
    X = np.empty((NX, N))

    for row in range(NX):
        X[row, :] = interp1d(ti, Xi[row, :], kind='linear')(t)

    return MRPy(X, fs)
#-----------------------------------------------------------------------------
def check_fs(N, fs, Td):
    """
    Verifies if either fs or Td are given, and returns
    both properties verified.

    Parameters: N:  intended series length (forced to be even).
                fs: sampling rate in Hz, or None.
                Td: total duration in s, or None.
    Returns:    tuple (fs, Td) with Td recomputed as N/fs.
    Exits via sys.exit unless exactly one of fs or Td is given.
    """
    if N % 2:                  # enforce N to be even
        N = N - 1

    # Use identity tests against None instead of the original
    # "!= None" with the non-short-circuit bitwise '&' operator.
    if (fs is not None) and (Td is None):     # fs is available: keep it
        pass
    elif (fs is None) and (Td is not None):   # derive fs from duration
        fs = N/Td
    else:                                     # neither (or both) specified
        sys.exit('Either fs or Td must be specified!')

    return fs, N/fs
#-----------------------------------------------------------------------------
def Cx2Sx(Cx, Tmax):
    """
    Returns the spectral density corresponding to a given
    autocovariance function.

    Parameters: Cx:   autocovariances as ndarray (must have odd
                      length, otherwise it will be truncated by 1 and
                      the length of simulation will not be as expected!)
                      The largest dimension of Cx is assumed to be the
                      time gap axis.
                Tmax: largest time gap, associated to the last element
                      in array Cx. Defines process duration, which
                      will be approximately 2Tmax.
    Returns:    tuple (Sx, fs) with the one-sided densities and the
                implied sampling rate.
    """
    # Normalize Cx to shape (NX, M0): time gap along columns.
    if Cx.ndim == 1:
        Cx = Cx.reshape((1, Cx.shape[0]))
    elif Cx.shape[0] > Cx.shape[1]:
        Cx = Cx.T

    NX, M0 = Cx.shape
    M = M0 if (M0 % 2) else (M0 - 1)       # ensure M is odd
    Cx = Cx[:, :M]

    # If one sample was dropped, Tmax is corrected in proportion.
    fs = (M - 1)/((M/M0)*Tmax)

    Sx = np.empty((NX, M))
    for kX in range(NX):
        # Even extension of the autocovariance before the FFT.
        ext = np.hstack((Cx[kX, :], Cx[kX, -2:0:-1]))
        Sx[kX, :] = np.real(np.fft.fft(ext)*2/fs)[:M]

    return Sx, fs
#-----------------------------------------------------------------------------
def Sx2Cx(Sx, fs):
    """
    Returns the autocovariance corresponding to a given
    spectral density.

    Parameters: Sx: spectral density as ndarray (must have odd
                    length, otherwise it will be truncated by 1 and
                    the length of simulation will not be as expected!)
                    The largest dimension of Sx is assumed to be the
                    frequency axis.
                fs: sampling frequency in Hz
    Returns:    tuple (Cx, Tmax) with the autocovariances and the
                largest time gap.
    """
    # Normalize Sx to shape (NX, M0): frequency along columns.
    if Sx.ndim == 1:
        Sx = Sx.reshape((1, Sx.shape[0]))
    elif Sx.shape[0] > Sx.shape[1]:
        Sx = Sx.T

    NX, M0 = Sx.shape
    M = M0 if (M0 % 2) else (M0 - 1)       # ensure M is odd
    Tmax = (M - 1)/fs

    Cx = np.empty((NX, M))
    for kX in range(NX):
        # Even extension of the (full) density before the inverse FFT.
        ext = np.hstack((Sx[kX, :], Sx[kX, -2:0:-1]))
        Cx[kX, :] = np.real(np.fft.ifft(ext)*fs/2)[:M]

    return Cx, Tmax
#=============================================================================
#=============================================================================
| 32.604255
| 78
| 0.402223
|
4a097f1b5987aa6cf67b58960e983bbc61cd183b
| 9,081
|
py
|
Python
|
twine/package.py
|
Acidburn0zzz/twine
|
00d1d231d613bb5a912f2ae64c07bb232dad3091
|
[
"Apache-2.0"
] | 2
|
2018-05-03T10:24:52.000Z
|
2020-02-15T05:11:21.000Z
|
twine/package.py
|
Acidburn0zzz/twine
|
00d1d231d613bb5a912f2ae64c07bb232dad3091
|
[
"Apache-2.0"
] | null | null | null |
twine/package.py
|
Acidburn0zzz/twine
|
00d1d231d613bb5a912f2ae64c07bb232dad3091
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Ian Cordasco
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import hashlib
import io
import os
import subprocess
from hashlib import blake2b
from typing import IO, Dict, Optional, Sequence, Tuple, Union
import pkg_resources
import pkginfo
from twine import exceptions
from twine.wheel import Wheel
from twine.wininst import WinInst
# Maps a distribution type name onto the class used to parse that
# package's metadata.
DIST_TYPES = {
    "bdist_wheel": Wheel,
    "bdist_wininst": WinInst,
    "bdist_egg": pkginfo.BDist,
    "sdist": pkginfo.SDist,
}
# Maps a file extension onto its distribution type (a key of DIST_TYPES).
DIST_EXTENSIONS = {
    ".whl": "bdist_wheel",
    ".exe": "bdist_wininst",
    ".egg": "bdist_egg",
    ".tar.bz2": "sdist",
    ".tar.gz": "sdist",
    ".zip": "sdist",
}
# A metadata value is a plain string, a sequence of strings, or a
# (str, file object, str) tuple.
MetadataValue = Union[str, Sequence[str], Tuple[str, IO, str]]
class PackageFile:
    """A single distribution file staged for upload.

    Wraps the on-disk file together with its parsed metadata, its
    md5/sha256/blake2 digests and an optional detached GPG signature.
    """

    def __init__(
        self,
        filename: str,
        comment: Optional[str],
        metadata: pkginfo.Distribution,
        python_version: Optional[str],
        filetype: Optional[str],
    ) -> None:
        self.filename = filename
        self.basefilename = os.path.basename(filename)
        self.comment = comment
        self.metadata = metadata
        self.python_version = python_version
        self.filetype = filetype
        self.safe_name = pkg_resources.safe_name(metadata.name)
        self.signed_filename = self.filename + ".asc"
        self.signed_basefilename = self.basefilename + ".asc"
        self.gpg_signature: Optional[Tuple[str, bytes]] = None

        # Compute all supported digests in a single pass over the file.
        hasher = HashManager(filename)
        hasher.hash()
        hexdigest = hasher.hexdigest()

        self.md5_digest = hexdigest.md5
        self.sha2_digest = hexdigest.sha2
        self.blake2_256_digest = hexdigest.blake2

    @classmethod
    def from_filename(cls, filename: str, comment: Optional[str]) -> "PackageFile":
        """Build a PackageFile by sniffing the distribution type from the
        file extension and parsing the package metadata.

        Raises InvalidDistribution for an unknown extension or when the
        parsed metadata lacks a name or version.
        """
        # Extract the metadata from the package
        for ext, dtype in DIST_EXTENSIONS.items():
            if filename.endswith(ext):
                meta = DIST_TYPES[dtype](filename)
                break
        else:
            raise exceptions.InvalidDistribution(
                "Unknown distribution format: '%s'" % os.path.basename(filename)
            )

        # If pkginfo encounters a metadata version it doesn't support, it may
        # give us back empty metadata. At the very least, we should have a name
        # and version
        if not (meta.name and meta.version):
            raise exceptions.InvalidDistribution(
                "Invalid distribution metadata. Try upgrading twine if possible."
            )

        py_version: Optional[str]
        if dtype == "bdist_egg":
            # pkg_resources recovers the python version from the egg name.
            pkgd = pkg_resources.Distribution.from_filename(filename)
            py_version = pkgd.py_version
        elif dtype == "bdist_wheel":
            py_version = meta.py_version
        elif dtype == "bdist_wininst":
            py_version = meta.py_version
        else:
            py_version = None

        return cls(filename, comment, meta, py_version, dtype)

    def metadata_dictionary(self) -> Dict[str, MetadataValue]:
        """Flatten the parsed metadata, digests, comment and (if present)
        the GPG signature into a single dictionary.
        """
        meta = self.metadata
        data = {
            # identify release
            "name": self.safe_name,
            "version": meta.version,
            # file content
            "filetype": self.filetype,
            "pyversion": self.python_version,
            # additional meta-data
            "metadata_version": meta.metadata_version,
            "summary": meta.summary,
            "home_page": meta.home_page,
            "author": meta.author,
            "author_email": meta.author_email,
            "maintainer": meta.maintainer,
            "maintainer_email": meta.maintainer_email,
            "license": meta.license,
            "description": meta.description,
            "keywords": meta.keywords,
            "platform": meta.platforms,
            "classifiers": meta.classifiers,
            "download_url": meta.download_url,
            "supported_platform": meta.supported_platforms,
            "comment": self.comment,
            "md5_digest": self.md5_digest,
            "sha256_digest": self.sha2_digest,
            "blake2_256_digest": self.blake2_256_digest,
            # PEP 314
            "provides": meta.provides,
            "requires": meta.requires,
            "obsoletes": meta.obsoletes,
            # Metadata 1.2
            "project_urls": meta.project_urls,
            "provides_dist": meta.provides_dist,
            "obsoletes_dist": meta.obsoletes_dist,
            "requires_dist": meta.requires_dist,
            "requires_external": meta.requires_external,
            "requires_python": meta.requires_python,
            # Metadata 2.1
            "provides_extras": meta.provides_extras,
            "description_content_type": meta.description_content_type,
        }

        if self.gpg_signature is not None:
            data["gpg_signature"] = self.gpg_signature

        return data

    def add_gpg_signature(
        self, signature_filepath: str, signature_filename: str
    ) -> None:
        """Attach a detached signature read from disk; allowed only once."""
        if self.gpg_signature is not None:
            raise exceptions.InvalidDistribution("GPG Signature can only be added once")

        with open(signature_filepath, "rb") as gpg:
            self.gpg_signature = (signature_filename, gpg.read())

    def sign(self, sign_with: str, identity: Optional[str]) -> None:
        """Create a detached signature for the file with ``sign_with``
        (optionally as ``identity``) and attach it.
        """
        print(f"Signing {self.basefilename}")
        gpg_args: Tuple[str, ...] = (sign_with, "--detach-sign")
        if identity:
            gpg_args += ("--local-user", identity)
        gpg_args += ("-a", self.filename)
        self.run_gpg(gpg_args)

        self.add_gpg_signature(self.signed_filename, self.signed_basefilename)

    @classmethod
    def run_gpg(cls, gpg_args):
        """Run the signing command, falling back from 'gpg' to 'gpg2'
        when the executable is missing.
        """
        try:
            subprocess.check_call(gpg_args)
            return
        except FileNotFoundError:
            # Only fall back if the caller asked for the default 'gpg'.
            if gpg_args[0] != "gpg":
                raise exceptions.InvalidSigningExecutable(
                    "{} executable not available.".format(gpg_args[0])
                )

        print("gpg executable not available. Attempting fallback to gpg2.")
        try:
            subprocess.check_call(("gpg2",) + gpg_args[1:])
        except FileNotFoundError:
            print("gpg2 executable not available.")
            raise exceptions.InvalidSigningExecutable(
                "'gpg' or 'gpg2' executables not available. "
                "Try installing one of these or specifying an executable "
                "with the --sign-with flag."
            )
# Container for the three hex digests computed over a distribution file.
Hexdigest = collections.namedtuple("Hexdigest", ["md5", "sha2", "blake2"])
class HashManager:
    """Manage our hashing objects for simplicity.

    This will also allow us to better test this logic.
    Computes md5 (when not disabled by FIPS), sha256 and blake2b-256
    digests over a file in a single read pass.
    """

    def __init__(self, filename: str) -> None:
        """Initialize our manager and hasher objects."""
        self.filename = filename

        self._md5_hasher = None
        try:
            self._md5_hasher = hashlib.md5()
        except ValueError:
            # FIPs mode disables MD5
            pass

        self._sha2_hasher = hashlib.sha256()

        # NOTE(review): blake2b is imported directly from hashlib at the
        # top of this file, so it cannot be None here; this guard looks
        # like a leftover from an optional-import fallback — confirm.
        self._blake_hasher = None
        if blake2b is not None:
            self._blake_hasher = blake2b(digest_size=256 // 8)

    def _md5_update(self, content: bytes) -> None:
        # No-op when MD5 is unavailable (FIPS mode).
        if self._md5_hasher is not None:
            self._md5_hasher.update(content)

    def _md5_hexdigest(self) -> Optional[str]:
        if self._md5_hasher is not None:
            return self._md5_hasher.hexdigest()
        return None

    def _sha2_update(self, content: bytes) -> None:
        if self._sha2_hasher is not None:
            self._sha2_hasher.update(content)

    def _sha2_hexdigest(self) -> Optional[str]:
        if self._sha2_hasher is not None:
            return self._sha2_hasher.hexdigest()
        return None

    def _blake_update(self, content: bytes) -> None:
        if self._blake_hasher is not None:
            self._blake_hasher.update(content)

    def _blake_hexdigest(self) -> Optional[str]:
        if self._blake_hasher is not None:
            return self._blake_hasher.hexdigest()
        return None

    def hash(self) -> None:
        """Hash the file contents."""
        # Feed every hasher from one buffered read pass over the file.
        with open(self.filename, "rb") as fp:
            for content in iter(lambda: fp.read(io.DEFAULT_BUFFER_SIZE), b""):
                self._md5_update(content)
                self._sha2_update(content)
                self._blake_update(content)

    def hexdigest(self) -> Hexdigest:
        """Return the hexdigest for the file."""
        return Hexdigest(
            self._md5_hexdigest(), self._sha2_hexdigest(), self._blake_hexdigest(),
        )
| 34.397727
| 88
| 0.616892
|
4a097f3f7f758a039f6e0380c171aa99afd5a038
| 986
|
py
|
Python
|
tests/test_replace_word_tokens.py
|
Epse/mathparse
|
18faaa8c56628a917e450451ef047fe84752a234
|
[
"MIT"
] | 41
|
2017-02-11T00:07:05.000Z
|
2022-03-22T12:45:34.000Z
|
tests/test_replace_word_tokens.py
|
xKatooo/mathparse
|
fc7b4546dba6b5e19c36576a1bdd6df1393c3fb6
|
[
"MIT"
] | 22
|
2016-11-26T13:36:49.000Z
|
2020-07-04T20:56:43.000Z
|
tests/test_replace_word_tokens.py
|
xKatooo/mathparse
|
fc7b4546dba6b5e19c36576a1bdd6df1393c3fb6
|
[
"MIT"
] | 17
|
2017-09-30T21:49:45.000Z
|
2021-03-04T10:56:23.000Z
|
from unittest import TestCase
from mathparse import mathparse
class EnglishWordTokenTestCase(TestCase):
    """Word-token replacement for English ('ENG') math expressions."""

    def _convert(self, expression):
        # All cases below use the English language table.
        return mathparse.replace_word_tokens(expression, language='ENG')

    def test_addition(self):
        self.assertEqual(self._convert('1 plus 1'), '1 + 1')

    def test_thirty(self):
        self.assertEqual(self._convert('thirty + thirty'), '30 + 30')

    def test_thousand(self):
        # Note: this ends up with double parentheses because it is both a
        # scaled number ("thousand") and a word group ("five thousand")
        self.assertEqual(
            self._convert('five thousand + 30'), '((5 * 1000)) + 30'
        )

    def test_double_digit_multiplier_for_scale(self):
        self.assertEqual(
            self._convert('fifty thousand + 1'), '((50 * 1000)) + 1'
        )
| 29
| 74
| 0.628803
|
4a0983e387cdbb4514f8329987714ab9e754c940
| 260
|
py
|
Python
|
dvgutils/pipeline/metrics_pipe.py
|
jagin/dvg-utils
|
a7d19ead75398b09a9f1e146464cf4227f06a476
|
[
"MIT"
] | 7
|
2020-09-02T08:39:22.000Z
|
2021-10-13T18:13:04.000Z
|
dvgutils/pipeline/metrics_pipe.py
|
jagin/dvg-utils
|
a7d19ead75398b09a9f1e146464cf4227f06a476
|
[
"MIT"
] | null | null | null |
dvgutils/pipeline/metrics_pipe.py
|
jagin/dvg-utils
|
a7d19ead75398b09a9f1e146464cf4227f06a476
|
[
"MIT"
] | null | null | null |
from ..modules import Metrics
class MetricsPipe:
    """Pass-through pipeline stage that ticks a Metrics counter per item."""

    def __init__(self):
        # Start measuring as soon as the stage is created.
        self.metrics = Metrics().start()

    def __call__(self, data):
        # Calling the stage directly delegates to update().
        return self.update(data)

    def update(self, data):
        self.metrics.update()
        # The payload is handed through unchanged.
        return data
| 17.333333
| 40
| 0.619231
|
4a09843b1cfed4a8b659685fd647d9513e522435
| 804
|
py
|
Python
|
couchbase_utils/cb_tools/mc_stat.py
|
cgghali/TAF
|
1de8dec77ad781c373e18d9c285befd534ac203a
|
[
"Apache-2.0"
] | null | null | null |
couchbase_utils/cb_tools/mc_stat.py
|
cgghali/TAF
|
1de8dec77ad781c373e18d9c285befd534ac203a
|
[
"Apache-2.0"
] | null | null | null |
couchbase_utils/cb_tools/mc_stat.py
|
cgghali/TAF
|
1de8dec77ad781c373e18d9c285befd534ac203a
|
[
"Apache-2.0"
] | null | null | null |
from cb_tools.cb_tools_base import CbCmdBase
class McStat(CbCmdBase):
    """Thin wrapper for running the 'mcstat' command line tool."""

    def __init__(self, shell_conn, username="Administrator",
                 password="password"):
        CbCmdBase.__init__(self, shell_conn, "mcstat",
                           username=username, password=password)

    def reset(self, bucket_name):
        """
        Resets mcstat for the specified bucket_name
        :param bucket_name: Bucket name to reset stat
        """
        reset_cmd = "%s -u %s -P %s -b %s reset" \
                    % (self.cbstatCmd, self.username,
                       self.password, bucket_name)
        _, err_lines = self._execute_cmd(reset_cmd)
        if err_lines:
            raise Exception("".join(err_lines))
| 36.545455
| 64
| 0.506219
|
4a09844c4c1a49af4549a22e3402543979bbcc40
| 22,466
|
py
|
Python
|
env/lib/python3.7/site-packages/numpy/core/tests/test_deprecations.py
|
JacobMiske/nuclear-database-APIs
|
bc9fb6afb9aa0d98dde5d744d8f22b2791597e78
|
[
"MIT"
] | null | null | null |
env/lib/python3.7/site-packages/numpy/core/tests/test_deprecations.py
|
JacobMiske/nuclear-database-APIs
|
bc9fb6afb9aa0d98dde5d744d8f22b2791597e78
|
[
"MIT"
] | null | null | null |
env/lib/python3.7/site-packages/numpy/core/tests/test_deprecations.py
|
JacobMiske/nuclear-database-APIs
|
bc9fb6afb9aa0d98dde5d744d8f22b2791597e78
|
[
"MIT"
] | 1
|
2020-05-01T20:23:35.000Z
|
2020-05-01T20:23:35.000Z
|
"""
Tests related to deprecation warnings. Also a convenient place
to document how deprecations should eventually be turned into errors.
"""
from __future__ import division, absolute_import, print_function
import datetime
import sys
import operator
import warnings
import pytest
import shutil
import tempfile
import numpy as np
from numpy.testing import (
assert_raises, assert_warns, assert_, assert_array_equal
)
from numpy.core._multiarray_tests import fromstring_null_term_c_api
try:
import pytz
_has_pytz = True
except ImportError:
_has_pytz = False
class _DeprecationTestCase(object):
    """Base class providing the warning-capture machinery used by the
    deprecation tests below.

    Subclasses override ``message`` (a regex matched against the warning
    text) and/or ``warning_cls``.
    """
    # Just as warning: warnings uses re.match, so the start of this message
    # must match.
    message = ''
    warning_cls = DeprecationWarning

    def setup(self):
        # Record all warnings raised during each test.
        self.warn_ctx = warnings.catch_warnings(record=True)
        self.log = self.warn_ctx.__enter__()

        # Do *not* ignore other DeprecationWarnings. Ignoring warnings
        # can give very confusing results because of
        # https://bugs.python.org/issue4180 and it is probably simplest to
        # try to keep the tests cleanly giving only the right warning type.
        # (While checking them set to "error" those are ignored anyway)
        # We still have them show up, because otherwise they would be raised
        warnings.filterwarnings("always", category=self.warning_cls)
        warnings.filterwarnings("always", message=self.message,
                                category=self.warning_cls)

    def teardown(self):
        self.warn_ctx.__exit__()

    def assert_deprecated(self, function, num=1, ignore_others=False,
                          function_fails=False,
                          exceptions=np._NoValue,
                          args=(), kwargs={}):
        """Test if DeprecationWarnings are given and raised.

        This first checks if the function when called gives `num`
        DeprecationWarnings, after that it tries to raise these
        DeprecationWarnings and compares them with `exceptions`.
        The exceptions can be different for cases where this code path
        is simply not anticipated and the exception is replaced.

        Parameters
        ----------
        function : callable
            The function to test
        num : int
            Number of DeprecationWarnings to expect. This should normally be 1.
        ignore_others : bool
            Whether warnings of the wrong type should be ignored (note that
            the message is not checked)
        function_fails : bool
            If the function would normally fail, setting this will check for
            warnings inside a try/except block.
        exceptions : Exception or tuple of Exceptions
            Exception to expect when turning the warnings into an error.
            The default checks for DeprecationWarnings. If exceptions is
            empty the function is expected to run successfully.
        args : tuple
            Arguments for `function`
        kwargs : dict
            Keyword arguments for `function`
        """
        # reset the log
        self.log[:] = []

        if exceptions is np._NoValue:
            exceptions = (self.warning_cls,)

        # Phase 1: call with warnings recorded and count the matches.
        try:
            function(*args, **kwargs)
        except (Exception if function_fails else tuple()):
            pass

        # just in case, clear the registry
        num_found = 0
        for warning in self.log:
            if warning.category is self.warning_cls:
                num_found += 1
            elif not ignore_others:
                raise AssertionError(
                        "expected %s but got: %s" %
                        (self.warning_cls.__name__, warning.category))
        if num is not None and num_found != num:
            msg = "%i warnings found but %i expected." % (len(self.log), num)
            lst = [str(w) for w in self.log]
            raise AssertionError("\n".join([msg] + lst))

        # Phase 2: escalate matching warnings to errors and verify the
        # expected exception type is raised (or nothing, if empty).
        with warnings.catch_warnings():
            warnings.filterwarnings("error", message=self.message,
                                    category=self.warning_cls)
            try:
                function(*args, **kwargs)
                if exceptions != tuple():
                    raise AssertionError(
                            "No error raised during function call")
            except exceptions:
                if exceptions == tuple():
                    raise AssertionError(
                            "Error raised during function call")

    def assert_not_deprecated(self, function, args=(), kwargs={}):
        """Test that warnings are not raised.

        This is just a shorthand for:

        self.assert_deprecated(function, num=0, ignore_others=True,
                        exceptions=tuple(), args=args, kwargs=kwargs)
        """
        self.assert_deprecated(function, num=0, ignore_others=True,
                        exceptions=tuple(), args=args, kwargs=kwargs)
class _VisibleDeprecationTestCase(_DeprecationTestCase):
    # Same machinery as the base class, but expecting the user-facing
    # VisibleDeprecationWarning instead of DeprecationWarning.
    warning_cls = np.VisibleDeprecationWarning
class TestNonTupleNDIndexDeprecation(object):
    """Indexing with a non-tuple sequence (e.g. a list of index lists)
    emits a FutureWarning; advanced indexing with a plain integer list
    remains valid.
    """

    def test_basic(self):
        a = np.zeros((5, 5))
        with warnings.catch_warnings():
            warnings.filterwarnings('always')
            assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
            assert_warns(FutureWarning, a.__getitem__, [slice(None)])

            warnings.filterwarnings('error')
            assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
            assert_raises(FutureWarning, a.__getitem__, [slice(None)])

            # a a[[0, 1]] always was advanced indexing, so no error/warning
            a[[0, 1]]
class TestComparisonDeprecations(_DeprecationTestCase):
    """This tests the deprecation, for non-element-wise comparison logic.
    This used to mean that when an error occurred during element-wise comparison
    (i.e. broadcasting) NotImplemented was returned, but also in the comparison
    itself, False was given instead of the error.

    Also test FutureWarning for the None comparison.
    """

    message = "elementwise.* comparison failed; .*"

    def test_normal_types(self):
        for op in (operator.eq, operator.ne):
            # Broadcasting errors:
            self.assert_deprecated(op, args=(np.zeros(3), []))
            a = np.zeros(3, dtype='i,i')
            # (warning is issued a couple of times here)
            self.assert_deprecated(op, args=(a, a[:-1]), num=None)

            # ragged array comparison returns True/False
            a = np.array([1, np.array([1,2,3])], dtype=object)
            b = np.array([1, np.array([1,2,3])], dtype=object)
            self.assert_deprecated(op, args=(a, b), num=None)

    def test_string(self):
        # For two string arrays, strings always raised the broadcasting error:
        a = np.array(['a', 'b'])
        b = np.array(['a', 'b', 'c'])
        assert_raises(ValueError, lambda x, y: x == y, a, b)

        # The empty list is not cast to string, and this used to pass due
        # to dtype mismatch; now (2018-06-21) it correctly leads to a
        # FutureWarning.
        assert_warns(FutureWarning, lambda: a == [])

    def test_void_dtype_equality_failures(self):
        class NotArray(object):
            def __array__(self):
                raise TypeError

            # Needed so Python 3 does not raise DeprecationWarning twice.
            def __ne__(self, other):
                return NotImplemented

        self.assert_deprecated(lambda: np.arange(2) == NotArray())
        self.assert_deprecated(lambda: np.arange(2) != NotArray())

        struct1 = np.zeros(2, dtype="i4,i4")
        struct2 = np.zeros(2, dtype="i4,i4,i4")

        assert_warns(FutureWarning, lambda: struct1 == 1)
        assert_warns(FutureWarning, lambda: struct1 == struct2)
        assert_warns(FutureWarning, lambda: struct1 != 1)
        assert_warns(FutureWarning, lambda: struct1 != struct2)

    def test_array_richcompare_legacy_weirdness(self):
        # It doesn't really work to use assert_deprecated here, b/c part of
        # the point of assert_deprecated is to check that when warnings are
        # set to "error" mode then the error is propagated -- which is good!
        # But here we are testing a bunch of code that is deprecated *because*
        # it has the habit of swallowing up errors and converting them into
        # different warnings. So assert_warns will have to be sufficient.
        assert_warns(FutureWarning, lambda: np.arange(2) == "a")
        assert_warns(FutureWarning, lambda: np.arange(2) != "a")
        # No warning for scalar comparisons
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            assert_(not (np.array(0) == "a"))
            assert_(np.array(0) != "a")
            assert_(not (np.int16(0) == "a"))
            assert_(np.int16(0) != "a")

        for arg1 in [np.asarray(0), np.int16(0)]:
            struct = np.zeros(2, dtype="i4,i4")
            for arg2 in [struct, "a"]:
                for f in [operator.lt, operator.le, operator.gt, operator.ge]:
                    if sys.version_info[0] >= 3:
                        # py3
                        with warnings.catch_warnings() as l:
                            warnings.filterwarnings("always")
                            assert_raises(TypeError, f, arg1, arg2)
                            assert_(not l)
                    else:
                        # py2
                        assert_warns(DeprecationWarning, f, arg1, arg2)
class TestDatetime64Timezone(_DeprecationTestCase):
    """Parsing of datetime64 with timezones deprecated in 1.11.0, because
    datetime64 is now timezone naive rather than UTC only.

    It will be quite a while before we can remove this, because, at the very
    least, a lot of existing code uses the 'Z' modifier to avoid conversion
    from local time to UTC, even if otherwise it handles time in a timezone
    naive fashion.
    """

    def test_string(self):
        # Both an explicit offset and the 'Z' suffix are deprecated.
        self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
        self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))

    @pytest.mark.skipif(not _has_pytz,
                        reason="The pytz module is not available.")
    def test_datetime(self):
        # A timezone-aware datetime object is deprecated as well.
        tz = pytz.timezone('US/Eastern')
        dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
        self.assert_deprecated(np.datetime64, args=(dt,))
class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
    """View of non-C-contiguous arrays deprecated in 1.11.0.

    The deprecation will not be raised for arrays that are both C and F
    contiguous, as C contiguous is dominant. There are more such arrays
    with relaxed stride checking than without so the deprecation is not
    as visible with relaxed stride checking in force.
    """

    def test_fortran_contiguous(self):
        # A transposed 2x2 array is F- but not C-contiguous.
        self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,))
        self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
    """Assigning the 'data' attribute of an ndarray is unsafe as pointed
    out in gh-7093. Eventually, such assignment should NOT be allowed, but
    in the interests of maintaining backwards compatibility, only a Deprecation-
    Warning will be raised instead for the time being to give developers time to
    refactor relevant code.
    """

    def test_data_attr_assignment(self):
        a = np.arange(10)
        b = np.linspace(0, 1, 10)

        # The message is customized per-test before asserting.
        self.message = ("Assigning the 'data' attribute is an "
                        "inherently unsafe operation and will "
                        "be removed in the future.")
        self.assert_deprecated(a.__setattr__, args=('data', b.data))
class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
    """
    If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
    represent the number in base 2 (positive) or 2's complement (negative) form,
    the function used to silently ignore the parameter and return a representation
    using the minimal number of bits needed for the form in question. Such behavior
    is now considered unsafe from a user perspective and will raise an error in the future.
    """

    def test_insufficient_width_positive(self):
        # 10 needs 4 bits; width=2 is too narrow.
        args = (10,)
        kwargs = {'width': 2}

        self.message = ("Insufficient bit width provided. This behavior "
                        "will raise an error in the future.")
        self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)

    def test_insufficient_width_negative(self):
        # -5 needs 4 bits in two's complement; width=2 is too narrow.
        args = (-5,)
        kwargs = {'width': 2}

        self.message = ("Insufficient bit width provided. This behavior "
                        "will raise an error in the future.")
        self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
class TestNumericStyleTypecodes(_DeprecationTestCase):
    """
    Deprecate the old numeric-style dtypes, which are especially
    confusing for complex types, e.g. Complex32 -> complex64. When the
    deprecation cycle is complete, the check for the strings should be
    removed from PyArray_DescrConverter in descriptor.c, and the
    deprecated keys should not be added as capitalized aliases in
    _add_aliases in numerictypes.py.
    """

    def test_all_dtypes(self):
        deprecated_types = [
            'Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
            'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
            'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0'
            ]
        if sys.version_info[0] < 3:
            # These two aliases only existed on Python 2.
            deprecated_types.extend(['Unicode0', 'String0'])

        for dt in deprecated_types:
            self.assert_deprecated(np.dtype, exceptions=(TypeError,),
                                   args=(dt,))
class TestTestDeprecated(object):
    """Self-test for the assert_deprecated helper above."""

    def test_assert_deprecated(self):
        test_case_instance = _DeprecationTestCase()
        test_case_instance.setup()
        # A callable that warns nothing must fail the assertion...
        assert_raises(AssertionError,
                      test_case_instance.assert_deprecated,
                      lambda: None)

        def foo():
            warnings.warn("foo", category=DeprecationWarning, stacklevel=2)

        # ...while one that emits a DeprecationWarning must pass.
        test_case_instance.assert_deprecated(foo)
        test_case_instance.teardown()
class TestClassicIntDivision(_DeprecationTestCase):
    """
    See #7949. Deprecate the numeric-style dtypes with -3 flag in python 2
    if used for division
    List of data types: https://docs.scipy.org/doc/numpy/user/basics.types.html
    """

    def test_int_dtypes(self):
        #scramble types and do some mix and match testing
        deprecated_types = [
           'bool_', 'int_', 'intc', 'uint8', 'int8', 'uint64', 'int32', 'uint16',
           'intp', 'int64', 'uint32', 'int16'
            ]
        # Only meaningful on Python 2 run with the -3 warning flag.
        if sys.version_info[0] < 3 and sys.py3kwarning:
            import operator as op
            dt2 = 'bool_'
            for dt1 in deprecated_types:
                a = np.array([1,2,3], dtype=dt1)
                b = np.array([1,2,3], dtype=dt2)
                self.assert_deprecated(op.div, args=(a,b))
                dt2 = dt1
class TestNonNumericConjugate(_DeprecationTestCase):
    """
    Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
    which conflicts with the error behavior of np.conjugate.
    """

    def test_conjugate(self):
        # Numeric scalars: conjugate stays supported.
        for a in np.array(5), np.array(5j):
            self.assert_not_deprecated(a.conjugate)
        # String, datetime and structured dtypes: deprecated.
        for a in (np.array('s'), np.array('2016', 'M'),
                np.array((1, 2), [('a', int), ('b', int)])):
            self.assert_deprecated(a.conjugate)
class TestNPY_CHAR(_DeprecationTestCase):
    # 2017-05-03, 1.13.0
    def test_npy_char_deprecation(self):
        from numpy.core._multiarray_tests import npy_char_deprecation
        self.assert_deprecated(npy_char_deprecation)
        # The C helper still reports the legacy 'S1' typestring.
        assert_(npy_char_deprecation() == 'S1')


class TestPyArray_AS1D(_DeprecationTestCase):
    def test_npy_pyarrayas1d_deprecation(self):
        # The C API entry point is already removed and raises outright.
        from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
        assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)


class TestPyArray_AS2D(_DeprecationTestCase):
    def test_npy_pyarrayas2d_deprecation(self):
        # Same as AS1D: the 2-D variant raises NotImplementedError.
        from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
        assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
class Test_UPDATEIFCOPY(_DeprecationTestCase):
    """
    v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use
    WRITEBACKIFCOPY instead
    """

    def test_npy_updateifcopy_deprecation(self):
        from numpy.core._multiarray_tests import npy_updateifcopy_deprecation
        arr = np.arange(9).reshape(3, 3)
        # A transposed view is non-contiguous, triggering the copy path.
        v = arr.T
        self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,))
class TestDatetimeEvent(_DeprecationTestCase):
    # 2017-08-11, 1.14.0
    def test_3_tuple(self):
        for cls in (np.datetime64, np.timedelta64):
            # two valid uses - (unit, num) and (unit, num, den, None)
            self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
            self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))

            # trying to use the event argument, removed in 1.7.0, is deprecated
            # it used to be a uint8
            self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
            self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
            self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
            self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
class TestTruthTestingEmptyArrays(_DeprecationTestCase):
# 2017-09-25, 1.14.0
message = '.*truth value of an empty array is ambiguous.*'
def test_1d(self):
self.assert_deprecated(bool, args=(np.array([]),))
def test_2d(self):
self.assert_deprecated(bool, args=(np.zeros((1, 0)),))
self.assert_deprecated(bool, args=(np.zeros((0, 1)),))
self.assert_deprecated(bool, args=(np.zeros((0, 0)),))
class TestBincount(_DeprecationTestCase):
# 2017-06-01, 1.14.0
def test_bincount_minlength(self):
self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
class TestAlen(_DeprecationTestCase):
# 2019-08-02, 1.18.0
def test_alen(self):
self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3])))
class TestGeneratorSum(_DeprecationTestCase):
# 2018-02-25, 1.15.0
def test_generator_sum(self):
self.assert_deprecated(np.sum, args=((i for i in range(5)),))
class TestSctypeNA(_VisibleDeprecationTestCase):
# 2018-06-24, 1.16
def test_sctypeNA(self):
self.assert_deprecated(lambda: np.sctypeNA['?'])
self.assert_deprecated(lambda: np.typeNA['?'])
self.assert_deprecated(lambda: np.typeNA.get('?'))
class TestPositiveOnNonNumerical(_DeprecationTestCase):
# 2018-06-28, 1.16.0
def test_positive_on_non_number(self):
self.assert_deprecated(operator.pos, args=(np.array('foo'),))
class TestFromstring(_DeprecationTestCase):
# 2017-10-19, 1.14
def test_fromstring(self):
self.assert_deprecated(np.fromstring, args=('\x00'*80,))
class TestFromStringAndFileInvalidData(_DeprecationTestCase):
# 2019-06-08, 1.17.0
# Tests should be moved to real tests when deprecation is done.
message = "string or file could not be read to its end"
@pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
def test_deprecate_unparsable_data_file(self, invalid_str):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
with tempfile.TemporaryFile(mode="w") as f:
x.tofile(f, sep=',', format='%.2f')
f.write(invalid_str)
f.seek(0)
self.assert_deprecated(lambda: np.fromfile(f, sep=","))
f.seek(0)
self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
# Should not raise:
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
f.seek(0)
res = np.fromfile(f, sep=",", count=4)
assert_array_equal(res, x)
@pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
def test_deprecate_unparsable_string(self, invalid_str):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
x_str = "1.51,2,3.51,4{}".format(invalid_str)
self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))
# The C-level src can use not fixed size, but 0 terminated strings,
# so test that as well:
bytestr = x_str.encode("ascii")
self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))
with assert_warns(DeprecationWarning):
# this is slightly strange, in that fromstring leaves data
# potentially uninitialized (would be good to error when all is
# read, but count is larger then actual data maybe).
res = np.fromstring(x_str, sep=",", count=5)
assert_array_equal(res[:-1], x)
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# Should not raise:
res = np.fromstring(x_str, sep=",", count=4)
assert_array_equal(res, x)
class Test_GetSet_NumericOps(_DeprecationTestCase):
# 2018-09-20, 1.16.0
def test_get_numeric_ops(self):
from numpy.core._multiarray_tests import getset_numericops
self.assert_deprecated(getset_numericops, num=2)
# empty kwargs prevents any state actually changing which would break
# other tests.
self.assert_deprecated(np.set_numeric_ops, kwargs={})
assert_raises(ValueError, np.set_numeric_ops, add='abc')
class TestShape1Fields(_DeprecationTestCase):
warning_cls = FutureWarning
# 2019-05-20, 1.17.0
def test_shape_1_fields(self):
self.assert_deprecated(np.dtype, args=([('a', int, 1)],))
class TestNonZero(_DeprecationTestCase):
# 2019-05-26, 1.17.0
def test_zerod(self):
self.assert_deprecated(lambda: np.nonzero(np.array(0)))
self.assert_deprecated(lambda: np.nonzero(np.array(1)))
| 39.345009
| 91
| 0.635449
|
4a0984d93c808623cd12983a575ea1720382bd0f
| 394
|
py
|
Python
|
scripts/timezone.py
|
CWrath/tradekit
|
cf33024b3be68ea6e319e6f8309516bcb17fa9a5
|
[
"Apache-2.0"
] | null | null | null |
scripts/timezone.py
|
CWrath/tradekit
|
cf33024b3be68ea6e319e6f8309516bcb17fa9a5
|
[
"Apache-2.0"
] | null | null | null |
scripts/timezone.py
|
CWrath/tradekit
|
cf33024b3be68ea6e319e6f8309516bcb17fa9a5
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
import pytz
def is_dst():
"""Determine where or not Daylight Savings Time (DST)
is currently in effect"""
x = datetime(datetime.now().year, 1, 1, 0, 0, 0, tzinfo=pytz.timezone('US/Eastern'))
y = datetime.now(pytz.timezone('US/Eastern'))
# if DST is in effect, their offsets will be different
return not (y.utcoffset() == x.utcoffset())
| 28.142857
| 88
| 0.672589
|
4a098556922b3eaf12dac39e0624ff5ef7cb1a74
| 11,754
|
py
|
Python
|
tensorboard/summary.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
tensorboard/summary.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
tensorboard/summary.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Generation of summaries.
### Class for writing Summaries
@@FileWriter
@@FileWriterCache
### Summary Ops
@@tensor_summary
@@scalar
@@histogram
@@audio
@@image
@@merge
@@merge_all
## Utilities
@@get_summary_description
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import re as _re
import bisect
from six import StringIO
from PIL import Image
import numpy as np
# pylint: disable=unused-import
from .src.summary_pb2 import Summary
from .src.summary_pb2 import HistogramProto
_INVALID_TAG_CHARACTERS = _re.compile(r'[^-/\w\.]')
def _clean_tag(name):
# In the past, the first argument to summary ops was a tag, which allowed
# arbitrary characters. Now we are changing the first argument to be the node
# name. This has a number of advantages (users of summary ops now can
# take advantage of the tf name scope system) but risks breaking existing
# usage, because a much smaller set of characters are allowed in node names.
# This function replaces all illegal characters with _s, and logs a warning.
# It also strips leading slashes from the name.
if name is not None:
new_name = _INVALID_TAG_CHARACTERS.sub('_', name)
new_name = new_name.lstrip('/') # Remove leading slashes
if new_name != name:
logging.info(
'Summary name %s is illegal; using %s instead.' %
(name, new_name))
name = new_name
return name
def scalar(name, scalar, collections=None):
"""Outputs a `Summary` protocol buffer containing a single scalar value.
The generated Summary has a Tensor.proto containing the input Tensor.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A real numeric Tensor containing a single value.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
Returns:
A scalar `Tensor` of type `string`. Which contains a `Summary` protobuf.
Raises:
ValueError: If tensor has the wrong shape or type.
"""
name = _clean_tag(name)
if not isinstance(scalar, float):
# try conversion, if failed then need handle by user.
scalar = float(scalar)
return Summary(value=[Summary.Value(tag=name, simple_value=scalar)])
def histogram(name, values, collections=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
name = _clean_tag(name)
hist = make_histogram(values.astype(float))
return Summary(value=[Summary.Value(tag=name, histo=hist)])
def make_histogram_buckets():
v = 1E-12
buckets = []
neg_buckets = []
while v < 1E20:
buckets.append(v)
neg_buckets.append(-v)
v *= 1.1
# Should include DBL_MAX, but won't bother for test data.
return neg_buckets[::-1] + [0] + buckets
def make_histogram(values):
"""Convert values into a histogram proto using logic from histogram.cc."""
limits = make_histogram_buckets()
counts = [0] * len(limits)
for v in values:
idx = bisect.bisect_left(limits, v)
counts[idx] += 1
limit_counts = [(limits[i], counts[i]) for i in xrange(len(limits))
if counts[i]]
bucket_limit = [lc[0] for lc in limit_counts]
bucket = [lc[1] for lc in limit_counts]
sum_sq = sum(v * v for v in values)
return HistogramProto(min=min(values),
max=max(values),
num=len(values),
sum=sum(values),
sum_squares=sum_sq,
bucket_limit=bucket_limit,
bucket=bucket)
def image(tag, tensor):
"""Outputs a `Summary` protocol buffer with images.
The summary has up to `max_images` summary values containing images. The
images are built from `tensor` which must be 3-D with shape `[height, width,
channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The `name` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/image'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/image/0', '*name*/image/1', etc.
Args:
tag: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `uint8` or `float32` `Tensor` of shape `[height, width,
channels]` where `channels` is 1, 3, or 4.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
tag = _clean_tag(tag)
if not isinstance(tensor, np.ndarray):
# try conversion, if failed then need handle by user.
tensor = np.ndarray(tensor, dtype=np.float32)
shape = tensor.shape
height, width, channel = shape[0], shape[1], shape[2]
if channel == 1:
# walk around. PIL's setting on dimension.
tensor = np.reshape(tensor, (height, width))
image = make_image(tensor, height, width, channel)
return Summary(value=[Summary.Value(tag=tag, image=image)])
def make_image(tensor, height, width, channel):
"""Convert an numpy representation image to Image protobuf"""
image = Image.fromarray(tensor)
output = StringIO.StringIO()
image.save(output, format='PNG')
image_string = output.getvalue()
output.close()
return Summary.Image(height=height,
width=width,
colorspace=channel,
encoded_image_string=image_string)
'''TODO(zihaolucky). support more summary types later.
def audio(name, tensor, sample_rate, max_outputs=3, collections=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/audio/0', '*name*/audio/1', etc
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
# pylint: enable=line-too-long
name = _clean_tag(name)
with _ops.name_scope(name, None, [tensor]) as scope:
# pylint: disable=protected-access
sample_rate = _ops.convert_to_tensor(
sample_rate, dtype=_dtypes.float32, name='sample_rate')
val = _gen_logging_ops._audio_summary_v2(
tag=scope.rstrip('/'),
tensor=tensor,
max_outputs=max_outputs,
sample_rate=sample_rate,
name=scope)
_collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
def merge(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
"""
# pylint: enable=line-too-long
name = _clean_tag(name)
with _ops.name_scope(name, 'Merge', inputs):
# pylint: disable=protected-access
val = _gen_logging_ops._merge_summary(inputs=inputs, name=name)
_collect(val, collections, [])
return val
def merge_all(key=_ops.GraphKeys.SUMMARIES):
"""Merges all summaries collected in the default graph.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_ops = _ops.get_collection(key)
if not summary_ops:
return None
else:
return merge(summary_ops)
def get_summary_description(node_def):
"""Given a TensorSummary node_def, retrieve its SummaryDescription.
When a Summary op is instantiated, a SummaryDescription of associated
metadata is stored in its NodeDef. This method retrieves the description.
Args:
node_def: the node_def_pb2.NodeDef of a TensorSummary op
Returns:
a summary_pb2.SummaryDescription
Raises:
ValueError: if the node is not a summary op.
"""
if node_def.op != 'TensorSummary':
raise ValueError("Can't get_summary_description on %s" % node_def.op)
description_str = _compat.as_str_any(node_def.attr['description'].s)
summary_description = SummaryDescription()
_json_format.Parse(description_str, summary_description)
return summary_description
'''
| 38.537705
| 88
| 0.68683
|
4a0985fdaaccb11b5f0c547d8b379ad95d280dab
| 1,779
|
py
|
Python
|
setup.py
|
Zapgram/treq
|
86cca01d9d77bf5b90bf2c34e5d6b3dc90ada798
|
[
"MIT"
] | 20
|
2015-01-06T11:13:26.000Z
|
2019-12-04T02:22:03.000Z
|
setup.py
|
Zapgram/treq
|
86cca01d9d77bf5b90bf2c34e5d6b3dc90ada798
|
[
"MIT"
] | 13
|
2015-01-02T17:46:30.000Z
|
2015-03-31T12:57:14.000Z
|
setup.py
|
CamilionEU/treq
|
b83c379ef58286be803e86927086d1b49d2c5245
|
[
"MIT"
] | 10
|
2015-01-02T23:17:05.000Z
|
2021-06-05T12:03:25.000Z
|
from setuptools import find_packages, setup
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Framework :: Twisted",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
if __name__ == "__main__":
with open('README.rst') as f:
readme = f.read()
setup(
name="treq",
packages=find_packages('src'),
package_dir={"": "src"},
setup_requires=["incremental"],
use_incremental=True,
install_requires=[
"incremental",
"requests >= 2.1.0",
"six",
"Twisted[tls] >= 16.4.0 ; python_version < '3.7'",
"Twisted[tls] >= 18.7.0 ; python_version >= '3.7'",
"attrs",
],
extras_require={
"dev": [
"mock",
"pep8",
"pyflakes",
"sphinx",
"httpbin==0.5.0",
],
},
package_data={"treq": ["_version"]},
author="David Reid",
author_email="dreid@dreid.org",
maintainer="Amber Brown",
maintainer_email="hawkowl@twistedmatrix.com",
classifiers=classifiers,
description="A requests-like API built on top of twisted.web's Agent",
license="MIT/X",
url="https://github.com/twisted/treq",
long_description=readme
)
| 31.210526
| 78
| 0.541315
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.