index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
23,300 | 0ee592a699fff54acf85eb1bfd48fba6ec58d560 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# CPython is the most widely used Python interpreter; all the tutorial code
# below is meant to run under CPython (Python 2).
# NOTE(review): this file is a transcript of interactive-session notes, not a
# runnable script -- exit() on the next line stops execution immediately, and
# several later lines (bare names, deliberate errors) only make sense typed
# into the REPL.
exit()
print 'The quick brown fox', 'jumps over', 'the lazy dog'#The quick brown fox jumps over the lazy dog
print 'The quick brown fox','jumps over','the lazy dog'#The quick brown fox jumps over the lazy dog
print 100 + 200
print '100 + 200 =', 100 + 200
name = raw_input()# after pressing Enter, the interactive prompt waits for your input
name
'Michael'
print name
Michael
# raw_input and print are the most basic input/output on the command line
# the following two lines behave correctly only when saved as .py and run from the command line
name = raw_input('please enter your name:')
print 'hello,', name
# hexadecimal uses the 0x prefix and digits 0-9, a-f, e.g. 0xff00
1.23e8 #123000000.0
1.2e-5 #1.2e-05
1.3e-3 #0.0013
"I'm OK"
'I\'m \"OK\"' # 'I\'m "OK"!'?????
'\''
print '\''
print '\\\n\\'
'\\\n\\' # the two results differ: print interprets escapes, the REPL echoes the repr
print 'I\'m learning\nPython.'
'I\'m learning\nPython.'
'\\\t\\'
print '\\\t\\'
print r'\\\t\\'
print '''line1
line2
line3
''' # note how the whitespace is displayed; an r prefix can also be added in front
not True
True and False #False
True or False #True
age = 18 # without this line, the code below would report that age is undefined
if age>=18:
    print 'adult'
else:
    print 'teenager'
# variable names combine upper/lower-case letters, digits and _, must not start with a digit; any data type can be assigned to a variable
x=10
x=x+2
# mutable vs immutable objects
a = 'ABC'
b = a
a = 'XYZ'
print b # b is 'ABC'
a=[1,2,3]
b=a
c=a.pop() # c is 3; a becomes [1,2] and so does b (both names refer to the same list)
10/3
10.0/3
10%3
ord('A')
chr(65)
'Hello, %s' %'world'
'Hi, %s, you have $%d.' %('Michael', 1000000)# %d integer; %f float; %s string; %x hex integer
'Age: %s. Gender: %s' % (25, True)# %s converts any data type to a string
'%.2f' % 3.1415926
'%2d-%02d' % (3, 1)
'%'
'%%'
print '%%'
'growth rate: %d %%' % 7
'growth rate: %d %' % 7 # this one is an error: a lone % in a format string
list = ['Michael', 'Bob', 'Tracy']# a list element can itself be another list
len(list)
list[0]
list.append()# NOTE(review): append requires an argument; as written this raises TypeError
list.insert(1,'Jack')
list.pop()# removes the last element
list[1]='Sarah'# replaces the element in place
L = []
len(L) # == 0
tuple=(1,)
tuple=()
t=('a', 'b', ['A', 'B'])
t[2][0]
t[2][0]='X'# assigning inside the nested (mutable) list is allowed
# if the condition is True the indented print line runs; otherwise nothing happens
if x:
    print 'True'# any non-zero number, non-empty string, non-empty list etc. counts as True
sum = 0
for n in [1,2,3,4,5,6,7,8,9]:# note:
    sum = sum + n
print sum # the line above must be indented, otherwise it errors; the same pattern works for accumulating a product
sum = 0
for x in range(101):
    sum = sum + x
print sum# sum of all positive integers up to 100
sum=0
n=99
while n>0:
    sum=sum+n
    n=n-2
print sum # sum of all odd numbers below 100
# raw_input() always returns its content as a string; '1982' < 2000 is False (Python 2 compares the types)
birth = int(raw_input('birth: ')) # (per the author) errors when run as a .py from the command line; the version below does not
if birth<2000:
    print '00前'
else:
    print '00后'
birth = raw_input('birth: ')
if int(birth) < 2000:
    print '00前'
else:
    print '00后'
sum=0
x=1
while x>0:
    sum=sum+x
    x=x+1
print sum # infinite loop (x only grows); press Ctrl+C to exit
# dict and set
d = {'Michael': 95, 'Bob': 75, 'Tracy': 85}
d['Michael']
d['Adam']= 67 # data can be stored via a key; use `in` to test whether a key exists
d.get('Adam') #67
d.get('Adam',None)
d.get('Tom',None)# same result as d.get('Tom'); the default return is None
d.get('Tom', -1)
key = [1, 2, 3]
d[key] = 'a list' # unhashable type: cannot be stored; an object used as a key must be immutable
d[2]= 89 # strings, integers etc. are immutable and therefore safe to use as keys
# set
s = set([1, 1, 2, 2, 3, 3]) # duplicate elements are filtered out automatically in a set
s.add(4)
s.remove(1)
s.add([1,3]) # raises an error: mutable objects cannot be put into a set
x =[4, 6, 2, 1, 7, 9]
y = x[:]
y.pop()
y
x # x and y end up different (y is a copy)
x =[4, 6, 2, 1, 7, 9]
y = x
y.pop()
y
x # x and y are still identical (both names refer to the same list)
# immutable objects
# list is mutable, so list.sort() works in place; compare with L=sorted(list)
list= ['c', 'b', 'a']
list.sort()
# note below: immutable objects
a = 'abc'
b = a.replace('a', 'A')
b # 'Abc'
a # 'abc', unchanged! methods never mutate an immutable object; they create and return a new object
a = 'abc'
b=a
c = a.replace('a', 'A')
c # 'Abc'
a # 'abc', unchanged! methods never mutate an immutable object; they create and return a new object
b # 'abc', unchanged! still the same object as a; replace returned a new string
|
23,301 | a87824394f27860fcf2ea8e953cd5cb1ede300a4 | import argparse
import sys
class ArgExample(object):
    """Minimal argparse demo (after https://pymotw.com/2/argparse/#module-argparse)."""

    def get_parser(self):
        """Build the sample parser.

        Uses the GNU/POSIX syntax; note that a short option's value may be
        glued to it, as in '-bval'.
        """
        p = argparse.ArgumentParser(description='Short sample app')
        p.add_argument('-a', action="store_true", default=False)
        p.add_argument('-b', action="store", dest="b")
        p.add_argument('-c', action="store", dest="c", type=int)
        return p
def main(argv):
    """Run the demo: parse a canned argv first, then the caller's argv."""
    parser = ArgExample().get_parser()

    # Canned example ('-bval' shows the glued short-option value).
    demo = ['-a', '-bval', '-c', '3']
    print(demo)
    print(parser.parse_args(demo))

    # Arguments supplied by the caller (typically sys.argv[1:]).
    print(argv)
    print(parser.parse_args(argv))
# Script entry point: forward the command-line arguments (minus the program name).
if __name__ == '__main__':
    main(sys.argv[1:])
|
23,302 | 27c2a76a3065edd846ec936a1ecca2371030ec24 | іmроrt соdeсs аs сs
# NOTE(review): the original source was written entirely with Cyrillic
# homoglyph characters (e.g. "іmроrt" is not the keyword "import"), so the
# file could never be parsed.  It is reconstructed here in plain ASCII.
# Further original defects fixed: the frequency-normalisation loops ran
# BEFORE any counting (over empty dicts), the bigram dict was shadowed by a
# same-named string inside the loop, and the final prints referenced the
# undefined names `bigram`, `H1L` and `H2b`.
import math


def filter_line(text_line):
    """Replace every character outside the allowed alphabet (plus space) with a space."""
    # NOTE(review): the alphabet string below is copied verbatim; the Latin
    # 'n' in it appears to be a garbled Cyrillic letter -- confirm.
    abc = "а б в г д е ё ж з и й к л м н о n р с т у ф х ц ч ш щ ъ ы ь э ю я".split()
    abc.append(' ')
    for ltr in text_line:
        if ltr not in abc:
            text_line = text_line.replace(ltr, ' ')
    return text_line


def entropy(freq_dict):
    """Shannon entropy (bits) of a {symbol: probability} mapping."""
    total = 0.0
    for sym in freq_dict:
        total += freq_dict[sym] * math.log2(freq_dict[sym])
    return -total


file_name = input('Введите название файла: ')
spaces = int(input('Сколько должно быть nробелов? (0, 1): '))
steps = int(input('Какой должен быть шаг биграммы? (1, 2): ')) - 1

letter = dict()       # single-letter counts, then probabilities
bigram = dict()       # bigram counts, then probabilities
count_let = 0
count_big = 0
prev_char = 0
is_double = 1

with open(file_name, encoding='utf-8') as text:
    for line in text:
        line = filter_line(line.lower())
        line = line.strip()
        line = ' '.join(line.split())
        if spaces == 0:
            line = line.replace(' ', '')
        for sym in line:
            letter[sym] = letter.get(sym, 0) + 1
            if steps:
                # Step-2 bigrams: only close a pair on odd positions.
                is_double = count_let % 2 == 1
            if count_let != 0 and is_double:
                pair = prev_char + sym
                bigram[pair] = bigram.get(pair, 0) + 1
                prev_char = sym
                count_big = count_big + 1
            elif not is_double or not steps:
                prev_char = sym
            count_let = count_let + 1

# Normalise raw counts into probabilities (must happen AFTER counting).
for k in letter:
    letter[k] = letter[k] / count_let
for k in bigram:
    bigram[k] = bigram[k] / count_big

print('Ваша биграмма:', bigram)
print('Ваши буквы:', letter)
# H1 = per-letter entropy; H2 = per-letter entropy estimated from bigrams.
print('Entropy:', entropy(letter), entropy(bigram) / 2)
input()
|
23,303 | 8eef2b0ae11967a25d4929ea87aaf052766592f2 | from django.db import models
import datetime
from django.contrib.auth.models import User
from ckeditor.fields import RichTextField
# Create your models here.
class Article(models.Model):
    """A publishable article with rich-text content and optional images."""

    title = models.CharField(max_length=300)
    is_published = models.BooleanField(default=True)
    is_archived = models.BooleanField(default=False)
    pub_date = models.DateTimeField('publish date')
    author = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE)
    image = models.FileField(default=None)
    # Fix: `content` was declared twice (a CharField(max_length=10000) and
    # then this RichTextField).  Only the later assignment ever took effect
    # as the class attribute, so the dead CharField declaration is removed.
    content = RichTextField()
    preview_image = models.FileField(null=True, blank=True)

    def __str__(self):
        return self.title
class Category(models.Model):
    """A named category attached to a single article."""
    name = models.CharField(max_length=200)
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
|
23,304 | d4fa1f61b3f955a696421067562001a451667e40 | import pytest
import mro
import connection as con
import psycopg2
from datetime import datetime, date, time
import uuid
xfail = pytest.mark.xfail
class test_type(object):
    """Column descriptors mirroring the generated `test_type` table.

    NOTE(review): the positional arguments look like (name, column_index[,
    max_length]) -- confirm against mro.data_types.
    """
    varchar = mro.data_types.varchar('varchar', 0, 15, not_null=False, is_updateable=True, get_value_on_insert=False, is_primary_key=False)
    varchar2 = mro.data_types.varchar('varchar2', 1, 20, not_null=False, is_updateable=True, get_value_on_insert=False, is_primary_key=False)
    varchar_not_null = mro.data_types.varchar('varchar_not_null', 2, 15, not_null=True, is_updateable=True, get_value_on_insert=False, is_primary_key=False)
    varchar_not_updateable = mro.data_types.varchar('varchar_not_updateable', 3, 15, not_null=False, is_updateable=False, get_value_on_insert=False, is_primary_key=False)
    integer = mro.data_types.integer('integer', 4, not_null=False, is_updateable=True, get_value_on_insert=False, is_primary_key=False)
    boolean = mro.data_types.boolean('boolean', 5, not_null=False, is_updateable=True, get_value_on_insert=False, is_primary_key=False)
@pytest.fixture(scope="module")
def connection(request):
    """Module-scoped fixture: drop and recreate the `test_type` table, then
    load the mro model layer.

    NOTE(review): the raw psycopg2 connection is committed and CLOSED before
    being returned -- the tests receive it only as a dependency marker and do
    all real work through mro, which reconnects via the lambda below.
    """
    connection = con.connect()
    request.addfinalizer(mro.disconnect)
    cursor = connection.cursor()
    con.drop_tables()
    # TODO re-add once custom enum types are supported, currently only custom composite types are
    # cursor.execute("""DROP TYPE IF EXISTS call_outcome""")
    # cursor.execute("""CREATE TYPE call_outcome AS ENUM ('No Answer', 'Answer Machine', 'Hung Up', 'Busy', 'Sale')""")
    cursor.execute("""create table test_type (
        id serial primary key,
        "varchar" varchar(15),
        "varchar2" varchar(20),
        "varchar_not_null" varchar(20) not null default 'abc',
        "integer" integer,
        "boolean" boolean,
        "time" time,
        "date" date,
        "timestamp" timestamp,
        "json" json,
        "jsonb" jsonb,
        "text" text default E'two\nlines',
        "double" double precision,
        "real" real,
        "uuid" uuid,
        "bytea" bytea,
        "oid" oid);""")
    # "custom_enum" call_outcome);""")
    connection.commit()
    connection.close()
    mro.load_database(lambda: con.connect())
    return connection
class TestDataTypes(object):
    """Behavioural tests for each mro column data type on the test_type table."""

    def test_varchar(self, connection):
        obj = mro.test_type(varchar = 'init')
        message = 'sldkhfaskjf ashdkfjahs dfkjashd'
        with pytest.raises(ValueError) as excinfo:
            obj.varchar = message
        message = 'Hey'
        # NOTE(review): `message` was just reassigned, so this asserts
        # len('Hey') == 3 rather than the length of the 31-char string that
        # actually raised -- confirm whether the reassignment is intentional.
        assert excinfo.value.args[0] == 'Value length [{}] should not exceed [{}]'.format(len(message), 15)
        # Assigning a non-str object must raise TypeError.
        message = mro.test_type(varchar = 'init')
        with pytest.raises(TypeError) as excinfo:
            obj.varchar = message
        assert excinfo.value.args[0] == 'Value should be of type [str] not [{}]'.format(message.__class__ .__name__)
        message = 'Hello World!'
        obj.varchar = message
        assert obj.varchar == message

    def test_multi_object(self, connection):
        # Two attributes of one object, and the same attribute on two
        # objects, must not share storage.
        obj = mro.test_type(varchar = 'init')
        obj2 = mro.test_type(varchar = 'init')
        obj.varchar = '1'
        obj.varchar2 = '2'
        assert obj.varchar != obj.varchar2
        obj.varchar = '1'
        obj2.varchar = '2'
        assert obj.varchar != obj2.varchar

    def test_not_null(self, connection):
        obj = mro.test_type(varchar = 'init')
        # The database default ('abc') applies when the column is not supplied.
        assert obj.varchar_not_null == 'abc'
        with pytest.raises(ValueError) as excinfo:
            obj.varchar_not_null = None
        assert excinfo.value.args[0] == 'The value of [{}] cannot be null.'.format('varchar_not_null')

    @xfail
    def test_not_updateable(self, connection):
        # Not implemented yet; everything after the raise is the intended spec.
        raise Exception("Not implemented")
        obj = mro.test_type(varchar = 'init')
        obj.varchar = '1'
        assert obj.varchar == '1'
        with pytest.raises(PermissionError) as excinfo:
            obj.varchar_not_updateable = '2'
        assert excinfo.value.args[0] == 'The value of [{}] is not updateable.'.format('varchar_not_updateable')

    def test_integer(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.integer = 1
        assert obj.integer == 1
        with pytest.raises(TypeError) as excinfo:
            obj.integer = '1'
        assert excinfo.value.args[0] == 'Value should be of type [int] not [{}]'.format(str.__name__)

    def test_boolean(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.boolean = True
        assert obj.boolean == True
        with pytest.raises(TypeError) as excinfo:
            obj.boolean = 1
        assert excinfo.value.args[0] == 'Value should be of type [bool] not [{}]'.format(int.__name__)

    def test_time(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.time = time(17, 20)
        assert obj.time == time(17, 20)
        with pytest.raises(TypeError) as excinfo:
            obj.time = datetime(2015, 12, 21, 17, 20)
        assert excinfo.value.args[0] == 'Value should be of type [time] not [{}]'.format(datetime.__name__)

    def test_date(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.date = date(2015, 12, 21)
        assert obj.date == date(2015, 12, 21)
        with pytest.raises(TypeError) as excinfo:
            obj.date = datetime(2015, 12, 21, 17, 20)
        assert excinfo.value.args[0] == 'Value should be of type [date] not [{}]'.format(datetime.__name__)

    def test_datetime(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.timestamp = datetime(2015, 12, 21, 17, 20)
        assert obj.timestamp == datetime(2015, 12, 21, 17, 20)
        with pytest.raises(TypeError) as excinfo:
            obj.timestamp = date(2015, 12, 21)
        assert excinfo.value.args[0] == 'Value should be of type [datetime] not [{}]'.format(date.__name__)

    def test_json(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.json = '{"key": "value"}'
        assert obj.json == '{"key": "value"}'
        # Invalid JSON is rejected by PostgreSQL itself, hence DataError.
        with pytest.raises(psycopg2.DataError) as excinfo:
            obj.json = 'this is just text'
        assert excinfo.value.args[0].startswith('invalid input syntax for type json')

    def test_jsonb(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.jsonb = '{"key": "value"}'
        assert obj.jsonb == '{"key": "value"}'
        with pytest.raises(psycopg2.DataError) as excinfo:
            obj.jsonb = 'this is just text'
        assert excinfo.value.args[0].startswith('invalid input syntax for type json')

    def test_text(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.text = '1'
        assert obj.text == '1'
        with pytest.raises(TypeError) as excinfo:
            obj.text = 1
        assert excinfo.value.args[0] == 'Value should be of type [str] not [{}]'.format(int.__name__)

    def test_double(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.double = 2.0
        assert obj.double == 2.0
        with pytest.raises(TypeError) as excinfo:
            obj.double = '1'
        assert excinfo.value.args[0] == 'Value should be of type [float] not [{}]'.format(str.__name__)

    def test_real(self, connection):
        obj = mro.test_type(varchar = 'init')
        obj.real = 2.0
        assert obj.real == 2.0
        with pytest.raises(TypeError) as excinfo:
            obj.real = '1'
        assert excinfo.value.args[0] == 'Value should be of type [float] not [{}]'.format(str.__name__)

    @xfail
    def test_uuid(self, connection):
        obj = mro.test_type(varchar = 'init')
        # NOTE(review): uuid.uuid4() is random, so the two calls below can
        # never be equal -- presumably part of why this is marked xfail.
        obj.uuid = uuid.uuid4()
        assert obj.uuid == uuid.uuid4()
        with pytest.raises(TypeError) as excinfo:
            obj.uuid = 'fail'
        assert excinfo.value.args[0] == 'Value should be of type [uuid] not [{}]'.format(str.__name__)

    @xfail
    def test_custom_enum(self, connection):
        # Custom enum columns are not supported yet (see the fixture's TODO).
        obj = mro.test_type(varchar='init')
        obj.custom_enum = 'Busy'
        assert obj.custom_enum == 'Busy'
        with pytest.raises(TypeError) as excinfo:
            obj.custom_enum = 'Not Valid'
        assert excinfo.value.args[0] == 'Value should be of type [custom_enum] not [{}]'.format(str.__name__)

    def test_bytea(self, connection):
        bytea = 'my byte array'.encode('utf-8')
        obj = mro.test_type(bytea=bytea)
        obj.bytea = bytea
        assert obj.bytea == bytea
        with pytest.raises(TypeError) as excinfo:
            obj.bytea = 'Not Valid'
        assert excinfo.value.args[0] == 'Value should be of type [bytes] not [{}]'.format(str.__name__)

    def test_oid(self, connection):
        obj = mro.test_type(varchar='init')
        obj.oid = 1000
        assert obj.oid == 1000
        with pytest.raises(TypeError) as excinfo:
            obj.oid = 'randomstring'
        assert excinfo.value.args[0] == 'Value should be of type [int] not [{}]'.format(str.__name__)
if __name__ == '__main__':
#pytest.main([__file__, '-rw'])
pytest.main([__file__ + '::TestDataTypes::test_bytea']) |
23,305 | 2f7ed04d88e938ee3e0e8ce7dae2ac85564b58e9 | #!/usr/bin/python
import argparse
import os
import sys
import matplotlib
#matplotlib.use('agg')
from matplotlib import pyplot as plt
from PIL import Image
import numpy as np
import tensorflow as tf
import core.utilities.tfrecord_utils as tfrecord_utils
# Command-line interface for the omniglot TFRecord conversion script.
commandLineParser = argparse.ArgumentParser(description='Compute features from labels.')
commandLineParser.add_argument('data_path', type=str,
                               help='directory where the original data to be loaded is saved')
commandLineParser.add_argument('target_path', type=str,
                               help='where to save tfrecords')
# Fix: the help text for 'size' was copy-pasted from 'target_path'
# ("where to save tfrecords"); it actually sets the resize dimension.
commandLineParser.add_argument('size', type=int,
                               help='side length in pixels to which each image is resized')
def main(argv=None):
    """Resize every image under data_path and shard them into TFRecord files.

    Also writes a 4x8 preview collage ('omniglot.png') built from the first
    32 accepted images, and appends names of failing files to an 'errors'
    file.  Python 2 script (print statements, xrange, integer division).
    """
    args = commandLineParser.parse_args()
    # Record the exact command line for reproducibility.
    if not os.path.isdir('CMDs'):
        os.mkdir('CMDs')
    with open('CMDs/step_process_omniglot_data.txt', 'a') as f:
        f.write(' '.join(sys.argv) + '\n')
        f.write('--------------------------------\n')
    if not os.path.isdir(args.target_path):
        os.makedirs(args.target_path)
    collage = np.zeros(shape=[4 * args.size, 8 * args.size])
    dirs = os.listdir(args.data_path)
    len_dirs = len(dirs)
    for item, j in zip(dirs, xrange(len_dirs)):
        if j % 15000 == 0:
            # Roll over to a new shard every 15000 images.
            try:
                writer.close()
            except:
                # On the first iteration `writer` does not exist yet
                # (NameError); this bare except deliberately absorbs it.
                pass
            # Python 2 integer division gives the shard index here.
            writer = tf.python_io.TFRecordWriter(os.path.join(args.target_path, 'omniglot_' + str(j / 15000) + '.tfrecord'))
        print j
        img_file = os.path.join(args.data_path, item)
        if os.path.isfile(img_file) and os.stat(img_file).st_size != 0:
            try:
                im = Image.open(img_file)
                width, height = im.size
                size = np.min([width, height])
                # Skip images smaller than the requested output size.
                if size < args.size:
                    continue
                imResize = im.resize((args.size, args.size), resample=Image.NEAREST)
                imResize=np.array(imResize.getdata(), dtype=np.uint8).reshape(args.size, args.size)
                if j < 32:
                    # Collect the first 32 images into a 4x8 preview collage.
                    i = j % 8
                    k = j / 8
                    collage[k * args.size:(k + 1) * args.size, i * args.size:(i + 1) * args.size] = imResize
                elif j == 32:
                    # Collage is complete; render and save it once.
                    fig = plt.imshow(np.asarray(collage, dtype=np.uint8), cmap='gray')
                    fig.axes.get_xaxis().set_visible(False)
                    fig.axes.get_yaxis().set_visible(False)
                    path = os.path.join(args.target_path, 'omniglot.png')
                    plt.savefig(path, bbox_inches='tight')
                    plt.close()
                imResize=np.reshape(imResize, (args.size*args.size))
                imResize_raw = imResize.tostring()
                # Label is -1: omniglot images are stored unlabelled here.
                example = tf.train.Example(features=tf.train.Features(feature={
                    'height': tfrecord_utils.int64_feature([args.size]),
                    'width': tfrecord_utils.int64_feature([args.size]),
                    'depth': tfrecord_utils.int64_feature([1]),
                    'label': tfrecord_utils.int64_feature([-1]),
                    'image_raw': tfrecord_utils.bytes_feature([imResize_raw])}))
                writer.write(example.SerializeToString())
            except:
                # Best-effort: record the failing file name and keep going.
                with open('errors', 'a') as handle:
                    handle.write(item + '\n')
                print 'here'
    writer.close()
|
23,306 | 12c549407ced03f602dae4116ed941555dd1bc4f | values = [1, 2, "giri", 4, 5] #list data types allowes multiple values along with different datatypea
print(values[0])    # first element
print(values[2])    # third element
print(values[-1])   # -1 refers to the last index of the list
print(values[1:3])  # slice: indices 1 and 2 (the end index is exclusive)
values.insert(3,"pai")      # insert at index 3, shifting later elements right
print(values)
values.append("endoflist")  # append at the end
print(values)
values[2]="Girish"          # replace the element at index 2
print(values)
del values[0]               # delete the first element by index
print(values) |
23,307 | 496de53b92e517aa42123f9c090e7737d5b65ba0 |
class GrowingBlob:
    """
    Accumulates many string segments cheaply.

    Repeated `str += segment` is slow in Python (quadratic behaviour);
    GrowingBlob stores the segments in a list and joins them on demand.
    """
    def __init__(self):
        self._blobs = []   # collected segments, in order
        self._length = 0   # total length of all segments
    def append(self, blob):
        """Add one segment at the end."""
        self._blobs.append(blob)
        self._length += len(blob)
    def length(self):
        """Total number of characters appended so far."""
        return self._length
    def content(self):
        """The full accumulated string."""
        return "".join(self._blobs)
    def substr(self, start, ceil):
        """Return the substring [start, ceil) of the accumulated content.

        `ceil` is clamped to the current length, so over-long requests are safe.
        """
        if ceil > self._length:
            ceil = self._length
        offset = start
        length = ceil - offset
        # Fix: the original built the result with `result += subblob`, the
        # exact quadratic accumulation pattern this class exists to avoid;
        # collect the pieces and join once instead.  `<= 0` also guards the
        # degenerate start > ceil case explicitly.
        parts = []
        for blob in self._blobs:
            if length <= 0:
                break
            if offset > len(blob):
                # Whole blob lies before the requested window.
                offset -= len(blob)
                continue
            piece = blob[offset: offset + length]
            parts.append(piece)
            offset = 0
            length -= len(piece)
        return "".join(parts)
|
23,308 | 3174c077c797969614447afd6c2153cbed2af18f | import os
import subprocess
from needle.engines.base import EngineBase
class Engine(EngineBase):
    """Screenshot-comparison engine backed by ImageMagick's `compare` tool."""

    compare_path = "compare"

    def assertSameFiles(self, output_file, baseline_file, threshold=0):
        """Compare a screenshot against its baseline; fail above `threshold` RMSE.

        On success the generated diff image is deleted; on failure an
        AssertionError names all three files and includes the tool output.
        """
        diff_file = output_file.replace('.png', '.diff.png')
        compare_command = [self.compare_path,
                           "-metric","RMSE",
                           "-subimage-search",
                           "-dissimilarity-threshold","1.0",
                           baseline_file, output_file, diff_file]
        # Fix: the command is a list, so it must NOT go through the shell.
        # With shell=True a POSIX shell receives only the first list element
        # ("compare") and every other argument is silently dropped.
        process = subprocess.Popen(compare_command, shell=False,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        compare_stdout, compare_stderr = process.communicate()
        # `compare -metric RMSE` prints e.g. "1234.5 (0.0188)" to stderr;
        # take the parenthesised normalised value.
        difference = float(compare_stderr.split()[1][1:-1])
        if difference <= threshold:
            os.remove(diff_file)
            return
        raise AssertionError("The new screenshot '{new}' did not match "
                             "the baseline '{baseline}' (See {diff}):\n"
                             "{stdout}{stderr}"
                             .format(new=output_file,
                                     baseline=baseline_file,
                                     diff=diff_file,
                                     stdout=compare_stdout,
                                     stderr=compare_stderr))
23,309 | c760a07b50ae46844d94d20102de25f1db3bddb8 | import numpy as np
import pandas as pd
from scipy.stats.stats import pearsonr
np.random.seed(12324)
from sklearn.cross_validation import StratifiedKFold
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
from ml_metrics import auc
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import StandardScaler
from santander_preprocess import *
INPUT_PATH = './input/'
OUTPUT_PATH = './features/'

# Load the raw Kaggle Santander data and run the shared preprocessing
# pipeline (process_base etc. are star-imported from santander_preprocess).
train = pd.read_csv(INPUT_PATH + 'train.csv')
test = pd.read_csv(INPUT_PATH + 'test.csv')
train, test = process_base(train, test)
train, test = drop_sparse(train, test)
train, test = drop_duplicated(train, test)
train, test = add_features(train, test, ['SumZeros'])

# All feature columns except the identifier and the target.
flist = [x for x in train.columns if not x in ['ID','TARGET']]

# Project the column-normalised features onto the first two principal
# components; the PCA basis is fitted on train only and reused for test.
pca = PCA(n_components=2)
x_train_projected = pca.fit_transform(normalize(train[flist], axis=0))
x_test_projected = pca.transform(normalize(test[flist], axis=0))
train.insert(1, 'PCAOne', x_train_projected[:, 0])
train.insert(1, 'PCATwo', x_train_projected[:, 1])
test.insert(1, 'PCAOne', x_test_projected[:, 0])
test.insert(1, 'PCATwo', x_test_projected[:, 1])

# Persist the two PCA features for train and test rows together.
pca_feats = train[['ID', 'PCAOne', 'PCATwo']].append(test[['ID', 'PCAOne', 'PCATwo']], ignore_index=True)
pca_feats.to_csv(OUTPUT_PATH + 'pca_feats.csv')
|
23,310 | ea192256bbb89fd7056ba2bb5b96bb02f29098f1 | #!/usr/bin/env python
# flake8: noqa
from lib.Feature import Feature, FeatureType
import cv2
class FeatureColor(Feature):
    """Colour-histogram feature extractor.

    Produces a flat histogram: 36 hue + 36 saturation bins for
    FeatureType.ColorHSV, otherwise 36 bins per BGR channel.
    """
    def __init__(self, type, image_set):
        super().__init__(type, image_set)
        self.size = 10000      # NOTE(review): semantics set by the Feature base class -- confirm
        self.img_size = 512    # images are resized to img_size x img_size before histogramming
    def process(self):
        # Delegate to the base-class pipeline on the first image of the set.
        return super().process(self.image_set[0], force=False)
    def extract(self, image, imagepath):
        """Return the concatenated colour histogram for one image (list of floats)."""
        resized = cv2.resize(image, (self.img_size, self.img_size))
        if self.type == FeatureType.ColorHSV:
            hsv = cv2.cvtColor(resized, cv2.COLOR_BGR2HSV)
            channels = cv2.split(hsv)
        else:
            channels = cv2.split(resized)
        hist = []
        if self.type == FeatureType.ColorHSV:
            # OpenCV hue ranges over [0, 180); saturation over [0, 256).
            hist_h = cv2.calcHist([channels[0]], [0], None, [36], [0, 180])
            hist_s = cv2.calcHist([channels[1]], [0], None, [36], [0, 256])
            hist.extend(hist_h.flatten())
            hist.extend(hist_s.flatten())
        else:
            hist_r = cv2.calcHist([channels[0]], [0], None, [36], [0, 256])
            hist_g = cv2.calcHist([channels[1]], [0], None, [36], [0, 256])
            hist_b = cv2.calcHist([channels[2]], [0], None, [36], [0, 256])
            hist.extend(hist_r.flatten())
            hist.extend(hist_g.flatten())
            hist.extend(hist_b.flatten())
        return hist
|
23,311 | baf887bb3ea49087f834d67ae61f726de44a9247 | """--- Day 9: Explosives in Cyberspace ---
Wandering around a secure area, you come across a datalink port to a new part of the network. After briefly scanning it for interesting files, you find one file in particular that catches your attention. It's compressed with an experimental format, but fortunately, the documentation for the format is nearby.
The format compresses a sequence of characters. Whitespace is ignored. To indicate that some sequence should be repeated, a marker is added to the file, like (10x2). To decompress this marker, take the subsequent 10 characters and repeat them 2 times. Then, continue reading the file after the repeated data. The marker itself is not included in the decompressed output.
If parentheses or other characters appear within the data referenced by a marker, that's okay - treat it like normal data, not a marker, and then resume looking for markers after the decompressed section.
For example:
ADVENT contains no markers and decompresses to itself with no changes, resulting in a decompressed length of 6.
A(1x5)BC repeats only the B a total of 5 times, becoming ABBBBBC for a decompressed length of 7.
(3x3)XYZ becomes XYZXYZXYZ for a decompressed length of 9.
A(2x2)BCD(2x2)EFG doubles the BC and EF, becoming ABCBCDEFEFG for a decompressed length of 11.
(6x1)(1x3)A simply becomes (1x3)A - the (1x3) looks like a marker, but because it's within a data section of another marker, it is not treated any differently from the A that comes after it. It has a decompressed length of 6.
X(8x2)(3x3)ABCY becomes X(3x3)ABC(3x3)ABCY (for a decompressed length of 18), because the decompressed data from the (8x2) marker (the (3x3)ABC) is skipped and not processed further.
What is the decompressed length of the file (your puzzle input)? Don't count whitespace.
"""
import re
from time import time

with open("day09input.txt") as fp:
    start = time()
    compressed = fp.read().strip()
    index = 0
    decompressed = ""
    key = re.compile(r'\([0-9]+x[0-9]+\)') # ie matches "(1234x7890)"
    while index < len(compressed):
        # Find the next marker at or after the current position.
        marker = re.search(key, compressed[index:])
        if marker: # if found a match
            print(marker.group())
            decompressed += compressed[index:index + marker.start()]
            # add chars from index to start of a marker
            index = index + marker.end() # index is now set after marker
            chars_repeat = marker.group()[1:-1].split('x') # removes parens, and splits on 'x'
            # Repeat the next <chars> characters <repeat> times verbatim;
            # markers inside that span are treated as plain data (part-1 rules).
            decompressed += compressed[index:index + int(chars_repeat[0])] * int(chars_repeat[1])
            index += int(chars_repeat[0]) # set index now after the substring that was repeated
            print(len(decompressed))
        else: # no further markers: copy the tail and stop
            decompressed += compressed[index:]
            break
    end = time()

print(decompressed)
print(len(decompressed))
print("This took {} secs".format(end - start))
|
23,312 | 16370f809055825d9442425dccd8a4b8612bee48 | import pytest
from app.data_models.app_models import EQSessionSchema
from app.storage.storage import StorageModel
def test_non_existent_model_type():
    """StorageModel must reject a model type it has no schema mapping for."""
    with pytest.raises(KeyError) as ex:
        StorageModel(model_type=int)
    # Fix: assert against the exception's own message via `ex.value`;
    # str(ex) on the ExceptionInfo wrapper describes the raise location,
    # not (reliably) the error text.
    assert "Invalid model_type provided" in str(ex.value)
def test_storage_model_properties(
    app, fake_eq_session
):  # pylint: disable=unused-argument
    """The EQSession model maps to the expected key/expiry fields and table name."""
    storage_model = StorageModel(model_type=type(fake_eq_session))
    assert storage_model.key_field == "eq_session_id"
    assert storage_model.expiry_field == "expires_at"
    assert storage_model.table_name == "dev-eq-session"
def test_serialize(fake_eq_session):
    """serialize() must agree with a plain schema dump, field by field."""
    expected_schema = EQSessionSchema().dump(fake_eq_session)
    storage_model = StorageModel(model_type=type(fake_eq_session))
    serialized_item = storage_model.serialize(fake_eq_session)
    assert serialized_item["eq_session_id"] == expected_schema["eq_session_id"]
    assert serialized_item["user_id"] == expected_schema["user_id"]
    assert serialized_item["session_data"] == expected_schema["session_data"]
    assert serialized_item["created_at"] == expected_schema["created_at"]
    assert serialized_item["expires_at"] == expected_schema["expires_at"]
    # updated_at is refreshed during serialization, so it may move forward.
    assert serialized_item["updated_at"] >= expected_schema["updated_at"]
def test_deserialize(fake_eq_session):
    """A serialize/deserialize round trip must match a schema load of the same data."""
    storage_model = StorageModel(model_type=type(fake_eq_session))
    serialized_item = storage_model.serialize(fake_eq_session)
    assert (
        storage_model.deserialize(serialized_item).__dict__
        == EQSessionSchema().load(serialized_item).__dict__
    )
|
23,313 | 593e1deff2e9ed546754c327dca83402b5b736c1 | #!/usr/bin/python27
import sys,gzip
unp_file = sys.argv[1]
with gzip.open(unp_file,'r') as fin:
data_buffer = []
confirmed_human=False
for line in fin:
row = line.strip().split(' ')
if row[0] == '//':
# Found new ID, if the previous entry was human,
# flush the buffer
if confirmed_human:
for dat in data_buffer:
print dat,
# Wait for confirmation that next entry is human
confirmed_human = False
# Clear the data buffer for the next entry
data_buffer = []
elif row[0] == 'OS' and row[1] == 'Homo sapiens (Human).':
# The current entry is human, flush when finished
confirmed_human = True
# Store the row in the data buffer in case it is
# human and needs to be printed
data_buffer.append(line)
|
23,314 | d763a275d55796350ec242efe40c83e4a1eecf8f | # -*- coding: utf-8 -*-
import pysplash
import rq
import redis
import uuid
log = pysplash.log.logger()
queue_name = str(uuid.uuid4())
def fib(n):
    """Compute Fibonacci(n) by fanning the two recursive calls out as rq jobs.

    Each non-base call enqueues fib(n-1) and fib(n-2) on the shared queue,
    blocks until both results arrive (pysplash.wait_jobs), then sums them.
    NOTE(review): this only completes if the pysplash pool keeps scaling up
    workers while enqueued jobs block on their children -- confirm against
    the Pool settings in the __main__ block.
    """
    log.debug("Fib(%s)" % n)
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        with rq.Connection(redis.StrictRedis()):
            q = rq.Queue(queue_name)
            jobs = [
                q.enqueue_call(fib, (n-1,)),
                q.enqueue_call(fib, (n-2,))
            ]
            log.debug("Waiting for results of %s & %s" % (
                n-1, n-2))
            res = pysplash.wait_jobs(jobs, collect_results=True)
            return sum(res)
# Demo driver: enqueue fib(4) on a throw-away queue, then start an
# auto-scaling pysplash worker pool to process it.
if __name__ == "__main__":
    pysplash.log.set_debug(True)
    con = redis.StrictRedis()
    with rq.Connection(con):
        q = rq.Queue(queue_name)
        job = q.enqueue_call(fib, (4,))
    p = pysplash.Pool(
        [queue_name], scale_frequency=2., zombie_timeout=15,
        retire_idle=False)
    log.info("Starting pool")
    p.start()
|
23,315 | 57c01682abfdb042c5939020cab0ead9c1b2ea07 | from pylab import *
# Plot maximum and average position error against the k2 parameter.
p=[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
avg_errors=[0.046785994768966839, 0.026393591605066592, 0.019447430011014125, 0.016824889762125282, 0.014749894581275305, 0.014058344846835909, 0.013902900770497519, 0.01335783222182872, 0.013095053803359815, 0.012812314418784969]
max_errors=[0.12466161328487227, 0.06014663795313939, 0.046857564454218986, 0.033733650122805672, 0.027024695972816457, 0.026498548445026943, 0.02595383967477366, 0.032350452199567607, 0.022781342678025653, 0.019128706100542594]
# Alternative data set (k2 = 0..10), kept for reference:
#p=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
#avg_errors=[0.13064415966719378, 0.06668065193589752, 0.041762329011805016, 0.032087372453887632, 0.025940955208013445, 0.020902001927315856, 0.017491418497665633, 0.015251430191803542, 0.013885627880904829, 0.012176964514276323, 0.011514942464426524]
#max_errors=[0.21239150183818203, 0.13538639866599453, 0.084122160747495719, 0.054197295395114649, 0.044296771862364491, 0.034071346137693588, 0.027097041447043252, 0.025724184978861642, 0.022963139885812136, 0.023385019343718602, 0.018861835160912162]
xlabel('value of k2')
ylabel('Position error')
plot(p, max_errors, 'r--',label='Maximum error')
plot(p, avg_errors, label='Average error')
legend()
show()
|
23,316 | 1c73346284712c76f5c485827abd79e74e136611 | from event import *
from fonction_annexe import *
from time import sleep
from time import time
from random import randint
import pygame
def teleportation(personnage,largeur_terrain,longeur_terrain):
    """Wrap the character around: leaving one edge re-enters from the opposite one."""
    if personnage["x"] < 0:
        personnage["x"] = largeur_terrain
    elif personnage["x"] > largeur_terrain:
        personnage["x"] = 0
    elif personnage["y"] < 0:
        personnage["y"] = longeur_terrain
    elif personnage["y"] > longeur_terrain:
        personnage["y"] = 0
def deplace_personnages(pacMan,fantomes,murs):
    # Advance every character (player first, then the ghosts) one step.
    deplace_pac_man(pacMan,murs)
    deplace_fantomes(fantomes,murs)
def deplace_pac_man(personnage,murs) :
    # Pac-Man only moves when the target square is not a wall.
    if valide_deplacement(personnage,murs) :
        deplace_personnage(personnage)
def deplace_fantomes(fantomes,murs) :
    # Move each ghost independently.
    for fantome in fantomes:
        deplace_fantome(fantome,murs)
def deplace_fantome(personnage,murs) :
    """Move one ghost; if its direction is blocked by a wall, pick random
    directions until one is free, then move."""
    if valide_deplacement(personnage,murs) :
        deplace_personnage(personnage)
    else :
        while not valide_deplacement(personnage,murs) :
            # Fix: the original called change_direction_fantome(fantome),
            # but no name `fantome` exists in this scope, so every blocked
            # ghost raised NameError.  The parameter is `personnage`.
            change_direction_fantome(personnage)
        deplace_personnage(personnage)
def deplace_personnage(personnage) :
    """Advance the character one step along its current direction vector."""
    dx, dy = personnage["direction"]
    personnage["x"] += dx
    personnage["y"] += dy
def valide_deplacement(personnage,murs) :
    """Return True when the square the character is about to enter is not a wall."""
    cible_x = personnage["x"] + personnage["direction"][0]
    cible_y = personnage["y"] + personnage["direction"][1]
    return not any(mur[0] == cible_x and mur[1] == cible_y for mur in murs)
def change_direction_pac_man(pacMan, touche):
    """Update Pac-Man's sprite and direction from an arrow-key name.

    Any other key leaves the current direction and sprite untouched.
    """
    mapping = {
        'Up':    ('./image/pacman_haut.png',   [0, -10]),
        'Down':  ('./image/pacman_bas.png',    [0, 10]),
        'Left':  ('./image/pacman_gauche.png', [-10, 0]),
        'Right': ('./image/pacman_droite.png', [10, 0]),
    }
    if touche in mapping:
        sprite, direction = mapping[touche]
        pacMan["image"] = sprite
        pacMan["direction"] = direction
def change_direction_fantome(fantome):
    """Give the ghost a new random direction among the four cardinal moves."""
    # Fix: the original used randint(0, 5) to index a 4-element list, so
    # roughly one call in three raised IndexError (randint bounds are
    # inclusive).  The valid index range is 0..3.
    direction = [[0, -10],[0, 10],[-10, 0],[10, 0]]
    fantome["direction"] = direction[randint(0, 3)]
def initialise_fantomes(fantomes):
    """Append the four starting ghosts (colour sprite + spawn position) to `fantomes`."""
    spawns = [
        (300, 300, './image/pacman_bleu.png'),
        (270, 300, './image/pacman_jaune.png'),
        (330, 300, './image/pacman_rose.png'),
        (300, 270, './image/pacman_rouge.png'),
    ]
    for x, y, sprite in spawns:
        fantomes.append({"x": x, "y": y, "image": sprite, "vitesse": 10,
                         "vulnerable": False, "direction": [10, 0]})
def reinitialise_fantomes(fantomes):
    """Reset the four existing ghosts to their spawn state, in place."""
    spawns = [
        (300, 300, './image/pacman_bleu.png'),
        (270, 300, './image/pacman_jaune.png'),
        (330, 300, './image/pacman_rose.png'),
        (300, 270, './image/pacman_rouge.png'),
    ]
    for i, (x, y, sprite) in enumerate(spawns):
        fantomes[i] = {"x": x, "y": y, "image": sprite, "vitesse": 10,
                       "vulnerable": False, "direction": [10, 0]}
def initialise_pacMan():
    """Return a brand-new Pac-Man state dictionary (3 lives, score 0)."""
    return {
        "x": 10,
        "y": 10,
        "image": './image/pacman_droite.png',
        "vie": 3,
        "vitesse": 100,
        "score": 0,
        "direction": [10, 0],
    }
def initialise_coordonne_pacMan(pacMan):
    """Move pac-man back to (100, 100) and return the same dict."""
    pacMan["x"], pacMan["y"] = 100, 100
    return pacMan
def initialise_coordonne_fantome(fantome):
    """Move *fantome* back to (100, 100) in place (no return value)."""
    fantome["x"], fantome["y"] = 100, 100
def initialiseJeu(pacMan,fantomes,points,superPoints,cerises):
    # Set up a fresh game: collectibles (all still stubs), then the ghosts.
    initialise_points()
    initialise_superPoints()
    initialise_cerises()
    initialise_fantomes(fantomes)
    # NOTE(review): initialise_pacMan() returns a new dict that is discarded
    # here, so the pacMan argument is never actually reset -- confirm intent.
    initialise_pacMan()
def collision(pacMan, fantomes, points, superPoints, cerises):
    """Run every pairwise collision check for the current frame."""
    for ghost in fantomes:
        collision_pacMan_fantome(pacMan, ghost, fantomes)
    for dot in points:
        collision_pacMan_point(pacMan, dot)
    for cherry in cerises:
        collision_pacMan_cerise(pacMan, cherry)
    for power in superPoints:
        collision_pacMan_superPoint(pacMan, power, fantomes)
def collision_pacMan_fantome(pacMan, fantome,fantomes):
    # A non-vulnerable ghost on pac-man's tile costs one life and sends
    # pac-man and all four ghosts back to their spawn positions.
    if (fantome["vulnerable"] == False) and (fantome["x"] == pacMan["x"]) and (fantome["y"] == pacMan["y"] ) :
        pacMan["vie"] += -1
        initialise_coordonne_pacMan(pacMan)
        reinitialise_fantomes(fantomes) # reset all the ghosts
    # A vulnerable ghost is "eaten": only that ghost is sent back.
    # NOTE(review): this second test re-reads coordinates that the first
    # branch may just have reset, so the two branches cannot both fire.
    if (fantome["vulnerable"] == True) and ( fantome["x"] == pacMan["x"] and fantome["y"] == pacMan["y"] ) :
        initialise_coordonne_fantome(fantome)
def collision_pacMan_point(pacMan, point):
    """Award one point when pac-man lands on a regular dot.

    Bug fix: the score used to be *decremented* (`+= -1`) on pickup even
    though it starts at 0 in initialise_pacMan, which only ever drove it
    negative; collecting a dot now adds one point.
    """
    if point["x"] == pacMan["x"] and point["y"] == pacMan["y"]:
        pacMan["score"] += 1
def collision_pacMan_cerise(pacMan, cerise):
    """Speed pac-man up by 10 when he lands on a cherry."""
    same_tile = (cerise["x"], cerise["y"]) == (pacMan["x"], pacMan["y"])
    if same_tile:
        pacMan["vitesse"] += 10
def collision_pacMan_superPoint(pacMan, superPoint, fantomes):
    """Make every ghost vulnerable when pac-man eats a power pellet."""
    if (superPoint["x"], superPoint["y"]) == (pacMan["x"], pacMan["y"]):
        for fantome in fantomes:
            fantome["vulnerable"] = True
def dessine_terrain(terrain,pacMan):
    # Paint the black background rectangle, then every wall sprite.
    # NOTE(review): both rectangle dimensions use TAILLE_LARGEUR; the second
    # one is presumably meant to be the terrain height -- confirm.
    rectangle(0,0,terrain["TAILLE_LARGEUR"],terrain["TAILLE_LARGEUR"],remplissage = "black")
    for i in range(0,len(terrain["murs"])):
        afficheElement(terrain["murs"][i])
    # texte (pacMan["score"])
    # texte (pacMan["vie"])
    pass
def afficheElement(element):
    # Draw one sprite (a dict with "x", "y", "image") centred on its position.
    # a,b = pixel_vers_case(personnage["x"],personnage["y"])
    # x, y = case_vers_pixel(a,b)
    image(element["x"],element["y"],element["image"], ancrage='center', tag='')
def afficheJeu(terrain, pacMan, fantomes, points, superPoints, cerises):
    """Redraw one frame: the terrain first, then every sprite on top of it."""
    dessine_terrain(terrain, pacMan)  # background must be drawn first
    afficheElement(pacMan)
    # Same draw order as before: ghosts, dots, cherries, power pellets.
    for sprite in fantomes + points + cerises + superPoints:
        afficheElement(sprite)
def gameOver(pacMan):
    """Return True while pac-man still has at least one life left.

    NOTE(review): despite the name, this is True when the game is *not*
    over (vie != 0); callers appear to use it as a "keep playing" flag.
    Confirm before renaming or inverting.
    """
    return pacMan["vie"] != 0
# def change_direction_fantomes(fantomes,murs,pacMan) :
# for i in range(0,len(fantomes)):
# for j in range(0,len(murs)):
# if collision_pacMan_fantome(pacMan, fantome) :
# change_direction_fantome(fantomes[i])# direction
# def collision_fantome_mur(pacMan, mur):
# if (fantome["x"] == mur["x"]) and (fantome["y"] == mur["y"] ) :
# return True
# return False
# def case_vers_pixel(x, y,terrain):
# return x * terrain["TAILLE_CASE_LARGEUR"], y * terrain["TAILLE_CASE_HAUTEUR"]
#
# def pixel_vers_case(x, y) :
# return x // terrain["TAILLE_CASE_LARGEUR"], y //terrain["TAILLE_CASE_HAUTEUR"]
#
# def pixel_vers_case_vers_pixel(x,y) :
# x,y = pixel_vers_case(x,y)
# x,y = case_vers_pixel(x,y)
# return x,y
#
# def case_vers_pixel_vers_case(x,y) :
# x,y = case_vers_pixel(x,y)
# x,y = pixel_vers_case(x,y)
# return x,y
def menu() :
    '''Placeholder: display the game menu (not implemented).'''
    pass
def initialise_points() :
    '''Placeholder: create the regular dots (not implemented).'''
    pass
def initialise_superPoints() :
    '''Placeholder: create the power pellets (not implemented).'''
    pass
def initialise_cerises() :
    '''Placeholder: create the cherries (not implemented).'''
    pass
def initialise_murs() :
    '''Placeholder: create the maze walls (not implemented).'''
    pass
def mode():
    # Run the three game-mode entry points (all still stubs below).
    mode_multijoueur()
    mode_contre_la_montre()
    mode_classsique()
def mode_multijoueur():
    '''Placeholder: multiplayer mode (not implemented).'''
    pass
def mode_contre_la_montre():
    '''Placeholder: time-attack mode (not implemented).'''
    pass
def mode_classsique() :
    '''Placeholder: classic mode (not implemented). The "classsique"
    spelling is kept because mode() calls it by this exact name.'''
    pass
def regle_jeu():
    '''Placeholder: show the game rules (not implemented).'''
    pass
def jouer():
    '''Placeholder: start a game (not implemented).'''
    pass
def rejouer():
    # NOTE(review): relies on module-level pacMan/fantomes/points/
    # superPoints/cerises names that are not defined in this module;
    # calling this as written raises NameError -- confirm where those
    # globals are meant to come from.
    initialiseJeu(pacMan,fantomes,points,superPoints,cerises)
def meilleur_score():
    '''Placeholder: best-score screen (not implemented).'''
    pass
|
23,317 | 426b6266ec6e6f87395f00cd53e48d1b3c14da1e | # coding=utf-8
from typing import List
import numpy as np
# 输入数据
w1 = np.array([
[0.011, 1.03, -0.21],
[1.27, 1.28, 0.08],
[0.13, 3.12, 0.16],
[-0.21, 1.23, -0.11],
[-2.18, 1.39, -0.19],
[0.34, 1.96, -0.16],
[-1.38, 0.94, 0.45],
[-1.02, 0.82, 0.17],
[-1.44, 2.31, 0.14],
[0.26, 1.94, 0.08]
])
w2 = np.array([
[1.36, 2.17, 0.14],
[1.41, 1.45, -0.38],
[1.22, 0.99, 0.69],
[2.46, 2.19, 1.31],
[0.68, 0.79, 0.87],
[2.51, 3.22, 1.35],
[0.60, 2.44, 0.92],
[0.64, 0.13, 0.97],
[0.85, 0.58, 0.99],
[0.66, 0.51, 0.88]
])
# 1.求最大似然估计的均值和方差
def get_u_oneDim(w):
    """Maximum-likelihood mean of a 1-D sample (plain arithmetic mean)."""
    n = w.shape[0]
    total = 0
    for value in w:
        total += value
    return total / n
def get_sigma_oneDim(w):
    """Maximum-likelihood (population, 1/N) variance of a 1-D sample.

    The mean computation is inlined (it is the same sum/N the helper
    performs), so this function is self-contained.
    """
    n = w.shape[0]
    mean = 0
    for value in w:
        mean += value
    mean = mean / n
    acc = 0
    for value in w:
        diff = value - mean
        acc += diff * diff
    return acc / n
# 2处理多维数据
# 获得均值U
def get_u(w):
    """Column-wise ML mean of *w*, returned as an (n_features, 1) column vector."""
    n_rows, n_cols = w.shape
    means = [sum(w[j][i] for j in range(n_rows)) / n_rows for i in range(n_cols)]
    return np.array([means]).T
# 获得方差/协方差矩阵
def get_sigma(w):
    """ML (1/N) covariance matrix of the row samples in *w*.

    The column-vector mean is computed inline (same sum/N as get_u), so
    this function stands alone.
    """
    n_rows, n_cols = w.shape
    mu = w.sum(axis=0).reshape(n_cols, 1) / n_rows  # (n_cols, 1) mean
    acc = np.zeros((n_cols, n_cols))
    for row in w:
        centered = row.reshape(n_cols, 1) - mu
        acc += centered @ centered.T
    return acc / n_rows
def get_sigma_known(w):
    """Diagonal covariance matrix built from per-feature ML (1/N) variances.

    np.var(axis=0) computes exactly the population variance the 1-D
    helper produced for each column, so the helper call is inlined.
    """
    return np.diag(w.var(axis=0))
def main1():
    """Task 1: print per-feature 1-D ML mean and variance for both classes.

    Bug fix: the first loop ended with a bare `print` expression, which in
    Python 3 evaluates the function object and prints nothing; it is now a
    call so both loops emit the intended blank separator line.
    """
    for i in range(w1.shape[1]):
        print("类一的x" + str(i + 1) + "的均值和方差分别为:")
        print("𝝁̂=" + str(get_u_oneDim(w1[:, i])))
        print("𝜎̂2=" + str(get_sigma_oneDim(w1[:, i])))
        print()
    for i in range(w2.shape[1]):
        print("类二的x" + str(i + 1) + "的均值和方差分别为:")
        print("𝝁̂="+str(get_u_oneDim(w2[:, i])))
        print("𝜎̂2="+str(get_sigma_oneDim(w2[:, i])))
        print()
def main2():
    """Task 2: 2-D ML estimates for each feature pair, for both classes.

    Fix: np.row_stack was removed in NumPy 2.0; np.vstack is the
    documented, behaviour-identical replacement.
    """
    # Feature pairs (x1,x2), (x2,x3) and (x1,x3) for each class.
    w1_x1 = w1[:, 0:2]
    w1_x2 = w1[:, 1:3]
    w1_x3 = np.array(np.vstack((w1[:, 0], w1[:, 2]))).T  # combine two columns
    w2_x1 = w2[:, 0:2]
    w2_x2 = w2[:, 1:3]
    w2_x3 = np.array(np.vstack((w2[:, 0], w2[:, 2]))).T
    w1_s = [w1_x1, w1_x2, w1_x3]
    w2_s = [w2_x1, w2_x2, w2_x3]
    for i in range(3):
        print("第一类数据,第" + str(i + 1) + "种可能的情况下,二维似然估计的均值𝝁̂为")
        print(get_u(w1_s[i]))
        print("二维似然估计的方差𝚺̂为")
        print(get_sigma(w1_s[i]))
        print()
    for i in range(3):
        print("第二类数据,第" + str(i + 1) + "种可能的情况下,二维似然估计的均值𝝁̂为")
        print(get_u(w2_s[i]))
        print("二维似然估计的方差𝚺̂为")
        print(get_sigma(w2_s[i]))
def main3():
    """Task 3: 3-D ML estimates with both mean and covariance unknown."""
    # T3处理三维数据(𝛍, 𝚺均未知)
    print("𝛍, 𝚺均未知的情况下,类一的三维似然估计的均值𝝁̂和方差𝚺̂分别为")
    print(get_u(w1))
    print(get_sigma(w1))
    print()
    print("𝛍, 𝚺均未知的情况下,类二的三维似然估计的均值𝝁̂和方差𝚺̂分别为")
    print(get_u(w2))
    print(get_sigma(w2))
    print()
def main4():
    """Task 4: 3-D ML mean with a (diagonal) covariance treated as known."""
    # T4处理三维数据(𝛍未知𝚺已知)
    print("𝛍未知𝚺已知的情况下,类一的三维似然估计的均值𝝁̂和方差𝚺̂分别为:")
    print(get_u(w1))
    print(get_sigma_known(w1))
    print("𝛍未知𝚺已知的情况下,类二的三维似然估计的均值𝝁̂和方差𝚺̂分别为:")
    print(get_u(w2))
    print(get_sigma_known(w2))
if __name__ == "__main__":
    # Run the four experiment stages only when executed as a script,
    # not when this module is imported.
    main1()
    main2()
    main3()
    main4()
|
23,318 | 137f7e0045cdd660333aa887cdbb9f92f548b3c0 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from bs4 import BeautifulSoup
import requests
import io
import time
driver = webdriver.Firefox()
driver.get("https://www.india.gov.in/my-government/indian-parliament/lok-sabha")
data = []
# Page through the whole member listing, collecting absolute profile URLs.
while True:
    time.sleep(5)  # fixed wait for the page to render (no explicit wait used)
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    for info in soup.findAll("div", {"class": "views-field views-field-title"}):
        name = info.find("a")['href']
        name = "https://www.india.gov.in" + name
        print(name)
        data.append(name)
    try:
        driver.find_element_by_link_text('next ›').click()
    except NoSuchElementException:
        # No "next" link left: we are on the last page.
        break
    time.sleep(5)
print(len(data))
# Dump the URL list as a Python literal so it can be imported elsewhere.
# NOTE(review): the file is opened in "w" mode, so f.seek(0) is a no-op.
with open("url_output.py", "w") as f:
    f.seek(0)
    f.write("urls = " + str(data))
23,319 | 9e2ac9876daaad7bc56d056c15fd25a31b4af6df | # -*- coding: UTF-8 -*-
'''
Created on Aug 7, 2017
@author: zhaxind
'''
import requests
from lxml import html
import re
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
}
page = requests.get("https://ca.pcpartpicker.com/products/pricedrop/week/#dg_3", headers=headers)
tree = html.fromstring(page.text)
# Scan every category section (dg_0 .. dg_23) on the price-drop page and
# print rows whose product name mentions a 2560x1440 resolution.
resolution_2k = re.compile('.*2560x1440.*')  # hoisted: compile the filter once
for x in range(0, 24):
    monitor_lists = tree.xpath('//h2[@id="dg_'+str(x)+'"]/following-sibling::table/tbody/tr/*[1]/a/text()')
    mornitor_prices = tree.xpath('//h2[@id="dg_'+str(x)+'"]/following-sibling::table/tbody/tr/*[2]/text()')
    mornitor_discountpc = tree.xpath('//h2[@id="dg_'+str(x)+'"]/following-sibling::table/tbody/tr/*[6]/text()')
    # Bug fix: the old code paired rows with monitor_lists.index(i), which is
    # O(n) per row and returns the *first* match, so duplicate product names
    # all got the first row's price/discount. enumerate keeps pairing exact.
    for idx, name in enumerate(monitor_lists):
        data = name + " " + mornitor_prices[idx] + " " + mornitor_discountpc[idx]
        if resolution_2k.match(name):
            print(data)
|
23,320 | f86a117d0926ce275fa8511e0763b4b980821180 |
import json,pprint
device = {}
# ['CATALYST_3000', 'CATALYST_4500', 'CATALYST_6500', 'NEXUS_5K', 'NEXUS_7K', 'NEXUS_9K']
# Map each device's free-form platform description (platform[1]) onto a
# normalised platform bucket.
# Bug fixes: the old `or platform == '<string>'` branches compared the whole
# list against a string and could never match -- they now test platform[1];
# the stray, never-closed open('device.json') before the with-block was a
# file-handle leak and is gone; insert(1, X)/pop(2) is simplified to a
# direct element replacement (same end state).
_BUCKETS = (
    ('CATALYST_3000', ('Catalyst 37xx Stack', 'Cisco Catalyst 38xx stack')),
    ('CATALYST_6500', ('Cisco Catalyst 6509',)),
    ('NEXUS_5K', ('Cisco Nexus 5548', 'Cisco Nexus 5596 UP', 'Cisco Nexus 5000 Series')),
    ('NEXUS_7K', ('Cisco Nexus 7000 Series',)),
)
with open('device.json', 'r') as json_data:
    for ip, platform in json.load(json_data).items():
        print(type(platform))
        for bucket, names in _BUCKETS:
            if str(platform[1]) in names:
                platform[1] = bucket  # replace raw description with its bucket
                break
        device[ip] = platform
pprint.pprint(device)
|
23,321 | 2f7742213f98a2f15b2118061b34481c09dec234 | import unittest
import conexion, clients, var
from PyQt5 import QtSql
class MyTestCase(unittest.TestCase):
    """Integration tests for the shop application: database connection,
    Spanish DNI validation, invoice totals and product-code lookup.
    Relies on the project modules `conexion`, `clients` and `var`."""
    def test_1conexion(self):
        # Leading '1' in the name makes unittest's alphabetical ordering
        # run the DB-connection test first.
        value = conexion.Conexion.db_connect(var.filebd)
        msg = 'Conexión no realizada'
        self.assertTrue(value, msg)
    def test_dni(self):
        # Validate a well-formed DNI (8 digits + check letter).
        dni = '00000000T'
        value = clients.Clients.validarDni(dni)
        msg = 'Error validar DNI'
        self.assertTrue(value, msg)
    def test_fact(self):
        # Recompute invoice 23 line by line (quantity * unit price), add
        # 21% VAT, and compare against the expected grand total.
        valor = 345.52
        codfac = 23
        try:
            msg = 'Claculos incorrectos'
            var.subfact = 0.00
            query = QtSql.QSqlQuery()
            query1 = QtSql.QSqlQuery()
            query.prepare('select codventa,codarticventa,cantidad from ventas where codfactventa=:codfact')
            query.bindValue(':codfact', int(codfac))
            if query.exec_():
                while query.next():
                    codarticventa = query.value(1)
                    cantidad = query.value(2)
                    query1.prepare('select nombre,precio_unidad from articulos where codigo=:codarticventa')
                    query1.bindValue(':codarticventa', int(codarticventa))
                    if query1.exec_():
                        while query1.next():
                            precio = query1.value(1)
                            subtotal = round(float(cantidad) * float(precio), 2)
                            var.subfact = round(float(subtotal) + float(var.subfact), 2)
            # 21% VAT on the subtotal, then the grand total.
            var.iva = round(float(var.subfact) * 0.21, 2)
            var.fac = round(float(var.iva) + float(var.subfact), 2)
        except Exception as error:
            print('Error lsitado de la tabla de ventas: %s ' % str(error))
        self.assertEqual(round(float(valor), 2), round(float(var.fac), 2), msg)
    def test_codigo_producto(self):
        # Look up the product code for 'Zapote' and compare with the known one.
        cod = '588'
        dato = conexion.Conexion.obtenCodPrec('Zapote')
        msg = 'Error Obtener codigo del producto'
        self.assertEqual(dato[0], cod, msg)
|
23,322 | d4b8293ff4edad4fd6ae4ab639f1a8ce625bdf11 | from Facebook.models import FBProfile
from .commonThread import *
class FBProfileUpdater(CommonThread):
    """Worker thread that refreshes batches of FBProfile rows from the
    Facebook Graph API.

    NOTE(review): `getClient`, `returnClient`, `ClientException`, `re`,
    `log`, `today` and `threadsExitFlag` are assumed to come from the
    wildcard `commonThread` import -- confirm.
    """
    batchSize = 50
    workQueueName = 'profileUpdateQueue'
    # @facebookLogger.debug(showArgs=True)
    def method(self, fbProfileList):
        client = getClient()
        response = None
        try:
            # Batch-fetch all profiles by comma-joined ids in one API call.
            response = client.get("",
                                  ids=",".join([fbProfile._ident for fbProfile in fbProfileList]),
                                  metadata='true',
                                  fields=[{'metadata': ['type']}])
        except ClientException as e:
            # Error code 21: the page id was migrated; parse the new id out
            # of the error message and record the migration.
            if e.response['error']['code'] == 21:
                returnClient(client)
                # logerror(e)
                match = re.search(r".*Page ID (?P<id1>[0-9]+) was migrated to page ID (?P<id2>[0-9]+).*",
                                  e.response['error']['message'])
                if match:
                    fbProfile = FBProfile.objects.get(_ident=match.group('id1'))
                    fbProfile.migrateId(match.group('id2'))
                    # NOTE(review): '%s' % self formats the updater thread,
                    # not the profile -- probably meant fbProfile; confirm.
                    log('FBProfile "%s" was migrated to new ID (%s)' % (self, match.group('id2')))
                return
            else:
                raise e
        # pretty(response)
        returnClient(client)
        if response:
            # Apply the fresh data to each profile that came back.
            for ident, item in response.items():
                if threadsExitFlag[0]: return
                fbProfile = FBProfile.objects.get(_ident=ident)
                fbProfile.update(item)
            # Profiles missing from the response are gone: soft-delete them.
            for fbProfile in fbProfileList:
                if fbProfile._ident not in response.keys():
                    log("%s was not retrievable from facebook" % fbProfile)
                    fbProfile.deleted_at = today()
                    fbProfile.save()
|
23,323 | d934c066420370ae2be79b824a2058c1b52e6568 | from salsa import Attaquant, Defense
from soccersimulator import Player, SoccerTeam, Simulation, show_simu
joueura1 = Player("Attaquant A" , Attaquant())
joueura2 = Player("Defenseur A" , Defense())
team1 = SoccerTeam ("Equipe A" , [ joueura1, joueura2])
# the list holds the team's players (one striker, one defender)
joueurb1 = Player("Attaquant B" , Attaquant())
joueurb2 = Player("Defenseur B" , Defense())
team2 = SoccerTeam ("Equipe B" , [ joueurb1, joueurb2])
# Create a match between the two teams lasting 1000 steps
match = Simulation( team1 , team2 , 1000)
# Play the match (without displaying it)
match.start()
# Play the match with the visualisation
show_simu( match )
# Warning!! once the match has been played, start() replays the recording
# but does not run the match again!
# Use this to watch the replay of a match
|
23,324 | 37061406e98b60bb709dcb3998fb91955ef8afab | import pandas as pd
import tool
def create_rule(data, rule_data, rand_num=[]):
    """Generate candidate rule combinations from *rule_data*.

    Bug fixes: 'unqiue' -> 'unique' (the typo raised AttributeError on the
    Series), and the emptiness check tested the builtin `vars` (always
    truthy) instead of the extracted name list.

    NOTE(review): the mutable default `rand_num=[]` is kept for interface
    compatibility (it is never mutated here); `new_list` is computed but
    never used or returned -- presumably work in progress.
    """
    var_list = list(rule_data['var_name'].unique())
    if len(var_list) == 0:
        print('error')
    else:
        for num in rand_num:
            new_list = tool.combine(rule_data, num)
def ai_rule_test():
    """Cross-tabulate a hand-built rule flag against label column 'y2_new'
    in ./data.csv and print per-value good/bad counts and rates.

    Bug fix: the bad-rate read new['total'], a column that is never
    created (the row total is stored in 'bin_num'), so this always
    raised KeyError; it now divides by 'bin_num'.
    """
    df = pd.read_csv('./data.csv')
    # The rule under test: var1>10 AND var2>10, OR var3>10.
    df['is_touch'] = ((df['var1'] > 10) & (df['var2'] > 10) | (df['var3'] > 10))
    sample_num = df.shape[0]
    var_name = 'is_touch'
    y_flag = 'y2_new'
    df_i = df[[var_name, y_flag]].fillna('Null')
    # Count label outcomes per rule-flag value and pivot labels to columns.
    new = df_i[y_flag].groupby([df_i[var_name], df_i[y_flag]]).count().unstack().reset_index().fillna(0)
    new.columns = ['var_value', 'good', 'bad']
    new['bin_num'] = new['good'] + new['bad']
    new['bad_rate'] = new['bad'] / new['bin_num']  # was new['total']: KeyError
    new['bin_rate'] = new['bin_num'] / sample_num
    print(new)
23,325 | be033a7999ce76e0b495efb6fff45b5d2d42eee2 | """
Provides graphical components to construct graphical interface based on
the Python implementation of the HMSA file format, pyHMSA.
"""
__author__ = "Philippe T. Pinard"
__email__ = "philippe.pinard@gmail.com"
__copyright__ = "Copyright (c) 2013-2015 Philippe T. Pinard"
__license__ = "MIT"
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
23,326 | e355fe72bce566a53a3e92d9b1dee9bac1143cfa | import numpy as np
from Original import Calculations as calc
from scipy.optimize import minimize
from datetime import datetime as datetime
import pandas as pd
def add_user(book_filename, X_filename, Y_mean_filename):
    # Train latent features for a new user's hard-coded book ratings and
    # print predicted ratings (collaborative filtering; the learned item
    # matrix X and per-book mean ratings come from files).
    print(datetime.now())
    # load data and define constants
    books = pd.read_csv(book_filename, dtype=str, delimiter=';', skiprows=1, usecols=[1,2], quotechar='"', encoding='latin-1').to_numpy(dtype=str)
    X = np.loadtxt(X_filename, delimiter=' ', skiprows=0)
    Y_mean = np.loadtxt(Y_mean_filename)
    num_features = 16
    lam = 0
    # initialize my ratings (Y) and Theta
    Y = np.zeros(shape=np.shape(Y_mean))
    Theta = np.random.rand(num_features)
    ratings = [['all quiet on the western front','erich',10], ['catch 22','heller',9],
               ['crime and punishment','fyodor', 4], ['the blind assassin','atwood', 4],
               ['slaughterhouse five','vonnegut', 10], ['cat\'s cradle','vonnegut', 7],
               ['a tale of two cities','dickens', 2]]
    # insert ratings into Y (match by title substring + author substring)
    for i in range(np.shape(books)[0]):
        book = books[i, :]
        title = book[0].lower().replace('-',' ')
        author = book[1].lower()
        for rating in ratings:
            if rating[0] in title and rating[1] in author:
                Y[i] = rating[2]
    # calculate Y_norm and R (R marks which books this user rated)
    Y_norm = Y - Y_mean
    R = np.array(Y, dtype=bool)
    # train latent features for user via conjugate-gradient minimisation
    args = (X, Y_norm, R, lam)
    result = minimize(fun=calc.user_gradient, x0=Theta, args=args, method='CG', jac=True, options={'disp':True})
    Theta = result.x
    # calculate predictions (add the mean back after the normalised fit)
    predict = X @ Theta.transpose()
    my_predict = predict + Y_mean
    print(datetime.now())
    # define books of interest
    book_list = [['a clockwork orange', 'anthony'],['lord of the flies', 'william'],
                 ['nineteen eighty four', 'orwell'], ['the great gatsby', 'fitzgerald'],
                 ['fahrenheit 451', 'bradbury'], ['the stranger', 'camus'],
                 ['gone with the wind', 'mitchell'], ['pride and prejudice', 'austen'],
                 ['to kill a mockingbird', 'lee']]
    # print ratings for books of interest
    for i in range(np.shape(books)[0]):
        book = books[i, :]
        title = book[0].lower().replace('-',' ')
        author = book[1].lower()
        for entry in book_list:
            if entry[0] in title and entry[1] in author:
                print('Predicting rating %f for book %s' % (my_predict[i], books[i, 0]))
    # print top predicted ratings (sort descending by prediction)
    sorted = np.argsort(-1 * my_predict)
    for i in range(10):
        j = sorted[i]
        print('%d: Predicting rating %f for book %s by %s' % (i, my_predict[j], books[j, 0], books[j, 1]))
# def add_rating(user, book, rating):
if __name__ == "__main__":
add_user('Data/BX-Books.csv', 'Data/learned_X_150.txt', 'Data/Y_mean.txt')
|
23,327 | 0faa728e0e159d33dc036ce1ca8a137cfbc67c0b | import json
import discord
from discord.ext import commands
class CustomHelpCommand(commands.DefaultHelpCommand):
    """Discord help command that renders command / cog / bot help as embeds.

    `mod_cmds` lists command names that need elevated permissions (they
    are underlined in the overview); `prefixes` is joined for display.
    """
    def __init__(self, **kwargs):
        self.mod_cmds = kwargs.pop('mod_cmds')
        self.prefixes = ", ".join(kwargs.pop('prefixes'))
        super().__init__(verify_checks=False)
    # desc = desc, help = perms, brief = cd
    async def send_command_help(self, command):
        # Single-command help: aliases, permissions, cooldown, usage, examples.
        footer_value = "Note: you may be able to use the command multiple times before triggering the cooldown.\n" \
                       "You should get a response or see the results of your command."
        embed = discord.Embed(
            title='Command: ' + command.name,
            colour=discord.Colour.gold()
        )
        cd_value = 'None'
        perms = 'None'
        desc = "No Description"
        example = '!' + command.name
        if command.description:
            desc = command.description
        # `brief` carries a JSON payload with example invocations and cooldown.
        if command.brief is not None:
            brief_dict = json.loads(command.brief)
            example_list = brief_dict.get("examples", None)
            cd = brief_dict.get('cd', None)
            # Replace ` with quotes
            if example_list is not None and example_list:
                example_list[:] = [s.replace('`', "\"") for s in example_list]
                example = "\n".join(('!' + x for x in example_list))
            # NOTE(review): this guards on example_list but then uses cd --
            # presumably meant `cd is not None`; confirm.
            if example_list is not None and cd:
                cd = int(cd)
                if cd < 60:
                    cd_value = str(cd) + ' second(s)'
                else:
                    cd_value = str(cd//60) + ' minute(s)'
        # `help` carries a comma-separated permission list.
        if command.help is not None:
            permlist = command.help.split(', ')
            perms = "\n".join(perm for perm in permlist)
        if command.aliases:
            aliases = "\n".join(command.aliases)
        else:
            aliases = "None"
        if command.signature:
            usage_value = '!' + command.name + ' ' + command.signature + '\n [] parameters are optional.\n' \
                                                                         'If you want to give a parameter with spaces' \
                                                                         ' use quotation marks `""`'
        else:
            usage_value = '!' + command.name
        embed.description = desc
        embed.add_field(name='Aliases', value=aliases, inline=True)
        embed.add_field(name='Permissions (Any)', value=perms, inline=True)
        embed.add_field(name='Cooldown', value=cd_value, inline=True)
        embed.add_field(name='Usage', value=usage_value, inline=False)
        embed.add_field(name="Example(s)", value=example, inline=False)
        embed.set_footer(text=footer_value)
        dest = self.get_destination()
        await dest.send(embed=embed)
    async def send_cog_help(self, cog):
        # Category help: list the cog's visible commands with their signatures.
        embed = discord.Embed(
            title=f"Category: {cog.qualified_name}",
            description=cog.description or "No description",
            colour=discord.Colour.gold()
        )
        sorted_commands = await self.filter_commands(cog.get_commands(), sort=True)
        """
        cmd_list = []
        for cmd in sorted_commands:
            cmd_name = str(cmd)
            desc = ""
            if cmd.description:
                desc = ' - ' + cmd.description
            cmd_name = cmd_name + desc
            cmd_list.append(cmd_name)
        cmd_string = '\n'.join(cmd_list)
        """
        embed.add_field(name='Commands:', value='\n'.join(str(cmd) + ' - !' + cmd.name + " " + cmd.signature for
                                                          cmd in sorted_commands))
        footer = """[] parameters are optional.\n'If you want to give a parameter with spaces use
        quotation marks " " """
        embed.set_footer(text=footer)
        dest = self.get_destination()
        await dest.send(embed=embed)
    async def send_bot_help(self, mapping):
        # Bot overview: one field per cog, mod-only commands underlined.
        # NOTE(review): the description string is missing the closing '>'
        # after `<command/category` -- confirm intended text.
        embed = discord.Embed(
            title="All categories and commands",
            description="To get information on a specific command or category type\n"
                        "`!help <command/category`",
            colour=discord.Colour.gold()
        )
        no_category = "No category:"
        for cog, cog_commands in mapping.items():
            sorted_commands = await self.filter_commands(cog_commands, sort=True)
            if sorted_commands:
                name = cog.qualified_name if cog is not None else no_category
                cmd_list = []
                for cmd in sorted_commands:
                    cmd_name = str(cmd)
                    if cmd_name in self.mod_cmds:
                        cmd_name = '__' + cmd_name + '__'
                    cmd_list.append(cmd_name)
                cmd_string = '\n'.join(cmd_list)
                embed.add_field(name=name, value=cmd_string)
        footer_value = 'Underlined commands require either administrator or manage server permissions or ' \
                       'for the user to have role called mod, except !clear, which requires manage messages.'
        embed.set_footer(text=footer_value)
        embed.add_field(name='Prefixe(s)', value=self.prefixes, inline=False)
        dest = self.get_destination()
        await dest.send(embed=embed)
|
23,328 | 1a7433c10b08a365703b3f7d3c4d5592a1e29213 | """
A skeleton Ryu component
"""
from ryu.base import app_manager
# The central management of Ryu applications.
class RyuSkeleton(app_manager.RyuApp):
    """Minimal Ryu application: registers with the app manager and does
    nothing else (a starting point for new controller apps)."""
    def __init__(self, *args, **kwargs):
        super(RyuSkeleton, self).__init__(*args, **kwargs)
|
23,329 | 40b8abd7fc14e8777f605530b9e99c779f8f57fa | "Sequence Operations"
'''
Besides method calls, all the usual generic sequence operations from strings
and lists; work as expected on both str and bytes. Including indexing, slicing,
concatenation, and so on.
bytes really is a sequence of 8-bit integers, but for convenience prints as a
string of ASCII-coded characters where possible when displayed as a whole.
To check a given byte’s value, use the chr built-in to convert it back to its
character.
'''
B = b'spam' # A sequence of small ints
print(B) # Prints as ASCII characters (and/or hex escapes)
print(B[0]) # Indexing yields an int
print(B[-1]) # Negative indexing works like on any sequence
print(chr(B[0])) # Show character for int
print(list(B)) # Show all the byte's int value
print(B[1:],B[:-1]) # Slicing yields new bytes objects
print(len(B))
print(B + b'lmn') # Concatenation makes a new bytes object
print(B * 4) # So does repetition
'Other Ways to Make bytes Objects'
"""
So far, we’ve been mostly making bytes objects with the b'...' literal syntax. We can
also create them by calling the bytes constructor with a str and an encoding name,
calling the bytes constructor with an iterable of integers representing byte values, or
encoding a str object per the default (or passed-in) encoding.
As we’ve seen, encoding
takes a text str and returns the raw encoded byte values of the string per the encoding
specified; conversely, decoding takes a raw bytes sequence and translates it to its str
text string representation—a series of Unicode characters. Both operations create new
string objects:
"""
B = b'abc' # Literal
print(B)
B = bytes('bimri', 'ascii') # Constructor with encoding name
print(B)
print(ord('b')) # Code point of 'b', for comparison with the bytes above
B = bytes([97, 98, 99]) # Integer iterable
print(B)
B = 'spam'.encode() # str.encode() (or bytes())
print(B)
S = B.decode() # bytes.decode() (or str())
print(S)
'''
From a functional perspective, the last two of these operations are really tools for
converting between str and bytes
'''
|
23,330 | dc53402668e017c141e51e2b489f6787d17ac25a | # -*- coding: utf-8 -*-
# Sniffs HTTPRequest packages using scapy
# the script tries to filter out useless info and duplicates
import scapy.all as scapy
from scapy_http import http
from threading import Thread, Event
import argparse
import logging
import sys
class PacketSniffer(Thread):
    """Daemon thread that sniffs HTTPRequest packets with scapy on a given
    interface, logging visited URLs and lines that look like credentials.

    Constructed with positional args: (interface, logger).
    """
    def __init__(self, *args):
        super().__init__()
        self.print_name = 'HTTP sniffer'
        self.daemon = True
        self.login_text = ''   # last credential-looking payload printed (dedup)
        self.old_url = ''      # last URL printed (dedup)
        self.socket = None
        self.interface = args[0]
        self.logger = args[1]
        self.classPrint("Initializing.", "[+]")
        self.stop_sniffing = Event()
        self.keywords = ['user','username','name','login','admin','pass','password'] #keywords for finding password/login
    def run(self):
        #Tell scapy to start sniffing packages on the supllied interface
        try:
            #create our own socket so we can close it after ;)
            self.socket = scapy.conf.L2listen(iface=self.interface)
            self.classPrint("Sniffing for HTTPRequests on " + str(self.interface) + ".", "[+]")
            scapy.sniff(opened_socket=self.socket, prn=self.processPacket, stop_filter=self.checkJoined)
        except Exception as e:
            print(e)
    def classPrint(self, text, icon):
        #Adds a class name to the printed messages.
        msg = "{} {} -> \t{}".format(icon,self.print_name,text)
        self.logger.info(msg)
    def join(self, timeout=None):
        #for joining the thread / stopping the sniffing
        self.stop_sniffing.set()
        self.classPrint("Stopping.", "[-]")
        super().join(timeout)
    def checkJoined(self, packet):
        #check if thread is joined and sniffing should stop
        return self.stop_sniffing.isSet()
    def processPacket(self, packet):
        #Checks if package is an HTTPRequest and processes it if so.
        if (packet.haslayer(http.HTTPRequest)) :
            self.findUrl(packet)
            self.findKeywords(packet)
            self.findEmails(packet)
    def findKeywords(self, packet):
        #Finds login information by keywords.
        #It also checks if the text is not exactly the same as the last printed
        #text to filter out duplicates. (could be done better)
        if packet.haslayer(scapy.Raw):
            text = packet[scapy.Raw].load
            text = text.decode('utf-8')
            for keyword in self.keywords:
                if keyword in text:
                    if not text == self.login_text:
                        self.classPrint("LOGIN : " + text, "[!]")
                        self.login_text = text
    def findEmails(self, packet):
        #This function will check for email addresses by regex or something
        # (stub: the decoded payload is currently unused)
        if packet.haslayer(scapy.Raw):
            text = str(packet[scapy.Raw].load)
    def sslStrip(self, packet):
        #redirect user to http variant of site when availlable
        # NOTE(review): `site` is undefined and Logger has .debug, not
        # .DEBUG -- calling this as written raises; confirm before use.
        self.logger.DEBUG(site)
    def findUrl(self, packet):
        #Takes the HTTPRequest packet and constructs a string containing the complete URL
        url = packet[http.HTTPRequest].Host + packet[http.HTTPRequest].Path
        if not url == self.old_url:
            self.classPrint("URL: " + url.decode('utf-8'), "[*]")
            self.old_url = url
|
23,331 | 2d0cb40777b00ac53f3258301152a023724576ec | 18 gid=1318458867
18 uid=1828093400
20 ctime=1472350402
20 atime=1472350402
23 SCHILY.dev=16777223
23 SCHILY.ino=12649856
18 SCHILY.nlink=1
|
23,332 | 4030d8703d1e368ef4cc3b864e92689a4d99b95a | from utils.techniques.gradient_clipping import clip_gradient
import torch
import torch.nn as nn
from tqdm import tqdm
from .checkpoint import CheckPoint, load
from logger import Logger
import time
import os
from augmentation import Denormalize
import cv2
import numpy as np
from utils.gradcam import *
class Trainer(nn.Module):
def __init__(self, config, model, train_loader, val_loader, **kwargs):
super().__init__()
self.config = config
self.model = model
self.train_loader = train_loader
self.val_loader = val_loader
self.optimizer = model.optimizer
self.criterion = model.criterion
self.metrics = model.metrics # list of classification metrics
self.set_attribute(kwargs)
def logged(self, logs):
tags = [tag for tag in logs.keys()]
values = [value for value in logs.values()]
self.logger.write(tags=tags, values=values)
    def fit(self, start_epoch=0, start_iter=0, num_epochs=10, print_per_iter=None):
        """Main training loop.

        Trains for `num_epochs` epochs, evaluating every
        `num_evaluate_per_epoch` epochs and stepping the per-epoch LR
        scheduler when configured; a KeyboardInterrupt saves a 'last'
        checkpoint and stops.
        """
        self.num_epochs = num_epochs
        self.num_iters = num_epochs * len(self.train_loader)
        if self.checkpoint is None:
            self.checkpoint = CheckPoint(save_per_epoch=int(num_epochs/10) + 1)
        if print_per_iter is not None:
            self.print_per_iter = print_per_iter
        else:
            # Default: report about 10 times per epoch.
            self.print_per_iter = int(len(self.train_loader) / 10)
        self.epoch = start_epoch
        # For one-cycle lr only
        if self.scheduler is not None and self.step_per_epoch:
            self.scheduler.last_epoch = start_epoch - 1
        self.start_iter = start_iter % len(self.train_loader)
        print(f'===========================START TRAINING=================================')
        print(f'Training for {num_epochs} epochs ...')
        for epoch in range(self.epoch, self.num_epochs):
            try:
                self.epoch = epoch
                self.train_per_epoch()
                if self.num_evaluate_per_epoch != 0:
                    if epoch % self.num_evaluate_per_epoch == 0 and epoch+1 >= self.num_evaluate_per_epoch:
                        self.evaluate_per_epoch()
                if self.scheduler is not None and self.step_per_epoch:
                    self.scheduler.step()
                    # Log the mean LR across parameter groups once per epoch.
                    lrl = [x['lr'] for x in self.optimizer.param_groups]
                    lr = sum(lrl) / len(lrl)
                    log_dict = {'Learning rate/Epoch': lr}
                    self.logged(log_dict)
            except KeyboardInterrupt:
                # NOTE(review): if interrupted before the first batch ever
                # runs, self.iters has not been assigned yet -- confirm.
                self.checkpoint.save(self.model, save_mode='last', epoch=self.epoch,
                                     iters=self.iters, best_value=self.best_value)
                print("Stop training, checkpoint saved...")
                break
        print("Training Completed!")
def train_per_epoch(self):
self.model.train()
running_loss = 0.0
running_time = 0
loop = tqdm(self.train_loader)
for i, batch in enumerate(loop):
start_time = time.time()
with torch.cuda.amp.autocast():
loss, loss_dict = self.model.training_step(batch)
if self.use_accumulate:
loss /= self.accumulate_steps
self.model.scaler(loss, self.optimizer)
if self.use_accumulate:
if (i+1) % self.accumulate_steps == 0 or i == len(self.train_loader)-1:
self.model.scaler.step(
self.optimizer, clip_grad=self.clip_grad, parameters=self.model.parameters())
self.optimizer.zero_grad()
if self.scheduler is not None and not self.step_per_epoch:
self.scheduler.step(
(self.num_epochs + i) / len(self.train_loader))
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Iterations': lr}
self.logging(log_dict)
else:
self.model.scaler.step(
self.optimizer, clip_grad=self.clip_grad, parameters=self.model.parameters())
self.optimizer.zero_grad()
if self.scheduler is not None and not self.step_per_epoch:
# self.scheduler.step()
self.scheduler.step(
(self.num_epochs + i) / len(self.train_loader))
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Iterations': lr}
self.logging(log_dict)
torch.cuda.synchronize()
end_time = time.time()
for (key, value) in loss_dict.items():
if key in running_loss.keys():
running_loss[key] += value
else:
running_loss[key] = value
running_time += end_time-start_time
self.iters = self.start_iter + \
len(self.train_loader)*self.epoch + i + 1
if self.iters % self.print_per_iter == 0:
for key in running_loss.keys():
running_loss[key] /= self.print_per_iter
running_loss[key] = np.round(running_loss[key], 5)
loss_string = '{}'.format(running_loss)[
1:-1].replace("'", '').replace(",", ' ||')
print("[{}|{}] [{}|{}] || {} || Time: {:10.4f}s".format(
self.epoch, self.num_epochs, self.iters, self.num_iters, loss_string, running_time))
self.logging(
{"Training Loss/Batch": running_loss['T'] / self.print_per_iter, })
running_loss = {}
running_time = 0
if (self.iters % self.checkpoint.save_per_iter == 0 or self.iters == self.num_iters - 1):
print(f'Save model at [{self.epoch}|{self.iters}] to last.pth')
self.checkpoint.save(
self.model,
save_mode='last',
epoch=self.epoch,
iters=self.iters,
best_value=self.best_value)
def evaluate_per_epoch(self):
self.model.eval()
epoch_loss = {}
metric_dict = {}
print('=============================EVALUATION===================================')
start_time = time.time()
with torch.no_grad():
for batch in tqdm(self.val_loader):
_, loss_dict = self.model.evaluate_step(batch)
for (key, val) in loss_dict.items():
if key in epoch_loss.keys():
epoch_loss[key] += val
else:
epoch_loss[key] = val
end_time = time.time()
running_time = end_time - start_time
metric_dict = self.model.get_metric_values()
self.model.reset_metrics()
for key in epoch_loss.keys():
epoch_loss[key] /= len(self.val_loader)
epoch_loss[key] = np.round(epoch_loss[key], 5)
loss_string = '{}'.format(epoch_loss)[
1:-1].replace("'", '').replace(",", ' ||')
print()
print("[{}|{}] || {} || Time: {:10.4f} s".format(
self.epoch, self.num_epochs, loss_string, running_time))
for metric, score in metric_dict.items():
print(metric + ': ' + str(score), end=' | ')
print()
print('==========================================================================')
log_dict = {
"Validation Loss/Epoch": epoch_loss['T'] / len(self.val_loader), }
log_dict.update(metric_dict)
self.logging(log_dict)
# Save model gives best mAP score
if metric_dict['acc'] > self.best_value:
self.best_value = metric_dict['acc']
self.checkpoint.save(self.model, save_mode='best', epoch=self.epoch,
iters=self.iters, best_value=self.best_value)
if self.visualize_when_val:
self.visualize_batch()
def visualize_batch(self):
    """Write Grad-CAM heatmap overlays for one validation batch to ./samples.

    Output files are named '{epoch}_{iters}_{idx}.jpg', one per image in
    the first batch of the validation loader.
    """
    # Vizualize Grad Class Activation Mapping
    if not os.path.exists('./samples'):
        os.mkdir('./samples')
    denom = Denormalize()
    batch = next(iter(self.val_loader))
    images = batch["imgs"]
    #targets = batch["targets"]
    self.model.eval()
    # assumes cfg.model_name looks like '<arch>_<variant>' — TODO confirm
    config_name = self.cfg.model_name.split('_')[0]
    grad_cam = GradCam(model=self.model.model, config_name=config_name)
    for idx, inputs in enumerate(images):
        image_outname = os.path.join(
            'samples', f'{self.epoch}_{self.iters}_{idx}.jpg')
        img_show = denom(inputs)  # undo input normalization for display
        inputs = inputs.unsqueeze(0)  # add a batch dimension
        inputs = inputs.to(self.model.device)
        # target_category=None: class selection left to GradCam — TODO confirm
        target_category = None
        grayscale_cam, label_idx = grad_cam(inputs, target_category)
        label = self.cfg.obj_list[label_idx]
        img_cam = show_cam_on_image(img_show, grayscale_cam, label)
        cv2.imwrite(image_outname, img_cam)
def __str__(self) -> str:
    """Human-readable summary: model name, parameter count, loader sizes."""
    summary_lines = [
        '------------- Model Summary ---------------',
        f'Name: {self.model.name}',
        f'Number of params: {self.model.trainable_parameters}',
        f'Number of train iterations per epoch: {len(self.train_loader)}',
        f'Number of val iterations per epoch: {len(self.val_loader)}',
    ]
    return '\n'.join(summary_lines)
def print_forward_step(self):
    """Run a single forward pass in eval mode and report the output shape."""
    self.model.eval()
    output = self.model.forward_step()
    print('Feedforward: output_shape: ', output.shape)
def set_accumulate_step(self):
    """Enable gradient accumulation when the config requests it.

    accumulate_steps is derived from total_accumulate_steps / batch_size,
    clamped to at least 1.
    """
    total = self.config.total_accumulate_steps
    if total > 0:
        self.use_accumulate = True
        self.accumulate_steps = max(
            round(total / self.config.batch_size), 1)
    else:
        self.use_accumulate = False
def set_amp(self):
    """Turn on automatic mixed precision when requested in the config."""
    self.use_amp = True if self.config.mixed_precision else False
def set_attribute(self, **kwargs):
    """Install default training attributes, then apply any overrides.

    Any keyword argument is set verbatim on the instance, overriding
    the defaults below.
    """
    defaults = {
        'checkpoint': None,
        'evaluate_epoch': 1,
        'scheduler': None,
        'gradient_clip': 10,
        'visualize_when_val': True,
        'step_per_epoch': False,
        'num_evaluate_per_epoch': 1,
        'best_value': 0.0,
    }
    for attr, value in defaults.items():
        setattr(self, attr, value)
    self.logger = Logger()
    self.set_accumulate_step()
    self.set_amp()
    for attr, value in kwargs.items():
        setattr(self, attr, value)
|
23,333 | e559beab057098cfad9ae9c05fb83df390a40abe | #!/usr/bin/env python3
import sys
from pprint import pprint

# Read whitespace-separated weather records from stdin, one record per line.
records = [raw.strip().split() for raw in sys.stdin]

# Scan for the record with the lowest minimum temperature (column 5)
# and the one with the highest maximum temperature (column 9).
min_idx, max_idx = -1, -1
lowest, highest = 10000, -10000
for idx, rec in enumerate(records):
    if float(rec[5]) < lowest:
        min_idx = idx
        lowest = float(rec[5])
    if float(rec[9]) > highest:
        max_idx = idx
        highest = float(rec[9])

print(f"{records[min_idx][0]} {records[min_idx][1]} Time: {records[min_idx][3]} MinTemp : {records[min_idx][5]}")
print(f"{records[max_idx][0]} {records[max_idx][1]} Time: {records[max_idx][7]} MaxTemp : {records[max_idx][9]}")
|
23,334 | d519263e0777085380596a47b9079a0fbaca426f | ##Mari-Ann Lind
##Katrin Linno
import random
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
print("Oled sisenenud loosipaki keskkonda.")
kingi_suurus = 0
while True:  # validate that the entered value is an integer
    try:
        kingi_suurus = int(input("Palun sisestage kingi suurus eurodes: "))
    except ValueError:
        print("Sisestatud väärtus ei ole arv! Palun sisesta täisarv!")
        continue
    else:
        break  # a valid number was entered; move on to the next step
print("Sisestage vähemalt 2 inimese nimed ja e-posti aadressid.")
while True:  # accept only the answers "jah" (yes) or "ei" (no)
    andmed = input("Kas soovid sisestada nimed ja e-posti aadressid failist? [Jah/Ei]: ")
    if andmed.lower() == 'jah':
        break
    elif andmed.lower() == 'ei':
        break
    else:
        print('Palun vastake, kas "Jah" või "Ei"')
def andmesisestus():
    """Populate the global participant list, from a file or interactively.

    Depending on the earlier 'jah'/'ei' answer stored in the global
    `andmed`, either reads "name, email" lines from a user-named file or
    prompts for name/email pairs until an empty name is entered. Each
    participant becomes a [name, email] list in the global `osalejad`.
    """
    global osalejad
    if andmed.lower() == 'jah':
        failinimi = input("Palun sisestage failinimi: ")
        fail = open(failinimi, encoding="UTF-8")
        # start with an empty list
        osalejad = []
        for rida in fail:
            rida = rida.strip()  # drop the trailing newline
            osalejad += [rida.split(', ')]  # each line is "name, email"
        fail.close()
    elif andmed.lower() == 'ei':
        # start with an empty list
        osalejad = []
        while True:
            nimi = input("Sisesta inimese nimi (lõpetamiseks jäta tühjaks): ")
            if nimi == "":
                break
            email = input("Sisesta e-mail: ")
            sub_list = []
            sub_list.append(nimi)
            sub_list.append(email)
            osalejad.append(sub_list)
andmesisestus()
# Require at least two participants; re-prompt from scratch until satisfied.
while True:
    if len(osalejad) < 2:
        print("Sisestama peab vähemalt 2 osalejat! Sisesta kõik osalejad uuesti.")
        andmesisestus()
    else:
        break
osalejad.sort()
def sulgudeta(list):  # NOTE: parameter name kept for compatibility; it shadows the builtin
    """Print each participant row on its own line, elements joined by ': '.

    I.e. display the nested list without brackets/quotes — "name: email".
    Empty rows print nothing (matching the original element-by-element loop).
    """
    for row in list:
        if row:
            print(': '.join(map(str, row)))
    return
sulgudeta(osalejad)
# Let the user correct misspelled participant names until they confirm.
while True:
    kontroll = str(input("Kas kõik nimed on korrektsed? [Jah/Ei]: "))
    if kontroll.lower() == "jah":
        break
    nimi = str(input("Sisesta isiku täisnimi, kelle nime soovid parandada: "))
    # if the name is not in the list, keep asking until it is
    uus_list1 = []  # helper list of names only, used to find the record index
    for i in osalejad:
        uus_list1.append(i[0])
    try:
        asukoht = uus_list1.index(nimi)  # index of the entered name in the list
    # NOTE(review): bare except — catches the ValueError from .index, but
    # would also hide any other error; consider `except ValueError`.
    except:
        asukoht = i
        print("Sellist nime ei ole nimekirjas! Sisestatud nimed: " + str(uus_list1).replace('[', '').replace(']', '').replace("'", ''))
        # re-prompt until an existing name is given, then apply the fix
        while nimi not in uus_list1:
            nimi = str(input("Proovi uuesti: "))
        else:
            uus_nimi = input("Sisesta õige nimi: ")
            asukoht = uus_list1.index(nimi)
            osalejad[asukoht][0] = uus_nimi
            print("Uuendatud osalejate nimekiri:")
            sulgudeta(osalejad)
    else:
        # name found on the first try: replace it with the corrected one
        uus_nimi = input("Sisesta õige nimi: ")
        osalejad[asukoht][0] = uus_nimi
        print("Uuendatud osalejate nimekiri:")
        sulgudeta(osalejad)
# Let the user correct participant e-mail addresses until they confirm.
while True:
    kontroll = str(input("Kas kõik e-mailid on korrektsed? [Jah/Ei]: "))
    if kontroll.lower() == "jah":
        break  # all addresses confirmed correct; stop the correction loop
    nimi = str(input("Sisesta isiku täisnimi, kelle e-maili soovid parandada: "))
    uus_list2 = []  # helper list of names only, used to find the record index
    for i in osalejad:
        uus_list2.append(i[0])
    try:
        asukoht = uus_list2.index(nimi)  # index of the entered name in the list
    # NOTE(review): bare except — catches the ValueError from .index, but
    # would also hide any other error; consider `except ValueError`.
    except:
        asukoht = i
        print("Sellist nime ei ole nimekirjas! Sisestatud nimed: " + str(uus_list2).replace('[', '').replace(']', '').replace("'", ''))
        # re-prompt until an existing name is given, then apply the fix
        while nimi not in uus_list2:
            nimi = str(input("Proovi uuesti: "))
        else:
            uus_email = input("Sisesta õige e-mail: ")
            asukoht = uus_list2.index(nimi)
            osalejad[asukoht][1] = uus_email
            print("Uuendatud osalejate nimekiri:")
            sulgudeta(osalejad)
    else:
        # name found on the first try: replace the stored e-mail address
        uus_email = input("Sisesta õige e-mail: ")
        osalejad[asukoht][1] = uus_email
        print("Uuendatud osalejate nimekiri:")
        sulgudeta(osalejad)
################################################################################
##############LOOSIMINE###########################################
# THE DRAW: build a list of participant sequence numbers 1..N.
osalejate_nr = []
loendaja = 1  # running sequence number
for i in range(len(osalejad)):
    osalejate_nr.append(loendaja)  # append this participant's number
    loendaja += 1
def paki_unikaalsus():
    """Draw a random gift assignment; return it, or False if anyone drew themselves.

    Shuffles the participant numbers into the global `kellele_pakk` so that
    position i holds the number of the person participant i must gift.
    """
    global kellele_pakk
    # new list with the same numbers as osalejate_nr, in random order
    kellele_pakk = random.sample(osalejate_nr, len(osalejate_nr))
    for i in range(len(osalejate_nr)):
        if osalejate_nr[i] == kellele_pakk[i]:  # someone would gift themselves
            return False
    return kellele_pakk  # valid draw: numbers of the gift recipients
# Re-draw until nobody is assigned to themselves.
# NOTE(review): the loop body re-calls the function redundantly (each failed
# round draws twice); the final state is still valid because the condition
# call that ends the loop is the last one executed.
while paki_unikaalsus() == False:
    paki_unikaalsus()
loendaja = 0
# Append to each participant's record the number of the person they gift.
# Record layout afterwards: [name, email, recipient number].
for i in range(len(osalejad)):
    for j in range(1):  # NOTE(review): range(1) runs once — the inner loop is redundant
        osalejad[i].append(kellele_pakk[loendaja])
    loendaja += 1
#####################################################################
############# E-MAIL SENDING ###########################
# Send every participant an e-mail naming the person they must gift.
loendaja = 0
while loendaja < len(osalejad):
    for i in range(len(osalejad)):
        pakk_teha = osalejad[i][2]  # 1-based number of the gift recipient
        fromaddr = "loosipakk@gmail.com"  # address the mails are sent from
        toaddr = osalejad[i][1]
        msg = MIMEMultipart()
        msg['From'] = fromaddr
        msg['To'] = toaddr
        msg['Subject'] = "Loosipaki tegemine"
        body = ("Tere, " + osalejad[i][0] + "!\n" + "Sina teed jõulupaki isikule: " + osalejad[pakk_teha-1][0] + ".\n" + "Tervitades \n Mr. Loosipaki Robot \n" + "P.S. ära vasta sellele kirjale!")
        msg.attach(MIMEText(body, 'plain'))
        server = smtplib.SMTP('smtp.gmail.com', 587)
        server.starttls()
        # Replace with the real password; available from the program authors.
        server.login(fromaddr, "*******")
        text = msg.as_string()
        server.sendmail(fromaddr, toaddr, text)
        server.quit()
        loendaja += 1
print("E-kirjad on teele saadetud. Ilusat jõuluaega!")
23,335 | 592be16d591a13065936d33a2f9358a7d6551452 | import sys
#sys.path.append("/home/pi/code/Maestro/modules")
#import maestro
import RPi.GPIO as gpio
import time
import tty
import termios
#servo = maestro.Controller()
gpio.setmode(gpio.BCM)  # use Broadcom (BCM) pin numbering
gpio.setup(22, gpio.OUT) # button_a
gpio.setup(23, gpio.OUT) # button_b
gpio.setup(24, gpio.OUT) # button_c
def getch():
    """Read one character from stdin without waiting for Enter.

    Temporarily switches the terminal into raw mode and always restores
    the previous terminal settings, even if the read fails.
    """
    stdin_fd = sys.stdin.fileno()
    saved_settings = termios.tcgetattr(stdin_fd)
    try:
        tty.setraw(stdin_fd)
        return sys.stdin.read(1)
    finally:
        termios.tcsetattr(stdin_fd, termios.TCSADRAIN, saved_settings)
def button_a():
    """Drive GPIO pin 22 high to switch motor_a on."""
    gpio.output(22, True)
    print("motor_a on")
def button_b():
    """Drive GPIO pin 23 high to switch motor_b (labelled 4a) on."""
    gpio.output(23, True)
    print("motor_4a on")
def button_c():
    """Drive GPIO pin 24 high to switch motor_c (labelled 4b) on."""
    gpio.output(24, True)
    print("motor_4b on")
print ("a: for motor_3")
print ("b: for motor_4a")
print ("c: for motor_4b")
# Key loop: a/b/c switch the corresponding motor on; x turns everything
# off and exits. Keys are read one at a time without Enter via getch().
while True:
    char = getch()
    if(char == "a"):
        button_a()
        print ("button_a")
    if(char == "b"):
        button_b()
        print ("button_b")
    if(char == "c"):
        button_c()
        print ("button_c")
    if(char == "x"):
        print("PROGRAM ENDED")
        gpio.output(22, False) #button_a off
        gpio.output(23, False) #button_4a off
        gpio.output(24, False) #button_4b off
        break
    char = ""
gpio.cleanup()
23,336 | cef96e6d8d051fdd12988072181a1934aa8a12f0 | from flask import Flask
from flask_mail import Mail
import os
app = Flask(__name__)
# Secrets and credentials come from the environment; never hard-code them.
app.config['SECRET_KEY'] = os.environ.get("SECRETKEY")
# Outgoing mail goes through Hushmail over implicit SSL (port 465).
app.config["MAIL_SERVER"] = "smtp.hushmail.com"
app.config["MAIL_PORT"] = 465
app.config["MAIL_USE_SSL"] = True
app.config["MAIL_USERNAME"] = os.environ.get("USER_MAIL")
app.config["MAIL_PASSWORD"] = os.environ.get("PASSWORD")
mail = Mail(app)
# Imported last so routes can import `app`/`mail` from this module
# without a circular import.
from application import routes
23,337 | 0b25cdbcd03ae7c49dc44a592b0c94b44a47854a | from login import login
from profile import get_profile
from schedule import get_current_courses, check_course
from getpass import getpass
if __name__ == '__main__':
    # Prompt for credentials; getpass keeps the password off the screen.
    nim = str(input('Student ID: '))
    password = str(getpass('Password: '))  # BUG FIX: prompt was misspelled 'Passowrd: '
    session = login(nim, password)
    user_profile = get_profile(session)
    print('Welcome to SIX Bot')
    print('Your profile')
    print('Name :', user_profile['name'])
    print('NIM :', user_profile['nim'])
    print('Email :', user_profile['email'])
    # Fetch the current course list and run the check on each course.
    courses = get_current_courses(session, user_profile['nim'])
    print(f'{len(courses)} found!')
    for course in courses:
        print(f'[INFO] checking {course["title"]}')
        check_course(session, course)
23,338 | 9858595ddbedfa663f9303b5813c1949afaf4c16 | # # Finding a Value in a List with the .index() Method
#
# spam = ['hello', 'hi', 'howdy', 'heyas']
# print(spam.index('hello')) # results would be 0
# print(spam.index('heyas')) # results would be 3
# print(spam.index('howdy howdy howdy')) # ValueError: 'howdy howdy howdy' is not in list
#
# # when there are duplicates of values in the list it will always give the index value of the first instance
# eggs = ['Zophie', 'Pooka', 'Fat-tail', 'Pooka']
# print(eggs.index('Pooka')) # index result would be 1
###########################################
# # Adding values to lists with the append() and insert() methods
# spam = ['cat', 'dog', 'bat']
# spam.append('moose')
# print(spam) # spam = ['cat', 'dog', 'bat', 'moose]
#
# spam.insert(1, 'chicken')
# print(spam) # spam = ['cat', 'chicken', 'dog', 'bat', 'moose']
#############################################
# Removing Values from lists with remove()
# spam = ['cat', 'bat', 'rat', 'elephant']
# spam.remove('rat')
# print(spam) # spam = ['cat', 'bat', 'elephant']
#
# ####################
#
# spam = ['cat', 'bat', 'rat', 'elephant']
# spam.remove('chicken') # ValueError: list.remove(x): x not in list
#
# ####################
#
# spam = ['cat', 'bat', 'rat', 'cat', 'hat', 'cat']
# spam.remove('cat')
# print(spam) # spam = ['bat', 'rat', 'cat', 'hat', 'cat']
# #The del statement is good to use when you know the index of the value you want to remove from the list. The remove() method is good when you know the value you want to remove from the list.
########################################################
## Sorting the Values in a List with the sort() Method
# spam = [2, 5, 3.14, 1, -7]
# spam.sort()
# print(spam) # returns [-7, 1, 2, 3.14, 5]
#
# spamOne = ['cats', 'ants', 'dogs', 'badgers', 'elephants']
# spamOne.sort()
# print(spamOne) # returns ['ants', 'badgers', 'cats', 'dogs', 'elephants']
#
# spam.sort(reverse=True)
# print(spam) # returns [5, 3.14, 2, 1, -7]
#
# ## You cannot sort a list containing both string values and numerical values.
# ## sort() uses "ASCIIbetical" order, so capital letters come before lowercase ones.
#
# spamTwo =['Alice', 'ants', 'Bob', 'badgers', 'Carol', 'cats']
# spamTwo.sort()
# print(spamTwo) # returns ['Alice', 'Bob', 'Carol', 'ants', 'badgers', 'cats']
#
# spamTwo.sort(key=str.lower)
# print(spamTwo) # returns ['Alice', 'ants', 'badgers', 'Bob', 'Carol', 'cats']
# ## sort with the key=str.lower case will put the list into alphabetical order
#############################
## Reversing the Values in a List with the reverse() Method
# spam = ['cat', 'dog', 'moose']
# spam.reverse()
# print(spam) # returns ['moose', 'dog', 'cat']
|
23,339 | ee051914ebeb9ac5a1cc95061551ac857f8b2b9d | import tkinter
import cv2
import PIL.Image, PIL.ImageTk
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
sys.path.append("..")
from utils import label_map_util
from utils import visualization_utils as vis_util
from tkinter import *
MODEL_NAME = 'inference_graph'
CWD_PATH = os.getcwd()
# Paths to the frozen detection graph and its label map.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
PATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')
NUM_CLASSES = 57  # number of classes in the label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the frozen TensorFlow graph once and keep a session open for reuse.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
    sess = tf.Session(graph=detection_graph)
# Input/output tensors of the detection graph, looked up by name.
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
class App:
    """Tkinter GUI: live video with detections on a canvas, plus a bottom row
    of per-category sign icons (stop / speed limit / prohibition / mandatory /
    warning / info) refreshed from the most recent detection."""
    def __init__(self, window, window_title, video_source=0):
        self.window = window
        self.window.title(window_title)
        self.video_source = video_source
        # open video source
        self.vid = MyVideoCapture(video_source)
        # Create a canvas that can fit the above video source size
        self.canvas = Canvas(window, width = self.vid.width, height = self.vid.height)
        self.canvas.pack()
        # Bottom frame holds one (caption, icon) column per sign category.
        self.botFrame = Frame(window)
        self.botFrame.pack(side=BOTTOM)
        imgpath = "GUI\\empty.png"  # placeholder icon shown before any detection
        img = PIL.Image.open(imgpath)
        img = PIL.ImageTk.PhotoImage(img)
        self.StopLabelText = Label(self.botFrame, text = "")
        self.StopLabelText.grid(row = 0, column = 0, sticky='nsew')
        self.StopLabel = Label(self.botFrame, text = "StopLabel", image=img)
        self.StopLabel.grid(row = 1, column = 0, sticky='nsew')
        self.SpeedLabelText = Label(self.botFrame, text = "Ograniczenie Prędkości:")
        self.SpeedLabelText.grid(row = 0, column = 1, sticky='nsew')
        self.SpeedLabel = Label(self.botFrame, text = "SpeedLabel", image=img)
        self.SpeedLabel.grid(row = 1, column = 1, sticky='nsew')
        self.ZakazLabelText = Label(self.botFrame, text = "Zakazy")
        self.ZakazLabelText.grid(row = 0, column = 2, sticky='nsew')
        self.ZakazLabel = Label(self.botFrame, text = "ZakazLabel", image=img)
        self.ZakazLabel.grid(row = 1, column = 2, sticky='nsew')
        self.NakazLabelText = Label(self.botFrame, text = "Nakazy")
        self.NakazLabelText.grid(row = 0, column = 3, sticky='nsew')
        self.NakazLabel = Label(self.botFrame, text = "NakazLabel", image=img)
        self.NakazLabel.grid(row = 1, column = 3, sticky='nsew')
        self.UwagaLabelText = Label(self.botFrame, text = "Uwaga")
        self.UwagaLabelText.grid(row = 0, column = 4, sticky='nsew')
        self.UwagaLabel = Label(self.botFrame, text = "UwagaLabel", image=img)
        self.UwagaLabel.grid(row = 1, column = 4, sticky='nsew')
        self.InfoLabelText = Label(self.botFrame, text = "Info")
        self.InfoLabelText.grid(row = 0, column = 5, sticky='nsew')
        self.InfoLabel = Label(self.botFrame, text = "InfoLabel", image=img)
        self.InfoLabel.grid(row = 1, column = 5, sticky='nsew')
        # After it is called once, the update method will be automatically called every delay milliseconds
        #check overhead for 5/10/30/60 ms
        self.delay = 15
        self.update()
        self.window.mainloop()
    def update(self):
        """Grab one frame, run detection, redraw the canvas, refresh icons,
        and reschedule itself after self.delay milliseconds."""
        # Get a frame from the video source
        ret, frame = self.vid.get_frame()
        # NOTE(review): the rescheduling call below is inside `if ret:`, so a
        # single failed frame read stops all further updates — verify intended.
        if ret:
            frame_expanded = np.expand_dims(frame, axis=0)
            # Perform the actual detection by running the model with the image as input
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: frame_expanded})
            TempClasses = classes
            # Draw the results of the detection (aka 'visulaize the results')
            # NOTE(review): `frame` is indexed below as frame[0] (image) and
            # frame[1] (label string); this relies on a locally modified
            # vis_util that returns a pair — stock TF object-detection
            # vis_util returns only the image. Verify against utils/.
            frame = vis_util.visualize_boxes_and_labels_on_image_array(
                frame,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8,
                min_score_thresh=0.60)
            self.photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(frame[0]))
            self.canvas.create_image(0,0,image=self.photo, anchor=tkinter.NW)
            self.window.after(self.delay, self.update)
            # Route the detected label to the matching icon slot. Assigning
            # label.image keeps a reference so Tk does not garbage-collect it.
            if(frame[1].startswith("speed")):
                imgpath = "GUI\speed"+frame[1][-2:]+".png"
                img = PIL.Image.open(imgpath)
                img = PIL.ImageTk.PhotoImage(img)
                self.SpeedLabel.configure(image = img)
                self.SpeedLabel.image = img
            if(frame[1].startswith("koniec_zakazu")):
                imgpath = "GUI\koniec_zakazu.png"
                img = PIL.Image.open(imgpath)
                img = PIL.ImageTk.PhotoImage(img)
                self.SpeedLabel.configure(image = img)
                self.SpeedLabel.image = img
            if(frame[1].startswith("zakaz_")):
                imgpath = "GUI\\"+frame[1]+".png"
                img = PIL.Image.open(imgpath)
                img = PIL.ImageTk.PhotoImage(img)
                self.ZakazLabel.configure(image = img)
                self.ZakazLabel.image = img
            if(frame[1].startswith("nakaz_")):
                imgpath = "GUI\\"+frame[1]+".png"
                img = PIL.Image.open(imgpath)
                img = PIL.ImageTk.PhotoImage(img)
                self.NakazLabel.configure(image = img)
                self.NakazLabel.image = img
            if(frame[1].startswith("stop") or frame[1] == "inne_niebezpieczenstwo"):
                imgpath = "GUI\stop.png"
                img = PIL.Image.open(imgpath)
                img = PIL.ImageTk.PhotoImage(img)
                self.StopLabel.configure(image = img)
                self.StopLabel.image = img
            if(frame[1]=="piciong" or frame[1].startswith("stromo") or frame[1].startswith("skret") or frame[1].startswith("przejscie") or frame[1]=="zakrety_zakrety" or frame[1]=="roboty" or frame[1]=="przejazd_kol_z_zaporami" or frame[1]=="dzieci"):
                imgpath = "GUI\\"+frame[1]+".png"
                img = PIL.Image.open(imgpath)
                img = PIL.ImageTk.PhotoImage(img)
                self.UwagaLabel.configure(image = img)
                self.UwagaLabel.image = img
            if(frame[1] == "droga_dla_rowerow" or frame[1] == "droga_ekspresowa" or frame[1] == "rondo" or frame[1] == "zawracanie" ):
                imgpath = "GUI\\"+frame[1]+".png"
                img = PIL.Image.open(imgpath)
                img = PIL.ImageTk.PhotoImage(img)
                self.InfoLabel.configure(image = img)
                self.InfoLabel.image = img
class MyVideoCapture:
    """Thin wrapper around cv2.VideoCapture that releases the device on GC."""
    def __init__(self, video_source=0):
        """Open the video source; raise ValueError if it cannot be opened."""
        self.vid = cv2.VideoCapture(video_source)
        if not self.vid.isOpened():
            raise ValueError("Unable to open video source", video_source)
        # Get video source width and height
        self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
    def __del__(self):
        # Release the video source when the object is destroyed.
        # BUG FIX: the original also called self.window.mainloop() here; this
        # class has no `window` attribute, so that line raised AttributeError
        # (silently swallowed during interpreter shutdown).
        if self.vid.isOpened():
            self.vid.release()
    def get_frame(self):
        """Return (success, frame) with the frame converted from BGR to RGB.

        Returns (False, None) when the source is closed or the read fails.
        """
        if self.vid.isOpened():
            ret, frame = self.vid.read()
            if ret:
                # Return a boolean success flag and the current frame
                return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            return (ret, None)
        # BUG FIX: the original returned `(ret, None)` here, but `ret` was
        # never assigned on this path, raising NameError for a closed source.
        return (False, None)
# Create a window and pass it to the Application object
App(tkinter.Tk(), "Tkinter and OpenCV")
|
23,340 | f3d0e1bb4c192f4a35051562027f0c6b72d4d45d | # coding: utf8
from __future__ import unicode_literals
from .templates import TPL_DEP_SVG, TPL_DEP_WORDS, TPL_DEP_ARCS
from .templates import TPL_ENT, TPL_ENTS, TPL_FIGURE, TPL_TITLE, TPL_PAGE
from ..util import minify_html
class DependencyRenderer(object):
    """Render dependency parses as SVGs."""
    style = 'dep'

    def __init__(self, options={}):
        """Initialise dependency renderer.
        options (dict): Visualiser-specific options (compact, word_spacing,
            arrow_spacing, arrow_width, arrow_stroke, distance, offset_x,
            color, bg, font)
        """
        # `options` is only read, never mutated, so the shared default is safe.
        self.compact = options.get('compact', False)
        self.word_spacing = options.get('word_spacing', 45)
        self.arrow_spacing = options.get('arrow_spacing',
                                         12 if self.compact else 20)
        self.arrow_width = options.get('arrow_width',
                                       6 if self.compact else 10)
        self.arrow_stroke = options.get('arrow_stroke', 2)
        self.distance = options.get('distance', 150 if self.compact else 175)
        self.offset_x = options.get('offset_x', 50)
        self.color = options.get('color', '#000000')
        self.bg = options.get('bg', '#ffffff')
        self.font = options.get('font', 'Arial')

    def render(self, parsed, page=False, minify=False):
        """Render complete markup.
        parsed (list): Dependency parses to render.
        page (bool): Render parses wrapped as full HTML page.
        minify (bool): Minify HTML markup.
        RETURNS (unicode): Rendered SVG or HTML markup.
        """
        rendered = [self.render_svg(i, p['words'], p['arcs'])
                    for i, p in enumerate(parsed)]
        if page:
            content = ''.join([TPL_FIGURE.format(content=svg)
                               for svg in rendered])
            markup = TPL_PAGE.format(content=content)
        else:
            markup = ''.join(rendered)
        if minify:
            return minify_html(markup)
        return markup

    def render_svg(self, render_id, words, arcs):
        """Render SVG.
        render_id (int): Unique ID, typically index of document.
        words (list): Individual words and their tags.
        arcs (list): Individual arcs and their start, end, direction and label.
        RETURNS (unicode): Rendered SVG markup.
        """
        self.levels = self.get_levels(arcs)
        self.highest_level = len(self.levels)
        self.offset_y = self.distance/2*self.highest_level+self.arrow_stroke
        self.width = self.offset_x+len(words)*self.distance
        self.height = self.offset_y+3*self.word_spacing
        self.id = render_id
        words = [self.render_word(w['text'], w['tag'], i)
                 for i, w in enumerate(words)]
        arcs = [self.render_arrow(a['label'], a['start'],
                                  a['end'], a['dir'], i)
                for i, a in enumerate(arcs)]
        content = ''.join(words) + ''.join(arcs)
        return TPL_DEP_SVG.format(id=self.id, width=self.width,
                                  height=self.height, color=self.color,
                                  bg=self.bg, font=self.font, content=content)

    def render_word(self, text, tag, i):
        """Render individual word.
        text (unicode): Word text.
        tag (unicode): Part-of-speech tag.
        i (int): Unique ID, typically word index.
        RETURNS (unicode): Rendered SVG markup.
        """
        y = self.offset_y+self.word_spacing
        x = self.offset_x+i*self.distance
        return TPL_DEP_WORDS.format(text=text, tag=tag, x=x, y=y)

    def render_arrow(self, label, start, end, direction, i):
        """Render individual arrow.
        label (unicode): Dependency label.
        start (int): Index of start word.
        end (int): Index of end word.
        direction (unicode): Arrow direction, 'left' or 'right'.
        i (int): Unique ID, typically arrow index.
        RETURNS (unicode): Rendered SVG markup.
        """
        level = self.levels.index(end-start)+1
        x_start = self.offset_x+start*self.distance+self.arrow_spacing
        y = self.offset_y
        x_end = (self.offset_x+(end-start)*self.distance+start*self.distance
                 - self.arrow_spacing*(self.highest_level-level)/4)
        y_curve = self.offset_y-level*self.distance/2
        if self.compact:
            y_curve = self.offset_y-level*self.distance/6
        if y_curve == 0 and len(self.levels) > 5:
            y_curve = -self.distance
        arrowhead = self.get_arrowhead(direction, x_start, y, x_end)
        arc = self.get_arc(x_start, y, y_curve, x_end)
        return TPL_DEP_ARCS.format(id=self.id, i=i, stroke=self.arrow_stroke,
                                   head=arrowhead, label=label, arc=arc)

    def get_arc(self, x_start, y, y_curve, x_end):
        """Render individual arc.
        x_start (int): X-coordinate of arrow start point.
        y (int): Y-coordinate of arrow start and end point.
        y_curve (int): Y-corrdinate of Cubic Bézier y_curve point.
        x_end (int): X-coordinate of arrow end point.
        RETURNS (unicode): Definition of the arc path ('d' attribute).
        """
        template = "M{x},{y} C{x},{c} {e},{c} {e},{y}"
        if self.compact:
            template = "M{x},{y} {x},{c} {e},{c} {e},{y}"
        return template.format(x=x_start, y=y, c=y_curve, e=x_end)

    def get_arrowhead(self, direction, x, y, end):
        """Render individual arrow head.
        direction (unicode): Arrow direction, 'left' or 'right'.
        x (int): X-coordinate of arrow start point.
        y (int): Y-coordinate of arrow start and end point.
        end (int): X-coordinate of arrow end point.
        RETURNS (unicode): Definition of the arrow head path ('d' attribute).
        """
        # BUG FIX: the original used `direction is 'left'` — identity
        # comparison with a string literal is implementation-dependent
        # (and a SyntaxWarning in modern CPython); use equality.
        if direction == 'left':
            pos1, pos2, pos3 = (x, x-self.arrow_width+2, x+self.arrow_width-2)
        else:
            pos1, pos2, pos3 = (end, end+self.arrow_width-2,
                                end-self.arrow_width+2)
        arrowhead = (pos1, y+2, pos2, y-self.arrow_width, pos3,
                     y-self.arrow_width)
        return "M{},{} L{},{} {},{}".format(*arrowhead)

    def get_levels(self, arcs):
        """Calculate available arc height "levels".
        Used to calculate arrow heights dynamically and without wasting space.
        args (list): Individual arcs and their start, end, direction and label.
        RETURNS (list): Arc levels sorted from lowest to highest.
        """
        levels = set(map(lambda arc: arc['end'] - arc['start'], arcs))
        return sorted(list(levels))
class EntityRenderer(object):
    """Render named entities as HTML."""
    style = 'ent'

    def __init__(self, options={}):
        """Initialise entity renderer.
        options (dict): Visualiser-specific options (colors, ents)
        """
        # Default highlight colors per entity label; user-supplied colors
        # override these. `options` itself is never mutated.
        colors = {'ORG': '#7aecec', 'PRODUCT': '#bfeeb7', 'GPE': '#feca74',
                  'LOC': '#ff9561', 'PERSON': '#aa9cfc', 'NORP': '#c887fb',
                  'FACILITY': '#9cc9cc', 'EVENT': '#ffeb80', 'LAW': '#ff8197',
                  'LANGUAGE': '#ff8197', 'WORK_OF_ART': '#f0d0ff',
                  'DATE': '#bfe1d9', 'TIME': '#bfe1d9', 'MONEY': '#e4e7d2',
                  'QUANTITY': '#e4e7d2', 'ORDINAL': '#e4e7d2',
                  'CARDINAL': '#e4e7d2', 'PERCENT': '#e4e7d2'}
        colors.update(options.get('colors', {}))
        self.default_color = '#ddd'
        self.colors = colors
        self.ents = options.get('ents', None)

    def render(self, parsed, page=False, minify=False):
        """Render complete markup.
        parsed (list): Dependency parses to render.
        page (bool): Render parses wrapped as full HTML page.
        minify (bool): Minify HTML markup.
        RETURNS (unicode): Rendered HTML markup.
        """
        rendered = [self.render_ents(p['text'], p['ents'],
                                     p.get('title', None)) for p in parsed]
        if page:
            docs = ''.join([TPL_FIGURE.format(content=doc)
                            for doc in rendered])
            markup = TPL_PAGE.format(content=docs)
        else:
            markup = ''.join(rendered)
        if minify:
            return minify_html(markup)
        return markup

    def render_ents(self, text, spans, title):
        """Render entities in text.
        text (unicode): Original text.
        spans (list): Individual entity spans and their start, end and label.
        title (unicode or None): Document title set in Doc.user_data['title'].
        """
        markup = ''
        offset = 0
        for span in spans:
            label = span['label']
            start = span['start']
            end = span['end']
            entity = text[start:end]
            fragments = text[offset:start].split('\n')
            for i, fragment in enumerate(fragments):
                markup += fragment
                if len(fragments) > 1 and i != len(fragments)-1:
                    # BUG FIX: the original emitted '</br>', which is not a
                    # valid HTML line-break tag; use '<br>'.
                    markup += '<br>'
            if self.ents is None or label.upper() in self.ents:
                color = self.colors.get(label.upper(), self.default_color)
                markup += TPL_ENT.format(label=label, text=entity, bg=color)
            else:
                markup += entity
            offset = end
        markup += text[offset:]
        markup = TPL_ENTS.format(content=markup, colors=self.colors)
        if title:
            markup = TPL_TITLE.format(title=title) + markup
        return markup
|
23,341 | af378edc4dabfb4dcae2f24fefcf6258be7cf2d4 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from datetime import datetime
import uuid
import os
import json
def gen_serial():
    """Return a fresh random UUID4 rendered as a string serial number."""
    return '{}'.format(uuid.uuid4())
def gen_timestamp():
    """Return the current local time as a POSIX timestamp (float seconds)."""
    now = datetime.now()
    return now.timestamp()
def mkdir(dir):
    """Create directory `dir` (including parents) if missing; return its path.

    Uses exist_ok=True so two concurrent callers cannot race between the
    existence check and the creation (the original's check-then-create
    raised FileExistsError in that window).
    """
    os.makedirs(dir, exist_ok=True)
    return dir
def to_json(dict):
    """Serialize the given mapping to a JSON string.

    (Parameter name shadows the builtin but is kept for compatibility.)
    """
    serialized = json.dumps(dict)
    return serialized
|
23,342 | ed295e3ee312e834a234c0e9a0f780747e5fae3f | #Oscar Fabian Nanez Nunez
#Gabriela Suarez Carvajal
import re
import sys
# Module-level lexer state.
# NOTE(review): `global` at module scope is a no-op; these four statements
# have no effect and only document intent.
global lexeme
global line
global diccLexema
global diccToken
diccLexema = []  # lexemes recognized so far
diccToken = []   # token names, parallel to diccLexema
row = 1          # current line number, used in error messages
back = [0, 0]
# Reserved words of the language; matched after an identifier is scanned.
reserved_words = [
    'log',
    'true',
    'false',
    'importar',
    'for',
    'funcion',
    'retorno',
    'end',
    'if',
    'while',
    'nil',
    'else',
    'desde',
    'todo',
    'leer'
]
# Every character the lexer accepts; anything else is a lexical error.
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
            'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
            'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '{', '}', '#', '[', ']', '(', ')', '<', '>', '=', '.',
            '!', '&', '|', '+', '-', '*', '/', '%', '^', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ' ', '"', ',', ':', '\n']
def delta(column, char, state):
    """Transition function of the hand-written lexer DFA.

    Args:
        column: 1-based position of `char` in the current line (error reports).
        char: the character being consumed.
        state: current automaton state (0 = start state).

    Returns:
        [next_state, backtrack], where backtrack tells the caller how many
        characters to re-feed (0, 1 or 2) because this transition looked ahead.

    Side effects: appends recognised lexemes/token names to the global
    diccLexema / diccToken lists, accumulates the global `lexeme`, and may
    clear the global `line` (comment handling).
    NOTE(review): error messages reference a global `row` that is not defined
    in this chunk - presumably initialised earlier in the file; confirm.
    """
    global lexeme
    global line
    # state 0: start state - dispatch on the first character of the next token
    if state == 0:
        #cadenas no especificas => string
        if char not in alphabet:
            print(">>> Error lexico(linea:" + str(row) + ",posicion:" + str(column) + ")")
            exit(0)
        if char == '"':
            lexeme = ""
            return [8, 0]
        # operadores especiales
        elif char == '{':
            diccLexema.append('{')
            diccToken.append('token_llave_izq')
            return [0, 0]
        elif char == '}':
            diccLexema.append('}')
            diccToken.append('token_llave_der')
            return [0, 0]
        elif char == '#':
            # comment: discard the rest of the current line
            line = ""
            return [0, 0]
        elif char == '[':
            diccLexema.append('[')
            diccToken.append('token_cor_izq')
            return [0, 0]
        elif char == ']':
            diccLexema.append(']')
            diccToken.append('token_cor_der')
            return [0, 0]
        elif char == '(':
            diccLexema.append('(')
            diccToken.append('token_par_izq')
            return [0, 0]
        elif char == ')':
            diccLexema.append(')')
            diccToken.append('token_par_der')
            return [0, 0]
        elif char == '.':
            diccLexema.append('.')
            diccToken.append('token_point')
            return [0, 0]
        elif char == '+':
            diccLexema.append('+')
            diccToken.append('token_mas')
            return [0, 0]
        elif char == '-':
            diccLexema.append('-')
            diccToken.append('token_menos')
            return [0, 0]
        elif char == '*':
            diccLexema.append('*')
            diccToken.append('token_mul')
            return [0, 0]
        elif char == '/':
            diccLexema.append('/')
            diccToken.append('token_div')
            return [0, 0]
        elif char == '%':
            diccLexema.append('%')
            diccToken.append('token_mod')
            return [0, 0]
        elif char == '^':
            diccLexema.append('^')
            diccToken.append('token_pot')
            return [0, 0]
        elif char == ',':
            diccLexema.append(',')
            diccToken.append('token_coma')
            return [0, 0]
        elif char == ':':
            diccLexema.append(':')
            diccToken.append('token_dosp')
            return [0, 0]
        # two-character operators need one character of lookahead
        elif char == '>':
            return [1, 0]
        elif char == '<':
            return [2, 0]
        elif char == '=':
            return [3, 0]
        elif char == '!':
            return [4, 0]
        elif char == '&':
            return [5, 0]
        elif char == '|':
            return [6, 0]
        # 'i' could start the keyword 'in' or an ordinary identifier
        elif char == 'i':
            return [7, 0]
        #cadenas no especificas => id
        #palabras reservadas
        elif re.match(r'[a-z]', char) or re.match(r'[A-Z]', char):
            lexeme=char
            return [9, 0]
        #cadenas no especificas => int, float
        elif re.match(r'[0-9]', char):
            lexeme = char
            return [10, 0]
        else:
            # whitespace / other accepted characters: stay in the start state
            return [0, 0]
    # state 1: saw '>' - is it '>=' or plain '>'?
    if state == 1:
        if char == '=':
            diccLexema.append('>=')
            diccToken.append('token_mayor_igual')
            return [0, 0]
        else:
            diccLexema.append('>')
            diccToken.append('token_mayor')
            state = 0  # NOTE(review): local assignment, has no effect
            return [0, 1]
    # state 2: saw '<'
    if state == 2:
        if char == '=':
            diccLexema.append('<=')
            diccToken.append('token_menor_igual')
            return [0,0]
        else:
            diccLexema.append('<')
            diccToken.append('token_menor')
            return [0,1]
    # state 3: saw '='
    if state == 3:
        if char == '=':
            diccLexema.append('==')
            diccToken.append('token_igual_num')
            return [0, 0]
        else:
            diccLexema.append('=')
            diccToken.append('token_assign')
            return [0, 1]
    # state 4: saw '!'
    if state == 4:
        if char == '=':
            diccLexema.append('!=')
            diccToken.append('token_diff_num')
            return [0, 0]
        else:
            diccLexema.append('!')
            diccToken.append('token_not')
            return [0, 1]
    # state 5: saw '&' - only '&&' is legal
    if state == 5:
        if char == '&':
            diccLexema.append('&&')
            diccToken.append('token_and')
            return [0, 0]
        else:
            print(">>> Error lexico(linea:" + str(row) + ",posicion:" + str(column - 1) + ")")
            exit(0)
    # state 6: saw '|' - only '||' is legal
    if state == 6:
        if char == '|':
            diccLexema.append('||')
            diccToken.append('token_or')
            return [0, 0]
        else:
            print(">>> Error lexico(linea:" + str(row) + ",posicion:" + str(column - 1) + ")")
            exit(0)
    # state 7: saw 'i' - either the keyword 'in' or an identifier prefix
    if state == 7:
        if char == 'n':
            diccLexema.append('in')
            diccToken.append('in')
            return [0, 0]
        else:
            lexeme = 'i'
            return [9, 1]
    # state 8: inside a double-quoted string literal
    if state == 8:
        if char == '"':
            diccLexema.append(lexeme)
            diccToken.append('token_string')
            return[0, 0]
        else:
            lexeme = lexeme + char
            return [8, 0]
    # state 9: inside an identifier / reserved word
    if state == 9:
        if re.match(r'[a-z]', char) or re.match(r'[A-Z]', char) or re.match(r'[0-9]', char):
            lexeme = lexeme + str(char)
            return[9, 0]
        else:
            if lexeme in reserved_words:
                # reserved words use the lexeme itself as token name
                diccLexema.append(lexeme)
                diccToken.append(lexeme)
            else:
                diccLexema.append(lexeme)
                diccToken.append('id')
            return [0, 1]
    # state 10: inside an integer literal
    if state == 10:
        if re.match(r'[0-9]', char):
            lexeme = lexeme + char
            return [10, 0]
        elif char == '.':
            # possible float: need at least one digit after the point
            lexeme = lexeme + char
            return [11, 0]
        else:
            diccLexema.append(lexeme)
            diccToken.append('token_integer')
            return [0, 1]
    # state 11: saw digits followed by '.'
    if state == 11:
        if re.match(r'[0-9]', char):
            lexeme = lexeme + char
            return[12, 0]
        else:
            # bare trailing '.': emit the integer, re-feed '.' and this char
            diccLexema.append(lexeme[0:len(lexeme)-1])
            diccToken.append('token_integer')
            return[0, 2]
    # state 12: inside the fractional part of a float
    if state == 12:
        if re.match(r'[0-9]', char):
            lexeme = lexeme + char
            return[12, 0]
        else:
            diccLexema.append(lexeme)
            diccToken.append('token_float')
            return [0, 1]
# --- lexer driver: feed the input line by line through delta() ---------------
# NOTE(review): `back` is read on the first delta() call and `row` is
# incremented below, but neither is initialised in this chunk - presumably
# they are set earlier in the file; otherwise the first iteration raises
# NameError. Confirm.
line = input()
#lines = sys.stdin.readlines()
#for line in lines:
while line != 'wea':
    i = 0
    # trailing space forces the DFA to flush the last token of the line
    line = line + " "
    while i < len(line):
        back = delta(i+1, line[i], back[0])
        i = i + 1 - back[1]
    diccLexema.append("\n")
    diccToken.append('eol')
    # still in state 8 at end of line => unterminated string literal
    if back[0] == 8:
        print("Error lexico(linea:" + str(row) + ",posicion:" + str(i - len(lexeme)) + ")")
        exit(0)
    line = input()
    row += 1
#print(diccLexema)
print(diccToken)
# NOTE(review): `global` at module level is a no-op; these four statements
# have no effect.
global no_terminal
global derivation_chain
global token
global cont
# prime the parser with the first token
token = diccToken[0]
def PROGRAMA():
    """<PROGRAMA> -> <FUNCTIONSECT> <MODULE>; entry point of the parser."""
    global token
    first = ('funcion', 'id', 'log', 'for', 'while', 'if', 'leer', 'import', 'desde')
    if token in first:
        FUNCTIONSECT()
        MODULE()
    else:
        print("Error PROGRAMA()")
def FUNCTIONSECT():
    """Zero or more function definitions: FUNCTION FUNCTIONSECT | epsilon."""
    global token
    if token == 'funcion':
        FUNCTION()
        FUNCTIONSECT()
    elif token in ('id', 'log', 'for', 'while', 'if', 'leer', 'import', 'desde', 'eol'):
        pass  # epsilon: a follow token of FUNCTIONSECT
    else:
        print("Error FUNCTIONSECT")
def FUNCTION():
    """funcion id ( ARGDEC ) eol BLOCK RETURN end funcion eol."""
    global token
    if token != 'funcion':
        print("Error FUNCTION")
        return
    for expected in ('funcion', 'id', 'token_par_izq'):
        match(expected)
    ARGDEC()
    match('token_par_der')
    match('eol')
    BLOCK()
    RETURN()
    match("end")
    match("funcion")
    match('eol')
def ARGDEC():
    """Parameter list: id ARGDECP."""
    global token
    if token != 'id':
        print("Error ARGDEC")
        return
    match('id')
    ARGDECP()
def ARGDECP():
    """Parameter list tail: , ARGDEC | epsilon (before ')')."""
    global token
    if token == 'token_coma':
        match('token_coma')
        ARGDEC()
    elif token != 'token_par_der':
        print("Error ARGDECP")
def RETURN():
    """Optional return clause: retorno ( SENTENCE ) eol | epsilon (before 'end')."""
    global token
    if token == 'retorno':
        match('retorno')
        match('token_par_izq')
        SENTENCE()
        match('token_par_der')
        match('eol')
    elif token != 'end':
        print("Error RETURN")
def MODULE():
    """Sequence of imports, statements and blank lines."""
    global token
    if token in ('import', 'desde'):
        IMPORT()
        MODULE()
    elif token in ('id', 'log', 'for', 'while', 'token_par_izq', 'if', 'leer'):
        BLOCK()
        MODULE()
    elif token == 'eol':
        match('eol')
        MODULE()
    else:
        print("Error MODULE")
def IMPORT():
    """import ID | desde id import id."""
    global token
    if token == "import":
        match('import')
        ID()
    elif token == 'desde':
        for expected in ('desde', 'id', 'import', 'id'):
            match(expected)
    else:
        print("Error IMPORT")
def ID():
    """Dotted name tail: '.' ID | id."""
    global token
    if token == 'token_point':
        match('token_point')
        return ID()
    if token == 'id':
        match('id')
        return
    print("Error ID")
def BLOCK():
    """One statement: assignment/call, print, read, loop, if or while."""
    global token
    dispatch = {'log': PRINT, 'leer': READ, 'for': BUCLE, 'if': IF, 'while': WHILE}
    if token == 'id':
        match('id')
        ASSICALL()
    elif token in dispatch:
        dispatch[token]()
    elif token in ('token_cor_der', 'token_llave_der', 'token_par_izq', 'retorno', 'eol'):
        pass  # epsilon: a follow token of BLOCK
    else:
        print("Error BLOCK")
def ASSICALL():
    """After a leading id: assignment ('=') or function call ('(')."""
    global token
    handler = {'token_assign': ASSIGN, 'token_par_izq': CALLFUNC}.get(token)
    if handler is None:
        print("Error ASSICALL")
    else:
        handler()
def SENTENCE():
    """One value/expression: boolean-arithmetic expression, id (call head,
    expression head or bare id), string, array literal or struct literal.

    NOTE(review): the id branch peeks at diccToken[cont+1] and can index past
    the end of the token list on the final token; also the second lookahead
    condition mixes diccToken[cont+1] with the current `token`, which looks
    unintended - confirm against the grammar. The first condition accepting
    'token_par_der' (instead of 'token_par_izq'?) also looks suspicious.
    """
    global token
    if token == 'token_not' or token == 'token_par_der' or token == 'token_integer' or token == 'token_float' or token == 'true' or token == 'false':
        BB()
    elif token == 'id':
        # one-token lookahead decides between a call and an expression
        if diccToken[cont+1] == 'token_par_izq':
            match('id')
            CALLFUNC()
        elif diccToken[cont+1] == 'token_not' or token == 'token_par_izq' or token == 'token_integer' or token == 'token_float' or token == 'id' or token == 'true' or token == 'false':
            print("i was here")  # leftover debug output
            BB()
        elif token == 'token_coma' or token == 'token_mas' or token == 'token_par_der' or token == 'token_cor_der' or token == 'token_llave_der':
            match('id')
    elif token == 'token_string':
        match('token_string')
    elif token == 'token_cor_izq':
        ARRAY()
    elif token == 'token_llave_izq':
        STRUC()
    else:
        print("Error SENTENCE")
def CALLFUNC():
    """Argument list: ( SENTENCE CALLFUNCP )."""
    global token
    if token != 'token_par_izq':
        print("Error CALLFUNC")
        return
    match('token_par_izq')
    SENTENCE()
    CALLFUNCP()
    match('token_par_der')
def CALLFUNCP():
    """Argument tail: , SENTENCE | epsilon (before ')')."""
    global token
    if token == 'token_coma':
        match('token_coma')
        SENTENCE()
    elif token != 'token_par_der':
        print("Error CALLFUNCP")
def ASSIGN():
    """Assignment tail: = SENTENCE eol."""
    global token
    if token != 'token_assign':
        print("Error ASSIGN")
        return
    match('token_assign')
    SENTENCE()
    match('eol')
def PRINT():
    """Print statement: log ( OUTPUT ) eol."""
    global token
    if token != 'log':
        print("Error PRINT")
        return
    match('log')
    match('token_par_izq')
    OUTPUT()
    match('token_par_der')
    match('eol')
def OUTPUT():
    """Printable expression list: SENTENCE OUTPUTP."""
    global token
    first = ('id', 'token_string', 'token_not', 'token_par_izq', 'token_integer',
             'token_float', 'true', 'false', 'token_llave_izq')
    if token in first:
        SENTENCE()
        OUTPUTP()
    else:
        print("Error OUTPUT")
def OUTPUTP():
    """Concatenation tail: + OUTPUT | epsilon (before ')')."""
    global token
    if token == 'token_mas':
        match('token_mas')
        OUTPUT()
    elif token != 'token_par_der':
        print("Error OUTPUTP")
def READ():
    """Read statement: leer ( INPUT )."""
    global token
    if token != 'leer':
        print("Error READ")
        return
    match('leer')
    match('token_par_izq')
    INPUT()
    match('token_par_der')
def INPUT():
    """A single literal prompt value: integer, float or string."""
    global token
    if token in ('token_integer', 'token_float', 'token_string'):
        match(token)  # token equals the expected terminal, so just consume it
    else:
        print("Error INPUT")
def ARRAY():
    """Array literal: [ ARRELE ]."""
    global token
    if token != 'token_cor_izq':
        print("Error ARRAY")
        return
    match('token_cor_izq')
    ARRELE()
    match('token_cor_der')
def ARRELE():
    """Array elements: SENTENCE ARRELEP."""
    global token
    if token in ('id', 'token_string', 'token_not', 'token_par_izq', 'token_integer',
                 'token_float', 'true', 'false', 'token_llave_izq'):
        SENTENCE()
        ARRELEP()
    else:
        print("Error ARRELE")
def ARRELEP():
    """Element tail: , ARRELE | epsilon (before ']')."""
    global token
    if token == 'token_coma':
        match('token_coma')
        ARRELE()
    elif token != 'token_cor_der':
        print("Error ARRELEP")
def STRUC():
    """Struct literal: { STRUCELE }."""
    global token
    if token != 'token_llave_izq':
        print("Error STRUC")
        return
    match('token_llave_izq')
    STRUCELE()
    match('token_llave_der')
def STRUCELE():
    """Struct member: id : SENTENCE STRUCELEP."""
    global token
    if token != 'id':
        print("Error STRUCELE")
        return
    match('id')
    match('token_dosp')
    SENTENCE()
    STRUCELEP()
def STRUCELEP():
    """Member tail: , STRUCELE | epsilon (before '}')."""
    global token
    if token == 'token_coma':
        match('token_coma')
        STRUCELE()
    elif token != 'token_llave_der':
        print("Error STRUCELEP")
def BB():
    """Boolean expression: EBAND BBP."""
    global token
    if token in ('token_not', 'token_par_izq', 'token_integer', 'token_float',
                 'id', 'true', 'false'):
        EBAND()
        BBP()
    else:
        print("Error BB")
def BBP():
    """Disjunction tail: 'or' BB, or epsilon on a follow token.

    Fixes two bugs:
    * `token = match('or')` clobbered the global `token` with match()'s
      None return value right after match() had advanced the stream,
      derailing the subsequent BB() parse.
    * the epsilon condition used bare strings (`... or 'token_mas' or ...`),
      which are always truthy, so the error branch was unreachable and any
      stray token was silently accepted.
    """
    global token
    if token == 'or':
        match('or')
        BB()
    elif token in ('token_par_der', 'token_coma', 'token_mas',
                   'token_llave_der', 'token_cor_der'):
        pass  # epsilon
    else:
        print("error BBP")
def EBAND():
    """Conjunction: EB EBANDP."""
    global token
    if token in ('token_not', 'token_par_izq', 'token_integer', 'token_float',
                 'id', 'true', 'false'):
        EB()
        EBANDP()
    else:
        print("error EBAND")
def EBANDP():
    """Conjunction tail: '&&' EBANDP2, or epsilon on a follow token.

    Fix: the epsilon condition mixed in bare truthy strings
    (`... or 'token_mas' or 'token_par_izq' ...`), so the branch matched any
    token and the error branch was unreachable.
    """
    global token
    if token == 'token_and':
        match('token_and')
        EBANDP2()
    elif token in ('token_par_der', 'token_or', 'token_coma', 'token_mas',
                   'token_par_izq', 'retorno'):
        pass  # epsilon
    else:
        print("Error EBANDP")
def EBANDP2():
    """Right operand of '&&': parenthesised conjunction or a plain one."""
    global token
    if token == 'token_par_izq':
        match('token_par_izq')
        EBAND()
        match('token_par_der')
    elif token in ('token_not', 'token_integer', 'token_float', 'id', 'true', 'false'):
        # 'token_par_izq' was also listed here originally, but it is
        # unreachable: the first branch already consumes it
        EBAND()
    else:
        print("error token EBANDP2")
def EB():
    """Comparison or negation: EBCOM EBP | '!' EBP2."""
    global token
    if token == 'token_not':
        match('token_not')
        EBP2()
    elif token in ('token_par_izq', 'token_integer', 'token_float', 'id', 'true', 'false'):
        EBCOM()
        EBP()
    else:
        print("Error EB")
def EBP():
    """Equality tail: (== | !=) EBP2, or epsilon on a follow token."""
    # reads the module-level token only, so no `global` declaration needed
    if token in ('token_igual_num', 'token_diff_num'):
        OBIGU()
        EBP2()
    elif token in ('token_par_der', 'token_and', 'token_or', 'token_coma',
                   'token_mas', 'token_cor_der', 'token_llave_der',
                   'retorno', 'eol'):
        pass  # epsilon ('token_par_der' was listed twice in the original)
    else:
        print("Error EBP")
def EBP2():
    """Operand after '!' or an equality operator."""
    global token
    if token in ('token_not', 'token_par_izq', 'token_integer', 'token_float',
                 'id', 'true', 'false'):
        EB()
    else:
        # the original's parenthesised elif-branch was dead code: the
        # condition above already captures 'token_par_izq'
        print("error EBP2")
def OBIGU():
    """Equality operator: == or !=."""
    global token
    if token in ('token_igual_num', 'token_diff_num'):
        match(token)
    else:
        print("Error OBIGU")
def EBCOM():
    """Arithmetic comparison or boolean literal."""
    global token
    if token in ('token_par_izq', 'token_integer', 'token_float', 'id'):
        EARI()
        EBCOMP()
    elif token in ('true', 'false'):
        BOOL()
    else:
        print("Error EBCOM")
def EBCOMP():
    """Comparison tail: relational operator then EARI, or epsilon.

    Fixes:
    * the follow set contained 'token_or ' (trailing space), which can never
      equal the real 'token_or' token, so `a || b` style expressions fell
      into the error branch;
    * the error message named the wrong production (OBCOMP).
    """
    global token
    if token in ('token_menor', 'token_mayor', 'token_menor_igual', 'token_mayor_igual'):
        OBCOM()
        EARI()
    elif token in ('token_igual_num', 'token_diff_num', 'token_par_der',
                   'token_and', 'token_or', 'token_coma', 'token_mas',
                   'token_cor_der', 'token_llave_der', 'else',
                   'token_par_izq', 'retorno', 'eol'):
        pass  # epsilon
    else:
        print("Error EBCOMP")
def BOOL():
    """Boolean literal: true or false."""
    global token
    if token in ('true', 'false'):
        match(token)
    else:
        print("Error BOOL")
def OBCOM():
    """Relational operator: < > <= >=."""
    # reads the module-level token only, so no `global` declaration needed
    if token in ('token_menor', 'token_mayor', 'token_menor_igual', 'token_mayor_igual'):
        match(token)
    else:
        print("Error OBCOM")
def EARI():
    """Arithmetic expression: ( EMUL OPSUM EARI ) | EMUL EARIP.

    Fix: the second branch was a plain `if`, not `elif`, so after parsing a
    parenthesised expression the function re-tested the (already advanced)
    token - potentially parsing a spurious second expression or emitting a
    bogus "Error EARI".
    """
    global token
    if token == 'token_par_izq':
        match('token_par_izq')
        EMUL()
        OPSUM()
        EARI()
        match('token_par_der')
    elif token in ('token_integer', 'token_float', 'id'):
        EMUL()
        EARIP()
    else:
        print("Error EARI")
def EARIP():
    """Additive tail: OPSUM EARI, or epsilon on a follow token.

    Fix: the follow set contained 'token_mas ' (trailing space), which can
    never equal a real token - and '+' is consumed by the first branch
    anyway - so the dead entry is removed.
    """
    global token
    if token in ('token_mas', 'token_menos'):
        OPSUM()
        EARI()
    elif token in ('token_par_der', 'token_menor', 'token_mayor',
                   'token_mayor_igual', 'token_menor_igual', 'token_igual_num',
                   'token_diff_num', 'token_and', 'token_or', 'token_coma',
                   'token_cor_der', 'token_llave_der', 'eol'):
        pass  # epsilon
    else:
        print('Error EARIP')
def OPSUM():
    """Additive operator: + or -."""
    global token
    if token in ('token_mas', 'token_menos'):
        match(token)
    else:
        print('Error OPSUM')
def EMUL():
    """Multiplicative expression: ( E OPMUL EMUL ) | E EMULP."""
    global token
    if token == 'token_par_izq':
        match('token_par_izq')
        E()
        OPMUL()
        EMUL()
        match('token_par_der')
    elif token in ('token_integer', 'token_float', 'id'):
        E()
        EMULP()
    else:
        print("Error EMUL")
def EMULP():
    """Multiplicative tail: OPMUL EMUL, or epsilon on a follow token.

    Fix: the follow set listed 'token_else', a token the lexer never emits -
    the reserved word is tokenised by its own lexeme 'else' (as the sibling
    EBCOMP already assumes) - so an 'else' following an expression wrongly
    triggered "Error EMULP". Corrected to 'else'.
    """
    global token
    if token in ('token_div', 'token_mul', 'token_mod'):
        OPMUL()
        EMUL()
    elif token in ('token_par_der', 'token_mas', 'token_menos', 'token_menor',
                   'token_mayor', 'token_menor_igual', 'token_mayor_igual',
                   'token_igual_num', 'token_diff_num', 'token_and', 'token_or',
                   'token_coma', 'token_cor_der', 'token_llave_der', 'else',
                   'token_par_izq', 'retorno', 'eol'):
        pass  # epsilon
    else:
        print("Error EMULP")
def OPMUL():
    """Multiplicative operator: / * %."""
    global token
    if token in ('token_div', 'token_mul', 'token_mod'):
        match(token)
    else:
        print('Error OPMUL')
def E():
    """Atom: integer, float, or an id with optional index/member suffix."""
    global token
    if token in ('token_integer', 'token_float'):
        match(token)
    elif token == 'id':
        match('id')
        EP()
    else:
        print('Error E')
def EP():
    """Atom suffix: '[' EARI ']' indexing, '.' id member access, or epsilon."""
    global token
    if token == 'token_cor_izq':
        match('token_cor_izq')
        EARI()
        match('token_cor_der')
    elif token == 'token_point':
        match('token_point')
        match('id')
    # any other token is accepted silently: like the original, EP has no
    # error branch (its first branch was a large epsilon follow set)
def BUCLE():
    """For loop header: for id in BUCLEP."""
    global token
    if token != 'for':
        print("Error BUCLE")
        return
    match('for')
    match('id')
    match('in')
    BUCLEP()
def BUCLEP():
    """Loop iterable: an id or an array literal, then the body."""
    global token
    if token == 'id':
        match('id')
    elif token == 'token_cor_izq':
        ARRAY()
    else:
        print("Error BUCLEP")
        return
    BUCLEPP()
def BUCLEPP():
    """Loop body: { MODULE } or eol BLOCK."""
    global token
    if token == 'token_llave_izq':
        match('token_llave_izq')
        MODULE()
        match('token_llave_der')
    elif token == 'eol':
        match('eol')
        BLOCK()
    # like the original, any other token is silently ignored (no error branch)
def IF():
    """Conditional: if ( BB ) IFP ELSE."""
    global token
    if token != 'if':
        print("Error IF")
        return
    match('if')
    match('token_par_izq')
    BB()
    match('token_par_der')
    IFP()
    ELSE()
def IFP():
    """If body: { MODULE } or eol BLOCK."""
    global token
    if token == 'token_llave_izq':
        match('token_llave_izq')
        MODULE()
        match('token_llave_der')
    elif token == 'eol':
        match('eol')
        BLOCK()
    else:
        print("ERRROR IFP")
def ELSE():
    """Optional else clause: else ELSEP | epsilon."""
    global token
    if token == 'else':
        match('else')
        ELSEP()
    elif token not in ('token_llave_der', 'token_par_izq', 'retorno'):
        print("Error ELSE")
def ELSEP():
    """else-if chain: IF | epsilon."""
    global token
    if token == 'if':
        IF()
    elif token not in ('token_llave_der', 'token_par_izq', 'retorno'):
        print("Error ELSEP")
def WHILE():
    """While loop header: while ( BB ) WHILEP."""
    global token
    if token != 'while':
        print("error WHILE")
        return
    match('while')
    match('token_par_izq')
    BB()
    match('token_par_der')
    WHILEP()
def WHILEP():
    """While body: eol BLOCK or { MODULE }.

    Fix: the error message previously read "error WHILE", masking which
    production actually failed; it now names WHILEP.
    """
    global token
    if token == 'eol':
        match('eol')
        BLOCK()
    elif token == 'token_llave_izq':
        match('token_llave_izq')
        MODULE()
        match('token_llave_der')
    else:
        print("error WHILEP")
cont=0
def match(espectedToken):
    """Consume the current token if it equals espectedToken (sic).

    Advances the global cursor `cont`, reloads `token` from diccToken and
    terminates the whole program once every token has been consumed.
    NOTE(review): on a mismatch errorSintaxis() is a no-op and `cont` does
    not advance, so the parser can re-process the same token repeatedly -
    confirm whether error recovery was intended here.
    """
    global token
    global cont
    print("match", espectedToken, token)
    if(token == espectedToken):
        cont+=1
    else:
        errorSintaxis(espectedToken)
    if cont == len(diccToken):
        print("El analisis sintactico ha finalizado correctamente.")
        exit(0)
    else:
        token = diccToken[cont]
        print('\n token:'+token)
def errorSintaxis(espectedToken):
    # NOTE(review): stub - syntax errors are swallowed silently, so a
    # mismatch in match() neither reports nor recovers; confirm intended.
    a = 1
PROGRAMA() |
23,343 | 281821ad923f402e9d42287c6087f6f1bc2665bf | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# REGIOPROJEKTCHECK
# install_packages.py
#
# Description:
# PROJECT URL: http://www.regioprojektcheck.de
#
# Author:
# ILS gGmbH
#
# LICENSE: The MIT License (MIT) Copyright (c) 2014 RPC Consortium
# ---------------------------------------------------------------------------
# Prefer the NSIS-provided logging helpers; when run outside the installer
# fall back to stdout so the script stays usable standalone.
try:
    from nsis import log, messagebox
except:
    def log(x): print(x)
    messagebox = log
import os, sys
import subprocess
from collections import OrderedDict
import _winreg
min_requirement = 10.3
def get_python_path():
    """Locate the Python installation(s) shipped with ArcGIS Desktop.

    Reads the ESRI registry keys (32-bit registry view) to determine the
    installed ArcGIS version and its Python directory, then returns the
    existing 'ArcGIS<ver>' / 'ArcGISx64<ver>' interpreter folders.

    Returns:
        list of str with existing Python folders, or None when no ArcGIS
        Python installation is registered. Returns None implicitly (after
        logging) for any other error, e.g. an unsupported ArcGIS version.
    """
    try:
        esri_reg_path = r'SOFTWARE\WOW6432Node\ESRI'
        arcgis_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                                     os.path.join(esri_reg_path, 'ArcGIS'),
                                     0)
        version = _winreg.QueryValueEx(arcgis_key, 'RealVersion')[0][:4]
        if float(version) < min_requirement:
            raise Exception('AddIn unterstützt ArcGIS ab Version {}'
                            .format(min_requirement))
        desktop_reg_path = os.path.join(esri_reg_path,
                                        'Desktop{v}'.format(v=version))
        desktop_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                                      desktop_reg_path,
                                      0)
        desktop_dir = _winreg.QueryValueEx(desktop_key, 'InstallDir')[0]
        python_reg_path = os.path.join(esri_reg_path,
                                       'Python{v}'.format(v=version))
        python_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                                     python_reg_path,
                                     0)
        python_dir = _winreg.QueryValueEx(python_key, 'PythonDir')[0]
        # is desktop installation 64-Bit?
        # NOTE(review): is_64b/bitstr are computed but never used below;
        # both candidate paths are probed regardless. The registry reads
        # above still act as an installation sanity check, so they are kept.
        is_64b = os.path.exists(os.path.join(desktop_dir, "bin64"))
        bitstr = 'x64' if is_64b else ''
        possible_pathes = []
        possible_pathes.append(os.path.join(python_dir, 'ArcGIS{v}'.format(v=version)))
        possible_pathes.append(os.path.join(python_dir, 'ArcGISx64{v}'.format(v=version)))
        python_pathes = []
        for path in possible_pathes:
            if os.path.exists(path):
                python_pathes.append(path)
        return python_pathes
    except WindowsError:
        log('Keine ArcGIS-Pythoninstallation gefunden.')
        return None
    except Exception as e:
        log(e)
def install_packages(python_path):
    """Install all wheel dependencies plus the rpctools package into the
    ArcGIS Python at *python_path*.

    Bootstraps pip from a bundled wheel, then installs each pinned wheel
    from installer/wheels (choosing the win32/win_amd64 build to match the
    interpreter), and finally installs this project itself in editable mode.
    Failures are logged but do not abort the remaining installations.
    """
    log("\n"+ "Verwendeter Python-Pfad: " + python_path + "\n")
    log(sys.version)
    log(sys.platform)
    # pick the wheel platform tag from the target interpreter's folder name
    platform = 'win32'
    if "ArcGISx64" in python_path:
        platform = 'win_amd64'
    #Creating list with missing packages
    # ordered: wheels are installed in dependency order
    used_packages = OrderedDict()
    used_packages['appdirs']='appdirs-1.4.3-py2.py3-none-any.whl'
    used_packages['six']='six-1.10.0-py2.py3-none-any.whl'
    used_packages['pyparsing']='pyparsing-2.2.0-py2.py3-none-any.whl'
    used_packages['packaging']='packaging-16.8-py2.py3-none-any.whl'
    used_packages['setuptools']='setuptools-34.3.3-py2.py3-none-any.whl'
    used_packages['functools32']='functools32-3.2.3.post2-py27-none-any.whl'
    used_packages['numpy'] = 'numpy-1.12.1+mkl-cp27-cp27m-{}.whl'.format(platform)
    used_packages['cycler'] = 'cycler-0.10.0-py2.py3-none-any.whl'
    used_packages['dateutil']='python_dateutil-2.6.0-py2.py3-none-any.whl'
    used_packages['pytz']='pytz-2017.2-py2.py3-none-any.whl'
    used_packages['matplotlib']='matplotlib-2.0.0-cp27-cp27m-{}.whl'.format(platform)
    used_packages['pyodbc']='pyodbc-4.0.16-cp27-cp27m-{}.whl'.format(platform)
    used_packages['jdcal'] = 'jdcal-1.3-py2.py3-none-any.whl'
    used_packages['et-xmlfile'] = 'et_xmlfile-1.0.1-py2.py3-none-any.whl'
    used_packages['openpyxl'] = 'openpyxl-2.4.5-py2.py3-none-any.whl'
    used_packages['polyline'] = 'polyline-1.3.2-py2.py3-none-any.whl'
    used_packages['xlrd'] = 'xlrd-1.0.0-py2-none-any.whl'
    used_packages['xlsxwriter'] = 'XlsxWriter-0.9.6-py2.py3-none-any.whl'
    used_packages['py']='py-1.4.33-py2.py3-none-any.whl'
    used_packages['colorama']='colorama-0.3.7-py2.py3-none-any.whl'
    used_packages['pytest']='pytest-3.0.7-py2.py3-none-any.whl'
    used_packages['imagesize']='imagesize-0.7.1-py2.py3-none-any.whl'
    used_packages['pygments']='Pygments-2.2.0-py2.py3-none-any.whl'
    used_packages['snowballstemmer']='snowballstemmer-1.2.1-py2.py3-none-any.whl'
    used_packages['alabaster']='alabaster-0.7.10-py2.py3-none-any.whl'
    used_packages['docutils']='docutils-0.13.1-py2-none-any.whl'
    used_packages['requests']='requests-2.13.0-py2.py3-none-any.whl'
    used_packages['babel']='Babel-2.4.0-py2-none-any.whl'
    used_packages['markupsafe']='MarkupSafe-1.0-cp27-cp27m-{}.whl'.format(platform)
    used_packages['jinja2']='Jinja2-2.9.6-py2.py3-none-any.whl'
    used_packages['sphinx']='Sphinx-1.5.5-py2.py3-none-any.whl'
    used_packages['numpydoc']='numpydoc-0.6.0-py2-none-any.whl'
    used_packages['enum']='enum-0.4.6-py2-none-any.whl'
    used_packages['beautifulsoup4']='beautifulsoup4-4.6.0-py2-none-any.whl'
    used_packages['pypiwin32-219'] = 'pypiwin32-219-cp27-none-{}.whl'.format(platform)
    used_packages['pyproj'] = 'pyproj-1.9.5.1-cp27-cp27m-{}.whl'.format(platform)
    used_packages['scipy'] = 'scipy-0.19.1-cp27-cp27m-{}.whl'.format(platform)
    used_packages['pandas'] = 'pandas-0.19.1-cp27-cp27m-{}.whl'.format(platform)
    # NOTE(review): `missing` is never used below - leftover from an older
    # "only install missing packages" approach?
    missing = OrderedDict()
    #Installing pip
    # run pip directly out of its own wheel to bootstrap/upgrade it
    base_path = os.path.dirname(__file__)
    wheel_path = os.path.join(base_path, 'installer', 'wheels')
    log('Install or upgrade pip')
    process = subprocess.Popen([os.path.join(python_path, 'python'),
                                os.path.join(wheel_path, "pip-9.0.1-py2.py3-none-any.whl", "pip"),
                                'install',
                                '--upgrade',
                                os.path.join(wheel_path, "pip-9.0.1-py2.py3-none-any.whl")],
                               shell=True)
    ret = process.wait()
    if ret:
        log('pip nicht richtig installiert')
    else:
        log('pip installiert')
    ##Installing packages
    log('wheel_path; {}'.format(wheel_path))
    for package, filename in used_packages.iteritems():
        log('{p}: {f}'.format(p=package, f=filename))
        # '-f wheel_path' lets pip resolve dependencies from the local wheels
        process = subprocess.Popen([os.path.join(python_path, 'Scripts', 'pip.exe'),
                                    'install',
                                    '-f', wheel_path,
                                    os.path.join(wheel_path, filename)],
                                   shell=True)
        ret = process.wait()
        if ret:
            log("Paket " + package + " konnte ggf. nicht installiert werden." + "\n")
    # install rpctools package
    # ToDo: Finally change from --editable to wheel
    log("installiere RPCTools")
    process = subprocess.Popen([os.path.join(python_path, 'Scripts', 'pip.exe'),
                                'install',
                                '--editable',
                                base_path],
                               shell=True)
    ret = process.wait()
    if ret:
        log('rpctools konnte nicht installiert werden')
    else:
        log("RPCTools installiert")
    log('Installation abgeschlossen.')
if __name__ == '__main__':
    # install into every registered ArcGIS Python (32- and 64-bit)
    python_pathes = get_python_path()
    if python_pathes:
        for path in python_pathes:
            install_packages(path)
    #install_packages('C:\\Python27-ArcGIS\\ArcGISx6410.4')
|
23,344 | 3af09fe01ed425a8c147eaefbd664bac25bbd9a6 | import sys
def append_tables(append_file, table_file):
    """Append the contents of *table_file* to *append_file* (LaTeX fragment).

    Writes two LaTeX line-break separators, a heading derived from the table
    file name (extension dropped, underscores rendered as spaces, colon
    appended), another separator, and then the table file's lines verbatim.

    Fix: the original left both files open if any write/read raised; the
    `with` statement guarantees they are closed in all cases.
    """
    with open(append_file, 'a') as a_file, open(table_file, 'r') as t_file:
        a_file.write('\\\\ \n')
        a_file.write('\\\\ \n')
        # build " part1 part2 ...:\n" from the file name (without '.txt')
        name_string = ''
        for part in table_file[:-4].split('_'):
            name_string += ' ' + part
        name_string += ':' + '\n'
        a_file.write(name_string)
        a_file.write('\\\\ \n')
        for line in t_file:
            a_file.write(line)
def main():
try:
a_file = sys.argv[1]
files = sys.argv[2:]
except:
print 'input plz'
return
for f in files:
append_tables(a_file,f)
if __name__ == '__main__':
main()
|
23,345 | 47d6d29d79a559e7d440ead19febd7f932bbb74f | #TODO
# outdated outline
'''
Intro to python class notes 8 Week
Day 1-3/4
Make an error
“Hello” Print hello world, Name and save - Print, naming, saving program
Modify to print “Hello “ + name - Input, variable
Modify to ask for name - Input
Modify to ask for first name and last name
Add numbers
Define two variables var1=8 and var2=4 and print results, Save. - Variables
Modify program to add, multiply, divide, subtract - Operators
Change program to ask for number (woops, 84) - Type conversion int()
Counting loop - for x in range
Count from 1 to 10 using for loop
Guessing game - Import random, while loop
Pygame intro notes
Day 4/5-8
Day 1:
Create new directory in project and name it pygame_setup
Teach basic project setup. What is the minimal amount of code we need to make a python game. Why is each line important?
Save the project setup file as a template and then Save As a new project to introduce the coordinate system and basic drawing
coordinate system, X and Y axis, Where is the origin 0,0?
Show how to fill the screen with one color
Show the students how to draw each shape
Basic colors: RGB and how to store them in variables and use them when drawing shapes
Show RGB Color Picker in pycharm. Random Color?
Challenge: Draw a house using some of the basic shapes. Help them with the initial triangle, which can confuse students. Give them some time to work it out and then slowly do it along with them for those who are having trouble. Color your house and background, add a moon or sun?
Show them how to make a moving sun across the screen? I don't know if its easy to do. If so ill show them otherwise I will leave it out
I think that is enough for Day 1
Day 2: Drawing Program / Bouncing ball? I think I can only do one and will probably do some sort of drawing program. Depending on how interested they are we can go really deep into the drawing program or basic functionality and move on to something else
Day 3/4: Making a game! I have done games like Pong in the past, but it takes a bit of code to create, and doing more than one collision mechanism usually confuses the kids more than it should. Instead of Pong, I have moved on to making games where the player is some sort of object at the bottom of the screen and things are falling randomly from the top. The object is to either catch or avoid the falling objects.
I will send you the code I come up with for day 1 when it is done.
''' |
23,346 | c025c1a1f186a153c1944679dc0192e5ca2db0c1 | """Constants indicating available Django features."""
from __future__ import unicode_literals
from django.db.models import F, Q
from django.db.models.options import Options
try:
# Django >= 1.7
from django import apps
except ImportError:
# Django < 1.7
apps = None
try:
# Django >= 1.11
from django.db.models import Index
_test_index = Index(fields=['test'])
except ImportError:
Index = None
_test_index = None
_options = Options({})
#: Index names changed in Django 1.5, with the introduction of index_together.
supports_index_together = hasattr(_options, 'index_together')
#: Whether new-style Index classes are available.
#:
#: Django 1.11 introduced formal support for defining explicit indexes not
#: bound to a field definition or as part of
#: ``index_together``/``unique_together``.
#:
#: Type:
#: bool
supports_indexes = hasattr(_options, 'indexes')
#: Whether Q() objects can be directly compared.
#:
#: Django 2.0 introduced this support.
#:
#: Type:
#: bool
supports_q_comparison = hasattr(Q, '__eq__')
#: Whether F() objects can be directly compared.
#:
#: Django 2.0 introduced this support.
#:
#: Type:
#: bool
supports_f_comparison = hasattr(F, '__eq__')
#: Whether new-style Constraint classes are available.
#:
#: Django 2.2 introduced formal support for defining explicit constraints not
#: bound to a field definition.
supports_constraints = hasattr(_options, 'constraints')
#: Whether built-in support for Django Migrations is present.
#:
#: This is available in Django 1.7+.
supports_migrations = apps is not None
def supports_index_feature(attr_name):
    """Return whether Index supports a specific attribute.

    Args:
        attr_name (unicode):
            The name of the attribute.

    Returns:
        bool:
            ``True`` if the attribute is supported on this version of Django.
            ``False`` if it is not.
    """
    if not supports_indexes:
        # Index class is unavailable on this Django version
        return False
    return hasattr(_test_index, attr_name)
|
23,347 | 7b153f65bb1b8be3242cf55bab6d04199d4f935b | msg1 = '여러분'
msg2 = '파이팅!'
# concatenate with '+' and repeat the cheer three times with '*'
display_msg = msg1 + ', ' + msg2*3 + '~!'
print(display_msg)
|
23,348 | 2d1153f9fbfea247c58cb91d444f11474f6fff1f | from app.config.parameters import DB_USER, DB_PASS, DB_HOST, DB_PORT, \
DB_NAME
# Tortoise-ORM configuration consumed by tortoise / aerich.
TORTOISE_ORM = {
    "connections": {
        # single default connection built from the app's DB parameters
        "default":
        f'postgres://{DB_USER}:{DB_PASS}@{DB_HOST}:{DB_PORT}/{DB_NAME}'
    },
    "apps": {
        "models": {
            # application models plus aerich's migration-tracking model
            "models": ["app.core.models", "aerich.models"],
            "default_connection": "default",
        },
} |
23,349 | 62912ecbd92c297c75f4b633199121705b8abdc7 | from collections import defaultdict
import csv
import string
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
def generate_shingles(input_path, output_path, w=3):
    """Build w-word shingles for every lyric in the input CSV and write a
    TSV of (ID, list of shingle ids).

    :param input_path: CSV with the song ID in column 0 and lyrics in column 5
    :param output_path: destination TSV file
    :param w: shingle width in words
    """
    shingles_dict = defaultdict(set)
    shingles_identifier = dict()  # shingle text -> dense integer id
    with open(input_path, 'r') as read_obj:
        csv_reader = csv.reader(read_obj)
        header = next(csv_reader)
        # Check file as empty
        if header is not None:
            for row_number, row in enumerate(csv_reader, start=1):
                # progress indicator every 1000 rows
                if row_number % 1000 == 0:
                    print(row_number)
                song_id = str(row[0])
                # normalise the lyric: strip punctuation, lowercase, tokenize
                cleaned = row[5].translate(
                    str.maketrans('', '', string.punctuation)).lower()
                words = cleaned.split()
                for start in range(len(words) - w + 1):
                    shingle = ' '.join(words[start:start + w])
                    if shingle not in shingles_identifier:
                        shingles_identifier[shingle] = len(shingles_identifier)
                    shingles_dict['ID_' + song_id].add(shingles_identifier[shingle])
    print('...writing tsv file...')
    with open(output_path, 'w', newline='') as out_file:
        tsv_writer = csv.writer(out_file, delimiter='\t')
        tsv_writer.writerow(['ID', 'Shingles'])
        for key, values in shingles_dict.items():
            tsv_writer.writerow([key, list(values)])
def prob(j, r, b):
    """Probability that a pair with Jaccard similarity j is emitted by LSH
    with b bands of r rows each: 1 - (1 - j^r)^b."""
    return 1 - (1 - j ** r) ** b
def b_vs_r(r, t=.97):
    """Minimum number of bands b so that a pair with Jaccard 0.95 is caught
    with probability at least t, for band size r.

    Derived from t <= 1 - (1 - 0.95^r)^b.

    :param r: rows per band
    :param t: target detection probability for Jaccard-0.95 pairs
    :return: the (real-valued) lower bound on b
    """
    return np.log(1 - t) / np.log(1 - .95 ** r)
def view_plot(r, b='default', jacc=.95, t=.97):
    """Plot the LSH S-curve for band size r and band count b.

    :param r: rows per band
    :param b: number of bands; 'default' derives the minimum b via b_vs_r
    :param jacc: Jaccard value marked with a vertical dashed red line
    :param t: target detection probability used when b is derived
    NOTE(review): plt.show() blocks until the plot window is closed.
    """
    if b == 'default':
        b_value = b_vs_r(r, t)
        print(b_value)
    else:
        b_value = b
    x = np.arange(0, 1, .001)
    y = prob(x, r, b_value)
    # plot the prob
    plt.plot(x, y, 'b-')
    plt.xlabel('Jaccard Similarity value')
    plt.ylabel('Probability of two pairs with that Jaccard value \n of being provided by the LSH algorithm')
    plt.axvline(x=jacc, ymin=0, ymax=1, color='r', linestyle='--')
    plt.title('S-Curve')
    plt.show()
def main():
    """Generate 3-word shingles for the 250K MetroLyrics dump.

    NOTE(review): input/output paths are relative to this script's location.
    """
    input_path = '../../dataset/250K_lyrics_from_MetroLyrics.csv'
    output_path = '../data/Shingles.tsv'
    w = 3
    generate_shingles(input_path, output_path, w=w)
# .9
# java tools.NearDuplicatesDetector lsh_plus_min_hashing 0.95 100 1164 input_data/hash_function_50.tsv input_
# data/Shingles.tsv output_data/Results__90_20_25.tsv
if __name__ == "__main__":
main()
# False Positives: We can still remove them after the LSH model output
# False Negative: We cannot recover these pairs since the LSH has not considered them inside the potential pair, LSH algorithm does not identify them as near duplicates
# Min length of sketches means that r < 300. We then have to also look for an appropriate b value. The constraints are
# the following:
# 1. r*b=n (where n is the total number of hash functions)
# 2. 0.97 < 1-(1-.95**r)**b
# The way in which we can reduce the number of False Positives is by actually computing the Jaccard Similarity after
# the LSH algorithm (LSH gives us the potential matches and we compute the actual match a posteriori and remove those
# that are below the threshold)
# The way in which we reduce the number of False Negatives is by fine-tunning as best as possible the b value before hand
|
23,350 | bd66d1cd228a3e10e9adf0ce70e350696a8d6dd9 | from web3 import Web3
from eth_account import Account
from eth_keys import keys
from eth_utils import keccak
import rlp
def createAccount(name):
    """Create a fresh Ethereum account, print its key pair and return it.

    `name` is forwarded to Account.create (used as extra entropy for key
    generation, not as an account label).
    """
    acct = Account.create(name)
    print(acct.privateKey, acct.address)
    return acct
# Infura HTTP endpoint for the Ropsten testnet (project ID embedded in URL).
url = "https://ropsten.infura.io/v3/58196e7733be426ca1564fb66eaaf575"
w3 = Web3(Web3.HTTPProvider(url))
# Sanity check: prints True when the RPC endpoint is reachable.
print(w3.isConnected())
'''
A = Account.create('gordon')
private_key_A = A.privateKey
address_A = A.address
print(private_key_A,A.privateKey.hex())
print(address_A)
B = Account.create('bitch')
private_key_B = B.privateKey
address_B = B.address
print(private_key_B.hex())
print(address_B)
C = Account.create('ggininder')
private_key_C = C.privateKey
address_C = C.address
print(private_key_B.hex())
print(address_C)
balance_A = w3.eth.getBalance(address_A)
balance_B = w3.eth.getBalance(address_B)
balance_C = w3.eth.getBalance(address_C)
print('A:',balance_A)
print('B:',balance_B)
print('C:',balance_C)
'''
# test
# NOTE(review): a real private key is hard-coded in source; rotate the key and
# load it from an environment variable before committing/publishing.
private_key_A = bytes.fromhex('5d43a01075ccbaf8207d1c739e1a4f41b81cd68a009bd2fc0374d09b478fe0ff')
address_A = '0x3140c5cC6194dB5De7A005c2465879E3464De54E'
print(w3.eth.getBalance(address_A))
# deploy: load the contract ABI and bytecode, build, sign and broadcast the
# constructor transaction.
import json
with open('./SABI.json', encoding='utf-8-sig') as f:
    info_json = json.load(f)
abi = info_json
file = open('./Sbytecode.txt')
bytecode = file.read()
file.close()
Token = w3.eth.contract(abi=abi, bytecode=bytecode)
# Gas limit and price are fixed; nonce is read from the chain for address_A.
Token_tx = Token.constructor().buildTransaction({
    'from':address_A,
    'nonce': w3.eth.getTransactionCount(address_A),
    'gas': 1728712,
    'gasPrice' : w3.toWei('21','gwei')})
signed = w3.eth.account.signTransaction(Token_tx,private_key_A)
Token_Tx_hash = w3.eth.sendRawTransaction(signed.rawTransaction)
print(Token_Tx_hash.hex())
# Once mined, bind an instance to the deployed contract:
#contract_address = ''
#Token_instance = w3.eth.contract(address = Web3.toChecksumAddress(contract_address),abi = abi)
|
23,351 | 6e6e8c7c51be0713d2e5e5e1e19423deee6602e6 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, os, time
running = True
# display the contents of journal.txt
def display_journal():
    """Print the saved journal entries, or a notice when none exist yet."""
    # display journal.txt content
    # NOTE(review): the file handle from open() is never closed; harmless in a
    # short-lived CPython script, but a with-block would be cleaner.
    if os.path.isfile('journal.txt'):
        print "大王,这是您之前的口谕:"
        print open("journal.txt").read()
    else:
        print '''
大王,您之前没留下话呢。
'''
def input_journal():
    """Interactive loop: append timestamped lines to journal.txt.

    Commands: 'q' quits, '?'/'h'/'H'/'help' shows help, 's'/'show' prints the
    existing journal; any other input is appended as a new entry.
    """
    # `running` is a module-level flag that nothing ever clears, so the loop
    # only exits via the explicit `break` on 'q'.
    while running:
        line = raw_input("大王,有什么需要小人服务呢?>")
        if line == 'q':
            print "大王再见,大王慢走。"
            break
        elif line == '?' or line == 'h' or line == 'H' or line == 'help':
            print '''
按q退出
按 ?/h/H/help 显示帮助
按s显示您之前的吩咐
'''
        elif line == 's' or line == 'show':
            display_journal()
        elif line == '真主安拉':
            print "恭喜你答对了,奖你24个。。。葫芦娃。"
        else:
            # Append the entry prefixed with a local timestamp, one per line.
            target = open("journal.txt", 'a')
            target.write(time.strftime("%Y-%m-%d %X")+" : "+line+"\n")
            target.close()
def main():
    """Show any existing entries, then enter the interactive loop."""
    display_journal()
    input_journal()

# Run only when invoked as a script.
if __name__ == '__main__':
    main()
|
23,352 | ef43befa540dfebb6d2e32996d066b4fb67fd438 | # Generated by Django 2.2.10 on 2020-06-15 07:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: updates the choice set/default of
    # Post.category. Keep in sync with the model definition in blog/models.py.

    dependencies = [
        ('blog', '0013_auto_20200615_0717'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='category',
            field=models.CharField(choices=[('interviews', 'Interviews'), ('arts', 'Arts & Culture'), ('films', 'Film Reviews'), ('books', 'Book Reviews'), ('travel', 'Travel & Experience')], default='interviews', max_length=20),
        ),
    ]
|
23,353 | d381494e22148f863a9a1c4b18b0d1a81e11cba9 | n = int(input())
# Read an n x n board of characters; 'K' marks a knight.
matrix = []
for _ in range(n):
    matrix.append([each for each in input()])
# The eight (row, col) offsets a chess knight can attack.
possible_hit_coordinates = [[-2, -1], [-2, 1], [-1, -2], [-1, 2], [1, -2], [1, 2], [2, -1], [2, 1]]
biggest_counter_coordinates = dict()
to_remove = None
counter = 0
# Greedy loop: repeatedly count, for every knight, how many other knights it
# attacks; remove the single most-attacking knight; stop when no attacks remain.
while True:
    biggest_counter_coordinates = {}
    for row in range(n):
        for col in range(n):
            if matrix[row][col] == "K":
                for each in possible_hit_coordinates:
                    # Stay inside the board bounds.
                    if -1 < each[0] + row < n and -1 < each[1] + col < n:
                        if matrix[each[0] + row][each[1] + col] == "K":
                            if (row, col) in biggest_counter_coordinates:
                                biggest_counter_coordinates[(row, col)] += 1
                            else:
                                biggest_counter_coordinates[(row, col)] = 1
    if biggest_counter_coordinates == {}:
        break
    else:
        # Order knights by attack count, descending.
        biggest_counter_coordinates = dict(sorted(biggest_counter_coordinates.items(), key=lambda x: -x[1]))
        # NOTE: iterating the dict yields its KEYS, which are (row, col)
        # tuples — so k is the row and v is the column, not a key/value pair.
        for k, v in biggest_counter_coordinates:
            to_remove = (k, v)
            del biggest_counter_coordinates[to_remove]
            if matrix[k][v] == "K":
                matrix[k][v] = "0"
                counter += 1
            # Only the top entry is removed per pass; counts are recomputed.
            break
print(counter)
|
23,354 | 0cd61ffed9fb9ff087a9e9f8d7ef26d69ca5213a | #-------------------------------------------------------------------------------
# Name: Rock,Paper,Scissors
# Purpose:
#
# Author: Vlad
#
# Created: 04/03/2016
# Copyright: (c) Vlad 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
import random
# NOTE(review): despite the "Rock,Paper,Scissors" header, this script currently
# only prints a random quote; the game input below is commented out.
#User=input("Please type \n 1 for Rock \n 2 for Paper \n 3 for Scissors")
#User=int(User)
# Pick one of five quotes at random (0..4 inclusive).
Comp=random.randrange(5)
if Comp==0:
    print "You must try, or hate yourself for not trying."
elif Comp==1:
    print "A friend asks only for your time not your money."
elif Comp==2:
    print "Hard work pays off in the future, laziness pays off now."
elif Comp==3:
    print "Change can hurt, but it leads a path to something better."
elif Comp==4:
    print "If you have something good in your life, don't let it go!"
|
23,355 | 97a944423408cd36005e8bc3085a36512d8046ce | import numpy as np
import matplotlib.pyplot as plt
n = 1000
def y(x, a, b):
    """Evaluate the power law a * x**b (elementwise when x is an array)."""
    power = x ** b
    return a * power
# Sample the unit interval with n+1 points and plot three parameterizations
# of the power law on log-log axes (a power law is a straight line there).
x = np.linspace(0,1,n+1)
a = [3,1,3]
b = [3,0,1]
plt.plot(x,y(x,a[0],b[0]),'k')
plt.plot(x,y(x,a[1],b[1]),'r')
plt.plot(x,y(x,a[2],b[2]),'b')
plt.xlabel('x')
plt.ylabel('y(x) = a*x**b')
plt.title('Loglog plot of y(x) = a*x**b')
# Switch both axes to logarithmic scale.
plt.loglog()
plt.legend(['a = %s and b = %s' %(a[0],b[0]),'a = %s and b = %s' %(a[1],b[1]),'a = %s and b = %s' %(a[2],b[2])],loc = 4)
plt.grid('on')
# Save before show: show() can clear the current figure on some backends.
plt.savefig('EIVP5_linear_loglog_plot.png')
plt.show()
|
23,356 | 6b048b4ba060a70ed4e99166569e6d3bd4fb9d09 | # Generated by Django 3.0.7 on 2020-07-18 13:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: changes the default/length of
    # Application.app_config_path. Keep in sync with applications/models.py.

    dependencies = [
        ('applications', '0008_auto_20200713_2356'),
    ]

    operations = [
        migrations.AlterField(
            model_name='application',
            name='app_config_path',
            field=models.CharField(default='/usr/local/iohub/publisher/applications/conf/', max_length=512),
        ),
    ]
|
23,357 | e8cfd58109c0551a7b54cf7e297a364df5389151 | from app.utils.ext import INTEGER, \
TEXT, SMALLINT, Sequence, FLOAT, String, Column, \
ForeignKey, DECIMAL, db, INTEGER
from app.utils.strings import get_unix_time_tuple
"""
doc: http://docs.jinkan.org/docs/flask-sqlalchemy/models.html
"""
__all__ = ['User', 'FileModel', 'FileUserModel', 'LoginRecord', 'TodoModel', 'RssModel']
# 对外展示的
tables = {}
def addModel(model):
    """Class decorator: record `model` in the module-level `tables`
    registry under its class name, then hand the class back unchanged."""
    key = model.__name__
    tables[key] = model
    return model
class BaseModel():
    """Mixin adding common persistence helpers to the ORM models below."""

    def save(self, commit=False):
        """Stage this instance in the session; flush when `commit` is True."""
        db.session.add(self)
        if commit:
            db.session.commit()

    def delete(self, logic=True):
        """Remove this instance: logically (flag only) or physically.

        Logical deletion just sets `is_delete`; physical deletion removes the
        row and commits immediately.
        """
        if not logic:
            db.session.delete(self)
            db.session.commit()
        else:
            self.is_delete = True

    @staticmethod
    def query_all(Model):
        """Return every row of `Model`, or an empty list when there are none."""
        return db.session.query(Model).all() or []
@addModel
class User(db.Model, BaseModel):
    """Account record (table `bao_user`); registered in `tables` via @addModel."""
    __tablename__ = "bao_user"
    id = Column(INTEGER, primary_key=True)
    email = Column(String(255), unique=True)
    nickname = Column(String(255), nullable=True)
    password = Column(String(255))
    status = Column(SMALLINT, default=0)  # user status flag
    # Locally stored token, used to re-request an API token.
    token = Column(String(64), nullable=True)

    @classmethod
    def get_user(cls, user_id=None, token=None):
        """Fetch a user by id, or failing that by token.

        :param user_id: the user's primary key
        :param token: the user's token
        :return: the matching User instance, or None when neither lookup hits
        """
        if user_id:
            return db.session.query(User).filter_by(id=user_id).first()
        elif token:
            return db.session.query(User).filter_by(token=token).first()
@addModel
class FileModel(db.Model, BaseModel):
    """File mapping table: one row per known file (content hash + metadata)."""
    __tablename__ = "bao_file"
    file_id = Column(INTEGER, Sequence(start=1, increment=1,
                                       name="file_id_sep"), primary_key=True, autoincrement=True)  # primary key
    file_hash = Column(String(64), nullable=False)
    file_name = Column(String(255), nullable=True)
    file_type = Column(String(32), nullable=True)
class FileUserModel(db.Model, BaseModel):
    """File-to-user mapping table."""
    # NOTE(review): listed in __all__ but, unlike the sibling *UserModel
    # classes, not decorated with @addModel — confirm whether the registry
    # omission is intentional.
    __tablename__ = "bao_file_user"
    file_user_id = Column(INTEGER, Sequence(start=1, increment=1,
                                            name="file_user_id_sep"), primary_key=True, autoincrement=True)
    user_id = Column(INTEGER, nullable=False)
    file_id = Column(INTEGER, nullable=False)
    add_time = Column(String(20), nullable=True)
    file_user_state = Column(SMALLINT, nullable=True)  # 1 created (unverified), 2 valid, 3 invalid

    def __init__(self, user_id: int, file_id: int, add_time: str=None):
        """Create a mapping in state 1 (created); `add_time` defaults to now."""
        self.user_id = user_id
        self.file_id = file_id
        self.file_user_state = 1
        self.add_time = add_time or get_unix_time_tuple()
@addModel
class LoginRecord(db.Model, BaseModel):
    """Login audit table: one row per login (user, time, source IP)."""
    __tablename__ = "bao_login_record"
    record_id = Column(INTEGER, Sequence(start=1, increment=1,
                                         name="record_id_sep"), primary_key=True, autoincrement=True)
    user_id = Column(INTEGER)
    login_time = Column(String(20), nullable=True)
    log_ip = Column(String(20), nullable=True)
@addModel
class TodoModel(db.Model, BaseModel):
    """Todo list item bound to a user."""
    __tablename__ = "bao_todo"
    todo_id = Column(INTEGER, Sequence(start=1, increment=1,
                                       name="todo_id_sep"), primary_key=True, autoincrement=True)
    todo_title = Column(String(255), nullable=True)
    add_time = Column(String(20), nullable=True)
    bind_user_id = Column(INTEGER, nullable=True)
    todo_state = Column(SMALLINT, nullable=True)  # 1 created, 2 done, 3 deleted
@addModel
class RssModel(db.Model, BaseModel):
    """RSS subscription source (feed URL plus feed metadata)."""
    __tablename__ = "bao_rss"
    rss_id = Column(INTEGER, Sequence(start=1, increment=1,
                                      name="rss_id_sep"), primary_key=True, autoincrement=True)
    rss_link = Column(String(255), nullable=True, unique=True)
    rss_subtitle = Column(String(255), nullable=True)
    add_time = Column(String(20), nullable=True)
    rss_version = Column(String(10), nullable=True)
    rss_title = Column(String(255), nullable=True, comment='订阅的标题')
    rss_state = Column(SMALLINT, nullable=True)  # 1 created (unverified), 2 valid, 3 invalid

    def __init__(self, link: str, add_time: str=None):
        """Create a subscription in state 1; `add_time` defaults to now."""
        self.rss_link = link
        self.rss_state = 1
        self.add_time = add_time or get_unix_time_tuple()
@addModel
class RssUserModel(db.Model, BaseModel):
    """RSS-subscription-to-user mapping table."""
    __tablename__ = "bao_rss_user"
    rss_user_id = Column(INTEGER, Sequence(start=1, increment=1,
                                           name="rss_user_id_sep"), primary_key=True, autoincrement=True)
    user_id = Column(INTEGER, nullable=False)
    rss_id = Column(INTEGER, nullable=False)
    add_time = Column(String(20), nullable=True)
    rss_user_state = Column(SMALLINT, nullable=True)  # 1 created (unverified), 2 valid, 3 invalid

    def __init__(self, user_id: int, rss_id: int, add_time: str=None):
        """Create a mapping in state 1; `add_time` defaults to now."""
        self.user_id = user_id
        self.rss_id = rss_id
        self.rss_user_state = 1
        self.add_time = add_time or get_unix_time_tuple()
@addModel
class RssContentModel(db.Model, BaseModel):
    """A single fetched feed entry (article) belonging to some RSS source."""
    __tablename__ = "bao_rss_content"
    content_id = Column(INTEGER, Sequence(start=1, increment=1,
                                          name="content_id_sep"), primary_key=True, autoincrement=True)
    content_base = Column(String(255), nullable=True)
    content_link = Column(String(255), unique=True, nullable=True)
    content_title = Column(String(255), nullable=True)
    content_description = Column(TEXT, nullable=True)
    content_image_cover = Column(String(255), nullable=True)
    published_time = Column(String(64), nullable=True)
    add_time = Column(String(20), nullable=True)

    def __init__(self, link: str, baseurl: str, title: str, description: str, cover_img: str, published_time: str, add_time: str=None):
        """Store one feed entry; `add_time` defaults to now."""
        self.content_link = link
        self.content_base = baseurl
        self.content_title = title
        self.published_time = published_time
        self.content_image_cover = cover_img
        self.content_description = description
        self.add_time = add_time or get_unix_time_tuple()
class RssReadRecordModel(db.Model, BaseModel):
    """Read-history table: which user read which feed entry, and when."""
    __tablename__ = 'bao_rss_read_record'
    read_id = Column(INTEGER, Sequence(start=1, increment=1,
                                       name="read_id_sep"), primary_key=True, autoincrement=True)
    read_url_id = Column(INTEGER, nullable=False)
    read_user_id = Column(INTEGER)
    read_time = Column(String(20), nullable=True)

    def __init__(self, url_id: int, user_id: int, read_at: str = None):
        """Record a read event; `read_at` defaults to now."""
        self.read_url_id = url_id
        self.read_user_id = user_id
        self.read_time = read_at or get_unix_time_tuple()
class TaskModel(db.Model, BaseModel):
    """Background-task record: initiator, start/end times, state, etc."""
    __tablename__ = 'bao_task_record'
    task_id = Column(String(125), primary_key=True)
    # NOTE(review): `tast_name` looks like a typo for `task_name`; renaming it
    # would change the mapped DB column, so it is left untouched here.
    tast_name = Column(String(255))
    argsrepr = Column(String(255))
    kwargs = Column(String(255))
    user_id = Column(INTEGER)
    begin_at = Column(String(20))
    end_at = Column(String(20))
    is_succ = Column(SMALLINT)
|
23,358 | c37ed3e02392a90012e6c561f944aaf3baff3cb1 | from fastapi import FastAPI
from app.db.database import selectQuery
from sklearn.preprocessing import StandardScaler, Normalizer, LabelEncoder
from sklearn.cluster import KMeans
import pandas as pd
from typing import List
from app.models import ClassifierKmeansInDb, ClassifierKmeans
from .generate import generate_statistics, generate_clusters_kmeans, generate_WCSS
app = FastAPI()
@app.get("/")
async def root():
    """Health-check / landing endpoint: returns the team identifier."""
    return {"TEAM": "JSJ"}
@app.post("/kmeans/")
def classifier_kmeans(classifier_kmeans_db: ClassifierKmeansInDb):
    """Run KMeans over the rows returned by the request's SQL query.

    Returns the parameters used, per-row cluster assignments, per-cluster
    statistics, the elbow (WCSS) curve, and — for 2-D data only — a cluster
    scatter image.
    """
    data, columnsNames = selectQuery(classifier_kmeans_db.query)
    # Copy the hyper-parameters from the request body into the model schema.
    classifier_kmeans = ClassifierKmeans(n_clusters=classifier_kmeans_db.n_clusters,
                                         init= classifier_kmeans_db.init,
                                         max_iter = classifier_kmeans_db.max_iter,
                                         n_init = classifier_kmeans_db.n_init,
                                         random_state = classifier_kmeans_db.random_state)
    num_variables = len(columnsNames)
    dataset = pd.DataFrame(data, columns=columnsNames).reset_index(drop=True)
    # Label-encode every column (makes categoricals numeric), then standardize.
    x_kmeans = dataset.apply(LabelEncoder().fit_transform).values
    x_kmeans = StandardScaler().fit_transform(x_kmeans)
    kmeans = KMeans(**classifier_kmeans.dict())
    y_kmeans = kmeans.fit_predict(x_kmeans)
    # Prepend the cluster id and a 1-based row number to the result table.
    dataset.insert(0, "cluster", y_kmeans)
    dataset.insert(0, "num", [x+1 for x in range(len(dataset))])
    statistics, baricentro = generate_statistics(dataset, kmeans, num_variables)
    image = None
    # A scatter plot is only meaningful for exactly two feature columns.
    if len(columnsNames) == 2:
        image = generate_clusters_kmeans(x_kmeans, y_kmeans, kmeans, **classifier_kmeans.dict())
    codo, wcss = generate_WCSS(x_kmeans, **classifier_kmeans.dict())
    return {"classifier_kmeans": classifier_kmeans, 'names':columnsNames, 'results': dataset.values.tolist(),
            "statistics":statistics.values.tolist(), 'codo':codo, 'image':image, 'baricentro':baricentro, 'wcss':wcss }
|
23,359 | 256c8ac58a2e8d8de852df7f2e1c4ead277ee2c9 | #!/usr/bin/env pybricks-micropython
from pybricks import ev3brick as brick
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import Motor, TouchSensor, ColorSensor, InfraredSensor, UltrasonicSensor, GyroSensor
from pybricks.parameters import Port, Stop, Direction, Button, Color, SoundFile, ImageFile, Align
from pybricks.tools import wait, StopWatch, DataLog
from pybricks.robotics import DriveBase
import time
# Audible confirmation that the program reached this point on the EV3 brick.
brick.sound.beep()
|
23,360 | b87cc4c4109d24048dcb4f648796ffb3fc056cf9 | """
Given a non-empty string s, you may delete at most one character. Judge whether you can make it a palindrome.
Example 1:
Input: "aba"
Output: True
Example 2:
Input: "abca"
Output: True
Explanation: You could delete the character 'c'.
"""
class Solution:
    def validPalindrome(self, string: str) -> bool:
        """True if `string` is a palindrome after deleting at most one char."""
        left, right = 0, len(string) - 1
        # Walk inwards while the ends match.
        while left < right and string[left] == string[right]:
            left += 1
            right -= 1
        if left >= right:
            # Already a palindrome with no deletion needed.
            return True
        # First mismatch: try skipping either end character.
        return (self.isPalindrome(string, left + 1, right)
                or self.isPalindrome(string, left, right - 1))

    def isPalindrome(self, string, l, r):
        """True if the inclusive slice string[l..r] reads the same reversed."""
        segment = string[l:r + 1]
        return segment == segment[::-1]
|
23,361 | e638bb5fac22c462eb61cf60e41fcb36eaa369b2 | import argparse
from os.path import isfile
from indexing import do_indexing, get_vector, get_text8, DATASET_FILE_PATH
def main(args):
    """Print the `args.n` nearest neighbors of `args.word` in vector space."""
    word = args.word[0]
    # +1 because the query word itself is returned as its own nearest neighbor.
    neighbors = args.n + 1
    # Download the text8 corpus on first run.
    if not isfile(DATASET_FILE_PATH):
        get_text8()
    # get word vector via word2vec
    model = get_vector()
    # Precompute L2-normalized vectors (replace=True overwrites the originals).
    # NOTE(review): `init_sims`/`syn0norm` are deprecated in newer gensim —
    # confirm the pinned gensim version before upgrading.
    model.wv.init_sims(replace=True)
    # indexing via faiss
    index = do_indexing(word2vec_model=model)
    idx = model.wv.vocab[word].index
    # D: similarity scores, I: neighbor indices for the single query row.
    D, I = index.search(model.wv.syn0norm[idx].reshape(1, -1), neighbors)
    print('{}:{}'.format(idx, model.wv.index2word[idx]))
    for _D, _I in zip(D, I):
        for n, (d, i) in enumerate(zip(_D.ravel(), _I.ravel())):
            # Skip n == 0: that is the query word itself.
            if n > 0:
                print('{}. {}:{} = {}'.format(n, i, model.wv.index2word[i], d))
# CLI entry point: `python script.py [-n K] WORD`.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', type=int, default=10, help='number of neighbor')
    parser.add_argument(
        'word', type=str, nargs=1, help='a target word to estimate neighbors')
    main(parser.parse_args())
|
23,362 | 7c1bdc4363337ff25f218624eaa405261fb38390 | import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
def read_datafile(file_name):
    """Load a comma-separated numeric file, skipping the first 10 header rows."""
    return np.loadtxt(file_name, delimiter=',', skiprows=10)
# Load the planned (target) path and the recorded car path from CSV logs.
targetPoints = read_datafile('/Users/awr/Desktop/SelfDrivingCarTeamZero/targetPoints.csv')
locationPoints = read_datafile('/Users/awr/Desktop/SelfDrivingCarTeamZero/locationPoints.csv')
x= targetPoints[:,0]
y= targetPoints[:,1]
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("Gazebo target points path")
ax1.set_xlabel('Gazebo x coords')
ax1.set_ylabel('Gazebo y coords')
ax1.plot(x,y, c='r', label='target Points')
# NOTE(review): add_subplot(111) is called again with the same position, so
# ax2's title/labels overwrite ax1's — confirm both series are meant to share
# one axes.
ax2 = fig.add_subplot(111)
ax2.set_title("Gazebo car path")
ax2.set_xlabel('Gazebo x coords')
ax2.set_ylabel('Gazebo y coords')
x1 = locationPoints[:,0]
y1 = locationPoints[:,1]
ax2.plot(x1,y1, c='b', label='car Points')
# Disabled third series (road path); note `roadsPoints` typo if re-enabled.
##ax3 = fig.add_subplot(111)
##ax3.set_title("Gazebo road path")
##ax3.set_xlabel('Gazebo x coords')
##ax3.set_ylabel('Gazebo y coords')
##x2 = roadPoints[:,0]
##y2 = roadsPoints[:,1]
##ax3.plot(x2,y2, c='g', label='road Points')
leg = ax1.legend()
plt.show()
|
23,363 | e2bf93139c1a28e1623b72dcf9c0038ffc870597 | from django.contrib import admin
# Register your models here.
from .models import Post,Tag,AboutMe,favorite,song,images,Profile,PostComment,Music
# Expose each model in the Django admin with the default ModelAdmin.
admin.site.register(Post)
admin.site.register(Tag)
admin.site.register(AboutMe)
admin.site.register(favorite)
admin.site.register(song)
admin.site.register(images)
admin.site.register(Profile)
admin.site.register(PostComment)
admin.site.register(Music)
23,364 | 0fdb31b2797a317b89b6eb761b8c42ca8e9f59ea | from django.http.response import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from ShareRes.models import *
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# Create your views here.
def sendEmail(requests) :
    """Build an HTML digest of the checked restaurants and redirect to index.

    Reads the checked restaurant ids plus receiver/title/content fields from
    the POSTed form. The actual SMTP send is not implemented yet (see below).
    """
    checked_res_list = requests.POST.getlist('checks')
    inputReceiver = requests.POST['inputReceiver']
    inputTitle = requests.POST['inputTitle']
    inputContent = requests.POST['inputContent']
    # NOTE(review): restaurant fields from the DB are concatenated into HTML
    # without escaping — escape them (django.utils.html.escape) if they can
    # contain user-supplied markup.
    mail_html = "<html><body>"
    mail_html += "<h1> 맛집 공유 </h1>"
    mail_html += "<p>"+inputContent+"<br>"
    mail_html += "발신자님께서 공유하신 맛집은 다음과 같습니다.</p>"
    # One section per checked restaurant: name, link, details, keywords.
    for checked_res_id in checked_res_list :
        restaurant = Restaurant.objects.get(id=checked_res_id)
        mail_html += "<h2>" + restaurant.restaurant_name+"</h3>"
        mail_html += "<h4>* 관련 링크</h4>"+"<p>"+ restaurant.restaurant_link+"</p><br>"
        mail_html += "<h4>* 상세 내용</h4>"+"<p>"+ restaurant.restaurant_content+"</p><br>"
        mail_html += "<h4>* 관련 키워드</h4>"+"<p>"+ restaurant.restaurant_keyword+"</p><br>"
        mail_html += "<br>"
    mail_html += "</body></html>"
    print(mail_html)
    ### Account configuration / SMTP send would go below this point.
    return HttpResponseRedirect(reverse('index'))
    #return HttpResponse("sendEmail")
|
23,365 | 0cad4aee67b7b33bb1a23382c96ea3c3287af51a | from typing import Dict
from neomodel import UniqueIdProperty, StructuredNode, StringProperty, Relationship, RelationshipTo
from Strix.models.folder import Folder
from Strix.models.tag import Tag, TaggedRel
class Bookmark(StructuredNode):
    """Neo4j node for a saved bookmark, linked to its tags and folder."""
    uid = UniqueIdProperty()
    url = StringProperty()
    title = StringProperty()
    tags = RelationshipTo(Tag, 'TAGGED', model=TaggedRel)
    folder = Relationship(Folder, 'STORED_IN')

    def get_gql_node(self) -> Dict:
        """Return the GraphQL-facing representation of this bookmark."""
        return {"id": self.uid, "url": self.url, "title": self.title}
23,366 | 2cb66698b6ee22195283dc3f7dfc49e0c1755fd9 | # -*- coding: utf-8 -*-
"""Task08.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1wN4xHrqpix-5QvIdSbYhvU76SjW85AoJ
**Task 08: Completing missing data**
"""
#!pip install rdflib
github_storage = "https://raw.githubusercontent.com/FacultadInformatica-LinkedData/Curso2021-2022/master/Assignment4/course_materials"
from rdflib import Graph, Namespace, Literal, URIRef
from rdflib.namespace import RDF, RDFS
from rdflib.plugins.sparql import prepareQuery
# Load the two RDF/XML datasets from the course GitHub repository.
g1 = Graph()
g2 = Graph()
g1.parse(github_storage+"/rdf/data01.rdf", format="xml")
g2.parse(github_storage+"/rdf/data02.rdf", format="xml")
# Dump every triple of each graph for inspection.
print('Grafo 1')
for s,p,o in g1:
    print(s,p,o)
print('Grafo 2')
for s,p,o in g2:
    print(s,p,o)
# Namespaces: the exercise's data namespace and vCard vocabulary.
ns = Namespace('http://data.org#')
VCARD = Namespace("http://www.w3.org/2001/vcard-rdf/3.0#")
"""Tarea: lista todos los elementos de la clase Person en el primer grafo (data01.rdf) y completa los campos (given name, family name y email) que puedan faltar con los datos del segundo grafo (data02.rdf). Puedes usar consultas SPARQL o iterar el grafo, o ambas cosas."""
# q2 = prepareQuery('''
# SELECT ?val WHERE {
# ?Person VCARD:Family ?val
# }
# ''',
# initNs = { "VCARD": VCARD}
# )
# # for s,p,o in g2.triples((ns.JohnDoe,VCARD.Family, None)):
# # print(s,p,o)
# for q in g2.query(q2, initBindings={'?Person': ns.JohnDoe}):
# print(q)
# Persons in data01: select every subject whose type is Person or a
# (transitive) subclass of it.
q1 = prepareQuery('''
        SELECT ?Subject WHERE {
        ?Class rdfs:subClassOf* ?Person.
        ?Subject rdf:type ?Class
        }
        ''',
        initNs = { "rdfs": RDFS, "rdf": RDF}
)
# Query of Given, Family, email: fetch the value of one vCard property for a
# given person (property bound at execution time via initBindings).
q2 = prepareQuery('''
        SELECT ?val WHERE {
        ?Person ?vcard ?val
        }
        '''
)
# The three vCard properties we want to complete on each person.
info = ['Given','Family','EMAIL']
# Insert the remaining information: for every person in g1, pull each missing
# property value from g2 and add the triple to g1 (adding an existing triple
# is a no-op in rdflib, so already-present values are unaffected).
for r in g1.query(q1, initBindings = {'?Person' : ns.Person}):
    # print(r[0])
    for i in info:
        for q in g2.query(q2, initBindings={'?Person': r[0], '?vcard': VCARD[i]}):
            g1.add((r[0],VCARD[i],q[0]))
print('Grafo 1')
for s,p,o in g1:
    print(s,p,o)
|
23,367 | 12776af03005be8a143fd1363602210a867764ed | import numpy as np
import scipy.integrate as scint
from numpy import genfromtxt
import matplotlib.pyplot as plt
from matplotlib import colors
from scipy.stats import multivariate_normal
# Domain extents (the workspace is [-L1/2, L1/2] x [-L2/2, L2/2]).
L1 = 1.
L2 =1.
# Base covariance scale for the Gaussian mixture reconstruction below.
Sigma = 0.01
# Grid resolution per axis for the density plots.
NBINS = 300
fontsz=14
title_font = {'fontname':'Liberation Sans', 'size':'20', 'color':'black', 'weight':'normal'}#,'verticalalignment':'bottom'} # Bottom vertical alignment for more space
axis_font = {'fontname':'Liberation Sans', 'size':'18'}#'21'}
# Two identical NBINS x NBINS evaluation grids over the domain.
xj, yj = np.mgrid[-L1/2:L1/2:NBINS*1j, -L2/2:L2/2:NBINS*1j]
xi, yi = np.mgrid[-L1/2:L1/2:NBINS*1j, -L2/2:L2/2:NBINS*1j]#np.mgrid[-0.6:0.6:NBINS*1j, -0.6:0.6:NBINS*1j]
domain = np.transpose(np.vstack([np.linspace(-L1/2,L1/2,NBINS),np.linspace(-L2/2,L2/2,NBINS)]))
"""
ref = 0.3*multivariate_normal.pdf(np.dstack((xj,yj)), mean = [-0.2,0.1], cov=[[0.001,0.0],[0.0,0.001]]) \
+0.3*multivariate_normal.pdf(np.dstack((xj,yj)), mean = [0.3,-0.1], cov=[[0.01,0.0],[0.0,0.01]]) \
+0.4*multivariate_normal.pdf(np.dstack((xj,yj)), mean = [0.35,0.3], cov=[[0.001,0.0],[0.0,0.001]])
"""
# Load the trajectory log; first row is a header, last columns hold position.
data=genfromtxt('/home/kt-fitz/data/cpp2/s03_umbrella_p_set03-clean.csv',delimiter=",",dtype=float)
data = np.delete(data,0,0)
tlist = data[0:-1,0]
x2 = data[0:-1,-2]#+(L1/2.)
x1 = data[0:-1,-3]#+(L2/2.)
X = np.stack([x1,x2],axis=1)
# Sample file: rows are (x, y, weight) for the Gaussian-mixture reconstruction.
samps=genfromtxt('/home/kt-fitz/human-ergodicObj/umbrella_samples.csv',delimiter=",",dtype=float)
Nsamp = np.shape(samps)[1]
# Rebuild the reference density as a weighted sum of isotropic Gaussians
# centered on the samples (covariance Sigma/10 on both axes).
phi_approx=samps[2,0]*multivariate_normal.pdf(np.dstack((xi,yi)), mean = [samps[0,0],samps[1,0]], cov=[[Sigma/10,0.0],[0.0,Sigma/10]])
for i in range(1,Nsamp):
    phi_approx = phi_approx +samps[2,i]*multivariate_normal.pdf(np.dstack((xi,yi)), mean = [samps[0,i],samps[1,i]], cov=[[Sigma/10,0.0],[0.0,Sigma/10]])
# How many samples carry non-zero weight.
print(np.count_nonzero(samps[2]))
# Figure 1: sample locations.
plt.figure()
#plt.plot(tlist,data[0:-1,5])
#plt.plot(tlist,data[0:-1,6])
plt.plot(samps[0],samps[1],'k.')
plt.ylim(-0.5,0.5)
plt.xlim(-0.5,0.5)
# Figure 2: reconstructed density with the trajectory overlaid.
plt.figure()
#plt.pcolormesh(xj, yj, ref.reshape(xj.shape))#,norm=colors.Normalize(vmin=0,vmax=10.0))
plt.pcolormesh(xi, yi, phi_approx.reshape(xi.shape))
plt.plot(x1,x2,'ko',markersize=1)
plt.title("Reference Distribution", **title_font)
plt.xlabel ( r"$x$",**axis_font)
plt.ylabel ( r"$y$",**axis_font)
plt.margins(0)
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=fontsz)
cbar.ax.set_ylabel('Density',fontsize=16)
"""
plt.figure()
plt.pcolormesh(xi, yi, x_approx.reshape(xi.shape))#,norm=colors.Normalize(vmin=0,vmax=10.0))
plt.plot(x1,x2,'ko',markersize=1)
plt.title("Gaussian Approximation of X(t)", **title_font)
plt.xlabel ( r"$x$",**axis_font)
plt.ylabel ( r"$y$",**axis_font)
plt.margins(0)
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=fontsz)
cbar.ax.set_ylabel('Density',fontsize=16)
"""
# Final figure: raw trajectory as a connected line; show() blocks until closed.
plt.figure()
plt.plot(x1,x2)
plt.show()
|
23,368 | 936417471278de5dbcfc294b0a69590b0f99d971 | """Symplectic integrators for simulation of Hamiltonian dynamics."""
from abc import ABC, abstractmethod
from mici.errors import NonReversibleStepError
from mici.solvers import (maximum_norm, solve_fixed_point_direct,
solve_projection_onto_manifold_quasi_newton)
__pdoc__ = {}
class Integrator(ABC):
    """Base class for integrators."""

    def __init__(self, system, step_size):
        """
        Args:
            system (mici.systems.System): Hamiltonian system to integrate the
                dynamics of.
            step_size (float): Integrator time step.
        """
        self.system = system
        self.step_size = step_size

    @abstractmethod
    def step(self, state):
        """Perform a single integrator step from a supplied state.

        Concrete implementations below copy `state` and return the copy
        rather than mutating the input in place.

        Args:
            state (mici.states.ChainState): System state to perform integrator
                step from.

        Returns:
            new_state (mici.states.ChainState): New object corresponding to
                stepped state.
        """
class ExplicitLeapfrogIntegrator(Integrator):
    r"""
    Leapfrog integrator for Hamiltonian systems with tractable component flows.

    The Hamiltonian function is assumed to be expressible as the sum of two
    analytically tractable components for which the corresponding Hamiltonian
    flows can be exactly simulated. Specifically it is assumed that the
    Hamiltonian function \(h\) takes the form

    \[ h(q, p) = h_1(q) + h_2(q, p) \]

    where \(q\) and \(p\) are the position and momentum variables respectively,
    and \(h_1\) and \(h_2\) are Hamiltonian component functions for which the
    exact flows can be computed.

    `LeapfrogIntegrator` is an alias for `ExplicitLeapfrogIntegrator`.
    """

    def __init__(self, system, step_size):
        # Fail early if the system lacks the explicit component flows this
        # integrator composes.
        if not hasattr(system, 'h1_flow') or not hasattr(system, 'h2_flow'):
            raise ValueError(
                'Explicit leapfrog integrator can only be used for systems '
                'with explicit `h1_flow` and `h2_flow` Hamiltonian component '
                'flow maps. For systems in which only `h1_flow` is available '
                'the `ImplicitLeapfrogIntegrator` class may be used instead.')
        super().__init__(system, step_size)

    def step(self, state):
        # Time step is signed by the integration direction stored on the state.
        dt = state.dir * self.step_size
        state = state.copy()
        # Symmetric (Strang) splitting: half h1 flow, full h2 flow, half h1.
        self.system.h1_flow(state, 0.5 * dt)
        self.system.h2_flow(state, dt)
        self.system.h1_flow(state, 0.5 * dt)
        return state


# Backwards-compatible alias, hidden from the generated API docs.
LeapfrogIntegrator = ExplicitLeapfrogIntegrator
__pdoc__['LeapfrogIntegrator'] = False
class ImplicitLeapfrogIntegrator(Integrator):
    r"""
    Implicit leapfrog integrator for Hamiltonian with non-separable component.

    The Hamiltonian function \(h\) is assumed to take the form

    \[ h(q, p) = h_1(q) + h_2(q, p) \]

    where \(q\) and \(p\) are the position and momentum variables respectively,
    \(h_1\) is a Hamiltonian component function for which the exact flow can be
    computed and \(h_2\) is a non-separable Hamiltonian component function of
    the position and momentum variables and for which exact simulation of the
    correspond Hamiltonian flow is not possible. A pair of implicit component
    updates are used to approximate the flow due to the \(h_2\) Hamiltonian
    component, with a fixed-point iteration used to solve the non-linear system
    of equations.
    """

    def __init__(self, system, step_size, reverse_check_tol=1e-8,
                 reverse_check_norm=maximum_norm,
                 fixed_point_solver=solve_fixed_point_direct,
                 fixed_point_solver_kwargs=None):
        """
        Args:
            system (mici.systems.System): Hamiltonian system to integrate the
                dynamics of.
            step_size (float): Integrator time step.
            reverse_check_tol (float): Tolerance for check of reversibility of
                implicit sub-steps which involve iterative solving of a
                non-linear system of equations. The step is assumed to be
                reversible if sequentially applying the forward and adjoint
                updates to a state returns to a state with a position component
                within a distance (defined by the `reverse_check_norm`
                argument) of `reverse_check_tol` of the original state position
                component. If this condition is not met a
                `mici.errors.NonReversibleStepError` exception is raised.
            reverse_check_norm (Callable[[array], float]): Norm function
                accepting a single one-dimensional array input and returning a
                non-negative floating point value defining the distance to use
                in the reversibility check. Defaults to
                `mici.solvers.maximum_norm`.
            fixed_point_solver (Callable[[Callable[array], array], array]):
                Function which given a function `func` and initial guess `x0`
                iteratively solves the fixed point equation `func(x) = x`
                initialising the iteration with `x0` and returning an array
                corresponding to the solution if the iteration converges or
                raising a `mici.errors.ConvergenceError` otherwise. Defaults to
                `mici.solvers.solve_fixed_point_direct`.
            fixed_point_solver_kwargs (None or Dict[str, object]): Dictionary
                of any keyword arguments to `fixed_point_solver`.
        """
        # Delegate common attribute set-up to the base class for consistency
        # with the other integrators in this module.
        super().__init__(system, step_size)
        self.reverse_check_tol = reverse_check_tol
        # Bug fix: this previously assigned the hard-coded `maximum_norm`,
        # silently ignoring any user-supplied `reverse_check_norm` argument.
        self.reverse_check_norm = reverse_check_norm
        self.fixed_point_solver = fixed_point_solver
        if fixed_point_solver_kwargs is None:
            fixed_point_solver_kwargs = {}
        self.fixed_point_solver_kwargs = fixed_point_solver_kwargs

    def _solve_fixed_point(self, fixed_point_func, x_init):
        # Solve `fixed_point_func(x) = x` starting from `x_init` using the
        # configured solver and its keyword arguments.
        return self.fixed_point_solver(
            fixed_point_func, x_init, **self.fixed_point_solver_kwargs)

    def _step_a(self, state, dt):
        # Exact flow of the separable h1 component (self-adjoint).
        self.system.h1_flow(state, dt)

    def _step_b_fwd(self, state, dt):
        # Implicit momentum update for the h2 component: solve for the new
        # momentum via fixed-point iteration.
        def fixed_point_func(mom):
            state.mom = mom
            return mom_init - dt * self.system.dh2_dpos(state)
        mom_init = state.mom
        state.mom = self._solve_fixed_point(fixed_point_func, mom_init)

    def _step_b_adj(self, state, dt):
        # Explicit adjoint of _step_b_fwd, followed by a reversibility check:
        # re-running the forward update backwards must recover the momentum.
        mom_init = state.mom.copy()
        state.mom -= dt * self.system.dh2_dpos(state)
        state_back = state.copy()
        self._step_b_fwd(state_back, -dt)
        rev_diff = self.reverse_check_norm(state_back.mom - mom_init)
        if rev_diff > self.reverse_check_tol:
            raise NonReversibleStepError(
                f'Non-reversible step. Distance between initial and '
                f'forward-backward integrated momentums = {rev_diff:.1e}.')

    def _step_c_fwd(self, state, dt):
        # Explicit position update, checked for reversibility against the
        # implicit adjoint update below.
        pos_init = state.pos.copy()
        state.pos += dt * self.system.dh2_dmom(state)
        state_back = state.copy()
        self._step_c_adj(state_back, -dt)
        rev_diff = self.reverse_check_norm(state_back.pos - pos_init)
        if rev_diff > self.reverse_check_tol:
            raise NonReversibleStepError(
                f'Non-reversible step. Distance between initial and '
                f'forward-backward integrated positions = {rev_diff:.1e}.')

    def _step_c_adj(self, state, dt):
        # Implicit position update solved by fixed-point iteration.
        def fixed_point_func(pos):
            state.pos = pos
            return pos_init + dt * self.system.dh2_dmom(state)
        pos_init = state.pos
        state.pos = self._solve_fixed_point(fixed_point_func, pos_init)

    def step(self, state):
        # Palindromic composition of the sub-steps (a b c c' b' a), each over a
        # signed half step, giving a time-reversible overall update.
        dt = 0.5 * state.dir * self.step_size
        state = state.copy()
        self._step_a(state, dt)
        self._step_b_fwd(state, dt)
        self._step_c_fwd(state, dt)
        self._step_c_adj(state, dt)
        self._step_b_adj(state, dt)
        self._step_a(state, dt)
        return state
class ConstrainedLeapfrogIntegrator(Integrator):
    r"""
    Leapfrog integrator for constrained Hamiltonian systems.
    The Hamiltonian function is assumed to be expressible as the sum of two
    components for which the corresponding (unconstrained) Hamiltonian flows
    can be exactly simulated. Specifically it is assumed that the Hamiltonian
    function \(h\) takes the form
    \[ h(q, p) = h_1(q) + h_2(q, p) \]
    where \(q\) and \(p\) are the position and momentum variables respectively,
    and \(h_1\) and \(h_2\) Hamiltonian component functions for which the exact
    flows can be computed.
    The system is assumed to be additionally subject to a set of holonomic
    constraints on the position component of the state i.e. that all valid
    states must satisfy
    \[ c(q) = 0. \]
    for some differentiable and surjective vector constraint function \(c\) and
    the set of positions satisfying the constraints implicitly defining a
    manifold. There is also a corresponding constraint implied on the momentum
    variables which can be derived by differentiating the above with respect to
    time and using that under the Hamiltonian dynamics the time derivative of
    the position is equal to the negative derivative of the Hamiltonian
    function with respect to the momentum
    \[ \partial c(q) \nabla_2 h(q, p) = 0. \]
    The set of momentum variables satisfying the above for given position
    variables is termed the cotangent space of the manifold (at a position),
    and the set of (position, momentum) pairs for which the position is on the
    constraint manifold and the momentum in the corresponding cotangent space
    is termed the cotangent bundle.
    The integrator exactly preserves these constraints at all steps, such that
    if an initial position momentum pair \((q, p)\) are in the cotangent
    bundle, the corresponding pair after calling the `step` method of the
    integrator will also be in the cotangent bundle.
    """
    def __init__(self, system, step_size, n_inner_step=1,
                 reverse_check_tol=2e-8, reverse_check_norm=maximum_norm,
                 projection_solver=solve_projection_onto_manifold_quasi_newton,
                 projection_solver_kwargs=None):
        """
        Args:
            system (mici.systems.System): Hamiltonian system to integrate the
                dynamics of.
            step_size (float): Integrator time step.
            n_inner_step (int): Positive integer specifying number of 'inner'
                constrained `system.h2_flow` steps to take within each overall
                step. As the derivative `system.dh1_dpos` is not evaluated
                during the `system.h2_flow` steps, if this derivative is
                relatively expensive to compute compared to evaluating
                `system.h2_flow` then compared to using `n_inner_step = 1` (the
                default) for a given `step_size` it can be more computationally
                efficient to use `n_inner_step > 1` in combination within a
                larger `step_size`, thus reducing the number of
                `system.dh1_dpos` evaluations to simulate forward a given time
                while still controlling the effective time step used for the
                constrained `system.h2_flow` steps which involve solving a
                non-linear system of equations to retract the position
                component of the updated state back on to the manifold, with
                the iterative solver typically diverging if the time step used
                is too large.
            reverse_check_tol (float): Tolerance for check of reversibility of
                implicit sub-steps which involve iterative solving of a
                non-linear system of equations. The step is assumed to be
                reversible if sequentially applying the forward and adjoint
                updates to a state returns to a state with a position component
                within a distance (defined by the `reverse_check_norm`
                argument) of `reverse_check_tol` of the original state position
                component. If this condition is not met a
                `mici.errors.NonReversibleStepError` exception is raised.
            reverse_check_norm (Callable[[array], float]): Norm function
                accepting a single one-dimensional array input and returning a
                non-negative floating point value defining the distance to use
                in the reversibility check. Defaults to
                `mici.solvers.maximum_norm`.
            projection_solver (Callable[
                    [ChainState, ChainState, float, System], ChainState]):
                Function which given two states `state` and `state_prev`,
                floating point time step `dt` and a Hamiltonian system object
                `system` solves the non-linear system of equations in `λ`
                    system.constr(
                        state.pos + dh2_flow_pos_dmom @
                            system.jacob_constr(state_prev).T @ λ) == 0
                where `dh2_flow_pos_dmom = system.dh2_flow_dmom(dt)[0]` is the
                derivative of the action of the (linear) `system.h2_flow` map
                on the state momentum component with respect to the position
                component. This is used to project the state position
                component back on to the manifold after an unconstrained
                `system.h2_flow` update. Defaults to
                `mici.solvers.solve_projection_onto_manifold_quasi_newton`.
            projection_solver_kwargs (None or Dict[str, object]): Dictionary of
                any keyword arguments to `projection_solver`.
        """
        self.system = system
        self.step_size = step_size
        self.n_inner_step = n_inner_step
        self.reverse_check_tol = reverse_check_tol
        self.reverse_check_norm = reverse_check_norm
        self.projection_solver = projection_solver
        # Normalize `None` to an empty kwargs dict so the solver call site
        # can always unpack it.
        if projection_solver_kwargs is None:
            projection_solver_kwargs = {}
        self.projection_solver_kwargs = projection_solver_kwargs
    def _h2_flow_retraction_onto_manifold(self, state, state_prev, dt):
        # Unconstrained h2 flow followed by a projection (retraction) of the
        # position component back on to the constraint manifold.
        self.system.h2_flow(state, dt)
        self.projection_solver(state, state_prev, dt, self.system,
                               **self.projection_solver_kwargs)
    def _project_onto_cotangent_space(self, state):
        # Restore the momentum constraint: project the momentum component on
        # to the cotangent space at the current position.
        state.mom = self.system.project_onto_cotangent_space(state.mom, state)
    def _step_a(self, state, dt):
        # Exact h1 flow followed by momentum re-projection (the h1 flow only
        # updates the momentum so the position constraint is untouched).
        self.system.h1_flow(state, dt)
        self._project_onto_cotangent_space(state)
    def _step_b(self, state, dt):
        # Perform `n_inner_step` constrained h2-flow sub-steps, each with a
        # reversibility check of the iterative retraction solve.
        dt_i = dt / self.n_inner_step
        for i in range(self.n_inner_step):
            state_prev = state.copy()
            self._h2_flow_retraction_onto_manifold(state, state_prev, dt_i)
            if i == self.n_inner_step - 1:
                # If at last inner step pre-evaluate dh1_dpos before projecting
                # state on to cotangent space, with computed value being
                # cached. During projection the constraint Jacobian at new
                # position will be calculated however if we are going to make a
                # h1_flow step immediately after we will evaluate dh1_dpos
                # which may involve evaluating the gradient of the log
                # determinant of the Gram matrix, during which we will evaluate
                # the constraint Jacobian in the forward pass anyway.
                # Pre-evaluating here therefore saves one extra Jacobian
                # evaluation when the target density includes a Gram matrix log
                # determinant term (and will not add any cost if this is not
                # the case as dh1_dpos will still be cached and reused).
                self.system.dh1_dpos(state)
            self._project_onto_cotangent_space(state)
            # Reversibility check: integrate back from the updated state and
            # require it to land within tolerance of the pre-step position.
            state_back = state.copy()
            self._h2_flow_retraction_onto_manifold(state_back, state, -dt_i)
            rev_diff = self.reverse_check_norm(state_back.pos - state_prev.pos)
            if rev_diff > self.reverse_check_tol:
                raise NonReversibleStepError(
                    f'Non-reversible step. Distance between initial and '
                    f'forward-backward integrated positions = {rev_diff:.1e}.')
    def step(self, state):
        # Symmetric (leapfrog) composition: half h1 step, full constrained h2
        # step, half h1 step. Operates on and returns a copy of the input.
        dt = state.dir * self.step_size
        state = state.copy()
        self._step_a(state, 0.5 * dt)
        self._step_b(state, dt)
        self._step_a(state, 0.5 * dt)
        return state
|
23,369 | 084920172b840e50843c566c3114d3a2ab375d43 | __author__ = 'yinjun'
import os
import imp
import time
class SimpleLeetLoader:
def loadDirs(self):
os.chdir(os.path.dirname(os.path.abspath(__file__)))
#print os.path.dirname(os.path.abspath(__file__))
#print os.getcwd()
dirs = os.listdir(os.getcwd())
code = {}
for d in dirs:
#print d
if os.path.isdir(d) and d!='.idea':
sub_dirs = os.listdir(d)
#print sub_dirs
for s in sub_dirs:
path = d+'/'+s
#print path
if os.path.isdir(path):
p = s.index('-')
#print p
if p >= 0:
key = s[0:p]
#print key
code[key] = path
return code
def execute(self, path):
pys = os.listdir(path)
l = len(pys)
if l == 1:
file = path + '/' + pys[0]
os.chdir(path)
execfile(pys[0])
elif l == 0:
print "python file not found in ", path
else:
if "solution.py" in pys and "test.py" in pys:
os.chdir(path)
execfile("test.py")
else:
#print pys
for i in range(l):
print i, pys[i]
no = raw_input("input file number:")
j = int(no)
if j>=0 and j < l:
file = path + '/' + pys[j]
path = os.getcwd() + '/' + path
#execfile(file)
#print path
os.chdir(path)
execfile(pys[j])
else:
print "error python file no "
def run(self):
content = raw_input("input problem number (ctrl + x or 0 to exit):")
#print content
code = self.loadDirs()
if content == '0' or content == "":
exit(1)
elif content in code:
path = code[content]
self.execute(path)
else:
print "path not found ", content
def runWhile(self):
while True:
self.run()
time.sleep(1)
if __name__ == '__main__':
    # Entry point: start the interactive loader loop when run as a script.
    loader = SimpleLeetLoader()
    loader.runWhile()
23,370 | 051305344f23a4ee7f42017cbb4a97f320a535fb | import json
import logging
from cryptography.fernet import Fernet
from django.conf import settings
from django.shortcuts import reverse
from django.db import connection
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseServerError, JsonResponse
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import get_user_model
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from accounts.models import UserPreferences
from data.decorators import process_in_thread
from rt import api as rt_api
from . import views, models
logger = logging.getLogger(__name__)
# API Methods
def delete_file(id):
    """
    Delete a file that was previously uploaded to Slack
    :param id: The identifier of the file in Slack
    :return: True if the file was deleted successfully; False otherwise
    """
    # Guard against missing configuration, matching the convention used by
    # every other API helper in this module (previously this function would
    # raise from inside the SDK instead).
    if not settings.SLACK_TOKEN:
        return False
    client = WebClient(token=settings.SLACK_TOKEN)
    try:
        response = client.files_delete(file=id)
        return response['ok']
    except SlackApiError as e:
        # Surface API failures as a falsy result rather than an uncaught
        # exception, consistent with the module's other helpers.
        assert e.response['ok'] is False
        return False
def load_channels(archived=False):
    """
    Fetch the public channels in the Slack workspace.
    :param archived: Boolean - Include archived channels
    :returns: Response object (Dictionary)
    """
    if not settings.SLACK_TOKEN:
        return {'ok': False, 'error': 'config_error'}
    client = WebClient(token=settings.SLACK_TOKEN)
    try:
        resp = client.conversations_list(exclude_archived=not archived)
        assert resp['ok'] is True
        # Reduce each channel record to an (id, name) pair.
        channels = [(ch['id'], ch['name']) for ch in resp['channels']]
        return {'ok': True, 'channels': channels}
    except SlackApiError as e:
        assert e.response['ok'] is False
        return e.response
def channel_info(channel_id):
    """
    Look up the full details of a Slack channel.
    :param channel_id: The ID of the channel
    :return: Channel details (Dictionary); `None` when unconfigured or on API error
    """
    if not settings.SLACK_TOKEN:
        return None
    try:
        resp = WebClient(token=settings.SLACK_TOKEN).conversations_info(channel=channel_id)
        assert resp['ok'] is True
        return resp['channel']
    except SlackApiError as e:
        assert e.response['ok'] is False
        return None
def join_channel(channel):
    """
    Join a public channel (use after receiving a 'not_in_channel' error).
    :param channel: The channel to join
    :returns: Response object (Dictionary)
    """
    if not settings.SLACK_TOKEN:
        return {'ok': False, 'error': 'config_error'}
    try:
        resp = WebClient(token=settings.SLACK_TOKEN).conversations_join(channel=channel)
        assert resp['ok'] is True
        return {'ok': resp['ok']}
    except SlackApiError as e:
        assert e.response['ok'] is False
        return e.response
def upload(attachment, filename, title=None, message=None, channels=None):
    """
    Upload a new file to Slack.
    :param attachment: File path to the file
    :param filename: Filename with file extension (i.e. example.pdf)
    :param title: Title of the file to display in Slack
    :param message: The message text introducing the file in the specified ``channels``
    :param channels: Comma-separated list of channel names or ids where the file should be posted (i.e. C1234567890)
    :returns: Response object (Dictionary)
    """
    if not settings.SLACK_TOKEN:
        return {'ok': False, 'error': 'config_error'}
    client = WebClient(token=settings.SLACK_TOKEN)
    # Large uploads can be slow; extend the SDK's request timeout.
    client.timeout = 600
    try:
        kwargs = {'file': attachment, 'filename': filename, 'title': title}
        if channels:
            # Only share to channels (with an intro comment) when requested.
            kwargs.update(channels=channels, initial_comment=message)
        resp = client.files_upload(**kwargs)
        assert resp['ok'] is True
        return {'ok': True, 'file': resp['file']}
    except SlackApiError as e:
        assert e.response['ok'] is False
        return e.response
def slack_post(channel, thread=None, text=None, content=None, username=None, icon_url=None, attachment=None):
    """
    Post a message on Slack.
    The `text` parameter is not required when the `content` parameter is provided, however including it is still
    highly recommended.
    :param channel: The identifier of the Slack conversation to post to
    :param thread: The timestamp of another message to post this message as a reply to
    :param text: Message text (Formatting: https://api.slack.com/reference/surfaces/formatting)
    :param content: List of valid blocks data (https://api.slack.com/block-kit)
    :param username: Name displayed by the bot
    :param icon_url: The URL to an image / icon to display next to the message (profile picture)
    :param attachment: Dictionary with file details - {'name': 'Example File', 'filepath': '/media/slack/example.pdf'}
    :returns: Response object (Dictionary)
    """
    if not settings.SLACK_TOKEN:
        return {'ok': False, 'error': 'config_error'}
    client = WebClient(token=settings.SLACK_TOKEN)
    if attachment:
        # File posts are delegated to the upload helper.
        filename = attachment['filepath'].split('/')[-1]
        return upload(attachment['filepath'], filename, attachment['name'], text, channel)
    if not content and not text:
        return {'ok': False, 'error': 'no_text'}
    # Assemble the call once instead of duplicating it per branch.
    kwargs = {'channel': channel, 'thread_ts': thread, 'text': text}
    if content:
        kwargs['blocks'] = content
    if username:
        kwargs.update(username=username, icon_url=icon_url)
    try:
        resp = client.chat_postMessage(**kwargs)
        assert resp['ok'] is True
        return {'ok': True, 'message': resp['message']}
    except SlackApiError as e:
        assert e.response['ok'] is False
        return e.response
def post_ephemeral(channel, text, user, username=None):
    """
    Send an ephemeral message, visible only to the target user, in a channel.
    :param channel: The identifier of the Slack conversation to post to
    :param text: Message text (Formatting: https://api.slack.com/reference/surfaces/formatting)
    :param user: The identifier of the specified user
    :param username: Name displayed by the bot
    :return: Response object (Dictionary)
    """
    if not settings.SLACK_TOKEN:
        return {'ok': False, 'error': 'config_error'}
    try:
        resp = WebClient(token=settings.SLACK_TOKEN).chat_postEphemeral(
            channel=channel, text=text, user=user, username=username)
        assert resp['ok'] is True
        return resp
    except SlackApiError as e:
        assert e.response['ok'] is False
        return e.response
def message_react(channel, message, reaction):
    """
    Add an emoji reaction to a Slack message.
    :param channel: The channel the message was posted to
    :param message: The timestamp of the message
    :param reaction: The name of the emoji to react to the message with
    :return: Response object (Dictionary)
    """
    if not settings.SLACK_TOKEN:
        return {'ok': False, 'error': 'config_error'}
    try:
        resp = WebClient(token=settings.SLACK_TOKEN).reactions_add(
            channel=channel, timestamp=message, name=reaction)
        assert resp['ok'] is True
        return resp
    except SlackApiError as e:
        assert e.response['ok'] is False
        return e.response
def message_unreact(channel, message, reaction):
    """
    Remove an emoji reaction from a Slack message.
    :param channel: The channel the message was posted to
    :param message: The timestamp of the message
    :param reaction: The name of the emoji to remove from the message
    :return: Response object (Dictionary)
    """
    if not settings.SLACK_TOKEN:
        return {'ok': False, 'error': 'config_error'}
    try:
        resp = WebClient(token=settings.SLACK_TOKEN).reactions_remove(
            channel=channel, timestamp=message, name=reaction)
        assert resp['ok'] is True
        return resp
    except SlackApiError as e:
        assert e.response['ok'] is False
        return e.response
def retrieve_message(channel, message_id):
    """
    Fetch a single message from a Slack channel's history.
    :param channel: The channel the message was posted to
    :param message_id: The timestamp of the message
    :return: The message details
    """
    if not settings.SLACK_TOKEN:
        return {'ok': False, 'error': 'config_error'}
    try:
        # `latest` + `inclusive` + `limit=1` pins the history query to
        # exactly the requested message.
        history = WebClient(token=settings.SLACK_TOKEN).conversations_history(
            channel=channel, latest=message_id, inclusive=True, limit=1)
        assert history['ok'] is True
        return history
    except SlackApiError as e:
        assert e.response['ok'] is False
        return e.response
def replace_message(channel, message_id, text=None, content=None):
    """
    Replace an existing message in Slack. The message will need to have been published by the bot.
    The `text` parameter is not required when the `content` parameter is provided, however including it is still
    highly recommended.
    :param channel: The identifier of the Slack conversation the message was posted to
    :param message_id: The timestamp of the message to be updated
    :param text: Message text (Formatting: https://api.slack.com/reference/surfaces/formatting)
    :param content: List of valid blocks data (https://api.slack.com/block-kit)
    :return: Response object (Dictionary)
    """
    if not settings.SLACK_TOKEN:
        return {'ok': False, 'error': 'config_error'}
    # Refuse to blank out a message entirely.
    if not content and not text:
        return {'ok': False, 'error': 'no_text'}
    try:
        resp = WebClient(token=settings.SLACK_TOKEN).chat_update(
            channel=channel, ts=message_id, as_user=True, text=text, blocks=content, link_names=True)
        assert resp['ok'] is True
        return {'ok': True, 'message': resp['message']}
    except SlackApiError as e:
        assert e.response['ok'] is False
        return e.response
def message_link(channel, message_id):
    """
    Obtain a permalink for a specific message in Slack.
    :param channel: The channel the message was posted in
    :param message_id: The timestamp of the message
    :return: Permalink URL; `None` when unconfigured or on API error
    """
    if not settings.SLACK_TOKEN:
        return None
    try:
        resp = WebClient(token=settings.SLACK_TOKEN).chat_getPermalink(
            channel=channel, message_ts=message_id)
        assert resp['ok'] is True
        return resp['permalink']
    except SlackApiError as e:
        assert e.response['ok'] is False
        return None
def user_add(channel, users):
    """
    Invite users to join a slack channel. The bot must be a member of the channel.
    :param channel: The identifier of the Slack channel to invite the users to
    :param users: The identifiers of the specified users (List of up to 1000)
    :return: Response object (Dictionary)
    """
    if not settings.SLACK_TOKEN:
        return {'ok': False, 'error': 'config_error'}
    try:
        resp = WebClient(token=settings.SLACK_TOKEN).conversations_invite(channel=channel, users=users)
        assert resp['ok'] is True
        return {'ok': resp['ok']}
    except SlackApiError as e:
        assert e.response['ok'] is False
        return e.response
def user_kick(channel, user):
    """
    Remove a user from a slack channel. The bot must be a member of the channel.
    :param channel: The identifier of the Slack channel to remove users from
    :param user: The identifier of the specified user
    :return: Response object (Dictionary)
    """
    if not settings.SLACK_TOKEN:
        return {'ok': False, 'error': 'config_error'}
    try:
        resp = WebClient(token=settings.SLACK_TOKEN).conversations_kick(channel=channel, user=user)
        assert resp['ok'] is True
        return {'ok': resp['ok']}
    except SlackApiError as e:
        assert e.response['ok'] is False
        return e.response
def user_profile(user_id):
    """
    Fetch basic profile information for a Slack user.
    :param user_id: The identifier for the user in Slack (i.e. U123456789)
    :return: Slack user info (Dictionary)
    """
    if not settings.SLACK_TOKEN:
        return {'ok': False, 'error': 'config_error'}
    try:
        resp = WebClient(token=settings.SLACK_TOKEN).users_info(user=user_id)
        assert resp['ok'] is True
        return resp
    except SlackApiError as e:
        assert e.response['ok'] is False
        return e.response
def lookup_user(email):
    """
    Search the Slack workspace for a user by email address.
    :param email: The email address for the user
    :return: The identifier for the user in Slack (`None` if the search returns nothing)
    """
    if not settings.SLACK_TOKEN:
        return None
    try:
        resp = WebClient(token=settings.SLACK_TOKEN).users_lookupByEmail(email=email)
        assert resp['ok'] is True
        return resp['user']['id']
    except SlackApiError as e:
        assert e.response['ok'] is False
        return None
def check_presence(user):
    """
    Query Slack for a user's presence ("active" or "away").
    :param user: The identifier of the specified user
    :return: True if user is currently active, False if user is away; `None` on error
    """
    if not settings.SLACK_TOKEN:
        return None
    try:
        resp = WebClient(token=settings.SLACK_TOKEN).users_getPresence(user=user)
        assert resp['ok'] is True
        return resp['presence'] == 'active'
    except SlackApiError as e:
        assert e.response['ok'] is False
        return None
def open_modal(trigger_id, blocks):
    """
    Open a modal view (in Slack) in response to user action.
    :param trigger_id: The trigger id provided by the API during the user's last interaction
    :param blocks: Block configuration (https://api.slack.com/block-kit)
    :return: View ID if successful; None otherwise
    """
    if not settings.SLACK_TOKEN:
        return None
    try:
        resp = WebClient(token=settings.SLACK_TOKEN).views_open(trigger_id=trigger_id, view=blocks)
        assert resp['ok'] is True
        return resp['view']['id']
    except SlackApiError as e:
        assert e.response['ok'] is False
        return None
# Event Handlers
@csrf_exempt
@require_POST
def handle_event(request):
    """
    Event endpoint for the Slack API. Slack will send POST requests here whenever certain events have been triggered.

    Handles the one-time `url_verification` handshake and a small set of
    `event_callback` events (team_join, app_home_opened, channel_created).
    Unrecognized payload types fall through to a "Not implemented" response.
    """
    payload = json.loads(request.body)
    if payload['type'] == "url_verification":
        # Slack's endpoint verification handshake: echo the challenge back.
        return JsonResponse({"challenge": payload['challenge']})
    elif payload['type'] == "event_callback":
        event = payload['event']
        if event['type'] == "team_join":
            # Greet new workspace members with the welcome message blocks.
            slack_post(event['user']['id'], text="Welcome to LNL!", content=views.welcome_message())
        elif event['type'] == "app_home_opened":
            # Rebuild the App Home tab in a background thread.
            load_app_home(event['user'])
        elif event['type'] == "channel_created":
            # Optionally auto-join newly created channels.
            if settings.SLACK_AUTO_JOIN:
                join_channel(event['channel']['id'])
        return HttpResponse()
    return HttpResponse("Not implemented")
@process_in_thread
def load_app_home(user_id):
    """
    Load the App's Home tab.

    Runs in a background thread (see `process_in_thread`): looks up the
    user's email, pulls their active RT tickets, renders the Home tab blocks
    and publishes the view.
    :param user_id: The identifier for the user in Slack
    :return: Response object (Dictionary)
    """
    ticket_ids = []
    tickets = []
    user = user_profile(user_id)
    if user['ok']:
        email = user['user']['profile']['email']
        # Newest tickets first.
        ticket_ids = sorted(rt_api.simple_ticket_search(requestor=email, status="__Active__"), reverse=True)
    for ticket_id in ticket_ids:
        ticket = rt_api.fetch_ticket(ticket_id)
        # A 'message' key indicates an RT error payload; skip those tickets.
        if ticket.get('message'):
            continue
        tickets.append(ticket)
    blocks = views.app_home(tickets)
    if not settings.SLACK_TOKEN:
        return {'ok': False, 'error': 'config_error'}
    client = WebClient(token=settings.SLACK_TOKEN)
    try:
        response = client.views_publish(user_id=user_id, view={"type": "home", "blocks": blocks})
        assert response['ok'] is True
        return response
    except SlackApiError as e:
        assert e.response['ok'] is False
        return e.response
# Interaction handlers
@csrf_exempt
@require_POST
def handle_interaction(request):
    """
    Interaction endpoint for the Slack API. Slack will send POST requests here when users interact with a shortcut or
    interactive component.

    Dispatches on the payload `type`:
    - `shortcut`: opens the TFed ticket submission modal.
    - `message_action`: opens the "report message" modal.
    - `view_submission`: handles modal submissions (new ticket, ticket
      update, ticket comment, message report).
    - `block_actions`: handles button/menu interactions on TFed ticket
      messages and the App Home tab.
    """
    payload = json.loads(request.POST['payload'])
    interaction_type = payload.get('type', None)
    # Handle shortcut
    if interaction_type == "shortcut":
        callback_id = payload.get('callback_id', None)
        if callback_id == "tfed":
            blocks = views.tfed_modal()
            modal_id = open_modal(payload.get('trigger_id', None), blocks)
            if modal_id:
                return HttpResponse()
            return HttpResponseServerError("Failed to open modal")
    if interaction_type == "message_action":
        callback_id = payload.get('callback_id', None)
        if callback_id == "report":
            channel = payload.get('channel', {'id': None})['id']
            # Bot-posted messages carry no 'user'; fall back to 'username'.
            sender = payload['message'].get('user', None)
            if not sender:
                sender = payload['message']['username']
            ts = payload['message']['ts']
            text = payload['message']['text']
            # Persist (or reuse) a record of the reported message.
            message, created = models.SlackMessage.objects.get_or_create(posted_to=channel, posted_by=sender, ts=ts,
                                                                         content=text)
            blocks = views.report_message_modal(message)
            modal_id = open_modal(payload.get('trigger_id', None), blocks)
            if modal_id:
                return HttpResponse()
            return HttpResponseServerError("Failed to open modal")
    # Handle modal view submission
    if interaction_type == "view_submission":
        values = payload['view']['state']['values']
        callback_id = payload['view'].get('callback_id', None)
        # TFed ticket submission
        if callback_id == "tfed-modal":
            subject = values['subject']['subject-action']['value']
            description = values['description']['description-action']['value']
            topic = values['rt_topic']['rt_topic-action']['selected_option']['value']
            user_id = payload['user']['id']
            user = user_profile(user_id)
            if user['ok']:
                # Ticket creation happens in a background thread.
                __create_ticket(user, subject, description, topic)
                return HttpResponse()
            return HttpResponseServerError("Failed to obtain user information")
        # Update TFed ticket
        elif callback_id == "ticket-update-modal":
            # The modal layout varies: when an assignee picker is present the
            # ticket metadata block shifts from index 1 to index 2.
            ticket_info = payload['view']['blocks'][1]
            owner_id = None
            if ticket_info['type'] != "divider":
                ticket_info = payload['view']['blocks'][2]
                owner_id = values['ticket_assignee']['ticket_assignee-action']['selected_user']
            # block_id encodes "<ticket_id>#<channel>#<ts>".
            ticket_id = ticket_info['block_id'].split("#")[0]
            channel = ticket_info['block_id'].split("#")[1]
            ts = ticket_info['block_id'].split("#")[2]
            status = values['ticket_status']['ticket_status-action']['selected_option']
            if status:
                status = status['value']
            comments = values['ticket_comment']['ticket_comment-action']['value']
            checkboxes = values['email_requestor']['email_requestor-action']['selected_options']
            notify_requestor = False
            if len(checkboxes) > 0:
                notify_requestor = True
            # Obtain user's RT token
            user_id = payload['user']['id']
            token = __retrieve_rt_token(user_id)
            __update_ticket(ticket_id, status, owner_id, comments, notify_requestor, token, user_id, channel, ts)
            return HttpResponse()
        elif callback_id == "ticket-comment-modal":
            # The ticket number is stored in the first block's block_id.
            ticket_id = payload['view']['blocks'][0]['block_id']
            comments = values[ticket_id]['comment-action']['value']
            user_id = payload['user']['id']
            token = __retrieve_rt_token(user_id)
            __post_ticket_comment(ticket_id, user_id, comments, token)
            return HttpResponse()
        elif callback_id == "report-modal":
            message_id = payload['view']['blocks'][0]['block_id']
            comments = values['report-comment']['comment-action']['value']
            reporter = payload['user']['id']
            __save_report(message_id, reporter, comments)
            return HttpResponse()
        return HttpResponseNotFound()
    # Handle block interaction event
    if interaction_type == "block_actions":
        action = payload['actions'][0]['action_id']
        channel = payload.get('channel', None)
        if channel:
            channel = channel['id']
        message = payload.get('message', None)
        view = payload.get('view', None)
        # TFed message
        if channel in [settings.SLACK_TARGET_TFED, settings.SLACK_TARGET_TFED_DB] and message and not view:
            # block_id is "<ticket_id>~<reporter>".
            ticket_id = message['blocks'][0]['block_id'].split('~')[0]
            blocks = views.ticket_update_modal(ticket_id, channel, message['ts'], action)
            # Get current ticket from RT
            __refresh_ticket_async(channel, message)
            # Check that user has token, if not display a warning
            user_id = payload['user']['id']
            token = __retrieve_rt_token(user_id)
            if not token:
                error_message = "Hi there! Before you can update tickets, you'll need to set up access to your RT " \
                                "account. Visit https://lnl.wpi.edu" + reverse("support:link-account") + \
                                " to get started."
                post_ephemeral(channel, error_message, user_id, 'Request Tracker')
                return HttpResponse()
            modal_id = open_modal(payload.get('trigger_id', None), blocks)
            if modal_id:
                return HttpResponse()
            return HttpResponseServerError("Failed to open modal")
        # Home tab menu options
        if action == "home-ticket-update":
            ticket_id = payload['actions'][0]['block_id']
            option = payload['actions'][0]['selected_option']['value']
            if option == 'Comment':
                blocks = views.ticket_comment_modal(ticket_id)
                modal_id = open_modal(payload.get('trigger_id', None), blocks)
                if not modal_id:
                    return HttpResponseServerError("Failed to open modal")
        return HttpResponse()
    return HttpResponseNotFound()
@process_in_thread
def __create_ticket(user, subject, description, topic):
    """
    Handler for creating a new TFed ticket (runs in a background thread).
    :param user: The user that submitted the ticket
    :param subject: The ticket's subject line
    :param description: The contents of the ticket
    :param topic: The Queue in RT to post the ticket to
    """
    # Database tickets are announced in a separate channel.
    target = settings.SLACK_TARGET_TFED
    if topic == 'Database':
        target = settings.SLACK_TARGET_TFED_DB
    user_email = user['user']['profile'].get('email', 'lnl-no-reply@wpi.edu')
    display_name = user['user']['profile']['real_name']
    # Sign the description with the submitter's display name.
    resp = rt_api.create_ticket(topic, user_email, subject, description + "\n\n- " + display_name)
    ticket_id = resp.get('id', None)
    if ticket_id:
        # Announce the new ticket in the target channel.
        ticket_info = {
            "url": 'https://lnl-rt.wpi.edu/rt/Ticket/Display.html?id=' + ticket_id,
            "id": ticket_id,
            "subject": subject,
            "description": description,
            "status": "New",
            "assignee": None,
            "reporter": user['user']['name']
        }
        ticket = views.tfed_ticket(ticket_info)
        slack_post(target, text=description, content=ticket, username='Request Tracker')
        return
    # RT did not return a ticket id; tell only the submitter.
    error_message = "Whoops! It appears something went wrong while attempting to submit your request. " \
                    "Please wait a few minutes then try again. If the problem persists, please email " \
                    "us directly at tfed@wpi.edu."
    post_ephemeral(target, error_message, user['user']['id'], username="Request Tracker")
@process_in_thread
def __update_ticket(ticket_id, status, owner_id, comments, notify_requestor, token, user_id, channel, ts):
    """
    Handler for updating an existing TFed ticket (runs in a background thread).
    :param ticket_id: The ticket number
    :param status: The new status to assign to the ticket in RT
    :param owner_id: The Slack user ID for the ticket owner (who the ticket will be assigned to)
    :param comments: Comments to add to the ticket history
    :param notify_requestor: If True, the ticket creator will receive an email with the comments
    :param token: The RT auth token for the user that triggered this action
    :param user_id: The Slack user ID for the user that triggered this action
    :param channel: The identifier of the Slack channel this ticket was posted to
    :param ts: The timestamp of the original ticket message in Slack
    """
    # Update ticket metadata
    owner = user_profile(owner_id)
    username = ''
    if owner['ok']:
        # RT usernames are assumed to be the local part of the email address.
        username = owner['user']['profile'].get('email', '').split('@')[0]
    resp = rt_api.update_ticket(ticket_id, token, status, username)
    if rt_api.permission_error(resp):
        error_message = "Sorry, it appears you do not have permission to perform this action."
        post_ephemeral(channel, error_message, user_id, 'Request Tracker')
        return
    # Update ticket in Slack
    current_message = retrieve_message(channel, ts)
    # Join the channel and retry if the bot was not yet a member.
    if current_message.get('error', '') == 'not_in_channel':
        join_channel(channel)
        current_message = retrieve_message(channel, ts)
    resp = refresh_ticket_message(channel, current_message['messages'][0])
    if not resp['ok']:
        logger.warning("Failed to update ticket in Slack. Please check RT to see if your changes were applied.")
    # Post comments / replies, if applicable
    if comments:
        slack_user = user_profile(user_id)
        display_name = slack_user['user']['profile']['real_name']
        resp = rt_api.ticket_comment(ticket_id, comments + "\n\n- " + display_name, notify_requestor,
                                     token=token)
        if rt_api.permission_error(resp):
            error_message = "Sorry, it appears you do not have permission to perform this action."
            post_ephemeral(channel, error_message, user_id, 'Request Tracker')
            return
        # Mirror the comment into the Slack thread as the commenting user.
        profile_photo = slack_user['user']['profile']['image_original']
        slack_post(channel, ts, comments, username=display_name, icon_url=profile_photo)
@process_in_thread
def __post_ticket_comment(ticket_id, user_id, comments, token):
    """
    Comment on a TFed ticket on behalf of a Slack user (background process).
    :param ticket_id: The ticket number
    :param user_id: The Slack user ID for the user that triggered the action
    :param comments: The comments to be added to the ticket
    :param token: The RT auth token for the user that triggered the action (if applicable)
    """
    profile = user_profile(user_id)
    author = profile['user']['profile']['real_name']
    # Sign the comment with the author's display name and notify the requestor.
    rt_api.ticket_comment(ticket_id, comments + "\n\n- " + author, True, token=token)
def refresh_ticket_message(channel, message):
    """
    Update a TFed ticket message with the latest information
    :param channel: The channel the ticket was posted to
    :param message: The original message object
    :return: Response from Slack API after attempting to update the message
    """
    # The first block's block_id encodes "<ticket_id>~<reporter>".
    ticket_id = message['blocks'][0]['block_id'].split('~')[0]
    ticket_reporter = message['blocks'][0]['block_id'].split('~')[1]
    ticket_description = message['blocks'][1]['text']['text']
    ticket = rt_api.fetch_ticket(ticket_id)
    # A 'message' key indicates an RT error payload; bail out.
    if ticket.get('message'):
        return {"ok": False}
    ticket_owner = ticket['Owner']['id']
    # RT reports unassigned tickets as owned by "Nobody".
    if ticket_owner == "Nobody":
        ticket_owner = None
    ticket_info = {
        "url": 'https://lnl-rt.wpi.edu/rt/Ticket/Display.html?id=' + ticket_id,
        "id": ticket_id,
        "subject": ticket.get('Subject'),
        "description": ticket_description,
        "status": ticket.get('Status').capitalize(),
        "assignee": ticket_owner,
        "reporter": ticket_reporter
    }
    new_message = views.tfed_ticket(ticket_info)
    return replace_message(channel, message['ts'], ticket_description, new_message)
@process_in_thread
def __refresh_ticket_async(channel, message):
    """
    Update a TFed ticket message with the latest information in the background

    :param channel: The channel the ticket was posted to
    :param message: The original message object
    :return: Response from Slack API after attempting to update the message
    """
    result = refresh_ticket_message(channel, message)
    if result['ok']:
        return
    # The Slack update failed; the RT change itself may still have succeeded.
    logger.warning("Failed to update ticket in Slack. Please check RT to see if your changes were applied.")
def __retrieve_rt_token(user_id):
    """
    Retrieve a user's RT auth token (if it exists)

    :param user_id: The Slack user's identifier
    :return: Auth token; `None` if it doesn't exist
    """
    slack_user = user_profile(user_id)
    if not slack_user['ok']:
        return None
    # Map the Slack account to a local user via the email's local part.
    username = slack_user['user']['profile'].get('email', '').split('@')[0]
    account = get_user_model().objects.filter(username=username).first()
    if not account:
        return None
    prefs = UserPreferences.objects.filter(user=account).first()
    if not prefs or not prefs.rt_token:
        return None
    # Tokens are stored encrypted at rest; decrypt with the site-wide key.
    cipher_suite = Fernet(settings.CRYPTO_KEY)
    return cipher_suite.decrypt(prefs.rt_token.encode('utf-8')).decode('utf-8')
@process_in_thread
def __save_report(message_id, reporter, comments):
    """
    Create a report when a user reports a problematic Slack message

    :param message_id: The primary key value of the corresponding SlackMessage object
    :param reporter: Slack user ID for the user that reported the message
    :param comments: Optional comments for the report
    """
    message = models.SlackMessage.objects.get(pk=message_id)
    # Ensure message was posted to public channel. For privacy reasons, we currently do not report private messages.
    channel_details = channel_info(message.posted_to)
    if not channel_details['is_channel'] or channel_details['is_private']:
        # Private or non-channel conversation: remember it isn't public and
        # tell the reporter this flow isn't supported yet.
        message.public = False
        message.save()
        warning = ("This feature currently does not support reporting private messages. Please "
                   "contact a member of the executive board directly.")
        post_ephemeral(message.posted_to, warning, reporter)
    else:
        report = models.ReportedMessage.objects.create(message=message, comments=comments, reported_by=reporter)
        # Send Exec a notification
        blocks = views.reported_message_notification(reporter, report)
        slack_post(settings.SLACK_TARGET_EXEC, text="You have a new flagged message to review", content=blocks,
                   username="Admin Console")
        # Add red flag to message (to inform sender their message has been reported)
        # message_react(message.posted_to, message.ts, 'triangular_flag_on_post')
    connection.close()
|
23,371 | bbe06a8353f5903acdd32868eca87b77249d2934 | # Faça um programa que leia o sexo de uma pessoa, mas só aceite os valores 'M' ou 'F'. Caso esteja errado, peça a digitação novamente até ter um valor correto.
# Keep prompting until the user supplies a valid sex: 'M' or 'F' (case-insensitive).
sex = ''
while sex not in ('m', 'f'):
    sex = str(input('Digite seu sexo [M/F]: ')).lower()
print('Valor aceito.')
|
23,372 | 0a74188ad5288183552a2089b4a77300afb62a93 | import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.remote import webelement
from source.model.room_info import RoomInfo
from source.pages.page import Page
class ReservationPage2(Page):
    """
    - author: Saurav Kumar Saha
    - created: 2021-03-16
    - changed: 2021-07-28

    This class holds the information of reservation page 2, such as:
    it's url and other page elements.

    NOTE(review): element lookups use the Selenium 3 ``find_element_by_xpath``
    API, which was removed in Selenium 4 — confirm the pinned selenium version.
    """

    def __init__(self, driver: webdriver):
        """
        This constructor will set all needed attributes.

        :param driver: selenium webdriver instance used for all element lookups
        """
        super().__init__()
        self.__driver = driver
        self.__set_attributes()
        # Radio value of the room picked by the caller; None until chosen.
        self.__value_target_room_radio = None

    # public

    def get_url(self) -> str:
        """
        This method returns the page url
        """
        return self.__page_url

    def at(self) -> bool:
        """
        This method verifies if the browser is currently at page location
        """
        # The booking wizard marks the active step with the 'step_active' CSS class.
        return 'step_active' in self.__get_step_2_div().get_attribute("class")

    def get_next_button(self) -> webelement:
        """
        This method returns the next button element
        """
        return self.__driver.find_element_by_xpath(
            self.xpath_next_button
        )

    def get_xpath_next_button(self) -> str:
        """
        This method returns the next button xpath
        """
        return self.__xpath_next_button

    def get_english_language_label(self) -> webelement:
        """
        This method returns the english language label element
        """
        return self.__driver.find_element_by_xpath(
            self.xpath_english_lang_label
        )

    def get_xpath_english_lang_label(self) -> str:
        """
        This method returns the english lang label xpath
        """
        return self.__xpath_english_lang_label

    def get_value_target_room_radio(self) -> str:
        """
        This method returns the target room radio value
        """
        return self.__value_target_room_radio

    def set_value_target_room_radio(self, value_target_room_radio: str):
        """
        This method will set the target room radio value
        """
        self.__value_target_room_radio = value_target_room_radio

    def get_target_room_radio(self) -> webelement:
        """
        This method returns the target room radio element
        """
        return self.__driver.find_element_by_xpath(
            f"//input[@value='{self.get_value_target_room_radio()}']"
        )

    def find_all_available_rooms(self) -> list:
        """
        Scrape every selectable room from the listing and return them as a
        list of RoomInfo records.
        (Return annotation corrected: this returns a list, not a webelement.)
        """
        available_rooms = list()
        room_listing_section = self.__get_room_listing_section()
        location_div_list = room_listing_section.find_elements(
            By.XPATH,
            ".//div[contains(@class, 'room_listing_bg')]"
        )
        for location_div in location_div_list:
            house_name = location_div.find_element(
                By.XPATH,
                ".//div[1]/h2[@class='room_data_headline']"
            ).text
            room_list_table = location_div.find_element(
                By.XPATH,
                ".//table[contains(@class, 'room_data_table')]"
            )
            room_tr_list = room_list_table.find_elements(By.TAG_NAME, "tr")
            # Drop the header row of each table.
            room_tr_list = room_tr_list[1:]
            for room_tr in room_tr_list:
                room_type = room_tr.find_element(By.XPATH, ".//td[1]").text
                number_of_persons = room_tr.find_element(By.XPATH, ".//td[2]").text
                free_at = room_tr.find_element(By.XPATH, ".//td[3]").text
                # Insert ", " after the first whitespace-free token of the
                # availability text.
                free_at = re.sub(r'^([^\s]*)\s+', r'\1, ', free_at)
                price_euro = room_tr.find_element(By.XPATH, ".//td[4]").text
                size_square_meter = room_tr.find_element(By.XPATH, ".//td[5]").text
                floor = room_tr.find_element(By.XPATH, ".//td[6]").text
                # Rows without a selection radio are not bookable and are skipped.
                selection_radios = room_tr.find_elements(By.XPATH, ".//td[7]/input[@type='radio']")
                if len(selection_radios) > 0:
                    radio_value = selection_radios[0].get_attribute("value")
                    available_rooms.append(
                        RoomInfo(
                            house_name,
                            room_type,
                            number_of_persons,
                            free_at,
                            price_euro,
                            size_square_meter,
                            floor,
                            radio_value
                        )
                    )
        return available_rooms

    url = property(get_url)
    xpath_next_button = property(get_xpath_next_button)
    xpath_english_lang_label = property(get_xpath_english_lang_label)
    value_target_room_radio = property(get_value_target_room_radio, set_value_target_room_radio)

    # private

    def __set_attributes(self):
        # Hard-coded page URL and XPath locators for the page's elements.
        self.__page_url = "https://reservation.house-of-nations.de/hon/whm_showunit.php"
        self.__xpath_step_2_div = "/html/body/header/div/div[3]/div[2]"
        self.__xpath_next_button = "/html/body/form/section[2]/div/div[2]/input[2]"
        self.__xpath_english_lang_label = "/html/body/header/div/div[1]/form/label[2]"
        self.__xpath_room_listing_section = "//*[@id='content']"

    def __get_step_2_div(self) -> webelement:
        return self.__driver.find_element_by_xpath(
            self.__xpath_step_2_div
        )

    def __get_room_listing_section(self) -> webelement:
        return self.__driver.find_element_by_xpath(
            self.__xpath_room_listing_section
        )
|
def find_the_gap(M, N, R):
    """
    Return the largest "first unreachable value" over every set of N distinct
    numbers drawn from ]0, M], combining them with the operators in R.

    :param M: upper bound (inclusive) of the candidate numbers
    :param N: how many numbers each set contains
    :param R: iterable of operator characters (other characters, e.g. spaces,
              are simply ignored by gap())
    """
    operateurs = list(R)
    meilleur = 0
    for nombres in listes(0, M, N):
        if nombres is None:
            # listes() signals an impossible request (M < N) with None.
            continue
        possibles = [False] * 170  # Maximum = (8+9) * 10 / 1 = 170
        for perm in permutations(nombres):
            gap(possibles, perm, operateurs)
        for valeur in range(1, 171):
            if not possibles[valeur - 1]:
                # First gap for this set. BUGFIX: the original only broke out
                # when the gap also improved the maximum, so larger *later*
                # gaps of the same set could wrongly inflate the result.
                meilleur = max(meilleur, valeur)
                break
    return meilleur
def permutations(l):
    """Yield every ordering of the list *l*, each as a fresh list."""
    if l == []:
        yield []
    else:
        for position, chosen in enumerate(l):
            remaining = l[:position] + l[position + 1:]
            for tail in permutations(remaining):
                yield [chosen] + tail
def listes(Mi, Ma, N):
    """Liste les listes des nombres possibles entre ]Mi, Ma]

    Yields every strictly increasing list of N integers taken from ]Mi, Ma];
    yields a single None when fewer than N values remain (dead branch marker
    that callers must filter out).
    """
    if N == 0:
        yield []
    elif Ma - Mi < N:
        # Not enough candidates left to pick N of them.
        yield None
    else:
        candidat = Mi + 1
        # Combinations that include `candidat`.
        for suite in listes(candidat, Ma, N - 1):
            if suite is not None:
                yield [candidat] + suite
        # Combinations that skip `candidat`.
        for suite in listes(candidat, Ma, N):
            if suite is not None:
                yield suite
def gap(possibles, nombres, operateurs, operandes=None, s=""):
    """
    Teste toutes les combinaisons d'expression en utilisant la syntaxe
    polonaise inversée.

    Explores every reverse-Polish expression built from `nombres` (pushed in
    order, each optionally skipped) and the consumable `operateurs`, marking
    each reachable value v in `possibles[v - 1]`.

    :param possibles: boolean table mutated in place
    :param nombres: numbers not yet pushed onto the operand stack
    :param operateurs: operators still available ('+', '-'/'−', '*', '/', '%')
    :param operandes: current operand stack (None starts a fresh stack —
        avoids the original mutable-default-argument pitfall)
    :param s: textual trace of the expression built so far (debugging aid)
    """
    if operandes is None:
        operandes = []

    if len(operandes) == 1:
        valeur = operandes[0]
        # BUGFIX: subtraction can yield 0, which previously indexed
        # possibles[-1] and falsely marked the last slot. Guard the range.
        if 1 <= valeur <= len(possibles):
            possibles[valeur - 1] = True

    def supr_liste(liste, item):
        """Return `liste` with every occurrence of `item` removed."""
        return [x for x in liste if x != item]

    if len(operandes) >= 2:
        op1 = operandes[-2]
        op2 = operandes[-1]
        reste = operandes[:-2]
        for o in operateurs:
            # BUGFIX: consume the operator actually matched (the original
            # removed '-' even when the unicode '−' was used, so that
            # operator was never consumed).
            restants = supr_liste(operateurs, o)
            if o == '/':
                # BUGFIX: integer division (op1 // op2) keeps the stack
                # integral; op1 / op2 produced floats under Python 3 and
                # crashed the final list indexing. Division only when exact
                # and op2 != 0.
                if op2 != 0 and op1 % op2 == 0:
                    gap(possibles, nombres, restants,
                        reste + [op1 // op2], s + ' /')
            elif o == '−' or o == '-':
                # Only non-negative intermediate results are kept.
                if op1 - op2 >= 0:
                    gap(possibles, nombres, restants,
                        reste + [op1 - op2], s + ' -')
            elif o == '+':
                gap(possibles, nombres, restants,
                    reste + [op1 + op2], s + ' +')
            elif o == '*':
                gap(possibles, nombres, restants,
                    reste + [op1 * op2], s + ' *')
            elif o == '%':
                # Guard modulo by zero (0 can appear via subtraction).
                if op2 != 0:
                    gap(possibles, nombres, restants,
                        reste + [op1 % op2], s + ' %')

    if nombres != []:
        # Either push the next number onto the stack...
        gap(possibles, nombres[1:], operateurs, operandes + [nombres[0]],
            s + ' ' + str(nombres[0]))
        # ...or skip it entirely.
        gap(possibles, nombres[1:], operateurs, operandes, s)
# Driver: M=5, N=2, operators '+', '*' and unicode minus '−' (U+2212);
# spaces in the string are harmless — gap() matches no operator for them.
print (find_the_gap(5, 2, "+ * −"))
#possibles = [False] * 170 # Maximum = (8+9) * 10 / 1 = 170
#for perm in permutations([1, 3]):
    #gap(possibles, perm, ['+', '-', '*'])
#for i in range(1, 171):
    #if not possibles[i-1]:
        #print (i)
        #break
23,374 | c32926447cf8d335a11a5ff286638cd958fc33af | ###############################################################################
# For copyright and license notices, see __manifest__.py file in root directory
###############################################################################
from odoo import _, fields, models
from odoo.exceptions import UserError
class PosOrder(models.Model):
    _inherit = 'pos.order'

    def _prepare_account_move_and_lines(self, session=None, move=None):
        """
        Build the account move and the grouped journal-item values for the
        POS orders in ``self``.

        :param session: optional POS session; when given, every order must
            belong to it (UserError otherwise)
        :param move: optional existing account move; when None, one is
            created from the first eligible order's journal and then reused
        :return: dict with ``grouped_data`` (journal-item values grouped by
            key) and ``move`` (the account move they target)
        """
        def _flatten_tax_and_children(taxes, group_done=None):
            # Expand 'group' taxes into themselves plus all of their
            # children, recursively, visiting each group tax only once.
            children = self.env['account.tax']
            if group_done is None:
                group_done = set()
            for tax in taxes.filtered(lambda t: t.amount_type == 'group'):
                if tax.id not in group_done:
                    group_done.add(tax.id)
                    children |= _flatten_tax_and_children(
                        tax.children_tax_ids, group_done)
            return taxes + children
        # Tricky, via the workflow, we only have one id in the ids variable
        # NOTE(review): the string below is a plain no-op expression, not this
        # method's docstring (it comes after the nested function above).
        """Create a account move line of order grouped by products or not."""
        IrProperty = self.env['ir.property']
        ResPartner = self.env['res.partner']
        if session and not all(session.id == order.session_id.id
                               for order in self):
            raise UserError(_('Selected orders do not have the same session!'))
        grouped_data = {}
        have_to_group_by = session and session.config_id.group_by or False
        get_param = self.env['ir.config_parameter'].sudo().get_param
        # Rounding method: system parameter override, falling back to the
        # session company's tax rounding setting (when a session is given).
        rounding_method = get_param(
            'pos_rounding_method.rounding_method', session and
            session.config_id.company_id.tax_calculation_rounding_method)

        def add_anglosaxon_lines(grouped_data):
            # Add the anglo-saxon counterpart lines for each grouped product
            # line. NOTE(review): this closure reads ``order`` and calls
            # ``insert_data``, both bound inside the for-loop below; when it
            # runs after the loop they refer to the LAST order processed —
            # confirm that is intended.
            Product = self.env['product.product']
            Analytic = self.env['account.analytic.account']
            for product_key in list(grouped_data.keys()):
                if product_key[0] == 'product':
                    for line in grouped_data[product_key]:
                        product = Product.browse(line['product_id'])
                        # In the SO part, the entries will be inverted by
                        # function compute_invoice_totals
                        price_unit = self._get_pos_anglo_saxon_price_unit(
                            product, line['partner_id'], line['quantity'])
                        account_analytic = Analytic.browse(
                            line.get('analytic_account_id'))
                        res = Product._anglo_saxon_sale_move_lines(
                            line['name'], product, product.uom_id,
                            line['quantity'], price_unit,
                            fiscal_position=order.fiscal_position_id,
                            account_analytic=account_analytic)
                        if res:
                            line1, line2 = res
                            line1 = Product._convert_prepared_anglosaxon_line(
                                line1, line['partner_id'])
                            insert_data('counter_part', {
                                'name': line1['name'],
                                'account_id': line1['account_id'],
                                'credit': line1['credit'] or 0.0,
                                'debit': line1['debit'] or 0.0,
                                'partner_id': line1['partner_id']
                            })
                            line2 = Product._convert_prepared_anglosaxon_line(
                                line2, line['partner_id'])
                            insert_data('counter_part', {
                                'name': line2['name'],
                                'account_id': line2['account_id'],
                                'credit': line2['credit'] or 0.0,
                                'debit': line2['debit'] or 0.0,
                                'partner_id': line2['partner_id']
                            })
        # Only orders without a move yet, or still in 'paid', are processed.
        for order in self.filtered(lambda o: not o.account_move or
                                   o.state == 'paid'):
            current_company = order.sale_journal.company_id
            account_def = IrProperty.get(
                'property_account_receivable_id', 'res.partner')
            order_account = (order.partner_id.property_account_receivable_id.id
                             or account_def and account_def.id)
            partner_id = ResPartner._find_accounting_partner(
                order.partner_id).id or False
            if move is None:
                # Create an entry for the sale
                journal_id = self.env['ir.config_parameter'].sudo().get_param(
                    'pos.closing.journal_id_%s' % current_company.id,
                    default=order.sale_journal.id)
                move = self._create_account_move(
                    order.session_id.start_at, order.name, int(journal_id),
                    order.company_id.id)

            def insert_data(data_type, values):
                # Accumulate `values` into grouped_data, merging amounts into
                # the first entry of the group when grouping is enabled.
                # if have_to_group_by:
                values.update({
                    'move_id': move.id,
                })
                key = self._get_account_move_line_group_data_type_key(
                    data_type, values, {'rounding_method': rounding_method})
                if not key:
                    return
                grouped_data.setdefault(key, [])
                if have_to_group_by:
                    if not grouped_data[key]:
                        grouped_data[key].append(values)
                    else:
                        current_value = grouped_data[key][0]
                        current_value['quantity'] = current_value.get(
                            'quantity', 0.0) + values.get('quantity', 0.0)
                        current_value['credit'] = current_value.get(
                            'credit', 0.0) + values.get('credit', 0.0)
                        current_value['debit'] = current_value.get(
                            'debit', 0.0) + values.get('debit', 0.0)
                        if 'currency_id' in values:
                            current_value['amount_currency'] = \
                                current_value.get('amount_currency', 0.0) + \
                                values.get('amount_currency', 0.0)
                        # With global rounding, normalize a merged tax line so
                        # only one of debit/credit is non-zero.
                        if key[0] == 'tax' and \
                                rounding_method == 'round_globally':
                            if current_value['debit'] - \
                                    current_value['credit'] > 0:
                                current_value['debit'] = \
                                    current_value['debit'] - \
                                    current_value['credit']
                                current_value['credit'] = 0
                            else:
                                current_value['credit'] = \
                                    current_value['credit'] - \
                                    current_value['debit']
                                current_value['debit'] = 0
                else:
                    grouped_data[key].append(values)
            # because of the weird way the pos order is written, we need to
            # make sure there is at least one line, because just after the
            # 'for' loop there are references to 'line' and 'income_account'
            # variables (that are set inside the for loop)
            # TOFIX: a deep refactoring of this method (and class!) is needed
            # in order to get rid of this stupid hack
            assert order.lines, _(
                'The POS order must have lines when calling this method')
            # Create an move for each order line
            cur = order.pricelist_id.currency_id
            cur_company = order.company_id.currency_id
            amount_cur_company = 0.0
            date_order = order.date_order.date() if order.date_order else \
                fields.Date.today()
            move_lines = []
            for line in order.lines:
                # Convert the subtotal to the company currency when needed.
                if cur != cur_company:
                    amount_subtotal = cur._convert(
                        line.price_subtotal, cur_company, order.company_id,
                        date_order)
                else:
                    amount_subtotal = line.price_subtotal
                # Search for the income account
                if line.product_id.property_account_income_id.id:
                    income_account = (line.product_id
                                      .property_account_income_id.id)
                elif (line.product_id.categ_id.
                        property_account_income_categ_id.id):
                    income_account = (line.product_id.categ_id
                                      .property_account_income_categ_id.id)
                else:
                    raise UserError(
                        _('Please define income '
                          'account for this product: "%s" (id:%d).')
                        % (line.product_id.name, line.product_id.id))
                name = line.product_id.name
                if line.notice:
                    # add discount reason in move
                    name = name + ' (' + line.notice + ')'
                # Create a move for the line for the order line
                # Just like for invoices, a group of taxes must be present on
                # this base line
                # As well as its children
                base_line_tax_ids = _flatten_tax_and_children(
                    line.tax_ids_after_fiscal_position).filtered(
                    lambda tax: tax.type_tax_use in ['sale', 'none'])
                data = {
                    'name': name,
                    'quantity': line.qty,
                    'product_id': line.product_id.id,
                    'account_id': income_account,
                    'analytic_account_id': self._prepare_analytic_account(
                        line),
                    'credit': ((amount_subtotal > 0) and amount_subtotal) or 0.0,
                    'debit': ((amount_subtotal < 0) and -amount_subtotal) or 0.0,
                    'tax_ids': [(6, 0, base_line_tax_ids.ids)],
                    'partner_id': partner_id
                }
                if cur != cur_company:
                    data['currency_id'] = cur.id
                    data['amount_currency'] = -abs(line.price_subtotal) if \
                        data.get('credit') else abs(line.price_subtotal)
                    amount_cur_company += data['credit'] - data['debit']
                insert_data('product', data)
                move_lines.append({'data_type': 'product', 'values': data})
                # Create the tax lines
                taxes = line.tax_ids_after_fiscal_position.filtered(
                    lambda t: t.company_id.id == current_company.id)
                if not taxes:
                    continue
                price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
                for tax in taxes.compute_all(price, cur, line.qty)['taxes']:
                    if cur != cur_company:
                        # With global rounding, keep tax amounts unrounded
                        # here; they are rounded once per order further down.
                        round_tax = False if rounding_method == \
                            'round_globally' else True
                        amount_tax = cur._convert(
                            tax['amount'], cur_company, order.company_id,
                            date_order, round=round_tax)
                        # amount_tax = cur.with_context(
                        #     date=date_order).compute(tax['amount'], cur_company,
                        #     round=round_tax)
                    else:
                        amount_tax = tax['amount']
                    data = {
                        'name': _('Tax') + ' ' + tax['name'],
                        'product_id': line.product_id.id,
                        'quantity': line.qty,
                        'account_id': tax['account_id'] or income_account,
                        'credit': ((amount_tax > 0) and amount_tax) or 0.0,
                        'debit': ((amount_tax < 0) and -amount_tax) or 0.0,
                        'tax_line_id': tax['id'],
                        'partner_id': partner_id,
                        'order_id': order.id
                    }
                    if cur != cur_company:
                        data['currency_id'] = cur.id
                        data['amount_currency'] = - \
                            abs(tax['amount']) if data.get(
                            'credit') else abs(tax['amount'])
                        amount_cur_company += data['credit'] - data['debit']
                    insert_data('tax', data)
                    move_lines.append({'data_type': 'tax', 'values': data})
            # round tax lines per order
            if rounding_method == 'round_globally':
                for group_key, group_value in grouped_data.items():
                    if group_key[0] == 'tax':
                        for line in group_value:
                            line['credit'] = cur_company.round(line['credit'])
                            line['debit'] = cur_company.round(line['debit'])
                            if line.get('currency_id'):
                                line['amount_currency'] = cur.round(
                                    line.get('amount_currency', 0.0))
            # Receivable counterpart balancing the product and tax lines.
            receivable_amounts = order._get_amount_receivable(move_lines)
            data = {
                'name': _('Trade Receivables'),  # order.name,
                'account_id': order_account,
                'credit': ((receivable_amounts['amount'] < 0) and
                           -receivable_amounts['amount']) or 0.0,
                'debit': ((receivable_amounts['amount'] > 0) and
                          receivable_amounts['amount']) or 0.0,
                'partner_id': partner_id
            }
            if receivable_amounts['amount_currency']:
                data['currency_id'] = cur.id
                data['amount_currency'] = -abs(
                    receivable_amounts['amount_currency']) if data.get(
                    'credit') else abs(receivable_amounts['amount_currency'])
            insert_data('counter_part', data)
            order.write({'state': 'done', 'account_move': move.id})
        # NOTE(review): runs after the loop, so `order` is the last order
        # processed (see add_anglosaxon_lines above).
        if self and order.company_id.anglo_saxon_accounting:
            add_anglosaxon_lines(grouped_data)
        return {
            'grouped_data': grouped_data,
            'move': move,
        }
|
23,375 | 482e711c76858863cf114450fc73602c4efe778a | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-18 21:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename Change.new_status to current_status and alter old_status
    (IntegerField, blank allowed, default None, four status choices)."""

    dependencies = [
        ('items', '0003_remove_item_status'),
    ]

    operations = [
        # Rename keeps the stored values; only the column name changes.
        migrations.RenameField(
            model_name='change',
            old_name='new_status',
            new_name='current_status',
        ),
        migrations.AlterField(
            model_name='change',
            name='old_status',
            field=models.IntegerField(blank=True, choices=[(1, 'need'), (2, 'pantry'), (3, 'fridge'), (4, 'gone')], default=None),
        ),
    ]
|
23,376 | ddcf220460da9317c06a4d69b71df4849cdc3476 | from django.contrib.auth.models import User
from rest_framework import authentication
from rest_framework import exceptions
import datetime
def jwt_response_payload_handler(token, user=None, request=None):
    """Build the JWT login response payload.

    Expired accounts get an error payload instead of a token. (A disabled
    900-day extension experiment existed here; see version history.)
    """
    profile = user.userprofile
    # Compare naive datetimes: the stored expiry is stripped of tzinfo.
    expired = datetime.datetime.now() > profile.expire_time.replace(tzinfo=None)
    if expired:
        return {
            'expire': True,
            'status': 'error',
            'message': 'expire account',
        }
    return {
        'token': token,
        'role': profile.role,
        'status': 'ok',
    }
def jwt_payload_handler(user):
    """Return the claims embedded in the user's JWT."""
    return {
        'username': user.username,
        'user_id': user.pk,
        'role': user.userprofile.role,
    }
|
23,377 | feeeaded423528a76987a73f9156c41cc887b3eb | from utils import generate_gauss_data, train_test_split, plot_gaussian, plot_gif
import classification_two_layers as nn
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def main():
    """Train a two-layer regression network on a synthetic 2-D Gaussian grid
    and plot train/validation MSE for each configured data split.

    The commented sections below are alternative experiments (network size,
    learning rate, 3-D surface plots) kept for reference.
    """
    # Sample a grid over [-5, 5] x [-5, 5] with step 0.5.
    x = y = {"start": -5, "end": 5, "steps": 0.5}
    data = generate_gauss_data(x, y)
    inputs, targets = data["inputs"], data['targets']
    # NOTE(review): this initial 0.20 split is overwritten inside the loop
    # below before being used — presumably leftover; confirm and remove.
    x_train, x_val, y_train, y_val = train_test_split(inputs, targets, 0.20)
    #################### NETWORK SIZE ANALYSIS #####################
    # losses, batch_losses = [], []
    # for layer_size in range(1, 25):
    #     network = nn.NueralNet(x_train, y_train, hidden_layer_size = layer_size, output_layer_size = 1,
    #                            is_binary = False)
    #     nnTrainResults = network.train_network(epochs = 400)
    #
    #     results = network.fowardPass(inputs, targets, include_bias = True)
    #     losses.append(results['loss'])
    #
    #     batch_out = np.reshape(results["Yp"], (data['size'], data['size']))
    #     # plot_gaussian(data, batch_out, f"Gaussian Out - hidden_layer_size {layer_size}",
    #     #               gif = {"epoch": 1000, "seq": 0})
    #     batch_losses.append(nnTrainResults['batch_losses'])
    #
    # for i in [2, 4, 5, 7, 10, 15, 18, 23]:
    #     # Plot results.
    #     plt.plot(batch_losses[i], label = f" N. Hidden Layer {i}")
    #     plt.xlabel("Epochs")
    #     plt.ylabel("Mean Squared Error loss")
    #     plt.legend(loc = 'best')
    # plt.show()
    #################### SPLIT ANALYSIS #########################
    # `split` is the held-out fraction, hence the title reports 1 - split
    # as the training share.
    split_ratios = [0.8]
    hidden_layer_shape = 15
    for split in split_ratios:
        x_train, x_val, y_train, y_val = train_test_split(inputs, targets, split)
        network = nn.NueralNet(x_train, y_train, hidden_layer_size = hidden_layer_shape, output_layer_size = 1,
                               is_binary = False)
        losses = network.train_network(1000, inputs, targets)
        plt.plot(losses["val_losses"], label = "Validation loss")
        plt.plot(losses["epoch_losses"], label = "Train loss")
        plt.xlabel("Epochs")
        plt.ylabel("Mean Squared Error loss")
        plt.legend()
        plt.title(f"Data Split - Training: {round((1 - split) * 100)}%")
        plt.show()
    ############# LEARNING RATE ANALYSIS ###############
    # hidden_layer_shape = 15
    # lrs = [0.001, 0.005, 0.01, 0.05, 0.1]
    #
    # for lr in lrs:
    #     x_train, x_val, y_train, y_val = train_test_split(inputs, targets, 0.2)
    #     network = nn.NueralNet(x_train, y_train, hidden_layer_size = hidden_layer_shape, output_layer_size = 1,
    #                            is_binary = False, lr = lr)
    #     losses = network.train_network(500, inputs, targets)
    #
    #     plt.plot(losses["batch_losses"], label = f"Learning Rate: {lr}")
    #     plt.xlabel("Epochs")
    #     plt.ylabel("Mean Squared Error loss")
    #     plt.legend()
    #     plt.title(f"MSE by Learning Rate")
    # plt.show()
    #################### PLOT 3d #####################
    # x_train = inputs
    # y_train = targets
    # hidden_layer_shape = 8
    #
    # network = nn.NueralNet(x_train, y_train, hidden_layer_size = hidden_layer_shape, output_layer_size = 1,
    #                        is_binary = False, lr = 0.001)
    # nnTrainResults = network.train_network(epochs = 1000)
    #
    # results = network.fowardPass(inputs, targets, include_bias = True)
    #
    # for epoch, batch in enumerate(nnTrainResults["batch_out"]):
    #     batch_out = np.reshape(batch, (data['size'], data['size']))
    #     plot_gaussian(data, batch_out, f"Gaussian Out - epoch:{epoch}", gif = {"epoch": epoch, "seq": 0})
    # plot_gif("gaussian_batch", repeat_frames = 1)


if __name__ == '__main__':
    main()
|
23,378 | 071b2e852a15340430fe0e462cfcec9419bf4df4 | # -*- coding: utf-8 -*-
"""genetic-algorithm-python-tutorial.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/161ijkvn8wG_seVtQexm-p3fW3r5p8s_x
# Genetic Algorithm Implementation with Python
* Tutorial: https://towardsai.net/p/computer-science/genetic-algorithm-ga-introduction-with-example-code-e59f9bc58eaf
* Github: https://github.com/towardsai/tutorials/tree/master/genetic-algorithm-tutorial
The Genetic Algorithm is a class of evolutionary algorithm that is broadly inspired by biological evolution. We all know evolution, it is a selection of parents, reproduction, and mutation of offsprings. The main aim of evolution is to reproduce offsprings that are biologically better than their parents. Genetic algorithm is mainly based on natural selection and it tries to simulate the theory of evolution.
"""
import numpy as np
import matplotlib.pyplot as plt
import copy
# cost function
def sphere(x):
    """Sphere cost function: the sum of squared chromosome genes.

    Each individual's cost is computed from this function; lower is better.
    """
    return (x ** 2).sum()
def roulette_wheel_selection(p):
    """Fitness-proportionate (roulette wheel) parent selection.

    Spin a random point on the cumulative probability wheel and return the
    first index whose cumulative sum reaches it.
    """
    wheel = np.cumsum(p)
    spin = sum(p) * np.random.rand()
    return np.argwhere(spin <= wheel)[0][0]
def crossover(p1, p2):
    """Uniform crossover of two parent chromosomes.

    A random blending factor `alpha` (one value per gene) mixes each pair of
    parent genes, producing two complementary children.
    """
    child_a = copy.deepcopy(p1)
    child_b = copy.deepcopy(p2)
    # One blend coefficient per gene (same RNG draw as the original).
    alpha = np.random.uniform(0, 1, *(child_a['position'].shape))
    child_a['position'] = alpha * p1['position'] + (1 - alpha) * p2['position']
    child_b['position'] = alpha * p2['position'] + (1 - alpha) * p1['position']
    return child_a, child_b
def mutate(c, mu, sigma):
    """Return a mutated deep copy of child chromosome `c`.

    mu: mutation rate — fraction of genes modified
    sigma: step size of each Gaussian mutation
    """
    mutant = copy.deepcopy(c)
    # Boolean mask selecting which genes mutate.
    mask = np.random.rand(*(c['position'].shape)) <= mu
    targets = np.argwhere(mask)
    mutant['position'][targets] += sigma * np.random.randn(*targets.shape)
    return mutant
def bounds(c, varmin, varmax):
    """Clamp every gene of chromosome `c` into [varmin, varmax], in place."""
    c['position'] = np.clip(c['position'], varmin, varmax)
def sort(arr):
    """Bubble-sort individuals by ascending 'cost', in place.

    Works on any integer-indexed container (the caller passes the population
    dict keyed 0..n-1); returns the same container.
    """
    size = len(arr)
    for done in range(size - 1):
        for idx in range(size - 1 - done):
            if arr[idx]['cost'] > arr[idx + 1]['cost']:
                arr[idx], arr[idx + 1] = arr[idx + 1], arr[idx]
    return arr
def ga(costfunc, num_var, varmin, varmax, maxit, npop, num_children, mu, sigma, beta):
    """
    Run a real-valued genetic algorithm minimizing `costfunc`.

    :param costfunc: cost function to minimize (lower is better)
    :param num_var: number of decision variables per individual
    :param varmin: lower bound of each gene
    :param varmax: upper bound of each gene
    :param maxit: number of generations
    :param npop: population size kept between generations
    :param num_children: offspring produced per generation (even)
    :param mu: mutation rate (fraction of genes modified)
    :param sigma: mutation step size
    :param beta: selection pressure for the roulette-wheel probabilities
    :return: tuple (population, best_solution, best_cost_per_generation)
    """
    # BUGFIX: the original reused `bestsol_cost` both as a float and as the
    # best individual dict, so the very first child always replaced the
    # initial-generation best regardless of cost. Track them separately.
    population = {}
    bestsol = None         # best individual found so far (dict)
    bestsol_cost = np.inf  # its cost, always a float

    # Initialize population (generation 0).
    for i in range(npop):
        position = np.random.uniform(varmin, varmax, num_var)
        population[i] = {'position': position, 'cost': costfunc(position)}
        if population[i]['cost'] < bestsol_cost:
            bestsol = copy.deepcopy(population[i])
            bestsol_cost = bestsol['cost']

    bestcost = np.empty(maxit)  # best cost recorded per generation

    for it in range(maxit):
        # Roulette-wheel probabilities: exp(-beta * normalized cost).
        costs = np.array([population[i]['cost'] for i in range(len(population))])
        avg_cost = np.mean(costs)
        if avg_cost != 0:
            costs = costs / avg_cost
        probs = np.exp(-beta * costs)

        for _ in range(num_children // 2):  # two offspring per crossover
            parent1 = population[roulette_wheel_selection(probs)]
            parent2 = population[roulette_wheel_selection(probs)]

            c1, c2 = crossover(parent1, parent2)
            c1 = mutate(c1, mu, sigma)
            c2 = mutate(c2, mu, sigma)
            bounds(c1, varmin, varmax)
            bounds(c2, varmin, varmax)

            # Evaluate both offspring and update the running best.
            for child in (c1, c2):
                child['cost'] = costfunc(child['position'])
                if child['cost'] < bestsol_cost:
                    bestsol = copy.deepcopy(child)
                    bestsol_cost = bestsol['cost']

            population[len(population)] = c1
            population[len(population)] = c2

        # Merge, Sort and Select: BUGFIX — the original never selected, so
        # the population grew by num_children every generation. Keep only
        # the best npop individuals.
        population = sort(population)
        population = {i: population[i] for i in range(npop)}

        bestcost[it] = bestsol_cost
        print('Iteration {}: Best Cost = {}'.format(it, bestcost[it]))

    return (population, bestsol, bestcost)
# Problem definition
costfunc = sphere
num_var = 5   # number of decision variables
varmin = -10  # lower bound
varmax = 10   # upper bound

# GA Parameters
maxit = 501  # number of iterations
npop = 20    # initial population size
beta = 1
prop_children = 1  # proportion of children to population
num_children = int(np.round(prop_children * npop / 2) * 2)  # making sure it always an even number
mu = 0.2     # mutation rate 20%; 20% of 5 genes is 1, mutating ~1 gene
sigma = 0.1  # step size of mutation

# Run GA; returns (population, best_solution, best_cost_history)
out = ga(costfunc, num_var, varmin, varmax, maxit, npop, num_children, mu, sigma, beta)

# Results: plot the best-cost history (out[2])
plt.plot(out[2])
plt.xlim(0, maxit)
plt.xlabel('Generations')
plt.ylabel('Best Cost')
plt.title('Genetic Algorithm')
plt.grid(True)
# BUGFIX: `plt.show` was a bare attribute access and never displayed the figure.
plt.show()
|
# SECURITY FIX: eval() on raw user input executes arbitrary expressions;
# int() safely parses the expected whole number of seconds.
totalSeconds = int(input('Enter total number of seconds: '))
print('That is equal to', totalSeconds // 60, 'minutes and', totalSeconds % 60, 'seconds.')
23,380 | de87b4fda1bb5562721557e73b59b09eb509f278 |
import os
import glob
from T2_masks.train_2 import config, fetch_training_data_files
from unet3d.data import write_data_to_file, open_data_file
from unet3d.generator import get_validation_split
# Override the shared configuration imported from T2_masks.train_2 for this
# standalone split-generation run.
config["training_file"] = os.path.abspath("T2_debug_Train_ids.pkl")
# NOTE(review): the validation file is named "T1_..." although the data set
# is T2 — confirm this mismatch is intentional.
config["validation_file"] = os.path.abspath("T1_debug_Test_ids.pkl")
# Split fraction passed to get_validation_split as data_split; its exact
# direction (train vs. validation share) is defined by that helper — verify.
config["validation_split"] = 0.80
config["data_file"] = os.path.abspath("T2_25pts_resize.h5")
# Opened once at import time; get_validation_split reads subject ids from it.
data_file_opened = open_data_file(config["data_file"])


def main(overwrite=True):
    """Generate (or regenerate) the training/validation id split files from
    the opened data file and print both lists.

    :param overwrite: forwarded to get_validation_split; when True the split
        files are regenerated even if they already exist
    """
    training_list, validation_list = get_validation_split(data_file_opened,
                                                          data_split=config["validation_split"],
                                                          overwrite=overwrite,
                                                          training_file=config["training_file"],
                                                          validation_file=config["validation_file"])
    print('validation list is ', validation_list)
    print('Training list is ', training_list)


if __name__ == "__main__":
    main()
|
23,381 | bf382086b545e1772e6acf8d031a28061c5d8b95 | import datetime
import time
import sys
choice = raw_input("Please type 'in' or 'out' to clock or 'report' to generate an hours report: \n")
if choice == "in" or choice == "out":
with open("clocktimes.txt", "a") as clock_file:
if choice == "in":
print("In in")
clock_file.write("Clock In: " + time.strftime("%Y-%m-%d %H:%M") + "\n")
elif choice == "out":
print("In out")
clock_file.write("Clock Out: " + time.strftime("%Y-%m-%d %H:%M") + "\n")
elif choice == "report":
# Getting user input
start_date_str = raw_input("Please enter start date (YYYY-MM-DD): ")
end_date_str = raw_input("Please enter end date (YYYY-MM-DD): ")
# Looking at clock times
clocks = open("clocktimes.txt", "r")
# Transforming user input into dates
start_date = time.strptime(start_date_str, "%Y-%m-%d")
end_date = time.strptime(end_date_str, "%Y-%m-%d")
# Iterating through clock times to find start
current_str = clocks.readline()
current = time.strptime(current_str[10:20], "%Y-%m-%d")
print(time.strftime("%Y-%m-%d", current))
print(time.strftime("%Y-%m-%d", start_date))
print(time.strftime("%Y-%m-%d", end_date))
try:
while(current < start_date):
print (current < start_date)
current_str = clocks.readline()
if(current_str[7:9] == "Out"):
current_str = clocks.readline()
current = time.strptime(current_str[10:20], "%Y-%m-%d")
except:
# Exiting if invalid start date
print("Invalid Start Date entered.")
sys.exit(0)
# Opening report and putting header
file report = open("report.txt", "w")
report.write("Date\tIn\tOut\tHours Worked")
# Iterating through clocks and arranging them correctly
while (current - end_date > 0):
if (current_str[:8] == "Clock In"):
in_time = time.strptime(current_str[19:], "%H:%M")
report.write(current.strftime("%m/%d/%y\t%H:%M") + "\t")
current_str = clocks.readline()
if (current_str[:9] == "Clock Out"):
out_time = time.strptime(current_str[20:], "%H:%M")
report.write(" " + out_time.strftime("%H:%M") + "\t")
current_str = clocks.readline()
else:
report.write(" Did not clock out\n")
else:
|
23,382 | 05ad5c8fd44895a5e3846b7fcb674fe183c7d0be | #odd or even
x=int(input('ent the value: '))
if x%2==0:
print 'EVEN'
else:
print 'ODD'
|
23,383 | e8f43e9bb64815b2c8e0e5a61205653996234c2a | from .core_parser import CoreParser
from .zonal_mean_2d_parser import ZonalMean2dParser
from .meridional_mean_2d_parser import MeridionalMean2dParser
from .area_mean_time_series_parser import AreaMeanTimeSeriesParser
SET_TO_PARSER = {
'zonal_mean_xy': CoreParser,
'zonal_mean_2d': ZonalMean2dParser,
'meridional_mean_2d': MeridionalMean2dParser,
'lat_lon': CoreParser,
'polar': CoreParser,
'cosp_histogram': CoreParser,
'area_mean_time_series': AreaMeanTimeSeriesParser,
}
|
23,384 | 70fa28492d1e4405d225e5d433b024060f8af983 | import pandas as pd
import numpy as np
import math
from anytree import Node, RenderTree
from anytree.exporter import JsonExporter
import sys
import argparse
# returns tuple: (most frequent class in D, there is only one class in D)
def get_most_freq_class(D, c_name):
class_values = D[c_name].values
class_to_freq = {}
for data in class_values:
class_val = data
if class_val in class_to_freq:
class_to_freq[class_val] = class_to_freq[class_val] + 1
else:
class_to_freq[class_val] = 1
total = 0
max_freq = 0
max_class = None
for k, v in class_to_freq.items():
if v > max_freq:
max_freq = v
max_class = k
total += v
return (max_class, total == max_freq)
# uses information gain
# returns attr with largest gain else None if all gain < threshold
def select_split_attr(D, A, c_name, threshold, use_ratio, is_cont):
entropy = entropy_dataset(D, c_name)
max_gain = threshold
best_attr = None
best_split_value = 0
split_value = None
for attr in A:
if is_cont:
entropy_of_attr, split_value = entropy_attr_cont(D, c_name, attr)
else:
entropy_of_attr = entropy_attr(D, c_name, attr)
gain = entropy - entropy_of_attr
if use_ratio:
if entropy_of_attr == 0.0:
gain = sys.maxsize
else:
gain = gain/entropy_of_attr
if gain > max_gain:
max_gain = gain
best_attr = attr
best_split_value = split_value
#print(best_attr, best_split_value)
return best_attr, best_split_value
def entropy_dataset(df, category_variable):
entropy = 0
class_counts = df[category_variable].value_counts().tolist()
total_count = np.sum(class_counts)
for class_count in class_counts:
prob_of_class = class_count/total_count
entropy += prob_of_class * math.log(prob_of_class, 2)
entropy *= -1
return entropy
def entropy_attr(df, category_variable, attr):
entropy = 0
value_to_count = df[attr].value_counts().to_dict()
total_count = sum(value_to_count.values())
for value, count in value_to_count.items():
filtered_df = df[df[attr] == value]
filtered_entropy = entropy_dataset(filtered_df, category_variable)
entropy += filtered_entropy * (count/total_count)
#print("filtered_entropy:", filtered_entropy)
#print("count/total_count:", count/total_count)
return entropy
def entropy_attr_cont(df, category_variable, attr):
entropy = 0
value_to_count = df[attr].value_counts().to_dict()
total_count = sum(value_to_count.values())
cum_count = 0
max_entropy = 0
max_split = 0
for value in sorted(value_to_count.keys()):
cum_count = value_to_count[value]
filtered_df = df[df[attr] <= value]
filtered_entropy = entropy_dataset(filtered_df, category_variable)
entropy += filtered_entropy * (cum_count/total_count)
if entropy > max_entropy:
max_entropy = entropy
max_split = value
#print("max_entropy", max_entropy, "max_split", max_split, "entropy:", entropy)
return entropy, max_split
def build_decision_tree(dataset, attributes, tree, threshold, c_name, use_ratio, is_cont):
most_freq_class, is_only_class = get_most_freq_class(dataset,c_name)
if is_only_class or len(attributes) == 0:
leaf = Node(most_freq_class)
tree = leaf
return tree
else:
split_attr, split_value = select_split_attr(dataset, attributes, c_name, threshold, use_ratio, is_cont)
if split_attr is None:
leaf = Node(most_freq_class)
tree = leaf
return tree
else:
parent = Node(split_attr)
attr_val_to_data = {}
for index,data in dataset.iterrows():
attr_val = data[split_attr]
if attr_val not in attr_val_to_data:
attr_val_to_data[attr_val] = []
attr_val_to_data[attr_val].append(data)
for k, v in attr_val_to_data.items():
child = build_decision_tree(pd.DataFrame(data = v,
columns = attributes + [c_name]),
[attr for attr in attributes if attr != split_attr],
None,threshold,c_name, use_ratio, is_cont)
child.parent = parent
child.edge = k
return parent
def get_args():
parser = argparse.ArgumentParser(description='Build Decision Tree Input Parameters, see README')
parser.add_argument('-x', '--csv', required=True, help="Path to csv file of training entries")
parser.add_argument('-z', '--res', required=False, help="Path to optional restrictions file")
return vars(parser.parse_args())
def preprocess(csv_file, res_file):
is_iris = False
with open(csv_file) as f:
if "Iris" in f.readline():
is_iris = True
category_variable = "Class" if is_iris else "Vote"
if is_iris:
df = pd.read_csv(csv_file, names = ["Sepal Length", "Sepal Width",
"Pedal Length", "Pedal Width",
"Class"])
else:
df = pd.read_csv(csv_file, skiprows=[1,2])
if res_file:
with open(res_file) as f:
res = f.readline().split(',')
cols_to_drop = [i for i in range(len(res)) if int(res[i]) == 0]
df.drop(df.columns[cols_to_drop], inplace=True, axis=1)
if not is_iris:
df.drop("Id", inplace=True, axis=1)
return df, category_variable
if __name__ == '__main__':
args = get_args()
csv_file = args['csv']
res_file = args['res']
df, category_variable = preprocess(csv_file, res_file)
is_cont = category_variable == "Class"
tree = build_decision_tree(df, list(df.columns[:-1]), None, .01,
category_variable, False, is_cont)
exporter = JsonExporter(indent=2)
print(exporter.export(tree))
#print(RenderTree(tree))
|
23,385 | ae348c51d5598d4910259ae94703e3e174778cd9 | # Enter your code here. Read input from STDIN. Print output to STDOUT
import time
def commonChild(a, b):
lengths = [[0 for j in range(len(b)+1)] for i in range(len(a)+1)]
for i, x in enumerate(a):
for j, y in enumerate(b):
if x == y:
lengths[i+1][j+1] = lengths[i][j] + 1
else:
lengths[i+1][j+1] = max(lengths[i+1][j], lengths[i][j+1])
return lengths[-1][-1]
start_time = time.time()
s1 = input()
s2 = input()
result = commonChild(s1, s2)
print(result)
# print('Elapsed time %f s' % (time.time() - start_time))
|
23,386 | 1896c7bdb71db326fd64f7423aec46ff0a41f9bb | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'checkDialog.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(370, 270)
Dialog.setStyleSheet("")
self.widget = QtWidgets.QWidget(Dialog)
self.widget.setGeometry(QtCore.QRect(5, 5, 360, 260))
self.widget.setStyleSheet("\n"
"background-color:rgba(255, 244, 245, 240);")
self.widget.setObjectName("widget")
self.acceptButton = QtWidgets.QPushButton(self.widget)
self.acceptButton.setGeometry(QtCore.QRect(140, 220, 80, 30))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(10)
self.acceptButton.setFont(font)
self.acceptButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.acceptButton.setStyleSheet("\n"
"QPushButton{background-color:rgb(228, 198, 208);;border:1px solid rgb(180,180,180);border-radius:5px;color:rgb(100,100,100)}\n"
"\n"
"QPushButton:hover{color: rgb(249, 144, 111);background-color:rgb(220, 190, 200)}")
self.acceptButton.setAutoDefault(False)
self.acceptButton.setFlat(False)
self.acceptButton.setObjectName("acceptButton")
self.rejectButton = QtWidgets.QPushButton(self.widget)
self.rejectButton.setGeometry(QtCore.QRect(250, 220, 80, 30))
font = QtGui.QFont()
font.setFamily("华文细黑")
font.setPointSize(10)
self.rejectButton.setFont(font)
self.rejectButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.rejectButton.setStyleSheet("QPushButton:hover{color: rgb(249, 144, 111);background:rgb(250,250,250)}\n"
"QPushButton{border:1px solid rgb(180,180,180);\n"
"border-radius:5px;background-color:white;color:rgb(100,100,100)}")
self.rejectButton.setAutoDefault(False)
self.rejectButton.setFlat(False)
self.rejectButton.setObjectName("rejectButton")
self.label = QtWidgets.QLabel(self.widget)
self.label.setGeometry(QtCore.QRect(25, 80, 310, 90))
font = QtGui.QFont()
font.setFamily("华文细黑")
font.setPointSize(12)
self.label.setFont(font)
self.label.setStyleSheet("color:rgb(90,90,90);\n"
"background-color:transparent\n"
"\n"
"")
self.label.setText("")
self.label.setScaledContents(False)
self.label.setWordWrap(True)
self.label.setObjectName("label")
self.line = QtWidgets.QFrame(self.widget)
self.line.setGeometry(QtCore.QRect(0, 200, 360, 1))
self.line.setStyleSheet("background:rgb(200,200,200)")
self.line.setFrameShadow(QtWidgets.QFrame.Raised)
self.line.setLineWidth(0)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setObjectName("line")
self.line_2 = QtWidgets.QFrame(self.widget)
self.line_2.setGeometry(QtCore.QRect(0, 50, 360, 1))
self.line_2.setStyleSheet("background:rgb(200,200,200)")
self.line_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.line_2.setLineWidth(0)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setObjectName("line_2")
self.title = QtWidgets.QLabel(self.widget)
self.title.setGeometry(QtCore.QRect(25, 10, 310, 30))
font = QtGui.QFont()
font.setFamily("黑体")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.title.setFont(font)
self.title.setStyleSheet("color:rgb(80,80,80);\n"
"background-color:transparent")
self.title.setText("")
self.title.setObjectName("title")
self.frame = QtWidgets.QFrame(Dialog)
self.frame.setGeometry(QtCore.QRect(0, 0, 370, 270))
self.frame.setStyleSheet("QFrame { \n"
" \n"
" background-color: transparent;\n"
" border-top: 5px solid qlineargradient(y0:0, y1:1,\n"
" stop: 0 rgb(160, 160, 160), stop: 1 transparent);\n"
" \n"
" border-left: 5px solid qlineargradient(x0:0, x1:1,\n"
" stop: 0 rgb(160, 160, 160), stop: 1 transparent);\n"
" border-bottom: 5px solid qlineargradient(y0:0, y1:1,\n"
" stop: 0 transparent, stop: 1 rgb(160, 160, 160));\n"
" border-right: 5px solid qlineargradient(x0:0, x1:1,\n"
" stop: 0 transparent, stop: 1 rgb(160, 160, 160));\n"
";}")
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.frame.raise_()
self.widget.raise_()
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.acceptButton.setText(_translate("Dialog", "删除"))
self.acceptButton.setShortcut(_translate("Dialog", "Return"))
self.rejectButton.setText(_translate("Dialog", "取消"))
self.rejectButton.setShortcut(_translate("Dialog", "Esc"))
|
23,387 | 47d96bc78de559ca7ce16d5483ecbb9b498f9b3a | '''
cimt_metrics.py
Climate Impact Metrics Tool 'metrics' file
'''
import iris
import cimt_parent_metric
# ----------------------------------------------------------------------------------------------------
# Define each metric here ( skeleton is given below ) ------------------------------------------------
#class New_Metric( cimt_parent_metric.ImpactMetric ):
# """
# Child class for the New Metric.
# """
# def __init__( self ):
# super( New_Metric , self ).__init__( 'Full_name_of_metric' , 'stash_number' , 'units' , 'unit_factor', 'cell_number' )
#
# def load_cube( self , job ):
#
# # Cube manipulation goes here
#
# return cube
# ----------------------------------------------------------------------------------------------------
class NPP( cimt_parent_metric.ImpactMetric ):
"""
Child class for the Net Primary Productivity metric.
"""
def __init__( self ):
super( NPP , self ).__init__( 'Net_Primary_Productivity' , 'm01s03i262' , 'kg m^2 yr' , 31536000 , None )
def load_cube( self , job ):
variable = iris.AttributeConstraint( STASH = self.stash )
cube = iris.load_cube( self.job_files_dict[job] , variable )
return cube
# ----------------------------------------------------------------------------------------------------
class T_ROFF( cimt_parent_metric.ImpactMetric ):
"""
Child class for the Total Runoff metric.
"""
def __init__( self ):
super( T_ROFF , self ).__init__( 'Total_Runoff' , [ 'm01s08i235' , 'm01s08i234' ] , 'mm day^-1' , 86400.0 , None )
def load_cube( self , job ):
variables = [] ; cubes_to_sum = []
for stash in range( 0 , len( self.stash ) ):
variables.append( iris.AttributeConstraint( STASH = self.stash[ stash ] ) )
cubes_to_sum.append( iris.load_cube( self.job_files_dict[job] , variables[ stash ] ) )
cube = sum( cubes_to_sum )
return cube
# ----------------------------------------------------------------------------------------------------
class SOILM_1m( cimt_parent_metric.ImpactMetric ):
"""
Child class for the Soil Moisture (up to 1m) metric.
"""
def __init__( self ):
super( SOILM_1m , self ).__init__( 'Soil_Moisture_1m' , 'm01s08i223' , 'm^3 m^-3' , 1 , [ 1 , 2 , 3 ] )
def load_cube( self , job ):
variable = iris.AttributeConstraint( STASH = self.stash )
cube = iris.load_cube( self.job_files_dict[job] , variable )
cubes_to_sum = []
for layer in range( 0 , len( self.cell_number ) ):
cubes_to_sum.append( cube.extract( iris.Constraint( soil_model_level_number = lambda cell: cell == self.cell_number[ layer ] ) ) )
cube = sum( cubes_to_sum )
return cube
# ----------------------------------------------------------------------------------------------------
class T1p5m( cimt_parent_metric.ImpactMetric ):
"""
Child class for Temperature at 1.5M metric.
"""
def __init__( self ):
super( T1p5m , self ).__init__( 'Air_Temp_1.5m' , 'm01s03i236' , 'K' , 1 , None )
def load_cube( self , job ):
variable = iris.AttributeConstraint( STASH = self.stash )
cube = iris.load_cube( self.job_files_dict[job] , variable )
return cube |
23,388 | aaded276b65534acf26f977500e5334e18e811dc | # -*- coding: utf-8 -*-
"""
Python wrapper for the iperf3 libiperf.so.0 library. The module consists of two
classes, :class:`Client` and :class:`Server`, that inherit from the base class
:class:`IPerf3`. They provide a nice (if i say so myself) and pythonic way to
interact with the iperf3 utility.
At the moment the module redirects stdout and stderr to a pipe and returns the
received data back after each ``client.run()`` or ``server.run()`` call. In later
releases there will be an option to toggle this on or off.
A user should never have to utilise the :class:`IPerf3` class directly, this class
provides common settings for the :class:`Client` and :class:`Server` classes.
To get started quickly see the :ref:`examples` page.
.. moduleauthor:: Mathijs Mortimer <mathijs@mortimer.nl>
"""
from ctypes import cdll, c_char_p, c_int, c_char
from ctypes.util import find_library
import os
import select
import json
import threading
try:
from queue import Queue
except ImportError:
from Queue import Queue # Python2 compatibility
__version__ = '0.1.2'
def more_data(pipe_out):
"""Check if there is more data left on the pipe
:param pipe_out: The os pipe_out
:rtype: bool
"""
r, _, _ = select.select([pipe_out], [], [], 0)
return bool(r)
def read_pipe(pipe_out):
"""Read data on a pipe
Used to capture stdout data produced by libiperf
:param pipe_out: The os pipe_out
:rtype: unicode string
"""
out = b''
while more_data(pipe_out):
out += os.read(pipe_out, 1024)
return out.decode('utf-8')
def output_to_pipe(pipe_in):
"""Redirects stdout and stderr to a pipe
:param pipe_out: The pipe to redirect stdout and stderr to
"""
os.dup2(pipe_in, 1) # stdout
# os.dup2(pipe_in, 2) # stderr
def output_to_screen(stdout_fd, stderr_fd):
"""Redirects stdout and stderr to a pipe
:param stdout_fd: The stdout file descriptor
:param stderr_fd: The stderr file descriptor
"""
os.dup2(stdout_fd, 1)
#os.dup2(stderr_fd, 2)
class IPerf3(object):
"""The base class used by both the iperf3 :class:`Server` and :class:`Client`
.. note:: You should not use this class directly
"""
def __init__(self,
role,
verbose=True,
lib_name='libiperf.so.0'):
"""Initialise the iperf shared library
:param role: 'c' = client; 's' = server
:param verbose: enable verbose output
:param lib_name: The libiperf name providing the API to iperf3
"""
# TODO use find_library to find the best library
try:
self.lib = cdll.LoadLibrary(lib_name)
except OSError:
raise OSError('Could not find shared library {0}. Is iperf3 installed?'.format(lib_name))
# The test C struct iperf_test
self._test = self._new()
self.defaults()
# stdout/strerr redirection variables
self._stdout_fd = os.dup(1)
self._stderr_fd = os.dup(2)
self._pipe_out, self._pipe_in = os.pipe() # no need for pipe write
# TODO do we want to allow a user to change the json_output?
# if so, we should disable the stdout pipe when json_output=False
# Generic test settings
self.role = role
self.json_output = True
self.verbose = verbose
def __del__(self):
"""Cleanup the test after the :class:`IPerf3` class is terminated"""
try:
self.lib.iperf_free_test(self._test)
except AttributeError:
# self.lib doesn't exist, likely because iperf3 wasnt installed or
# the shared library libiperf.so.0 wasn't found
pass
def _new(self):
"""Initialise a new iperf test
struct iperf_test *iperf_new_test()
"""
return self.lib.iperf_new_test()
def defaults(self):
"""Set/reset iperf test defaults."""
self.lib.iperf_defaults(self._test)
@property
def role(self):
"""The iperf3 instance role
valid roles are 'c'=client and 's'=server
:rtype: 'c' or 's'
"""
try:
self._role = c_char(self.lib.iperf_get_test_role(self._test)).value.decode('utf-8')
except TypeError:
self._role = c_char(chr(self.lib.iperf_get_test_role(self._test))).value.decode('utf-8')
return self._role
@role.setter
def role(self, role):
if role.lower() in ['c', 's']:
self.lib.iperf_set_test_role(self._test,
c_char(role.lower().encode('utf-8')))
self._role = role
else:
raise ValueError("Unknown role, accepted values are 'c' and 's'")
@property
def bind_address(self):
"""The bind address the iperf3 instance will listen on
use * to listen on all available IPs
:rtype: string
"""
result = c_char_p(self.lib.iperf_get_test_bind_address(self._test)).value
if result:
self._bind_address = result.decode('utf-8')
else:
self._bind_address = '*'
return self._bind_address
@bind_address.setter
def bind_address(self, address):
self.lib.iperf_set_test_bind_address(self._test,
c_char_p(address.encode('utf-8')))
self._bind_address = address
@property
def port(self):
"""The port the iperf3 server is listening on"""
self._port = self.lib.iperf_get_test_server_port(self._test)
return self._port
@port.setter
def port(self, port):
self.lib.iperf_set_test_server_port(self._test, int(port))
self._port = port
@property
def json_output(self):
"""Toggles json output of libiperf
Turning this off will output the iperf3 instance results to
stdout/stderr
:rtype: bool
"""
enabled = self.lib.iperf_get_test_json_output(self._test)
if enabled:
self._json_output = True
else:
self._json_output = False
return self._json_output
@json_output.setter
def json_output(self, enabled):
if enabled:
self.lib.iperf_set_test_json_output(self._test, 1)
else:
self.lib.iperf_set_test_json_output(self._test, 0)
self._json_output = enabled
@property
def verbose(self):
"""Toggles verbose output for the iperf3 instance
:rtype: bool
"""
enabled = self.lib.iperf_get_verbose(self._test)
if enabled:
self._verbose = True
else:
self._verbose = False
return self._verbose
@verbose.setter
def verbose(self, enabled):
if enabled:
self.lib.iperf_set_verbose(self._test, 1)
else:
self.lib.iperf_set_verbose(self._test, 0)
self._verbose = enabled
@property
def _errno(self):
"""Returns the last error ID
:rtype: int
"""
return c_int.in_dll(self.lib, "i_errno").value
@property
def iperf_version(self):
"""Returns the version of the libiperf library
:rtype: string
"""
# TODO: Is there a better way to get the const char than allocating 30?
VersionType = c_char * 30
return VersionType.in_dll(self.lib, "version").value.decode('utf-8')
def _error_to_string(self, error_id):
"""Returns an error string from libiperf
:param error_id: The error_id produced by libiperf
:rtype: string
"""
strerror = self.lib.iperf_strerror
strerror.restype = c_char_p
return strerror(error_id).decode('utf-8')
def run(self):
"""Runs the iperf3 instance.
This function has to be instantiated by the Client and Server
instances
:rtype: NotImplementedError
"""
raise NotImplementedError
class Client(IPerf3):
"""An iperf3 client connection.
This opens up a connection to a running iperf3 server
Basic Usage::
>>> import iperf3
>>> client = iperf3.Client()
>>> client.duration = 1
>>> client.server_hostname = '127.0.0.1'
>>> client.port = 5201
>>> client.run()
{'intervals': [{'sum': {...
"""
def __init__(self, *args, **kwargs):
"""Initialise the iperf shared library"""
super(Client, self).__init__(role='c', *args, **kwargs)
# Internal variables
self._bulksize = None
self._server_hostname = None
self._port = None
self._num_streams = None
self._zerocopy = False
@property
def server_hostname(self):
"""The server hostname to connect to.
Accepts DNS entries or IP addresses
:rtype: string
"""
result = c_char_p(self.lib.iperf_get_test_server_hostname(self._test)).value
if result:
self._server_hostname = result.decode('utf-8')
else:
self._server_hostname = None
return self._server_hostname
@server_hostname.setter
def server_hostname(self, hostname):
self.lib.iperf_set_test_server_hostname(self._test,
c_char_p(hostname.encode('utf-8')))
self._server_hostname = hostname
@property
def duration(self):
"""The test duration in seconds."""
self._duration = self.lib.iperf_get_test_duration(self._test)
return self._duration
@duration.setter
def duration(self, duration):
self.lib.iperf_set_test_duration(self._test, duration)
self._duration = duration
@property
def bulksize(self):
"""The test bulksize."""
self._bulksize = self.lib.iperf_get_test_blksize(self._test)
return self._bulksize
@bulksize.setter
def bulksize(self, bulksize):
self.lib.iperf_set_test_blksize(self._test, bulksize)
self._bulksize = bulksize
@property
def num_streams(self):
"""The number of streams to use."""
self._num_streams = self.lib.iperf_get_test_num_streams(self._test)
return self._num_streams
@num_streams.setter
def num_streams(self, number):
self.lib.iperf_set_test_num_streams(self._test, number)
self._num_streams = number
@property
def zerocopy(self):
"""Toggle zerocopy.
Use the sendfile() system call for "Zero Copy" mode. This uses much
less CPU. This is not supported on all systems.
**Note** there isn't a hook in the libiperf library for getting the current
configured value. Relying on zerocopy.setter function
:rtype: bool
"""
return self._zerocopy
@zerocopy.setter
def zerocopy(self, enabled):
if enabled and self.lib.iperf_has_zerocopy():
self.lib.iperf_set_test_zerocopy(self._test, 1)
self._zerocopy = True
else:
self.lib.iperf_set_test_zerocopy(self._test, 0)
self._zerocopy = False
@property
def reverse(self):
"""Toggles direction of test
:rtype: bool
"""
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse
@reverse.setter
def reverse(self, enabled):
if enabled:
self.lib.iperf_set_test_reverse(self._test, 1)
else:
self.lib.iperf_set_test_reverse(self._test, 0)
self._reverse = enabled
def run(self):
"""Run the current test client.
:rtype: instance of :class:`TestResult`
"""
output_to_pipe(self._pipe_in)
error = self.lib.iperf_run_client(self._test)
if error:
data = '{"error": "%s"}' % self._error_to_string(self._errno)
else:
data = read_pipe(self._pipe_out)
output_to_screen(self._stdout_fd, self._stderr_fd)
return TestResult(data)
class Server(IPerf3):
"""An iperf3 server connection.
This starts an iperf3 server session. The server terminates after each
succesful client connection so it might be useful to run Server.run()
in a loop.
The C function iperf_run_server is called in a seperate thread to make
sure KeyboardInterrupt(aka ctrl+c) can still be captured
Basic Usage::
>>> import iperf3
>>> server = iperf3.Server()
>>> server.run()
{'start': {...
"""
def __init__(self, *args, **kwargs):
"""Initialise the iperf3 server instance"""
super(Server, self).__init__(role='s', *args, **kwargs)
def run(self):
"""Run the iperf3 server instance.
:rtype: instance of :class:`TestResult`
"""
def _run_in_thread(self, data_queue):
"""Runs the iperf_run_server
:param data_queue: thread-safe queue
"""
output_to_pipe(self._pipe_in)
self.lib.iperf_run_server(self._test)
# TODO json_output_string not available on earlier iperf3 builds
# have to build in a version check using self.iperf_version
# The following line should work on later versions:
# data = c_char_p(self.lib.iperf_get_test_json_output_string(self._test)).value
data = read_pipe(self._pipe_out)
if not data:
data = '{"error": "%s"}' % self._error_to_string(self._errno)
output_to_screen(self._stdout_fd, self._stderr_fd)
self.lib.iperf_reset_test(self._test)
data_queue.put(data)
data_queue = Queue()
t = threading.Thread(target=_run_in_thread, args=[self, data_queue])
t.daemon = True
t.start()
while t.is_alive():
t.join(.1)
return TestResult(data_queue.get())
class TestResult(object):
"""Class containing iperf3 test results
:param text: The raw result from libiperf as text
:param json: The raw result from libiperf asjson/dict
:param error: Error captured during test, None if all ok
:param time: Start time
:param timesecs: Start time in seconds
:param system_info: System info
:param version: Iperf Version
:param local_host: Local host ip
:param local_port: Local port number
:param remote_host: Remote host ip
:param remote_port: Remote port number
:param reverse:
:param tcp_mss_default:
:param protocol:
:param num_streams:
:param bulksize:
:param omit:
:param duration: Test duration in seconds
:param sent_bytes: Sent bytes
:param sent_bps: Sent bits per second
:param sent_kbps: sent kilobits per second
:param sent_Mbps: Sent Megabits per second
:param sent_kB_s: Sent kiloBytes per second
:param sent_MB_s: Sent MegaBytes per second
:param received_bytes: Received bytes
:param received_bps: Received bits per second
:param received_kbps: Received kilobits per second
:param received_Mbps: Received Megabits per second
:param received_kB_s: Received kiloBytes per second
:param received_MB_s: Received MegaBytes per second
:param retransmits: amount of retransmits (Only returned from client)
:param local_cpu_total:
:param local_cpu_user:
:param local_cpu_system:
:param remote_cpu_total:
:param remote_cpu_user:
:param remote_cpu_system:
"""
def __init__(self, result):
"""Initialise TestResult
:param result: raw json output from :class:`Client` and :class:`Server`
"""
# The full result data
self.text = result
self.json = json.loads(result)
if 'error' in self.json:
self.error = self.json['error']
else:
self.error = None
# start time
self.time = self.json['start']['timestamp']['time']
self.timesecs = self.json['start']['timestamp']['timesecs']
# generic info
self.system_info = self.json['start']['system_info']
self.version = self.json['start']['version']
# connection details
self.local_host = self.json['start']['connected'][0]['local_host']
self.local_port = self.json['start']['connected'][0]['local_port']
self.remote_host = self.json['start']['connected'][0]['remote_host']
self.remote_port = self.json['start']['connected'][0]['remote_port']
# test setup
self.tcp_mss_default = self.json['start']['tcp_mss_default']
self.protocol = self.json['start']['test_start']['protocol']
self.num_streams = self.json['start']['test_start']['num_streams']
self.bulksize = self.json['start']['test_start']['blksize']
self.omit = self.json['start']['test_start']['omit']
self.duration = self.json['start']['test_start']['duration']
# test results
self.sent_bytes = self.json['end']['sum_sent']['bytes']
self.sent_bps = self.json['end']['sum_sent']['bits_per_second']
self.sent_kbps = self.sent_bps / 1024 # Kilobits per second
self.sent_Mbps = self.sent_kbps / 1024 # Megabits per second
self.sent_kB_s = self.sent_kbps / 8 # kiloBytes per second
self.sent_MB_s = self.sent_Mbps / 8 # MegaBytes per second
self.received_bytes = self.json['end']['sum_received']['bytes']
self.received_bps = self.json['end']['sum_received']['bits_per_second']
self.received_kbps = self.received_bps / 1024 # Kilobits per second
self.received_Mbps = self.received_kbps / 1024 # Megabits per second
self.received_kB_s = self.received_kbps / 8 # kiloBytes per second
self.received_MB_s = self.received_Mbps / 8 # MegaBytes per second
# retransmits only returned from client
self.retransmits = self.json['end']['sum_sent'].get('retransmits', None)
self.local_cpu_total = self.json['end']['cpu_utilization_percent']['host_total']
self.local_cpu_user = self.json['end']['cpu_utilization_percent']['host_user']
self.local_cpu_system = self.json['end']['cpu_utilization_percent']['host_system']
self.remote_cpu_total = self.json['end']['cpu_utilization_percent']['remote_total']
self.remote_cpu_user = self.json['end']['cpu_utilization_percent']['remote_user']
self.remote_cpu_system = self.json['end']['cpu_utilization_percent']['remote_system']
@property
def reverse(self):
if self.json['start']['test_start']['reverse']:
return True
else:
return False
@property
def type(self):
if 'connecting_to' in self.json['start']:
return 'client'
else:
return 'server'
def __repr__(self):
"""Print the result as received from iperf3"""
return self.text
|
23,389 | 56a9acb401b0bce05df068508c3338f257378c48 | from app.handlers.base import BaseHandler
from app.handlers.base import *
class Worker(threading.Thread):
def __init__(self, callback=None, *args, **kwargs):
super(Worker, self).__init__(*args, **kwargs)
self.callback = callback
def run(self):
import time
time.sleep(3)
self.callback('DONE')
class ThreadHandler(BaseHandler):
    # Tornado-style handler that answers the request asynchronously: the GET
    # kicks off a Worker thread and the response is finished from its callback.
    @asynchronous
    def get(self):
        # NOTE(review): printblah is presumably defined on BaseHandler -- verify.
        self.printblah()
        Worker(self.worker_done).start()
    def worker_done(self, value):
        # Invoked on the worker thread when the job completes; ends the request
        # with the produced value as the body.
        self.finish(value)
|
23,390 | c15cab21c25cbf4edbf541bdc0a750c327a125c8 | # This file is part of OnDA.
#
# OnDA is free software: you can redistribute it and/or modify it under the terms of
# the GNU General Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# OnDA is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with OnDA.
# If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014-2019 Deutsches Elektronen-Synchrotron DESY,
# a research centre of the Helmholtz Association.
"""
OnDA frame viewer for crystallography.
This module contains a graphical interface that displays detector data frames in
crystallography experiments.
"""
from __future__ import absolute_import, division, print_function
import collections
import copy
import sys
from typing import Any, Dict # pylint: disable=unused-import
import cfelpyutils.crystfel_utils as cfel_crystfel
import cfelpyutils.geometry_utils as cfel_geometry
import click
import numpy
import pyqtgraph
from onda.utils import gui
try:
import PyQt5.QtGui as QtGui
except ImportError:
import PyQt4.QtGui as QtGui
class CrystallographyFrameViewer(gui.OndaGui):
    """
    See documentation of the __init__ function.
    """

    def __init__(self, geometry, hostname, port):
        # type: (Dict[str, Any], str, int) -> None
        """
        OnDA frame viewer for crystallography.

        This viewer receives detector frame data from an OnDA crystallography monitor,
        when it is tagged with the 'ondadetectordata' label. It displays the received
        detector frames, together with any detected Bragg peak (if present). A data
        buffer allows the viewer to stop receiving data from the monitor but still keep
        in memory the last 20 displayed frames for inspection.

        Arguments:

            geometry (Dict[str, Any]): a dictionary containing CrystFEL detector
                geometry information (as returned by the 'load_crystfel_geometry`
                function in the 'cfelpyutils' module).

            hostname (str): the hostname or IP address where the viewer will listen for
                data.

            port(int): the port at which the viewer will listen for data.
        """
        super(CrystallographyFrameViewer, self).__init__(
            hostname=hostname,
            port=port,
            gui_update_func=self._update_image,
            tag=u"ondaframedata",
        )

        # Size of the assembled image: twice the largest absolute pixel
        # coordinate in each direction, plus a 2-pixel safety margin.
        pixel_maps = cfel_geometry.compute_pix_maps(geometry)
        x_map, y_map = pixel_maps.x, pixel_maps.y
        y_minimum = 2 * int(max(abs(y_map.max()), abs(y_map.min()))) + 2
        x_minimum = 2 * int(max(abs(x_map.max()), abs(x_map.min()))) + 2
        self._img_shape = (y_minimum, x_minimum)
        self._img_center_x = int(self._img_shape[1] / 2)
        self._img_center_y = int(self._img_shape[0] / 2)
        visual_pixel_map = cfel_geometry.compute_visualization_pix_maps(geometry)
        self._visual_pixel_map_x = visual_pixel_map.x.flatten()
        self._visual_pixel_map_y = visual_pixel_map.y.flatten()
        # Fix: 'numpy.float' was deprecated in NumPy 1.20 and removed in 1.24;
        # it was an alias for the builtin 'float' (float64), used here instead.
        self._img = numpy.zeros(shape=self._img_shape, dtype=float)
        self._frame_list = collections.deque(maxlen=20)
        self._current_frame_index = -1

        # Plot widgets: the image view plus a scatter overlay for Bragg peaks.
        pyqtgraph.setConfigOption("background", 0.2)
        self._ring_pen = pyqtgraph.mkPen("r", width=2)
        self._peak_canvas = pyqtgraph.ScatterPlotItem()
        self._image_view = pyqtgraph.ImageView()
        self._image_view.ui.menuBtn.hide()
        self._image_view.ui.roiBtn.hide()
        self._image_view.getView().addItem(self._peak_canvas)

        # Playback controls and citation banner.
        self._back_button = QtGui.QPushButton(text="Back")
        self._back_button.clicked.connect(self._back_button_clicked)
        self._forward_button = QtGui.QPushButton(text="Forward")
        self._forward_button.clicked.connect(self._forward_button_clicked)
        self._play_pause_button = QtGui.QPushButton(text="Pause")
        self._play_pause_button.clicked.connect(self._play_pause_button_clicked)
        self._citation_label = QtGui.QLabel(
            "You are using an <b>OnDA</b> real-time monitor. Please cite: "
            "Mariani et al., J Appl Crystallogr. 2016 May 23;49(Pt 3):1073-1080"
        )
        self._citation_label.setSizePolicy(
            QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
        )
        self._horizontal_layout = QtGui.QHBoxLayout()
        self._horizontal_layout.addWidget(self._back_button)
        self._horizontal_layout.addWidget(self._forward_button)
        self._horizontal_layout.addWidget(self._play_pause_button)
        self._vertical_layout = QtGui.QVBoxLayout()
        self._vertical_layout.addWidget(self._citation_label)
        self._vertical_layout.addWidget(self._image_view)
        self._vertical_layout.addLayout(self._horizontal_layout)
        self._central_widget = QtGui.QWidget()
        self._central_widget.setLayout(self._vertical_layout)
        self.setCentralWidget(self._central_widget)
        self.show()

    def _update_image(self):
        # type: () -> None
        # Updates the frame image shown by the viewer.
        if self.received_data is not None:
            # The received aggregated data is expected to be a list of event entries
            # (each being a dictionary storing the data for an event:
            # List[Dict[str, Any], ...]). The last event in the list is extracted for
            # visualization.
            self._frame_list.append(copy.deepcopy(self.received_data[-1]))
            self._current_frame_index = len(self._frame_list) - 1
            # Resets the 'received_data' attribute to None. One can then check if
            # data has been received simply by checking whether the attribute is not
            # None.
            self.received_data = None
        try:
            current_data = self._frame_list[self._current_frame_index]
        except IndexError:
            # If the frame buffer is empty, returns without drawing anything.
            return
        self._img[self._visual_pixel_map_y, self._visual_pixel_map_x] = (
            current_data[b"detector_data"].ravel().astype(self._img.dtype)
        )
        QtGui.QApplication.processEvents()
        self._image_view.setImage(
            self._img.T, autoLevels=False, autoRange=False, autoHistogramRange=False
        )
        QtGui.QApplication.processEvents()
        # Converts each peak's fast-scan/slow-scan coordinates into positions
        # in the assembled image and draws a ring around every peak.
        peak_x_list = []
        peak_y_list = []
        for peak_fs, peak_ss in zip(
            current_data[b"peak_list"][b"fs"], current_data[b"peak_list"][b"ss"]
        ):
            peak_index_in_slab = int(round(peak_ss)) * current_data[
                b"native_data_shape"
            ][1] + int(round(peak_fs))
            peak_x_list.append(self._visual_pixel_map_x[peak_index_in_slab])
            peak_y_list.append(self._visual_pixel_map_y[peak_index_in_slab])
        QtGui.QApplication.processEvents()
        self._peak_canvas.setData(
            x=peak_x_list,
            y=peak_y_list,
            symbol="o",
            size=[5] * len(current_data[b"peak_list"][b"intensity"]),
            brush=(255, 255, 255, 0),
            pen=self._ring_pen,
            pxMode=False,
        )

    def _back_button_clicked(self):
        # type: () -> None
        # Manages clicks on the 'back' button: pause the stream and step one
        # frame backwards in the buffer.
        self._stop_stream()
        if self._current_frame_index > 0:
            self._current_frame_index -= 1
        print("Showing frame {0} in the buffer".format(self._current_frame_index))
        self._update_image()

    def _forward_button_clicked(self):
        # type: () -> None
        # Manages clicks on the 'forward' button: pause the stream and step one
        # frame forwards in the buffer.
        self._stop_stream()
        if (self._current_frame_index + 1) < len(self._frame_list):
            self._current_frame_index += 1
        print("Showing frame {0} in the buffer".format(self._current_frame_index))
        self._update_image()

    def _stop_stream(self):
        # type: () -> None
        # Disconnects from the OnDA monitor and stops receiving data.
        if self.listening:
            self._play_pause_button.setText("Play")
            self.stop_listening()

    def _start_stream(self):
        # type: () -> None
        # Connects to the OnDA monitor and starts receiving data.
        if not self.listening:
            self._play_pause_button.setText("Pause")
            self.start_listening()

    def _play_pause_button_clicked(self):
        # type: () -> None
        # Manages clicks on the 'play/pause' button by toggling the stream.
        if self.listening:
            self._stop_stream()
        else:
            self._start_stream()
@click.command()
@click.argument("geometry_file", type=click.Path())
@click.argument("hostname", type=str, required=False)
@click.argument("port", type=int, required=False)
def main(geometry_file, hostname, port):
    # type: (Dict[str, Any], str, int) -> None
    """
    OnDA frame viewer for crystallography. This program must connect to a running OnDA
    monitor for crystallography. If the monitor broadcasts detector frame data, this
    viewer will display it. The viewer will also show, overlayed on the frame data,
    any found Bragg peak. The data stream from the monitor can also be temporarily
    paused, and any of the last 10 displayed detector frames can be recalled for
    inspection.

    GEOMETRY_FILE: the relative or absolute path to a file containing the detector
    geometry information (in CrystFEL format) to be used for visualization.

    HOSTNAME: the hostname where viewer will listen for data. Optional: if not
    provided, it defaults to localhost (127.0.0.1).

    PORT: the port at which the viewer will listen for data. Optional: if not provided,
    it defaults to 12321.
    """
    # Substitute the documented defaults when the optional arguments are omitted.
    hostname = "127.0.0.1" if hostname is None else hostname
    port = 12321 if port is None else port
    geometry = cfel_crystfel.load_crystfel_geometry(geometry_file)
    app = QtGui.QApplication(sys.argv)
    _ = CrystallographyFrameViewer(geometry, hostname, port)
    sys.exit(app.exec_())
|
23,391 | b065a85d99a8cb1c273627c407fbe7bdc7527ac4 | import requests
import re
# Fetch a Tencent Video episode page and dump its HTML to stdout.
url = 'https://v.qq.com/x/cover/jg2a5feze5bryj2/j0855hsr5y8.html'
request_headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
}
# Force UTF-8 decoding before reading the body text.
page = requests.get(url, headers=request_headers)
page.encoding = 'utf-8'
print(page.text)
|
23,392 | 63b67f1cf34a59eda04baa45ebf4fc25eb044f93 | from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
from django import forms
class SignUpForm(UserCreationForm):
    """Registration form for the project's custom user model.

    Exposes name/username/email plus the two password fields that
    UserCreationForm validates against each other.
    """
    class Meta:
        model = get_user_model()
        fields = ('name', 'username', 'email', 'password1', 'password2')
class LoginForm(forms.Form):
    """Plain email/password login form (not bound to a model)."""
    email = forms.EmailField(max_length=100)
    password = forms.CharField(max_length=32, widget=forms.PasswordInput)
class AddForm(forms.Form):
    """Single-field form for adding a to-do entry."""
    text = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Add To Dos'}))
|
23,393 | 5fc04c03d15d7604b8dcafb4974ab457c3f8ccb0 | #!/usr/bin/env python
import sys
from argparse import ArgumentParser
# Command-line interface: input histogram ROOT file plus plotting options.
argParser = ArgumentParser(description = 'Spit out plots showing shape variations.')
argParser.add_argument('input', metavar = 'PATH', help = 'Histogram ROOT file.')
argParser.add_argument('--variable', '-v', action = 'store', metavar = 'VARIABLE(S)', dest = 'variable', nargs = '+', default = ['phoPtHighMet'], help = 'Discriminating variable(s).')
argParser.add_argument('--samples', '-s', action = 'store', metavar = 'SAMPLE(S)', dest = 'samples', nargs = '+', default = [], help = 'Samples to compare. First is used as base for ratios.')
argParser.add_argument('--out-dir', '-o', action = 'store', metavar = 'OUTDIR', dest = 'outdir', default = None, help = 'Output directory name.')
args = argParser.parse_args()
# Clear argv so ROOT (imported below) does not try to parse our options.
sys.argv = []
import os
import array
import math
import re
import ROOT as r
from pprint import pprint
# r.gROOT.SetBatch(True)
thisdir = os.path.dirname(os.path.realpath(__file__))
basedir = os.path.dirname(thisdir)
sys.path.append(basedir)
from plotstyle import *
from datasets import allsamples
import config
from main.plotconfig import getConfig
# Analysis configuration, input file, and per-sample line colors.
monophConfig = getConfig('monoph')
source = r.TFile.Open(args.input)
colors = [r.kBlack, r.kRed, r.kBlue]
# Total integrated luminosity: sum over the observed samples.
lumi = 0.
for sName in monophConfig.obs.samples:
    lumi += allsamples[sName.name].lumi
def getHist(name, syst = '', split = ''):
if syst:
path = variable + split + '/' + name + '_' + syst # /samples
else:
path = variable + split + '/' + name # /samples
print path
return source.Get(path)
# One canvas for raw event counts and one for shape (unit-normalized) comparison.
rcanvas = RatioCanvas(lumi = lumi, name = 'raw')
scanvas = RatioCanvas(lumi = lumi, name = 'norm')
if args.outdir is None:
    args.outdir = '_'.join(args.samples)
# NOTE(review): rstrip('.root') strips any trailing '.', 'r', 'o', 't'
# characters, not the literal suffix -- fine only while file names do not end
# in those letters.
plotDir = 'monophoton/compareShapes/' + args.input.split('/')[-1].rstrip('.root') + '/' + args.outdir
for variable in args.variable:
    xtitle = monophConfig.getPlot(variable).title
    # Reset both canvases and legends for this variable.
    rcanvas.Clear()
    rcanvas.legend.Clear()
    rcanvas.legend.setPosition(0.6, 0.7, 0.9, 0.9)
    rcanvas.xtitle = xtitle
    rcanvas.ytitle = 'Events / Unit'
    scanvas.Clear()
    scanvas.legend.Clear()
    scanvas.legend.setPosition(0.6, 0.7, 0.9, 0.9)
    scanvas.xtitle = xtitle
    scanvas.ytitle = 'A.U.'
    # for iS, sample in enumerate(args.samples): # for between sample comparisons
    for iS, sample in enumerate(['LowPhoPt', 'HighPhoPt']): # for within sample comparisons
        print 'Getting', sample
        # hist = getHist(sample) # for between sample comparisons
        hist = getHist(args.samples[0], split = sample) # for within sample comparisons
        if not hist:
            print "Hist doesn't exist for", sample
            print "Why are you asking for this sample?"
            continue
        if not hist.Integral() > 0.:
            print "Hist integral is 0 for "+sample+". Skipping."
            continue
        # Raw-count comparison.
        rcanvas.legend.add(sample, title = sample, mcolor = colors[iS], lcolor = colors[iS], lwidth = 2)
        rcanvas.legend.apply(sample, hist)
        rID = rcanvas.addHistogram(hist, drawOpt = 'HIST')
        # Normalize to unit area for the shape-only comparison.
        if hist.Integral():
            hist.Scale( 1. / hist.Integral() )
        scanvas.legend.add(sample, title = sample, mcolor = colors[iS], lcolor = colors[iS], lwidth = 2)
        scanvas.legend.apply(sample, hist)
        sID = scanvas.addHistogram(hist, drawOpt = 'HIST')
        print rID, sID
    # Publish both canvases to the web area for this variable.
    rcanvas.printWeb(plotDir, variable + '_raw')
    scanvas.printWeb(plotDir, variable + '_norm')
|
23,394 | f66c53098d130b92981512b1b3a38fd7dc9aacb4 | import world_data
from Player import Player
import helpers as hlp
class World:
    """Game world: owns the player, a name, and the set of rooms.

    The current room is advanced by play() according to the player's choices.
    """

    # ***** INITIALIZATION METHODS *****
    def __init__(self, name):
        self.name = name
        self.description = world_data.aelurna_description()
        self.player = None  # Assigned later via init_player() from the main loop.
        self.rooms = world_data.load_rooms()
        self.current_room = self.rooms[world_data.get_first_room()]

    def init_player(self):
        # Prompt for a name and attach a fresh Player to this world.
        self.player = Player(hlp.request_name())

    # ***** PLAY GAME METHODS *****
    def get_current_room(self):
        return self.current_room

    def play(self):
        # Let the player act in the current room, then move to wherever the
        # player ended up.
        destination = self.player.play(self.current_room)
        self.current_room = self.rooms[destination]
|
23,395 | ade146daad6f1c43ebd0c98f974831e0edbffb75 | age = int(input())
# Classify the age read above into child / teen / adult.
if age < 10:
    print('child')
elif age <= 18:
    # Fix: the original condition '10 < age <= 18' excluded age == 10, so a
    # ten-year-old fell through to the 'adult' branch.
    print('teen')
else:
    print('adult')
|
23,396 | 57c21de6f23e9a6ffd381b91abe9bfde60ae0e4b | from math import radians, cos, sin, asin, sqrt
def haversine(lon1, lat1, lon2, lat2):
    """Great-circle distance in kilometers between two (lon, lat) points.

    Coordinates are given in decimal degrees; uses the haversine formula.
    """
    # Fix: the function was originally named 'aversine' but is called as
    # 'haversine' below, which raised a NameError.
    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    # haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
    c = 2 * asin(sqrt(a))
    r = 6371  # Radius of earth in kilometers. Use 3956 for miles
    return c * r

# Backward-compatible alias for any caller using the original misspelling.
aversine = haversine
# NOTE(review): notebook residue below -- 'pd' and 'np' are never imported in
# this file, 'condition' is undefined, and DataFrame.as_matrix() was removed in
# pandas 1.0 (use .to_numpy()). This section cannot run as-is.
category_mapping_matrix = pd.read_excel('../data/Data.xlsx',sheet_name='Category Mapping',index_col=0).as_matrix()
# Look up the category (column 2) for the venue named in column 0.
category_mapping_matrix[np.where(category_mapping_matrix[:,0]=='Shoe Wiz')[0][0]][2]
np.extract(condition,category_mapping_matrix)
# NOTE(review): 'list' shadows the builtin -- rename if this code is revived.
list= ['Pasta Moto', 'Kichi Grill', 'M.Y. China', 'Chipotle',
'Fire of Brazil', 'Teavana', 'Cako Bakery',
'Andale Mexican Restaurant', 'The Body Shop', 'Amiri Salon', 'Origins']
A=[category_mapping_matrix[np.where(category_mapping_matrix[:,0]==name)[0][0]][2] for name in list ]
for name in list:
    print name
    print np.where(category_mapping_matrix[:,0]==name)[0][0]
haversine(37.66146302,-122.2986728,37.78442,-122.406832)
def cosine(lon1, lat1, lon2, lat2):
    """Great-circle distance in kilometers via the spherical law of cosines.

    Coordinates are (lon, lat) pairs in decimal degrees.
    """
    # Fix: 'acos' is not in the module-level 'from math import ...' list, so
    # the original raised a NameError; import it locally.
    from math import acos
    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    dlon = lon2 - lon1
    theta = sin(lat1)*sin(lat2) + cos(lat1)*cos(lat2)*cos(dlon)
    # Clamp against floating-point drift just outside acos's [-1, 1] domain.
    theta = max(-1.0, min(1.0, theta))
    c = acos(theta)
    r = 6371  # Radius of earth in kilometers. Use 3956 for miles
    return c * r
cosine(37.66146302,-122.2986728,37.78442,-122.406832)
# Fix: the reference formula below used unicode math symbols and was not valid
# Python (SyntaxError); kept as a comment:
# d = acos(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(dlon)) * R
sqrt(46000)
23,397 | 53c009ddb5183028a1c1436d2ecda1beb0e9dacd | import unittest
from models import *
# tests here
# Run the (currently empty) test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
23,398 | cc2fa03ac8df87ca6973fe3b23a7bc49dfde2c38 | #!/usr/bin/env python2
#
# Catalog App
from flask import Flask, render_template, url_for, request, redirect
from flask import flash, session as login_session, make_response, jsonify
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Item, User
import random
import string
from oauth2client.client import flow_from_clientsecrets, FlowExchangeError
import httplib2
import json
import requests
# OAuth2 client ID for Google sign-in, read from the downloaded secrets file.
CLIENT_ID = json.loads(open(
    'client_secrets.json',
    'r').read())['web']['client_id']
APPLICATION_NAME = "Catalog App"
app = Flask(__name__)
# Database setup: bind the declarative base to the SQLite engine and open one
# module-wide session used by every view below.
engine = create_engine('sqlite:///catalogwithusers.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# load static global list of categories. Not editable in this version.
categories = session.query(Category).order_by(Category.name)
# Google sign-in Oauth2 success response - initialize login session
@app.route('/gconnect', methods=['POST'])
def gconnect():
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps(
'Invalid state parameter'), 401)
response.headers['Content-Type'] = 'application.json'
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(json.dumps(
'Failed to upgrade the authorization code'), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/'
'tokeninfo?access_token=%s' % access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error'), 500))
response.headers['Content-Type'] = 'application/json'
# Verify that the access token is used for the intended user
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(json.dumps(
"Token's user ID doesn't match given user ID.", 401))
response.headers['Content-Type'] = 'application/json'
return response
if result['issued_to'] != CLIENT_ID:
response = make_response(json.dumps(
"Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_credentials = login_session.get('credentials')
stored_gplus_id = login_session.get('gplus_id')
if stored_credentials is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps(
'Current user is already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
# Store the access token in the session for later use
login_session['credentials'] = access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {
'access_token': credentials.access_token,
'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['email'] = data['email']
# create new user if user doesn't already exist
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
else:
login_session['user_id'] = user_id
output = "<p>You are now logged in as " + login_session['username']+"<p>"
return output
# Logout - revoke current user token and reset login_session
@app.route('/logout/', methods=['POST'])
def logout():
    """Revoke the current user's Google token and clear the login session."""
    # Only log out a user who has actually logged in.
    credentials = login_session.get('credentials')
    if credentials is None:
        return 'Current user is not logged in.'
    # Ask Google to revoke the stored token.
    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % credentials
    h = httplib2.Http()
    result = h.request(url, 'GET')[0]
    if result['status'] != '200':
        return 'Failed to revoke token for given user.'
    # Token revoked: drop every piece of session state for this user.
    for key in ('credentials', 'gplus_id', 'username', 'email', 'user_id'):
        del login_session[key]
    return 'Successfully logged out.'
# main catalog - latest 10 items in descending datetime order
@app.route('/')
@app.route('/catalog/')
def catalog():
    """Home page: the ten newest items plus a fresh anti-forgery state token."""
    # Random 32-character token, validated later by the OAuth callback.
    token_chars = string.ascii_uppercase + string.digits
    state = ''.join([random.choice(token_chars) for _ in xrange(32)])
    login_session['state'] = state
    items = session.query(Item).order_by(Item.created.desc()).limit(10)
    # Anonymous visitors get the read-only template.
    template = 'catalog.html' if 'username' in login_session else 'publiccatalog.html'
    return render_template(
        template,
        categories=categories,
        items=items,
        STATE=state)
# single category listing - all items in category
@app.route('/catalog/<category>/')
def showCategory(category):
    """List every item in the given category, or bounce home if it is unknown."""
    cat = session.query(Category).filter_by(name=category).one_or_none()
    if cat is None:
        return redirect(url_for('catalog'))
    catItems = session.query(Item).filter_by(
        category_id=cat.id).order_by(Item.name)
    # Anonymous visitors get the read-only template.
    template = 'category.html' if 'username' in login_session else 'publiccategory.html'
    return render_template(
        template,
        category=category,
        categories=categories,
        items=catItems)
# new item creation
@app.route('/catalog/new/', methods=['GET', 'POST'])
def newItem():
    """Show the creation form (GET) or create a new item (POST); login required."""
    if 'username' not in login_session:
        flash('Not authorized to create new item.')
        return redirect('/catalog/')
    if request.method != 'POST':
        return render_template('newItem.html', categories=categories)
    # Build the item from the submitted form, owned by the logged-in user.
    created = Item(
        name=request.form['name'],
        description=request.form['description'],
        category_id=int(request.form['category']),
        user_id=login_session['user_id'])
    session.add(created)
    session.commit()
    flash('New item created!')
    return redirect(url_for('catalog'))
# single item listing
@app.route('/catalog/<category>/<item>/')
def showItem(category, item):
    """Show a single item; the item's creator gets the editable view."""
    shownItem = session.query(Item).filter_by(name=item).one_or_none()
    if shownItem is None:
        return redirect(url_for('catalog'))
    # Fix: look up the creator only after the None check; the original
    # dereferenced showItem.user_id before verifying the item exists, raising
    # AttributeError for unknown item names.
    creator = getUserInfo(shownItem.user_id)
    if 'username' in login_session and creator.id == login_session['user_id']:
        return render_template('item.html', item=shownItem)
    return render_template('publicitem.html', item=shownItem)
# JSON API endpoint for single item name and description
@app.route('/catalog/<category>/<item>/api/')
def itemApi(category, item):
    """JSON endpoint exposing one item's serialized name and description."""
    found = session.query(Item).filter_by(name=item).one_or_none()
    if found is None:
        return redirect(url_for('catalog'))
    return jsonify(item=found.serialize)
# edit item
@app.route('/catalog/<item>/edit/', methods=['GET', 'POST'])
def editItem(item):
    """Edit an item's fields; only the item's creator is authorized."""
    editItem = session.query(Item).filter_by(name=item).one_or_none()
    if editItem is not None:
        creator = getUserInfo(editItem.user_id)
        if 'username' in login_session:
            # Fix: the original indexed login_session with the undefined name
            # user_id (NameError at request time); the string key is intended.
            if creator.id == login_session['user_id']:
                if request.method == 'POST':
                    editItem.name = request.form['name']
                    editItem.description = request.form['description']
                    editItem.category_id = request.form['category']
                    session.add(editItem)
                    session.commit()
                    flash('Item edited!')
                    return redirect(
                        url_for(
                            'showItem',
                            category=editItem.category.name,
                            item=editItem.name))
                else:
                    return render_template(
                        'editItem.html',
                        item=editItem,
                        categories=categories)
    flash('Not authorized to edit item.')
    return redirect(url_for('catalog'))
# delete item
@app.route('/catalog/<item>/delete/', methods=['GET', 'POST'])
def deleteItem(item):
    """Delete an item; only the item's creator is authorized."""
    delItem = session.query(Item).filter_by(name=item).one_or_none()
    if delItem is not None:
        creator = getUserInfo(delItem.user_id)
        if 'username' in login_session:
            # Fix: string key -- the original used the undefined name user_id,
            # which raised NameError at request time.
            if creator.id == login_session['user_id']:
                if request.method == 'POST':
                    session.delete(delItem)
                    session.commit()
                    flash('Item deleted!')
                    # Fix: return after a successful delete so the
                    # 'Not authorized' flash below is not also emitted.
                    return redirect(url_for('catalog'))
                else:
                    return render_template('deleteItem.html', item=delItem)
    flash('Not authorized to edit item.')
    return redirect(url_for('catalog'))
# function to retrieve user ID from email address
# function to retrieve user ID from email address
def getUserID(email):
    """Return the id of the User with the given email, or None if absent.

    Uses one_or_none() (already the pattern elsewhere in this module) instead
    of the original bare 'except:' around .one(), which silently swallowed
    every error, not just the missing-row case.
    """
    user = session.query(User).filter_by(email=email).one_or_none()
    return user.id if user is not None else None
# function to retrieve User from user ID
# function to retrieve User from user ID
def getUserInfo(user_id):
    """Return the User row with the given primary key (raises if missing)."""
    return session.query(User).filter_by(id=user_id).one()
# create new User in database
# create new User in database
def createUser(login_session):
    """Create a User from the login session's name/email and return its id."""
    newUser = User(
        name=login_session['username'],
        email=login_session['email'])
    session.add(newUser)
    session.commit()
    # After commit the primary key is populated on the instance, so the
    # original extra round-trip re-querying by email is unnecessary.
    return newUser.id
if __name__ == '__main__':
    # Development-server settings; use a real secret key and debug=False in
    # production.
    app.secret_key = 'super_secret_key'
    app.debug = True
    app.run(host='0.0.0.0', port=8000)
|
23,399 | e60a2d8f5a778e436be96cf4cc0424ffa33faa5d | from .MongoClient import connect_mongo
from .MongoClient import insert_apart_trade_targetarea_increase
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.