text stringlengths 38 1.54M |
|---|
# coding: utf-8
print("hello world")
# The line below is a syntax error in Python 3 (print is a function there)
#print "hello world"
# Split a string into a list
print("hello world".split(" "))
print("文字列結合")  # string concatenation
x = "hello"
y = "world"
print(x+y)
print("文字列置き換え")  # string replacement
x = "Hello World"
print(x.replace("o","u"))
print("大文字小文字変換")  # upper/lower case conversion
print(x.upper())
print(x.lower())
print("結合")  # joining a list into one string
x = ["Hello","World"]
print(",".join(x)) # Hello,World
print("検索")  # searching
print("Hello World".find("o")) # 4
print("Hello World".find("x")) # -1
print("個数")  # counting occurrences
print("Hello World".count("o")) # 2  ("Hello World" contains two o's)
print("文字列インデックス")  # string indexing / slicing
x = "Hello World"
print(x[0]) # H
print(x[1]) # e
print(x[-1]) # d
print(x[-2]) # l
print(x[4:7]) # o W
print(x[-3:-1]) # rl
print(x[-2:]) # ld
print(x[4:]) # o World
print(x[:4]) # Hell
print("四則演算")  # arithmetic
print(1+2)
print(1-2)
print(3*5)
print(15/3)
print(15/4) # 3 in Python 2 (int floor division); 3.75 in Python 3
print(15.0/4) # 3.75
print(15%4) # 3 (remainder)
print("型変換")  # type conversion
x = 5
print(type(x))
print(type(str(x)))
x = "10"
print(type(x))
print(type(int(x)))
print("文字列線形")  # string formatting
x = "Tom"
y = "pen"
intro = "{} is {}".format(x, y)
print(intro)
print("改行")  # newline
print("This is a pen \nThat is Tom")
print("中央揃え、右揃え、左揃え(一行を30とした場合)")  # center/right/left align (30-char line)
col = 30
print("Title".center(col))
print("Date".rjust(col))
print("This is main text.".ljust(col))
# Import the math library
import math
print(math.pi)
# When only pi is needed
from math import pi
print(pi)
# Type inspection (Python 2 prints <type '...'>, Python 3 prints <class '...'>)
print(type(1)) # int
print(type(1.5)) # float
print(type("hello world")) # str
print(type(["hello","world"])) # list
print(type([1,4,7])) # list
print(type(("hello", "world"))) # tuple
print(type(("hello",))) # tuple (trailing comma makes a 1-tuple)
print(type(True)) # bool
print(type(False)) # bool
print(type({1:"apple", 2:"orange"})) # dict
# Lists (arrays)
x = ["a","b","c","d",[1,2,3,4,5]]
print(x[0]) # a
print(x[2]) # c
print(x[4]) # [1,2,3,4,5]
print(x[1:3]) # ["b","c"]
print(x[1:]) # ["b","c","d",[1,2,3,4,5]]
print(x[:3]) # ["a","b","c"]
print(x[0:5:2]) # ['a', 'c', [1, 2, 3, 4, 5]]  (indices 0-4, step 2)
# Append an item
x.append("x")
print(x) # ['a', 'b', 'c', 'd', [1, 2, 3, 4, 5], 'x']
# Remove by value
x.remove("x")
print(x) # ['a', 'b', 'c', 'd', [1, 2, 3, 4, 5]]
# Extend with another list
x.extend(["x","y"])
print(x) # ['a', 'b', 'c', 'd', [1, 2, 3, 4, 5], 'x', 'y']
# Delete by index
del x[5] # removes the "x" added above
print(x) # ['a', 'b', 'c', 'd', [1, 2, 3, 4, 5], 'y']
del x[4:] # everything from index 4 onward is removed
print(x) # ['a', 'b', 'c', 'd']
# pop removes (and returns) the last item
x.pop()
print(x) # ['a', 'b', 'c']
# sort (alphabetical order)
x = ["b","a","d","c"]
x.sort()
print(x) # ['a', 'b', 'c', 'd']
# reverse (flip the list in place)
x = ["b","a","d","c"]
x.reverse()
print(x) # ['c', 'd', 'a', 'b']
# In-place mutation vs. value-returning operations
# String methods return the new value (strings are immutable)
x = "abcedfg"
x = x.upper()
print(x) # ABCEDFG (note: the source string "abcedfg" is deliberately scrambled)
x = ["a","b","c","d"]
x = x.append("e") # append mutates in place and returns None
print(x) # None
print(type(x)) # NoneType
# Ways to build a list
# From a string
print(list("abc")) # ['a', 'b', 'c']
# Integer sequences with range (Python 2 prints a list;
# Python 3 prints a lazy range object such as range(0, 5))
print(range(5)) # [0, 1, 2, 3, 4] in Python 2
print(range(0,3)) # [0, 1, 2]
print(range(5,10)) # [5, 6, 7, 8, 9]
print(range(-5,3)) # [-5, -4, -3, -2, -1, 0, 1, 2]
# 2 through 6, stepping by 2
print(range(2,7,2)) # [2, 4, 6]
# Dictionaries
x = {"color":"red", "size":16, "font":"hoge", 1:"number"}
print(x) # insertion order is kept from Python 3.7; arbitrary before that
print(x["color"]) # red
print(x[1]) # number
# Add
x["line"] = True
print(x)
# Update an existing key
x["color"] = "orange"
print(x)
# Delete
del x["font"]
print(x)
# Delete & retrieve
y = x.pop(1) # pop returns the removed value
print(x)
print(y) # number
# Add & update several keys in one call
x.update({2:"Num", "color":"white"})
print(x)
# Keys
y = x.keys()
print(y) # [2, 'color', 'line', 'size']
print(type(y)) # list in Python 2; dict_keys view in Python 3
# print(x[y[1]]) # white — not valid in Python 3: dict views don't support indexing
# Values
y = x.values()
print(y) # ['Num', 'white', True, 16]
# items() yields (key, value) tuples
y = x.items()
print(y) # [(2, 'Num'), ('color', 'white'), ('line', True), ('size', 16)]
# Key membership test
# has_key was removed in Python 3
# print(x.has_key("color")) # True
# print(x.has_key(3)) # False
print("color" in x)
print(3 in x)
# The line below raises KeyError (3 is not a key)
# print(x[3])
# len() works on strings, lists and dicts
print(len("abcde")) # 5
print(len(["a","b","c"])) # 3
print(len({"a":"apple", "b":"blue"})) # 2
# if, else, elif
x = 0
print("--------")
print("x = 0")
if x == 0:
    print("x は 0 です")
else:
    print("これは表示されない")
if x >= 0:
    print("x は 0 以上")
if x <= 0:
    print("x は 0 以下")
if x < 0:
    print("x は 0 より小さい")
else:
    print("こちらが表示される")
print("--------")
print("x = 0, y = 1")
x = 0
y = 1
if x != 0:
    print("これは表示されない")
elif y == 0:
    print("これも表示されない")
elif y == 1:
    print("こちらが表示される ")
else:
    print("これも表示されない")
print("--------")
print("and条件")  # "and" condition
print("x = 0, y = 1")
x = 0
y = 1
if x == 0 and y == 0:
    print("これは表示されない")
if x == 0 and y == 1:
    print("これは表示される ")
print("--------")
print("or条件")  # "or" condition
if x == 0 or y == 0:
    print("これは表示される ")
if x == 0 or y == 1:
    print("これは表示される ")
if x == 1 or y == 0:
    print("これは表示されない")
print("--------")
print("for i in [1,2,3,4,5]:")
for i in [1,2,3,4,5]:
    print(i)
print("for i in range(7):")
for i in range(7):
    print(i)
print('for i in "hello":')
for i in "hello":
    print(i)
print('for i in {"a":"apple", "b":"blue"}')
dic = {"a":"apple", "b":"blue"}
for i in dic:
    # Iterating a dict yields its keys
    print(i)
    print(dic[i])
print('for i in {"a":"apple", "b":"blue"}.values()')
for i in dic.values():
    print(i)
print("while文")  # while loop
i = 10
while i > 0:
    print(i)
    i = i - 1
print("リストのfor文でindexを取得")  # get the index in a list for-loop
for index , item in enumerate(['a','p','p','l','e']):
    print("{0} {1}".format(index, item))
print("関数")


def myfunc(x):
    # Prints (does not return) x times 10.
    print(x*10)


myfunc(3)


def myfunc2(x):
    # Returns x times 10.
    return x*10


print(myfunc2(4))


# Default argument value
def myfunc3(x = 5):
    print(x)


myfunc3() # prints 5
myfunc3(10) # prints 10


# Returning multiple values (as a tuple)
def myfunc4(x):
    # BUG FIX: the second value was ``y*3``, silently reading the module-level
    # ``y`` instead of the argument — both returned values derive from ``x``.
    return x*2, x*3


r1, r2 = myfunc4(3)
print(r1, r2) # Python 2 shows a tuple; Python 3 prints the two values
# Lambda (a one-line anonymous function, similar to a C #define macro)
triple = lambda x: x*3
print(triple(4)) # 12
print("ラムダ式はリストの中に入れることもOK")
myfuncs = [lambda x: x, lambda x: x*2, lambda x: x*3]
for func in myfuncs:
    print(func(3))
# Classes
print("クラス")


class myclass:
    """Tiny demo class: myname() stores a name on the instance."""

    def myname(self, name):
        # ``self`` is bound automatically; callers pass only ``name``.
        self.name = name


tom = myclass()
tom.myname("Tom")
print(tom.name)
print(type(tom))  # Python 2: <type 'instance'>; Python 3: <class '__main__.myclass'>
print("クラス継承")


class myExtendClass(myclass):
    """Inherits myname() and adds an address setter."""

    def myaddress(self, address):
        self.address = address


resident = myExtendClass()
resident.myname("Tom")
resident.myaddress("Tokyo")
print(resident.name, resident.address)
# GUI demo (Python 2-era Tkinter spelling), left disabled:
# import Tkinter
# window1 = Tkinter.Tk()
# window1.mainloop()
print("配列のコピー")
numbers = [10, 23, 46]
clone = numbers.copy()  # list.copy() is Python 3 only
clone[1] = -5
print(numbers)
print(clone)
|
import os
from time import sleep
import tflite_runtime.interpreter as tflite
import numpy as np
import cv2
from firebase import firebase
import pandas as pd
import json
CWD_PATH = os.getcwd()  # base directory for the generated report file
firebase_url = "firebase-url"  # NOTE(review): placeholder — set the real Firebase DB URL
firebase = firebase.FirebaseApplication(firebase_url,None)  # rebinds the imported module name to the app instance
db_id=0  # next sequential record id used by firebase_post
def make_report(data, filename):
    """Build an Excel report (data table plus line and column charts).

    ``data`` must be convertible to a DataFrame containing the columns
    "field1 count", "field2 count" and "total people". Returns True when
    the frame had rows (report written), False otherwise.
    """
    data_frame = pd.DataFrame(data)
    data_frame = data_frame.astype({"field1 count": int, "field2 count": int, "total people": int})
    writer = pd.ExcelWriter(os.path.join(CWD_PATH, filename), engine="xlsxwriter")
    start_row = 2
    data_frame.to_excel(writer, sheet_name="Sheet1", startrow=start_row)
    book = writer.book
    sheet = writer.sheets["Sheet1"]
    bold = book.add_format({'bold': True, 'size': 24})
    sheet.write('A1', 'My Report', bold)
    # Last spreadsheet row holding data (data rows + header offset).
    row_count = data_frame.shape[0] + start_row + 2
    # Line chart over column E.
    chart_line = book.add_chart({'type': 'line'})
    chart_line.add_series({'name': '=Sheet1!$E$3',
                           'categories': '=Sheet1!$A$4:$A${}'.format(str(row_count)),
                           'values': '=Sheet1!$E$4:$E${}'.format(str(row_count)),
                           })
    chart_line.set_style(10)
    sheet.insert_chart('G2', chart_line)
    # Column chart comparing columns C and D.
    chart_col = book.add_chart({'type': 'column'})
    chart_col.add_series({'name': '=Sheet1!$C$3',
                          'categories': '=Sheet1!$A$4:$A${}'.format(str(row_count)),
                          'values': '=Sheet1!$C$4:$C${}'.format(str(row_count)),
                          })
    chart_col.add_series({'name': '=Sheet1!$D$3',
                          'values': '=Sheet1!$D$4:$D${}'.format(str(row_count)),
                          })
    chart_col.set_title({'name': 'Field1 and Field2'})
    chart_col.set_x_axis({'name': 'Date id'})
    chart_col.set_y_axis({'name': 'Count'})
    sheet.insert_chart('P2', chart_col)
    # BUG FIX: removed an unused format object (red font) that was created
    # here but never applied to any cell.
    # NOTE(review): writer.save() is deprecated in recent pandas in favor of
    # writer.close(); kept as-is to stay compatible with the pandas in use.
    writer.save()
    if not data_frame.empty:
        print("report created!")
        return True
    else:
        return False
def firebase_get():
    """Fetch the full contents of the Firebase database root."""
    return firebase.get(firebase_url, None)
def firebase_post(data):
    """PATCH ``data`` under the next sequential id and advance the counter.

    Returns True when Firebase returned a result, False otherwise.
    """
    global db_id
    result = firebase.patch(firebase_url + "/" + str(db_id), data)
    db_id = db_id + 1
    # Idiom fix: the original ``if result != None ... elif result == None``
    # pair reduces to a single identity test against None.
    return result is not None
def is_field_contain_center(polygon, center_point):
    """Return True when ``center_point`` lies inside ``polygon``.

    Thin readability wrapper over ``polygon.contains``; the original
    if/else around the boolean result was redundant.
    """
    return polygon.contains(center_point)
def create_polygon(points,frame,color):
    # Draw a closed polygon outline (thickness 3) onto ``frame`` in place.
    pts = np.array(points)
    pts = pts.reshape((-1,1,2))  # the (N, 1, 2) shape cv2.polylines expects
    cv2.polylines(frame,[pts],True,color,3)
def read_labels(PATH_TO_LABELS):
    """Load one label per line from a text file, stripped of whitespace."""
    with open(PATH_TO_LABELS, 'r') as label_file:
        return [entry.strip() for entry in label_file]
def initialize_detector(model_name):
    """Load the TFLite model located next to this script and allocate its tensors."""
    model_path = os.path.join(os.path.dirname(__file__), model_name)
    interpreter = tflite.Interpreter(model_path=model_path)
    interpreter.allocate_tensors()
    return interpreter
def draw_bounding_boxes(frame,classes,xmin,xmax,ymin,ymax,color,labels):
    # Draw one labelled detection box onto ``frame`` in place.
    # NOTE(review): the label always comes from classes[0] (the first
    # detection), regardless of which box is being drawn — confirm intended.
    object_name = labels[int(classes[0])]
    label = '%s' % (object_name)
    labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
    # Keep the label inside the frame when the box touches the top edge.
    label_ymin = max(ymin, labelSize[1] + 10)
    # White backing strip behind the text, then the box outline, then the text.
    cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED)
    cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), color, 2)
    cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
def generate_detections(cv2_image, interpreter):
    # Run one TFLite inference pass over a BGR frame and return the first
    # three output tensors with the batch dimension stripped.
    # NOTE(review): assumes the model's outputs are ordered
    # (boxes, classes, scores) — this depends on the model; confirm.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # Model input resolution comes from the input tensor shape (1, H, W, C).
    height = input_details[0]['shape'][1]
    width = input_details[0]['shape'][2]
    frame_rgb = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2RGB)  # model expects RGB, OpenCV gives BGR
    frame_resized = cv2.resize(frame_rgb, (width, height))
    input_data = np.expand_dims(frame_resized, axis=0)  # add batch dimension
    val = np.reshape(frame_resized[:,:,0],-1)  # NOTE(review): unused since the constants below were hard-coded
    input_mean = 127.5 #np.mean(val)
    input_std = 127.5 #np.std(val)
    # Float models need the input normalized to roughly [-1, 1].
    floating_model = (interpreter.get_input_details()[0]['dtype'] == np.float32)
    if floating_model:
        input_data = (np.float32(input_data) - input_mean) / input_std
    interpreter.set_tensor(interpreter.get_input_details()[0]['index'], input_data)
    interpreter.invoke()
    boxes = interpreter.get_tensor(output_details[0]['index'])[0]
    classes = interpreter.get_tensor(output_details[1]['index'])[0]
    scores = interpreter.get_tensor(output_details[2]['index'])[0]
    return boxes,classes,scores
|
from unittest import TestCase, main
from socketio.namespace import BaseNamespace
from socketio.virtsocket import Socket
from mock import MagicMock
class MockSocketIOServer(object):
    """Stand-in for a SocketIO server: nothing but a socket registry."""

    def __init__(self, *args, **kwargs):
        self.sockets = {}

    def get_socket(self, socket_id=''):
        """Look up a registered socket; None when the id is unknown."""
        try:
            return self.sockets[socket_id]
        except KeyError:
            return None
class MockSocket(Socket):
    """Concrete virtsocket.Socket for tests; no behavior is overridden."""
    pass
class ChatNamespace(BaseNamespace):
    """Namespace with an ACL: only ``on_foo`` is allowed initially.

    The first positional argument selects whether get_initial_acl()
    returns a set or a list.
    """

    def __init__(self, *args, **kwargs):
        # args[0] chooses set vs. list for the initial ACL;
        # the remaining args go to BaseNamespace unchanged.
        self.use_set = args[0]
        super(ChatNamespace, self).__init__(*args[1:], **kwargs)

    def get_initial_acl(self):
        acls = ['on_foo']
        if self.use_set == True:
            return set(acls)
        else:
            return acls

    def on_foo(self):
        return 'a'

    def on_bar(self):
        return 'b'

    # NOTE: deliberately missing ``self`` — test_call_method_invalid_definition
    # relies on this malformed signature. Do not "fix" it.
    def on_baz(foo, bar, baz):
        return 'c'
class GlobalNamespace(BaseNamespace):
    """Unrestricted namespace used by TestBaseNamespace."""

    def on_woot(self):
        return ''

    def on_tobi(self):
        return ''
class TestBaseNamespace(TestCase):
    """Exercise BaseNamespace.process_packet across every packet type."""

    def setUp(self):
        server = MockSocketIOServer()
        self.environ = {}
        socket = MockSocket(server, {})
        # Spy on error() so each test can assert whether an error was raised.
        socket.error = MagicMock()
        self.environ['socketio'] = socket
        self.ns = GlobalNamespace(self.environ, '/woot')

    def test_process_packet_disconnect(self):
        pkt = {'type': 'disconnect',
               'endpoint': '/woot'
               }
        self.ns.process_packet(pkt)
        assert not self.environ['socketio'].error.called

    def test_process_packet_connect(self):
        """processing a connection packet """
        pkt = {'type': 'connect',
               'endpoint': '/tobi',
               'qs': ''
               }
        self.ns.process_packet(pkt)
        assert not self.environ['socketio'].error.called
        # processing a connection packet with query string
        pkt = {'type': 'connect',
               'endpoint': '/test',
               'qs': '?test=1'
               }
        self.ns.process_packet(pkt)
        assert not self.environ['socketio'].error.called

    def test_process_packet_heartbeat(self):
        """processing a heartbeat packet """
        pkt = {'type': 'heartbeat',
               'endpoint': ''
               }
        self.ns.process_packet(pkt)
        assert not self.environ['socketio'].error.called

    def test_process_packet_message(self):
        """processing a message packet """
        pkt = {'type': 'message',
               'endpoint': '',
               'data': 'woot'}
        data = self.ns.process_packet(pkt)
        self.assertEqual(data, pkt['data'])
        assert not self.environ['socketio'].error.called
        # processing a message packet with id and endpoint
        pkt = {'type': 'message',
               'id': 5,
               'ack': True,
               'endpoint': '/tobi',
               'data': ''}
        data = self.ns.process_packet(pkt)
        self.assertEqual(data, pkt['data'])
        assert not self.environ['socketio'].error.called

    def test_process_packet_json(self):
        """processing json packet """
        pkt = {'type': 'json',
               'endpoint': '',
               'data': '2'}
        data = self.ns.process_packet(pkt)
        self.assertEqual(data, pkt['data'])
        assert not self.environ['socketio'].error.called
        # processing json packet with message id and ack data
        pkt = {'type': 'json',
               'id': 1,
               'endpoint': '',
               'ack': 'data',
               'data': {u'a': u'b'}}
        data = self.ns.process_packet(pkt)
        self.assertEqual(data, pkt['data'])
        assert not self.environ['socketio'].error.called

    def test_process_packet_event(self):
        """processing an event packet """
        pkt = {'type': 'event',
               'name': 'woot',
               'endpoint': '',
               'args': []}
        self.ns.process_packet(pkt)
        assert not self.environ['socketio'].error.called
        # processing an event packet with message id and ack
        pkt = {'type': 'event',
               'id': 1,
               'ack': 'data',
               'name': 'tobi',
               'endpoint': '',
               'args': []}
        self.ns.process_packet(pkt)
        assert not self.environ['socketio'].error.called

    def test_process_packet_ack(self):
        """processing a ack packet """
        pkt = {'type': 'ack',
               'ackId': 140,
               'endpoint': '',
               'args': []}
        self.ns.process_packet(pkt)
        assert not self.environ['socketio'].error.called

    def test_process_packet_error(self):
        """processing error packet """
        pkt = {'type': 'error',
               'reason': '',
               'advice': '',
               'endpoint': ''}
        self.ns.process_packet(pkt)
        pkt = {'type': 'error',
               'reason': 'transport not supported',
               'advice': '',
               'endpoint': ''}
        self.ns.process_packet(pkt)
        # processing error packet with reason and advice
        pkt = {'type': 'error',
               'reason': 'unauthorized',
               'advice': 'reconnect',
               'endpoint': ''}
        self.ns.process_packet(pkt)
        # processing error packet with endpoint
        pkt = {'type': 'error',
               'reason': '',
               'advice': '',
               'endpoint': '/woot'}
        self.ns.process_packet(pkt)

    def test_process_packet_message_with_new_line(self):
        """processing a newline in a message"""
        pkt = {'type': 'message',
               'data': '\n',
               'endpoint': ''}
        self.ns.process_packet(pkt)
        assert not self.environ['socketio'].error.called

    def test_del_acl_method(self):
        pkt = {'type': 'event',
               'name': 'foo',
               'endpoint': '/chat',
               'args': []}
        message = ("Trying to delete an ACL method, but none were"
                   + " defined yet! Or: No ACL restrictions yet, why would you"
                   + " delete one?")
        try:
            self.ns.del_acl_method('on_foo')
            self.ns.process_packet(pkt)
        except ValueError as e:
            # BUG FIX: ValueError has no ``.message`` attribute on Python 3;
            # str(e) yields the message text on both Python 2 and 3.
            self.assertEqual(
                message,
                str(e),
            )
        else:
            raise Exception("""We should not be able to delete an acl that
doesn't exist""")

    def test_allowed_event_name_regex(self):
        pkt = {'type': 'event',
               'name': '$foo',
               'endpoint': '/chat',
               'args': []}
        self.ns.process_packet(pkt)
        args = ['unallowed_event_name',
                'name must only contains alpha numerical characters',
                ]
        kwargs = dict(msg_id=None, endpoint='/woot', quiet=False)
        self.environ['socketio'].error.assert_called_with(*args, **kwargs)

    def test_method_not_found(self):
        """ test calling a method that doesn't exist """
        pkt = {'type': 'event',
               'name': 'foo',
               'endpoint': '/chat',
               'args': []
               }
        self.ns.process_packet(pkt)
        kwargs = dict(
            msg_id=None,
            endpoint='/woot',
            quiet=False
        )
        self.environ['socketio'].error.assert_called_with(
            'no_such_method',
            'The method "%s" was not found' % 'on_foo',
            **kwargs
        )
class TestChatNamespace(TestCase):
    """Exercise the ACL behaviour of ChatNamespace (list-based ACL by default)."""

    def setUp(self):
        server = MockSocketIOServer()
        self.environ = {}
        socket = MockSocket(server, {})
        # Spy on error() so tests can assert on emitted errors.
        socket.error = MagicMock()
        self.environ['socketio'] = socket
        # False -> get_initial_acl() returns a list (not a set).
        self.ns = ChatNamespace(
            False,
            self.environ,
            '/chat'
        )

    def test_allowed_event(self):
        # 'on_foo' is in the initial ACL, so no error is expected.
        pkt = {'type': 'event',
               'name': 'foo',
               'endpoint': '/chat',
               'args': []}
        self.ns.process_packet(pkt)
        assert not self.environ['socketio'].error.called

    def test_blocked_event(self):
        # 'on_bar' is not in the ACL: expect method_access_denied.
        pkt = {'type': 'event',
               'name': 'bar',
               'endpoint': '/chat',
               'args': []}
        self.ns.process_packet(pkt)
        args = [
            'method_access_denied',
            'You do not have access to method "on_bar"',
        ]
        kwargs = dict(
            msg_id=None,
            endpoint='/chat',
            quiet=False
        )
        self.environ['socketio'].error.assert_called_with(*args, **kwargs)

    def test_add_acl_method(self):
        pkt = {'type': 'event',
               'name': 'bar',
               'endpoint': '/chat',
               'args': []}
        self.ns.add_acl_method('on_bar')
        self.ns.process_packet(pkt)
        assert not self.environ['socketio'].error.called

    def test_del_acl_method(self):
        pkt = {'type': 'event',
               'name': 'foo',
               'endpoint': '/chat',
               'args': []}
        self.ns.del_acl_method('on_foo')
        self.ns.process_packet(pkt)
        args = [
            'method_access_denied',
            'You do not have access to method "on_foo"',
        ]
        kwargs = dict(
            msg_id=None,
            endpoint='/chat',
            quiet=False
        )
        self.environ['socketio'].error.assert_called_with(*args, **kwargs)

    def test_lift_acl_restrictions(self):
        # After lifting restrictions both methods must be callable.
        pkt1 = {'type': 'event',
                'name': 'foo',
                'endpoint': '/chat',
                'args': []}
        self.ns.lift_acl_restrictions()
        self.ns.process_packet(pkt1)
        assert not self.environ['socketio'].error.called
        pkt2 = {'type': 'event',
                'name': 'bar',
                'endpoint': '/chat',
                'args': []}
        self.ns.process_packet(pkt2)
        assert not self.environ['socketio'].error.called

    def test_use_set_on_acl(self):
        # True -> ACL stored as a set; behaviour must match the list case.
        self.ns = ChatNamespace(
            True,
            self.environ,
            '/chat'
        )
        pkt = {'type': 'event',
               'name': 'bar',
               'endpoint': '/chat',
               'args': []}
        self.ns.add_acl_method('on_bar')
        self.ns.process_packet(pkt)
        assert not self.environ['socketio'].error.called

    def test_call_method_invalid_definition(self):
        # on_baz is deliberately defined without ``self`` (see ChatNamespace).
        pkt = {'type': 'event',
               'name': 'baz',
               'endpoint': '/chat',
               'args': []}
        self.ns.add_acl_method('on_baz')
        self.ns.process_packet(pkt)
        kwargs = dict(msg_id=None, endpoint='/chat', quiet=False)
        self.environ['socketio'].error.assert_called_with(
            "invalid_method_args",
            "The server-side method is invalid, as it doesn't "
            "have 'self' as its first argument"
            , **kwargs)


if __name__ == '__main__':
    main()
|
print('Provide answers between 1 and 10 for each of these questions:')
loan_size = int(input('Between 1 and 10, how large is the loan? '))
credit_status = int(
    input('Between 1 and 10, how good is your credit history? '))
income = int(input('Between 1 and 10, how high is your income? '))
down_payment = int(input('Between 1 and 10, how large is your down payment? '))


def _decide(loan, credit, earnings, deposit):
    """Loan-approval decision table, written as guard clauses."""
    if loan >= 5:
        # Large loans: strong credit AND income, or one of the two
        # backed by a decent down payment.
        if credit >= 7 and earnings >= 7:
            return True
        if (credit >= 7 or earnings >= 7) and deposit >= 5:
            return True
        return False
    # Small loans: poor credit is an instant no.
    if credit < 4:
        return False
    if earnings >= 7 or deposit >= 7:
        return True
    return earnings >= 4 and deposit >= 4


should_loan = _decide(loan_size, credit_status, income, down_payment)
print("Decision: yes" if should_loan else "Decision: no")
|
#!/usr/bin/env python
# coding: utf-8
# Imports
import pandas as pd
import operator
import numpy as np
import geopandas as gpd
import json
import tempfile
import os
import csv
import sys
from datetime import datetime
sys.path.insert(0,'../')
import db_connection as db_con
starttime = datetime.now()  # wall-clock start, reported at the end of the script
# Get length of table
table = 'twitter_histories_luxemburg_combined2'
last_tweet = db_con.get_last_row_id(table)  # highest row_id = number of rows to scan
def get_user_ids_batch(start_number):
    """Return the unique user_ids found in rows
    [start_number, start_number + batch_size) of the current ``table``.

    NOTE(review): reads the module globals ``table`` and ``batch_size`` at
    call time — both are (re)assigned further down the script; confirm call
    ordering before reusing this elsewhere. Table/limit values are internal,
    so the f-string SQL is not exposed to untrusted input here.
    """
    max_number = start_number+batch_size
    query = f'SELECT user_id, row_id, created_at FROM {table} WHERE row_id <{max_number} AND row_id>={start_number} ORDER BY row_id ASC LIMIT {batch_size}'
    data = db_con.multi_read_sql(query)
    user_list = list(data['user_id'].unique())
    del data  # free the batch frame before returning
    return user_list
# First table_list
start_time = '2012-07-17 22:57:06'
end_time = '2016-01-01 4:59:45'
batch_size = 250000


def _collect_unique_users():
    """Run the batched user-id extraction over the current ``table`` and
    de-duplicate the result while preserving first-seen order."""
    result = db_con.multi_with_progress(get_user_ids_batch, 31, last_tweet, batch_size)
    all_users = [item for sublist in result for item in sublist]
    del result  # drop the nested batches before de-duplicating
    unique = list(dict.fromkeys(all_users))
    del all_users
    return unique


def _write_user_list(user_ids, filename):
    """Dump the id list as a single CSV row (matches the original layout)."""
    with open(filename, 'w') as f:
        csv.writer(f).writerows([user_ids])


# BUG FIX: removed a stray trailing "|" that had been fused onto the final
# print line (a syntax error), and collapsed the duplicated collect/dump
# code for the two tables into the helpers above.
unique_users = _collect_unique_users()
_write_user_list(unique_users, 'lux_user_list_1.csv')
# Second table_list
table = "additional_data"
last_tweet = db_con.get_last_row_id(table)
unique_users2 = _collect_unique_users()
_write_user_list(unique_users2, 'lux_user_list_2.csv')
print(f'Script took: {datetime.now()-starttime}')
from django.http import HttpResponse
import re
# import os
# from django.utils import timezone
# from django.template import loader
# import random
from django.shortcuts import render
from .models import Post
def post_list(request):
    """Blog index: every post, newest first."""
    ordered_posts = Post.objects.order_by('-created_date')
    return render(
        request,
        'blog/post_list.html',
        {'posts': ordered_posts},
    )
def post_detail(request, pk):
    """Single-post page.

    BUG FIX: the original used ``Post.objects.get(id=pk)``, which raises an
    unhandled ``Post.DoesNotExist`` (HTTP 500) for an unknown pk; a missing
    post now yields a proper 404.
    """
    from django.shortcuts import get_object_or_404  # local: keeps module imports untouched
    post = get_object_or_404(Post, id=pk)
    context = {
        'post': post,
    }
    return render(request, 'blog/post_detail.html', context)
#템플릿을 가져옴 (단순 문자열이 아님)
# template = loader.get_template('blog/post_list.html')
# # 해당 템플릿을 렌더링
# context ={
# 'name': random.choice(['이정화', '박영수']),
#
# }
# content = template.render(context, request)
# return HttpResponse(content)
# context ={
# 'name': random.choice(['이정화', '박영수']),
# }
# # render 함수 알아보기 !! 위의 것과 같은 결과를 보여줌
# # loader.get_template
# # template.render
# # HttpResponse(content)
# # 위 3가지를 한번에 해주는 역할
# return render(
# request=request,
# template_name='blog/post_list.html',
# context=context
# )
# # templates/blog/post_list.html 파일의 내용을 읽어온 후,
# # 해당 내용을 아래에서 리턴해주는 HttpResponse인스턴스 생성시 인수로 넣우준다
# # os.path.abspath(__file__) <-코드가 실행중인 파일의 경로를 나타냄
# # os.path.dirname(<경로>) <-특정 경로의 상위폴더를 나타냄
# # os.path.join(<경로>, <폴더/파일명>) <- 특정 경로에서 하위폴더 또는 하위 파일을 나타냄
# # current_path = os.path.abspath(__file__)
# # p_path = os.path.dirname(current_path)
# # p_path = os.path.dirname(p_path)
# # c_path = os.path.join(p_path, 'templates')
# # c_path = os.path.join(c_path, 'blog')
# # html_path = os.path.join(c_path, 'post_list.html')
#
# views_file_path = os.path.abspath(__file__)
# blog_application_path = os.path.dirname(views_file_path)
# app_dir = os.path.dirname(blog_application_path)
# template_path = os.path.join(app_dir, 'template', 'blog', 'post_liost.html')
#
# with open(template_path, 'rt') as f:
# html_text = f.read()
# # #with문 안쓴다면
# # html_text = open(template_path, 'rt').read()
#
# return HttpResponse(html_text)
|
from django.shortcuts import render
# Create your views here.
from jsonschema import ValidationError
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import status, generics
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework_jwt.settings import api_settings
from .models import Search
from .serializers import SearchSerializer
JWT_DECODE_HANDLER = api_settings.JWT_DECODE_HANDLER  # NOTE(review): unused in this module — confirm before removing
validation_error_message = "validation error"  # reused for both the log line and the 400 body
class SaveSearchView(generics.CreateAPIView):
    """Persist a Search record for an authenticated user."""

    # BUG FIX: the originals were bare *calls* to the function-view
    # decorators — ``permission_classes([IsAuthenticated])`` and
    # ``authentication_classes((JSONWebTokenAuthentication,))`` — which are
    # no-ops inside a class body, leaving the view unprotected. DRF
    # class-based views configure these as class attributes.
    permission_classes = [IsAuthenticated]
    authentication_classes = (JSONWebTokenAuthentication,)
    serializer_class = SearchSerializer

    def post(self, request):
        """Validate and save the submitted search, returning a plain-text status."""
        try:
            search_serializer = SearchSerializer(data=request.data)
            if search_serializer.is_valid():
                search_serializer.save()
            else:
                return Response("invalid data", status=status.HTTP_400_BAD_REQUEST)
            return Response("success", status=status.HTTP_200_OK)
        except ValidationError as v:
            print(validation_error_message, v)
            return Response(validation_error_message, status=status.HTTP_400_BAD_REQUEST)
from repositories.base_repository import BaseRepository
from database.connection import db
groups_repo = BaseRepository(db['groups'])  # shared repository over the "groups" collection
|
import inspect
import logging
import socket
import traceback
from datetime import datetime
try:
import simplejson as json
except ImportError:
import json
class LogstashFormatterBase(logging.Formatter):
    """Shared machinery for the logstash JSON formatters.

    Subclasses implement get_message() to build the event dict;
    format() serializes that dict to a JSON string.
    """

    # Standard LogRecord attributes that are NOT copied into the extra
    # fields. See
    # http://docs.python.org/library/logging.html#logrecord-attributes
    skip_list = {
        "args",
        "asctime",
        "created",
        "exc_info",
        "exc_text",
        "filename",
        "funcName",
        "id",
        "levelname",
        "levelno",
        "lineno",
        "module",
        "msecs",
        "message",
        "msg",
        "name",
        "pathname",
        "process",
        "processName",
        "relativeCreated",
        "thread",
        "threadName",
        "extra",
    }

    # Values of these types are JSON-safe and pass through simplify() as-is.
    easy_types = (str, bool, float, int, type(None))

    def __init__(self, message_type="Logstash", tags=None, fqdn=False):
        self.message_type = message_type
        self.tags = tags if tags is not None else []
        # Use the fully-qualified domain name when requested.
        if fqdn:
            self.host = socket.getfqdn()
        else:
            self.host = socket.gethostname()

    def simplify(self, value):
        """Recursively reduce ``value`` to JSON-serializable primitives,
        falling back to repr() for anything exotic."""
        if isinstance(value, self.easy_types):
            return value
        elif isinstance(value, (list, tuple, set)):
            return type(value)(self.simplify(i) for i in value)
        elif isinstance(value, dict):
            return {self.simplify(k): self.simplify(v) for k, v in value.items()}
        else:
            return repr(value)

    def get_extra_fields(self, record):
        """Collect the non-standard attributes attached to ``record``,
        plus the emitting class name when it can be located on the stack."""
        fields = {}
        for key, value in record.__dict__.items():
            if key not in self.skip_list:
                fields[key] = self.simplify(value)
        frame = self.get_frame(record)
        if frame:
            cls = self.get_frame_class(frame) if False else self.get_class(frame)
            if cls:
                fields["class_name"] = cls.__module__ + "." + cls.__name__
        return fields

    @staticmethod
    def get_frame(record: logging.LogRecord):
        """Walk up the call stack to the frame that emitted ``record``;
        None when no matching frame is found."""
        frame = inspect.currentframe()
        while frame:
            frame = frame.f_back
            # BUG FIX: guard against f_back returning None — the original
            # called inspect.getframeinfo(None) once the stack was exhausted.
            if frame is None:
                return None
            frameinfo = inspect.getframeinfo(frame)
            if frameinfo.filename == record.pathname:
                return frame

    @staticmethod
    def get_class(frame):
        """Best-effort owning class of ``frame`` via its self/cls locals."""
        if "self" in frame.f_locals:
            return type(frame.f_locals["self"])
        elif "cls" in frame.f_locals:
            return frame.f_locals["cls"]

    def get_debug_fields(self, record):
        """Extra diagnostic fields added when a record carries exc_info."""
        fields = {
            "stack_trace": self.format_exception(record.exc_info),
            "lineno": record.lineno,
            "process": record.process,
            "thread_name": record.threadName,
        }
        # BUG FIX: these checks were inverted ("if not getattr(...)"), so the
        # attribute was only read when absent (raising AttributeError) and
        # skipped when present. funcName was added in Python 2.5 and
        # processName in 2.6, hence the existence checks.
        if getattr(record, "funcName", None):
            fields["funcName"] = record.funcName
        if getattr(record, "processName", None):
            fields["processName"] = record.processName
        return fields

    @classmethod
    def format_source(cls, message_type, host, path):
        """Build the "type://host/path" source URI."""
        return "%s://%s/%s" % (message_type, host, path)

    @classmethod
    def format_timestamp(cls, time):
        """UNIX timestamp -> ISO-8601 UTC string with a millisecond Z suffix."""
        # NOTE(review): utcfromtimestamp is deprecated since Python 3.12;
        # datetime.fromtimestamp(time, tz=timezone.utc) is the modern
        # spelling and produces the same strftime output.
        tstamp = datetime.utcfromtimestamp(time)
        return "%s.%03dZ" % (
            tstamp.strftime("%Y-%m-%dT%H:%M:%S"),
            tstamp.microsecond / 1000,
        )

    @classmethod
    def format_exception(cls, exc_info):
        """Render exc_info as a traceback string ('' when absent)."""
        return "".join(traceback.format_exception(*exc_info)) if exc_info else ""

    @classmethod
    def serialize(cls, message):
        """JSON-encode the event dict."""
        return json.dumps(message)

    def get_message(self, record: logging.LogRecord) -> dict:
        """Subclass hook: build the event dict for ``record``."""
        raise NotImplementedError()

    def format(self, record: logging.LogRecord) -> str:
        message = self.get_message(record)
        return self.serialize(message)
class LogstashFormatterVersion0(LogstashFormatterBase):
    """Format records using the version-0 ("@"-prefixed) logstash layout."""

    def get_message(self, record):
        # Create message dict
        message = {
            "@timestamp": self.format_timestamp(record.created),
            "@message": record.getMessage(),
            "@source": self.format_source(
                self.message_type, self.host, record.pathname
            ),
            "@source_host": self.host,
            "@source_path": record.pathname,
            "@tags": self.tags,
            "@type": self.message_type,
            "@fields": {
                "levelname": record.levelname,
                "logger": record.name,
            },
        }
        # Add extra fields (v0 nests them under "@fields")
        message["@fields"].update(self.get_extra_fields(record))
        # If exception, add debug info
        if record.exc_info:
            message["@fields"].update(self.get_debug_fields(record))
        return message
class LogstashFormatterVersion1(LogstashFormatterBase):
    """Format records using the flat version-1 logstash layout."""

    def get_message(self, record):
        """Assemble the v1 event dict for ``record``."""
        event = {
            "@timestamp": self.format_timestamp(record.created),
            "message": record.getMessage(),
            "host": self.host,
            "path": record.pathname,
            "tags": self.tags,
            "type": self.message_type,
            # Extra Fields
            "level": record.levelname,
            "logger_name": record.name,
        }
        # Fold in record extras, then the debug details when the record
        # carried exception info.
        event.update(self.get_extra_fields(record))
        if record.exc_info:
            event.update(self.get_debug_fields(record))
        return event


# Map the wire-format version number to its formatter class.
versions = {0: LogstashFormatterVersion0, 1: LogstashFormatterVersion1}
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import wx
from MenuAbstraction import MenuAbstraction
from MenuPresentation import MenuPresentation
from MenuInteraction import MenuInteraction
from MenuController import MenuController
class BattleshipApp(wx.App):
    """wx application entry point wiring the menu's PAC triad."""

    def OnInit(self):
        # Build the Presentation-Abstraction-Control trio for the menu.
        abstraction = MenuAbstraction()
        presentation = MenuPresentation()
        interaction = MenuInteraction()
        # NOTE(review): ``controller`` is never referenced afterwards;
        # presumably constructing it wires up the event handlers — confirm
        # before removing the assignment.
        controller = MenuController(abstraction, presentation, interaction)
        presentation.Show()
        self.SetTopWindow(presentation)
        #import wx.lib.inspection
        #wx.lib.inspection.InspectionTool().Show()
        return True


if __name__ == '__main__':
    app = BattleshipApp(redirect=False)
    app.MainLoop()
|
import http.server
import requests
import json
import time
import secrets
import threading
import logging
import sys
from socketserver import ThreadingMixIn
from http.server import HTTPServer
from http.server import urllib
from enum import IntEnum
from threading import Event
from argparse import ArgumentParser
from time import strftime
# Command line arguments parsing
parser = ArgumentParser()
requiredArgs = parser.add_argument_group('Required arguments')
# The only mandatory argument: the remote HE-NS server address.
requiredArgs.add_argument("-s", "--serverHeNsIp", action="store", required = True, type = str,
                          help="IP address of remote HE-NS server")
parser.add_argument("-b", "--bindedIp", action="store", default="127.0.0.1", type = str,
                    help="Local binded IP address to listen for client requests - default is localhost only")
parser.add_argument("-l", "--localPort", action="store", default = 80, type = int,
                    help="set local binding ip port - default 80")
parser.add_argument("-r", "--remotePort", action="store", default = 5000, type = int,
                    help="set remote HE-NS server listening port - default 5000")
parser.add_argument("--serverHeNsPath", action="store", default = "/get/ip/", type = str,
                    help="set HE-NS server path for restAPI get request")
# Tor proxy settings for anonymous outbound access.
parser.add_argument("--torProxySchema", action="store", default = "socks5", type = str,
                    help="Schema for tor proxy protocol - Default socks5",
                    choices = ["socks5","http","https"])
parser.add_argument("--torProxyPort", action="store", default = 9150, type = int,
                    help="set port for local tor proxy for anonymous internet access - Default 9150")
parser.add_argument("--torProxyIpAddr", action="store", default = "127.0.0.1", type = str,
                    help="set IP address for tor proxy, in case resides on other LAN machine - Default localhost")
args = parser.parse_args()
# Start Region - Globals
def now():
return time.time()
BINDED_IP = args.bindedIp
HENS_IP_ADDR = 'http://' + args.serverHeNsIp
ON_PORT = args.localPort
HENS_PORT = args.remotePort
HENS_RELATIVE_PATH = args.serverHeNsPath
TOR_PROXY_SCHEMA = args.torProxySchema + '://'
TOR_PROXY_PORT = args.torProxyPort
TOR_PROXY_IP_ADDR = args.torProxyIpAddr
NEW_REQUEST_ID = '0'
INTERNAL_TOR_ENDPOINT = TOR_PROXY_SCHEMA + TOR_PROXY_IP_ADDR + ':' + str(TOR_PROXY_PORT)
HENS_GET_IP_ENDPOINT = HENS_IP_ADDR + ":" + str(HENS_PORT) + HENS_RELATIVE_PATH
HENS_TIMEOUT = 30 #seconds
PERIODIC_CLEANUP_TIMER = 60 #Every 60 seconds will cleanup orphaned requests from requests dict
MIN_TO_SEC = 60
ORPHANED_REQUEST_BREACH_TIME = 2 * MIN_TO_SEC
TIME_NEXT = now()
LOG_FILE_NAME_PREFIX = 'HeNsClient'
NUM_PURGE = 1
#for future feature support
# Request kinds understood by the client (reserved for future feature support).
RequestType = IntEnum('RequestType', [
    ('SIMPLE_REQUEST', 1),
    ('REQUEST_TYPE_MAX', 2),
    ('ERROR', -1),
])
# Processing states of a request; the numeric value doubles as a progress
# percentage reported to the frontend (0..100).
ClientState = IntEnum('ClientState', [
    ('UNINITIALIZED', 0),
    ('RECEIVE_REQUEST', 25),
    ('ENCRYPT_REQUEST', 50),
    ('QUERY_HE_NS_SERVER', 75),
    ('DECRYPT_REQUEST', 100),
    ('CLIENT_STATE_MAX', 101),
    ('ERROR', -1),
])
#####################################
###Start Region - Logging: ####
#####################################
###############################################################################
#Each class needs to inherits this class in order to implement its own logger.#
#Usage self.log.%log level%("Log message") #
#For example self.log.info("This is an example log message!") #
#Logging is both for console and for for file with higher verbosity #
###############################################################################
class LoggerMixIn(object):
    """Mix-in granting subclasses a per-class logger via ``self.log``.

    Usage: ``self.log.info("message")`` — the logger name is the concrete
    class name, so log lines are attributable to their emitting class.
    """
    @property
    def log(self):
        # The original wrapped the single class name in '.'.join([...]),
        # which is a no-op on a one-element list — use the name directly.
        return LoggerKeeper().getLogger(self.__class__.__name__)
###############################################################################
#Keeps the instances of each class intrinsic Logger. #
#Class is a singleton object #
#Class contains a thread-safe synchronized dict to prevent logger duplicates #
###############################################################################
class LoggerKeeper(object):
    """Singleton keeper of per-class logger instances.

    Holds a dict of already-configured loggers, guarded by a mutex.
    BUG FIX: the original used ``threading.Event`` as a pseudo-lock —
    ``Event.set()/clear()`` never blocks, so it provided no mutual
    exclusion at all; replaced with a real ``threading.Lock``.
    """
    class __LoggerKeeper():
        def __init__(self):
            self.singleUsageLock = threading.Lock()  # guards self.loggers
            self.loggers = dict()
            #Config root logger ---> Config all other loggers
            rootLogger = logging.getLogger()
            self.configLogger(rootLogger)
        def configLogger(self, newLogger):
            """Attach a DEBUG-level file handler and an INFO-level console handler."""
            newLogger.setLevel(logging.DEBUG)
            formatter = logging.Formatter('%(asctime)s [%(name)-18s][%(threadName)-15s][%(levelname)-5s]: %(message)s')
            # Add verbose logging to file
            fileHandler = logging.FileHandler(strftime(LOG_FILE_NAME_PREFIX + "_%H_%M_%m_%d_%Y.log"))
            fileHandler.setLevel(logging.DEBUG)
            fileHandler.setFormatter(formatter)
            newLogger.addHandler(fileHandler)
            # Add logging to console
            consoleHandler = logging.StreamHandler()
            consoleHandler.setFormatter(formatter)
            consoleHandler.setLevel(logging.INFO)
            newLogger.addHandler(consoleHandler)
        @staticmethod
        def aquireLock():
            LoggerKeeper.loggerHolderInstance.singleUsageLock.acquire()
        @staticmethod
        def releaseLock():
            LoggerKeeper.loggerHolderInstance.singleUsageLock.release()
    def __init__(self):
        if not LoggerKeeper.loggerHolderInstance:
            LoggerKeeper.loggerHolderInstance = LoggerKeeper.__LoggerKeeper()
    loggerHolderInstance = None
    #Caution: Assumes dictionary is locked by caller
    def doesLoggerExists(self, name):
        return name in self.loggerHolderInstance.loggers
    def getLogger(self, name):
        """Return (creating on first use) the logger registered under ``name``."""
        if self.loggerHolderInstance is None:
            # BUG FIX: assign to the *class* attribute; the original assigned to
            # an instance attribute, leaving LoggerKeeper.loggerHolderInstance
            # None for the static lock helpers above.
            LoggerKeeper.loggerHolderInstance = LoggerKeeper.__LoggerKeeper()
        self.loggerHolderInstance.aquireLock()
        try:
            if self.doesLoggerExists(name):
                return self.loggerHolderInstance.loggers[name]
            else:
                return self.addLogger(name)
        except Exception:
            # Fall back to the root logger rather than crashing the caller.
            rootLogger = logging.getLogger()
            rootLogger.error("Expected logger was not found in logger dict, returning root logger!")
            return rootLogger
        finally:
            self.loggerHolderInstance.releaseLock()
    def addLogger(self, name):
        """Create, register and return a new child logger (propagates to root)."""
        newLogger = logging.getLogger(name)
        self.loggerHolderInstance.loggers[name] = newLogger
        return newLogger
#################################################################################
###A class which holds the request context and logic for it's own handling ###
#################################################################################
class Request(LoggerMixIn):
    """Context of a single client request plus the mini state machine that moves
    it through: receive -> HE-encrypt -> query HE-NS server -> decrypt -> reply.
    """
    def __init__(self, reqId, incomingMsg):
        """Capture the incoming JSON message and initialise all bookkeeping fields."""
        self.log.info("Generating new request. Given ID: " + reqId)
        self.requestId = reqId
        self.incomingMessage = incomingMsg #The incoming JSON
        self.heNsResponseCode = None #“Never make predictions, especially about the future.” - Yogi Berra
        self.responseCode = None #“Never make predictions, especially about the future.” - Yogi Berra
        self.queryText = incomingMsg['data'] #Actual query text
        self.requestType = incomingMsg['type'] #Query type - for future support(e.g. simple query / wildcard query etc.)
        self.messageForClient = "" #Message to display on frontend
        self.encryptedResultsList = None #List of the encrypted result(or results for wildcard query once implemented)
        self.decryptedResultsList = None #List of the decrypted result(or results for wildcard query once implemented)
        self.heNsQuery = None #encrypted query to send to HE-NS server
        self.creationTime = now()
        self.errorOccurred = False
        self.unauthorisedRequest = False
        self.previousState = ClientState.UNINITIALIZED
        self.currentState = ClientState.UNINITIALIZED
    #Process request context to generate the proper reply
    #Returns encoded
    def toJsonReply(self):
        """Serialise progress (and, once decryption finished, results) to JSON bytes."""
        jsonMessage = {'requestId': str(self.requestId), 'progress': int(self.currentState),
        'text': self.messageForClient}
        if self.currentState == ClientState.DECRYPT_REQUEST:
            jsonMessage['resultsList'] = self.decryptedResultsList
        return json.dumps(jsonMessage).encode()
    ###################################################
    ###Start Region - Request parsing and processing###
    ###################################################
    def handle(self):
        """Advance one state according to the incoming message and run its handler.

        Returns True when the step succeeded; False on error or bad transition.
        """
        isSuccess = False
        processingState = self.getUpdatedProcessingState()
        if processingState == ClientState.ERROR:
            return isSuccess
        if processingState == ClientState.RECEIVE_REQUEST:
            isSuccess = self.handleInitialRequest()
        elif processingState == ClientState.ENCRYPT_REQUEST:
            isSuccess = self.handleHeEncryption()
        elif processingState == ClientState.QUERY_HE_NS_SERVER:
            isSuccess = self.handleQueryingHeNsServer()
        elif processingState == ClientState.DECRYPT_REQUEST:
            isSuccess = self.handleHeDecryption()
        self.previousState = self.currentState
        return isSuccess
    def getUpdatedProcessingState(self):
        """Refresh currentState from the incoming message; ERROR is sticky."""
        if self.currentState == ClientState.ERROR:
            return ClientState.ERROR
        self.updateClientState()
        return self.currentState
    def getNewStateByStatusUpdate(self):
        """Map the client-reported state ('data' field) to the next processing state."""
        if self.incomingMessage['data'] == ClientState.RECEIVE_REQUEST:
            return ClientState.ENCRYPT_REQUEST
        elif self.incomingMessage['data'] == ClientState.ENCRYPT_REQUEST:
            return ClientState.QUERY_HE_NS_SERVER
        elif self.incomingMessage['data'] == ClientState.QUERY_HE_NS_SERVER:
            return ClientState.DECRYPT_REQUEST
        return ClientState.ERROR
    def updateClientState(self):
        """Set currentState from the message name, then validate the transition."""
        if self.incomingMessage['name'] == 'query':
            self.currentState = ClientState.RECEIVE_REQUEST
        elif self.incomingMessage['name'] == 'statusUpdate':
            self.currentState = self.getNewStateByStatusUpdate()
        else:
            self.currentState = ClientState.ERROR
        #verify the client sent the expected message type
        if not self.isRequestStateValid():
            self.unauthorisedRequest = True
            self.currentState = ClientState.ERROR
            return
    ##############################################
    ###Start Region - Message handling:###########
    ##############################################
    def handleInitialRequest(self):
        """First step: acknowledge the query; actual work happens on later updates."""
        self.messageForClient = "Encrypting Query with HE encryption"
        self.responseCode = requests.codes.ok
        self.log.info("Replying to front-end: Going to handle the new request")
        return True
    def handleHeEncryption(self):
        """Encrypt the query text homomorphically (currently simulated)."""
        self.log.info("Request Id: " + self.requestId +" Going to encrypt query with HE encryption")
        ######Encryption goes here#############
        time.sleep(2) # simulate action#######
        self.heNsQuery = "$RSLAF#@!KFMAS_#$)"##
        #######################################
        self.log.info("Request Id: " + self.requestId +" - Query encryption succeded")
        self.messageForClient = "Querying HE-NS server"
        return True
    def handleQueryingHeNsServer(self):
        """Query the HE-NS server through the Tor proxy and keep its JSON result.

        Sets heNsResponseCode; on any failure flags errorOccurred and returns False.
        """
        isHeNsQuerySuccess = False
        self.log.info("Request Id: " + self.requestId +" - Going to query HE-NS server")
        try:
            r = requests.get(
            url = HENS_GET_IP_ENDPOINT + urllib.parse.quote_plus(self.queryText),
            proxies = self.getTorProxyConfigurationDict(),
            timeout = HENS_TIMEOUT
            )
            self.heNsResponseCode = r.status_code
            if self.heNsResponseCode == requests.codes.ok:
                self.log.info("Request Id: " + self.requestId +" - SUCCESS: HE-NS responded wth return code " + str(self.heNsResponseCode))
                isHeNsQuerySuccess = True
            else:
                self.log.error("Request Id: " + self.requestId +" - HE-NS responded wth return code " + str(self.heNsResponseCode))
                self.responseCode = self.heNsResponseCode
                self.errorOccurred = True
                return isHeNsQuerySuccess
            self.log.info("Request Id: " + self.requestId +" - Parsing server JSON response")
            self.encryptedResultsList = r.json()
            # BUG FIX: was a bare print(); route the payload through the logger.
            self.log.debug(self.encryptedResultsList)
            self.messageForClient = "Decrypting HE-NS server response"
        except requests.exceptions.Timeout:
            self.heNsResponseCode = requests.codes.request_timeout
            self.log.error("Request Id: " + self.requestId +" - Request timeout while querying HE-NS server")
            self.messageForClient = "Request timeout while querying HE-NS server"
            self.errorOccurred = True
            isHeNsQuerySuccess = False
        except Exception:
            # Connection failure before a response, or malformed JSON afterwards.
            if self.heNsResponseCode is None or self.heNsResponseCode == requests.codes.ok:
                self.log.error("Request Id: " + self.requestId +" - Exception was hit while querying HE-NS server")
                self.heNsResponseCode = requests.codes.internal_server_error
            if self.encryptedResultsList is None:
                self.heNsResponseCode = requests.codes.bad_request
                self.messageForClient = "HE-NS server provided malformed response"
            self.errorOccurred = True
            isHeNsQuerySuccess = False
        return isHeNsQuerySuccess
    @staticmethod
    def getTorProxyConfigurationDict():
        """Proxy mapping routing both http and https through the local Tor proxy."""
        return dict(http=INTERNAL_TOR_ENDPOINT, https=INTERNAL_TOR_ENDPOINT)
    def handleHeDecryption(self):
        """Decrypt the server's encrypted results (currently simulated pass-through)."""
        self.log.info("Request Id: " + self.requestId +" - Decrypting server's response")
        time.sleep(1) # simulate action
        self.decryptedResultsList = self.encryptedResultsList #after decryption implementation place decrypt func here.
        self.log.info("Request Id: " + self.requestId +" - Server's response decrypted")
        return True
    #####################################
    ###Start Region - Error handling:####
    #####################################
    def determineErrorAndErrorMessage(self):
        """Derive responseCode and user-facing message from the HE-NS status code."""
        if self.heNsResponseCode is None or self.heNsResponseCode == requests.codes.ok:
            self.responseCode = requests.codes.internal_server_error
            self.messageForClient = "Unexpected client error occurred"
        elif self.heNsResponseCode < requests.codes.bad_request:
            self.responseCode = requests.codes.internal_server_error
            self.messageForClient = "HE-NS server returned illegal response code: " + str(self.heNsResponseCode)
        else:
            self.responseCode = self.heNsResponseCode
            if self.messageForClient is None or self.messageForClient == '':
                self.messageForClient = "Error getting results from HE-NS server"
        self.log.error("Request ID " + self.requestId + ": " + self.messageForClient)
    def updateErrorCodeAndErrorMessage(self):
        """Fill responseCode/messageForClient according to the kind of failure."""
        if self.unauthorisedRequest:
            # BUG FIX: Logger.warn is a deprecated alias of Logger.warning.
            self.log.warning("Request ID " + self.requestId + ": Received request with unexpected state from client!")
            self.responseCode = requests.codes.forbidden
            self.messageForClient = "Unauthorized Request"
        elif self.errorOccurred:
            self.determineErrorAndErrorMessage()
    def didAnErrorOccur(self):
        return self.errorOccurred or self.unauthorisedRequest
    def isRequestStateValid(self):
        """Allow only forward transitions of the request state machine."""
        requestStateValid = False
        #Validate allowed transitions in server "mini state machine"
        if (self.currentState == ClientState.RECEIVE_REQUEST and self.previousState == ClientState.UNINITIALIZED ) or \
        (self.currentState == ClientState.ENCRYPT_REQUEST and self.previousState == ClientState.RECEIVE_REQUEST ) or \
        (self.currentState == ClientState.QUERY_HE_NS_SERVER and self.previousState == ClientState.ENCRYPT_REQUEST ) or \
        (self.currentState == ClientState.DECRYPT_REQUEST and self.previousState == ClientState.QUERY_HE_NS_SERVER):
            requestStateValid = True
        return requestStateValid
###################################################
###A class which holds all concurrent requests:####
###This is a singleton which saves req contexts####
###MUST be synchronized to be thread-safe ####
###################################################
class RequestManager(LoggerMixIn):
    """Singleton registry of all concurrent in-flight requests.

    The request dict is guarded by a mutex. BUG FIX: the original used
    ``threading.Event`` as a pseudo-lock — ``set()/clear()`` never blocks,
    so it provided no mutual exclusion; replaced with ``threading.Lock``.
    """
    class __RequestManager():
        def __init__(self):
            self.singleUsageLock = threading.Lock()  # guards self.requests
            self.requests = dict()
        @staticmethod
        def acquireLock():
            RequestManager.instance.singleUsageLock.acquire()
        @staticmethod
        def releaseLock():
            RequestManager.instance.singleUsageLock.release()
    instance = None
    def __init__(self):
        if not RequestManager.instance:
            RequestManager.instance = RequestManager.__RequestManager()
    #Caution: Assumes dictionary is locked by caller
    def doesRequestExists(self, requestId):
        return requestId in self.instance.requests
    def getRequestById(self, requestId):
        """Return the request registered under requestId, or None when unknown."""
        self.instance.acquireLock()
        try:
            if self.doesRequestExists(requestId):
                return self.instance.requests[requestId]
        finally:
            self.instance.releaseLock()
        return None
    def addRequest(self, requestHandler):
        """Register a handler under its ID; False when the ID is already taken."""
        self.instance.acquireLock()
        try:
            requestKey = str(requestHandler.requestId)  # renamed from `id` (shadowed builtin)
            if self.doesRequestExists(requestKey):
                self.log.error("Request already exists: " + requestKey)
                return False
            self.log.info("Inserting to dict request : " + requestKey )
            self.instance.requests[requestKey] = requestHandler
        finally:
            self.instance.releaseLock()
        return True
    def generateNewRequest(self, incomingMessage):
        """Create and register a fresh Request; None when registration failed."""
        newRequestId = self.generateNewRequestId()
        requestHandler = Request(newRequestId, incomingMessage)
        if not self.addRequest(requestHandler):
            return None
        return requestHandler
    def generateNewRequestId(self):
        """Cryptographically-random, URL-safe request identifier."""
        requestId = secrets.token_urlsafe(20)
        return requestId
    def removeRequest(self, requestId):
        """Delete a request by ID; '-1' (temporary error handler) is never stored."""
        removalSuccessful = False
        self.instance.acquireLock()
        try:
            if requestId == '-1':
                return removalSuccessful
            self.log.debug("Removing from dict request : " + requestId)
            if self.doesRequestExists(requestId):
                del self.instance.requests[requestId]
                removalSuccessful = True
            else:
                self.log.error("Failed to remove from dict request : " + requestId)
        finally:
            self.instance.releaseLock()
        return removalSuccessful
    def clearRequests(self):
        """Drop every registered request (now performed under the lock)."""
        self.instance.acquireLock()
        try:
            self.instance.requests.clear()
        finally:
            self.instance.releaseLock()
    def purgeOrphanedRequests(self):
        self.log.debug("Purging orphaned requests")
        self.purgeOrphanedRequestsFromDict()
    def getOrphanedRequestIDs(self):
        #dictionary size cannot be changed while iterating over it.
        #In order to prevent a runtime error if a request is added or deleted from another thread we:
        # 1.Lock the dictionary while collecting all orphaned request IDs.
        # 2.Remove all orphaned requests after dict iteration completion
        #This should be extremely quick so it won't raise a performance issue here.
        orphanedRequestsList = []
        self.instance.acquireLock()
        try:
            for requestId, request in self.instance.requests.items():
                self.addRequestToOrphanedListIfExpired(orphanedRequestsList, requestId, request)
        finally:
            self.instance.releaseLock()
        return orphanedRequestsList
    def addRequestToOrphanedListIfExpired(self, orphanedRequestsList, requestId, request):
        """Collect requestId when the request outlived ORPHANED_REQUEST_BREACH_TIME."""
        requestAgeInSeconds = now() - request.creationTime
        if requestAgeInSeconds > ORPHANED_REQUEST_BREACH_TIME:
            orphanedRequestsList.append(requestId)
    def purgeOrphanedRequestsFromDict(self):
        """Remove every expired request, logging each removal."""
        orphanedRequestsList = self.getOrphanedRequestIDs()
        numOrphanedRequestsToPurge = len(orphanedRequestsList)
        if numOrphanedRequestsToPurge > 0:
            # BUG FIX: Logger.warn is a deprecated alias of Logger.warning.
            self.log.warning("Purging " + str(numOrphanedRequestsToPurge) + " orphaned requests from requests dictionary")
        else:
            self.log.debug("No orphaned requests - Life is good!")
        for orphanedRequestId in orphanedRequestsList:
            removalString = "Purged request: " if self.removeRequest(orphanedRequestId) else "Request was already removed: "
            self.log.warning("\t" + removalString + orphanedRequestId)
#######################################################################################
###This is where the threading magic happens - Using Threading Mix in #
###This enables the Python HTTP simple server to serve multiple request simultaneously#
#######################################################################################
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer, LoggerMixIn):
    """HTTPServer that serves each request on its own thread (via ThreadingMixIn),
    with per-class logging provided by LoggerMixIn."""
    pass
######################################################################################
###Server Handler - Class which handles requests from the client(s) ##
###For each transaction between single/multi clients a new server thread is created ##
###Retrieves/Creates a request handler ##
###Init handling of request ##
###Responsed to frontent with progress/ results ##
######################################################################################
class ServerHandler(http.server.SimpleHTTPRequestHandler, LoggerMixIn):
    """Handles frontend POSTs: looks up / creates the request context, advances
    it one state, and replies with a progress or error JSON.
    """
    def do_POST(self):
        isSuccess = False
        try:
            request = self.getUpdatedRequestHandler()
            if request is not None:
                isSuccess = request.handle()
        except Exception:
            request = self.makeTempErroneousRequestHandler()
        self.respondToFrontEnd(request, isSuccess)
    def respondToFrontEnd(self, request, isSuccess):
        # BUG FIX: a None handler previously reached handleError() and crashed —
        # handleInternalErrorsIfOccurred only rebound its *local* `request`, and
        # with isSuccess False it was short-circuited and never even called.
        if request is None:
            request = self.makeTempErroneousRequestHandler()
            isSuccess = False
        isSuccess = isSuccess and self.handleInternalErrorsIfOccurred(request)
        if isSuccess:
            self.handleSuccess(request)
        else:
            self.handleError(request)
    def handleInternalErrorsIfOccurred(self, request):
        """Pre-reply sanity checks; returns False when an internal error is detected."""
        isSuccess = True
        #The below should never happen, but here to make sure reply on error always works
        if request is None:
            self.log.error("Since when did pigs started to fly? THIS SHOULD NEVER HAPPEN! if we're here something is severly wrong")
            return False
        if request.currentState == ClientState.QUERY_HE_NS_SERVER and request.heNsResponseCode is None:
            request.heNsResponseCode = requests.codes.internal_server_error
            isSuccess = False
        return isSuccess
    def handleSuccess(self, successfulRequest):
        """Reply with the request's progress JSON; drop it once fully decrypted."""
        self.log.info("Request ID: "+ successfulRequest.requestId +" - Responding success to client")
        self.send_response(successfulRequest.responseCode)
        self.end_headers()
        self.wfile.write(successfulRequest.toJsonReply())
        if successfulRequest.currentState == ClientState.DECRYPT_REQUEST:
            RequestManager().removeRequest(successfulRequest.requestId)
    def handleError(self, erroneousRequest):
        """Reply with an error JSON and drop the request from the registry."""
        self.log.error("Responding to frontend on error!")
        erroneousRequest.updateErrorCodeAndErrorMessage()
        self.send_response(erroneousRequest.responseCode)
        self.end_headers()
        self.wfile.write(erroneousRequest.toJsonReply())
        RequestManager().removeRequest(erroneousRequest.requestId)
    @staticmethod
    def generateIncomingError():
        """Synthetic incoming message used to build a temporary error handler."""
        jsonErrorMessage = '{ "requestId" : -1, "data" : -1, "type" : -1 }'
        return json.loads(jsonErrorMessage)
    def parseIncomingMessage(self):
        """Read and JSON-decode the POST body (Content-Length delimited)."""
        content_length = int(self.headers['Content-Length'])
        body = self.rfile.read(content_length)
        decodedMessage = body.decode('utf-8')
        self.log.info("Received from client: " + decodedMessage)
        return json.loads(decodedMessage)
    def getUpdatedRequestHandler(self):
        """Return the handler for this message — new or existing — or a temporary
        error handler when lookup/creation fails.

        BUG FIX: a failed *new* request (generateNewRequest -> None) previously
        slipped through as None; the None check now covers both paths.
        """
        incomingMessage = self.parseIncomingMessage()
        if incomingMessage is None:
            self.log.error("Could not parse request")
            return self.makeTempErroneousRequestHandler()
        requestId = incomingMessage['requestId']
        if requestId == NEW_REQUEST_ID:
            requestHandler = RequestManager().generateNewRequest(incomingMessage)
        else:
            requestHandler = RequestManager().getRequestById(requestId)
            if requestHandler is not None:
                requestHandler.incomingMessage = incomingMessage
        if requestHandler is None:
            self.log.error("Failed to create or get the request's handler")
            return self.makeTempErroneousRequestHandler()
        return requestHandler
    def makeTempErroneousRequestHandler(self):
        """Build an unregistered Request (ID '-1') primed to reply with HTTP 500."""
        self.log.error("Generating a temporary errored request")
        erroneousRequestHandler = Request('-1', self.generateIncomingError())
        erroneousRequestHandler.errorOccurred = True
        erroneousRequestHandler.currentState = ClientState.ERROR
        erroneousRequestHandler.responseCode = requests.codes.internal_server_error
        self.log.debug("Errored request generated")
        return erroneousRequestHandler
#####################################
###Start Region - Periodic tasks:####
#####################################
#This method inits a periodic requests dictionary cleanup for orphaned requests
#this is done by calling the request manager to cleanup the dictionary and schedule it again according to the timer repeatedly
def initPeriodicRequestsDictCleaner():
    """Purge orphaned requests once, then re-schedule the next purge tick."""
    RequestManager().purgeOrphanedRequests()
    scheduleExpiredRequestsPurging()
def scheduleExpiredRequestsPurging():
    """Arm a daemon Timer to fire PERIODIC_CLEANUP_TIMER seconds after the
    previous tick (anchored to TIME_NEXT, so the schedule does not drift).
    """
    global TIME_NEXT
    global NUM_PURGE
    TIME_NEXT = TIME_NEXT + PERIODIC_CLEANUP_TIMER
    periodicCleanupThread = threading.Timer(TIME_NEXT - now(), initPeriodicRequestsDictCleaner)
    # BUG FIX: setDaemon()/setName() are deprecated aliases (since Python 3.10);
    # assign the daemon/name attributes directly.
    periodicCleanupThread.daemon = True
    periodicCleanupThread.name = 'Sched-Purge-' + str(NUM_PURGE)
    periodicCleanupThread.start()
    # Wrap the counter so thread names stay short.
    NUM_PURGE = NUM_PURGE + 1 if NUM_PURGE < 999 else 1
def printRunningConfiguration(httpd):
    """Log the effective runtime configuration banner at startup."""
    logger = httpd.log
    logger.info("Dynamic server config:")
    logger.info("\t\tIP:Port socket to serve on:\t" + "http://" + BINDED_IP + ":" + str(ON_PORT))
    logger.info("\t\tLocal Tor Proxy endpoint:\t" + INTERNAL_TOR_ENDPOINT)
    logger.info("\t\tHE-NS server endpoint:\t\t" + HENS_GET_IP_ENDPOINT)
    # Three blank lines, the banner, three blank lines — as in the original.
    for _ in range(3):
        logger.info("")
    logger.info("Let the fun begin!")
    for _ in range(3):
        logger.info("")
#####################################
###Start Region - HTTP server main:##
#####################################
########################################
###Serves the HTTP requests ##
########################################
# BUG FIX: HTTPServer's third positional parameter is the boolean
# bind_and_activate; passing the LoggerMixIn class there was wrong and only
# "worked" because a class object is truthy. Rely on the default (True).
multiThreadedHttpServer = ThreadingSimpleServer((BINDED_IP, ON_PORT), ServerHandler)
with multiThreadedHttpServer as httpd:
    sa = httpd.socket.getsockname()
    httpd.log.info("Serving HTTP on " + str(sa[0]) + " port "+ str(sa[1]) + "...")
    printRunningConfiguration(httpd)
    scheduleExpiredRequestsPurging()
    httpd.serve_forever()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import login as auth_login, authenticate, update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
UserCreationForm,
UserChangeForm,
PasswordChangeForm
)
from rest_framework.authtoken.models import Token
from django.template import RequestContext
from django.conf import settings
from django.shortcuts import render_to_response, render, redirect
from acct.forms import ( EditAccountForm, SignUpForm,
ProfileForm
)
from acct.models import UserProfile
from django.views.generic import (TemplateView,ListView,
DetailView,CreateView,
UpdateView,DeleteView)
# Create your views here.
def login(request):
    """Authenticate the POSTed credentials and open a session on success.

    Falls through to the login page on GET, bad credentials, or an
    inactive account.
    """
    username = password = ''
    if request.method == 'POST':  # explicit method check (request.POST is falsy when empty)
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(username=username, password=password)
        if user is not None and user.is_active:
            auth_login(request, user)
            # SECURITY FIX: the raw password was previously passed into the
            # template context; never echo credentials back to templates.
            return render(request, "acct/home.html", {'username': username})
    return render(request, 'acct/login.html', {})
def view_profile(request):
    """Render the profile page for the currently logged-in user."""
    profile = UserProfile.objects.get(user=request.user)
    return render(request, 'acct/profile.html', {'form': profile})
def edit_profile(request):
    """Display and process the profile edit form.

    GET renders a form pre-filled from the user's profile; a valid POST
    saves and redirects to the profile page; an invalid POST re-renders
    the bound form so validation errors are shown.
    """
    if request.method == 'POST':
        form = ProfileForm(request.POST, instance=request.user.userprofile)
        if form.is_valid():
            profile = form.save(commit=False)
            profile.user = request.user
            profile.save()
            return redirect(reverse('acct:profile'))
    else:
        current = UserProfile.objects.get(user=request.user)
        form = ProfileForm(instance=current)
    return render(request, 'acct/edit_profile.html', {'form': form})
@login_required
def index(request):
    """Render the home page; an authenticated session is required."""
    return render(request, "acct/home.html", {})
def signup(request):
    """Register a new user, create their API token, and log them in.

    An invalid POST (and any GET) renders the signup form again.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            user = authenticate(username=username, password=raw_password)
            Token.objects.create(user=user)
            # BUG FIX: `login(request, user)` resolved to the local login *view*
            # (which takes only `request`) and raised TypeError at runtime; the
            # session must be opened with Django's auth_login.
            auth_login(request, user)
            return render(request, "acct/home.html", {})
    else:
        form = SignUpForm()
    return render(request, 'acct/signup.html', {'form': form})
def edit_account(request):
    """Display and process the account edit form.

    A valid POST saves and shows the account page; an invalid POST
    re-renders the bound form (the original could fall off the function
    and return None on that path).
    """
    if request.method == 'POST':
        form = EditAccountForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save()
            return render(request, "acct/account.html", {})
    else:
        form = EditAccountForm(instance=request.user)
    return render(request, 'acct/edit_account.html', {'form': form})
def change_password(request):
    """Change the user's password via Django's PasswordChangeForm.

    On success, rotate the session auth hash so the user stays logged in.
    BUG FIX: an invalid POST previously fell through to a render using the
    context key 'form1', so the template never received the bound form's
    validation errors; every render now uses 'form'.
    """
    if request.method == 'POST':
        form = PasswordChangeForm(data=request.POST, user=request.user)
        if form.is_valid():
            form.save()
            # Keep the current session valid after the password change.
            update_session_auth_hash(request, form.user)
            return render(request, "acct/profile.html", {})
    else:
        form = PasswordChangeForm(user=request.user)
    return render(request, 'acct/change_password.html', {'form': form})
def account(request):
    """Render the account overview for the current user."""
    return render(request, 'acct/account.html', {'user': request.user})
|
"""
This playbook runs all the pan actions one by one.
"""
import phantom.rules as phantom
import json
from datetime import datetime
from datetime import timedelta
def block_url_cb(action, success, container, results, handle):
    """Terminal callback of the chain: 'block url' finished; nothing left to run."""
    return
def unblock_ip_cb(action, success, container, results, handle):
    """After 'unblock ip' succeeds, schedule 'block url' 40 seconds out."""
    if not success:
        return
    start_at = datetime.now() + timedelta(seconds=40)
    phantom.act('block url', parameters=[{ "url" : "www.yahoo.com" }], assets=["pan"],
                callback=block_url_cb, start_time=start_at)
def block_ip_cb(action, success, container, results, handle):
    """After 'block ip' succeeds, schedule 'unblock ip' 40 seconds out."""
    if not success:
        return
    start_at = datetime.now() + timedelta(seconds=40)
    phantom.act('unblock ip', parameters=[{ "ip" : "192.94.73.3" }], assets=["pan"],
                callback=unblock_ip_cb, start_time=start_at)
def block_application_cb(action, success, container, results, handle):
    """After 'block application' succeeds, schedule 'block ip' 40 seconds out."""
    if not success:
        return
    # Block www.freeshell.org, configure the action after a while, noticed that the commit is still not finished
    # on the remote device
    start_at = datetime.now() + timedelta(seconds=40)
    phantom.act('block ip', parameters=[{ "ip" : "192.94.73.3" }], assets=["pan"],
                callback=block_ip_cb, start_time=start_at)
def list_applications_cb(action, success, container, results, handle):
    """After 'list applications' succeeds, immediately block the ftp application."""
    if success:
        phantom.act('block application', parameters=[{ "application" : "ftp" }],
                    assets=["pan"], callback=block_application_cb)
def on_start(incident):
    """Playbook entry point: kick off the chain with 'list applications'."""
    phantom.act('list applications', parameters=[{ }], assets=["pan"],
                callback=list_applications_cb)
def on_finish(incident, summary):
    """Playbook exit hook: emit the run summary to the debug log."""
    phantom.debug("Summary: " + summary)
|
def count_pattern_occurrences(grid, pattern):
    """Count the positions where `pattern` (P strings of length P) appears as a
    contiguous subgrid of `grid` (M strings of length M), scanning row-major.
    """
    m, p = len(grid), len(pattern)
    count = 0
    for i in range(m - p + 1):
        for j in range(m - p + 1):
            # Compare the pattern row-by-row against the MxM grid window at (i, j).
            if all(grid[i + x][j:j + p] == pattern[x] for x in range(p)):
                count += 1
    return count

# Guarding the I/O lets the counting logic be imported and tested without stdin.
if __name__ == "__main__":
    M = int(input())
    mo = [input() for _ in range(M)]
    P = int(input())
    pattern = [input() for _ in range(P)]
    print(count_pattern_occurrences(mo, pattern))
# Alternative: solve with an explicit flag variable instead of Python's for-else
# cnt = 0
# for i in range(M-P+1):
# for j in range(M-P+1):
# flag = 0
# for x in range(P):
# if mo[i+x][j:j+P] != pattern[x]:
# flag = 1
# break
# if flag != 1:
# cnt += 1
# print(cnt) |
from os import path, getcwd
from re import match, split
import yaml
from collections import OrderedDict
from profit.run import Runner
VALID_FORMATS = ('.yaml', '.py')
"""
yaml has to be configured to represent OrderedDict
see https://stackoverflow.com/questions/16782112/can-pyyaml-dump-dict-items-in-non-alphabetical-order
and https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
"""
def represent_ordereddict(dumper, data):
    """Represent an OrderedDict as a plain YAML mapping, keeping insertion order."""
    pairs = [(dumper.represent_data(item_key), dumper.represent_data(item_value))
             for item_key, item_value in data.items()]
    return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', pairs)
def dict_constructor(loader, node):
    """Build an OrderedDict (instead of a plain dict) from a YAML mapping node."""
    pairs = loader.construct_pairs(node)
    return OrderedDict(pairs)
# Register the OrderedDict representer and the mapping constructor on PyYAML's
# default dumper/loader, so dumps preserve key order and loads return OrderedDicts.
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
yaml.add_representer(OrderedDict, represent_ordereddict)
yaml.add_constructor(_mapping_tag, dict_constructor)
""" now yaml is configured to handle OrderedDict input and output """
def load_config_from_py(filename):
    """ Load the configuration parameters from a python file into dict. """
    from importlib.util import spec_from_file_location, module_from_spec
    spec = spec_from_file_location('f', filename)
    module = module_from_spec(spec)
    spec.loader.exec_module(module)
    # Keep only public names; underscore-prefixed entries (incl. dunders) are dropped.
    return {key: val for key, val in module.__dict__.items() if not key.startswith('_')}
class Config(OrderedDict):
"""
Configuration class
This class provides a dictionary with possible configuration parameters for
simulation, fitting and uncertainty quantification.
Possible parameters in .yaml:
base_dir: .
run_dir: .
uq: # TODO: implement
interface: ./interface.py
files:
input: ./input.txt
output: ./output.txt
ntrain: 30
variables:
input1:
kind: Normal
range: (0, 1)
dtype: float
...
independent1:
kind: Independent
range: (0, 10, 1)
dtype: int
...
output1:
kind: Output
range: independent1
dtype: float
run:
cmd: python3 ../simulation.py
ntask: 4
fit:
surrogate: GPy
kernel: RBF
sigma_n: None
sigma_f: 1e-6
save: ./model.hdf5
load: ./model.hdf5
plot: Bool
xpred: ((0, 1, 0.01), (0, 10, 0.1))
plot_searching_phase: Bool
"""
def __init__(self, base_dir=getcwd(), **entries):
super(Config, self).__init__()
self['base_dir'] = path.abspath(base_dir)
self['run_dir'] = self['base_dir']
self['uq'] = {}
self['variables'] = {}
self['fit'] = {'surrogate': 'GPy',
'kernel': 'RBF'}
self['files'] = {'input': path.join(self['base_dir'], 'input.txt'),
'output': path.join(self['base_dir'], 'output.txt')}
# Not to fill directly in file
self['independent'] = {}
self['input'] = {}
self['output'] = {}
self.update(entries)
def write_yaml(self, filename='profit.yaml'):
""" Dump UQ configuration to a yaml file.
The default filename is profit.yaml
"""
dumpdict = dict(self)
self._remove_nones(dumpdict)
with open(filename,'w') as file:
yaml.dump(dumpdict,file,default_flow_style=False)
    @classmethod
    def from_file(cls, filename='profit.yaml'):
        """Load a configuration from a .yaml or .py file and post-process it.

        Parses the variable declarations, resolves each variable kind into a
        concrete sample range, fills output shapes, normalizes the run
        section and makes all file paths absolute. The default filename is
        profit.yaml. Raises TypeError for unsupported file extensions and
        RuntimeError for unknown variable kinds.
        """
        from profit.util import variable_kinds, safe_str, get_class_methods
        self = cls(base_dir=path.split(filename)[0])
        if filename.endswith('.yaml'):
            with open(filename) as f:
                entries = yaml.safe_load(f)
        elif filename.endswith('.py'):
            entries = load_config_from_py(filename)
        else:
            raise TypeError("Not supported file extension .{} for config file.\n"
                            "Valid file formats: {}".format(filename.split('.')[-1], VALID_FORMATS))
        self.update(entries)
        # Remember where the configuration came from, always as absolute path.
        if path.isabs(filename):
            self['config_path'] = filename
        else:
            self['config_path'] = path.abspath(path.join(getcwd(), filename))
        """ Variable configuration
        kind: Independent, Uniform, etc.
        range: (start, end, step=1) or {'dependent variable': (start, end, step=1)} for output
        dtype: float64
        """
        halton_dim = []  # (name, entries) of variables sharing one n-D Halton sequence
        for k, v in self['variables'].items():
            if isinstance(v, str):
                # match word(int_or_float, int_or_float, int_or_float)
                mat = match(r'(\w+)\(?(-?\d+(?:\.\d+)?)?,?\s?(-?\d+(?:\.\d+)?)?,?\s?(-?\d+(?:\.\d+)?)?\)?', v)
                kind = mat.group(1)
                entries = tuple(float(entry) for entry in mat.groups()[1:] if entry is not None)
                self['variables'][k] = {'kind': kind}
                if safe_str(kind) == 'output':
                    # Outputs list the variables they depend on inside the parentheses.
                    spl = split('[()]', v)
                    if len(spl) >= 3:
                        dependent = [var.strip() for var in split(',', spl[1])]
                    else:
                        dependent = []
                    self['variables'][k]['depend'] = tuple(dependent)
                    self['variables'][k]['range'] = {k: None for k in dependent}
                else:
                    try:
                        func = getattr(variable_kinds, safe_str(kind))
                        if safe_str(kind) == 'halton':
                            # Deferred: all halton variables are filled from one
                            # shared n-D sequence below.
                            halton_dim.append((k, entries))
                        elif safe_str(kind) == 'independent':
                            self['variables'][k]['range'] = func(*entries, size=self['ntrain']) if entries else None
                        else:
                            self['variables'][k]['range'] = func(*entries, size=self['ntrain'])
                    except AttributeError:
                        raise RuntimeError("Variable kind not defined.\n"
                                           "Valid Functions: {}".format(get_class_methods(variable_kinds)))
            # Process data types
            if 'dtype' not in self['variables'][k].keys():
                self['variables'][k]['dtype'] = 'float64'
            # Add to corresponding variables 'output', 'independent' or 'input'
            kind = self['variables'][k]['kind'].lower()
            kind = kind if kind in ('output', 'independent') else 'input'
            if self['variables'][k].get('range') is not None:
                self[kind][k] = self['variables'][k]
        # Fill halton variables with single dimensions of n-D halton
        if halton_dim:
            halton = variable_kinds.halton(size=(self['ntrain'], len(halton_dim)))
            for d, (k, entries) in enumerate(halton_dim):
                # Scale the unit-interval Halton column into (low, low + diff).
                diff = (entries[1] - entries[0]) if entries else 1
                low = entries[0] if entries else 0
                self['variables'][k]['range'] = diff * halton[:, d].reshape(-1, 1) + low
                self['input'][k] = self['variables'][k]
        # Fill range of output vector
        for k, v in self['output'].items():
            if not isinstance(v['range'], dict):
                v['range'] = {d: None for d in v['range']}
            shape = []
            for d in v['range']:
                self['output'][k]['range'][d] = self['variables'][d]['range']
                shape.append(self['variables'][d]['range'].shape[0])
            self['output'][k]['shape'] = tuple(shape)
        # Run configuration
        if 'run' not in self:
            self['run'] = {}
        if isinstance(self['run'], str):
            # A bare string is shorthand for the simulation command.
            self['run'] = {'command': self['run']}
        Runner.handle_config(self['run'], self)
        # Set missing mandatory dict entries to default
        if not self['files'].get('input'):
            self['files']['input'] = path.join(self['base_dir'], 'input.txt')
        if not self['files'].get('output'):
            self['files']['output'] = path.join(self['base_dir'], 'output.txt')
        if not self['fit'].get('surrogate'):
            self['fit']['surrogate'] = 'GPy'
        if not self['fit'].get('kernel'):
            self['fit']['kernel'] = 'RBF'
        # Set absolute paths
        self['files']['input'] = path.join(self['base_dir'], self['files']['input'])
        self['files']['output'] = path.join(self['base_dir'], self['files']['output'])
        if self['fit'].get('load'):
            self['fit']['load'] = path.join(self['base_dir'], self['fit']['load'])
        if self['fit'].get('save'):
            self['fit']['save'] = path.join(self['base_dir'], self['fit']['save'])
        return self
def _remove_nones(self,config=None):
if config==None: config=self.__dict__
for key in list(config):
if type(config[key]) is dict:
self._remove_nones(config[key])
#elif (type(config[key]) is not list) and (config[key] is None):
else:
if config[key] is None:
del config[key]
|
import click
import logging
from .migrate import Migrant
from .wrapper import coroutine
def setup_logger(verbosity):
    """Configure the package logger from the -v/--verbose count.

    0 -> WARNING, 1 -> INFO, 2 or more -> DEBUG. (Previously any count
    above 2 fell back to INFO, so `-vvv` logged *less* than `-vv`.)

    NOTE(review): each call attaches a new StreamHandler; the CLI calls this
    exactly once, so duplicate handlers are not an issue in practice.
    """
    log_level = {
        0: logging.WARNING,
        1: logging.INFO,
    }.get(verbosity, logging.DEBUG)
    logger = logging.getLogger(__package__)
    logger.setLevel(log_level)
    handler = logging.StreamHandler()
    handler.setLevel(log_level)
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
@click.command()
@click.option("--github-token", help="Your Github Token", required=True)
@click.option(
    "--github-organization",
    help="The organization to import to instead of the user",
)
@click.option("--github-team", help="The organization team to give the repository to")
@click.option("--bitbucket-username", help="Your Bitbucket Username", required=True)
@click.option("--bitbucket-password", help="Your Bitbucket Password", required=True)
@click.option("--bitbucket-organization", help="Your Bitbucket Organization")
@click.option(
    "--repos-to-migrate",
    multiple=True,
    help="""
    Repositories you want to migrate. \n
    If not passed, the command will migrate all your repositories. \n
    You can pass this parameter as many times as needed \n
    e.g. --repos-to-migrate=REPO1 --repos-to-migrate=REPO2
    """,
)
@click.option("-v", "--verbose", count=True)
@coroutine
async def migrate(
    loop,
    github_token,
    github_organization,
    github_team,
    bitbucket_username,
    bitbucket_password,
    bitbucket_organization,
    repos_to_migrate,
    verbose,
):
    """CLI entry point: migrate Bitbucket repositories to Github.

    `loop` is injected by the @coroutine wrapper; every other argument comes
    from the click options above. Delegates all actual work to Migrant.
    """
    setup_logger(verbose)
    migrant = Migrant(
        loop,
        github_token,
        bitbucket_username,
        bitbucket_password,
        gh_org=github_organization,
        gh_team=github_team,
        bb_org=bitbucket_organization,
        repos=repos_to_migrate,
    )
    return await migrant.migrate()
|
import pathlib
import re
import typing
import matplotlib.pyplot as plt
import numpy
import pandas
import seaborn as sns
import functions
import plotly.express as px
sns.set(style = "darkgrid")
def RS(array: numpy.ndarray, step: int) -> float:
    """Mean rescaled-range (R/S) statistic of a series at window size `step`.

    The log-return series is cut into consecutive windows of length `step`;
    for each window the range of the mean-adjusted cumulative sum is divided
    by the window's standard deviation, and the ratios are averaged.
    Requires strictly positive `array` values (log is taken) and
    `array.size > step`.
    """
    def compose(values: numpy.ndarray, step: int) -> numpy.ndarray:
        # One column per *consecutive* window of length `step`.
        # (The previous reshape(step, segments) used C order, which
        # interleaved every `segments`-th sample into a column instead of
        # keeping windows contiguous — invalid for R/S analysis.)
        segments = values.size // step
        return values[: segments * step].reshape(segments, step).T

    log_growth = numpy.diff(numpy.log(array))
    composed = compose(log_growth, step)
    mean = composed.mean(axis=0)
    # Cumulative deviation of each window from its own mean.
    mean_reshaped = numpy.tile(mean.reshape(mean.size, 1), composed.shape[0]).T
    cumsum = composed.cumsum(axis=0) - mean_reshaped.cumsum(axis=0)
    range_ = numpy.amax(cumsum, axis=0) - numpy.amin(cumsum, axis=0)
    std = composed.std(axis=0)
    return (range_ / std).mean()
# Russian month names keyed by the English sheet names used in newer workbooks.
months_translation = {
    "January": "Январь",
    "February": "Февраль",
    "March": "Март",
    "April": "Апрель",
    "May": "Май",
    "June": "Июнь",
    "July": "Июль",
    "August": "Август",
    "September": "Сентябрь",
    "October": "Октябрь",
    "November": "Ноябрь",
    "December": "Декабрь",
}
# users[year][month] -> client count scraped from the exchange's client reports.
users: typing.Dict[int, typing.Dict[str, str]] = {}
for file_ in pathlib.Path("imoex/clients").iterdir():
    sheets = pandas.read_excel(file_, sheet_name=None, header=None)
    # The report year is encoded in the file name as "clients-YYYY".
    year = int(re.findall(r"clients\-(\d{4})", str(file_))[0])
    for name, sheet in sheets.items():
        if year < 2016:
            # Older workbooks name their sheets like "Январь2015".
            month, year = re.findall(r"([А-Я][а-я]{2,7})(\d{4})", name)[0]
            year = int(year)
        else:
            month = months_translation[name]
        if year not in users:
            users[year] = {}
        # Cell holding the client count; the row moved with the 2012 layout.
        # NOTE(review): magic indices (column 5, row 39/33) — verify against the workbooks.
        users[year][month] = sheet[5][39 if year > 2011 else 33]
months_order = (
    "Январь",
    "Февраль",
    "Март",
    "Апрель",
    "Май",
    "Июнь",
    "Июль",
    "Август",
    "Сентябрь",
    "Октябрь",
    "Ноябрь",
    "Декабрь",
)
# Flatten the per-year dicts into one chronological series
# (2007 data starts in June, i.e. at month index 5).
users_by_years = list(map(int, users.keys()))
years_range = range(min(users_by_years), max(users_by_years) + 1)
users_by_months = []
for year in years_range:
    users_by_months += [users[year][month] for month in months_order[5 if year == 2007 else 0 :]]
# Index quotes: column 2 = date string, column 7 = closing value.
imoex = pandas.read_csv(
    "imoex.csv",
    sep=";",
    header=0,
    usecols=[2, 7],
    names=["date", "close"],
    dtype={"date": str, "close": float},
)
# Group daily closes by year-month (strip the trailing day digits from the date).
imoex.loc[:, "year-month"] = imoex["date"].str[:-2]
imoex_by_months = imoex.groupby("year-month")["close"].apply(list).to_numpy()
# Windowed indicators computed by the project-local `functions` module.
hurst = functions.window_slopes(imoex_by_months)
stability = functions.window_means(numpy.array(users_by_months))
lyapunov = functions.chaos(imoex_by_months)
clusters = functions.clusters(hurst, stability, lyapunov, 0)
# --- Plots: raw index, user counts, Hurst-vs-users, cluster scatters ---
sns.lineplot(data = imoex, x = range(len(imoex["close"])), y = "close")
plt.xlabel("Период")
plt.ylabel("Значение индекса")
plt.show()
plt.clf()
sns.lineplot(x = range(len(users_by_months)), y = users_by_months)
plt.xlabel("Период")
plt.ylabel("Количество пользователей")
plt.show()
plt.clf()
# NOTE(review): axis labels below look copy-pasted from the previous plot
# (x is stability, not a period) — confirm intent.
sns.lineplot(x = stability, y = hurst, hue = range(len(hurst)))
plt.xlabel("Период")
plt.ylabel("Коэффициента Херста")
plt.show()
plt.clf()
data = {"Показатель Херста": hurst,
        "Количество пользователей": stability,
        "Кластер": clusters,
        "Период": range(len(stability)),
        "Показатель Ляпунова": lyapunov,
        }
df = pandas.DataFrame(data=data)
fig = px.scatter(df, x = "Количество пользователей", y = "Показатель Херста",
                 color = "Период", color_continuous_scale = "Bluered")
fig.show()
fig = px.scatter(df, x = "Количество пользователей", y = "Показатель Херста",
                 color = "Кластер", color_continuous_scale = "Portland", template = "plotly_dark")
fig.show()
fig = px.scatter_3d(df, x = "Количество пользователей", y = "Показатель Херста", z="Показатель Ляпунова",
                    color="Период", color_continuous_scale = "Bluered")
fig.show()
fig = px.scatter_3d(df, x = "Количество пользователей", y = "Показатель Херста", z="Показатель Ляпунова",
                    color="Кластер", color_continuous_scale = "Portland", template = "plotly_dark")
fig.show()
# Summary statistics printed to stdout.
# NOTE(review): the "Корейский" ("Korean") prefix in these messages looks like a
# leftover from a sibling script analysing a different market — confirm.
print("Корейский показатель Ляпунова", functions.lyapunov(
    functions.recurrence_plot(
        functions.return_(
            numpy.concatenate(imoex_by_months)))))
print("Корейский среднее показателя Ляпунова", numpy.mean(lyapunov))
print("Корейская диспекрсия Ляпунова ", numpy.std(lyapunov))
print("Корейский размах Ляпунова ", numpy.max(lyapunov)-numpy.min(lyapunov))
print("Корейский коэффициет H", functions.h(numpy.concatenate(imoex_by_months)))
print("Корейский среднее коэффициента Херста ", numpy.mean(hurst))
print("Корейская диспекрсия H ", numpy.std(hurst))
print("Корейский размах H ", numpy.max(hurst)-numpy.min(hurst))
|
from django.conf.urls import patterns, include, url
# News app URL routes: the index view and a single-post view keyed by post_id.
# NOTE(review): string views and `patterns()` were deprecated in Django 1.8
# and removed in 1.10 — this module requires an older Django release.
urlpatterns = patterns('news.views',
    url(r'^$', 'news', name='news'),
    url(r'^(?P<post_id>\d+)/$', 'one_new', name='one_new'),
)
import boto
import errno
from socket import error as SocketError
from boto.kinesis.exceptions import ProvisionedThroughputExceededException
import time
from bolt import SpoutBase, BoltBase
import collections
import logging
import json
import msgpack
import base64
import msgmodel
import concurrent.futures
import Queue
from streaming.util import TCounter
from boto.kinesis.exceptions import ProvisionedThroughputExceededException, LimitExceededException
from boto.exception import JSONResponseError
class KinesisSpout(SpoutBase):
    """Spout that tails a single shard of an AWS Kinesis stream (Python 2).

    Records are fetched in chunks through a shard iterator, buffered in a
    deque, and handed out one at a time by get_next_event(). Throttling,
    retries and shard-iterator refresh are handled internally.
    """
    _DEFAULT_GET_CHUNK_SIZE = 150       # records per get_records call
    _DEFAULT_POLLING_INTERVAL = 4.00    # seconds between polls of an empty shard
    def __init__(self, stream, shard_id, *args, **kwargs):
        """Connect to `stream`/`shard_id`; optional kwargs: logger, last_seqnum,
        chunk_size, polling_interval, throttle_interval, eob_cb, cntr, start_at."""
        super(KinesisSpout, self).__init__(*args, **kwargs)
        self._kin = boto.connect_kinesis()
        self._log = kwargs.get('logger', logging.getLogger("__main__"))
        self._set_shard_info(stream, shard_id)
        self._log.info("Shard INFO: %s" % str(self._shard_details))
        self._last_seqnum = self._get_last_seqnum(**kwargs)
        self._current_seqnum = self._last_seqnum
        self._last_fetched_seqnum = None
        self._last_seqnum_cb = None
        self._shard_iter = None  # lazily created on first get_next_event()
        self._rec_chunk = kwargs.get('chunk_size', self._DEFAULT_GET_CHUNK_SIZE)
        self._polling_interval = kwargs.get('polling_interval', self._DEFAULT_POLLING_INTERVAL)
        self._throttle_interval = kwargs.get('throttle_interval', (self._polling_interval/2))
        self._eob_cb = kwargs.get('eob_cb')  # invoked when the record buffer drains
        self._cntr = kwargs.get('cntr', TCounter())
        self._max_retries = 3
        self._last_req_time = time.time()
        self._rec_buff = collections.deque()  # (seqnum, payload) pairs awaiting delivery
        self._init_time = time.time()
        self._start_at = kwargs.get("start_at", "LATEST")
        self._fetch_count = 0
        self._log.info("Finished init of stream: {0} : {1} LastSeqnum: {2}".format(stream, self._shard_id, self._last_seqnum))
    def _set_shard_info(self, stream, shard_id):
        """Resolve the stream description and locate `shard_id` in its shard list.

        NOTE(review): when shard_id is falsy the first shard's id is assigned to
        self._shard_id, but the match loop still compares against the original
        (falsy) shard_id and then raises — confirm whether a default shard was
        ever intended to work.
        """
        stream_info = self._kin.describe_stream(stream)
        self._kstream = stream_info['StreamDescription']
        self._shard_id = shard_id or self._kstream['Shards'][0]['ShardId']
        self._shard_details = None
        for si in self._kstream['Shards']:
            if (si['ShardId'] == shard_id):
                self._shard_details = si
        if (not self._shard_details):
            raise RuntimeError("shard_id {0} invalid for this stream".format(shard_id))
    def _get_last_seqnum(self, **kwargs):
        """Return the explicit 'last_seqnum' kwarg, or the shard's starting sequence number."""
        if (kwargs.get('last_seqnum')):
            self._log.info("Explicitly setting KinesisSpout to seqnum={0}".format(kwargs['last_seqnum']))
        return kwargs.get('last_seqnum') or self._shard_details['SequenceNumberRange']['StartingSequenceNumber']
    def _set_stream_iterator(self):
        """Create the initial shard iterator, resuming at _last_seqnum when start_at == 'RESUME'."""
        if (self._start_at == 'RESUME'):
            self._log.warn('Resuming stream read @ seqnum={0}'.format(self._last_seqnum))
            rsi = self._kin.get_shard_iterator(self._kstream['StreamName'],
                                               self._shard_id,
                                               'AT_SEQUENCE_NUMBER',
                                               starting_sequence_number=self._last_seqnum)
        else:
            self._log.warn('Starting stream read @ {0}'.format(self._start_at))
            rsi = self._kin.get_shard_iterator(self._kstream['StreamName'],
                                               self._shard_id,
                                               self._start_at)
        self._shard_iter = rsi['ShardIterator']
    def _get_fresh_shard_iterator(self, **kwargs):
        """Replace a stale/exhausted shard iterator.

        Restarts at `seqnum` (default: last fetched sequence number) or falls
        back to LATEST when nothing has been fetched yet.
        """
        seqnum = kwargs.get('seqnum', self._last_fetched_seqnum)
        start_at = 'AT_SEQUENCE_NUMBER'
        if (seqnum is None):
            kw = {}
            start_at = 'LATEST'
        else:
            kw = dict(starting_sequence_number=seqnum)
        self._log.warn('Fresh shard iterator stream = {1} shard = {2} seqnum = {0} start_at = {3}'.
                       format(seqnum, self._kstream['StreamName'], self._shard_id, start_at))
        rsi = self._kin.get_shard_iterator(self._kstream['StreamName'],
                                           self._shard_id,
                                           start_at,
                                           **kw)
        self._log.warn("Old shard iterator: {0} New shard iterator {1}".format(self._shard_iter, rsi['ShardIterator']))
        self._shard_iter = rsi['ShardIterator']
    def unpack_to(self, payload, seqnum, recbuffer):
        """Hook: append (seqnum, payload) to the buffer; subclasses may unbundle."""
        recbuffer.append((seqnum, payload))
    @property
    def current_seqnum(self):
        # Sequence number of the record most recently returned by get_next_event().
        return self._current_seqnum
    def kinesis_get_cb(self, record_bundle):
        """Hook: called with the raw get_records response before unpacking."""
        pass
    def raw_record_process(self, kinesis_record):
        """Decode the base64 'Data' field of a raw Kinesis record in place."""
        bb = base64.b64decode(kinesis_record['Data'])
        kinesis_record['Data'] = bb
    def get_next_event(self, nowait=False):
        """Return the next buffered record, fetching from Kinesis as needed.

        Blocks (throttled polling) until a record is available unless `nowait`
        is set, in which case an empty dict is returned when a fetch comes back
        empty. Side effects: advances _current_seqnum, may fire _eob_cb once
        whenever the buffer has fully drained.
        """
        if (self._shard_iter is None):
            self._set_stream_iterator()
        # Buffer just drained: report the last delivered seqnum exactly once.
        if ((self._eob_cb) and (self._current_seqnum != self._last_seqnum_cb) and (len(self._rec_buff) == 0)):
            self._last_seqnum_cb = self._current_seqnum
            self._eob_cb(self._current_seqnum)
        while (len(self._rec_buff) == 0):
            # Rate-limit get_records calls to one per throttle interval.
            delta = time.time() - self._last_req_time
            if (delta < self._throttle_interval):
                self._cntr.incr("kthrottle")
                time.sleep((self._throttle_interval - delta))
            self._last_req_time = time.time()
            res = None
            tf0 = time.time()
            for retry in xrange(self._max_retries):
                try:
                    res = self._kin.get_records(self._shard_iter, limit=self._rec_chunk, b64_decode=False)
                except TypeError as e:
                    # Seen from boto on malformed responses; retried.
                    self._log.exception('kinesis get records')
                    self._log.warn('trapped error on Kinesis, attempt {0}/{1} {2}'.format(retry, self._max_retries, e))
                except ProvisionedThroughputExceededException as e:
                    self._log.warn("Exceeded throughput limits.. throttling")
                    time.sleep(self._throttle_interval)
                except SocketError as e:
                    # Only connection resets are retried; anything else propagates.
                    if (e.errno != errno.ECONNRESET):
                        self._log.exception('Socket exception {0}'.format(str(e)))
                        self._log.warn("Socket exception")
                        raise
                    self._log.warn("Socket connection error retry {0}/{1}".format(retry, self._max_retries))
                    time.sleep(self._throttle_interval)
                except Exception as e:
                    self._log.exception('Unknown exception {0}'.format(str(e)))
                    self._log.warn("Unknown exception")
                    raise
                else:
                    break
            tfetch = time.time() - tf0
            self._fetch_count += 1
            self._cntr.incr("kfetch")
            if (not res):
                # All retries failed: back off hard and restart from the last
                # successfully fetched sequence number.
                self._log.warn('MAX retries on Kinesis... sleeping then fresh iterator from last fetched seqnum: {0}'.format(self._last_fetched_seqnum))
                time.sleep(self._polling_interval*5)
                self._get_fresh_shard_iterator()
            else:
                self._rec_buff = collections.deque()
                if ('Records' in res):
                    if (nowait) and (len(res["Records"]) == 0):
                        return {}
                    self.kinesis_get_cb(res)
                    self._shard_iter = res['NextShardIterator']
                    for (irec, recs) in enumerate(res['Records']):
                        self.raw_record_process(recs)
                        self.unpack_to(recs['Data'], recs['SequenceNumber'], self._rec_buff)
                        self._last_fetched_seqnum = recs['SequenceNumber']
                    if (len(res['Records']) == self._rec_chunk): self._cntr.incr('kmaxrecget')
                    if (len(res['Records']) == 0): self._cntr.incr("kempty")
                    # Periodic progress logging every 50 non-empty fetches.
                    if (len(self._rec_buff) and ((self._fetch_count % 50) == 0)):
                        ev0 = self._rec_buff[0][1]
                        if ev0.get('timestamp'):
                            self._log.info("Timestamp: {0} CNTRS: {1}".format(ev0['timestamp'], self._cntr.pprint()))
                        else:
                            self._log.info("Timestamp: {0} CNTRS: {1}".format(ev0['unix_timestamp'], self._cntr.pprint()))
                    if (self._cntr["kfetch"]>1000):
                        self._log.info("Counters reset")
                        self._cntr.reset_all()
                    self._log.debug('Record count: {1} Next Shard Iterator: {0}'.format(res['NextShardIterator'], len(self._rec_buff)))
                if (len(self._rec_buff) == 0):
                    # Empty fetch: wait out the remainder of the polling interval.
                    telap = time.time() - self._last_req_time
                    if (telap < self._polling_interval):
                        self._cntr.incr("kbackoff")
                        time.sleep(self._polling_interval - telap + 0.001)
        (seqnum, rec) = self._rec_buff.popleft()
        self._current_seqnum = seqnum
        return rec
class KinesisSpoutUnbundler(KinesisSpout):
    """KinesisSpout variant whose records are msgpack-packed bundles:
    every Kinesis record expands into several application events."""
    def __init__(self, *args, **kwargs):
        super(KinesisSpoutUnbundler, self).__init__(*args, **kwargs)
    def unpack_to(self, payload, seqnum, recbuffer):
        """Unpack one msgpack bundle and append each expanded event to `recbuffer`.

        Unpack failures are logged and the bundle is treated as empty.
        """
        bundle = []
        try:
            bundle = msgpack.unpackb(payload)
        except msgpack.exceptions.ExtraData as ed:
            self._log.warn("Msgpack Unpack ExtraData, Recovered RecLen={0}".format(str(ed.unpacked)))
        except Exception as e:
            self._log.warn("Msgpack Unpack Exception {0}".format(type(e)))
        self._log.debug("bundle count: {0}".format(len(bundle)))
        for event in bundle:
            recbuffer.append((seqnum, msgmodel.expand(event)))
class KinesisSink(BoltBase):
    """Bolt that writes each processed record to an AWS Kinesis stream (Python 2).

    Records are msgpack-packed and put with a partition key derived from the
    record (kwargs: keyname or a custom setpartkey callable). Oversized/failed
    puts are retried and, if needed, split in half and re-sent.
    """
    def __init__(self, stream, *args, **kwargs):
        super(KinesisSink, self).__init__(*args, **kwargs)
        self._stream = stream
        self._kin = boto.connect_kinesis()
        stream_info = self._kin.describe_stream(stream)
        self._kstream = stream_info['StreamDescription']
        self._keyname = kwargs.get('keyname', None)
        # Default partition key: look up `keyname` in the event itself.
        self._setpartkey = kwargs.get('setpartkey', (lambda evt: evt[self._keyname]))
        self._cntr = kwargs.get('cntr', TCounter())
        self._count = 0
        self._log = kwargs.get('logger', logging.getLogger("__main__"))
        self._tpause = 0.05   # pause after a throughput error, seconds
        self._ksplit = 0      # how many times a record was split and re-sent
        self._dropped = 0     # records abandoned after repeated throttling
    def pack(self, record):
        """Serialize a record for the wire (msgpack)."""
        return msgpack.packb(record)
    def kin_send(self, in_record):
        """Put `in_record` to Kinesis with retry/split handling.

        Outer loop (2 passes): first tries the whole record; on a size/limit
        error (kerr == 2) the record is halved and both halves are sent on the
        second pass. Inner loop (3 tries): retries throughput errors
        (kerr == 1); unknown exceptions abort. Returns the last put_record
        response on success, None when the record was dropped or split.
        """
        vrecords = [ in_record ]
        for xplit in xrange(2):
            for record in vrecords:
                precord = self.pack(record)
                kerr = 0
                for itry in xrange(3):
                    resp = None
                    try:
                        resp = self._kin.put_record(self._stream, precord, self._setpartkey(record))
                    except (ProvisionedThroughputExceededException, TypeError) as ev:
                        self._log.warn('Exceeded sink throughput... pausing')
                        time.sleep(self._tpause)
                        kerr = 1
                    except (LimitExceededException, JSONResponseError) as ev:
                        # Likely record too large: fall through to the split path.
                        kerr = 2
                        break
                    except Exception as e:
                        self._log.warn('Exception Kinesis put {0}'.format(str(type(e))))
                        return None
                    else:
                        kerr = 0
                        if ((self._count % 5000) == 0):
                            self._log.warn("Put record: {0} Seq: {1}".format(self._count, resp['SequenceNumber']))
                        break
                if (kerr == 1):
                    self._log.warn('Maximum retries on throughout... dropping record')
                    self._dropped += 1
                    return None
                elif (kerr == 2):
                    break
            if (not kerr):
                return resp
            # Split the record in half and retry both parts on the next pass.
            self._ksplit += 1
            self._log.info("Kinesis send record split: {0} {1}:{2}".format(len(in_record), self._ksplit, self._count))
            xl2 = len(in_record)/2
            vrecords = [in_record[:xl2], in_record[xl2:]]
        return None
    def process(self, record):
        """Bolt entry point: count and forward the record to Kinesis."""
        self._count += 1
        self.kin_send(record)
def ftwrap(targetobj, method, *args):
    """Invoke the method named `method` on `targetobj` with `args`.

    Used to submit bound-method calls to a thread pool by name.
    """
    bound = getattr(targetobj, method)
    return bound(*args)
class KinesisSinkAsync(KinesisSink):
    """KinesisSink that offloads put_record calls to a small thread pool.

    process() only enqueues records; up to _maxworkers futures drain the
    queue concurrently via send_chain().
    """
    _MAX_WORKERS = 2
    def __init__(self, *args, **kwargs):
        super(KinesisSinkAsync, self).__init__(*args, **kwargs)
        self._maxworkers = kwargs.get('maxworkers', KinesisSinkAsync._MAX_WORKERS)
        self._thrpool = kwargs.get('thrpool', concurrent.futures.ThreadPoolExecutor(max_workers=self._maxworkers))
        self._futures = []
        self._sendq = Queue.Queue()
        self._maxquelen = 500   # hard cap: beyond this, oldest records get dropped
        self._pauselen = 10     # soft cap: beyond this, briefly stall the producer
    def send_chain(self):
        """Drain the send queue from a worker thread until it is empty."""
        while True:
            try:
                pending = self._sendq.get(False)
            except Queue.Empty:
                break
            self.kin_send(pending)
        return True
    def _reap_futures(self):
        """Forget futures whose worker thread has already finished."""
        self._futures = [ft for ft in self._futures if not ft.done()]
    def _check_queue(self):
        """Apply backpressure and trim the queue when the sink falls behind."""
        if self._sendq.qsize() > self._pauselen:
            time.sleep(0.005)
        trimmed = 0
        while self._sendq.qsize() > self._maxquelen:
            try:
                self._sendq.get(False)
            except Queue.Empty:
                break
            trimmed += 1
            self._cntr.incr('qtrim')
            time.sleep(0.010)
        if trimmed:
            self._log.warn("Queue Too Long trimmed: {0} -- Please rescale Kinesis stream".format(trimmed))
        if self._sendq.qsize() > self._pauselen:
            time.sleep(0.005)
    def process(self, record):
        """Queue `record` for asynchronous delivery and keep workers running."""
        self._count += 1
        self._check_queue()
        self._sendq.put(record)
        # Drop references to workers that already completed.
        self._reap_futures()
        if (self._count % 5000) == 0:
            self._log.info("{0} SendQ: {1}".format(self._count, self._sendq.qsize()))
        # Spawn another drainer only while below the worker cap.
        if len(self._futures) < self._maxworkers:
            self._futures.append(self._thrpool.submit(ftwrap, self, 'send_chain'))
if __name__ == "__main__":
    # Ad-hoc smoke test: tail the raw ads stream with an unbundling spout and
    # feed it into a counting bolt.
    # NOTE(review): Python 2 only — the print statement below is a syntax
    # error under Python 3.
    from tools import energeia
    import random
    # boto.connect_kinesis = (lambda: energeia.EnergeiaClient())
    class DebugBolt(BoltBase):
        # Counts records and logs every 10000th one.
        def __init__(self, *args, **kwargs):
            super(DebugBolt, self).__init__(*args, **kwargs)
            self._count = 0
        def process(self, data):
            self._count += 1
            if ((self._count % 10000) == 0):
                logging.info('Records recieved {0}'.format(self._count))
            return data
        def shutdown(self):
            print 'Shutdown called'
            pass
    db = DebugBolt()
    # ksink = KinesisSink("AdsQueryCoalesced", keyname="impression_id")
    # Resume reading at a fixed, historically-recorded sequence number.
    opts = dict(last_seqnum="49537335845514728365598234995297135798378439738109460513")
    kk = KinesisSpoutUnbundler("AdsQueryRaw", "shardId-000000000002", **opts)
    # kk.addsink(ksink)
    kk.addsink(db)
    kk.run()
|
from math import floor
from clubsandwich.blt.nice_terminal import terminal
from clubsandwich.blt.state import blt_state
from .view import View
class FirstResponderContainerView(View):
  """
  Manages the "first responder" system. The control that receives
  BearLibTerminal events at a given time is the first responder.

  This container view listens for the tab key. When it's pressed, the subview
  tree is walked until another candidate is found, or there are no others.
  That new subview is the new first responder.

  You don't need to create this class yourself. :py:class:`UIScene` makes it
  for you.

  If you want to write a control that handles input, read the source of the
  :py:class:`ButtonView` class.
  """

  def __init__(self, *args, **kwargs):
    # The attribute must exist before super().__init__() runs, because
    # adding subviews during init may already consult it.
    self.first_responder = None
    super().__init__(*args, **kwargs)
    # NOTE(review): deliberate re-initialization after super().__init__();
    # kept as-is — confirm nothing assigned during init should survive.
    self.first_responder = None
    self.find_next_responder()

  @property
  def contains_first_responders(self):
    return True

  def first_responder_traversal(self):
    """Yield every view of this container's subtree in traversal order."""
    for subview in self.subviews:
      yield from self._first_responder_traversal(subview)

  def _first_responder_traversal(self, v):
    """Yield `v` and its descendants, without descending into subtrees
    that manage their own first responders."""
    if v.contains_first_responders:
      # this view may always become the first responder
      # because it will manage
      # inner first responders, but do not try to look inside it.
      yield v
      return
    for subview in v.subviews:
      yield from self._first_responder_traversal(subview)
    yield v

  @property
  def _eligible_first_responders(self):
    """All views in the subtree that can currently become first responder."""
    return [
      v for v in self.first_responder_traversal()
      if v != self and v.can_become_first_responder]

  def remove_subviews(self, subviews):
    """Remove `subviews`; if the active first responder lived inside one of
    them, resign it and pick the next candidate."""
    super().remove_subviews(subviews)
    for v in subviews:
      # BUG FIX: first_responder_traversal() takes no argument (it always
      # walks self.subviews), so calling it with `v` raised TypeError.
      # Walking the *removed* subtree requires the private helper that
      # accepts a root view.
      for sv in self._first_responder_traversal(v):
        if sv == self.first_responder:
          self.set_first_responder(None)
          self.find_next_responder()
          return

  def set_first_responder(self, new_value):
    """
    Resign the active first responder and set a new one.

    Both the outgoing and incoming responders (and their ancestors) are
    notified via the did_resign/did_become callbacks.
    """
    if self.first_responder:
      self.first_responder.did_resign_first_responder()
      for ancestor in self.first_responder.ancestors:
        ancestor.descendant_did_resign_first_responder(
          self.first_responder)
    self.first_responder = new_value
    if self.first_responder:
      self.first_responder.did_become_first_responder()
      for ancestor in self.first_responder.ancestors:
        ancestor.descendant_did_become_first_responder(
          self.first_responder)

  def find_next_responder(self):
    """
    Resign active first responder and switch to the next one.
    """
    existing_responder = self.first_responder or self.leftmost_leaf
    all_responders = self._eligible_first_responders
    try:
      i = all_responders.index(existing_responder)
      # Wrap around past the last candidate.
      if i == len(all_responders) - 1:
        self.set_first_responder(all_responders[0])
      else:
        self.set_first_responder(all_responders[i + 1])
    except ValueError:
      # Current responder is not eligible anymore; start from the front.
      if all_responders:
        self.set_first_responder(all_responders[0])
      else:
        self.set_first_responder(None)

  def find_prev_responder(self):
    """
    Resign active first responder and switch to the previous one.
    """
    existing_responder = self.first_responder or self.leftmost_leaf
    all_responders = self._eligible_first_responders
    try:
      i = all_responders.index(existing_responder)
      # Wrap around past the first candidate.
      if i == 0:
        self.set_first_responder(all_responders[-1])
      else:
        self.set_first_responder(all_responders[i - 1])
    except ValueError:
      if all_responders:
        self.set_first_responder(all_responders[-1])
      else:
        self.set_first_responder(None)

  def terminal_read(self, val):
    """Route a terminal event to the first responder, then its ancestors,
    then the container-level handler. Returns truthy when handled."""
    handled = self.first_responder and self.first_responder.terminal_read(
      val)
    if self.first_responder and not handled:
      # Bubble the event up to (but not including) this container.
      for v in self.first_responder.ancestors:
        if v == self:
          break
        if v.terminal_read(val):
          return True
    can_resign = (
      not self.first_responder
      or self.first_responder.can_resign_first_responder)
    return self.terminal_read_after_first_responder(val, can_resign)

  def terminal_read_after_first_responder(self, val, can_resign):
    """
    :param int val: Return value of ``terminal_read()``
    :param bool can_resign: ``True`` iff there is an active first responder
                            that can resign

    If writing a custom first responder container view, override this to
    customize input behavior. For example, if writing a list view, you might
    want to use the arrows to change the first responder.
    """
    if can_resign and val == terminal.TK_TAB:
      if blt_state.shift:
        self.find_prev_responder()
        return True
      else:
        self.find_next_responder()
        return True
    return False
|
from textblob import TextBlob
# Read check.txt, run TextBlob spelling correction, print both versions and
# save the corrected text to corrected.txt.
with open("check.txt", "r+") as source:
    original = source.read()
print("Original text :", str(original))
# Compute the correction once (it was previously computed twice).
corrected = str(TextBlob(str(original)).correct())
print("\n")
print("Corrected text :", corrected)
with open("corrected.txt", 'w') as target:
    target.write(corrected)
|
# -*- coding: utf-8 -*-
from pymysql.err import (Warning, Error, InterfaceError, DataError,
DatabaseError, OperationalError, IntegrityError,
InternalError,
NotSupportedError, ProgrammingError, MySQLError)
from .conf import DBCONFIG, APPCONFIG
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 21:01:42 2021
@author: praveen
"""
import os
# NOTE(review): hard-coded absolute path — this script only runs from the
# author's checkout; parametrize before reuse.
os.chdir('/Users/praveen/kinduct_final_code')
cwd = os.getcwd()
print("Current working directory: {0}".format(cwd))
from kinduct import *
def main():
    """Run every analysis task and print its result.

    The task* functions and the globals they consume (kinduct_data,
    summation, total_players, summation_team, total_teams, data) are all
    provided by the star import from kinduct above.
    """
    print(task1_string_conversion(kinduct_data))
    print(task2_rename(kinduct_data))
    print(task4_losses_agg(summation, total_players))
    print(task5_gp_agg(summation, total_players))
    print(task6_ga_agg(summation))
    print(task7_ga_over_sa_agg(summation))
    print(task8_avg_percentage_wins(summation,summation_team,total_teams))
    print(task9_most_goals_stopped(summation,data))
    print(task10_most_efficient_player(summation))
if __name__ == "__main__":
    main()
|
from flask import Flask, render_template, request
from time import sleep
import os
app = Flask(__name__)
import AES_CBC as acbc
import AES_CFB as acfb
import AES_ECB as aecb
import AES_OFB as aofb
import DES_CBC as dcbc
import DES_CFB as dcfb
import DES_ECB as decb
import DES_OFB as dofb
@app.route('/')
def hello_world():
    """Serve the landing page."""
    landing_page = 'index.html'
    return render_template(landing_page)
@app.route('/cripto', methods=['GET', 'POST'])
def cripto():
    """Encrypt and decrypt the demo image with the ciphers chosen in the form.

    On POST: removes stale result images, then dispatches to the cipher
    module selected by the 'cif' field (encrypt) and the 'des' field
    (decrypt). Returns "success" for POST requests and "0" otherwise.
    """
    if request.method != 'POST':
        return "0"
    # Drop results of a previous run; each file independently, so one missing
    # file no longer prevents removal of the other.
    for stale in ("static/encrypt.bmp", "static/decrypt.bmp"):
        try:
            os.remove(stale)
        except OSError:
            pass
    # Form value -> cipher module (replaces the two 8-branch if/elif ladders).
    ciphers = {
        "DES-CBC": dcbc, "DES-CFB": dcfb, "DES-ECB": decb, "DES-OFB": dofb,
        "AES-CBC": acbc, "AES-CFB": acfb, "AES-ECB": aecb, "AES-OFB": aofb,
    }
    encryptor = ciphers.get(request.form['cif'])
    if encryptor is not None:
        encryptor.encrypt()
    decryptor = ciphers.get(request.form['des'])
    if decryptor is not None:
        decryptor.decrypt()
    return "success"
# NOTE(review): debug=True enables the interactive Werkzeug debugger — never
# expose this in production; consider an `if __name__ == '__main__':` guard.
app.run(debug = True)
|
import logging
from jsonutils import Values
from . import ProcessingGenerator
import sanitychecks
class JsonGenerator(ProcessingGenerator):
terms_template = '''
<div class="terms-text">
<h2>Licence summary</h2>
<p>{{/spec/license/summary}}</p>
<h2>Licence type</h2>
<ul>
<li>Open source: {{/auto/license/is-open-source}}</li>
<li>Proprietary: {{/auto/license/is-proprietary}}</li>
<li>Evaluation licence: {{/auto/license/has-evaluation}}</li>
</ul>
<h2>Licence features</h2>
<ul>
<li>Commercial use: {{/spec/license/features/commercial-use}}</li>
<li>Modifications allowed: {{/spec/license/features/modifications-allowed}}</li>
<li>Distribution allowed: {{/spec/license/features/distribution-allowed}}</li>
<li>Include copyright: {{/spec/license/features/include-copyright}}</li>
<li>Include original: {{/spec/license/features/include-original}}</li>
<li>State changes: {{/spec/license/features/state-changes}}</li>
<li>Disclose source code: {{/spec/license/features/disclose}}</li>
</ul>
<h2>Licence fee</h2>
<p>{{/spec/license/fee}}</p>
<h2>Copyright statement</h2>
<p>{{/spec/license/copyright}}</p>
<h2>Full licence</h2>
<p>{{/spec/license/full}}</p>
</div>'''
contacts_template = '''
<div class="contacts-text">
<h2>Owner/developer</h2>
<p>{{/auto/nice-owners}}</p>
<h2>Contact person(s)</h2>
{{if /auto/contacts/primary != ""}}
<div class="contacts-primary">
<span class="contact-name">{{/auto/contacts/primary/name}}</span>
<span class="contact-company">{{/auto/contacts/primary/company/fullname}}</span>
<span class="contact-email">{{/auto/contacts/primary/email}}</span>
</div>
{{endif}}
{{if /auto/contacts/technical != ""}}
<h3>For technical information:</h3>
{{for /auto/contacts/technical}}
<div class="contacts-technical">
<span class="contact-name">%value/name%</span>
<span class="contact-company">%value/company/fullname%</span>
<span class="contact-email">%value/email%</span>
</div>
{{endfor}}
{{endif}}
{{if /auto/contacts/legal != ""}}
<h3>For licensing information:</h3>
{{for /auto/contacts/legal}}
<div class="contacts-legal">
<span class="contact-name">%value/name%</span>
<span class="contact-company">%value/company/fullname%</span>
<span class="contact-email">%value/email%</span>
</div>
{{endfor}}
{{endif}}
</div>'''
playground_url = 'http://playground.mediafi.org:8000/'
    def __init__(self, escaping = lambda t : t):
        """Create a generator; `escaping` post-processes emitted text (identity by default).
        `self.idx` accumulates auxiliary indexes (tags, playground repos) across entries."""
        ProcessingGenerator.__init__(self, escaping)
        self.idx = Values()
    def generate_entry(self, se):
        """Build and return the catalogue entry (a Values tree) for one enabler `se`."""
        self.se = se
        entry = Values()
        nc = self.se.get_naming_conventions()
        self.se_id = nc.normalizedname()
        entry.set('/id', self.se_id)
        # Each gen* step fills one section; they read self.se / self.se_id.
        self.genDiscover(entry)
        self.genMedia(entry)
        self.genUsage(entry)
        self.genTermsAndConditions(entry)
        self.genDelivery(entry)
        # entry.set('/debug', self.se)
        self.se = None
        return entry
    def genDiscover(self, entry):
        """Fill the discovery section: name, supplier, description, category,
        documentation and support."""
        entry.set('/name', self.se.get_name())
        entry.set('/supplier', self.process_value('/auto/nice-owners'))
        self.genDescription(entry)
        self.genCategory(entry)
        self.genDocumentation(entry)
        self.genSupport(entry)
    def genUsage(self, entry):
        """Fill the usage section: try/tweak links and tutorial references."""
        self.genTry(entry)
        self.genTweak(entry)
        entry.set('/usage/tutorials', self.se.get('/auto/usage/tutorials'))
    def genTermsAndConditions(self, entry):
        """Fill licence information; the readable text is rendered from terms_template."""
        entry.set('/terms/fi-ppp/type', self.se.get('/auto/license'))
        entry.set('/terms/fi-ppp/license', self.se.get('/spec/license'))
        entry.set('/terms/fi-ppp/text', self.process_text_snippet(JsonGenerator.terms_template))
        if self.se.get('/spec/license/beyond') is None:
            entry.set('/terms/beyond-fi-ppp', None)
            return
        # TODO: handle beyond FI-PPP license information
def genDescription(self, entry):
entry.set('/description/short', self.process_value('/spec/documentation/tag-line'))
entry.set('/description/what-it-does', self.process_value('/spec/documentation/what-it-does'))
entry.set('/description/how-it-works', self.process_value('/spec/documentation/how-it-works'))
entry.set('/description/why-you-need-it', self.process_value('/spec/documentation/why-you-need-it'))
def genCategory(self, entry):
entry.set('/category/platforms', self.se.get('/spec/platforms'))
entry.set('/category/nice-platforms', self.se.get('/auto/category/nice-platforms'))
tags = self.se.get('/auto/category/tags')
entry.set('/category/tags', tags)
for t in tags:
self.index('tags', t, self.se_id)
tags = self.se.get('/auto/category/additional-tags')
entry.set('/category/additional-tags', tags)
for t in tags:
self.index('additional-tags', t, self.se_id)
def genDocumentation(self, entry):
entry.set('/documentation/specification', self.se.get('/auto/documentation/wiki-url'))
entry.set('/documentation/devguide', self.se.get('/auto/documentation/devguide-url'))
entry.set('/documentation/installguide', self.se.get('/auto/documentation/installguide-url'))
entry.set('/documentation/api', self.se.get('/auto/documentation/api-url'))
def genSupport(self, entry):
    """Populate /support; the requests channel is intentionally unset."""
    entry.set('/support/faq', self.se.get('/auto/support/faq-url'))
    entry.set('/support/bugtracker', self.se.get('/auto/support/bugtracker'))
    entry.set('/support/requests', None)
    entry.set('/support/contacts/text', self.process_text_snippet(JsonGenerator.contacts_template))
def genYoutubeVideo(self, entry, json_path, yid):
    """Write youtube-id and url under json_path, or null the node when
    no video id is available."""
    if yid is None:
        entry.set(json_path, None)
        return
    entry.set('%s/youtube-id' % json_path, yid)
    entry.set('%s/url' % json_path, 'https://youtu.be/%s' % yid)
def genMedia(self, entry):
    """Populate /media: a catalog-namespaced thumbnail filename plus the
    teaser and tutorial youtube entries."""
    filename = self.se.get('/auto/media/thumbnail')
    if filename is not None:
        # Keep only the part after the last ':' separator (the extension).
        fileparts = filename.rpartition(':')
        nc = self.se.get_naming_conventions()
        # Renamed from `id` to avoid shadowing the builtin.
        norm_id = nc.normalizedname()
        entry.set('/media/thumbnail', 'catalog.%s.%s' % (norm_id, fileparts[2]))
    else:
        entry.set('/media/thumbnail', None)
    self.genYoutubeVideo(entry, '/media/teaser', self.se.get('/auto/media/youtube-pitch'))
    self.genYoutubeVideo(entry, '/media/tutorial', self.se.get('/auto/media/youtube-tutorial'))
def genTry(self, entry):
    """Set /usage/try from the online-demo entry and sanity-check its link."""
    online_demo = self.se.get('/auto/usage/online-demo')
    entry.set('/usage/try', online_demo)
    if online_demo is not None:
        sanitychecks.check_remote_resource(online_demo.get('/link'), 'Probably invalid try link!')
def genTweak(self, entry):
    """Set /usage/tweak to the playground URL derived from the repo link.

    Also records the playground in the 'playground' index together with the
    result of a remote check for the repo's playground.json.
    """
    repo = self.se.get('/auto/usage/playground/link')
    if repo is not None:
        # The last path component of the repo URL becomes the playground suffix.
        repoparts = repo.rpartition('/')
        suffix = repoparts[2]
        tweak = JsonGenerator.playground_url + suffix
        check = sanitychecks.check_remote_resource(repo + '/blob/master/playground.json', 'Probably invalid tweak link since no "playground.json" was found!')
        self.index('playground', suffix, {'url': repo, 'se': self.se.get_name(), 'valid': check})
    else:
        tweak = None
    entry.set('/usage/tweak', tweak)
def genDelivery(self, entry):
    """Populate /delivery: model, artifact description, docker image, SaaS
    endpoint, sources, and (when known) the repository subtree."""
    entry.set('/delivery/model', self.se.get('/auto/delivery/model'))
    entry.set('/delivery/artifact', self.process_value('/spec/delivery/description'))
    entry.set('/delivery/docker', self.se.get('/spec/delivery/docker'))
    entry.set('/delivery/saas-instance', self.se.get('/spec/delivery/instances/public/endpoint'))
    entry.set('/delivery/source-code', self.se.get('/spec/delivery/sources'))
    if self.se.get('/auto/delivery/repository/url') is None:
        # No repository known: null the whole subtree instead of partial data.
        entry.set('/delivery/repository', None)
    else:
        entry.set('/delivery/repository/url', self.se.get('/auto/delivery/repository/url'))
        entry.set('/delivery/repository/checkout-cmd', self.process_value('/auto/delivery/repository/checkout-cmd'))
def index(self, idxname, key, val):
    """Append val to the list stored at /<idxname>/<key> in the index tree,
    creating the list on first use."""
    path = '/%s/%s' % (idxname, key)
    entries = self.idx.get(path)
    if entries is None:
        entries = []
    entries.append(val)
    self.idx.set(path, entries)
def get_index(self, idxname):
    """Serialize the named index; return '' when it is missing or when
    serialization yields nothing."""
    idx = self.idx.get('/' + idxname)
    if idx is None:
        return ""
    serialized = idx.serialize()
    return "" if serialized is None else serialized
|
"""Create splits for the dataset.
See ``python create_splits.py --help`` for more information.
"""
import json
import logging
import os
import random
import re
import click
logger = logging.getLogger(__name__)
# helper functions
def _normalize(s):
"""Return a normalized version of s."""
# Remove repeated whitespace characters.
s = re.sub(r'\s+', ' ', s)
# Remove non-whitespace or word characters.
s = re.sub(r'[^\w\s]', '', s)
# Lowercase the string.
s = s.lower()
# Strip leading and trailing whitespace.
s = s.strip()
return s
# main function
@click.command(
    context_settings={
        'help_option_names': ['-h', '--help']
    })
@click.argument(
    'data_path',
    type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument(
    'output_dir',
    type=click.Path(exists=True, file_okay=False, dir_okay=True))
def create_splits(data_path, output_dir):
    """Write splits for the 20Qs data at DATA_PATH to OUTPUT_DIR.

    Write splits for the 20 Questions data at DATA_PATH to OUTPUT_DIR,
    splitting the data into 3 parts: train, dev, and test.
    Additionally, train, dev, and test have two additional attributes:
    subject_split_index and question_split_index. subject_split_index
    and question_split_index are the lowest index (train 0, dev 1, test
    2) of the splits that the subject or the question appears in (after
    lowercasing, stripping punctuation, and stripping any leading or
    trailing whitespace). Thus, a subject_split_index of 1 means that
    the subject appears in dev (and potentially in test) but not
    train.
    """
    # NOTE(review): `random` is never seeded here, so the splits differ on
    # every run — confirm whether reproducibility is desired.
    #
    # The structure of the splits is a bit complicated. We want a
    # train, dev, and test set where the dev and test set have a good
    # number of subjects and questions which do not appear in the
    # training set or each other. To accomplish this distribution,
    # first we'll randomly choose which subjects and questions appear
    # in train, dev, and test, then we'll put each instance into the
    # nine {subject, question} -> {train, dev, test} buckets, lastly
    # we'll split the buckets into actual train, dev, and test sets.
    logger.info(f'Reading {data_path}.')
    with click.open_file(data_path, 'r') as data_file:
        rows = []
        for ln in data_file:
            rows.append(json.loads(ln))
    logger.info('Bucketing instances by subjects and questions.')
    subjects = list(set([_normalize(row['subject']) for row in rows]))
    random.shuffle(subjects)
    questions = list(set([_normalize(row['question']) for row in rows]))
    random.shuffle(questions)
    # Assign each subject a split index (0=train, 1=dev, 2=test) using
    # cumulative 80/10/10 portions of the shuffled list.
    subject_to_split_index = {}
    start = 0
    for split_index, portion in enumerate([0.8, 0.9, 1.0]):
        end = int(len(subjects) * portion)
        for subject in subjects[start:end]:
            subject_to_split_index[subject] = split_index
        start = end
    # Same 80/10/10 assignment for questions.
    question_to_split_index = {}
    start = 0
    for split_index, portion in enumerate([0.8, 0.9, 1.0]):
        end = int(len(questions) * portion)
        for question in questions[start:end]:
            question_to_split_index[question] = split_index
        start = end
    # subject_question_instances:
    #   first index: the split index of the subject
    #   second index: the split index of the question
    subject_question_instances = [
        [[], [], []],
        [[], [], []],
        [[], [], []]
    ]
    for row in rows:
        subject_question_instances\
            [subject_to_split_index[_normalize(row['subject'])]]\
            [question_to_split_index[_normalize(row['question'])]]\
            .append(row)
    logger.info('Splitting instances into train, dev, and test.')
    # first list is train, second is dev, third is test
    splits = [[], [], []]
    # subjects and questions from train can go in dev or test, and ones
    # from dev can go in test, so map each bucket to the split index
    # that is the max of the row and column indices.
    for i, row in enumerate(subject_question_instances):
        for j, col in enumerate(row):
            splits[max(i, j)].extend(col)
    # distribute some of the training data into dev and test so that we
    # have a point of comparison for subjects and questions that have
    # both been seen at train time.
    train, dev, test = splits
    train_end = int(len(train) * 0.9)
    dev_end = int(len(train) * 0.95)
    random.shuffle(train)
    test.extend(train[dev_end:])
    dev.extend(train[train_end:dev_end])
    train = train[:train_end]
    splits = [train, dev, test]
    # shuffle all the splits
    for split in splits:
        random.shuffle(split)
    # determine the finalized split indices for each subject / question,
    # then write to disk
    logger.info('Writing splits to disk.')
    subject_to_final_split_index = {}
    question_to_final_split_index = {}
    for final_split_index, split in enumerate(splits):
        for row in split:
            normalized_subject = _normalize(row['subject'])
            if normalized_subject not in subject_to_final_split_index:
                subject_to_final_split_index[normalized_subject] = \
                    final_split_index
            normalized_question = _normalize(row['question'])
            if normalized_question not in question_to_final_split_index:
                question_to_final_split_index[normalized_question] = \
                    final_split_index
    # Write one JSON-lines file per split, annotating every row with the
    # finalized subject/question split indices.
    for split_name, split in zip(['train', 'dev', 'test'], splits):
        split_path = os.path.join(
            output_dir, f'twentyquestions-{split_name}.jsonl')
        with click.open_file(split_path, 'w') as split_file:
            for row in split:
                row['subject_split_index'] = subject_to_final_split_index[
                    _normalize(row['subject'])]
                row['question_split_index'] = question_to_final_split_index[
                    _normalize(row['question'])]
                split_file.write(json.dumps(row) + '\n')


if __name__ == '__main__':
    create_splits()
|
def foreigned(to_db='default', in_db="operator_main_dbs"):
    """Decorator for foreign tables (docstring translated from Russian).

    The original body was syntactically invalid Python (four bare ``for``
    lines), so it is restored here to a no-op identity decorator that keeps
    the signature and the configuration assertion.
    TODO(review): reinstate the real foreign-table wiring.
    """
    # Only the default routing configuration is supported so far.
    assert to_db == 'default' and in_db == "operator_main_dbs"

    def decorator(obj):
        return obj
    return decorator
|
import base64
import requests
from utils import REQUEST_ERRORS
def file_to_base64(file_name) -> str:
    """Read file_name as binary and return its base64 text encoding."""
    with open(file_name, 'rb') as handle:
        raw = handle.read()
    return base64.b64encode(raw).decode()
class Training:
    """Submit three hotword samples to the snowboy training API and allow
    saving the resulting model file to disk."""
    URL = 'https://snowboy.kitt.ai/api/v1/train/'
    # Default request parameters; any of them can be overridden via `params`.
    PARAMS = {
        'name': 'alice_mdm',
        'language': 'ru',
        'age_group': '30_39',
        'gender': 'M',
        'microphone': 'mic',  # e.g., PS3 Eye
        'token': 'd4977cf8ff6ede6efb8d2277c1608c7dbebf18a7',
    }

    def __init__(self, file1, file2, file3, params: "dict | None" = None):
        # Fixed annotation: the original `dict or None` evaluated to just
        # `dict` at definition time, which was misleading.
        self.__params = self.PARAMS.copy()
        if isinstance(params, dict):
            self.__params.update(params)
        # noinspection PyTypeChecker
        self.__params['voice_samples'] = [
            {'wave': file_to_base64(file1)},
            {'wave': file_to_base64(file2)},
            {'wave': file_to_base64(file3)}
        ]
        self._data = None  # response byte-chunk iterator, set by _request()
        self._request()

    def _request(self):
        """POST the samples and store the streaming response content.

        Raises RuntimeError on connection errors or non-2xx HTTP status.
        """
        try:
            response = requests.post(self.URL, json=self.__params)
        except REQUEST_ERRORS as e:
            raise RuntimeError('Request error: {}'.format(e))
        if not response.ok:
            raise RuntimeError('Server error: {}'.format(response.status_code))
        self._data = response.iter_content()

    def save(self, file_path):
        """Write the downloaded model to file_path and return the path.

        Raises RuntimeError (narrowed from a bare Exception; still caught by
        any `except Exception` caller) when no data has been fetched.
        """
        if self._data is None:
            raise RuntimeError('There\'s nothing to save')
        with open(file_path, 'wb') as fp:
            for d in self._data:
                fp.write(d)
        return file_path
|
import socket
import threading
import json
def recv_sockdata(the_socket):
    """Read 1024-byte chunks from the socket until the 'END' marker appears;
    return all decoded text received before the marker."""
    pieces = []
    while True:
        chunk = the_socket.recv(1024).decode()
        marker = chunk.find("END")
        if marker != -1:
            pieces.append(chunk[:marker])
            break
        pieces.append(chunk)
    return "".join(pieces)
def deal_data(data):
    """Process one received message (currently just echoes it to stdout)."""
    print(data)
def do_server(sock,addr):
    """Serve one client connection: send a welcome message, then receive and
    process messages until the peer disconnects."""
    print("收到来自客户端{}的连接:".format(addr))
    # Send a JSON greeting terminated with the " END" marker the peer's
    # recv_sockdata-style reader expects.
    sock.sendall((json.dumps({"msg":"welcome connect to server"})+" END").encode())
    # Receive and process data in an endless loop.
    while True:
        try:
            recv_data = recv_sockdata(sock)
            deal_data(recv_data)  # process the received payload
        except (ConnectionResetError,ConnectionAbortedError):
            # Client went away; end this handler.
            break
if __name__ == '__main__':
    HOST = ''        # all interfaces
    PORT = 10001
    # Create the listening TCP socket.
    tcpSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    # Bind the address.
    tcpSocket.bind((HOST,PORT))
    # Listen; the argument is the maximum backlog of pending connections.
    tcpSocket.listen(16)
    threads = []  # one thread per client connection
    # Accept clients forever. Each connection gets its own handler thread so
    # that a busy client does not block new connections.
    while True:
        print('Waiting for connection...')
        # Accept a new connection:
        sock,addr = tcpSocket.accept()
        th = threading.Thread(target=do_server,args=(sock,addr))
        th.start()
        threads.append(th)
|
from Crypto.Util.number import inverse
# RSA challenge parameters: tiny public exponent e=3 with a plaintext small
# enough that m**3 < n, so the ciphertext is a perfect cube over the integers
# and the message is recovered as the integer cube root of c (no factoring).
c=2205316413931134031074603746928247799030155221252519872650080519263755075355825243327515211479747536697517688468095325517209911688684309894900992899707504087647575997847717180766377832435022794675332132906451858990782325436498952049751141
n=29331922499794985782735976045591164936683059380558950386560160105740343201513369939006307531165922708949619162698623675349030430859547825708994708321803705309459438099340427770580064400911431856656901982789948285309956111848686906152664473350940486507451771223435835260168971210087470894448460745593956840586530527915802541450092946574694809584880896601317519794442862977471129319781313161842056501715040555964011899589002863730868679527184420789010551475067862907739054966183120621407246398518098981106431219207697870293412176440482900183550467375190239898455201170831410460483829448603477361305838743852756938687673
e = 3
# find the cube root of large integer
# code copy from https://riptutorial.com/python/example/8751/computing-large-integer-roots
def nth_root(x, n):
    """Integer n-th root of x by binary search; returns the exact root for
    perfect powers (and the nearby integer otherwise)."""
    # Grow an upper bound by doubling until it overshoots the root.
    hi = 1
    while hi ** n <= x:
        hi *= 2
    lo = hi // 2
    # Narrow [lo, hi) while the bounds can still make progress.
    while lo < hi:
        mid = (lo + hi) // 2
        power = mid ** n
        if lo < mid and power < x:
            lo = mid
        elif hi > mid and power > x:
            hi = mid
        else:
            # Exact root found, or the bounds have pinched.
            return mid
    return mid + 1
root = nth_root(c, 3)
# Decode the recovered plaintext. The original line used Python 2 idioms —
# `hex(root)[2:-1]` (dropping the trailing 'L' of a long) and
# `.decode("hex")` — both of which break on Python 3, which this file
# otherwise targets (print() calls). Use bytes.fromhex instead.
hex_digits = hex(root)[2:]
if len(hex_digits) % 2:
    hex_digits = '0' + hex_digits  # fromhex needs an even digit count
print(bytes.fromhex(hex_digits).decode())
# Full RSA decryption, for reference, if p and q were known:
# phi = (p-1)*(q-1)
# d = inverse(e, phi)
# m = pow(c, d, n)
# print(bytes.fromhex(hex(m)[2:]).decode())
import re
import json
import scrapy
class ArtistsSpider(scrapy.Spider):
    """Scrape artist profile pages into structured dicts (urls come from a
    pre-built artists_urls.txt file)."""
    name = "artists"
    # Seed the crawl from the saved list of artist page URLs.
    f = open("artists_urls.txt")
    start_urls = [url.strip() for url in f.readlines()]
    f.close()

    def parse(self, response):
        """Extract header info, compositions, audio, videos, discography and
        article links from a single artist page."""
        artist_url = response.url
        # Header fields identified by fixed ASP.NET control ids.
        img = response.xpath('//img[@id="main_fichacreador1_encabezado1_img_URLImg"]/@src').extract_first()
        name = response.xpath('//a[@id="main_fichacreador1_encabezado1_hl_NombreApellido"]/text()').extract_first()
        real_name = response.xpath('//span[@id="main_fichacreador1_encabezado1_lbl_NombreCompleto"]/text()').extract_first()
        if real_name:
            real_name = real_name.replace('Nombre real: ', '')
        category = response.xpath('//span[@id="main_fichacreador1_encabezado1_lbl_Categoria"]/text()').extract_first()
        dates = response.xpath('//span[@id="main_fichacreador1_encabezado1_lbl_Fechas"]/text()').extract_first()
        place_b = response.xpath('//span[@id="main_fichacreador1_encabezado1_lbl_LugarNacimiento"]/node()').extract()
        # Strip the label text and layout node from the birthplace fragment.
        if 'Lugar de nacimiento:' in place_b: place_b.remove('Lugar de nacimiento:')
        if '<br>' in place_b: place_b.remove('<br>')
        pseudonym = response.xpath('//span[@id="main_fichacreador1_encabezado1_lbl_Seudonimo"]/text()').extract_first()
        # Links to all compositions, lyrics and sheet music.
        all_compositions=[]
        for c in \
                response.xpath('//a[contains(@id,"main_fichacreador1_DL_Temas_hl_Letra_")]/@href').extract():
            all_compositions.append(c)
        lyrics=[]
        for c in \
                response.xpath('//a[contains(@id,"main_fichacreador1_DL_Letras_hl_Letra_")]/@href').extract():
            lyrics.append(c)
        compositions =[]
        for c in \
                response.xpath('//a[contains(@id,"main_fichacreador1_DL_Partituras_hl_Partitura_")]/@href').extract():
            compositions.append(c)
        # The audio playlist lives in inline JS: pull the array literal out of
        # `var audioPlaylist = new Playlist(...)` and parse it by hand.
        pattern = re.compile(r'var audioPlaylist = new Playlist\(".*", [\[\r\n\t\t ]?[ ]?(.*?)[\r\n\t \]\r\n ]?[ \]]?, {.*}\);', re.MULTILINE | re.DOTALL)
        audio2 = response.xpath('//script[contains(., "var audioPlaylist")]/text()').re(pattern)
        audio = []
        if len(audio2):
            audio_items= audio2[0][1:-1].split('},{')
            for rec in audio_items:
                item = {}
                rec_items = re.findall(r'(id:".*"),(idtema:".*"),(titulo:".*"),(canta:".*"),(detalles:".*"),(duracion:".*"),(formacion:".*"),(oga:".*"),(mp3:".*")', rec)
                for a in rec_items[0]:
                    # Each group looks like `key:"value"`; keep only the
                    # whitelisted fields.
                    a = a[:-1]
                    a = a.split(':"')
                    if a[0] in ["id","mp3","oga","titulo","duracion","formacion","detalles"]:
                        item[a[0]] = a[1]
                audio.append(item)
        videos = response.xpath('//iframe[contains(@src,"youtu")]/@src').extract()
        # Discography rows: pair each details div with its preceding sibling
        # (which carries the track link, name and rhythm/duration).
        recordings=[]
        for d in\
                response.xpath('//div[@id="discografia"]//div[@class="text-muted"]'):
            prev = d.xpath('.//preceding-sibling::div[1]')
            r_id = prev.xpath('.//a[contains(@id,"main_fichacreador1_RP_Discografia_hl_Tema_")]/@href').extract()
            r_name = prev.xpath('.//a[contains(@id,"main_fichacreador1_RP_Discografia_hl_Tema_")]/text()').extract()
            r_type = prev.xpath('.//span[contains(@id,"main_fichacreador1_RP_Discografia_lbl_RitmoDuracion_")]/text()').extract()
            r_vocal = d.xpath('.//span[contains(@id,"main_fichacreador1_RP_Discografia_lbl_Canta_")]/text()').extract()
            r_performer_type = d.xpath('.//span[contains(@id,"main_fichacreador1_RP_Discografia_lbl_Formacion_")]/text()').extract()
            r_performer = d.xpath('.//span[contains(@id,"main_fichacreador1_RP_Discografia_lbl_Interprete_")]/text()').extract()
            r_description = d.xpath('.//span[contains(@id,"main_fichacreador1_RP_Discografia_lbl_DetallesGrabacion_")]/text()').extract()
            recordings.append({'r_type': r_type, 'r_vocal': r_vocal, 'r_name':
                               r_name, 'r_id': r_id,
                               'r_performer_type': r_performer_type, 'r_performer':
                               r_performer, 'r_description': r_description})
        biography = response.xpath('//a[contains(@id,"main_fichacreador1_Biografias1_DL_Biografias_hl_Biografia_")]/@href').extract()
        articles = response.xpath('//a[contains(@id,"main_fichacreador1_Cronicas1_RP_Cronicas_hl_Cronica_")]/@href').extract()
        return {'artist_url': artist_url, 'img': img, 'name': name, 'real_name':real_name,
                'category': category, 'dates': dates, 'place_b': place_b,
                'pseudonym': pseudonym, 'all_compositions':
                all_compositions, 'lyrics': lyrics, 'audio': audio, 'video': videos,
                'recordings': recordings, 'compositions': compositions,
                'biography': biography, 'articles':articles}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 14:11:52 2020
@author: charm
"""
import dataset_lib as dat
import models as mdl
import torchvision.transforms as transforms
import torch.utils.data as data
import torchvision
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as opt
import numpy as np
#%%
def generate_grid(dataset,num_imgs=64,orig=True):
    """Display a grid of num_imgs randomly-drawn images from dataset.

    When orig is True the images are un-normalized first (assumes the
    dataset yields (inputs, aug_inputs, labels) batches — TODO confirm).
    """
    loader = data.DataLoader(dataset, batch_size=num_imgs, shuffle=True)
    # Grab a single batch of training data.
    batch, _augmented, _labels = next(iter(loader))
    if orig:
        for idx in range(batch.shape[0]):
            batch[idx, :, :, :] = unnorm(batch[idx, :, :, :])
    # Tile the batch into one image and show it.
    grid = torchvision.utils.make_grid(batch)
    imshow(grid)
def imshow(inp, title=None):
    """Display a CHW image tensor with matplotlib."""
    # Tensor layout is CHW; matplotlib expects HWC.
    image = inp.numpy().transpose((1, 2, 0))
    fig, axis = plt.subplots(figsize=(10, 10))
    axis.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
# Inverse of the ImageNet-style normalization applied by trans_function below.
unnorm = transforms.Normalize([-0.485/0.229, -0.456/0.224, -0.406/0.225], [1/0.229, 1/0.224, 1/0.225])
if __name__ == "__main__":
file_dir = '/home/charm/data_driven_force_estimation/experiment_data' # define the file directory for dataset
model_type = "S"
feat_extract = False
force_align = False
weight_file = weight_file = "best_modelweights_" + model_type
if model_type!="S" and feat_extract:
weight_file="best_modelweights_" + model_type + "_ft"
if force_align and model_type!= "V" :
weight_file = weight_file + "_faligned"
if model_type == "V_RNN":
trans_function = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
else:
# Define a transformation for the images
trans_function = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
# We have to define the crop area of interest for the images
# I hope to create cross-hairs in the gui so I can "aim" better during data collection.
# That would help make the crop area consistent.
crop_list = []
for i in range(1,48):
#crop_list.append((50,350,300,300))
crop_list.append((270-150,480-150,300,300))
'''
train_list = [1,3,5,7,
8,10,12,14,
15,17,19,21,41,42]
val_list = [2,6,
9,13,
16,20,44]
'''
train_list = [1,3,5,7] # small data
#train_list = [1,3,5,7,48,49] # slow pulls
#val_list = [2,6,50] # slow pulls
#train_list = [1,3,5,7,51,52] # fstate pulls
#val_list = [2,6,53] # fstate pulls
#train_list = [1,3,5,7,54,55] # f fs pulls
val_list = [2,6,56] # ffs pulls
#test_list = [4,11,18,
#22,23,24,25,26,27,28,29,32,33]
test_list = [4,8]
config_dict={'file_dir':file_dir,
'include_torque': False,
'spatial_forces': force_align,
'custom_state': None,
'batch_size': 32,
'crop_list': crop_list,
'trans_function': trans_function}
dataloaders,dataset_sizes = dat.init_dataset(train_list,val_list,test_list,model_type,config_dict,augment=False)
np.savetxt('PSM2_mean_smalldata.csv',dataloaders['train'].dataset.mean)
np.savetxt('PSM2_std_smalldata.csv',dataloaders['train'].dataset.stdev)
'''
## if we ablate uncomment these lines -----------------------------
qty = ['t','fx','fy','fz','tx','ty','tz',
'px','py','pz','qx','qy','qz','qw','vx','vy','vz','wx','wy','wz',
'q1','q2','q3','q4','q5','q6','q7',
'vq1','vq2','vq3','vq4','vq5','vq6','vq7',
'tq1','tq2','tq3','tq4','tq5','tq6','tq7',
'q1d','q2d','q3d','q4d','q5d','q6d','q7d',
'tq1d','tq2d','tq3d','tq4d','tq5d','tq6d','tq7d',
'psm_fx','psm_fy','psm_fz','psm_tx','psm_ty','psm_tz',
'J1','J2','J3','J4','J5','J6','J1','J2','J3','J4','J5','J6',
'J1','J2','J3','J4','J5','J6','J1','J2','J3','J4','J5','J6',
'J1','J2','J3','J4','J5','J6','J1','J2','J3','J4','J5','J6']
force_features = ['tq1','tq2','tq3','tq4','tq5','tq6','tq7',
'q1d','q2d','q3d','q4d','q5d','q6d','q7d',
'tq1d','tq2d','tq3d','tq4d','tq5d','tq6d','tq7d',
'psm_fx','psm_fy','psm_fz','psm_tx','psm_ty','psm_tz']
pos_features = ['px','py','pz','qx','qy','qz','qw',
'vx','vy','vz','wx','wy','wz',
'q1','q2','q3','q4','q5','q6','q7',
'vq1','vq2','vq3','vq4','vq5','vq6','vq7',
'q1d','q2d','q3d','q4d','q5d','q6d','q7d']
vel_features=['vx','vy','vz','wx','wy','wz',
'vq1','vq2','vq3','vq4','vq5','vq6','vq7']
mask_feature = vel_features
mask = np.isin(qty,mask_feature,invert=False)
for loader in dataloaders.values():
loader.dataset.mask_labels(mask)
weight_file = weight_file + "_V" # add ablation type
'''
#end of ablation code
#%%
#generate_grid(dataloaders['test'].dataset,64)
# define model
if model_type == "VS":
model = mdl.StateVisionModel(30, 54, 3,feature_extract=feat_extract,TFN=True)
elif model_type == "S":
model = mdl.StateModel(54, 3)
elif (model_type == "V") or (model_type == "V_RNN"):
#model = mdl.VisionModel(3)
model = mdl.BabyVisionModel()
weight_file = weight_file + "_fffsdata.dat"
# create loss function
criterion = nn.MSELoss(reduction='sum')
# define optimization method
optimizer = opt.Adam(model.parameters(),lr=1e-3,weight_decay=0)
#optimizer = opt.SGD(model.parameters(),lr=1e-5,weight_decay=0,momentum=0.9)
model,train_history,val_history,_ = mdl.train_model(model,
criterion, optimizer,
dataloaders, dataset_sizes,
num_epochs=100,
L1_loss=1e-3,
model_type= model_type,
weight_file=weight_file,
suppress_log=False,
multigpu=False)
|
from django.contrib.auth.models import User
from django.http.response import Http404
from perfil.models import Endereco, Perfil
from .models import ItemPedido, Pedido
from typing import Dict, List
from django.shortcuts import redirect, render
from django.views import View
from sabores import models
from django.contrib import messages
class Carrinho(View):
    """Shopping-cart view: shows the session cart and its total price."""

    def get(self, *args, **kwargs):
        if not self.request.user.is_authenticated:
            return redirect('perfil:login')
        carrinho: Dict = self.request.session.get('carrinho')
        total = ''
        if carrinho:
            total = self.get_total()
        contexto: Dict = {
            'carrinho': carrinho,
            'total': total
        }
        return render(self.request, 'pedido/carrinho.html', contexto)

    def get_total(self):
        """Sum the 'preco' field of every dict-valued item in the session cart."""
        # isinstance instead of `type(x) == dict`; generator expression
        # instead of materializing an intermediate list.
        return sum(
            item['preco'] if isinstance(item, dict) else 0
            for item in self.request.session['carrinho'].values()
        )
class RemoveCarrinho(View):
    """Remove one flavor (``vid`` GET param) from the session cart."""

    def get(self, *args, **kwargs):
        if not self.request.user.is_authenticated:
            return redirect('perfil:login')
        self.carrinho: Dict = self.request.session.get('carrinho')
        self.sabor: str = self.request.GET.get('vid')
        http_referer = self.request.META.get('HTTP_REFERER')
        # Guard BEFORE computing the key: the original called
        # get_deleted_key() first, which crashed with AttributeError when
        # the cart was missing (None.items()).
        if not self.carrinho or not self.sabor:
            return redirect('pedido:fazerpedido')
        key = self.get_deleted_key()
        try:
            del self.carrinho[key]
        except KeyError:
            # Narrowed from a bare `except:`; only a missing key means 404.
            raise Http404
        self.request.session.save()
        return redirect(http_referer)

    def get_deleted_key(self):
        """Return the first cart key whose item matches self.sabor, or None."""
        return next(
            (k for k, v in self.carrinho.items() if v['sabor'] == self.sabor),
            None
        )
class ConfirmarPedido(Carrinho):
    """Order confirmation: show cart + addresses (GET), persist the order (POST)."""
    def get(self, *args, **kwargs):
        if not self.request.user.is_authenticated:
            return redirect('perfil:login')
        carrinho: Dict = self.request.session.get('carrinho')
        if not carrinho:
            return redirect('pedido:fazerpedido')
        user: User = self.request.user
        perfil: Perfil = Perfil.objects.filter(user=user.id).first()
        enderecos: Endereco = Endereco.objects.filter(perfil=perfil)
        total: float = self.get_total()  # inherited from Carrinho
        contexto: Dict = {
            'carrinho': carrinho,
            'total': total,
            'enderecos': enderecos
        }
        return render(self.request, 'pedido/confirmar_pedido.html', contexto)
    def post(self, *args, **kwargs):
        """Create the Pedido plus one ItemPedido per cart entry, then clear the cart."""
        user_id = self.request.user.id
        perfil: Perfil = Perfil.objects.filter(user=user_id).first()
        endereco_id: str = self.request.POST.get('endereco')
        endereco_db: Endereco = Endereco.objects.filter(id=endereco_id).first()
        # Flatten the chosen address into a display string stored on the order.
        endereco: str = f'{endereco_db.rua}, n {endereco_db.numero}, '\
            f'{endereco_db.cidade}'
        carrinho = self.request.session.get('carrinho')
        pedido: Pedido = Pedido(
            perfil_pedido=perfil,
            endereco=endereco,
            total=self.get_total(),
            status='P',  # presumably 'pending' — confirm against the Pedido model
        )
        pedido.save()
        # Single bulk insert for all cart items.
        ItemPedido.objects.bulk_create(
            [
                ItemPedido(
                    pedido=pedido,
                    sabor=pizza['sabor'],
                    tamanho=pizza['tamanho'],
                    quantidade=pizza['quantidade'],
                    preco=pizza['preco'],
                )
                for pizza in carrinho.values()
            ]
        )
        del self.request.session['carrinho']
        messages.success(
            self.request,
            'Pedido realizado com sucesso. Agradecemos a preferência.'
        )
        return redirect('pedido:meuspedidos')
class MeusPedidos(View):
    """List every order belonging to the logged-in user's profile."""

    def get(self, *args, **kwargs):
        if not self.request.user.is_authenticated:
            return redirect('perfil:login')
        perfil: Perfil = Perfil.objects.filter(
            user=self.request.user.id).first()
        pedidos: Pedido = Pedido.objects.filter(perfil_pedido=perfil)
        contexto: Dict = {'pedidos': pedidos}
        return render(self.request, 'pedido/meus_pedidos.html', contexto)
class FazerPedido(View):
    """Menu view: renders the flavor list and adds pizzas to the session cart."""
    def setup(self, *args, **kwargs):
        super().setup(*args, **kwargs)
        # Split the current menu into savory and sweet flavors.
        sabores_salgados: models.Sabor = models.Sabor.objects.filter(
            no_cardapio=True, salgado=True)
        sabores_doces: models.Sabor = models.Sabor.objects.filter(
            no_cardapio=True, doce=True)
        self.contexto: Dict = {
            'sabores_salgados': sabores_salgados,
            'sabores_doces': sabores_doces,
        }
        self.renderizar = render(
            self.request, 'pedido/fazer_pedido.html', self.contexto)
    def get(self, *args, **kwargs):
        return self.renderizar
    def post(self, *args, **kwargs):
        """Validate the submitted size/flavors and add the pizza to the cart."""
        if not self.request.user.is_authenticated:
            return redirect('perfil:login')
        http_referer = self.request.META.get('HTTP_REFERER')
        datas: Dict = self.request.POST
        self.carrinho = self.request.session.get('carrinho')
        self.tamanho: str = datas.get('size')
        self.quantidade: int = 1
        # Collect every POSTed flavor id (fields whose name contains 'sabor').
        sabores_id: List = []
        for k, v in datas.items():
            if 'sabor' in k:
                sabores_id.append(int(v))
        if not self.tamanho:
            messages.error(
                self.request,
                'Precisa selecionar um tamanho',
            )
            return redirect(http_referer)
        if self.is_2_flavors():
            # len(datas) != 4 means the form did not carry exactly two
            # flavor fields (plus csrf and size) — presumably; confirm
            # against the template.
            if len(datas) != 4:
                messages.error(
                    self.request,
                    'Selecione 2 sabores'
                )
                return redirect(http_referer)
            self.sabor1 = models.Sabor.objects.filter(id=sabores_id[0]).first()
            self.sabor2 = models.Sabor.objects.filter(id=sabores_id[1]).first()
            self.sabor = f'{self.sabor1} e {self.sabor2}'
            # Cart key: sum of ids plus the second flavor's name, so
            # different combos get distinct keys.
            self.sabor_id = str(sum(sabores_id)) + self.sabor2.nome_sabor
        if self.is_1_flavor():
            if len(datas) != 3:
                messages.error(
                    self.request,
                    'Selecione apenas 1 sabor'
                )
                return redirect(http_referer)
            self.sabor1 = models.Sabor.objects.filter(id=sabores_id[0]).first()
            self.sabor = f'{self.sabor1}'
            self.sabor_id = str(sum(sabores_id))
        if not self.carrinho:
            # First item: create the session cart.
            self.request.session['carrinho'] = {}
            self.carrinho = self.request.session['carrinho']
        if self.sabor_id in self.carrinho.keys():
            # Same pizza already in cart: bump quantity and recompute price.
            self.carrinho[self.sabor_id]['quantidade'] += 1
            preco = self.get_preco()
            self.carrinho[self.sabor_id]['preco'] = round(preco, 2)
        else:
            preco = self.get_preco()
            tamanho = self.get_size()
            self.carrinho[self.sabor_id] = {
                'sabor': self.sabor,
                'tamanho': tamanho,
                'quantidade': self.quantidade,
                'preco': round(preco, 2)
            }
        self.request.session.save()
        messages.success(
            self.request,
            'Pizza adicionada ao carrinho. Vá ate lá para finalizar o pedido.'
        )
        return redirect('pedido:fazerpedido')
    def get_preco(self):
        """Price = per-size flavor price (mean of both for half-and-half) x quantity."""
        tamanho: str = self.tamanho
        quantidade: int = self.quantidade
        if self.sabor_id in self.carrinho:
            # Item already in cart: use its accumulated quantity.
            quantidade = self.carrinho[self.sabor_id]['quantidade']
        if 'grande' in tamanho:
            if self.is_1_flavor():
                preco = self.sabor1.preco_grande * quantidade
            if self.is_2_flavors():
                preco = ((self.sabor1.preco_grande +
                          self.sabor2.preco_grande) / 2 * quantidade)
        if 'media' in tamanho:
            if self.is_1_flavor():
                preco = self.sabor1.preco_medio * quantidade
            if self.is_2_flavors():
                preco = ((self.sabor1.preco_medio +
                          self.sabor2.preco_medio) / 2 * quantidade)
        if 'pequena' in tamanho:
            if self.is_1_flavor():
                preco = self.sabor1.preco_pequeno * quantidade
            if self.is_2_flavors():
                preco = ((self.sabor1.preco_pequeno +
                          self.sabor2.preco_pequeno) / 2 * quantidade)
        return preco
    def get_size(self):
        # Human-readable size label for the cart display.
        if 'grande' in self.tamanho:
            return 'Pizza Grande'
        if 'media' in self.tamanho:
            return 'Pizza Media'
        return 'Pizza Pequena'
    def is_2_flavors(self):
        # The size value apparently encodes the flavor count (e.g. "...2") —
        # NOTE(review): confirm the encoding against the template.
        return '2' in self.tamanho
    def is_1_flavor(self):
        return '1' in self.tamanho
|
#!/usr/bin/env python3
# Imports
def cm2inch(value):
    """Convert a length in centimetres to inches."""
    inches_per_cm = 1 / 2.54
    return value * inches_per_cm
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
import pandas as pd
import numpy as np
import seaborn.apionly as sns
lsc = mpl.colors.LinearSegmentedColormap
# Palette endpoints for the custom blue -> yellow -> red colormap.
niceblue = '#64B5CD'
nicered = '#C44E52'
niceyellow = '#CCB974'
plt.style.use('custom')
colormap = lsc.from_list('foo',[(0.0,niceblue),
                                (0.5,niceyellow),
                                (1.0,nicered)])
# Linear color-scale range used by get_color below.
maxt, mint = 1e3,1
def get_color(t):
    """Map t linearly from [mint, maxt] onto the module colormap."""
    fraction = (t - mint) / (maxt - mint)
    return colormap(fraction)
# NOTE(review): the palette/colormap/style setup below duplicates the block
# above — looks like a notebook-cell paste; harmless but removable.
niceblue = '#64B5CD'
nicered = '#C44E52'
niceyellow = '#CCB974'
import matplotlib as mpl
lsc = mpl.colors.LinearSegmentedColormap
colormap = lsc.from_list('foo',[(0.0,niceblue),
                                (0.5,niceyellow),
                                (1.0,nicered)])
plt.style.use('custom')
# Average the annealing data over repeats for each (mc, parameter) pair.
df = pd.read_csv('anneal.csv').groupby(['mc','parameter']).mean().reset_index()
# Just the 1D plots
ts = np.unique(df['parameter'])
maxt, mint = max(ts), min(ts)  # redefines the linear range with data extremes
tws = np.unique(df['mc'])
maxtw, mintw = max(tws), min(tws)
def get_color_t(t):
    """Color for parameter value t, mapped on a log10 scale over [mint, maxt]."""
    log_hi = np.log10(maxt)
    log_lo = np.log10(mint)
    position = (np.log10(t) - log_lo) / (log_hi - log_lo)
    return colormap(position)
def get_color_tw(tw):
    """Color for waiting time tw, mapped on a log10 scale over [mintw, maxtw]."""
    log_hi = np.log10(maxtw)
    log_lo = np.log10(mintw)
    position = (np.log10(tw) - log_lo) / (log_hi - log_lo)
    return colormap(position)
fig = plt.figure(figsize=(cm2inch(15),cm2inch(8)))
axL = fig.add_subplot(121)
axR = fig.add_subplot(122)
i = 0
# Left panel: correlation vs waiting time, one curve per parameter value;
# only every other group is drawn to reduce clutter.
for t,tdf in df.groupby('parameter'):
    if i%2 == 0:
        axL.plot(tdf['mc'], tdf['value'],
                 lw=1, color=get_color_t(t))
    i+=1
axL.set_xscale('log')
axL.set_xlabel('$t_w$')
axL.set_ylabel('$C(t_w,t_w+t_0)$')
# NOTE(review): `i` is not reset before this second loop, so the every-other
# selection continues the parity left over from the loop above — confirm
# this is intended.
for tw,twdf in df.groupby('mc'):
    if i%2 == 0:
        axR.plot(twdf['parameter'], twdf['value'],
                 lw=1, color=get_color_tw(tw))
    i+=1
axR.set_xscale('log')
axR.set_xlabel('$t_0$')
axR.set_ylabel('')
axR.set_ylim(0,1)
axL.set_ylim(0,1)
axR.set_yticklabels([])
# Create a fake colorbar
def fmt(x, pos):
    """Tick formatter: render the tick value as a plain integer (pos unused)."""
    return '%d' % x
sm = cm.ScalarMappable(cmap=colormap,norm=plt.Normalize(vmin=0,vmax=maxt))
sm.set_array([])  # empty array lets the mappable back a colorbar
# NOTE(review): no colorbar is actually drawn on this figure — sm is unused here.
fig.savefig('corrfunction.pdf')
# For the beamer: the same two panels at half size for slides.
fig = plt.figure(figsize=(cm2inch(10),cm2inch(5)))
axL = fig.add_subplot(121)
axR = fig.add_subplot(122)
i = 0
for t,tdf in df.groupby('parameter'):
    if i%2 == 0:
        axL.plot(tdf['mc'], tdf['value'],
                 lw=1, color=get_color_t(t))
    i+=1
axL.set_xscale('log')
axL.set_xlabel('$t_w$')
axL.set_ylabel('$C(t_w,t_w+t_0)$')
# NOTE(review): as in the figure above, `i` keeps counting across loops.
for tw,twdf in df.groupby('mc'):
    if i%2 == 0:
        axR.plot(twdf['parameter'], twdf['value'],
                 lw=1, color=get_color_tw(tw))
    i+=1
axR.set_xscale('log')
axR.set_xlabel('$t_0$')
axR.set_ylabel('')
axR.set_ylim(0,1)
axL.set_ylim(0,1)
axR.set_yticklabels([])
# Create a fake colorbar
def fmt(x, pos):
    """Tick formatter for the colorbars: integer rendering (pos unused)."""
    return '%d' % x
# Two small inset colorbars, one per panel, placed via explicit axes.
sm = cm.ScalarMappable(cmap=colormap,norm=plt.Normalize(vmin=0,vmax=maxt))
sm.set_array([])
cbaxes = fig.add_axes([0.3, 0.4, 0.15, 0.01])
cbar = plt.colorbar(sm,
                    format=mpl.ticker.FuncFormatter(fmt),
                    orientation='horizontal',
                    cax=cbaxes)
cbar.set_ticks([0,500,1000])
cbar.outline.set_visible(False)
cbar.ax.set_title('t₀')
cbar.ax.title.set_size(10)
cbar.ax.title.set_color('#555555')
sm = cm.ScalarMappable(cmap=colormap,norm=plt.Normalize(vmin=0,vmax=maxt))
sm.set_array([])
cbaxes = fig.add_axes([0.63, 0.4, 0.15, 0.01])
cbar = plt.colorbar(sm,
                    format=mpl.ticker.FuncFormatter(fmt),
                    orientation='horizontal',
                    cax=cbaxes)
cbar.set_ticks([0,500,1000])
cbar.outline.set_visible(False)
cbar.ax.set_title('$t_w$')
cbar.ax.title.set_size(10)
cbar.ax.title.set_color('#555555')
# NOTE(review): Axes.grid's boolean parameter `b` was renamed `visible` in
# recent matplotlib; `grid(b=False)` errors there — confirm pinned version.
axL.grid(b=False)
axR.grid(b=False)
fig.savefig('corrfunction_beamer.pdf')
|
"""
Solve 1 layer shallow water equation [-2pi, 2pi]**3 with periodic bcs
(u,v)_t = -g*grad(h) - Cd*(u,v)*sqrt(u**2+v**2) (1)
h_t = -div( (H+h)*u) (2)
Discretize in time with 4th order Runge-Kutta
with both u(x, y, t=0) and h(x, y, t=0) given.
Using the Fourier basis for all two spatial directions.
mpirun -np 4 python swater_1L.py
"""
from sys import exit
from sympy import symbols, exp, lambdify
import numpy as np
import matplotlib.pyplot as plt
from mpi4py import MPI
from time import time
import h5py
from shenfun.fourier.bases import R2CBasis, C2CBasis
from shenfun import *
#from spectralDNS.utilities import Timer
from shenfun.utilities.h5py_writer import HDF5Writer
from shenfun.utilities.generate_xdmf import generate_xdmf
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
#timer = Timer()
# parameters
g=1.
H=1.
Cd=.001
# Use sympy to set up initial condition
x, y = symbols("x,y")
he = 1.0*exp(-(x**2 + y**2))
ue = 0.
ve = 0.
ul = lambdify((x, y), ue, 'numpy')
vl = lambdify((x, y), ve, 'numpy')
hl = lambdify((x, y), he, 'numpy')
# Size of discretization
N = (128, 128)
# Defocusing or focusing
gamma = 1
K0 = C2CBasis(N[0], domain=(-2*np.pi, 2*np.pi))
K1 = C2CBasis(N[1], domain=(-2*np.pi, 2*np.pi))
T = TensorProductSpace(comm, (K0, K1), slab=False, **{'planner_effort': 'FFTW_MEASURE'})
TTT = MixedTensorProductSpace([T, T, T])
#Kp0 = C2CBasis(N[0], domain=(-2*np.pi, 2*np.pi), padding_factor=1.5)
#Kp1 = C2CBasis(N[1], domain=(-2*np.pi, 2*np.pi), padding_factor=1.5)
#Tp = TensorProductSpace(comm, (Kp0, Kp1), slab=False, **{'planner_effort': 'FFTW_MEASURE'})
# Turn on padding by commenting out:
Tp = T
file0 = HDF5Writer("swater_1L{}.h5".format(N[0]), ['u', 'v', 'h'], TTT)
X = T.local_mesh(True) # physical grid
uvh = Array(TTT, False) # in physical space
u, v, h = uvh[:]
up = Array(Tp, False)
vp = Array(Tp, False)
duvh = Array(TTT)
du, dv, df = duvh[:]
uvh_hat = Array(TTT)
uvh_hat0 = Array(TTT)
uvh_hat1 = Array(TTT)
w0 = Array(T)
u_hat, v_hat, h_hat = uvh_hat[:]
# initialize
u[:] = ul(*X)
v[:] = vl(*X)
h[:] = hl(*X)
u_hat = T.forward(u, u_hat)
v_hat = T.forward(v, v_hat)
h_hat = T.forward(h, h_hat)
# trial, test functions
uh = TrialFunction(T)
uh_test = TestFunction(T)
vh = TrialFunction(T)
vh_test = TestFunction(T)
hh = TrialFunction(T)
hh_test = TestFunction(T)
# coefficients
A =inner(hh,hh_test)
#A = (2*np.pi)**2
Cu = g*inner(hh, Dx(uh_test, 0, 1))/A
Cv = g*inner(hh, Dx(vh_test, 1, 1))/A
Chu = H*inner(uh, Dx(hh_test, 0, 1))/A
Chv = H*inner(vh, Dx(hh_test, 1, 1))/A
count = 0
def compute_rhs(duvh_hat, uvh_hat, up, vp, T, Tp, w0):
    """Evaluate the RHS of the semi-discrete shallow-water system in spectral space.

    Implements (see module docstring):
        (u,v)_t = -g*grad(h) - Cd*(u,v)*sqrt(u**2+v**2)   (1)
        h_t     = -div((H+h)*(u,v))                       (2, nonlinear part ignored)

    up, vp and w0 are preallocated work arrays; the result is written into
    (and returned as) duvh_hat.  Reads the module-level spectral operators
    Cu, Cv, Chu, Chv and the drag coefficient Cd.
    """
    global count
    count += 1
    duvh_hat.fill(0)
    u_hat, v_hat, h_hat = uvh_hat[:]
    du_hat, dv_hat, dh_hat = duvh_hat[:]
    # Pressure-gradient terms, applied directly in spectral space
    du_hat[:] = Cu*h_hat
    up = Tp.backward(u_hat, up)
    vp = Tp.backward(v_hat, vp)
    # Quadratic bottom drag -Cd*(u,v)*sqrt(u^2+v^2): evaluated in physical
    # space, transformed back.  The sqrt required by eq. (1) was previously
    # missing (flagged by the original "should be a sqrt here" comments).
    speed = np.sqrt(up**2 + vp**2)
    du_hat += -Tp.forward(Cd*up*speed, w0)
    #
    dv_hat[:] = Cv*h_hat
    dv_hat += -Tp.forward(Cd*vp*speed, w0)
    #
    dh_hat[:] = Chu*u_hat + Chv*v_hat # ignore the nonlinear term for now
    if np.isnan(np.max(np.abs(dh_hat))):
        print('!! blow up')
        exit()
    if np.linalg.norm(dh_hat)==0. or np.linalg.norm(du_hat)==0.:
        print('norm(dhdt)=%.2e or norm(dudt)=%.2e'%(np.linalg.norm(dh_hat), np.linalg.norm(du_hat)))
    return duvh_hat
# Integrate using a 4th order Rung-Kutta method
a = [1./6., 1./3., 1./3., 1./6.] # Runge-Kutta parameter
b = [0.5, 0.5, 1.] # Runge-Kutta parameter
t = 0.0
dt = .001
end_time = 1000.
tstep = 0
write_x_slice = N[0]//2
levels = np.linspace(-1., 1., 100)
#levels = 100
# Live contour plot of the surface height, updated on rank 0 only
if rank == 0:
    plt.figure()
    image = plt.contourf(X[1][...], X[0][...], h[...], levels)
    plt.draw()
    plt.pause(1e-4)
t0 = time()
#
#K = np.array(T.local_wavenumbers(True, True))
#TV = VectorTensorProductSpace([T, T, T])
#gradu = Array(TV, False)
while t < end_time-1e-8:
    t += dt
    tstep += 1
    print(tstep)
    # Classic RK4: uvh_hat0 holds the stage base state, uvh_hat1 accumulates
    # the weighted sum of the four stage derivatives.
    uvh_hat1[:] = uvh_hat0[:] = uvh_hat
    for rk in range(4):
        duvh = compute_rhs(duvh, uvh_hat, up, vp, T, Tp, w0)
        if rk < 3:
            uvh_hat[:] = uvh_hat0 + b[rk]*dt*duvh
        uvh_hat1 += a[rk]*dt*duvh
    uvh_hat[:] = uvh_hat1
    #timer()
    #if tstep % 10 == 0:
    #    uvh = TTT.backward(uvh_hat, uvh)
    #    file0.write_slice_tstep(tstep, [slice(None), 16], uvh)
    #    file0.write_slice_tstep(tstep, [slice(None), 12], uvh)
    #if tstep % 10 == 0:
    #    uvh = TTT.backward(uvh_hat, uvh)
    #    file0.write_tstep(tstep, uvh)
    # Refresh the plot and dump a PNG every 100 steps
    if tstep % 100 == 0 and rank == 0:
        uvh = TTT.backward(uvh_hat, uvh)
        image.ax.clear()
        image.ax.contourf(X[1][...], X[0][...], h[...], levels)
        image.ax.set_title('tstep = %d'%(tstep))
        plt.pause(1e-6)
        plt.savefig('swater_1L_{}_real_{}.png'.format(N[0], tstep))
    #if False and tstep % 100 == 0:
    #    uf = TT.backward(uf_hat, uf)
    #    ekin = 0.5*energy_fourier(f_hat, T)
    #    es = 0.5*energy_fourier(1j*K*u_hat, T)
    #    eg = gamma*np.sum(0.5*u**2 - 0.25*u**4)/np.prod(np.array(N))
    #    eg = comm.allreduce(eg)
    #    gradu = TV.backward(1j*K*u_hat, gradu)
    #    ep = comm.allreduce(np.sum(f*gradu)/np.prod(np.array(N)))
    #    ea = comm.allreduce(np.sum(np.array(X)*(0.5*f**2 + 0.5*gradu**2 - (0.5*u**2 - 0.25*u**4)*f))/np.prod(np.array(N)))
    #    if rank == 0:
    #        print("Time = %2.2f Total energy = %2.8e Linear momentum %2.8e Angular momentum %2.8e" %(t, ekin+es+eg, ep, ea))
    #    comm.barrier()
file0.close()
#timer.final(MPI, rank, True)
if rank == 0:
    generate_xdmf("swater_1L{}.h5".format(N[0]))
|
def super_digit(num):
    """Return the digital root of *num*.

    Repeatedly sums the decimal digits until a single digit remains.
    Mirrors the original semantics exactly: non-positive input yields 0,
    single-digit input is returned unchanged.
    """
    while True:
        digit_sum = 0
        remaining = num
        while remaining > 0:
            remaining, last = divmod(remaining, 10)
            digit_sum += last
        if digit_sum < 10:
            return digit_sum
        num = digit_sum
num = int("861568688536788" * 100000)
print(super_digit(num))
|
import turtle
import random
import sqlite3
## Global variable declarations (turtle) ##
swidth, sheight, pSize, exitCount = 300, 300, 3, 0
r, g, b, angle, dist, curX, curY = [0] * 7
## Global variable declarations (DB) ##
con, cur, row, col = None, None, None, None
data1, data2, data3, data4, data5, data6, data7 = 0, 0, 0, 0, 0, 0, 0
i = 0
sql = ""
count = 0
strdata1, strdata2, strdata3, strdata4, strdata5, strdata6, strdata7 = [], [], [], [], [], [], []
## Main code ##
con = sqlite3.connect("C:/sqlite/turtledb") ## open (or create) the DB file
cur = con.cursor() ## create a cursor
# cur.execute("CREATE TABLE ttable(선분 ID int,색상R float,색상G float,색상B int,순번 int,X 좌표 int,Y 좌표 int)")
turtle.title('거북이가 맘대로 다니기(DB)')
turtle.shape('turtle')
turtle.pensize(pSize)
turtle.setup(width=swidth + 30, height=sheight + 30)
turtle.screensize(swidth, sheight)
# Delete data left over from a previous run
cur.execute("delete from turtleTable")
# Phase 1: random walk, recording every segment (id, color, position) in the DB
while (True):
    # row = cur.fetchone()
    # col = cur.fetchall()
    count += 1
    data1 = count
    data5 = count
    r = random.random()
    g = random.random()
    b = random.random()
    data2 = str(r)
    data3 = str(g)
    data4 = str(b)
    turtle.pencolor((r, g, b))
    angle = random.randrange(0, 360)
    dist = random.randrange(1, 100)
    turtle.left(angle) ## move: turn by a random angle, then walk forward
    turtle.forward(dist)
    curX = int(turtle.xcor()) ## current turtle position
    curY = int(turtle.ycor())
    data6 = str(curX)
    data7 = str(curY)
    # NOTE(review): SQL built by string concatenation; sqlite3 '?' placeholders would be safer.
    sql = "INSERT INTO turtleTable VALUES(" + str(data1) + ", '" + data2 + "' , '" + data3 + "' , '" + data4 + "', " + str(data5) + " , '" + data6 + "', '" + data7 + "')"
    cur.execute(sql)
    print("%5s %5s %5s %5s %5s %5s %5s" % (data1, data2, data3, data4, data5, data6, data7))
    strdata1.append("선분 ID")
    strdata2.append("색상 R")
    strdata3.append(data3)
    strdata4.append(data4)
    strdata5.append(data5)
    strdata6.append("X 좌표")
    strdata7.append("Y 좌표")
    # Re-center when the turtle leaves the screen; stop after 5 such escapes
    if (-swidth / 2 <= curX and curX <= swidth / 2) and (-sheight / 2 <= curY and curY <= sheight / 2):
        pass
    else:
        turtle.penup()
        turtle.goto(0, 0)
        turtle.pendown()
        exitCount += 1
        if exitCount >= 5:
            break
con.commit()
# con.close()
# Phase 2: replay the recorded walk from the DB, newest segment first
turtle.clear()
cur.execute("select * from turtleTable order by 선분id desc")
while (True):
    row = cur.fetchone()
    if row == None:
        break
    data1 = row[0]
    data2 = row[1]
    data3 = row[2]
    data4 = row[3]
    data6 = row[5]
    data7 = row[6]
    # NOTE(review): data5 is stale here (row[4] is never read) — confirm intended.
    print("%5s %5s %5s %5s %5s %5s %5s" % (data1, data2, data3, data4, data5, data6, data7))
    turtle.goto(int(data6), int(data7))
    #turtle.goto(int(strdata6.pop()), int(strdata7.pop()))
    r = data2
    g = data3
    b = data4
    turtle.pencolor((float(data2), float(data3), float(data4)))
    # data1 = data1.pop()
turtle.goto(20, 20)
turtle.done()
|
import inspect
# ViewSets define the view behavior.
from django.views.generic import TemplateView
from rest_framework import viewsets, mixins, generics, permissions, status
from rest_framework.decorators import api_view, detail_route
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework.views import APIView
from core.models import User
from core.permissions import IsOwnerOrNothing, IsUserOrUserFriend, IsUser
from core.serializers import UserBasicSerializer, UserDetaliedSerializer
from django.http import HttpResponse
from twitter.models import Post
from twitter.serializers import PostSerializer
def vk_auth_view(request):
    """Render a minimal page containing a link into the VK OAuth2 login flow."""
    # Target is the python-social-auth VK backend login URL (local dev host).
    link_markup = '<a href="http://127.0.0.1:8000/social/login/vk-oauth2/">Enter via VK</a>'
    return HttpResponse(link_markup)
class IndexView(TemplateView):
    # Static landing page; TemplateView only needs the template name.
    template_name = "core/index.html"
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only user endpoints plus subscription/post detail routes."""
    queryset = User.objects.all()
    # NOTE(review): 'permissions' is not a DRF setting.  Kept because the
    # original claimed removing it caused an exception — confirm and delete.
    permissions = [IsAuthenticated,]
    # permission_classes is the attribute DRF actually consults.
    permission_classes = (IsAuthenticated,)

    def get_serializer_class(self):
        """Lightweight serializer for list views, detailed one for retrieve."""
        if self.action == 'list':
            return UserBasicSerializer
        if self.action == 'retrieve':
            return UserDetaliedSerializer
        # Fall back to DRF's default resolution instead of returning None.
        return super(UserViewSet, self).get_serializer_class()

    # BUGFIX: these routes previously passed 'permissions=' (an unknown kwarg
    # that DRF stores but never checks), so the per-route policies were not
    # enforced.  'permission_classes=' is the documented argument.
    @detail_route(methods=['get'], permission_classes=[IsUserOrUserFriend])
    def subscribed_on(self, request, pk=None):
        """
        List of 'pk' user's subscriptions on other users
        """
        user = self.get_object()
        subscriptions = user.subscriptions
        serializer = UserBasicSerializer(subscriptions, many=True)
        return Response(serializer.data)

    @detail_route(methods=['get'], permission_classes=[IsUserOrUserFriend])
    def subscribers(self, request, pk=None):
        """
        List of 'pk' user's subscribers
        """
        user = self.get_object()
        subscribers = user.subscribers
        serializer = UserBasicSerializer(subscribers, many=True)
        return Response(serializer.data)

    @detail_route(methods=['post'], permission_classes=[IsUser])
    def subscribe(self, request, pk=None):
        """
        Subscribe user (in request.user or in token) to user with 'pk'
        """
        user = self.get_object()
        user_to_subscribe_on = User.objects.get(pk=request.data['subscribe_to'])
        user_to_subscribe_on.add_subscriber(user)
        user_to_subscribe_on.save()
        return Response(status=status.HTTP_201_CREATED)

    @detail_route(methods=['post'], permission_classes=[IsUser])
    def unsubscribe(self, request, pk=None):
        """Remove user 'pk' from the subscribers of 'unsubscribe_to'."""
        user = self.get_object()
        user_to_unsubscribe_on = User.objects.get(pk=request.data['unsubscribe_to'])
        user_to_unsubscribe_on.remove_subscriber(user)
        user_to_unsubscribe_on.save()
        return Response(status=status.HTTP_201_CREATED)

    @detail_route(methods=['get'], permission_classes=[IsUserOrUserFriend])
    def posts(self, request, pk=None):
        """
        List of all user's posts
        """
        serializer = PostSerializer(Post.objects.filter(author__id=pk), many=True)
        return Response(serializer.data)
# class ProfileDetail(generics.RetrieveUpdateDestroyAPIView):
# queryset = Profile.objects.all()
# serializer_class = ProfileSerializer
# permission_classes = (permissions.IsAuthenticated, IsOwnerOrNothing)
# class UserDetail(mixins.RetrieveModelMixin,
# mixins.UpdateModelMixin,
# mixins.DestroyModelMixin,
# generics.GenericAPIView):
# queryset = User.objects.all()
# serializer_class = UserSerializer
#
# def get(self, request, *args, **kwargs):
# return self.retrieve(request, *args, **kwargs)
#
# def put(self, request, *args, **kwargs):
# return self.update(request, *args, **kwargs)
#
# def delete(self, request, *args, **kwargs):
# return self.destroy(request, *args, **kwargs)
#
# @detail_route(methods=['get'])
# def subscribe(self, request, pk=None):
# return self.retrieve(request)
# class UserViewSet(APIView):
# # defines what user-objects will we send
# # queryset = User.objects.all()
# # serializer_class = UserSerializer
#
# def get(self, request, format=None):
# snippets = User.objects.all()
# serializer = UserSerializer(snippets, many=True)
# return Response(serializer.data)
# class SubscribeView(APIView):
#
# def get(self, request, format=None):
# users = User.objects.all()
# serializer = UserSerializer(users, many=True)
# return Response(serializer.data)
# @api_view(['POST, GET'])
# def subscribe(request, format=None):
# """
# List all code snippets, or create a new snippet.
# """
# if request.method == 'POST' or request.method == 'GET':
# user = User.objects.all()[0]
# serializer = UserSerializer(user)
# return Response(serializer.data)
# class UserList(mixins.ListModelMixin,
# generics.GenericAPIView):
# queryset = User.objects.all()
# serializer_class = UserSerializer
#
# def get(self, request, *args, **kwargs):
# return self.list(request, *args, **kwargs)
|
from Node import Node
def constructPaths(graph):
    """Trace predecessor links for every node in the graph.

    Args:
        graph: list of Node objects; getPredecessor() returns the index of a
            node's predecessor, or -1 for the source / unreached nodes.

    Returns:
        A list of lists; entry i is the chain of node indices from node i
        back to the source (node i itself first).
    """
    all_paths = []
    for start in range(len(graph)):
        path = [start]
        current = start
        # Walk the predecessor chain until the source (-1) is reached
        while True:
            pred = graph[current].getPredecessor()
            if pred == -1:
                break
            path.append(pred)
            current = pred
        all_paths.append(path)
    return all_paths
def bfs(graph):
    """Breadth-first search from node 0, recording levels and predecessors.

    Args:
        graph: list of Node objects; getNeighbors()[0] is the list of
            neighbor indices, setPredecessor(i) records the BFS parent.

    Returns:
        dict mapping node index -> BFS level (source is level 0).

    Fixes over the original:
      * setPredecessor received the position in the frontier list (j)
        instead of the graph index of the current node.
      * newNodes collected the neighbor-list position (k) instead of the
        neighbor index itself, so traversal visited the wrong nodes.
      * currentLevel was never incremented, collapsing all depths to 1.
      * removed the stray debug `print j` and the unused `source` local.
    """
    frontier = [0]
    levels = {0: 0}
    current_level = 1
    while frontier:
        next_frontier = []
        for node_index in frontier:
            for neighbor in graph[node_index].getNeighbors()[0]:
                if neighbor not in levels:
                    levels[neighbor] = current_level
                    graph[neighbor].setPredecessor(node_index)
                    next_frontier.append(neighbor)
        frontier = next_frontier
        current_level += 1
    return levels
def main():
    # NOTE: Python 2 script (print statements, raw_input, xrange).
    # Builds a directed weighted graph from stdin, runs BFS, prints paths.
    graph = []
    nNodes = int(raw_input("Enter the number of nodes: "))
    print "First key you enter will be the source vertex"
    for i in xrange(nNodes):
        keyValue = int(raw_input("Enter the nodes' key: "))
        graph.append(Node(keyValue))
        graph[len(graph) - 1].indexInGraph = i
        # The first node entered is the source: distance 0
        if len(graph) == 1:
            graph[0].setDistFromSrcVertex(0)
    print
    print "Please enter the edges now. When you're done, just type 'done' "
    print "Enter them as 3 spaced numbers. '4 5 1' indicates there is a path from Node #4 to Node #5 with an edge weight of 1 \n(no path from Node #5 to Node #4) based on the order you entered them"
    print
    # Edges are entered 1-based; stored 0-based via the -1 adjustments below
    while True:
        inp = raw_input()
        if inp != 'done':
            indices = [int(x) for x in inp.split()]
            graph[indices[0] - 1].addNeighbor((indices[1] - 1), indices[2])
        else:
            break
    bfs(graph)
    print constructPaths(graph)
if __name__ == '__main__':
    main()
|
"""
Given an array nums of integers, return how many of them contain an even number of digits.
Example 1:
Input: nums = [12,345,2,6,7896]
Output: 2
Explanation:
12 contains 2 digits (even number of digits).
345 contains 3 digits (odd number of digits).
2 contains 1 digit (odd number of digits).
6 contains 1 digit (odd number of digits).
7896 contains 4 digits (even number of digits).
Therefore only 12 and 7896 contain an even number of digits.
https://ru.wikipedia.org/wiki/%D0%94%D0%B5%D1%81%D1%8F%D1%82%D0%B8%D1%87%D0%BD%D1%8B%D0%B9_%D0%BB%D0%BE%D0%B3%D0%B0%D1%80%D0%B8%D1%84%D0%BC
log10(10) = 1 # The result is 1 since 10^1=10.
log10(100) = 2 # The result is 2 since 10^2=100.
"""
import math
def nums_with_even_num_of_digits_v1(nums_array):
    """Count how many integers in *nums_array* have an even number of digits.

    Uses len(str(num)) instead of floor(log10(num)) + 1: the log-based count
    is wrong for some inputs due to float rounding (math.log(1000, 10)
    evaluates to 2.999...9996, giving 3 instead of 4 digits) and raises a
    math domain error for 0.
    """
    count = 0
    for num in nums_array:
        if len(str(num)) % 2 == 0:
            count += 1
    return count
assert nums_with_even_num_of_digits_v1([1, 2, 3, 4, 5, 10, 11]) == 2
def nums_with_even_num_of_digits_v2(nums_array):
    """One-liner variant: sum of booleans, one per even-digit-count number.

    len(str(num)) avoids the float-rounding bug of floor(log10(num)) + 1
    (e.g. math.log(1000, 10) < 3) and the domain error for 0.
    """
    return sum(len(str(num)) % 2 == 0 for num in nums_array)
assert nums_with_even_num_of_digits_v2([1, 2, 3, 4, 5, 10, 11]) == 2
|
from django.conf.urls.defaults import *
from siteapps_v1.ntgreekvocab.models import SimpleCard
# Old-style (pre-Django 1.8) URLconf: views are referenced by dotted-path strings.
urlpatterns = patterns('',
    # renders template called <modelname>_list.html
    # (r'^$', 'django.views.generic.list_detail.object_list', info_dict),
    url(r'^card/all/$',
        'django.views.generic.list_detail.object_list',
        {'queryset':SimpleCard.objects.all().order_by('greek_word')},
        name='cards-all'
        ),
    # renders template called <modelname>_detail.html
    #(r'^entry_g/(?P<object_id>\d+)/$', 'django.views.generic.list_detail.object_detail', info_dict),
    url(r'^$',
        'siteapps_v1.ntgreekvocab.views.home',
        name='home'
        ),
    url(r'^card/random/(?P<card_id>\d+)/$',
        'siteapps_v1.ntgreekvocab.views.card_random_view',
        name='card-random-view'
        ),
    url(r'^card/(?P<card_id>\d+)/$',
        'siteapps_v1.ntgreekvocab.views.card_view',
        name='card-view'
        ),
    url(r'^card/add/$',
        'siteapps_v1.ntgreekvocab.views.card_add',
        name='card-add'
        ),
    url(r'^card/edit/(?P<card_id>\d+)/$',
        'siteapps_v1.ntgreekvocab.views.card_edit',
        name='card-edit'
        ),
    url(r'^card/list/$',
        'siteapps_v1.ntgreekvocab.views.card_list',
        name='cards-list'
        ),
    # lesson number is 'NA' or 1-2 digits with an optional A/B suffix
    url(r'^lesson/(?P<lesson_num>(NA|[0-9]{1,2}[ABab]{0,1}))/$',
        'siteapps_v1.ntgreekvocab.views.cards_by_lesson',
        name='lesson'
        ),
    url(r'^card/lookup/$',
        'siteapps_v1.ntgreekvocab.views.card_lookup',
        name='card-lookup'
        ),
    # ajax urls
    url(r'^card_lookup/$',
        'siteapps_v1.ntgreekvocab.views.ajax_card_autocomplete',
        name='ajax-card-autocomplete'
        ),
    url(r'^card/fetch/(?P<card_id>\d+)/$',
        'siteapps_v1.ntgreekvocab.views.ajax_card_fetch',
        name='ajax-card-fetch'
        ),
    url(r'^clearln/$',
        'siteapps_v1.ntgreekvocab.views.ajax_clear_lesson_filters',
        name='ajax-clear-lesson-filters'
        ),
    )
|
import os
import time
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from selenium import webdriver
from selenium.webdriver.common.by import By
from discord_webhook import DiscordWebhook, DiscordEmbed
import schedule
from dotenv import load_dotenv
load_dotenv()
# Credentials and endpoints come from .env — never hard-code secrets.
EMAIL = os.getenv('EMAIL')
PASSWORD = os.getenv('PASSWORD')
DISCORD_WEBHOOK_URL = os.getenv('DISCORD_WEBHOOK_URL')
BUYER_REQUESTS_URL = os.getenv('BUYER_REQUESTS_URL')
def driver_setup():
    """Build a Chrome WebDriver with automation detection and sandboxing disabled."""
    service = Service(ChromeDriverManager().install())
    options = webdriver.ChromeOptions()
    for flag in ('--disable-blink-features=AutomationControlled',
                 '--disable-dev-shm-usage',
                 '--no-sandbox',
                 '--ignore-certificate-errors'):
        options.add_argument(flag)
    driver = webdriver.Chrome(service=service, options=options)
    # Touch the user agent once (same call as the original setup performed)
    driver.execute_script("return navigator.userAgent")
    return driver
def check_buyer_requests(driver):
    """Log in to Fiverr and scrape the buyer-requests counter.

    Returns the counter as a string (the raw attribute value); quits the
    driver before returning.
    """
    driver.get(BUYER_REQUESTS_URL)
    driver.find_element(By.ID, 'login').send_keys(EMAIL)
    driver.find_element(By.ID, 'password').send_keys(PASSWORD)
    driver.implicitly_wait(10)
    # NOTE(review): absolute XPaths break whenever the page layout changes.
    driver.find_element(
        By.XPATH, "/html/body/div[3]/div[3]/div/section/div/div/div/div/form/div/button").click()
    driver.implicitly_wait(10)
    no_of_requests = driver.find_element(
        By.XPATH, "/html/body/div[3]/div[3]/section/div/article/div[1]/ul/li[1]/a").get_attribute("data-count-extended")
    driver.quit()
    return no_of_requests
def send_notification(no_of_requests, webhook_url):
    """Post a Discord embed announcing how many buyer requests are open."""
    embed = DiscordEmbed(
        title=f"{no_of_requests} Fiverr Buyer Requests Available",
        description=f"\nGo check it out fast.\n{BUYER_REQUESTS_URL}",
        color=0x00ff00
    )
    webhook = DiscordWebhook(url=webhook_url)
    webhook.add_embed(embed)
    webhook.execute()
def main():
    """One polling cycle: scrape the request count, notify if any are open."""
    request_count = check_buyer_requests(driver_setup())
    print(request_count)
    if request_count != '0':
        send_notification(request_count, DISCORD_WEBHOOK_URL)
if __name__ == "__main__":
schedule.every(5).minutes.do(main)
while 1:
schedule.run_pending()
time.sleep(1)
|
import pyclesperanto_prototype as cle
import numpy as np
#initialise as float 32 arrays with zeros or array_equal method won't work
#cle.pull returns float 32, np.zeros is float 64
source = np.zeros((10, 10, 10),dtype=np.float32)
# Single bright voxel whose position/intensity is tracked through the deskew
source[1, 1, 1] = 1
reference = np.zeros((5, 19, 10),dtype=np.float32)
#will this change with device used?
# NOTE(review): expected intensity depends on interpolation/device — confirm on CI hardware.
reference[3, 2, 1] = 0.16987294
def test_deskew_y():
    """Deskew the single-voxel volume and compare voxel-for-voxel with the reference."""
    result = cle.deskew_y(source, angle_in_degrees=30)
    a = cle.pull(result)
    b = cle.pull(reference)
    # Prints aid debugging when the assertion fails on a new device
    print(a)
    print(b)
    print(a.shape)
    print(b.shape)
    assert (np.array_equal(a, b))
def test_deskew_with_passing_destination():
    """deskew_y must produce identical output with and without a destination buffer."""
    result = cle.deskew_y(source, angle_in_degrees=30)
    result2 = cle.create(result.shape)
    cle.deskew_y(source, result2, angle_in_degrees=30)
    print(result)
    print(result2)
    assert cle.array_equal(result, result2)
|
# Copyright (c) 2005-2011, NumPy Developers.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the NumPy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import (
TYPE_CHECKING,
Any,
List,
Literal,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from packaging.version import Version
# Type annotations stubs
try:
    # numpy >= 1.20 ships numpy.typing; reuse its ArrayLike/DTypeLike.
    from numpy.typing import ArrayLike, DTypeLike
    from numpy.typing._dtype_like import _DTypeLikeNested, _ShapeLike, _SupportsDType
    # Xarray requires a Mapping[Hashable, dtype] in many places which
    # conflics with numpys own DTypeLike (with dtypes for fields).
    # https://numpy.org/devdocs/reference/typing.html#numpy.typing.DTypeLike
    # This is a copy of this DTypeLike that allows only non-Mapping dtypes.
    DTypeLikeSave = Union[
        np.dtype,
        # default data type (float64)
        None,
        # array-scalar types and generic types
        Type[Any],
        # character codes, type strings or comma-separated fields, e.g., 'float64'
        str,
        # (flexible_dtype, itemsize)
        Tuple[_DTypeLikeNested, int],
        # (fixed_dtype, shape)
        Tuple[_DTypeLikeNested, _ShapeLike],
        # (base_dtype, new_dtype)
        Tuple[_DTypeLikeNested, _DTypeLikeNested],
        # because numpy does the same?
        List[Any],
        # anything with a dtype attribute
        _SupportsDType[np.dtype],
    ]
except ImportError:
    # fall back for numpy < 1.20, ArrayLike adapted from numpy.typing._array_like
    from typing import Protocol
    # Protocols are only needed by static checkers; at runtime plain Any suffices.
    if TYPE_CHECKING:
        class _SupportsArray(Protocol):
            def __array__(self) -> np.ndarray:
                ...
        class _SupportsDTypeFallback(Protocol):
            @property
            def dtype(self) -> np.dtype:
                ...
    else:
        _SupportsArray = Any
        _SupportsDTypeFallback = Any
    _T = TypeVar("_T")
    # Nested sequences up to four levels deep; deeper nesting falls through
    # to _RecursiveSequence below.
    _NestedSequence = Union[
        _T,
        Sequence[_T],
        Sequence[Sequence[_T]],
        Sequence[Sequence[Sequence[_T]]],
        Sequence[Sequence[Sequence[Sequence[_T]]]],
    ]
    _RecursiveSequence = Sequence[Sequence[Sequence[Sequence[Sequence[Any]]]]]
    _ArrayLike = Union[
        _NestedSequence[_SupportsArray],
        _NestedSequence[_T],
    ]
    _ArrayLikeFallback = Union[
        _ArrayLike[Union[bool, int, float, complex, str, bytes]],
        _RecursiveSequence,
    ]
    # The extra step defining _ArrayLikeFallback and using ArrayLike as a type
    # alias for it works around an issue with mypy.
    # The `# type: ignore` below silences the warning of having multiple types
    # with the same name (ArrayLike and DTypeLike from the try block)
    ArrayLike = _ArrayLikeFallback  # type: ignore
    # fall back for numpy < 1.20
    DTypeLikeSave = Union[  # type: ignore[misc]
        np.dtype,
        str,
        None,
        Type[Any],
        Tuple[Any, Any],
        List[Any],
        _SupportsDTypeFallback,
    ]
    DTypeLike = DTypeLikeSave  # type: ignore[misc]
# Use numpy's own sliding_window_view when available (numpy >= 1.20),
# otherwise fall back to a vendored copy built on as_strided.
if Version(np.__version__) >= Version("1.20.0"):
    sliding_window_view = np.lib.stride_tricks.sliding_window_view
else:
    from numpy.core.numeric import normalize_axis_tuple  # type: ignore[attr-defined]
    from numpy.lib.stride_tricks import as_strided
    # copied from numpy.lib.stride_tricks
    def sliding_window_view(
        x, window_shape, axis=None, *, subok=False, writeable=False
    ):
        """
        Create a sliding window view into the array with the given window shape.
        Also known as rolling or moving window, the window slides across all
        dimensions of the array and extracts subsets of the array at all window
        positions.
        .. versionadded:: 1.20.0
        Parameters
        ----------
        x : array_like
            Array to create the sliding window view from.
        window_shape : int or tuple of int
            Size of window over each axis that takes part in the sliding window.
            If `axis` is not present, must have same length as the number of input
            array dimensions. Single integers `i` are treated as if they were the
            tuple `(i,)`.
        axis : int or tuple of int, optional
            Axis or axes along which the sliding window is applied.
            By default, the sliding window is applied to all axes and
            `window_shape[i]` will refer to axis `i` of `x`.
            If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to
            the axis `axis[i]` of `x`.
            Single integers `i` are treated as if they were the tuple `(i,)`.
        subok : bool, optional
            If True, sub-classes will be passed-through, otherwise the returned
            array will be forced to be a base-class array (default).
        writeable : bool, optional
            When true, allow writing to the returned view. The default is false,
            as this should be used with caution: the returned view contains the
            same memory location multiple times, so writing to one location will
            cause others to change.
        Returns
        -------
        view : ndarray
            Sliding window view of the array. The sliding window dimensions are
            inserted at the end, and the original dimensions are trimmed as
            required by the size of the sliding window.
            That is, ``view.shape = x_shape_trimmed + window_shape``, where
            ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less
            than the corresponding window size.
        """
        window_shape = (
            tuple(window_shape) if np.iterable(window_shape) else (window_shape,)
        )
        # first convert input to array, possibly keeping subclass
        x = np.array(x, copy=False, subok=subok)
        window_shape_array = np.array(window_shape)
        if np.any(window_shape_array < 0):
            raise ValueError("`window_shape` cannot contain negative values")
        if axis is None:
            axis = tuple(range(x.ndim))
            if len(window_shape) != len(axis):
                raise ValueError(
                    f"Since axis is `None`, must provide "
                    f"window_shape for all dimensions of `x`; "
                    f"got {len(window_shape)} window_shape elements "
                    f"and `x.ndim` is {x.ndim}."
                )
        else:
            axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)
            if len(window_shape) != len(axis):
                raise ValueError(
                    f"Must provide matching length window_shape and "
                    f"axis; got {len(window_shape)} window_shape "
                    f"elements and {len(axis)} axes elements."
                )
        # The view's strides repeat the windowed axes' strides at the end
        out_strides = x.strides + tuple(x.strides[ax] for ax in axis)
        # note: same axis can be windowed repeatedly
        x_shape_trimmed = list(x.shape)
        for ax, dim in zip(axis, window_shape):
            if x_shape_trimmed[ax] < dim:
                raise ValueError("window shape cannot be larger than input array shape")
            x_shape_trimmed[ax] -= dim - 1
        out_shape = tuple(x_shape_trimmed) + window_shape
        return as_strided(
            x, strides=out_strides, shape=out_shape, subok=subok, writeable=writeable
        )
# numpy 1.22 renamed/extended the quantile interpolation options ("method=");
# older versions only support the five legacy interpolation names.
if Version(np.__version__) >= Version("1.22.0"):
    QUANTILE_METHODS = Literal[
        "inverted_cdf",
        "averaged_inverted_cdf",
        "closest_observation",
        "interpolated_inverted_cdf",
        "hazen",
        "weibull",
        "linear",
        "median_unbiased",
        "normal_unbiased",
        "lower",
        "higher",
        "midpoint",
        "nearest",
    ]
else:
    QUANTILE_METHODS = Literal[  # type: ignore[misc]
        "linear",
        "lower",
        "higher",
        "midpoint",
        "nearest",
    ]
|
import os
import threading
import time
def sing(*args):
    """Worker thread body: print a banner and the received args, then 'sing' five times, one second apart."""
    print('a' * 80)
    print('-' * 30, '参数args:{}'.format(args))
    step = 0
    while step < 5:
        print('i am sing', step)
        time.sleep(1)
        step += 1
def dance(args):
    """Worker thread body: print the received args, then 'dance' five times, one second apart."""
    print('-' * 30, '参数args:%s' % args)
    step = 0
    while step < 5:
        print('iam dancing', step)
        time.sleep(1)
        step += 1
def main():
    """Run the sing and dance workers concurrently and wait for both."""
    name = '王力宏'
    age = 10
    # In Thread(target, name, args), name= names the thread (it is NOT an
    # argument to target); args= supplies the positional arguments.
    t_sing = threading.Thread(target=sing, name='唱歌', args=(name, age))
    t_dance = threading.Thread(target=dance, name='跳舞', args=(name,))
    t_sing.start()
    t_dance.start()
    # Block until both workers finish
    t_sing.join()
    t_dance.join()
    print('主线程,子线程全部结束')
if __name__ == '__main__':
    main()
|
'''
@Date : 01/11/2020
@Author: Zhihan Zhang
@mail : zhangzhihan@pku.edu.cn
@homepage: ytyz1307zzh.github.io
Prepare input instances for retrieve_para.py
'''
import argparse
import json
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('-input', type=str, default='../ConceptNet/rough_retrieval.json',
                    help='data file that contain data instances for all datasets, e.g.,'
                         'ConceptNet retrieval file')
parser.add_argument('-output', type=str, default='./wiki_query.json')
opt = parser.parse_args()
# Load rough retrieval results and keep only the first instance per paragraph id
data = json.load(open(opt.input, 'r', encoding='utf8'))
result = []
ids = set()  # paragraph ids already emitted
for instance in tqdm(data):
    # NOTE(review): fields are read before the duplicate check; moving the
    # `para_id in ids` test first would skip the wasted lookups.
    para_id = instance['id']
    entity = instance['entity']
    paragraph = instance['paragraph']
    topic = instance['topic']
    prompt = instance['prompt']
    if para_id in ids:
        continue
    ids.add(para_id)
    result.append({'id': para_id,
                   'entity': entity,
                   'topic': topic,
                   'prompt': prompt,
                   'paragraph': paragraph,
                   })
json.dump(result, open(opt.output, 'w', encoding='utf8'), indent=4, ensure_ascii=False)
print('Number of saved data: ', len(result))
|
# Training configuration for the ACT-X explanation model.
GPU_ID = 1
BATCH_SIZE = 64
VAL_BATCH_SIZE = 104
NUM_OUTPUT_UNITS = 397 # This is the answer vocabulary size
MAX_WORDS_IN_EXP = 36
MAX_ITERATIONS = 40000
PRINT_INTERVAL = 100
# what data to use for training
TRAIN_DATA_SPLITS = 'train'
# what data to use for the vocabulary
ANSWER_VOCAB_SPACE = 'train'
EXP_VOCAB_SPACE = 'train'
# location of the data
ACTIVITY_PREFIX = './ACT-X'
# Per-split paths: explanation JSON plus precomputed ResNet res5c features
DATA_PATHS = {
    'train': {
        'ans_file': ACTIVITY_PREFIX + '/textual/exp_train_split.json',
        'features_prefix': ACTIVITY_PREFIX + '/Features/resnet_res5c_bgrms_large/'
    },
    'val': {
        'ans_file': ACTIVITY_PREFIX + '/textual/exp_val_split.json',
        'features_prefix': ACTIVITY_PREFIX + '/Features/resnet_res5c_bgrms_large/'
    }
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AntMerchantExpandActivitySignupCreateModel(object):
    """Request model for the ant.merchant.expand activity-signup-create API."""

    # The two serializable fields of this model, in declaration order.
    _FIELDS = ('activity_code', 'ext_info')

    def __init__(self):
        self._activity_code = None
        self._ext_info = None

    @property
    def activity_code(self):
        return self._activity_code

    @activity_code.setter
    def activity_code(self, value):
        self._activity_code = value

    @property
    def ext_info(self):
        return self._ext_info

    @ext_info.setter
    def ext_info(self, value):
        self._ext_info = value

    def to_alipay_dict(self):
        """Serialize truthy fields, delegating to nested to_alipay_dict when present."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; empty/None input yields None."""
        if not d:
            return None
        o = AntMerchantExpandActivitySignupCreateModel()
        for field in AntMerchantExpandActivitySignupCreateModel._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
# -*- encoding: utf-8 -*-
'''
@Time : 2022/04/06 23:52:52
@Author : Yu Runshen
@License : (C)Copyright 2017-2018, Liugroup-NLPR-CASIA
'''
# here put the import lib
import sys, os
class Solution:
    """Climbing-stairs counting problems (LeetCode 70 and a generalization)."""

    def climbStairs(self, n):
        """Number of distinct ways to climb n steps taking 1 or 2 at a time.

        Iterative Fibonacci with O(1) memory (the original allocated an
        O(n) dp list).  Also handles n == 0 (one way: do nothing) and
        negative n (zero ways) instead of crashing with an IndexError.
        """
        if n < 0:
            return 0
        prev, curr = 1, 1  # ways(0), ways(1)
        for _ in range(n - 1):
            prev, curr = curr, prev + curr
        return curr

    def climbStairs_more(self, n, m):
        """Generalized climb: each move may take 1..m steps.

        Complete-knapsack style DP: dp[i] counts the ways to reach step i,
        summing over every allowed last-step size.
        """
        dp = [0] * (n + 1)
        dp[0] = 1
        # outer loop: target step; inner loop: size of the last move
        for i in range(n + 1):
            for step in range(1, m + 1):
                if i >= step:
                    dp[i] += dp[i - step]
        return dp[n]
# Smoke test: 4 stairs with 1- or 2-step moves -> expect 5 ways.
s = Solution()
print(s.climbStairs(4))
print(s.climbStairs_more(4,2)) |
#!/usr/bin/env python
"""
Summarizes resulting reactivities.out of spats based on expected results. Part of Read_Mapping tests.
Usage: python check_reactivities.py --input <> --output <> --linker <> --sequence <>
Options:
--input reactivities.out file from spats
--output File to output result summary
--linker Linker sequence
--sequence Sequence of RNA target
Version: 0.1
Date: March 29, 2016
Author: Angela M Yu
Copyright (c) 2016 Lucks Laboratory - all rights reserved.
"""
import getopt
import sys
from itertools import repeat
def getopts(short_arg_string, long_arg_list):
    """
    Return a dictionary of command line arguments as defined by
    short_arg_string and long_arg_list.

    Exits with status 2 (after printing the error) on an unrecognized option.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], short_arg_string, long_arg_list)
    except getopt.GetoptError as err:
        # BUG FIX: was Python 2 `print str(err)`, a SyntaxError under Python 3.
        print(str(err))
        sys.exit(2)
    return dict(opts)
# Parse required CLI options (see module docstring for their meaning).
opts = getopts("", ["input=", "output=", "linker=", "sequence="])
spats_reactivities_out = opts["--input"]
output = opts["--output"]
linker_len = len(opts["--linker"])
sequence_len = len(opts["--sequence"])
# Read in reactivities.out: collect the two read counts from each data line.
reads = []
with open(spats_reactivities_out, "r") as f:
    header = f.readline()
    for line in f:
        fields = line.split("\t")
        reads += [int(fields[4]), int(fields[5])] # treated_reads is [4], untreated_reads is [5]
# Build expected reads. NOTE: range slicing works in both Python 2 and 3;
# the zip iterator is consumed exactly once by the comprehension below.
case_exp = zip(range(1,sequence_len+2), range(1, sequence_len+2)[::-1]) #creates pairs [(1,N), (2,N-1), ..., (N+1,1)]
case_exp = [ele for i in case_exp for ele in i] + [0]*(2*linker_len-4) #add 0s where linker does not map
# Calculate summary: compare observed vs expected counts position-by-position.
correct = sum([1 if a[0] == a[1] else 0 for a in zip(case_exp, reads)])
incorrect = len(reads) - correct
expected_read_lines = 2 * (sequence_len + linker_len - 1)
# Pass only when every position matched AND the file had the expected length.
if correct == len(reads) and len(reads) == expected_read_lines:
    result = "OK - %s read positions out of %s expected, %s correct, %s incorrect\n"%(len(reads), expected_read_lines, correct, incorrect)
else:
    result = "FAILED - %s read positions out of %s expected, %s correct, %s incorrect\n"%(len(reads), expected_read_lines, correct, incorrect)
# Append (not overwrite) so repeated test runs accumulate in one summary file.
with open(output, "a") as f:
    f.write(result)
|
import numpy as np
from math import sqrt
from time import time
# Number of points to use in the approximation
N = 10000000
start = time()
# --- Vectorized NumPy approach ---
# Create a matrix of N pairs of uniform random numbers in [0, 1)
points = np.random.rand(N, 2)
# Calculate the Euclidean norm of each row
norms = np.linalg.norm(points, axis=1)
# Boolean mask: True where the point lies inside the unit quarter-circle
inside_circle = norms <= 1
# Count the number in the circle
number_in_circle = float(np.sum(inside_circle))
end = time()
# Quarter-circle area / square area = pi/4, hence the factor of 4.
print( 4 * number_in_circle / float(N) )
print('Time = ', end - start)
# --- Pure-Python loop, for run-time comparison ---
# Set number in circle to zero
number_in_circle = 0
start = time()
for i in range(N):
    # Calculate random values for our xy-pair
    x = np.random.rand()
    y = np.random.rand()
    # Calculate norm of resulting vector
    norm = sqrt(x**2 + y**2)
    # If inside the circle, increment the count.
    # FIX: use the boolean directly rather than comparing `== True`.
    if norm <= 1:
        number_in_circle += 1
end = time()
# Print the estimate of pi and the run time
print( 4 * number_in_circle / float(N) )
print('Time = ', end - start)
|
from __future__ import division
import re
import datetime
import json
import glob
import os
import multiprocessing
import argparse
import hashlib
import random
from bs4 import BeautifulSoup
from pyparsing import Optional, Group, Word, nums, Literal
from tqdm import tqdm
import numpy as np
from toolbox.strings import pad_left
def autoconvert(s):
    """Best-effort conversion of a string: int first, then float, else unchanged."""
    for converter in (int, float):
        try:
            return converter(s)
        except ValueError:
            pass
    return s
class ExtendedEncoder(json.JSONEncoder):
    """JSON encoder that additionally serializes datetime.timedelta values."""

    def default(self, obj):
        if not isinstance(obj, datetime.timedelta):
            # Defer to the base class, which raises TypeError for unknowns.
            return json.JSONEncoder.default(self, obj)
        # NOTE: uses the default string representation, e.g. "0:30:00"
        return str(obj)
# Integer numbers
number = Word(nums)
# Optional hour part, e.g. "2h"
hour_symbol = Literal('h')
# BUG FIX: the parse action was int(t[0][0][0]), which indexed into the digit
# string and kept only its FIRST character ("12h" parsed as hour=1). The hour
# group is structured exactly like the minute group, so t[0][0] is the digits.
hour = Group(number + hour_symbol)('hour').setParseAction(lambda s,l,t: int(t[0][0]))
# Optional minute part, e.g. "30m"
minute_symbol = Literal('m')
minute = Group(number + minute_symbol)('minute').setParseAction(lambda s,l,t: int(t[0][0]))
# Put the hours and minutes together
duration = Optional(hour) + Optional(minute)
# Convert resulting dictionaries into datetime.timedelta
def timedict_to_timedelta(d):
    """Build a timedelta from a parse dict with optional 'hour'/'minute' keys."""
    hours = d['hour'] if 'hour' in d else 0
    minutes = d['minute'] if 'minute' in d else 0
    return datetime.timedelta(hours=hours, minutes=minutes)
# Put everything together: parse a string like "1h 30m" into a timedelta
duration_parser = lambda s: timedict_to_timedelta(duration.parseString(s).asDict())
def dict_to_string(d):
    """Concatenate the string forms of all values in d, in iteration order.

    FIX: `itervalues()` is Python 2 only; `values()` works on both 2 and 3.
    """
    return ''.join(str(value) for value in d.values())
def hash_dict(d):
    """MD5 hex digest of dict_to_string(d), usable as a stable content key.

    FIX: hashlib.md5 requires bytes on Python 3, so encode the string first
    (a no-op round-trip on ASCII data under Python 2).
    """
    return hashlib.md5(dict_to_string(d).encode('utf-8')).hexdigest()
def parse_ingredients(li):
    """Extract {'id': int, 'name': str} dicts from ingredient <li> elements.

    Items without the expected checklist <label> are skipped.
    """
    ingredients = []
    for item in li:
        label = item.find('label', attrs={'ng-class': "{true: 'checkList__item'}[true]"})
        if not label:
            continue
        span = label.find('span')
        ingredients.append({'id': int(span['data-id']), 'name': span.text})
    return ingredients
def parse_file(filename):
    """Parse one recipe HTML file and save the result as a compressed .npz.

    The output lands in a random two-level subdirectory of args.output
    (module-level CLI args), named after the numeric recipe id taken from
    the filename stem.
    """
    # FIX: close the file handle deterministically (it was never closed).
    with open(filename) as fp:
        soup = BeautifulSoup(fp, 'html5lib')
    # Get ID from filename, e.g. "1234.html" -> 1234
    basename = os.path.basename(filename)
    id_ = os.path.splitext(basename)[0]
    recipe = {'id': int(id_)}
    name = soup.find('h1', itemprop='name').text
    recipe['name'] = name
    # Two ingredient lists on the page; a missing <ul> raises TypeError,
    # which the wrapper() caller turns into a logged skip.
    li = soup.find('ul', id="lst_ingredients_1")('li')
    ingredients = parse_ingredients(li)
    li = soup.find('ul', id='lst_ingredients_2')('li')
    ingredients.extend(parse_ingredients(li))
    recipe['ingredients'] = ingredients
    yield_ = soup.find('meta', itemprop='recipeYield')
    recipe['yields'] = int(yield_['content']) if yield_ is not None else None
    cal = soup.find('span', class_='calorie-count')
    recipe['calories'] = int(cal.find('span').text) if cal is not None else None
    nut = soup.find('h3', text='Nutrition')
    nutrients = dict()
    if nut:
        for ul in nut.find_next_siblings(class_='nutrientLine'):
            try:
                nutrient_type = ul.find('li').text.rstrip(': ')
                amount = ul.find('li', class_='nutrientLine__item--amount')
                nutrients[nutrient_type] = autoconvert(amount.find('span').text)
            except Exception:
                # FIX: narrowed from a bare except; malformed entries are skipped.
                continue
    recipe['nutrients'] = nutrients if nutrients else None
    prep = soup.find('span', class_='ready-in-time')
    recipe['preparation_time'] = duration_parser(prep.text) if prep else None
    prep_root = soup.find('ul', class_='prepTime')
    if prep_root: # Has structured time information
        # NOTE(review): these overwrite preparation_time with an int (seconds)
        # while the fallback above stores a timedelta — confirm consumers cope.
        preptime = prep_root.find('time', itemprop='prepTime')
        recipe['preparation_time'] = duration_parser(preptime.text).seconds if preptime else None
        cooktime = prep_root.find('time', itemprop='cookTime')
        recipe['cooking_time'] = duration_parser(cooktime.text).seconds if cooktime else None
        totaltime = prep_root.find('time', itemprop='totalTime')
        recipe['total_time'] = duration_parser(totaltime.text).seconds if totaltime else None
    # Leading integer of a star-rating title, e.g. "123 people rated ..." -> 123
    start_number = re.compile(r'(\d+).*?')
    try:
        rating_stars = soup.find('section', id='reviews').find('ol').find_all('li')
        assert len(rating_stars) == 6, "Expected 5 degrees of ratings and a total count, for a total of 6, but got %d" % len(rating_stars)
        recipe['rating_count'] = int(rating_stars.pop(0).text.rstrip(' Ratings'))
        ratings = dict()
        for idx, degree in enumerate(rating_stars):
            stars_title = degree.div['title']
            number_of_stars = str(5 - idx)
            ratings[number_of_stars + ' stars'] = int(start_number.match(stars_title).group(1))
        recipe['ratings'] = ratings
    except Exception:
        # Ratings are optional: any parse failure simply leaves them absent.
        pass
    directory = random_dir(args.output)
    np.savez_compressed(os.path.join(directory, str(recipe['id'])), recipe)
def split_list(l, n):
    """
    Split list into n smaller lists, not preserving ordering.

    Items are dealt round-robin: sublist i receives l[i], l[i+n], ...
    FIX: `xrange` is Python 2 only; `range` works on both 2 and 3.
    """
    res = [[] for _ in range(n)]
    x = 0
    while x < len(l):
        for sublist_idx in range(n):
            if x == len(l):
                break
            res[sublist_idx].append(l[x])
            x = x + 1
    return res
def wrapper(filename):
    """Run parse_file on one path, logging failures instead of propagating.

    Keeps a multiprocessing pool alive when an individual recipe is broken.
    """
    try:
        parse_file(filename)
    except Exception as err:
        message = 'Error parsing {0}: {1}'.format(filename, err)
        print(message)
def random_dir(root):
    """Create (if needed) and return a random two-level directory under root.

    Spreads output files across up to 470*470 directories to keep the file
    count per directory low.
    """
    dirsize = 469
    strlen = len(str(dirsize))
    rand1 = random.randint(0, dirsize) # Using sqrt(number_files)
    # pad_left presumably zero-pads to `strlen` characters — TODO confirm
    # against toolbox.strings.
    dir1 = pad_left(str(rand1), '0', strlen)
    rand2 = random.randint(0, dirsize)
    dir2 = pad_left(str(rand2), '0', strlen)
    level1 = os.path.join(os.path.abspath(root), dir1)
    level2 = os.path.join(level1, dir2)
    # NOTE(review): exists/makedirs pairs race under the multiprocessing pool;
    # two workers can both pass the check and one makedirs will then raise.
    if not os.path.exists(level1):
        os.makedirs(level1)
    if not os.path.exists(level2):
        os.makedirs(level2)
    return level2
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Process recipe files')
    parser.add_argument('input', type=str, help='input directory')
    parser.add_argument('output', type=str, help='output file')
    parser.add_argument('--pool-size', '-p', type=int, help='pool size (= number of workers)')
    parser.add_argument('--chunk-size', '-c', type=int, default=1, help='chunk size (= worker batch size)')
    args = parser.parse_args()
    recipes = []
    p = multiprocessing.Pool(args.pool_size)
    # Parse all HTML files in target folder
    filenames = glob.glob(args.input + '/*.html')
    print("Got %d files." % len(filenames))
    # imap_unordered streams completions so tqdm can show progress; results
    # are discarded because parse_file writes its own output to disk.
    for result in tqdm(p.imap_unordered(wrapper, filenames, chunksize=args.chunk_size), total=len(filenames)):
        pass
    # p.map_async(parse_file, filenames)#, chunksize=args.chunk_size)
|
from threading import Thread
from lxml import etree
from copy import copy
import logging
import commands
from anansi.xml import XMLMessage,XMLError,gen_element
from anansi.comms import TCPClient
from anansi.config import config
from anansi import log
logger = logging.getLogger('anansi.mpsr')
OBS_TYPES = ['TRACKING', 'TRANSITING', 'STATIONARY']
CONFIGS = ['TB','CORR','INDIV','FB']
MPSR_SUCCESS = 0
LOAD_CONFIG = "/home/dada/scripts/load_config.csh"
class InvalidConfiguration(Exception):
    # Raised when an observation type or backend configuration name is invalid.
    def __init__(self,msg):
        super(InvalidConfiguration,self).__init__(msg)
class MPSRError(Exception):
    """Raised when the MPSR backend reports a failure state."""

    def __init__(self, response):
        super(MPSRError, self).__init__(
            "MPSR returned failure state with message: %s" % response)
class MPSRMessage(XMLMessage):
    """Builder for <mpsr_tmc_message> XML commands sent to the MPSR backend.

    Each command method appends elements to the root and returns the wire
    string form of the whole message.
    """
    def __init__(self):
        super(MPSRMessage,self).__init__(gen_element('mpsr_tmc_message'))
    def __str__(self):
        # Wire format: a single line terminated by CRLF.
        return super(MPSRMessage,self).__str__().replace("\n","")+"\r\n"
    def pprint(self):
        # Multi-line form, for logging/debugging only.
        return super(MPSRMessage,self).__str__()
    def query(self):
        self.root.append(gen_element("command",text="query"))
        return str(self)
    def stop(self):
        self.root.append(gen_element("command",text="stop"))
        return str(self)
    def start(self):
        self.root.append(gen_element("command",text="start"))
        return str(self)
    def prepare(self, mpsr_config):
        # A 'prepare' command carries the full observation setup in four
        # parameter sections, all built from the mpsr_config mapping.
        self.root.append(gen_element("command",text="prepare"))
        self.root.append(self._source_parameters(mpsr_config))
        self.root.append(self._signal_parameters(mpsr_config))
        self.root.append(self._pfb_parameters(mpsr_config))
        self.root.append(self._observation_parameters(mpsr_config))
        return str(self)
    def _source_parameters(self,mpsr_config):
        # Source position/orientation; elements carry unit attributes.
        elem = gen_element('source_parameters')
        _ = mpsr_config
        elem.append(gen_element('name',text=_['source_name'],attributes={'epoch':_['epoch']}))
        elem.append(gen_element('ra',text=_['ra'],attributes={'units':_['ra_units']}))
        elem.append(gen_element('dec',text=_['dec'],attributes={'units':_['dec_units']}))
        elem.append(gen_element('ns_tilt',text=_['ns_tilt'],attributes={'units':_['ns_tilt_units']}))
        elem.append(gen_element('md_angle',text=_['md_angle'],attributes={'units':_['md_angle_units']}))
        return elem
    def _signal_parameters(self,mpsr_config):
        # Digitized signal description (channels, bits, polarisations, ...).
        _ = mpsr_config
        elem = gen_element('signal_parameters')
        elem.append(gen_element('nchan',text=_['nchan']))
        elem.append(gen_element('nbit',text=_['nbit']))
        elem.append(gen_element('ndim',text=_['ndim']))
        elem.append(gen_element('npol',text=_['npol']))
        elem.append(gen_element('nant',text=_['nant']))
        elem.append(gen_element('bandwidth',text=_['bw'],attributes={'units':_['bw_units']}))
        elem.append(gen_element('centre_frequency',text=_['cfreq'],attributes={'units':_['cfreq_units']}))
        return elem
    def _pfb_parameters(self,mpsr_config):
        # Polyphase filterbank settings.
        _ = mpsr_config
        elem = gen_element('pfb_parameters')
        elem.append(gen_element('oversampling_ratio',text=_['oversampling_ratio']))
        elem.append(gen_element('sampling_time',text=_['tsamp'],attributes={'units':_['tsamp_units']}))
        elem.append(gen_element('channel_bandwidth',text=_['foff'],attributes={'units':_['foff_units']}))
        elem.append(gen_element('dual_sideband',text=_['dual_sideband']))
        elem.append(gen_element('resolution',text=_['resolution']))
        return elem
    def _observation_parameters(self,mpsr_config):
        # Observation bookkeeping: observer, processing files, mode, etc.
        _ = mpsr_config
        elem = gen_element('observation_parameters')
        elem.append(gen_element('observer',text=_['observer']))
        elem.append(gen_element('aq_processing_file',text=_['aq_proc_file']))
        elem.append(gen_element('bf_processing_file',text=_['bf_proc_file']))
        elem.append(gen_element('bp_processing_file',text=_['bp_proc_file']))
        elem.append(gen_element('mode',text=_['mode']))
        elem.append(gen_element('project_id',text=_['project_id']))
        elem.append(gen_element('tobs',text=_['tobs']))
        elem.append(gen_element('type',text=_['type']))
        elem.append(gen_element('config',text=_['config']))
        return elem
class MPSRDefaultResponse(XMLMessage):
    """Generic MPSR XML reply: exposes pass/fail flag and the response text."""
    def __init__(self,msg):
        try:
            super(MPSRDefaultResponse,self).__init__(etree.fromstring(msg))
        except Exception:
            # FIX: narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed; parse failures still become
            # XMLError after being logged.
            logger.error("Unknown MPSR message: %s"%msg)
            raise XMLError(msg)
        self._parse()
    def _parse(self):
        # <reply> holds "ok" on success; <response> holds the detail text.
        self.passed = self.root.find('reply').text == "ok"
        self.response = self.root.find('response').text
class MPSRQueryResponse(MPSRDefaultResponse):
    """MPSR reply to a 'query' command; additionally exposes mpsr_status."""
    def __init__(self,msg):
        super(MPSRQueryResponse,self).__init__(msg)
    def _parse(self):
        super(MPSRQueryResponse,self)._parse()
        node = self.root.find('response')
        # Text of <response>/<mpsr_status>; the set of values is backend-defined.
        self.mpsr_status = node.find("mpsr_status").text
class MPSRConfiguration(dict):
    """Observation configuration mapping, seeded from config.mpsr_defaults."""
    def __init__(self):
        # Copy so later mutations never touch the shared defaults object.
        super(MPSRConfiguration,self).__init__(copy(config.mpsr_defaults.__dict__))
    def set_source(self,name,ra,dec):
        # ra/dec are stored as strings regardless of the input type.
        self['source_name'] = name
        self['ra'] = str(ra)
        self['dec'] = str(dec)
    def set_type(self,obs_type):
        # Validate against the module-level OBS_TYPES whitelist before storing.
        if obs_type not in OBS_TYPES:
            msg = ("%s is not a valid observation type\n"
                   "valid types are: %s"%(obs_type,", ".join(OBS_TYPES)))
            raise InvalidConfiguration(msg)
        self['type'] = obs_type
    def set_config(self,config_type):
        # Overlay the backend-specific settings for the chosen mode (see CONFIGS).
        if config_type == "TB":
            self.update(config.mpsr_tb_config.__dict__)
        elif config_type == "CORR":
            self.update(config.mpsr_corr_config.__dict__)
        elif config_type == "INDIV":
            self.update(config.mpsr_indiv_config.__dict__)
        elif config_type == "FB":
            self.update(config.mpsr_fb_config.__dict__)
        else:
            msg = ("%s is not a valid configuration\n"
                   "valid types are: %s"%(config_type,", ".join(CONFIGS)))
            logger.error(msg)
            raise InvalidConfiguration(msg)
class MPSRControls(object):
    """TCP client issuing prepare/start/stop/query commands to the MPSR server."""
    def __init__(self):
        # Server endpoint and socket timeout come from the anansi config.
        self._ip = config.mpsr_server.ip
        self._port = config.mpsr_server.port
        self._timeout = config.mpsr_server.timeout
    def _send(self,msg,response_class):
        # Open a fresh connection per command, send the XML, and parse the
        # reply with response_class. Raises MPSRError on a failure reply.
        try:
            client = TCPClient(self._ip,self._port,timeout=self._timeout)
        except Exception as error:
            # NOTE(review): re-raised unchanged — this try/except only marks
            # the connection step; confirm whether wrapping was intended.
            raise error
        logger.debug("Sending XML to MPSR:\n%s"%msg)
        client.send(msg)
        response = response_class(client.receive())
        logger.debug("Received XML from MPSR:\n%s"%response)
        client.close()
        if not response.passed:
            error = MPSRError(response.response)
            logger.error(str(error))
            raise error
        return response
    def prepare(self,mpsr_config):
        msg = MPSRMessage().prepare(mpsr_config)
        return self._send(msg,MPSRDefaultResponse)
    def start(self):
        msg = MPSRMessage().start()
        return self._send(msg,MPSRDefaultResponse)
    def stop(self):
        msg = MPSRMessage().stop()
        return self._send(msg,MPSRDefaultResponse)
    def query(self):
        msg = MPSRMessage().query()
        return self._send(msg,MPSRQueryResponse)
def mpsr_startup():
    """Start the MPSR backend over ssh; raises MPSRError on non-zero status."""
    # NOTE: the `commands` module is Python 2 only; on Python 3 the
    # equivalent is subprocess.getstatusoutput.
    cmd = "ssh dada@mpsr-srv0 mopsr_backend_start.pl"
    status,output = commands.getstatusoutput(cmd)
    if status != MPSR_SUCCESS:
        raise MPSRError(output)
def mpsr_shutdown():
    """Stop the MPSR backend over ssh; raises MPSRError on non-zero status."""
    cmd= "ssh dada@mpsr-srv0 mopsr_backend_stop.pl"
    status,output = commands.getstatusoutput(cmd)
    if status != MPSR_SUCCESS:
        raise MPSRError(output)
def mpsr_load_config(config_name):
    """ Load an MPSR configuration.
    Currently supported configurations are:
    - live_bfp_40chan_16ant_22pfb_352_ants_512scr
    - live_bfp_40chan_16ant_22pfb_352_beams
    - live_corr_40chan_16ant_22pfb

    Runs LOAD_CONFIG remotely over ssh; raises MPSRError on non-zero status.
    """
    cmd= "ssh dada@mpsr-srv0 %s %s"%(LOAD_CONFIG,config_name)
    status,output = commands.getstatusoutput(cmd)
    if status != MPSR_SUCCESS:
        raise MPSRError(output)
if __name__ == '__main__':
    # Manual end-to-end exercise: configure a tracking observation and run a
    # prepare/start/stop cycle against the live MPSR backend.
    mpsr_config = MPSRConfiguration()
    mpsr_config.set_source("J0457+4515","11:11:11","22:22:22")
    mpsr_config.set_type('TRACKING')
    mpsr_config.set_config('TB')
    controller = MPSRControls()
    controller.prepare(mpsr_config)
    controller.start()
    controller.stop()
# class Renderer(object):
# def __init(self, results):
# self.results = results
#
# def __call__(self):
# raise NotImplementedError()
#
# class TrainResultRenderer(Renderer):
# def __call__(self):
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
def build_train_figure(name, train_result, fontsize):
    """Build a 2x4 grid of training-curve subplots from train_result.

    Only the epochs up to the best epoch (lowest valid_loss) are plotted;
    the bottom-row axes are created but their plots are commented out.
    Returns the matplotlib Figure.
    """
    fig = plt.figure()
    fig.suptitle(name, fontsize=fontsize)
    train_grid = gridspec.GridSpec(2, 4)
    history = train_result.get('net_history', None)
    if history:
        # NOTE(review): assumes a skorch-style History object supporting
        # history[:, 'metric'] column indexing — confirm the producer.
        length_history = len(history[:, 'train_loss'])
        # len_history = len(history)
        # Each metric is a (values, 1-based epoch numbers) tuple.
        train_loss = history[:, 'train_loss'], np.arange(1, length_history+1)
        valid_loss = history[:, 'valid_loss'], np.arange(1, length_history+1)
        roc_auc = history[:, 'roc_auc'], np.arange(1, len(history)+1)
        valid_acc = history[:, 'valid_acc'], np.arange(1, length_history+1)
        # Best epoch = lowest validation loss (1-based index).
        valid_loss_np = np.array(valid_loss[0])
        best_epoch = np.argmin(valid_loss_np) + 1
        train_loss_until_best = train_loss[0][:best_epoch], np.arange(1, best_epoch+1)
        valid_loss_until_best = valid_loss[0][:best_epoch], np.arange(1, best_epoch+1)
        valid_acc_until_best = valid_acc[0][:best_epoch], np.arange(1, best_epoch+1)
        roc_auc_until_best = roc_auc[0][:best_epoch], np.arange(1, best_epoch+1)
        train_loss_ax = fig.add_subplot(train_grid[0, 0])
        valid_loss_ax = fig.add_subplot(train_grid[0, 1])
        roc_auc_ax = fig.add_subplot(train_grid[0, 2])
        valid_acc_ax = fig.add_subplot(train_grid[0, 3])
        train_loss_until_best_ax = fig.add_subplot(train_grid[1, 0])
        valid_loss_until_best_ax = fig.add_subplot(train_grid[1, 1])
        roc_auc_until_best_ax = fig.add_subplot(train_grid[1, 2])
        valid_acc_until_best_ax = fig.add_subplot(train_grid[1, 3])
        # plot train results (truncated-at-best-epoch curves on the top row)
        simple_plot(train_loss_until_best, 'Train Loss', x_label='epochs',
                    y_label='nll loss', ax=train_loss_ax, fontsize=fontsize)
        simple_plot(valid_loss_until_best, 'Valid Loss', x_label='epochs',
                    y_label='nll loss', ax=valid_loss_ax, fontsize=fontsize)
        simple_plot(roc_auc_until_best, 'Roc Auc', x_label='epochs',
                    y_label='', ax=roc_auc_ax, fontsize=fontsize)
        simple_plot(valid_acc_until_best, 'Valid Acc', x_label='epochs',
                    y_label='valid acc', ax=valid_acc_ax, fontsize=fontsize)
        # simple_plot(train_loss, 'Train Loss', x_label='epochs',
        #             y_label='nll loss', ax=train_loss_until_best_ax, fontsize=fontsize)
        # simple_plot(valid_loss, 'Valid Loss', x_label='epochs',
        #             y_label='nll loss', ax=valid_loss_until_best_ax, fontsize=fontsize)
        # simple_plot(roc_auc, 'Roc Auc', x_label='epochs', y_label='',
        #             ax=roc_auc_until_best_ax, fontsize=fontsize)
        # simple_plot(valid_acc, 'Valid Acc', x_label='epochs',
        #             y_label='valid acc', ax=valid_acc_until_best_ax, fontsize=fontsize)
    fig.subplots_adjust(wspace=0.5, hspace=0.5)
    return fig
def compute_spelling_test_acc(group_by_item_df):
    """Accuracy of the predicted item after each repetition.

    Returns a list of {'after_x_rep': k, 'acc': fraction} dicts, one per
    repetition (1-based), comparing the k-th prediction to the true item.
    """
    df = group_by_item_df
    # Number of repetitions, taken from the first row's prediction list.
    n_reps = len(df['predicted_item_iteration'][0])
    return [
        {'after_x_rep': rep + 1,
         'acc': np.mean(df['predicted_item_iteration'].apply(
             lambda preds: preds[rep]) == df['true_item'])}
        for rep in range(n_reps)
    ]
def build_spelling_test_figure(name, test_spelling_result, fontsize):
    """Build a figure with per-repetition spelling accuracy as a bar plot.

    Expects test_spelling_result['group_by_item'] to be DataFrame-compatible
    with 'predicted_item_iteration' and 'true_item' columns.
    Returns the matplotlib Figure.
    """
    fig = plt.figure()
    fig.suptitle(name, fontsize=fontsize)
    test_spelling_grid = gridspec.GridSpec(2, 2)
    acc_iter_ax = fig.add_subplot(test_spelling_grid[0, 0])
    grouped_by_item = test_spelling_result['group_by_item']
    group_by_item_df = pd.DataFrame(grouped_by_item)
    # NOTE(review): leftover debug print — consider removing or logging.
    print(group_by_item_df[['predicted_item_iteration', 'true_item']])
    acc_arr = compute_spelling_test_acc(group_by_item_df)
    bar_plot_arr(acc_arr, 'Acc after N repetitions', x_key='after_x_rep',
                 y_key='acc', ax=acc_iter_ax, fontsize=fontsize)
    fig.subplots_adjust(wspace=0.7, hspace=0.8)
    fig.tight_layout(pad=5)
    return fig
# data['on_first'] = data['predicted_item_iteration']
# print(data[['predicted_item_iteration', 'true_item']])
# print(data[['predicted_item_iteration', 'acc_after_0_iter']])
# grouped_by_true_item = group_by_key(grouped_by_item, 'true_item')
#
# for item in grouped_by_item:
# print(item['true_item'])
# for item, value in grouped_by_true_item.items():
# print('{} {}'.format(item, len(value)))
# print(grouped_by_true_item.keys())
def build_test_figure(name, test_result, test_result_on_train, fontsize):
    """Build a 2x2 figure of confusion matrices (raw and row-normalized).

    Top row: test-set matrices; bottom row: matrices evaluated on the train
    set. Missing 'confusion_matrix' entries leave their axes empty.
    Returns the matplotlib Figure.
    """
    fig = plt.figure()
    fig.suptitle(name, fontsize=fontsize)
    test_grid = gridspec.GridSpec(2, 2)
    confusion_matrix_ax = fig.add_subplot(test_grid[0, 0])
    confusion_matrix_norm_ax = fig.add_subplot(test_grid[0, 1])
    confusion_matrix_on_train_ax = fig.add_subplot(test_grid[1, 0])
    confusion_matrix_norm_on_train_ax = fig.add_subplot(test_grid[1, 1])
    confusion_matrix_data = test_result.get('confusion_matrix', None)
    if confusion_matrix_data is not None:
        # Return values are unused; the heatmaps draw onto the given axes.
        heatmap = confusion_matrix_heatmap(confusion_matrix_data, '', [
            'Non-Target', 'Target'], ax=confusion_matrix_ax, fontsize=fontsize)
        heatmap_norm = confusion_matrix_heatmap(confusion_matrix_data, '', [
            'Non-Target', 'Target'], ax=confusion_matrix_norm_ax, fontsize=fontsize, norm=True)
    confusion_matrix_data_on_train = test_result_on_train.get(
        'confusion_matrix', None)
    if confusion_matrix_data_on_train is not None:
        heatmap = confusion_matrix_heatmap(confusion_matrix_data_on_train, '', [
            'Non-Target', 'Target'], ax=confusion_matrix_on_train_ax, fontsize=fontsize)
        heatmap_norm = confusion_matrix_heatmap(confusion_matrix_data_on_train, '', [
            'Non-Target', 'Target'], ax=confusion_matrix_norm_on_train_ax, fontsize=fontsize, norm=True)
    fig.subplots_adjust(wspace=0.7, hspace=0.8)
    fig.tight_layout(pad=5)
    return fig
def simple_plot(data, name, ax, x_label="", y_label="", fontsize=10):
    """Plot a curve on ax with labels and title.

    data is either a (y_values, x_values) tuple or a plain sequence, in
    which case x defaults to 0..len(data)-1.
    """
    ax.set_xlabel(x_label, fontsize=fontsize)
    ax.set_ylabel(y_label, fontsize=fontsize)
    ax.set_title(name, fontsize=fontsize)
    if isinstance(data, tuple):
        xs, ys = data[1], data[0]
    else:
        xs, ys = np.arange(len(data)), data
    ax.plot(xs, ys)
def bar_plot_arr(arr, name, x_key='x', y_key='y', x_label=None, y_label=None, ax=None, fontsize=10):
    """Bar-plot a list of dicts by converting it to a DataFrame first."""
    return bar_plot_df(pd.DataFrame(arr), name, x_key, y_key, x_label, y_label, ax, fontsize)
def bar_plot_df(df, name, x_key='x', y_key='y', x_label=None, y_label=None, ax=None, fontsize=10):
    """Draw a seaborn bar plot of df[y_key] vs df[x_key] on ax, annotating
    each bar with its height (2 decimals).

    NOTE(review): ax=None will crash below at `ax.patches` — callers must
    pass an axis, so the default is misleading.
    """
    bar_plot = sns.barplot(x=x_key, y=y_key, data=df,
                           ax=ax, color=sns.xkcd_rgb["denim blue"])
    for p in ax.patches:
        ax.annotate("%.2f" % p.get_height(), (p.get_x() + p.get_width() / 2., p.get_height()),
                    ha='center', va='center', rotation=0, xytext=(0, 2), textcoords='offset points', fontsize=4)
    if x_label:
        ax.set_xlabel(x_label, fontsize=fontsize)
    if y_label:
        ax.set_ylabel(y_label, fontsize=fontsize)
    ax.set_title(name, fontsize=fontsize)
def confusion_matrix_heatmap(confusion_matrix, name, class_names, ax, fontsize=10, norm=False):
    """Draw an annotated confusion-matrix heatmap on ax.

    With norm=True each row is divided by its sum (NaN for all-zero rows)
    and cells are formatted with 3 decimals instead of as integers.
    """
    if norm:
        confusion_matrix = confusion_matrix.astype(
            'float') / confusion_matrix.sum(axis=1)[:, np.newaxis]
    title = 'CM {}'.format(name)
    title = 'Normalized ' + title if norm else title
    ax.set_title(title, fontsize=fontsize)
    df_cm = pd.DataFrame(
        confusion_matrix, index=class_names, columns=class_names)
    fmt = ".3f" if norm else "d"
    heatmap = sns.heatmap(df_cm, annot=True, fmt=fmt,
                          ax=ax, annot_kws={"size": 5})
    heatmap.yaxis.set_ticklabels(
        heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize)
    heatmap.xaxis.set_ticklabels(
        heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize)
|
"""An example for using the TackEverything package
This example uses an Object Detection model from the TensorFlow git
for detecting humans. Using a simple citizen/cop classification model
I've created using TF, it can now easily detect and track cops in a video using
a few lines of code.
The use of the TrackEverything package make the models much more accurate
and robust, using tracking features and statistics.
"""
import os
#hide some tf loading data
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# pylint: disable=wrong-import-position
from TrackEverything.detector import Detector
from TrackEverything.tool_box import InspectorVars
from TrackEverything.statistical_methods import StatisticalCalculator, StatMethods
from TrackEverything.visualization_utils import VisualizationVars
from detection_vars import get_det_vars
from classification_vars import get_class_vars
from play_video import run_video
# pylint: enable=wrong-import-position
# Paths to the frozen TF detection model and the trained classifier weights.
DET_MODEL_PATH="detection_models/faster_rcnn_inception_v2_coco_2018_01_28/saved_model"
CLASS_MODEL_PATH="classification_models/" \
    "0.93855_L_0.3909_opt_RMSprop_loss_b_crossentropy_lr_0.0005_baches_20_shape_[165, 90]_auc.hdf5"
#set the detector
detector_1=Detector(
    det_vars=get_det_vars(DET_MODEL_PATH),
    class_vars=get_class_vars(CLASS_MODEL_PATH),
    inspector_vars=InspectorVars(
        # Smooth per-track scores with an exponential moving average (EMA).
        stat_calc=StatisticalCalculator(method=StatMethods.EMA)
    ),
    visualization_vars=VisualizationVars(
        labels=["Citizen","Cop"],
        colors=["Green","Red","Cyan"],
        show_trackers=True,
        # Tracks scoring below this threshold display uncertainty_label.
        uncertainty_threshold=0.5,
        uncertainty_label="Getting Info"
    )
)
#Test it on a video, rendered at 480x270
VIDEO_PATH="video/032.mp4"
run_video(VIDEO_PATH,(480,270),detector_1)
# from play_video import save_video
# save_video(VIDEO_PATH,(480,270),detector_1,"video/cop_032.avi")
|
from itertools import accumulate
from collections import Counter
from math import comb
# First input line is the element count; it is read but not needed explicitly.
input()
nums = [int(x) for x in input().split()]
# Prefix sums with a leading 0: a zero-sum subarray corresponds exactly to a
# pair of equal prefix sums, so for each repeated value count C(cnt, 2) pairs.
cnts = Counter([0] + list(accumulate(nums)))
ans = sum(comb(cnt, 2) for _, cnt in cnts.items())
print(ans)
|
from TableUtils import VTTable
import sys
import hashlib
# Root folder holding the raw Excel inputs and the generated Output/ files.
basedir = 'C:/Data/Genomes/PlasmodiumFalciparum/Release_21/OriginalData_04'
# Build index of all available studies
tableStudies=VTTable.VTTable()
tableStudies.allColumnsText=True
#tableStudies.LoadFile(basedir+"/PartnerStudies.txt")
tableStudies.LoadXls(basedir+"/PartnerStudies.xlsx","PartnerStudies")
# Study name -> row lookup, used below to validate study references.
studiesMap = tableStudies.BuildColDict('Study', False)
tablePeople=VTTable.VTTable()
tablePeople.allColumnsText=True
#tablePeople.LoadFile(basedir+"/PS_people.txt")
tablePeople.LoadXls(basedir+"/PS_people.xlsx","PS_people")
# NOTE: 'Affliliation2' spelling matches the source spreadsheet column name.
tablePeople.ColumnRemoveQuotes("Affiliation1")
tablePeople.ColumnRemoveQuotes("Affliliation2")
tablePeople.ColumnRemoveQuotes("LeadPartnerFor")
tablePeople.ColumnRemoveQuotes("KeyAssociateFor")
tablePeople.DropCol('Affiliation1')
tablePeople.DropCol('Affliliation2')
tablePeople.DropCol('Affiliation URL')
#Create ID: lower-cased name, spaces replaced by dashes, dots removed.
tablePeople.MergeColsToString('id', '{0}','Name')
tablePeople.MapCol('id', lambda st: st.lower().replace(' ','-').replace('.',''))
tablePeople.ArrangeColumns(['id'])
tablePeople.PrintRows(0,9999)
#check uniqueness of id's (BuildColDict presumably fails on duplicates — TODO confirm)
tablePeople.BuildColDict('id', False)
#Split roles into normalised table: one row per (person, lead-partner study).
tableRoles=VTTable.VTTable()
tableRoles.AddColumn(VTTable.VTColumn('contact_person','Text'))
tableRoles.AddColumn(VTTable.VTColumn('study','Text'))
for RowNr in tablePeople.GetRowNrRange():
    personid=tablePeople.GetValue(RowNr,tablePeople.GetColNr('id'))
    studyListStr = tablePeople.GetValue(RowNr,tablePeople.GetColNr('LeadPartnerFor'))
    if len(studyListStr)>0:
        studylist=studyListStr.split(',')
        for study in studylist:
            # Every referenced study must exist in the PartnerStudies sheet.
            if study not in studiesMap:
                raise Exception("Invalid study "+study)
            tableRoles.AddRowEmpty()
            RowNr2=tableRoles.GetRowCount()-1
            tableRoles.SetValue(RowNr2,0,personid)
            tableRoles.SetValue(RowNr2,1,study)
tableRoles.AddIndexCol('id')
tableRoles.PrintRows(0,9999)
tableRoles.SaveFile(basedir+'/Output/study_contact_person.txt', True, '')
tableRoles.SaveSQLDump(basedir+'/Output/study_contact_person.sql','study_contact_person')
#Prepare & save people table in the target schema's column order.
tablePeople.DropCol('LeadPartnerFor')
tablePeople.DropCol('KeyAssociateFor')
tablePeople.DropCol('Previous_ContactPersonFor')
tablePeople.RenameCol('id', 'contact_person')
tablePeople.RenameCol('Name', 'name')
tablePeople.RenameCol('Email', 'email')
# Empty placeholder columns expected by the target schema.
tablePeople.AddColumn(VTTable.VTColumn('description','Text'))
tablePeople.AddColumn(VTTable.VTColumn('image','Text'))
tablePeople.FillColumn('description', '')
tablePeople.FillColumn('image', '')
tablePeople.ArrangeColumns(['contact_person', 'description', 'email', 'image', 'name'])
tablePeople.PrintRows(0,9)
tablePeople.SaveFile(basedir+'/Output/contact_person.txt', True, '')
tablePeople.SaveSQLDump(basedir+'/Output/contact_person.sql','contact_person')
|
import os
import csv
import re
from getpass import getpass
from email_utils import Email, EmailConnection
from file_utils import FileUtils
from datetime import datetime
f_utils = FileUtils()
# ------ Global configuration variables ------
SERVER_SMTP = "smtp.gmail.com"
SERVER_PORT = 587 # SMTP port
#FROM = "reflets@etsmtl.net" # default Reflets sender address
FROM = ""
# -------------------------------------------------
print('================================================')
print(' Bievenue sur ImageMailer! V1.1 ')
print()
print(' Écrit par Skander pour le club photo ')
print(' R E F L E T S ')
print()
print(' Contact: skander.kc AT gmail.com ')
print('================================================')
print()
print("°°° Connexion au serveur de messagerie Gmail °°°")
# Connect to the Gmail SMTP server
password = getpass(" - Entrer le mot de passe de " + FROM + " : ")
print("Connexion au serveur de messagerie...")
try:
    server = EmailConnection(SERVER_SMTP, SERVER_PORT, FROM, password)
except:
    # NOTE(review): bare except hides the real SMTP/auth error; consider
    # narrowing and chaining with `raise ... from err`.
    raise Exception("Il y a une erreur de connexion au serveur de messagerie. Réessayez.")
print("Connexion établie!")
print()
print("°°° Spécification du dossier image et des données en .CSV °°°")
# Get the folder containing the photos.
# It should contain one subfolder per participant, named by participant index.
root_dir = input(" - Indiquer le nom du dossier contenant les photos (laisser vide si actuel) : ")
photos_dir_content = f_utils.get_directory_content(root_dir)
#print(photos_dir_content)
# Get the CSV file with the participant data
csv_file_name = input(" - Indiquer le nom du fichier CSV contenant les emails: ")
# Validate that the file is a CSV
f_utils.check_if_csv(csv_file_name)
print()
# NOTE(review): this open() is never closed, and count_rows == 0 only happens
# for a completely empty file (a header alone already yields 1).
count_rows = sum(1 for line in open(csv_file_name))
total_participant = count_rows - 1
if count_rows == 0:
    raise Exception("Le fichier .Csv est vide.")
# Read the CSV data and send the emails
print("°°° Envoi de " + str(total_participant) + " courriels °°°")
with open(csv_file_name, 'r', encoding="utf-8") as csv_file:
reader = csv.reader(csv_file, delimiter=',')
next(reader) # Skip la première ligne (nom des colonnes)
emails_not_sent = []
index_row = 1
for row in reader:
row_str = str(row)
row_array = row_str.split(',')
name = row_array[1].strip()
email = row_array[2].strip()
# Formatage: supprimer les guillemets (') au début et à la fin
name = name[1:-1]
email = email[1:-1]
index_raw = row_array[-1] # >>> '177'] (par exemple)
# Formatage: re permet de garder que des nombres (regex \D).
index = re.sub(r"\D", "", index_raw) # >>> 177 (même exemple)
formatted_index = f_utils.formatted_index(int(index)) # Ex. si index 1 => 001
participant = (index + " - " + name + " - " + email)
# Chercher nom dossier ayant le même numero que l'index
for directory in photos_dir_content:
directory_path = root_dir + "/" + directory
# Youpi ! On a trouvé le participant et son dossier contenant les photos
# On peut maintenant envoyer ses photos !
if os.path.isdir(directory_path) and directory == formatted_index:
photos = [f for f in os.listdir(directory_path) if f_utils.is_photo(f)] # Ne récuperer que les images
print('---------------------- ' + str(index_row) + '/' + str(total_participant) + ' ----------------------')
print(participant)
if len(photos) > 0:
# Remplacer la photo par le chemin complet
for i, photo in enumerate(photos):
photos[i] = directory_path + "/" + photo
subject = 'Votre photo LinkedIn est prête!'
message = f_utils.read_file_content("mails/email.html")
print("Préparation du courriel à envoyer à " + name )
email = Email(FROM, email, subject, message, attachments=photos, message_type="html")
print("Envoi...")
try:
server.send(email)
print("Courriel envoyé!")
except:
print("ÉCHEC de l'envoi du courriel à " + name)
print("On passe au suivant...")
reason = "Échec envoi"
participant = participant + " - " + reason
emails_not_sent.append(participant)
pass
else:
print("Aucune photo trouvée")
reason = "Aucune photo"
participant = participant + " - " + reason
emails_not_sent.append(participant)
index_row += 1
print()
# Afficher les emails non envoyés si c'est le cas
count_emails_not_sent = len(emails_not_sent)
if count_emails_not_sent > 0:
print("°°° Oyé! Oyé! il y a eu " + str(count_emails_not_sent) + " courriels inacheminés °°°")
print("Voici la liste, sous la forme : index - nom - courriel ")
# Afficher & enregistrer la liste dans un fichier texte
now = datetime.strftime(datetime.now(), '%Y-%m-%d-%H_%M_%S')
file_emails_not_sent = "emails_not_sent_" + now + ".txt"
if not os.path.exists("log"):
os.makedirs("log")
with open("log/" + file_emails_not_sent, 'w') as text_file:
text_file.write("Liste des courriels inacheminés \n")
text_file.write("Format id - nom - courriel - raison : \n\n")
for email in emails_not_sent:
email_str = str(email)
print(email_str)
text_file.write(email_str + "\n")
print("Pas de panique, vous pouvez consulter cette liste dans le fichier : log/" + file_emails_not_sent)
else:
print("°°° Succès! Tous les courriels ont été envoyés. °°°")
print()
# Déconnexion
print("Déconnexion du serveur...")
server.close()
print("Déconnecté!")
|
#library
import re as e
import dns.resolver
import socket as soc
import smtplib as sl
# Verify an e-mail address: syntax check, MX lookup, then an SMTP RCPT probe.
addrtov = input("enter Email : ")
# Syntax check with a simple e-mail regex (raw string so the backslash
# escapes are unambiguous; the pattern itself is unchanged).
verified = e.match(r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$', addrtov)
if verified is None:
    print('Bad Syntax')
else:
    print("OK")
# NOTE(review): execution continues even after a failed syntax check; an
# address without '@' makes host[1] below raise IndexError.
# Resolve the MX record for the address' domain.
host = addrtov.split('@')
domain = host[1]
# NOTE: dnspython >= 2.0 renamed query() to resolve().
records = dns.resolver.query(domain, 'MX')
mxRecord = str(records[0].exchange)
#print(mxRecord)
# SMTP setup. Passing host/port to the constructor already opens the
# connection, so the redundant bare server.connect() call — which would
# have tried to re-connect to localhost:25 — was removed.
server = sl.SMTP('samlati.com', 2525)
server.set_debuglevel(1)
# SECURITY NOTE(review): hard-coded credentials; move to env vars/config.
server.login('samlatizyhdskck', '7apreKVFNFTdBpWy')
# SMTP conversation: the reply code to RCPT tells us whether the remote
# server accepts the mailbox.
#server.connect(mxRecord)
server.helo(domain)
server.mail('dav22mark@gmail.com')
code, message = server.rcpt(str(addrtov))
server.quit()
#print(message)
# Assume 250 as success.
if code == 250:
    print('True')
elif code == 550:
    print('blacklisted')
|
from django.conf.urls.static import static
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
# Root URL configuration: admin, blog, and accounts apps, plus media serving.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^blog/', include('blog.urls', namespace='blog')),
    url(r'^accounts/', include('accounts.urls')),
    # Unlike blog, accounts does not get a namespace: the auth app performs
    # its URL reversing without a namespace.
]
# Serve user-uploaded media through Django (development setup).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# -*- coding: utf-8 -*-
import requests
from config import TG_TOKEN, USE_PROXY, PROXY, CHAT_ID
__author__ = 'ihciah'
class TGBot:
    """Minimal Telegram Bot API client used to push notifications.

    Every method accepts `user` either as an index into the configured
    CHAT_ID list (when in range) or as a literal Telegram chat id.
    """
    @staticmethod
    def _resolve_chat(user):
        # Small index -> configured chat; anything else is a raw chat id.
        user = int(user)
        return CHAT_ID[user] if user < len(CHAT_ID) else user
    @staticmethod
    def send_message(text, user=0):
        """Send `text`; return the new message id, or None on API failure."""
        tg_message_url = "https://api.telegram.org/bot%s/sendMessage" % TG_TOKEN
        payload = {"chat_id": TGBot._resolve_chat(user),
                   "text": text,
                   "disable_notification": False
                   }
        res = requests.post(tg_message_url, payload, proxies=PROXY).json()
        return res["result"]["message_id"] if res["ok"] else None
    @staticmethod
    def send_image(image_url, caption="", user=0):
        """Send a photo by URL with an optional caption (fire-and-forget)."""
        tg_photo_url = "https://api.telegram.org/bot%s/sendPhoto" % TG_TOKEN
        payload = {"chat_id": TGBot._resolve_chat(user),
                   "photo": image_url,
                   "disable_notification": False,
                   }
        if caption:
            payload["caption"] = caption
        requests.post(tg_photo_url, payload, proxies=PROXY)
    @staticmethod
    def update_message(text, message_id, user=0):
        """Edit an existing message's text in place (fire-and-forget)."""
        tg_update_url = "https://api.telegram.org/bot%s/editMessageText" % TG_TOKEN
        payload = {"chat_id": TGBot._resolve_chat(user),
                   "text": text,
                   "message_id": message_id
                   }
        requests.post(tg_update_url, payload, proxies=PROXY)
|
def Max2(n, k):
    """Return the index of the first element of *n* that is >= *k*.

    Linear scan, so *n* need not be sorted. Returns None when every
    element is smaller than *k* (or *n* is empty).
    """
    # next() with a default replaces the manual range(len(...)) loop.
    return next((i for i, value in enumerate(n) if value >= k), None)
def Max(n, k, first, last):
    """Binary search: index of the first element of sorted *n* that is >= *k*.

    Searches within [first, last]. Returns None when even the largest
    element is below *k*. Iterative equivalent of the original recursion.
    """
    if n[0] >= k:
        return 0
    if n[len(n) - 1] < k:
        return None
    # Narrow [first, last] until the two indices are adjacent; at each step
    # the answer stays inside the interval.
    while last - first > 1:
        middle = (first + last) // 2
        if n[middle] >= k:
            last = middle
        else:
            first = middle
    return first if n[first] >= k else last
|
import json
import boto3
import base64
import re
import numpy as np
# Shared boto3 Lambda client, plus the name of the function being tuned;
# lambda_name is filled in by lambda_handler from the incoming event.
lambda_func = boto3.client('lambda')
lambda_name = ""
def lambda_handler(event, context):
    """Entry point: tune the memory size of the Lambda named in the event.

    Expects event['functionId'] to carry the target function's name; runs
    the linear memory search, then leaves the function configured at the
    selected memory level.
    """
    global lambda_name
    lambda_name = event['functionId']
    memory = linear_algorithm()
    # The search already reconfigured the function repeatedly; set it one
    # final time to the chosen optimum.
    set_lambda_memory_level(memory)
    return {'statusCode': 200, 'body': json.dumps("response")}
def linear_algorithm():
    """Increase Lambda memory in 128 MB steps until duration stops improving.

    Invokes the target function at each memory level, tracks the fastest
    (global minimum) duration seen, and stops after `max_attempts`
    consecutive steps without a ~1% improvement. Returns the memory size
    (MB) of the global minimum.
    """
    # AWS compute price coefficient used for the cost column in `values`.
    aws_compute_coef = 0.00001667
    memory_prev = 128
    attempts_counter = 0
    values = []  # one [duration, memory, estimated_cost] row per step
    max_attempts = 3
    step_increment = 128
    min_duration = 0  # baseline for "still improving?"; set on first stall
    set_lambda_memory_level(memory_prev)
    duration_prev = invoke_lambda()
    global_min = {
        "duration": duration_prev,
        "memory": memory_prev
    }
    value = [duration_prev, memory_prev, memory_prev * duration_prev * aws_compute_coef / 1024000]
    values.append(value)
    while (attempts_counter < max_attempts):
        memory = memory_prev + step_increment
        set_lambda_memory_level(memory)
        duration = int(invoke_lambda())
        value = [duration, memory, duration * memory * aws_compute_coef / 1024000]
        values.append(value)
        # A step counts as an improvement when it is at least ~1% faster
        # than the previous step.
        if(duration /duration_prev < 0.99):
            if(attempts_counter == 0):
                global_min["memory"] = memory
                global_min["duration"] = duration
                print("global min duration: ", global_min["duration"])
            # NOTE(review): min_duration is only updated in the stall branch
            # below, so this compares against the best duration recorded at
            # the most recent stall — TODO confirm that is the intent.
            elif(duration/min_duration < 0.99):
                global_min["memory"] = memory
                global_min["duration"] = duration
                print("global min duration with not zero counter: ", global_min["duration"])
        else:
            attempts_counter += 1
            min_duration = global_min["duration"]
            print("attempts counter: ", attempts_counter)
        duration_prev = duration
        memory_prev = memory
    print(values)
    print("selected memory: ", global_min["memory"])
    return global_min["memory"]
def invoke_lambda():
    """Invoke the target Lambda 5 times; return the p90 billed duration.

    Each synchronous invocation requests the log tail; the billed duration
    (ms) is parsed out of the base64-encoded log with a regex.
    """
    durations = []
    for _ in range(5):
        response = lambda_func.invoke(
            FunctionName=lambda_name,
            InvocationType='RequestResponse',
            LogType='Tail',
        )
        log = base64.b64decode(response["LogResult"])
        m = re.search('\tBilled Duration: (\d+)', log.decode("utf-8"))
        durations.append(int(m.group(1)))
    return np.percentile(durations, 90)
def set_lambda_memory_level(memory: int):
    """Reconfigure the target Lambda's MemorySize (in MB)."""
    lambda_func.update_function_configuration(
        FunctionName=lambda_name,
        MemorySize=int(memory)
    )
import builtins
import unittest
import ex3_5
class MyTestCase(unittest.TestCase):
    """Feeds ex3_5.handle() canned stdin and checks the returned fraction."""
    def test_case_1(self):
        self.common_test("1\n2\n1\n3\n2\n3", "1/4")
    def test_case_2(self):
        self.common_test("3\n6\n1\n6\n4\n9", "3/4")
    def common_test(self, input_value, expected):
        """Monkey-patch builtins.input to return input_value, then compare."""
        saved_input = builtins.input
        builtins.input = lambda: input_value
        result = ex3_5.handle()
        self.assertEqual(expected, result)
        builtins.input = saved_input
# Standard unittest entry point when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
#Acceleration and angular acceleration visualization program
# July 7, 2020
# Yuto Nakayachi
import sys
import csv
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Variable to get real value
MPU9250A_2g = 0.000061035156 # 0.000061035156 g/LSB
MPU9250A_4g = 0.000122070312 # 0.000122070312 g/LSB
MPU9250A_8g = 0.000244140625 # 0.000244140625 g/LSB
MPU9250A_16g = 0.000488281250 # 0.000488281250 g/LSB
MPU9250G_250dps = 0.007633587786 # 0.007633587786 dps/LSB
MPU9250G_500dps = 0.015267175572 # 0.015267175572 dps/LSB
MPU9250G_1000dps = 0.030487804878 # 0.030487804878 dps/LSB
MPU9250G_2000dps = 0.060975609756 # 0.060975609756 dps/LSB
MPU9250M_4800uT = 0.6 # 0.6 uT/LSB
MPU9250T_85degC = 0.002995177763 # 0.002995177763 degC/LSB
Magnetometer_Sensitivity_Scale_Factor = 0.15
yl=MPU9250A_4g * 35000
yr=MPU9250G_500dps * 35000
args = sys.argv
title = args[1]
data = pd.read_csv(title,encoding="UTF-8")
xdata = data["ms"]
print(xdata.head())
data=data.drop(data.columns[[0,1]],axis=1)
print(data.head())
data1 = data.drop(data.columns[[6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]],axis=1) #CH1
data2 = data.drop(data.columns[[0,1,2,3,4,5,12,13,14,15,16,17,18,19,20,21,22,23]],axis=1) #CH2
data3 = data.drop(data.columns[[0,1,2,3,4,5,6,7,8,9,10,11,18,19,20,21,22,23]],axis=1) #CH3
data4 = data.drop(data.columns[[0,1,2,3,4,5, 6,7,8,9,10,11, 12,13,14,15,16,17]],axis=1) #CH4
print(data1.head())
print(data2.head())
print(data3.head())
print(data4.head())
# The four per-channel figures used to be four copy-pasted sections; they
# are now produced by a single helper.
def _plot_channel(ch, chdata):
    """Draw one figure for channel *ch*: ACC on the left, GYRO on the right.

    *chdata* holds the channel's six columns (ACC_X<ch> .. GYRO_Z<ch>).
    Returns the created figure.
    """
    fig, (axe_l, axe_r) = plt.subplots(ncols=2)
    for axis_name in ("X", "Y", "Z"):
        axe_l.plot(xdata, chdata["ACC_%s%d" % (axis_name, ch)], label="ACC_" + axis_name)
        axe_r.plot(xdata, chdata["GYRO_%s%d" % (axis_name, ch)], label="GYRO_" + axis_name)
    axe_l.set_title("CH%d ACC" % ch)
    axe_r.set_title("CH%d GYRO" % ch)
    # Shared cosmetics: axis labels, symmetric y-limits, legends.
    for axe, limit in ((axe_l, yl), (axe_r, yr)):
        axe.set_xlabel("ms")
        axe.set_ylabel("Value")
        axe.set_ylim(-limit, limit)
        axe.legend()
    return fig

fig1 = _plot_channel(1, data1) #CH1
fig2 = _plot_channel(2, data2) #CH2
fig3 = _plot_channel(3, data3) #CH3
fig4 = _plot_channel(4, data4) #CH4
plt.show()
from nmr import *
import fornotebook as fnb
# Plot overnight vs daytime NMR integration values to gauge instrument
# stability, then plot the two averages with std-dev error bars.
fullPath = '/Users/stupidrobot/ActiveProjects/exp_data/ryan_cnsi/nmr/151012_Annex_162C_Tris_RT_ODNP/'
close('all')
fl = fnb.figlist()
# NOTE(review): the integrate() calls that define `overNight` and `dayTime`
# are commented out, so the plotting below raises NameError when this file
# is run standalone — presumably those names were left bound in an
# interactive session. Re-enable these lines to run the script on its own.
#overNight,fl.figurelist = integrate(fullPath,[42],integration_width=75,first_figure=fl.figurelist,pdfstring='overNight')
#overNight.data *= -1
#
#dayTime,fl.figurelist = integrate(fullPath,[41],integration_width=75,first_figure=fl.figurelist,pdfstring='dayTime')
#dayTime.data *= -1
fig = figure(figsize=(15,8))
plot(overNight.runcopy(real).set_error(None),'.',alpha = 0.5,label='real overNight')
plot(overNight.runcopy(abs).set_error(None),'.',alpha = 0.5,label='abs overNight')
plot(dayTime.runcopy(real).set_error(None),'.',alpha = 0.5,label='real dayTime')
plot(dayTime.runcopy(abs).set_error(None),'.',alpha = 0.5,label='abs dayTime')
legend(loc=3,prop={'size':15})
xlabel(r'$\mathtt{run\/ number}$',fontsize=30)
xticks(fontsize=20)
yticks(fontsize=20)
ylabel(r'$\mathtt{integration\/ val}$',fontsize=30)
title(r'$\mathtt{NMR\/ Stability\/ Measurement}$',fontsize=30)
fig.patch.set_alpha(0) # This makes the background transparent!!
giveSpace()
tight_layout()
show()
# Single-point averages, with the standard deviation as the error bar.
oNavg = nddata(average(overNight.data)).set_error(std(overNight.data)).labels('value',array([0]))
dTavg = nddata(average(dayTime.data)).set_error(std(dayTime.data)).labels('value',array([1]))
fig = figure(figsize=(15,8))
plot(oNavg,'o',label='overNight Average')
plot(dTavg,'o',label='dayTime Average')
legend(loc=3,prop={'size':15})
xlabel(r'$\mathtt{run\/}$',fontsize=30)
xticks(fontsize=20)
yticks(fontsize=20)
ylabel(r'$\mathtt{integration\/ val}$',fontsize=30)
title(r'$\mathtt{NMR\/ Stability\/ Measurement}$',fontsize=30)
fig.patch.set_alpha(0) # This makes the background transparent!!
giveSpace()
tight_layout()
show()
|
# Conversion factor applied to each entered time — 0.299792, i.e. the speed
# of light in metres per nanosecond (TODO confirm intended units).
SPEED_FACTOR = 0.299792

def main():
    """Read three times from stdin and print the corresponding distances.

    Prompt and output order match the original: the three times are read
    first, then the three converted values are printed.
    """
    times = [input(prompt) for prompt in ("First time? ", "Second time? ", "Third time? ")]
    for t in times:
        # float() raises ValueError on malformed input, as before.
        print(float(t) * SPEED_FACTOR)

if __name__ == "__main__":
    main()
|
import dateutil.parser
import numpy as np
def sort_log(log_string):
    """Print the lines of *log_string* reordered chronologically.

    Each line is expected to carry a parseable timestamp in characters
    1-26 (line[1:27]); the empty fragment after the final newline is
    discarded before sorting.
    """
    entries = np.asarray(log_string.split('\n')[:-1])
    stamps = [dateutil.parser.parse(entry[1:27]) for entry in entries]
    ordering = np.argsort(stamps)
    print('\n'.join(entries[ordering]))
|
"""
Leetcode #989
"""
from leetcode.utils import List
class Solution:
    def addToArrayForm(self, A: List[int], K: int) -> List[int]:
        """Return the digit-array form of A (read as a number) plus K.

        Mutates A in place from the least-significant digit, folding K's
        digits in with carries; leftover high digits of K are prepended.
        """
        if not A:
            # BUG FIX: original did `return list(k)` with an undefined
            # lowercase `k` (NameError); return K's digits instead.
            return [int(d) for d in str(K)]
        if not K:
            return A
        for i in reversed(range(len(A))):
            # 1 2 0 0
            #     3 4
            K, d = divmod(K, 10)
            carry, A[i] = divmod(A[i] + d, 10)
            K += carry
            if not K:
                break
        if K:
            # K still has digits left after the array was exhausted.
            A = list(map(int, str(K))) + A
        return A
    def addToArrayForm_ALT(self, A: List[int], K: int) -> List[int]:
        """Alternative: build the result right-to-left without early exit."""
        res = []
        i = len(A) - 1
        while K > 0 or i >= 0:
            K, rmd = divmod(K + (A[i] if i >= 0 else 0), 10)
            res.append(rmd)
            i -= 1
        return list(reversed(res))
if __name__ == "__main__":
solution = Solution()
assert solution.addToArrayForm([1, 2, 0, 0], 34) == [1, 2, 3, 4]
assert solution.addToArrayForm([2, 1, 5], 806) == [1, 0, 2, 1]
assert solution.addToArrayForm([9,9,9,9,9,9,9,9,9,9], 1) == [1,0,0,0,0,0,0,0,0,0,0]
assert solution.addToArrayForm_ALT([9,9,9,9,9,9,9,9,9,9], 1) == [1,0,0,0,0,0,0,0,0,0,0]
|
import os
import csv
from datetime import datetime
import boto3
from botocore.exceptions import ClientError
# Elastic-IP inventory: for each configured AWS account and region, write a
# CSV report listing associated EIPs (with instance details) and, per
# region, the unassociated EIPs.
date = datetime.today().strftime("%d-%m-%Y")
dir_name = "Reports"
# Header row for the "allocated and associated" section.
fields = [
    "SI No",
    "Instance Name",
    "Instance Id",
    "Elastic IP",
    "Private IP",
    "Instance State",
]
fields_unassociated = ["SI No", "Elastic IP"]
# Creating Reports directory if it not exists
if not os.path.exists(dir_name):
    os.mkdir(dir_name)
try:
    # Add the AWS accounts credentials below.
    # SECURITY NOTE(review): credentials belong in env vars or a shared
    # credentials file, not in source code.
    aws_accounts = {
        1: {
            "aws_aka": "First AWS Account Name",
            "aws_aki": "[access_key_id]",
            "aws_sak": "[secret_access_key]",
        },
        2: {
            'aws_aka': 'Second AWS Account Name',
            'aws_aki': '[access_key_id]',
            'aws_sak': '[secret_access_key]'
        },
        3: {
            'aws_aka': 'Third AWS Account Name',
            'aws_aki': '[access_key_id]',
            'aws_sak': '[secret_access_key]'
        }
    }
    regions = boto3.session.Session().get_available_regions("ec2")
    # aws_current_account iterates (number, credentials-dict) pairs.
    for aws_current_account in aws_accounts.items():
        if not os.path.exists(dir_name + "/" + aws_current_account[1]["aws_aka"]):
            os.mkdir(dir_name + "/" + aws_current_account[1]["aws_aka"])
        previous_region = []  # regions already given an "unassociated" header
        previous_region_eip = []  # regions already given an EIP section header
        # NOTE(review): csv files are normally opened with newline='' for
        # csv.writer — TODO confirm the intended line endings.
        with open(
            dir_name
            + "/"
            + aws_current_account[1]["aws_aka"]
            + "/"
            + "Allocated and associated.csv",
            "a",
        ) as csvfile:
            csv_writer = csv.writer(csvfile)
            csv_writer.writerows(
                [["Account Alias", aws_current_account[1]["aws_aka"]], ["Date", date]]
            )
            for region in regions:
                unassociated_ips = []
                serial_number = 1
                serial_number_1 = 1
                previous_instance_id = []
                try:
                    ec2 = boto3.resource(
                        "ec2",
                        aws_access_key_id=aws_current_account[1]["aws_aki"],
                        aws_secret_access_key=aws_current_account[1]["aws_sak"],
                        region_name=region,
                    )
                    for elastic_ip in ec2.vpc_addresses.all():
                        # Emit the region header once, before its first row.
                        if (
                            len(previous_region_eip) == 0
                            or region != previous_region_eip[-1]
                        ):
                            csv_writer.writerow([])
                            csv_writer.writerow(["Region Name", region])
                            csv_writer.writerow([])
                            csv_writer.writerow(fields)
                            previous_region_eip.append(region)
                        # Bump the serial number when moving to a new instance.
                        if (
                            len(previous_instance_id) != 0
                            and elastic_ip.instance_id is not None
                            and elastic_ip.instance_id != previous_instance_id[-1]
                        ):
                            serial_number += 1
                        if elastic_ip.network_interface_id:
                            rows = []
                            instance_name = ""
                            instance = ec2.Instance(elastic_ip.instance_id)
                            # Pull the instance's "Name" tag for the report.
                            for tag_name in instance.tags:
                                if tag_name["Key"] == "Name":
                                    instance_name += tag_name["Value"]
                            rows.append(
                                [
                                    serial_number,
                                    instance_name,
                                    elastic_ip.instance_id,
                                    elastic_ip.public_ip,
                                    elastic_ip.private_ip_address,
                                    instance.state["Name"],
                                ]
                            )
                            previous_instance_id.append(elastic_ip.instance_id)
                            csv_writer.writerows(rows)
                        else:
                            unassociated_ips.append(elastic_ip)
                    # Second pass: list the EIPs with no network interface.
                    for unassociated_ip in unassociated_ips:
                        rows = []
                        if len(previous_region) == 0 or region != previous_region[-1]:
                            csv_writer.writerow(["Unassociated Elastic IPs"])
                            csv_writer.writerow(fields_unassociated)
                            previous_region.append(region)
                        rows.append([serial_number_1, unassociated_ip.public_ip])
                        csv_writer.writerows(rows)
                        serial_number_1 += 1
                except ClientError:
                    # The region exists but is not enabled for this account.
                    print(f"The region {region} is not enabled...")
except Exception as error:
    # NOTE(review): broad catch-all — prints the error and exits quietly.
    print("An exception occurred", error)
|
#!/usr/bin/env python
"""
deletes the first line
necessary for xml output of xml.dom.ext.PrettyPrint to help
IE 6 and 7 to read the webpage in standard mode.
otherwise it falls back to the stupid quirks mode and everything is fubar
"""
def delFirstLine(fn):
    """Delete the first line of the file at path *fn*, in place.

    @fn: string of file name
    """
    with open(fn, 'r') as f:
        lines = f.readlines()
    with open(fn, 'w') as f:
        # BUG FIX: readlines() keeps each line's trailing newline, so the
        # original "'\n'.join(lines[1:])" doubled every line break. Write
        # the remaining lines back verbatim instead.
        f.writelines(lines[1:])
class UnicodeFileWriter:
    """Pass-through writer that delegates to a wrapped file-like object."""
    def __init__(self, file):
        # Keep a reference to the underlying stream.
        self._stream = file
    def write(self, data):
        """Forward *data* unchanged to the wrapped stream."""
        self._stream.write(data)
|
from keras.models import Sequential, Model, Input
from keras.layers import Dot, Dense, Dropout, Embedding, Reshape, Conv1D, MaxPooling1D
from keras.layers.merge import Concatenate
from keras.callbacks import ModelCheckpoint
from keras import metrics, utils, losses
from keras.utils.vis_utils import model_to_dot
from sklearn import metrics as sk_metrics, cross_validation
from IPython.display import SVG
import matplotlib.pyplot as plt
from pylab import plot
import numpy as np
import pandas as pd
import pickle
import warnings
warnings.filterwarnings('ignore')
k = 128
data = pd.read_csv('./mldata/ratings.dat', sep='::', engine='python', names=['uid', 'sid', 'rating', 'time'])
n_user = data.uid.unique().shape[0]
n_sub = data.sid.unique().shape[0]
print n_user, n_sub
input1 = Input(shape=(1,))
model1 = Embedding(n_user, k, input_length=1)(input1)
model1 = Reshape((k,))(model1)
input2 = Input(shape=(1,))
model2 = Embedding(n_sub, k, input_length=1)(input2)
model2 = Reshape((k,))(model2)
output = Dot(axes=1)([model1, model2])
model = Model([input1, input2], output)
model.compile(loss='mse', optimizer='adam')
SVG(model_to_dot(model).create(prog='dot', format='svg'))
train, test = cross_validation.train_test_split(data, test_size=0.1, random_state=1)
x_train = [train.uid, train.sid]
y_train = train.rating
x_test = [test.uid, test.sid]
y_test = test.rating
history = model.fit(x_train, y_train, batch_size=500, epochs=20, validation_data=(x_test, y_test))
save_path = './save/ml-dot/'
model.save(save_path + 'model.h5')
with open(save_path + 'history.pkl', 'wb') as file_history:
pickle.dump(history.history, file_history)
pd.DataFrame(history.history, columns=['loss', 'val_loss']).head(20).transpose()
pylab.plot(history.history['loss'], label='loss')
pylab.plot(history.history['val_loss'], label='val_loss')
plt.legend()
plt.ylim(0, 3)
|
from django.shortcuts import render
from static.hotel.bd.connection import conn
# Create your views here.
def suites():
    """Return (id, identifica) rows for every active suite (sit = 1)."""
    with conn.cursor() as read:
        sql = "select id, identifica from tb_suites where sit = 1"
        read.execute(sql)
        # NOTE: the local `suites` shadows this function's own name.
        suites = read.fetchall()
        read.close()
    return suites
def reserva():
    """Return every row of tb_reservas."""
    with conn.cursor() as read:
        sql = "select * from tb_reservas"
        read.execute(sql)
        dados = read.fetchall()
        # NOTE: explicit close is redundant inside the `with` block.
        read.close()
    return dados
def cliente():
    """Return (id, name) rows for every client in tb_clientes."""
    with conn.cursor() as read:
        sql = "select id, name from tb_clientes"
        read.execute(sql)
        clientes = read.fetchall()
        read.close()
    return clientes
def adicionaReserva(request):
    """Render the reservation form with all clients and vacant suites (sit = 0)."""
    with conn.cursor() as read:
        sql = "select * from tb_suites where sit = 0"
        read.execute(sql)
        dados2 = read.fetchall()
        read.close()
    return render(request, 'reservas/cadReservas.html', {'dados': cliente(), 'dados2': dados2})
def listaReserva(request):
    """Render the reservation list together with its client/suite lookup data."""
    return render(request, 'reservas/listaReservas.html', {'dados': reserva(), 'clientes': cliente(), 'suites': suites()})
def inserirReserva(request):
    """Insert a new reservation from the POSTed form, then render the list.

    NOTE(review): the bare except swallows every error (missing POST keys,
    SQL failures) and renders the same template; also, neither branch
    passes the listing context that listaReserva provides — TODO confirm.
    """
    cliente = request.POST['cliente']
    data = request.POST['data']
    data2 = request.POST['data2']
    quarto = request.POST['quarto']
    try:
        with conn.cursor() as insere:
            # Parameterized insert — values are bound, not interpolated.
            sql = "insert into tb_reservas(cliente,data_ini,data_end,quarto) values (%s, %s, %s, %s)"
            insere.execute(sql, (cliente, data, data2, quarto))
            insere.close()
            conn.commit()
            return render(request, 'reservas/listaReservas.html')
    except:
        return render(request, 'reservas/listaReservas.html')
def reservaDelete(request, id):
    """Delete the reservation with the given id, then re-render the list."""
    with conn.cursor() as delete:
        sql = "delete from tb_reservas where id = %s"
        delete.execute(sql, id)
        conn.commit()
        delete.close()
        return render(request, 'reservas/listaReservas.html',
                      {'dados': reserva(), 'clientes': cliente(), 'suites': suites()})
def reservaEdit(request, id):
    """Placeholder: editing is not implemented; just re-renders the list."""
    return render(request, 'reservas/listaReservas.html',
                  {'dados': reserva(), 'clientes': cliente(), 'suites': suites()})
def changeSit(request, id):
    """Placeholder: status change is not implemented; just re-renders the list."""
    return render(request, 'reservas/listaReservas.html',
                  {'dados': reserva(), 'clientes': cliente(), 'suites': suites()})
def editReserva(request, id):
    """Fetch one reservation by id and render the edit form with it."""
    with conn.cursor() as read:
        sql = "select * from tb_reservas where id = %s"
        read.execute(sql, id)
        dados = read.fetchall()
        read.close()
    return render(request, 'reservas/edit_reservas.html', {'dados': dados})
|
# Mirko Mantovani
class UndirectedGraph:
    """Weighted undirected graph backed by a dict-of-dicts adjacency map."""
    def __init__(self):
        self.graph = {}
    def __repr__(self):
        return 'Graph:' + str(self.graph)
    def add_node(self, node):
        """Register *node* with no edges; existing nodes are left untouched."""
        self.graph.setdefault(node, {})
    def add_edge(self, i, j, weight):
        """Connect *i* and *j* with *weight*, creating missing nodes on the fly."""
        for endpoint in (i, j):
            if endpoint not in self.graph:
                self.add_node(endpoint)
        # Store the weight in both directions (undirected).
        self.graph[i][j] = weight
        self.graph[j][i] = weight
    def get_edge(self, i, j):
        """Return the weight between *i* and *j*, or -1 when no edge exists."""
        return self.graph.get(i, {}).get(j, -1)
# Read four integers and report those divisible by 4 and 9 at the same time.
# NOTE(review): the original header comment said "2 e 9", but the code and
# the printed messages both test divisibility by 4 and 9.
print('Apresenta os números que são divisíveis por 4 e 9')
i=1
number=[] # collects the inputs that pass the divisibility test
while i<=4:
    a=int(input('Digite um número: '))
    if a%4==0 and a%9==0:
        number.append(a)
    i=i+1
if len(number) != 0:
    print('Os numeros divisiveis por 4 e 9 simuntaneamnete sao', number)
else:
    print('Nenhum numero é divisivel por 4 e 9 simutaneamente')
#!/usr/bin/env python
#
# BakeBit example for the basic functions of BakeBit 128x64 OLED (http://wiki.friendlyarm.com/wiki/index.php/BakeBit_-_OLED_128x64)
#
# The BakeBit connects the NanoPi NEO and BakeBit sensors.
# You can learn more about BakeBit here: http://wiki.friendlyarm.com/BakeBit
#
# Have a question about this example? Ask on the forums here: http://www.friendlyarm.com/Forum/
#
'''
## License
The MIT License (MIT)
BakeBit: an open source platform for connecting BakeBit Sensors to the NanoPi NEO.
Copyright (C) 2016 FriendlyARM
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import bakebit_128_64_oled as oled
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import math
import time
# Scrolling sine-wave text demo for the 128x64 OLED: each character of the
# message is drawn at a vertical offset taken from a sine of its x position,
# and the whole string scrolls leftwards until interrupted.
oled.init() #initialze SEEED OLED display
oled.clearDisplay() #clear the screen and set start position to top left corner
oled.setNormalDisplay() #Set display to normal mode (i.e non-inverse mode)
oled.setHorizontalMode()
width=128
height=64
# Create image buffer.
# Make sure to create image with mode '1' for 1-bit color.
image = Image.new('1', (width, height))
# Load default font.
font = ImageFont.load_default()
# Alternatively load a TTF font. Make sure the .ttf font file is in the same directory as this python script!
# Some nice fonts to try: http://www.dafont.com/bitmap.php
# font = ImageFont.truetype('Minecraftia.ttf', 8)
# Create drawing object.
draw = ImageDraw.Draw(image)
# Define text and get total width.
text = 'Live a noble and honest life. Reviving past times in your old age will help you to enjoy your life again.'
maxwidth, unused = draw.textsize(text, font=font)
# Set animation and sine wave parameters.
amplitude = height/4
offset = height/2 - 4
velocity = -2 # pixels scrolled left per frame
startpos = width
# Animate text moving in sine wave.
print('Press Ctrl-C to quit.')
pos = startpos
while True:
    try:
        # Clear image buffer by drawing a black filled box.
        draw.rectangle((0,0,width,height), outline=0, fill=0)
        # Enumerate characters and draw them offset vertically based on a sine wave.
        x = pos
        for i, c in enumerate(text):
            # Stop drawing if off the right side of screen.
            if x > width:
                break
            # Calculate width but skip drawing if off the left side of screen.
            if x < -10:
                char_width, char_height = draw.textsize(c, font=font)
                x += char_width
                continue
            # Calculate offset from sine wave.
            y = offset+math.floor(amplitude*math.sin(x/float(width)*2.0*math.pi))
            # Draw text.
            draw.text((x, y), c, font=font, fill=255)
            # Increment x position based on chacacter width.
            char_width, char_height = draw.textsize(c, font=font)
            x += char_width
        # Draw the image buffer.
        oled.drawImage(image)
        # Move position for next frame.
        pos += velocity
        # Start over if text has scrolled completely off left side of screen.
        if pos < -maxwidth:
            pos = startpos
        # Pause briefly before drawing next frame.
        time.sleep(0.1)
    except KeyboardInterrupt:
        break
    except IOError:
        # NOTE(review): any display I/O error simply ends the animation.
        print ("Error")
        break
|
#!/usr/local/bin/python2.7
# encoding=utf8
'''
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
first should = return [0, 1]
second should = return 2 7
'''
# One-pass two-sum (Python 2): for each number seen, remember the value
# that would complete the target, keyed by that needed complement.
nums = [2, 7, 11, 15]
target = 9
dic = {} # maps (target - num) -> index of num
for i, num in enumerate(nums):
    if num in dic:
        print [dic[num], i] ## printing the index
        print nums[dic[num]], nums[i] ## printing the actual element
    else:
        dic[target - num] = i
|
"""
GSSHA-based model using gsshapy.
"""
import os
import geopandas as gpd
import param
from gsshapy.modeling import GSSHAModel
class RoughnessSpecification(param.Parameterized):
    """Abstract class for a parameterized specification of surface roughness."""
    __abstract = True
    # If we could modify code in gsshpy we could actually do something useful here
    # (e.g. replacing code in GSSHAModel.set_roughness()), but probably for this initial
    # wrapping we should just return the right combo of arguments for GSSHAModel.
    def get_args(self):
        """Return the roughness-related keyword arguments for GSSHAModel."""
        raise NotImplementedError
class UniformRoughness(RoughnessSpecification):
    """Roughness specified as a uniform constant over an area."""
    # TODO: what default?
    value = param.Number(default=0.04, doc="""
        Value of uniform manning's n roughness for grid.""")
    def get_args(self):
        """Return kwargs with only 'roughness' set; grid options are nulled."""
        return {
            'roughness': self.value,
            'land_use_grid': None,
            'land_use_to_roughness_table': None,
            'land_use_grid_id': None
        }
# Need to rename/reorganize these - but can't quite make sense of the existing names
# and docstrings. Would need to study code...
class GriddedRoughness(RoughnessSpecification):
    """Roughness specified as a land-use grid file."""
    __abstract = True
    land_use_grid = param.FileSelector(default=None, doc="""
        Path to land use grid to use for roughness.""")
    def get_args(self):
        """Concrete subclasses decide how the grid maps to roughness values."""
        raise NotImplementedError
class GriddedRoughnessTable(GriddedRoughness):
    """Roughness specified as a land-use grid file and roughness table."""
    land_use_to_roughness_table = param.FileSelector(default=None, doc="""
        Path to land use to roughness table.""")
    def get_args(self):
        """Return kwargs selecting grid + explicit table; other options nulled."""
        return {
            'roughness': None,
            'land_use_grid': self.land_use_grid,
            'land_use_to_roughness_table': self.land_use_to_roughness_table,
            'land_use_grid_id': None
        }
class GriddedRoughnessID(GriddedRoughness):
    """Roughness specified as a land-use grid file and ID of default GHSSApy grid."""
    land_use_grid_id = param.String(default='nlcd', doc="""
        ID of default grid supported in GSSHApy. """)
    def get_args(self):
        """Return kwargs selecting grid + built-in grid id; other options nulled."""
        return {
            'roughness': None,
            'land_use_grid': self.land_use_grid,
            'land_use_to_roughness_table': None,
            'land_use_grid_id': self.land_use_grid_id
        }
# Hmm, the "required for new model" bit that was in every docstring makes me think there might
# be another option to consider too, but I haven't got that far yet...
class CreateModel(param.Parameterized):
    """Abstract base class for creating models."""
    __abstract = True
    project_base_directory = param.Foldername(default=os.getcwd(), doc="""
        Base directory to which name will be appended to write project files to.""", precedence=0)
    project_name = param.String(default='vicksburg_south', doc="""
        Name of project. Required for new model.""")
    def _map_kw(self,p):
        """Map parameter values to model constructor kwargs.

        Side effect: creates the project directory if missing.
        """
        kw = {}
        kw['project_directory'] = os.path.abspath(os.path.join(p.project_base_directory, p.project_name))
        # Currently allows overwriting existing files
        os.makedirs(kw['project_directory'],exist_ok=True)
        kw['project_name'] = p.project_name
        return kw
    def __call__(self,**params):
        """Subclasses build and return the concrete model instance."""
        raise NotImplementedError
# TODO: precedence
# TODO: check about abs path requirement
# TODO: defaults are strange e.g. ./vicksburg_watershed/*.shp
class CreateGSSHAModel(CreateModel):
    """Create a new GSSHA model.

    Calling an instance resolves the parameters and returns a GSSHAModel
    built from them.
    """
    mask_shapefile = param.FileSelector(default='./vicksburg_watershed/watershed_boundary.shp',
                                        path='./*/*.shp', doc="""
       Path to watershed boundary shapefile. Required for new model. Typically a *.shp file.""", precedence=0.1)
    grid_cell_size = param.Number(default=None, precedence=0.2)
    # TODO: specify acceptable file extensions?
    elevation_grid_path = param.FileSelector(
        doc="""
       Path to elevation raster used for GSSHA grid. Required for new model. Typically a *.ele file.""", precedence=0.3)
    # TODO: paramnb ClassSelector should cache instances that get
    # created or else editing parameters on non-default options is
    # confusing.
    roughness = param.ClassSelector(RoughnessSpecification,default=UniformRoughness(),doc="""
       Method for specifying roughness""", precedence=-1)
    out_hydrograph_write_frequency = param.Number(default=10, bounds=(1,60), doc="""
       Frequency of writing to hydrograph (minutes). Sets HYD_FREQ card. Required for new model.""", precedence=0.8)
    def _map_kw(self,p):
        # Extend the base mapping with GSSHA-specific keyword arguments.
        kw = super(CreateGSSHAModel,self)._map_kw(p)
        kw['mask_shapefile'] = p.mask_shapefile
        kw['grid_cell_size'] = p.grid_cell_size
        kw['elevation_grid_path'] = os.path.abspath(p.elevation_grid_path)
        kw['out_hydrograph_write_frequency'] = p.out_hydrograph_write_frequency
        # Merge in the roughness-strategy kwargs (see RoughnessSpecification).
        kw.update(p.roughness.get_args())
        return kw
    def __call__(self,**params):
        p = param.ParamOverrides(self,params)
        return GSSHAModel(**self._map_kw(p))
#class create_framework(param.ParameterizedFunction):
# pass
|
#This program determines if a number is prime or not
#(the original said "cousin" -- a mistranslation of Spanish "primo", which
# means both "prime" and "cousin")
def is_prime(n):
    """Return True if n is prime (n > 1 with no divisor other than 1 and n)."""
    if n <= 1:
        return False
    # Trial division up to sqrt(n): a factor above sqrt(n) would imply a
    # cofactor below it, so checking further is redundant. The original
    # scanned all the way to n and never broke out early.
    for divider in range(2, int(n ** 0.5) + 1):
        if n % divider == 0:
            return False
    return True

if __name__ == '__main__':
    # Guarded so importing this module does not block on input().
    number = int(input('Dame un numero para determinar si es primo: '))
    if is_prime(number):
        print('The number {0} is prime.'.format(number))
    else:
        print('The number {0} is not prime.'.format(number))
|
#To declare a global variable inside a function, use the 'global' keyword.
#NOTE(review): this module-level 'global' statement is a no-op -- 'global'
#only has an effect inside a function body.
global variavel
#Global variable
f= 0
print(f)
f = "abc"
print(f)
#How to concatenate a string with a number
print("Isto é uma string " + str(123))
#Defining a function (equivalent to a method in C#)
#Local vs. global variable demo: the function rebinds the global 'f'.
def PrimeiraFuncao():
    global f
    f= "def"
    print(f)
PrimeiraFuncao()
print(f)
|
"""
This module is part of the 'farben' package,
which is released under MIT license.
"""
from ..palette import Palette
from ..utils import hex2rgb
class Copic(Palette):
    """
    Holds Copic® utilities: scrapes the marker colors of the 'classic',
    'sketch' and 'ciao' sets from copic.de into `sets`.
    """

    # Identifier
    identifier = "copic"

    # Dictionary holding fetched colors, keyed by set name
    sets = {
        "classic": [],
        "sketch": [],
        "ciao": [],
    }

    # Copyright notices embedded into generated palette files
    copyright_notices = {
        "xml": "\n Copic® and related trademarks are the property of\n "
        + "Too Marker Corporation (https://www.toomarker.co.jp/en)\n ",
        "gpl": "##\n# Copic® and related trademarks are the property of\n"
        + "# Too Marker Corporation (https://www.toomarker.co.jp/en)\n##\n",
    }

    def fetch_all(self) -> None:
        """
        Fetches all Copic® colors at once

        Available sets:
        - 'classic' (currently 289 colors)
        - 'sketch'
        - 'ciao'

        :return: None
        """
        self.fetch("classic")
        self.fetch("sketch")
        self.fetch("ciao")

    def fetch(self, set_name: str) -> None:
        """
        Fetches Copic® colors for one set and appends them to
        self.sets[set_name]

        :param set_name: str Name of color set
        :return: None
        """
        # One URL to rule them all
        base_url = f"https://copic.de/copic-{set_name}-farb/bestellraster"
        # Scrape Copic® colors from HTML (get_html presumably comes from the
        # Palette base class -- TODO confirm).
        soup = self.get_html(base_url)
        for color_block in soup.find_all("div", {"class": "copic-colors__color-block"}):
            # Hex value is taken as the second whitespace token of the cap's
            # inline style attribute -- assumes site markup stays stable.
            hexa = color_block.find("div", {"class": "copic-colors__cap"})[
                "style"
            ].split()[1]
            name = color_block.find("div", {"class": "copic-colors__color-name"})
            color = {}
            color["code"] = color_block.find("strong").text
            color["rgb"] = f"rgb({hex2rgb(hexa)})"
            color["hex"] = hexa.upper()
            color["name"] = name.text
            self.sets[set_name].append(color)
            print(f'Loading {color["code"]} in set "{set_name}" .. done')
|
from agents.agent import Agent
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from torch.utils.data import SubsetRandomSampler, BatchSampler
import numpy as np
class PPOAgent(Agent):
    """Proximal Policy Optimization agent with separate actor/critic networks.

    Transitions are accumulated via store_transition() and consumed by
    train(), which performs a clipped-surrogate PPO update over shuffled
    mini-batches and then clears the rollout buffers.
    """

    def __init__(self, state_space: int, action_space: int, a_hidden=50, c_hidden=50,
                 a_lr=1e-3, c_lr=3e-3, gamma=0.9, clip_e=0.2):
        # gamma: discount factor; clip_e: PPO clipping epsilon.
        self.state_space = state_space
        self.action_space = action_space
        self.gamma = gamma
        self.clip_e = clip_e
        self.actor = Actor(state_space, a_hidden, action_space)
        self.actor_optim = optim.Adam(self.actor.parameters(), lr=a_lr)
        self.critic = Critic(state_space, c_hidden)
        self.critic_optim = optim.Adam(self.critic.parameters(), lr=c_lr)
        # Rollout buffers, cleared after every train() call.
        self.states = []
        self.new_states = []
        self.actions = []
        self.a_probs = []
        self.rewards = []

    def act(self, state):
        """Sample an action from the current policy.

        Returns (action_index, probability_of_that_action).
        """
        state = torch.tensor(state, dtype=torch.float).view(-1, self.state_space)  # 1st dimension is batch number
        with torch.no_grad():
            probs = self.actor(state)
        dist = Categorical(probs)
        action = dist.sample()
        return action.item(), probs[:, action.item()].item()

    def train(self, batch_size=8):
        """Run one PPO update over the stored rollout, then clear buffers."""
        # Unroll rewards into discounted returns (reverse accumulation).
        # FIX: force a float array. With integer rewards np.array(...) was an
        # int array, so `rewards[i] += gamma * reward` silently truncated the
        # discounted returns to integers.
        rewards = np.array(self.rewards, dtype=np.float64)
        reward = 0
        for i in reversed(range(len(self.rewards))):
            rewards[i] += self.gamma * reward
            reward = rewards[i]
        states = torch.tensor(self.states, dtype=torch.float)
        actions = torch.tensor(self.actions, dtype=torch.long).view(-1, 1)
        old_probs = torch.tensor(self.a_probs, dtype=torch.float).view(-1, 1)
        rewards = torch.tensor(rewards, dtype=torch.float).view(-1, 1)
        for batch in BatchSampler(SubsetRandomSampler(range(len(self.states))), batch_size, drop_last=False):
            # Advantage = discounted return - value baseline; detached in
            # place so the actor loss does not backprop into the critic.
            Rt = rewards[batch]
            V = self.critic(states[batch])
            advantage = Rt - V
            advantage.detach_()  # Inplace detach autograd
            # Clipped-surrogate PPO loss.
            new_probs = self.actor(states[batch]).gather(1, actions[batch])
            prob_ratio = new_probs / old_probs[batch]
            surr = prob_ratio * advantage
            clipped_surr = torch.clamp(prob_ratio, 1 - self.clip_e, 1 + self.clip_e) * advantage
            actor_loss = -torch.min(surr, clipped_surr).mean()  # Mean is here for when batch_size > 1
            # Update actor
            self.actor_optim.zero_grad()
            actor_loss.backward()
            self.actor_optim.step()
            # Update critic against the same discounted returns.
            critic_loss = F.mse_loss(Rt, V)
            self.critic_optim.zero_grad()
            critic_loss.backward()
            self.critic_optim.step()
        self._clear_buffers()

    def store_transition(self, state, new_state, action, a_prob, reward):
        """Append one environment transition to the rollout buffers."""
        self.states.append(state)
        self.new_states.append(new_state)
        self.actions.append(action)
        self.a_probs.append(a_prob)
        self.rewards.append(reward)

    def _clear_buffers(self):
        """Empty all rollout buffers."""
        self.states = []
        self.new_states = []
        self.actions = []
        self.a_probs = []
        self.rewards = []
class Actor(nn.Module):
    """Policy network: maps a state vector to a categorical action
    distribution (softmax over actions)."""

    def __init__(self, input, hidden, output):
        super().__init__()
        self.fc1 = nn.Linear(input, hidden)
        self.fc2 = nn.Linear(hidden, output)

    def forward(self, x):
        hidden_act = F.relu(self.fc1(x))
        return F.softmax(self.fc2(hidden_act), dim=1)
class Critic(nn.Module):
    """Value network: maps a state vector to a single scalar state value."""

    def __init__(self, input, hidden):
        super().__init__()
        self.fc1 = nn.Linear(input, hidden)
        self.fc2 = nn.Linear(hidden, 1)

    def forward(self, x):
        hidden_act = F.relu(self.fc1(x))
        return self.fc2(hidden_act)
|
# Fecha: 12 de Abril de 2016
# Autora: Yurani Melisa Palacios Palacios
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Background
# In this ssesion we will complete our study of the 1D random walk and move on to applications in higher
# dimensions. In particular we will prepare our way to study the properties of Brownian motion 2D
# Problems
# 1. Modify the function "rwalk1d" that you created last session, to account for the fact that the used
# coin is biased
# def rwalk1d(N, p)
def rwalk1d(N, p):
    """Simulate a 1D random walk of N steps with a biased coin.

    Each step is +1 with probability p and -1 otherwise; returns the
    final displacement from the origin.
    """
    steps = (1 if np.random.random() < p else -1 for _ in range(N))
    return sum(steps)
# 2. Run n random walks in one dimension and calculate the mean and standard
#    deviation of the final position; use n=100, N=1000 and p=0.5, 0.7, 0.2.
n = 100
N = 1000
p = 0.2
s = 0
r = 0
for i in range(n):
    x = rwalk1d(N, p)
    s = s + x
    r = r + x**2
# FIX: normalize by the number of walks n, not the number of steps N --
# s is the sample mean and r the sample standard deviation over the n
# simulated walks (the original divided by N = 1000 instead of n = 100,
# making the comparison with the theory below off by a factor of 10).
s = s / float(n)
r = r / float(n)
r = np.sqrt(r - s**2)
# FIX: print() as a function (Python 3); the original used Python 2 print
# statements while the rest of the file uses print().
print(s, r)
# Theoretical values: mean N*(2p-1), std sqrt(4*N*p*(1-p)).
print(N * (2 * p - 1), np.sqrt(4 * N * p * (1 - p)))
# 3. Histogram of final positions: h[xf + N] counts walkers ending at
#    position xf; the +N offset maps positions -N..N onto indices 0..2N.
N = 1000
n = 1000
p = 0.5
x = np.arange(2 * N + 1) - N
h = np.zeros(2 * N + 1)
for i in range(n):
    xf = rwalk1d(N, p)
    h[xf + N] += 1
# 4. Plot h as a function of i. Explain
plt.plot(x, h)
plt.show()
|
import re

# Regex validation rules for each passport field (Advent of Code 2020 day 4).
validation_schemes = {
    "byr": "^(19[2-9][0-9]|200[0-2])$",
    "iyr": "^(201[0-9]|2020)$",
    "eyr": "^(202[0-9]|2030)$",
    # FIX: the two alternatives must share one group. The original pattern
    # "^(...cm)|(...in)$" anchored '^' only to the cm branch and '$' only to
    # the in branch, so e.g. "150cmXX" was accepted by re.match.
    "hgt": "^((1[5-8][0-9]|19[0-3])cm|(59|6[0-9]|7[0-6])in)$",
    "hcl": "^#[0-9a-f]{6}$",
    "ecl": "^(amb|blu|brn|gry|grn|hzl|oth)$",
    "pid": "^[0-9]{9}$",
    "cid": ""  # cid is optional: the empty pattern matches any value
}
# Fields that must be present for a passport to count as valid.
required = {'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'}
def parse_passports(filename, required, check_values=False):
    """Count valid passports in *filename*.

    Passports are separated by blank lines. A passport is valid when every
    key in *required* is present; when check_values is True, each field's
    value must additionally pass validate_field().
    """
    with open(filename) as handle:
        batches = handle.read().split('\n\n')
    valid_count = 0
    for passport in batches:
        present = {key.strip(':') for key in re.findall(r"[a-z]{3}:", passport)}
        if not required.issubset(present):
            continue
        if not check_values:
            valid_count += 1
        elif all(validate_field(field) for field in passport.split()):
            valid_count += 1
    return valid_count
def validate_field(field):
    """Check one 'key:value' pair against its validation regex.

    Returns a truthy match object on success; False/None otherwise.
    """
    key, value = field.split(':')
    pattern = validation_schemes.get(key)
    if pattern is None:
        return False
    return re.match(pattern, value)
if __name__ == '__main__':
    # PART 1: presence of required fields only. Expects the AoC sample and
    # puzzle input files in the working directory. (Python 2 print syntax.)
    assert parse_passports('p1-test-input.txt', required) == 2
    print 'day 4 part 1: {}'.format(parse_passports('input.txt', required))
    # PART 2: field values must also match their validation regexes.
    assert parse_passports('p2-test-valid.txt', required, True) == 4
    assert parse_passports('p2-test-invalid.txt', required, True) == 0
    print 'day 4 part 2: {}'.format(parse_passports('input.txt', required, True))
|
"""
@author: xuanke
@time: 2020/1/11
@function: 对sys模块进行操作
"""
import sys
def test_dynamic():
    """
    Demonstrate sys attributes whose values vary per run: argv
    (command-line arguments; use argparse for anything complex), path
    (module search path), and modules (already-loaded modules).
    :return:
    """
    for dynamic_attr in (sys.argv, sys.path, sys.modules):
        print(dynamic_attr)
def test_static():
    """
    Demonstrate sys attributes fixed for a given interpreter build:
    executable path, platform name, built-in module names, and
    implementation info.
    :return:
    """
    for static_attr in (sys.executable, sys.platform,
                        sys.builtin_module_names, sys.implementation):
        print(static_attr)
def test_method():
    """
    Demonstrate common sys functions: default encoding, reference counts,
    object sizes, the stdout/stderr/stdin streams, and redirecting stdout
    to a file.

    NOTE: blocks waiting for one line on stdin.
    :return:
    """
    # Default text encoding of the interpreter.
    print(sys.getdefaultencoding())
    # Reference count of an object (includes the temporary call argument ref).
    obj_1 = [1, 2]
    print(sys.getrefcount(obj_1))
    # Shallow size of the object in bytes.
    print(sys.getsizeof(obj_1))
    # Exit the program
    # sys.exit(0)
    # Writing to stdout directly vs. via print().
    sys.stdout.write("I am test\n")
    print("I am test2")
    # Error output goes to stderr.
    sys.stderr.write("I am error")
    # Read a line from stdin (compare with input()).
    print("please input something")
    content = sys.stdin.readline()
    print(content)
    # Redirect stdout to a file, then restore it.
    # FIX: keep a handle and close it after restoring, so the file's buffer
    # is flushed and the descriptor released (the original leaked the open
    # file object).
    stdout_1 = sys.stdout
    redirect_file = open('text1.txt', 'w')
    sys.stdout = redirect_file
    print("ddd")
    print("qqq")
    sys.stdout = stdout_1
    redirect_file.close()
    print("111")
if __name__ == '__main__':
    # Run the interactive demo (test_method waits for a line on stdin).
    test_method()
|
import json
from sseclient import SSEClient as EventSource
from kafka import KafkaProducer
def get_wiki_recent_changes(producer):
    """Stream Wikimedia recent-change SSE events and forward each parsed
    JSON change to the 'wiki-changes' Kafka topic until interrupted.

    :param producer: a KafkaProducer with a JSON value serializer.
    """
    url = 'https://stream.wikimedia.org/v2/stream/recentchange'
    try:
        for event in EventSource(url):
            if event.event == 'message':
                try:
                    change = json.loads(event.data)
                except ValueError:
                    # Non-JSON payloads (e.g. empty keep-alives) are skipped.
                    pass
                else:
                    # Send msg to topic wiki-changes
                    producer.send('wiki-changes', change)
    except KeyboardInterrupt:
        print("process interrupted")
def connect_kafka_producer():
    """Create a KafkaProducer for kafka:9092 that JSON-encodes values.

    NOTE(review): on connection failure the exception is only printed and
    None is returned (best-effort by design) -- callers must handle a
    None producer.
    """
    _producer = None
    try:
        _producer = KafkaProducer(bootstrap_servers=['kafka:9092'],
                                  api_version=(0, 10),
                                  value_serializer=lambda v: json.dumps(v).encode('utf-8'))
    except Exception as ex:
        print('Exception while connecting Kafka')
        print(str(ex))
    finally:
        # Returned from finally: _producer is None when connection failed.
        return _producer
if __name__ == '__main__':
    # Connect (may yield None on failure) and stream until interrupted.
    kafka_producer = connect_kafka_producer()
    get_wiki_recent_changes(kafka_producer)
|
from sklearn import tree
from sklearn import svm
from sklearn import neighbors
from sklearn import discriminant_analysis
from sklearn import linear_model
# Baseline classifier plus three alternatives for comparison.
dt = tree.DecisionTreeClassifier()
# CHALLENGE - create 3 more classifiers...
# 1
lsvc = svm.LinearSVC()
# 2
kn = neighbors.KNeighborsClassifier(3)
# 3
svc = svm.SVC()
classifiers = [ dt, lsvc, kn, svc ]
# Training data: [height, weight, shoe_size] per person.
X = [[181, 80, 44], [177, 70, 43], [160, 60, 38], [154, 54, 37], [166, 65, 40],
     [190, 90, 47], [175, 64, 39],
     [177, 70, 40], [159, 55, 37], [171, 75, 42], [181, 85, 43]]
male = 'male'
female = 'female'
Y = [male, male, female, female, male, male, female, female,
     female, male, male]
# CHALLENGE - ...and train them on our data
# Fit each classifier and print its prediction for one unseen sample.
for clf in classifiers:
    clf = clf.fit(X, Y)
    prediction = clf.predict([[190, 70, 43]])
    print("%s %s" % (clf, prediction))
# CHALLENGE compare their results and print the best one!
from graphs.graph import Graph
from graphs.node import Node
from graphs.edge import Edge
from graphs.tests.test_utils import compare_graphs, get_default_graph
from pytest import raises
def test_find_node_in_graph():
    # BFS should return the shortest path a -> c -> f in the default graph.
    graph = get_default_graph()
    path = graph.breadth_first_search(start=Node("a"), target=Node("f"))
    assert path == ["a", "c", "f"]
def test_target_not_in_graph():
    """Searching for a node absent from the graph must raise ValueError."""
    graph = get_default_graph()
    # FIX: removed an unused `expected_path` local left over from a
    # copy-paste of the happy-path test above.
    with raises(ValueError):
        graph.breadth_first_search(start=Node("a"), target=Node("g"))
def test_start_not_in_graph():
    # Starting from a node absent from the graph must raise ValueError.
    with raises(ValueError):
        get_default_graph().breadth_first_search(start=Node("w"), target=Node("f"))
def test_cyclic_graph_small():
    # A two-node cycle must not trap the search.
    graph = Graph()
    graph.make_unweighted_from_list([["a", "b"], ["b", "a"]])
    path = graph.breadth_first_search(start=Node("a"), target=Node("b"))
    assert path == ["a", "b"]
def test_cyclic_graph_large():
    # Several cycles back to "a"; BFS should still find the shortest route.
    edges = [["a", "b"], ["b", "c"], ["c", "d"], ["d", "e"],
             ["d", "a"], ["a", "d"], ["e", "z"], ["z", "a"]]
    graph = Graph()
    graph.make_unweighted_from_list(edges)
    path = graph.breadth_first_search(start=Node("a"), target=Node("z"))
    assert path == ["a", "d", "e", "z"]
def test_no_path_from_start_to_target():
    # "a" is unreachable from "b" in the directed default graph.
    graph = get_default_graph()
    assert graph.breadth_first_search(start=Node("b"), target=Node("a")) == []
def test_cyclic_no_path_from_start_to_target():
    # "c" is isolated, so the search over the a<->b cycle finds no path.
    graph = Graph()
    graph.make_unweighted_from_list([["a", "b"], ["b", "a"], ["c"]])
    assert graph.breadth_first_search(start=Node("a"), target=Node("c")) == []
def test_visited_reset():
    """BFS must leave every node's `visited` flag cleared afterwards."""
    graph = get_default_graph()
    for node in graph.graph:
        # FIX: `not node.visited` instead of the `== False` anti-idiom.
        assert not node.visited
    # FIX: the search's return value was bound to an unused local; the call
    # is made only for its side effects on the visited flags.
    graph.breadth_first_search(start=Node("b"), target=Node("a"))
    for node in graph.graph:
        assert not node.visited
def test_undirected_graph():
    # Same shortest path when the default graph is built undirected.
    graph = get_default_graph(directed=False)
    path = graph.breadth_first_search(start=Node("a"), target=Node("f"))
    assert path == ["a", "c", "f"]
def test_start_is_target():
    # Direction should not matter when start and target coincide.
    graph = get_default_graph(directed=False)
    assert graph.breadth_first_search(start=Node("f"), target=Node("f")) == ["f"]
|
from typing import List
class Solution:
    def removeElement(self, nums: List[int], val: int) -> int:
        """Remove all occurrences of `val` from `nums` in place.

        Fast/slow two-pointer scan: O(n) time, O(1) extra space.
        Returns the new logical length k; nums[:k] holds the kept elements.
        """
        index = 0
        for i in range(len(nums)):
            if nums[i] != val:
                nums[index] = nums[i]
                index += 1
        # FIX: removed the stray debug print(index, nums) -- a solution
        # method should not write to stdout.
        return index
if __name__ == '__main__':
    # Quick manual check: strip the value 2 from [3, 2, 2, 3].
    nums = [3, 2, 2, 3]
    val = 2
    solver = Solution()
    solver.removeElement(nums, val)
|
from gym.envs.registration import register

# Register the custom environment with Gym so it can be created via
# gym.make('SingleVideoEnv-v0'); the class lives at SVE/SVE.py.
register(
    id='SingleVideoEnv-v0',
    entry_point='SVE.SVE:SingleVideoEnv',
)
#!/usr/bin/python
# coding=utf-8
import sys
import getopt
import os
import multiprocessing
#Función para abrir y leer archivo ingresado por terminal.
def LecturaArchivo(ruta, tamanio, cola_inicial):
    """Read the file at `ruta` in blocks of `tamanio` bytes, putting every
    block (including the final short or empty one) onto `cola_inicial`."""
    descriptor = os.open(ruta, os.O_RDONLY)
    while True:
        bloque = os.read(descriptor, tamanio)
        cola_inicial.put(bloque)
        # A block shorter than requested means end-of-file was reached.
        if len(bloque) < tamanio:
            break
    os.close(descriptor)
#Función para contar los elementos de un arreglo cuyo contenido es el archivo leído.
def ContadorPalabras(lectura):
    """Return the number of whitespace-separated words in `lectura`.

    FIX: the original looped `for caracter in "\\n"` (a one-character
    string) replacing newlines with spaces before splitting. That loop was
    unnecessary -- split() with no arguments already splits on any
    whitespace, newlines included -- and str-typed replace arguments broke
    on the bytes blocks that os.read() produces under Python 3. Plain
    split() works for both str and bytes input.
    """
    return len(lectura.split())
#Función para colocar las palabras en la cola resultante.
def PalabrasCola(cola_inicial, cola_final):
    """Drain `cola_inicial`, counting the words of each block and putting
    each per-block count onto `cola_final`.

    NOTE(review): qsize() is racy across processes -- if this worker
    outruns the file reader it may exit before all blocks are queued; a
    sentinel value would be more robust. TODO confirm intended design.
    """
    while cola_inicial.qsize() != 0:
        # Take one block of file data off the input queue.
        lectura = cola_inicial.get()
        # Count its words via ContadorPalabras.
        palabras = ContadorPalabras(lectura)
        # Publish the per-block word count on the result queue.
        cola_final.put(palabras)
    return
#Función para apertura y ejecución de procesos hijos.
def AperturaProcesos():
    """Spawn `numero_procesos` worker processes running PalabrasCola over
    the module-level queues; each is started and the list is returned."""
    procesos = []
    for _ in range(numero_procesos):
        trabajador = multiprocessing.Process(
            target=PalabrasCola, args=(cola_entrada, cola_salida))
        procesos.append(trabajador)
        trabajador.start()
    return procesos
#Función para vincular y enlazar procesos en ejecución.
def UnionProcesos(listado_procesos):
    """Block until every process in `listado_procesos` has finished."""
    for proceso in listado_procesos:
        proceso.join()
#Función de ayuda al usuario.
def OpcAyuda():
    """Print usage instructions and exit the program (Python 2 prints)."""
    print "\n Ejecución de Programa:\n"
    mensaje = "Modo de uso: ./procesos.py -f [archivo o ruta de archivo]"
    mensaje += " -n [tamaño en bytes para lectura de archivo en bloques]"
    mensaje += " -p [número de procesos para ejecución de archivo]\n"
    print mensaje
    print "Ejemplo: ./procesos.py -f prueba.txt -n 1024 -p 2\n"
    # Exit the program after showing help.
    exit(0)
#Creación de colas por multiprocesamiento.
cola_entrada = multiprocessing.Queue()
cola_salida = multiprocessing.Queue()
"""Uso de getopt para indicar el archivo o ruta de archivo por consola y
el número de bytes para la lectura en bloques del mismo."""
opciones, argumentos = getopt.getopt(sys.argv[1:], "f:n:p:h")
"""Bucle para recorrer el arreglo y localizar el archivo o
ruta de archivo, y el número de bytes ingresados."""
ruta_archivo = ""
valor_bytes = 0
numero_procesos = 2
for i in opciones:
if i[0] == "-h":
#Llamada a función de ayuda para guiar al usuario.
OpcAyuda()
if i[0] == "-f":
#La pos. 1 del arreglo constituye la dirección del archivo.
ruta_archivo = i[1]
if i[0] == "-n":
#La pos. 1 del arreglo constituye los "x" bytes para la lectura en bloques del archivo.
valor_bytes = int(i[1])
if i[0] == "-p":
numero_procesos = int(i[1])
#Llamada a función AperturaProcesos para la puesta en marcha y ejeción de procesos.
listado_procesos = AperturaProcesos()
#Llamada a función LecturaArchivo para abrir y leer archivo.
LecturaArchivo(ruta_archivo, int(valor_bytes), cola_entrada)
#Llamada a función UnionProcesos para enlazar procesos.
UnionProcesos(listado_procesos)
#Contador de palabras.
nro_palabras = 0
#Mientras que la cola salida no se encuentre vacía:
while cola_salida.qsize() != 0:
#Sumatoria del número de palabras que contiene la cola salida.
nro_palabras = nro_palabras + int(cola_salida.get())
print "Palabras totales del archivo ingresado:", nro_palabras, "palabras."
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 17 22:16:31 2018

@author: chakyu

Pauses briefly, clears the terminal, and reports the platform name.
"""
import sys
import os
import time
# Short pause before clearing the screen.
time.sleep(2.5)
# 'clear' is a POSIX shell command; Windows would need 'cls'.
os.system('clear')
print('-'*50)
print('The platform you are on is: %s' % sys.platform)
#Riccardo Seppi - MPE - HEG (2019) - 25 October
#This code reads halo masses from DM simulations (GLAM)
#builds HMF and fits them to models with fixed cosmological parameters
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import uniform
from scipy.stats import norm
from bayescorner import bayescorner
# Read the mass-function histograms produced upstream.
infile='mass_histogram0096.txt'
# Second line of the file: redshift, Omega_m, h, and the Xoff split value.
cosmo_params = np.loadtxt(infile, skiprows=1, max_rows=1, dtype=float)
z, Omega0, hubble, Xoff = cosmo_params
print(cosmo_params)
# Flat cosmology matching the simulation (H0 in km/s/Mpc = 100*h).
params = {'flat': True, 'H0': hubble*100, 'Om0': Omega0, 'Ob0': 0.049, 'sigma8': 0.828, 'ns': 0.96}
mass_data = np.loadtxt(infile, skiprows=3, dtype=float)
print(mass_data[::10,:-1])
# Columns 0-3: full sample; 4-7 and 8-11: the two Xoff subsamples
# (plot labels later suggest low-Xoff then high-Xoff -- verify upstream).
mass_bins_pl = mass_data[:,0]
total = mass_data[:,1]
mass_number = mass_data[:,2]
counts_error = mass_data[:,3]
mass_bins_pl1 = mass_data[:,4]
total1 = mass_data[:,5]
mass_number1 = mass_data[:,6]
counts_error1 = mass_data[:,7]
mass_bins_pl2 = mass_data[:,8]
total2 = mass_data[:,9]
mass_number2 = mass_data[:,10]
counts_error2 = mass_data[:,11]
#Now I want to fit it
#consider the model (comparat17, tinker08...)
#NB: z will have to be the same of the simulation analyzed!!!
from colossus.lss import mass_function as mf
from colossus.cosmology import cosmology
from colossus.lss import peaks
#cosmology.setCosmology('planck18')
cosmology.addCosmology('myCosmo', params) # params was defined above from the input file
cosmo=cosmology.setCosmology('myCosmo')
#print(cosmo.rho_m(0.0))
# Bhattacharya 2011 fit parameters used as starting values / priors.
A0 = 0.333
a0 = 0.788
p0 = 0.807
q0 = 1.795
def mass_function_rseppi(Mass,A0,a0,p0,q0):
    """Bhattacharya-2011-style halo mass function dn/dlnM.

    :param Mass: halo mass(es), compared against plots labelled Msun/h
    :param A0, a0, p0, q0: multiplicity-function fit parameters
    Uses the module-level redshift `z` and the current colossus cosmology.
    Returns dn/dlnM (plots label the units as (Mpc/h)^-3).
    """
    cosmo=cosmology.getCurrent()
    delta_c = peaks.collapseOverdensity(z=z)
    R = peaks.lagrangianR(Mass)
    sigma = cosmo.sigma(R=R,z=z)
    # Peak height nu = delta_c / sigma(M, z).
    nu = delta_c / sigma
    nu2 = nu**2
    zp1 = 1.0+z
    # Redshift scaling of the amplitude/shape parameters.
    A = A0 * zp1**-0.11
    a = a0 * zp1**-0.01
    p = p0
    q = q0
    # Multiplicity function f(nu).
    f = A * np.sqrt(2 / np.pi) * np.exp(-a * nu2 * 0.5) * (1.0 + (a*nu2)**-p) * (nu * np.sqrt(a))**q
    d_ln_sigma_d_ln_R = cosmo.sigma(R, z, derivative = True)
    # Mean matter density; the 1e9 factor presumably converts colossus'
    # per-kpc^3 density to per-Mpc^3 -- TODO confirm against colossus docs.
    rho_Mpc = cosmo.rho_m(0.0) * 1E9
    # dn/dlnM = -(1/3) f(nu) (rho_m / M) dln(sigma)/dln(R).
    mass_func_model = -1/3*f*rho_Mpc/Mass*d_ln_sigma_d_ln_R
    return mass_func_model
mass_func_model = mass_function_rseppi(mass_bins_pl,A0,a0,p0,q0)
#print('model values = ', mass_func_model)
#mass_func_model=mf.massFunction(mass_bins_pl,z=z,mdef = 'vir', model = 'tinker08', q_out = 'dndlnM')
figure, ax = plt.subplots(2,1)
ax[0].loglog()
ax[1].set_xlabel(r'M [$M_{\odot}$/h]')
ax[1].set_ylabel('ratio')
ax[0].set_ylabel(r'dn/dlnM [$(Mpc/h)^{-3}$]')
mf_test = mf.massFunction(mass_bins_pl,z=z,mdef = 'fof', model = 'bhattacharya11', q_out = 'dndlnM')
ax[0].plot(mass_bins_pl, mf_test, label='Bhattacharya11')
ax[0].plot(mass_bins_pl, mass_func_model, label='model_rseppi')
ax[1].plot(mass_bins_pl, mass_func_model/mf_test, color='r')
ax[0].legend()
#plt.show()
# Use MCMC method - emcee
#import lmfit
import pymultinest
from tqdm import tqdm
#import emcee
#p=lmfit.Parameters()
#p.add_many(('A0', 0.333, True, 0.001,0.5),('a0', 0.788, True,0.2,4.0),('p0', 0.807, True,-2.0,7.0),('q0', 1.795, True,0.5,10.0))
parameters = ['A0', 'a0', 'p0', 'q0']
'''
def log_prior(A0,a0,p0,q0):
#v=p.valuesdict()
#A0,a0,p0,q0 = pa
mu = np.array([0.333, 0.788, 0.807, 1.795])
sigma = np.array([0.15, 0.4, 0.45, 1.0])
#if 0.2 < v['A0'] < 0.5 and 0.6 < v['a0'] < 1.5 and 0.0 < v['p0'] < 2.0 and 1.5 < v['q0'] < 2.5:
if(0.2 < A0 < 0.5 and 0.6 < a0 < 1.5 and 0.0 < p0 < 2.0 and 1.5 < q0 < 2.5):
return np.sum((1/np.sqrt(2*np.pi*sigma*sigma))-0.5*((mu-p)/sigma)**2)
return -np.inf
'''
plt.figure()
cube=np.arange(-1,1,0.002)
print(cube)
cube_pdf=norm.pdf(cube,loc=0.333,scale=1)
print(cube_pdf)
plt.hist(cube_pdf,bins=30,range=[0,1])
cube_ppf=norm.ppf(cube,loc=0.333,scale=1)
print(cube_ppf)
plt.hist(cube_ppf,bins=30,range=[0,1])
plt.plot(cube,cube_pdf, label = 'pdf')
plt.plot(cube,cube_ppf, label = 'ppf')
plt.show()
def prior(cube,ndim,nparams):
    """MultiNest prior transform: map unit-cube samples in place to
    Gaussian priors centred on the Bhattacharya 2011 best-fit values."""
    centres = (0.333, 0.788, 0.807, 1.795)
    widths = (0.3, 0.5, 0.5, 0.5)
    for i in range(4):
        cube[i] = norm.ppf(cube[i], loc=centres[i], scale=widths[i])
'''
for i in range(len(counts_error)):
if(i<10):
counts_error[i] = 1000*counts_error[i]
elif(10<=i<20):
counts_error[i] = 50*counts_error[i]
'''
'''
#define residual function
def residual(p):
v=p.valuesdict()
res = (mass_number - mass_function_rseppi(mass_bins_pl,v['A0'],v['a0'],v['p0'],v['q0']))/counts_error
return res
def residual1(p):
v=p.valuesdict()
res = (mass_number1 - mass_function_rseppi(mass_bins_pl1,v['A0'],v['a0'],v['p0'],v['q0']))/counts_error1
return res
def residual2(p):
v=p.valuesdict()
res = (mass_number2 - mass_function_rseppi(mass_bins_pl2,v['A0'],v['a0'],v['p0'],v['q0']))/counts_error2
return res
#print('residuals =',residual(p))
mi = lmfit.minimize(residual, p, method='leastsq')
# print report on the leastsq fit
lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
print(mi.params)
mi1 = lmfit.minimize(residual1, p, method='leastsq')
# print report on the leastsq fit
lmfit.printfuncs.report_fit(mi1.params, min_correl=0.5)
print(mi1.params)
mi2 = lmfit.minimize(residual2,p, method='leastsq')
# print report on the leastsq fit
lmfit.printfuncs.report_fit(mi2.params, min_correl=0.5)
print(mi2.params)
'''
#now I want to use MCMC method
'''
def loglike(p):
resid = residual(p)
resid = resid**2
resid = resid + np.log(2*np.pi*counts_error**2)
logL = -0.5*np.sum(resid)
if(np.isnan(logL)):
logL=-np.inf
return logL
'''
def _chi2_loglike(cube, bins, counts, errors):
    """Gaussian log-likelihood of a MultiNest parameter cube against one
    (bins, counts, errors) data set; -inf when the model yields NaN.

    FIX: this helper replaces three copy-pasted loglike bodies that
    differed only in which data arrays they referenced.
    """
    A0 = cube[0]
    a0 = cube[1]
    p0 = cube[2]
    q0 = cube[3]
    ymodel = mass_function_rseppi(bins, A0, a0, p0, q0)
    resid = (counts - ymodel)/errors
    resid = resid**2
    # Full Gaussian log-likelihood including the normalization term.
    resid = resid + np.log(2*np.pi*errors**2)
    logL = -0.5*np.sum(resid)
    if(np.isnan(logL)):
        logL=-np.inf
    return logL

def loglike(cube, ndim, nparams):
    """Log-likelihood of the full sample."""
    return _chi2_loglike(cube, mass_bins_pl, mass_number, counts_error)

def loglike1(cube, ndim, nparams):
    """Log-likelihood of the first (low-Xoff) subsample."""
    return _chi2_loglike(cube, mass_bins_pl1, mass_number1, counts_error1)

def loglike2(cube, ndim, nparams):
    """Log-likelihood of the second (high-Xoff) subsample."""
    return _chi2_loglike(cube, mass_bins_pl2, mass_number2, counts_error2)
n_params = len(parameters)
# Plot the three data sets with error bars; the fits are overlaid below.
plt.figure()
#plt.scatter(mass_bins_pl, mass_number)
plt.errorbar(mass_bins_pl, mass_number, yerr = counts_error, fmt='.', label='full sample')
plt.errorbar(mass_bins_pl1, mass_number1, yerr=counts_error1, fmt='.', label='Xoff < %.3g'%Xoff)
plt.errorbar(mass_bins_pl2,mass_number2,yerr=counts_error2, fmt='.', label='Xoff > %.3g'%Xoff)
# Run MultiNest on the full sample, then on each Xoff subsample.
import json
datafile = 'output/datafile'
print('Running multinest...')
resum = False
pymultinest.run(loglike, prior, n_params, outputfiles_basename=datafile, resume = resum, verbose = True)
print('Done!')
json.dump(parameters, open(datafile + 'params.json', 'w')) # save parameter names
print('Running Analyzer...')
a = pymultinest.Analyzer(outputfiles_basename=datafile, n_params = n_params)
bestfit_params = a.get_best_fit()
print(bestfit_params)
# v[1] holds the best-fit parameter vector [A0, a0, p0, q0].
v=list(bestfit_params.values())
print(v)
A0,a0,p0,q0 = v[1]
plt.plot(mass_bins_pl, mass_function_rseppi(mass_bins_pl,A0, a0, p0, q0), ls='solid', label='fit full sample')
pymultinest.run(loglike1, prior, n_params, outputfiles_basename=datafile + '_1_', resume = resum, verbose = True)
json.dump(parameters, open(datafile + '_1_params.json', 'w')) # save parameter names
a1 = pymultinest.Analyzer(outputfiles_basename=datafile + '_1_', n_params = n_params)
bestfit_params1 = a1.get_best_fit()
v1=list(bestfit_params1.values())
print(v1)
A0,a0,p0,q0 = v1[1]
plt.plot(mass_bins_pl1, mass_function_rseppi(mass_bins_pl1,A0, a0, p0, q0), label='fit Xoff < %.3g'%Xoff)
pymultinest.run(loglike2, prior, n_params, outputfiles_basename=datafile + '_2_', resume = resum, verbose = True)
json.dump(parameters, open(datafile + '_2_params.json', 'w')) # save parameter names
a2 = pymultinest.Analyzer(outputfiles_basename=datafile + '_2_', n_params = n_params)
bestfit_params2 = a2.get_best_fit()
v2=list(bestfit_params2.values())
print(v2)
A0,a0,p0,q0 = v2[1]
plt.plot(mass_bins_pl2, mass_function_rseppi(mass_bins_pl2,A0, a0, p0, q0), label='fit Xoff > %.3g'%Xoff)
# Reference curve with the published Bhattacharya 2011 parameters.
plt.plot(mass_bins_pl, mass_function_rseppi(mass_bins_pl,0.333, 0.788, 0.807, 1.795), label='Bhattacharya 2011')
plt.loglog()
plt.xlabel(r'M [$M_{\odot}/h]$', fontsize=18)
plt.ylabel(r'dn/dlnM $[(Mpc/h)^{-3}]$',fontsize=18)
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.savefig(datafile + 'all_data.pdf')
plt.show()
plt.close()
a_lnZ = a.get_stats()['global evidence']
# Posterior corner plots; low-weight samples are masked out first.
import corner
data = a.get_data()[:,2:]
weights = a.get_data()[:,0]
data1 = a1.get_data()[:,2:]
weights1 = a1.get_data()[:,0]
data2 = a2.get_data()[:,2:]
weights2 = a2.get_data()[:,0]
# NOTE(review): each cumsum-based mask is immediately overwritten by the
# plain weight-threshold mask on the next line -- presumably intentional
# experimentation; confirm which masking was meant.
mask = weights.cumsum() > 1e-5
mask = weights > 1e-4
mask1 = weights1.cumsum() > 1e-5
mask1 = weights1 > 1e-4
mask2 = weights2.cumsum() > 1e-5
mask2 = weights2 > 1e-4
#fig = bayescorner(params = [data[:,0],data[:,1],data[:,2],data[:,3]], param_names = ['A0', 'a0', 'p0', 'q0'], color_base = '#1f77b4', figsize=(14,14))
corner.corner(data[mask,:], weights=weights[mask],
    labels=parameters, show_titles=True, truths = v[1], title = 'full sample')
corner.corner(data1[mask1,:], weights=weights1[mask1],
    labels=parameters, show_titles=True, truths = v1[1], title = 'fit Xoff < %.3g'%Xoff)
corner.corner(data2[mask2,:], weights=weights2[mask2],
    labels=parameters, show_titles=True, truths = v2[1], title = 'fit Xoff > %.3g'%Xoff)
plt.show()
'''
def loglike1(p):
resid = residual1(p)
resid = resid**2
resid = resid + np.log(2*np.pi*counts_error1**2)
logL = -0.5*np.sum(resid)
if(np.isnan(logL)):
logL=-np.inf
return logL
def loglike2(p):
resid = residual2(p)
resid = resid**2
resid = resid + np.log(2*np.pi*counts_error2**2)
logL = -0.5*np.sum(resid)
if(np.isnan(logL)):
logL=-np.inf
return logL
def logPoisson(p):
v=p.valuesdict()
logL = - np.sum(mass_function_rseppi(mass_bins_pl,v['A0'],v['a0'],v['p0'],v['q0'])) + np.sum(mass_number*np.log(mass_function_rseppi(mass_bins_pl,v['A0'],v['a0'],v['p0'],v['q0'])))
if(np.isnan(logL)):
logL=-np.inf
return logL
priors = np.array([0.333, 0.788, 0.807, 1.795])
print(type(priors))
print(priors[0])
def logProb(A0,a0,p0,q0):
lp = log_prior(A0,a0,p0,q0)
if not np.isfinte(lp):
return -np.inf
return lp + loglike
# build a general minimizer for curve fitting and optimization.
mini = lmfit.Minimizer(loglike, mi.params, nan_policy='propagate')
#mini = lmfit.Minimizer(logPoisson, mi.params, nan_policy='propagate')
# sampling of the posterion distribution
res = mini.emcee(burn=300, steps=2000, thin=10,params=mi.params)
# show corner plot (confidence limits, parameter distributions, correlations)
print('parameters plot')
figure=corner.corner(res.flatchain, labels=res.var_names,
truths=list(res.params.valuesdict().values()),
show_titles=True, title_kwargs={"fontsize": 12})
#plt.show()
print("median of posterior probability distribution")
print('------------------------------------------')
lmfit.report_fit(res.params)
mini1 = lmfit.Minimizer(loglike1, mi1.params, nan_policy='propagate')
res1 = mini1.emcee(burn=300, steps=2000, thin=10,params=mi1.params)
figure=corner.corner(res1.flatchain, labels=res1.var_names,
truths=list(res1.params.valuesdict().values()),
show_titles=True, title_kwargs={"fontsize": 12})
mini2 = lmfit.Minimizer(loglike2, mi2.params, nan_policy='propagate')
res2 = mini2.emcee(burn=300, steps=2000, thin=10,params=mi2.params)
figure=corner.corner(res2.flatchain, labels=res2.var_names,
truths=list(res2.params.valuesdict().values()),
show_titles=True, title_kwargs={"fontsize": 12})
plt.figure()
plt.errorbar(mass_bins_pl,mass_number,yerr=counts_error, fmt='.', label='full sample')
plt.errorbar(mass_bins_pl1,mass_number1,yerr=counts_error1, fmt='.', label='Xoff low')
plt.errorbar(mass_bins_pl2,mass_number2,yerr=counts_error2, fmt='.', label='Xoff high')
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'M [$M_{\odot}/h]$', fontsize=18)
plt.ylabel(r'dn/dlnM $[(Mpc/h)^{-3}]$',fontsize=18)
plt.grid(True)
plt.tight_layout()
mf_test_tinker = mf.massFunction(mass_bins_pl,z=z,mdef = '200m', model = 'tinker08', q_out = 'dndlnM')
plt.plot(mass_bins_pl, mf_test_tinker, label='tinker08')
plt.plot(mass_bins_pl,mass_function_rseppi(mass_bins_pl,mi.params['A0'],mi.params['a0'],mi.params['p0'],mi.params['q0']), label='fit full')
plt.plot(mass_bins_pl1,mass_function_rseppi(mass_bins_pl1,mi1.params['A0'],mi1.params['a0'],mi1.params['p0'],mi1.params['q0']), label='fit low')
plt.plot(mass_bins_pl2,mass_function_rseppi(mass_bins_pl2,mi2.params['A0'],mi2.params['a0'],mi2.params['p0'],mi2.params['q0']), label='fit high')
plt.legend()
plt.show()
'''
|
import pyrebase

# NOTE(review): this must be the Firebase service-account config *dict*;
# as written it is a one-element set, which pyrebase cannot consume — the
# real key/values were presumably stripped before committing. Confirm.
config = {
    "Service key of firebase"
}
firebase = pyrebase.initialize_app(config)
storage = firebase.storage()

# Cloud folder to mirror and the local destination directory.
path_on_cloud = "/Storage/TaskImage/AAA00"
path_on_local = "C:/Users/Administrator/Pictures/firebase_downloaded/"

all_files = storage.child(path_on_cloud).list_files()
for file in all_files:
    try:
        storage.child(file.name).download(path_on_local + file.name)
    except Exception as exc:
        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt and gave no hint of which file failed or why.
        # Keep the best-effort behaviour but report the file and the error.
        print('Download Failed', file.name, exc)
"""
Tegner en retvinklet trekant
"""
import turtle
myTurtle = turtle.Turtle()
myTurtle. forward(50)
myTurtle.left(135)
myTurtle. forward(70)
myTurtle.left(135)
myTurtle. forward(50)
myTurtle.left(120)
turtle.done() |
from move import BoardMove
class TicTacToeBoard:
    """A BOARD_SIZE x BOARD_SIZE tic-tac-toe grid with move validation
    and row/column/diagonal accessors.

    Squares hold either a player's symbol or the ``EMPTY`` sentinel.
    Moves are objects exposing integer ``x`` (row) and ``y`` (column).
    """

    BOARD_SIZE = 3
    EMPTY = None  # sentinel for an unoccupied square

    def __init__(self):
        self._board = self.initialize_board()

    @property
    def board(self):
        """The underlying grid: a list of BOARD_SIZE rows."""
        return self._board

    def initialize_board(self):
        """Return a fresh grid with every square set to EMPTY."""
        # `_` replaces the unused loop variable; EMPTY is None (immutable),
        # so building each row with `*` cannot alias mutable state.
        return [[self.EMPTY] * self.BOARD_SIZE for _ in range(self.BOARD_SIZE)]

    def make_move(self, move, symbol):
        """Place *symbol* at *move*; invalid moves are silently ignored."""
        if self.valid_move(move):
            self._board[move.x][move.y] = symbol

    def at(self, position):
        """Return the symbol (or EMPTY) at *position*."""
        return self.board[position.x][position.y]

    def valid_move(self, move):
        """True when *move* is on the board and targets an empty square."""
        return self.is_within(move) and self.is_empty(move)

    def is_within(self, move):
        """True when both coordinates of *move* fall inside the grid."""
        return self.inside(move.x) and self.inside(move.y)

    def is_empty(self, move):
        """True when the square addressed by *move* holds no symbol."""
        # Idiom fix: compare against the None sentinel with `is`, not `==`,
        # so a symbol type with a custom __eq__ cannot produce a false hit.
        return self._board[move.x][move.y] is self.EMPTY

    def inside(self, point):
        """True when a single coordinate lies in [0, BOARD_SIZE)."""
        return 0 <= point < self.BOARD_SIZE

    def clear_square(self, square):
        """Reset the square addressed by *square* back to EMPTY."""
        self._board[square.x][square.y] = self.EMPTY

    def full_board(self):
        """True when no square on the board is EMPTY."""
        return all(cell is not self.EMPTY for row in self.board for cell in row)

    def get_diagonal(self):
        """Squares on the main (top-left to bottom-right) diagonal."""
        return [self.board[i][i] for i in range(self.BOARD_SIZE)]

    def get_antidiagonal(self):
        """Squares on the anti (top-right to bottom-left) diagonal."""
        return [self.board[i][self.BOARD_SIZE - 1 - i] for i in range(self.BOARD_SIZE)]

    def get_row(self, row_number):
        """The list of squares in row *row_number*."""
        return self.board[row_number]

    def get_column(self, column_number):
        """The squares in column *column_number*, top to bottom."""
        return [self.board[i][column_number] for i in range(self.BOARD_SIZE)]
import json
from copy import deepcopy
from schematics import types
from schematics.schema import Field
from .base import PyLexObject, SlotsProperty, GenericAttachmentsProperty
from .input import LexInputEvent
class ResponseCardProperty(PyLexObject):
    """Lex response card: schema version, content type, generic attachments."""
    version = types.IntType()
    # BUG FIX: the content type was passed positionally, but StringType's
    # first positional parameter is `regex`, not `default` — so the value was
    # being treated as a validation regex and never emitted. Every sibling
    # field in this module uses `default=`; do the same here.
    contentType = types.StringType(default='application/vnd.amazonaws.card.generic')
    genericAttachments = types.ListType(types.ModelType(GenericAttachmentsProperty))
    """ :type : List[GenericAttachmentsProperty] """
class MessageProperty(PyLexObject):
    """A single Lex message: its text content plus the content type.

    # contentType defaults to 'PlainText'; presumably 'SSML' is the other
    # accepted value — TODO confirm against the Lex response format docs.
    """
    contentType = types.StringType(default='PlainText')
    content = types.StringType(default='')
class DialogActionProperty(PyLexObject):
    """Common fields of a Lex ``dialogAction``: the action type plus an
    optional message and response card.

    ``serialize_when_none=False`` keeps the optional fields out of the
    serialized output when they are None; subclasses override ``type``
    with the concrete action name.
    """
    type = types.StringType(default='')
    message = types.ModelType(MessageProperty, serialize_when_none=False, default=MessageProperty())
    """ :type : MessageProperty """
    responseCard = types.ModelType(ResponseCardProperty, serialize_when_none=False, default=ResponseCardProperty())
    """ :type : ResponseCardProperty """
class DialogActionSlotsProperty(DialogActionProperty):
    """Dialog action that additionally carries the intent's slot values."""
    slots = types.ModelType(SlotsProperty, default=SlotsProperty())
    """ :type : dict[str, str] """
class LexOutputResponse(PyLexObject):
    """Base Lex output event: a dialog action plus session attributes.

    ``to_primitive`` strips empty optional sub-objects so the serialized
    payload matches what Lex expects.
    """
    dialogAction = types.ModelType(DialogActionProperty, default=DialogActionProperty())
    """ :type : DialogActionProperty """
    # NOTE(review): `default={}` is a shared mutable object; whether schematics
    # copies it per instance or all instances mutate one dict should be
    # confirmed before relying on isolation between responses.
    sessionAttributes = types.DictType(types.StringType(), default={})
    """ :type : SessionAttributesProperty """
    def update_from_input(self, event):
        """Carry state over from the incoming event (session attributes only
        at this level; subclasses extend this).

        :type event: LexInputEvent
        :return: None
        """
        self.update_session_attributes(event)
    def update_session_attributes(self, event):
        """Copy every session attribute from *event* into this response."""
        for key, val in event.sessionAttributes.items():
            self.sessionAttributes[key] = val
    def to_primitive(self, role=None, app_data=None, **kwargs):
        """Serialize to plain dicts, dropping an empty message and an unset
        response card from the dialog action.

        :return: dict ready for JSON serialization
        """
        # deepcopy so the deletions below cannot touch schematics' internals.
        d = deepcopy(super(LexOutputResponse, self).to_primitive(role=role, app_data=app_data, **kwargs))
        # An empty content string means "no message was set" — omit it.
        if hasattr(self.dialogAction, 'message') and not self.dialogAction.message.content:
            del d['dialogAction']['message']
        # version is None iff the response card was never populated.
        if hasattr(self.dialogAction, 'responseCard') and self.dialogAction.responseCard.version is None:
            del d['dialogAction']['responseCard']
        return d
class CloseLexOutputResponse(LexOutputResponse):
    """Response that ends the conversation ('Close'), fulfilled by default."""
    class CloseDialogActionProperty(DialogActionProperty):
        type = types.StringType(default='Close')
        fulfillmentState = types.StringType(default='Fulfilled')
    dialogAction = types.ModelType(CloseDialogActionProperty, default=CloseDialogActionProperty())
    """ :type : CloseLexOutputResponse.CloseDialogActionProperty """
class LexOutputSlotsResponse(LexOutputResponse):
    """Base response whose dialog action also carries the intent's slots."""
    class SubDialogActionSlotsProperty(DialogActionSlotsProperty):
        pass
    dialogAction = types.ModelType(SubDialogActionSlotsProperty, default=SubDialogActionSlotsProperty())
    """ :type : LexOutputSlotsResponse.SubDialogActionSlotsProperty """
    @classmethod
    def create_class(cls, slots_property_class):
        """Build a response subclass whose dialog action uses
        *slots_property_class* for its slots.

        :param slots_property_class: schematics model class for the slots
        :return: a new LexOutputSlotsResponse subclass
        """
        class NewIntentOutputResponse(LexOutputSlotsResponse):
            class SubDialogActionSlotsProperty(cls.SubDialogActionSlotsProperty):
                slots = types.ModelType(slots_property_class, default=slots_property_class())
            dialogAction = types.ModelType(SubDialogActionSlotsProperty, default=SubDialogActionSlotsProperty())
            """ :type : NewIntentOutputResponse.SubDialogActionSlotsProperty """
        return NewIntentOutputResponse
    def update_from_input(self, event):
        """Carry session attributes, slots and intent name over from *event*.

        :type event: LexInputEvent
        :return: None
        """
        super(LexOutputSlotsResponse, self).update_from_input(event)
        self.update_slots(event)
        self.update_intent_name(event)
    def update_intent_name(self, event):
        """Propagate the current intent's name, if this action carries one."""
        if hasattr(self.dialogAction, 'intentName'):
            self.dialogAction.intentName = event.currentIntent.name
    def update_slots(self, event):
        """Copy all slot values from *event* into this response, extending
        the slots schema on the fly for keys it does not yet declare.

        :type event: LexInputEvent | str | dict
        :return: None
        """
        if isinstance(event, LexInputEvent):
            event_slots = event.currentIntent.slots
        elif isinstance(event, str):
            # BUG FIX: the original tested `basestring` and `unicode`, which
            # are Python-2-only names and raise NameError on Python 3; on
            # Python 3, `str` covers every text input here.
            event_slots = deepcopy(json.loads(event)['currentIntent']['slots'])
        else:
            # Assumed to be an already-parsed event dict.
            event_slots = deepcopy(event['currentIntent']['slots'])
        for key, val in event_slots.items():
            # Unknown slot keys are added to the schema as plain strings so
            # the assignment below validates.
            if key not in self.dialogAction.slots._schema.fields:
                field = Field(key, types.StringType())
                self.dialogAction.slots._schema.append_field(field)
            self.dialogAction.slots[key] = val
class ConfirmIntentOutputResponse(LexOutputSlotsResponse):
    """Response asking the user to confirm the named intent ('ConfirmIntent')."""
    class SubDialogActionSlotsProperty(DialogActionSlotsProperty):
        type = types.StringType(default='ConfirmIntent')
        intentName = types.StringType(default='')
    dialogAction = types.ModelType(SubDialogActionSlotsProperty, default=SubDialogActionSlotsProperty())
    """ :type : ConfirmIntentOutputResponse.SubDialogActionSlotsProperty """
class DelegateIntentOutputResponse(LexOutputSlotsResponse):
    """Response handing the next-step decision back to Lex ('Delegate')."""
    class SubDialogActionSlotsProperty(DialogActionSlotsProperty):
        type = types.StringType(default='Delegate')
    dialogAction = types.ModelType(SubDialogActionSlotsProperty, default=SubDialogActionSlotsProperty())
    """ :type : DelegateIntentOutputResponse.SubDialogActionSlotsProperty """
class ElicitIntentOutputResponse(LexOutputResponse):
    """Response asking the user what they want to do ('ElicitIntent');
    carries no slots, hence the plain LexOutputResponse base."""
    class ElicitIntentDialogActionProperty(DialogActionProperty):
        type = types.StringType(default='ElicitIntent')
    dialogAction = types.ModelType(ElicitIntentDialogActionProperty, default=ElicitIntentDialogActionProperty())
    """ :type : ElicitIntentOutputResponse.ElicitIntentDialogActionProperty """
class ElicitSlotOutputResponse(LexOutputSlotsResponse):
    """Response prompting the user for one specific slot ('ElicitSlot')."""
    class SubDialogActionSlotsProperty(DialogActionSlotsProperty):
        type = types.StringType(default='ElicitSlot')
        intentName = types.StringType(default='')
        slotToElicit = types.StringType(default='')
    dialogAction = types.ModelType(SubDialogActionSlotsProperty, default=SubDialogActionSlotsProperty())
    """ :type : ElicitSlotOutputResponse.SubDialogActionSlotsProperty """
|
#!/usr/bin/python
# ==============================================================================
# Author: Tao Li (taoli@ucsd.edu)
# Date: May 2, 2015
# Question: 019-Remove-Nth-Node-From-End-of-List
# Link: https://leetcode.com/problems/remove-nth-node-from-end-of-list/
# ==============================================================================
# Given a linked list, remove the nth node from the end of list and return its head.
#
# For example,
#
# Given linked list: 1->2->3->4->5, and n = 2.
#
# After removing the second node from the end, the linked list becomes 1->2->3->5.
#
# Note:
# Given n will always be valid.
# Try to do this in one pass.
# ==============================================================================
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param {ListNode} head
    # @param {integer} n
    # @return {ListNode}
    def removeNthFromEnd(self, head, n):
        """Remove the n-th node from the end of a singly linked list.

        One pass, two pointers: advance a lead pointer n nodes ahead,
        then walk both until the lead reaches the tail. Out-of-range n
        (n <= 0 or n > length) leaves the list untouched.
        """
        if n <= 0:
            return head
        lead = head
        remaining = n
        while remaining:
            if lead is None:
                # n exceeds the list length — nothing to remove.
                return head
            lead = lead.next
            remaining -= 1
        if lead is None:
            # The gap equals the full length: drop the head node.
            return head.next
        trail = head
        while lead.next is not None:
            lead = lead.next
            trail = trail.next
        # trail now precedes the victim; splice it out.
        trail.next = trail.next.next
        return head
# coding: utf-8
import dataset
class Banco:
    """Data-access layer for the pizzaria SQLite database (via `dataset`).

    Every method opens its own connection and returns either the matching
    row(s) / the inserted-updated result, or False when nothing matches.
    """

    # DRY fix: the connection string was duplicated in all 14 methods.
    DB_URL = 'sqlite:///pizzaria.db'

    # Price string (as stored on the pedido) -> human-readable size label.
    _DESCRICAO_POR_VALOR = {
        '11.90': u'Broto - R$ 11,90 - 4 Fatias',
        '21.90': u'Pequena - R$ 21,90 - 6 Fatias',
        '31.90': u'Média - R$ 31,90 - 8 Fatias',
        '41.90': u'Grande - R$ 41,90 - 10 Fatias',
        '51.90': u'Extra Grande - R$ 51,90 - 12 Fatias',
    }

    # Numeric status code -> stored status text for statusPedido().
    _STATUS_PEDIDO = {
        1: 'Pronto para envio',
        2: 'Entregue',
        3: 'Concluido',
    }

    def saveUsuario(self, nome, usuario, senha, tipo):
        """Insert a new user; False if (usuario, senha) already exists."""
        with dataset.connect(self.DB_URL) as db:
            if self.getUsuario(usuario, senha):
                return False
            return db['usuario'].insert(dict(nome=nome, usuario=usuario, senha=senha, tipo=tipo))

    def getUsuario(self, usuario, senha):
        """Return the user row matching (usuario, senha), or False."""
        with dataset.connect(self.DB_URL) as db:
            usuario = db['usuario'].find_one(usuario=usuario, senha=senha)
            return usuario if usuario else False

    def getUsuarioID(self, id):
        """Return the user row with the given id, or False."""
        with dataset.connect(self.DB_URL) as db:
            usuario = db['usuario'].find_one(id=id)
            return usuario if usuario else False

    def listPizzas(self):
        """Return an iterable of all pizzas, or False when the table is empty."""
        with dataset.connect(self.DB_URL) as db:
            pizzas = db['pizzas'].all()
            return pizzas if db['pizzas'].count() > 0 else False

    def getPizza(self, id):
        """Return the pizza row with the given id, or False."""
        with dataset.connect(self.DB_URL) as db:
            pizza = db['pizzas'].find_one(id=id)
            return pizza if pizza else False

    def savePizza(self, nome, descricao):
        """Insert a new pizza, active by default."""
        with dataset.connect(self.DB_URL) as db:
            return db['pizzas'].insert(dict(nome=nome, descricao=descricao, status='ativo'))

    def updatePizza(self, id, nome, descricao, status):
        """Update a pizza's fields, keyed by id."""
        with dataset.connect(self.DB_URL) as db:
            return db['pizzas'].update(dict(id=id, nome=nome, descricao=descricao, status=status), ['id'])

    def deletePizza(self, id):
        """Delete the pizza with the given id."""
        with dataset.connect(self.DB_URL) as db:
            return db['pizzas'].delete(id=id)

    def listPedidos(self):
        """Return an iterable of all orders, or False when there are none."""
        with dataset.connect(self.DB_URL) as db:
            pedidos = db['pedidos'].all()
            return pedidos if db['pedidos'].count() > 0 else False

    def listPedidosClientes(self, id):
        """Return the orders of user *id*, or False when there are none."""
        with dataset.connect(self.DB_URL) as db:
            pedidos = db['pedidos'].find(usuario=id)
            return pedidos if db['pedidos'].count(usuario=id) > 0 else False

    def getPedido(self, id):
        """Return the order row with the given id, or False."""
        with dataset.connect(self.DB_URL) as db:
            pedidos = db['pedidos'].find_one(id=id)
            return pedidos if pedidos else False

    def savePedido(self, pizza, preco, usuario):
        """Insert a new order, status 'Pendente'."""
        with dataset.connect(self.DB_URL) as db:
            return db['pedidos'].insert(dict(usuario=usuario, codigo_pizza=pizza, tamanho_valor=preco, status='Pendente'))

    def updatePedido(self, id, pizza, preco, usuario):
        """Update an order's fields, keyed by id."""
        with dataset.connect(self.DB_URL) as db:
            return db['pedidos'].update(dict(id=id, usuario=usuario, codigo_pizza=pizza, tamanho_valor=preco), ['id'])

    def deletePedido(self, id):
        """Delete the order with the given id."""
        with dataset.connect(self.DB_URL) as db:
            return db['pedidos'].delete(id=id)

    def statusPedido(self, id, status):
        """Advance an order to the status named by code 1/2/3; unknown codes
        perform no update (and, as before, return None)."""
        with dataset.connect(self.DB_URL) as db:
            novo_status = self._STATUS_PEDIDO.get(status)
            if novo_status is not None:
                return db['pedidos'].update(dict(id=id, status=novo_status), ['id'])

    def getValorPedidoDescricao(self, valor):
        """Map a stored price string to its size description (None if unknown,
        matching the original fall-through behaviour)."""
        return self._DESCRICAO_POR_VALOR.get(valor)
# Copyright 2019 Alethea Katherine Flowers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import nox
nox.options.sessions = ["format", "lint", "test"]
@nox.session(python="3.7")
def freeze(session):
session.install("pip-tools")
session.run("pip-compile", "--output-file", "requirements.txt", "requirements.in")
@nox.session(python="3.7")
def format(session):
session.install("black", "isort")
session.run("black", "hotline", "tests", "noxfile.py")
session.run("isort", "-rc", "hotline", "tests", "noxfile.py")
@nox.session(python="3.7")
def lint(session):
session.install("mypy", "flake8", "black")
session.run("black", "--check", "hotline", "tests")
session.run("flake8", "docuploader", "tests")
session.run("mypy", "hotline")
@nox.session(python="3.7")
def test(session):
session.install("-r", "requirements.txt")
session.install("-r", "requirements-test.txt")
session.run(
"pytest",
"--cov",
"hotline",
"--cov-report",
"term-missing",
"tests",
*session.posargs
)
@nox.session(python="3.7")
def cli(session):
session.install("-r", "requirements.txt")
env = {
# Workaround for https://github.com/pallets/werkzeug/issues/461
"PYTHONPATH": os.getcwd(),
"FLASK_ENV": "development",
"FLASK_APP": "hotline.__main__",
}
session.run("python", "-m", "flask", *session.posargs, env=env)
@nox.session(python="3.7")
def serve(session):
session.install("-r", "requirements.txt")
env = {
# Workaround for https://github.com/pallets/werkzeug/issues/461
"PYTHONPATH": os.getcwd(),
"FLASK_RUN_PORT": "8080",
"FLASK_ENV": "development",
"FLASK_APP": "hotline.__main__",
}
session.run("python", "-m", "flask", "run", env=env)
@nox.session(python="3.7")
def serve_prod(session):
session.install("-r", "requirements.txt")
session.run("gunicorn", "-b", ":8080", "hotline.__main__:app")
@nox.session(python="3.7")
def shell(session):
session.install("-r", "requirements.txt")
session.install("ipython")
session.run("ipython")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.