code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import numpy as np
from .classes.block import Block
from .classes.mesh import Mesh
def generate_block(origin, size, n_cells):
origin_x = origin[0]
origin_y = origin[1]
origin_z = origin[2]
size_x = size[0]
size_y = size[1]
size_z = size[2]
block_points = [
[origin_x, origin_y, origin_z],
[origin_x + size_x, origin_y, origin_z],
[origin_x + size_x, origin_y + size_y, origin_z],
[origin_x, origin_y + size_y, origin_z],
[origin_x, origin_y, origin_z + size_z],
[origin_x + size_x, origin_y, origin_z + size_z],
[origin_x + size_x, origin_y + size_y, origin_z + size_z],
[origin_x, origin_y + size_y, origin_z + size_z],
]
block = Block.create_from_points(block_points)
block.n_cells = n_cells
return block
def generate_mesh(
size_blocks,
n_cells,
bottom_boundaries,
top_boundaries,
left_boundaries,
right_boundaries,
mode_2D=True,
mode_2_z_size=0.1,
front_boundaries=None,
back_boundaries=None,
boundaries_type = {},
removed_blocks=[]
):
size_blocks_x = size_blocks[0]
size_blocks_y = size_blocks[1]
if not mode_2D:
size_blocks_z = size_blocks[2]
n_cells_x = n_cells[0]
n_cells_y = n_cells[1]
if not mode_2D:
n_cells_z = n_cells[2]
mesh = Mesh()
# Create all blocks
for k in range(1 if mode_2D else len(size_blocks_z)):
for j in range(len(size_blocks_y)):
for i in range(len(size_blocks_x)):
# Do not include removed wall
if (i,j,k) in [block['coord'] for block in removed_blocks]:
continue
block = generate_block(
origin=[
np.sum(size_blocks_x[0:i]),
np.sum(size_blocks_y[0:j]),
0 if mode_2D else np.sum(size_blocks_z[0:k]),
],
size=[
size_blocks_x[i],
size_blocks_y[j],
mode_2_z_size if mode_2D else size_blocks_z[k],
],
n_cells=[
int(np.ceil(n_cells_x*size_blocks_x[i]/np.sum(size_blocks_x))),
int(np.ceil(n_cells_y*size_blocks_y[j]/np.sum(size_blocks_y))),
1 if mode_2D else int(np.ceil(n_cells_z*size_blocks_z[k]/np.sum(size_blocks_z))),
]
)
#Take into account internal boundaries due to removed blocks
if i < len(size_blocks_x)-1 and (i+1,j,k) in [block['coord'] for block in removed_blocks]:
boundary_name = [ block for block in removed_blocks if block["coord"] == (i+1,j,k) ][0]["boundaries"]["left"]
block.set_boundary('right', boundary_name)
if i > 0 and (i-1,j,k) in [block['coord'] for block in removed_blocks]:
boundary_name = [ block for block in removed_blocks if block["coord"] == (i-1,j,k) ][0]["boundaries"]["right"]
block.set_boundary('left', boundary_name)
if j < len(size_blocks_y)-1 and (i,j+1,k) in [block['coord'] for block in removed_blocks]:
boundary_name = [ block for block in removed_blocks if block["coord"] == (i,j+1,k) ][0]["boundaries"]["bottom"]
block.set_boundary('back', boundary_name)
if j > 0 and (i,j-1,k) in [block['coord'] for block in removed_blocks]:
boundary_name = [ block for block in removed_blocks if block["coord"] == (i,j-1,k) ][0]["boundaries"]["top"]
block.set_boundary('front', boundary_name)
if not mode_2D:
if k < len(size_blocks_z)-1 and (i,j,k+1) in [block['coord'] for block in removed_blocks]:
boundary_name = [ block for block in removed_blocks if block["coord"] == (i,j,k+1) ][0]["boundaries"]["back"]
block.set_boundary('bottom', boundary_name)
if k > 0 and (i,j,k-1) in [block['coord'] for block in removed_blocks]:
boundary_name = [ block for block in removed_blocks if block["coord"] == (i,j,k-1) ][0]["boundaries"]["front"]
block.set_boundary('top', boundary_name)
# Add external boundaries
if j == 0:
block.set_boundary('front', bottom_boundaries[i][k])
if j == len(size_blocks_y)-1:
block.set_boundary('back', top_boundaries[i][k])
if i == 0:
block.set_boundary('left', left_boundaries[j][k])
if i == len(size_blocks_x)-1:
block.set_boundary('right', right_boundaries[j][k])
if k == 0:
if mode_2D:
block.set_boundary('top', "FrontAndBack")
else:
block.set_boundary('top', front_boundaries[i][j])
if k == 1 if mode_2D else len(size_blocks_z)-1:
if mode_2D:
block.set_boundary('bottom', "FrontAndBack")
else:
block.set_boundary('bottom', back_boundaries[i][j])
mesh.add_block(block)
# Set boundary types
for boundaries_name, boundary_type in boundaries_type.items():
mesh.set_boundary_type(boundaries_name, boundary_type)
if mode_2D:
mesh.set_boundary_type("FrontAndBack", "empty")
return mesh | src/blockmeshdict_generator/generation.py | import numpy as np
from .classes.block import Block
from .classes.mesh import Mesh
def generate_block(origin, size, n_cells):
origin_x = origin[0]
origin_y = origin[1]
origin_z = origin[2]
size_x = size[0]
size_y = size[1]
size_z = size[2]
block_points = [
[origin_x, origin_y, origin_z],
[origin_x + size_x, origin_y, origin_z],
[origin_x + size_x, origin_y + size_y, origin_z],
[origin_x, origin_y + size_y, origin_z],
[origin_x, origin_y, origin_z + size_z],
[origin_x + size_x, origin_y, origin_z + size_z],
[origin_x + size_x, origin_y + size_y, origin_z + size_z],
[origin_x, origin_y + size_y, origin_z + size_z],
]
block = Block.create_from_points(block_points)
block.n_cells = n_cells
return block
def generate_mesh(
size_blocks,
n_cells,
bottom_boundaries,
top_boundaries,
left_boundaries,
right_boundaries,
mode_2D=True,
mode_2_z_size=0.1,
front_boundaries=None,
back_boundaries=None,
boundaries_type = {},
removed_blocks=[]
):
size_blocks_x = size_blocks[0]
size_blocks_y = size_blocks[1]
if not mode_2D:
size_blocks_z = size_blocks[2]
n_cells_x = n_cells[0]
n_cells_y = n_cells[1]
if not mode_2D:
n_cells_z = n_cells[2]
mesh = Mesh()
# Create all blocks
for k in range(1 if mode_2D else len(size_blocks_z)):
for j in range(len(size_blocks_y)):
for i in range(len(size_blocks_x)):
# Do not include removed wall
if (i,j,k) in [block['coord'] for block in removed_blocks]:
continue
block = generate_block(
origin=[
np.sum(size_blocks_x[0:i]),
np.sum(size_blocks_y[0:j]),
0 if mode_2D else np.sum(size_blocks_z[0:k]),
],
size=[
size_blocks_x[i],
size_blocks_y[j],
mode_2_z_size if mode_2D else size_blocks_z[k],
],
n_cells=[
int(np.ceil(n_cells_x*size_blocks_x[i]/np.sum(size_blocks_x))),
int(np.ceil(n_cells_y*size_blocks_y[j]/np.sum(size_blocks_y))),
1 if mode_2D else int(np.ceil(n_cells_z*size_blocks_z[k]/np.sum(size_blocks_z))),
]
)
#Take into account internal boundaries due to removed blocks
if i < len(size_blocks_x)-1 and (i+1,j,k) in [block['coord'] for block in removed_blocks]:
boundary_name = [ block for block in removed_blocks if block["coord"] == (i+1,j,k) ][0]["boundaries"]["left"]
block.set_boundary('right', boundary_name)
if i > 0 and (i-1,j,k) in [block['coord'] for block in removed_blocks]:
boundary_name = [ block for block in removed_blocks if block["coord"] == (i-1,j,k) ][0]["boundaries"]["right"]
block.set_boundary('left', boundary_name)
if j < len(size_blocks_y)-1 and (i,j+1,k) in [block['coord'] for block in removed_blocks]:
boundary_name = [ block for block in removed_blocks if block["coord"] == (i,j+1,k) ][0]["boundaries"]["bottom"]
block.set_boundary('back', boundary_name)
if j > 0 and (i,j-1,k) in [block['coord'] for block in removed_blocks]:
boundary_name = [ block for block in removed_blocks if block["coord"] == (i,j-1,k) ][0]["boundaries"]["top"]
block.set_boundary('front', boundary_name)
if not mode_2D:
if k < len(size_blocks_z)-1 and (i,j,k+1) in [block['coord'] for block in removed_blocks]:
boundary_name = [ block for block in removed_blocks if block["coord"] == (i,j,k+1) ][0]["boundaries"]["back"]
block.set_boundary('bottom', boundary_name)
if k > 0 and (i,j,k-1) in [block['coord'] for block in removed_blocks]:
boundary_name = [ block for block in removed_blocks if block["coord"] == (i,j,k-1) ][0]["boundaries"]["front"]
block.set_boundary('top', boundary_name)
# Add external boundaries
if j == 0:
block.set_boundary('front', bottom_boundaries[i][k])
if j == len(size_blocks_y)-1:
block.set_boundary('back', top_boundaries[i][k])
if i == 0:
block.set_boundary('left', left_boundaries[j][k])
if i == len(size_blocks_x)-1:
block.set_boundary('right', right_boundaries[j][k])
if k == 0:
if mode_2D:
block.set_boundary('top', "FrontAndBack")
else:
block.set_boundary('top', front_boundaries[i][j])
if k == 1 if mode_2D else len(size_blocks_z)-1:
if mode_2D:
block.set_boundary('bottom', "FrontAndBack")
else:
block.set_boundary('bottom', back_boundaries[i][j])
mesh.add_block(block)
# Set boundary types
for boundaries_name, boundary_type in boundaries_type.items():
mesh.set_boundary_type(boundaries_name, boundary_type)
if mode_2D:
mesh.set_boundary_type("FrontAndBack", "empty")
return mesh | 0.244814 | 0.473109 |
import numpy as np
import pandas as pd
from collections import Counter
import re, regex, os
from carnatic import cparser
#np.random.seed(42)
def _get_bigrams(notations_file):
notes = cparser._get_notes_from_file(notations_file)
n = 2
ngrams = zip(*[notes[i:] for i in range(n)])
bigrams = [" ".join(ngram) for ngram in ngrams]
return bigrams
def _predict_next_state(chord:str, bigrams:list):
bigrams_with_current_chord = [bigram for bigram in bigrams if bigram.split(' ')[0]==chord]
count_appearance = dict(Counter(bigrams_with_current_chord))
for ngram in count_appearance.keys():
count_appearance[ngram] = count_appearance[ngram]/len(bigrams_with_current_chord)
options = [key.split(' ')[1] for key in count_appearance.keys()]
probabilities = list(count_appearance.values())
return np.random.choice(options, p=probabilities)
def generate_notes_from_corpus(corpus_files:list,starting_note:str, ending_note:str, length:int=32, save_to_file=None):
"""
Generate note sequence of defined length from corpus text files
@param corpus_files: List of corpus file paths that contain sequence of notes
@param starting_note: Starting note (should be one of aarognam/avaroganam notes). Default=None
@param ending_note: Ending note (should be one of aarognam/avaroganam notes). Default=None
@param length: desired length of notes to be generated.
Note: Not always exact number of notes may be generated
@param save_to_file: File name to which generated notes are to be written. Default=None
"""
bigrams = []
for notation_file in corpus_files:
bigrams += _get_bigrams(notation_file)
if not starting_note:
starting_note = 'S'
chord = starting_note
chords = [starting_note]
length -= 1
if ending_note:
length -= 1
for n in range(length):
chords.append(_predict_next_state(chord, bigrams))
chord = chords[-1]
if ending_note:
chords.append(ending_note)
#my_str = '\n'.join(' '.join(chords[i:i+line_break_at]) for i in range(0, len(chords), (line_break_at)))
#print('markov','my_str\n',my_str)
return chords # array # chords
if __name__ == '__main__':
pass | carnatic/cmarkov.py | import numpy as np
import pandas as pd
from collections import Counter
import re, regex, os
from carnatic import cparser
#np.random.seed(42)
def _get_bigrams(notations_file):
notes = cparser._get_notes_from_file(notations_file)
n = 2
ngrams = zip(*[notes[i:] for i in range(n)])
bigrams = [" ".join(ngram) for ngram in ngrams]
return bigrams
def _predict_next_state(chord:str, bigrams:list):
bigrams_with_current_chord = [bigram for bigram in bigrams if bigram.split(' ')[0]==chord]
count_appearance = dict(Counter(bigrams_with_current_chord))
for ngram in count_appearance.keys():
count_appearance[ngram] = count_appearance[ngram]/len(bigrams_with_current_chord)
options = [key.split(' ')[1] for key in count_appearance.keys()]
probabilities = list(count_appearance.values())
return np.random.choice(options, p=probabilities)
def generate_notes_from_corpus(corpus_files:list,starting_note:str, ending_note:str, length:int=32, save_to_file=None):
"""
Generate note sequence of defined length from corpus text files
@param corpus_files: List of corpus file paths that contain sequence of notes
@param starting_note: Starting note (should be one of aarognam/avaroganam notes). Default=None
@param ending_note: Ending note (should be one of aarognam/avaroganam notes). Default=None
@param length: desired length of notes to be generated.
Note: Not always exact number of notes may be generated
@param save_to_file: File name to which generated notes are to be written. Default=None
"""
bigrams = []
for notation_file in corpus_files:
bigrams += _get_bigrams(notation_file)
if not starting_note:
starting_note = 'S'
chord = starting_note
chords = [starting_note]
length -= 1
if ending_note:
length -= 1
for n in range(length):
chords.append(_predict_next_state(chord, bigrams))
chord = chords[-1]
if ending_note:
chords.append(ending_note)
#my_str = '\n'.join(' '.join(chords[i:i+line_break_at]) for i in range(0, len(chords), (line_break_at)))
#print('markov','my_str\n',my_str)
return chords # array # chords
if __name__ == '__main__':
pass | 0.284576 | 0.139367 |
from client.bcosclient import BcosClient
from client.datatype_parser import DatatypeParser
import uuid
import json
import threading
from utils.encoding import FriendlyJsonSerde
from client.channelpack import ChannelPack
from client.channel_push_dispatcher import ChannelPushHandler
class EventCallbackHandler:
"""事件回调接口,on_event传入的是已经解析成json的logs列表,但未按abi解析
使用者派生EventCallbackHandler,实现on_event,在监听指定事件时指定实例
** 注意查重
"""
def on_event(self, eventdata):
pass
class EventCallbackManager(ChannelPushHandler):
"""EventCallbackManager按filterid管理实例
接受amop的push消息里类型为0x1002的EVENT_LOG_PUSH,并根据filterid分发
"""
abiparser: DatatypeParser = None
callback_register = dict()
lock = threading.RLock()
def set_callback(self, filterid, callback):
try:
self.lock.acquire()
#print("set callbackup ",filterid,callback)
self.callback_register[filterid] = callback
except Exception as e:
self.logger.error("channel push dispatcher add handler error", e)
finally:
self.lock.release()
def remove_callback(self, filterid, callback):
try:
self.lock.acquire()
if filterid in self.callback_register:
self.callback_register.pop(filterid)
except Exception as e:
self.logger.error("channel push dispatcher add handler error", e)
finally:
self.lock.release()
def get_callback(self, filterid):
cb = None
try:
self.lock.acquire()
if filterid in self.callback_register:
cb = self.callback_register[filterid]
except Exception as e:
self.logger.error("get_callback error", e)
finally:
self.lock.release()
return cb
# on_push from channel_push_dispatcher
def on_push(self, packmsg: ChannelPack):
# print("--------------------EventPushHandler: type {},result:{},len:{}".format(
# hex(packmsg.type), packmsg.result, packmsg.totallen))
if packmsg.type != ChannelPack.EVENT_LOG_PUSH:
print("WRONG TYPE:-EventPushHandler: type {},result:{},len:{}".format(
hex(packmsg.type), packmsg.result, packmsg.totallen))
return
strmsg = packmsg.data.decode("utf-8")
eventdata = json.loads(strmsg)
filterid = eventdata["filterID"]
# find callback implement by filterid
eventcallback = self.get_callback(filterid)
if eventcallback is None:
return
eventcallback.on_event(eventdata)
class BcosEventCallback:
"""本文件主类,其实就是几个帮助方法,参考用法:
abifile = "contracts/" + contractname + ".abi"
abiparser = DatatypeParser(abifile)
eventcallback01 = EventCallbackImpl01()
eventcallback01.abiparser = abiparser
#---------
bcos_event = BcosEventCallback()
bcos_event.setclient(BcosClient())
result = bcos_event.register_eventlog_filter(
eventcallback01, abiparser, [address], event_name, indexed_value)
"""
client: BcosClient = None
ecb_manager = EventCallbackManager()
def format_event_register_request(
self,
from_block,
to_block,
addresses,
topics,
groupid,
filterid):
'''
{
"fromBlock": "latest",
"toBlock": "latest",
"addresses": [
0xca5ed56862869c25da0bdf186e634aac6c6361ee
],
"topics": [
"0x91c95f04198617c60eaf2180fbca88fc192db379657df0e412a9f7dd4ebbe95d"
],
"groupID": "1",
"filterID": "bb31e4ec086c48e18f21cb994e2e5967"
}'''
request = dict()
request["fromBlock"] = from_block
request["toBlock"] = to_block
request["addresses"] = addresses
request["topics"] = topics
request["groupID"] = groupid
request["filterID"] = filterid
requestJson = FriendlyJsonSerde().json_encode(request)
return requestJson
# 一定要这样调用,否则manager得另外注册一下
def setclient(self, client):
self.client = client
self.add_channel_push_handler(self.ecb_manager)
def add_channel_push_handler(self, eventHandler):
if self.client.channel_handler is not None:
self.client.channel_handler.pushDispacher.add_handler(
ChannelPack.EVENT_LOG_PUSH, eventHandler)
# 主要方法,注册事件
def register_eventlog_filter(
self,
eventcallback,
abiparser,
addresses,
event_name,
indexed_value=None,
fromblock="latest",
to_block="latest"):
topics = []
if event_name is not None:
topic0 = abiparser.topic_from_event_name(event_name)
topics.append(topic0)
event_abi = abiparser.event_name_map[event_name]
#print("event abi:", event_abi)
if indexed_value is not None and len(indexed_value) > 0:
indexedinput = []
for event_input in event_abi["inputs"]:
if event_input["indexed"] is True:
indexedinput.append((event_input['name'], event_input['type']))
# print(indexedinput)
i = 0
for v in indexed_value:
itype = indexedinput[i][1]
topic = DatatypeParser.topic_from_type(itype, v)
if not (topic is None):
topics.append(topic)
i = i + 1
# create new filterid by uuid
seq = uuid.uuid1()
filterid = seq.hex
requestJson = self.format_event_register_request(
fromblock, to_block, addresses, topics, self.client.groupid, filterid)
requestbytes = ChannelPack.pack_amop_topic_message("", requestJson)
response = self.client.channel_handler.make_channel_request(
requestbytes, ChannelPack.CLIENT_REGISTER_EVENT_LOG, ChannelPack.CLIENT_REGISTER_EVENT_LOG)
(topic, result) = ChannelPack.unpack_amop_topic_message(response)
dataobj = json.loads(result)
# print(dataobj)
if dataobj["result"] == 0:
self.ecb_manager.set_callback(filterid, eventcallback)
return dataobj | client/event_callback.py | from client.bcosclient import BcosClient
from client.datatype_parser import DatatypeParser
import uuid
import json
import threading
from utils.encoding import FriendlyJsonSerde
from client.channelpack import ChannelPack
from client.channel_push_dispatcher import ChannelPushHandler
class EventCallbackHandler:
"""事件回调接口,on_event传入的是已经解析成json的logs列表,但未按abi解析
使用者派生EventCallbackHandler,实现on_event,在监听指定事件时指定实例
** 注意查重
"""
def on_event(self, eventdata):
pass
class EventCallbackManager(ChannelPushHandler):
"""EventCallbackManager按filterid管理实例
接受amop的push消息里类型为0x1002的EVENT_LOG_PUSH,并根据filterid分发
"""
abiparser: DatatypeParser = None
callback_register = dict()
lock = threading.RLock()
def set_callback(self, filterid, callback):
try:
self.lock.acquire()
#print("set callbackup ",filterid,callback)
self.callback_register[filterid] = callback
except Exception as e:
self.logger.error("channel push dispatcher add handler error", e)
finally:
self.lock.release()
def remove_callback(self, filterid, callback):
try:
self.lock.acquire()
if filterid in self.callback_register:
self.callback_register.pop(filterid)
except Exception as e:
self.logger.error("channel push dispatcher add handler error", e)
finally:
self.lock.release()
def get_callback(self, filterid):
cb = None
try:
self.lock.acquire()
if filterid in self.callback_register:
cb = self.callback_register[filterid]
except Exception as e:
self.logger.error("get_callback error", e)
finally:
self.lock.release()
return cb
# on_push from channel_push_dispatcher
def on_push(self, packmsg: ChannelPack):
# print("--------------------EventPushHandler: type {},result:{},len:{}".format(
# hex(packmsg.type), packmsg.result, packmsg.totallen))
if packmsg.type != ChannelPack.EVENT_LOG_PUSH:
print("WRONG TYPE:-EventPushHandler: type {},result:{},len:{}".format(
hex(packmsg.type), packmsg.result, packmsg.totallen))
return
strmsg = packmsg.data.decode("utf-8")
eventdata = json.loads(strmsg)
filterid = eventdata["filterID"]
# find callback implement by filterid
eventcallback = self.get_callback(filterid)
if eventcallback is None:
return
eventcallback.on_event(eventdata)
class BcosEventCallback:
"""本文件主类,其实就是几个帮助方法,参考用法:
abifile = "contracts/" + contractname + ".abi"
abiparser = DatatypeParser(abifile)
eventcallback01 = EventCallbackImpl01()
eventcallback01.abiparser = abiparser
#---------
bcos_event = BcosEventCallback()
bcos_event.setclient(BcosClient())
result = bcos_event.register_eventlog_filter(
eventcallback01, abiparser, [address], event_name, indexed_value)
"""
client: BcosClient = None
ecb_manager = EventCallbackManager()
def format_event_register_request(
self,
from_block,
to_block,
addresses,
topics,
groupid,
filterid):
'''
{
"fromBlock": "latest",
"toBlock": "latest",
"addresses": [
0xca5ed56862869c25da0bdf186e634aac6c6361ee
],
"topics": [
"0x91c95f04198617c60eaf2180fbca88fc192db379657df0e412a9f7dd4ebbe95d"
],
"groupID": "1",
"filterID": "bb31e4ec086c48e18f21cb994e2e5967"
}'''
request = dict()
request["fromBlock"] = from_block
request["toBlock"] = to_block
request["addresses"] = addresses
request["topics"] = topics
request["groupID"] = groupid
request["filterID"] = filterid
requestJson = FriendlyJsonSerde().json_encode(request)
return requestJson
# 一定要这样调用,否则manager得另外注册一下
def setclient(self, client):
self.client = client
self.add_channel_push_handler(self.ecb_manager)
def add_channel_push_handler(self, eventHandler):
if self.client.channel_handler is not None:
self.client.channel_handler.pushDispacher.add_handler(
ChannelPack.EVENT_LOG_PUSH, eventHandler)
# 主要方法,注册事件
def register_eventlog_filter(
self,
eventcallback,
abiparser,
addresses,
event_name,
indexed_value=None,
fromblock="latest",
to_block="latest"):
topics = []
if event_name is not None:
topic0 = abiparser.topic_from_event_name(event_name)
topics.append(topic0)
event_abi = abiparser.event_name_map[event_name]
#print("event abi:", event_abi)
if indexed_value is not None and len(indexed_value) > 0:
indexedinput = []
for event_input in event_abi["inputs"]:
if event_input["indexed"] is True:
indexedinput.append((event_input['name'], event_input['type']))
# print(indexedinput)
i = 0
for v in indexed_value:
itype = indexedinput[i][1]
topic = DatatypeParser.topic_from_type(itype, v)
if not (topic is None):
topics.append(topic)
i = i + 1
# create new filterid by uuid
seq = uuid.uuid1()
filterid = seq.hex
requestJson = self.format_event_register_request(
fromblock, to_block, addresses, topics, self.client.groupid, filterid)
requestbytes = ChannelPack.pack_amop_topic_message("", requestJson)
response = self.client.channel_handler.make_channel_request(
requestbytes, ChannelPack.CLIENT_REGISTER_EVENT_LOG, ChannelPack.CLIENT_REGISTER_EVENT_LOG)
(topic, result) = ChannelPack.unpack_amop_topic_message(response)
dataobj = json.loads(result)
# print(dataobj)
if dataobj["result"] == 0:
self.ecb_manager.set_callback(filterid, eventcallback)
return dataobj | 0.128484 | 0.064683 |
from .google_imports import namespace_manager
from .google_imports import memcache
from . import autobatcher
from . import tasklets
class MemcacheClient(object):
def __init__(self, conn=None, auto_batcher_class=autobatcher.AutoBatcher, max_memcache=None):
# NOTE: If conn is not None, config is only used to get the
# auto-batcher limits.
self._conn = conn
self._auto_batcher_class = auto_batcher_class
# Create the memcache auto-batchers.
self.memcache_get_batcher = auto_batcher_class(self._memcache_get_tasklet, max_memcache)
self.memcache_set_batcher = auto_batcher_class(self._memcache_set_tasklet, max_memcache)
self.memcache_del_batcher = auto_batcher_class(self._memcache_del_tasklet, max_memcache)
self.memcache_off_batcher = auto_batcher_class(self._memcache_off_tasklet, max_memcache)
self._memcache = memcache.Client()
@tasklets.tasklet
def _memcache_get_tasklet(self, todo, options):
if not todo:
raise RuntimeError('Nothing to do.')
for_cas, namespace, deadline = options
keys = set()
for unused_fut, key in todo:
keys.add(key)
rpc = memcache.create_rpc(deadline=deadline)
results = yield self._memcache.get_multi_async(keys, for_cas=for_cas,
namespace=namespace,
rpc=rpc)
for fut, key in todo:
fut.set_result(results.get(key))
@tasklets.tasklet
def _memcache_set_tasklet(self, todo, options):
if not todo:
raise RuntimeError('Nothing to do.')
opname, time, namespace, deadline = options
methodname = opname + '_multi_async'
method = getattr(self._memcache, methodname)
mapping = {}
for unused_fut, (key, value) in todo:
mapping[key] = value
rpc = memcache.create_rpc(deadline=deadline)
results = yield method(mapping, time=time, namespace=namespace, rpc=rpc)
for fut, (key, unused_value) in todo:
if results is None:
status = memcache.MemcacheSetResponse.ERROR
else:
status = results.get(key)
fut.set_result(status == memcache.MemcacheSetResponse.STORED)
@tasklets.tasklet
def _memcache_del_tasklet(self, todo, options):
if not todo:
raise RuntimeError('Nothing to do.')
seconds, namespace, deadline = options
keys = set()
for unused_fut, key in todo:
keys.add(key)
rpc = memcache.create_rpc(deadline=deadline)
statuses = yield self._memcache.delete_multi_async(keys, seconds=seconds,
namespace=namespace,
rpc=rpc)
status_key_mapping = {}
if statuses: # On network error, statuses is None.
for key, status in zip(keys, statuses):
status_key_mapping[key] = status
for fut, key in todo:
status = status_key_mapping.get(key, memcache.DELETE_NETWORK_FAILURE)
fut.set_result(status)
@tasklets.tasklet
def _memcache_off_tasklet(self, todo, options):
if not todo:
raise RuntimeError('Nothing to do.')
initial_value, namespace, deadline = options
mapping = {} # {key: delta}
for unused_fut, (key, delta) in todo:
mapping[key] = delta
rpc = memcache.create_rpc(deadline=deadline)
results = yield self._memcache.offset_multi_async(
mapping, initial_value=initial_value, namespace=namespace, rpc=rpc)
for fut, (key, unused_delta) in todo:
fut.set_result(results.get(key))
def memcache_get(self, key, for_cas=False, namespace=None, use_cache=False,
deadline=None):
"""An auto-batching wrapper for memcache.get() or .get_multi().
Args:
key: Key to set. This must be a string; no prefix is applied.
for_cas: If True, request and store CAS ids on the Context.
namespace: Optional namespace.
deadline: Optional deadline for the RPC.
Returns:
A Future (!) whose return value is the value retrieved from
memcache, or None.
"""
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(for_cas, bool):
raise TypeError('for_cas must be a bool; received %r' % for_cas)
if namespace is None:
namespace = namespace_manager.get_namespace()
options = (for_cas, namespace, deadline)
batcher = self.memcache_get_batcher
if use_cache:
return batcher.add_once(key, options)
else:
return batcher.add(key, options)
# XXX: Docstrings below.
def memcache_gets(self, key, namespace=None, use_cache=False, deadline=None):
return self.memcache_get(key, for_cas=True, namespace=namespace,
use_cache=use_cache, deadline=deadline)
def memcache_set(self, key, value, time=0, namespace=None, use_cache=False,
deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(time, (int, long)):
raise TypeError('time must be a number; received %r' % time)
if namespace is None:
namespace = namespace_manager.get_namespace()
options = ('set', time, namespace, deadline)
batcher = self.memcache_set_batcher
if use_cache:
return batcher.add_once((key, value), options)
else:
return batcher.add((key, value), options)
def memcache_add(self, key, value, time=0, namespace=None, deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(time, (int, long)):
raise TypeError('time must be a number; received %r' % time)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self.memcache_set_batcher.add((key, value),
('add', time, namespace, deadline))
def memcache_replace(self, key, value, time=0, namespace=None, deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(time, (int, long)):
raise TypeError('time must be a number; received %r' % time)
if namespace is None:
namespace = namespace_manager.get_namespace()
options = ('replace', time, namespace, deadline)
return self.memcache_set_batcher.add((key, value), options)
def memcache_cas(self, key, value, time=0, namespace=None, deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(time, (int, long)):
raise TypeError('time must be a number; received %r' % time)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self.memcache_set_batcher.add((key, value),
('cas', time, namespace, deadline))
def memcache_delete(self, key, seconds=0, namespace=None, deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(seconds, (int, long)):
raise TypeError('seconds must be a number; received %r' % seconds)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self.memcache_del_batcher.add(key, (seconds, namespace, deadline))
def memcache_incr(self, key, delta=1, initial_value=None, namespace=None,
deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(delta, (int, long)):
raise TypeError('delta must be a number; received %r' % delta)
if initial_value is not None and not isinstance(initial_value, (int, long)):
raise TypeError('initial_value must be a number or None; received %r' %
initial_value)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self.memcache_off_batcher.add((key, delta),
(initial_value, namespace, deadline))
def memcache_decr(self, key, delta=1, initial_value=None, namespace=None,
deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(delta, (int, long)):
raise TypeError('delta must be a number; received %r' % delta)
if initial_value is not None and not isinstance(initial_value, (int, long)):
raise TypeError('initial_value must be a number or None; received %r' %
initial_value)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self.memcache_off_batcher.add((key, -delta),
(initial_value, namespace, deadline)) | ndb/memcache_client.py | from .google_imports import namespace_manager
from .google_imports import memcache
from . import autobatcher
from . import tasklets
class MemcacheClient(object):
def __init__(self, conn=None, auto_batcher_class=autobatcher.AutoBatcher, max_memcache=None):
# NOTE: If conn is not None, config is only used to get the
# auto-batcher limits.
self._conn = conn
self._auto_batcher_class = auto_batcher_class
# Create the memcache auto-batchers.
self.memcache_get_batcher = auto_batcher_class(self._memcache_get_tasklet, max_memcache)
self.memcache_set_batcher = auto_batcher_class(self._memcache_set_tasklet, max_memcache)
self.memcache_del_batcher = auto_batcher_class(self._memcache_del_tasklet, max_memcache)
self.memcache_off_batcher = auto_batcher_class(self._memcache_off_tasklet, max_memcache)
self._memcache = memcache.Client()
@tasklets.tasklet
def _memcache_get_tasklet(self, todo, options):
if not todo:
raise RuntimeError('Nothing to do.')
for_cas, namespace, deadline = options
keys = set()
for unused_fut, key in todo:
keys.add(key)
rpc = memcache.create_rpc(deadline=deadline)
results = yield self._memcache.get_multi_async(keys, for_cas=for_cas,
namespace=namespace,
rpc=rpc)
for fut, key in todo:
fut.set_result(results.get(key))
@tasklets.tasklet
def _memcache_set_tasklet(self, todo, options):
if not todo:
raise RuntimeError('Nothing to do.')
opname, time, namespace, deadline = options
methodname = opname + '_multi_async'
method = getattr(self._memcache, methodname)
mapping = {}
for unused_fut, (key, value) in todo:
mapping[key] = value
rpc = memcache.create_rpc(deadline=deadline)
results = yield method(mapping, time=time, namespace=namespace, rpc=rpc)
for fut, (key, unused_value) in todo:
if results is None:
status = memcache.MemcacheSetResponse.ERROR
else:
status = results.get(key)
fut.set_result(status == memcache.MemcacheSetResponse.STORED)
@tasklets.tasklet
def _memcache_del_tasklet(self, todo, options):
if not todo:
raise RuntimeError('Nothing to do.')
seconds, namespace, deadline = options
keys = set()
for unused_fut, key in todo:
keys.add(key)
rpc = memcache.create_rpc(deadline=deadline)
statuses = yield self._memcache.delete_multi_async(keys, seconds=seconds,
namespace=namespace,
rpc=rpc)
status_key_mapping = {}
if statuses: # On network error, statuses is None.
for key, status in zip(keys, statuses):
status_key_mapping[key] = status
for fut, key in todo:
status = status_key_mapping.get(key, memcache.DELETE_NETWORK_FAILURE)
fut.set_result(status)
@tasklets.tasklet
def _memcache_off_tasklet(self, todo, options):
if not todo:
raise RuntimeError('Nothing to do.')
initial_value, namespace, deadline = options
mapping = {} # {key: delta}
for unused_fut, (key, delta) in todo:
mapping[key] = delta
rpc = memcache.create_rpc(deadline=deadline)
results = yield self._memcache.offset_multi_async(
mapping, initial_value=initial_value, namespace=namespace, rpc=rpc)
for fut, (key, unused_delta) in todo:
fut.set_result(results.get(key))
def memcache_get(self, key, for_cas=False, namespace=None, use_cache=False,
deadline=None):
"""An auto-batching wrapper for memcache.get() or .get_multi().
Args:
key: Key to set. This must be a string; no prefix is applied.
for_cas: If True, request and store CAS ids on the Context.
namespace: Optional namespace.
deadline: Optional deadline for the RPC.
Returns:
A Future (!) whose return value is the value retrieved from
memcache, or None.
"""
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(for_cas, bool):
raise TypeError('for_cas must be a bool; received %r' % for_cas)
if namespace is None:
namespace = namespace_manager.get_namespace()
options = (for_cas, namespace, deadline)
batcher = self.memcache_get_batcher
if use_cache:
return batcher.add_once(key, options)
else:
return batcher.add(key, options)
# XXX: Docstrings below.
def memcache_gets(self, key, namespace=None, use_cache=False, deadline=None):
return self.memcache_get(key, for_cas=True, namespace=namespace,
use_cache=use_cache, deadline=deadline)
def memcache_set(self, key, value, time=0, namespace=None, use_cache=False,
deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(time, (int, long)):
raise TypeError('time must be a number; received %r' % time)
if namespace is None:
namespace = namespace_manager.get_namespace()
options = ('set', time, namespace, deadline)
batcher = self.memcache_set_batcher
if use_cache:
return batcher.add_once((key, value), options)
else:
return batcher.add((key, value), options)
def memcache_add(self, key, value, time=0, namespace=None, deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(time, (int, long)):
raise TypeError('time must be a number; received %r' % time)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self.memcache_set_batcher.add((key, value),
('add', time, namespace, deadline))
def memcache_replace(self, key, value, time=0, namespace=None, deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(time, (int, long)):
raise TypeError('time must be a number; received %r' % time)
if namespace is None:
namespace = namespace_manager.get_namespace()
options = ('replace', time, namespace, deadline)
return self.memcache_set_batcher.add((key, value), options)
def memcache_cas(self, key, value, time=0, namespace=None, deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(time, (int, long)):
raise TypeError('time must be a number; received %r' % time)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self.memcache_set_batcher.add((key, value),
('cas', time, namespace, deadline))
def memcache_delete(self, key, seconds=0, namespace=None, deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(seconds, (int, long)):
raise TypeError('seconds must be a number; received %r' % seconds)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self.memcache_del_batcher.add(key, (seconds, namespace, deadline))
def memcache_incr(self, key, delta=1, initial_value=None, namespace=None,
deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(delta, (int, long)):
raise TypeError('delta must be a number; received %r' % delta)
if initial_value is not None and not isinstance(initial_value, (int, long)):
raise TypeError('initial_value must be a number or None; received %r' %
initial_value)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self.memcache_off_batcher.add((key, delta),
(initial_value, namespace, deadline))
def memcache_decr(self, key, delta=1, initial_value=None, namespace=None,
deadline=None):
if not isinstance(key, basestring):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(delta, (int, long)):
raise TypeError('delta must be a number; received %r' % delta)
if initial_value is not None and not isinstance(initial_value, (int, long)):
raise TypeError('initial_value must be a number or None; received %r' %
initial_value)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self.memcache_off_batcher.add((key, -delta),
(initial_value, namespace, deadline)) | 0.487551 | 0.068289 |
import sys
from django.conf import settings
from django.core.management import BaseCommand
from df_config.utils import is_package_present
class Command(BaseCommand):
help = "Launch the server process"
@property
def listen_port(self):
add, sep, port = settings.LISTEN_ADDRESS.partition(":")
return int(port)
@property
def listen_address(self):
add, sep, port = settings.LISTEN_ADDRESS.partition(":")
return add
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
if settings.DF_SERVER == "gunicorn":
self.run_gunicorn()
elif settings.DF_SERVER == "daphne":
self.run_daphne()
else:
self.stderr.write(
"unknown value '%s' for setting DF_SERVER. Please choose between 'daphne' and 'gunicorn'."
% settings.DF_SERVER
)
return
@staticmethod
def get_wsgi_application():
mod_name, sep, attr_name = settings.WSGI_APPLICATION.rpartition(".")
return "%s:%s" % (mod_name, attr_name)
@staticmethod
def get_asgi_application():
mod_name, sep, attr_name = settings.ASGI_APPLICATION.rpartition(".")
return "%s:%s" % (mod_name, attr_name)
def run_daphne(self):
# noinspection PyPackageRequirements,PyUnresolvedReferences
from daphne.cli import CommandLineInterface
host, port = self.listen_address, self.listen_port
app = self.get_asgi_application()
class CLI(CommandLineInterface):
def __init__(self):
super().__init__()
# noinspection PyProtectedMember
for action in self.parser._actions:
if action.dest == "port":
action.default = port
elif action.dest == "host":
action.default = host
elif action.dest == "application":
action.default = app
action.required = False
return CLI().run(sys.argv[2:])
def run_gunicorn(self):
sys.argv.pop(0)
# noinspection PyPackageRequirements,PyUnresolvedReferences
from gunicorn.config import KNOWN_SETTINGS, Setting
# noinspection PyPackageRequirements,PyUnresolvedReferences
from gunicorn.app.wsgiapp import WSGIApplication
if settings.USE_WEBSOCKETS:
application = self.get_asgi_application()
worker_cls = "uvicorn.workers.UvicornWorker"
if not is_package_present("uvicorn.workers"):
self.stderr.write(
"you must install uvicorn to use websockets with Gunicorn."
)
return
else:
application = self.get_wsgi_application()
worker_cls = "gunicorn.workers.gthread.ThreadWorker"
class Application(WSGIApplication):
def init(self, parser, opts, args):
if not args:
args.append(application)
super().init(parser, opts, args)
for setting in KNOWN_SETTINGS: # type: Setting
if setting.name == "bind":
setting.default = settings.LISTEN_ADDRESS
elif setting.name == "worker_class":
setting.default = worker_cls
return Application("%(prog)s [OPTIONS] [APP_MODULE]").run()
def handle(self, *args, **options):
pass | df_config/management/commands/server.py | import sys
from django.conf import settings
from django.core.management import BaseCommand
from df_config.utils import is_package_present
class Command(BaseCommand):
help = "Launch the server process"
@property
def listen_port(self):
add, sep, port = settings.LISTEN_ADDRESS.partition(":")
return int(port)
@property
def listen_address(self):
add, sep, port = settings.LISTEN_ADDRESS.partition(":")
return add
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
if settings.DF_SERVER == "gunicorn":
self.run_gunicorn()
elif settings.DF_SERVER == "daphne":
self.run_daphne()
else:
self.stderr.write(
"unknown value '%s' for setting DF_SERVER. Please choose between 'daphne' and 'gunicorn'."
% settings.DF_SERVER
)
return
@staticmethod
def get_wsgi_application():
mod_name, sep, attr_name = settings.WSGI_APPLICATION.rpartition(".")
return "%s:%s" % (mod_name, attr_name)
@staticmethod
def get_asgi_application():
mod_name, sep, attr_name = settings.ASGI_APPLICATION.rpartition(".")
return "%s:%s" % (mod_name, attr_name)
def run_daphne(self):
# noinspection PyPackageRequirements,PyUnresolvedReferences
from daphne.cli import CommandLineInterface
host, port = self.listen_address, self.listen_port
app = self.get_asgi_application()
class CLI(CommandLineInterface):
def __init__(self):
super().__init__()
# noinspection PyProtectedMember
for action in self.parser._actions:
if action.dest == "port":
action.default = port
elif action.dest == "host":
action.default = host
elif action.dest == "application":
action.default = app
action.required = False
return CLI().run(sys.argv[2:])
def run_gunicorn(self):
sys.argv.pop(0)
# noinspection PyPackageRequirements,PyUnresolvedReferences
from gunicorn.config import KNOWN_SETTINGS, Setting
# noinspection PyPackageRequirements,PyUnresolvedReferences
from gunicorn.app.wsgiapp import WSGIApplication
if settings.USE_WEBSOCKETS:
application = self.get_asgi_application()
worker_cls = "uvicorn.workers.UvicornWorker"
if not is_package_present("uvicorn.workers"):
self.stderr.write(
"you must install uvicorn to use websockets with Gunicorn."
)
return
else:
application = self.get_wsgi_application()
worker_cls = "gunicorn.workers.gthread.ThreadWorker"
class Application(WSGIApplication):
def init(self, parser, opts, args):
if not args:
args.append(application)
super().init(parser, opts, args)
for setting in KNOWN_SETTINGS: # type: Setting
if setting.name == "bind":
setting.default = settings.LISTEN_ADDRESS
elif setting.name == "worker_class":
setting.default = worker_cls
return Application("%(prog)s [OPTIONS] [APP_MODULE]").run()
def handle(self, *args, **options):
pass | 0.436622 | 0.101322 |
import os
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'box',
'custom_user',
'home',
'blog',
'search',
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.contrib.routable_page',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'wagtail.contrib.table_block',
'wagtail.contrib.postgres_search',
'django_prometheus',
'django_tables2',
'modelcluster',
'taggit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.core.middleware.SiteMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware',
]
ROOT_URLCONF = 'intranet.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'intranet.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django_prometheus.db.backends.postgresql',
'NAME': os.environ.get('PG_DB', 'postgres'),
'USER': os.environ.get('PG_USER', 'postgres'),
'PASSWORD': os.environ.get('PG_PASSWORD'),
'HOST': os.environ.get('DB_HOST', 'db'),
'PORT': 5432,
}
}
CACHES = {
'default': {
'BACKEND': 'django_prometheus.cache.backends.filebased.FileBasedCache',
'LOCATION': '/tmp/django_cache',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Denver'
USE_I18N = True
USE_L10N = True
USE_TZ = True
USE_THOUSAND_SEPARATOR = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
# ManifestStaticFilesStorage is recommended in production, to prevent outdated
# Javascript / CSS assets being served from cache (e.g. after a Wagtail upgrade).
# See https://docs.djangoproject.com/en/3.0/ref/contrib/staticfiles/#manifeststaticfilesstorage
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
AUTH_USER_MODEL = 'custom_user.User'
# Wagtail settings
WAGTAIL_SITE_NAME = "intranet"
WAGTAILEMBEDS_RESPONSIVE_HTML = True
WAGTAIL_USER_EDIT_FORM = 'custom_user.forms.CustomUserEditForm'
WAGTAIL_USER_CREATION_FORM = 'custom_user.forms.CustomUserCreationForm'
WAGTAIL_USER_CUSTOM_FIELDS = ['title']
WAGTAILSEARCH_BACKENDS = {
'default': {
'BACKEND': 'wagtail.contrib.postgres_search.backend',
'SEARCH_CONFIG': 'english',
}
}
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = 'https://intranet.redbutte.utah.edu'
# Email Settings
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.utah.edu'
EMAIL_HOST_USER = '<EMAIL>'
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASSWORD')
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = '<EMAIL>'
SERVER_EMAIL = '<EMAIL>'
# Django_tables2 Settings
DJANGO_TABLES2_TEMPLATE = "django_tables2/bootstrap4.html" | intranet/intranet/settings/base.py | import os
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'box',
'custom_user',
'home',
'blog',
'search',
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.contrib.routable_page',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'wagtail.contrib.table_block',
'wagtail.contrib.postgres_search',
'django_prometheus',
'django_tables2',
'modelcluster',
'taggit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.core.middleware.SiteMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware',
]
ROOT_URLCONF = 'intranet.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'intranet.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django_prometheus.db.backends.postgresql',
'NAME': os.environ.get('PG_DB', 'postgres'),
'USER': os.environ.get('PG_USER', 'postgres'),
'PASSWORD': os.environ.get('PG_PASSWORD'),
'HOST': os.environ.get('DB_HOST', 'db'),
'PORT': 5432,
}
}
CACHES = {
'default': {
'BACKEND': 'django_prometheus.cache.backends.filebased.FileBasedCache',
'LOCATION': '/tmp/django_cache',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Denver'
USE_I18N = True
USE_L10N = True
USE_TZ = True
USE_THOUSAND_SEPARATOR = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
# ManifestStaticFilesStorage is recommended in production, to prevent outdated
# Javascript / CSS assets being served from cache (e.g. after a Wagtail upgrade).
# See https://docs.djangoproject.com/en/3.0/ref/contrib/staticfiles/#manifeststaticfilesstorage
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
AUTH_USER_MODEL = 'custom_user.User'
# Wagtail settings
WAGTAIL_SITE_NAME = "intranet"
WAGTAILEMBEDS_RESPONSIVE_HTML = True
WAGTAIL_USER_EDIT_FORM = 'custom_user.forms.CustomUserEditForm'
WAGTAIL_USER_CREATION_FORM = 'custom_user.forms.CustomUserCreationForm'
WAGTAIL_USER_CUSTOM_FIELDS = ['title']
WAGTAILSEARCH_BACKENDS = {
'default': {
'BACKEND': 'wagtail.contrib.postgres_search.backend',
'SEARCH_CONFIG': 'english',
}
}
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = 'https://intranet.redbutte.utah.edu'
# Email Settings
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.utah.edu'
EMAIL_HOST_USER = '<EMAIL>'
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASSWORD')
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = '<EMAIL>'
SERVER_EMAIL = '<EMAIL>'
# Django_tables2 Settings
DJANGO_TABLES2_TEMPLATE = "django_tables2/bootstrap4.html" | 0.366136 | 0.07393 |
from __future__ import print_function
import time
import pandas as pd
from docopt import docopt
import pyper as pr
import pysam
def collect_set_XC(input_bam_file):
bamfile = pysam.AlignmentFile(input_bam_file, "rb")
set_XC = set()
for read in bamfile:
try:
if read.get_tag('GE'):
set_XC.add(read.get_tag('XC'))
except:
pass
bamfile.close()
return set_XC
def get_dict_correction(set_XC, designed_barcode,
metric="seqlev", distance="2"):
if type(distance) not in (int, long):
raise InvalidArgumentError
if distance not in (0, 1, 2):
print("distance must be 0, 1, or 2")
raise InvalidArgumentError
r = pr.R()
r("library(DNABarcodes)")
r.assign("list_XC", list(set_XC))
r.assign("designed_barcode", designed_barcode)
if metric == "seqlev":
r("demultiplexed <- demultiplex(list_XC, designed_barcode, metric='seqlev')")
elif metric == "hamming":
r("demultiplexed <- demultiplex(list_XC, designed_barcode, metric='hamming')")
else:
print("metric must be 'seqlev' or 'hamming'")
raise InvalidArgumentError
df_correction = r.get("demultiplexed")
df_correction.columns = [x.replace(" ", "") for x in df_correction.columns]
df_correction_filt = (df_correction[df_correction.distance <= distance]
[['read', 'barcode']])
dict_correct = df_correction_filt.set_index('read').to_dict()['barcode']
return dict_correct
def correct_XC(input_file, dict_correct, output_file):
input_bamfile = pysam.AlignmentFile(input_file, "rb")
output_bamfile = pysam.AlignmentFile(output_file, "wb",
template=input_bamfile)
for read in input_bamfile:
try:
if read.get_tag('GE'):
barcode = dict_correct[read.get_tag('XC')]
read.set_tag('XC', barcode)
output_bamfile.write(read)
except:
pass
output_bamfile.close()
input_bamfile.close()
# try:
# bef = read.get_tag('XC')
# read.set_tag('XC', bef)
# except:
# pass
# try:
# barcode = dict_correct[read.get_tag('XC')]
# except:
# barcode = read.get_tag('XC')
# read.set_tag('XC', barcode)
def InvalidArgumentError(ValueError):
pass
if __name__ == '__main__':
start = time.time()
NAME = "correct_barcode.py"
VERSION = "0.1.0"
args = docopt(__doc__, version="{0} {1}".format(NAME, VERSION))
input_file = args['-i']
barcode_file = args['-b']
output_file = args['-o']
metric = args['-m']
distance = int(args['-d'])
designed_barcode = list(pd.read_csv(barcode_file,
header=None, squeeze=True))
set_XC = collect_set_XC(input_file)
dict_correct = get_dict_correction(set_XC, designed_barcode,
metric=metric, distance=distance)
correct_XC(input_file, dict_correct, output_file)
elapsed_time = time.time() - start
print("Program finished. Elapsed_time: {0:.2f}".format(elapsed_time) +
" [sec]") | correct_barcode.py | from __future__ import print_function
import time
import pandas as pd
from docopt import docopt
import pyper as pr
import pysam
def collect_set_XC(input_bam_file):
bamfile = pysam.AlignmentFile(input_bam_file, "rb")
set_XC = set()
for read in bamfile:
try:
if read.get_tag('GE'):
set_XC.add(read.get_tag('XC'))
except:
pass
bamfile.close()
return set_XC
def get_dict_correction(set_XC, designed_barcode,
metric="seqlev", distance="2"):
if type(distance) not in (int, long):
raise InvalidArgumentError
if distance not in (0, 1, 2):
print("distance must be 0, 1, or 2")
raise InvalidArgumentError
r = pr.R()
r("library(DNABarcodes)")
r.assign("list_XC", list(set_XC))
r.assign("designed_barcode", designed_barcode)
if metric == "seqlev":
r("demultiplexed <- demultiplex(list_XC, designed_barcode, metric='seqlev')")
elif metric == "hamming":
r("demultiplexed <- demultiplex(list_XC, designed_barcode, metric='hamming')")
else:
print("metric must be 'seqlev' or 'hamming'")
raise InvalidArgumentError
df_correction = r.get("demultiplexed")
df_correction.columns = [x.replace(" ", "") for x in df_correction.columns]
df_correction_filt = (df_correction[df_correction.distance <= distance]
[['read', 'barcode']])
dict_correct = df_correction_filt.set_index('read').to_dict()['barcode']
return dict_correct
def correct_XC(input_file, dict_correct, output_file):
input_bamfile = pysam.AlignmentFile(input_file, "rb")
output_bamfile = pysam.AlignmentFile(output_file, "wb",
template=input_bamfile)
for read in input_bamfile:
try:
if read.get_tag('GE'):
barcode = dict_correct[read.get_tag('XC')]
read.set_tag('XC', barcode)
output_bamfile.write(read)
except:
pass
output_bamfile.close()
input_bamfile.close()
# try:
# bef = read.get_tag('XC')
# read.set_tag('XC', bef)
# except:
# pass
# try:
# barcode = dict_correct[read.get_tag('XC')]
# except:
# barcode = read.get_tag('XC')
# read.set_tag('XC', barcode)
def InvalidArgumentError(ValueError):
pass
if __name__ == '__main__':
start = time.time()
NAME = "correct_barcode.py"
VERSION = "0.1.0"
args = docopt(__doc__, version="{0} {1}".format(NAME, VERSION))
input_file = args['-i']
barcode_file = args['-b']
output_file = args['-o']
metric = args['-m']
distance = int(args['-d'])
designed_barcode = list(pd.read_csv(barcode_file,
header=None, squeeze=True))
set_XC = collect_set_XC(input_file)
dict_correct = get_dict_correction(set_XC, designed_barcode,
metric=metric, distance=distance)
correct_XC(input_file, dict_correct, output_file)
elapsed_time = time.time() - start
print("Program finished. Elapsed_time: {0:.2f}".format(elapsed_time) +
" [sec]") | 0.457137 | 0.204401 |
from __future__ import unicode_literals
from .compat import itervalues
from .parse_utils import EMPTY_PARSED_PIECE
from .pattern import Pattern
from .utils import TreeNode, build_tree
class PiecePatternNode(TreeNode):
"""Node for building raw piece tree."""
__slots__ = ('_pattern',)
def __init__(self, parsed_piece_and_pattern):
parsed_piece, self._pattern = parsed_piece_and_pattern
super(PiecePatternNode, self).__init__(parsed_piece)
def set_pattern(self, pattern):
self._pattern = pattern
@property
def pattern(self):
if self._pattern is None:
self._pattern = Pattern(self.piece)
return self._pattern
@property
def piece(self):
return self.parsed_piece.piece
@property
def parsed_piece(self):
return self.value
@property
def children_num(self):
return len(self._children)
def incr_count(self, count, recur=False):
self.count += count
node = self.parrent if recur else None
while node:
node.incr_count(count)
node = node.parrent
def __str__(self):
return ' '.join((self.piece, str(self.pattern)))
def add_meta(self, data):
if data is None:
return
if self.meta is None:
self.meta = set()
self.meta.add(data)
def update_meta(self, data):
if not data:
return
if self.meta is None:
self.meta = set()
self.meta.update(data)
def build_from_parsed_pieces(root, parsed_pieces, count=1, meta=None, uniq=True):
"""Build piece pattern tree from parsed pieces.
Args:
root (PiecePatternNode): The root node of the a tree.
parsed_pieces (sequence): The parsed pieces.
count (int, optional): Defaults to 1.
meta ([type], optional): Defaults to None. The meta data will bind to the leaf node.
uniq (bool, optional): Defaults to True. The duplicated node edge will not add.
Returns:
tuple: 2-tuple, (leaf_node, is_new)
"""
node, is_new = build_tree(root, [(parsed_piece.piece, (parsed_piece, None))
for parsed_piece in parsed_pieces], count)
if uniq and not is_new:
node.incr_count(0 - count, True)
node.add_meta(meta)
return node, is_new
def build_from_piece_pattern_nodes(root, piece_pattern_nodes):
"""Build piece pattern tree from piece pattern tree edge.
Args:
root (PiecePatternNode): The root node of the a tree.
piece_pattern_nodes (sequence): piece pattern tree edge.
Returns:
tuple: 2-tuple, (leaf_node, is_new)
"""
last = piece_pattern_nodes[-1]
node, is_new = build_tree(root, [(p.piece, (p.parsed_piece, p.pattern))
for p in piece_pattern_nodes], last.count)
node.update_meta(last.meta)
return node, is_new | src/os_urlpattern/piece_pattern_node.py | from __future__ import unicode_literals
from .compat import itervalues
from .parse_utils import EMPTY_PARSED_PIECE
from .pattern import Pattern
from .utils import TreeNode, build_tree
class PiecePatternNode(TreeNode):
"""Node for building raw piece tree."""
__slots__ = ('_pattern',)
def __init__(self, parsed_piece_and_pattern):
parsed_piece, self._pattern = parsed_piece_and_pattern
super(PiecePatternNode, self).__init__(parsed_piece)
def set_pattern(self, pattern):
self._pattern = pattern
@property
def pattern(self):
if self._pattern is None:
self._pattern = Pattern(self.piece)
return self._pattern
@property
def piece(self):
return self.parsed_piece.piece
@property
def parsed_piece(self):
return self.value
@property
def children_num(self):
return len(self._children)
def incr_count(self, count, recur=False):
self.count += count
node = self.parrent if recur else None
while node:
node.incr_count(count)
node = node.parrent
def __str__(self):
return ' '.join((self.piece, str(self.pattern)))
def add_meta(self, data):
if data is None:
return
if self.meta is None:
self.meta = set()
self.meta.add(data)
def update_meta(self, data):
if not data:
return
if self.meta is None:
self.meta = set()
self.meta.update(data)
def build_from_parsed_pieces(root, parsed_pieces, count=1, meta=None, uniq=True):
"""Build piece pattern tree from parsed pieces.
Args:
root (PiecePatternNode): The root node of the a tree.
parsed_pieces (sequence): The parsed pieces.
count (int, optional): Defaults to 1.
meta ([type], optional): Defaults to None. The meta data will bind to the leaf node.
uniq (bool, optional): Defaults to True. The duplicated node edge will not add.
Returns:
tuple: 2-tuple, (leaf_node, is_new)
"""
node, is_new = build_tree(root, [(parsed_piece.piece, (parsed_piece, None))
for parsed_piece in parsed_pieces], count)
if uniq and not is_new:
node.incr_count(0 - count, True)
node.add_meta(meta)
return node, is_new
def build_from_piece_pattern_nodes(root, piece_pattern_nodes):
"""Build piece pattern tree from piece pattern tree edge.
Args:
root (PiecePatternNode): The root node of the a tree.
piece_pattern_nodes (sequence): piece pattern tree edge.
Returns:
tuple: 2-tuple, (leaf_node, is_new)
"""
last = piece_pattern_nodes[-1]
node, is_new = build_tree(root, [(p.piece, (p.parsed_piece, p.pattern))
for p in piece_pattern_nodes], last.count)
node.update_meta(last.meta)
return node, is_new | 0.939996 | 0.296591 |
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
import os
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn import metrics
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from sklearn.neural_network import MLPClassifier
from tflearn.layers.normalization import local_response_normalization
import tensorflow.contrib.learn as learn
import gensim
import re
from sklearn.metrics import classification_report
import xgboost as xgb
from sklearn import preprocessing
dga_file="/Users/jerrysong/Desktop/4/data/dga/dga.txt"
alexa_file="/Users/jerrysong/Desktop/4/data/dga/normal.csv"
def load_alexa():
x=[]
data = pd.read_csv(alexa_file, sep=",",header=None)
x=[i[1] for i in data.values]
return x
def load_dga():
x=[]
data = pd.read_csv(dga_file, sep="\t", header=None,
skiprows=18)
x=[i[1] for i in data.values]
return x
def get_feature_charseq():
alexa=load_alexa()
dga=load_dga()
x=alexa+dga
max_features=10000
y=[0]*len(alexa)+[1]*len(dga)
t=[]
for i in x:
v=[]
for j in range(0,len(i)):
v.append(ord(i[j]))
t.append(v)
x=t
x_train, x_test, y_train, y_test=train_test_split(x,y,test_size=0.4)
return x_train, x_test, y_train, y_test
def get_aeiou(domain):
count = len(re.findall(r'[aeiou]', domain.lower()))
#count = (0.0 + count) / len(domain)
return count
def get_uniq_char_num(domain):
count=len(set(domain))
#count=(0.0+count)/len(domain)
return count
def get_uniq_num_num(domain):
count = len(re.findall(r'[1234567890]', domain.lower()))
#count = (0.0 + count) / len(domain)
return count
def get_feature():
from sklearn import preprocessing
alexa=load_alexa()
dga=load_dga()
v=alexa+dga
y=[0]*len(alexa)+[1]*len(dga)
x=[]
for vv in v:
vvv=[get_aeiou(vv),get_uniq_char_num(vv),get_uniq_num_num(vv),len(vv)]
x.append(vvv)
x=preprocessing.scale(x)
x_train, x_test, y_train, y_test=train_test_split(x,y,test_size=0.4)
return x_train, x_test, y_train, y_test
def get_feature_2gram():
alexa=load_alexa()
dga=load_dga()
x=alexa+dga
max_features=10000
y=[0]*len(alexa)+[1]*len(dga)
CV = CountVectorizer(
ngram_range=(2, 2),
token_pattern=r'\w',
decode_error='ignore',
strip_accents='ascii',
max_features=max_features,
stop_words='english',
max_df=1.0,
min_df=1)
x = CV.fit_transform(x)
x_train, x_test, y_train, y_test=train_test_split(x,y,test_size=0.4)
return x_train.toarray(), x_test.toarray(), y_train, y_test
def do_nb(x_train, x_test, y_train, y_test):
gnb = GaussianNB()
gnb.fit(x_train,y_train)
y_pred=gnb.predict(x_test)
print(classification_report(y_test, y_pred))
print metrics.confusion_matrix(y_test, y_pred)
def do_xgboost(x_train, x_test, y_train, y_test):
xgb_model = xgb.XGBClassifier().fit(x_train, y_train)
y_pred = xgb_model.predict(x_test)
print(classification_report(y_test, y_pred))
print metrics.confusion_matrix(y_test, y_pred)
def do_mlp(x_train, x_test, y_train, y_test):
global max_features
# Building deep neural network
clf = MLPClassifier(solver='lbfgs',
alpha=1e-5,
hidden_layer_sizes = (5, 2),
random_state = 1)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
print(classification_report(y_test, y_pred))
print metrics.confusion_matrix(y_test, y_pred)
def do_rnn(trainX, testX, trainY, testY):
max_document_length=64
y_test=testY
trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)
# Network building
net = tflearn.input_data([None, max_document_length])
net = tflearn.embedding(net, input_dim=10240000, output_dim=64)
net = tflearn.lstm(net, 64, dropout=0.1)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, tensorboard_verbose=0,tensorboard_dir="dga_log")
model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
batch_size=10,run_id="dga",n_epoch=1)
y_predict_list = model.predict(testX)
y_predict = []
for i in y_predict_list:
print i[0]
if i[0] > 0.5:
y_predict.append(0)
else:
y_predict.append(1)
print(classification_report(y_test, y_predict))
print metrics.confusion_matrix(y_test, y_predict)
def get_feature_test(DGA,label):
result = []
result.append(DGA)
result.append(label)
result.append(get_aeiou(DGA))
result.append(get_uniq_char_num(DGA))
result.append(get_uniq_num_num(DGA))
return result
if __name__ == "__main__":
print "DGA Detector"
print "text feature & xgboost"
x_train, x_test, y_train, y_test = get_feature()
do_xgboost(x_train, x_test, y_train, y_test)
print "text feature & nb"
x_train, x_test, y_train, y_test = get_feature()
do_nb(x_train, x_test, y_train, y_test)
print "text feature & mlp"
x_train, x_test, y_train, y_test = get_feature()
do_mlp(x_train, x_test, y_train, y_test)
"""
xgb_model = xgb.XGBClassifier().fit(x_train, y_train)
y_pred = xgb_model.predict(x_test)
print(classification_report(y_test, y_pred))
print(metrics.confusion_matrix(y_test, y_pred))
test = "kajugcffktsgskchaym.infosec"
result = pd.DataFrame(columns=('domain', 'Label', 'aeiou', 'Num', 'jaccord'))
print(result)
results = get_feature_test(test, 1)
result.loc[0] = results
print(result)
result = result.drop(['domain', 'Label'], axis=1).values
print(result)
print xgb_model.predict(result)
print "text feature & nb"
x_train, x_test, y_train, y_test = get_feature()
do_nb(x_train, x_test, y_train, y_test)
print "text feature & xgboost"
x_train, x_test, y_train, y_test = get_feature()
do_xgboost(x_train, x_test, y_train, y_test)
print "text feature & mlp"
x_train, x_test, y_train, y_test = get_feature()
do_mlp(x_train, x_test, y_train, y_test)
print "charseq & rnn"
x_train, x_test, y_train, y_test = get_feature_charseq()
do_rnn(x_train, x_test, y_train, y_test)
print "2-gram & mlp"
x_train, x_test, y_train, y_test = get_feature_2gram()
do_mlp(x_train, x_test, y_train, y_test)
print "2-gram & XGBoost"
x_train, x_test, y_train, y_test = get_feature_2gram()
do_xgboost(x_train, x_test, y_train, y_test)
print "2-gram & nb"
x_train, x_test, y_train, y_test=get_feature_2gram()
do_nb(x_train, x_test, y_train, y_test)
""" | Homework/2019/Task4/4/Code/dga.py | import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
import os
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn import metrics
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from sklearn.neural_network import MLPClassifier
from tflearn.layers.normalization import local_response_normalization
import tensorflow.contrib.learn as learn
import gensim
import re
from sklearn.metrics import classification_report
import xgboost as xgb
from sklearn import preprocessing
dga_file="/Users/jerrysong/Desktop/4/data/dga/dga.txt"
alexa_file="/Users/jerrysong/Desktop/4/data/dga/normal.csv"
def load_alexa():
x=[]
data = pd.read_csv(alexa_file, sep=",",header=None)
x=[i[1] for i in data.values]
return x
def load_dga():
x=[]
data = pd.read_csv(dga_file, sep="\t", header=None,
skiprows=18)
x=[i[1] for i in data.values]
return x
def get_feature_charseq():
alexa=load_alexa()
dga=load_dga()
x=alexa+dga
max_features=10000
y=[0]*len(alexa)+[1]*len(dga)
t=[]
for i in x:
v=[]
for j in range(0,len(i)):
v.append(ord(i[j]))
t.append(v)
x=t
x_train, x_test, y_train, y_test=train_test_split(x,y,test_size=0.4)
return x_train, x_test, y_train, y_test
def get_aeiou(domain):
count = len(re.findall(r'[aeiou]', domain.lower()))
#count = (0.0 + count) / len(domain)
return count
def get_uniq_char_num(domain):
count=len(set(domain))
#count=(0.0+count)/len(domain)
return count
def get_uniq_num_num(domain):
count = len(re.findall(r'[1234567890]', domain.lower()))
#count = (0.0 + count) / len(domain)
return count
def get_feature():
from sklearn import preprocessing
alexa=load_alexa()
dga=load_dga()
v=alexa+dga
y=[0]*len(alexa)+[1]*len(dga)
x=[]
for vv in v:
vvv=[get_aeiou(vv),get_uniq_char_num(vv),get_uniq_num_num(vv),len(vv)]
x.append(vvv)
x=preprocessing.scale(x)
x_train, x_test, y_train, y_test=train_test_split(x,y,test_size=0.4)
return x_train, x_test, y_train, y_test
def get_feature_2gram():
alexa=load_alexa()
dga=load_dga()
x=alexa+dga
max_features=10000
y=[0]*len(alexa)+[1]*len(dga)
CV = CountVectorizer(
ngram_range=(2, 2),
token_pattern=r'\w',
decode_error='ignore',
strip_accents='ascii',
max_features=max_features,
stop_words='english',
max_df=1.0,
min_df=1)
x = CV.fit_transform(x)
x_train, x_test, y_train, y_test=train_test_split(x,y,test_size=0.4)
return x_train.toarray(), x_test.toarray(), y_train, y_test
def do_nb(x_train, x_test, y_train, y_test):
gnb = GaussianNB()
gnb.fit(x_train,y_train)
y_pred=gnb.predict(x_test)
print(classification_report(y_test, y_pred))
print metrics.confusion_matrix(y_test, y_pred)
def do_xgboost(x_train, x_test, y_train, y_test):
xgb_model = xgb.XGBClassifier().fit(x_train, y_train)
y_pred = xgb_model.predict(x_test)
print(classification_report(y_test, y_pred))
print metrics.confusion_matrix(y_test, y_pred)
def do_mlp(x_train, x_test, y_train, y_test):
global max_features
# Building deep neural network
clf = MLPClassifier(solver='lbfgs',
alpha=1e-5,
hidden_layer_sizes = (5, 2),
random_state = 1)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
print(classification_report(y_test, y_pred))
print metrics.confusion_matrix(y_test, y_pred)
def do_rnn(trainX, testX, trainY, testY):
max_document_length=64
y_test=testY
trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)
# Network building
net = tflearn.input_data([None, max_document_length])
net = tflearn.embedding(net, input_dim=10240000, output_dim=64)
net = tflearn.lstm(net, 64, dropout=0.1)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, tensorboard_verbose=0,tensorboard_dir="dga_log")
model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
batch_size=10,run_id="dga",n_epoch=1)
y_predict_list = model.predict(testX)
y_predict = []
for i in y_predict_list:
print i[0]
if i[0] > 0.5:
y_predict.append(0)
else:
y_predict.append(1)
print(classification_report(y_test, y_predict))
print metrics.confusion_matrix(y_test, y_predict)
def get_feature_test(DGA,label):
result = []
result.append(DGA)
result.append(label)
result.append(get_aeiou(DGA))
result.append(get_uniq_char_num(DGA))
result.append(get_uniq_num_num(DGA))
return result
if __name__ == "__main__":
print "DGA Detector"
print "text feature & xgboost"
x_train, x_test, y_train, y_test = get_feature()
do_xgboost(x_train, x_test, y_train, y_test)
print "text feature & nb"
x_train, x_test, y_train, y_test = get_feature()
do_nb(x_train, x_test, y_train, y_test)
print "text feature & mlp"
x_train, x_test, y_train, y_test = get_feature()
do_mlp(x_train, x_test, y_train, y_test)
"""
xgb_model = xgb.XGBClassifier().fit(x_train, y_train)
y_pred = xgb_model.predict(x_test)
print(classification_report(y_test, y_pred))
print(metrics.confusion_matrix(y_test, y_pred))
test = "kajugcffktsgskchaym.infosec"
result = pd.DataFrame(columns=('domain', 'Label', 'aeiou', 'Num', 'jaccord'))
print(result)
results = get_feature_test(test, 1)
result.loc[0] = results
print(result)
result = result.drop(['domain', 'Label'], axis=1).values
print(result)
print xgb_model.predict(result)
print "text feature & nb"
x_train, x_test, y_train, y_test = get_feature()
do_nb(x_train, x_test, y_train, y_test)
print "text feature & xgboost"
x_train, x_test, y_train, y_test = get_feature()
do_xgboost(x_train, x_test, y_train, y_test)
print "text feature & mlp"
x_train, x_test, y_train, y_test = get_feature()
do_mlp(x_train, x_test, y_train, y_test)
print "charseq & rnn"
x_train, x_test, y_train, y_test = get_feature_charseq()
do_rnn(x_train, x_test, y_train, y_test)
print "2-gram & mlp"
x_train, x_test, y_train, y_test = get_feature_2gram()
do_mlp(x_train, x_test, y_train, y_test)
print "2-gram & XGBoost"
x_train, x_test, y_train, y_test = get_feature_2gram()
do_xgboost(x_train, x_test, y_train, y_test)
print "2-gram & nb"
x_train, x_test, y_train, y_test=get_feature_2gram()
do_nb(x_train, x_test, y_train, y_test)
""" | 0.396535 | 0.419172 |
import sys, time, subprocess
from io import BytesIO
from PySide2 import QtCore, QtWidgets, QtGui
from PySide2.QtWidgets import QMainWindow, QInputDialog
from PySide2.QtCore import QSize
from PySide2.QtCore import QMimeData
from PySide2.QtGui import QDrag, QIcon
from PySide2.QtCore import Qt
from PySide2.QtCore import QIODevice, QByteArray, QBuffer
shared = {}
shared['cursor'] = QtGui.QCursor()
shared['curpos'] = None
class SnapWindow(QMainWindow):
def __init__(self, app, flags):
self.flag_snapped = False # Picture snapped or loaded at start.
self.flag_frame = True # Window frame toggling.
self.app = app
self.winsize = None # None-Conditional toggle on resize.
# Also the size the window should be.
QMainWindow.__init__(self)
self.dragicon = QIcon.fromTheme("folder-new").pixmap(QSize(24,24))
self.imgdata = None
self.setWindowTitle("Snap")
self.cliplabel = QtWidgets.QLabel(self)
self.cliplabel.show()
self.cliplabel.setScaledContents(True)
self.clip_pix = self.cliplabel.pixmap()
self.clipboard = app.clipboard()
self.clipboard.dataChanged.connect(self.clipboardChanged)
self.clipboard.clear(mode=self.clipboard.Clipboard)
p = self.palette()
p.setColor(self.backgroundRole(), QtCore.Qt.white)
self.setPalette(p)
self.hide()
def load_from_image(self):
self.flag_snapped = True
im = QtGui.QImage()
im.load(shared['inputfp'])
pm = QtGui.QPixmap().fromImage(im)
self.original_snap = pm.copy()
self.original_size = pm.width(), pm.height()
mpos = shared['curpos']
self.setGeometry(mpos.x(),mpos.y(), pm.width(), pm.height())
self.cliplabel.resize(pm.width(),pm.height())
self.cliplabel.setPixmap(pm)
self.show()
def clipboardChanged(self):
if self.flag_snapped == False:
self.clipboard = self.app.clipboard()
pm = self.clipboard.pixmap()
self.original_snap = pm.copy()
self.original_size = pm.width(), pm.height()
if pm.isNull():
pass
else:
self.flag_snapped = True
mpos = shared['curpos']
self.setGeometry(mpos.x()-pm.width(),mpos.y()-pm.height(), pm.width(), pm.height())
self.cliplabel.resize(pm.width(),pm.height())
self.cliplabel.setPixmap(pm)
self.show()
def resizeEvent(self, event):
super(SnapWindow, self).resizeEvent(event)
if self.winsize:
self.cliplabel.resize(self.savesize.width(),self.savesize.height())
self.resize(self.savesize.width(),self.savesize.height())
self.winsize = None
else:
self.cliplabel.resize(self.width(),self.height())
def scale_ratio(self):
winsize = QSize(self.width(),self.height())
new_size = QSize(self.original_size[0],self.original_size[1])
new_size.scale(winsize, QtCore.Qt.KeepAspectRatio)
self.cliplabel.resize(new_size)
self.resize(new_size)
def reset_size(self):
origsize = QSize(self.original_size[0],self.original_size[1])
self.cliplabel.resize(origsize)
self.cliplabel.setPixmap(self.original_snap)
self.resize(origsize)
def mousePressEvent(self, event):
super(SnapWindow, self).mousePressEvent(event)
if event.button() == QtCore.Qt.MouseButton.LeftButton:
self.scale_ratio()
self.QDrag = QDrag(self)
self.QMimeData = QMimeData()
self.imgdata = QByteArray()
imgbuffer = QBuffer(self.imgdata)
imgbuffer.open(QIODevice.WriteOnly)
self.original_snap.save(imgbuffer, "PNG")
self.QMimeData.setImageData(self.imgdata)
self.QMimeData.setData("image/png", self.imgdata)
self.QDrag.setMimeData(self.QMimeData)
self.QDrag.setPixmap(self.dragicon)
dropaction = self.QDrag.exec_(Qt.CopyAction)
def contextMenuEvent(self, event):
menu = QtWidgets.QMenu(self)
action_rename = menu.addAction("Rename")
action_clipboard = menu.addAction("Copy to Clipboard (Again)")
action_save = menu.addAction("Save Original")
action_reset = menu.addAction("Reset to Original")
action_frame = menu.addAction("Toggle Frame")
action_close = menu.addAction("Close")
action = menu.exec_(self.mapToGlobal(event.pos()))
if action == action_save:
self.save_copy()
elif action == action_clipboard:
self.clipboard.setPixmap(self.original_snap)
elif action == action_reset:
self.reset_size()
elif action == action_frame:
if self.flag_frame == True:
self.flag_frame = False
self.setWindowFlag(QtCore.Qt.FramelessWindowHint, True)
self.hide()
self.show()
self.winsize = self.size()
else:
self.flag_frame = True
self.setWindowFlag(QtCore.Qt.FramelessWindowHint, False)
self.hide()
self.show()
self.winsize = self.size()
self.reset_size()
elif action == action_close:
self.close()
elif action == action_rename:
name, tmp = QInputDialog.getText(self, "", "Name this window:")
self.setWindowTitle(name)
def save_copy(self):
fd = QtWidgets.QFileDialog()
fd.setDirectory(QtCore.QDir('~/'))
savefn = fd.getSaveFileName(self, 'Save File')[0]
pixmap = self.original_snap
barray = QtCore.QByteArray()
qbuffer = QtCore.QBuffer(barray)
qbuffer.open(QtCore.QIODevice.WriteOnly)
pixmap.save(qbuffer, "PNG")
bytesio = BytesIO(barray.data())
bytesio.seek(0)
with open(savefn, 'wb') as savefile:
savefile.write(bytesio.read())
def main():
app = QtWidgets.QApplication(sys.argv)
mainWin = SnapWindow(app, flags=None)
if len(sys.argv) < 2:
subprocess.call(["gnome-screenshot", "-c", "-a"])
shared['curpos'] = shared['cursor'].pos()
else:
shared['curpos'] = shared['cursor'].pos()
shared['inputfp'] = sys.argv[1]
print(shared['inputfp'])
mainWin.load_from_image()
sys.exit( app.exec_() )
if __name__ == '__main__':
main() | snapper.py |
import sys, time, subprocess
from io import BytesIO
from PySide2 import QtCore, QtWidgets, QtGui
from PySide2.QtWidgets import QMainWindow, QInputDialog
from PySide2.QtCore import QSize
from PySide2.QtCore import QMimeData
from PySide2.QtGui import QDrag, QIcon
from PySide2.QtCore import Qt
from PySide2.QtCore import QIODevice, QByteArray, QBuffer
shared = {}
shared['cursor'] = QtGui.QCursor()
shared['curpos'] = None
class SnapWindow(QMainWindow):
def __init__(self, app, flags):
self.flag_snapped = False # Picture snapped or loaded at start.
self.flag_frame = True # Window frame toggling.
self.app = app
self.winsize = None # None-Conditional toggle on resize.
# Also the size the window should be.
QMainWindow.__init__(self)
self.dragicon = QIcon.fromTheme("folder-new").pixmap(QSize(24,24))
self.imgdata = None
self.setWindowTitle("Snap")
self.cliplabel = QtWidgets.QLabel(self)
self.cliplabel.show()
self.cliplabel.setScaledContents(True)
self.clip_pix = self.cliplabel.pixmap()
self.clipboard = app.clipboard()
self.clipboard.dataChanged.connect(self.clipboardChanged)
self.clipboard.clear(mode=self.clipboard.Clipboard)
p = self.palette()
p.setColor(self.backgroundRole(), QtCore.Qt.white)
self.setPalette(p)
self.hide()
def load_from_image(self):
self.flag_snapped = True
im = QtGui.QImage()
im.load(shared['inputfp'])
pm = QtGui.QPixmap().fromImage(im)
self.original_snap = pm.copy()
self.original_size = pm.width(), pm.height()
mpos = shared['curpos']
self.setGeometry(mpos.x(),mpos.y(), pm.width(), pm.height())
self.cliplabel.resize(pm.width(),pm.height())
self.cliplabel.setPixmap(pm)
self.show()
def clipboardChanged(self):
if self.flag_snapped == False:
self.clipboard = self.app.clipboard()
pm = self.clipboard.pixmap()
self.original_snap = pm.copy()
self.original_size = pm.width(), pm.height()
if pm.isNull():
pass
else:
self.flag_snapped = True
mpos = shared['curpos']
self.setGeometry(mpos.x()-pm.width(),mpos.y()-pm.height(), pm.width(), pm.height())
self.cliplabel.resize(pm.width(),pm.height())
self.cliplabel.setPixmap(pm)
self.show()
def resizeEvent(self, event):
super(SnapWindow, self).resizeEvent(event)
if self.winsize:
self.cliplabel.resize(self.savesize.width(),self.savesize.height())
self.resize(self.savesize.width(),self.savesize.height())
self.winsize = None
else:
self.cliplabel.resize(self.width(),self.height())
def scale_ratio(self):
winsize = QSize(self.width(),self.height())
new_size = QSize(self.original_size[0],self.original_size[1])
new_size.scale(winsize, QtCore.Qt.KeepAspectRatio)
self.cliplabel.resize(new_size)
self.resize(new_size)
def reset_size(self):
origsize = QSize(self.original_size[0],self.original_size[1])
self.cliplabel.resize(origsize)
self.cliplabel.setPixmap(self.original_snap)
self.resize(origsize)
def mousePressEvent(self, event):
super(SnapWindow, self).mousePressEvent(event)
if event.button() == QtCore.Qt.MouseButton.LeftButton:
self.scale_ratio()
self.QDrag = QDrag(self)
self.QMimeData = QMimeData()
self.imgdata = QByteArray()
imgbuffer = QBuffer(self.imgdata)
imgbuffer.open(QIODevice.WriteOnly)
self.original_snap.save(imgbuffer, "PNG")
self.QMimeData.setImageData(self.imgdata)
self.QMimeData.setData("image/png", self.imgdata)
self.QDrag.setMimeData(self.QMimeData)
self.QDrag.setPixmap(self.dragicon)
dropaction = self.QDrag.exec_(Qt.CopyAction)
def contextMenuEvent(self, event):
menu = QtWidgets.QMenu(self)
action_rename = menu.addAction("Rename")
action_clipboard = menu.addAction("Copy to Clipboard (Again)")
action_save = menu.addAction("Save Original")
action_reset = menu.addAction("Reset to Original")
action_frame = menu.addAction("Toggle Frame")
action_close = menu.addAction("Close")
action = menu.exec_(self.mapToGlobal(event.pos()))
if action == action_save:
self.save_copy()
elif action == action_clipboard:
self.clipboard.setPixmap(self.original_snap)
elif action == action_reset:
self.reset_size()
elif action == action_frame:
if self.flag_frame == True:
self.flag_frame = False
self.setWindowFlag(QtCore.Qt.FramelessWindowHint, True)
self.hide()
self.show()
self.winsize = self.size()
else:
self.flag_frame = True
self.setWindowFlag(QtCore.Qt.FramelessWindowHint, False)
self.hide()
self.show()
self.winsize = self.size()
self.reset_size()
elif action == action_close:
self.close()
elif action == action_rename:
name, tmp = QInputDialog.getText(self, "", "Name this window:")
self.setWindowTitle(name)
def save_copy(self):
fd = QtWidgets.QFileDialog()
fd.setDirectory(QtCore.QDir('~/'))
savefn = fd.getSaveFileName(self, 'Save File')[0]
pixmap = self.original_snap
barray = QtCore.QByteArray()
qbuffer = QtCore.QBuffer(barray)
qbuffer.open(QtCore.QIODevice.WriteOnly)
pixmap.save(qbuffer, "PNG")
bytesio = BytesIO(barray.data())
bytesio.seek(0)
with open(savefn, 'wb') as savefile:
savefile.write(bytesio.read())
def main():
app = QtWidgets.QApplication(sys.argv)
mainWin = SnapWindow(app, flags=None)
if len(sys.argv) < 2:
subprocess.call(["gnome-screenshot", "-c", "-a"])
shared['curpos'] = shared['cursor'].pos()
else:
shared['curpos'] = shared['cursor'].pos()
shared['inputfp'] = sys.argv[1]
print(shared['inputfp'])
mainWin.load_from_image()
sys.exit( app.exec_() )
if __name__ == '__main__':
main() | 0.23855 | 0.106087 |
import os
import logging
import browser_cookie3
from pytconf import Config, ParamCreator
class ConfigLogging(Config):
"""
Parameters to control logging
"""
loglevel = ParamCreator.create_choice(
choice_list=[
logging.getLevelName(logging.NOTSET),
logging.getLevelName(logging.DEBUG),
logging.getLevelName(logging.INFO),
logging.getLevelName(logging.WARNING),
logging.getLevelName(logging.WARN),
logging.getLevelName(logging.ERROR),
logging.getLevelName(logging.FATAL),
logging.getLevelName(logging.CRITICAL),
],
help_string="What log level to use?",
default=logging.getLevelName(logging.INFO),
)
class ConfigDebugUrls(Config):
"""
Configure how to debug urls
"""
save = ParamCreator.create_bool(
help_string="Do you want to save urls?",
default=False,
)
class ConfigDownload(Config):
"""
Configure details about the download process
"""
download_as_collecting = ParamCreator.create_bool(
help_string="Do you want download while collecting the urls?",
default=False,
)
download = ParamCreator.create_bool(
help_string="really download or just print the urls?",
default=True,
)
folder = ParamCreator.create_existing_folder(
help_string="where to save the data to?",
default=".",
)
class ConfigCookiesSource(Config):
"""
Configure where to get cookies from
"""
browser = ParamCreator.create_choice(
choice_list=["none", "firefox", "chrome"],
help_string="Which browser to take cookies from?",
default="firefox",
)
def get_cookies():
if ConfigCookiesSource.browser == "firefox":
return browser_cookie3.firefox()
if ConfigCookiesSource.browser == "chrome":
return browser_cookie3.chrome()
raise ValueError(f"unsupported browser [{ConfigCookiesSource.browser}")
class ConfigSiteId(Config):
"""
Parameters for downloading workers
"""
site = ParamCreator.create_choice(
choice_list=["facebook", "instagram", "travelgirls", "vk", "mamba.ru"],
help_string="Which site to download from?",
)
user_id = ParamCreator.create_str(
help_string="""Which user id to user?
https://www.facebook.com/profile.php?id=[user_id]
https://www.instagram.com/[user_id]
http://www.travelgirls.com/member/[user_id]
https://vk.com/id[user_id]
http://www.mamba.ru/mb[user_id]""",
)
class ConfigPornhubSearch(Config):
"""
Parameters for search
"""
query = ParamCreator.create_str(
help_string="What is the query string?",
)
use_ordering = ParamCreator.create_bool(
help_string="use ordering in the search operation",
default=True,
)
ordering = ParamCreator.create_choice(
choice_list=["longest", "featured", "newest", "mostviewed", "rating"],
help_string="by which ordering to fetch result?",
default="longest",
)
use_period = ParamCreator.create_bool(
help_string="use period in the search operation",
default=False,
)
period = ParamCreator.create_choice(
choice_list=["weekly", "monthly", "alltime"],
help_string="what period to search?",
default="weekly",
)
use_tags = ParamCreator.create_bool(
help_string="should we use tags in search?",
default=False,
)
tags = ParamCreator.create_list_str(
help_string="tags to be used in search",
default=[],
)
literal = ParamCreator.create_str(
help_string="literal for tags (one character)",
default="f",
)
limit = ParamCreator.create_int_or_none(
help_string="Limit on search results or None for no limit",
default=100,
)
class ConfigYoutubeDl(Config):
"""
Configuration for youtube downloads
"""
use_archive = ParamCreator.create_bool(
help_string="Should we use an archive?",
default=True,
)
archive_file = ParamCreator.create_existing_file(
help_string="What file to use as archive?",
default=os.path.expanduser('~/.config/youtube-dl-archive'),
)
class ConfigUrl(Config):
"""
Parameters for what url to download
"""
url = ParamCreator.create_str(
help_string="url to download (e.g. https://www.pornhub.com/model/lily)"
)
class ConfigRequests(Config):
"""
Parameters to config the requests module
"""
connect_timeout = ParamCreator.create_int_or_none(
help_string="Timeout for connections in seconds (none means endless)",
default=5,
)
read_timeout = ParamCreator.create_int_or_none(
help_string="Timeout for reading in seconds (none means endless)",
default=5,
)
debug = ParamCreator.create_bool(
help_string="Do you want to debug the requests module?",
default=False,
) | pyscrapers/configs.py | import os
import logging
import browser_cookie3
from pytconf import Config, ParamCreator
class ConfigLogging(Config):
"""
Parameters to control logging
"""
loglevel = ParamCreator.create_choice(
choice_list=[
logging.getLevelName(logging.NOTSET),
logging.getLevelName(logging.DEBUG),
logging.getLevelName(logging.INFO),
logging.getLevelName(logging.WARNING),
logging.getLevelName(logging.WARN),
logging.getLevelName(logging.ERROR),
logging.getLevelName(logging.FATAL),
logging.getLevelName(logging.CRITICAL),
],
help_string="What log level to use?",
default=logging.getLevelName(logging.INFO),
)
class ConfigDebugUrls(Config):
"""
Configure how to debug urls
"""
save = ParamCreator.create_bool(
help_string="Do you want to save urls?",
default=False,
)
class ConfigDownload(Config):
"""
Configure details about the download process
"""
download_as_collecting = ParamCreator.create_bool(
help_string="Do you want download while collecting the urls?",
default=False,
)
download = ParamCreator.create_bool(
help_string="really download or just print the urls?",
default=True,
)
folder = ParamCreator.create_existing_folder(
help_string="where to save the data to?",
default=".",
)
class ConfigCookiesSource(Config):
"""
Configure where to get cookies from
"""
browser = ParamCreator.create_choice(
choice_list=["none", "firefox", "chrome"],
help_string="Which browser to take cookies from?",
default="firefox",
)
def get_cookies():
if ConfigCookiesSource.browser == "firefox":
return browser_cookie3.firefox()
if ConfigCookiesSource.browser == "chrome":
return browser_cookie3.chrome()
raise ValueError(f"unsupported browser [{ConfigCookiesSource.browser}")
class ConfigSiteId(Config):
"""
Parameters for downloading workers
"""
site = ParamCreator.create_choice(
choice_list=["facebook", "instagram", "travelgirls", "vk", "mamba.ru"],
help_string="Which site to download from?",
)
user_id = ParamCreator.create_str(
help_string="""Which user id to user?
https://www.facebook.com/profile.php?id=[user_id]
https://www.instagram.com/[user_id]
http://www.travelgirls.com/member/[user_id]
https://vk.com/id[user_id]
http://www.mamba.ru/mb[user_id]""",
)
class ConfigPornhubSearch(Config):
"""
Parameters for search
"""
query = ParamCreator.create_str(
help_string="What is the query string?",
)
use_ordering = ParamCreator.create_bool(
help_string="use ordering in the search operation",
default=True,
)
ordering = ParamCreator.create_choice(
choice_list=["longest", "featured", "newest", "mostviewed", "rating"],
help_string="by which ordering to fetch result?",
default="longest",
)
use_period = ParamCreator.create_bool(
help_string="use period in the search operation",
default=False,
)
period = ParamCreator.create_choice(
choice_list=["weekly", "monthly", "alltime"],
help_string="what period to search?",
default="weekly",
)
use_tags = ParamCreator.create_bool(
help_string="should we use tags in search?",
default=False,
)
tags = ParamCreator.create_list_str(
help_string="tags to be used in search",
default=[],
)
literal = ParamCreator.create_str(
help_string="literal for tags (one character)",
default="f",
)
limit = ParamCreator.create_int_or_none(
help_string="Limit on search results or None for no limit",
default=100,
)
class ConfigYoutubeDl(Config):
"""
Configuration for youtube downloads
"""
use_archive = ParamCreator.create_bool(
help_string="Should we use an archive?",
default=True,
)
archive_file = ParamCreator.create_existing_file(
help_string="What file to use as archive?",
default=os.path.expanduser('~/.config/youtube-dl-archive'),
)
class ConfigUrl(Config):
"""
Parameters for what url to download
"""
url = ParamCreator.create_str(
help_string="url to download (e.g. https://www.pornhub.com/model/lily)"
)
class ConfigRequests(Config):
"""
Parameters to config the requests module
"""
connect_timeout = ParamCreator.create_int_or_none(
help_string="Timeout for connections in seconds (none means endless)",
default=5,
)
read_timeout = ParamCreator.create_int_or_none(
help_string="Timeout for reading in seconds (none means endless)",
default=5,
)
debug = ParamCreator.create_bool(
help_string="Do you want to debug the requests module?",
default=False,
) | 0.555435 | 0.262192 |
r"""
This module provides argument manipulation functions like pop_arg.
"""
import gen_print as gp
import collections
def pop_arg(pop_arg_default=None, *args, **kwargs):
r"""
Pop a named argument from the args/kwargs and return a tuple consisting of the argument value, the
modified args and the modified kwargs.
The name of the argument is determined automatically by this function by examining the source code which
calls it (see examples below). If no suitable argument can be found, the default value passed to this
function will be returned as the argument value. This function is useful for wrapper functions that wish
to process arguments in some way before calling subordinate function.
Examples:
Given this code:
def func1(*args, **kwargs):
last_name, args, kwargs = pop_arg('Doe', *args, **kwargs)
some_function(last_name.capitalize(), *args, **kwargs)
Consider this call to func1:
func1('Johnson', ssn='111-11-1111')
The pop_arg in func1 would return the following:
'Johnson', [], {'ssn': "111-11-1111"}
Notice that the 'args' value returned is an empty list. Since last_name was assumed to be the first
positional argument, it was popped from args.
Now consider this call to func1:
func1(last_name='Johnson', ssn='111-11-1111')
The pop_arg in func1 would return the same last_name value as in the previous example. The only
difference being that the last_name value was popped from kwargs rather than from args.
Description of argument(s):
pop_arg_default The value to return if the named argument is not present in args/kwargs.
args The positional arguments passed to the calling function.
kwargs The keyword arguments passed to the calling function.
"""
# Retrieve the argument name by examining the source code.
arg_name = gp.get_arg_name(None, arg_num=-3, stack_frame_ix=2)
if arg_name in kwargs:
arg_value = kwargs.pop(arg_name)
else:
# Convert args from a tuple to a list.
args = list(args)
if args:
arg_value = args.pop(0)
else:
arg_value = pop_arg_default
return arg_value, args, kwargs
def source_to_object(value):
r"""
Evaluate string value as python source code and return the resulting object.
If value is NOT a string or can not be interpreted as a python source object definition, simply return
value.
The idea is to convert python object definition source code (e.g. for lists, dictionaries, tuples, etc.)
into an object.
Example:
Note that this first example is a special case in that it is a short-cut for specifying a
collections.OrderedDict.
result = source_to_object("[('one', 1), ('two', 2), ('three', 3)]")
The result is a collections.OrderedDict object:
result:
[one]: 1
[two]: 2
[three]: 3
This is a short-cut for the long form shown here:
result = source_to_object("collections.OrderedDict([
('one', 1),
('two', 2),
('three', 3)])")
Also note that support for this special-case short-cut precludes the possibility of interpreting such a
string as a list of tuples.
Example:
In this example, the result will be a list:
result = source_to_object("[1, 2, 3]")
result:
result[0]: 1
result[1]: 2
result[2]: 3
Example:
In this example, the value passed to this function is not a string, so it is simply returned.
result = source_to_object(1)
More examples:
result = source_to_object("dict(one=1, two=2, three=3)")
result = source_to_object("{'one':1, 'two':2, 'three':3}")
result = source_to_object(True)
etc.
Description of argument(s):
value If value is a string, it will be evaluated as a python statement. If the
statement is valid, the resulting object will be returned. In all other
cases, the value will simply be returned.
"""
if type(value) not in gp.get_string_types():
return value
# Strip white space prior to attempting to interpret the string as python code.
value = value.strip()
# Try special case of collections.OrderedDict which accepts a list of tuple pairs.
if value.startswith("[("):
try:
return eval("collections.OrderedDict(" + value + ")")
except (TypeError, NameError, ValueError):
pass
try:
return eval(value)
except (NameError, SyntaxError):
pass
return value
def args_to_objects(args):
r"""
Run source_to_object() on each element in args and return the result.
Description of argument(s):
args A type of dictionary, list, set, tuple or simple object whose elements
are to be converted via a call to source_to_object().
"""
type_of_dict = gp.is_dict(args)
if type_of_dict:
if type_of_dict == gp.dict_type():
return {k: source_to_object(v) for (k, v) in args.items()}
elif type_of_dict == gp.ordered_dict_type():
return collections.OrderedDict((k, v) for (k, v) in args.items())
elif type_of_dict == gp.dot_dict_type():
return DotDict((k, v) for (k, v) in args.items())
elif type_of_dict == gp.normalized_dict_type():
return NormalizedDict((k, v) for (k, v) in args.items())
# Assume args is list, tuple or set.
if type(args) in (list, set):
return [source_to_object(arg) for arg in args]
elif type(args) is tuple:
return tuple([source_to_object(arg) for arg in args])
return source_to_object(args) | lib/func_args.py |
r"""
This module provides argument manipulation functions like pop_arg.
"""
import gen_print as gp
import collections
def pop_arg(pop_arg_default=None, *args, **kwargs):
r"""
Pop a named argument from the args/kwargs and return a tuple consisting of the argument value, the
modified args and the modified kwargs.
The name of the argument is determined automatically by this function by examining the source code which
calls it (see examples below). If no suitable argument can be found, the default value passed to this
function will be returned as the argument value. This function is useful for wrapper functions that wish
to process arguments in some way before calling subordinate function.
Examples:
Given this code:
def func1(*args, **kwargs):
last_name, args, kwargs = pop_arg('Doe', *args, **kwargs)
some_function(last_name.capitalize(), *args, **kwargs)
Consider this call to func1:
func1('Johnson', ssn='111-11-1111')
The pop_arg in func1 would return the following:
'Johnson', [], {'ssn': "111-11-1111"}
Notice that the 'args' value returned is an empty list. Since last_name was assumed to be the first
positional argument, it was popped from args.
Now consider this call to func1:
func1(last_name='Johnson', ssn='111-11-1111')
The pop_arg in func1 would return the same last_name value as in the previous example. The only
difference being that the last_name value was popped from kwargs rather than from args.
Description of argument(s):
pop_arg_default The value to return if the named argument is not present in args/kwargs.
args The positional arguments passed to the calling function.
kwargs The keyword arguments passed to the calling function.
"""
# Retrieve the argument name by examining the source code.
arg_name = gp.get_arg_name(None, arg_num=-3, stack_frame_ix=2)
if arg_name in kwargs:
arg_value = kwargs.pop(arg_name)
else:
# Convert args from a tuple to a list.
args = list(args)
if args:
arg_value = args.pop(0)
else:
arg_value = pop_arg_default
return arg_value, args, kwargs
def source_to_object(value):
r"""
Evaluate string value as python source code and return the resulting object.
If value is NOT a string or can not be interpreted as a python source object definition, simply return
value.
The idea is to convert python object definition source code (e.g. for lists, dictionaries, tuples, etc.)
into an object.
Example:
Note that this first example is a special case in that it is a short-cut for specifying a
collections.OrderedDict.
result = source_to_object("[('one', 1), ('two', 2), ('three', 3)]")
The result is a collections.OrderedDict object:
result:
[one]: 1
[two]: 2
[three]: 3
This is a short-cut for the long form shown here:
result = source_to_object("collections.OrderedDict([
('one', 1),
('two', 2),
('three', 3)])")
Also note that support for this special-case short-cut precludes the possibility of interpreting such a
string as a list of tuples.
Example:
In this example, the result will be a list:
result = source_to_object("[1, 2, 3]")
result:
result[0]: 1
result[1]: 2
result[2]: 3
Example:
In this example, the value passed to this function is not a string, so it is simply returned.
result = source_to_object(1)
More examples:
result = source_to_object("dict(one=1, two=2, three=3)")
result = source_to_object("{'one':1, 'two':2, 'three':3}")
result = source_to_object(True)
etc.
Description of argument(s):
value If value is a string, it will be evaluated as a python statement. If the
statement is valid, the resulting object will be returned. In all other
cases, the value will simply be returned.
"""
if type(value) not in gp.get_string_types():
return value
# Strip white space prior to attempting to interpret the string as python code.
value = value.strip()
# Try special case of collections.OrderedDict which accepts a list of tuple pairs.
if value.startswith("[("):
try:
return eval("collections.OrderedDict(" + value + ")")
except (TypeError, NameError, ValueError):
pass
try:
return eval(value)
except (NameError, SyntaxError):
pass
return value
def args_to_objects(args):
r"""
Run source_to_object() on each element in args and return the result.
Description of argument(s):
args A type of dictionary, list, set, tuple or simple object whose elements
are to be converted via a call to source_to_object().
"""
type_of_dict = gp.is_dict(args)
if type_of_dict:
if type_of_dict == gp.dict_type():
return {k: source_to_object(v) for (k, v) in args.items()}
elif type_of_dict == gp.ordered_dict_type():
return collections.OrderedDict((k, v) for (k, v) in args.items())
elif type_of_dict == gp.dot_dict_type():
return DotDict((k, v) for (k, v) in args.items())
elif type_of_dict == gp.normalized_dict_type():
return NormalizedDict((k, v) for (k, v) in args.items())
# Assume args is list, tuple or set.
if type(args) in (list, set):
return [source_to_object(arg) for arg in args]
elif type(args) is tuple:
return tuple([source_to_object(arg) for arg in args])
return source_to_object(args) | 0.793186 | 0.607139 |
import abc
from typing import Dict, List
from uuid import uuid4
from wishlist.domain.product.adapters import (
CreateProductAdapter,
DeleteProductAdapter,
FindProductAdapter,
UpdateProductAdapter
)
from wishlist.domain.product.models import Product
class CreateProductPort(metaclass=abc.ABCMeta):
@abc.abstractmethod
async def create(self, product: Dict) -> Product:
pass # pragma: no-cover
class UpdateProductPort(metaclass=abc.ABCMeta):
@abc.abstractmethod
async def update(self, product: Dict) -> bool:
pass # pragma: no-cover
class FindProductPort(metaclass=abc.ABCMeta):
@abc.abstractmethod
async def find_one(self, query: Dict) -> Product:
pass # pragma: no-cover
@abc.abstractmethod
async def find_all(
self, query: Dict, page: int, size: int
) -> (Dict, List[Product]):
pass # pragma: no-cover
async def find_by_id(self, id_: str) -> Product:
return await self.find_one({
'id': id_
})
class DeleteProductPort(metaclass=abc.ABCMeta):
@abc.abstractmethod
async def delete(self, id_: str) -> int:
pass # pragma: no-cover
class CreateProduct(CreateProductPort):
def __init__(self, create_product_adapter: CreateProductAdapter):
self._create_product_adapter = create_product_adapter
async def create(self, product: Dict) -> Product:
product['id'] = str(uuid4())
return await self._create_product_adapter.create(product)
class UpdateProduct(UpdateProductPort):
def __init__(
self,
update_product_adapter: UpdateProductAdapter
):
self._update_product_adapter = update_product_adapter
async def update(self, product: Dict) -> bool:
return await self._update_product_adapter.update(product)
class FindProduct(FindProductPort):
def __init__(self, find_product_adapter: FindProductAdapter):
self._find_product_adapter = find_product_adapter
async def find_one(self, query: Dict) -> Product:
return await self._find_product_adapter.find_one(query)
async def find_all(
self, query: Dict, page: int, size: int
) -> (Dict, List[Product]):
return await self._find_product_adapter.find_all(query, page, size)
class DeleteProduct(DeleteProductPort):
def __init__(self, delete_product_adapter: DeleteProductAdapter):
self._delete_product_adapter = delete_product_adapter
async def delete(self, id_: str) -> int:
return await self._delete_product_adapter.delete(id_) | wishlist/domain/product/ports.py | import abc
from typing import Dict, List
from uuid import uuid4
from wishlist.domain.product.adapters import (
CreateProductAdapter,
DeleteProductAdapter,
FindProductAdapter,
UpdateProductAdapter
)
from wishlist.domain.product.models import Product
class CreateProductPort(metaclass=abc.ABCMeta):
@abc.abstractmethod
async def create(self, product: Dict) -> Product:
pass # pragma: no-cover
class UpdateProductPort(metaclass=abc.ABCMeta):
@abc.abstractmethod
async def update(self, product: Dict) -> bool:
pass # pragma: no-cover
class FindProductPort(metaclass=abc.ABCMeta):
@abc.abstractmethod
async def find_one(self, query: Dict) -> Product:
pass # pragma: no-cover
@abc.abstractmethod
async def find_all(
self, query: Dict, page: int, size: int
) -> (Dict, List[Product]):
pass # pragma: no-cover
async def find_by_id(self, id_: str) -> Product:
return await self.find_one({
'id': id_
})
class DeleteProductPort(metaclass=abc.ABCMeta):
@abc.abstractmethod
async def delete(self, id_: str) -> int:
pass # pragma: no-cover
class CreateProduct(CreateProductPort):
def __init__(self, create_product_adapter: CreateProductAdapter):
self._create_product_adapter = create_product_adapter
async def create(self, product: Dict) -> Product:
product['id'] = str(uuid4())
return await self._create_product_adapter.create(product)
class UpdateProduct(UpdateProductPort):
def __init__(
self,
update_product_adapter: UpdateProductAdapter
):
self._update_product_adapter = update_product_adapter
async def update(self, product: Dict) -> bool:
return await self._update_product_adapter.update(product)
class FindProduct(FindProductPort):
def __init__(self, find_product_adapter: FindProductAdapter):
self._find_product_adapter = find_product_adapter
async def find_one(self, query: Dict) -> Product:
return await self._find_product_adapter.find_one(query)
async def find_all(
self, query: Dict, page: int, size: int
) -> (Dict, List[Product]):
return await self._find_product_adapter.find_all(query, page, size)
class DeleteProduct(DeleteProductPort):
def __init__(self, delete_product_adapter: DeleteProductAdapter):
self._delete_product_adapter = delete_product_adapter
async def delete(self, id_: str) -> int:
return await self._delete_product_adapter.delete(id_) | 0.693265 | 0.126812 |
import logging
import sanic
from sanic.response import HTTPResponse, json
from internals.sanic import SpotilavaBlueprint, SpotilavaSanic
logger = logging.getLogger("Tidal.Playlists")
tidal_playlists_bp = SpotilavaBlueprint("tidal:playlists", url_prefix="/tidal/")
@tidal_playlists_bp.get("/album/<album_id>")
async def get_album_contents(request: sanic.Request, album_id: str) -> HTTPResponse:
app: SpotilavaSanic = request.app
logger.info(f"AlbumContents: Received request for album <{album_id}>")
if not app.tidal:
logger.warning(f"AlbumContents: Unable to fetch <{album_id}> because Tidal is not ready yet!")
return json({"error": "Tidal not connected.", "code": 500, "data": None}, status=500)
if not app.tidal.ready:
logger.warning(f"AlbumContents: Unable to fetch <{album_id}> because Tidal is not ready yet!")
return json({"error": "Tidal not connected.", "code": 500, "data": None}, status=500)
if not album_id.isalnum():
logger.warning(f"AlbumContents: Album <{album_id}> is invalid, expected alphanumeric, got {album_id} instead")
return json({"error": "Invalid album id, must be alphanumerical", "code": 400, "data": None}, status=500)
album_info = await app.tidal.get_album(album_id)
if album_info is None:
logger.warning(f"AlbumContents: Unable to find album <{album_id}>")
return json({"error": "Album not found.", "code": 404, "data": None}, status=404)
album_meta = album_info.to_json()
return json({"error": "Success", "code": 200, "data": album_meta})
@tidal_playlists_bp.get("/playlist/<playlist_id>")
async def get_playlist_contents(request: sanic.Request, playlist_id: str) -> HTTPResponse:
app: SpotilavaSanic = request.app
logger.info(f"PlaylistContents: Received request for album <{playlist_id}>")
if not app.tidal:
logger.warning(f"PlaylistContents: Unable to fetch <{playlist_id}> because Tidal is not ready yet!")
return json({"error": "Tidal not connected.", "code": 500, "data": None}, status=500)
if not app.tidal.ready:
logger.warning(f"PlaylistContents: Unable to fetch <{playlist_id}> because Tidal is not ready yet!")
return json({"error": "Tidal not connected.", "code": 500, "data": None}, status=500)
playlist_info = await app.tidal.get_playlists(playlist_id)
if playlist_info is None:
logger.warning(f"PlaylistContents: Unable to find album <{playlist_id}>")
return json({"error": "Album not found.", "code": 404, "data": None}, status=404)
playlist_meta = playlist_info.to_json()
return json({"error": "Success", "code": 200, "data": playlist_meta}) | routes/tidal/playlists.py | import logging
import sanic
from sanic.response import HTTPResponse, json
from internals.sanic import SpotilavaBlueprint, SpotilavaSanic
logger = logging.getLogger("Tidal.Playlists")
tidal_playlists_bp = SpotilavaBlueprint("tidal:playlists", url_prefix="/tidal/")
@tidal_playlists_bp.get("/album/<album_id>")
async def get_album_contents(request: sanic.Request, album_id: str) -> HTTPResponse:
app: SpotilavaSanic = request.app
logger.info(f"AlbumContents: Received request for album <{album_id}>")
if not app.tidal:
logger.warning(f"AlbumContents: Unable to fetch <{album_id}> because Tidal is not ready yet!")
return json({"error": "Tidal not connected.", "code": 500, "data": None}, status=500)
if not app.tidal.ready:
logger.warning(f"AlbumContents: Unable to fetch <{album_id}> because Tidal is not ready yet!")
return json({"error": "Tidal not connected.", "code": 500, "data": None}, status=500)
if not album_id.isalnum():
logger.warning(f"AlbumContents: Album <{album_id}> is invalid, expected alphanumeric, got {album_id} instead")
return json({"error": "Invalid album id, must be alphanumerical", "code": 400, "data": None}, status=500)
album_info = await app.tidal.get_album(album_id)
if album_info is None:
logger.warning(f"AlbumContents: Unable to find album <{album_id}>")
return json({"error": "Album not found.", "code": 404, "data": None}, status=404)
album_meta = album_info.to_json()
return json({"error": "Success", "code": 200, "data": album_meta})
@tidal_playlists_bp.get("/playlist/<playlist_id>")
async def get_playlist_contents(request: sanic.Request, playlist_id: str) -> HTTPResponse:
app: SpotilavaSanic = request.app
logger.info(f"PlaylistContents: Received request for album <{playlist_id}>")
if not app.tidal:
logger.warning(f"PlaylistContents: Unable to fetch <{playlist_id}> because Tidal is not ready yet!")
return json({"error": "Tidal not connected.", "code": 500, "data": None}, status=500)
if not app.tidal.ready:
logger.warning(f"PlaylistContents: Unable to fetch <{playlist_id}> because Tidal is not ready yet!")
return json({"error": "Tidal not connected.", "code": 500, "data": None}, status=500)
playlist_info = await app.tidal.get_playlists(playlist_id)
if playlist_info is None:
logger.warning(f"PlaylistContents: Unable to find album <{playlist_id}>")
return json({"error": "Album not found.", "code": 404, "data": None}, status=404)
playlist_meta = playlist_info.to_json()
return json({"error": "Success", "code": 200, "data": playlist_meta}) | 0.30632 | 0.149469 |
import os
import re
import numpy as np
import SimpleITK as sitk
import cv2
import torch
import random
from torch.utils.data import Dataset
from utils.project import proj_make_3dinput_v2
def threshold_CTA_mask(cta_image, HU_window=np.array([-263.,553.])):
th_cta_image = (cta_image - HU_window[0])/(HU_window[1] - HU_window[0])
th_cta_image[th_cta_image < 0] = 0
th_cta_image[th_cta_image > 1] = 1
th_cta_image_mask = th_cta_image
return th_cta_image_mask
class DSAReconDataset(Dataset):
""" 3D Reconstruction Dataset."""
def __init__(self, stage, num_views, input_path, last_path = None):
"""
Args:
stage (int): the number of stage of reconstruction network.
num_views (int): the number of views.
input_path (str): 2d input image and 2d label.
last_path (str, optional): the path where the output of the previous/last stage of the network is saved.
"""
self.stage = stage
self.input_path = input_path
self.last_path = last_path
self.num_views = num_views
dir = os.listdir(input_path)
for ii, i in enumerate(dir):
if not i.startswith('traingt'):
dir.pop(ii)
self.dir = dir
def __len__(self):
return len(self.dir)
def __getitem__(self, index):
if self.stage == 1:
size = [128, 256, 256]
crop_size = [32, 256, 256]
elif self.stage == 2:
size = [395, 512, 512]
crop_size = [32, 512, 512]
views = self.num_views
file_index = int(re.findall('(\d+)',self.dir[index])[-1])
# get ramdom crop
start_slice0 = random.randint(0, size[0] - crop_size[0])
end_slice0 = start_slice0 + crop_size[0]
start_slice1 = random.randint(0, size[1] - crop_size[1])
end_slice1 = start_slice1 + crop_size[1]
start_slice2 = random.randint(0, size[2] - crop_size[2])
end_slice2 = start_slice2 + crop_size[2]
start_slice = [start_slice0/size[0], start_slice1/size[1], start_slice2/size[2]]
crop_slice = [crop_size[0] / size[0], crop_size[1] / size[1], crop_size[2] / size[2]]
# load 2D projections and unproject to 3D input
perangle = 180/views
if self.stage == 1:
projs = np.zeros((views, crop_size[0], crop_size[1], crop_size[2]), dtype=np.float32)
elif self.stage > 1:
projs = np.zeros((views+1, crop_size[0], crop_size[1], crop_size[2]), dtype=np.float32)
image_array_proj = np.zeros((views, crop_size[0], crop_size[1]), dtype=np.float32)
for ii in range(views):
if self.stage == 1:
proj_temp = cv2.imread(self.input_path + '/traindata/'+str(views)+'view_low/train'+str(file_index)+'_'+str(ii)+'.jpg',0)
elif self.stage > 1:
proj_temp = cv2.imread(self.input_path + '/traindata/'+str(views)+'view/train'+str(file_index)+'_'+str(ii)+'.jpg',0)
proj_temp = proj_temp - np.min(proj_temp)
proj_temp = proj_temp / np.max(proj_temp)
projs[ii,:,:,:] = proj_make_3dinput_v2(proj_temp, perangle*ii+perangle, start_slice, crop_slice)
image_array_proj[ii,:,:] = proj_temp[start_slice0:end_slice0,:]
# use last stage output as input
if self.stage > 1:
assert self.last_path==None
image_nii = sitk.ReadImage(self.last_path + '/predict'+str(file_index)+'.nii.gz')
projs[views] = sitk.GetArrayFromImage(image_nii)[start_slice0:end_slice0, start_slice1:end_slice1, start_slice2:end_slice2]
image_array_proj = torch.from_numpy(image_array_proj).float()
projs = torch.from_numpy(projs).float()
return (projs, image_array_proj) | load_data.py | import os
import re
import numpy as np
import SimpleITK as sitk
import cv2
import torch
import random
from torch.utils.data import Dataset
from utils.project import proj_make_3dinput_v2
def threshold_CTA_mask(cta_image, HU_window=np.array([-263.,553.])):
th_cta_image = (cta_image - HU_window[0])/(HU_window[1] - HU_window[0])
th_cta_image[th_cta_image < 0] = 0
th_cta_image[th_cta_image > 1] = 1
th_cta_image_mask = th_cta_image
return th_cta_image_mask
class DSAReconDataset(Dataset):
""" 3D Reconstruction Dataset."""
def __init__(self, stage, num_views, input_path, last_path = None):
"""
Args:
stage (int): the number of stage of reconstruction network.
num_views (int): the number of views.
input_path (str): 2d input image and 2d label.
last_path (str, optional): the path where the output of the previous/last stage of the network is saved.
"""
self.stage = stage
self.input_path = input_path
self.last_path = last_path
self.num_views = num_views
dir = os.listdir(input_path)
for ii, i in enumerate(dir):
if not i.startswith('traingt'):
dir.pop(ii)
self.dir = dir
def __len__(self):
return len(self.dir)
def __getitem__(self, index):
if self.stage == 1:
size = [128, 256, 256]
crop_size = [32, 256, 256]
elif self.stage == 2:
size = [395, 512, 512]
crop_size = [32, 512, 512]
views = self.num_views
file_index = int(re.findall('(\d+)',self.dir[index])[-1])
# get ramdom crop
start_slice0 = random.randint(0, size[0] - crop_size[0])
end_slice0 = start_slice0 + crop_size[0]
start_slice1 = random.randint(0, size[1] - crop_size[1])
end_slice1 = start_slice1 + crop_size[1]
start_slice2 = random.randint(0, size[2] - crop_size[2])
end_slice2 = start_slice2 + crop_size[2]
start_slice = [start_slice0/size[0], start_slice1/size[1], start_slice2/size[2]]
crop_slice = [crop_size[0] / size[0], crop_size[1] / size[1], crop_size[2] / size[2]]
# load 2D projections and unproject to 3D input
perangle = 180/views
if self.stage == 1:
projs = np.zeros((views, crop_size[0], crop_size[1], crop_size[2]), dtype=np.float32)
elif self.stage > 1:
projs = np.zeros((views+1, crop_size[0], crop_size[1], crop_size[2]), dtype=np.float32)
image_array_proj = np.zeros((views, crop_size[0], crop_size[1]), dtype=np.float32)
for ii in range(views):
if self.stage == 1:
proj_temp = cv2.imread(self.input_path + '/traindata/'+str(views)+'view_low/train'+str(file_index)+'_'+str(ii)+'.jpg',0)
elif self.stage > 1:
proj_temp = cv2.imread(self.input_path + '/traindata/'+str(views)+'view/train'+str(file_index)+'_'+str(ii)+'.jpg',0)
proj_temp = proj_temp - np.min(proj_temp)
proj_temp = proj_temp / np.max(proj_temp)
projs[ii,:,:,:] = proj_make_3dinput_v2(proj_temp, perangle*ii+perangle, start_slice, crop_slice)
image_array_proj[ii,:,:] = proj_temp[start_slice0:end_slice0,:]
# use last stage output as input
if self.stage > 1:
assert self.last_path==None
image_nii = sitk.ReadImage(self.last_path + '/predict'+str(file_index)+'.nii.gz')
projs[views] = sitk.GetArrayFromImage(image_nii)[start_slice0:end_slice0, start_slice1:end_slice1, start_slice2:end_slice2]
image_array_proj = torch.from_numpy(image_array_proj).float()
projs = torch.from_numpy(projs).float()
return (projs, image_array_proj) | 0.532911 | 0.354042 |
from pyimagesearch import social_distancing_config as config
from pyimagesearch.detection import detect_people
from scipy.spatial import distance as dist
import numpy as np
import argparse
import imutils
import cv2
import os
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", type=str, default="",
help="path to (optional) input video file")
ap.add_argument("-o", "--output", type=str, default="",
help="path to (optional) output video file")
ap.add_argument("-d", "--display", type=int, default=1,
help="whether or not output frame should be displayed")
args = vars(ap.parse_args())
labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")
weightsPath = os.path.sep.join([config.MODEL_PATH, "yolov3.weights"])
configPath = os.path.sep.join([config.MODEL_PATH, "yolov3.cfg"])
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
if config.USE_GPU:
# set CUDA as the preferable backend and target
print("[INFO] setting preferable backend and target to CUDA...")
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
print("[INFO] accessing video stream...")
vs = cv2.VideoCapture(args["input"] if args["input"] else 0)
writer = None
while True:
(grabbed, frame) = vs.read()
if not grabbed:
break
frame = imutils.resize(frame, width=700)
results = detect_people(frame, net, ln,
personIdx=LABELS.index("person"))
violate = set()
if len(results) >= 2:
centroids = np.array([r[2] for r in results])
D = dist.cdist(centroids, centroids, metric="euclidean")
for i in range(0, D.shape[0]):
for j in range(i + 1, D.shape[1]):
if D[i, j] < config.MIN_DISTANCE:
# update our violation set with the indexes of
# the centroid pairs
violate.add(i)
violate.add(j)
for (i, (prob, bbox, centroid)) in enumerate(results):
(startX, startY, endX, endY) = bbox
(cX, cY) = centroid
color = (0, 255, 0)
if i in violate:
color = (0, 0, 255)
cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
cv2.circle(frame, (cX, cY), 5, color, 1)
text = "Social Distancing Violations: {}".format(len(violate))
cv2.putText(frame, text, (10, frame.shape[0] - 25),
cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)
if args["display"] > 0:
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
if args["output"] != "" and writer is None:
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter(args["output"], fourcc, 25,
(frame.shape[1], frame.shape[0]), True)
if writer is not None:
writer.write(frame) | src/SDD.py |
from pyimagesearch import social_distancing_config as config
from pyimagesearch.detection import detect_people
from scipy.spatial import distance as dist
import numpy as np
import argparse
import imutils
import cv2
import os
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", type=str, default="",
help="path to (optional) input video file")
ap.add_argument("-o", "--output", type=str, default="",
help="path to (optional) output video file")
ap.add_argument("-d", "--display", type=int, default=1,
help="whether or not output frame should be displayed")
args = vars(ap.parse_args())
labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")
weightsPath = os.path.sep.join([config.MODEL_PATH, "yolov3.weights"])
configPath = os.path.sep.join([config.MODEL_PATH, "yolov3.cfg"])
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
if config.USE_GPU:
# set CUDA as the preferable backend and target
print("[INFO] setting preferable backend and target to CUDA...")
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
print("[INFO] accessing video stream...")
vs = cv2.VideoCapture(args["input"] if args["input"] else 0)
writer = None
while True:
(grabbed, frame) = vs.read()
if not grabbed:
break
frame = imutils.resize(frame, width=700)
results = detect_people(frame, net, ln,
personIdx=LABELS.index("person"))
violate = set()
if len(results) >= 2:
centroids = np.array([r[2] for r in results])
D = dist.cdist(centroids, centroids, metric="euclidean")
for i in range(0, D.shape[0]):
for j in range(i + 1, D.shape[1]):
if D[i, j] < config.MIN_DISTANCE:
# update our violation set with the indexes of
# the centroid pairs
violate.add(i)
violate.add(j)
for (i, (prob, bbox, centroid)) in enumerate(results):
(startX, startY, endX, endY) = bbox
(cX, cY) = centroid
color = (0, 255, 0)
if i in violate:
color = (0, 0, 255)
cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
cv2.circle(frame, (cX, cY), 5, color, 1)
text = "Social Distancing Violations: {}".format(len(violate))
cv2.putText(frame, text, (10, frame.shape[0] - 25),
cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)
if args["display"] > 0:
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
if args["output"] != "" and writer is None:
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter(args["output"], fourcc, 25,
(frame.shape[1], frame.shape[0]), True)
if writer is not None:
writer.write(frame) | 0.352982 | 0.181336 |
from collections import defaultdict
import mock
from searx.engines import soundcloud
from searx.testing import SearxTestCase
from searx.url_utils import quote_plus
class TestSoundcloudEngine(SearxTestCase):
def test_request(self):
query = 'test_query'
dicto = defaultdict(dict)
dicto['pageno'] = 1
params = soundcloud.request(query, dicto)
self.assertIn('url', params)
self.assertIn(query, params['url'])
self.assertIn('soundcloud.com', params['url'])
def test_response(self):
self.assertRaises(AttributeError, soundcloud.response, None)
self.assertRaises(AttributeError, soundcloud.response, [])
self.assertRaises(AttributeError, soundcloud.response, '')
self.assertRaises(AttributeError, soundcloud.response, '[]')
response = mock.Mock(text='{}')
self.assertEqual(soundcloud.response(response), [])
response = mock.Mock(text='{"data": []}')
self.assertEqual(soundcloud.response(response), [])
json = """
{
"collection": [
{
"kind": "track",
"id": 159723640,
"created_at": "2014/07/22 00:51:21 +0000",
"user_id": 2976616,
"duration": 303780,
"commentable": true,
"state": "finished",
"original_content_size": 13236349,
"last_modified": "2015/01/31 15:14:50 +0000",
"sharing": "public",
"tag_list": "seekae flume",
"permalink": "seekae-test-recognise-flume-re-work",
"streamable": true,
"embeddable_by": "all",
"downloadable": true,
"purchase_url": "http://www.facebook.com/seekaemusic",
"label_id": null,
"purchase_title": "Seekae",
"genre": "freedownload",
"title": "This is the title",
"description": "This is the content",
"label_name": "Future Classic",
"release": "",
"track_type": "remix",
"key_signature": "",
"isrc": "",
"video_url": null,
"bpm": null,
"release_year": 2014,
"release_month": 7,
"release_day": 22,
"original_format": "mp3",
"license": "all-rights-reserved",
"uri": "https://api.soundcloud.com/tracks/159723640",
"user": {
"id": 2976616,
"kind": "user",
"permalink": "flume",
"username": "Flume",
"last_modified": "2014/11/24 19:21:29 +0000",
"uri": "https://api.soundcloud.com/users/2976616",
"permalink_url": "http://soundcloud.com/flume",
"avatar_url": "https://i1.sndcdn.com/avatars-000044475439-4zi7ii-large.jpg"
},
"permalink_url": "http://soundcloud.com/this.is.the.url",
"artwork_url": "https://i1.sndcdn.com/artworks-000085857162-xdxy5c-large.jpg",
"waveform_url": "https://w1.sndcdn.com/DWrL1lAN8BkP_m.png",
"stream_url": "https://api.soundcloud.com/tracks/159723640/stream",
"download_url": "https://api.soundcloud.com/tracks/159723640/download",
"playback_count": 2190687,
"download_count": 54856,
"favoritings_count": 49061,
"comment_count": 826,
"likes_count": 49061,
"reposts_count": 15910,
"attachments_uri": "https://api.soundcloud.com/tracks/159723640/attachments",
"policy": "ALLOW"
}
],
"total_results": 375750,
"next_href": "https://api.soundcloud.com/search?&q=test",
"tx_id": ""
}
"""
response = mock.Mock(text=json)
results = soundcloud.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['title'], 'This is the title')
self.assertEqual(results[0]['url'], 'http://soundcloud.com/this.is.the.url')
self.assertEqual(results[0]['content'], 'This is the content')
self.assertIn(quote_plus('https://api.soundcloud.com/tracks/159723640'), results[0]['embedded'])
json = """
{
"collection": [
{
"kind": "user",
"id": 159723640,
"created_at": "2014/07/22 00:51:21 +0000",
"user_id": 2976616,
"duration": 303780,
"commentable": true,
"state": "finished",
"original_content_size": 13236349,
"last_modified": "2015/01/31 15:14:50 +0000",
"sharing": "public",
"tag_list": "seekae flume",
"permalink": "seekae-test-recognise-flume-re-work",
"streamable": true,
"embeddable_by": "all",
"downloadable": true,
"purchase_url": "http://www.facebook.com/seekaemusic",
"label_id": null,
"purchase_title": "Seekae",
"genre": "freedownload",
"title": "This is the title",
"description": "This is the content",
"label_name": "Future Classic",
"release": "",
"track_type": "remix",
"key_signature": "",
"isrc": "",
"video_url": null,
"bpm": null,
"release_year": 2014,
"release_month": 7,
"release_day": 22,
"original_format": "mp3",
"license": "all-rights-reserved",
"uri": "https://api.soundcloud.com/tracks/159723640",
"user": {
"id": 2976616,
"kind": "user",
"permalink": "flume",
"username": "Flume",
"last_modified": "2014/11/24 19:21:29 +0000",
"uri": "https://api.soundcloud.com/users/2976616",
"permalink_url": "http://soundcloud.com/flume",
"avatar_url": "https://i1.sndcdn.com/avatars-000044475439-4zi7ii-large.jpg"
},
"permalink_url": "http://soundcloud.com/this.is.the.url",
"artwork_url": "https://i1.sndcdn.com/artworks-000085857162-xdxy5c-large.jpg",
"waveform_url": "https://w1.sndcdn.com/DWrL1lAN8BkP_m.png",
"stream_url": "https://api.soundcloud.com/tracks/159723640/stream",
"download_url": "https://api.soundcloud.com/tracks/159723640/download",
"playback_count": 2190687,
"download_count": 54856,
"favoritings_count": 49061,
"comment_count": 826,
"likes_count": 49061,
"reposts_count": 15910,
"attachments_uri": "https://api.soundcloud.com/tracks/159723640/attachments",
"policy": "ALLOW"
}
],
"total_results": 375750,
"next_href": "https://api.soundcloud.com/search?&q=test",
"tx_id": ""
}
"""
response = mock.Mock(text=json)
results = soundcloud.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0)
json = """
{
"collection": [],
"total_results": 375750,
"next_href": "https://api.soundcloud.com/search?&q=test",
"tx_id": ""
}
"""
response = mock.Mock(text=json)
results = soundcloud.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0) | Toolkits/Discovery/meta/searx/tests/unit/engines/test_soundcloud.py | from collections import defaultdict
import mock
from searx.engines import soundcloud
from searx.testing import SearxTestCase
from searx.url_utils import quote_plus
class TestSoundcloudEngine(SearxTestCase):
def test_request(self):
query = 'test_query'
dicto = defaultdict(dict)
dicto['pageno'] = 1
params = soundcloud.request(query, dicto)
self.assertIn('url', params)
self.assertIn(query, params['url'])
self.assertIn('soundcloud.com', params['url'])
def test_response(self):
self.assertRaises(AttributeError, soundcloud.response, None)
self.assertRaises(AttributeError, soundcloud.response, [])
self.assertRaises(AttributeError, soundcloud.response, '')
self.assertRaises(AttributeError, soundcloud.response, '[]')
response = mock.Mock(text='{}')
self.assertEqual(soundcloud.response(response), [])
response = mock.Mock(text='{"data": []}')
self.assertEqual(soundcloud.response(response), [])
json = """
{
"collection": [
{
"kind": "track",
"id": 159723640,
"created_at": "2014/07/22 00:51:21 +0000",
"user_id": 2976616,
"duration": 303780,
"commentable": true,
"state": "finished",
"original_content_size": 13236349,
"last_modified": "2015/01/31 15:14:50 +0000",
"sharing": "public",
"tag_list": "seekae flume",
"permalink": "seekae-test-recognise-flume-re-work",
"streamable": true,
"embeddable_by": "all",
"downloadable": true,
"purchase_url": "http://www.facebook.com/seekaemusic",
"label_id": null,
"purchase_title": "Seekae",
"genre": "freedownload",
"title": "This is the title",
"description": "This is the content",
"label_name": "Future Classic",
"release": "",
"track_type": "remix",
"key_signature": "",
"isrc": "",
"video_url": null,
"bpm": null,
"release_year": 2014,
"release_month": 7,
"release_day": 22,
"original_format": "mp3",
"license": "all-rights-reserved",
"uri": "https://api.soundcloud.com/tracks/159723640",
"user": {
"id": 2976616,
"kind": "user",
"permalink": "flume",
"username": "Flume",
"last_modified": "2014/11/24 19:21:29 +0000",
"uri": "https://api.soundcloud.com/users/2976616",
"permalink_url": "http://soundcloud.com/flume",
"avatar_url": "https://i1.sndcdn.com/avatars-000044475439-4zi7ii-large.jpg"
},
"permalink_url": "http://soundcloud.com/this.is.the.url",
"artwork_url": "https://i1.sndcdn.com/artworks-000085857162-xdxy5c-large.jpg",
"waveform_url": "https://w1.sndcdn.com/DWrL1lAN8BkP_m.png",
"stream_url": "https://api.soundcloud.com/tracks/159723640/stream",
"download_url": "https://api.soundcloud.com/tracks/159723640/download",
"playback_count": 2190687,
"download_count": 54856,
"favoritings_count": 49061,
"comment_count": 826,
"likes_count": 49061,
"reposts_count": 15910,
"attachments_uri": "https://api.soundcloud.com/tracks/159723640/attachments",
"policy": "ALLOW"
}
],
"total_results": 375750,
"next_href": "https://api.soundcloud.com/search?&q=test",
"tx_id": ""
}
"""
response = mock.Mock(text=json)
results = soundcloud.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['title'], 'This is the title')
self.assertEqual(results[0]['url'], 'http://soundcloud.com/this.is.the.url')
self.assertEqual(results[0]['content'], 'This is the content')
self.assertIn(quote_plus('https://api.soundcloud.com/tracks/159723640'), results[0]['embedded'])
json = """
{
"collection": [
{
"kind": "user",
"id": 159723640,
"created_at": "2014/07/22 00:51:21 +0000",
"user_id": 2976616,
"duration": 303780,
"commentable": true,
"state": "finished",
"original_content_size": 13236349,
"last_modified": "2015/01/31 15:14:50 +0000",
"sharing": "public",
"tag_list": "seekae flume",
"permalink": "seekae-test-recognise-flume-re-work",
"streamable": true,
"embeddable_by": "all",
"downloadable": true,
"purchase_url": "http://www.facebook.com/seekaemusic",
"label_id": null,
"purchase_title": "Seekae",
"genre": "freedownload",
"title": "This is the title",
"description": "This is the content",
"label_name": "Future Classic",
"release": "",
"track_type": "remix",
"key_signature": "",
"isrc": "",
"video_url": null,
"bpm": null,
"release_year": 2014,
"release_month": 7,
"release_day": 22,
"original_format": "mp3",
"license": "all-rights-reserved",
"uri": "https://api.soundcloud.com/tracks/159723640",
"user": {
"id": 2976616,
"kind": "user",
"permalink": "flume",
"username": "Flume",
"last_modified": "2014/11/24 19:21:29 +0000",
"uri": "https://api.soundcloud.com/users/2976616",
"permalink_url": "http://soundcloud.com/flume",
"avatar_url": "https://i1.sndcdn.com/avatars-000044475439-4zi7ii-large.jpg"
},
"permalink_url": "http://soundcloud.com/this.is.the.url",
"artwork_url": "https://i1.sndcdn.com/artworks-000085857162-xdxy5c-large.jpg",
"waveform_url": "https://w1.sndcdn.com/DWrL1lAN8BkP_m.png",
"stream_url": "https://api.soundcloud.com/tracks/159723640/stream",
"download_url": "https://api.soundcloud.com/tracks/159723640/download",
"playback_count": 2190687,
"download_count": 54856,
"favoritings_count": 49061,
"comment_count": 826,
"likes_count": 49061,
"reposts_count": 15910,
"attachments_uri": "https://api.soundcloud.com/tracks/159723640/attachments",
"policy": "ALLOW"
}
],
"total_results": 375750,
"next_href": "https://api.soundcloud.com/search?&q=test",
"tx_id": ""
}
"""
response = mock.Mock(text=json)
results = soundcloud.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0)
json = """
{
"collection": [],
"total_results": 375750,
"next_href": "https://api.soundcloud.com/search?&q=test",
"tx_id": ""
}
"""
response = mock.Mock(text=json)
results = soundcloud.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0) | 0.608012 | 0.404949 |
import os
import pybullet as p
from normalize_obj import normalize_one_obj
import sys
import subprocess
import json
from distutils.dir_util import copy_tree
ori_shapenet_dir = '/juno/group/linshao/ShapeNetCore'
shapenet_dir = '/scr1/yifan/shapenet_partial'
shapenet_new_dir = '/scr1/yifan/geo_data'
category_dict = {
'bag': '02773838',
'cap': '02954340',
'headphone': '03261776',
'knife': '03624134',
'mug': '03797390',
}
all_labels_dir = '/scr1/yifan/geo-hook/scripts/notes'
def process_one_category(categoy):
unzip_dir = os.path.join(shapenet_dir, category)
unzip_new_dir = os.path.join(shapenet_new_dir, category)
labels_dir = os.path.join(all_labels_dir, 'shapenet_labels_{}.txt'.format(category))
obj_id = 1
id_dict = {}
for obj_name in sorted(os.listdir(unzip_dir)):
obj_dir = os.path.join(unzip_dir, obj_name, 'model.obj')
obj_folder_dir = os.path.join(unzip_dir, obj_name)
obj_normalized_dir = os.path.join(unzip_dir, obj_name, 'model_normalized.obj')
obj_v_dir = os.path.join(unzip_dir, obj_name, 'model_normalized_v.obj')
if os.path.exists(obj_dir):
if not os.path.exists(obj_normalized_dir):
try:
normalize_one_obj(obj_dir, obj_normalized_dir)
except Exception as e:
continue
if not os.path.exists(obj_v_dir):
print('converting')
os.system('python to_vhacd.py {}'.format(obj_normalized_dir))
if os.path.exists(obj_v_dir):
print('success')
# p.vhacd(obj_normalized_dir, obj_v_dir,
# os.path.join(unzip_dir, obj_folder, 'vhacd_log.txt'))
obj_id_shapenet = obj_name[:-4]
obj_folder_new_dir = os.path.join(unzip_new_dir, str(obj_id))
os.makedirs(obj_folder_new_dir)
copy_tree(obj_folder_dir, obj_folder_new_dir)
print(obj_folder_new_dir)
id_dict[obj_id] = obj_id_shapenet
obj_id += 1
with open(labels_dir, 'w+') as f:
f.write(json.dumps(id_dict))
for category in category_dict.keys():
process_one_category(category) | src/scripts/process_shapenet.py | import os
import pybullet as p
from normalize_obj import normalize_one_obj
import sys
import subprocess
import json
from distutils.dir_util import copy_tree
ori_shapenet_dir = '/juno/group/linshao/ShapeNetCore'
shapenet_dir = '/scr1/yifan/shapenet_partial'
shapenet_new_dir = '/scr1/yifan/geo_data'
category_dict = {
'bag': '02773838',
'cap': '02954340',
'headphone': '03261776',
'knife': '03624134',
'mug': '03797390',
}
all_labels_dir = '/scr1/yifan/geo-hook/scripts/notes'
def process_one_category(categoy):
unzip_dir = os.path.join(shapenet_dir, category)
unzip_new_dir = os.path.join(shapenet_new_dir, category)
labels_dir = os.path.join(all_labels_dir, 'shapenet_labels_{}.txt'.format(category))
obj_id = 1
id_dict = {}
for obj_name in sorted(os.listdir(unzip_dir)):
obj_dir = os.path.join(unzip_dir, obj_name, 'model.obj')
obj_folder_dir = os.path.join(unzip_dir, obj_name)
obj_normalized_dir = os.path.join(unzip_dir, obj_name, 'model_normalized.obj')
obj_v_dir = os.path.join(unzip_dir, obj_name, 'model_normalized_v.obj')
if os.path.exists(obj_dir):
if not os.path.exists(obj_normalized_dir):
try:
normalize_one_obj(obj_dir, obj_normalized_dir)
except Exception as e:
continue
if not os.path.exists(obj_v_dir):
print('converting')
os.system('python to_vhacd.py {}'.format(obj_normalized_dir))
if os.path.exists(obj_v_dir):
print('success')
# p.vhacd(obj_normalized_dir, obj_v_dir,
# os.path.join(unzip_dir, obj_folder, 'vhacd_log.txt'))
obj_id_shapenet = obj_name[:-4]
obj_folder_new_dir = os.path.join(unzip_new_dir, str(obj_id))
os.makedirs(obj_folder_new_dir)
copy_tree(obj_folder_dir, obj_folder_new_dir)
print(obj_folder_new_dir)
id_dict[obj_id] = obj_id_shapenet
obj_id += 1
with open(labels_dir, 'w+') as f:
f.write(json.dumps(id_dict))
for category in category_dict.keys():
process_one_category(category) | 0.069542 | 0.05375 |
import numpy as np
import math
import mcdc_tnt
from timeit import default_timer as timer
def error(sim, bench):
error = np.linalg.norm(sim - bench) / np.linalg.norm(bench)
return(error)
if __name__ == '__main__':
print()
print('ATTENTION')
print('Entering Hardware Test Suite')
print('Ensure the proper conda enviorment is enabled')
print('Test Schedule ([x] will run, [c] can run (must be manually set)):')
print(' -[x] pure python')
print(' -[x] numba cpu')
print(' -[ ] numba gpu')
print(' -[c] pykokkos cpu')
print(' -[ ] pykokkos gpu')
print(' -[c] pyomp cpu')
print('This can take a while, recomended Pytest is not used')
print()
start_o = timer()
print('Entering Pure Python')
input_file = 'tc_1_pp.yaml'
output_file = 'pp.out'
start = timer()
mcdc_tnt.run(input_file, output_file, None)
end = timer()
time_pp = end-start
print()
print('Entering Numba CPU')
input_file = 'tc_1_numba_cpu.yaml'
output_file = 'numba_cpu.out'
start = timer()
mcdc_tnt.run(input_file, output_file, None)
end = timer()
time_nbc = end-start
#print()
#print('Entering Numba GPU')
#input_file = 'tc_1_numba_gpu.yaml'
#output_file = 'numba_gpu.out'
#start = timer()
#mcdc_tnt.run(input_file, output_file)
#end = timer()
#time_nbg = end-start
#print()
#print('Entering PyKokkos CPU')
#input_file = 'tc_1_pyk_cpu.yaml'
#output_file = 'pyk_cpu.out'
#start = timer()
#mcdc_tnt.run(input_file, output_file)
#end = timer()
#time_pykc = end-start
end_o = timer()
sf_actual = np.loadtxt('anwser.pout', comments='#', delimiter=',', skiprows=2)
sf_pp = np.loadtxt('pp.out', comments='#', delimiter=',', skiprows=2)
sf_nbc = np.loadtxt('numba_cpu.out', comments='#', delimiter=',', skiprows=2)
#sf_nbg = np.loadtxt('numba_gpu.out', comments='#', delimiter=',', skiprows=2)
#sf_pykc = np.loadtxt('pyk_cpu.out', comments='#', delimiter=',', skiprows=2)
assert(np.allclose(sf_actual[:,2], sf_pp[:,2], rtol=1e-01))
assert(np.allclose(sf_actual[:,2], sf_nbc[:,2], rtol=1e-01))
#assert(np.allclose(sf_actual[:,2], sf_nbg[:,2]))
#assert(np.allclose(sf_actual[:,2], sf_pykc[:,2], rtol=1e-01))
print()
print('Test Complete and all Passed!')
print('Total time to completion:')
print(' -pure python.....{0}'.format(time_pp))
print(' -numba cpu.......{0}'.format(time_nbc))
#print(' -numba gpu.......{0}'.format(time_nbg))
#print(' -pykokkos cpu....{0}'.format(time_pykc))
print()
print(' -total...........{0}'.format(end_o-start_o))
print()
print('Produced Errors Between Soultions')
print(' -pure python............{0}'.format(error(sf_actual, sf_pp)))
print(' -numba threading........{0}'.format(error(sf_actual, sf_nbc)))
#print(' -numba pyomp............{0}'.format(error(sf_actual, sf_pyomp)))
#print(' -pyk ompenmp............{0}'.format(error(sf_actual, sf_pykc)))
print()
import matplotlib.pyplot as plt
plt.figure(1)
f = plt.plot(sf_actual[:,1], sf_actual[:,2], '-b',
sf_pp[:,1], sf_pp[:,2], '-r',
sf_nbc[:,1], sf_nbc[:,2], 'g-')
plt.title("Scalar Flux")
plt.ylabel("$\phi [cm^{-2}s^{-1}]$")
plt.xlabel("x [cm]")
plt.legend(f, ['Actual','Pure Python','Numba CPU','Pyk CPU'])
plt.savefig('sflux.png', dpi=500, facecolor='w', edgecolor='k',orientation='portrait')
print('Flux figure printed to sflux.png')
print()
#sf_pykc[:,1], sf_pykc[:,2], 'k-')
print() | tests/integration/tests_hardware.py | import numpy as np
import math
import mcdc_tnt
from timeit import default_timer as timer
def error(sim, bench):
error = np.linalg.norm(sim - bench) / np.linalg.norm(bench)
return(error)
if __name__ == '__main__':
print()
print('ATTENTION')
print('Entering Hardware Test Suite')
print('Ensure the proper conda enviorment is enabled')
print('Test Schedule ([x] will run, [c] can run (must be manually set)):')
print(' -[x] pure python')
print(' -[x] numba cpu')
print(' -[ ] numba gpu')
print(' -[c] pykokkos cpu')
print(' -[ ] pykokkos gpu')
print(' -[c] pyomp cpu')
print('This can take a while, recomended Pytest is not used')
print()
start_o = timer()
print('Entering Pure Python')
input_file = 'tc_1_pp.yaml'
output_file = 'pp.out'
start = timer()
mcdc_tnt.run(input_file, output_file, None)
end = timer()
time_pp = end-start
print()
print('Entering Numba CPU')
input_file = 'tc_1_numba_cpu.yaml'
output_file = 'numba_cpu.out'
start = timer()
mcdc_tnt.run(input_file, output_file, None)
end = timer()
time_nbc = end-start
#print()
#print('Entering Numba GPU')
#input_file = 'tc_1_numba_gpu.yaml'
#output_file = 'numba_gpu.out'
#start = timer()
#mcdc_tnt.run(input_file, output_file)
#end = timer()
#time_nbg = end-start
#print()
#print('Entering PyKokkos CPU')
#input_file = 'tc_1_pyk_cpu.yaml'
#output_file = 'pyk_cpu.out'
#start = timer()
#mcdc_tnt.run(input_file, output_file)
#end = timer()
#time_pykc = end-start
end_o = timer()
sf_actual = np.loadtxt('anwser.pout', comments='#', delimiter=',', skiprows=2)
sf_pp = np.loadtxt('pp.out', comments='#', delimiter=',', skiprows=2)
sf_nbc = np.loadtxt('numba_cpu.out', comments='#', delimiter=',', skiprows=2)
#sf_nbg = np.loadtxt('numba_gpu.out', comments='#', delimiter=',', skiprows=2)
#sf_pykc = np.loadtxt('pyk_cpu.out', comments='#', delimiter=',', skiprows=2)
assert(np.allclose(sf_actual[:,2], sf_pp[:,2], rtol=1e-01))
assert(np.allclose(sf_actual[:,2], sf_nbc[:,2], rtol=1e-01))
#assert(np.allclose(sf_actual[:,2], sf_nbg[:,2]))
#assert(np.allclose(sf_actual[:,2], sf_pykc[:,2], rtol=1e-01))
print()
print('Test Complete and all Passed!')
print('Total time to completion:')
print(' -pure python.....{0}'.format(time_pp))
print(' -numba cpu.......{0}'.format(time_nbc))
#print(' -numba gpu.......{0}'.format(time_nbg))
#print(' -pykokkos cpu....{0}'.format(time_pykc))
print()
print(' -total...........{0}'.format(end_o-start_o))
print()
print('Produced Errors Between Soultions')
print(' -pure python............{0}'.format(error(sf_actual, sf_pp)))
print(' -numba threading........{0}'.format(error(sf_actual, sf_nbc)))
#print(' -numba pyomp............{0}'.format(error(sf_actual, sf_pyomp)))
#print(' -pyk ompenmp............{0}'.format(error(sf_actual, sf_pykc)))
print()
import matplotlib.pyplot as plt
plt.figure(1)
f = plt.plot(sf_actual[:,1], sf_actual[:,2], '-b',
sf_pp[:,1], sf_pp[:,2], '-r',
sf_nbc[:,1], sf_nbc[:,2], 'g-')
plt.title("Scalar Flux")
plt.ylabel("$\phi [cm^{-2}s^{-1}]$")
plt.xlabel("x [cm]")
plt.legend(f, ['Actual','Pure Python','Numba CPU','Pyk CPU'])
plt.savefig('sflux.png', dpi=500, facecolor='w', edgecolor='k',orientation='portrait')
print('Flux figure printed to sflux.png')
print()
#sf_pykc[:,1], sf_pykc[:,2], 'k-')
print() | 0.186947 | 0.197251 |
import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QVBoxLayout, QGridLayout, QLabel, QLineEdit, QToolButton, QPushButton)
from PyQt5.QtCore import Qt
class Login(QWidget):
def __init__(self):
super().__init__()
self.bodyLayout = QGridLayout()
# 欢迎登陆图书馆系统标题
self.titleText = QLabel(self)
self.titleText.setText('欢迎使用图书馆管理系统')
self.titleText.setAlignment(Qt.AlignCenter)
self.titleText.setFixedSize(480, 60)
# 账号标题
account = QLabel()
account.setText('账号')
# 密码标题
password = QLabel()
password.setText('密码')
# 学号输入框
self.accountInput = QLineEdit()
self.accountInput.setFixedSize(400, 50)
self.accountInput.setText('学号')
self.accountInput.setTextMargins(5, 5, 5, 5)
self.accountInput.mousePressEvent = lambda x: self.inputClick(self.accountInput)
# self.accountInput.setClearButtonEnabled(True)
# 密码输入框
self.passwordInput = QLineEdit()
self.passwordInput.setFixedSize(400, 50)
self.passwordInput.setText('******')
self.passwordInput.setTextMargins(5, 5, 5, 5)
self.passwordInput.mousePressEvent = lambda x: self.inputClick(self.passwordInput)
self.passwordInput.setEchoMode(QLineEdit.Password)
# self.passwordInput.setClearButtonEnabled(True)
# 注册按钮
self.signup = QPushButton()
self.signup.setText('注册')
self.signup.setFixedSize(40, 20)
# 登录按钮
self.loginButton = QToolButton()
self.loginButton.setText('登 录')
self.loginButton.setFixedSize(100, 60)
# 把上面定义的元素加入大框
self.inputBoxLayout = QVBoxLayout()
self.inputBoxLayout.addWidget(account)
self.inputBoxLayout.addWidget(self.accountInput)
self.inputBoxLayout.addWidget(password)
self.inputBoxLayout.addWidget(self.passwordInput)
self.inputBoxLayout.addWidget(self.signup)
self.inputBoxLayout.addWidget(self.loginButton)
# 下面一个大框
self.inputBox = QWidget()
self.inputBox.setObjectName('inputBox')
self.inputBox.setContentsMargins(30, 30, 30, 30)
self.inputBox.setFixedSize(480, 350)
self.inputBox.setLayout(self.inputBoxLayout)
# 把大标题和下面输入框加入self
self.bodyLayout.addWidget(self.titleText, 0, 0)
self.bodyLayout.addWidget(self.inputBox, 1, 0)
self.setLayout(self.bodyLayout)
self.setFixedSize(500, 450)
self.setMyStyle()
def inputClick(self, e):
if e.text() == '学号' or e.text() == '******':
e.setText('')
def setMyStyle(self):
self.setStyleSheet('''
QWidget{
background-color:white;
}
''')
self.titleText.setStyleSheet('''
*{
color: rgba(63, 101, 114);
width: 200px;
background-color: rgba(203, 231, 245, 1);
border: 1px solid rgba(220, 243, 249, 1);
border-radius: 10px;
}
''')
self.inputBox.setStyleSheet('''
QWidget#inputBox{
border-radius: 5px;
border: 1px solid rgba(229, 229, 229, 1);
}
QLineEdit{
color: grey;
border-radius: 5px;
border: 1px solid rgba(229, 229, 229, 1);
}
QToolButton{
border-radius: 10px;
background-color:rgba(52, 118, 176, 1);
color: white;
font-size: 25px;
font-family: 微软雅黑;
}
QPushButton{
color:blue;
font-weight:300;
border:0;
background-color:white;
}
''')
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Login()
ex.show()
sys.exit(app.exec_()) | model/login.py | import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QVBoxLayout, QGridLayout, QLabel, QLineEdit, QToolButton, QPushButton)
from PyQt5.QtCore import Qt
class Login(QWidget):
def __init__(self):
super().__init__()
self.bodyLayout = QGridLayout()
# 欢迎登陆图书馆系统标题
self.titleText = QLabel(self)
self.titleText.setText('欢迎使用图书馆管理系统')
self.titleText.setAlignment(Qt.AlignCenter)
self.titleText.setFixedSize(480, 60)
# 账号标题
account = QLabel()
account.setText('账号')
# 密码标题
password = QLabel()
password.setText('密码')
# 学号输入框
self.accountInput = QLineEdit()
self.accountInput.setFixedSize(400, 50)
self.accountInput.setText('学号')
self.accountInput.setTextMargins(5, 5, 5, 5)
self.accountInput.mousePressEvent = lambda x: self.inputClick(self.accountInput)
# self.accountInput.setClearButtonEnabled(True)
# 密码输入框
self.passwordInput = QLineEdit()
self.passwordInput.setFixedSize(400, 50)
self.passwordInput.setText('******')
self.passwordInput.setTextMargins(5, 5, 5, 5)
self.passwordInput.mousePressEvent = lambda x: self.inputClick(self.passwordInput)
self.passwordInput.setEchoMode(QLineEdit.Password)
# self.passwordInput.setClearButtonEnabled(True)
# 注册按钮
self.signup = QPushButton()
self.signup.setText('注册')
self.signup.setFixedSize(40, 20)
# 登录按钮
self.loginButton = QToolButton()
self.loginButton.setText('登 录')
self.loginButton.setFixedSize(100, 60)
# 把上面定义的元素加入大框
self.inputBoxLayout = QVBoxLayout()
self.inputBoxLayout.addWidget(account)
self.inputBoxLayout.addWidget(self.accountInput)
self.inputBoxLayout.addWidget(password)
self.inputBoxLayout.addWidget(self.passwordInput)
self.inputBoxLayout.addWidget(self.signup)
self.inputBoxLayout.addWidget(self.loginButton)
# 下面一个大框
self.inputBox = QWidget()
self.inputBox.setObjectName('inputBox')
self.inputBox.setContentsMargins(30, 30, 30, 30)
self.inputBox.setFixedSize(480, 350)
self.inputBox.setLayout(self.inputBoxLayout)
# 把大标题和下面输入框加入self
self.bodyLayout.addWidget(self.titleText, 0, 0)
self.bodyLayout.addWidget(self.inputBox, 1, 0)
self.setLayout(self.bodyLayout)
self.setFixedSize(500, 450)
self.setMyStyle()
def inputClick(self, e):
if e.text() == '学号' or e.text() == '******':
e.setText('')
def setMyStyle(self):
self.setStyleSheet('''
QWidget{
background-color:white;
}
''')
self.titleText.setStyleSheet('''
*{
color: rgba(63, 101, 114);
width: 200px;
background-color: rgba(203, 231, 245, 1);
border: 1px solid rgba(220, 243, 249, 1);
border-radius: 10px;
}
''')
self.inputBox.setStyleSheet('''
QWidget#inputBox{
border-radius: 5px;
border: 1px solid rgba(229, 229, 229, 1);
}
QLineEdit{
color: grey;
border-radius: 5px;
border: 1px solid rgba(229, 229, 229, 1);
}
QToolButton{
border-radius: 10px;
background-color:rgba(52, 118, 176, 1);
color: white;
font-size: 25px;
font-family: 微软雅黑;
}
QPushButton{
color:blue;
font-weight:300;
border:0;
background-color:white;
}
''')
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Login()
ex.show()
sys.exit(app.exec_()) | 0.255437 | 0.10307 |
def check_args(r, c, d, i, player):
if r > 2 or r < 0:
raise ValueError('Unknown row: ' + str(r))
if c > 2 or c < 0:
raise ValueError('Unknown column: ' + str(c))
if d > 1 or d < 0:
raise ValueError('Unknown diag: ' + str(d))
if i > 8 or i < 0:
raise ValueError('Unknown i: ' + str(i))
if player > 2 or player < 0:
raise ValueError('Unknown player: ' + str(player))
def line_count_player(line, player):
# Count how many times a player appears in a line (list)
count = 0
for c in line:
if c == player:
count += 1
return count
def line_get_empty_index(line):
# Get the index of the first empty place in a line (list)
for index, c in enumerate(line):
if c == 0:
return index
raise ValueError("Line not empty!")
def line_list_empty_index(line):
# Get all indexes of empty places in a line (list)
l = []
for index, c in enumerate(line):
if c == 0:
l.append(index)
return l
class board:
def __init__(self, board=[0, 0, 0, 0, 0, 0, 0, 0, 0]) -> None:
# The board:
# 0 1 2
# 3 4 5
# 6 7 8
self.board = board
# Player symbols:
self.p_sym = [' ', 'O', 'X']
# The indexes corresponding to the two diagonals
self.diag_index = [[0, 4, 8], [2, 4, 6]]
# The rows, columns, indexes, diags over which we can iterate:
self.rows = range(3)
self.columns = range(3)
self.positions = range(9)
self.diags = range(2)
def player_sym(self, p):
# Get the symbol corresponding to a given player
check_args(0, 0, 0, 0, p)
return self.p_sym[p]
def player_sym_i(self, i):
# Get the symbol for current state of the board at a index
check_args(0, 0, 0, i, 0)
return self.player_sym(self.get_i(i))
def set_i(self, i, player):
# Set the board at index i to a given player
check_args(0, 0, 0, i, player)
self.board[i] = player
def get_i(self, i):
# Get the board state at index i
check_args(0, 0, 0, i, 0)
return self.board[i]
def set_rc(self, r, c, player):
# Set the board at a given row/column location to a given player
check_args(r, c, 0, 0, player)
self.board[r*3+c] = player
def get_rc(self, r, c):
# Get the board at a given row/column
check_args(r, c, 0, 0, 0)
return self.board[r*3+c]
def set_dr(self, d, r, player):
# Set the board on a given diagonal in a given row to a given player
check_args(r, 0, d, 0, player)
self.set_i(self.diag_index[d][r], player)
def get_dr(self, d, r):
# Get the board on a given diagonal in a given row
check_args(r, 0, d, 0, 0)
return self.get_i(self.diag_index[d][r])
def get_r(self, r):
# Return a give row (as a list)
check_args(r, 0, 0, 0, 0)
return [self.get_rc(r, 0), self.get_rc(r, 1), self.get_rc(r, 2)]
def get_c(self, c):
# Return a given column (as a list)
check_args(0, c, 0, 0, 0)
return [self.get_rc(0, c), self.get_rc(1, c), self.get_rc(2, c)]
def get_d(self, d):
# Return a given diagonal (as a list)
check_args(0, 0, d, 0, 0)
return [self.get_dr(d, 0), self.get_dr(d, 1), self.get_dr(d, 2)]
def game_over(self):
# Check if the game is over
return self.game_state() != -1
def game_state(self):
# Get the current game state
# -1 : game in progress
# 0 : draw
# 1 : P1 won
# 2 : P2 won
for r in self.rows:
if line_count_player(self.get_r(r), 1) == 3:
return 1
if line_count_player(self.get_r(r), 2) == 3:
return 2
for c in self.columns:
if line_count_player(self.get_c(c), 1) == 3:
return 1
if line_count_player(self.get_c(c), 2) == 3:
return 2
for d in self.diags:
if line_count_player(self.get_d(d), 1) == 3:
return 1
if line_count_player(self.get_d(d), 2) == 3:
return 2
if line_count_player(self.board, 0) == 0:
return 0
return -1
def is_empty(self):
# Check if the board is empty
for c in self.board:
if c != ' ':
return False
return True
def __str__(self):
# Printable board state
v = ""
v += self.player_sym_i(0) + "|" + self.player_sym_i(1) + \
"|" + self.player_sym_i(2) + "\n"
v += "-+-+-\n"
v += self.player_sym_i(3) + "|" + self.player_sym_i(4) + \
"|" + self.player_sym_i(5) + "\n"
v += "-+-+-\n"
v += self.player_sym_i(6) + "|" + self.player_sym_i(7) + \
"|" + self.player_sym_i(8) + "\n"
return v | Scripts/Utils/board.py | def check_args(r, c, d, i, player):
if r > 2 or r < 0:
raise ValueError('Unknown row: ' + str(r))
if c > 2 or c < 0:
raise ValueError('Unknown column: ' + str(c))
if d > 1 or d < 0:
raise ValueError('Unknown diag: ' + str(d))
if i > 8 or i < 0:
raise ValueError('Unknown i: ' + str(i))
if player > 2 or player < 0:
raise ValueError('Unknown player: ' + str(player))
def line_count_player(line, player):
# Count how many times a player appears in a line (list)
count = 0
for c in line:
if c == player:
count += 1
return count
def line_get_empty_index(line):
# Get the index of the first empty place in a line (list)
for index, c in enumerate(line):
if c == 0:
return index
raise ValueError("Line not empty!")
def line_list_empty_index(line):
# Get all indexes of empty places in a line (list)
l = []
for index, c in enumerate(line):
if c == 0:
l.append(index)
return l
class board:
def __init__(self, board=[0, 0, 0, 0, 0, 0, 0, 0, 0]) -> None:
# The board:
# 0 1 2
# 3 4 5
# 6 7 8
self.board = board
# Player symbols:
self.p_sym = [' ', 'O', 'X']
# The indexes corresponding to the two diagonals
self.diag_index = [[0, 4, 8], [2, 4, 6]]
# The rows, columns, indexes, diags over which we can iterate:
self.rows = range(3)
self.columns = range(3)
self.positions = range(9)
self.diags = range(2)
def player_sym(self, p):
# Get the symbol corresponding to a given player
check_args(0, 0, 0, 0, p)
return self.p_sym[p]
def player_sym_i(self, i):
# Get the symbol for current state of the board at a index
check_args(0, 0, 0, i, 0)
return self.player_sym(self.get_i(i))
def set_i(self, i, player):
# Set the board at index i to a given player
check_args(0, 0, 0, i, player)
self.board[i] = player
def get_i(self, i):
# Get the board state at index i
check_args(0, 0, 0, i, 0)
return self.board[i]
def set_rc(self, r, c, player):
# Set the board at a given row/column location to a given player
check_args(r, c, 0, 0, player)
self.board[r*3+c] = player
def get_rc(self, r, c):
# Get the board at a given row/column
check_args(r, c, 0, 0, 0)
return self.board[r*3+c]
def set_dr(self, d, r, player):
# Set the board on a given diagonal in a given row to a given player
check_args(r, 0, d, 0, player)
self.set_i(self.diag_index[d][r], player)
def get_dr(self, d, r):
# Get the board on a given diagonal in a given row
check_args(r, 0, d, 0, 0)
return self.get_i(self.diag_index[d][r])
def get_r(self, r):
# Return a give row (as a list)
check_args(r, 0, 0, 0, 0)
return [self.get_rc(r, 0), self.get_rc(r, 1), self.get_rc(r, 2)]
def get_c(self, c):
# Return a given column (as a list)
check_args(0, c, 0, 0, 0)
return [self.get_rc(0, c), self.get_rc(1, c), self.get_rc(2, c)]
def get_d(self, d):
# Return a given diagonal (as a list)
check_args(0, 0, d, 0, 0)
return [self.get_dr(d, 0), self.get_dr(d, 1), self.get_dr(d, 2)]
def game_over(self):
# Check if the game is over
return self.game_state() != -1
def game_state(self):
# Get the current game state
# -1 : game in progress
# 0 : draw
# 1 : P1 won
# 2 : P2 won
for r in self.rows:
if line_count_player(self.get_r(r), 1) == 3:
return 1
if line_count_player(self.get_r(r), 2) == 3:
return 2
for c in self.columns:
if line_count_player(self.get_c(c), 1) == 3:
return 1
if line_count_player(self.get_c(c), 2) == 3:
return 2
for d in self.diags:
if line_count_player(self.get_d(d), 1) == 3:
return 1
if line_count_player(self.get_d(d), 2) == 3:
return 2
if line_count_player(self.board, 0) == 0:
return 0
return -1
def is_empty(self):
# Check if the board is empty
for c in self.board:
if c != ' ':
return False
return True
def __str__(self):
# Printable board state
v = ""
v += self.player_sym_i(0) + "|" + self.player_sym_i(1) + \
"|" + self.player_sym_i(2) + "\n"
v += "-+-+-\n"
v += self.player_sym_i(3) + "|" + self.player_sym_i(4) + \
"|" + self.player_sym_i(5) + "\n"
v += "-+-+-\n"
v += self.player_sym_i(6) + "|" + self.player_sym_i(7) + \
"|" + self.player_sym_i(8) + "\n"
return v | 0.601242 | 0.618809 |
import random
class Card:
"""Simple deck card instance,
number is 1 to 13 ( 13 being the king)
kind is Heards, Spades, Diamonds, Clubs """
kinds = {1: 'Heart', 2: 'Spade', 3: 'Diamond', 4: 'Club'}
numbers = {13: 'Ace', 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 'Jack', 11: 'Queen', 12: 'King'}
def __init__(self, number, kind):
if number in self.numbers:
self.number = number
else:
print("choice not in available choices. ")
if kind in self.kinds:
self.kind = kind
else:
print("Choices not in available choices. ")
def display(self):
kind = self.kinds.get(self.kind)
num = self.numbers.get(self.number)
# print("display")
print(" %s of %s" %(num, kind))
def get_kind(self, ):
kind = self.kinds.get(self.kind)
return kind
def get_number(self, ):
number = self.numbers.get(self.number)
return number
class CardDeck:
"""Contains 54 cards to have a complete Card Deck. """
# cards = []
def __init__(self, ):
self.cards = []
def add_card(self, card):
self.cards.append(card)
print("Card added to deck succesfully")
def shuffle(self):
random.shuffle(self.cards)
def standard_init(self):
"""Make a standard deck of card without the jockers."""
for num in range(13):
# print(num)
for kind in range(4):
# print(kind)
self.cards.append(Card(num+1, kind+1))
class CardGame:
"""docstring for CardGame"""
def __init__(self, card_deck, player1, player2):
self.card_deck = card_deck
self.player1 = player1
self.player2 = player2
def init_deal(self):
"""This will make the initialise deal and split the cards through players."""
k = 0
for card in self.card_deck.cards:
if (k % 2) == 0:
self.player1.cards.append(card)
else:
self.player2.cards.append(card)
k += 1
print("Init deal completed")
def show_details(self, temp_card_p1, temp_card_p2):
print("length player1 now :", len(self.player1.cards))
print("temp player1 now :", len(temp_card_p1))
print("length player2 now :", len(self.player2.cards))
print("temp player2 now :", len(temp_card_p2))
def play(self):
print("Starting the game, player1 is %s, player2 is %s." %(self.player1.name, self.player2.name))
temp_card_p1 = []
temp_card_p2 = []
temp_draw = []
round_no = 0
while True:
battle_list = []
print("Round #%s, %s has %s cards, %s has %s cards." %(round_no+1, self.player1.name, len(self.player1.cards), self.player2.name, len(self.player2.cards)))
# self.show_details(temp_card_p1, temp_card_p2)
random.shuffle(self.player1.cards)
random.shuffle(self.player2.cards)
if len(self.player1.cards) == len(self.player2.cards):
battle_list = zip(self.player1.cards, self.player2.cards)
self.player1.cards = []
self.player2.cards = []
elif len(self.player1.cards) > len(self.player2.cards):
battle_list = zip(self.player1.cards[0:len(self.player2.cards)], self.player2.cards)
self.player1.cards = self.player1.cards[len(self.player2.cards):]
self.player2.cards = []
elif len(self.player1.cards) < len(self.player2.cards):
battle_list = zip(self.player1.cards, self.player2.cards[0:len(self.player1.cards)])
self.player2.cards = self.player2.cards[len(self.player1.cards):]
self.player1.cards = []
for card_p1, card_p2 in battle_list:
# print("card_p1: ", card_p1)
# print("card_p2: ", card_p2)
# card_p1.display()
print("%s has %s of %s" %(self.player1.name, card_p1.get_number(), card_p1.get_kind()))
print("%s has %s of %s" %(self.player2.name, card_p2.get_number(), card_p2.get_kind()))
# card_p2.display()
if card_p1.number > card_p2.number:
print("%s wins! " %(self.player1.name))
temp_card_p1.append(card_p1)
temp_card_p1.append(card_p2)
if len(temp_draw) > 0:
temp_card_p1.extend(temp_draw)
temp_draw = []
# self.show_details(temp_card_p1, temp_card_p2)
elif card_p1.number < card_p2.number:
print("%s wins! " %(self.player2.name))
temp_card_p2.append(card_p1)
temp_card_p2.append(card_p2)
if len(temp_draw) > 0:
temp_card_p2.extend(temp_draw)
temp_draw = []
# self.show_details(temp_card_p1, temp_card_p2)
else:
print("Draw, keep going.")
temp_draw.append(card_p1)
temp_draw.append(card_p2)
# data = input("")
self.player1.cards.extend(temp_card_p1)
self.player2.cards.extend(temp_card_p2)
temp_card_p1 = []
temp_card_p2 = []
if (len(self.player1.cards) + len(temp_card_p1)) == 0:
print("%s wins the game!!!" %self.player2.name)
break
if (len(self.player2.cards) + len(temp_card_p2)) == 0:
print("%s wins the game!!!" %self.player1.name)
break
round_no += 1
class Player:
"""docstring for Player"""
def __init__(self, name):
self.cards = []
self.name = name
self.used_cards = []
if __name__ == "__main__":
deck = CardDeck()
deck.standard_init()
deck.shuffle()
player1 = Player("Francis")
player2 = Player("Ethan")
game = CardGame(deck, player1, player2)
game.init_deal()
game.play() | main.py |
import random
class Card:
"""Simple deck card instance,
number is 1 to 13 ( 13 being the king)
kind is Heards, Spades, Diamonds, Clubs """
kinds = {1: 'Heart', 2: 'Spade', 3: 'Diamond', 4: 'Club'}
numbers = {13: 'Ace', 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 'Jack', 11: 'Queen', 12: 'King'}
def __init__(self, number, kind):
if number in self.numbers:
self.number = number
else:
print("choice not in available choices. ")
if kind in self.kinds:
self.kind = kind
else:
print("Choices not in available choices. ")
def display(self):
kind = self.kinds.get(self.kind)
num = self.numbers.get(self.number)
# print("display")
print(" %s of %s" %(num, kind))
def get_kind(self, ):
kind = self.kinds.get(self.kind)
return kind
def get_number(self, ):
number = self.numbers.get(self.number)
return number
class CardDeck:
"""Contains 54 cards to have a complete Card Deck. """
# cards = []
def __init__(self, ):
self.cards = []
def add_card(self, card):
self.cards.append(card)
print("Card added to deck succesfully")
def shuffle(self):
random.shuffle(self.cards)
def standard_init(self):
"""Make a standard deck of card without the jockers."""
for num in range(13):
# print(num)
for kind in range(4):
# print(kind)
self.cards.append(Card(num+1, kind+1))
class CardGame:
"""docstring for CardGame"""
def __init__(self, card_deck, player1, player2):
self.card_deck = card_deck
self.player1 = player1
self.player2 = player2
def init_deal(self):
"""This will make the initialise deal and split the cards through players."""
k = 0
for card in self.card_deck.cards:
if (k % 2) == 0:
self.player1.cards.append(card)
else:
self.player2.cards.append(card)
k += 1
print("Init deal completed")
def show_details(self, temp_card_p1, temp_card_p2):
print("length player1 now :", len(self.player1.cards))
print("temp player1 now :", len(temp_card_p1))
print("length player2 now :", len(self.player2.cards))
print("temp player2 now :", len(temp_card_p2))
def play(self):
print("Starting the game, player1 is %s, player2 is %s." %(self.player1.name, self.player2.name))
temp_card_p1 = []
temp_card_p2 = []
temp_draw = []
round_no = 0
while True:
battle_list = []
print("Round #%s, %s has %s cards, %s has %s cards." %(round_no+1, self.player1.name, len(self.player1.cards), self.player2.name, len(self.player2.cards)))
# self.show_details(temp_card_p1, temp_card_p2)
random.shuffle(self.player1.cards)
random.shuffle(self.player2.cards)
if len(self.player1.cards) == len(self.player2.cards):
battle_list = zip(self.player1.cards, self.player2.cards)
self.player1.cards = []
self.player2.cards = []
elif len(self.player1.cards) > len(self.player2.cards):
battle_list = zip(self.player1.cards[0:len(self.player2.cards)], self.player2.cards)
self.player1.cards = self.player1.cards[len(self.player2.cards):]
self.player2.cards = []
elif len(self.player1.cards) < len(self.player2.cards):
battle_list = zip(self.player1.cards, self.player2.cards[0:len(self.player1.cards)])
self.player2.cards = self.player2.cards[len(self.player1.cards):]
self.player1.cards = []
for card_p1, card_p2 in battle_list:
# print("card_p1: ", card_p1)
# print("card_p2: ", card_p2)
# card_p1.display()
print("%s has %s of %s" %(self.player1.name, card_p1.get_number(), card_p1.get_kind()))
print("%s has %s of %s" %(self.player2.name, card_p2.get_number(), card_p2.get_kind()))
# card_p2.display()
if card_p1.number > card_p2.number:
print("%s wins! " %(self.player1.name))
temp_card_p1.append(card_p1)
temp_card_p1.append(card_p2)
if len(temp_draw) > 0:
temp_card_p1.extend(temp_draw)
temp_draw = []
# self.show_details(temp_card_p1, temp_card_p2)
elif card_p1.number < card_p2.number:
print("%s wins! " %(self.player2.name))
temp_card_p2.append(card_p1)
temp_card_p2.append(card_p2)
if len(temp_draw) > 0:
temp_card_p2.extend(temp_draw)
temp_draw = []
# self.show_details(temp_card_p1, temp_card_p2)
else:
print("Draw, keep going.")
temp_draw.append(card_p1)
temp_draw.append(card_p2)
# data = input("")
self.player1.cards.extend(temp_card_p1)
self.player2.cards.extend(temp_card_p2)
temp_card_p1 = []
temp_card_p2 = []
if (len(self.player1.cards) + len(temp_card_p1)) == 0:
print("%s wins the game!!!" %self.player2.name)
break
if (len(self.player2.cards) + len(temp_card_p2)) == 0:
print("%s wins the game!!!" %self.player1.name)
break
round_no += 1
class Player:
"""docstring for Player"""
def __init__(self, name):
self.cards = []
self.name = name
self.used_cards = []
if __name__ == "__main__":
deck = CardDeck()
deck.standard_init()
deck.shuffle()
player1 = Player("Francis")
player2 = Player("Ethan")
game = CardGame(deck, player1, player2)
game.init_deal()
game.play() | 0.235548 | 0.318697 |
from ChromeDriver import create_driver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from pyvirtualdisplay import Display
import os
import pickle
# TODO:
class Player:
def __init__(self):
self.actions = None
self.display = Display(visible=0, size=(1080, 1920))
self.display.start()
self.driver = create_driver()
self.driver.get("https://youtube.com")
self.url = "https://youtube.com"
self.has_playlist = False
self.has_cookies = self.check_credentials()
# Close loaded extension tab
self.driver.switch_to_window(self.driver.window_handles[1])
self.driver.close()
# Switch focus to first tab
self.driver.switch_to_window(self.driver.window_handles[0])
def check_credentials(self):
"""
Checks locally stored cookies
:return (bool):
"""
if os.path.isfile('cookies.pkl'):
with open('cookies.pkl', 'rb') as c:
cookies = pickle.load(c)
for cookie in cookies:
if 'expiry' in cookie:
del cookie['expiry']
self.driver.add_cookie(cookie)
return True
else:
return False
def auth(self):
"""
Prompts login if no login cookies are present
:return (int): Auth status: 0 Logged in; 1 Not logged in
"""
if self.has_cookies:
return 0
self.driver.find_element_by_xpath(r'//*[@id="buttons"]/ytd-button-renderer/a').click()
while self.driver.current_url != "https://www.youtube.com/":
continue
with open('cookies.pkl', 'wb') as c:
pickle.dump(self.driver.get_cookies(), c)
return 1
def search(self, song):
"""
Search and play given song on YouTube.
:param (str) song: Song name to be searched
:return:
"""
song = "+".join(song.split(' '))
driver = self.driver
driver.implicitly_wait(10)
self.url = "https://www.youtube.com/results?search_query="+song
driver.get(self.url)
driver.maximize_window()
driver.find_element_by_id("search-icon-legacy").click()
driver.find_element_by_class_name("style-scope ytd-video-renderer").click()
self.has_playlist = self.lookup_playlist()
return True
def get_song_title(self):
"""
Get currently playing song's title.
:return (str): Current song title
"""
info = self.driver.find_element_by_xpath(r'//*[@id="container"]/h1/yt-formatted-string').text
return info
def lookup_playlist(self):
"""
Checks if there is any associated playlist for the current song.
:return:
"""
try:
self.driver.find_element_by_class_name("style-scope ytd-compact-radio-renderer").click()
return True
except NoSuchElementException:
return False
def get_playlist(self):
"""
Get next 5 songs from playlist
:return (dict): Next 5 songs from associated playlist else None
"""
if not self.has_playlist:
return False
next5 = {}
for i in range(2,7):
link = self.driver.find_element_by_xpath(
r'/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div['
r'2]/div/ytd-playlist-panel-renderer/div/div[2]/ytd-playlist-panel-video-renderer['
+ str(i) + r']/a').get_attribute('href')
title = self.driver.find_element_by_xpath(
r'/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div['
r'2]/div/ytd-playlist-panel-renderer/div/div[2]/ytd-playlist-panel-video-renderer['
+ str(i) + r']/a/div/div[2]/h4/span').text
next5[link] = title
return next5
def action(self, key_signal):
"""
Control YouTube webplayer
:param (str) key_signal: Key combination string
:return:
"""
self.actions = ActionChains(self.driver)
self.actions.send_keys(key_signal)
self.actions.perform()
self.actions = None
def next(self):
"""
Play next song
:return:
"""
key_signal = Keys.LEFT_SHIFT + 'N'
self.action(key_signal)
def prev(self):
"""
Play previous song
:return:
"""
key_signal = Keys.LEFT_SHIFT + 'P'
self.action(key_signal)
def play_pause(self):
"""
Toggle play state
:return:
"""
key_signal = 'k'
self.action(key_signal)
def volume_up(self):
"""
Volume up
:return:
"""
key_signal = Keys.ARROW_UP
self.action(key_signal)
def volume_down(self):
"""
Volume down
:return:
"""
key_signal = Keys.ARROW_DOWN
self.action(key_signal)
def mute(self):
"""
Mute player
:return:
"""
key_signal = 'm'
self.action(key_signal)
def forward(self):
"""
Seek 5 seconds forward
:return:
"""
key_signal = Keys.ARROW_RIGHT
self.action(key_signal)
def backward(self):
"""
Seek 5 seconds back
:return:
"""
key_signal = Keys.ARROW_LEFT
self.action(key_signal)
def quit(self):
"""
Quits application
:return:
"""
self.display.stop()
self.driver.quit() | src/Player.py | from ChromeDriver import create_driver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from pyvirtualdisplay import Display
import os
import pickle
# TODO:
class Player:
def __init__(self):
self.actions = None
self.display = Display(visible=0, size=(1080, 1920))
self.display.start()
self.driver = create_driver()
self.driver.get("https://youtube.com")
self.url = "https://youtube.com"
self.has_playlist = False
self.has_cookies = self.check_credentials()
# Close loaded extension tab
self.driver.switch_to_window(self.driver.window_handles[1])
self.driver.close()
# Switch focus to first tab
self.driver.switch_to_window(self.driver.window_handles[0])
def check_credentials(self):
"""
Checks locally stored cookies
:return (bool):
"""
if os.path.isfile('cookies.pkl'):
with open('cookies.pkl', 'rb') as c:
cookies = pickle.load(c)
for cookie in cookies:
if 'expiry' in cookie:
del cookie['expiry']
self.driver.add_cookie(cookie)
return True
else:
return False
def auth(self):
"""
Prompts login if no login cookies are present
:return (int): Auth status: 0 Logged in; 1 Not logged in
"""
if self.has_cookies:
return 0
self.driver.find_element_by_xpath(r'//*[@id="buttons"]/ytd-button-renderer/a').click()
while self.driver.current_url != "https://www.youtube.com/":
continue
with open('cookies.pkl', 'wb') as c:
pickle.dump(self.driver.get_cookies(), c)
return 1
def search(self, song):
"""
Search and play given song on YouTube.
:param (str) song: Song name to be searched
:return:
"""
song = "+".join(song.split(' '))
driver = self.driver
driver.implicitly_wait(10)
self.url = "https://www.youtube.com/results?search_query="+song
driver.get(self.url)
driver.maximize_window()
driver.find_element_by_id("search-icon-legacy").click()
driver.find_element_by_class_name("style-scope ytd-video-renderer").click()
self.has_playlist = self.lookup_playlist()
return True
def get_song_title(self):
"""
Get currently playing song's title.
:return (str): Current song title
"""
info = self.driver.find_element_by_xpath(r'//*[@id="container"]/h1/yt-formatted-string').text
return info
def lookup_playlist(self):
"""
Checks if there is any associated playlist for the current song.
:return:
"""
try:
self.driver.find_element_by_class_name("style-scope ytd-compact-radio-renderer").click()
return True
except NoSuchElementException:
return False
def get_playlist(self):
"""
Get next 5 songs from playlist
:return (dict): Next 5 songs from associated playlist else None
"""
if not self.has_playlist:
return False
next5 = {}
for i in range(2,7):
link = self.driver.find_element_by_xpath(
r'/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div['
r'2]/div/ytd-playlist-panel-renderer/div/div[2]/ytd-playlist-panel-video-renderer['
+ str(i) + r']/a').get_attribute('href')
title = self.driver.find_element_by_xpath(
r'/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div['
r'2]/div/ytd-playlist-panel-renderer/div/div[2]/ytd-playlist-panel-video-renderer['
+ str(i) + r']/a/div/div[2]/h4/span').text
next5[link] = title
return next5
def action(self, key_signal):
"""
Control YouTube webplayer
:param (str) key_signal: Key combination string
:return:
"""
self.actions = ActionChains(self.driver)
self.actions.send_keys(key_signal)
self.actions.perform()
self.actions = None
def next(self):
"""
Play next song
:return:
"""
key_signal = Keys.LEFT_SHIFT + 'N'
self.action(key_signal)
def prev(self):
"""
Play previous song
:return:
"""
key_signal = Keys.LEFT_SHIFT + 'P'
self.action(key_signal)
def play_pause(self):
"""
Toggle play state
:return:
"""
key_signal = 'k'
self.action(key_signal)
def volume_up(self):
"""
Volume up
:return:
"""
key_signal = Keys.ARROW_UP
self.action(key_signal)
def volume_down(self):
"""
Volume down
:return:
"""
key_signal = Keys.ARROW_DOWN
self.action(key_signal)
def mute(self):
"""
Mute player
:return:
"""
key_signal = 'm'
self.action(key_signal)
def forward(self):
"""
Seek 5 seconds forward
:return:
"""
key_signal = Keys.ARROW_RIGHT
self.action(key_signal)
def backward(self):
"""
Seek 5 seconds back
:return:
"""
key_signal = Keys.ARROW_LEFT
self.action(key_signal)
def quit(self):
"""
Quits application
:return:
"""
self.display.stop()
self.driver.quit() | 0.358915 | 0.145661 |
import argparse
import ast
import itertools
import sys
import tokenize
from typing import Tuple, Iterable, Union, List, cast
import flake8.options.manager
ComprehensionType = Union[
ast.ListComp, ast.SetComp, ast.DictComp, ast.GeneratorExp
]
DEFAULT_SELECT = [
"C2000",
"C2001",
"C2002",
"C2020",
"C2021",
"C2023",
]
PYTHON_36 = sys.version_info >= (3, 6)
PYTHON_37 = sys.version_info >= (3, 7)
PYTHON_38 = sys.version_info >= (3, 8)
PYTHON_39 = sys.version_info >= (3, 9)
class MCCChecker:
"""
A flake8 plugin to make sure complex conditional expressions and comprehension expressions are split over several
lines.
"""
name = "flake8-multiline-conditionals-comprehensions"
version = "1.1"
enabled_errors = []
def __init__(self, tree: ast.AST, file_tokens: List[tokenize.TokenInfo]):
self.tree = tree
self.tokens = file_tokens
@staticmethod
def add_options(option_manager: flake8.options.manager.OptionManager):
option_manager.add_option(
"--select_c20",
type=str,
comma_separated_list=True,
default=DEFAULT_SELECT,
parse_from_config=True,
help="Error types to use. Default: %(default)s",
)
@staticmethod
def parse_options(
option_manager: flake8.options.manager.OptionManager,
options: argparse.Namespace,
extra_args,
):
MCCChecker.enabled_errors = [
int(option[1:]) for option in options.select_c20
]
def _get_tokens_with_surrounding(
self, node: ast.AST, margin: int
) -> Iterable[tokenize.TokenInfo]:
start_index, end_index = None, None
for i, token in enumerate(self.tokens):
token_line, token_col = token.start
if (
token_line > lineno(node)
or (
token_line == lineno(node) and token_col >= col_offset(node)
)
) and (
token_line < end_lineno(node)
or (
token_line == end_lineno(node)
and token_col <= end_col_offset(node)
)
):
if start_index is None:
start_index = i
else:
if end_index is None and start_index is not None:
end_index = i
break
return self.tokens[start_index - margin : end_index + margin]
def run(self) -> Iterable[Tuple[int, int, str, type]]:
for node in ast.walk(self.tree):
if any(
isinstance(node, comp)
for comp in [
ast.ListComp,
ast.SetComp,
ast.DictComp,
ast.GeneratorExp,
]
):
if 2000 in MCCChecker.enabled_errors:
yield from _c2000(cast(ComprehensionType, node))
if 2001 in MCCChecker.enabled_errors:
yield from _c2001(cast(ComprehensionType, node))
if 2002 in MCCChecker.enabled_errors:
yield from _c2002(cast(ComprehensionType, node))
if 2003 in MCCChecker.enabled_errors:
yield from _c2003(cast(ComprehensionType, node))
if 2004 in MCCChecker.enabled_errors:
yield from _c2004(cast(ComprehensionType, node))
if isinstance(node, ast.Assign) and isinstance(
node.value, ast.IfExp
):
if 2021 in MCCChecker.enabled_errors:
yield from _c2021(
node,
list(self._get_tokens_with_surrounding(node.value, 1)),
)
if isinstance(node, ast.IfExp):
if 2020 in MCCChecker.enabled_errors:
yield from _c2020(node)
if 2022 in MCCChecker.enabled_errors:
yield from _c2022(node)
if 2023 in MCCChecker.enabled_errors:
yield from _c2023(node)
if 2024 in MCCChecker.enabled_errors:
yield from _c2024(node)
if 2025 in MCCChecker.enabled_errors:
yield from _c2025(node)
ERROR_MESSAGES = {
2000: "Generators in comprehension expression are on the same line.",
2001: "Different segments of a comprehension expression share a line.",
2002: "Multiple filter segments within a single comprehension expression.",
2003: "Multiline comprehension expression are forbidden.",
2004: "Singleline comprehension expression are forbidden.",
2020: "Different segments of a conditional expression share a line.",
2021: "Conditional expression used for assignment not surrounded by parantheses.",
2022: "Nested conditional expressions are forbidden.",
2023: "Multiline conditional expression are forbidden.",
2024: "Singleline conditional expression are forbidden.",
2025: "Conditional expressions are forbidden.",
}
def lineno(node: ast.AST):
return node.lineno
def end_lineno(node: ast.AST):
if PYTHON_38:
return node.end_lineno
else:
return max(
ancestor.lineno
for ancestor in ast.walk(node)
if hasattr(ancestor, "lineno")
)
def col_offset(node: ast.AST):
return node.col_offset
def end_col_offset(node: ast.AST):
if PYTHON_38:
return node.end_col_offset
else:
return max(
ancestor.col_offset
for ancestor in ast.walk(node)
if hasattr(ancestor, "col_offset")
)
def _error_tuple(error_code: int, node: ast.AST) -> Tuple[int, int, str, type]:
return (
lineno(node),
col_offset(node),
f"C{error_code} {ERROR_MESSAGES[error_code]}",
MCCChecker,
)
def _c2000(node: ComprehensionType) -> Iterable[Tuple[int, int, str, type]]:
"""
A comprehension expression should place each of its generators on a separate line.
"""
for generator1, generator2 in itertools.combinations(node.generators, 2):
if lineno(generator1.target) <= lineno(generator2.target) <= end_lineno(
generator1.iter
) or lineno(generator2.target) <= lineno(
generator1.target
) <= end_lineno(
generator2.iter
):
yield _error_tuple(2000, node)
def _c2001(node: ComprehensionType) -> Iterable[Tuple[int, int, str, type]]:
"""
A multiline comprehension expression should place each of its segments (map, generator, filter) on a separate line.
"""
if lineno(node) == end_lineno(node):
return () # single line comprehension
seen_line_nos = set()
for generator in node.generators:
if lineno(generator.target) in seen_line_nos:
yield _error_tuple(2001, generator.target)
seen_line_nos.add(lineno(generator.target))
for if_clause in generator.ifs:
if lineno(if_clause) in seen_line_nos:
yield _error_tuple(2001, if_clause)
seen_line_nos.add(lineno(if_clause))
if isinstance(node, ast.DictComp):
if lineno(node.value) in seen_line_nos:
yield _error_tuple(2001, node.key)
seen_line_nos.add(lineno(node.value))
else:
if lineno(node.elt) in seen_line_nos:
yield _error_tuple(2001, node.elt)
seen_line_nos.add(lineno(node.elt))
def _c2002(node: ComprehensionType) -> Iterable[Tuple[int, int, str, type]]:
"""
A comprehension expression should not contain multiple filters.
"""
ifs_seen = 0
for generator in node.generators:
for if_clause in generator.ifs:
ifs_seen += 1
if ifs_seen > 1:
yield _error_tuple(2002, if_clause)
def _c2003(node: ComprehensionType) -> Iterable[Tuple[int, int, str, type]]:
"""
A comprehension expression should not span over multiple lines.
"""
if lineno(node) != end_lineno(node):
yield _error_tuple(2003, node)
def _c2004(node: ComprehensionType) -> Iterable[Tuple[int, int, str, type]]:
"""
A comprehension expression should span over multiple lines.
"""
if lineno(node) == end_lineno(node):
yield _error_tuple(2004, node)
def _c2020(node: ast.IfExp) -> Iterable[Tuple[int, int, str, type]]:
"""
A multiline conditional expression should place each of its segments on a separate line.
"""
if lineno(node) == end_lineno(node):
return () # single line expression
if len({lineno(node.body), lineno(node.test), lineno(node.orelse)}) < 3:
yield _error_tuple(2020, node)
def _c2021(
node: ast.Assign, tokens: List[tokenize.TokenInfo]
) -> Iterable[Tuple[int, int, str, type]]:
"""
A conditional expression used for assignment must be surrounded by parantheses.
"""
if tokens[0].type != tokenize.OP or "(" not in tokens[0].string:
yield _error_tuple(2021, node)
def _c2022(node: ast.IfExp) -> Iterable[Tuple[int, int, str, type]]:
"""
A conditional expression should not contain further conditional expressions.
"""
for ancestor in itertools.chain(
ast.walk(node.body), ast.walk(node.test), ast.walk(node.orelse)
):
if isinstance(ancestor, ast.IfExp):
yield _error_tuple(2022, ancestor)
def _c2023(node: ast.IfExp) -> Iterable[Tuple[int, int, str, type]]:
"""
A conditional expression should not span over multiple lines.
"""
if lineno(node) != end_lineno(node):
yield _error_tuple(2023, node)
def _c2024(node: ast.IfExp) -> Iterable[Tuple[int, int, str, type]]:
"""
A conditional expression should span over multiple lines.
"""
if lineno(node) == end_lineno(node):
yield _error_tuple(2024, node)
def _c2025(node: ast.IfExp) -> Iterable[Tuple[int, int, str, type]]:
"""
Conditional expressions should not be used.
"""
yield _error_tuple(2025, node) | flake8_multiline_conditionals_comprehensions/mcc_checker.py | import argparse
import ast
import itertools
import sys
import tokenize
from typing import Tuple, Iterable, Union, List, cast
import flake8.options.manager
ComprehensionType = Union[
ast.ListComp, ast.SetComp, ast.DictComp, ast.GeneratorExp
]
DEFAULT_SELECT = [
"C2000",
"C2001",
"C2002",
"C2020",
"C2021",
"C2023",
]
PYTHON_36 = sys.version_info >= (3, 6)
PYTHON_37 = sys.version_info >= (3, 7)
PYTHON_38 = sys.version_info >= (3, 8)
PYTHON_39 = sys.version_info >= (3, 9)
class MCCChecker:
"""
A flake8 plugin to make sure complex conditional expressions and comprehension expressions are split over several
lines.
"""
name = "flake8-multiline-conditionals-comprehensions"
version = "1.1"
enabled_errors = []
def __init__(self, tree: ast.AST, file_tokens: List[tokenize.TokenInfo]):
self.tree = tree
self.tokens = file_tokens
@staticmethod
def add_options(option_manager: flake8.options.manager.OptionManager):
option_manager.add_option(
"--select_c20",
type=str,
comma_separated_list=True,
default=DEFAULT_SELECT,
parse_from_config=True,
help="Error types to use. Default: %(default)s",
)
@staticmethod
def parse_options(
option_manager: flake8.options.manager.OptionManager,
options: argparse.Namespace,
extra_args,
):
MCCChecker.enabled_errors = [
int(option[1:]) for option in options.select_c20
]
def _get_tokens_with_surrounding(
self, node: ast.AST, margin: int
) -> Iterable[tokenize.TokenInfo]:
start_index, end_index = None, None
for i, token in enumerate(self.tokens):
token_line, token_col = token.start
if (
token_line > lineno(node)
or (
token_line == lineno(node) and token_col >= col_offset(node)
)
) and (
token_line < end_lineno(node)
or (
token_line == end_lineno(node)
and token_col <= end_col_offset(node)
)
):
if start_index is None:
start_index = i
else:
if end_index is None and start_index is not None:
end_index = i
break
return self.tokens[start_index - margin : end_index + margin]
def run(self) -> Iterable[Tuple[int, int, str, type]]:
for node in ast.walk(self.tree):
if any(
isinstance(node, comp)
for comp in [
ast.ListComp,
ast.SetComp,
ast.DictComp,
ast.GeneratorExp,
]
):
if 2000 in MCCChecker.enabled_errors:
yield from _c2000(cast(ComprehensionType, node))
if 2001 in MCCChecker.enabled_errors:
yield from _c2001(cast(ComprehensionType, node))
if 2002 in MCCChecker.enabled_errors:
yield from _c2002(cast(ComprehensionType, node))
if 2003 in MCCChecker.enabled_errors:
yield from _c2003(cast(ComprehensionType, node))
if 2004 in MCCChecker.enabled_errors:
yield from _c2004(cast(ComprehensionType, node))
if isinstance(node, ast.Assign) and isinstance(
node.value, ast.IfExp
):
if 2021 in MCCChecker.enabled_errors:
yield from _c2021(
node,
list(self._get_tokens_with_surrounding(node.value, 1)),
)
if isinstance(node, ast.IfExp):
if 2020 in MCCChecker.enabled_errors:
yield from _c2020(node)
if 2022 in MCCChecker.enabled_errors:
yield from _c2022(node)
if 2023 in MCCChecker.enabled_errors:
yield from _c2023(node)
if 2024 in MCCChecker.enabled_errors:
yield from _c2024(node)
if 2025 in MCCChecker.enabled_errors:
yield from _c2025(node)
ERROR_MESSAGES = {
2000: "Generators in comprehension expression are on the same line.",
2001: "Different segments of a comprehension expression share a line.",
2002: "Multiple filter segments within a single comprehension expression.",
2003: "Multiline comprehension expression are forbidden.",
2004: "Singleline comprehension expression are forbidden.",
2020: "Different segments of a conditional expression share a line.",
2021: "Conditional expression used for assignment not surrounded by parantheses.",
2022: "Nested conditional expressions are forbidden.",
2023: "Multiline conditional expression are forbidden.",
2024: "Singleline conditional expression are forbidden.",
2025: "Conditional expressions are forbidden.",
}
def lineno(node: ast.AST):
return node.lineno
def end_lineno(node: ast.AST):
if PYTHON_38:
return node.end_lineno
else:
return max(
ancestor.lineno
for ancestor in ast.walk(node)
if hasattr(ancestor, "lineno")
)
def col_offset(node: ast.AST):
return node.col_offset
def end_col_offset(node: ast.AST):
if PYTHON_38:
return node.end_col_offset
else:
return max(
ancestor.col_offset
for ancestor in ast.walk(node)
if hasattr(ancestor, "col_offset")
)
def _error_tuple(error_code: int, node: ast.AST) -> Tuple[int, int, str, type]:
return (
lineno(node),
col_offset(node),
f"C{error_code} {ERROR_MESSAGES[error_code]}",
MCCChecker,
)
def _c2000(node: ComprehensionType) -> Iterable[Tuple[int, int, str, type]]:
"""
A comprehension expression should place each of its generators on a separate line.
"""
for generator1, generator2 in itertools.combinations(node.generators, 2):
if lineno(generator1.target) <= lineno(generator2.target) <= end_lineno(
generator1.iter
) or lineno(generator2.target) <= lineno(
generator1.target
) <= end_lineno(
generator2.iter
):
yield _error_tuple(2000, node)
def _c2001(node: ComprehensionType) -> Iterable[Tuple[int, int, str, type]]:
"""
A multiline comprehension expression should place each of its segments (map, generator, filter) on a separate line.
"""
if lineno(node) == end_lineno(node):
return () # single line comprehension
seen_line_nos = set()
for generator in node.generators:
if lineno(generator.target) in seen_line_nos:
yield _error_tuple(2001, generator.target)
seen_line_nos.add(lineno(generator.target))
for if_clause in generator.ifs:
if lineno(if_clause) in seen_line_nos:
yield _error_tuple(2001, if_clause)
seen_line_nos.add(lineno(if_clause))
if isinstance(node, ast.DictComp):
if lineno(node.value) in seen_line_nos:
yield _error_tuple(2001, node.key)
seen_line_nos.add(lineno(node.value))
else:
if lineno(node.elt) in seen_line_nos:
yield _error_tuple(2001, node.elt)
seen_line_nos.add(lineno(node.elt))
def _c2002(node: ComprehensionType) -> Iterable[Tuple[int, int, str, type]]:
"""
A comprehension expression should not contain multiple filters.
"""
ifs_seen = 0
for generator in node.generators:
for if_clause in generator.ifs:
ifs_seen += 1
if ifs_seen > 1:
yield _error_tuple(2002, if_clause)
def _c2003(node: ComprehensionType) -> Iterable[Tuple[int, int, str, type]]:
"""
A comprehension expression should not span over multiple lines.
"""
if lineno(node) != end_lineno(node):
yield _error_tuple(2003, node)
def _c2004(node: ComprehensionType) -> Iterable[Tuple[int, int, str, type]]:
"""
A comprehension expression should span over multiple lines.
"""
if lineno(node) == end_lineno(node):
yield _error_tuple(2004, node)
def _c2020(node: ast.IfExp) -> Iterable[Tuple[int, int, str, type]]:
"""
A multiline conditional expression should place each of its segments on a separate line.
"""
if lineno(node) == end_lineno(node):
return () # single line expression
if len({lineno(node.body), lineno(node.test), lineno(node.orelse)}) < 3:
yield _error_tuple(2020, node)
def _c2021(
node: ast.Assign, tokens: List[tokenize.TokenInfo]
) -> Iterable[Tuple[int, int, str, type]]:
"""
A conditional expression used for assignment must be surrounded by parantheses.
"""
if tokens[0].type != tokenize.OP or "(" not in tokens[0].string:
yield _error_tuple(2021, node)
def _c2022(node: ast.IfExp) -> Iterable[Tuple[int, int, str, type]]:
"""
A conditional expression should not contain further conditional expressions.
"""
for ancestor in itertools.chain(
ast.walk(node.body), ast.walk(node.test), ast.walk(node.orelse)
):
if isinstance(ancestor, ast.IfExp):
yield _error_tuple(2022, ancestor)
def _c2023(node: ast.IfExp) -> Iterable[Tuple[int, int, str, type]]:
"""
A conditional expression should not span over multiple lines.
"""
if lineno(node) != end_lineno(node):
yield _error_tuple(2023, node)
def _c2024(node: ast.IfExp) -> Iterable[Tuple[int, int, str, type]]:
"""
A conditional expression should span over multiple lines.
"""
if lineno(node) == end_lineno(node):
yield _error_tuple(2024, node)
def _c2025(node: ast.IfExp) -> Iterable[Tuple[int, int, str, type]]:
"""
Conditional expressions should not be used.
"""
yield _error_tuple(2025, node) | 0.522689 | 0.246828 |
import argparse
import requests
import os
import cv2
def parse_args():
parser = argparse.ArgumentParser(prog="Send Images From Folder",
description="This program sends the images stored in a folder to the cloud face recognition system.")
parser.add_argument('--input-folder', required=True,
help='Folder with the images to upload.')
parser.add_argument('--ip-address', required=True,
help='IP address of the Cloud API server')
parser.add_argument('--dry-run', required=False, action='store_true',
help='If included, the images are sent to the cloud but no results are obtained.')
parser.add_argument('--stats', required=False, action='store_true',
help='Shows statistics of the images sent.')
return parser.parse_args()
def main():
args = parse_args()
if(args.dry_run):
url = 'http://'+str(args.ip_address)+'/face-recognition/dry-run'
else:
url = 'http://'+str(args.ip_address)+'/face-recognition/get-results'
files = os.listdir(args.input_folder)
if(args.stats):
file_num = 0
comb_height = 0
comb_width = 0
comb_pixels = 0
for file in files:
file_path = os.path.join(args.input_folder, file)
if(os.path.isfile(file_path)):
if(args.stats):
file_num = file_num + 1
img = cv2.imread(file_path)
height = img.shape[0]
width = img.shape[1]
comb_height = comb_height + height
comb_width = comb_width + width
comb_pixels = comb_pixels + height*width
myobj = {'image': open(file_path, 'rb')}
request = requests.post(url, files = myobj, timeout=10)
print('Face recognition request for ' + file + ' had response: {}'.format(request.status_code))
if(not args.dry_run):
print(request.text)
if(request.status_code != 200):
print('Error message: ' + request.text)
if(args.stats):
# Print statistics
print()
print('~~~Statistics~~~')
print('%d images have been sent to the cloud server.' %(file_num,))
print('These images have an avg. height of %d px, an avg. width of %d px, and an avg. size of %d px' %(comb_height/file_num, comb_width/file_num, comb_pixels/file_num))
if __name__ == '__main__':
main() | interaction-with-framework/send-images-from-folder/send-images-from-folder.py |
import argparse
import requests
import os
import cv2
def parse_args():
parser = argparse.ArgumentParser(prog="Send Images From Folder",
description="This program sends the images stored in a folder to the cloud face recognition system.")
parser.add_argument('--input-folder', required=True,
help='Folder with the images to upload.')
parser.add_argument('--ip-address', required=True,
help='IP address of the Cloud API server')
parser.add_argument('--dry-run', required=False, action='store_true',
help='If included, the images are sent to the cloud but no results are obtained.')
parser.add_argument('--stats', required=False, action='store_true',
help='Shows statistics of the images sent.')
return parser.parse_args()
def main():
args = parse_args()
if(args.dry_run):
url = 'http://'+str(args.ip_address)+'/face-recognition/dry-run'
else:
url = 'http://'+str(args.ip_address)+'/face-recognition/get-results'
files = os.listdir(args.input_folder)
if(args.stats):
file_num = 0
comb_height = 0
comb_width = 0
comb_pixels = 0
for file in files:
file_path = os.path.join(args.input_folder, file)
if(os.path.isfile(file_path)):
if(args.stats):
file_num = file_num + 1
img = cv2.imread(file_path)
height = img.shape[0]
width = img.shape[1]
comb_height = comb_height + height
comb_width = comb_width + width
comb_pixels = comb_pixels + height*width
myobj = {'image': open(file_path, 'rb')}
request = requests.post(url, files = myobj, timeout=10)
print('Face recognition request for ' + file + ' had response: {}'.format(request.status_code))
if(not args.dry_run):
print(request.text)
if(request.status_code != 200):
print('Error message: ' + request.text)
if(args.stats):
# Print statistics
print()
print('~~~Statistics~~~')
print('%d images have been sent to the cloud server.' %(file_num,))
print('These images have an avg. height of %d px, an avg. width of %d px, and an avg. size of %d px' %(comb_height/file_num, comb_width/file_num, comb_pixels/file_num))
if __name__ == '__main__':
main() | 0.369315 | 0.160496 |
from __future__ import print_function
import argparse
import yaml
from pybh.utils import fail, argparse_bool, convert_string_to_array
TYPE_STR_MAPPING = {
"str": str,
"int": int,
"float": float,
"bool": argparse_bool,
}
def run(args):
with open(args.file, "r") as fin:
content = yaml.load(fin)
# keys = args.key.strip().split(".")
keys = convert_string_to_array(args.key, sep=".")
assert(len(keys) > 0)
for i, key in enumerate(keys):
if key not in content:
if args.default is None:
fail("ERROR: Key #{} [{}] not found in YAML file".format(i, key))
else:
content = args.default
break
content = content[key]
if args.index is not None:
if type(content) is not list:
# content = [x.strip() for x in content.split(",")]
content = convert_string_to_array(content, sep=",")
# indices = [int(x) for x in args.index.split(",")]
indices = convert_string_to_array(args.index, sep=",", value_type=int)
for i, index in enumerate(indices):
if index >= len(content):
fail("ERROR: Index #{} ({}) out of bounds for value ({}) of type ({})".format(
i, index, content, type(content)))
content = content[index]
if args.type is not None:
if args.type not in TYPE_STR_MAPPING:
fail("ERROR: Type {} is unknown".format(args.type))
else:
try:
content = TYPE_STR_MAPPING[args.type](content)
except ValueError:
fail("ERROR: Could not convert value ({}) of type ({}) to type {}".format(
content, type(content), args.type))
# Output extracted value
if type(content) == bool:
content = str(content).lower()
print(content)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=None)
parser.add_argument('file', type=str, help="YAML file to read")
parser.add_argument('key', type=str, help="Key of value to read")
parser.add_argument('--type', type=str, help="To of value required")
parser.add_argument('--index', type=str,
help="Extract element from (nested) array "
"(assuming the value is an array or string separated by ,)."
"Nested indices can be specified by separating them with ,")
parser.add_argument('--default', type=str, help="Default value if key is not in YAML file")
args = parser.parse_args()
run(args) | pybh/tools/read_yaml_value.py | from __future__ import print_function
import argparse
import yaml
from pybh.utils import fail, argparse_bool, convert_string_to_array
TYPE_STR_MAPPING = {
"str": str,
"int": int,
"float": float,
"bool": argparse_bool,
}
def run(args):
with open(args.file, "r") as fin:
content = yaml.load(fin)
# keys = args.key.strip().split(".")
keys = convert_string_to_array(args.key, sep=".")
assert(len(keys) > 0)
for i, key in enumerate(keys):
if key not in content:
if args.default is None:
fail("ERROR: Key #{} [{}] not found in YAML file".format(i, key))
else:
content = args.default
break
content = content[key]
if args.index is not None:
if type(content) is not list:
# content = [x.strip() for x in content.split(",")]
content = convert_string_to_array(content, sep=",")
# indices = [int(x) for x in args.index.split(",")]
indices = convert_string_to_array(args.index, sep=",", value_type=int)
for i, index in enumerate(indices):
if index >= len(content):
fail("ERROR: Index #{} ({}) out of bounds for value ({}) of type ({})".format(
i, index, content, type(content)))
content = content[index]
if args.type is not None:
if args.type not in TYPE_STR_MAPPING:
fail("ERROR: Type {} is unknown".format(args.type))
else:
try:
content = TYPE_STR_MAPPING[args.type](content)
except ValueError:
fail("ERROR: Could not convert value ({}) of type ({}) to type {}".format(
content, type(content), args.type))
# Output extracted value
if type(content) == bool:
content = str(content).lower()
print(content)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=None)
parser.add_argument('file', type=str, help="YAML file to read")
parser.add_argument('key', type=str, help="Key of value to read")
parser.add_argument('--type', type=str, help="To of value required")
parser.add_argument('--index', type=str,
help="Extract element from (nested) array "
"(assuming the value is an array or string separated by ,)."
"Nested indices can be specified by separating them with ,")
parser.add_argument('--default', type=str, help="Default value if key is not in YAML file")
args = parser.parse_args()
run(args) | 0.418459 | 0.20949 |
import glob
import json
import os
import random
import tempfile
import unittest
from . import train
class TrainTest(unittest.TestCase):
def setUp(self):
self.job_dir = tempfile.mkdtemp()
self.num_checkpoints = 10
self.checkpoint_files = []
self.checkpoint_steps = 100
self.test_job_dir = tempfile.mkdtemp()
self.test_job_file_glob = os.path.join(self.test_job_dir, "*")
# Note that hyperparameters are intended to be constant across checkpoints
self.hyperparameter_1 = 17
self.hyperparameter_2 = 3.14159
for i in range(self.num_checkpoints):
path = os.path.join(
self.job_dir,
"dummy-checkpoint-{}.json".format(i)
)
checkpoint_data = {
"steps": i*self.checkpoint_steps,
"hyperparameters": {
"hyperparameter_1": self.hyperparameter_1,
"hyperparameter_2": self.hyperparameter_2
},
"model": random.random()
}
with open(path, "w") as fp:
json.dump(checkpoint_data, fp)
self.checkpoint_files.append(path)
self.garbage_file = os.path.join(self.job_dir, "garbage")
with open(self.garbage_file, "w") as gf:
gf.write("garbage")
def tearDown(self):
os.remove(self.garbage_file)
for path in self.checkpoint_files:
os.remove(path)
os.rmdir(self.job_dir)
test_job_files = glob.glob(self.test_job_file_glob)
for path in test_job_files:
os.remove(path)
os.rmdir(self.test_job_dir)
def test_get_checkpoints(self):
checkpoints = train.get_checkpoints(self.job_dir)
self.assertSetEqual(set(checkpoints), set(self.checkpoint_files))
def test_checkpoint_index(self):
indices = map(train.checkpoint_index, self.checkpoint_files)
self.assertListEqual(indices, range(self.num_checkpoints))
def test_latest_checkpoint_1(self):
latest_checkpoint = train.latest_checkpoint(
random.sample(self.checkpoint_files, self.num_checkpoints)
)
self.assertEqual(
latest_checkpoint,
(self.checkpoint_files[-1], self.num_checkpoints-1)
)
def test_latest_checkpoint_2(self):
latest_checkpoint = train.latest_checkpoint([])
self.assertEqual(latest_checkpoint, (None, None))
def test_save_checkpoint(self):
self.assertEqual(len(glob.glob(self.test_job_file_glob)), 0)
checkpoint_data = {
"test_key": "test_value"
}
checkpoint_file = train.save_checkpoint(
self.test_job_dir,
1,
checkpoint_data
)
self.assertEqual(len(glob.glob(self.test_job_file_glob)), 1)
with open(checkpoint_file) as fp:
saved_object = json.load(fp)
self.assertDictEqual(saved_object, checkpoint_data)
def test_runner(self):
self.assertEqual(len(glob.glob(self.test_job_file_glob)), 0)
hyperparameters = {
"hyperparameter_1": self.hyperparameter_1,
"hyperparameter_2": self.hyperparameter_2
}
train_steps = 100
checkpoint_steps = 10
train.runner(
train.generate_trainer,
self.test_job_dir,
train_steps,
checkpoint_steps,
hyperparameters
)
self.assertEqual(
len(glob.glob(self.test_job_file_glob)),
int(train_steps/checkpoint_steps) + 1
)
if __name__ == "__main__":
unittest.main() | gce/survival-training/wrapper/train_test.py |
import glob
import json
import os
import random
import tempfile
import unittest
from . import train
class TrainTest(unittest.TestCase):
def setUp(self):
self.job_dir = tempfile.mkdtemp()
self.num_checkpoints = 10
self.checkpoint_files = []
self.checkpoint_steps = 100
self.test_job_dir = tempfile.mkdtemp()
self.test_job_file_glob = os.path.join(self.test_job_dir, "*")
# Note that hyperparameters are intended to be constant across checkpoints
self.hyperparameter_1 = 17
self.hyperparameter_2 = 3.14159
for i in range(self.num_checkpoints):
path = os.path.join(
self.job_dir,
"dummy-checkpoint-{}.json".format(i)
)
checkpoint_data = {
"steps": i*self.checkpoint_steps,
"hyperparameters": {
"hyperparameter_1": self.hyperparameter_1,
"hyperparameter_2": self.hyperparameter_2
},
"model": random.random()
}
with open(path, "w") as fp:
json.dump(checkpoint_data, fp)
self.checkpoint_files.append(path)
self.garbage_file = os.path.join(self.job_dir, "garbage")
with open(self.garbage_file, "w") as gf:
gf.write("garbage")
def tearDown(self):
os.remove(self.garbage_file)
for path in self.checkpoint_files:
os.remove(path)
os.rmdir(self.job_dir)
test_job_files = glob.glob(self.test_job_file_glob)
for path in test_job_files:
os.remove(path)
os.rmdir(self.test_job_dir)
def test_get_checkpoints(self):
checkpoints = train.get_checkpoints(self.job_dir)
self.assertSetEqual(set(checkpoints), set(self.checkpoint_files))
def test_checkpoint_index(self):
indices = map(train.checkpoint_index, self.checkpoint_files)
self.assertListEqual(indices, range(self.num_checkpoints))
def test_latest_checkpoint_1(self):
latest_checkpoint = train.latest_checkpoint(
random.sample(self.checkpoint_files, self.num_checkpoints)
)
self.assertEqual(
latest_checkpoint,
(self.checkpoint_files[-1], self.num_checkpoints-1)
)
def test_latest_checkpoint_2(self):
latest_checkpoint = train.latest_checkpoint([])
self.assertEqual(latest_checkpoint, (None, None))
def test_save_checkpoint(self):
self.assertEqual(len(glob.glob(self.test_job_file_glob)), 0)
checkpoint_data = {
"test_key": "test_value"
}
checkpoint_file = train.save_checkpoint(
self.test_job_dir,
1,
checkpoint_data
)
self.assertEqual(len(glob.glob(self.test_job_file_glob)), 1)
with open(checkpoint_file) as fp:
saved_object = json.load(fp)
self.assertDictEqual(saved_object, checkpoint_data)
def test_runner(self):
self.assertEqual(len(glob.glob(self.test_job_file_glob)), 0)
hyperparameters = {
"hyperparameter_1": self.hyperparameter_1,
"hyperparameter_2": self.hyperparameter_2
}
train_steps = 100
checkpoint_steps = 10
train.runner(
train.generate_trainer,
self.test_job_dir,
train_steps,
checkpoint_steps,
hyperparameters
)
self.assertEqual(
len(glob.glob(self.test_job_file_glob)),
int(train_steps/checkpoint_steps) + 1
)
if __name__ == "__main__":
unittest.main() | 0.411347 | 0.432363 |
import mock
import unittest
from common import acl
from common import constants
from common import exceptions
class AclTest(unittest.TestCase):
def testAdminIsPrivilegedUser(self):
self.assertTrue(acl.IsPrivilegedUser('<EMAIL>', True))
def testGooglerIsPrivilegedUser(self):
self.assertTrue(acl.IsPrivilegedUser('<EMAIL>', False))
def testUnknownUserIsNotPrivilegedUser(self):
self.assertFalse(acl.IsPrivilegedUser('<EMAIL>', False))
def testWhitelistedClientId(self):
self.assertTrue(acl.IsWhitelistedClientId(constants.API_EXPLORER_CLIENT_ID))
def testUnknownClientIdIsNotWhitelisted(self):
self.assertFalse(acl.IsWhitelistedClientId('unknown_id'))
def testAdminCanTriggerNewAnalysis(self):
self.assertTrue(acl.CanTriggerNewAnalysis('<EMAIL>', True))
def testGooglerCanTriggerNewAnalysis(self):
self.assertTrue(acl.CanTriggerNewAnalysis('<EMAIL>', False))
@mock.patch.object(acl.appengine_util, 'IsStaging', return_value=False)
def testWhitelistedAppAccountCanTriggerNewAnalysis(self, _):
for email in constants.WHITELISTED_APP_ACCOUNTS:
self.assertTrue(acl.CanTriggerNewAnalysis(email, False))
@mock.patch.object(acl.appengine_util, 'IsStaging', return_value=True)
def testWhitelistedStagingAppAccountCanTriggerNewAnalysis(self, _):
for email in constants.WHITELISTED_STAGING_APP_ACCOUNTS:
self.assertTrue(acl.CanTriggerNewAnalysis(email, False))
def testUnkownUserCanNotTriggerNewAnalysis(self):
self.assertFalse(acl.CanTriggerNewAnalysis('<EMAIL>', False))
@mock.patch.object(acl, 'CanTriggerNewAnalysis', return_value=True)
@mock.patch.object(
acl.auth_util,
'GetOauthUserEmail',
return_value='<EMAIL>')
@mock.patch.object(
acl.auth_util, 'IsCurrentOauthUserAdmin', return_value=False)
def testValidateOauthUserForAuthorizedServiceAccount(self, *_):
user_email, is_admin = acl.ValidateOauthUserForNewAnalysis()
self.assertEqual('<EMAIL>', user_email)
self.assertFalse(is_admin)
@mock.patch.object(acl, 'CanTriggerNewAnalysis', return_value=False)
@mock.patch.object(
acl.auth_util,
'GetOauthUserEmail',
return_value='<EMAIL>')
@mock.patch.object(
acl.auth_util, 'IsCurrentOauthUserAdmin', return_value=False)
def testValidateOauthUserForUnauthorizedServiceAccount(self, *_):
with self.assertRaises(exceptions.UnauthorizedException):
acl.ValidateOauthUserForNewAnalysis()
@mock.patch.object(acl, 'CanTriggerNewAnalysis', return_value=True)
@mock.patch.object(acl, 'IsWhitelistedClientId', return_value=False)
@mock.patch.object(acl.auth_util, 'GetOauthClientId', return_value='id')
@mock.patch.object(
acl.auth_util, 'IsCurrentOauthUserAdmin', return_value=True)
@mock.patch.object(acl.auth_util, 'GetOauthUserEmail', return_value='email')
def testValidateOauthUserForUnauthorizedClientId(self, *_):
with self.assertRaises(exceptions.UnauthorizedException):
acl.ValidateOauthUserForNewAnalysis()
@mock.patch.object(acl, 'CanTriggerNewAnalysis', return_value=True)
@mock.patch.object(acl, 'IsWhitelistedClientId', return_value=True)
@mock.patch.object(acl.auth_util, 'GetOauthClientId', return_value='id')
@mock.patch.object(
acl.auth_util, 'IsCurrentOauthUserAdmin', return_value=True)
@mock.patch.object(acl.auth_util, 'GetOauthUserEmail', return_value='email')
def testValidateOauthUserForAuthorizedUser(self, *_):
user_email, is_admin = acl.ValidateOauthUserForNewAnalysis()
self.assertEqual('email', user_email)
self.assertTrue(is_admin)
@mock.patch.object(acl, 'CanTriggerNewAnalysis', return_value=False)
@mock.patch.object(acl, 'IsWhitelistedClientId', return_value=True)
@mock.patch.object(acl.auth_util, 'GetOauthClientId', return_value='id')
@mock.patch.object(
acl.auth_util, 'IsCurrentOauthUserAdmin', return_value=False)
@mock.patch.object(acl.auth_util, 'GetOauthUserEmail', return_value='email')
def testValidateOauthUserForUnauthorizedUser(self, *_):
with self.assertRaises(exceptions.UnauthorizedException):
acl.ValidateOauthUserForNewAnalysis()
@mock.patch.object(acl.auth_util, 'GetOauthUserEmail', return_value=None)
def testValidateOauthUserForUnknownUserEmail(self, *_):
with self.assertRaises(exceptions.UnauthorizedException):
acl.ValidateOauthUserForNewAnalysis() | appengine/findit/common/test/acl_test.py |
import mock
import unittest
from common import acl
from common import constants
from common import exceptions
class AclTest(unittest.TestCase):
def testAdminIsPrivilegedUser(self):
self.assertTrue(acl.IsPrivilegedUser('<EMAIL>', True))
def testGooglerIsPrivilegedUser(self):
self.assertTrue(acl.IsPrivilegedUser('<EMAIL>', False))
def testUnknownUserIsNotPrivilegedUser(self):
self.assertFalse(acl.IsPrivilegedUser('<EMAIL>', False))
def testWhitelistedClientId(self):
self.assertTrue(acl.IsWhitelistedClientId(constants.API_EXPLORER_CLIENT_ID))
def testUnknownClientIdIsNotWhitelisted(self):
self.assertFalse(acl.IsWhitelistedClientId('unknown_id'))
def testAdminCanTriggerNewAnalysis(self):
self.assertTrue(acl.CanTriggerNewAnalysis('<EMAIL>', True))
def testGooglerCanTriggerNewAnalysis(self):
self.assertTrue(acl.CanTriggerNewAnalysis('<EMAIL>', False))
@mock.patch.object(acl.appengine_util, 'IsStaging', return_value=False)
def testWhitelistedAppAccountCanTriggerNewAnalysis(self, _):
for email in constants.WHITELISTED_APP_ACCOUNTS:
self.assertTrue(acl.CanTriggerNewAnalysis(email, False))
@mock.patch.object(acl.appengine_util, 'IsStaging', return_value=True)
def testWhitelistedStagingAppAccountCanTriggerNewAnalysis(self, _):
for email in constants.WHITELISTED_STAGING_APP_ACCOUNTS:
self.assertTrue(acl.CanTriggerNewAnalysis(email, False))
def testUnkownUserCanNotTriggerNewAnalysis(self):
self.assertFalse(acl.CanTriggerNewAnalysis('<EMAIL>', False))
@mock.patch.object(acl, 'CanTriggerNewAnalysis', return_value=True)
@mock.patch.object(
acl.auth_util,
'GetOauthUserEmail',
return_value='<EMAIL>')
@mock.patch.object(
acl.auth_util, 'IsCurrentOauthUserAdmin', return_value=False)
def testValidateOauthUserForAuthorizedServiceAccount(self, *_):
user_email, is_admin = acl.ValidateOauthUserForNewAnalysis()
self.assertEqual('<EMAIL>', user_email)
self.assertFalse(is_admin)
@mock.patch.object(acl, 'CanTriggerNewAnalysis', return_value=False)
@mock.patch.object(
acl.auth_util,
'GetOauthUserEmail',
return_value='<EMAIL>')
@mock.patch.object(
acl.auth_util, 'IsCurrentOauthUserAdmin', return_value=False)
def testValidateOauthUserForUnauthorizedServiceAccount(self, *_):
with self.assertRaises(exceptions.UnauthorizedException):
acl.ValidateOauthUserForNewAnalysis()
@mock.patch.object(acl, 'CanTriggerNewAnalysis', return_value=True)
@mock.patch.object(acl, 'IsWhitelistedClientId', return_value=False)
@mock.patch.object(acl.auth_util, 'GetOauthClientId', return_value='id')
@mock.patch.object(
acl.auth_util, 'IsCurrentOauthUserAdmin', return_value=True)
@mock.patch.object(acl.auth_util, 'GetOauthUserEmail', return_value='email')
def testValidateOauthUserForUnauthorizedClientId(self, *_):
with self.assertRaises(exceptions.UnauthorizedException):
acl.ValidateOauthUserForNewAnalysis()
@mock.patch.object(acl, 'CanTriggerNewAnalysis', return_value=True)
@mock.patch.object(acl, 'IsWhitelistedClientId', return_value=True)
@mock.patch.object(acl.auth_util, 'GetOauthClientId', return_value='id')
@mock.patch.object(
acl.auth_util, 'IsCurrentOauthUserAdmin', return_value=True)
@mock.patch.object(acl.auth_util, 'GetOauthUserEmail', return_value='email')
def testValidateOauthUserForAuthorizedUser(self, *_):
user_email, is_admin = acl.ValidateOauthUserForNewAnalysis()
self.assertEqual('email', user_email)
self.assertTrue(is_admin)
@mock.patch.object(acl, 'CanTriggerNewAnalysis', return_value=False)
@mock.patch.object(acl, 'IsWhitelistedClientId', return_value=True)
@mock.patch.object(acl.auth_util, 'GetOauthClientId', return_value='id')
@mock.patch.object(
acl.auth_util, 'IsCurrentOauthUserAdmin', return_value=False)
@mock.patch.object(acl.auth_util, 'GetOauthUserEmail', return_value='email')
def testValidateOauthUserForUnauthorizedUser(self, *_):
with self.assertRaises(exceptions.UnauthorizedException):
acl.ValidateOauthUserForNewAnalysis()
@mock.patch.object(acl.auth_util, 'GetOauthUserEmail', return_value=None)
def testValidateOauthUserForUnknownUserEmail(self, *_):
with self.assertRaises(exceptions.UnauthorizedException):
acl.ValidateOauthUserForNewAnalysis() | 0.546254 | 0.380932 |
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisAudnsNetAuStatusRegistered(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.audns.net.au/status_registered.txt"
host = "whois.audns.net.au"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, ["clientDeleteProhibited", "clientUpdateProhibited", "serverDeleteProhibited (Protected by .auLOCKDOWN)", "serverUpdateProhibited (Protected by .auLOCKDOWN)"])
def test_available(self):
eq_(self.record.available, False)
def test_domain(self):
eq_(self.record.domain, "google.com.au")
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(len(self.record.nameservers), 4)
eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[0].name, "ns1.google.com")
eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[1].name, "ns2.google.com")
eq_(self.record.nameservers[2].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[2].name, "ns3.google.com")
eq_(self.record.nameservers[3].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[3].name, "ns4.google.com")
def test_admin_contacts(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.admin_contacts)
def test_registered(self):
eq_(self.record.registered, True)
def test_created_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.created_on)
def test_registrar(self):
eq_(self.record.registrar.__class__.__name__, 'Registrar')
eq_(self.record.registrar.id, "MARKMONITOR")
eq_(self.record.registrar.name, "MarkMonitor Inc.")
eq_(self.record.registrar.organization, None)
eq_(self.record.registrar.url, None)
def test_registrant_contacts(self):
eq_(self.record.registrant_contacts.__class__.__name__, 'list')
eq_(len(self.record.registrant_contacts), 1)
eq_(self.record.registrant_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.registrant_contacts[0].type, yawhois.record.Contact.TYPE_REGISTRANT)
eq_(self.record.registrant_contacts[0].id, "MMR-122026")
eq_(self.record.registrant_contacts[0].name, "Domain Administrator")
eq_(self.record.registrant_contacts[0].organization, "Google INC")
eq_(self.record.registrant_contacts[0].address, None)
eq_(self.record.registrant_contacts[0].city, None)
eq_(self.record.registrant_contacts[0].zip, None)
eq_(self.record.registrant_contacts[0].state, None)
eq_(self.record.registrant_contacts[0].country, None)
eq_(self.record.registrant_contacts[0].country_code, None)
eq_(self.record.registrant_contacts[0].phone, None)
eq_(self.record.registrant_contacts[0].fax, None)
eq_(self.record.registrant_contacts[0].email, "Visit whois.ausregistry.com.au for Web based WhoIs")
eq_(self.record.registrant_contacts[0].created_on, None)
eq_(self.record.registrant_contacts[0].updated_on, None)
def test_technical_contacts(self):
eq_(self.record.technical_contacts.__class__.__name__, 'list')
eq_(len(self.record.technical_contacts), 1)
eq_(self.record.technical_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.technical_contacts[0].type, yawhois.record.Contact.TYPE_TECHNICAL)
eq_(self.record.technical_contacts[0].id, "MMR-87489")
eq_(self.record.technical_contacts[0].name, "DNS Admin")
eq_(self.record.technical_contacts[0].organization, None)
eq_(self.record.technical_contacts[0].address, None)
eq_(self.record.technical_contacts[0].city, None)
eq_(self.record.technical_contacts[0].zip, None)
eq_(self.record.technical_contacts[0].state, None)
eq_(self.record.technical_contacts[0].country, None)
eq_(self.record.technical_contacts[0].country_code, None)
eq_(self.record.technical_contacts[0].phone, None)
eq_(self.record.technical_contacts[0].fax, None)
eq_(self.record.technical_contacts[0].email, "Visit whois.ausregistry.com.au for Web based WhoIs")
eq_(self.record.technical_contacts[0].created_on, None)
eq_(self.record.technical_contacts[0].updated_on, None)
def test_updated_on(self):
eq_(self.record.updated_on.__class__.__name__, 'datetime')
eq_(self.record.updated_on, time_parse('2013-06-05 04:03:08 UTC'))
def test_domain_id(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.domain_id)
def test_expires_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.expires_on)
def test_disclaimer(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.disclaimer) | test/record/parser/test_response_whois_audns_net_au_status_registered.py |
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisAudnsNetAuStatusRegistered(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.audns.net.au/status_registered.txt"
host = "whois.audns.net.au"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, ["clientDeleteProhibited", "clientUpdateProhibited", "serverDeleteProhibited (Protected by .auLOCKDOWN)", "serverUpdateProhibited (Protected by .auLOCKDOWN)"])
def test_available(self):
eq_(self.record.available, False)
def test_domain(self):
eq_(self.record.domain, "google.com.au")
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(len(self.record.nameservers), 4)
eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[0].name, "ns1.google.com")
eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[1].name, "ns2.google.com")
eq_(self.record.nameservers[2].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[2].name, "ns3.google.com")
eq_(self.record.nameservers[3].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[3].name, "ns4.google.com")
def test_admin_contacts(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.admin_contacts)
def test_registered(self):
eq_(self.record.registered, True)
def test_created_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.created_on)
def test_registrar(self):
eq_(self.record.registrar.__class__.__name__, 'Registrar')
eq_(self.record.registrar.id, "MARKMONITOR")
eq_(self.record.registrar.name, "MarkMonitor Inc.")
eq_(self.record.registrar.organization, None)
eq_(self.record.registrar.url, None)
def test_registrant_contacts(self):
eq_(self.record.registrant_contacts.__class__.__name__, 'list')
eq_(len(self.record.registrant_contacts), 1)
eq_(self.record.registrant_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.registrant_contacts[0].type, yawhois.record.Contact.TYPE_REGISTRANT)
eq_(self.record.registrant_contacts[0].id, "MMR-122026")
eq_(self.record.registrant_contacts[0].name, "Domain Administrator")
eq_(self.record.registrant_contacts[0].organization, "Google INC")
eq_(self.record.registrant_contacts[0].address, None)
eq_(self.record.registrant_contacts[0].city, None)
eq_(self.record.registrant_contacts[0].zip, None)
eq_(self.record.registrant_contacts[0].state, None)
eq_(self.record.registrant_contacts[0].country, None)
eq_(self.record.registrant_contacts[0].country_code, None)
eq_(self.record.registrant_contacts[0].phone, None)
eq_(self.record.registrant_contacts[0].fax, None)
eq_(self.record.registrant_contacts[0].email, "Visit whois.ausregistry.com.au for Web based WhoIs")
eq_(self.record.registrant_contacts[0].created_on, None)
eq_(self.record.registrant_contacts[0].updated_on, None)
def test_technical_contacts(self):
eq_(self.record.technical_contacts.__class__.__name__, 'list')
eq_(len(self.record.technical_contacts), 1)
eq_(self.record.technical_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.technical_contacts[0].type, yawhois.record.Contact.TYPE_TECHNICAL)
eq_(self.record.technical_contacts[0].id, "MMR-87489")
eq_(self.record.technical_contacts[0].name, "DNS Admin")
eq_(self.record.technical_contacts[0].organization, None)
eq_(self.record.technical_contacts[0].address, None)
eq_(self.record.technical_contacts[0].city, None)
eq_(self.record.technical_contacts[0].zip, None)
eq_(self.record.technical_contacts[0].state, None)
eq_(self.record.technical_contacts[0].country, None)
eq_(self.record.technical_contacts[0].country_code, None)
eq_(self.record.technical_contacts[0].phone, None)
eq_(self.record.technical_contacts[0].fax, None)
eq_(self.record.technical_contacts[0].email, "Visit whois.ausregistry.com.au for Web based WhoIs")
eq_(self.record.technical_contacts[0].created_on, None)
eq_(self.record.technical_contacts[0].updated_on, None)
def test_updated_on(self):
eq_(self.record.updated_on.__class__.__name__, 'datetime')
eq_(self.record.updated_on, time_parse('2013-06-05 04:03:08 UTC'))
def test_domain_id(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.domain_id)
def test_expires_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.expires_on)
def test_disclaimer(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.disclaimer) | 0.549641 | 0.229222 |
import numpy as np
import matplotlib.pyplot as pl
from configobj import ConfigObj
from astropy import units as u
import copy
import pymcao.wfs as wfs
import pymcao.atmosphere as atmosphere
import pymcao.sun as sun
import logging
import time
from tqdm import tqdm
import threading
import pymcao.comm as comm
import pymcao.dms as dms
import pymcao.plots as plots
import pymcao.science as science
import pymcao.fft as fft
import pymcao.config as config
try:
from PyQt5.QtNetwork import QHostAddress, QTcpServer
PYQT_VERSION = 5
except (ImportError ,RuntimeError):
from PyQt4 import QtGui, QtCore
QtWidgets = QtGui
PYQT_VERSION = 4
__all__ = ['Simulator']
class Simulator(object):
def __init__(self, configuration_file=None):
self.configuration_file = configuration_file
self.config = config.Config(configuration_file)
# Logger
self.logger = logging.getLogger("SIM ")
self.logger.setLevel(logging.INFO)
self.logger.handlers = []
ch = logging.StreamHandler(self.config.logfile)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
def init_simulation(self, init_server=False):
self.operation_mode = self.config.operation_mode
self.n_zernike = self.config.n_zernike
self.n_stars = self.config.n_stars
self.n_frame = 0
# Instantiate all elements of the simulation
#---------------------------------------------
# Instantiate a single WFS
#---------------------------------------------
single_wfs = wfs.WFS(self.config)
# Precompute Zernike basis
single_wfs.precompute_zernike(n_zernike=self.n_zernike)
single_wfs.compute_subapertures(fill_fraction=self.config.fill_fraction)
single_wfs.init_fft()
if (self.operation_mode == 'scao_single' or self.operation_mode == 'scao'):
self.wfs = [single_wfs]
if (self.operation_mode == 'mcao_single' or self.operation_mode == 'mcao'):
self.wfs = [copy.copy(single_wfs) for i in range(self.config.n_stars)]
self.config.pixel_size_pupil = self.wfs[0].pupil_pixel_size_cm
self.config.npix_overfill = self.wfs[0].npix_overfill
self.config.npix_pupil = self.wfs[0].npix_pupil
#---------------------------------------------
# Intantiate the atmosphere
#---------------------------------------------
self.atmosphere = atmosphere.Atmosphere(self.config, self.wfs[0].Z, self.wfs[0].pupil)
# Find lock points for MCAO and observing points for later degrading the
# observations
self.lock_points = self.atmosphere.lock_points()
self.sci_pointings = self.atmosphere.sci_pointings()
#---------------------------------------------
# Instantiate the Sun that provides the images
#---------------------------------------------
self.sun = sun.Sun(self.config, self.lock_points, self.sci_pointings)
#---------------------------------------------
# Instantiate the DMs
#---------------------------------------------
self.dms = dms.DM(self.config, self.wfs[0].Z)
#---------------------------------------------
# Instantiate science camera if present
#---------------------------------------------
if (self.config.compute_science):
self.config.patch_size_science = self.atmosphere.patch_size_science
self.sci = science.Science(self.config, self.wfs[0].Z, self.wfs[0].pupil)
self.sci.init_fft()
#---------------------------------------------
# Instantiate communication mode if necessary
#---------------------------------------------
if (init_server):
if (self.operation_mode == 'mcao' or self.operation_mode == 'scao'):
self.event_comm = threading.Event()
self.comm = threading.Thread(target=comm.Comm, args=(self, self.config.cadence, self.event_comm))
self.comm.start()
def init_time(self):
self.start_time = time.time()
def end_time(self):
self.end_time = time.time()
def print_time(self):
dt = self.end_time - self.start_time
fps = (self.n_frame+1) / dt
print(f" Time : {dt:.4f} s - FPS : {fps:.4f} - Cadence : {1.0/fps:.4f}")
def close_loggers(self):
"""
Close all loggers after simulation ends
"""
for logger in [self.logger, self.sun.logger, self.atmosphere.logger, \
self.sci.logger, self.config.logger, self.dms.logger]:
for handler in logger.handlers:
handler.close()
logger.removeHandler(handler)
for phase_screen in self.atmosphere.screens:
for handler in logger.handlers:
handler.close()
logger.removeHandler(handler)
for wfs in self.wfs:
for handler in wfs.logger.handlers:
handler.close()
logger.removeHandler(handler)
def finalize(self):
self.close_loggers()
def start_step(self, silence = False):
if (silence):
for wfs in self.wfs:
wfs.logger.disabled = True
self.logger.disabled = True
self.logger.info("Frame")
def frame_scao(self, silence = False, plot=False):
"""
Run a single frame in SCAO mode
Parameters
----------
silence : bool, optional
[description], by default False
plot : bool, optional
[description], by default False
"""
if (silence):
self.wfs[0].logger.disabled = True
self.logger.disabled = True
self.logger.info("Frame")
# Generate new turbulent atmosphere
# self.atmosphere.generate_turbulent_zernikes_kolmogorov(r0=20.0)
self.atmosphere.zernike_turbulent_metapupils()
self.atmosphere.move_screens()
# Compute the wavefronts for all WFS
self.wavefront = self.atmosphere.generate_wfs()
self.images, self.images_sci = self.sun.new_image()
# Set the image of the Sun in the WFS
self.wfs[0].set_image(self.images[0,:,:])
# Set wavefront
self.wfs[0].set_wavefront(self.wavefront[0,:])
# Compute subpupil PSFs and images
self.wfs[0].generate_subpupil_psf()
self.wfs[0].generate_wfs_images()
# Measure correlation
self.wfs[0].measure_wfs_correlation()
self.uncorrected_wavefront_sci = self.atmosphere.generate_science_wf()
self.sci.degrade_image(self.images_sci, self.uncorrected_wavefront_sci)
self.n_frame += 1
if (silence):
self.wfs[0].logger.disabled = False
self.logger.disabled = False
if (plot):
plots.show_scao(self.wfs[0], save_results=True)
def frame_mcao(self, silence = False, plot=False):
if (silence):
for wfs in self.wfs:
wfs.logger.disabled = True
self.logger.disabled = True
self.logger.info("Frame")
# Generate new turbulent atmosphere
# self.atmosphere.generate_turbulent_zernikes_kolmogorov(r0=20.0)
self.atmosphere.zernike_turbulent_metapupils()
self.atmosphere.move_screens()
# Compute the wavefronts for all WFS
self.wavefront = self.atmosphere.generate_wfs()
# Extract the images for the WFS and for the science camera
self.images, self.images_sci = self.sun.new_image()
self.wfs_zernike = np.zeros((self.n_stars, self.n_zernike))
# Compute the wavefront for every direction
for i in range(self.n_stars):
self.wfs[i].set_image(self.images[i,:,:])
# Set wavefront
self.wfs[i].set_wavefront(self.wavefront[i,:])
# Compute subpupil PSFs and images
self.wfs[i].generate_subpupil_psf()
self.wfs[i].generate_wfs_images()
# Measure correlation
self.wfs[i].measure_wfs_correlation()
self.wfs_zernike[i,:] = self.wfs[i].reconstructed_zernike
self.n_frame += 1
# Actuate the DMs
self.dms.actuate(self.wfs_zernike)
# Compute science image if needed
if (self.config.compute_science):
self.uncorrected_wavefront_sci, self.wavefront_sci = self.atmosphere.generate_science_wf(self.dms)
self.sci.degrade_image(self.images_sci, self.uncorrected_wavefront_sci, self.wavefront_sci)
if (plots):
plots.show_mcao(self.wfs, self.dms, self.sci, self.atmosphere)
pl.show()
if (silence):
for wfs in self.wfs:
wfs.logger.disabled = False
self.logger.disabled = False
# Activate events to send data to GUI
if (self.operation_mode == 'mcao' or self.operation_mode == 'scao'):
self.event_comm.set()
self.event_comm.clear()
breakpoint()
if (__name__ == '__main__'):
mcao = Simulator('gregor.ini')
mcao.init_simulation()
mcao.init_time()
if (mcao.operation_mode == 'mcao'):
for i in range(200):
mcao.frame_mcao(silence=False)
if (mcao.operation_mode == 'scao'):
for i in range(200):
mcao.frame_scao(silence=True)
if (mcao.operation_mode == 'mcao_single'):
mcao.frame_mcao(silence=False, plot=True)
if (mcao.operation_mode == 'scao_single'):
mcao.frame_scao(silence=False, plot=True)
mcao.end_time()
mcao.print_time()
mcao.finalize() | pymcao/simulator.py | import numpy as np
import matplotlib.pyplot as pl
from configobj import ConfigObj
from astropy import units as u
import copy
import pymcao.wfs as wfs
import pymcao.atmosphere as atmosphere
import pymcao.sun as sun
import logging
import time
from tqdm import tqdm
import threading
import pymcao.comm as comm
import pymcao.dms as dms
import pymcao.plots as plots
import pymcao.science as science
import pymcao.fft as fft
import pymcao.config as config
try:
from PyQt5.QtNetwork import QHostAddress, QTcpServer
PYQT_VERSION = 5
except (ImportError ,RuntimeError):
from PyQt4 import QtGui, QtCore
QtWidgets = QtGui
PYQT_VERSION = 4
__all__ = ['Simulator']
class Simulator(object):
def __init__(self, configuration_file=None):
self.configuration_file = configuration_file
self.config = config.Config(configuration_file)
# Logger
self.logger = logging.getLogger("SIM ")
self.logger.setLevel(logging.INFO)
self.logger.handlers = []
ch = logging.StreamHandler(self.config.logfile)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
def init_simulation(self, init_server=False):
self.operation_mode = self.config.operation_mode
self.n_zernike = self.config.n_zernike
self.n_stars = self.config.n_stars
self.n_frame = 0
# Instantiate all elements of the simulation
#---------------------------------------------
# Instantiate a single WFS
#---------------------------------------------
single_wfs = wfs.WFS(self.config)
# Precompute Zernike basis
single_wfs.precompute_zernike(n_zernike=self.n_zernike)
single_wfs.compute_subapertures(fill_fraction=self.config.fill_fraction)
single_wfs.init_fft()
if (self.operation_mode == 'scao_single' or self.operation_mode == 'scao'):
self.wfs = [single_wfs]
if (self.operation_mode == 'mcao_single' or self.operation_mode == 'mcao'):
self.wfs = [copy.copy(single_wfs) for i in range(self.config.n_stars)]
self.config.pixel_size_pupil = self.wfs[0].pupil_pixel_size_cm
self.config.npix_overfill = self.wfs[0].npix_overfill
self.config.npix_pupil = self.wfs[0].npix_pupil
#---------------------------------------------
# Intantiate the atmosphere
#---------------------------------------------
self.atmosphere = atmosphere.Atmosphere(self.config, self.wfs[0].Z, self.wfs[0].pupil)
# Find lock points for MCAO and observing points for later degrading the
# observations
self.lock_points = self.atmosphere.lock_points()
self.sci_pointings = self.atmosphere.sci_pointings()
#---------------------------------------------
# Instantiate the Sun that provides the images
#---------------------------------------------
self.sun = sun.Sun(self.config, self.lock_points, self.sci_pointings)
#---------------------------------------------
# Instantiate the DMs
#---------------------------------------------
self.dms = dms.DM(self.config, self.wfs[0].Z)
#---------------------------------------------
# Instantiate science camera if present
#---------------------------------------------
if (self.config.compute_science):
self.config.patch_size_science = self.atmosphere.patch_size_science
self.sci = science.Science(self.config, self.wfs[0].Z, self.wfs[0].pupil)
self.sci.init_fft()
#---------------------------------------------
# Instantiate communication mode if necessary
#---------------------------------------------
if (init_server):
if (self.operation_mode == 'mcao' or self.operation_mode == 'scao'):
self.event_comm = threading.Event()
self.comm = threading.Thread(target=comm.Comm, args=(self, self.config.cadence, self.event_comm))
self.comm.start()
def init_time(self):
self.start_time = time.time()
def end_time(self):
self.end_time = time.time()
def print_time(self):
dt = self.end_time - self.start_time
fps = (self.n_frame+1) / dt
print(f" Time : {dt:.4f} s - FPS : {fps:.4f} - Cadence : {1.0/fps:.4f}")
def close_loggers(self):
"""
Close all loggers after simulation ends
"""
for logger in [self.logger, self.sun.logger, self.atmosphere.logger, \
self.sci.logger, self.config.logger, self.dms.logger]:
for handler in logger.handlers:
handler.close()
logger.removeHandler(handler)
for phase_screen in self.atmosphere.screens:
for handler in logger.handlers:
handler.close()
logger.removeHandler(handler)
for wfs in self.wfs:
for handler in wfs.logger.handlers:
handler.close()
logger.removeHandler(handler)
def finalize(self):
self.close_loggers()
def start_step(self, silence = False):
if (silence):
for wfs in self.wfs:
wfs.logger.disabled = True
self.logger.disabled = True
self.logger.info("Frame")
def frame_scao(self, silence = False, plot=False):
"""
Run a single frame in SCAO mode
Parameters
----------
silence : bool, optional
[description], by default False
plot : bool, optional
[description], by default False
"""
if (silence):
self.wfs[0].logger.disabled = True
self.logger.disabled = True
self.logger.info("Frame")
# Generate new turbulent atmosphere
# self.atmosphere.generate_turbulent_zernikes_kolmogorov(r0=20.0)
self.atmosphere.zernike_turbulent_metapupils()
self.atmosphere.move_screens()
# Compute the wavefronts for all WFS
self.wavefront = self.atmosphere.generate_wfs()
self.images, self.images_sci = self.sun.new_image()
# Set the image of the Sun in the WFS
self.wfs[0].set_image(self.images[0,:,:])
# Set wavefront
self.wfs[0].set_wavefront(self.wavefront[0,:])
# Compute subpupil PSFs and images
self.wfs[0].generate_subpupil_psf()
self.wfs[0].generate_wfs_images()
# Measure correlation
self.wfs[0].measure_wfs_correlation()
self.uncorrected_wavefront_sci = self.atmosphere.generate_science_wf()
self.sci.degrade_image(self.images_sci, self.uncorrected_wavefront_sci)
self.n_frame += 1
if (silence):
self.wfs[0].logger.disabled = False
self.logger.disabled = False
if (plot):
plots.show_scao(self.wfs[0], save_results=True)
def frame_mcao(self, silence = False, plot=False):
if (silence):
for wfs in self.wfs:
wfs.logger.disabled = True
self.logger.disabled = True
self.logger.info("Frame")
# Generate new turbulent atmosphere
# self.atmosphere.generate_turbulent_zernikes_kolmogorov(r0=20.0)
self.atmosphere.zernike_turbulent_metapupils()
self.atmosphere.move_screens()
# Compute the wavefronts for all WFS
self.wavefront = self.atmosphere.generate_wfs()
# Extract the images for the WFS and for the science camera
self.images, self.images_sci = self.sun.new_image()
self.wfs_zernike = np.zeros((self.n_stars, self.n_zernike))
# Compute the wavefront for every direction
for i in range(self.n_stars):
self.wfs[i].set_image(self.images[i,:,:])
# Set wavefront
self.wfs[i].set_wavefront(self.wavefront[i,:])
# Compute subpupil PSFs and images
self.wfs[i].generate_subpupil_psf()
self.wfs[i].generate_wfs_images()
# Measure correlation
self.wfs[i].measure_wfs_correlation()
self.wfs_zernike[i,:] = self.wfs[i].reconstructed_zernike
self.n_frame += 1
# Actuate the DMs
self.dms.actuate(self.wfs_zernike)
# Compute science image if needed
if (self.config.compute_science):
self.uncorrected_wavefront_sci, self.wavefront_sci = self.atmosphere.generate_science_wf(self.dms)
self.sci.degrade_image(self.images_sci, self.uncorrected_wavefront_sci, self.wavefront_sci)
if (plots):
plots.show_mcao(self.wfs, self.dms, self.sci, self.atmosphere)
pl.show()
if (silence):
for wfs in self.wfs:
wfs.logger.disabled = False
self.logger.disabled = False
# Activate events to send data to GUI
if (self.operation_mode == 'mcao' or self.operation_mode == 'scao'):
self.event_comm.set()
self.event_comm.clear()
breakpoint()
if (__name__ == '__main__'):
mcao = Simulator('gregor.ini')
mcao.init_simulation()
mcao.init_time()
if (mcao.operation_mode == 'mcao'):
for i in range(200):
mcao.frame_mcao(silence=False)
if (mcao.operation_mode == 'scao'):
for i in range(200):
mcao.frame_scao(silence=True)
if (mcao.operation_mode == 'mcao_single'):
mcao.frame_mcao(silence=False, plot=True)
if (mcao.operation_mode == 'scao_single'):
mcao.frame_scao(silence=False, plot=True)
mcao.end_time()
mcao.print_time()
mcao.finalize() | 0.664867 | 0.175609 |
from igia.utils import bed2bam, SeqFile, load_seqinfo
from igia.element import identify_element
from igia.transcript import identify_transcript
import os
import unittest
class TestAnnotation(unittest.TestCase):
def setUp(self):
self.ann_dir = "tests/data/ann.bed12"
self.size = "tests/data/chrom.sizes"
names = ('Bract', 'Cotyledon')
ngs = [os.path.join('tests/data', "%s.sort.bam" % name) for name in names]
self.ngs = [SeqFile(fname, 'NGS') for fname in ngs]
self.ann = SeqFile(bed2bam(self.ann_dir, self.size, "/tmp/igia"), 'ANN')
load_seqinfo(self.ngs)
def test_load_ann(self):
ann = SeqFile(bed2bam(self.ann_dir, self.size, "/tmp/igia"), 'ANN')
ann_read_list = list(ann.bam.fetch("Chr01"))
self.assertEqual(len(ann_read_list), 3)
self.assertListEqual(
sorted([(read.reference_name, read.reference_start, read.reference_end) for read in ann_read_list]),
[('Chr01', 798113, 800959), ('Chr01', 798113, 801059), ('Chr01', 4649477, 4652385)])
def test_identify_element_singal_ann_no_txs(self):
gene_list = identify_element("Chr01", 4648477, 4653385, self.ngs, list(), list(), list(), self.ann)
self.assertEqual(len(gene_list), 1)
gene = gene_list[0]
self.assertEqual(str(gene), "Gene: Chr01:4648477-4653385 -")
intron_list = sorted(gene.intron_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(intron_list),
"[Intron: Chr01:4650078-4650291 -, Intron: Chr01:4650579-4650664 -, " +
"Intron: Chr01:4650748-4651282 -, Intron: Chr01:4651337-4651429 -, " +
"Intron: Chr01:4651463-4651566 -, Intron: Chr01:4651660-4651841 -]")
internal_exon_list = sorted(gene.internal_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(internal_exon_list),
"[Exon: Chr01:4650291-4650579 -, Exon: Chr01:4650664-4650748 -, " +
"Exon: Chr01:4651282-4651337 -, Exon: Chr01:4651429-4651463 -, " +
"Exon: Chr01:4651566-4651660 -]")
tss_exon_list = sorted(gene.tss_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tss_exon_list), "[Exon: Chr01:4651841-4652385 -]")
tes_exon_list = sorted(gene.tes_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tes_exon_list), "[Exon: Chr01:4649477-4650078 -]")
def test_identify_element_singal_ann_with_tss(self):
gene_list = identify_element(
"Chr01", 4648477, 4653385, self.ngs, list(), [("Chr01", 4653000, "-")], list(), self.ann)
self.assertEqual(len(gene_list), 1)
gene = gene_list[0]
self.assertEqual(str(gene), "Gene: Chr01:4648477-4653385 -")
intron_list = sorted(gene.intron_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(intron_list),
"[Intron: Chr01:4650078-4650291 -, Intron: Chr01:4650579-4650664 -, " +
"Intron: Chr01:4650748-4651282 -, Intron: Chr01:4651337-4651429 -, " +
"Intron: Chr01:4651463-4651566 -, Intron: Chr01:4651660-4651841 -]")
internal_exon_list = sorted(gene.internal_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(internal_exon_list),
"[Exon: Chr01:4650291-4650579 -, Exon: Chr01:4650664-4650748 -, " +
"Exon: Chr01:4651282-4651337 -, Exon: Chr01:4651429-4651463 -, " +
"Exon: Chr01:4651566-4651660 -]")
tss_exon_list = sorted(gene.tss_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tss_exon_list), "[Exon: Chr01:4651841-4653000 -]")
tes_exon_list = sorted(gene.tes_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tes_exon_list), "[Exon: Chr01:4649477-4650078 -]")
def test_identify_element_singal_ann_with_tes(self):
gene_list = identify_element(
"Chr01", 4648477, 4653385, self.ngs, list(), list(), [("Chr01", 4649000, "-")], self.ann)
self.assertEqual(len(gene_list), 1)
gene = gene_list[0]
self.assertEqual(str(gene), "Gene: Chr01:4648477-4653385 -")
intron_list = sorted(gene.intron_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(intron_list),
"[Intron: Chr01:4650078-4650291 -, Intron: Chr01:4650579-4650664 -, " +
"Intron: Chr01:4650748-4651282 -, Intron: Chr01:4651337-4651429 -, " +
"Intron: Chr01:4651463-4651566 -, Intron: Chr01:4651660-4651841 -]")
internal_exon_list = sorted(gene.internal_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(internal_exon_list),
"[Exon: Chr01:4650291-4650579 -, Exon: Chr01:4650664-4650748 -, " +
"Exon: Chr01:4651282-4651337 -, Exon: Chr01:4651429-4651463 -, " +
"Exon: Chr01:4651566-4651660 -]")
tss_exon_list = sorted(gene.tss_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tss_exon_list), "[Exon: Chr01:4651841-4652385 -]")
tes_exon_list = sorted(gene.tes_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tes_exon_list), "[Exon: Chr01:4649000-4650078 -]")
def test_identify_element_singal_ann_with_txs(self):
gene_list = identify_element(
"Chr01", 4648477, 4653385, self.ngs, list(), [("Chr01", 4653000, "-")], [("Chr01", 4649000, "-")], self.ann)
self.assertEqual(len(gene_list), 1)
gene = gene_list[0]
self.assertEqual(str(gene), "Gene: Chr01:4648477-4653385 -")
intron_list = sorted(gene.intron_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(intron_list),
"[Intron: Chr01:4650078-4650291 -, Intron: Chr01:4650579-4650664 -, " +
"Intron: Chr01:4650748-4651282 -, Intron: Chr01:4651337-4651429 -, " +
"Intron: Chr01:4651463-4651566 -, Intron: Chr01:4651660-4651841 -]")
internal_exon_list = sorted(gene.internal_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(internal_exon_list),
"[Exon: Chr01:4650291-4650579 -, Exon: Chr01:4650664-4650748 -, " +
"Exon: Chr01:4651282-4651337 -, Exon: Chr01:4651429-4651463 -, " +
"Exon: Chr01:4651566-4651660 -]")
tss_exon_list = sorted(gene.tss_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tss_exon_list), "[Exon: Chr01:4651841-4653000 -]")
tes_exon_list = sorted(gene.tes_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tes_exon_list), "[Exon: Chr01:4649000-4650078 -]")
def test_identify_element_mutil_anns(self):
gene_list = identify_element("Chr01", 795113, 808059, self.ngs, list(), list(), list(), self.ann)
self.assertEqual(len(gene_list), 1)
gene = gene_list[0]
self.assertEqual(str(gene), "Gene: Chr01:797113-802059 -")
intron_list = sorted(gene.intron_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(intron_list), "[Intron: Chr01:799412-799981 -, Intron: Chr01:800328-800876 -]")
internal_exon_list = sorted(gene.internal_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(internal_exon_list), "[Exon: Chr01:799981-800328 -]")
tss_exon_list = sorted(gene.tss_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tss_exon_list), "[Exon: Chr01:800876-800959 -, Exon: Chr01:800876-801059 -]")
tes_exon_list = sorted(gene.tes_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tes_exon_list), "[Exon: Chr01:798113-799412 -]")
def test_identify_transcript_singal(self):
gene_list = identify_element("Chr01", 4648477, 4653385, self.ngs, list(), list(), list(), self.ann)
gene = gene_list[0]
trans = identify_transcript(gene, self.ann)
self.assertEqual(str(trans.isoA),
"[Isoform: segment array: [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]; tss=True; tes=True; invalid=False]")
def test_identify_transcript_multi(self):
gene_list = identify_element("Chr01", 795113, 808059, self.ngs, list(), list(), list(), self.ann)
gene = gene_list[0]
trans = identify_transcript(gene, self.ann)
self.assertEqual(str(trans.isoA),
"[Isoform: segment array: [1, 0, 1, 0, 1, 0]; tss=True; tes=True; invalid=False, " +
"Isoform: segment array: [1, 0, 1, 0, 1, 1]; tss=True; tes=True; invalid=False]") | tests/test_annotation.py | from igia.utils import bed2bam, SeqFile, load_seqinfo
from igia.element import identify_element
from igia.transcript import identify_transcript
import os
import unittest
class TestAnnotation(unittest.TestCase):
def setUp(self):
self.ann_dir = "tests/data/ann.bed12"
self.size = "tests/data/chrom.sizes"
names = ('Bract', 'Cotyledon')
ngs = [os.path.join('tests/data', "%s.sort.bam" % name) for name in names]
self.ngs = [SeqFile(fname, 'NGS') for fname in ngs]
self.ann = SeqFile(bed2bam(self.ann_dir, self.size, "/tmp/igia"), 'ANN')
load_seqinfo(self.ngs)
def test_load_ann(self):
ann = SeqFile(bed2bam(self.ann_dir, self.size, "/tmp/igia"), 'ANN')
ann_read_list = list(ann.bam.fetch("Chr01"))
self.assertEqual(len(ann_read_list), 3)
self.assertListEqual(
sorted([(read.reference_name, read.reference_start, read.reference_end) for read in ann_read_list]),
[('Chr01', 798113, 800959), ('Chr01', 798113, 801059), ('Chr01', 4649477, 4652385)])
def test_identify_element_singal_ann_no_txs(self):
gene_list = identify_element("Chr01", 4648477, 4653385, self.ngs, list(), list(), list(), self.ann)
self.assertEqual(len(gene_list), 1)
gene = gene_list[0]
self.assertEqual(str(gene), "Gene: Chr01:4648477-4653385 -")
intron_list = sorted(gene.intron_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(intron_list),
"[Intron: Chr01:4650078-4650291 -, Intron: Chr01:4650579-4650664 -, " +
"Intron: Chr01:4650748-4651282 -, Intron: Chr01:4651337-4651429 -, " +
"Intron: Chr01:4651463-4651566 -, Intron: Chr01:4651660-4651841 -]")
internal_exon_list = sorted(gene.internal_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(internal_exon_list),
"[Exon: Chr01:4650291-4650579 -, Exon: Chr01:4650664-4650748 -, " +
"Exon: Chr01:4651282-4651337 -, Exon: Chr01:4651429-4651463 -, " +
"Exon: Chr01:4651566-4651660 -]")
tss_exon_list = sorted(gene.tss_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tss_exon_list), "[Exon: Chr01:4651841-4652385 -]")
tes_exon_list = sorted(gene.tes_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tes_exon_list), "[Exon: Chr01:4649477-4650078 -]")
def test_identify_element_singal_ann_with_tss(self):
gene_list = identify_element(
"Chr01", 4648477, 4653385, self.ngs, list(), [("Chr01", 4653000, "-")], list(), self.ann)
self.assertEqual(len(gene_list), 1)
gene = gene_list[0]
self.assertEqual(str(gene), "Gene: Chr01:4648477-4653385 -")
intron_list = sorted(gene.intron_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(intron_list),
"[Intron: Chr01:4650078-4650291 -, Intron: Chr01:4650579-4650664 -, " +
"Intron: Chr01:4650748-4651282 -, Intron: Chr01:4651337-4651429 -, " +
"Intron: Chr01:4651463-4651566 -, Intron: Chr01:4651660-4651841 -]")
internal_exon_list = sorted(gene.internal_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(internal_exon_list),
"[Exon: Chr01:4650291-4650579 -, Exon: Chr01:4650664-4650748 -, " +
"Exon: Chr01:4651282-4651337 -, Exon: Chr01:4651429-4651463 -, " +
"Exon: Chr01:4651566-4651660 -]")
tss_exon_list = sorted(gene.tss_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tss_exon_list), "[Exon: Chr01:4651841-4653000 -]")
tes_exon_list = sorted(gene.tes_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tes_exon_list), "[Exon: Chr01:4649477-4650078 -]")
def test_identify_element_singal_ann_with_tes(self):
gene_list = identify_element(
"Chr01", 4648477, 4653385, self.ngs, list(), list(), [("Chr01", 4649000, "-")], self.ann)
self.assertEqual(len(gene_list), 1)
gene = gene_list[0]
self.assertEqual(str(gene), "Gene: Chr01:4648477-4653385 -")
intron_list = sorted(gene.intron_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(intron_list),
"[Intron: Chr01:4650078-4650291 -, Intron: Chr01:4650579-4650664 -, " +
"Intron: Chr01:4650748-4651282 -, Intron: Chr01:4651337-4651429 -, " +
"Intron: Chr01:4651463-4651566 -, Intron: Chr01:4651660-4651841 -]")
internal_exon_list = sorted(gene.internal_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(internal_exon_list),
"[Exon: Chr01:4650291-4650579 -, Exon: Chr01:4650664-4650748 -, " +
"Exon: Chr01:4651282-4651337 -, Exon: Chr01:4651429-4651463 -, " +
"Exon: Chr01:4651566-4651660 -]")
tss_exon_list = sorted(gene.tss_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tss_exon_list), "[Exon: Chr01:4651841-4652385 -]")
tes_exon_list = sorted(gene.tes_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tes_exon_list), "[Exon: Chr01:4649000-4650078 -]")
def test_identify_element_singal_ann_with_txs(self):
gene_list = identify_element(
"Chr01", 4648477, 4653385, self.ngs, list(), [("Chr01", 4653000, "-")], [("Chr01", 4649000, "-")], self.ann)
self.assertEqual(len(gene_list), 1)
gene = gene_list[0]
self.assertEqual(str(gene), "Gene: Chr01:4648477-4653385 -")
intron_list = sorted(gene.intron_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(intron_list),
"[Intron: Chr01:4650078-4650291 -, Intron: Chr01:4650579-4650664 -, " +
"Intron: Chr01:4650748-4651282 -, Intron: Chr01:4651337-4651429 -, " +
"Intron: Chr01:4651463-4651566 -, Intron: Chr01:4651660-4651841 -]")
internal_exon_list = sorted(gene.internal_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(internal_exon_list),
"[Exon: Chr01:4650291-4650579 -, Exon: Chr01:4650664-4650748 -, " +
"Exon: Chr01:4651282-4651337 -, Exon: Chr01:4651429-4651463 -, " +
"Exon: Chr01:4651566-4651660 -]")
tss_exon_list = sorted(gene.tss_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tss_exon_list), "[Exon: Chr01:4651841-4653000 -]")
tes_exon_list = sorted(gene.tes_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tes_exon_list), "[Exon: Chr01:4649000-4650078 -]")
def test_identify_element_mutil_anns(self):
gene_list = identify_element("Chr01", 795113, 808059, self.ngs, list(), list(), list(), self.ann)
self.assertEqual(len(gene_list), 1)
gene = gene_list[0]
self.assertEqual(str(gene), "Gene: Chr01:797113-802059 -")
intron_list = sorted(gene.intron_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(intron_list), "[Intron: Chr01:799412-799981 -, Intron: Chr01:800328-800876 -]")
internal_exon_list = sorted(gene.internal_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(internal_exon_list), "[Exon: Chr01:799981-800328 -]")
tss_exon_list = sorted(gene.tss_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tss_exon_list), "[Exon: Chr01:800876-800959 -, Exon: Chr01:800876-801059 -]")
tes_exon_list = sorted(gene.tes_exon_list, key=lambda x: (x.start, x.end))
self.assertEqual(str(tes_exon_list), "[Exon: Chr01:798113-799412 -]")
def test_identify_transcript_singal(self):
gene_list = identify_element("Chr01", 4648477, 4653385, self.ngs, list(), list(), list(), self.ann)
gene = gene_list[0]
trans = identify_transcript(gene, self.ann)
self.assertEqual(str(trans.isoA),
"[Isoform: segment array: [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]; tss=True; tes=True; invalid=False]")
def test_identify_transcript_multi(self):
gene_list = identify_element("Chr01", 795113, 808059, self.ngs, list(), list(), list(), self.ann)
gene = gene_list[0]
trans = identify_transcript(gene, self.ann)
self.assertEqual(str(trans.isoA),
"[Isoform: segment array: [1, 0, 1, 0, 1, 0]; tss=True; tes=True; invalid=False, " +
"Isoform: segment array: [1, 0, 1, 0, 1, 1]; tss=True; tes=True; invalid=False]") | 0.473901 | 0.333496 |
import os
import numpy as np
from PIL import Image
from pathlib import Path
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
from torchvision.io import read_image
from typing import Any, Callable, List, Optional, Tuple
import torch
import cv2
def get_dataloaders(dataset_dir, input_size, batch_size, num_workers=4):
"""
Get train and test dataloaders from the dataset
"""
def read_pil_image(img_path, height, width):
with open(img_path, 'rb') as f:
return np.array(Image.open(f).convert('RGB').resize((width, height)))
def load_all_images(dataset_path, height, width, img_ext='jpg'):
return np.array([read_pil_image(str(p), height, width) for p in
Path(dataset_path).rglob("*."+img_ext)])
# Load the data
loaded_imgs_train = load_all_images(dataset_dir+'/train/', input_size, input_size)
#calculate mean and standard deviation per channels
mean = [(loaded_imgs_train[..., i]/255).mean() for i in range(loaded_imgs_train.shape[-1])]
#print(mean)
std = [(loaded_imgs_train[..., i]/255).std() for i in range(loaded_imgs_train.shape[-1])]
#print(std)
#data transforms for trainig and testing
train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(degrees=20),
#transforms.RandomAffine(translate=(0, 0.3)),
transforms.Resize((input_size, input_size)),
transforms.ToTensor(),
transforms.Normalize(mean = mean, std = std)])
test_transform = transforms.Compose([
transforms.Resize((input_size, input_size)),
transforms.ToTensor(),
transforms.Normalize(mean = mean, std = std)])
# prepare datasets
train_data = MitDataset(dataset_dir+'train', True, transform = train_transform)
test_data = MitDataset(dataset_dir+'test', False, transform = test_transform)
# prepare dataloaders
train_loader = DataLoader(train_data, batch_size = batch_size, shuffle=True, num_workers=num_workers)
test_loader = DataLoader(test_data, batch_size = batch_size, shuffle=False, num_workers=num_workers)
return train_loader, test_loader,train_data,test_data
class MitDataset(Dataset):
LABELS_DICT = {"Opencountry":0 , "coast":1, "forest":2, "highway":3, "inside_city":4, "mountain":5 , "street":6, "tallbuilding":7}
def __init__(
self,
root: str,
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
super(MitDataset, self).__init__()
self.train = train # training set or test set
self.data_path = root
self.data, self.targets = self._load_data()
self.transform = transform
self.target_transform = target_transform
def _load_data(self):
train_imgs = []
train_label = []
for class_t in os.listdir(self.data_path):
for img in os.listdir(os.path.join(self.data_path, class_t)):
if ".jpg" in img:
#print(os.path.join(self.data_path, class_t, img))
#train_imgs.append(read_image(os.path.join(self.data_path, class_t, img)))
train_imgs.append(os.path.join(self.data_path, class_t, img))
train_label.append(self.LABELS_DICT[class_t])
return train_imgs, torch.LongTensor(train_label)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.open(img)
#target= torch.tensor(target).float()
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
#img = torch.permute(img, (2,0,1))
return img, target
def __len__(self) -> int:
return len(self.data)
LABELS_DICT = {"Opencountry":0 , "coast":1, "forest":2, "highway":3, "inside_city":4, "mountain":5 , "street":6, "tallbuilding":7}
class SiameseDataset(Dataset):
"""
Train: For each sample creates randomly a positive or a negative pair
Test: Creates fixed pairs for testing
"""
def __init__(self, dataset, train):
self.dataset = dataset
self.train = train
if self.train:
self.train_labels = self.dataset.targets
#self.train_data = self.dataset.data
#self.train_labels = torch.LongTensor(train_label)
#self.train_data = train_imgs
self.labels_set = set(self.train_labels.numpy())
self.label_to_indices = {label: np.where(self.train_labels.numpy() == label)[0]
for label in self.labels_set}
else:
# generate fixed pairs for testing
self.test_labels = self.dataset.targets
#self.test_data = self.dataset.data
self.labels_set = set(self.test_labels.numpy())
self.label_to_indices = {label: np.where(self.test_labels.numpy() == label)[0]
for label in self.labels_set}
random_state = np.random.RandomState(29)
positive_pairs = [[i,
random_state.choice(self.label_to_indices[self.test_labels[i].item()]),
1]
for i in range(0, len(self.test_labels), 2)]
negative_pairs = [[i,
random_state.choice(self.label_to_indices[
np.random.choice(
list(self.labels_set - set([self.test_labels[i].item()]))
)
]),
0]
for i in range(1, len(self.test_labels), 2)]
self.test_pairs = positive_pairs + negative_pairs
def __getitem__(self, index):
if self.train:
target = np.random.randint(0, 2)
img1, label1 = self.dataset[index][0], self.dataset[index][1]
if target == 1:
siamese_index = index
while siamese_index == index:
siamese_index = np.random.choice(self.label_to_indices[label1])
else:
siamese_label = np.random.choice(list(self.labels_set - set([label1])))
siamese_index = np.random.choice(self.label_to_indices[siamese_label])
img2 = self.dataset[siamese_index][0]
else:
img1 = self.dataset[self.test_pairs[index][0]][0]
img2 = self.dataset[self.test_pairs[index][1]][0]
target = self.test_pairs[index][2]
return (img1.float(), img2.float()), torch.tensor(target).float()
def __len__(self):
return len(self.dataset.data) | week4/dataset.py | import os
import numpy as np
from PIL import Image
from pathlib import Path
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
from torchvision.io import read_image
from typing import Any, Callable, List, Optional, Tuple
import torch
import cv2
def get_dataloaders(dataset_dir, input_size, batch_size, num_workers=4):
"""
Get train and test dataloaders from the dataset
"""
def read_pil_image(img_path, height, width):
with open(img_path, 'rb') as f:
return np.array(Image.open(f).convert('RGB').resize((width, height)))
def load_all_images(dataset_path, height, width, img_ext='jpg'):
return np.array([read_pil_image(str(p), height, width) for p in
Path(dataset_path).rglob("*."+img_ext)])
# Load the data
loaded_imgs_train = load_all_images(dataset_dir+'/train/', input_size, input_size)
#calculate mean and standard deviation per channels
mean = [(loaded_imgs_train[..., i]/255).mean() for i in range(loaded_imgs_train.shape[-1])]
#print(mean)
std = [(loaded_imgs_train[..., i]/255).std() for i in range(loaded_imgs_train.shape[-1])]
#print(std)
#data transforms for trainig and testing
train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(degrees=20),
#transforms.RandomAffine(translate=(0, 0.3)),
transforms.Resize((input_size, input_size)),
transforms.ToTensor(),
transforms.Normalize(mean = mean, std = std)])
test_transform = transforms.Compose([
transforms.Resize((input_size, input_size)),
transforms.ToTensor(),
transforms.Normalize(mean = mean, std = std)])
# prepare datasets
train_data = MitDataset(dataset_dir+'train', True, transform = train_transform)
test_data = MitDataset(dataset_dir+'test', False, transform = test_transform)
# prepare dataloaders
train_loader = DataLoader(train_data, batch_size = batch_size, shuffle=True, num_workers=num_workers)
test_loader = DataLoader(test_data, batch_size = batch_size, shuffle=False, num_workers=num_workers)
return train_loader, test_loader,train_data,test_data
class MitDataset(Dataset):
LABELS_DICT = {"Opencountry":0 , "coast":1, "forest":2, "highway":3, "inside_city":4, "mountain":5 , "street":6, "tallbuilding":7}
def __init__(
self,
root: str,
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
super(MitDataset, self).__init__()
self.train = train # training set or test set
self.data_path = root
self.data, self.targets = self._load_data()
self.transform = transform
self.target_transform = target_transform
def _load_data(self):
train_imgs = []
train_label = []
for class_t in os.listdir(self.data_path):
for img in os.listdir(os.path.join(self.data_path, class_t)):
if ".jpg" in img:
#print(os.path.join(self.data_path, class_t, img))
#train_imgs.append(read_image(os.path.join(self.data_path, class_t, img)))
train_imgs.append(os.path.join(self.data_path, class_t, img))
train_label.append(self.LABELS_DICT[class_t])
return train_imgs, torch.LongTensor(train_label)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.open(img)
#target= torch.tensor(target).float()
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
#img = torch.permute(img, (2,0,1))
return img, target
def __len__(self) -> int:
return len(self.data)
LABELS_DICT = {"Opencountry":0 , "coast":1, "forest":2, "highway":3, "inside_city":4, "mountain":5 , "street":6, "tallbuilding":7}
class SiameseDataset(Dataset):
"""
Train: For each sample creates randomly a positive or a negative pair
Test: Creates fixed pairs for testing
"""
def __init__(self, dataset, train):
self.dataset = dataset
self.train = train
if self.train:
self.train_labels = self.dataset.targets
#self.train_data = self.dataset.data
#self.train_labels = torch.LongTensor(train_label)
#self.train_data = train_imgs
self.labels_set = set(self.train_labels.numpy())
self.label_to_indices = {label: np.where(self.train_labels.numpy() == label)[0]
for label in self.labels_set}
else:
# generate fixed pairs for testing
self.test_labels = self.dataset.targets
#self.test_data = self.dataset.data
self.labels_set = set(self.test_labels.numpy())
self.label_to_indices = {label: np.where(self.test_labels.numpy() == label)[0]
for label in self.labels_set}
random_state = np.random.RandomState(29)
positive_pairs = [[i,
random_state.choice(self.label_to_indices[self.test_labels[i].item()]),
1]
for i in range(0, len(self.test_labels), 2)]
negative_pairs = [[i,
random_state.choice(self.label_to_indices[
np.random.choice(
list(self.labels_set - set([self.test_labels[i].item()]))
)
]),
0]
for i in range(1, len(self.test_labels), 2)]
self.test_pairs = positive_pairs + negative_pairs
def __getitem__(self, index):
if self.train:
target = np.random.randint(0, 2)
img1, label1 = self.dataset[index][0], self.dataset[index][1]
if target == 1:
siamese_index = index
while siamese_index == index:
siamese_index = np.random.choice(self.label_to_indices[label1])
else:
siamese_label = np.random.choice(list(self.labels_set - set([label1])))
siamese_index = np.random.choice(self.label_to_indices[siamese_label])
img2 = self.dataset[siamese_index][0]
else:
img1 = self.dataset[self.test_pairs[index][0]][0]
img2 = self.dataset[self.test_pairs[index][1]][0]
target = self.test_pairs[index][2]
return (img1.float(), img2.float()), torch.tensor(target).float()
def __len__(self):
return len(self.dataset.data) | 0.740362 | 0.533641 |
"""Example utils."""
import tensorflow as tf
import numpy as np
from tensor2tensor.data_generators import algorithmic
from tensor2tensor.layers import modalities
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import multi_problem_v2
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
from pcml.utils.dev_utils import T2TDevHelper
from pcml.datasets.example_utils import ExampleFieldTemplate
from pcml.datasets.example_utils import ExampleTemplate
from pcml.datasets.utils import gen_dummy_schedule
class DevExampleTemplate(ExampleTemplate):
def __init__(self, *args, **kwargs):
super(DevExampleTemplate, self).__init__(
fields={
"modalitya":
ExampleFieldTemplate(modality=modalities.ModalityType.SYMBOL,
vocab_size=256,
space_id=problem.SpaceID.DIGIT_0,
shape=(40,),
field_type="input",
dtype=tf.int64),
"modalityb":
ExampleFieldTemplate(modality=modalities.ModalityType.SYMBOL,
vocab_size=256,
space_id=problem.SpaceID.DIGIT_0,
shape=(40,),
field_type="input",
dtype=tf.int64),
"modalityc":
ExampleFieldTemplate(modality=modalities.ModalityType.SYMBOL,
vocab_size=256,
space_id=problem.SpaceID.DIGIT_0,
shape=(40,),
field_type="input",
dtype=tf.int64),
"targets":
ExampleFieldTemplate(modality=modalities.ModalityType.SYMBOL,
vocab_size=256,
space_id=problem.SpaceID.DIGIT_1,
shape=(1,),
field_type="target",
dtype=tf.int64),
"problem_id":
ExampleFieldTemplate(
modality=None,
vocab_size=2, #HACK
space_id=-1,
shape=(1,),
field_type=None,
dtype=tf.int64),
},
*args,
**kwargs)
@registry.register_problem
class DummyProblem(algorithmic.AlgorithmicProblem):
@property
def num_symbols(self):
return 2
@property
def train_length(self):
return 40
@property
def dev_length(self):
return 40
@property
def train_size(self):
return 100
@property
def dev_size(self):
return 100
@property
def num_shards(self):
return 1
def generator(self, nbr_symbols, max_length, nbr_cases):
"""Generator for the identity (copy) task on sequences of symbols.
The length of the sequence is drawn uniformly at random from [1, max_length]
and then symbols are drawn uniformly at random from [0, nbr_symbols) until
nbr_cases sequences have been produced.
Args:
nbr_symbols: number of symbols to use in each sequence.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list and target-list are the same.
"""
for _ in range(nbr_cases):
l = max_length + 1
inputs = [np.random.randint(nbr_symbols) for _ in range(l)]
yield {"inputs": inputs, "targets": inputs}
@registry.register_problem
class DummyProblemA(DummyProblem):
def preprocess_example(self, example, mode, hparams):
return {
"modalitya": example["inputs"],
"modalityb": example["inputs"],
"targets": example["targets"]
}
@registry.register_problem
class DummyProblemB(DummyProblem):
def preprocess_example(self, example, mode, hparams):
return {
"modalitya": example["inputs"],
"modalityc": example["inputs"],
"targets": example["targets"]
}
@registry.register_problem
class MultiModalTestMultiProblemDev(multi_problem_v2.MultiProblemV2,
algorithmic.AlgorithmicIdentityBinary40):
"""Dataset scheduling for multiple text-to-text problems."""
def __init__(self, **kwargs):
problems = [DummyProblemA(), DummyProblemB()]
super(MultiModalTestMultiProblemDev,
self).__init__(problems=problems,
schedule=gen_dummy_schedule(len(problems)),
**kwargs)
self.normalization_spec = DevExampleTemplate()
def normalize_example(self, example, hparams):
"""Assumes that example contains both inputs and targets."""
return self.normalization_spec.normalize(example)
@registry.register_model
class TrivialModel(t2t_model.T2TModel):
def body(self, features):
return features["targets"]
class TestExampleUtils(tf.test.TestCase):
def test_e2e(self):
"""This test is broken."""
helper = T2TDevHelper("trivial_model", "multi_modal_test_multi_problem_dev",
"transformer_tiny", [["1 1 0 1 0"]])
helper.datagen()
helper.train()
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.test.main() | clarify/datasets/utils/example_utils_test.py | """Example utils."""
import tensorflow as tf
import numpy as np
from tensor2tensor.data_generators import algorithmic
from tensor2tensor.layers import modalities
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import multi_problem_v2
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
from pcml.utils.dev_utils import T2TDevHelper
from pcml.datasets.example_utils import ExampleFieldTemplate
from pcml.datasets.example_utils import ExampleTemplate
from pcml.datasets.utils import gen_dummy_schedule
class DevExampleTemplate(ExampleTemplate):
def __init__(self, *args, **kwargs):
super(DevExampleTemplate, self).__init__(
fields={
"modalitya":
ExampleFieldTemplate(modality=modalities.ModalityType.SYMBOL,
vocab_size=256,
space_id=problem.SpaceID.DIGIT_0,
shape=(40,),
field_type="input",
dtype=tf.int64),
"modalityb":
ExampleFieldTemplate(modality=modalities.ModalityType.SYMBOL,
vocab_size=256,
space_id=problem.SpaceID.DIGIT_0,
shape=(40,),
field_type="input",
dtype=tf.int64),
"modalityc":
ExampleFieldTemplate(modality=modalities.ModalityType.SYMBOL,
vocab_size=256,
space_id=problem.SpaceID.DIGIT_0,
shape=(40,),
field_type="input",
dtype=tf.int64),
"targets":
ExampleFieldTemplate(modality=modalities.ModalityType.SYMBOL,
vocab_size=256,
space_id=problem.SpaceID.DIGIT_1,
shape=(1,),
field_type="target",
dtype=tf.int64),
"problem_id":
ExampleFieldTemplate(
modality=None,
vocab_size=2, #HACK
space_id=-1,
shape=(1,),
field_type=None,
dtype=tf.int64),
},
*args,
**kwargs)
@registry.register_problem
class DummyProblem(algorithmic.AlgorithmicProblem):
@property
def num_symbols(self):
return 2
@property
def train_length(self):
return 40
@property
def dev_length(self):
return 40
@property
def train_size(self):
return 100
@property
def dev_size(self):
return 100
@property
def num_shards(self):
return 1
def generator(self, nbr_symbols, max_length, nbr_cases):
"""Generator for the identity (copy) task on sequences of symbols.
The length of the sequence is drawn uniformly at random from [1, max_length]
and then symbols are drawn uniformly at random from [0, nbr_symbols) until
nbr_cases sequences have been produced.
Args:
nbr_symbols: number of symbols to use in each sequence.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list and target-list are the same.
"""
for _ in range(nbr_cases):
l = max_length + 1
inputs = [np.random.randint(nbr_symbols) for _ in range(l)]
yield {"inputs": inputs, "targets": inputs}
@registry.register_problem
class DummyProblemA(DummyProblem):
def preprocess_example(self, example, mode, hparams):
return {
"modalitya": example["inputs"],
"modalityb": example["inputs"],
"targets": example["targets"]
}
@registry.register_problem
class DummyProblemB(DummyProblem):
def preprocess_example(self, example, mode, hparams):
return {
"modalitya": example["inputs"],
"modalityc": example["inputs"],
"targets": example["targets"]
}
@registry.register_problem
class MultiModalTestMultiProblemDev(multi_problem_v2.MultiProblemV2,
algorithmic.AlgorithmicIdentityBinary40):
"""Dataset scheduling for multiple text-to-text problems."""
def __init__(self, **kwargs):
problems = [DummyProblemA(), DummyProblemB()]
super(MultiModalTestMultiProblemDev,
self).__init__(problems=problems,
schedule=gen_dummy_schedule(len(problems)),
**kwargs)
self.normalization_spec = DevExampleTemplate()
def normalize_example(self, example, hparams):
"""Assumes that example contains both inputs and targets."""
return self.normalization_spec.normalize(example)
@registry.register_model
class TrivialModel(t2t_model.T2TModel):
def body(self, features):
return features["targets"]
class TestExampleUtils(tf.test.TestCase):
def test_e2e(self):
"""This test is broken."""
helper = T2TDevHelper("trivial_model", "multi_modal_test_multi_problem_dev",
"transformer_tiny", [["1 1 0 1 0"]])
helper.datagen()
helper.train()
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.test.main() | 0.935324 | 0.486819 |
from dogqc.gpuio import GpuIO
from dogqc.cudalang import *
import dogqc.identifier as ident
import dogqc.querylib as qlib
from dogqc.variable import Variable
from dogqc.kernel import Kernel, KernelCall
from dogqc.types import Type
from dogqc.relationalAlgebra import Reduction
class Hash ( object ):
@staticmethod
def attributes ( attributes, hashVar, ctxt ):
emit ( assign ( hashVar, intConst(0) ), ctxt.codegen )
for id, a in attributes.items():
acc = ctxt.attFile.access ( a )
if a.dataType == Type.STRING:
acc = call ( "stringHash", [ acc ] )
#acc = call ( "stringHashPushDown", [ ctxt.vars.activeVar, acc ] )
emit ( assign ( hashVar, call ( qlib.Fct.HASH, [ add ( hashVar, acc ) ] ) ), ctxt.codegen )
else:
acc = cast ( CType.UINT64, acc )
with IfClause ( ctxt.vars.activeVar, ctxt.codegen ):
emit ( assign ( hashVar, call ( qlib.Fct.HASH, [ add ( hashVar, acc ) ] ) ), ctxt.codegen )
return hashVar
@staticmethod
def checkEquality ( equalvar, buildKeyAttributes, probeKeyAttributes, ctxt ):
for (bid, b), (pid, p) in zip ( buildKeyAttributes.items(), probeKeyAttributes.items() ):
if b.dataType == Type.STRING:
emit ( assignAnd ( equalvar, call ( "stringEquals", [ ctxt.attFile.access ( b ), ctxt.attFile.access ( p ) ] ) ), ctxt.codegen )
else:
emit ( assignAnd ( equalvar, equals ( ctxt.attFile.access ( b ), ctxt.attFile.access ( p ) ) ), ctxt.codegen )
class Payload ( object ):
def __init__ ( self, typeName, attributes, ctxt ):
self.typeName = typeName
self.attributes = attributes
self.vars = dict()
with StructClause ( self.typeName, ctxt.codegen.types ):
for id, a in attributes.items():
identifier = ident.att ( a )
self.vars[id] = ctxt.attFile.variable ( a, identifier )
self.vars[id].declare ( ctxt.codegen.types )
def materialize ( self, varName, code, ctxt ):
matvar = Variable.val ( self.typeName, varName, code )
for id, a in self.attributes.items():
emit ( assign ( member ( matvar, self.vars[id] ), ctxt.attFile.access ( a ) ), code )
return matvar
def dematerialize ( self, matvar, ctxt ):
for id, a in self.attributes.items():
ctxt.attFile.dematerializeAttributeFromSource ( a, member ( matvar, self.vars[id] ) )
def getType ( self ):
return self.typeName
def checkEquality ( self, equalvar, payla, paylb, ctxt ):
emit ( assign ( equalvar, intConst (1) ), ctxt.codegen )
for (id, attr) in self.attributes.items():
if attr.dataType == Type.STRING:
emit ( assignAnd ( equalvar, stringEquals ( self.access ( payla, attr ), self.access ( paylb, attr ) ) ), ctxt.codegen )
else:
emit ( assignAnd ( equalvar, equals ( self.access ( payla, attr ), self.access ( paylb, attr ) ) ), ctxt.codegen )
def access ( self, paylVar, a ):
return member ( paylVar, self.vars [ a.id ] )
class HashTableMemory ( object ):
@staticmethod
def power_bit_length(x):
return 2**(int(x)-1).bit_length()
def __init__ ( self, minEntries, codegen ):
#self.numEntries = intConst ( self.power_bit_length ( int ( minEntries ) ) )
self.numEntries = int ( minEntries )
self.columns = []
self.codegen = codegen
self.aggregateAtts = []
@staticmethod
def createUnique ( name, minEntries, payload, codegen ):
mem = HashTableMemory ( minEntries, codegen )
mem.ht = mem.addColumn ( qlib.Type.UNIQUE_HT + "<" + payload.typeName + ">", name )
call = KernelCall.library ( qlib.Krnl.INIT_UNIQUE_HT, [mem.ht.getGPU(), mem.numEntries], payload.typeName )
codegen.gpumem.cudaMallocHT.add ( call.get() )
mem.addToKernel ( codegen.currentKernel )
return mem
@staticmethod
def createMulti ( name, minHtEntries, payload, numPayloads, codegen ):
mem = HashTableMemory ( minHtEntries, codegen )
mem.ht = mem.addColumn ( qlib.Type.MULTI_HT, name )
mem.payload = mem.addSizedColumn ( payload.typeName, name + "_payload", numPayloads )
call = KernelCall.library ( qlib.Krnl.INIT_MULTI_HT, [mem.ht.getGPU(), mem.numEntries] )
codegen.gpumem.cudaMallocHT.add ( call.get() )
mem.addToKernel ( codegen.currentKernel )
return mem
@staticmethod
def createAgg ( name, minEntries, payload, codegen ):
mem = HashTableMemory ( minEntries, codegen )
mem.ht = mem.addColumn ( qlib.Type.AGG_HT + "<" + payload.typeName + ">", name )
call = KernelCall.library ( qlib.Krnl.INIT_AGG_HT, [mem.ht.getGPU(), mem.numEntries], payload.typeName )
codegen.gpumem.cudaMallocHT.add ( call.get() )
mem.addToKernel ( codegen.currentKernel )
return mem
def addAggregationAttributes ( self, aggregationAttributes, aggregateTuples, ctxt ):
self.aggAtts = dict ( aggregationAttributes )
self.aggCols = dict()
for id, a in aggregationAttributes.items():
init = None
inId, reductionType = aggregateTuples [ id ]
typ = ctxt.codegen.langType ( a.dataType )
# additive
if reductionType in [ Reduction.COUNT, Reduction.AVG, Reduction.SUM ]:
init = CType.zeroValue [ typ ]
# min
if reductionType == Reduction.MIN:
init = CType.maxValue [ typ ]
# max
elif reductionType == Reduction.MAX:
init = CType.minValue [ typ ]
ident = "agg" + str ( a.id )
aggVar = ctxt.attFile.variable ( a, ident )
self.aggCols[id] = self.addColumn ( aggVar.dataType, aggVar.get(), init )
def accessAggregationAttribute ( self, id, index ):
return self.aggCols[id].arrayAccess ( index )
def dematerializeAggregationAttributes ( self, index, ctxt ):
for id, a in self.aggAtts.items():
ctxt.attFile.dematerializeAttributeFromSource ( a, self.accessAggregationAttribute ( id, index ) )
def addColumnInternal ( self, dataType, name, numEntries, init ):
col = Variable.col ( dataType, name, numEntries )
self.codegen.gpumem.local ( col, init )
self.columns.append ( col )
return col
def addColumn ( self, dataType, name, init=None ):
return self.addColumnInternal ( dataType, name, self.numEntries, init )
def addSizedColumn ( self, dataType, name, numEntries, init=None ):
return self.addColumnInternal ( dataType, name, numEntries, init )
def addToKernel ( self, kernel ):
for c in self.columns:
kernel.addVar ( c )
def getTable ( self, opid ):
table = { "name":"aggregation", "size": self.numEntries, "numColumns":0, "id":"_ht" + str(opid) }
return table | dogqc/hashTableUtil.py | from dogqc.gpuio import GpuIO
from dogqc.cudalang import *
import dogqc.identifier as ident
import dogqc.querylib as qlib
from dogqc.variable import Variable
from dogqc.kernel import Kernel, KernelCall
from dogqc.types import Type
from dogqc.relationalAlgebra import Reduction
class Hash ( object ):
@staticmethod
def attributes ( attributes, hashVar, ctxt ):
emit ( assign ( hashVar, intConst(0) ), ctxt.codegen )
for id, a in attributes.items():
acc = ctxt.attFile.access ( a )
if a.dataType == Type.STRING:
acc = call ( "stringHash", [ acc ] )
#acc = call ( "stringHashPushDown", [ ctxt.vars.activeVar, acc ] )
emit ( assign ( hashVar, call ( qlib.Fct.HASH, [ add ( hashVar, acc ) ] ) ), ctxt.codegen )
else:
acc = cast ( CType.UINT64, acc )
with IfClause ( ctxt.vars.activeVar, ctxt.codegen ):
emit ( assign ( hashVar, call ( qlib.Fct.HASH, [ add ( hashVar, acc ) ] ) ), ctxt.codegen )
return hashVar
@staticmethod
def checkEquality ( equalvar, buildKeyAttributes, probeKeyAttributes, ctxt ):
for (bid, b), (pid, p) in zip ( buildKeyAttributes.items(), probeKeyAttributes.items() ):
if b.dataType == Type.STRING:
emit ( assignAnd ( equalvar, call ( "stringEquals", [ ctxt.attFile.access ( b ), ctxt.attFile.access ( p ) ] ) ), ctxt.codegen )
else:
emit ( assignAnd ( equalvar, equals ( ctxt.attFile.access ( b ), ctxt.attFile.access ( p ) ) ), ctxt.codegen )
class Payload ( object ):
def __init__ ( self, typeName, attributes, ctxt ):
self.typeName = typeName
self.attributes = attributes
self.vars = dict()
with StructClause ( self.typeName, ctxt.codegen.types ):
for id, a in attributes.items():
identifier = ident.att ( a )
self.vars[id] = ctxt.attFile.variable ( a, identifier )
self.vars[id].declare ( ctxt.codegen.types )
def materialize ( self, varName, code, ctxt ):
matvar = Variable.val ( self.typeName, varName, code )
for id, a in self.attributes.items():
emit ( assign ( member ( matvar, self.vars[id] ), ctxt.attFile.access ( a ) ), code )
return matvar
def dematerialize ( self, matvar, ctxt ):
for id, a in self.attributes.items():
ctxt.attFile.dematerializeAttributeFromSource ( a, member ( matvar, self.vars[id] ) )
def getType ( self ):
return self.typeName
def checkEquality ( self, equalvar, payla, paylb, ctxt ):
emit ( assign ( equalvar, intConst (1) ), ctxt.codegen )
for (id, attr) in self.attributes.items():
if attr.dataType == Type.STRING:
emit ( assignAnd ( equalvar, stringEquals ( self.access ( payla, attr ), self.access ( paylb, attr ) ) ), ctxt.codegen )
else:
emit ( assignAnd ( equalvar, equals ( self.access ( payla, attr ), self.access ( paylb, attr ) ) ), ctxt.codegen )
def access ( self, paylVar, a ):
return member ( paylVar, self.vars [ a.id ] )
class HashTableMemory ( object ):
@staticmethod
def power_bit_length(x):
return 2**(int(x)-1).bit_length()
def __init__ ( self, minEntries, codegen ):
#self.numEntries = intConst ( self.power_bit_length ( int ( minEntries ) ) )
self.numEntries = int ( minEntries )
self.columns = []
self.codegen = codegen
self.aggregateAtts = []
@staticmethod
def createUnique ( name, minEntries, payload, codegen ):
mem = HashTableMemory ( minEntries, codegen )
mem.ht = mem.addColumn ( qlib.Type.UNIQUE_HT + "<" + payload.typeName + ">", name )
call = KernelCall.library ( qlib.Krnl.INIT_UNIQUE_HT, [mem.ht.getGPU(), mem.numEntries], payload.typeName )
codegen.gpumem.cudaMallocHT.add ( call.get() )
mem.addToKernel ( codegen.currentKernel )
return mem
@staticmethod
def createMulti ( name, minHtEntries, payload, numPayloads, codegen ):
mem = HashTableMemory ( minHtEntries, codegen )
mem.ht = mem.addColumn ( qlib.Type.MULTI_HT, name )
mem.payload = mem.addSizedColumn ( payload.typeName, name + "_payload", numPayloads )
call = KernelCall.library ( qlib.Krnl.INIT_MULTI_HT, [mem.ht.getGPU(), mem.numEntries] )
codegen.gpumem.cudaMallocHT.add ( call.get() )
mem.addToKernel ( codegen.currentKernel )
return mem
@staticmethod
def createAgg ( name, minEntries, payload, codegen ):
mem = HashTableMemory ( minEntries, codegen )
mem.ht = mem.addColumn ( qlib.Type.AGG_HT + "<" + payload.typeName + ">", name )
call = KernelCall.library ( qlib.Krnl.INIT_AGG_HT, [mem.ht.getGPU(), mem.numEntries], payload.typeName )
codegen.gpumem.cudaMallocHT.add ( call.get() )
mem.addToKernel ( codegen.currentKernel )
return mem
def addAggregationAttributes ( self, aggregationAttributes, aggregateTuples, ctxt ):
self.aggAtts = dict ( aggregationAttributes )
self.aggCols = dict()
for id, a in aggregationAttributes.items():
init = None
inId, reductionType = aggregateTuples [ id ]
typ = ctxt.codegen.langType ( a.dataType )
# additive
if reductionType in [ Reduction.COUNT, Reduction.AVG, Reduction.SUM ]:
init = CType.zeroValue [ typ ]
# min
if reductionType == Reduction.MIN:
init = CType.maxValue [ typ ]
# max
elif reductionType == Reduction.MAX:
init = CType.minValue [ typ ]
ident = "agg" + str ( a.id )
aggVar = ctxt.attFile.variable ( a, ident )
self.aggCols[id] = self.addColumn ( aggVar.dataType, aggVar.get(), init )
def accessAggregationAttribute ( self, id, index ):
return self.aggCols[id].arrayAccess ( index )
def dematerializeAggregationAttributes ( self, index, ctxt ):
for id, a in self.aggAtts.items():
ctxt.attFile.dematerializeAttributeFromSource ( a, self.accessAggregationAttribute ( id, index ) )
def addColumnInternal ( self, dataType, name, numEntries, init ):
col = Variable.col ( dataType, name, numEntries )
self.codegen.gpumem.local ( col, init )
self.columns.append ( col )
return col
def addColumn ( self, dataType, name, init=None ):
return self.addColumnInternal ( dataType, name, self.numEntries, init )
def addSizedColumn ( self, dataType, name, numEntries, init=None ):
return self.addColumnInternal ( dataType, name, numEntries, init )
def addToKernel ( self, kernel ):
for c in self.columns:
kernel.addVar ( c )
def getTable ( self, opid ):
table = { "name":"aggregation", "size": self.numEntries, "numColumns":0, "id":"_ht" + str(opid) }
return table | 0.364664 | 0.180143 |
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Field, Div, Reset
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.urls import reverse
from django.utils.translation import gettext as __
from django.utils.translation import gettext_lazy as _
from mptt.forms import TreeNodeMultipleChoiceField
from booking.models import (
Material,
Category,
Event,
MaterialAlias,
Game,
Booking,
RateClass,
)
class MaterialForm(forms.ModelForm):
categories = TreeNodeMultipleChoiceField(queryset=Category.objects.all())
class Meta:
model = Material
fields = "__all__"
def clean(self):
cleaned_data = super().clean()
name = cleaned_data.get("name")
alias = MaterialAlias.objects.filter(name__iexact=name)
if alias:
raise forms.ValidationError(
_("There exists already a material alias with the given name.")
)
return cleaned_data
class MaterialAliasForm(forms.ModelForm):
class Meta:
model = MaterialAlias
fields = "__all__"
def clean(self):
cleaned_data = super().clean()
name = cleaned_data.get("name")
material = Material.objects.filter(name__iexact=name)
if material:
raise forms.ValidationError(
_("There exists already a material with the given name.")
)
return cleaned_data
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = "__all__"
help_texts = {}
error_messages = {}
def __init__(self, *args, **kwargs):
super(CategoryForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = "post"
self.helper.add_input(Submit("submit", _("Submit")))
class EventForm(forms.ModelForm):
class Meta:
model = Event
fields = "__all__"
def clean(self):
cleaned_data = super().clean()
booking_start = cleaned_data.get("booking_start")
booking_end = cleaned_data.get("booking_end")
privileged_booking_end = cleaned_data.get("privileged_booking_end")
event_start = cleaned_data.get("event_start")
event_end = cleaned_data.get("event_end")
if booking_end and booking_start and booking_end < booking_start:
raise forms.ValidationError(
_("Booking end cannot be earlier than booking start.")
)
if (
privileged_booking_end
and booking_end
and privileged_booking_end < booking_end
):
raise forms.ValidationError(
_("Privileged booking end cannot be earlier than booking end.")
)
if event_end and event_start and event_end < event_start:
raise forms.ValidationError(
_("Event end cannot be earlier than event start.")
)
return cleaned_data
class GameForm(forms.ModelForm):
class Meta:
model = Game
fields = "__all__"
widgets = {
"event": forms.HiddenInput(),
"day": forms.HiddenInput(),
"group": forms.HiddenInput(),
}
labels = {
"name": __("Game name"),
"location": "<i class='fas fa-map-marker-alt'></i> " + __("Location"),
"part_of_day": "<i class='fas fa-clock'></i> " + __("Part of day"),
}
_("Game name"), _("Location"), _("Part of day") # For detection by makemessages
help_texts = {"name": None, "location": None, "part_of_day": None}
def __init__(self, *args, **kwargs):
super(GameForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
if self.instance.id:
self.helper.form_action = reverse(
"booking:api:edit_game", args=[self.instance.id]
)
else:
self.helper.form_action = reverse("booking:api:new_game")
self.helper.form_method = "POST"
css_class = "game-form-update" if self.instance.id else "game-form-create"
self.helper.form_class = "game-form " + css_class
self.helper.layout = Layout(
Div(
Field(
"name",
template="crispy/floating-labels.html",
wrapper_class="col px-1 mb-lg-0",
),
Field(
"part_of_day",
template="crispy/floating-labels.html",
wrapper_class="col-lg-3 px-1 mb-lg-0",
),
Field(
"location",
template="crispy/floating-labels.html",
wrapper_class="col-lg-3 px-1 mb-lg-0",
),
Div(
Submit(
"update" if self.instance.id else "add",
_("Update") if self.instance.id else _("Add"),
css_id=self.auto_id % "submit",
css_class="btn-outline-primary crispy-outline",
),
css_class="col-auto px-1 mb-lg-0 form-label-group",
),
Div(
Reset(
"reset",
_("Cancel"),
css_id=self.auto_id % "reset",
),
css_class="col-auto px-1 mb-lg-0 form-label-group",
)
if self.instance.id
else None,
"event",
"day",
"group",
css_class="row",
),
)
def clean(self):
"""
Here we check whether the day of the game is part of the event.
:return:
"""
cleaned_data = super().clean()
day = cleaned_data.get("day")
event = cleaned_data.get("event")
if day < event.event_start:
raise forms.ValidationError(
_("Day of game cannot be earlier than event start.")
)
if day > event.event_end:
raise forms.ValidationError(
_("Day of game cannot be later than event end.")
)
return cleaned_data
class BookingForm(forms.ModelForm):
class Meta:
model = Booking
fields = "__all__"
widgets = {
"game": forms.HiddenInput(),
"material": forms.TextInput(),
"custom_material": forms.HiddenInput(),
}
labels = {
"material": "",
"amount": __("Amount"),
"workweek": __("Workweek"),
"comment": "<i class='far fa-comment'></i> " + __("Comment"),
}
_("Amount"), _("Workweek"), _("Comment") # For detection by makemessages
help_texts = {
"material": None,
"amount": None,
"workweek": None,
"comment": None,
}
def __init__(self, *args, **kwargs):
super(BookingForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
if self.instance.id:
self.helper.form_action = reverse(
"booking:api:edit_booking", args=[self.instance.id]
)
else:
self.helper.form_action = reverse("booking:api:new_booking")
self.helper.form_method = "POST"
css_class = "booking-form-update" if self.instance.id else "booking-form-create"
self.helper.form_class = "booking-form w-100 " + css_class
material_id = ""
material_name = ""
if self.instance.material:
material_id = self.instance.material.id
material_name = self.instance.material.name
if self.instance.custom_material:
material_id = self.instance.custom_material
material_name = self.instance.custom_material
self.helper.layout = Layout(
Div(
Field(
"material",
wrapper_class="col-8 col-xl-3 px-1 mb-xl-0",
css_class="typeahead-materials floating-label-size",
placeholder=__("Material") + "*",
autocomplete="off",
data_materialid=material_id,
data_materialname=material_name,
data_invalidmessage=__("Choose a material"),
data_allowcustom="true",
data_notfoundtext=__("Material not found..."),
data_addcustomtext=__('Click to request "<em>{}</em>" anyway.'),
title=__("Material"),
),
Field(
"amount",
template="crispy/floating-labels.html",
wrapper_class="col-4 col-xl-2 px-1 mb-xl-0",
autocomplete="off",
title=__("Amount"),
),
Field(
"workweek",
wrapper_class="col-auto px-1 mb-xl-0",
template="crispy/floating-labels.html",
data_toggle="toggle",
data_on=__("Workweek"),
data_off=__("No"),
),
Field(
"comment",
template="crispy/floating-labels.html",
wrapper_class="col px-1 mb-xl-0",
title=__("Comment"),
),
Div(
Submit(
"submit",
_("Update") if self.instance.id else _("Add"),
css_id=self.auto_id % "submit",
css_class="btn-outline-primary crispy-outline",
),
css_class="col-auto px-1 mb-xl-0 form-label-group",
),
Div(
Reset(
"reset",
_("Cancel"),
css_id=self.auto_id % "reset",
),
css_class="col-auto px-1 mb-xl-0 form-label-group",
)
if self.instance.id
else None,
"game",
"custom_material",
css_class="row mx-0",
),
)
_("Material"), _("Choose a material"), _("Workweek"), _("No"), _(
"Choose a material"
), _("Material not found..."), _('Click to request "<em>{}</em>" anyway.'),
def clean(self):
"""
Here we check whether a material was chosen or a custom material name was given.
:return:
"""
cleaned_data = super().clean()
material = cleaned_data.get("material")
custom_material = cleaned_data.get("custom_material")
if material is None and custom_material is None:
raise forms.ValidationError(
_("You must either choose a material or fill in a custom material.")
)
return cleaned_data
class RateClassForm(forms.ModelForm):
materials = forms.ModelMultipleChoiceField(
queryset=Material.objects.all(),
widget=FilteredSelectMultiple(verbose_name=_("materials"), is_stacked=False),
required=False,
)
materials.label = _("Materials")
materials.help_text = _(
"Hold down “Control”, or “Command” on a Mac, to select more than one. "
"A material can be in only one rate class, the materials you add to this"
" rate class, will be removed from other rate classes."
)
class Meta:
model = RateClass
fields = ("name", "description", "rate")
def __init__(self, *args, **kwargs):
super(RateClassForm, self).__init__(*args, **kwargs)
if self.instance:
# fill initial related values
self.fields["materials"].initial = self.instance.materials.all() | booking/forms.py | from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Field, Div, Reset
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.urls import reverse
from django.utils.translation import gettext as __
from django.utils.translation import gettext_lazy as _
from mptt.forms import TreeNodeMultipleChoiceField
from booking.models import (
Material,
Category,
Event,
MaterialAlias,
Game,
Booking,
RateClass,
)
class MaterialForm(forms.ModelForm):
categories = TreeNodeMultipleChoiceField(queryset=Category.objects.all())
class Meta:
model = Material
fields = "__all__"
def clean(self):
cleaned_data = super().clean()
name = cleaned_data.get("name")
alias = MaterialAlias.objects.filter(name__iexact=name)
if alias:
raise forms.ValidationError(
_("There exists already a material alias with the given name.")
)
return cleaned_data
class MaterialAliasForm(forms.ModelForm):
class Meta:
model = MaterialAlias
fields = "__all__"
def clean(self):
cleaned_data = super().clean()
name = cleaned_data.get("name")
material = Material.objects.filter(name__iexact=name)
if material:
raise forms.ValidationError(
_("There exists already a material with the given name.")
)
return cleaned_data
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = "__all__"
help_texts = {}
error_messages = {}
def __init__(self, *args, **kwargs):
super(CategoryForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = "post"
self.helper.add_input(Submit("submit", _("Submit")))
class EventForm(forms.ModelForm):
class Meta:
model = Event
fields = "__all__"
def clean(self):
cleaned_data = super().clean()
booking_start = cleaned_data.get("booking_start")
booking_end = cleaned_data.get("booking_end")
privileged_booking_end = cleaned_data.get("privileged_booking_end")
event_start = cleaned_data.get("event_start")
event_end = cleaned_data.get("event_end")
if booking_end and booking_start and booking_end < booking_start:
raise forms.ValidationError(
_("Booking end cannot be earlier than booking start.")
)
if (
privileged_booking_end
and booking_end
and privileged_booking_end < booking_end
):
raise forms.ValidationError(
_("Privileged booking end cannot be earlier than booking end.")
)
if event_end and event_start and event_end < event_start:
raise forms.ValidationError(
_("Event end cannot be earlier than event start.")
)
return cleaned_data
class GameForm(forms.ModelForm):
class Meta:
model = Game
fields = "__all__"
widgets = {
"event": forms.HiddenInput(),
"day": forms.HiddenInput(),
"group": forms.HiddenInput(),
}
labels = {
"name": __("Game name"),
"location": "<i class='fas fa-map-marker-alt'></i> " + __("Location"),
"part_of_day": "<i class='fas fa-clock'></i> " + __("Part of day"),
}
_("Game name"), _("Location"), _("Part of day") # For detection by makemessages
help_texts = {"name": None, "location": None, "part_of_day": None}
def __init__(self, *args, **kwargs):
super(GameForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
if self.instance.id:
self.helper.form_action = reverse(
"booking:api:edit_game", args=[self.instance.id]
)
else:
self.helper.form_action = reverse("booking:api:new_game")
self.helper.form_method = "POST"
css_class = "game-form-update" if self.instance.id else "game-form-create"
self.helper.form_class = "game-form " + css_class
self.helper.layout = Layout(
Div(
Field(
"name",
template="crispy/floating-labels.html",
wrapper_class="col px-1 mb-lg-0",
),
Field(
"part_of_day",
template="crispy/floating-labels.html",
wrapper_class="col-lg-3 px-1 mb-lg-0",
),
Field(
"location",
template="crispy/floating-labels.html",
wrapper_class="col-lg-3 px-1 mb-lg-0",
),
Div(
Submit(
"update" if self.instance.id else "add",
_("Update") if self.instance.id else _("Add"),
css_id=self.auto_id % "submit",
css_class="btn-outline-primary crispy-outline",
),
css_class="col-auto px-1 mb-lg-0 form-label-group",
),
Div(
Reset(
"reset",
_("Cancel"),
css_id=self.auto_id % "reset",
),
css_class="col-auto px-1 mb-lg-0 form-label-group",
)
if self.instance.id
else None,
"event",
"day",
"group",
css_class="row",
),
)
def clean(self):
"""
Here we check whether the day of the game is part of the event.
:return:
"""
cleaned_data = super().clean()
day = cleaned_data.get("day")
event = cleaned_data.get("event")
if day < event.event_start:
raise forms.ValidationError(
_("Day of game cannot be earlier than event start.")
)
if day > event.event_end:
raise forms.ValidationError(
_("Day of game cannot be later than event end.")
)
return cleaned_data
class BookingForm(forms.ModelForm):
class Meta:
model = Booking
fields = "__all__"
widgets = {
"game": forms.HiddenInput(),
"material": forms.TextInput(),
"custom_material": forms.HiddenInput(),
}
labels = {
"material": "",
"amount": __("Amount"),
"workweek": __("Workweek"),
"comment": "<i class='far fa-comment'></i> " + __("Comment"),
}
_("Amount"), _("Workweek"), _("Comment") # For detection by makemessages
help_texts = {
"material": None,
"amount": None,
"workweek": None,
"comment": None,
}
def __init__(self, *args, **kwargs):
super(BookingForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
if self.instance.id:
self.helper.form_action = reverse(
"booking:api:edit_booking", args=[self.instance.id]
)
else:
self.helper.form_action = reverse("booking:api:new_booking")
self.helper.form_method = "POST"
css_class = "booking-form-update" if self.instance.id else "booking-form-create"
self.helper.form_class = "booking-form w-100 " + css_class
material_id = ""
material_name = ""
if self.instance.material:
material_id = self.instance.material.id
material_name = self.instance.material.name
if self.instance.custom_material:
material_id = self.instance.custom_material
material_name = self.instance.custom_material
self.helper.layout = Layout(
Div(
Field(
"material",
wrapper_class="col-8 col-xl-3 px-1 mb-xl-0",
css_class="typeahead-materials floating-label-size",
placeholder=__("Material") + "*",
autocomplete="off",
data_materialid=material_id,
data_materialname=material_name,
data_invalidmessage=__("Choose a material"),
data_allowcustom="true",
data_notfoundtext=__("Material not found..."),
data_addcustomtext=__('Click to request "<em>{}</em>" anyway.'),
title=__("Material"),
),
Field(
"amount",
template="crispy/floating-labels.html",
wrapper_class="col-4 col-xl-2 px-1 mb-xl-0",
autocomplete="off",
title=__("Amount"),
),
Field(
"workweek",
wrapper_class="col-auto px-1 mb-xl-0",
template="crispy/floating-labels.html",
data_toggle="toggle",
data_on=__("Workweek"),
data_off=__("No"),
),
Field(
"comment",
template="crispy/floating-labels.html",
wrapper_class="col px-1 mb-xl-0",
title=__("Comment"),
),
Div(
Submit(
"submit",
_("Update") if self.instance.id else _("Add"),
css_id=self.auto_id % "submit",
css_class="btn-outline-primary crispy-outline",
),
css_class="col-auto px-1 mb-xl-0 form-label-group",
),
Div(
Reset(
"reset",
_("Cancel"),
css_id=self.auto_id % "reset",
),
css_class="col-auto px-1 mb-xl-0 form-label-group",
)
if self.instance.id
else None,
"game",
"custom_material",
css_class="row mx-0",
),
)
_("Material"), _("Choose a material"), _("Workweek"), _("No"), _(
"Choose a material"
), _("Material not found..."), _('Click to request "<em>{}</em>" anyway.'),
def clean(self):
"""
Here we check whether a material was chosen or a custom material name was given.
:return:
"""
cleaned_data = super().clean()
material = cleaned_data.get("material")
custom_material = cleaned_data.get("custom_material")
if material is None and custom_material is None:
raise forms.ValidationError(
_("You must either choose a material or fill in a custom material.")
)
return cleaned_data
class RateClassForm(forms.ModelForm):
materials = forms.ModelMultipleChoiceField(
queryset=Material.objects.all(),
widget=FilteredSelectMultiple(verbose_name=_("materials"), is_stacked=False),
required=False,
)
materials.label = _("Materials")
materials.help_text = _(
"Hold down “Control”, or “Command” on a Mac, to select more than one. "
"A material can be in only one rate class, the materials you add to this"
" rate class, will be removed from other rate classes."
)
class Meta:
model = RateClass
fields = ("name", "description", "rate")
def __init__(self, *args, **kwargs):
super(RateClassForm, self).__init__(*args, **kwargs)
if self.instance:
# fill initial related values
self.fields["materials"].initial = self.instance.materials.all() | 0.653459 | 0.137475 |
import asyncio
import weakref
import aiohttp
import sys
import json
import traceback
import logging
from urllib.parse import quote as _uriquote
from ..errors import HTTPException, Forbidden, NotFound, ServerError, SentinelError
log = logging.getLogger(__name__)
class Route:
BASE = "https://discord.com/api/v9"
def __init__(self, method, path, **kwargs):
self.path = path
self.method = method
url = self.BASE + path
if kwargs:
self.url = url.format_map({x: _uriquote(y) if isinstance(y, str) else y for x, y in kwargs.items()})
else:
self.url = url
self.channel_id = kwargs.get("channel_id")
self.guild_id = kwargs.get("guild_id")
self.webhook_id = kwargs.get("webhook_id")
self.webhook_token = kwargs.get("webhook_token")
@property
def bucket(self):
return "{}:{}:{}".format(self.channel_id, self.guild_id, self.path)
class MaybeUnlock:
def __init__(self, lock):
self.lock = lock
self._unlock = True
def __enter__(self):
return self
def defer(self):
self._unlock = False
def __exit__(self, type, value, traceback):
if self._unlock:
self.lock.release()
async def json_or_text(res):
t = await res.text(encoding="utf-8")
try:
if res.headers["content-type"] == "application/json":
return json.loads(t)
except KeyError:
pass
return t
class HTTPClient:
def __init__(self, ws, token):
self.loop = asyncio.get_event_loop()
self.ws = ws
self.session = aiohttp.ClientSession()
self._locks = weakref.WeakValueDictionary()
self._token = token
self.pool = None
self._global_over = asyncio.Event()
self._global_over.set()
user_agent = 'DiscordBot ({0}) Python/{1[0]}.{1[1]} aiohttp/{2}'
self.user_agent = user_agent.format("0.0.1", sys.version_info, aiohttp.__version__)
async def request(self, route, *, files=None, form=None, **kwargs):
bucket = route.bucket
method = route.method
url = route.url
lock = self._locks.get(bucket)
if lock is None:
lock = asyncio.Lock()
if bucket is not None:
self._locks[bucket] = lock
headers = {
"User-Agent": self.user_agent,
"Authorization": "Bot {}".format(self._token)
}
if "json" in kwargs:
headers.update({
"Content-Type": "application/json",
})
kwargs["data"] = json.dumps(kwargs.pop("json"))
try:
reason = kwargs.pop("reason")
except KeyError:
pass
else:
if reason:
headers.update({
"X-Audit-Log-Reason": _uriquote(reason, safe="/ ")
})
kwargs.update({
"headers": headers
})
if not self._global_over.is_set():
await self._global_over.wait()
await lock.acquire()
with MaybeUnlock(lock) as maybe_lock:
for t in range(5):
if files:
for f in files:
f.reset(seek=t)
if form:
f_data = aiohttp.FormData()
for p in form:
f_data.add_field(**p)
kwargs.update({
"data": f_data
})
try:
async with self.session.request(method, url, **kwargs) as res:
data = await json_or_text(res)
r = res.headers.get("X-Ratelimit-Remaining", 1)
if int(r) == 0 and res.status != 429:
delta = float(res.headers.get("X-Ratelimit-Reset-After"))
maybe_lock.defer()
self.loop.call_later(delta, lock.release)
if 300 > res.status >= 200:
return data
if res.status == 429:
if not res.headers.get("Via"):
raise HTTPException(res, data)
txt = "Ratelimited! Retrying in {0} seconds (Bucket {1})"
retry_after = data["retry_after"]
log.warn(txt.format(retry_after, bucket))
_global = data.get("global", False)
if _global:
log.warn("Global rate limit hit!")
self._global_over.clear()
await asyncio.sleep(retry_after)
if _global:
self._global_over.set()
log.info("Global ratelimit over.")
continue
if res.status in (500, 502):
await asyncio.sleep(1 + t * 2)
continue
if res.status == 403:
raise Forbidden(res, data)
elif res.status == 404:
raise NotFound(res, data)
elif res.status == 503:
raise ServerError(res, data)
else:
raise HTTPException(res, data)
except OSError as e:
if t < 4 and e.errno in (54, 10054):
await asyncio.sleep(1 + t * 2)
continue
raise
if res.status >= 500:
raise ServerError(res, data)
raise HTTPException(res, data)
def send_message(self, channel_id, content, embeds: list =None):
try:
r = Route("POST", f"/channels/{channel_id}/messages", channel_id=channel_id)
payload = {}
if content:
payload.update({
"content": content
})
final_embeds = []
if embeds:
for e in embeds:
final_embeds.append(e._to_json())
payload.update({
"embeds": final_embeds
})
try:
self.loop.run_until_complete(self.request(r, json=payload))
except TimeoutError:
pass
except SentinelError as ex:
log.error(ex)
def get_guild_member(self, guild_id, user_id):
try:
r = Route("GET", f"/guilds/{guild_id}/members/{user_id}")
res = self.loop.run_until_complete(self.request(r))
return res
except SentinelError as ex:
log.error(ex)
def get_guild_commands(self, guild_id, app_id):
try:
r = Route("GET", f"/applications/{app_id}/guilds/{guild_id}/commands")
res = self.loop.run_until_complete(self.request(r))
return res
except SentinelError as ex:
log.error(ex)
def register_guild_command(self, guild_id: int, app_id: int, name: str, description: str, params: list):
description = description if description != None else "A cool command!"
try:
payload = {
"name": name,
"description": description,
"options": []
}
options = []
if len(params) > 0:
for i, p in enumerate(params):
options.append(
{
"name": p,
"description": f"Parameter number {i+1}",
"type": 3,
"required": True
}
)
payload.update({
"options": options
})
r = Route("POST", f"/applications/{app_id}/guilds/{guild_id}/commands")
res = self.loop.run_until_complete(self.request(r, json=payload))
return res
except SentinelError as ex:
log.error(ex)
def delete_guild_command(self, guild_id: int, app_id: int, command_id: int):
try:
r = Route("DELETE", f"/applications/{app_id}/guilds/{guild_id}/commands/{command_id}")
res = self.loop.run_until_complete(self.request(r))
return res
except SentinelError as ex:
log.error(ex)
def respond_to_command(self, interaction_id: int, interaction_token: str, _type: int, content: str, embeds: list = None, flags = None):
try:
payload = {
"type": _type,
"data": {
"content": content,
"flags": flags
}
}
final_embeds = []
if embeds:
for e in embeds:
final_embeds.append(e._to_json())
payload["data"].update({
"embeds": final_embeds
})
r = Route("POST", f"/interactions/{interaction_id}/{interaction_token}/callback")
res = self.loop.run_until_complete(self.request(r, json=payload))
return res
except SentinelError as ex:
log.error(ex)
def send_dm(self, user_id: int, content: str, embeds: list = None):
try:
dm_channel_payload = {
"recipient_id": user_id
}
dm_req = Route("POST", f"/users/@me/channels")
dm_channel = self.loop.run_until_complete(self.request(dm_req, json=dm_channel_payload))
return self.send_message(dm_channel["id"], content, embeds)
except SentinelError as ex:
log.error(ex)
def _delete_old_commands(self, commands: list, app_id: int):
try:
r = Route("GET", "/users/@me/guilds")
guilds = self.loop.run_until_complete(self.request(r))
for g in guilds:
all_commands = self.get_guild_commands(g["id"], app_id)
for cmd in all_commands:
if not cmd["name"].lower() in commands:
self.delete_guild_command(g["id"], app_id, cmd["id"])
except SentinelError as ex:
log.error(ex) | sentinel/rest/http.py | import asyncio
import weakref
import aiohttp
import sys
import json
import traceback
import logging
from urllib.parse import quote as _uriquote
from ..errors import HTTPException, Forbidden, NotFound, ServerError, SentinelError
log = logging.getLogger(__name__)
class Route:
BASE = "https://discord.com/api/v9"
def __init__(self, method, path, **kwargs):
self.path = path
self.method = method
url = self.BASE + path
if kwargs:
self.url = url.format_map({x: _uriquote(y) if isinstance(y, str) else y for x, y in kwargs.items()})
else:
self.url = url
self.channel_id = kwargs.get("channel_id")
self.guild_id = kwargs.get("guild_id")
self.webhook_id = kwargs.get("webhook_id")
self.webhook_token = kwargs.get("webhook_token")
@property
def bucket(self):
return "{}:{}:{}".format(self.channel_id, self.guild_id, self.path)
class MaybeUnlock:
def __init__(self, lock):
self.lock = lock
self._unlock = True
def __enter__(self):
return self
def defer(self):
self._unlock = False
def __exit__(self, type, value, traceback):
if self._unlock:
self.lock.release()
async def json_or_text(res):
t = await res.text(encoding="utf-8")
try:
if res.headers["content-type"] == "application/json":
return json.loads(t)
except KeyError:
pass
return t
class HTTPClient:
def __init__(self, ws, token):
self.loop = asyncio.get_event_loop()
self.ws = ws
self.session = aiohttp.ClientSession()
self._locks = weakref.WeakValueDictionary()
self._token = token
self.pool = None
self._global_over = asyncio.Event()
self._global_over.set()
user_agent = 'DiscordBot ({0}) Python/{1[0]}.{1[1]} aiohttp/{2}'
self.user_agent = user_agent.format("0.0.1", sys.version_info, aiohttp.__version__)
async def request(self, route, *, files=None, form=None, **kwargs):
bucket = route.bucket
method = route.method
url = route.url
lock = self._locks.get(bucket)
if lock is None:
lock = asyncio.Lock()
if bucket is not None:
self._locks[bucket] = lock
headers = {
"User-Agent": self.user_agent,
"Authorization": "Bot {}".format(self._token)
}
if "json" in kwargs:
headers.update({
"Content-Type": "application/json",
})
kwargs["data"] = json.dumps(kwargs.pop("json"))
try:
reason = kwargs.pop("reason")
except KeyError:
pass
else:
if reason:
headers.update({
"X-Audit-Log-Reason": _uriquote(reason, safe="/ ")
})
kwargs.update({
"headers": headers
})
if not self._global_over.is_set():
await self._global_over.wait()
await lock.acquire()
with MaybeUnlock(lock) as maybe_lock:
for t in range(5):
if files:
for f in files:
f.reset(seek=t)
if form:
f_data = aiohttp.FormData()
for p in form:
f_data.add_field(**p)
kwargs.update({
"data": f_data
})
try:
async with self.session.request(method, url, **kwargs) as res:
data = await json_or_text(res)
r = res.headers.get("X-Ratelimit-Remaining", 1)
if int(r) == 0 and res.status != 429:
delta = float(res.headers.get("X-Ratelimit-Reset-After"))
maybe_lock.defer()
self.loop.call_later(delta, lock.release)
if 300 > res.status >= 200:
return data
if res.status == 429:
if not res.headers.get("Via"):
raise HTTPException(res, data)
txt = "Ratelimited! Retrying in {0} seconds (Bucket {1})"
retry_after = data["retry_after"]
log.warn(txt.format(retry_after, bucket))
_global = data.get("global", False)
if _global:
log.warn("Global rate limit hit!")
self._global_over.clear()
await asyncio.sleep(retry_after)
if _global:
self._global_over.set()
log.info("Global ratelimit over.")
continue
if res.status in (500, 502):
await asyncio.sleep(1 + t * 2)
continue
if res.status == 403:
raise Forbidden(res, data)
elif res.status == 404:
raise NotFound(res, data)
elif res.status == 503:
raise ServerError(res, data)
else:
raise HTTPException(res, data)
except OSError as e:
if t < 4 and e.errno in (54, 10054):
await asyncio.sleep(1 + t * 2)
continue
raise
if res.status >= 500:
raise ServerError(res, data)
raise HTTPException(res, data)
def send_message(self, channel_id, content, embeds: list =None):
try:
r = Route("POST", f"/channels/{channel_id}/messages", channel_id=channel_id)
payload = {}
if content:
payload.update({
"content": content
})
final_embeds = []
if embeds:
for e in embeds:
final_embeds.append(e._to_json())
payload.update({
"embeds": final_embeds
})
try:
self.loop.run_until_complete(self.request(r, json=payload))
except TimeoutError:
pass
except SentinelError as ex:
log.error(ex)
def get_guild_member(self, guild_id, user_id):
try:
r = Route("GET", f"/guilds/{guild_id}/members/{user_id}")
res = self.loop.run_until_complete(self.request(r))
return res
except SentinelError as ex:
log.error(ex)
def get_guild_commands(self, guild_id, app_id):
try:
r = Route("GET", f"/applications/{app_id}/guilds/{guild_id}/commands")
res = self.loop.run_until_complete(self.request(r))
return res
except SentinelError as ex:
log.error(ex)
def register_guild_command(self, guild_id: int, app_id: int, name: str, description: str, params: list):
description = description if description != None else "A cool command!"
try:
payload = {
"name": name,
"description": description,
"options": []
}
options = []
if len(params) > 0:
for i, p in enumerate(params):
options.append(
{
"name": p,
"description": f"Parameter number {i+1}",
"type": 3,
"required": True
}
)
payload.update({
"options": options
})
r = Route("POST", f"/applications/{app_id}/guilds/{guild_id}/commands")
res = self.loop.run_until_complete(self.request(r, json=payload))
return res
except SentinelError as ex:
log.error(ex)
def delete_guild_command(self, guild_id: int, app_id: int, command_id: int):
try:
r = Route("DELETE", f"/applications/{app_id}/guilds/{guild_id}/commands/{command_id}")
res = self.loop.run_until_complete(self.request(r))
return res
except SentinelError as ex:
log.error(ex)
def respond_to_command(self, interaction_id: int, interaction_token: str, _type: int, content: str, embeds: list = None, flags = None):
try:
payload = {
"type": _type,
"data": {
"content": content,
"flags": flags
}
}
final_embeds = []
if embeds:
for e in embeds:
final_embeds.append(e._to_json())
payload["data"].update({
"embeds": final_embeds
})
r = Route("POST", f"/interactions/{interaction_id}/{interaction_token}/callback")
res = self.loop.run_until_complete(self.request(r, json=payload))
return res
except SentinelError as ex:
log.error(ex)
def send_dm(self, user_id: int, content: str, embeds: list = None):
try:
dm_channel_payload = {
"recipient_id": user_id
}
dm_req = Route("POST", f"/users/@me/channels")
dm_channel = self.loop.run_until_complete(self.request(dm_req, json=dm_channel_payload))
return self.send_message(dm_channel["id"], content, embeds)
except SentinelError as ex:
log.error(ex)
def _delete_old_commands(self, commands: list, app_id: int):
try:
r = Route("GET", "/users/@me/guilds")
guilds = self.loop.run_until_complete(self.request(r))
for g in guilds:
all_commands = self.get_guild_commands(g["id"], app_id)
for cmd in all_commands:
if not cmd["name"].lower() in commands:
self.delete_guild_command(g["id"], app_id, cmd["id"])
except SentinelError as ex:
log.error(ex) | 0.223886 | 0.067026 |
# ## Questionário 73 (Q73)
#
# Orientações:
#
# - Registre suas respostas no questionário de mesmo nome no SIGAA.
# - O tempo de registro das respostas no questionário será de 10 minutos. Portanto, resolva primeiro as questões e depois registre-as.
# - Haverá apenas 1 (uma) tentativa de resposta.
# - Submeta seu arquivo-fonte (utilizado para resolver as questões) em formato _.ipynb_ pelo SIGAA anexando-o à Tarefa denominada "Envio de arquivo" correspondente ao questionário.
#
# *Nota:* o arquivo-fonte será utilizado apenas como prova de execução da tarefa. Nenhuma avaliação será feita quanto ao estilo de programação.
#
# <hr>
# Para responder às questões, leia o texto introdutório a seguir.
#
# >Diversos países firmam acordos bilaterais com o intuito de fortalecer interesses mútuos. Uma rede multinacional da qual o Brasil faz parte começou a ser modelada por cientistas de dados a partir de um grafo não dirigido em que os _nós_ do grafo representam os países, renomeados segundo o código Alpha-3 do padrão [IBAN](https://www.iban.com/country-codes), e as _arestas_ representam a existência de um acordo bilateral.
# > A figura abaixo mostra, por exemplo, um subgrafo dessa rede formado por Áustria (AUT), Bélgica (BEL), Brasil (BRA), Emirados Árabes Unidos (ARE) e Estados Unidos (USA).
# ```{figure} ../figs/q/q73.png
# ---
# width: 660px
# name: rede
# ---
# Exemplo de rede de países que mantêm acordos bilaterais.
# ```
# > O arquivo `paises-acordo-bilateral.txt` contém, implicitamente, a lista de conexões que formam o grafo da rede inteira, as quais são determinadas por pares do tipo `x,y`, onde `x` e `y` são nomes de países não padronizados. Por exemplo, o par `China,Norway` indica que há um acordo bilateral entre China e Noruega.
#
# >*Obs.:* acesse o arquivo [aqui](https://github.com/gcpeixoto/ICD/tree/main/database/paises-acordo-bilateral.txt).
# **Questão 1.** Faça a raspagem da tabela de códigos de países disponíveis na página [IBAN](https://www.iban.com/country-codes) para recuperar os códigos Alpha-3 para cada país contido na lista de arestas e crie um segundo arquivo chamado `paises-acordo-bilateral-IBAN.txt`. Use o módulo `networkx` e a função `read_edgelist` para construir o grafo da rede multinacional. Em seguida, assinale a alternativa correta para a tupla (número de nós, número de arestas) que você encontrou. Sugestão: use as funções `get_table_head` e `get_table_body` criadas no capítulo do livro de ICD sobre _Raspagem de dados_.
#
# A. (14, 28)
#
# B. (16, 30)
#
# C. (12, 36)
#
# D. (14, 38)
# ## GABARITO
#
# Alternativa **D**.
# ## Geração de arquivo de acordo bilateral
# In[1]:
import numpy as np
np.random.seed(3)
countries = ('Argentina','Austria','Belgium','Brazil','China',
'United Arab Emirates (the)',
'United States of America (the)','Germany',
'India','Israel','Netherlands (the)',
'Norway','Russian Federation (the)','South Africa')
adj = np.random.randint(0,2,(len(countries),len(countries)))
adj[np.diag_indices_from(adj)] = 0
adj = np.tril(adj)
adj = np.tril(adj) + np.tril(adj).T
f = open('../database/paises-acordo-bilateral.txt','w')
for i in range(len(countries)):
for j in range(i+1,len(countries)):
if adj[i,j] == 1:
s = countries[i] + ',' + countries[j] + '\n'
f.write(s)
f.close()
# ## Raspagem do IBAN
#
#
# In[2]:
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
import networkx as nx
html = urlopen('https://www.iban.com/country-codes')
bs = BeautifulSoup(html.read(),'html.parser')
# extrai cabeçalho
def get_table_head(t):
'''Lê objeto tabela e extrai header para lista'''
res = []
thead = t.find('thead')
th = thead.find_all('th')
for f in th:
res.append(f.getText().strip())
return res
t_header = get_table_head(bs.body)
# extrai linhas
def get_table_body(t):
res = []
tbody = t.find('tbody')
tr = tbody.find_all('tr')
for row in tr:
this_row = []
row_fields = row.find_all('td')
for f in row_fields:
this_row.append(f.getText().strip())
res.append(this_row)
return res
r = get_table_body(bs.body)
# DataFrame
iban = pd.DataFrame(r,columns=t_header).drop_duplicates().drop(columns=['Alpha-2 code','Numeric']).reset_index(drop=True)
# ## Escreve edgelist
# In[3]:
f = open('../database/paises-acordo-bilateral.txt','r')
g = open('../database/paises-acordo-bilateral-IBAN.txt','w')
for i in f.readlines():
p1,p2 = i.strip().split(',')
p1 = iban[iban['Country'] == p1]['Alpha-3 code'].values
p2 = iban[iban['Country'] == p2]['Alpha-3 code'].values
p1,p2 = p1[0],p2[0]
s = p1 + ',' + p2 + '\n'
g.write(s)
f.close()
g.close()
# In[4]:
G = nx.read_edgelist('../database/paises-acordo-bilateral-IBAN.txt',delimiter=',')
Gsub = nx.subgraph(G,['AUT','ARE','USA','BRA','BEL',''])
nx.draw_networkx(Gsub,with_labels=True)
# In[5]:
G.number_of_nodes(),G.number_of_edges()
# **Questão 2.** A _centralidade de grau_ `deg`, calculada para cada nó do grafo completo pelo módulo `networkx`, pode ser interpretada, para este estudo de caso, como uma medida relativa da pré-disposição de um país para se abrir à globalização. Neste sentido, calcule `deg` e assinale a opção cujo país é o mais **fechado** ao fenômeno da globalização.
#
# A. CHN
#
# B. BRA
#
# C. ARG
#
# D. NLD
# ## GABARITO
#
# Alternativa **D**.
# In[6]:
# minima deg é NLD
deg = nx.degree_centrality(G)
deg = {k: v for k, v in sorted(deg.items(), key=lambda item: item[1],reverse=True)}
deg
# **Questão 3.** Semelhantemente à interpretação da questão anterior, a _centralidade de intermediação_ `bet` fornece uma medida relativa de quão boa é a confiança e respeitabilidade diplomática de um país para a concretização de acordos. Calcule `bet` e assinale a opção cujo país é o mais respeitado para intermediar acordos.
#
# A. AUT
#
# B. ZAF
#
# <NAME>
#
# D. ISR
# ## GABARITO
#
# Alternativa **C**.
# In[7]:
# maxima bet é DEU
bet = nx.betweenness_centrality(G)
bet = {k: v for k, v in sorted(bet.items(), key=lambda item: item[1],reverse=True)}
bet
# In[8]:
get_ipython().system('rm ../database/paises-acordo-bilateral-IBAN.txt') | _build/jupyter_execute/todo/Q73-gab.py |
# ## Questionário 73 (Q73)
#
# Orientações:
#
# - Registre suas respostas no questionário de mesmo nome no SIGAA.
# - O tempo de registro das respostas no questionário será de 10 minutos. Portanto, resolva primeiro as questões e depois registre-as.
# - Haverá apenas 1 (uma) tentativa de resposta.
# - Submeta seu arquivo-fonte (utilizado para resolver as questões) em formato _.ipynb_ pelo SIGAA anexando-o à Tarefa denominada "Envio de arquivo" correspondente ao questionário.
#
# *Nota:* o arquivo-fonte será utilizado apenas como prova de execução da tarefa. Nenhuma avaliação será feita quanto ao estilo de programação.
#
# <hr>
# Para responder às questões, leia o texto introdutório a seguir.
#
# >Diversos países firmam acordos bilaterais com o intuito de fortalecer interesses mútuos. Uma rede multinacional da qual o Brasil faz parte começou a ser modelada por cientistas de dados a partir de um grafo não dirigido em que os _nós_ do grafo representam os países, renomeados segundo o código Alpha-3 do padrão [IBAN](https://www.iban.com/country-codes), e as _arestas_ representam a existência de um acordo bilateral.
# > A figura abaixo mostra, por exemplo, um subgrafo dessa rede formado por Áustria (AUT), Bélgica (BEL), Brasil (BRA), Emirados Árabes Unidos (ARE) e Estados Unidos (USA).
# ```{figure} ../figs/q/q73.png
# ---
# width: 660px
# name: rede
# ---
# Exemplo de rede de países que mantêm acordos bilaterais.
# ```
# > O arquivo `paises-acordo-bilateral.txt` contém, implicitamente, a lista de conexões que formam o grafo da rede inteira, as quais são determinadas por pares do tipo `x,y`, onde `x` e `y` são nomes de países não padronizados. Por exemplo, o par `China,Norway` indica que há um acordo bilateral entre China e Noruega.
#
# >*Obs.:* acesse o arquivo [aqui](https://github.com/gcpeixoto/ICD/tree/main/database/paises-acordo-bilateral.txt).
# **Questão 1.** Faça a raspagem da tabela de códigos de países disponíveis na página [IBAN](https://www.iban.com/country-codes) para recuperar os códigos Alpha-3 para cada país contido na lista de arestas e crie um segundo arquivo chamado `paises-acordo-bilateral-IBAN.txt`. Use o módulo `networkx` e a função `read_edgelist` para construir o grafo da rede multinacional. Em seguida, assinale a alternativa correta para a tupla (número de nós, número de arestas) que você encontrou. Sugestão: use as funções `get_table_head` e `get_table_body` criadas no capítulo do livro de ICD sobre _Raspagem de dados_.
#
# A. (14, 28)
#
# B. (16, 30)
#
# C. (12, 36)
#
# D. (14, 38)
# ## GABARITO
#
# Alternativa **D**.
# ## Geração de arquivo de acordo bilateral
# In[1]:
import numpy as np
np.random.seed(3)
countries = ('Argentina','Austria','Belgium','Brazil','China',
'United Arab Emirates (the)',
'United States of America (the)','Germany',
'India','Israel','Netherlands (the)',
'Norway','Russian Federation (the)','South Africa')
adj = np.random.randint(0,2,(len(countries),len(countries)))
adj[np.diag_indices_from(adj)] = 0
adj = np.tril(adj)
adj = np.tril(adj) + np.tril(adj).T
f = open('../database/paises-acordo-bilateral.txt','w')
for i in range(len(countries)):
for j in range(i+1,len(countries)):
if adj[i,j] == 1:
s = countries[i] + ',' + countries[j] + '\n'
f.write(s)
f.close()
# ## Raspagem do IBAN
#
#
# In[2]:
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
import networkx as nx
html = urlopen('https://www.iban.com/country-codes')
bs = BeautifulSoup(html.read(),'html.parser')
# extrai cabeçalho
def get_table_head(t):
'''Lê objeto tabela e extrai header para lista'''
res = []
thead = t.find('thead')
th = thead.find_all('th')
for f in th:
res.append(f.getText().strip())
return res
t_header = get_table_head(bs.body)
# extrai linhas
def get_table_body(t):
res = []
tbody = t.find('tbody')
tr = tbody.find_all('tr')
for row in tr:
this_row = []
row_fields = row.find_all('td')
for f in row_fields:
this_row.append(f.getText().strip())
res.append(this_row)
return res
r = get_table_body(bs.body)
# DataFrame
iban = pd.DataFrame(r,columns=t_header).drop_duplicates().drop(columns=['Alpha-2 code','Numeric']).reset_index(drop=True)
# ## Escreve edgelist
# In[3]:
f = open('../database/paises-acordo-bilateral.txt','r')
g = open('../database/paises-acordo-bilateral-IBAN.txt','w')
for i in f.readlines():
p1,p2 = i.strip().split(',')
p1 = iban[iban['Country'] == p1]['Alpha-3 code'].values
p2 = iban[iban['Country'] == p2]['Alpha-3 code'].values
p1,p2 = p1[0],p2[0]
s = p1 + ',' + p2 + '\n'
g.write(s)
f.close()
g.close()
# In[4]:
G = nx.read_edgelist('../database/paises-acordo-bilateral-IBAN.txt',delimiter=',')
Gsub = nx.subgraph(G,['AUT','ARE','USA','BRA','BEL',''])
nx.draw_networkx(Gsub,with_labels=True)
# In[5]:
G.number_of_nodes(),G.number_of_edges()
# **Questão 2.** A _centralidade de grau_ `deg`, calculada para cada nó do grafo completo pelo módulo `networkx`, pode ser interpretada, para este estudo de caso, como uma medida relativa da pré-disposição de um país para se abrir à globalização. Neste sentido, calcule `deg` e assinale a opção cujo país é o mais **fechado** ao fenômeno da globalização.
#
# A. CHN
#
# B. BRA
#
# C. ARG
#
# D. NLD
# ## GABARITO
#
# Alternativa **D**.
# In[6]:
# minima deg é NLD
deg = nx.degree_centrality(G)
deg = {k: v for k, v in sorted(deg.items(), key=lambda item: item[1],reverse=True)}
deg
# **Questão 3.** Semelhantemente à interpretação da questão anterior, a _centralidade de intermediação_ `bet` fornece uma medida relativa de quão boa é a confiança e respeitabilidade diplomática de um país para a concretização de acordos. Calcule `bet` e assinale a opção cujo país é o mais respeitado para intermediar acordos.
#
# A. AUT
#
# B. ZAF
#
# <NAME>
#
# D. ISR
# ## GABARITO
#
# Alternativa **C**.
# In[7]:
# maxima bet é DEU
bet = nx.betweenness_centrality(G)
bet = {k: v for k, v in sorted(bet.items(), key=lambda item: item[1],reverse=True)}
bet
# In[8]:
get_ipython().system('rm ../database/paises-acordo-bilateral-IBAN.txt') | 0.431345 | 0.566378 |
from django.contrib import admin
from django.views.generic.base import TemplateView
from django.urls import path, re_path, include
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path(r'^$', TemplateView.as_view(template_name='index.html'), name="home"),
path('register/', views.user_register, name='user_register'),
path('login/', views.user_login, name='user_login'),
path('logout/', views.user_logout, name='user_logout'),
# Facility
path('facility/add/', views.add_facility_ajax, name='add-facility-ajax'),
path('facility/type/', views.add_facility_type_ajax, name='add-facility-type-ajax'),
# Client
path('client/add/', views.add_client_ajax, name='add-client-ajax'),
path('client/type/', views.add_client_type_ajax, name='add-client-type-ajax'),
# Commodity
path('commodity/add/', views.add_commodity_ajax, name='add-commodity-ajax'),
path('commodity/category/', views.add_commodity_category_ajax, name='add-commodity-category-ajax'),
path('commodity/metric/', views.add_commodity_metric_ajax, name='add-commodity-metric-ajax'),
path('commodity/type/', views.add_commodity_type_ajax, name='add-commodity-type-ajax'),
# Transport
path('transport/add/', views.add_transport_items_ajax, name='add-transport-items-ajax'),
path('transport/category/', views.add_transport_category_ajax, name='add-transport-category-ajax'),
path('transport/type/', views.add_transport_type_ajax, name='add-transport-type-ajax'),
# Order
path('order/add/', views.add_order_ajax, name='add-order-ajax'),
path('order/item/', views.add_order_item_ajax, name='add-order-item-ajax'),
path('customer/transportation/', views.add_customer_transportation_ajax, name='add-customer-transportation-ajax'),
path('harvest/', views.add_harvest_dispatch_ajax, name='add-harvest-dispatch-ajax'),
path('supply/', views.add_supply_ajax, name='add-supply-ajax'),
path('viz/', views.all_visualizations, name='view-visualizations'),
# Job titles
path('job/titles/add/', views.add_job_title, name='add-job-title'),
path('job/titles/all/', views.all_job_title, name='all-job-titles'),
# path('job/titles/view/<str:job_title_id>', views.job_title_details, name='job_title_details'),
path('job/titles/update/<str:job_title_id>', views.update_job_title, name='update-job-title'),
path('job/titles/remove/<str:job_title_id>', views.deactivate_job_title, name='deactivate-job-title'),
# Job shifts
path('job/shifts/add/', views.add_job_shift, name='add-job-shift'),
path('job/shifts/all/', views.all_job_shifts, name='all-job-shifts'),
path('job/shifts/update/<str:job_shift_id>', views.update_job_shift, name='update-job-shift'),
path('job/shifts/deactivate/<str:job_shift_id>', views.deactivate_job_shift, name='deactivate-job-shift'),
# Staff
path('staff/add/', views.add_staff, name='add-staff'),
path('staff/current/', views.current_staff, name='current-staff'),
path('staff/past/', views.past_staff, name='past-staff'),
path('staff/update/<str:staff_id>', views.update_staff, name='update-staff'),
path('staff/deactivate/<str:staff_id>', views.deactivate_staff, name='deactivate-staff'),
] | digifarming/digifarming/urls.py | from django.contrib import admin
from django.views.generic.base import TemplateView
from django.urls import path, re_path, include
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path(r'^$', TemplateView.as_view(template_name='index.html'), name="home"),
path('register/', views.user_register, name='user_register'),
path('login/', views.user_login, name='user_login'),
path('logout/', views.user_logout, name='user_logout'),
# Facility
path('facility/add/', views.add_facility_ajax, name='add-facility-ajax'),
path('facility/type/', views.add_facility_type_ajax, name='add-facility-type-ajax'),
# Client
path('client/add/', views.add_client_ajax, name='add-client-ajax'),
path('client/type/', views.add_client_type_ajax, name='add-client-type-ajax'),
# Commodity
path('commodity/add/', views.add_commodity_ajax, name='add-commodity-ajax'),
path('commodity/category/', views.add_commodity_category_ajax, name='add-commodity-category-ajax'),
path('commodity/metric/', views.add_commodity_metric_ajax, name='add-commodity-metric-ajax'),
path('commodity/type/', views.add_commodity_type_ajax, name='add-commodity-type-ajax'),
# Transport
path('transport/add/', views.add_transport_items_ajax, name='add-transport-items-ajax'),
path('transport/category/', views.add_transport_category_ajax, name='add-transport-category-ajax'),
path('transport/type/', views.add_transport_type_ajax, name='add-transport-type-ajax'),
# Order
path('order/add/', views.add_order_ajax, name='add-order-ajax'),
path('order/item/', views.add_order_item_ajax, name='add-order-item-ajax'),
path('customer/transportation/', views.add_customer_transportation_ajax, name='add-customer-transportation-ajax'),
path('harvest/', views.add_harvest_dispatch_ajax, name='add-harvest-dispatch-ajax'),
path('supply/', views.add_supply_ajax, name='add-supply-ajax'),
path('viz/', views.all_visualizations, name='view-visualizations'),
# Job titles
path('job/titles/add/', views.add_job_title, name='add-job-title'),
path('job/titles/all/', views.all_job_title, name='all-job-titles'),
# path('job/titles/view/<str:job_title_id>', views.job_title_details, name='job_title_details'),
path('job/titles/update/<str:job_title_id>', views.update_job_title, name='update-job-title'),
path('job/titles/remove/<str:job_title_id>', views.deactivate_job_title, name='deactivate-job-title'),
# Job shifts
path('job/shifts/add/', views.add_job_shift, name='add-job-shift'),
path('job/shifts/all/', views.all_job_shifts, name='all-job-shifts'),
path('job/shifts/update/<str:job_shift_id>', views.update_job_shift, name='update-job-shift'),
path('job/shifts/deactivate/<str:job_shift_id>', views.deactivate_job_shift, name='deactivate-job-shift'),
# Staff
path('staff/add/', views.add_staff, name='add-staff'),
path('staff/current/', views.current_staff, name='current-staff'),
path('staff/past/', views.past_staff, name='past-staff'),
path('staff/update/<str:staff_id>', views.update_staff, name='update-staff'),
path('staff/deactivate/<str:staff_id>', views.deactivate_staff, name='deactivate-staff'),
] | 0.253306 | 0.041579 |
import bs4
import logging
from cryptics.text import (
is_parsable_text_type_1,
parse_text_type_1,
is_parsable_text_type_2,
parse_text_type_2,
)
from cryptics.tables import (
is_parsable_table_type_1,
parse_table_type_1,
is_parsable_table_type_2,
parse_table_type_2,
is_parsable_table_type_3,
parse_table_type_3,
is_parsable_table_type_4,
parse_table_type_4,
is_parsable_table_type_5,
parse_table_type_5,
)
from cryptics.lists import (
is_parsable_list_type_1,
parse_list_type_1,
is_parsable_list_type_2,
parse_list_type_2,
is_parsable_list_type_3,
parse_list_type_3,
is_parsable_list_type_4,
parse_list_type_4,
)
from cryptics.specials import (
is_parsable_special_type_1,
parse_special_type_1,
)
from cryptics.utils import (
extract_puzzle_name,
extract_puzzle_date,
extract_puzzle_url,
)
def try_to_parse_as(html, is_parsable_func, parse_func):
try:
is_parseable = is_parsable_func(html)
except:
return None
if is_parseable:
logging.info(f"Parsing using {parse_func.__name__}")
return parse_func(html)
def postprocess_data(data, html, source_url):
soup = bs4.BeautifulSoup(html, "html.parser")
data["clue"] = data["clue"].str.strip()
data["answer"] = data["answer"].str.strip()
data["definition"] = data["definition"].str.strip()
data["annotation"] = data["annotation"].str.strip()
# Instead of removing periods in each parsing function, we can just remove
# them here - it's simpler.
data["clue_number"] = data["clue_number"].str.strip().replace(".", "")
data["puzzle_name"] = extract_puzzle_name(source_url, soup)
# Occasionally the puzzle date fails. Simply ignore it and move on.
try:
data["puzzle_date"] = extract_puzzle_date(source_url, soup)
except:
pass
data["puzzle_url"] = extract_puzzle_url(soup)
data["source_url"] = source_url
return data
def try_parse(html, source_url):
data = None
parsers = [
(is_parsable_table_type_1, parse_table_type_1),
(is_parsable_table_type_2, parse_table_type_2),
(is_parsable_table_type_3, parse_table_type_3),
(is_parsable_table_type_4, parse_table_type_4),
(is_parsable_table_type_5, parse_table_type_5),
(is_parsable_list_type_1, parse_list_type_1),
(is_parsable_list_type_2, parse_list_type_2),
(is_parsable_list_type_3, parse_list_type_3),
(is_parsable_list_type_4, parse_list_type_4),
(is_parsable_text_type_1, parse_text_type_1),
(is_parsable_text_type_2, parse_text_type_2),
(is_parsable_special_type_1, parse_special_type_1),
]
for is_parsable_func, parse_func in parsers:
data = try_to_parse_as(html, is_parsable_func, parse_func)
if data is not None:
return postprocess_data(data, html, source_url)
return None | cryptics/parse.py | import bs4
import logging
from cryptics.text import (
is_parsable_text_type_1,
parse_text_type_1,
is_parsable_text_type_2,
parse_text_type_2,
)
from cryptics.tables import (
is_parsable_table_type_1,
parse_table_type_1,
is_parsable_table_type_2,
parse_table_type_2,
is_parsable_table_type_3,
parse_table_type_3,
is_parsable_table_type_4,
parse_table_type_4,
is_parsable_table_type_5,
parse_table_type_5,
)
from cryptics.lists import (
is_parsable_list_type_1,
parse_list_type_1,
is_parsable_list_type_2,
parse_list_type_2,
is_parsable_list_type_3,
parse_list_type_3,
is_parsable_list_type_4,
parse_list_type_4,
)
from cryptics.specials import (
is_parsable_special_type_1,
parse_special_type_1,
)
from cryptics.utils import (
extract_puzzle_name,
extract_puzzle_date,
extract_puzzle_url,
)
def try_to_parse_as(html, is_parsable_func, parse_func):
try:
is_parseable = is_parsable_func(html)
except:
return None
if is_parseable:
logging.info(f"Parsing using {parse_func.__name__}")
return parse_func(html)
def postprocess_data(data, html, source_url):
soup = bs4.BeautifulSoup(html, "html.parser")
data["clue"] = data["clue"].str.strip()
data["answer"] = data["answer"].str.strip()
data["definition"] = data["definition"].str.strip()
data["annotation"] = data["annotation"].str.strip()
# Instead of removing periods in each parsing function, we can just remove
# them here - it's simpler.
data["clue_number"] = data["clue_number"].str.strip().replace(".", "")
data["puzzle_name"] = extract_puzzle_name(source_url, soup)
# Occasionally the puzzle date fails. Simply ignore it and move on.
try:
data["puzzle_date"] = extract_puzzle_date(source_url, soup)
except:
pass
data["puzzle_url"] = extract_puzzle_url(soup)
data["source_url"] = source_url
return data
def try_parse(html, source_url):
data = None
parsers = [
(is_parsable_table_type_1, parse_table_type_1),
(is_parsable_table_type_2, parse_table_type_2),
(is_parsable_table_type_3, parse_table_type_3),
(is_parsable_table_type_4, parse_table_type_4),
(is_parsable_table_type_5, parse_table_type_5),
(is_parsable_list_type_1, parse_list_type_1),
(is_parsable_list_type_2, parse_list_type_2),
(is_parsable_list_type_3, parse_list_type_3),
(is_parsable_list_type_4, parse_list_type_4),
(is_parsable_text_type_1, parse_text_type_1),
(is_parsable_text_type_2, parse_text_type_2),
(is_parsable_special_type_1, parse_special_type_1),
]
for is_parsable_func, parse_func in parsers:
data = try_to_parse_as(html, is_parsable_func, parse_func)
if data is not None:
return postprocess_data(data, html, source_url)
return None | 0.359139 | 0.418222 |
import os
import sys
import json
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
from model.loss import dice_loss_2d, surface_channel_loss_2d
from preprocessing.dataset import AgricultureVisionDataset
from testing.image import get_testing_image, create_displayable_test_output
from testing.process import preprocess_image, postprocess_output
from testing.segment import draw_segmentation_map, display_segmented_diagram
class FarmlandAnomalyModel(object):
"""The complete container class for the farmland anomaly segmentation model."""
def __init__(self, model = 'final', **kwargs):
# Set up the actual model which is being stored in the container.
self.model = None
self._initialize_model(model, **kwargs)
@staticmethod
def multi_initialize(modes):
"""Initializes multiple models at the same time for convenience."""
_returnable_models = []
# Create a list of models for each of the provided modes.
for mode in modes:
try:
_mode_model = FarmlandAnomalyModel(model = mode)
except Exception as e:
raise e
else:
_returnable_models.append(_mode_model)
# Return the models.
return _returnable_models
def __str__(self):
# Print out the summary of the model architecture.
try:
print(self.model.summary())
except TypeError:
# Technically, this method doesn't actually return anything, it just prints
# out a summary. However, it's nice to have an easily displayable way to view
# the model architecture, so we just capture the error and move on here.
pass
finally:
# To subjugate any TypeErrors which may arise in the future.
return ''
def _initialize_model(self, model, **kwargs):
"""Initializes the model from a provided argument or weights path."""
# First, determine if the argument is actually a path.
if model.endswith('.hdf5') or model.endswith('.h5'):
# Validate the path and set the class argument.
if os.path.exists(model):
# Determine if custom objects are necessary.
if 'custom_objects' in kwargs.keys():
# Validate the custom objects first.
custom_objects = self._validate_custom_objects(kwargs['custom_objects'])
self.model = load_model(model, custom_objects = custom_objects)
else:
# Otherwise, just load the model.
self.model = load_model(model)
else:
raise FileNotFoundError(f"Received a model path ending with .hdf5 ({model}), but "
f"the path to the model does not exist.")
else:
# Otherwise, we've gotten a shortcut model name in which case load that specific one.
if model.lower() in ['dice20', '20', 'first', 'stage1']:
self.model = load_model(os.path.join(os.path.dirname(__file__), 'logs/save/Model-Dice2D-20.hdf5'),
custom_objects = self._validate_custom_objects('dice'))
elif model.lower() in ['scl40', '40', 'middle', 'stage2', 'intermediate']:
self.model = load_model(os.path.join(os.path.dirname(__file__), 'logs/save/Model-Dice-SCL-40.hdf5'),
custom_objects = self._validate_custom_objects('scl'))
elif model.lower() in ['dice60', '60', 'last', 'final', 'stage3']:
self.model = load_model(os.path.join(os.path.dirname(__file__), 'logs/save/Model-Dice-SCL-Dice-60.hdf5'),
custom_objects = self._validate_custom_objects('dice'))
@staticmethod
def _validate_custom_objects(custom_objects):
"""Determine the custom objects of a model, then initialize and set them to the class."""
if isinstance(custom_objects, dict):
# If the provided item is a dictionary of objects, then simply return the
# dictionary of custom objects; there is no processing to do here.
return custom_objects
else:
# Otherwise, a shortcut name has been provided for a loss function, in which
# case we need to determine and validate the actual provided function.
if custom_objects.lower() == 'dice':
return {'dice_loss_2d': dice_loss_2d}
elif custom_objects.lower() == 'scl' or custom_objects.lower() == 'surface':
return {'surface_loss_2d': surface_channel_loss_2d}
else:
raise ValueError(f"Received invalid custom object shortcut keyword {custom_objects}.")
def predict(self, test_image):
"""Predicts the output of a provided input image.
The image can be either directly from the dataset, or be a provided image path/read image.
Returns the segmented 8-channel image.
The method `show_segmented_predictions` displays the actual segmented prediction maps,
and the method `show_channel_predictions` shows the channel-by-channel predictions.
Parameters:
- test_image: The inputted test image (see above for information about the image format).
Returns:
- The 8-channel prediction image.
"""
# Get a valid testing image.
if isinstance(test_image, (list, tuple, set)):
# If we're using an item from the dataset, then get the specific dataset item.
testing_image = get_testing_image(
mode = test_image[0], value = test_image[1], with_truth = False)
else:
# Otherwise, preprocess the input image.
testing_image = preprocess_image(test_image)
# Now, predict the output from the model and return postprocessed predictions.
return postprocess_output(self.model.predict(testing_image))
def show_segmented_predictions(self, test_image, with_truth = True):
"""Displays the segmented model predictions."""
if isinstance(test_image, (list, tuple, set)):
# If we're using an item from the dataset, then we need to get the specific dataset item.
if with_truth:
# Get the ground truth too if requested to.
testing_image, testing_truth = get_testing_image(
mode = test_image[0], value = test_image[1], with_truth = True)
else:
# Get the ground truth too if requested to.
testing_image = get_testing_image(
mode = test_image[0], value = test_image[1], with_truth = False)
else:
# Otherwise, just process the test image.
testing_image = test_image
# Get the model predictions.
predicted = self.model.predict(testing_image)
predicted = postprocess_output(predicted)
# Convert the test image/label into usable images.
displayable_test_image = create_displayable_test_output(test_image)
if with_truth:
testing_truth = postprocess_output(testing_truth)
# Draw the contours onto the main image.
annotated_test_prediction = draw_segmentation_map(displayable_test_image.copy(), predicted)
if with_truth:
annotated_test_truth = draw_segmentation_map(displayable_test_image.copy(), testing_truth)
# Display the diagram.
if with_truth:
display_segmented_diagram(displayable_test_image, annotated_test_prediction, annotated_test_truth)
else:
display_segmented_diagram(displayable_test_image, annotated_test_prediction) | prototype.py | import os
import sys
import json
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
from model.loss import dice_loss_2d, surface_channel_loss_2d
from preprocessing.dataset import AgricultureVisionDataset
from testing.image import get_testing_image, create_displayable_test_output
from testing.process import preprocess_image, postprocess_output
from testing.segment import draw_segmentation_map, display_segmented_diagram
class FarmlandAnomalyModel(object):
"""The complete container class for the farmland anomaly segmentation model."""
def __init__(self, model = 'final', **kwargs):
# Set up the actual model which is being stored in the container.
self.model = None
self._initialize_model(model, **kwargs)
@staticmethod
def multi_initialize(modes):
"""Initializes multiple models at the same time for convenience."""
_returnable_models = []
# Create a list of models for each of the provided modes.
for mode in modes:
try:
_mode_model = FarmlandAnomalyModel(model = mode)
except Exception as e:
raise e
else:
_returnable_models.append(_mode_model)
# Return the models.
return _returnable_models
def __str__(self):
# Print out the summary of the model architecture.
try:
print(self.model.summary())
except TypeError:
# Technically, this method doesn't actually return anything, it just prints
# out a summary. However, it's nice to have an easily displayable way to view
# the model architecture, so we just capture the error and move on here.
pass
finally:
# To subjugate any TypeErrors which may arise in the future.
return ''
def _initialize_model(self, model, **kwargs):
"""Initializes the model from a provided argument or weights path."""
# First, determine if the argument is actually a path.
if model.endswith('.hdf5') or model.endswith('.h5'):
# Validate the path and set the class argument.
if os.path.exists(model):
# Determine if custom objects are necessary.
if 'custom_objects' in kwargs.keys():
# Validate the custom objects first.
custom_objects = self._validate_custom_objects(kwargs['custom_objects'])
self.model = load_model(model, custom_objects = custom_objects)
else:
# Otherwise, just load the model.
self.model = load_model(model)
else:
raise FileNotFoundError(f"Received a model path ending with .hdf5 ({model}), but "
f"the path to the model does not exist.")
else:
# Otherwise, we've gotten a shortcut model name in which case load that specific one.
if model.lower() in ['dice20', '20', 'first', 'stage1']:
self.model = load_model(os.path.join(os.path.dirname(__file__), 'logs/save/Model-Dice2D-20.hdf5'),
custom_objects = self._validate_custom_objects('dice'))
elif model.lower() in ['scl40', '40', 'middle', 'stage2', 'intermediate']:
self.model = load_model(os.path.join(os.path.dirname(__file__), 'logs/save/Model-Dice-SCL-40.hdf5'),
custom_objects = self._validate_custom_objects('scl'))
elif model.lower() in ['dice60', '60', 'last', 'final', 'stage3']:
self.model = load_model(os.path.join(os.path.dirname(__file__), 'logs/save/Model-Dice-SCL-Dice-60.hdf5'),
custom_objects = self._validate_custom_objects('dice'))
@staticmethod
def _validate_custom_objects(custom_objects):
"""Determine the custom objects of a model, then initialize and set them to the class."""
if isinstance(custom_objects, dict):
# If the provided item is a dictionary of objects, then simply return the
# dictionary of custom objects; there is no processing to do here.
return custom_objects
else:
# Otherwise, a shortcut name has been provided for a loss function, in which
# case we need to determine and validate the actual provided function.
if custom_objects.lower() == 'dice':
return {'dice_loss_2d': dice_loss_2d}
elif custom_objects.lower() == 'scl' or custom_objects.lower() == 'surface':
return {'surface_loss_2d': surface_channel_loss_2d}
else:
raise ValueError(f"Received invalid custom object shortcut keyword {custom_objects}.")
def predict(self, test_image):
"""Predicts the output of a provided input image.
The image can be either directly from the dataset, or be a provided image path/read image.
Returns the segmented 8-channel image.
The method `show_segmented_predictions` displays the actual segmented prediction maps,
and the method `show_channel_predictions` shows the channel-by-channel predictions.
Parameters:
- test_image: The inputted test image (see above for information about the image format).
Returns:
- The 8-channel prediction image.
"""
# Get a valid testing image.
if isinstance(test_image, (list, tuple, set)):
# If we're using an item from the dataset, then get the specific dataset item.
testing_image = get_testing_image(
mode = test_image[0], value = test_image[1], with_truth = False)
else:
# Otherwise, preprocess the input image.
testing_image = preprocess_image(test_image)
# Now, predict the output from the model and return postprocessed predictions.
return postprocess_output(self.model.predict(testing_image))
def show_segmented_predictions(self, test_image, with_truth = True):
"""Displays the segmented model predictions."""
if isinstance(test_image, (list, tuple, set)):
# If we're using an item from the dataset, then we need to get the specific dataset item.
if with_truth:
# Get the ground truth too if requested to.
testing_image, testing_truth = get_testing_image(
mode = test_image[0], value = test_image[1], with_truth = True)
else:
# Get the ground truth too if requested to.
testing_image = get_testing_image(
mode = test_image[0], value = test_image[1], with_truth = False)
else:
# Otherwise, just process the test image.
testing_image = test_image
# Get the model predictions.
predicted = self.model.predict(testing_image)
predicted = postprocess_output(predicted)
# Convert the test image/label into usable images.
displayable_test_image = create_displayable_test_output(test_image)
if with_truth:
testing_truth = postprocess_output(testing_truth)
# Draw the contours onto the main image.
annotated_test_prediction = draw_segmentation_map(displayable_test_image.copy(), predicted)
if with_truth:
annotated_test_truth = draw_segmentation_map(displayable_test_image.copy(), testing_truth)
# Display the diagram.
if with_truth:
display_segmented_diagram(displayable_test_image, annotated_test_prediction, annotated_test_truth)
else:
display_segmented_diagram(displayable_test_image, annotated_test_prediction) | 0.736495 | 0.33939 |
class TBar():
_max = -1
length = 50
infile = None
rawdata = None
normdata = None
vertical = False
def __init__(self, _max=0, length=0, vertical=False):
if _max:
self._max = _max
if length:
self.length = length
self.vertical = vertical
self.rawdata = []
self.normdata = []
return
def add_data_itr(self, itr):
# itr: iterable of (key, value), where key is str, value is float
self.rawdata.extend(itr)
return
def __str__(self):
if len(self.rawdata) == 0:
return ""
self.__set_normdata()
bars = []
maxkeylen = max(len(k) for k, v in self.normdata)
fillspace = " " * maxkeylen
if self.vertical:
sep = "-"
else:
sep = "|"
for k, v in self.normdata:
if self.vertical:
# reverse the string of key
k = k[::-1]
bars.append(
(fillspace + k)[-maxkeylen:] +
sep +
("*" * int(self.length * v) + " " * self.length)[:self.length] +
sep
)
# transpose
if self.vertical:
bars = zip(*bars)
bars = list("".join(e) for e in reversed(tuple(bars)))
# add scale strings
if self.vertical:
scalestr = str(self._max)
leftspaces = " " * len(scalestr)
for i in range(len(bars)):
if i == 0:
bars[i] = scalestr + bars[i]
else:
bars[i] = leftspaces + bars[i]
else:
bars.insert(0,
(" " * (maxkeylen + 1 + self.length)) + str(self._max))
return str("\n".join(bars))
def __set_max_from_data(self):
self._max = max(tuple(zip(*self.rawdata))[1])
return self._max
def __set_normdata(self):
if self._max == -1:
self.__set_max_from_data()
for k, v in self.rawdata:
self.normdata.append((k, v / self._max))
return | tbar/tbar.py |
class TBar():
_max = -1
length = 50
infile = None
rawdata = None
normdata = None
vertical = False
def __init__(self, _max=0, length=0, vertical=False):
if _max:
self._max = _max
if length:
self.length = length
self.vertical = vertical
self.rawdata = []
self.normdata = []
return
def add_data_itr(self, itr):
# itr: iterable of (key, value), where key is str, value is float
self.rawdata.extend(itr)
return
def __str__(self):
if len(self.rawdata) == 0:
return ""
self.__set_normdata()
bars = []
maxkeylen = max(len(k) for k, v in self.normdata)
fillspace = " " * maxkeylen
if self.vertical:
sep = "-"
else:
sep = "|"
for k, v in self.normdata:
if self.vertical:
# reverse the string of key
k = k[::-1]
bars.append(
(fillspace + k)[-maxkeylen:] +
sep +
("*" * int(self.length * v) + " " * self.length)[:self.length] +
sep
)
# transpose
if self.vertical:
bars = zip(*bars)
bars = list("".join(e) for e in reversed(tuple(bars)))
# add scale strings
if self.vertical:
scalestr = str(self._max)
leftspaces = " " * len(scalestr)
for i in range(len(bars)):
if i == 0:
bars[i] = scalestr + bars[i]
else:
bars[i] = leftspaces + bars[i]
else:
bars.insert(0,
(" " * (maxkeylen + 1 + self.length)) + str(self._max))
return str("\n".join(bars))
def __set_max_from_data(self):
self._max = max(tuple(zip(*self.rawdata))[1])
return self._max
def __set_normdata(self):
if self._max == -1:
self.__set_max_from_data()
for k, v in self.rawdata:
self.normdata.append((k, v / self._max))
return | 0.544559 | 0.122891 |
"""Demo some features of NetCore."""
import argparse
import mininet.topolib
import mininet.topo
import MininetDriver as md
from multiprocessing import Process
import subprocess as sp
import time
# H1 0----1 S3 2----0 H2
basic = mininet.topolib.TreeTopo(depth=1, fanout=2)
def getRunner(flag, topo=basic):
ctrl = sp.Popen([md.CONTROLLER_PATH,
flag,
],stdout=sp.PIPE)
time.sleep(1)
return md.MininetRunner(topo, ctrl)
def host_output(host, title, cmds):
title.append('Ctrl+D to exit')
titleCommands = ['echo ' + '\\"' + line + '\\"' for line in title]
cmd = 'xterm -e "' + ("; ".join(titleCommands + cmds + ['bash'])) + '"'
print cmd
host.cmd(cmd)
def demo_learning_switch():
runner = getRunner('--maclearning')
host_output(runner.hosts[0],
['Ping Connectivity over MAC Learning:'],
['ping ' + runner.hosts[1].IP() + ' -c 10'])
runner.destroy()
def demo_arp():
runner = getRunner('--arp')
target = runner.hosts[1].IP()
host_output(runner.hosts[0],
['ARP caching using MAC Learning for connectivity'],
['echo \\"Unfortunately, arping doesn\'t work on mininet.\\"',
'arp -n',
'echo \\"At first, ping uses regular ARP over the MAC learning:\\"',
'ping ' + target + ' -c 1',
'arp -n',
'echo \\"But if we clear the ARP table,'
' we get a reply from the controller:\\"',
'arp -d ' + target,
'arp -n',
'ping ' + target + ' -c 1',
])
runner.destroy()
def demo_sink(user):
runner = getRunner('--sink', topo=mininet.topo.SingleSwitchTopo())
target = runner.hosts[1].IP()
ping = Process (target=host_output,
args=(runner.hosts[0],
['Pings get blocked when at high frequency,'],
['echo \\"# ping ' + target + '-i 0.1 -c 50 -q\\"',
'ping ' + target + ' -i 0.1 -c 50 -q',
'echo \\"but SSH still works because it\'s on a different slice.\\"',
'echo \\"(but do give it a moment...)\\"',
'echo \\"# ssh ' + user + '@' + target + '\\"',
'ssh ' + user + '@' + target]))
ping.start()
sshd = Process (target=host_output,
args=(runner.hosts[1],
['SSH Daemon on the other host'],
['$(which sshd) -d']))
sshd.start()
ping.join()
sshd.join()
runner.destroy()
def main():
parser = argparse.ArgumentParser("Run some NetCore demos")
parser.add_argument('user', metavar='USER', type=str,
help='user to ssh to (usually you)')
args = parser.parse_args()
demo_learning_switch()
demo_arp()
demo_sink(args.user)
if __name__ == '__main__':
main() | examples/demo.py | """Demo some features of NetCore."""
import argparse
import mininet.topolib
import mininet.topo
import MininetDriver as md
from multiprocessing import Process
import subprocess as sp
import time
# H1 0----1 S3 2----0 H2
basic = mininet.topolib.TreeTopo(depth=1, fanout=2)
def getRunner(flag, topo=basic):
ctrl = sp.Popen([md.CONTROLLER_PATH,
flag,
],stdout=sp.PIPE)
time.sleep(1)
return md.MininetRunner(topo, ctrl)
def host_output(host, title, cmds):
title.append('Ctrl+D to exit')
titleCommands = ['echo ' + '\\"' + line + '\\"' for line in title]
cmd = 'xterm -e "' + ("; ".join(titleCommands + cmds + ['bash'])) + '"'
print cmd
host.cmd(cmd)
def demo_learning_switch():
runner = getRunner('--maclearning')
host_output(runner.hosts[0],
['Ping Connectivity over MAC Learning:'],
['ping ' + runner.hosts[1].IP() + ' -c 10'])
runner.destroy()
def demo_arp():
runner = getRunner('--arp')
target = runner.hosts[1].IP()
host_output(runner.hosts[0],
['ARP caching using MAC Learning for connectivity'],
['echo \\"Unfortunately, arping doesn\'t work on mininet.\\"',
'arp -n',
'echo \\"At first, ping uses regular ARP over the MAC learning:\\"',
'ping ' + target + ' -c 1',
'arp -n',
'echo \\"But if we clear the ARP table,'
' we get a reply from the controller:\\"',
'arp -d ' + target,
'arp -n',
'ping ' + target + ' -c 1',
])
runner.destroy()
def demo_sink(user):
runner = getRunner('--sink', topo=mininet.topo.SingleSwitchTopo())
target = runner.hosts[1].IP()
ping = Process (target=host_output,
args=(runner.hosts[0],
['Pings get blocked when at high frequency,'],
['echo \\"# ping ' + target + '-i 0.1 -c 50 -q\\"',
'ping ' + target + ' -i 0.1 -c 50 -q',
'echo \\"but SSH still works because it\'s on a different slice.\\"',
'echo \\"(but do give it a moment...)\\"',
'echo \\"# ssh ' + user + '@' + target + '\\"',
'ssh ' + user + '@' + target]))
ping.start()
sshd = Process (target=host_output,
args=(runner.hosts[1],
['SSH Daemon on the other host'],
['$(which sshd) -d']))
sshd.start()
ping.join()
sshd.join()
runner.destroy()
def main():
parser = argparse.ArgumentParser("Run some NetCore demos")
parser.add_argument('user', metavar='USER', type=str,
help='user to ssh to (usually you)')
args = parser.parse_args()
demo_learning_switch()
demo_arp()
demo_sink(args.user)
if __name__ == '__main__':
main() | 0.514644 | 0.117319 |
import re
import os
import sys
import importlib
import os.path
class Exporter(object):
data = None
def __init__(self, data):
self.data = data
def write(self, directory):
with open("Out.txt", "w") as handle:
# Write the header
handle.write("====== Test ======\n\n")
# For each file entry...
for file in self.data["files"]:
if (len(file.global_functions) == 0 and len(file.bound_functions.keys()) == 0 and len(file.datablocks) == 0):
continue
# Calculate the total entry count
entry_count = len(file.global_functions) + len(file.datablocks)
for type in file.bound_functions.keys():
entry_count = entry_count + len(file.bound_functions[type])
handle.write("===== Entries in %s (%u total) =====\n\n" % (file.path, entry_count))
handle.write("===== Global Functions (%u total) =====\n\n" % len(file.global_functions))
# For each global function...
for function in file.global_functions:
handle.write("==== %s ====\n" % function.name)
handle.write("File (line %u): %s\n\n" % (function.line, file.path))
if (len(function.parameters) != 0):
handle.write("Parameters (in order):\n")
for parameter in function.parameters:
handle.write(" * %s\n" % parameter)
else:
handle.write("Parameters: None\n")
handle.write("\n")
# For each known type...
for type in file.bound_functions.keys():
handle.write("===== Bound Functions on %s (%u total) =====\n\n" % (type, len(file.bound_functions[type])))
# For each function for this type...
for function in file.bound_functions[type]:
handle.write("==== %s::%s ====\n" % (function.type, function.name))
handle.write("File (line %u): %s\n\n" % (function.line, file.path))
if (len(function.parameters) != 0):
handle.write("Parameters (in order):\n")
for parameter in function.parameters:
handle.write(" * %s\n" % parameter)
else:
handle.write("Parameters: None\n")
handle.write("\n")
print("Done processing.") | exporters/doku.py | import re
import os
import sys
import importlib
import os.path
class Exporter(object):
data = None
def __init__(self, data):
self.data = data
def write(self, directory):
with open("Out.txt", "w") as handle:
# Write the header
handle.write("====== Test ======\n\n")
# For each file entry...
for file in self.data["files"]:
if (len(file.global_functions) == 0 and len(file.bound_functions.keys()) == 0 and len(file.datablocks) == 0):
continue
# Calculate the total entry count
entry_count = len(file.global_functions) + len(file.datablocks)
for type in file.bound_functions.keys():
entry_count = entry_count + len(file.bound_functions[type])
handle.write("===== Entries in %s (%u total) =====\n\n" % (file.path, entry_count))
handle.write("===== Global Functions (%u total) =====\n\n" % len(file.global_functions))
# For each global function...
for function in file.global_functions:
handle.write("==== %s ====\n" % function.name)
handle.write("File (line %u): %s\n\n" % (function.line, file.path))
if (len(function.parameters) != 0):
handle.write("Parameters (in order):\n")
for parameter in function.parameters:
handle.write(" * %s\n" % parameter)
else:
handle.write("Parameters: None\n")
handle.write("\n")
# For each known type...
for type in file.bound_functions.keys():
handle.write("===== Bound Functions on %s (%u total) =====\n\n" % (type, len(file.bound_functions[type])))
# For each function for this type...
for function in file.bound_functions[type]:
handle.write("==== %s::%s ====\n" % (function.type, function.name))
handle.write("File (line %u): %s\n\n" % (function.line, file.path))
if (len(function.parameters) != 0):
handle.write("Parameters (in order):\n")
for parameter in function.parameters:
handle.write(" * %s\n" % parameter)
else:
handle.write("Parameters: None\n")
handle.write("\n")
print("Done processing.") | 0.193795 | 0.145996 |
import argparse
from pathlib import Path
from jinja2 import Environment, PackageLoader, select_autoescape
helm_dir = Path(__file__).resolve().parent.parent / "helm"
# Map the chart names to their location. This is useful for updating
# dependencies (in Chart.yaml) as well as the charts.
helm_charts = [
helm_dir / "aws-login",
helm_dir / "thecombine",
helm_dir / "thecombine" / "charts" / "backend",
helm_dir / "thecombine" / "charts" / "database",
helm_dir / "thecombine" / "charts" / "frontend",
helm_dir / "thecombine" / "charts" / "maintenance",
helm_dir / "cert-proxy-client",
helm_dir / "cert-proxy-server",
helm_dir / "create-admin-user",
]
def parse_args() -> argparse.Namespace:
"""Define command line arguments for parser."""
# Parse user command line arguments
parser = argparse.ArgumentParser(
description="Update the version and appVersions for the Helm charts.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"version",
help="New version for the Helm charts.",
)
return parser.parse_args()
def generate(version: str) -> None:
print("New version: {version}")
version_config = {
"version": {
"aws_login": "v0.2.0",
"thecombine": f"v{version}",
"cert_proxy_client": f"v{version}",
"cert_proxy_server": f"v{version}",
"create_admin_user": f"v{version}",
}
}
for chart_dir in helm_charts:
# Initialize the Jinja2 environment
jinja_env = Environment(
loader=PackageLoader("combine_charts", str(chart_dir)),
autoescape=select_autoescape(["html", "xml"]),
trim_blocks=False,
lstrip_blocks=True,
)
template = jinja_env.get_template("Chart.yaml.j2")
final_chart = chart_dir / "Chart.yaml"
print(f"Writing: {final_chart}")
final_chart.write_text(template.render(version_config))
def main() -> None:
args = parse_args()
generate(args.version)
if __name__ == "__main__":
main() | deploy/scripts/combine_charts.py | import argparse
from pathlib import Path
from jinja2 import Environment, PackageLoader, select_autoescape
helm_dir = Path(__file__).resolve().parent.parent / "helm"
# Map the chart names to their location. This is useful for updating
# dependencies (in Chart.yaml) as well as the charts.
helm_charts = [
helm_dir / "aws-login",
helm_dir / "thecombine",
helm_dir / "thecombine" / "charts" / "backend",
helm_dir / "thecombine" / "charts" / "database",
helm_dir / "thecombine" / "charts" / "frontend",
helm_dir / "thecombine" / "charts" / "maintenance",
helm_dir / "cert-proxy-client",
helm_dir / "cert-proxy-server",
helm_dir / "create-admin-user",
]
def parse_args() -> argparse.Namespace:
"""Define command line arguments for parser."""
# Parse user command line arguments
parser = argparse.ArgumentParser(
description="Update the version and appVersions for the Helm charts.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"version",
help="New version for the Helm charts.",
)
return parser.parse_args()
def generate(version: str) -> None:
print("New version: {version}")
version_config = {
"version": {
"aws_login": "v0.2.0",
"thecombine": f"v{version}",
"cert_proxy_client": f"v{version}",
"cert_proxy_server": f"v{version}",
"create_admin_user": f"v{version}",
}
}
for chart_dir in helm_charts:
# Initialize the Jinja2 environment
jinja_env = Environment(
loader=PackageLoader("combine_charts", str(chart_dir)),
autoescape=select_autoescape(["html", "xml"]),
trim_blocks=False,
lstrip_blocks=True,
)
template = jinja_env.get_template("Chart.yaml.j2")
final_chart = chart_dir / "Chart.yaml"
print(f"Writing: {final_chart}")
final_chart.write_text(template.render(version_config))
def main() -> None:
args = parse_args()
generate(args.version)
if __name__ == "__main__":
main() | 0.574992 | 0.195709 |
import getopt
import sys
from algorithms import Kruskal, Prim
from utils.graph import (DisjointSet, create_graph, edges_to_graph,
graph_to_edges)
from utils.io import (read_input, save_clusters_csv, save_clusters_png,
save_mst_csv, save_mst_png)
class Config:
data_file = None
class_file = None
output_type = None
mode_mst = False
mode_clustering = False
kclusters = 0
show_help = False
def parse_args(argv):
shortopts = 'd:c:k:o:mh'
longopts = [
'datafile='
'classfile=',
'min-span-tree=',
'k-clusters=',
'output-type=',
'help'
]
config = Config()
options, _ = getopt.getopt(sys.argv[1:], shortopts, longopts)
for opt, arg in options:
if opt in ('-d', '--datafile'):
config.data_file = arg
elif opt in ('-c', '--classfile'):
config.class_file = arg
elif opt in ('-m', '--min-span-tree'):
config.mode_mst = True
elif opt in ('-k', '--k-clusters'):
config.mode_clustering = True
config.kclusters = int(arg)
elif opt in ('-o', '--output-type'):
if arg in ('csv', 'png'):
config.output_type = arg
elif opt in ('-h', '--help'):
config.show_help = True
return config
def print_help():
print("""Clustering algorithms using Minimium Spanning Tree.
Usage:
python main.py -d data.txt -c classes.txt -k 7 -o png
python main.py -d data.txt -c classes.txt -m -o csv
Options:
-d --datafile=FILE Data points file
-c --classfile=FILE Classes file
-m --min-span-tree Find the MST
-k --k-clusters=N Find k clusters
-o --output-type=png|csv Specify the output type
-h --help Print this message
""")
if __name__ == '__main__':
if len(sys.argv) <= 1:
print('Missing arguments.')
sys.exit(1)
cfg = parse_args(sys.argv[1:])
points, classes, graph = None, None, None
if cfg.show_help:
print_help()
sys.exit(0)
if not cfg.data_file:
sys.stderr.write('Missing datafile argument.\n')
sys.exit(1)
if not cfg.class_file:
sys.stderr.write('Missing classfile argument.\n')
sys.exit(1)
if not cfg.mode_mst and not cfg.mode_clustering:
sys.stderr.write('No mode specified.\n')
sys.exit(1)
if not cfg.output_type:
sys.stderr.write('Missing output type argument.\n')
sys.exit(1)
try:
points, classes = read_input('data.txt', 'classes.txt')
except:
sys.stderr.write('Unable to read input files.\n')
sys.exit(1)
graph = create_graph(points)
if cfg.mode_mst:
mst_kruskal = Kruskal.mst(graph)
mst_prim = Prim.mst(graph)
if cfg.output_type == 'csv':
save_mst_csv('mst_kruskal.csv', mst_kruskal)
save_mst_csv('mst_prim.csv', mst_prim)
elif cfg.output_type == 'png':
save_mst_png('mst_kruskal.png', mst_kruskal, points)
save_mst_png('mst_prim.png', mst_prim, points)
elif cfg.mode_clustering:
clusters_kruskal = Kruskal.clustering(graph, cfg.kclusters)
clusters_prim = Prim.clustering(graph, cfg.kclusters)
if cfg.output_type == 'csv':
save_clusters_csv('clusters_kruskal.csv', clusters_kruskal)
save_clusters_csv('clusters_prim.csv', clusters_prim)
elif cfg.output_type == 'png':
save_clusters_png('clusters_kruskal.png', clusters_kruskal, points)
save_clusters_png('clusters_prim.png', clusters_prim, points) | main.py | import getopt
import sys
from algorithms import Kruskal, Prim
from utils.graph import (DisjointSet, create_graph, edges_to_graph,
graph_to_edges)
from utils.io import (read_input, save_clusters_csv, save_clusters_png,
save_mst_csv, save_mst_png)
class Config:
data_file = None
class_file = None
output_type = None
mode_mst = False
mode_clustering = False
kclusters = 0
show_help = False
def parse_args(argv):
shortopts = 'd:c:k:o:mh'
longopts = [
'datafile='
'classfile=',
'min-span-tree=',
'k-clusters=',
'output-type=',
'help'
]
config = Config()
options, _ = getopt.getopt(sys.argv[1:], shortopts, longopts)
for opt, arg in options:
if opt in ('-d', '--datafile'):
config.data_file = arg
elif opt in ('-c', '--classfile'):
config.class_file = arg
elif opt in ('-m', '--min-span-tree'):
config.mode_mst = True
elif opt in ('-k', '--k-clusters'):
config.mode_clustering = True
config.kclusters = int(arg)
elif opt in ('-o', '--output-type'):
if arg in ('csv', 'png'):
config.output_type = arg
elif opt in ('-h', '--help'):
config.show_help = True
return config
def print_help():
print("""Clustering algorithms using Minimium Spanning Tree.
Usage:
python main.py -d data.txt -c classes.txt -k 7 -o png
python main.py -d data.txt -c classes.txt -m -o csv
Options:
-d --datafile=FILE Data points file
-c --classfile=FILE Classes file
-m --min-span-tree Find the MST
-k --k-clusters=N Find k clusters
-o --output-type=png|csv Specify the output type
-h --help Print this message
""")
if __name__ == '__main__':
if len(sys.argv) <= 1:
print('Missing arguments.')
sys.exit(1)
cfg = parse_args(sys.argv[1:])
points, classes, graph = None, None, None
if cfg.show_help:
print_help()
sys.exit(0)
if not cfg.data_file:
sys.stderr.write('Missing datafile argument.\n')
sys.exit(1)
if not cfg.class_file:
sys.stderr.write('Missing classfile argument.\n')
sys.exit(1)
if not cfg.mode_mst and not cfg.mode_clustering:
sys.stderr.write('No mode specified.\n')
sys.exit(1)
if not cfg.output_type:
sys.stderr.write('Missing output type argument.\n')
sys.exit(1)
try:
points, classes = read_input('data.txt', 'classes.txt')
except:
sys.stderr.write('Unable to read input files.\n')
sys.exit(1)
graph = create_graph(points)
if cfg.mode_mst:
mst_kruskal = Kruskal.mst(graph)
mst_prim = Prim.mst(graph)
if cfg.output_type == 'csv':
save_mst_csv('mst_kruskal.csv', mst_kruskal)
save_mst_csv('mst_prim.csv', mst_prim)
elif cfg.output_type == 'png':
save_mst_png('mst_kruskal.png', mst_kruskal, points)
save_mst_png('mst_prim.png', mst_prim, points)
elif cfg.mode_clustering:
clusters_kruskal = Kruskal.clustering(graph, cfg.kclusters)
clusters_prim = Prim.clustering(graph, cfg.kclusters)
if cfg.output_type == 'csv':
save_clusters_csv('clusters_kruskal.csv', clusters_kruskal)
save_clusters_csv('clusters_prim.csv', clusters_prim)
elif cfg.output_type == 'png':
save_clusters_png('clusters_kruskal.png', clusters_kruskal, points)
save_clusters_png('clusters_prim.png', clusters_prim, points) | 0.251556 | 0.123842 |
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.text import Truncator
# Create your models here.
class Board(models.Model):
"""
Class for message boards.
"""
name = models.CharField(max_length=30, unique=True)
description = models.CharField(max_length=100)
def __repr__(self):
cls = self.__class__.__name__
return f"<{cls}: name={self.name!r} description={self.description!r}>"
def __str__(self):
return self.name
def get_comments_count(self):
"""
returns number of comments belonging to the board.
"""
return Comment.objects.filter(post__board=self).count()
def get_last_comment(self):
"""
return the last comment belonging to the board.
"""
return Comment.objects.filter(post__board=self).order_by("-created_at").first()
class Post(models.Model):
"""
Class for post
"""
subject = models.CharField(max_length=255)
message = models.TextField(max_length=4000)
board = models.ForeignKey(Board, related_name="posts", on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(null=True)
created_by = models.ForeignKey(User, related_name="create_by", on_delete=models.CASCADE)
updated_by = models.ForeignKey(User, null=True, related_name="+", on_delete=models.CASCADE)
def __repr__(self):
cls = self.__class__.__name__
return f"<{cls}: board={self.board!r} subject={self.subject!r} \
created_by={self.created_by!r} updated_at={self.updated_at!r} \
content={self.message!r}>"
def __str__(self):
return self.subject
def get_absolute_url(self):
"""
returns url to post
"""
return reverse("forums:board-detail", kwargs={"board": self.board.name, "pk": self.pk})
class Comment(Post):
"""
Class for comments
"""
post = models.ForeignKey(Post, related_name="comments", on_delete=models.CASCADE)
def __str__(self):
truncated_message = Truncator(self.message)
return truncated_message.chars(30)
def get_absolute_url(self):
"""
returns url to post
"""
return reverse(
"forums:comment-detail", kwargs={"board": self.post.board.name, "post_pk": self.post.pk, "pk": self.pk}
) | acceptable-albatrosses/albatrosses_hub/forums/models.py | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.text import Truncator
# Create your models here.
class Board(models.Model):
"""
Class for message boards.
"""
name = models.CharField(max_length=30, unique=True)
description = models.CharField(max_length=100)
def __repr__(self):
cls = self.__class__.__name__
return f"<{cls}: name={self.name!r} description={self.description!r}>"
def __str__(self):
return self.name
def get_comments_count(self):
"""
returns number of comments belonging to the board.
"""
return Comment.objects.filter(post__board=self).count()
def get_last_comment(self):
"""
return the last comment belonging to the board.
"""
return Comment.objects.filter(post__board=self).order_by("-created_at").first()
class Post(models.Model):
"""
Class for post
"""
subject = models.CharField(max_length=255)
message = models.TextField(max_length=4000)
board = models.ForeignKey(Board, related_name="posts", on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(null=True)
created_by = models.ForeignKey(User, related_name="create_by", on_delete=models.CASCADE)
updated_by = models.ForeignKey(User, null=True, related_name="+", on_delete=models.CASCADE)
def __repr__(self):
cls = self.__class__.__name__
return f"<{cls}: board={self.board!r} subject={self.subject!r} \
created_by={self.created_by!r} updated_at={self.updated_at!r} \
content={self.message!r}>"
def __str__(self):
return self.subject
def get_absolute_url(self):
"""
returns url to post
"""
return reverse("forums:board-detail", kwargs={"board": self.board.name, "pk": self.pk})
class Comment(Post):
"""
Class for comments
"""
post = models.ForeignKey(Post, related_name="comments", on_delete=models.CASCADE)
def __str__(self):
truncated_message = Truncator(self.message)
return truncated_message.chars(30)
def get_absolute_url(self):
"""
returns url to post
"""
return reverse(
"forums:comment-detail", kwargs={"board": self.post.board.name, "post_pk": self.post.pk, "pk": self.pk}
) | 0.725454 | 0.163512 |
import tensorflow as tf
import numpy as np
def get_shape(spec: str, spec_shape: dict):
return tuple(spec_shape[dim] for dim in spec)
def expand_transform(x, input_spec: str, output_spec: str, output_spec_shape: dict, numpy=False):
assert len(output_spec) == len(output_spec_shape)
if numpy:
tile_op = np.tile
reshape_op = np.reshape
transpose_op = np.transpose
else:
tile_op = tf.tile
reshape_op = tf.reshape
transpose_op = tf.transpose
input_spec = [dim for dim in input_spec]
output_spec = [dim for dim in output_spec]
missing_dims = [dim for dim in output_spec if dim not in input_spec]
missing_shapes = [output_spec_shape[dim] for dim in missing_dims]
missing_size = np.prod(missing_shapes)
tmp_spec = missing_dims + input_spec
transpose = [tmp_spec.index(dim) for dim in output_spec]
if len(missing_dims) > 0:
# Tile and reshape, missing dims will be first
x = tile_op(x, [missing_size] + [1] * (len(input_spec) - 1))
x = reshape_op(x, missing_shapes + [output_spec_shape[dim] for dim in input_spec])
if tmp_spec != output_spec:
x = transpose_op(x, transpose)
return x
def get_coords(kernel_indices, offsets, offset_mode: str, spec_shapes: dict):
"""
Returns float coordinates in (B, S, F, C, K) shape
kernel_indices in (S, K) shape
offsets in offset_mode (any combination of dimennsions)
"""
if offsets is None:
print('DDCC1D layer used without offsets')
return expand_transform(kernel_indices, 'SK', 'BSFCK', spec_shapes)
def _coords(_kernel_indices, _offsets):
return tf.clip_by_value(_kernel_indices + _offsets, 0, int(_kernel_indices.get_shape()[0]))
out_spec = 'BSFCK'
kernel_spec = 'SK'
if offset_mode == kernel_spec or offset_mode == 'K':
return expand_transform(_coords(kernel_indices, offsets), kernel_spec, out_spec, spec_shapes)
if offset_mode == 'S':
offsets = expand_transform(offsets, offset_mode, kernel_spec, spec_shapes)
return expand_transform(_coords(kernel_indices, offsets), kernel_spec, out_spec, spec_shapes)
if offset_mode == 'BSK':
indices = expand_transform(kernel_indices, kernel_spec, offset_mode, spec_shapes)
return expand_transform(_coords(indices, offsets), offset_mode, out_spec, spec_shapes)
if offset_mode == 'SFK':
indices = expand_transform(kernel_indices, kernel_spec, offset_mode, spec_shapes)
return expand_transform(_coords(indices, offsets), offset_mode, out_spec, spec_shapes)
# Naive (most inefficient) method
indices = expand_transform(kernel_indices, 'SK', out_spec, spec_shapes)
offsets = expand_transform(offsets, offset_mode, out_spec, spec_shapes)
return _coords(indices, offsets)
def ddcconv1d(inputs: tf.Variable,
weights: tf.Variable,
offsets: tf.Variable,
dilation_rate: int = 1,
offset_mode='F',
interpolate=True,
name: str='ddcc1d'):
"""
Deformable Dilated Causal Convolution 1D
Shape dimensions notation:
B - batch size
S - sequence len
C - input channels
K - kernel size
F - filters
:param name: name of the layer
:param inputs: Input tensor of shape (B, S, C)
:param weights: Tensor of shape (F, C, K)
:param offsets: Tensof of shape (F)
:param dilation_rate: Size of receptive field gap
:param offset_mode: offset mode any combination of dimensions, like
F - one offset per filter,
FK - offset per filter and each kernel weight
BSC - offset per every timestep of each sample
:param interpolate: Use linear interpolation or just convert indices to int32
:return: Computed 1D convolutions of shape (B, S, F)
"""
with tf.variable_scope(name):
batch_size, seq_length, channels = (int(v) for v in inputs.shape)
filters, _, kernel_size = (int(v) for v in weights.shape)
spec_shapes = {
'B': batch_size,
'S': seq_length,
'F': filters,
'C': channels,
'K': kernel_size
}
# Indices stuff
with tf.variable_scope('KernelBaseIndices'):
base_indices = np.arange(seq_length).repeat(kernel_size).reshape((-1, kernel_size))
window_indices = tf.constant(base_indices, dtype=tf.float32, name='window_indices')
receptive_field = tf.constant(np.linspace(-kernel_size + 1, 0, kernel_size) * dilation_rate,
name='receptive_field',
dtype=tf.float32)
kernel_indices = window_indices + receptive_field
with tf.variable_scope('BatchIndices'):
# Create batch indices constant in BSFCK shape
batch_indices_np = expand_transform(np.arange(batch_size, dtype=np.int32), 'B', 'BSFCK', spec_shapes, numpy=True)
batch_indices = tf.constant(batch_indices_np, dtype=tf.int32, name='batch_indices')
with tf.variable_scope('ChannelIndices'):
# Create channel indices constant in BSFCK shape
channel_indices_np = expand_transform(np.arange(channels, dtype=np.int32), 'C', 'BSFCK', spec_shapes, numpy=True)
channel_indices = tf.constant(channel_indices_np, dtype=tf.int32, name='channel_indices')
with tf.variable_scope('Sampling'):
# SAMPLING IS EXTREMELY EXPENSIVE!!!!!
coords = get_coords(kernel_indices, offsets, offset_mode=offset_mode, spec_shapes=spec_shapes)
if interpolate:
# Left and right indices, e.g. index of 3.65 would be 3 on the left and 4 on the right
indices_left = tf.cast(tf.floor(coords), tf.int32)
indices_right = tf.cast(tf.ceil(coords), tf.int32)
# Calculate interpolation, for index 3.65 interpolation factor would be 0.65
interpolation = coords - tf.cast(indices_left, tf.float32)
# Sample both values (on the lef and right)
# Sample input of shape BSC with BSFCK3 indices (produced by stack) -> BSFCK for each side (left and right)
vals_left = tf.gather_nd(inputs, tf.stack((batch_indices, indices_left, channel_indices), axis=-1))
vals_right = tf.gather_nd(inputs, tf.stack((batch_indices, indices_right, channel_indices), axis=-1))
# Interpolated values
samples = vals_left + (vals_right - vals_left) * interpolation
else:
batch_idx = tf.stack((batch_indices, tf.cast(tf.floor(coords), tf.int32), channel_indices), axis=-1)
samples = tf.gather_nd(inputs, batch_idx)
with tf.variable_scope('Convolution'):
# Apply weights: BSFCK * FCK = BSFCK
conv = samples * weights
# Sum across kernel: BSFCK -> BSFC
conv = tf.reduce_sum(conv, axis=-1)
# Sum across channels: BSFC -> BSF
conv = tf.reduce_sum(conv, axis=-1)
return conv
def _transform_test():
x = np.arange(10)
spec_shape = {'B': len(x), 'T': 3, 'C': 2}
outputs = ['BTC', 'BCT', 'TBC', 'TCB', 'CBT', 'CTB']
for output_spec in outputs:
print(output_spec)
print(expand_transform(x, 'B', output_spec, spec_shape, numpy=True))
print('\n\n')
def _conv_test():
def print_var(v):
print("{0}: {1}:\n\t{2}".format(v.name, v.shape, v.eval()))
# shape: (batch_size, sequence_len, channels)
x_raw = np.array([
[[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]],
[[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]],
[[2, 2], [3, 3], [4, 4], [5, 5], [6, 6]],
])
# Filters: FCK shape
filter_weights_raw = np.array([
[[1], [0]]
])
# Filter offsets: F shape
filter_offsets_raw = np.ones(filter_weights_raw.shape[0]) * 0.5
x = tf.Variable(x_raw, name='x', dtype=tf.float32, trainable=False)
filter_weights = tf.Variable(filter_weights_raw, name='filter_weights', dtype=tf.float32)
filter_offsets = tf.Variable(filter_offsets_raw, name='filter_offsets', dtype=tf.float32)
y = ddcconv1d(x, weights=filter_weights, offsets=filter_offsets, offset_mode='F')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print_var(x)
print_var(filter_offsets)
print_var(filter_weights)
print_var(y)
def main():
#_transform_test()
_conv_test()
if __name__ == '__main__':
main() | layers/ddcconv1d.py | import tensorflow as tf
import numpy as np
def get_shape(spec: str, spec_shape: dict):
return tuple(spec_shape[dim] for dim in spec)
def expand_transform(x, input_spec: str, output_spec: str, output_spec_shape: dict, numpy=False):
assert len(output_spec) == len(output_spec_shape)
if numpy:
tile_op = np.tile
reshape_op = np.reshape
transpose_op = np.transpose
else:
tile_op = tf.tile
reshape_op = tf.reshape
transpose_op = tf.transpose
input_spec = [dim for dim in input_spec]
output_spec = [dim for dim in output_spec]
missing_dims = [dim for dim in output_spec if dim not in input_spec]
missing_shapes = [output_spec_shape[dim] for dim in missing_dims]
missing_size = np.prod(missing_shapes)
tmp_spec = missing_dims + input_spec
transpose = [tmp_spec.index(dim) for dim in output_spec]
if len(missing_dims) > 0:
# Tile and reshape, missing dims will be first
x = tile_op(x, [missing_size] + [1] * (len(input_spec) - 1))
x = reshape_op(x, missing_shapes + [output_spec_shape[dim] for dim in input_spec])
if tmp_spec != output_spec:
x = transpose_op(x, transpose)
return x
def get_coords(kernel_indices, offsets, offset_mode: str, spec_shapes: dict):
"""
Returns float coordinates in (B, S, F, C, K) shape
kernel_indices in (S, K) shape
offsets in offset_mode (any combination of dimennsions)
"""
if offsets is None:
print('DDCC1D layer used without offsets')
return expand_transform(kernel_indices, 'SK', 'BSFCK', spec_shapes)
def _coords(_kernel_indices, _offsets):
return tf.clip_by_value(_kernel_indices + _offsets, 0, int(_kernel_indices.get_shape()[0]))
out_spec = 'BSFCK'
kernel_spec = 'SK'
if offset_mode == kernel_spec or offset_mode == 'K':
return expand_transform(_coords(kernel_indices, offsets), kernel_spec, out_spec, spec_shapes)
if offset_mode == 'S':
offsets = expand_transform(offsets, offset_mode, kernel_spec, spec_shapes)
return expand_transform(_coords(kernel_indices, offsets), kernel_spec, out_spec, spec_shapes)
if offset_mode == 'BSK':
indices = expand_transform(kernel_indices, kernel_spec, offset_mode, spec_shapes)
return expand_transform(_coords(indices, offsets), offset_mode, out_spec, spec_shapes)
if offset_mode == 'SFK':
indices = expand_transform(kernel_indices, kernel_spec, offset_mode, spec_shapes)
return expand_transform(_coords(indices, offsets), offset_mode, out_spec, spec_shapes)
# Naive (most inefficient) method
indices = expand_transform(kernel_indices, 'SK', out_spec, spec_shapes)
offsets = expand_transform(offsets, offset_mode, out_spec, spec_shapes)
return _coords(indices, offsets)
def ddcconv1d(inputs: tf.Variable,
weights: tf.Variable,
offsets: tf.Variable,
dilation_rate: int = 1,
offset_mode='F',
interpolate=True,
name: str='ddcc1d'):
"""
Deformable Dilated Causal Convolution 1D
Shape dimensions notation:
B - batch size
S - sequence len
C - input channels
K - kernel size
F - filters
:param name: name of the layer
:param inputs: Input tensor of shape (B, S, C)
:param weights: Tensor of shape (F, C, K)
:param offsets: Tensof of shape (F)
:param dilation_rate: Size of receptive field gap
:param offset_mode: offset mode any combination of dimensions, like
F - one offset per filter,
FK - offset per filter and each kernel weight
BSC - offset per every timestep of each sample
:param interpolate: Use linear interpolation or just convert indices to int32
:return: Computed 1D convolutions of shape (B, S, F)
"""
with tf.variable_scope(name):
batch_size, seq_length, channels = (int(v) for v in inputs.shape)
filters, _, kernel_size = (int(v) for v in weights.shape)
spec_shapes = {
'B': batch_size,
'S': seq_length,
'F': filters,
'C': channels,
'K': kernel_size
}
# Indices stuff
with tf.variable_scope('KernelBaseIndices'):
base_indices = np.arange(seq_length).repeat(kernel_size).reshape((-1, kernel_size))
window_indices = tf.constant(base_indices, dtype=tf.float32, name='window_indices')
receptive_field = tf.constant(np.linspace(-kernel_size + 1, 0, kernel_size) * dilation_rate,
name='receptive_field',
dtype=tf.float32)
kernel_indices = window_indices + receptive_field
with tf.variable_scope('BatchIndices'):
# Create batch indices constant in BSFCK shape
batch_indices_np = expand_transform(np.arange(batch_size, dtype=np.int32), 'B', 'BSFCK', spec_shapes, numpy=True)
batch_indices = tf.constant(batch_indices_np, dtype=tf.int32, name='batch_indices')
with tf.variable_scope('ChannelIndices'):
# Create channel indices constant in BSFCK shape
channel_indices_np = expand_transform(np.arange(channels, dtype=np.int32), 'C', 'BSFCK', spec_shapes, numpy=True)
channel_indices = tf.constant(channel_indices_np, dtype=tf.int32, name='channel_indices')
with tf.variable_scope('Sampling'):
# SAMPLING IS EXTREMELY EXPENSIVE!!!!!
coords = get_coords(kernel_indices, offsets, offset_mode=offset_mode, spec_shapes=spec_shapes)
if interpolate:
# Left and right indices, e.g. index of 3.65 would be 3 on the left and 4 on the right
indices_left = tf.cast(tf.floor(coords), tf.int32)
indices_right = tf.cast(tf.ceil(coords), tf.int32)
# Calculate interpolation, for index 3.65 interpolation factor would be 0.65
interpolation = coords - tf.cast(indices_left, tf.float32)
# Sample both values (on the lef and right)
# Sample input of shape BSC with BSFCK3 indices (produced by stack) -> BSFCK for each side (left and right)
vals_left = tf.gather_nd(inputs, tf.stack((batch_indices, indices_left, channel_indices), axis=-1))
vals_right = tf.gather_nd(inputs, tf.stack((batch_indices, indices_right, channel_indices), axis=-1))
# Interpolated values
samples = vals_left + (vals_right - vals_left) * interpolation
else:
batch_idx = tf.stack((batch_indices, tf.cast(tf.floor(coords), tf.int32), channel_indices), axis=-1)
samples = tf.gather_nd(inputs, batch_idx)
with tf.variable_scope('Convolution'):
# Apply weights: BSFCK * FCK = BSFCK
conv = samples * weights
# Sum across kernel: BSFCK -> BSFC
conv = tf.reduce_sum(conv, axis=-1)
# Sum across channels: BSFC -> BSF
conv = tf.reduce_sum(conv, axis=-1)
return conv
def _transform_test():
x = np.arange(10)
spec_shape = {'B': len(x), 'T': 3, 'C': 2}
outputs = ['BTC', 'BCT', 'TBC', 'TCB', 'CBT', 'CTB']
for output_spec in outputs:
print(output_spec)
print(expand_transform(x, 'B', output_spec, spec_shape, numpy=True))
print('\n\n')
def _conv_test():
def print_var(v):
print("{0}: {1}:\n\t{2}".format(v.name, v.shape, v.eval()))
# shape: (batch_size, sequence_len, channels)
x_raw = np.array([
[[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]],
[[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]],
[[2, 2], [3, 3], [4, 4], [5, 5], [6, 6]],
])
# Filters: FCK shape
filter_weights_raw = np.array([
[[1], [0]]
])
# Filter offsets: F shape
filter_offsets_raw = np.ones(filter_weights_raw.shape[0]) * 0.5
x = tf.Variable(x_raw, name='x', dtype=tf.float32, trainable=False)
filter_weights = tf.Variable(filter_weights_raw, name='filter_weights', dtype=tf.float32)
filter_offsets = tf.Variable(filter_offsets_raw, name='filter_offsets', dtype=tf.float32)
y = ddcconv1d(x, weights=filter_weights, offsets=filter_offsets, offset_mode='F')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print_var(x)
print_var(filter_offsets)
print_var(filter_weights)
print_var(y)
def main():
#_transform_test()
_conv_test()
if __name__ == '__main__':
main() | 0.795142 | 0.629234 |
import netCDF4 as nc
import numpy as np
import os
def build_url(yyyy,mm,dd,hh):
return f'http://172.16.31.10:8080/opendap/opendap/wrf5/d03/archive/{yyyy}/{mm}/{dd}/wrf5_d03_{yyyy}{mm}{dd}Z{hh}00.nc'
def read_netcdf4_files(wrf,scan):
try:
model = nc.Dataset(wrf)
radar_scan = nc.Dataset(scan,'r')
except OSError:
print("File non trovato")
return None,None
return model,radar_scan
def regridding(model,radar_scan,output):
scan_lat = radar_scan['lat'][:,0]
scan_lon = radar_scan['lon'][0]
model_lat = model['latitude'][:]
model_lon = model['longitude'][:]
CLDFRA_TOTAL_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
DAILY_RAIN_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
DELTA_RAIN_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
DELTA_WDIR10_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
DELTA_WSPD10_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
GPH500_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
GPH850_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
HOURLY_SWE_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
MCAPE_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
RH2_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
RH300_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
RH500_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
RH700_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
RH850_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
RH950_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
SLP_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
T2C_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
TC500_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
TC850_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
U10M_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
U300_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
U500_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
U700_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
U850_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
U950_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
WDIR10_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
WSPD10_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
MODEL_CLDFRA_TOTAL = model['CLDFRA_TOTAL'][::]
MODEL_DAILY_RAIN = model['DAILY_RAIN'][::]
MODEL_DELTA_RAIN = model['DELTA_RAIN'][::]
MODEL_DELTA_WDIR10 = model['DELTA_WDIR10'][::]
MODEL_DELTA_WSPD10 = model['DELTA_WSPD10'][::]
MODEL_GPH500 = model['GPH500'][::]
MODEL_GPH850 = model['GPH850'][::]
MODEL_HOURLY_SWE = model['HOURLY_SWE'][::]
MODEL_MCAPE =model['MCAPE'][::]
MODEL_RH2 = model['RH2'][::]
MODEL_RH300 = model['RH300'][::]
MODEL_RH500 = model['RH500'][::]
MODEL_RH700 = model['RH700'][::]
MODEL_RH850 = model['RH850'][::]
MODEL_RH950 = model['RH950'][::]
MODEL_SLP = model['SLP'][::]
MODEL_T2C = model['T2C'][::]
MODEL_TC500 = model['TC500'][::]
MODEL_TC850 = model['TC850'][::]
MODEL_U10M = model['U10M'][::]
MODEL_U300 = model['U300'][::]
MODEL_U500 = model['U500'][::]
MODEL_U700 = model['U700'][::]
MODEL_U850 = model['U850'][::]
MODEL_U950 = model['U950'][::]
MODEL_WDIR10 = model['WDIR10'][::]
MODEL_WSPD10 = model['WSPD10'][::]
for i in range(len(scan_lat)):
for j in range(len(scan_lon)):
_lat = scan_lat[i]
_lon = scan_lon[j]
if _lat > model_lat[len(model_lat)-1] or _lat < model_lat[0]:
continue
if _lon < model_lon[0] or _lon > model_lon[len(model_lon)-1]:
continue
if radar_scan['reflectivity'][i,j] == -999 :
continue
opt_i = (np.abs(model_lat-_lat)).argmin()
opt_j = (np.abs(model_lon-_lon)).argmin()
CLDFRA_TOTAL_VALUES[i,j] = MODEL_CLDFRA_TOTAL[0,opt_i,opt_j]
DAILY_RAIN_VALUES[i,j] = MODEL_DAILY_RAIN[0,opt_i,opt_j]
DELTA_RAIN_VALUES[i,j] = MODEL_DELTA_RAIN[0,opt_i,opt_j]
DELTA_WDIR10_VALUES[i,j] = MODEL_DELTA_WDIR10[0,opt_i,opt_j]
DELTA_WSPD10_VALUES[i,j] = MODEL_DELTA_WSPD10[0,opt_i,opt_j]
GPH500_VALUES[i,j] = MODEL_GPH500[0,opt_i,opt_j]
GPH850_VALUES[i,j] = MODEL_GPH850[0,opt_i,opt_j]
HOURLY_SWE_VALUES[i,j] = MODEL_HOURLY_SWE[0,opt_i,opt_j]
MCAPE_VALUES[i,j] = MODEL_MCAPE[0,opt_i,opt_j]
RH2_VALUES[i,j] = MODEL_RH2[0,opt_i,opt_j]
RH300_VALUES[i,j] = MODEL_RH300[0,opt_i,opt_j]
RH500_VALUES[i,j] = MODEL_RH500[0,opt_i,opt_j]
RH700_VALUES[i,j] = MODEL_RH700[0,opt_i,opt_j]
RH850_VALUES[i,j] = MODEL_RH850[0,opt_i,opt_j]
RH950_VALUES[i,j] = MODEL_RH950[0,opt_i,opt_j]
SLP_VALUES[i,j] = MODEL_SLP[0,opt_i,opt_j]
T2C_VALUES[i,j] = MODEL_T2C[0,opt_i,opt_j]
TC500_VALUES[i,j] = MODEL_TC500[0,opt_i,opt_j]
TC850_VALUES[i,j] = MODEL_TC850[0,opt_i,opt_j]
U10M_VALUES[i,j] = MODEL_U10M[0,opt_i,opt_j]
U300_VALUES[i,j] = MODEL_U300[0,opt_i,opt_j]
U500_VALUES[i,j] = MODEL_U500[0,opt_i,opt_j]
U700_VALUES[i,j] = MODEL_U700[0,opt_i,opt_j]
U850_VALUES[i,j] = MODEL_U850[0,opt_i,opt_j]
U950_VALUES[i,j] = MODEL_U950[0,opt_i,opt_j]
WDIR10_VALUES[i,j] = MODEL_WDIR10[0,opt_i,opt_j]
WSPD10_VALUES[i,j] = MODEL_WSPD10[0,opt_i,opt_j]
aggregated_file = nc.Dataset(output, 'w', format='NETCDF4')
aggregated_file.createDimension('X' ,len(scan_lat))
aggregated_file.createDimension('Y', len(scan_lon))
lats = aggregated_file.createVariable('lat', 'f4', ('X','Y'))
lats.units = 'degree_north'
lats._CoordinateAxisType = 'Lat'
lons = aggregated_file.createVariable('lon', 'f4', ('X','Y'))
lons.units = 'degree_east'
lons._CoordinateAxisType = 'Lon'
CLDFRA_TOTAL = aggregated_file.createVariable('HGT', 'f4', ('X','Y'),fill_value=-999)
DAILY_RAIN = aggregated_file.createVariable('DAILY_RAIN', 'f4', ('X','Y'),fill_value=-999)
DELTA_RAIN = aggregated_file.createVariable('DELTA_RAIN', 'f4', ('X','Y'),fill_value=-999)
DELTA_WDIR10 = aggregated_file.createVariable('DELTA_WDIR10', 'f4', ('X','Y'),fill_value=-999)
DELTA_WSPD10 = aggregated_file.createVariable('DELTA_WSPD10', 'f4', ('X','Y'),fill_value=-999)
GPH500 = aggregated_file.createVariable('GPH500', 'f4', ('X','Y'),fill_value=-999)
GPH850 = aggregated_file.createVariable('GPH850', 'f4', ('X','Y'),fill_value=-999)
HOURLY_SWE = aggregated_file.createVariable('HOURLY_SWE', 'f4', ('X','Y'),fill_value=-999)
MCAPE = aggregated_file.createVariable('MCAPE', 'f4', ('X','Y'),fill_value=-999)
RH2 = aggregated_file.createVariable('RH2', 'f4', ('X','Y'),fill_value=-999)
RH300 = aggregated_file.createVariable('RH300', 'f4', ('X','Y'),fill_value=-999)
RH500 = aggregated_file.createVariable('RH500', 'f4', ('X','Y'),fill_value=-999)
RH700 = aggregated_file.createVariable('RH700', 'f4', ('X','Y'),fill_value=-999)
RH850 = aggregated_file.createVariable('RH850', 'f4', ('X','Y'),fill_value=-999)
RH950 = aggregated_file.createVariable('RH950', 'f4', ('X','Y'),fill_value=-999)
SLP = aggregated_file.createVariable('SLP', 'f4', ('X','Y'),fill_value=-999)
T2C = aggregated_file.createVariable('T2C', 'f4', ('X','Y'),fill_value=-999)
TC500 = aggregated_file.createVariable('TC500', 'f4', ('X','Y'),fill_value=-999)
TC850 = aggregated_file.createVariable('TC850', 'f4', ('X','Y'),fill_value=-999)
U10M = aggregated_file.createVariable('U10M', 'f4', ('X','Y'),fill_value=-999)
U300 = aggregated_file.createVariable('U300', 'f4', ('X','Y'),fill_value=-999)
U500 = aggregated_file.createVariable('U500', 'f4', ('X','Y'),fill_value=-999)
U700 = aggregated_file.createVariable('U700', 'f4', ('X','Y'),fill_value=-999)
U850 = aggregated_file.createVariable('U850', 'f4', ('X','Y'),fill_value=-999)
U950 = aggregated_file.createVariable('U950', 'f4', ('X','Y'),fill_value=-999)
WDIR10 = aggregated_file.createVariable('WDIR10', 'f4', ('X','Y'),fill_value=-999)
WSPD10 = aggregated_file.createVariable('WSPD10', 'f4', ('X','Y'),fill_value=-999)
RADAR_REFLECTIVITY = aggregated_file.createVariable('RADAR_REFLECTIVITY', 'f4', ('X','Y'),fill_value=-999)
RADAR_RAIN_RATE = aggregated_file.createVariable('RADAR_RAIN_RATE', 'f4', ('X','Y'),fill_value=-999)
lats[::] = radar_scan['lat'][::]
lons[::] = radar_scan['lon'][::]
CLDFRA_TOTAL[::] = CLDFRA_TOTAL_VALUES
DAILY_RAIN[::] = DAILY_RAIN_VALUES
DELTA_RAIN[::] = DELTA_RAIN_VALUES
DELTA_WDIR10[::] = DELTA_WDIR10_VALUES
DELTA_WSPD10[::] = DELTA_WSPD10_VALUES
GPH500[::] = GPH500_VALUES
GPH850[::] = GPH850_VALUES
HOURLY_SWE[::] = HOURLY_SWE_VALUES
MCAPE[::] = MCAPE_VALUES
RH2[::] = RH2_VALUES
RH300[::] = RH300_VALUES
RH500[::] = RH500_VALUES
RH700[::] = RH700_VALUES
RH850[::] = RH850_VALUES
RH950[::] = RH950_VALUES
T2C[::] = T2C_VALUES
TC500[::] = TC500_VALUES
TC850[::] = TC850_VALUES
U10M[::] = U10M_VALUES
U300[::] = U300_VALUES
U500[::] = U500_VALUES
U700[::] = U700_VALUES
U850[::] = U850_VALUES
U950[::] = U950_VALUES
WDIR10[::] = WDIR10_VALUES
WSPD10[::] = WSPD10_VALUES
RADAR_REFLECTIVITY[::] = radar_scan['reflectivity'][::]
RADAR_RAIN_RATE[::] = radar_scan['rain_rate'][::]
if __name__ == '__main__':
input_dir = 'STACKED'
output_dir = 'NETCDF4_DATASET'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
days = os.listdir(input_dir)
#test_wrf = 'http://172.16.31.10:8080/opendap/opendap/wrf5/d03/archive/2020/06/01/wrf5_d03_20200601Z0000.nc'
#test_scan = 'STACKED/01/NA_AV_2020-06-01 00.nc'
#scan = 'NA_AV_2020-06-01 00.nc'
#model,radar_scan = read_netcdf4_files(test_wrf,test_scan)
#regridding(model,radar_scan,os.path.join(output_dir,scan))
for dd in days:
scans = os.listdir(os.path.join(input_dir,dd))
if scans:
yyyy = scans[0][6:10]
mm = scans[0][11:13]
for scan in scans:
hh = scan[17:19]
wrf_url_nc = build_url(yyyy,mm,dd,hh)
scan_path = os.path.join(input_dir,dd,scan)
print(f'Getting data for {scan}...',end='')
model,radar_scan = read_netcdf4_files(wrf_url_nc,scan_path)
regridding(model,radar_scan,os.path.join(output_dir,scan))
print('OK') | tools/wrf_regridding.py | import netCDF4 as nc
import numpy as np
import os
def build_url(yyyy,mm,dd,hh):
return f'http://172.16.31.10:8080/opendap/opendap/wrf5/d03/archive/{yyyy}/{mm}/{dd}/wrf5_d03_{yyyy}{mm}{dd}Z{hh}00.nc'
def read_netcdf4_files(wrf,scan):
try:
model = nc.Dataset(wrf)
radar_scan = nc.Dataset(scan,'r')
except OSError:
print("File non trovato")
return None,None
return model,radar_scan
def regridding(model,radar_scan,output):
scan_lat = radar_scan['lat'][:,0]
scan_lon = radar_scan['lon'][0]
model_lat = model['latitude'][:]
model_lon = model['longitude'][:]
CLDFRA_TOTAL_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
DAILY_RAIN_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
DELTA_RAIN_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
DELTA_WDIR10_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
DELTA_WSPD10_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
GPH500_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
GPH850_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
HOURLY_SWE_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
MCAPE_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
RH2_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
RH300_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
RH500_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
RH700_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
RH850_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
RH950_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
SLP_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
T2C_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
TC500_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
TC850_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
U10M_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
U300_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
U500_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
U700_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
U850_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
U950_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
WDIR10_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
WSPD10_VALUES = np.full([len(scan_lat),len(scan_lon)],-999,dtype=np.float32)
MODEL_CLDFRA_TOTAL = model['CLDFRA_TOTAL'][::]
MODEL_DAILY_RAIN = model['DAILY_RAIN'][::]
MODEL_DELTA_RAIN = model['DELTA_RAIN'][::]
MODEL_DELTA_WDIR10 = model['DELTA_WDIR10'][::]
MODEL_DELTA_WSPD10 = model['DELTA_WSPD10'][::]
MODEL_GPH500 = model['GPH500'][::]
MODEL_GPH850 = model['GPH850'][::]
MODEL_HOURLY_SWE = model['HOURLY_SWE'][::]
MODEL_MCAPE =model['MCAPE'][::]
MODEL_RH2 = model['RH2'][::]
MODEL_RH300 = model['RH300'][::]
MODEL_RH500 = model['RH500'][::]
MODEL_RH700 = model['RH700'][::]
MODEL_RH850 = model['RH850'][::]
MODEL_RH950 = model['RH950'][::]
MODEL_SLP = model['SLP'][::]
MODEL_T2C = model['T2C'][::]
MODEL_TC500 = model['TC500'][::]
MODEL_TC850 = model['TC850'][::]
MODEL_U10M = model['U10M'][::]
MODEL_U300 = model['U300'][::]
MODEL_U500 = model['U500'][::]
MODEL_U700 = model['U700'][::]
MODEL_U850 = model['U850'][::]
MODEL_U950 = model['U950'][::]
MODEL_WDIR10 = model['WDIR10'][::]
MODEL_WSPD10 = model['WSPD10'][::]
for i in range(len(scan_lat)):
for j in range(len(scan_lon)):
_lat = scan_lat[i]
_lon = scan_lon[j]
if _lat > model_lat[len(model_lat)-1] or _lat < model_lat[0]:
continue
if _lon < model_lon[0] or _lon > model_lon[len(model_lon)-1]:
continue
if radar_scan['reflectivity'][i,j] == -999 :
continue
opt_i = (np.abs(model_lat-_lat)).argmin()
opt_j = (np.abs(model_lon-_lon)).argmin()
CLDFRA_TOTAL_VALUES[i,j] = MODEL_CLDFRA_TOTAL[0,opt_i,opt_j]
DAILY_RAIN_VALUES[i,j] = MODEL_DAILY_RAIN[0,opt_i,opt_j]
DELTA_RAIN_VALUES[i,j] = MODEL_DELTA_RAIN[0,opt_i,opt_j]
DELTA_WDIR10_VALUES[i,j] = MODEL_DELTA_WDIR10[0,opt_i,opt_j]
DELTA_WSPD10_VALUES[i,j] = MODEL_DELTA_WSPD10[0,opt_i,opt_j]
GPH500_VALUES[i,j] = MODEL_GPH500[0,opt_i,opt_j]
GPH850_VALUES[i,j] = MODEL_GPH850[0,opt_i,opt_j]
HOURLY_SWE_VALUES[i,j] = MODEL_HOURLY_SWE[0,opt_i,opt_j]
MCAPE_VALUES[i,j] = MODEL_MCAPE[0,opt_i,opt_j]
RH2_VALUES[i,j] = MODEL_RH2[0,opt_i,opt_j]
RH300_VALUES[i,j] = MODEL_RH300[0,opt_i,opt_j]
RH500_VALUES[i,j] = MODEL_RH500[0,opt_i,opt_j]
RH700_VALUES[i,j] = MODEL_RH700[0,opt_i,opt_j]
RH850_VALUES[i,j] = MODEL_RH850[0,opt_i,opt_j]
RH950_VALUES[i,j] = MODEL_RH950[0,opt_i,opt_j]
SLP_VALUES[i,j] = MODEL_SLP[0,opt_i,opt_j]
T2C_VALUES[i,j] = MODEL_T2C[0,opt_i,opt_j]
TC500_VALUES[i,j] = MODEL_TC500[0,opt_i,opt_j]
TC850_VALUES[i,j] = MODEL_TC850[0,opt_i,opt_j]
U10M_VALUES[i,j] = MODEL_U10M[0,opt_i,opt_j]
U300_VALUES[i,j] = MODEL_U300[0,opt_i,opt_j]
U500_VALUES[i,j] = MODEL_U500[0,opt_i,opt_j]
U700_VALUES[i,j] = MODEL_U700[0,opt_i,opt_j]
U850_VALUES[i,j] = MODEL_U850[0,opt_i,opt_j]
U950_VALUES[i,j] = MODEL_U950[0,opt_i,opt_j]
WDIR10_VALUES[i,j] = MODEL_WDIR10[0,opt_i,opt_j]
WSPD10_VALUES[i,j] = MODEL_WSPD10[0,opt_i,opt_j]
aggregated_file = nc.Dataset(output, 'w', format='NETCDF4')
aggregated_file.createDimension('X' ,len(scan_lat))
aggregated_file.createDimension('Y', len(scan_lon))
lats = aggregated_file.createVariable('lat', 'f4', ('X','Y'))
lats.units = 'degree_north'
lats._CoordinateAxisType = 'Lat'
lons = aggregated_file.createVariable('lon', 'f4', ('X','Y'))
lons.units = 'degree_east'
lons._CoordinateAxisType = 'Lon'
CLDFRA_TOTAL = aggregated_file.createVariable('HGT', 'f4', ('X','Y'),fill_value=-999)
DAILY_RAIN = aggregated_file.createVariable('DAILY_RAIN', 'f4', ('X','Y'),fill_value=-999)
DELTA_RAIN = aggregated_file.createVariable('DELTA_RAIN', 'f4', ('X','Y'),fill_value=-999)
DELTA_WDIR10 = aggregated_file.createVariable('DELTA_WDIR10', 'f4', ('X','Y'),fill_value=-999)
DELTA_WSPD10 = aggregated_file.createVariable('DELTA_WSPD10', 'f4', ('X','Y'),fill_value=-999)
GPH500 = aggregated_file.createVariable('GPH500', 'f4', ('X','Y'),fill_value=-999)
GPH850 = aggregated_file.createVariable('GPH850', 'f4', ('X','Y'),fill_value=-999)
HOURLY_SWE = aggregated_file.createVariable('HOURLY_SWE', 'f4', ('X','Y'),fill_value=-999)
MCAPE = aggregated_file.createVariable('MCAPE', 'f4', ('X','Y'),fill_value=-999)
RH2 = aggregated_file.createVariable('RH2', 'f4', ('X','Y'),fill_value=-999)
RH300 = aggregated_file.createVariable('RH300', 'f4', ('X','Y'),fill_value=-999)
RH500 = aggregated_file.createVariable('RH500', 'f4', ('X','Y'),fill_value=-999)
RH700 = aggregated_file.createVariable('RH700', 'f4', ('X','Y'),fill_value=-999)
RH850 = aggregated_file.createVariable('RH850', 'f4', ('X','Y'),fill_value=-999)
RH950 = aggregated_file.createVariable('RH950', 'f4', ('X','Y'),fill_value=-999)
SLP = aggregated_file.createVariable('SLP', 'f4', ('X','Y'),fill_value=-999)
T2C = aggregated_file.createVariable('T2C', 'f4', ('X','Y'),fill_value=-999)
TC500 = aggregated_file.createVariable('TC500', 'f4', ('X','Y'),fill_value=-999)
TC850 = aggregated_file.createVariable('TC850', 'f4', ('X','Y'),fill_value=-999)
U10M = aggregated_file.createVariable('U10M', 'f4', ('X','Y'),fill_value=-999)
U300 = aggregated_file.createVariable('U300', 'f4', ('X','Y'),fill_value=-999)
U500 = aggregated_file.createVariable('U500', 'f4', ('X','Y'),fill_value=-999)
U700 = aggregated_file.createVariable('U700', 'f4', ('X','Y'),fill_value=-999)
U850 = aggregated_file.createVariable('U850', 'f4', ('X','Y'),fill_value=-999)
U950 = aggregated_file.createVariable('U950', 'f4', ('X','Y'),fill_value=-999)
WDIR10 = aggregated_file.createVariable('WDIR10', 'f4', ('X','Y'),fill_value=-999)
WSPD10 = aggregated_file.createVariable('WSPD10', 'f4', ('X','Y'),fill_value=-999)
RADAR_REFLECTIVITY = aggregated_file.createVariable('RADAR_REFLECTIVITY', 'f4', ('X','Y'),fill_value=-999)
RADAR_RAIN_RATE = aggregated_file.createVariable('RADAR_RAIN_RATE', 'f4', ('X','Y'),fill_value=-999)
lats[::] = radar_scan['lat'][::]
lons[::] = radar_scan['lon'][::]
CLDFRA_TOTAL[::] = CLDFRA_TOTAL_VALUES
DAILY_RAIN[::] = DAILY_RAIN_VALUES
DELTA_RAIN[::] = DELTA_RAIN_VALUES
DELTA_WDIR10[::] = DELTA_WDIR10_VALUES
DELTA_WSPD10[::] = DELTA_WSPD10_VALUES
GPH500[::] = GPH500_VALUES
GPH850[::] = GPH850_VALUES
HOURLY_SWE[::] = HOURLY_SWE_VALUES
MCAPE[::] = MCAPE_VALUES
RH2[::] = RH2_VALUES
RH300[::] = RH300_VALUES
RH500[::] = RH500_VALUES
RH700[::] = RH700_VALUES
RH850[::] = RH850_VALUES
RH950[::] = RH950_VALUES
T2C[::] = T2C_VALUES
TC500[::] = TC500_VALUES
TC850[::] = TC850_VALUES
U10M[::] = U10M_VALUES
U300[::] = U300_VALUES
U500[::] = U500_VALUES
U700[::] = U700_VALUES
U850[::] = U850_VALUES
U950[::] = U950_VALUES
WDIR10[::] = WDIR10_VALUES
WSPD10[::] = WSPD10_VALUES
RADAR_REFLECTIVITY[::] = radar_scan['reflectivity'][::]
RADAR_RAIN_RATE[::] = radar_scan['rain_rate'][::]
if __name__ == '__main__':
input_dir = 'STACKED'
output_dir = 'NETCDF4_DATASET'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
days = os.listdir(input_dir)
#test_wrf = 'http://172.16.31.10:8080/opendap/opendap/wrf5/d03/archive/2020/06/01/wrf5_d03_20200601Z0000.nc'
#test_scan = 'STACKED/01/NA_AV_2020-06-01 00.nc'
#scan = 'NA_AV_2020-06-01 00.nc'
#model,radar_scan = read_netcdf4_files(test_wrf,test_scan)
#regridding(model,radar_scan,os.path.join(output_dir,scan))
for dd in days:
scans = os.listdir(os.path.join(input_dir,dd))
if scans:
yyyy = scans[0][6:10]
mm = scans[0][11:13]
for scan in scans:
hh = scan[17:19]
wrf_url_nc = build_url(yyyy,mm,dd,hh)
scan_path = os.path.join(input_dir,dd,scan)
print(f'Getting data for {scan}...',end='')
model,radar_scan = read_netcdf4_files(wrf_url_nc,scan_path)
regridding(model,radar_scan,os.path.join(output_dir,scan))
print('OK') | 0.153676 | 0.214177 |
import os
import cv2
from tqdm import tqdm
def crop(img_path, size=(720, 720), alignment='center'):
"""
helper function to crop an image to the given size
:param img:
:param size: (width, height)
:param center:
:return:
"""
img = cv2.imread(img_path)
if img is None:
raise ValueError("img is None! Path: {}".format(img_path))
h, w, c = img.shape
# crop height
if h > size[1]:
# compute difference
diff_h = h - size[1]
if alignment == 'center':
half_diff = int(diff_h / 2)
img = img[half_diff:size[1] + half_diff]
elif alignment == 'left':
img = img[:size[1]]
elif alignment == 'right':
start = h - size[1]
img = img[start:]
else:
raise NotImplementedError("condition {} is not implemented".format(alignment))
# crop width
if w > size[0]:
# compute difference
diff_w = w - size[0]
if alignment == 'center':
half_diff = int(diff_w / 2)
img = img[:, half_diff:size[0] + half_diff, :]
elif alignment == 'left':
img = img[:, size[0], :]
elif alignment == 'right':
start = w - size[0]
img = img[:, start:, :]
else:
raise NotImplementedError("condition {} is not implemented".format(alignment))
return img
def get_list_of_files(dirName):
"""
helper function to get all the files within a folder and its subdirectories
:param dirName:
:return:
"""
# create a list of file and sub directories
# names in the given directory
file_list = os.listdir(dirName)
all_files = list()
# Iterate over all the entries
for entry in file_list:
if 'csv' in entry or entry[0] == '.':
print("removed:", entry)
else:
# Create full path
full_path = os.path.join(dirName, entry)
# If entry is a directory then get the list of files in this directory
if os.path.isdir(full_path):
all_files = all_files + get_list_of_files(full_path)
else:
if 'jpeg' or 'jpg' in full_path:
all_files.append(full_path)
return all_files
def crop_folder(path, size=(720, 720), alignment='center'):
"""
crop all images within the path given to the dimensions given and alignment
- center: will crop around the center
- left: will crop from the left/top border
- right: will crop from the right/bottom border
:param path:
:return:
"""
# get all images
img_list = get_list_of_files(path)
# replace \\ to /
img_list = [path.replace('\\', '/') for path in img_list]
print("Number of images found:", len(img_list))
# crop all images
for im_name in tqdm(img_list):
cropped_img = crop(im_name, size, alignment)
cv2.imwrite(im_name, cropped_img)
if __name__ == "__main__":
"""
Script to crop all images within a folder.
We use it to crop the monkey avatars from 1280x720 to 720x720 so all morphing space images have the same size
usage: python -m datasets_utils.crop_image_of_dataset
"""
path = 'D:/Dataset/MorphingSpace'
# path = '/Users/michaelstettler/PycharmProjects/BVS/data/MorphingSpace/monkey_orig/MonkeyAvatar_Anger_1.0_Fear_0.0_Monkey_1.0_Human_0.0_new_ears'
# path = 'D:/Maya projects/Mery_v3.5_for_2015/images/HumanExpressions/01_Basis_expressions'
crop_folder(path) | datasets_utils/crop_image_of_dataset.py | import os
import cv2
from tqdm import tqdm
def crop(img_path, size=(720, 720), alignment='center'):
"""
helper function to crop an image to the given size
:param img:
:param size: (width, height)
:param center:
:return:
"""
img = cv2.imread(img_path)
if img is None:
raise ValueError("img is None! Path: {}".format(img_path))
h, w, c = img.shape
# crop height
if h > size[1]:
# compute difference
diff_h = h - size[1]
if alignment == 'center':
half_diff = int(diff_h / 2)
img = img[half_diff:size[1] + half_diff]
elif alignment == 'left':
img = img[:size[1]]
elif alignment == 'right':
start = h - size[1]
img = img[start:]
else:
raise NotImplementedError("condition {} is not implemented".format(alignment))
# crop width
if w > size[0]:
# compute difference
diff_w = w - size[0]
if alignment == 'center':
half_diff = int(diff_w / 2)
img = img[:, half_diff:size[0] + half_diff, :]
elif alignment == 'left':
img = img[:, size[0], :]
elif alignment == 'right':
start = w - size[0]
img = img[:, start:, :]
else:
raise NotImplementedError("condition {} is not implemented".format(alignment))
return img
def get_list_of_files(dirName):
"""
helper function to get all the files within a folder and its subdirectories
:param dirName:
:return:
"""
# create a list of file and sub directories
# names in the given directory
file_list = os.listdir(dirName)
all_files = list()
# Iterate over all the entries
for entry in file_list:
if 'csv' in entry or entry[0] == '.':
print("removed:", entry)
else:
# Create full path
full_path = os.path.join(dirName, entry)
# If entry is a directory then get the list of files in this directory
if os.path.isdir(full_path):
all_files = all_files + get_list_of_files(full_path)
else:
if 'jpeg' or 'jpg' in full_path:
all_files.append(full_path)
return all_files
def crop_folder(path, size=(720, 720), alignment='center'):
"""
crop all images within the path given to the dimensions given and alignment
- center: will crop around the center
- left: will crop from the left/top border
- right: will crop from the right/bottom border
:param path:
:return:
"""
# get all images
img_list = get_list_of_files(path)
# replace \\ to /
img_list = [path.replace('\\', '/') for path in img_list]
print("Number of images found:", len(img_list))
# crop all images
for im_name in tqdm(img_list):
cropped_img = crop(im_name, size, alignment)
cv2.imwrite(im_name, cropped_img)
if __name__ == "__main__":
"""
Script to crop all images within a folder.
We use it to crop the monkey avatars from 1280x720 to 720x720 so all morphing space images have the same size
usage: python -m datasets_utils.crop_image_of_dataset
"""
path = 'D:/Dataset/MorphingSpace'
# path = '/Users/michaelstettler/PycharmProjects/BVS/data/MorphingSpace/monkey_orig/MonkeyAvatar_Anger_1.0_Fear_0.0_Monkey_1.0_Human_0.0_new_ears'
# path = 'D:/Maya projects/Mery_v3.5_for_2015/images/HumanExpressions/01_Basis_expressions'
crop_folder(path) | 0.470737 | 0.311833 |
# Node class
class Node:
# Function to initialise the node object
def __init__(self, data):
self.data = data # Assign data
self.next = None # Initialize next as null
self.prev = None # Initialize prev as null
# Stack class contains a Node object
class Stack:
# Function to initialize head
def __init__(self):
self.head = None
# Function to add an element data in the stack
def push(self, data):
if self.head is None:
self.head = Node(data)
else:
new_node = Node(data)
self.head.prev = new_node
new_node.next = self.head
new_node.prev = None
self.head = new_node
# Function to pop top element and return the element from the stack
def pop(self):
if self.head is None:
return None
else:
temp = self.head.data
self.head = self.head.next
self.head.prev = None
return temp
# Function to return top element in the stack
def top(self):
return self.head.data
# Function to return the size of the stack
def size(self):
temp = self.head
count = 0
while temp is not None:
count = count + 1
temp = temp.next
return count
# Function to check if the stack is empty or not
def isEmpty(self):
if self.head is None:
return True
else:
return False
# Function to print the stack
def printstack(self):
print("stack elements are:")
temp = self.head
while temp is not None:
print(temp.data, end ="->")
temp = temp.next
# Code execution starts here
if __name__=='__main__':
# Start with the empty stack
stack = Stack()
# Insert 4 at the beginning. So stack becomes 4->None
print("Stack operations using Doubly LinkedList")
stack.push(4)
# Insert 5 at the beginning. So stack becomes 4->5->None
stack.push(5)
# Insert 6 at the beginning. So stack becomes 4->5->6->None
stack.push(6)
# Insert 7 at the beginning. So stack becomes 4->5->6->7->None
stack.push(7)
# Print the stack
stack.printstack()
# Print the top element
print("\nTop element is ", stack.top())
# Print the stack size
print("Size of the stack is ", stack.size())
# pop the top element
stack.pop()
# pop the top element
stack.pop()
# two elements are popped
# Print the stack
stack.printstack()
# Print True if the stack is empty else False
print("\nstack is empty:", stack.isEmpty()) | Profiles/stack_using_dll.py |
# Node class
class Node:
# Function to initialise the node object
def __init__(self, data):
self.data = data # Assign data
self.next = None # Initialize next as null
self.prev = None # Initialize prev as null
# Stack class contains a Node object
class Stack:
# Function to initialize head
def __init__(self):
self.head = None
# Function to add an element data in the stack
def push(self, data):
if self.head is None:
self.head = Node(data)
else:
new_node = Node(data)
self.head.prev = new_node
new_node.next = self.head
new_node.prev = None
self.head = new_node
# Function to pop top element and return the element from the stack
def pop(self):
if self.head is None:
return None
else:
temp = self.head.data
self.head = self.head.next
self.head.prev = None
return temp
# Function to return top element in the stack
def top(self):
return self.head.data
# Function to return the size of the stack
def size(self):
temp = self.head
count = 0
while temp is not None:
count = count + 1
temp = temp.next
return count
# Function to check if the stack is empty or not
def isEmpty(self):
if self.head is None:
return True
else:
return False
# Function to print the stack
def printstack(self):
print("stack elements are:")
temp = self.head
while temp is not None:
print(temp.data, end ="->")
temp = temp.next
# Code execution starts here
if __name__=='__main__':
# Start with the empty stack
stack = Stack()
# Insert 4 at the beginning. So stack becomes 4->None
print("Stack operations using Doubly LinkedList")
stack.push(4)
# Insert 5 at the beginning. So stack becomes 4->5->None
stack.push(5)
# Insert 6 at the beginning. So stack becomes 4->5->6->None
stack.push(6)
# Insert 7 at the beginning. So stack becomes 4->5->6->7->None
stack.push(7)
# Print the stack
stack.printstack()
# Print the top element
print("\nTop element is ", stack.top())
# Print the stack size
print("Size of the stack is ", stack.size())
# pop the top element
stack.pop()
# pop the top element
stack.pop()
# two elements are popped
# Print the stack
stack.printstack()
# Print True if the stack is empty else False
print("\nstack is empty:", stack.isEmpty()) | 0.354321 | 0.157396 |
"""File for base geometry class built using the Geomdl class"""
import numpy as np
class Geometry2D:
'''
Base class for 2D domains
Input: geomData - dictionary containing the geomety information
Keys: degree_u, degree_v: polynomial degree in the u and v directions
ctrlpts_size_u, ctrlpts_size_v: number of control points in u,v directions
ctrlpts: weighted control points (in a list with
ctrlpts_size_u*ctrlpts_size_v rows and 3 columns for x,y,z coordinates)
weights: correspond weights (list with ctrlpts_size_u*ctrlpts_size_v entries)
knotvector_u, knotvector_v: knot vectors in the u and v directions
'''
def __init__(self, geomData):
self.degree_u = geomData['degree_u']
self.degree_v = geomData['degree_v']
self.ctrlpts_size_u = geomData['ctrlpts_size_u']
self.ctrlpts_size_v = geomData['ctrlpts_size_v']
self.ctrlpts = self.getUnweightedCpts(geomData['ctrlpts'], geomData['weights'])
self.weights = geomData['weights']
self.knotvector_u = geomData['knotvector_u']
self.knotvector_v = geomData['knotvector_v']
def getUnweightedCpts(self, ctrlpts, weights):
numCtrlPts = np.shape(ctrlpts)[0]
PctrlPts = np.zeros_like(ctrlpts)
for i in range(2):
for j in range(numCtrlPts):
PctrlPts[j,i]=ctrlpts[j][i]/weights[j]
# PctrlPts = PctrlPts.tolist()
return PctrlPts
def mapPoints(self, uPar, vPar):
'''
Map points from the parameter domain [0,1]x[0,1] to the quadrilater domain
Input: uPar - array containing the u-coordinates in the parameter space
vPar - array containing the v-coordinates in the parameter space
Note: the arrays uPar and vPar must be of the same size
Output: xPhys - array containing the x-coordinates in the physical space
yPhys - array containing the y-coordinates in the physical space
'''
gpParamUV = np.array([uPar, vPar])
evalList = tuple(map(tuple, gpParamUV.transpose()))
res = np.array(self.surf.evaluate_list(evalList))
return res
def bezierExtraction(self, knot, deg):
'''
Bezier extraction
Based on Algorithm 1, from Borden - Isogeometric finite element data
structures based on Bezier extraction
'''
m = len(knot)-deg-1
a = deg + 1
b = a + 1
# Initialize C with the number of non-zero knotspans in the 3rd dimension
nb_final = len(np.unique(knot))-1
C = np.zeros((deg+1,deg+1,nb_final))
nb = 1
C[:,:,0] = np.eye(deg + 1)
while b <= m:
C[:,:,nb] = np.eye(deg + 1)
i = b
while (b <= m) and (knot[b] == knot[b-1]):
b = b+1
multiplicity = b-i+1
alphas = np.zeros(deg-multiplicity)
if (multiplicity < deg):
numerator = knot[b-1] - knot[a-1]
for j in range(deg,multiplicity,-1):
alphas[j-multiplicity-1] = numerator/(knot[a+j-1]-knot[a-1])
r = deg - multiplicity
for j in range(1,r+1):
save = r-j+1
s = multiplicity + j
for k in range(deg+1,s,-1):
alpha = alphas[k-s-1]
C[:,k-1,nb-1] = alpha*C[:,k-1,nb-1] + (1-alpha)*C[:,k-2,nb-1]
if b <= m:
C[save-1:save+j,save-1,nb] = C[deg-j:deg+1,deg,nb-1]
nb=nb+1
if b <= m:
a=b
b=b+1
elif multiplicity==deg:
if b <= m:
nb = nb + 1
a = b
b = b + 1
assert(nb==nb_final)
return C, nb
def computeC(self):
knotU = self.knotvector_u
knotV = self.knotvector_v
degU = self.degree_u
degV = self.degree_v
C_u, nb = self.bezierExtraction(knotU, degU)
C_v, nb = self.bezierExtraction(knotV, degV)
numElemU = len(np.unique(knotU)) - 1
numElemV = len(np.unique(knotV)) - 1
basisU = len(knotU) - degU - 1
nument = (degU+1)*(degV+1)
elemInfo = dict()
elemInfo['vertex'] = []
elemInfo['nodes'] = []
elemInfo['C'] = []
for j in range (0, len(knotV)-1):
for i in range (0, len(knotU)-1):
if ((knotU[i+1] > knotU[i]) and (knotV[j+1] > knotV[j])):
vertices = np.array([knotU[i], knotV[j], knotU[i+1], knotV[j+1]])
elemInfo['vertex'].append(vertices)
currow = np.array([np.zeros(nument)])
tcount = 0
for t2 in range(j+1-degV,j+2):
for t1 in range(i+1-degU,i+2):
currow[0,tcount] = t1 + (t2-1)*basisU
tcount = tcount + 1
elemInfo['nodes'].append(currow)
for j in range (0, numElemV):
for i in range (0, numElemU):
cElem = np.kron(C_v[:,:,j],C_u[:,:,i])
elemInfo['C'].append(cElem)
return elemInfo
def bernsteinBasis(self,xi, deg):
'''
Algorithm A1.3 in Piegl & Tiller
xi is a 1D array
'''
B = np.zeros((len(xi),deg+1))
B[:,0] = 1.0
u1 = 1-xi
u2 = 1+xi
for j in range(1,deg+1):
saved = 0.0
for k in range(0,j):
temp = B[:,k].copy()
B[:,k] = saved + u1*temp
saved = u2*temp
B[:,j] = saved
B = B/np.power(2,deg)
dB = np.zeros((len(xi),deg))
dB[:,0] = 1.0
for j in range(1,deg):
saved = 0.0
for k in range(0,j):
temp = dB[:,k].copy()
dB[:,k] = saved + u1*temp
saved = u2*temp
dB[:,j] = saved
dB = dB/np.power(2,deg)
dB0 = np.transpose(np.array([np.zeros(len(xi))]))
dB = np.concatenate((dB0, dB, dB0), axis=1)
dB = (dB[:,0:-1] - dB[:,1:])*deg
return B, dB
def findspan(self, uCoord, vCoord):
'''Generates the element number on which the co-ordinate is located'''
knotU = self.knotvector_u
knotV = self.knotvector_v
counter = 0
for j in range (0, len(knotV)-1):
for i in range (0, len(knotU)-1):
if ((knotU[i+1] > knotU[i]) and (knotV[j+1] > knotV[j])):
if ((uCoord > knotU[i]) and (uCoord < knotU[i+1]) and (vCoord > knotV[j]) and (vCoord < knotV[j+1])):
elmtNum = counter
break
counter = counter + 1
return elmtNum
def getDerivatives(self, uCoord, vCoord, elmtNo):
'''
Generate physical points and jacobians for parameter points inside the domain
Assume there is one element in the parameter space
Input: uCoord, vCoord: Inputs the co-odinates of the Gauss points in the parameter space.
Output: xPhys, yPhys, ptJac - Generates the co-ordinates in the physical space and the jacobian
'''
curVertex = self.vertex[elmtNo]
cElem = self.C[elmtNo]
curNodes = np.int32(self.nodes[elmtNo])-1 # Python indexing starts from 0
curPts = np.squeeze(self.ctrlpts[curNodes,0:2])
wgts = np.transpose(np.array([np.squeeze(self.weights[curNodes,0:1])]))
# Get the Gauss points on the reference interval [-1,1]
uMax = curVertex[2]
uMin = curVertex[0]
vMax = curVertex[3]
vMin = curVertex[1]
uHatCoord = (2*uCoord - (uMax+uMin))/(uMax-uMin)
vHatCoord = (2*vCoord - (vMax+vMin))/(vMax-vMin)
degU = self.degree_u
degV = self.degree_v
B_u, dB_u = self.bernsteinBasis(uHatCoord,degU)
B_v, dB_v = self.bernsteinBasis(vHatCoord,degV)
numGauss = len(uCoord)
B_u, dB_u = self.bernsteinBasis(uHatCoord,degU)
B_v, dB_v = self.bernsteinBasis(vHatCoord,degV)
# Computing the Bernstein polynomials in 2D
dBdu = np.zeros((numGauss, numGauss, (degU+1)*(degV+1)))
dBdv = np.zeros((numGauss, numGauss, (degU+1)*(degV+1)))
R = np.zeros((numGauss, numGauss, (degU+1)*(degV+1)))
counter = 0
for j in range(0,degV+1):
for i in range(0,degU+1):
R[:,:,counter] = np.outer(B_u[:,i], B_v[:,j])
dBdu[:,:,counter] = np.outer(dB_u[:,i],B_v[:,j])
dBdv[:,:,counter] = np.outer(B_u[:,i],dB_v[:,j])
counter = counter + 1
quadPts = np.zeros((3))
# Map the points to the physical space
for jPt in range(0,numGauss):
for iPt in range(0,numGauss):
dRdx = np.matmul(cElem,np.transpose(np.array([dBdu[iPt,jPt,:]])))*2/(uMax-uMin)
dRdy = np.matmul(cElem,np.transpose(np.array([dBdv[iPt,jPt,:]])))*2/(vMax-vMin)
RR = np.matmul(cElem,np.transpose(np.array([R[iPt,jPt,:]])))
RR = RR*wgts
dRdx = dRdx*wgts
dRdy = dRdy*wgts
w_sum = np.sum(RR, axis=0)
dw_xi = np.sum(dRdx, axis=0)
dw_eta = np.sum(dRdy, axis=0)
dRdx = dRdx/w_sum - RR*dw_xi/np.power(w_sum,2)
dRdy = dRdy/w_sum - RR*dw_eta/np.power(w_sum,2)
RR = RR/w_sum;
dR = np.concatenate((dRdx.T,dRdy.T),axis=0)
dxdxi = np.matmul(dR,curPts)
coord = np.matmul(np.array([R[iPt,jPt,:]]),curPts)
detJac = np.absolute(np.linalg.det(dxdxi))
quadPts[0] = coord[0,0]
quadPts[1] = coord[0,1]
quadPts[2] = detJac
xPhys = quadPts[0]
yPhys = quadPts[1]
ptJac = quadPts[2]
return xPhys, yPhys, ptJac
def genElemList(self, numElemU, numElemV):
'''
Generate the element (vertex) list for an initial (uniform)
subdivision mesh
Input: numElemU, numElemV - number of subdivisions in the u and v
directions in the parameter space
Output: vertex - arrays containing the element vertices + initial level (=0)
'''
vertex = np.zeros((numElemU*numElemV, 5))
#generate the knots on the interval [0,1]
uEdge = np.linspace(0, 1, numElemU+1)
vEdge = np.linspace(0, 1, numElemV+1)
uPar, vPar = np.meshgrid(uEdge, vEdge)
counterElem = 0
initalLevel = 0
# Generate points for each element
for iV in range(numElemV):
for iU in range(numElemU):
uMin = uPar[iV, iU]
uMax = uPar[iV, iU+1]
vMin = vPar[iV, iU]
vMax = vPar[iV+1, iU]
vertex[counterElem, 0] = uMin
vertex[counterElem, 1] = vMin
vertex[counterElem, 2] = uMax
vertex[counterElem, 3] = vMax
vertex[counterElem, 4] = initalLevel
counterElem = counterElem + 1
return vertex
def getElemIntPts(self, elemList, numGauss):
'''
Generate quadrature points inside the domain
Input: elemList - contains the vertices of the elements the refined elements
numGauss - number of Gauss quadrature points for each subdivision
Output: xPhys, yPhys, wgtPhy - arrays containing the x and y coordinates
of the points and the corresponding weights
'''
# Allocate quadPts array
quadPts = np.zeros((elemList.shape[0]*numGauss**2, 3))
# Get the Gauss points on the reference interval [-1,1]
gp, gw = np.polynomial.legendre.leggauss(numGauss)
# Get the Gauss weights on the reference element [-1, 1]x[-1,1]
gpWeightU, gpWeightV = np.meshgrid(gw, gw)
gpWeightUV = np.array(gpWeightU.flatten()*gpWeightV.flatten())
elemInfo = self.computeC()
self.C = elemInfo['C']
self.nodes = elemInfo['nodes']
self.vertex = elemInfo['vertex']
# Generate points for each element
indexPt = 0
for iElem in range(elemList.shape[0]):
uMin = elemList[iElem,0]
uMax = elemList[iElem,2]
vMin = elemList[iElem,1]
vMax = elemList[iElem,3]
gpParamU = (uMax-uMin)/2*gp+(uMax+uMin)/2
gpParamV = (vMax-vMin)/2*gp+(vMax+vMin)/2
gpParamUg, gpParamVg = np.meshgrid(gpParamU, gpParamV)
gpParamUV = np.array([gpParamUg.flatten(), gpParamVg.flatten()])
# Jacobian of the transformation from the reference element [-1,1]
scaleFac = (uMax-uMin)*(vMax-vMin)/4
# Map the points to the physical space
for iPt in range(numGauss**2):
curPtU = np.array([gpParamUV[0, iPt]])
curPtV = np.array([gpParamUV[1, iPt]])
elmtNo = self.findspan(curPtU, curPtV)
physPtX, physPtY, ptJac = self.getDerivatives(curPtU, curPtV, elmtNo)
quadPts[indexPt, 0] = physPtX
quadPts[indexPt, 1] = physPtY
quadPts[indexPt, 2] = scaleFac * ptJac * gpWeightUV[iPt]
indexPt = indexPt + 1
xPhys = quadPts[:, 0:1]
yPhys = quadPts[:, 1:2]
wgtPhys = quadPts[:, 2:3]
return xPhys, yPhys, wgtPhys
def refineElemVertex2D(vertex, refList):
# Refines the elements in vertex with indices given by refList by splitting
# each element into 4 subdivisions
# Input: vertex - array of vertices in format [umin, vmin, umax, vmax]
# refList - list of element indices to be refined
# Output: newVertex - refined list of vertices
numRef = len(refList)
newVertex = np.zeros((4*numRef,5))
for i in range(numRef):
elemIndex = refList[i]
uMin = vertex[elemIndex, 0]
vMin = vertex[elemIndex, 1]
uMax = vertex[elemIndex, 2]
vMax = vertex[elemIndex, 3]
level = vertex[elemIndex, 4]
uMid = (uMin+uMax)/2
vMid = (vMin+vMax)/2
newVertex[4*i, :] = [uMin, vMin, uMid, vMid, level+1]
newVertex[4*i+1, :] = [uMid, vMin, uMax, vMid, level+1]
newVertex[4*i+2, :] = [uMin, vMid, uMid, vMax, level+1]
newVertex[4*i+3, :] = [uMid, vMid, uMax, vMax, level+1]
vertex = np.delete(vertex, refList, axis=0)
newVertex = np.concatenate((vertex, newVertex),axis=0)
return newVertex
def refineElemRegionY2D(vertex, refYmin, refYmax):
# Refines the region bounded by refYmin < y < refYmax
# Input: vertex - array of vertices in format [umin, vmin, umax, vmax]
# refYmin - lower bound of the refinement region
# refYmax - upper bound of the refinement region
# Output: newVertex - new list of vertices
tol = 1e-4 #tolerance for equality
refYmax = refYmax+tol
refYmin = refYmin-tol
index_ref = []
for iVertex in range(0,vertex.shape[0]):
if (vertex[iVertex,1] >= refYmin) and (vertex[iVertex,3] <= refYmax):
index_ref.append(iVertex)
newVertex = refineElemVertex2D(vertex, index_ref)
return newVertex | tf1/tensorflow_DEM/Phase Field/utils/BezExtr.py | """File for base geometry class built using the Geomdl class"""
import numpy as np
class Geometry2D:
'''
Base class for 2D domains
Input: geomData - dictionary containing the geomety information
Keys: degree_u, degree_v: polynomial degree in the u and v directions
ctrlpts_size_u, ctrlpts_size_v: number of control points in u,v directions
ctrlpts: weighted control points (in a list with
ctrlpts_size_u*ctrlpts_size_v rows and 3 columns for x,y,z coordinates)
weights: correspond weights (list with ctrlpts_size_u*ctrlpts_size_v entries)
knotvector_u, knotvector_v: knot vectors in the u and v directions
'''
def __init__(self, geomData):
self.degree_u = geomData['degree_u']
self.degree_v = geomData['degree_v']
self.ctrlpts_size_u = geomData['ctrlpts_size_u']
self.ctrlpts_size_v = geomData['ctrlpts_size_v']
self.ctrlpts = self.getUnweightedCpts(geomData['ctrlpts'], geomData['weights'])
self.weights = geomData['weights']
self.knotvector_u = geomData['knotvector_u']
self.knotvector_v = geomData['knotvector_v']
def getUnweightedCpts(self, ctrlpts, weights):
numCtrlPts = np.shape(ctrlpts)[0]
PctrlPts = np.zeros_like(ctrlpts)
for i in range(2):
for j in range(numCtrlPts):
PctrlPts[j,i]=ctrlpts[j][i]/weights[j]
# PctrlPts = PctrlPts.tolist()
return PctrlPts
def mapPoints(self, uPar, vPar):
'''
Map points from the parameter domain [0,1]x[0,1] to the quadrilater domain
Input: uPar - array containing the u-coordinates in the parameter space
vPar - array containing the v-coordinates in the parameter space
Note: the arrays uPar and vPar must be of the same size
Output: xPhys - array containing the x-coordinates in the physical space
yPhys - array containing the y-coordinates in the physical space
'''
gpParamUV = np.array([uPar, vPar])
evalList = tuple(map(tuple, gpParamUV.transpose()))
res = np.array(self.surf.evaluate_list(evalList))
return res
def bezierExtraction(self, knot, deg):
'''
Bezier extraction
Based on Algorithm 1, from Borden - Isogeometric finite element data
structures based on Bezier extraction
'''
m = len(knot)-deg-1
a = deg + 1
b = a + 1
# Initialize C with the number of non-zero knotspans in the 3rd dimension
nb_final = len(np.unique(knot))-1
C = np.zeros((deg+1,deg+1,nb_final))
nb = 1
C[:,:,0] = np.eye(deg + 1)
while b <= m:
C[:,:,nb] = np.eye(deg + 1)
i = b
while (b <= m) and (knot[b] == knot[b-1]):
b = b+1
multiplicity = b-i+1
alphas = np.zeros(deg-multiplicity)
if (multiplicity < deg):
numerator = knot[b-1] - knot[a-1]
for j in range(deg,multiplicity,-1):
alphas[j-multiplicity-1] = numerator/(knot[a+j-1]-knot[a-1])
r = deg - multiplicity
for j in range(1,r+1):
save = r-j+1
s = multiplicity + j
for k in range(deg+1,s,-1):
alpha = alphas[k-s-1]
C[:,k-1,nb-1] = alpha*C[:,k-1,nb-1] + (1-alpha)*C[:,k-2,nb-1]
if b <= m:
C[save-1:save+j,save-1,nb] = C[deg-j:deg+1,deg,nb-1]
nb=nb+1
if b <= m:
a=b
b=b+1
elif multiplicity==deg:
if b <= m:
nb = nb + 1
a = b
b = b + 1
assert(nb==nb_final)
return C, nb
def computeC(self):
knotU = self.knotvector_u
knotV = self.knotvector_v
degU = self.degree_u
degV = self.degree_v
C_u, nb = self.bezierExtraction(knotU, degU)
C_v, nb = self.bezierExtraction(knotV, degV)
numElemU = len(np.unique(knotU)) - 1
numElemV = len(np.unique(knotV)) - 1
basisU = len(knotU) - degU - 1
nument = (degU+1)*(degV+1)
elemInfo = dict()
elemInfo['vertex'] = []
elemInfo['nodes'] = []
elemInfo['C'] = []
for j in range (0, len(knotV)-1):
for i in range (0, len(knotU)-1):
if ((knotU[i+1] > knotU[i]) and (knotV[j+1] > knotV[j])):
vertices = np.array([knotU[i], knotV[j], knotU[i+1], knotV[j+1]])
elemInfo['vertex'].append(vertices)
currow = np.array([np.zeros(nument)])
tcount = 0
for t2 in range(j+1-degV,j+2):
for t1 in range(i+1-degU,i+2):
currow[0,tcount] = t1 + (t2-1)*basisU
tcount = tcount + 1
elemInfo['nodes'].append(currow)
for j in range (0, numElemV):
for i in range (0, numElemU):
cElem = np.kron(C_v[:,:,j],C_u[:,:,i])
elemInfo['C'].append(cElem)
return elemInfo
def bernsteinBasis(self,xi, deg):
'''
Algorithm A1.3 in Piegl & Tiller
xi is a 1D array
'''
B = np.zeros((len(xi),deg+1))
B[:,0] = 1.0
u1 = 1-xi
u2 = 1+xi
for j in range(1,deg+1):
saved = 0.0
for k in range(0,j):
temp = B[:,k].copy()
B[:,k] = saved + u1*temp
saved = u2*temp
B[:,j] = saved
B = B/np.power(2,deg)
dB = np.zeros((len(xi),deg))
dB[:,0] = 1.0
for j in range(1,deg):
saved = 0.0
for k in range(0,j):
temp = dB[:,k].copy()
dB[:,k] = saved + u1*temp
saved = u2*temp
dB[:,j] = saved
dB = dB/np.power(2,deg)
dB0 = np.transpose(np.array([np.zeros(len(xi))]))
dB = np.concatenate((dB0, dB, dB0), axis=1)
dB = (dB[:,0:-1] - dB[:,1:])*deg
return B, dB
def findspan(self, uCoord, vCoord):
'''Generates the element number on which the co-ordinate is located'''
knotU = self.knotvector_u
knotV = self.knotvector_v
counter = 0
for j in range (0, len(knotV)-1):
for i in range (0, len(knotU)-1):
if ((knotU[i+1] > knotU[i]) and (knotV[j+1] > knotV[j])):
if ((uCoord > knotU[i]) and (uCoord < knotU[i+1]) and (vCoord > knotV[j]) and (vCoord < knotV[j+1])):
elmtNum = counter
break
counter = counter + 1
return elmtNum
def getDerivatives(self, uCoord, vCoord, elmtNo):
'''
Generate physical points and jacobians for parameter points inside the domain
Assume there is one element in the parameter space
Input: uCoord, vCoord: Inputs the co-odinates of the Gauss points in the parameter space.
Output: xPhys, yPhys, ptJac - Generates the co-ordinates in the physical space and the jacobian
'''
curVertex = self.vertex[elmtNo]
cElem = self.C[elmtNo]
curNodes = np.int32(self.nodes[elmtNo])-1 # Python indexing starts from 0
curPts = np.squeeze(self.ctrlpts[curNodes,0:2])
wgts = np.transpose(np.array([np.squeeze(self.weights[curNodes,0:1])]))
# Get the Gauss points on the reference interval [-1,1]
uMax = curVertex[2]
uMin = curVertex[0]
vMax = curVertex[3]
vMin = curVertex[1]
uHatCoord = (2*uCoord - (uMax+uMin))/(uMax-uMin)
vHatCoord = (2*vCoord - (vMax+vMin))/(vMax-vMin)
degU = self.degree_u
degV = self.degree_v
B_u, dB_u = self.bernsteinBasis(uHatCoord,degU)
B_v, dB_v = self.bernsteinBasis(vHatCoord,degV)
numGauss = len(uCoord)
B_u, dB_u = self.bernsteinBasis(uHatCoord,degU)
B_v, dB_v = self.bernsteinBasis(vHatCoord,degV)
# Computing the Bernstein polynomials in 2D
dBdu = np.zeros((numGauss, numGauss, (degU+1)*(degV+1)))
dBdv = np.zeros((numGauss, numGauss, (degU+1)*(degV+1)))
R = np.zeros((numGauss, numGauss, (degU+1)*(degV+1)))
counter = 0
for j in range(0,degV+1):
for i in range(0,degU+1):
R[:,:,counter] = np.outer(B_u[:,i], B_v[:,j])
dBdu[:,:,counter] = np.outer(dB_u[:,i],B_v[:,j])
dBdv[:,:,counter] = np.outer(B_u[:,i],dB_v[:,j])
counter = counter + 1
quadPts = np.zeros((3))
# Map the points to the physical space
for jPt in range(0,numGauss):
for iPt in range(0,numGauss):
dRdx = np.matmul(cElem,np.transpose(np.array([dBdu[iPt,jPt,:]])))*2/(uMax-uMin)
dRdy = np.matmul(cElem,np.transpose(np.array([dBdv[iPt,jPt,:]])))*2/(vMax-vMin)
RR = np.matmul(cElem,np.transpose(np.array([R[iPt,jPt,:]])))
RR = RR*wgts
dRdx = dRdx*wgts
dRdy = dRdy*wgts
w_sum = np.sum(RR, axis=0)
dw_xi = np.sum(dRdx, axis=0)
dw_eta = np.sum(dRdy, axis=0)
dRdx = dRdx/w_sum - RR*dw_xi/np.power(w_sum,2)
dRdy = dRdy/w_sum - RR*dw_eta/np.power(w_sum,2)
RR = RR/w_sum;
dR = np.concatenate((dRdx.T,dRdy.T),axis=0)
dxdxi = np.matmul(dR,curPts)
coord = np.matmul(np.array([R[iPt,jPt,:]]),curPts)
detJac = np.absolute(np.linalg.det(dxdxi))
quadPts[0] = coord[0,0]
quadPts[1] = coord[0,1]
quadPts[2] = detJac
xPhys = quadPts[0]
yPhys = quadPts[1]
ptJac = quadPts[2]
return xPhys, yPhys, ptJac
def genElemList(self, numElemU, numElemV):
'''
Generate the element (vertex) list for an initial (uniform)
subdivision mesh
Input: numElemU, numElemV - number of subdivisions in the u and v
directions in the parameter space
Output: vertex - arrays containing the element vertices + initial level (=0)
'''
vertex = np.zeros((numElemU*numElemV, 5))
#generate the knots on the interval [0,1]
uEdge = np.linspace(0, 1, numElemU+1)
vEdge = np.linspace(0, 1, numElemV+1)
uPar, vPar = np.meshgrid(uEdge, vEdge)
counterElem = 0
initalLevel = 0
# Generate points for each element
for iV in range(numElemV):
for iU in range(numElemU):
uMin = uPar[iV, iU]
uMax = uPar[iV, iU+1]
vMin = vPar[iV, iU]
vMax = vPar[iV+1, iU]
vertex[counterElem, 0] = uMin
vertex[counterElem, 1] = vMin
vertex[counterElem, 2] = uMax
vertex[counterElem, 3] = vMax
vertex[counterElem, 4] = initalLevel
counterElem = counterElem + 1
return vertex
def getElemIntPts(self, elemList, numGauss):
'''
Generate quadrature points inside the domain
Input: elemList - contains the vertices of the elements the refined elements
numGauss - number of Gauss quadrature points for each subdivision
Output: xPhys, yPhys, wgtPhy - arrays containing the x and y coordinates
of the points and the corresponding weights
'''
# Allocate quadPts array
quadPts = np.zeros((elemList.shape[0]*numGauss**2, 3))
# Get the Gauss points on the reference interval [-1,1]
gp, gw = np.polynomial.legendre.leggauss(numGauss)
# Get the Gauss weights on the reference element [-1, 1]x[-1,1]
gpWeightU, gpWeightV = np.meshgrid(gw, gw)
gpWeightUV = np.array(gpWeightU.flatten()*gpWeightV.flatten())
elemInfo = self.computeC()
self.C = elemInfo['C']
self.nodes = elemInfo['nodes']
self.vertex = elemInfo['vertex']
# Generate points for each element
indexPt = 0
for iElem in range(elemList.shape[0]):
uMin = elemList[iElem,0]
uMax = elemList[iElem,2]
vMin = elemList[iElem,1]
vMax = elemList[iElem,3]
gpParamU = (uMax-uMin)/2*gp+(uMax+uMin)/2
gpParamV = (vMax-vMin)/2*gp+(vMax+vMin)/2
gpParamUg, gpParamVg = np.meshgrid(gpParamU, gpParamV)
gpParamUV = np.array([gpParamUg.flatten(), gpParamVg.flatten()])
# Jacobian of the transformation from the reference element [-1,1]
scaleFac = (uMax-uMin)*(vMax-vMin)/4
# Map the points to the physical space
for iPt in range(numGauss**2):
curPtU = np.array([gpParamUV[0, iPt]])
curPtV = np.array([gpParamUV[1, iPt]])
elmtNo = self.findspan(curPtU, curPtV)
physPtX, physPtY, ptJac = self.getDerivatives(curPtU, curPtV, elmtNo)
quadPts[indexPt, 0] = physPtX
quadPts[indexPt, 1] = physPtY
quadPts[indexPt, 2] = scaleFac * ptJac * gpWeightUV[iPt]
indexPt = indexPt + 1
xPhys = quadPts[:, 0:1]
yPhys = quadPts[:, 1:2]
wgtPhys = quadPts[:, 2:3]
return xPhys, yPhys, wgtPhys
def refineElemVertex2D(vertex, refList):
# Refines the elements in vertex with indices given by refList by splitting
# each element into 4 subdivisions
# Input: vertex - array of vertices in format [umin, vmin, umax, vmax]
# refList - list of element indices to be refined
# Output: newVertex - refined list of vertices
numRef = len(refList)
newVertex = np.zeros((4*numRef,5))
for i in range(numRef):
elemIndex = refList[i]
uMin = vertex[elemIndex, 0]
vMin = vertex[elemIndex, 1]
uMax = vertex[elemIndex, 2]
vMax = vertex[elemIndex, 3]
level = vertex[elemIndex, 4]
uMid = (uMin+uMax)/2
vMid = (vMin+vMax)/2
newVertex[4*i, :] = [uMin, vMin, uMid, vMid, level+1]
newVertex[4*i+1, :] = [uMid, vMin, uMax, vMid, level+1]
newVertex[4*i+2, :] = [uMin, vMid, uMid, vMax, level+1]
newVertex[4*i+3, :] = [uMid, vMid, uMax, vMax, level+1]
vertex = np.delete(vertex, refList, axis=0)
newVertex = np.concatenate((vertex, newVertex),axis=0)
return newVertex
def refineElemRegionY2D(vertex, refYmin, refYmax):
# Refines the region bounded by refYmin < y < refYmax
# Input: vertex - array of vertices in format [umin, vmin, umax, vmax]
# refYmin - lower bound of the refinement region
# refYmax - upper bound of the refinement region
# Output: newVertex - new list of vertices
tol = 1e-4 #tolerance for equality
refYmax = refYmax+tol
refYmin = refYmin-tol
index_ref = []
for iVertex in range(0,vertex.shape[0]):
if (vertex[iVertex,1] >= refYmin) and (vertex[iVertex,3] <= refYmax):
index_ref.append(iVertex)
newVertex = refineElemVertex2D(vertex, index_ref)
return newVertex | 0.607663 | 0.707482 |
import json
import sys
def s(_):
return '"' + _ + '"'
def i(_):
return str(_)
def b(_):
if _:
return "true"
else:
return "false"
def enc(typ, val):
if typ == "int":
return i(val)
elif typ == "string":
return s(val)
elif typ == "bool":
return b(val)
elif typ == "variable":
return val
def inst_decl(decl):
code_str = ""
if decl['declare'] == "variable":
code_str += decl['type'] + " " + decl['name']
if 'value' in decl:
code_str += " = " + enc(decl['type'], decl['value']) + ";"
else:
code_str += ";"
elif decl['declare'] == "function":
code_str += interpret_function(decl)
elif decl['declare'] == "array":
code_str += decl['type'] + " " + decl['name'] + "[] = {"
if 'value' in decl:
if decl['type'] == "int":
for element in decl['value']:
code_str += i(element) + ", "
elif decl['type'] == "string":
for element in decl['value']:
code_str += s(element) + ", "
elif decl['type'] == "bool":
for element in decl['value']:
code_str += b(element) + ", "
code_str = code_str[:len(code_str) - 2]
code_str += "};"
elif decl['declare'] == "object":
code_str += interpret_object(decl)
return code_str
def inst_call(inst):
code_str = inst['call'] + "("
if inst['parameters']:
for param in inst['parameters']:
code_str += enc(list(param.keys())[0], param[list(param.keys())[0]]) + ", "
code_str = code_str[:len(code_str) - 2]
code_str += ");"
return code_str
def inst_print(inst):
code_str = "cout << std::boolalpha << "
for param in inst['print']:
code_str += enc(list(param.keys())[0], param[list(param.keys())[0]])
code_str += " << "
code_str = code_str[:len(code_str) - 4]
code_str += ";"
return code_str
def inst_if(inst):
code_str = "if ("
if inst['if'] == "true":
code_str += enc(list(inst['expression'][0].keys())[0], inst['expression'][0][list(inst['expression'][0].keys())[0]]) + ") {"
elif inst['if'] == "false":
code_str += "!" + enc(list(inst['expression'][0].keys())[0], inst['expression'][0][list(inst['expression'][0].keys())[0]]) + ") {"
else:
code_str += enc(list(inst['expression'][0].keys())[0], inst['expression'][0][list(inst['expression'][0].keys())[0]])
code_str += f" {inst['if']} "
code_str += enc(list(inst['expression'][1].keys())[0], inst['expression'][1][list(inst['expression'][1].keys())[0]])
code_str += ") {"
for _inst in inst['instructions']:
code_str += interpret_instruction(_inst)
code_str += "}"
if "else" in inst:
code_str += "else {"
for inst in inst['else']:
code_str += interpret_instruction(inst)
code_str += "}"
return code_str
def interpret_instruction(inst):
code_str = ""
if "import" in inst:
code_str += interpret_import(inst)
elif "declare" in inst:
code_str += inst_decl(inst)
elif "call" in inst:
code_str += inst_call(inst)
elif "print" in inst:
code_str += inst_print(inst)
elif "if" in inst:
code_str += inst_if(inst)
return code_str
def interpret_function(func):
code_str = ""
code_str += func['return'] + " " + func['name'] + "("
if 'parameters' in func:
for param in func['parameters']:
code_str += param[list(param.keys())[0]] + " " + (list(param.keys())[0])
code_str += ", "
if len(func['parameters']):
code_str = code_str[:len(code_str) - 2]
code_str += ") {"
if not func['instructions']:
code_str += "}"
return code_str
for inst in func['instructions']:
code_str += interpret_instruction(inst)
code_str += "}"
return code_str
def interpret_object(obj):
code_str = ""
code_str += "class " + obj['name'] + "{"
if 'private' in obj:
code_str += "private:"
for inst in obj['private']:
code_str += interpret_instruction(inst)
if 'public' in obj:
code_str += "public:"
for inst in obj['public']:
code_str += interpret_instruction(inst)
code_str += "};"
return code_str
def interpret_import(imp):
importfile = open(imp['import'])
importjson = json.load(importfile)
code_str = interpret_json(importjson)
return code_str
def interpret_json(src):
code_str = ""
for element in src:
code_str += interpret_instruction(element)
return code_str
if __name__ == "__main__":
#dir = "examples/"
# sourcefile = open(dir + 'code.json')
# source = json.load(sourcefile)
# destinationfile = open(dir + "code.cpp", "w")
sourcefile = open(sys.argv[1])
source = json.load(sourcefile)
destinationfile = open(sys.argv[2], "w")
code = "#include <stdio.h>\n" \
"#include <stdlib.h>\n" \
"#include <iostream>\n" \
"#include <time.h>\n" \
"using namespace std;\n" \
"void randseed() {srand(time(0));}\n" \
"int randrange(int min, int max) {return rand() % (max + 1 - min) + min;}\n"
code += interpret_json(source)
destinationfile.write(code) | jcc_interpreter.py | import json
import sys
def s(_):
return '"' + _ + '"'
def i(_):
return str(_)
def b(_):
if _:
return "true"
else:
return "false"
def enc(typ, val):
if typ == "int":
return i(val)
elif typ == "string":
return s(val)
elif typ == "bool":
return b(val)
elif typ == "variable":
return val
def inst_decl(decl):
code_str = ""
if decl['declare'] == "variable":
code_str += decl['type'] + " " + decl['name']
if 'value' in decl:
code_str += " = " + enc(decl['type'], decl['value']) + ";"
else:
code_str += ";"
elif decl['declare'] == "function":
code_str += interpret_function(decl)
elif decl['declare'] == "array":
code_str += decl['type'] + " " + decl['name'] + "[] = {"
if 'value' in decl:
if decl['type'] == "int":
for element in decl['value']:
code_str += i(element) + ", "
elif decl['type'] == "string":
for element in decl['value']:
code_str += s(element) + ", "
elif decl['type'] == "bool":
for element in decl['value']:
code_str += b(element) + ", "
code_str = code_str[:len(code_str) - 2]
code_str += "};"
elif decl['declare'] == "object":
code_str += interpret_object(decl)
return code_str
def inst_call(inst):
code_str = inst['call'] + "("
if inst['parameters']:
for param in inst['parameters']:
code_str += enc(list(param.keys())[0], param[list(param.keys())[0]]) + ", "
code_str = code_str[:len(code_str) - 2]
code_str += ");"
return code_str
def inst_print(inst):
code_str = "cout << std::boolalpha << "
for param in inst['print']:
code_str += enc(list(param.keys())[0], param[list(param.keys())[0]])
code_str += " << "
code_str = code_str[:len(code_str) - 4]
code_str += ";"
return code_str
def inst_if(inst):
code_str = "if ("
if inst['if'] == "true":
code_str += enc(list(inst['expression'][0].keys())[0], inst['expression'][0][list(inst['expression'][0].keys())[0]]) + ") {"
elif inst['if'] == "false":
code_str += "!" + enc(list(inst['expression'][0].keys())[0], inst['expression'][0][list(inst['expression'][0].keys())[0]]) + ") {"
else:
code_str += enc(list(inst['expression'][0].keys())[0], inst['expression'][0][list(inst['expression'][0].keys())[0]])
code_str += f" {inst['if']} "
code_str += enc(list(inst['expression'][1].keys())[0], inst['expression'][1][list(inst['expression'][1].keys())[0]])
code_str += ") {"
for _inst in inst['instructions']:
code_str += interpret_instruction(_inst)
code_str += "}"
if "else" in inst:
code_str += "else {"
for inst in inst['else']:
code_str += interpret_instruction(inst)
code_str += "}"
return code_str
def interpret_instruction(inst):
code_str = ""
if "import" in inst:
code_str += interpret_import(inst)
elif "declare" in inst:
code_str += inst_decl(inst)
elif "call" in inst:
code_str += inst_call(inst)
elif "print" in inst:
code_str += inst_print(inst)
elif "if" in inst:
code_str += inst_if(inst)
return code_str
def interpret_function(func):
code_str = ""
code_str += func['return'] + " " + func['name'] + "("
if 'parameters' in func:
for param in func['parameters']:
code_str += param[list(param.keys())[0]] + " " + (list(param.keys())[0])
code_str += ", "
if len(func['parameters']):
code_str = code_str[:len(code_str) - 2]
code_str += ") {"
if not func['instructions']:
code_str += "}"
return code_str
for inst in func['instructions']:
code_str += interpret_instruction(inst)
code_str += "}"
return code_str
def interpret_object(obj):
code_str = ""
code_str += "class " + obj['name'] + "{"
if 'private' in obj:
code_str += "private:"
for inst in obj['private']:
code_str += interpret_instruction(inst)
if 'public' in obj:
code_str += "public:"
for inst in obj['public']:
code_str += interpret_instruction(inst)
code_str += "};"
return code_str
def interpret_import(imp):
importfile = open(imp['import'])
importjson = json.load(importfile)
code_str = interpret_json(importjson)
return code_str
def interpret_json(src):
code_str = ""
for element in src:
code_str += interpret_instruction(element)
return code_str
if __name__ == "__main__":
#dir = "examples/"
# sourcefile = open(dir + 'code.json')
# source = json.load(sourcefile)
# destinationfile = open(dir + "code.cpp", "w")
sourcefile = open(sys.argv[1])
source = json.load(sourcefile)
destinationfile = open(sys.argv[2], "w")
code = "#include <stdio.h>\n" \
"#include <stdlib.h>\n" \
"#include <iostream>\n" \
"#include <time.h>\n" \
"using namespace std;\n" \
"void randseed() {srand(time(0));}\n" \
"int randrange(int min, int max) {return rand() % (max + 1 - min) + min;}\n"
code += interpret_json(source)
destinationfile.write(code) | 0.127734 | 0.14624 |
import tkinter as tk
import random
class Customer:
def __init__(self, id, create_time):
self.id = id
self.create_time = create_time
self.finished_time = -1
class App:
"""The tkinter GUI interface."""
def kill_callback(self):
self.window.destroy()
def __init__(self):
# Set up tkinter.
self.window = tk.Tk()
self.window.title("multi_headed_queue")
self.window.protocol("WM_DELETE_WINDOW", self.kill_callback)
self.window.geometry("300x300")
frame = tk.LabelFrame(self.window, text="Parameters", padx=5, pady=5)
frame.pack(side=tk.TOP)
label = tk.Label(frame, text="# Tellers:")
label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W)
self.num_tellers_entry = tk.Entry(frame, width=4, justify=tk.RIGHT)
self.num_tellers_entry.grid(padx=5, pady=2, row=0, column=1)
self.num_tellers_entry.insert(0, "2")
label = tk.Label(frame, text="Arrivals:")
label.grid(padx=5, pady=2, row=1, column=0, sticky=tk.W)
self.min_arrival_entry = tk.Entry(frame, width=4, justify=tk.RIGHT)
self.min_arrival_entry.grid(padx=5, pady=2, row=1, column=1)
self.min_arrival_entry.insert(0, "1")
label = tk.Label(frame, text="to")
label.grid(padx=5, pady=2, row=1, column=2, sticky=tk.W)
self.max_arrival_entry = tk.Entry(frame, width=4, justify=tk.RIGHT)
self.max_arrival_entry.grid(padx=5, pady=2, row=1, column=3)
self.max_arrival_entry.insert(0, "3")
label = tk.Label(frame, text="minutes")
label.grid(padx=5, pady=2, row=1, column=4, sticky=tk.W)
label = tk.Label(frame, text="Duration:")
label.grid(padx=5, pady=2, row=2, column=0, sticky=tk.W)
self.min_duration_entry = tk.Entry(frame, width=4, justify=tk.RIGHT)
self.min_duration_entry.grid(padx=5, pady=2, row=2, column=1)
self.min_duration_entry.insert(0, "2")
label = tk.Label(frame, text="to")
label.grid(padx=5, pady=2, row=2, column=2, sticky=tk.W)
self.max_duration_entry = tk.Entry(frame, width=4, justify=tk.RIGHT)
self.max_duration_entry.grid(padx=5, pady=2, row=2, column=3)
self.max_duration_entry.insert(0, "7")
label = tk.Label(frame, text="minutes")
label.grid(padx=5, pady=2, row=2, column=4, sticky=tk.W)
label = tk.Label(frame, text="Speed:")
label.grid(padx=5, pady=2, row=3, column=0, sticky=tk.W)
self.steps_per_second_entry = tk.Entry(frame, width=4, justify=tk.RIGHT)
self.steps_per_second_entry.grid(padx=5, pady=2, row=3, column=1)
self.steps_per_second_entry.insert(0, "5")
label = tk.Label(frame, text="steps per second")
label.grid(padx=5, pady=2, row=3, column=2, columnspan=3, sticky=tk.W)
self.start_button = tk.Button(self.window, width=10, text="Start", command=self.start)
self.start_button.pack(padx=5, pady=4, side=tk.TOP)
frame = tk.Frame(self.window)
frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
frame.rowconfigure(0, weight=1)
frame.columnconfigure(1, weight=1)
label = tk.Label(frame, text="Queue:")
label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W)
self.queue_text = tk.Text(frame)
self.queue_text.config(state=tk.NORMAL)
self.queue_text.grid(padx=5, pady=5, row=0, column=1, sticky=tk.NSEW)
label = tk.Label(frame, text="Tellers:")
label.grid(padx=5, pady=2, row=1, column=0, sticky=tk.W)
self.tellers_entry = tk.Entry(frame)
self.tellers_entry.grid(padx=5, pady=5, row=1, column=1, sticky=tk.NSEW)
label = tk.Label(frame, text="Time:")
label.grid(padx=5, pady=2, row=2, column=0, sticky=tk.W)
self.time_entry = tk.Entry(frame, width=20)
self.time_entry.grid(padx=5, pady=2, row=2, column=1, sticky=tk.W)
label = tk.Label(frame, text="Wait:")
label.grid(padx=5, pady=2, row=3, column=0, sticky=tk.W)
self.average_wait_entry = tk.Entry(frame, width=20)
self.average_wait_entry.grid(padx=5, pady=(2, 10), row=3, column=1, sticky=tk.W)
# Display the items.
self.time = 0
self.num_served = 0
self.teller_serving = []
self.customer_queue = []
self.show_customers()
# Bind some keys.
self.window.bind('<Return>', (lambda e, button=self.start_button: self.start_button.invoke()))
# Force focus so Alt+F4 closes this window and not the Python shell.
self.num_tellers_entry.focus_force()
self.window.mainloop()
def start(self):
""" Start or stop the simulation."""
if self.start_button["text"] == "Start":
self.start_simulation()
else:
self.stop_simulation()
def start_simulation(self):
""" Start the simulation."""
self.num_tellers = int(self.num_tellers_entry.get())
self.min_arrival = int(self.min_arrival_entry.get())
self.max_arrival = int(self.max_arrival_entry.get())
self.min_duration = int(self.min_duration_entry.get())
self.max_duration = int(self.max_duration_entry.get())
self.teller_serving = [None for customer in range(self.num_tellers)]
self.customer_queue = []
self.num_served = 0
self.total_wait_time = 0
self.next_id = 1
self.time = 0
self.next_arrival_time = 1
# Display the current situation.
self.show_customers()
# Start the simulation timer.
self.start_button["text"] = "Stop"
self.delay = int(1000 / int(self.steps_per_second_entry.get()))
self.window.after(self.delay, self.tick)
def tick(self):
""" A minute has passed."""
self.time += 1
# See if we need to create a new customer.
if self.next_arrival_time <= self.time:
# Create a customer.
customer = Customer(self.next_id, self.time)
self.next_id += 1
# Add the customer to the queue.
self.customer_queue.append(customer)
# See when to add the next customer.
self.next_arrival_time = self.time + random.randint(self.min_arrival, self.min_arrival)
# Process the tellers.
for i in range(self.num_tellers):
# If this teller is serving someone, see if that customer is done.
if (self.teller_serving[i] != None) and (self.teller_serving[i].finished_time <= self.time):
self.teller_serving[i] = None
# If this teller is available, move a customer here.
if (self.teller_serving[i] == None) and (len(self.customer_queue) > 0):
# This teller isn't busy. Move a customer here.
customer = self.customer_queue.pop(0) # Dequeue the customer
self.teller_serving[i] = customer
# Set the customer's finish time.
customer.finished_time = self.time + random.randint(self.min_duration, self.max_duration)
# Record the customer's wait time.
self.total_wait_time += self.time - customer.create_time
self.num_served += 1
# Display the new situation.
self.show_customers()
# Queue the next tick.
if self.start_button["text"] == "Stop":
self.window.after(self.delay, self.tick)
def stop_simulation(self):
""" Stop the simulation."""
self.start_button["text"] = "Start"
def show_customers(self):
""" Show the current situation."""
# Show the customers in the queue.
txt = ""
for customer in self.customer_queue:
txt += f"{customer.id} "
self.queue_text.delete(1.0, tk.END)
self.queue_text.insert(1.0, txt)
# Show the customers being served.
txt = ""
for customer in self.teller_serving:
if customer == None:
txt += "-- "
else:
txt += f"{customer.id} "
self.tellers_entry.delete(0, tk.END)
self.tellers_entry.insert(0, txt)
# Show elapsed time.
hours = self.time // 60
mins = self.time - hours * 60
self.time_entry.delete(0, tk.END)
self.time_entry.insert(0, f"{hours:2d} hours, {mins:2d} mins")
# Show the average wait time.
self.average_wait_entry.delete(0, tk.END)
if self.num_served > 0:
elapsed = self.total_wait_time / self.num_served
mins = int(elapsed)
secs = (elapsed - mins) * 60
self.average_wait_entry.insert(0, f"{mins} mins, {secs:.2f} secs")
if __name__ == '__main__':
app = App() | algs2e_python/Chapter 05/python/multi_headed_queue.py | import tkinter as tk
import random
class Customer:
def __init__(self, id, create_time):
self.id = id
self.create_time = create_time
self.finished_time = -1
class App:
"""The tkinter GUI interface."""
def kill_callback(self):
self.window.destroy()
def __init__(self):
# Set up tkinter.
self.window = tk.Tk()
self.window.title("multi_headed_queue")
self.window.protocol("WM_DELETE_WINDOW", self.kill_callback)
self.window.geometry("300x300")
frame = tk.LabelFrame(self.window, text="Parameters", padx=5, pady=5)
frame.pack(side=tk.TOP)
label = tk.Label(frame, text="# Tellers:")
label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W)
self.num_tellers_entry = tk.Entry(frame, width=4, justify=tk.RIGHT)
self.num_tellers_entry.grid(padx=5, pady=2, row=0, column=1)
self.num_tellers_entry.insert(0, "2")
label = tk.Label(frame, text="Arrivals:")
label.grid(padx=5, pady=2, row=1, column=0, sticky=tk.W)
self.min_arrival_entry = tk.Entry(frame, width=4, justify=tk.RIGHT)
self.min_arrival_entry.grid(padx=5, pady=2, row=1, column=1)
self.min_arrival_entry.insert(0, "1")
label = tk.Label(frame, text="to")
label.grid(padx=5, pady=2, row=1, column=2, sticky=tk.W)
self.max_arrival_entry = tk.Entry(frame, width=4, justify=tk.RIGHT)
self.max_arrival_entry.grid(padx=5, pady=2, row=1, column=3)
self.max_arrival_entry.insert(0, "3")
label = tk.Label(frame, text="minutes")
label.grid(padx=5, pady=2, row=1, column=4, sticky=tk.W)
label = tk.Label(frame, text="Duration:")
label.grid(padx=5, pady=2, row=2, column=0, sticky=tk.W)
self.min_duration_entry = tk.Entry(frame, width=4, justify=tk.RIGHT)
self.min_duration_entry.grid(padx=5, pady=2, row=2, column=1)
self.min_duration_entry.insert(0, "2")
label = tk.Label(frame, text="to")
label.grid(padx=5, pady=2, row=2, column=2, sticky=tk.W)
self.max_duration_entry = tk.Entry(frame, width=4, justify=tk.RIGHT)
self.max_duration_entry.grid(padx=5, pady=2, row=2, column=3)
self.max_duration_entry.insert(0, "7")
label = tk.Label(frame, text="minutes")
label.grid(padx=5, pady=2, row=2, column=4, sticky=tk.W)
label = tk.Label(frame, text="Speed:")
label.grid(padx=5, pady=2, row=3, column=0, sticky=tk.W)
self.steps_per_second_entry = tk.Entry(frame, width=4, justify=tk.RIGHT)
self.steps_per_second_entry.grid(padx=5, pady=2, row=3, column=1)
self.steps_per_second_entry.insert(0, "5")
label = tk.Label(frame, text="steps per second")
label.grid(padx=5, pady=2, row=3, column=2, columnspan=3, sticky=tk.W)
self.start_button = tk.Button(self.window, width=10, text="Start", command=self.start)
self.start_button.pack(padx=5, pady=4, side=tk.TOP)
frame = tk.Frame(self.window)
frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
frame.rowconfigure(0, weight=1)
frame.columnconfigure(1, weight=1)
label = tk.Label(frame, text="Queue:")
label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W)
self.queue_text = tk.Text(frame)
self.queue_text.config(state=tk.NORMAL)
self.queue_text.grid(padx=5, pady=5, row=0, column=1, sticky=tk.NSEW)
label = tk.Label(frame, text="Tellers:")
label.grid(padx=5, pady=2, row=1, column=0, sticky=tk.W)
self.tellers_entry = tk.Entry(frame)
self.tellers_entry.grid(padx=5, pady=5, row=1, column=1, sticky=tk.NSEW)
label = tk.Label(frame, text="Time:")
label.grid(padx=5, pady=2, row=2, column=0, sticky=tk.W)
self.time_entry = tk.Entry(frame, width=20)
self.time_entry.grid(padx=5, pady=2, row=2, column=1, sticky=tk.W)
label = tk.Label(frame, text="Wait:")
label.grid(padx=5, pady=2, row=3, column=0, sticky=tk.W)
self.average_wait_entry = tk.Entry(frame, width=20)
self.average_wait_entry.grid(padx=5, pady=(2, 10), row=3, column=1, sticky=tk.W)
# Display the items.
self.time = 0
self.num_served = 0
self.teller_serving = []
self.customer_queue = []
self.show_customers()
# Bind some keys.
self.window.bind('<Return>', (lambda e, button=self.start_button: self.start_button.invoke()))
# Force focus so Alt+F4 closes this window and not the Python shell.
self.num_tellers_entry.focus_force()
self.window.mainloop()
def start(self):
""" Start or stop the simulation."""
if self.start_button["text"] == "Start":
self.start_simulation()
else:
self.stop_simulation()
def start_simulation(self):
""" Start the simulation."""
self.num_tellers = int(self.num_tellers_entry.get())
self.min_arrival = int(self.min_arrival_entry.get())
self.max_arrival = int(self.max_arrival_entry.get())
self.min_duration = int(self.min_duration_entry.get())
self.max_duration = int(self.max_duration_entry.get())
self.teller_serving = [None for customer in range(self.num_tellers)]
self.customer_queue = []
self.num_served = 0
self.total_wait_time = 0
self.next_id = 1
self.time = 0
self.next_arrival_time = 1
# Display the current situation.
self.show_customers()
# Start the simulation timer.
self.start_button["text"] = "Stop"
self.delay = int(1000 / int(self.steps_per_second_entry.get()))
self.window.after(self.delay, self.tick)
def tick(self):
""" A minute has passed."""
self.time += 1
# See if we need to create a new customer.
if self.next_arrival_time <= self.time:
# Create a customer.
customer = Customer(self.next_id, self.time)
self.next_id += 1
# Add the customer to the queue.
self.customer_queue.append(customer)
# See when to add the next customer.
self.next_arrival_time = self.time + random.randint(self.min_arrival, self.min_arrival)
# Process the tellers.
for i in range(self.num_tellers):
# If this teller is serving someone, see if that customer is done.
if (self.teller_serving[i] != None) and (self.teller_serving[i].finished_time <= self.time):
self.teller_serving[i] = None
# If this teller is available, move a customer here.
if (self.teller_serving[i] == None) and (len(self.customer_queue) > 0):
# This teller isn't busy. Move a customer here.
customer = self.customer_queue.pop(0) # Dequeue the customer
self.teller_serving[i] = customer
# Set the customer's finish time.
customer.finished_time = self.time + random.randint(self.min_duration, self.max_duration)
# Record the customer's wait time.
self.total_wait_time += self.time - customer.create_time
self.num_served += 1
# Display the new situation.
self.show_customers()
# Queue the next tick.
if self.start_button["text"] == "Stop":
self.window.after(self.delay, self.tick)
def stop_simulation(self):
""" Stop the simulation."""
self.start_button["text"] = "Start"
def show_customers(self):
""" Show the current situation."""
# Show the customers in the queue.
txt = ""
for customer in self.customer_queue:
txt += f"{customer.id} "
self.queue_text.delete(1.0, tk.END)
self.queue_text.insert(1.0, txt)
# Show the customers being served.
txt = ""
for customer in self.teller_serving:
if customer == None:
txt += "-- "
else:
txt += f"{customer.id} "
self.tellers_entry.delete(0, tk.END)
self.tellers_entry.insert(0, txt)
# Show elapsed time.
hours = self.time // 60
mins = self.time - hours * 60
self.time_entry.delete(0, tk.END)
self.time_entry.insert(0, f"{hours:2d} hours, {mins:2d} mins")
# Show the average wait time.
self.average_wait_entry.delete(0, tk.END)
if self.num_served > 0:
elapsed = self.total_wait_time / self.num_served
mins = int(elapsed)
secs = (elapsed - mins) * 60
self.average_wait_entry.insert(0, f"{mins} mins, {secs:.2f} secs")
if __name__ == '__main__':
app = App() | 0.507324 | 0.133359 |
import collections
import json
import pytest
import os
from typing import Set
from data.src.split import _detect_best_script_name
from data.src.split import _generalized_check
_REPO_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
)
_LANGUAGES = os.path.join(_REPO_DIR, "data/src/languages.json")
SmokeTestScript = collections.namedtuple(
"SmokeTestScript", ("script", "samples")
)
SmokeTestScript.__doc__ = """
A script and a list of orthographic samples to run
a smoke test on.
Parameters
----------
script : str
Unicode script name.
samples : list
List of tuples containing samples of various scripts
and a boolean reflecting whether those samples are
within that Unicode script range.
"""
_SMOKE_TEST_LANGUAGES = [
SmokeTestScript(
"Han",
[
("瀨尿蝦", True),
("Mandarin", False),
("กงสุล", False),
("烈日x空", False),
("た日當空", False),
],
),
SmokeTestScript(
"Hiragana",
[
("あいきどう", True),
("Lucas", False),
("シミーズ", False),
("あいかイらず", False),
],
),
SmokeTestScript(
"Hebrew",
[
("עליתא", True),
("мећава", False),
("עלbתא", False),
("πיתא", False),
],
),
SmokeTestScript(
"Syriac", [("ܐܒܝܕܘܬܐ", True), ("ܐܒܝܕאܬܐ", False), ("cܘܘl", False)]
),
SmokeTestScript("Balinese", [("ᬰᬶᬮᬵ", True), ("ᬰнᬮᬰสุᬮᬵ", False)]),
SmokeTestScript("Tagalog", [("ᜋᜇᜇᜌ", True), ("ᜋᜇᜇbᜌ", False)]),
SmokeTestScript("Cyrillic", [("наиме", True), ("наиmе", False)]),
SmokeTestScript(
"Bengali",
[
("ব্রাহ্মীকে", True),
("অসমীয়া", True), # Assamese.
("दर्या", False),
],
),
SmokeTestScript("Devanagari", [("ब्राह्मिक", True), ("ก ไก่", False)]),
SmokeTestScript("Gujarati", [("બ્રાહ્મીક", True), ("ब्राह्मिक", False)]),
SmokeTestScript(
"Gurmukhi", [("ਲੂੰਬੜੀ", True), ("ੁ", True), ("ਲਬลੜੀ", False)]
),
SmokeTestScript("Kannada", [("ಬ್ರಾಹ್ಮಿಕ್", True), ("Ⱆ", False)]),
SmokeTestScript("Malayalam", [("ബ്രാഹ്മിക്", True), ("Ⱆ", False)]),
SmokeTestScript("Oriya", [("ବ୍ରାହ୍ମୀସି", True), ("Ⱆ", False)]),
SmokeTestScript("Sinhala", [("බ්රාහ්මික්", True), ("Ⱆ", False)]),
SmokeTestScript("Tamil", [("பிராமிக்", True), ("Ⱆ", False)]),
SmokeTestScript("Telugu", [("బ్రహ్మికి", True), ("Ⱆ", False)]),
SmokeTestScript(
"Katakana", [("シニヨン", True), ("あいき", False), ("瀨", False)]
),
SmokeTestScript("Imperial Aramaic", [("𐡀𐡅𐡓𐡔𐡋𐡌", True), ("𐡀ܒ𐡓𐡔𐡋𐡌", False)]),
SmokeTestScript(
"Latin", [("wikipron", True), ("ае", False), ("lịch", True)]
),
SmokeTestScript("Arabic", [("ژۇرنال", True), ("ژלرنال", False)]),
]
def _collect_scripts() -> Set[str]:
scripts = set()
with open(_LANGUAGES, "r") as source:
languages = json.load(source)
for lang in languages:
if "script" in languages[lang]:
for unicode_script in languages[lang]["script"].values():
scripts.add(unicode_script)
return scripts
@pytest.mark.parametrize(
"observed_scripts, known_scripts",
[(_collect_scripts(), [lang.script for lang in _SMOKE_TEST_LANGUAGES])],
)
def test_script_coverage(observed_scripts, known_scripts):
"""All scripts added to languages.json must be
included in the smoke test.
"""
for script in observed_scripts:
assert (
script in known_scripts
), f"{script} must be included in the smoke test."
@pytest.mark.parametrize("smoke_test_script,", _SMOKE_TEST_LANGUAGES)
def test_smoke_test_script(smoke_test_script):
"""Checks whether the scripts we'd like to split are appropriately handled
by the Unicode script property."""
for script_sample, predicted_truth_val in smoke_test_script.samples:
assert (
_generalized_check(smoke_test_script.script, script_sample)
== predicted_truth_val
)
@pytest.mark.parametrize("smoke_test_script,", _SMOKE_TEST_LANGUAGES)
def test_script_detection_strict(smoke_test_script):
"""Checks whether the scripts we'd like to split are correctly detected
given the samples."""
for script_sample, predicted_truth_val in smoke_test_script.samples:
result = _detect_best_script_name(script_sample)
predicted_script = result[0] if result else None
status = predicted_script == smoke_test_script.script
assert status == predicted_truth_val, (
f"{script_sample}: {smoke_test_script.script} predicted"
f" as {predicted_script}."
)
def test_script_detection_basic():
# Check mixing the scripts: Kharoṣṭhī and Brāhmī, with a longer segment
# corresponding to Brāhmī.
text = "𐨑𐨪𐨆𐨯𐨠𐨁𑀘𑀠𑀬𑁄𑀰𑀺𑀣𑁄"
assert not _detect_best_script_name(text) # Not allowed in strict mode.
script, score = _detect_best_script_name(text, strict=False)
assert script == "Brahmi"
assert score > 0.5 | tests/test_data/test_split.py | import collections
import json
import pytest
import os
from typing import Set
from data.src.split import _detect_best_script_name
from data.src.split import _generalized_check
_REPO_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
)
_LANGUAGES = os.path.join(_REPO_DIR, "data/src/languages.json")
SmokeTestScript = collections.namedtuple(
"SmokeTestScript", ("script", "samples")
)
SmokeTestScript.__doc__ = """
A script and a list of orthographic samples to run
a smoke test on.
Parameters
----------
script : str
Unicode script name.
samples : list
List of tuples containing samples of various scripts
and a boolean reflecting whether those samples are
within that Unicode script range.
"""
_SMOKE_TEST_LANGUAGES = [
SmokeTestScript(
"Han",
[
("瀨尿蝦", True),
("Mandarin", False),
("กงสุล", False),
("烈日x空", False),
("た日當空", False),
],
),
SmokeTestScript(
"Hiragana",
[
("あいきどう", True),
("Lucas", False),
("シミーズ", False),
("あいかイらず", False),
],
),
SmokeTestScript(
"Hebrew",
[
("עליתא", True),
("мећава", False),
("עלbתא", False),
("πיתא", False),
],
),
SmokeTestScript(
"Syriac", [("ܐܒܝܕܘܬܐ", True), ("ܐܒܝܕאܬܐ", False), ("cܘܘl", False)]
),
SmokeTestScript("Balinese", [("ᬰᬶᬮᬵ", True), ("ᬰнᬮᬰสุᬮᬵ", False)]),
SmokeTestScript("Tagalog", [("ᜋᜇᜇᜌ", True), ("ᜋᜇᜇbᜌ", False)]),
SmokeTestScript("Cyrillic", [("наиме", True), ("наиmе", False)]),
SmokeTestScript(
"Bengali",
[
("ব্রাহ্মীকে", True),
("অসমীয়া", True), # Assamese.
("दर्या", False),
],
),
SmokeTestScript("Devanagari", [("ब्राह्मिक", True), ("ก ไก่", False)]),
SmokeTestScript("Gujarati", [("બ્રાહ્મીક", True), ("ब्राह्मिक", False)]),
SmokeTestScript(
"Gurmukhi", [("ਲੂੰਬੜੀ", True), ("ੁ", True), ("ਲਬลੜੀ", False)]
),
SmokeTestScript("Kannada", [("ಬ್ರಾಹ್ಮಿಕ್", True), ("Ⱆ", False)]),
SmokeTestScript("Malayalam", [("ബ്രാഹ്മിക്", True), ("Ⱆ", False)]),
SmokeTestScript("Oriya", [("ବ୍ରାହ୍ମୀସି", True), ("Ⱆ", False)]),
SmokeTestScript("Sinhala", [("බ්රාහ්මික්", True), ("Ⱆ", False)]),
SmokeTestScript("Tamil", [("பிராமிக்", True), ("Ⱆ", False)]),
SmokeTestScript("Telugu", [("బ్రహ్మికి", True), ("Ⱆ", False)]),
SmokeTestScript(
"Katakana", [("シニヨン", True), ("あいき", False), ("瀨", False)]
),
SmokeTestScript("Imperial Aramaic", [("𐡀𐡅𐡓𐡔𐡋𐡌", True), ("𐡀ܒ𐡓𐡔𐡋𐡌", False)]),
SmokeTestScript(
"Latin", [("wikipron", True), ("ае", False), ("lịch", True)]
),
SmokeTestScript("Arabic", [("ژۇرنال", True), ("ژלرنال", False)]),
]
def _collect_scripts() -> Set[str]:
scripts = set()
with open(_LANGUAGES, "r") as source:
languages = json.load(source)
for lang in languages:
if "script" in languages[lang]:
for unicode_script in languages[lang]["script"].values():
scripts.add(unicode_script)
return scripts
@pytest.mark.parametrize(
"observed_scripts, known_scripts",
[(_collect_scripts(), [lang.script for lang in _SMOKE_TEST_LANGUAGES])],
)
def test_script_coverage(observed_scripts, known_scripts):
"""All scripts added to languages.json must be
included in the smoke test.
"""
for script in observed_scripts:
assert (
script in known_scripts
), f"{script} must be included in the smoke test."
@pytest.mark.parametrize("smoke_test_script,", _SMOKE_TEST_LANGUAGES)
def test_smoke_test_script(smoke_test_script):
"""Checks whether the scripts we'd like to split are appropriately handled
by the Unicode script property."""
for script_sample, predicted_truth_val in smoke_test_script.samples:
assert (
_generalized_check(smoke_test_script.script, script_sample)
== predicted_truth_val
)
@pytest.mark.parametrize("smoke_test_script,", _SMOKE_TEST_LANGUAGES)
def test_script_detection_strict(smoke_test_script):
"""Checks whether the scripts we'd like to split are correctly detected
given the samples."""
for script_sample, predicted_truth_val in smoke_test_script.samples:
result = _detect_best_script_name(script_sample)
predicted_script = result[0] if result else None
status = predicted_script == smoke_test_script.script
assert status == predicted_truth_val, (
f"{script_sample}: {smoke_test_script.script} predicted"
f" as {predicted_script}."
)
def test_script_detection_basic():
# Check mixing the scripts: Kharoṣṭhī and Brāhmī, with a longer segment
# corresponding to Brāhmī.
text = "𐨑𐨪𐨆𐨯𐨠𐨁𑀘𑀠𑀬𑁄𑀰𑀺𑀣𑁄"
assert not _detect_best_script_name(text) # Not allowed in strict mode.
script, score = _detect_best_script_name(text, strict=False)
assert script == "Brahmi"
assert score > 0.5 | 0.625324 | 0.282437 |
import logging
from typing import Dict, List, Optional, Any, Callable, Tuple, Union
from collections import OrderedDict
import traceback
import copy
import os
from qiskit.providers import ProviderV1 as Provider # type: ignore[attr-defined]
from qiskit.providers.models import (QasmBackendConfiguration,
PulseBackendConfiguration)
from qiskit.circuit import QuantumCircuit
from qiskit.providers.backend import BackendV1 as Backend
from qiskit.providers.basebackend import BaseBackend
from qiskit.transpiler import Layout
from qiskit_ibm.runtime import runtime_job # pylint: disable=unused-import
from .api.clients import AuthClient, AccountClient, VersionClient
from .apiconstants import QISKIT_IBM_API_URL
from .ibm_backend import IBMBackend, IBMSimulator # pylint: disable=cyclic-import
from .credentials import Credentials, HubGroupProject, discover_credentials
from .credentials.configrc import (remove_credentials, read_credentials_from_qiskitrc,
store_credentials)
from .credentials.exceptions import HubGroupProjectInvalidStateError
from .ibm_backend_service import IBMBackendService # pylint: disable=cyclic-import
from .utils.json_decoder import decode_backend_configuration
from .random.ibm_random_service import IBMRandomService # pylint: disable=cyclic-import
from .experiment import IBMExperimentService # pylint: disable=cyclic-import
from .runtime.ibm_runtime_service import IBMRuntimeService # pylint: disable=cyclic-import
from .exceptions import (IBMNotAuthorizedError, IBMInputValueError, IBMProviderCredentialsNotFound,
IBMProviderCredentialsInvalidFormat, IBMProviderCredentialsInvalidToken,
IBMProviderCredentialsInvalidUrl, IBMProviderError, IBMProviderValueError,
IBMProviderMultipleCredentialsFound)
from .runner_result import RunnerResult # pylint: disable=cyclic-import
logger = logging.getLogger(__name__)
class IBMProvider(Provider):
"""Provides access to the IBM Quantum services available to an account.
Authenticate against IBM Quantum for use from saved credentials or during session.
Credentials can be saved to disk by calling the `save_account()` method::
from qiskit_ibm import IBMProvider
IBMProvider.save_account(token=<INSERT_IBM_QUANTUM_TOKEN>)
The open access provider (`ibm-q/open/main`) is the default provider, but you can overwrite
this default using the `hub`, `group`, and `project` keywords in `save_account()`.
Once credentials are saved you can simply instantiate the provider like below to load the
saved account and default provider::
from qiskit_ibm import IBMProvider
provider = IBMProvider()
To access a different provider, specify the hub, group and project name of the
desired provider during instantiation::
from qiskit_ibm import IBMProvider
provider = IBMProvider(hub='ibm-q', group='test', project='default')
Instead of saving credentials to disk, you can also set the environment
variables QISKIT_IBM_API_TOKEN, QISKIT_IBM_API_URL, QISKIT_IBM_HUB, QISKIT_IBM_GROUP
and QISKIT_IBM_PROJECT and then instantiate the provider like below::
from qiskit_ibm import IBMProvider
provider = IBMProvider()
You can also enable an account just for the current session by instantiating
the provider with the API token and optionally a hub/group/project::
from qiskit_ibm import IBMProvider
provider = IBMProvider(token=<INSERT_IBM_QUANTUM_TOKEN>)
`token` is the only required attribute that needs to be set using one of the above methods.
If no `url` is set, it defaults to 'https://auth.quantum-computing.ibm.com/api'.
If no `hub`, `group` and `project` is set, it defaults to the open provider. (ibm-q/open/main)
Each provider may offer different services. The main service,
:class:`~qiskit_ibm.ibm_backend_service.IBMBackendService`, is
available to all providers and gives access to IBM Quantum
devices and simulators.
You can obtain an instance of a service using the :meth:`service()` method
or as an attribute of this ``IBMProvider`` instance. For example::
backend_service = provider.service('backend')
backend_service = provider.service.backend
Since :class:`~qiskit_ibm.ibm_backend_service.IBMBackendService`
is the main service, some of the backend-related methods are available
through this class for convenience.
The :meth:`backends()` method returns all the backends available to this account::
backends = provider.backends()
The :meth:`get_backend()` method returns a backend that matches the filters
passed as argument. An example of retrieving a backend that matches a
specified name::
simulator_backend = provider.get_backend('ibmq_qasm_simulator')
It is also possible to use the ``backend`` attribute to reference a backend.
As an example, to retrieve the same backend from the example above::
simulator_backend = provider.backend.ibmq_qasm_simulator
Note:
The ``backend`` attribute can be used to autocomplete the names of
backends available to this provider. To autocomplete, press ``tab``
after ``provider.backend.``. This feature may not be available
if an error occurs during backend discovery. Also note that
this feature is only available in interactive sessions, such as
in Jupyter Notebook and the Python interpreter.
"""
_credentials: Credentials = None
"""Contains credentials of a new IBMProvider being initialized. If None, all the
provider instances have already been initialized and __new__ should return an
existing instance."""
_providers: Dict[HubGroupProject, 'IBMProvider'] = OrderedDict()
def __new__(
cls,
token: Optional[str] = None,
url: Optional[str] = None,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
**kwargs: Any
) -> 'IBMProvider':
"""IBMProvider constructor
Args:
token: IBM Quantum token.
url: URL for the IBM Quantum authentication server.
hub: Name of the hub to use.
group: Name of the group to use.
project: Name of the project to use.
**kwargs: Additional settings for the connection:
* proxies (dict): proxy configuration.
* verify (bool): verify the server's TLS certificate.
Returns:
If `hub`, `group`, and `project` are specified, the corresponding provider
is returned. Otherwise the default provider is looked up in the
following order and returned:
* environment variables (QISKIT_IBM_HUB, QISKIT_IBM_GROUP and QISKIT_IBM_PROJECT)
* default_provider (hub/group/project) saved to disk
* open access provider (ibmq/open/main)
Raises:
IBMProviderCredentialsInvalidFormat: If the default provider saved on
disk could not be parsed.
IBMProviderCredentialsNotFound: If no IBM Quantum credentials
can be found.
IBMProviderCredentialsInvalidUrl: If the URL specified is not
a valid IBM Quantum authentication URL.
IBMProviderCredentialsInvalidToken: If the `token` is not a valid
IBM Quantum token.
"""
account_credentials, account_preferences, hgp = cls._resolve_credentials(
token=token,
url=url,
hub=hub,
group=group,
project=project,
**kwargs
)
hub, group, project = hgp.to_tuple()
if not cls._credentials and (not cls._providers or
cls._is_different_account(account_credentials.token)):
cls._initialize_providers(credentials=account_credentials,
preferences=account_preferences)
return cls._get_provider(hub=hub, group=group, project=project)
elif not cls._credentials and cls._providers:
return cls._get_provider(hub=hub, group=group, project=project)
else:
return object.__new__(cls)
@classmethod
def _resolve_credentials(
cls,
token: Optional[str] = None,
url: Optional[str] = None,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
**kwargs: Any
) -> Tuple[Credentials, Dict, HubGroupProject]:
"""Resolve credentials after looking up env variables and credentials saved on disk
Args:
token: IBM Quantum token.
url: URL for the IBM Quantum authentication server.
hub: Name of the hub to use.
group: Name of the group to use.
project: Name of the project to use.
**kwargs: Additional settings for the connection:
* proxies (dict): proxy configuration.
* verify (bool): verify the server's TLS certificate.
Returns:
Tuple of account_credentials, preferences, hub, group and project
Raises:
IBMProviderCredentialsInvalidFormat: If the default provider saved on
disk could not be parsed.
IBMProviderCredentialsNotFound: If no IBM Quantum credentials can be found.
IBMProviderCredentialsInvalidToken: If the `token` is not a valid IBM Quantum token.
IBMProviderMultipleCredentialsFound: If multiple IBM Quantum credentials are found.
"""
if token:
if not isinstance(token, str):
raise IBMProviderCredentialsInvalidToken(
'Invalid IBM Quantum token '
'found: "{}" of type {}.'.format(token, type(token)))
url = url or os.getenv('QISKIT_IBM_API_URL') or QISKIT_IBM_API_URL
account_credentials = Credentials(token=token, url=url, auth_url=url, **kwargs)
preferences = {} # type: Optional[Dict]
else:
# Check for valid credentials in env variables or qiskitrc file.
try:
saved_credentials, preferences = discover_credentials()
except HubGroupProjectInvalidStateError as ex:
raise IBMProviderCredentialsInvalidFormat(
'Invalid provider (hub/group/project) data found {}'
.format(str(ex))) from ex
credentials_list = list(saved_credentials.values())
if not credentials_list:
raise IBMProviderCredentialsNotFound(
'No IBM Quantum credentials found.')
if len(credentials_list) > 1:
raise IBMProviderMultipleCredentialsFound(
'Multiple IBM Quantum Experience credentials found.')
account_credentials = credentials_list[0]
if not any([hub, group, project]) and account_credentials.default_provider:
hub, group, project = account_credentials.default_provider.to_tuple()
hgp = HubGroupProject(hub=hub, group=group, project=project)
return account_credentials, preferences, hgp
@classmethod
def _is_different_account(cls, token: str) -> bool:
"""Check if token is different from already instantiated account.
Args:
token: IBM Quantum token.
Returns:
``True`` if token passed is different from already instantiated account,
``False`` otherwise
"""
first_provider = list(cls._providers.values())[0]
return token != first_provider.credentials.token
@classmethod
def _initialize_providers(
cls,
credentials: Credentials,
preferences: Optional[Dict] = None
) -> None:
"""Authenticate against IBM Quantum and populate the providers.
Args:
credentials: Credentials for IBM Quantum.
preferences: Account preferences.
Raises:
IBMProviderCredentialsInvalidUrl: If the URL specified is not
a valid IBM Quantum authentication URL.
"""
version_info = cls._check_api_version(credentials)
# Check the URL is a valid authentication URL.
if not version_info['new_api'] or 'api-auth' not in version_info:
raise IBMProviderCredentialsInvalidUrl(
'The URL specified ({}) is not an IBM Quantum authentication URL. '
'Valid authentication URL: {}.'
.format(credentials.url, QISKIT_IBM_API_URL))
if cls._providers:
logger.warning('Credentials are already in use. The existing '
'account in the session will be replaced.')
cls._disable_account()
auth_client = AuthClient(credentials.token,
credentials.base_url,
**credentials.connection_parameters())
service_urls = auth_client.current_service_urls()
user_hubs = auth_client.user_hubs()
preferences = preferences or {}
for hub_info in user_hubs:
# Build credentials.
provider_credentials = Credentials(
credentials.token,
access_token=auth_client.current_access_token(),
url=service_urls['http'],
auth_url=credentials.auth_url,
websockets_url=service_urls['ws'],
proxies=credentials.proxies,
verify=credentials.verify,
services=service_urls.get('services', {}),
default_provider=credentials.default_provider,
**hub_info
)
provider_credentials.preferences = \
preferences.get(provider_credentials.unique_id(), {})
# _credentials class variable is read in __init__ to set provider credentials
cls._credentials = provider_credentials
# Build the provider.
try:
provider = IBMProvider(token=credentials.token, **hub_info)
cls._providers[provider.credentials.unique_id()] = provider
# Clear _credentials class variable so __init__ is not processed for the first
# call to __new__ (since all IBMProvider instances are initialized by this method)
cls._credentials = None
except Exception: # pylint: disable=broad-except
# Catch-all for errors instantiating the provider.
logger.warning('Unable to instantiate provider for %s: %s',
hub_info, traceback.format_exc())
@staticmethod
def _check_api_version(credentials: Credentials) -> Dict[str, Union[bool, str]]:
"""Check the version of the remote server in a set of credentials.
Args:
credentials: IBM Quantum Credentials
Returns:
A dictionary with version information.
"""
version_finder = VersionClient(credentials.base_url,
**credentials.connection_parameters())
return version_finder.version()
@classmethod
def _disable_account(cls) -> None:
"""Disable the account currently in use for the session.
Raises:
IBMProviderCredentialsNotFound: If no account is in use for the session.
"""
if not cls._providers:
raise IBMProviderCredentialsNotFound(
'No IBM Quantum account is in use for the session.')
cls._providers = OrderedDict()
@classmethod
def _get_provider(
cls,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
) -> 'IBMProvider':
"""Return a provider for a single hub/group/project combination.
Args:
hub: Name of the hub.
group: Name of the group.
project: Name of the project.
Returns:
A provider that matches the specified criteria or default provider.
Raises:
IBMProviderError: If no provider matches the specified criteria,
if more than one provider matches the specified criteria or if
no provider could be found for this account.
"""
providers = cls._get_providers(hub=hub, group=group, project=project)
if any([hub, group, project]):
if not providers:
raise IBMProviderError('No provider matches the specified criteria: '
'hub = {}, group = {}, project = {}'
.format(hub, group, project))
if len(providers) > 1:
raise IBMProviderError('More than one provider matches the specified criteria.'
'hub = {}, group = {}, project = {}'
.format(hub, group, project))
elif not providers:
# Prevent edge case where no providers are available.
raise IBMProviderError('No Hub/Group/Project could be found for this account.')
return providers[0]
def __init__(
self,
token: Optional[str] = None,
url: Optional[str] = None,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
**kwargs: Any
) -> None:
# pylint: disable=unused-argument,unsubscriptable-object
super().__init__()
if self._credentials:
self.credentials = self._credentials
self._api_client = AccountClient(self.credentials,
**self.credentials.connection_parameters())
# Initialize the internal list of backends.
self.__backends: Dict[str, IBMBackend] = {}
self._backend = IBMBackendService(self)
# Initialize other services.
self._random = IBMRandomService(self) if self.credentials.extractor_url else None
self._experiment = IBMExperimentService(self) \
if self.credentials.experiment_url else None
self._runtime = IBMRuntimeService(self) \
if self.credentials.runtime_url else None
self._services = {'backend': self._backend,
'random': self._random,
'experiment': self._experiment,
'runtime': self._runtime}
@property
def _backends(self) -> Dict[str, IBMBackend]:
"""Gets the backends for the provider, if not loaded.
Returns:
Dict[str, IBMBackend]: the backends
"""
if not self.__backends:
self.__backends = self._discover_remote_backends()
return self.__backends
@_backends.setter
def _backends(self, value: Dict[str, IBMBackend]) -> None:
"""Sets the value for the account's backends.
Args:
value: the backends
"""
self.__backends = value
def _discover_remote_backends(self, timeout: Optional[float] = None) -> Dict[str, IBMBackend]:
"""Return the remote backends available for this provider.
Args:
timeout: Maximum number of seconds to wait for the discovery of
remote backends.
Returns:
A dict of the remote backend instances, keyed by backend name.
"""
ret = OrderedDict() # type: ignore[var-annotated]
configs_list = self._api_client.list_backends(timeout=timeout)
for raw_config in configs_list:
# Make sure the raw_config is of proper type
if not isinstance(raw_config, dict):
logger.warning("An error occurred when retrieving backend "
"information. Some backends might not be available.")
continue
try:
decode_backend_configuration(raw_config)
try:
config = PulseBackendConfiguration.from_dict(raw_config)
except (KeyError, TypeError):
config = QasmBackendConfiguration.from_dict(raw_config)
backend_cls = IBMSimulator if config.simulator else IBMBackend
ret[config.backend_name] = backend_cls(
configuration=config,
provider=self,
credentials=self.credentials,
api_client=self._api_client)
except Exception: # pylint: disable=broad-except
logger.warning(
'Remote backend "%s" for provider %s could not be instantiated due to an '
'invalid config: %s',
raw_config.get('backend_name', raw_config.get('name', 'unknown')),
repr(self), traceback.format_exc())
return ret
@property
def backend(self) -> IBMBackendService:
"""Return the backend service.
Returns:
The backend service instance.
"""
return self._backend
@property
def experiment(self) -> IBMExperimentService:
"""Return the experiment service.
Returns:
The experiment service instance.
Raises:
IBMNotAuthorizedError: If the account is not authorized to use
the experiment service.
"""
if self._experiment:
return self._experiment
else:
raise IBMNotAuthorizedError("You are not authorized to use the experiment service.")
@property
def random(self) -> IBMRandomService:
"""Return the random number service.
Returns:
The random number service instance.
Raises:
IBMNotAuthorizedError: If the account is not authorized to use
the service.
"""
if self._random:
return self._random
else:
raise IBMNotAuthorizedError("You are not authorized to use the service.")
@property
def runtime(self) -> IBMRuntimeService:
"""Return the runtime service.
Returns:
The runtime service instance.
Raises:
IBMNotAuthorizedError: If the account is not authorized to use the service.
"""
if self._runtime:
return self._runtime
else:
raise IBMNotAuthorizedError("You are not authorized to use the runtime service.")
@classmethod
def active_account(cls) -> Optional[Dict[str, str]]:
"""Return the IBM Quantum account currently in use for the session.
Returns:
A dictionary with information about the account currently in the session,
None if there is no active account in session
"""
if not cls._providers:
return None
first_provider = list(cls._providers.values())[0]
return {
'token': first_provider.credentials.token,
'url': first_provider.credentials.auth_url
}
@classmethod
def providers(
cls,
token: Optional[str] = None,
url: Optional[str] = None,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
**kwargs: Any
) -> List['IBMProvider']:
"""Initialize account and return a list of providers.
Args:
token: IBM Quantum token.
url: URL for the IBM Quantum authentication server.
hub: Name of the hub.
group: Name of the group.
project: Name of the project.
**kwargs: Additional settings for the connection:
* proxies (dict): proxy configuration.
* verify (bool): verify the server's TLS certificate.
Returns:
A list of providers that match the specified criteria.
"""
account_credentials, account_preferences, *_ = cls._resolve_credentials(
token=token,
url=url,
hub=hub,
group=group,
project=project,
**kwargs
)
if not cls._providers or cls._is_different_account(account_credentials.token):
cls._initialize_providers(credentials=account_credentials,
preferences=account_preferences)
return cls._get_providers(hub=hub, group=group, project=project)
@classmethod
def _get_providers(
cls,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
) -> List['IBMProvider']:
"""Return a list of providers, subject to optional filtering.
Args:
hub: Name of the hub.
group: Name of the group.
project: Name of the project.
Returns:
A list of providers that match the specified criteria.
"""
filters = [] # type: List[Callable[[HubGroupProject], bool]]
if hub:
filters.append(lambda hgp: hgp.hub == hub)
if group:
filters.append(lambda hgp: hgp.group == group)
if project:
filters.append(lambda hgp: hgp.project == project)
providers = [provider for key, provider in cls._providers.items()
if all(f(key) for f in filters)]
return providers
@staticmethod
def delete_account() -> None:
"""Delete the saved account from disk.
Raises:
IBMProviderCredentialsNotFound: If no valid IBM Quantum
credentials can be found on disk.
IBMProviderCredentialsInvalidUrl: If invalid IBM Quantum
credentials are found on disk.
"""
stored_credentials, _ = read_credentials_from_qiskitrc()
if not stored_credentials:
raise IBMProviderCredentialsNotFound(
'No IBM Quantum credentials found on disk.')
credentials = list(stored_credentials.values())[0]
if credentials.url != QISKIT_IBM_API_URL:
raise IBMProviderCredentialsInvalidUrl(
'Invalid IBM Quantum credentials found on disk. ')
remove_credentials(credentials)
@staticmethod
def save_account(
token: str,
url: str = QISKIT_IBM_API_URL,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
overwrite: bool = False,
**kwargs: Any
) -> None:
"""Save the account to disk for future use.
Note:
If storing a default provider to disk, all three parameters
`hub`, `group`, `project` must be specified.
Args:
token: IBM Quantum token.
url: URL for the IBM Quantum authentication server.
hub: Name of the hub for the default provider to store on disk.
group: Name of the group for the default provider to store on disk.
project: Name of the project for the default provider to store on disk.
overwrite: Overwrite existing credentials.
**kwargs:
* proxies (dict): Proxy configuration for the server.
* verify (bool): If False, ignores SSL certificates errors
Raises:
IBMProviderCredentialsInvalidUrl: If the `url` is not a valid
IBM Quantum authentication URL.
IBMProviderCredentialsInvalidToken: If the `token` is not a valid
IBM Quantum token.
IBMProviderValueError: If only one or two parameters from `hub`, `group`,
`project` are specified.
"""
if url != QISKIT_IBM_API_URL:
raise IBMProviderCredentialsInvalidUrl(
'Invalid IBM Quantum credentials found.')
if not token or not isinstance(token, str):
raise IBMProviderCredentialsInvalidToken(
'Invalid IBM Quantum token '
'found: "{}" of type {}.'.format(token, type(token)))
# If any `hub`, `group`, or `project` is specified, make sure all parameters are set.
if any([hub, group, project]) and not all([hub, group, project]):
raise IBMProviderValueError('The hub, group, and project parameters must all be '
'specified when storing a default provider to disk: '
'hub = "{}", group = "{}", project = "{}"'
.format(hub, group, project))
# If specified, get the provider to store.
default_provider_hgp = HubGroupProject(hub, group, project) \
if all([hub, group, project]) else None
credentials = Credentials(token=token, url=url,
default_provider=default_provider_hgp, **kwargs)
store_credentials(credentials,
overwrite=overwrite)
@staticmethod
def saved_account() -> Dict[str, str]:
"""List the account saved on disk.
Returns:
A dictionary with information about the account saved on disk.
Raises:
IBMProviderCredentialsInvalidUrl: If invalid IBM Quantum
credentials are found on disk.
"""
stored_credentials, _ = read_credentials_from_qiskitrc()
if not stored_credentials:
return {}
credentials = list(stored_credentials.values())[0]
if credentials.url != QISKIT_IBM_API_URL:
raise IBMProviderCredentialsInvalidUrl(
'Invalid IBM Quantum credentials found on disk.')
return {
'token': credentials.token,
'url': credentials.url
}
def backends(
self,
name: Optional[str] = None,
filters: Optional[Callable[[List[IBMBackend]], bool]] = None,
min_num_qubits: Optional[int] = None,
input_allowed: Optional[Union[str, List[str]]] = None,
**kwargs: Any
) -> List[IBMBackend]:
"""Return all backends accessible via this provider, subject to optional filtering.
Args:
name: Backend name to filter by.
filters: More complex filters, such as lambda functions.
For example::
IBMProvider.backends(filters=lambda b: b.configuration().quantum_volume > 16)
min_num_qubits: Minimum number of qubits the backend has to have.
input_allowed: Filter by the types of input the backend supports.
Valid input types are ``job`` (circuit job) and ``runtime`` (Qiskit Runtime).
For example, ``inputs_allowed='runtime'`` will return all backends
that support Qiskit Runtime. If a list is given, the backend must
support all types specified in the list.
kwargs: Simple filters that specify a ``True``/``False`` criteria in the
backend configuration, backends status, or provider credentials.
An example to get the operational backends with 5 qubits::
IBMProvider.backends(n_qubits=5, operational=True)
Returns:
The list of available backends that match the filter.
"""
# pylint: disable=arguments-differ
return self._backend.backends(name=name, filters=filters, min_num_qubits=min_num_qubits,
input_allowed=input_allowed, **kwargs)
def has_service(self, name: str) -> bool:
"""Check if this provider has access to the service.
Args:
name: Name of the service.
Returns:
Whether the provider has access to the service.
Raises:
IBMInputValueError: If an unknown service name is specified.
"""
if name not in self._services:
raise IBMInputValueError(f"Unknown service {name} specified.")
if self._services[name] is None:
return False
return True
def run_circuits(
self,
circuits: Union[QuantumCircuit, List[QuantumCircuit]],
backend: Union[Backend, BaseBackend],
shots: Optional[int] = None,
initial_layout: Optional[Union[Layout, Dict, List]] = None,
layout_method: Optional[str] = None,
routing_method: Optional[str] = None,
translation_method: Optional[str] = None,
seed_transpiler: Optional[int] = None,
optimization_level: int = 1,
init_qubits: bool = True,
rep_delay: Optional[float] = None,
transpiler_options: Optional[dict] = None,
measurement_error_mitigation: bool = False,
use_measure_esp: Optional[bool] = None,
**run_config: Dict
) -> 'runtime_job.RuntimeJob':
"""Execute the input circuit(s) on a backend using the runtime service.
Note:
This method uses the IBM Quantum runtime service which is not
available to all accounts.
Args:
circuits: Circuit(s) to execute.
backend: Backend to execute circuits on.
Transpiler options are automatically grabbed from backend configuration
and properties unless otherwise specified.
shots: Number of repetitions of each circuit, for sampling. If not specified,
the backend default is used.
initial_layout: Initial position of virtual qubits on physical qubits.
layout_method: Name of layout selection pass ('trivial', 'dense',
'noise_adaptive', 'sabre').
Sometimes a perfect layout can be available in which case the layout_method
may not run.
routing_method: Name of routing pass ('basic', 'lookahead', 'stochastic', 'sabre')
translation_method: Name of translation pass ('unroller', 'translator', 'synthesis')
seed_transpiler: Sets random seed for the stochastic parts of the transpiler.
optimization_level: How much optimization to perform on the circuits.
Higher levels generate more optimized circuits, at the expense of longer
transpilation time.
If None, level 1 will be chosen as default.
init_qubits: Whether to reset the qubits to the ground state for each shot.
rep_delay: Delay between programs in seconds. Only supported on certain
backends (``backend.configuration().dynamic_reprate_enabled`` ). If supported,
``rep_delay`` will be used instead of ``rep_time`` and must be from the
range supplied by the backend (``backend.configuration().rep_delay_range``).
Default is given by ``backend.configuration().default_rep_delay``.
transpiler_options: Additional transpiler options.
measurement_error_mitigation: Whether to apply measurement error mitigation.
use_measure_esp: Whether to use excited state promoted (ESP) readout for measurements
which are the final instruction on a qubit. ESP readout can offer higher fidelity
than standard measurement sequences. See
`here <https://arxiv.org/pdf/2008.08571.pdf>`_.
**run_config: Extra arguments used to configure the circuit execution.
Returns:
Runtime job.
"""
inputs = copy.deepcopy(run_config) # type: Dict[str, Any]
inputs['circuits'] = circuits
inputs['optimization_level'] = optimization_level
inputs['init_qubits'] = init_qubits
inputs['measurement_error_mitigation'] = measurement_error_mitigation
if shots:
inputs['shots'] = shots
if initial_layout:
inputs['initial_layout'] = initial_layout
if layout_method:
inputs['layout_method'] = layout_method
if routing_method:
inputs['routing_method'] = routing_method
if translation_method:
inputs['translation_method'] = translation_method
if seed_transpiler:
inputs['seed_transpiler'] = seed_transpiler
if rep_delay:
inputs['rep_delay'] = rep_delay
if transpiler_options:
inputs['transpiler_options'] = transpiler_options
if use_measure_esp is not None:
inputs['use_measure_esp'] = use_measure_esp
options = {'backend_name': backend.name()}
return self.runtime.run('circuit-runner', options=options, inputs=inputs,
result_decoder=RunnerResult)
def service(self, name: str) -> Any:
"""Return the specified service.
Args:
name: Name of the service.
Returns:
The specified service.
Raises:
IBMInputValueError: If an unknown service name is specified.
IBMNotAuthorizedError: If the account is not authorized to use
the service.
"""
if name not in self._services:
raise IBMInputValueError(f"Unknown service {name} specified.")
if self._services[name] is None:
raise IBMNotAuthorizedError("You are not authorized to use this service.")
return self._services[name]
def services(self) -> Dict:
"""Return all available services.
Returns:
All services available to this provider.
"""
return {key: val for key, val in self._services.items() if val is not None}
def __eq__(
self,
other: Any
) -> bool:
if not isinstance(other, IBMProvider):
return False
return self.credentials == other.credentials
def __repr__(self) -> str:
credentials_info = "hub='{}', group='{}', project='{}'".format(
self.credentials.hub, self.credentials.group, self.credentials.project)
return "<{}({})>".format(self.__class__.__name__, credentials_info) | qiskit_ibm/ibm_provider.py | import logging
from typing import Dict, List, Optional, Any, Callable, Tuple, Union
from collections import OrderedDict
import traceback
import copy
import os
from qiskit.providers import ProviderV1 as Provider # type: ignore[attr-defined]
from qiskit.providers.models import (QasmBackendConfiguration,
PulseBackendConfiguration)
from qiskit.circuit import QuantumCircuit
from qiskit.providers.backend import BackendV1 as Backend
from qiskit.providers.basebackend import BaseBackend
from qiskit.transpiler import Layout
from qiskit_ibm.runtime import runtime_job # pylint: disable=unused-import
from .api.clients import AuthClient, AccountClient, VersionClient
from .apiconstants import QISKIT_IBM_API_URL
from .ibm_backend import IBMBackend, IBMSimulator # pylint: disable=cyclic-import
from .credentials import Credentials, HubGroupProject, discover_credentials
from .credentials.configrc import (remove_credentials, read_credentials_from_qiskitrc,
store_credentials)
from .credentials.exceptions import HubGroupProjectInvalidStateError
from .ibm_backend_service import IBMBackendService # pylint: disable=cyclic-import
from .utils.json_decoder import decode_backend_configuration
from .random.ibm_random_service import IBMRandomService # pylint: disable=cyclic-import
from .experiment import IBMExperimentService # pylint: disable=cyclic-import
from .runtime.ibm_runtime_service import IBMRuntimeService # pylint: disable=cyclic-import
from .exceptions import (IBMNotAuthorizedError, IBMInputValueError, IBMProviderCredentialsNotFound,
IBMProviderCredentialsInvalidFormat, IBMProviderCredentialsInvalidToken,
IBMProviderCredentialsInvalidUrl, IBMProviderError, IBMProviderValueError,
IBMProviderMultipleCredentialsFound)
from .runner_result import RunnerResult # pylint: disable=cyclic-import
logger = logging.getLogger(__name__)
class IBMProvider(Provider):
"""Provides access to the IBM Quantum services available to an account.
Authenticate against IBM Quantum for use from saved credentials or during session.
Credentials can be saved to disk by calling the `save_account()` method::
from qiskit_ibm import IBMProvider
IBMProvider.save_account(token=<INSERT_IBM_QUANTUM_TOKEN>)
The open access provider (`ibm-q/open/main`) is the default provider, but you can overwrite
this default using the `hub`, `group`, and `project` keywords in `save_account()`.
Once credentials are saved you can simply instantiate the provider like below to load the
saved account and default provider::
from qiskit_ibm import IBMProvider
provider = IBMProvider()
To access a different provider, specify the hub, group and project name of the
desired provider during instantiation::
from qiskit_ibm import IBMProvider
provider = IBMProvider(hub='ibm-q', group='test', project='default')
Instead of saving credentials to disk, you can also set the environment
variables QISKIT_IBM_API_TOKEN, QISKIT_IBM_API_URL, QISKIT_IBM_HUB, QISKIT_IBM_GROUP
and QISKIT_IBM_PROJECT and then instantiate the provider like below::
from qiskit_ibm import IBMProvider
provider = IBMProvider()
You can also enable an account just for the current session by instantiating
the provider with the API token and optionally a hub/group/project::
from qiskit_ibm import IBMProvider
provider = IBMProvider(token=<INSERT_IBM_QUANTUM_TOKEN>)
`token` is the only required attribute that needs to be set using one of the above methods.
If no `url` is set, it defaults to 'https://auth.quantum-computing.ibm.com/api'.
If no `hub`, `group` and `project` is set, it defaults to the open provider. (ibm-q/open/main)
Each provider may offer different services. The main service,
:class:`~qiskit_ibm.ibm_backend_service.IBMBackendService`, is
available to all providers and gives access to IBM Quantum
devices and simulators.
You can obtain an instance of a service using the :meth:`service()` method
or as an attribute of this ``IBMProvider`` instance. For example::
backend_service = provider.service('backend')
backend_service = provider.service.backend
Since :class:`~qiskit_ibm.ibm_backend_service.IBMBackendService`
is the main service, some of the backend-related methods are available
through this class for convenience.
The :meth:`backends()` method returns all the backends available to this account::
backends = provider.backends()
The :meth:`get_backend()` method returns a backend that matches the filters
passed as argument. An example of retrieving a backend that matches a
specified name::
simulator_backend = provider.get_backend('ibmq_qasm_simulator')
It is also possible to use the ``backend`` attribute to reference a backend.
As an example, to retrieve the same backend from the example above::
simulator_backend = provider.backend.ibmq_qasm_simulator
Note:
The ``backend`` attribute can be used to autocomplete the names of
backends available to this provider. To autocomplete, press ``tab``
after ``provider.backend.``. This feature may not be available
if an error occurs during backend discovery. Also note that
this feature is only available in interactive sessions, such as
in Jupyter Notebook and the Python interpreter.
"""
_credentials: Credentials = None
"""Contains credentials of a new IBMProvider being initialized. If None, all the
provider instances have already been initialized and __new__ should return an
existing instance."""
_providers: Dict[HubGroupProject, 'IBMProvider'] = OrderedDict()
def __new__(
cls,
token: Optional[str] = None,
url: Optional[str] = None,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
**kwargs: Any
) -> 'IBMProvider':
"""IBMProvider constructor
Args:
token: IBM Quantum token.
url: URL for the IBM Quantum authentication server.
hub: Name of the hub to use.
group: Name of the group to use.
project: Name of the project to use.
**kwargs: Additional settings for the connection:
* proxies (dict): proxy configuration.
* verify (bool): verify the server's TLS certificate.
Returns:
If `hub`, `group`, and `project` are specified, the corresponding provider
is returned. Otherwise the default provider is looked up in the
following order and returned:
* environment variables (QISKIT_IBM_HUB, QISKIT_IBM_GROUP and QISKIT_IBM_PROJECT)
* default_provider (hub/group/project) saved to disk
* open access provider (ibmq/open/main)
Raises:
IBMProviderCredentialsInvalidFormat: If the default provider saved on
disk could not be parsed.
IBMProviderCredentialsNotFound: If no IBM Quantum credentials
can be found.
IBMProviderCredentialsInvalidUrl: If the URL specified is not
a valid IBM Quantum authentication URL.
IBMProviderCredentialsInvalidToken: If the `token` is not a valid
IBM Quantum token.
"""
account_credentials, account_preferences, hgp = cls._resolve_credentials(
token=token,
url=url,
hub=hub,
group=group,
project=project,
**kwargs
)
hub, group, project = hgp.to_tuple()
if not cls._credentials and (not cls._providers or
cls._is_different_account(account_credentials.token)):
cls._initialize_providers(credentials=account_credentials,
preferences=account_preferences)
return cls._get_provider(hub=hub, group=group, project=project)
elif not cls._credentials and cls._providers:
return cls._get_provider(hub=hub, group=group, project=project)
else:
return object.__new__(cls)
@classmethod
def _resolve_credentials(
cls,
token: Optional[str] = None,
url: Optional[str] = None,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
**kwargs: Any
) -> Tuple[Credentials, Dict, HubGroupProject]:
"""Resolve credentials after looking up env variables and credentials saved on disk
Args:
token: IBM Quantum token.
url: URL for the IBM Quantum authentication server.
hub: Name of the hub to use.
group: Name of the group to use.
project: Name of the project to use.
**kwargs: Additional settings for the connection:
* proxies (dict): proxy configuration.
* verify (bool): verify the server's TLS certificate.
Returns:
Tuple of account_credentials, preferences, hub, group and project
Raises:
IBMProviderCredentialsInvalidFormat: If the default provider saved on
disk could not be parsed.
IBMProviderCredentialsNotFound: If no IBM Quantum credentials can be found.
IBMProviderCredentialsInvalidToken: If the `token` is not a valid IBM Quantum token.
IBMProviderMultipleCredentialsFound: If multiple IBM Quantum credentials are found.
"""
if token:
if not isinstance(token, str):
raise IBMProviderCredentialsInvalidToken(
'Invalid IBM Quantum token '
'found: "{}" of type {}.'.format(token, type(token)))
url = url or os.getenv('QISKIT_IBM_API_URL') or QISKIT_IBM_API_URL
account_credentials = Credentials(token=token, url=url, auth_url=url, **kwargs)
preferences = {} # type: Optional[Dict]
else:
# Check for valid credentials in env variables or qiskitrc file.
try:
saved_credentials, preferences = discover_credentials()
except HubGroupProjectInvalidStateError as ex:
raise IBMProviderCredentialsInvalidFormat(
'Invalid provider (hub/group/project) data found {}'
.format(str(ex))) from ex
credentials_list = list(saved_credentials.values())
if not credentials_list:
raise IBMProviderCredentialsNotFound(
'No IBM Quantum credentials found.')
if len(credentials_list) > 1:
raise IBMProviderMultipleCredentialsFound(
'Multiple IBM Quantum Experience credentials found.')
account_credentials = credentials_list[0]
if not any([hub, group, project]) and account_credentials.default_provider:
hub, group, project = account_credentials.default_provider.to_tuple()
hgp = HubGroupProject(hub=hub, group=group, project=project)
return account_credentials, preferences, hgp
@classmethod
def _is_different_account(cls, token: str) -> bool:
"""Check if token is different from already instantiated account.
Args:
token: IBM Quantum token.
Returns:
``True`` if token passed is different from already instantiated account,
``False`` otherwise
"""
first_provider = list(cls._providers.values())[0]
return token != first_provider.credentials.token
@classmethod
def _initialize_providers(
cls,
credentials: Credentials,
preferences: Optional[Dict] = None
) -> None:
"""Authenticate against IBM Quantum and populate the providers.
Args:
credentials: Credentials for IBM Quantum.
preferences: Account preferences.
Raises:
IBMProviderCredentialsInvalidUrl: If the URL specified is not
a valid IBM Quantum authentication URL.
"""
version_info = cls._check_api_version(credentials)
# Check the URL is a valid authentication URL.
if not version_info['new_api'] or 'api-auth' not in version_info:
raise IBMProviderCredentialsInvalidUrl(
'The URL specified ({}) is not an IBM Quantum authentication URL. '
'Valid authentication URL: {}.'
.format(credentials.url, QISKIT_IBM_API_URL))
if cls._providers:
logger.warning('Credentials are already in use. The existing '
'account in the session will be replaced.')
cls._disable_account()
auth_client = AuthClient(credentials.token,
credentials.base_url,
**credentials.connection_parameters())
service_urls = auth_client.current_service_urls()
user_hubs = auth_client.user_hubs()
preferences = preferences or {}
for hub_info in user_hubs:
# Build credentials.
provider_credentials = Credentials(
credentials.token,
access_token=auth_client.current_access_token(),
url=service_urls['http'],
auth_url=credentials.auth_url,
websockets_url=service_urls['ws'],
proxies=credentials.proxies,
verify=credentials.verify,
services=service_urls.get('services', {}),
default_provider=credentials.default_provider,
**hub_info
)
provider_credentials.preferences = \
preferences.get(provider_credentials.unique_id(), {})
# _credentials class variable is read in __init__ to set provider credentials
cls._credentials = provider_credentials
# Build the provider.
try:
provider = IBMProvider(token=credentials.token, **hub_info)
cls._providers[provider.credentials.unique_id()] = provider
# Clear _credentials class variable so __init__ is not processed for the first
# call to __new__ (since all IBMProvider instances are initialized by this method)
cls._credentials = None
except Exception: # pylint: disable=broad-except
# Catch-all for errors instantiating the provider.
logger.warning('Unable to instantiate provider for %s: %s',
hub_info, traceback.format_exc())
@staticmethod
def _check_api_version(credentials: Credentials) -> Dict[str, Union[bool, str]]:
"""Check the version of the remote server in a set of credentials.
Args:
credentials: IBM Quantum Credentials
Returns:
A dictionary with version information.
"""
version_finder = VersionClient(credentials.base_url,
**credentials.connection_parameters())
return version_finder.version()
@classmethod
def _disable_account(cls) -> None:
"""Disable the account currently in use for the session.
Raises:
IBMProviderCredentialsNotFound: If no account is in use for the session.
"""
if not cls._providers:
raise IBMProviderCredentialsNotFound(
'No IBM Quantum account is in use for the session.')
cls._providers = OrderedDict()
@classmethod
def _get_provider(
cls,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
) -> 'IBMProvider':
"""Return a provider for a single hub/group/project combination.
Args:
hub: Name of the hub.
group: Name of the group.
project: Name of the project.
Returns:
A provider that matches the specified criteria or default provider.
Raises:
IBMProviderError: If no provider matches the specified criteria,
if more than one provider matches the specified criteria or if
no provider could be found for this account.
"""
providers = cls._get_providers(hub=hub, group=group, project=project)
if any([hub, group, project]):
if not providers:
raise IBMProviderError('No provider matches the specified criteria: '
'hub = {}, group = {}, project = {}'
.format(hub, group, project))
if len(providers) > 1:
raise IBMProviderError('More than one provider matches the specified criteria.'
'hub = {}, group = {}, project = {}'
.format(hub, group, project))
elif not providers:
# Prevent edge case where no providers are available.
raise IBMProviderError('No Hub/Group/Project could be found for this account.')
return providers[0]
def __init__(
self,
token: Optional[str] = None,
url: Optional[str] = None,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
**kwargs: Any
) -> None:
# pylint: disable=unused-argument,unsubscriptable-object
super().__init__()
if self._credentials:
self.credentials = self._credentials
self._api_client = AccountClient(self.credentials,
**self.credentials.connection_parameters())
# Initialize the internal list of backends.
self.__backends: Dict[str, IBMBackend] = {}
self._backend = IBMBackendService(self)
# Initialize other services.
self._random = IBMRandomService(self) if self.credentials.extractor_url else None
self._experiment = IBMExperimentService(self) \
if self.credentials.experiment_url else None
self._runtime = IBMRuntimeService(self) \
if self.credentials.runtime_url else None
self._services = {'backend': self._backend,
'random': self._random,
'experiment': self._experiment,
'runtime': self._runtime}
@property
def _backends(self) -> Dict[str, IBMBackend]:
"""Gets the backends for the provider, if not loaded.
Returns:
Dict[str, IBMBackend]: the backends
"""
if not self.__backends:
self.__backends = self._discover_remote_backends()
return self.__backends
@_backends.setter
def _backends(self, value: Dict[str, IBMBackend]) -> None:
"""Sets the value for the account's backends.
Args:
value: the backends
"""
self.__backends = value
def _discover_remote_backends(self, timeout: Optional[float] = None) -> Dict[str, IBMBackend]:
"""Return the remote backends available for this provider.
Args:
timeout: Maximum number of seconds to wait for the discovery of
remote backends.
Returns:
A dict of the remote backend instances, keyed by backend name.
"""
ret = OrderedDict() # type: ignore[var-annotated]
configs_list = self._api_client.list_backends(timeout=timeout)
for raw_config in configs_list:
# Make sure the raw_config is of proper type
if not isinstance(raw_config, dict):
logger.warning("An error occurred when retrieving backend "
"information. Some backends might not be available.")
continue
try:
decode_backend_configuration(raw_config)
try:
config = PulseBackendConfiguration.from_dict(raw_config)
except (KeyError, TypeError):
config = QasmBackendConfiguration.from_dict(raw_config)
backend_cls = IBMSimulator if config.simulator else IBMBackend
ret[config.backend_name] = backend_cls(
configuration=config,
provider=self,
credentials=self.credentials,
api_client=self._api_client)
except Exception: # pylint: disable=broad-except
logger.warning(
'Remote backend "%s" for provider %s could not be instantiated due to an '
'invalid config: %s',
raw_config.get('backend_name', raw_config.get('name', 'unknown')),
repr(self), traceback.format_exc())
return ret
@property
def backend(self) -> IBMBackendService:
"""Return the backend service.
Returns:
The backend service instance.
"""
return self._backend
@property
def experiment(self) -> IBMExperimentService:
"""Return the experiment service.
Returns:
The experiment service instance.
Raises:
IBMNotAuthorizedError: If the account is not authorized to use
the experiment service.
"""
if self._experiment:
return self._experiment
else:
raise IBMNotAuthorizedError("You are not authorized to use the experiment service.")
@property
def random(self) -> IBMRandomService:
"""Return the random number service.
Returns:
The random number service instance.
Raises:
IBMNotAuthorizedError: If the account is not authorized to use
the service.
"""
if self._random:
return self._random
else:
raise IBMNotAuthorizedError("You are not authorized to use the service.")
@property
def runtime(self) -> IBMRuntimeService:
"""Return the runtime service.
Returns:
The runtime service instance.
Raises:
IBMNotAuthorizedError: If the account is not authorized to use the service.
"""
if self._runtime:
return self._runtime
else:
raise IBMNotAuthorizedError("You are not authorized to use the runtime service.")
@classmethod
def active_account(cls) -> Optional[Dict[str, str]]:
"""Return the IBM Quantum account currently in use for the session.
Returns:
A dictionary with information about the account currently in the session,
None if there is no active account in session
"""
if not cls._providers:
return None
first_provider = list(cls._providers.values())[0]
return {
'token': first_provider.credentials.token,
'url': first_provider.credentials.auth_url
}
@classmethod
def providers(
cls,
token: Optional[str] = None,
url: Optional[str] = None,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
**kwargs: Any
) -> List['IBMProvider']:
"""Initialize account and return a list of providers.
Args:
token: IBM Quantum token.
url: URL for the IBM Quantum authentication server.
hub: Name of the hub.
group: Name of the group.
project: Name of the project.
**kwargs: Additional settings for the connection:
* proxies (dict): proxy configuration.
* verify (bool): verify the server's TLS certificate.
Returns:
A list of providers that match the specified criteria.
"""
account_credentials, account_preferences, *_ = cls._resolve_credentials(
token=token,
url=url,
hub=hub,
group=group,
project=project,
**kwargs
)
if not cls._providers or cls._is_different_account(account_credentials.token):
cls._initialize_providers(credentials=account_credentials,
preferences=account_preferences)
return cls._get_providers(hub=hub, group=group, project=project)
@classmethod
def _get_providers(
cls,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
) -> List['IBMProvider']:
"""Return a list of providers, subject to optional filtering.
Args:
hub: Name of the hub.
group: Name of the group.
project: Name of the project.
Returns:
A list of providers that match the specified criteria.
"""
filters = [] # type: List[Callable[[HubGroupProject], bool]]
if hub:
filters.append(lambda hgp: hgp.hub == hub)
if group:
filters.append(lambda hgp: hgp.group == group)
if project:
filters.append(lambda hgp: hgp.project == project)
providers = [provider for key, provider in cls._providers.items()
if all(f(key) for f in filters)]
return providers
@staticmethod
def delete_account() -> None:
"""Delete the saved account from disk.
Raises:
IBMProviderCredentialsNotFound: If no valid IBM Quantum
credentials can be found on disk.
IBMProviderCredentialsInvalidUrl: If invalid IBM Quantum
credentials are found on disk.
"""
stored_credentials, _ = read_credentials_from_qiskitrc()
if not stored_credentials:
raise IBMProviderCredentialsNotFound(
'No IBM Quantum credentials found on disk.')
credentials = list(stored_credentials.values())[0]
if credentials.url != QISKIT_IBM_API_URL:
raise IBMProviderCredentialsInvalidUrl(
'Invalid IBM Quantum credentials found on disk. ')
remove_credentials(credentials)
@staticmethod
def save_account(
token: str,
url: str = QISKIT_IBM_API_URL,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
overwrite: bool = False,
**kwargs: Any
) -> None:
"""Save the account to disk for future use.
Note:
If storing a default provider to disk, all three parameters
`hub`, `group`, `project` must be specified.
Args:
token: IBM Quantum token.
url: URL for the IBM Quantum authentication server.
hub: Name of the hub for the default provider to store on disk.
group: Name of the group for the default provider to store on disk.
project: Name of the project for the default provider to store on disk.
overwrite: Overwrite existing credentials.
**kwargs:
* proxies (dict): Proxy configuration for the server.
* verify (bool): If False, ignores SSL certificates errors
Raises:
IBMProviderCredentialsInvalidUrl: If the `url` is not a valid
IBM Quantum authentication URL.
IBMProviderCredentialsInvalidToken: If the `token` is not a valid
IBM Quantum token.
IBMProviderValueError: If only one or two parameters from `hub`, `group`,
`project` are specified.
"""
if url != QISKIT_IBM_API_URL:
raise IBMProviderCredentialsInvalidUrl(
'Invalid IBM Quantum credentials found.')
if not token or not isinstance(token, str):
raise IBMProviderCredentialsInvalidToken(
'Invalid IBM Quantum token '
'found: "{}" of type {}.'.format(token, type(token)))
# If any `hub`, `group`, or `project` is specified, make sure all parameters are set.
if any([hub, group, project]) and not all([hub, group, project]):
raise IBMProviderValueError('The hub, group, and project parameters must all be '
'specified when storing a default provider to disk: '
'hub = "{}", group = "{}", project = "{}"'
.format(hub, group, project))
# If specified, get the provider to store.
default_provider_hgp = HubGroupProject(hub, group, project) \
if all([hub, group, project]) else None
credentials = Credentials(token=token, url=url,
default_provider=default_provider_hgp, **kwargs)
store_credentials(credentials,
overwrite=overwrite)
@staticmethod
def saved_account() -> Dict[str, str]:
"""List the account saved on disk.
Returns:
A dictionary with information about the account saved on disk.
Raises:
IBMProviderCredentialsInvalidUrl: If invalid IBM Quantum
credentials are found on disk.
"""
stored_credentials, _ = read_credentials_from_qiskitrc()
if not stored_credentials:
return {}
credentials = list(stored_credentials.values())[0]
if credentials.url != QISKIT_IBM_API_URL:
raise IBMProviderCredentialsInvalidUrl(
'Invalid IBM Quantum credentials found on disk.')
return {
'token': credentials.token,
'url': credentials.url
}
def backends(
self,
name: Optional[str] = None,
filters: Optional[Callable[[List[IBMBackend]], bool]] = None,
min_num_qubits: Optional[int] = None,
input_allowed: Optional[Union[str, List[str]]] = None,
**kwargs: Any
) -> List[IBMBackend]:
"""Return all backends accessible via this provider, subject to optional filtering.
Args:
name: Backend name to filter by.
filters: More complex filters, such as lambda functions.
For example::
IBMProvider.backends(filters=lambda b: b.configuration().quantum_volume > 16)
min_num_qubits: Minimum number of qubits the backend has to have.
input_allowed: Filter by the types of input the backend supports.
Valid input types are ``job`` (circuit job) and ``runtime`` (Qiskit Runtime).
For example, ``inputs_allowed='runtime'`` will return all backends
that support Qiskit Runtime. If a list is given, the backend must
support all types specified in the list.
kwargs: Simple filters that specify a ``True``/``False`` criteria in the
backend configuration, backends status, or provider credentials.
An example to get the operational backends with 5 qubits::
IBMProvider.backends(n_qubits=5, operational=True)
Returns:
The list of available backends that match the filter.
"""
# pylint: disable=arguments-differ
return self._backend.backends(name=name, filters=filters, min_num_qubits=min_num_qubits,
input_allowed=input_allowed, **kwargs)
def has_service(self, name: str) -> bool:
"""Check if this provider has access to the service.
Args:
name: Name of the service.
Returns:
Whether the provider has access to the service.
Raises:
IBMInputValueError: If an unknown service name is specified.
"""
if name not in self._services:
raise IBMInputValueError(f"Unknown service {name} specified.")
if self._services[name] is None:
return False
return True
def run_circuits(
self,
circuits: Union[QuantumCircuit, List[QuantumCircuit]],
backend: Union[Backend, BaseBackend],
shots: Optional[int] = None,
initial_layout: Optional[Union[Layout, Dict, List]] = None,
layout_method: Optional[str] = None,
routing_method: Optional[str] = None,
translation_method: Optional[str] = None,
seed_transpiler: Optional[int] = None,
optimization_level: int = 1,
init_qubits: bool = True,
rep_delay: Optional[float] = None,
transpiler_options: Optional[dict] = None,
measurement_error_mitigation: bool = False,
use_measure_esp: Optional[bool] = None,
**run_config: Dict
) -> 'runtime_job.RuntimeJob':
"""Execute the input circuit(s) on a backend using the runtime service.
Note:
This method uses the IBM Quantum runtime service which is not
available to all accounts.
Args:
circuits: Circuit(s) to execute.
backend: Backend to execute circuits on.
Transpiler options are automatically grabbed from backend configuration
and properties unless otherwise specified.
shots: Number of repetitions of each circuit, for sampling. If not specified,
the backend default is used.
initial_layout: Initial position of virtual qubits on physical qubits.
layout_method: Name of layout selection pass ('trivial', 'dense',
'noise_adaptive', 'sabre').
Sometimes a perfect layout can be available in which case the layout_method
may not run.
routing_method: Name of routing pass ('basic', 'lookahead', 'stochastic', 'sabre')
translation_method: Name of translation pass ('unroller', 'translator', 'synthesis')
seed_transpiler: Sets random seed for the stochastic parts of the transpiler.
optimization_level: How much optimization to perform on the circuits.
Higher levels generate more optimized circuits, at the expense of longer
transpilation time.
If None, level 1 will be chosen as default.
init_qubits: Whether to reset the qubits to the ground state for each shot.
rep_delay: Delay between programs in seconds. Only supported on certain
backends (``backend.configuration().dynamic_reprate_enabled`` ). If supported,
``rep_delay`` will be used instead of ``rep_time`` and must be from the
range supplied by the backend (``backend.configuration().rep_delay_range``).
Default is given by ``backend.configuration().default_rep_delay``.
transpiler_options: Additional transpiler options.
measurement_error_mitigation: Whether to apply measurement error mitigation.
use_measure_esp: Whether to use excited state promoted (ESP) readout for measurements
which are the final instruction on a qubit. ESP readout can offer higher fidelity
than standard measurement sequences. See
`here <https://arxiv.org/pdf/2008.08571.pdf>`_.
**run_config: Extra arguments used to configure the circuit execution.
Returns:
Runtime job.
"""
inputs = copy.deepcopy(run_config) # type: Dict[str, Any]
inputs['circuits'] = circuits
inputs['optimization_level'] = optimization_level
inputs['init_qubits'] = init_qubits
inputs['measurement_error_mitigation'] = measurement_error_mitigation
if shots:
inputs['shots'] = shots
if initial_layout:
inputs['initial_layout'] = initial_layout
if layout_method:
inputs['layout_method'] = layout_method
if routing_method:
inputs['routing_method'] = routing_method
if translation_method:
inputs['translation_method'] = translation_method
if seed_transpiler:
inputs['seed_transpiler'] = seed_transpiler
if rep_delay:
inputs['rep_delay'] = rep_delay
if transpiler_options:
inputs['transpiler_options'] = transpiler_options
if use_measure_esp is not None:
inputs['use_measure_esp'] = use_measure_esp
options = {'backend_name': backend.name()}
return self.runtime.run('circuit-runner', options=options, inputs=inputs,
result_decoder=RunnerResult)
def service(self, name: str) -> Any:
"""Return the specified service.
Args:
name: Name of the service.
Returns:
The specified service.
Raises:
IBMInputValueError: If an unknown service name is specified.
IBMNotAuthorizedError: If the account is not authorized to use
the service.
"""
if name not in self._services:
raise IBMInputValueError(f"Unknown service {name} specified.")
if self._services[name] is None:
raise IBMNotAuthorizedError("You are not authorized to use this service.")
return self._services[name]
def services(self) -> Dict:
"""Return all available services.
Returns:
All services available to this provider.
"""
return {key: val for key, val in self._services.items() if val is not None}
def __eq__(
self,
other: Any
) -> bool:
if not isinstance(other, IBMProvider):
return False
return self.credentials == other.credentials
def __repr__(self) -> str:
credentials_info = "hub='{}', group='{}', project='{}'".format(
self.credentials.hub, self.credentials.group, self.credentials.project)
return "<{}({})>".format(self.__class__.__name__, credentials_info) | 0.905927 | 0.167491 |
from TestHelperSuperClass import testHelperSuperClass
class local_helpers(testHelperSuperClass):
def deleteConsumer(self, consumer_name):
resp, respCode = self.callKongService("/consumers/" + consumer_name, {}, "get", None, [200, 404])
if respCode == 404:
return False
resp, respCode = self.callKongService("/consumers/" + consumer_name, {}, "delete", None, [204])
resp, respCode = self.callKongService("/consumers/" + consumer_name, {}, "get", None, [404])
return True
class test_kong_install_consumer_with_api(local_helpers):
def test_noArgs(self):
cmdToExecute = "./scripts/kong_install_consumer_with_api"
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Wrong number of params\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [1], 1, False)
def test_invalidMode(self):
cmdToExecute = "./scripts/kong_install_consumer_with_api " + self.kong_server + " invalid_mode consumer_name"
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Error mode must be DELETE or IGNORE\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [1], 1, False)
def test_deleteMode_notAlreadyExist(self):
mode = "DELETE"
consumer_name = "testNewConsumer"
self.deleteConsumer(consumer_name)
cmdToExecute = "./scripts/kong_install_consumer_with_api " + self.kong_server + " " + mode + " " + consumer_name
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Error mode must be DELETE or IGNORE\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [0], 1, True)
# ignore output test function
resp, respCode = self.callKongService("/consumers/" + consumer_name, {}, "get", None, [200])
self.assertEqual(resp["username"],consumer_name, msg="Username not created")
consumer_id = resp["id"]
print(consumer_id)
#check we have an api key
resp, respCode = self.callKongService("/consumers/" + consumer_name + "/key-auth", {}, "get", None, [200])
self.assertEqual(len(resp["data"]),1,msg="API key wrong")
if self.expected_kong_version == "1.1.2":
self.assertEqual(resp["data"][0]["consumer"]["id"],consumer_id)
else:
self.assertEqual(resp["data"][0]["consumer_id"],consumer_id)
def test_deleteMode_AlreadyExist(self):
#Create a consumer
mode = "DELETE"
consumer_name = "testNewConsumer"
self.deleteConsumer(consumer_name)
cmdToExecute = "./scripts/kong_install_consumer_with_api " + self.kong_server + " " + mode + " " + consumer_name
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Error mode must be DELETE or IGNORE\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [0], 1, True)
resp, respCode = self.callKongService("/consumers/" + consumer_name, {}, "get", None, [200])
resp, respCode = self.callKongService("/consumers/" + consumer_name + "/key-auth", {}, "get", None, [200])
firstKeyAssigned = resp["data"][0]["key"]
#Add consumer using delete mode
mode = "DELETE"
consumer_name = "testNewConsumer"
cmdToExecute = "./scripts/kong_install_consumer_with_api " + self.kong_server + " " + mode + " " + consumer_name
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Error mode must be DELETE or IGNORE\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [0], 1, True)
resp, respCode = self.callKongService("/consumers/" + consumer_name + "/key-auth", {}, "get", None, [200])
secondKeyAssigned = resp["data"][0]["key"]
self.assertNotEqual(firstKeyAssigned, secondKeyAssigned, msg="Newly assigned key should not match in delete mode")
def test_ignoreMode_AlreadyExist(self):
#Create a consumer
mode = "DELETE"
consumer_name = "testNewConsumer"
self.deleteConsumer(consumer_name)
cmdToExecute = "./scripts/kong_install_consumer_with_api " + self.kong_server + " " + mode + " " + consumer_name
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Error mode must be DELETE or IGNORE\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [0], 1, True)
resp, respCode = self.callKongService("/consumers/" + consumer_name, {}, "get", None, [200])
resp, respCode = self.callKongService("/consumers/" + consumer_name + "/key-auth", {}, "get", None, [200])
firstKeyAssigned = resp["data"][0]["key"]
#Add consumer using delete mode
mode = "IGNORE"
consumer_name = "testNewConsumer"
cmdToExecute = "./scripts/kong_install_consumer_with_api " + self.kong_server + " " + mode + " " + consumer_name
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Error mode must be DELETE or IGNORE\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [0], 1, True)
resp, respCode = self.callKongService("/consumers/" + consumer_name + "/key-auth", {}, "get", None, [200])
secondKeyAssigned = resp["data"][0]["key"]
self.assertEqual(firstKeyAssigned, secondKeyAssigned, msg="Newly assigned key should match in ignore mode") | test/test_kong_install_consumer_with_api.py | from TestHelperSuperClass import testHelperSuperClass
class local_helpers(testHelperSuperClass):
def deleteConsumer(self, consumer_name):
resp, respCode = self.callKongService("/consumers/" + consumer_name, {}, "get", None, [200, 404])
if respCode == 404:
return False
resp, respCode = self.callKongService("/consumers/" + consumer_name, {}, "delete", None, [204])
resp, respCode = self.callKongService("/consumers/" + consumer_name, {}, "get", None, [404])
return True
class test_kong_install_consumer_with_api(local_helpers):
def test_noArgs(self):
cmdToExecute = "./scripts/kong_install_consumer_with_api"
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Wrong number of params\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [1], 1, False)
def test_invalidMode(self):
cmdToExecute = "./scripts/kong_install_consumer_with_api " + self.kong_server + " invalid_mode consumer_name"
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Error mode must be DELETE or IGNORE\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [1], 1, False)
def test_deleteMode_notAlreadyExist(self):
mode = "DELETE"
consumer_name = "testNewConsumer"
self.deleteConsumer(consumer_name)
cmdToExecute = "./scripts/kong_install_consumer_with_api " + self.kong_server + " " + mode + " " + consumer_name
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Error mode must be DELETE or IGNORE\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [0], 1, True)
# ignore output test function
resp, respCode = self.callKongService("/consumers/" + consumer_name, {}, "get", None, [200])
self.assertEqual(resp["username"],consumer_name, msg="Username not created")
consumer_id = resp["id"]
print(consumer_id)
#check we have an api key
resp, respCode = self.callKongService("/consumers/" + consumer_name + "/key-auth", {}, "get", None, [200])
self.assertEqual(len(resp["data"]),1,msg="API key wrong")
if self.expected_kong_version == "1.1.2":
self.assertEqual(resp["data"][0]["consumer"]["id"],consumer_id)
else:
self.assertEqual(resp["data"][0]["consumer_id"],consumer_id)
def test_deleteMode_AlreadyExist(self):
#Create a consumer
mode = "DELETE"
consumer_name = "testNewConsumer"
self.deleteConsumer(consumer_name)
cmdToExecute = "./scripts/kong_install_consumer_with_api " + self.kong_server + " " + mode + " " + consumer_name
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Error mode must be DELETE or IGNORE\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [0], 1, True)
resp, respCode = self.callKongService("/consumers/" + consumer_name, {}, "get", None, [200])
resp, respCode = self.callKongService("/consumers/" + consumer_name + "/key-auth", {}, "get", None, [200])
firstKeyAssigned = resp["data"][0]["key"]
#Add consumer using delete mode
mode = "DELETE"
consumer_name = "testNewConsumer"
cmdToExecute = "./scripts/kong_install_consumer_with_api " + self.kong_server + " " + mode + " " + consumer_name
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Error mode must be DELETE or IGNORE\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [0], 1, True)
resp, respCode = self.callKongService("/consumers/" + consumer_name + "/key-auth", {}, "get", None, [200])
secondKeyAssigned = resp["data"][0]["key"]
self.assertNotEqual(firstKeyAssigned, secondKeyAssigned, msg="Newly assigned key should not match in delete mode")
def test_ignoreMode_AlreadyExist(self):
#Create a consumer
mode = "DELETE"
consumer_name = "testNewConsumer"
self.deleteConsumer(consumer_name)
cmdToExecute = "./scripts/kong_install_consumer_with_api " + self.kong_server + " " + mode + " " + consumer_name
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Error mode must be DELETE or IGNORE\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [0], 1, True)
resp, respCode = self.callKongService("/consumers/" + consumer_name, {}, "get", None, [200])
resp, respCode = self.callKongService("/consumers/" + consumer_name + "/key-auth", {}, "get", None, [200])
firstKeyAssigned = resp["data"][0]["key"]
#Add consumer using delete mode
mode = "IGNORE"
consumer_name = "testNewConsumer"
cmdToExecute = "./scripts/kong_install_consumer_with_api " + self.kong_server + " " + mode + " " + consumer_name
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_install_consumer_with_api\n"
expectedOutput += "ERROR Error mode must be DELETE or IGNORE\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [0], 1, True)
resp, respCode = self.callKongService("/consumers/" + consumer_name + "/key-auth", {}, "get", None, [200])
secondKeyAssigned = resp["data"][0]["key"]
self.assertEqual(firstKeyAssigned, secondKeyAssigned, msg="Newly assigned key should match in ignore mode") | 0.458106 | 0.13109 |
import os
import sys
import unittest
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from neon_api_proxy.wolfram_api import WolframAPI, QueryUrl
VALID_QUERY_IP = {"query": "how far away is Moscow?",
"units": "metric",
"ip": "172.16.17.32"}
VALID_QUERY_LAT_LON = {"query": "how far away is new york?",
"units": "nonmetric",
"lat": "47.4797",
"lng": "122.2079"}
VALID_QUERY_LAT_LON_IP = {"query": "how far away is Bellevue?",
"units": "nonmetric",
"lat": "47.4797",
"lng": "122.2079",
"ip": "172.16.17.32"}
VALID_QUERY_MINIMAL = {"query": "how far away is Miami?"}
INVALID_QUERY = {}
FULL_QUERY = "https://api.wolframalpha.com/v2/query?input=what+time+is+it&format=image,plaintext&output=XML&appid=DEMO"
SIMPLE_QUERY = "https://api.wolframalpha.com/v1/simple?i=Who+is+the+prime+minister+of+India%3F&appid=DEMO"
SPOKEN_QUERY = "https://api.wolframalpha.com/v1/spoken?i=Convert+42+mi+to+km&appid=DEMO"
SHORT_QUERY = "https://api.wolframalpha.com/v1/result?i=How+many+ounces+are+in+a+gallon%3F&appid=DEMO"
CONVERSE_QUERY = "http://api.wolframalpha.com/v1/conversation.jsp?appid=DEMO&i=How+much+does+the+earth+weigh%3f"
RECOGNIZE_QUERY = "https://www.wolframalpha.com/queryrecognizer/query.jsp?mode=Default" \
"&i=How+far+away+is+the+Moon%3F&appid=DEMO"
class TestWolframAPI(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.api = WolframAPI()
def test_build_query_url_valid(self):
url = self.api._build_query_url(QueryUrl.SHORT, "i=what+is+this")
self.assertEqual(url, f"{QueryUrl.SHORT}?appid={self.api._api_key}&i=what+is+this")
def test_build_query_url_invalid_param_values(self):
with self.assertRaises(ValueError):
self.api._build_query_url(QueryUrl.FULL, None)
with self.assertRaises(ValueError):
self.api._build_query_url(None, "i=something")
def test_build_query_url_invalid_param_types(self):
with self.assertRaises(TypeError):
self.api._build_query_url("http://api.wolframalpha.com/v2/result", "i=42")
with self.assertRaises(TypeError):
self.api._build_query_url(QueryUrl.SHORT, 42)
def test_build_query_string_valid_minimal(self):
query_str = self.api._build_query_string(**VALID_QUERY_MINIMAL)
self.assertEqual(query_str, f"i=how+far+away+is+Miami%3F&units=nonmetric")
def test_build_query_string_valid_lat_lng(self):
query_str = self.api._build_query_string(**VALID_QUERY_LAT_LON)
self.assertEqual(query_str, f"i=how+far+away+is+new+york%3F&units=nonmetric&latlong=47.4797%2C122.2079")
def test_build_query_string_valid_ip(self):
query_str = self.api._build_query_string(**VALID_QUERY_IP)
self.assertEqual(query_str, f"i=how+far+away+is+Moscow%3F&units=metric&ip=172.16.17.32")
def test_build_query_string_valid_lat_lng_ip(self):
query_str = self.api._build_query_string(**VALID_QUERY_LAT_LON_IP)
self.assertEqual(query_str, f"i=how+far+away+is+Bellevue%3F&units=nonmetric&latlong=47.4797%2C122.2079")
def test_build_query_invalid_query(self):
with self.assertRaises(ValueError):
self.api._build_query_string(**INVALID_QUERY)
def test_query_full_api(self):
result = self.api._query_api(FULL_QUERY)
self.assertIsInstance(result, dict)
self.assertIsInstance(result["content"].decode(result["encoding"]), str)
def test_query_simple_api(self):
result = self.api._query_api(SIMPLE_QUERY)
self.assertIsInstance(result, dict)
self.assertIsInstance(result["content"], bytes)
self.assertIsNone(result["encoding"], result["content"])
def test_query_spoken_api(self):
result = self.api._query_api(SPOKEN_QUERY)
self.assertIsInstance(result, dict)
self.assertIsInstance(result["content"].decode(result["encoding"]), str)
def test_query_short_api(self):
result = self.api._query_api(SHORT_QUERY)
self.assertIsInstance(result, dict)
self.assertIsInstance(result["content"].decode(result["encoding"]), str)
def test_query_recognize_api(self):
result = self.api._query_api(RECOGNIZE_QUERY)
self.assertIsInstance(result, dict)
self.assertIsInstance(result["content"].decode(result["encoding"]), str)
def test_handle_query_invalid_type(self):
resp = self.api.handle_query(api="basic")
self.assertEqual(resp["status_code"], -1)
def test_handle_query_invalid_query(self):
resp = self.api.handle_query(api="simple")
self.assertEqual(resp["status_code"], -1)
def test_handle_query_invalid_response(self):
resp = self.api.handle_query(api="short",
query="i like",
units="metric",
ip="172.16.17.32")
self.assertIsInstance(resp, dict)
self.assertEqual(resp["status_code"], 501)
def test_handle_query_invalid_key(self):
from copy import deepcopy
valid_key = deepcopy(self.api._api_key)
self.api._api_key = ""
resp = self.api.handle_query(query="how far away is mars")
self.assertIsInstance(resp, dict)
self.assertEqual(resp["status_code"], 403)
self.assertIsInstance(resp["content"], bytes)
self.assertIsInstance(resp["encoding"], str)
self.assertIsInstance(resp["content"].decode(resp["encoding"]), str)
self.api._api_key = valid_key
def test_handle_query_valid_ip(self):
resp = self.api.handle_query(api="short",
query="how far away is the moon?",
units="metric",
ip="172.16.17.32")
self.assertIsInstance(resp, dict)
self.assertEqual(resp["status_code"], 200)
self.assertIsInstance(resp["content"], bytes)
self.assertIsInstance(resp["encoding"], str)
self.assertIsInstance(resp["content"].decode(resp["encoding"]), str)
cached = self.api.handle_query(api="short",
query="how far away is the moon?",
units="metric",
ip="172.16.17.32")
self.assertEqual(resp, cached)
def test_handle_query_valid_lat_lng(self):
resp = self.api.handle_query(api="short",
query="how far away is the moon?",
units="metric",
lat="47.4797",
lng="-122.2079")
self.assertIsInstance(resp, dict)
self.assertEqual(resp["status_code"], 200)
self.assertIsInstance(resp["content"], bytes)
self.assertIsInstance(resp["encoding"], str)
self.assertIsInstance(resp["content"].decode(resp["encoding"]), str)
def test_handle_query_valid_latlong(self):
resp = self.api.handle_query(api="short",
query="how far away is the moon?",
units="metric",
latlong="47.4797,-122.2079")
self.assertIsInstance(resp, dict)
self.assertEqual(resp["status_code"], 200)
self.assertIsInstance(resp["content"], bytes)
self.assertIsInstance(resp["encoding"], str)
self.assertIsInstance(resp["content"].decode(resp["encoding"]), str)
same = self.api.handle_query(api="short",
query="how far away is the moon?",
units="metric",
lat="47.4797",
lng="-122.2079")
self.assertEqual(resp, same)
if __name__ == '__main__':
unittest.main() | tests/test_wolfram_api.py |
import os
import sys
import unittest
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from neon_api_proxy.wolfram_api import WolframAPI, QueryUrl
VALID_QUERY_IP = {"query": "how far away is Moscow?",
"units": "metric",
"ip": "172.16.17.32"}
VALID_QUERY_LAT_LON = {"query": "how far away is new york?",
"units": "nonmetric",
"lat": "47.4797",
"lng": "122.2079"}
VALID_QUERY_LAT_LON_IP = {"query": "how far away is Bellevue?",
"units": "nonmetric",
"lat": "47.4797",
"lng": "122.2079",
"ip": "172.16.17.32"}
VALID_QUERY_MINIMAL = {"query": "how far away is Miami?"}
INVALID_QUERY = {}
FULL_QUERY = "https://api.wolframalpha.com/v2/query?input=what+time+is+it&format=image,plaintext&output=XML&appid=DEMO"
SIMPLE_QUERY = "https://api.wolframalpha.com/v1/simple?i=Who+is+the+prime+minister+of+India%3F&appid=DEMO"
SPOKEN_QUERY = "https://api.wolframalpha.com/v1/spoken?i=Convert+42+mi+to+km&appid=DEMO"
SHORT_QUERY = "https://api.wolframalpha.com/v1/result?i=How+many+ounces+are+in+a+gallon%3F&appid=DEMO"
CONVERSE_QUERY = "http://api.wolframalpha.com/v1/conversation.jsp?appid=DEMO&i=How+much+does+the+earth+weigh%3f"
RECOGNIZE_QUERY = "https://www.wolframalpha.com/queryrecognizer/query.jsp?mode=Default" \
"&i=How+far+away+is+the+Moon%3F&appid=DEMO"
class TestWolframAPI(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.api = WolframAPI()
def test_build_query_url_valid(self):
url = self.api._build_query_url(QueryUrl.SHORT, "i=what+is+this")
self.assertEqual(url, f"{QueryUrl.SHORT}?appid={self.api._api_key}&i=what+is+this")
def test_build_query_url_invalid_param_values(self):
with self.assertRaises(ValueError):
self.api._build_query_url(QueryUrl.FULL, None)
with self.assertRaises(ValueError):
self.api._build_query_url(None, "i=something")
def test_build_query_url_invalid_param_types(self):
with self.assertRaises(TypeError):
self.api._build_query_url("http://api.wolframalpha.com/v2/result", "i=42")
with self.assertRaises(TypeError):
self.api._build_query_url(QueryUrl.SHORT, 42)
def test_build_query_string_valid_minimal(self):
query_str = self.api._build_query_string(**VALID_QUERY_MINIMAL)
self.assertEqual(query_str, f"i=how+far+away+is+Miami%3F&units=nonmetric")
def test_build_query_string_valid_lat_lng(self):
query_str = self.api._build_query_string(**VALID_QUERY_LAT_LON)
self.assertEqual(query_str, f"i=how+far+away+is+new+york%3F&units=nonmetric&latlong=47.4797%2C122.2079")
def test_build_query_string_valid_ip(self):
query_str = self.api._build_query_string(**VALID_QUERY_IP)
self.assertEqual(query_str, f"i=how+far+away+is+Moscow%3F&units=metric&ip=172.16.17.32")
def test_build_query_string_valid_lat_lng_ip(self):
query_str = self.api._build_query_string(**VALID_QUERY_LAT_LON_IP)
self.assertEqual(query_str, f"i=how+far+away+is+Bellevue%3F&units=nonmetric&latlong=47.4797%2C122.2079")
def test_build_query_invalid_query(self):
with self.assertRaises(ValueError):
self.api._build_query_string(**INVALID_QUERY)
def test_query_full_api(self):
result = self.api._query_api(FULL_QUERY)
self.assertIsInstance(result, dict)
self.assertIsInstance(result["content"].decode(result["encoding"]), str)
def test_query_simple_api(self):
result = self.api._query_api(SIMPLE_QUERY)
self.assertIsInstance(result, dict)
self.assertIsInstance(result["content"], bytes)
self.assertIsNone(result["encoding"], result["content"])
def test_query_spoken_api(self):
result = self.api._query_api(SPOKEN_QUERY)
self.assertIsInstance(result, dict)
self.assertIsInstance(result["content"].decode(result["encoding"]), str)
def test_query_short_api(self):
result = self.api._query_api(SHORT_QUERY)
self.assertIsInstance(result, dict)
self.assertIsInstance(result["content"].decode(result["encoding"]), str)
def test_query_recognize_api(self):
result = self.api._query_api(RECOGNIZE_QUERY)
self.assertIsInstance(result, dict)
self.assertIsInstance(result["content"].decode(result["encoding"]), str)
def test_handle_query_invalid_type(self):
resp = self.api.handle_query(api="basic")
self.assertEqual(resp["status_code"], -1)
def test_handle_query_invalid_query(self):
resp = self.api.handle_query(api="simple")
self.assertEqual(resp["status_code"], -1)
def test_handle_query_invalid_response(self):
resp = self.api.handle_query(api="short",
query="i like",
units="metric",
ip="172.16.17.32")
self.assertIsInstance(resp, dict)
self.assertEqual(resp["status_code"], 501)
def test_handle_query_invalid_key(self):
from copy import deepcopy
valid_key = deepcopy(self.api._api_key)
self.api._api_key = ""
resp = self.api.handle_query(query="how far away is mars")
self.assertIsInstance(resp, dict)
self.assertEqual(resp["status_code"], 403)
self.assertIsInstance(resp["content"], bytes)
self.assertIsInstance(resp["encoding"], str)
self.assertIsInstance(resp["content"].decode(resp["encoding"]), str)
self.api._api_key = valid_key
def test_handle_query_valid_ip(self):
resp = self.api.handle_query(api="short",
query="how far away is the moon?",
units="metric",
ip="172.16.17.32")
self.assertIsInstance(resp, dict)
self.assertEqual(resp["status_code"], 200)
self.assertIsInstance(resp["content"], bytes)
self.assertIsInstance(resp["encoding"], str)
self.assertIsInstance(resp["content"].decode(resp["encoding"]), str)
cached = self.api.handle_query(api="short",
query="how far away is the moon?",
units="metric",
ip="172.16.17.32")
self.assertEqual(resp, cached)
def test_handle_query_valid_lat_lng(self):
resp = self.api.handle_query(api="short",
query="how far away is the moon?",
units="metric",
lat="47.4797",
lng="-122.2079")
self.assertIsInstance(resp, dict)
self.assertEqual(resp["status_code"], 200)
self.assertIsInstance(resp["content"], bytes)
self.assertIsInstance(resp["encoding"], str)
self.assertIsInstance(resp["content"].decode(resp["encoding"]), str)
def test_handle_query_valid_latlong(self):
resp = self.api.handle_query(api="short",
query="how far away is the moon?",
units="metric",
latlong="47.4797,-122.2079")
self.assertIsInstance(resp, dict)
self.assertEqual(resp["status_code"], 200)
self.assertIsInstance(resp["content"], bytes)
self.assertIsInstance(resp["encoding"], str)
self.assertIsInstance(resp["content"].decode(resp["encoding"]), str)
same = self.api.handle_query(api="short",
query="how far away is the moon?",
units="metric",
lat="47.4797",
lng="-122.2079")
self.assertEqual(resp, same)
if __name__ == '__main__':
unittest.main() | 0.405566 | 0.334426 |
import logging
import os
from pathlib import Path
from make_prg import make_prg_from_msa, io_utils
def run(options):
if options.output_prefix is None:
prefix = options.MSA
else:
if os.path.isdir(options.output_prefix):
prefix = os.path.join(options.output_prefix, os.path.basename(options.MSA))
else:
prefix = options.output_prefix
prefix += ".max_nest%d.min_match%d" % (
options.max_nesting,
options.min_match_length,
)
if options.verbose:
log_level = logging.DEBUG
msg = "Using debug logging"
else:
log_level = logging.INFO
msg = "Using info logging"
log_file = f"{prefix}.log"
if os.path.exists(log_file):
os.unlink(log_file)
logging.basicConfig(
filename=log_file,
level=log_level,
format="%(asctime)s %(message)s",
datefmt="%d/%m/%Y %I:%M:%S",
)
logging.info(msg)
logging.info(
"Input parameters max_nesting: %d, min_match_length: %d",
options.max_nesting,
options.min_match_length,
)
if os.path.isfile("%s.prg" % prefix) and options.no_overwrite:
prg_file = "%s.prg" % prefix
logging.info(f"Re-using existing prg file {prg_file}")
aseq = make_prg_from_msa.AlignedSeq(
options.MSA,
alignment_format=options.alignment_format,
max_nesting=options.max_nesting,
min_match_length=options.min_match_length,
prg_file=prg_file,
)
else:
aseq = make_prg_from_msa.AlignedSeq(
options.MSA,
alignment_format=options.alignment_format,
max_nesting=options.max_nesting,
min_match_length=options.min_match_length,
)
logging.info(f"Write PRG file to {prefix}.prg")
io_utils.write_prg(prefix, aseq.prg)
m = aseq.max_nesting_level_reached
logging.info(f"Max_nesting_reached\t{m}")
logging.info(f"Write GFA file to {prefix}.gfa")
io_utils.write_gfa(f"{prefix}.gfa", aseq.prg)
summary_file = Path(prefix).parent / "summary.tsv"
with summary_file.open("a") as s:
s.write(
f"{options.MSA}\t{aseq.site - 2}\t"
f"{aseq.max_nesting_level_reached}\t{aseq.prop_in_match_intervals}\n"
) | make_prg/subcommands/prg_from_msa.py | import logging
import os
from pathlib import Path
from make_prg import make_prg_from_msa, io_utils
def run(options):
if options.output_prefix is None:
prefix = options.MSA
else:
if os.path.isdir(options.output_prefix):
prefix = os.path.join(options.output_prefix, os.path.basename(options.MSA))
else:
prefix = options.output_prefix
prefix += ".max_nest%d.min_match%d" % (
options.max_nesting,
options.min_match_length,
)
if options.verbose:
log_level = logging.DEBUG
msg = "Using debug logging"
else:
log_level = logging.INFO
msg = "Using info logging"
log_file = f"{prefix}.log"
if os.path.exists(log_file):
os.unlink(log_file)
logging.basicConfig(
filename=log_file,
level=log_level,
format="%(asctime)s %(message)s",
datefmt="%d/%m/%Y %I:%M:%S",
)
logging.info(msg)
logging.info(
"Input parameters max_nesting: %d, min_match_length: %d",
options.max_nesting,
options.min_match_length,
)
if os.path.isfile("%s.prg" % prefix) and options.no_overwrite:
prg_file = "%s.prg" % prefix
logging.info(f"Re-using existing prg file {prg_file}")
aseq = make_prg_from_msa.AlignedSeq(
options.MSA,
alignment_format=options.alignment_format,
max_nesting=options.max_nesting,
min_match_length=options.min_match_length,
prg_file=prg_file,
)
else:
aseq = make_prg_from_msa.AlignedSeq(
options.MSA,
alignment_format=options.alignment_format,
max_nesting=options.max_nesting,
min_match_length=options.min_match_length,
)
logging.info(f"Write PRG file to {prefix}.prg")
io_utils.write_prg(prefix, aseq.prg)
m = aseq.max_nesting_level_reached
logging.info(f"Max_nesting_reached\t{m}")
logging.info(f"Write GFA file to {prefix}.gfa")
io_utils.write_gfa(f"{prefix}.gfa", aseq.prg)
summary_file = Path(prefix).parent / "summary.tsv"
with summary_file.open("a") as s:
s.write(
f"{options.MSA}\t{aseq.site - 2}\t"
f"{aseq.max_nesting_level_reached}\t{aseq.prop_in_match_intervals}\n"
) | 0.281702 | 0.072735 |
from __future__ import print_function
# Used to process batch download from plasmoDB to be input into GeneTargeter
from builtins import str
from py.utils.GenBankToolbox import *
from py.utils.BioUtils import *
from copy import deepcopy
# use to process plasmoDB fasta download (no introns, use gff_to_genbank now
# instead!)
def processFastas(fastaFilepath):
fastas = loadFastas(fastaFilepath);
genes = [];
for f in fastas:
info = f.split("|");
geneName = (info[0].strip()+"_"+info[2].strip()).replace("/",",").replace(" ","_");
genIndexes = [1000,len(fastas[f])-1000]
geneSeq = fastas[f][genIndexes[0]:genIndexes[1]];
geneAnn = GenBankAnn(geneName, "gene", geneSeq, False, genIndexes);
gene = GenBank();
gene.name = geneName;
gene.origin = fastas[f];
gene.features.append(geneAnn);
gene.save("gb/"+geneName+".gb",True);
if "WARNING:" in f:
print("WARNING in seq " + geneName);
# Used to process gff_to_genbank output
def processGenBank(gbFilePath):
txt = open(gbFilePath); # Access given file
d = txt.read(); # Read file
gbTxts = d.split("//"); # Split each sequence (only first occurrence of \n) into lines: first line contains sequence name. Returns d as a list of lists (sequences) containing strings (seq name, seq).
count = 0;
total = d.count("gene");
for gbTxt in gbTxts:
if len(gbTxt.strip()) > 0:
gb = GenBank();
gb.load(gbTxt+"//",loadFromFile=False);
for annOriginal in gb.features:
annGB = deepcopy(gb);
annsLabel = annGB.findAnnsLabel(annOriginal.label)
ann = GenBankAnn()
for a in annsLabel:
if a.label == annOriginal.label:
ann = a
break
if len(ann.label) == 0:
print("ERROR: no annotation found")
if ann.type == "gene":
annGB.removeSeq([0,max(ann.index[0]-1000,0)]);
annGB.removeSeq([min(ann.index[1]+1000,len(annGB.origin)-1),len(annGB.origin)]);
annGB.name = ann.label;
annGB.save("geneFiles/"+ann.label+".gb",saveToFile=True);
count+=1;
print([str(count)+"/"+str(total)]); | py/auxiliary/Multiseq.py | from __future__ import print_function
# Used to process batch download from plasmoDB to be input into GeneTargeter
from builtins import str
from py.utils.GenBankToolbox import *
from py.utils.BioUtils import *
from copy import deepcopy
# use to process plasmoDB fasta download (no introns, use gff_to_genbank now
# instead!)
def processFastas(fastaFilepath):
fastas = loadFastas(fastaFilepath);
genes = [];
for f in fastas:
info = f.split("|");
geneName = (info[0].strip()+"_"+info[2].strip()).replace("/",",").replace(" ","_");
genIndexes = [1000,len(fastas[f])-1000]
geneSeq = fastas[f][genIndexes[0]:genIndexes[1]];
geneAnn = GenBankAnn(geneName, "gene", geneSeq, False, genIndexes);
gene = GenBank();
gene.name = geneName;
gene.origin = fastas[f];
gene.features.append(geneAnn);
gene.save("gb/"+geneName+".gb",True);
if "WARNING:" in f:
print("WARNING in seq " + geneName);
# Used to process gff_to_genbank output
def processGenBank(gbFilePath):
txt = open(gbFilePath); # Access given file
d = txt.read(); # Read file
gbTxts = d.split("//"); # Split each sequence (only first occurrence of \n) into lines: first line contains sequence name. Returns d as a list of lists (sequences) containing strings (seq name, seq).
count = 0;
total = d.count("gene");
for gbTxt in gbTxts:
if len(gbTxt.strip()) > 0:
gb = GenBank();
gb.load(gbTxt+"//",loadFromFile=False);
for annOriginal in gb.features:
annGB = deepcopy(gb);
annsLabel = annGB.findAnnsLabel(annOriginal.label)
ann = GenBankAnn()
for a in annsLabel:
if a.label == annOriginal.label:
ann = a
break
if len(ann.label) == 0:
print("ERROR: no annotation found")
if ann.type == "gene":
annGB.removeSeq([0,max(ann.index[0]-1000,0)]);
annGB.removeSeq([min(ann.index[1]+1000,len(annGB.origin)-1),len(annGB.origin)]);
annGB.name = ann.label;
annGB.save("geneFiles/"+ann.label+".gb",saveToFile=True);
count+=1;
print([str(count)+"/"+str(total)]); | 0.3295 | 0.266113 |
import os
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import nyaggle.feature_store as fs
from nyaggle.testing import get_temp_directory
def test_save_feature():
df = pd.DataFrame()
df['a'] = np.arange(100)
with get_temp_directory() as tmp:
fs.save_feature(df, 0, tmp)
assert os.path.exists(os.path.join(tmp, '0.f'))
def test_load_feature():
df = pd.DataFrame()
df['a'] = np.arange(100)
with get_temp_directory() as tmp:
fs.save_feature(df, 0, tmp)
df_loaded = fs.load_feature(0, tmp)
assert_frame_equal(df, df_loaded)
def test_multi_columns():
df = pd.DataFrame()
df['a'] = np.arange(100)
df['b'] = None
with get_temp_directory() as tmp:
fs.save_feature(df, 0, tmp)
df_loaded = fs.load_feature(0, tmp)
assert_frame_equal(df, df_loaded)
def test_various_dtypes():
df = pd.DataFrame()
df['a'] = np.arange(100).astype(float)
df['b'] = np.arange(100).astype(int)
df['c'] = np.arange(100).astype(np.uint8)
df['d'] = np.arange(100).astype(np.uint16)
df['e'] = np.arange(100).astype(np.uint32)
df['f'] = np.arange(100).astype(np.int8)
df['g'] = np.arange(100).astype(np.int16)
df['h'] = np.arange(100).astype(np.int32)
df['i'] = np.arange(100).astype(np.int64)
with get_temp_directory() as tmp:
fs.save_feature(df, 0, tmp)
df_loaded = fs.load_feature(0, tmp)
assert_frame_equal(df, df_loaded)
def test_load_features():
df = pd.DataFrame()
df['a'] = np.arange(100).astype(float)
df['b'] = np.arange(100).astype(int)
df['c'] = np.arange(100).astype(int)
with get_temp_directory() as tmp:
fs.save_feature(df[['b']], 0, tmp)
fs.save_feature(df[['c']], 1, tmp)
df_loaded = fs.load_features(df[['a']], [0, 1], tmp)
assert_frame_equal(df, df_loaded)
def test_load_features_no_base():
df = pd.DataFrame()
df['a'] = np.arange(100).astype(float)
df['b'] = np.arange(100).astype(int)
df['c'] = np.arange(100).astype(int)
with get_temp_directory() as tmp:
fs.save_feature(df[['b']], 0, tmp)
fs.save_feature(df[['c']], 1, tmp)
fs.save_feature(df[['a']], '2', tmp)
df_loaded = fs.load_features(None, [0, 1, '2'], tmp)
assert list(df_loaded.columns) == ['b', 'c', 'a']
def test_load_feature_ignore_columns():
df = pd.DataFrame()
df['a'] = np.arange(100).astype(float)
df['b'] = np.arange(100).astype(int)
df['c'] = np.arange(100).astype(int)
with get_temp_directory() as tmp:
fs.save_feature(df, 0, tmp)
# just skip irrelevant column names
df_loaded = fs.load_feature(0, tmp, ignore_columns=['b', 'X'])
assert_frame_equal(df_loaded, df.drop('b', axis=1))
def test_load_feature_ignore_all_columns():
df = pd.DataFrame()
df['a'] = np.arange(100).astype(float)
df['b'] = np.arange(100).astype(int)
df['c'] = np.arange(100).astype(int)
with get_temp_directory() as tmp:
fs.save_feature(df, 0, tmp)
df_loaded = fs.load_feature(0, tmp, ignore_columns=['a', 'b', 'c', 'X'])
assert_frame_equal(df_loaded, df.drop(['a', 'b', 'c'], axis=1))
def test_load_features_duplicate_col_name():
df = pd.DataFrame()
df['a'] = np.arange(100).astype(float)
df['b'] = np.arange(100).astype(int)
df['c'] = np.arange(100).astype(int)
with get_temp_directory() as tmp:
fs.save_feature(df[['a', 'b']], 0, tmp)
fs.save_feature(df[['b', 'c']], 1, tmp)
fs.save_feature(df[['b', 'a']], 'X', tmp)
df_loaded = fs.load_features(None, [0, 1, 'X'], tmp, rename_duplicate=True)
assert list(df_loaded.columns) == ['a', 'b', 'b_1', 'c', 'b_X', 'a_X']
df_loaded = fs.load_features(None, [0, 1, 'X'], tmp, rename_duplicate=False)
assert list(df_loaded.columns) == ['a', 'b', 'b', 'c', 'b', 'a']
def test_invalid_feature():
df = pd.DataFrame({
'a': [1, 2, 3, 4, 5] + [None] * 5,
'b': np.random.randint(0, 10, size=10)
})
y = pd.Series([1, 0, 1, 0, 1])
with get_temp_directory() as tmp:
with pytest.raises(RuntimeError):
fs.save_feature(df[['a']], 0, reference_target_variable=y, directory=tmp)
with pytest.raises(RuntimeError):
fs.save_feature(df, 0, reference_target_variable=y, directory=tmp)
# ok
fs.save_feature(df[['b']], 0, reference_target_variable=y, directory=tmp)
def test_feature_exists():
df = pd.DataFrame({
'a': [1, 2, 3, 4, 5] + [None] * 5
})
with get_temp_directory() as tmp:
fs.save_feature(df[['a']], 0, directory=tmp)
with pytest.raises(RuntimeError):
fs.save_feature(df, 0, overwrite=False, directory=tmp)
def test_decorator():
with get_temp_directory() as tmp:
@fs.cached_feature('x', tmp)
def make_feature_x():
return pd.DataFrame({'a': [1, 2, 3, 4, 5]})
@fs.cached_feature('y', tmp)
def make_feature_y(n: int):
return pd.DataFrame({'b': np.arange(n)})
x = make_feature_x()
assert make_feature_x.__name__ == "make_feature_x"
assert os.path.exists(os.path.join(tmp, "x.f"))
x2 = make_feature_x()
assert_frame_equal(x, x2)
y = make_feature_y(100)
assert len(y) == 100
assert os.path.exists(os.path.join(tmp, "y.f"))
y2 = make_feature_y(100)
assert_frame_equal(y, y2) | tests/feature_store/test_feature_store.py | import os
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import nyaggle.feature_store as fs
from nyaggle.testing import get_temp_directory
def test_save_feature():
df = pd.DataFrame()
df['a'] = np.arange(100)
with get_temp_directory() as tmp:
fs.save_feature(df, 0, tmp)
assert os.path.exists(os.path.join(tmp, '0.f'))
def test_load_feature():
df = pd.DataFrame()
df['a'] = np.arange(100)
with get_temp_directory() as tmp:
fs.save_feature(df, 0, tmp)
df_loaded = fs.load_feature(0, tmp)
assert_frame_equal(df, df_loaded)
def test_multi_columns():
df = pd.DataFrame()
df['a'] = np.arange(100)
df['b'] = None
with get_temp_directory() as tmp:
fs.save_feature(df, 0, tmp)
df_loaded = fs.load_feature(0, tmp)
assert_frame_equal(df, df_loaded)
def test_various_dtypes():
df = pd.DataFrame()
df['a'] = np.arange(100).astype(float)
df['b'] = np.arange(100).astype(int)
df['c'] = np.arange(100).astype(np.uint8)
df['d'] = np.arange(100).astype(np.uint16)
df['e'] = np.arange(100).astype(np.uint32)
df['f'] = np.arange(100).astype(np.int8)
df['g'] = np.arange(100).astype(np.int16)
df['h'] = np.arange(100).astype(np.int32)
df['i'] = np.arange(100).astype(np.int64)
with get_temp_directory() as tmp:
fs.save_feature(df, 0, tmp)
df_loaded = fs.load_feature(0, tmp)
assert_frame_equal(df, df_loaded)
def test_load_features():
df = pd.DataFrame()
df['a'] = np.arange(100).astype(float)
df['b'] = np.arange(100).astype(int)
df['c'] = np.arange(100).astype(int)
with get_temp_directory() as tmp:
fs.save_feature(df[['b']], 0, tmp)
fs.save_feature(df[['c']], 1, tmp)
df_loaded = fs.load_features(df[['a']], [0, 1], tmp)
assert_frame_equal(df, df_loaded)
def test_load_features_no_base():
df = pd.DataFrame()
df['a'] = np.arange(100).astype(float)
df['b'] = np.arange(100).astype(int)
df['c'] = np.arange(100).astype(int)
with get_temp_directory() as tmp:
fs.save_feature(df[['b']], 0, tmp)
fs.save_feature(df[['c']], 1, tmp)
fs.save_feature(df[['a']], '2', tmp)
df_loaded = fs.load_features(None, [0, 1, '2'], tmp)
assert list(df_loaded.columns) == ['b', 'c', 'a']
def test_load_feature_ignore_columns():
df = pd.DataFrame()
df['a'] = np.arange(100).astype(float)
df['b'] = np.arange(100).astype(int)
df['c'] = np.arange(100).astype(int)
with get_temp_directory() as tmp:
fs.save_feature(df, 0, tmp)
# just skip irrelevant column names
df_loaded = fs.load_feature(0, tmp, ignore_columns=['b', 'X'])
assert_frame_equal(df_loaded, df.drop('b', axis=1))
def test_load_feature_ignore_all_columns():
df = pd.DataFrame()
df['a'] = np.arange(100).astype(float)
df['b'] = np.arange(100).astype(int)
df['c'] = np.arange(100).astype(int)
with get_temp_directory() as tmp:
fs.save_feature(df, 0, tmp)
df_loaded = fs.load_feature(0, tmp, ignore_columns=['a', 'b', 'c', 'X'])
assert_frame_equal(df_loaded, df.drop(['a', 'b', 'c'], axis=1))
def test_load_features_duplicate_col_name():
df = pd.DataFrame()
df['a'] = np.arange(100).astype(float)
df['b'] = np.arange(100).astype(int)
df['c'] = np.arange(100).astype(int)
with get_temp_directory() as tmp:
fs.save_feature(df[['a', 'b']], 0, tmp)
fs.save_feature(df[['b', 'c']], 1, tmp)
fs.save_feature(df[['b', 'a']], 'X', tmp)
df_loaded = fs.load_features(None, [0, 1, 'X'], tmp, rename_duplicate=True)
assert list(df_loaded.columns) == ['a', 'b', 'b_1', 'c', 'b_X', 'a_X']
df_loaded = fs.load_features(None, [0, 1, 'X'], tmp, rename_duplicate=False)
assert list(df_loaded.columns) == ['a', 'b', 'b', 'c', 'b', 'a']
def test_invalid_feature():
df = pd.DataFrame({
'a': [1, 2, 3, 4, 5] + [None] * 5,
'b': np.random.randint(0, 10, size=10)
})
y = pd.Series([1, 0, 1, 0, 1])
with get_temp_directory() as tmp:
with pytest.raises(RuntimeError):
fs.save_feature(df[['a']], 0, reference_target_variable=y, directory=tmp)
with pytest.raises(RuntimeError):
fs.save_feature(df, 0, reference_target_variable=y, directory=tmp)
# ok
fs.save_feature(df[['b']], 0, reference_target_variable=y, directory=tmp)
def test_feature_exists():
df = pd.DataFrame({
'a': [1, 2, 3, 4, 5] + [None] * 5
})
with get_temp_directory() as tmp:
fs.save_feature(df[['a']], 0, directory=tmp)
with pytest.raises(RuntimeError):
fs.save_feature(df, 0, overwrite=False, directory=tmp)
def test_decorator():
with get_temp_directory() as tmp:
@fs.cached_feature('x', tmp)
def make_feature_x():
return pd.DataFrame({'a': [1, 2, 3, 4, 5]})
@fs.cached_feature('y', tmp)
def make_feature_y(n: int):
return pd.DataFrame({'b': np.arange(n)})
x = make_feature_x()
assert make_feature_x.__name__ == "make_feature_x"
assert os.path.exists(os.path.join(tmp, "x.f"))
x2 = make_feature_x()
assert_frame_equal(x, x2)
y = make_feature_y(100)
assert len(y) == 100
assert os.path.exists(os.path.join(tmp, "y.f"))
y2 = make_feature_y(100)
assert_frame_equal(y, y2) | 0.267313 | 0.516108 |
from GUI.LoginWindow import *
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5 import QtWidgets, QtGui
from PyQt5.QtWidgets import *
import random
import win32com.client
import threading
from GUI.ProprietorWindow import Design_ProprietorWindow
from Controllers import ProprietorControl
import time
class ProprietorWindow(Design_ProprietorWindow):
def __init__(self,ID, parent=None,):
print(5)
super().__init__(parent)
self.__ID = ID
print(6)
self.__control = ProprietorControl(ID)
self.speaker = win32com.client.Dispatch("SAPI.SpVoice")
self.set_up_parameters()
self.btnAllShop.clicked.connect(lambda :threading.Thread(target=lambda :self.speaker.Speak("申请进场")).start())
self.btnMyShop.clicked.connect(lambda :threading.Thread(target=lambda :self.speaker.Speak("我的店铺")).start())
self.btnInf.clicked.connect(lambda :threading.Thread(target=lambda :self.speaker.Speak("我的信息·")).start())
self.btnInf.clicked.connect(lambda :threading.Thread(target=self.read_information).start())
self.btnContract.clicked.connect(lambda :threading.Thread(target=lambda :self.speaker.Speak("我的合同")).start())
self.btnSublease.clicked.connect(lambda :threading.Thread(target=lambda :self.speaker.Speak("申请转租")).start())
self.pushButtonShop_01.clicked.connect(self.set_information)
self.pushButtonShop_01.clicked.connect(lambda : self.shopNum.setText(str(1)))
self.pushButtonShop_02.clicked.connect(self.set_information)
self.pushButtonShop_02.clicked.connect(lambda : self.shopNum.setText(str(2)))
self.pushButtonShop_03.clicked.connect(self.set_information)
self.pushButtonShop_03.clicked.connect(lambda : self.shopNum.setText(str(3)))
self.pushButtonShop_04.clicked.connect(self.set_information)
self.pushButtonShop_04.clicked.connect(lambda : self.shopNum.setText(str(4)))
self.pushButtonShop_05.clicked.connect(self.set_information)
self.pushButtonShop_05.clicked.connect(lambda : self.shopNum.setText(str(5)))
self.pushButtonShop_06.clicked.connect(self.set_information)
self.pushButtonShop_06.clicked.connect(lambda : self.shopNum.setText(str(6)))
self.pushButtonShop_07.clicked.connect(self.set_information)
self.pushButtonShop_07.clicked.connect(lambda : self.shopNum.setText(str(7)))
self.pushButtonShop_08.clicked.connect(self.set_information)
self.pushButtonShop_08.clicked.connect(lambda : self.shopNum.setText(str(8)))
self.pushButtonShop_09.clicked.connect(self.set_information)
self.pushButtonShop_09.clicked.connect(lambda : self.shopNum.setText(str(9)))
self.pushButtonShop_10.clicked.connect(self.set_information)
self.pushButtonShop_10.clicked.connect(lambda : self.shopNum.setText(str(10)))
self.pushButtonShop_11.clicked.connect(self.set_information)
self.pushButtonShop_11.clicked.connect(lambda : self.shopNum.setText(str(11)))
self.pushButtonShop_12.clicked.connect(self.set_information)
self.pushButtonShop_12.clicked.connect(lambda : self.shopNum.setText(str(12)))
self.immediateApplication.clicked.connect(self.apply_info)
def read_information(self):
time.sleep(1)
self.speaker.Speak("您的信用为")
self.speaker.Speak(self.mFraction.text())
self.speaker.Speak("您的所在地")
self.speaker.Speak(self.label_19.text())
self.speaker.Speak("温度为")
self.speaker.Speak(self.label_22.text())
def set_up_parameters(self):
"""
设置参数
:return: None
"""
self.electricCharge.setText(str(self.__control.receivable_electric()))
self.guaranteeCharge.setText(str(self.__control.receivable_guarantee()))
self.propertyFeeCharge.setText(str(self.__control.receivable_propertyfee()))
self.waterCharge.setText(str(self.__control.receivable_water()))
self.electricReceivable.setText(str(self.__control.receipt_electric()))
self.waterReceivable.setText(str(self.__control.receipt_water()))
self.propertyFeeReceivable.setText(str(self.__control.receipt_propertyfee()))
self.guaranteeReceivable.setText(str(self.__control.receipt_guarantee()))
self.rentTime.setText(str(self.__control.contract_year()))
self.contractInf.setText(str(self.__control.contract_information()))
self.ceoSignature.setCheckState(self.__control.contract_CEOSign())
self.signatureConfirmation.setCheckState(self.__control.contract_proprietorSign())
self.fraction.setText(str(random.randint(500,700)))
self.mFraction.setText(str(random.randint(500,700)))
return None
def set_information(self):
self.areaCovered.setText(str(random.randint(50,100)))
self.annualRent.setText(str(random.randint(20000,30000)))
self.propertyFee.setText(str(12))
self.deposit.setText(str(random.randint(10000,20000)))
def apply_info(self):
"""
提交申请
:return:None
"""
self.__control.create_application(self.__ID,
int(self.shopNum.text()),
self.shopName.text(),
self.userTel.text(),
int(self.shopRentTime.text()),
self.shopReason.toPlainText())
self.shopName.setText("")
self.userTel.setText("")
self.shopRentTime.setText("")
self.shopReason.setText("")
self.shopNum.setText("")
QMessageBox.information(self,
"消息",
"提交申请成功")
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
MainWindow = ProprietorWindow(2)
MainWindow.show()
sys.exit(app.exec_()) | src/GUI/LProprietorWindow.py |
from GUI.LoginWindow import *
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5 import QtWidgets, QtGui
from PyQt5.QtWidgets import *
import random
import win32com.client
import threading
from GUI.ProprietorWindow import Design_ProprietorWindow
from Controllers import ProprietorControl
import time
class ProprietorWindow(Design_ProprietorWindow):
def __init__(self,ID, parent=None,):
print(5)
super().__init__(parent)
self.__ID = ID
print(6)
self.__control = ProprietorControl(ID)
self.speaker = win32com.client.Dispatch("SAPI.SpVoice")
self.set_up_parameters()
self.btnAllShop.clicked.connect(lambda :threading.Thread(target=lambda :self.speaker.Speak("申请进场")).start())
self.btnMyShop.clicked.connect(lambda :threading.Thread(target=lambda :self.speaker.Speak("我的店铺")).start())
self.btnInf.clicked.connect(lambda :threading.Thread(target=lambda :self.speaker.Speak("我的信息·")).start())
self.btnInf.clicked.connect(lambda :threading.Thread(target=self.read_information).start())
self.btnContract.clicked.connect(lambda :threading.Thread(target=lambda :self.speaker.Speak("我的合同")).start())
self.btnSublease.clicked.connect(lambda :threading.Thread(target=lambda :self.speaker.Speak("申请转租")).start())
self.pushButtonShop_01.clicked.connect(self.set_information)
self.pushButtonShop_01.clicked.connect(lambda : self.shopNum.setText(str(1)))
self.pushButtonShop_02.clicked.connect(self.set_information)
self.pushButtonShop_02.clicked.connect(lambda : self.shopNum.setText(str(2)))
self.pushButtonShop_03.clicked.connect(self.set_information)
self.pushButtonShop_03.clicked.connect(lambda : self.shopNum.setText(str(3)))
self.pushButtonShop_04.clicked.connect(self.set_information)
self.pushButtonShop_04.clicked.connect(lambda : self.shopNum.setText(str(4)))
self.pushButtonShop_05.clicked.connect(self.set_information)
self.pushButtonShop_05.clicked.connect(lambda : self.shopNum.setText(str(5)))
self.pushButtonShop_06.clicked.connect(self.set_information)
self.pushButtonShop_06.clicked.connect(lambda : self.shopNum.setText(str(6)))
self.pushButtonShop_07.clicked.connect(self.set_information)
self.pushButtonShop_07.clicked.connect(lambda : self.shopNum.setText(str(7)))
self.pushButtonShop_08.clicked.connect(self.set_information)
self.pushButtonShop_08.clicked.connect(lambda : self.shopNum.setText(str(8)))
self.pushButtonShop_09.clicked.connect(self.set_information)
self.pushButtonShop_09.clicked.connect(lambda : self.shopNum.setText(str(9)))
self.pushButtonShop_10.clicked.connect(self.set_information)
self.pushButtonShop_10.clicked.connect(lambda : self.shopNum.setText(str(10)))
self.pushButtonShop_11.clicked.connect(self.set_information)
self.pushButtonShop_11.clicked.connect(lambda : self.shopNum.setText(str(11)))
self.pushButtonShop_12.clicked.connect(self.set_information)
self.pushButtonShop_12.clicked.connect(lambda : self.shopNum.setText(str(12)))
self.immediateApplication.clicked.connect(self.apply_info)
def read_information(self):
time.sleep(1)
self.speaker.Speak("您的信用为")
self.speaker.Speak(self.mFraction.text())
self.speaker.Speak("您的所在地")
self.speaker.Speak(self.label_19.text())
self.speaker.Speak("温度为")
self.speaker.Speak(self.label_22.text())
def set_up_parameters(self):
"""
设置参数
:return: None
"""
self.electricCharge.setText(str(self.__control.receivable_electric()))
self.guaranteeCharge.setText(str(self.__control.receivable_guarantee()))
self.propertyFeeCharge.setText(str(self.__control.receivable_propertyfee()))
self.waterCharge.setText(str(self.__control.receivable_water()))
self.electricReceivable.setText(str(self.__control.receipt_electric()))
self.waterReceivable.setText(str(self.__control.receipt_water()))
self.propertyFeeReceivable.setText(str(self.__control.receipt_propertyfee()))
self.guaranteeReceivable.setText(str(self.__control.receipt_guarantee()))
self.rentTime.setText(str(self.__control.contract_year()))
self.contractInf.setText(str(self.__control.contract_information()))
self.ceoSignature.setCheckState(self.__control.contract_CEOSign())
self.signatureConfirmation.setCheckState(self.__control.contract_proprietorSign())
self.fraction.setText(str(random.randint(500,700)))
self.mFraction.setText(str(random.randint(500,700)))
return None
def set_information(self):
self.areaCovered.setText(str(random.randint(50,100)))
self.annualRent.setText(str(random.randint(20000,30000)))
self.propertyFee.setText(str(12))
self.deposit.setText(str(random.randint(10000,20000)))
def apply_info(self):
"""
提交申请
:return:None
"""
self.__control.create_application(self.__ID,
int(self.shopNum.text()),
self.shopName.text(),
self.userTel.text(),
int(self.shopRentTime.text()),
self.shopReason.toPlainText())
self.shopName.setText("")
self.userTel.setText("")
self.shopRentTime.setText("")
self.shopReason.setText("")
self.shopNum.setText("")
QMessageBox.information(self,
"消息",
"提交申请成功")
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
MainWindow = ProprietorWindow(2)
MainWindow.show()
sys.exit(app.exec_()) | 0.303732 | 0.101189 |
import argparse
import logging
import os
import subprocess
from .log import logger
def set_up_command_line_arguments():
""" Sets up command line arguments that can be used to modify how scripts are run.
Returns
=======
command_line_args, command_line_parser: tuple
The command_line_args is a Namespace of the command line arguments while
the command_line_parser can be given to a new `argparse.ArgumentParser`
as a parent object from which to inherit.
Notes
=====
The command line arguments are passed initially at runtime, but this parser
does not have a `--help` option (i.e., the command line options are
available for any script which includes `import bilby`, but no help command
is available. This is done to avoid conflicts with child argparse routines
(see the example below).
Examples
========
In the following example we demonstrate how to setup a custom command line for a
project which uses bilby.
.. code-block:: python
# Here we import bilby, which initialises and parses the default command-line args
>>> import bilby
# The command line arguments can then be accessed via
>>> bilby.core.utils.command_line_args
Namespace(clean=False, log_level=20, quite=False)
# Next, we import argparse and define a new argparse object
>>> import argparse
>>> parser = argparse.ArgumentParser(parents=[bilby.core.utils.command_line_parser])
>>> parser.add_argument('--argument', type=int, default=1)
>>> args = parser.parse_args()
Namespace(clean=False, log_level=20, quite=False, argument=1)
Placing these lines into a script, you'll be able to pass in the usual bilby default
arguments, in addition to `--argument`. To see a list of all options, call the script
with `--help`.
"""
try:
parser = argparse.ArgumentParser(
description="Command line interface for bilby scripts",
add_help=False, allow_abbrev=False)
except TypeError:
parser = argparse.ArgumentParser(
description="Command line interface for bilby scripts",
add_help=False)
parser.add_argument("-v", "--verbose", action="store_true",
help=("Increase output verbosity [logging.DEBUG]." +
" Overridden by script level settings"))
parser.add_argument("-q", "--quiet", action="store_true",
help=("Decrease output verbosity [logging.WARNING]." +
" Overridden by script level settings"))
parser.add_argument("-c", "--clean", action="store_true",
help="Force clean data, never use cached data")
parser.add_argument("-u", "--use-cached", action="store_true",
help="Force cached data and do not check its validity")
parser.add_argument("--sampler-help", nargs='?', default=False,
const='None', help="Print help for given sampler")
parser.add_argument("--bilby-test-mode", action="store_true",
help=("Used for testing only: don't run full PE, but"
" just check nothing breaks"))
parser.add_argument("--bilby-zero-likelihood-mode", action="store_true",
help=("Used for testing only: don't run full PE, but"
" just check nothing breaks"))
args, unknown_args = parser.parse_known_args()
if args.quiet:
args.log_level = logging.WARNING
elif args.verbose:
args.log_level = logging.DEBUG
else:
args.log_level = logging.INFO
return args, parser
def run_commandline(cl, log_level=20, raise_error=True, return_output=True):
"""Run a string cmd as a subprocess, check for errors and return output.
Parameters
==========
cl: str
Command to run
log_level: int
See https://docs.python.org/2/library/logging.html#logging-levels,
default is '20' (INFO)
"""
logger.log(log_level, 'Now executing: ' + cl)
if return_output:
try:
out = subprocess.check_output(
cl, stderr=subprocess.STDOUT, shell=True,
universal_newlines=True)
except subprocess.CalledProcessError as e:
logger.log(log_level, 'Execution failed: {}'.format(e.output))
if raise_error:
raise
else:
out = 0
os.system('\n')
return(out)
else:
process = subprocess.Popen(cl, shell=True)
process.communicate() | bilby/core/utils/cmd.py | import argparse
import logging
import os
import subprocess
from .log import logger
def set_up_command_line_arguments():
""" Sets up command line arguments that can be used to modify how scripts are run.
Returns
=======
command_line_args, command_line_parser: tuple
The command_line_args is a Namespace of the command line arguments while
the command_line_parser can be given to a new `argparse.ArgumentParser`
as a parent object from which to inherit.
Notes
=====
The command line arguments are passed initially at runtime, but this parser
does not have a `--help` option (i.e., the command line options are
available for any script which includes `import bilby`, but no help command
is available. This is done to avoid conflicts with child argparse routines
(see the example below).
Examples
========
In the following example we demonstrate how to setup a custom command line for a
project which uses bilby.
.. code-block:: python
# Here we import bilby, which initialises and parses the default command-line args
>>> import bilby
# The command line arguments can then be accessed via
>>> bilby.core.utils.command_line_args
Namespace(clean=False, log_level=20, quite=False)
# Next, we import argparse and define a new argparse object
>>> import argparse
>>> parser = argparse.ArgumentParser(parents=[bilby.core.utils.command_line_parser])
>>> parser.add_argument('--argument', type=int, default=1)
>>> args = parser.parse_args()
Namespace(clean=False, log_level=20, quite=False, argument=1)
Placing these lines into a script, you'll be able to pass in the usual bilby default
arguments, in addition to `--argument`. To see a list of all options, call the script
with `--help`.
"""
try:
parser = argparse.ArgumentParser(
description="Command line interface for bilby scripts",
add_help=False, allow_abbrev=False)
except TypeError:
parser = argparse.ArgumentParser(
description="Command line interface for bilby scripts",
add_help=False)
parser.add_argument("-v", "--verbose", action="store_true",
help=("Increase output verbosity [logging.DEBUG]." +
" Overridden by script level settings"))
parser.add_argument("-q", "--quiet", action="store_true",
help=("Decrease output verbosity [logging.WARNING]." +
" Overridden by script level settings"))
parser.add_argument("-c", "--clean", action="store_true",
help="Force clean data, never use cached data")
parser.add_argument("-u", "--use-cached", action="store_true",
help="Force cached data and do not check its validity")
parser.add_argument("--sampler-help", nargs='?', default=False,
const='None', help="Print help for given sampler")
parser.add_argument("--bilby-test-mode", action="store_true",
help=("Used for testing only: don't run full PE, but"
" just check nothing breaks"))
parser.add_argument("--bilby-zero-likelihood-mode", action="store_true",
help=("Used for testing only: don't run full PE, but"
" just check nothing breaks"))
args, unknown_args = parser.parse_known_args()
if args.quiet:
args.log_level = logging.WARNING
elif args.verbose:
args.log_level = logging.DEBUG
else:
args.log_level = logging.INFO
return args, parser
def run_commandline(cl, log_level=20, raise_error=True, return_output=True):
"""Run a string cmd as a subprocess, check for errors and return output.
Parameters
==========
cl: str
Command to run
log_level: int
See https://docs.python.org/2/library/logging.html#logging-levels,
default is '20' (INFO)
"""
logger.log(log_level, 'Now executing: ' + cl)
if return_output:
try:
out = subprocess.check_output(
cl, stderr=subprocess.STDOUT, shell=True,
universal_newlines=True)
except subprocess.CalledProcessError as e:
logger.log(log_level, 'Execution failed: {}'.format(e.output))
if raise_error:
raise
else:
out = 0
os.system('\n')
return(out)
else:
process = subprocess.Popen(cl, shell=True)
process.communicate() | 0.766206 | 0.289836 |
import json
import sys
import requests
import time
import os
from bs4 import BeautifulSoup
DIR = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(DIR, "trajectory-albatross.gpx")) as f:
xml_str = f.read()
defs = []
results = []
obj = BeautifulSoup(xml_str, 'xml')
trks = obj.find_all('trk')
for trk in trks:
name = trk.find('name').string
# first 10 chars of timestamp are yyyy-mm-dd
release_timestamp = trk.find('trkpt').find('time').string
release_date = "y" + release_timestamp[:4] + "m" + release_timestamp[5:7] + "d" + release_timestamp[8:10]
defs.append({
'id': int(name),
'name': name,
'albatross_info': {
"release_date": release_date
},
'extras': {},
'location': []
})
for pt in trk.find_all('trkpt'):
lat = pt['lat']
lon = pt['lon']
timestamp = pt.find('time').string
results.append({
'id': int(name),
'location': {
'longitude': float(lon),
'latitude': float(lat),
'timestamp': timestamp
}
})
if len(sys.argv) > 1:
MODE = sys.argv[1]
else:
MODE = "api"
# find def that matches id
def find_def(id):
for d in defs:
if d['id'] == id:
return d
if MODE == "json":
for i, r in enumerate(results):
if i%10 == 0:
d = find_def(r['id'])
d['location'].append(r['location'])
print(json.dumps(defs))
exit()
for d in defs:
if MODE == "api":
print(json.dumps(d, indent=4, separators=(',', ': ')))
response = requests.post("http://localhost:3000/api/tracking/adduser", json=d)
else:
print(json.dumps(d))
if MODE == "api":
time.sleep(4.0)
# sort by time
results = sorted(results, key=lambda r: r['location']['timestamp'])
for i, r in enumerate(results):
if i%10 == 0:
if MODE == "api":
print(json.dumps(r, indent=4, separators=(',', ': ')))
response = requests.post("http://localhost:3000/api/tracking/adduserlocation", json=r)
time.sleep(0.1)
else:
print(json.dumps(r)) | demo_data/animals/analyse.py | import json
import sys
import requests
import time
import os
from bs4 import BeautifulSoup
DIR = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(DIR, "trajectory-albatross.gpx")) as f:
xml_str = f.read()
defs = []
results = []
obj = BeautifulSoup(xml_str, 'xml')
trks = obj.find_all('trk')
for trk in trks:
name = trk.find('name').string
# first 10 chars of timestamp are yyyy-mm-dd
release_timestamp = trk.find('trkpt').find('time').string
release_date = "y" + release_timestamp[:4] + "m" + release_timestamp[5:7] + "d" + release_timestamp[8:10]
defs.append({
'id': int(name),
'name': name,
'albatross_info': {
"release_date": release_date
},
'extras': {},
'location': []
})
for pt in trk.find_all('trkpt'):
lat = pt['lat']
lon = pt['lon']
timestamp = pt.find('time').string
results.append({
'id': int(name),
'location': {
'longitude': float(lon),
'latitude': float(lat),
'timestamp': timestamp
}
})
if len(sys.argv) > 1:
MODE = sys.argv[1]
else:
MODE = "api"
# find def that matches id
def find_def(id):
for d in defs:
if d['id'] == id:
return d
if MODE == "json":
for i, r in enumerate(results):
if i%10 == 0:
d = find_def(r['id'])
d['location'].append(r['location'])
print(json.dumps(defs))
exit()
for d in defs:
if MODE == "api":
print(json.dumps(d, indent=4, separators=(',', ': ')))
response = requests.post("http://localhost:3000/api/tracking/adduser", json=d)
else:
print(json.dumps(d))
if MODE == "api":
time.sleep(4.0)
# sort by time
results = sorted(results, key=lambda r: r['location']['timestamp'])
for i, r in enumerate(results):
if i%10 == 0:
if MODE == "api":
print(json.dumps(r, indent=4, separators=(',', ': ')))
response = requests.post("http://localhost:3000/api/tracking/adduserlocation", json=r)
time.sleep(0.1)
else:
print(json.dumps(r)) | 0.101679 | 0.095645 |
from ruffus import follows, transform, regex, mkdir,\
pipeline_printout, pipeline_printout_graph,\
pipeline_run, files, merge,\
touch_file, posttask, jobs_limit
import os
import sys
from subprocess import check_call
from pipeline_config import POINTS_H5_DIR,\
PCD_DIR, PCD_DOWNSAMPLED_DIR, NUM_CPUS, DSET_DIR, DSET,\
SAIL_CAR_LOG_PATH, MAPPING_PATH, DOWNSAMPLE_LEAF_SIZE,\
K_NORM_EST, PCD_DOWNSAMPLED_NORMALS_DIR, ICP_TRANSFORMS_DIR,\
ICP_ITERS, ICP_MAX_DIST, REMOTE_DATA_DIR, REMOTE_FILES,\
EXPORT_FULL, GPS_FILE, MAP_FILE, COLOR_DIR, COLOR_CLOUDS_DIR,\
MERGED_CLOUDS_DIR, MAP_COLOR_WINDOW, OCTOMAP_DIR,\
COLOR_OCTOMAP_DIR, OCTOMAP_FILE,\
COLOR_OCTOMAP_FILE, COLOR_OCTOMAP_BT, MERGED_CLOUD_FILE,\
CAST_OCTOMAP_SINGLE, MERGED_VTK_FILE, STATIC_CLOUD_FILE,\
STATIC_VTK_FILE, DYNAMIC_CLOUD_FILE, DYNAMIC_VTK_FILE,\
FILTERED_CLOUDS_DIR, PARAMS_TO_LOAD,\
MERGED_COLOR_CLOUDS_DIR, MERGED_COLOR_CLOUD_FILE,\
MERGED_COLOR_VTK_FILE, LDR_UPSAMPLED_DIR, LDR_DIR,\
NO_TRANSFORM, CAMERA
from pipeline_utils import file_num
dirs = [LDR_DIR, LDR_UPSAMPLED_DIR, POINTS_H5_DIR, PCD_DIR, PCD_DOWNSAMPLED_DIR,
PCD_DOWNSAMPLED_NORMALS_DIR, ICP_TRANSFORMS_DIR, COLOR_DIR,
COLOR_CLOUDS_DIR, MERGED_CLOUDS_DIR, MERGED_COLOR_CLOUDS_DIR,
OCTOMAP_DIR, COLOR_OCTOMAP_DIR, FILTERED_CLOUDS_DIR]
MKDIRS = [mkdir(d) for d in dirs]
# NOTE chdir into dset dir so can just specify relative paths to data
os.chdir(DSET_DIR)
DOWNLOADS = list()
for f in REMOTE_FILES:
DOWNLOADS.append([None, f])
@follows(*MKDIRS)
@files(DOWNLOADS)
def download_files(dummy, local_file):
cmd = 'rsync -vr --ignore-existing %s/%s .' % (REMOTE_DATA_DIR, local_file)
print cmd
check_call(cmd, shell=True)
@follows('download_files')
@files('./%s_gps.bag' % DSET, '%s_frames' % DSET)
def generate_frames_and_map(input_file, output_dir):
cmd = 'cd %s/lidar; python generate_frames.py %s %s; cd -' % (SAIL_CAR_LOG_PATH, DSET_DIR, PARAMS_TO_LOAD)
print cmd
check_call(cmd, shell=True)
cmd = 'cd %s/lidar; python generate_gps_out.py %s; cd -' % (SAIL_CAR_LOG_PATH, DSET_DIR)
print cmd
check_call(cmd, shell=True)
#@follows('generate_frames_and_map')
'''
@transform('%s/*.ldr' % LDR_DIR,
regex('%s/(.*?).ldr' % LDR_DIR),
r'%s/\1.ldr' % LDR_UPSAMPLED_DIR)
def upsample_ldrs(input_file, output_file):
ni = 2 # FIXME PARAM
upsampler = 'python %s/../process/upsample_ldr.py' % MAPPING_PATH
cmd = '%s %s %s %d' % (upsampler, input_file, output_file, ni)
print cmd
check_call(cmd, shell=True)
'''
@follows('generate_frames_and_map')
@files('./params.ini', './params.h5')
def convert_params_to_h5(input_file, output_file):
converter = '%s/mapping/pipeline/params_to_h5.py' % SAIL_CAR_LOG_PATH
cmd = 'python %s' % converter
check_call(cmd, shell=True)
# TODO Also have to run the new bag file extractor for mark2
@follows('convert_params_to_h5')
@files(None, '%s/sentinel' % LDR_DIR)
@posttask(touch_file('%s/sentinel' % LDR_DIR))
def align_ldr(dummy, sentinel):
cmd = 'python %s/process/LidarAlign.py %s %s' % (SAIL_CAR_LOG_PATH, DSET_DIR, '%s%d.avi' % (DSET, CAMERA))
print cmd
check_call(cmd, shell=True)
@follows('align_ldr')
#@files('params.ini', '%s/sentinel' % POINTS_H5_DIR)
@transform('%s/*.ldr' % LDR_DIR,
regex('%s/(.*?).ldr' % LDR_DIR),
r'%s/\1.h5' % POINTS_H5_DIR)
def convert_ldr_to_h5(ldr_file, h5_file):
exporter = '%s/mapping/pipeline/ldr_to_h5.py' % SAIL_CAR_LOG_PATH
cmd = 'python {exporter} {fgps} {ldr_file} {h5_file}'.format(exporter=exporter, fgps=GPS_FILE, ldr_file=ldr_file, h5_file=h5_file)
if NO_TRANSFORM:
cmd += ' --no_transform'
print cmd
check_call(cmd, shell=True)
@follows('convert_ldr_to_h5')
@transform('%s/*.h5' % POINTS_H5_DIR,
regex('%s/(.*?).h5' % POINTS_H5_DIR),
r'%s/\1.pcd' % PCD_DIR)
def convert_h5_to_pcd(input_file, output_file):
h5_to_pcd = '%s/bin/h5_to_pcd' % MAPPING_PATH
cmd = '%s --h5 %s --pcd %s' % (h5_to_pcd, input_file, output_file)
print cmd
check_call(cmd, shell=True)
@follows('convert_h5_to_pcd')
@transform('%s/*.pcd' % PCD_DIR,
regex('%s/(.*?).pcd' % PCD_DIR),
r'%s/\1.pcd' % PCD_DOWNSAMPLED_DIR)
def downsample_pcds(input_file, output_file):
downsampler = '%s/bin/downsample_cloud' % MAPPING_PATH
cmd = '%s --src_pcd %s --out_pcd %s --leaf_size %f' % (downsampler, input_file,
output_file, DOWNSAMPLE_LEAF_SIZE)
print cmd
check_call(cmd, shell=True)
@follows('downsample_pcds')
@jobs_limit(1)
@merge(convert_h5_to_pcd, OCTOMAP_FILE)
def build_octomap(input_files, output_file):
cmd = '{0}/bin/build_octomap'.format(MAPPING_PATH)
if CAST_OCTOMAP_SINGLE:
cmd += ('; ' + cmd + ' --single')
print cmd
check_call(cmd, shell=True)
''' TODO parallelize
@follows('build_octomap')
@transform('%s/*.pcd' % PCD_DOWNSAMPLED_DIR,
regex('%s/(.*?).pcd' % PCD_DOWNSAMPLED_DIR),
r'%s/\1.h5' % COLOR_DIR)
def project_color(input_pcd, output_color_file):
pass
'''
@follows('build_octomap')
@jobs_limit(1)
@files(None, '{0}/0.h5'.format(COLOR_DIR))
def project_color(dummy_file, output_file):
binary = '%s/bin/octomap_color' % MAPPING_PATH
print binary
check_call(binary, shell=True)
@follows('project_color')
@transform('%s/*.h5' % COLOR_DIR,
regex('%s/(.*?).h5' % COLOR_DIR),
r'%s/\1.pcd' % COLOR_CLOUDS_DIR,
r'%s/\1.pcd' % PCD_DOWNSAMPLED_DIR)
def color_clouds(color_file, output_file, pcd_file):
converter = '%s/bin/color_cloud' % MAPPING_PATH
cmd = '%s %s %s %s' % (converter, pcd_file, color_file, output_file)
print cmd
check_call(cmd, shell=True)
@follows('color_clouds')
@jobs_limit(1)
@merge(color_clouds, COLOR_OCTOMAP_FILE)
def build_color_octomap(input_files, output_file):
cmd = '{0}/bin/build_color_octomap'.format(MAPPING_PATH)
print cmd
check_call(cmd, shell=True)
@follows('color_clouds')
@transform('%s/*.pcd' % COLOR_CLOUDS_DIR,
regex('%s/(.*?).pcd' % COLOR_CLOUDS_DIR),
r'%s/\1_static.pcd' % FILTERED_CLOUDS_DIR,
r'%s/\1_dynamic.pcd' % FILTERED_CLOUDS_DIR)
def octomap_filter_single(input_file, static_file, dynamic_file):
cmd = '%s/bin/octomap_filter %s %s %s' % (MAPPING_PATH, input_file, static_file, dynamic_file)
print cmd
check_call(cmd, shell=True)
static_vtk_file = os.path.splitext(static_file)[0] + '.vtk'
dynamic_vtk_file = os.path.splitext(dynamic_file)[0] + '.vtk'
cmd = 'pcl_pcd2vtk %s %s; pcl_pcd2vtk %s %s' % (static_file, static_vtk_file, dynamic_file, dynamic_vtk_file)
print cmd
check_call(cmd, shell=True)
def chunk(l, n):
for k in xrange(0, len(l), n):
yield l[k:k + n]
# FIXME Repeats code in merge_color_clouds
@follows('downsample_pcds')
@merge('%s/*.pcd' % PCD_DOWNSAMPLED_DIR, '%s/merged.pcd' % MERGED_CLOUDS_DIR)
def merge_raw_clouds(cloud_files, merged_cloud_file):
files = [f for f in cloud_files if os.path.exists(f)]
# Have to chunk the files since there's limit on number of command line arguments
chunks = chunk(files, 500)
merged_chunk_files = list()
k = 0
for chunk_files in chunks:
merged_chunk_file = os.path.dirname(MERGED_CLOUD_FILE) + '/chunk%d.pcd' % k
# Concatenate PCD files
cmd = 'concatenate_points_pcd ' + ' '.join(chunk_files) + ' ' + merged_chunk_file
print cmd
check_call(cmd, shell=True)
merged_chunk_files.append(merged_chunk_file)
k += 1
cmd = 'concatenate_points_pcd ' + ' '.join(merged_chunk_files) + ' ' + MERGED_CLOUD_FILE
print cmd
check_call(cmd, shell=True)
for chunk_file in merged_chunk_files:
cmd = 'rm %s' % chunk_file
print cmd
check_call(cmd, shell=True)
# Color the merged cloud by intensity
cmd = '%s/bin/color_intensity %s %s' % (MAPPING_PATH, MERGED_CLOUD_FILE, MERGED_CLOUD_FILE)
print cmd
check_call(cmd, shell=True)
# Convert merged cloud to vtk for visualizer
cmd = 'pcl_pcd2vtk %s %s' % (MERGED_CLOUD_FILE, MERGED_VTK_FILE)
check_call(cmd, shell=True)
@follows('color_clouds')
@merge('%s/*.pcd' % COLOR_CLOUDS_DIR, '%s/merged_%d.pcd' % (MERGED_COLOR_CLOUDS_DIR, MAP_COLOR_WINDOW))
def merge_color_clouds(cloud_files, merged_cloud_file):
files = [f for f in cloud_files if os.path.exists(f)]
# Have to chunk the files since there's limit on number of command line arguments
chunks = chunk(files, 500)
merged_chunk_files = list()
k = 0
for chunk_files in chunks:
merged_chunk_file = os.path.dirname(MERGED_COLOR_CLOUD_FILE) + '/chunk%d.pcd' % k
# Concatenate PCD files
cmd = 'concatenate_points_pcd ' + ' '.join(chunk_files) + ' ' + merged_chunk_file
print cmd
check_call(cmd, shell=True)
merged_chunk_files.append(merged_chunk_file)
k += 1
cmd = 'concatenate_points_pcd ' + ' '.join(merged_chunk_files) + ' ' + MERGED_COLOR_CLOUD_FILE
print cmd
check_call(cmd, shell=True)
for chunk_file in merged_chunk_files:
cmd = 'rm %s' % chunk_file
print cmd
check_call(cmd, shell=True)
# Convert merged cloud to vtk for visualizer
cmd = 'pcl_pcd2vtk %s %s' % (MERGED_COLOR_CLOUD_FILE, MERGED_COLOR_VTK_FILE)
check_call(cmd, shell=True)
@follows('merge_color_clouds')
@files(MERGED_COLOR_CLOUD_FILE, STATIC_CLOUD_FILE)
def octomap_filter(input_file, output_file):
cmd = '%s/bin/octomap_filter %s %s %s' % (MAPPING_PATH, MERGED_COLOR_CLOUD_FILE, STATIC_CLOUD_FILE, DYNAMIC_CLOUD_FILE)
print cmd
check_call(cmd, shell=True)
cmd = 'pcl_pcd2vtk %s %s; pcl_pcd2vtk %s %s' % (STATIC_CLOUD_FILE, STATIC_VTK_FILE, DYNAMIC_CLOUD_FILE, DYNAMIC_VTK_FILE)
print cmd
check_call(cmd, shell=True)
@follows('downsample_pcds')
@transform('%s/*.pcd' % PCD_DOWNSAMPLED_DIR,
regex('%s/(.*?).pcd' % PCD_DOWNSAMPLED_DIR),
r'%s/\1.pcd' % PCD_DOWNSAMPLED_NORMALS_DIR)
def estimate_normals(input_file, output_file):
norm_est = '%s/bin/estimate_normals' % MAPPING_PATH
cmd = '%s --src_pcd %s --out_pcd %s --k %d' % (norm_est, input_file,
output_file, K_NORM_EST)
print cmd
check_call(cmd, shell=True)
''' Currently unused
@follows('estimate_normals')
@transform('%s/*.pcd' % PCD_DOWNSAMPLED_NORMALS_DIR,
regex('%s/(.*?).pcd' % PCD_DOWNSAMPLED_NORMALS_DIR),
r'%s/\1.h5' % ICP_TRANSFORMS_DIR)
def align_clouds(input_file, output_file):
icp_reg = '%s/bin/align_clouds' % MAPPING_PATH
if file_num(input_file) == 0: # no transform for first pcd, touch empty file
check_call('touch %s' % output_file, shell=True)
return
# cloud to apply transform to
src = input_file
# cloud to align to (previous index)
tgt = os.path.join(os.path.dirname(input_file), str(file_num(input_file) - 1) + '.pcd')
cmd = '{icp_reg} --pcd_tgt {tgt} --pcd_src {src} --h5_file {h5f} --icp_iters {iters} --max_dist {dist}'.format(
icp_reg=icp_reg, tgt=tgt, src=src, h5f=output_file, iters=ICP_ITERS, dist=ICP_MAX_DIST)
print cmd
check_call(cmd, shell=True)
'''
def clean():
for d in dirs:
print 'deleting %s' % d
if os.path.exists(d):
check_call('rm -r %s' % d, shell=True)
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: python pipeline.py print,graph,run (task1,task2)'
sys.exit(1)
TORUN = [
'convert_ldr_to_h5'
]
if len(sys.argv) == 3:
TORUN = sys.argv[2].split(',')
CMDS = sys.argv[1].split(',')
tasks = {
'print': lambda: pipeline_printout(sys.stdout, TORUN,
forcedtorun_tasks=[], verbose=5),
'graph': lambda: pipeline_printout_graph('graph.jpg', 'jpg', TORUN,
forcedtorun_tasks=[],
no_key_legend=False),
'run': lambda: pipeline_run(TORUN,
multiprocess=NUM_CPUS,
one_second_per_job=False),
'force': lambda: pipeline_run([],
forcedtorun_tasks=TORUN,
multiprocess=NUM_CPUS,
one_second_per_job=False),
'printf': lambda: pipeline_printout(sys.stdout,
[],
forcedtorun_tasks=TORUN,
verbose=2),
'clean': clean
}
for key in tasks:
if key in CMDS:
tasks[key]() | mapping/pipeline/pipeline.py | from ruffus import follows, transform, regex, mkdir,\
pipeline_printout, pipeline_printout_graph,\
pipeline_run, files, merge,\
touch_file, posttask, jobs_limit
import os
import sys
from subprocess import check_call
from pipeline_config import POINTS_H5_DIR,\
PCD_DIR, PCD_DOWNSAMPLED_DIR, NUM_CPUS, DSET_DIR, DSET,\
SAIL_CAR_LOG_PATH, MAPPING_PATH, DOWNSAMPLE_LEAF_SIZE,\
K_NORM_EST, PCD_DOWNSAMPLED_NORMALS_DIR, ICP_TRANSFORMS_DIR,\
ICP_ITERS, ICP_MAX_DIST, REMOTE_DATA_DIR, REMOTE_FILES,\
EXPORT_FULL, GPS_FILE, MAP_FILE, COLOR_DIR, COLOR_CLOUDS_DIR,\
MERGED_CLOUDS_DIR, MAP_COLOR_WINDOW, OCTOMAP_DIR,\
COLOR_OCTOMAP_DIR, OCTOMAP_FILE,\
COLOR_OCTOMAP_FILE, COLOR_OCTOMAP_BT, MERGED_CLOUD_FILE,\
CAST_OCTOMAP_SINGLE, MERGED_VTK_FILE, STATIC_CLOUD_FILE,\
STATIC_VTK_FILE, DYNAMIC_CLOUD_FILE, DYNAMIC_VTK_FILE,\
FILTERED_CLOUDS_DIR, PARAMS_TO_LOAD,\
MERGED_COLOR_CLOUDS_DIR, MERGED_COLOR_CLOUD_FILE,\
MERGED_COLOR_VTK_FILE, LDR_UPSAMPLED_DIR, LDR_DIR,\
NO_TRANSFORM, CAMERA
from pipeline_utils import file_num
dirs = [LDR_DIR, LDR_UPSAMPLED_DIR, POINTS_H5_DIR, PCD_DIR, PCD_DOWNSAMPLED_DIR,
PCD_DOWNSAMPLED_NORMALS_DIR, ICP_TRANSFORMS_DIR, COLOR_DIR,
COLOR_CLOUDS_DIR, MERGED_CLOUDS_DIR, MERGED_COLOR_CLOUDS_DIR,
OCTOMAP_DIR, COLOR_OCTOMAP_DIR, FILTERED_CLOUDS_DIR]
MKDIRS = [mkdir(d) for d in dirs]
# NOTE chdir into dset dir so can just specify relative paths to data
os.chdir(DSET_DIR)
DOWNLOADS = list()
for f in REMOTE_FILES:
DOWNLOADS.append([None, f])
@follows(*MKDIRS)
@files(DOWNLOADS)
def download_files(dummy, local_file):
cmd = 'rsync -vr --ignore-existing %s/%s .' % (REMOTE_DATA_DIR, local_file)
print cmd
check_call(cmd, shell=True)
@follows('download_files')
@files('./%s_gps.bag' % DSET, '%s_frames' % DSET)
def generate_frames_and_map(input_file, output_dir):
cmd = 'cd %s/lidar; python generate_frames.py %s %s; cd -' % (SAIL_CAR_LOG_PATH, DSET_DIR, PARAMS_TO_LOAD)
print cmd
check_call(cmd, shell=True)
cmd = 'cd %s/lidar; python generate_gps_out.py %s; cd -' % (SAIL_CAR_LOG_PATH, DSET_DIR)
print cmd
check_call(cmd, shell=True)
#@follows('generate_frames_and_map')
'''
@transform('%s/*.ldr' % LDR_DIR,
regex('%s/(.*?).ldr' % LDR_DIR),
r'%s/\1.ldr' % LDR_UPSAMPLED_DIR)
def upsample_ldrs(input_file, output_file):
ni = 2 # FIXME PARAM
upsampler = 'python %s/../process/upsample_ldr.py' % MAPPING_PATH
cmd = '%s %s %s %d' % (upsampler, input_file, output_file, ni)
print cmd
check_call(cmd, shell=True)
'''
@follows('generate_frames_and_map')
@files('./params.ini', './params.h5')
def convert_params_to_h5(input_file, output_file):
converter = '%s/mapping/pipeline/params_to_h5.py' % SAIL_CAR_LOG_PATH
cmd = 'python %s' % converter
check_call(cmd, shell=True)
# TODO Also have to run the new bag file extractor for mark2
@follows('convert_params_to_h5')
@files(None, '%s/sentinel' % LDR_DIR)
@posttask(touch_file('%s/sentinel' % LDR_DIR))
def align_ldr(dummy, sentinel):
cmd = 'python %s/process/LidarAlign.py %s %s' % (SAIL_CAR_LOG_PATH, DSET_DIR, '%s%d.avi' % (DSET, CAMERA))
print cmd
check_call(cmd, shell=True)
@follows('align_ldr')
#@files('params.ini', '%s/sentinel' % POINTS_H5_DIR)
@transform('%s/*.ldr' % LDR_DIR,
regex('%s/(.*?).ldr' % LDR_DIR),
r'%s/\1.h5' % POINTS_H5_DIR)
def convert_ldr_to_h5(ldr_file, h5_file):
exporter = '%s/mapping/pipeline/ldr_to_h5.py' % SAIL_CAR_LOG_PATH
cmd = 'python {exporter} {fgps} {ldr_file} {h5_file}'.format(exporter=exporter, fgps=GPS_FILE, ldr_file=ldr_file, h5_file=h5_file)
if NO_TRANSFORM:
cmd += ' --no_transform'
print cmd
check_call(cmd, shell=True)
@follows('convert_ldr_to_h5')
@transform('%s/*.h5' % POINTS_H5_DIR,
regex('%s/(.*?).h5' % POINTS_H5_DIR),
r'%s/\1.pcd' % PCD_DIR)
def convert_h5_to_pcd(input_file, output_file):
h5_to_pcd = '%s/bin/h5_to_pcd' % MAPPING_PATH
cmd = '%s --h5 %s --pcd %s' % (h5_to_pcd, input_file, output_file)
print cmd
check_call(cmd, shell=True)
@follows('convert_h5_to_pcd')
@transform('%s/*.pcd' % PCD_DIR,
regex('%s/(.*?).pcd' % PCD_DIR),
r'%s/\1.pcd' % PCD_DOWNSAMPLED_DIR)
def downsample_pcds(input_file, output_file):
downsampler = '%s/bin/downsample_cloud' % MAPPING_PATH
cmd = '%s --src_pcd %s --out_pcd %s --leaf_size %f' % (downsampler, input_file,
output_file, DOWNSAMPLE_LEAF_SIZE)
print cmd
check_call(cmd, shell=True)
@follows('downsample_pcds')
@jobs_limit(1)
@merge(convert_h5_to_pcd, OCTOMAP_FILE)
def build_octomap(input_files, output_file):
cmd = '{0}/bin/build_octomap'.format(MAPPING_PATH)
if CAST_OCTOMAP_SINGLE:
cmd += ('; ' + cmd + ' --single')
print cmd
check_call(cmd, shell=True)
''' TODO parallelize
@follows('build_octomap')
@transform('%s/*.pcd' % PCD_DOWNSAMPLED_DIR,
regex('%s/(.*?).pcd' % PCD_DOWNSAMPLED_DIR),
r'%s/\1.h5' % COLOR_DIR)
def project_color(input_pcd, output_color_file):
pass
'''
@follows('build_octomap')
@jobs_limit(1)
@files(None, '{0}/0.h5'.format(COLOR_DIR))
def project_color(dummy_file, output_file):
binary = '%s/bin/octomap_color' % MAPPING_PATH
print binary
check_call(binary, shell=True)
@follows('project_color')
@transform('%s/*.h5' % COLOR_DIR,
regex('%s/(.*?).h5' % COLOR_DIR),
r'%s/\1.pcd' % COLOR_CLOUDS_DIR,
r'%s/\1.pcd' % PCD_DOWNSAMPLED_DIR)
def color_clouds(color_file, output_file, pcd_file):
converter = '%s/bin/color_cloud' % MAPPING_PATH
cmd = '%s %s %s %s' % (converter, pcd_file, color_file, output_file)
print cmd
check_call(cmd, shell=True)
@follows('color_clouds')
@jobs_limit(1)
@merge(color_clouds, COLOR_OCTOMAP_FILE)
def build_color_octomap(input_files, output_file):
cmd = '{0}/bin/build_color_octomap'.format(MAPPING_PATH)
print cmd
check_call(cmd, shell=True)
@follows('color_clouds')
@transform('%s/*.pcd' % COLOR_CLOUDS_DIR,
regex('%s/(.*?).pcd' % COLOR_CLOUDS_DIR),
r'%s/\1_static.pcd' % FILTERED_CLOUDS_DIR,
r'%s/\1_dynamic.pcd' % FILTERED_CLOUDS_DIR)
def octomap_filter_single(input_file, static_file, dynamic_file):
cmd = '%s/bin/octomap_filter %s %s %s' % (MAPPING_PATH, input_file, static_file, dynamic_file)
print cmd
check_call(cmd, shell=True)
static_vtk_file = os.path.splitext(static_file)[0] + '.vtk'
dynamic_vtk_file = os.path.splitext(dynamic_file)[0] + '.vtk'
cmd = 'pcl_pcd2vtk %s %s; pcl_pcd2vtk %s %s' % (static_file, static_vtk_file, dynamic_file, dynamic_vtk_file)
print cmd
check_call(cmd, shell=True)
def chunk(l, n):
for k in xrange(0, len(l), n):
yield l[k:k + n]
# FIXME Repeats code in merge_color_clouds
@follows('downsample_pcds')
@merge('%s/*.pcd' % PCD_DOWNSAMPLED_DIR, '%s/merged.pcd' % MERGED_CLOUDS_DIR)
def merge_raw_clouds(cloud_files, merged_cloud_file):
files = [f for f in cloud_files if os.path.exists(f)]
# Have to chunk the files since there's limit on number of command line arguments
chunks = chunk(files, 500)
merged_chunk_files = list()
k = 0
for chunk_files in chunks:
merged_chunk_file = os.path.dirname(MERGED_CLOUD_FILE) + '/chunk%d.pcd' % k
# Concatenate PCD files
cmd = 'concatenate_points_pcd ' + ' '.join(chunk_files) + ' ' + merged_chunk_file
print cmd
check_call(cmd, shell=True)
merged_chunk_files.append(merged_chunk_file)
k += 1
cmd = 'concatenate_points_pcd ' + ' '.join(merged_chunk_files) + ' ' + MERGED_CLOUD_FILE
print cmd
check_call(cmd, shell=True)
for chunk_file in merged_chunk_files:
cmd = 'rm %s' % chunk_file
print cmd
check_call(cmd, shell=True)
# Color the merged cloud by intensity
cmd = '%s/bin/color_intensity %s %s' % (MAPPING_PATH, MERGED_CLOUD_FILE, MERGED_CLOUD_FILE)
print cmd
check_call(cmd, shell=True)
# Convert merged cloud to vtk for visualizer
cmd = 'pcl_pcd2vtk %s %s' % (MERGED_CLOUD_FILE, MERGED_VTK_FILE)
check_call(cmd, shell=True)
@follows('color_clouds')
@merge('%s/*.pcd' % COLOR_CLOUDS_DIR, '%s/merged_%d.pcd' % (MERGED_COLOR_CLOUDS_DIR, MAP_COLOR_WINDOW))
def merge_color_clouds(cloud_files, merged_cloud_file):
files = [f for f in cloud_files if os.path.exists(f)]
# Have to chunk the files since there's limit on number of command line arguments
chunks = chunk(files, 500)
merged_chunk_files = list()
k = 0
for chunk_files in chunks:
merged_chunk_file = os.path.dirname(MERGED_COLOR_CLOUD_FILE) + '/chunk%d.pcd' % k
# Concatenate PCD files
cmd = 'concatenate_points_pcd ' + ' '.join(chunk_files) + ' ' + merged_chunk_file
print cmd
check_call(cmd, shell=True)
merged_chunk_files.append(merged_chunk_file)
k += 1
cmd = 'concatenate_points_pcd ' + ' '.join(merged_chunk_files) + ' ' + MERGED_COLOR_CLOUD_FILE
print cmd
check_call(cmd, shell=True)
for chunk_file in merged_chunk_files:
cmd = 'rm %s' % chunk_file
print cmd
check_call(cmd, shell=True)
# Convert merged cloud to vtk for visualizer
cmd = 'pcl_pcd2vtk %s %s' % (MERGED_COLOR_CLOUD_FILE, MERGED_COLOR_VTK_FILE)
check_call(cmd, shell=True)
@follows('merge_color_clouds')
@files(MERGED_COLOR_CLOUD_FILE, STATIC_CLOUD_FILE)
def octomap_filter(input_file, output_file):
cmd = '%s/bin/octomap_filter %s %s %s' % (MAPPING_PATH, MERGED_COLOR_CLOUD_FILE, STATIC_CLOUD_FILE, DYNAMIC_CLOUD_FILE)
print cmd
check_call(cmd, shell=True)
cmd = 'pcl_pcd2vtk %s %s; pcl_pcd2vtk %s %s' % (STATIC_CLOUD_FILE, STATIC_VTK_FILE, DYNAMIC_CLOUD_FILE, DYNAMIC_VTK_FILE)
print cmd
check_call(cmd, shell=True)
@follows('downsample_pcds')
@transform('%s/*.pcd' % PCD_DOWNSAMPLED_DIR,
regex('%s/(.*?).pcd' % PCD_DOWNSAMPLED_DIR),
r'%s/\1.pcd' % PCD_DOWNSAMPLED_NORMALS_DIR)
def estimate_normals(input_file, output_file):
norm_est = '%s/bin/estimate_normals' % MAPPING_PATH
cmd = '%s --src_pcd %s --out_pcd %s --k %d' % (norm_est, input_file,
output_file, K_NORM_EST)
print cmd
check_call(cmd, shell=True)
''' Currently unused
@follows('estimate_normals')
@transform('%s/*.pcd' % PCD_DOWNSAMPLED_NORMALS_DIR,
regex('%s/(.*?).pcd' % PCD_DOWNSAMPLED_NORMALS_DIR),
r'%s/\1.h5' % ICP_TRANSFORMS_DIR)
def align_clouds(input_file, output_file):
icp_reg = '%s/bin/align_clouds' % MAPPING_PATH
if file_num(input_file) == 0: # no transform for first pcd, touch empty file
check_call('touch %s' % output_file, shell=True)
return
# cloud to apply transform to
src = input_file
# cloud to align to (previous index)
tgt = os.path.join(os.path.dirname(input_file), str(file_num(input_file) - 1) + '.pcd')
cmd = '{icp_reg} --pcd_tgt {tgt} --pcd_src {src} --h5_file {h5f} --icp_iters {iters} --max_dist {dist}'.format(
icp_reg=icp_reg, tgt=tgt, src=src, h5f=output_file, iters=ICP_ITERS, dist=ICP_MAX_DIST)
print cmd
check_call(cmd, shell=True)
'''
def clean():
for d in dirs:
print 'deleting %s' % d
if os.path.exists(d):
check_call('rm -r %s' % d, shell=True)
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: python pipeline.py print,graph,run (task1,task2)'
sys.exit(1)
TORUN = [
'convert_ldr_to_h5'
]
if len(sys.argv) == 3:
TORUN = sys.argv[2].split(',')
CMDS = sys.argv[1].split(',')
tasks = {
'print': lambda: pipeline_printout(sys.stdout, TORUN,
forcedtorun_tasks=[], verbose=5),
'graph': lambda: pipeline_printout_graph('graph.jpg', 'jpg', TORUN,
forcedtorun_tasks=[],
no_key_legend=False),
'run': lambda: pipeline_run(TORUN,
multiprocess=NUM_CPUS,
one_second_per_job=False),
'force': lambda: pipeline_run([],
forcedtorun_tasks=TORUN,
multiprocess=NUM_CPUS,
one_second_per_job=False),
'printf': lambda: pipeline_printout(sys.stdout,
[],
forcedtorun_tasks=TORUN,
verbose=2),
'clean': clean
}
for key in tasks:
if key in CMDS:
tasks[key]() | 0.147524 | 0.071526 |
import re
import requests
import json
from selenium import webdriver
from selenium.webdriver import FirefoxOptions
import os
country_by_code_dict = {}
domains_dict = {}
path = '/home/kami/workspace/software/geckodriver'
country_regex = re.compile(r'<a href="countries/([A-Z]+)">([A-Za-z]+)</a>')
countries_list_url = 'https://www.alexa.com/topsites/countries'
site_regex = re.compile(r'\t*<a href="/siteinfo/([a-zA-Z\._\-]+)">.*</a>\t*')
def _parse_domains_json():
ans = {}
with open("domain_info.json", "r") as f:
di = json.loads(f.read())
for dik in di:
if dik["type"] == "country-code":
county_name = dik["description"]
ans[dik["domain"]] = county_name
return ans
def _get_counties_codes():
a = requests.get(countries_list_url)
return re.findall(country_regex, a.text)
def _get_sites_by_code(code):
#print(code)
sites_list_url = f"https://www.alexa.com/topsites/countries/{code}"
driver.get(sites_list_url)
driver.set_page_load_timeout(15)
dinamic_html_source = driver.page_source
#print(dinamic_html_source)
return re.findall(site_regex, dinamic_html_source)
def _get_sites_list(country_code):
ans = []
top_sites = _get_sites_by_code(country_code)
for s in top_sites:
site = _check_top_level_domain(country_code, s)
if site:
ans.append(site)
return ans
def _check_top_level_domain(country_code, site):
global domains_dict
global country_by_code_dict
top_level_domain = site.split('.')[-1]
if country_code in country_by_code_dict and top_level_domain in domains_dict and country_by_code_dict[country_code] in domains_dict[top_level_domain]:
return site
def _get_sites_for_all_countries():
global country_by_code_dict
ans = {}
for country_code in country_by_code_dict:
ans[country_by_code_dict[country_code]] = _get_sites_list(country_code)
return ans
def get_top_sites():
global driver
global domains_dict
global country_by_code_dict
country_by_code_dict = {x[0]:x[1] for x in _get_counties_codes()}
print(country_by_code_dict)
domains_dict = _parse_domains_json()
print(domains_dict)
opts = FirefoxOptions()
opts.add_argument("--headless")
driver = webdriver.Firefox(executable_path = path, firefox_options=opts)
ans = _get_sites_for_all_countries()
print(ans)
driver.close()
return ans
ans = get_top_sites()
with open("web_pool.json", "w+") as f:
json.dump(ans, f) | backend/server/init_db/get_top_sites_by_country.py | import re
import requests
import json
from selenium import webdriver
from selenium.webdriver import FirefoxOptions
import os
country_by_code_dict = {}
domains_dict = {}
path = '/home/kami/workspace/software/geckodriver'
country_regex = re.compile(r'<a href="countries/([A-Z]+)">([A-Za-z]+)</a>')
countries_list_url = 'https://www.alexa.com/topsites/countries'
site_regex = re.compile(r'\t*<a href="/siteinfo/([a-zA-Z\._\-]+)">.*</a>\t*')
def _parse_domains_json():
ans = {}
with open("domain_info.json", "r") as f:
di = json.loads(f.read())
for dik in di:
if dik["type"] == "country-code":
county_name = dik["description"]
ans[dik["domain"]] = county_name
return ans
def _get_counties_codes():
a = requests.get(countries_list_url)
return re.findall(country_regex, a.text)
def _get_sites_by_code(code):
#print(code)
sites_list_url = f"https://www.alexa.com/topsites/countries/{code}"
driver.get(sites_list_url)
driver.set_page_load_timeout(15)
dinamic_html_source = driver.page_source
#print(dinamic_html_source)
return re.findall(site_regex, dinamic_html_source)
def _get_sites_list(country_code):
ans = []
top_sites = _get_sites_by_code(country_code)
for s in top_sites:
site = _check_top_level_domain(country_code, s)
if site:
ans.append(site)
return ans
def _check_top_level_domain(country_code, site):
global domains_dict
global country_by_code_dict
top_level_domain = site.split('.')[-1]
if country_code in country_by_code_dict and top_level_domain in domains_dict and country_by_code_dict[country_code] in domains_dict[top_level_domain]:
return site
def _get_sites_for_all_countries():
global country_by_code_dict
ans = {}
for country_code in country_by_code_dict:
ans[country_by_code_dict[country_code]] = _get_sites_list(country_code)
return ans
def get_top_sites():
global driver
global domains_dict
global country_by_code_dict
country_by_code_dict = {x[0]:x[1] for x in _get_counties_codes()}
print(country_by_code_dict)
domains_dict = _parse_domains_json()
print(domains_dict)
opts = FirefoxOptions()
opts.add_argument("--headless")
driver = webdriver.Firefox(executable_path = path, firefox_options=opts)
ans = _get_sites_for_all_countries()
print(ans)
driver.close()
return ans
ans = get_top_sites()
with open("web_pool.json", "w+") as f:
json.dump(ans, f) | 0.130535 | 0.109658 |
import getopt
import os
import sys
import argparse
import getpass
import subprocess
import logging
from paramiko import SSHClient, AutoAddPolicy
from scp import SCPClient
from zipfile import ZipFile
SANDBOX_USERNAME = "sandbox"
SANDBOX_PORT = 22
SCRIPT = os.path.realpath(__file__)
SCRIPT_ROOT = os.path.dirname(SCRIPT)
def make_logger(verbose_enabled: bool, file_enabled: bool):
logger = logging.getLogger(__name__)
if verbose_enabled:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] %(message)s")
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
if file_enabled:
fh = logging.FileHandler("upload.log")
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def wait_cmd(stds):
for s in stds:
s.channel.recv_exit_status()
return stds
def make_sdist():
proc = subprocess.run(
["python", "setup.py", "sdist", "--formats=zip"], cwd=SCRIPT_ROOT
)
if proc.returncode:
raise RuntimeError("Failed to make source distribution")
with open("version") as v:
version = v.read().strip()
return "dist/{{cookiecutter.project_name}}-{}.zip".format(version)
def upload(host, port, log, venv_init=True):
log.debug("Making distribution...")
dist_path = make_sdist()
log.debug("Distribution done {}".format(dist_path))
log.debug("Establishing SSH connection to {}:{}...".format(host, port))
ssh = SSHClient()
ssh.set_missing_host_key_policy(AutoAddPolicy())
pwd = <PASSWORD>("Enter password: ")
ssh.connect(
hostname=host,
username=SANDBOX_USERNAME,
password=<PASSWORD>,
port=port,
allow_agent=False,
look_for_keys=False,
)
log.debug("Connection established")
log.debug("Preparing directory for the project...")
project_path = "/home/sandbox/player/{{cookiecutter.project_name}}"
venv_path = "{}/venv".format(project_path)
ssh.exec_command("mkdir -p {}".format(project_path))
log.debug("Directory done")
if venv_init:
# intialize venv for project
log.debug("Intializing virtual environment...")
wait_cmd(ssh.exec_command("python3 -m venv {}".format(venv_path)))
log.debug("Virtual environment initialized {}".format(venv_path))
log.debug("Uploading distribution ...")
with SCPClient(ssh.get_transport()) as scp:
scp.put(os.path.join(SCRIPT_ROOT, dist_path), project_path)
log.debug("Distribution uploaded")
log.debug("Installing distribution...")
install_cmd = " ".join(
[
"cd {} &&".format(project_path),
"venv/bin/python",
"-m pip install",
os.path.basename(dist_path),
"pulse-executor==0.0.1",
]
)
wait_cmd(ssh.exec_command(install_cmd))
log.debug("Distribution installed")
chown_cmd = " ".join(
["cd {} &&".format(project_path), "chmod", "-R", "g+rw", "."]
)
wait_cmd(ssh.exec_command(chown_cmd))
log.debug("Changed ownership for project files")
log.debug("Closing SSH connection...")
ssh.close()
log.debug("SSH connection closed")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("host")
parser.add_argument(
"--port", type=int, default=SANDBOX_PORT, required=False
)
parser.add_argument("--verbose", action="store_true", required=False)
parser.add_argument("--dump-log", action="store_true", required=False)
args = parser.parse_args()
robot_host, robot_port = args.host, args.port
verbose_logging, file_logging = args.verbose, args.dump_log
log = make_logger(verbose_logging, file_logging)
try:
log.info("Upload started")
upload(robot_host, robot_port, log)
except Exception as e:
log.error("Upload failed")
log.error(e)
else:
log.info("Upload finished successfully")
if __name__ == "__main__":
main() | {{cookiecutter.project_name}}/upload.py | import getopt
import os
import sys
import argparse
import getpass
import subprocess
import logging
from paramiko import SSHClient, AutoAddPolicy
from scp import SCPClient
from zipfile import ZipFile
SANDBOX_USERNAME = "sandbox"
SANDBOX_PORT = 22
SCRIPT = os.path.realpath(__file__)
SCRIPT_ROOT = os.path.dirname(SCRIPT)
def make_logger(verbose_enabled: bool, file_enabled: bool):
logger = logging.getLogger(__name__)
if verbose_enabled:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] %(message)s")
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
if file_enabled:
fh = logging.FileHandler("upload.log")
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def wait_cmd(stds):
for s in stds:
s.channel.recv_exit_status()
return stds
def make_sdist():
proc = subprocess.run(
["python", "setup.py", "sdist", "--formats=zip"], cwd=SCRIPT_ROOT
)
if proc.returncode:
raise RuntimeError("Failed to make source distribution")
with open("version") as v:
version = v.read().strip()
return "dist/{{cookiecutter.project_name}}-{}.zip".format(version)
def upload(host, port, log, venv_init=True):
log.debug("Making distribution...")
dist_path = make_sdist()
log.debug("Distribution done {}".format(dist_path))
log.debug("Establishing SSH connection to {}:{}...".format(host, port))
ssh = SSHClient()
ssh.set_missing_host_key_policy(AutoAddPolicy())
pwd = <PASSWORD>("Enter password: ")
ssh.connect(
hostname=host,
username=SANDBOX_USERNAME,
password=<PASSWORD>,
port=port,
allow_agent=False,
look_for_keys=False,
)
log.debug("Connection established")
log.debug("Preparing directory for the project...")
project_path = "/home/sandbox/player/{{cookiecutter.project_name}}"
venv_path = "{}/venv".format(project_path)
ssh.exec_command("mkdir -p {}".format(project_path))
log.debug("Directory done")
if venv_init:
# intialize venv for project
log.debug("Intializing virtual environment...")
wait_cmd(ssh.exec_command("python3 -m venv {}".format(venv_path)))
log.debug("Virtual environment initialized {}".format(venv_path))
log.debug("Uploading distribution ...")
with SCPClient(ssh.get_transport()) as scp:
scp.put(os.path.join(SCRIPT_ROOT, dist_path), project_path)
log.debug("Distribution uploaded")
log.debug("Installing distribution...")
install_cmd = " ".join(
[
"cd {} &&".format(project_path),
"venv/bin/python",
"-m pip install",
os.path.basename(dist_path),
"pulse-executor==0.0.1",
]
)
wait_cmd(ssh.exec_command(install_cmd))
log.debug("Distribution installed")
chown_cmd = " ".join(
["cd {} &&".format(project_path), "chmod", "-R", "g+rw", "."]
)
wait_cmd(ssh.exec_command(chown_cmd))
log.debug("Changed ownership for project files")
log.debug("Closing SSH connection...")
ssh.close()
log.debug("SSH connection closed")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("host")
parser.add_argument(
"--port", type=int, default=SANDBOX_PORT, required=False
)
parser.add_argument("--verbose", action="store_true", required=False)
parser.add_argument("--dump-log", action="store_true", required=False)
args = parser.parse_args()
robot_host, robot_port = args.host, args.port
verbose_logging, file_logging = args.verbose, args.dump_log
log = make_logger(verbose_logging, file_logging)
try:
log.info("Upload started")
upload(robot_host, robot_port, log)
except Exception as e:
log.error("Upload failed")
log.error(e)
else:
log.info("Upload finished successfully")
if __name__ == "__main__":
main() | 0.216923 | 0.056862 |
import functools
import math
def load_input_file(file_name: str):
with open(file_name) as file:
yield from (line.strip() for line in file)
def parse(task_input):
for line in task_input:
yield eval(line)
def get_elements(a, level):
for s in a:
if s.__class__ == list:
yield from get_elements(s, level + 1)
else:
yield s, level
def to_rep(a):
return list(get_elements(a, 0))
def explode(elements):
for i, e in enumerate(elements):
if e[1] == 4:
if i > 0:
elements[i - 1] = (elements[i - 1][0] + e[0], elements[i - 1][1])
if i + 2 < len(elements):
elements[i + 2] = (
elements[i + 2][0] + elements[i + 1][0],
elements[i + 2][1],
)
elements[i] = (0, 3)
if i + 1 < len(elements):
elements[i + 1] = None
return list(filter(lambda s: s != None, elements)), True
return elements, False
def split(elements):
for i, e in enumerate(elements):
if e[0] >= 10:
return (
elements[:i]
+ [(math.floor(e[0] / 2), e[1] + 1), (math.ceil(e[0] / 2), e[1] + 1)]
+ elements[i + 1 :],
True,
)
return elements, False
def s_sum(sa, sb):
tmp = []
for a in sa:
tmp.append((a[0], a[1] + 1))
for a in sb:
tmp.append((a[0], a[1] + 1))
while True:
tmp, x = explode(tmp)
if x == True:
continue
tmp, y = split(tmp)
if y == True:
continue
break
return tmp
def magni_red(lit, level):
for i, s in enumerate(lit):
if s[1] == level:
lit[i] = (lit[i][0] * 3 + 2 * lit[i + 1][0], lit[i][1] - 1)
lit[i + 1] = None
return list(filter(lambda s: s != None, lit)), True
return lit, False
def magnitute(lit):
c = True
while c:
lit, c = magni_red(lit, 3)
c = True
while c:
lit, c = magni_red(lit, 2)
c = True
while c:
lit, c = magni_red(lit, 1)
assert len(lit) == 2
return lit[0][0] * 3 + 2 * lit[1][0]
assert magnitute(list(get_elements([[1, 2], [[3, 4], 5]], 0))) == 143
assert (
magnitute(list(get_elements([[[[0, 7], 4], [[7, 8], [6, 0]]], [8, 1]], 0))) == 1384
)
assert magnitute(list(get_elements([[[[1, 1], [2, 2]], [3, 3]], [4, 4]], 0))) == 445
assert magnitute(list(get_elements([[[[3, 0], [5, 3]], [4, 4]], [5, 5]], 0))) == 791
assert magnitute(list(get_elements([[[[5, 0], [7, 4]], [5, 5]], [6, 6]], 0))) == 1137
assert (
magnitute(
list(
get_elements(
[[[[8, 7], [7, 7]], [[8, 6], [7, 7]]], [[[0, 7], [6, 6]], [8, 7]]], 0
)
)
)
== 3488
)
def solution_for_first_part(task_input):
result = functools.reduce(
s_sum, map(lambda line: list(get_elements(line, 0)), parse(task_input))
)
return magnitute(result)
def solution_for_second_part(task_input):
max_magnitude = 0
nums = list(map(lambda line: list(get_elements(line, 0)), parse(task_input)))
n = len(nums)
for i in range(n):
for j in range(i + 1, n):
x = nums[i]
y = nums[j]
s = s_sum(x, y)
m = magnitute(s)
if m > max_magnitude:
max_magnitude = m
s = s_sum(y, x)
m = magnitute(s)
if m > max_magnitude:
max_magnitude = m
return max_magnitude
example_input = """[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]""".splitlines()
assert solution_for_first_part(example_input) == 4140
ans1 = solution_for_first_part(list(load_input_file("input.txt")))
ans2 = solution_for_second_part(list(load_input_file("input.txt")))
# 1
"""
Solution to Part 1
"""
print("Part One : " + str(ans1))
# 2
"""
Solution to Part 2
"""
print("Part Two : " + str(ans2)) | day-18/sol-18.py |
import functools
import math
def load_input_file(file_name: str):
with open(file_name) as file:
yield from (line.strip() for line in file)
def parse(task_input):
for line in task_input:
yield eval(line)
def get_elements(a, level):
for s in a:
if s.__class__ == list:
yield from get_elements(s, level + 1)
else:
yield s, level
def to_rep(a):
return list(get_elements(a, 0))
def explode(elements):
for i, e in enumerate(elements):
if e[1] == 4:
if i > 0:
elements[i - 1] = (elements[i - 1][0] + e[0], elements[i - 1][1])
if i + 2 < len(elements):
elements[i + 2] = (
elements[i + 2][0] + elements[i + 1][0],
elements[i + 2][1],
)
elements[i] = (0, 3)
if i + 1 < len(elements):
elements[i + 1] = None
return list(filter(lambda s: s != None, elements)), True
return elements, False
def split(elements):
for i, e in enumerate(elements):
if e[0] >= 10:
return (
elements[:i]
+ [(math.floor(e[0] / 2), e[1] + 1), (math.ceil(e[0] / 2), e[1] + 1)]
+ elements[i + 1 :],
True,
)
return elements, False
def s_sum(sa, sb):
tmp = []
for a in sa:
tmp.append((a[0], a[1] + 1))
for a in sb:
tmp.append((a[0], a[1] + 1))
while True:
tmp, x = explode(tmp)
if x == True:
continue
tmp, y = split(tmp)
if y == True:
continue
break
return tmp
def magni_red(lit, level):
for i, s in enumerate(lit):
if s[1] == level:
lit[i] = (lit[i][0] * 3 + 2 * lit[i + 1][0], lit[i][1] - 1)
lit[i + 1] = None
return list(filter(lambda s: s != None, lit)), True
return lit, False
def magnitute(lit):
c = True
while c:
lit, c = magni_red(lit, 3)
c = True
while c:
lit, c = magni_red(lit, 2)
c = True
while c:
lit, c = magni_red(lit, 1)
assert len(lit) == 2
return lit[0][0] * 3 + 2 * lit[1][0]
assert magnitute(list(get_elements([[1, 2], [[3, 4], 5]], 0))) == 143
assert (
magnitute(list(get_elements([[[[0, 7], 4], [[7, 8], [6, 0]]], [8, 1]], 0))) == 1384
)
assert magnitute(list(get_elements([[[[1, 1], [2, 2]], [3, 3]], [4, 4]], 0))) == 445
assert magnitute(list(get_elements([[[[3, 0], [5, 3]], [4, 4]], [5, 5]], 0))) == 791
assert magnitute(list(get_elements([[[[5, 0], [7, 4]], [5, 5]], [6, 6]], 0))) == 1137
assert (
magnitute(
list(
get_elements(
[[[[8, 7], [7, 7]], [[8, 6], [7, 7]]], [[[0, 7], [6, 6]], [8, 7]]], 0
)
)
)
== 3488
)
def solution_for_first_part(task_input):
result = functools.reduce(
s_sum, map(lambda line: list(get_elements(line, 0)), parse(task_input))
)
return magnitute(result)
def solution_for_second_part(task_input):
max_magnitude = 0
nums = list(map(lambda line: list(get_elements(line, 0)), parse(task_input)))
n = len(nums)
for i in range(n):
for j in range(i + 1, n):
x = nums[i]
y = nums[j]
s = s_sum(x, y)
m = magnitute(s)
if m > max_magnitude:
max_magnitude = m
s = s_sum(y, x)
m = magnitute(s)
if m > max_magnitude:
max_magnitude = m
return max_magnitude
example_input = """[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]""".splitlines()
assert solution_for_first_part(example_input) == 4140
ans1 = solution_for_first_part(list(load_input_file("input.txt")))
ans2 = solution_for_second_part(list(load_input_file("input.txt")))
# 1
"""
Solution to Part 1
"""
print("Part One : " + str(ans1))
# 2
"""
Solution to Part 2
"""
print("Part Two : " + str(ans2)) | 0.314893 | 0.552178 |
import random
# My libraries
from backprop2 import Network, sigmoid_vec
# Third-party libraries
import numpy as np
def plot_helper(x):
import matplotlib
import matplotlib.pyplot as plt
x = np.reshape(x, (-1, 28))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.matshow(x, cmap = matplotlib.cm.binary)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.show()
class DeepAutoencoder(Network):
def __init__(self, layers):
"""
The list ``layers`` specifies the sizes of the nested
autoencoders. For example, if ``layers`` is [50, 20, 10] then
the deep autoencoder will be a neural network with layers of
size [50, 20, 10, 20, 50]."""
self.layers = layers
Network.__init__(self, layers+layers[-2::-1])
def train(self, training_data, epochs, mini_batch_size, eta,
lmbda):
"""
Train the DeepAutoencoder. The ``training_data`` is a list of
training inputs, ``x``, ``mini_batch_size`` is a single
positive integer, and ``epochs``, ``eta``, ``lmbda`` are lists
of parameters, with the different list members corresponding
to the different stages of training. For example, ``eta[0]``
is the learning rate used for the first nested autoencoder,
``eta[1]`` is the learning rate for the second nested
autoencoder, and so on. ``eta[-1]`` is the learning rate used
for the final stage of fine-tuning.
"""
print "\nTraining a %s deep autoencoder" % (
"-".join([str(j) for j in self.sizes]),)
training_data = double(training_data)
cur_training_data = training_data[::]
for j in range(len(self.layers)-1):
print "\nTraining the %s-%s-%s nested autoencoder" % (
self.layers[j], self.layers[j+1], self.layers[j])
print "%s epochs, mini-batch size %s, eta = %s, lambda = %s" % (
epochs[j], mini_batch_size, eta[j], lmbda[j])
self.train_nested_autoencoder(
j, cur_training_data, epochs[j], mini_batch_size, eta[j],
lmbda[j])
cur_training_data = [
(sigmoid_vec(np.dot(net.weights[0], x)+net.biases[0]),)*2
for (x, _) in cur_training_data]
print "\nFine-tuning network weights with backpropagation"
print "%s epochs, mini-batch size %s, eta = %s, lambda = %s" % (
epochs[-1], mini_batch_size, eta[-1], lmbda[-1])
self.SGD(training_data, epochs[-1], mini_batch_size, eta[-1],
lmbda[-1])
def train_nested_autoencoder(
self, j, encoded_training_data, epochs, mini_batch_size, eta, lmbda):
"""
Train the nested autoencoder that starts at layer ``j`` in the
deep autoencoder. Note that ``encoded_training_data`` is a
list with entries of the form ``(x, x)``, where the ``x`` are
encoded training inputs for layer ``j``."""
net = Network([self.layers[j], self.layers[j+1], self.layers[j]])
net.biases[0] = self.biases[j]
net.biases[1] = self.biases[-j-1]
net.weights[0] = self.weights[j]
net.weights[1] = self.weights[-j-1]
net.SGD(encoded_training_data, epochs, mini_batch_size, eta, lmbda)
self.biases[j] = net.biases[0]
self.biases[-j-1] = net.biases[1]
self.weights[j] = net.weights[0]
self.weights[-j-1] = net.weights[1]
def train_nested_autoencoder_repl(
self, j, training_data, epochs, mini_batch_size, eta, lmbda):
"""
This is a convenience method that can be used from the REPL to
train the nested autoencoder that starts at level ``j`` in the
deep autoencoder. Note that ``training_data`` is the input
data for the first layer of the network, and is a list of
entries ``x``."""
self.train_nested_autoencoder(
j,
double(
[self.feedforward(x, start=0, end=j) for x in training_data]),
epochs, mini_batch_size, eta, lmbda)
def feature(self, j, k):
"""
Return the output if neuron number ``k`` in layer ``j`` is
activated, and all others are not active. """
a = np.zeros((self.sizes[j], 1))
a[k] = 1.0
return self.feedforward(a, start=j, end=self.num_layers)
def double(l):
return [(x, x) for x in l] | code/deep_autoencoder.py | import random
# My libraries
from backprop2 import Network, sigmoid_vec
# Third-party libraries
import numpy as np
def plot_helper(x):
import matplotlib
import matplotlib.pyplot as plt
x = np.reshape(x, (-1, 28))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.matshow(x, cmap = matplotlib.cm.binary)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.show()
class DeepAutoencoder(Network):
def __init__(self, layers):
"""
The list ``layers`` specifies the sizes of the nested
autoencoders. For example, if ``layers`` is [50, 20, 10] then
the deep autoencoder will be a neural network with layers of
size [50, 20, 10, 20, 50]."""
self.layers = layers
Network.__init__(self, layers+layers[-2::-1])
def train(self, training_data, epochs, mini_batch_size, eta,
lmbda):
"""
Train the DeepAutoencoder. The ``training_data`` is a list of
training inputs, ``x``, ``mini_batch_size`` is a single
positive integer, and ``epochs``, ``eta``, ``lmbda`` are lists
of parameters, with the different list members corresponding
to the different stages of training. For example, ``eta[0]``
is the learning rate used for the first nested autoencoder,
``eta[1]`` is the learning rate for the second nested
autoencoder, and so on. ``eta[-1]`` is the learning rate used
for the final stage of fine-tuning.
"""
print "\nTraining a %s deep autoencoder" % (
"-".join([str(j) for j in self.sizes]),)
training_data = double(training_data)
cur_training_data = training_data[::]
for j in range(len(self.layers)-1):
print "\nTraining the %s-%s-%s nested autoencoder" % (
self.layers[j], self.layers[j+1], self.layers[j])
print "%s epochs, mini-batch size %s, eta = %s, lambda = %s" % (
epochs[j], mini_batch_size, eta[j], lmbda[j])
self.train_nested_autoencoder(
j, cur_training_data, epochs[j], mini_batch_size, eta[j],
lmbda[j])
cur_training_data = [
(sigmoid_vec(np.dot(net.weights[0], x)+net.biases[0]),)*2
for (x, _) in cur_training_data]
print "\nFine-tuning network weights with backpropagation"
print "%s epochs, mini-batch size %s, eta = %s, lambda = %s" % (
epochs[-1], mini_batch_size, eta[-1], lmbda[-1])
self.SGD(training_data, epochs[-1], mini_batch_size, eta[-1],
lmbda[-1])
def train_nested_autoencoder(
self, j, encoded_training_data, epochs, mini_batch_size, eta, lmbda):
"""
Train the nested autoencoder that starts at layer ``j`` in the
deep autoencoder. Note that ``encoded_training_data`` is a
list with entries of the form ``(x, x)``, where the ``x`` are
encoded training inputs for layer ``j``."""
net = Network([self.layers[j], self.layers[j+1], self.layers[j]])
net.biases[0] = self.biases[j]
net.biases[1] = self.biases[-j-1]
net.weights[0] = self.weights[j]
net.weights[1] = self.weights[-j-1]
net.SGD(encoded_training_data, epochs, mini_batch_size, eta, lmbda)
self.biases[j] = net.biases[0]
self.biases[-j-1] = net.biases[1]
self.weights[j] = net.weights[0]
self.weights[-j-1] = net.weights[1]
def train_nested_autoencoder_repl(
self, j, training_data, epochs, mini_batch_size, eta, lmbda):
"""
This is a convenience method that can be used from the REPL to
train the nested autoencoder that starts at level ``j`` in the
deep autoencoder. Note that ``training_data`` is the input
data for the first layer of the network, and is a list of
entries ``x``."""
self.train_nested_autoencoder(
j,
double(
[self.feedforward(x, start=0, end=j) for x in training_data]),
epochs, mini_batch_size, eta, lmbda)
def feature(self, j, k):
"""
Return the output if neuron number ``k`` in layer ``j`` is
activated, and all others are not active. """
a = np.zeros((self.sizes[j], 1))
a[k] = 1.0
return self.feedforward(a, start=j, end=self.num_layers)
def double(l):
return [(x, x) for x in l] | 0.845751 | 0.685871 |
import numpy as np
import math
import glob
from benchpress import prof
ppn = prof.max_ppn
files = glob.glob("%s/mult_pong.*.out"%(prof.folder))
class TimeList():
ppn_times = ""
def __init__(self, np):
self.ppn_times = list()
for i in range(np):
self.ppn_times.append(list())
def add_time(self, pos, time, ppn):
time_list = self.ppn_times[ppn]
while (pos > len(time_list)):
time_list.append(-1)
if (pos == len(time_list)):
time_list.append(time)
else:
if time < time_list[pos]:
time_list[pos] = time
cpu_times = TimeList(ppn)
gpu_times = ""
if prof.cuda_aware:
gpu_times = TimeList(prof.n_gpus)
time_list = ""
for filename in files:
f = open(filename, 'r')
for line in f:
if "app" in line:
break
elif "Multiple Messages" in line:
if "CPU" in line:
time_list = cpu_times
elif "GPU" in line:
time_list = gpu_times
elif "Size" in line:
times = line.rsplit('\t')
size = (int)(times[0].rsplit(' ')[-1])
for i in range(1, len(times)-1):
time_list.add_time((int)(math.log2(size)), (float)(times[i]), i-1)
f.close()
if __name__=='__main__':
import pyfancyplot.plot as plt
i_list = list()
for i in range(len(cpu_times.ppn_times[0])):
if (cpu_times.ppn_times[0][i] == -1):
continue
i_list.append(i)
if 1:
# CPU Mult Pong
np = [0,4,9,19,29,39]
plt.add_luke_options()
plt.set_palette(palette="deep", n_colors = 6)
x_data = [4*2**i for i in i_list]
for n in np:
plt.line_plot([cpu_times.ppn_times[n][i] for i in i_list], x_data, label = "NMsgs %d"%(n+1))
plt.add_anchored_legend(ncol=3)
plt.set_yticks([1e-7,1e-6,1e-5,1e-4,1e-3],['1e-7','1e-6','1e-5','1e-4','1e-3'])
plt.set_scale('log', 'log')
plt.add_labels("Message Size (Bytes)", "Time (Seconds)")
print("Plotting %s/%s_cpu_mult_pong.pdf"%(prof.folder_out, prof.computer))
plt.save_plot("%s/%s_cpu_mult_pong.pdf"%(prof.folder_out, prof.computer))
if 1:
# CPU Mult Slowdown
np = [0,4,9,19,29,39]
plt.add_luke_options()
plt.set_palette(palette="deep", n_colors = 6)
x_data = [4*2**i for i in i_list]
for n in np:
y_data = [cpu_times.ppn_times[n][i] / cpu_times.ppn_times[0][i] for i in i_list]
plt.line_plot(y_data, x_data, label = "NMsgs %d"%(n+1))
plt.add_anchored_legend(ncol=3)
plt.set_scale('log', 'linear')
plt.add_labels("Message Size (Bytes)", "Times Slowdown")
plt.save_plot("%s/%s_cpu_mult_slowdown.pdf"%(prof.folder_out, prof.computer))
i_list = list()
for i in range(len(gpu_times.ppn_times[0])):
if (gpu_times.ppn_times[0][i] == -1):
continue
i_list.append(i)
if 1:
# GPU Max-Rate
plt.add_luke_options()
plt.set_palette(palette="deep", n_colors = prof.n_gpus)
x_data = [4*2**i for i in i_list]
for i in range(prof.n_gpus):
plt.line_plot([gpu_times.ppn_times[i][il] for il in i_list], x_data, label = "NMsgs %d"%(i+1))
plt.add_anchored_legend(ncol=prof.n_gpus/2)
plt.set_yticks([1e-7,1e-6,1e-5,1e-4,1e-3],['1e-7','1e-6','1e-5','1e-4','1e-3'])
plt.set_scale('log', 'log')
plt.add_labels("Message Size (Bytes)", "Time (Seconds)")
plt.save_plot("%s/%s_gpu_mult_pong.pdf"%(prof.folder_out, prof.computer))
if 1:
# GPU Diff X
plt.add_luke_options()
plt.set_palette(palette="deep", n_colors=prof.n_gpus)
x_data = [4*2**i for i in i_list]
for i in range(prof.n_gpus):
y_data = [gpu_times.ppn_times[i][il] / gpu_times.ppn_times[0][il] for il in i_list]
plt.line_plot(y_data, x_data, label = "NMsgs %d"%(i+1))
plt.add_anchored_legend(ncol=prof.n_gpus/2)
plt.set_scale('log', 'linear')
plt.add_labels("Message Size (Bytes)", "Times Slowdown")
plt.save_plot("%s/%s_gpu_mult_slowdown.pdf"%(prof.folder_out, prof.computer)) | plots/benchpress/ping_pong/mult_pong.py | import numpy as np
import math
import glob
from benchpress import prof
ppn = prof.max_ppn
files = glob.glob("%s/mult_pong.*.out"%(prof.folder))
class TimeList():
ppn_times = ""
def __init__(self, np):
self.ppn_times = list()
for i in range(np):
self.ppn_times.append(list())
def add_time(self, pos, time, ppn):
time_list = self.ppn_times[ppn]
while (pos > len(time_list)):
time_list.append(-1)
if (pos == len(time_list)):
time_list.append(time)
else:
if time < time_list[pos]:
time_list[pos] = time
cpu_times = TimeList(ppn)
gpu_times = ""
if prof.cuda_aware:
gpu_times = TimeList(prof.n_gpus)
time_list = ""
for filename in files:
f = open(filename, 'r')
for line in f:
if "app" in line:
break
elif "Multiple Messages" in line:
if "CPU" in line:
time_list = cpu_times
elif "GPU" in line:
time_list = gpu_times
elif "Size" in line:
times = line.rsplit('\t')
size = (int)(times[0].rsplit(' ')[-1])
for i in range(1, len(times)-1):
time_list.add_time((int)(math.log2(size)), (float)(times[i]), i-1)
f.close()
if __name__=='__main__':
import pyfancyplot.plot as plt
i_list = list()
for i in range(len(cpu_times.ppn_times[0])):
if (cpu_times.ppn_times[0][i] == -1):
continue
i_list.append(i)
if 1:
# CPU Mult Pong
np = [0,4,9,19,29,39]
plt.add_luke_options()
plt.set_palette(palette="deep", n_colors = 6)
x_data = [4*2**i for i in i_list]
for n in np:
plt.line_plot([cpu_times.ppn_times[n][i] for i in i_list], x_data, label = "NMsgs %d"%(n+1))
plt.add_anchored_legend(ncol=3)
plt.set_yticks([1e-7,1e-6,1e-5,1e-4,1e-3],['1e-7','1e-6','1e-5','1e-4','1e-3'])
plt.set_scale('log', 'log')
plt.add_labels("Message Size (Bytes)", "Time (Seconds)")
print("Plotting %s/%s_cpu_mult_pong.pdf"%(prof.folder_out, prof.computer))
plt.save_plot("%s/%s_cpu_mult_pong.pdf"%(prof.folder_out, prof.computer))
if 1:
# CPU Mult Slowdown
np = [0,4,9,19,29,39]
plt.add_luke_options()
plt.set_palette(palette="deep", n_colors = 6)
x_data = [4*2**i for i in i_list]
for n in np:
y_data = [cpu_times.ppn_times[n][i] / cpu_times.ppn_times[0][i] for i in i_list]
plt.line_plot(y_data, x_data, label = "NMsgs %d"%(n+1))
plt.add_anchored_legend(ncol=3)
plt.set_scale('log', 'linear')
plt.add_labels("Message Size (Bytes)", "Times Slowdown")
plt.save_plot("%s/%s_cpu_mult_slowdown.pdf"%(prof.folder_out, prof.computer))
i_list = list()
for i in range(len(gpu_times.ppn_times[0])):
if (gpu_times.ppn_times[0][i] == -1):
continue
i_list.append(i)
if 1:
# GPU Max-Rate
plt.add_luke_options()
plt.set_palette(palette="deep", n_colors = prof.n_gpus)
x_data = [4*2**i for i in i_list]
for i in range(prof.n_gpus):
plt.line_plot([gpu_times.ppn_times[i][il] for il in i_list], x_data, label = "NMsgs %d"%(i+1))
plt.add_anchored_legend(ncol=prof.n_gpus/2)
plt.set_yticks([1e-7,1e-6,1e-5,1e-4,1e-3],['1e-7','1e-6','1e-5','1e-4','1e-3'])
plt.set_scale('log', 'log')
plt.add_labels("Message Size (Bytes)", "Time (Seconds)")
plt.save_plot("%s/%s_gpu_mult_pong.pdf"%(prof.folder_out, prof.computer))
if 1:
# GPU Diff X
plt.add_luke_options()
plt.set_palette(palette="deep", n_colors=prof.n_gpus)
x_data = [4*2**i for i in i_list]
for i in range(prof.n_gpus):
y_data = [gpu_times.ppn_times[i][il] / gpu_times.ppn_times[0][il] for il in i_list]
plt.line_plot(y_data, x_data, label = "NMsgs %d"%(i+1))
plt.add_anchored_legend(ncol=prof.n_gpus/2)
plt.set_scale('log', 'linear')
plt.add_labels("Message Size (Bytes)", "Times Slowdown")
plt.save_plot("%s/%s_gpu_mult_slowdown.pdf"%(prof.folder_out, prof.computer)) | 0.165796 | 0.190347 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import yaml
import copy
import pandas as pd
import logging
import tensorflow as tf
from collections import Counter, namedtuple
from tensorflow.python.platform import gfile
from subprocess import Popen, PIPE
from chatbot.globals import DEFAULT_FULL_CONFIG
# Special vocabulary symbols.
_PAD = b"_PAD" # Append to unused space for both encoder/decoder.
_GO = b"_GO" # Prepend to each decoder input.
_EOS = b"_EOS" # Append to outputs only. Stopping signal when decoding.
_UNK = b"_UNK" # For any symbols not in our vocabulary.
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
# Enumerations for ease of use by this and other files.
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
# Build mock FLAGS object for utils to wrap info around if needed.
# This makes the API more user-friendly, since it takes care of
# formatting data if the user doesn't do it exactly as expected.
# Note: I did initially try this with an actual tf.app.flags object,
# but it was a nightmare.
_flag_names = ["pretrained_dir",
"config",
"debug",
"model",
"model_params",
"dataset",
"dataset_params"]
Flags = namedtuple('Flags', _flag_names)
_FLAGS = Flags(pretrained_dir=None,
config=None,
debug=None,
model='{}',
dataset='{}',
model_params='{}',
dataset_params='{}')
def save_hyper_params(hyper_params, fname):
# Append to file if exists, else create.
df = pd.DataFrame(hyper_params)
with open(fname, 'a+') as f:
df.to_csv(f, header=False)
def get_sentence(lower=True):
"""Simple function to prompt user for input and return it w/o newline.
Frequently used in chat sessions, of course.
"""
sys.stdout.write("Human: ")
sys.stdout.flush()
sentence = input()
if lower:
return sentence.lower()
return sentence
def update_config(config=None,
config_path=None,
return_config=True,
**kwargs):
"""Update contents of a config file, overwriting any that
match those in kwargs.
Args:
config: (dict) subset of DEFAULT_FULL_CONFIG.
config_path: (str) location of a yaml config file.
return_config: (bool) whether or not to return the config dictionary.
kwargs: key-value pairs to update in the config dictionary and/or file.
At least one of {config, config_path} must be not None. If both are not
None, then we update the config dictionary with the kwargs, and then set
the file at config_path to match updated dictionary contents.
In other words, if config is not None, we do won't consider the contents
of config_path when doing the updates.
"""
if config is None and config_path is None:
raise ValueError("Configuration info not given to update_config.")
if config is None:
# Grab the current config file contents into a dictionary.
config = get_yaml_config(config_path)
logging.info("Updating config values %r for %s",
list(kwargs.keys()), config_path)
# Update its values with those in kwargs.
for top_level_key in DEFAULT_FULL_CONFIG:
for update_key in kwargs:
if update_key == top_level_key:
config[update_key] = kwargs[update_key]
elif update_key in DEFAULT_FULL_CONFIG[top_level_key]:
if config.get(top_level_key) is None:
config[top_level_key] = {}
config[top_level_key][update_key] = kwargs[update_key]
# Rewrite the config file.
if config_path is not None:
with open(os.path.join(config_path), 'w') as f:
yaml.dump(config, f, default_flow_style=False)
# Return the dictionary if requested.
if return_config:
return config
def get_yaml_config(path, save_path=True):
with open(path) as file:
config = yaml.load(file)
if save_path:
if config.get('dataset_params') is not None:
config['dataset_params']['config_path'] = path
return config
def load_pretrained_config(pretrained_dir):
"""Get the full configuration dictionary for a pretrained model.
Args:
pretrained_dir: path (relative to project root) that is assumed to contain:
- config.yml: full configuration file (automatically saved by all models).
- checkpoint(s) from training session (also saved automatically).
Returns:
config: dictionary loaded from config.yml, and with all training flags reset to
chat session flags, since the only time this is called is for chatting.
"""
config_path = os.path.join(pretrained_dir, "config.yml")
config = get_yaml_config(config_path)
# The loaded config will have "training" values, so we need
# to set some of them to "chatting" values, instead of requiring
# user to specify them (since they are mandatory for any chat sesion).
config['model_params']['decode'] = True
config['model_params']['is_chatting'] = True # alias
config['model_params']['reset_model'] = False
config['model_params']['ckpt_dir'] = pretrained_dir
return config
def print_non_defaults(config):
"""Prints all values in config that aren't the default values in DEFAULT_FULL_CONFIG.
Args:
config: dict of parameters with same structure as DEFAULT_FULL_CONFIG.
"""
print("\n---------- Your non-default parameters: ----------")
if config['model'] != DEFAULT_FULL_CONFIG['model']:
print("{}: {}".format('model', config['model']))
if config['dataset'] != DEFAULT_FULL_CONFIG['dataset']:
print("{}: {}".format('dataset', config['dataset']))
for dict_id in ['model_params', 'dataset_params']:
print(dict_id, end=":\n")
for key, val in config[dict_id].items():
# First check if key isn't even specified by defaults.
if key not in DEFAULT_FULL_CONFIG[dict_id]:
print("\t{}: {}".format(key, val))
elif DEFAULT_FULL_CONFIG[dict_id][key] != val:
print("\t{}: {}".format(key, val))
print("--------------------------------------------------\n")
def flags_to_dict(flags):
"""Builds and return a dictionary from flags keys, namely
'model', 'dataset', 'model_params', 'dataset_params'.
"""
if isinstance(flags, dict):
logging.warning('The `flags` object is already a dictionary!')
return flags
if flags.pretrained_dir is not None:
config = load_pretrained_config(flags.pretrained_dir)
config['model_params'] = {**config['model_params'],
**yaml.load(getattr(flags, 'model_params'))}
return config
flags_dict = {}
# Grab any values under supported keys defined in default config.
for stream in DEFAULT_FULL_CONFIG:
stream_attr = getattr(flags, stream)
if not isinstance(stream_attr, dict):
yaml_stream = yaml.load(getattr(flags, stream))
else:
yaml_stream = stream_attr
if yaml_stream:
flags_dict.update({stream: yaml_stream})
elif stream in ['model_params', 'dataset_params']:
# Explicitly set it as empty for merging with default later.
flags_dict[stream] = {}
# If provided, incorporate yaml config file as well.
# Give preference to values in flags_dict, since those are
# values provided by user on command-line.
if flags.config is not None:
yaml_config = get_yaml_config(flags.config)
flags_dict = merge_dicts(
default_dict=yaml_config,
preference_dict=flags_dict)
return flags_dict
def merge_dicts(default_dict, preference_dict):
"""Preferentially (and recursively) merge input dictionaries.
Ensures that all values in preference dict are used, and
all other (i.e. unspecified) items are from default dict.
"""
merged_dict = copy.deepcopy(default_dict)
for pref_key in preference_dict:
if isinstance(preference_dict[pref_key], dict) and pref_key in merged_dict:
# Dictionaries are expected to have the same type structure.
# So if any preference_dict[key] is a dict, then require default_dict[key]
# must also be a dict (if it exists, that is).
assert isinstance(merged_dict[pref_key], dict), \
"Expected default_dict[%r]=%r to have type dict." % \
(pref_key, merged_dict[pref_key])
# Since these are both dictionaries, can just recurse.
merged_dict[pref_key] = merge_dicts(merged_dict[pref_key],
preference_dict[pref_key])
else:
merged_dict[pref_key] = preference_dict[pref_key]
return merged_dict
def parse_config(flags=None, pretrained_dir=None, config_path=None):
"""Get custom configuration dictionary from either a tensorflow flags
object, a path to a training directory, or a path to a yaml file. Only pass
one of these. See "Args" below for more details.
The result is a dictionary of the same key-val structure as seen in
chatbot.globals.DEFAULT_FULL_CONFIG. For any key-value pair not found from
the (single) argument passed, it will be set to the default found in
DEFAULT_FULL_CONFIG.
Args:
flags: A tf.app.flags.FLAGS object. See FLAGS in main.py.
pretrained_dir: relative [to project root] path to a pretrained model
directory, i.e. a directory where a chatbot was previously
saved/trained (a ckpt_dir).
config_path: relative [to project root] path to a valid yaml
configuration file. For example: 'configs/my_config.yml'.
Returns:
config: dictionary of merged config info, where precedence is given to
user-specified params on command-line (over .yml config files).
"""
# Only pass one of the options!
assert sum(x is not None for x in [flags, pretrained_dir, config_path]) == 1
# Build a flags object from other params, if it doesn't exist.
if flags is None:
# Get the config_path from the pretrained directory.
if config_path is None:
config_path = os.path.join(pretrained_dir, 'config.yml')
assert gfile.Exists(config_path), \
"Cannot parse from %s. No config.yml." % config_path
# Wrap flags string inside an actual tf.app.flags object.
flags = _FLAGS
flags = flags._replace(config=config_path)
assert flags is not None
# Get configuration dictionary containing user-specified parameters.
config = flags_to_dict(flags)
# Sanity check: make sure we have values that don't have defaults.
if 'ckpt_dir' not in config['model_params']:
print('Robot: Please enter a directory for saving checkpoints:')
config['model_params']['ckpt_dir'] = get_sentence(lower=False)
if 'data_dir' not in config['dataset_params']:
print('Robot: Please enter full path to directory containing data:')
config['dataset_params']['data_dir'] = get_sentence(lower=False)
# Then, fill in any blanks with the full default config.
config = merge_dicts(default_dict=DEFAULT_FULL_CONFIG,
preference_dict=config)
return config
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = []
for space_separated_fragment in sentence.strip().lower().split():
words.extend(_WORD_SPLIT.split(space_separated_fragment))
return [w for w in words if w]
def num_lines(file_path):
"""Return the number of lines in file given by its absolute path."""
(num_samples, stderr) = Popen(['wc', '-l', file_path], stdout=PIPE).communicate()
return int(num_samples.strip().split()[0])
def get_word_freqs(path, counter, norm_digits=True):
"""Extract word-frequency mapping from file given by path.
Args:
path: data file of words we wish to extract vocab counts from.
counter: collections.Counter object for mapping word -> frequency.
norm_digits: Boolean; if true, all digits are replaced by 0s.
Returns:
The counter (dict), updated with mappings from word -> frequency.
"""
print("Creating vocabulary for data", path)
with gfile.GFile(path, mode="rb") as f:
for i, line in enumerate(f):
if (i + 1) % 100000 == 0:
print("\tProcessing line", (i + 1))
line = tf.compat.as_bytes(line)
tokens = basic_tokenizer(line)
# Update word frequency counts in vocab counter dict.
for w in tokens:
word = _DIGIT_RE.sub(b"0", w) if norm_digits else w
counter[word] += 1
return counter
def create_vocabulary(vocab_path, from_path, to_path, max_vocab_size, norm_digits=True):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if norm_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocab_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Args:
vocab_path: path where the vocabulary will be created.
from_path: data file for encoder inputs.
to_path: data file for decoder inputs.
max_vocab_size: limit on the size of the created vocabulary.
norm_digits: Boolean; if true, all digits are replaced by 0s.
"""
if gfile.Exists(vocab_path):
return num_lines(vocab_path)
vocab = Counter()
# Pool all data words together to reflect the data distribution well.
vocab = get_word_freqs(from_path, vocab, norm_digits)
vocab = get_word_freqs(to_path, vocab, norm_digits)
# Get sorted vocabulary, from most frequent to least frequent.
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
vocab_list = vocab_list[:max_vocab_size]
# Write the list to a file.
with gfile.GFile(vocab_path, mode="wb") as vocab_file:
for w in vocab_list:
vocab_file.write(w + b"\n")
return len(vocab_list)
def get_vocab_dicts(vocabulary_path):
"""Returns word_to_idx, idx_to_word dictionaries given vocabulary.
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary, normalize_digits=True):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Args:
sentence: the sentence in bytes format to convert to token-ids.
vocabulary: a dictionary mapping tokens to integers.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
Returns:
a list of integers, the token-ids for the sentence.
"""
words = basic_tokenizer(sentence)
if not normalize_digits:
return [vocabulary.get(w, UNK_ID) for w in words]
# Normalize digits by 0 before looking words up in the vocabulary.
return [vocabulary.get(_DIGIT_RE.sub(b"0", w), UNK_ID) for w in words]
def data_to_token_ids(data_path, target_path, vocabulary_path, normalize_digits=True):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path.
Args:
data_path: path to the data file in one-sentence-per-line format.
target_path: path where the file with token-ids will be created.
vocabulary_path: path to the vocabulary file.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(target_path):
print("Tokenizing data in %s" % data_path)
vocab, _ = get_vocab_dicts(vocabulary_path=vocabulary_path)
with gfile.GFile(data_path, mode="rb") as data_file:
with gfile.GFile(target_path, mode="w") as tokens_file:
counter = 0
for line in data_file:
counter += 1
if counter % 100000 == 0:
print(" tokenizing line %d" % counter)
token_ids = sentence_to_token_ids(
tf.compat.as_bytes(line), vocab, normalize_digits)
tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n")
def prepare_data(data_dir,
vocab_size,
from_train_path=None,
to_train_path=None,
from_valid_path=None,
to_valid_path=None,
optimize=True,
config_path=None):
"""Prepare all necessary files that are required for the training.
Args:
data_dir: directory in which the data sets will be stored.
from_train_path: path to the file that includes "from" training samples.
to_train_path: path to the file that includes "to" training samples.
from_valid_path: path to the file that includes "valid_from" samples.
to_valid_path: path to the file that includes "valid_to" samples.
vocab_size: preferred number of words to use in vocabulary.
optimize: if True, allow program to rest this value if the actual
vocab_size (num unique words in data) < preferred vocab_size.
This would decrease computational cost, should the situation arise.
config_path: (required if optimize==True) location of config file.
Note on optimize:
- It will only have an effect if the following conditions are ALL met:
- config_path is not None (and is a valid path)
- optimize == True (of course)
- true vocab size != [preferred] vocab_size
Returns:
Tuple of:
(1) path to the token-ids for "from language" training data-set,
(2) path to the token-ids for "to language" training data-set,
(3) path to the token-ids for "from language" development data-set,
(4) path to the token-ids for "to language" development data-set,
(5) path to the vocabulary file,
(6) the true vocabulary size (less than or equal to max allowed)
"""
if optimize is None:
logging.warning("You have not requested that your choice for "
"vocab_size be optimized. This can lead to slower "
"training times.\nSet 'optimize_params: true' under "
"dataset_params in your yaml config to enable.")
def maybe_set_param(param, file_name):
if param is None:
param = os.path.join(data_dir, file_name)
logging.info('Set path from None to %s', param)
return param
def get_vocab_path(vocab_size):
return os.path.join(data_dir, "vocab%d.txt" % vocab_size)
def append_to_paths(s, **paths):
return {name: path + s for name, path in paths.items()}
# Set any paths that are None to default values.
from_train_path = maybe_set_param(from_train_path, 'train_from.txt')
to_train_path = maybe_set_param(to_train_path, 'train_to.txt')
from_valid_path = maybe_set_param(from_valid_path, 'valid_from.txt')
to_valid_path = maybe_set_param(to_valid_path, 'valid_to.txt')
# Create vocabularies of the appropriate sizes.
vocab_path = get_vocab_path(vocab_size)
true_vocab_size = create_vocabulary(
vocab_path,
from_train_path,
to_train_path,
vocab_size)
assert true_vocab_size <= vocab_size
# User-permitted, we reset the config file's vocab size and rename the
# vocabulary path name to the optimal values.
should_optimize = config_path is not None
should_optimize = (vocab_size != true_vocab_size) and should_optimize
should_optimize = optimize and should_optimize
if should_optimize:
logging.info('Optimizing vocab size in config and renaming files.')
# Necessary when we overestimate the number of unique words in the data.
# e.g. we set vocab_size = 40k but our data only has 5 unique words,
# it would be wasteful to train a model on 40k.
# Thus, we rename vocab filenames to have the true vocab size.
vocab_size = true_vocab_size
old_vocab_path = vocab_path
vocab_path = get_vocab_path(true_vocab_size)
if old_vocab_path != vocab_path:
Popen(['mv', old_vocab_path, vocab_path], stdout=PIPE).communicate()
# Reset the value of 'vocab_size' in the configuration file, so that
# we won't need to regenerate everything again if the user wants to
# resume training/chat/etc.
update_config(config_path=config_path, vocab_size=true_vocab_size)
id_paths = append_to_paths(
'.ids%d' % vocab_size,
from_train=from_train_path,
to_train=to_train_path,
from_valid=from_valid_path,
to_valid=to_valid_path)
# Create token ids for all training and validation data.
for name in id_paths:
data_to_token_ids(
eval(name + '_path'),
id_paths[name],
vocab_path)
return id_paths, vocab_path, vocab_size | utils/io_utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import yaml
import copy
import pandas as pd
import logging
import tensorflow as tf
from collections import Counter, namedtuple
from tensorflow.python.platform import gfile
from subprocess import Popen, PIPE
from chatbot.globals import DEFAULT_FULL_CONFIG
# Special vocabulary symbols.
_PAD = b"_PAD" # Append to unused space for both encoder/decoder.
_GO = b"_GO" # Prepend to each decoder input.
_EOS = b"_EOS" # Append to outputs only. Stopping signal when decoding.
_UNK = b"_UNK" # For any symbols not in our vocabulary.
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
# Enumerations for ease of use by this and other files.
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
# Build mock FLAGS object for utils to wrap info around if needed.
# This makes the API more user-friendly, since it takes care of
# formatting data if the user doesn't do it exactly as expected.
# Note: I did initially try this with an actual tf.app.flags object,
# but it was a nightmare.
_flag_names = ["pretrained_dir",
"config",
"debug",
"model",
"model_params",
"dataset",
"dataset_params"]
Flags = namedtuple('Flags', _flag_names)
_FLAGS = Flags(pretrained_dir=None,
config=None,
debug=None,
model='{}',
dataset='{}',
model_params='{}',
dataset_params='{}')
def save_hyper_params(hyper_params, fname):
# Append to file if exists, else create.
df = pd.DataFrame(hyper_params)
with open(fname, 'a+') as f:
df.to_csv(f, header=False)
def get_sentence(lower=True):
"""Simple function to prompt user for input and return it w/o newline.
Frequently used in chat sessions, of course.
"""
sys.stdout.write("Human: ")
sys.stdout.flush()
sentence = input()
if lower:
return sentence.lower()
return sentence
def update_config(config=None,
config_path=None,
return_config=True,
**kwargs):
"""Update contents of a config file, overwriting any that
match those in kwargs.
Args:
config: (dict) subset of DEFAULT_FULL_CONFIG.
config_path: (str) location of a yaml config file.
return_config: (bool) whether or not to return the config dictionary.
kwargs: key-value pairs to update in the config dictionary and/or file.
At least one of {config, config_path} must be not None. If both are not
None, then we update the config dictionary with the kwargs, and then set
the file at config_path to match updated dictionary contents.
In other words, if config is not None, we do won't consider the contents
of config_path when doing the updates.
"""
if config is None and config_path is None:
raise ValueError("Configuration info not given to update_config.")
if config is None:
# Grab the current config file contents into a dictionary.
config = get_yaml_config(config_path)
logging.info("Updating config values %r for %s",
list(kwargs.keys()), config_path)
# Update its values with those in kwargs.
for top_level_key in DEFAULT_FULL_CONFIG:
for update_key in kwargs:
if update_key == top_level_key:
config[update_key] = kwargs[update_key]
elif update_key in DEFAULT_FULL_CONFIG[top_level_key]:
if config.get(top_level_key) is None:
config[top_level_key] = {}
config[top_level_key][update_key] = kwargs[update_key]
# Rewrite the config file.
if config_path is not None:
with open(os.path.join(config_path), 'w') as f:
yaml.dump(config, f, default_flow_style=False)
# Return the dictionary if requested.
if return_config:
return config
def get_yaml_config(path, save_path=True):
with open(path) as file:
config = yaml.load(file)
if save_path:
if config.get('dataset_params') is not None:
config['dataset_params']['config_path'] = path
return config
def load_pretrained_config(pretrained_dir):
"""Get the full configuration dictionary for a pretrained model.
Args:
pretrained_dir: path (relative to project root) that is assumed to contain:
- config.yml: full configuration file (automatically saved by all models).
- checkpoint(s) from training session (also saved automatically).
Returns:
config: dictionary loaded from config.yml, and with all training flags reset to
chat session flags, since the only time this is called is for chatting.
"""
config_path = os.path.join(pretrained_dir, "config.yml")
config = get_yaml_config(config_path)
# The loaded config will have "training" values, so we need
# to set some of them to "chatting" values, instead of requiring
# user to specify them (since they are mandatory for any chat sesion).
config['model_params']['decode'] = True
config['model_params']['is_chatting'] = True # alias
config['model_params']['reset_model'] = False
config['model_params']['ckpt_dir'] = pretrained_dir
return config
def print_non_defaults(config):
"""Prints all values in config that aren't the default values in DEFAULT_FULL_CONFIG.
Args:
config: dict of parameters with same structure as DEFAULT_FULL_CONFIG.
"""
print("\n---------- Your non-default parameters: ----------")
if config['model'] != DEFAULT_FULL_CONFIG['model']:
print("{}: {}".format('model', config['model']))
if config['dataset'] != DEFAULT_FULL_CONFIG['dataset']:
print("{}: {}".format('dataset', config['dataset']))
for dict_id in ['model_params', 'dataset_params']:
print(dict_id, end=":\n")
for key, val in config[dict_id].items():
# First check if key isn't even specified by defaults.
if key not in DEFAULT_FULL_CONFIG[dict_id]:
print("\t{}: {}".format(key, val))
elif DEFAULT_FULL_CONFIG[dict_id][key] != val:
print("\t{}: {}".format(key, val))
print("--------------------------------------------------\n")
def flags_to_dict(flags):
"""Builds and return a dictionary from flags keys, namely
'model', 'dataset', 'model_params', 'dataset_params'.
"""
if isinstance(flags, dict):
logging.warning('The `flags` object is already a dictionary!')
return flags
if flags.pretrained_dir is not None:
config = load_pretrained_config(flags.pretrained_dir)
config['model_params'] = {**config['model_params'],
**yaml.load(getattr(flags, 'model_params'))}
return config
flags_dict = {}
# Grab any values under supported keys defined in default config.
for stream in DEFAULT_FULL_CONFIG:
stream_attr = getattr(flags, stream)
if not isinstance(stream_attr, dict):
yaml_stream = yaml.load(getattr(flags, stream))
else:
yaml_stream = stream_attr
if yaml_stream:
flags_dict.update({stream: yaml_stream})
elif stream in ['model_params', 'dataset_params']:
# Explicitly set it as empty for merging with default later.
flags_dict[stream] = {}
# If provided, incorporate yaml config file as well.
# Give preference to values in flags_dict, since those are
# values provided by user on command-line.
if flags.config is not None:
yaml_config = get_yaml_config(flags.config)
flags_dict = merge_dicts(
default_dict=yaml_config,
preference_dict=flags_dict)
return flags_dict
def merge_dicts(default_dict, preference_dict):
"""Preferentially (and recursively) merge input dictionaries.
Ensures that all values in preference dict are used, and
all other (i.e. unspecified) items are from default dict.
"""
merged_dict = copy.deepcopy(default_dict)
for pref_key in preference_dict:
if isinstance(preference_dict[pref_key], dict) and pref_key in merged_dict:
# Dictionaries are expected to have the same type structure.
# So if any preference_dict[key] is a dict, then require default_dict[key]
# must also be a dict (if it exists, that is).
assert isinstance(merged_dict[pref_key], dict), \
"Expected default_dict[%r]=%r to have type dict." % \
(pref_key, merged_dict[pref_key])
# Since these are both dictionaries, can just recurse.
merged_dict[pref_key] = merge_dicts(merged_dict[pref_key],
preference_dict[pref_key])
else:
merged_dict[pref_key] = preference_dict[pref_key]
return merged_dict
def parse_config(flags=None, pretrained_dir=None, config_path=None):
"""Get custom configuration dictionary from either a tensorflow flags
object, a path to a training directory, or a path to a yaml file. Only pass
one of these. See "Args" below for more details.
The result is a dictionary of the same key-val structure as seen in
chatbot.globals.DEFAULT_FULL_CONFIG. For any key-value pair not found from
the (single) argument passed, it will be set to the default found in
DEFAULT_FULL_CONFIG.
Args:
flags: A tf.app.flags.FLAGS object. See FLAGS in main.py.
pretrained_dir: relative [to project root] path to a pretrained model
directory, i.e. a directory where a chatbot was previously
saved/trained (a ckpt_dir).
config_path: relative [to project root] path to a valid yaml
configuration file. For example: 'configs/my_config.yml'.
Returns:
config: dictionary of merged config info, where precedence is given to
user-specified params on command-line (over .yml config files).
"""
# Only pass one of the options!
assert sum(x is not None for x in [flags, pretrained_dir, config_path]) == 1
# Build a flags object from other params, if it doesn't exist.
if flags is None:
# Get the config_path from the pretrained directory.
if config_path is None:
config_path = os.path.join(pretrained_dir, 'config.yml')
assert gfile.Exists(config_path), \
"Cannot parse from %s. No config.yml." % config_path
# Wrap flags string inside an actual tf.app.flags object.
flags = _FLAGS
flags = flags._replace(config=config_path)
assert flags is not None
# Get configuration dictionary containing user-specified parameters.
config = flags_to_dict(flags)
# Sanity check: make sure we have values that don't have defaults.
if 'ckpt_dir' not in config['model_params']:
print('Robot: Please enter a directory for saving checkpoints:')
config['model_params']['ckpt_dir'] = get_sentence(lower=False)
if 'data_dir' not in config['dataset_params']:
print('Robot: Please enter full path to directory containing data:')
config['dataset_params']['data_dir'] = get_sentence(lower=False)
# Then, fill in any blanks with the full default config.
config = merge_dicts(default_dict=DEFAULT_FULL_CONFIG,
preference_dict=config)
return config
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = []
for space_separated_fragment in sentence.strip().lower().split():
words.extend(_WORD_SPLIT.split(space_separated_fragment))
return [w for w in words if w]
def num_lines(file_path):
"""Return the number of lines in file given by its absolute path."""
(num_samples, stderr) = Popen(['wc', '-l', file_path], stdout=PIPE).communicate()
return int(num_samples.strip().split()[0])
def get_word_freqs(path, counter, norm_digits=True):
"""Extract word-frequency mapping from file given by path.
Args:
path: data file of words we wish to extract vocab counts from.
counter: collections.Counter object for mapping word -> frequency.
norm_digits: Boolean; if true, all digits are replaced by 0s.
Returns:
The counter (dict), updated with mappings from word -> frequency.
"""
print("Creating vocabulary for data", path)
with gfile.GFile(path, mode="rb") as f:
for i, line in enumerate(f):
if (i + 1) % 100000 == 0:
print("\tProcessing line", (i + 1))
line = tf.compat.as_bytes(line)
tokens = basic_tokenizer(line)
# Update word frequency counts in vocab counter dict.
for w in tokens:
word = _DIGIT_RE.sub(b"0", w) if norm_digits else w
counter[word] += 1
return counter
def create_vocabulary(vocab_path, from_path, to_path, max_vocab_size, norm_digits=True):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if norm_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocab_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Args:
vocab_path: path where the vocabulary will be created.
from_path: data file for encoder inputs.
to_path: data file for decoder inputs.
max_vocab_size: limit on the size of the created vocabulary.
norm_digits: Boolean; if true, all digits are replaced by 0s.
"""
if gfile.Exists(vocab_path):
return num_lines(vocab_path)
vocab = Counter()
# Pool all data words together to reflect the data distribution well.
vocab = get_word_freqs(from_path, vocab, norm_digits)
vocab = get_word_freqs(to_path, vocab, norm_digits)
# Get sorted vocabulary, from most frequent to least frequent.
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
vocab_list = vocab_list[:max_vocab_size]
# Write the list to a file.
with gfile.GFile(vocab_path, mode="wb") as vocab_file:
for w in vocab_list:
vocab_file.write(w + b"\n")
return len(vocab_list)
def get_vocab_dicts(vocabulary_path):
"""Returns word_to_idx, idx_to_word dictionaries given vocabulary.
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary, normalize_digits=True):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Args:
sentence: the sentence in bytes format to convert to token-ids.
vocabulary: a dictionary mapping tokens to integers.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
Returns:
a list of integers, the token-ids for the sentence.
"""
words = basic_tokenizer(sentence)
if not normalize_digits:
return [vocabulary.get(w, UNK_ID) for w in words]
# Normalize digits by 0 before looking words up in the vocabulary.
return [vocabulary.get(_DIGIT_RE.sub(b"0", w), UNK_ID) for w in words]
def data_to_token_ids(data_path, target_path, vocabulary_path, normalize_digits=True):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path.
Args:
data_path: path to the data file in one-sentence-per-line format.
target_path: path where the file with token-ids will be created.
vocabulary_path: path to the vocabulary file.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(target_path):
print("Tokenizing data in %s" % data_path)
vocab, _ = get_vocab_dicts(vocabulary_path=vocabulary_path)
with gfile.GFile(data_path, mode="rb") as data_file:
with gfile.GFile(target_path, mode="w") as tokens_file:
counter = 0
for line in data_file:
counter += 1
if counter % 100000 == 0:
print(" tokenizing line %d" % counter)
token_ids = sentence_to_token_ids(
tf.compat.as_bytes(line), vocab, normalize_digits)
tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n")
def prepare_data(data_dir,
vocab_size,
from_train_path=None,
to_train_path=None,
from_valid_path=None,
to_valid_path=None,
optimize=True,
config_path=None):
"""Prepare all necessary files that are required for the training.
Args:
data_dir: directory in which the data sets will be stored.
from_train_path: path to the file that includes "from" training samples.
to_train_path: path to the file that includes "to" training samples.
from_valid_path: path to the file that includes "valid_from" samples.
to_valid_path: path to the file that includes "valid_to" samples.
vocab_size: preferred number of words to use in vocabulary.
optimize: if True, allow program to rest this value if the actual
vocab_size (num unique words in data) < preferred vocab_size.
This would decrease computational cost, should the situation arise.
config_path: (required if optimize==True) location of config file.
Note on optimize:
- It will only have an effect if the following conditions are ALL met:
- config_path is not None (and is a valid path)
- optimize == True (of course)
- true vocab size != [preferred] vocab_size
Returns:
Tuple of:
(1) path to the token-ids for "from language" training data-set,
(2) path to the token-ids for "to language" training data-set,
(3) path to the token-ids for "from language" development data-set,
(4) path to the token-ids for "to language" development data-set,
(5) path to the vocabulary file,
(6) the true vocabulary size (less than or equal to max allowed)
"""
if optimize is None:
logging.warning("You have not requested that your choice for "
"vocab_size be optimized. This can lead to slower "
"training times.\nSet 'optimize_params: true' under "
"dataset_params in your yaml config to enable.")
def maybe_set_param(param, file_name):
if param is None:
param = os.path.join(data_dir, file_name)
logging.info('Set path from None to %s', param)
return param
def get_vocab_path(vocab_size):
return os.path.join(data_dir, "vocab%d.txt" % vocab_size)
def append_to_paths(s, **paths):
return {name: path + s for name, path in paths.items()}
# Set any paths that are None to default values.
from_train_path = maybe_set_param(from_train_path, 'train_from.txt')
to_train_path = maybe_set_param(to_train_path, 'train_to.txt')
from_valid_path = maybe_set_param(from_valid_path, 'valid_from.txt')
to_valid_path = maybe_set_param(to_valid_path, 'valid_to.txt')
# Create vocabularies of the appropriate sizes.
vocab_path = get_vocab_path(vocab_size)
true_vocab_size = create_vocabulary(
vocab_path,
from_train_path,
to_train_path,
vocab_size)
assert true_vocab_size <= vocab_size
# User-permitted, we reset the config file's vocab size and rename the
# vocabulary path name to the optimal values.
should_optimize = config_path is not None
should_optimize = (vocab_size != true_vocab_size) and should_optimize
should_optimize = optimize and should_optimize
if should_optimize:
logging.info('Optimizing vocab size in config and renaming files.')
# Necessary when we overestimate the number of unique words in the data.
# e.g. we set vocab_size = 40k but our data only has 5 unique words,
# it would be wasteful to train a model on 40k.
# Thus, we rename vocab filenames to have the true vocab size.
vocab_size = true_vocab_size
old_vocab_path = vocab_path
vocab_path = get_vocab_path(true_vocab_size)
if old_vocab_path != vocab_path:
Popen(['mv', old_vocab_path, vocab_path], stdout=PIPE).communicate()
# Reset the value of 'vocab_size' in the configuration file, so that
# we won't need to regenerate everything again if the user wants to
# resume training/chat/etc.
update_config(config_path=config_path, vocab_size=true_vocab_size)
id_paths = append_to_paths(
'.ids%d' % vocab_size,
from_train=from_train_path,
to_train=to_train_path,
from_valid=from_valid_path,
to_valid=to_valid_path)
# Create token ids for all training and validation data.
for name in id_paths:
data_to_token_ids(
eval(name + '_path'),
id_paths[name],
vocab_path)
return id_paths, vocab_path, vocab_size | 0.643217 | 0.135833 |
__all__ = ['open_geotiff', 'calc_normalized_spectral_index', 'calc_avi', 'calc_savi', 'calc_gci',
'mask_plot_from_image', 'image_metrics', 'glcm_xplusy', 'glcm_xminusy', 'textural_features',
'process_image_features']
# Cell
import rasterio as rio
import numpy as np
import matplotlib.pyplot as plt
from typing import List
import pandas as pd
import skimage
from skimage.feature import greycomatrix, greycoprops
from itertools import product
# Cell
def open_geotiff(fn, bands:List[int]=None) -> np.ndarray:
"""Open geotiff image from path, cast it to float and scale it to 0-1 range, optionally with only `bands` input bands."
Returns numpy array of shape (C,W,H)
"""
with rio.open(str(fn)) as f:
data = f.read()
data = data.astype(np.float32)
data /= 255.
if bands is not None: data = data[bands]
return data
# Cell
def calc_normalized_spectral_index(im:np.ndarray, band_1:int, band_2:int) -> np.ndarray:
"Calculate normalized spectral index (band_1 - band_2)/(band_1 + band_2). Can be used with NDVI and such simple indices"
return (im[band_1] - im[band_2]) / (im[band_1] + im[band_2])
def calc_avi(im:np.ndarray, nir:int, red:int) -> np.ndarray:
"Calculate AVI (nir *(1-red) * (nir-red))"
return im[nir] * (1 - im[red]) * (nir - red)
def calc_savi(im:np.ndarray, nir:int, red:int, l:float=0.5) -> np.ndarray:
"Calculate Soil Adjusted Vegetation Index ((nir-red)/(nir+red+l)) * (1+l). Default uses Landsat coefficient L"
return ((im[nir] - im[red]) / (im[nir] + im[red] + l)) * (1 + l)
def calc_gci(im:np.ndarray, nir:int, green:int) -> np.ndarray:
"Calculate Green Clorophyll Index nir/green - 1"
return im[nir] / im[green] - 1
# Cell
def mask_plot_from_image(data:np.ndarray, radius:float=31) -> np.ndarray:
"Select only data from within field plot of radius (radius-1) pixels"
center = (int(data.shape[1]/2), int(data.shape[2]/2))
Y, X = np.ogrid[:data.shape[1], :data.shape[2]]
dist_from_center = np.sqrt((X-center[0])**2 + (Y-center[1])**2)
mask = dist_from_center <= radius
data[:,~mask] = np.nan
return data
# Cell
def image_metrics(fn, mask_plot:bool=True, radius:int=31) -> dict:
"Calculate metrics from NIR-red-green -images"
image = open_geotiff(fn)
if mask_plot == True: image = mask_plot_from_image(image, radius=radius)
# Max, mean, std and coefficient of variation
features = {}
features['nir_pix_max'] = np.nanmax(image[0])
features['nir_pix_min'] = np.nanmin(image[0])
features['nir_pix_mean'] = np.nanmean(image[0])
features['nir_pix_std'] = np.nanstd(image[0])
features['nir_pix_var'] = np.nanvar(image[0])
features['red_pix_max'] = np.nanmax(image[1])
features['red_pix_min'] = np.nanmin(image[1])
features['red_pix_mean'] = np.nanmean(image[1])
features['red_pix_std'] = np.nanstd(image[1])
features['red_pix_var'] = np.nanvar(image[1])
features['green_pix_max'] = np.nanmax(image[2])
features['green_pix_min'] = np.nanmin(image[2])
features['green_pix_mean'] = np.nanmean(image[2])
features['green_pix_std'] = np.nanstd(image[2])
features['green_pix_var'] = np.nanvar(image[2])
# spectral indices
# NDVI
ndvi = calc_normalized_spectral_index(image, 0, 1)
features['ndvi_pix_max'] = np.nanmax(ndvi)
features['ndvi_pix_min'] = np.nanmin(ndvi)
features['ndvi_pix_mean'] = np.nanmean(ndvi)
features['ndvi_pix_std'] = np.nanstd(ndvi)
features['ndvi_pix_var'] = np.nanvar(ndvi)
return features
# Cell
def glcm_xplusy(glcm, k, distance, angle):
"sum each element where the indices of the glcm sum to k"
s = 0
for c in range(0, glcm.shape[0]):
targ = k - c
if targ >= 0 and targ < glcm.shape[0]: s += glcm[targ, c, distance, angle]
if targ > k: return s
return s
def glcm_xminusy(glcm, k, distance, angle):
"sum each element where the difference of the indices is k"
s = 0
for c in range(0, glcm.shape[0]):
targ = k + c
if targ < glcm.shape[0]: s += glcm[targ, c, distance, angle]
if k == 0: return s
return s*2
def textural_features(fn,
band_names:List=['nir', 'red', 'green', 'ndvi'],
distances:List[int]=[8],
angles:List[float]=[0, np.pi/4, np.pi/2, 3*np.pi/4],
n_grey:int=20) -> dict:
"""Get textural features from images. Works close to R package radiomics `GLCMFeatures` functions.
However skimage makes glcm a bit differently"""
tex_features = {}
im = open_geotiff(fn)
# add NDVI
im = np.vstack((im, calc_normalized_spectral_index(im, 1, 0)[None]))
for b in range(im.shape[0]):
pref = band_names[b]
# bin image to at maximum n_grey levels
bins = np.linspace(im[b].min(),im[b].max(), min(n_grey, len(np.unique(im[b]))) + 1)
binned_im = np.digitize(im[b], bins) - 1
n_levels = binned_im.max() + 1
# get glcm. Note that skimage makes glcm differently than radiomics
glcm = greycomatrix(binned_im,
distances=distances,
angles=angles,
levels=n_levels,
normed=True, symmetric=True)
# greycoprops gives some features easily
# Others not so much
means = np.zeros((len(distances), len(angles)))
variances = np.zeros((len(distances), len(angles)))
autocorrelations = np.zeros((len(distances), len(angles)))
cluster_prominences = np.zeros((len(distances), len(angles)))
cluster_shades = np.zeros((len(distances), len(angles)))
cluster_tendencies = np.zeros((len(distances), len(angles)))
diff_entropies = np.zeros((len(distances), len(angles)))
energies = np.zeros((len(distances), len(angles)))
entropies = np.zeros((len(distances), len(angles)))
hgn1s = np.zeros((len(distances), len(angles)))
idmns = np.zeros((len(distances), len(angles)))
idns = np.zeros((len(distances), len(angles)))
inverse_variances = np.zeros((len(distances), len(angles)))
sum_averages = np.zeros((len(distances), len(angles)))
sum_entropies = np.zeros((len(distances), len(angles)))
sum_variances = np.zeros((len(distances), len(angles)))
# Todo chech that multiple distances work
for d,a in product(range(len(distances)), range(len(angles))):
# means
means[d,a] = np.sum(np.sum(glcm[:,:,d,a], axis=1) * np.arange(1,n_levels+1))
scale_matrix = np.empty((n_levels, n_levels))
for i, j in product(range(n_levels), range(n_levels)):
# variance
variances[d,a] += (((i) - means[d,a])**2) * glcm[i,j,d,a]
# cluster metrix
cluster_prominences[d,a] += ((i + j - 2*means[d,a])**4) * glcm[i,j,d,a]
cluster_shades[d,a] += ((i+j - 2*means[d,a])**3)*glcm[i,j,0,a]
cluster_tendencies[d,a] += ((i + j - 2 * means[d,a])**2) * glcm[i,j,d,a]
# scale matrix for autocorrelations
scale_matrix[i,j] = (i+1) * (j+1)
# homogeneity 1
hgn1s[d,a] += glcm[i,j,d,a] / (1 + (np.abs(i-j)))
# IDM normalized
idmns[d,a] += glcm[i,j,d,a] / (1 + ((np.abs(i-j)**2)/(n_levels)**2))
# ID normalized
idns[d,a] += glcm[i,j,d,a] / (1 + (np.abs(i-j)/n_levels))
# Inverse variance
if i != j: inverse_variances[d,a] += glcm[i,j,d,a] / np.abs(i-j)**2
# autocorrelations
autocorrelations[d,a] = np.sum(glcm[:,:,0,a]*scale_matrix)
# diff_entropy
for i in range(n_levels-1):
pxy = glcm_xminusy(glcm, k=i, distance=d, angle=a)
if pxy > 0: diff_entropies[d,a] += pxy * np.log2(pxy)
diff_entropies[d,a] *= -1
# energy
energies[d,a] = np.sum(np.square(glcm[...,d,a]))
# entropy
entropies[d,a] = skimage.measure.shannon_entropy(glcm[...,d,a])
for i in range(2*(n_levels)-1):
# sum averages
pxy = glcm_xplusy(glcm, k=i, distance=d, angle=a)
sum_averages[d,a] += (i+2) * pxy
# sum entropies
if pxy > 0: sum_entropies[d,a] += pxy * np.log2(pxy)
sum_entropies[d,a] *= -1
for i in range(2*(n_levels) - 1):
# sum variances
pxy = glcm_xplusy(glcm, k=i-1, distance=d, angle=a)
sum_variances[d,a] += ((i+2 - sum_entropies[d,a])**2) * pxy
# Average all the angles
tex_features[f'{pref}_mean'] = np.mean(means)
tex_features[f'{pref}_var'] = np.mean(variances)
tex_features[f'{pref}_ac'] = np.mean(autocorrelations)
tex_features[f'{pref}_cProminence'] = np.mean(cluster_prominences)
tex_features[f'{pref}_cShade'] = np.mean(cluster_shades)
tex_features[f'{pref}_cTendency'] = np.mean(cluster_tendencies)
tex_features[f'{pref}_contrast'] = np.mean(greycoprops(glcm, 'contrast'))
tex_features[f'{pref}_corr'] = np.mean(greycoprops(glcm, 'correlation'))
tex_features[f'{pref}_diffentropy'] = np.mean(diff_entropies)
tex_features[f'{pref}_dissimilarity'] = np.mean(greycoprops(glcm, 'dissimilarity'))
tex_features[f'{pref}_energy'] = np.mean(energies)
tex_features[f'{pref}_ent'] = np.mean(entropies)
tex_features[f'{pref}_homogeneity1'] = np.mean(hgn1s)
tex_features[f'{pref}_homogeneity2'] = np.mean(greycoprops(glcm, 'homogeneity'))
tex_features[f'{pref}_idmn'] = np.mean(idmns)
tex_features[f'{pref}_idn'] = np.mean(idns)
tex_features[f'{pref}_iv'] = np.mean(inverse_variances)
tex_features[f'{pref}_maxProb'] = np.mean(glcm.max(axis=(0,1)))
tex_features[f'{pref}_sumaverage'] = np.mean(sum_averages)
tex_features[f'{pref}_sumentropy'] = np.mean(sum_entropies)
tex_features[f'{pref}_sumvariance'] = np.mean(sum_variances)
# Information measures of correlation TODO
#tex_features[f'{pref}_icm1'] = None
#tex_features[f'{pref}_icm2'] = None
return tex_features
# Cell
def process_image_features(fn:str, mask_plot:bool=True, radius:int=31):
"Process rasters to tabular format. Todo Textural features parasm"
image_features = image_metrics(fn, mask_plot=mask_plot, radius=radius)
texture_features = textural_features(fn)
features = {**image_features, **texture_features}
return features | enveco/data/image.py |
__all__ = ['open_geotiff', 'calc_normalized_spectral_index', 'calc_avi', 'calc_savi', 'calc_gci',
'mask_plot_from_image', 'image_metrics', 'glcm_xplusy', 'glcm_xminusy', 'textural_features',
'process_image_features']
# Cell
import rasterio as rio
import numpy as np
import matplotlib.pyplot as plt
from typing import List
import pandas as pd
import skimage
from skimage.feature import greycomatrix, greycoprops
from itertools import product
# Cell
def open_geotiff(fn, bands:List[int]=None) -> np.ndarray:
"""Open geotiff image from path, cast it to float and scale it to 0-1 range, optionally with only `bands` input bands."
Returns numpy array of shape (C,W,H)
"""
with rio.open(str(fn)) as f:
data = f.read()
data = data.astype(np.float32)
data /= 255.
if bands is not None: data = data[bands]
return data
# Cell
def calc_normalized_spectral_index(im:np.ndarray, band_1:int, band_2:int) -> np.ndarray:
"Calculate normalized spectral index (band_1 - band_2)/(band_1 + band_2). Can be used with NDVI and such simple indices"
return (im[band_1] - im[band_2]) / (im[band_1] + im[band_2])
def calc_avi(im:np.ndarray, nir:int, red:int) -> np.ndarray:
"Calculate AVI (nir *(1-red) * (nir-red))"
return im[nir] * (1 - im[red]) * (nir - red)
def calc_savi(im:np.ndarray, nir:int, red:int, l:float=0.5) -> np.ndarray:
"Calculate Soil Adjusted Vegetation Index ((nir-red)/(nir+red+l)) * (1+l). Default uses Landsat coefficient L"
return ((im[nir] - im[red]) / (im[nir] + im[red] + l)) * (1 + l)
def calc_gci(im:np.ndarray, nir:int, green:int) -> np.ndarray:
"Calculate Green Clorophyll Index nir/green - 1"
return im[nir] / im[green] - 1
# Cell
def mask_plot_from_image(data:np.ndarray, radius:float=31) -> np.ndarray:
"Select only data from within field plot of radius (radius-1) pixels"
center = (int(data.shape[1]/2), int(data.shape[2]/2))
Y, X = np.ogrid[:data.shape[1], :data.shape[2]]
dist_from_center = np.sqrt((X-center[0])**2 + (Y-center[1])**2)
mask = dist_from_center <= radius
data[:,~mask] = np.nan
return data
# Cell
def image_metrics(fn, mask_plot:bool=True, radius:int=31) -> dict:
"Calculate metrics from NIR-red-green -images"
image = open_geotiff(fn)
if mask_plot == True: image = mask_plot_from_image(image, radius=radius)
# Max, mean, std and coefficient of variation
features = {}
features['nir_pix_max'] = np.nanmax(image[0])
features['nir_pix_min'] = np.nanmin(image[0])
features['nir_pix_mean'] = np.nanmean(image[0])
features['nir_pix_std'] = np.nanstd(image[0])
features['nir_pix_var'] = np.nanvar(image[0])
features['red_pix_max'] = np.nanmax(image[1])
features['red_pix_min'] = np.nanmin(image[1])
features['red_pix_mean'] = np.nanmean(image[1])
features['red_pix_std'] = np.nanstd(image[1])
features['red_pix_var'] = np.nanvar(image[1])
features['green_pix_max'] = np.nanmax(image[2])
features['green_pix_min'] = np.nanmin(image[2])
features['green_pix_mean'] = np.nanmean(image[2])
features['green_pix_std'] = np.nanstd(image[2])
features['green_pix_var'] = np.nanvar(image[2])
# spectral indices
# NDVI
ndvi = calc_normalized_spectral_index(image, 0, 1)
features['ndvi_pix_max'] = np.nanmax(ndvi)
features['ndvi_pix_min'] = np.nanmin(ndvi)
features['ndvi_pix_mean'] = np.nanmean(ndvi)
features['ndvi_pix_std'] = np.nanstd(ndvi)
features['ndvi_pix_var'] = np.nanvar(ndvi)
return features
# Cell
def glcm_xplusy(glcm, k, distance, angle):
"sum each element where the indices of the glcm sum to k"
s = 0
for c in range(0, glcm.shape[0]):
targ = k - c
if targ >= 0 and targ < glcm.shape[0]: s += glcm[targ, c, distance, angle]
if targ > k: return s
return s
def glcm_xminusy(glcm, k, distance, angle):
"sum each element where the difference of the indices is k"
s = 0
for c in range(0, glcm.shape[0]):
targ = k + c
if targ < glcm.shape[0]: s += glcm[targ, c, distance, angle]
if k == 0: return s
return s*2
def textural_features(fn,
band_names:List=['nir', 'red', 'green', 'ndvi'],
distances:List[int]=[8],
angles:List[float]=[0, np.pi/4, np.pi/2, 3*np.pi/4],
n_grey:int=20) -> dict:
"""Get textural features from images. Works close to R package radiomics `GLCMFeatures` functions.
However skimage makes glcm a bit differently"""
tex_features = {}
im = open_geotiff(fn)
# add NDVI
im = np.vstack((im, calc_normalized_spectral_index(im, 1, 0)[None]))
for b in range(im.shape[0]):
pref = band_names[b]
# bin image to at maximum n_grey levels
bins = np.linspace(im[b].min(),im[b].max(), min(n_grey, len(np.unique(im[b]))) + 1)
binned_im = np.digitize(im[b], bins) - 1
n_levels = binned_im.max() + 1
# get glcm. Note that skimage makes glcm differently than radiomics
glcm = greycomatrix(binned_im,
distances=distances,
angles=angles,
levels=n_levels,
normed=True, symmetric=True)
# greycoprops gives some features easily
# Others not so much
means = np.zeros((len(distances), len(angles)))
variances = np.zeros((len(distances), len(angles)))
autocorrelations = np.zeros((len(distances), len(angles)))
cluster_prominences = np.zeros((len(distances), len(angles)))
cluster_shades = np.zeros((len(distances), len(angles)))
cluster_tendencies = np.zeros((len(distances), len(angles)))
diff_entropies = np.zeros((len(distances), len(angles)))
energies = np.zeros((len(distances), len(angles)))
entropies = np.zeros((len(distances), len(angles)))
hgn1s = np.zeros((len(distances), len(angles)))
idmns = np.zeros((len(distances), len(angles)))
idns = np.zeros((len(distances), len(angles)))
inverse_variances = np.zeros((len(distances), len(angles)))
sum_averages = np.zeros((len(distances), len(angles)))
sum_entropies = np.zeros((len(distances), len(angles)))
sum_variances = np.zeros((len(distances), len(angles)))
# Todo chech that multiple distances work
for d,a in product(range(len(distances)), range(len(angles))):
# means
means[d,a] = np.sum(np.sum(glcm[:,:,d,a], axis=1) * np.arange(1,n_levels+1))
scale_matrix = np.empty((n_levels, n_levels))
for i, j in product(range(n_levels), range(n_levels)):
# variance
variances[d,a] += (((i) - means[d,a])**2) * glcm[i,j,d,a]
# cluster metrix
cluster_prominences[d,a] += ((i + j - 2*means[d,a])**4) * glcm[i,j,d,a]
cluster_shades[d,a] += ((i+j - 2*means[d,a])**3)*glcm[i,j,0,a]
cluster_tendencies[d,a] += ((i + j - 2 * means[d,a])**2) * glcm[i,j,d,a]
# scale matrix for autocorrelations
scale_matrix[i,j] = (i+1) * (j+1)
# homogeneity 1
hgn1s[d,a] += glcm[i,j,d,a] / (1 + (np.abs(i-j)))
# IDM normalized
idmns[d,a] += glcm[i,j,d,a] / (1 + ((np.abs(i-j)**2)/(n_levels)**2))
# ID normalized
idns[d,a] += glcm[i,j,d,a] / (1 + (np.abs(i-j)/n_levels))
# Inverse variance
if i != j: inverse_variances[d,a] += glcm[i,j,d,a] / np.abs(i-j)**2
# autocorrelations
autocorrelations[d,a] = np.sum(glcm[:,:,0,a]*scale_matrix)
# diff_entropy
for i in range(n_levels-1):
pxy = glcm_xminusy(glcm, k=i, distance=d, angle=a)
if pxy > 0: diff_entropies[d,a] += pxy * np.log2(pxy)
diff_entropies[d,a] *= -1
# energy
energies[d,a] = np.sum(np.square(glcm[...,d,a]))
# entropy
entropies[d,a] = skimage.measure.shannon_entropy(glcm[...,d,a])
for i in range(2*(n_levels)-1):
# sum averages
pxy = glcm_xplusy(glcm, k=i, distance=d, angle=a)
sum_averages[d,a] += (i+2) * pxy
# sum entropies
if pxy > 0: sum_entropies[d,a] += pxy * np.log2(pxy)
sum_entropies[d,a] *= -1
for i in range(2*(n_levels) - 1):
# sum variances
pxy = glcm_xplusy(glcm, k=i-1, distance=d, angle=a)
sum_variances[d,a] += ((i+2 - sum_entropies[d,a])**2) * pxy
# Average all the angles
tex_features[f'{pref}_mean'] = np.mean(means)
tex_features[f'{pref}_var'] = np.mean(variances)
tex_features[f'{pref}_ac'] = np.mean(autocorrelations)
tex_features[f'{pref}_cProminence'] = np.mean(cluster_prominences)
tex_features[f'{pref}_cShade'] = np.mean(cluster_shades)
tex_features[f'{pref}_cTendency'] = np.mean(cluster_tendencies)
tex_features[f'{pref}_contrast'] = np.mean(greycoprops(glcm, 'contrast'))
tex_features[f'{pref}_corr'] = np.mean(greycoprops(glcm, 'correlation'))
tex_features[f'{pref}_diffentropy'] = np.mean(diff_entropies)
tex_features[f'{pref}_dissimilarity'] = np.mean(greycoprops(glcm, 'dissimilarity'))
tex_features[f'{pref}_energy'] = np.mean(energies)
tex_features[f'{pref}_ent'] = np.mean(entropies)
tex_features[f'{pref}_homogeneity1'] = np.mean(hgn1s)
tex_features[f'{pref}_homogeneity2'] = np.mean(greycoprops(glcm, 'homogeneity'))
tex_features[f'{pref}_idmn'] = np.mean(idmns)
tex_features[f'{pref}_idn'] = np.mean(idns)
tex_features[f'{pref}_iv'] = np.mean(inverse_variances)
tex_features[f'{pref}_maxProb'] = np.mean(glcm.max(axis=(0,1)))
tex_features[f'{pref}_sumaverage'] = np.mean(sum_averages)
tex_features[f'{pref}_sumentropy'] = np.mean(sum_entropies)
tex_features[f'{pref}_sumvariance'] = np.mean(sum_variances)
# Information measures of correlation TODO
#tex_features[f'{pref}_icm1'] = None
#tex_features[f'{pref}_icm2'] = None
return tex_features
# Cell
def process_image_features(fn:str, mask_plot:bool=True, radius:int=31):
"Process rasters to tabular format. Todo Textural features parasm"
image_features = image_metrics(fn, mask_plot=mask_plot, radius=radius)
texture_features = textural_features(fn)
features = {**image_features, **texture_features}
return features | 0.765856 | 0.612657 |
from . import db, login_manager
from flask_login import UserMixin
from werkzeug.security import generate_password_hash,check_password_hash
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255))
email = db.Column(db.String(255))
pass_secure = db.Column(db.String(255))
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.pass_secure,password)
def __repr__(self):
return f'User {self.username}'
class Subscribers(db.Model):
__tablename__='subscribers'
id = db.Column(db.Integer,primary_key = True)
email = db.Column(db.String(255))
def save_subscriber(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_subscribers(cls):
subscribers=Subscribers.query.all()
return subscribers
def __repr__(self):
return f'Subscribers {self.email}'
class BlogPost(db.Model):
__tablename__='blogs'
id = db.Column(db.Integer,primary_key = True)
title = db.Column(db.String())
blog_post = db.Column(db.String)
blog_pic = db.Column(db.String)
photo_url = db.Column(db.String)
comment = db.relationship('Comment',backref='blog',lazy='dynamic')
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
def save_blog(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_blog(cls,id):
blog = BlogPost.query.filter_by(id = id).first()
return blog
@classmethod
def get_all_blogs(cls):
blogs = BlogPost.query.order_by('-id').all()
return blogs
class Comment(db.Model):
__tablename__='comments'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String())
comment_content = db.Column(db.String())
blog_id = db.Column(db.Integer, db.ForeignKey('blogs.id'))
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
def save_comment(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_single_comment(cls,id_blog,id):
comment = Comment.query.filter_by(blog_id=id_blog,id=id).first()
return comment
@classmethod
def get_all_comments(cls,id):
comments = Comment.query.filter_by(blog_id=id).order_by('-id').all()
return comments | app/models.py | from . import db, login_manager
from flask_login import UserMixin
from werkzeug.security import generate_password_hash,check_password_hash
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255))
email = db.Column(db.String(255))
pass_secure = db.Column(db.String(255))
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.pass_secure,password)
def __repr__(self):
return f'User {self.username}'
class Subscribers(db.Model):
__tablename__='subscribers'
id = db.Column(db.Integer,primary_key = True)
email = db.Column(db.String(255))
def save_subscriber(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_subscribers(cls):
subscribers=Subscribers.query.all()
return subscribers
def __repr__(self):
return f'Subscribers {self.email}'
class BlogPost(db.Model):
__tablename__='blogs'
id = db.Column(db.Integer,primary_key = True)
title = db.Column(db.String())
blog_post = db.Column(db.String)
blog_pic = db.Column(db.String)
photo_url = db.Column(db.String)
comment = db.relationship('Comment',backref='blog',lazy='dynamic')
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
def save_blog(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_blog(cls,id):
blog = BlogPost.query.filter_by(id = id).first()
return blog
@classmethod
def get_all_blogs(cls):
blogs = BlogPost.query.order_by('-id').all()
return blogs
class Comment(db.Model):
__tablename__='comments'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String())
comment_content = db.Column(db.String())
blog_id = db.Column(db.Integer, db.ForeignKey('blogs.id'))
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
def save_comment(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_single_comment(cls,id_blog,id):
comment = Comment.query.filter_by(blog_id=id_blog,id=id).first()
return comment
@classmethod
def get_all_comments(cls,id):
comments = Comment.query.filter_by(blog_id=id).order_by('-id').all()
return comments | 0.408159 | 0.057361 |
import io
import unittest
from advisor.makefile_scanner import MakefileScanner
from advisor.report import Report
class TestMakefileScanner(unittest.TestCase):
def test_accepts_file(self):
makefile_scanner = MakefileScanner()
self.assertFalse(makefile_scanner.accepts_file('test'))
self.assertTrue(makefile_scanner.accepts_file('Makefile'))
self.assertTrue(makefile_scanner.accepts_file('makefile'))
self.assertTrue(makefile_scanner.accepts_file('MAKEFILE'))
self.assertTrue(makefile_scanner.accepts_file('Makefile.in'))
self.assertTrue(makefile_scanner.accepts_file('Makefile.am'))
self.assertTrue(makefile_scanner.accepts_file('NMakefile'))
self.assertTrue(makefile_scanner.accepts_file('nmakefile'))
self.assertTrue(makefile_scanner.accepts_file('NMAKEFILE'))
self.assertTrue(makefile_scanner.accepts_file('makefile.mk'))
self.assertTrue(makefile_scanner.accepts_file('Makefile.mk'))
self.assertTrue(makefile_scanner.accepts_file('MAKEFILE.MK'))
def test_scan_file_object(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('xxx')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 0)
def test_arch_specific_libs_re(self):
match = MakefileScanner.ARCH_SPECIFIC_LIBS_RE_PROG.search('LIBS=-lfoo')
self.assertIsNone(match)
match = MakefileScanner.ARCH_SPECIFIC_LIBS_RE_PROG.search('LIBS=-lotherarch')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), "otherarch")
def test_arch_specific_libs(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('-lotherarch')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
def test_old_crt_re(self):
match = MakefileScanner.OLD_CRT_RE_PROG.search('LIBS=libfoo.lib')
self.assertIsNone(match)
match = MakefileScanner.OLD_CRT_RE_PROG.search('LIBS=libcmt.lib')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), "libcmt.lib")
match = MakefileScanner.OLD_CRT_RE_PROG.search('LIBS=libcmtd.lib')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), "libcmtd.lib")
def test_ucrt_re(self):
match = MakefileScanner.UCRT_RE_PROG.search('LIBS=libfoo.lib')
self.assertIsNone(match)
match = MakefileScanner.UCRT_RE_PROG.search('LIBS=libucrt.lib')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), "libucrt.lib")
match = MakefileScanner.UCRT_RE_PROG.search('LIBS=libucrtd.lib')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), "libucrtd.lib")
def test_old_crt(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('LIBS=libcmt.lib')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
report = Report('/root')
io_object = io.StringIO('!IF $(OLD_CRT)\nLIBS=libcmt.lib\n!ELSE\nLIBS=libucrt.lib\n!ENDIF')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 0)
def test_other_arch_cpu_line_re(self):
match = MakefileScanner.OTHER_ARCH_CPU_LINE_RE_PROG.search('!IF "$(CPU)" == "aarch64"')
self.assertIsNone(match)
match = MakefileScanner.OTHER_ARCH_CPU_LINE_RE_PROG.search('!IF "$(CPU)" == "otherarch"')
self.assertIsNotNone(match)
def test_aarch64_cpu_line_re(self):
match = MakefileScanner.AARCH64_CPU_LINE_RE_PROG.search('!IF "$(CPU)" == "otherarch"')
self.assertIsNone(match)
match = MakefileScanner.AARCH64_CPU_LINE_RE_PROG.search('!IF "$(VSCMD_ARG_TGT_ARCH)" == "aarch64"')
self.assertIsNotNone(match)
def test_other_arch_cpu_line(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('!IF "$(CPU)" == "otherarch"')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
report = Report('/root')
io_object = io.StringIO('!IF "$(VSCMD_ARG_TGT_ARCH)" == "arm"\nTARGET_ARCH=aarch64\n!ELIF "$(CPU)" == "otherarch"\nTARGET_ARCH=otherarch\n!ENDIF')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 0)
def test_target_re(self):
match = MakefileScanner.TARGET_RE_PROG.search('\tsomecommand')
self.assertIsNone(match)
match = MakefileScanner.TARGET_RE_PROG.search('#a comment')
self.assertIsNone(match)
match = MakefileScanner.TARGET_RE_PROG.search('target:')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'target')
match = MakefileScanner.TARGET_RE_PROG.search('$(TARGET):')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '$(TARGET)')
def test_command_re(self):
match = MakefileScanner.COMMAND_RE_PROG.search('#a comment')
self.assertIsNone(match)
match = MakefileScanner.COMMAND_RE_PROG.search('target:')
self.assertIsNone(match)
match = MakefileScanner.COMMAND_RE_PROG.search('$(TARGET):')
self.assertIsNone(match)
match = MakefileScanner.COMMAND_RE_PROG.search('\tsomecommand')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'somecommand')
match = MakefileScanner.COMMAND_RE_PROG.search('\t"somecommand"')
self.assertIsNotNone(match)
self.assertEqual(match.group(2), 'somecommand')
match = MakefileScanner.COMMAND_RE_PROG.search('\tsomecommand arg')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'somecommand')
match = MakefileScanner.COMMAND_RE_PROG.search('\t"somecommand" arg')
self.assertIsNotNone(match)
self.assertEqual(match.group(2), 'somecommand')
match = MakefileScanner.COMMAND_RE_PROG.search('\t"word1 word2" arg')
self.assertIsNotNone(match)
self.assertEqual(match.group(2), 'word1 word2')
match = MakefileScanner.COMMAND_RE_PROG.search('\t$(TARGET)')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '$(TARGET)')
match = MakefileScanner.COMMAND_RE_PROG.search('\t$(TARGET) arg')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '$(TARGET)')
match = MakefileScanner.COMMAND_RE_PROG.search('\t"$(TARGET)" arg')
self.assertIsNotNone(match)
self.assertEqual(match.group(2), '$(TARGET)')
match = MakefileScanner.COMMAND_RE_PROG.search('\t./target.exe arg')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), './target.exe')
def test_target_command(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('target.exe: target.c\n\tcl target.c /Fe:target.exe\n\nfoobar.h: target.exe\n\t./target.exe >foobar.h')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
report = Report('/root')
io_object = io.StringIO('$(TARGET): target.c\n\tcl target.c /Fe:target.exe\n\nfoobar.h: $(TARGET)\n\t$(TARGET) >foobar.h')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
def test_assignment_re(self):
match = MakefileScanner.ASSIGNMENT_RE_PROG.search('# foo')
self.assertIsNone(match)
match = MakefileScanner.ASSIGNMENT_RE_PROG.search('A=B')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'A')
self.assertEqual(match.group(2), 'B')
def test_target_command_with_assignment(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('TARGET=target.exe\n\n$(TARGET): target.c\n\tcl target.c /Fe:target.exe\n\nfoobar.h: $(TARGET)\n\t$(TARGET) >foobar.h')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
issue = report.issues[0]
self.assertEqual(issue.target, 'target.exe')
def test_d_other_arch_re(self):
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/Dfoo')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/Dotherarch')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'otherarch')
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/D_otherarch_')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '_otherarch_')
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/D__otherarch__')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '__otherarch__')
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/Daarch64')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/D_aarch64_')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/D__aarch64__')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-Dfoo')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-Dotherarch')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'otherarch')
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-D_otherarch_')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '_otherarch_')
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-D__otherarch__')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '__otherarch__')
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-Daarch64')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-D_aarch64_')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-D__aarch64__')
self.assertIsNone(match)
def test_d_aarch64_re(self):
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/Dfoo')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('/Daarch64')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'aarch64')
match = MakefileScanner.D_AARCH64_RE_PROG.search('/D_aarch64_')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '_aarch64_')
match = MakefileScanner.D_AARCH64_RE_PROG.search('/D__aarch64__')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '__aarch64__')
match = MakefileScanner.D_AARCH64_RE_PROG.search('/Dotherarch')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('/D_otherarch_')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('/D__otherarch__')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('-Dfoo')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('-Daarch64')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'aarch64')
match = MakefileScanner.D_AARCH64_RE_PROG.search('-D_aarch64_')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '_aarch64_')
match = MakefileScanner.D_AARCH64_RE_PROG.search('-D__aarch64__')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '__aarch64__')
match = MakefileScanner.D_AARCH64_RE_PROG.search('-Dotherarch')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('-D_otherarch_')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('-D__otherarch__')
self.assertIsNone(match)
def test_define_other_arch(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('CFLAGS=/D__otherarch__')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
report = Report('/root')
io_object = io.StringIO('!IF "$(VSCMD_ARG_TGT_ARCH)" == "arm"\nCFLAGS=/D__arm__\n!ELIF "$(CPU)" == "otherarch"\nCFLAGS=/D__otherarch__\n!ENDIF')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 0)
def test_continuation(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
# Should be treated as a single line and only one issue reported.
io_object = io.StringIO('LIBS=-lotherarch\\\n-lotherarch')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1) | unittest/test_makefile_scanner.py | import io
import unittest
from advisor.makefile_scanner import MakefileScanner
from advisor.report import Report
class TestMakefileScanner(unittest.TestCase):
def test_accepts_file(self):
makefile_scanner = MakefileScanner()
self.assertFalse(makefile_scanner.accepts_file('test'))
self.assertTrue(makefile_scanner.accepts_file('Makefile'))
self.assertTrue(makefile_scanner.accepts_file('makefile'))
self.assertTrue(makefile_scanner.accepts_file('MAKEFILE'))
self.assertTrue(makefile_scanner.accepts_file('Makefile.in'))
self.assertTrue(makefile_scanner.accepts_file('Makefile.am'))
self.assertTrue(makefile_scanner.accepts_file('NMakefile'))
self.assertTrue(makefile_scanner.accepts_file('nmakefile'))
self.assertTrue(makefile_scanner.accepts_file('NMAKEFILE'))
self.assertTrue(makefile_scanner.accepts_file('makefile.mk'))
self.assertTrue(makefile_scanner.accepts_file('Makefile.mk'))
self.assertTrue(makefile_scanner.accepts_file('MAKEFILE.MK'))
def test_scan_file_object(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('xxx')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 0)
def test_arch_specific_libs_re(self):
match = MakefileScanner.ARCH_SPECIFIC_LIBS_RE_PROG.search('LIBS=-lfoo')
self.assertIsNone(match)
match = MakefileScanner.ARCH_SPECIFIC_LIBS_RE_PROG.search('LIBS=-lotherarch')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), "otherarch")
def test_arch_specific_libs(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('-lotherarch')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
def test_old_crt_re(self):
match = MakefileScanner.OLD_CRT_RE_PROG.search('LIBS=libfoo.lib')
self.assertIsNone(match)
match = MakefileScanner.OLD_CRT_RE_PROG.search('LIBS=libcmt.lib')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), "libcmt.lib")
match = MakefileScanner.OLD_CRT_RE_PROG.search('LIBS=libcmtd.lib')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), "libcmtd.lib")
def test_ucrt_re(self):
match = MakefileScanner.UCRT_RE_PROG.search('LIBS=libfoo.lib')
self.assertIsNone(match)
match = MakefileScanner.UCRT_RE_PROG.search('LIBS=libucrt.lib')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), "libucrt.lib")
match = MakefileScanner.UCRT_RE_PROG.search('LIBS=libucrtd.lib')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), "libucrtd.lib")
def test_old_crt(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('LIBS=libcmt.lib')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
report = Report('/root')
io_object = io.StringIO('!IF $(OLD_CRT)\nLIBS=libcmt.lib\n!ELSE\nLIBS=libucrt.lib\n!ENDIF')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 0)
def test_other_arch_cpu_line_re(self):
match = MakefileScanner.OTHER_ARCH_CPU_LINE_RE_PROG.search('!IF "$(CPU)" == "aarch64"')
self.assertIsNone(match)
match = MakefileScanner.OTHER_ARCH_CPU_LINE_RE_PROG.search('!IF "$(CPU)" == "otherarch"')
self.assertIsNotNone(match)
def test_aarch64_cpu_line_re(self):
match = MakefileScanner.AARCH64_CPU_LINE_RE_PROG.search('!IF "$(CPU)" == "otherarch"')
self.assertIsNone(match)
match = MakefileScanner.AARCH64_CPU_LINE_RE_PROG.search('!IF "$(VSCMD_ARG_TGT_ARCH)" == "aarch64"')
self.assertIsNotNone(match)
def test_other_arch_cpu_line(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('!IF "$(CPU)" == "otherarch"')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
report = Report('/root')
io_object = io.StringIO('!IF "$(VSCMD_ARG_TGT_ARCH)" == "arm"\nTARGET_ARCH=aarch64\n!ELIF "$(CPU)" == "otherarch"\nTARGET_ARCH=otherarch\n!ENDIF')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 0)
def test_target_re(self):
match = MakefileScanner.TARGET_RE_PROG.search('\tsomecommand')
self.assertIsNone(match)
match = MakefileScanner.TARGET_RE_PROG.search('#a comment')
self.assertIsNone(match)
match = MakefileScanner.TARGET_RE_PROG.search('target:')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'target')
match = MakefileScanner.TARGET_RE_PROG.search('$(TARGET):')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '$(TARGET)')
def test_command_re(self):
match = MakefileScanner.COMMAND_RE_PROG.search('#a comment')
self.assertIsNone(match)
match = MakefileScanner.COMMAND_RE_PROG.search('target:')
self.assertIsNone(match)
match = MakefileScanner.COMMAND_RE_PROG.search('$(TARGET):')
self.assertIsNone(match)
match = MakefileScanner.COMMAND_RE_PROG.search('\tsomecommand')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'somecommand')
match = MakefileScanner.COMMAND_RE_PROG.search('\t"somecommand"')
self.assertIsNotNone(match)
self.assertEqual(match.group(2), 'somecommand')
match = MakefileScanner.COMMAND_RE_PROG.search('\tsomecommand arg')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'somecommand')
match = MakefileScanner.COMMAND_RE_PROG.search('\t"somecommand" arg')
self.assertIsNotNone(match)
self.assertEqual(match.group(2), 'somecommand')
match = MakefileScanner.COMMAND_RE_PROG.search('\t"word1 word2" arg')
self.assertIsNotNone(match)
self.assertEqual(match.group(2), 'word1 word2')
match = MakefileScanner.COMMAND_RE_PROG.search('\t$(TARGET)')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '$(TARGET)')
match = MakefileScanner.COMMAND_RE_PROG.search('\t$(TARGET) arg')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '$(TARGET)')
match = MakefileScanner.COMMAND_RE_PROG.search('\t"$(TARGET)" arg')
self.assertIsNotNone(match)
self.assertEqual(match.group(2), '$(TARGET)')
match = MakefileScanner.COMMAND_RE_PROG.search('\t./target.exe arg')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), './target.exe')
def test_target_command(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('target.exe: target.c\n\tcl target.c /Fe:target.exe\n\nfoobar.h: target.exe\n\t./target.exe >foobar.h')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
report = Report('/root')
io_object = io.StringIO('$(TARGET): target.c\n\tcl target.c /Fe:target.exe\n\nfoobar.h: $(TARGET)\n\t$(TARGET) >foobar.h')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
def test_assignment_re(self):
match = MakefileScanner.ASSIGNMENT_RE_PROG.search('# foo')
self.assertIsNone(match)
match = MakefileScanner.ASSIGNMENT_RE_PROG.search('A=B')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'A')
self.assertEqual(match.group(2), 'B')
def test_target_command_with_assignment(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('TARGET=target.exe\n\n$(TARGET): target.c\n\tcl target.c /Fe:target.exe\n\nfoobar.h: $(TARGET)\n\t$(TARGET) >foobar.h')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
issue = report.issues[0]
self.assertEqual(issue.target, 'target.exe')
def test_d_other_arch_re(self):
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/Dfoo')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/Dotherarch')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'otherarch')
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/D_otherarch_')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '_otherarch_')
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/D__otherarch__')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '__otherarch__')
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/Daarch64')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/D_aarch64_')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/D__aarch64__')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-Dfoo')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-Dotherarch')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'otherarch')
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-D_otherarch_')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '_otherarch_')
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-D__otherarch__')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '__otherarch__')
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-Daarch64')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-D_aarch64_')
self.assertIsNone(match)
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('-D__aarch64__')
self.assertIsNone(match)
def test_d_aarch64_re(self):
match = MakefileScanner.D_OTHER_ARCH_RE_PROG.search('/Dfoo')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('/Daarch64')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'aarch64')
match = MakefileScanner.D_AARCH64_RE_PROG.search('/D_aarch64_')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '_aarch64_')
match = MakefileScanner.D_AARCH64_RE_PROG.search('/D__aarch64__')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '__aarch64__')
match = MakefileScanner.D_AARCH64_RE_PROG.search('/Dotherarch')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('/D_otherarch_')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('/D__otherarch__')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('-Dfoo')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('-Daarch64')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), 'aarch64')
match = MakefileScanner.D_AARCH64_RE_PROG.search('-D_aarch64_')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '_aarch64_')
match = MakefileScanner.D_AARCH64_RE_PROG.search('-D__aarch64__')
self.assertIsNotNone(match)
self.assertEqual(match.group(1), '__aarch64__')
match = MakefileScanner.D_AARCH64_RE_PROG.search('-Dotherarch')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('-D_otherarch_')
self.assertIsNone(match)
match = MakefileScanner.D_AARCH64_RE_PROG.search('-D__otherarch__')
self.assertIsNone(match)
def test_define_other_arch(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
io_object = io.StringIO('CFLAGS=/D__otherarch__')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1)
report = Report('/root')
io_object = io.StringIO('!IF "$(VSCMD_ARG_TGT_ARCH)" == "arm"\nCFLAGS=/D__arm__\n!ELIF "$(CPU)" == "otherarch"\nCFLAGS=/D__otherarch__\n!ENDIF')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 0)
def test_continuation(self):
makefile_scanner = MakefileScanner()
report = Report('/root')
# Should be treated as a single line and only one issue reported.
io_object = io.StringIO('LIBS=-lotherarch\\\n-lotherarch')
makefile_scanner.scan_file_object(
'Makefile', io_object, report)
self.assertEqual(len(report.issues), 1) | 0.307254 | 0.309148 |
from django.utils import timezone
from django import forms
from django.contrib.auth.models import User
from datetimewidget.widgets import DateWidget
from .models import Patient, Hospital, Doctor, Nurse
class UserRegForm(forms.ModelForm):
"""
Form for user registration
"""
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username', 'password', 'email', 'first_name', 'last_name')
def __init__(self, *args, **kwargs):
super(UserRegForm, self).__init__(*args, **kwargs)
self.fields['username'].widget.attrs.update({'class': 'form-control'})
self.fields['password'].widget.attrs.update({'class': 'form-control'})
self.fields['email'].widget.attrs.update({'class': 'form-control'})
self.fields['first_name'].widget.attrs.update({'class': 'form-control'})
self.fields['last_name'].widget.attrs.update({'class': 'form-control'})
def is_valid(self):
return super(UserRegForm, self).is_valid() and \
User.objects.all().filter(email=self.cleaned_data['email']).count() == 0
class UserUpdateForm(forms.ModelForm):
"""
Form for user updates
"""
class Meta:
model = User
fields = ['email', 'first_name', 'last_name']
class PatientRegForm(forms.ModelForm):
"""
Form for patient registration
Note: Seperate from user registration form
"""
now = timezone.now()
birthday = forms.DateField(widget=DateWidget(usel10n=True, bootstrap_version=3))
preferred_hospital = forms.ModelChoiceField(queryset=Hospital.objects.all(), required=False)
# hospital = forms.ModelChoiceField(queryset=Hospital.objects.all(), required=True)
emergency_contact = forms.EmailField(label="Emergency Contact Email Address:")
class Meta:
model = Patient
fields = ('birthday', 'sex', 'blood_type', 'height', 'weight', 'allergies', 'medical_history', 'insurance_info',
'emergency_contact', 'preferred_hospital') # , 'hospital')
def __init__(self, *args, **kwargs):
super(PatientRegForm, self).__init__(*args, **kwargs)
self.fields['birthday'].widget.attrs.update({'class': 'form-control'})
self.fields['sex'].widget.attrs.update({'class': 'form-control'})
self.fields['blood_type'].widget.attrs.update({'class': 'form-control'})
self.fields['height'].widget.attrs.update({'class': 'form-control'})
self.fields['weight'].widget.attrs.update({'class': 'form-control'})
self.fields['allergies'].widget.attrs.update({'class': 'form-control'})
self.fields['insurance_info'].widget.attrs.update({'class': 'form-control'})
self.fields['emergency_contact'].widget.attrs.update({'class': 'form-control'})
self.fields['preferred_hospital'].widget.attrs.update({'class': 'form-control'})
self.fields['medical_history'].widget.attrs.update({'class': 'form-control'})
# self.fields['hospital'].widget.attrs.update({'class': 'form-control'})
class NurseRegForm(forms.ModelForm):
"""
Form for Nurse registration
Note: Seperate from user registration form
"""
now = timezone.now()
phoneNum = forms.IntegerField(label="Phone Number")
hospital = forms.ModelChoiceField(queryset=Hospital.objects.all(), required=True, label="Hospital")
class Meta:
model = Nurse
fields = ('phoneNum', 'hospital')
class DoctorRegForm(forms.ModelForm):
"""
Form for Doctor registration
Note: Seperate from user registration form
"""
phoneNum = forms.IntegerField(label="Phone Number")
hospital = forms.ModelChoiceField(queryset=Hospital.objects.all(), required=True, label="Hospital")
class Meta:
model = Doctor
fields = ('phoneNum', 'hospital')
class LoginForm(forms.ModelForm):
"""
Form for logging in
"""
class Meta:
model = User
username = forms.CharField(max_length=50)
password = forms.CharField(max_length=50)
fields = ["username", "password"]
class PatientForm(forms.ModelForm):
"""
Form for accessing Patient Data
"""
class Meta:
model = Patient
fields = ['birthday', 'sex', 'height', 'weight', 'allergies', 'medical_history', 'insurance_info',
'emergency_contact', 'preferred_hospital']
class PatientMediForm(forms.ModelForm):
"""
Form for accessing Patient Medical Data
"""
class Meta:
model = Patient
fields = ('sex', 'blood_type', 'height', 'weight', 'allergies', 'medical_history')
class UploadFileForm(forms.Form):
"""
Form for Uploading Files
"""
file = forms.FileField()
class NewHospitalForm(forms.ModelForm):
"""
Form for creating a new Hospital
"""
class Meta:
model = Hospital
fields = ('name',) | HealthNet/core/forms.py | from django.utils import timezone
from django import forms
from django.contrib.auth.models import User
from datetimewidget.widgets import DateWidget
from .models import Patient, Hospital, Doctor, Nurse
class UserRegForm(forms.ModelForm):
"""
Form for user registration
"""
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username', 'password', 'email', 'first_name', 'last_name')
def __init__(self, *args, **kwargs):
super(UserRegForm, self).__init__(*args, **kwargs)
self.fields['username'].widget.attrs.update({'class': 'form-control'})
self.fields['password'].widget.attrs.update({'class': 'form-control'})
self.fields['email'].widget.attrs.update({'class': 'form-control'})
self.fields['first_name'].widget.attrs.update({'class': 'form-control'})
self.fields['last_name'].widget.attrs.update({'class': 'form-control'})
def is_valid(self):
return super(UserRegForm, self).is_valid() and \
User.objects.all().filter(email=self.cleaned_data['email']).count() == 0
class UserUpdateForm(forms.ModelForm):
"""
Form for user updates
"""
class Meta:
model = User
fields = ['email', 'first_name', 'last_name']
class PatientRegForm(forms.ModelForm):
"""
Form for patient registration
Note: Seperate from user registration form
"""
now = timezone.now()
birthday = forms.DateField(widget=DateWidget(usel10n=True, bootstrap_version=3))
preferred_hospital = forms.ModelChoiceField(queryset=Hospital.objects.all(), required=False)
# hospital = forms.ModelChoiceField(queryset=Hospital.objects.all(), required=True)
emergency_contact = forms.EmailField(label="Emergency Contact Email Address:")
class Meta:
model = Patient
fields = ('birthday', 'sex', 'blood_type', 'height', 'weight', 'allergies', 'medical_history', 'insurance_info',
'emergency_contact', 'preferred_hospital') # , 'hospital')
def __init__(self, *args, **kwargs):
super(PatientRegForm, self).__init__(*args, **kwargs)
self.fields['birthday'].widget.attrs.update({'class': 'form-control'})
self.fields['sex'].widget.attrs.update({'class': 'form-control'})
self.fields['blood_type'].widget.attrs.update({'class': 'form-control'})
self.fields['height'].widget.attrs.update({'class': 'form-control'})
self.fields['weight'].widget.attrs.update({'class': 'form-control'})
self.fields['allergies'].widget.attrs.update({'class': 'form-control'})
self.fields['insurance_info'].widget.attrs.update({'class': 'form-control'})
self.fields['emergency_contact'].widget.attrs.update({'class': 'form-control'})
self.fields['preferred_hospital'].widget.attrs.update({'class': 'form-control'})
self.fields['medical_history'].widget.attrs.update({'class': 'form-control'})
# self.fields['hospital'].widget.attrs.update({'class': 'form-control'})
class NurseRegForm(forms.ModelForm):
"""
Form for Nurse registration
Note: Seperate from user registration form
"""
now = timezone.now()
phoneNum = forms.IntegerField(label="Phone Number")
hospital = forms.ModelChoiceField(queryset=Hospital.objects.all(), required=True, label="Hospital")
class Meta:
model = Nurse
fields = ('phoneNum', 'hospital')
class DoctorRegForm(forms.ModelForm):
"""
Form for Doctor registration
Note: Seperate from user registration form
"""
phoneNum = forms.IntegerField(label="Phone Number")
hospital = forms.ModelChoiceField(queryset=Hospital.objects.all(), required=True, label="Hospital")
class Meta:
model = Doctor
fields = ('phoneNum', 'hospital')
class LoginForm(forms.ModelForm):
"""
Form for logging in
"""
class Meta:
model = User
username = forms.CharField(max_length=50)
password = forms.CharField(max_length=50)
fields = ["username", "password"]
class PatientForm(forms.ModelForm):
"""
Form for accessing Patient Data
"""
class Meta:
model = Patient
fields = ['birthday', 'sex', 'height', 'weight', 'allergies', 'medical_history', 'insurance_info',
'emergency_contact', 'preferred_hospital']
class PatientMediForm(forms.ModelForm):
"""
Form for accessing Patient Medical Data
"""
class Meta:
model = Patient
fields = ('sex', 'blood_type', 'height', 'weight', 'allergies', 'medical_history')
class UploadFileForm(forms.Form):
"""
Form for Uploading Files
"""
file = forms.FileField()
class NewHospitalForm(forms.ModelForm):
"""
Form for creating a new Hospital
"""
class Meta:
model = Hospital
fields = ('name',) | 0.632503 | 0.116915 |
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.sites.models import Site
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import UpdateView
from one.emails.models import AllauthTemplate
from .forms import SettingForm
class SiteDetailView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Site
fields = ["name", "domain"]
success_message = _("Information successfully updated")
def get_success_url(self):
return reverse("settings:site")
def get_object(self):
return Site.objects.get_current()
def get_context_data(self, **kwargs):
kwargs = super().get_context_data(**kwargs)
kwargs["breadcrumb"] = {
"title": _("Site Setting"),
"parent": None,
"current": f"{_('Update for')} {self.object.name}",
}
kwargs["allauth_templates"] = AllauthTemplate.lingual_objects.group_by_language(
order_by="code"
)
if self.request.method == "POST":
kwargs["setting_form"] = SettingForm(
self.request.POST, self.request.FILES, instance=self.object.setting
)
else:
kwargs["setting_form"] = SettingForm(instance=self.object.setting)
return kwargs
def post(self, request, *args, **kwargs):
"""
Handle POST requests: instantiate a form instance with the passed
POST variables and then check if it's valid.
"""
self.object = self.get_object()
setting_form = SettingForm(
self.request.POST, self.request.FILES, instance=self.object.setting
)
form = self.get_form()
if form.is_valid():
if setting_form.is_valid():
self.form_valid(setting_form)
return self.form_valid(form)
messages.error(request, _("Step Images has error"))
return self.form_invalid(form)
site_detail_view = SiteDetailView.as_view() | one/contrib/sites/settings/views.py | from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.sites.models import Site
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import UpdateView
from one.emails.models import AllauthTemplate
from .forms import SettingForm
class SiteDetailView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Site
fields = ["name", "domain"]
success_message = _("Information successfully updated")
def get_success_url(self):
return reverse("settings:site")
def get_object(self):
return Site.objects.get_current()
def get_context_data(self, **kwargs):
kwargs = super().get_context_data(**kwargs)
kwargs["breadcrumb"] = {
"title": _("Site Setting"),
"parent": None,
"current": f"{_('Update for')} {self.object.name}",
}
kwargs["allauth_templates"] = AllauthTemplate.lingual_objects.group_by_language(
order_by="code"
)
if self.request.method == "POST":
kwargs["setting_form"] = SettingForm(
self.request.POST, self.request.FILES, instance=self.object.setting
)
else:
kwargs["setting_form"] = SettingForm(instance=self.object.setting)
return kwargs
def post(self, request, *args, **kwargs):
"""
Handle POST requests: instantiate a form instance with the passed
POST variables and then check if it's valid.
"""
self.object = self.get_object()
setting_form = SettingForm(
self.request.POST, self.request.FILES, instance=self.object.setting
)
form = self.get_form()
if form.is_valid():
if setting_form.is_valid():
self.form_valid(setting_form)
return self.form_valid(form)
messages.error(request, _("Step Images has error"))
return self.form_invalid(form)
site_detail_view = SiteDetailView.as_view() | 0.492432 | 0.079246 |
from geopy.distance import distance
import os
import gdal
import osr
import re
from reader.gdal_reader import GdalReader
import numpy as np
def rename_files(files, name=None):
"""
Given a list of file paths for elevation files, this function will rename
those files to the format required by the pyDEM package.
This assumes a .tif extension.
Parameters
-----------
files : list
A list of strings of the paths to the elevation files that will be
renamed
name : str (optional)
Default = None. A suffix to the filename. For example
<filename>_suffix.tif
Notes
------
The files are renamed in the same directory as the original file locations
"""
for fil in files:
elev_file = GdalReader(file_name=fil)
elev, = elev_file.raster_layers
fn = get_fn(elev, name)
del elev_file
del elev
fn = os.path.join(os.path.split(fil)[0], fn)
os.rename(fil, fn)
print "Renamed", fil, "to", fn
def parse_fn(fn):
""" This parses the file name and returns the coordinates of the tile
Parameters
-----------
fn : str
Filename of a GEOTIFF
Returns
--------
coords = [LLC.lat, LLC.lon, URC.lat, URC.lon]
"""
try:
parts = os.path.splitext(os.path.split(fn)[-1])[0].replace('o', '.')\
.split('_')[:2]
coords = [float(crds)
for crds in re.split('[NSEW]', parts[0] + parts[1])[1:]]
except:
coords = [np.nan] * 4
return coords
def get_fn(elev, name=None):
"""
Determines the standard filename for a given GeoTIFF Layer.
Parameters
-----------
elev : GdalReader.raster_layer
A raster layer from the GdalReader object.
name : str (optional)
An optional suffix to the filename.
Returns
-------
fn : str
The standard <filename>_<name>.tif with suffix (if supplied)
"""
gcs = elev.grid_coordinates
coords = [gcs.LLC.lat, gcs.LLC.lon, gcs.URC.lat, gcs.URC.lon]
return get_fn_from_coords(coords, name)
def get_fn_from_coords(coords, name=None):
""" Given a set of coordinates, returns the standard filename.
Parameters
-----------
coords : list
[LLC.lat, LLC.lon, URC.lat, URC.lon]
name : str (optional)
An optional suffix to the filename.
Returns
-------
fn : str
The standard <filename>_<name>.tif with suffix (if supplied)
"""
NS1 = ["S", "N"][coords[0] > 0]
EW1 = ["W", "E"][coords[1] > 0]
NS2 = ["S", "N"][coords[2] > 0]
EW2 = ["W", "E"][coords[3] > 0]
new_name = "%s%0.3g%s%0.3g_%s%0.3g%s%0.3g" % \
(NS1, coords[0], EW1, coords[1], NS2, coords[2], EW2, coords[3])
if name is not None:
new_name += '_' + name
return new_name.replace('.', 'o') + '.tif'
def mk_dx_dy_from_geotif_layer(geotif):
"""
Extracts the change in x and y coordinates from the geotiff file. Presently
only supports WGS-84 files.
"""
ELLIPSOID_MAP = {'WGS84': 'WGS-84'}
ellipsoid = ELLIPSOID_MAP[geotif.grid_coordinates.wkt]
d = distance(ellipsoid=ellipsoid)
dx = geotif.grid_coordinates.x_axis
dy = geotif.grid_coordinates.y_axis
dX = np.zeros((dy.shape[0]-1))
for j in xrange(len(dX)):
dX[j] = d.measure((dy[j+1], dx[1]), (dy[j+1], dx[0])) * 1000 # km2m
dY = np.zeros((dy.shape[0]-1))
for i in xrange(len(dY)):
dY[i] = d.measure((dy[i], 0), (dy[i+1], 0)) * 1000 # km2m
return dX, dY
def mk_geotiff_obj(raster, fn, bands=1, gdal_data_type=gdal.GDT_Float32,
lat=[46, 45], lon=[-73, -72]):
"""
Creates a new geotiff file objects using the WGS84 coordinate system, saves
it to disk, and returns a handle to the python file object and driver
Parameters
------------
raster : array
Numpy array of the raster data to be added to the object
fn : str
Name of the geotiff file
bands : int (optional)
See :py:func:`gdal.GetDriverByName('Gtiff').Create
gdal_data : gdal.GDT_<type>
Gdal data type (see gdal.GDT_...)
lat : list
northern lat, southern lat
lon : list
[western lon, eastern lon]
"""
NNi, NNj = raster.shape
driver = gdal.GetDriverByName('GTiff')
obj = driver.Create(fn, NNj, NNi, bands, gdal_data_type)
pixel_height = -np.abs(lat[0] - lat[1]) / (NNi - 1.0)
pixel_width = np.abs(lon[0] - lon[1]) / (NNj - 1.0)
obj.SetGeoTransform([lon[0], pixel_width, 0, lat[0], 0, pixel_height])
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
obj.SetProjection(srs.ExportToWkt())
obj.GetRasterBand(1).WriteArray(raster)
return obj, driver
def sortrows(a, i=0, index_out=False, recurse=True):
""" Sorts array "a" by columns i
Parameters
------------
a : np.ndarray
array to be sorted
i : int (optional)
column to be sorted by, taken as 0 by default
index_out : bool (optional)
return the index I such that a(I) = sortrows(a,i). Default = False
recurse : bool (optional)
recursively sort by each of the columns. i.e.
once column i is sort, we sort the smallest column number
etc. True by default.
Returns
--------
a : np.ndarray
The array 'a' sorted in descending order by column i
I : np.ndarray (optional)
The index such that a[I, :] = sortrows(a, i). Only return if
index_out = True
Examples
---------
>>> a = array([[1,2],[3,1],[2,3]])
>>> b = sortrows(a,0)
>>> b
array([[1, 2],
[2, 3],
[3, 1]])
c, I = sortrows(a,1,True)
>>> c
array([[3, 1],
[1, 2],
[2, 3]])
>>> I
array([1, 0, 2])
>>> a[I,:] - c
array([[0, 0],
[0, 0],
[0, 0]])
"""
I = np.argsort(a[:, i])
a = a[I, :]
# We recursively call sortrows to make sure it is sorted best by every
# column
if recurse & (len(a[0]) > i + 1):
for b in np.unique(a[:, i]):
ids = a[:, i] == b
colids = range(i) + range(i+1, len(a[0]))
a[np.ix_(ids, colids)], I2 = sortrows(a[np.ix_(ids, colids)],
0, True, True)
I[ids] = I[np.nonzero(ids)[0][I2]]
if index_out:
return a, I
else:
return a | pydem/utils.py | from geopy.distance import distance
import os
import gdal
import osr
import re
from reader.gdal_reader import GdalReader
import numpy as np
def rename_files(files, name=None):
"""
Given a list of file paths for elevation files, this function will rename
those files to the format required by the pyDEM package.
This assumes a .tif extension.
Parameters
-----------
files : list
A list of strings of the paths to the elevation files that will be
renamed
name : str (optional)
Default = None. A suffix to the filename. For example
<filename>_suffix.tif
Notes
------
The files are renamed in the same directory as the original file locations
"""
for fil in files:
elev_file = GdalReader(file_name=fil)
elev, = elev_file.raster_layers
fn = get_fn(elev, name)
del elev_file
del elev
fn = os.path.join(os.path.split(fil)[0], fn)
os.rename(fil, fn)
print "Renamed", fil, "to", fn
def parse_fn(fn):
""" This parses the file name and returns the coordinates of the tile
Parameters
-----------
fn : str
Filename of a GEOTIFF
Returns
--------
coords = [LLC.lat, LLC.lon, URC.lat, URC.lon]
"""
try:
parts = os.path.splitext(os.path.split(fn)[-1])[0].replace('o', '.')\
.split('_')[:2]
coords = [float(crds)
for crds in re.split('[NSEW]', parts[0] + parts[1])[1:]]
except:
coords = [np.nan] * 4
return coords
def get_fn(elev, name=None):
"""
Determines the standard filename for a given GeoTIFF Layer.
Parameters
-----------
elev : GdalReader.raster_layer
A raster layer from the GdalReader object.
name : str (optional)
An optional suffix to the filename.
Returns
-------
fn : str
The standard <filename>_<name>.tif with suffix (if supplied)
"""
gcs = elev.grid_coordinates
coords = [gcs.LLC.lat, gcs.LLC.lon, gcs.URC.lat, gcs.URC.lon]
return get_fn_from_coords(coords, name)
def get_fn_from_coords(coords, name=None):
""" Given a set of coordinates, returns the standard filename.
Parameters
-----------
coords : list
[LLC.lat, LLC.lon, URC.lat, URC.lon]
name : str (optional)
An optional suffix to the filename.
Returns
-------
fn : str
The standard <filename>_<name>.tif with suffix (if supplied)
"""
NS1 = ["S", "N"][coords[0] > 0]
EW1 = ["W", "E"][coords[1] > 0]
NS2 = ["S", "N"][coords[2] > 0]
EW2 = ["W", "E"][coords[3] > 0]
new_name = "%s%0.3g%s%0.3g_%s%0.3g%s%0.3g" % \
(NS1, coords[0], EW1, coords[1], NS2, coords[2], EW2, coords[3])
if name is not None:
new_name += '_' + name
return new_name.replace('.', 'o') + '.tif'
def mk_dx_dy_from_geotif_layer(geotif):
"""
Extracts the change in x and y coordinates from the geotiff file. Presently
only supports WGS-84 files.
"""
ELLIPSOID_MAP = {'WGS84': 'WGS-84'}
ellipsoid = ELLIPSOID_MAP[geotif.grid_coordinates.wkt]
d = distance(ellipsoid=ellipsoid)
dx = geotif.grid_coordinates.x_axis
dy = geotif.grid_coordinates.y_axis
dX = np.zeros((dy.shape[0]-1))
for j in xrange(len(dX)):
dX[j] = d.measure((dy[j+1], dx[1]), (dy[j+1], dx[0])) * 1000 # km2m
dY = np.zeros((dy.shape[0]-1))
for i in xrange(len(dY)):
dY[i] = d.measure((dy[i], 0), (dy[i+1], 0)) * 1000 # km2m
return dX, dY
def mk_geotiff_obj(raster, fn, bands=1, gdal_data_type=gdal.GDT_Float32,
lat=[46, 45], lon=[-73, -72]):
"""
Creates a new geotiff file objects using the WGS84 coordinate system, saves
it to disk, and returns a handle to the python file object and driver
Parameters
------------
raster : array
Numpy array of the raster data to be added to the object
fn : str
Name of the geotiff file
bands : int (optional)
See :py:func:`gdal.GetDriverByName('Gtiff').Create
gdal_data : gdal.GDT_<type>
Gdal data type (see gdal.GDT_...)
lat : list
northern lat, southern lat
lon : list
[western lon, eastern lon]
"""
NNi, NNj = raster.shape
driver = gdal.GetDriverByName('GTiff')
obj = driver.Create(fn, NNj, NNi, bands, gdal_data_type)
pixel_height = -np.abs(lat[0] - lat[1]) / (NNi - 1.0)
pixel_width = np.abs(lon[0] - lon[1]) / (NNj - 1.0)
obj.SetGeoTransform([lon[0], pixel_width, 0, lat[0], 0, pixel_height])
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
obj.SetProjection(srs.ExportToWkt())
obj.GetRasterBand(1).WriteArray(raster)
return obj, driver
def sortrows(a, i=0, index_out=False, recurse=True):
""" Sorts array "a" by columns i
Parameters
------------
a : np.ndarray
array to be sorted
i : int (optional)
column to be sorted by, taken as 0 by default
index_out : bool (optional)
return the index I such that a(I) = sortrows(a,i). Default = False
recurse : bool (optional)
recursively sort by each of the columns. i.e.
once column i is sort, we sort the smallest column number
etc. True by default.
Returns
--------
a : np.ndarray
The array 'a' sorted in descending order by column i
I : np.ndarray (optional)
The index such that a[I, :] = sortrows(a, i). Only return if
index_out = True
Examples
---------
>>> a = array([[1,2],[3,1],[2,3]])
>>> b = sortrows(a,0)
>>> b
array([[1, 2],
[2, 3],
[3, 1]])
c, I = sortrows(a,1,True)
>>> c
array([[3, 1],
[1, 2],
[2, 3]])
>>> I
array([1, 0, 2])
>>> a[I,:] - c
array([[0, 0],
[0, 0],
[0, 0]])
"""
I = np.argsort(a[:, i])
a = a[I, :]
# We recursively call sortrows to make sure it is sorted best by every
# column
if recurse & (len(a[0]) > i + 1):
for b in np.unique(a[:, i]):
ids = a[:, i] == b
colids = range(i) + range(i+1, len(a[0]))
a[np.ix_(ids, colids)], I2 = sortrows(a[np.ix_(ids, colids)],
0, True, True)
I[ids] = I[np.nonzero(ids)[0][I2]]
if index_out:
return a, I
else:
return a | 0.819713 | 0.496948 |
import numpy as np
import numpy.linalg as linalg
class HomogeneousCoordinate(np.ndarray):
def __new__(cls, *args, **kwargs):
# creates an array with the homogeneous coordinates
obj = np.zeros(4, dtype=float).view(cls)
return obj
def __init__(self, x=0, y=0, z=0, w=0):
# assign initialization
self[0] = x
self[1] = y
self[2] = z
self[3] = w
@property
def x(self):
return self[0]
@x.setter
def x(self, x):
self[0] = x
@property
def y(self):
return self[1]
@y.setter
def y(self, y):
self[1] = y
@property
def z(self):
return self[2]
@z.setter
def z(self, z):
self[2] = z
@property
def w(self):
return self[3]
@w.setter
def w(self, w):
self[3] = w
class Point(HomogeneousCoordinate):
def __init__(self, x=0, y=0, z=0, *args, **kwargs):
super().__init__(x, y, z, 1) # call the homogeneous coordinate constructor, points have a coord of 1
class Vector(HomogeneousCoordinate):
def __init__(self, x=0, y=0, z=0, *args, **kwargs):
super().__init__(x, y, z, 0)
class WorldObject(object):
"""
a world object represents an object in 3D space, it has an origin and a direction, as well as a transform
matrix to convert it from the local coordinate system to the global coordinate system
"""
@staticmethod
def _transfer_matrix():
"""
Create and return a 4x4 identity matrix
:return:
"""
return np.identity(4)
@staticmethod
def _sin_cos(angle, format="deg"):
"""
returns the sine and cosine of the input angle
:param angle:
:param format:
:return:
"""
if format == "deg":
cos_a = np.cos(angle * np.pi / 180.)
sin_a = np.sin(angle * np.pi / 180.)
elif format == "rad":
cos_a = np.cos(angle)
sin_a = np.sin(angle)
else:
raise ValueError(f"{format} is not a valid option for angle units")
return sin_a, cos_a
def __init__(self):
self._obj_origin = Point(0, 0, 0) # position in object space
self._obj_direction = Vector(0, 0, 1) # direction in object space
self._world_position = Point(0,0,0) # the objects position in world space
self._world_direction = Vector(0,0,1) # the objects direction in world space
# Flags that get set to false whenever the transform matrix has been updated
self._dir_valid = True
self._pos_valid = True
self._world_coordinate_transform = np.identity(4, dtype=float) # transform matrix from object to world space
def _append_world_transform(self, new_transform):
self._world_coordinate_transform = np.matmul(new_transform, self._world_coordinate_transform)
self._dir_valid = False
self._pos_valid = False
def get_position(self):
# check if the position is valid, if not update it and return
if not self._pos_valid:
self._world_position = np.matmul(self._world_coordinate_transform, self._obj_origin)
self._pos_valid = True
return self._world_position
def get_orientation(self):
# check if we need to update the direction vector
if not self._dir_valid:
world_dir = np.matmul(self._world_coordinate_transform, self._obj_direction)
norm = linalg.norm(world_dir)
if norm < 1E-7:
raise ValueError(f"Measured Norm of World Vector below tolerance: {norm}")
else:
self._world_direction = world_dir / norm
return self._world_direction
# Movement operations
def move(self, x=0, y=0, z=0):
tx = self._transfer_matrix()
tx[:-1, -1] = (x, y, z)
# update the transform matrix
self._append_world_transform(tx)
return self
def move_x(self, movement):
self.move(x=movement)
return self
def move_y(self, movement):
self.move(y=movement)
return self
def move_z(self, movement):
self.move(z=movement)
return self
# Scale operations
def scale(self, x=1, y=1, z=1):
tx = np.diag((x, y, z, 1))
self._append_world_transform(tx)
return self
def scale_x(self, scale_val):
return self.scale(x=scale_val)
def scale_y(self, scale_val):
return self.scale(y=scale_val)
def scale_z(self, scale_val):
return self.scale(z=scale_val)
def scale_all(self, scale_val):
return self.scale(scale_val, scale_val, scale_val)
# Rotation Operations
def rotate_x(self, angle, units="deg"):
sin_a, cos_a = self._sin_cos(angle, units)
tx = self._transfer_matrix()
tx[1, 1] = cos_a
tx[2, 2] = cos_a
tx[1, 2] = -sin_a
tx[2, 1] = sin_a
self._append_world_transform(tx)
return self
def rotate_y(self, angle, units="deg"):
sin_a, cos_a = self._sin_cos(angle, units)
tx = self._transfer_matrix()
tx[0, 0] = cos_a
tx[2, 2] = cos_a
tx[2, 0] = -sin_a
tx[0, 2] = sin_a
self._append_world_transform(tx)
return self
def rotate_z(self, angle, units="deg"):
sin_a, cos_a = self._sin_cos(angle, units)
tx = self._transfer_matrix()
tx[0, 0] = cos_a
tx[1, 1] = cos_a
tx[0, 1] = -sin_a
tx[1, 0] = sin_a
self._append_world_transform(tx)
return self | pycg/pycg.py | import numpy as np
import numpy.linalg as linalg
class HomogeneousCoordinate(np.ndarray):
def __new__(cls, *args, **kwargs):
# creates an array with the homogeneous coordinates
obj = np.zeros(4, dtype=float).view(cls)
return obj
def __init__(self, x=0, y=0, z=0, w=0):
# assign initialization
self[0] = x
self[1] = y
self[2] = z
self[3] = w
@property
def x(self):
return self[0]
@x.setter
def x(self, x):
self[0] = x
@property
def y(self):
return self[1]
@y.setter
def y(self, y):
self[1] = y
@property
def z(self):
return self[2]
@z.setter
def z(self, z):
self[2] = z
@property
def w(self):
return self[3]
@w.setter
def w(self, w):
self[3] = w
class Point(HomogeneousCoordinate):
def __init__(self, x=0, y=0, z=0, *args, **kwargs):
super().__init__(x, y, z, 1) # call the homogeneous coordinate constructor, points have a coord of 1
class Vector(HomogeneousCoordinate):
def __init__(self, x=0, y=0, z=0, *args, **kwargs):
super().__init__(x, y, z, 0)
class WorldObject(object):
"""
a world object represents an object in 3D space, it has an origin and a direction, as well as a transform
matrix to convert it from the local coordinate system to the global coordinate system
"""
@staticmethod
def _transfer_matrix():
"""
Create and return a 4x4 identity matrix
:return:
"""
return np.identity(4)
@staticmethod
def _sin_cos(angle, format="deg"):
"""
returns the sine and cosine of the input angle
:param angle:
:param format:
:return:
"""
if format == "deg":
cos_a = np.cos(angle * np.pi / 180.)
sin_a = np.sin(angle * np.pi / 180.)
elif format == "rad":
cos_a = np.cos(angle)
sin_a = np.sin(angle)
else:
raise ValueError(f"{format} is not a valid option for angle units")
return sin_a, cos_a
def __init__(self):
self._obj_origin = Point(0, 0, 0) # position in object space
self._obj_direction = Vector(0, 0, 1) # direction in object space
self._world_position = Point(0,0,0) # the objects position in world space
self._world_direction = Vector(0,0,1) # the objects direction in world space
# Flags that get set to false whenever the transform matrix has been updated
self._dir_valid = True
self._pos_valid = True
self._world_coordinate_transform = np.identity(4, dtype=float) # transform matrix from object to world space
def _append_world_transform(self, new_transform):
self._world_coordinate_transform = np.matmul(new_transform, self._world_coordinate_transform)
self._dir_valid = False
self._pos_valid = False
def get_position(self):
# check if the position is valid, if not update it and return
if not self._pos_valid:
self._world_position = np.matmul(self._world_coordinate_transform, self._obj_origin)
self._pos_valid = True
return self._world_position
def get_orientation(self):
# check if we need to update the direction vector
if not self._dir_valid:
world_dir = np.matmul(self._world_coordinate_transform, self._obj_direction)
norm = linalg.norm(world_dir)
if norm < 1E-7:
raise ValueError(f"Measured Norm of World Vector below tolerance: {norm}")
else:
self._world_direction = world_dir / norm
return self._world_direction
# Movement operations
def move(self, x=0, y=0, z=0):
tx = self._transfer_matrix()
tx[:-1, -1] = (x, y, z)
# update the transform matrix
self._append_world_transform(tx)
return self
def move_x(self, movement):
self.move(x=movement)
return self
def move_y(self, movement):
self.move(y=movement)
return self
def move_z(self, movement):
self.move(z=movement)
return self
# Scale operations
def scale(self, x=1, y=1, z=1):
tx = np.diag((x, y, z, 1))
self._append_world_transform(tx)
return self
def scale_x(self, scale_val):
return self.scale(x=scale_val)
def scale_y(self, scale_val):
return self.scale(y=scale_val)
def scale_z(self, scale_val):
return self.scale(z=scale_val)
def scale_all(self, scale_val):
return self.scale(scale_val, scale_val, scale_val)
# Rotation Operations
def rotate_x(self, angle, units="deg"):
sin_a, cos_a = self._sin_cos(angle, units)
tx = self._transfer_matrix()
tx[1, 1] = cos_a
tx[2, 2] = cos_a
tx[1, 2] = -sin_a
tx[2, 1] = sin_a
self._append_world_transform(tx)
return self
def rotate_y(self, angle, units="deg"):
sin_a, cos_a = self._sin_cos(angle, units)
tx = self._transfer_matrix()
tx[0, 0] = cos_a
tx[2, 2] = cos_a
tx[2, 0] = -sin_a
tx[0, 2] = sin_a
self._append_world_transform(tx)
return self
def rotate_z(self, angle, units="deg"):
sin_a, cos_a = self._sin_cos(angle, units)
tx = self._transfer_matrix()
tx[0, 0] = cos_a
tx[1, 1] = cos_a
tx[0, 1] = -sin_a
tx[1, 0] = sin_a
self._append_world_transform(tx)
return self | 0.892393 | 0.621053 |
import SimpleITK as sitk
import numpy as np
from disptools import *
def create_target_volume(image: sitk.Image, atrophy_rate: float):
r""" Create a target volume map for the PREDICT tool.
Given an input segmentation map, create mask target volume map for the
PREDICT tool, with the given atrophy rate. The volume change in
:math:`(x,y,z)` is defined over a cubic pach formed by the voxel
:math:`(x,y,z)`, its three successors along the axis and their respective
successors to close the cube. Hence, if the input mask has size
:math:`m \times n \times s`, the volume map has size
:math:`(m-1) \times (n-1) \times (s-1)`.
.. note::
The output must be in np.float32, since the PREDICT tool uses
the C `float` type as hard-coded type.
Parameters
----------
image : sitk.Image
Input segmentation map.
atrophy_rate : float
Target atropy rate for the segmented ROI.
Returns
-------
sitk.Image
A SimpleITK image object with the target volume map.
"""
(x,y,z) = tuple([x - 1 for x in image.GetSize()])
volumes = 1.0 - atrophy_rate * sitk.GetArrayViewFromImage(I)[0:x,0:y,0:z]
result = sitk.GetImageFromArray(volumes)
result.CopyInformation(image)
return result
def read_deformation(filename: str, size: Tuple[int, int, int]) -> sitk.Image:
r""" Read a deformation field from a file in the PREDICT format
The deformation field is stored as binary uncompressed 32-float data.
PREDICT uses a different convention for the coordinates (with rows
along the :math:`y` axis, columns along the :math:`x` axis, and slices
along the :math:`z` axis), so a permutation of the components in the
result is required. The identity is subtracted in order to convert the
deformation to a displacement field.
.. note::
The size of the volume is not stored within PREDICT files, so it needs
to be known and passed as an argument.
Parameters
----------
filename : str
Input file.
size : (int, int, int)
Size of the volume `(x,y,z)`.
Returns
-------
sitk.Image
A SimpleITK image object containing the corresponding displacement field.
"""
with open(filename, 'rb') as f:
a = np.fromfile(f, dtype=np.float32)
a = a.reshape((*size, 3))
b = np.empty(a.shape)
# Convert from PREDICT's coordinate system to ITK's.
# Also convert from deformation field to displacement field, i.e.
# subtract the identity transformation.
for x, y, z in np.ndindex(b.shape[0:3]):
b[x,y,z,0] = a[x,y,z,1] - (z+1)
b[x,y,z,1] = a[x,y,z,0] - (y+1)
b[x,y,z,2] = a[x,y,z,2] - (x+1)
return sitk.GetImageFromArray(b)
def read_img(filename: str, size: Tuple[int, int, int]) -> sitk.Image:
r""" Read an image from file in the PREDICT format.
PREDICT uses a different convention for the coordinates (with rows
along the :math:`y` axis, columns along the :math:`x` axis, and slices
along the :math:`z` axis), so a permutation of the components in the
result is required.
.. note::
The size of the volume is not stored within PREDICT files, so it needs
to be known and passed as an argument.
Parameters
----------
filename : str
Input file.
size : (int, int, int)
Size of the volume `(x,y,z)`.
Returns
-------
sitk.Image
a SimpleITK image object containing the corresponding image
"""
with open(filename, 'rb') as f:
a = np.fromfile(f, dtype=np.float32)
a = a.reshape(size)
b = np.empty(a.shape)
# Convert the coordinates of the points from the tool's
# coordinate system to ITK's
for x, y in np.ndindex(a.shape[0:2]):
b[x,y,:] = a[y,x,:]
return sitk.GetImageFromArray(b)
def write_img(image: sitk.Image, filename: str) -> None:
r""" Write an image to file in the PREDICT format.
PREDICT uses a different convention for the coordinates (with rows
along the :math:`y` axis, columns along the :math:`x` axis, and slices
along the :math:`z` axis), so a permutation of the components in the
result is required.
.. note::
The size of the volume will not be stored within the file.
Parameters
----------
filename : str
Input file.
size : (int, int, int)
Size of the volume `(x,y,z)`.
Returns
-------
sitk.Image
A SimpleITK image object containing the corresponding image.
"""
a = sitk.GetArrayViewFromImage(image)
b = np.empty(a.shape)
# Convert the coordinates of the points
for x, y in np.ndindex(b.shape[0:2]):
b[x,y,:] = a[y,x,:]
with open(filename, 'wb') as f:
b.tofile(f) | disptools/predict.py | import SimpleITK as sitk
import numpy as np
from disptools import *
def create_target_volume(image: sitk.Image, atrophy_rate: float):
r""" Create a target volume map for the PREDICT tool.
Given an input segmentation map, create mask target volume map for the
PREDICT tool, with the given atrophy rate. The volume change in
:math:`(x,y,z)` is defined over a cubic pach formed by the voxel
:math:`(x,y,z)`, its three successors along the axis and their respective
successors to close the cube. Hence, if the input mask has size
:math:`m \times n \times s`, the volume map has size
:math:`(m-1) \times (n-1) \times (s-1)`.
.. note::
The output must be in np.float32, since the PREDICT tool uses
the C `float` type as hard-coded type.
Parameters
----------
image : sitk.Image
Input segmentation map.
atrophy_rate : float
Target atropy rate for the segmented ROI.
Returns
-------
sitk.Image
A SimpleITK image object with the target volume map.
"""
(x,y,z) = tuple([x - 1 for x in image.GetSize()])
volumes = 1.0 - atrophy_rate * sitk.GetArrayViewFromImage(I)[0:x,0:y,0:z]
result = sitk.GetImageFromArray(volumes)
result.CopyInformation(image)
return result
def read_deformation(filename: str, size: Tuple[int, int, int]) -> sitk.Image:
r""" Read a deformation field from a file in the PREDICT format
The deformation field is stored as binary uncompressed 32-float data.
PREDICT uses a different convention for the coordinates (with rows
along the :math:`y` axis, columns along the :math:`x` axis, and slices
along the :math:`z` axis), so a permutation of the components in the
result is required. The identity is subtracted in order to convert the
deformation to a displacement field.
.. note::
The size of the volume is not stored within PREDICT files, so it needs
to be known and passed as an argument.
Parameters
----------
filename : str
Input file.
size : (int, int, int)
Size of the volume `(x,y,z)`.
Returns
-------
sitk.Image
A SimpleITK image object containing the corresponding displacement field.
"""
with open(filename, 'rb') as f:
a = np.fromfile(f, dtype=np.float32)
a = a.reshape((*size, 3))
b = np.empty(a.shape)
# Convert from PREDICT's coordinate system to ITK's.
# Also convert from deformation field to displacement field, i.e.
# subtract the identity transformation.
for x, y, z in np.ndindex(b.shape[0:3]):
b[x,y,z,0] = a[x,y,z,1] - (z+1)
b[x,y,z,1] = a[x,y,z,0] - (y+1)
b[x,y,z,2] = a[x,y,z,2] - (x+1)
return sitk.GetImageFromArray(b)
def read_img(filename: str, size: Tuple[int, int, int]) -> sitk.Image:
r""" Read an image from file in the PREDICT format.
PREDICT uses a different convention for the coordinates (with rows
along the :math:`y` axis, columns along the :math:`x` axis, and slices
along the :math:`z` axis), so a permutation of the components in the
result is required.
.. note::
The size of the volume is not stored within PREDICT files, so it needs
to be known and passed as an argument.
Parameters
----------
filename : str
Input file.
size : (int, int, int)
Size of the volume `(x,y,z)`.
Returns
-------
sitk.Image
a SimpleITK image object containing the corresponding image
"""
with open(filename, 'rb') as f:
a = np.fromfile(f, dtype=np.float32)
a = a.reshape(size)
b = np.empty(a.shape)
# Convert the coordinates of the points from the tool's
# coordinate system to ITK's
for x, y in np.ndindex(a.shape[0:2]):
b[x,y,:] = a[y,x,:]
return sitk.GetImageFromArray(b)
def write_img(image: sitk.Image, filename: str) -> None:
r""" Write an image to file in the PREDICT format.
PREDICT uses a different convention for the coordinates (with rows
along the :math:`y` axis, columns along the :math:`x` axis, and slices
along the :math:`z` axis), so a permutation of the components in the
result is required.
.. note::
The size of the volume will not be stored within the file.
Parameters
----------
filename : str
Input file.
size : (int, int, int)
Size of the volume `(x,y,z)`.
Returns
-------
sitk.Image
A SimpleITK image object containing the corresponding image.
"""
a = sitk.GetArrayViewFromImage(image)
b = np.empty(a.shape)
# Convert the coordinates of the points
for x, y in np.ndindex(b.shape[0:2]):
b[x,y,:] = a[y,x,:]
with open(filename, 'wb') as f:
b.tofile(f) | 0.94379 | 0.793466 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def init_weights(m, gain):
if (type(m) == nn.Linear) | (type(m) == nn.Conv2d):
nn.init.orthogonal_(m.weight, gain)
nn.init.zeros_(m.bias)
class CNNDeepmind(nn.Module):
def __init__(self, observation_space, n_outputs, width=512,**kwargs):
# CNN architechture of DeepMind described in https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf :
# The first hidden layer convolves 32 filters of 8 3 8 with stride 4 with the input image and applies a rectifier nonlinearity
# The second hidden layer convolves 64 filters of 4 3 4 with stride 2, again followed by a rectifier nonlinearity
# This is followed by a third convolutional layer that convolves 64 filters of 3 3 3 with stride 1 followed by a rectifier.
# The final hidden layer is fully-connected and consists of 512 rectifier units.
super().__init__()
if len(observation_space.shape) != 3:
raise NotImplementedError
# Defining the network architechture
self.conv = nn.Sequential(nn.Conv2d(4, 32, 8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1),
nn.ReLU())
self.output = nn.Sequential(nn.Linear(64 * 7 * 7, width),
nn.ReLU(),
nn.Linear(width, n_outputs))
self.conv.apply(lambda x: init_weights(x, np.sqrt(2)))
self.output.apply(lambda x: init_weights(x, np.sqrt(2)))
def forward(self, obs):
if len(obs.shape) != 4:
obs = obs.unsqueeze(0)
obs = obs.permute(0,3,1,2)
obs = obs/255
obs = self.conv(obs)
obs = obs.view(obs.size(0), -1)
return self.output(obs) | Archive/appendix/Atari/baseline-QR-DQN/cnn_deepmind.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def init_weights(m, gain):
if (type(m) == nn.Linear) | (type(m) == nn.Conv2d):
nn.init.orthogonal_(m.weight, gain)
nn.init.zeros_(m.bias)
class CNNDeepmind(nn.Module):
def __init__(self, observation_space, n_outputs, width=512,**kwargs):
# CNN architechture of DeepMind described in https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf :
# The first hidden layer convolves 32 filters of 8 3 8 with stride 4 with the input image and applies a rectifier nonlinearity
# The second hidden layer convolves 64 filters of 4 3 4 with stride 2, again followed by a rectifier nonlinearity
# This is followed by a third convolutional layer that convolves 64 filters of 3 3 3 with stride 1 followed by a rectifier.
# The final hidden layer is fully-connected and consists of 512 rectifier units.
super().__init__()
if len(observation_space.shape) != 3:
raise NotImplementedError
# Defining the network architechture
self.conv = nn.Sequential(nn.Conv2d(4, 32, 8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1),
nn.ReLU())
self.output = nn.Sequential(nn.Linear(64 * 7 * 7, width),
nn.ReLU(),
nn.Linear(width, n_outputs))
self.conv.apply(lambda x: init_weights(x, np.sqrt(2)))
self.output.apply(lambda x: init_weights(x, np.sqrt(2)))
def forward(self, obs):
if len(obs.shape) != 4:
obs = obs.unsqueeze(0)
obs = obs.permute(0,3,1,2)
obs = obs/255
obs = self.conv(obs)
obs = obs.view(obs.size(0), -1)
return self.output(obs) | 0.941594 | 0.555797 |
import json
import os
import sqlite3
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_cytoscape as cyto
import dash_html_components as html
import dash_table
import networkx as nx
import pandas as pd
from OmicsIntegrator import Graph
from dash.dash import no_update
from dash.dependencies import Output, Input, State, ALL
from dash_extensions import Download
from dash_extensions.snippets import send_file
import dash_app.vis_stylesheets as stylesheets
import go_enrichment.go_enrichment as goe
from dash_app.app import app # Loads app variable from app script
def make_cyto_elements(network):
"""Takes a networkx network and outputs Cytoscape elements that can be visualized with Dash. Also creates selector
classes according to the attributes and the layout coordinates."""
# Get node degrees
nx.set_node_attributes(network, dict(network.degree()), 'degree')
# Convert nx network to cytoscape JSON
json_elements = nx.readwrite.json_graph.cytoscape_data(network)['elements']
# Make layout (much faster than default Cytoscape layouts)
layout = nx.nx_agraph.graphviz_layout(network)
nodes = json_elements['nodes']
for node in nodes:
node['data']['label'] = node['data']['shortName'] # Use short name as node label
pos = layout[node['data']['id']] # Extract positions from layout dict
node['position'] = {'x': pos[0], 'y': pos[1]} # Set positions
elements = dict()
elements['nodes'] = nodes
elements['edges'] = json_elements['edges']
return elements
def make_vis_layout(network_df, enrichment_results, cyto_network, network_params):
"""Generates a custom layout depending on the network type."""
network_params = json.loads(network_params)
# This filter is only used with RNASeq/Combined networks
regulation_filter = html.Details(
id='diff-exp-details',
open=True,
children=[
html.Summary('By differential expression'),
dbc.Checklist(
# ID type for pattern matching callback
id={
'type': 'filter',
'index': 3
},
options=[
{'label': 'Up-Regulated', 'value': 'up'},
{'label': 'Down-Regulated', 'value': 'down'}
],
value=[],
)
],
)
# This filter is only used with Combined networks
source_filter = html.Details(
id='source-details',
open=True,
children=[
html.Summary('By experiment '),
dbc.Checklist(
# ID type for pattern matching callback
id={
'type': 'filter',
'index': 4
},
options=[
{'label': 'RNA-Seq', 'value': 'RNASeq'},
{'label': 'Tn-Seq', 'value': 'TnSeq'}
],
value=[],
)
],
)
# These filters are used for all networks
sidebar_filters = [
html.H5('Select nodes'),
html.Details(
open=True,
children=[
html.Summary('By name'),
dcc.Dropdown(
# ID type for pattern-matching callback
id={
'type': 'filter',
'index': 0
},
# Allow search by both short name and locus tag (index)
options=[{'label': term, 'value': term}
for term in network_df['shortName']] +
[{'label': term, 'value': term}
for term in network_df.index],
multi=True,
optionHeight=50
)
],
),
html.Br(),
html.Details(
open=True,
children=[
html.Summary('By localization '),
dcc.Dropdown(
# ID type for pattern-matching callback
id={
'type': 'filter',
'index': 1
},
options=[
dict(label=location, value=location)
for location in network_df['localization'].unique()
],
multi=True,
)
],
),
html.Br(),
html.Details(
open=True,
children=[
html.Summary('By enriched GO term'),
dcc.Dropdown(
# ID type for pattern-matching callback
id={
'type': 'filter',
'index': 2
},
# Use enriched GO terms as options.
options=[{'label': term, 'value': term}
for term in enrichment_results['name']],
multi=True,
optionHeight=50
)
],
),
html.Br()
]
# Add extra filters for DE/Combined networks
if network_params['type'] == 'rna_seq' or network_params['type'] == 'combined':
sidebar_filters.extend([regulation_filter, html.Br()])
if network_params['type'] == 'combined':
sidebar_filters.extend([source_filter])
color_mapping = [html.Div(id='color-map'), html.Div(id='legend')]
stylesheet = stylesheets.default
# Add color mapping functionality for DE/Combined networks
if network_params['type'] == 'gene_list':
stylesheet = stylesheets.default
elif network_params['type'] == 'rna_seq':
color_mapping = [
html.H5('Color Mapping'),
html.Div(id='color-map'),
html.Div(style={'padding': '10px'},
children=html.Img(
src=app.get_asset_url('de_legend.svg'),
id='legend',
width=100)
)
]
stylesheet = stylesheets.fold_change
elif network_params['type'] == 'combined':
color_mapping = [
html.H5('Color Mapping'),
dbc.RadioItems(
options=[
{'label': 'Experiment', 'value': 'experiment'},
{'label': 'Differential Expression', 'value': 'regulation'}
],
value='experiment',
id='color-map',
),
html.Div(style={'padding': '10px'},
children=html.Img(
id='legend',
width=100)
)
]
# Layout begins here
return html.Div(
[
html.Div(
style={
'width': '24vw',
'backgroundColor': '#a6edff',
'padding': '10px',
'display': 'inline-block',
'height': 'calc(100vh - 65px)',
'vertical-align': 'top',
'overflow': 'auto'
},
children=[
html.Div(color_mapping),
# legend,
dbc.Checklist(
id='show-labels',
options=[
{'label': 'Show node labels', 'value': 1}
],
switch=True,
value=[]
),
html.Hr(),
html.Div(
id='full-network-panel',
children=[
html.Div(
id='node-filters',
children=sidebar_filters
),
html.P(id='num-selected-nodes', style={'padding-top': '5px'}),
dbc.Button('Make Sub-Network', id='make-subnetwork', color='primary',
style={'display': 'none'})
]
),
html.Div(
id='subnetwork-btns',
style={'display': 'none'},
children=[
dbc.Checklist(id='include-extra-genes',
options=[
{'label': 'Include additional genes', 'value': 1}
],
switch=True,
value=[]
),
html.Abbr('Help',
title=(('Include additional genes, called Steiner nodes, that are are not '
'included in the original data, but help connect other genes that are. '
'Useful to connect subnetworks with '
'many smaller components.')),
),
html.Br(),
dbc.Checklist(id='include-low-confidence',
options=[
{'label': 'Include low-confidence interactions', 'value': 1}
],
switch=True,
value=[]
),
html.Abbr('Help',
title=(('Include all interactions in the subnetwork, instead of prioritizing '
'experimental interactions.')),
),
html.Br(),
dbc.Button(
'Return to selection',
id='reset-network',
color='primary',
)
]
),
html.Hr(),
dbc.DropdownMenu(
id='download-dropdown',
color='primary',
style={'padding-top': '5px'},
label='Download',
direction='right',
children=[
dbc.DropdownMenuItem('Network (.graphml)',
id='download-network'),
dbc.DropdownMenuItem('Network Image (.png)',
id='download-network-img'),
dbc.DropdownMenuItem('Table (.csv)',
id='download-table',
style={'display': 'none'})
]
),
Download(id='graphml-download'),
Download(id='csv-download'),
# Hidden Divs to store node details and subnetwork for download
html.Div(id='filtered-node-details', style={'display': 'none'}),
html.Div(id='hidden-subnetwork', style={'display': 'none'})
],
),
html.Div(
style={
'display': 'inline-block',
'width': '74vw'
},
children=dbc.Container(
fluid=True,
children=[
dbc.Row(
[
dbc.Col(
children=cyto.Cytoscape(
id='main-view',
style={
'width': '100%',
'height': 'calc(100vh - 80px)'
},
stylesheet=stylesheet,
maxZoom=5,
minZoom=0.3,
zoom=1,
layout={'name': 'preset'},
elements=cyto_network,
boxSelectionEnabled=True
)
),
]
),
dbc.Row(
dbc.Col(
html.Div(id='node-details-table',
style={'margin-top': '-25vh'}
)
)
)
]
)
)
]
)
@app.callback(
[Output('main-view', 'stylesheet'),
Output('legend', 'src')],
[Input('color-map', 'value'),
Input('show-labels', 'value')]
)
def change_stylesheet(color_map, show_labels):
if color_map == 'experiment':
legend = app.get_asset_url('sig_source_legend.svg')
stylesheet = stylesheets.combined
else:
legend = app.get_asset_url('de_legend.svg')
stylesheet = stylesheets.fold_change
if show_labels:
return stylesheets.add_labels(stylesheet), legend
else:
return stylesheet, legend
@app.callback(
[Output('full-network-panel', 'style'),
Output('subnetwork-btns', 'style'),
Output('main-view', 'elements'),
Output('hidden-subnetwork', 'children'),
Output('num-selected-nodes', 'children'),
Output('make-subnetwork', 'style')],
[Input({'type': 'filter', 'index': ALL}, 'value'), # Pattern-matching all callbacks with filter type
Input('make-subnetwork', 'n_clicks'),
Input('include-low-confidence', 'value'),
Input('include-extra-genes', 'value')],
[State('node-details-df', 'children'),
State('enrichment-results', 'children'),
State('network-parameters', 'children'),
State('cyto-network', 'children'),
State('hidden-bionetwork', 'children')]
)
def select_nodes(values, subnetwork_clicks, low_confidence, extra_genes, node_details, enrichment_results,
network_params, cyto_network, bio_network):
"""Select nodes according to user selected filters. Creates subnetwork with selected nodes."""
cyto_network = json.loads(cyto_network)
enrichment_results = pd.read_json(enrichment_results)
nodes = cyto_network['nodes']
# edges = cyto_network['edges']
network_df = pd.read_json(node_details)
network_params = json.loads(network_params)
strain = network_params['strain']
network_type = network_params['type']
query = [] # Query to filter nodes
# Get selected filter input values
short_name = values[0]
location = values[1]
enriched_terms = values[2]
if network_type == 'rna_seq' or network_params['type'] == 'combined':
regulation = values[3]
if network_params['type'] == 'combined':
significance_source = values[4]
else:
significance_source = []
else:
regulation, significance_source = [], []
# Add queries depending on GUI filter selections
if location:
query.append('localization in @location')
if enriched_terms:
# Get genes associated with selected GO term(s)
genes_in_term = enrichment_results.loc[enrichment_results['name'].isin(enriched_terms), 'study_items']
total_genes = [gene for term in genes_in_term for gene in term.split(', ')] # Unnest genes in term
if strain == 'PA14':
total_genes = goe.map_pao1_genes(total_genes)
query.append('index in @total_genes')
if significance_source:
significance_source.append('both')
query.append('significanceSource in @significance_source')
if regulation:
query.append('regulation in @regulation')
query_str = ' & '.join(query) # Join all queries
if short_name:
if not query:
query_str += 'shortName in @short_name | index in @short_name'
else:
query_str += '| shortName in @short_name | index in @short_name'
# Use query to select nodes
if query_str:
queried_nodes = network_df.query(query_str).index.tolist()
else:
queried_nodes = []
# Select nodes
for node in nodes:
node['selected'] = True if node['data']['id'] in queried_nodes else False
selected_msg = 'Selected {} out of {} nodes'.format(len(queried_nodes), len(nodes))
# Display make network button after selecting nodes
btn_display = {'display': 'block'} if len(queried_nodes) != 0 else {'display': 'none'}
# Generate subnetwork when button is clicked.
if subnetwork_clicks:
cyto_sub_network, json_sub_network = make_subnetwork(queried_nodes, network_df, bio_network, strain,
network_type, low_confidence, extra_genes)
# Throws warning if subnetwork is empty.
if json_sub_network is None:
selected_msg = dbc.Alert('Could not compute subnetwork using the selected nodes. Try selecting more nodes.',
color='warning')
cyto_sub_network = no_update
json_sub_network = no_update
# Return subnetwork
else:
selected_msg = ''
return {'display': 'none'}, {'display': 'block'}, cyto_sub_network, json_sub_network, selected_msg, \
btn_display
# Return full network
return {'display': 'block'}, {'display': 'none'}, cyto_network, bio_network, selected_msg, btn_display
@app.callback(
Output('make-subnetwork', 'n_clicks'),
[Input('reset-network', 'n_clicks')])
def reset_subnetwork_clicks(n_clicks):
"""Reset subnetwork clicks to cycle through full network/subnetwork view."""
return 0
def make_subnetwork(queried_nodes, network_df, json_str_network, strain, network_type, low_confidence, extra_genes):
"""Returns a subnetwork using the PCSF algorithm, using the user-selected nodes as terminals."""
def make_prize_file(network_df, queried_nodes, network_type):
"""Generates .tsv file with node prizes for use with OmicsIntegrator."""
if network_type == 'gene_list':
# If there is no expression data, all prizes = 1
network_df['prize'] = 1
terminal_prizes = network_df.loc[network_df.index.isin(queried_nodes), 'prize']
elif network_type == 'rna_seq' or network_type == 'combined':
# Set prizes to expression values
terminal_prizes = network_df.loc[network_df.index.isin(queried_nodes), ['log2FoldChange']]
# The bigger the fold change, the bigger the prize
terminal_prizes.log2FoldChange = abs(terminal_prizes.log2FoldChange)
terminal_prizes = terminal_prizes.rename(columns={'log2FoldChange': 'prize'})
if network_type == 'combined':
# Set TnSeq prizes to the max prize
terminal_prizes.loc[network_df['significanceSource'] == 'TnSeq', :] = terminal_prizes['prize'].max()
terminal_prizes.to_csv('node_prizes.tsv', sep='\t')
network = nx.node_link_graph(json.loads(json_str_network))
# Make Graph object for prize-collecting Steiner forest (PCSF)
graph = Graph(os.path.join('data', '{}_interactome.tsv'.format(strain)), # Get interactome with costs
{'b': 10, # b > 1 results in more terminal nodes in sub_network
'g': 0} # g = 0 = disable degree cost correction
)
make_prize_file(network_df, queried_nodes, network_type)
graph.prepare_prizes('node_prizes.tsv')
os.remove('node_prizes.tsv') # Delete prize file (not needed anymore after running PCSF)
vertex_indices, edge_indices = graph.pcsf()
forest, augmented_forest = graph.output_forest_as_networkx(vertex_indices, edge_indices)
# Include low confidence edges if selected by the user
sub_network = augmented_forest if low_confidence else forest
# If sub-network is empty, warning is shown
if len(sub_network.nodes) == 0:
return None, None
# Sub-network includes extra genes (not in the input genes)
if extra_genes:
nodes = [node for node in sub_network.nodes]
# Get extra gene information from database
with sqlite3.connect('PaIntDB.db') as db_connection:
descriptions = pd.read_sql_query("""SELECT id, product_name
FROM protein
WHERE id IN (%s)""" % ', '.join('?' * len(nodes)),
con=db_connection, params=nodes)
short_names = pd.read_sql_query("""SELECT id, name
FROM interactor
WHERE id IN (%s)""" % ', '.join('?' * len(nodes)),
con=db_connection, params=nodes)
# Format results to use as node attributes
descriptions = descriptions.set_index('id').to_dict(orient='index')
short_names = short_names.set_index('id').to_dict(orient='index')
description_attr = dict()
short_name_attr = dict()
for key, value in descriptions.items():
description_attr[key] = dict(description=value['product_name'])
for key, value in short_names.items():
short_name_attr[key] = dict(shortName=value['name'])
nx.set_node_attributes(sub_network, description_attr)
nx.set_node_attributes(sub_network, short_name_attr)
# Set locus tags as short names for new genes
for node in sub_network.nodes:
if sub_network.nodes[node]['shortName'] is None:
sub_network.nodes[node]['shortName'] = node
sub_network.remove_edges_from(nx.selfloop_edges(sub_network))
# Sub-network only includes genes in input genes
else:
sub_network = network.edge_subgraph(sub_network.edges())
unfrozen_sub = nx.Graph(sub_network) # Copy needed to remove orphan nodes
unfrozen_sub.remove_nodes_from(list(nx.isolates(unfrozen_sub)))
cyto_sub_network = make_cyto_elements(unfrozen_sub)
json_sub_network = json.dumps(nx.node_link_data(unfrozen_sub)) # For downloading
return cyto_sub_network, json_sub_network
@app.callback(
[Output('node-details-table', 'children'),
Output('filtered-node-details', 'children'),
Output('download-table', 'style')],
[Input('main-view', 'selectedNodeData')],
[State('node-details-df', 'children'),
State('network-parameters', 'children')]
)
def show_node_details(node_data, node_details, network_params):
"""Filters the network DataFrame with the user-selected nodes and returns a DataTable."""
if node_data:
# Columns to display
cols = ['shortName', 'description']
network_params = json.loads(network_params)
# Get selected nodes
node_ids = [node['label'] for node in node_data]
network_df = pd.read_json(node_details)
if network_params['type'] == 'rna_seq' or network_params['type'] == 'combined':
cols.extend(['log2FoldChange', 'padj'])
network_df['log2FoldChange'] = network_df['log2FoldChange'].round(2)
# network_df['padj'] = [sigfig.round(n, sigfigs=3) for n in network_df['padj']]
filtered_df = (network_df.loc[network_df.shortName.isin(node_ids), cols]
.reset_index()
.rename(columns={'index': 'Locus Tag',
'shortName': 'Short Name',
'description': 'Description',
'log2FoldChange': 'Log2 Fold Change',
'padj': 'Adjusted p-value'
}
)
)
nodes_table = [
dash_table.DataTable(
data=filtered_df.to_dict('records'),
columns=[{'name': i, 'id': i} for i in filtered_df.columns],
fixed_rows={'headers': True},
css=[{'selector': '.row', 'rule': 'margin: 0'}], # Fixes left margin crop
page_action='none',
sort_action='native',
style_as_list_view=True,
style_table={
'maxHeight': '25vh',
'overflowY': 'auto'
},
style_cell={'textAlign': 'left',
'minWidth': '150px',
'width': '150px',
'maxWidth': '150px',
'font-family': 'sans-serif'
},
style_header={'backgroundColor': 'rgb(166, 237, 255)',
'fontWeight': 'bold'
},
style_data={'whiteSpace': 'normal',
'table-layout': 'fixed'
}
)
]
return nodes_table, filtered_df.to_json(), {'display': 'block'}
else:
return None, None, {'display': 'none'}
@app.callback(
Output('csv-download', 'data'),
[Input('download-table', 'n_clicks')],
[State('filtered-node-details', 'children')]
)
def download_nodes_csv(n_clicks, json_df):
if n_clicks:
downloads_dir = os.path.join(os.getcwd(), 'downloads')
if not os.path.exists(downloads_dir):
os.mkdir(downloads_dir)
nodes_df = pd.read_json(json_df)
abs_filename = os.path.join(downloads_dir, 'node_details.csv')
nodes_df.to_csv(abs_filename, index=False)
return send_file(abs_filename)
@app.callback(
Output('graphml-download', 'data'),
Input('download-network', 'n_clicks'),
State('hidden-subnetwork', 'children')
)
def download_graphml(n_clicks, json_str_sub_network):
if n_clicks:
downloads_dir = os.path.join(os.getcwd(), 'downloads')
if not os.path.exists(downloads_dir):
os.mkdir(downloads_dir)
rel_filename = os.path.join('downloads', 'network.graphml')
abs_filename = os.path.join(os.getcwd(), rel_filename)
sub_network = nx.node_link_graph(json.loads(json_str_sub_network))
nx.write_graphml(sub_network, path=abs_filename)
return send_file(abs_filename)
@app.callback(
Output('main-view', 'generateImage'),
[Input('download-network-img', 'n_clicks')]
)
def download_png(n_clicks):
file_type = 'png'
action = 'store'
if n_clicks:
file_type = 'png'
action = 'download'
return {'type': file_type, 'action': action, 'filename': 'subnetwork'} | dash_app/pages/vis.py | import json
import os
import sqlite3
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_cytoscape as cyto
import dash_html_components as html
import dash_table
import networkx as nx
import pandas as pd
from OmicsIntegrator import Graph
from dash.dash import no_update
from dash.dependencies import Output, Input, State, ALL
from dash_extensions import Download
from dash_extensions.snippets import send_file
import dash_app.vis_stylesheets as stylesheets
import go_enrichment.go_enrichment as goe
from dash_app.app import app # Loads app variable from app script
def make_cyto_elements(network):
"""Takes a networkx network and outputs Cytoscape elements that can be visualized with Dash. Also creates selector
classes according to the attributes and the layout coordinates."""
# Get node degrees
nx.set_node_attributes(network, dict(network.degree()), 'degree')
# Convert nx network to cytoscape JSON
json_elements = nx.readwrite.json_graph.cytoscape_data(network)['elements']
# Make layout (much faster than default Cytoscape layouts)
layout = nx.nx_agraph.graphviz_layout(network)
nodes = json_elements['nodes']
for node in nodes:
node['data']['label'] = node['data']['shortName'] # Use short name as node label
pos = layout[node['data']['id']] # Extract positions from layout dict
node['position'] = {'x': pos[0], 'y': pos[1]} # Set positions
elements = dict()
elements['nodes'] = nodes
elements['edges'] = json_elements['edges']
return elements
def make_vis_layout(network_df, enrichment_results, cyto_network, network_params):
"""Generates a custom layout depending on the network type."""
network_params = json.loads(network_params)
# This filter is only used with RNASeq/Combined networks
regulation_filter = html.Details(
id='diff-exp-details',
open=True,
children=[
html.Summary('By differential expression'),
dbc.Checklist(
# ID type for pattern matching callback
id={
'type': 'filter',
'index': 3
},
options=[
{'label': 'Up-Regulated', 'value': 'up'},
{'label': 'Down-Regulated', 'value': 'down'}
],
value=[],
)
],
)
# This filter is only used with Combined networks
source_filter = html.Details(
id='source-details',
open=True,
children=[
html.Summary('By experiment '),
dbc.Checklist(
# ID type for pattern matching callback
id={
'type': 'filter',
'index': 4
},
options=[
{'label': 'RNA-Seq', 'value': 'RNASeq'},
{'label': 'Tn-Seq', 'value': 'TnSeq'}
],
value=[],
)
],
)
# These filters are used for all networks
sidebar_filters = [
html.H5('Select nodes'),
html.Details(
open=True,
children=[
html.Summary('By name'),
dcc.Dropdown(
# ID type for pattern-matching callback
id={
'type': 'filter',
'index': 0
},
# Allow search by both short name and locus tag (index)
options=[{'label': term, 'value': term}
for term in network_df['shortName']] +
[{'label': term, 'value': term}
for term in network_df.index],
multi=True,
optionHeight=50
)
],
),
html.Br(),
html.Details(
open=True,
children=[
html.Summary('By localization '),
dcc.Dropdown(
# ID type for pattern-matching callback
id={
'type': 'filter',
'index': 1
},
options=[
dict(label=location, value=location)
for location in network_df['localization'].unique()
],
multi=True,
)
],
),
html.Br(),
html.Details(
open=True,
children=[
html.Summary('By enriched GO term'),
dcc.Dropdown(
# ID type for pattern-matching callback
id={
'type': 'filter',
'index': 2
},
# Use enriched GO terms as options.
options=[{'label': term, 'value': term}
for term in enrichment_results['name']],
multi=True,
optionHeight=50
)
],
),
html.Br()
]
# Add extra filters for DE/Combined networks
if network_params['type'] == 'rna_seq' or network_params['type'] == 'combined':
sidebar_filters.extend([regulation_filter, html.Br()])
if network_params['type'] == 'combined':
sidebar_filters.extend([source_filter])
color_mapping = [html.Div(id='color-map'), html.Div(id='legend')]
stylesheet = stylesheets.default
# Add color mapping functionality for DE/Combined networks
if network_params['type'] == 'gene_list':
stylesheet = stylesheets.default
elif network_params['type'] == 'rna_seq':
color_mapping = [
html.H5('Color Mapping'),
html.Div(id='color-map'),
html.Div(style={'padding': '10px'},
children=html.Img(
src=app.get_asset_url('de_legend.svg'),
id='legend',
width=100)
)
]
stylesheet = stylesheets.fold_change
elif network_params['type'] == 'combined':
color_mapping = [
html.H5('Color Mapping'),
dbc.RadioItems(
options=[
{'label': 'Experiment', 'value': 'experiment'},
{'label': 'Differential Expression', 'value': 'regulation'}
],
value='experiment',
id='color-map',
),
html.Div(style={'padding': '10px'},
children=html.Img(
id='legend',
width=100)
)
]
# Layout begins here
return html.Div(
[
html.Div(
style={
'width': '24vw',
'backgroundColor': '#a6edff',
'padding': '10px',
'display': 'inline-block',
'height': 'calc(100vh - 65px)',
'vertical-align': 'top',
'overflow': 'auto'
},
children=[
html.Div(color_mapping),
# legend,
dbc.Checklist(
id='show-labels',
options=[
{'label': 'Show node labels', 'value': 1}
],
switch=True,
value=[]
),
html.Hr(),
html.Div(
id='full-network-panel',
children=[
html.Div(
id='node-filters',
children=sidebar_filters
),
html.P(id='num-selected-nodes', style={'padding-top': '5px'}),
dbc.Button('Make Sub-Network', id='make-subnetwork', color='primary',
style={'display': 'none'})
]
),
html.Div(
id='subnetwork-btns',
style={'display': 'none'},
children=[
dbc.Checklist(id='include-extra-genes',
options=[
{'label': 'Include additional genes', 'value': 1}
],
switch=True,
value=[]
),
html.Abbr('Help',
title=(('Include additional genes, called Steiner nodes, that are are not '
'included in the original data, but help connect other genes that are. '
'Useful to connect subnetworks with '
'many smaller components.')),
),
html.Br(),
dbc.Checklist(id='include-low-confidence',
options=[
{'label': 'Include low-confidence interactions', 'value': 1}
],
switch=True,
value=[]
),
html.Abbr('Help',
title=(('Include all interactions in the subnetwork, instead of prioritizing '
'experimental interactions.')),
),
html.Br(),
dbc.Button(
'Return to selection',
id='reset-network',
color='primary',
)
]
),
html.Hr(),
dbc.DropdownMenu(
id='download-dropdown',
color='primary',
style={'padding-top': '5px'},
label='Download',
direction='right',
children=[
dbc.DropdownMenuItem('Network (.graphml)',
id='download-network'),
dbc.DropdownMenuItem('Network Image (.png)',
id='download-network-img'),
dbc.DropdownMenuItem('Table (.csv)',
id='download-table',
style={'display': 'none'})
]
),
Download(id='graphml-download'),
Download(id='csv-download'),
# Hidden Divs to store node details and subnetwork for download
html.Div(id='filtered-node-details', style={'display': 'none'}),
html.Div(id='hidden-subnetwork', style={'display': 'none'})
],
),
html.Div(
style={
'display': 'inline-block',
'width': '74vw'
},
children=dbc.Container(
fluid=True,
children=[
dbc.Row(
[
dbc.Col(
children=cyto.Cytoscape(
id='main-view',
style={
'width': '100%',
'height': 'calc(100vh - 80px)'
},
stylesheet=stylesheet,
maxZoom=5,
minZoom=0.3,
zoom=1,
layout={'name': 'preset'},
elements=cyto_network,
boxSelectionEnabled=True
)
),
]
),
dbc.Row(
dbc.Col(
html.Div(id='node-details-table',
style={'margin-top': '-25vh'}
)
)
)
]
)
)
]
)
@app.callback(
[Output('main-view', 'stylesheet'),
Output('legend', 'src')],
[Input('color-map', 'value'),
Input('show-labels', 'value')]
)
def change_stylesheet(color_map, show_labels):
if color_map == 'experiment':
legend = app.get_asset_url('sig_source_legend.svg')
stylesheet = stylesheets.combined
else:
legend = app.get_asset_url('de_legend.svg')
stylesheet = stylesheets.fold_change
if show_labels:
return stylesheets.add_labels(stylesheet), legend
else:
return stylesheet, legend
@app.callback(
[Output('full-network-panel', 'style'),
Output('subnetwork-btns', 'style'),
Output('main-view', 'elements'),
Output('hidden-subnetwork', 'children'),
Output('num-selected-nodes', 'children'),
Output('make-subnetwork', 'style')],
[Input({'type': 'filter', 'index': ALL}, 'value'), # Pattern-matching all callbacks with filter type
Input('make-subnetwork', 'n_clicks'),
Input('include-low-confidence', 'value'),
Input('include-extra-genes', 'value')],
[State('node-details-df', 'children'),
State('enrichment-results', 'children'),
State('network-parameters', 'children'),
State('cyto-network', 'children'),
State('hidden-bionetwork', 'children')]
)
def select_nodes(values, subnetwork_clicks, low_confidence, extra_genes, node_details, enrichment_results,
network_params, cyto_network, bio_network):
"""Select nodes according to user selected filters. Creates subnetwork with selected nodes."""
cyto_network = json.loads(cyto_network)
enrichment_results = pd.read_json(enrichment_results)
nodes = cyto_network['nodes']
# edges = cyto_network['edges']
network_df = pd.read_json(node_details)
network_params = json.loads(network_params)
strain = network_params['strain']
network_type = network_params['type']
query = [] # Query to filter nodes
# Get selected filter input values
short_name = values[0]
location = values[1]
enriched_terms = values[2]
if network_type == 'rna_seq' or network_params['type'] == 'combined':
regulation = values[3]
if network_params['type'] == 'combined':
significance_source = values[4]
else:
significance_source = []
else:
regulation, significance_source = [], []
# Add queries depending on GUI filter selections
if location:
query.append('localization in @location')
if enriched_terms:
# Get genes associated with selected GO term(s)
genes_in_term = enrichment_results.loc[enrichment_results['name'].isin(enriched_terms), 'study_items']
total_genes = [gene for term in genes_in_term for gene in term.split(', ')] # Unnest genes in term
if strain == 'PA14':
total_genes = goe.map_pao1_genes(total_genes)
query.append('index in @total_genes')
if significance_source:
significance_source.append('both')
query.append('significanceSource in @significance_source')
if regulation:
query.append('regulation in @regulation')
query_str = ' & '.join(query) # Join all queries
if short_name:
if not query:
query_str += 'shortName in @short_name | index in @short_name'
else:
query_str += '| shortName in @short_name | index in @short_name'
# Use query to select nodes
if query_str:
queried_nodes = network_df.query(query_str).index.tolist()
else:
queried_nodes = []
# Select nodes
for node in nodes:
node['selected'] = True if node['data']['id'] in queried_nodes else False
selected_msg = 'Selected {} out of {} nodes'.format(len(queried_nodes), len(nodes))
# Display make network button after selecting nodes
btn_display = {'display': 'block'} if len(queried_nodes) != 0 else {'display': 'none'}
# Generate subnetwork when button is clicked.
if subnetwork_clicks:
cyto_sub_network, json_sub_network = make_subnetwork(queried_nodes, network_df, bio_network, strain,
network_type, low_confidence, extra_genes)
# Throws warning if subnetwork is empty.
if json_sub_network is None:
selected_msg = dbc.Alert('Could not compute subnetwork using the selected nodes. Try selecting more nodes.',
color='warning')
cyto_sub_network = no_update
json_sub_network = no_update
# Return subnetwork
else:
selected_msg = ''
return {'display': 'none'}, {'display': 'block'}, cyto_sub_network, json_sub_network, selected_msg, \
btn_display
# Return full network
return {'display': 'block'}, {'display': 'none'}, cyto_network, bio_network, selected_msg, btn_display
@app.callback(
Output('make-subnetwork', 'n_clicks'),
[Input('reset-network', 'n_clicks')])
def reset_subnetwork_clicks(n_clicks):
"""Reset subnetwork clicks to cycle through full network/subnetwork view."""
return 0
def make_subnetwork(queried_nodes, network_df, json_str_network, strain, network_type, low_confidence, extra_genes):
"""Returns a subnetwork using the PCSF algorithm, using the user-selected nodes as terminals."""
def make_prize_file(network_df, queried_nodes, network_type):
"""Generates .tsv file with node prizes for use with OmicsIntegrator."""
if network_type == 'gene_list':
# If there is no expression data, all prizes = 1
network_df['prize'] = 1
terminal_prizes = network_df.loc[network_df.index.isin(queried_nodes), 'prize']
elif network_type == 'rna_seq' or network_type == 'combined':
# Set prizes to expression values
terminal_prizes = network_df.loc[network_df.index.isin(queried_nodes), ['log2FoldChange']]
# The bigger the fold change, the bigger the prize
terminal_prizes.log2FoldChange = abs(terminal_prizes.log2FoldChange)
terminal_prizes = terminal_prizes.rename(columns={'log2FoldChange': 'prize'})
if network_type == 'combined':
# Set TnSeq prizes to the max prize
terminal_prizes.loc[network_df['significanceSource'] == 'TnSeq', :] = terminal_prizes['prize'].max()
terminal_prizes.to_csv('node_prizes.tsv', sep='\t')
network = nx.node_link_graph(json.loads(json_str_network))
# Make Graph object for prize-collecting Steiner forest (PCSF)
graph = Graph(os.path.join('data', '{}_interactome.tsv'.format(strain)), # Get interactome with costs
{'b': 10, # b > 1 results in more terminal nodes in sub_network
'g': 0} # g = 0 = disable degree cost correction
)
make_prize_file(network_df, queried_nodes, network_type)
graph.prepare_prizes('node_prizes.tsv')
os.remove('node_prizes.tsv') # Delete prize file (not needed anymore after running PCSF)
vertex_indices, edge_indices = graph.pcsf()
forest, augmented_forest = graph.output_forest_as_networkx(vertex_indices, edge_indices)
# Include low confidence edges if selected by the user
sub_network = augmented_forest if low_confidence else forest
# If sub-network is empty, warning is shown
if len(sub_network.nodes) == 0:
return None, None
# Sub-network includes extra genes (not in the input genes)
if extra_genes:
nodes = [node for node in sub_network.nodes]
# Get extra gene information from database
with sqlite3.connect('PaIntDB.db') as db_connection:
descriptions = pd.read_sql_query("""SELECT id, product_name
FROM protein
WHERE id IN (%s)""" % ', '.join('?' * len(nodes)),
con=db_connection, params=nodes)
short_names = pd.read_sql_query("""SELECT id, name
FROM interactor
WHERE id IN (%s)""" % ', '.join('?' * len(nodes)),
con=db_connection, params=nodes)
# Format results to use as node attributes
descriptions = descriptions.set_index('id').to_dict(orient='index')
short_names = short_names.set_index('id').to_dict(orient='index')
description_attr = dict()
short_name_attr = dict()
for key, value in descriptions.items():
description_attr[key] = dict(description=value['product_name'])
for key, value in short_names.items():
short_name_attr[key] = dict(shortName=value['name'])
nx.set_node_attributes(sub_network, description_attr)
nx.set_node_attributes(sub_network, short_name_attr)
# Set locus tags as short names for new genes
for node in sub_network.nodes:
if sub_network.nodes[node]['shortName'] is None:
sub_network.nodes[node]['shortName'] = node
sub_network.remove_edges_from(nx.selfloop_edges(sub_network))
# Sub-network only includes genes in input genes
else:
sub_network = network.edge_subgraph(sub_network.edges())
unfrozen_sub = nx.Graph(sub_network) # Copy needed to remove orphan nodes
unfrozen_sub.remove_nodes_from(list(nx.isolates(unfrozen_sub)))
cyto_sub_network = make_cyto_elements(unfrozen_sub)
json_sub_network = json.dumps(nx.node_link_data(unfrozen_sub)) # For downloading
return cyto_sub_network, json_sub_network
@app.callback(
[Output('node-details-table', 'children'),
Output('filtered-node-details', 'children'),
Output('download-table', 'style')],
[Input('main-view', 'selectedNodeData')],
[State('node-details-df', 'children'),
State('network-parameters', 'children')]
)
def show_node_details(node_data, node_details, network_params):
"""Filters the network DataFrame with the user-selected nodes and returns a DataTable."""
if node_data:
# Columns to display
cols = ['shortName', 'description']
network_params = json.loads(network_params)
# Get selected nodes
node_ids = [node['label'] for node in node_data]
network_df = pd.read_json(node_details)
if network_params['type'] == 'rna_seq' or network_params['type'] == 'combined':
cols.extend(['log2FoldChange', 'padj'])
network_df['log2FoldChange'] = network_df['log2FoldChange'].round(2)
# network_df['padj'] = [sigfig.round(n, sigfigs=3) for n in network_df['padj']]
filtered_df = (network_df.loc[network_df.shortName.isin(node_ids), cols]
.reset_index()
.rename(columns={'index': 'Locus Tag',
'shortName': 'Short Name',
'description': 'Description',
'log2FoldChange': 'Log2 Fold Change',
'padj': 'Adjusted p-value'
}
)
)
nodes_table = [
dash_table.DataTable(
data=filtered_df.to_dict('records'),
columns=[{'name': i, 'id': i} for i in filtered_df.columns],
fixed_rows={'headers': True},
css=[{'selector': '.row', 'rule': 'margin: 0'}], # Fixes left margin crop
page_action='none',
sort_action='native',
style_as_list_view=True,
style_table={
'maxHeight': '25vh',
'overflowY': 'auto'
},
style_cell={'textAlign': 'left',
'minWidth': '150px',
'width': '150px',
'maxWidth': '150px',
'font-family': 'sans-serif'
},
style_header={'backgroundColor': 'rgb(166, 237, 255)',
'fontWeight': 'bold'
},
style_data={'whiteSpace': 'normal',
'table-layout': 'fixed'
}
)
]
return nodes_table, filtered_df.to_json(), {'display': 'block'}
else:
return None, None, {'display': 'none'}
@app.callback(
Output('csv-download', 'data'),
[Input('download-table', 'n_clicks')],
[State('filtered-node-details', 'children')]
)
def download_nodes_csv(n_clicks, json_df):
if n_clicks:
downloads_dir = os.path.join(os.getcwd(), 'downloads')
if not os.path.exists(downloads_dir):
os.mkdir(downloads_dir)
nodes_df = pd.read_json(json_df)
abs_filename = os.path.join(downloads_dir, 'node_details.csv')
nodes_df.to_csv(abs_filename, index=False)
return send_file(abs_filename)
@app.callback(
Output('graphml-download', 'data'),
Input('download-network', 'n_clicks'),
State('hidden-subnetwork', 'children')
)
def download_graphml(n_clicks, json_str_sub_network):
if n_clicks:
downloads_dir = os.path.join(os.getcwd(), 'downloads')
if not os.path.exists(downloads_dir):
os.mkdir(downloads_dir)
rel_filename = os.path.join('downloads', 'network.graphml')
abs_filename = os.path.join(os.getcwd(), rel_filename)
sub_network = nx.node_link_graph(json.loads(json_str_sub_network))
nx.write_graphml(sub_network, path=abs_filename)
return send_file(abs_filename)
@app.callback(
Output('main-view', 'generateImage'),
[Input('download-network-img', 'n_clicks')]
)
def download_png(n_clicks):
file_type = 'png'
action = 'store'
if n_clicks:
file_type = 'png'
action = 'download'
return {'type': file_type, 'action': action, 'filename': 'subnetwork'} | 0.586641 | 0.157979 |
import __builtin__
import sys
from __mimic.util import patch
import unittest
class BuiltinPatchTest(unittest.TestCase):
"""Unit tests for Patch."""
def setUp(self):
self._patch = None
def tearDown(self):
if self._patch:
self._patch.Remove()
def testPatch(self):
self._patch = patch.BuiltinPatch('abs', lambda x: x)
self.assertEquals(3, abs(-3))
self._patch.Install()
self.assertEquals(-3, abs(-3))
self.assertTrue(self._patch.installed)
self._patch.Remove()
self.assertEquals(3, abs(-3))
self.assertFalse(self._patch.installed)
def testNeedsOriginal(self):
@patch.NeedsOriginal
def DoubleAbs(original, x):
return original(x) * 2
self._patch = patch.BuiltinPatch('abs', DoubleAbs)
self.assertEquals(7, abs(-7))
self._patch.Install()
self.assertEquals(14, abs(-7))
self._patch.Remove()
self.assertEquals(7, abs(-7))
def testCustomBuiltins(self):
def CustomAbs(x):
return x * x
def PatchedAbs(x):
return x
self._patch = patch.BuiltinPatch('abs', PatchedAbs)
original_abs = __builtin__.abs
saved_builtins = patch.__builtins__
custom_builtins = patch._GetBuiltinsDict().copy()
custom_builtins['abs'] = CustomAbs
try:
patch.__builtins__ = custom_builtins
self._patch.Install()
self.assertEquals(PatchedAbs, __builtin__.abs)
self.assertEquals(PatchedAbs, patch.__builtins__['abs'])
self.assertEquals(CustomAbs, self._patch._original)
self._patch.Remove()
self.assertEquals(original_abs, __builtin__.abs)
self.assertEquals(CustomAbs, patch.__builtins__['abs'])
finally:
patch.__builtins__ = saved_builtins
def Square(x):
return x * x
class AttributePatchTest(unittest.TestCase):
def testPatch(self):
module = sys.modules[__name__]
a_patch = patch.AttributePatch(module, 'Square', lambda x: x)
self.assertEquals(25, Square(5))
a_patch.Install()
self.assertEquals(5, Square(5))
self.assertTrue(a_patch.installed)
a_patch.Remove()
self.assertEquals(25, Square(5))
self.assertFalse(a_patch.installed)
def testNeedsOriginal(self):
@patch.NeedsOriginal
def NegativeSquare(original, x):
return -original(x)
module = sys.modules[__name__]
a_patch = patch.AttributePatch(module, 'Square', NegativeSquare)
self.assertEquals(25, Square(5))
a_patch.Install()
self.assertEquals(-25, Square(5))
a_patch.Remove()
self.assertEquals(25, Square(5))
class Math(object):
@staticmethod
def Tripple(x):
return 3 * x
class AttributePatchStaticTest(unittest.TestCase):
def testPatch(self):
a_patch = patch.AttributePatch(Math, 'Tripple', lambda x: x)
self.assertEquals(15, Math.Tripple(5))
a_patch.Install()
self.assertEquals(5, Math.Tripple(5))
self.assertTrue(a_patch.installed)
a_patch.Remove()
self.assertEquals(15, Math.Tripple(5))
self.assertFalse(a_patch.installed)
def testNeedsOriginal(self):
@patch.NeedsOriginal
def NegativeTripple(original, x):
return -original(x)
a_patch = patch.AttributePatch(Math, 'Tripple', NegativeTripple)
self.assertEquals(15, Math.Tripple(5))
a_patch.Install()
self.assertEquals(-15, Math.Tripple(5))
a_patch.Remove()
self.assertEquals(15, Math.Tripple(5))
if __name__ == '__main__':
unittest.main() | __mimic/util/tests/patch_test.py | import __builtin__
import sys
from __mimic.util import patch
import unittest
class BuiltinPatchTest(unittest.TestCase):
"""Unit tests for Patch."""
def setUp(self):
self._patch = None
def tearDown(self):
if self._patch:
self._patch.Remove()
def testPatch(self):
self._patch = patch.BuiltinPatch('abs', lambda x: x)
self.assertEquals(3, abs(-3))
self._patch.Install()
self.assertEquals(-3, abs(-3))
self.assertTrue(self._patch.installed)
self._patch.Remove()
self.assertEquals(3, abs(-3))
self.assertFalse(self._patch.installed)
def testNeedsOriginal(self):
@patch.NeedsOriginal
def DoubleAbs(original, x):
return original(x) * 2
self._patch = patch.BuiltinPatch('abs', DoubleAbs)
self.assertEquals(7, abs(-7))
self._patch.Install()
self.assertEquals(14, abs(-7))
self._patch.Remove()
self.assertEquals(7, abs(-7))
def testCustomBuiltins(self):
def CustomAbs(x):
return x * x
def PatchedAbs(x):
return x
self._patch = patch.BuiltinPatch('abs', PatchedAbs)
original_abs = __builtin__.abs
saved_builtins = patch.__builtins__
custom_builtins = patch._GetBuiltinsDict().copy()
custom_builtins['abs'] = CustomAbs
try:
patch.__builtins__ = custom_builtins
self._patch.Install()
self.assertEquals(PatchedAbs, __builtin__.abs)
self.assertEquals(PatchedAbs, patch.__builtins__['abs'])
self.assertEquals(CustomAbs, self._patch._original)
self._patch.Remove()
self.assertEquals(original_abs, __builtin__.abs)
self.assertEquals(CustomAbs, patch.__builtins__['abs'])
finally:
patch.__builtins__ = saved_builtins
def Square(x):
return x * x
class AttributePatchTest(unittest.TestCase):
def testPatch(self):
module = sys.modules[__name__]
a_patch = patch.AttributePatch(module, 'Square', lambda x: x)
self.assertEquals(25, Square(5))
a_patch.Install()
self.assertEquals(5, Square(5))
self.assertTrue(a_patch.installed)
a_patch.Remove()
self.assertEquals(25, Square(5))
self.assertFalse(a_patch.installed)
def testNeedsOriginal(self):
@patch.NeedsOriginal
def NegativeSquare(original, x):
return -original(x)
module = sys.modules[__name__]
a_patch = patch.AttributePatch(module, 'Square', NegativeSquare)
self.assertEquals(25, Square(5))
a_patch.Install()
self.assertEquals(-25, Square(5))
a_patch.Remove()
self.assertEquals(25, Square(5))
class Math(object):
@staticmethod
def Tripple(x):
return 3 * x
class AttributePatchStaticTest(unittest.TestCase):
def testPatch(self):
a_patch = patch.AttributePatch(Math, 'Tripple', lambda x: x)
self.assertEquals(15, Math.Tripple(5))
a_patch.Install()
self.assertEquals(5, Math.Tripple(5))
self.assertTrue(a_patch.installed)
a_patch.Remove()
self.assertEquals(15, Math.Tripple(5))
self.assertFalse(a_patch.installed)
def testNeedsOriginal(self):
@patch.NeedsOriginal
def NegativeTripple(original, x):
return -original(x)
a_patch = patch.AttributePatch(Math, 'Tripple', NegativeTripple)
self.assertEquals(15, Math.Tripple(5))
a_patch.Install()
self.assertEquals(-15, Math.Tripple(5))
a_patch.Remove()
self.assertEquals(15, Math.Tripple(5))
if __name__ == '__main__':
unittest.main() | 0.527803 | 0.444444 |
# Copyright: (c) 2018, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: tetration_software_agent
short_description: Queries and deletes software agents by uuid
version_added: '2.9'
description:
- Enables query or removal of software agents by uuid
- Searching by C(uuid) returns all parameters from the API
- Marking as absent deletes the
options:
uuid:
description: UUID of target agent
type: string
required: true
state:
choices: [absent, query]
default: query
description: Remove or query for software agent
required: true
type: string
extends_documentation_fragment: tetration_doc_common
notes:
- Requires the `requests` Python module.
- 'Required API Permission(s): sensor_management'
requirements:
- requests
author:
- <NAME> (@techbeck03)
- <NAME>(@joej164)
'''
EXAMPLES = '''
# Remove agent by uuid
tetration_software_agent:
uuid: 4b35fa6001339e5313af5e34bd88012381a9aaaa
state: absent
provider:
host: "https://tetration-cluster.company.com"
api_key: 1234567890QWERTY
api_secret: 1234567890QWERTY
# Query agent by hostname
tetration_software_agent:
uuid: 4b35fa6001339e5313af5e34bd88012381a9aaaa
state: query
provider:
host: "https://tetration-cluster.company.com"
api_key: 1234567890QWERTY
api_secret: 1234567890QWERTY
'''
RETURN = '''
---
object:
contains:
agent_type:
description: Agent type
sample: ENFORCER
type: string
arch:
description: CPU architecture type
sample: x86_64
type: string
auto_upgrade_opt_out:
description: If True, agents are not auto-upgraded during upgrade of Tetration
cluster
sample: 'False'
type: bool
cpu_quota_mode:
description: The amount of CPU quota to give to agent on the end host (pct)
sample: 1
type: int
cpu_quota_us:
description: The amount of CPU quota to give to agent on the end host (us)
sample: 30000
type: int
created_at:
description: Date this inventory was created (Unix Epoch)
sample: 1553626033
type: string
current_sw_version:
description: Current version of software agent
sample: 3.1.1.65-enforcer
type: string
data_plane_disabled:
description: If true, agent stops reporting flows to Tetration
sample: 'False'
type: bool
desired_sw_version:
description: Desired version of software agent
sample: 3.1.1.65-enforcer
type: string
enable_cache_sidechannel:
description: Whether or not sidechannel detection is enabled
sample: 'True'
type: bool
enable_forensic:
description: Whether or not forensics is enabled
sample: 'True'
type: bool
enable_meltdown:
description: Whether or not meltdown detection is enabled
sample: 'True'
type: bool
enable_pid_lookup:
description: Whether or not pid lookup for flow search is enabled
sample: 'True'
type: bool
host_name:
description: Hostname as reported by software agent
returned: when C(state) is present or query
sample: acme-example-host
type: string
interfaces:
description: List of interfaces reported by software agent
sample: JSON Interfaces
type: list
last_config_fetch_at:
description: Date of last configuration fetch (Unix Epoch)
sample: 1563458124
type: string
last_software_update_at:
description: Date of last software update (Unix Epoch)
sample: 1553626033
type: string
platform:
description: OS platform type
sample: CentOS-7.6
type: string
uuid:
description: UUID of the registered software agent
returned: when C(state) is present or query
sample: d322189839fb70b2f4569f3657eea58f096c0686
type: int
description: the changed or modified object(s)
returned: always
type: complex
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.tetration_constants import TETRATION_PROVIDER_SPEC
from ansible.module_utils.tetration_constants import TETRATION_API_SENSORS
from ansible.module_utils.tetration import TetrationApiModule
def run_module():
# define available arguments/parameters a user can pass to the module
module_args = dict(
uuid=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent', 'query']),
provider=dict(type='dict', options=TETRATION_PROVIDER_SPEC)
)
# Create the return object
result = {
'object': None,
'changed': False
}
module = AnsibleModule(
argument_spec=module_args
)
tet_module = TetrationApiModule(module)
response = None
route = f"{TETRATION_API_SENSORS}/{module.params['uuid']}"
if module.params['state'] == 'query':
response = tet_module.run_method('GET', route)
elif module.params['state'] == 'absent':
response = tet_module.run_method('DELETE', route)
result['changed'] is True
result['object'] = response
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main() | library/tetration_software_agent.py |
# Copyright: (c) 2018, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: tetration_software_agent
short_description: Queries and deletes software agents by uuid
version_added: '2.9'
description:
- Enables query or removal of software agents by uuid
- Searching by C(uuid) returns all parameters from the API
- Marking as absent deletes the
options:
uuid:
description: UUID of target agent
type: string
required: true
state:
choices: [absent, query]
default: query
description: Remove or query for software agent
required: true
type: string
extends_documentation_fragment: tetration_doc_common
notes:
- Requires the `requests` Python module.
- 'Required API Permission(s): sensor_management'
requirements:
- requests
author:
- <NAME> (@techbeck03)
- <NAME>(@joej164)
'''
EXAMPLES = '''
# Remove agent by uuid
tetration_software_agent:
uuid: 4b35fa6001339e5313af5e34bd88012381a9aaaa
state: absent
provider:
host: "https://tetration-cluster.company.com"
api_key: 1234567890QWERTY
api_secret: 1234567890QWERTY
# Query agent by hostname
tetration_software_agent:
uuid: 4b35fa6001339e5313af5e34bd88012381a9aaaa
state: query
provider:
host: "https://tetration-cluster.company.com"
api_key: 1234567890QWERTY
api_secret: 1234567890QWERTY
'''
RETURN = '''
---
object:
contains:
agent_type:
description: Agent type
sample: ENFORCER
type: string
arch:
description: CPU architecture type
sample: x86_64
type: string
auto_upgrade_opt_out:
description: If True, agents are not auto-upgraded during upgrade of Tetration
cluster
sample: 'False'
type: bool
cpu_quota_mode:
description: The amount of CPU quota to give to agent on the end host (pct)
sample: 1
type: int
cpu_quota_us:
description: The amount of CPU quota to give to agent on the end host (us)
sample: 30000
type: int
created_at:
description: Date this inventory was created (Unix Epoch)
sample: 1553626033
type: string
current_sw_version:
description: Current version of software agent
sample: 3.1.1.65-enforcer
type: string
data_plane_disabled:
description: If true, agent stops reporting flows to Tetration
sample: 'False'
type: bool
desired_sw_version:
description: Desired version of software agent
sample: 3.1.1.65-enforcer
type: string
enable_cache_sidechannel:
description: Whether or not sidechannel detection is enabled
sample: 'True'
type: bool
enable_forensic:
description: Whether or not forensics is enabled
sample: 'True'
type: bool
enable_meltdown:
description: Whether or not meltdown detection is enabled
sample: 'True'
type: bool
enable_pid_lookup:
description: Whether or not pid lookup for flow search is enabled
sample: 'True'
type: bool
host_name:
description: Hostname as reported by software agent
returned: when C(state) is present or query
sample: acme-example-host
type: string
interfaces:
description: List of interfaces reported by software agent
sample: JSON Interfaces
type: list
last_config_fetch_at:
description: Date of last configuration fetch (Unix Epoch)
sample: 1563458124
type: string
last_software_update_at:
description: Date of last software update (Unix Epoch)
sample: 1553626033
type: string
platform:
description: OS platform type
sample: CentOS-7.6
type: string
uuid:
description: UUID of the registered software agent
returned: when C(state) is present or query
sample: d322189839fb70b2f4569f3657eea58f096c0686
type: int
description: the changed or modified object(s)
returned: always
type: complex
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.tetration_constants import TETRATION_PROVIDER_SPEC
from ansible.module_utils.tetration_constants import TETRATION_API_SENSORS
from ansible.module_utils.tetration import TetrationApiModule
def run_module():
# define available arguments/parameters a user can pass to the module
module_args = dict(
uuid=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent', 'query']),
provider=dict(type='dict', options=TETRATION_PROVIDER_SPEC)
)
# Create the return object
result = {
'object': None,
'changed': False
}
module = AnsibleModule(
argument_spec=module_args
)
tet_module = TetrationApiModule(module)
response = None
route = f"{TETRATION_API_SENSORS}/{module.params['uuid']}"
if module.params['state'] == 'query':
response = tet_module.run_method('GET', route)
elif module.params['state'] == 'absent':
response = tet_module.run_method('DELETE', route)
result['changed'] is True
result['object'] = response
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main() | 0.819244 | 0.23699 |
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class RepositoryApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def repository_controller_create(self, body, api, user_agent, instance, **kwargs): # noqa: E501
"""Begin cloning the repository if it doesn't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_create(body, api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body52 body: The Tgstation.Server.Api.Models.Request.RepositoryCreateRequest. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.repository_controller_create_with_http_info(body, api, user_agent, instance, **kwargs) # noqa: E501
else:
(data) = self.repository_controller_create_with_http_info(body, api, user_agent, instance, **kwargs) # noqa: E501
return data
def repository_controller_create_with_http_info(self, body, api, user_agent, instance, **kwargs): # noqa: E501
"""Begin cloning the repository if it doesn't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_create_with_http_info(body, api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body52 body: The Tgstation.Server.Api.Models.Request.RepositoryCreateRequest. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'api', 'user_agent', 'instance'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method repository_controller_create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `repository_controller_create`") # noqa: E501
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `repository_controller_create`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `repository_controller_create`") # noqa: E501
# verify the required parameter 'instance' is set
if ('instance' not in params or
params['instance'] is None):
raise ValueError("Missing the required parameter `instance` when calling `repository_controller_create`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
if 'instance' in params:
header_params['Instance'] = params['instance'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Repository', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def repository_controller_delete(self, api, user_agent, instance, **kwargs): # noqa: E501
"""Delete the repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_delete(api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.repository_controller_delete_with_http_info(api, user_agent, instance, **kwargs) # noqa: E501
else:
(data) = self.repository_controller_delete_with_http_info(api, user_agent, instance, **kwargs) # noqa: E501
return data
def repository_controller_delete_with_http_info(self, api, user_agent, instance, **kwargs): # noqa: E501
"""Delete the repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_delete_with_http_info(api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api', 'user_agent', 'instance'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method repository_controller_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `repository_controller_delete`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `repository_controller_delete`") # noqa: E501
# verify the required parameter 'instance' is set
if ('instance' not in params or
params['instance'] is None):
raise ValueError("Missing the required parameter `instance` when calling `repository_controller_delete`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
if 'instance' in params:
header_params['Instance'] = params['instance'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Repository', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def repository_controller_read(self, api, user_agent, instance, **kwargs): # noqa: E501
"""Get the repository's status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_read(api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.repository_controller_read_with_http_info(api, user_agent, instance, **kwargs) # noqa: E501
else:
(data) = self.repository_controller_read_with_http_info(api, user_agent, instance, **kwargs) # noqa: E501
return data
def repository_controller_read_with_http_info(self, api, user_agent, instance, **kwargs): # noqa: E501
"""Get the repository's status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_read_with_http_info(api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api', 'user_agent', 'instance'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method repository_controller_read" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `repository_controller_read`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `repository_controller_read`") # noqa: E501
# verify the required parameter 'instance' is set
if ('instance' not in params or
params['instance'] is None):
raise ValueError("Missing the required parameter `instance` when calling `repository_controller_read`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
if 'instance' in params:
header_params['Instance'] = params['instance'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Repository', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def repository_controller_update(self, body, api, user_agent, instance, **kwargs): # noqa: E501
"""Perform updates to the repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_update(body, api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body56 body: The Tgstation.Server.Api.Models.Request.RepositoryUpdateRequest. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.repository_controller_update_with_http_info(body, api, user_agent, instance, **kwargs) # noqa: E501
else:
(data) = self.repository_controller_update_with_http_info(body, api, user_agent, instance, **kwargs) # noqa: E501
return data
def repository_controller_update_with_http_info(self, body, api, user_agent, instance, **kwargs): # noqa: E501
"""Perform updates to the repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_update_with_http_info(body, api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body56 body: The Tgstation.Server.Api.Models.Request.RepositoryUpdateRequest. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'api', 'user_agent', 'instance'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method repository_controller_update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `repository_controller_update`") # noqa: E501
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `repository_controller_update`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `repository_controller_update`") # noqa: E501
# verify the required parameter 'instance' is set
if ('instance' not in params or
params['instance'] is None):
raise ValueError("Missing the required parameter `instance` when calling `repository_controller_update`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
if 'instance' in params:
header_params['Instance'] = params['instance'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Repository', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) | swagger_client/api/repository_api.py | from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class RepositoryApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def repository_controller_create(self, body, api, user_agent, instance, **kwargs): # noqa: E501
"""Begin cloning the repository if it doesn't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_create(body, api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body52 body: The Tgstation.Server.Api.Models.Request.RepositoryCreateRequest. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.repository_controller_create_with_http_info(body, api, user_agent, instance, **kwargs) # noqa: E501
else:
(data) = self.repository_controller_create_with_http_info(body, api, user_agent, instance, **kwargs) # noqa: E501
return data
def repository_controller_create_with_http_info(self, body, api, user_agent, instance, **kwargs): # noqa: E501
"""Begin cloning the repository if it doesn't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_create_with_http_info(body, api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body52 body: The Tgstation.Server.Api.Models.Request.RepositoryCreateRequest. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'api', 'user_agent', 'instance'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method repository_controller_create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `repository_controller_create`") # noqa: E501
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `repository_controller_create`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `repository_controller_create`") # noqa: E501
# verify the required parameter 'instance' is set
if ('instance' not in params or
params['instance'] is None):
raise ValueError("Missing the required parameter `instance` when calling `repository_controller_create`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
if 'instance' in params:
header_params['Instance'] = params['instance'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Repository', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def repository_controller_delete(self, api, user_agent, instance, **kwargs): # noqa: E501
"""Delete the repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_delete(api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.repository_controller_delete_with_http_info(api, user_agent, instance, **kwargs) # noqa: E501
else:
(data) = self.repository_controller_delete_with_http_info(api, user_agent, instance, **kwargs) # noqa: E501
return data
def repository_controller_delete_with_http_info(self, api, user_agent, instance, **kwargs): # noqa: E501
"""Delete the repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_delete_with_http_info(api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api', 'user_agent', 'instance'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method repository_controller_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `repository_controller_delete`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `repository_controller_delete`") # noqa: E501
# verify the required parameter 'instance' is set
if ('instance' not in params or
params['instance'] is None):
raise ValueError("Missing the required parameter `instance` when calling `repository_controller_delete`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
if 'instance' in params:
header_params['Instance'] = params['instance'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Repository', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def repository_controller_read(self, api, user_agent, instance, **kwargs): # noqa: E501
"""Get the repository's status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_read(api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.repository_controller_read_with_http_info(api, user_agent, instance, **kwargs) # noqa: E501
else:
(data) = self.repository_controller_read_with_http_info(api, user_agent, instance, **kwargs) # noqa: E501
return data
def repository_controller_read_with_http_info(self, api, user_agent, instance, **kwargs): # noqa: E501
"""Get the repository's status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_read_with_http_info(api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api', 'user_agent', 'instance'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method repository_controller_read" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `repository_controller_read`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `repository_controller_read`") # noqa: E501
# verify the required parameter 'instance' is set
if ('instance' not in params or
params['instance'] is None):
raise ValueError("Missing the required parameter `instance` when calling `repository_controller_read`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
if 'instance' in params:
header_params['Instance'] = params['instance'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Repository', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def repository_controller_update(self, body, api, user_agent, instance, **kwargs): # noqa: E501
"""Perform updates to the repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_update(body, api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body56 body: The Tgstation.Server.Api.Models.Request.RepositoryUpdateRequest. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.repository_controller_update_with_http_info(body, api, user_agent, instance, **kwargs) # noqa: E501
else:
(data) = self.repository_controller_update_with_http_info(body, api, user_agent, instance, **kwargs) # noqa: E501
return data
def repository_controller_update_with_http_info(self, body, api, user_agent, instance, **kwargs): # noqa: E501
"""Perform updates to the repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_update_with_http_info(body, api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body56 body: The Tgstation.Server.Api.Models.Request.RepositoryUpdateRequest. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'api', 'user_agent', 'instance'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method repository_controller_update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `repository_controller_update`") # noqa: E501
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `repository_controller_update`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `repository_controller_update`") # noqa: E501
# verify the required parameter 'instance' is set
if ('instance' not in params or
params['instance'] is None):
raise ValueError("Missing the required parameter `instance` when calling `repository_controller_update`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
if 'instance' in params:
header_params['Instance'] = params['instance'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Repository', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) | 0.704668 | 0.048047 |
import numpy as np
def binary_repr(x):
return list(reversed([int(e) for e in np.binary_repr(x)]))
class Reduction1:
def __init__(self, modulus):
self.p = modulus
# Next power of 2
self.k = p.bit_length()
self.table = {}
for l in range(self.k, self.k*2):
self.table[l] = (2**l % self.p)
""" TODO: Can further reduce table because can just shift by one
l: 2k-1 2k-2 ... k
r: (2^l mod p)
"""
def reduce(self, x):
if x < self.p:
return x
s = binary_repr(x)
r = 0
for i in reversed(range(self.k, x.bit_length())):
if s[i] == 1:
r += self.table[i]
r += sum([s[j]*2**j for j in range(0, self.k)])
while r >= self.p:
r = r - self.p
return r
class Reduction2:
def __init__(self, modulus):
self.p = modulus
# Next power of 2
self.k = self.p.bit_length()
self.shift = 2
self.table = {
0 : lambda x,y=0: (x*y + x) % 2,
1 : lambda x,y=0: (x*y + y) % 2,
2 : lambda x,y=0: (x*y) % 2,
3 : lambda x,y=0: (x*y + x) % 2,
4 : lambda x,y=0: (x*y + y) % 2,
}
#self.table = {
# 0 : lambda x,y=0: x,
# 1 : lambda x,y=0: x,
# 2 : lambda x,y=0: x,
# 3 : lambda x,y=0: x,
# 4 : lambda x,y=0: x,
# 5 : lambda x,y=0: x,
# 6 : lambda x,y=0: x,
# 7 : lambda x,y=0: x,
# 8 : lambda x,y=0: x,
# 9 : lambda x,y=0: x,
# 10 : lambda x,y=0: x,
# 11 : lambda x,y=0: x,
# 12 : lambda x,y=0: x,
# 13 : lambda x,y=0: x,
# 14 : lambda x,y=0: x,
# 15 : lambda x,y=0: x,
# 16 : lambda x,y=0: x,
# 17 : lambda x,y=0: x,
# 18 : lambda x,y=0: x,
# 19 : lambda x,y=0: x,
# 20 : lambda x,y=0: x,
# 21 : lambda x,y=0: x,
# 22 : lambda x,y=0: x,
# 23 : lambda x,y=0: x,
# 24 : lambda x,y=0: x,
# 25 : lambda x,y=0: x,
# 26 : lambda x,y=0: x,
# 27 : lambda x,y=0: x,
# 28 : lambda x,y=0: x,
# 29 : lambda x,y=0: x,
# 30 : lambda x,y=0: x,
# 31 : lambda x,y=0: x,
# 32 : lambda x,y=0: x,
# 33 : lambda x,y=0: x,
# 34 : lambda x,y=0: x,
# 35 : lambda x,y=0: x,
# 36 : lambda x,y=0: x,
# 37 : lambda x,y=0: 0,
# 38 : lambda x,y=0: 0,
# 39 : lambda x,y=0: 0,
# 40 : lambda x,y=0: 0,
#}
#self.table = {
# 0 : lambda x,y=0: x,
# 1 : lambda x,y=0: x + y,
# 2 : lambda x,y=0: x*y + x + y,
# 3 : lambda x,y=0: x*y + x + y,
# 4 : lambda x,y=0: x*y + x + y,
# 5 : lambda x,y=0: x*y + x + y,
# 6 : lambda x,y=0: x*y + x + y,
# 7 : lambda x,y=0: x*y + x + y,
# 8 : lambda x,y=0: x*y + x + y,
# 9 : lambda x,y=0: x*y + x + y,
# 10 : lambda x,y=0: x*y + x + y,
# 11 : lambda x,y=0: x*y + x + y,
# 12 : lambda x,y=0: x*y + x + y,
# 13 : lambda x,y=0: x*y + x + y,
# 14 : lambda x,y=0: x*y + x + y,
# 15 : lambda x,y=0: x*y + x + y,
# 16 : lambda x,y=0: x*y + x + y,
# 17 : lambda x,y=0: x*y + x + y,
# 18 : lambda x,y=0: x*y + x + y,
# 19 : lambda x,y=0: x*y + x + y,
# 20 : lambda x,y=0: x*y + x + y,
# 21 : lambda x,y=0: x*y + x + y,
# 22 : lambda x,y=0: x*y + x + y,
# 23 : lambda x,y=0: x*y + x + y,
# 24 : lambda x,y=0: x*y + x + y,
# 25 : lambda x,y=0: x*y + x + y,
# 26 : lambda x,y=0: x*y + x + y,
# 27 : lambda x,y=0: x*y + x + y,
# 28 : lambda x,y=0: x*y + x + y,
# 29 : lambda x,y=0: x*y + x + y,
# 30 : lambda x,y=0: x*y + x + y,
# 31 : lambda x,y=0: x*y + x + y,
# 32 : lambda x,y=0: x*y + x + y,
# 33 : lambda x,y=0: x*y + x + y,
# 34 : lambda x,y=0: x*y + x + y,
# 35 : lambda x,y=0: x*y + x + y,
# 36 : lambda x,y=0: x*y + x + y,
# 37 : lambda x,y=0: x*y + y,
# 38 : lambda x,y=0: x*y,
# 39 : lambda x,y=0: 0,
# 40 : lambda x,y=0: 0,
#}
def _split(self, x):
bits = [int(b) for b in np.binary_repr(x, width=2*self.k)]
split = [bits[i:(i+self.k)] for i in reversed(range(0, len(bits), self.k))]
return split
def _shift(self, x, shift):
return x[:shift], x[shift:] + [0]*shift
def _bits_to_num(self, x):
return sum(x[-(i+1)]*2**i for i in range(0, len(x)))
def _add_num(self, x, num):
x_num = self._bits_to_num(x)
bits = [int(b) for b in np.binary_repr(x_num+num, width=len(x)+1)]
return bits[0], bits[1:]
def _add_repr(self, x, y):
x_num = self._bits_to_num(x)
y_num = self._bits_to_num(y)
bits = [int(b) for b in np.binary_repr(x_num+y_num, width=len(x)+1)]
return bits[0], bits[1:]
def _mod(self, x):
neg_p = -self.p
c, res = self._add_num(x, neg_p)
if c == 1:
return x[-self.k:]
else:
return res[-self.k:]
def reduce(self, x):
split = self._split(x)
print("SPLIT: ", split, "\n")
for i in range(len(split)-1, 0, -1):
t = split[i]
shifts = self.k
while shifts > 0:
if shifts < self.shift:
to_shift = shifts
else:
to_shift = self.shift
c, t = self._shift(t, to_shift)
to_add = [self.table[i](*reversed(c)) for i in reversed(range(self.k))]
c, t = self._add_repr(t, to_add)
t = self._mod([c] + t)
shifts -= to_shift
print(f"Shift={shifts}, i={i}, carry={c}, added={to_add}: {t}")
c, split[i-1] = self._add_repr(t, split[i-1])
split[i-1] = self._mod([c] + split[i-1])
# TODO: while
out = self._mod(split[0])
return self._bits_to_num(out)
def test_reduce(reducer):
p = reducer.p
possible_vals = range(64, 65)
#possible_vals = range(0, (p-1)**2)
for elem in possible_vals:
print(f"For {elem} expected {elem%p} got {reducer.reduce(elem)}")
assert(reducer.reduce(elem) == (elem % p))
if __name__ == '__main__':
#t = Reduction2(2061584302081)
t = Reduction2(23)
test_reduce(t) | rust/crypto-primitives/mod_mul.py | import numpy as np
def binary_repr(x):
return list(reversed([int(e) for e in np.binary_repr(x)]))
class Reduction1:
def __init__(self, modulus):
self.p = modulus
# Next power of 2
self.k = p.bit_length()
self.table = {}
for l in range(self.k, self.k*2):
self.table[l] = (2**l % self.p)
""" TODO: Can further reduce table because can just shift by one
l: 2k-1 2k-2 ... k
r: (2^l mod p)
"""
def reduce(self, x):
if x < self.p:
return x
s = binary_repr(x)
r = 0
for i in reversed(range(self.k, x.bit_length())):
if s[i] == 1:
r += self.table[i]
r += sum([s[j]*2**j for j in range(0, self.k)])
while r >= self.p:
r = r - self.p
return r
class Reduction2:
def __init__(self, modulus):
self.p = modulus
# Next power of 2
self.k = self.p.bit_length()
self.shift = 2
self.table = {
0 : lambda x,y=0: (x*y + x) % 2,
1 : lambda x,y=0: (x*y + y) % 2,
2 : lambda x,y=0: (x*y) % 2,
3 : lambda x,y=0: (x*y + x) % 2,
4 : lambda x,y=0: (x*y + y) % 2,
}
#self.table = {
# 0 : lambda x,y=0: x,
# 1 : lambda x,y=0: x,
# 2 : lambda x,y=0: x,
# 3 : lambda x,y=0: x,
# 4 : lambda x,y=0: x,
# 5 : lambda x,y=0: x,
# 6 : lambda x,y=0: x,
# 7 : lambda x,y=0: x,
# 8 : lambda x,y=0: x,
# 9 : lambda x,y=0: x,
# 10 : lambda x,y=0: x,
# 11 : lambda x,y=0: x,
# 12 : lambda x,y=0: x,
# 13 : lambda x,y=0: x,
# 14 : lambda x,y=0: x,
# 15 : lambda x,y=0: x,
# 16 : lambda x,y=0: x,
# 17 : lambda x,y=0: x,
# 18 : lambda x,y=0: x,
# 19 : lambda x,y=0: x,
# 20 : lambda x,y=0: x,
# 21 : lambda x,y=0: x,
# 22 : lambda x,y=0: x,
# 23 : lambda x,y=0: x,
# 24 : lambda x,y=0: x,
# 25 : lambda x,y=0: x,
# 26 : lambda x,y=0: x,
# 27 : lambda x,y=0: x,
# 28 : lambda x,y=0: x,
# 29 : lambda x,y=0: x,
# 30 : lambda x,y=0: x,
# 31 : lambda x,y=0: x,
# 32 : lambda x,y=0: x,
# 33 : lambda x,y=0: x,
# 34 : lambda x,y=0: x,
# 35 : lambda x,y=0: x,
# 36 : lambda x,y=0: x,
# 37 : lambda x,y=0: 0,
# 38 : lambda x,y=0: 0,
# 39 : lambda x,y=0: 0,
# 40 : lambda x,y=0: 0,
#}
#self.table = {
# 0 : lambda x,y=0: x,
# 1 : lambda x,y=0: x + y,
# 2 : lambda x,y=0: x*y + x + y,
# 3 : lambda x,y=0: x*y + x + y,
# 4 : lambda x,y=0: x*y + x + y,
# 5 : lambda x,y=0: x*y + x + y,
# 6 : lambda x,y=0: x*y + x + y,
# 7 : lambda x,y=0: x*y + x + y,
# 8 : lambda x,y=0: x*y + x + y,
# 9 : lambda x,y=0: x*y + x + y,
# 10 : lambda x,y=0: x*y + x + y,
# 11 : lambda x,y=0: x*y + x + y,
# 12 : lambda x,y=0: x*y + x + y,
# 13 : lambda x,y=0: x*y + x + y,
# 14 : lambda x,y=0: x*y + x + y,
# 15 : lambda x,y=0: x*y + x + y,
# 16 : lambda x,y=0: x*y + x + y,
# 17 : lambda x,y=0: x*y + x + y,
# 18 : lambda x,y=0: x*y + x + y,
# 19 : lambda x,y=0: x*y + x + y,
# 20 : lambda x,y=0: x*y + x + y,
# 21 : lambda x,y=0: x*y + x + y,
# 22 : lambda x,y=0: x*y + x + y,
# 23 : lambda x,y=0: x*y + x + y,
# 24 : lambda x,y=0: x*y + x + y,
# 25 : lambda x,y=0: x*y + x + y,
# 26 : lambda x,y=0: x*y + x + y,
# 27 : lambda x,y=0: x*y + x + y,
# 28 : lambda x,y=0: x*y + x + y,
# 29 : lambda x,y=0: x*y + x + y,
# 30 : lambda x,y=0: x*y + x + y,
# 31 : lambda x,y=0: x*y + x + y,
# 32 : lambda x,y=0: x*y + x + y,
# 33 : lambda x,y=0: x*y + x + y,
# 34 : lambda x,y=0: x*y + x + y,
# 35 : lambda x,y=0: x*y + x + y,
# 36 : lambda x,y=0: x*y + x + y,
# 37 : lambda x,y=0: x*y + y,
# 38 : lambda x,y=0: x*y,
# 39 : lambda x,y=0: 0,
# 40 : lambda x,y=0: 0,
#}
def _split(self, x):
bits = [int(b) for b in np.binary_repr(x, width=2*self.k)]
split = [bits[i:(i+self.k)] for i in reversed(range(0, len(bits), self.k))]
return split
def _shift(self, x, shift):
return x[:shift], x[shift:] + [0]*shift
def _bits_to_num(self, x):
return sum(x[-(i+1)]*2**i for i in range(0, len(x)))
def _add_num(self, x, num):
x_num = self._bits_to_num(x)
bits = [int(b) for b in np.binary_repr(x_num+num, width=len(x)+1)]
return bits[0], bits[1:]
def _add_repr(self, x, y):
x_num = self._bits_to_num(x)
y_num = self._bits_to_num(y)
bits = [int(b) for b in np.binary_repr(x_num+y_num, width=len(x)+1)]
return bits[0], bits[1:]
def _mod(self, x):
neg_p = -self.p
c, res = self._add_num(x, neg_p)
if c == 1:
return x[-self.k:]
else:
return res[-self.k:]
def reduce(self, x):
split = self._split(x)
print("SPLIT: ", split, "\n")
for i in range(len(split)-1, 0, -1):
t = split[i]
shifts = self.k
while shifts > 0:
if shifts < self.shift:
to_shift = shifts
else:
to_shift = self.shift
c, t = self._shift(t, to_shift)
to_add = [self.table[i](*reversed(c)) for i in reversed(range(self.k))]
c, t = self._add_repr(t, to_add)
t = self._mod([c] + t)
shifts -= to_shift
print(f"Shift={shifts}, i={i}, carry={c}, added={to_add}: {t}")
c, split[i-1] = self._add_repr(t, split[i-1])
split[i-1] = self._mod([c] + split[i-1])
# TODO: while
out = self._mod(split[0])
return self._bits_to_num(out)
def test_reduce(reducer):
p = reducer.p
possible_vals = range(64, 65)
#possible_vals = range(0, (p-1)**2)
for elem in possible_vals:
print(f"For {elem} expected {elem%p} got {reducer.reduce(elem)}")
assert(reducer.reduce(elem) == (elem % p))
if __name__ == '__main__':
#t = Reduction2(2061584302081)
t = Reduction2(23)
test_reduce(t) | 0.23793 | 0.52409 |
from lib.canvas import Canvas
from lib.figure import Figure
import pickle
def _offset_rounding_error(figure):
""" """
fig = []
pre_part = []
for line in figure:
if not pre_part == []:
fig.append((pre_part[3], line[1], line[2], line[3]))
else:
fig.append(line)
pre_part = line
return fig
if __name__ == '__main__':
img = 'example.svg'
ske = 'hieroglyph_frog.png'
surface, ctx = Canvas(img, 0.75).surface512(ske, sketch='off', grid='off', circle='off')
body = Figure()
body.set_hexagon(150, (0, 0))
body.cut_figure(5)
body.shear(0.5, 0)
body.j_m(0, 1, m=(0, -10)).j_m(1, 2, m=(35, -60)).j_m(2, 3, m=(30, -10))
body.j_r(0, 1, r=20).j_r(1, 2, r=30).j_r(2, 3, r=-20)
body.j_r(3, 4, r=20)
body.j_m(4, 5, m=(-10, 10)).j_m(5, 6, m=(0, 35)).j_m(6, 0, m=(-30, 20))
body.j_r(4, 5, r=-3).j_r(5, 6, r=20).j_r(6, 0, r=-25)
body.draw(ctx, width=1.0, color='grass green', fill='on', control='off', number='off')
body_outline = body.deepcopy()
body_outline.draw(ctx, width=2.0, color='bottle green')
""" arm """
sucker = Figure()
sucker.set_ellipse(5, 4, (0, 0))
sucker1 = sucker.deepcopy()
sucker1.move(156, -137)
# sucker1.draw(ctx, width=2.0, color='bottle green')
sucker2 = sucker.deepcopy()
sucker2.move(136, -139)
# sucker2.draw(ctx, width=1.0, color='bottle green')
sucker3 = sucker.deepcopy()
sucker3.move(68, -140)
# sucker3.draw(ctx, width=2.0, color='bottle green')
arm = Figure()
arm.set_hexagon(75, (60, -95))
arm.cut_figure(1, 4, 5)
arm.cut_figure(8)
arm.scale(1, 0.2)
arm.rotate(-85)
arm.j_m(7, 8, m=(43, 23)).j_m(8, 9, m=(97, 29)).j_m(9, 0, m=(55, 40))
arm.j_r(7, 8, r=60).j_r(7, 8, r=40).j_r(9, 0, r=-10)
arm.j_m(0, 1, m=(33, 10)).j_m(1, 2, m=(2, 0)).j_m(2, 3, m=(0., 0.))
arm.j_r(0, 1, r=55).j_r(1, 2, r=20).j_r(2, 3, r=-5)
arm.j_m(3, 4, m=(-10, -35)).j_m(4, 5, m=(10, -40)).j_m(5, 6, m=(40, -35))
arm.j_r(3, 4, r=60).j_r(4, 5, r=25).j_r(5, 6, r=45)
arm.j_m(6, 7, m=(18, -8))
arm.j_r(6, 7, r=65)
is_pickle1 = False
if is_pickle1:
arm.combine(sucker1)
arm.combine(sucker3)
with open('example1.pickle', mode='wb') as f:
pickle.dump(arm, f)
else:
with open('example1.pickle', mode='rb') as f:
arm = pickle.load(f)
arm.draw(ctx, width=2.0, color='grass green', fill='on', control='off', number='off')
arm_outline = arm.deepcopy()
del arm_outline.figure[10]
arm_outline.draw(ctx, width=2.0, color='bottle green', number='off')
""" leg """
leg = Figure()
leg.set_hexagon(55, (-88, -90))
leg.shear(0.3, 0)
leg.rotate(-30)
leg.j_m(0, 1, m=(-3, -5)).j_m(1, 2, m=(0, -5))
# leg.draw(ctx)
foot = Figure()
foot.set_hexagon(28, (-33, -128))
foot.shear(0.8, 0.5)
foot.rotate(-37)
foot.j_m(0, 1, m=(20, 0)).j_m(2, 3, m=(0, -3)).j_m(5, 0, m=(0, 7))
# foot.draw(ctx)
finger = foot.deepcopy()
finger.scale(0.6, 0.6)
finger.move(13, 1)
finger.rotate(-10)
# finger.draw(ctx)
sucker4 = sucker.deepcopy()
sucker4.move(37, -125)
sucker5 = sucker.deepcopy()
sucker5.move(26, -131)
sucker6 = sucker.deepcopy()
sucker6.move(6, -133)
sucker6_outline = sucker6.deepcopy()
sucker6.draw(ctx, width=2.0, color='grass green', fill='on')
sucker6_outline.draw(ctx, width=2.0, color='bottle green')
is_pickle2 = False
if is_pickle2:
foot.combine(finger)
foot.combine(sucker4)
foot.combine(sucker5)
leg.combine(foot)
with open('example2.pickle', mode='wb') as f:
pickle.dump(leg, f)
else:
with open('example2.pickle', mode='rb') as f:
leg = pickle.load(f)
# leg.draw(ctx)
leg.j_m(6, 7, m=(0, -5))
leg_outline = leg.deepcopy()
del leg_outline.figure[4:6]
leg.draw(ctx, width=2.0, color='grass green', fill='on', control='off', number='off')
leg_outline.draw(ctx, width=2.0, color='bottle green', number='off')
eye = Figure()
eye.set_ellipse(10, 11, (77, 93))
eye.rotate(10)
eye.draw(ctx, color='bottle green', width=2.0, fill='on', control='off', number='off')
mouth = Figure()
mouth.set_line(65, (105, 77))
mouth.rotate(45)
mouth.p_r(0, 0, 1, r=-20).p_r(0, 3, 2, r=10)
mouth.draw(ctx, width=3.0, color='bottle green', fill='off', control='off', number='off')
surface.finish() | example.py | from lib.canvas import Canvas
from lib.figure import Figure
import pickle
def _offset_rounding_error(figure):
""" """
fig = []
pre_part = []
for line in figure:
if not pre_part == []:
fig.append((pre_part[3], line[1], line[2], line[3]))
else:
fig.append(line)
pre_part = line
return fig
if __name__ == '__main__':
img = 'example.svg'
ske = 'hieroglyph_frog.png'
surface, ctx = Canvas(img, 0.75).surface512(ske, sketch='off', grid='off', circle='off')
body = Figure()
body.set_hexagon(150, (0, 0))
body.cut_figure(5)
body.shear(0.5, 0)
body.j_m(0, 1, m=(0, -10)).j_m(1, 2, m=(35, -60)).j_m(2, 3, m=(30, -10))
body.j_r(0, 1, r=20).j_r(1, 2, r=30).j_r(2, 3, r=-20)
body.j_r(3, 4, r=20)
body.j_m(4, 5, m=(-10, 10)).j_m(5, 6, m=(0, 35)).j_m(6, 0, m=(-30, 20))
body.j_r(4, 5, r=-3).j_r(5, 6, r=20).j_r(6, 0, r=-25)
body.draw(ctx, width=1.0, color='grass green', fill='on', control='off', number='off')
body_outline = body.deepcopy()
body_outline.draw(ctx, width=2.0, color='bottle green')
""" arm """
sucker = Figure()
sucker.set_ellipse(5, 4, (0, 0))
sucker1 = sucker.deepcopy()
sucker1.move(156, -137)
# sucker1.draw(ctx, width=2.0, color='bottle green')
sucker2 = sucker.deepcopy()
sucker2.move(136, -139)
# sucker2.draw(ctx, width=1.0, color='bottle green')
sucker3 = sucker.deepcopy()
sucker3.move(68, -140)
# sucker3.draw(ctx, width=2.0, color='bottle green')
arm = Figure()
arm.set_hexagon(75, (60, -95))
arm.cut_figure(1, 4, 5)
arm.cut_figure(8)
arm.scale(1, 0.2)
arm.rotate(-85)
arm.j_m(7, 8, m=(43, 23)).j_m(8, 9, m=(97, 29)).j_m(9, 0, m=(55, 40))
arm.j_r(7, 8, r=60).j_r(7, 8, r=40).j_r(9, 0, r=-10)
arm.j_m(0, 1, m=(33, 10)).j_m(1, 2, m=(2, 0)).j_m(2, 3, m=(0., 0.))
arm.j_r(0, 1, r=55).j_r(1, 2, r=20).j_r(2, 3, r=-5)
arm.j_m(3, 4, m=(-10, -35)).j_m(4, 5, m=(10, -40)).j_m(5, 6, m=(40, -35))
arm.j_r(3, 4, r=60).j_r(4, 5, r=25).j_r(5, 6, r=45)
arm.j_m(6, 7, m=(18, -8))
arm.j_r(6, 7, r=65)
is_pickle1 = False
if is_pickle1:
arm.combine(sucker1)
arm.combine(sucker3)
with open('example1.pickle', mode='wb') as f:
pickle.dump(arm, f)
else:
with open('example1.pickle', mode='rb') as f:
arm = pickle.load(f)
arm.draw(ctx, width=2.0, color='grass green', fill='on', control='off', number='off')
arm_outline = arm.deepcopy()
del arm_outline.figure[10]
arm_outline.draw(ctx, width=2.0, color='bottle green', number='off')
""" leg """
leg = Figure()
leg.set_hexagon(55, (-88, -90))
leg.shear(0.3, 0)
leg.rotate(-30)
leg.j_m(0, 1, m=(-3, -5)).j_m(1, 2, m=(0, -5))
# leg.draw(ctx)
foot = Figure()
foot.set_hexagon(28, (-33, -128))
foot.shear(0.8, 0.5)
foot.rotate(-37)
foot.j_m(0, 1, m=(20, 0)).j_m(2, 3, m=(0, -3)).j_m(5, 0, m=(0, 7))
# foot.draw(ctx)
finger = foot.deepcopy()
finger.scale(0.6, 0.6)
finger.move(13, 1)
finger.rotate(-10)
# finger.draw(ctx)
sucker4 = sucker.deepcopy()
sucker4.move(37, -125)
sucker5 = sucker.deepcopy()
sucker5.move(26, -131)
sucker6 = sucker.deepcopy()
sucker6.move(6, -133)
sucker6_outline = sucker6.deepcopy()
sucker6.draw(ctx, width=2.0, color='grass green', fill='on')
sucker6_outline.draw(ctx, width=2.0, color='bottle green')
is_pickle2 = False
if is_pickle2:
foot.combine(finger)
foot.combine(sucker4)
foot.combine(sucker5)
leg.combine(foot)
with open('example2.pickle', mode='wb') as f:
pickle.dump(leg, f)
else:
with open('example2.pickle', mode='rb') as f:
leg = pickle.load(f)
# leg.draw(ctx)
leg.j_m(6, 7, m=(0, -5))
leg_outline = leg.deepcopy()
del leg_outline.figure[4:6]
leg.draw(ctx, width=2.0, color='grass green', fill='on', control='off', number='off')
leg_outline.draw(ctx, width=2.0, color='bottle green', number='off')
eye = Figure()
eye.set_ellipse(10, 11, (77, 93))
eye.rotate(10)
eye.draw(ctx, color='bottle green', width=2.0, fill='on', control='off', number='off')
mouth = Figure()
mouth.set_line(65, (105, 77))
mouth.rotate(45)
mouth.p_r(0, 0, 1, r=-20).p_r(0, 3, 2, r=10)
mouth.draw(ctx, width=3.0, color='bottle green', fill='off', control='off', number='off')
surface.finish() | 0.609989 | 0.455622 |
import os
import time as t
import numpy as np
if os.name == 'nt':
import msvcrt
def getch():
return msvcrt.getch().decode()
else:
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
def getch():
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
from dynamixel_sdk import * # Uses Dynamixel SDK library
# Control table address
ADDR_PRO_TORQUE_ENABLE = 64 # Control table address is different in Dynamixel model
ADDR_PRO_GOAL_POSITION = 116
ADDR_PRO_PRESENT_POSITION = 132
# Protocol version
PROTOCOL_VERSION = 2.0 # See which protocol version is used in the Dynamixel
# Default setting
DXL_ID = 1 # Dynamixel ID : 1
BAUDRATE = 57600 # Dynamixel default baudrate : 57600
DEVICENAME = '/dev/ttyUSB0' # Check which port is being used on your controller
# ex) Windows: "COM1" Linux: "/dev/ttyUSB0" Mac: "/dev/tty.usbserial-*"
TORQUE_ENABLE = 1 # Value for enabling the torque
TORQUE_DISABLE = 0 # Value for disabling the torque
DXL_MINIMUM_POSITION_VALUE = 1200 # Dynamixel will rotate between this value
DXL_MAXIMUM_POSITION_VALUE = 2200 # and this value (note that the Dynamixel would not move when the position value is out of movable range. Check e-manual about the range of the Dynamixel you use.)
DXL_MOVING_STATUS_THRESHOLD = 20 # Dynamixel moving status threshold
index = 0
dxl_goal_position = [DXL_MINIMUM_POSITION_VALUE, DXL_MAXIMUM_POSITION_VALUE] # Goal position
# Initialize PortHandler instance
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
portHandler = PortHandler(DEVICENAME)
# Initialize PacketHandler instance
# Set the protocol version
# Get methods and members of Protocol1PacketHandler or Protocol2PacketHandler
packetHandler = PacketHandler(PROTOCOL_VERSION)
# Open port
if portHandler.openPort():
print("Succeeded to open the port")
else:
print("Failed to open the port")
print("Press any key to terminate...")
getch()
quit()
# Set port baudrate
if portHandler.setBaudRate(BAUDRATE):
print("Succeeded to change the baudrate")
else:
print("Failed to change the baudrate")
print("Press any key to terminate...")
getch()
quit()
# Enable Dynamixel Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
else:
print("Dynamixel has been successfully connected")
def setPos(val):
# Write goal position
dxl_comm_result, dxl_error = packetHandler.write4ByteTxRx(portHandler, DXL_ID, ADDR_PRO_GOAL_POSITION, val)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
dxl_present_position, dxl_comm_result, dxl_error = packetHandler.read4ByteTxRx(portHandler, DXL_ID, ADDR_PRO_PRESENT_POSITION)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
### print("[ID:%03d] GoalPos:%03d PresPos:%03d" % (DXL_ID, val, dxl_present_position))
# if not abs(dxl_goal_position[index] - dxl_present_position) > DXL_MOVING_STATUS_THRESHOLD:
def disable():
# Disable Dynamixel Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_DISABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
# Close port
def loop():
while True:
# setPos(1200) 1200 - 2350
# t.sleep(1.0)
T = 0.005
f = 12.0/60.0
theMin = 1800
theMax = 2350
v = np.sin(2.0*np.pi*f*t.time()) * (theMax - theMin)/2.0 + theMin + (theMax - theMin)/2.0
print v
vv = int(np.floor(v))
if(vv < theMin):
vv = theMin
if(vv > theMax):
vv = theMax
setPos(vv)
t.sleep(T)
loop()
portHandler.closePort() | R1-M2/src/lofaro_ventilator.py |
import os
import time as t
import numpy as np
if os.name == 'nt':
import msvcrt
def getch():
return msvcrt.getch().decode()
else:
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
def getch():
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
from dynamixel_sdk import * # Uses Dynamixel SDK library
# Control table address
ADDR_PRO_TORQUE_ENABLE = 64 # Control table address is different in Dynamixel model
ADDR_PRO_GOAL_POSITION = 116
ADDR_PRO_PRESENT_POSITION = 132
# Protocol version
PROTOCOL_VERSION = 2.0 # See which protocol version is used in the Dynamixel
# Default setting
DXL_ID = 1 # Dynamixel ID : 1
BAUDRATE = 57600 # Dynamixel default baudrate : 57600
DEVICENAME = '/dev/ttyUSB0' # Check which port is being used on your controller
# ex) Windows: "COM1" Linux: "/dev/ttyUSB0" Mac: "/dev/tty.usbserial-*"
TORQUE_ENABLE = 1 # Value for enabling the torque
TORQUE_DISABLE = 0 # Value for disabling the torque
DXL_MINIMUM_POSITION_VALUE = 1200 # Dynamixel will rotate between this value
DXL_MAXIMUM_POSITION_VALUE = 2200 # and this value (note that the Dynamixel would not move when the position value is out of movable range. Check e-manual about the range of the Dynamixel you use.)
DXL_MOVING_STATUS_THRESHOLD = 20 # Dynamixel moving status threshold
index = 0
dxl_goal_position = [DXL_MINIMUM_POSITION_VALUE, DXL_MAXIMUM_POSITION_VALUE] # Goal position
# Initialize PortHandler instance
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
portHandler = PortHandler(DEVICENAME)
# Initialize PacketHandler instance
# Set the protocol version
# Get methods and members of Protocol1PacketHandler or Protocol2PacketHandler
packetHandler = PacketHandler(PROTOCOL_VERSION)
# Open port
if portHandler.openPort():
print("Succeeded to open the port")
else:
print("Failed to open the port")
print("Press any key to terminate...")
getch()
quit()
# Set port baudrate
if portHandler.setBaudRate(BAUDRATE):
print("Succeeded to change the baudrate")
else:
print("Failed to change the baudrate")
print("Press any key to terminate...")
getch()
quit()
# Enable Dynamixel Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
else:
print("Dynamixel has been successfully connected")
def setPos(val):
# Write goal position
dxl_comm_result, dxl_error = packetHandler.write4ByteTxRx(portHandler, DXL_ID, ADDR_PRO_GOAL_POSITION, val)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
dxl_present_position, dxl_comm_result, dxl_error = packetHandler.read4ByteTxRx(portHandler, DXL_ID, ADDR_PRO_PRESENT_POSITION)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
### print("[ID:%03d] GoalPos:%03d PresPos:%03d" % (DXL_ID, val, dxl_present_position))
# if not abs(dxl_goal_position[index] - dxl_present_position) > DXL_MOVING_STATUS_THRESHOLD:
def disable():
# Disable Dynamixel Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_DISABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
# Close port
def loop():
while True:
# setPos(1200) 1200 - 2350
# t.sleep(1.0)
T = 0.005
f = 12.0/60.0
theMin = 1800
theMax = 2350
v = np.sin(2.0*np.pi*f*t.time()) * (theMax - theMin)/2.0 + theMin + (theMax - theMin)/2.0
print v
vv = int(np.floor(v))
if(vv < theMin):
vv = theMin
if(vv > theMax):
vv = theMax
setPos(vv)
t.sleep(T)
loop()
portHandler.closePort() | 0.289874 | 0.108519 |
from keras.models import Sequential, Model
from keras.layers import Embedding, LSTM, Flatten, Dense, BatchNormalization, \
Activation, Dropout, concatenate, Lambda, Reshape, Conv2D, MaxPooling2D, TimeDistributed
from keras.constraints import maxnorm
from keras.optimizers import rmsprop, TFOptimizer, Adam, Adadelta
import tensorflow as tf
from keras.utils.generic_utils import get_custom_objects
import utils
import sys
import importlib
from keras.constraints import unitnorm
config_path = ".".join(["models", sys.argv[1]]) + "." if len(sys.argv) >= 2 else ""
config = importlib.import_module(config_path+"config")
def swish(x):
return (tf.sigmoid(x) * x)
class Swish(Activation):
def __init__(self, activation, **kwargs):
super(Swish, self).__init__(activation, **kwargs)
self.__name__ = 'swish'
get_custom_objects().update({'swish': Swish(swish)})
class CrisprCasModel():
def __init__(self, for_seq_input, rev_seq_input, bio_features, off_target_features, all_features, for_cnn_input = None, weight_matrix = None):
self.weight_matrix = weight_matrix
self.seq_input_len = int(for_seq_input.shape[1])
self.bio_features_len = int(bio_features.shape[1])
self.seq_input = for_seq_input
self.rev_seq_input = rev_seq_input
self.for_cnn_input = for_cnn_input
self.bio_features = bio_features
self.off_target_features = off_target_features
self.off_target_len = int(off_target_features.shape[1])
self.for_seq_input_index = range(self.seq_input_len)
self.rev_seq_input_index = range(len(self.for_seq_input_index), len(self.for_seq_input_index)+int(rev_seq_input.shape[1]))
self.bio_features_index = range(len(self.for_seq_input_index)+len(self.rev_seq_input_index),
len(self.for_seq_input_index)+len(self.rev_seq_input_index)+self.bio_features_len)
self.off_target_features_index = range(len(self.for_seq_input_index)+len(self.rev_seq_input_index)+len(self.bio_features_index),
len(self.for_seq_input_index) + len(self.rev_seq_input_index) + len(
self.bio_features_index)+self.off_target_len)
self.all_inputs = all_features
def __seq_embedding_cnn(self, input, name_suffix = '', nt = 3):
weights = self.weight_matrix
voca_size = config.embedding_voca_size
vec_dim = config.embedding_vec_dim
input_len = self.seq_input_len
if nt == 1:
weights = None
voca_size = 5
vec_dim = 8
input_len = config.seq_len
def cov_model(kernel_size = (3,1), pool_size = (21-3+1,1), levels = config.cnn_levels):
model = Sequential()
model.add(Conv2D(levels[0], input_shape=(input_len, vec_dim, 1), kernel_size=(kernel_size[0], 1),
padding='same'))
print(input_len)
model.add(BatchNormalization())
model.add(Activation('swish'))
# output shape is (None, len, dim, 32)
model.add(MaxPooling2D(pool_size=(2,1), padding='same'))
# output shape is (None, len+1/2, dim, 32)
for i in range(len(levels)-2):
model.add(Conv2D(levels[i+1], kernel_size=(kernel_size[0], 1), padding='same'))
# output shape is (None, len+1/2, dim, 64)
model.add(BatchNormalization())
model.add(Activation('swish'))
model.add(MaxPooling2D(pool_size=(2, 1), padding='same'))
# output shape is (None, (len+1/2+1)/2, dim, 64)
last_kernal_size = (3, kernel_size[1])
model.add(Conv2D(config.cnn_levels[-1], kernel_size=last_kernal_size, strides= (1, kernel_size[1]), padding='same'))
model.add(Activation('swish'))
# output shape is (None, (len+1/2+1)/2-ker_len+1, 1, 128)
last_pool_len = pool_size[0]
for _ in range(len(levels)-1):
last_pool_len =(last_pool_len + 1) // 2
last_pool_size = (last_pool_len, 1)
model.add(MaxPooling2D(pool_size=last_pool_size, padding='valid'))
model.add(Flatten())
utils.output_model_info(model)
return model
def embedding_model(input):
em = Embedding(voca_size, vec_dim, weights= weights,
input_length=input_len, trainable=True)
embedded_input = em(input)
reshaped_embedded_input = (Reshape((input_len, vec_dim, 1)))(embedded_input)
return reshaped_embedded_input
reshaped_embedded_input_1 = embedding_model(input = input)
cov_1_1 = cov_model(kernel_size=(3,vec_dim), pool_size=(input_len, 1))(reshaped_embedded_input_1)
#reshaped_embedded_input_2 = embedding_model()
#cov_1_2 = cov_model(kernel_size=(4,config.embedding_vec_dim), pool_size=(self.seq_input_len, 1))(reshaped_embedded_input_1)
#reshaped_embedded_input_3 = embedding_model()
cov_1_3 = cov_model(kernel_size=(5, vec_dim), pool_size=(input_len, 1))(reshaped_embedded_input_1)
cnn_total = concatenate([cov_1_1, cov_1_3])
return cnn_total
'''
def __embedding_cnn(self, name_suffix = '', nt = 3):
weights = self.weight_matrix
voca_size = config.embedding_voca_size
vec_dim = config.embedding_vec_dim
input_len = self.seq_input_len
if nt == 1:
weights = None
voca_size = 5
vec_dim = 8
input_len = 20
input_matrix_len = input_len
input_matrix_height = vec_dim
model = Sequential(name='embedding_and_cnn_' + name_suffix)
model.add(Embedding(voca_size, vec_dim, weights= weights, input_length=input_len, trainable=True))
model.add(Reshape((input_len, vec_dim, 1)))
# output shape is (self.seq_input_len, config.embedding_vec_dim, 1)
model.add(Conv2D(32, kernel_size=(3, 3), activation='swish', padding='same'))
model.add(Conv2D(32, kernel_size=(3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('swish'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
input_matrix_len = (input_matrix_len + 1) / 2
input_matrix_height = (input_matrix_height + 1) / 2
# output shape is (self.seq_input_len + 1)/2, (config.embedding_vec_dim+1)/2, 32)
model.add(Conv2D(64, (3, 3), activation='swish', padding='same'))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('swish'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
input_matrix_len = (input_matrix_len + 1) / 2
input_matrix_height = (input_matrix_height + 1) / 2
# output shape is (input_matrix_len, input_matrix_height, 64)
model.add(Conv2D(128, (3, 3), activation='swish', padding='same'))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('swish'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
input_matrix_len = (input_matrix_len + 1) / 2
input_matrix_height = (input_matrix_height + 1) / 2
# output shape is (input_matrix_len, input_matrix_height, 128)
#model.add(Conv2D(256, (3, 3), activation='swish', padding='same'))
#model.add(Conv2D(256, (3, 3), padding='same'))
#model.add(BatchNormalization())
#model.add(Activation('swish'))
#model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
#input_matrix_len = (input_matrix_len + 1) / 2
#input_matrix_height = (input_matrix_height + 1) / 2
# output shape is (input_matrix_len, input_matrix_height, 256)
# model.add(Conv2D(64, (5, config.embedding_vec_dim), strides=(1, config.embedding_vec_dim), activation='swish', padding='same'))
# output shape is (self.seq_input_len + 1)/2, 1, 128)
#model.add(MaxPooling2D(pool_size=(2, vec_dim), strides=(2, vec_dim), padding='same'))
# model.add(MaxPooling2D(pool_size=((self.seq_input_len + 1)/2, 1), padding='valid'))
# output shape is (1,1,128)
model.add(Flatten())
model.add(Dense(units=config.cnn_levels[-1]))
utils.output_model_info(model)
return model
'''
def __embedding_cnn(self, name_suffix = '', nt = 3):
weights = self.weight_matrix
voca_size = config.embedding_voca_size
vec_dim = config.embedding_vec_dim
input_len = self.seq_input_len
if nt == 1:
weights = None
voca_size = 5
vec_dim = 8
input_len = config.seq_len
model = Sequential(name='embedding_and_cnn_' + name_suffix)
model.add(Embedding(voca_size, vec_dim, weights= weights, input_length=input_len, trainable=True))
model.add(Reshape((1, input_len, vec_dim)))
model.add(Conv2D(32, kernel_size=(1, 4), strides=2, padding='same'))
if config.add_norm:
model.add(BatchNormalization(momentum=0))
model.add(Activation('swish'))
# (1, 10, 32)
model.add(Conv2D(64, kernel_size=(1, 4), strides=2, padding='same'))
if config.add_norm:
model.add(BatchNormalization(momentum=0))
model.add(Activation('swish'))
# (1, 5, 64)
model.add(Conv2D(128, kernel_size=(1, 4), strides=2, padding='same'))
if config.add_norm:
model.add(BatchNormalization(momentum=0))
model.add(Activation('swish'))
# (1,3,128)
model.add(Conv2D(256, kernel_size=(1, 3), strides=2, padding='valid'))
if config.add_norm:
model.add(BatchNormalization(momentum=0))
model.add(Activation('swish'))
model.add(Flatten())
model.add(Dense(units=config.cnn_levels[-1]))
utils.output_model_info(model)
return model
def __embedding_rnn(self, name_suffix = ''):
model = Sequential(name='embedding_and_rnn_' + name_suffix)
# activation function is tanh, gates using sigmoid function
model.add(Embedding(config.embedding_voca_size, config.embedding_vec_dim, weights= self.weight_matrix, input_length=self.seq_input_len, trainable=True))
model.add(Dropout(rate=config.dropout))
# embedding layer output shape is (batch_size, self.seq_input_len=21, config.embedding_vec_dim=32)
for _ in range(config.LSTM_stacks_num):
model.add(LSTM(config.LSTM_hidden_unit, return_sequences=True, dropout=config.dropout, kernel_constraint=maxnorm(config.maxnorm)))
# output shape is (batch_size, self.seq_input_len=21, config.LSTM_hidden_unit=8)
model.add(TimeDistributed(Dense(config.rnn_time_distributed_dim)))
model.add(Flatten())
return model
def __fully_connected(self, nodes_unit_nums, input_len, name_suffix= ''):
model = Sequential(name = 'FC_' + name_suffix)
for i in range(len(nodes_unit_nums)):
if i == 0:
model.add(Dense(nodes_unit_nums[i], input_shape=(input_len,), kernel_constraint=maxnorm(config.maxnorm)))
else:
model.add(Dense(nodes_unit_nums[i], kernel_constraint=maxnorm(config.maxnorm)))
if config.add_norm:
model.add(BatchNormalization(momentum=0))
model.add(Activation(config.activation_method[i%len(config.activation_method)]))
model.add(Dropout(rate=config.dropout))
utils.output_model_info(model)
return model
def __cas9_concat_model(self):
# Embedding and LSTM model is in the front
seq2vec_input = self.seq_input
rnn_output = self.__embedding_rnn(name_suffix='for')(seq2vec_input)
# Embedding and LSTM model for reverse seq
rev_seq2vec_input = self.rev_seq_input
rev_rnn_output = self.__embedding_rnn(name_suffix='rev')(rev_seq2vec_input) if config.rev_seq else rev_seq2vec_input
# concatenate rnn trained features and extra features
extra_raw_input = self.bio_features
if self.bio_features_len:
fully_connected_bio = self.__fully_connected(config.bio_fully_connected_layer_layout, self.bio_features_len, "bio")
processed_bio_features = fully_connected_bio(extra_raw_input)
else:
processed_bio_features = extra_raw_input
off_target_input = self.off_target_features
merged_features = concatenate([processed_bio_features, rnn_output, rev_rnn_output, off_target_input])
dropouted_merged_features = Dropout(rate=0.2)(merged_features)
# fully connected layer
used_seq_input_len = 2 * self.seq_input_len if config.rev_seq else self.seq_input_len
fully_connected_output = self.__fully_connected(config.fully_connected_layer_layout,
self.off_target_len + config.bio_fully_connected_layer_layout[-1] + used_seq_input_len * config.rnn_time_distributed_dim)(dropouted_merged_features)
dropouted_fully_connected_output = Dropout(rate=0.2)(fully_connected_output)
output = Dense(1, kernel_constraint=maxnorm(config.maxnorm))(dropouted_fully_connected_output)
# Build the model
crispr_model = Model(inputs=[seq2vec_input, rev_seq2vec_input, off_target_input, extra_raw_input], outputs=[output])
return crispr_model
def __cas9_mixed_model(self):
#Embedding and LSTM model is in the front
seq2vec_input = self.seq_input
rnn_output = self.__embedding_rnn(name_suffix='for')(seq2vec_input)
#Embedding and CNN model is in the front
seq2vec_input = self.seq_input
if config.seq_cnn:
cnn_output = self.__seq_embedding_cnn(input = seq2vec_input, name_suffix='for')
else:
cnn_output = self.__embedding_cnn(name_suffix='for')(seq2vec_input)
rev_seq2vec_input = self.rev_seq_input
rev_rnn_output = rev_seq2vec_input
# concatenate rnn trained features and extra features
extra_raw_input = self.bio_features
if self.bio_features_len:
fully_connected_bio = self.__fully_connected(config.bio_fully_connected_layer_layout, self.bio_features_len, "bio")
processed_bio_features = fully_connected_bio(extra_raw_input)
else:
processed_bio_features = extra_raw_input
config.bio_fully_connected_layer_layout[-1] = 0
off_target_input = self.off_target_features
merged_features = concatenate([processed_bio_features, cnn_output, rnn_output, off_target_input, rev_rnn_output])
dropouted_merged_features = Dropout(rate=0.2)(merged_features)
# fully connected layer
if config.seq_cnn:
cnn_len = config.cnn_levels[-1] * 2
else:
cnn_len = config.cnn_levels[-1]
used_seq_input_len = self.seq_input_len # self.seq_input_len
input_len = self.off_target_len + config.bio_fully_connected_layer_layout[-1] + used_seq_input_len * config.rnn_time_distributed_dim + cnn_len
fully_connected_output = self.__fully_connected(config.fully_connected_layer_layout, input_len)(dropouted_merged_features)
dropouted_fully_connected_output = Dropout(rate=0.2)(fully_connected_output)
output = Dense(1, kernel_constraint=maxnorm(config.maxnorm))(dropouted_fully_connected_output)
# Build the model
crispr_model = Model(inputs=[seq2vec_input, rev_seq2vec_input, off_target_input, extra_raw_input], outputs=[output])
return crispr_model
def __cas9_ensemble_model(self):
#Embedding and LSTM model is in the front
seq2vec_input = self.seq_input
rnn_output = self.__embedding_rnn(name_suffix='for')(seq2vec_input)
#Embedding and CNN model is in the front
cnn_input = self.for_cnn_input
if config.seq_cnn:
cnn_output = self.__seq_embedding_cnn(input=cnn_input, name_suffix='for', nt = 1)
else:
cnn_output = self.__embedding_cnn(name_suffix='for', nt = 1)(cnn_input)
rev_seq2vec_input = self.rev_seq_input
rev_rnn_output = rev_seq2vec_input
# concatenate rnn trained features and extra features
extra_raw_input = self.bio_features
if self.bio_features_len:
fully_connected_bio = self.__fully_connected(config.bio_fully_connected_layer_layout, self.bio_features_len, "bio")
processed_bio_features = fully_connected_bio(extra_raw_input)
else:
processed_bio_features = extra_raw_input
off_target_input = self.off_target_features
merged_features = concatenate([processed_bio_features, cnn_output, rnn_output, off_target_input, rev_rnn_output])
dropouted_merged_features = Dropout(rate=0.2)(merged_features)
# fully connected layer
if config.seq_cnn:
cnn_len = config.cnn_levels[-1] * 2
else:
cnn_len = config.cnn_levels[-1]
used_seq_input_len = self.seq_input_len # self.seq_input_len
input_len = self.off_target_len + config.bio_fully_connected_layer_layout[-1] + used_seq_input_len * config.rnn_time_distributed_dim + cnn_len
fully_connected_output = self.__fully_connected(config.fully_connected_layer_layout, input_len)(dropouted_merged_features)
dropouted_fully_connected_output = Dropout(rate=0.2)(fully_connected_output)
output = Dense(1, kernel_constraint=maxnorm(config.maxnorm))(dropouted_fully_connected_output)
# Build the model
crispr_model = Model(inputs=[seq2vec_input, rev_seq2vec_input, off_target_input, extra_raw_input, cnn_input], outputs=[output])
return crispr_model
def __cas9_cnn_model(self):
rev_seq2vec_input = self.rev_seq_input
rev_rnn_output = rev_seq2vec_input
seq2vec_input = self.seq_input
if config.seq_cnn:
cnn_output = self.__seq_embedding_cnn(input = seq2vec_input, name_suffix='for')
else:
cnn_output = self.__embedding_cnn(name_suffix='for')(seq2vec_input)
# concatenate rnn trained features and extra features
extra_raw_input = self.bio_features
if self.bio_features_len:
fully_connected_bio = self.__fully_connected(config.bio_fully_connected_layer_layout, self.bio_features_len, "bio")
processed_bio_features = fully_connected_bio(extra_raw_input)
else:
processed_bio_features = extra_raw_input
off_target_input = self.off_target_features
merged_features = concatenate([processed_bio_features, cnn_output, off_target_input, rev_rnn_output])
dropouted_merged_features = Dropout(rate=0.2)(merged_features)
# fully connected layer
# cnn_final_dim = ((config.embedding_vec_dim)/2)/2
# cnn_final_len = ((self.seq_input_len)/2)/2
# cnn_len = 64 * cnn_final_dim * cnn_final_len
if config.seq_cnn:
cnn_len = config.cnn_levels[-1] * 2
else:
cnn_len = config.cnn_levels[-1]
input_len = self.off_target_len + config.bio_fully_connected_layer_layout[-1] + cnn_len
fully_connected_output = self.__fully_connected(config.fully_connected_layer_layout, input_len)(dropouted_merged_features)
dropouted_fully_connected_output = Dropout(rate=0.2)(fully_connected_output)
output = Dense(1, kernel_constraint=maxnorm(config.maxnorm))(dropouted_fully_connected_output)
# Build the model
crispr_model = Model(inputs=[seq2vec_input, rev_seq2vec_input, off_target_input, extra_raw_input], outputs=[output])
return crispr_model
def __cas9_mul_model(self):
# Embedding and LSTM model is in the front
seq2vec_input = self.seq_input
rnn_output = self.__embedding_rnn(name_suffix='for')(seq2vec_input)
# Embedding and LSTM model for reverse seq
rev_seq2vec_input = self.rev_seq_input
rev_rnn_output = self.__embedding_rnn(name_suffix='rev')(rev_seq2vec_input) if config.rev_seq else rev_seq2vec_input
# extra_biological features fully connected nn
extra_raw_input = self.bio_features
bio = Dropout(rate=0.2)(extra_raw_input)
dropouted_extra_raw_input = Reshape([1, -1])(bio)
# off target matrix
off_target_input = self.off_target_features
dropout_off_target_input = Dropout(rate=0.2)(off_target_input)
rnn_output_total = Reshape([-1, 1])(concatenate([rnn_output, rev_rnn_output, dropout_off_target_input]))
merged_feature = Lambda(lambda x: tf.einsum('aij,ajk->aik', x[0], x[1]))([rnn_output_total, dropouted_extra_raw_input])
merged_features_input = Flatten()(merged_feature)
dropouted_merged_features_input = Dropout(rate=0.2)(merged_features_input)
used_seq_input_len = 2 * self.seq_input_len if config.rev_seq else self.seq_input_len
full_layer = self.__fully_connected(config.fully_connected_layer_layout,
(config.rnn_time_distributed_dim * used_seq_input_len + self.off_target_len) * self.bio_features_len)
full_layer_output = full_layer(dropouted_merged_features_input)
output = Dense(1, kernel_constraint=maxnorm(config.maxnorm))(full_layer_output)
crispr_model = Model(inputs=[seq2vec_input, rev_seq2vec_input, off_target_input, extra_raw_input], outputs=[output])
return crispr_model
def get_raw_model(self, method = config.model_type):
crispr_model = getattr(self, "_{!s}__cas9_{!s}_model".format(self.__class__.__name__, method),
self.__cas9_concat_model)()
return crispr_model
def get_model(self, method = config.model_type):
crispr_model = getattr(self, "_{!s}__cas9_{!s}_model".format(self.__class__.__name__, method), self.__cas9_concat_model)()
return self.compile_transfer_learning_model(crispr_model)
@classmethod
def compile_transfer_learning_model(cls, model):
custimized_rmsprop = rmsprop(lr=config.start_lr, decay=config.lr_decay)
model.compile(optimizer=custimized_rmsprop, loss='mse', metrics=[utils.revised_mse_loss, 'mse'])
return model
def get_tf_model(self, ground_truth, method = config.model_type):
global_step = tf.Variable(0, trainable=False)
learn_rate = tf.train.cosine_decay_restarts(learning_rate=0.001, global_step=global_step, first_decay_steps=100)
crispr_model = getattr(self, "_{!s}__cas9_{!s}_model".format(self.__class__.__name__, method),
self.__cas9_concat_model)()
loss = tf.losses.mean_squared_error(ground_truth, crispr_model.output)
rmsprop_optimizer = tf.train.RMSPropOptimizer(learning_rate=learn_rate).minimize(loss, global_step=global_step)
custimized_rmsprop = TFOptimizer(rmsprop_optimizer)
crispr_model.compile(optimizer=custimized_rmsprop, loss='mse', metrics=[utils.revised_mse_loss, 'mse'])
return crispr_model | models.py | from keras.models import Sequential, Model
from keras.layers import Embedding, LSTM, Flatten, Dense, BatchNormalization, \
Activation, Dropout, concatenate, Lambda, Reshape, Conv2D, MaxPooling2D, TimeDistributed
from keras.constraints import maxnorm
from keras.optimizers import rmsprop, TFOptimizer, Adam, Adadelta
import tensorflow as tf
from keras.utils.generic_utils import get_custom_objects
import utils
import sys
import importlib
from keras.constraints import unitnorm
config_path = ".".join(["models", sys.argv[1]]) + "." if len(sys.argv) >= 2 else ""
config = importlib.import_module(config_path+"config")
def swish(x):
return (tf.sigmoid(x) * x)
class Swish(Activation):
def __init__(self, activation, **kwargs):
super(Swish, self).__init__(activation, **kwargs)
self.__name__ = 'swish'
get_custom_objects().update({'swish': Swish(swish)})
class CrisprCasModel():
def __init__(self, for_seq_input, rev_seq_input, bio_features, off_target_features, all_features, for_cnn_input = None, weight_matrix = None):
self.weight_matrix = weight_matrix
self.seq_input_len = int(for_seq_input.shape[1])
self.bio_features_len = int(bio_features.shape[1])
self.seq_input = for_seq_input
self.rev_seq_input = rev_seq_input
self.for_cnn_input = for_cnn_input
self.bio_features = bio_features
self.off_target_features = off_target_features
self.off_target_len = int(off_target_features.shape[1])
self.for_seq_input_index = range(self.seq_input_len)
self.rev_seq_input_index = range(len(self.for_seq_input_index), len(self.for_seq_input_index)+int(rev_seq_input.shape[1]))
self.bio_features_index = range(len(self.for_seq_input_index)+len(self.rev_seq_input_index),
len(self.for_seq_input_index)+len(self.rev_seq_input_index)+self.bio_features_len)
self.off_target_features_index = range(len(self.for_seq_input_index)+len(self.rev_seq_input_index)+len(self.bio_features_index),
len(self.for_seq_input_index) + len(self.rev_seq_input_index) + len(
self.bio_features_index)+self.off_target_len)
self.all_inputs = all_features
def __seq_embedding_cnn(self, input, name_suffix = '', nt = 3):
weights = self.weight_matrix
voca_size = config.embedding_voca_size
vec_dim = config.embedding_vec_dim
input_len = self.seq_input_len
if nt == 1:
weights = None
voca_size = 5
vec_dim = 8
input_len = config.seq_len
def cov_model(kernel_size = (3,1), pool_size = (21-3+1,1), levels = config.cnn_levels):
model = Sequential()
model.add(Conv2D(levels[0], input_shape=(input_len, vec_dim, 1), kernel_size=(kernel_size[0], 1),
padding='same'))
print(input_len)
model.add(BatchNormalization())
model.add(Activation('swish'))
# output shape is (None, len, dim, 32)
model.add(MaxPooling2D(pool_size=(2,1), padding='same'))
# output shape is (None, len+1/2, dim, 32)
for i in range(len(levels)-2):
model.add(Conv2D(levels[i+1], kernel_size=(kernel_size[0], 1), padding='same'))
# output shape is (None, len+1/2, dim, 64)
model.add(BatchNormalization())
model.add(Activation('swish'))
model.add(MaxPooling2D(pool_size=(2, 1), padding='same'))
# output shape is (None, (len+1/2+1)/2, dim, 64)
last_kernal_size = (3, kernel_size[1])
model.add(Conv2D(config.cnn_levels[-1], kernel_size=last_kernal_size, strides= (1, kernel_size[1]), padding='same'))
model.add(Activation('swish'))
# output shape is (None, (len+1/2+1)/2-ker_len+1, 1, 128)
last_pool_len = pool_size[0]
for _ in range(len(levels)-1):
last_pool_len =(last_pool_len + 1) // 2
last_pool_size = (last_pool_len, 1)
model.add(MaxPooling2D(pool_size=last_pool_size, padding='valid'))
model.add(Flatten())
utils.output_model_info(model)
return model
def embedding_model(input):
em = Embedding(voca_size, vec_dim, weights= weights,
input_length=input_len, trainable=True)
embedded_input = em(input)
reshaped_embedded_input = (Reshape((input_len, vec_dim, 1)))(embedded_input)
return reshaped_embedded_input
reshaped_embedded_input_1 = embedding_model(input = input)
cov_1_1 = cov_model(kernel_size=(3,vec_dim), pool_size=(input_len, 1))(reshaped_embedded_input_1)
#reshaped_embedded_input_2 = embedding_model()
#cov_1_2 = cov_model(kernel_size=(4,config.embedding_vec_dim), pool_size=(self.seq_input_len, 1))(reshaped_embedded_input_1)
#reshaped_embedded_input_3 = embedding_model()
cov_1_3 = cov_model(kernel_size=(5, vec_dim), pool_size=(input_len, 1))(reshaped_embedded_input_1)
cnn_total = concatenate([cov_1_1, cov_1_3])
return cnn_total
'''
def __embedding_cnn(self, name_suffix = '', nt = 3):
weights = self.weight_matrix
voca_size = config.embedding_voca_size
vec_dim = config.embedding_vec_dim
input_len = self.seq_input_len
if nt == 1:
weights = None
voca_size = 5
vec_dim = 8
input_len = 20
input_matrix_len = input_len
input_matrix_height = vec_dim
model = Sequential(name='embedding_and_cnn_' + name_suffix)
model.add(Embedding(voca_size, vec_dim, weights= weights, input_length=input_len, trainable=True))
model.add(Reshape((input_len, vec_dim, 1)))
# output shape is (self.seq_input_len, config.embedding_vec_dim, 1)
model.add(Conv2D(32, kernel_size=(3, 3), activation='swish', padding='same'))
model.add(Conv2D(32, kernel_size=(3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('swish'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
input_matrix_len = (input_matrix_len + 1) / 2
input_matrix_height = (input_matrix_height + 1) / 2
# output shape is (self.seq_input_len + 1)/2, (config.embedding_vec_dim+1)/2, 32)
model.add(Conv2D(64, (3, 3), activation='swish', padding='same'))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('swish'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
input_matrix_len = (input_matrix_len + 1) / 2
input_matrix_height = (input_matrix_height + 1) / 2
# output shape is (input_matrix_len, input_matrix_height, 64)
model.add(Conv2D(128, (3, 3), activation='swish', padding='same'))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('swish'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
input_matrix_len = (input_matrix_len + 1) / 2
input_matrix_height = (input_matrix_height + 1) / 2
# output shape is (input_matrix_len, input_matrix_height, 128)
#model.add(Conv2D(256, (3, 3), activation='swish', padding='same'))
#model.add(Conv2D(256, (3, 3), padding='same'))
#model.add(BatchNormalization())
#model.add(Activation('swish'))
#model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
#input_matrix_len = (input_matrix_len + 1) / 2
#input_matrix_height = (input_matrix_height + 1) / 2
# output shape is (input_matrix_len, input_matrix_height, 256)
# model.add(Conv2D(64, (5, config.embedding_vec_dim), strides=(1, config.embedding_vec_dim), activation='swish', padding='same'))
# output shape is (self.seq_input_len + 1)/2, 1, 128)
#model.add(MaxPooling2D(pool_size=(2, vec_dim), strides=(2, vec_dim), padding='same'))
# model.add(MaxPooling2D(pool_size=((self.seq_input_len + 1)/2, 1), padding='valid'))
# output shape is (1,1,128)
model.add(Flatten())
model.add(Dense(units=config.cnn_levels[-1]))
utils.output_model_info(model)
return model
'''
def __embedding_cnn(self, name_suffix = '', nt = 3):
weights = self.weight_matrix
voca_size = config.embedding_voca_size
vec_dim = config.embedding_vec_dim
input_len = self.seq_input_len
if nt == 1:
weights = None
voca_size = 5
vec_dim = 8
input_len = config.seq_len
model = Sequential(name='embedding_and_cnn_' + name_suffix)
model.add(Embedding(voca_size, vec_dim, weights= weights, input_length=input_len, trainable=True))
model.add(Reshape((1, input_len, vec_dim)))
model.add(Conv2D(32, kernel_size=(1, 4), strides=2, padding='same'))
if config.add_norm:
model.add(BatchNormalization(momentum=0))
model.add(Activation('swish'))
# (1, 10, 32)
model.add(Conv2D(64, kernel_size=(1, 4), strides=2, padding='same'))
if config.add_norm:
model.add(BatchNormalization(momentum=0))
model.add(Activation('swish'))
# (1, 5, 64)
model.add(Conv2D(128, kernel_size=(1, 4), strides=2, padding='same'))
if config.add_norm:
model.add(BatchNormalization(momentum=0))
model.add(Activation('swish'))
# (1,3,128)
model.add(Conv2D(256, kernel_size=(1, 3), strides=2, padding='valid'))
if config.add_norm:
model.add(BatchNormalization(momentum=0))
model.add(Activation('swish'))
model.add(Flatten())
model.add(Dense(units=config.cnn_levels[-1]))
utils.output_model_info(model)
return model
def __embedding_rnn(self, name_suffix = ''):
model = Sequential(name='embedding_and_rnn_' + name_suffix)
# activation function is tanh, gates using sigmoid function
model.add(Embedding(config.embedding_voca_size, config.embedding_vec_dim, weights= self.weight_matrix, input_length=self.seq_input_len, trainable=True))
model.add(Dropout(rate=config.dropout))
# embedding layer output shape is (batch_size, self.seq_input_len=21, config.embedding_vec_dim=32)
for _ in range(config.LSTM_stacks_num):
model.add(LSTM(config.LSTM_hidden_unit, return_sequences=True, dropout=config.dropout, kernel_constraint=maxnorm(config.maxnorm)))
# output shape is (batch_size, self.seq_input_len=21, config.LSTM_hidden_unit=8)
model.add(TimeDistributed(Dense(config.rnn_time_distributed_dim)))
model.add(Flatten())
return model
def __fully_connected(self, nodes_unit_nums, input_len, name_suffix= ''):
model = Sequential(name = 'FC_' + name_suffix)
for i in range(len(nodes_unit_nums)):
if i == 0:
model.add(Dense(nodes_unit_nums[i], input_shape=(input_len,), kernel_constraint=maxnorm(config.maxnorm)))
else:
model.add(Dense(nodes_unit_nums[i], kernel_constraint=maxnorm(config.maxnorm)))
if config.add_norm:
model.add(BatchNormalization(momentum=0))
model.add(Activation(config.activation_method[i%len(config.activation_method)]))
model.add(Dropout(rate=config.dropout))
utils.output_model_info(model)
return model
def __cas9_concat_model(self):
# Embedding and LSTM model is in the front
seq2vec_input = self.seq_input
rnn_output = self.__embedding_rnn(name_suffix='for')(seq2vec_input)
# Embedding and LSTM model for reverse seq
rev_seq2vec_input = self.rev_seq_input
rev_rnn_output = self.__embedding_rnn(name_suffix='rev')(rev_seq2vec_input) if config.rev_seq else rev_seq2vec_input
# concatenate rnn trained features and extra features
extra_raw_input = self.bio_features
if self.bio_features_len:
fully_connected_bio = self.__fully_connected(config.bio_fully_connected_layer_layout, self.bio_features_len, "bio")
processed_bio_features = fully_connected_bio(extra_raw_input)
else:
processed_bio_features = extra_raw_input
off_target_input = self.off_target_features
merged_features = concatenate([processed_bio_features, rnn_output, rev_rnn_output, off_target_input])
dropouted_merged_features = Dropout(rate=0.2)(merged_features)
# fully connected layer
used_seq_input_len = 2 * self.seq_input_len if config.rev_seq else self.seq_input_len
fully_connected_output = self.__fully_connected(config.fully_connected_layer_layout,
self.off_target_len + config.bio_fully_connected_layer_layout[-1] + used_seq_input_len * config.rnn_time_distributed_dim)(dropouted_merged_features)
dropouted_fully_connected_output = Dropout(rate=0.2)(fully_connected_output)
output = Dense(1, kernel_constraint=maxnorm(config.maxnorm))(dropouted_fully_connected_output)
# Build the model
crispr_model = Model(inputs=[seq2vec_input, rev_seq2vec_input, off_target_input, extra_raw_input], outputs=[output])
return crispr_model
def __cas9_mixed_model(self):
#Embedding and LSTM model is in the front
seq2vec_input = self.seq_input
rnn_output = self.__embedding_rnn(name_suffix='for')(seq2vec_input)
#Embedding and CNN model is in the front
seq2vec_input = self.seq_input
if config.seq_cnn:
cnn_output = self.__seq_embedding_cnn(input = seq2vec_input, name_suffix='for')
else:
cnn_output = self.__embedding_cnn(name_suffix='for')(seq2vec_input)
rev_seq2vec_input = self.rev_seq_input
rev_rnn_output = rev_seq2vec_input
# concatenate rnn trained features and extra features
extra_raw_input = self.bio_features
if self.bio_features_len:
fully_connected_bio = self.__fully_connected(config.bio_fully_connected_layer_layout, self.bio_features_len, "bio")
processed_bio_features = fully_connected_bio(extra_raw_input)
else:
processed_bio_features = extra_raw_input
config.bio_fully_connected_layer_layout[-1] = 0
off_target_input = self.off_target_features
merged_features = concatenate([processed_bio_features, cnn_output, rnn_output, off_target_input, rev_rnn_output])
dropouted_merged_features = Dropout(rate=0.2)(merged_features)
# fully connected layer
if config.seq_cnn:
cnn_len = config.cnn_levels[-1] * 2
else:
cnn_len = config.cnn_levels[-1]
used_seq_input_len = self.seq_input_len # self.seq_input_len
input_len = self.off_target_len + config.bio_fully_connected_layer_layout[-1] + used_seq_input_len * config.rnn_time_distributed_dim + cnn_len
fully_connected_output = self.__fully_connected(config.fully_connected_layer_layout, input_len)(dropouted_merged_features)
dropouted_fully_connected_output = Dropout(rate=0.2)(fully_connected_output)
output = Dense(1, kernel_constraint=maxnorm(config.maxnorm))(dropouted_fully_connected_output)
# Build the model
crispr_model = Model(inputs=[seq2vec_input, rev_seq2vec_input, off_target_input, extra_raw_input], outputs=[output])
return crispr_model
def __cas9_ensemble_model(self):
#Embedding and LSTM model is in the front
seq2vec_input = self.seq_input
rnn_output = self.__embedding_rnn(name_suffix='for')(seq2vec_input)
#Embedding and CNN model is in the front
cnn_input = self.for_cnn_input
if config.seq_cnn:
cnn_output = self.__seq_embedding_cnn(input=cnn_input, name_suffix='for', nt = 1)
else:
cnn_output = self.__embedding_cnn(name_suffix='for', nt = 1)(cnn_input)
rev_seq2vec_input = self.rev_seq_input
rev_rnn_output = rev_seq2vec_input
# concatenate rnn trained features and extra features
extra_raw_input = self.bio_features
if self.bio_features_len:
fully_connected_bio = self.__fully_connected(config.bio_fully_connected_layer_layout, self.bio_features_len, "bio")
processed_bio_features = fully_connected_bio(extra_raw_input)
else:
processed_bio_features = extra_raw_input
off_target_input = self.off_target_features
merged_features = concatenate([processed_bio_features, cnn_output, rnn_output, off_target_input, rev_rnn_output])
dropouted_merged_features = Dropout(rate=0.2)(merged_features)
# fully connected layer
if config.seq_cnn:
cnn_len = config.cnn_levels[-1] * 2
else:
cnn_len = config.cnn_levels[-1]
used_seq_input_len = self.seq_input_len # self.seq_input_len
input_len = self.off_target_len + config.bio_fully_connected_layer_layout[-1] + used_seq_input_len * config.rnn_time_distributed_dim + cnn_len
fully_connected_output = self.__fully_connected(config.fully_connected_layer_layout, input_len)(dropouted_merged_features)
dropouted_fully_connected_output = Dropout(rate=0.2)(fully_connected_output)
output = Dense(1, kernel_constraint=maxnorm(config.maxnorm))(dropouted_fully_connected_output)
# Build the model
crispr_model = Model(inputs=[seq2vec_input, rev_seq2vec_input, off_target_input, extra_raw_input, cnn_input], outputs=[output])
return crispr_model
def __cas9_cnn_model(self):
rev_seq2vec_input = self.rev_seq_input
rev_rnn_output = rev_seq2vec_input
seq2vec_input = self.seq_input
if config.seq_cnn:
cnn_output = self.__seq_embedding_cnn(input = seq2vec_input, name_suffix='for')
else:
cnn_output = self.__embedding_cnn(name_suffix='for')(seq2vec_input)
# concatenate rnn trained features and extra features
extra_raw_input = self.bio_features
if self.bio_features_len:
fully_connected_bio = self.__fully_connected(config.bio_fully_connected_layer_layout, self.bio_features_len, "bio")
processed_bio_features = fully_connected_bio(extra_raw_input)
else:
processed_bio_features = extra_raw_input
off_target_input = self.off_target_features
merged_features = concatenate([processed_bio_features, cnn_output, off_target_input, rev_rnn_output])
dropouted_merged_features = Dropout(rate=0.2)(merged_features)
# fully connected layer
# cnn_final_dim = ((config.embedding_vec_dim)/2)/2
# cnn_final_len = ((self.seq_input_len)/2)/2
# cnn_len = 64 * cnn_final_dim * cnn_final_len
if config.seq_cnn:
cnn_len = config.cnn_levels[-1] * 2
else:
cnn_len = config.cnn_levels[-1]
input_len = self.off_target_len + config.bio_fully_connected_layer_layout[-1] + cnn_len
fully_connected_output = self.__fully_connected(config.fully_connected_layer_layout, input_len)(dropouted_merged_features)
dropouted_fully_connected_output = Dropout(rate=0.2)(fully_connected_output)
output = Dense(1, kernel_constraint=maxnorm(config.maxnorm))(dropouted_fully_connected_output)
# Build the model
crispr_model = Model(inputs=[seq2vec_input, rev_seq2vec_input, off_target_input, extra_raw_input], outputs=[output])
return crispr_model
def __cas9_mul_model(self):
# Embedding and LSTM model is in the front
seq2vec_input = self.seq_input
rnn_output = self.__embedding_rnn(name_suffix='for')(seq2vec_input)
# Embedding and LSTM model for reverse seq
rev_seq2vec_input = self.rev_seq_input
rev_rnn_output = self.__embedding_rnn(name_suffix='rev')(rev_seq2vec_input) if config.rev_seq else rev_seq2vec_input
# extra_biological features fully connected nn
extra_raw_input = self.bio_features
bio = Dropout(rate=0.2)(extra_raw_input)
dropouted_extra_raw_input = Reshape([1, -1])(bio)
# off target matrix
off_target_input = self.off_target_features
dropout_off_target_input = Dropout(rate=0.2)(off_target_input)
rnn_output_total = Reshape([-1, 1])(concatenate([rnn_output, rev_rnn_output, dropout_off_target_input]))
merged_feature = Lambda(lambda x: tf.einsum('aij,ajk->aik', x[0], x[1]))([rnn_output_total, dropouted_extra_raw_input])
merged_features_input = Flatten()(merged_feature)
dropouted_merged_features_input = Dropout(rate=0.2)(merged_features_input)
used_seq_input_len = 2 * self.seq_input_len if config.rev_seq else self.seq_input_len
full_layer = self.__fully_connected(config.fully_connected_layer_layout,
(config.rnn_time_distributed_dim * used_seq_input_len + self.off_target_len) * self.bio_features_len)
full_layer_output = full_layer(dropouted_merged_features_input)
output = Dense(1, kernel_constraint=maxnorm(config.maxnorm))(full_layer_output)
crispr_model = Model(inputs=[seq2vec_input, rev_seq2vec_input, off_target_input, extra_raw_input], outputs=[output])
return crispr_model
def get_raw_model(self, method = config.model_type):
crispr_model = getattr(self, "_{!s}__cas9_{!s}_model".format(self.__class__.__name__, method),
self.__cas9_concat_model)()
return crispr_model
def get_model(self, method = config.model_type):
crispr_model = getattr(self, "_{!s}__cas9_{!s}_model".format(self.__class__.__name__, method), self.__cas9_concat_model)()
return self.compile_transfer_learning_model(crispr_model)
@classmethod
def compile_transfer_learning_model(cls, model):
custimized_rmsprop = rmsprop(lr=config.start_lr, decay=config.lr_decay)
model.compile(optimizer=custimized_rmsprop, loss='mse', metrics=[utils.revised_mse_loss, 'mse'])
return model
def get_tf_model(self, ground_truth, method = config.model_type):
global_step = tf.Variable(0, trainable=False)
learn_rate = tf.train.cosine_decay_restarts(learning_rate=0.001, global_step=global_step, first_decay_steps=100)
crispr_model = getattr(self, "_{!s}__cas9_{!s}_model".format(self.__class__.__name__, method),
self.__cas9_concat_model)()
loss = tf.losses.mean_squared_error(ground_truth, crispr_model.output)
rmsprop_optimizer = tf.train.RMSPropOptimizer(learning_rate=learn_rate).minimize(loss, global_step=global_step)
custimized_rmsprop = TFOptimizer(rmsprop_optimizer)
crispr_model.compile(optimizer=custimized_rmsprop, loss='mse', metrics=[utils.revised_mse_loss, 'mse'])
return crispr_model | 0.675765 | 0.251912 |
import getpass
import sys
import cryptography.hazmat.backends as backends
import cryptography.hazmat.primitives.asymmetric.rsa as rsa
import cryptography.hazmat.primitives.serialization as serial
import cryptography.hazmat.primitives.hashes as hashes
import cryptography.hazmat.primitives as primitives
import cryptography.hazmat.primitives.asymmetric.padding as padding
class crypto:
def generate(self, passW):
keyPair = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend= backends.default_backend()
)
privatePem = keyPair.private_bytes(
encoding=serial.Encoding.PEM,
format=serial.PrivateFormat.PKCS8,
encryption_algorithm=serial.BestAvailableEncryption(passW.encode())
)
publicPem = keyPair.public_key().public_bytes(
serial.Encoding.PEM,
serial.PublicFormat.SubjectPublicKeyInfo
)
privateFile = open("privKey.txt", "w")
publicFile = open("pubKey.txt", "w")
privateFile.write(privatePem.decode())
publicFile.write(publicPem.decode())
def encrypt(self, message="",mode=0):
#mode 0 = string
#mode 1 = file
publicFile = None
pubKey = None
outMess = None
publicFile = open("pubKey.txt", 'rb')
pubKey = serial.load_pem_public_key(
publicFile.read(),
backend=backends.default_backend()
)
if mode == 0:
return pubKey.encrypt(
message.encode(),
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
if mode == 1:
enc = pubKey.encrypt(
open(message, 'rb').read(),
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
open(message, "wb").write(enc)
return(message)
def decrypt(self, message="",mode=0, passW=""):
#mode 0 = string
#mode 1 = file
privateFile = None
privKey = None
privateFile = open("privKey.txt", 'rb')
privKey = serial.load_pem_private_key(
privateFile.read(),
password=<PASSWORD>W.encode(),
backend=backends.default_backend()
)
if mode == 0:
return privKey.decrypt(
message,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
if mode == 1:
dec = privKey.decrypt(
open(message, 'rb').read(),
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
open(message, "wb").write(dec)
return message
password= ""
if len(sys.argv) < 2:
password = getpass.getpass("password->")
else:
password = sys.argv[1]
print(password)
cry = crypto()
cry.generate(password)
encrypted = cry.encrypt("image.zip",1)
decrypted = cry.decrypt(encrypted, 1 , password)
print(decrypted) | Testing/cryptographic/crypto.py | import getpass
import sys
import cryptography.hazmat.backends as backends
import cryptography.hazmat.primitives.asymmetric.rsa as rsa
import cryptography.hazmat.primitives.serialization as serial
import cryptography.hazmat.primitives.hashes as hashes
import cryptography.hazmat.primitives as primitives
import cryptography.hazmat.primitives.asymmetric.padding as padding
class crypto:
def generate(self, passW):
keyPair = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend= backends.default_backend()
)
privatePem = keyPair.private_bytes(
encoding=serial.Encoding.PEM,
format=serial.PrivateFormat.PKCS8,
encryption_algorithm=serial.BestAvailableEncryption(passW.encode())
)
publicPem = keyPair.public_key().public_bytes(
serial.Encoding.PEM,
serial.PublicFormat.SubjectPublicKeyInfo
)
privateFile = open("privKey.txt", "w")
publicFile = open("pubKey.txt", "w")
privateFile.write(privatePem.decode())
publicFile.write(publicPem.decode())
def encrypt(self, message="",mode=0):
#mode 0 = string
#mode 1 = file
publicFile = None
pubKey = None
outMess = None
publicFile = open("pubKey.txt", 'rb')
pubKey = serial.load_pem_public_key(
publicFile.read(),
backend=backends.default_backend()
)
if mode == 0:
return pubKey.encrypt(
message.encode(),
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
if mode == 1:
enc = pubKey.encrypt(
open(message, 'rb').read(),
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
open(message, "wb").write(enc)
return(message)
def decrypt(self, message="",mode=0, passW=""):
#mode 0 = string
#mode 1 = file
privateFile = None
privKey = None
privateFile = open("privKey.txt", 'rb')
privKey = serial.load_pem_private_key(
privateFile.read(),
password=<PASSWORD>W.encode(),
backend=backends.default_backend()
)
if mode == 0:
return privKey.decrypt(
message,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
if mode == 1:
dec = privKey.decrypt(
open(message, 'rb').read(),
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
open(message, "wb").write(dec)
return message
password= ""
if len(sys.argv) < 2:
password = getpass.getpass("password->")
else:
password = sys.argv[1]
print(password)
cry = crypto()
cry.generate(password)
encrypted = cry.encrypt("image.zip",1)
decrypted = cry.decrypt(encrypted, 1 , password)
print(decrypted) | 0.297776 | 0.118947 |
import json
import sys
import Pyro4
import subscriber
import publisher
def subscriber_dict_to_class(classname, d):
print('deserializing {}'.format(d))
return subscriber.Subscriber(d['name'])
def publisher_dict_to_class(classname, d):
p = publisher.Publisher(d['name'], d['event'])
p.intermediary = d['intermediary']
return p
Pyro4.util.SerializerBase.register_dict_to_class('subscriber.Subscriber', subscriber_dict_to_class)
Pyro4.util.SerializerBase.register_dict_to_class('publisher.Publisher', publisher_dict_to_class)
@Pyro4.expose
@Pyro4.behavior(instance_mode='single')
class Intermediary(object):
def __init__(self, name):
super(Intermediary, self).__init__()
self._name = name
self._routing = {}
self._subscribers = {}
self._neighbours = set()
self._client = None
@property
def name(self):
return self._name
@property
def client(self):
return self._client
@client.setter
def client(self, value):
self._client = value
def print_neighbours(self):
print(self._neighbours)
def add_neighbours(self, nodelist):
self._neighbours.update(nodelist)
def subscription(self, node, event):
if self._client == node:
self._subscribers[node] = event
print("'{}' subscribed client '{}' for event '{}'".format(self, node.name, event))
else:
self._routing[node] = event
print("'{}' registered route '{}' for event '{}'".format(self, node.name, event))
[n.subscription(self, event) for n in self._neighbours if n.name != node.name]
def publish(self, node, event):
matchlist = [n for (n, e) in self._subscribers.items() if e == event]
[self.notifying(n, event) for n in matchlist]
forwardlist = [n for (n, e) in self._routing.items() if e == event]
[self.forwarding(n, event) for n in forwardlist if n != node]
def notifying(self, node, event):
print("'{}' notifying '{}' with event '{}'".format(self, node, event))
node.notify(event)
def forwarding(self, node, event):
print("'{}' forwarding to '{}' a published event '{}'".format(self, node.name, event))
node.publish(self, event)
def __str__(self):
return self._name
def __eq__(self, other):
return other and self.name == other.name
def __ne__(self, other):
return not(self == other)
def __hash__(self):
return hash(self.name)
def main():
sys.excepthook = Pyro4.util.excepthook
i1 = Intermediary('I1')
i2 = Intermediary('I2')
i3 = Intermediary('I3')
i1.add_neighbours([i2, i3])
i2.add_neighbours([i1])
i3.add_neighbours([i1])
with Pyro4.locateNS() as ns:
s1 = Pyro4.Proxy('PYRONAME:subscriber1')
s2 = Pyro4.Proxy('PYRONAME:subscriber2')
i2.client = s1
i3.client = s2
i2.subscription(s1, 'X')
print
i3.subscription(s2, 'Y')
print
with Pyro4.Daemon() as daemon:
i1_uri = daemon.register(i1)
i2_uri = daemon.register(i2)
i3_uri = daemon.register(i3)
with Pyro4.locateNS() as ns:
ns.register('intermediary1', i1_uri)
ns.register('intermediary2', i2_uri)
ns.register('intermediary3', i3_uri)
print('Intermediaries available.')
daemon.requestLoop()
if __name__ == '__main__':
main() | Python/intermediary.py | import json
import sys
import Pyro4
import subscriber
import publisher
def subscriber_dict_to_class(classname, d):
print('deserializing {}'.format(d))
return subscriber.Subscriber(d['name'])
def publisher_dict_to_class(classname, d):
p = publisher.Publisher(d['name'], d['event'])
p.intermediary = d['intermediary']
return p
Pyro4.util.SerializerBase.register_dict_to_class('subscriber.Subscriber', subscriber_dict_to_class)
Pyro4.util.SerializerBase.register_dict_to_class('publisher.Publisher', publisher_dict_to_class)
@Pyro4.expose
@Pyro4.behavior(instance_mode='single')
class Intermediary(object):
def __init__(self, name):
super(Intermediary, self).__init__()
self._name = name
self._routing = {}
self._subscribers = {}
self._neighbours = set()
self._client = None
@property
def name(self):
return self._name
@property
def client(self):
return self._client
@client.setter
def client(self, value):
self._client = value
def print_neighbours(self):
print(self._neighbours)
def add_neighbours(self, nodelist):
self._neighbours.update(nodelist)
def subscription(self, node, event):
if self._client == node:
self._subscribers[node] = event
print("'{}' subscribed client '{}' for event '{}'".format(self, node.name, event))
else:
self._routing[node] = event
print("'{}' registered route '{}' for event '{}'".format(self, node.name, event))
[n.subscription(self, event) for n in self._neighbours if n.name != node.name]
def publish(self, node, event):
matchlist = [n for (n, e) in self._subscribers.items() if e == event]
[self.notifying(n, event) for n in matchlist]
forwardlist = [n for (n, e) in self._routing.items() if e == event]
[self.forwarding(n, event) for n in forwardlist if n != node]
def notifying(self, node, event):
print("'{}' notifying '{}' with event '{}'".format(self, node, event))
node.notify(event)
def forwarding(self, node, event):
print("'{}' forwarding to '{}' a published event '{}'".format(self, node.name, event))
node.publish(self, event)
def __str__(self):
return self._name
def __eq__(self, other):
return other and self.name == other.name
def __ne__(self, other):
return not(self == other)
def __hash__(self):
return hash(self.name)
def main():
sys.excepthook = Pyro4.util.excepthook
i1 = Intermediary('I1')
i2 = Intermediary('I2')
i3 = Intermediary('I3')
i1.add_neighbours([i2, i3])
i2.add_neighbours([i1])
i3.add_neighbours([i1])
with Pyro4.locateNS() as ns:
s1 = Pyro4.Proxy('PYRONAME:subscriber1')
s2 = Pyro4.Proxy('PYRONAME:subscriber2')
i2.client = s1
i3.client = s2
i2.subscription(s1, 'X')
print
i3.subscription(s2, 'Y')
print
with Pyro4.Daemon() as daemon:
i1_uri = daemon.register(i1)
i2_uri = daemon.register(i2)
i3_uri = daemon.register(i3)
with Pyro4.locateNS() as ns:
ns.register('intermediary1', i1_uri)
ns.register('intermediary2', i2_uri)
ns.register('intermediary3', i3_uri)
print('Intermediaries available.')
daemon.requestLoop()
if __name__ == '__main__':
main() | 0.455925 | 0.130285 |
import math
import operator
import pytest
from capacity import MiB, byte, GiB, KiB, Capacity, bit, from_string, MB, GB, PiB, __version__
from numbers import Integral
from operator import truediv
from .utils import assert_value_error
def test_version():
assert isinstance(__version__.__version__, str)
def test_0_modulo():
assert (0 % byte) == 0
assert ((0 * byte) % byte) == 0
def test_truth():
assert byte
assert GiB
assert not (0 * byte)
def test_bits_attribute():
assert (666 * bit).bits == 666
def test_hashability():
assert hash(MiB) == hash(MiB.bits)
def test_equality():
small = Capacity(666)
great = Capacity(667)
assert small == small
assert great == great
assert small != great
assert great != small
assert great > small
assert small > (- small)
assert great > (- great)
assert 0 > (- small)
assert 0 < small
assert (- small) < 0
assert great >= small
assert small < great
assert small <= great
# negative tests
assert not (small > great)
assert not (small < -small)
assert not (small >= great)
assert not (great < small)
assert not (great <= small)
assert not (small != small)
assert not (great != great)
assert not (great == small)
assert not (small == great)
def test_equality_to_other_objects():
for obj in ("some_string", "", 2.0, 2, True, False):
assert MiB != obj
assert not (MiB == obj)
def test_multiplication_by_zero():
assert isinstance(0 * MiB, Capacity)
def test_equality_to_zero():
assert not (MiB == 0)
assert MiB != 0
assert not (0 * MiB != 0)
assert 0 * MiB == 0
def test_new_style_formatting():
assert "{0}".format(3 * GiB) == str(3 * GiB)
assert "{0!r}".format(3 * GiB) == repr(3 * GiB)
assert "{0:GiB}".format(3 * GiB) == "3.0"
with pytest.raises(ValueError):
assert "{0:kjdkj}".format(3 * GiB)
assert u"{0:byte}".format(100 * byte) == u"100.0"
def test_new_style_with_specifiers():
assert "{0:<5MiB}".format(MiB) == "1.0 "
assert "{0:>5MiB}".format(MiB) == " 1.0"
assert "{0:^5MiB}".format(MiB) == " 1.0 "
assert "{0:05MiB}".format(MiB) == "001.0"
assert "{0:gGiB}".format(3 * GiB) == "3"
assert "{0:.2fGiB}".format(3 * GiB) == "3.00"
assert "{0:+.2fGiB}".format(3 * GiB) == "+3.00"
def test_new_style_with_unit():
assert "{0:<10MiB!}".format(MiB) == "1.0MiB "
def test_simple_textual_representation():
_assert_str_repr_equals(bit, '1 bit', '1*bit')
_assert_str_repr_equals(bit, '1 bit', '1*bit')
@pytest.mark.parametrize('value_and_expected', [
(1013089494912 * KiB, '0.92 PiB', '1013089494912*KiB'),
(1907349 * MiB, '2 TB', '1907349*MiB'),
(110 * MiB, '110 MiB', '110*MiB'),
(0.5 * MiB, '512 KiB', '512*KiB'),
(0.5 * MiB + bit, '512 KiB', '{0}*bit'.format(int((0.5 * MiB).bits + 1))),
(0.5 * MiB - bit, '512 KiB', '{0}*bit'.format(int((0.5 * MiB).bits - 1))),
(2 * MiB, '2 MiB', '2*MiB'),
(GiB - bit, '1 GiB', '{0}*bit'.format(GiB.bits - 1)),
(GiB - 0.5 * bit, '1 GiB', '{0}*bit'.format(GiB.bits - 0.5)),
# fractions with two decimal places
(0.99 * KiB, '0.99 KiB', '{0}*bit'.format(0.99 * 1024 * 8)),
(0.59 * MiB, '0.59 MiB', '{0}*bit'.format(0.59 * 1024 * 1024 * 8)),
# fractions with multiple decimal places
(9122 * byte, '9122 byte', '9122*byte'),
(23124232 * byte, '22.05 MiB', '23124232*byte'),
(58918694226 * byte, '54.87 GiB', '58918694226*byte'),
(213124232 * byte, '0.2 GiB', '213124232*byte'),
# test *B instead of only *iB
(0.5 * MB - bit, '500 KB', '{0}*bit'.format(int((0.5 * MB).bits - 1))),
(0.5 * MB, '500 KB', '500*KB'),
(2 * MB, '2 MB', '2*MB'),
(0 * bit, '0 bit', '0*bit'),
])
def test_representation(value_and_expected):
_assert_str_repr_equals(*value_and_expected)
def _assert_str_repr_equals(obj, str_value, repr_value):
assert str(obj) == str_value
assert repr(obj) == repr_value
def test_add():
assert (MiB + MiB) == Capacity((MiB.bits * 2))
assert (MiB + 0) == MiB
assert (0 + MiB) == MiB
def test_iadd():
a = MiB
a += MiB
assert a == (2 * MiB)
def test_abs():
assert abs((- MiB)) == MiB
assert abs(MiB) == MiB
def test_sub():
assert (MiB - bit) == Capacity((MiB.bits - 1))
assert (0 - bit) == (- bit)
assert (bit - 0) == bit
def test_isub():
a = 2 * MiB
a -= MiB
assert a == MiB
def test_neg():
assert (- MiB).bits == (- MiB.bits)
assert (MiB + (- MiB)) == 0
assert ((- MiB) + (2 * MiB)) == MiB
def test_mul():
assert (2 * MiB) == Capacity((MiB.bits * 2))
assert (0 * MiB) == Capacity(0)
def test_div():
assert (MiB / 2) == (0.5 * MiB)
assert (GB / 10) == (100 * MB)
assert ((2 * MiB) / 2) == MiB
assert ((1.5 * MiB) / MiB) == 1.5
assert ((2 * MiB) / MiB) == 2
assert (0 / MiB) == 0
assert ((2 * MiB) / MiB) == 2
def test_truediv():
assert truediv(MiB, 2) == (0.5 * MiB)
assert truediv((2 * MiB), 2) == MiB
assert truediv((1.5 * MiB), MiB) == 1.5
assert truediv((2 * MiB), MiB) == 2
assert truediv(0, MiB) == 0
assert truediv((2 * MiB), MiB) == 2
def test_mod():
assert (((2 * MiB) + bit) % MiB) == bit
assert (0 % MiB) == 0
assert ((0.5 * MiB) % (0.5 * MiB)) == 0
def test_floordiv():
assert (((2 * MiB) + bit) // MiB) == 2
assert isinstance((((2 * MiB) + bit) // MiB), Integral)
assert isinstance(MiB // MiB, Integral)
assert ((2 * MiB) // 2) == MiB
assert ((2 * MiB) // MiB) == 2
assert isinstance(((2 * MiB) // MiB), Integral)
assert ((2.001 * MiB) // 2) == (8392802 * bit)
assert (0 // MiB) == 0
def test_roundup():
assert MiB.roundup(MiB) == MiB
assert (MiB + bit).roundup(MiB) == (2 * MiB)
def test_rounddown():
assert MiB.rounddown(MiB) == MiB
assert (MiB - bit).rounddown(MiB) == 0
assert ((3 * MiB) - bit).rounddown(MiB) == (2 * MiB)
def test_invalid_arithmetic():
# pylint: disable=pointless-statement
size = 668 * bit
with pytest.raises(TypeError):
size * size
with pytest.raises(TypeError):
size + 3
with pytest.raises(TypeError):
3 / size
with pytest.raises(TypeError):
3 % size
with pytest.raises(TypeError):
size % 3
with pytest.raises(TypeError):
size < 2
with pytest.raises(TypeError):
size > 2
with pytest.raises(ZeroDivisionError):
size / 0
with pytest.raises(ZeroDivisionError):
size % 0
def test_from_string_construction():
assert Capacity('20*GiB') == (20 * GiB)
def test_from_string_fractions():
assert Capacity('1119.63 * TB') == (1119630 * GB)
@pytest.mark.parametrize('string_and_value', [
('GiB', GiB),
('MiB', MiB),
("2*GiB", 2 * GiB),
("2GiB", 2 * GiB),
("2* GiB", 2 * GiB),
("2 *GiB", 2 * GiB),
("20b", 20 * byte),
("20*b", 20 * byte),
])
def test_from_string(string_and_value):
string, value = string_and_value
assert from_string(string) == value
def test_invalid_patterns():
check = assert_value_error
check("2")
check("bla")
check("GIB")
check("1*GiB*bla")
check("1+2")
check("1*2")
def test_huge_long_values():
assert ((1000000000000000064 * byte) // byte) == 1000000000000000064
def test_simple_str():
assert repr(1 * GiB) == '1*GiB'
assert str(1 * GiB) == '1 GiB'
def test_inf():
inf = float('inf')
assert inf * byte == inf * byte
assert inf * byte > GiB
def test_inf_repr_str():
inf = float('inf') * byte
assert 'inf' in str(inf).lower()
assert 'inf' in repr(inf).lower()
def test_compare_to_zero_capacity():
assert (0 * byte) < 2
assert (0 * byte) < GiB
@pytest.mark.parametrize('op', [operator.floordiv, operator.sub, operator.add, operator.mul, operator.truediv])
def test_floordiv_other_class(op):
expected_value = object()
class OtherClass(object):
def __rfloordiv__(self, other):
return expected_value
def __radd__(self, other):
return expected_value
def __rsub__(self, other):
return expected_value
def __rmul__(self, other):
return expected_value
def __rtruediv__(self, other):
return expected_value
__rdiv__ = __rtruediv__
result = op(KiB, OtherClass())
assert result is expected_value | tests/test_capacity.py | import math
import operator
import pytest
from capacity import MiB, byte, GiB, KiB, Capacity, bit, from_string, MB, GB, PiB, __version__
from numbers import Integral
from operator import truediv
from .utils import assert_value_error
def test_version():
assert isinstance(__version__.__version__, str)
def test_0_modulo():
assert (0 % byte) == 0
assert ((0 * byte) % byte) == 0
def test_truth():
assert byte
assert GiB
assert not (0 * byte)
def test_bits_attribute():
assert (666 * bit).bits == 666
def test_hashability():
assert hash(MiB) == hash(MiB.bits)
def test_equality():
small = Capacity(666)
great = Capacity(667)
assert small == small
assert great == great
assert small != great
assert great != small
assert great > small
assert small > (- small)
assert great > (- great)
assert 0 > (- small)
assert 0 < small
assert (- small) < 0
assert great >= small
assert small < great
assert small <= great
# negative tests
assert not (small > great)
assert not (small < -small)
assert not (small >= great)
assert not (great < small)
assert not (great <= small)
assert not (small != small)
assert not (great != great)
assert not (great == small)
assert not (small == great)
def test_equality_to_other_objects():
for obj in ("some_string", "", 2.0, 2, True, False):
assert MiB != obj
assert not (MiB == obj)
def test_multiplication_by_zero():
assert isinstance(0 * MiB, Capacity)
def test_equality_to_zero():
assert not (MiB == 0)
assert MiB != 0
assert not (0 * MiB != 0)
assert 0 * MiB == 0
def test_new_style_formatting():
assert "{0}".format(3 * GiB) == str(3 * GiB)
assert "{0!r}".format(3 * GiB) == repr(3 * GiB)
assert "{0:GiB}".format(3 * GiB) == "3.0"
with pytest.raises(ValueError):
assert "{0:kjdkj}".format(3 * GiB)
assert u"{0:byte}".format(100 * byte) == u"100.0"
def test_new_style_with_specifiers():
assert "{0:<5MiB}".format(MiB) == "1.0 "
assert "{0:>5MiB}".format(MiB) == " 1.0"
assert "{0:^5MiB}".format(MiB) == " 1.0 "
assert "{0:05MiB}".format(MiB) == "001.0"
assert "{0:gGiB}".format(3 * GiB) == "3"
assert "{0:.2fGiB}".format(3 * GiB) == "3.00"
assert "{0:+.2fGiB}".format(3 * GiB) == "+3.00"
def test_new_style_with_unit():
assert "{0:<10MiB!}".format(MiB) == "1.0MiB "
def test_simple_textual_representation():
_assert_str_repr_equals(bit, '1 bit', '1*bit')
_assert_str_repr_equals(bit, '1 bit', '1*bit')
@pytest.mark.parametrize('value_and_expected', [
(1013089494912 * KiB, '0.92 PiB', '1013089494912*KiB'),
(1907349 * MiB, '2 TB', '1907349*MiB'),
(110 * MiB, '110 MiB', '110*MiB'),
(0.5 * MiB, '512 KiB', '512*KiB'),
(0.5 * MiB + bit, '512 KiB', '{0}*bit'.format(int((0.5 * MiB).bits + 1))),
(0.5 * MiB - bit, '512 KiB', '{0}*bit'.format(int((0.5 * MiB).bits - 1))),
(2 * MiB, '2 MiB', '2*MiB'),
(GiB - bit, '1 GiB', '{0}*bit'.format(GiB.bits - 1)),
(GiB - 0.5 * bit, '1 GiB', '{0}*bit'.format(GiB.bits - 0.5)),
# fractions with two decimal places
(0.99 * KiB, '0.99 KiB', '{0}*bit'.format(0.99 * 1024 * 8)),
(0.59 * MiB, '0.59 MiB', '{0}*bit'.format(0.59 * 1024 * 1024 * 8)),
# fractions with multiple decimal places
(9122 * byte, '9122 byte', '9122*byte'),
(23124232 * byte, '22.05 MiB', '23124232*byte'),
(58918694226 * byte, '54.87 GiB', '58918694226*byte'),
(213124232 * byte, '0.2 GiB', '213124232*byte'),
# test *B instead of only *iB
(0.5 * MB - bit, '500 KB', '{0}*bit'.format(int((0.5 * MB).bits - 1))),
(0.5 * MB, '500 KB', '500*KB'),
(2 * MB, '2 MB', '2*MB'),
(0 * bit, '0 bit', '0*bit'),
])
def test_representation(value_and_expected):
_assert_str_repr_equals(*value_and_expected)
def _assert_str_repr_equals(obj, str_value, repr_value):
assert str(obj) == str_value
assert repr(obj) == repr_value
def test_add():
assert (MiB + MiB) == Capacity((MiB.bits * 2))
assert (MiB + 0) == MiB
assert (0 + MiB) == MiB
def test_iadd():
a = MiB
a += MiB
assert a == (2 * MiB)
def test_abs():
assert abs((- MiB)) == MiB
assert abs(MiB) == MiB
def test_sub():
assert (MiB - bit) == Capacity((MiB.bits - 1))
assert (0 - bit) == (- bit)
assert (bit - 0) == bit
def test_isub():
a = 2 * MiB
a -= MiB
assert a == MiB
def test_neg():
assert (- MiB).bits == (- MiB.bits)
assert (MiB + (- MiB)) == 0
assert ((- MiB) + (2 * MiB)) == MiB
def test_mul():
assert (2 * MiB) == Capacity((MiB.bits * 2))
assert (0 * MiB) == Capacity(0)
def test_div():
assert (MiB / 2) == (0.5 * MiB)
assert (GB / 10) == (100 * MB)
assert ((2 * MiB) / 2) == MiB
assert ((1.5 * MiB) / MiB) == 1.5
assert ((2 * MiB) / MiB) == 2
assert (0 / MiB) == 0
assert ((2 * MiB) / MiB) == 2
def test_truediv():
assert truediv(MiB, 2) == (0.5 * MiB)
assert truediv((2 * MiB), 2) == MiB
assert truediv((1.5 * MiB), MiB) == 1.5
assert truediv((2 * MiB), MiB) == 2
assert truediv(0, MiB) == 0
assert truediv((2 * MiB), MiB) == 2
def test_mod():
assert (((2 * MiB) + bit) % MiB) == bit
assert (0 % MiB) == 0
assert ((0.5 * MiB) % (0.5 * MiB)) == 0
def test_floordiv():
assert (((2 * MiB) + bit) // MiB) == 2
assert isinstance((((2 * MiB) + bit) // MiB), Integral)
assert isinstance(MiB // MiB, Integral)
assert ((2 * MiB) // 2) == MiB
assert ((2 * MiB) // MiB) == 2
assert isinstance(((2 * MiB) // MiB), Integral)
assert ((2.001 * MiB) // 2) == (8392802 * bit)
assert (0 // MiB) == 0
def test_roundup():
assert MiB.roundup(MiB) == MiB
assert (MiB + bit).roundup(MiB) == (2 * MiB)
def test_rounddown():
assert MiB.rounddown(MiB) == MiB
assert (MiB - bit).rounddown(MiB) == 0
assert ((3 * MiB) - bit).rounddown(MiB) == (2 * MiB)
def test_invalid_arithmetic():
# pylint: disable=pointless-statement
size = 668 * bit
with pytest.raises(TypeError):
size * size
with pytest.raises(TypeError):
size + 3
with pytest.raises(TypeError):
3 / size
with pytest.raises(TypeError):
3 % size
with pytest.raises(TypeError):
size % 3
with pytest.raises(TypeError):
size < 2
with pytest.raises(TypeError):
size > 2
with pytest.raises(ZeroDivisionError):
size / 0
with pytest.raises(ZeroDivisionError):
size % 0
def test_from_string_construction():
assert Capacity('20*GiB') == (20 * GiB)
def test_from_string_fractions():
assert Capacity('1119.63 * TB') == (1119630 * GB)
@pytest.mark.parametrize('string_and_value', [
('GiB', GiB),
('MiB', MiB),
("2*GiB", 2 * GiB),
("2GiB", 2 * GiB),
("2* GiB", 2 * GiB),
("2 *GiB", 2 * GiB),
("20b", 20 * byte),
("20*b", 20 * byte),
])
def test_from_string(string_and_value):
string, value = string_and_value
assert from_string(string) == value
def test_invalid_patterns():
check = assert_value_error
check("2")
check("bla")
check("GIB")
check("1*GiB*bla")
check("1+2")
check("1*2")
def test_huge_long_values():
assert ((1000000000000000064 * byte) // byte) == 1000000000000000064
def test_simple_str():
assert repr(1 * GiB) == '1*GiB'
assert str(1 * GiB) == '1 GiB'
def test_inf():
inf = float('inf')
assert inf * byte == inf * byte
assert inf * byte > GiB
def test_inf_repr_str():
inf = float('inf') * byte
assert 'inf' in str(inf).lower()
assert 'inf' in repr(inf).lower()
def test_compare_to_zero_capacity():
assert (0 * byte) < 2
assert (0 * byte) < GiB
@pytest.mark.parametrize('op', [operator.floordiv, operator.sub, operator.add, operator.mul, operator.truediv])
def test_floordiv_other_class(op):
expected_value = object()
class OtherClass(object):
def __rfloordiv__(self, other):
return expected_value
def __radd__(self, other):
return expected_value
def __rsub__(self, other):
return expected_value
def __rmul__(self, other):
return expected_value
def __rtruediv__(self, other):
return expected_value
__rdiv__ = __rtruediv__
result = op(KiB, OtherClass())
assert result is expected_value | 0.710126 | 0.80038 |
import argparse
from collections import defaultdict
from os import remove
from random import randrange
import genanki
from pycasia import CASIA
from hsk import HSK
from models import get_model
EXAMPLE_COUNT = 50
def create_deck(name, character_list=None, example_count=30):
"""
Create a deck with the given requirements.
:param name: The deck name
:param character_list: A list of characters to select. If not given, all characters in the dataset will be used.
:param example_count: How many examples per character to include. Default is 30.
:return: Nothing.
"""
# Must be unique. See genanki details for more.
deck_id = randrange(1 << 30, 1 << 31)
print("Creating deck %s" % name)
# Create deck
deck = genanki.Deck(deck_id, name)
# Initialize data collection
data = CASIA.CASIA()
deck_data = defaultdict(list)
media = []
# Get data and create media
characters_loaded = 0
for image, character in data.load_character_images():
# Only include requested characters
if character_list is None or character in character_list:
# Only include as many examples as requested
count = len(deck_data[character])
if count < example_count:
filename = "%s_%s.jpg" % (character, len(deck_data[character]) + 1)
image.save(filename)
deck_data[character].append(filename)
media.append(filename)
characters_loaded = characters_loaded + 1
# Early stop if you have enough examples
if character_list is None or characters_loaded >= len(character_list) * example_count:
if len([character for character in deck_data if len(deck_data[character]) < example_count]) == 0:
break
# Create notes
print("Creating notes")
for character in deck_data:
note_fields = [character]
examples = ["<img src=\"%s\">" % image for image in deck_data[character]]
assert len(examples) == example_count, "Wrong number of examples for %s" % character
note_fields.extend(examples)
my_note = genanki.Note(model=get_model(example_count=example_count), fields=note_fields)
deck.add_note(my_note)
# Create the package and output
print("Creating final output")
package = genanki.Package(deck)
package.media_files = media
filename = '%s.apkg' % name
package.write_to_file(filename)
print("Created deck %s" % filename)
# Delete all intermediate files
print("Cleaning up")
for path in media:
remove(path)
def make_hsk_decks():
create_deck("HSK1", character_list=HSK["HSK1"], example_count=EXAMPLE_COUNT)
create_deck("HSK2", character_list=HSK["HSK2"], example_count=EXAMPLE_COUNT)
create_deck("HSK3", character_list=HSK["HSK3"], example_count=EXAMPLE_COUNT)
create_deck("HSK4", character_list=HSK["HSK4"], example_count=EXAMPLE_COUNT)
create_deck("HSK5", character_list=HSK["HSK5"], example_count=EXAMPLE_COUNT)
create_deck("HSK6", character_list=HSK["HSK6"], example_count=EXAMPLE_COUNT)
def main():
# make_hsk_decks()
parser = argparse.ArgumentParser(description='Create Anki decks based on characters .')
parser.add_argument('name', nargs=1, type=str, help='What do we call the deck?')
parser.add_argument('--count', nargs=1, type=int, help="How many examples to create", required=False)
parser.add_argument('characters', nargs='*', type=str, help="Which characters should we use?")
args = parser.parse_args()
deck_name = args.name[0]
characters = args.characters
if args.count is not None:
example_count = args.count[0]
create_deck(deck_name, character_list=characters, example_count=example_count)
else:
create_deck(deck_name, character_list=characters)
if __name__ == '__main__':
main() | main.py | import argparse
from collections import defaultdict
from os import remove
from random import randrange
import genanki
from pycasia import CASIA
from hsk import HSK
from models import get_model
EXAMPLE_COUNT = 50
def create_deck(name, character_list=None, example_count=30):
"""
Create a deck with the given requirements.
:param name: The deck name
:param character_list: A list of characters to select. If not given, all characters in the dataset will be used.
:param example_count: How many examples per character to include. Default is 30.
:return: Nothing.
"""
# Must be unique. See genanki details for more.
deck_id = randrange(1 << 30, 1 << 31)
print("Creating deck %s" % name)
# Create deck
deck = genanki.Deck(deck_id, name)
# Initialize data collection
data = CASIA.CASIA()
deck_data = defaultdict(list)
media = []
# Get data and create media
characters_loaded = 0
for image, character in data.load_character_images():
# Only include requested characters
if character_list is None or character in character_list:
# Only include as many examples as requested
count = len(deck_data[character])
if count < example_count:
filename = "%s_%s.jpg" % (character, len(deck_data[character]) + 1)
image.save(filename)
deck_data[character].append(filename)
media.append(filename)
characters_loaded = characters_loaded + 1
# Early stop if you have enough examples
if character_list is None or characters_loaded >= len(character_list) * example_count:
if len([character for character in deck_data if len(deck_data[character]) < example_count]) == 0:
break
# Create notes
print("Creating notes")
for character in deck_data:
note_fields = [character]
examples = ["<img src=\"%s\">" % image for image in deck_data[character]]
assert len(examples) == example_count, "Wrong number of examples for %s" % character
note_fields.extend(examples)
my_note = genanki.Note(model=get_model(example_count=example_count), fields=note_fields)
deck.add_note(my_note)
# Create the package and output
print("Creating final output")
package = genanki.Package(deck)
package.media_files = media
filename = '%s.apkg' % name
package.write_to_file(filename)
print("Created deck %s" % filename)
# Delete all intermediate files
print("Cleaning up")
for path in media:
remove(path)
def make_hsk_decks():
create_deck("HSK1", character_list=HSK["HSK1"], example_count=EXAMPLE_COUNT)
create_deck("HSK2", character_list=HSK["HSK2"], example_count=EXAMPLE_COUNT)
create_deck("HSK3", character_list=HSK["HSK3"], example_count=EXAMPLE_COUNT)
create_deck("HSK4", character_list=HSK["HSK4"], example_count=EXAMPLE_COUNT)
create_deck("HSK5", character_list=HSK["HSK5"], example_count=EXAMPLE_COUNT)
create_deck("HSK6", character_list=HSK["HSK6"], example_count=EXAMPLE_COUNT)
def main():
# make_hsk_decks()
parser = argparse.ArgumentParser(description='Create Anki decks based on characters .')
parser.add_argument('name', nargs=1, type=str, help='What do we call the deck?')
parser.add_argument('--count', nargs=1, type=int, help="How many examples to create", required=False)
parser.add_argument('characters', nargs='*', type=str, help="Which characters should we use?")
args = parser.parse_args()
deck_name = args.name[0]
characters = args.characters
if args.count is not None:
example_count = args.count[0]
create_deck(deck_name, character_list=characters, example_count=example_count)
else:
create_deck(deck_name, character_list=characters)
if __name__ == '__main__':
main() | 0.610802 | 0.322366 |
import base64
from odoo import _, api, fields, models
from odoo.exceptions import UserError, ValidationError
class AccountPaymentOrder(models.Model):
_name = "account.payment.order"
_description = "Payment Order"
_inherit = ["mail.thread"]
_order = "id desc"
_check_company_auto = True
name = fields.Char(string="Number", readonly=True, copy=False)
payment_mode_id = fields.Many2one(
comodel_name="account.payment.mode",
required=True,
ondelete="restrict",
tracking=True,
states={"draft": [("readonly", False)]},
check_company=True,
)
payment_type = fields.Selection(
selection=[("inbound", "Inbound"), ("outbound", "Outbound")],
string="Payment Type",
readonly=True,
required=True,
)
payment_method_id = fields.Many2one(
comodel_name="account.payment.method",
related="payment_mode_id.payment_method_id",
readonly=True,
store=True,
)
company_id = fields.Many2one(
related="payment_mode_id.company_id", store=True, readonly=True
)
company_currency_id = fields.Many2one(
related="payment_mode_id.company_id.currency_id", store=True, readonly=True
)
bank_account_link = fields.Selection(
related="payment_mode_id.bank_account_link", readonly=True
)
allowed_journal_ids = fields.Many2many(
comodel_name="account.journal",
compute="_compute_allowed_journal_ids",
string="Allowed journals",
)
journal_id = fields.Many2one(
comodel_name="account.journal",
string="Bank Journal",
ondelete="restrict",
readonly=True,
states={"draft": [("readonly", False)]},
tracking=True,
check_company=True,
)
# The journal_id field is only required at confirm step, to
# allow auto-creation of payment order from invoice
company_partner_bank_id = fields.Many2one(
related="journal_id.bank_account_id",
string="Company Bank Account",
readonly=True,
)
state = fields.Selection(
selection=[
("draft", "Draft"),
("open", "Confirmed"),
("generated", "File Generated"),
("uploaded", "File Uploaded"),
("done", "Done"),
("cancel", "Cancel"),
],
string="Status",
readonly=True,
copy=False,
default="draft",
tracking=True,
)
date_prefered = fields.Selection(
selection=[
("now", "Immediately"),
("due", "Due Date"),
("fixed", "Fixed Date"),
],
string="Payment Execution Date Type",
required=True,
default="due",
tracking=True,
readonly=True,
states={"draft": [("readonly", False)]},
)
date_scheduled = fields.Date(
string="Payment Execution Date",
readonly=True,
states={"draft": [("readonly", False)]},
tracking=True,
help="Select a requested date of execution if you selected 'Due Date' "
"as the Payment Execution Date Type.",
)
date_generated = fields.Date(string="File Generation Date", readonly=True)
date_uploaded = fields.Date(string="File Upload Date", readonly=True)
date_done = fields.Date(string="Done Date", readonly=True)
generated_user_id = fields.Many2one(
comodel_name="res.users",
string="Generated by",
readonly=True,
ondelete="restrict",
copy=False,
check_company=True,
)
payment_line_ids = fields.One2many(
comodel_name="account.payment.line",
inverse_name="order_id",
string="Transaction Lines",
readonly=True,
states={"draft": [("readonly", False)]},
)
bank_line_ids = fields.One2many(
comodel_name="bank.payment.line",
inverse_name="order_id",
string="Bank Payment Lines",
readonly=True,
help="The bank payment lines are used to generate the payment file. "
"They are automatically created from transaction lines upon "
"confirmation of the payment order: one bank payment line can "
"group several transaction lines if the option "
"'Group Transactions in Payment Orders' is active on the payment "
"mode.",
)
total_company_currency = fields.Monetary(
compute="_compute_total", store=True, currency_field="company_currency_id"
)
bank_line_count = fields.Integer(
compute="_compute_bank_line_count", string="Number of Bank Lines"
)
move_ids = fields.One2many(
comodel_name="account.move",
inverse_name="payment_order_id",
string="Journal Entries",
readonly=True,
)
description = fields.Char()
@api.depends("payment_mode_id")
def _compute_allowed_journal_ids(self):
for record in self:
if record.payment_mode_id.bank_account_link == "fixed":
record.allowed_journal_ids = record.payment_mode_id.fixed_journal_id
elif record.payment_mode_id.bank_account_link == "variable":
record.allowed_journal_ids = record.payment_mode_id.variable_journal_ids
else:
record.allowed_journal_ids = False
def unlink(self):
for order in self:
if order.state == "uploaded":
raise UserError(
_(
"You cannot delete an uploaded payment order. You can "
"cancel it in order to do so."
)
)
return super(AccountPaymentOrder, self).unlink()
@api.constrains("payment_type", "payment_mode_id")
def payment_order_constraints(self):
for order in self:
if (
order.payment_mode_id.payment_type
and order.payment_mode_id.payment_type != order.payment_type
):
raise ValidationError(
_(
"The payment type (%s) is not the same as the payment "
"type of the payment mode (%s)"
)
% (order.payment_type, order.payment_mode_id.payment_type)
)
@api.constrains("date_scheduled")
def check_date_scheduled(self):
today = fields.Date.context_today(self)
for order in self:
if order.date_scheduled:
if order.date_scheduled < today:
raise ValidationError(
_(
"On payment order %s, the Payment Execution Date "
"is in the past (%s)."
)
% (order.name, order.date_scheduled)
)
@api.depends("payment_line_ids", "payment_line_ids.amount_company_currency")
def _compute_total(self):
for rec in self:
rec.total_company_currency = sum(
rec.mapped("payment_line_ids.amount_company_currency") or [0.0]
)
@api.depends("bank_line_ids")
def _compute_bank_line_count(self):
for order in self:
order.bank_line_count = len(order.bank_line_ids)
@api.model
def create(self, vals):
if vals.get("name", "New") == "New":
vals["name"] = (
self.env["ir.sequence"].next_by_code("account.payment.order") or "New"
)
if vals.get("payment_mode_id"):
payment_mode = self.env["account.payment.mode"].browse(
vals["payment_mode_id"]
)
vals["payment_type"] = payment_mode.payment_type
if payment_mode.bank_account_link == "fixed":
vals["journal_id"] = payment_mode.fixed_journal_id.id
if not vals.get("date_prefered") and payment_mode.default_date_prefered:
vals["date_prefered"] = payment_mode.default_date_prefered
return super(AccountPaymentOrder, self).create(vals)
@api.onchange("payment_mode_id")
def payment_mode_id_change(self):
if len(self.allowed_journal_ids) == 1:
self.journal_id = self.allowed_journal_ids
if self.payment_mode_id.default_date_prefered:
self.date_prefered = self.payment_mode_id.default_date_prefered
def action_done(self):
self.write({"date_done": fields.Date.context_today(self), "state": "done"})
return True
def action_done_cancel(self):
for move in self.move_ids:
move.button_cancel()
for move_line in move.line_ids:
move_line.remove_move_reconcile()
move.with_context(force_delete=True).unlink()
self.action_cancel()
return True
def cancel2draft(self):
self.write({"state": "draft"})
return True
def action_cancel(self):
for order in self:
order.write({"state": "cancel"})
order.bank_line_ids.unlink()
return True
@api.model
def _prepare_bank_payment_line(self, paylines):
return {
"order_id": paylines[0].order_id.id,
"payment_line_ids": [(6, 0, paylines.ids)],
"communication": "-".join([line.communication for line in paylines]),
}
def draft2open(self):
"""
Called when you click on the 'Confirm' button
Set the 'date' on payment line depending on the 'date_prefered'
setting of the payment.order
Re-generate the bank payment lines
"""
bplo = self.env["bank.payment.line"]
today = fields.Date.context_today(self)
for order in self:
if not order.journal_id:
raise UserError(
_("Missing Bank Journal on payment order %s.") % order.name
)
if (
order.payment_method_id.bank_account_required
and not order.journal_id.bank_account_id
):
raise UserError(
_("Missing bank account on bank journal '%s'.")
% order.journal_id.display_name
)
if not order.payment_line_ids:
raise UserError(
_("There are no transactions on payment order %s.") % order.name
)
# Delete existing bank payment lines
order.bank_line_ids.unlink()
# Create the bank payment lines from the payment lines
group_paylines = {} # key = hashcode
for payline in order.payment_line_ids:
payline.draft2open_payment_line_check()
# Compute requested payment date
if order.date_prefered == "due":
requested_date = payline.ml_maturity_date or today
elif order.date_prefered == "fixed":
requested_date = order.date_scheduled or today
else:
requested_date = today
# No payment date in the past
if requested_date < today:
requested_date = today
# inbound: check option no_debit_before_maturity
if (
order.payment_type == "inbound"
and order.payment_mode_id.no_debit_before_maturity
and payline.ml_maturity_date
and requested_date < payline.ml_maturity_date
):
raise UserError(
_(
"The payment mode '%s' has the option "
"'Disallow Debit Before Maturity Date'. The "
"payment line %s has a maturity date %s "
"which is after the computed payment date %s."
)
% (
order.payment_mode_id.name,
payline.name,
payline.ml_maturity_date,
requested_date,
)
)
# Write requested_date on 'date' field of payment line
# norecompute is for avoiding a chained recomputation
# payment_line_ids.date
# > payment_line_ids.amount_company_currency
# > total_company_currency
with self.env.norecompute():
payline.date = requested_date
# Group options
if order.payment_mode_id.group_lines:
hashcode = payline.payment_line_hashcode()
else:
# Use line ID as hascode, which actually means no grouping
hashcode = payline.id
if hashcode in group_paylines:
group_paylines[hashcode]["paylines"] += payline
group_paylines[hashcode]["total"] += payline.amount_currency
else:
group_paylines[hashcode] = {
"paylines": payline,
"total": payline.amount_currency,
}
order.recompute()
# Create bank payment lines
for paydict in list(group_paylines.values()):
# Block if a bank payment line is <= 0
if paydict["total"] <= 0:
raise UserError(
_("The amount for Partner '%s' is negative " "or null (%.2f) !")
% (paydict["paylines"][0].partner_id.name, paydict["total"])
)
vals = self._prepare_bank_payment_line(paydict["paylines"])
bplo.create(vals)
self.write({"state": "open"})
return True
def generate_payment_file(self):
"""Returns (payment file as string, filename)"""
self.ensure_one()
if self.payment_method_id.code == "manual":
return (False, False)
else:
raise UserError(
_(
"No handler for this payment method. Maybe you haven't "
"installed the related Odoo module."
)
)
def open2generated(self):
self.ensure_one()
payment_file_str, filename = self.generate_payment_file()
action = {}
if payment_file_str and filename:
attachment = self.env["ir.attachment"].create(
{
"res_model": "account.payment.order",
"res_id": self.id,
"name": filename,
"datas": base64.b64encode(payment_file_str),
}
)
simplified_form_view = self.env.ref(
"account_payment_order.view_attachment_simplified_form"
)
action = {
"name": _("Payment File"),
"view_mode": "form",
"view_id": simplified_form_view.id,
"res_model": "ir.attachment",
"type": "ir.actions.act_window",
"target": "current",
"res_id": attachment.id,
}
self.write(
{
"date_generated": fields.Date.context_today(self),
"state": "generated",
"generated_user_id": self._uid,
}
)
return action
def generated2uploaded(self):
for order in self:
if order.payment_mode_id.generate_move:
order.generate_move()
self.write(
{"state": "uploaded", "date_uploaded": fields.Date.context_today(self)}
)
return True
def _prepare_move(self, bank_lines=None):
if self.payment_type == "outbound":
ref = _("Payment order %s") % self.name
else:
ref = _("Debit order %s") % self.name
if bank_lines and len(bank_lines) == 1:
ref += " - " + bank_lines.name
if self.payment_mode_id.offsetting_account == "bank_account":
journal_id = self.journal_id.id
elif self.payment_mode_id.offsetting_account == "transfer_account":
journal_id = self.payment_mode_id.transfer_journal_id.id
vals = {
"journal_id": journal_id,
"ref": ref,
"payment_order_id": self.id,
"line_ids": [],
}
total_company_currency = total_payment_currency = 0
for bline in bank_lines:
total_company_currency += bline.amount_company_currency
total_payment_currency += bline.amount_currency
partner_ml_vals = self._prepare_move_line_partner_account(bline)
vals["line_ids"].append((0, 0, partner_ml_vals))
trf_ml_vals = self._prepare_move_line_offsetting_account(
total_company_currency, total_payment_currency, bank_lines
)
vals["line_ids"].append((0, 0, trf_ml_vals))
return vals
def _prepare_move_line_offsetting_account(
self, amount_company_currency, amount_payment_currency, bank_lines
):
vals = {}
if self.payment_type == "outbound":
name = _("Payment order %s") % self.name
else:
name = _("Debit order %s") % self.name
if self.payment_mode_id.offsetting_account == "bank_account":
vals.update({"date": bank_lines[0].date})
else:
vals.update({"date_maturity": bank_lines[0].date})
if self.payment_mode_id.offsetting_account == "bank_account":
account_id = self.journal_id.default_account_id.id
elif self.payment_mode_id.offsetting_account == "transfer_account":
account_id = self.payment_mode_id.transfer_account_id.id
partner_id = False
for index, bank_line in enumerate(bank_lines):
if index == 0:
partner_id = bank_line.payment_line_ids[0].partner_id.id
elif bank_line.payment_line_ids[0].partner_id.id != partner_id:
# we have different partners in the grouped move
partner_id = False
break
vals.update(
{
"name": name,
"partner_id": partner_id,
"account_id": account_id,
"credit": (
self.payment_type == "outbound" and amount_company_currency or 0.0
),
"debit": (
self.payment_type == "inbound" and amount_company_currency or 0.0
),
}
)
if bank_lines[0].currency_id != bank_lines[0].company_currency_id:
sign = self.payment_type == "outbound" and -1 or 1
vals.update(
{
"currency_id": bank_lines[0].currency_id.id,
"amount_currency": amount_payment_currency * sign,
}
)
return vals
def _prepare_move_line_partner_account(self, bank_line):
if bank_line.payment_line_ids[0].move_line_id:
account_id = bank_line.payment_line_ids[0].move_line_id.account_id.id
else:
if self.payment_type == "inbound":
account_id = bank_line.partner_id.property_account_receivable_id.id
else:
account_id = bank_line.partner_id.property_account_payable_id.id
if self.payment_type == "outbound":
name = _("Payment bank line %s") % bank_line.name
else:
name = _("Debit bank line %s") % bank_line.name
vals = {
"name": name,
"bank_payment_line_id": bank_line.id,
"partner_id": bank_line.partner_id.id,
"account_id": account_id,
"credit": (
self.payment_type == "inbound"
and bank_line.amount_company_currency
or 0.0
),
"debit": (
self.payment_type == "outbound"
and bank_line.amount_company_currency
or 0.0
),
}
if bank_line.currency_id != bank_line.company_currency_id:
sign = self.payment_type == "inbound" and -1 or 1
vals.update(
{
"currency_id": bank_line.currency_id.id,
"amount_currency": bank_line.amount_currency * sign,
}
)
return vals
def _create_reconcile_move(self, hashcode, blines):
self.ensure_one()
post_move = self.payment_mode_id.post_move
am_obj = self.env["account.move"]
mvals = self._prepare_move(blines)
move = am_obj.create(mvals)
if post_move:
move.action_post()
blines.reconcile_payment_lines()
def _prepare_trf_moves(self):
"""
prepare a dict "trfmoves" that can be used when
self.payment_mode_id.move_option = date or line
key = unique identifier (date or True or line.id)
value = bank_pay_lines (recordset that can have several entries)
"""
self.ensure_one()
trfmoves = {}
for bline in self.bank_line_ids:
hashcode = bline.move_line_offsetting_account_hashcode()
if hashcode in trfmoves:
trfmoves[hashcode] += bline
else:
trfmoves[hashcode] = bline
return trfmoves
def generate_move(self):
"""
Create the moves that pay off the move lines from
the payment/debit order.
"""
self.ensure_one()
trfmoves = self._prepare_trf_moves()
for hashcode, blines in trfmoves.items():
self._create_reconcile_move(hashcode, blines)
def action_bank_payment_line(self):
self.ensure_one()
action = self.env.ref("account_payment_order.bank_payment_line_action")
action_dict = action.read()[0]
action_dict["domain"] = [("id", "in", self.bank_line_ids.ids)]
return action_dict
def action_move_journal_line(self):
self.ensure_one()
action = self.env.ref("account.action_move_journal_line")
action_dict = action.read()[0]
action_dict["domain"] = [("id", "in", self.move_ids.ids)]
ctx = self.env.context.copy()
ctx.update({"search_default_misc_filter": 0})
action_dict["context"] = ctx
return action_dict | addons14/account_payment_order/models/account_payment_order.py |
import base64
from odoo import _, api, fields, models
from odoo.exceptions import UserError, ValidationError
class AccountPaymentOrder(models.Model):
_name = "account.payment.order"
_description = "Payment Order"
_inherit = ["mail.thread"]
_order = "id desc"
_check_company_auto = True
name = fields.Char(string="Number", readonly=True, copy=False)
payment_mode_id = fields.Many2one(
comodel_name="account.payment.mode",
required=True,
ondelete="restrict",
tracking=True,
states={"draft": [("readonly", False)]},
check_company=True,
)
payment_type = fields.Selection(
selection=[("inbound", "Inbound"), ("outbound", "Outbound")],
string="Payment Type",
readonly=True,
required=True,
)
payment_method_id = fields.Many2one(
comodel_name="account.payment.method",
related="payment_mode_id.payment_method_id",
readonly=True,
store=True,
)
company_id = fields.Many2one(
related="payment_mode_id.company_id", store=True, readonly=True
)
company_currency_id = fields.Many2one(
related="payment_mode_id.company_id.currency_id", store=True, readonly=True
)
bank_account_link = fields.Selection(
related="payment_mode_id.bank_account_link", readonly=True
)
allowed_journal_ids = fields.Many2many(
comodel_name="account.journal",
compute="_compute_allowed_journal_ids",
string="Allowed journals",
)
journal_id = fields.Many2one(
comodel_name="account.journal",
string="Bank Journal",
ondelete="restrict",
readonly=True,
states={"draft": [("readonly", False)]},
tracking=True,
check_company=True,
)
# The journal_id field is only required at confirm step, to
# allow auto-creation of payment order from invoice
company_partner_bank_id = fields.Many2one(
related="journal_id.bank_account_id",
string="Company Bank Account",
readonly=True,
)
state = fields.Selection(
selection=[
("draft", "Draft"),
("open", "Confirmed"),
("generated", "File Generated"),
("uploaded", "File Uploaded"),
("done", "Done"),
("cancel", "Cancel"),
],
string="Status",
readonly=True,
copy=False,
default="draft",
tracking=True,
)
date_prefered = fields.Selection(
selection=[
("now", "Immediately"),
("due", "Due Date"),
("fixed", "Fixed Date"),
],
string="Payment Execution Date Type",
required=True,
default="due",
tracking=True,
readonly=True,
states={"draft": [("readonly", False)]},
)
date_scheduled = fields.Date(
string="Payment Execution Date",
readonly=True,
states={"draft": [("readonly", False)]},
tracking=True,
help="Select a requested date of execution if you selected 'Due Date' "
"as the Payment Execution Date Type.",
)
date_generated = fields.Date(string="File Generation Date", readonly=True)
date_uploaded = fields.Date(string="File Upload Date", readonly=True)
date_done = fields.Date(string="Done Date", readonly=True)
generated_user_id = fields.Many2one(
comodel_name="res.users",
string="Generated by",
readonly=True,
ondelete="restrict",
copy=False,
check_company=True,
)
payment_line_ids = fields.One2many(
comodel_name="account.payment.line",
inverse_name="order_id",
string="Transaction Lines",
readonly=True,
states={"draft": [("readonly", False)]},
)
bank_line_ids = fields.One2many(
comodel_name="bank.payment.line",
inverse_name="order_id",
string="Bank Payment Lines",
readonly=True,
help="The bank payment lines are used to generate the payment file. "
"They are automatically created from transaction lines upon "
"confirmation of the payment order: one bank payment line can "
"group several transaction lines if the option "
"'Group Transactions in Payment Orders' is active on the payment "
"mode.",
)
total_company_currency = fields.Monetary(
compute="_compute_total", store=True, currency_field="company_currency_id"
)
bank_line_count = fields.Integer(
compute="_compute_bank_line_count", string="Number of Bank Lines"
)
move_ids = fields.One2many(
comodel_name="account.move",
inverse_name="payment_order_id",
string="Journal Entries",
readonly=True,
)
description = fields.Char()
@api.depends("payment_mode_id")
def _compute_allowed_journal_ids(self):
for record in self:
if record.payment_mode_id.bank_account_link == "fixed":
record.allowed_journal_ids = record.payment_mode_id.fixed_journal_id
elif record.payment_mode_id.bank_account_link == "variable":
record.allowed_journal_ids = record.payment_mode_id.variable_journal_ids
else:
record.allowed_journal_ids = False
def unlink(self):
for order in self:
if order.state == "uploaded":
raise UserError(
_(
"You cannot delete an uploaded payment order. You can "
"cancel it in order to do so."
)
)
return super(AccountPaymentOrder, self).unlink()
@api.constrains("payment_type", "payment_mode_id")
def payment_order_constraints(self):
for order in self:
if (
order.payment_mode_id.payment_type
and order.payment_mode_id.payment_type != order.payment_type
):
raise ValidationError(
_(
"The payment type (%s) is not the same as the payment "
"type of the payment mode (%s)"
)
% (order.payment_type, order.payment_mode_id.payment_type)
)
@api.constrains("date_scheduled")
def check_date_scheduled(self):
today = fields.Date.context_today(self)
for order in self:
if order.date_scheduled:
if order.date_scheduled < today:
raise ValidationError(
_(
"On payment order %s, the Payment Execution Date "
"is in the past (%s)."
)
% (order.name, order.date_scheduled)
)
@api.depends("payment_line_ids", "payment_line_ids.amount_company_currency")
def _compute_total(self):
for rec in self:
rec.total_company_currency = sum(
rec.mapped("payment_line_ids.amount_company_currency") or [0.0]
)
@api.depends("bank_line_ids")
def _compute_bank_line_count(self):
for order in self:
order.bank_line_count = len(order.bank_line_ids)
@api.model
def create(self, vals):
if vals.get("name", "New") == "New":
vals["name"] = (
self.env["ir.sequence"].next_by_code("account.payment.order") or "New"
)
if vals.get("payment_mode_id"):
payment_mode = self.env["account.payment.mode"].browse(
vals["payment_mode_id"]
)
vals["payment_type"] = payment_mode.payment_type
if payment_mode.bank_account_link == "fixed":
vals["journal_id"] = payment_mode.fixed_journal_id.id
if not vals.get("date_prefered") and payment_mode.default_date_prefered:
vals["date_prefered"] = payment_mode.default_date_prefered
return super(AccountPaymentOrder, self).create(vals)
@api.onchange("payment_mode_id")
def payment_mode_id_change(self):
if len(self.allowed_journal_ids) == 1:
self.journal_id = self.allowed_journal_ids
if self.payment_mode_id.default_date_prefered:
self.date_prefered = self.payment_mode_id.default_date_prefered
def action_done(self):
self.write({"date_done": fields.Date.context_today(self), "state": "done"})
return True
def action_done_cancel(self):
for move in self.move_ids:
move.button_cancel()
for move_line in move.line_ids:
move_line.remove_move_reconcile()
move.with_context(force_delete=True).unlink()
self.action_cancel()
return True
def cancel2draft(self):
self.write({"state": "draft"})
return True
def action_cancel(self):
for order in self:
order.write({"state": "cancel"})
order.bank_line_ids.unlink()
return True
@api.model
def _prepare_bank_payment_line(self, paylines):
return {
"order_id": paylines[0].order_id.id,
"payment_line_ids": [(6, 0, paylines.ids)],
"communication": "-".join([line.communication for line in paylines]),
}
def draft2open(self):
"""
Called when you click on the 'Confirm' button
Set the 'date' on payment line depending on the 'date_prefered'
setting of the payment.order
Re-generate the bank payment lines
"""
bplo = self.env["bank.payment.line"]
today = fields.Date.context_today(self)
for order in self:
if not order.journal_id:
raise UserError(
_("Missing Bank Journal on payment order %s.") % order.name
)
if (
order.payment_method_id.bank_account_required
and not order.journal_id.bank_account_id
):
raise UserError(
_("Missing bank account on bank journal '%s'.")
% order.journal_id.display_name
)
if not order.payment_line_ids:
raise UserError(
_("There are no transactions on payment order %s.") % order.name
)
# Delete existing bank payment lines
order.bank_line_ids.unlink()
# Create the bank payment lines from the payment lines
group_paylines = {} # key = hashcode
for payline in order.payment_line_ids:
payline.draft2open_payment_line_check()
# Compute requested payment date
if order.date_prefered == "due":
requested_date = payline.ml_maturity_date or today
elif order.date_prefered == "fixed":
requested_date = order.date_scheduled or today
else:
requested_date = today
# No payment date in the past
if requested_date < today:
requested_date = today
# inbound: check option no_debit_before_maturity
if (
order.payment_type == "inbound"
and order.payment_mode_id.no_debit_before_maturity
and payline.ml_maturity_date
and requested_date < payline.ml_maturity_date
):
raise UserError(
_(
"The payment mode '%s' has the option "
"'Disallow Debit Before Maturity Date'. The "
"payment line %s has a maturity date %s "
"which is after the computed payment date %s."
)
% (
order.payment_mode_id.name,
payline.name,
payline.ml_maturity_date,
requested_date,
)
)
# Write requested_date on 'date' field of payment line
# norecompute is for avoiding a chained recomputation
# payment_line_ids.date
# > payment_line_ids.amount_company_currency
# > total_company_currency
with self.env.norecompute():
payline.date = requested_date
# Group options
if order.payment_mode_id.group_lines:
hashcode = payline.payment_line_hashcode()
else:
# Use line ID as hascode, which actually means no grouping
hashcode = payline.id
if hashcode in group_paylines:
group_paylines[hashcode]["paylines"] += payline
group_paylines[hashcode]["total"] += payline.amount_currency
else:
group_paylines[hashcode] = {
"paylines": payline,
"total": payline.amount_currency,
}
order.recompute()
# Create bank payment lines
for paydict in list(group_paylines.values()):
# Block if a bank payment line is <= 0
if paydict["total"] <= 0:
raise UserError(
_("The amount for Partner '%s' is negative " "or null (%.2f) !")
% (paydict["paylines"][0].partner_id.name, paydict["total"])
)
vals = self._prepare_bank_payment_line(paydict["paylines"])
bplo.create(vals)
self.write({"state": "open"})
return True
def generate_payment_file(self):
"""Returns (payment file as string, filename)"""
self.ensure_one()
if self.payment_method_id.code == "manual":
return (False, False)
else:
raise UserError(
_(
"No handler for this payment method. Maybe you haven't "
"installed the related Odoo module."
)
)
def open2generated(self):
self.ensure_one()
payment_file_str, filename = self.generate_payment_file()
action = {}
if payment_file_str and filename:
attachment = self.env["ir.attachment"].create(
{
"res_model": "account.payment.order",
"res_id": self.id,
"name": filename,
"datas": base64.b64encode(payment_file_str),
}
)
simplified_form_view = self.env.ref(
"account_payment_order.view_attachment_simplified_form"
)
action = {
"name": _("Payment File"),
"view_mode": "form",
"view_id": simplified_form_view.id,
"res_model": "ir.attachment",
"type": "ir.actions.act_window",
"target": "current",
"res_id": attachment.id,
}
self.write(
{
"date_generated": fields.Date.context_today(self),
"state": "generated",
"generated_user_id": self._uid,
}
)
return action
def generated2uploaded(self):
for order in self:
if order.payment_mode_id.generate_move:
order.generate_move()
self.write(
{"state": "uploaded", "date_uploaded": fields.Date.context_today(self)}
)
return True
def _prepare_move(self, bank_lines=None):
if self.payment_type == "outbound":
ref = _("Payment order %s") % self.name
else:
ref = _("Debit order %s") % self.name
if bank_lines and len(bank_lines) == 1:
ref += " - " + bank_lines.name
if self.payment_mode_id.offsetting_account == "bank_account":
journal_id = self.journal_id.id
elif self.payment_mode_id.offsetting_account == "transfer_account":
journal_id = self.payment_mode_id.transfer_journal_id.id
vals = {
"journal_id": journal_id,
"ref": ref,
"payment_order_id": self.id,
"line_ids": [],
}
total_company_currency = total_payment_currency = 0
for bline in bank_lines:
total_company_currency += bline.amount_company_currency
total_payment_currency += bline.amount_currency
partner_ml_vals = self._prepare_move_line_partner_account(bline)
vals["line_ids"].append((0, 0, partner_ml_vals))
trf_ml_vals = self._prepare_move_line_offsetting_account(
total_company_currency, total_payment_currency, bank_lines
)
vals["line_ids"].append((0, 0, trf_ml_vals))
return vals
def _prepare_move_line_offsetting_account(
self, amount_company_currency, amount_payment_currency, bank_lines
):
vals = {}
if self.payment_type == "outbound":
name = _("Payment order %s") % self.name
else:
name = _("Debit order %s") % self.name
if self.payment_mode_id.offsetting_account == "bank_account":
vals.update({"date": bank_lines[0].date})
else:
vals.update({"date_maturity": bank_lines[0].date})
if self.payment_mode_id.offsetting_account == "bank_account":
account_id = self.journal_id.default_account_id.id
elif self.payment_mode_id.offsetting_account == "transfer_account":
account_id = self.payment_mode_id.transfer_account_id.id
partner_id = False
for index, bank_line in enumerate(bank_lines):
if index == 0:
partner_id = bank_line.payment_line_ids[0].partner_id.id
elif bank_line.payment_line_ids[0].partner_id.id != partner_id:
# we have different partners in the grouped move
partner_id = False
break
vals.update(
{
"name": name,
"partner_id": partner_id,
"account_id": account_id,
"credit": (
self.payment_type == "outbound" and amount_company_currency or 0.0
),
"debit": (
self.payment_type == "inbound" and amount_company_currency or 0.0
),
}
)
if bank_lines[0].currency_id != bank_lines[0].company_currency_id:
sign = self.payment_type == "outbound" and -1 or 1
vals.update(
{
"currency_id": bank_lines[0].currency_id.id,
"amount_currency": amount_payment_currency * sign,
}
)
return vals
def _prepare_move_line_partner_account(self, bank_line):
if bank_line.payment_line_ids[0].move_line_id:
account_id = bank_line.payment_line_ids[0].move_line_id.account_id.id
else:
if self.payment_type == "inbound":
account_id = bank_line.partner_id.property_account_receivable_id.id
else:
account_id = bank_line.partner_id.property_account_payable_id.id
if self.payment_type == "outbound":
name = _("Payment bank line %s") % bank_line.name
else:
name = _("Debit bank line %s") % bank_line.name
vals = {
"name": name,
"bank_payment_line_id": bank_line.id,
"partner_id": bank_line.partner_id.id,
"account_id": account_id,
"credit": (
self.payment_type == "inbound"
and bank_line.amount_company_currency
or 0.0
),
"debit": (
self.payment_type == "outbound"
and bank_line.amount_company_currency
or 0.0
),
}
if bank_line.currency_id != bank_line.company_currency_id:
sign = self.payment_type == "inbound" and -1 or 1
vals.update(
{
"currency_id": bank_line.currency_id.id,
"amount_currency": bank_line.amount_currency * sign,
}
)
return vals
def _create_reconcile_move(self, hashcode, blines):
self.ensure_one()
post_move = self.payment_mode_id.post_move
am_obj = self.env["account.move"]
mvals = self._prepare_move(blines)
move = am_obj.create(mvals)
if post_move:
move.action_post()
blines.reconcile_payment_lines()
def _prepare_trf_moves(self):
"""
prepare a dict "trfmoves" that can be used when
self.payment_mode_id.move_option = date or line
key = unique identifier (date or True or line.id)
value = bank_pay_lines (recordset that can have several entries)
"""
self.ensure_one()
trfmoves = {}
for bline in self.bank_line_ids:
hashcode = bline.move_line_offsetting_account_hashcode()
if hashcode in trfmoves:
trfmoves[hashcode] += bline
else:
trfmoves[hashcode] = bline
return trfmoves
def generate_move(self):
"""
Create the moves that pay off the move lines from
the payment/debit order.
"""
self.ensure_one()
trfmoves = self._prepare_trf_moves()
for hashcode, blines in trfmoves.items():
self._create_reconcile_move(hashcode, blines)
def action_bank_payment_line(self):
self.ensure_one()
action = self.env.ref("account_payment_order.bank_payment_line_action")
action_dict = action.read()[0]
action_dict["domain"] = [("id", "in", self.bank_line_ids.ids)]
return action_dict
def action_move_journal_line(self):
self.ensure_one()
action = self.env.ref("account.action_move_journal_line")
action_dict = action.read()[0]
action_dict["domain"] = [("id", "in", self.move_ids.ids)]
ctx = self.env.context.copy()
ctx.update({"search_default_misc_filter": 0})
action_dict["context"] = ctx
return action_dict | 0.578091 | 0.225715 |
import sys
from oslo_config import cfg
from oslo_log import log as logging
from kuryr_kubernetes.cmd.sanity import checks
from kuryr_kubernetes import config
from kuryr_kubernetes.controller.drivers import vif_pool # noqa
LOG = logging.getLogger(__name__)
class BoolOptCallback(cfg.BoolOpt):
def __init__(self, name, callback, **kwargs):
if 'default' not in kwargs:
kwargs['default'] = False
self.callback = callback
super(BoolOptCallback, self).__init__(name, **kwargs)
def check_ports_pool_min_max():
result = checks.ports_pool_min_max()
if not result:
LOG.warning("The ports_pool_max is enabled, "
"the ports_pool_min should be smaller than "
"ports_pool_max. Either disable ports_pool_max "
"setting it to 0 or increase it's value.")
return result
def check_ports_pool_min_batch():
result = checks.ports_pool_min_batch()
if not result:
LOG.warning("The ports_pool_min should be lower than "
"ports_pool_batch. Please decrease it's value.")
return result
def check_ports_pool_max_batch():
result = checks.ports_pool_max_batch()
if not result:
LOG.warning("The ports_pool_max is enabled, "
"the ports_pool_max should be higher than "
"ports_pool_batch. Either disable ports_pool_max "
"setting it to 0 or decrease it's value.")
return result
# Define CLI opts to test specific features, with a callback for the test
OPTS = [
BoolOptCallback('vif_pool_min_max', check_ports_pool_min_max,
default=False,
help='Check configuration sanity of ports_pool_min and '
'ports_pool_max.'),
BoolOptCallback('vif_pool_min_batch', check_ports_pool_min_batch,
default=False,
help='Check configuration sanity of ports_pool_min and '
'ports_pool_batch.'),
BoolOptCallback('vif_pool_max_batch', check_ports_pool_max_batch,
default=False,
help='Check configuration sanity of ports_pool_max and '
'ports_pool_batch.'),
]
CLI_OPTS = [
cfg.BoolOpt('sanity_check_error', default=False,
help='If this flag is configured, the sanity command fails '
'if any of the sanity tests fails.'),
]
def all_tests_passed():
results = [opt.callback() for opt in OPTS if cfg.CONF.get(opt.name)]
return all(results)
def main():
cfg.CONF.register_cli_opts(OPTS)
cfg.CONF.register_cli_opts(CLI_OPTS)
config.init(sys.argv[1:], default_config_files=['/etc/kuryr/kuryr.conf'])
config.setup_logging()
return 0 if all_tests_passed() else 1
if __name__ == '__main__':
main() | kuryr_kubernetes/cmd/sanity_checks.py | import sys
from oslo_config import cfg
from oslo_log import log as logging
from kuryr_kubernetes.cmd.sanity import checks
from kuryr_kubernetes import config
from kuryr_kubernetes.controller.drivers import vif_pool # noqa
LOG = logging.getLogger(__name__)
class BoolOptCallback(cfg.BoolOpt):
def __init__(self, name, callback, **kwargs):
if 'default' not in kwargs:
kwargs['default'] = False
self.callback = callback
super(BoolOptCallback, self).__init__(name, **kwargs)
def check_ports_pool_min_max():
result = checks.ports_pool_min_max()
if not result:
LOG.warning("The ports_pool_max is enabled, "
"the ports_pool_min should be smaller than "
"ports_pool_max. Either disable ports_pool_max "
"setting it to 0 or increase it's value.")
return result
def check_ports_pool_min_batch():
result = checks.ports_pool_min_batch()
if not result:
LOG.warning("The ports_pool_min should be lower than "
"ports_pool_batch. Please decrease it's value.")
return result
def check_ports_pool_max_batch():
result = checks.ports_pool_max_batch()
if not result:
LOG.warning("The ports_pool_max is enabled, "
"the ports_pool_max should be higher than "
"ports_pool_batch. Either disable ports_pool_max "
"setting it to 0 or decrease it's value.")
return result
# Define CLI opts to test specific features, with a callback for the test
OPTS = [
BoolOptCallback('vif_pool_min_max', check_ports_pool_min_max,
default=False,
help='Check configuration sanity of ports_pool_min and '
'ports_pool_max.'),
BoolOptCallback('vif_pool_min_batch', check_ports_pool_min_batch,
default=False,
help='Check configuration sanity of ports_pool_min and '
'ports_pool_batch.'),
BoolOptCallback('vif_pool_max_batch', check_ports_pool_max_batch,
default=False,
help='Check configuration sanity of ports_pool_max and '
'ports_pool_batch.'),
]
CLI_OPTS = [
cfg.BoolOpt('sanity_check_error', default=False,
help='If this flag is configured, the sanity command fails '
'if any of the sanity tests fails.'),
]
def all_tests_passed():
results = [opt.callback() for opt in OPTS if cfg.CONF.get(opt.name)]
return all(results)
def main():
cfg.CONF.register_cli_opts(OPTS)
cfg.CONF.register_cli_opts(CLI_OPTS)
config.init(sys.argv[1:], default_config_files=['/etc/kuryr/kuryr.conf'])
config.setup_logging()
return 0 if all_tests_passed() else 1
if __name__ == '__main__':
main() | 0.372277 | 0.066478 |
from crummycm.validation.types.values.base import BaseValue
import operator
class Numeric(BaseValue):
def __init__(
self,
default_value=None,
is_type=None,
required=None,
description=None,
fn=None,
fn_kwargs=None,
# specific
bounds=None,
bounds_inclusive=None,
):
self.ALLOWED_TYPES = (float, int, complex)
assert is_type in self.ALLOWED_TYPES, ValueError(
f"Numeric class can only be of type {self.ALLOWED_TYPES} not {is_type}"
)
super().__init__(
default_value=default_value,
is_type=is_type,
required=required,
description=description,
fn=fn,
fn_kwargs=fn_kwargs,
)
self.bounds = bounds
if bounds_inclusive:
if not isinstance(bounds_inclusive, tuple):
raise ValueError(
f"`bounds_inclusive` ({bounds_inclusive}) expected to be tuple, not {type(bounds_inclusive)}"
)
else:
if len(bounds_inclusive) != 2:
raise ValueError(
f"`bounds_inclusive` ({bounds_inclusive}) expected to be len 2, not {len(bounds_inclusive)}"
)
for b in bounds_inclusive:
if not isinstance(b, bool):
raise ValueError(
f"`bounds_inclusive` ({bounds_inclusive}), value ({b}) should be type {bool}, not {type(b)}"
)
self.bounds_inclusive = bounds_inclusive
def template(self, level=0):
ret_str = f"[{self.__class__.__name__}]"
if self.is_type:
ret_str = f"{self.is_type}{ret_str}"
if self.default_value:
ret_str += f"({self.default_value})"
if level == 0:
if self.bounds:
ret_str += "^"
elif level > 0:
if self.bounds:
ret_str += f"[{self.bounds}]"
if self.fn:
ret_str += "!"
if self.required:
ret_str += "*"
return ret_str
def transform(self, cur_value=None):
if cur_value is not None:
if not isinstance(cur_value, self.ALLOWED_TYPES) and not isinstance(
cur_value, bool
):
raise TypeError(
f"cur_value ({cur_value}) is not type {self.ALLOWED_TYPES}"
)
self.user_in = cur_value
iv = super().transform(self.user_in)
if iv:
if self.bounds:
# ensure w/in bounds
if self.bounds_inclusive:
if self.bounds_inclusive[0]:
l_op = operator.ge
else:
l_op = operator.gt
if self.bounds_inclusive[1]:
r_op = operator.le
else:
r_op = operator.lt
else:
l_op = operator.gt
r_op = operator.lt
if not r_op(iv, self.bounds[1]):
raise ValueError(
f"value {cur_value} tranformed by {super()} into {iv} is greater than {self.bounds[1]} (op: {r_op}), description: {self.description}"
)
if not l_op(iv, self.bounds[0]):
raise ValueError(
f"value {cur_value} tranformed by {super()} into {iv} is less than {self.bounds[0]} (op: {r_op}),, description: {self.description}"
)
self.out = iv
return self.out | src/crummycm/validation/types/values/element/numeric.py | from crummycm.validation.types.values.base import BaseValue
import operator
class Numeric(BaseValue):
def __init__(
self,
default_value=None,
is_type=None,
required=None,
description=None,
fn=None,
fn_kwargs=None,
# specific
bounds=None,
bounds_inclusive=None,
):
self.ALLOWED_TYPES = (float, int, complex)
assert is_type in self.ALLOWED_TYPES, ValueError(
f"Numeric class can only be of type {self.ALLOWED_TYPES} not {is_type}"
)
super().__init__(
default_value=default_value,
is_type=is_type,
required=required,
description=description,
fn=fn,
fn_kwargs=fn_kwargs,
)
self.bounds = bounds
if bounds_inclusive:
if not isinstance(bounds_inclusive, tuple):
raise ValueError(
f"`bounds_inclusive` ({bounds_inclusive}) expected to be tuple, not {type(bounds_inclusive)}"
)
else:
if len(bounds_inclusive) != 2:
raise ValueError(
f"`bounds_inclusive` ({bounds_inclusive}) expected to be len 2, not {len(bounds_inclusive)}"
)
for b in bounds_inclusive:
if not isinstance(b, bool):
raise ValueError(
f"`bounds_inclusive` ({bounds_inclusive}), value ({b}) should be type {bool}, not {type(b)}"
)
self.bounds_inclusive = bounds_inclusive
def template(self, level=0):
ret_str = f"[{self.__class__.__name__}]"
if self.is_type:
ret_str = f"{self.is_type}{ret_str}"
if self.default_value:
ret_str += f"({self.default_value})"
if level == 0:
if self.bounds:
ret_str += "^"
elif level > 0:
if self.bounds:
ret_str += f"[{self.bounds}]"
if self.fn:
ret_str += "!"
if self.required:
ret_str += "*"
return ret_str
def transform(self, cur_value=None):
if cur_value is not None:
if not isinstance(cur_value, self.ALLOWED_TYPES) and not isinstance(
cur_value, bool
):
raise TypeError(
f"cur_value ({cur_value}) is not type {self.ALLOWED_TYPES}"
)
self.user_in = cur_value
iv = super().transform(self.user_in)
if iv:
if self.bounds:
# ensure w/in bounds
if self.bounds_inclusive:
if self.bounds_inclusive[0]:
l_op = operator.ge
else:
l_op = operator.gt
if self.bounds_inclusive[1]:
r_op = operator.le
else:
r_op = operator.lt
else:
l_op = operator.gt
r_op = operator.lt
if not r_op(iv, self.bounds[1]):
raise ValueError(
f"value {cur_value} tranformed by {super()} into {iv} is greater than {self.bounds[1]} (op: {r_op}), description: {self.description}"
)
if not l_op(iv, self.bounds[0]):
raise ValueError(
f"value {cur_value} tranformed by {super()} into {iv} is less than {self.bounds[0]} (op: {r_op}),, description: {self.description}"
)
self.out = iv
return self.out | 0.755907 | 0.199503 |
# Importamos la librería
import pygame
import sys
import random
from math import sqrt,exp
# Importamos constantes locales de pygame
from pygame.locals import *
from aux import *
sprites_serp = pygame.sprite.Group()
manzanas = pygame.sprite.Group()
# Establecemos el largo y alto de cada segmento de la serpiente
largodel_segmento = 10
altodel_segmento = 10
# Margen entre cada segmento
margendel_segmento = 3
#rectangulo delimitador
recta = pygame.Rect(0, 50, 958, 640-52)
class Game:
def __init__(self):
Screen = pygame.display.get_surface()
self.ancho_col = 5
self.x_ini=self.ancho_col
self.x_fin= Screen.get_width()-(self.ancho_col*2)
self.y_ini = 50
self.y_fin = Screen.get_height()-self.y_ini-(self.ancho_col*2)
recta = pygame.Rect(self.x_ini, self.y_ini, self.x_fin, self.y_fin)
self.score = 0
self.level = 1
self.direction = 1
#sprites_serp.clear(pygame.display.get_surface(),pygame.display.get_surface())
self.score_img = load_image('./assets/images/puntuacion.png', (150,40),True)
self.level_img = load_image('./assets/images/nivel.png', (150,40),True)
self.serpiente = Serpiente()
self.nuevaManzana()
pygame.display.flip()
#direcciones de la serpiente: 0: abajo 1:dcha 2: arriba 3:izda
self.state = 'JUEGO'
self.score = 0
def wait_for_key(self):
message = 'PULSE UNA TECLA PARA CONTINUAR'
self.black_screen(message)
pygame.event.clear()
wait = True
while wait:
evento = pygame.event.wait()
if evento.type == KEYDOWN and evento.key != K_ESCAPE:
print self.direction
if evento.key == K_UP and self.direction != 0: self.direction = 2
elif evento.key == K_DOWN and self.direction != 2: self.direction = 0
elif evento.key == K_LEFT and self.direction != 1: self.direction = 3
elif evento.key == K_RIGHT and self.direction != 3:self.direction = 1
print self.direction
wait = False
def continuar(self):
self.wait_for_key()
return self.direction
def new(self):
self.score = 0
self.level = 1
self.serpiente.destroy()
self.serpiente = Serpiente()
self.nuevaManzana()
self.wait_for_key()
return self.direction
def getLevel(self):
return self.level
def puntosNecesarios(self):
constant = 0.1
return round(pow(self.level,2) / constant)
def black_screen(self,message=None):
Screen = pygame.display.get_surface()
Screen.fill(NEGRO)
Screen.fill(EGG,recta)
Screen.blit(self.score_img,(5,5))
Screen.blit(self.level_img,(155,5))
fuente= pygame.font.SysFont('Impact', 25)
score = fuente.render(str(self.score), 1, BLANCO)
Screen.blit(score,(30,8))
level = fuente.render(str(self.level), 1, BLANCO)
Screen.blit(level,(230,8))
pygame.draw.rect(Screen, BLANCO, recta, self.ancho_col)
sprites_serp.draw(pygame.display.get_surface())
manzanas.draw(pygame.display.get_surface())
if message:
message = fuente.render(message, 1, NEGRO)
Screen.blit(message,(300,300))
pygame.display.flip()
def mover(self,dir):
self.direction = dir
self.state = self.serpiente.mover(dir)
if self.state == 'SUMA':
self.sumapunto()
self.black_screen()
return self.state
def gameover(self):
self.serpiente.destroy()
def sumapunto(self):
self.score = self.score + 1
if self.score == self.puntosNecesarios():
self.levelup()
self.nuevaManzana()
self.state = 'JUEGO'
print 'puntuacion: %d' % (self.score)
def levelup(self):
self.level = self.level+1
def nuevaManzana(self):
for i in range (random.randint(1, 3)):
if len(manzanas) < 3:
x =random.randint(self.x_ini+largodel_segmento, self.x_fin-largodel_segmento)
y =random.randint(self.y_ini+altodel_segmento, self.y_fin-altodel_segmento)
manzanas.add(Manzana(x,y))
class Segmento(pygame.sprite.Sprite):
""" Clase que representa un segmento de la serpiente. """
# -- Métodos
# Función constructor
def __init__(self, x, y,head=False):
# Llamada al constructor padre
pygame.sprite.Sprite.__init__(self)
# Establecemos el alto y largo
self.image = pygame.Surface([largodel_segmento, altodel_segmento])
self.image.fill(BLANCO)
# Establecemos como punto de partida la esquina superior izquierda.
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
class Serpiente():
def __init__(self):
#Velocidad inicial
self.cambio_x = largodel_segmento + margendel_segmento
self.cambio_y = 0
# Creamos la serpiente inicial.
ini_x = 300
ini_y = 300
self.segmentos_serpiente = []
for i in range(3):
x = ini_x - (largodel_segmento + margendel_segmento) * i
y = ini_y
if i == 0:
segmento = Segmento(x, y,True)
else:
segmento = Segmento(x, y)
self.segmentos_serpiente.append(segmento)
sprites_serp.add(segmento)
def mover(self,dirs):
state = 'JUEGO'
if dirs == 3:
cambio_x = (largodel_segmento + margendel_segmento) * -1
cambio_y = 0
if dirs == 1:
cambio_x = (largodel_segmento + margendel_segmento)
cambio_y = 0
if dirs == 2:
cambio_x = 0
cambio_y = (altodel_segmento + margendel_segmento) * -1
if dirs == 0:
cambio_x = 0
cambio_y = (altodel_segmento + margendel_segmento)
# Determinamos dónde aparecerá el nuevo segmento
x = self.segmentos_serpiente[0].rect.x + cambio_x
y = self.segmentos_serpiente[0].rect.y + cambio_y
segmento = Segmento(x, y)
if not recta.collidepoint(x,y) or len(pygame.sprite.spritecollide(segmento, sprites_serp, False)) > 0:
state = 'GAMEOVER'
elif len(pygame.sprite.groupcollide(sprites_serp, manzanas, False, True)) > 0 :
state = 'SUMA'
else:
segmento_viejo = self.segmentos_serpiente.pop()
sprites_serp.remove(segmento_viejo)
# Insertamos un nuevo segmento en la lista
self.segmentos_serpiente.insert(0, segmento)
if state != 'GAMEOVER':
sprites_serp.add(segmento)
else:
self.destroy()
return state
def destroy(self):
del self.segmentos_serpiente[:]
sprites_serp.empty()
sprites_serp.remove()
manzanas.empty()
manzanas.remove()
class Manzana(pygame.sprite.Sprite):
def __init__(self,x,y):
pygame.sprite.Sprite.__init__(self)
# Establecemos el alto y largo
#self.image = pygame.Surface([largodel_segmento, altodel_segmento])
#self.image.fill(RED)
self.image = load_image('./assets/images/apple.png',(largodel_segmento, altodel_segmento),True)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
def update(self,x,y):
self.rect.x = x
self.rect.y = y
#Add this draw function so we can draw individual sprites
def draw(self,screen):
screen.blit(self.image, self.rect) | snake.py |
# Importamos la librería
import pygame
import sys
import random
from math import sqrt,exp
# Importamos constantes locales de pygame
from pygame.locals import *
from aux import *
sprites_serp = pygame.sprite.Group()
manzanas = pygame.sprite.Group()
# Establecemos el largo y alto de cada segmento de la serpiente
largodel_segmento = 10
altodel_segmento = 10
# Margen entre cada segmento
margendel_segmento = 3
#rectangulo delimitador
recta = pygame.Rect(0, 50, 958, 640-52)
class Game:
def __init__(self):
Screen = pygame.display.get_surface()
self.ancho_col = 5
self.x_ini=self.ancho_col
self.x_fin= Screen.get_width()-(self.ancho_col*2)
self.y_ini = 50
self.y_fin = Screen.get_height()-self.y_ini-(self.ancho_col*2)
recta = pygame.Rect(self.x_ini, self.y_ini, self.x_fin, self.y_fin)
self.score = 0
self.level = 1
self.direction = 1
#sprites_serp.clear(pygame.display.get_surface(),pygame.display.get_surface())
self.score_img = load_image('./assets/images/puntuacion.png', (150,40),True)
self.level_img = load_image('./assets/images/nivel.png', (150,40),True)
self.serpiente = Serpiente()
self.nuevaManzana()
pygame.display.flip()
#direcciones de la serpiente: 0: abajo 1:dcha 2: arriba 3:izda
self.state = 'JUEGO'
self.score = 0
def wait_for_key(self):
message = 'PULSE UNA TECLA PARA CONTINUAR'
self.black_screen(message)
pygame.event.clear()
wait = True
while wait:
evento = pygame.event.wait()
if evento.type == KEYDOWN and evento.key != K_ESCAPE:
print self.direction
if evento.key == K_UP and self.direction != 0: self.direction = 2
elif evento.key == K_DOWN and self.direction != 2: self.direction = 0
elif evento.key == K_LEFT and self.direction != 1: self.direction = 3
elif evento.key == K_RIGHT and self.direction != 3:self.direction = 1
print self.direction
wait = False
def continuar(self):
self.wait_for_key()
return self.direction
def new(self):
self.score = 0
self.level = 1
self.serpiente.destroy()
self.serpiente = Serpiente()
self.nuevaManzana()
self.wait_for_key()
return self.direction
def getLevel(self):
return self.level
def puntosNecesarios(self):
constant = 0.1
return round(pow(self.level,2) / constant)
def black_screen(self,message=None):
Screen = pygame.display.get_surface()
Screen.fill(NEGRO)
Screen.fill(EGG,recta)
Screen.blit(self.score_img,(5,5))
Screen.blit(self.level_img,(155,5))
fuente= pygame.font.SysFont('Impact', 25)
score = fuente.render(str(self.score), 1, BLANCO)
Screen.blit(score,(30,8))
level = fuente.render(str(self.level), 1, BLANCO)
Screen.blit(level,(230,8))
pygame.draw.rect(Screen, BLANCO, recta, self.ancho_col)
sprites_serp.draw(pygame.display.get_surface())
manzanas.draw(pygame.display.get_surface())
if message:
message = fuente.render(message, 1, NEGRO)
Screen.blit(message,(300,300))
pygame.display.flip()
def mover(self,dir):
self.direction = dir
self.state = self.serpiente.mover(dir)
if self.state == 'SUMA':
self.sumapunto()
self.black_screen()
return self.state
def gameover(self):
self.serpiente.destroy()
def sumapunto(self):
self.score = self.score + 1
if self.score == self.puntosNecesarios():
self.levelup()
self.nuevaManzana()
self.state = 'JUEGO'
print 'puntuacion: %d' % (self.score)
def levelup(self):
self.level = self.level+1
def nuevaManzana(self):
for i in range (random.randint(1, 3)):
if len(manzanas) < 3:
x =random.randint(self.x_ini+largodel_segmento, self.x_fin-largodel_segmento)
y =random.randint(self.y_ini+altodel_segmento, self.y_fin-altodel_segmento)
manzanas.add(Manzana(x,y))
class Segmento(pygame.sprite.Sprite):
""" Clase que representa un segmento de la serpiente. """
# -- Métodos
# Función constructor
def __init__(self, x, y,head=False):
# Llamada al constructor padre
pygame.sprite.Sprite.__init__(self)
# Establecemos el alto y largo
self.image = pygame.Surface([largodel_segmento, altodel_segmento])
self.image.fill(BLANCO)
# Establecemos como punto de partida la esquina superior izquierda.
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
class Serpiente():
def __init__(self):
#Velocidad inicial
self.cambio_x = largodel_segmento + margendel_segmento
self.cambio_y = 0
# Creamos la serpiente inicial.
ini_x = 300
ini_y = 300
self.segmentos_serpiente = []
for i in range(3):
x = ini_x - (largodel_segmento + margendel_segmento) * i
y = ini_y
if i == 0:
segmento = Segmento(x, y,True)
else:
segmento = Segmento(x, y)
self.segmentos_serpiente.append(segmento)
sprites_serp.add(segmento)
def mover(self,dirs):
state = 'JUEGO'
if dirs == 3:
cambio_x = (largodel_segmento + margendel_segmento) * -1
cambio_y = 0
if dirs == 1:
cambio_x = (largodel_segmento + margendel_segmento)
cambio_y = 0
if dirs == 2:
cambio_x = 0
cambio_y = (altodel_segmento + margendel_segmento) * -1
if dirs == 0:
cambio_x = 0
cambio_y = (altodel_segmento + margendel_segmento)
# Determinamos dónde aparecerá el nuevo segmento
x = self.segmentos_serpiente[0].rect.x + cambio_x
y = self.segmentos_serpiente[0].rect.y + cambio_y
segmento = Segmento(x, y)
if not recta.collidepoint(x,y) or len(pygame.sprite.spritecollide(segmento, sprites_serp, False)) > 0:
state = 'GAMEOVER'
elif len(pygame.sprite.groupcollide(sprites_serp, manzanas, False, True)) > 0 :
state = 'SUMA'
else:
segmento_viejo = self.segmentos_serpiente.pop()
sprites_serp.remove(segmento_viejo)
# Insertamos un nuevo segmento en la lista
self.segmentos_serpiente.insert(0, segmento)
if state != 'GAMEOVER':
sprites_serp.add(segmento)
else:
self.destroy()
return state
def destroy(self):
del self.segmentos_serpiente[:]
sprites_serp.empty()
sprites_serp.remove()
manzanas.empty()
manzanas.remove()
class Manzana(pygame.sprite.Sprite):
def __init__(self,x,y):
pygame.sprite.Sprite.__init__(self)
# Establecemos el alto y largo
#self.image = pygame.Surface([largodel_segmento, altodel_segmento])
#self.image.fill(RED)
self.image = load_image('./assets/images/apple.png',(largodel_segmento, altodel_segmento),True)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
def update(self,x,y):
self.rect.x = x
self.rect.y = y
#Add this draw function so we can draw individual sprites
def draw(self,screen):
screen.blit(self.image, self.rect) | 0.14777 | 0.38341 |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Dcc(Base):
"""The Layer 1 Configuration is being configured for a POS port and DCC is selected as the Payload Type.
The Dcc class encapsulates a required dcc resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'dcc'
_SDM_ATT_MAP = {
'AvailableSpeeds': 'availableSpeeds',
'CanModifySpeed': 'canModifySpeed',
'CanSetMultipleSpeeds': 'canSetMultipleSpeeds',
'Crc': 'crc',
'OverheadByte': 'overheadByte',
'SelectedSpeeds': 'selectedSpeeds',
'TimeFill': 'timeFill',
}
_SDM_ENUM_MAP = {
'crc': ['crc16', 'crc32'],
'overheadByte': ['loh', 'soh'],
'timeFill': ['flag7E', 'markIdle'],
}
def __init__(self, parent, list_op=False):
super(Dcc, self).__init__(parent, list_op)
@property
def AvailableSpeeds(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[]): Which speeds are available for the current media and AN settings.
"""
return self._get_attribute(self._SDM_ATT_MAP['AvailableSpeeds'])
@property
def CanModifySpeed(self):
# type: () -> bool
"""
Returns
-------
- bool: Returns true/false depending upon if the port can change speed for the current media and AN settings.
"""
return self._get_attribute(self._SDM_ATT_MAP['CanModifySpeed'])
@property
def CanSetMultipleSpeeds(self):
# type: () -> bool
"""
Returns
-------
- bool: Can this port selectmultiple speeds for the current media and AN settings.
"""
return self._get_attribute(self._SDM_ATT_MAP['CanSetMultipleSpeeds'])
@property
def Crc(self):
# type: () -> str
"""
Returns
-------
- str(crc16 | crc32): Choose the type of Cyclic Redundancy Check to be used.
"""
return self._get_attribute(self._SDM_ATT_MAP['Crc'])
@Crc.setter
def Crc(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Crc'], value)
@property
def OverheadByte(self):
# type: () -> str
"""
Returns
-------
- str(loh | soh): Choose the type of Overhead bytes to be used for transmitting the DCC packet streams.
"""
return self._get_attribute(self._SDM_ATT_MAP['OverheadByte'])
@OverheadByte.setter
def OverheadByte(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['OverheadByte'], value)
@property
def SelectedSpeeds(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[]): Which speeds are selected for the current media and AN settings.
"""
return self._get_attribute(self._SDM_ATT_MAP['SelectedSpeeds'])
@SelectedSpeeds.setter
def SelectedSpeeds(self, value):
# type: (List[str]) -> None
self._set_attribute(self._SDM_ATT_MAP['SelectedSpeeds'], value)
@property
def TimeFill(self):
# type: () -> str
"""
Returns
-------
- str(flag7E | markIdle): Choose the type of bytes used to fill the gaps between DCC frames.
"""
return self._get_attribute(self._SDM_ATT_MAP['TimeFill'])
@TimeFill.setter
def TimeFill(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['TimeFill'], value)
def update(self, Crc=None, OverheadByte=None, SelectedSpeeds=None, TimeFill=None):
# type: (str, str, List[str], str) -> Dcc
"""Updates dcc resource on the server.
Args
----
- Crc (str(crc16 | crc32)): Choose the type of Cyclic Redundancy Check to be used.
- OverheadByte (str(loh | soh)): Choose the type of Overhead bytes to be used for transmitting the DCC packet streams.
- SelectedSpeeds (list(str[])): Which speeds are selected for the current media and AN settings.
- TimeFill (str(flag7E | markIdle)): Choose the type of bytes used to fill the gaps between DCC frames.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals())) | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/l1config/pos/dcc/dcc.py | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Dcc(Base):
"""The Layer 1 Configuration is being configured for a POS port and DCC is selected as the Payload Type.
The Dcc class encapsulates a required dcc resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'dcc'
_SDM_ATT_MAP = {
'AvailableSpeeds': 'availableSpeeds',
'CanModifySpeed': 'canModifySpeed',
'CanSetMultipleSpeeds': 'canSetMultipleSpeeds',
'Crc': 'crc',
'OverheadByte': 'overheadByte',
'SelectedSpeeds': 'selectedSpeeds',
'TimeFill': 'timeFill',
}
_SDM_ENUM_MAP = {
'crc': ['crc16', 'crc32'],
'overheadByte': ['loh', 'soh'],
'timeFill': ['flag7E', 'markIdle'],
}
def __init__(self, parent, list_op=False):
super(Dcc, self).__init__(parent, list_op)
@property
def AvailableSpeeds(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[]): Which speeds are available for the current media and AN settings.
"""
return self._get_attribute(self._SDM_ATT_MAP['AvailableSpeeds'])
@property
def CanModifySpeed(self):
# type: () -> bool
"""
Returns
-------
- bool: Returns true/false depending upon if the port can change speed for the current media and AN settings.
"""
return self._get_attribute(self._SDM_ATT_MAP['CanModifySpeed'])
@property
def CanSetMultipleSpeeds(self):
# type: () -> bool
"""
Returns
-------
- bool: Can this port selectmultiple speeds for the current media and AN settings.
"""
return self._get_attribute(self._SDM_ATT_MAP['CanSetMultipleSpeeds'])
@property
def Crc(self):
# type: () -> str
"""
Returns
-------
- str(crc16 | crc32): Choose the type of Cyclic Redundancy Check to be used.
"""
return self._get_attribute(self._SDM_ATT_MAP['Crc'])
@Crc.setter
def Crc(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Crc'], value)
@property
def OverheadByte(self):
# type: () -> str
"""
Returns
-------
- str(loh | soh): Choose the type of Overhead bytes to be used for transmitting the DCC packet streams.
"""
return self._get_attribute(self._SDM_ATT_MAP['OverheadByte'])
@OverheadByte.setter
def OverheadByte(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['OverheadByte'], value)
@property
def SelectedSpeeds(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[]): Which speeds are selected for the current media and AN settings.
"""
return self._get_attribute(self._SDM_ATT_MAP['SelectedSpeeds'])
@SelectedSpeeds.setter
def SelectedSpeeds(self, value):
# type: (List[str]) -> None
self._set_attribute(self._SDM_ATT_MAP['SelectedSpeeds'], value)
@property
def TimeFill(self):
# type: () -> str
"""
Returns
-------
- str(flag7E | markIdle): Choose the type of bytes used to fill the gaps between DCC frames.
"""
return self._get_attribute(self._SDM_ATT_MAP['TimeFill'])
@TimeFill.setter
def TimeFill(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['TimeFill'], value)
def update(self, Crc=None, OverheadByte=None, SelectedSpeeds=None, TimeFill=None):
# type: (str, str, List[str], str) -> Dcc
"""Updates dcc resource on the server.
Args
----
- Crc (str(crc16 | crc32)): Choose the type of Cyclic Redundancy Check to be used.
- OverheadByte (str(loh | soh)): Choose the type of Overhead bytes to be used for transmitting the DCC packet streams.
- SelectedSpeeds (list(str[])): Which speeds are selected for the current media and AN settings.
- TimeFill (str(flag7E | markIdle)): Choose the type of bytes used to fill the gaps between DCC frames.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals())) | 0.89654 | 0.372163 |
# ==================================================
# Import
import pytest
import random
import math
# ==================================================
# Phase 1
def phase1(X, k, d):
# Initiation
n = len(X)
random.shuffle(X)
S = X[:k]
XS = X[k:]
S.sort()
# Keeping the list entries below k/2
if 2*(k*math.log2(n))**0.5 < k/2:
lst = [2*(k*math.log2(n))**0.5]
if 3*(k*math.log2(n))**0.5 < k/2:
lst.append(3*(k*math.log2(n))**0.5)
while d*lst[len(lst) - 1] < k/2:
lst.append(d*lst[len(lst) - 1])
lst.append(k/2)
else:
lst = [k/2]
# Buckets
L = [[] for _ in range(len(lst) - 1)]
R = [[] for _ in range(len(lst) - 1)]
C = []
for s in S[math.floor(k / 2 - lst[0]): math.ceil(k / 2 + lst[0])]:
C.append(s)
for i in range(1, len(lst)):
for s in S[math.floor(k / 2 - lst[i]): math.floor(k / 2 - lst[i - 1])]:
L[i - 1].append(s)
for s in S[math.ceil(k / 2 + lst[i - 1]): math.ceil(k / 2 + lst[i])]:
R[i - 1].append(s)
return S, XS, L, C, R
# ==================================================
# Phase 2
def phase2(S, XS, L, C, R, cnt):
mark = [False for _ in range(len(XS) + len(S))]
b = len(L)
random.shuffle(XS)
for x_i in XS:
med = 0
for j in reversed(range(0, b - 1)):
current = 2 ** 50
random.shuffle(L[j])
for l in L[j]:
if cnt[l] < current:
x_A = l
if mark[x_A] == True:
c = 1
else:
c = 2
cnt[x_i] += 1
cnt[x_A] += 1
if x_i < x_A:
if j + c < b:
mark[x_i] = True
L[j + c].append(x_i)
med = -1
else:
med = -2
break
current2 = 2 ** 50
random.shuffle(R[j])
for r in R[j]:
if cnt[r] < current2:
x_B = r
if mark[x_B] == True:
c = 1
else:
c = 2
cnt[x_i] += 1
cnt[x_B] += 1
if x_i > x_B:
if j + c < b:
mark[x_i] = True
R[j + c].append(x_i)
med = 1
else:
med = 2
break
if med == 0:
C.append(x_i)
elif med == -2:
L[len(L) - 1].append(x_i)
elif med == 2:
R[len(R) - 1].append(x_i)
return S, XS, L, C, R, cnt
# ==================================================
# Unittest : Parameter
@pytest.mark.parametrize(('n'), [
# Randomized input
random.randint(2**9, 2**15),
# Manuel input
2**10, 2**12, 2**14, 2**12 + 1, 2**12 - 1
])
# ==================================================
# Unittest : Test
def test_p1(n):
# Generating Tastcase
X0 = [i for i in range(n)]
cnt0 = [0 for _ in range(n)]
k0 = int(n ** (2 / 3))
d0 = int(n ** (1 / 12))
S0, XS0, L0, C0, R0, = phase1(X0, k0, d0)
S0, XS0, L0, C0, R0, cnt0 = phase2(S0, XS0, L0, C0, R0, cnt0)
X1 = [i for i in range(n)]
cnt1 = [0 for _ in range(n)]
k1 = int(n / math.log(n, 2)**(1/3))
d1 = int(math.log(n, 2)**(1/3))
S1, XS1, L1, C1, R1 = phase1(X1, k1, d1)
S1, XS1, L1, C1, R1, cnt1 = phase2(S1, XS1, L1, C1, R1, cnt1)
if n % 2 == 0:
assert int((n / 2) + 1) in C0
assert int((n / 2) + 1) in C1
assert cnt0[int((n / 2) + 1)] <= len(L0) + len(R0)
assert cnt1[int((n / 2) + 1)] <= len(L1) + len(R1)
elif n % 2 == 1:
assert int(n / 2) in C0
assert int(n / 2) in C1
assert cnt0[int(n / 2)] <= len(L0) + len(R0)
assert cnt1[int(n / 2)] <= len(L1) + len(R1)
# Test
return
# ================================================== | tests/phase2_test.py | # ==================================================
# Import
import pytest
import random
import math
# ==================================================
# Phase 1
def phase1(X, k, d):
# Initiation
n = len(X)
random.shuffle(X)
S = X[:k]
XS = X[k:]
S.sort()
# Keeping the list entries below k/2
if 2*(k*math.log2(n))**0.5 < k/2:
lst = [2*(k*math.log2(n))**0.5]
if 3*(k*math.log2(n))**0.5 < k/2:
lst.append(3*(k*math.log2(n))**0.5)
while d*lst[len(lst) - 1] < k/2:
lst.append(d*lst[len(lst) - 1])
lst.append(k/2)
else:
lst = [k/2]
# Buckets
L = [[] for _ in range(len(lst) - 1)]
R = [[] for _ in range(len(lst) - 1)]
C = []
for s in S[math.floor(k / 2 - lst[0]): math.ceil(k / 2 + lst[0])]:
C.append(s)
for i in range(1, len(lst)):
for s in S[math.floor(k / 2 - lst[i]): math.floor(k / 2 - lst[i - 1])]:
L[i - 1].append(s)
for s in S[math.ceil(k / 2 + lst[i - 1]): math.ceil(k / 2 + lst[i])]:
R[i - 1].append(s)
return S, XS, L, C, R
# ==================================================
# Phase 2
def phase2(S, XS, L, C, R, cnt):
mark = [False for _ in range(len(XS) + len(S))]
b = len(L)
random.shuffle(XS)
for x_i in XS:
med = 0
for j in reversed(range(0, b - 1)):
current = 2 ** 50
random.shuffle(L[j])
for l in L[j]:
if cnt[l] < current:
x_A = l
if mark[x_A] == True:
c = 1
else:
c = 2
cnt[x_i] += 1
cnt[x_A] += 1
if x_i < x_A:
if j + c < b:
mark[x_i] = True
L[j + c].append(x_i)
med = -1
else:
med = -2
break
current2 = 2 ** 50
random.shuffle(R[j])
for r in R[j]:
if cnt[r] < current2:
x_B = r
if mark[x_B] == True:
c = 1
else:
c = 2
cnt[x_i] += 1
cnt[x_B] += 1
if x_i > x_B:
if j + c < b:
mark[x_i] = True
R[j + c].append(x_i)
med = 1
else:
med = 2
break
if med == 0:
C.append(x_i)
elif med == -2:
L[len(L) - 1].append(x_i)
elif med == 2:
R[len(R) - 1].append(x_i)
return S, XS, L, C, R, cnt
# ==================================================
# Unittest : Parameter
@pytest.mark.parametrize(('n'), [
# Randomized input
random.randint(2**9, 2**15),
# Manuel input
2**10, 2**12, 2**14, 2**12 + 1, 2**12 - 1
])
# ==================================================
# Unittest : Test
def test_p1(n):
# Generating Tastcase
X0 = [i for i in range(n)]
cnt0 = [0 for _ in range(n)]
k0 = int(n ** (2 / 3))
d0 = int(n ** (1 / 12))
S0, XS0, L0, C0, R0, = phase1(X0, k0, d0)
S0, XS0, L0, C0, R0, cnt0 = phase2(S0, XS0, L0, C0, R0, cnt0)
X1 = [i for i in range(n)]
cnt1 = [0 for _ in range(n)]
k1 = int(n / math.log(n, 2)**(1/3))
d1 = int(math.log(n, 2)**(1/3))
S1, XS1, L1, C1, R1 = phase1(X1, k1, d1)
S1, XS1, L1, C1, R1, cnt1 = phase2(S1, XS1, L1, C1, R1, cnt1)
if n % 2 == 0:
assert int((n / 2) + 1) in C0
assert int((n / 2) + 1) in C1
assert cnt0[int((n / 2) + 1)] <= len(L0) + len(R0)
assert cnt1[int((n / 2) + 1)] <= len(L1) + len(R1)
elif n % 2 == 1:
assert int(n / 2) in C0
assert int(n / 2) in C1
assert cnt0[int(n / 2)] <= len(L0) + len(R0)
assert cnt1[int(n / 2)] <= len(L1) + len(R1)
# Test
return
# ================================================== | 0.128225 | 0.46035 |
import numpy as np
import pandas as pd
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import RobustScaler
from tpot.builtins import StackingEstimator
from BayOptPy.helperfunctions import (get_paths, get_data,
drop_missing_features,
set_publication_style)
"""
This script tests the best model recommened by the combined dataset (UKBIO +
BANC) for 100 generations, random
seed 20, initial population 1000, mutation rate and cross-validation rate 0.9
and cross-over 0.1
"""
# General Settings
#-------------------------------------------------------------------------------
debug = False
resamplefactor = 1
random_seed = 20
save_path = '/code/BayOptPy/tpot/Output/random_seed/100_generations/random_seed_%03d/' %(random_seed)
# Load the combined dataset
project_wd, project_data, _ = get_paths(debug, 'freesurf_combined')
demographics, _, df_data = \
get_data(project_data, 'freesurf_combined', debug,
project_wd, resamplefactor, raw=False, analysis=None)
# Drop the last column that corresponds the name of the dataset
df_data = df_data.drop('dataset', axis=1)
#-------------------------------------------------------------------------------
# Train the model with BANC
#-------------------------------------------------------------------------------
targetAttribute = demographics[['age']]
demographics = demographics.set_index('id')
# Add a few of the BIOBANK Dataset into the training set
Xtrain, Xtemp, Ytrain, Ytemp = train_test_split(df_data, targetAttribute,
test_size=.90,
stratify=demographics['stratify'],
random_state=random_seed)
train_demographics = demographics.loc[Xtemp.index]
Xvalidate, Xtest, Yvalidate, Ytest = train_test_split(Xtemp, Ytemp,
test_size=.05,
stratify=train_demographics['stratify'],
random_state=random_seed)
print('Divided BANC dataset into test and training')
print('Check train test split sizes')
print('X_train: ' + str(Xtrain.shape))
print('X_test: ' + str(Xtest.shape))
print('Y_train: ' + str(Ytrain.shape))
print('Y_test: ' + str(Ytest.shape))
print('X_valitation ' + str(Xvalidate.shape))
print('Y_test: ' + str(Yvalidate.shape))
# Normalise the test dataset and apply the transformation to the train
# dataset
robustscaler = RobustScaler().fit(Xtrain)
Xtrain_scaled = robustscaler.transform(Xtrain)
Xtest_scaled = robustscaler.transform(Xtest)
Xvalidate_scaled = robustscaler.transform(Xvalidate)
# Transform pandas into numpy arrays (no nneed to do it if you are scaling
# the results)
Ytrain = Ytrain.values
Ytest = Ytest.values
Yvalidate = Yvalidate.values
# Best pipeline recommended by TPOT
exported_pipeline = make_pipeline(
StackingEstimator(estimator=LinearRegression()),
StackingEstimator(estimator=ExtraTreesRegressor(bootstrap=True,
max_features=0.9000000000000001,
min_samples_leaf=3,
min_samples_split=10,
n_estimators=100,
random_state=42)),
ExtraTreesRegressor(bootstrap=False,
max_features=0.55,
min_samples_leaf=5,
min_samples_split=17,
n_estimators=100,
random_state=42)
)
exported_pipeline.fit(Xtrain_scaled, Ytrain)
print('Print MAE - test')
y_predicted = exported_pipeline.predict(Xtest_scaled)
mae = mean_absolute_error(Ytest, y_predicted)
print(mae)
print('Print MAE - training')
y_predicted_train = exported_pipeline.predict(Xtrain_scaled)
mae_train = mean_absolute_error(Ytrain, y_predicted_train)
print(mae_train)
print('Print MAE - validation')
y_predicted_validation = exported_pipeline.predict(Xvalidate_scaled)
mae_validation = mean_absolute_error(Yvalidate, y_predicted_validation)
print(mae_validation)
# plot predicted vs true for the test
fig = plt.figure()
plt.scatter(Ytest, y_predicted)
plt.ylabel('Predicted Age')
plt.xlabel('True Age')
plt.savefig(os.path.join(save_path, 'test_predicted_true_age.png'))
plt.close()
# plot predicted vs true for the validation
fig = plt.figure()
plt.scatter(Yvalidate, y_predicted_validation)
plt.ylabel('Predicted Age')
plt.xlabel('True Age')
plt.savefig(os.path.join(save_path, 'validation_predicted_true_age.png'))
plt.close() | BayOptPy/freesurfer_preprocess/original_dataset/UKBIO/tpot_model_analysis_4614144.py | import numpy as np
import pandas as pd
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import RobustScaler
from tpot.builtins import StackingEstimator
from BayOptPy.helperfunctions import (get_paths, get_data,
drop_missing_features,
set_publication_style)
"""
This script tests the best model recommened by the combined dataset (UKBIO +
BANC) for 100 generations, random
seed 20, initial population 1000, mutation rate and cross-validation rate 0.9
and cross-over 0.1
"""
# General Settings
#-------------------------------------------------------------------------------
debug = False
resamplefactor = 1
random_seed = 20
save_path = '/code/BayOptPy/tpot/Output/random_seed/100_generations/random_seed_%03d/' %(random_seed)
# Load the combined dataset
project_wd, project_data, _ = get_paths(debug, 'freesurf_combined')
demographics, _, df_data = \
get_data(project_data, 'freesurf_combined', debug,
project_wd, resamplefactor, raw=False, analysis=None)
# Drop the last column that corresponds the name of the dataset
df_data = df_data.drop('dataset', axis=1)
#-------------------------------------------------------------------------------
# Train the model with BANC
#-------------------------------------------------------------------------------
targetAttribute = demographics[['age']]
demographics = demographics.set_index('id')
# Add a few of the BIOBANK Dataset into the training set
Xtrain, Xtemp, Ytrain, Ytemp = train_test_split(df_data, targetAttribute,
test_size=.90,
stratify=demographics['stratify'],
random_state=random_seed)
train_demographics = demographics.loc[Xtemp.index]
Xvalidate, Xtest, Yvalidate, Ytest = train_test_split(Xtemp, Ytemp,
test_size=.05,
stratify=train_demographics['stratify'],
random_state=random_seed)
print('Divided BANC dataset into test and training')
print('Check train test split sizes')
print('X_train: ' + str(Xtrain.shape))
print('X_test: ' + str(Xtest.shape))
print('Y_train: ' + str(Ytrain.shape))
print('Y_test: ' + str(Ytest.shape))
print('X_valitation ' + str(Xvalidate.shape))
print('Y_test: ' + str(Yvalidate.shape))
# Normalise the test dataset and apply the transformation to the train
# dataset
robustscaler = RobustScaler().fit(Xtrain)
Xtrain_scaled = robustscaler.transform(Xtrain)
Xtest_scaled = robustscaler.transform(Xtest)
Xvalidate_scaled = robustscaler.transform(Xvalidate)
# Transform pandas into numpy arrays (no nneed to do it if you are scaling
# the results)
Ytrain = Ytrain.values
Ytest = Ytest.values
Yvalidate = Yvalidate.values
# Best pipeline recommended by TPOT
exported_pipeline = make_pipeline(
StackingEstimator(estimator=LinearRegression()),
StackingEstimator(estimator=ExtraTreesRegressor(bootstrap=True,
max_features=0.9000000000000001,
min_samples_leaf=3,
min_samples_split=10,
n_estimators=100,
random_state=42)),
ExtraTreesRegressor(bootstrap=False,
max_features=0.55,
min_samples_leaf=5,
min_samples_split=17,
n_estimators=100,
random_state=42)
)
exported_pipeline.fit(Xtrain_scaled, Ytrain)
print('Print MAE - test')
y_predicted = exported_pipeline.predict(Xtest_scaled)
mae = mean_absolute_error(Ytest, y_predicted)
print(mae)
print('Print MAE - training')
y_predicted_train = exported_pipeline.predict(Xtrain_scaled)
mae_train = mean_absolute_error(Ytrain, y_predicted_train)
print(mae_train)
print('Print MAE - validation')
y_predicted_validation = exported_pipeline.predict(Xvalidate_scaled)
mae_validation = mean_absolute_error(Yvalidate, y_predicted_validation)
print(mae_validation)
# plot predicted vs true for the test
fig = plt.figure()
plt.scatter(Ytest, y_predicted)
plt.ylabel('Predicted Age')
plt.xlabel('True Age')
plt.savefig(os.path.join(save_path, 'test_predicted_true_age.png'))
plt.close()
# plot predicted vs true for the validation
fig = plt.figure()
plt.scatter(Yvalidate, y_predicted_validation)
plt.ylabel('Predicted Age')
plt.xlabel('True Age')
plt.savefig(os.path.join(save_path, 'validation_predicted_true_age.png'))
plt.close() | 0.733261 | 0.508666 |
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class FeatureSelector(BaseEstimator, TransformerMixin):
"""This transformer select features.
Attributes
----------
columns: list of columns to transformer [n_columns]
Examples
--------
For usage examples, please see
https://jaisenbe58r.github.io/MLearner/user_guide/preprocessing/FeatureSelector/
"""
def __init__(self, columns=None, random_state=99):
"""Init replace missing values."""
if columns is not None:
if isinstance(columns, list) or isinstance(columns, tuple):
self.columns = columns
else:
raise NameError("Invalid type {}".format(type(columns)))
else:
self.columns = columns
self.random_state = random_state
def fit(self, X, y=None, **fit_params):
"""Gets the columns to make a replace missing values.
Parameters
----------
X : {Dataframe}, shape = [n_samples, n_features]
Dataframe, where n_samples is the number of samples and
n_features is the number of features.
Returns
--------
self
"""
if not isinstance(X, pd.core.frame.DataFrame):
raise NameError("Invalid type {}".format(type(X)))
if self.columns is None:
self.columns = X.columns
_lista = [i for i in self.columns if i not in X.columns.tolist()]
if len(_lista) > 0:
raise NameError("The columns {} no exist in Dataframe".format(_lista))
self._fitted = True
return self
def transform(self, X):
"""this transformer handles missing values.
Parameters
----------
X : {Dataframe}, shape = [n_samples, n_features]
Dataframe of samples, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : {Dataframe}, shape = [n_samples, n_features]
A copy of the input Dataframe with the columns replaced.
"""
if not hasattr(self, "_fitted"):
raise AttributeError("FeatureSelector has not been fitted, yet.")
if not isinstance(X, pd.core.frame.DataFrame):
raise NameError("Invalid type {}".format(type(X)))
return X[self.columns]
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names]
class CopyFeatures(BaseEstimator, TransformerMixin):
def __init__(self, columns=None, prefix=""):
"""Ini copy features."""
if columns is not None:
if isinstance(columns, list) or isinstance(columns, tuple):
self.columns = columns
else:
raise TypeError("Invalid type {}".format(type(columns)))
else:
self.columns = columns
self.name = prefix
def fit(self, X, y=None):
if self.columns is None:
self.columns = X.select_dtypes(exclude=["object"]).columns
if isinstance(X, pd.core.frame.DataFrame):
try:
_test = X[self.columns].astype(np.float32)
del(_test)
except ValueError:
raise NameError("Null or categorical variables are not allowed: {}".format(X.dtypes))
else:
raise NameError("Invalid type {}".format(type(X)))
self._fitted = True
return self
def transform(self, X):
if not hasattr(self, "_fitted"):
raise AttributeError("CopyFeatures has not been fitted, yet.")
if not isinstance(X, pd.core.frame.DataFrame):
raise NameError("Invalid type {}".format(type(X)))
self.X_transform = X.copy()
for i in self.columns:
name_col = self.name + "_" + str(i)
self.X_transform[name_col] = X[i].values
return self.X_transform
class DropFeatures(BaseEstimator, TransformerMixin):
"""This transformer drop features.
Attributes
----------
columns: list of columns to transformer [n_columns]
Examples
--------
For usage examples, please see
https://jaisenbe58r.github.io/MLearner/user_guide/preprocessing/DropFeatures/
"""
def __init__(self, columns_drop=None, random_state=99):
"""Init replace missing values."""
if columns_drop is not None:
if isinstance(columns_drop, list) or isinstance(columns_drop, tuple):
self.columns_drop = columns_drop
else:
raise NameError("Invalid type {}".format(type(columns_drop)))
else:
self.columns_drop = columns_drop
self.random_state = random_state
def fit(self, X, y=None, **fit_params):
"""Gets the columns to make a replace missing values.
Parameters
----------
X : {Dataframe}, shape = [n_samples, n_features]
Dataframe, where n_samples is the number of samples and
n_features is the number of features.
Returns
--------
self
"""
if not isinstance(X, pd.core.frame.DataFrame):
raise NameError("Invalid type {}".format(type(X)))
if self.columns_drop is None:
self.columns_drop = X.columns_drop
_lista = [i for i in self.columns_drop if i not in X.columns.tolist()]
if len(_lista) > 0:
raise NameError("The columns {} no exist in Dataframe".format(_lista))
self._fitted = True
return self
def transform(self, X):
"""this transformer handles missing values.
Parameters
----------
X : {Dataframe}, shape = [n_samples, n_features]
Dataframe of samples, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : {Dataframe}, shape = [n_samples, n_features]
A copy of the input Dataframe with the columns replaced.
"""
if not hasattr(self, "_fitted"):
raise AttributeError("DropFeatures has not been fitted, yet.")
if not isinstance(X, pd.core.frame.DataFrame):
raise NameError("Invalid type {}".format(type(X)))
return X.drop(self.columns_drop, axis=1) | mlearner/preprocessing/feature_selector.py | import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class FeatureSelector(BaseEstimator, TransformerMixin):
"""This transformer select features.
Attributes
----------
columns: list of columns to transformer [n_columns]
Examples
--------
For usage examples, please see
https://jaisenbe58r.github.io/MLearner/user_guide/preprocessing/FeatureSelector/
"""
def __init__(self, columns=None, random_state=99):
"""Init replace missing values."""
if columns is not None:
if isinstance(columns, list) or isinstance(columns, tuple):
self.columns = columns
else:
raise NameError("Invalid type {}".format(type(columns)))
else:
self.columns = columns
self.random_state = random_state
def fit(self, X, y=None, **fit_params):
"""Gets the columns to make a replace missing values.
Parameters
----------
X : {Dataframe}, shape = [n_samples, n_features]
Dataframe, where n_samples is the number of samples and
n_features is the number of features.
Returns
--------
self
"""
if not isinstance(X, pd.core.frame.DataFrame):
raise NameError("Invalid type {}".format(type(X)))
if self.columns is None:
self.columns = X.columns
_lista = [i for i in self.columns if i not in X.columns.tolist()]
if len(_lista) > 0:
raise NameError("The columns {} no exist in Dataframe".format(_lista))
self._fitted = True
return self
def transform(self, X):
"""this transformer handles missing values.
Parameters
----------
X : {Dataframe}, shape = [n_samples, n_features]
Dataframe of samples, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : {Dataframe}, shape = [n_samples, n_features]
A copy of the input Dataframe with the columns replaced.
"""
if not hasattr(self, "_fitted"):
raise AttributeError("FeatureSelector has not been fitted, yet.")
if not isinstance(X, pd.core.frame.DataFrame):
raise NameError("Invalid type {}".format(type(X)))
return X[self.columns]
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names]
class CopyFeatures(BaseEstimator, TransformerMixin):
def __init__(self, columns=None, prefix=""):
"""Ini copy features."""
if columns is not None:
if isinstance(columns, list) or isinstance(columns, tuple):
self.columns = columns
else:
raise TypeError("Invalid type {}".format(type(columns)))
else:
self.columns = columns
self.name = prefix
def fit(self, X, y=None):
if self.columns is None:
self.columns = X.select_dtypes(exclude=["object"]).columns
if isinstance(X, pd.core.frame.DataFrame):
try:
_test = X[self.columns].astype(np.float32)
del(_test)
except ValueError:
raise NameError("Null or categorical variables are not allowed: {}".format(X.dtypes))
else:
raise NameError("Invalid type {}".format(type(X)))
self._fitted = True
return self
def transform(self, X):
if not hasattr(self, "_fitted"):
raise AttributeError("CopyFeatures has not been fitted, yet.")
if not isinstance(X, pd.core.frame.DataFrame):
raise NameError("Invalid type {}".format(type(X)))
self.X_transform = X.copy()
for i in self.columns:
name_col = self.name + "_" + str(i)
self.X_transform[name_col] = X[i].values
return self.X_transform
class DropFeatures(BaseEstimator, TransformerMixin):
"""This transformer drop features.
Attributes
----------
columns: list of columns to transformer [n_columns]
Examples
--------
For usage examples, please see
https://jaisenbe58r.github.io/MLearner/user_guide/preprocessing/DropFeatures/
"""
def __init__(self, columns_drop=None, random_state=99):
"""Init replace missing values."""
if columns_drop is not None:
if isinstance(columns_drop, list) or isinstance(columns_drop, tuple):
self.columns_drop = columns_drop
else:
raise NameError("Invalid type {}".format(type(columns_drop)))
else:
self.columns_drop = columns_drop
self.random_state = random_state
def fit(self, X, y=None, **fit_params):
"""Gets the columns to make a replace missing values.
Parameters
----------
X : {Dataframe}, shape = [n_samples, n_features]
Dataframe, where n_samples is the number of samples and
n_features is the number of features.
Returns
--------
self
"""
if not isinstance(X, pd.core.frame.DataFrame):
raise NameError("Invalid type {}".format(type(X)))
if self.columns_drop is None:
self.columns_drop = X.columns_drop
_lista = [i for i in self.columns_drop if i not in X.columns.tolist()]
if len(_lista) > 0:
raise NameError("The columns {} no exist in Dataframe".format(_lista))
self._fitted = True
return self
def transform(self, X):
"""this transformer handles missing values.
Parameters
----------
X : {Dataframe}, shape = [n_samples, n_features]
Dataframe of samples, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : {Dataframe}, shape = [n_samples, n_features]
A copy of the input Dataframe with the columns replaced.
"""
if not hasattr(self, "_fitted"):
raise AttributeError("DropFeatures has not been fitted, yet.")
if not isinstance(X, pd.core.frame.DataFrame):
raise NameError("Invalid type {}".format(type(X)))
return X.drop(self.columns_drop, axis=1) | 0.884139 | 0.506958 |
import uuid
import re
import base64
class Streamlit_elements():
def mymarkdown(object, number, text):
form = """<style type="text/css">
.low {
color: #585858;
position: relative;
bottom: 1ex;
font-size: 60%;
text-align: center;
}
.hh1g {
color: green;
text-align: center;
font-size: 100%;
}
.hh1r {
color: red;
text-align: center;
font-size: 100%;
}
</style>"""
try:
if float(number.strip('%'))>=0:
markkdown_number = '<p class="hh1g"><strong>{}</strong></p>'.format(number)
else:
markkdown_number = '<p class="hh1r"><strong>{}</strong></p>'.format(number)
except:
markkdown_number = '<p class="hh1g"><strong>{}</strong></p>'.format(number)
markkdown_text = '<p class="low">{}</p>'.format(text)
object.markdown(form,unsafe_allow_html=True)
object.markdown(markkdown_number + markkdown_text ,unsafe_allow_html=True)
def changemarkdown(object, number, text, prefix='',suffix='', change=0):
form = """<style type="text/css">
.sup {
position: relative;
color: #585858;
bottom: 1ex;
font-size: 100%;
text-align: center;
}
.main {
color: green;
text-align: center;
font-size: 175%;
}
.h2g {
color: green;
text-align: center;
font-size: 120%;
}
.h2r {
color: red;
text-align: center;
font-size: 120%;
}
</style>"""
markkdown_number = '<span class="main"><strong>{}{}{}</strong></span>'.format(prefix,number,suffix)
if float(change)==0:
markkdown_change = '<span class="h2g"></span>'
elif float(change)>0:
markkdown_change = '<span class="h2g"> ({}%)</span>'.format(change)
else:
markkdown_change = '<span class="h2r"> ({}%)</span>'.format(change)
markkdown_text = '<p class="sup">{}</p>'.format(text)
object.markdown(form,unsafe_allow_html=True)
object.markdown(markkdown_number + markkdown_change + markkdown_text ,unsafe_allow_html=True)
def get_table_download_link(df):
"""Generates a link allowing the data in a given panda dataframe to be downloaded
in: dataframe
out: href string
"""
csv = df.to_csv(index=False)
file_name = 'data_from_streamlit.txt'
file_type = file_name.split('.')[-1] # e.g. txt
b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here
button_uuid = str(uuid.uuid4()).replace("-", "")
button_id = re.sub("\d+", "", button_uuid)
custom_css = f"""
<style>
#{button_id} {{
color: #fff !important;
text-transform: uppercase;
text-decoration: none;
background: #ed3330;
padding: 4px;
border-radius: 5px;
display: inline-block;
border: none;
transition: all 0.8s ease 0s;
}}
#{button_id}:hover {{
background: #434343;
letter-spacing: 1px;
-webkit-box-shadow: 0px 5px 40px -10px rgba(0,0,0,0.57);
-moz-box-shadow: 0px 5px 40px -10px rgba(0,0,0,0.57);
box-shadow: 5px 40px -10px rgba(0,0,0,0.57);
transition: all 0.4s ease 0s;
}}
#{button_id}:active {{
color: #fff !important;
text-transform: uppercase;
text-decoration: none;
background: #ed3330;
padding: 20px;
border-radius: 5px;
display: inline-block;
border: none;
transition: all 0.4s ease 0s;
}}
</style> """
dl_link = (
custom_css
+ f'<a download="{file_name}" id="{button_id}" href="data:file/{file_type};base64,{b64}">Download data</a><br></br>'
)
return dl_link
def measuresmarkdown(object, measure, text, prefix='',suffix='', format=':.0f'):
form = """<style type="text/css">
.sup {
position: relative;
color: #585858;
bottom: 1ex;
font-size: 100%;
text-align: center;
}
.main {
color: green;
text-align: center;
font-size: 175%;
}
</style>"""
markdown_value = '<span class="main"><strong>{}{}{}</strong></span>'.format(prefix,measure,suffix)
markdown_text = '<p class="sup"><strong>{}</strong></p>'.format(text)
object.markdown(form,unsafe_allow_html=True)
object.markdown( markdown_value + markdown_text,unsafe_allow_html=True) | elements/elements.py | import uuid
import re
import base64
class Streamlit_elements():
def mymarkdown(object, number, text):
form = """<style type="text/css">
.low {
color: #585858;
position: relative;
bottom: 1ex;
font-size: 60%;
text-align: center;
}
.hh1g {
color: green;
text-align: center;
font-size: 100%;
}
.hh1r {
color: red;
text-align: center;
font-size: 100%;
}
</style>"""
try:
if float(number.strip('%'))>=0:
markkdown_number = '<p class="hh1g"><strong>{}</strong></p>'.format(number)
else:
markkdown_number = '<p class="hh1r"><strong>{}</strong></p>'.format(number)
except:
markkdown_number = '<p class="hh1g"><strong>{}</strong></p>'.format(number)
markkdown_text = '<p class="low">{}</p>'.format(text)
object.markdown(form,unsafe_allow_html=True)
object.markdown(markkdown_number + markkdown_text ,unsafe_allow_html=True)
def changemarkdown(object, number, text, prefix='',suffix='', change=0):
form = """<style type="text/css">
.sup {
position: relative;
color: #585858;
bottom: 1ex;
font-size: 100%;
text-align: center;
}
.main {
color: green;
text-align: center;
font-size: 175%;
}
.h2g {
color: green;
text-align: center;
font-size: 120%;
}
.h2r {
color: red;
text-align: center;
font-size: 120%;
}
</style>"""
markkdown_number = '<span class="main"><strong>{}{}{}</strong></span>'.format(prefix,number,suffix)
if float(change)==0:
markkdown_change = '<span class="h2g"></span>'
elif float(change)>0:
markkdown_change = '<span class="h2g"> ({}%)</span>'.format(change)
else:
markkdown_change = '<span class="h2r"> ({}%)</span>'.format(change)
markkdown_text = '<p class="sup">{}</p>'.format(text)
object.markdown(form,unsafe_allow_html=True)
object.markdown(markkdown_number + markkdown_change + markkdown_text ,unsafe_allow_html=True)
def get_table_download_link(df):
"""Generates a link allowing the data in a given panda dataframe to be downloaded
in: dataframe
out: href string
"""
csv = df.to_csv(index=False)
file_name = 'data_from_streamlit.txt'
file_type = file_name.split('.')[-1] # e.g. txt
b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here
button_uuid = str(uuid.uuid4()).replace("-", "")
button_id = re.sub("\d+", "", button_uuid)
custom_css = f"""
<style>
#{button_id} {{
color: #fff !important;
text-transform: uppercase;
text-decoration: none;
background: #ed3330;
padding: 4px;
border-radius: 5px;
display: inline-block;
border: none;
transition: all 0.8s ease 0s;
}}
#{button_id}:hover {{
background: #434343;
letter-spacing: 1px;
-webkit-box-shadow: 0px 5px 40px -10px rgba(0,0,0,0.57);
-moz-box-shadow: 0px 5px 40px -10px rgba(0,0,0,0.57);
box-shadow: 5px 40px -10px rgba(0,0,0,0.57);
transition: all 0.4s ease 0s;
}}
#{button_id}:active {{
color: #fff !important;
text-transform: uppercase;
text-decoration: none;
background: #ed3330;
padding: 20px;
border-radius: 5px;
display: inline-block;
border: none;
transition: all 0.4s ease 0s;
}}
</style> """
dl_link = (
custom_css
+ f'<a download="{file_name}" id="{button_id}" href="data:file/{file_type};base64,{b64}">Download data</a><br></br>'
)
return dl_link
def measuresmarkdown(object, measure, text, prefix='',suffix='', format=':.0f'):
form = """<style type="text/css">
.sup {
position: relative;
color: #585858;
bottom: 1ex;
font-size: 100%;
text-align: center;
}
.main {
color: green;
text-align: center;
font-size: 175%;
}
</style>"""
markdown_value = '<span class="main"><strong>{}{}{}</strong></span>'.format(prefix,measure,suffix)
markdown_text = '<p class="sup"><strong>{}</strong></p>'.format(text)
object.markdown(form,unsafe_allow_html=True)
object.markdown( markdown_value + markdown_text,unsafe_allow_html=True) | 0.287268 | 0.091301 |