max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
fixWL_trainData.py | jbhunter804/NBANeural | 0 | 6623451 | <reponame>jbhunter804/NBANeural<filename>fixWL_trainData.py
#!/usr/bin/python
"""Recompute the win/loss label (column 1) of the training CSV.

Reads ``testtrainData.csv`` and writes ``fixedWL_trainData.csv`` with the
second column replaced by 1 when team1's score beats team2's score, else 0.
"""
import csv
import random  # noqa: F401  kept: file-level import, may be used elsewhere
import numpy as np  # noqa: F401
import pandas as pd  # noqa: F401

# inFile has meta data on where last cut off due to server issues.
# Will resume collecting there.
IN_FILE = "testtrainData.csv"
OUT_FILE = "fixedWL_trainData.csv"

# Row layout:
#   row[0]      = gameid
#   row[1]      = homeWin (recomputed here)
#   row[2]      = team1 score
#   row[3]      = team2 score
#   row[4]      = is team1 home?
#   row[5:707]  = team1 data
#   row[708:]   = team2 data


def fix_row(row):
    """Return *row* with column 1 replaced by the recomputed win flag.

    Empty rows, rows with an empty gameid, and rows whose score columns are
    not numeric (e.g. the header row) are returned unchanged.

    :param row: list of str, one CSV record
    :return: list, the (possibly) transformed record
    """
    if not row or row[0] == '':
        return row
    try:
        # BUG FIX: the original compared the score *strings* lexicographically
        # ('9' > '10' is True), producing wrong labels; compare numerically.
        team1_score = float(row[2])
        team2_score = float(row[3])
    except (ValueError, IndexError):
        # Header row (or malformed record): pass it through untouched.
        # (The original clobbered the header's homeWin column with 0.)
        return row
    return [row[0], 1 if team1_score > team2_score else 0] + list(row[2:])


def main():
    """Stream the input CSV through fix_row() into the output CSV."""
    # Context managers guarantee the handles are closed even on error;
    # newline='' is the documented requirement for the csv module.
    with open(IN_FILE, "r", newline='') as fin, \
            open(OUT_FILE, "w", newline='') as fout:
        reader = csv.reader(fin, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
        writer = csv.writer(fout, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
        for row in reader:
            writer.writerow(fix_row(row))


if __name__ == "__main__":
    main()
| #!/usr/bin/python
import csv
import random
import numpy as np
import pandas as pd
##inFile has meta data on where last cut off due to server issues. Will resume collecting there###
inFile = "testtrainData.csv"
iTrainFile = open(inFile, "r")
readerTrain = csv.reader(iTrainFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
outTrainFile = "fixedWL_trainData.csv"
oTrainFile = open(outTrainFile, "w")
writerTrain = csv.writer(oTrainFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
for row in readerTrain:
#################################
# readerTrain[0] = headers
# readerTrain[row][0] = gameid
# readerTrain[row][1] = homeWin
# readerTrain[row][2] = team1 score
# readerTrain[row][3] = team2 score
# readerTrain[row][4] = is team1 home?
# readerTrain[row][5:707] = team1 data
# readerTrain[row][708:] = team2 data
currentRow = []
if row[0] == '':
writerTrain.writerow(row)
continue
currentRow.append(row[0])
if (row[2]) > (row[3]):
currentRow.append(1)
else:
currentRow.append(0)
for i in range(2,len(row)):
currentRow.append(row[i])
writerTrain.writerow(currentRow)
# print(currentRow)
iTrainFile.close()
oTrainFile.close() | en | 0.670433 | #!/usr/bin/python ##inFile has meta data on where last cut off due to server issues. Will resume collecting there### ################################# # readerTrain[0] = headers # readerTrain[row][0] = gameid # readerTrain[row][1] = homeWin # readerTrain[row][2] = team1 score # readerTrain[row][3] = team2 score # readerTrain[row][4] = is team1 home? # readerTrain[row][5:707] = team1 data # readerTrain[row][708:] = team2 data # print(currentRow) | 2.878303 | 3 |
vk_bot.py | KirillYabl/quiz_bot | 0 | 6623452 | <gh_stars>0
import random
import logging
import os
import json
import dotenv
import vk_api as vk
from vk_api.longpoll import VkLongPoll, VkEventType
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
import redis
from common_functions import is_correct_answer, normalize_answer
logger = logging.getLogger(__name__)
class VkSessionUsersCondition:
    """User DB in Redis.

    Usage:
        For queries economy purpose
        1. ALWAYS get user_info
        2. Do other operations

    :param redis_db: object of connection redis db
    :param name_of_hash: str, name of your hash in redis
    """

    def __init__(self, redis_db, name_of_hash):
        # Template of info about a new user, stored as a JSON string.
        self.new_user_template = json.dumps({
            'got_q': False,  # has the user received a question
            'q': '',         # text of the question
            'a': ''          # text of the answer
        })
        self.redis_db = redis_db
        self.name_of_hash = name_of_hash
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.debug('Class params were initialized')

    def add_or_update_user(self, user_id, user_info=None):
        """Add a new user, or overwrite an existing user's state.

        :param user_id: id of user in VK
        :param user_info: dict with user state, or None to reset to the template
        """
        if user_info is None:
            self.redis_db.hset(self.name_of_hash, user_id, self.new_user_template)
            self.logger.debug(f'User created, user_id={user_id}')
            return
        dumped_user_info = json.dumps(user_info)
        self.redis_db.hset(self.name_of_hash, user_id, dumped_user_info)
        self.logger.debug(f'User updated, user_id={user_id}')
        return

    def get_user_info(self, user_id):
        """Get user info by user_id, parsing the stored JSON.

        :param user_id: id of user in VK
        :return: dict with user state, or None if the user is unknown
        """
        user_info = self.redis_db.hget(self.name_of_hash, user_id)
        if user_info is None:
            return None
        # BUG FIX: the original called .decode('utf-8') on the dict returned
        # by json.loads (AttributeError). Decode the raw bytes first.
        if isinstance(user_info, bytes):
            user_info = user_info.decode('utf-8')
        return json.loads(user_info)

    def is_user_got_question(self, user_id, user_info):
        """Check whether the user currently has a pending question.

        An unknown user (user_info is None) is registered on the fly.

        :param user_id: id of user in VK
        :param user_info: dict with user state, or None if unknown
        :return: bool, True if user got question
        """
        if user_info is None:
            self.add_or_update_user(user_id)
            return False
        return user_info['got_q']

    def get_user_correct_answer(self, user_id, user_info):
        """Get user correct answer.

        If this method gave answer, user will update (state reset to template).
        If this method didn't know user, user will initialize and the method
        returns None for the next handling.
        If user didn't get question, method returns None for the next handling.

        :param user_id: id of user in VK
        :param user_info: dict with user state, or None if unknown
        :return: answer or None (don't know user or user didn't get question)
        """
        if user_info is None:
            self.add_or_update_user(user_id)
            return None
        if user_info['got_q']:
            answer = user_info['a']
            # Reset the user to the template: the question is consumed.
            self.add_or_update_user(user_id)
            return answer
        return None

    def add_answer_to_user(self, user_id, user_info, question, answer):
        """Update user with a new question/answer pair.

        :param user_id: id of user in VK
        :param user_info: dict with user state, or None if unknown
        :param question: str, user question
        :param answer: str, correct answer
        """
        if user_info is None:
            # BUG FIX: the original went on to mutate None (TypeError).
            # Start from a fresh template dict instead.
            user_info = json.loads(self.new_user_template)
        user_info['got_q'] = True
        user_info['q'] = question
        user_info['a'] = answer
        self.add_or_update_user(user_id, user_info)
        self.logger.debug(f'User got answer and updated, user_id={user_id}')
def init_keyboard():
    """Build the one-time reply keyboard attached to every bot message.

    :return: VkKeyboard instance with the three quiz buttons
    """
    kb = VkKeyboard(one_time=True)
    kb.add_button('Новый вопрос', color=VkKeyboardColor.POSITIVE)
    kb.add_button('Сдаться', color=VkKeyboardColor.NEGATIVE)
    kb.add_line()  # move to the second row
    kb.add_button('Мой счёт', color=VkKeyboardColor.PRIMARY)
    logger.debug('Keyboard was initialized')
    return kb
def give_up(event, vk_api, **kwargs):
    """Handle the 'give up' button.

    :param event: event which describes the message
    :param vk_api: authorized session in vk
    :param kwargs: named args ('answer', 'msg')
    :return: str, type of answer
    """
    answer = kwargs['answer']
    if answer is None:
        # The user pressed 'give up' without an outstanding question.
        text = kwargs['msg']
        text += 'Еще не получили вопрос, а уже сдаетесь? Попробуйте сыграть в викторину.\n'
        text += 'Нажмите на кнопку "Новый вопрос".'
        result = 'give up without question'
    else:
        text = f'Жаль, правильный ответ:\n{answer}'
        result = 'give up'
    vk_api.messages.send(
        user_id=event.user_id,
        message=text,
        random_id=random.randint(1, 1000),
        keyboard=init_keyboard().get_keyboard(),
    )
    return result
def new_question_old_user(event, vk_api, **kwargs):
    """Button new question logic in situation where user got questions early.

    Sends two messages: the answer to the pending question, then the new one.

    :param event: event which describes the message
    :param vk_api: authorized session in vk
    :param kwargs: named args ('msg', 'answer', 'new_q')
    :return: str, type of answer
    """
    msg = kwargs['msg']
    answer = kwargs['answer']
    # BUG FIX: the caller (run_bot_logic) passes the question as 'new_q';
    # the original read kwargs['q'] and raised KeyError.
    new_q = kwargs['new_q']
    msg += f'А как же предыдущий вопрос?\nПравильный ответ:\n{answer}'
    vk_api.messages.send(
        user_id=event.user_id,
        message=msg,
        random_id=random.randint(1, 1000),
        keyboard=init_keyboard().get_keyboard()
    )
    msg = f'Ваш новый вопрос:\n{new_q}'
    vk_api.messages.send(
        user_id=event.user_id,
        message=msg,
        random_id=random.randint(1, 1000),
        keyboard=init_keyboard().get_keyboard()
    )
    return "new question for old user without answer previous"
def new_question_new_user(event, vk_api, **kwargs):
    """Button new question logic for a user without a pending question.

    :param event: event which describes the message
    :param vk_api: authorized session in vk
    :param kwargs: named args ('msg', 'new_q')
    :return: str, type of answer
    """
    msg = kwargs['msg']
    # BUG FIX: the caller (run_bot_logic) passes the question as 'new_q';
    # the original read kwargs['q'] and raised KeyError.
    new_q = kwargs['new_q']
    msg += new_q
    vk_api.messages.send(
        user_id=event.user_id,
        message=msg,
        random_id=random.randint(1, 1000),
        keyboard=init_keyboard().get_keyboard()
    )
    return 'new question for new user'
def check_answer(event, vk_api, **kwargs):
    """Check the user's reply against the stored correct answer.

    :param event: event which describes the message
    :param vk_api: authorized session in vk
    :param kwargs: named args ('correct_answer')
    :return: str, type of answer
    """
    correct_answer = kwargs['correct_answer']
    right = is_correct_answer(event.text, correct_answer,
                              limit=0.5, answer_handler=normalize_answer)
    verdict = 'Правильно!' if right else 'К сожалению нет!'
    reply = f'{verdict} Полный ответ:\n{correct_answer}\nХотите новый вопрос? Выберите в меню.'
    vk_api.messages.send(
        user_id=event.user_id,
        message=reply,
        random_id=random.randint(1, 1000),
        keyboard=init_keyboard().get_keyboard()
    )
    return 'correct answer' if right else 'incorrect answer'
def send_new_question_msg(event, vk_api, **kwargs):
    """Nudge the user to press the 'new question' button.

    :param event: event which describes the message
    :param vk_api: authorized session in vk
    :param kwargs: named args ('msg')
    :return: str, type of answer
    """
    text = kwargs['msg'] + 'Нажмите на кнопку "Новый вопрос" для получения вопроса.'
    vk_api.messages.send(
        user_id=event.user_id,
        message=text,
        random_id=random.randint(1, 1000),
        keyboard=init_keyboard().get_keyboard()
    )
    return 'press new question'
def run_bot_logic(event, vk_api, redis_db, users_db, redis_set_name, redis_hash_name):
    """Dispatch one incoming VK event to the appropriate quiz handler.

    :param event: event which describes the message
    :param vk_api: authorized session in vk
    :param redis_db: redis DB with questions
    :param users_db: custom DB of users condition
    :param redis_set_name: str, redis set holding the question texts
    :param redis_hash_name: str, redis hash mapping question -> answer
    """
    if not (event.type == VkEventType.MESSAGE_NEW and event.to_me):
        return
    logger.debug(f'Starting work. user_id={event.user_id}')

    # Fetch the user's stored state up front; register first-timers.
    greeting = ''
    user_info = users_db.get_user_info(event.user_id)
    first_time = user_info is None
    if first_time:
        users_db.add_or_update_user(event.user_id)
        greeting += 'Рады приветствовать вас в нашей викторине!\n'
        logger.debug('User play first time.')
    got_question = users_db.is_user_got_question(event.user_id, user_info)
    if not got_question:
        logger.debug('User didn\'t get question.')

    if event.text == "Сдаться":
        logger.debug('User gave up')
        answer = users_db.get_user_correct_answer(event.user_id, user_info)
        sent = give_up(event, vk_api, answer=answer, msg=greeting)
    elif event.text == "Новый вопрос":
        logger.debug('User is getting new question')
        if got_question and not first_time:
            # Returning user pressed "new question" instead of answering
            # the pending one: reveal its answer, then issue a fresh question.
            prev_answer = users_db.get_user_correct_answer(event.user_id, user_info)
            new_q = redis_db.srandmember(redis_set_name, 1)[0].decode('utf-8')
            new_answer = redis_db.hget(redis_hash_name, new_q).decode('utf-8')
            users_db.add_answer_to_user(event.user_id, user_info, new_q, new_answer)
            sent = new_question_old_user(event, vk_api, answer=prev_answer,
                                         new_q=new_q, msg=greeting)
        else:
            # No pending question: simply draw and send a new one.
            new_q = redis_db.srandmember(redis_set_name, 1)[0].decode('utf-8')
            new_answer = redis_db.hget(redis_hash_name, new_q).decode('utf-8')
            users_db.add_answer_to_user(event.user_id, user_info, new_q, new_answer)
            sent = new_question_new_user(event, vk_api, new_q=new_q, msg=greeting)
    else:
        if got_question:
            # Free-form text while a question is pending: treat as an answer.
            correct_answer = users_db.get_user_correct_answer(event.user_id, user_info)
            sent = check_answer(event, vk_api, correct_answer=correct_answer)
        else:
            # No pending question: recommend pressing the 'new question' button.
            sent = send_new_question_msg(event, vk_api, msg=greeting)
    logger.debug(f'"{sent}" message was sent')
    return
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s %(message)s',
                        level=logging.DEBUG)
    dotenv.load_dotenv()
    # All connection settings come from the environment (.env file).
    vk_app_token = os.getenv('VK_APP_TOKEN')
    redis_db_address = os.getenv('REDIS_DB_ADDRESS')
    redis_db_port = os.getenv('REDIS_DB_PORT')
    # BUG FIX: restore the os.getenv call that was lost to a
    # credential-redaction pass over this file.
    redis_db_password = os.getenv('REDIS_DB_PASSWORD')
    redis_set_of_questions_name = os.getenv('REDIS_SET_OF_QUESTIONS_NAME',
                                            default='QuestionAnswerSet')
    redis_hash_of_questions_and_answers_name = os.getenv('REDIS_HASH_OF_QUESTIONS_AND_ANSWERS_NAME',
                                                         default='QuestionAnswerHash')
    redis_hash_users_info_name = os.getenv('REDIS_HASH_USERS_INFO_NAME', default='UsersHash')
    logger.debug('.env was read')

    redis_db = redis.Redis(host=redis_db_address, port=redis_db_port,
                           password=redis_db_password)
    logger.debug('Got DB connection')
    users_db = VkSessionUsersCondition(redis_db, redis_hash_users_info_name)

    # Reconnect forever: any uncaught error tears down the long-poll session
    # and a fresh one is created on the next iteration.
    while True:
        try:
            vk_session = vk.VkApi(token=vk_app_token)
            logger.debug('Got VK API connection')
            vk_api = vk_session.get_api()
            longpoll = VkLongPoll(vk_session)
            for event in longpoll.listen():
                if event.type == VkEventType.MESSAGE_NEW and event.to_me:
                    run_bot_logic(event, vk_api, redis_db, users_db,
                                  redis_set_of_questions_name,
                                  redis_hash_of_questions_and_answers_name)
        except Exception:
            # BUG FIX: the original log message was truncated
            # ('Critical error in ').
            logger.exception('Critical error in the long-poll loop')
| import random
import logging
import os
import json
import dotenv
import vk_api as vk
from vk_api.longpoll import VkLongPoll, VkEventType
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
import redis
from common_functions import is_correct_answer, normalize_answer
logger = logging.getLogger(__name__)
class VkSessionUsersCondition:
"""User DB in Redis.
Usage:
For queries economy purpose
1. ALWAYS get user_info
2. Do other operations
:param redis_db: object of connection redis db
:param name_of_hash: str, name of your hash in redis
"""
def __init__(self, redis_db, name_of_hash):
# Template of info about new user
self.new_user_template = json.dumps({
'got_q': False, # Is user got question
'q': '', # text of question
'a': '' # text of answer
})
self.redis_db = redis_db
self.name_of_hash = name_of_hash
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.debug('Class params were initialized')
def add_or_update_user(self, user_id, user_info=None):
"""Add new user or update existing user if he got answer.
:param user_id: id of user in VK
:param user_info: dict or None, if don't need template
"""
if user_info is None:
self.redis_db.hset(self.name_of_hash, user_id, self.new_user_template)
self.logger.debug(f'User created, user_id={user_id}')
return
dumped_user_info = json.dumps(user_info)
self.redis_db.hset(self.name_of_hash, user_id, dumped_user_info)
self.logger.debug(f'User updated, user_id={user_id}')
return
def get_user_info(self, user_id):
"""Get user info by user_id, convert to JSON if user in db.
:param user_id: id of user in VK
:return: bool, True if user found
"""
user_info = self.redis_db.hget(self.name_of_hash, user_id)
if user_info is not None:
user_info = json.loads(user_info).decode('utf-8')
return user_info
def is_user_got_question(self, user_id, user_info):
"""Check status of user question.
:param user_id: id of user in VK
:return: bool, True if user got question
"""
if user_info is None:
self.add_or_update_user(user_id)
return False
return user_info['got_q']
def get_user_correct_answer(self, user_id, user_info):
"""Get user correct answer.
If this method gave answer, user will update.
If this method didn't know user, user will initialize. And method will return 'None' for the next handling.
If user didn't get question, method will return 'None' for the next handling.
:param user_id: id of user in VK
:param user_info: dict or None, if don't need template
:return: answer or None (don't know user or user didn't get question)
"""
if user_info is None:
self.add_or_update_user(user_id)
return None
if user_info['got_q']:
answer = user_info['a']
self.add_or_update_user(user_id)
return answer
return None
def add_answer_to_user(self, user_id, user_info, question, answer):
"""Update user with new answer.
:param user_id: id of user in VK
:param user_info: dict or None, if don't need template
:param question: str, user question
:param answer: str, correct answer
"""
if user_info is None:
self.add_or_update_user(user_id)
user_info['got_q'] = True
user_info['q'] = question
user_info['a'] = answer
self.add_or_update_user(user_id, user_info)
self.logger.debug(f'User got answer and updated, user_id={user_id}')
def init_keyboard():
"""Initialize keyboard.
:return: keyboard object
"""
keyboard = VkKeyboard(one_time=True)
keyboard.add_button('Новый вопрос', color=VkKeyboardColor.POSITIVE)
keyboard.add_button('Сдаться', color=VkKeyboardColor.NEGATIVE)
keyboard.add_line() # Переход на вторую строку
keyboard.add_button('Мой счёт', color=VkKeyboardColor.PRIMARY)
logger.debug('Keyboard was initialized')
return keyboard
def give_up(event, vk_api, **kwargs):
"""Button give up logic.
:param event: event which discribe message
:param vk_api: authorized session in vk
:param kwargs: dict, named args
:return: str, type of answer
"""
answer = kwargs['answer']
msg = kwargs['msg']
if answer is None:
msg += 'Еще не получили вопрос, а уже сдаетесь? Попробуйте сыграть в викторину.\n'
msg += 'Нажмите на кнопку "Новый вопрос".'
vk_api.messages.send(
user_id=event.user_id,
message=msg,
random_id=random.randint(1, 1000),
keyboard=init_keyboard().get_keyboard()
)
return 'give up without question'
msg = f'Жаль, правильный ответ:\n{answer}'
vk_api.messages.send(
user_id=event.user_id,
message=msg,
random_id=random.randint(1, 1000),
keyboard=init_keyboard().get_keyboard()
)
return 'give up'
def new_question_old_user(event, vk_api, **kwargs):
"""Button new question logic in situation where user got questions early.
:param event: event which discribe message
:param vk_api: authorized session in vk
:param kwargs: dict, named args
:return: str, type of answer
"""
msg = kwargs['msg']
answer = kwargs['answer']
new_q = kwargs['q']
msg += f'А как же предыдущий вопрос?\nПравильный ответ:\n{answer}'
vk_api.messages.send(
user_id=event.user_id,
message=msg,
random_id=random.randint(1, 1000),
keyboard=init_keyboard().get_keyboard()
)
msg = f'Ваш новый вопрос:\n{new_q}'
vk_api.messages.send(
user_id=event.user_id,
message=msg,
random_id=random.randint(1, 1000),
keyboard=init_keyboard().get_keyboard()
)
return "new question for old user without answer previous"
def new_question_new_user(event, vk_api, **kwargs):
"""Button new question logic for new user.
:param event: event which discribe message
:param vk_api: authorized session in vk
:param kwargs: dict, named args
:return: str, type of answer
"""
msg = kwargs['msg']
new_q = kwargs['q']
msg += new_q
vk_api.messages.send(
user_id=event.user_id,
message=msg,
random_id=random.randint(1, 1000),
keyboard=init_keyboard().get_keyboard()
)
return 'new question for new user'
def check_answer(event, vk_api, **kwargs):
"""Logic of checking answers.
:param event: event which discribe message
:param vk_api: authorized session in vk
:param kwargs: dict, named args
:return: str, type of answer
"""
correct_answer = kwargs['correct_answer']
if is_correct_answer(event.text, correct_answer, limit=0.5, answer_handler=normalize_answer):
msg = f'Правильно! Полный ответ:\n{correct_answer}\nХотите новый вопрос? Выберите в меню.'
type_of_answer = 'correct answer'
else:
msg = f'К сожалению нет! Полный ответ:\n{correct_answer}\nХотите новый вопрос? Выберите в меню.'
type_of_answer = 'incorrect answer'
vk_api.messages.send(
user_id=event.user_id,
message=msg,
random_id=random.randint(1, 1000),
keyboard=init_keyboard().get_keyboard()
)
return type_of_answer
def send_new_question_msg(event, vk_api, **kwargs):
"""Send recommendation to press button 'new question'.
:param event: event which discribe message
:param vk_api: authorized session in vk
:param kwargs: dict, named args
:return: str, type of answer
"""
msg = kwargs['msg']
msg += 'Нажмите на кнопку "Новый вопрос" для получения вопроса.'
vk_api.messages.send(
user_id=event.user_id,
message=msg,
random_id=random.randint(1, 1000),
keyboard=init_keyboard().get_keyboard()
)
return 'press new question'
def run_bot_logic(event, vk_api, redis_db, users_db, redis_set_name, redis_hash_name):
"""Logic of bot.
:param event: event which discribe message
:param vk_api: authorized session in vk
:param redis_db: redis DB with questions
:param users_db: custom DB of users condition
"""
first_time = False
got_question = True
msg = ''
if not (event.type == VkEventType.MESSAGE_NEW and event.to_me):
return
logger.debug(f'Starting work. user_id={event.user_id}')
# in start we will get future question and answer if didn't get early
user_info = users_db.get_user_info(event.user_id)
if user_info is None:
users_db.add_or_update_user(event.user_id)
first_time = True
msg += 'Рады приветствовать вас в нашей викторине!\n'
logger.debug('User play first time.')
if not users_db.is_user_got_question(event.user_id, user_info):
got_question = False
logger.debug('User didn\'t get question.')
if event.text == "Сдаться":
logger.debug('User gave up')
answer = users_db.get_user_correct_answer(event.user_id, user_info)
type_of_answer = give_up(event, vk_api, answer=answer, msg=msg)
logger.debug(f'"{type_of_answer}" message was sent')
return
elif event.text == "Новый вопрос":
logger.debug('User is getting new question')
# user isn't playing first time. But he pressed "new question" instead answer to question
if got_question and not first_time:
answer = users_db.get_user_correct_answer(event.user_id, user_info)
new_q = redis_db.srandmember(redis_set_name, 1)[0].decode('utf-8')
new_answer = redis_db.hget(redis_hash_name, new_q).decode('utf-8')
users_db.add_answer_to_user(event.user_id, user_info, new_q, new_answer)
type_of_answer = new_question_old_user(event, vk_api, answer=answer, new_q=new_q, msg=msg)
logger.debug(f'"{type_of_answer}" message was sent')
return
# user is playing first time
new_q = redis_db.srandmember(redis_set_name, 1)[0].decode('utf-8')
new_answer = redis_db.hget(redis_hash_name, new_q).decode('utf-8')
users_db.add_answer_to_user(event.user_id, user_info, new_q, new_answer)
type_of_answer = new_question_new_user(event, vk_api, new_q=new_q, msg=msg)
logger.debug(f'"{type_of_answer}" message was sent')
return
else:
# user got question and he is trying answer
if got_question:
correct_answer = users_db.get_user_correct_answer(event.user_id, user_info)
type_of_answer = check_answer(event, vk_api, correct_answer=correct_answer)
logger.debug(f'"{type_of_answer}" message was sent')
return
# user didn't get question and bot must get recommendation to press 'new question' button
type_of_answer = send_new_question_msg(event, vk_api, msg=msg)
logger.debug(f'"{type_of_answer}" message was sent')
return
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s %(message)s', level=logging.DEBUG)
dotenv.load_dotenv()
vk_app_token = os.getenv('VK_APP_TOKEN')
redis_db_address = os.getenv('REDIS_DB_ADDRESS')
redis_db_port = os.getenv('REDIS_DB_PORT')
redis_db_password = <PASSWORD>('REDIS_DB_PASSWORD')
redis_set_of_questions_name = os.getenv('REDIS_SET_OF_QUESTIONS_NAME', default='QuestionAnswerSet')
redis_hash_of_questions_and_answers_name = os.getenv('REDIS_HASH_OF_QUESTIONS_AND_ANSWERS_NAME',
default='QuestionAnswerHash')
redis_hash_users_info_name = os.getenv('REDIS_HASH_USERS_INFO_NAME', default='UsersHash')
logger.debug('.env was read')
redis_db = redis.Redis(host=redis_db_address, port=redis_db_port, password=redis_db_password)
logger.debug('Got DB connection')
users_db = VkSessionUsersCondition(redis_db, redis_hash_users_info_name)
while True:
try:
vk_session = vk.VkApi(token=vk_app_token)
logger.debug('Got VK API connection')
vk_api = vk_session.get_api()
longpoll = VkLongPoll(vk_session)
for event in longpoll.listen():
if event.type == VkEventType.MESSAGE_NEW and event.to_me:
run_bot_logic(event, vk_api, redis_db, users_db, redis_set_of_questions_name,
redis_hash_of_questions_and_answers_name)
except Exception:
logger.exception('Critical error in ') | en | 0.864452 | User DB in Redis. Usage: For queries economy purpose 1. ALWAYS get user_info 2. Do other operations :param redis_db: object of connection redis db :param name_of_hash: str, name of your hash in redis # Template of info about new user # Is user got question # text of question # text of answer Add new user or update existing user if he got answer. :param user_id: id of user in VK :param user_info: dict or None, if don't need template Get user info by user_id, convert to JSON if user in db. :param user_id: id of user in VK :return: bool, True if user found Check status of user question. :param user_id: id of user in VK :return: bool, True if user got question Get user correct answer. If this method gave answer, user will update. If this method didn't know user, user will initialize. And method will return 'None' for the next handling. If user didn't get question, method will return 'None' for the next handling. :param user_id: id of user in VK :param user_info: dict or None, if don't need template :return: answer or None (don't know user or user didn't get question) Update user with new answer. :param user_id: id of user in VK :param user_info: dict or None, if don't need template :param question: str, user question :param answer: str, correct answer Initialize keyboard. :return: keyboard object # Переход на вторую строку Button give up logic. :param event: event which discribe message :param vk_api: authorized session in vk :param kwargs: dict, named args :return: str, type of answer Button new question logic in situation where user got questions early. :param event: event which discribe message :param vk_api: authorized session in vk :param kwargs: dict, named args :return: str, type of answer Button new question logic for new user. :param event: event which discribe message :param vk_api: authorized session in vk :param kwargs: dict, named args :return: str, type of answer Logic of checking answers. 
:param event: event which discribe message :param vk_api: authorized session in vk :param kwargs: dict, named args :return: str, type of answer Send recommendation to press button 'new question'. :param event: event which discribe message :param vk_api: authorized session in vk :param kwargs: dict, named args :return: str, type of answer Logic of bot. :param event: event which discribe message :param vk_api: authorized session in vk :param redis_db: redis DB with questions :param users_db: custom DB of users condition # in start we will get future question and answer if didn't get early # user isn't playing first time. But he pressed "new question" instead answer to question # user is playing first time # user got question and he is trying answer # user didn't get question and bot must get recommendation to press 'new question' button | 2.540582 | 3 |
scripts/train_resnet_covidx.py | probayes/covid19xray | 3 | 6623453 | from pycovid19xray.utils import configure_logging, set_gpu
configure_logging()
# set_gpu(1)
import torch
from pycovid19xray.fastai import FastaiModel, show_metrics
from pycovid19xray.tests import DATA_DIR
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import fastai.vision as fv
from PIL import Image
import numpy as np

# ResNet-50 with data augmentation trained on the COVIDx v3 dataset.
fastaimodel = FastaiModel(modelname="rs50-dataaug-covidxv3",
                          bs=64)
data, test = fastaimodel.create_covidx_databunch()
test_data = test.databunch()

# Show sample grids for the train, validation and test splits.
plt.close("all")
data.show_batch(rows=4, figsize=(12, 9))
fastaimodel.save_fig("covidx_grid.jpg")
plt.close("all")
data.show_batch(rows=4, figsize=(12, 9), ds_type=fv.DatasetType.Valid)
fastaimodel.save_fig("covidx_grid_valid.jpg")
plt.close("all")
test_data.show_batch(rows=3, figsize=(12, 9))
fastaimodel.save_fig("covidx_grid_test.jpg")

#######
# model
# Class-weighted loss was tried and left here for reference:
# w = torch.cuda.FloatTensor([1., 1., 6.])
# learn = fastaimodel.create_learner(data, loss_func=torch.nn.CrossEntropyLoss(weight=w))
learn = fastaimodel.create_learner(data, loss_func=torch.nn.CrossEntropyLoss())

# Stage 1: train the head only at the LR suggested by the finder.
learn.lr_find()
plt.close("all")
learn.recorder.plot()
fastaimodel.save_fig("train_resnet_covidx_find_lr.jpg")
lr = 1e-2
learn.fit_one_cycle(5, slice(lr))
fastaimodel.save_learner(learn, 'stage-1')

# Evaluate stage 1 on the validation set.
interp = fv.ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix(figsize=(12, 12), dpi=60)
fastaimodel.save_fig("val_confmat.jpg")
# show metrics on the held-out test set
show_metrics(learn, test)

# Stage 2: unfreeze the backbone and fine-tune with a discriminative LR.
learn.unfreeze()
learn.lr_find()
plt.close("all")
learn.recorder.plot()
# BUG FIX: filename typo ("rain_...") made this plot inconsistent with
# the stage-1 plot name.
fastaimodel.save_fig("train_resnet_covidx_find_lr_unfreeze.jpg")
learn.fit_one_cycle(15, slice(1e-5, 1e-4))
fastaimodel.save_learner(learn, 'stage-2')

# Evaluate stage 2 on the validation set.
interp = fv.ClassificationInterpretation.from_learner(learn)
plt.close("all")
interp.plot_confusion_matrix(figsize=(12, 12), dpi=60)
fastaimodel.save_fig("eval_confmat_2.jpg")
show_metrics(learn, test)
| from pycovid19xray.utils import configure_logging, set_gpu
configure_logging()
# set_gpu(1)
import torch
from pycovid19xray.fastai import FastaiModel, show_metrics
from pycovid19xray.tests import DATA_DIR
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import fastai.vision as fv
from PIL import Image
import numpy as np
fastaimodel = FastaiModel(modelname="rs50-dataaug-covidxv3",
bs=64)
data, test = fastaimodel.create_covidx_databunch()
test_data = test.databunch()
# show a grid
plt.close("all")
data.show_batch(rows=4, figsize=(12,9))
fastaimodel.save_fig("covidx_grid.jpg")
plt.close("all")
data.show_batch(rows=4, figsize=(12,9), ds_type=fv.DatasetType.Valid)
fastaimodel.save_fig("covidx_grid_valid.jpg")
plt.close("all")
test_data.show_batch(rows=3, figsize=(12,9))
fastaimodel.save_fig("covidx_grid_test.jpg")
#######
# model
# w = torch.cuda.FloatTensor([1., 1., 6.])
# learn = fastaimodel.create_learner(data, loss_func=torch.nn.CrossEntropyLoss(weight=w))
learn = fastaimodel.create_learner(data, loss_func=torch.nn.CrossEntropyLoss())
learn.lr_find()
plt.close("all")
learn.recorder.plot()
fastaimodel.save_fig("train_resnet_covidx_find_lr.jpg")
lr = 1e-2
learn.fit_one_cycle(5, slice(lr))
fastaimodel.save_learner(learn, 'stage-1')
# eval on evaluation dataset
interp = fv.ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix(figsize=(12,12), dpi=60)
fastaimodel.save_fig("val_confmat.jpg")
# show metrics
show_metrics(learn, test)
# unfreeze and fine tune
learn.unfreeze()
learn.lr_find()
plt.close("all")
learn.recorder.plot()
fastaimodel.save_fig("rain_resnet_covidx_find_lr_unfreeze.jpg")
learn.fit_one_cycle(15, slice(1e-5, 1e-4))
fastaimodel.save_learner(learn, 'stage-2')
# eval on evaluation dataset
interp = fv.ClassificationInterpretation.from_learner(learn)
plt.close("all")
interp.plot_confusion_matrix(figsize=(12,12), dpi=60)
fastaimodel.save_fig("eval_confmat_2.jpg")
show_metrics(learn, test)
| en | 0.506395 | # set_gpu(1) # show a grid ####### # model # w = torch.cuda.FloatTensor([1., 1., 6.]) # learn = fastaimodel.create_learner(data, loss_func=torch.nn.CrossEntropyLoss(weight=w)) # eval on evaluation dataset # show metrics # unfreeze and fine tune # eval on evaluation dataset | 2.228893 | 2 |
configutilities/configutilities/configutilities/common/wrs_ico.py | etaivan/stx-config | 0 | 6623454 | <reponame>etaivan/stx-config<filename>configutilities/configutilities/configutilities/common/wrs_ico.py
# ----------------------------------------------------------------------
# This file was generated by img2py.py
#
#
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#
# Stylized red Wind River 'W' icon
#
from wx.lib.embeddedimage import PyEmbeddedImage
favicon = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAAA99J"
"REFUWIXtls1PE2sUxn/z0Q7U0g/AXmNAUBGNxsSYGnWjQQ0YNAISY1y4kLh16T/g1qV/gwtJ"
"JFFATbBCmuBXrhiMitGk0hpBJaQtM4Vpp+3cxXAHSltub1zggpPM4p155pznfd5zznuEv8Fk"
"<KEY>/BEE5EpqUPiP72YFmHI42bV/f0ng6uDG3BzZubmSTuW6OhzbtmEaBuloFDOd"
"Lo3buhVHIGDhpqcxMxnLfzoaXYlnmiAICJKEoCjWWpKI37tH7MYNyOUKvQoCDbdvU3ftGnlN"
"Y7qvj4VQqGiXgizTeOcOtVeukHr1isjly+QSCYvYdF9fEVvR5aLu6lX8ly4B4OnoQGlqIh2J"
"FOCcTU34urqQa2uhthZ/by/qs2cW8VWmtLTgPXcOyetlcWLCDg4gL4RCJSXLfPtGzcmTyIEA"
"SnMz7hMn0CMRe3cm4GlvR9m1y/7H096Os7mZzNevK6ICnrNncTY2kksmSQ4PF+SCKCwv1j76"
"p0+kXr9eRol4z59HdDptx5LLhf/iRRBXCknZuRPPmTMFOSR7PPi6ugBYfPuWpcnJgiMqW4Z5"
"XScxOGjL6T5+HGXPHszlXbkOH2bL0aOQz2PMzlo4UcTX04Pkctm7dx05wpZgEIDko0dkVbUg"
"TlkCAqCOjpKJxQBwbN+O59Qp+5uvuxvJ5yMTizF765Z9ru5jx6g+dMiSWRDwdXcjut1kf/1i"
"<KEY>"
<KEY>"
"3WCaJIeGyJfoEesSEABtfBz982cA5Pp6/D09+Ht7ESSJpclJUs+fIwALIyPoU1MAVO3di+/C"
"<KEY>"
"cPDXzZs4AgEA1HCYzJoeUpECYGVyYmiIfCoFWMkoeTwYP36QHB4uxA0MYPz8aanQ2ork82Ea"
"hiX/2i5aKQEBWHzzhqV37wrea+Ew+sePtqwCsPT+vdUJV1n6yxe08fGyl1VF13E2Hif5+LG9"
"Ng2D+P375JeT81/LGwbx/n7yum6/WwiFML5/L+u74nkg+eSJfSMuffiAFg4XXzpY5704MWER"
"0jS79ZYzodKxXFAUatrakP1+MtGoVfdm6V9dwSBVLS3kUinU0VHymvb7BKB4TvhdHFRQhqut"
"kqnn/+DgD5gJNwlsEthwAv8AApOBr7T8BuQAAAAASUVORK5CYII=")
| # ----------------------------------------------------------------------
# This file was generated by img2py.py
#
#
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#
# Stylized red Wind River 'W' icon
#
from wx.lib.embeddedimage import PyEmbeddedImage
favicon = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAAA99J"
"REFUWIXtls1PE2sUxn/z0Q7U0g/AXmNAUBGNxsSYGnWjQQ0YNAISY1y4kLh16T/g1qV/gwtJ"
"JFFATbBCmuBXrhiMitGk0hpBJaQtM4Vpp+3cxXAHSltub1zggpPM4p155pznfd5zznuEv8Fk"
"<KEY>/BEE5EpqUPiP72YFmHI42bV/f0ng6uDG3BzZubmSTuW6OhzbtmEaBuloFDOd"
"Lo3buhVHIGDhpqcxMxnLfzoaXYlnmiAICJKEoCjWWpKI37tH7MYNyOUKvQoCDbdvU3ftGnlN"
"Y7qvj4VQqGiXgizTeOcOtVeukHr1isjly+QSCYvYdF9fEVvR5aLu6lX8ly4B4OnoQGlqIh2J"
"FOCcTU34urqQa2uhthZ/by/qs2cW8VWmtLTgPXcOyetlcWLCDg4gL4RCJSXLfPtGzcmTyIEA"
"SnMz7hMn0CMRe3cm4GlvR9m1y/7H096Os7mZzNevK6ICnrNncTY2kksmSQ4PF+SCKCwv1j76"
"p0+kXr9eRol4z59HdDptx5LLhf/iRRBXCknZuRPPmTMFOSR7PPi6ugBYfPuWpcnJgiMqW4Z5"
"XScxOGjL6T5+HGXPHszlXbkOH2bL0aOQz2PMzlo4UcTX04Pkctm7dx05wpZgEIDko0dkVbUg"
"TlkCAqCOjpKJxQBwbN+O59Qp+5uvuxvJ5yMTizF765Z9ru5jx6g+dMiSWRDwdXcjut1kf/1i"
"<KEY>"
<KEY>"
"3WCaJIeGyJfoEesSEABtfBz982cA5Pp6/D09+Ht7ESSJpclJUs+fIwALIyPoU1MAVO3di+/C"
"<KEY>"
"cPDXzZs4AgEA1HCYzJoeUpECYGVyYmiIfCoFWMkoeTwYP36QHB4uxA0MYPz8aanQ2ork82Ea"
"hiX/2i5aKQEBWHzzhqV37wrea+Ew+sePtqwCsPT+vdUJV1n6yxe08fGyl1VF13E2Hif5+LG9"
"Ng2D+P375JeT81/LGwbx/n7yum6/WwiFML5/L+u74nkg+eSJfSMuffiAFg4XXzpY5704MWER"
"0jS79ZYzodKxXFAUatrakP1+MtGoVfdm6V9dwSBVLS3kUinU0VHymvb7BKB4TvhdHFRQhqut"
"kqnn/+DgD5gJNwlsEthwAv8AApOBr7T8BuQAAAAASUVORK5CYII=") | en | 0.618888 | # ---------------------------------------------------------------------- # This file was generated by img2py.py # # # Copyright (c) 2015-2016 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # # # Stylized red Wind River 'W' icon # | 1.329007 | 1 |
DI2.py | stalei/DIProject | 0 | 6623455 | <reponame>stalei/DIProject
import datetime as dt
import os
import csv
import pandas as pd
import numpy as np
import datetime
import seaborn as sns
from datetime import timedelta, date
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from scipy.stats import chisquare
def FilterDate(Data, DStart, DEnd):
    """Parse StartDate as mm/dd/YYYY and keep rows within [DStart, DEnd].

    Note: the StartDate column of the caller's frame is converted to
    datetimes in place before filtering.
    """
    Data['StartDate'] = pd.to_datetime(Data['StartDate'], format="%m/%d/%Y")
    in_window = (Data['StartDate'] >= DStart) & (Data['StartDate'] <= DEnd)
    return Data[in_window]
def FilterConractAmount(Data, LowerLimit):
    """Keep rows whose ContractAmount strictly exceeds LowerLimit.

    (The "Conract" misspelling is kept in the name for caller compatibility.)
    """
    above_limit = Data['ContractAmount'] > LowerLimit
    return Data[above_limit]
def FilterAgency(Data, Agency):
    """Keep only the rows awarded by the given agency (exact name match)."""
    return Data[Data['AgencyName'] == Agency]
def FilterCategories(Data, Cat1, Cat2):
    """Keep rows whose CategoryDescription is either Cat1 or Cat2."""
    in_either = Data['CategoryDescription'].isin([Cat1, Cat2])
    return Data[in_either]
def FilterShortTitle(Data, title):
    """Keep rows whose ShortTitle contains `title` (substring/regex match).

    Robustness fix: rows with a missing ShortTitle previously produced NaN
    in the mask, which raises when used for boolean indexing; na=False now
    simply drops them.
    """
    mask = Data['ShortTitle'].str.contains(title, na=False)
    return Data[mask]
def FilterCategory(Data, Cat):
    """Keep rows whose CategoryDescription equals Cat.

    Bug fix: the original compared against the module-level `Cat1` instead
    of the `Cat` parameter, so the argument was silently ignored (e.g. the
    "Goods" filter in question 4 actually filtered on Cat1's value).
    """
    return Data[Data['CategoryDescription'] == Cat]
def FilterZipcode(Data, zip):
    """Keep rows whose VendorAddress ends with the given zip-code string.

    Robustness fix: na=False drops rows with a missing address instead of
    raising on a non-boolean mask.  (The parameter name shadows the builtin
    `zip`; kept unchanged for caller compatibility.)
    """
    mask = Data['VendorAddress'].str.endswith(zip, na=False)
    return Data[mask]
# Load the raw contract-award export; low_memory=False avoids mixed-dtype
# chunked-parsing issues on the wide CSV.
DataRaw = pd.read_csv('Recent_Contract_Awards.csv', low_memory=False)
Data = DataRaw  # optionally: DataRaw[DataRaw['BOROUGH'] == 'MANHATTAN'].copy()
del DataRaw
print(Data.head(1))
# Restrict to awards starting in the 2010-2019 decade.
DStart = date(2010, 1, 1)
DEnd = date(2019, 12, 31)
Data2 = FilterDate(Data, DStart, DEnd)
# Bug fix: dropna() is not in-place -- the result must be assigned back,
# otherwise rows with a missing ContractAmount were silently kept.
Data2 = Data2.dropna(subset=['ContractAmount'])
Data3 = FilterConractAmount(Data2, 0)  # use this frame for the rest
print(np.sum(Data3['ContractAmount']))
#2
# Question 2: for one agency, what fraction of its total contract dollars
# went to its 50 highest-paid vendors?
Agency="Citywide Administrative Services"
DataFor2=FilterAgency(Data3,Agency)
AgencyTotalAmount=np.sum(DataFor2['ContractAmount'])
#print(len(set(DataFor2['VendorName'])))
UniqueVendors=set(DataFor2['VendorName'])
size =len(UniqueVendors)
print("size:%d"%size)
# NOTE(review): []*size is still the empty list, so VendorName stays []
# and is never used below -- probably meant [None]*size; confirm intent.
VendorName=[]*size
VendorAmount=[0]*size
i=0
# Total contract dollars per vendor; the set's iteration order fixes index i.
for v in UniqueVendors:
    dtemp=DataFor2[(DataFor2['VendorName']==v)]
    amount=dtemp['ContractAmount']
    #VendorName[i]=v
    VendorAmount[i]=np.sum(amount)
    #print(i)
    i+=1
i=0
for v in UniqueVendors:
    print(v,VendorAmount[i])
    i+=1
# Top 50 vendors by total amount, and their share of the agency's spending.
Top50Amount = sorted(VendorAmount, reverse = True)[:50]
#print(Top50Amount)
#print(Top50Amount[49])
Top50AmountSum=np.sum(Top50Amount)
FracTop50=Top50AmountSum/AgencyTotalAmount
print("Top 50 frac:%g"%FracTop50)
#3
# Question 3: within construction-related categories, compare total contract
# dollars for titles mentioning Central Park vs Washington Square Park.
Cat1="Construction Related Services"
Cat2="Construction/Construction Services"
DataFor3=FilterCategories(Data3,Cat1,Cat2)
TotFor3=np.sum(DataFor3['ContractAmount'])
#print(DataFor3['ContractAmount'],DataFor3['ShortTitle'],DataFor3['CategoryDescription'])
DataCentralPark=FilterShortTitle(DataFor3,"CENTRAL PARK")
DataWSP=FilterShortTitle(DataFor3,"WASHINGTON SQUARE PARK")
# Show full (untruncated) titles when printing.
# NOTE(review): newer pandas expects None instead of -1 here -- confirm.
pd.set_option('display.max_colwidth', -1)
print(DataCentralPark['ShortTitle'])
print(DataWSP['ShortTitle'])
TotForCentralPark=np.sum(DataCentralPark['ContractAmount'])
TotForWSP=np.sum(DataWSP['ContractAmount'])
# Shares of the construction total, and the Central Park / WSP ratio.
FracCentralPark=TotForCentralPark/TotFor3
FracWSP=TotForWSP/TotFor3
ANS3=TotForCentralPark/TotForWSP
print("Answer 3:%g"%ANS3)
#4
# Question 4: yearly "Goods" spending 2010-2018 plus a linear-regression fit.
DataFor4=FilterCategory(Data3,"Goods")
totYear=[]
y=np.array(range(2010,2019)).reshape((-1, 1))  # column vector for sklearn
#years=range(2010,2019)
for yr in range(2010,2019):
    Di=date(yr,1,1)
    # NOTE(review): the window ends on Dec 31 of the *next* year, so each
    # "yearly" total actually covers two overlapping years -- confirm that
    # date(yr,12,31) was not intended.
    Df=date(yr+1,12,31)
    D4= FilterDate(DataFor4, Di,Df)# DataFor4[date(DataFor4['StartDate']).year==yr]
    totYear.append(np.sum(D4['ContractAmount']))
print(totYear)
# Fit amount ~ year; report fit quality and coefficients.
model = LinearRegression().fit(y, totYear)
r_sq = model.score(y, totYear)
print('coefficient of determination:', r_sq)
print('intercept:', model.intercept_)
print('slope:', model.coef_)
# Scatter plot of the yearly totals.
fig2 = plt.figure(figsize=plt.figaspect(1))
fig2.suptitle('Yearly expenditure')
ax21 = fig2.add_subplot(111)
ax21.set_ylabel('Amount')
ax21.set_xlabel('Year')
ax21.title.set_text('Contract amount-year')
ax21.grid(True)
ax21.plot(y,totYear,'o',label='Data')
ax21.legend(loc=2)
#5
# Question 5: among the agencies with the 5 largest contract counts, compare
# the two largest mean contract amounts.
UniqueAgencies=set(Data3['AgencyName'])
ContractCount=[]
for Agency in UniqueAgencies:
    D5=Data3[Data3['AgencyName']==Agency]
    ContractCount.append(len(D5['ContractAmount']))
# Contract-count threshold for being a top-5 agency (the 5th largest count).
Top5Contract = sorted(ContractCount, reverse = True)[:5]
ContractLimit=Top5Contract[4]
meanAmount=[]
for Agency in UniqueAgencies:
    D5=Data3[Data3['AgencyName']==Agency]
    cont=len(D5['ContractAmount'])
    if cont>=ContractLimit:
        meanAmount.append(np.mean(D5['ContractAmount']))
print(meanAmount)
# Ratio of the largest mean contract amount to the second largest.
meanAmountSorted= sorted(meanAmount, reverse = True)[:5]
print(meanAmountSorted)
print("Answer5:%g"%(meanAmountSorted[0]/meanAmountSorted[1]))
######
##6
# Question 6: are Parks and Recreation "NOTICE OF AWARD" contracts uniformly
# distributed over weekdays?  (Chi-square on day-of-week values.)
Agency6="Parks and Recreation"
DataFor6=FilterAgency(Data3,Agency6)
DataNoticeFor6=FilterShortTitle(DataFor6,"NOTICE OF AWARD")
# NOTE(review): the right-hand side uses the unfiltered DataFor6 column;
# pandas aligns on the index, so only matching rows are assigned -- confirm.
DataNoticeFor6['StartDate']=pd.to_datetime(DataFor6['StartDate'], format="%Y/%m/%d")
print(DataNoticeFor6['StartDate'].dt.dayofweek)
dofweek=DataNoticeFor6['StartDate'].dt.dayofweek
# NOTE(review): chisquare() is applied to raw day-of-week codes, not to
# per-day frequencies (e.g. via value_counts()) -- confirm intent.
chi2=chisquare(dofweek)
print("chi square:")
print(chi2)
fig3 = plt.figure(2,figsize=plt.figaspect(1./2.))
ax3 = fig3.add_subplot(121)
ax3.hist(dofweek)
#7
Agency7="Environmental Protection"
DataFor7=FilterAgency(Data3,Agency7)
DataFor7['StartDate']=pd.to_datetime(DataFor7['StartDate'], format="%Y/%m/%d")
#print(DataFor7['StartDate'].dt.month)
monthlyExp=[]
m0=range(1,12)
for y in range(2010,2019):
D=DataFor7[DataFor7['StartDate'].dt.year==y]
for m in range(1,12):
D2=D[D['StartDate'].dt.month==m]
monthlyExp.append(np.sum(D2['ContractAmount']))
ax4 = fig3.add_subplot(122)
ax4.plot(monthlyExp)
s = pd.Series(monthlyExp)
print("Autocorrolation:%g"%s.autocorr(lag=12))
##########################
##8
# Question 8: of the 2018 contract dollars, what share went to vendors with
# an NYC zip code?  (NYC zip codes were scraped into Zip2.csv.)
DStart=date(2018,1,1)
DEnd=date(2018,12,31)
DataFor8 = FilterDate(Data3, DStart,DEnd)
NYCTot=0
Tot=np.sum(DataFor8['ContractAmount'])
with open('Zip2.csv') as csvfile: #Scraped all zipcodes to this csv file
    readCSV = csv.reader(csvfile, delimiter=',')
    for row in readCSV:
        #print(row[1])
        #print(len(row))
        # Sum the amounts of vendors whose address ends in each NYC zip code.
        for i in range(0,len(row)):
            D=FilterZipcode(DataFor8,row[i])
            #print(D['VendorAddress'])
            NYCTot+=np.sum(D['ContractAmount'])
TotOthers=Tot-NYCTot
print("NYC/Tot:%g"%(NYCTot/Tot))
print("NYC/TotOthers:%g"%(NYCTot/TotOthers))
plt.show()
| import datetime as dt
import os
import csv
import pandas as pd
import numpy as np
import datetime
import seaborn as sns
from datetime import timedelta, date
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from scipy.stats import chisquare
def FilterDate(Data, DStart, DEnd):
Data['StartDate']=pd.to_datetime(Data['StartDate'], format="%m/%d/%Y")
Data = Data[(Data['StartDate'] >= DStart) & (Data['StartDate'] <= DEnd)]
return Data
def FilterConractAmount(Data,LowerLimit):
Data = Data[(Data['ContractAmount'] > LowerLimit)]
return Data
def FilterAgency(Data,Agency):
Data = Data[(Data['AgencyName'] == Agency)]
return Data
def FilterCategories(Data,Cat1,Cat2):
Data = Data[(Data['CategoryDescription'] == Cat1) | (Data['CategoryDescription'] == Cat2)]
return Data
def FilterShortTitle(Data,title):
#t="+."+title
#Data = Data[(Data['ShortTitle'] == t)]
Data = Data[(Data['ShortTitle'].str.contains(title))]
return Data
def FilterCategory(Data,Cat):
Data = Data[(Data['CategoryDescription'] == Cat1)]
return Data
def FilterZipcode(Data,zip):
#t="+."+title
#Data = Data[(Data['ShortTitle'] == t)]
Data = Data[(Data['VendorAddress'].str.endswith(zip))]
return Data
DataRaw = pd.read_csv('Recent_Contract_Awards.csv',low_memory=False)
Data = DataRaw#[DataRaw['BOROUGH'] == 'MANHATTAN'].copy()
del DataRaw
print(Data.head(1))
#print(Data['StartDate'].year)
DStart=date(2010,1,1)
DEnd=date(2019,12,31)
Data2 = FilterDate(Data, DStart,DEnd)
#print(Data2)
Data2.dropna(subset=['ContractAmount'])
Data3=FilterConractAmount(Data2,0) ### Use this for the rest
print(np.sum(Data3['ContractAmount']))
#Data2[['HOUR']].plot.hist()
#Data['StartDate']=pd.to_datetime(Data['StartDate'], format="%m/%d/%Y")
#print(Data['StartDate'])
#2
Agency="Citywide Administrative Services"
DataFor2=FilterAgency(Data3,Agency)
AgencyTotalAmount=np.sum(DataFor2['ContractAmount'])
#print(len(set(DataFor2['VendorName'])))
UniqueVendors=set(DataFor2['VendorName'])
size =len(UniqueVendors)
print("size:%d"%size)
VendorName=[]*size
VendorAmount=[0]*size
i=0
for v in UniqueVendors:
dtemp=DataFor2[(DataFor2['VendorName']==v)]
amount=dtemp['ContractAmount']
#VendorName[i]=v
VendorAmount[i]=np.sum(amount)
#print(i)
i+=1
i=0
for v in UniqueVendors:
print(v,VendorAmount[i])
i+=1
Top50Amount = sorted(VendorAmount, reverse = True)[:50]
#print(Top50Amount)
#print(Top50Amount[49])
Top50AmountSum=np.sum(Top50Amount)
FracTop50=Top50AmountSum/AgencyTotalAmount
print("Top 50 frac:%g"%FracTop50)
#3
Cat1="Construction Related Services"
Cat2="Construction/Construction Services"
DataFor3=FilterCategories(Data3,Cat1,Cat2)
TotFor3=np.sum(DataFor3['ContractAmount'])
#print(DataFor3['ContractAmount'],DataFor3['ShortTitle'],DataFor3['CategoryDescription'])
DataCentralPark=FilterShortTitle(DataFor3,"CENTRAL PARK")
DataWSP=FilterShortTitle(DataFor3,"WASHINGTON SQUARE PARK")
pd.set_option('display.max_colwidth', -1)
print(DataCentralPark['ShortTitle'])
print(DataWSP['ShortTitle'])
TotForCentralPark=np.sum(DataCentralPark['ContractAmount'])
TotForWSP=np.sum(DataWSP['ContractAmount'])
FracCentralPark=TotForCentralPark/TotFor3
FracWSP=TotForWSP/TotFor3
ANS3=TotForCentralPark/TotForWSP
print("Answer 3:%g"%ANS3)
#4
DataFor4=FilterCategory(Data3,"Goods")
totYear=[]
y=np.array(range(2010,2019)).reshape((-1, 1))
#years=range(2010,2019)
for yr in range(2010,2019):
Di=date(yr,1,1)
Df=date(yr+1,12,31)
D4= FilterDate(DataFor4, Di,Df)# DataFor4[date(DataFor4['StartDate']).year==yr]
totYear.append(np.sum(D4['ContractAmount']))
print(totYear)
model = LinearRegression().fit(y, totYear)
r_sq = model.score(y, totYear)
print('coefficient of determination:', r_sq)
print('intercept:', model.intercept_)
print('slope:', model.coef_)
fig2 = plt.figure(figsize=plt.figaspect(1))
fig2.suptitle('Yearly expenditure')
ax21 = fig2.add_subplot(111)
ax21.set_ylabel('Amount')
ax21.set_xlabel('Year')
ax21.title.set_text('Contract amount-year')
ax21.grid(True)
ax21.plot(y,totYear,'o',label='Data')
ax21.legend(loc=2)
#5
UniqueAgencies=set(Data3['AgencyName'])
ContractCount=[]
for Agency in UniqueAgencies:
D5=Data3[Data3['AgencyName']==Agency]
ContractCount.append(len(D5['ContractAmount']))
Top5Contract = sorted(ContractCount, reverse = True)[:5]
ContractLimit=Top5Contract[4]
meanAmount=[]
for Agency in UniqueAgencies:
D5=Data3[Data3['AgencyName']==Agency]
cont=len(D5['ContractAmount'])
if cont>=ContractLimit:
meanAmount.append(np.mean(D5['ContractAmount']))
print(meanAmount)
meanAmountSorted= sorted(meanAmount, reverse = True)[:5]
print(meanAmountSorted)
print("Answer5:%g"%(meanAmountSorted[0]/meanAmountSorted[1]))
######
##6
Agency6="Parks and Recreation"
DataFor6=FilterAgency(Data3,Agency6)
DataNoticeFor6=FilterShortTitle(DataFor6,"NOTICE OF AWARD")
DataNoticeFor6['StartDate']=pd.to_datetime(DataFor6['StartDate'], format="%Y/%m/%d")
print(DataNoticeFor6['StartDate'].dt.dayofweek)
dofweek=DataNoticeFor6['StartDate'].dt.dayofweek
chi2=chisquare(dofweek)
print("chi square:")
print(chi2)
fig3 = plt.figure(2,figsize=plt.figaspect(1./2.))
ax3 = fig3.add_subplot(121)
ax3.hist(dofweek)
######
#7
Agency7="Environmental Protection"
DataFor7=FilterAgency(Data3,Agency7)
DataFor7['StartDate']=pd.to_datetime(DataFor7['StartDate'], format="%Y/%m/%d")
#print(DataFor7['StartDate'].dt.month)
monthlyExp=[]
m0=range(1,12)
for y in range(2010,2019):
D=DataFor7[DataFor7['StartDate'].dt.year==y]
for m in range(1,12):
D2=D[D['StartDate'].dt.month==m]
monthlyExp.append(np.sum(D2['ContractAmount']))
ax4 = fig3.add_subplot(122)
ax4.plot(monthlyExp)
s = pd.Series(monthlyExp)
print("Autocorrolation:%g"%s.autocorr(lag=12))
##########################
##8
DStart=date(2018,1,1)
DEnd=date(2018,12,31)
DataFor8 = FilterDate(Data3, DStart,DEnd)
NYCTot=0
Tot=np.sum(DataFor8['ContractAmount'])
with open('Zip2.csv') as csvfile: #Scraped all zipcodes to this csv file
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
#print(row[1])
#print(len(row))
for i in range(0,len(row)):
D=FilterZipcode(DataFor8,row[i])
#print(D['VendorAddress'])
NYCTot+=np.sum(D['ContractAmount'])
TotOthers=Tot-NYCTot
print("NYC/Tot:%g"%(NYCTot/Tot))
print("NYC/TotOthers:%g"%(NYCTot/TotOthers))
plt.show() | en | 0.240516 | #t="+."+title #Data = Data[(Data['ShortTitle'] == t)] #t="+."+title #Data = Data[(Data['ShortTitle'] == t)] #[DataRaw['BOROUGH'] == 'MANHATTAN'].copy() #print(Data['StartDate'].year) #print(Data2) ### Use this for the rest #Data2[['HOUR']].plot.hist() #Data['StartDate']=pd.to_datetime(Data['StartDate'], format="%m/%d/%Y") #print(Data['StartDate']) #2 #print(len(set(DataFor2['VendorName']))) #VendorName[i]=v #print(i) #print(Top50Amount) #print(Top50Amount[49]) #3 #print(DataFor3['ContractAmount'],DataFor3['ShortTitle'],DataFor3['CategoryDescription']) #4 #years=range(2010,2019) # DataFor4[date(DataFor4['StartDate']).year==yr] #5 ###### ##6 ###### #7 #print(DataFor7['StartDate'].dt.month) ########################## ##8 #Scraped all zipcodes to this csv file #print(row[1]) #print(len(row)) #print(D['VendorAddress']) | 2.948957 | 3 |
report/report_generator.py | Raed-Mughaus/ICS-503-Assignments | 0 | 6623456 | <filename>report/report_generator.py
from docx import Document
from docx.shared import Inches
from docx.enum.text import WD_BREAK
from report.experiment_files import *
from report.utils import get_descriptive_statistics
import pandas as pd
def _add_page_break(document):
    """Append an empty paragraph whose run carries a hard page break."""
    run = document.add_paragraph().add_run()
    run.add_break(WD_BREAK.PAGE)
def _add_image_to_cell(image_path, cell, width_inches, height_inches):
    """Insert the picture at image_path into a table cell, scaled in inches."""
    run = cell.paragraphs[0].add_run()
    run.add_picture(image_path,
                    width=Inches(width_inches),
                    height=Inches(height_inches))
def _add_descriptive_statistics_table(data_path, algorithm_instances, document):
    """Append a 5-column statistics table (mean/stdev/median/CI) to the doc.

    One row per algorithm instance; values come from the last row of each
    instance's "Best so far.csv" via get_descriptive_statistics.
    """
    document.add_paragraph()
    # One header row plus one data row per algorithm instance.
    table = document.add_table(rows=len(algorithm_instances) + 1, cols=5)
    table.style = 'Table Grid'
    header_cells = table.rows[0].cells
    header_cells[0].text = 'Algorithm'
    header_cells[1].text = 'Mean'
    header_cells[2].text = 'Standard deviation'
    header_cells[3].text = 'Median'
    header_cells[4].text = 'Confidence interval'
    for i in range(len(algorithm_instances)):
        # The CSV has one row per recorded step; the last row is the final one.
        data = pd.read_csv(f'{data_path}/{algorithm_instances[i]}/Best so far.csv', header=None, index_col=None)
        mean, stdev, median, confidence_interval = get_descriptive_statistics(data.iloc[len(data) - 1])
        j = i + 1  # +1 skips the header row
        table.rows[j].cells[0].text = algorithm_instances[i]
        table.rows[j].cells[1].text = str(round(mean, 2))
        table.rows[j].cells[2].text = str(round(stdev, 2))
        table.rows[j].cells[3].text = str(round(median, 2))
        table.rows[j].cells[4].text = f'\u00B1 {round(confidence_interval, 2)}'  # "± value"
def _generate_report(experiment_number, algorithm_name, algorithm_instances, instances_indices):
    """Build the per-algorithm .docx report for one experiment.

    First section: per-instance plot grids ("Best so far" / "Current best"
    side by side, one row per algorithm instance).  Second section: one page
    per instance with the descriptive-statistics table plus box-plot and
    confidence-interval images.
    """
    document = Document()
    for instance_idx in instances_indices:
        instance_number = instance_idx + 1  # indices are 0-based, folders 1-based
        plots_path = get_plots_path(experiment_number, instance_number, algorithm_name)
        document.add_heading(f'Instance {instance_number}', level=3)
        table = document.add_table(rows=len(algorithm_instances), cols=2)
        for i in range(len(algorithm_instances)):
            # Left column: best-so-far curve; right column: current-best curve.
            _add_image_to_cell(
                f'{plots_path}/{algorithm_instances[i]}/Best so far.png',
                table.rows[i].cells[0],
                2.98, 1.77
            )
            _add_image_to_cell(
                f'{plots_path}/{algorithm_instances[i]}/Current best.png',
                table.rows[i].cells[1],
                2.98, 1.77
            )
    for instance_idx in instances_indices:
        instance_number = instance_idx + 1
        _add_page_break(document)  # each instance summary starts on a new page
        plots_path = get_plots_path(experiment_number, instance_number, algorithm_name)
        data_path = get_data_path(experiment_number, instance_number, algorithm_name)
        document.add_heading(f'Instance {instance_number} Final Results', level=3)
        _add_descriptive_statistics_table(data_path, algorithm_instances, document)
        document.add_paragraph()
        table = document.add_table(rows=2, cols=1)
        _add_image_to_cell(
            f'{plots_path}/Box Plots.png',
            table.rows[0].cells[0],
            6.11, 3.36,
        )
        _add_image_to_cell(
            f'{plots_path}/Confidence Intervals.png',
            table.rows[1].cells[0],
            6.11, 3.36,
        )
    document.save(f'experiments/Experiment {experiment_number}/{algorithm_name}.docx')
def generate_final_result_report(experiment_number, instances_indices):
    """Build the cross-algorithm "Final Results.docx" for one experiment.

    One page per problem instance, holding the experiment-level box-plot and
    confidence-interval images (no per-algorithm breakdown).
    """
    document = Document()
    for instance_idx in instances_indices:
        instance_number = instance_idx + 1  # indices are 0-based, folders 1-based
        _add_page_break(document)
        plots_path = get_plots_path(experiment_number, instance_number)
        document.add_heading(f'Instance {instance_number} Final Results', level=3)
        document.add_paragraph()
        table = document.add_table(rows=2, cols=1)
        _add_image_to_cell(
            f'{plots_path}/Box Plots.png',
            table.rows[0].cells[0],
            6.11, 3.9715,
        )
        _add_image_to_cell(
            f'{plots_path}/Confidence Intervals.png',
            table.rows[1].cells[0],
            6.11, 3.9715,
        )
    document.save(f'experiments/Experiment {experiment_number}/Final Results.docx')
def generate_report(experiment_number, algorithm_name_to_instances, instances_indices):
    """Produce one report per algorithm, then the combined final-results report."""
    for algorithm_name in algorithm_name_to_instances:
        instances = algorithm_name_to_instances[algorithm_name]
        _generate_report(experiment_number, algorithm_name, instances, instances_indices)
    generate_final_result_report(experiment_number, instances_indices)
| <filename>report/report_generator.py
from docx import Document
from docx.shared import Inches
from docx.enum.text import WD_BREAK
from report.experiment_files import *
from report.utils import get_descriptive_statistics
import pandas as pd
def _add_page_break(document):
document.add_paragraph().add_run().add_break(WD_BREAK.PAGE)
def _add_image_to_cell(image_path, cell, width_inches, height_inches):
cell.paragraphs[0]\
.add_run()\
.add_picture(image_path, width=Inches(width_inches), height=Inches(height_inches))
def _add_descriptive_statistics_table(data_path, algorithm_instances, document):
document.add_paragraph()
table = document.add_table(rows=len(algorithm_instances) + 1, cols=5)
table.style = 'Table Grid'
header_cells = table.rows[0].cells
header_cells[0].text = 'Algorithm'
header_cells[1].text = 'Mean'
header_cells[2].text = 'Standard deviation'
header_cells[3].text = 'Median'
header_cells[4].text = 'Confidence interval'
for i in range(len(algorithm_instances)):
data = pd.read_csv(f'{data_path}/{algorithm_instances[i]}/Best so far.csv', header=None, index_col=None)
mean, stdev, median, confidence_interval = get_descriptive_statistics(data.iloc[len(data) - 1])
j = i + 1
table.rows[j].cells[0].text = algorithm_instances[i]
table.rows[j].cells[1].text = str(round(mean, 2))
table.rows[j].cells[2].text = str(round(stdev, 2))
table.rows[j].cells[3].text = str(round(median, 2))
table.rows[j].cells[4].text = f'\u00B1 {round(confidence_interval, 2)}'
def _generate_report(experiment_number, algorithm_name, algorithm_instances, instances_indices):
document = Document()
for instance_idx in instances_indices:
instance_number = instance_idx + 1
plots_path = get_plots_path(experiment_number, instance_number, algorithm_name)
document.add_heading(f'Instance {instance_number}', level=3)
table = document.add_table(rows=len(algorithm_instances), cols=2)
for i in range(len(algorithm_instances)):
_add_image_to_cell(
f'{plots_path}/{algorithm_instances[i]}/Best so far.png',
table.rows[i].cells[0],
2.98, 1.77
)
_add_image_to_cell(
f'{plots_path}/{algorithm_instances[i]}/Current best.png',
table.rows[i].cells[1],
2.98, 1.77
)
for instance_idx in instances_indices:
instance_number = instance_idx + 1
_add_page_break(document)
plots_path = get_plots_path(experiment_number, instance_number, algorithm_name)
data_path = get_data_path(experiment_number, instance_number, algorithm_name)
document.add_heading(f'Instance {instance_number} Final Results', level=3)
_add_descriptive_statistics_table(data_path, algorithm_instances, document)
document.add_paragraph()
table = document.add_table(rows=2, cols=1)
_add_image_to_cell(
f'{plots_path}/Box Plots.png',
table.rows[0].cells[0],
6.11, 3.36,
)
_add_image_to_cell(
f'{plots_path}/Confidence Intervals.png',
table.rows[1].cells[0],
6.11, 3.36,
)
document.save(f'experiments/Experiment {experiment_number}/{algorithm_name}.docx')
def generate_final_result_report(experiment_number, instances_indices):
document = Document()
for instance_idx in instances_indices:
instance_number = instance_idx + 1
_add_page_break(document)
plots_path = get_plots_path(experiment_number, instance_number)
document.add_heading(f'Instance {instance_number} Final Results', level=3)
document.add_paragraph()
table = document.add_table(rows=2, cols=1)
_add_image_to_cell(
f'{plots_path}/Box Plots.png',
table.rows[0].cells[0],
6.11, 3.9715,
)
_add_image_to_cell(
f'{plots_path}/Confidence Intervals.png',
table.rows[1].cells[0],
6.11, 3.9715,
)
document.save(f'experiments/Experiment {experiment_number}/Final Results.docx')
def generate_report(experiment_number, algorithm_name_to_instances, instances_indices):
for algorithm_name, algorithms_instances in algorithm_name_to_instances.items():
_generate_report(experiment_number, algorithm_name, algorithms_instances, instances_indices)
generate_final_result_report(experiment_number, instances_indices)
| none | 1 | 2.538377 | 3 | |
assets/containers/prowler_scan_check/app.py | aws-samples/aws-security-hub-analytic-pipeline | 7 | 6623457 | import os
import boto3
from typing import List
import json
import sys
import os
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class ProwlerScanGroup:
    """Runs prowler security checks for the check IDs delivered via SNS."""
    def __init__(self, topic_arn):
        # SNS topic handle (constructed but not used elsewhere in this class)
        # and the AWS region prowler should scan and filter on.
        self.__topic = boto3.resource('sns').Topic(topic_arn)
        self.__region = os.environ['AWS_REGION']
        logger.debug(f'topic_arn={topic_arn}')
        logger.debug(f'region={self.__region}')
    def __get_check(self, check_id:str) -> str:
        # NOTE(review): the logged command below omits the "-f" flag that the
        # executed command includes -- confirm which form is intended.
        logger.debug('Executing ' + f"/prowler/prowler -r {self.__region} -c '{check_id}' -M 'json-asff' -S")
        # SECURITY: check_id is interpolated into a shell command line and it
        # originates from the SNS message payload, so a malicious message
        # could inject shell syntax.  Consider subprocess.run([...]) with an
        # argument list (shell=False) instead of os.popen.
        stream = os.popen(f"/prowler/prowler -r {self.__region} -f {self.__region} -c '{check_id}' -M 'json-asff' -S")
        raw_out = stream.read()
        return raw_out
    def handle(self, event, context):
        # `event` is an SNS-triggered Lambda payload; each record's Message
        # is treated as a prowler check/group identifier and executed.
        logger.debug(event)
        records = event['Records']
        for r in records:
            group = r['Sns']['Message']
            logger.debug(self.__get_check(group))
def handler(event, context):
    """Lambda entry point: run the prowler checks named in the SNS event."""
    scanner = ProwlerScanGroup(topic_arn=os.environ['topic_arn'])
    scanner.handle(event, context)
    return 'Done: python'
| import os
import boto3
from typing import List
import json
import sys
import os
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class ProwlerScanGroup:
def __init__(self, topic_arn):
self.__topic = boto3.resource('sns').Topic(topic_arn)
self.__region = os.environ['AWS_REGION']
logger.debug(f'topic_arn={topic_arn}')
logger.debug(f'region={self.__region}')
def __get_check(self, check_id:str) -> str:
logger.debug('Executing ' + f"/prowler/prowler -r {self.__region} -c '{check_id}' -M 'json-asff' -S")
stream = os.popen(f"/prowler/prowler -r {self.__region} -f {self.__region} -c '{check_id}' -M 'json-asff' -S")
raw_out = stream.read()
return raw_out
def handle(self, event, context):
logger.debug(event)
records = event['Records']
for r in records:
group = r['Sns']['Message']
logger.debug(self.__get_check(group))
def handler(event, context):
ProwlerScanGroup(topic_arn=os.environ['topic_arn']).handle(event, context)
return 'Done: python'
| none | 1 | 2.055715 | 2 | |
flexneuart/data_convert/wikipedia_dpr/utils.py | gitter-badger/FlexNeuART | 101 | 6623458 | #
# Copyright 2014+ <NAME> University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
def get_passage_id(ctx_entry):
    """Return the passage ID stored in a DPR context entry.

    Entries in different dumps carry the ID under either ``psg_id`` or
    ``passage_id``; ``psg_id`` wins when both are present.  Raises when
    neither key exists.
    """
    if 'psg_id' in ctx_entry:
        return ctx_entry['psg_id']
    if 'passage_id' in ctx_entry:
        return ctx_entry['passage_id']
    raise Exception('No passage keys in the entry: ' + json.dumps(ctx_entry))
def dpr_json_reader(file_to_read):
    """A simple streaming json reader. It assumes the file is well formated,
    which is the case of Facebook DPR data, but it cannot be used as a generic
    JSON stream reader, where blocks start/end at arbitrary positions
    (unlike Facebook DPR data).

    Cleanup: the original wrapped the file in enumerate()/map() but never
    used the index; plain iteration with an explicit strip() is equivalent.

    :param file_to_read: an iterable of lines (e.g. an open file)
    :return: yields an unparsed textual representation of all entries for one question
    """
    current_depth = 0
    buffer = []
    for line in file_to_read:
        line = line.strip()
        # Skip the brackets of the top-level JSON array.
        if current_depth == 0 and line in ("[", "]"):
            continue
        if line == "{":
            current_depth += 1
        if line == "}" or line == "},":
            current_depth -= 1
            if current_depth == 0:
                # End of one question entry: close the object and emit it.
                buffer.append("}")
                yield "\n".join(buffer)
                buffer = []
            else:
                buffer.append(line)
        else:
            buffer.append(line)
| #
# Copyright 2014+ <NAME> University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
def get_passage_id(ctx_entry):
"""Retrieve a passage ID from the positive or negative context entry:
an element in an array with a key positive_ctxs, negative_ctxs, or
hard_negative_ctxs. The problem is that some entries encode them
using the key psg_id and some use the key passage_id.
"""
for psg_key in ['psg_id', 'passage_id']:
if psg_key in ctx_entry:
return ctx_entry[psg_key]
raise Exception('No passage keys in the entry: ' + json.dumps(ctx_entry))
def dpr_json_reader(file_to_read):
"""A simple streaming json reader. It assumes the file is well formated,
which is the case of Facebook DPR data, but it cannot be used as a generic
JSON stream reader, where blocks start/end at arbitrary positions
(unlike Facebook DPR data).
:param file_to_read:
:return: yields an unparsed textual representation of all entries for one question
"""
current_depth = 0
buffer = []
for i, line in enumerate(map(lambda line: line.strip(), file_to_read)):
if current_depth == 0 and line in ("[", "]"):
continue
if line == "{":
current_depth += 1
if line == "}" or line == "},":
current_depth -= 1
if current_depth == 0:
buffer.append("}")
yield "\n".join(buffer)
buffer = []
else:
buffer.append(line)
else:
buffer.append(line)
| en | 0.831502 | # # Copyright 2014+ <NAME> University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Retrieve a passage ID from the positive or negative context entry: an element in an array with a key positive_ctxs, negative_ctxs, or hard_negative_ctxs. The problem is that some entries encode them using the key psg_id and some use the key passage_id. A simple streaming json reader. It assumes the file is well formated, which is the case of Facebook DPR data, but it cannot be used as a generic JSON stream reader, where blocks start/end at arbitrary positions (unlike Facebook DPR data). :param file_to_read: :return: yields an unparsed textual representation of all entries for one question | 2.837778 | 3 |
setup.py | pytdlib/pytdlib | 6 | 6623459 | <reponame>pytdlib/pytdlib<filename>setup.py
"""The setup and build script for the pytdlib library."""
from setuptools import setup, find_packages, Command
from sys import argv
import shutil
import os
import re
from generate.api import generator
# Regenerate the TL-schema API bindings only for real build/install runs,
# so plain metadata commands stay fast.
if len(argv) > 1 and argv[1] in ["bdist_wheel", "install"]:
    generator.start()
# The PyPI long description comes straight from the README.
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()
# Single-source the version from pytdlib/version.py (__version__ = "x.y.z").
with open(os.path.join("pytdlib", "version.py"), encoding="utf-8") as f:
    version = re.findall(r"__version__ = \"(.+)\"", f.read())[0]
class Clear(Command):
    """Custom ``setup.py clean`` command that removes generated files.

    ``--dist`` removes build/distribution artifacts, ``--api`` removes the
    generated API modules, ``--all`` (or no flag) removes everything.
    """

    DIST = ["./build", "./dist", "./Pytdlib.egg-info"]
    API = ["pytdlib/api/functions", "pytdlib/api/types"]
    ALL = DIST + API

    description = "Clean generated files"
    user_options = [
        ("dist", None, "Clean distribution files"),
        ("api", None, "Clean generated API files"),
        ("all", None, "Clean all generated files"),
    ]

    def initialize_options(self):
        """Set every user option to its unset default.

        This is the distutils/setuptools Command contract for declaring
        option attributes; the previous custom ``__init__`` duplicated work
        that ``Command.__init__`` already performs and left this hook empty.
        """
        self.dist = None
        self.api = None
        self.all = None

    def finalize_options(self):
        """No cross-option validation is required."""

    def run(self):
        """Delete the selected paths, reporting each removal or skip."""
        paths = set()
        if self.dist:
            paths.update(Clear.DIST)
        if self.api:
            paths.update(Clear.API)
        if self.all or not paths:  # default behaviour: clean everything
            paths.update(Clear.ALL)

        for path in sorted(paths):
            try:
                shutil.rmtree(path) if os.path.isdir(path) else os.remove(path)
            except OSError:
                # Path missing or not removable: report and continue.
                print("skipping {}".format(path))
            else:
                print("removing {}".format(path))
# Register the package with setuptools; `version` and `long_description`
# were read from the source tree above.
setup(
    name="Pytdlib",
    version=version,
    description="Telegram TDLib Client Library for Python",
    long_description=long_description,
    url="https://github.com/pytdlib/pytdlib",
    author="Naji",
    license="MIT",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Topic :: Communications :: Chat",
        "Topic :: Software Development :: Libraries",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: Implementation",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    keywords="telegram chat td tdlib api client library python mtproto",
    python_requires="~=3.4",
    packages=find_packages(),
    cmdclass={
        # Expose `python setup.py clean`, backed by the Clear command above.
        "clean": Clear,
    }
)
| """The setup and build script for the pytdlib library."""
from setuptools import setup, find_packages, Command
from sys import argv
import shutil
import os
import re
from generate.api import generator
if len(argv) > 1 and argv[1] in ["bdist_wheel", "install"]:
generator.start()
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
with open(os.path.join("pytdlib", "version.py"), encoding="utf-8") as f:
version = re.findall(r"__version__ = \"(.+)\"", f.read())[0]
class Clear(Command):
DIST = ["./build", "./dist", "./Pytdlib.egg-info"]
API = ["pytdlib/api/functions", "pytdlib/api/types"]
ALL = DIST + API
description = "Clean generated files"
user_options = [
("dist", None, "Clean distribution files"),
("api", None, "Clean generated API files"),
("all", None, "Clean all generated files"),
]
def __init__(self, dist, **kw):
super().__init__(dist, **kw)
self.dist = None
self.api = None
self.all = None
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
paths = set()
if self.dist:
paths.update(Clear.DIST)
if self.api:
paths.update(Clear.API)
if self.all or not paths:
paths.update(Clear.ALL)
for path in sorted(list(paths)):
try:
shutil.rmtree(path) if os.path.isdir(path) else os.remove(path)
except OSError:
print("skipping {}".format(path))
else:
print("removing {}".format(path))
setup(
name="Pytdlib",
version=version,
description="Telegram TDLib Client Library for Python",
long_description=long_description,
url="https://github.com/pytdlib/pytdlib",
author="Naji",
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Communications :: Chat",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
keywords="telegram chat td tdlib api client library python mtproto",
python_requires="~=3.4",
packages=find_packages(),
cmdclass={
"clean": Clear,
}
) | en | 0.862013 | The setup and build script for the pytdlib library. | 2.115323 | 2 |
vesper/django/manage.py | RichardLitt/Vesper | 29 | 6623460 | #!/usr/bin/env python
# Vesper server administration script.
#
# This script is a simple derivative of the standard Django manage.py script.
import os
import sys
from vesper.archive_settings import archive_settings
from vesper.archive_paths import archive_paths
def main():
    """Run a Django management command for the Vesper server.

    Mirrors Django's standard ``manage.py``, but first sanity-checks the
    archive directory for the commands that require one.
    """
    args = sys.argv

    # Workaround introduced with conda-build 3: `vesper_admin runserver`
    # began failing on Windows (and possibly macOS) because `python.exe`
    # could not find `vesper_admin` in the environment's `Scripts`
    # directory. Rewriting argv[0] to this file's path restores the old
    # behavior; bare `vesper_admin` (no arguments) was unaffected.
    # NOTE(review): on Windows argv[0] may end with 'vesper_admin.exe',
    # which this endswith() check would miss — confirm on real installs.
    if args[0].endswith('vesper_admin'):
        args[0] = __file__

    # Only these commands need a valid archive in the working directory.
    if 'createsuperuser' in args or 'runserver' in args:
        _check_archive_dir()

    # Must be set before anything from django.core is imported below.
    os.environ.setdefault(
        'DJANGO_SETTINGS_MODULE', 'vesper.django.project.settings')

    from django.core.management import execute_from_command_line
    execute_from_command_line(args)
def _check_archive_dir():
    """Verify that the working directory looks like a Vesper archive.

    Runs each sanity check in turn; individual checks either warn or
    terminate the process themselves.
    """
    for check in (_check_database, _check_preferences, _check_presets):
        check()
def _check_database():
    """Exit with status 1 if the expected SQLite database file is missing."""
    if archive_settings.database.engine != 'SQLite':
        return

    db_path = archive_paths.sqlite_database_file_path
    if db_path.exists():
        return

    archive_dir = archive_paths.archive_dir_path
    rel_path = db_path.relative_to(archive_dir)
    print(
        f'The directory "{archive_dir}" does not appear to '
        f'be a Vesper archive directory, since it does not contain '
        f'an archive database file "{rel_path}". Please '
        f'run your command again in an archive directory.')
    sys.exit(1)
def _check_preferences():
    """Warn (without exiting) when the archive has no preference file."""
    pref_path = archive_paths.preference_file_path
    if pref_path.exists():
        return

    archive_dir = archive_paths.archive_dir_path
    pref_name = pref_path.relative_to(archive_dir).name
    print(
        f'WARNING: The Vesper archive at "{archive_dir}" does '
        f'not contain a preference file "{pref_name}". '
        f'The server will use default preferences for this archive.')
def _check_presets():
    """Warn (without exiting) when the archive has no preset directory."""
    preset_dir = archive_paths.preset_dir_path
    if preset_dir.exists():
        return

    archive_dir = archive_paths.archive_dir_path
    rel_dir = preset_dir.relative_to(archive_dir)
    print(
        f'WARNING: The Vesper archive at "{archive_dir}" does '
        f'not contain a preset directory "{rel_dir}". '
        f'No presets will be available for use with this archive.')
# Standard script entry point.
if __name__ == '__main__':
    main()
# The following is the standard Django manage.py script:
# #!/usr/bin/env python
# import os
# import sys
#
# if __name__ == "__main__":
# os.environ.setdefault(
# "DJANGO_SETTINGS_MODULE", "vesper.django.project.settings")
#
# from django.core.management import execute_from_command_line
#
# execute_from_command_line(sys.argv)
| #!/usr/bin/env python
# Vesper server administration script.
#
# This script is a simple derivative of the standard Django manage.py script.
import os
import sys
from vesper.archive_settings import archive_settings
from vesper.archive_paths import archive_paths
def main():
args = sys.argv
# We did not used to do this, but when we switched to version 3 of
# the `conda-build` package the command `vesper_admin runserver`
# began failing on Windows (at least, and also on macOS, if I
# remember correctly) with an error message indicating that
# `python.exe` could not find `vesper_admin` in its environment's
# `Scripts` directory (indeed, there is no such file there). The
# code below is a workaround for that problem. Interestingly,
# the command `vesper_admin` (with no arguments) continued to work
# as before.
#
# In the future, we might just want to do away with the
# `vesper_admin` entry point, which is just a shorthand for
# `python -m vesper.django.manage`.
if args[0].endswith('vesper_admin'):
args[0] = __file__
if 'createsuperuser' in args or 'runserver' in args:
_check_archive_dir()
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE', 'vesper.django.project.settings')
from django.core.management import execute_from_command_line
execute_from_command_line(args)
def _check_archive_dir():
"""
Checks that the purported archive directory appears to contain a
Vesper archive.
"""
_check_database()
_check_preferences()
_check_presets()
def _check_database():
if archive_settings.database.engine == 'SQLite':
file_path = archive_paths.sqlite_database_file_path
if not file_path.exists():
archive_dir_path = archive_paths.archive_dir_path
relative_file_path = file_path.relative_to(archive_dir_path)
print(
f'The directory "{archive_dir_path}" does not appear to '
f'be a Vesper archive directory, since it does not contain '
f'an archive database file "{relative_file_path}". Please '
f'run your command again in an archive directory.')
sys.exit(1)
def _check_preferences():
file_path = archive_paths.preference_file_path
if not file_path.exists():
archive_dir_path = archive_paths.archive_dir_path
relative_file_path = file_path.relative_to(archive_dir_path)
print(
f'WARNING: The Vesper archive at "{archive_dir_path}" does '
f'not contain a preference file "{relative_file_path.name}". '
f'The server will use default preferences for this archive.')
def _check_presets():
dir_path = archive_paths.preset_dir_path
if not dir_path.exists():
archive_dir_path = archive_paths.archive_dir_path
relative_dir_path = dir_path.relative_to(archive_dir_path)
print(
f'WARNING: The Vesper archive at "{archive_dir_path}" does '
f'not contain a preset directory "{relative_dir_path}". '
f'No presets will be available for use with this archive.')
if __name__ == '__main__':
main()
# The following is the standard Django manage.py script:
# #!/usr/bin/env python
# import os
# import sys
#
# if __name__ == "__main__":
# os.environ.setdefault(
# "DJANGO_SETTINGS_MODULE", "vesper.django.project.settings")
#
# from django.core.management import execute_from_command_line
#
# execute_from_command_line(sys.argv)
| en | 0.815639 | #!/usr/bin/env python # Vesper server administration script. # # This script is a simple derivative of the standard Django manage.py script. # We did not used to do this, but when we switched to version 3 of # the `conda-build` package the command `vesper_admin runserver` # began failing on Windows (at least, and also on macOS, if I # remember correctly) with an error message indicating that # `python.exe` could not find `vesper_admin` in its environment's # `Scripts` directory (indeed, there is no such file there). The # code below is a workaround for that problem. Interestingly, # the command `vesper_admin` (with no arguments) continued to work # as before. # # In the future, we might just want to do away with the # `vesper_admin` entry point, which is just a shorthand for # `python -m vesper.django.manage`. Checks that the purported archive directory appears to contain a Vesper archive. # The following is the standard Django manage.py script: # #!/usr/bin/env python # import os # import sys # # if __name__ == "__main__": # os.environ.setdefault( # "DJANGO_SETTINGS_MODULE", "vesper.django.project.settings") # # from django.core.management import execute_from_command_line # # execute_from_command_line(sys.argv) | 2.199041 | 2 |
carpenter/default_settings.py | stefanw/carpenter | 4 | 6623461 | <reponame>stefanw/carpenter<gh_stars>1-10
import os

# Development default: never enable DEBUG in production deployments.
DEBUG = True

# Absolute path of the package directory containing this settings module,
# used as the anchor for the static/media paths below.
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
STATIC_PATH = os.path.join(PROJECT_PATH, 'static')
MEDIA_PATH = os.path.join(PROJECT_PATH, 'static', 'media')

# Modules Celery imports at worker start-up so task definitions register.
CELERY_IMPORTS = ("carpenter.tasks", )
| import os
DEBUG = True
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
STATIC_PATH = os.path.join(PROJECT_PATH, 'static')
MEDIA_PATH = os.path.join(PROJECT_PATH, 'static', 'media')
CELERY_IMPORTS = ("carpenter.tasks", ) | none | 1 | 1.730468 | 2 | |
app/models/ltr559.py | mygulamali/pi-sensors | 0 | 6623462 | from pydantic import BaseModel
# Use the real LTR559 driver when installed (i.e. on the Pi); otherwise fall
# back to the local mock so the app can run and be tested off-device.
try:
    from ltr559 import LTR559 as Sensor
except ImportError:
    from mocks import LTR559 as Sensor
class LTR559(BaseModel):
    """A single reading taken from an LTR559 light/proximity sensor."""

    # Light reading, copied from the driver's ``_lux`` attribute.
    lux: float
    # Proximity reading, copied from the driver's ``_ps0`` attribute.
    proximity: int

    @classmethod
    def poll(cls, sensor: Sensor) -> "LTR559":
        """Refresh the sensor and capture its latest values as a model.

        NOTE(review): this reads the driver's private ``_lux``/``_ps0``
        attributes directly; confirm whether the driver's public getters
        should be used instead (the mock mirrors these privates).
        """
        sensor.update_sensor()
        return LTR559(lux=sensor._lux, proximity=sensor._ps0)
| from pydantic import BaseModel
try:
from ltr559 import LTR559 as Sensor
except ImportError:
from mocks import LTR559 as Sensor
class LTR559(BaseModel):
lux: float
proximity: int
@classmethod
def poll(cls, sensor: Sensor) -> "LTR559":
sensor.update_sensor()
return LTR559(lux=sensor._lux, proximity=sensor._ps0)
| none | 1 | 2.526833 | 3 | |
app/main.py | Jonathpc/company-flaskapp | 0 | 6623463 | from app import app
from flask import request, render_template, flash, redirect
from app.forms import ContactForm, flash_errors
import os
import smtplib
@app.route("/")
def index():
return render_template("public/index.html")
@app.route("/contact", methods=("GET", "POST"))
def contact():
form = ContactForm()
flash_errors(form)
MAIL_PASS = request.environ['MAIL_PASS']
if form.validate_on_submit():
sender = "%s <%s>" % (form.name.data, form.email.data)
subject = "Subject: %s, %s" % (form.subject.data , form.email.data)
message = "From: %s, \n\n %s, \n\n %s" % (
sender, subject, form.body.data)
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
server.login("sender_mail", MAIL_PASS)
server.sendmail("sender_mail",
"receiver_mail", message.encode('utf-8'))
flash("Your message was sent")
return redirect("/contact")
else:
flash_errors(form)
return render_template("public/contact.html", form=form)
| from app import app
from flask import request, render_template, flash, redirect
from app.forms import ContactForm, flash_errors
import os
import smtplib
@app.route("/")
def index():
return render_template("public/index.html")
@app.route("/contact", methods=("GET", "POST"))
def contact():
form = ContactForm()
flash_errors(form)
MAIL_PASS = request.environ['MAIL_PASS']
if form.validate_on_submit():
sender = "%s <%s>" % (form.name.data, form.email.data)
subject = "Subject: %s, %s" % (form.subject.data , form.email.data)
message = "From: %s, \n\n %s, \n\n %s" % (
sender, subject, form.body.data)
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
server.login("sender_mail", MAIL_PASS)
server.sendmail("sender_mail",
"receiver_mail", message.encode('utf-8'))
flash("Your message was sent")
return redirect("/contact")
else:
flash_errors(form)
return render_template("public/contact.html", form=form)
| none | 1 | 2.756924 | 3 | |
code/4Class-Classification/Natural Images/Baseline_ResNet50.py | mueedhafiz1982/CNNTreeEnsemble | 7 | 6623464 | # -*- coding: utf-8 -*-
"""ResNet50_natimdb_v1.01.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1q2dlcdGadeLlqI9q44EKbUzy-e57Rmls
"""
from google.colab import drive
# Mount Google Drive so the dataset under /content/drive is readable.
drive.mount('/content/drive')

# Input geometry: images are resized to 224x224 (ResNet50's native input)
# in channels-last layout (rows, columns, channels).
img_rows, img_cols = 224, 224
input_shape = (img_rows, img_cols, 3)
def url_to_image(url):
    """Download *url* and decode the payload into an OpenCV (BGR) image."""
    payload = urllib.request.urlopen(url).read()
    pixel_buffer = np.asarray(bytearray(payload), dtype="uint8")
    return cv2.imdecode(pixel_buffer, cv2.IMREAD_COLOR)
#%clear base_model
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img

# Use ResNet50's own preprocess_input (instead of a plain 1/255 rescale) so
# inputs match what the pretrained ImageNet weights were trained on.
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet50.preprocess_input)
valid_datagen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet50.preprocess_input)

# Stream the 4-class natural-images dataset (train/valid splits) from Drive.
train_generator1 = train_datagen.flow_from_directory(
    '/content/drive/My Drive/db/natimdb/train/',
    classes=['airplane','motorbike','car','person'],
    target_size=(img_rows, img_cols),batch_size=32,class_mode='categorical')
valid_generator1 = valid_datagen.flow_from_directory(
    '/content/drive/My Drive/db/natimdb/valid/',
    classes=['airplane','motorbike','car','person'],
    target_size=(img_rows, img_cols),batch_size=32,class_mode='categorical')

# ImageNet-pretrained ResNet50 backbone, fully frozen: only the new dense
# classification head below is trained.
base_model1 = tf.keras.applications.ResNet50(weights='imagenet', include_top = False)
for layer in base_model1.layers: layer.trainable = False

x = base_model1.output
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(1024, activation='relu')(x)
x = tf.keras.layers.Dense(1024, activation='relu')(x)
x = tf.keras.layers.Dense(1024, activation='relu')(x)
x = tf.keras.layers.Dense(512, activation='relu')(x)
preds = tf.keras.layers.Dense(4, activation ='softmax')(x)  # one unit per class

model1 = tf.keras.models.Model(inputs=base_model1.input, outputs=preds)
model1.compile(optimizer='RMSProp', loss='categorical_crossentropy', metrics=['accuracy'])

history = model1.fit(
    train_generator1,
    steps_per_epoch=train_generator1.n//train_generator1.batch_size,
    epochs=4,
    validation_data=valid_generator1,
    validation_steps=25)
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator #, array_to_img, img_to_array, load_img
"""
model1.save('/content/drive/My Drive/db/m1.h5')
model21.save('/content/drive/My Drive/db/m21.h5')
model22.save('/content/drive/My Drive/db/m22.h5')
model1 = tf.keras.models.load_model('m1.h5')
model21 = tf.keras.models.load_model('m21.h5')
model22 = tf.keras.models.load_model('m22.h5')
"""
# ntest_total = 745, accuracy_% = 0.9168
# classified = 683
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet50.preprocess_input)
#test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = valid_datagen.flow_from_directory(
'/content/drive/My Drive/db/natimdb/test/',
classes=['airplane','motorbike','car','person'],
target_size=(img_rows, img_cols),batch_size=32,class_mode='categorical')
history=model1.evaluate(test_generator) | # -*- coding: utf-8 -*-
"""ResNet50_natimdb_v1.01.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1q2dlcdGadeLlqI9q44EKbUzy-e57Rmls
"""
from google.colab import drive
drive.mount('/content/drive')
#code part 4
img_rows, img_cols = 224, 224 #number of rows and columns to convert the images to
input_shape = (img_rows, img_cols, 3)#format to store the images (rows, columns,channels) called channels last
def url_to_image(url):
# download the image, convert it to a NumPy array, and then read
# it into OpenCV format
resp = urllib.request.urlopen(url)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
# return the image
return image
#%clear base_model
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
#train_datagen = ImageDataGenerator(rescale=1./255)
#valid_datagen = ImageDataGenerator(rescale=1./255)
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet50.preprocess_input)
valid_datagen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet50.preprocess_input)
train_generator1 = train_datagen.flow_from_directory(
'/content/drive/My Drive/db/natimdb/train/',
classes=['airplane','motorbike','car','person'],
target_size=(img_rows, img_cols),batch_size=32,class_mode='categorical')
valid_generator1 = valid_datagen.flow_from_directory(
'/content/drive/My Drive/db/natimdb/valid/',
classes=['airplane','motorbike','car','person'],
target_size=(img_rows, img_cols),batch_size=32,class_mode='categorical')
base_model1 = tf.keras.applications.ResNet50(weights='imagenet', include_top = False)
for layer in base_model1.layers: layer.trainable = False
x = base_model1.output
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(1024, activation='relu')(x)
x = tf.keras.layers.Dense(1024, activation='relu')(x)
x = tf.keras.layers.Dense(1024, activation='relu')(x)
x = tf.keras.layers.Dense(512, activation='relu')(x)
preds = tf.keras.layers.Dense(4, activation ='softmax')(x)
model1 = tf.keras.models.Model(inputs=base_model1.input, outputs=preds)
model1.compile(optimizer='RMSProp', loss='categorical_crossentropy', metrics=['accuracy'])
history = model1.fit(
train_generator1,
steps_per_epoch=train_generator1.n//train_generator1.batch_size,
epochs=4,
validation_data=valid_generator1,
validation_steps=25)
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator #, array_to_img, img_to_array, load_img
"""
model1.save('/content/drive/My Drive/db/m1.h5')
model21.save('/content/drive/My Drive/db/m21.h5')
model22.save('/content/drive/My Drive/db/m22.h5')
model1 = tf.keras.models.load_model('m1.h5')
model21 = tf.keras.models.load_model('m21.h5')
model22 = tf.keras.models.load_model('m22.h5')
"""
# ntest_total = 745, accuracy_% = 0.9168
# classified = 683
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet50.preprocess_input)
#test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = valid_datagen.flow_from_directory(
'/content/drive/My Drive/db/natimdb/test/',
classes=['airplane','motorbike','car','person'],
target_size=(img_rows, img_cols),batch_size=32,class_mode='categorical')
history=model1.evaluate(test_generator) | en | 0.496829 | # -*- coding: utf-8 -*- ResNet50_natimdb_v1.01.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1q2dlcdGadeLlqI9q44EKbUzy-e57Rmls #code part 4 #number of rows and columns to convert the images to #format to store the images (rows, columns,channels) called channels last # download the image, convert it to a NumPy array, and then read # it into OpenCV format # return the image #%clear base_model #train_datagen = ImageDataGenerator(rescale=1./255) #valid_datagen = ImageDataGenerator(rescale=1./255) #, array_to_img, img_to_array, load_img model1.save('/content/drive/My Drive/db/m1.h5') model21.save('/content/drive/My Drive/db/m21.h5') model22.save('/content/drive/My Drive/db/m22.h5') model1 = tf.keras.models.load_model('m1.h5') model21 = tf.keras.models.load_model('m21.h5') model22 = tf.keras.models.load_model('m22.h5') # ntest_total = 745, accuracy_% = 0.9168 # classified = 683 #test_datagen = ImageDataGenerator(rescale=1./255) | 3.080584 | 3 |
ALGORITHM/commom/traj.py | Harold0/hmp | 2 | 6623465 | # cython: language_level=3
import numpy as np
from UTILS.colorful import *
from UTILS.tensor_ops import __hash__
class TRAJ_BASE():
    """Fixed-capacity container for one environment's episode trajectory.

    Tracks (one numpy array per key) are created lazily on first
    ``remember`` with shape ``(traj_limit, *content_shape)``; float tracks
    are initialized to NaN and integer tracks to 0 so unfilled time steps
    are distinguishable from real data.
    """

    # Class-level registries shared across ALL trajectories: the dtype and
    # shape first seen for each key, enforced on later episodes.
    key_data_type = {}
    key_data_shape = {}
    # Longest episode observed so far (used for monitoring buffer headroom).
    max_mem_length = -1

    def __init__(self, traj_limit, env_id):
        self.traj_limit = traj_limit        # maximum number of time steps
        self.env_id = env_id                # which environment produced this
        self.readonly_lock = False          # set True once the tail is cut
        self.key_dict = []                  # names of tracks created so far
        self.time_pointer = 0               # index of the current time step
        self.need_reward_bootstrap = False
        self.deprecated_flag = False

    # remember something in a time step, add it to trajectory
    def remember(self, key, content):
        """Store *content* for *key* at the current time step.

        Creates the track on first use. Passing ``content=None`` only
        reserves the track, using the dtype/shape previously registered
        for *key* in the class-level registries.
        """
        assert not self.readonly_lock
        if not (key in self.key_dict) and (content is not None):
            # First time this key appears, with data: allocate, then write.
            self.init_track(key=key, first_content=content)
            getattr(self, key)[self.time_pointer] = content
        elif not (key in self.key_dict) and (content is None):
            # First time, no data: allocate from the remembered dtype/shape.
            self.init_track_none(key=key)
        elif (key in self.key_dict) and (content is not None):
            getattr(self, key)[self.time_pointer] = content
        else:
            pass  # existing key, nothing to write this step

    # duplicate/rename a trajectory
    def copy_track(self, origin_key, new_key):
        """Copy the track *origin_key* to *new_key*.

        If *origin_key* is not a direct attribute, it is treated as the
        prefix of compound ``'main>sub'`` keys, and every matching subkey
        track is copied under *new_key*.
        """
        if hasattr(self, origin_key):
            origin_handle = getattr(self, origin_key)
            setattr(self, new_key, origin_handle.copy())
            new_handle = getattr(self, new_key)
            self.key_dict.append(new_key)
            #return origin_handle, new_handle
        else:
            real_key_list = [real_key for real_key in self.__dict__ if (origin_key+'>' in real_key)]
            assert len(real_key_list)>0, ('this key does not exist (yet), check:', origin_key)
            for real_key in real_key_list:
                mainkey, subkey = real_key.split('>')
                self.copy_track(real_key, (new_key+'>'+subkey))
            #return

    # make sure dtype is ok
    def check_type_shape(self, key, first_content=None):
        """Register or validate the (dtype, shape) for *key* and return it.

        With *first_content* given, its dtype must match any earlier
        registration for *key*; without it, *key* must already have been
        registered by a previous episode.
        """
        if first_content is not None:
            content_type = first_content.dtype
            content_shape = first_content.shape
            if key in TRAJ_BASE.key_data_type:
                # A key must keep the same dtype across all episodes.
                assert TRAJ_BASE.key_data_type[key] == content_type
            else:
                TRAJ_BASE.key_data_type[key] = content_type
                TRAJ_BASE.key_data_shape[key] = content_shape
            return content_type, content_shape
        assert key in TRAJ_BASE.key_data_type
        return TRAJ_BASE.key_data_type[key], TRAJ_BASE.key_data_shape[key]

    # create track, executed used when a key showing up for the first time in 'self.remember'
    def init_track(self, key, first_content):
        """Allocate the (traj_limit, *shape) array for *key* from its first value."""
        content = first_content
        self.check_type_shape(key, first_content)
        # NOTE(review): a plain Python float has no .shape/.dtype, so in
        # practice content must be a numpy array here — confirm callers.
        assert isinstance(content, np.ndarray) or isinstance(content, float), (key, content.__class__)
        tensor_size = ((self.traj_limit,) + tuple(content.shape))
        set_item = np.zeros(shape=tensor_size, dtype=content.dtype)
        # NaN marks "not yet filled" for float tracks; int tracks use 0.
        set_item[:] = np.nan if np.issubdtype(content.dtype, np.floating) else 0
        setattr(self, key, set_item)
        self.key_dict.append(key)

    # key pop up yet content is None,
    # read dtype from history dtype dictionary to fill the hole
    def init_track_none(self, key):
        """Allocate the array for *key* using its previously registered dtype/shape."""
        content_dtype, content_shape = self.check_type_shape(key)
        tensor_size = ((self.traj_limit,) + tuple(content_shape))
        set_item = np.zeros(shape=tensor_size, dtype=content_dtype)
        set_item[:] = np.nan if np.issubdtype(content_dtype, np.floating) else 0
        setattr(self, key, set_item)
        self.key_dict.append(key)

    # push the time pointer forward, before you call 'self.remember' again to fill t+1 data
    def time_shift(self):
        """Advance to the next time step (must stay within traj_limit)."""
        assert self.time_pointer < self.traj_limit
        self.time_pointer += 1

    # cut trajectory tail, when the number of episode time step < traj_limit
    def cut_tail(self):
        """Trim every track to the number of filled steps and lock the trajectory."""
        TJ = lambda key: getattr(self, key)
        self.readonly_lock = True
        n_frame = self.time_pointer
        # Track the longest episode seen so far, to monitor buffer headroom.
        if n_frame > TRAJ_BASE.max_mem_length:
            TRAJ_BASE.max_mem_length = n_frame
            print('max_mem_length:%d, traj_limit:%d'%(TRAJ_BASE.max_mem_length, self.traj_limit))
        # clip tail
        for key in self.key_dict: setattr(self, key, TJ(key)[:n_frame])
import numpy as np
from UTILS.colorful import *
from UTILS.tensor_ops import __hash__
class TRAJ_BASE():
key_data_type = {}
key_data_shape = {}
max_mem_length = -1
def __init__(self, traj_limit, env_id):
self.traj_limit = traj_limit
self.env_id = env_id
self.readonly_lock = False
self.key_dict = []
self.time_pointer = 0
self.need_reward_bootstrap = False
self.deprecated_flag = False
# remember something in a time step, add it to trajectory
def remember(self, key, content):
assert not self.readonly_lock
if not (key in self.key_dict) and (content is not None):
self.init_track(key=key, first_content=content)
getattr(self, key)[self.time_pointer] = content
elif not (key in self.key_dict) and (content is None):
self.init_track_none(key=key)
elif (key in self.key_dict) and (content is not None):
getattr(self, key)[self.time_pointer] = content
else:
pass
# duplicate/rename a trajectory
def copy_track(self, origin_key, new_key):
if hasattr(self, origin_key):
origin_handle = getattr(self, origin_key)
setattr(self, new_key, origin_handle.copy())
new_handle = getattr(self, new_key)
self.key_dict.append(new_key)
#return origin_handle, new_handle
else:
real_key_list = [real_key for real_key in self.__dict__ if (origin_key+'>' in real_key)]
assert len(real_key_list)>0, ('this key does not exist (yet), check:', origin_key)
for real_key in real_key_list:
mainkey, subkey = real_key.split('>')
self.copy_track(real_key, (new_key+'>'+subkey))
#return
# make sure dtype is ok
def check_type_shape(self, key, first_content=None):
if first_content is not None:
content_type = first_content.dtype
content_shape = first_content.shape
if key in TRAJ_BASE.key_data_type:
assert TRAJ_BASE.key_data_type[key] == content_type
else:
TRAJ_BASE.key_data_type[key] = content_type
TRAJ_BASE.key_data_shape[key] = content_shape
return content_type, content_shape
assert key in TRAJ_BASE.key_data_type
return TRAJ_BASE.key_data_type[key], TRAJ_BASE.key_data_shape[key]
# create track, executed used when a key showing up for the first time in 'self.remember'
def init_track(self, key, first_content):
content = first_content
self.check_type_shape(key, first_content)
assert isinstance(content, np.ndarray) or isinstance(content, float), (key, content.__class__)
tensor_size = ((self.traj_limit,) + tuple(content.shape))
set_item = np.zeros(shape=tensor_size, dtype=content.dtype)
set_item[:] = np.nan if np.issubdtype(content.dtype, np.floating) else 0
setattr(self, key, set_item)
self.key_dict.append(key)
# key pop up yet content is None,
# read dtype from history dtype dictionary to fill the hole
def init_track_none(self, key):
content_dtype, content_shape = self.check_type_shape(key)
tensor_size = ((self.traj_limit,) + tuple(content_shape))
set_item = np.zeros(shape=tensor_size, dtype=content_dtype)
set_item[:] = np.nan if np.issubdtype(content_dtype, np.floating) else 0
setattr(self, key, set_item)
self.key_dict.append(key)
# push the time pointer forward, before you call 'self.remember' again to fill t+1 data
def time_shift(self):
assert self.time_pointer < self.traj_limit
self.time_pointer += 1
# cut trajectory tail, when the number of episode time step < traj_limit
def cut_tail(self):
TJ = lambda key: getattr(self, key)
self.readonly_lock = True
n_frame = self.time_pointer
# check is buffer size too big
if n_frame > TRAJ_BASE.max_mem_length:
TRAJ_BASE.max_mem_length = n_frame
print('max_mem_length:%d, traj_limit:%d'%(TRAJ_BASE.max_mem_length, self.traj_limit))
# clip tail
for key in self.key_dict: setattr(self, key, TJ(key)[:n_frame]) | en | 0.699707 | # cython: language_level=3 # remember something in a time step, add it to trajectory # duplicate/rename a trajectory #return origin_handle, new_handle #return # make sure dtype is ok # create track, executed used when a key showing up for the first time in 'self.remember' # key pop up yet content is None, # read dtype from history dtype dictionary to fill the hole # push the time pointer forward, before you call 'self.remember' again to fill t+1 data # cut trajectory tail, when the number of episode time step < traj_limit # check is buffer size too big # clip tail | 2.016556 | 2 |
rnacentral/portal/models/sequence_regions.py | pythseq/rnacentral-webcode | 21 | 6623466 | """
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
from django.contrib.postgres.fields import ArrayField
from portal.models import EnsemblAssembly, RnaPrecomputed
class SequenceRegion(models.Model):
    """Genomic region where an RNA sequence (URS + taxid) is located on a given assembly."""
    # surrogate primary key
    id = models.AutoField(primary_key=True)
    # precomputed RNA sequence entry this region belongs to
    urs_taxid = models.ForeignKey(
        RnaPrecomputed,
        related_name='regions',
        db_column='urs_taxid',
        to_field='id',
        on_delete=models.CASCADE
    )
    region_name = models.TextField()
    chromosome = models.TextField()
    # genomic strand, integer-encoded (presumably +1/-1 -- verify against loader)
    strand = models.IntegerField()
    region_start = models.IntegerField()
    region_stop = models.IntegerField()
    # Ensembl assembly the coordinates refer to
    assembly = models.ForeignKey(
        EnsemblAssembly,
        related_name='regions',
        db_column='assembly_id',
        to_field='assembly_id',
        on_delete=models.CASCADE
    )
    # True when the location comes from mapping rather than a providing database
    was_mapped = models.BooleanField()
    # match identity of the mapping (integer; units/range not shown here -- verify)
    identity = models.IntegerField()
    # list of databases that supplied this location (Postgres array column)
    providing_databases = ArrayField(models.TextField())
    exon_count = models.IntegerField()
    class Meta:
        db_table = 'rnc_sequence_regions'
| """
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
from django.contrib.postgres.fields import ArrayField
from portal.models import EnsemblAssembly, RnaPrecomputed
class SequenceRegion(models.Model):
    """Genomic region where an RNA sequence (URS + taxid) is located on a given assembly."""
    # surrogate primary key
    id = models.AutoField(primary_key=True)
    # precomputed RNA sequence entry this region belongs to
    urs_taxid = models.ForeignKey(
        RnaPrecomputed,
        related_name='regions',
        db_column='urs_taxid',
        to_field='id',
        on_delete=models.CASCADE
    )
    region_name = models.TextField()
    chromosome = models.TextField()
    # genomic strand, integer-encoded (presumably +1/-1 -- verify against loader)
    strand = models.IntegerField()
    region_start = models.IntegerField()
    region_stop = models.IntegerField()
    # Ensembl assembly the coordinates refer to
    assembly = models.ForeignKey(
        EnsemblAssembly,
        related_name='regions',
        db_column='assembly_id',
        to_field='assembly_id',
        on_delete=models.CASCADE
    )
    # True when the location comes from mapping rather than a providing database
    was_mapped = models.BooleanField()
    # match identity of the mapping (integer; units/range not shown here -- verify)
    identity = models.IntegerField()
    # list of databases that supplied this location (Postgres array column)
    providing_databases = ArrayField(models.TextField())
    exon_count = models.IntegerField()
    class Meta:
        db_table = 'rnc_sequence_regions'
| en | 0.833717 | Copyright [2009-2018] EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1.661153 | 2 |
Project-1/src/utils.py | TooSchoolForCool/EE219-Larger-Scale-Data-Mining | 0 | 6623467 | from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import numpy as np
# plot histogram based on the number of documents in each topic
# in the dataset
def plotHist(dataset):
    """Draw a horizontal bar chart of the document count per topic."""
    categories = dataset.getAllCategories()
    positions = np.arange(len(categories))
    counts_by_index = dataset.getCategorySize()
    bar_lengths = [counts_by_index[idx] for idx in range(0, len(categories))]
    # cycle a fixed palette so every bar gets a colour
    palette = ['r', 'b', 'y', 'g', 'purple', 'orange', 'pink', 'maroon', '#624ea7']
    bar_colors = [palette[idx % len(palette)] for idx in range(0, len(categories))]
    plt.rcdefaults()
    fig, ax = plt.subplots()
    ax.barh(positions, bar_lengths, align='center', color=bar_colors)
    ax.set_yticks(positions)
    ax.set_yticklabels(categories)
    ax.set_title('Number of documents in each topic')
    ax.set_xlabel('Number of documents')
    ax.set_ylabel('Topic')
    # leave room on the left for the (long) topic labels
    plt.subplots_adjust(left=0.25)
    plt.show()
#######################################################################
# Print Title for each task
#######################################################################
def printTitle(msg, length = 60):
    """Print *msg* framed by two full-width rows of asterisks."""
    border = '*' * length
    print(border)
    print('* %s' % msg)
    print(border)
#######################################################################
# Print ROC curve
#######################################################################
def printROC(test_y, predict_y_score, title='Learning Model'):
    """Plot the ROC curve for a binary classifier.

    Parameters
    ----------
    test_y : array-like
        true binary labels
    predict_y_score : array-like
        classifier scores for the positive class
    title : str, optional
        model name appended to the figure title
    """
    # the thresholds returned by roc_curve are not needed for plotting
    fpr, tpr, _ = roc_curve(test_y, predict_y_score)
    plt.plot(fpr, tpr)
    # diagonal reference line (a random classifier)
    plt.plot([0, 1], [0, 1])
    plt.axis([-0.004, 1, 0, 1.006])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.title('ROC-Curve of ' + title)
    plt.show()
def main():
    """Entry point placeholder -- this module is meant to be imported for its helpers."""
    pass
if __name__ == '__main__':
    main()
import matplotlib.pyplot as plt
import numpy as np
# plot histogram based on the number of documents in each topic
# in the dataset
def plotHist(dataset):
    """Draw a horizontal bar chart of the document count per topic."""
    categories = dataset.getAllCategories()
    positions = np.arange(len(categories))
    counts_by_index = dataset.getCategorySize()
    bar_lengths = [counts_by_index[idx] for idx in range(0, len(categories))]
    # cycle a fixed palette so every bar gets a colour
    palette = ['r', 'b', 'y', 'g', 'purple', 'orange', 'pink', 'maroon', '#624ea7']
    bar_colors = [palette[idx % len(palette)] for idx in range(0, len(categories))]
    plt.rcdefaults()
    fig, ax = plt.subplots()
    ax.barh(positions, bar_lengths, align='center', color=bar_colors)
    ax.set_yticks(positions)
    ax.set_yticklabels(categories)
    ax.set_title('Number of documents in each topic')
    ax.set_xlabel('Number of documents')
    ax.set_ylabel('Topic')
    # leave room on the left for the (long) topic labels
    plt.subplots_adjust(left=0.25)
    plt.show()
#######################################################################
# Print Title for each task
#######################################################################
def printTitle(msg, length = 60):
    """Print *msg* framed by two full-width rows of asterisks."""
    border = '*' * length
    print(border)
    print('* %s' % msg)
    print(border)
#######################################################################
# Print ROC curve
#######################################################################
def printROC(test_y, predict_y_score, title='Learning Model'):
    """Plot the ROC curve for a binary classifier.

    Parameters
    ----------
    test_y : array-like
        true binary labels
    predict_y_score : array-like
        classifier scores for the positive class
    title : str, optional
        model name appended to the figure title
    """
    # the thresholds returned by roc_curve are not needed for plotting
    fpr, tpr, _ = roc_curve(test_y, predict_y_score)
    plt.plot(fpr, tpr)
    # diagonal reference line (a random classifier)
    plt.plot([0, 1], [0, 1])
    plt.axis([-0.004, 1, 0, 1.006])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.title('ROC-Curve of ' + title)
    plt.show()
def main():
    """Entry point placeholder -- this module is meant to be imported for its helpers."""
    pass
if __name__ == '__main__':
    main()
MindLink-Eumpy/test/__init__.py | Breeze1in1drizzle/MindLink-Exploring | 7 | 6623468 | import numpy as np
if __name__ == "__main__":
    # report the shape of each stored EEG array, in order
    for _fname in ('EEG.npy', 'EEG_1.npy'):
        _arr = np.load(_fname)
        print(_arr.shape)
| import numpy as np
if __name__ == "__main__":
x = np.load('EEG.npy')
print(x.shape)
y = np.load('EEG_1.npy')
print(y.shape)
| none | 1 | 1.899042 | 2 | |
Python/main.py | AsceDusk/PyJSON | 0 | 6623469 | import sys
from PyQt5.QtWidgets import QApplication, QLabel, QMainWindow
from PyQt5.QtCore import Qt
class MainWindow(QMainWindow):
    """Top-level application window showing a single centered label."""
    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setWindowTitle("My First Python Application")
        # the label becomes the only widget, filling the whole window
        label = QLabel("This is a PyQt5 window!")
        label.setAlignment(Qt.AlignCenter)
        self.setCentralWidget(label)
# exactly one QApplication must exist before any widget is created
app = QApplication(sys.argv)
window = MainWindow()
window.show() # IMPORTANT!!!!! Windows are hidden by default.
# Start the event loop.
app.exec_()
from PyQt5.QtWidgets import QApplication, QLabel, QMainWindow
from PyQt5.QtCore import Qt
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setWindowTitle("My First Python Application")
label = QLabel("This is a PyQt5 window!")
label.setAlignment(Qt.AlignCenter)
self.setCentralWidget(label)
app = QApplication(sys.argv)
window = MainWindow()
window.show() # IMPORTANT!!!!! Windows are hidden by default.
# Start the event loop.
app.exec_() | en | 0.74689 | # IMPORTANT!!!!! Windows are hidden by default. # Start the event loop. | 3.415507 | 3 |
henon_map/__init__.py | carlidel/c_henon_map | 0 | 6623470 | import matplotlib.pyplot as plt
from numba import cuda, jit, njit, prange
import numpy as np
from tqdm import tqdm
import pickle
import time
from . import gpu_henon_core as gpu
from . import cpu_henon_core as cpu
from .cpu_henon_core import recursive_accumulation as cpu_accumulate_and_return
def polar_to_cartesian(radius, alpha, theta1, theta2):
    """Convert 4D polar-like coordinates to cartesian (x, px, y, py).

    Thin module-level wrapper around the compiled CPU implementation.
    """
    return cpu.polar_to_cartesian(radius, alpha, theta1, theta2)
def cartesian_to_polar(x, px, y, py):
    """Convert cartesian (x, px, y, py) back to (radius, alpha, theta1, theta2).

    Thin module-level wrapper around the compiled CPU implementation.
    """
    return cpu.cartesian_to_polar(x, px, y, py)
@njit
def modulation(epsilon, n_elements, first_index=0):
    """Generate the modulated linear tunes, one value per turn.

    The base tunes (0.168, 0.201) are modulated by a 7-harmonic cosine
    spectrum with base period 868.12 turns, scaled by ``epsilon``.

    Parameters
    ----------
    epsilon : float
        intensity of the modulation (0 disables it)
    n_elements : int
        number of turns to generate
    first_index : int, optional
        turn index the modulation resumes from, by default 0

    Returns
    -------
    tuple of ndarray
        (omega_x, omega_y), each of length ``n_elements``
    """
    coefficients = np.array([1.000e-4,
                             0.218e-4,
                             0.708e-4,
                             0.254e-4,
                             0.100e-4,
                             0.078e-4,
                             0.218e-4])
    modulations = np.array([1 * (2 * np.pi / 868.12),
                            2 * (2 * np.pi / 868.12),
                            3 * (2 * np.pi / 868.12),
                            6 * (2 * np.pi / 868.12),
                            7 * (2 * np.pi / 868.12),
                            10 * (2 * np.pi / 868.12),
                            12 * (2 * np.pi / 868.12)])
    # per-turn sum of the harmonic contributions
    omega_sum = np.array([
        np.sum(coefficients * np.cos(modulations * k)) for k in range(first_index, first_index + n_elements)
    ])
    omega_x = 0.168 * 2 * np.pi * (1 + epsilon * omega_sum)
    omega_y = 0.201 * 2 * np.pi * (1 + epsilon * omega_sum)
    return omega_x, omega_y
class partial_track(object):
    """Tracker that only keeps the final state of each initial condition.

    Kind of a deprecated approach: only the last step of the tracking is
    stored, not the whole trajectory.  Use ``generate_instance`` to obtain
    the CPU- or GPU-optimized implementation.
    """

    def __init__(self):
        pass

    def compute(self, n_iterations):
        pass

    def reset(self):
        pass

    def get_data(self):
        """Get the current state in polar coordinates.

        Returns
        -------
        tuple
            (radius, alpha, theta1, theta2, steps)
        """
        return self.r, self.alpha, self.theta1, self.theta2, self.step

    def get_cartesian_data(self):
        """Get the current state in cartesian coordinates (x, px, y, py, steps)."""
        x, px, y, py = polar_to_cartesian(self.r, self.alpha, self.theta1, self.theta2)
        return x, px, y, py, self.step

    def get_radiuses(self):
        """Radius of every particle (0.0 marks a lost particle)."""
        return self.r

    def get_filtered_radiuses(self):
        """Radiuses of the surviving particles only."""
        return self.r[self.r != 0.0]

    def get_times(self):
        """Number of turns reached by each particle."""
        return self.step

    def get_action(self):
        """Linear action r^2 / 2 of every particle."""
        return np.power(self.r, 2) / 2

    def get_filtered_action(self):
        """Linear action of the surviving particles only."""
        return np.power(self.r[self.r != 0.0], 2) / 2

    def get_survival_count(self):
        """Number of particles still alive (radius != 0)."""
        return np.count_nonzero(self.r != 0.0)

    def get_total_count(self):
        """Total number of tracked particles."""
        return self.r.size

    def get_survival_rate(self):
        """Fraction of particles still alive."""
        return self.get_survival_count() / self.get_total_count()

    @staticmethod
    def generate_instance(radius, alpha, theta1, theta2, epsilon, cuda_device=None):
        """Generate an instance of the engine.

        Parameters
        ----------
        radius : ndarray
            array of radiuses to consider
        alpha : ndarray
            array of initial alphas
        theta1 : ndarray
            array of initial theta1
        theta2 : ndarray
            array of initial theta2
        epsilon : float
            modulation intensity
        cuda_device : bool, optional
            force (True/False) usage of the GPU; autodetect when None

        Returns
        -------
        class instance
            optimized (CPU or GPU) class instance
        """
        if cuda_device is None:  # autodetect CUDA availability
            cuda_device = cuda.is_available()
        if cuda_device:
            return gpu_partial_track(radius, alpha, theta1, theta2, epsilon)
        else:
            return cpu_partial_track(radius, alpha, theta1, theta2, epsilon)
class cpu_partial_track(partial_track):
    """CPU implementation of partial_track (uses the compiled cpu_henon_core kernels)."""

    def __init__(self, radius, alpha, theta1, theta2, epsilon):
        assert alpha.size == theta1.size
        assert alpha.size == theta2.size
        assert alpha.size == radius.size
        # save data as members
        self.r = radius
        self.alpha = alpha
        self.theta1 = theta1
        self.theta2 = theta2
        # pristine copies used by reset()
        self.r_0 = radius.copy()
        self.alpha_0 = alpha.copy()
        self.theta1_0 = theta1.copy()
        self.theta2_0 = theta2.copy()
        self.epsilon = epsilon
        self.total_iters = 0
        self.limit = 1.0
        # make containers (np.int was removed from NumPy; plain int is equivalent)
        self.step = np.zeros((alpha.size), dtype=int)
        self.x = np.empty(alpha.size)
        self.px = np.empty(alpha.size)
        self.y = np.empty(alpha.size)
        self.py = np.empty(alpha.size)
        self.x, self.px, self.y, self.py = cpu.polar_to_cartesian(
            self.r_0, self.alpha_0, self.theta1_0, self.theta2_0)

    def compute(self, n_iterations):
        """Advance the tracking by ``n_iterations`` turns.

        Returns
        -------
        tuple of ndarray [n_elements]
            (x, px, y, py, steps)
        """
        # resume the tune modulation exactly where the previous call stopped
        omega_x, omega_y = modulation(
            self.epsilon, n_iterations, self.total_iters)
        # Execution
        self.x, self.px, self.y, self.py, self.step = cpu.henon_partial_track(
            self.x, self.px, self.y, self.py, self.step, self.limit,
            n_iterations, omega_x, omega_y
        )
        self.total_iters += n_iterations
        # keep the polar view in sync for the getters of the base class
        self.r, self.alpha, self.theta1, self.theta2 = cpu.cartesian_to_polar(
            self.x, self.px, self.y, self.py)
        return self.x, self.px, self.y, self.py, self.step

    def reset(self):
        """Restore the initial conditions and the iteration counter."""
        self.r = self.r_0
        self.alpha = self.alpha_0
        self.theta1 = self.theta1_0
        self.theta2 = self.theta2_0
        self.step = np.zeros((self.alpha.size), dtype=int)
        self.x, self.px, self.y, self.py = cpu.polar_to_cartesian(
            self.r_0, self.alpha_0, self.theta1_0, self.theta2_0)
        self.total_iters = 0
class gpu_partial_track(partial_track):
    """GPU (CUDA) implementation of partial_track."""

    def __init__(self, radius, alpha, theta1, theta2, epsilon):
        assert alpha.size == theta1.size
        assert alpha.size == theta2.size
        assert alpha.size == radius.size
        # save data as members
        self.r = radius
        self.alpha = alpha
        self.theta1 = theta1
        self.theta2 = theta2
        # pristine copies used by reset()
        self.r_0 = radius.copy()
        self.alpha_0 = alpha.copy()
        self.theta1_0 = theta1.copy()
        self.theta2_0 = theta2.copy()
        self.epsilon = epsilon
        self.total_iters = 0
        self.limit = 1.0
        # make containers (np.int was removed from NumPy; plain int is equivalent)
        self.step = np.zeros((alpha.size), dtype=int)
        self.x = np.empty(alpha.size)
        self.px = np.empty(alpha.size)
        self.y = np.empty(alpha.size)
        self.py = np.empty(alpha.size)
        self.x, self.px, self.y, self.py = cpu.polar_to_cartesian(
            self.r_0, self.alpha_0, self.theta1_0, self.theta2_0)

    def compute(self, n_iterations):
        """Advance the tracking by ``n_iterations`` turns on the GPU.

        Returns
        -------
        tuple of ndarray [n_elements]
            (x, px, y, py, steps)
        """
        threads_per_block = 512
        # one thread per initial condition
        blocks_per_grid = self.alpha.size // 512 + 1
        # load to GPU
        d_x = cuda.to_device(self.x)
        d_y = cuda.to_device(self.y)
        d_px = cuda.to_device(self.px)
        d_py = cuda.to_device(self.py)
        d_step = cuda.to_device(self.step)
        # resume the tune modulation exactly where the previous call stopped
        omega_x, omega_y = modulation(
            self.epsilon, n_iterations, self.total_iters)
        d_omega_x = cuda.to_device(omega_x)
        d_omega_y = cuda.to_device(omega_y)
        # Execution
        gpu.henon_partial_track[blocks_per_grid, threads_per_block](
            d_x, d_px, d_y, d_py, d_step, self.limit,
            n_iterations, d_omega_x, d_omega_y
        )
        self.total_iters += n_iterations
        # bring the results back to the host
        d_x.copy_to_host(self.x)
        d_y.copy_to_host(self.y)
        d_px.copy_to_host(self.px)
        d_py.copy_to_host(self.py)
        d_step.copy_to_host(self.step)
        # keep the polar view in sync for the getters of the base class
        self.r, self.alpha, self.theta1, self.theta2 = cpu.cartesian_to_polar(self.x, self.px, self.y, self.py)
        return self.x, self.px, self.y, self.py, self.step

    def reset(self):
        """Restore the initial conditions and the iteration counter."""
        self.r = self.r_0
        self.alpha = self.alpha_0
        self.theta1 = self.theta1_0
        self.theta2 = self.theta2_0
        self.step = np.zeros((self.alpha.size), dtype=int)
        self.x, self.px, self.y, self.py = gpu.actual_polar_to_cartesian(
            self.r_0, self.alpha_0, self.theta1_0, self.theta2_0)
        self.total_iters = 0
class uniform_scan(object):
    """Scan survival times over a uniform 4D cube of the Hénon map phase space."""

    def __init__(self):
        pass

    def scan(self):
        pass

    def save_values(self, f, label="SixTrack LHC no bb flat"):
        """Pickle the scan results (run ``scan`` first, it fills ``times``/``max_turns``).

        Parameters
        ----------
        f : str or path
            destination file
        label : str, optional
            free-form tag stored alongside the data
        """
        self.label = label
        data_dict = {
            "label": label,
            "top": self.top,
            "steps": self.steps,
            "starting_radius": self.starting_radius,
            "times": self.times,
            "max_turns": self.max_turns
        }
        with open(f, 'wb') as destination:
            pickle.dump(data_dict, destination, protocol=4)

    @staticmethod
    def generate_instance(epsilon, top, steps, starting_radius=0.0001, cuda_device=None):
        """Create an uniform scan object.

        Parameters
        ----------
        epsilon : float
            modulation intensity
        top : float
            maximum radius
        steps : int
            steps from zero to top (the grid becomes steps * 2 + 1 per axis)
        starting_radius : float, optional
            radius below which points are skipped, by default 0.0001
        cuda_device : bool, optional
            force (True/False) usage of the GPU; autodetect when None

        Returns
        -------
        object
            uniform_scan object (CPU or GPU implementation)
        """
        if cuda_device is None:  # autodetect CUDA availability
            cuda_device = cuda.is_available()
        if cuda_device:
            return gpu_uniform_scan(epsilon, top, steps, starting_radius)
        else:
            return cpu_uniform_scan(epsilon, top, steps, starting_radius)
class cpu_uniform_scan(uniform_scan):
    # CPU implementation of the uniform 4D scan.
    # NOTE(review): unlike gpu_uniform_scan, meshgrid is called WITHOUT
    # indexing='ij' (so the default 'xy'), which swaps the first two axes of
    # the resulting cube relative to the GPU version -- confirm intended.
    def __init__(self, epsilon, top, steps, starting_radius=0.0001):
        self.epsilon = epsilon
        self.top = top
        self.steps = steps
        self.starting_radius = starting_radius
        # uniform grid of 2*steps+1 points per coordinate in [-top, top]
        self.coords = np.linspace(-top, top, steps * 2 + 1)
        self.X, self.PX, self.Y, self.PY = np.meshgrid(
            self.coords, self.coords, self.coords, self.coords)
        self.X2 = np.power(self.X, 2)
        self.PX2 = np.power(self.PX, 2)
        self.Y2 = np.power(self.Y, 2)
        self.PY2 = np.power(self.PY, 2)
        # only points at or beyond starting_radius are actually tracked
        self.bool_mask = (
            self.X2
            + self.PX2
            + self.Y2
            + self.PY2
            >= np.power(starting_radius, 2)
        )
        # flattened views fed to the compiled kernel
        self.X_f = self.X.flatten()
        self.PX_f = self.PX.flatten()
        self.Y_f = self.Y.flatten()
        self.PY_f = self.PY.flatten()
        self.bool_mask_f = self.bool_mask.flatten()
        self.times = np.zeros_like(self.X)
        self.times_f = self.times.flatten()
        self.n_samples = np.count_nonzero(self.bool_mask_f)
    def scan(self, max_turns):
        """Execute a scanning of everything
        Parameters
        ----------
        max_turns : int
            turn limit
        Returns
        -------
        ndarray
            4d array with stable iterations inside
        """
        self.max_turns = max_turns
        # NOTE(review): the modulation length is n_samples (number of tracked
        # points), not max_turns -- looks suspicious, verify against the kernel.
        omega_x, omega_y = modulation(self.epsilon, self.n_samples)
        # Filling
        start = time.time()
        self.times_f = cpu.henon_map_to_the_end(
            self.X_f, self.PX_f, self.Y_f, self.PY_f, 100.0, max_turns, omega_x, omega_y, self.bool_mask_f)
        print("Elapsed time for execution: {} s".format(time.time() - start))
        # back to the (2s+1)^4 cube shape
        self.times = self.times_f.reshape(
            (self.steps * 2 + 1, self.steps * 2 + 1, self.steps * 2 + 1, self.steps * 2 + 1))
        return self.times
class gpu_uniform_scan(uniform_scan):
    # GPU implementation: same uniform 4D grid, survival times computed by
    # the CUDA kernel gpu.henon_map_to_the_end.
    def __init__(self, epsilon, top, steps, starting_radius=0.0001):
        self.epsilon = epsilon
        self.top = top
        self.steps = steps
        self.starting_radius = starting_radius
        # uniform grid of 2*steps+1 points per coordinate in [-top, top]
        self.coords = np.linspace(-top, top, steps * 2 + 1)
        self.X, self.PX, self.Y, self.PY = np.meshgrid(
            self.coords, self.coords, self.coords, self.coords,
            indexing='ij')
        self.X2 = np.power(self.X, 2)
        self.PX2 = np.power(self.PX, 2)
        self.Y2 = np.power(self.Y, 2)
        self.PY2 = np.power(self.PY, 2)
        # only points at or beyond starting_radius are actually tracked
        self.bool_mask = (
            self.X2
            + self.PX2
            + self.Y2
            + self.PY2
            >= np.power(starting_radius, 2)
        )
        # flattened views transferred to the device
        self.X_f = self.X.flatten()
        self.PX_f = self.PX.flatten()
        self.Y_f = self.Y.flatten()
        self.PY_f = self.PY.flatten()
        self.bool_mask_f = self.bool_mask.flatten()
        self.times = np.zeros_like(self.X)
        self.times_f = self.times.flatten()
        self.n_samples = np.count_nonzero(self.bool_mask_f)
    def scan(self, max_turns):
        """Execute a scanning of everything
        Parameters
        ----------
        max_turns : int
            turn limit
        Returns
        -------
        ndarray
            4d array with stable iterations inside
        """
        threads_per_block = 512
        # NOTE(review): grid fixed at 10 blocks, far fewer threads than
        # samples -- this assumes the kernel loops over the data; confirm.
        blocks_per_grid = 10
        d_x = cuda.to_device(self.X_f)
        d_px = cuda.to_device(self.PX_f)
        d_y = cuda.to_device(self.Y_f)
        d_py = cuda.to_device(self.PY_f)
        d_times = cuda.to_device(self.times_f)
        d_bool_mask = cuda.to_device(self.bool_mask_f)
        self.max_turns = max_turns
        # NOTE(review): modulation length is n_samples, not max_turns --
        # same pattern as the CPU version; verify against the kernel.
        omega_x, omega_y = modulation(self.epsilon, self.n_samples)
        d_omega_x = cuda.to_device(omega_x)
        d_omega_y = cuda.to_device(omega_y)
        # Filling
        start = time.time()
        gpu.henon_map_to_the_end[blocks_per_grid, threads_per_block](
            d_x, d_px, d_y, d_py, d_times, 100.0, max_turns, d_omega_x, d_omega_y, d_bool_mask
        )
        print("Elapsed time for execution: {} s".format(time.time() - start))
        d_times.copy_to_host(self.times_f)
        # back to the (2s+1)^4 cube shape
        self.times = self.times_f.reshape(
            (self.steps * 2 + 1, self.steps * 2 + 1, self.steps * 2 + 1, self.steps * 2 + 1))
        return self.times
class radial_scan(object):
    """Radial scan of the Hénon map for Dynamic Aperture estimations.

    For every angular direction (alpha, theta1, theta2) the engine pushes
    the radius outwards in steps of ``dr`` and records the last stable
    step for the requested numbers of turns.  It's a bit messy tho...
    """

    def __init__(self):
        pass

    def compute(self):
        pass

    def dummy_compute(self):
        pass

    def reset(self):
        pass

    def get_data(self):
        """Get the data.

        Returns
        -------
        ndarray
            last stable radius for the given amount of turns,
            one row per direction, one column per ``compute`` pass
        """
        return np.transpose(np.asarray(self.container)) * self.dr

    @staticmethod
    def generate_instance(dr, alpha, theta1, theta2, epsilon, starting_position=0.0, cuda_device=None):
        """init an henon optimized radial tracker!

        Parameters
        ----------
        dr : float
            radial step
        alpha : ndarray
            alpha angles to consider (raw)
        theta1 : ndarray
            theta1 angles to consider (raw)
        theta2 : ndarray
            theta2 angles to consider (raw)
        epsilon : float
            intensity of modulation
        starting_position : float, optional
            radius from which the scan starts, by default 0.0
        cuda_device : bool, optional
            force (True/False) usage of the GPU; autodetect when None

        Returns
        -------
        Optimized instance
            optimized instance of the class (CPU or GPU)
        """
        if cuda_device is None:  # autodetect CUDA availability
            cuda_device = cuda.is_available()
        if cuda_device:
            return gpu_radial_scan(dr, alpha, theta1, theta2, epsilon, starting_position)
        else:
            return cpu_radial_scan(dr, alpha, theta1, theta2, epsilon, starting_position)

    def save_values(self, f, label="Hénon map scanning"):
        """Pickle the results (requires a prior ``block_compute``/``compute`` run)."""
        self.label = label
        data_dict = {
            "label": label,
            "alpha": self.alpha,
            "theta1": self.theta1,
            "theta2": self.theta2,
            "dr": self.dr,
            "starting_position": self.starting_position,
            "starting_step": 0,  # this has its meaning in the bigger picture, trust me!
            "values": np.transpose(self.steps),
            "max_turns": self.sample_list[0],
            "min_turns": self.sample_list[-1]
        }
        with open(f, 'wb') as destination:
            pickle.dump(data_dict, destination, protocol=4)
class gpu_radial_scan(radial_scan):
    """GPU (CUDA) implementation of the radial scan."""

    def __init__(self, dr, alpha, theta1, theta2, epsilon, starting_position=0.0):
        assert alpha.size == theta1.size
        assert alpha.size == theta2.size
        assert starting_position >= 0.0
        # save data as members
        self.dr = dr
        self.alpha = alpha
        self.theta1 = theta1
        self.theta2 = theta2
        self.epsilon = epsilon
        self.limit = 100.0
        self.starting_position = starting_position
        # prepare data (np.int was removed from NumPy; plain int is equivalent)
        self.starting_step = int(starting_position / dr)
        self.step = np.ones(alpha.shape, dtype=int) * int(starting_position / dr)
        # make container
        self.container = []
        # load vectors to gpu once -- they are reused by every compute() call
        self.d_alpha = cuda.to_device(np.ascontiguousarray(self.alpha))
        self.d_theta1 = cuda.to_device(np.ascontiguousarray(self.theta1))
        self.d_theta2 = cuda.to_device(np.ascontiguousarray(self.theta2))
        self.d_step = cuda.to_device(np.ascontiguousarray(self.step))

    def reset(self):
        """Resets the engine."""
        self.container = []
        self.step = np.ones(self.alpha.shape, dtype=int) * \
            int(self.starting_position / self.dr)
        self.d_step = cuda.to_device(self.step)

    def compute(self, sample_list):
        """Compute the tracking.

        Parameters
        ----------
        sample_list : ndarray
            numbers of iterations to consider, in non-increasing order

        Returns
        -------
        ndarray
            radius scan results
        """
        self.sample_list = sample_list
        threads_per_block = 512
        blocks_per_grid = self.step.size // 512 + 1
        # Sanity check: the grid covers every direction, samples are decreasing
        assert blocks_per_grid * threads_per_block > self.alpha.size
        for i in range(1, len(sample_list)):
            assert sample_list[i] <= sample_list[i - 1]
        omega_x, omega_y = modulation(self.epsilon, sample_list[0])
        d_omega_x = cuda.to_device(omega_x)
        d_omega_y = cuda.to_device(omega_y)
        # Execution: each pass extends the scan for a lower turn target
        for sample in sample_list:
            gpu.henon_map[blocks_per_grid, threads_per_block](
                self.d_alpha, self.d_theta1, self.d_theta2,
                self.dr, self.d_step, self.limit,
                sample, d_omega_x, d_omega_y)
            cuda.synchronize()
            self.d_step.copy_to_host(self.step)
            self.container.append(self.step.copy())
        return np.transpose(np.asarray(self.container)) * self.dr

    def block_compute(self, max_turns, min_turns):
        """Optimize block computation for ending up with a proper steps block!

        Parameters
        ----------
        max_turns : int
            max number of turns
        min_turns : int
            min number of turns

        Returns
        -------
        ndarray
            the steps array (one row per radius, one column per direction)
        """
        # precomputation: a cheap scan at min_turns bounds the radii to probe
        self.compute([min_turns])
        # computation
        maximum = np.max(self.container)
        minimum = np.min(self.container)
        self.steps = np.zeros((self.alpha.shape[0], maximum))
        rs = (np.arange(maximum) + 2) * self.dr
        # skip radii well below the weakest direction found at min_turns
        bool_mask = rs > (minimum * self.dr) / 2
        bb, aa = np.meshgrid(bool_mask, self.alpha, indexing='ij')
        rr, aa = np.meshgrid(rs, self.alpha, indexing='ij')
        rr, th1 = np.meshgrid(rs, self.theta1, indexing='ij')
        rr, th2 = np.meshgrid(rs, self.theta2, indexing='ij')
        bb = bb.flatten()
        aa = aa.flatten()
        th1 = th1.flatten()
        th2 = th2.flatten()
        rr = rr.flatten()
        x, px, y, py = polar_to_cartesian(rr, aa, th1, th2)
        steps = np.zeros_like(x, dtype=int)
        threads_per_block = 512
        blocks_per_grid = 10
        omega_x, omega_y = modulation(self.epsilon, max_turns)
        d_bb = cuda.to_device(bb)
        d_omega_x = cuda.to_device(omega_x)
        d_omega_y = cuda.to_device(omega_y)
        d_x = cuda.to_device(x)
        d_px = cuda.to_device(px)
        d_y = cuda.to_device(y)
        d_py = cuda.to_device(py)
        d_steps = cuda.to_device(steps)
        gpu.henon_map_to_the_end[blocks_per_grid, threads_per_block](
            d_x, d_px, d_y, d_py,
            d_steps, self.limit, max_turns,
            d_omega_x, d_omega_y,
            d_bb
        )
        d_steps.copy_to_host(steps)
        self.steps = steps.reshape(
            (rs.shape[0], self.alpha.shape[0]))
        return self.steps
class cpu_radial_scan(radial_scan):
    """CPU implementation of the radial scan."""

    def __init__(self, dr, alpha, theta1, theta2, epsilon, starting_position=0.0):
        assert alpha.size == theta1.size
        assert alpha.size == theta2.size
        # save data as members
        self.dr = dr
        self.alpha = alpha
        self.theta1 = theta1
        self.theta2 = theta2
        self.epsilon = epsilon
        self.limit = 100.0
        self.starting_position = starting_position
        # prepare data (np.int was removed from NumPy; plain int is equivalent)
        self.step = np.ones(alpha.shape, dtype=int) * int(starting_position / dr)
        # make container
        self.container = []

    def reset(self):
        """Resets the engine."""
        self.container = []
        self.step = np.ones(self.alpha.shape, dtype=int) * \
            int(self.starting_position / self.dr)

    def compute(self, sample_list):
        """Compute the tracking.

        Parameters
        ----------
        sample_list : ndarray
            numbers of iterations to consider, in non-increasing order

        Returns
        -------
        ndarray
            radius scan results
        """
        self.sample_list = sample_list
        # Sanity check: the sample list must be non-increasing
        for i in range(1, len(sample_list)):
            assert sample_list[i] <= sample_list[i - 1]
        omega_x, omega_y = modulation(self.epsilon, sample_list[0])
        # Execution: each pass extends the scan for a lower turn target
        for sample in sample_list:
            self.step = cpu.henon_map(
                self.alpha, self.theta1, self.theta2,
                self.dr, self.step, self.limit,
                sample, omega_x, omega_y)
            self.container.append(self.step.copy())
        return np.transpose(np.asarray(self.container)) * self.dr

    def block_compute(self, max_turns, min_turns):
        """Optimize block computation for ending up with a proper steps block!

        Parameters
        ----------
        max_turns : int
            max number of turns
        min_turns : int
            min number of turns

        Returns
        -------
        ndarray
            the steps array (one row per radius, one column per direction)
        """
        # precomputation: a cheap scan at min_turns bounds the radii to probe
        self.compute([min_turns])
        # computation
        maximum = np.max(self.container)
        minimum = np.min(self.container)
        rs = (np.arange(maximum) + 2) * self.dr
        # skip radii well below the weakest direction found at min_turns
        bool_mask = rs > (minimum * self.dr) / 2
        bb, aa = np.meshgrid(bool_mask, self.alpha, indexing='ij')
        rr, aa = np.meshgrid(rs, self.alpha, indexing='ij')
        rr, th1 = np.meshgrid(rs, self.theta1, indexing='ij')
        rr, th2 = np.meshgrid(rs, self.theta2, indexing='ij')
        bb = bb.flatten()
        aa = aa.flatten()
        th1 = th1.flatten()
        th2 = th2.flatten()
        rr = rr.flatten()
        x, px, y, py = polar_to_cartesian(rr, aa, th1, th2)
        omega_x, omega_y = modulation(self.epsilon, max_turns)
        steps = cpu.henon_map_to_the_end(
            x, px, y, py,
            self.limit, max_turns,
            omega_x, omega_y,
            bb
        )
        self.steps = steps.reshape(
            (rs.shape[0], self.alpha.shape[0]))
        return self.steps
class full_track(object):
    """Tracker that stores the whole trajectory of every initial condition."""

    def __init__(self):
        pass

    def compute(self):
        pass

    def get_data(self):
        """Get the data.

        Returns
        -------
        tuple of 2D ndarray [n_iterations, n_samples]
            (radius, alpha, theta1, theta2)
        """
        return cpu.cartesian_to_polar(self.x, self.px, self.y, self.py)

    def accumulate_and_return(self, n_sectors):
        """Returns the summed results (power 4 as documented).

        Parameters
        ----------
        n_sectors : int
            number of sectors to consider in the 2 theta space

        Returns
        -------
        ndarray
            list of values for the different instances considered.
        """
        radius, alpha, th1, th2 = cpu.cartesian_to_polar(
            self.x, self.px, self.y, self.py)
        # the intermediate matrices are kept for recursive_accumulation()
        self.count_matrix, self.matrices, result = cpu.accumulate_and_return(radius, alpha, th1, th2, n_sectors)
        return result

    def recursive_accumulation(self):
        """Executes a recursive accumulation in order to test lower binning values.

        N.B. execute ``accumulate_and_return`` first!!!

        Returns
        -------
        tuple of lists
            Tuple of lists with (count_matrices, averages, results)
        """
        return cpu.recursive_accumulation(self.count_matrix, self.matrices)

    @staticmethod
    def generate_instance(radius, alpha, theta1, theta2, iters, epsilon, cuda_device=None):
        """Generate an instance of the class.

        Parameters
        ----------
        radius : ndarray
            radius to consider
        alpha : ndarray
            initial angle
        theta1 : ndarray
            initial theta1
        theta2 : ndarray
            initial theta2
        iters : ndarray
            n_iterations to perform (per initial condition)
        epsilon : float
            intensity of the modulation
        cuda_device : bool, optional
            force (True/False) usage of the GPU; autodetect when None

        Returns
        -------
        class instance
            optimized (CPU or GPU) class instance
        """
        if cuda_device is None:  # autodetect CUDA availability
            cuda_device = cuda.is_available()
        if cuda_device:
            return gpu_full_track(radius, alpha, theta1, theta2, iters, epsilon)
        else:
            return cpu_full_track(radius, alpha, theta1, theta2, iters, epsilon)
class gpu_full_track(full_track):
    # GPU (CUDA) implementation of full_track.
    def __init__(self, radius, alpha, theta1, theta2, iters, epsilon):
        assert alpha.size == theta1.size
        assert alpha.size == theta2.size
        assert alpha.size == radius.size
        # save data as members
        self.radius = radius
        self.alpha = alpha
        self.theta1 = theta1
        self.theta2 = theta2
        self.epsilon = epsilon
        self.iters = iters
        self.max_iters = np.max(self.iters)
        # make containers: one (turn, particle) matrix per coordinate, NaN-filled
        self.x = np.empty((self.max_iters, alpha.size)) * np.nan
        self.px = np.empty((self.max_iters, alpha.size)) * np.nan
        self.y = np.empty((self.max_iters, alpha.size)) * np.nan
        self.py = np.empty((self.max_iters, alpha.size)) * np.nan
        # row 0 holds the cartesian form of the initial conditions
        self.x[0, :], self.px[0, :], self.y[0, :], self.py[0, :] = gpu.actual_polar_to_cartesian(radius, alpha, theta1, theta2)
    def compute(self):
        """Compute the tracking
        Returns
        -------
        tuple of 2D ndarray [n_iterations, n_samples]
            (x, px, y, py) -- note: cartesian, use get_data() for polar
        """
        # load vectors to gpu
        d_x = cuda.to_device(self.x)
        d_px = cuda.to_device(self.px)
        d_y = cuda.to_device(self.y)
        d_py = cuda.to_device(self.py)
        d_iters = cuda.to_device(self.iters)
        threads_per_block = 512
        # NOTE(review): grid fixed at 10 blocks -- assumes the kernel loops
        # over the particles; confirm in gpu_henon_core.
        blocks_per_grid = 10
        omega_x, omega_y = modulation(self.epsilon, self.max_iters)
        d_omega_x = cuda.to_device(omega_x)
        d_omega_y = cuda.to_device(omega_y)
        # Execution
        gpu.henon_full_track[blocks_per_grid, threads_per_block](
            d_x, d_px, d_y, d_py,
            d_iters, d_omega_x, d_omega_y
        )
        # bring the full trajectories back to the host
        d_x.copy_to_host(self.x)
        d_y.copy_to_host(self.y)
        d_px.copy_to_host(self.px)
        d_py.copy_to_host(self.py)
        d_iters.copy_to_host(self.iters)
        return self.x, self.px, self.y, self.py
class cpu_full_track(full_track):
    """CPU implementation of full_track: stores every turn of every particle."""

    def __init__(self, radius, alpha, theta1, theta2, iters, epsilon):
        assert alpha.size == theta1.size
        assert alpha.size == theta2.size
        assert alpha.size == radius.size
        # keep the raw initial conditions
        self.radius = radius
        self.alpha = alpha
        self.theta1 = theta1
        self.theta2 = theta2
        self.epsilon = epsilon
        self.iters = iters
        self.max_iters = np.max(self.iters)
        # one (turn, particle) matrix per coordinate, NaN-filled until computed
        shape = (self.max_iters, alpha.size)
        self.x = np.full(shape, np.nan)
        self.px = np.full(shape, np.nan)
        self.y = np.full(shape, np.nan)
        self.py = np.full(shape, np.nan)
        # row 0 holds the cartesian form of the initial conditions
        self.x[0, :], self.px[0, :], self.y[0, :], self.py[0, :] = cpu.polar_to_cartesian(
            radius, alpha, theta1, theta2)

    def compute(self):
        """Run the tracking.

        Returns
        -------
        tuple of 2D ndarray [n_iterations, n_samples]
            (x, px, y, py) -- cartesian; use get_data() for the polar view
        """
        tunes_x, tunes_y = modulation(self.epsilon, self.max_iters)
        self.x, self.px, self.y, self.py = cpu.henon_full_track(
            self.x, self.px, self.y, self.py,
            self.iters, tunes_x, tunes_y
        )
        return self.x, self.px, self.y, self.py
| import matplotlib.pyplot as plt
from numba import cuda, jit, njit, prange
import numpy as np
from tqdm import tqdm
import pickle
import time
from . import gpu_henon_core as gpu
from . import cpu_henon_core as cpu
from .cpu_henon_core import recursive_accumulation as cpu_accumulate_and_return
def polar_to_cartesian(radius, alpha, theta1, theta2):
    """Convert 4D polar coordinates to cartesian (x, px, y, py).

    Thin module-level wrapper around the CPU core implementation.
    """
    return cpu.polar_to_cartesian(radius, alpha, theta1, theta2)
def cartesian_to_polar(x, px, y, py):
    """Convert cartesian coordinates (x, px, y, py) back to 4D polar form.

    Thin module-level wrapper around the CPU core implementation.
    """
    return cpu.cartesian_to_polar(x, px, y, py)
@njit
def modulation(epsilon, n_elements, first_index=0):
    """Generates a modulation

    Parameters
    ----------
    epsilon : float
        intensity of modulation
    n_elements : int
        number of elements
    first_index : int, optional
        starting point of the modulation, by default 0

    Returns
    -------
    tuple of ndarray
        (omega_x, omega_y)
    """
    # amplitudes of the seven modulation harmonics
    coefficients = np.array([1.000e-4, 0.218e-4, 0.708e-4, 0.254e-4,
                             0.100e-4, 0.078e-4, 0.218e-4])
    # harmonic numbers of the base frequency 2*pi/868.12
    # (hoisted: the original repeated the 2*pi/868.12 constant seven times)
    base = 2 * np.pi / 868.12
    modulations = np.array([1.0, 2.0, 3.0, 6.0, 7.0, 10.0, 12.0]) * base
    # per-turn modulation amplitude
    omega_sum = np.array([
        np.sum(coefficients * np.cos(modulations * k))
        for k in range(first_index, first_index + n_elements)
    ])
    # base tunes 0.168 / 0.201 (in units of 2*pi), modulated by epsilon
    omega_x = 0.168 * 2 * np.pi * (1 + epsilon * omega_sum)
    omega_y = 0.201 * 2 * np.pi * (1 + epsilon * omega_sum)
    return omega_x, omega_y
class partial_track(object):
    """Kinda of a deprecated method. This class is meant to do a partial
    tracking (i.e. only last step is considered) of given initial
    condistions."""

    def __init__(self):
        pass

    def compute(self, n_iterations):
        pass

    def reset(self):
        pass

    def get_data(self):
        """Get the data

        Returns
        -------
        tuple
            (radius, alpha, theta1, theta2, steps)
        """
        return self.r, self.alpha, self.theta1, self.theta2, self.step

    def get_cartesian_data(self):
        """Return the data converted to cartesian coordinates plus steps."""
        x, px, y, py = polar_to_cartesian(self.r, self.alpha, self.theta1, self.theta2)
        return x, px, y, py, self.step

    def get_radiuses(self):
        return self.r

    def get_filtered_radiuses(self):
        # radius == 0.0 marks a lost particle
        return self.r[self.r != 0.0]

    def get_times(self):
        return self.step

    def get_action(self):
        # linear action I = r^2 / 2
        return np.power(self.r, 2) / 2

    def get_filtered_action(self):
        # action of surviving particles only
        return np.power(self.r[self.r != 0.0], 2) / 2

    def get_survival_count(self):
        return np.count_nonzero(self.r != 0.0)

    def get_total_count(self):
        return self.r.size

    def get_survival_rate(self):
        return np.count_nonzero(self.r != 0.0) / self.r.size

    @staticmethod
    def generate_instance(radius, alpha, theta1, theta2, epsilon, cuda_device=None):
        """Generate an instance of the engine.

        Parameters
        ----------
        radius : ndarray
            array of radiuses to consider
        alpha : ndarray
            array of initial alphas
        theta1 : ndarray
            array of initial theta1
        theta2 : ndarray
            array of initial theta2
        epsilon : float
            modulation intensity

        Returns
        -------
        class instance
            optimized class instance
        """
        # PEP 8: compare to None with `is`, not `==`
        if cuda_device is None:
            cuda_device = cuda.is_available()
        if cuda_device:
            return gpu_partial_track(radius, alpha, theta1, theta2, epsilon)
        else:
            return cpu_partial_track(radius, alpha, theta1, theta2, epsilon)
class cpu_partial_track(partial_track):
    """CPU implementation of :class:`partial_track`."""

    def __init__(self, radius, alpha, theta1, theta2, epsilon):
        assert alpha.size == theta1.size
        assert alpha.size == theta2.size
        assert alpha.size == radius.size
        # save data as members
        self.r = radius
        self.alpha = alpha
        self.theta1 = theta1
        self.theta2 = theta2
        # pristine copies used by reset()
        self.r_0 = radius.copy()
        self.alpha_0 = alpha.copy()
        self.theta1_0 = theta1.copy()
        self.theta2_0 = theta2.copy()
        self.epsilon = epsilon
        self.total_iters = 0
        self.limit = 1.0
        # make containers
        # (np.int was removed in NumPy 1.24 -- the builtin int is its
        # exact replacement as a dtype)
        self.step = np.zeros((alpha.size), dtype=int)
        # cartesian containers come straight from the conversion
        # (the original pre-allocated them with np.empty only to rebind
        # them immediately -- dead stores removed)
        self.x, self.px, self.y, self.py = cpu.polar_to_cartesian(
            self.r_0, self.alpha_0, self.theta1_0, self.theta2_0)

    def compute(self, n_iterations):
        """Compute the tracking

        Returns
        -------
        tuple of ndarray [n_elements]
            (x, px, y, py, steps)
        """
        # resume the modulation where the previous call stopped
        omega_x, omega_y = modulation(
            self.epsilon, n_iterations, self.total_iters)
        # Execution
        self.x, self.px, self.y, self.py, self.step = cpu.henon_partial_track(
            self.x, self.px, self.y, self.py, self.step, self.limit,
            n_iterations, omega_x, omega_y
        )
        self.total_iters += n_iterations
        # keep the polar view in sync for the base-class getters
        self.r, self.alpha, self.theta1, self.theta2 = cpu.cartesian_to_polar(
            self.x, self.px, self.y, self.py)
        return self.x, self.px, self.y, self.py, self.step
        #return self.r, self.alpha, self.theta1, self.theta2, self.step

    def reset(self):
        """Resets the engine
        """
        self.r = self.r_0
        self.alpha = self.alpha_0
        self.theta1 = self.theta1_0
        self.theta2 = self.theta2_0
        self.step = np.zeros((self.alpha.size), dtype=int)
        self.x, self.px, self.y, self.py = cpu.polar_to_cartesian(
            self.r_0, self.alpha_0, self.theta1_0, self.theta2_0)
        self.total_iters = 0
class gpu_partial_track(partial_track):
    """GPU implementation of :class:`partial_track`."""

    def __init__(self, radius, alpha, theta1, theta2, epsilon):
        assert alpha.size == theta1.size
        assert alpha.size == theta2.size
        assert alpha.size == radius.size
        # save data as members
        self.r = radius
        self.alpha = alpha
        self.theta1 = theta1
        self.theta2 = theta2
        # pristine copies used by reset()
        self.r_0 = radius.copy()
        self.alpha_0 = alpha.copy()
        self.theta1_0 = theta1.copy()
        self.theta2_0 = theta2.copy()
        self.epsilon = epsilon
        self.total_iters = 0
        self.limit = 1.0
        # make containers
        # (np.int was removed in NumPy 1.24 -- use the builtin int)
        self.step = np.zeros((alpha.size), dtype=int)
        # cartesian containers come straight from the conversion
        # (dead np.empty pre-allocations removed)
        self.x, self.px, self.y, self.py = cpu.polar_to_cartesian(
            self.r_0, self.alpha_0, self.theta1_0, self.theta2_0)

    def compute(self, n_iterations):
        """Compute the tracking

        Returns
        -------
        tuple of ndarray [n_elements]
            (x, px, y, py, steps)
        """
        threads_per_block = 512
        blocks_per_grid = self.alpha.size // 512 + 1
        # load to GPU
        d_x = cuda.to_device(self.x)
        d_y = cuda.to_device(self.y)
        d_px = cuda.to_device(self.px)
        d_py = cuda.to_device(self.py)
        d_step = cuda.to_device(self.step)
        # resume the modulation where the previous call stopped
        omega_x, omega_y = modulation(
            self.epsilon, n_iterations, self.total_iters)
        d_omega_x = cuda.to_device(omega_x)
        d_omega_y = cuda.to_device(omega_y)
        # Execution
        gpu.henon_partial_track[blocks_per_grid, threads_per_block](
            d_x, d_px, d_y, d_py, d_step, self.limit,
            n_iterations, d_omega_x, d_omega_y
        )
        self.total_iters += n_iterations
        d_x.copy_to_host(self.x)
        d_y.copy_to_host(self.y)
        d_px.copy_to_host(self.px)
        d_py.copy_to_host(self.py)
        d_step.copy_to_host(self.step)
        # keep the polar view in sync for the base-class getters
        self.r, self.alpha, self.theta1, self.theta2 = cpu.cartesian_to_polar(
            self.x, self.px, self.y, self.py)
        return self.x, self.px, self.y, self.py, self.step
        #return self.r, self.alpha, self.theta1, self.theta2, self.step

    def reset(self):
        """Resets the engine
        """
        self.r = self.r_0
        self.alpha = self.alpha_0
        self.theta1 = self.theta1_0
        self.theta2 = self.theta2_0
        self.step = np.zeros((self.alpha.size), dtype=int)
        # NOTE(review): __init__ converts with cpu.polar_to_cartesian while
        # reset uses gpu.actual_polar_to_cartesian -- confirm both helpers
        # return the same layout.
        self.x, self.px, self.y, self.py = gpu.actual_polar_to_cartesian(
            self.r_0, self.alpha_0, self.theta1_0, self.theta2_0)
        self.total_iters = 0
class uniform_scan(object):
    """With this class we can easly scan a uniform 4D cube of the Hénon map"""

    def __init__(self):
        pass

    def scan(self):
        pass

    def save_values(self, f, label="SixTrack LHC no bb flat"):
        """Pickle the scan results and their metadata to file ``f``.

        Parameters
        ----------
        f : str or path-like
            destination file
        label : str, optional
            human-readable tag stored alongside the data
        """
        self.label = label
        data_dict = {
            "label": label,
            "top": self.top,
            "steps": self.steps,
            "starting_radius": self.starting_radius,
            "times": self.times,
            "max_turns": self.max_turns
        }
        with open(f, 'wb') as destination:
            # protocol 4 keeps the file readable from Python >= 3.4
            pickle.dump(data_dict, destination, protocol=4)

    @staticmethod
    def generate_instance(epsilon, top, steps, starting_radius=0.0001, cuda_device=None):
        """Create an uniform scan object

        Parameters
        ----------
        epsilon : float
            modulation intensity
        top : float
            maximum radius
        steps : int
            steps from zero to top (becomes steps * 2 + 1)
        starting_radius : float, optional
            from which position we have to start with the actual computation, by default 0.0001
        cuda_device : bool, optional
            do we have a CUDA capable device (make it manual), by default None

        Returns
        -------
        object
            uniform_scan object
        """
        # PEP 8: compare to None with `is`, not `==`
        if cuda_device is None:
            cuda_device = cuda.is_available()
        if cuda_device:
            return gpu_uniform_scan(epsilon, top, steps, starting_radius)
        else:
            return cpu_uniform_scan(epsilon, top, steps, starting_radius)
class cpu_uniform_scan(uniform_scan):
    """CPU scan of the uniform 4D cube."""

    def __init__(self, epsilon, top, steps, starting_radius=0.0001):
        self.epsilon = epsilon
        self.top = top
        self.steps = steps
        self.starting_radius = starting_radius
        # 2*steps+1 grid points per axis in [-top, top]
        self.coords = np.linspace(-top, top, steps * 2 + 1)
        # NOTE(review): the GPU twin passes indexing='ij' here; this one uses
        # the default 'xy', which swaps the first two axes of the grids --
        # confirm whether that asymmetry is intentional.
        self.X, self.PX, self.Y, self.PY = np.meshgrid(
            self.coords, self.coords, self.coords, self.coords)
        self.X2 = np.power(self.X, 2)
        self.PX2 = np.power(self.PX, 2)
        self.Y2 = np.power(self.Y, 2)
        self.PY2 = np.power(self.PY, 2)
        # keep only the points outside the sphere of radius starting_radius
        self.bool_mask = (
            self.X2
            + self.PX2
            + self.Y2
            + self.PY2
            >= np.power(starting_radius, 2)
        )
        # flattened views handed to the tracking routine
        self.X_f = self.X.flatten()
        self.PX_f = self.PX.flatten()
        self.Y_f = self.Y.flatten()
        self.PY_f = self.PY.flatten()
        self.bool_mask_f = self.bool_mask.flatten()
        self.times = np.zeros_like(self.X)
        self.times_f = self.times.flatten()
        # number of initial conditions actually tracked
        self.n_samples = np.count_nonzero(self.bool_mask_f)

    def scan(self, max_turns):
        """Execute a scanning of everything

        Parameters
        ----------
        max_turns : int
            turn limit

        Returns
        -------
        ndarray
            4d array with stable iterations inside
        """
        self.max_turns = max_turns
        # NOTE(review): the modulation is sized with n_samples (number of
        # initial conditions), not max_turns; if max_turns > n_samples the
        # tracking would run past the omega arrays -- confirm.
        omega_x, omega_y = modulation(self.epsilon, self.n_samples)
        # Filling
        start = time.time()
        self.times_f = cpu.henon_map_to_the_end(
            self.X_f, self.PX_f, self.Y_f, self.PY_f, 100.0, max_turns, omega_x, omega_y, self.bool_mask_f)
        print("Elapsed time for execution: {} s".format(time.time() - start))
        # back to the 4D grid shape
        self.times = self.times_f.reshape(
            (self.steps * 2 + 1, self.steps * 2 + 1, self.steps * 2 + 1, self.steps * 2 + 1))
        return self.times
class gpu_uniform_scan(uniform_scan):
    """GPU scan of the uniform 4D cube."""

    def __init__(self, epsilon, top, steps, starting_radius=0.0001):
        self.epsilon = epsilon
        self.top = top
        self.steps = steps
        self.starting_radius = starting_radius
        # 2*steps+1 grid points per axis in [-top, top]
        self.coords = np.linspace(-top, top, steps * 2 + 1)
        # matrix ('ij') indexing keeps axis order (X, PX, Y, PY)
        self.X, self.PX, self.Y, self.PY = np.meshgrid(
            self.coords, self.coords, self.coords, self.coords,
            indexing='ij')
        self.X2 = np.power(self.X, 2)
        self.PX2 = np.power(self.PX, 2)
        self.Y2 = np.power(self.Y, 2)
        self.PY2 = np.power(self.PY, 2)
        # keep only the points outside the sphere of radius starting_radius
        self.bool_mask = (
            self.X2
            + self.PX2
            + self.Y2
            + self.PY2
            >= np.power(starting_radius, 2)
        )
        # flattened views handed to the kernel
        self.X_f = self.X.flatten()
        self.PX_f = self.PX.flatten()
        self.Y_f = self.Y.flatten()
        self.PY_f = self.PY.flatten()
        self.bool_mask_f = self.bool_mask.flatten()
        self.times = np.zeros_like(self.X)
        self.times_f = self.times.flatten()
        # number of initial conditions actually tracked
        self.n_samples = np.count_nonzero(self.bool_mask_f)

    def scan(self, max_turns):
        """Execute a scanning of everything

        Parameters
        ----------
        max_turns : int
            turn limit

        Returns
        -------
        ndarray
            4d array with stable iterations inside
        """
        # NOTE(review): fixed grid of 10 blocks x 512 threads -- presumably
        # the kernel strides over the samples; confirm.
        threads_per_block = 512
        blocks_per_grid = 10
        d_x = cuda.to_device(self.X_f)
        d_px = cuda.to_device(self.PX_f)
        d_y = cuda.to_device(self.Y_f)
        d_py = cuda.to_device(self.PY_f)
        d_times = cuda.to_device(self.times_f)
        d_bool_mask = cuda.to_device(self.bool_mask_f)
        self.max_turns = max_turns
        # NOTE(review): the modulation is sized with n_samples, not
        # max_turns; if max_turns > n_samples the kernel would read past
        # the omega arrays -- confirm.
        omega_x, omega_y = modulation(self.epsilon, self.n_samples)
        d_omega_x = cuda.to_device(omega_x)
        d_omega_y = cuda.to_device(omega_y)
        # Filling
        start = time.time()
        gpu.henon_map_to_the_end[blocks_per_grid, threads_per_block](
            d_x, d_px, d_y, d_py, d_times, 100.0, max_turns, d_omega_x, d_omega_y, d_bool_mask
        )
        print("Elapsed time for execution: {} s".format(time.time() - start))
        d_times.copy_to_host(self.times_f)
        # back to the 4D grid shape
        self.times = self.times_f.reshape(
            (self.steps * 2 + 1, self.steps * 2 + 1, self.steps * 2 + 1, self.steps * 2 + 1))
        return self.times
class radial_scan(object):
    """This class contains most of the tools required for doing a precise and on point radial scan for Dynamic Aperture estimations. It's a bit messy tho...
    """

    def __init__(self):
        pass

    def compute(self):
        pass

    def dummy_compute(self):
        pass

    def reset(self):
        pass

    def get_data(self):
        """Get the data

        Returns
        -------
        ndarray
            The data intended as last stable radius for the given amount of turns.
        """
        # container holds step counts; multiply by dr to get radii
        return np.transpose(np.asarray(self.container)) * self.dr

    @staticmethod
    def generate_instance(dr, alpha, theta1, theta2, epsilon, starting_position=0.0, cuda_device=None):
        """init an henon optimized radial tracker!

        Parameters
        ----------
        dr : float
            radial step
        alpha : ndarray
            alpha angles to consider (raw)
        theta1 : ndarray
            theta1 angles to consider (raw)
        theta2 : ndarray
            theta2 angles to consider (raw)
        epsilon : float
            intensity of modulation

        Returns
        -------
        Optimized instance
            optimized instance of the class (CPU or GPU)
        """
        # PEP 8: compare to None with `is`, not `==`
        if cuda_device is None:
            cuda_device = cuda.is_available()
        if cuda_device:
            return gpu_radial_scan(dr, alpha, theta1, theta2, epsilon, starting_position)
        else:
            return cpu_radial_scan(dr, alpha, theta1, theta2, epsilon, starting_position)

    def save_values(self, f, label="Hénon map scanning"):
        """Pickle the scan configuration and results to file ``f``."""
        self.label = label
        data_dict = {
            "label": label,
            "alpha": self.alpha,
            "theta1": self.theta1,
            "theta2": self.theta2,
            "dr": self.dr,
            "starting_position": self.starting_position,
            "starting_step": 0,  # this has its meaning in the bigger picture, trust me!
            "values": np.transpose(self.steps),
            "max_turns": self.sample_list[0],
            "min_turns": self.sample_list[-1]
        }
        with open(f, 'wb') as destination:
            # protocol 4 keeps the file readable from Python >= 3.4
            pickle.dump(data_dict, destination, protocol=4)
class gpu_radial_scan(radial_scan):
    """GPU implementation of :class:`radial_scan`."""

    def __init__(self, dr, alpha, theta1, theta2, epsilon, starting_position=0.0):
        assert alpha.size == theta1.size
        assert alpha.size == theta2.size
        assert starting_position >= 0.0
        # save data as members
        self.dr = dr
        self.alpha = alpha
        self.theta1 = theta1
        self.theta2 = theta2
        self.epsilon = epsilon
        self.limit = 100.0
        self.starting_position = starting_position
        # prepare data
        self.starting_step = int(starting_position / dr)
        # np.int was removed in NumPy 1.24; the builtin int is its exact
        # replacement as a dtype
        self.step = np.ones(alpha.shape, dtype=int) * int(starting_position / dr)
        # make container
        self.container = []
        # load vectors to gpu
        self.d_alpha = cuda.to_device(np.ascontiguousarray(self.alpha))
        self.d_theta1 = cuda.to_device(np.ascontiguousarray(self.theta1))
        self.d_theta2 = cuda.to_device(np.ascontiguousarray(self.theta2))
        self.d_step = cuda.to_device(np.ascontiguousarray(self.step))

    def reset(self):
        """Resets the engine.
        """
        self.container = []
        self.step = np.ones(self.alpha.shape, dtype=int) * \
            int(self.starting_position / self.dr)
        self.d_step = cuda.to_device(self.step)

    def compute(self, sample_list):
        """Compute the tracking

        Parameters
        ----------
        sample_list : ndarray
            iterations to consider (must be sorted in decreasing order)

        Returns
        -------
        ndarray
            radius scan results
        """
        self.sample_list = sample_list
        threads_per_block = 512
        blocks_per_grid = self.step.size // 512 + 1
        # Sanity check: enough threads, and samples in decreasing order
        assert blocks_per_grid * threads_per_block > self.alpha.size
        for i in range(1, len(sample_list)):
            assert sample_list[i] <= sample_list[i - 1]
        # one omega table sized for the longest run serves every sample
        omega_x, omega_y = modulation(self.epsilon, sample_list[0])
        d_omega_x = cuda.to_device(omega_x)
        d_omega_y = cuda.to_device(omega_y)
        # Execution
        for sample in sample_list:
            gpu.henon_map[blocks_per_grid, threads_per_block](
                self.d_alpha, self.d_theta1, self.d_theta2,
                self.dr, self.d_step, self.limit,
                sample, d_omega_x, d_omega_y)
            cuda.synchronize()
            self.d_step.copy_to_host(self.step)
            self.container.append(self.step.copy())
        return np.transpose(np.asarray(self.container)) * self.dr

    def block_compute(self, max_turns, min_turns):
        """Optimize block computation for ending up with a proper steps block!

        Parameters
        ----------
        max_turns : int
            max number of turns
        min_turns : int
            min number of turns

        Returns
        -------
        ndarray
            the steps array
        """
        # precomputation: quick scan at min_turns bounds the radius range
        self.compute([min_turns])
        # computation
        maximum = np.max(self.container)
        minimum = np.min(self.container)
        self.steps = np.zeros((self.alpha.shape[0], maximum))
        rs = (np.arange(maximum) + 2) * self.dr
        # only track radii above half of the weakest surviving radius
        bool_mask = rs > (minimum * self.dr) / 2
        bb, aa = np.meshgrid(bool_mask, self.alpha, indexing='ij')
        rr, aa = np.meshgrid(rs, self.alpha, indexing='ij')
        rr, th1 = np.meshgrid(rs, self.theta1, indexing='ij')
        rr, th2 = np.meshgrid(rs, self.theta2, indexing='ij')
        bb = bb.flatten()
        aa = aa.flatten()
        th1 = th1.flatten()
        th2 = th2.flatten()
        rr = rr.flatten()
        x, px, y, py = polar_to_cartesian(rr, aa, th1, th2)
        # np.int removed in NumPy 1.24 -- use the builtin int
        steps = np.zeros_like(x, dtype=int)
        threads_per_block = 512
        blocks_per_grid = 10
        omega_x, omega_y = modulation(self.epsilon, max_turns)
        d_bb = cuda.to_device(bb)
        d_omega_x = cuda.to_device(omega_x)
        d_omega_y = cuda.to_device(omega_y)
        d_x = cuda.to_device(x)
        d_px = cuda.to_device(px)
        d_y = cuda.to_device(y)
        d_py = cuda.to_device(py)
        d_steps = cuda.to_device(steps)
        gpu.henon_map_to_the_end[blocks_per_grid, threads_per_block](
            d_x, d_px, d_y, d_py,
            d_steps, self.limit, max_turns,
            d_omega_x, d_omega_y,
            d_bb
        )
        d_steps.copy_to_host(steps)
        self.steps = steps.reshape(
            (rs.shape[0], self.alpha.shape[0]))
        return self.steps
class cpu_radial_scan(radial_scan):
    """CPU implementation of :class:`radial_scan`."""

    def __init__(self, dr, alpha, theta1, theta2, epsilon, starting_position=0.0):
        assert alpha.size == theta1.size
        assert alpha.size == theta2.size
        # save data as members
        self.dr = dr
        self.alpha = alpha
        self.theta1 = theta1
        self.theta2 = theta2
        self.epsilon = epsilon
        self.limit = 100.0
        self.starting_position = starting_position
        # prepare data
        # (np.int was removed in NumPy 1.24 -- use the builtin int)
        self.step = np.ones(alpha.shape, dtype=int) * int(starting_position / dr)
        # make container
        self.container = []

    def reset(self):
        """Resets the engine.
        """
        self.container = []
        self.step = np.ones(self.alpha.shape, dtype=int) * \
            int(self.starting_position / self.dr)

    def compute(self, sample_list):
        """Compute the tracking

        Parameters
        ----------
        sample_list : ndarray
            iterations to consider (must be sorted in decreasing order)

        Returns
        -------
        ndarray
            radius scan results
        """
        self.sample_list = sample_list
        # Sanity check: samples must be in decreasing order
        for i in range(1, len(sample_list)):
            assert sample_list[i] <= sample_list[i - 1]
        # one omega table sized for the longest run serves every sample
        omega_x, omega_y = modulation(self.epsilon, sample_list[0])
        # Execution
        for sample in sample_list:
            self.step = cpu.henon_map(
                self.alpha, self.theta1, self.theta2,
                self.dr, self.step, self.limit,
                sample, omega_x, omega_y)
            self.container.append(self.step.copy())
        return np.transpose(np.asarray(self.container)) * self.dr

    def block_compute(self, max_turns, min_turns):
        """Optimize block computation for ending up with a proper steps block!

        Parameters
        ----------
        max_turns : int
            max number of turns
        min_turns : int
            min number of turns

        Returns
        -------
        ndarray
            the steps array
        """
        # precomputation: quick scan at min_turns bounds the radius range
        self.compute([min_turns])
        # computation
        maximum = np.max(self.container)
        minimum = np.min(self.container)
        rs = (np.arange(maximum) + 2) * self.dr
        # only track radii above half of the weakest surviving radius
        bool_mask = rs > (minimum * self.dr) / 2
        bb, aa = np.meshgrid(bool_mask, self.alpha, indexing='ij')
        rr, aa = np.meshgrid(rs, self.alpha, indexing='ij')
        rr, th1 = np.meshgrid(rs, self.theta1, indexing='ij')
        rr, th2 = np.meshgrid(rs, self.theta2, indexing='ij')
        bb = bb.flatten()
        aa = aa.flatten()
        th1 = th1.flatten()
        th2 = th2.flatten()
        rr = rr.flatten()
        x, px, y, py = polar_to_cartesian(rr, aa, th1, th2)
        omega_x, omega_y = modulation(self.epsilon, max_turns)
        steps = cpu.henon_map_to_the_end(
            x, px, y, py,
            self.limit, max_turns,
            omega_x, omega_y,
            bb
        )
        self.steps = steps.reshape(
            (rs.shape[0], self.alpha.shape[0]))
        return self.steps
class full_track(object):
    """Track every iteration of each initial condition (full orbit history)."""

    def __init__(self):
        pass

    def compute(self):
        pass

    def get_data(self):
        """Get the data

        Returns
        -------
        tuple of 2D ndarray [n_iterations, n_samples]
            (radius, alpha, theta1, theta2)
        """
        return cpu.cartesian_to_polar(self.x, self.px, self.y, self.py)

    def accumulate_and_return(self, n_sectors):
        """Returns the summed results (power 4 as documented)

        Parameters
        ----------
        n_sectors : int
            number of sectors to consider in the 2 theta space

        Returns
        -------
        ndarray
            list of values for the different istances considered.
        """
        radius, alpha, th1, th2 = cpu.cartesian_to_polar(
            self.x, self.px, self.y, self.py)
        # keep the intermediate matrices for recursive_accumulation()
        self.count_matrix, self.matrices, result = cpu.accumulate_and_return(radius, alpha, th1, th2, n_sectors)
        return result

    def recursive_accumulation(self):
        """Executes a recursive accumulation in order to test lower binning values.
        N.B. execute "accumulate_and_return first!!!"

        Returns
        -------
        tuple of lists
            Tuple of lists with (count_matrices, averages, results)
        """
        return cpu.recursive_accumulation(self.count_matrix, self.matrices)

    @staticmethod
    def generate_instance(radius, alpha, theta1, theta2, iters, epsilon, cuda_device=None):
        """Generate an instance of the class

        Parameters
        ----------
        radius : ndarray
            radius to consider
        alpha : ndarray
            initial angle
        theta1 : ndarray
            initial theta1
        theta2 : ndarray
            initial theta2
        iters : ndarray
            n_iterations to perform
        epsilon : float
            intensity of the modulation

        Returns
        -------
        class instance
            optimized class instance
        """
        # PEP 8: compare to None with `is`, not `==`
        if cuda_device is None:
            cuda_device = cuda.is_available()
        if cuda_device:
            return gpu_full_track(radius, alpha, theta1, theta2, iters, epsilon)
        else:
            return cpu_full_track(radius, alpha, theta1, theta2, iters, epsilon)
class gpu_full_track(full_track):
    """GPU implementation of :class:`full_track` (stores every turn)."""

    def __init__(self, radius, alpha, theta1, theta2, iters, epsilon):
        assert alpha.size == theta1.size
        assert alpha.size == theta2.size
        assert alpha.size == radius.size
        # save data as members
        self.radius = radius
        self.alpha = alpha
        self.theta1 = theta1
        self.theta2 = theta2
        self.epsilon = epsilon
        self.iters = iters
        self.max_iters = np.max(self.iters)
        # make NaN-filled containers so untracked turns stay NaN
        # (np.full replaces the original np.empty(...) * np.nan, which
        # relied on multiplying uninitialized memory by NaN)
        self.x = np.full((self.max_iters, alpha.size), np.nan)
        self.px = np.full((self.max_iters, alpha.size), np.nan)
        self.y = np.full((self.max_iters, alpha.size), np.nan)
        self.py = np.full((self.max_iters, alpha.size), np.nan)
        # first row holds the initial conditions in cartesian coordinates
        self.x[0, :], self.px[0, :], self.y[0, :], self.py[0, :] = \
            gpu.actual_polar_to_cartesian(radius, alpha, theta1, theta2)

    def compute(self):
        """Compute the tracking

        Returns
        -------
        tuple of 2D ndarray [n_iterations, n_samples]
            (x, px, y, py)
        """
        # load vectors to gpu
        d_x = cuda.to_device(self.x)
        d_px = cuda.to_device(self.px)
        d_y = cuda.to_device(self.y)
        d_py = cuda.to_device(self.py)
        d_iters = cuda.to_device(self.iters)
        threads_per_block = 512
        blocks_per_grid = 10
        omega_x, omega_y = modulation(self.epsilon, self.max_iters)
        d_omega_x = cuda.to_device(omega_x)
        d_omega_y = cuda.to_device(omega_y)
        # Execution
        gpu.henon_full_track[blocks_per_grid, threads_per_block](
            d_x, d_px, d_y, d_py,
            d_iters, d_omega_x, d_omega_y
        )
        d_x.copy_to_host(self.x)
        d_y.copy_to_host(self.y)
        d_px.copy_to_host(self.px)
        d_py.copy_to_host(self.py)
        d_iters.copy_to_host(self.iters)
        return self.x, self.px, self.y, self.py
class cpu_full_track(full_track):
    """CPU implementation of :class:`full_track` (stores every turn)."""

    def __init__(self, radius, alpha, theta1, theta2, iters, epsilon):
        assert alpha.size == theta1.size
        assert alpha.size == theta2.size
        assert alpha.size == radius.size
        # save data as members
        self.radius = radius
        self.alpha = alpha
        self.theta1 = theta1
        self.theta2 = theta2
        self.epsilon = epsilon
        self.iters = iters
        self.max_iters = np.max(self.iters)
        # make NaN-filled containers so untracked turns stay NaN
        # (np.full is clearer and cheaper than np.zeros(...) * np.nan)
        self.x = np.full((self.max_iters, alpha.size), np.nan)
        self.px = np.full((self.max_iters, alpha.size), np.nan)
        self.y = np.full((self.max_iters, alpha.size), np.nan)
        self.py = np.full((self.max_iters, alpha.size), np.nan)
        # first row holds the initial conditions in cartesian coordinates
        self.x[0, :], self.px[0, :], self.y[0, :], self.py[0, :] = \
            cpu.polar_to_cartesian(radius, alpha, theta1, theta2)

    def compute(self):
        """Compute the tracking

        Returns
        -------
        tuple of 2D ndarray [n_iterations, n_samples]
            (x, px, y, py)
        """
        omega_x, omega_y = modulation(self.epsilon, self.max_iters)
        # Execution
        self.x, self.px, self.y, self.py = cpu.henon_full_track(
            self.x, self.px, self.y, self.py,
            self.iters, omega_x, omega_y
        )
        return self.x, self.px, self.y, self.py
| en | 0.655203 | Generates a modulation
Parameters
----------
epsilon : float
intensity of modulation
n_elements : float
number of elements
first_index : int, optional
starting point of the modulation, by default 0
Returns
-------
tuple of ndarray
(omega_x, omega_y) Kinda of a deprecated method. This class is meant to do a partial tracking (i.e. only last step is considered) of given initial condistions. Get the data
Returns
-------
tuple
(radius, alpha, theta1, theta2) Generate an instance of the engine.
Parameters
----------
radius : ndarray
array of radiuses to consider
alpha : ndarray
array of initial alphas
theta1 : ndarray
array of initial theta1
theta2 : ndarray
array of initial theta2
epsilon : float
modulation intensity
Returns
-------
class instance
optimized class instance # save data as members # make containers Compute the tracking
Returns
-------
tuple of ndarray [n_elements]
(radius, alpha, theta1, theta2, steps) # Execution #return self.r, self.alpha, self.theta1, self.theta2, self.step Resets the engine # save data as members # make containers Compute the tracking
Returns
-------
tuple of ndarray [n_elements]
(radius, alpha, theta1, theta2, steps) # load to GPU # Execution #return self.r, self.alpha, self.theta1, self.theta2, self.step Resets the engine With this class we can easly scan a uniform 4D cube of the Hénon map Create an uniform scan object
Parameters
----------
epsilon : float
modulation intensity
top : float
maximum radius
steps : int
steps from zero to top (becomes steps * 2 + 1)
starting_radius : float, optional
from which position we have to start with the actual computation, by default 0.0001
cuda_device : bool, optional
do we have a CUDA capable device (make it manual), by default None
Returns
-------
object
uniform_scan object Execute a scanning of everything
Parameters
----------
max_turns : int
turn limit
Returns
-------
ndarray
4d array with stable iterations inside # Filling Execute a scanning of everything
Parameters
----------
max_turns : int
turn limit
Returns
-------
ndarray
4d array with stable iterations inside # Filling This class contains most of the tools required for doing a precise and on point radial scan for Dynamic Aperture estimations. It's a bit messy tho... Get the data
Returns
-------
ndarray
The data intended as last stable radius for the given amount of turns. init an henon optimized radial tracker!
Parameters
----------
dr : float
radial step
alpha : ndarray
alpha angles to consider (raw)
theta1 : ndarray
theta1 angles to consider (raw)
theta2 : ndarray
theta2 angles to consider (raw)
epsilon : float
intensity of modulation
Returns
-------
Optimized instance
optimized instance of the class (CPU or GPU) # this has its meaning in the bigger picture, trust me! # save data as members # prepare data # make container # load vectors to gpu Resets the engine. Compute the tracking
Parameters
----------
sample_list : ndarray
iterations to consider
Returns
-------
ndarray
radius scan results # Sanity check # Execution Optimize block computation for ending up with a proper steps block!
Parameters
----------
max_turns : int
max number of turns
min_turns : int
min number of turns
Returns
-------
ndarray
the steps array # precomputation # computation # save data as members # prepare data # make container Resets the engine. Compute the tracking
Parameters
----------
sample_list : ndarray
iterations to consider
Returns
-------
ndarray
radius scan results # Sanity check # Execution Optimize block computation for ending up with a proper steps block!
Parameters
----------
max_turns : int
max number of turns
min_turns : int
min number of turns
Returns
-------
ndarray
the steps array # precomputation # computation Get the data
Returns
-------
tuple of 2D ndarray [n_iterations, n_samples]
(radius, alpha, theta1, theta2) Returns the summed results (power 4 as documented)
Parameters
----------
n_sectors : int
number of sectors to consider in the 2 theta space
Returns
-------
ndarray
list of values for the different istances considered. Executes a recursive accumulation in order to test lower binning values.
N.B. execute "accumulate_and_return first!!!"
Returns
-------
tuple of lists
Tuple of lists with (count_matrices, averages, results) Generate an instance of the class
Parameters
----------
radius : ndarray
radius to consider
alpha : ndarray
initial angle
theta1 : ndarray
initial theta1
theta2 : ndarray
initial theta2
iters : ndarray
n_iterations to perform
epsilon : float
intensity of the modulation
Returns
-------
class instance
optimized class instance # save data as members # make containers Compute the tracking
Returns
-------
tuple of 2D ndarray [n_iterations, n_samples]
(radius, alpha, theta1, theta2) # load vectors to gpu # Execution # save data as members # make containers Compute the tracking
Returns
-------
tuple of 2D ndarray [n_iterations, n_samples]
(radius, alpha, theta1, theta2) # Execution | 2.285385 | 2 |
_doc/sphinxdoc/source/conf.py | sdpython/manyapi | 1 | 6623471 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Sphinx configuration for the manydataapi documentation build."""
import sys
import os
import alabaster
from pyquickhelper.helpgen.default_conf import set_sphinx_variables

# make this file's directory importable during the build
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0])))

# populates this module's namespace (via locals()) with the standard
# pyquickhelper Sphinx settings
set_sphinx_variables(__file__, "manydataapi", "<NAME>", 2021,
                     "alabaster", alabaster.get_path(),
                     locals(), add_extensions=None,
                     extlinks=dict(issue=('https://github.com/sdpython/manydataapi/issues/%s', 'issue')))

blog_root = "http://www.xavierdupre.fr/app/manydataapi/helpsphinx/"
blog_background = False
html_css_files = ['my-styles.css']

# external-package links for the :epkg: role
# NOTE(review): `epkg_dictionary` is presumably injected by
# set_sphinx_variables through locals() -- confirm.
epkg_dictionary.update({
    "JCDecaux": 'http://www.jcdecaux.com/fr/pour-nos-partenaires/velos-en-libre-service',
    "linkedin": 'https://www.linkedin.com/',
    "LinkedIn": 'https://www.linkedin.com/',
    'moviepy': 'https://zulko.github.io/moviepy/',
    'tqdm': 'https://github.com/tqdm/tqdm',
    "velib": 'https://www.velib-metropole.fr/',
})
| # -*- coding: utf-8 -*-
import sys
import os
import alabaster
from pyquickhelper.helpgen.default_conf import set_sphinx_variables
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0])))
set_sphinx_variables(__file__, "manydataapi", "<NAME>", 2021,
"alabaster", alabaster.get_path(),
locals(), add_extensions=None,
extlinks=dict(issue=('https://github.com/sdpython/manydataapi/issues/%s', 'issue')))
blog_root = "http://www.xavierdupre.fr/app/manydataapi/helpsphinx/"
blog_background = False
html_css_files = ['my-styles.css']
epkg_dictionary.update({
"JCDecaux": 'http://www.jcdecaux.com/fr/pour-nos-partenaires/velos-en-libre-service',
"linkedin": 'https://www.linkedin.com/',
"LinkedIn": 'https://www.linkedin.com/',
'moviepy': 'https://zulko.github.io/moviepy/',
'tqdm': 'https://github.com/tqdm/tqdm',
"velib": 'https://www.velib-metropole.fr/',
}) | en | 0.769321 | # -*- coding: utf-8 -*- | 1.482055 | 1 |
Chapter05/nlp47.py | gushwell/PythonNLP100 | 2 | 6623472 | import re
import functools
class Morph:
    """One morpheme: surface form, base form, POS and POS subcategory."""

    def __init__(self, surface, base, pos, pos1):
        self.surface, self.base, self.pos, self.pos1 = surface, base, pos, pos1

    def toList(self):
        """Return the four fields as a list, in constructor order."""
        return [self.surface, self.base, self.pos, self.pos1]
class Chunk:
    """A bunsetsu: its morphemes, dependency target and dependents."""

    def __init__(self, number, dst):
        self.number = number
        self.dst = dst
        self.morphs = []
        self.srcs = []

    def print(self):
        """Dump the chunk to stdout for debugging."""
        print(self.number)
        print([m.toList() for m in self.morphs])
        print(self.dst, self.srcs)
        print()

    def concatMorphs(self):
        """Concatenate the surfaces of all non-symbol morphemes."""
        return ''.join(m.surface for m in self.morphs if m.pos != '記号')
class Pair:
    """A (particle, clause) pair attached to one verb."""

    def __init__(self, particle, paragraph):
        self.particle, self.paragraph = particle, paragraph
def analyze():
    """Parse 'neko.txt.cabocha' into a list of sentences of Chunk objects.

    Returns
    -------
    list of list of Chunk
        one inner list per sentence; each Chunk carries its morphemes,
        dependency target (dst) and dependents (srcs).
    """
    article = []
    sentence = []
    chunk = None
    with open('neko.txt.cabocha', 'r', encoding='utf8') as fin:
        for line in fin:
            # split on tab, comma, newline and space at once
            words = re.split(r'\t|,|\n| ', line)
            if line[0] == '*':
                # chunk header: '* <number> <dst>D ...'
                num = int(words[1])
                destNo = int(words[2].rstrip('D'))
                chunk = Chunk(num, destNo)
                sentence.append(chunk)
            elif words[0] == 'EOS':
                # end of sentence: wire up the reverse dependency links
                if sentence:
                    for index, c in enumerate(sentence, 0):
                        # NOTE(review): the root chunk has dst == -1, so its
                        # index lands in sentence[-1].srcs -- confirm this is
                        # intended.
                        sentence[c.dst].srcs.append(index)
                    article.append(sentence)
                    sentence = []
            else:
                # morpheme line: surface\tpos,pos1,...,base,...
                chunk.morphs.append(Morph(
                    words[0],
                    words[7],
                    words[1],
                    words[2],
                ))
    return article
def findVerbs(sentence):
    """Yield (verb_morph, chunk_number) for each chunk that contains a verb.

    Only the last verb of each chunk (scanning from the end) is yielded.
    """
    for phrase in sentence:
        verb = next((tok for tok in reversed(phrase.morphs) if tok.pos == '動詞'), None)
        if verb is not None:
            yield verb, phrase.number
def findParticles(sentence, chunkNo):
    """Yield (morph, particle, phrase) from chunks depending on chunkNo.

    For each chunk whose dst is chunkNo, scan its morphemes from the end;
    at the first morph immediately followed (in reading order) by a
    particle (助詞), yield that morph, the particle morph and the chunk's
    concatenated surface string, then move on to the next chunk.
    """
    for chunk in sentence:
        if chunk.dst == chunkNo:
            # sentinel with empty POS so the first comparison always fails
            nextMorph = Morph('', '', '', '')
            for m in reversed(chunk.morphs):
                # nextMorph is the morph that FOLLOWS m in reading order
                if nextMorph.pos == '助詞':
                    yield m, nextMorph, chunk.concatMorphs()
                    break
                nextMorph = m
def enumPattern(article):
    """Yield (sahen-noun + 'を' + verb base, sorted Pair list) per verb.

    For every verb in every sentence, inspect the chunks depending on the
    verb's chunk.  A sa-hen connective noun (サ変接続) directly followed by
    'を' is fused into the predicate itself; the remaining particles are
    collected as :class:`Pair` objects sorted by particle surface form.
    """
    for sentence in article:
        for v, num in findVerbs(sentence):
            pairlist = []
            obj = ''
            for part1, part2, para in findParticles(sentence, num):
                if part1.pos == '名詞' and part1.pos1 == 'サ変接続' and part2.surface == 'を':
                    # 'Xを' + verb forms a compound predicate (e.g. 返事をする).
                    obj = part1.surface + part2.surface
                else:
                    pairlist.append(Pair(part2.surface, para))
            # Only predicates with both a sahen-object and other arguments.
            if pairlist and obj != '':
                yield obj+v.base, sorted(pairlist, key=lambda x: x.particle)
def main():
    """Write one tab-separated line per predicate to result47.txt:
    predicate, space-joined particles, space-joined source chunks."""
    article = analyze()
    with open('result47.txt', 'w', encoding='utf8') as w:
        for v, pairList in enumPattern(article):
            w.write('{}\t{}\t{}\n'.format(v, ' '.join([x.particle for x in pairList]), \
                ' '.join([x.paragraph for x in pairList])))


if __name__ == '__main__':
    main()
| import re
import functools
class Morph:
def __init__(self, surface, base, pos, pos1):
self.surface = surface
self.base = base
self.pos = pos
self.pos1 = pos1
def toList(self):
return [self.surface, self.base, self.pos, self.pos1]
class Chunk:
def __init__(self, number, dst):
self.number = number
self.morphs = []
self.dst = dst
self.srcs = []
def print(self):
print(self.number)
print([x.toList() for x in self.morphs])
print(self.dst, self.srcs)
print()
def concatMorphs(self):
seq = filter(lambda x: x.pos != '記号', self.morphs)
return functools.reduce(lambda x, y: x + y.surface, seq, '')
class Pair:
def __init__(self, particle, paragraph):
self.particle = particle
self.paragraph = paragraph
def analyze():
article = []
sentence = []
chunk = None
with open('neko.txt.cabocha', 'r', encoding='utf8') as fin:
for line in fin:
words = re.split(r'\t|,|\n| ', line)
if line[0] == '*':
num = int(words[1])
destNo = int(words[2].rstrip('D'))
chunk = Chunk(num, destNo)
sentence.append(chunk)
elif words[0] == 'EOS':
if sentence:
for index, c in enumerate(sentence, 0):
sentence[c.dst].srcs.append(index)
article.append(sentence)
sentence = []
else:
chunk.morphs.append(Morph(
words[0],
words[7],
words[1],
words[2],
))
return article
def findVerbs(sentence):
for chunk in sentence:
for m in reversed(chunk.morphs):
if m.pos == '動詞':
yield m, chunk.number
break
def findParticles(sentence, chunkNo):
for chunk in sentence:
if chunk.dst == chunkNo:
nextMorph = Morph('', '', '', '')
for m in reversed(chunk.morphs):
if nextMorph.pos == '助詞':
yield m, nextMorph, chunk.concatMorphs()
break
nextMorph = m
def enumPattern(article):
for sentence in article:
for v, num in findVerbs(sentence):
pairlist = []
obj = ''
for part1, part2, para in findParticles(sentence, num):
if part1.pos == '名詞' and part1.pos1 == 'サ変接続' and part2.surface == 'を':
obj = part1.surface + part2.surface
else:
pairlist.append(Pair(part2.surface, para))
if pairlist and obj != '':
yield obj+v.base, sorted(pairlist, key=lambda x: x.particle)
def main():
article = analyze()
with open('result47.txt', 'w', encoding='utf8') as w:
for v, pairList in enumPattern(article):
w.write('{}\t{}\t{}\n'.format(v, ' '.join([x.particle for x in pairList]), \
' '.join([x.paragraph for x in pairList])))
if __name__ == '__main__':
main()
| none | 1 | 2.948699 | 3 | |
Python/OneLang/One/Transforms/InferTypesPlugins/NullabilityCheckWithNot.py | onelang/OneLang-CrossCompiled | 2 | 6623473 | from onelang_core import *
import OneLang.One.Transforms.InferTypesPlugins.Helpers.InferTypesPlugin as inferTypesPlug
import OneLang.One.Ast.Expressions as exprs
import OneLang.One.Ast.AstTypes as astTypes
class NullabilityCheckWithNot(inferTypesPlug.InferTypesPlugin):
    """Rewrites ``!expr`` into ``expr == null`` when the operand's type makes
    the ``!`` really a nullability check (a class type other than boolean or
    numeric)."""

    def __init__(self):
        super().__init__("NullabilityCheckWithNot")

    def can_transform(self, expr):
        """Only unary ``!`` expressions are candidates."""
        # Same truth table as the original conditional expression, but the
        # idiomatic short-circuit form.
        return isinstance(expr, exprs.UnaryExpression) and expr.operator == "!"

    def transform(self, expr):
        """Return ``operand == null`` for nullable class-typed operands;
        otherwise return ``expr`` unchanged."""
        unary_expr = expr
        if unary_expr.operator == "!":
            self.main.process_expression(expr)
            # Renamed from ``type`` to avoid shadowing the builtin.
            operand_type = unary_expr.operand.actual_type
            lit_types = self.main.current_file.literal_types
            if isinstance(operand_type, astTypes.ClassType) \
                    and operand_type.decl != lit_types.boolean.decl \
                    and operand_type.decl != lit_types.numeric.decl:
                return exprs.BinaryExpression(unary_expr.operand, "==", exprs.NullLiteral())
        return expr
import OneLang.One.Transforms.InferTypesPlugins.Helpers.InferTypesPlugin as inferTypesPlug
import OneLang.One.Ast.Expressions as exprs
import OneLang.One.Ast.AstTypes as astTypes
class NullabilityCheckWithNot(inferTypesPlug.InferTypesPlugin):
def __init__(self):
super().__init__("NullabilityCheckWithNot")
def can_transform(self, expr):
return expr.operator == "!" if isinstance(expr, exprs.UnaryExpression) else False
def transform(self, expr):
unary_expr = expr
if unary_expr.operator == "!":
self.main.process_expression(expr)
type = unary_expr.operand.actual_type
lit_types = self.main.current_file.literal_types
if isinstance(type, astTypes.ClassType) and type.decl != lit_types.boolean.decl and type.decl != lit_types.numeric.decl:
return exprs.BinaryExpression(unary_expr.operand, "==", exprs.NullLiteral())
return expr | none | 1 | 2.17112 | 2 | |
android/app/src/main/python/electroncash_plugins/__init__.py | proteanx/DeLight | 16 | 6623474 | <filename>android/app/src/main/python/electroncash_plugins/__init__.py
# Plugins are not used in the Android app, but various code still expects this package to
# exist.
| <filename>android/app/src/main/python/electroncash_plugins/__init__.py
# Plugins are not used in the Android app, but various code still expects this package to
# exist.
| en | 0.933994 | # Plugins are not used in the Android app, but various code still expects this package to # exist. | 1.284544 | 1 |
fedlab/core/client/scale/trainer.py | SMILELab-FL/FedLab | 171 | 6623475 | # Copyright 2021 Peng Cheng Laboratory (http://www.szpclab.com/) and FedLab Authors (smilelab.group)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ...client import SERIAL_TRAINER
from ..trainer import ClientTrainer
from ....utils.serialization import SerializationTool
from ....utils.dataset.sampler import SubsetSampler
from ....utils import Logger
class SerialTrainer(ClientTrainer):
    """Base class. Train multiple clients in sequence with a single process.

    Args:
        model (torch.nn.Module): Model used in this federation.
        client_num (int): Number of clients in current trainer.
        aggregator (Aggregators, callable, optional): Function to perform
            aggregation on a list of serialized model parameters.
        cuda (bool): Use GPUs or not. Default: ``True``.
        logger (Logger, optional): object of :class:`Logger`. A fresh
            instance is created when omitted.
    """

    def __init__(self,
                 model,
                 client_num,
                 aggregator=None,
                 cuda=True,
                 logger=None):
        super().__init__(model, cuda)

        self.client_num = client_num
        self.type = SERIAL_TRAINER  # represent serial trainer
        self.aggregator = aggregator
        # A ``logger=Logger()`` default argument would be evaluated once at
        # class-definition time and shared by every instance (mutable-default
        # pitfall); create the fallback instance lazily instead.
        self._LOGGER = Logger() if logger is None else logger

    def _train_alone(self, model_parameters, train_loader):
        """Train local model with :attr:`model_parameters` on :attr:`train_loader`.

        Args:
            model_parameters (torch.Tensor): Serialized model parameters of one model.
            train_loader (torch.utils.data.DataLoader): :class:`torch.utils.data.DataLoader` for this client.
        """
        raise NotImplementedError()

    def _get_dataloader(self, client_id):
        """Get :class:`DataLoader` for ``client_id``."""
        raise NotImplementedError()

    def train(self, model_parameters, id_list, aggregate=False):
        """Train local model with different dataset according to client id in ``id_list``.

        Args:
            model_parameters (torch.Tensor): Serialized model parameters.
            id_list (list[int]): Client id in this training serial.
            aggregate (bool): Whether to perform partial aggregation on this
                group of clients' local models at the end of each local
                training round.

        Note:
            Normally, aggregation is performed by server, while we provide
            :attr:`aggregate` option here to perform partial aggregation on
            current client group. This partial aggregation can reduce the
            aggregation workload of server.

        Returns:
            Serialized model parameters / list of model parameters.
        """
        param_list = []
        self._LOGGER.info(
            "Local training with client id list: {}".format(id_list))
        for idx in id_list:
            self._LOGGER.info(
                "Starting training procedure of client [{}]".format(idx))

            data_loader = self._get_dataloader(client_id=idx)
            self._train_alone(model_parameters=model_parameters,
                              train_loader=data_loader)
            param_list.append(self.model_parameters)

        if aggregate is True and self.aggregator is not None:
            # aggregate model parameters of this client group
            aggregated_parameters = self.aggregator(param_list)
            return aggregated_parameters
        else:
            return param_list
class SubsetSerialTrainer(SerialTrainer):
    """Train multiple clients in a single process.

    Customize :meth:`_get_dataloader` or :meth:`_train_alone` for specific
    algorithm design in clients.

    Args:
        model (torch.nn.Module): Model used in this federation.
        dataset (torch.utils.data.Dataset): Local dataset for this group of clients.
        data_slices (list[list]): subset of indices of dataset.
        aggregator (Aggregators, callable, optional): Function to perform
            aggregation on a list of model parameters.
        logger (Logger, optional): object of :class:`Logger`. A fresh
            instance is created when omitted.
        cuda (bool): Use GPUs or not. Default: ``True``.
        args (dict, optional): Uncertain variables. Keys read here:
            ``batch_size``, ``epochs`` and ``lr``.

    .. note::
        ``len(data_slices) == client_num``, that is, each sub-index of
        :attr:`dataset` corresponds to a client's local dataset one-by-one.
    """

    def __init__(self,
                 model,
                 dataset,
                 data_slices,
                 aggregator=None,
                 logger=None,
                 cuda=True,
                 args=None) -> None:
        # ``logger=None`` avoids the shared-mutable-default pitfall of
        # ``logger=Logger()``; resolve the fallback here so the parent always
        # receives a concrete Logger.
        if logger is None:
            logger = Logger()
        super(SubsetSerialTrainer, self).__init__(model=model,
                                                  client_num=len(data_slices),
                                                  cuda=cuda,
                                                  aggregator=aggregator,
                                                  logger=logger)

        self.dataset = dataset
        self.data_slices = data_slices  # [0, client_num)
        self.args = args

    def _get_dataloader(self, client_id):
        """Return a training dataloader used in :meth:`train` for client with :attr:`id`

        Args:
            client_id (int): :attr:`client_id` of client to generate dataloader

        Note:
            :attr:`client_id` here is not equal to ``client_id`` in global FL
            setting. It is the index of client in current
            :class:`SerialTrainer`.

        Returns:
            :class:`DataLoader` for specific client's sub-dataset
        """
        batch_size = self.args["batch_size"]
        train_loader = torch.utils.data.DataLoader(
            self.dataset,
            sampler=SubsetSampler(indices=self.data_slices[client_id],
                                  shuffle=True),
            batch_size=batch_size)
        return train_loader

    def _train_alone(self, model_parameters, train_loader):
        """Single round of local training for one client.

        Note:
            Overwrite this method to customize the PyTorch training pipeline.

        Args:
            model_parameters (torch.Tensor): serialized model parameters.
            train_loader (torch.utils.data.DataLoader): :class:`torch.utils.data.DataLoader` for this client.
        """
        epochs, lr = self.args["epochs"], self.args["lr"]
        SerializationTool.deserialize_model(self._model, model_parameters)
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(self._model.parameters(), lr=lr)
        self._model.train()

        for _ in range(epochs):
            for data, target in train_loader:
                if self.cuda:
                    data = data.cuda(self.gpu)
                    target = target.cuda(self.gpu)

                output = self.model(data)
                loss = criterion(output, target)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

        return self.model_parameters
| # Copyright 2021 Peng Cheng Laboratory (http://www.szpclab.com/) and FedLab Authors (smilelab.group)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ...client import SERIAL_TRAINER
from ..trainer import ClientTrainer
from ....utils.serialization import SerializationTool
from ....utils.dataset.sampler import SubsetSampler
from ....utils import Logger
class SerialTrainer(ClientTrainer):
"""Base class. Train multiple clients in sequence with a single process.
Args:
model (torch.nn.Module): Model used in this federation.
client_num (int): Number of clients in current trainer.
aggregator (Aggregators, callable, optional): Function to perform aggregation on a list of serialized model parameters.
cuda (bool): Use GPUs or not. Default: ``True``.
logger (Logger, optional): object of :class:`Logger`.
"""
def __init__(self,
model,
client_num,
aggregator=None,
cuda=True,
logger=Logger()):
super().__init__(model, cuda)
self.client_num = client_num
self.type = SERIAL_TRAINER # represent serial trainer
self.aggregator = aggregator
self._LOGGER = logger
def _train_alone(self, model_parameters, train_loader):
"""Train local model with :attr:`model_parameters` on :attr:`train_loader`.
Args:
model_parameters (torch.Tensor): Serialized model parameters of one model.
train_loader (torch.utils.data.DataLoader): :class:`torch.utils.data.DataLoader` for this client.
"""
raise NotImplementedError()
def _get_dataloader(self, client_id):
"""Get :class:`DataLoader` for ``client_id``."""
raise NotImplementedError()
def train(self, model_parameters, id_list, aggregate=False):
"""Train local model with different dataset according to client id in ``id_list``.
Args:
model_parameters (torch.Tensor): Serialized model parameters.
id_list (list[int]): Client id in this training serial.
aggregate (bool): Whether to perform partial aggregation on this group of clients' local models at the end of each local training round.
Note:
Normally, aggregation is performed by server, while we provide :attr:`aggregate` option here to perform
partial aggregation on current client group. This partial aggregation can reduce the aggregation workload
of server.
Returns:
Serialized model parameters / list of model parameters.
"""
param_list = []
self._LOGGER.info(
"Local training with client id list: {}".format(id_list))
for idx in id_list:
self._LOGGER.info(
"Starting training procedure of client [{}]".format(idx))
data_loader = self._get_dataloader(client_id=idx)
self._train_alone(model_parameters=model_parameters,
train_loader=data_loader)
param_list.append(self.model_parameters)
if aggregate is True and self.aggregator is not None:
# aggregate model parameters of this client group
aggregated_parameters = self.aggregator(param_list)
return aggregated_parameters
else:
return param_list
class SubsetSerialTrainer(SerialTrainer):
"""Train multiple clients in a single process.
Customize :meth:`_get_dataloader` or :meth:`_train_alone` for specific algorithm design in clients.
Args:
model (torch.nn.Module): Model used in this federation.
dataset (torch.utils.data.Dataset): Local dataset for this group of clients.
data_slices (list[list]): subset of indices of dataset.
aggregator (Aggregators, callable, optional): Function to perform aggregation on a list of model parameters.
logger (Logger, optional): object of :class:`Logger`.
cuda (bool): Use GPUs or not. Default: ``True``.
args (dict, optional): Uncertain variables.
.. note::
``len(data_slices) == client_num``, that is, each sub-index of :attr:`dataset` corresponds to a client's local dataset one-by-one.
"""
def __init__(self,
model,
dataset,
data_slices,
aggregator=None,
logger=Logger(),
cuda=True,
args=None) -> None:
super(SubsetSerialTrainer, self).__init__(model=model,
client_num=len(data_slices),
cuda=cuda,
aggregator=aggregator,
logger=logger)
self.dataset = dataset
self.data_slices = data_slices # [0, client_num)
self.args = args
def _get_dataloader(self, client_id):
"""Return a training dataloader used in :meth:`train` for client with :attr:`id`
Args:
client_id (int): :attr:`client_id` of client to generate dataloader
Note:
:attr:`client_id` here is not equal to ``client_id`` in global FL setting. It is the index of client in current :class:`SerialTrainer`.
Returns:
:class:`DataLoader` for specific client's sub-dataset
"""
batch_size = self.args["batch_size"]
train_loader = torch.utils.data.DataLoader(
self.dataset,
sampler=SubsetSampler(indices=self.data_slices[client_id],
shuffle=True),
batch_size=batch_size)
return train_loader
def _train_alone(self, model_parameters, train_loader):
"""Single round of local training for one client.
Note:
Overwrite this method to customize the PyTorch training pipeline.
Args:
model_parameters (torch.Tensor): serialized model parameters.
train_loader (torch.utils.data.DataLoader): :class:`torch.utils.data.DataLoader` for this client.
"""
epochs, lr = self.args["epochs"], self.args["lr"]
SerializationTool.deserialize_model(self._model, model_parameters)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(self._model.parameters(), lr=lr)
self._model.train()
for _ in range(epochs):
for data, target in train_loader:
if self.cuda:
data = data.cuda(self.gpu)
target = target.cuda(self.gpu)
output = self.model(data)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return self.model_parameters
| en | 0.722833 | # Copyright 2021 Peng Cheng Laboratory (http://www.szpclab.com/) and FedLab Authors (smilelab.group) # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Base class. Train multiple clients in sequence with a single process. Args: model (torch.nn.Module): Model used in this federation. client_num (int): Number of clients in current trainer. aggregator (Aggregators, callable, optional): Function to perform aggregation on a list of serialized model parameters. cuda (bool): Use GPUs or not. Default: ``True``. logger (Logger, optional): object of :class:`Logger`. # represent serial trainer Train local model with :attr:`model_parameters` on :attr:`train_loader`. Args: model_parameters (torch.Tensor): Serialized model parameters of one model. train_loader (torch.utils.data.DataLoader): :class:`torch.utils.data.DataLoader` for this client. Get :class:`DataLoader` for ``client_id``. Train local model with different dataset according to client id in ``id_list``. Args: model_parameters (torch.Tensor): Serialized model parameters. id_list (list[int]): Client id in this training serial. aggregate (bool): Whether to perform partial aggregation on this group of clients' local models at the end of each local training round. Note: Normally, aggregation is performed by server, while we provide :attr:`aggregate` option here to perform partial aggregation on current client group. This partial aggregation can reduce the aggregation workload of server. 
Returns: Serialized model parameters / list of model parameters. # aggregate model parameters of this client group Train multiple clients in a single process. Customize :meth:`_get_dataloader` or :meth:`_train_alone` for specific algorithm design in clients. Args: model (torch.nn.Module): Model used in this federation. dataset (torch.utils.data.Dataset): Local dataset for this group of clients. data_slices (list[list]): subset of indices of dataset. aggregator (Aggregators, callable, optional): Function to perform aggregation on a list of model parameters. logger (Logger, optional): object of :class:`Logger`. cuda (bool): Use GPUs or not. Default: ``True``. args (dict, optional): Uncertain variables. .. note:: ``len(data_slices) == client_num``, that is, each sub-index of :attr:`dataset` corresponds to a client's local dataset one-by-one. # [0, client_num) Return a training dataloader used in :meth:`train` for client with :attr:`id` Args: client_id (int): :attr:`client_id` of client to generate dataloader Note: :attr:`client_id` here is not equal to ``client_id`` in global FL setting. It is the index of client in current :class:`SerialTrainer`. Returns: :class:`DataLoader` for specific client's sub-dataset Single round of local training for one client. Note: Overwrite this method to customize the PyTorch training pipeline. Args: model_parameters (torch.Tensor): serialized model parameters. train_loader (torch.utils.data.DataLoader): :class:`torch.utils.data.DataLoader` for this client. | 2.044127 | 2 |
numerics/solver.py | gdikov/MNIST_Challenge | 1 | 6623476 | <filename>numerics/solver.py
import numpy as np
class Solver(object):
    """Abstract base for parameter-update rules (optimizers)."""

    def __init__(self):
        pass

    def update(self, x, dx):
        """Apply one optimization step to ``x`` in place for gradient ``dx``.

        Subclasses override this; the base implementation does nothing.
        """
        pass
class SGD(Solver):
    """Plain (stochastic) gradient descent.

    The default config matches the original: note that ``momentum`` is
    stored but never used by :meth:`update`.
    """

    def __init__(self, config=None):
        super(SGD, self).__init__()
        if config is None:
            config = {'learning_rate': 0.01, 'momentum': 0.9, 't': 0}
        self.config = config

    def update(self, x, dx):
        """One descent step: ``x -= lr * dx``.

        Mutates ``x`` in place, so ``x`` must support in-place subtraction
        (e.g. a numpy array).
        """
        x -= self.config['learning_rate'] * dx
class Adam(Solver):
    """Adam optimizer with bias-corrected first/second moment estimates."""

    def __init__(self, data_dim=28*28, config=None):
        super(Adam, self).__init__()
        if config is None:
            config = {
                'learning_rate': 0.001,
                'beta1': 0.9,
                'beta2': 0.999,
                'epsilon': 1e-8,
                'mov_avg_grad': np.zeros(data_dim),     # first-moment EMA
                'mov_avg_sq_grad': np.zeros(data_dim),  # second-moment EMA
                't': 0,
            }
        self.config = config

    def update(self, x, dx):
        """Apply one Adam step to ``x`` in place for gradient ``dx``."""
        cfg = self.config
        beta1, beta2 = cfg['beta1'], cfg['beta2']
        # Exponential moving averages of the gradient and its square.
        cfg['mov_avg_grad'] = beta1 * cfg['mov_avg_grad'] + (1 - beta1) * dx
        cfg['mov_avg_sq_grad'] = beta2 * cfg['mov_avg_sq_grad'] + \
            (1 - beta2) * (dx ** 2)
        cfg['t'] += 1

        # Bias correction counteracts the zero initialisation of the EMAs.
        m_hat = cfg['mov_avg_grad'] / (1 - beta1 ** cfg['t'])
        v_hat = cfg['mov_avg_sq_grad'] / (1 - beta2 ** cfg['t'])

        x -= (cfg['learning_rate'] * m_hat) / (np.sqrt(v_hat) + cfg['epsilon'])
| <filename>numerics/solver.py
import numpy as np
class Solver(object):
def __init__(self):
pass
def update(self, x, dx):
pass
class SGD(Solver):
def __init__(self, config=None):
super(SGD, self).__init__()
if config is None:
config = dict()
config['learning_rate'] = 0.01
config['momentum'] = 0.9
config['t'] = 0
self.config = config
def update(self, x, dx):
x -= self.config['learning_rate'] * dx
class Adam(Solver):
def __init__(self, data_dim=28*28, config=None):
super(Adam, self).__init__()
if config is None:
config = dict()
config['learning_rate'] = 0.001
config['beta1'] = 0.9
config['beta2'] = 0.999
config['epsilon'] = 1e-8
config['mov_avg_grad'] = np.zeros(data_dim)
config['mov_avg_sq_grad'] = np.zeros(data_dim)
config['t'] = 0
self.config = config
def update(self, x, dx):
self.config['mov_avg_grad'] = self.config['beta1'] * self.config['mov_avg_grad'] + \
(1 - self.config['beta1']) * dx
self.config['mov_avg_sq_grad'] = self.config['beta2'] * self.config['mov_avg_sq_grad'] + \
(1 - self.config['beta2']) * (dx ** 2)
self.config['t'] += 1
m_hat = self.config['mov_avg_grad'] / (1 - self.config['beta1'] ** self.config['t'])
v_hat = self.config['mov_avg_sq_grad'] / (1 - self.config['beta2'] ** self.config['t'])
x -= (self.config['learning_rate'] * m_hat) / (np.sqrt(v_hat) + self.config['epsilon'])
| none | 1 | 3.038327 | 3 | |
src/pdes/wave.py | gelijergensen/Constrained-Neural-Nets-Workbook | 3 | 6623477 | <reponame>gelijergensen/Constrained-Neural-Nets-Workbook
"""Implementation of the wave PDE"""
import numpy as np
import torch
from src.derivatives import jacobian, jacobian_and_laplacian
__all__ = ["helmholtz_equation", "pythagorean_equation"]
def helmholtz_equation(
    outputs, inputs, parameterization, return_diagnostics=False
):
    """Computes the Helmholtz equation (time independent wave equation) value,
    given the model inputs, outputs, and parameterization of the wave

    :param outputs: output of some network
    :param inputs: inputs to some network
    :param parameterization: parameterization of the PDE which we expect to
        follow: [amplitude, frequency, phase]
    :param return_diagnostics: whether to return an object containing
        diagnostics information
    :return PDE value (, diagnostics tuple)
    """
    # A single (unbatched) input tensor has one dimension; batched have more.
    batched = len(inputs.size()) > 1
    jac, lap = jacobian_and_laplacian(
        outputs, inputs, batched=batched, create_graph=True, allow_unused=False
    )
    # Angular wavenumber k = 2*pi*f; parameterization[..., 1] is the frequency
    # per the documented [amplitude, frequency, phase] layout.
    # NOTE(review): this takes the wave speed as 1 -- confirm.
    frequency = (2 * np.pi * parameterization[..., 1]).view(outputs.size())
    # r$ \nabla^2 u = - k^2 u$
    lhs = lap
    rhs = -frequency * frequency * outputs
    # Residual is zero when `outputs` satisfies the Helmholtz equation.
    if return_diagnostics:
        return lhs - rhs, (lhs, rhs, jac)
    else:
        return lhs - rhs
def pythagorean_equation(
    outputs, inputs, parameterization, return_diagnostics=False
):
    """Computes the Pythagorean equation ((f * y)^2 + (y')^2 = f^2), assuming
    that the network should satisfy that f * f * y = y'' (Helmholtz)

    :param outputs: output of some network
    :param inputs: inputs to some network
    :param parameterization: parameterization of the PDE which we expect to
        follow: [amplitude, frequency, phase]
    :param return_diagnostics: whether to return an object containing
        diagnostics information
    :return PDE value (, diagnostics tuple)
    """
    # A single (unbatched) input tensor has one dimension; batched have more.
    batched = len(inputs.size()) > 1
    jac = jacobian(
        outputs, inputs, batched=batched, create_graph=True, allow_unused=False
    )
    # Angular wavenumber k = 2*pi*f from the [amplitude, frequency, phase]
    # parameterization.
    frequency = (2 * np.pi * parameterization[..., 1]).view(outputs.size())
    # r$ (f * y)^2 + (y')^2 = f^2$
    # (The previous comment read "= 1", but the code below compares against
    # f^2, consistent with the docstring.  The identity holds for unit
    # amplitude, y = sin(f*x + phase) -- presumably intended; confirm.)
    lhs = (frequency * outputs) ** 2 + jac.view(outputs.size()) ** 2
    rhs = lhs.new_ones(lhs.size()) * (frequency ** 2)
    if return_diagnostics:
        return lhs - rhs, (lhs, rhs, jac)
    else:
        return lhs - rhs
| """Implementation of the wave PDE"""
import numpy as np
import torch
from src.derivatives import jacobian, jacobian_and_laplacian
__all__ = ["helmholtz_equation", "pythagorean_equation"]
def helmholtz_equation(
outputs, inputs, parameterization, return_diagnostics=False
):
"""Computes the Helmholtz equation (time independent wave equation) value,
given the model inputs, outputs, and paramerization of the wave
:param outputs: output of some network
:param inputs: inputs to some network
:param parameterization: parameterization of the PDE which we expect to
follow: [amplitude, frequency, phase]
:param return_diagnostics: whether to return an object containing
diagnostics information
:return PDE value (, diagnostics tuple)
"""
batched = len(inputs.size()) > 1
jac, lap = jacobian_and_laplacian(
outputs, inputs, batched=batched, create_graph=True, allow_unused=False
)
frequency = (2 * np.pi * parameterization[..., 1]).view(outputs.size())
# r$ \nabla^2 u = - k^2 u$
lhs = lap
rhs = -frequency * frequency * outputs
if return_diagnostics:
return lhs - rhs, (lhs, rhs, jac)
else:
return lhs - rhs
def pythagorean_equation(
outputs, inputs, parameterization, return_diagnostics=False
):
"""Computes the Pythagorean equation ((f * y)^2 + (y')^2 = f^2), assuming
that the network should satisfy that f * f * y = y'' (Helmholtz)
:param outputs: output of some network
:param inputs: inputs to some network
:param parameterization: parameterization of the PDE which we expect to
follow: [amplitude, frequency, phase]
:param return_diagnostics: whether to return an object containing
diagnostics information
:return PDE value (, diagnostics tuple)
"""
batched = len(inputs.size()) > 1
jac = jacobian(
outputs, inputs, batched=batched, create_graph=True, allow_unused=False
)
frequency = (2 * np.pi * parameterization[..., 1]).view(outputs.size())
# r$ (f * y)^2 + (y')^2 = 1$
lhs = (frequency * outputs) ** 2 + jac.view(outputs.size()) ** 2
rhs = lhs.new_ones(lhs.size()) * (frequency ** 2)
if return_diagnostics:
return lhs - rhs, (lhs, rhs, jac)
else:
return lhs - rhs | en | 0.676683 | Implementation of the wave PDE Computes the Helmholtz equation (time independent wave equation) value, given the model inputs, outputs, and paramerization of the wave :param outputs: output of some network :param inputs: inputs to some network :param parameterization: parameterization of the PDE which we expect to follow: [amplitude, frequency, phase] :param return_diagnostics: whether to return an object containing diagnostics information :return PDE value (, diagnostics tuple) # r$ \nabla^2 u = - k^2 u$ Computes the Pythagorean equation ((f * y)^2 + (y')^2 = f^2), assuming that the network should satisfy that f * f * y = y'' (Helmholtz) :param outputs: output of some network :param inputs: inputs to some network :param parameterization: parameterization of the PDE which we expect to follow: [amplitude, frequency, phase] :param return_diagnostics: whether to return an object containing diagnostics information :return PDE value (, diagnostics tuple) # r$ (f * y)^2 + (y')^2 = 1$ | 3.409109 | 3 |
tests/data/annotation_types/classification/test_classification.py | nickaustinlee/labelbox-python | 0 | 6623478 | import pytest
from pydantic import ValidationError
from labelbox.data.annotation_types import (Checklist, ClassificationAnswer,
Dropdown, Radio, Text,
ClassificationAnnotation)
def test_classification_answer():
    """ClassificationAnswer requires a name; feature_schema_id is optional."""
    with pytest.raises(ValidationError):
        ClassificationAnswer()

    feature_schema_id = "schema_id"
    name = "my_feature"
    answer = ClassificationAnswer(name=name)
    # Name alone is sufficient; the schema id defaults to None.
    assert answer.feature_schema_id is None
    assert answer.name == name

    answer = ClassificationAnswer(feature_schema_id=feature_schema_id,
                                  name=name)
    assert answer.feature_schema_id == feature_schema_id
    assert answer.name == name
def test_classification():
    """Text answers round-trip through ClassificationAnnotation.dict()."""
    answer = "1234"
    classification = ClassificationAnnotation(value=Text(answer=answer),
                                              name="a classification")
    assert classification.dict()['value']['answer'] == answer

    # An annotation without a value is invalid.
    with pytest.raises(ValidationError):
        ClassificationAnnotation()
def test_subclass():
    """Subclassifications serialize name/feature_schema_id alongside value."""
    answer = "1234"
    feature_schema_id = "11232"
    name = "my_feature"
    with pytest.raises(ValidationError):
        # Should have feature schema info
        classification = ClassificationAnnotation(value=Text(answer=answer))
    classification = ClassificationAnnotation(value=Text(answer=answer),
                                              name=name)
    assert classification.dict() == {
        'name': name,
        'feature_schema_id': None,
        'extra': {},
        'value': {
            'answer': answer
        }
    }
    classification = ClassificationAnnotation(
        value=Text(answer=answer),
        name=name,
        feature_schema_id=feature_schema_id)
    # The expected dict previously listed 'name' twice ('name': None followed
    # later by 'name': name); Python keeps only the last duplicate key, so the
    # effective (and intended) expectation is simply 'name': name.
    assert classification.dict() == {
        'name': name,
        'feature_schema_id': feature_schema_id,
        'extra': {},
        'value': {
            'answer': answer
        }
    }
    classification = ClassificationAnnotation(
        value=Text(answer=answer),
        feature_schema_id=feature_schema_id,
        name=name)
    assert classification.dict() == {
        'name': name,
        'feature_schema_id': feature_schema_id,
        'extra': {},
        'value': {
            'answer': answer
        }
    }
def test_radio():
    """Radio accepts exactly one ClassificationAnswer (not a name, not a list)."""
    answer = ClassificationAnswer(name="1")
    feature_schema_id = "feature_schema_id"
    name = "my_feature"

    with pytest.raises(ValidationError):
        # A bare answer string is rejected; a ClassificationAnswer is required.
        classification = ClassificationAnnotation(value=Radio(
            answer=answer.name))

    with pytest.raises(ValidationError):
        # A list is rejected; Radio is single-choice.
        classification = Radio(answer=[answer])
    classification = Radio(answer=answer)
    assert classification.dict() == {
        'answer': {
            'name': answer.name,
            'feature_schema_id': None,
            'extra': {}
        }
    }

    classification = ClassificationAnnotation(
        value=Radio(answer=answer),
        feature_schema_id=feature_schema_id,
        name=name)
    assert classification.dict() == {
        'name': name,
        'feature_schema_id': feature_schema_id,
        'extra': {},
        'value': {
            'answer': {
                'name': answer.name,
                'feature_schema_id': None,
                'extra': {}
            }
        }
    }
def test_checklist():
    """Checklist requires a list of ClassificationAnswer objects."""
    answer = ClassificationAnswer(name="1")
    feature_schema_id = "feature_schema_id"
    name = "my_feature"

    with pytest.raises(ValidationError):
        # A bare answer name is rejected.
        classification = Checklist(answer=answer.name)

    with pytest.raises(ValidationError):
        # A single answer outside a list is rejected; Checklist is multi-choice.
        classification = Checklist(answer=answer)

    classification = Checklist(answer=[answer])
    assert classification.dict() == {
        'answer': [{
            'name': answer.name,
            'feature_schema_id': None,
            'extra': {}
        }]
    }

    classification = ClassificationAnnotation(
        value=Checklist(answer=[answer]),
        feature_schema_id=feature_schema_id,
        name=name,
    )
    assert classification.dict() == {
        'name': name,
        'feature_schema_id': feature_schema_id,
        'extra': {},
        'value': {
            'answer': [{
                'name': answer.name,
                'feature_schema_id': None,
                'extra': {}
            }]
        },
    }
def test_dropdown():
answer = ClassificationAnswer(name="1")
feature_schema_id = "feature_schema_id"
name = "my_feature"
with pytest.raises(ValidationError):
classification = ClassificationAnnotation(
value=Dropdown(answer=answer.name), name="test")
with pytest.raises(ValidationError):
classification = Dropdown(answer=answer)
classification = Dropdown(answer=[answer])
assert classification.dict() == {
'answer': [{
'name': '1',
'feature_schema_id': None,
'extra': {}
}]
}
classification = ClassificationAnnotation(
value=Dropdown(answer=[answer]),
feature_schema_id=feature_schema_id,
name=name)
assert classification.dict() == {
'name': name,
'feature_schema_id': feature_schema_id,
'extra': {},
'value': {
'answer': [{
'name': answer.name,
'feature_schema_id': None,
'extra': {}
}]
}
}
| import pytest
from pydantic import ValidationError
from labelbox.data.annotation_types import (Checklist, ClassificationAnswer,
Dropdown, Radio, Text,
ClassificationAnnotation)
def test_classification_answer():
    """ClassificationAnswer requires a name; feature_schema_id stays optional."""
    # Constructing with no arguments must be rejected.
    with pytest.raises(ValidationError):
        ClassificationAnswer()
    schema_id = "schema_id"
    feature_name = "my_feature"
    # Name-only construction leaves the schema id unset.
    name_only = ClassificationAnswer(name=feature_name)
    assert name_only.feature_schema_id is None
    assert name_only.name == feature_name
    # Providing both fields round-trips them unchanged.
    full = ClassificationAnswer(feature_schema_id=schema_id,
                                name=feature_name)
    assert full.feature_schema_id == schema_id
    assert full.name == feature_name
def test_classification():
    """A text classification serializes its answer; empty construction fails."""
    expected = "1234"
    annotation = ClassificationAnnotation(value=Text(answer=expected),
                                          name="a classification")
    serialized = annotation.dict()
    assert serialized['value']['answer'] == expected
    # `value` is mandatory, so a bare constructor call must raise.
    with pytest.raises(ValidationError):
        ClassificationAnnotation()
def test_subclass():
    """A ClassificationAnnotation needs a name and serializes its Text value."""
    answer = "1234"
    feature_schema_id = "11232"
    name = "my_feature"
    with pytest.raises(ValidationError):
        # Should have feature schema info
        classification = ClassificationAnnotation(value=Text(answer=answer))
    classification = ClassificationAnnotation(value=Text(answer=answer),
                                              name=name)
    assert classification.dict() == {
        'name': name,
        'feature_schema_id': None,
        'extra': {},
        'value': {
            'answer': answer
        }
    }
    classification = ClassificationAnnotation(
        value=Text(answer=answer),
        name=name,
        feature_schema_id=feature_schema_id)
    # Bug fix: this expected dict previously listed 'name' twice
    # ('name': None first, then 'name': name); Python silently keeps only
    # the last duplicate key, so the literal now states the intended value
    # exactly once.
    assert classification.dict() == {
        'name': name,
        'feature_schema_id': feature_schema_id,
        'extra': {},
        'value': {
            'answer': answer
        }
    }
    classification = ClassificationAnnotation(
        value=Text(answer=answer),
        feature_schema_id=feature_schema_id,
        name=name)
    assert classification.dict() == {
        'name': name,
        'feature_schema_id': feature_schema_id,
        'extra': {},
        'value': {
            'answer': answer
        }
    }
def test_radio():
    """Radio is single-select: one ClassificationAnswer, serialized nested."""
    answer = ClassificationAnswer(name="1")
    schema_id = "feature_schema_id"
    feature_name = "my_feature"
    # A raw string is not a valid radio answer.
    with pytest.raises(ValidationError):
        ClassificationAnnotation(value=Radio(answer=answer.name))
    # Radio rejects a list of answers (that is Checklist territory).
    with pytest.raises(ValidationError):
        Radio(answer=[answer])
    expected_answer = {
        'name': answer.name,
        'feature_schema_id': None,
        'extra': {}
    }
    radio = Radio(answer=answer)
    assert radio.dict() == {'answer': expected_answer}
    annotation = ClassificationAnnotation(value=Radio(answer=answer),
                                          feature_schema_id=schema_id,
                                          name=feature_name)
    assert annotation.dict() == {
        'name': feature_name,
        'feature_schema_id': schema_id,
        'extra': {},
        'value': {
            'answer': expected_answer
        }
    }
def test_checklist():
    """Checklist is multi-select: answers must arrive as a list."""
    answer = ClassificationAnswer(name="1")
    schema_id = "feature_schema_id"
    feature_name = "my_feature"
    # Neither a bare string nor a single answer object is accepted.
    with pytest.raises(ValidationError):
        Checklist(answer=answer.name)
    with pytest.raises(ValidationError):
        Checklist(answer=answer)
    expected_answers = [{
        'name': answer.name,
        'feature_schema_id': None,
        'extra': {}
    }]
    checklist = Checklist(answer=[answer])
    assert checklist.dict() == {'answer': expected_answers}
    annotation = ClassificationAnnotation(
        value=Checklist(answer=[answer]),
        feature_schema_id=schema_id,
        name=feature_name,
    )
    assert annotation.dict() == {
        'name': feature_name,
        'feature_schema_id': schema_id,
        'extra': {},
        'value': {
            'answer': expected_answers
        },
    }
def test_dropdown():
    """Dropdown behaves like Checklist: it only accepts a list of answers."""
    answer = ClassificationAnswer(name="1")
    schema_id = "feature_schema_id"
    feature_name = "my_feature"
    # A bare string answer is rejected even when wrapped in an annotation.
    with pytest.raises(ValidationError):
        ClassificationAnnotation(
            value=Dropdown(answer=answer.name), name="test")
    # A single (non-list) answer object is also rejected.
    with pytest.raises(ValidationError):
        Dropdown(answer=answer)
    expected_answers = [{
        'name': answer.name,
        'feature_schema_id': None,
        'extra': {}
    }]
    dropdown = Dropdown(answer=[answer])
    assert dropdown.dict() == {'answer': expected_answers}
    annotation = ClassificationAnnotation(
        value=Dropdown(answer=[answer]),
        feature_schema_id=schema_id,
        name=feature_name)
    assert annotation.dict() == {
        'name': feature_name,
        'feature_schema_id': schema_id,
        'extra': {},
        'value': {
            'answer': expected_answers
        }
    }
| en | 0.836949 | # Should have feature schema info | 2.640284 | 3 |
encoder_ui/app.py | hidnoiz/encoder_ui | 0 | 6623479 | <reponame>hidnoiz/encoder_ui
from flask import Flask
from werkzeug.debug import DebuggedApplication
from celery import Celery
from encoder_ui.api import blueprint_api
from encoder_ui.web import blueprint_web
CELERY_TASK_LIST = [
'encoder_ui.celery.discovery.tasks'
]
def create_celery_app(app=None):
    """Build a Celery app wired to the Flask app's broker and configuration.

    :param app: existing Flask app to bind to; a fresh one is created if None
    :return: configured Celery instance
    """
    flask_app = app or create_app()
    celery_app = Celery(
        flask_app.import_name,
        broker=flask_app.config['CELERY_BROKER_URL'],
        include=CELERY_TASK_LIST,
    )
    # Mirror the Flask configuration into Celery so tasks see the same settings.
    celery_app.conf.update(flask_app.config)
    return celery_app
def create_app(settings_override=None):
    """
    Create a Flask application using the app factory pattern.
    :param settings_override: Override settings
    :return: Flask app
    """
    flask_app = Flask(__name__,
                      static_folder='web/static',
                      template_folder='web/templates',
                      static_url_path='')
    flask_app.config.from_object('config.settings')
    if settings_override:
        flask_app.config.update(settings_override)
    # Wrap with Werkzeug's interactive debugger when running in debug mode.
    if flask_app.debug:
        flask_app.wsgi_app = DebuggedApplication(flask_app.wsgi_app,
                                                 evalex=True)
    # Registration order matches the original: API first, then web routes.
    for blueprint in (blueprint_api, blueprint_web):
        flask_app.register_blueprint(blueprint)
    return flask_app
| from flask import Flask
from werkzeug.debug import DebuggedApplication
from celery import Celery
from encoder_ui.api import blueprint_api
from encoder_ui.web import blueprint_web
CELERY_TASK_LIST = [
'encoder_ui.celery.discovery.tasks'
]
def create_celery_app(app=None):
app = app or create_app()
celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'],
include=CELERY_TASK_LIST)
celery.conf.update(app.config)
return celery
def create_app(settings_override=None):
"""
Create a Flask application using the app factory pattern.
:param settings_override: Override settings
:return: Flask app
"""
app = Flask(__name__, static_folder='web/static', template_folder='web/templates', static_url_path='')
app.config.from_object('config.settings')
if settings_override:
app.config.update(settings_override)
if app.debug:
app.wsgi_app = DebuggedApplication(app.wsgi_app, evalex=True)
app.register_blueprint(blueprint_api)
app.register_blueprint(blueprint_web)
return app | en | 0.583428 | Create a Flask application using the app factory pattern. :param settings_override: Override settings :return: Flask app | 2.232972 | 2 |
dev/torch/nn/functional/linear.py | Jintao-Huang/torch_study | 4 | 6623480 | <reponame>Jintao-Huang/torch_study<filename>dev/torch/nn/functional/linear.py
# Author: <NAME>
# Email: <EMAIL>
# Date:
import torch
from torch import Tensor
"""
- 矩阵乘时间复杂度: e.g. [A, B] @ [B, C]. Ot(ABC)
- linear时间复杂度: Ot(N*In*Out)
"""
def linear(input: Tensor, weight: Tensor, bias: Tensor = None) -> Tensor:
    """Affine transform ``input @ weight.T (+ bias)``.

    :param input: shape[N, In]
    :param weight: shape[Out, In]
    :param bias: shape[Out]
    :return: shape[N, Out]"""
    out = input @ weight.T  # Ot(N*In*Out)
    if bias is None:
        return out
    return out + bias  # Ot(N*Out)
| # Author: <NAME>
# Email: <EMAIL>
# Date:
import torch
from torch import Tensor
"""
- 矩阵乘时间复杂度: e.g. [A, B] @ [B, C]. Ot(ABC)
- linear时间复杂度: Ot(N*In*Out)
"""
def linear(input: Tensor, weight: Tensor, bias: Tensor = None) -> Tensor:
"""
:param input: shape[N, In]
:param weight: shape[Out, In]
:param bias: shape[Out]
:return: shape[N, Out]"""
x = input
#
y = x @ weight.T # Ot(N*In*Out)
if bias is not None:
y += bias # Ot(N*Out)
return y | en | 0.440463 | # Author: <NAME> # Email: <EMAIL> # Date: - 矩阵乘时间复杂度: e.g. [A, B] @ [B, C]. Ot(ABC) - linear时间复杂度: Ot(N*In*Out) :param input: shape[N, In] :param weight: shape[Out, In] :param bias: shape[Out] :return: shape[N, Out] # # Ot(N*In*Out) # Ot(N*Out) | 2.972944 | 3 |
wine_informatics/knn_algorithm/knn_algorithm.py | Williano/Data-Mining | 0 | 6623481 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score, \
GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix, \
accuracy_score
from sklearn.utils import shuffle
def load_dataset(wine_dataset_csv):
    """Read the wine dataset CSV into a DataFrame (no column used as index)."""
    return pd.read_csv(wine_dataset_csv, index_col=False)
def analyse_dataset(processed_data):
    """Print the first rows of the dataset as a quick sanity check."""
    print(processed_data.head())
def knn_algorithm_with_holdout_validation(wine_dataset, k_value):
    """Evaluate a k-NN classifier (Jaccard metric) with an 80/20 hold-out split.

    Prints the accuracy, confusion matrix and classification report for the
    given neighbour count ``k_value``.
    """
    # Deterministic shuffle so repeated runs see the same ordering.
    shuffled_data = wine_dataset.sample(frac=1,
                                        random_state=42).reset_index(
                                            drop=True)
    label = shuffled_data["Class"].values
    # First 486 columns are the feature attributes (presumably binary wine
    # descriptors -- confirm against the CSV); the commented lines below were
    # alternative feature subsets.
    dataset = shuffled_data.iloc[:, : 486].values
    # dataset = shuffled_data.iloc[:, : 485].values
    # dataset = shuffled_data.iloc[:, : 482].values
    # Stratified split keeps the class proportions equal in train and test.
    X_train, X_test, y_train, y_test = train_test_split(dataset, label,
                                                        test_size=0.20,
                                                        stratify=label)
    # Jaccard distance assumes binary attributes -- TODO confirm.
    classifier = KNeighborsClassifier(n_neighbors=k_value, algorithm='auto',
                                      metric="jaccard")
    classifier.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred) * 100
    print(f"Accuracy for {k_value} is:")
    print(accuracy)
    print(confusion_matrix(y_test, y_pred))
    print(classification_report(y_test, y_pred))
    print("********************************************")
    # error = []
    # # Calculating error for K values between 1 and 40
    # for i in range(1, 100):
    #     knn = KNeighborsClassifier(n_neighbors=i)
    #     knn.fit(X_train, y_train)
    #     pred_i = knn.predict(X_test)
    #     error.append(np.mean(pred_i != y_test))
    # plt.figure(figsize=(12, 6))
    # plt.plot(range(1, 100), error, color='red', linestyle='dashed', marker='o',
    #          markerfacecolor='blue', markersize=10)
    # plt.title('Error Rate K Value')
    # plt.xlabel('K Value')
    # plt.ylabel('Mean Error')
    # plt.show()
def knn_algorithm_with_k_fold_validation(wine_dataset, k_value):
    """Score a k-NN classifier (Jaccard metric) with 10-fold cross validation.

    Prints the per-fold scores and their mean (percentage) for ``k_value``.
    """
    # Deterministic shuffle so repeated runs see the same ordering.
    shuffled_data = wine_dataset.sample(frac=1,
                                        random_state=42).reset_index(
                                            drop=True)
    # Extract features and label
    label = shuffled_data["Class"].values
    # dataset = shuffled_data.iloc[:, : 486].values
    # dataset = shuffled_data.iloc[:, : 485].values
    # NOTE(review): uses 482 feature columns here while the hold-out variant
    # uses 486 -- confirm which attribute subset is intended.
    dataset = shuffled_data.iloc[:, : 482].values
    # Create classifier
    knn_classifier = KNeighborsClassifier(n_neighbors=k_value,
                                          algorithm='auto',
                                          metric="jaccard")
    # Train model with 10 fold cross validation
    cross_validation_scores = cross_val_score(knn_classifier, dataset,
                                              label, cv=10)
    print(f"The cross validation for {k_value} is: ")
    print(cross_validation_scores)
    print()
    print("Cross validation scores mean: {}%".format(np.mean(
        cross_validation_scores
    ) * 100))
    print("***************************************************************")
def knn_algorithm_with_hypertuning(wine_dataset):
    """Grid-search n_neighbors in [1, 100) with 10-fold CV; print the best.

    Prints the best ``n_neighbors`` setting and its mean CV score (percent).
    """
    # Deterministic shuffle so repeated runs see the same ordering.
    shuffled_data = wine_dataset.sample(frac=1,
                                        random_state=42).reset_index(
                                            drop=True)
    # Extract features and label
    label = shuffled_data["Class"].values
    dataset = shuffled_data.iloc[:, : 486].values
    # Create classifier
    knn_classifier = KNeighborsClassifier(metric="jaccard")
    # create a dictionary of all values we want to test for n_neighbors
    param_grid = {"n_neighbors": np.arange(1, 100)}
    # use gridsearch to test all values for n_neighbors
    knn_gscv = GridSearchCV(knn_classifier, param_grid, cv=10)
    # fit model to data
    knn_gscv.fit(dataset, label)
    # check top performing n_neighbors value
    best_parameters = knn_gscv.best_params_
    # check mean score for the top performing value of n_neighbors
    best_score = knn_gscv.best_score_ * 100
    print(best_parameters)
    print()
    print(best_score)
def main():
    """Load the tweaked wine dataset and run 10-fold k-NN for several k values."""
    # wine_dataset_file = "drink_and_hold_dataset.csv"
    # tweaked_wine_dataset_file = \
    #     "drink_and_hold_dataset_with_finish_attribute_deleted.csv"
    tweaked_wine_dataset_file = \
        "drink_and_hold_dataset_with_4_attributes_above_35_percent_deleted.csv"
    processed_data_file = load_dataset(tweaked_wine_dataset_file)
    # processed_data_file = load_dataset(wine_dataset_file)
    # analyse_dataset(processed_data_file)
    # knn_algorithm_with_holdout_validation(processed_data_file)
    # knn_algorithm_with_k_fold_validation(processed_data_file)
    # knn_algorithm_with_hypertuning(processed_data_file)
    # Sweep a range of neighbour counts; each call prints its own scores.
    k_values = [20, 30, 35, 40, 45, 50, 55, 60, 65, 70, 77, 85, 95, 100]
    for k in k_values:
        knn_algorithm_with_k_fold_validation(processed_data_file, k)
if __name__ == "__main__":
main()
| import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score, \
GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix, \
accuracy_score
from sklearn.utils import shuffle
def load_dataset(wine_dataset_csv):
wine_dataframe = pd.read_csv(wine_dataset_csv, index_col=False)
return wine_dataframe
def analyse_dataset(processed_data):
data = processed_data
print(data.head())
def knn_algorithm_with_holdout_validation(wine_dataset, k_value):
shuffled_data = wine_dataset.sample(frac=1,
random_state=42).reset_index(
drop=True)
label = shuffled_data["Class"].values
dataset = shuffled_data.iloc[:, : 486].values
# dataset = shuffled_data.iloc[:, : 485].values
# dataset = shuffled_data.iloc[:, : 482].values
X_train, X_test, y_train, y_test = train_test_split(dataset, label,
test_size=0.20,
stratify=label)
classifier = KNeighborsClassifier(n_neighbors=k_value, algorithm='auto',
metric="jaccard")
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
accuracy = accuracy_score(y_test, y_pred) * 100
print(f"Accuracy for {k_value} is:")
print(accuracy)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
print("********************************************")
# error = []
# # Calculating error for K values between 1 and 40
# for i in range(1, 100):
# knn = KNeighborsClassifier(n_neighbors=i)
# knn.fit(X_train, y_train)
# pred_i = knn.predict(X_test)
# error.append(np.mean(pred_i != y_test))
# plt.figure(figsize=(12, 6))
# plt.plot(range(1, 100), error, color='red', linestyle='dashed', marker='o',
# markerfacecolor='blue', markersize=10)
# plt.title('Error Rate K Value')
# plt.xlabel('K Value')
# plt.ylabel('Mean Error')
# plt.show()
def knn_algorithm_with_k_fold_validation(wine_dataset, k_value):
shuffled_data = wine_dataset.sample(frac=1,
random_state=42).reset_index(
drop=True)
# Extract features and label
label = shuffled_data["Class"].values
# dataset = shuffled_data.iloc[:, : 486].values
# dataset = shuffled_data.iloc[:, : 485].values
dataset = shuffled_data.iloc[:, : 482].values
# Create classifier
knn_classifier = KNeighborsClassifier(n_neighbors=k_value,
algorithm='auto',
metric="jaccard")
# Train model with 10 fold cross validation
cross_validation_scores = cross_val_score(knn_classifier, dataset,
label, cv=10)
print(f"The cross validation for {k_value} is: ")
print(cross_validation_scores)
print()
print("Cross validation scores mean: {}%".format(np.mean(
cross_validation_scores
) * 100))
print("***************************************************************")
def knn_algorithm_with_hypertuning(wine_dataset):
shuffled_data = wine_dataset.sample(frac=1,
random_state=42).reset_index(
drop=True)
# Extract features and label
label = shuffled_data["Class"].values
dataset = shuffled_data.iloc[:, : 486].values
# Create classifier
knn_classifier = KNeighborsClassifier(metric="jaccard")
# create a dictionary of all values we want to test for n_neighbors
param_grid = {"n_neighbors": np.arange(1, 100)}
# use gridsearch to test all values for n_neighbors
knn_gscv = GridSearchCV(knn_classifier, param_grid, cv=10)
# fit model to data
knn_gscv.fit(dataset, label)
# check top performing n_neighbors value
best_parameters = knn_gscv.best_params_
# check mean score for the top performing value of n_neighbors
best_score = knn_gscv.best_score_ * 100
print(best_parameters)
print()
print(best_score)
def main():
# wine_dataset_file = "drink_and_hold_dataset.csv"
# tweaked_wine_dataset_file = \
# "drink_and_hold_dataset_with_finish_attribute_deleted.csv"
tweaked_wine_dataset_file = \
"drink_and_hold_dataset_with_4_attributes_above_35_percent_deleted.csv"
processed_data_file = load_dataset(tweaked_wine_dataset_file)
# processed_data_file = load_dataset(wine_dataset_file)
# analyse_dataset(processed_data_file)
# knn_algorithm_with_holdout_validation(processed_data_file)
# knn_algorithm_with_k_fold_validation(processed_data_file)
# knn_algorithm_with_hypertuning(processed_data_file)
k_values = [20, 30, 35, 40, 45, 50, 55, 60, 65, 70, 77, 85, 95, 100]
for k in k_values:
knn_algorithm_with_k_fold_validation(processed_data_file, k)
if __name__ == "__main__":
main()
| en | 0.352801 | # dataset = shuffled_data.iloc[:, : 485].values # dataset = shuffled_data.iloc[:, : 482].values # error = [] # # Calculating error for K values between 1 and 40 # for i in range(1, 100): # knn = KNeighborsClassifier(n_neighbors=i) # knn.fit(X_train, y_train) # pred_i = knn.predict(X_test) # error.append(np.mean(pred_i != y_test)) # plt.figure(figsize=(12, 6)) # plt.plot(range(1, 100), error, color='red', linestyle='dashed', marker='o', # markerfacecolor='blue', markersize=10) # plt.title('Error Rate K Value') # plt.xlabel('K Value') # plt.ylabel('Mean Error') # plt.show() # Extract features and label # dataset = shuffled_data.iloc[:, : 486].values # dataset = shuffled_data.iloc[:, : 485].values # Create classifier # Train model with 10 fold cross validation # Extract features and label # Create classifier # create a dictionary of all values we want to test for n_neighbors # use gridsearch to test all values for n_neighbors # fit model to data # check top performing n_neighbors value # check mean score for the top performing value of n_neighbors # wine_dataset_file = "drink_and_hold_dataset.csv" # tweaked_wine_dataset_file = \ # "drink_and_hold_dataset_with_finish_attribute_deleted.csv" # processed_data_file = load_dataset(wine_dataset_file) # analyse_dataset(processed_data_file) # knn_algorithm_with_holdout_validation(processed_data_file) # knn_algorithm_with_k_fold_validation(processed_data_file) # knn_algorithm_with_hypertuning(processed_data_file) | 3.118694 | 3 |
match.py | mbardoeChoate/FRC-Monkey-Poll | 0 | 6623482 | <filename>match.py
import random
from math import exp
import constants
class Match(object):
    """A single match between a roster of A teams and a roster of B teams."""

    def __init__(self, A_teams, B_teams, score):
        self.A_teams = A_teams  # a list of A teams
        self.B_teams = B_teams  # a list of B teams
        self.score = score  # the score with A teams first and B teams second

    def get_team(self, team):
        """Pick a weighted-random opponent for ``team`` from the other roster.

        Teams with a higher ``point_diff`` are exponentially more likely to
        be chosen (weight = exp(SCALE_POINT_DIFF * point_diff)).
        """
        # Opponents come from whichever roster does not contain `team`.
        if team in self.A_teams:
            opponents = self.B_teams
        else:
            opponents = self.A_teams
        # Bug fix: the original loop shadowed the `team` parameter and
        # sampled from the hard-coded population [0, 1, 2], which raises
        # ValueError whenever a roster does not have exactly three teams.
        # Sampling the roster directly keeps the same distribution for
        # three-team rosters and works for any roster size.
        weights = [exp(constants.SCALE_POINT_DIFF * t.point_diff)
                   for t in opponents]
        return random.choices(opponents, weights=weights)[0]
| <filename>match.py
import random
from math import exp
import constants
class Match(object):
def __init__(self, A_teams, B_teams, score):
self.A_teams=A_teams # a list of A teams
self.B_teams=B_teams # a list of B teams
self.score=score # the score with A teams first and B teams second
def get_team(self, team):
if team in self.A_teams:
teams=self.B_teams
else:
teams=self.A_teams
# Choose the team to go with
my_weights = []
for team in teams:
my_weights.append(exp(constants.SCALE_POINT_DIFF * team.point_diff))
choice=random.choices([0,1,2], weights=my_weights)[0]
return teams[choice]
| en | 0.923385 | # a list of A teams # a list of B teams # the score with A teams first and B teams second # Choose the team to go with | 3.215963 | 3 |
src/lib/email/__init__.py | timmartin/skulpt | 10 | 6623483 | raise NotImplementedError("email is not yet implemented in Skulpt")
| raise NotImplementedError("email is not yet implemented in Skulpt")
| none | 1 | 1.158674 | 1 | |
tests/cameraUp.py | mshafiei/cvutils | 2 | 6623484 | #This script renders input data for Deep Reflectance Volume
import cvgutils.Mitsuba2XML as mts
import cvgutils.Image as im
import cvgutils.Linalg as lin
import cvgutils.Dir as dr
import cvgutils.Utils as util
import cv2
import numpy as np
import torch
import os
from scipy.interpolate import interp1d
def randomPathSphere(x):
    """Sample ``len(x)`` random (phi, theta) points on the sphere.

    NOTE(review): despite the name, only ``len(x)`` is used -- the returned
    points are independent uniform samples, not points along a path defined
    by ``x``; confirm whether that is intentional.

    Args:
        x ([ndarray]): determines only the number of samples drawn
    Returns:
        [tuple]: (phi, theta) arrays of points on the sphere
    """
    u,v = np.random.rand(2,len(x))
    return lin.uv2ptUniform(u,v)
def randomBicubicPathSphere(x,r=1,nods=5):
    """Map path parameters ``x`` to (phi, theta) along a random cubic spline.

    A cubic spline is interpolated through ``nods`` random control points in
    the unit square; ``x`` is rescaled into the spline's domain and the
    resulting (u, v) pairs are lifted onto the sphere.

    Args:
        x ([ndarray]): [path parameters]
        r (float, optional): [Radius of the sphere]. Defaults to 1.
            NOTE(review): ``r`` is currently unused in the body.
        nods (int, optional): [Number of nods for spline]. Defaults to 5.
    Returns:
        [tuple]: [points on the path on a sphere]
    """
    x0, y0 = np.random.rand(2,nods)
    f = interp1d(x0, y0, kind='cubic')
    pathRange = (x0.max() - x0.min())
    newRange = (x.max() - x.min())
    # Rescale x into the spline's parameter range.
    # NOTE(review): this assumes x.min() == 0; otherwise u may fall outside
    # [x0.min(), x0.max()] and interp1d will raise -- confirm.
    u = x / newRange * pathRange + x0.min()
    v = np.clip(f(u),0,1)
    return lin.uv2ptUniform(u,v)
def dumpCameraInfo(trajectory,shape,maskShape, fov, camLookAt,camUp, outdir,nsamples):
    """Render the scene from every (phi, theta) pose in ``trajectory`` and
    write the frames out as ``renderout/tst.avi`` (10 fps).

    NOTE(review): relies on the module-level globals ``r`` (camera radius),
    ``intensity``, ``w`` and ``h``. The ``maskShape`` and ``outdir``
    parameters and the loop index ``i`` are currently unused -- confirm
    whether that is intentional.
    """
    ps, ts = trajectory
    images = []
    for i, (t, p) in enumerate(zip(ts.reshape(-1),ps.reshape(-1))):
        # Camera position on a sphere of radius r; the point light is
        # co-located with the camera.
        x,y,z = lin.pt2xyz(p,t,r)
        xl,yl,zl = [x,y,z]
        # Near/far clip planes bracket the camera distance by +/- 1.
        near = (x**2 + y**2 +z**2) ** 0.5 - 1.0
        far = (x**2 + y**2 +z**2) ** 0.5 + 1.0
        light = mts.pointlight([xl,yl,zl],intensity)
        ext = lin.lookAt(torch.Tensor([[x,y,z]]),camLookAt[None,...],camUp[None,...])
        camera = mts.camera([x,y,z],camLookAt,camUp,fov,ext=ext,near=near,far=far,w=w,h=h,nsamples=nsamples)
        scene = mts.generateScene(shape,light,camera)
        img = mts.renderScene(scene)
        images.append(img)
    images = np.stack(images,axis=0)
    # Axes reordered (0,3,1,2) to the layout imageseq2avi expects.
    im.imageseq2avi('renderout/tst.avi',images.transpose(0,3,1,2),10)
if __name__ == "__main__":
#TODO: create math module
#TODO: create LatLong related modules
outdir = '/home/mohammad/Projects/NRV/dataset/simple/trainData'
outdirTest ='/home/mohammad/Projects/NRV/dataset/simple/testData'
outfmt = '%04d-%04d.png'
outfmtmask = 'mask-%04d-%04d.png'
texturefn = 'cvgutils/tests/testimages/5x5pattern.png'
objfn = 'cvgutils/tests/testobjs/z.obj'
dr.createIfNExist(outdir)
dr.createIfNExist(outdirTest)
center = [0,0,0]
intensity = [1.0,1.0,1.0]
nsamples = 15
radius = 0.8
diffuseReflectanceMask = [1.0,1.0,1.0]
specularReflectance = [1.0,1.0,1.0]
diffuseReflectance = texturefn
dxs = torch.Tensor([0.0,1.0,0.0]) * 0.2
camOrig = torch.Tensor([1.0,0.0,0.0])
# camLookAt = torch.Tensor([0.0,0.0,0.0])
camLookAt = torch.Tensor([0,0,0])
camUp = torch.Tensor([0.0001,0.0,1.000])
ntheta = 10
nphi = 10
nthetal = 15
nphil = 15
r = 1.8
rl = 1.8
intior = 1.0
extior = 1.000277
k = 0
alpha = 0.0
fov = 60.0
w = 64
h = 64
x = np.linspace(0,1,ntheta*nphi)
u,v = np.linspace(0,1,15), np.linspace(0.1,0.9,15)
ps,ts = lin.uv2ptUniform(u,v)
ps,ts = np.meshgrid(ps,ts)
# ps, ts = randomPathSphere(x)
trainTraj = [ps,ts]
material = mts.diffuse(diffuseReflectanceMask)
maskShape = mts.sphere(center, radius,material)
material = mts.diffuse(diffuseReflectance)
shape = mts.sphere(center, radius,material)
dumpCameraInfo(trainTraj,shape,maskShape, fov, camLookAt,camUp, outdir,nsamples)
| #This script renders input data for Deep Reflectance Volume
import cvgutils.Mitsuba2XML as mts
import cvgutils.Image as im
import cvgutils.Linalg as lin
import cvgutils.Dir as dr
import cvgutils.Utils as util
import cv2
import numpy as np
import torch
import os
from scipy.interpolate import interp1d
def randomPathSphere(x):
"""[Given parameers of a 1d path returns phi, theta on that path]
Args:
x ([ndarray]): [path parameters]
Returns:
[tuple]: [points on the path on a sphere]
"""
u,v = np.random.rand(2,len(x))
return lin.uv2ptUniform(u,v)
def randomBicubicPathSphere(x,r=1,nods=5):
"""[Given parameers of a 1d path returns phi, theta on that path]
Args:
x ([ndarray]): [path parameters]
r (float, optional): [Raidus of the sphere]. Defaults to 1.
nods (int, optional): [Number of nods for spline]. Defaults to 3.
Returns:
[tuple]: [points on the path on a sphere]
"""
x0, y0 = np.random.rand(2,nods)
f = interp1d(x0, y0, kind='cubic')
pathRange = (x0.max() - x0.min())
newRange = (x.max() - x.min())
u = x / newRange * pathRange + x0.min()
v = np.clip(f(u),0,1)
return lin.uv2ptUniform(u,v)
def dumpCameraInfo(trajectory,shape,maskShape, fov, camLookAt,camUp, outdir,nsamples):
ps, ts = trajectory
images = []
for i, (t, p) in enumerate(zip(ts.reshape(-1),ps.reshape(-1))):
x,y,z = lin.pt2xyz(p,t,r)
xl,yl,zl = [x,y,z]
near = (x**2 + y**2 +z**2) ** 0.5 - 1.0
far = (x**2 + y**2 +z**2) ** 0.5 + 1.0
light = mts.pointlight([xl,yl,zl],intensity)
ext = lin.lookAt(torch.Tensor([[x,y,z]]),camLookAt[None,...],camUp[None,...])
camera = mts.camera([x,y,z],camLookAt,camUp,fov,ext=ext,near=near,far=far,w=w,h=h,nsamples=nsamples)
scene = mts.generateScene(shape,light,camera)
img = mts.renderScene(scene)
images.append(img)
images = np.stack(images,axis=0)
im.imageseq2avi('renderout/tst.avi',images.transpose(0,3,1,2),10)
if __name__ == "__main__":
#TODO: create math module
#TODO: create LatLong related modules
outdir = '/home/mohammad/Projects/NRV/dataset/simple/trainData'
outdirTest ='/home/mohammad/Projects/NRV/dataset/simple/testData'
outfmt = '%04d-%04d.png'
outfmtmask = 'mask-%04d-%04d.png'
texturefn = 'cvgutils/tests/testimages/5x5pattern.png'
objfn = 'cvgutils/tests/testobjs/z.obj'
dr.createIfNExist(outdir)
dr.createIfNExist(outdirTest)
center = [0,0,0]
intensity = [1.0,1.0,1.0]
nsamples = 15
radius = 0.8
diffuseReflectanceMask = [1.0,1.0,1.0]
specularReflectance = [1.0,1.0,1.0]
diffuseReflectance = texturefn
dxs = torch.Tensor([0.0,1.0,0.0]) * 0.2
camOrig = torch.Tensor([1.0,0.0,0.0])
# camLookAt = torch.Tensor([0.0,0.0,0.0])
camLookAt = torch.Tensor([0,0,0])
camUp = torch.Tensor([0.0001,0.0,1.000])
ntheta = 10
nphi = 10
nthetal = 15
nphil = 15
r = 1.8
rl = 1.8
intior = 1.0
extior = 1.000277
k = 0
alpha = 0.0
fov = 60.0
w = 64
h = 64
x = np.linspace(0,1,ntheta*nphi)
u,v = np.linspace(0,1,15), np.linspace(0.1,0.9,15)
ps,ts = lin.uv2ptUniform(u,v)
ps,ts = np.meshgrid(ps,ts)
# ps, ts = randomPathSphere(x)
trainTraj = [ps,ts]
material = mts.diffuse(diffuseReflectanceMask)
maskShape = mts.sphere(center, radius,material)
material = mts.diffuse(diffuseReflectance)
shape = mts.sphere(center, radius,material)
dumpCameraInfo(trainTraj,shape,maskShape, fov, camLookAt,camUp, outdir,nsamples)
| en | 0.607781 | #This script renders input data for Deep Reflectance Volume [Given parameers of a 1d path returns phi, theta on that path] Args: x ([ndarray]): [path parameters] Returns: [tuple]: [points on the path on a sphere] [Given parameers of a 1d path returns phi, theta on that path] Args: x ([ndarray]): [path parameters] r (float, optional): [Raidus of the sphere]. Defaults to 1. nods (int, optional): [Number of nods for spline]. Defaults to 3. Returns: [tuple]: [points on the path on a sphere] #TODO: create math module #TODO: create LatLong related modules # camLookAt = torch.Tensor([0.0,0.0,0.0]) # ps, ts = randomPathSphere(x) | 2.31959 | 2 |
utils/formatters.py | bijij/yert | 0 | 6623485 | """
MIT License
Copyright (c) 2020 - µYert
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from datetime import datetime
from math import ceil
from random import random, uniform
from typing import Any, Iterator, Tuple, Optional, Union
from discord import Colour, Embed
def random_colour() -> Colour:
    """Return a random pastel colour (random hue, high saturation, full value)."""
    hue = random()
    saturation = uniform(0.75, 0.95)
    return Colour.from_hsv(hue, saturation, 1)
def chunker(to_chunk: list, chunk_size: int = 5) -> Iterator:
    """Yield successive chunk_size-sized slices of to_chunk (last may be short)."""
    start = 0
    total = len(to_chunk)
    while start < total:
        yield to_chunk[start:start + chunk_size]
        start += chunk_size
def get_index(indexable, index: int, default=None) -> Any:
    """Return indexable[index], or default when the index is out of range."""
    try:
        return indexable[index]
    except IndexError:
        pass
    return default
def fmt(daytee: Union[datetime, int], stringform: Optional[str]):
    """ Quick datetime formatter from timestamp or datetime object. """
    # Accept either a POSIX timestamp or an existing datetime.
    when = datetime.fromtimestamp(daytee) if isinstance(daytee, int) else daytee
    return when.strftime(stringform or "%Y %b %d: %H:%M")
class BetterEmbed(Embed):
    """discord.Embed subclass that assigns itself a random pastel colour."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Colour is picked once at construction time.
        self.color = random_colour()
    def fill_fields(self) -> 'BetterEmbed':
        """Fill the remaining fields so they are lined up properly"""
        # NOTE(review): `inlines` is len(fields[last_index:]) + 1, which
        # evaluates to 2 for any non-empty embed and raises ValueError
        # (max() of an empty sequence) when there are no fields.  The
        # intended count was probably len(self.fields) -- confirm.
        inlines = len(
            self.fields[max(i for i, _ in enumerate(self.fields)):]) + 1
        # Pad with zero-width-space fields up to the next multiple of three.
        for _ in range(ceil(inlines / 3) * 3 - inlines):
            self.add_field(name='\u200b', value='\u200b')
        return self
    # Useless super delegation. Commenting for now.
    # def add_field(self, *, name, value, inline=True) -> 'ColoredEmbed':
    #     """Makes all field names bold, because I decided to"""
    #     return super().add_field(name=f"**{name}**", value=value, inline=inline)
    def add_fields(self, fields: Iterator[Tuple[str, str, bool]]) -> 'BetterEmbed':
        """Add every (name, value[, inline]) tuple as a field; inline defaults to True."""
        for field in fields:
            self.add_field(name=field[0], value=field[1],
                           inline=get_index(field, 2, True))
        return self
class Flags:
    """Decode an integer bitfield into named flags.

    Each property below returns the bitmask for one flag; ``flags`` lists
    the names of every flag whose bits are set in ``value``.  The bit
    positions appear to follow Discord's public user-flags layout --
    confirm against the Discord API docs before relying on that.
    """
    def __init__(self, value):
        self.value = value  # raw integer bitfield
        self.flags = [*self.__iter__()]  # names of the flags that are set
    def __iter__(self):
        # Walk this class's properties and yield each name whose mask is set.
        for k, v in self.__class__.__dict__.items():
            if not isinstance(v, property):
                continue
            if self.has_flag(getattr(self, k)):
                yield k
    def __repr__(self):
        return f"<{self.__class__.__name__} value={self.value} flags={self.flags}>"
    def has_flag(self, v):
        # True when every bit of mask v is set in value.
        return (self.value & v) == v
    # --- one property per flag bit (mask constants) ---
    @property
    def discord_employee(self):
        return 1 << 0
    @property
    def discord_partner(self):
        return 1 << 1
    @property
    def hs_events(self):
        return 1 << 2
    @property
    def bug_hunter_lvl1(self):
        return 1 << 3
    @property
    def mfa_sms(self):
        return 1 << 4
    @property
    def premium_promo_dismissed(self):
        return 1 << 5
    @property
    def hs_bravery(self):
        return 1 << 6
    @property
    def hs_brilliance(self):
        return 1 << 7
    @property
    def hs_balance(self):
        return 1 << 8
    @property
    def early_supporter(self):
        return 1 << 9
    @property
    def team_user(self):
        return 1 << 10
    @property
    def system(self):
        return 1 << 12
    @property
    def unread_sys_msg(self):
        return 1 << 13
    @property
    def bug_hunter_lvl2(self):
        return 1 << 14
    @property
    def underage_deleted(self):
        return 1 << 15
    @property
    def verified_bot(self):
        return 1 << 16
    @property
    def verified_dev(self):
        return 1 << 17
| """
MIT License
Copyright (c) 2020 - µYert
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from datetime import datetime
from math import ceil
from random import random, uniform
from typing import Any, Iterator, Tuple, Optional, Union
from discord import Colour, Embed
def random_colour() -> Colour:
    """Return a random pastel :class:`discord.Colour`.

    Hue is uniform over the full wheel; saturation is kept in the
    0.75–0.95 band (with full value) so the result reads as pastel.
    """
    hue = random()
    saturation = uniform(0.75, 0.95)
    return Colour.from_hsv(hue, saturation, 1)
def chunker(to_chunk: list, chunk_size: int = 5) -> Iterator:
    """Lazily split *to_chunk* into consecutive slices of *chunk_size* items.

    The final slice may be shorter when ``len(to_chunk)`` is not an
    exact multiple of ``chunk_size``.
    """
    start = 0
    while start < len(to_chunk):
        yield to_chunk[start:start + chunk_size]
        start += chunk_size
def get_index(indexable, index: int, default=None) -> Any:
    """Safely index into *indexable*.

    Returns ``indexable[index]`` when that index exists, otherwise the
    given *default*.  Only :class:`IndexError` is swallowed, so mapping
    lookups that raise ``KeyError`` still propagate.
    """
    try:
        value = indexable[index]
    except IndexError:
        value = default
    return value
def fmt(daytee: Union[datetime, int], stringform: Optional[str] = None) -> str:
    """Format a timestamp or datetime object as a string.

    Args:
        daytee: A :class:`datetime.datetime`, or an int POSIX timestamp
            (converted via ``datetime.fromtimestamp``, i.e. local time).
        stringform: Optional ``strftime`` format string; falsy values
            (``None`` or ``""``) fall back to ``"%Y %b %d: %H:%M"``.
            Previously annotated Optional but required — now defaulted.

    Returns:
        The formatted date string.
    """
    if isinstance(daytee, int):
        daytee = datetime.fromtimestamp(daytee)
    stringform = stringform or "%Y %b %d: %H:%M"
    return daytee.strftime(stringform)
class BetterEmbed(Embed):
    """A discord ``Embed`` that picks a random pastel colour on creation."""
    def __init__(self, **kwargs):
        # Same construction as Embed, but with a randomised pastel colour.
        super().__init__(**kwargs)
        self.color = random_colour()
    def fill_fields(self) -> 'BetterEmbed':
        """Fill the remaining fields so they are lined up properly.

        Pads the embed with zero-width-space fields until the field
        count reaches a multiple of three, so Discord renders the last
        row evenly.  NOTE(review): ``inlines`` slices from the *last*
        field index, so it always evaluates to 2 when any field exists,
        and ``max()`` raises ``ValueError`` on an embed with no fields
        -- confirm this is the intended count.
        """
        inlines = len(
            self.fields[max(i for i, _ in enumerate(self.fields)):]) + 1
        for _ in range(ceil(inlines / 3) * 3 - inlines):
            # '\u200b' (zero-width space) renders as a blank field.
            self.add_field(name='\u200b', value='\u200b')
        return self
    # Useless super delegation. Commenting for now.
    # def add_field(self, *, name, value, inline=True) -> 'ColoredEmbed':
    #     """Makes all field names bold, because I decided to"""
    #     return super().add_field(name=f"**{name}**", value=value, inline=inline)
    def add_fields(self, fields: Iterator[Tuple[str, str, bool]]) -> 'BetterEmbed':
        """Add every ``(name, value[, inline])`` tuple as a field.

        The third tuple element is optional; it defaults to ``True``.
        """
        for field in fields:
            self.add_field(name=field[0], value=field[1],
                           inline=get_index(field, 2, True))
        return self
class Flags:
    """Decode a Discord public-flags bitfield into badge names.

    Each property exposes the bit mask for one badge; iterating an
    instance yields, in definition order, the names of every badge
    whose bits are set in ``value``.  ``flags`` caches that list.
    """
    def __init__(self, value):
        self.value = value
        self.flags = list(self)
    def __iter__(self):
        # Walk the class dict in definition order and report every
        # property whose full bit mask is present in ``value``.
        for name, attr in self.__class__.__dict__.items():
            if not isinstance(attr, property):
                continue
            if self.has_flag(getattr(self, name)):
                yield name
    def __repr__(self):
        return "<{} value={} flags={}>".format(
            self.__class__.__name__, self.value, self.flags)
    def has_flag(self, v):
        """Return True when every bit of mask ``v`` is set in ``value``."""
        return self.value & v == v
    @property
    def discord_employee(self):
        return 0x1  # 1 << 0
    @property
    def discord_partner(self):
        return 0x2  # 1 << 1
    @property
    def hs_events(self):
        return 0x4  # 1 << 2
    @property
    def bug_hunter_lvl1(self):
        return 0x8  # 1 << 3
    @property
    def mfa_sms(self):
        return 0x10  # 1 << 4
    @property
    def premium_promo_dismissed(self):
        return 0x20  # 1 << 5
    @property
    def hs_bravery(self):
        return 0x40  # 1 << 6
    @property
    def hs_brilliance(self):
        return 0x80  # 1 << 7
    @property
    def hs_balance(self):
        return 0x100  # 1 << 8
    @property
    def early_supporter(self):
        return 0x200  # 1 << 9
    @property
    def team_user(self):
        return 0x400  # 1 << 10
    @property
    def system(self):
        return 0x1000  # 1 << 12
    @property
    def unread_sys_msg(self):
        return 0x2000  # 1 << 13
    @property
    def bug_hunter_lvl2(self):
        return 0x4000  # 1 << 14
    @property
    def underage_deleted(self):
        return 0x8000  # 1 << 15
    @property
    def verified_bot(self):
        return 0x10000  # 1 << 16
    @property
    def verified_dev(self):
        return 0x20000  # 1 << 17
| en | 0.738834 | MIT License Copyright (c) 2020 - µYert Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Returns a random pastel colour Yield successive n-sized chunks from lst. Tries to get an item using it's index, returns the default is not found Quick datetime formatter from timestamp or datetime object. Haha yes Fill the remaining fields so they are lined up properly # Useless super delegation. Commenting for now. # def add_field(self, *, name, value, inline=True) -> 'ColoredEmbed': # """Makes all field names bold, because I decided to""" # return super().add_field(name=f"**{name}**", value=value, inline=inline) Adds all fields at once | 2.047321 | 2 |
imager.py | johnnyc2/FOS_View | 2 | 6623486 | <reponame>johnnyc2/FOS_View<gh_stars>1-10
#!/usr/bin/env python
import argparse
from fosfile import Vault
from PIL import Image, ImageDraw
def getCoordinates(col, row, width, config):
    """Return ``((x1, y1), (x2, y2))`` pixel corners for a room.

    ``col``/``row`` are grid coordinates and ``width`` the room width in
    grid cells; cell sizes, spacing and offsets come from ``config``.
    """
    step_x = config['roomWidth'] + config['roomSpaceX']
    step_y = config['roomHeight'] + config['roomSpaceY']
    left = col * step_x + config['roomOffsetX']
    right = left + width * step_x - config['roomSpaceX']
    top = row * step_y + config['roomOffsetY']
    bottom = top + config['roomHeight']
    return ((left, top), (right, bottom))
def enlargeRect(rect, point):
    """Grow the bounding box ``rect`` so that it contains ``point``.

    ``rect`` is ``[x_min, y_min, x_max, y_max]`` and is mutated in
    place; ``point`` is an ``(x, y)`` pair.  Returns ``None``.
    """
    x, y = point[0], point[1]
    rect[0] = min(rect[0], x)
    rect[2] = max(rect[2], x)
    rect[1] = min(rect[1], y)
    rect[3] = max(rect[3], y)
def main(config):
    """Render the vault layout described by ``config['input']`` to PNG.

    First pass computes the bounding box of all rooms and rocks; second
    pass draws rooms as red rectangles and rocks as gray ellipses, then
    saves the image to ``config['output']``.
    """
    vault = Vault(config['input'])
    # Bounding box [x_min, y_min, x_max, y_max], grown to cover everything.
    rect = [0, 0, 0, 0]
    for room in vault.vault.rooms:
        pos = getCoordinates(room.col, room.row, room.getRoomWidth(), config)
        enlargeRect(rect, pos[0])
        enlargeRect(rect, pos[1])
    for rock in vault.vault.rocks:
        # Rocks are drawn two grid cells wide.
        pos = getCoordinates(rock.c, rock.r, 2, config)
        enlargeRect(rect, pos[0])
        enlargeRect(rect, pos[1])
    # Canvas sized to the max corner of the bounding box.
    img = Image.new('RGB', (rect[2], rect[3]))
    drawer = ImageDraw.Draw(img)
    for room in vault.vault.rooms:
        pos = getCoordinates(room.col, room.row, room.getRoomWidth(), config)
        drawer.rectangle(pos, fill='red', outline='white')
    for rock in vault.vault.rocks:
        pos = getCoordinates(rock.c, rock.r, 2, config)
        drawer.ellipse(pos, outline = 'white', fill='gray')
    img.save(config['output'], 'PNG')
def parseCli():
    """Parse the command-line options and return them as a plain dict."""
    cli = argparse.ArgumentParser(description = 'Produce a picture showing the rooms layout')
    cli.add_argument('--input', type=argparse.FileType('rb'), required=True, help='Path to the vault file')
    cli.add_argument('--output', type=argparse.FileType('wb'), default='output.png', help='Path for the output PNG file')
    cli.add_argument('--roomWidth', type=int, default=30, help='Width of an elevator, in pixel (=1/3 or a room)')
    cli.add_argument('--roomHeight', type=int, default=60, help='Height of a room, in pixel')
    cli.add_argument('--roomSpaceX', type=int, default=3, help='Horizontal spacing between rooms')
    cli.add_argument('--roomSpaceY', type=int, default=3, help='Vertical spacing between rooms')
    cli.add_argument('--roomOffsetX', type=int, default=0, help='X Offset to start putting the rooms on the output')
    cli.add_argument('--roomOffsetY', type=int, default=0, help='Y Offset to start putting the rooms on the output')
    namespace = cli.parse_args()
    return vars(namespace)
if __name__ == '__main__':
main(parseCli())
| #!/usr/bin/env python
import argparse
from fosfile import Vault
from PIL import Image, ImageDraw
def getCoordinates(col, row, width, config):
x1 = col * (config['roomWidth'] + config['roomSpaceX']) + config['roomOffsetX']
x2 = x1 + width * (config['roomWidth'] + config['roomSpaceX']) - config['roomSpaceX']
y1 = row * (config['roomHeight'] + config['roomSpaceY']) + config['roomOffsetY']
y2 = y1 + config['roomHeight']
return ((x1, y1), (x2, y2))
def enlargeRect(rect, point):
if point[0] < rect[0]:
rect[0] = point[0]
if point[0] > rect[2]:
rect[2] = point[0]
if point[1] < rect[1]:
rect[1] = point[1]
if point[1] > rect[3]:
rect[3] = point[1]
def main(config):
vault = Vault(config['input'])
rect = [0, 0, 0, 0]
for room in vault.vault.rooms:
pos = getCoordinates(room.col, room.row, room.getRoomWidth(), config)
enlargeRect(rect, pos[0])
enlargeRect(rect, pos[1])
for rock in vault.vault.rocks:
pos = getCoordinates(rock.c, rock.r, 2, config)
enlargeRect(rect, pos[0])
enlargeRect(rect, pos[1])
img = Image.new('RGB', (rect[2], rect[3]))
drawer = ImageDraw.Draw(img)
for room in vault.vault.rooms:
pos = getCoordinates(room.col, room.row, room.getRoomWidth(), config)
drawer.rectangle(pos, fill='red', outline='white')
for rock in vault.vault.rocks:
pos = getCoordinates(rock.c, rock.r, 2, config)
drawer.ellipse(pos, outline = 'white', fill='gray')
img.save(config['output'], 'PNG')
def parseCli():
parser = argparse.ArgumentParser(description = 'Produce a picture showing the rooms layout')
parser.add_argument('--input', type=argparse.FileType('rb'), required=True, help='Path to the vault file')
parser.add_argument('--output', type=argparse.FileType('wb'), default='output.png', help='Path for the output PNG file')
parser.add_argument('--roomWidth', type=int, default=30, help='Width of an elevator, in pixel (=1/3 or a room)')
parser.add_argument('--roomHeight', type=int, default=60, help='Height of a room, in pixel')
parser.add_argument('--roomSpaceX', type=int, default=3, help='Horizontal spacing between rooms')
parser.add_argument('--roomSpaceY', type=int, default=3, help='Vertical spacing between rooms')
parser.add_argument('--roomOffsetX', type=int, default=0, help='X Offset to start putting the rooms on the output')
parser.add_argument('--roomOffsetY', type=int, default=0, help='Y Offset to start putting the rooms on the output')
return vars(parser.parse_args())
if __name__ == '__main__':
main(parseCli()) | ru | 0.26433 | #!/usr/bin/env python | 3.046979 | 3 |
Problems/Binary_Tree/vertical_sum.py | MayaScarlet/python-dsa | 0 | 6623487 | <filename>Problems/Binary_Tree/vertical_sum.py<gh_stars>0
"""
Given a binary tree, the print vertical sum of it.
Assume the left and right child of a node makes a 45–degree angle with the parent.
"""
class TreeNode:
    """A binary-tree node holding a value and optional children."""
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
def vertical_sum(root):
    """Return the vertical sums of a binary tree, left column to right.

    Columns are indexed by horizontal distance from the root (a left
    child is one column left, a right child one column right, a 45-degree
    layout); the result lists each column's total in column order.
    """
    column_totals = {}
    pending = [(root, 0)]
    while pending:
        node, column = pending.pop()
        if node is None:
            continue
        column_totals[column] = column_totals.get(column, 0) + node.val
        pending.append((node.left, column - 1))
        pending.append((node.right, column + 1))
    return [column_totals[column] for column in sorted(column_totals)]
def main():
    """Build the example tree from the problem statement and print its
    vertical sums (expected output: ``[9, 6, 11, 6]``)."""
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.right.left = TreeNode(5)
    root.right.right = TreeNode(6)
    root.right.left.left = TreeNode(7)
    root.right.left.right = TreeNode(8)
    print(vertical_sum(root))
if __name__ == '__main__':
main()
| <filename>Problems/Binary_Tree/vertical_sum.py<gh_stars>0
"""
Given a binary tree, the print vertical sum of it.
Assume the left and right child of a node makes a 45–degree angle with the parent.
"""
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
def vertical_sum(root):
d = {}
res = []
def helper(node, dist, values):
if not node:
return
values[dist] = values.get(dist, 0) + node.val
helper(node.left, dist - 1, values)
helper(node.right, dist + 1, values)
helper(root, 0, d)
for key in sorted(d.keys()):
res.append(d.get(key))
return res
def main():
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.right.left = TreeNode(5)
root.right.right = TreeNode(6)
root.right.left.left = TreeNode(7)
root.right.left.right = TreeNode(8)
print(vertical_sum(root))
if __name__ == '__main__':
main()
| en | 0.861352 | Given a binary tree, the print vertical sum of it. Assume the left and right child of a node makes a 45–degree angle with the parent. | 3.950415 | 4 |
community_profiles/datasets/septa.py | nickhand/community-profiles | 0 | 6623488 | import esri2gpd
import geopandas as gpd
from . import EPSG
from .core import Dataset, geocode
from .regions import *
__all__ = ["RegionalRail", "SubwayBroadSt", "SubwayMFL", "Bus"]
class RegionalRail(Dataset):
    """
    Spring 2018 Regional Rail Rail Lines with Ridership. Route ridership and
    revenue information is from FY2017. Data is from SEPTA's Revenue & Ridership
    Department.
    Source
    ------
    http://septaopendata-septa.opendata.arcgis.com/datasets/septa-regional-rail-lines
    """
    @classmethod
    def download(cls, **kwargs):
        """Download the shapefile and return a geocoded GeoDataFrame."""
        # Shapefile export of the ArcGIS open-data layer.
        url = "https://opendata.arcgis.com/datasets/48b0b600abaa4ca1a1bacf917a31c29a_0.zip"
        df = gpd.read_file(url)
        # Reproject to the project CRS, then tag each feature with the
        # ZIP code, neighborhood, and PUMA region it falls in.
        return (
            df.to_crs(epsg=EPSG)
            .pipe(geocode, ZIPCodes.get())
            .pipe(geocode, Neighborhoods.get())
            .pipe(geocode, PUMAs.get())
        )
class SubwayBroadSt(Dataset):
    """
    Spring 2018 Broad Street Line with Ridership. Route ridership and revenue
    information is from FY2017. Data is from SEPTA's Revenue & Ridership
    Department.
    Source
    ------
    http://septaopendata-septa.opendata.arcgis.com/datasets/septa-broad-street-line
    """
    @classmethod
    def download(cls, **kwargs):
        """Download the shapefile and return a geocoded GeoDataFrame."""
        # Shapefile export of the ArcGIS open-data layer.
        url = "https://opendata.arcgis.com/datasets/c051c18bb15444b6861a93fd247dde3d_0.zip"
        df = gpd.read_file(url)
        # Reproject to the project CRS, then tag each feature with the
        # ZIP code, neighborhood, and PUMA region it falls in.
        return (
            df.to_crs(epsg=EPSG)
            .pipe(geocode, ZIPCodes.get())
            .pipe(geocode, Neighborhoods.get())
            .pipe(geocode, PUMAs.get())
        )
class SubwayMFL(Dataset):
    """
    Spring 2018 Market Frankford Line with Ridership. Route ridership and
    revenue information is from FY2017. Data is from SEPTA's Revenue & Ridership
    Department.
    Source
    ------
    http://septaopendata-septa.opendata.arcgis.com/datasets/septa-market-franford-line
    """
    @classmethod
    def download(cls, **kwargs):
        """Download the shapefile and return a geocoded GeoDataFrame."""
        # Shapefile export of the ArcGIS open-data layer.
        url = "https://opendata.arcgis.com/datasets/6f4ae63a492c407eb95a9e56a6750e7f_0.zip"
        df = gpd.read_file(url)
        # Reproject to the project CRS, then tag each feature with the
        # ZIP code, neighborhood, and PUMA region it falls in.
        return (
            df.to_crs(epsg=EPSG)
            .pipe(geocode, ZIPCodes.get())
            .pipe(geocode, Neighborhoods.get())
            .pipe(geocode, PUMAs.get())
        )
class Bus(Dataset):
    """
    SEPTA Spring 2018 bus stops with ridership. Ridership contains Spring 2018
    APC data where available. Ridecheck data has been used to supplement gaps in
    the APC data.
    Source
    ------
    http://septaopendata-septa.opendata.arcgis.com/datasets/septa-bus-stops
    """
    @classmethod
    def download(cls, **kwargs):
        """Download the shapefile and return a geocoded GeoDataFrame."""
        # Shapefile export of the ArcGIS open-data layer.
        url = "https://opendata.arcgis.com/datasets/5c063cd7037547659905ab1761db469e_0.zip"
        df = gpd.read_file(url)
        # Reproject to the project CRS, then tag each feature with the
        # ZIP code, neighborhood, and PUMA region it falls in.
        return (
            df.to_crs(epsg=EPSG)
            .pipe(geocode, ZIPCodes.get())
            .pipe(geocode, Neighborhoods.get())
            .pipe(geocode, PUMAs.get())
        )
| import esri2gpd
import geopandas as gpd
from . import EPSG
from .core import Dataset, geocode
from .regions import *
__all__ = ["RegionalRail", "SubwayBroadSt", "SubwayMFL", "Bus"]
class RegionalRail(Dataset):
"""
Spring 2018 Regional Rail Rail Lines with Ridership. Route ridership and
revenue information is from FY2017. Data is from SEPTA's Revenue & Ridership
Department.
Source
------
http://septaopendata-septa.opendata.arcgis.com/datasets/septa-regional-rail-lines
"""
@classmethod
def download(cls, **kwargs):
url = "https://opendata.arcgis.com/datasets/48b0b600abaa4ca1a1bacf917a31c29a_0.zip"
df = gpd.read_file(url)
return (
df.to_crs(epsg=EPSG)
.pipe(geocode, ZIPCodes.get())
.pipe(geocode, Neighborhoods.get())
.pipe(geocode, PUMAs.get())
)
class SubwayBroadSt(Dataset):
"""
Spring 2018 Broad Street Line with Ridership. Route ridership and revenue
information is from FY2017. Data is from SEPTA's Revenue & Ridership
Department.
Source
------
http://septaopendata-septa.opendata.arcgis.com/datasets/septa-broad-street-line
"""
@classmethod
def download(cls, **kwargs):
url = "https://opendata.arcgis.com/datasets/c051c18bb15444b6861a93fd247dde3d_0.zip"
df = gpd.read_file(url)
return (
df.to_crs(epsg=EPSG)
.pipe(geocode, ZIPCodes.get())
.pipe(geocode, Neighborhoods.get())
.pipe(geocode, PUMAs.get())
)
class SubwayMFL(Dataset):
"""
Spring 2018 Market Frankford Line with Ridership. Route ridership and
revenue information is from FY2017. Data is from SEPTA's Revenue & Ridership
Department.
Source
------
http://septaopendata-septa.opendata.arcgis.com/datasets/septa-market-franford-line
"""
@classmethod
def download(cls, **kwargs):
url = "https://opendata.arcgis.com/datasets/6f4ae63a492c407eb95a9e56a6750e7f_0.zip"
df = gpd.read_file(url)
return (
df.to_crs(epsg=EPSG)
.pipe(geocode, ZIPCodes.get())
.pipe(geocode, Neighborhoods.get())
.pipe(geocode, PUMAs.get())
)
class Bus(Dataset):
"""
SEPTA Spring 2018 bus stops with ridership. Ridership contains Spring 2018
APC data where available. Ridecheck data has been used to supplement gaps in
the APC data.
Source
------
http://septaopendata-septa.opendata.arcgis.com/datasets/septa-bus-stops
"""
@classmethod
def download(cls, **kwargs):
url = "https://opendata.arcgis.com/datasets/5c063cd7037547659905ab1761db469e_0.zip"
df = gpd.read_file(url)
return (
df.to_crs(epsg=EPSG)
.pipe(geocode, ZIPCodes.get())
.pipe(geocode, Neighborhoods.get())
.pipe(geocode, PUMAs.get())
)
| en | 0.816748 | Spring 2018 Regional Rail Rail Lines with Ridership. Route ridership and revenue information is from FY2017. Data is from SEPTA's Revenue & Ridership Department. Source ------ http://septaopendata-septa.opendata.arcgis.com/datasets/septa-regional-rail-lines Spring 2018 Broad Street Line with Ridership. Route ridership and revenue information is from FY2017. Data is from SEPTA's Revenue & Ridership Department. Source ------ http://septaopendata-septa.opendata.arcgis.com/datasets/septa-broad-street-line Spring 2018 Market Frankford Line with Ridership. Route ridership and revenue information is from FY2017. Data is from SEPTA's Revenue & Ridership Department. Source ------ http://septaopendata-septa.opendata.arcgis.com/datasets/septa-market-franford-line SEPTA Spring 2018 bus stops with ridership. Ridership contains Spring 2018 APC data where available. Ridecheck data has been used to supplement gaps in the APC data. Source ------ http://septaopendata-septa.opendata.arcgis.com/datasets/septa-bus-stops | 2.64485 | 3 |
yann/utils/tensor.py | michalwols/yann | 32 | 6623489 | import torch
def weighted_sum(tensors, weights):
    """Return ``sum_i weights[i] * tensors[i]``.

    Args:
        tensors: sequence of at least two broadcast-compatible tensors.
        weights: sequence of scalar weights, one per tensor.

    Returns:
        A new tensor holding the weighted sum; the inputs are untouched
        (the accumulator is a fresh tensor from the initial multiply).

    Raises:
        ValueError: if fewer than two tensors are given.
    """
    if len(tensors) < 2:
        raise ValueError('must pass at least 2 tensors')
    total = tensors[0] * weights[0]
    for tensor, weight in zip(tensors[1:], weights[1:]):
        # add_(other, alpha=...) is the supported signature; the old
        # positional add_(alpha, other) form is deprecated.
        total.add_(tensor, alpha=weight)
    return total
def one_hot(targets: torch.Tensor, num_classes=None, device=None, dtype=None, normalize=False):
    """One-hot encode a 1-d tensor of class indices.

    Args:
        targets: 1-d integer tensor of class indices.
        num_classes: number of columns; inferred as ``targets.max() + 1``
            when omitted (previously ``None`` crashed in ``torch.zeros``).
        device: device for the result (defaults to ``targets.device``).
        dtype: dtype for the result (defaults to torch's default dtype).
        normalize: accepted for API compatibility but currently unused.
            NOTE(review): confirm whether row normalization was intended.

    Returns:
        A ``(len(targets), num_classes)`` tensor with a single 1.0 per row.

    Raises:
        ValueError: for non-tensor input or tensors that are not 1-d
        (same behavior as before, where dim-2 input also raised).
    """
    if torch.is_tensor(targets):
        if len(targets.shape) == 1:
            if num_classes is None:
                # Infer the width from the largest index present.
                num_classes = int(targets.max().item()) + 1
            num = targets.shape[0]
            hot = torch.zeros(num, num_classes, device=device or targets.device, dtype=dtype)
            hot.scatter_(1, targets.unsqueeze(1), 1.0)
            return hot
    raise ValueError('only dim 1 tensors supported')
def show_hist(hist):
    """Render a list of bin counts as a one-character-per-bin sparkline."""
    levels = '_▁▂▃▄▅▆▇█'
    # Scale so the largest count maps to the tallest bar; an all-zero
    # histogram keeps a scale of 1 to avoid dividing by zero.
    scale = (max(hist) / float(len(levels) - 1)) or 1
    bars = [levels[int(round(count / scale))] for count in hist]
    return ''.join(bars)
def describe(tensor: torch.Tensor, bins=10) -> str:
    """Return a multi-line human-readable summary of *tensor*.

    Includes shape/dtype/device/size, mean and std, a histogram (as raw
    counts and as a sparkline via ``show_hist``), and min/max/sum.
    Sections that cannot be computed for the tensor's dtype are omitted.

    Args:
        tensor: the tensor to summarize.
        bins: number of histogram bins passed to ``Tensor.histc``.
    """
    try:
        stats = (
            f"μ={tensor.mean():.4f} σ={tensor.std():.4f}\n"
        )
    except Exception:
        # mean/std are undefined for some dtypes (e.g. integer tensors);
        # a bare except would also swallow KeyboardInterrupt/SystemExit.
        stats = ''
    try:
        h = tensor.histc(bins=bins).int().tolist()
        hist = (
            f"hist: {h}\n"
            f"hist: {show_hist(h)}\n"
        )
    except Exception:
        # histc is unsupported for some dtypes/devices.
        hist = ''
    return (
        f"{tuple(tensor.shape)} "
        f"{tensor.dtype} "
        f"{tensor.device}"
        f"{' grad' if tensor.requires_grad else ''} "
        f"({tensor.numel() * tensor.element_size() / (1e6):,.5f} MB)\n"
        f"{stats}"
        f"{hist}"
        f"min: {tensor.min():.4f} max: {tensor.max():.4f} sum: {tensor.sum():.4f}\n\n"
    )
def weighted_sum(tensors, weights):
if len(tensors) < 2:
raise ValueError('must pass at least 2 tensors')
s = tensors[0] * weights[0]
for t, w in zip(tensors[1:], weights[1:]):
s.add_(w, t)
return s
def one_hot(targets: torch.Tensor, num_classes=None, device=None, dtype=None, normalize=False):
if torch.is_tensor(targets):
if len(targets.shape) == 1:
num = targets.shape[0]
hot = torch.zeros(num, num_classes, device=device or targets.device, dtype=dtype)
hot.scatter_(1, targets.unsqueeze(1), 1.0)
return hot
elif len(targets.shape) == 2:
pass
raise ValueError('only dim 1 tensors supported')
def show_hist(hist):
chars = '_▁▂▃▄▅▆▇█'
top = max(hist)
step = (top / float(len(chars) - 1)) or 1
return ''.join(chars[int(round(count / step))] for count in hist)
def describe(tensor: torch.Tensor, bins=10) -> str:
try:
stats = (
f"μ={tensor.mean():.4f} σ={tensor.std():.4f}\n"
)
except:
stats = ''
try:
h = tensor.histc(bins=bins).int().tolist()
hist = (
f"hist: {h}\n"
f"hist: {show_hist(h)}\n"
)
except:
hist = ''
return (
f"{tuple(tensor.shape)} "
f"{tensor.dtype} "
f"{tensor.device}"
f"{' grad' if tensor.requires_grad else ''} "
f"({tensor.numel() * tensor.element_size() / (1e6):,.5f} MB)\n"
f"{stats}"
f"{hist}"
f"min: {tensor.min():.4f} max: {tensor.max():.4f} sum: {tensor.sum():.4f}\n\n"
) | none | 1 | 2.664646 | 3 | |
sandy-disaster-recovery/aws.py | toddjcrane/crisiscleanup-legacy | 1 | 6623490 | <gh_stars>1-10
"""
Amazon Web Services API
Rationale: the standard boto library throws errors on GAE, seemingly due to
google.appengine.api.urlfetch
"""
import logging
from time import mktime
import datetime
from wsgiref.handlers import format_date_time
import hmac
import hashlib
import base64
from xml.etree import ElementTree
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from urllib import urlencode
from google.appengine.api import urlfetch
def rfc_1123_timestamp(dt):
    """Format datetime *dt* as an RFC 1123 date string (HTTP-header style).

    *dt* is interpreted as local time by ``mktime``; the output is
    rendered in GMT by ``wsgiref``'s ``format_date_time``.
    """
    return format_date_time(mktime(dt.timetuple()))
def iso_8601_timestamp(dt):
    """Format datetime *dt* as an ISO-8601 timestamp with a trailing 'Z'."""
    return '{0:%Y-%m-%dT%H:%M:%S}Z'.format(dt)
def aws_api_base_url(region_name):
    """Return the HTTPS endpoint of the AWS SES API for *region_name*."""
    endpoint = u"https://email.%s.amazonaws.com" % region_name
    return endpoint
def post_signed(url, params, aws_access_key_id, aws_secret_access_key):
    """POST *params* to an AWS endpoint with an AWS3-HTTPS signature.

    Signs the request with HMAC-SHA1 over the RFC-1123 ``Date`` header
    and sends it via Google App Engine's ``urlfetch``.  Logs (but does
    not raise on) responses whose body contains "Error".
    NOTE(review): this is Python-2 code (``unicode``) and the AWS3-HTTPS
    signature scheme is legacy -- confirm before porting.
    """
    # HMAC requires byte strings for both key and message.
    if isinstance(aws_access_key_id, unicode):
        aws_access_key_id = aws_access_key_id.encode('ascii')
    if isinstance(aws_secret_access_key, unicode):
        aws_secret_access_key = aws_secret_access_key.encode('ascii')
    now = datetime.datetime.utcnow()
    rfc_timestamp = rfc_1123_timestamp(now)
    iso_timestamp = iso_8601_timestamp(now)
    # The signature is the HMAC-SHA1 of the Date header value.
    hmac_hash = hmac.new(aws_secret_access_key, rfc_timestamp, hashlib.sha1)
    encoded_hash = base64.b64encode(hmac_hash.digest())
    x_amzn_auth = (
        "AWS3-HTTPS " +
        "AWSAccessKeyId=%s, " % aws_access_key_id +
        "Algorithm=HmacSHA1, " +
        "Signature=%s" % encoded_hash
    )
    # Caller params are merged on top of the common auth fields.
    payload = dict({
        "AWSAccessKeyId": aws_access_key_id,
        "Timestamp": iso_timestamp,
    },
        **params
    )
    response = urlfetch.fetch(
        url=url,
        method="POST",
        headers={
            "Date": rfc_timestamp,
            "X-Amzn-Authorization": x_amzn_auth,
        },
        payload=urlencode(payload),
    )
    if "Error" in response.content:
        logging.error("AWS ERROR: %s" % response.content)
    return response
def ses_get_verified_email_addresses(
    aws_region,
    aws_access_key_id,
    aws_secret_access_key,
):
    """Return the list of identities verified with SES in *aws_region*.

    Calls the SES ``ListIdentities`` action and parses the XML response.
    Returns an empty list (after logging) when the response cannot be
    parsed, preserving the previous best-effort behavior.
    """
    url = aws_api_base_url(aws_region)
    response = post_signed(
        url,
        params={
            'Action': 'ListIdentities',
        },
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
    )
    try:
        xml_tree = ElementTree.fromstring(response.content)
        identity_members = xml_tree.findall(
            ".//xmlns:Identities/xmlns:member",
            namespaces={'xmlns': 'http://ses.amazonaws.com/doc/2010-12-01/'}
        )
        return [el.text for el in identity_members]
    except Exception:
        # A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt;
        # log the unparseable response instead of failing completely silently.
        logging.exception("Could not parse SES ListIdentities response")
        return []
def ses_send_email(
    source, to_addresses, subject, body, cc=None, bcc=None, html_body=None,
    aws_region=None,
    aws_access_key_id=None,
    aws_secret_access_key=None,
):
    """Send a (multipart) email via AWS SES ``SendRawEmail``.

    Builds a plain-text message, optionally adding an HTML alternative,
    and posts it base64-encoded to SES.  Returns the urlfetch response.
    NOTE(review): ``cc`` and ``bcc`` are accepted but never used -- no
    Cc/Bcc headers are set; confirm whether that is intentional.
    """
    # construct multipart email
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = source
    msg['To'] = u', '.join(to_addresses)
    # NOTE(review): logs the whole message body at ERROR level -- looks
    # like leftover debugging; confirm before removing.
    logging.error(body)
    text_part = MIMEText(body, 'plain')
    msg.attach(text_part)
    if html_body:
        # HTML part is attached last so capable clients prefer it.
        html_part = MIMEText(html_body, 'html')
        msg.attach(html_part)
    # post to AWS SES
    url = aws_api_base_url(aws_region)
    return post_signed(
        url,
        params={
            'Action': 'SendRawEmail',
            'RawMessage.Data': base64.b64encode(msg.as_string()),
        },
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
    )
| """
Amazon Web Services API
Rationale: the standard boto library throws errors on GAE, seemingly due to
google.appengine.api.urlfetch
"""
import logging
from time import mktime
import datetime
from wsgiref.handlers import format_date_time
import hmac
import hashlib
import base64
from xml.etree import ElementTree
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from urllib import urlencode
from google.appengine.api import urlfetch
def rfc_1123_timestamp(dt):
stamp = mktime(dt.timetuple())
return format_date_time(stamp)
def iso_8601_timestamp(dt):
return dt.strftime("%Y-%m-%dT%H:%M:%SZ")
def aws_api_base_url(region_name):
return u"https://email.%s.amazonaws.com" % region_name
def post_signed(url, params, aws_access_key_id, aws_secret_access_key):
if isinstance(aws_access_key_id, unicode):
aws_access_key_id = aws_access_key_id.encode('ascii')
if isinstance(aws_secret_access_key, unicode):
aws_secret_access_key = aws_secret_access_key.encode('ascii')
now = datetime.datetime.utcnow()
rfc_timestamp = rfc_1123_timestamp(now)
iso_timestamp = iso_8601_timestamp(now)
hmac_hash = hmac.new(aws_secret_access_key, rfc_timestamp, hashlib.sha1)
encoded_hash = base64.b64encode(hmac_hash.digest())
x_amzn_auth = (
"AWS3-HTTPS " +
"AWSAccessKeyId=%s, " % aws_access_key_id +
"Algorithm=HmacSHA1, " +
"Signature=%s" % encoded_hash
)
payload = dict({
"AWSAccessKeyId": aws_access_key_id,
"Timestamp": iso_timestamp,
},
**params
)
response = urlfetch.fetch(
url=url,
method="POST",
headers={
"Date": rfc_timestamp,
"X-Amzn-Authorization": x_amzn_auth,
},
payload=urlencode(payload),
)
if "Error" in response.content:
logging.error("AWS ERROR: %s" % response.content)
return response
def ses_get_verified_email_addresses(
aws_region,
aws_access_key_id,
aws_secret_access_key,
):
url = aws_api_base_url(aws_region)
response = post_signed(
url,
params={
'Action': 'ListIdentities',
},
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
try:
xml_tree = ElementTree.fromstring(response.content)
identity_members = xml_tree.findall(
".//xmlns:Identities/xmlns:member",
namespaces={'xmlns': 'http://ses.amazonaws.com/doc/2010-12-01/'}
)
return [el.text for el in identity_members]
except:
return []
def ses_send_email(
source, to_addresses, subject, body, cc=None, bcc=None, html_body=None,
aws_region=None,
aws_access_key_id=None,
aws_secret_access_key=None,
):
# construct multipart email
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = source
msg['To'] = u', '.join(to_addresses)
logging.error(body)
text_part = MIMEText(body, 'plain')
msg.attach(text_part)
if html_body:
html_part = MIMEText(html_body, 'html')
msg.attach(html_part)
# post to AWS SES
url = aws_api_base_url(aws_region)
return post_signed(
url,
params={
'Action': 'SendRawEmail',
'RawMessage.Data': base64.b64encode(msg.as_string()),
},
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
) | en | 0.71757 | Amazon Web Services API Rationale: the standard boto library throws errors on GAE, seemingly due to google.appengine.api.urlfetch # construct multipart email # post to AWS SES | 2.360312 | 2 |
test/test_setup.py | ccarouge/payu | 5 | 6623491 | <reponame>ccarouge/payu<gh_stars>1-10
import copy
import os
from pathlib import Path
import pdb
import pytest
import shutil
import yaml
import payu
import payu.models.test
from .common import cd, make_random_file, get_manifests
from .common import tmpdir, ctrldir, labdir, workdir
from .common import sweep_work, payu_init, payu_setup
from .common import config as config_orig
from .common import write_config
from .common import make_exe, make_inputs, make_restarts, make_all_files
verbose = True
config = copy.deepcopy(config_orig)
def make_config_files():
    """Create the control-directory config files the test model expects.

    Each file listed by ``payu.models.test.config_files`` is written to
    ``ctrldir`` with 29 lines of random content.
    """
    for filename in payu.models.test.config_files:
        make_random_file(ctrldir / filename, 29)
def setup_module(module):
    """
    Put any test-wide setup code in here, e.g. creating test files

    Removes any leftover tmpdir, recreates the lab/control directory
    tree, writes the test fixture files, and writes the payu config.
    """
    if verbose:
        print("setup_module module:%s" % module.__name__)
    # Should be taken care of by teardown, in case remnants lying around
    try:
        shutil.rmtree(tmpdir)
    except FileNotFoundError:
        pass
    try:
        tmpdir.mkdir()
        labdir.mkdir()
        ctrldir.mkdir()
        make_all_files()
    except Exception as e:
        # NOTE(review): errors here are only printed, so a failed setup
        # surfaces later as confusing test failures -- confirm intent.
        print(e)
    write_config(config)
def teardown_module(module):
    """
    Put any test-wide teardown code in here, e.g. removing test outputs

    NOTE(review): the rmtree call is commented out, so the tmp tree is
    deliberately left on disk after the run (useful for inspection).
    """
    if verbose:
        print("teardown_module module:%s" % module.__name__)
    try:
        # shutil.rmtree(tmpdir)
        print('removing tmp')
    except Exception as e:
        print(e)
# These are integration tests. They have an undesirable dependence on each
# other. It would be possible to make them independent, but then they'd
# be reproducing previous "tests", like init. So this design is deliberate
# but compromised. It means when running an error in one test can cascade
# and cause other tests to fail.
#
# Unfortunate but there you go.
def test_init():
    """Run ``payu init`` in the control dir and check the lab layout."""
    # Initialise a payu laboratory
    with cd(ctrldir):
        payu_init(None, None, str(labdir))
    # Check all the correct directories have been created
    for subdir in ['bin', 'input', 'archive', 'codebase']:
        assert((labdir / subdir).is_dir())
def test_setup():
    """Integration test of ``payu setup`` (depends on test_init above).

    Verifies the work dir symlink/contents, that re-running without
    ``--force`` refuses to clobber an existing work path, and that
    ``--force`` succeeds and recreates the same layout.
    """
    # Create some input and executable files
    make_inputs()
    make_exe()
    bindir = labdir / 'bin'
    exe = config['exe']
    make_config_files()
    # Run setup
    payu_setup(lab_path=str(labdir))
    # work/ must be a symlinked directory with the exe linked from bin/.
    assert(workdir.is_symlink())
    assert(workdir.is_dir())
    assert((workdir/exe).resolve() == (bindir/exe).resolve())
    workdirfull = workdir.resolve()
    config_files = payu.models.test.config_files
    for f in config_files + ['config.yaml']:
        assert((workdir/f).is_file())
    # Input files keep the sizes assigned by make_inputs (1 MB + i).
    for i in range(1, 4):
        assert((workdir/'input_00{i}.bin'.format(i=i)).stat().st_size
               == 1000**2 + i)
    # A second setup without force must refuse to overwrite work/.
    with pytest.raises(SystemExit,
                       match="work path already exists") as setup_error:
        payu_setup(lab_path=str(labdir), sweep=False, force=False)
    assert setup_error.type == SystemExit
    # With force=True the work path is rebuilt with identical contents.
    payu_setup(lab_path=str(labdir), sweep=False, force=True)
    assert(workdir.is_symlink())
    assert(workdir.is_dir())
    assert((workdir/exe).resolve() == (bindir/exe).resolve())
    workdirfull = workdir.resolve()
    config_files = payu.models.test.config_files
    for f in config_files + ['config.yaml']:
        assert((workdir/f).is_file())
    for i in range(1, 4):
        assert((workdir/'input_00{i}.bin'.format(i=i)).stat().st_size
               == 1000**2 + i)
| import copy
import os
from pathlib import Path
import pdb
import pytest
import shutil
import yaml
import payu
import payu.models.test
from .common import cd, make_random_file, get_manifests
from .common import tmpdir, ctrldir, labdir, workdir
from .common import sweep_work, payu_init, payu_setup
from .common import config as config_orig
from .common import write_config
from .common import make_exe, make_inputs, make_restarts, make_all_files
verbose = True
config = copy.deepcopy(config_orig)
def make_config_files():
"""
Create files required for test model
"""
config_files = payu.models.test.config_files
for file in config_files:
make_random_file(ctrldir/file, 29)
def setup_module(module):
"""
Put any test-wide setup code in here, e.g. creating test files
"""
if verbose:
print("setup_module module:%s" % module.__name__)
# Should be taken care of by teardown, in case remnants lying around
try:
shutil.rmtree(tmpdir)
except FileNotFoundError:
pass
try:
tmpdir.mkdir()
labdir.mkdir()
ctrldir.mkdir()
make_all_files()
except Exception as e:
print(e)
write_config(config)
def teardown_module(module):
    """
    Put any test-wide teardown code in here, e.g. removing test outputs
    """
    if verbose:
        print("teardown_module module:%s" % module.__name__)
    try:
        # NOTE(review): actual cleanup is disabled; uncomment to delete tmpdir.
        # shutil.rmtree(tmpdir)
        print('removing tmp')
    except Exception as e:
        # Best-effort teardown: report but never fail the test session here.
        print(e)
# These are integration tests. They have an undesirable dependence on each
# other. It would be possible to make them independent, but then they'd
# be reproducing previous "tests", like init. So this design is deliberate
# but compromised. It means when running an error in one test can cascade
# and cause other tests to fail.
#
# Unfortunate but there you go.
def test_init():
    """Initialise a payu laboratory and verify its directory skeleton."""
    with cd(ctrldir):
        payu_init(None, None, str(labdir))
    # The freshly initialised lab must contain each expected subdirectory.
    expected_subdirs = ('bin', 'input', 'archive', 'codebase')
    for subdir in expected_subdirs:
        assert (labdir / subdir).is_dir()
def _check_work_dir(bindir, exe):
    """Assert the work dir symlink, exe link, config files and inputs exist.

    Extracted helper: this assertion block previously appeared twice,
    verbatim, inside test_setup.
    """
    assert workdir.is_symlink()
    assert workdir.is_dir()
    assert (workdir / exe).resolve() == (bindir / exe).resolve()
    config_files = payu.models.test.config_files
    for f in config_files + ['config.yaml']:
        assert (workdir / f).is_file()
    # Inputs were generated with sizes 1000**2 + i for i in 1..3.
    for i in range(1, 4):
        assert (workdir / 'input_00{i}.bin'.format(i=i)).stat().st_size \
            == 1000**2 + i


def test_setup():
    """Run payu setup, check its outputs, and check re-run needs force=True."""
    # Create some input and executable files
    make_inputs()
    make_exe()
    bindir = labdir / 'bin'
    exe = config['exe']
    make_config_files()
    # Run setup
    payu_setup(lab_path=str(labdir))
    _check_work_dir(bindir, exe)
    # A second setup must refuse to clobber the existing work path...
    with pytest.raises(SystemExit,
                       match="work path already exists") as setup_error:
        payu_setup(lab_path=str(labdir), sweep=False, force=False)
    assert setup_error.type == SystemExit
    # ...unless forced, after which the work dir is rebuilt intact.
    payu_setup(lab_path=str(labdir), sweep=False, force=True)
    _check_work_dir(bindir, exe)
user/serializers.py | MrNewaz/Coding-Assignment | 0 | 6623492 | from rest_framework import serializers
from .models import *
# Parent Serializer (for the Parent model)
class ParentSerializer(serializers.ModelSerializer):
    """Serialize all fields of the Parent model."""
    class Meta:
        model = Parent
        # '__all__' exposes every model field; list fields explicitly to
        # restrict the API surface if Parent ever gains sensitive columns.
        fields = '__all__'
# Child Serializer (for the Child model)
class ChildSerializer(serializers.ModelSerializer):
    """Serialize all fields of the Child model, expanding relations one level."""
    class Meta:
        model = Child
        fields = '__all__'
        # depth=1 inlines related objects (e.g. the parent) instead of PKs;
        # note DRF makes such nested fields read-only on write.
        depth = 1
| from rest_framework import serializers
from .models import *
# Parent Serializer (for the Parent model)
class ParentSerializer(serializers.ModelSerializer):
    """Serialize all fields of the Parent model."""
    class Meta:
        model = Parent
        # '__all__' exposes every model field; list fields explicitly to
        # restrict the API surface if Parent ever gains sensitive columns.
        fields = '__all__'
# Child Serializer (for the Child model)
class ChildSerializer(serializers.ModelSerializer):
    """Serialize all fields of the Child model, expanding relations one level."""
    class Meta:
        model = Child
        fields = '__all__'
        # depth=1 inlines related objects (e.g. the parent) instead of PKs;
        # note DRF makes such nested fields read-only on write.
        depth = 1
| en | 0.716032 | # Parent Serializer (for the Parent model) # Child Serializer (for the Child model) | 2.168593 | 2 |
verify/checker/abc218/b.py | naskya/testcase-generator | 4 | 6623493 | <gh_stars>1-10
def main() -> None:
    """Assert that stdin contains exactly the integers 1..26 (a permutation)."""
    values = sorted(int(token) for token in input().split())
    assert len(values) == 26
    assert values == list(range(1, 26 + 1))


if __name__ == '__main__':
    main()
def main() -> None:
    """Assert that stdin contains exactly the integers 1..26 (a permutation)."""
    values = sorted(int(token) for token in input().split())
    assert len(values) == 26
    assert values == list(range(1, 26 + 1))


if __name__ == '__main__':
    main()
api-dev/venv/lib/python3.6/site-packages/web3/_utils/personal.py | twaddle-dev/CoVid-19 | 0 | 6623494 | from typing import (
Any,
Callable,
Dict,
List,
Optional,
)
from eth_typing import (
ChecksumAddress,
HexStr,
)
from hexbytes import (
HexBytes,
)
from web3._utils.compat import (
Protocol,
)
from web3._utils.rpc_abi import (
RPC,
)
from web3.method import (
Method,
default_root_munger,
)
from web3.types import (
TxParams,
)
# Bindings for the node-managed "personal_*" JSON-RPC endpoints (keystore
# account management).  Each Method couples an RPC endpoint with the
# argument mungers applied before dispatch.

# Import a raw private key protected by a passphrase; returns the address.
importRawKey: Method[Callable[[str, str], ChecksumAddress]] = Method(
    RPC.personal_importRawKey,
    mungers=[default_root_munger],
)
# Create a new account encrypted with the given passphrase.
newAccount: Method[Callable[[str], ChecksumAddress]] = Method(
    RPC.personal_newAccount,
    mungers=[default_root_munger],
)
# List node-managed addresses; takes no arguments, hence no mungers.
listAccounts: Method[Callable[[], List[ChecksumAddress]]] = Method(
    RPC.personal_listAccounts,
    mungers=None,
)
# Sign and send a transaction, using the passphrase to unlock the sender.
sendTransaction: Method[Callable[[TxParams, str], HexBytes]] = Method(
    RPC.personal_sendTransaction,
    mungers=[default_root_munger],
)
# Re-lock the given account.
lockAccount: Method[Callable[[ChecksumAddress], bool]] = Method(
    RPC.personal_lockAccount,
    mungers=[default_root_munger],
)
class UnlockAccountWrapper(Protocol):
    # Call signature for unlockAccount; duration=None lets the node apply
    # its default unlock timeout.
    def __call__(self, account: ChecksumAddress, passphrase: str,
                 duration: Optional[int] = None) -> bool:
        pass
# Unlock an account for signing, optionally for a limited duration.
unlockAccount: Method[UnlockAccountWrapper] = Method(
    RPC.personal_unlockAccount,
    mungers=[default_root_munger],
)
# Sign arbitrary data with an account key (passphrase optional).
sign: Method[Callable[[str, ChecksumAddress, Optional[str]], HexStr]] = Method(
    RPC.personal_sign,
    mungers=[default_root_munger],
)
# Sign EIP-712 structured (typed) data.
signTypedData: Method[Callable[[Dict[str, Any], ChecksumAddress, str], HexStr]] = Method(
    RPC.personal_signTypedData,
    mungers=[default_root_munger],
)
# Recover the signing address from a message and its signature.
ecRecover: Method[Callable[[str, HexStr], ChecksumAddress]] = Method(
    RPC.personal_ecRecover,
    mungers=[default_root_munger],
)
| from typing import (
Any,
Callable,
Dict,
List,
Optional,
)
from eth_typing import (
ChecksumAddress,
HexStr,
)
from hexbytes import (
HexBytes,
)
from web3._utils.compat import (
Protocol,
)
from web3._utils.rpc_abi import (
RPC,
)
from web3.method import (
Method,
default_root_munger,
)
from web3.types import (
TxParams,
)
# Bindings for the node-managed "personal_*" JSON-RPC endpoints (keystore
# account management).  Each Method couples an RPC endpoint with the
# argument mungers applied before dispatch.

# Import a raw private key protected by a passphrase; returns the address.
importRawKey: Method[Callable[[str, str], ChecksumAddress]] = Method(
    RPC.personal_importRawKey,
    mungers=[default_root_munger],
)
# Create a new account encrypted with the given passphrase.
newAccount: Method[Callable[[str], ChecksumAddress]] = Method(
    RPC.personal_newAccount,
    mungers=[default_root_munger],
)
# List node-managed addresses; takes no arguments, hence no mungers.
listAccounts: Method[Callable[[], List[ChecksumAddress]]] = Method(
    RPC.personal_listAccounts,
    mungers=None,
)
# Sign and send a transaction, using the passphrase to unlock the sender.
sendTransaction: Method[Callable[[TxParams, str], HexBytes]] = Method(
    RPC.personal_sendTransaction,
    mungers=[default_root_munger],
)
# Re-lock the given account.
lockAccount: Method[Callable[[ChecksumAddress], bool]] = Method(
    RPC.personal_lockAccount,
    mungers=[default_root_munger],
)
class UnlockAccountWrapper(Protocol):
    # Call signature for unlockAccount; duration=None lets the node apply
    # its default unlock timeout.
    def __call__(self, account: ChecksumAddress, passphrase: str,
                 duration: Optional[int] = None) -> bool:
        pass
# Unlock an account for signing, optionally for a limited duration.
unlockAccount: Method[UnlockAccountWrapper] = Method(
    RPC.personal_unlockAccount,
    mungers=[default_root_munger],
)
# Sign arbitrary data with an account key (passphrase optional).
sign: Method[Callable[[str, ChecksumAddress, Optional[str]], HexStr]] = Method(
    RPC.personal_sign,
    mungers=[default_root_munger],
)
# Sign EIP-712 structured (typed) data.
signTypedData: Method[Callable[[Dict[str, Any], ChecksumAddress, str], HexStr]] = Method(
    RPC.personal_signTypedData,
    mungers=[default_root_munger],
)
# Recover the signing address from a message and its signature.
ecRecover: Method[Callable[[str, HexStr], ChecksumAddress]] = Method(
    RPC.personal_ecRecover,
    mungers=[default_root_munger],
)
| none | 1 | 1.871402 | 2 | |
postpost/api/filters.py | PiterPy-Meetup/postpost | 6 | 6623495 | from django_filters import rest_framework as filters
from api import models
class PublicationFilterSet(filters.FilterSet):
    """
    Used for filtering in the publication-list endpoint via query-string
    parameters (?scheduled=...&platform_types=...).
    """
    # scheduled=false is equivalent to scheduled_at__isnull=True
    # (exclude=True inverts the isnull lookup).
    scheduled = filters.BooleanFilter(
        field_name='scheduled_at',
        lookup_expr='isnull',
        exclude=True,
    )
    # If you want to change this filtering, first read the comment
    # on PlatformPost.PLATFORM_TYPES.
    platform_types = filters.MultipleChoiceFilter(
        field_name='platform_posts__platform_type',
        choices=models.PlatformPost.PLATFORM_TYPES,
    )
    class Meta(object):
        model = models.Publication
        fields = ['scheduled', 'platform_types']
| from django_filters import rest_framework as filters
from api import models
class PublicationFilterSet(filters.FilterSet):
    """
    Used for filtering in the publication-list endpoint via query-string
    parameters (?scheduled=...&platform_types=...).
    """
    # scheduled=false is equivalent to scheduled_at__isnull=True
    # (exclude=True inverts the isnull lookup).
    scheduled = filters.BooleanFilter(
        field_name='scheduled_at',
        lookup_expr='isnull',
        exclude=True,
    )
    # If you want to change this filtering, first read the comment
    # on PlatformPost.PLATFORM_TYPES.
    platform_types = filters.MultipleChoiceFilter(
        field_name='platform_posts__platform_type',
        choices=models.PlatformPost.PLATFORM_TYPES,
    )
    class Meta(object):
        model = models.Publication
        fields = ['scheduled', 'platform_types']
| en | 0.762258 | Uses for filtering in publication-list endpoint by query-string. # scheduled=false is equivalent to scheduled_at__isnull=True # If you want change this filtering, first read comment # for PlatformPost.PLATFORM_TYPES | 2.264413 | 2 |
netpyntest_lib/api.py | aespinosaalvarez/NetPyntest | 1 | 6623496 | # -*- coding: utf-8 -*-
"""
This file contains API calls and Data
"""
import six
from sys import path
from termcolor import colored
from os import geteuid
from os import path
from .data import *
__version__ = "1.0.0"
__all__ = ["run_console", "run", "GlobalParameters"]
# --------------------------------------------------------------------------
#
# Command line options
#
# --------------------------------------------------------------------------
def run_console(config):
    """
    Console entry point: validate the options object, then delegate to run().

    :param config: GlobalParameters option instance
    :type config: `GlobalParameters`
    :raises: TypeError
    """
    if not isinstance(config, GlobalParameters):
        message = "Expected GlobalParameters, got '%s' instead" % type(config)
        raise TypeError(message)
    #six.print_(colored("[*]", "blue"), "Starting NetPyntest execution")
    run(config)
    #six.print_(colored("[*]", "blue"), "Done!")
# ----------------------------------------------------------------------
#
# API call
#
# ----------------------------------------------------------------------
def run(config):
    """
    Validate the environment, then dispatch the selected attack and action.

    :param config: GlobalParameters option instance
    :type config: `GlobalParameters`
    :raises: TypeError
    """
    if not isinstance(config, GlobalParameters):
        raise TypeError("Expected GlobalParameters, got '%s' instead" % type(config))
    # --------------------------------------------------------------------------
    # CHECK ROOT USER
    # --------------------------------------------------------------------------
    # Packet injection and sniffing need root privileges (euid 0).
    if geteuid():
        six.print_(colored("[!] ERROR - Please run NetPyntest as root.", "red"))
        exit()
    # --------------------------------------------------------------------------
    # CHECK CONFIG FILE
    # --------------------------------------------------------------------------
    if not path.isfile("control_file"):
        six.print_("Creating config_file")
        data = {'mac_flooding_pid': 0, 'port_stealing_pid': 0}
        # Context manager closes the handle even if the write fails
        # (previously the handle was closed manually, unsafely on error).
        with open("control_file", "w") as control_file:
            control_file.write(str(data))
    # --------------------------------------------------------------------------
    # SELECT & LAUNCH ATTACK
    # --------------------------------------------------------------------------
    attack = config.attack[0]
    action = config.action[0]
    # PEP 8: compare against None with 'is'/'is not', not '=='/'!='.
    if config.interface is not None:
        iface = config.interface[0]
        # TODO valid interface and introduce interface in calls
    else:
        iface = "eth0"
    ################ MAC FLOODING ##############
    if attack == "mac_flooding":
        from .libs.plugins.mac_flooding import start
        from .libs.plugins.mac_flooding import stop
        from .libs.plugins.mac_flooding import generate_pcap
        if action == "start":  # TODO This is not working for Python 2
            from sys import version_info
            if version_info[0] >= 3:
                if config.file is not None:
                    file = config.file[0]
                    if path.isfile(file):
                        six.print_("[*] Starting MAC Flooding with file '{}'...".format(file))
                        from scapy.error import Scapy_Exception
                        try:
                            start(file, iface)
                        except Scapy_Exception:
                            six.print_(colored("[!] ERROR - File '{}' is not a valid PCAP file".format(file), "red"))
                    else:
                        six.print_(colored("[!] ERROR - File '{}' doesn't exist.".format(file), "red"))
                else:
                    six.print_(colored("[!] ERROR - You must specify a PCAP file. You can generate one with 'sudo python netpyntest.py mac_flooding generate_pcap'", "red"))
            else:
                six.print_(colored("[!] ERROR - Sorry, currently this feature is only supported in Python 3 or higher", "red"))
        elif action == "stop":
            stop()
        elif action == "generate_pcap":
            if config.size is None:
                six.print_("[*] Generating PCAP file with default size of 10000 packets")
                generate_pcap(10000)
            else:
                size = config.size[0]
                six.print_("[*] Generating PCAP file with size of {} packets".format(size))
                generate_pcap(size)
            six.print_(colored("[*] PCAP file generated", "green"))
        else:
            six.print_(colored("[!] ERROR - Action {} doesn't exist for MAC Flooding attack".format(action), "red"))
    ################ PORT STEALING ##############
    elif attack == "port_stealing":
        if action == "start":
            if config.target is not None:
                target = config.target[0]
                if validate_ip(target):
                    if config.output is not None:
                        output = config.output[0]
                        from .libs.plugins.port_stealing import start
                        six.print_("[*] Starting Port Stealing...")
                        start(target, output, iface)
                    else:
                        six.print_(colored("[!] ERROR - No output file specified (-o)", "red"))
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address (-t)", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t)", "red"))
        elif action == "stop":
            from .libs.plugins.port_stealing import stop
            six.print_("[*] Stopping Port Stealing...")
            stop()
        else:
            six.print_(colored("[!] ERROR - Action {} doesn't exist for Port Stealing attack".format(action), "red"))
    ################ SNMP ##############
    elif attack == "snmp":
        if action == "sniff":
            from .libs.plugins.snmp import sniff_snmp
            six.print_("[*] Starting SNMP sniffing...")
            sniff_snmp(iface)
        elif action == "get":
            # Default to the conventional read-only community string.
            if config.com is not None:
                com = config.com[0]
            else:
                com = "public"
            if config.target is not None:
                target = config.target[0]
                if validate_ip(target):
                    if config.oid is not None:
                        oid = config.oid[0]
                        from .libs.plugins.snmp import snmp_get
                        six.print_("[*] Performing SNMP GET request against host {} and OID {}...".format(target, oid))
                        snmp_get(target, oid, iface, com)
                    else:
                        six.print_(colored("[!] ERROR - No OID specified (-oid)", "red"))
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address.", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t)", "red"))
        elif action == "set":
            # SET needs a community with write access; default "private".
            if config.com is not None:
                com = config.com[0]
            else:
                com = "private"
            if config.target is not None:
                target = config.target[0]
                if validate_ip(target):
                    if config.oid is not None:
                        oid = config.oid[0]
                        if config.value is not None:
                            val = config.value[0]
                            from .libs.plugins.snmp import snmp_set
                            six.print_("[*] Performing SNMP SET request against host {}. Trying to set value {} in object {}...".format(target, val, oid))
                            snmp_set(target, oid, iface, com, val)
                        else:
                            six.print_(colored("[!] ERROR - No value specified (-v)", "red"))
                    else:
                        six.print_(colored("[!] ERROR - No OID specified (-oid)", "red"))
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address (-t)", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t)", "red"))
        elif action == "dictionary_attack":
            if config.target is not None:
                target = config.target[0]
                if validate_ip(target):
                    if config.dict is not None:
                        # Local renamed from 'dict' to stop shadowing the builtin.
                        dict_file = config.dict[0]
                        if path.isfile(dict_file):
                            from .libs.plugins.snmp import dictionary_attack
                            six.print_("[*] Starting SNMP dictionary attack...")
                            dictionary_attack(dict_file, target, iface)
                        else:
                            six.print_(colored("[!] ERROR - File '{}' doesn't exist.".format(dict_file), "red"))
                    else:
                        six.print_(colored("[!] ERROR - You must specify a dictionary file (-d)", "red"))
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address (-t)", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t, --target)", "red"))
        elif action == "dos":
            if config.com is not None:
                com = config.com[0]
            else:
                com = "private"
            if config.target is not None:
                target = config.target[0]
                if validate_ip(target):
                    from .libs.plugins.snmp import snmp_DoS
                    six.print_("[*] Starting DoS attack to host {} with RW community {}...".format(target, com))
                    snmp_DoS(target, iface, com)
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address (-t, --target)", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t)", "red"))
        else:
            six.print_(colored("[!] ERROR - Action {} doesn't exist for SNMP".format(action), "red"))
def validate_ip(s):
    """Return True if *s* is a dotted-quad IPv4 address (four octets 0-255)."""
    octets = s.split('.')
    if len(octets) != 4:
        return False
    return all(octet.isdigit() and 0 <= int(octet) <= 255 for octet in octets)
| # -*- coding: utf-8 -*-
"""
This file contains API calls and Data
"""
import six
from sys import path
from termcolor import colored
from os import geteuid
from os import path
from .data import *
__version__ = "1.0.0"
__all__ = ["run_console", "run", "GlobalParameters"]
# --------------------------------------------------------------------------
#
# Command line options
#
# --------------------------------------------------------------------------
def run_console(config):
    """
    Console entry point: validate the options object, then delegate to run().

    :param config: GlobalParameters option instance
    :type config: `GlobalParameters`
    :raises: TypeError
    """
    if not isinstance(config, GlobalParameters):
        message = "Expected GlobalParameters, got '%s' instead" % type(config)
        raise TypeError(message)
    #six.print_(colored("[*]", "blue"), "Starting NetPyntest execution")
    run(config)
    #six.print_(colored("[*]", "blue"), "Done!")
# ----------------------------------------------------------------------
#
# API call
#
# ----------------------------------------------------------------------
def run(config):
    """
    Validate the environment, then dispatch the selected attack and action.

    :param config: GlobalParameters option instance
    :type config: `GlobalParameters`
    :raises: TypeError
    """
    if not isinstance(config, GlobalParameters):
        raise TypeError("Expected GlobalParameters, got '%s' instead" % type(config))
    # --------------------------------------------------------------------------
    # CHECK ROOT USER
    # --------------------------------------------------------------------------
    # Packet injection and sniffing need root privileges (euid 0).
    if geteuid():
        six.print_(colored("[!] ERROR - Please run NetPyntest as root.", "red"))
        exit()
    # --------------------------------------------------------------------------
    # CHECK CONFIG FILE
    # --------------------------------------------------------------------------
    if not path.isfile("control_file"):
        six.print_("Creating config_file")
        data = {'mac_flooding_pid': 0, 'port_stealing_pid': 0}
        # Context manager closes the handle even if the write fails
        # (previously the handle was closed manually, unsafely on error).
        with open("control_file", "w") as control_file:
            control_file.write(str(data))
    # --------------------------------------------------------------------------
    # SELECT & LAUNCH ATTACK
    # --------------------------------------------------------------------------
    attack = config.attack[0]
    action = config.action[0]
    # PEP 8: compare against None with 'is'/'is not', not '=='/'!='.
    if config.interface is not None:
        iface = config.interface[0]
        # TODO valid interface and introduce interface in calls
    else:
        iface = "eth0"
    ################ MAC FLOODING ##############
    if attack == "mac_flooding":
        from .libs.plugins.mac_flooding import start
        from .libs.plugins.mac_flooding import stop
        from .libs.plugins.mac_flooding import generate_pcap
        if action == "start":  # TODO This is not working for Python 2
            from sys import version_info
            if version_info[0] >= 3:
                if config.file is not None:
                    file = config.file[0]
                    if path.isfile(file):
                        six.print_("[*] Starting MAC Flooding with file '{}'...".format(file))
                        from scapy.error import Scapy_Exception
                        try:
                            start(file, iface)
                        except Scapy_Exception:
                            six.print_(colored("[!] ERROR - File '{}' is not a valid PCAP file".format(file), "red"))
                    else:
                        six.print_(colored("[!] ERROR - File '{}' doesn't exist.".format(file), "red"))
                else:
                    six.print_(colored("[!] ERROR - You must specify a PCAP file. You can generate one with 'sudo python netpyntest.py mac_flooding generate_pcap'", "red"))
            else:
                six.print_(colored("[!] ERROR - Sorry, currently this feature is only supported in Python 3 or higher", "red"))
        elif action == "stop":
            stop()
        elif action == "generate_pcap":
            if config.size is None:
                six.print_("[*] Generating PCAP file with default size of 10000 packets")
                generate_pcap(10000)
            else:
                size = config.size[0]
                six.print_("[*] Generating PCAP file with size of {} packets".format(size))
                generate_pcap(size)
            six.print_(colored("[*] PCAP file generated", "green"))
        else:
            six.print_(colored("[!] ERROR - Action {} doesn't exist for MAC Flooding attack".format(action), "red"))
    ################ PORT STEALING ##############
    elif attack == "port_stealing":
        if action == "start":
            if config.target is not None:
                target = config.target[0]
                if validate_ip(target):
                    if config.output is not None:
                        output = config.output[0]
                        from .libs.plugins.port_stealing import start
                        six.print_("[*] Starting Port Stealing...")
                        start(target, output, iface)
                    else:
                        six.print_(colored("[!] ERROR - No output file specified (-o)", "red"))
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address (-t)", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t)", "red"))
        elif action == "stop":
            from .libs.plugins.port_stealing import stop
            six.print_("[*] Stopping Port Stealing...")
            stop()
        else:
            six.print_(colored("[!] ERROR - Action {} doesn't exist for Port Stealing attack".format(action), "red"))
    ################ SNMP ##############
    elif attack == "snmp":
        if action == "sniff":
            from .libs.plugins.snmp import sniff_snmp
            six.print_("[*] Starting SNMP sniffing...")
            sniff_snmp(iface)
        elif action == "get":
            # Default to the conventional read-only community string.
            if config.com is not None:
                com = config.com[0]
            else:
                com = "public"
            if config.target is not None:
                target = config.target[0]
                if validate_ip(target):
                    if config.oid is not None:
                        oid = config.oid[0]
                        from .libs.plugins.snmp import snmp_get
                        six.print_("[*] Performing SNMP GET request against host {} and OID {}...".format(target, oid))
                        snmp_get(target, oid, iface, com)
                    else:
                        six.print_(colored("[!] ERROR - No OID specified (-oid)", "red"))
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address.", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t)", "red"))
        elif action == "set":
            # SET needs a community with write access; default "private".
            if config.com is not None:
                com = config.com[0]
            else:
                com = "private"
            if config.target is not None:
                target = config.target[0]
                if validate_ip(target):
                    if config.oid is not None:
                        oid = config.oid[0]
                        if config.value is not None:
                            val = config.value[0]
                            from .libs.plugins.snmp import snmp_set
                            six.print_("[*] Performing SNMP SET request against host {}. Trying to set value {} in object {}...".format(target, val, oid))
                            snmp_set(target, oid, iface, com, val)
                        else:
                            six.print_(colored("[!] ERROR - No value specified (-v)", "red"))
                    else:
                        six.print_(colored("[!] ERROR - No OID specified (-oid)", "red"))
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address (-t)", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t)", "red"))
        elif action == "dictionary_attack":
            if config.target is not None:
                target = config.target[0]
                if validate_ip(target):
                    if config.dict is not None:
                        # Local renamed from 'dict' to stop shadowing the builtin.
                        dict_file = config.dict[0]
                        if path.isfile(dict_file):
                            from .libs.plugins.snmp import dictionary_attack
                            six.print_("[*] Starting SNMP dictionary attack...")
                            dictionary_attack(dict_file, target, iface)
                        else:
                            six.print_(colored("[!] ERROR - File '{}' doesn't exist.".format(dict_file), "red"))
                    else:
                        six.print_(colored("[!] ERROR - You must specify a dictionary file (-d)", "red"))
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address (-t)", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t, --target)", "red"))
        elif action == "dos":
            if config.com is not None:
                com = config.com[0]
            else:
                com = "private"
            if config.target is not None:
                target = config.target[0]
                if validate_ip(target):
                    from .libs.plugins.snmp import snmp_DoS
                    six.print_("[*] Starting DoS attack to host {} with RW community {}...".format(target, com))
                    snmp_DoS(target, iface, com)
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address (-t, --target)", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t)", "red"))
        else:
            six.print_(colored("[!] ERROR - Action {} doesn't exist for SNMP".format(action), "red"))
def validate_ip(s):
    """Return True if *s* is a dotted-quad IPv4 address (four octets 0-255)."""
    octets = s.split('.')
    if len(octets) != 4:
        return False
    return all(octet.isdigit() and 0 <= int(octet) <= 255 for octet in octets)
| en | 0.24038 | # -*- coding: utf-8 -*- This file contains API calls and Data # -------------------------------------------------------------------------- # # Command line options # # -------------------------------------------------------------------------- :param config: GlobalParameters option instance :type config: `GlobalParameters` :raises: TypeError #six.print_(colored("[*]", "blue"), "Starting NetPyntest execution") #six.print_(colored("[*]", "blue"), "Done!") # ---------------------------------------------------------------------- # # API call # # ---------------------------------------------------------------------- :param config: GlobalParameters option instance :type config: `GlobalParameters` :raises: TypeError # -------------------------------------------------------------------------- # CHECK ROOT USER # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # CHECK CONFIG FILE # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # SELECT & LAUNCH ATTACK # -------------------------------------------------------------------------- #TODO valid interface and introduce interface in calls ################ MAC FLOODING ############## #TODO This is not working for Python 2 ################ PORT STEALING ############## ################ SNMP ############## | 2.561464 | 3 |
popularshots/conf/settings.py | gustavohenrique/dribbble-popular-shots | 1 | 6623497 | import os
# Absolute path to the project root (two levels above this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# NOTE(review): '<KEY>' is a scrubbed placeholder -- supply a real secret
# (e.g. from an environment variable) before running in production.
SECRET_KEY = '<KEY>'
# SECURITY NOTE(review): DEBUG=True together with ALLOWED_HOSTS=['*'] is a
# development-only configuration; lock both down for public deployments.
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
    'admin_bootstrap',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'shot',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # SECURITY NOTE(review): CSRF protection is disabled while the next
    # line stays commented out -- confirm this is intentional.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'conf.urls'
WSGI_APPLICATION = 'conf.wsgi.application'
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        #'TEST_NAME': os.path.join(os.path.dirname(__file__), 'test.db'),
    }
}
# Internationalisation/localisation disabled; dates use the formats below.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = True
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
STATIC_ROOT = os.path.join(BASE_DIR, 'media')
USE_ETAGS = True
CORS_ORIGIN_ALLOW_ALL = True
# Accepted date formats: day/month/year or ISO year-month-day.
DATE_FORMAT = '%d/%m/%Y'
DATE_INPUT_FORMATS = ('%d/%m/%Y', '%Y-%m-%d')
DATETIME_INPUT_FORMATS = ('%d/%m/%Y', '%Y-%m-%d')
| import os
# Absolute path to the project root (two levels above this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# NOTE(review): '<KEY>' is a scrubbed placeholder -- supply a real secret
# (e.g. from an environment variable) before running in production.
SECRET_KEY = '<KEY>'
# SECURITY NOTE(review): DEBUG=True together with ALLOWED_HOSTS=['*'] is a
# development-only configuration; lock both down for public deployments.
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
    'admin_bootstrap',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'shot',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # SECURITY NOTE(review): CSRF protection is disabled while the next
    # line stays commented out -- confirm this is intentional.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'conf.urls'
WSGI_APPLICATION = 'conf.wsgi.application'
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        #'TEST_NAME': os.path.join(os.path.dirname(__file__), 'test.db'),
    }
}
# Internationalisation/localisation disabled; dates use the formats below.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = True
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
STATIC_ROOT = os.path.join(BASE_DIR, 'media')
USE_ETAGS = True
CORS_ORIGIN_ALLOW_ALL = True
# Accepted date formats: day/month/year or ISO year-month-day.
DATE_FORMAT = '%d/%m/%Y'
DATE_INPUT_FORMATS = ('%d/%m/%Y', '%Y-%m-%d')
DATETIME_INPUT_FORMATS = ('%d/%m/%Y', '%Y-%m-%d')
| en | 0.112173 | # 'django.middleware.csrf.CsrfViewMiddleware', #'TEST_NAME': os.path.join(os.path.dirname(__file__), 'test.db'), | 1.646654 | 2 |
trains/models.py | llamicron/old_trains | 0 | 6623498 | <reponame>llamicron/old_trains
"""
Stores database models
"""
import os
import sys
from peewee import *
try:
from trains.db import db
except ImportError:
from db import db
class Volunteer(Model):
    """A registered volunteer, keyed by BSA member id."""
    bsa_id = CharField()
    first_name = CharField()
    last_name = CharField()
    email = CharField()
    district = CharField()
    council = CharField()
    # populate() splits the CSV 'unit' column as "<type> <number>";
    # both are set to 0 when no unit is given.
    unit_type = CharField()
    unit = IntegerField()

    class Meta:
        database = db

    @staticmethod
    def populate(data_file):
        """
        Pulls data from CSV source file and writes Volunteer models to db
        """
        with open(data_file, 'r') as fi:
            keys = fi.readline().strip().lower().split(',')
            # BUGFIX: readline() above already consumed the header row, so
            # iterate over every remaining line.  The previous
            # `fi.readlines()[1:]` silently dropped the first data record.
            for row in fi.readlines():
                values = row.strip().split(',')
                vol = dict(zip(keys, values))  # Dictionary of volunteer data
                if not vol['unit']:
                    vol['unit_type'] = 0
                    vol['unit'] = 0
                else:
                    # e.g. "Troop 123" -> type "Troop", number "123"
                    vol['unit_type'] = vol['unit'].split()[0]
                    vol['unit'] = vol['unit'].split()[1]
                Volunteer(
                    bsa_id=vol['memberid'],
                    first_name=vol['first_name'],
                    last_name=vol['last_name'],
                    email=vol['email'],
                    district=vol['district'],
                    council=vol['council'],
                    unit_type=vol['unit_type'],
                    unit=int(vol['unit'])
                ).save()
class Training(Model):
    """A training course, identified by its course code."""
    code = CharField()

    class Meta:
        database = db

    @staticmethod
    def populate(data_file):
        """Load trainings from a CSV file (not implemented yet)."""
        pass
        # Read from csv like on Volunteer
class Role(Model):
    """A volunteer role/position name."""
    role = CharField()

    class Meta:
        database = db

    @staticmethod
    def populate():
        """Load roles from a CSV source (not implemented yet)."""
        # NOTE(review): unlike Volunteer.populate/Training.populate this
        # takes no data_file argument; align the signatures when implemented.
        pass
        # Read from csv like on Volunteer
def create_tables():
    """Create the SQLite db file if missing, then (idempotently) the tables."""
    db_file = 'trains/data/trains.db'
    if not os.path.isfile(db_file):
        # Create an empty database file and close the handle instead of
        # leaking it (the previous bare open() never closed the file).
        open(db_file, 'w').close()
    # True == fail_silently, so existing tables are left untouched.
    Volunteer.create_table(True)
    Training.create_table(True)
    Role.create_table(True)
def update_all():
    """Repopulate the tables from their CSV sources."""
    # NOTE(review): Volunteer.populate() requires a data_file argument, so
    # this call raises TypeError as written -- pass the CSV path here.
    Volunteer.populate()
    # Training.populate()
    # Role.populate()
if __name__ == '__main__':
create_tables()
| """
Stores database models
"""
import os
import sys
from peewee import *
try:
from trains.db import db
except ImportError:
from db import db
class Volunteer(Model):
    """A registered volunteer, keyed by BSA member id."""
    bsa_id = CharField()
    first_name = CharField()
    last_name = CharField()
    email = CharField()
    district = CharField()
    council = CharField()
    # populate() splits the CSV 'unit' column as "<type> <number>";
    # both are set to 0 when no unit is given.
    unit_type = CharField()
    unit = IntegerField()

    class Meta:
        database = db

    @staticmethod
    def populate(data_file):
        """
        Pulls data from CSV source file and writes Volunteer models to db
        """
        with open(data_file, 'r') as fi:
            keys = fi.readline().strip().lower().split(',')
            # BUGFIX: readline() above already consumed the header row, so
            # iterate over every remaining line.  The previous
            # `fi.readlines()[1:]` silently dropped the first data record.
            for row in fi.readlines():
                values = row.strip().split(',')
                vol = dict(zip(keys, values))  # Dictionary of volunteer data
                if not vol['unit']:
                    vol['unit_type'] = 0
                    vol['unit'] = 0
                else:
                    # e.g. "Troop 123" -> type "Troop", number "123"
                    vol['unit_type'] = vol['unit'].split()[0]
                    vol['unit'] = vol['unit'].split()[1]
                Volunteer(
                    bsa_id=vol['memberid'],
                    first_name=vol['first_name'],
                    last_name=vol['last_name'],
                    email=vol['email'],
                    district=vol['district'],
                    council=vol['council'],
                    unit_type=vol['unit_type'],
                    unit=int(vol['unit'])
                ).save()
class Training(Model):
    """A training course, identified by its course code."""
    code = CharField()

    class Meta:
        database = db

    @staticmethod
    def populate(data_file):
        """Load trainings from a CSV file (not implemented yet)."""
        pass
        # Read from csv like on Volunteer
class Role(Model):
    """A volunteer role/position name."""
    role = CharField()

    class Meta:
        database = db

    @staticmethod
    def populate():
        """Load roles from a CSV source (not implemented yet)."""
        # NOTE(review): unlike Volunteer.populate/Training.populate this
        # takes no data_file argument; align the signatures when implemented.
        pass
        # Read from csv like on Volunteer
def create_tables():
    """Create the SQLite database file (if missing) and all model tables.

    Assumes the 'trains/data' directory already exists -- TODO confirm.
    """
    db_file = 'trains/data/trains.db'
    if not os.path.isfile(db_file):
        # BUG FIX: the original left the file object from open() unclosed,
        # leaking a file handle.  Create the empty file and close it at once.
        open(db_file, 'w').close()
    Volunteer.create_table(True)
    Training.create_table(True)
    Role.create_table(True)
def update_all():
    """Refresh table contents from their CSV sources."""
    # NOTE(review): Volunteer.populate requires a data_file argument, so this
    # call raises TypeError as written -- confirm the intended CSV path.
    Volunteer.populate()
    # Training.populate()
    # Role.populate()

if __name__ == '__main__':
    # Running this module directly only (re)creates the tables.
    create_tables()
adi_study_watch/jenkins/zip_gen.py | ArrowElectronics/Vital-Signs-Monitoring | 5 | 6623499 | <reponame>ArrowElectronics/Vital-Signs-Monitoring<filename>adi_study_watch/jenkins/zip_gen.py
import os
import shutil
import time
import comtypes.client
#from docx2pdf import convert
# Repository root: two directory levels above this script.
src_path = os.path.abspath(os.path.join(__file__, '../../'))
# Staging directory the release tree is copied into before zipping.
dest_path = r'zip_gen_tmp/adi_study_watch'
# File listing repo-relative paths to strip from the release package.
excludes_file_path = os.path.join(src_path, 'jenkins', 'zip_gen_excludes.txt')
def copy_repo_package():
    """Stage a copy of the repository at dest_path, omitting the .git directory."""
    git_filter = shutil.ignore_patterns('.git')
    shutil.copytree(src_path, dest_path, ignore=git_filter)
def convert_docx_to_pdf():
    """Convert every Word document under the staged doc directories to PDF.

    Uses Word COM automation via comtypes, so this only runs on Windows
    with Microsoft Word installed.
    """
    doc_dir_list = [r'nrf5_sdk_15.2.0/adi_study_watch/doc']
    word = comtypes.client.CreateObject('Word.Application')
    for doc_dir in doc_dir_list:
        doc_dir_path = os.path.abspath(os.path.join(dest_path, doc_dir))
        for file in os.listdir(doc_dir_path):
            # Matches .doc and .docx (any extension containing 'doc').
            if 'doc' in os.path.splitext(file)[-1].lower():
                file_name = os.path.splitext(file)[0]
                doc_file_path = os.path.abspath(os.path.join(doc_dir_path, file))
                pdf_file_path = os.path.abspath(os.path.join(doc_dir_path, file_name+'.pdf'))
                print('Doc File Path:', doc_file_path)
                doc = word.Documents.Open(doc_file_path)
                doc.SaveAs(pdf_file_path, FileFormat=17)  # 17 = wdFormatPDF
                doc.Close()
                #convert(doc_file_path)
                time.sleep(3)  # presumably gives Word time to release the file -- confirm
    word.Quit()
def filter_out_dir(excludes_file):
    """Delete every file or directory listed (one path per line) in *excludes_file*."""
    with open(excludes_file, 'r') as handle:
        entries = [entry.strip() for entry in handle.readlines() if entry.strip()]
    for entry in entries:
        target = os.path.join(os.path.abspath(dest_path), entry)
        if os.path.isfile(target):
            os.remove(target)
        elif os.path.isdir(target):
            shutil.rmtree(target)
        else:
            # Missing entries are reported but not fatal.
            print('Filter Ignoring File/Dir Path:', target)
def update_project_excludes():
    """Mark the tracealyser RTT stream port as excluded from Debug and Release
    builds inside the SEGGER Embedded Studio project file."""
    proj_file = os.path.abspath(os.path.join(dest_path, 'nrf5_sdk_15.2.0/adi_study_watch/app/nRF52840_app/ses/watchv4_nrf52840.emProject'))
    # The bare <file .../> entry is swapped for one carrying per-configuration
    # build-exclusion flags.
    ref_str = r'<file file_name="../../../utilities/tracealyser/trace_recorder/streamports/Jlink_RTT/SEGGER_RTT_tracealyser.c" />'
    exclude_str = r'''<file file_name="../../../utilities/tracealyser/trace_recorder/streamports/Jlink_RTT/SEGGER_RTT_tracealyser.c">
      <configuration Name="Debug" build_exclude_from_build="Yes" />
      <configuration Name="Release" build_exclude_from_build="Yes" />
    </file>'''
    # Rewrite the project file in place: read, substitute, rewind, truncate.
    with open(proj_file, "r+") as f:
        proj_file_str = f.read()
        proj_file_str = proj_file_str.replace(ref_str, exclude_str)
        f.seek(0)
        f.write(proj_file_str)
        f.truncate()
def generate_zip():
    """Zip the staging area into adi_study_watch.zip in the current directory."""
    archive_root = os.path.abspath(os.path.join(dest_path, '../'))
    shutil.make_archive('adi_study_watch', 'zip', archive_root)
if __name__ == '__main__':
    # Stage, prune, patch and zip the release package, logging epoch timestamps
    # after each step.
    print('Start Time:', round(time.time()))
    copy_repo_package()
    print('Copy Done Time:', round(time.time()))
    # convert_docx_to_pdf()
    # print('Doc2Pdf Conversion Done Time:', round(time.time()))
    filter_out_dir(excludes_file_path)
    print('Filter Done Time:', round(time.time()))
    update_project_excludes()
    print('Project File Update Done Time:', round(time.time()))
    generate_zip()
print('Zip Generation Done Time:', round(time.time())) | import os
import shutil
import time
import comtypes.client
#from docx2pdf import convert
# Repository root: two directory levels above this script.
src_path = os.path.abspath(os.path.join(__file__, '../../'))
# Staging directory the release tree is copied into before zipping.
dest_path = r'zip_gen_tmp/adi_study_watch'
# File listing repo-relative paths to strip from the release package.
excludes_file_path = os.path.join(src_path, 'jenkins', 'zip_gen_excludes.txt')
def copy_repo_package():
    """Stage a copy of the repository at dest_path, omitting the .git directory."""
    git_filter = shutil.ignore_patterns('.git')
    shutil.copytree(src_path, dest_path, ignore=git_filter)
def convert_docx_to_pdf():
    """Convert every Word document under the staged doc directories to PDF.

    Uses Word COM automation via comtypes, so this only runs on Windows
    with Microsoft Word installed.
    """
    doc_dir_list = [r'nrf5_sdk_15.2.0/adi_study_watch/doc']
    word = comtypes.client.CreateObject('Word.Application')
    for doc_dir in doc_dir_list:
        doc_dir_path = os.path.abspath(os.path.join(dest_path, doc_dir))
        for file in os.listdir(doc_dir_path):
            # Matches .doc and .docx (any extension containing 'doc').
            if 'doc' in os.path.splitext(file)[-1].lower():
                file_name = os.path.splitext(file)[0]
                doc_file_path = os.path.abspath(os.path.join(doc_dir_path, file))
                pdf_file_path = os.path.abspath(os.path.join(doc_dir_path, file_name+'.pdf'))
                print('Doc File Path:', doc_file_path)
                doc = word.Documents.Open(doc_file_path)
                doc.SaveAs(pdf_file_path, FileFormat=17)  # 17 = wdFormatPDF
                doc.Close()
                #convert(doc_file_path)
                time.sleep(3)  # presumably gives Word time to release the file -- confirm
    word.Quit()
def filter_out_dir(excludes_file):
    """Delete every file or directory listed (one path per line) in *excludes_file*."""
    with open(excludes_file, 'r') as handle:
        entries = [entry.strip() for entry in handle.readlines() if entry.strip()]
    for entry in entries:
        target = os.path.join(os.path.abspath(dest_path), entry)
        if os.path.isfile(target):
            os.remove(target)
        elif os.path.isdir(target):
            shutil.rmtree(target)
        else:
            # Missing entries are reported but not fatal.
            print('Filter Ignoring File/Dir Path:', target)
def update_project_excludes():
    """Mark the tracealyser RTT stream port as excluded from Debug and Release
    builds inside the SEGGER Embedded Studio project file."""
    proj_file = os.path.abspath(os.path.join(dest_path, 'nrf5_sdk_15.2.0/adi_study_watch/app/nRF52840_app/ses/watchv4_nrf52840.emProject'))
    # The bare <file .../> entry is swapped for one carrying per-configuration
    # build-exclusion flags.
    ref_str = r'<file file_name="../../../utilities/tracealyser/trace_recorder/streamports/Jlink_RTT/SEGGER_RTT_tracealyser.c" />'
    exclude_str = r'''<file file_name="../../../utilities/tracealyser/trace_recorder/streamports/Jlink_RTT/SEGGER_RTT_tracealyser.c">
      <configuration Name="Debug" build_exclude_from_build="Yes" />
      <configuration Name="Release" build_exclude_from_build="Yes" />
    </file>'''
    # Rewrite the project file in place: read, substitute, rewind, truncate.
    with open(proj_file, "r+") as f:
        proj_file_str = f.read()
        proj_file_str = proj_file_str.replace(ref_str, exclude_str)
        f.seek(0)
        f.write(proj_file_str)
        f.truncate()
def generate_zip():
    """Zip the staging area into adi_study_watch.zip in the current directory."""
    archive_root = os.path.abspath(os.path.join(dest_path, '../'))
    shutil.make_archive('adi_study_watch', 'zip', archive_root)
if __name__ == '__main__':
    # Stage, prune, patch and zip the release package, logging epoch timestamps
    # after each step.
    print('Start Time:', round(time.time()))
    copy_repo_package()
    print('Copy Done Time:', round(time.time()))
    # convert_docx_to_pdf()
    # print('Doc2Pdf Conversion Done Time:', round(time.time()))
    filter_out_dir(excludes_file_path)
    print('Filter Done Time:', round(time.time()))
    update_project_excludes()
    print('Project File Update Done Time:', round(time.time()))
    generate_zip()
print('Zip Generation Done Time:', round(time.time())) | en | 0.286936 | #from docx2pdf import convert #convert(doc_file_path) <file file_name="../../../utilities/tracealyser/trace_recorder/streamports/Jlink_RTT/SEGGER_RTT_tracealyser.c"> <configuration Name="Debug" build_exclude_from_build="Yes" /> <configuration Name="Release" build_exclude_from_build="Yes" /> </file> # convert_docx_to_pdf() # print('Doc2Pdf Conversion Done Time:', round(time.time())) | 2.559565 | 3 |
setup.py | xando/django-modelhistory | 1 | 6623500 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from modelhistory import VERSION
# Long description shown on PyPI.
long_description = """Simple model logging helper for django projects. Covers functionality of discovering action taken on object: DELETE, UPDATE, CREATE and creates and saves suitable message to database. Supports simple db.models.Models objects as well as forms, formset and inlineformset based on Django ORM Models."""
# NOTE(review): 'loggin' typo in the description string below is runtime
# metadata and left untouched here.
setup(
    name='django-modelhistory',
    # VERSION is a tuple, e.g. (0, 1, 2) -> '0.1.2'.
    version=".".join(map(str, VERSION)),
    description='django-modelhistory reusable application for loggin models changes.',
    long_description=long_description,
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/xando/django-modelhistory',
    packages=find_packages(),
    zip_safe=False,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
    ],
    test_suite='modelhistory.tests.runtests.runtests'
)
| # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from modelhistory import VERSION
# Long description shown on PyPI.
long_description = """Simple model logging helper for django projects. Covers functionality of discovering action taken on object: DELETE, UPDATE, CREATE and creates and saves suitable message to database. Supports simple db.models.Models objects as well as forms, formset and inlineformset based on Django ORM Models."""
# NOTE(review): 'loggin' typo in the description string below is runtime
# metadata and left untouched here.
setup(
    name='django-modelhistory',
    # VERSION is a tuple, e.g. (0, 1, 2) -> '0.1.2'.
    version=".".join(map(str, VERSION)),
    description='django-modelhistory reusable application for loggin models changes.',
    long_description=long_description,
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/xando/django-modelhistory',
    packages=find_packages(),
    zip_safe=False,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
    ],
    test_suite='modelhistory.tests.runtests.runtests'
)
| en | 0.855606 | # -*- coding: utf-8 -*- Simple model logging helper for django projects. Covers functionality of discovering action taken on object: DELETE, UPDATE, CREATE and creates and saves suitable message to database. Supports simple db.models.Models objects as well as forms, formset and inlineformset based on Django ORM Models. | 1.852275 | 2 |
amalia/archetypes/TimeSeriesArchetype.py | Aganonce/AMALIA-lite | 0 | 6623501 | import logging
logger = logging.getLogger(__name__.split('.')[-1])
import sys
import itertools
import numpy as np
import pandas as pd
import scipy.sparse as ss
import tools.Cache as Cache
class TimeSeriesArchetype:
    '''
    Time series archetype generates a sparse matrix representation
    of user time series within primary dataframes. Each time series
    is created by binning activity within some time delta.
    Parameters
    ----------
    time_delta : int (default : 86400)
        The time range (in seconds) to bin activity together.
    base_action : dict (default : {'Twitter' : 'tweet'})
        A dictionary of base event types, where the key is
        the platform and the value is the action type that
        represents base activity within the platform.
    Output
    ------
    compute() returns a dictionary keyed by platform whose values are
    csc sparse matrices of binned base-activity counts (users x time
    bins).  The row index can be mapped to a userID via the platform's
    node_map.
    NOTE(review): an earlier docstring described a nested dictionary
    keyed by information id; the code builds a flat platform -> matrix
    mapping (see compute()).
    Notes
    -----
    A csc matrix is used here because it performs quicker
    column-slicing operations, which makes for faster
    replay segmentation within the ReplayTimeSeriesFeature
    specifically.
    '''
    def __init__(self, cfg):
        # Bin width in seconds (cfg supplies a pandas Timedelta).
        self.time_delta = cfg.get('limits.time_delta', type=pd.Timedelta).total_seconds()
        # Per-platform action type that counts as "base" activity.
        self.base_action = cfg.get('time_series_archetype.base_actions')
        self.cfg = cfg
    @Cache.amalia_cache
    def compute(self, dfs):
        """Build, per platform, a (users x time-bins) csc matrix of base-action counts."""
        logger.info('Generating base activity time series.')
        platforms = dfs.get_platforms()
        res = {}
        for platform in platforms:
            min_time, max_time = dfs.get_time_range(platform)
            # +1 so the final (partial) bin is representable.
            time_steps = int(_get_time_bins(max_time, min_time, self.time_delta) + 1)
            node_map = dfs.get_node_map(platform)
            res[platform] = _process_function(dfs.get_df(platform), node_map, min_time, self.time_delta,
                                              time_steps, self.base_action[platform])
        return res
def _process_function(df, node_map, min_time, time_delta, time_steps, base_action, *args, **kwargs):
df = df[df['actionType'] == base_action]
data = np.ones(len(df))
row_ind = np.searchsorted(node_map, df.nodeUserID)
col_ind = np.maximum(_get_time_bins(df.nodeTime, min_time, time_delta), np.zeros(len(df)))
return ss.csc_matrix((data, (row_ind, col_ind)), shape=(len(node_map), time_steps), dtype=np.uint32)
def _get_time_bins(max_time, min_time, time_delta):
return ((max_time - (max_time % time_delta)) - min_time) // time_delta | import logging
logger = logging.getLogger(__name__.split('.')[-1])
import sys
import itertools
import numpy as np
import pandas as pd
import scipy.sparse as ss
import tools.Cache as Cache
class TimeSeriesArchetype:
    '''
    Time series archetype generates a sparse matrix representation
    of user time series within primary dataframes. Each time series
    is created by binning activity within some time delta.
    Parameters
    ----------
    time_delta : int (default : 86400)
        The time range (in seconds) to bin activity together.
    base_action : dict (default : {'Twitter' : 'tweet'})
        A dictionary of base event types, where the key is
        the platform and the value is the action type that
        represents base activity within the platform.
    Output
    ------
    compute() returns a dictionary keyed by platform whose values are
    csc sparse matrices of binned base-activity counts (users x time
    bins).  The row index can be mapped to a userID via the platform's
    node_map.
    NOTE(review): an earlier docstring described a nested dictionary
    keyed by information id; the code builds a flat platform -> matrix
    mapping (see compute()).
    Notes
    -----
    A csc matrix is used here because it performs quicker
    column-slicing operations, which makes for faster
    replay segmentation within the ReplayTimeSeriesFeature
    specifically.
    '''
    def __init__(self, cfg):
        # Bin width in seconds (cfg supplies a pandas Timedelta).
        self.time_delta = cfg.get('limits.time_delta', type=pd.Timedelta).total_seconds()
        # Per-platform action type that counts as "base" activity.
        self.base_action = cfg.get('time_series_archetype.base_actions')
        self.cfg = cfg
    @Cache.amalia_cache
    def compute(self, dfs):
        """Build, per platform, a (users x time-bins) csc matrix of base-action counts."""
        logger.info('Generating base activity time series.')
        platforms = dfs.get_platforms()
        res = {}
        for platform in platforms:
            min_time, max_time = dfs.get_time_range(platform)
            # +1 so the final (partial) bin is representable.
            time_steps = int(_get_time_bins(max_time, min_time, self.time_delta) + 1)
            node_map = dfs.get_node_map(platform)
            res[platform] = _process_function(dfs.get_df(platform), node_map, min_time, self.time_delta,
                                              time_steps, self.base_action[platform])
        return res
def _process_function(df, node_map, min_time, time_delta, time_steps, base_action, *args, **kwargs):
df = df[df['actionType'] == base_action]
data = np.ones(len(df))
row_ind = np.searchsorted(node_map, df.nodeUserID)
col_ind = np.maximum(_get_time_bins(df.nodeTime, min_time, time_delta), np.zeros(len(df)))
return ss.csc_matrix((data, (row_ind, col_ind)), shape=(len(node_map), time_steps), dtype=np.uint32)
def _get_time_bins(max_time, min_time, time_delta):
return ((max_time - (max_time % time_delta)) - min_time) // time_delta | en | 0.847996 | Time series archetype generates a sparse matrix representation of user time series within primary dataframes. Each time series is created by binning activity within some time delta. Parameters ---------- time_delta : int (default : 86400) The time range (in seconds) to bin activity together. base_action : dict (defalt : {'Twitter' : 'tweet'}) A dictionary of base event types, where the key is the platform and the value is the action type that represents base activity within the platform. Output ------ This class outputs a dictionary of a dictionary of csc matrices, where the key is the platform, the second key if the information id and the value is a csc sparse matrix that represents the binned activity time series associated with the platform and information id. The row index can be mapped to a userID given the platform's node_map. All time series data that is not associated with an information id is binned in 'None'. Notes ----- A csc matrix is used here because it performs quicker column-slicing operations, which makes for faster replay segmentation within the ReplayTimeSeriesFeature specifically. | 2.641551 | 3 |
application/product/models.py | riihikallio/tsoha | 0 | 6623502 | from application import db
from application.models import Base
class Product(Base):
    """A sellable product (extends the shared Base model)."""
    number = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    name = db.Column(db.String(144), nullable=False)
    unit = db.Column(db.String(10), nullable=False)   # unit of sale -- presumably e.g. 'kg'; confirm
    price = db.Column(db.Float(), nullable=False)     # price per unit
    category = db.Column(db.String(144), nullable=False, index=True)  # indexed for category filtering
    def __init__(self, name, unit, price, category):
        """Create a product; `number` is assigned by the database."""
        self.name = name
        self.unit = unit
        self.price = price
        self.category = category
| from application import db
from application.models import Base
class Product(Base):
number = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(144), nullable=False)
unit = db.Column(db.String(10), nullable=False)
price = db.Column(db.Float(), nullable=False)
category = db.Column(db.String(144), nullable=False, index=True)
def __init__(self, name, unit, price, category):
self.name = name
self.unit = unit
self.price = price
self.category = category
| none | 1 | 2.731282 | 3 | |
active_learning_lab/data/embeddings.py | webis-de/acl22-revisiting-uncertainty-based-query-strategies-for-active-learning-with-transformers | 0 | 6623503 | <filename>active_learning_lab/data/embeddings.py
import torch
import numpy as np
from pathlib import Path
def get_embedding_matrix(name, vocab, data_dir='.data/'):
    """Load (or download and cache) word2vec vectors, then build an embedding matrix for *vocab*."""
    from gensim.models.word2vec import Word2VecKeyedVectors

    embedding_dir = Path(data_dir).joinpath('embeddings')
    embedding_dir.mkdir(parents=True, exist_ok=True)
    serialized_file = embedding_dir.joinpath(name + '.bin')

    if serialized_file.exists():
        # Reuse the cached copy, memory-mapped read-only.
        model = Word2VecKeyedVectors.load(str(serialized_file.resolve()), mmap='r')
    else:
        # First run: download the pretrained vectors and cache them locally.
        import gensim.downloader as api
        model = api.load('word2vec-google-news-300')
        model.save(str(serialized_file.resolve()))
    return _build_embedding_matrix_from_keyedvectors(model, vocab)
def _build_embedding_matrix_from_keyedvectors(pretrained_vectors, vocab, min_freq=1):
vectors = [
np.zeros(pretrained_vectors.vectors.shape[1]) # <pad>
]
num_special_vectors = len(vectors)
vectors += [
pretrained_vectors.vectors[pretrained_vectors.vocab[vocab.itos[i]].index]
if vocab.itos[i] in pretrained_vectors.vocab
else np.zeros(pretrained_vectors.vectors.shape[1])
for i in range(num_special_vectors, len(vocab))
]
for i in range(num_special_vectors, len(vocab)):
if vocab.itos[i] not in pretrained_vectors.vocab and vocab.freqs[vocab.itos[i]] >= min_freq:
vectors[i] = np.random.uniform(-0.25, 0.25, pretrained_vectors.vectors.shape[1])
return torch.as_tensor(np.stack(vectors))
| <filename>active_learning_lab/data/embeddings.py
import torch
import numpy as np
from pathlib import Path
def get_embedding_matrix(name, vocab, data_dir='.data/'):
from gensim.models.word2vec import Word2VecKeyedVectors
embedding_dir = Path(data_dir).joinpath('embeddings')
embedding_dir.mkdir(parents=True, exist_ok=True)
serialized_file = embedding_dir.joinpath(name + '.bin')
if not serialized_file.exists():
import gensim.downloader as api
model = api.load('word2vec-google-news-300')
model.save(str(serialized_file.resolve()))
return _build_embedding_matrix_from_keyedvectors(model, vocab)
else:
model = Word2VecKeyedVectors.load(str(serialized_file.resolve()), mmap='r')
return _build_embedding_matrix_from_keyedvectors(model, vocab)
def _build_embedding_matrix_from_keyedvectors(pretrained_vectors, vocab, min_freq=1):
vectors = [
np.zeros(pretrained_vectors.vectors.shape[1]) # <pad>
]
num_special_vectors = len(vectors)
vectors += [
pretrained_vectors.vectors[pretrained_vectors.vocab[vocab.itos[i]].index]
if vocab.itos[i] in pretrained_vectors.vocab
else np.zeros(pretrained_vectors.vectors.shape[1])
for i in range(num_special_vectors, len(vocab))
]
for i in range(num_special_vectors, len(vocab)):
if vocab.itos[i] not in pretrained_vectors.vocab and vocab.freqs[vocab.itos[i]] >= min_freq:
vectors[i] = np.random.uniform(-0.25, 0.25, pretrained_vectors.vectors.shape[1])
return torch.as_tensor(np.stack(vectors))
| none | 1 | 2.593697 | 3 | |
models/worker.py | xuanthuong/golfgame | 0 | 6623504 | # -*- coding: utf-8 -*-
# Description: worker_tb table
# By Thuong.Tran
# Date: 29 Aug 2017
from sqlalchemy import create_engine, Table, Column, MetaData, Integer, Text, DateTime, Float
from sqlalchemy import select
class worker():
    """Data-access helper for the gmf_wrkr (worker) table."""
    def __init__(self, db_url):
        """Connect to *db_url* and create the gmf_wrkr table if it does not exist."""
        _engine = create_engine(db_url)
        _connection = _engine.connect()
        _metadata = MetaData()
        _worker_tb = Table("gmf_wrkr", _metadata,
                           Column("WRKR_ID", Integer, primary_key=True),
                           Column("WRKR_NM", Text),
                           Column("CRE_DT", DateTime))
        _metadata.create_all(_engine)
        self.connection = _connection
        self.worker_tb = _worker_tb
    def insert_to(self, data):
        """Insert one worker row; *data* maps column names to values."""
        # Removed the dead `is_valid = True` / `if is_valid:` scaffolding --
        # the flag could never be False, so the guard was a no-op.
        ins_query = self.worker_tb.insert().values(data)
        self.connection.execute(ins_query)
    def get_all(self):
        """Return a result proxy over every worker row."""
        s = select([self.worker_tb])
        result = self.connection.execute(s)
        return result
    def get_by_username(self, user_name):
        """Return the WRKR_ID of the first row whose name equals user_name.lower(), or None."""
        s = select([self.worker_tb]).where(self.worker_tb.c.WRKR_NM == user_name.lower())
        for obj in self.connection.execute(s):
            return obj['WRKR_ID']
        return None  # explicit: no matching row
    def get_username_by_id(self, user_id):
        """Return the WRKR_NM for *user_id*, or None when no such row exists."""
        s = select([self.worker_tb]).where(self.worker_tb.c.WRKR_ID == user_id)
        for obj in self.connection.execute(s):
            return obj['WRKR_NM']
        return None  # explicit: no matching row
| # -*- coding: utf-8 -*-
# Description: worker_tb table
# By Thuong.Tran
# Date: 29 Aug 2017
from sqlalchemy import create_engine, Table, Column, MetaData, Integer, Text, DateTime, Float
from sqlalchemy import select
class worker():
    """Data-access helper for the gmf_wrkr (worker) table."""
    def __init__(self, db_url):
        """Connect to *db_url* and create the gmf_wrkr table if it does not exist."""
        _engine = create_engine(db_url)
        _connection = _engine.connect()
        _metadata = MetaData()
        _worker_tb = Table("gmf_wrkr", _metadata,
                           Column("WRKR_ID", Integer, primary_key=True),
                           Column("WRKR_NM", Text),
                           Column("CRE_DT", DateTime))
        _metadata.create_all(_engine)
        self.connection = _connection
        self.worker_tb = _worker_tb
    def insert_to(self, data):
        """Insert one worker row; *data* maps column names to values."""
        # Removed the dead `is_valid = True` / `if is_valid:` scaffolding --
        # the flag could never be False, so the guard was a no-op.
        ins_query = self.worker_tb.insert().values(data)
        self.connection.execute(ins_query)
    def get_all(self):
        """Return a result proxy over every worker row."""
        s = select([self.worker_tb])
        result = self.connection.execute(s)
        return result
    def get_by_username(self, user_name):
        """Return the WRKR_ID of the first row whose name equals user_name.lower(), or None."""
        s = select([self.worker_tb]).where(self.worker_tb.c.WRKR_NM == user_name.lower())
        for obj in self.connection.execute(s):
            return obj['WRKR_ID']
        return None  # explicit: no matching row
    def get_username_by_id(self, user_id):
        """Return the WRKR_NM for *user_id*, or None when no such row exists."""
        s = select([self.worker_tb]).where(self.worker_tb.c.WRKR_ID == user_id)
        for obj in self.connection.execute(s):
            return obj['WRKR_NM']
        return None  # explicit: no matching row
| en | 0.591127 | # -*- coding: utf-8 -*- # Description: worker_tb table # By Thuong.Tran # Date: 29 Aug 2017 | 2.923253 | 3 |
PyNN/brunel_test.py | OpenSourceBrain/Brunel2000 | 4 | 6623505 | <reponame>OpenSourceBrain/Brunel2000<gh_stars>1-10
"""
A scaled down version of the Brunel model useful for testing (see OMV files: .test.*)
"""
from brunel08 import runBrunelNetwork
from pyNN.utility import get_script_args
simulator_name = get_script_args(1)[0]  # simulator backend chosen on the command line
simtime = 1000  # simulation duration -- presumably ms; confirm against brunel08
order = 100  # network size scale factor (scaled down for testing)
eta = 2.0 # rel rate of external input
g = 5.0  # relative inhibitory strength -- standard Brunel parameter; confirm
runBrunelNetwork(g=g, eta=eta, simtime = simtime, order = order, save=True, simulator_name=simulator_name,N_rec=500)
| """
A scaled down version of the Brunel model useful for testing (see OMV files: .test.*)
"""
from brunel08 import runBrunelNetwork
from pyNN.utility import get_script_args
simulator_name = get_script_args(1)[0]  # simulator backend chosen on the command line
simtime = 1000  # simulation duration -- presumably ms; confirm against brunel08
order = 100  # network size scale factor (scaled down for testing)
eta = 2.0 # rel rate of external input
g = 5.0  # relative inhibitory strength -- standard Brunel parameter; confirm
runBrunelNetwork(g=g, eta=eta, simtime = simtime, order = order, save=True, simulator_name=simulator_name,N_rec=500)
compose/apps.py | iafisher/writingstreak | 2 | 6623506 | from django.apps import AppConfig
class ComposeConfig(AppConfig):
    """Django AppConfig registering the 'compose' application."""
    name = 'compose'
| from django.apps import AppConfig
class ComposeConfig(AppConfig):
name = 'compose'
| none | 1 | 1.202895 | 1 | |
respet/recon/reconstruction.py | jjleewustledu/NiftyPETy | 0 | 6623507 | import numpy as np
import os
import logging, sys
# create and configure main logger;
# see also https://stackoverflow.com/questions/50714316/how-to-use-logging-getlogger-name-in-multiple-modules/50715155#50715155
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
class Reconstruction(object):
__author__ = "<NAME>"
__copyright__ = "Copyright 2018"
# see also: mmrrec.py osemone param mask_radious
bootstrap = 0
cached_hdw_mumap = None
datain = {}
DCYCRR = True
DEVID = 0
histogram_si = 63 # sinogram index (<127 for direct sinograms)
hmuSelection = [1,4,5] # selects from ~/.niftypet/resources.py: hrdwr_mu
minTime = 0
mMRparams = {}
outfolder = 'output'
phantom = False
recmod = 3
tracerMemory = None
umap4dfp='umapSynth.4dfp'
umapFolder = 'umap'
umapSynthFileprefix = ''
use_mirror_hdw_mumap = True
use_stored_hdw_mumap = False
use_stored_hist = False
verbose = True
@property
def outpath(self):
"""
:return e.g., '/work/HYGLY48/V1/OO1_V1-Converted-NAC/output':
"""
return os.path.join(self.datain['corepath'], self.outfolder)
@property
def PETpath(self):
"""
:return e.g., '/work/HYGLY48/V1/OO1_V1-Converted-NAC/output/PET':
"""
return os.path.join(self.outpath, 'PET')
@property
def reconstruction_finished(self):
return os.path.exists(self._filename_to_finish('_finished'))
@property
def reconstruction_started(self):
return os.path.exists(self._filename_to_touch('_started'))
@property
def tracer(self):
"""
:return e.g., 'OO1':
"""
import re
return re.split('_', os.path.basename(self.tracerRawdataLocation))[0]
    @property
    def tracerRawdataLocation(self):
        """
        :return e.g., '/work/HYGLY48/V1/OO1_V1-Converted-NAC':
        """
        # Backing field _tracerRawdataLocation is assigned outside this view
        # (presumably in __init__) -- confirm.
        return self._tracerRawdataLocation
def tracerRawdataLocation_with(self, ac=False):
from re import compile
conv = compile('-Converted-')
s = conv.search(self._tracerRawdataLocation)
baseloc = self._tracerRawdataLocation[:s.end()-1]
if not ac:
return baseloc+'-NAC'
else:
return baseloc+'-AC'
@property
def visitStr(self):
"""
e.g., for 'FDG_DT1234567789.000000-Converted-NAC' and 'FDG_V1-Converted-NAC'
:return 'dt123456789' and 'v1':
"""
import re
v = re.split('_', os.path.basename(self.tracerRawdataLocation))[1]
v = re.split('-Converted', v)[0]
w = re.split('\.', v)[0]
if not w:
return v.lower()
else:
return w.upper()
    def createStaticNAC(self, time0=None, timeF=None, fcomment='_createStaticNAC'):
        """Reconstruct one static frame without attenuation correction (recmod 0)."""
        self.recmod = 0
        self.bootstrap = 0
        return self.createStatic(self.muNAC(), 0, time0, timeF, fcomment=fcomment,)
    def createStaticUTE(self, time0=None, timeF=None, fcomment='_createStaticUTE'):
        """Reconstruct one static frame using the UTE mu-map (recmod 3)."""
        self.recmod = 3
        self.bootstrap = 0
        self.checkUmaps(self.muUTE(), fcomment)
        #wtime = self.getWTime(self.json_filename_with(ac=False))
        return self.createStatic(self.muUTE(), 0, time0, timeF, fcomment=fcomment)
    def createStaticCarney(self, time0=None, timeF=None, fcomment='_createStaticCarney'):
        """Reconstruct one static frame with the Carney mu-map, offset by the NAC wait time."""
        print("########## respet.recon.reconstruction.Reconstruction.createStaticCarney ##########")
        self.checkUmaps(self.muCarney(frames=[0]), fcomment)
        self.checkHistogramming(fcomment)
        # Wait time previously recorded by the NAC reconstruction's JSON.
        wtime = self.getWTime(self.json_filename_with(ac=False))
        return self.createStatic(self.muCarney(frames=[0]), wtime, time0, timeF, fcomment=fcomment)
    def createPhantom(self, time0=None, timeF=None, fcomment='_createPhantom'):
        """Reconstruct a phantom acquisition as one static frame with the Carney mu-map."""
        print("########## respet.recon.reconstruction.Reconstruction.createPhantom ##########")
        self.phantom = True
        self.checkUmaps(self.muCarney(), fcomment)
        self.checkHistogramming(fcomment)
        return self.createStatic(self.muCarney(), 0, time0, timeF, fcomment=fcomment)
    def createDynamicNAC(self, fcomment='_createDynamicNAC'):
        """Reconstruct the full dynamic series without attenuation correction (recmod 0)."""
        print("########## respet.recon.reconstruction.Reconstruction.createDynamicNAC ##########")
        self.recmod = 0
        self.bootstrap = 0
        self.checkUmaps(self.muHardware(), fcomment)
        self.checkHistogramming(fcomment)
        return self.createDynamic(self.getTaus(), self.muNAC(), fcomment)
def createDynamicUTE(self, fcomment='_createDynamicUTE'):
print("########## respet.recon.reconstruction.Reconstruction.createDynamicNAC ##########")
self.checkUmaps(self.muUTE(), fcomment)
self.checkHistogramming(fcomment)
return self.createDynamic(self.getTaus(), self.muUTE(), fcomment)
    def createDynamic2Carney(self, fcomment='_createDynamic2Carney'):
        """Reconstruct the dynamic series on the finer taus2 schedule with Carney mu-maps,
        starting no earlier than minTime or the NAC wait time."""
        print("########## respet.recon.reconstruction.Reconstruction.createDynamic2Carney ##########")
        self.checkUmaps(self.muCarney(frames=[0]), fcomment)
        self.checkHistogramming(fcomment)
        # NAC run's frame durations and wait time, read back from its JSON.
        taus = self.getTaus(self.json_filename_with(ac=False))
        offset = self.getWTime(self.json_filename_with(ac=False))
        return self.createDynamic2(max(offset, self.minTime), taus, self.getTaus2(), fcomment)
    def createStatic(self, muo, wtime=0, time0=None, timeF=None, fcomment='_createStatic'):
        """
        :param muo mu-map of imaged object:
        :param wtime is determined by createDynamic:
        :param time0 int sec:
        :param timeF int sec:
        :param fcomment string for naming subspace:
        :return result from nipet.mmrchain:
        :rtype dictionary:
        """
        from niftypet import nipet
        self.mMRparams['Cnt']['VERBOSE'] = self.verbose
        self.mMRparams['Cnt']['DCYCRR'] = self.DCYCRR
        # Bail out when another process already claimed this reconstruction.
        if self.reconstruction_started:
            logging.debug('reconstruction.Reconstruction.createDynamics.reconstruction_started == True')
            return None # to avoid race-conditions in parallel computing contexts
        self._do_touch_file('_started')
        if not time0:
            time0 = self.getTime0()
        # Shift the frame window by the wait time, clamped to the scan's end.
        time0 = min(wtime+time0, self.getTimeMax())
        if not timeF:
            timeF = self.getTimeF()
        timeF = min(wtime+timeF, self.getTimeMax())
        sta = nipet.mmrchain(self.datain, self.mMRparams,
                             frames = ['fluid', [time0, timeF]],
                             mu_h = self.muHardware(),
                             mu_o = muo,
                             itr = self.getItr(),
                             fwhm = self.getFwhm(),
                             recmod = self.recmod,
                             outpath = self.outpath,
                             store_img = True,
                             fcomment = fcomment)
        # Persist the single-frame duration alongside the wait time.
        self.save_json(np.int_([timeF-time0]), waittime=wtime)
        self._do_touch_file('_finished')
        return sta
    def createDynamic(self, taus, muo, fcomment='_createDynamic'):
        """
        :param taus: np.int_
        :param muo: 3D or 4D mu-map of imaged object
        :return: last result from nipet.mmrchain
        :rtype: dictionary
        """
        global dynFrame
        from numpy import isnan
        from niftypet import nipet
        from warnings import warn
        self.mMRparams['Cnt']['VERBOSE'] = self.verbose
        self.mMRparams['Cnt']['DCYCRR'] = self.DCYCRR
        # Bail out when another process already claimed this reconstruction.
        if self.reconstruction_started:
            logging.debug('reconstruction.Reconstruction.createDynamics.reconstruction_started == True')
            return None # to avoid race-conditions in parallel computing contexts
        self._do_touch_file('_started')
        dynFrame = None
        times,taus = self.getTimes(taus) # length(times) == length(taus) + 1; revise taus using NIPET metrics
        print("times->" + str(times))
        print("taus->" + str(taus))
        wtime = times[0] # time to wait for nans to clear
        it_fin = None # passed to save_json()
        for it in np.arange(1, times.shape[0]):
            try:
                if self.frame_exists(times[it-1], times[it], fcomment, it):
                    continue # and reuse existings reconstructions
                logging.info('createDynamic: frame samples {}-{} s;'.format(times[it-1], times[it]))
                logging.debug('createDynamic.datain->')
                logging.debug(self.datain)
                logging.debug('createDynamic.mMRparams->')
                logging.debug(self.mMRparams)
                dynFrame = nipet.mmrchain(self.datain, self.mMRparams,
                                          frames = ['fluid', [times[it-1], times[it]]],
                                          mu_h = self.muHardware(),
                                          mu_o = muo,
                                          itr = self.getItr(),
                                          fwhm = self.getFwhm(),
                                          recmod = self.recmod,
                                          outpath = self.outpath,
                                          store_img = True,
                                          fcomment = fcomment + '_time' + str(it-1))
                it_fin = it
                # NaNs early in the series push the wait time forward.
                if isnan(dynFrame['im']).any():
                    if times[it] < times[-1] / 2:
                        wtime = times[it]
            except (UnboundLocalError, IndexError) as e:
                warn(e.message)
                # Early failures get a replacement frame; late ones end the loop.
                if times[it] < times[-1]/2:
                    self.replaceFrameInSitu(times[it-1], times[it], fcomment, it-1)
                    wtime = times[it]
                else:
                    warn('createDynamic: break for it->' + str(it))
                    break
        # Record the frame durations actually reconstructed plus the wait time.
        self.save_json(taus[:it_fin], waittime=wtime)
        self._do_touch_file('_finished')
        return dynFrame
    def createDynamic2(self, offset, taus, taus2, fcomment='_createDynamic2'):
        """
        Reconstructs attenuation-corrected emission frames; each emission (hist) frame is
        paired with the mu-map frame (self.muCarney) whose time span covers it.
        :param offset is determined externaly by createDynamic():
        :param taus np.int_ for mu-map frames:
        :param taus2 np.int_ for emission frames:
        :param fcomment is a suffix appended to output frame filenames:
        :return last result from nipet.mmrchain:
        :rtype dictionary:
        """
        global dynFrame
        from numpy import isnan
        from niftypet import nipet
        from warnings import warn
        self.mMRparams['Cnt']['VERBOSE'] = self.verbose
        self.mMRparams['Cnt']['DCYCRR'] = self.DCYCRR
        if self.reconstruction_started:
            logging.debug('reconstruction.Reconstruction.createDynamics2.reconstruction_started == True')
            return None  # to avoid race-conditions in parallel computing contexts
        self._do_touch_file('_started')
        dynFrame = None
        # times: edges of the mu-map frames; times2: edges of the emission frames
        times,trash = self.getTimes(taus)  # of umap alignments
        times2,taus2 = self.getTimes(taus2, offset=offset)
        print("times->" + str(times))
        print("trash->" + str(trash))
        print("times2->" + str(times2))
        print("taus2->" + str(taus2))
        wtime2 = times2[0]  # waiting time in the early scan; passed to save_json()
        it2_fin = None  # index of the last reconstructed frame; passed to save_json()
        it = 1  # right edge of mu-map frame
        for it2 in np.arange(1, times2.shape[0]):  # right edge of hist frame to be attenuation corrected
            try:
                # advance the mu-map frame until it covers the start of the current hist frame
                while times[it] < times2[it2-1] and times[it] < times[-1]:
                    it += 1  # select the mu-map for the hist in: [times2[it2-1], times2[it2]]
                if self.frame_exists(times2[it2-1], times2[it2], fcomment, it2):
                    continue  # and reuse existings reconstructions
                logging.info('createDynamic2: AC frame samples {}-{} s; NAC frame samples {}-{} s'.format(times2[it2-1], times2[it2], times[it-1], times[it]))
                logging.debug('reconstruction.Reconstruction.createDynamic2.datain->')
                logging.debug(self.datain)
                logging.debug('reconstruction.Reconstruction.createDynamic2.mMRparams->')
                logging.debug(self.mMRparams)
                dynFrame = nipet.mmrchain(self.datain, self.mMRparams,
                                          frames = ['fluid', [times2[it2-1], times2[it2]]],
                                          mu_h = self.muHardware(),
                                          mu_o = self.muCarney(frames=(it-1)),
                                          itr = self.getItr(),
                                          fwhm = self.getFwhm(),
                                          recmod = self.recmod,
                                          outpath = self.outpath,
                                          store_img = True,
                                          fcomment = fcomment + '_time' + str(it2-1))
                it2_fin = it2
                # a frame of nans indicates unusable counts; extend the recorded waiting time
                if isnan(dynFrame['im']).any():
                    if times2[it2] < times2[-1]:
                        wtime2 = times2[it2]
            except (UnboundLocalError, IndexError) as e:
                # NOTE(review): e.message is Python-2-only; str(e) is the portable spelling
                warn(e.message)
                if times2[it2] < times2[-1]:
                    self.replaceFrameInSitu(times2[it2-1], times2[it2], fcomment, it2-1)
                    wtime2 = times2[it2]
                else:
                    warn('Reconstruction.createDynamic2: break for it2->' + str(it2))
                    break
        self.save_json(taus2[:it2_fin], offsettime=offset, waittime=(wtime2-offset))
        self._do_touch_file('_finished')
        return dynFrame
    def createUmapSynthFullBlurred(self):
        """
        Converts the 4dfp synthetic umap to NIfTI, re-grids it to the mMR field of view,
        and applies a Gaussian blur (sigma 1.826, presumably voxels for ~4.3 mm fwhm — confirm).
        :return: os.path.join(tracerRawdataLocation, umapSynthFileprefix+'.nii.gz') with 4.3 mm fwhm blur
        """
        from subprocess import call
        pwd0 = os.getcwd()
        os.chdir(self.tracerRawdataLocation_with(ac=False))
        # NOTE(review): tool paths are hard-coded for the local cluster environment
        call('/data/nil-bluearc/raichle/lin64-tools/nifti_4dfp -n ' +
             os.path.join(self.tracerRawdataLocation_with(ac=False), self.umap4dfp) + '.ifh umap_.nii',
             shell=True, executable='/bin/bash')
        call('/bin/gzip umap_.nii', shell=True, executable='/bin/bash')
        # re-grid to 344x344 in-plane, offset -86 voxels in x and y
        call('/usr/local/fsl/bin/fslroi umap_ umap__ -86 344 -86 344 0 -1',
             shell=True, executable='/bin/bash')
        # smooth and write <umapSynthFileprefix>.nii.gz
        call('/usr/local/fsl/bin/fslmaths umap__ -s 1.826 ' + self.umapSynthFileprefix,
             shell=True, executable='/bin/bash')
        os.remove('umap_.nii.gz')
        os.remove('umap__.nii.gz')
        os.chdir(pwd0)
    def checkHistogramming(self, fcomment=''):
        """
        Histogram the listmode data and save QC plots to self.outpath: prompt and delayed
        sinograms (direct and oblique), head curves (prompt/delayed TACs), and the center
        of mass of the radiodistribution.
        :param fcomment: string prepended to the saved PDF filenames
        :return hst: dictionary from nipet.mmrhist
        """
        from niftypet import nipet
        from matplotlib.pyplot import figure, plot, xlabel, ylabel, title, show, savefig, matshow, colorbar, legend, grid
        hst = nipet.mmrhist(self.datain, self.mMRparams)
        if not os.path.exists(self.outpath):
            os.mkdir(self.outpath)
        # sinogram index (<127 for direct sinograms, >=127 for oblique sinograms)
        si = self.histogram_si
        # prompt sinogram
        figure()
        matshow(hst['psino'][si, :, :], cmap='inferno')
        colorbar()
        xlabel('bins')
        ylabel('angles')
        savefig(os.path.join(self.outpath, fcomment+'_promptsino.pdf'))
        # prompt sinogram oblique
        figure()
        matshow(hst['psino'][si+128, :, :], cmap='inferno')
        colorbar()
        xlabel('bins')
        ylabel('angles')
        savefig(os.path.join(self.outpath, fcomment+'_promptsino_oblique.pdf'))
        # delayed sinogram
        figure()
        matshow(hst['dsino'][si, :, :], cmap='inferno')
        colorbar()
        xlabel('bins')
        ylabel('angles')
        savefig(os.path.join(self.outpath, fcomment+'_delayedsino.pdf'))
        # delayed sinogram oblique
        figure()
        matshow(hst['dsino'][si+128, :, :], cmap='inferno')
        colorbar()
        xlabel('bins')
        ylabel('angles')
        savefig(os.path.join(self.outpath, fcomment+'_delayedsino_oblique.pdf'))
        # head curve for prompt and delayed events
        figure()
        plot(hst['phc'], label='prompt TAC')
        plot(hst['dhc'], label='delayed TAC')
        #show()
        legend()
        grid('on')
        xlabel('time/s')
        ylabel('specific activity / (Bq/mL)')
        savefig(os.path.join(self.outpath, fcomment+'_tacs.pdf'))
        # center of mass
        figure()
        plot(hst['cmass'])
        #show()
        grid('on')
        xlabel('time / s')
        ylabel('center of mas of radiodistribution')
        savefig(os.path.join(self.outpath, fcomment+'_cmass.pdf'))
        return hst
    def checkScattering(self, fcomment=''):
        """
        Run a 2-iteration static reconstruction with sinograms returned, then save QC plots
        of summed prompt/random/scatter sinogram profiles at two angle indices.
        :param fcomment: string appended to saved PDF filenames
        :return: (ssn, psn, rsn, msk) summed scatter, prompt, random sinograms and mask
        """
        from niftypet import nipet
        from matplotlib.pyplot import figure, plot, xlabel, ylabel, title, show, savefig, matshow, colorbar, legend, grid
        if not os.path.exists(self.outpath):
            os.mkdir(self.outpath)
        # scattering
        # I don't recommend using it for dynamic scans, but static only, as it drains the memory big time:
        recon = nipet.mmrchain(
            self.datain, self.mMRparams,
            mu_h=self.muHardware(),
            mu_o=self.muCarney(frames=1),
            itr=2,
            fwhm=0.0,
            outpath=self.outpath,
            fcomment='_scattering',
            ret_sinos=True,
            store_img=True)
        # Then you sum up all sinograms to see the average performace:
        ssn = np.sum(recon['sinos']['ssino'], axis=(0, 1))
        psn = np.sum(recon['sinos']['psino'], axis=(0, 1))
        rsn = np.sum(recon['sinos']['rsino'], axis=(0, 1))
        msk = np.sum(recon['sinos']['amask'], axis=(0, 1))
        # plotting the sinogram profiles for angle indexes 128 and 196:
        figure()
        ia = 128
        plot(psn[ia, :], label='prompts')
        plot(rsn[ia, :], label='randoms')
        plot(rsn[ia, :] + ssn[ia, :], label='scatter+randoms')
        plot(msk[ia, :], label='mask')
        legend()
        savefig(os.path.join(self.outpath, fcomment + '_scattering128.pdf'))
        figure()
        ia = 196
        plot(psn[ia, :], label='prompts')
        plot(rsn[ia, :], label='randoms')
        plot(rsn[ia, :] + ssn[ia, :], label='scatter+randoms')
        plot(msk[ia, :], label='mask')
        legend()
        savefig(os.path.join(self.outpath, fcomment + '_scattering196.pdf'))
        return ssn, psn, rsn, msk
def checkTimeAliasingCarney(self, fcomment='_checkTimeAliasingCarney'):
times,trash = self.getTimes(self.getTaus())
print("########## respet.recon.reconstruction.Reconstruction.checkTimeAliasingCarney ##########")
print(times[0:2])
return self.createDynamic(self.getTaus()[0:2], self.muCarney(frames=[0,1]), fcomment)
def checkTimeHierarchiesCarney(self, fcomment='_checkTimeHierarchiesCarney'):
times,trash = self.getTimes(self.getTaus())
times2,trash = self.getTimes(self.getTaus2())
print("########## respet.recon.reconstruction.Reconstruction.checkTimeHierarchiesCarney ##########")
print(times)
return self.createDynamic2(self.getTaus()[0:2], self.getTaus2()[0:6], self.muCarney(frames=[0,1]), fcomment)
    def checkUmaps(self, muo, fcomment=''):
        """
        Save QC images of the combined hardware + object mu-maps in axial, sagittal and
        coronal views (PDFs under self.outpath).
        :param muo: object mu-map dictionary with key 'im'
        :param fcomment: string prepended to saved PDF filenames
        """
        from matplotlib.pyplot import figure, plot, xlabel, ylabel, title, show, savefig, matshow, colorbar, legend, grid
        muh = self.muHardware()
        iz = 64   # axial slice index
        ix = 172  # in-plane index for sagittal/coronal views
        # plot axial image with a colour bar
        matshow(muh['im'][iz, :, :] + muo['im'][iz, :, :], cmap='bone')
        colorbar()
        savefig(os.path.join(self.outpath, fcomment+'_tumaps.pdf'))
        # plot sagittal image with a colour bar
        matshow(muh['im'][:, :, ix] + muo['im'][:, :, ix], cmap='bone')
        colorbar()
        savefig(os.path.join(self.outpath, fcomment+'_sumaps.pdf'))
        # plot coronal image with a colour bar
        matshow(muh['im'][:, ix, :] + muo['im'][:, ix, :], cmap='bone')
        colorbar()
        savefig(os.path.join(self.outpath, fcomment+'_cumaps.pdf'))
def emissionsScatterThresh(self):
""" provisions Cnt['ETHRLD'] for use by:
mmrrec.py lines 208, 272 228; mmrimg.py 209; sct_module.cu line 302 """
if self.tracerMemory.lower() == 'fluorodeoxyglucose':
thresh = 0.05
elif self.tracerMemory.lower() == 'oxygen-water':
thresh = 0.05
elif self.tracerMemory.lower() == 'oxygen' or self.tracerMemory.lower() == 'carbon':
thresh = 0.05
else:
raise AssertionError('Reconstruction.emissionsScatterThresh does not support tracerMemory->' + self.tracerMemory)
return thresh
def frame_exists(self, t0, tf, fcomment, it2):
"""
e.g., a_itr-4_t-577-601sec_createDynamic2Carney_time57.nii.gz
:param t0:
:param tf:
:param fcomment:
:param it2:
:return bool:
"""
fn = "a_itr-" + str(self.getItr()) + "_t-" + str(t0) + "-" + str(tf) + "sec" + fcomment + "_time" + str(it2-1) + ".nii.gz"
return os.path.exists(os.path.join(self.PETpath, 'single-frame', fn))
    def getAffine(self):
        """
        :return: affine transformations for NIfTI
        :rtype: list 2D numeric
        """
        from niftypet import nipet
        cnt = self.mMRparams['Cnt']
        vbed, hbed = nipet.mmraux.vh_bedpos(self.datain, cnt)  # bed positions
        # diagonal carries voxel sizes scaled by 10 (presumably cm -> mm; confirm);
        # x axis is negated for radiological orientation
        A = np.diag(np.array([-10*cnt['SO_VXX'], 10*cnt['SO_VXY'], 10*cnt['SO_VXZ'], 1]))
        A[0,3] = 10*( 0.5*cnt['SO_IMX'] *cnt['SO_VXX'])
        A[1,3] = 10*((-0.5*cnt['SO_IMY'] + 1)*cnt['SO_VXY'])
        # horizontal bed position shifts the z origin
        A[2,3] = 10*((-0.5*cnt['SO_IMZ'] + 1)*cnt['SO_VXZ'] + hbed)
        return A
def getEmmsks(self):
return self.tracerMemory.lower() == 'oxygen' or self.tracerMemory.lower() == 'carbon'
def getFwhm(self):
if self.tracerMemory.lower() == 'fluorodeoxyglucose':
fwhm = 4.3 / 2.08626 # number of voxels; https://docs.scipy.org/doc/scipy-0.16.1/reference/generated/scipy.ndimage.filters.gaussian_filter.html
elif self.tracerMemory.lower() == 'oxygen-water':
fwhm = 4.3 / 2.08626
elif self.tracerMemory.lower() == 'carbon' or self.tracerMemory.lower() == 'oxygen':
fwhm = 4.3 / 2.08626
else:
raise AssertionError('Reconstruction.getFwhm does not support tracerMemory->' + self.tracerMemory)
return fwhm
    def getInterfile(self, dcm):
        """
        Parse the interfile header embedded in a Siemens DICOM file.
        :param dcm: filename of the DICOM containing an interfile header
        :return lm_dict, a dictionary of interfile fields, or None when parsing fails:
        :raise AssertionError when dcm is not a filename:
        """
        from interfile import Interfile
        from warnings import warn
        try:
            try:
                lm_dict = Interfile.load(dcm)
            except Interfile.ParsingError as e:
                # NOTE(review): e.message is Python-2-only; str(e) is the portable spelling
                warn(e.message)
                return None
        except (AttributeError, TypeError):
            raise AssertionError('dcm must be a filename')
        return lm_dict
def getItr(self):
if self.tracerMemory.lower() == 'fluorodeoxyglucose':
itr = 4
elif self.tracerMemory.lower() == 'oxygen-water' or self.tracerMemory.lower() == 'carbon':
itr = 4
elif self.tracerMemory.lower() == 'oxygen':
itr = 2
else:
raise AssertionError('Reconstruction.getItr does not support tracerMemory->' + self.tracerMemory)
return itr
def getMumaps(self, muo, it = 0):
"""
:param muo: numpy.array of len == 3 or len == 4
:param it: list, default is empty
:return: list of numpy.array := [mu-hardware, mu-object]
"""
if muo.ndim == 4:
return [np.squeeze(muo[it,:,:,:]), self.muHardware()]
else:
return [muo, self.muHardware()]
def getSpan(self):
if self.tracerMemory.lower() == 'fluorodeoxyglucose':
span = 11
elif self.tracerMemory.lower() == 'oxygen-water' or self.tracerMemory.lower() == 'carbon':
span = 11
elif self.tracerMemory.lower() == 'oxygen':
span = 11
else:
raise AssertionError('Reconstruction.getSpan does not support tracerMemory->' + self.tracerMemory)
return span
    def getTaus(self, json_file=None):
        """ see also mfiles/t0_and_dt.m
        Frame durations for the coarse (NAC/mu-map) framing; read from json_file when
        given, otherwise selected by tracer.
        :param: json_file containing taus
        :return: np array of frame durations
        :rtype: numpy.int_
        """
        if json_file:
            taus,wtime = self.open_json(json_file)
            return taus
        if not self.tracerMemory:
            raise AssertionError('Reconstruction.getTaus: no tracerMemory')
        if self.tracerMemory.lower() == 'fluorodeoxyglucose':
            taus = np.int_([30,35,39,43,47,51,55,59,64,68,72,76,81,85,89,93,98,102,106,111,115,120,124,129,133,138,142,147,151,156,161,165,170,175,171])
            # len == 35, nudge = 4, dur == 3601
        elif self.tracerMemory.lower() == 'oxygen-water' or self.tracerMemory.lower() == 'carbon' or self.tracerMemory.lower() == 'oxygen':
            taus = np.int_([12,13,14,15,17,18,20,23,26,30,35,43,55,75,114,91])
            # len == 16, dur == 601
        else:
            raise AssertionError('Reconstruction.getTaus does not support tracerMemory->' + self.tracerMemory)
        return taus
    def getTaus2(self, json_file=None):
        """ see also mfiles/t0_and_dt.m
        Frame durations for the finer (AC/emission) framing; read from json_file when
        given, otherwise selected by tracer.
        :param: json_file containing taus
        :return: np array of frame durations, waiting time
        :rtype: numpy.int_
        """
        if json_file:
            taus,wtime = self.open_json(json_file)
            return taus
        if not self.tracerMemory:
            raise AssertionError('Reconstruction.getTaus2: no tracerMemory')
        if self.tracerMemory.lower() == 'fluorodeoxyglucose':
            taus = np.int_([10,13,14,16,17,19,20,22,23,25,26,28,29,31,32,34,35,37,38,40,41,43,44,46,47,49,50,52,53,56,57,59,60,62,63,65,66,68,69,71,72,74,76,78,79,81,82,84,85,87,88,91,92,94,95,97,98,100,101,104,105,108])
            # len == 62, nudge = 1.5, dur == 3601
        elif self.tracerMemory.lower() == 'oxygen-water':
            taus = np.int_([3,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,6,6,6,6,6,6,7,7,7,7,8,8,8,9,9,10,10,11,11,12,13,14,15,16,18,20,22,25,29,34,41,51,52])
            # len == 54, dur == 601
        elif self.tracerMemory.lower() == 'carbon':
            # taus = np.int_([5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,9,9,9,10,11,11,12,13,14,15,16,18,20,22,25,29,34,41,52,137])
            # len == 35, dur == 601
            taus = np.int_([3,3,3,3,3,3,3,3,5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,8,9,9,10,10,11,11,12,13,14,15,16,18,19,22,24,28,33,39,49,64,49])
            # len = 45, dur = 601
        elif self.tracerMemory.lower() == 'oxygen':
            taus = np.int_([2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,8,9,9,10,10,15])
            # len = 63, dur = 301
        else:
            raise AssertionError('Reconstruction.getTaus2 does not support tracerMemory->' + self.tracerMemory)
        return taus
def getTime0(self):
if self.phantom:
return 0
times,trash = self.getTimes(self.getTaus())
if self.tracerMemory.lower() == 'fluorodeoxyglucose':
return times[-1] - 20*60
elif self.tracerMemory.lower() == 'oxygen-water':
return times[0]
elif self.tracerMemory.lower() == 'carbon':
return times[0] + 2*60
elif self.tracerMemory.lower() == 'oxygen':
return times[0]
else:
raise AssertionError('Reconstruction.getTime0 does not support tracerMemory->' + self.tracerMemory)
def getTimeF(self):
if self.phantom:
return self.getTimeMax()
times,trash = self.getTimes(self.getTaus())
if self.tracerMemory.lower() == 'fluorodeoxyglucose':
return times[-1]
elif self.tracerMemory.lower() == 'oxygen-water':
return times[0] + 60
elif self.tracerMemory.lower() == 'carbon':
return times[0] + 3*60
elif self.tracerMemory.lower() == 'oxygen':
return times[0] + 60
else:
raise AssertionError('Reconstruction.getTimeF does not support tracerMemory->' + self.tracerMemory)
    def getTimeMax(self):
        """
        :return: max time available from listmode data in sec.
        """
        from niftypet.nipet.lm import mmr_lmproc  # CUDA
        nele, ttags, tpos = mmr_lmproc.lminfo(self.datain['lm_bf'])
        # time tags are in ms; +999 rounds up to the next whole second
        # NOTE(review): '/' is integer division under Python 2; confirm intent before porting to Python 3
        return (ttags[1]-ttags[0]+999)/1000  # sec
def getTimes(self, taus=None, offset=0):
"""
:param: offset is predetermined duration to exclude from times
:return: array of times including 0 and np.cumsum(taus); max(times) == getTimeMax()
:return: array of taus revised to be consistent with getTimeMax()
:rtype: numpy.int_
"""
if not isinstance(taus, np.ndarray):
raise AssertionError('Reconstruction.getTimes.taus is missing')
tmax = self.getTimeMax()
t = np.hstack((np.int_(0), np.cumsum(taus)))
t = t + offset
t = t[t < tmax]
t = np.hstack((t, np.int_(tmax)))
taus = t[1:] - t[:-1]
return np.int_(t), np.int_(taus)
def getWTime(self, json_file=None):
"""
:param: json_file containing taus
:return: waiting time
:rtype: numpy.int_
"""
if json_file:
taus,wtime = self.open_json(json_file)
return wtime
return 0
def json_filename(self):
return os.path.join(self.PETpath,
self.tracer + '_' + self.visitStr + '.json')
def json_filename_with(self, ac=False):
return os.path.join(self.tracerRawdataLocation_with(ac), 'output', 'PET',
self.tracer + '_' + self.visitStr + '.json')
def open_json(self, json_file=None):
"""
https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
:param json_file, a str:
:return taus, a np array of frame durations, including waiting frames but only frames in the listmode archive:
:return wtime, the waiting time in the early scan in sec:
"""
import codecs, json
if not json_file:
raise AssertionError('Reconstruction.open_json.json_file is missing')
t = codecs.open(json_file, 'r', encoding='utf-8').read()
jt = json.loads(t)
logging.debug('reconstruction.Reconstruction.open_json.jt->')
logging.debug(str(jt))
taus = np.array(jt['taus'])
wtime = int(float(jt['waiting time']))
return taus, wtime
def save_json(self, taus=None, offsettime=0, waittime=0):
"""
https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
:param taus, an np array of frame durations, including waiting frames but only frames in the listmode archive:
:param offsettime, the duration between study time and the first saved frame:
:param waittime, the witing time in the early scan; typically nan <- 0:
:return json_file, a canonical json filename for ancillary data including timings:
"""
import codecs, json
if not isinstance(taus, np.ndarray):
raise AssertionError('Reconstruction.save_json.taus is missing')
jdict = {
"study date": self.lm_studydate(),
"acquisition time": self.lm_acquisitiontime(),
"offset time": offsettime,
"waiting time": waittime,
"taus": taus.tolist(),
"image duration": self.lm_imageduration()
}
logging.debug('reconstruction.Reconstruction.save_json.jdict->')
logging.debug(str(jdict))
json_file = self.json_filename()
j = codecs.open(json_file, 'w', encoding='utf-8') # overwrites existing
json.dump(jdict, j)
return json_file
def lm_dcm(self):
from glob import glob
dcms = glob(os.path.join(self.tracerRawdataLocation, 'LM', '*.dcm'))
for d in dcms:
if os.path.exists(d):
return d
raise AssertionError("Reconstruction.lm_dcm could not open LM *.dcm")
    def lm_dcmread(self):
        """
        Read the listmode DICOM with pydicom.
        :return dcm_datset is a pydicom.dataset.FileDataset containing properties for DICOM fields:
        :raise AssertionError when self.lm_dcm() is not a readable filename:
        """
        from pydicom import dcmread
        try:
            dcm_datset = dcmread(self.lm_dcm())
        except (AttributeError, TypeError):
            raise AssertionError('dcm must be a filename')
        return dcm_datset
def lm_imageduration(self):
lm_dict = self.getInterfile(self.lm_dcm())
if lm_dict:
return lm_dict['image duration']['value'] # sec
else:
return self.getTaus().sum()
def lm_studydate(self):
"""
provides best estimate of date of listmode collection
:param dcm filename:
:return:
"""
d = self.lm_dcmread()
return d.StudyDate # YYYYMMDD after http://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html
def lm_acquisitiontime(self):
"""
provides best estimate of start time (GMT) of listmode collection
:param dcm filename:
:return:
"""
d = self.lm_dcmread()
return d.AcquisitionTime # hhmmss.ffffff after http://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html
def lm_tracer(self):
import re
from warnings import warn
if self.tracerMemory:
return self.tracerMemory
try:
with open(self.lm_dcm(), 'r') as fid:
fcontent = fid.read()
p = re.compile('(?<=Radiopharmaceutical:)[A-Za-z\-]+')
m = re.search(p, fcontent)
self.tracerMemory = m.group(0)
return self.tracerMemory
except IOError as e:
warn(e.message)
raise AssertionError("Reconstruction.lm_tracer could not open LM *.dcm")
def migrateCndaDownloads(self, cndaDownload):
return None
    def adjust_mirror_hdw_mumap(self, hmu_dct):
        """
        Add the ad-hoc attenuation image adhoc_mu.nii.gz (from $HARDWAREUMAPS) to the
        hardware mu-map, for oxygen scans only; presumably accounts for the head mirror
        — confirm with acquisition protocol.
        :param hmu_dct: hardware mu-map dictionary with keys 'im', 'fim', 'affine'
        :return hmu_dct, with the extra attenuation summed into 'im' for oxygen scans:
        """
        import nibabel as nib
        if not self.tracerMemory.lower() == 'oxygen':
            return hmu_dct
        nim = nib.load(os.path.join(os.getenv('HARDWAREUMAPS'), 'adhoc_mu.nii.gz'))
        imo = nim.get_data()
        # reorder NIfTI (x,y,z) to NiftyPET (z,y,x) with the y and z axes flipped
        hmu = np.transpose(imo[:,::-1,::-1], (2, 1, 0))
        hmu_dct = {'im': hmu+hmu_dct['im'],
                   'fim': hmu_dct['fim'],
                   'affine': hmu_dct['affine']}
        return hmu_dct
    def muHardware(self):
        """
        :return: dictionary for hardware mu-map image provided by nipet.hdw_mumap. Keys: 'im', ...
        The result is cached in self.cached_hdw_mumap since loading is expensive.
        See also self.hmuSelection.
        """
        from niftypet import nipet
        if self.use_stored_hdw_mumap:
            # point NIPET at the precomputed hardware mu-map
            self.datain['hmumap'] = os.path.join(
                os.getenv('HARDWAREUMAPS'), 'hmumap.npy')
        if not self.cached_hdw_mumap:
            self.cached_hdw_mumap = nipet.hdw_mumap(
                self.datain, self.hmuSelection, self.mMRparams, outpath=self.outpath, use_stored=self.use_stored_hdw_mumap)
        if self.use_mirror_hdw_mumap:
            self.cached_hdw_mumap = self.adjust_mirror_hdw_mumap(self.cached_hdw_mumap)
        logging.debug('reconstruction.Reconstruction.muHardware.datain[''hmumap'']->')
        logging.debug(self.datain['hmumap'])
        return self.cached_hdw_mumap
def muCarney(self, fileprefix=None, imtype='object mu-map', fcomment='', frames=None):
"""
get NIfTI of the custom umap; see also nipet.mmrimg.obtain_image
:param fileprefix: string for fileprefix of 4D image-object; default := self.umapSynthFileprefix
:param imgtype: string; cf. obtain_image
:param fcomment: string to append to fileprefix
:param frames: frame indices to select from _im; default selects all frames
:return: np.float32
"""
from niftypet import nimpa
if fileprefix is None:
fileprefix = self.umapSynthFileprefix
fqfn = os.path.join(self.tracerRawdataLocation_with(ac=True), fileprefix + fcomment + '.nii.gz')
nimpa_dct = nimpa.getnii(fqfn, output='all')
_im = nimpa_dct['im']
if not frames is None:
_im = _im[frames,:,:,:]
_im = np.squeeze(_im)
_im[_im < 0] = 0
output = {}
output['im'] = _im
output['affine'] = nimpa_dct['affine']
output['exists'] = True
output['fim'] = fqfn
Cnt = self.mMRparams['Cnt']
logging.debug('reconstruction.Reconstruction.muCarney is')
logging.debug('using ' + imtype + ' from NIfTI file.')
if Cnt and output['im'].shape != (Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']):
print 'e> provided ' + imtype + ' via file has inconsistent dimensions compared to Cnt.'
raise ValueError('Wrong dimensions of the mu-map')
return output
# PREVIOUSLY:
# import nibabel
# if fileprefix is None:
# fileprefix = self.umapSynthFileprefix
# fqfn = os.path.join(self.tracerRawdataLocation, fileprefix + fcomment + '.nii.gz')
# nim = nibabel.load(fqfn)
# _im = np.float32(nim.get_data())
# if frames is None:
# if np.ndim(_im) == 3:
# _im = np.transpose(_im[:,::-1,::-1], (2, 1, 0))
# elif np.ndim(_im) == 4:
# _im = np.transpose(_im[:,::-1,::-1,:], (3, 2, 1, 0))
# else:
# raise ValueError('unsupported np.ndim(Reconstruction.muCarney._im)->' + str(np.ndim(_im)))
# else:
# _im = np.transpose(_im[:,::-1,::-1,frames], (3, 2, 1, 0))
# _im = np.squeeze(_im)
# _im[_im < 0] = 0
def muNAC(self):
"""
:return: [] which NIPET 1.1 uses for no attenuation correction
"""
return []
    def muUTE(self):
        """
        :return: mu-map image from Siemens UTE
        :rtype: numpy.array
        NOTE(review): recent nipet.obj_mumap returns a dict with key 'im' — confirm :rtype:
        """
        from niftypet import nipet
        return nipet.obj_mumap(self.datain, self.mMRparams, outpath=self.outpath, store=True)
def nipetFrameFilename(self, t0, t1, tag, fr):
# a_itr-4_t-10-20sec_createDynamic2Carney_time1.nii.gz
return os.path.join(self.outpath, 'PET', 'single-frame',
'a_itr-'+str(self.getItr())+'_t-'+str(t0)+'-'+str(t1)+'sec'+tag+'_time'+str(fr)+'.nii.gz')
    def organizeNormAndListmode(self):
        """
        Ensure an umap folder exists, then classify loose *.dcm files by DICOM ImageType:
        PET_NORM files move to norm/ and PET_LISTMODE files to LM/ (with their paired .bf,
        via _moveToNamedLocation).
        :raise OSError, re-raised after listing tracerRawdataLocation:
        """
        import glob
        import pydicom
        try:
            # check umap; move norm and listmode to folders
            u = os.path.join(self.tracerRawdataLocation, 'umap', '')
            if not os.path.isdir(u):
                os.makedirs(u)
            fns = glob.glob(os.path.join(self.tracerRawdataLocation, '*.dcm'))
            for fn in fns:
                ds = pydicom.read_file(fn)
                if ds.ImageType[2] == 'PET_NORM':
                    self._moveToNamedLocation(fn, 'norm')
                if ds.ImageType[2] == 'PET_LISTMODE':
                    self._moveToNamedLocation(fn, 'LM')
        except OSError:
            # NOTE(review): the listdir result is discarded; presumably intended for a log
            os.listdir(self.tracerRawdataLocation)
            raise
    def organizeRawdataLocation(self, cndaDownload=None):
        """
        Arrange norm/LM/umap folders under tracerRawdataLocation. Twilite, phantom and NAC
        sessions classify loose files in place; AC sessions move the already organized
        norm/LM/umap folders over from the corresponding NAC location.
        :param cndaDownload: path forwarded to migrateCndaDownloads (currently a no-op)
        """
        import shutil
        if self.tracerRawdataLocation.find('Twilite') > 0:
            self.organizeNormAndListmode()
            return
        if self.phantom:
            self.organizeNormAndListmode()
            return
        if not self.ac:
            if cndaDownload:
                self.migrateCndaDownloads(cndaDownload)
            self.organizeNormAndListmode()
            return
        # AC: move .bf, .dcm and umap to tracerRawdataLocation
        nac_norm = os.path.join(self.tracerRawdataLocation_with(ac=False), 'norm', '')
        ac_norm = os.path.join(self.tracerRawdataLocation_with(ac=True), 'norm', '')
        nac_lm = os.path.join(self.tracerRawdataLocation_with(ac=False), 'LM', '')
        ac_lm = os.path.join(self.tracerRawdataLocation_with(ac=True), 'LM', '')
        nac_umap = os.path.join(self.tracerRawdataLocation_with(ac=False), 'umap', '')
        ac_umap = os.path.join(self.tracerRawdataLocation_with(ac=True), 'umap', '')
        # move only what has not already been moved
        if not os.path.isdir(ac_norm):
            shutil.move(nac_norm, self.tracerRawdataLocation)
        if not os.path.isdir(ac_lm):
            shutil.move(nac_lm, self.tracerRawdataLocation)
        if not os.path.isdir(ac_umap):
            shutil.move(nac_umap, self.tracerRawdataLocation)
        return
@staticmethod
def printd(d):
for keys, values in d.items():
print(keys)
print(values)
    def replaceFrameInSitu(self, t0, t1, tag, fr):
        """
        Overwrite a failed single-frame reconstruction with the all-zeros frame from
        $SUBJECTS_DIR so the dynamic series keeps a complete set of frames.
        :param t0, t1: frame time bounds in sec
        :param tag: fcomment embedded in the frame filename
        :param fr: frame index
        """
        from shutil import copyfile
        copyfile(
            os.path.join(os.getenv('SUBJECTS_DIR'), 'zeros_frame.nii.gz'),
            self.nipetFrameFilename(t0, t1, tag, fr))
    def saveDynamicInMemory(self, dyn, mumaps, hst, fcomment=''):
        """
        Gather the frames of a dynamic reconstruction into a single array and save it as
        a 3D or 4D NIfTI under the canonical filename.
        :param dyn: dictionary from nipet.mmrchain
        :param mumaps: dictionary of mu-maps from imaged object, hardware
        :param hst: dictionary from nipet.mmrhist
        :param fcomment: string to append to canonical filename
        """
        fout = self._createFilename(fcomment)
        im = self._gatherOsemoneList(dyn)
        logging.info('reconstruction.Reconstruction.saveDynamicInMemory is')
        logging.info('saving ' + str(len(im.shape)) + 'D image to: ' + fout)
        A = self.getAffine()
        muo,muh = mumaps  # object and hardware mu-maps
        if hst is None:
            hst = self.checkHistogramming()
        desc = self._createDescrip(hst, muh, muo)
        # flip y and z in-plane axes before writing (NIfTI orientation convention)
        if len(im.shape) == 3:
            self._array2nii(im[::-1,::-1,:], A, fout, descrip=desc)
        elif len(im.shape) == 4:
            self._array4D2nii(im[:,::-1,::-1,:], A, fout, descrip=desc)
    def saveStatic(self, sta, mumaps, hst, fcomment=''):
        """
        Save a static (3D) reconstruction as NIfTI under the canonical filename.
        :param sta: dictionary from nipet.mmrchain
        :param mumaps: dictionary of mu-maps from imaged object, hardware
        :param hst: dictionary from nipet.mmrhist
        :param fcomment: string to append to canonical filename
        """
        fout = self._createFilename(fcomment)
        im = sta['im']
        logging.info('reconstruction.Reconstruction.saveStatic is')
        logging.info('saving 3D image to: ' + fout)
        A = self.getAffine()
        muo,muh = mumaps  # object and hardware mu-maps
        if hst is None:
            hst = self.checkHistogramming()
        desc = self._createDescrip(hst, muh, muo)
        assert len(im.shape) == 3, "Reconstruction.saveStatic.im.shape == " + str(len(im.shape))
        # flip y and z in-plane axes before writing (NIfTI orientation convention)
        self._array2nii(im[::-1,::-1,:], A, fout, descrip=desc)
# CLASS PRIVATE PROPERTIES & METHODS
def _array2nii(self, im, A, fnii, descrip=''):
"""
Store the numpy array to a NIfTI file <fnii>
"""
if im.ndim == 3:
im = np.transpose(im, (2, 1, 0))
elif im.ndim == 4:
im = np.transpose(im, (3, 2, 1, 0))
else:
raise StandardError('unrecognised image dimensions')
import nibabel as nib
nii = nib.Nifti1Image(im, A)
hdr = nii.header
hdr.set_sform(None, code='scanner')
hdr['cal_max'] = np.max(im)
hdr['cal_min'] = np.min(im)
hdr['descrip'] = descrip
nib.save(nii, fnii)
    def _array4D2nii(self, im, A, fnii, descrip=''):
        """
        Store a 4D numpy array to NIfTI file <fnii>, reordering (t,z,y,x) -> (x,y,z,t).
        :param im: 4D numpy array
        :param A: 4x4 affine
        :param fnii: output filename
        :param descrip: text for the NIfTI descrip header field
        """
        import nibabel as nib
        im = np.transpose(im, (3, 2, 1, 0))
        nii = nib.Nifti1Image(im, A)
        hdr = nii.header
        hdr.set_sform(None, code='scanner')
        hdr['cal_max'] = np.max(im)
        hdr['cal_min'] = np.min(im)
        hdr['descrip'] = descrip
        nib.save(nii, fnii)
    def _createDescrip(self, hst, muh, muo):
        """
        :param hst: from nipet.mmrhist
        :param muh: is mumaps list array
        :param muo: is mumaps list array
        :return: description text for NIfTI
        if only bed present, attnum := 0.5
        """
        from niftypet import nipet
        cnt = self.mMRparams['Cnt']
        # 0, 0.5 or 1 depending on which mu-maps carry attenuation
        attnum = (1 * (np.sum(muh) > 0.5) + 1 * (np.sum(muo) > 0.5)) / 2.
        ncmp,_ = nipet.mmrnorm.get_components(self.datain, cnt)
        rilut = self._riLUT()
        # quantification factor normalized by isotope branching fraction and frame duration
        qf = ncmp['qf'] / rilut[cnt['ISOTOPE']]['BF'] / float(hst['dur'])
        desc = 'alg=osem' + \
               ';sub=14' + \
               ';att=' + str(attnum * (self.recmod > 0)) + \
               ';sct=' + str(1 * (self.recmod > 1)) + \
               ';spn=' + str(cnt['SPN']) + \
               ';itr=' + str(self.getItr()) + \
               ';fwhm=' + str(self.getFwhm()) + \
               ';t0=' + str(hst['t0']) + \
               ';t1=' + str(hst['t1']) + \
               ';dur=' + str(hst['dur']) + \
               ';qf=' + str(qf)
        return desc
    def _createFilename(self, fcomment):
        """
        Canonical output NIfTI filename: the first 8 characters of the listmode DICOM
        basename plus fcomment, under self.outpath (created when missing).
        :param fcomment: string appended to the filename stem
        """
        from niftypet import nipet
        nipet.mmraux.create_dir(self.outpath)
        pth = os.path.join(self.outpath, os.path.basename(self.datain['lm_dcm'])[:8] + fcomment + '.nii.gz')
        return pth
def _do_touch_file(self, tags=None):
from pathlib2 import Path
if not tags:
return None
f = self._filename_to_touch(tags)
hd, tl = os.path.split(f)
if not os.path.exists(hd):
os.makedirs(hd)
Path(f).touch()
return f
def _filename_to_touch(self, tags=None):
if not tags:
return None
return os.path.join(self.PETpath, 'reconstruction_Reconstruction%s.touch' % tags)
def _gatherOsemoneList(self, olist):
"""
:param olist: list of dictionaries
:return: numpy.array with times concatenated along axis=0 (c-style)
"""
im = [olist[0]['im']]
for i in range(1, len(olist)):
im = np.append(im, [olist[i].im], axis=0)
return np.float_(im)
    def _initializeNiftypet(self):
        """
        Populate self.mMRparams from NIPET defaults plus this instance's settings, classify
        the rawdata location into self.datain, and ensure self.outpath exists.
        """
        from niftypet import nipet
        self.mMRparams = nipet.get_mmrparams()
        self.mMRparams['Cnt']['VERBOSE'] = self.verbose
        self.mMRparams['Cnt']['SPN'] = self.getSpan()
        self.mMRparams['Cnt']['BTP'] = self.bootstrap
        self.mMRparams['Cnt']['DCYCRR'] = self.DCYCRR
        self.mMRparams['Cnt']['DEVID'] = self.DEVID
        self.mMRparams['Cnt']['ETHRLD'] = self.emissionsScatterThresh()
        self.mMRparams['Cnt']['EMMSKS'] = self.getEmmsks()
        self.datain = nipet.classify_input(self.tracerRawdataLocation, self.mMRparams)
        if not os.path.exists(self.outpath):
            os.makedirs(self.outpath)
        logging.info("reconstruction.Reconstruction._initializeNiftypet.datain->")
        logging.info(self.datain)
    def _moveToNamedLocation(self, dcm, name):
        """
        Move a DICOM and its same-stem .bf sidecar into <tracerRawdataLocation>/<name>,
        creating the folder when needed; EEXIST errors are tolerated.
        :param dcm: filename of the DICOM to move
        :param name: destination subfolder, e.g. 'norm' or 'LM'
        """
        import shutil
        import errno
        namedLoc = os.path.join(self.tracerRawdataLocation, name)
        if not os.path.exists(namedLoc):
            os.makedirs(namedLoc)
        try:
            bf = os.path.splitext(dcm)[0]+'.bf'
            shutil.move(bf, os.path.join(namedLoc, os.path.basename(bf)))
        except OSError as e:
            # NOTE(review): shutil.move can also raise shutil.Error (not OSError) on
            # an existing destination — confirm the intended tolerance
            if e.errno != errno.EEXIST:
                raise
        try:
            shutil.move(dcm, os.path.join(namedLoc, os.path.basename(dcm)))
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
def _parse_prefix(self, prefix):
"""
checks that prefix is well-formed and set class properties accordingly
:param prefix is the location of tracer rawdata, e.g., FDG_DT123456789.000000-Converted-NAC:
:return class properties ac & tracerRawdataLocation are valid:
"""
from re import compile
if not prefix:
raise AssertionError(
'reconstruction.Reconstruction requires a prefix parameter, the location of tracer data')
nac = compile('-Converted-NAC')
ac = compile('-Converted-AC')
if nac.search(prefix):
self.ac = False
elif ac.search(prefix):
self.ac = True
else:
raise AssertionError(
'reconstruction.Reconstruction expected prefix parameter to end in -AC or -NAC')
self._tracerRawdataLocation = prefix
if not os.path.exists(self.tracerRawdataLocation):
print(os.listdir('/'))
print(os.listdir('/SubjectsDir'))
raise AssertionError(
'reconstruction.Reconstruction could not find prefix->' + self.tracerRawdataLocation)
#os.makedirs(self.tracerRawdataLocation)
def _riLUT(self):
"""
:return: radioisotope look-up table
"""
return {'Ge68':{'BF':0.891, 'thalf':270.9516*24*60*60},
'Ga68':{'BF':0.891, 'thalf':67.71*60},
'F18':{'BF':0.967, 'thalf':109.77120*60},
'O15':{'BF':0.999, 'thalf':122.2416},
'C11':{'BF':0.998, 'thalf':20.38*60}}
    def __init__(self, prefix=None, umapSF='umapSynth', v=False, cndaDownload=None, devid=0, minTime=0, phantom=False, si=63):
        """
        :param: prefix specifies the location of tracer rawdata; must end in -Converted-AC
                or -Converted-NAC (sets self.ac).
        :param: self.tracerRawdataLocation contains Siemens sinograms, e.g.:
                -rwxr-xr-x+ 1 jjlee wheel 16814660 Sep 13  2016 172.16.31.10.1107.5.2.38.51010.2016090913012239062507614.bf
                -rwxr-xr-x+ 1 jjlee wheel   141444 Sep 13  2016 172.16.31.10.1107.5.2.38.51010.2016090913012239062507614.dcm
                -rwxr-xr-x+ 1 jjlee wheel 247141283 Sep 13 2016 172.16.31.10.1107.5.2.38.51010.30000016090616552364000000050.bf
                -rwxr-xr-x+ 1 jjlee wheel    151868 Sep 13 2016 172.16.31.10.1107.5.2.38.51010.30000016090616552364000000050.dcm
                -rw-r--r--+ 1 jjlee wheel   3081280 Nov 14 14:53 umapSynth_full_frame0.nii.gz
        :param: self.tracerRawdataLocation also contains folders:
                norm, containing, e.g.:
                      -rwxr-xr-x+ 1 jjlee wheel 323404 Sep 13  2016 1.3.12.2.1107.5.2.38.51010.30000016090616552364000000048.bf
                      -rwxr-xr-x+ 1 jjlee wheel 143938 Sep 13  2016 172.16.31.10.1107.5.2.38.51010.30000016090616552364000000048.dcm
                LM, containing, e.g.:
                    -rwxr-xr-x+ 1 jjlee wheel 6817490860 Sep 13  2016 172.16.31.10.1107.5.2.38.51010.30000016090616552364000000049.bf
                    -rwxr-xr-x+ 1 jjlee wheel     145290 Sep 13  2016 1.3.12.2.1107.5.2.38.51010.30000016090616552364000000049.dcm
        :param: umapSF is a fileprefix for the synthetic umap
        :param: v, verbosity, is bool
        :param: cndaDownload is a path forwarded to organizeRawdataLocation
        :param: devid is the CUDA device id stored in self.DEVID
        :param: minTime is the min time (s) for which emission reconstruction is performed
        :param: phantom is bool; phantoms skip the tracer-specific time windows
        :param: si is the sinogram index used by checkHistogramming
        """
        from niftypet.nipet.dinf import dev_info
        logging.info('reconstruction.Reconstruction.__init__')
        self._parse_prefix(prefix)
        logging.info('self.tracerRawdataLocation->' + self.tracerRawdataLocation)
        os.chdir(self.tracerRawdataLocation)
        logging.info('cwd->' + os.getcwd())
        self.umapSynthFileprefix = umapSF
        self.verbose = v
        self.phantom = phantom
        self.organizeRawdataLocation(cndaDownload)
        self.tracerMemory = self.lm_tracer()
        logging.info(str(dev_info(1)))
        self.DEVID = devid
        self._initializeNiftypet()
        self.minTime = minTime
        self.histogram_si = si
def main():
    """Command-line entry point: dispatch on --method to NAC/AC dynamic, static,
    phantom reconstructions, or device/parameter info.

    Bug fix: the 'info' branch previously printed r.mMRparams and r.datain
    without ever constructing r (NameError at runtime); it now builds the
    Reconstruction first.
    """
    import argparse, textwrap
    from niftypet.nipet.dinf import dev_info
    p = argparse.ArgumentParser(
        description='provides interfaces to https://github.com/pjmark/NIMPA.git, https://github.com/jjleewustledu/NIPET.git',
        usage=textwrap.dedent('''\
    python reconstruction.py -h
    nvidia-docker run -it \\
        -v ${DOCKER_HOME}/hardwareumaps/:/hardwareumaps \\
        -v ${SINGULARITY_HOME}/:/SubjectsDir \\
        niftypetr-image:reconstruction:latest -h
    singularity exec \\
        --nv \\
        --bind $SINGULARITY_HOME/hardwareumaps:/hardwareumaps \\
        --bind $SINGULARITY_HOME:/SubjectsDir \\
        $SINGULARITY_HOME/niftypetr-image_reconstruction.sif \\
        "python" "/work/NiftyPETy/respet/recon/reconstruction.py" "-h"
    '''),
        formatter_class=argparse.RawTextHelpFormatter)
    p.add_argument('-m', '--method',
                   metavar='createDynamic|createStatic|createPhantom|info',
                   type=str,
                   default='createDynamic')
    p.add_argument('-p', '--prefix',
                   metavar='/path/to/experiment-NAC',
                   help='location containing tracer listmode and norm data',
                   type=str,
                   required=True)
    p.add_argument('-v', '--verbose',
                   metavar='true|false',
                   type=str,
                   default='false')
    p.add_argument('-g', '--gpu',
                   metavar='0',
                   help='device ID used by cudaSetDevice',
                   type=str,
                   default='0')
    p.add_argument('-t', '--minTime',
                   metavar='0',
                   help='min time for which emission reconstruction is performed',
                   type=str,
                   default='0')
    args = p.parse_args()
    #os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    #os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    #os.environ["NVIDIA_VISIBLE_DEVICES"] = str(args.gpu)
    v = args.verbose.lower() == 'true'
    if args.method.lower() == 'createdynamic':
        print('main.args.method->createdynamic')
        r = Reconstruction(prefix=args.prefix, v=v, devid=int(args.gpu), minTime=int(args.minTime))
        if not r.ac:
            print('main.r.createDynamicNAC')
            r.createDynamicNAC(fcomment='_createDynamicNAC')
        else:
            print('main.r.createDynamic2Carney')
            r.createDynamic2Carney(fcomment='_createDynamic2Carney')
    elif args.method.lower() == 'createstatic':
        print('main.args.method->createstatic')
        r = Reconstruction(prefix=args.prefix, v=v, devid=int(args.gpu), minTime=int(args.minTime))
        if not r.ac:
            print('main.r.createStaticNAC')
            r.createStaticNAC(fcomment='_createStaticNAC')
        else:
            print('main.r.createStaticCarney')
            r.createStaticCarney(fcomment='_createStaticCarney')
    elif args.method.lower() == 'createphantom':
        print('main.args.method->createphantom')
        print('main.r.createPhantom')
        r = Reconstruction(prefix=args.prefix, v=v, devid=int(args.gpu), minTime=int(args.minTime), phantom=True)
        r.createPhantom(fcomment='_createPhantom')
    elif args.method.lower() == 'info':
        print('main.args.method->info')
        # construct r before printing its params (previously a NameError)
        r = Reconstruction(prefix=args.prefix, v=v, devid=int(args.gpu), minTime=int(args.minTime))
        print(dev_info(1))
        print('\n')
        print(r.mMRparams)
        print('\n')
        print(r.datain)
        print('\n')

if __name__ == '__main__':
    main()
| import numpy as np
import os
import logging, sys
# create and configure main logger;
# see also https://stackoverflow.com/questions/50714316/how-to-use-logging-getlogger-name-in-multiple-modules/50715155#50715155
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
class Reconstruction(object):
    """Drives NiftyPET (NIPET/NIMPA) reconstruction of Siemens mMR listmode data."""
    __author__ = "<NAME>"
    __copyright__ = "Copyright 2018"
    # see also: mmrrec.py osemone param mask_radious
    bootstrap = 0  # set to 0 by createStaticNAC/createDynamicNAC; usage elsewhere not in view
    cached_hdw_mumap = None  # memoized hardware mu-map dict (see muHardware)
    datain = {}  # NIPET data dictionary; populated by _initializeNiftypet (not in view)
    DCYCRR = True  # forwarded to mMRparams['Cnt']['DCYCRR'] (decay correction)
    DEVID = 0  # CUDA device id
    histogram_si = 63 # sinogram index (<127 for direct sinograms)
    hmuSelection = [1,4,5] # selects from ~/.niftypet/resources.py: hrdwr_mu
    minTime = 0  # min time (sec) for emission reconstruction
    mMRparams = {}  # NIPET scanner/parameter dictionary
    outfolder = 'output'  # output folder name under datain['corepath']
    phantom = False  # True for phantom studies (changes getTime0/getTimeF)
    recmod = 3  # NIPET recon mode; 0 used for NAC, 3 with attenuation (see createStaticNAC/createStaticUTE)
    tracerMemory = None  # cached tracer name from lm_tracer()
    umap4dfp='umapSynth.4dfp'
    umapFolder = 'umap'
    umapSynthFileprefix = ''
    use_mirror_hdw_mumap = True  # add the adhoc mirror mu-map for oxygen studies (adjust_mirror_hdw_mumap)
    use_stored_hdw_mumap = False  # reuse $HARDWAREUMAPS/hmumap.npy instead of recomputing
    use_stored_hist = False
    verbose = True
    @property
    def outpath(self):
        """
        Folder receiving all reconstruction products.
        :return e.g., '/work/HYGLY48/V1/OO1_V1-Converted-NAC/output':
        """
        return os.path.join(self.datain['corepath'], self.outfolder)
    @property
    def PETpath(self):
        """
        PET image subfolder of outpath.
        :return e.g., '/work/HYGLY48/V1/OO1_V1-Converted-NAC/output/PET':
        """
        return os.path.join(self.outpath, 'PET')
    @property
    def reconstruction_finished(self):
        # sentinel file marking completion; NOTE(review): calls _filename_to_finish
        # (not in view) while reconstruction_started calls _filename_to_touch --
        # confirm both helpers exist and are intentional.
        return os.path.exists(self._filename_to_finish('_finished'))
    @property
    def reconstruction_started(self):
        # sentinel file guarding against duplicate/parallel reconstructions
        return os.path.exists(self._filename_to_touch('_started'))
    @property
    def tracer(self):
        """
        Tracer token: text before the first '_' in the rawdata folder name.
        :return e.g., 'OO1':
        """
        import re
        return re.split('_', os.path.basename(self.tracerRawdataLocation))[0]
    @property
    def tracerRawdataLocation(self):
        """
        Root of the tracer rawdata tree (set by _parse_prefix).
        :return e.g., '/work/HYGLY48/V1/OO1_V1-Converted-NAC':
        """
        return self._tracerRawdataLocation
def tracerRawdataLocation_with(self, ac=False):
from re import compile
conv = compile('-Converted-')
s = conv.search(self._tracerRawdataLocation)
baseloc = self._tracerRawdataLocation[:s.end()-1]
if not ac:
return baseloc+'-NAC'
else:
return baseloc+'-AC'
@property
def visitStr(self):
"""
e.g., for 'FDG_DT1234567789.000000-Converted-NAC' and 'FDG_V1-Converted-NAC'
:return 'dt123456789' and 'v1':
"""
import re
v = re.split('_', os.path.basename(self.tracerRawdataLocation))[1]
v = re.split('-Converted', v)[0]
w = re.split('\.', v)[0]
if not w:
return v.lower()
else:
return w.upper()
    def createStaticNAC(self, time0=None, timeF=None, fcomment='_createStaticNAC'):
        """Static reconstruction with no attenuation correction (recmod 0, empty object mu-map)."""
        self.recmod = 0
        self.bootstrap = 0
        return self.createStatic(self.muNAC(), 0, time0, timeF, fcomment=fcomment,)
    def createStaticUTE(self, time0=None, timeF=None, fcomment='_createStaticUTE'):
        """Static reconstruction attenuation-corrected with the Siemens UTE mu-map."""
        self.recmod = 3
        self.bootstrap = 0
        self.checkUmaps(self.muUTE(), fcomment)  # saves QC mu-map figures
        #wtime = self.getWTime(self.json_filename_with(ac=False))
        return self.createStatic(self.muUTE(), 0, time0, timeF, fcomment=fcomment)
    def createStaticCarney(self, time0=None, timeF=None, fcomment='_createStaticCarney'):
        """Static reconstruction using frame 0 of the custom (presumably Carney pseudo-CT) mu-map;
        the waiting time is read from the sibling -NAC JSON."""
        print("########## respet.recon.reconstruction.Reconstruction.createStaticCarney ##########")
        self.checkUmaps(self.muCarney(frames=[0]), fcomment)
        self.checkHistogramming(fcomment)
        wtime = self.getWTime(self.json_filename_with(ac=False))
        return self.createStatic(self.muCarney(frames=[0]), wtime, time0, timeF, fcomment=fcomment)
    def createPhantom(self, time0=None, timeF=None, fcomment='_createPhantom'):
        """Static phantom reconstruction over the full listmode duration (see getTime0/getTimeF)."""
        print("########## respet.recon.reconstruction.Reconstruction.createPhantom ##########")
        self.phantom = True
        self.checkUmaps(self.muCarney(), fcomment)
        self.checkHistogramming(fcomment)
        return self.createStatic(self.muCarney(), 0, time0, timeF, fcomment=fcomment)
    def createDynamicNAC(self, fcomment='_createDynamicNAC'):
        """Reconstruct all dynamic frames with no attenuation correction (recmod 0)."""
        print("########## respet.recon.reconstruction.Reconstruction.createDynamicNAC ##########")
        self.recmod = 0
        self.bootstrap = 0
        self.checkUmaps(self.muHardware(), fcomment)
        self.checkHistogramming(fcomment)
        return self.createDynamic(self.getTaus(), self.muNAC(), fcomment)
def createDynamicUTE(self, fcomment='_createDynamicUTE'):
print("########## respet.recon.reconstruction.Reconstruction.createDynamicNAC ##########")
self.checkUmaps(self.muUTE(), fcomment)
self.checkHistogramming(fcomment)
return self.createDynamic(self.getTaus(), self.muUTE(), fcomment)
    def createDynamic2Carney(self, fcomment='_createDynamic2Carney'):
        """Attenuation-corrected dynamic reconstruction on the fine (taus2) frame grid,
        selecting per-frame custom mu-maps; NAC taus/waiting time come from the sibling -NAC JSON."""
        print("########## respet.recon.reconstruction.Reconstruction.createDynamic2Carney ##########")
        self.checkUmaps(self.muCarney(frames=[0]), fcomment)
        self.checkHistogramming(fcomment)
        taus = self.getTaus(self.json_filename_with(ac=False))
        offset = self.getWTime(self.json_filename_with(ac=False))
        return self.createDynamic2(max(offset, self.minTime), taus, self.getTaus2(), fcomment)
    def createStatic(self, muo, wtime=0, time0=None, timeF=None, fcomment='_createStatic'):
        """
        Reconstruct a single static frame spanning [wtime+time0, wtime+timeF],
        clamped to the listmode duration.
        :param muo mu-map of imaged object:
        :param wtime is determined by createDynamic:
        :param time0 int sec:
        :param timeF int sec:
        :param fcomment string for naming subspace:
        :return result from nipet.mmrchain:
        :rtype dictionary:
        """
        from niftypet import nipet
        self.mMRparams['Cnt']['VERBOSE'] = self.verbose
        self.mMRparams['Cnt']['DCYCRR'] = self.DCYCRR
        if self.reconstruction_started:
            logging.debug('reconstruction.Reconstruction.createDynamics.reconstruction_started == True')
            return None # to avoid race-conditions in parallel computing contexts
        self._do_touch_file('_started')
        if not time0:
            time0 = self.getTime0()
        time0 = min(wtime+time0, self.getTimeMax())  # clamp to available listmode
        if not timeF:
            timeF = self.getTimeF()
        timeF = min(wtime+timeF, self.getTimeMax())
        sta = nipet.mmrchain(self.datain, self.mMRparams,
                             frames = ['fluid', [time0, timeF]],
                             mu_h = self.muHardware(),
                             mu_o = muo,
                             itr = self.getItr(),
                             fwhm = self.getFwhm(),
                             recmod = self.recmod,
                             outpath = self.outpath,
                             store_img = True,
                             fcomment = fcomment)
        self.save_json(np.int_([timeF-time0]), waittime=wtime)
        self._do_touch_file('_finished')
        return sta
    def createDynamic(self, taus, muo, fcomment='_createDynamic'):
        """
        Reconstruct one frame per tau; frames that fail or contain NaNs early in
        the scan advance 'wtime', the waiting time recorded in the JSON sidecar.
        :param taus: np.int_
        :param muo: 3D or 4D mu-map of imaged object
        :return: last result from nipet.mmrchain
        :rtype: dictionary
        """
        global dynFrame
        from numpy import isnan
        from niftypet import nipet
        from warnings import warn
        self.mMRparams['Cnt']['VERBOSE'] = self.verbose
        self.mMRparams['Cnt']['DCYCRR'] = self.DCYCRR
        if self.reconstruction_started:
            logging.debug('reconstruction.Reconstruction.createDynamics.reconstruction_started == True')
            return None # to avoid race-conditions in parallel computing contexts
        self._do_touch_file('_started')
        dynFrame = None
        times,taus = self.getTimes(taus) # length(times) == length(taus) + 1; revise taus using NIPET metrics
        print("times->" + str(times))
        print("taus->" + str(taus))
        wtime = times[0] # time to wait for nans to clear
        it_fin = None # passed to save_json()
        for it in np.arange(1, times.shape[0]):
            try:
                if self.frame_exists(times[it-1], times[it], fcomment, it):
                    continue # and reuse existings reconstructions
                logging.info('createDynamic: frame samples {}-{} s;'.format(times[it-1], times[it]))
                logging.debug('createDynamic.datain->')
                logging.debug(self.datain)
                logging.debug('createDynamic.mMRparams->')
                logging.debug(self.mMRparams)
                dynFrame = nipet.mmrchain(self.datain, self.mMRparams,
                                          frames = ['fluid', [times[it-1], times[it]]],
                                          mu_h = self.muHardware(),
                                          mu_o = muo,
                                          itr = self.getItr(),
                                          fwhm = self.getFwhm(),
                                          recmod = self.recmod,
                                          outpath = self.outpath,
                                          store_img = True,
                                          fcomment = fcomment + '_time' + str(it-1))
                it_fin = it
                if isnan(dynFrame['im']).any():
                    # NaN frames in the first half of the scan push the waiting time forward
                    if times[it] < times[-1] / 2:
                        wtime = times[it]
            except (UnboundLocalError, IndexError) as e:
                # NOTE(review): e.message is Python-2 only; under Python 3 this raises AttributeError
                warn(e.message)
                if times[it] < times[-1]/2:
                    self.replaceFrameInSitu(times[it-1], times[it], fcomment, it-1)
                    wtime = times[it]
                else:
                    warn('createDynamic: break for it->' + str(it))
                    break
        # NOTE(review): if no frame completed, it_fin is None and taus[:None]
        # saves ALL taus -- confirm this is intended
        self.save_json(taus[:it_fin], waittime=wtime)
        self._do_touch_file('_finished')
        return dynFrame
    def createDynamic2(self, offset, taus, taus2, fcomment='_createDynamic2'):
        """
        Reconstruct on the fine taus2 grid while selecting, per frame, the coarse
        mu-map frame (taus grid) that covers the emission interval.
        :param offset is determined externaly by createDynamic():
        :param taus np.int_ for mu-map frames:
        :param taus2 np.int_ for emission frames:
        :return last result from nipet.mmrchain:
        :rtype dictionary:
        """
        global dynFrame
        from numpy import isnan
        from niftypet import nipet
        from warnings import warn
        self.mMRparams['Cnt']['VERBOSE'] = self.verbose
        self.mMRparams['Cnt']['DCYCRR'] = self.DCYCRR
        if self.reconstruction_started:
            logging.debug('reconstruction.Reconstruction.createDynamics2.reconstruction_started == True')
            return None # to avoid race-conditions in parallel computing contexts
        self._do_touch_file('_started')
        dynFrame = None
        times,trash = self.getTimes(taus) # of umap alignments
        times2,taus2 = self.getTimes(taus2, offset=offset)
        print("times->" + str(times))
        print("trash->" + str(trash))
        print("times2->" + str(times2))
        print("taus2->" + str(taus2))
        wtime2 = times2[0]
        it2_fin = None
        it = 1 # right edge of mu-map frame
        for it2 in np.arange(1, times2.shape[0]): # right edge of hist frame to be attenuation corrected
            try:
                while times[it] < times2[it2-1] and times[it] < times[-1]:
                    it += 1 # select the mu-map for the hist in:  [times2[it2-1], times2[it2]]
                if self.frame_exists(times2[it2-1], times2[it2], fcomment, it2):
                    continue # and reuse existings reconstructions
                logging.info('createDynamic2: AC frame samples {}-{} s; NAC frame samples {}-{} s'.format(times2[it2-1], times2[it2], times[it-1], times[it]))
                logging.debug('reconstruction.Reconstruction.createDynamic2.datain->')
                logging.debug(self.datain)
                logging.debug('reconstruction.Reconstruction.createDynamic2.mMRparams->')
                logging.debug(self.mMRparams)
                dynFrame = nipet.mmrchain(self.datain, self.mMRparams,
                                          frames = ['fluid', [times2[it2-1], times2[it2]]],
                                          mu_h = self.muHardware(),
                                          mu_o = self.muCarney(frames=(it-1)),
                                          itr = self.getItr(),
                                          fwhm = self.getFwhm(),
                                          recmod = self.recmod,
                                          outpath = self.outpath,
                                          store_img = True,
                                          fcomment = fcomment + '_time' + str(it2-1))
                it2_fin = it2
                if isnan(dynFrame['im']).any():
                    if times2[it2] < times2[-1]:
                        wtime2 = times2[it2]
            except (UnboundLocalError, IndexError) as e:
                # NOTE(review): e.message is Python-2 only
                warn(e.message)
                if times2[it2] < times2[-1]:
                    self.replaceFrameInSitu(times2[it2-1], times2[it2], fcomment, it2-1)
                    wtime2 = times2[it2]
                else:
                    warn('Reconstruction.createDynamic2: break for it2->' + str(it2))
                    break
        self.save_json(taus2[:it2_fin], offsettime=offset, waittime=(wtime2-offset))
        self._do_touch_file('_finished')
        return dynFrame
    def createUmapSynthFullBlurred(self):
        """
        Convert the 4dfp synthetic umap to NIfTI, pad to 344x344, and blur.
        NOTE(review): depends on site-specific tool paths (/data/nil-bluearc, /usr/local/fsl).
        :return: os.path.join(tracerRawdataLocation, umapSynthFileprefix+'.nii.gz') with 4.3 mm fwhm blur
        """
        from subprocess import call
        pwd0 = os.getcwd()
        os.chdir(self.tracerRawdataLocation_with(ac=False))
        call('/data/nil-bluearc/raichle/lin64-tools/nifti_4dfp -n ' +
             os.path.join(self.tracerRawdataLocation_with(ac=False), self.umap4dfp) + '.ifh umap_.nii',
             shell=True, executable='/bin/bash')
        call('/bin/gzip umap_.nii', shell=True, executable='/bin/bash')
        call('/usr/local/fsl/bin/fslroi umap_ umap__ -86 344 -86 344 0 -1',
             shell=True, executable='/bin/bash')
        call('/usr/local/fsl/bin/fslmaths umap__ -s 1.826 ' + self.umapSynthFileprefix,
             shell=True, executable='/bin/bash')
        os.remove('umap_.nii.gz')
        os.remove('umap__.nii.gz')
        os.chdir(pwd0)  # restore cwd
    def checkHistogramming(self, fcomment=''):
        """Histogram the listmode data and save QC figures (prompt/delayed sinograms,
        head curves, center of mass) as PDFs under outpath.
        :return: hst dict from nipet.mmrhist
        """
        from niftypet import nipet
        from matplotlib.pyplot import figure, plot, xlabel, ylabel, title, show, savefig, matshow, colorbar, legend, grid
        hst = nipet.mmrhist(self.datain, self.mMRparams)
        if not os.path.exists(self.outpath):
            os.mkdir(self.outpath)
        # sinogram index (<127 for direct sinograms, >=127 for oblique sinograms)
        si = self.histogram_si
        # prompt sinogram
        figure()
        matshow(hst['psino'][si, :, :], cmap='inferno')
        colorbar()
        xlabel('bins')
        ylabel('angles')
        savefig(os.path.join(self.outpath, fcomment+'_promptsino.pdf'))
        # prompt sinogram oblique
        figure()
        matshow(hst['psino'][si+128, :, :], cmap='inferno')
        colorbar()
        xlabel('bins')
        ylabel('angles')
        savefig(os.path.join(self.outpath, fcomment+'_promptsino_oblique.pdf'))
        # delayed sinogram
        figure()
        matshow(hst['dsino'][si, :, :], cmap='inferno')
        colorbar()
        xlabel('bins')
        ylabel('angles')
        savefig(os.path.join(self.outpath, fcomment+'_delayedsino.pdf'))
        # delayed sinogram oblique
        figure()
        matshow(hst['dsino'][si+128, :, :], cmap='inferno')
        colorbar()
        xlabel('bins')
        ylabel('angles')
        savefig(os.path.join(self.outpath, fcomment+'_delayedsino_oblique.pdf'))
        # head curve for prompt and delayed events
        figure()
        plot(hst['phc'], label='prompt TAC')
        plot(hst['dhc'], label='delayed TAC')
        #show()
        legend()
        grid('on')
        xlabel('time/s')
        ylabel('specific activity / (Bq/mL)')
        savefig(os.path.join(self.outpath, fcomment+'_tacs.pdf'))
        # center of mass
        figure()
        plot(hst['cmass'])
        #show()
        grid('on')
        xlabel('time / s')
        ylabel('center of mas of radiodistribution')
        savefig(os.path.join(self.outpath, fcomment+'_cmass.pdf'))
        return hst
    def checkScattering(self, fcomment=''):
        """QC of scatter correction: reconstruct with ret_sinos and plot summed
        prompt/random/scatter sinogram profiles at two angle indices.
        :return: (ssn, psn, rsn, msk) summed scatter/prompt/random sinograms and mask
        """
        from niftypet import nipet
        from matplotlib.pyplot import figure, plot, xlabel, ylabel, title, show, savefig, matshow, colorbar, legend, grid
        if not os.path.exists(self.outpath):
            os.mkdir(self.outpath)
        # scattering
        # I don't recommend using it for dynamic scans, but static only, as it drains the memory big time:
        recon = nipet.mmrchain(
            self.datain, self.mMRparams,
            mu_h=self.muHardware(),
            mu_o=self.muCarney(frames=1),
            itr=2,
            fwhm=0.0,
            outpath=self.outpath,
            fcomment='_scattering',
            ret_sinos=True,
            store_img=True)
        # Then you sum up all sinograms to see the average performace:
        ssn = np.sum(recon['sinos']['ssino'], axis=(0, 1))
        psn = np.sum(recon['sinos']['psino'], axis=(0, 1))
        rsn = np.sum(recon['sinos']['rsino'], axis=(0, 1))
        msk = np.sum(recon['sinos']['amask'], axis=(0, 1))
        # plotting the sinogram profiles for angle indexes 128 and 196:
        figure()
        ia = 128
        plot(psn[ia, :], label='prompts')
        plot(rsn[ia, :], label='randoms')
        plot(rsn[ia, :] + ssn[ia, :], label='scatter+randoms')
        plot(msk[ia, :], label='mask')
        legend()
        savefig(os.path.join(self.outpath, fcomment + '_scattering128.pdf'))
        figure()
        ia = 196
        plot(psn[ia, :], label='prompts')
        plot(rsn[ia, :], label='randoms')
        plot(rsn[ia, :] + ssn[ia, :], label='scatter+randoms')
        plot(msk[ia, :], label='mask')
        legend()
        savefig(os.path.join(self.outpath, fcomment + '_scattering196.pdf'))
        return ssn, psn, rsn, msk
    def checkTimeAliasingCarney(self, fcomment='_checkTimeAliasingCarney'):
        """Smoke-test dynamic reconstruction on just the first two frames."""
        times,trash = self.getTimes(self.getTaus())
        print("########## respet.recon.reconstruction.Reconstruction.checkTimeAliasingCarney ##########")
        print(times[0:2])
        return self.createDynamic(self.getTaus()[0:2], self.muCarney(frames=[0,1]), fcomment)
def checkTimeHierarchiesCarney(self, fcomment='_checkTimeHierarchiesCarney'):
times,trash = self.getTimes(self.getTaus())
times2,trash = self.getTimes(self.getTaus2())
print("########## respet.recon.reconstruction.Reconstruction.checkTimeHierarchiesCarney ##########")
print(times)
return self.createDynamic2(self.getTaus()[0:2], self.getTaus2()[0:6], self.muCarney(frames=[0,1]), fcomment)
    def checkUmaps(self, muo, fcomment=''):
        """Save axial/sagittal/coronal QC images of combined (hardware + object) mu-maps as PDFs."""
        from matplotlib.pyplot import figure, plot, xlabel, ylabel, title, show, savefig, matshow, colorbar, legend, grid
        muh = self.muHardware()
        iz = 64   # axial slice index
        ix = 172  # sagittal/coronal index
        # plot axial image with a colour bar
        matshow(muh['im'][iz, :, :] + muo['im'][iz, :, :], cmap='bone')
        colorbar()
        savefig(os.path.join(self.outpath, fcomment+'_tumaps.pdf'))
        # plot sagittal image with a colour bar
        matshow(muh['im'][:, :, ix] + muo['im'][:, :, ix], cmap='bone')
        colorbar()
        savefig(os.path.join(self.outpath, fcomment+'_sumaps.pdf'))
        # plot coronal image with a colour bar
        matshow(muh['im'][:, ix, :] + muo['im'][:, ix, :], cmap='bone')
        colorbar()
        savefig(os.path.join(self.outpath, fcomment+'_cumaps.pdf'))
def emissionsScatterThresh(self):
""" provisions Cnt['ETHRLD'] for use by:
mmrrec.py lines 208, 272 228; mmrimg.py 209; sct_module.cu line 302 """
if self.tracerMemory.lower() == 'fluorodeoxyglucose':
thresh = 0.05
elif self.tracerMemory.lower() == 'oxygen-water':
thresh = 0.05
elif self.tracerMemory.lower() == 'oxygen' or self.tracerMemory.lower() == 'carbon':
thresh = 0.05
else:
raise AssertionError('Reconstruction.emissionsScatterThresh does not support tracerMemory->' + self.tracerMemory)
return thresh
def frame_exists(self, t0, tf, fcomment, it2):
"""
e.g., a_itr-4_t-577-601sec_createDynamic2Carney_time57.nii.gz
:param t0:
:param tf:
:param fcomment:
:param it2:
:return bool:
"""
fn = "a_itr-" + str(self.getItr()) + "_t-" + str(t0) + "-" + str(tf) + "sec" + fcomment + "_time" + str(it2-1) + ".nii.gz"
return os.path.exists(os.path.join(self.PETpath, 'single-frame', fn))
    def getAffine(self):
        """
        :return: affine transformations for NIfTI
        :rtype: list 2D numeric
        """
        from niftypet import nipet
        cnt = self.mMRparams['Cnt']
        vbed, hbed = nipet.mmraux.vh_bedpos(self.datain, cnt) # bed positions
        # factor 10: presumably cm -> mm voxel sizes -- confirm against Cnt units
        A = np.diag(np.array([-10*cnt['SO_VXX'], 10*cnt['SO_VXY'], 10*cnt['SO_VXZ'], 1]))
        A[0,3] = 10*( 0.5*cnt['SO_IMX']     *cnt['SO_VXX'])
        A[1,3] = 10*((-0.5*cnt['SO_IMY'] + 1)*cnt['SO_VXY'])
        A[2,3] = 10*((-0.5*cnt['SO_IMZ'] + 1)*cnt['SO_VXZ'] + hbed)
        return A
def getEmmsks(self):
return self.tracerMemory.lower() == 'oxygen' or self.tracerMemory.lower() == 'carbon'
def getFwhm(self):
if self.tracerMemory.lower() == 'fluorodeoxyglucose':
fwhm = 4.3 / 2.08626 # number of voxels; https://docs.scipy.org/doc/scipy-0.16.1/reference/generated/scipy.ndimage.filters.gaussian_filter.html
elif self.tracerMemory.lower() == 'oxygen-water':
fwhm = 4.3 / 2.08626
elif self.tracerMemory.lower() == 'carbon' or self.tracerMemory.lower() == 'oxygen':
fwhm = 4.3 / 2.08626
else:
raise AssertionError('Reconstruction.getFwhm does not support tracerMemory->' + self.tracerMemory)
return fwhm
    def getInterfile(self, dcm):
        """
        Parse an Interfile header embedded in the given DICOM file.
        :param dcm: filename (str)
        :return lm_dict, a dictionary of interfile fields, or None on parse failure:
        :raises AssertionError: when dcm is not a filename
        """
        from interfile import Interfile
        from warnings import warn
        try:
            try:
                lm_dict = Interfile.load(dcm)
            except Interfile.ParsingError as e:
                # NOTE(review): e.message is Python-2 only
                warn(e.message)
                return None
        except (AttributeError, TypeError):
            raise AssertionError('dcm must be a filename')
        return lm_dict
def getItr(self):
if self.tracerMemory.lower() == 'fluorodeoxyglucose':
itr = 4
elif self.tracerMemory.lower() == 'oxygen-water' or self.tracerMemory.lower() == 'carbon':
itr = 4
elif self.tracerMemory.lower() == 'oxygen':
itr = 2
else:
raise AssertionError('Reconstruction.getItr does not support tracerMemory->' + self.tracerMemory)
return itr
    def getMumaps(self, muo, it = 0):
        """
        :param muo: numpy.array of len == 3 or len == 4
        :param it: int frame index into a 4D muo; default 0
        :return: list of numpy.array := [mu-object (frame it if 4D), mu-hardware]
        """
        if muo.ndim == 4:
            return [np.squeeze(muo[it,:,:,:]), self.muHardware()]
        else:
            return [muo, self.muHardware()]
def getSpan(self):
if self.tracerMemory.lower() == 'fluorodeoxyglucose':
span = 11
elif self.tracerMemory.lower() == 'oxygen-water' or self.tracerMemory.lower() == 'carbon':
span = 11
elif self.tracerMemory.lower() == 'oxygen':
span = 11
else:
raise AssertionError('Reconstruction.getSpan does not support tracerMemory->' + self.tracerMemory)
return span
    def getTaus(self, json_file=None):
        """ see also mfiles/t0_and_dt.m
        Coarse (mu-map-level) frame durations; read from JSON when given,
        otherwise hard-coded per tracer.
        :param:  json_file containing taus
        :return: np array of frame durations
        :rtype:  numpy.int_
        """
        if json_file:
            taus,wtime = self.open_json(json_file)
            return taus
        if not self.tracerMemory:
            raise AssertionError('Reconstruction.getTaus:  no tracerMemory')
        if self.tracerMemory.lower() == 'fluorodeoxyglucose':
            taus = np.int_([30,35,39,43,47,51,55,59,64,68,72,76,81,85,89,93,98,102,106,111,115,120,124,129,133,138,142,147,151,156,161,165,170,175,171])
            # len == 35, nudge = 4, dur == 3601
        elif self.tracerMemory.lower() == 'oxygen-water' or self.tracerMemory.lower() == 'carbon' or self.tracerMemory.lower() == 'oxygen':
            taus = np.int_([12,13,14,15,17,18,20,23,26,30,35,43,55,75,114,91])
            # len == 16, dur == 601
        else:
            raise AssertionError('Reconstruction.getTaus does not support tracerMemory->' + self.tracerMemory)
        return taus
    def getTaus2(self, json_file=None):
        """ see also mfiles/t0_and_dt.m
        Fine (emission-level) frame durations; read from JSON when given,
        otherwise hard-coded per tracer.
        :param:  json_file containing taus
        :return: np array of frame durations
        :rtype:  numpy.int_
        """
        if json_file:
            taus,wtime = self.open_json(json_file)
            return taus
        if not self.tracerMemory:
            raise AssertionError('Reconstruction.getTaus2:  no tracerMemory')
        if self.tracerMemory.lower() == 'fluorodeoxyglucose':
            taus = np.int_([10,13,14,16,17,19,20,22,23,25,26,28,29,31,32,34,35,37,38,40,41,43,44,46,47,49,50,52,53,56,57,59,60,62,63,65,66,68,69,71,72,74,76,78,79,81,82,84,85,87,88,91,92,94,95,97,98,100,101,104,105,108])
            # len == 62, nudge = 1.5, dur == 3601
        elif self.tracerMemory.lower() == 'oxygen-water':
            taus = np.int_([3,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,6,6,6,6,6,6,7,7,7,7,8,8,8,9,9,10,10,11,11,12,13,14,15,16,18,20,22,25,29,34,41,51,52])
            # len == 54, dur == 601
        elif self.tracerMemory.lower() == 'carbon':
            # taus = np.int_([5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,9,9,9,10,11,11,12,13,14,15,16,18,20,22,25,29,34,41,52,137])
            # len == 35, dur == 601
            taus = np.int_([3,3,3,3,3,3,3,3,5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,8,9,9,10,10,11,11,12,13,14,15,16,18,19,22,24,28,33,39,49,64,49])
            # len = 45, dur = 601
        elif self.tracerMemory.lower() == 'oxygen':
            taus = np.int_([2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,8,9,9,10,10,15])
            # len = 63, dur = 301
        else:
            raise AssertionError('Reconstruction.getTaus2 does not support tracerMemory->' + self.tracerMemory)
        return taus
    def getTime0(self):
        """Default static-frame start time (sec) per tracer; 0 for phantoms.
        FDG uses the last 20 min; carbon skips the first 2 min."""
        if self.phantom:
            return 0
        times,trash = self.getTimes(self.getTaus())
        if self.tracerMemory.lower() == 'fluorodeoxyglucose':
            return times[-1] - 20*60
        elif self.tracerMemory.lower() == 'oxygen-water':
            return times[0]
        elif self.tracerMemory.lower() == 'carbon':
            return times[0] + 2*60
        elif self.tracerMemory.lower() == 'oxygen':
            return times[0]
        else:
            raise AssertionError('Reconstruction.getTime0 does not support tracerMemory->' + self.tracerMemory)
    def getTimeF(self):
        """Default static-frame end time (sec) per tracer; full listmode duration for phantoms."""
        if self.phantom:
            return self.getTimeMax()
        times,trash = self.getTimes(self.getTaus())
        if self.tracerMemory.lower() == 'fluorodeoxyglucose':
            return times[-1]
        elif self.tracerMemory.lower() == 'oxygen-water':
            return times[0] + 60
        elif self.tracerMemory.lower() == 'carbon':
            return times[0] + 3*60
        elif self.tracerMemory.lower() == 'oxygen':
            return times[0] + 60
        else:
            raise AssertionError('Reconstruction.getTimeF does not support tracerMemory->' + self.tracerMemory)
    def getTimeMax(self):
        """
        :return: max time available from listmode data in sec.
        """
        from niftypet.nipet.lm import mmr_lmproc  # CUDA
        nele, ttags, tpos = mmr_lmproc.lminfo(self.datain['lm_bf'])
        # +999 then divide by 1000: rounds ms up to whole seconds under Python 2
        # integer division -- NOTE(review): true division under Python 3 would
        # change this to a float
        return (ttags[1]-ttags[0]+999)/1000 # sec
    def getTimes(self, taus=None, offset=0):
        """
        :param:  offset is predetermined duration to exclude from times
        :return: array of times including 0 and np.cumsum(taus); max(times) == getTimeMax()
        :return: array of taus revised to be consistent with getTimeMax()
        :rtype:  numpy.int_
        """
        if not isinstance(taus, np.ndarray):
            raise AssertionError('Reconstruction.getTimes.taus is missing')
        tmax = self.getTimeMax()
        t = np.hstack((np.int_(0), np.cumsum(taus)))
        t = t + offset
        t = t[t < tmax]              # drop frame edges beyond the listmode duration
        t = np.hstack((t, np.int_(tmax)))  # final edge is always tmax
        taus = t[1:] - t[:-1]        # re-derive durations from the clipped edges
        return np.int_(t), np.int_(taus)
    def getWTime(self, json_file=None):
        """
        Waiting time recorded in a JSON sidecar; 0 when no file given.
        :param:  json_file containing taus
        :return: waiting time
        :rtype:  numpy.int_
        """
        if json_file:
            taus,wtime = self.open_json(json_file)
            return wtime
        return 0
def json_filename(self):
return os.path.join(self.PETpath,
self.tracer + '_' + self.visitStr + '.json')
def json_filename_with(self, ac=False):
return os.path.join(self.tracerRawdataLocation_with(ac), 'output', 'PET',
self.tracer + '_' + self.visitStr + '.json')
def open_json(self, json_file=None):
"""
https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
:param json_file, a str:
:return taus, a np array of frame durations, including waiting frames but only frames in the listmode archive:
:return wtime, the waiting time in the early scan in sec:
"""
import codecs, json
if not json_file:
raise AssertionError('Reconstruction.open_json.json_file is missing')
t = codecs.open(json_file, 'r', encoding='utf-8').read()
jt = json.loads(t)
logging.debug('reconstruction.Reconstruction.open_json.jt->')
logging.debug(str(jt))
taus = np.array(jt['taus'])
wtime = int(float(jt['waiting time']))
return taus, wtime
def save_json(self, taus=None, offsettime=0, waittime=0):
"""
https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
:param taus, an np array of frame durations, including waiting frames but only frames in the listmode archive:
:param offsettime, the duration between study time and the first saved frame:
:param waittime, the witing time in the early scan; typically nan <- 0:
:return json_file, a canonical json filename for ancillary data including timings:
"""
import codecs, json
if not isinstance(taus, np.ndarray):
raise AssertionError('Reconstruction.save_json.taus is missing')
jdict = {
"study date": self.lm_studydate(),
"acquisition time": self.lm_acquisitiontime(),
"offset time": offsettime,
"waiting time": waittime,
"taus": taus.tolist(),
"image duration": self.lm_imageduration()
}
logging.debug('reconstruction.Reconstruction.save_json.jdict->')
logging.debug(str(jdict))
json_file = self.json_filename()
j = codecs.open(json_file, 'w', encoding='utf-8') # overwrites existing
json.dump(jdict, j)
return json_file
def lm_dcm(self):
from glob import glob
dcms = glob(os.path.join(self.tracerRawdataLocation, 'LM', '*.dcm'))
for d in dcms:
if os.path.exists(d):
return d
raise AssertionError("Reconstruction.lm_dcm could not open LM *.dcm")
    def lm_dcmread(self):
        """
        Read the listmode DICOM header with pydicom.
        :return dcm_datset is a pydicom.dataset.FileDataset containing properties for DICOM fields:
        :raises AssertionError: when the dcm path is not a filename
        """
        from pydicom import dcmread
        try:
            dcm_datset = dcmread(self.lm_dcm())
        except (AttributeError, TypeError):
            raise AssertionError('dcm must be a filename')
        return dcm_datset
    def lm_imageduration(self):
        """Image duration (sec) from the Interfile header; falls back to the sum
        of the hard-coded taus when the header cannot be parsed."""
        lm_dict = self.getInterfile(self.lm_dcm())
        if lm_dict:
            return lm_dict['image duration']['value'] # sec
        else:
            return self.getTaus().sum()
    def lm_studydate(self):
        """
        provides best estimate of date of listmode collection
        :return: StudyDate string, YYYYMMDD after http://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html
        """
        d = self.lm_dcmread()
        return d.StudyDate  # YYYYMMDD
    def lm_acquisitiontime(self):
        """
        provides best estimate of start time (GMT) of listmode collection
        :return: AcquisitionTime string, hhmmss.ffffff after http://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html
        """
        d = self.lm_dcmread()
        return d.AcquisitionTime  # hhmmss.ffffff
    def lm_tracer(self):
        """Parse (and memoize) the radiopharmaceutical name embedded as text in the
        listmode DICOM file.
        :return: tracer string, e.g. 'Fluorodeoxyglucose'
        :raises AssertionError: when the LM *.dcm cannot be opened
        """
        import re
        from warnings import warn
        if self.tracerMemory:
            return self.tracerMemory
        try:
            with open(self.lm_dcm(), 'r') as fid:
                fcontent = fid.read()
                p = re.compile('(?<=Radiopharmaceutical:)[A-Za-z\-]+')
                m = re.search(p, fcontent)
                self.tracerMemory = m.group(0)
                return self.tracerMemory
        except IOError as e:
            # NOTE(review): e.message is Python-2 only
            warn(e.message)
        raise AssertionError("Reconstruction.lm_tracer could not open LM *.dcm")
def migrateCndaDownloads(self, cndaDownload):
return None
    def adjust_mirror_hdw_mumap(self, hmu_dct):
        """For oxygen studies only, add the adhoc mirror mu-map ($HARDWAREUMAPS/adhoc_mu.nii.gz)
        to the hardware mu-map; other tracers pass through unchanged."""
        import nibabel as nib
        if not self.tracerMemory.lower() == 'oxygen':
            return hmu_dct
        nim = nib.load(os.path.join(os.getenv('HARDWAREUMAPS'), 'adhoc_mu.nii.gz'))
        imo = nim.get_data()
        # flip y,z and reorder axes to match the NIPET hardware mu-map orientation
        hmu = np.transpose(imo[:,::-1,::-1], (2, 1, 0))
        hmu_dct = {'im': hmu+hmu_dct['im'],
                   'fim': hmu_dct['fim'],
                   'affine': hmu_dct['affine']}
        return hmu_dct
    def muHardware(self):
        """
        :return: dictionary for hardware mu-map image provided by nipet.hdw_mumap.  Keys:  'im', ...
                 Result is memoized in self.cached_hdw_mumap.
        See also self.hmuSelection.
        """
        from niftypet import nipet
        if self.use_stored_hdw_mumap:
            self.datain['hmumap'] = os.path.join(
                os.getenv('HARDWAREUMAPS'), 'hmumap.npy')
        if not self.cached_hdw_mumap:
            self.cached_hdw_mumap = nipet.hdw_mumap(
                self.datain, self.hmuSelection, self.mMRparams, outpath=self.outpath, use_stored=self.use_stored_hdw_mumap)
        if self.use_mirror_hdw_mumap:
            self.cached_hdw_mumap = self.adjust_mirror_hdw_mumap(self.cached_hdw_mumap)
        logging.debug('reconstruction.Reconstruction.muHardware.datain[''hmumap'']->')
        logging.debug(self.datain['hmumap'])
        return self.cached_hdw_mumap
    def muCarney(self, fileprefix=None, imtype='object mu-map', fcomment='', frames=None):
        """
        get NIfTI of the custom umap; see also nipet.mmrimg.obtain_image
        :param fileprefix: string for fileprefix of 4D image-object; default := self.umapSynthFileprefix
        :param imtype: string; cf. obtain_image
        :param fcomment: string to append to fileprefix
        :param frames: frame indices to select from _im; default selects all frames
        :return: dict with keys 'im' (np.float32), 'affine', 'exists', 'fim'
        """
        from niftypet import nimpa
        if fileprefix is None:
            fileprefix = self.umapSynthFileprefix
        fqfn = os.path.join(self.tracerRawdataLocation_with(ac=True), fileprefix + fcomment + '.nii.gz')
        nimpa_dct = nimpa.getnii(fqfn, output='all')
        _im = nimpa_dct['im']
        if not frames is None:
            _im = _im[frames,:,:,:]
        _im = np.squeeze(_im)
        _im[_im < 0] = 0  # clamp negative mu values
        output = {}
        output['im'] = _im
        output['affine'] = nimpa_dct['affine']
        output['exists'] = True
        output['fim'] = fqfn
        Cnt = self.mMRparams['Cnt']
        logging.debug('reconstruction.Reconstruction.muCarney is')
        logging.debug('using ' + imtype + ' from NIfTI file.')
        if Cnt and output['im'].shape != (Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']):
            # NOTE(review): Python-2-only print statement; module is not Python-3 clean
            print 'e> provided ' + imtype + ' via file has inconsistent dimensions compared to Cnt.'
            raise ValueError('Wrong dimensions of the mu-map')
        return output

        # PREVIOUSLY:
        # import nibabel
        # if fileprefix is None:
        #     fileprefix = self.umapSynthFileprefix
        # fqfn = os.path.join(self.tracerRawdataLocation, fileprefix + fcomment + '.nii.gz')
        # nim = nibabel.load(fqfn)
        # _im = np.float32(nim.get_data())
        # if frames is None:
        #     if np.ndim(_im) == 3:
        #         _im = np.transpose(_im[:,::-1,::-1], (2, 1, 0))
        #     elif np.ndim(_im) == 4:
        #         _im = np.transpose(_im[:,::-1,::-1,:], (3, 2, 1, 0))
        #     else:
        #         raise ValueError('unsupported np.ndim(Reconstruction.muCarney._im)->' + str(np.ndim(_im)))
        # else:
        #     _im = np.transpose(_im[:,::-1,::-1,frames], (3, 2, 1, 0))
        # _im = np.squeeze(_im)
        # _im[_im < 0] = 0
def muNAC(self):
    """Return the empty mu-map list which NIPET 1.1 interprets as
    "no attenuation correction"."""
    return list()
def muUTE(self):
    """
    :return: mu-map image from Siemens UTE
    :rtype: numpy.array
    # NOTE(review): nipet.obj_mumap typically returns a dict with an 'im'
    # array — confirm the declared :rtype against the pinned NIPET version
    """
    from niftypet import nipet
    # store=True persists the object mu-map under self.outpath
    return nipet.obj_mumap(self.datain, self.mMRparams, outpath=self.outpath, store=True)
def nipetFrameFilename(self, t0, t1, tag, fr):
    """Build NIPET's single-frame output filename, e.g.
    <outpath>/PET/single-frame/a_itr-4_t-10-20sec_createDynamic2Carney_time1.nii.gz"""
    basename = 'a_itr-{}_t-{}-{}sec{}_time{}.nii.gz'.format(
        self.getItr(), t0, t1, tag, fr)
    return os.path.join(self.outpath, 'PET', 'single-frame', basename)
def organizeNormAndListmode(self):
    """Ensure the umap/ folder exists and sort loose *.dcm (+ sibling *.bf)
    files into norm/ and LM/ subfolders by their DICOM ImageType.

    :raises OSError: re-raised after listing the rawdata folder for diagnosis
    """
    import glob
    import pydicom
    try:
        # check umap; move norm and listmode to folders
        u = os.path.join(self.tracerRawdataLocation, 'umap', '')
        if not os.path.isdir(u):
            os.makedirs(u)
        fns = glob.glob(os.path.join(self.tracerRawdataLocation, '*.dcm'))
        for fn in fns:
            ds = pydicom.read_file(fn)
            # ImageType[2] distinguishes norm from listmode acquisitions
            if ds.ImageType[2] == 'PET_NORM':
                self._moveToNamedLocation(fn, 'norm')
            if ds.ImageType[2] == 'PET_LISTMODE':
                self._moveToNamedLocation(fn, 'LM')
    except OSError:
        # NOTE(review): the listing's result is discarded — presumably it was
        # meant to be printed/logged for debugging before re-raising
        os.listdir(self.tracerRawdataLocation)
        raise
def organizeRawdataLocation(self, cndaDownload=None):
    """Arrange norm/LM/umap data under self.tracerRawdataLocation.

    Twilite, phantom and NAC cases only sort loose files; the AC case moves
    the norm/, LM/ and umap/ folders over from the sibling NAC location.

    :param cndaDownload: optional path forwarded to migrateCndaDownloads()
    """
    import shutil
    if self.tracerRawdataLocation.find('Twilite') > 0:
        self.organizeNormAndListmode()
        return
    if self.phantom:
        self.organizeNormAndListmode()
        return
    if not self.ac:
        if cndaDownload:
            self.migrateCndaDownloads(cndaDownload)
        self.organizeNormAndListmode()
        return
    # AC: move .bf, .dcm and umap to tracerRawdataLocation
    nac_norm = os.path.join(self.tracerRawdataLocation_with(ac=False), 'norm', '')
    ac_norm = os.path.join(self.tracerRawdataLocation_with(ac=True), 'norm', '')
    nac_lm = os.path.join(self.tracerRawdataLocation_with(ac=False), 'LM', '')
    ac_lm = os.path.join(self.tracerRawdataLocation_with(ac=True), 'LM', '')
    nac_umap = os.path.join(self.tracerRawdataLocation_with(ac=False), 'umap', '')
    ac_umap = os.path.join(self.tracerRawdataLocation_with(ac=True), 'umap', '')
    # only move each folder if it has not already been moved
    if not os.path.isdir(ac_norm):
        shutil.move(nac_norm, self.tracerRawdataLocation)
    if not os.path.isdir(ac_lm):
        shutil.move(nac_lm, self.tracerRawdataLocation)
    if not os.path.isdir(ac_umap):
        shutil.move(nac_umap, self.tracerRawdataLocation)
    return
@staticmethod
def printd(d):
for keys, values in d.items():
print(keys)
print(values)
def replaceFrameInSitu(self, t0, t1, tag, fr):
    """Overwrite the reconstructed frame (t0, t1, fr) in place with the
    all-zeros template $SUBJECTS_DIR/zeros_frame.nii.gz (used to blank
    frames that failed reconstruction)."""
    from shutil import copyfile
    copyfile(
        os.path.join(os.getenv('SUBJECTS_DIR'), 'zeros_frame.nii.gz'),
        self.nipetFrameFilename(t0, t1, tag, fr))
def saveDynamicInMemory(self, dyn, mumaps, hst, fcomment=''):
    """Save a dynamic (multi-frame) reconstruction to NIfTI.

    :param dyn: list of frame dictionaries from nipet.mmrchain
    :param mumaps: (object mu-map, hardware mu-map) pair
    :param hst: dictionary from nipet.mmrhist; re-histogrammed when None
    :param fcomment: string to append to canonical filename
    """
    fout = self._createFilename(fcomment)
    im = self._gatherOsemoneList(dyn)
    logging.info('reconstruction.Reconstruction.saveDynamicInMemory is')
    logging.info('saving ' + str(len(im.shape)) + 'D image to: ' + fout)
    A = self.getAffine()
    muo,muh = mumaps # object and hardware mu-maps
    if hst is None:
        hst = self.checkHistogramming()
    desc = self._createDescrip(hst, muh, muo)
    # flip x and y back to scanner orientation before writing
    if len(im.shape) == 3:
        self._array2nii(im[::-1,::-1,:], A, fout, descrip=desc)
    elif len(im.shape) == 4:
        self._array4D2nii(im[:,::-1,::-1,:], A, fout, descrip=desc)
    # NOTE(review): arrays that are neither 3D nor 4D are silently ignored
def saveStatic(self, sta, mumaps, hst, fcomment=''):
    """Save a static (single-frame, 3D) reconstruction to NIfTI.

    :param sta: dictionary from nipet.mmrchain with an 'im' array
    :param mumaps: (object mu-map, hardware mu-map) pair
    :param hst: dictionary from nipet.mmrhist; re-histogrammed when None
    :param fcomment: string to append to canonical filename
    :raises AssertionError: if sta['im'] is not 3D
    """
    fout = self._createFilename(fcomment)
    im = sta['im']
    logging.info('reconstruction.Reconstruction.saveStatic is')
    logging.info('saving 3D image to: ' + fout)
    A = self.getAffine()
    muo,muh = mumaps # object and hardware mu-maps
    if hst is None:
        hst = self.checkHistogramming()
    desc = self._createDescrip(hst, muh, muo)
    assert len(im.shape) == 3, "Reconstruction.saveStatic.im.shape == " + str(len(im.shape))
    # flip x and y back to scanner orientation before writing
    self._array2nii(im[::-1,::-1,:], A, fout, descrip=desc)
# CLASS PRIVATE PROPERTIES & METHODS
def _array2nii(self, im, A, fnii, descrip=''):
    """
    Store the numpy array to a NIfTI file <fnii>

    :param im: 3D or 4D numpy array; transposed to reversed axis order
        before saving
    :param A: 4x4 affine matrix
    :param fnii: output filename
    :param descrip: text stored in the NIfTI 'descrip' header field
    :raises ValueError: for arrays that are neither 3D nor 4D
    """
    import nibabel as nib
    if im.ndim == 3:
        im = np.transpose(im, (2, 1, 0))
    elif im.ndim == 4:
        im = np.transpose(im, (3, 2, 1, 0))
    else:
        # was StandardError, which no longer exists on Python 3 and was
        # overly broad; ValueError subclasses StandardError on Python 2,
        # so existing callers' except clauses still match
        raise ValueError('unrecognised image dimensions')
    nii = nib.Nifti1Image(im, A)
    hdr = nii.header
    hdr.set_sform(None, code='scanner')
    hdr['cal_max'] = np.max(im)
    hdr['cal_min'] = np.min(im)
    hdr['descrip'] = descrip
    nib.save(nii, fnii)
def _array4D2nii(self, im, A, fnii, descrip=''):
    """Save the 4D array *im* to NIfTI file *fnii* with affine *A*,
    transposing to reversed axis order and recording min/max and
    *descrip* in the header."""
    import nibabel as nib
    data = np.transpose(im, (3, 2, 1, 0))
    nii = nib.Nifti1Image(data, A)
    hdr = nii.header
    hdr.set_sform(None, code='scanner')
    hdr['cal_max'] = np.max(data)
    hdr['cal_min'] = np.min(data)
    hdr['descrip'] = descrip
    nib.save(nii, fnii)
def _createDescrip(self, hst, muh, muo):
    """
    :param hst: from nipet.mmrhist
    :param muh: hardware mu-map array (mumaps list element)
    :param muo: object mu-map array (mumaps list element)
    :return: description text for NIfTI 'descrip' header field
    if only bed present, attnum := 0.5
    """
    from niftypet import nipet
    cnt = self.mMRparams['Cnt']
    # 0, 0.5 or 1 depending on which of the two mu-maps carry signal
    attnum = (1 * (np.sum(muh) > 0.5) + 1 * (np.sum(muo) > 0.5)) / 2.
    ncmp,_ = nipet.mmrnorm.get_components(self.datain, cnt)
    rilut = self._riLUT()
    # quantification factor scaled by branching fraction and frame duration
    qf = ncmp['qf'] / rilut[cnt['ISOTOPE']]['BF'] / float(hst['dur'])
    desc = 'alg=osem' + \
        ';sub=14' + \
        ';att=' + str(attnum * (self.recmod > 0)) + \
        ';sct=' + str(1 * (self.recmod > 1)) + \
        ';spn=' + str(cnt['SPN']) + \
        ';itr=' + str(self.getItr()) + \
        ';fwhm=' + str(self.getFwhm()) + \
        ';t0=' + str(hst['t0']) + \
        ';t1=' + str(hst['t1']) + \
        ';dur=' + str(hst['dur']) + \
        ';qf=' + str(qf)
    return desc
def _createFilename(self, fcomment):
    """Return the canonical output filename <outpath>/<first 8 chars of the
    LM DICOM basename><fcomment>.nii.gz, creating outpath if needed."""
    from niftypet import nipet
    nipet.mmraux.create_dir(self.outpath)
    pth = os.path.join(self.outpath, os.path.basename(self.datain['lm_dcm'])[:8] + fcomment + '.nii.gz')
    return pth
def _do_touch_file(self, tags=None):
    """Touch (create or update) the marker file for *tags*, creating its
    parent folder when necessary; return the file's path, or None when no
    tags were supplied."""
    from pathlib2 import Path
    if not tags:
        return None
    fqfn = self._filename_to_touch(tags)
    parent = os.path.split(fqfn)[0]
    if not os.path.exists(parent):
        os.makedirs(parent)
    Path(fqfn).touch()
    return fqfn
def _filename_to_touch(self, tags=None):
if not tags:
return None
return os.path.join(self.PETpath, 'reconstruction_Reconstruction%s.touch' % tags)
def _gatherOsemoneList(self, olist):
"""
:param olist: list of dictionaries
:return: numpy.array with times concatenated along axis=0 (c-style)
"""
im = [olist[0]['im']]
for i in range(1, len(olist)):
im = np.append(im, [olist[i].im], axis=0)
return np.float_(im)
def _initializeNiftypet(self):
    """Fetch NIPET's mMR parameter set, override the constants this class
    controls, classify the rawdata input, and create the output folder."""
    from niftypet import nipet
    self.mMRparams = nipet.get_mmrparams()
    self.mMRparams['Cnt']['VERBOSE'] = self.verbose
    self.mMRparams['Cnt']['SPN'] = self.getSpan()
    self.mMRparams['Cnt']['BTP'] = self.bootstrap
    self.mMRparams['Cnt']['DCYCRR'] = self.DCYCRR
    self.mMRparams['Cnt']['DEVID'] = self.DEVID
    self.mMRparams['Cnt']['ETHRLD'] = self.emissionsScatterThresh()
    self.mMRparams['Cnt']['EMMSKS'] = self.getEmmsks()
    # locate norm/LM/umap inputs under the rawdata location
    self.datain = nipet.classify_input(self.tracerRawdataLocation, self.mMRparams)
    if not os.path.exists(self.outpath):
        os.makedirs(self.outpath)
    logging.info("reconstruction.Reconstruction._initializeNiftypet.datain->")
    logging.info(self.datain)
def _moveToNamedLocation(self, dcm, name):
    """Move the DICOM file *dcm* and its sibling *.bf into subfolder *name*
    of the rawdata location, creating the subfolder when needed.

    :param dcm: path to a *.dcm file
    :param name: subfolder name, e.g. 'norm' or 'LM'
    """
    import shutil
    import errno
    namedLoc = os.path.join(self.tracerRawdataLocation, name)
    if not os.path.exists(namedLoc):
        os.makedirs(namedLoc)
    # NOTE(review): shutil.move can also raise shutil.Error (e.g. when the
    # destination exists), which these OSError handlers do not catch — confirm
    try:
        bf = os.path.splitext(dcm)[0]+'.bf'
        shutil.move(bf, os.path.join(namedLoc, os.path.basename(bf)))
    except OSError as e:
        # tolerate an already-moved file; re-raise anything else
        if e.errno != errno.EEXIST:
            raise
    try:
        shutil.move(dcm, os.path.join(namedLoc, os.path.basename(dcm)))
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
def _parse_prefix(self, prefix):
    """
    checks that prefix is well-formed and set class properties accordingly
    :param prefix is the location of tracer rawdata, e.g., FDG_DT123456789.000000-Converted-NAC:
    :return class properties ac & tracerRawdataLocation are valid:
    :raises AssertionError: for a missing/malformed prefix or a nonexistent path
    """
    from re import compile
    if not prefix:
        raise AssertionError(
            'reconstruction.Reconstruction requires a prefix parameter, the location of tracer data')
    # NOTE(review): search() matches the marker anywhere in the path, not
    # only at the end as the error message below suggests — confirm intent
    nac = compile('-Converted-NAC')
    ac = compile('-Converted-AC')
    if nac.search(prefix):
        self.ac = False
    elif ac.search(prefix):
        self.ac = True
    else:
        raise AssertionError(
            'reconstruction.Reconstruction expected prefix parameter to end in -AC or -NAC')
    self._tracerRawdataLocation = prefix
    if not os.path.exists(self.tracerRawdataLocation):
        # directory listings aid debugging inside docker/singularity mounts
        print(os.listdir('/'))
        print(os.listdir('/SubjectsDir'))
        raise AssertionError(
            'reconstruction.Reconstruction could not find prefix->' + self.tracerRawdataLocation)
    #os.makedirs(self.tracerRawdataLocation)
def _riLUT(self):
"""
:return: radioisotope look-up table
"""
return {'Ge68':{'BF':0.891, 'thalf':270.9516*24*60*60},
'Ga68':{'BF':0.891, 'thalf':67.71*60},
'F18':{'BF':0.967, 'thalf':109.77120*60},
'O15':{'BF':0.999, 'thalf':122.2416},
'C11':{'BF':0.998, 'thalf':20.38*60}}
def __init__(self, prefix=None, umapSF='umapSynth', v=False, cndaDownload=None, devid=0, minTime=0, phantom=False, si=63):
    """
    :param prefix: location of tracer rawdata, ending in -Converted-NAC or
        -Converted-AC; must contain Siemens sinograms, e.g.:
        -rwxr-xr-x+ 1 jjlee wheel 16814660 Sep 13  2016 172.16.31.10.1107.5.2.38.51010.2016090913012239062507614.bf
        -rwxr-xr-x+ 1 jjlee wheel   141444 Sep 13  2016 172.16.31.10.1107.5.2.38.51010.2016090913012239062507614.dcm
        -rw-r--r--+ 1 jjlee wheel  3081280 Nov 14 14:53 umapSynth_full_frame0.nii.gz
        and folders norm/ and LM/ with the corresponding *.bf/*.dcm pairs
    :param umapSF: fileprefix of the synthetic umap
    :param v: verbosity, bool
    :param cndaDownload: path forwarded to organizeRawdataLocation()
    :param devid: CUDA device ID
    :param minTime: min time (sec) for which emission reconstruction runs
    :param phantom: bool; phantom studies skip the NAC/AC folder migration
    :param si: sinogram index for histogramming (<127 for direct sinograms)
    """
    from niftypet.nipet.dinf import dev_info
    logging.info('reconstruction.Reconstruction.__init__')
    # _parse_prefix sets self.ac and self._tracerRawdataLocation
    self._parse_prefix(prefix)
    logging.info('self.tracerRawdataLocation->' + self.tracerRawdataLocation)
    os.chdir(self.tracerRawdataLocation)
    logging.info('cwd->' + os.getcwd())
    self.umapSynthFileprefix = umapSF
    self.verbose = v
    self.phantom = phantom
    self.organizeRawdataLocation(cndaDownload)
    # cache the tracer name parsed from the listmode DICOM
    self.tracerMemory = self.lm_tracer()
    logging.info(str(dev_info(1)))
    self.DEVID = devid
    self._initializeNiftypet()
    self.minTime = minTime
    self.histogram_si = si
def main():
    """Command-line entry point: parse arguments and dispatch to the
    requested reconstruction (dynamic/static/phantom) or print device and
    parameter information."""
    import argparse, textwrap
    from niftypet.nipet.dinf import dev_info
    p = argparse.ArgumentParser(
        description='provides interfaces to https://github.com/pjmark/NIMPA.git, https://github.com/jjleewustledu/NIPET.git',
        usage=textwrap.dedent('''\
        python reconstruction.py -h
        nvidia-docker run -it \\
        -v ${DOCKER_HOME}/hardwareumaps/:/hardwareumaps \\
        -v ${SINGULARITY_HOME}/:/SubjectsDir \\
        niftypetr-image:reconstruction:latest -h
        singularity exec \\
        --nv \\
        --bind $SINGULARITY_HOME/hardwareumaps:/hardwareumaps \\
        --bind $SINGULARITY_HOME:/SubjectsDir \\
        $SINGULARITY_HOME/niftypetr-image_reconstruction.sif \\
        "python" "/work/NiftyPETy/respet/recon/reconstruction.py" "-h"
        '''),
        formatter_class=argparse.RawTextHelpFormatter)
    p.add_argument('-m', '--method',
                   metavar='createDynamic|createStatic|createPhantom|info',
                   type=str,
                   default='createDynamic')
    p.add_argument('-p', '--prefix',
                   metavar='/path/to/experiment-NAC',
                   help='location containing tracer listmode and norm data',
                   type=str,
                   required=True)
    p.add_argument('-v', '--verbose',
                   metavar='true|false',
                   type=str,
                   default='false')
    p.add_argument('-g', '--gpu',
                   metavar='0',
                   help='device ID used by cudaSetDevice',
                   type=str,
                   default='0')
    p.add_argument('-t', '--minTime',
                   metavar='0',
                   help='min time for which emission reconstruction is performed',
                   type=str,
                   default='0')
    args = p.parse_args()
    #os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    #os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    #os.environ["NVIDIA_VISIBLE_DEVICES"] = str(args.gpu)
    v = args.verbose.lower() == 'true'
    method = args.method.lower()
    if method == 'createdynamic':
        print('main.args.method->createdynamic')
        r = Reconstruction(prefix=args.prefix, v=v, devid=int(args.gpu), minTime=int(args.minTime))
        if not r.ac:
            print('main.r.createDynamicNAC')
            r.createDynamicNAC(fcomment='_createDynamicNAC')
        else:
            print('main.r.createDynamic2Carney')
            r.createDynamic2Carney(fcomment='_createDynamic2Carney')
    elif method == 'createstatic':
        print('main.args.method->createstatic')
        r = Reconstruction(prefix=args.prefix, v=v, devid=int(args.gpu), minTime=int(args.minTime))
        if not r.ac:
            print('main.r.createStaticNAC')
            r.createStaticNAC(fcomment='_createStaticNAC')
        else:
            print('main.r.createStaticCarney')
            r.createStaticCarney(fcomment='_createStaticCarney')
    elif method == 'createphantom':
        print('main.args.method->createphantom')
        print('main.r.createPhantom')
        r = Reconstruction(prefix=args.prefix, v=v, devid=int(args.gpu), minTime=int(args.minTime), phantom=True)
        r.createPhantom(fcomment='_createPhantom')
    elif method == 'info':
        print('main.args.method->info')
        print(dev_info(1))
        print('\n')
        # BUG FIX: this branch previously referenced 'r' without ever
        # assigning it, raising NameError; construct the Reconstruction so
        # its parameters and inputs can be reported.
        r = Reconstruction(prefix=args.prefix, v=v, devid=int(args.gpu), minTime=int(args.minTime))
        print(r.mMRparams)
        print('\n')
        print(r.datain)
        print('\n')

if __name__ == '__main__':
    main()
| en | 0.539339 | # create and configure main logger; # see also https://stackoverflow.com/questions/50714316/how-to-use-logging-getlogger-name-in-multiple-modules/50715155#50715155 # see also: mmrrec.py osemone param mask_radious # sinogram index (<127 for direct sinograms) # selects from ~/.niftypet/resources.py: hrdwr_mu :return e.g., '/work/HYGLY48/V1/OO1_V1-Converted-NAC/output': :return e.g., '/work/HYGLY48/V1/OO1_V1-Converted-NAC/output/PET': :return e.g., 'OO1': :return e.g., '/work/HYGLY48/V1/OO1_V1-Converted-NAC': e.g., for 'FDG_DT1234567789.000000-Converted-NAC' and 'FDG_V1-Converted-NAC' :return 'dt123456789' and 'v1': #wtime = self.getWTime(self.json_filename_with(ac=False)) ######### respet.recon.reconstruction.Reconstruction.createStaticCarney ##########") ######### respet.recon.reconstruction.Reconstruction.createPhantom ##########") ######### respet.recon.reconstruction.Reconstruction.createDynamicNAC ##########") ######### respet.recon.reconstruction.Reconstruction.createDynamicNAC ##########") ######### respet.recon.reconstruction.Reconstruction.createDynamic2Carney ##########") :param muo mu-map of imaged object: :param wtime is determined by createDynamic: :param time0 int sec: :param timeF int sec: :param fcomment string for naming subspace: :return result from nipet.mmrchain: :rtype dictionary: # to avoid race-conditions in parallel computing contexts :param taus: np.int_ :param muo: 3D or 4D mu-map of imaged object :return: last result from nipet.mmrchain :rtype: dictionary # to avoid race-conditions in parallel computing contexts # length(times) == length(taus) + 1; revise taus using NIPET metrics # time to wait for nans to clear # passed to save_json() # and reuse existings reconstructions :param offset is determined externaly by createDynamic(): :param taus np.int_ for mu-map frames: :param taus2 np.int_ for emission frames: :param muo 3D or 4D mu-map of imaged object: :return last result from nipet.mmrchain: :rtype dictionary: # to avoid 
race-conditions in parallel computing contexts # of umap alignments # right edge of mu-map frame # right edge of hist frame to be attenuation corrected # select the mu-map for the hist in: [times2[it2-1], times2[it2]] # and reuse existings reconstructions :return: os.path.join(tracerRawdataLocation, umapSynthFileprefix+'.nii.gz') with 4.3 mm fwhm blur # sinogram index (<127 for direct sinograms, >=127 for oblique sinograms) # prompt sinogram # prompt sinogram oblique # delayed sinogram # delayed sinogram oblique # head curve for prompt and delayed events #show() # center of mass #show() # scattering # I don't recommend using it for dynamic scans, but static only, as it drains the memory big time: # Then you sum up all sinograms to see the average performace: # plotting the sinogram profiles for angle indexes 128 and 196: ######### respet.recon.reconstruction.Reconstruction.checkTimeAliasingCarney ##########") ######### respet.recon.reconstruction.Reconstruction.checkTimeHierarchiesCarney ##########") # plot axial image with a colour bar # plot sagittal image with a colour bar # plot coronal image with a colour bar provisions Cnt['ETHRLD'] for use by: mmrrec.py lines 208, 272 228; mmrimg.py 209; sct_module.cu line 302 e.g., a_itr-4_t-577-601sec_createDynamic2Carney_time57.nii.gz :param t0: :param tf: :param fcomment: :param it2: :return bool: :return: affine transformations for NIfTI :rtype: list 2D numeric # bed positions # number of voxels; https://docs.scipy.org/doc/scipy-0.16.1/reference/generated/scipy.ndimage.filters.gaussian_filter.html :param dcm: :return lm_dict, a dictionary of interfile fields: :param muo: numpy.array of len == 3 or len == 4 :param it: list, default is empty :return: list of numpy.array := [mu-hardware, mu-object] see also mfiles/t0_and_dt.m :param: json_file containing taus :return: np array of frame durations :rtype: numpy.int_ # len == 35, nudge = 4, dur == 3601 # len == 16, dur == 601 see also mfiles/t0_and_dt.m :param: json_file 
containing taus :return: np array of frame durations, waiting time :rtype: numpy.int_ # len == 62, nudge = 1.5, dur == 3601 # len == 54, dur == 601 # taus = np.int_([5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,9,9,9,10,11,11,12,13,14,15,16,18,20,22,25,29,34,41,52,137]) # len == 35, dur == 601 # len = 45, dur = 601 # len = 63, dur = 301 :return: max time available from listmode data in sec. #CUDA # sec :param: offset is predetermined duration to exclude from times :return: array of times including 0 and np.cumsum(taus); max(times) == getTimeMax() :return: array of taus revised to be consistent with getTimeMax() :rtype: numpy.int_ :param: json_file containing taus :return: waiting time :rtype: numpy.int_ https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable :param json_file, a str: :return taus, a np array of frame durations, including waiting frames but only frames in the listmode archive: :return wtime, the waiting time in the early scan in sec: https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable :param taus, an np array of frame durations, including waiting frames but only frames in the listmode archive: :param offsettime, the duration between study time and the first saved frame: :param waittime, the witing time in the early scan; typically nan <- 0: :return json_file, a canonical json filename for ancillary data including timings: # overwrites existing :return dcm_datset is a pydicom.dataset.FileDataset containing properties for DICOM fields: # sec provides best estimate of date of listmode collection :param dcm filename: :return: # YYYYMMDD after http://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html provides best estimate of start time (GMT) of listmode collection :param dcm filename: :return: # hhmmss.ffffff after http://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html :return: dictionary for hardware mu-map image provided by nipet.hdw_mumap. Keys: 'im', ... 
See also self.hmuSelection. get NIfTI of the custom umap; see also nipet.mmrimg.obtain_image :param fileprefix: string for fileprefix of 4D image-object; default := self.umapSynthFileprefix :param imgtype: string; cf. obtain_image :param fcomment: string to append to fileprefix :param frames: frame indices to select from _im; default selects all frames :return: np.float32 # PREVIOUSLY: # import nibabel # if fileprefix is None: # fileprefix = self.umapSynthFileprefix # fqfn = os.path.join(self.tracerRawdataLocation, fileprefix + fcomment + '.nii.gz') # nim = nibabel.load(fqfn) # _im = np.float32(nim.get_data()) # if frames is None: # if np.ndim(_im) == 3: # _im = np.transpose(_im[:,::-1,::-1], (2, 1, 0)) # elif np.ndim(_im) == 4: # _im = np.transpose(_im[:,::-1,::-1,:], (3, 2, 1, 0)) # else: # raise ValueError('unsupported np.ndim(Reconstruction.muCarney._im)->' + str(np.ndim(_im))) # else: # _im = np.transpose(_im[:,::-1,::-1,frames], (3, 2, 1, 0)) # _im = np.squeeze(_im) # _im[_im < 0] = 0 :return: [] which NIPET 1.1 uses for no attenuation correction :return: mu-map image from Siemens UTE :rtype: numpy.array # a_itr-4_t-10-20sec_createDynamic2Carney_time1.nii.gz # check umap; move norm and listmode to folders # AC: move .bf, .dcm and umap to tracerRawdataLocation :param dyn: dictionary from nipet.mmrchain :param mumaps: dictionary of mu-maps from imaged object, hardware :param hst: dictionary from nipet.mmrhist :param fcomment: string to append to canonical filename # object and hardware mu-maps :param sta: dictionary from nipet.mmrchain :param mumaps: dictionary of mu-maps from imaged object, hardware :param hst: dictionary from nipet.mmrhist :param fcomment: string to append to canonical filename # object and hardware mu-maps # CLASS PRIVATE PROPERTIES & METHODS Store the numpy array to a NIfTI file <fnii> # print 'A = ', A :param hst: from nipet.mmrhist :param muh: is mumaps list array :param muo: is mumaps list array :return: description text for NIfTI if 
only bed present, attnum := 0.5 :param olist: list of dictionaries :return: numpy.array with times concatenated along axis=0 (c-style) checks that prefix is well-formed and set class properties accordingly :param prefix is the location of tracer rawdata, e.g., FDG_DT123456789.000000-Converted-NAC: :return class properties ac & tracerRawdataLocation are valid: #os.makedirs(self.tracerRawdataLocation) :return: radioisotope look-up table :param: prefix specifies the location of tracer rawdata. :param: self.tracerRawdataLocation contains Siemens sinograms, e.g.: -rwxr-xr-x+ 1 jjlee wheel 16814660 Sep 13 2016 172.16.31.10.1107.5.2.38.51010.2016090913012239062507614.bf -rwxr-xr-x+ 1 jjlee wheel 141444 Sep 13 2016 172.16.31.10.1107.5.2.38.51010.2016090913012239062507614.dcm -rwxr-xr-x+ 1 jjlee wheel 247141283 Sep 13 2016 172.16.31.10.1107.5.2.38.51010.30000016090616552364000000050.bf -rwxr-xr-x+ 1 jjlee wheel 151868 Sep 13 2016 172.16.31.10.1107.5.2.38.51010.30000016090616552364000000050.dcm -rw-r--r--+ 1 jjlee wheel 3081280 Nov 14 14:53 umapSynth_full_frame0.nii.gz :param: self.tracerRawdataLocation also contains folders: norm, containing, e.g.: -rwxr-xr-x+ 1 jjlee wheel 323404 Sep 13 2016 1.3.12.2.1107.5.2.38.51010.30000016090616552364000000048.bf -rwxr-xr-x+ 1 jjlee wheel 143938 Sep 13 2016 172.16.31.10.1107.5.2.38.51010.30000016090616552364000000048.dcm LM, containing, e.g.: -rwxr-xr-x+ 1 jjlee wheel 6817490860 Sep 13 2016 172.16.31.10.1107.5.2.38.51010.30000016090616552364000000049.bf -rwxr-xr-x+ 1 jjlee wheel 145290 Sep 13 2016 1.3.12.2.1107.5.2.38.51010.30000016090616552364000000049.dcm :param: cndaDownload is a path :param: ac, attenuation correction, is bool :param: umapSF is a fileprefix :param: v, verbosity, is bool :param: cndaDownload is a path \ python reconstruction.py -h nvidia-docker run -it \\ -v ${DOCKER_HOME}/hardwareumaps/:/hardwareumaps \\ -v ${SINGULARITY_HOME}/:/SubjectsDir \\ niftypetr-image:reconstruction:latest -h singularity exec \\ --nv \\ 
--bind $SINGULARITY_HOME/hardwareumaps:/hardwareumaps \\ --bind $SINGULARITY_HOME:/SubjectsDir \\ $SINGULARITY_HOME/niftypetr-image_reconstruction.sif \\ "python" "/work/NiftyPETy/respet/recon/reconstruction.py" "-h" #os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" #os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) #os.environ["NVIDIA_VISIBLE_DEVICES"] = str(args.gpu) | 2.278536 | 2 |
setup.py | manish-sin/BeterRoads | 0 | 6623508 | import os
import os
import shutil

# Initial project setup.
# The folders below ship populated with reference data, and the CSV result
# files must be absent when starting a fresh project; this script deletes
# everything and recreates empty folders with the same names.

Root_dir = os.path.abspath(".")
frames_path = os.path.join(Root_dir, r"frames")
# NOTE: 'extacrtion' is a typo, but the name is kept because other scripts
# reference this exact folder.
wrong_coordinate_extacrtion_path = os.path.join(Root_dir, r"wrong_coordinate_extacrtion")
road_images_path = os.path.join(Root_dir, r"road_images")
pot_holes_detected_path = os.path.join(Root_dir, r"pot_holes_detected")

# Remove the folders.  ignore_errors=True makes the script safe to re-run
# and to run on a checkout where a folder is already missing (previously a
# missing folder raised FileNotFoundError and aborted the setup midway).
shutil.rmtree(frames_path, ignore_errors=True)
shutil.rmtree(wrong_coordinate_extacrtion_path, ignore_errors=True)
shutil.rmtree(road_images_path, ignore_errors=True)
shutil.rmtree(pot_holes_detected_path, ignore_errors=True)

# Recreate the folders empty; exist_ok guards against re-runs.
os.makedirs(frames_path, exist_ok=True)
os.makedirs(wrong_coordinate_extacrtion_path, exist_ok=True)
os.makedirs(road_images_path, exist_ok=True)
os.makedirs(pot_holes_detected_path, exist_ok=True)

# Remove the CSV files if present (previously os.remove crashed when a
# file was already absent).
core_data_path = os.path.join(Root_dir, r"core_data.csv")
if os.path.exists(core_data_path):
    os.remove(core_data_path)
super_core_data_path = os.path.join(Root_dir, r"super_core_data.csv")
if os.path.exists(super_core_data_path):
    os.remove(super_core_data_path)
final_csv_path = os.path.join(Root_dir, r"final_csv_data.csv")
if os.path.exists(final_csv_path):
    os.remove(final_csv_path)
import shutil
# Here we will do some initial setup
# We will remove all the folders and some cvs files which needs to be empty when starting a fresh project
# Currently the folders populated with data for reference
# commands below will remove these folders and create a new one with same name
#Here we define the path of all such folders
Root_dir = os.path.abspath(".")
frames_path = os.path.join(Root_dir, r"frames")
wrong_coordinate_extacrtion_path = os.path.join(Root_dir, r"wrong_coordinate_extacrtion")
road_images_path = os.path.join(Root_dir, r"road_images")
pot_holes_detected_path = os.path.join(Root_dir, r"pot_holes_detected")
#Removing those folders
shutil.rmtree(frames_path)
shutil.rmtree(wrong_coordinate_extacrtion_path )
shutil.rmtree(road_images_path)
shutil.rmtree(pot_holes_detected_path)
#recreating those folders
os.mkdir(frames_path)
os.mkdir(wrong_coordinate_extacrtion_path)
os.mkdir(road_images_path)
os.mkdir(pot_holes_detected_path)
#Removing Files
core_data_path = os.path.join(Root_dir, r"core_data.csv")
os.remove(core_data_path)
super_core_data_path = os.path.join(Root_dir, r"super_core_data.csv")
os.remove(super_core_data_path )
final_csv_path = os.path.join(Root_dir, r"final_csv_data.csv")
os.remove(final_csv_path) | en | 0.902643 | # Here we will do some initial setup # We will remove all the folders and some cvs files which needs to be empty when starting a fresh project # Currently the folders populated with data for reference # commands below will remove these folders and create a new one with same name #Here we define the path of all such folders #Removing those folders #recreating those folders #Removing Files | 2.664221 | 3 |
azqa/main.py | basilbc2000/bas-python-projects | 0 | 6623509 | <filename>azqa/main.py
import genconmaps as pcr
import gendistmatrices as gdm
import genplots as gnp
import genmodels as gnm
import genfiles as gnf
# import gendag as gnd
'''
read pcaps
get connections/conversations
refine data as needed
get distances (dtw, ngram)
generate model
generate heatmap
analyze results
'''
# --- load / refresh conversation maps from pcaps ---
# NOTE(review): saveCoversations looks like a typo'd API name (missing 'n');
# it matches genfiles' exported name, so it is kept as-is.
conversations = pcr.getConversationMaps()
gnf.saveCoversations (conversations)
conversations = gnf.loadAllConversations()
# exploratory script: 'samples' is reassigned several times below; only the
# last assignment before each use matters
samples = conversations
samples = pcr.removeConversations(conversations)
# samples = pcr.getRandomConversations(conversations, 10)
size, timestamp = pcr.getConversationStat(samples)
gnp.genPlot(size.values())
# ('172.16.17.32', '172.16.31.10')
# delta, ip.len, sport, dport, ip.p, timestamp
# rows, cols
c2 = pcr.getConversations(conversations, [], 10, 0)
# NOTE(review): dict.values() takes no arguments — this call raises
# TypeError at runtime; possibly meant to plot the sizes of c2 instead
gnp.genPlot(size.values(c2[1:100]))
gnp.genConvPlots(c2)
samples = c2
# gnp.genXYPlots(connections, 0)
# gnp.genXYPlots(connections, 2)
# --- distances, embedding and clustering ---
distAll = gdm.calculateDistances(samples)
projection = gnm.getTSNEProjection(distAll)
gnp.genScatterPlot(projection)
model = gnm.genHDBSCANModel(distAll)
gnm.getClusterStat(model)
#gnp.genCondensedTreePlot(model)
#gnp.genSingleLinkageTreePlot(mode l)
labels, inv_mapping, mapping, ipmapping, keys = gdm.getLabelsIPMappings(samples)
gnp.genScatterPlotWithModel(model, distAll, projection, labels, inv_mapping)
clusterfile = gnf.genClusterfile(model, labels, mapping, inv_mapping)
# dagfile = gnf.genRelationshipGraphfile(model, clusterfile)
# gnd.genRelationshipGraphs(dagfile, model)
gnp.genHeatMap(samples, mapping, keys, clusterfile)
| <filename>azqa/main.py
import genconmaps as pcr
import gendistmatrices as gdm
import genplots as gnp
import genmodels as gnm
import genfiles as gnf
# import gendag as gnd
'''
read pcaps
get connections/conversations
refine data as needed
get distances (dtw, ngram)
generate model
generate heatmap
analyze results
'''
conversations = pcr.getConversationMaps()
gnf.saveCoversations (conversations)
conversations = gnf.loadAllConversations()
samples = conversations
samples = pcr.removeConversations(conversations)
# samples = pcr.getRandomConversations(conversations, 10)
size, timestamp = pcr.getConversationStat(samples)
gnp.genPlot(size.values())
# ('172.16.17.32', '172.16.31.10')
# delta, ip.len, sport, dport, ip.p, timestamp
# rows, cols
c2 = pcr.getConversations(conversations, [], 10, 0)
gnp.genPlot(size.values(c2[1:100]))
gnp.genConvPlots(c2)
samples = c2
# gnp.genXYPlots(connections, 0)
# gnp.genXYPlots(connections, 2)
distAll = gdm.calculateDistances(samples)
projection = gnm.getTSNEProjection(distAll)
gnp.genScatterPlot(projection)
model = gnm.genHDBSCANModel(distAll)
gnm.getClusterStat(model)
#gnp.genCondensedTreePlot(model)
#gnp.genSingleLinkageTreePlot(mode l)
labels, inv_mapping, mapping, ipmapping, keys = gdm.getLabelsIPMappings(samples)
gnp.genScatterPlotWithModel(model, distAll, projection, labels, inv_mapping)
clusterfile = gnf.genClusterfile(model, labels, mapping, inv_mapping)
# dagfile = gnf.genRelationshipGraphfile(model, clusterfile)
# gnd.genRelationshipGraphs(dagfile, model)
gnp.genHeatMap(samples, mapping, keys, clusterfile)
| en | 0.514408 | # import gendag as gnd read pcaps get connections/conversations refine data as needed get distances (dtw, ngram) generate model generate heatmap analyze results # samples = pcr.getRandomConversations(conversations, 10) # ('172.16.17.32', '172.16.31.10') # delta, ip.len, sport, dport, ip.p, timestamp # rows, cols # gnp.genXYPlots(connections, 0) # gnp.genXYPlots(connections, 2) #gnp.genCondensedTreePlot(model) #gnp.genSingleLinkageTreePlot(mode l) # dagfile = gnf.genRelationshipGraphfile(model, clusterfile) # gnd.genRelationshipGraphs(dagfile, model) | 1.982855 | 2 |
django/kisaan/dashboard/urls.py | AkshitOstwal/cfthacks2019 | 2 | 6623510 | from django.urls import path
from . import views
from django.contrib.auth.decorators import login_required
app_name = 'dashboard'
urlpatterns = [
path('',login_required(views.IndexView.as_view()),name='index'),
]
| from django.urls import path
from . import views
from django.contrib.auth.decorators import login_required
app_name = 'dashboard'
urlpatterns = [
path('',login_required(views.IndexView.as_view()),name='index'),
]
| none | 1 | 1.631737 | 2 | |
android/jni/msbuild.py | aselle/flatbuffers | 4,526 | 6623511 | #!/usr/bin/python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script that locates the newest MSBuild in one of several locations.
This script will find the highest version number of MSBuild and run it,
passing its arguments through to MSBuild.
"""
import glob
import os
import re
import string
import subprocess
import sys
SYSTEMROOT = os.getenv("SYSTEMROOT", "c:\\windows")
PROGRAM_FILES = os.getenv("ProgramFiles", "c:\\Program Files")
PROGRAM_FILES_X86 = os.getenv("ProgramFiles(x86)", "c:\\Program Files (x86)")
SEARCH_FOLDERS = [ PROGRAM_FILES + "\\MSBuild\\*\\Bin\\MSBuild.exe",
PROGRAM_FILES_X86 + "\\MSBuild\\*\\Bin\\MSBuild.exe",
SYSTEMROOT + "\\Microsoft.NET\Framework\\*\\MSBuild.exe" ]
def compare_version(a, b):
"""Compare two version number strings of the form W.X.Y.Z.
The numbers are compared most-significant to least-significant.
For example, 12.345.67.89 > 2.987.88.99.
Args:
a: First version number string to compare
b: Second version number string to compare
Returns:
0 if the numbers are identical, a positive number if 'a' is larger, and
a negative number if 'b' is larger.
"""
aa = string.split(a, ".")
bb = string.split(b, ".")
for i in range(0, 4):
if aa[i] != bb[i]:
return cmp(int(aa[i]), int(bb[i]))
return 0
def main():
msbuilds = []
for folder in SEARCH_FOLDERS:
for file in glob.glob(folder):
p = subprocess.Popen([file, "/version"], stdout=subprocess.PIPE)
out, err = p.communicate()
match = re.search("^[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+$", out, re.M)
if match:
msbuilds.append({ 'ver':match.group(), 'exe':file })
msbuilds.sort(lambda x, y: compare_version(x['ver'], y['ver']), reverse=True)
if len(msbuilds) == 0:
print "Unable to find MSBuild.\n"
return -1;
cmd = [msbuilds[0]['exe']]
cmd.extend(sys.argv[1:])
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script that locates the newest MSBuild in one of several locations.
This script will find the highest version number of MSBuild and run it,
passing its arguments through to MSBuild.
"""
import glob
import os
import re
import string
import subprocess
import sys
SYSTEMROOT = os.getenv("SYSTEMROOT", "c:\\windows")
PROGRAM_FILES = os.getenv("ProgramFiles", "c:\\Program Files")
PROGRAM_FILES_X86 = os.getenv("ProgramFiles(x86)", "c:\\Program Files (x86)")
SEARCH_FOLDERS = [ PROGRAM_FILES + "\\MSBuild\\*\\Bin\\MSBuild.exe",
PROGRAM_FILES_X86 + "\\MSBuild\\*\\Bin\\MSBuild.exe",
SYSTEMROOT + "\\Microsoft.NET\Framework\\*\\MSBuild.exe" ]
def compare_version(a, b):
"""Compare two version number strings of the form W.X.Y.Z.
The numbers are compared most-significant to least-significant.
For example, 12.345.67.89 > 2.987.88.99.
Args:
a: First version number string to compare
b: Second version number string to compare
Returns:
0 if the numbers are identical, a positive number if 'a' is larger, and
a negative number if 'b' is larger.
"""
aa = string.split(a, ".")
bb = string.split(b, ".")
for i in range(0, 4):
if aa[i] != bb[i]:
return cmp(int(aa[i]), int(bb[i]))
return 0
def main():
msbuilds = []
for folder in SEARCH_FOLDERS:
for file in glob.glob(folder):
p = subprocess.Popen([file, "/version"], stdout=subprocess.PIPE)
out, err = p.communicate()
match = re.search("^[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+$", out, re.M)
if match:
msbuilds.append({ 'ver':match.group(), 'exe':file })
msbuilds.sort(lambda x, y: compare_version(x['ver'], y['ver']), reverse=True)
if len(msbuilds) == 0:
print "Unable to find MSBuild.\n"
return -1;
cmd = [msbuilds[0]['exe']]
cmd.extend(sys.argv[1:])
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(main())
| en | 0.815969 | #!/usr/bin/python # Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Simple script that locates the newest MSBuild in one of several locations. This script will find the highest version number of MSBuild and run it, passing its arguments through to MSBuild. Compare two version number strings of the form W.X.Y.Z. The numbers are compared most-significant to least-significant. For example, 12.345.67.89 > 2.987.88.99. Args: a: First version number string to compare b: Second version number string to compare Returns: 0 if the numbers are identical, a positive number if 'a' is larger, and a negative number if 'b' is larger. | 2.791233 | 3 |
aqua_test/cli.py | behara/AquaRepo | 25 | 6623512 | from . import __version__
import click
from .core import initializer
INTERACTIVE_INIT_HELP = 'Start an interactive session to compose config file'
@click.version_option(__version__, message='%(version)s')
@click.group()
def main():
''' Boss CLI. '''
pass
@main.command('init')
@click.option('--interactive', '-i', is_flag=True, help=INTERACTIVE_INIT_HELP)
def init(interactive):
''' Initialize a project directory for boss. '''
initializer.setup_boss_home()
files_written = initializer.initialize(interactive)
if not files_written:
click.echo('Already initialized.')
return
# Print the files generated while initializing.
for f in files_written:
click.echo('Generated file: {}'.format(f))
| from . import __version__
import click
from .core import initializer
INTERACTIVE_INIT_HELP = 'Start an interactive session to compose config file'
@click.version_option(__version__, message='%(version)s')
@click.group()
def main():
''' Boss CLI. '''
pass
@main.command('init')
@click.option('--interactive', '-i', is_flag=True, help=INTERACTIVE_INIT_HELP)
def init(interactive):
''' Initialize a project directory for boss. '''
initializer.setup_boss_home()
files_written = initializer.initialize(interactive)
if not files_written:
click.echo('Already initialized.')
return
# Print the files generated while initializing.
for f in files_written:
click.echo('Generated file: {}'.format(f))
| en | 0.671493 | Boss CLI. Initialize a project directory for boss. # Print the files generated while initializing. | 2.653244 | 3 |
misc/add_path_to_envpath.py | mkroman/platformio-atom-ide | 1 | 6623513 | <gh_stars>1-10
# Copyright (c) 2016-present, PlatformIO Plus <<EMAIL>>
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree.
"""Add paths to Windows %PATH% environment variable."""
import os
import sys
import ctypes
from ctypes.wintypes import HWND, UINT, WPARAM, LPARAM, LPVOID
try:
import winreg
except ImportError:
import _winreg as winreg # PY2
def main():
with winreg.CreateKey(winreg.HKEY_CURRENT_USER, u"Environment") as key:
try:
envpath = winreg.QueryValueEx(key, u"PATH")[0]
except WindowsError:
envpath = u"%PATH%"
paths = [envpath]
for path in sys.argv[1:]:
if path and path not in envpath and os.path.isdir(path):
paths.append(path)
envpath = os.pathsep.join(paths)
winreg.SetValueEx(key, u"PATH", 0, winreg.REG_EXPAND_SZ, envpath)
print "Value set!"
winreg.ExpandEnvironmentStrings(envpath)
print "Expanded!"
# notify the system about the changes
SendMessage = ctypes.windll.user32.SendMessageW; print 1
SendMessage.argtypes = HWND, UINT, WPARAM, LPVOID; print 2
SendMessage.restype = LPARAM; print 3
SendMessage(0xFFFF, 0x1A, 0, u"Environment"); print 4
print "Message sent!"
if __name__ == '__main__':
main()
| # Copyright (c) 2016-present, PlatformIO Plus <<EMAIL>>
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree.
"""Add paths to Windows %PATH% environment variable."""
import os
import sys
import ctypes
from ctypes.wintypes import HWND, UINT, WPARAM, LPARAM, LPVOID
try:
import winreg
except ImportError:
import _winreg as winreg # PY2
def main():
with winreg.CreateKey(winreg.HKEY_CURRENT_USER, u"Environment") as key:
try:
envpath = winreg.QueryValueEx(key, u"PATH")[0]
except WindowsError:
envpath = u"%PATH%"
paths = [envpath]
for path in sys.argv[1:]:
if path and path not in envpath and os.path.isdir(path):
paths.append(path)
envpath = os.pathsep.join(paths)
winreg.SetValueEx(key, u"PATH", 0, winreg.REG_EXPAND_SZ, envpath)
print "Value set!"
winreg.ExpandEnvironmentStrings(envpath)
print "Expanded!"
# notify the system about the changes
SendMessage = ctypes.windll.user32.SendMessageW; print 1
SendMessage.argtypes = HWND, UINT, WPARAM, LPVOID; print 2
SendMessage.restype = LPARAM; print 3
SendMessage(0xFFFF, 0x1A, 0, u"Environment"); print 4
print "Message sent!"
if __name__ == '__main__':
main() | en | 0.853287 | # Copyright (c) 2016-present, PlatformIO Plus <<EMAIL>> # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. Add paths to Windows %PATH% environment variable. # PY2 # notify the system about the changes | 2.116113 | 2 |
cellsium/model/__init__.py | modsim/CellSium | 0 | 6623514 | <reponame>modsim/CellSium<gh_stars>0
"""Cell model package."""
from typing import Any, Iterable, Mapping, Optional
from ..parameters import h_to_s, s_to_h
from ..simulation.simulator import Timestep
from .agent import (
Copyable,
IdCounter,
InitializeWithParameters,
Representable,
WithLineage,
WithLineageHistory,
WithRandomSequences,
WithTemporalLineage,
)
from .geometry import (
AutoMesh3D,
BentRod,
CellGeometry,
Coccoid,
Ellipsoid,
Rectangle,
RodShaped,
Shape,
Shape3D,
Square,
WithAngle,
WithFluorescence,
WithPosition,
WithProperDivisionBehavior,
)
from .initialization import (
RandomAngle,
RandomBentRod,
RandomPosition,
RandomWidthLength,
)
def generate_cell(*additional_classes: type, name: str = "PlacedCell"):
"""
Generates a cell class using the standard classes, and possible additional classes.
:param additional_classes: Additional classes to inherit the cell from.
:param name: Name of the class
:return: Class
"""
cell = type(
name,
(
WithLineageHistory,
WithLineage,
WithTemporalLineage,
WithProperDivisionBehavior,
InitializeWithParameters,
Copyable,
Representable,
WithRandomSequences,
RandomWidthLength,
RandomBentRod,
RandomPosition,
RandomAngle,
CellGeometry,
)
+ additional_classes,
{},
)
return cell
PlacedCell = generate_cell(BentRod)
def assemble_cell(
simulated_model: type,
*additional_classes,
placed_cell: type = PlacedCell,
name: str = "Cell"
):
"""
Assembles a cell class from parent classes.
Necessary as the cell class needs the right level of inheritance.
:param simulated_model: Model class
:param additional_classes: Classes to create a cell type, or
:param placed_cell: A prepared cell
:param name: Optional name
:return: Cell class
"""
if additional_classes:
base_cell = generate_cell(*additional_classes)
else:
base_cell = placed_cell
return type(name, (simulated_model, base_cell), {})
class SimulatedCell:
"""
Base class for simulated cells, allowing for division behavior.
"""
def birth(
self, parent: Optional["SimulatedCell"] = None, ts: Optional[Timestep] = None
) -> None:
"""
Called when a cell is "born".
:param parent: Parent cell
:param ts: Timestep
:return: None
"""
pass
def grow(self, ts: Timestep) -> None:
"""
Called each timestep to grow cell.
:param ts: Timestep
:return: None
"""
pass
def divide(self, ts: Timestep) -> Iterable["SimulatedCell"]:
"""
Called when a cell should divide, creates the daughter cells.
:param ts: Timestep
:return: None
"""
offspring_a, offspring_b = self.copy(), self.copy()
offspring_a.position, offspring_b.position = self.get_division_positions()
if isinstance(self, WithLineage):
offspring_a.parent_id = offspring_b.parent_id = self.id_
if isinstance(self, WithLineageHistory):
offspring_a.lineage_history = self.lineage_history[:] + [self.id_]
offspring_b.lineage_history = self.lineage_history[:] + [self.id_]
if isinstance(self, WithTemporalLineage):
now = ts.simulation.time
offspring_b.birth_time = offspring_a.birth_time = now
ts.simulator.add(offspring_a)
ts.simulator.add(offspring_b)
offspring_a.birth(parent=self, ts=ts)
offspring_b.birth(parent=self, ts=ts)
ts.simulator.remove(self)
return offspring_a, offspring_b
def step(self, ts: Timestep) -> None:
"""
Timestep function of the cell object, called by the simulator.
:param ts: Timestep
:return: None
"""
self.grow(ts=ts)
# noinspection PyAttributeOutsideInit
class SizerCell(SimulatedCell):
"""
Example cell implementing a simple sizer growth mechanism.
"""
@staticmethod
def random_sequences(sequence: Any) -> Mapping[str, Any]:
return dict(division_size=sequence.normal(3.0, 0.25)) # µm
def birth(
self, parent: Optional["SizerCell"] = None, ts: Optional[Timestep] = None
) -> None:
self.division_size = next(self.random.division_size)
self.elongation_rate = 1.5
def grow(self, ts: Timestep) -> None:
self.length += self.elongation_rate * ts.hours
if self.length > self.division_size:
offspring_a, offspring_b = self.divide(ts)
offspring_a.length = offspring_b.length = self.length / 2
# noinspection PyAttributeOutsideInit
class TimerCell(SimulatedCell):
"""
Example cell implementing a simple timer growth mechanism.
"""
@staticmethod
def random_sequences(sequence: Any) -> Mapping[str, Any]:
return dict(elongation_rate=sequence.normal(1.5, 0.25)) # µm·h⁻¹
def birth(
self, parent: Optional["TimerCell"] = None, ts: Optional[Timestep] = None
) -> None:
self.elongation_rate = next(self.random.elongation_rate)
self.division_time = h_to_s(1.0)
def grow(self, ts: Timestep) -> None:
self.length += self.elongation_rate * ts.hours
if ts.time > (self.birth_time + self.division_time):
offspring_a, offspring_b = self.divide(ts)
offspring_a.length = offspring_b.length = self.length / 2
__all__ = [
'InitializeWithParameters',
'WithRandomSequences',
'Copyable',
'Representable',
'IdCounter',
'WithLineage',
'WithLineageHistory',
'WithTemporalLineage',
'Shape',
'Shape3D',
'RodShaped',
'Rectangle',
'Square',
'BentRod',
'Coccoid',
'Ellipsoid',
'WithPosition',
'WithAngle',
'WithFluorescence',
'WithProperDivisionBehavior',
'AutoMesh3D',
'CellGeometry',
's_to_h',
'h_to_s',
'PlacedCell',
'SimulatedCell',
'TimerCell',
'SizerCell',
'generate_cell',
'assemble_cell',
'Timestep',
]
| """Cell model package."""
from typing import Any, Iterable, Mapping, Optional
from ..parameters import h_to_s, s_to_h
from ..simulation.simulator import Timestep
from .agent import (
Copyable,
IdCounter,
InitializeWithParameters,
Representable,
WithLineage,
WithLineageHistory,
WithRandomSequences,
WithTemporalLineage,
)
from .geometry import (
AutoMesh3D,
BentRod,
CellGeometry,
Coccoid,
Ellipsoid,
Rectangle,
RodShaped,
Shape,
Shape3D,
Square,
WithAngle,
WithFluorescence,
WithPosition,
WithProperDivisionBehavior,
)
from .initialization import (
RandomAngle,
RandomBentRod,
RandomPosition,
RandomWidthLength,
)
def generate_cell(*additional_classes: type, name: str = "PlacedCell"):
"""
Generates a cell class using the standard classes, and possible additional classes.
:param additional_classes: Additional classes to inherit the cell from.
:param name: Name of the class
:return: Class
"""
cell = type(
name,
(
WithLineageHistory,
WithLineage,
WithTemporalLineage,
WithProperDivisionBehavior,
InitializeWithParameters,
Copyable,
Representable,
WithRandomSequences,
RandomWidthLength,
RandomBentRod,
RandomPosition,
RandomAngle,
CellGeometry,
)
+ additional_classes,
{},
)
return cell
PlacedCell = generate_cell(BentRod)
def assemble_cell(
simulated_model: type,
*additional_classes,
placed_cell: type = PlacedCell,
name: str = "Cell"
):
"""
Assembles a cell class from parent classes.
Necessary as the cell class needs the right level of inheritance.
:param simulated_model: Model class
:param additional_classes: Classes to create a cell type, or
:param placed_cell: A prepared cell
:param name: Optional name
:return: Cell class
"""
if additional_classes:
base_cell = generate_cell(*additional_classes)
else:
base_cell = placed_cell
return type(name, (simulated_model, base_cell), {})
class SimulatedCell:
"""
Base class for simulated cells, allowing for division behavior.
"""
def birth(
self, parent: Optional["SimulatedCell"] = None, ts: Optional[Timestep] = None
) -> None:
"""
Called when a cell is "born".
:param parent: Parent cell
:param ts: Timestep
:return: None
"""
pass
def grow(self, ts: Timestep) -> None:
"""
Called each timestep to grow cell.
:param ts: Timestep
:return: None
"""
pass
def divide(self, ts: Timestep) -> Iterable["SimulatedCell"]:
"""
Called when a cell should divide, creates the daughter cells.
:param ts: Timestep
:return: None
"""
offspring_a, offspring_b = self.copy(), self.copy()
offspring_a.position, offspring_b.position = self.get_division_positions()
if isinstance(self, WithLineage):
offspring_a.parent_id = offspring_b.parent_id = self.id_
if isinstance(self, WithLineageHistory):
offspring_a.lineage_history = self.lineage_history[:] + [self.id_]
offspring_b.lineage_history = self.lineage_history[:] + [self.id_]
if isinstance(self, WithTemporalLineage):
now = ts.simulation.time
offspring_b.birth_time = offspring_a.birth_time = now
ts.simulator.add(offspring_a)
ts.simulator.add(offspring_b)
offspring_a.birth(parent=self, ts=ts)
offspring_b.birth(parent=self, ts=ts)
ts.simulator.remove(self)
return offspring_a, offspring_b
def step(self, ts: Timestep) -> None:
"""
Timestep function of the cell object, called by the simulator.
:param ts: Timestep
:return: None
"""
self.grow(ts=ts)
# noinspection PyAttributeOutsideInit
class SizerCell(SimulatedCell):
"""
Example cell implementing a simple sizer growth mechanism.
"""
@staticmethod
def random_sequences(sequence: Any) -> Mapping[str, Any]:
return dict(division_size=sequence.normal(3.0, 0.25)) # µm
def birth(
self, parent: Optional["SizerCell"] = None, ts: Optional[Timestep] = None
) -> None:
self.division_size = next(self.random.division_size)
self.elongation_rate = 1.5
def grow(self, ts: Timestep) -> None:
self.length += self.elongation_rate * ts.hours
if self.length > self.division_size:
offspring_a, offspring_b = self.divide(ts)
offspring_a.length = offspring_b.length = self.length / 2
# noinspection PyAttributeOutsideInit
class TimerCell(SimulatedCell):
"""
Example cell implementing a simple timer growth mechanism.
"""
@staticmethod
def random_sequences(sequence: Any) -> Mapping[str, Any]:
return dict(elongation_rate=sequence.normal(1.5, 0.25)) # µm·h⁻¹
def birth(
self, parent: Optional["TimerCell"] = None, ts: Optional[Timestep] = None
) -> None:
self.elongation_rate = next(self.random.elongation_rate)
self.division_time = h_to_s(1.0)
def grow(self, ts: Timestep) -> None:
self.length += self.elongation_rate * ts.hours
if ts.time > (self.birth_time + self.division_time):
offspring_a, offspring_b = self.divide(ts)
offspring_a.length = offspring_b.length = self.length / 2
__all__ = [
'InitializeWithParameters',
'WithRandomSequences',
'Copyable',
'Representable',
'IdCounter',
'WithLineage',
'WithLineageHistory',
'WithTemporalLineage',
'Shape',
'Shape3D',
'RodShaped',
'Rectangle',
'Square',
'BentRod',
'Coccoid',
'Ellipsoid',
'WithPosition',
'WithAngle',
'WithFluorescence',
'WithProperDivisionBehavior',
'AutoMesh3D',
'CellGeometry',
's_to_h',
'h_to_s',
'PlacedCell',
'SimulatedCell',
'TimerCell',
'SizerCell',
'generate_cell',
'assemble_cell',
'Timestep',
] | en | 0.758035 | Cell model package. Generates a cell class using the standard classes, and possible additional classes. :param additional_classes: Additional classes to inherit the cell from. :param name: Name of the class :return: Class Assembles a cell class from parent classes. Necessary as the cell class needs the right level of inheritance. :param simulated_model: Model class :param additional_classes: Classes to create a cell type, or :param placed_cell: A prepared cell :param name: Optional name :return: Cell class Base class for simulated cells, allowing for division behavior. Called when a cell is "born". :param parent: Parent cell :param ts: Timestep :return: None Called each timestep to grow cell. :param ts: Timestep :return: None Called when a cell should divide, creates the daughter cells. :param ts: Timestep :return: None Timestep function of the cell object, called by the simulator. :param ts: Timestep :return: None # noinspection PyAttributeOutsideInit Example cell implementing a simple sizer growth mechanism. # µm # noinspection PyAttributeOutsideInit Example cell implementing a simple timer growth mechanism. # µm·h⁻¹ | 2.732858 | 3 |
domain/entities/errors/InvalidMonth.py | JVGC/MyFinancesPython | 0 | 6623515 | <gh_stars>0
class InvalidMonth:
def __init__(self, avaiable_months) -> None:
self.message = 'Invalid Month'
self.entity = 'Month'
self.reason = f"Month should be in: {avaiable_months}"
| class InvalidMonth:
def __init__(self, avaiable_months) -> None:
self.message = 'Invalid Month'
self.entity = 'Month'
self.reason = f"Month should be in: {avaiable_months}" | none | 1 | 2.871544 | 3 | |
siebenapp/tests/test_system.py | ahitrin/SiebenApp | 14 | 6623516 | # coding: utf-8
import pytest
from approvaltests import verify # type: ignore
from siebenapp.system import split_long, dot_export
from siebenapp.tests.dsl import build_goaltree, open_, clos_, selected, previous
@pytest.mark.parametrize(
"source,result",
[
("short", "short"),
("10: Example multi-word Sieben label", "10: Example multi-word\nSieben label"),
(
"123: Example very-very long multi-word Sieben label",
"123: Example very-very\nlong multi-word Sieben\nlabel",
),
(
"43: Manual-placed\nnewlines\nare ignored",
"43: Manual-placed\nnewlines\nare\nignored",
),
],
)
def test_split_long_labels(source, result):
assert split_long(source) == result
def test_dot_export():
goals = build_goaltree(
open_(1, "Root", [2, 3, 4, 5], blockers=[6]),
clos_(
2,
"This is closed goal with no children or blockers. "
"It also has a long name that must be compacted",
),
open_(3, 'I have some "special" symbols', [6, 7], select=selected),
clos_(4, ""),
open_(5, "Many blockerz", blockers=[2, 4, 6, 7]),
clos_(6, "!@#$%^&*()\\/,.?"),
open_(7, ";:[{}]<>", select=previous),
)
verify(dot_export(goals))
| # coding: utf-8
import pytest
from approvaltests import verify # type: ignore
from siebenapp.system import split_long, dot_export
from siebenapp.tests.dsl import build_goaltree, open_, clos_, selected, previous
@pytest.mark.parametrize(
"source,result",
[
("short", "short"),
("10: Example multi-word Sieben label", "10: Example multi-word\nSieben label"),
(
"123: Example very-very long multi-word Sieben label",
"123: Example very-very\nlong multi-word Sieben\nlabel",
),
(
"43: Manual-placed\nnewlines\nare ignored",
"43: Manual-placed\nnewlines\nare\nignored",
),
],
)
def test_split_long_labels(source, result):
assert split_long(source) == result
def test_dot_export():
goals = build_goaltree(
open_(1, "Root", [2, 3, 4, 5], blockers=[6]),
clos_(
2,
"This is closed goal with no children or blockers. "
"It also has a long name that must be compacted",
),
open_(3, 'I have some "special" symbols', [6, 7], select=selected),
clos_(4, ""),
open_(5, "Many blockerz", blockers=[2, 4, 6, 7]),
clos_(6, "!@#$%^&*()\\/,.?"),
open_(7, ";:[{}]<>", select=previous),
)
verify(dot_export(goals))
| en | 0.338409 | # coding: utf-8 # type: ignore #$%^&*()\\/,.?"), | 2.072121 | 2 |
ibslib/io/__init__.py | songsiwei/Ogre_r_sv | 0 | 6623517 | <filename>ibslib/io/__init__.py
__author__ = '<NAME>'
from .read import *
from .write import *
from .aims_extractor import AimsExtractor
from .check import * | <filename>ibslib/io/__init__.py
__author__ = '<NAME>'
from .read import *
from .write import *
from .aims_extractor import AimsExtractor
from .check import * | none | 1 | 1.10722 | 1 | |
vulnerability_mapping/hot_spot_mapping.py | worldbank/CityScan | 4 | 6623518 | import sys, os, importlib, math, shutil
import rasterio
import skimage
import numpy as np
import pandas as pd
import geopandas as gpd
import osmnx as ox
import GOSTnets as gn
import skimage.graph as graph
from rasterio.mask import mask
from rasterio import features
from rasterio.warp import reproject, Resampling
from shapely.geometry import box, Point
from scipy.ndimage import generic_filter
from pandana.loaders import osm
sys.path.append("../../GOST/")
import GOSTRocks.rasterMisc as rMisc
import GOSTRocks.misc as misc
import GOSTRocks.osmMisc as osm_misc
# Driving Speed for attributing road networks with speed
speed_dict = {
'residential': 20, # kmph
'primary': 40,
'primary_link':35,
'motorway':50,
'motorway_link': 45,
'trunk': 40,
'trunk_link':35,
'secondary': 30,
'secondary_link':25,
'tertiary':30,
'tertiary_link': 25,
'unclassified':20,
'living_street':10,
'service':10
}
def get_speed(x, s_dict):
''' Get speed from the above speed dict, but some of the suppied x's are actually lists
INPUT
x [string] - infra type to look up in s_dict
s_dict [dictionary] - see speed_dict above
RETURNS
[number] speed
'''
try:
speed = s_dict[x]
except:
if type(x) == list:
try:
speed = s_dict[x[0]]
except:
speed = 5
else:
speed=5
return(speed)
def get_nodes(b, tags):
''' Extract nodes from OSM based on tag query using pandana loaders
INPUTS
b [list of numbers] - boundary list from shapely.bounds
tags [string] - filter to be send to pandana.osm.node_query
RETURNS
[geopandas dataframe]
'''
nodes = osm.node_query(b[1], b[0], b[3], b[2], tags=tags)
nodes_geom = [Point(x) for x in zip(nodes['lon'], nodes['lat'])]
nodes_df = gpd.GeoDataFrame(nodes[['amenity','lat','lon']], geometry=nodes_geom, crs={'init':'epgs:4326'})
return(nodes_df)
def standardizeInputRasters(inR1, inR2, inR1_outFile, data_type="N"):
''' Standardize inR1 to inR2: changes crs, extent, and resolution.
INPUTS:
inR1, inR2 [rasterio raster object]
inR1_outFile [string] - output file for creating inR1 standardized to inR2
[optional] data_type [character] - Defines the data type of the input raster (inR1).
It defines the resampling type and works for 'N' for numeric and 'C' for categorical
RETURNS
nothing
'''
if inR1.crs != inR2.crs:
raise ValueError("CRS Error")
#Clip R1 to R2
#Get JSON of bounding box
b2 = inR2.bounds
boxJSON = [{'type': 'Polygon', 'coordinates': [[[b2.left, b2.bottom],[b2.left, b2.top],[b2.right, b2.top],[b2.right, b2.bottom],[b2.left, b2.bottom]]]}]
out_img, out_transform = mask(inR1, boxJSON, crop=True)
out_meta = inR1.meta.copy()
#Re-scale resolution of R1 to R2
newArr = np.empty(shape=(1, inR2.shape[0], inR2.shape[1]))
if data_type == "N":
resampling_type = Resampling.cubic
elif data_type == "C":
resampling_type = Resampling.nearest
reproject(out_img, newArr, src_transform=out_transform, dst_transform=inR2.transform, src_crs=inR1.crs, dst_crs=inR2.crs, resampling=resampling_type)
out_meta.update({"driver": "GTiff",
"height": newArr.shape[1],
"width": newArr.shape[2],
"transform": inR2.transform,
"crs": inR2.crs})
with rasterio.open(inR1_outFile, "w", **out_meta) as dest:
dest.write(newArr.astype(out_meta['dtype']))
class city_hotspot(object):
'''
Calculate hotspots through combining population density, builidng height, and access to amenities
'''
def __init__(self, height_raster_file, output_folder):
''' Initiate the city_hotspot analysis
INPUT
height_raster_file [string] - path to the raster describing builidng height
output_folder [string] - path to folder to create output
EXAMPLE
city = hot.city_hotspot(height_raster, out_folder)
city.extract_other_rasters(global_pop_raster, global_globR)
rosads = city.extract_osm_data()
xx = city.generate_walking_raster()
city.calculate_accessibility()
city.calculate_pop_hotspots()
'''
self.height_data = rasterio.open(height_raster_file)
self.bounds = box(*self.height_data.bounds)
self.intermediate_data = []
self.output_folder = output_folder
self.wp_file = os.path.join(output_folder, "wp_2020.tif")
self.wp_file_reproj = os.path.join(output_folder, "wp_2020_re.tif")
self.lc_file = os.path.join(output_folder, "lcvr_globcover_2015.tif")
self.lc_file_reproj = os.path.join(output_folder, "lcvr_globcover_2015_re.tif")
self.toilets_file = os.path.join(output_folder, "toilets.shp")
self.water_file = os.path.join(output_folder, "water_points.shp")
self.shops_file = os.path.join(output_folder, "shops.shp")
self.roads_file = os.path.join(output_folder, "road_network.shp")
self.walking_speed = os.path.join(output_folder, "traversal_speed.tif")
self.walking_time = os.path.join(output_folder, "traversal_time.tif")
self.pop_by_floor = os.path.join(output_folder, "pop_floor.tif")
def combine_results(self, base_map, hotspot_files, thresh=0.29):
''' copy the vizualization qgis document to the output folder, summarize population in hotspots
'''
out_map = os.path.join(self.output_folder, os.path.basename(base_map))
if out_map != base_map:
shutil.copy(base_map, out_map)
# Open the hotspot datasets, apply threshold and summarize population
popD = rasterio.open(self.wp_file_reproj).read()
popD[popD < 0] = 0
pop_h = rasterio.open(self.pop_by_floor).read()
pop_h = (pop_h > thresh).astype(int)
pop_h_sum = (pop_h * popD).sum()
final = pop_h
total_pop = popD.sum()
hot_spot_summary = {}
hot_spot_summary['total_pop'] = total_pop
hot_spot_summary['pop_hotspot'] = pop_h_sum
for h_file in hotspot_files:
if os.path.exists(h_file):
inR = rasterio.open(h_file).read()
inR = (inR > thresh).astype(int)
pop_h = (inR * popD).sum()
#combine all hotspots data
try:
final = final + inR
except:
final = inR
else:
pop_h = -1
hot_spot_summary[os.path.basename(h_file).replace(".tif", "")] = pop_h
# get combo hotspot population
final = (final > 0).astype(int)
final_h = (final * popD).sum()
hot_spot_summary['combo_hotspot'] = final_h
return(hot_spot_summary)
def extract_other_rasters(self, pop_raster, land_cover_raster):
    ''' Extract population, landcover data that match the existing height data

    Each output is only created if it does not already exist, so re-running
    the workflow is cheap.
    INPUT
        pop_raster [rasterio] - global population raster from which the city data are extracted
        land_cover_raster [rasterio] - global landcover dataset
    RETURNS
        NA - writes all results to file
    ##TODO: Look at re-smapling to see how numbers change
    '''
    # Population: clip the global raster to the city bounds and write it out
    if not os.path.exists(self.wp_file):
        wp_data, out_transform = mask(pop_raster, shapes=[self.bounds], crop=True)
        wp_data[wp_data < 0] = 0  # negative cells are nodata
        meta = self.height_data.meta.copy()
        meta.update(width=wp_data.shape[2],
                    height=wp_data.shape[1],
                    transform=out_transform,
                    dtype = wp_data.dtype)
        with rasterio.open(self.wp_file, 'w', **meta) as out:
            out.write(wp_data)
    if not os.path.exists(self.wp_file_reproj):
        # standardize the clipped population dataset to the height dataset
        # ('N' = numeric -> cubic resampling)
        standardizeInputRasters(rasterio.open(self.wp_file), self.height_data, self.wp_file_reproj, data_type='N')
    # Landcover: same clip-and-standardize, but categorical resampling
    if not os.path.exists(self.lc_file):
        wp_data, out_transform = mask(land_cover_raster, shapes=[self.bounds], crop=True)
        meta = self.height_data.meta.copy()
        meta.update(width=wp_data.shape[2],
                    height=wp_data.shape[1],
                    transform=out_transform,
                    dtype = wp_data.dtype)
        with rasterio.open(self.lc_file, 'w', **meta) as out:
            out.write(wp_data)
    if not os.path.exists(self.lc_file_reproj):
        # standardize the clipped landcover dataset to the height dataset
        # ('C' = categorical -> nearest-neighbour resampling)
        standardizeInputRasters(rasterio.open(self.lc_file), self.height_data, self.lc_file_reproj, data_type='C')
def extract_osm_data(self):
    ''' Extract amenities and the road network from OSM

    Writes toilets, water points, shops, and roads shapefiles into the output
    folder (each only if not already present). The amenity queries are wrapped
    in bare try/except because the OSM query may return nothing for a city.
    RETURNS
        [geopandas dataframe] of roads when the road file is newly created;
        otherwise None (the roads are still loaded onto self.sel_roads)
    '''
    if not os.path.exists(self.toilets_file):
        amenities = ['toilets', 'washroom', 'restroom']
        toilets_tags = '"amenity"~"{}"'.format('|'.join(amenities))
        try:
            self.toilets = get_nodes(self.height_data.bounds, toilets_tags)
            self.toilets.to_file(self.toilets_file)
        except:
            pass  # best-effort: no matching nodes / query failure
    if not os.path.exists(self.water_file):
        amenities = ['water_points', 'drinking_water', 'pumps', 'water_pumps', 'well']
        water_tags = '"amenity"~"{}"'.format('|'.join(amenities))
        try:
            self.water_points = get_nodes(self.height_data.bounds, water_tags)
            self.water_points.to_file(self.water_file)
        except:
            pass  # best-effort: no matching nodes / query failure
    if not os.path.exists(self.shops_file):
        amenities = ['supermarket', 'convenience', 'general', 'department_stores', 'wholesale', 'grocery', 'general']
        shp_tags = '"shop"~"{}"'.format('|'.join(amenities))
        try:
            self.shops = get_nodes(self.height_data.bounds, shp_tags)
            self.shops.to_file(self.shops_file)
        except:
            pass  # best-effort: no matching nodes / query failure
    if not os.path.exists(self.roads_file):
        # bounds is (minx, miny, maxx, maxy); graph_from_bbox wants N, S, E, W
        b = self.height_data.bounds
        sel_graph = ox.graph_from_bbox(b[3], b[1], b[2], b[0], retain_all=True)
        self.sel_roads = gn.edge_gdf_from_graph(sel_graph)
        self.sel_roads['speed'] = self.sel_roads['highway'].apply(lambda x: get_speed(x, speed_dict))
        # drop attribute columns that are not needed (and can break .to_file)
        bad_fields = ['name','width','maxspeed','ref','tunnel','service','area','lanes','junction','oneway','bridge','access']
        for f in bad_fields:
            try:
                self.sel_roads.drop([f], axis=1, inplace=True)
            except:
                pass  # column not present in this extract
        # OSM attributes may come back as lists; keep the first entry so the
        # columns serialize to shapefile
        def get_type(x):
            if type(x) == list:
                return(x[0])
            else:
                return(x)
        self.sel_roads['highway'] = self.sel_roads['highway'].apply(get_type)
        self.sel_roads['osmid'] = self.sel_roads['osmid'].apply(get_type)
        try:
            self.sel_roads.to_file(self.roads_file)
        except:
            print("Error writing roads to disk")
        return(self.sel_roads)
    else:
        self.sel_roads = gpd.read_file(self.roads_file)
def generate_walking_raster(self, resolution=90):
    ''' Generate a map of walking speed combining landcover and road network

    INPUT
        [optional] resolution [int] - cell size in metres, used to convert
            speed (km/h) into per-cell traversal time (seconds)
    RETURNS
        [numpy array] traversal time (seconds per cell) when newly computed;
        None when self.walking_time already exists on disk
    '''
    if not os.path.exists(self.walking_time):
        # load the landcover data, classify into water (1), other (2), and urban (3)
        lc_raster = rasterio.open(self.lc_file_reproj)
        lc_data = lc_raster.read()
        walking_speed = lc_data.copy()
        walking_speed[lc_data < 190] = 2
        walking_speed[lc_data == 190] = 3
        walking_speed[lc_data > 190] = 1
        # Open the road network and burn all segments in at uniform speed (5)
        shapes = ((row['geometry'], 5) for idx, row in self.sel_roads.iterrows())
        speed_image = features.rasterize(shapes, out_shape=self.height_data.shape, transform=self.height_data.transform, fill=0)
        # stack rasterized roads and landcover speed and take the cell-wise maximum
        stacked = np.dstack([walking_speed[0,:,:], speed_image])
        max_speed = np.amax(stacked, axis=2)
        # Convert travel speed (km/h) into traversal time (seconds to cross a cell)
        traversal_speed = resolution / (max_speed * 1000.0 / (60.0 * 60.0))
        # BUG FIX: the walking-speed raster was previously written after an
        # unconditional return and therefore never created; write it first.
        meta = lc_raster.meta.copy()
        meta.update(dtype = max_speed.dtype)
        with rasterio.open(self.walking_speed, 'w', **meta) as out:
            out.write_band(1, max_speed)
        meta = lc_raster.meta.copy()
        meta.update(dtype = traversal_speed.dtype)
        with rasterio.open(self.walking_time, 'w', **meta) as out:
            out.write_band(1, traversal_speed)
        return(traversal_speed)
def calculate_accessibility(self):
    ''' Using skimage.MCP to calculate walking access through integration of
        the landcover dataset and the OSM road network.

    For each amenity layer that exists on disk (toilets, water points, shops),
    computes a least-cost travel-time surface from all amenity locations over
    the traversal-time raster and writes it to
    <output_folder>/<amenity>_distance_roads.tif (skipped if already present).
    '''
    traversal_raster = rasterio.open(self.walking_time)
    time_data = traversal_raster.read()[0,:,:]
    # create skimage graph over per-cell traversal times
    inH = self.height_data
    meta = inH.meta.copy()
    mcp = skimage.graph.MCP_Geometric(time_data)
    # iterate through amenity layers
    for amenity_file in [self.toilets_file, self.water_file, self.shops_file]:
        if os.path.exists(amenity_file):
            amenity = gpd.read_file(amenity_file)
            amenity_name = os.path.basename(amenity_file).replace(".shp", "")
            # BUG FIX: distance_raster was previously computed before the loop
            # from amenity_name, which was not yet defined (NameError on every
            # call); the per-amenity output path is now built here instead.
            distance_raster = os.path.join(self.output_folder, '%s_distance_roads.tif' % amenity_name)
            if not os.path.exists(distance_raster):
                # seed the cost search with the raster indices of every amenity point
                costs, traceback = mcp.find_costs(list(set([inH.index(x.x, x.y) for x in amenity['geometry']])))
                meta.update(dtype=costs.dtype)
                with rasterio.open(distance_raster, 'w', **meta) as out:
                    out.write_band(1, costs)
def calculate_pop_hotspots(self, resolution=90, pop_layer = '', out_file=''):
    ''' Calculate population density hotspots based on TFA (total floor area)

    INPUT
        [optional] resolution [int] - cell size of the height raster in metres
        [optional] pop_layer [string] - population raster; defaults to self.wp_file_reproj
        [optional] out_file [string] - output raster; defaults to self.pop_by_floor
    RETURNS
        NA - writes the smoothed population-per-floor-area raster to out_file
             (skipped if it already exists)
    '''
    # Divide population by height to get a density-per-floor-area analysis
    if pop_layer == '':
        pop_layer = self.wp_file_reproj
    if out_file == '':
        out_file = self.pop_by_floor
    if not os.path.exists(out_file):
        pop_raster = rasterio.open(pop_layer)
        pop_data = pop_raster.read()
        # building height / 3 m per storey * cell area = total floor area
        height_data = (self.height_data.read() / 3) * (resolution * resolution)
        pop_by_floor = pop_data/height_data
        # clamp nodata artefacts and implausible densities
        pop_by_floor[pop_by_floor < 0] = 0
        pop_by_floor[pop_by_floor > 10000] = 0
        # FIX: the window function used to be named `sum`, shadowing the builtin
        def _window_sum(P):
            return(P.sum())
        # smooth the dataset such that each cell is the sum of a 3x3 window
        pop_by_floor[0,:,:] = generic_filter(pop_by_floor[0,:,:], _window_sum, (3,3))
        meta = self.height_data.meta.copy()
        meta.update(dtype = pop_by_floor.dtype)
        with rasterio.open(out_file, 'w', **meta) as out:
            out.write_band(1, pop_by_floor[0,:,:])
def calculate_accessibility_hotspots(self, time_raster, out_hotspots, window=25):
    ''' Identify accessibility risk hotspots from a travel-time raster.

    Risk is the floor-area density within a moving window, weighted by the
    inverse square of travel time to the nearest amenity.
    INPUT
        time_raster [string] - travel-time raster (seconds) from calculate_accessibility
        out_hotspots [string] - path for the output risk raster
        [optional] window [int] - size of the summation window in cells
    RETURNS
        NA - writes the risk raster to out_hotspots
    '''
    pop_raster = self.wp_file_reproj  # NOTE(review): assigned but unused below
    pop_floor_raster = self.pop_by_floor
    #Read in the time raster and create and inverse travel raster
    timeR = rasterio.open(time_raster)
    timeD = timeR.read()
    invD = 1/((timeD/60)**2) # convert seconds to minutes and square
    invD[timeD == 0.0] = 0 # set inverse values at service points to 0
    # Mulitple the inverse travel raster by the TFA raster
    popR = rasterio.open(pop_floor_raster)
    popD = popR.read()
    popD[np.isnan(popD)] = 0
    pop_inv = invD * popD
    out = pop_inv * 0  # NOTE(review): unused
    #Run a filter over the dataset to sum the TFA within the window
    # (`sum` here is the Python builtin applied to each window's flat values)
    yy = generic_filter(pop_inv[0,:,:], sum, (window, window))
    yy = yy.astype(popR.meta['dtype'])
    #Multiply that windowed sum by the inverse travel value
    yy = (yy * invD) * 1/8  # 1/8 scaling factor -- TODO confirm its derivation
    # Set the risk value at service locations to the maximum of the dataset
    yy[timeD == 0.0] = yy.max()
    yy = yy.astype(popR.meta['dtype'])
    with rasterio.open(out_hotspots, 'w', **popR.meta) as outR:
        outR.write(yy)
def calculate_accessibility_hotspots_dist_decay(self, time_raster, pop_raster,
        max_time = 1200, dist_decay=0.005, window = 25):
    ''' Calculate accessibility risk based on proximity to amenities

    Population is weighted by an exponential decay of travel time and summed
    within a moving window.
    INPUT
        time_raster [string] - travel-time raster (seconds)
        pop_raster [string] - population raster aligned to the time raster
        [optional] max_time [number] - population beyond this travel time is excluded
        [optional] dist_decay [float] - exponential decay coefficient
        [optional] window [int] - size of the summation window in cells
    RETURNS
        [string] - path to the windowed decay-population raster
    '''
    cur_folder = self.output_folder
    decay_raster = os.path.join(cur_folder, "wp_2020_decay_pop.tif")
    decay_raster_window = os.path.join(cur_folder, "wp_2020_decay_pop_window.tif")
    decay_vals = os.path.join(cur_folder, "decayRast.tif")
    distance_raster = rasterio.open(time_raster)
    distR = distance_raster.read()
    pop_raster = rasterio.open(pop_raster)
    popR = pop_raster.read()
    #exclude population that is too far away (beyond max_time)
    popR = popR * ((distR < max_time) * 1)
    #Create inverse time raster: weight = exp(-dist_decay * travel_time)
    decayFunction = lambda x: np.exp(-1 * dist_decay * x)
    decayDist = decayFunction(distR)
    # multiply distance decay raster by Population
    decayPop = popR * decayDist
    decayPop = decayPop.astype(pop_raster.meta['dtype'])
    #For the locations where the shops are located, set the pop to 0
    # (at an amenity travel time is 0 so decayDist == 1 and decayPop == popR)
    decayPop[decayPop == popR] = 0
    # apply summary function across decay pop raster summarizing
    # pop within a roving window, only summing population that is further away
    def sum(P):
        return(P.sum())
    decayPop_window = decayPop * 0
    decayPop_window[0,:,:] = generic_filter(decayPop[0,:,:], sum, (window,window))
    meta = pop_raster.meta.copy()
    meta.update(dtype = decayDist.dtype)
    decayPop_window = decayPop_window.astype(meta['dtype'])
    # write the decay weights, the decayed population, and the windowed sum
    with rasterio.open(decay_vals, 'w', **meta) as out:
        out.write(decayDist)
    with rasterio.open(decay_raster, 'w', **pop_raster.meta) as out:
        out.write(decayPop)
    with rasterio.open(decay_raster_window, 'w', **meta) as out:
        out.write(decayPop_window)
    return(decay_raster_window)
def calculate_accessibility_hotspots_advanced(self, time_raster, pop_raster,
        dist_decay=0.005, window = 25, interim = False):
    ''' Calculate accessibility risk based on proximity to amenities

    Like the dist_decay variant, but the moving-window sum only counts cells
    whose travel time exceeds the window centre's (i.e. population further
    from the amenity than the centre cell).
    INPUT
        time_raster [string] - travel-time raster (seconds)
        pop_raster [string] - population raster aligned to the time raster
        [optional] dist_decay [float] - exponential decay coefficient
        [optional] window [int] - size of the summation window in cells
        [optional] interim [bool] - when True, also write intermediate rasters
    RETURNS
        [string] - path to the windowed decay-population raster
    '''
    cur_folder = self.output_folder
    decay_raster = os.path.join(cur_folder, "wp_2020_decay_pop_adv.tif")
    decay_raster_window = os.path.join(cur_folder, "wp_2020_decay_pop_window_adv.tif")
    decay_vals = os.path.join(cur_folder, "decayRast_adv.tif")
    distance_raster = rasterio.open(time_raster)
    distR = distance_raster.read()
    pop_raster = rasterio.open(pop_raster)
    popR = pop_raster.read()
    #Create inverse time raster: weight = exp(-dist_decay * travel_time)
    decayFunction = lambda x: np.exp(-1 * dist_decay * x)
    decayDist = decayFunction(distR)
    # multiply distance decay raster by Population
    decayPop = popR * decayDist
    decayPop = decayPop.astype(pop_raster.meta['dtype'])
    #For the locations where the shops are located, set the pop to 0
    #decayPop[decayPop == popR] = 0
    # apply summary function across decay pop raster summarizing
    # pop within a roving window, only summing population that is further away.
    # Stack (decayed pop, travel time) so the filter can compare distances.
    xx = np.dstack([decayPop[0,:,:], distR[0,:,:]])
    def sum_less_than_center(P):
        # P arrives flattened; restore the (window, window, [pop, dist]) layout
        P = P.reshape((window,window,2))
        pop = P[:,:,0]
        dist = P[:,:,1]
        center = math.floor(pop.shape[0]/2)
        min_dist = dist[center,center]
        # keep only population strictly further away than the centre cell
        pop = (dist > min_dist).astype(int) * pop
        return(pop.sum())
    out = xx * 0
    yy = generic_filter(xx, sum_less_than_center, (window,window,2), output=out)
    decayPop_window = yy[:,:,0]
    meta = pop_raster.meta.copy()
    meta.update(dtype = decayDist.dtype)
    if interim:
        # optionally persist the decay weights and decayed population
        with rasterio.open(decay_vals, 'w', **meta) as out:
            out.write(decayDist)
        with rasterio.open(decay_raster, 'w', **pop_raster.meta) as out:
            out.write(decayPop)
    with rasterio.open(decay_raster_window, 'w', **meta) as out:
        out.write_band(1, decayPop_window)
    return(decay_raster_window)
def calculate_accessibility_hotspots_orig(self, time_raster, pop_raster, pop_floor_raster,
        window = 25, interim = False):
    ''' Calculate accessibility risk based on proximity to amenities

    Original formulation: the decay weight is the reciprocal of travel time
    (1/t) instead of an exponential decay.
    INPUT
        time_raster [string] - travel-time raster (seconds)
        pop_raster [string] - population raster aligned to the time raster
        pop_floor_raster [string] - population-per-floor-area raster
        [optional] window [int] - size of the summation window in cells
        [optional] interim [bool] - when True, also write intermediate rasters
    RETURNS
        [string] - path to the windowed decay-population raster
    '''
    tfa = rasterio.open(pop_floor_raster)   # NOTE(review): opened but never used
    timeR = rasterio.open(time_raster)      # NOTE(review): opened but never used
    cur_folder = self.output_folder
    decay_raster = os.path.join(cur_folder, "wp_2020_decay_pop_orig.tif")
    decay_raster_window = os.path.join(cur_folder, "wp_2020_decay_pop_window_orig.tif")
    decay_vals = os.path.join(cur_folder, "decayRast_orig.tif")
    distance_raster = rasterio.open(time_raster)
    distR = distance_raster.read()
    pop_raster = rasterio.open(pop_raster)
    popR = pop_raster.read()
    # NOTE(review): cells with travel time 0 (the amenities themselves)
    # divide by zero here and yield inf -- confirm this is intended
    decayDist = 1/distR
    # multiply distance decay raster by Population
    decayPop = popR * decayDist
    decayPop = decayPop.astype(pop_raster.meta['dtype'])
    #For the locations where the shops are located, set the pop to 0
    #decayPop[decayPop == popR] = 0
    # apply summary function across decay pop raster summarizing
    # pop within a roving window, only summing population that is further away.
    # Stack (decayed pop, travel time) so the filter can compare distances.
    xx = np.dstack([decayPop[0,:,:], distR[0,:,:]])
    def sum_less_than_center(P):
        # P arrives flattened; restore the (window, window, [pop, dist]) layout
        P = P.reshape((window,window,2))
        pop = P[:,:,0]
        dist = P[:,:,1]
        center = math.floor(pop.shape[0]/2)
        min_dist = dist[center,center]
        # keep only population strictly further away than the centre cell
        pop = (dist > min_dist).astype(int) * pop
        return(pop.sum())
    out = xx * 0
    yy = generic_filter(xx, sum_less_than_center, (window,window,2), output=out)
    decayPop_window = yy[:,:,0]
    meta = pop_raster.meta.copy()
    meta.update(dtype = decayDist.dtype)
    if interim:
        # optionally persist the decay weights and decayed population
        with rasterio.open(decay_vals, 'w', **meta) as out:
            out.write(decayDist)
        with rasterio.open(decay_raster, 'w', **pop_raster.meta) as out:
            out.write(decayPop)
    with rasterio.open(decay_raster_window, 'w', **meta) as out:
        out.write_band(1, decayPop_window)
    return(decay_raster_window)
| import sys, os, importlib, math, shutil
import rasterio
import skimage
import numpy as np
import pandas as pd
import geopandas as gpd
import osmnx as ox
import GOSTnets as gn
import skimage.graph as graph
from rasterio.mask import mask
from rasterio import features
from rasterio.warp import reproject, Resampling
from shapely.geometry import box, Point
from scipy.ndimage import generic_filter
from pandana.loaders import osm
sys.path.append("../../GOST/")
import GOSTRocks.rasterMisc as rMisc
import GOSTRocks.misc as misc
import GOSTRocks.osmMisc as osm_misc
# Driving Speed for attributing road networks with speed
# Maps OSM "highway" tag values to assumed travel speeds in km/h.
# Road types not listed here fall back to 5 km/h in get_speed() below.
speed_dict = {
    'residential': 20, # kmph
    'primary': 40,
    'primary_link':35,
    'motorway':50,
    'motorway_link': 45,
    'trunk': 40,
    'trunk_link':35,
    'secondary': 30,
    'secondary_link':25,
    'tertiary':30,
    'tertiary_link': 25,
    'unclassified':20,
    'living_street':10,
    'service':10
}
def get_speed(x, s_dict):
    ''' Look up the travel speed for an infrastructure type.

    Some of the supplied values are lists (OSM stores multiple highway tags);
    in that case the first entry is used. Anything that cannot be resolved
    falls back to 5 kmph.
    INPUT
        x [string or list] - infra type to look up in s_dict
        s_dict [dictionary] - see speed_dict above
    RETURNS
        [number] speed
    '''
    key = x
    if isinstance(x, list):
        # use the first tag; an empty list resolves to the fallback below
        key = x[0] if x else None
    try:
        return s_dict[key]
    except (KeyError, TypeError):
        # unknown or unhashable infrastructure type -> conservative default
        return 5
def get_nodes(b, tags):
    ''' Extract nodes from OSM based on tag query using pandana loaders
    INPUTS
        b [list of numbers] - boundary list from shapely.bounds (minx, miny, maxx, maxy)
        tags [string] - filter to be sent to pandana.osm.node_query
    RETURNS
        [geopandas dataframe] - amenity points with lat/lon columns, in WGS84
    '''
    # node_query expects (lat_min, lng_min, lat_max, lng_max)
    nodes = osm.node_query(b[1], b[0], b[3], b[2], tags=tags)
    nodes_geom = [Point(x) for x in zip(nodes['lon'], nodes['lat'])]
    # BUG FIX: the CRS authority was misspelled 'epgs:4326', so the resulting
    # GeoDataFrame carried an invalid CRS; corrected to 'epsg:4326' (WGS84).
    nodes_df = gpd.GeoDataFrame(nodes[['amenity','lat','lon']], geometry=nodes_geom, crs={'init':'epsg:4326'})
    return(nodes_df)
def standardizeInputRasters(inR1, inR2, inR1_outFile, data_type="N"):
    ''' Standardize inR1 to inR2: changes crs, extent, and resolution.
    INPUTS:
        inR1, inR2 [rasterio raster object]
        inR1_outFile [string] - output file for creating inR1 standardized to inR2
        [optional] data_type [character] - Defines the data type of the input raster (inR1).
            It defines the resampling type and works for 'N' for numeric and 'C' for categorical
    RETURNS
        nothing - writes the standardized raster to inR1_outFile
    '''
    # the function only aligns extent/resolution; both inputs must share a CRS
    if inR1.crs != inR2.crs:
        raise ValueError("CRS Error")
    #Clip R1 to R2
    #Get JSON of bounding box (counter-clockwise closed ring of inR2's extent)
    b2 = inR2.bounds
    boxJSON = [{'type': 'Polygon', 'coordinates': [[[b2.left, b2.bottom],[b2.left, b2.top],[b2.right, b2.top],[b2.right, b2.bottom],[b2.left, b2.bottom]]]}]
    out_img, out_transform = mask(inR1, boxJSON, crop=True)
    out_meta = inR1.meta.copy()
    #Re-scale resolution of R1 to R2
    newArr = np.empty(shape=(1, inR2.shape[0], inR2.shape[1]))
    # cubic resampling for continuous data, nearest-neighbour for categorical
    if data_type == "N":
        resampling_type = Resampling.cubic
    elif data_type == "C":
        resampling_type = Resampling.nearest
    reproject(out_img, newArr, src_transform=out_transform, dst_transform=inR2.transform, src_crs=inR1.crs, dst_crs=inR2.crs, resampling=resampling_type)
    out_meta.update({"driver": "GTiff",
                     "height": newArr.shape[1],
                     "width": newArr.shape[2],
                     "transform": inR2.transform,
                     "crs": inR2.crs})
    with rasterio.open(inR1_outFile, "w", **out_meta) as dest:
        dest.write(newArr.astype(out_meta['dtype']))
class city_hotspot(object):
    '''
    Calculate hotspots through combining population density, building height, and access to amenities
    '''
    def __init__(self, height_raster_file, output_folder):
        ''' Initiate the city_hotspot analysis
        INPUT
            height_raster_file [string] - path to the raster describing building height
            output_folder [string] - path to folder to create output
        EXAMPLE
            city = hot.city_hotspot(height_raster, out_folder)
            city.extract_other_rasters(global_pop_raster, global_globR)
            rosads = city.extract_osm_data()
            xx = city.generate_walking_raster()
            city.calculate_accessibility()
            city.calculate_pop_hotspots()
        '''
        self.height_data = rasterio.open(height_raster_file)
        # area of interest as a shapely polygon in the height raster's CRS
        self.bounds = box(*self.height_data.bounds)
        self.intermediate_data = []
        self.output_folder = output_folder
        # canonical output paths for every dataset the workflow produces
        self.wp_file = os.path.join(output_folder, "wp_2020.tif")                        # clipped population
        self.wp_file_reproj = os.path.join(output_folder, "wp_2020_re.tif")              # population aligned to height raster
        self.lc_file = os.path.join(output_folder, "lcvr_globcover_2015.tif")            # clipped landcover
        self.lc_file_reproj = os.path.join(output_folder, "lcvr_globcover_2015_re.tif")  # landcover aligned to height raster
        self.toilets_file = os.path.join(output_folder, "toilets.shp")
        self.water_file = os.path.join(output_folder, "water_points.shp")
        self.shops_file = os.path.join(output_folder, "shops.shp")
        self.roads_file = os.path.join(output_folder, "road_network.shp")
        self.walking_speed = os.path.join(output_folder, "traversal_speed.tif")
        self.walking_time = os.path.join(output_folder, "traversal_time.tif")
        self.pop_by_floor = os.path.join(output_folder, "pop_floor.tif")
def combine_results(self, base_map, hotspot_files, thresh=0.29):
    ''' Copy the visualization QGIS document to the output folder and
        summarize the population falling inside each hotspot raster.

    INPUT
        base_map [string] - QGIS map document to copy next to the results
        hotspot_files [list of strings] - hotspot rasters to threshold and summarize
        [optional] thresh [float] - cells above this value count as hotspot cells
    RETURNS
        [dict] - total population, population per hotspot layer (-1 when the
                 layer file is missing), and population in the combined mask
    '''
    destination = os.path.join(self.output_folder, os.path.basename(base_map))
    if destination != base_map:
        shutil.copy(base_map, destination)
    # Population surface; negative cells are nodata and must not be summed
    pop_data = rasterio.open(self.wp_file_reproj).read()
    pop_data[pop_data < 0] = 0
    # Density hotspot mask from the population-per-floor-area raster
    density_mask = (rasterio.open(self.pop_by_floor).read() > thresh).astype(int)
    summary = {}
    summary['total_pop'] = pop_data.sum()
    summary['pop_hotspot'] = (density_mask * pop_data).sum()
    combined = density_mask
    for h_file in hotspot_files:
        label = os.path.basename(h_file).replace(".tif", "")
        if not os.path.exists(h_file):
            summary[label] = -1  # sentinel: hotspot layer missing
            continue
        cur_mask = (rasterio.open(h_file).read() > thresh).astype(int)
        summary[label] = (cur_mask * pop_data).sum()
        # accumulate the union of all hotspot masks
        try:
            combined = combined + cur_mask
        except:
            combined = cur_mask  # shape mismatch: restart from this layer
    # population inside the union of every hotspot mask
    combined = (combined > 0).astype(int)
    summary['combo_hotspot'] = (combined * pop_data).sum()
    return summary
def extract_other_rasters(self, pop_raster, land_cover_raster):
    ''' Extract population, landcover data that match the existing height data

    Each output is only created if it does not already exist, so re-running
    the workflow is cheap.
    INPUT
        pop_raster [rasterio] - global population raster from which the city data are extracted
        land_cover_raster [rasterio] - global landcover dataset
    RETURNS
        NA - writes all results to file
    ##TODO: Look at re-smapling to see how numbers change
    '''
    # Population: clip the global raster to the city bounds and write it out
    if not os.path.exists(self.wp_file):
        wp_data, out_transform = mask(pop_raster, shapes=[self.bounds], crop=True)
        wp_data[wp_data < 0] = 0  # negative cells are nodata
        meta = self.height_data.meta.copy()
        meta.update(width=wp_data.shape[2],
                    height=wp_data.shape[1],
                    transform=out_transform,
                    dtype = wp_data.dtype)
        with rasterio.open(self.wp_file, 'w', **meta) as out:
            out.write(wp_data)
    if not os.path.exists(self.wp_file_reproj):
        # standardize the clipped population dataset to the height dataset
        # ('N' = numeric -> cubic resampling)
        standardizeInputRasters(rasterio.open(self.wp_file), self.height_data, self.wp_file_reproj, data_type='N')
    # Landcover: same clip-and-standardize, but categorical resampling
    if not os.path.exists(self.lc_file):
        wp_data, out_transform = mask(land_cover_raster, shapes=[self.bounds], crop=True)
        meta = self.height_data.meta.copy()
        meta.update(width=wp_data.shape[2],
                    height=wp_data.shape[1],
                    transform=out_transform,
                    dtype = wp_data.dtype)
        with rasterio.open(self.lc_file, 'w', **meta) as out:
            out.write(wp_data)
    if not os.path.exists(self.lc_file_reproj):
        # standardize the clipped landcover dataset to the height dataset
        # ('C' = categorical -> nearest-neighbour resampling)
        standardizeInputRasters(rasterio.open(self.lc_file), self.height_data, self.lc_file_reproj, data_type='C')
def extract_osm_data(self):
    ''' Extract amenities and the road network from OSM

    Writes toilets, water points, shops, and roads shapefiles into the output
    folder (each only if not already present). The amenity queries are wrapped
    in bare try/except because the OSM query may return nothing for a city.
    RETURNS
        [geopandas dataframe] of roads when the road file is newly created;
        otherwise None (the roads are still loaded onto self.sel_roads)
    '''
    if not os.path.exists(self.toilets_file):
        amenities = ['toilets', 'washroom', 'restroom']
        toilets_tags = '"amenity"~"{}"'.format('|'.join(amenities))
        try:
            self.toilets = get_nodes(self.height_data.bounds, toilets_tags)
            self.toilets.to_file(self.toilets_file)
        except:
            pass  # best-effort: no matching nodes / query failure
    if not os.path.exists(self.water_file):
        amenities = ['water_points', 'drinking_water', 'pumps', 'water_pumps', 'well']
        water_tags = '"amenity"~"{}"'.format('|'.join(amenities))
        try:
            self.water_points = get_nodes(self.height_data.bounds, water_tags)
            self.water_points.to_file(self.water_file)
        except:
            pass  # best-effort: no matching nodes / query failure
    if not os.path.exists(self.shops_file):
        amenities = ['supermarket', 'convenience', 'general', 'department_stores', 'wholesale', 'grocery', 'general']
        shp_tags = '"shop"~"{}"'.format('|'.join(amenities))
        try:
            self.shops = get_nodes(self.height_data.bounds, shp_tags)
            self.shops.to_file(self.shops_file)
        except:
            pass  # best-effort: no matching nodes / query failure
    if not os.path.exists(self.roads_file):
        # bounds is (minx, miny, maxx, maxy); graph_from_bbox wants N, S, E, W
        b = self.height_data.bounds
        sel_graph = ox.graph_from_bbox(b[3], b[1], b[2], b[0], retain_all=True)
        self.sel_roads = gn.edge_gdf_from_graph(sel_graph)
        self.sel_roads['speed'] = self.sel_roads['highway'].apply(lambda x: get_speed(x, speed_dict))
        # drop attribute columns that are not needed (and can break .to_file)
        bad_fields = ['name','width','maxspeed','ref','tunnel','service','area','lanes','junction','oneway','bridge','access']
        for f in bad_fields:
            try:
                self.sel_roads.drop([f], axis=1, inplace=True)
            except:
                pass  # column not present in this extract
        # OSM attributes may come back as lists; keep the first entry so the
        # columns serialize to shapefile
        def get_type(x):
            if type(x) == list:
                return(x[0])
            else:
                return(x)
        self.sel_roads['highway'] = self.sel_roads['highway'].apply(get_type)
        self.sel_roads['osmid'] = self.sel_roads['osmid'].apply(get_type)
        try:
            self.sel_roads.to_file(self.roads_file)
        except:
            print("Error writing roads to disk")
        return(self.sel_roads)
    else:
        self.sel_roads = gpd.read_file(self.roads_file)
def generate_walking_raster(self, resolution=90):
    ''' Generate a map of walking speed combining landcover and road network

    INPUT
        [optional] resolution [int] - cell size in metres, used to convert
            speed (km/h) into per-cell traversal time (seconds)
    RETURNS
        [numpy array] traversal time (seconds per cell) when newly computed;
        None when self.walking_time already exists on disk
    '''
    if not os.path.exists(self.walking_time):
        # load the landcover data, classify into water (1), other (2), and urban (3)
        lc_raster = rasterio.open(self.lc_file_reproj)
        lc_data = lc_raster.read()
        walking_speed = lc_data.copy()
        walking_speed[lc_data < 190] = 2
        walking_speed[lc_data == 190] = 3
        walking_speed[lc_data > 190] = 1
        # Open the road network and burn all segments in at uniform speed (5)
        shapes = ((row['geometry'], 5) for idx, row in self.sel_roads.iterrows())
        speed_image = features.rasterize(shapes, out_shape=self.height_data.shape, transform=self.height_data.transform, fill=0)
        # stack rasterized roads and landcover speed and take the cell-wise maximum
        stacked = np.dstack([walking_speed[0,:,:], speed_image])
        max_speed = np.amax(stacked, axis=2)
        # Convert travel speed (km/h) into traversal time (seconds to cross a cell)
        traversal_speed = resolution / (max_speed * 1000.0 / (60.0 * 60.0))
        # BUG FIX: the walking-speed raster was previously written after an
        # unconditional return and therefore never created; write it first.
        meta = lc_raster.meta.copy()
        meta.update(dtype = max_speed.dtype)
        with rasterio.open(self.walking_speed, 'w', **meta) as out:
            out.write_band(1, max_speed)
        meta = lc_raster.meta.copy()
        meta.update(dtype = traversal_speed.dtype)
        with rasterio.open(self.walking_time, 'w', **meta) as out:
            out.write_band(1, traversal_speed)
        return(traversal_speed)
def calculate_accessibility(self):
    ''' Using skimage.MCP to calculate walking access through integration of
        the landcover dataset and the OSM road network.

    For each amenity layer that exists on disk (toilets, water points, shops),
    computes a least-cost travel-time surface from all amenity locations over
    the traversal-time raster and writes it to
    <output_folder>/<amenity>_distance_roads.tif (skipped if already present).
    '''
    traversal_raster = rasterio.open(self.walking_time)
    time_data = traversal_raster.read()[0,:,:]
    # create skimage graph over per-cell traversal times
    inH = self.height_data
    meta = inH.meta.copy()
    mcp = skimage.graph.MCP_Geometric(time_data)
    # iterate through amenity layers
    for amenity_file in [self.toilets_file, self.water_file, self.shops_file]:
        if os.path.exists(amenity_file):
            amenity = gpd.read_file(amenity_file)
            amenity_name = os.path.basename(amenity_file).replace(".shp", "")
            # BUG FIX: distance_raster was previously computed before the loop
            # from amenity_name, which was not yet defined (NameError on every
            # call); the per-amenity output path is now built here instead.
            distance_raster = os.path.join(self.output_folder, '%s_distance_roads.tif' % amenity_name)
            if not os.path.exists(distance_raster):
                # seed the cost search with the raster indices of every amenity point
                costs, traceback = mcp.find_costs(list(set([inH.index(x.x, x.y) for x in amenity['geometry']])))
                meta.update(dtype=costs.dtype)
                with rasterio.open(distance_raster, 'w', **meta) as out:
                    out.write_band(1, costs)
def calculate_pop_hotspots(self, resolution=90, pop_layer = '', out_file=''):
    ''' Calculate population density hotspots based on TFA (total floor area)

    INPUT
        [optional] resolution [int] - cell size of the height raster in metres
        [optional] pop_layer [string] - population raster; defaults to self.wp_file_reproj
        [optional] out_file [string] - output raster; defaults to self.pop_by_floor
    RETURNS
        NA - writes the smoothed population-per-floor-area raster to out_file
             (skipped if it already exists)
    '''
    # Divide population by height to get a density-per-floor-area analysis
    if pop_layer == '':
        pop_layer = self.wp_file_reproj
    if out_file == '':
        out_file = self.pop_by_floor
    if not os.path.exists(out_file):
        pop_raster = rasterio.open(pop_layer)
        pop_data = pop_raster.read()
        # building height / 3 m per storey * cell area = total floor area
        height_data = (self.height_data.read() / 3) * (resolution * resolution)
        pop_by_floor = pop_data/height_data
        # clamp nodata artefacts and implausible densities
        pop_by_floor[pop_by_floor < 0] = 0
        pop_by_floor[pop_by_floor > 10000] = 0
        # FIX: the window function used to be named `sum`, shadowing the builtin
        def _window_sum(P):
            return(P.sum())
        # smooth the dataset such that each cell is the sum of a 3x3 window
        pop_by_floor[0,:,:] = generic_filter(pop_by_floor[0,:,:], _window_sum, (3,3))
        meta = self.height_data.meta.copy()
        meta.update(dtype = pop_by_floor.dtype)
        with rasterio.open(out_file, 'w', **meta) as out:
            out.write_band(1, pop_by_floor[0,:,:])
def calculate_accessibility_hotspots(self, time_raster, out_hotspots, window=25):
    ''' Identify accessibility risk hotspots from a travel-time raster.

    Risk is the floor-area density within a moving window, weighted by the
    inverse square of travel time to the nearest amenity.
    INPUT
        time_raster [string] - travel-time raster (seconds) from calculate_accessibility
        out_hotspots [string] - path for the output risk raster
        [optional] window [int] - size of the summation window in cells
    RETURNS
        NA - writes the risk raster to out_hotspots
    '''
    pop_raster = self.wp_file_reproj  # NOTE(review): assigned but unused below
    pop_floor_raster = self.pop_by_floor
    #Read in the time raster and create and inverse travel raster
    timeR = rasterio.open(time_raster)
    timeD = timeR.read()
    invD = 1/((timeD/60)**2) # convert seconds to minutes and square
    invD[timeD == 0.0] = 0 # set inverse values at service points to 0
    # Mulitple the inverse travel raster by the TFA raster
    popR = rasterio.open(pop_floor_raster)
    popD = popR.read()
    popD[np.isnan(popD)] = 0
    pop_inv = invD * popD
    out = pop_inv * 0  # NOTE(review): unused
    #Run a filter over the dataset to sum the TFA within the window
    # (`sum` here is the Python builtin applied to each window's flat values)
    yy = generic_filter(pop_inv[0,:,:], sum, (window, window))
    yy = yy.astype(popR.meta['dtype'])
    #Multiply that windowed sum by the inverse travel value
    yy = (yy * invD) * 1/8  # 1/8 scaling factor -- TODO confirm its derivation
    # Set the risk value at service locations to the maximum of the dataset
    yy[timeD == 0.0] = yy.max()
    yy = yy.astype(popR.meta['dtype'])
    with rasterio.open(out_hotspots, 'w', **popR.meta) as outR:
        outR.write(yy)
def calculate_accessibility_hotspots_dist_decay(self, time_raster, pop_raster,
        max_time = 1200, dist_decay=0.005, window = 25):
    ''' Calculate accessibility risk based on proximity to amenities

    Population is weighted by an exponential decay of travel time and summed
    within a moving window.
    INPUT
        time_raster [string] - travel-time raster (seconds)
        pop_raster [string] - population raster aligned to the time raster
        [optional] max_time [number] - population beyond this travel time is excluded
        [optional] dist_decay [float] - exponential decay coefficient
        [optional] window [int] - size of the summation window in cells
    RETURNS
        [string] - path to the windowed decay-population raster
    '''
    cur_folder = self.output_folder
    decay_raster = os.path.join(cur_folder, "wp_2020_decay_pop.tif")
    decay_raster_window = os.path.join(cur_folder, "wp_2020_decay_pop_window.tif")
    decay_vals = os.path.join(cur_folder, "decayRast.tif")
    distance_raster = rasterio.open(time_raster)
    distR = distance_raster.read()
    pop_raster = rasterio.open(pop_raster)
    popR = pop_raster.read()
    #exclude population that is too far away (beyond max_time)
    popR = popR * ((distR < max_time) * 1)
    #Create inverse time raster: weight = exp(-dist_decay * travel_time)
    decayFunction = lambda x: np.exp(-1 * dist_decay * x)
    decayDist = decayFunction(distR)
    # multiply distance decay raster by Population
    decayPop = popR * decayDist
    decayPop = decayPop.astype(pop_raster.meta['dtype'])
    #For the locations where the shops are located, set the pop to 0
    # (at an amenity travel time is 0 so decayDist == 1 and decayPop == popR)
    decayPop[decayPop == popR] = 0
    # apply summary function across decay pop raster summarizing
    # pop within a roving window, only summing population that is further away
    def sum(P):
        return(P.sum())
    decayPop_window = decayPop * 0
    decayPop_window[0,:,:] = generic_filter(decayPop[0,:,:], sum, (window,window))
    meta = pop_raster.meta.copy()
    meta.update(dtype = decayDist.dtype)
    decayPop_window = decayPop_window.astype(meta['dtype'])
    # write the decay weights, the decayed population, and the windowed sum
    with rasterio.open(decay_vals, 'w', **meta) as out:
        out.write(decayDist)
    with rasterio.open(decay_raster, 'w', **pop_raster.meta) as out:
        out.write(decayPop)
    with rasterio.open(decay_raster_window, 'w', **meta) as out:
        out.write(decayPop_window)
    return(decay_raster_window)
def calculate_accessibility_hotspots_advanced(self, time_raster, pop_raster,
        dist_decay=0.005, window = 25, interim = False):
    ''' Calculate accessibility risk based on proximity to amenities

    Like the dist_decay variant, but the moving-window sum only counts cells
    whose travel time exceeds the window centre's (i.e. population further
    from the amenity than the centre cell).
    INPUT
        time_raster [string] - travel-time raster (seconds)
        pop_raster [string] - population raster aligned to the time raster
        [optional] dist_decay [float] - exponential decay coefficient
        [optional] window [int] - size of the summation window in cells
        [optional] interim [bool] - when True, also write intermediate rasters
    RETURNS
        [string] - path to the windowed decay-population raster
    '''
    cur_folder = self.output_folder
    decay_raster = os.path.join(cur_folder, "wp_2020_decay_pop_adv.tif")
    decay_raster_window = os.path.join(cur_folder, "wp_2020_decay_pop_window_adv.tif")
    decay_vals = os.path.join(cur_folder, "decayRast_adv.tif")
    distance_raster = rasterio.open(time_raster)
    distR = distance_raster.read()
    pop_raster = rasterio.open(pop_raster)
    popR = pop_raster.read()
    #Create inverse time raster: weight = exp(-dist_decay * travel_time)
    decayFunction = lambda x: np.exp(-1 * dist_decay * x)
    decayDist = decayFunction(distR)
    # multiply distance decay raster by Population
    decayPop = popR * decayDist
    decayPop = decayPop.astype(pop_raster.meta['dtype'])
    #For the locations where the shops are located, set the pop to 0
    #decayPop[decayPop == popR] = 0
    # apply summary function across decay pop raster summarizing
    # pop within a roving window, only summing population that is further away.
    # Stack (decayed pop, travel time) so the filter can compare distances.
    xx = np.dstack([decayPop[0,:,:], distR[0,:,:]])
    def sum_less_than_center(P):
        # P arrives flattened; restore the (window, window, [pop, dist]) layout
        P = P.reshape((window,window,2))
        pop = P[:,:,0]
        dist = P[:,:,1]
        center = math.floor(pop.shape[0]/2)
        min_dist = dist[center,center]
        # keep only population strictly further away than the centre cell
        pop = (dist > min_dist).astype(int) * pop
        return(pop.sum())
    out = xx * 0
    yy = generic_filter(xx, sum_less_than_center, (window,window,2), output=out)
    decayPop_window = yy[:,:,0]
    meta = pop_raster.meta.copy()
    meta.update(dtype = decayDist.dtype)
    if interim:
        # optionally persist the decay weights and decayed population
        with rasterio.open(decay_vals, 'w', **meta) as out:
            out.write(decayDist)
        with rasterio.open(decay_raster, 'w', **pop_raster.meta) as out:
            out.write(decayPop)
    with rasterio.open(decay_raster_window, 'w', **meta) as out:
        out.write_band(1, decayPop_window)
    return(decay_raster_window)
def calculate_accessibility_hotspots_orig(self, time_raster, pop_raster, pop_floor_raster,
        window = 25, interim = False):
    ''' Calculate accessibility risk based on proximity to amenities

    Original formulation: the decay weight is the reciprocal of travel time
    (1/t) instead of an exponential decay.
    INPUT
        time_raster [string] - travel-time raster (seconds)
        pop_raster [string] - population raster aligned to the time raster
        pop_floor_raster [string] - population-per-floor-area raster
        [optional] window [int] - size of the summation window in cells
        [optional] interim [bool] - when True, also write intermediate rasters
    RETURNS
        [string] - path to the windowed decay-population raster
    '''
    tfa = rasterio.open(pop_floor_raster)   # NOTE(review): opened but never used
    timeR = rasterio.open(time_raster)      # NOTE(review): opened but never used
    cur_folder = self.output_folder
    decay_raster = os.path.join(cur_folder, "wp_2020_decay_pop_orig.tif")
    decay_raster_window = os.path.join(cur_folder, "wp_2020_decay_pop_window_orig.tif")
    decay_vals = os.path.join(cur_folder, "decayRast_orig.tif")
    distance_raster = rasterio.open(time_raster)
    distR = distance_raster.read()
    pop_raster = rasterio.open(pop_raster)
    popR = pop_raster.read()
    # NOTE(review): cells with travel time 0 (the amenities themselves)
    # divide by zero here and yield inf -- confirm this is intended
    decayDist = 1/distR
    # multiply distance decay raster by Population
    decayPop = popR * decayDist
    decayPop = decayPop.astype(pop_raster.meta['dtype'])
    #For the locations where the shops are located, set the pop to 0
    #decayPop[decayPop == popR] = 0
    # apply summary function across decay pop raster summarizing
    # pop within a roving window, only summing population that is further away.
    # Stack (decayed pop, travel time) so the filter can compare distances.
    xx = np.dstack([decayPop[0,:,:], distR[0,:,:]])
    def sum_less_than_center(P):
        # P arrives flattened; restore the (window, window, [pop, dist]) layout
        P = P.reshape((window,window,2))
        pop = P[:,:,0]
        dist = P[:,:,1]
        center = math.floor(pop.shape[0]/2)
        min_dist = dist[center,center]
        # keep only population strictly further away than the centre cell
        pop = (dist > min_dist).astype(int) * pop
        return(pop.sum())
    out = xx * 0
    yy = generic_filter(xx, sum_less_than_center, (window,window,2), output=out)
    decayPop_window = yy[:,:,0]
    meta = pop_raster.meta.copy()
    meta.update(dtype = decayDist.dtype)
    if interim:
        # optionally persist the decay weights and decayed population
        with rasterio.open(decay_vals, 'w', **meta) as out:
            out.write(decayDist)
        with rasterio.open(decay_raster, 'w', **pop_raster.meta) as out:
            out.write(decayPop)
    with rasterio.open(decay_raster_window, 'w', **meta) as out:
        out.write_band(1, decayPop_window)
    return(decay_raster_window)
| en | 0.805614 | # Driving Speed for attributing road networks with speed # kmph Get speed from the above speed dict, but some of the suppied x's are actually lists INPUT x [string] - infra type to look up in s_dict s_dict [dictionary] - see speed_dict above RETURNS [number] speed Extract nodes from OSM based on tag query using pandana loaders INPUTS b [list of numbers] - boundary list from shapely.bounds tags [string] - filter to be send to pandana.osm.node_query RETURNS [geopandas dataframe] Standardize inR1 to inR2: changes crs, extent, and resolution. INPUTS: inR1, inR2 [rasterio raster object] inR1_outFile [string] - output file for creating inR1 standardized to inR2 [optional] data_type [character] - Defines the data type of the input raster (inR1). It defines the resampling type and works for 'N' for numeric and 'C' for categorical RETURNS nothing #Clip R1 to R2 #Get JSON of bounding box #Re-scale resolution of R1 to R2 Calculate hotspots through combining population density, builidng height, and access to amenities Initiate the city_hotspot analysis INPUT height_raster_file [string] - path to the raster describing builidng height output_folder [string] - path to folder to create output EXAMPLE city = hot.city_hotspot(height_raster, out_folder) city.extract_other_rasters(global_pop_raster, global_globR) rosads = city.extract_osm_data() xx = city.generate_walking_raster() city.calculate_accessibility() city.calculate_pop_hotspots() copy the vizualization qgis document to the output folder, summarize population in hotspots # Open the hotspot datasets, apply threshold and summarize population #combine all hotspots data # get combo hotspot population Extract population, landcover data that match the existing height data INPUT pop_raster [rasterio] - global population raster from which the city data are extracted land_cover_raster [rasterio] - global landcover dataset RETURNS NA - writes all results to file ##TODO: Look at re-smapling to see how numbers change # 
standardize the wp_smoothed dataset to the highight dataset # standardize the wp_smoothed dataset to the highight dataset Extract amenities and the road network from OSM Generate a map of walking speed combining landcover and road network # load the landcover data, classify into water (0.5), other (2), and urban (3) #Open the road network and burn all in at uniform (5) # stack rasterized roads and lc_speed and take maximum # Convert road network from travel speed to traversal time (seconds to cross) Using skimage.MCP to calculate walking access through integration of landcover dataset and the OSM road network # create skimage graph # iterate through amenity Calculate population density hotspots based on TFA # Divide population by hieght to get a density per footage analysis # total floor area # sum filter # smooth WP dataset such that each cell is the sum of a 3x3 filter #Read in the time raster and create and inverse travel raster # convert seconds to minutes and square # set inverse values at service points to 0 # Mulitple the inverse travel raster by the TFA raster #Run a filter over the dataset to sum the TFA within the window #Multiply that windowed sum by the inverse travel value # Set the risk value at service locations to the maximum of the dataset Calculate accessibility risk based on proximity to amenities #exclude population that is too far away (beyond max_time) #Create inverse time raster # multiply distance decay raster by Population #For the locations where the shops are located, set the pop to 0 # apply summary function across decay pop raster summarizing # pop within a roving window, only summing population that is further away Calculate accessibility risk based on proximity to amenities #Create inverse time raster # multiply distance decay raster by Population #For the locations where the shops are located, set the pop to 0 #decayPop[decayPop == popR] = 0 # apply summary function across decay pop raster summarizing # pop within a roving window, 
only summing population that is further away Calculate accessibility risk based on proximity to amenities # multiply distance decay raster by Population #For the locations where the shops are located, set the pop to 0 #decayPop[decayPop == popR] = 0 # apply summary function across decay pop raster summarizing # pop within a roving window, only summing population that is further away | 2.36387 | 2 |
Exercise-3/03.py | abhay-lal/18CSC207J-APP | 0 | 6623519 | class Dept:
def _init_(self, student_name='SCO'):
self.student_name = student_name
st1 = Dept()
st2 = Dept('John')
print(st1.student_name)
print(st2.student_name)
| class Dept:
def _init_(self, student_name='SCO'):
self.student_name = student_name
st1 = Dept()
st2 = Dept('John')
print(st1.student_name)
print(st2.student_name)
| none | 1 | 3.537302 | 4 | |
scripts/delete_all.py | noppanit/airfare-recommendation | 1 | 6623520 | <reponame>noppanit/airfare-recommendation
import sys
import os.path
# Make the repository root importable when this script is run directly:
# the `airfare` package lives one directory above this scripts/ folder.
parent = os.path.abspath(os.path.join(os.path.dirname(__file__),'..'))
sys.path.append(parent)
from airfare.atc import delete_all
# Wipe all stored records via the project helper.
# NOTE(review): presumably destructive and irreversible — confirm before running.
delete_all()
| import sys
import os.path
# Make the repository root importable when this script is run directly:
# the `airfare` package lives one directory above this scripts/ folder.
parent = os.path.abspath(os.path.join(os.path.dirname(__file__),'..'))
sys.path.append(parent)
from airfare.atc import delete_all
# Wipe all stored records via the project helper.
# NOTE(review): presumably destructive and irreversible — confirm before running.
delete_all()
dataProcessing/octree.py | zrtty1998/trafic-sptio-temporal-visualization | 0 | 6623521 | import math
import os
import random
import shutil
import pandas as pd
import json
def visualize(tree, size=10):
    """Render every point stored in the octree as a 3-D scatter plot.

    Recursively walks the tree from the root and scatters each node's
    points, then opens an interactive matplotlib window (blocking).

    Args:
        tree (Octree): Tree whose points should be drawn.
        size (int): Currently unused — TODO confirm intended purpose.
    """
    from matplotlib import pyplot
    from mpl_toolkits.mplot3d import Axes3D, art3d
    def draw_all_nodes(node, ax):
        # Plot this node's own points, then recurse into all eight children
        # (a node either has all eight children or none).
        for pnt in node.points:
            ax.scatter3D(pnt.x, pnt.y, pnt.z, s=5)
        if node.next_000:
            # draw_lines(node)
            draw_all_nodes(node.next_000, ax)
        if node.next_001:
            draw_all_nodes(node.next_001, ax)
        if node.next_010:
            draw_all_nodes(node.next_010, ax)
        if node.next_011:
            draw_all_nodes(node.next_011, ax)
        if node.next_100:
            draw_all_nodes(node.next_100, ax)
        if node.next_101:
            draw_all_nodes(node.next_101, ax)
        if node.next_110:
            draw_all_nodes(node.next_110, ax)
        if node.next_111:
            draw_all_nodes(node.next_111, ax)
    '''def draw_lines(node):
        bb = node.bounding_box
        # The scales for axhline & axvline are 0-1, so we have to convert
        # our values.
        x_offset = -tree._root.bounding_box.min_x
        min_x = (bb.min_x + x_offset) / 100
        max_x = (bb.max_x + x_offset) / 100
        y_offset = -tree._root.bounding_box.min_y
        min_y = (bb.min_y + y_offset) / 100
        max_y = (bb.max_y + y_offset) / 100
        z_offset = -tree._root.bounding_box.min_z
        min_z = (bb.min_z + z_offset) / 100
        max_z = (bb.max_z + z_offset) / 100
        art3d.axhline(
            node.center.y, min_x, max_x, color="grey", linewidth=0.5
        )
        pyplot.axvline(
            node.center.x, min_y, max_y, color="grey", linewidth=0.5
        )
        pyplot.axvline(
            node.center.z, min_z, max_z, color="grey", linewidth=0.5
        )'''
    # Draw the axis first
    fig = pyplot.figure()
    # NOTE(review): Axes3D(fig) is deprecated in modern matplotlib;
    # fig.add_subplot(projection="3d") is the supported spelling.
    ax1 = Axes3D(fig)
    half_length = tree.length / 2
    half_width = tree.width / 2
    half_height = tree.height / 2
    # The extents below are computed but never applied to the axes —
    # presumably intended for ax1.set_xlim/ylim/zlim; confirm before removing.
    min_x, max_x = tree.center.x - half_length, tree.center.x + half_length
    min_y, max_y = tree.center.y - half_width, tree.center.y + half_width
    min_z, max_z = tree.center.z - half_height, tree.center.z + half_height
    draw_all_nodes(tree._root, ax1)
    pyplot.show()
def to_file(node, path):
    """Serialize an octree to a directory tree on disk.

    Each internal node becomes a directory with eight child directories
    ("000"…"111"); each non-empty leaf writes its points to a `data.json`
    file as a list of ``{"position": <point.data>}`` records.

    Args:
        node (OctNode): Root node of the (sub)tree to dump.
        path (str): Target directory; recreated from scratch if it exists.
    """
    octant_names = ("000", "001", "010", "011", "100", "101", "110", "111")

    def init_dir(node, path):
        # A node either has all eight children or none, so checking one
        # child distinguishes internal nodes from leaves.
        if node.next_000:
            children = (node.next_000, node.next_001, node.next_010,
                        node.next_011, node.next_100, node.next_101,
                        node.next_110, node.next_111)
            for name, child in zip(octant_names, children):
                child_path = os.path.join(path, name)
                os.mkdir(child_path)
                init_dir(child, child_path)
        elif len(node.points):
            json_text = [{'position': pnt.data} for pnt in node.points]
            # 'w' (not the original 'a'): the directory tree is rebuilt from
            # scratch below, and appending a second JSON array to an existing
            # file would produce invalid JSON. `with` guarantees the handle
            # is closed even if the dump fails.
            with open(os.path.join(path, 'data.json'), 'w') as fp:
                json.dump(json_text, fp)

    # Create a fresh folder rooted at this node (original comment, translated
    # from Chinese: "create the folder starting from the root node").
    if os.path.isdir(path):
        shutil.rmtree(path)
    os.mkdir(path)
    init_dir(node, path)
def resample(node, sample_ratio, root_path):
    """Recursively down-sample point files up the on-disk octree.

    Depth-first walk: leaves are sampled into their parent's directory as
    `resampled.json`; each internal node then re-samples its own aggregate
    into its parent, so every level keeps a progressively thinner sample of
    the points below it. Uses random.sample, so output is non-deterministic.

    Args:
        node (OctNode): Node to process.
        sample_ratio (float): Fraction (0-1) of records kept per level.
        root_path (str): Filesystem prefix that node.path is appended to.
    """
    # (Original Chinese comment, translated:) Traverse all children; if every
    # child is a leaf, sample the children's points into this node. If a
    # child is internal, keep recursing into it.
    def sample(node, ratio, path):
        # (Translated:) Create the file in the directory if absent,
        # otherwise open the existing one and keep appending to it.
        # NOTE(review): the `ratio` parameter is ignored — the closure uses
        # `sample_ratio` below; confirm this is intentional.
        if not os.path.exists(path):
            fp = open(path, 'x')
            fp.write("[]")
            fp.close()
        fp = open(path, 'r')
        json_content = json.loads(fp.read())
        fp.close()
        for pnt in node.points:
            point = {}
            point['position'] = pnt.data
            json_content.append(point)
        # Keep round(len * ratio) records, chosen (and re-ordered) at random.
        json_content = random.sample(json_content, round(len(json_content) * sample_ratio))
        json_file = json.dumps(json_content)
        fp = open(path, 'w')
        fp.write(json_file)
        fp.close()
    if node.next_000 is None:
        # (Translated:) This node is a leaf.
        if node.parent is None:
            # (Translated:) Leaf AND root: sample this node into the root dir.
            if len(node.points):
                file_path = os.getcwd() + node.path + '/resampled.json'
                sample(node, sample_ratio, file_path)
        else:
            # (Translated:) Leaf with a parent: sample into the parent's dir.
            if len(node.points):
                file_path = root_path + node.parent.path + '/resampled.json'
                sample(node, sample_ratio, file_path)
    else:
        # (Translated:) Internal node: recurse into each child first.
        resample(node.next_000, sample_ratio, root_path)
        resample(node.next_001, sample_ratio, root_path)
        resample(node.next_010, sample_ratio, root_path)
        resample(node.next_011, sample_ratio, root_path)
        resample(node.next_100, sample_ratio, root_path)
        resample(node.next_101, sample_ratio, root_path)
        resample(node.next_110, sample_ratio, root_path)
        resample(node.next_111, sample_ratio, root_path)
        # (Translated:) Children done — now sample this node's aggregate
        # into its parent.
        if node.parent is not None:
            file_path = root_path + node.parent.path + '/resampled.json'
            if not os.path.exists(file_path):
                fp = open(file_path, 'x')
                fp.write("[]")
                fp.close()
            # NOTE(review): the read below raises FileNotFoundError if no
            # descendant leaf ever wrote into this node's directory —
            # confirm every traversed node is guaranteed a resampled.json.
            file_path = root_path + node.path + '/resampled.json'
            fp = open(file_path, 'r')
            json_content = json.loads(fp.read())  # sampling (translated)
            json_content = random.sample(json_content, round(len(json_content) * sample_ratio))
            fp.close()
            file_path = root_path + node.parent.path + '/resampled.json'
            fp = open(file_path, 'r')
            json_content_parent = json.loads(fp.read())
            fp.close()
            json_content_parent += json_content
            json_file = json.dumps(json_content_parent)
            fp = open(file_path, 'w')
            fp.write(json_file)
            fp.close()
class Point(object):
    """A point in 3-D space with an optional payload.

    Attributes are: x, y, z — the coordinates; data — arbitrary payload
    attached by the caller (default None).
    """

    def __init__(self, x, y, z, data=None):
        self.x = x
        self.y = y
        self.z = z
        self.data = data

    def __repr__(self):
        # Added for debuggability; existing behavior is unchanged.
        return "Point(x={!r}, y={!r}, z={!r}, data={!r})".format(
            self.x, self.y, self.z, self.data)
class BoundingBox(object):
    """Axis-aligned 3-D bounding box.

    Attributes: min_*/max_* extents; length/width/height sizes along
    x, y, z; half_* half-sizes; center — the geometric center point.
    """

    def __init__(self, min_x, min_y, min_z, max_x, max_y, max_z):
        self.min_x = min_x
        self.min_y = min_y
        self.min_z = min_z
        self.max_x = max_x
        self.max_y = max_y
        self.max_z = max_z
        self.length = self.max_x - self.min_x
        self.width = self.max_y - self.min_y
        self.height = self.max_z - self.min_z
        self.half_length = self.length / 2
        self.half_width = self.width / 2
        self.half_height = self.height / 2
        # Bug fix: the center is the minimum corner plus the half-extents.
        # The original stored the half-extents alone, which is only correct
        # for a box anchored at the origin. (Nothing in this module reads
        # `center` from a BoundingBox, so callers are unaffected.)
        self.center = Point(
            self.min_x + self.half_length,
            self.min_y + self.half_width,
            self.min_z + self.half_height)
class OctNode(object):
    """One node of the octree.

    Holds up to `capacity` points directly; on overflow it subdivides into
    eight children named by a 3-bit octant code. From the predicates below,
    the bits read (z, y, x): e.g. is_101 means z-upper, y-lower, x-upper.
    Each node tracks both a Cartesian box (center/length/width/height) and a
    coordinate box `c_bbx` of the form ((lat_min, lng_min, alt_min),
    (lat_max, lng_max, alt_max)); in subdivide() the lng component follows
    the x split and lat follows y — so x maps to lng and y to lat
    (observed from the octant arithmetic; confirm against the data loader).
    """
    point_class = Point          # class used for child-center points
    POINT_CAPACITY = 8           # default points per node before subdividing
    bb_class = BoundingBox       # class used for the Cartesian bounding box
    def __init__(self, center, c_bbx, length, width, height, capacity=None):
        """Create a node centered at `center` with the given extents.

        Args:
            center (Point): Cartesian center of the node.
            c_bbx (tuple): ((lat, lng, alt) min, (lat, lng, alt) max) box.
            length, width, height (number): Extents along x, y, z.
            capacity (int): Points held before subdividing; defaults to
                POINT_CAPACITY.
        """
        self.center = center
        self.c_bbx = c_bbx
        self.c_center = self.bbx2center(c_bbx)
        self.length = length
        self.width = width
        self.height = height
        self.points = []          # points stored directly in this node
        # Eight children, all None for a leaf; set together by subdivide().
        self.next_000 = None
        self.next_001 = None
        self.next_010 = None
        self.next_011 = None
        self.next_100 = None
        self.next_101 = None
        self.next_110 = None
        self.next_111 = None
        self.parent = None        # set by subdivide() on each child
        self.path = '/'           # '/'-separated octant codes from the root
        if capacity is None:
            capacity = self.POINT_CAPACITY
        self.capacity = capacity
        self.bounding_box = self._calc_bounding_box()
    def bbx2center(self, c_bbx):
        """Return the (lat, lng, alt) midpoint of a coordinate bounding box."""
        c_lat = c_bbx[0][0] + (c_bbx[1][0] - c_bbx[0][0]) / 2
        c_lng = c_bbx[0][1] + (c_bbx[1][1] - c_bbx[0][1]) / 2
        c_alt = c_bbx[0][2] + (c_bbx[1][2] - c_bbx[0][2]) / 2
        c_center = (c_lat, c_lng, c_alt)
        return c_center
    def _calc_bounding_box(self):
        """Build the Cartesian BoundingBox from center and extents."""
        half_length = self.length / 2
        half_width = self.width / 2
        half_height = self.height / 2
        min_x = self.center.x - half_length
        min_y = self.center.y - half_width
        min_z = self.center.z - half_height
        max_x = self.center.x + half_length
        max_y = self.center.y + half_width
        max_z = self.center.z + half_height
        return self.bb_class(
            min_x=min_x, min_y=min_y, min_z=min_z, max_x=max_x, max_y=max_y, max_z=max_z
        )
    def contains_point(self, point):
        """
        Checks if a point would be within the bounding box of the node.
        This is a bounding check, not verification the point is present in
        the data.
        Args:
            point (Point): The point to check.
        Returns:
            bool: `True` if it is within the bounds, otherwise `False`.
        """
        bb = self.bounding_box
        if bb.min_x <= point.x <= bb.max_x:
            if bb.min_y <= point.y <= bb.max_y:
                if bb.min_z <= point.z <= bb.max_z:
                    return True
        return False
    # Octant predicates: bit order is (z, y, x), 1 = at-or-above center.
    # Points exactly on a center plane go to the >= (upper) side.
    def is_000(self, point):
        return point.x < self.center.x and point.y < self.center.y and point.z < self.center.z
    def is_001(self, point):
        return point.x >= self.center.x and point.y < self.center.y and point.z < self.center.z
    def is_010(self, point):
        return point.x < self.center.x and point.y >= self.center.y and point.z < self.center.z
    def is_011(self, point):
        return point.x >= self.center.x and point.y >= self.center.y and point.z < self.center.z
    def is_100(self, point):
        return point.x < self.center.x and point.y < self.center.y and point.z >= self.center.z
    def is_101(self, point):
        return point.x >= self.center.x and point.y < self.center.y and point.z >= self.center.z
    def is_110(self, point):
        return point.x < self.center.x and point.y >= self.center.y and point.z >= self.center.z
    def is_111(self, point):
        return point.x >= self.center.x and point.y >= self.center.y and point.z >= self.center.z
    def subdivide(self):
        """
        Subdivides an existing node into the node + children.
        Returns:
            None: Nothing to see here. Please go about your business.
        """
        # Each child gets half of every extent; the octant centers sit a
        # quarter-extent (oct_*) away from this node's center.
        half_length = self.length / 2
        half_width = self.width / 2
        half_height = self.height / 2
        oct_length = half_length / 2
        oct_width = half_width / 2
        oct_height = half_height / 2
        half_lat = (self.c_bbx[1][0] - self.c_bbx[0][0]) / 2
        half_lng = (self.c_bbx[1][1] - self.c_bbx[0][1]) / 2
        # For every octant: compute the child's Cartesian center, its
        # coordinate sub-box (split at self.c_center), construct the child,
        # and extend the child's path with its octant code.
        center_000 = self.point_class(
            self.center.x - oct_length, self.center.y -
            oct_width, self.center.z - oct_height
        )
        c_bbx_000 = ((self.c_bbx[0][0], self.c_bbx[0][1], self.c_bbx[0][2]), self.c_center)
        self.next_000 = self.__class__(
            center_000, c_bbx_000, half_length, half_width, half_height, capacity=self.capacity
        )
        self.next_000.parent = self
        self.next_000.path = self.next_000.parent.path + '000/'
        center_001 = self.point_class(
            self.center.x + oct_length, self.center.y -
            oct_width, self.center.z - oct_height
        )
        # Note: min + half_lat/half_lng is just another spelling of the
        # corresponding c_center component.
        c_bbx_001 = ((self.c_bbx[0][0], self.c_bbx[0][1] + half_lng, self.c_bbx[0][2]),
                     (self.c_bbx[0][0] + half_lat, self.c_bbx[1][1], self.c_center[2]))
        self.next_001 = self.__class__(
            center_001, c_bbx_001, half_length, half_width, half_height, capacity=self.capacity
        )
        self.next_001.parent = self
        self.next_001.path = self.next_001.parent.path + '001/'
        center_010 = self.point_class(
            self.center.x - oct_length, self.center.y +
            oct_width, self.center.z - oct_height
        )
        c_bbx_010 = ((self.c_center[0], self.c_bbx[0][1], self.c_bbx[0][2]),
                     (self.c_bbx[1][0], self.c_center[1], self.c_center[2]))
        self.next_010 = self.__class__(
            center_010, c_bbx_010, half_length, half_width, half_height, capacity=self.capacity
        )
        self.next_010.parent = self
        self.next_010.path = self.next_010.parent.path + '010/'
        center_011 = self.point_class(
            self.center.x + oct_length, self.center.y +
            oct_width, self.center.z - oct_height
        )
        c_bbx_011 = ((self.c_center[0], self.c_center[1], self.c_bbx[0][2]),
                     (self.c_bbx[1][0], self.c_bbx[1][1], self.c_center[2]))
        self.next_011 = self.__class__(
            center_011, c_bbx_011, half_length, half_width, half_height, capacity=self.capacity
        )
        self.next_011.parent = self
        self.next_011.path = self.next_011.parent.path + '011/'
        center_100 = self.point_class(
            self.center.x - oct_length, self.center.y -
            oct_width, self.center.z + oct_height
        )
        c_bbx_100 = ((self.c_bbx[0][0], self.c_bbx[0][1], self.c_center[2]), (self.c_center[0], self.c_center[1], self.c_bbx[1][2]))
        self.next_100 = self.__class__(
            center_100, c_bbx_100, half_length, half_width, half_height, capacity=self.capacity
        )
        self.next_100.parent = self
        self.next_100.path = self.next_100.parent.path + '100/'
        center_101 = self.point_class(
            self.center.x + oct_length, self.center.y -
            oct_width, self.center.z + oct_height
        )
        c_bbx_101 = ((self.c_bbx[0][0], self.c_bbx[0][1] + half_lng, self.c_center[2]),
                     (self.c_bbx[0][0] + half_lat, self.c_bbx[1][1], self.c_bbx[1][2]))
        self.next_101 = self.__class__(
            center_101, c_bbx_101, half_length, half_width, half_height, capacity=self.capacity
        )
        self.next_101.parent = self
        self.next_101.path = self.next_101.parent.path + '101/'
        center_110 = self.point_class(
            self.center.x - oct_length, self.center.y +
            oct_width, self.center.z + oct_height
        )
        c_bbx_110 = ((self.c_center[0], self.c_bbx[0][1], self.c_center[2]),
                     (self.c_bbx[1][0], self.c_center[1], self.c_bbx[1][2]))
        self.next_110 = self.__class__(
            center_110, c_bbx_110, half_length, half_width, half_height, capacity=self.capacity
        )
        self.next_110.parent = self
        self.next_110.path = self.next_110.parent.path + '110/'
        center_111 = self.point_class(
            self.center.x + oct_length, self.center.y +
            oct_width, self.center.z + oct_height
        )
        c_bbx_111 = ((self.c_center[0], self.c_center[1], self.c_center[2]),
                     (self.c_bbx[1][0], self.c_bbx[1][1], self.c_bbx[1][2]))
        self.next_111 = self.__class__(
            center_111, c_bbx_111, half_length, half_width, half_height, capacity=self.capacity
        )
        self.next_111.parent = self
        self.next_111.path = self.next_111.parent.path + '111/'
        # Redistribute the points.
        # Manually call `append` here, as calling `.insert()` creates an
        # infinite recursion situation.
        for pnt in self.points:
            if self.is_000(pnt):
                self.next_000.points.append(pnt)
            elif self.is_001(pnt):
                self.next_001.points.append(pnt)
            elif self.is_010(pnt):
                self.next_010.points.append(pnt)
            elif self.is_011(pnt):
                self.next_011.points.append(pnt)
            elif self.is_100(pnt):
                self.next_100.points.append(pnt)
            elif self.is_101(pnt):
                self.next_101.points.append(pnt)
            elif self.is_110(pnt):
                self.next_110.points.append(pnt)
            else:
                self.next_111.points.append(pnt)
        self.points = []
    def insert(self, point):
        """Insert a Point into this subtree.

        Raises ValueError if the point lies outside this node's bounding
        box; subdivides on overflow and routes the point to the matching
        octant, returning the child's result (True on success).
        """
        if not self.contains_point(point):
            raise ValueError(
                "Point {} is not within this node ({} - {}).".format(
                    point, self.center, self.bounding_box
                )
            )
        # Check to ensure we're not going to go over capacity.
        if (len(self.points) + 1) > self.capacity:
            # We're over capacity. Subdivide, then insert into the new child.
            self.subdivide()
        if self.next_000 is not None:
            if self.is_000(point):
                return self.next_000.insert(point)
            elif self.is_001(point):
                return self.next_001.insert(point)
            elif self.is_010(point):
                return self.next_010.insert(point)
            elif self.is_011(point):
                return self.next_011.insert(point)
            elif self.is_100(point):
                return self.next_100.insert(point)
            elif self.is_101(point):
                return self.next_101.insert(point)
            elif self.is_110(point):
                return self.next_110.insert(point)
            elif self.is_111(point):
                return self.next_111.insert(point)
        # There are no child nodes & we're under capacity. Add it to `points`.
        self.points.append(point)
        return True
class Octree(object):
    """Public facade over a root OctNode covering the whole point space."""

    node_class = OctNode
    point_class = Point

    def __init__(self, center, c_bbx, length, width, height, capacity=None):
        """Construct an octree.

        Args:
            center (tuple|Point): Center of the point space.
            c_bbx (tuple): ((lat, lng, alt) min, (lat, lng, alt) max) box.
            length (int|float): Extent of the point space along x.
            width (int|float): Extent of the point space along y.
            height (int|float): Extent of the point space along z.
            capacity (int): Optional points-per-node threshold before a
                node subdivides. Default is `None`.
        """
        self.length = length
        self.width = width
        self.height = height
        self.center = self.convert_to_point(center)
        self.c_bbx = c_bbx
        self.c_center = self.bbx2center(c_bbx)
        self._root = self.node_class(
            self.center,
            self.c_bbx,
            self.length,
            self.width,
            self.height,
            capacity=capacity,
        )

    def convert_to_point(self, val):
        """Coerce `val` (Point, 3-sequence, or None) into a Point.

        Raises:
            ValueError: If `val` is of an unsupported type.
        """
        if val is None:
            return self.point_class(0, 0, 0)
        if isinstance(val, self.point_class):
            return val
        if isinstance(val, (tuple, list)):
            x, y, z = val[0], val[1], val[2]
            return self.point_class(x, y, z)
        raise ValueError(
            "Unknown data provided for point. Please use one of: "
            "quads.Point | tuple | list | None"
        )

    def insert(self, point, data=None):
        """Insert a point (with optional payload) into the tree.

        Returns:
            bool: `True` if insertion succeeded, otherwise `False`.
        """
        converted = self.convert_to_point(point)
        converted.data = data
        return self._root.insert(converted)

    def bbx2center(self, c_bbx):
        """Return the (lat, lng, alt) midpoint of a coordinate bounding box."""
        (lo_lat, lo_lng, lo_alt) = c_bbx[0]
        (hi_lat, hi_lng, hi_alt) = c_bbx[1]
        return (
            lo_lat + (hi_lat - lo_lat) / 2,
            lo_lng + (hi_lng - lo_lng) / 2,
            lo_alt + (hi_alt - lo_alt) / 2,
        )
| import math
import os
import random
import shutil
import pandas as pd
import json
def visualize(tree, size=10):
    """Render every point stored in the octree as a 3-D scatter plot.

    Recursively walks the tree from the root and scatters each node's
    points, then opens an interactive matplotlib window (blocking).

    Args:
        tree (Octree): Tree whose points should be drawn.
        size (int): Currently unused — TODO confirm intended purpose.
    """
    from matplotlib import pyplot
    from mpl_toolkits.mplot3d import Axes3D, art3d
    def draw_all_nodes(node, ax):
        # Plot this node's own points, then recurse into all eight children
        # (a node either has all eight children or none).
        for pnt in node.points:
            ax.scatter3D(pnt.x, pnt.y, pnt.z, s=5)
        if node.next_000:
            # draw_lines(node)
            draw_all_nodes(node.next_000, ax)
        if node.next_001:
            draw_all_nodes(node.next_001, ax)
        if node.next_010:
            draw_all_nodes(node.next_010, ax)
        if node.next_011:
            draw_all_nodes(node.next_011, ax)
        if node.next_100:
            draw_all_nodes(node.next_100, ax)
        if node.next_101:
            draw_all_nodes(node.next_101, ax)
        if node.next_110:
            draw_all_nodes(node.next_110, ax)
        if node.next_111:
            draw_all_nodes(node.next_111, ax)
    '''def draw_lines(node):
        bb = node.bounding_box
        # The scales for axhline & axvline are 0-1, so we have to convert
        # our values.
        x_offset = -tree._root.bounding_box.min_x
        min_x = (bb.min_x + x_offset) / 100
        max_x = (bb.max_x + x_offset) / 100
        y_offset = -tree._root.bounding_box.min_y
        min_y = (bb.min_y + y_offset) / 100
        max_y = (bb.max_y + y_offset) / 100
        z_offset = -tree._root.bounding_box.min_z
        min_z = (bb.min_z + z_offset) / 100
        max_z = (bb.max_z + z_offset) / 100
        art3d.axhline(
            node.center.y, min_x, max_x, color="grey", linewidth=0.5
        )
        pyplot.axvline(
            node.center.x, min_y, max_y, color="grey", linewidth=0.5
        )
        pyplot.axvline(
            node.center.z, min_z, max_z, color="grey", linewidth=0.5
        )'''
    # Draw the axis first
    fig = pyplot.figure()
    # NOTE(review): Axes3D(fig) is deprecated in modern matplotlib;
    # fig.add_subplot(projection="3d") is the supported spelling.
    ax1 = Axes3D(fig)
    half_length = tree.length / 2
    half_width = tree.width / 2
    half_height = tree.height / 2
    # The extents below are computed but never applied to the axes —
    # presumably intended for ax1.set_xlim/ylim/zlim; confirm before removing.
    min_x, max_x = tree.center.x - half_length, tree.center.x + half_length
    min_y, max_y = tree.center.y - half_width, tree.center.y + half_width
    min_z, max_z = tree.center.z - half_height, tree.center.z + half_height
    draw_all_nodes(tree._root, ax1)
    pyplot.show()
def to_file(node, path):
    """Serialize an octree to a directory tree on disk.

    Each internal node becomes a directory with eight child directories
    ("000"…"111"); each non-empty leaf writes its points to a `data.json`
    file as a list of ``{"position": <point.data>}`` records.

    Args:
        node (OctNode): Root node of the (sub)tree to dump.
        path (str): Target directory; recreated from scratch if it exists.
    """
    octant_names = ("000", "001", "010", "011", "100", "101", "110", "111")

    def init_dir(node, path):
        # A node either has all eight children or none, so checking one
        # child distinguishes internal nodes from leaves.
        if node.next_000:
            children = (node.next_000, node.next_001, node.next_010,
                        node.next_011, node.next_100, node.next_101,
                        node.next_110, node.next_111)
            for name, child in zip(octant_names, children):
                child_path = os.path.join(path, name)
                os.mkdir(child_path)
                init_dir(child, child_path)
        elif len(node.points):
            json_text = [{'position': pnt.data} for pnt in node.points]
            # 'w' (not the original 'a'): the directory tree is rebuilt from
            # scratch below, and appending a second JSON array to an existing
            # file would produce invalid JSON. `with` guarantees the handle
            # is closed even if the dump fails.
            with open(os.path.join(path, 'data.json'), 'w') as fp:
                json.dump(json_text, fp)

    # Create a fresh folder rooted at this node (original comment, translated
    # from Chinese: "create the folder starting from the root node").
    if os.path.isdir(path):
        shutil.rmtree(path)
    os.mkdir(path)
    init_dir(node, path)
def resample(node, sample_ratio, root_path):
    """Recursively down-sample point files up the on-disk octree.

    Depth-first walk: leaves are sampled into their parent's directory as
    `resampled.json`; each internal node then re-samples its own aggregate
    into its parent, so every level keeps a progressively thinner sample of
    the points below it. Uses random.sample, so output is non-deterministic.

    Args:
        node (OctNode): Node to process.
        sample_ratio (float): Fraction (0-1) of records kept per level.
        root_path (str): Filesystem prefix that node.path is appended to.
    """
    # (Original Chinese comment, translated:) Traverse all children; if every
    # child is a leaf, sample the children's points into this node. If a
    # child is internal, keep recursing into it.
    def sample(node, ratio, path):
        # (Translated:) Create the file in the directory if absent,
        # otherwise open the existing one and keep appending to it.
        # NOTE(review): the `ratio` parameter is ignored — the closure uses
        # `sample_ratio` below; confirm this is intentional.
        if not os.path.exists(path):
            fp = open(path, 'x')
            fp.write("[]")
            fp.close()
        fp = open(path, 'r')
        json_content = json.loads(fp.read())
        fp.close()
        for pnt in node.points:
            point = {}
            point['position'] = pnt.data
            json_content.append(point)
        # Keep round(len * ratio) records, chosen (and re-ordered) at random.
        json_content = random.sample(json_content, round(len(json_content) * sample_ratio))
        json_file = json.dumps(json_content)
        fp = open(path, 'w')
        fp.write(json_file)
        fp.close()
    if node.next_000 is None:
        # (Translated:) This node is a leaf.
        if node.parent is None:
            # (Translated:) Leaf AND root: sample this node into the root dir.
            if len(node.points):
                file_path = os.getcwd() + node.path + '/resampled.json'
                sample(node, sample_ratio, file_path)
        else:
            # (Translated:) Leaf with a parent: sample into the parent's dir.
            if len(node.points):
                file_path = root_path + node.parent.path + '/resampled.json'
                sample(node, sample_ratio, file_path)
    else:
        # (Translated:) Internal node: recurse into each child first.
        resample(node.next_000, sample_ratio, root_path)
        resample(node.next_001, sample_ratio, root_path)
        resample(node.next_010, sample_ratio, root_path)
        resample(node.next_011, sample_ratio, root_path)
        resample(node.next_100, sample_ratio, root_path)
        resample(node.next_101, sample_ratio, root_path)
        resample(node.next_110, sample_ratio, root_path)
        resample(node.next_111, sample_ratio, root_path)
        # (Translated:) Children done — now sample this node's aggregate
        # into its parent.
        if node.parent is not None:
            file_path = root_path + node.parent.path + '/resampled.json'
            if not os.path.exists(file_path):
                fp = open(file_path, 'x')
                fp.write("[]")
                fp.close()
            # NOTE(review): the read below raises FileNotFoundError if no
            # descendant leaf ever wrote into this node's directory —
            # confirm every traversed node is guaranteed a resampled.json.
            file_path = root_path + node.path + '/resampled.json'
            fp = open(file_path, 'r')
            json_content = json.loads(fp.read())  # sampling (translated)
            json_content = random.sample(json_content, round(len(json_content) * sample_ratio))
            fp.close()
            file_path = root_path + node.parent.path + '/resampled.json'
            fp = open(file_path, 'r')
            json_content_parent = json.loads(fp.read())
            fp.close()
            json_content_parent += json_content
            json_file = json.dumps(json_content_parent)
            fp = open(file_path, 'w')
            fp.write(json_file)
            fp.close()
class Point(object):
    """A point in 3-D space with an optional payload.

    Attributes are: x, y, z — the coordinates; data — arbitrary payload
    attached by the caller (default None).
    """

    def __init__(self, x, y, z, data=None):
        self.x = x
        self.y = y
        self.z = z
        self.data = data

    def __repr__(self):
        # Added for debuggability; existing behavior is unchanged.
        return "Point(x={!r}, y={!r}, z={!r}, data={!r})".format(
            self.x, self.y, self.z, self.data)
class BoundingBox(object):
    """Axis-aligned 3-D bounding box.

    Attributes: min_*/max_* extents; length/width/height sizes along
    x, y, z; half_* half-sizes; center — the geometric center point.
    """

    def __init__(self, min_x, min_y, min_z, max_x, max_y, max_z):
        self.min_x = min_x
        self.min_y = min_y
        self.min_z = min_z
        self.max_x = max_x
        self.max_y = max_y
        self.max_z = max_z
        self.length = self.max_x - self.min_x
        self.width = self.max_y - self.min_y
        self.height = self.max_z - self.min_z
        self.half_length = self.length / 2
        self.half_width = self.width / 2
        self.half_height = self.height / 2
        # Bug fix: the center is the minimum corner plus the half-extents.
        # The original stored the half-extents alone, which is only correct
        # for a box anchored at the origin. (Nothing in this module reads
        # `center` from a BoundingBox, so callers are unaffected.)
        self.center = Point(
            self.min_x + self.half_length,
            self.min_y + self.half_width,
            self.min_z + self.half_height)
class OctNode(object):
point_class = Point
POINT_CAPACITY = 8
bb_class = BoundingBox
def __init__(self, center, c_bbx, length, width, height, capacity=None):
self.center = center
self.c_bbx = c_bbx
self.c_center = self.bbx2center(c_bbx)
self.length = length
self.width = width
self.height = height
self.points = []
self.next_000 = None
self.next_001 = None
self.next_010 = None
self.next_011 = None
self.next_100 = None
self.next_101 = None
self.next_110 = None
self.next_111 = None
self.parent = None
self.path = '/'
if capacity is None:
capacity = self.POINT_CAPACITY
self.capacity = capacity
self.bounding_box = self._calc_bounding_box()
def bbx2center(self, c_bbx):
c_lat = c_bbx[0][0] + (c_bbx[1][0] - c_bbx[0][0]) / 2
c_lng = c_bbx[0][1] + (c_bbx[1][1] - c_bbx[0][1]) / 2
c_alt = c_bbx[0][2] + (c_bbx[1][2] - c_bbx[0][2]) / 2
c_center = (c_lat, c_lng, c_alt)
return c_center
def _calc_bounding_box(self):
half_length = self.length / 2
half_width = self.width / 2
half_height = self.height / 2
min_x = self.center.x - half_length
min_y = self.center.y - half_width
min_z = self.center.z - half_height
max_x = self.center.x + half_length
max_y = self.center.y + half_width
max_z = self.center.z + half_height
return self.bb_class(
min_x=min_x, min_y=min_y, min_z=min_z, max_x=max_x, max_y=max_y, max_z=max_z
)
def contains_point(self, point):
"""
Checks if a point would be within the bounding box of the node.
This is a bounding check, not verification the point is present in
the data.
Args:
point (Point): The point to check.
Returns:
bool: `True` if it is within the bounds, otherwise `False`.
"""
bb = self.bounding_box
if bb.min_x <= point.x <= bb.max_x:
if bb.min_y <= point.y <= bb.max_y:
if bb.min_z <= point.z <= bb.max_z:
return True
return False
def is_000(self, point):
return point.x < self.center.x and point.y < self.center.y and point.z < self.center.z
def is_001(self, point):
return point.x >= self.center.x and point.y < self.center.y and point.z < self.center.z
def is_010(self, point):
return point.x < self.center.x and point.y >= self.center.y and point.z < self.center.z
def is_011(self, point):
return point.x >= self.center.x and point.y >= self.center.y and point.z < self.center.z
def is_100(self, point):
return point.x < self.center.x and point.y < self.center.y and point.z >= self.center.z
def is_101(self, point):
return point.x >= self.center.x and point.y < self.center.y and point.z >= self.center.z
def is_110(self, point):
return point.x < self.center.x and point.y >= self.center.y and point.z >= self.center.z
def is_111(self, point):
return point.x >= self.center.x and point.y >= self.center.y and point.z >= self.center.z
def subdivide(self):
"""
Subdivides an existing node into the node + children.
Returns:
None: Nothing to see here. Please go about your business.
"""
half_length = self.length / 2
half_width = self.width / 2
half_height = self.height / 2
oct_length = half_length / 2
oct_width = half_width / 2
oct_height = half_height / 2
half_lat = (self.c_bbx[1][0] - self.c_bbx[0][0]) / 2
half_lng = (self.c_bbx[1][1] - self.c_bbx[0][1]) / 2
center_000 = self.point_class(
self.center.x - oct_length, self.center.y -
oct_width, self.center.z - oct_height
)
c_bbx_000 = ((self.c_bbx[0][0], self.c_bbx[0][1], self.c_bbx[0][2]), self.c_center)
self.next_000 = self.__class__(
center_000, c_bbx_000, half_length, half_width, half_height, capacity=self.capacity
)
self.next_000.parent = self
self.next_000.path = self.next_000.parent.path + '000/'
center_001 = self.point_class(
self.center.x + oct_length, self.center.y -
oct_width, self.center.z - oct_height
)
c_bbx_001 = ((self.c_bbx[0][0], self.c_bbx[0][1] + half_lng, self.c_bbx[0][2]),
(self.c_bbx[0][0] + half_lat, self.c_bbx[1][1], self.c_center[2]))
self.next_001 = self.__class__(
center_001, c_bbx_001, half_length, half_width, half_height, capacity=self.capacity
)
self.next_001.parent = self
self.next_001.path = self.next_001.parent.path + '001/'
center_010 = self.point_class(
self.center.x - oct_length, self.center.y +
oct_width, self.center.z - oct_height
)
c_bbx_010 = ((self.c_center[0], self.c_bbx[0][1], self.c_bbx[0][2]),
(self.c_bbx[1][0], self.c_center[1], self.c_center[2]))
self.next_010 = self.__class__(
center_010, c_bbx_010, half_length, half_width, half_height, capacity=self.capacity
)
self.next_010.parent = self
self.next_010.path = self.next_010.parent.path + '010/'
center_011 = self.point_class(
self.center.x + oct_length, self.center.y +
oct_width, self.center.z - oct_height
)
c_bbx_011 = ((self.c_center[0], self.c_center[1], self.c_bbx[0][2]),
(self.c_bbx[1][0], self.c_bbx[1][1], self.c_center[2]))
self.next_011 = self.__class__(
center_011, c_bbx_011, half_length, half_width, half_height, capacity=self.capacity
)
self.next_011.parent = self
self.next_011.path = self.next_011.parent.path + '011/'
center_100 = self.point_class(
self.center.x - oct_length, self.center.y -
oct_width, self.center.z + oct_height
)
c_bbx_100 = ((self.c_bbx[0][0], self.c_bbx[0][1], self.c_center[2]), (self.c_center[0], self.c_center[1], self.c_bbx[1][2]))
self.next_100 = self.__class__(
center_100, c_bbx_100, half_length, half_width, half_height, capacity=self.capacity
)
self.next_100.parent = self
self.next_100.path = self.next_100.parent.path + '100/'
center_101 = self.point_class(
self.center.x + oct_length, self.center.y -
oct_width, self.center.z + oct_height
)
c_bbx_101 = ((self.c_bbx[0][0], self.c_bbx[0][1] + half_lng, self.c_center[2]),
(self.c_bbx[0][0] + half_lat, self.c_bbx[1][1], self.c_bbx[1][2]))
self.next_101 = self.__class__(
center_101, c_bbx_101, half_length, half_width, half_height, capacity=self.capacity
)
self.next_101.parent = self
self.next_101.path = self.next_101.parent.path + '101/'
center_110 = self.point_class(
self.center.x - oct_length, self.center.y +
oct_width, self.center.z + oct_height
)
c_bbx_110 = ((self.c_center[0], self.c_bbx[0][1], self.c_center[2]),
(self.c_bbx[1][0], self.c_center[1], self.c_bbx[1][2]))
self.next_110 = self.__class__(
center_110, c_bbx_110, half_length, half_width, half_height, capacity=self.capacity
)
self.next_110.parent = self
self.next_110.path = self.next_110.parent.path + '110/'
center_111 = self.point_class(
self.center.x + oct_length, self.center.y +
oct_width, self.center.z + oct_height
)
c_bbx_111 = ((self.c_center[0], self.c_center[1], self.c_center[2]),
(self.c_bbx[1][0], self.c_bbx[1][1], self.c_bbx[1][2]))
self.next_111 = self.__class__(
center_111, c_bbx_111, half_length, half_width, half_height, capacity=self.capacity
)
self.next_111.parent = self
self.next_111.path = self.next_111.parent.path + '111/'
# Redistribute the points.
# Manually call `append` here, as calling `.insert()` creates an
# infinite recursion situation.
for pnt in self.points:
if self.is_000(pnt):
self.next_000.points.append(pnt)
elif self.is_001(pnt):
self.next_001.points.append(pnt)
elif self.is_010(pnt):
self.next_010.points.append(pnt)
elif self.is_011(pnt):
self.next_011.points.append(pnt)
elif self.is_100(pnt):
self.next_100.points.append(pnt)
elif self.is_101(pnt):
self.next_101.points.append(pnt)
elif self.is_110(pnt):
self.next_110.points.append(pnt)
else:
self.next_111.points.append(pnt)
self.points = []
def insert(self, point):
    """Insert *point* into this node, subdividing first when over capacity.

    Raises:
        ValueError: If the point lies outside this node's bounds.
    Returns:
        bool: ``True`` once the point has been stored.
    """
    if not self.contains_point(point):
        raise ValueError(
            "Point {} is not within this node ({} - {}).".format(
                point, self.center, self.bounding_box
            )
        )
    # Subdivide first if storing one more point would exceed capacity.
    if (len(self.points) + 1) > self.capacity:
        self.subdivide()
    if self.next_000 is not None:
        # Children exist: route the point to whichever octant claims it.
        octants = (
            (self.is_000, self.next_000),
            (self.is_001, self.next_001),
            (self.is_010, self.next_010),
            (self.is_011, self.next_011),
            (self.is_100, self.next_100),
            (self.is_101, self.next_101),
            (self.is_110, self.next_110),
            (self.is_111, self.next_111),
        )
        for belongs_here, child in octants:
            if belongs_here(point):
                return child.insert(point)
    # Leaf node with spare capacity: keep the point locally.
    self.points.append(point)
    return True
class Octree(object):
    """Public facade over a tree of ``OctNode`` octants.

    Wraps a single root node covering the full length x width x height
    point space and forwards insertions to it.
    """
    # Hook points so subclasses can swap in their own node/point types.
    node_class = OctNode
    point_class = Point

    def __init__(self, center, c_bbx, length, width, height, capacity=None):
        """
        Constructs an `Octree` object.

        Args:
            center (tuple|Point): The center point of the octree.
            c_bbx (tuple): Coordinate bounding box as a pair of corner
                triples; per ``bbx2center`` the component order is
                (lat, lng, alt).
            length (int|float): The length of the point space.
            width (int|float): The width of the point space.
            height (int|float): The height of the point space.
            capacity (int): Optional. The number of points per octant before
                subdivision occurs. Default is `None`.
        """
        self.length = length
        self.width = width
        self.height = height
        self.center = self.convert_to_point(center)
        self.c_bbx = c_bbx
        self.c_center = self.bbx2center(c_bbx)
        # Single root node; all inserts are delegated to it.
        self._root = self.node_class(
            self.center, self.c_bbx, self.length, self.width, self.height, capacity=capacity
        )

    def convert_to_point(self, val):
        """
        Converts a value to a `Point` object.

        This is to allow shortcuts, like providing a tuple for a point.

        Args:
            val (Point|tuple|None): The value to convert.

        Returns:
            Point: A point object (``None`` maps to the origin).
        """
        if isinstance(val, self.point_class):
            return val
        elif isinstance(val, (tuple, list)):
            return self.point_class(val[0], val[1], val[2])
        elif val is None:
            return self.point_class(0, 0, 0)
        else:
            raise ValueError(
                "Unknown data provided for point. Please use one of: "
                "quads.Point | tuple | list | None"
            )

    def insert(self, point, data=None):
        """
        Inserts a `Point` into the octree.

        Args:
            point (Point|tuple|None): The point to insert.
            data (any): Optional. Corresponding data for that point. Default
                is `None`.

        Returns:
            bool: `True` if insertion succeeded, otherwise `False`.
        """
        pnt = self.convert_to_point(point)
        # df = pd.DataFrame(
        #     columns=('LONGITUDE', 'LATITUDE', 'TIME', 'geohash'))
        # df = df.append(data, ignore_index=True)
        # The payload rides along on the point object itself.
        pnt.data = data
        return self._root.insert(pnt)

    def bbx2center(self, c_bbx):
        """Return the midpoint (lat, lng, alt) of a coordinate bounding box."""
        c_lat = c_bbx[0][0] + (c_bbx[1][0] - c_bbx[0][0]) / 2
        c_lng = c_bbx[0][1] + (c_bbx[1][1] - c_bbx[0][1]) / 2
        c_alt = c_bbx[0][2] + (c_bbx[1][2] - c_bbx[0][2]) / 2
        c_center = (c_lat, c_lng, c_alt)
        return c_center
| en | 0.526005 | # draw_lines(node) def draw_lines(node): bb = node.bounding_box # The scales for axhline & axvline are 0-1, so we have to convert # our values. x_offset = -tree._root.bounding_box.min_x min_x = (bb.min_x + x_offset) / 100 max_x = (bb.max_x + x_offset) / 100 y_offset = -tree._root.bounding_box.min_y min_y = (bb.min_y + y_offset) / 100 max_y = (bb.max_y + y_offset) / 100 z_offset = -tree._root.bounding_box.min_z min_z = (bb.min_z + z_offset) / 100 max_z = (bb.max_z + z_offset) / 100 art3d.axhline( node.center.y, min_x, max_x, color="grey", linewidth=0.5 ) pyplot.axvline( node.center.x, min_y, max_y, color="grey", linewidth=0.5 ) pyplot.axvline( node.center.z, min_z, max_z, color="grey", linewidth=0.5 ) # Draw the axis first # 新建以根结点为起始的文件夹 # 遍历所有子节点,若所有子节点均为叶节点,则对其子节点进行采样并存在该节点中 # 若存在非叶节点,则对其非叶节点进行继续遍历 # 在目录中新建文件,若无文件,则新建。若有文件,则打开后继续追加 # 该节点是叶节点 # 该节点是根节点,且为叶节点,对节点采样存入根目录 # 该节点不是根节点,且为叶节点,对节点采样存入父节点 # 该节点不是叶节点,指针指向其子节点 # 最后子节点采样完成,对本节点采样 # 采样 Checks if a point would be within the bounding box of the node. This is a bounding check, not verification the point is present in the data. Args: point (Point): The point to check. Returns: bool: `True` if it is within the bounds, otherwise `False`. Subdivides an existing node into the node + children. Returns: None: Nothing to see here. Please go about your business. # Redistribute the points. # Manually call `append` here, as calling `.insert()` creates an # infinite recursion situation. # Check to ensure we're not going to go over capacity. # We're over capacity. Subdivide, then insert into the new child. # There are no child nodes & we're under capacity. Add it to `points`. Constructs a `QuadTree` object. Args: center (tuple|Point): The center point of the quadtree. width (int|float): The width of the point space. height (int|float): The height of the point space. capacity (int): Optional. The number of points per quad before subdivision occurs. Default is `None`. Converts a value to a `Point` object. 
This is to allow shortcuts, like providing a tuple for a point. Args: val (Point|tuple|None): The value to convert. Returns: Point: A point object. Inserts a `Point` into the quadtree. Args: point (Point|tuple|None): The point to insert. data (any): Optional. Corresponding data for that point. Default is `None`. Returns: bool: `True` if insertion succeeded, otherwise `False`. # df = pd.DataFrame( # columns=('LONGITUDE', 'LATITUDE', 'TIME', 'geohash')) # df = df.append(data, ignore_index=True) | 3.227127 | 3 |
24.py | lycantropos/Project-Euler | 0 | 6623522 | <filename>24.py<gh_stars>0
from itertools import (permutations,
islice)
from typing import Iterable
def digits_lexicographic_permutation(*,
                                     digits: Iterable[int],
                                     index: int) -> tuple:
    """Return the ``index``-th (1-based) permutation of ``digits``.

    ``itertools.permutations`` yields permutations in lexicographic order
    when its input is sorted, so slicing out element ``index - 1`` produces
    the answer without materialising every permutation.

    Fix: the original annotation said ``-> int``, but the function returns a
    tuple of digits (e.g. ``(2, 7, 8, ...)``), so it is annotated ``tuple``.
    """
    return next(islice(permutations(digits), index - 1, index))
# Import-time sanity checks: permutations() yields tuples in lexicographic
# order for sorted input, and the millionth permutation of the digits 0-9
# matches the known Project Euler #24 answer.
assert list(permutations(range(3))) == [(0, 1, 2), (0, 2, 1), (1, 0, 2),
                                        (1, 2, 0), (2, 0, 1), (2, 1, 0)]
assert digits_lexicographic_permutation(digits=range(10),
                                        index=1_000_000) == (2, 7, 8, 3, 9,
                                                             1, 5, 4, 6, 0)
| <filename>24.py<gh_stars>0
from itertools import (permutations,
islice)
from typing import Iterable
def digits_lexicographic_permutation(*,
digits: Iterable[int],
index: int) -> int:
return next(islice(permutations(digits), index - 1, index))
assert list(permutations(range(3))) == [(0, 1, 2), (0, 2, 1), (1, 0, 2),
(1, 2, 0), (2, 0, 1), (2, 1, 0)]
assert digits_lexicographic_permutation(digits=range(10),
index=1_000_000) == (2, 7, 8, 3, 9,
1, 5, 4, 6, 0)
| none | 1 | 3.271427 | 3 | |
models/config_sr.py | ustato/sber-swap | 210 | 6623523 | <reponame>ustato/sber-swap<gh_stars>100-1000
import sys
class TestOptions(object):
    """Hard-coded, inference-time option set for the pix2pix/SPADE generator.

    Values are fixed for testing (``isTrain = False``, ``phase = 'test'``).
    NOTE(review): attribute semantics appear to mirror the upstream
    SPADE/pix2pix option-parser names (e.g. ``ngf`` = generator filter
    count) -- confirm against the networks that consume this object.
    """
    name = 'weights'
    results_dir = './results/'
    gpu_ids = [0]
    crop_size = 256
    dataset_mode = 'test'
    which_epoch = '10'  # checkpoint epoch to load
    aspect_ratio = 1.0
    checkpoints_dir = ''
    init_type = 'xavier'  # weight-initialisation scheme
    init_variance = 0.02
    isTrain = False
    is_test = True
    semantic_nc = 3
    model = 'pix2pix'
    netG = 'lipspade'
    nef = 16
    ngf = 48
    norm_G = 'spectralspadesyncbatch3x3'
    num_upsampling_layers = 'normal'
    phase = 'test'
    use_vae = False
    z_dim = 256
| import sys
class TestOptions(object):
name = 'weights'
results_dir = './results/'
gpu_ids = [0]
crop_size = 256
dataset_mode = 'test'
which_epoch = '10'
aspect_ratio = 1.0
checkpoints_dir = ''
init_type = 'xavier'
init_variance = 0.02
isTrain = False
is_test = True
semantic_nc = 3
model = 'pix2pix'
netG = 'lipspade'
nef = 16
ngf = 48
norm_G = 'spectralspadesyncbatch3x3'
num_upsampling_layers = 'normal'
phase = 'test'
use_vae = False
z_dim = 256 | none | 1 | 1.536384 | 2 | |
game_mastering/models/file.py | SamusChief/myth-caster-api | 0 | 6623524 | """ Model for tracking file uploads to the server """
from django.db import models
from common.models import PrivateModel
class GameMasterFile(PrivateModel):
    """
    Model to represent specific files.

    Attributes
        name: unique, indexed display name for the file
        upload: the file uploaded to the server, stored under a
            ``game_master_materials/<year>/<month>/<day>/`` path
    """
    name = models.CharField(unique=True, max_length=255, db_index=True)
    upload = models.FileField(upload_to='game_master_materials/%Y/%m/%d/')

    def __str__(self):
        # e.g. "File: 3|rulebook.pdf"
        return f'File: {self.id}|{self.name}'
| """ Model for tracking file uploads to the server """
from django.db import models
from common.models import PrivateModel
class GameMasterFile(PrivateModel):
"""
Model to represent specific files.
Attributes
upload: the file uploaded to the server
"""
name = models.CharField(unique=True, max_length=255, db_index=True)
upload = models.FileField(upload_to='game_master_materials/%Y/%m/%d/')
def __str__(self):
return f'File: {self.id}|{self.name}'
| en | 0.957184 | Model for tracking file uploads to the server Model to represent specific files. Attributes upload: the file uploaded to the server | 2.748875 | 3 |
scripts/functions/models_transcript.py | ajlee21/cycleGAN_gene_expression | 2 | 6623525 | <gh_stars>1-10
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.utils.data import DataLoader
import pandas as pd
import numpy as np
def weights_init_normal(m):
    """Initialise conv / batch-norm modules with DCGAN-style normal weights.

    Intended for use with ``model.apply(weights_init_normal)``:
    - ``*Conv*`` layers: weights ~ N(0, 0.02), bias zeroed when present.
    - ``BatchNorm2d`` layers: weights ~ N(1, 0.02), bias zeroed.
    Other module types are left untouched.
    """
    layer_name = type(m).__name__
    if "Conv" in layer_name:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
        if getattr(m, "bias", None) is not None:
            torch.nn.init.constant_(m.bias.data, 0.0)
    elif "BatchNorm2d" in layer_name:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
##############################
# RESNET
##############################
class ResidualBlock(nn.Module):
    """Two-layer linear residual block: ``forward(x) = x + MLP(x)``."""

    def __init__(self, in_features):
        super(ResidualBlock, self).__init__()
        # Linear -> ReLU -> Linear; dimensionality is preserved so the skip
        # connection can be added element-wise.
        layers = [
            nn.Linear(in_features, in_features),
            nn.ReLU(),
            nn.Linear(in_features, in_features),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        residual = self.block(x)
        return x + residual
class GeneratorResNet(nn.Module):
    """Encoder -> residual transformation -> decoder over flat feature vectors.

    Args:
        input_shape: shape tuple whose second entry is the feature dimension.
        hidden_dim: width of the intermediate encoder/decoder layers.
        output_dim: width of the bottleneck where residual blocks operate.
        num_residual_blocks: number of ``ResidualBlock``s in the bottleneck.
    """

    def __init__(self, input_shape, hidden_dim, output_dim, num_residual_blocks):
        super(GeneratorResNet, self).__init__()
        input_dim = input_shape[1]
        # Encoder (downsampling): input_dim -> hidden_dim -> output_dim.
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, output_dim),
        ]
        # Transformation: stacked residual blocks at the bottleneck width.
        layers.extend(ResidualBlock(output_dim) for _ in range(num_residual_blocks))
        # Decoder (upsampling): output_dim -> hidden_dim -> input_dim.
        layers.extend([
            nn.Linear(output_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, input_dim),
        ])
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)
##############################
# Discriminator
##############################
class Discriminator(nn.Module):
    """Three-layer MLP critic producing one scalar score per sample."""

    def __init__(self, input_shape, hidden_dim, output_dim):
        super(Discriminator, self).__init__()
        input_dim = input_shape[1]
        # NOTE(review): ``(output_dim)`` is just ``output_dim`` wrapped in
        # parentheses, not a 1-tuple; if callers unpack ``output_shape`` as a
        # shape this should probably be ``(output_dim,)`` -- confirm before
        # changing (kept as-is to preserve behavior).
        self.output_shape = (output_dim)
        # Extract features from generated sample, ending in a single score.
        stages = [
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, output_dim),
            nn.ReLU(),
            nn.Linear(output_dim, 1),
        ]
        self.model = nn.Sequential(*stages)

    def forward(self, img):
        return self.model(img)
| import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.utils.data import DataLoader
import pandas as pd
import numpy as np
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
if hasattr(m, "bias") and m.bias is not None:
torch.nn.init.constant_(m.bias.data, 0.0)
elif classname.find("BatchNorm2d") != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
##############################
# RESNET
##############################
class ResidualBlock(nn.Module):
def __init__(self, in_features):
super(ResidualBlock, self).__init__()
self.block = nn.Sequential(
nn.Linear(in_features, in_features),
nn.ReLU(),
nn.Linear(in_features, in_features)
)
def forward(self, x):
return x + self.block(x)
class GeneratorResNet(nn.Module):
def __init__(self, input_shape, hidden_dim, output_dim, num_residual_blocks):
super(GeneratorResNet, self).__init__()
input_dim = input_shape[1]
# Encoder (Downsampling)
model = [
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, output_dim),
]
# Transformation (Residual blocks)
for _ in range(num_residual_blocks):
model += [ResidualBlock(output_dim)]
# Decoder (Upsampling)
model += [
nn.Linear(output_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim),
]
# Output layer
# model += [nn.ReflectionPad2d(channels),
# nn.Conv2d(out_features, channels, 7), nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
##############################
# Discriminator
##############################
class Discriminator(nn.Module):
def __init__(self, input_shape, hidden_dim, output_dim):
super(Discriminator, self).__init__()
input_dim = input_shape[1]
self.output_shape = (output_dim)
# Extract features from generated sample
self.model = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, output_dim),
nn.ReLU(),
nn.Linear(output_dim, 1),
)
def forward(self, img):
return self.model(img) | de | 0.35443 | ############################## # RESNET ############################## # Encoder (Downsampling) # Transformation (Residual blocks) # Decoder (Upsampling) # Output layer # model += [nn.ReflectionPad2d(channels), # nn.Conv2d(out_features, channels, 7), nn.Tanh()] ############################## # Discriminator ############################## # Extract features from generated sample | 2.917812 | 3 |
1_day/data_fiting.py | h-mayorquin/g_node_data_analysis_205 | 0 | 6623526 | import numpy as np
import matplotlib.pyplot as plt
from load_data_depression import V_mean
from scipy.signal import argrelextrema
T_data = 1200
dt_data = 1000.0 / 4000
times_data = np.arange(0, T_data, dt_data)
if True:
plt.plot(V_mean, label='V_experiment')
plt.legend()
plt.ylim([-0.5, 3])
plt.hold(True)
# We need to extract a vector of the maximums
arg_maximums = argrelextrema(V_mean, np.greater, order=100)[0]
arg_minium = argrelextrema(V_mean, np.less, order=100)[0]
tol = 0.5
tol_min = 1e-8
# From all the maximus we extract those ones that are
arg_maximums_true = []
for arg in arg_maximums:
if V_mean[arg] > tol:
arg_maximums_true.append(arg)
arg_minium_true = []
for arg in arg_minium:
if (V_mean[arg] - V_mean[0]) > tol_min:
arg_minium_true.append(arg)
arg_minium_true.append(arg_minium[-2])
arg_minium_true.append(arg_minium[1])
values_min = V_mean[arg_minium_true]
values_max = V_mean[arg_maximums_true]
plt.plot(arg_maximums_true, values_max, 'or', markersize=10)
plt.hold(True)
plt.plot(arg_minium_true, V_mean[arg_minium_true], 'og', markersize=10)
plt.show()
Amp_data = V_mean[arg_maximums_true] - V_mean[arg_minium_true]
| import numpy as np
import matplotlib.pyplot as plt
from load_data_depression import V_mean
from scipy.signal import argrelextrema
T_data = 1200
dt_data = 1000.0 / 4000
times_data = np.arange(0, T_data, dt_data)
if True:
plt.plot(V_mean, label='V_experiment')
plt.legend()
plt.ylim([-0.5, 3])
plt.hold(True)
# We need to extract a vector of the maximums
arg_maximums = argrelextrema(V_mean, np.greater, order=100)[0]
arg_minium = argrelextrema(V_mean, np.less, order=100)[0]
tol = 0.5
tol_min = 1e-8
# From all the maximus we extract those ones that are
arg_maximums_true = []
for arg in arg_maximums:
if V_mean[arg] > tol:
arg_maximums_true.append(arg)
arg_minium_true = []
for arg in arg_minium:
if (V_mean[arg] - V_mean[0]) > tol_min:
arg_minium_true.append(arg)
arg_minium_true.append(arg_minium[-2])
arg_minium_true.append(arg_minium[1])
values_min = V_mean[arg_minium_true]
values_max = V_mean[arg_maximums_true]
plt.plot(arg_maximums_true, values_max, 'or', markersize=10)
plt.hold(True)
plt.plot(arg_minium_true, V_mean[arg_minium_true], 'og', markersize=10)
plt.show()
Amp_data = V_mean[arg_maximums_true] - V_mean[arg_minium_true]
| en | 0.931947 | # We need to extract a vector of the maximums # From all the maximus we extract those ones that are | 2.566187 | 3 |
users/migrations/0004_auto_20200608_2230.py | a-samir97/mostql-project | 0 | 6623527 | # Generated by Django 3.0 on 2020-06-08 19:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated schema migration (``makemigrations``); edit with care.
class Migration(migrations.Migration):

    dependencies = [
        ('users', '0003_adminuser_is_blocked'),
    ]

    operations = [
        # Drop the legacy explicit ``user_id`` links on both user subtypes.
        migrations.RemoveField(
            model_name='adminuser',
            name='user_id',
        ),
        migrations.RemoveField(
            model_name='appuser',
            name='user_id',
        ),
        # Re-declare the multi-table-inheritance parent links to the custom
        # user model (``parent_link=True`` marks them as the MTI pointer).
        migrations.AlterField(
            model_name='adminuser',
            name='customuser_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='appuser',
            name='customuser_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
| # Generated by Django 3.0 on 2020-06-08 19:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0003_adminuser_is_blocked'),
]
operations = [
migrations.RemoveField(
model_name='adminuser',
name='user_id',
),
migrations.RemoveField(
model_name='appuser',
name='user_id',
),
migrations.AlterField(
model_name='adminuser',
name='customuser_ptr',
field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='appuser',
name='customuser_ptr',
field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='customuser',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| en | 0.819543 | # Generated by Django 3.0 on 2020-06-08 19:30 | 1.619938 | 2 |
blog/models.py | robml/django_diy_blog | 0 | 6623528 | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
# Create your models here.
class Blog(models.Model):
    """A model representing Blog Posts.

    Stores a title, an auto-set creation date, an optional author (left NULL
    if the author record is deleted) and the post body.
    """
    title=models.CharField(max_length=200)
    post_date = models.DateField(auto_now_add=True)  # set once, on creation
    author = models.ForeignKey('BlogAuthor',on_delete=models.SET_NULL, null = True)
    description = models.TextField(max_length=1000,help_text="Contents of the actual post")

    class Meta:
        # Newest posts first.
        ordering = ['-post_date']

    def get_absolute_url(self):
        """Returns the url to access a particular blog instance."""
        return reverse('blog-detail',args=[str(self.id)])

    def __str__(self):
        return self.title
class Comment(models.Model):
    """Comments for each blog post.

    Author and blog links are left NULL if the related record is deleted.
    """
    comment = models.TextField(max_length=200,help_text='Enter comment about blog post here.')
    post_date = models.DateTimeField(auto_now_add=True)  # set once, on creation
    author = models.ForeignKey(User,on_delete=models.SET_NULL,null=True)
    blog = models.ForeignKey('Blog',on_delete=models.SET_NULL,null=True)

    class Meta:
        # Oldest comments first (conversation order).
        ordering = ['post_date']

    def __str__(self):
        return self.comment
class BlogAuthor(models.Model):
    """Model representing Bloggers"""
    # One blogger profile per auth user; set NULL if the user is deleted.
    name = models.OneToOneField(User,on_delete=models.SET_NULL,null=True)
    bio = models.CharField(max_length=1000,null=True,blank=True)

    class Meta:
        ordering = ['name']

    def get_absolute_url(self):
        """Returns the url to access a particular author instance."""
        return reverse('blogauthor-detail',args=[str(self.id)])

    # NOTE(review): __str__ dereferences ``self.name.username``, which raises
    # AttributeError once ``name`` has been nulled by SET_NULL -- confirm
    # whether a guard is needed.
    def __str__(self):
return self.name.username | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
# Create your models here.
class Blog(models.Model):
"""A model representing Blog Posts"""
title=models.CharField(max_length=200)
post_date = models.DateField(auto_now_add=True)
author = models.ForeignKey('BlogAuthor',on_delete=models.SET_NULL, null = True)
description = models.TextField(max_length=1000,help_text="Contents of the actual post")
class Meta:
ordering = ['-post_date']
def get_absolute_url(self):
"""Returns the url to access a particular blog instance."""
return reverse('blog-detail',args=[str(self.id)])
def __str__(self):
return self.title
class Comment(models.Model):
"""Comments for each blog post"""
comment = models.TextField(max_length=200,help_text='Enter comment about blog post here.')
post_date = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey(User,on_delete=models.SET_NULL,null=True)
blog = models.ForeignKey('Blog',on_delete=models.SET_NULL,null=True)
class Meta:
ordering = ['post_date']
def __str__(self):
return self.comment
class BlogAuthor(models.Model):
"""Model representing Bloggers"""
name = models.OneToOneField(User,on_delete=models.SET_NULL,null=True)
bio = models.CharField(max_length=1000,null=True,blank=True)
class Meta:
ordering = ['name']
def get_absolute_url(self):
"""Returns the url to access a particular author instance."""
return reverse('blogauthor-detail',args=[str(self.id)])
def __str__(self):
return self.name.username | en | 0.86304 | # Create your models here. A model representing Blog Posts Returns the url to access a particular blog instance. Comments for each blog post Model representing Bloggers Returns the url to access a particular author instance. | 2.875722 | 3 |
swf/actors/worker.py | nstott/simpleflow | 0 | 6623529 | # -*- coding:utf-8 -*-
import boto.exception
from simpleflow import format
from swf.actors import Actor
from swf.models import ActivityTask
from swf.exceptions import PollTimeout, ResponseError, DoesNotExistError, RateLimitExceededError
from swf.responses import Response
class ActivityWorker(Actor):
    """Activity task worker actor implementation

    Once started, will start polling for activity task,
    to process, and emitting heartbeat until it's stopped
    or crashes for some reason.

    :param  domain: Domain the Actor should interact with
    :type   domain: swf.models.Domain

    :param  task_list: task list the Actor should watch for tasks on
    :type   task_list: string

    :param  identity: Identity of the worker making the request,
                      which is recorded in the ActivityTaskStarted
                      event in the workflow history. This enables
                      diagnostic tracing when problems arise.
                      The form of this identity is user defined.
    :type   identity: string
    """
    def __init__(self, domain, task_list, identity=None):
        super(ActivityWorker, self).__init__(
            domain,
            task_list
        )
        self._identity = identity

    def _raise_mapped_error(self, error, not_found_message, throttled_message=None):
        """Translate a boto ``SWFResponseError`` into the matching swf error.

        Factors out the error-handling boilerplate shared by the responder
        methods below.  Always raises; never returns.

        :param  error: the boto response error to translate
        :type   error: boto.exception.SWFResponseError

        :param  not_found_message: message used for ``UnknownResourceFault``
        :type   not_found_message: string

        :param  throttled_message: optional message used for
                                   ``ThrottlingException``; when ``None``,
                                   throttling errors fall through to the
                                   generic ``ResponseError``
        :type   throttled_message: string
        """
        message = self.get_error_message(error)
        if error.error_code == 'UnknownResourceFault':
            raise DoesNotExistError(not_found_message, message)
        if throttled_message is not None and error.error_code == 'ThrottlingException':
            raise RateLimitExceededError(throttled_message, message)
        raise ResponseError(message)

    def cancel(self, task_token, details=None):
        """Responds to ``swf`` that the activity task was canceled

        :param  task_token: canceled activity task token
        :type   task_token: string

        :param  details: provided details about cancel
        :type   details: string
        """
        try:
            return self.connection.respond_activity_task_canceled(
                task_token,
                details=format.details(details),
            )
        except boto.exception.SWFResponseError as e:
            self._raise_mapped_error(
                e,
                "Unable to cancel activity task with token={}".format(task_token),
            )

    def complete(self, task_token, result=None):
        """Responds to ``swf`` that the activity task is completed

        :param  task_token: completed activity task token
        :type   task_token: string

        :param  result: The result of the activity task.
        :type   result: string
        """
        try:
            return self.connection.respond_activity_task_completed(
                task_token,
                format.result(result),
            )
        except boto.exception.SWFResponseError as e:
            self._raise_mapped_error(
                e,
                "Unable to complete activity task with token={}".format(task_token),
            )

    def fail(self, task_token, details=None, reason=None):
        """Replies to ``swf`` that the activity task failed

        :param  task_token: failed activity task token
        :type   task_token: string

        :param  details: provided details about the failure
        :type   details: string

        :param  reason: Description of the error that may assist in diagnostics
        :type   reason: string
        """
        try:
            return self.connection.respond_activity_task_failed(
                task_token,
                details=format.details(details),
                reason=format.reason(reason),
            )
        except boto.exception.SWFResponseError as e:
            self._raise_mapped_error(
                e,
                "Unable to fail activity task with token={}".format(task_token),
            )

    def heartbeat(self, task_token, details=None):
        """Records activity task heartbeat

        :param  task_token: activity task token being heartbeaten
        :type   task_token: str

        :param  details: provided details about task progress
        :type   details: string
        """
        try:
            return self.connection.record_activity_task_heartbeat(
                task_token,
                format.heartbeat_details(details),
            )
        except boto.exception.SWFResponseError as e:
            self._raise_mapped_error(
                e,
                "Unable to send heartbeat with token={}".format(task_token),
                "Rate exceeded when sending heartbeat with token={}".format(task_token),
            )

    def poll(self, task_list=None, identity=None):
        """Polls for an activity task to process from current
        actor's instance defined ``task_list``

        if no activity task was polled, raises a PollTimeout
        exception.

        :param  task_list: task list the Actor should watch for tasks on
        :type   task_list: string

        :param  identity: Identity of the worker making the request,
                          which is recorded in the ActivityTaskStarted
                          event in the workflow history. This enables
                          diagnostic tracing when problems arise.
                          The form of this identity is user defined.
        :type   identity: string

        :raises: PollTimeout

        :returns: task token, polled activity task
        :rtype: (str, ActivityTask)
        """
        task_list = task_list or self.task_list
        identity = identity or self._identity
        try:
            polled_activity_data = self.connection.poll_for_activity_task(
                self.domain.name,
                task_list,
                identity=format.identity(identity),
            )
        except boto.exception.SWFResponseError as e:
            self._raise_mapped_error(e, "Unable to poll activity task")
        # Absence of a task token means the long poll timed out server-side.
        if 'taskToken' not in polled_activity_data:
            raise PollTimeout("Activity Worker poll timed out")
        activity_task = ActivityTask.from_poll(
            self.domain,
            self.task_list,
            polled_activity_data
        )
        return Response(
            task_token=activity_task.task_token,
            activity_task=activity_task,
            raw_response=polled_activity_data,
        )
| # -*- coding:utf-8 -*-
import boto.exception
from simpleflow import format
from swf.actors import Actor
from swf.models import ActivityTask
from swf.exceptions import PollTimeout, ResponseError, DoesNotExistError, RateLimitExceededError
from swf.responses import Response
class ActivityWorker(Actor):
"""Activity task worker actor implementation
Once started, will start polling for activity task,
to process, and emitting heartbeat until it's stopped
or crashes for some reason.
:param domain: Domain the Actor should interact with
:type domain: swf.models.Domain
:param task_list: task list the Actor should watch for tasks on
:type task_list: string
:param identity: Identity of the worker making the request,
which is recorded in the ActivityTaskStarted
event in the workflow history. This enables
diagnostic tracing when problems arise.
The form of this identity is user defined.
:type identity: string
"""
def __init__(self, domain, task_list, identity=None):
super(ActivityWorker, self).__init__(
domain,
task_list
)
self._identity = identity
def cancel(self, task_token, details=None):
"""Responds to ``swf`` that the activity task was canceled
:param task_token: canceled activity task token
:type task_token: string
:param details: provided details about cancel
:type details: string
"""
try:
return self.connection.respond_activity_task_canceled(
task_token,
details=format.details(details),
)
except boto.exception.SWFResponseError as e:
message = self.get_error_message(e)
if e.error_code == 'UnknownResourceFault':
raise DoesNotExistError(
"Unable to cancel activity task with token={}".format(task_token),
message,
)
raise ResponseError(message)
def complete(self, task_token, result=None):
"""Responds to ``swf`` that the activity task is completed
:param task_token: completed activity task token
:type task_token: string
:param result: The result of the activity task.
:type result: string
"""
try:
return self.connection.respond_activity_task_completed(
task_token,
format.result(result),
)
except boto.exception.SWFResponseError as e:
message = self.get_error_message(e)
if e.error_code == 'UnknownResourceFault':
raise DoesNotExistError(
"Unable to complete activity task with token={}".format(task_token),
message,
)
raise ResponseError(message)
def fail(self, task_token, details=None, reason=None):
"""Replies to ``swf`` that the activity task failed
:param task_token: canceled activity task token
:type task_token: string
:param details: provided details about the failure
:type details: string
:param reason: Description of the error that may assist in diagnostics
:type reason: string
"""
try:
return self.connection.respond_activity_task_failed(
task_token,
details=format.details(details),
reason=format.reason(reason),
)
except boto.exception.SWFResponseError as e:
message = self.get_error_message(e)
if e.error_code == 'UnknownResourceFault':
raise DoesNotExistError(
"Unable to fail activity task with token={}".format(task_token),
message,
)
raise ResponseError(message)
def heartbeat(self, task_token, details=None):
"""Records activity task heartbeat
:param task_token: canceled activity task token
:type task_token: str
:param details: provided details about task progress
:type details: string
"""
try:
return self.connection.record_activity_task_heartbeat(
task_token,
format.heartbeat_details(details),
)
except boto.exception.SWFResponseError as e:
message = self.get_error_message(e)
if e.error_code == 'UnknownResourceFault':
raise DoesNotExistError(
"Unable to send heartbeat with token={}".format(task_token),
message,
)
if e.error_code == 'ThrottlingException':
raise RateLimitExceededError(
"Rate exceeded when sending heartbeat with token={}".format(task_token),
message,
)
raise ResponseError(message)
def poll(self, task_list=None, identity=None):
"""Polls for an activity task to process from current
actor's instance defined ``task_list``
if no activity task was polled, raises a PollTimeout
exception.
:param task_list: task list the Actor should watch for tasks on
:type task_list: string
:param identity: Identity of the worker making the request,
which is recorded in the ActivityTaskStarted
event in the workflow history. This enables
diagnostic tracing when problems arise.
The form of this identity is user defined.
:type identity: string
:raises: PollTimeout
:returns: task token, polled activity task
:rtype: (str, ActivityTask)
"""
task_list = task_list or self.task_list
identity = identity or self._identity
try:
polled_activity_data = self.connection.poll_for_activity_task(
self.domain.name,
task_list,
identity=format.identity(identity),
)
except boto.exception.SWFResponseError as e:
message = self.get_error_message(e)
if e.error_code == 'UnknownResourceFault':
raise DoesNotExistError(
"Unable to poll activity task",
message,
)
raise ResponseError(message)
if 'taskToken' not in polled_activity_data:
raise PollTimeout("Activity Worker poll timed out")
activity_task = ActivityTask.from_poll(
self.domain,
self.task_list,
polled_activity_data
)
return Response(
task_token=activity_task.task_token,
activity_task=activity_task,
raw_response=polled_activity_data,
)
| en | 0.801844 | # -*- coding:utf-8 -*- Activity task worker actor implementation Once started, will start polling for activity task, to process, and emitting heartbeat until it's stopped or crashes for some reason. :param domain: Domain the Actor should interact with :type domain: swf.models.Domain :param task_list: task list the Actor should watch for tasks on :type task_list: string :param identity: Identity of the worker making the request, which is recorded in the ActivityTaskStarted event in the workflow history. This enables diagnostic tracing when problems arise. The form of this identity is user defined. :type identity: string Responds to ``swf`` that the activity task was canceled :param task_token: canceled activity task token :type task_token: string :param details: provided details about cancel :type details: string Responds to ``swf`` that the activity task is completed :param task_token: completed activity task token :type task_token: string :param result: The result of the activity task. :type result: string Replies to ``swf`` that the activity task failed :param task_token: canceled activity task token :type task_token: string :param details: provided details about the failure :type details: string :param reason: Description of the error that may assist in diagnostics :type reason: string Records activity task heartbeat :param task_token: canceled activity task token :type task_token: str :param details: provided details about task progress :type details: string Polls for an activity task to process from current actor's instance defined ``task_list`` if no activity task was polled, raises a PollTimeout exception. :param task_list: task list the Actor should watch for tasks on :type task_list: string :param identity: Identity of the worker making the request, which is recorded in the ActivityTaskStarted event in the workflow history. This enables diagnostic tracing when problems arise. The form of this identity is user defined. 
:type identity: string :raises: PollTimeout :returns: task token, polled activity task :rtype: (str, ActivityTask) | 2.439396 | 2 |
raw-input.py | eltechno/python_course | 4 | 6623530 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Created by techno at 8/04/19
#Feature: #Enter feature name here
# Enter feature description here
#Scenario: # Enter scenario name here
# Enter steps here
# NOTE: the original used `str`, `list` and `tuple` as variable names,
# shadowing the builtins (after `tuple = tuple(list)` the builtin `tuple`
# could no longer be called).  Renamed to safe identifiers; output values
# are unchanged.
text = "uno, dos, tres"
# Split on commas; items after the first keep their leading space.
items = text.split(",")
items_tuple = tuple(items)
print('list : ', items)
print('tuple: ', items_tuple)
print(len(items))
# -*- coding: utf-8 -*-
# Created by techno at 8/04/19
#Feature: #Enter feature name here
# Enter feature description here
#Scenario: # Enter scenario name here
# Enter steps here
str = ("uno, dos, tres")
list = str.split(",")
tuple= tuple(list)
print('list : ', list)
print('tuple: ', tuple)
print (len(list)) | en | 0.696684 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Created by techno at 8/04/19 #Feature: #Enter feature name here # Enter feature description here #Scenario: # Enter scenario name here # Enter steps here | 3.729296 | 4 |
hypergraph/representation/unipartite.py | antoneri/mapping-hypergraphs | 10 | 6623531 | <reponame>antoneri/mapping-hypergraphs
from itertools import combinations_with_replacement, product
from hypergraph.network import HyperGraph, Network
from hypergraph.transition import w, P, pi
def create_network(hypergraph: HyperGraph, directed: bool, self_links: bool) -> Network:
    """Project *hypergraph* onto a unipartite network.

    In the directed case, link weights are the stationary probability of
    the source node times the transition probability to the target; in the
    undirected case, the symmetric weight function ``w`` is used over
    unordered node pairs.  Links with weight below 1e-10 are dropped.
    """
    nodes, edges, weights = hypergraph

    print("[unipartite] creating unipartite...")

    if directed:
        P_ = P(edges, weights)
        pi_ = pi(edges, weights)
        node_pairs = product(nodes, repeat=2)

        def link_weight(u, v):
            return pi_(u) * P_(u, v, self_links)
    else:
        w_ = w(edges, weights)
        node_pairs = combinations_with_replacement(nodes, 2)

        def link_weight(u, v):
            return w_(u, v, True)

    links = []
    for u, v in node_pairs:
        weight = link_weight(u, v)
        # Keep the link only if the weight is non-negligible.
        if not weight < 1e-10:
            links.append((u.id, v.id, weight))

    return Network(nodes, sorted(links))
| from itertools import combinations_with_replacement, product
from hypergraph.network import HyperGraph, Network
from hypergraph.transition import w, P, pi
def create_network(hypergraph: HyperGraph, directed: bool, self_links: bool) -> Network:
nodes, edges, weights = hypergraph
print("[unipartite] creating unipartite...")
if directed:
links = []
P_ = P(edges, weights)
pi_ = pi(edges, weights)
for u, v in product(nodes, repeat=2):
weight = pi_(u) * P_(u, v, self_links)
if weight < 1e-10:
continue
links.append((u.id, v.id, weight))
else:
w_ = w(edges, weights)
links = []
for u, v in combinations_with_replacement(nodes, 2):
weight = w_(u, v, True)
if weight < 1e-10:
continue
links.append((u.id, v.id, weight))
return Network(nodes, sorted(links)) | none | 1 | 2.95132 | 3 | |
src/installer.py | atareao/start-here | 35 | 6623532 | #/usr/bin/env python3
# -*- coding: UTF-8 -*-
#
# This file is part of ubuntu-first-steps
#
# Copyright (c) 2020 <NAME> <a.k.a. atareao>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
import gi
try:
gi.require_version('Gtk', '3.0')
gi.require_version('GLib', '2.0')
gi.require_version('Vte', '2.91')
except Exception as e:
print(e)
exit(1)
from gi.repository import Gtk
from gi.repository import GLib
from gi.repository import Vte
import os
import sys
import time
import comun
import json
from comun import _
from doitinbackground import DoItInBackground
import utils
MARGIN = 5
class SmartTerminal(Vte.Terminal):
    """Vte terminal that runs a batch of commands in the background.

    Signals emitted by the :class:`DoItInBackground` worker are forwarded
    to the owning dialog's progress callbacks.
    """

    def __init__(self, parent):
        Vte.Terminal.__init__(self)
        self.parent = parent
        # Worker object; None until execute() is called.
        self.diib = None

    def execute(self, commands):
        """Run *commands* sequentially, reporting progress to the parent."""
        self.diib = DoItInBackground(self, commands)
        handlers = (('started', self.parent.start),
                    ('done_one', self.parent.increase),
                    ('ended', self.parent.end),
                    ('stopped', self.parent.stopped))
        for signal_name, callback in handlers:
            self.diib.connect(signal_name, callback)
        self.diib.start()

    def stop(self):
        """Abort the running command batch, if any."""
        if self.diib is not None:
            self.diib.stop()
class Installer(Gtk.Dialog):  # needs GTK, Python, Webkit-GTK
    """Progress dialog that applies PPA and package changes.

    ``actions`` is a JSON string with the keys ``ppas_to_install``,
    ``ppas_to_remove``, ``apps_to_install`` and ``apps_to_remove``.  The
    corresponding apt commands are executed inside an embedded
    :class:`SmartTerminal` while a progress bar tracks completion.
    """

    def __init__(self, actions):
        """Build the dialog UI and immediately start the installation."""
        Gtk.Dialog.__init__(self)
        self.set_title(_('Add ppa repository'))
        self.set_modal(True)
        self.set_destroy_with_parent(True)
        self.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
        self.set_icon_from_file(comun.ICON)
        self.set_size_request(600, 50)
        actions = json.loads(actions)
        self.ppas_to_install = actions['ppas_to_install']
        self.ppas_to_remove = actions['ppas_to_remove']
        self.apps_to_install = actions['apps_to_install']
        self.apps_to_remove = actions['apps_to_remove']
        box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 10)
        box.set_border_width(5)
        self.get_content_area().add(box)
        grid = Gtk.Grid()
        grid.set_column_spacing(MARGIN)
        grid.set_row_spacing(MARGIN)
        box.add(grid)
        self.label = Gtk.Label.new('')
        self.label.set_halign(Gtk.Align.START)
        grid.attach(self.label, 0, 1, 2, 1)
        self.progressbar = Gtk.ProgressBar()
        grid.attach(self.progressbar, 0, 2, 4, 1)
        # The terminal lives inside an expander so the user can show or
        # hide the raw command output.
        expander = Gtk.Expander()
        expander.connect('notify::expanded', self.on_expanded)
        grid.attach(expander, 0, 3, 4, 4)
        alignment = Gtk.Alignment()
        alignment.props.xscale = 1
        scrolledwindow = Gtk.ScrolledWindow()
        scrolledwindow.set_hexpand(True)
        scrolledwindow.set_vexpand(True)
        self.terminal = SmartTerminal(self)
        scrolledwindow.add(self.terminal)
        alignment.add(scrolledwindow)
        expander.add(alignment)
        hbox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 5)
        grid.attach(hbox, 0, 8, 4, 1)
        self.button_cancel = Gtk.Button.new_with_label(_('Cancel'))
        self.button_cancel.connect('clicked', self.on_button_cancel_clicked)
        hbox.pack_start(self.button_cancel, False, False, 0)
        self.is_added = False
        self.value = 0.0
        self.is_installing = False
        self.show_all()
        self.progressbar.set_visible(False)
        self.label.set_visible(False)
        expander.set_expanded(True)
        time.sleep(1)
        self.start_installation()

    def end(self, anobject, ok, *args):
        """Handler for the worker's 'ended' signal: report and close."""
        self.is_installing = False
        self.button_cancel.set_label(_('Exit'))
        if ok is True:
            kind = Gtk.MessageType.INFO
            message = _('Installation completed!')
        else:
            kind = Gtk.MessageType.ERROR
            message = _('Installation NOT completed!')
        dialog = Gtk.MessageDialog()
        dialog.set_markup(message)
        dialog.set_property('message_type', kind)
        dialog.add_button(_('Ok'), Gtk.ButtonsType.OK)
        dialog.run()
        dialog.destroy()
        self.destroy()

    def stopped(self, anobject, *args):
        """Handler for the worker's 'stopped' signal: close the dialog."""
        self.is_installing = False
        self.button_cancel.set_label(_('Exit'))
        self.destroy()

    def start(self, anobject, total, *args):
        """Handler for 'started': reset progress for *total* commands."""
        self.is_installing = True
        self.label.set_visible(True)
        self.progressbar.set_visible(True)
        self.value = 0.0
        self.max_value = total

    def increase(self, anobject, command, *args):
        """Handler for 'done_one': advance the progress bar one step."""
        GLib.idle_add(self.label.set_text, _('Executing: %s') % command)
        self.value += 1.0
        fraction = self.value / self.max_value
        GLib.idle_add(self.progressbar.set_fraction, fraction)

    def decrease(self):
        """Move the progress bar one step backwards."""
        self.value -= 1.0
        fraction = self.value / self.max_value
        GLib.idle_add(self.progressbar.set_fraction, fraction)

    def on_expanded(self, widget, data):
        """Grow/shrink the dialog when the terminal expander is toggled."""
        if widget.get_property('expanded') is True:
            self.set_size_request(600, 300)
        else:
            self.set_size_request(600, 50)
            self.resize(600, 50)

    def on_button_cancel_clicked(self, button):
        """Confirm with the user before stopping a running installation."""
        if self.is_installing:
            dialog = Gtk.MessageDialog()
            dialog.set_markup(_('Do you want to stop the installation?'))
            dialog.set_property('message-type', Gtk.MessageType.INFO)
            dialog.add_button(_('Ok'), Gtk.ResponseType.OK)
            dialog.add_button(_('Cancel'), Gtk.ResponseType.CANCEL)
            # Run the dialog exactly once.  The original code called
            # dialog.run() a second time inside the condition, which
            # re-displayed the dialog and discarded the first answer.
            ans = dialog.run()
            if ans == Gtk.ResponseType.OK:
                GLib.idle_add(dialog.hide)
                self.terminal.stop()
            GLib.idle_add(dialog.destroy)

    def show_info(self):
        """Make the progress widgets visible."""
        self.progressbar.set_visible(True)
        self.label.set_visible(True)

    def start_installation(self):
        """Translate the requested actions into apt commands and run them."""
        commands = []
        for ppa in self.ppas_to_install:
            commands.append('add-apt-repository -y ppa:{}'.format(ppa))
        for ppa in self.ppas_to_remove:
            commands.append('add-apt-repository -y -r ppa:{}'.format(ppa))
        if len(commands) > 0:
            # Repositories changed: refresh the package lists first.
            commands.append('apt-get update')
            commands.append('apt-get upgrade')
        if len(self.apps_to_install) > 0:
            apps = ' '.join(self.apps_to_install)
            commands.append('apt-get -y install {}'.format(apps))
        if len(self.apps_to_remove) > 0:
            apps = ' '.join(self.apps_to_remove)
            commands.append('apt-get -y remove {}'.format(apps))
        print(commands)
        self.terminal.execute(commands)
def main(args):
    """Show the installer dialog.

    :param args: JSON string describing the ppas/apps to install or
        remove; passed straight to :class:`Installer`
    """
    # Installing packages and adding repositories requires root.
    if os.geteuid() != 0:
        dialog = Gtk.MessageDialog()
        dialog.set_markup(_('You must be root to run this tool'))
        dialog.set_property('message-type', Gtk.MessageType.ERROR)
        dialog.add_button(_('Ok'), Gtk.ResponseType.OK)
        dialog.run()
        return
    installer = Installer(args)
    installer.run()
    installer.destroy()
    exit(0)
if __name__ == '__main__':
    # Sample payload for manual testing.  main() expects a JSON string
    # (Installer.__init__ calls json.loads on it), so the dict must be
    # serialized first; the original passed the dict directly, which made
    # json.loads raise a TypeError.  Also, start_installation() already
    # prepends "ppa:", so the entries here must not carry the prefix
    # themselves (the original produced "ppa:ppa:atareao/atareao").
    actions = {'ppas_to_install': ['atareao/atareao'],
               'ppas_to_remove': [],
               'apps_to_install': ['my-weather-indicator'],
               'apps_to_remove': []
               }
    main(json.dumps(actions))
    exit(0)
| #/usr/bin/env python3
# -*- coding: UTF-8 -*-
#
# This file is part of ubuntu-first-steps
#
# Copyright (c) 2020 <NAME> <a.k.a. atareao>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
import gi
try:
gi.require_version('Gtk', '3.0')
gi.require_version('GLib', '2.0')
gi.require_version('Vte', '2.91')
except Exception as e:
print(e)
exit(1)
from gi.repository import Gtk
from gi.repository import GLib
from gi.repository import Vte
import os
import sys
import time
import comun
import json
from comun import _
from doitinbackground import DoItInBackground
import utils
MARGIN = 5
class SmartTerminal(Vte.Terminal):
def __init__(self, parent):
Vte.Terminal.__init__(self)
self.parent = parent
self.diib = None
def execute(self, commands):
self.diib = DoItInBackground(self, commands)
self.diib.connect('started', self.parent.start)
self.diib.connect('done_one', self.parent.increase)
self.diib.connect('ended', self.parent.end)
self.diib.connect('stopped', self.parent.stopped)
self.diib.start()
def stop(self):
if self.diib is not None:
self.diib.stop()
class Installer(Gtk.Dialog): # needs GTK, Python, Webkit-GTK
# def __init__(self, ppas_to_install, ppas_to_remove, apps_to_install,
# apps_to_remove):
def __init__(self, actions):
Gtk.Dialog.__init__(self)
self.set_title( _('Add ppa repository'))
self.set_modal(True)
self.set_destroy_with_parent(True)
self.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
self.set_icon_from_file(comun.ICON)
self.set_size_request(600, 50)
actions = json.loads(actions)
self.ppas_to_install = actions['ppas_to_install']
self.ppas_to_remove = actions['ppas_to_remove']
self.apps_to_install = actions['apps_to_install']
self.apps_to_remove = actions['apps_to_remove']
box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 10)
box.set_border_width(5)
self.get_content_area().add(box)
grid = Gtk.Grid()
grid.set_column_spacing(MARGIN)
grid.set_row_spacing(MARGIN)
box.add(grid)
self.label = Gtk.Label.new('')
self.label.set_halign(Gtk.Align.START)
grid.attach(self.label, 0, 1, 2, 1)
self.progressbar = Gtk.ProgressBar()
grid.attach(self.progressbar, 0, 2, 4, 1)
expander = Gtk.Expander()
expander.connect('notify::expanded', self.on_expanded)
grid.attach(expander, 0, 3, 4, 4)
alignment = Gtk.Alignment()
# alignment.set_padding(1, 0, 2, 2)
alignment.props.xscale = 1
scrolledwindow = Gtk.ScrolledWindow()
scrolledwindow.set_hexpand(True)
scrolledwindow.set_vexpand(True)
self.terminal = SmartTerminal(self)
scrolledwindow.add(self.terminal)
alignment.add(scrolledwindow)
expander.add(alignment)
hbox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 5)
grid.attach(hbox, 0, 8, 4, 1)
self.button_cancel = Gtk.Button.new_with_label(_('Cancel'))
self.button_cancel.connect('clicked', self.on_button_cancel_clicked)
hbox.pack_start(self.button_cancel, False, False, 0)
self.is_added = False
self.value = 0.0
self.is_installing = False
self.show_all()
self.progressbar.set_visible(False)
self.label.set_visible(False)
expander.set_expanded(True)
time.sleep(1)
self.start_installation()
def end(self, anobject, ok, *args):
self.is_installing = False
self.button_cancel.set_label(_('Exit'))
if ok is True:
kind = Gtk.MessageType.INFO
message = _('Installation completed!')
else:
kind = Gtk.MessageType.ERROR
message = _('Installation NOT completed!')
dialog = Gtk.MessageDialog()
dialog.set_markup(message)
dialog.set_property('message_type', kind)
dialog.add_button(_('Ok'), Gtk.ButtonsType.OK)
dialog.run()
dialog.destroy()
self.destroy()
def stopped(self, anobject, *args):
self.is_installing = False
self.button_cancel.set_label(_('Exit'))
self.destroy()
def start(self, anobject, total, *args):
self.is_installing = True
self.label.set_visible(True)
self.progressbar.set_visible(True)
self.value = 0.0
self.max_value = total
def increase(self, anobject, command, *args):
GLib.idle_add(self.label.set_text, _('Executing: %s') % command)
self.value += 1.0
fraction = self.value / self.max_value
print(fraction)
GLib.idle_add(self.progressbar.set_fraction, fraction)
def decrease(self):
self.value -= 1.0
fraction = self.value / self.max_value
GLib.idle_add(self.progressbar.set_fraction, fraction)
def on_expanded(self, widget, data):
if widget.get_property('expanded') is True:
self.set_size_request(600, 300)
else:
self.set_size_request(600, 50)
self.resize(600, 50)
def on_button_cancel_clicked(self, button):
if self.is_installing:
dialog = Gtk.MessageDialog()
dialog.set_markup(_('Do you want to stop the installation?'))
dialog.set_property('message-type', Gtk.MessageType.INFO)
dialog.add_button(_('Ok'), Gtk.ResponseType.OK)
dialog.add_button(_('Cancel'), Gtk.ResponseType.CANCEL)
ans = dialog.run()
if dialog.run() == Gtk.ResponseType.OK:
GLib.idle_add(dialog.hide)
self.terminal.stop()
GLib.idle_add(dialog.destroy)
def show_info(self):
self.progressbar.set_visible(True)
self.label.set_visible(True)
def start_installation(self):
commands = []
for ppa in self.ppas_to_install:
commands.append('add-apt-repository -y ppa:{}'.format(ppa))
for ppa in self.ppas_to_remove:
commands.append('add-apt-repository -y -r ppa:{}'.format(ppa))
if len(commands) > 0:
commands.append('apt-get update')
commands.append('apt-get upgrade')
if len(self.apps_to_install) > 0:
apps = ' '.join(self.apps_to_install)
commands.append('apt-get -y install {}'.format(apps))
if len(self.apps_to_remove) > 0:
apps = ' '.join(self.apps_to_remove)
commands.append('apt-get -y remove {}'.format(apps))
print(commands)
self.terminal.execute(commands)
def main(args):
if os.geteuid() != 0:
dialog = Gtk.MessageDialog()
dialog.set_markup(_('You must be root to run this tool'))
dialog.set_property('message-type', Gtk.MessageType.ERROR)
dialog.add_button(_('Ok'), Gtk.ResponseType.OK)
dialog.run()
return
installer = Installer(args)
installer.run()
installer.destroy()
exit(0)
if __name__ == '__main__':
actions = {'ppas_to_install': ['ppa:atareao/atareao'],
'ppas_to_remove': [],
'apps_to_install': ['my-weather-indicator'],
'apps_to_remove': []
}
main(actions)
exit(0)
| en | 0.735906 | #/usr/bin/env python3 # -*- coding: UTF-8 -*- # # This file is part of ubuntu-first-steps # # Copyright (c) 2020 <NAME> <a.k.a. atareao> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE # needs GTK, Python, Webkit-GTK # def __init__(self, ppas_to_install, ppas_to_remove, apps_to_install, # apps_to_remove): # alignment.set_padding(1, 0, 2, 2) | 1.77395 | 2 |
lib/bes/common/string_list_util.py | reconstruir/bes | 0 | 6623533 | #-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from bes.system.compat import compat
from bes.compat.StringIO import StringIO
from .string_util import string_util
class string_list_util(object):
  'string list helpers'

  @classmethod
  def remove_if(clazz, l, blacklist):
    'Remove any items in l that are present both in l and blacklist preserving order.'
    banned = set(blacklist)
    return [ item for item in l if item not in banned ]

  @classmethod
  def to_string(clazz, l, delimiter = ';', quote = False):
    'Join l into a single delimiter-separated string, optionally quoting each item.'
    parts = []
    for item in iter(l):
      if not compat.is_string(item):
        raise TypeError('not a string: %s - %s' % (str(item), type(item)))
      parts.append(string_util.quote_if_needed(item) if quote else item)
    return delimiter.join(parts)
| #-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from bes.system.compat import compat
from bes.compat.StringIO import StringIO
from .string_util import string_util
class string_list_util(object):
'string list helpers'
@classmethod
def remove_if(clazz, l, blacklist):
'Remove any items in l that are present both in l and blacklist preserving order.'
blacklist_set = set(blacklist)
result = []
for x in l:
if x not in blacklist_set:
result.append(x)
return result
@classmethod
def to_string(clazz, l, delimiter = ';', quote = False):
buf = StringIO()
first = True
for s in iter(l):
if not compat.is_string(s):
raise TypeError('not a string: %s - %s' % (str(s), type(s)))
if not first:
buf.write(delimiter)
first = False
if quote:
s = string_util.quote_if_needed(s)
buf.write(s)
return buf.getvalue()
| en | 0.595088 | #-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*- | 2.594392 | 3 |
entrypoint/main.py | hlub/entrypoint | 0 | 6623534 | #!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""Entrypoint main routine"""
import logging
import argparse
import os
import sys
import traceback
from jinja2.exceptions import TemplateSyntaxError
import yaml
from yaml.scanner import ScannerError
from . import __version__ as version
from .hooks import Hooks
from . import templates
from . import dumb_init
log = logging.getLogger(__name__)
def exec_command(variables, options):
    """Replace the current entrypoint process with the actual command.

    Even the commandline arguments of the specified command are treated as
    templates.
    """
    # Render the command name and every argument through the template engine.
    rendered = [templates.render_string(arg, variables)
                for arg in [options.command] + options.command_args]
    if options.command:
        os.execvp(rendered[0], rendered)
    # if this point is reached, exec failed, so we should exit nonzero
    log.error("Exec system call failed to replace the program")
    sys.exit(2)
def collect_variables(options):
    """Return a dict of variables collected from the specified YAML
    configuration and environment.
    """
    path = options.variables_file
    # A directory at the expected file location usually means a container
    # volume was mounted incorrectly.
    if os.path.isdir(path):
        raise RuntimeError(
            "Problem opening configuration volume "
            "`variables.yml`! Please make sure that you "
            "provided a valid file path."
        )
    loaded = {}
    if os.path.exists(path):
        with open(path) as stream:
            loaded = yaml.safe_load(stream) or {}
    # Environment variables override anything read from the YAML file.
    loaded.update(os.environ)
    return loaded
def parse_args(args=None):
    """Parse the command line.

    :param args: optional argument list to parse instead of
        ``sys.argv[1:]``
    :return: the parsed :class:`argparse.Namespace`
    """
    parser = argparse.ArgumentParser(
        usage="%(prog)s [OPTIONS] [--] COMMAND [ARGS...]",
        description="Entrypoint and init for containers: configure via "
        "environment variables and YAML file; render a directory hierarchy "
        "of templates; handle responsibilities of an init system; and finally "
        "execute a command.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Verbosity flags; main() resolves them into a log level.
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="verbose log output"
    )
    parser.add_argument(
        "-q", "--quiet", action="store_true", help="output only pure errors"
    )
    # Options controlling the dumb-init (PID 1) behaviour.
    parser.add_argument(
        "--no-init",
        dest="dumb_init",
        default=True,
        action="store_false",
        help="disable dumb init functionality",
    )
    parser.add_argument(
        "--no-setsid",
        dest="use_setsid",
        default=True,
        action="store_false",
        help="omit use of setsid system call",
    )
    parser.add_argument(
        "-r",
        "--rewrite",
        metavar="SOURCE_SIG:DEST_SIG",
        dest="rewrites",
        default=[],
        action="append",
        help="specify signal rewrites using the signal names",
    )
    # Input/output locations for variables, templates and hooks.
    parser.add_argument(
        "-V",
        "--variables",
        metavar="PATH",
        dest="variables_file",
        default="/variables.yml",
        help="optional YAML file containing template variables",
    )
    parser.add_argument(
        "-t",
        "--templates",
        metavar="PATH",
        dest="template_root",
        default="/templates",
        help="directory structure containing template files",
    )
    parser.add_argument(
        "-j",
        "--jinja",
        metavar="PATH",
        dest="jinja_root",
        default="/jinja",
        help="root directory for jinja utility templates, "
        "which are not directly rendered but may be "
        "included within another template",
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="OUTPUT",
        dest="output_root",
        default="/",
        help="output directory",
    )
    parser.add_argument(
        "-H",
        "--hooks",
        metavar="PATH",
        dest="hooks_root",
        default="/entrypoint_hooks",
        help="directory containing entrypoint hooks " "to run before the command",
    )
    parser.add_argument(
        "--version",
        action="version",
        version="Entrypoint version {}".format(version),
        help="print version information",
    )
    parser.add_argument(
        "command",
        metavar="COMMAND [ARGS...]",
        help="the command to execute after preparations",
    )
    # Remaining positionals after COMMAND; hidden from --help output.
    parser.add_argument("command_args", nargs="*", help=argparse.SUPPRESS)
    # Long-form description of the start-up sequence, shown after the options.
    parser.epilog = """
    Entrypoint takes the following steps to initiate a container:
    Variables are read from the YAML file VARIABLES and from the
    environment, the latter overriding the former.
    Then, pre-hook functions are called from the hooks directory.
    It is possible to modify the configuration parameters for instance.
    All templates in the TEMPLATES directory are rendered into the
    OUTPUT directory, maintaining the file hierarchy (including ownership
    and mode). Existing directories are not touched.
    Then, any hook and post-hook functions in the hooks directory are run.
    If everything went without errors, the simple init system takes place
    (forking and handling session, terminal, signals and children).
    Finally, the COMMAND is executed. Template variables can also be
    used in the command and its arguments. Add '--' before the command
    if any ARGS start with '-'.
    """
    return parser.parse_args(args)
def print_jinja_error(exc):
    """Pretty-print a Jinja template syntax error with surrounding source.

    Shows up to five lines of context on each side of the offending line
    and marks the line itself with an arrow.

    :param exc: a :class:`jinja2.TemplateSyntaxError` carrying
        ``filename``, ``lineno`` and ``message``
    """
    print(
        "Jinja syntax error in {}:{}: {}".format(exc.filename, exc.lineno, exc.message)
    )
    with open(exc.filename, "r") as template_file:
        lines = template_file.readlines()
        begin, end = max(0, exc.lineno - 5), min(exc.lineno + 5, len(lines))
        print("... lines {} - {} ...".format(begin + 1, end))
        # Enumerate from `begin` so `i` is the absolute zero-based line
        # index; the original compared the slice-relative index against
        # exc.lineno - 1, so the marker was never printed for errors past
        # line 5 of the template.
        for i, line in enumerate(lines[begin:end], start=begin):
            print(line.rstrip(), end="")
            if i == exc.lineno - 1:
                print(" <=====")
            else:
                print()
def print_exception():
    """Print nicer template and YAML parse errors."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    # Walk the traceback looking for a frame that belongs to a rendered
    # Jinja template; if found, report the error as a template problem.
    frame = exc_tb
    handled = False
    while frame is not None:
        if frame.tb_frame.f_code.co_name == "top-level template code":
            error_lines = traceback.format_exception(exc_type, exc_value, frame)
            error_lines[0] = "Error rendering templates:\n"
            for line in error_lines:
                sys.stderr.write(line)
            handled = True
            break
        frame = frame.tb_next
    if not handled:
        # YAML scanner errors get a compact message; everything else is
        # printed as a normal traceback.
        if exc_type == ScannerError:
            log.error(exc_value)
        else:
            traceback.print_exception(exc_type, exc_value, exc_tb)
def main(args=None):
    """Entrypoint main routine.

    Parses the command line, configures logging, renders templates and
    runs hooks (unless ``SKIP_ENTRYPOINT`` is set in the environment),
    optionally installs the dumb-init signal/child handling, and finally
    exec()s the configured command.

    :param args: argument list for :func:`parse_args`; defaults to
        ``sys.argv[1:]``
    """
    try:
        options = parse_args(args)
        # --verbose wins over --quiet when both are given.
        loglevel = logging.INFO
        if options.verbose:
            loglevel = logging.DEBUG
        elif options.quiet:
            loglevel = logging.ERROR
        root_logger = logging.getLogger()
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(loglevel)
        formatter = logging.Formatter("%(levelname)s: %(message)s")
        if loglevel == logging.DEBUG:
            # Use more verbose format when debugging
            formatter = logging.Formatter(
                "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
            )
        stream_handler.setFormatter(formatter)
        root_logger.addHandler(stream_handler)
        root_logger.setLevel(loglevel)

        # SKIP_ENTRYPOINT short-circuits all configuration work; the
        # command is still executed below.
        if not os.environ.get("SKIP_ENTRYPOINT"):
            hooks = Hooks(options.hooks_root)
            variables = collect_variables(options)
            hooks.run_prehooks(variables)
            templates.process_templates(
                variables,
                output_root=options.output_root,
                template_root=options.template_root,
                jinja_root=options.jinja_root,
            )
            hooks.run_posthooks(variables)
        else:
            log.debug("SKIP_ENTRYPOINT is set, skipping entrypoint")
            variables = {}
        if options.dumb_init:
            # Signal rewrites arrive as "SRC:DST" strings; split them
            # into (source, dest) pairs for the init machinery.
            dumb_init.init(
                map(lambda arg: arg.split(":"), options.rewrites),
                use_setsid=options.use_setsid,
            )
        exec_command(variables, options)
    except TemplateSyntaxError as err:
        print_jinja_error(err)
        sys.exit(1)
    except Exception:
        print_exception()
        sys.exit(1)
if __name__ == "__main__":
main()
| #!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""Entrypoint main routine"""
import logging
import argparse
import os
import sys
import traceback
from jinja2.exceptions import TemplateSyntaxError
import yaml
from yaml.scanner import ScannerError
from . import __version__ as version
from .hooks import Hooks
from . import templates
from . import dumb_init
log = logging.getLogger(__name__)
def exec_command(variables, options):
"""Replace the current entrypoint process with the actual command.
Even the commandline arguments of the specified command are treated as
templates.
"""
args = [options.command] + options.command_args
args = [templates.render_string(arg, variables) for arg in args]
if options.command:
os.execvp(args[0], args)
# if this point is reached, exec failed, so we should exit nonzero
log.error("Exec system call failed to replace the program")
sys.exit(2)
def collect_variables(options):
"""Return a dict of variables collected from the specified YAML
configuration and environment.
"""
variables = {}
if os.path.exists(options.variables_file):
if os.path.isdir(options.variables_file):
raise RuntimeError(
"Problem opening configuration volume "
"`variables.yml`! Please make sure that you "
"provided a valid file path."
)
with open(options.variables_file) as stream:
variables = yaml.safe_load(stream) or {}
# Update variables from environment
variables.update(os.environ)
return variables
def parse_args(args=None):
"""Parse commandline"""
parser = argparse.ArgumentParser(
usage="%(prog)s [OPTIONS] [--] COMMAND [ARGS...]",
description="Entrypoint and init for containers: configure via "
"environment variables and YAML file; render a directory hierarchy "
"of templates; handle responsibilities of an init system; and finally "
"execute a command.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="verbose log output"
)
parser.add_argument(
"-q", "--quiet", action="store_true", help="output only pure errors"
)
parser.add_argument(
"--no-init",
dest="dumb_init",
default=True,
action="store_false",
help="disable dumb init functionality",
)
parser.add_argument(
"--no-setsid",
dest="use_setsid",
default=True,
action="store_false",
help="omit use of setsid system call",
)
parser.add_argument(
"-r",
"--rewrite",
metavar="SOURCE_SIG:DEST_SIG",
dest="rewrites",
default=[],
action="append",
help="specify signal rewrites using the signal names",
)
parser.add_argument(
"-V",
"--variables",
metavar="PATH",
dest="variables_file",
default="/variables.yml",
help="optional YAML file containing template variables",
)
parser.add_argument(
"-t",
"--templates",
metavar="PATH",
dest="template_root",
default="/templates",
help="directory structure containing template files",
)
parser.add_argument(
"-j",
"--jinja",
metavar="PATH",
dest="jinja_root",
default="/jinja",
help="root directory for jinja utility templates, "
"which are not directly rendered but may be "
"included within another template",
)
parser.add_argument(
"-o",
"--output",
metavar="OUTPUT",
dest="output_root",
default="/",
help="output directory",
)
parser.add_argument(
"-H",
"--hooks",
metavar="PATH",
dest="hooks_root",
default="/entrypoint_hooks",
help="directory containing entrypoint hooks " "to run before the command",
)
parser.add_argument(
"--version",
action="version",
version="Entrypoint version {}".format(version),
help="print version information",
)
parser.add_argument(
"command",
metavar="COMMAND [ARGS...]",
help="the command to execute after preparations",
)
parser.add_argument("command_args", nargs="*", help=argparse.SUPPRESS)
parser.epilog = """
Entrypoint takes the following steps to initiate a container:
Variables are read from the YAML file VARIABLES and from the
environment, the latter overriding the former.
Then, pre-hook functions are called from the hooks directory.
It is possible to modify the configuration parameters for instance.
All templates in the TEMPLATES directory are rendered into the
OUTPUT directory, maintaining the file hierarchy (including ownership
and mode). Existing directories are not touched.
Then, any hook and post-hook functions in the hooks directory are run.
If everything went without errors, the simple init system takes place
(forking and handling session, terminal, signals and children).
Finally, the COMMAND is executed. Template variables can also be
used in the command and its arguments. Add '--' before the command
if any ARGS start with '-'.
"""
return parser.parse_args(args)
def print_jinja_error(exc):
"""Pretty proitn a template syntax error with a significant piece of
source.
"""
print(
"Jinja syntax error in {}:{}: {}".format(exc.filename, exc.lineno, exc.message)
)
with open(exc.filename, "r") as template_file:
lines = template_file.readlines()
begin, end = max(0, exc.lineno - 5), min(exc.lineno + 5, len(lines))
print("... lines {} - {} ...".format(begin + 1, end))
for i, line in enumerate(lines[begin:end]):
print(line.rstrip(), end="")
if i == exc.lineno - 1:
print(" <=====")
else:
print()
def print_exception():
    """Print nicer template and YAML parse errors.

    Inspects the exception currently being handled (``sys.exc_info()``):

    * Jinja rendering errors are reprinted with the traceback truncated
      at the template frame, hiding Jinja's internal machinery.
    * YAML scanner errors are logged as a single error line.
    * Everything else falls back to the standard full traceback.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    tb = exc_tb
    # Walk the traceback looking for the synthetic frame Jinja inserts
    # for compiled templates.
    while tb is not None:
        if tb.tb_frame.f_code.co_name == "top-level template code":
            error = traceback.format_exception(exc_type, exc_value, tb)
            # Replace the generic "Traceback ..." banner with a clearer one.
            error[0] = "Error rendering templates:\n"
            for line in error:
                sys.stderr.write(line)
            break
        tb = tb.tb_next
    else:
        # Loop finished without break: not a template rendering error.
        if exc_type == ScannerError:
            # YAML parse error (e.g. from the variables file).
            log.error(exc_value)
        else:
            traceback.print_exception(exc_type, exc_value, exc_tb)
def main(args=None):
    """Main function, either call with arguments or use sys.argv as default."""
    try:
        options = parse_args(args)

        # Translate the -v/-q flags into a logging level.
        loglevel = logging.INFO
        if options.verbose:
            loglevel = logging.DEBUG
        elif options.quiet:
            loglevel = logging.ERROR
        root_logger = logging.getLogger()
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(loglevel)
        formatter = logging.Formatter("%(levelname)s: %(message)s")
        if loglevel == logging.DEBUG:
            # Use more verbose format when debugging
            formatter = logging.Formatter(
                "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
            )
        stream_handler.setFormatter(formatter)
        root_logger.addHandler(stream_handler)
        root_logger.setLevel(loglevel)

        if not os.environ.get("SKIP_ENTRYPOINT"):
            # Normal container start-up: collect variables, run pre-hooks,
            # render templates, then run hooks/post-hooks.
            hooks = Hooks(options.hooks_root)
            variables = collect_variables(options)
            hooks.run_prehooks(variables)
            templates.process_templates(
                variables,
                output_root=options.output_root,
                template_root=options.template_root,
                jinja_root=options.jinja_root,
            )
            hooks.run_posthooks(variables)
        else:
            log.debug("SKIP_ENTRYPOINT is set, skipping entrypoint")
            variables = {}
        if options.dumb_init:
            # Minimal PID-1 duties: fork and handle sessions/signals/children.
            dumb_init.init(
                map(lambda arg: arg.split(":"), options.rewrites),
                use_setsid=options.use_setsid,
            )
        # Replaces the current process with COMMAND; only returns on failure.
        exec_command(variables, options)
    except TemplateSyntaxError as err:
        print_jinja_error(err)
        sys.exit(1)
    except Exception:
        print_exception()
        sys.exit(1)


if __name__ == "__main__":
    main()
| en | 0.805623 | #!/usr/bin/env python3.7 # -*- coding: utf-8 -*- Entrypoint main routine Replace the current entrypoint process with the actual command. Even the commandline arguments of the specified command are treated as templates. # if this point is reached, exec failed, so we should exit nonzero Return a dict of variables collected from the specified YAML configuration and environment. # Update variables from environment Parse commandline Entrypoint takes the following steps to initiate a container: Variables are read from the YAML file VARIABLES and from the environment, the latter overriding the former. Then, pre-hook functions are called from the hooks directory. It is possible to modify the configuration parameters for instance. All templates in the TEMPLATES directory are rendered into the OUTPUT directory, maintaining the file hierarchy (including ownership and mode). Existing directories are not touched. Then, any hook and post-hook functions in the hooks directory are run. If everything went without errors, the simple init system takes place (forking and handling session, terminal, signals and children). Finally, the COMMAND is executed. Template variables can also be used in the command and its arguments. Add '--' before the command if any ARGS start with '-'. Pretty proitn a template syntax error with a significant piece of source. Print nicer template and YAML parse errors. Main function, either call with arguments or use sys.argv as default. # Use more verbose format when debugging | 2.485435 | 2 |
itunesmusicsearch/result_item.py | fonrims/itunesmusicsearch | 0 | 6623535 | <gh_stars>0
#!/usr/bin/python
class ResultItem(object):
    """
    Defines a general result item

    Only ``artistName`` is required in the input record; every other field
    is copied onto the instance only when its key is present.
    """

    def __init__(self, json):
        """
        Initializes the ResultItem class from the JSON provided
        :param json: dict. Decoded JSON record to fetch information from
        """
        self.artist_name = json['artistName']
        self.type = None
        # Determine the result type: prefer the explicit wrapperType and
        # refine it with a sub-kind, otherwise fall back to 'kind'.
        # NOTE(review): nesting reconstructed from context -- confirm the
        # sub-kind checks really belong under the wrapperType branch.
        if 'wrapperType' in json:
            self.type = json['wrapperType']
            if 'collectionType' in json:
                self.collection_type = json['collectionType']
            elif 'artistType' in json:
                self.artist_type = json['artistType']
            elif 'kind' in json:
                self.track_type = json['kind']
        elif 'kind' in json:
            self.type = json['kind']
        # Optional descriptive fields.
        if 'primaryGenreName' in json:
            self.primary_genre_name = json['primaryGenreName']
        if 'trackName' in json:
            self.track_name = json['trackName']
        if 'trackCensoredName' in json:
            self.track_censored_name = json['trackCensoredName']
        if 'trackViewUrl' in json:
            self.track_view_url = json['trackViewUrl']
        if 'previewUrl' in json:
            self.preview_url = json['previewUrl']
        # Artwork in the various resolutions the API may return.
        if 'artworkUrl30' in json:
            self.artwork_url_30 = json['artworkUrl30']
        if 'artworkUrl60' in json:
            self.artwork_url_60 = json['artworkUrl60']
        if 'artworkUrl100' in json:
            self.artwork_url_100 = json['artworkUrl100']
        if 'artworkUrl512' in json:
            self.artwork_url_512 = json['artworkUrl512']
        if 'collectionName' in json:
            self.collection_name = json['collectionName']
        # Pricing / release metadata.
        if 'trackPrice' in json:
            self.track_price = json['trackPrice']
        if 'releaseDate' in json:
            self.release_date = json['releaseDate']
        if 'trackExplicitness' in json:
            self.track_explicitness = json['trackExplicitness']
        if 'trackTimeMillis' in json:
            self.track_time = json['trackTimeMillis']
        if 'country' in json:
            self.country = json['country']
        if 'currency' in json:
            self.currency = json['currency']
        if 'copyright' in json:
            self.copyright = json['copyright']
        if 'price' in json:
            self.price = json['price']

    def __repr__(self):
        """
        Retrieves all keys in the class as a String
        :return: String. All the keys available in the class
        """
        string = ''
        # Render every instance attribute as a "\nname:value" fragment.
        for key, value in self.__dict__.items():
            if not key.startswith('__'):
                string += '\n' + key + ':' + str(value)
        return string
| #!/usr/bin/python
class ResultItem(object):
    """
    Defines a general result item

    Wraps one decoded JSON record from the iTunes search API; optional
    fields become attributes only when present in the record.
    """

    # Mapping of optional JSON keys to the attribute each one populates,
    # in the order they are assigned.
    _OPTIONAL_KEYS = (
        ('primaryGenreName', 'primary_genre_name'),
        ('trackName', 'track_name'),
        ('trackCensoredName', 'track_censored_name'),
        ('trackViewUrl', 'track_view_url'),
        ('previewUrl', 'preview_url'),
        ('artworkUrl30', 'artwork_url_30'),
        ('artworkUrl60', 'artwork_url_60'),
        ('artworkUrl100', 'artwork_url_100'),
        ('artworkUrl512', 'artwork_url_512'),
        ('collectionName', 'collection_name'),
        ('trackPrice', 'track_price'),
        ('releaseDate', 'release_date'),
        ('trackExplicitness', 'track_explicitness'),
        ('trackTimeMillis', 'track_time'),
        ('country', 'country'),
        ('currency', 'currency'),
        ('copyright', 'copyright'),
        ('price', 'price'),
    )

    def __init__(self, json):
        """
        Initializes the ResultItem class from the JSON provided
        :param json: dict. Decoded JSON record to fetch information from
        """
        self.artist_name = json['artistName']
        self.type = None
        # Resolve the result type, refining a wrapperType with a sub-kind.
        if 'wrapperType' in json:
            self.type = json['wrapperType']
            if 'collectionType' in json:
                self.collection_type = json['collectionType']
            elif 'artistType' in json:
                self.artist_type = json['artistType']
            elif 'kind' in json:
                self.track_type = json['kind']
        elif 'kind' in json:
            self.type = json['kind']
        # Copy every optional flat field that the record actually contains.
        for key, attr in self._OPTIONAL_KEYS:
            if key in json:
                setattr(self, attr, json[key])

    def __repr__(self):
        """
        Retrieves all keys in the class as a String
        :return: String. All the keys available in the class
        """
        fragments = [
            '\n' + name + ':' + str(value)
            for name, value in self.__dict__.items()
            if not name.startswith('__')
        ]
        return ''.join(fragments)
2020-ucc/services/provenance-service/app/unfolding_utils.py | UST-QuAntiL/QuantME-UseCase | 3 | 6623536 | <reponame>UST-QuAntiL/QuantME-UseCase
# ******************************************************************************
# Copyright (c) 2020 University of Stuttgart
#
# See the NOTICE file(s) distributed with this work for additional
# information regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import ast
import time
import qiskit
import requests
from qiskit import IBMQ
from qiskit.ignis.mitigation.measurement import (complete_meas_cal, CompleteMeasFitter)
from qiskit.providers.jobstatus import JOB_FINAL_STATES
from datetime import datetime
from app import app
calibration_matrixes = {}
def mitigate_error(correlation_Id, return_address, qpu, max_age, result, access_token):
    """Mitigate the readout-error in the given result distribution

    :param correlation_Id: Camunda message name used to correlate the callback
    :param return_address: URL that receives the mitigated result
    :param qpu: name of the IBMQ backend the result came from
    :param max_age: maximum acceptable age of a cached calibration matrix (minutes)
    :param result: string repr of the measured counts dict, e.g. "{'00': 512, ...}"
    :param access_token: IBMQ API token used when a new matrix must be created
    """
    app.logger.info('Result before mitigation: ' + result)
    # The matrix dimension is derived from the bit-string length of the
    # first key of the counts dict.
    meas_filter = get_correction_matrix(qpu, max_age, access_token, len(next(iter(ast.literal_eval(result)))))
    mitigated_results = result
    if meas_filter is not None:
        # Apply mitigation if matrix is successfully retrieved
        mitigated_results = meas_filter.apply(ast.literal_eval(result))
    app.logger.info('Result after mitigation: ' + str(mitigated_results))
    # Report the (possibly mitigated) distribution back to the Camunda engine.
    app.logger.info('Sending callback to ' + str(return_address))
    camunda_callback = requests.post(return_address, json={"messageName": correlation_Id, "processVariables": {
        "executionResult": {"value": str(mitigated_results), "type": "String"}}})
    app.logger.info("Callback returned status code: " + str(camunda_callback.status_code))
def get_correction_matrix(qpu, max_age, access_token, used_qubits):
    """Return a measurement-error correction filter for the given QPU.

    A cached matrix is reused when younger than ``max_age`` minutes;
    otherwise a new one is computed by running calibration circuits on
    the backend, which requires a valid ``access_token``.

    :return: a qiskit measurement filter, or None if no token is available
    """
    app.logger.info('Getting calibration matrix for QPU ' + qpu + ' with max age of ' + str(max_age) + ' minutes')

    # Check for existing calibration matrix
    existing_matrix = calibration_matrixes.get(qpu)
    if existing_matrix is not None:
        age = datetime.now() - existing_matrix['Date']
        app.logger.info('Calibration matrix for this QPU exists with age ' + str(age))
        if age.total_seconds() < max_age * 60:
            app.logger.info('Returning existing calibration matrix!')
            return existing_matrix['Calibration_Matrix']

    if access_token is None:
        app.logger.error('Unable to create new correction matrix without access key...')
        return None

    # Load account to enable execution of calibration circuits
    IBMQ.save_account(access_token, overwrite=True)
    IBMQ.load_account()
    provider = IBMQ.get_provider(group='open')
    backend = provider.get_backend(qpu)

    # Generate a calibration circuit for each state
    qr = qiskit.QuantumRegister(used_qubits)
    meas_calibs, state_labels = complete_meas_cal(qr=qr, circlabel='mcal')

    # Execute each calibration circuit and store results
    app.logger.info('Executing ' + str(len(meas_calibs)) + ' circuits to create calibration matrix...')
    cal_results = []
    for circuit in meas_calibs:
        app.logger.info('Executing circuit ' + circuit.name)
        cal_results.append(execute_job(circuit, backend))

    # Generate calibration matrix out of measurement results
    meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')
    app.logger.info('Calibration matrix:')
    app.logger.info(meas_fitter.cal_matrix)

    # Store calibration matrix for later reuse
    calibration_matrixes[qpu] = {"Date": datetime.now(), "Calibration_Matrix": meas_fitter.filter}
    return meas_fitter.filter
def execute_job(circuit, backend):
    """Execute a circuit on the specified backend

    Blocks, polling every 20 seconds, until the remote job reaches a
    final state, then returns its result.
    """
    job = qiskit.execute(circuit, backend=backend, shots=1000)
    job_status = job.status()
    while job_status not in JOB_FINAL_STATES:
        app.logger.info('The execution is still running')
        time.sleep(20)
        job_status = job.status()
    return job.result()
| # ******************************************************************************
# Copyright (c) 2020 University of Stuttgart
#
# See the NOTICE file(s) distributed with this work for additional
# information regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import ast
import time
import qiskit
import requests
from qiskit import IBMQ
from qiskit.ignis.mitigation.measurement import (complete_meas_cal, CompleteMeasFitter)
from qiskit.providers.jobstatus import JOB_FINAL_STATES
from datetime import datetime
from app import app
calibration_matrixes = {}
def mitigate_error(correlation_Id, return_address, qpu, max_age, result, access_token):
    """Mitigate the readout-error in the given result distribution

    :param correlation_Id: Camunda message name used to correlate the callback
    :param return_address: URL that receives the mitigated result
    :param qpu: name of the IBMQ backend the result came from
    :param max_age: maximum acceptable age of a cached calibration matrix (minutes)
    :param result: string repr of the measured counts dict, e.g. "{'00': 512, ...}"
    :param access_token: IBMQ API token used when a new matrix must be created
    """
    app.logger.info('Result before mitigation: ' + result)
    # Matrix dimension comes from the bit-string length of the first key.
    meas_filter = get_correction_matrix(qpu, max_age, access_token, len(next(iter(ast.literal_eval(result)))))
    mitigated_results = result
    if meas_filter is not None:
        # Apply mitigation if matrix is successfully retrieved
        mitigated_results = meas_filter.apply(ast.literal_eval(result))
    app.logger.info('Result after mitigation: ' + str(mitigated_results))
    # Report the (possibly mitigated) distribution back to the Camunda engine.
    app.logger.info('Sending callback to ' + str(return_address))
    camunda_callback = requests.post(return_address, json={"messageName": correlation_Id, "processVariables": {
        "executionResult": {"value": str(mitigated_results), "type": "String"}}})
    app.logger.info("Callback returned status code: " + str(camunda_callback.status_code))
def get_correction_matrix(qpu, max_age, access_token, used_qubits):
    """Return a measurement-error correction filter for the given QPU.

    A cached matrix is reused when younger than ``max_age`` minutes;
    otherwise a new one is computed by running calibration circuits on
    the backend, which requires a valid ``access_token``.

    :return: a qiskit measurement filter, or None if no token is available
    """
    app.logger.info('Getting calibration matrix for QPU ' + qpu + ' with max age of ' + str(max_age) + ' minutes')

    # Check for existing calibration matrix
    existing_matrix = calibration_matrixes.get(qpu)
    if existing_matrix is not None:
        age = datetime.now() - existing_matrix['Date']
        app.logger.info('Calibration matrix for this QPU exists with age ' + str(age))
        if age.total_seconds() < max_age * 60:
            app.logger.info('Returning existing calibration matrix!')
            return existing_matrix['Calibration_Matrix']

    if access_token is None:
        app.logger.error('Unable to create new correction matrix without access key...')
        return None

    # Load account to enable execution of calibration circuits
    IBMQ.save_account(access_token, overwrite=True)
    IBMQ.load_account()
    provider = IBMQ.get_provider(group='open')
    backend = provider.get_backend(qpu)

    # Generate a calibration circuit for each state
    qr = qiskit.QuantumRegister(used_qubits)
    meas_calibs, state_labels = complete_meas_cal(qr=qr, circlabel='mcal')

    # Execute each calibration circuit and store results
    app.logger.info('Executing ' + str(len(meas_calibs)) + ' circuits to create calibration matrix...')
    cal_results = []
    for circuit in meas_calibs:
        app.logger.info('Executing circuit ' + circuit.name)
        cal_results.append(execute_job(circuit, backend))

    # Generate calibration matrix out of measurement results
    meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')
    app.logger.info('Calibration matrix:')
    app.logger.info(meas_fitter.cal_matrix)

    # Store calibration matrix for later reuse
    calibration_matrixes[qpu] = {"Date": datetime.now(), "Calibration_Matrix": meas_fitter.filter}
    return meas_fitter.filter
def execute_job(circuit, backend):
    """Run *circuit* on *backend* (1000 shots) and block until it finishes.

    Polls the remote job every 20 seconds until it reaches a terminal state.
    """
    job = qiskit.execute(circuit, backend=backend, shots=1000)
    status = job.status()
    while status not in JOB_FINAL_STATES:
        app.logger.info('The execution is still running')
        time.sleep(20)
        status = job.status()
    return job.result()
instagrapi/mixins/story.py | pip-install-HSE/instagrapi | 0 | 6623537 | <gh_stars>0
import json
from copy import deepcopy
from typing import List
from instagrapi import config
from instagrapi.exceptions import StoryNotFound
from instagrapi.extractors import extract_story_gql, extract_story_v1
from instagrapi.types import Story, StoryQueue
class StoryMixin:
    """Story helpers: fetch, cache, delete and mark stories as seen."""

    _stories_cache = {}  # pk -> Story object, shared by all client instances

    # def story_info_gql(self, story_pk: int):
    #     # GQL havent video_url :-(
    #     return self.media_info_gql(self, int(story_pk))

    def story_info_v1(self, story_pk: int) -> Story:
        """
        Get Story by pk or id

        Parameters
        ----------
        story_pk: int
            Unique identifier of the story

        Returns
        -------
        Story
            An object of Story type
        """
        story_id = self.media_id(story_pk)
        story_pk, user_id = story_id.split("_")
        # Refresh the cache with every current story of the owner, then
        # answer from the cache.
        stories = self.user_stories_v1(user_id)
        story_pk = int(story_pk)
        for story in stories:
            self._stories_cache[story.pk] = story
        if story_pk in self._stories_cache:
            return deepcopy(self._stories_cache[story_pk])
        raise StoryNotFound(story_pk=story_pk, user_id=user_id)

    def story_info(self, story_pk: int, use_cache: bool = True) -> Story:
        """
        Get Story by pk or id

        Parameters
        ----------
        story_pk: int
            Unique identifier of the story
        use_cache: bool, optional
            Whether or not to use information from cache, default value is True

        Returns
        -------
        Story
            An object of Story type
        """
        if not use_cache or story_pk not in self._stories_cache:
            story = self.story_info_v1(story_pk)
            self._stories_cache[story_pk] = story
        # Return a copy so callers cannot mutate the cached object.
        return deepcopy(self._stories_cache[story_pk])

    def story_delete(self, story_pk: int) -> bool:
        """
        Delete story

        Parameters
        ----------
        story_pk: int
            Unique identifier of the story

        Returns
        -------
        bool
            A boolean value
        """
        assert self.user_id, "Login required"
        media_id = self.media_id(story_pk)
        # Drop the stale cache entry before deleting remotely.
        self._stories_cache.pop(self.media_pk(media_id), None)
        return self.media_delete(media_id)

    def user_stories_v1(self, user_id: int, amount: int = None) -> List[Story]:
        """
        Get a user's stories (Private API)

        Parameters
        ----------
        user_id: int
        amount: int, optional
            Maximum number of story to return, default is all

        Returns
        -------
        List[Story]
            A list of the user's stories
        """
        params = {
            "supported_capabilities_new": json.dumps(config.SUPPORTED_CAPABILITIES)
        }
        user_id = int(user_id)
        reel = self.private_request(f"feed/user/{user_id}/story/", params=params)[
            "reel"
        ]
        stories = []
        for item in reel["items"]:
            stories.append(extract_story_v1(item))
        if amount:
            amount = int(amount)
            stories = stories[:amount]
        return stories

    def user_stories_gql(self, user_ids: List[int] = None) -> List[StoryQueue]:
        """
        Get story queues for several users at once (public GraphQL API)

        Parameters
        ----------
        user_ids: List[int]

        Returns
        -------
        List[StoryQueue]
            A list of objects of StoryQueue for each user_id
        """
        # The GraphQL endpoint needs the private client's session cookies.
        self.public.cookies.update(self.private.cookies)

        def _userid_chunks():
            # The endpoint accepts at most 50 reel ids per request.
            assert user_ids is not None
            user_ids_per_query = 50
            for i in range(0, len(user_ids), user_ids_per_query):
                yield user_ids[i:i + user_ids_per_query]

        stories_un = {}
        for userid_chunk in _userid_chunks():
            res = self.public_graphql_request(query_hash="303a4ae99711322310f25250d988f3b7",
                                              variables={"reel_ids": userid_chunk, "precomposed_overlay": False})
            # NOTE(review): dict.update overwrites 'reels_media' on every
            # chunk, so only the last chunk's reels survive when more than
            # 50 user_ids are passed -- looks like a latent bug; confirm.
            stories_un.update(res)
        st = []
        for media in stories_un['reels_media']:
            sq = StoryQueue(items=[])
            for story in media["items"]:
                sq.items.append(extract_story_gql(story))
            st.append(sq.copy())
        return st

    def user_stories(self, user_id: int, amount: int = None) -> List[Story]:
        """
        Get a user's stories

        Parameters
        ----------
        user_id: int
        amount: int, optional
            Maximum number of story to return, default is all

        Returns
        -------
        List[Story]
            A list of the user's stories
        """
        # TODO: Add user_stories_gql
        return self.user_stories_v1(user_id, amount)

    def story_seen(self, story_pks: List[int], skipped_story_pks: List[int] = None):
        """
        Mark stories as seen (and others as skipped)

        Parameters
        ----------
        story_pks: List[int]
        skipped_story_pks: List[int], optional

        Returns
        -------
        bool
            A boolean value
        """
        # Default changed from a mutable [] to None to avoid the shared
        # mutable-default pitfall; caller-visible behaviour is unchanged.
        skipped_story_pks = skipped_story_pks or []
        return self.media_seen(
            [self.media_id(mid) for mid in story_pks],
            [self.media_id(mid) for mid in skipped_story_pks]
        )
| import json
from copy import deepcopy
from typing import List
from instagrapi import config
from instagrapi.exceptions import StoryNotFound
from instagrapi.extractors import extract_story_gql, extract_story_v1
from instagrapi.types import Story, StoryQueue
class StoryMixin:
    """Mixin providing story retrieval, caching, deletion and seen-marking."""

    _stories_cache = {}  # story pk -> Story, shared process-wide

    # def story_info_gql(self, story_pk: int):
    #     # GQL havent video_url :-(
    #     return self.media_info_gql(self, int(story_pk))

    def story_info_v1(self, story_pk: int) -> Story:
        """Fetch one story via the private API, refreshing the cache.

        Raises StoryNotFound when the story is not among the owner's
        currently active stories.
        """
        full_id = self.media_id(story_pk)
        raw_pk, owner_id = full_id.split("_")
        fetched = self.user_stories_v1(owner_id)
        wanted_pk = int(raw_pk)
        self._stories_cache.update({s.pk: s for s in fetched})
        if wanted_pk not in self._stories_cache:
            raise StoryNotFound(story_pk=wanted_pk, user_id=owner_id)
        return deepcopy(self._stories_cache[wanted_pk])

    def story_info(self, story_pk: int, use_cache: bool = True) -> Story:
        """Return a story, served from the cache when allowed.

        A deep copy is returned so callers cannot mutate the cached object.
        """
        needs_fetch = (not use_cache) or (story_pk not in self._stories_cache)
        if needs_fetch:
            self._stories_cache[story_pk] = self.story_info_v1(story_pk)
        return deepcopy(self._stories_cache[story_pk])

    def story_delete(self, story_pk: int) -> bool:
        """Delete one of the logged-in user's stories and evict it from cache."""
        assert self.user_id, "Login required"
        target_id = self.media_id(story_pk)
        self._stories_cache.pop(self.media_pk(target_id), None)
        return self.media_delete(target_id)

    def user_stories_v1(self, user_id: int, amount: int = None) -> List[Story]:
        """Fetch a user's active stories through the private API.

        ``amount`` optionally truncates the returned list.
        """
        request_params = {
            "supported_capabilities_new": json.dumps(config.SUPPORTED_CAPABILITIES)
        }
        response = self.private_request(
            f"feed/user/{int(user_id)}/story/", params=request_params
        )
        entries = response["reel"]["items"]
        stories = [extract_story_v1(entry) for entry in entries]
        return stories[: int(amount)] if amount else stories

    def user_stories_gql(self, user_ids: List[int] = None) -> List[StoryQueue]:
        """Fetch story queues for many users via the public GraphQL API.

        Requests are issued in batches of at most 50 user ids.
        """
        self.public.cookies.update(self.private.cookies)

        def _batches():
            assert user_ids is not None
            size = 50
            for offset in range(0, len(user_ids), size):
                yield user_ids[offset:offset + size]

        merged = {}
        for batch in _batches():
            merged.update(
                self.public_graphql_request(
                    query_hash="303a4ae99711322310f25250d988f3b7",
                    variables={"reel_ids": batch, "precomposed_overlay": False},
                )
            )
        queues = []
        for reel in merged["reels_media"]:
            queue = StoryQueue(items=[])
            for raw_story in reel["items"]:
                queue.items.append(extract_story_gql(raw_story))
            queues.append(queue.copy())
        return queues

    def user_stories(self, user_id: int, amount: int = None) -> List[Story]:
        """Fetch a user's stories (delegates to the private API for now)."""
        # TODO: Add user_stories_gql
        return self.user_stories_v1(user_id, amount)

    def story_seen(self, story_pks: List[int], skipped_story_pks: List[int] = []):
        """Mark the given stories as seen and the others as skipped."""
        seen_ids = [self.media_id(pk) for pk in story_pks]
        skipped_ids = [self.media_id(pk) for pk in skipped_story_pks]
        return self.media_seen(seen_ids, skipped_ids)
port_scanner.py | RasbeeTech/Simple-Port-Scanner | 1 | 6623538 | import socket
import re
from common_ports import ports_and_services
def get_open_ports(target, port_range, verbose=False):
    """Scan ``target`` for open TCP ports within the inclusive ``port_range``.

    :param target: hostname or dotted IP address to scan
    :param port_range: (first, last) tuple of ports, both ends included
    :param verbose: when True, return a formatted report string instead of a list
    :return: list of open ports, a report string, or an error message string
    """
    open_ports = []
    address = ''
    hostname = ''
    # Resolve the target: all digits-and-dots is treated as an IP address,
    # anything else as a hostname.
    if re.search('^[0-9\.]*$', target):
        try:
            name, alias, addresslist = socket.gethostbyaddr(target)
            address = target
            hostname = name
        except socket.herror:
            # Valid IP without a reverse-DNS entry.
            address = target
            hostname = None
        except socket.gaierror:
            return 'Error: Invalid IP address'
    else:
        try:
            address = socket.gethostbyname(target)
            hostname = target
        except socket.gaierror:
            return 'Error: Invalid hostname'
    try:
        # Probe each port; connect_ex returns 0 when the connection succeeds.
        for port in range(port_range[0], port_range[1] + 1):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(1)
            try:
                if sock.connect_ex((address, port)) == 0:
                    open_ports.append(port)
            finally:
                # Always release the descriptor, even if connect_ex raises.
                sock.close()
    except KeyboardInterrupt:
        print("You pressed Ctrl+C")
    except socket.gaierror:
        # Must precede socket.error: gaierror subclasses OSError
        # (socket.error), so the original clause ordering made this
        # branch unreachable.
        print('Hostname could not be resolved. Exiting')
    except socket.error:
        print("Couldn't connect to server")
    if not verbose:
        return open_ports
    # Build the human-readable report.
    string_builder = []
    if hostname:
        string_builder.append(f"Open ports for {hostname} ({address})")
    else:
        string_builder.append(f"Open ports for {address}")
    string_builder.append('PORT'.ljust(9, ' ') + 'SERVICE')
    for port in open_ports:
        # .get avoids a KeyError for ports missing from the service table.
        string_builder.append(str(port).ljust(9, ' ') + ports_and_services.get(port, 'unknown'))
    return '\n'.join(string_builder)
| import socket
import re
from common_ports import ports_and_services
def get_open_ports(target, port_range, verbose=False):
    """Scan ``target`` for open TCP ports within the inclusive ``port_range``.

    Returns a list of open ports, or a formatted report string when
    ``verbose`` is set; error strings are returned for unresolvable targets.
    """
    open_ports = []
    address = ''
    hostname = ''
    # validate target
    # All digits-and-dots is treated as an IP address, otherwise a hostname.
    if re.search('^[0-9\.]*$', target):
        try:
            name, alias, addresslist = socket.gethostbyaddr(target)
            address = target
            hostname = name
        except socket.herror:
            # Valid IP without a reverse-DNS entry.
            address = target
            hostname = None
        except socket.gaierror:
            return 'Error: Invalid IP address'
    else:
        try:
            address = socket.gethostbyname(target)
            hostname = target
        except socket.gaierror:
            return 'Error: Invalid hostname'
    try:
        # Scan ports
        # connect_ex returns 0 when the connection succeeds.
        for port in range(port_range[0], port_range[1]+1):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(1)
            result = sock.connect_ex((address, port))
            if result == 0:
                open_ports.append(port)
            sock.close()
    except socket.error:
        print("Couldn't connect to server")
    except KeyboardInterrupt:
        print("You pressed Ctrl+C")
    except socket.gaierror:
        # NOTE(review): unreachable -- socket.gaierror subclasses OSError
        # (socket.error), which is already caught by the first clause.
        print ('Hostname could not be resolved. Exiting')
    except socket.error:
        # NOTE(review): duplicate clause, also unreachable.
        print ("Couldn't connect to server")
    if not verbose:
        # return [open_ports]
        return(open_ports)
    else:
        # Stringify
        # Build the human-readable report: header, column titles, rows.
        string_builder = []
        if hostname:
            string_builder.append(f"Open ports for {hostname} ({address})")
        else:
            string_builder.append(f"Open ports for {address}")
        left = 'PORT'.ljust(9, ' ')
        right = 'SERVICE'
        string_builder.append(left + right)
        for port in open_ports:
            left = str(port).ljust(9, ' ')
            right = ports_and_services[port]
            string_builder.append(left + right)
        return '\n'.join(string_builder)
| en | 0.382535 | # validate target # Scan ports # return [open_ports] # Stringify | 3.242058 | 3 |
scripts/tshark.py | anzigly/pycoflow | 1 | 6623539 | <reponame>anzigly/pycoflow<filename>scripts/tshark.py
#!/usr/bin/python
import subprocess
def run_tshark(slaves_file):
    """Start a background tshark capture on every host listed in *slaves_file*.

    Lines beginning with '#' are treated as comments and skipped; nohup and
    the trailing '&' keep tshark alive after the ssh command returns.
    """
    launched = []
    with open(slaves_file) as hosts:
        for line in hosts:
            if line.startswith("#"):
                continue
            cmd = 'ssh root@%s nohup tshark -i eth0 -s 100 -w ts.pcap "tcp" &' % line.strip()
            launched.append(subprocess.Popen(cmd, shell=True, stderr=None))
def stop_tshark(slaves_file):
    """Terminate tshark on all hosts in *slaves_file* using a parallel ssh kill."""
    command = "pssh -h %s -l root killall tshark" % slaves_file
    subprocess.Popen(command, shell=True, stderr=None)
import subprocess
def run_tshark(slaves_file):
    """Start a background tshark capture on every host listed in *slaves_file*.

    Lines starting with '#' are treated as comments and skipped. The Popen
    handles are collected locally but not returned (fire-and-forget).
    """
    proc = []
    with open(slaves_file) as f:
        for h in f.readlines():
            if h.startswith("#"):
                continue
            host = h.strip()
            # nohup + trailing '&' keeps tshark alive after ssh returns.
            cmd = 'ssh root@%s nohup tshark -i eth0 -s 100 -w ts.pcap "tcp" &' % host
            p = subprocess.Popen(cmd, shell=True, stderr=None)
            proc.append(p)
def stop_tshark(slaves_file):
    """Kill every tshark process on the hosts listed in *slaves_file* via pssh."""
    kill_cmd = "pssh -h %s -l root killall tshark" % slaves_file
    subprocess.Popen(kill_cmd, shell=True, stderr=None)
src/agx/generator/uml/interfaces.py | bluedynamics/agx.generator.uml | 1 | 6623540 | <filename>src/agx/generator/uml/interfaces.py
from zope.interface import Attribute
from node.interfaces import INode
from agx.core.interfaces import IScope
class IXMLScope(IScope):
    """XML specific scope interface.

    Uses tag names for scope identification instead of interfaces.
    """

    # zope.interface schema declaration: the tag names this scope matches on.
    tags = Attribute(u"List of tags this scope applies.")
| <filename>src/agx/generator/uml/interfaces.py
from zope.interface import Attribute
from node.interfaces import INode
from agx.core.interfaces import IScope
class IXMLScope(IScope):
"""XML specific scope interface.
Uses tag names for scope identification instead of interfaces.
"""
tags = Attribute(u"List of tags this scope applies.")
| en | 0.651149 | XML specific scope interface. Uses tag names for scope identification instead of interfaces. | 1.632038 | 2 |
src/preprocessing/prepare_craft.py | norikinishida/coreference-resolution | 0 | 6623541 | from collections import defaultdict
import unicodedata
import os
import re
import utils
CONLL_KEYS = ["_0", "_1", "token-index", "token", "pos", "_5", "_6", "_7", "_8", "_9", "_10", "_11", "span-exp"]
def main():
    """Preprocess the CRAFT coreference corpus.

    Reads the CoNLL coreference files and the BioNLP section annotations,
    normalizes tokens, and writes per-split (train/dev/test) CoNLL copies,
    plain-text section files, boundary files, and JSON chain files.
    Corpus-level statistics are logged at the end.
    """
    config = utils.get_hocon_config(config_path="./config/main.conf", config_name="path")
    path_conll = os.path.join(config["craft"], "coref-conll")
    path_bionlp = os.path.join(config["craft"], "coref-bionlp")
    path_dst = os.path.join(config["data"], "craft")
    utils.mkdir(os.path.join(path_dst, "train"))
    utils.mkdir(os.path.join(path_dst, "dev"))
    utils.mkdir(os.path.join(path_dst, "test"))
    utils.mkdir(os.path.join(path_dst + "-conll", "train"))
    utils.mkdir(os.path.join(path_dst + "-conll", "dev"))
    utils.mkdir(os.path.join(path_dst + "-conll", "test"))
    # Build a mapping: CoNLL file name -> train/dev/test split.
    path_split = os.path.join(config["craft"], "articles", "ids")
    train_filenames = utils.read_lines(os.path.join(path_split, "craft-ids-train.txt"))
    dev_filenames = utils.read_lines(os.path.join(path_split, "craft-ids-dev.txt"))
    test_filenames = utils.read_lines(os.path.join(path_split, "craft-ids-test.txt"))
    # Sanity-check the expected split sizes.
    assert len(train_filenames) == 60
    assert len(dev_filenames) == 7
    assert len(test_filenames) == 30
    filename_to_split = {}
    for fname in train_filenames:
        filename_to_split[fname] = "train"
    for fname in dev_filenames:
        filename_to_split[fname] = "dev"
    for fname in test_filenames:
        filename_to_split[fname] = "test"
    # Collect the CoNLL files to process.
    filenames = os.listdir(path_conll)
    filenames = [n for n in filenames if n.endswith(".conll")]
    filenames.sort()
    n_doc = len(filenames)
    n_sents = 0
    n_chains = 0
    n_mensions = 0
    for filename in filenames:
        split = filename_to_split[filename.replace(".conll", "")]
        # Load the CoNLL data.
        sentences_conll = utils.read_conll(os.path.join(path_conll, filename), CONLL_KEYS)
        for s_i in range(len(sentences_conll)):
            for w_i in range(len(sentences_conll[s_i])):
                token = sentences_conll[s_i][w_i]["token"]
                # Normalize the token text.
                token = utils.normalize_string(token, able=["space", "hyphen", "amp", "quot", "lt", "gt"])
                token = token.strip()
                # If a token still splits into multiple whitespace-separated
                # parts, join them with "-" into a single word so that later
                # code can safely tokenize on whitespace.
                if len(token.split()) > 1:
                    old_token = token
                    token = "-".join(token.split())
                    utils.writelog("Transformed %s -> %s" % (old_token, token))
                sentences_conll[s_i][w_i]["token"] = token
        # Load the BioNLP section data.
        inner_section_boundaries, outer_section_boundaries = read_section_boundaries_from_bionlp(os.path.join(path_bionlp, filename.replace(".conll", ".bionlp")))
        # Check that sentence counts agree between the CoNLL and BioNLP data,
        # i.e., that every sentence is contained in some section.
        assert len(sentences_conll) \
            == sum([(end_i - begin_i + 1) for (begin_i, end_i) in inner_section_boundaries]) \
            == sum([(end_i - begin_i + 1) for (begin_i, end_i) in outer_section_boundaries])
        utils.writelog("%s: {CoNLL vs. BioNLP: OK}" % (filename.replace(".conll", "")))
        # Save the (token-normalized) CoNLL data.
        lines = utils.read_lines(os.path.join(path_conll, filename))
        begin_line = lines[0]
        end_line = "#end document"
        assert begin_line.startswith("#begin document (")
        with open(os.path.join(path_dst + "-conll", split, filename), "w") as f:
            f.write("%s\n" % begin_line)
            for sent in sentences_conll:
                for conll_line in sent:
                    items = [conll_line[key] for key in conll_line.keys()]
                    f.write("\t".join(items) + "\n")
                f.write("\n")
            f.write("%s\n" % end_line)
        # Convert to plain-text sections and save them.
        sections_tok, sections_pos = get_sections(sentences_conll, inner_section_boundaries)
        write_sections(os.path.join(path_dst, split, filename.replace(".conll", ".inner_sections.tokens")), sections_tok)
        write_sections(os.path.join(path_dst, split, filename.replace(".conll", ".inner_sections.postags")), sections_pos)
        sections_tok, sections_pos = get_sections(sentences_conll, outer_section_boundaries)
        write_sections(os.path.join(path_dst, split, filename.replace(".conll", ".outer_sections.tokens")), sections_tok)
        write_sections(os.path.join(path_dst, split, filename.replace(".conll", ".outer_sections.postags")), sections_pos)
        # Save the section and sentence boundaries.
        write_boundaries(os.path.join(path_dst, split, filename.replace(".conll", ".inner_section_boundaries")), inner_section_boundaries)
        write_boundaries(os.path.join(path_dst, split, filename.replace(".conll", ".outer_section_boundaries")), outer_section_boundaries)
        sentence_boundaries = get_sentence_boundaries(sentences_conll)
        write_boundaries(os.path.join(path_dst, split, filename.replace(".conll", ".sentence_boundaries")), sentence_boundaries)
        # Extract coreference chains.
        tokens = [w["token"] for s in sentences_conll for w in s]
        sentences_conll = assign_global_token_index(sentences_conll)
        chains = extract_identity_chains(sentences_conll)
        chains = assign_text_to_mensions(chains, tokens)
        # Convert to a dictionary and save as JSON.
        data = get_chains_dictionary(chains)
        utils.write_json(os.path.join(path_dst, split, filename.replace(".conll", ".chains.json")), data)
        # Update corpus statistics.
        n_sents += len(sentences_conll)
        n_chains += len(chains)
        for chain in chains:
            n_mensions += len(chain)
    utils.writelog("No. sentences = %d" % n_sents)
    utils.writelog("Avg. sentences/doc = %f" % (float(n_sents) / n_doc))
    utils.writelog("No. mensions = %d" % n_mensions)
    utils.writelog("Avg. mensions/doc = %f" % (float(n_mensions) / n_doc))
    utils.writelog("No. chains = %d" % n_chains)
    utils.writelog("Avg. chains/doc = %f" % (float(n_chains) / n_doc))
#########################
def read_section_boundaries_from_bionlp(path):
    """Read sentence-level section boundaries from a BioNLP annotation file.

    "section" annotations are split into the innermost and outermost spans,
    gaps between adjacent sections are filled, and each char-level section
    span is then mapped to the inclusive (first, last) indices of the
    sentences it fully contains.

    Args:
        path: path to a tab-separated BioNLP standoff file.

    Returns:
        (inner_section_boundaries, outer_section_boundaries): two lists of
        inclusive (begin_sent_i, end_sent_i) tuples; sections containing no
        complete sentence are dropped.
    """
    lines = utils.read_lines(path, process=lambda line: line.split("\t"))
    # Only "T"-typed (text-bound) annotation rows carry spans.
    lines = [l for l in lines if l[0].startswith("T")]
    section_charlevel_boundaries = get_charlevel_boundaries(lines, target_tag="section")
    # Keep only the innermost / outermost section spans respectively.
    inner_charlevel = remove_outer_boundaries(section_charlevel_boundaries)
    outer_charlevel = remove_inner_boundaries(section_charlevel_boundaries)
    # Fill the gaps between adjacent sections.
    inner_charlevel = fill_boundaries(inner_charlevel)
    outer_charlevel = fill_boundaries(outer_charlevel)
    sentence_charlevel_boundaries = get_charlevel_boundaries(lines, target_tag="sentence")

    def to_sentence_spans(section_charlevel_boundaries):
        # Map each char-level section span to the (min, max) indices of the
        # sentences fully contained in it; empty sections are skipped.
        spans = []
        for sec_begin_char_i, sec_end_char_i in section_charlevel_boundaries:
            sent_indices = [
                sent_i
                for sent_i, (sent_begin_char_i, sent_end_char_i) in enumerate(sentence_charlevel_boundaries)
                if sec_begin_char_i <= sent_begin_char_i < sent_end_char_i <= sec_end_char_i
            ]
            if len(sent_indices) != 0:
                spans.append((min(sent_indices), max(sent_indices)))
        return spans

    return to_sentence_spans(inner_charlevel), to_sentence_spans(outer_charlevel)
def get_charlevel_boundaries(lines, target_tag):
    """Collect (begin, end) character offsets of *target_tag* annotations.

    Only rows whose type starts with "T" and whose tag field starts with
    *target_tag* contribute a boundary; each row must have exactly three
    fields and each tag field exactly "tag begin end".
    """
    result = []
    for row in lines:
        assert len(row) == 3
        row_type, tag_field, _text = row
        if row_type.startswith("T") and tag_field.startswith(target_tag):
            parts = tag_field.split(" ")
            assert len(parts) == 3
            _tag, begin_str, end_str = parts
            result.append((int(begin_str), int(end_str)))
    return result
def remove_outer_boundaries(boundaries):
    """Return *boundaries* without any span that contains another span.

    A span (a, b) is dropped when some other span (c, d) satisfies
    a <= c < d <= b; two identical spans therefore remove each other,
    matching the original behavior.
    """
    # Set (not list) so membership tests below are O(1) and duplicates are free.
    remove_indices = set()
    for i, (i_begin, i_end) in enumerate(boundaries):
        for j, (j_begin, j_end) in enumerate(boundaries):
            if i == j:
                continue
            # If boundary i contains boundary j, boundary i is an outer span.
            if i_begin <= j_begin < j_end <= i_end:
                remove_indices.add(i)
                break  # i is already marked; no need to scan further
    return [b for idx, b in enumerate(boundaries) if idx not in remove_indices]
def remove_inner_boundaries(boundaries):
    """Return *boundaries* without any span contained in another span.

    A span (c, d) is dropped when some other span (a, b) satisfies
    a <= c < d <= b; two identical spans therefore remove each other,
    matching the original behavior.
    """
    # Set (not list) so membership tests below are O(1) and duplicates are free.
    remove_indices = set()
    for i, (i_begin, i_end) in enumerate(boundaries):
        for j, (j_begin, j_end) in enumerate(boundaries):
            if i == j:
                continue
            # If boundary i contains boundary j, boundary j is an inner span.
            if i_begin <= j_begin < j_end <= i_end:
                remove_indices.add(j)
    return [b for idx, b in enumerate(boundaries) if idx not in remove_indices]
def fill_boundaries(boundaries):
    """Insert gap-filling spans between consecutive boundaries.

    Consecutive spans must not overlap; the result is the original spans
    plus one (cur_end, next_begin) filler per gap, sorted by begin offset.
    """
    fillers = []
    for (_, cur_end), (next_begin, _) in zip(boundaries, boundaries[1:]):
        # Adjacent sections must be non-overlapping and in order.
        assert cur_end <= next_begin
        if cur_end != next_begin:
            fillers.append((cur_end, next_begin))
    return sorted(boundaries + fillers, key=lambda span: span[0])
#########################
def get_sections(sentences_conll, section_boundaries):
    """Group sentence tokens and POS tags into sections.

    Each (begin_i, end_i) boundary is an inclusive range of sentence
    indices; returns parallel lists (sections_tok, sections_pos).
    """
    sections_tok, sections_pos = [], []
    for begin_i, end_i in section_boundaries:
        section_sents = sentences_conll[begin_i:end_i + 1]
        sections_tok.append([[w["token"] for w in sent] for sent in section_sents])
        sections_pos.append([[w["pos"] for w in sent] for sent in section_sents])
    return sections_tok, sections_pos
def write_sections(path, sections):
    """Write sections to *path*: one sentence per line (space-joined tokens),
    with an empty line terminating each section."""
    with open(path, "w") as f:
        for section in sections:
            for sentence in section:
                f.write(" ".join(sentence) + "\n")
            # A blank line marks the end of a section.
            f.write("\n")
def write_tokens(path, tokens):
    """Write one token per line to *path*."""
    with open(path, "w") as f:
        f.writelines("%s\n" % token for token in tokens)
#########################
def get_sentence_boundaries(sentences_conll):
    """Return inclusive (begin, end) global token offsets for each sentence."""
    boundaries = []
    offset = 0
    for sentence in sentences_conll:
        boundaries.append((offset, offset + len(sentence) - 1))
        offset += len(sentence)
    return boundaries
def write_boundaries(path, boundaries):
    """Write one "begin end" pair per line to *path*."""
    with open(path, "w") as f:
        f.writelines("%d %d\n" % pair for pair in boundaries)
#########################
class Mension(object):
    """A (possibly discontinuous) mention: one or more token spans belonging
    to a coreference chain."""

    def __init__(self, chain_name, mension_name, spans, text=None):
        self.chain_name = chain_name      # identifier of the owning chain
        self.mension_name = mension_name  # sub-name for discontinuous mentions ("" if none)
        self.spans = spans                # list of inclusive (begin, end) token indices
        self.text = text                  # surface string, typically filled in later

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "<%s, spans=%s, chain_name=%s, mension_name=%s>" % (
            self.text, self.spans, self.chain_name, self.mension_name)
class ChainManager(object):
    """Incrementally builds coreference chains from CoNLL span markers.

    Spans are opened with ``set_begin_index`` and closed with
    ``set_end_index`` (LIFO per key, so nested spans of the same key work).
    A key combines the chain name with an optional mention name that is
    used to group the parts of discontinuous mentions.
    """

    def __init__(self):
        # key -> stack of currently-open (begin_index, "*") markers
        self.stacks = defaultdict(list)
        # key -> list of completed (begin_index, end_index) spans
        self.chains = defaultdict(list)

    def get_key(self, chain_name, mension_name):
        """Return the internal dictionary key for a (chain, mention) pair."""
        return "%s-%s" % (chain_name, mension_name)

    def set_begin_index(self, chain_name, mension_name, begin_index):
        """Open a new span at *begin_index* for the given chain/mention."""
        key = self.get_key(chain_name, mension_name)
        self.stacks[key].append((begin_index, "*"))

    def set_end_index(self, chain_name, mension_name, end_index):
        """Close the most recently opened span of the given chain/mention."""
        key = self.get_key(chain_name, mension_name)
        assert key in self.stacks
        assert len(self.stacks[key]) > 0
        begin_index, _ = self.stacks[key].pop()
        self.chains[key].append((begin_index, end_index))
        # Ensure the plain (no mention name) entry for this chain exists so
        # that get_chains() always sees the chain, even when all of its
        # mentions are named/discontinuous ones.
        key2 = self.get_key(chain_name, "")
        if key2 not in self.chains:
            self.chains[key2] = []

    def get_chains(self):
        """Return the chains as a list of lists of Mension objects.

        Each span stored under an unnamed key becomes one single-span
        Mension; all spans stored under a named key are merged into one
        discontinuous Mension.
        """
        new_chains = {}
        for key in self.chains:
            chain_name, mension_name = key.split("-")
            members = new_chains.setdefault(chain_name, [])
            if mension_name == "":
                for span in self.chains[key]:
                    members.append(Mension(chain_name=chain_name,
                                           mension_name=mension_name,
                                           spans=[span], text=None))
            else:
                members.append(Mension(chain_name=chain_name,
                                       mension_name=mension_name,
                                       spans=self.chains[key], text=None))
        return list(new_chains.values())
def assign_global_token_index(sentences_conll):
    """Assign a running document-level token index to every token dict.

    Adds a "global-token-index" key in place and returns the same list.
    """
    counter = 0
    for sentence in sentences_conll:
        for word in sentence:
            word["global-token-index"] = counter
            counter += 1
    return sentences_conll
def extract_identity_chains(sentences_conll):
    """Extract identity chains (sets of mention spans) from CoNLL sentences.

    Span expressions in the "span-exp" column look like "(12", "12)" or
    "(12)", optionally followed by a lowercase mention name (e.g. "(12a"),
    with multiple expressions separated by "|".  Requires that
    assign_global_token_index() has already been applied.

    Returns:
        list of chains, each a list of Mension objects (see ChainManager).
    """
    # Compiled once here instead of on every token (the patterns are
    # loop-invariant); each captures (chain id digits, mention name letters).
    both_pattern = re.compile(r"\(([0-9]+)([a-z]*)\)")
    begin_pattern = re.compile(r"\(([0-9]+)([a-z]*)")
    end_pattern = re.compile(r"([0-9]+)([a-z]*)\)")
    manager = ChainManager()
    for sent_conll in sentences_conll:
        for word_conll in sent_conll:
            global_token_index = word_conll["global-token-index"]
            for span_exp in word_conll["span-exp"].split("|"):
                if span_exp.startswith("(") and span_exp.endswith(")"):
                    # Single-token mention: opens and closes on this token.
                    matches = both_pattern.findall(span_exp)
                    assert len(matches) == 1
                    chain_name, mension_name = matches[0]
                    manager.set_begin_index(chain_name=chain_name, mension_name=mension_name, begin_index=global_token_index)
                    manager.set_end_index(chain_name=chain_name, mension_name=mension_name, end_index=global_token_index)
                elif span_exp.startswith("("):
                    matches = begin_pattern.findall(span_exp)
                    assert len(matches) == 1
                    chain_name, mension_name = matches[0]
                    manager.set_begin_index(chain_name=chain_name, mension_name=mension_name, begin_index=global_token_index)
                elif span_exp.endswith(")"):
                    matches = end_pattern.findall(span_exp)
                    assert len(matches) == 1
                    chain_name, mension_name = matches[0]
                    manager.set_end_index(chain_name=chain_name, mension_name=mension_name, end_index=global_token_index)
    return manager.get_chains()
def assign_text_to_mensions(chains, tokens):
    """Fill each mention's ``.text`` with its space-joined surface tokens.

    Discontinuous span pieces are also joined with single spaces; mutates
    the mentions in place and returns *chains*.
    """
    for chain in chains:
        for mension in chain:
            pieces = [" ".join(tokens[b:e + 1]) for (b, e) in mension.spans]
            mension.text = " ".join(pieces)
    return chains
#########################
def get_chains_dictionary(chains):
    """Convert mention chains into a JSON-serializable dictionary.

    Returns a dict with "chains" (each span rendered as "begin end") and
    "chains_text" (each mention's surface string), both parallel to *chains*.
    """
    return {
        "chains": [
            [["%d %d" % span for span in m.spans] for m in chain]
            for chain in chains
        ],
        "chains_text": [[m.text for m in chain] for chain in chains],
    }
if __name__ == "__main__":
main()
| from collections import defaultdict
import unicodedata
import os
import re
import utils
CONLL_KEYS = ["_0", "_1", "token-index", "token", "pos", "_5", "_6", "_7", "_8", "_9", "_10", "_11", "span-exp"]
def main():
config = utils.get_hocon_config(config_path="./config/main.conf", config_name="path")
path_conll = os.path.join(config["craft"], "coref-conll")
path_bionlp = os.path.join(config["craft"], "coref-bionlp")
path_dst = os.path.join(config["data"], "craft")
utils.mkdir(os.path.join(path_dst, "train"))
utils.mkdir(os.path.join(path_dst, "dev"))
utils.mkdir(os.path.join(path_dst, "test"))
utils.mkdir(os.path.join(path_dst + "-conll", "train"))
utils.mkdir(os.path.join(path_dst + "-conll", "dev"))
utils.mkdir(os.path.join(path_dst + "-conll", "test"))
# 辞書 (CoNLLファイル名 -> training/dev/test split) の作成
path_split = os.path.join(config["craft"], "articles", "ids")
train_filenames = utils.read_lines(os.path.join(path_split, "craft-ids-train.txt"))
dev_filenames = utils.read_lines(os.path.join(path_split, "craft-ids-dev.txt"))
test_filenames = utils.read_lines(os.path.join(path_split, "craft-ids-test.txt"))
assert len(train_filenames) == 60
assert len(dev_filenames) == 7
assert len(test_filenames) == 30
filename_to_split = {}
for fname in train_filenames:
filename_to_split[fname] = "train"
for fname in dev_filenames:
filename_to_split[fname] = "dev"
for fname in test_filenames:
filename_to_split[fname] = "test"
# CoNLLファイルリスト
filenames = os.listdir(path_conll)
filenames = [n for n in filenames if n.endswith(".conll")]
filenames.sort()
n_doc = len(filenames)
n_sents = 0
n_chains = 0
n_mensions = 0
for filename in filenames:
split = filename_to_split[filename.replace(".conll", "")]
# CoNLLデータ読み込み
sentences_conll = utils.read_conll(os.path.join(path_conll, filename), CONLL_KEYS)
for s_i in range(len(sentences_conll)):
for w_i in range(len(sentences_conll[s_i])):
token = sentences_conll[s_i][w_i]["token"]
# トークン正規化
token = utils.normalize_string(token, able=["space", "hyphen", "amp", "quot", "lt", "gt"])
token = token.strip()
# もしCoNLLデータにて、一つの単語が空白分割で複数個のパーツに別れるなら、それらを"-"で1単語に結合しておく
# (後のコードで空白でトークン分割するために)
if len(token.split()) > 1:
old_token = token
token = "-".join(token.split())
utils.writelog("Transformed %s -> %s" % (old_token, token))
sentences_conll[s_i][w_i]["token"] = token
# BioNLPデータ読み込み
inner_section_boundaries, outer_section_boundaries = read_section_boundaries_from_bionlp(os.path.join(path_bionlp, filename.replace(".conll", ".bionlp")))
# CoNLLデータとBioNLPデータで文数が一致するかチェック
# i.e., すべての文がいずれかのセクションに含まれているか
assert len(sentences_conll) \
== sum([(end_i - begin_i + 1) for (begin_i, end_i) in inner_section_boundaries]) \
== sum([(end_i - begin_i + 1) for (begin_i, end_i) in outer_section_boundaries])
utils.writelog("%s: {CoNLL vs. BioNLP: OK}" % (filename.replace(".conll", "")))
# CoNLLデータの保存
lines = utils.read_lines(os.path.join(path_conll, filename))
begin_line = lines[0]
end_line = "#end document"
assert begin_line.startswith("#begin document (")
with open(os.path.join(path_dst + "-conll", split, filename), "w") as f:
f.write("%s\n" % begin_line)
for sent in sentences_conll:
for conll_line in sent:
items = [conll_line[key] for key in conll_line.keys()]
f.write("\t".join(items) + "\n")
f.write("\n")
f.write("%s\n" % end_line)
# テキストデータに変換、保存
sections_tok, sections_pos = get_sections(sentences_conll, inner_section_boundaries)
write_sections(os.path.join(path_dst, split, filename.replace(".conll", ".inner_sections.tokens")), sections_tok)
write_sections(os.path.join(path_dst, split, filename.replace(".conll", ".inner_sections.postags")), sections_pos)
sections_tok, sections_pos = get_sections(sentences_conll, outer_section_boundaries)
write_sections(os.path.join(path_dst, split, filename.replace(".conll", ".outer_sections.tokens")), sections_tok)
write_sections(os.path.join(path_dst, split, filename.replace(".conll", ".outer_sections.postags")), sections_pos)
# セクション境界、文境界の保存
write_boundaries(os.path.join(path_dst, split, filename.replace(".conll", ".inner_section_boundaries")), inner_section_boundaries)
write_boundaries(os.path.join(path_dst, split, filename.replace(".conll", ".outer_section_boundaries")), outer_section_boundaries)
sentence_boundaries = get_sentence_boundaries(sentences_conll)
write_boundaries(os.path.join(path_dst, split, filename.replace(".conll", ".sentence_boundaries")), sentence_boundaries)
# Chains抽出
tokens = [w["token"] for s in sentences_conll for w in s]
sentences_conll = assign_global_token_index(sentences_conll)
chains = extract_identity_chains(sentences_conll)
chains = assign_text_to_mensions(chains, tokens)
# 辞書に変換、保存
data = get_chains_dictionary(chains)
utils.write_json(os.path.join(path_dst, split, filename.replace(".conll", ".chains.json")), data)
# カウント
n_sents += len(sentences_conll)
n_chains += len(chains)
for chain in chains:
n_mensions += len(chain)
utils.writelog("No. sentences = %d" % n_sents)
utils.writelog("Avg. sentences/doc = %f" % (float(n_sents) / n_doc))
utils.writelog("No. mensions = %d" % n_mensions)
utils.writelog("Avg. mensions/doc = %f" % (float(n_mensions) / n_doc))
utils.writelog("No. chains = %d" % n_chains)
utils.writelog("Avg. chains/doc = %f" % (float(n_chains) / n_doc))
#########################
def read_section_boundaries_from_bionlp(path):
outer_section_boundaries = []
inner_section_boundaries = []
lines = utils.read_lines(path, process=lambda line: line.split("\t"))
lines = [l for l in lines if l[0].startswith("T")]
section_charlevel_boundaries = get_charlevel_boundaries(lines, target_tag="section")
# outer/innerのセクションを削除
inner_section_charlevel_boundaries = remove_outer_boundaries(section_charlevel_boundaries)
outer_section_charlevel_boundaries = remove_inner_boundaries(section_charlevel_boundaries)
# セクション間を補間する
inner_section_charlevel_boundaries = fill_boundaries(inner_section_charlevel_boundaries)
outer_section_charlevel_boundaries = fill_boundaries(outer_section_charlevel_boundaries)
sentence_charlevel_boundaries = get_charlevel_boundaries(lines, target_tag="sentence")
# 各セクションが含む文単位スパンを同定
for sec_i in range(len(inner_section_charlevel_boundaries)):
sec_begin_char_i, sec_end_char_i = inner_section_charlevel_boundaries[sec_i]
sent_indices = []
for sent_i in range(len(sentence_charlevel_boundaries)):
sent_begin_char_i, sent_end_char_i = sentence_charlevel_boundaries[sent_i]
if sec_begin_char_i <= sent_begin_char_i < sent_end_char_i <= sec_end_char_i:
sent_indices.append(sent_i)
if len(sent_indices) != 0:
inner_section_boundaries.append((min(sent_indices), max(sent_indices)))
for sec_i in range(len(outer_section_charlevel_boundaries)):
sec_begin_char_i, sec_end_char_i = outer_section_charlevel_boundaries[sec_i]
sent_indices = []
for sent_i in range(len(sentence_charlevel_boundaries)):
sent_begin_char_i, sent_end_char_i = sentence_charlevel_boundaries[sent_i]
if sec_begin_char_i <= sent_begin_char_i < sent_end_char_i <= sec_end_char_i:
sent_indices.append(sent_i)
if len(sent_indices) != 0:
outer_section_boundaries.append((min(sent_indices), max(sent_indices)))
return inner_section_boundaries, outer_section_boundaries
def get_charlevel_boundaries(lines, target_tag):
boundaries = []
for line in lines:
assert len(line) == 3
typ, tag_and_boundary, _ = line
if not typ.startswith("T"):
continue
if not tag_and_boundary.startswith(target_tag):
continue
elements = tag_and_boundary.split(" ")
assert len(elements) == 3
tag, begin_char_i, end_char_i = elements
begin_char_i = int(begin_char_i)
end_char_i = int(end_char_i)
boundaries.append((begin_char_i, end_char_i))
return boundaries
def remove_outer_boundaries(boundaries):
remove_indices = []
for i in range(len(boundaries)):
i_begin, i_end = boundaries[i]
for j in range(len(boundaries)):
if i == j:
continue
j_begin, j_end = boundaries[j]
# もしi番目のboundaryがj番目のboundaryを含んでいれば, "i"番目を削除リストに入れる
if i_begin <= j_begin < j_end <= i_end:
remove_indices.append(i)
new_boundaries = []
for i in range(len(boundaries)):
if not i in remove_indices:
new_boundaries.append(boundaries[i])
return new_boundaries
def remove_inner_boundaries(boundaries):
remove_indices = []
for i in range(len(boundaries)):
i_begin, i_end = boundaries[i]
for j in range(len(boundaries)):
if i == j:
continue
j_begin, j_end = boundaries[j]
# もしi番目のboundaryがj番目のboundaryを含んでいれば, "j"番目を削除リストに入れる
if i_begin <= j_begin < j_end <= i_end:
remove_indices.append(j)
new_boundaries = []
for i in range(len(boundaries)):
if not i in remove_indices:
new_boundaries.append(boundaries[i])
return new_boundaries
def fill_boundaries(boundaries):
new_boundaries = []
for i in range(len(boundaries)-1):
cur_begin, cur_end = boundaries[i]
next_begin, next_end = boundaries[i+1]
# セクションが連続していなければ、隙間を埋める
assert cur_end <= next_begin
if next_begin != cur_end:
new_boundaries.append((cur_end, next_begin))
boundaries = boundaries + new_boundaries
boundaries = sorted(boundaries, key=lambda x: x[0])
return boundaries
#########################
def get_sections(sentences_conll, section_boundaries):
sections_tok = []
sections_pos = []
for begin_i, end_i in section_boundaries:
section_tok = []
section_pos = []
for sent_conll in sentences_conll[begin_i:end_i+1]:
tokens = [w["token"] for w in sent_conll]
postags = [w["pos"] for w in sent_conll]
section_tok.append(tokens)
section_pos.append(postags)
sections_tok.append(section_tok)
sections_pos.append(section_pos)
return sections_tok, sections_pos
def write_sections(path, sections):
with open(path, "w") as f:
for section in sections:
# 1セクションの書き込み; 各行は文
for sent in section:
sent = " ".join(sent)
f.write("%s\n" % sent)
# セクションは空行で区切られる
f.write("\n")
def write_tokens(path, tokens):
with open(path, "w") as f:
for token in tokens:
f.write("%s\n" % token)
#########################
def get_sentence_boundaries(sentences_conll):
sentence_boundaries = []
begin_i = end_i = 0
for sent_conll in sentences_conll:
n_tokens = len(sent_conll)
end_i = begin_i + n_tokens - 1
sentence_boundaries.append((begin_i, end_i))
begin_i = end_i + 1
return sentence_boundaries
def write_boundaries(path, boundaries):
with open(path, "w") as f:
for begin_i, end_i in boundaries:
f.write("%d %d\n" % (begin_i, end_i))
#########################
class Mension(object):
def __init__(self, chain_name, mension_name, spans, text=None):
self.chain_name = chain_name
self.mension_name = mension_name
self.spans = spans
self.text = text
def __str__(self):
text = "<%s, spans=%s, chain_name=%s, mension_name=%s>" % (self.text, self.spans, self.chain_name, self.mension_name)
return text
def __repr__(self):
return self.__str__()
class ChainManager(object):
def __init__(self):
self.stacks = defaultdict(list)
self.chains = defaultdict(list)
def get_key(self, chain_name, mension_name):
return "%s-%s" % (chain_name, mension_name)
def set_begin_index(self, chain_name, mension_name, begin_index):
key = self.get_key(chain_name, mension_name)
self.stacks[key].append((begin_index, "*"))
def set_end_index(self, chain_name, mension_name, end_index):
key = self.get_key(chain_name, mension_name)
assert key in self.stacks
assert len(self.stacks[key]) > 0
begin_index, _ = self.stacks[key].pop()
self.chains[key].append((begin_index, end_index))
# To avoid errors in get_chains(), we initialize the element for (chain_name=any, mension_name="")
key2 = self.get_key(chain_name, "")
if not key2 in self.chains:
self.chains[key2] = []
def get_chains(self):
# chains = [self.chains[chain_name] for chain_name in self.chains]
# return chains
new_chains = {}
for key in self.chains:
chain_name, mension_name = key.split("-")
if not chain_name in new_chains:
new_chains[chain_name] = []
if mension_name == "":
for span in self.chains[key]:
mension = Mension(chain_name=chain_name, mension_name=mension_name, spans=[span], text=None)
new_chains[chain_name].append(mension)
else:
discont_spans = self.chains[key]
mension = Mension(chain_name=chain_name, mension_name=mension_name, spans=discont_spans, text=None)
new_chains[chain_name].append(mension)
return list(new_chains.values())
def assign_global_token_index(sentences_conll):
"""
Assign global token indices to token-level dictionaries extracted from the CoNLL-format file
"""
token_index = 0
for s_i in range(len(sentences_conll)):
for w_i in range(len(sentences_conll[s_i])):
sentences_conll[s_i][w_i]["global-token-index"] = token_index
token_index += 1
return sentences_conll
def extract_identity_chains(sentences_conll):
"""
Extract identity chains (ie., a set of mention spans) from a CoNLL-format data
"""
manager = ChainManager()
for sent_conll in sentences_conll:
for word_conll in sent_conll:
span_exps = word_conll["span-exp"]
global_token_index = word_conll["global-token-index"]
span_exps = span_exps.split("|")
for span_exp in span_exps:
if span_exp.startswith("(") and span_exp.endswith(")"):
chain_name = re.findall(r"\(([0-9]+)([a-z]*)\)", span_exp)
assert len(chain_name) == 1
chain_name, mension_name = chain_name[0]
manager.set_begin_index(chain_name=chain_name, mension_name=mension_name, begin_index=global_token_index)
manager.set_end_index(chain_name=chain_name, mension_name=mension_name, end_index=global_token_index)
elif span_exp.startswith("("):
chain_name = re.findall(r"\(([0-9]+)([a-z]*)", span_exp)
assert len(chain_name) == 1
chain_name, mension_name = chain_name[0]
manager.set_begin_index(chain_name=chain_name, mension_name=mension_name, begin_index=global_token_index)
elif span_exp.endswith(")"):
chain_name = re.findall(r"([0-9]+)([a-z]*)\)", span_exp)
assert len(chain_name) == 1
chain_name, mension_name = chain_name[0]
manager.set_end_index(chain_name=chain_name, mension_name=mension_name, end_index=global_token_index)
chains = manager.get_chains()
return chains
def assign_text_to_mensions(chains, tokens):
for c_i in range(len(chains)):
for m_i in range(len(chains[c_i])):
chains[c_i][m_i].text = " ".join([" ".join(tokens[i:j+1]) for (i,j) in chains[c_i][m_i].spans])
return chains
#########################
def get_chains_dictionary(chains):
data = {}
data["chains"] = []
for chain in chains:
chain = [["%d %d" % (b,e) for b,e in m.spans] for m in chain]
data["chains"].append(chain)
data["chains_text"] = []
for chain in chains:
chain = [m.text for m in chain]
data["chains_text"].append(chain)
return data
if __name__ == "__main__":
main()
| ja | 0.942595 | # 辞書 (CoNLLファイル名 -> training/dev/test split) の作成 # CoNLLファイルリスト # CoNLLデータ読み込み # トークン正規化 # もしCoNLLデータにて、一つの単語が空白分割で複数個のパーツに別れるなら、それらを"-"で1単語に結合しておく # (後のコードで空白でトークン分割するために) # BioNLPデータ読み込み # CoNLLデータとBioNLPデータで文数が一致するかチェック # i.e., すべての文がいずれかのセクションに含まれているか # CoNLLデータの保存 # テキストデータに変換、保存 # セクション境界、文境界の保存 # Chains抽出 # 辞書に変換、保存 # カウント ######################### # outer/innerのセクションを削除 # セクション間を補間する # 各セクションが含む文単位スパンを同定 # もしi番目のboundaryがj番目のboundaryを含んでいれば, "i"番目を削除リストに入れる # もしi番目のboundaryがj番目のboundaryを含んでいれば, "j"番目を削除リストに入れる # セクションが連続していなければ、隙間を埋める ######################### # 1セクションの書き込み; 各行は文 # セクションは空行で区切られる ######################### ######################### # To avoid errors in get_chains(), we initialize the element for (chain_name=any, mension_name="") # chains = [self.chains[chain_name] for chain_name in self.chains] # return chains Assign global token indices to token-level dictionaries extracted from the CoNLL-format file Extract identity chains (ie., a set of mention spans) from a CoNLL-format data ######################### | 2.170014 | 2 |
tests/test_vision.py | karthik20122001/docker-python | 2,030 | 6623542 | import unittest
import inspect
from unittest.mock import Mock, patch
from kaggle_gcp import KaggleKernelCredentials, init_vision
from test.support import EnvironmentVarGuard
from google.cloud import vision
def _make_credentials():
    """Return a Mock standing in for google.auth Credentials."""
    # Imported lazily so the module can load without google.auth installed.
    import google.auth.credentials
    return Mock(spec=google.auth.credentials.Credentials)
class TestCloudVision(unittest.TestCase):
    """Tests that kaggle_gcp.init_vision() patches the Cloud Vision client to
    default to Kaggle kernel credentials, without overriding user-provided
    credentials, and that the patching is idempotent."""

    class FakeClient:
        """Stand-in for vision.ImageAnnotatorClient; records the credentials
        and the client_info user agent it was constructed with."""
        def __init__(self, credentials=None, client_info=None, **kwargs):
            self.credentials = credentials

            class FakeConnection():
                def __init__(self, user_agent):
                    self.user_agent = user_agent
            if (client_info is not None):
                self._connection = FakeConnection(client_info.user_agent)

    @staticmethod
    def _integration_env():
        """Return an environment guard simulating an attached CLOUDAI
        integration with a user-secrets token (shared by all tests below)."""
        env = EnvironmentVarGuard()
        env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
        env.set('KAGGLE_KERNEL_INTEGRATIONS', 'CLOUDAI')
        return env

    @patch("google.cloud.vision.ImageAnnotatorClient", new=FakeClient)
    def test_default_credentials(self):
        # Without explicit credentials, the patched client should fall back
        # to KaggleKernelCredentials.
        with self._integration_env():
            init_vision()
            client = vision.ImageAnnotatorClient()
            self.assertIsNotNone(client.credentials)
            self.assertIsInstance(client.credentials, KaggleKernelCredentials)

    @patch("google.cloud.vision.ImageAnnotatorClient", new=FakeClient)
    def test_user_provided_credentials(self):
        # Explicitly supplied credentials must never be replaced by the patch.
        credentials = _make_credentials()
        with self._integration_env():
            init_vision()
            client = vision.ImageAnnotatorClient(credentials=credentials)
            self.assertNotIsInstance(client.credentials, KaggleKernelCredentials)
            self.assertIsNotNone(client.credentials)

    def test_monkeypatching_succeed(self):
        # After init_vision(), the client constructor should come from the
        # kaggle_gcp module.
        with self._integration_env():
            init_vision()
            client = vision.ImageAnnotatorClient.__init__
            self.assertTrue("kaggle_gcp" in inspect.getsourcefile(client))

    def test_monkeypatching_idempotent(self):
        # Calling init_vision() twice must not wrap the constructor again.
        with self._integration_env():
            init_vision()
            client1 = vision.ImageAnnotatorClient.__init__
            init_vision()
            client2 = vision.ImageAnnotatorClient.__init__
            self.assertEqual(client1, client2)
| import unittest
import inspect
from unittest.mock import Mock, patch
from kaggle_gcp import KaggleKernelCredentials, init_vision
from test.support import EnvironmentVarGuard
from google.cloud import vision
def _make_credentials():
import google.auth.credentials
return Mock(spec=google.auth.credentials.Credentials)
class TestCloudVision(unittest.TestCase):
class FakeClient:
def __init__(self, credentials=None, client_info=None, **kwargs):
self.credentials = credentials
class FakeConnection():
def __init__(self, user_agent):
self.user_agent = user_agent
if (client_info is not None):
self._connection = FakeConnection(client_info.user_agent)
@patch("google.cloud.vision.ImageAnnotatorClient", new=FakeClient)
def test_default_credentials(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'CLOUDAI')
with env:
init_vision()
client = vision.ImageAnnotatorClient()
self.assertIsNotNone(client.credentials)
self.assertIsInstance(client.credentials, KaggleKernelCredentials)
@patch("google.cloud.vision.ImageAnnotatorClient", new=FakeClient)
def test_user_provided_credentials(self):
credentials = _make_credentials()
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'CLOUDAI')
with env:
init_vision()
client = vision.ImageAnnotatorClient(credentials=credentials)
self.assertNotIsInstance(client.credentials, KaggleKernelCredentials)
self.assertIsNotNone(client.credentials)
def test_monkeypatching_succeed(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'CLOUDAI')
with env:
init_vision()
client = vision.ImageAnnotatorClient.__init__
self.assertTrue("kaggle_gcp" in inspect.getsourcefile(client))
def test_monkeypatching_idempotent(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'CLOUDAI')
with env:
init_vision()
client1 = vision.ImageAnnotatorClient.__init__
init_vision()
client2 = vision.ImageAnnotatorClient.__init__
self.assertEqual(client1, client2)
| none | 1 | 2.373434 | 2 | |
testPython/test3.py | lijie28/python_demo | 0 | 6623543 | <filename>testPython/test3.py
# -*- coding:utf-8*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import os.path
from pyPdf import PdfFileReader,PdfFileWriter
import time
time1=time.time()
# 使用os模块walk函数,搜索出某目录下的全部pdf文件
######################获取同一个文件夹下的所有PDF文件名#######################
def getFileName(filepath):
    """Recursively collect the full paths of all files under *filepath*."""
    return [
        os.path.join(dirpath, name)
        for dirpath, _subdirs, names in os.walk(filepath)
        for name in names
    ]
##########################合并同一个文件夹下所有PDF文件########################
def MergePDF(filepath,outfile):
    """Merge every PDF found under *filepath* into one file named *outfile*,
    written into the same directory. Encrypted inputs are decrypted with the
    hard-coded password "map" (NOTE(review): confirm this password is intended
    for all inputs). Prints progress as it goes. Python 2 / pyPdf code."""
    output=PdfFileWriter()
    outputPages=0
    # gather every file under the folder (assumed to all be PDFs)
    pdf_fileName=getFileName(filepath)
    print '总的',pdf_fileName
    for each in pdf_fileName:
        # skip macOS Finder metadata files
        if '.DS_Store' in each:
            continue
        print '单的',each
        # read the source pdf file
        input = PdfFileReader(file(each, "rb"))
        # if the pdf is encrypted it must be decrypted before pyPdf can use it
        if input.isEncrypted == True:
            print 'input.isEncrypted',input.isEncrypted
            input.decrypt("map")
        # total number of pages in this source pdf
        pageCount = input.getNumPages()
        outputPages += pageCount
        print pageCount
        # append every page to the output writer
        for iPage in range(0, pageCount):
            output.addPage(input.getPage(iPage))
    print "All Pages Number:"+str(outputPages)
    # finally write the merged pdf next to the inputs
    outputStream=file(filepath+outfile,"wb")
    output.write(outputStream)
    outputStream.close()
    print "finished"
if __name__ == '__main__':
file_dir = r'/Users/lijie/Documents/testhaha/'
out=u"第一周.pdf"
MergePDF(file_dir,out)
time2 = time.time()
print u'总共耗时:' + str(time2 - time1) + 's' | <filename>testPython/test3.py
# -*- coding:utf-8*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import os.path
from pyPdf import PdfFileReader,PdfFileWriter
import time
time1=time.time()
# 使用os模块walk函数,搜索出某目录下的全部pdf文件
######################获取同一个文件夹下的所有PDF文件名#######################
def getFileName(filepath):
file_list = []
for root,dirs,files in os.walk(filepath):
for filespath in files:
# print(os.path.join(root,filespath))
file_list.append(os.path.join(root,filespath))
return file_list
##########################合并同一个文件夹下所有PDF文件########################
def MergePDF(filepath,outfile):
output=PdfFileWriter()
outputPages=0
pdf_fileName=getFileName(filepath)
print '总的',pdf_fileName
for each in pdf_fileName:
if '.DS_Store' in each:
continue
# print '看看',os.path.dirname(each),'+', os.path.splitext(each.replace(os.path.dirname(each),''))
#
print '单的',each
# 读取源pdf文件
input = PdfFileReader(file(each, "rb"))
# print 'input:',input
# 如果pdf文件已经加密,必须首先解密才能使用pyPdf
if input.isEncrypted == True:
print 'input.isEncrypted',input.isEncrypted
input.decrypt("map")
# 获得源pdf文件中页面总数
pageCount = input.getNumPages()
outputPages += pageCount
print pageCount
# 分别将page添加到输出output中
for iPage in range(0, pageCount):
output.addPage(input.getPage(iPage))
print "All Pages Number:"+str(outputPages)
# 最后写pdf文件
outputStream=file(filepath+outfile,"wb")
output.write(outputStream)
outputStream.close()
print "finished"
if __name__ == '__main__':
file_dir = r'/Users/lijie/Documents/testhaha/'
out=u"第一周.pdf"
MergePDF(file_dir,out)
time2 = time.time()
print u'总共耗时:' + str(time2 - time1) + 's' | zh | 0.70169 | # -*- coding:utf-8*- # 使用os模块walk函数,搜索出某目录下的全部pdf文件 ######################获取同一个文件夹下的所有PDF文件名####################### # print(os.path.join(root,filespath)) ##########################合并同一个文件夹下所有PDF文件######################## # print '看看',os.path.dirname(each),'+', os.path.splitext(each.replace(os.path.dirname(each),'')) # # 读取源pdf文件 # print 'input:',input # 如果pdf文件已经加密,必须首先解密才能使用pyPdf # 获得源pdf文件中页面总数 # 分别将page添加到输出output中 # 最后写pdf文件 | 2.968939 | 3 |
arcpy_extensions/server_admin.py | jkeifer/arcpy-extensions | 1 | 6623544 | <filename>arcpy_extensions/server_admin.py
from __future__ import print_function
import os
import sys
import urllib
import urllib2
import httplib
import arcpy
import json
import re
from arcpy import env
class ServiceException(Exception):
    """Raised for any failed interaction with the ArcGIS Server admin API."""
    pass
class AgsAdmin(object):
    """Minimal client for the ArcGIS Server REST Admin API.

    Wraps token generation and the Stop/Start/Delete service operations
    exposed under http://<server>:<port>/arcgis/admin. Python 2 code
    (urllib / urllib2 / httplib).
    """

    def __init__(self, server, port, token=None):
        # Host name, admin port (usually 6080) and an admin token string.
        self.server = server
        self.port = port
        self.token = token

    @classmethod
    def connectWithToken(cls, server, port, token):
        """Build an admin connection from an already-generated token."""
        return AgsAdmin(server, port, token)

    @classmethod
    def connectWithoutToken(cls, server, port, adminUser, adminPass, expiration=60):
        """Build an admin connection by first requesting a token with credentials."""
        token = cls.getToken(server, port, adminUser, adminPass, expiration=expiration)
        return AgsAdmin(server, port, token)

    @staticmethod
    def getToken(server, port, adminUser, adminPass, expiration=60):
        """Get a token required for Admin changes.

        Raises ServiceException when the server returns no token
        (typically wrong credentials).
        """
        query_dict = {'username': adminUser,
                      'password': adminPass,  # fixed: was a redaction placeholder
                      'expiration': str(expiration),
                      'client': 'requestip'}
        query_string = urllib.urlencode(query_dict)
        url = "http://{}:{}/arcgis/admin/generateToken".format(server, port)
        token = json.loads(urllib.urlopen(url + "?f=json", query_string).read())
        try:
            return token["token"]
        except KeyError:
            raise ServiceException("No token returned. Check credentials.")

    def stopStartDeleteService(self, command, servicename, folder=None):
        """
        Function to stop, start or delete a service.
        Requires token, server, and port.

        command = Stop|Start|Delete
        servicename = a service in the <name>.<type> notation
        folder = optional server folder containing the service

        Returns 0 on success, raises ServiceException on failure.
        """
        if folder:
            # the folder segment must end with a slash in the admin URL
            if not folder.endswith("/"):
                folder = folder + "/"
        else:
            folder = ""

        service = urllib.quote(servicename.encode('utf8'))
        op_service_url = ("http://{0}:{1}/arcgis/admin/services/{2}{3}/{4}"
                          "?token={5}&f=json").format(self.server, self.port,
                                                      folder, service,
                                                      command, self.token)
        # POST with a non-empty body so urllib2 issues a POST, not a GET.
        status = urllib2.urlopen(op_service_url, ' ').read()
        if 'success' not in status:
            raise ServiceException("Could not {0} service {1} successfully.".format(command, servicename))
        return 0

    def stopService(self, servicename, folder=None):
        """Stop one service (folder keyword added, default preserves old behavior)."""
        return self.stopStartDeleteService("Stop", servicename, folder=folder)

    def startService(self, servicename, folder=None):
        """Start one service (folder keyword added, default preserves old behavior)."""
        return self.stopStartDeleteService("Start", servicename, folder=folder)

    def deleteService(self, servicename, folder=None):
        """Delete one service (folder keyword added, default preserves old behavior)."""
        return self.stopStartDeleteService("Delete", servicename, folder=folder)

    def servicesInFolder(self, foldername, namefilter=None):
        """Return the services in *foldername* as '<name>.<type>' strings.

        namefilter, when given, must be a valid regex; only services whose
        name matches it are returned.
        """
        # validate the filter up front so a bad regex fails fast
        if namefilter:
            try:
                re.compile(namefilter)
            except re.error:
                raise re.error("Specified namefilter argument must be a valid regex. Aborting.")

        listofservices = []
        folderURL = "/arcgis/admin/services/" + foldername
        # This request only needs the token and the response formatting parameter
        params = urllib.urlencode({'token': self.token, 'f': 'json'})
        headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}

        # Connect to URL and post parameters; the try/finally guarantees the
        # connection is closed even on the error paths (the original leaked
        # the connection when the server returned a JSON error object).
        httpConn = httplib.HTTPConnection(self.server, self.port)
        try:
            httpConn.request("POST", folderURL, params, headers)
            response = httpConn.getresponse()
            if response.status != 200:
                raise ServiceException("Could not read folder information.")
            data = response.read()
        finally:
            httpConn.close()

        # Check that data returned is not an error object
        if not assertJsonSuccess(data):
            raise ServiceException("Error when reading folder information. " + str(data))

        dataObj = json.loads(data)
        for item in dataObj['services']:
            # if namefilter, skip services whose name does not match
            if namefilter and not re.search(namefilter, item['serviceName']):
                continue
            listofservices.append(item['serviceName'] + "." + item['type'])
        return listofservices

    def stopStartDeleteAllServicesInFolder(self, command, foldername, namefilter=None):
        """Apply *command* to every (filtered) service in *foldername*.

        Raises ServiceException when the folder has no matching services;
        otherwise returns the number of services that failed.
        """
        errorcount = 0
        listofservices = self.servicesInFolder(foldername, namefilter=namefilter)
        if not listofservices:
            raise ServiceException("No services were found in the folder {0}.".format(foldername))
        for service in listofservices:
            try:
                self.stopStartDeleteService(command, service, foldername)
            except ServiceException as e:
                print(e)
                print("Failed to {0} service {1}.".format(command.lower(), service))
                errorcount += 1
        return errorcount

    def stopAllServicesInFolder(self, foldername, namefilter=None):
        return self.stopStartDeleteAllServicesInFolder("Stop", foldername, namefilter=namefilter)

    def startAllServicesInFolder(self, foldername, namefilter=None):
        return self.stopStartDeleteAllServicesInFolder("Start", foldername, namefilter=namefilter)

    def deleteAllServicesInFolder(self, foldername, namefilter=None):
        return self.stopStartDeleteAllServicesInFolder("Delete", foldername, namefilter=namefilter)
# A function that checks that the input JSON object
# is not an error object.
def assertJsonSuccess(data):
    """Return False (after printing the payload) when *data* decodes to a
    JSON object whose 'status' field is "error"; otherwise return True."""
    obj = json.loads(data)
    if 'status' not in obj or obj['status'] != "error":
        return True
    print("Error: JSON object returns an error. " + str(obj))
    return False
if __name__ == "__main__":
sys.exit(main())
| <filename>arcpy_extensions/server_admin.py
from __future__ import print_function
import os
import sys
import urllib
import urllib2
import httplib
import arcpy
import json
import re
from arcpy import env
class ServiceException(Exception):
pass
class AgsAdmin(object):
def __init__(self, server, port, token=None):
self.server = server
self.port = port
self.token = token
@classmethod
def connectWithToken(self, server, port, token):
return AgsAdmin(server, port, token)
@classmethod
def connectWithoutToken(self, server, port, adminUser, adminPass, expiration=60):
token = self.getToken(server, port, adminUser, adminPass, expiration=expiration)
return AgsAdmin(server, port, token)
@staticmethod
def getToken(server, port, adminUser, adminPass, expiration=60):
"""Get a token required for Admin changes"""
query_dict = {'username': adminUser,
'password': <PASSWORD>,
'expiration': str(expiration),
'client': 'requestip'}
query_string = urllib.urlencode(query_dict)
url = "http://{}:{}/arcgis/admin/generateToken".format(server, port)
token = json.loads(urllib.urlopen(url + "?f=json", query_string).read())
try:
return token["token"]
except KeyError:
raise ServiceException("No token returned. Check credientials.")
def stopStartDeleteService(self, command, servicename, folder=None):
"""
Function to stop, start or delete a service.
Requires token, server, and port.
command = Stop|Start|Delete
serviceList = List of services. A service must be in the <name>.<type> notation
"""
if folder:
if folder.endswith("/"):
pass
else:
folder = folder + "/"
else:
folder = ""
service = urllib.quote(servicename.encode('utf8'))
op_service_url = "http://{0}:{1}/arcgis/admin/services/{2}{3}/{4}?token={5}&f=json".format(self.server,
self.port,
folder,
service,
command,
self.token)
status = urllib2.urlopen(op_service_url, ' ').read()
if not 'success' in status:
raise ServiceException("Could not {0} service {1} successfully.".format(command, servicename))
else:
return 0
def stopService(self, servicename):
return self.stopStartDeleteService("Stop", servicename, folder=None)
def startService(self, servicename):
return self.stopStartDeleteService("Start", servicename, folder=None)
def deleteService(self, servicename):
return self.stopStartDeleteService("Delete", servicename, folder=None)
def servicesInFolder(self, foldername, namefilter=None):
"""
"""
# test if name filter is valid regex
if namefilter:
try:
re.compile(namefilter)
except re.error:
raise re.error("Specified namefilter argument must be a vaild regex. Aborting.")
listofservices = []
folderURL = "/arcgis/admin/services/" + foldername
# This request only needs the token and the response formatting parameter
params = urllib.urlencode({'token': self.token, 'f': 'json'})
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
# Connect to URL and post parameters
httpConn = httplib.HTTPConnection(self.server, self.port)
httpConn.request("POST", folderURL, params, headers)
# Read response
response = httpConn.getresponse()
if (response.status != 200):
httpConn.close()
raise ServiceException("Could not read folder information.")
else:
data = response.read()
# Check that data returned is not an error object
if not assertJsonSuccess(data):
raise ServiceException("Error when reading folder information. " + str(data))
# Deserialize response into Python object
dataObj = json.loads(data)
httpConn.close()
for item in dataObj['services']:
# if namefilter, check to see if name matches; if not, skip to next item
if namefilter:
if not re.search(namefilter, item['serviceName']):
continue
listofservices.append(item['serviceName'] + "." + item['type'])
return listofservices
def stopStartDeleteAllServicesInFolder(self, command, foldername, namefilter=None):
"""
"""
errorcount = 0
listofservices = self.servicesInFolder(foldername, namefilter=namefilter)
if not listofservices:
raise ServiceException("No services were found in the folder {0}.".format(foldername))
for service in listofservices:
try:
self.stopStartDeleteService(command, service, foldername)
except ServiceException as e:
print(e)
print("Failed to {0} service {1}.".format(command.lower(), service))
errorcount += 1
return errorcount
def stopAllServicesInFolder(self, foldername, namefilter=None):
return self.stopStartDeleteAllServicesInFolder("Stop", foldername, namefilter=namefilter)
def startAllServicesInFolder(self, foldername, namefilter=None):
return self.stopStartDeleteAllServicesInFolder("Start", foldername, namefilter=namefilter)
def deleteAllServicesInFolder(self, foldername, namefilter=None):
return self.stopStartDeleteAllServicesInFolder("Delete", foldername, namefilter=namefilter)
# A function that checks that the input JSON object
# is not an error object.
def assertJsonSuccess(data):
obj = json.loads(data)
if 'status' in obj and obj['status'] == "error":
print("Error: JSON object returns an error. " + str(obj))
return False
else:
return True
if __name__ == "__main__":
sys.exit(main())
| en | 0.746096 | Get a token required for Admin changes Function to stop, start or delete a service.
Requires token, server, and port.
command = Stop|Start|Delete
serviceList = List of services. A service must be in the <name>.<type> notation # test if name filter is valid regex # This request only needs the token and the response formatting parameter # Connect to URL and post parameters # Read response # Check that data returned is not an error object # Deserialize response into Python object # if namefilter, check to see if name matches; if not, skip to next item # A function that checks that the input JSON object # is not an error object. | 2.638635 | 3 |
ProjectApplication/project_core/migrations/0112_physical_person_add_orcid.py | code-review-doctor/project-application | 5 | 6623545 | <gh_stars>1-10
# Generated by Django 3.0.3 on 2020-03-20 15:26
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a nullable ORCID iD column to PhysicalPerson and to its
    # HistoricalPhysicalPerson table (presumably the django-simple-history
    # shadow model — confirm). Nullable, so existing rows are unaffected.
    # NOTE(review): the validator only accepts digits, but a real ORCID iD
    # may end in 'X' as its checksum character — verify this restriction
    # is intentional before reusing the regex elsewhere.

    dependencies = [
        ('project_core', '0111_remove_project_duration_months'),
    ]

    operations = [
        migrations.AddField(
            model_name='historicalphysicalperson',
            name='orcid_id',
            field=models.CharField(help_text='Orcid ID', max_length=19, null=True, validators=[django.core.validators.RegexValidator(code='Invalid format', message='Format orcid ID is 0000-0000-0000-0000', regex='^[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{4}$')]),
        ),
        migrations.AddField(
            model_name='physicalperson',
            name='orcid_id',
            field=models.CharField(help_text='Orcid ID', max_length=19, null=True, validators=[django.core.validators.RegexValidator(code='Invalid format', message='Format orcid ID is 0000-0000-0000-0000', regex='^[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{4}$')]),
        ),
    ]
| # Generated by Django 3.0.3 on 2020-03-20 15:26
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_core', '0111_remove_project_duration_months'),
]
operations = [
migrations.AddField(
model_name='historicalphysicalperson',
name='orcid_id',
field=models.CharField(help_text='Orcid ID', max_length=19, null=True, validators=[django.core.validators.RegexValidator(code='Invalid format', message='Format orcid ID is 0000-0000-0000-0000', regex='^[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{4}$')]),
),
migrations.AddField(
model_name='physicalperson',
name='orcid_id',
field=models.CharField(help_text='Orcid ID', max_length=19, null=True, validators=[django.core.validators.RegexValidator(code='Invalid format', message='Format orcid ID is 0000-0000-0000-0000', regex='^[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{4}$')]),
),
] | en | 0.765362 | # Generated by Django 3.0.3 on 2020-03-20 15:26 | 1.860766 | 2 |
citation.py | wangzefan666/SGC | 1 | 6623546 | import torch
import torch.nn.functional as F
import torch.optim as optim
from utils import load_citation, sgc_precompute, set_seed
from models import get_model
from metrics import accuracy
import pickle as pkl
from args import get_citation_args
from time import perf_counter
from models import GCN
# 加载参数
args = get_citation_args()
# 模型微调
if args.tuned:
if args.model == "SGC":
# 读取微调超参数 - 权重衰减
with open("{}-tuning/{}.txt".format(args.model, args.dataset), 'rb') as f:
args.weight_decay = pkl.load(f)['weight_decay']
print("using tuned weight decay: {}".format(args.weight_decay))
else:
raise NotImplemented
# 设置随机种子,固定结果
set_seed(args.seed, args.cuda)
# 邻接矩阵(归一化),特征,标签,训练集,验证集,测试集
adj, features, labels, idx_train, idx_val, idx_test = load_citation(args.dataset, args.normalization, args.cuda)
# 模型 SGC or GCN
model = get_model(args.model, features.size(1), labels.max().item() + 1, args.hidden, args.dropout, args.cuda)
# 预计算 (S^K * X)
if args.model == "SGC":
features, precompute_time = sgc_precompute(features, adj, args.degree)
print("pre-compute time: {:.4f}s".format(precompute_time))
def train_regression(model,
                     adj, features, labels,
                     idx_train, idx_val,
                     epochs=args.epochs, weight_decay=args.weight_decay,
                     lr=args.lr):
    """Full-batch training of an SGC head or a GCN with LBFGS.

    model      -- SGC-style linear head (called as model(x)) or a GCN
                  (called as model(x, adj))
    adj        -- normalized adjacency, used only for GCN forward passes
    features   -- node features (for SGC these are the precomputed S^K X)
    labels     -- node class labels
    idx_train/idx_val -- index tensors selecting the train / validation nodes
    Returns (trained model, last validation accuracy, wall-clock train time).

    NOTE(review): weight_decay is accepted but unused on the active LBFGS
    path (it was only wired into the commented-out Adam optimizer) — confirm
    whether L2 regularization is still intended here.
    """
    # optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    optimizer = optim.LBFGS(model.parameters(), lr=lr)
    t = perf_counter()
    for epoch in range(epochs):
        model.train()

        def closure():
            # loss closure re-evaluated by LBFGS during its line search
            optimizer.zero_grad()
            if isinstance(model, GCN):
                output = model(features, adj)
                output = output[idx_train]
            else:
                output = model(features[idx_train])
            loss_train = F.cross_entropy(output, labels[idx_train])
            loss_train.backward()
            return loss_train

        optimizer.step(closure)  # closure form is required by LBFGS

        # evaluate on the validation split after each epoch
        with torch.no_grad():
            model.eval()
            if isinstance(model, GCN):
                output = model(features, adj)
                output = output[idx_val]
            else:
                output = model(features[idx_val])
            acc_val = accuracy(output, labels[idx_val])
            print("training val acc:{:.4f}".format(acc_val))

    train_time = perf_counter() - t
    return model, acc_val, train_time
def test_regression(model, adj, features, labels, idx_test):
    """Evaluate *model* on the test split and return its accuracy."""
    with torch.no_grad():
        model.eval()
        # GCN consumes (features, adj); an SGC head consumes features only.
        if isinstance(model, GCN):
            preds = model(features, adj)[idx_test]
        else:
            preds = model(features[idx_test])
        return accuracy(preds, labels[idx_test])
# if args.model == "SGC":
model, acc_val, train_time = train_regression(model, adj, features, labels,
idx_train, idx_val,
args.epochs, args.weight_decay, args.lr)
acc_test = test_regression(model, adj, features, labels, idx_test)
print("Validation Accuracy: {:.4f} Test Accuracy: {:.4f}".format(acc_val, acc_test))
print("train time: {:.4f}s".format(train_time))
| import torch
import torch.nn.functional as F
import torch.optim as optim
from utils import load_citation, sgc_precompute, set_seed
from models import get_model
from metrics import accuracy
import pickle as pkl
from args import get_citation_args
from time import perf_counter
from models import GCN
# 加载参数
args = get_citation_args()
# 模型微调
if args.tuned:
if args.model == "SGC":
# 读取微调超参数 - 权重衰减
with open("{}-tuning/{}.txt".format(args.model, args.dataset), 'rb') as f:
args.weight_decay = pkl.load(f)['weight_decay']
print("using tuned weight decay: {}".format(args.weight_decay))
else:
raise NotImplemented
# 设置随机种子,固定结果
set_seed(args.seed, args.cuda)
# 邻接矩阵(归一化),特征,标签,训练集,验证集,测试集
adj, features, labels, idx_train, idx_val, idx_test = load_citation(args.dataset, args.normalization, args.cuda)
# 模型 SGC or GCN
model = get_model(args.model, features.size(1), labels.max().item() + 1, args.hidden, args.dropout, args.cuda)
# 预计算 (S^K * X)
if args.model == "SGC":
features, precompute_time = sgc_precompute(features, adj, args.degree)
print("pre-compute time: {:.4f}s".format(precompute_time))
def train_regression(model,
adj, features, labels,
idx_train, idx_val,
epochs=args.epochs, weight_decay=args.weight_decay,
lr=args.lr):
# optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
optimizer = optim.LBFGS(model.parameters(), lr=lr)
t = perf_counter()
for epoch in range(epochs):
model.train()
def closure():
optimizer.zero_grad()
if isinstance(model, GCN):
output = model(features, adj)
output = output[idx_train]
else:
output = model(features[idx_train])
loss_train = F.cross_entropy(output, labels[idx_train])
loss_train.backward()
return loss_train
optimizer.step(closure) # LBFGS专用
with torch.no_grad():
model.eval()
if isinstance(model, GCN):
output = model(features, adj)
output = output[idx_val]
else:
output = model(features[idx_val])
acc_val = accuracy(output, labels[idx_val])
print("training val acc:{:.4f}".format(acc_val))
train_time = perf_counter() - t
return model, acc_val, train_time
def test_regression(model, adj, features, labels, idx_test):
with torch.no_grad():
model.eval()
if isinstance(model, GCN):
return accuracy(model(features, adj)[idx_test], labels[idx_test])
else:
return accuracy(model(features[idx_test]), labels[idx_test])
# if args.model == "SGC":
model, acc_val, train_time = train_regression(model, adj, features, labels,
idx_train, idx_val,
args.epochs, args.weight_decay, args.lr)
acc_test = test_regression(model, adj, features, labels, idx_test)
print("Validation Accuracy: {:.4f} Test Accuracy: {:.4f}".format(acc_val, acc_test))
print("train time: {:.4f}s".format(train_time))
| zh | 0.593771 | # 加载参数 # 模型微调 # 读取微调超参数 - 权重衰减 # 设置随机种子,固定结果 # 邻接矩阵(归一化),特征,标签,训练集,验证集,测试集 # 模型 SGC or GCN # 预计算 (S^K * X) # optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay) # LBFGS专用 # if args.model == "SGC": | 2.33598 | 2 |
monlan/agents/CompositeAgent.py | CameleoGrey/Monlan | 3 | 6623547 | import numpy as np
from copy import deepcopy
from datetime import datetime
class CompositeAgent():
def __init__(self, opener, holdBuyer, holdSeller):
self.agents = { "opener": opener,
"buy": holdBuyer,
"sell": holdSeller }
self.openerActionsDict = {0: "buy", 1: "hold", 2: "sell"}
self.buyerActionDict = {0: "hold", 1: "buy"}
self.sellerActionDict = {0: "hold", 1: "sell"}
self.nSteps = { "opener": 0, "buy": 0, "sell": 0 }
self.activeAgent = "opener"
self.maxDeposit = -10000000
self.maxBackTestReward = -10000000
self.lastSaveEp = -1
pass
# do opener action
# if action == hold add experience to opener
# else save state of open state and switch active agent to closer
# while not close add experience to closer
# if close position then add experience to closer, switch to opener
# add saved obs, action and reward of closed position to opener too
# repeat opener cycle
    def fit_agent(self, env, nEpisodes, plotScores, saveBest = True, saveFreq=5, nWarmUp=0,
                  uniformEps = False, synEps = True, backTestEnv=None, continueFit=False,
                  saveDir = None, saveName = None):
        """Train the opener agent and both closer agents by interacting with *env*.

        The opener picks buy/hold/sell; when it opens a deal, control passes to
        the matching closer agent, which holds or closes the position. On close,
        the closer's reward is also credited to the opener's opening action.

        env         -- trading environment; note env.step() returns 4 values in
                       the opener/hold phases and 5 values on a close action
        nEpisodes   -- number of training episodes (nWarmUp is added on top
                       when uniformEps is True)
        plotScores  -- save a matplotlib score plot each episode when True
        saveBest    -- save whenever a new best deposit (or back-test reward,
                       when backTestEnv is given) is reached; otherwise save
                       every saveFreq episodes
        nWarmUp     -- initial episodes forced to epsilon = 1.0
        uniformEps  -- use a precomputed linearly decaying epsilon schedule
        synEps      -- keep the closers' epsilon synced with the opener's
        backTestEnv -- optional environment used to score candidate models
        continueFit -- keep previously recorded best results instead of resetting
        Returns the index of the episode at which the agent was last saved.
        """
        scores, episodes = [], []

        if continueFit == False:
            # fresh run: forget the best results of any previous fit
            self.maxDeposit = -10000000
            self.maxBackTestReward = -10000000
            self.lastSaveEp = -1

        # precomputed epsilon schedule: nWarmUp episodes at 1.0, then linear decay
        uniEpsList = []
        if uniformEps:
            for i in range(nWarmUp):
                uniEpsList.append(1.0)
            epsStep = 1.0 / nEpisodes
            for i in range(nEpisodes):
                uniEpsList.append( 1.0 - i * epsStep )
            nEpisodes = nEpisodes + nWarmUp

        for e in range(nEpisodes):
            done = False
            score = 0
            #for key in self.nSteps.keys():
            #    self.nSteps[key] = 0
            openState = env.reset()
            while not done:
                # get action for the current state and go one step in environment
                openAction = self.agents["opener"].get_action(openState)

                ##################################################
                # NOTE(review): this mutates the shared openerActionsDict so the
                # forced action mirrors the parallel deal being closed — confirm
                # the remapping is meant to persist for later episodes.
                if env.parallelOpenState == "closed_original_deal":
                    if env.savedParallelAction == "buy":
                        openAction = 2
                        self.openerActionsDict[openAction] = "sell"
                    elif env.savedParallelAction == "sell":
                        openAction = 0
                        self.openerActionsDict[openAction] = "buy"
                ##################################################

                next_state, openReward, openDone, openInfo = env.step(openAction)
                if openDone:
                    done = True
                elif self.openerActionsDict[openAction] in ["buy", "sell"]:
                    # a deal was opened: hand control to the matching closer agent
                    chosenCloser = self.agents[ self.openerActionsDict[openAction] ]
                    closerState = deepcopy(next_state)
                    closerName = self.openerActionsDict[openAction]
                    while True: # buy/sell for closer
                        closerAction = chosenCloser.get_action(closerState)
                        if closerAction == 1:
                            # closer decided to close the deal; env returns both
                            # the next opener state and the next closer state
                            nextOpenerState, nextCloserState, closerReward, closerDone, closerInfo = env.step(closerAction)
                            if closerDone == True:
                                done = True
                                break
                            chosenCloser.append_sample(closerState, 1, closerReward[1], nextCloserState, closerDone)
                            #chosenCloser.append_sample(closerState, 0, closerReward[0], nextCloserState, closerDone)
                            chosenCloser.train_model()
                            self.nSteps[closerName] += 1
                            if self.nSteps[closerName] % chosenCloser.batch_size == 0:
                                self.nSteps[closerName] = 0
                                chosenCloser.update_target_model()
                            ######################################
                            # epsilon bookkeeping: schedule / warm-up / sync
                            if uniformEps:
                                for agentName in self.agents.keys():
                                    self.agents[agentName].epsilon = uniEpsList[e]
                            if e < nWarmUp:
                                for agentName in self.agents.keys():
                                    self.agents[agentName].epsilon = 1.0
                            if synEps:
                                for agentName in self.agents.keys():
                                    self.agents[agentName].epsilon = self.agents["opener"].epsilon
                            ######################################
                            if closerDone:
                                # every episode update the target model to be same with model
                                chosenCloser.update_target_model()
                                print("{}: ".format(env.iStep) + str(env.deposit))
                            # the closer's closing reward is also the opener's
                            # reward for having opened this deal
                            openReward = closerReward[1]
                            openDone = closerDone
                            self.agents["opener"].append_sample(openState, openAction, openReward, nextOpenerState, openDone)
                            #self.agents["opener"].append_sample(openState, openAction, openReward, openState ,openDone)
                            openState = nextOpenerState
                            self.agents["opener"].train_model()
                            self.printInfo(env)
                            break
                        elif closerAction == 0:
                            # closer holds the open position for one more step
                            next_state, closerReward, closerDone, closerInfo = env.step(closerAction)
                            if closerDone:
                                done = True
                                chosenCloser.update_target_model()
                                # if hold up to end of history data then evaluate action like close position
                                #closerAction = 1
                                break
                            #chosenCloser.append_sample(closerState, 1, closerReward[1], next_state, closerDone)
                            chosenCloser.append_sample(closerState, 0, closerReward[0], next_state, closerDone)
                            closerState = next_state
                            ###########################################
                            chosenCloser.train_model()
                            if self.nSteps[closerName] % chosenCloser.batch_size == 0:
                                self.nSteps[closerName] = 0
                                chosenCloser.update_target_model()
                            ###########################################
                            ######################################
                            # epsilon bookkeeping: schedule / warm-up / sync
                            if uniformEps:
                                for agentName in self.agents.keys():
                                    self.agents[agentName].epsilon = uniEpsList[e]
                            if e < nWarmUp:
                                for agentName in self.agents.keys():
                                    self.agents[agentName].epsilon = 1.0
                            if synEps:
                                for agentName in self.agents.keys():
                                    self.agents[agentName].epsilon = self.agents["opener"].epsilon
                            ######################################
                            if closerInfo["limitOrder"] == True:
                                # a stop-loss / take-profit order closed the
                                # position; resume from the opener state the env
                                # provides
                                openState = closerInfo["nextOpenerState"]
                                openReward = closerReward[0]
                                self.printInfo(env)
                                break
                            self.printInfo(env)
                else:
                    # opener chose "hold": record the transition directly
                    if openDone:
                        done = True
                    self.agents["opener"].append_sample(openState, openAction, openReward, next_state, openDone)
                    #self.agents["opener"].append_sample(openState, openAction, openReward, openState, openDone)
                    openState = next_state
                    self.agents["opener"].train_model()
                    self.printInfo(env)

                score += openReward
                self.nSteps["opener"] += 1
                if self.nSteps["opener"] % self.agents["opener"].batch_size == 0: #memory_size?
                    self.nSteps["opener"] = 0
                    self.agents["opener"].update_target_model()

                ######################################
                # epsilon bookkeeping: schedule / warm-up / sync
                if uniformEps:
                    for agentName in self.agents.keys():
                        self.agents[agentName].epsilon = uniEpsList[e]
                if e < nWarmUp:
                    for agentName in self.agents.keys():
                        self.agents[agentName].epsilon = 1.0
                if synEps:
                    for agentName in self.agents.keys():
                        self.agents[agentName].epsilon = self.agents["opener"].epsilon

                if done:
                    # every episode update the target model to be same with model
                    self.agents["opener"].update_target_model()
                    print("{}: ".format(env.iStep) + str(env.deposit))

            ######################################
            # end-of-episode epsilon bookkeeping for the opener
            if uniformEps:
                self.agents["opener"].epsilon = uniEpsList[e]
            if e < nWarmUp:
                self.agents["opener"].epsilon = 1.0
            if synEps:
                for agentName in self.agents.keys():
                    self.agents[agentName].epsilon = self.agents["opener"].epsilon
            ######################################

            # every episode, plot the play time
            if plotScores == True:
                import matplotlib.pyplot as plt
                scores.append(score)
                episodes.append(e)
                plt.close()
                plt.plot(episodes, scores, 'b')
                if saveDir is None:
                    plt.savefig("./test_dqn.png")
                else:
                    plt.savefig("{}score_plot.png".format(saveDir))
            print("episode:", e, " score:", score, " memory length:",
                  len(self.agents["opener"].memory), " epsilon:", self.agents["opener"].epsilon)

            # save the model
            if saveBest and env.deposit > self.maxDeposit and backTestEnv is None:
                self.maxDeposit = env.deposit
                print("Save new best model. Deposit: {}".format(self.maxDeposit))
                if saveDir is None and saveName is None:
                    self.save_agent("./", "best_composite")
                else:
                    self.save_agent(saveDir, saveName)
                self.lastSaveEp = e
            elif saveBest and backTestEnv is not None:
                # score the candidate on the back-test environment instead
                backStat = self.use_agent(backTestEnv)
                backReward = np.sum(backStat)
                avgReward = backReward / len(backStat)
                if self.maxBackTestReward < backReward:
                    self.maxBackTestReward = backReward
                    #if self.maxBackTestReward < avgReward:
                    #    self.maxBackTestReward = avgReward
                    print("Save new best model. Backtest reward: {} | Avg reward: {}".format(backReward, avgReward))
                    if saveDir is None and saveName is None:
                        self.save_agent("./", "best_composite")
                        print("Best agent saved")
                    else:
                        self.save_agent(saveDir, saveName)
                    self.lastSaveEp = e
            elif saveBest == False and e % saveFreq == 0:
                # periodic checkpoint when best-only saving is disabled
                if saveDir is None and saveName is None:
                    self.save_agent("./", "checkpoint_composite")
                else:
                    self.save_agent(saveDir, saveName)
                self.lastSaveEp = e

        return self.lastSaveEp
def use_agent(self, env, timeConstraint=None):
startUseTime = datetime.now()
savedEps = {}
for agentName in self.agents.keys():
savedEps[agentName] = self.agents[agentName].epsilon
#####################################
#self.agents["opener"].epsilon = 0.0
#self.agents["buy"].epsilon = 0.125
#self.agents["sell"].epsilon = 0.125
######################################
#######################################
for agentName in self.agents.keys():
self.agents[agentName].epsilon = 0.0
for agentName in self.agents.keys():
print( agentName + "_eps: " + str(self.agents[agentName].epsilon))
########################################
dealsStatistics=[]
score = 0
openState = env.reset()
done = False
while not done:
# get action for the current state and go one step in environment
openAction = self.agents["opener"].get_action(openState)
next_state, openReward, openDone, openInfo = env.step(openAction)
if openDone:
done = True
elif self.openerActionsDict[openAction] in ["buy", "sell"]:
chosenCloser = self.agents[self.openerActionsDict[openAction]]
closerState = deepcopy(next_state)
closerName = self.openerActionsDict[openAction]
while True: # buy/sell for closer
closerAction = chosenCloser.get_action(closerState)
if closerAction == 1:
nextOpenerState, nextCloserState, closerReward, closerDone, closerInfo = env.step(
closerAction)
if closerDone == True:
done = True
break
self.nSteps[closerName] += 1
if closerDone:
print("{}: ".format(env.iStep) + str(env.deposit))
openReward = closerReward[1]
openState = nextOpenerState
dealsStatistics.append(closerReward[1])
self.printInfo(env)
if timeConstraint is not None:
currentUseTime = datetime.now() - startUseTime
if currentUseTime > timeConstraint:
done = True
break
break
elif closerAction == 0:
next_state, closerReward, closerDone, closerInfo = env.step(closerAction)
if closerDone:
done = True
break
if closerInfo["limitOrder"] == True:
openState = closerInfo["nextOpenerState"]
openReward = closerReward[0]
dealsStatistics.append(closerReward[0])
break
"""if timeConstraint is not None:
currentUseTime = datetime.now() - startUseTime
if currentUseTime > timeConstraint:
closePos = 1
env.step(closePos)
done = True
break"""
closerState = next_state
self.printInfo(env)
else:
if openDone:
done = True
if timeConstraint is not None:
currentUseTime = datetime.now() - startUseTime
if currentUseTime > timeConstraint:
done = True
break
openState = next_state
self.printInfo(env)
score += openReward
self.nSteps["opener"] += 1
if done:
print("{}: ".format(env.iStep) + str(env.deposit))
for agentName in self.agents.keys():
self.agents[agentName].epsilon = savedEps[agentName]
return dealsStatistics
def printInfo(self, env):
if env.iStep % 100 == 0:
print("Step: {}".format(env.iStep) + " | Deposit: {}".format(str(env.deposit)))
######################################
print("oe: {}".format(self.agents["opener"].epsilon), end=" ")
print("be: {}".format(self.agents["buy"].epsilon), end=" ")
print("se: {}".format(self.agents["sell"].epsilon))
#######################################
print("om: {}".format(len(self.agents["opener"].memory)), end=" ")
print("bm: {}".format(len(self.agents["buy"].memory)), end=" ")
print("sm: {}".format(len(self.agents["sell"].memory)))
#######################################
def save_agent(self, path, name):
#import joblib
#with open(path + "/" + name + ".pkl", mode="wb") as agentFile:
# joblib.dump(self, agentFile)
for agentName in self.agents.keys():
self.agents[agentName].save_agent( path, name + "_" + agentName )
pass
def load_agent(self, path, name, dropSupportModel = False):
#import joblib
#loadedAgent = None
#with open(path + "/" + name + ".pkl", mode="rb") as agentFile:
# loadedAgent = joblib.load(agentFile)
for agentName in self.agents.keys():
self.agents[agentName] = self.agents[agentName].load_agent( path, name + "_" + agentName, dropSupportModel=dropSupportModel )
self.nSteps = {"opener": 0, "buy": 0, "sell": 0}
self.activeAgent = "opener"
return self | import numpy as np
from copy import deepcopy
from datetime import datetime
class CompositeAgent():
def __init__(self, opener, holdBuyer, holdSeller):
self.agents = { "opener": opener,
"buy": holdBuyer,
"sell": holdSeller }
self.openerActionsDict = {0: "buy", 1: "hold", 2: "sell"}
self.buyerActionDict = {0: "hold", 1: "buy"}
self.sellerActionDict = {0: "hold", 1: "sell"}
self.nSteps = { "opener": 0, "buy": 0, "sell": 0 }
self.activeAgent = "opener"
self.maxDeposit = -10000000
self.maxBackTestReward = -10000000
self.lastSaveEp = -1
pass
# do opener action
# if action == hold add experience to opener
# else save state of open state and switch active agent to closer
# while not close add experience to closer
# if close position then add experience to closer, switch to opener
# add saved obs, action and reward of closed position to opener too
# repeat opener cycle
def fit_agent(self, env, nEpisodes, plotScores, saveBest = True, saveFreq=5, nWarmUp=0,
uniformEps = False, synEps = True, backTestEnv=None, continueFit=False,
saveDir = None, saveName = None):
scores, episodes = [], []
if continueFit == False:
self.maxDeposit = -10000000
self.maxBackTestReward = -10000000
self.lastSaveEp = -1
uniEpsList = []
if uniformEps:
for i in range(nWarmUp):
uniEpsList.append(1.0)
epsStep = 1.0 / nEpisodes
for i in range(nEpisodes):
uniEpsList.append( 1.0 - i * epsStep )
nEpisodes = nEpisodes + nWarmUp
for e in range(nEpisodes):
done = False
score = 0
#for key in self.nSteps.keys():
# self.nSteps[key] = 0
openState = env.reset()
while not done:
# get action for the current state and go one step in environment
openAction = self.agents["opener"].get_action(openState)
##################################################
if env.parallelOpenState == "closed_original_deal":
if env.savedParallelAction == "buy":
openAction = 2
self.openerActionsDict[openAction] = "sell"
elif env.savedParallelAction == "sell":
openAction = 0
self.openerActionsDict[openAction] = "buy"
##################################################
next_state, openReward, openDone, openInfo = env.step(openAction)
if openDone:
done = True
elif self.openerActionsDict[openAction] in ["buy", "sell"]:
chosenCloser = self.agents[ self.openerActionsDict[openAction] ]
closerState = deepcopy(next_state)
closerName = self.openerActionsDict[openAction]
while True: # buy/sell for closer
closerAction = chosenCloser.get_action(closerState)
if closerAction == 1:
nextOpenerState, nextCloserState, closerReward, closerDone, closerInfo = env.step(closerAction)
if closerDone == True:
done = True
break
chosenCloser.append_sample(closerState, 1, closerReward[1], nextCloserState, closerDone)
#chosenCloser.append_sample(closerState, 0, closerReward[0], nextCloserState, closerDone)
chosenCloser.train_model()
self.nSteps[closerName] += 1
if self.nSteps[closerName] % chosenCloser.batch_size == 0:
self.nSteps[closerName] = 0
chosenCloser.update_target_model()
######################################
if uniformEps:
for agentName in self.agents.keys():
self.agents[agentName].epsilon = uniEpsList[e]
if e < nWarmUp:
for agentName in self.agents.keys():
self.agents[agentName].epsilon = 1.0
if synEps:
for agentName in self.agents.keys():
self.agents[agentName].epsilon = self.agents["opener"].epsilon
######################################
if closerDone:
# every episode update the target model to be same with model
chosenCloser.update_target_model()
print("{}: ".format(env.iStep) + str(env.deposit))
openReward = closerReward[1]
openDone = closerDone
self.agents["opener"].append_sample(openState, openAction, openReward, nextOpenerState, openDone)
#self.agents["opener"].append_sample(openState, openAction, openReward, openState ,openDone)
openState = nextOpenerState
self.agents["opener"].train_model()
self.printInfo(env)
break
elif closerAction == 0:
next_state, closerReward, closerDone, closerInfo = env.step(closerAction)
if closerDone:
done = True
chosenCloser.update_target_model()
# if hold up to end of history data then evaluate action like close position
#closerAction = 1
break
#chosenCloser.append_sample(closerState, 1, closerReward[1], next_state, closerDone)
chosenCloser.append_sample(closerState, 0, closerReward[0], next_state, closerDone)
closerState = next_state
###########################################
chosenCloser.train_model()
if self.nSteps[closerName] % chosenCloser.batch_size == 0:
self.nSteps[closerName] = 0
chosenCloser.update_target_model()
###########################################
######################################
if uniformEps:
for agentName in self.agents.keys():
self.agents[agentName].epsilon = uniEpsList[e]
if e < nWarmUp:
for agentName in self.agents.keys():
self.agents[agentName].epsilon = 1.0
if synEps:
for agentName in self.agents.keys():
self.agents[agentName].epsilon = self.agents["opener"].epsilon
######################################
if closerInfo["limitOrder"] == True:
openState = closerInfo["nextOpenerState"]
openReward = closerReward[0]
self.printInfo(env)
break
self.printInfo(env)
else:
if openDone:
done = True
self.agents["opener"].append_sample(openState, openAction, openReward, next_state, openDone)
#self.agents["opener"].append_sample(openState, openAction, openReward, openState, openDone)
openState = next_state
self.agents["opener"].train_model()
self.printInfo(env)
score += openReward
self.nSteps["opener"] += 1
if self.nSteps["opener"] % self.agents["opener"].batch_size == 0: #memory_size?
self.nSteps["opener"] = 0
self.agents["opener"].update_target_model()
######################################
if uniformEps:
for agentName in self.agents.keys():
self.agents[agentName].epsilon = uniEpsList[e]
if e < nWarmUp:
for agentName in self.agents.keys():
self.agents[agentName].epsilon = 1.0
if synEps:
for agentName in self.agents.keys():
self.agents[agentName].epsilon = self.agents["opener"].epsilon
if done:
# every episode update the target model to be same with model
self.agents["opener"].update_target_model()
print("{}: ".format(env.iStep) + str(env.deposit))
######################################
if uniformEps:
self.agents["opener"].epsilon = uniEpsList[e]
if e < nWarmUp:
self.agents["opener"].epsilon = 1.0
if synEps:
for agentName in self.agents.keys():
self.agents[agentName].epsilon = self.agents["opener"].epsilon
######################################
# every episode, plot the play time
if plotScores == True:
import matplotlib.pyplot as plt
scores.append(score)
episodes.append(e)
plt.close()
plt.plot(episodes, scores, 'b')
if saveDir is None:
plt.savefig("./test_dqn.png")
else:
plt.savefig("{}score_plot.png".format(saveDir))
print("episode:", e, " score:", score, " memory length:",
len(self.agents["opener"].memory), " epsilon:", self.agents["opener"].epsilon)
# save the model
if saveBest and env.deposit > self.maxDeposit and backTestEnv is None:
self.maxDeposit = env.deposit
print("Save new best model. Deposit: {}".format(self.maxDeposit))
if saveDir is None and saveName is None:
self.save_agent("./", "best_composite")
else:
self.save_agent(saveDir, saveName)
self.lastSaveEp = e
elif saveBest and backTestEnv is not None:
backStat = self.use_agent(backTestEnv)
backReward = np.sum(backStat)
avgReward = backReward / len(backStat)
if self.maxBackTestReward < backReward:
self.maxBackTestReward = backReward
#if self.maxBackTestReward < avgReward:
# self.maxBackTestReward = avgReward
print("Save new best model. Backtest reward: {} | Avg reward: {}".format(backReward, avgReward))
if saveDir is None and saveName is None:
self.save_agent("./", "best_composite")
print("Best agent saved")
else:
self.save_agent(saveDir, saveName)
self.lastSaveEp = e
elif saveBest == False and e % saveFreq == 0:
if saveDir is None and saveName is None:
self.save_agent("./", "checkpoint_composite")
else:
self.save_agent(saveDir, saveName)
self.lastSaveEp = e
return self.lastSaveEp
def use_agent(self, env, timeConstraint=None):
startUseTime = datetime.now()
savedEps = {}
for agentName in self.agents.keys():
savedEps[agentName] = self.agents[agentName].epsilon
#####################################
#self.agents["opener"].epsilon = 0.0
#self.agents["buy"].epsilon = 0.125
#self.agents["sell"].epsilon = 0.125
######################################
#######################################
for agentName in self.agents.keys():
self.agents[agentName].epsilon = 0.0
for agentName in self.agents.keys():
print( agentName + "_eps: " + str(self.agents[agentName].epsilon))
########################################
dealsStatistics=[]
score = 0
openState = env.reset()
done = False
while not done:
# get action for the current state and go one step in environment
openAction = self.agents["opener"].get_action(openState)
next_state, openReward, openDone, openInfo = env.step(openAction)
if openDone:
done = True
elif self.openerActionsDict[openAction] in ["buy", "sell"]:
chosenCloser = self.agents[self.openerActionsDict[openAction]]
closerState = deepcopy(next_state)
closerName = self.openerActionsDict[openAction]
while True: # buy/sell for closer
closerAction = chosenCloser.get_action(closerState)
if closerAction == 1:
nextOpenerState, nextCloserState, closerReward, closerDone, closerInfo = env.step(
closerAction)
if closerDone == True:
done = True
break
self.nSteps[closerName] += 1
if closerDone:
print("{}: ".format(env.iStep) + str(env.deposit))
openReward = closerReward[1]
openState = nextOpenerState
dealsStatistics.append(closerReward[1])
self.printInfo(env)
if timeConstraint is not None:
currentUseTime = datetime.now() - startUseTime
if currentUseTime > timeConstraint:
done = True
break
break
elif closerAction == 0:
next_state, closerReward, closerDone, closerInfo = env.step(closerAction)
if closerDone:
done = True
break
if closerInfo["limitOrder"] == True:
openState = closerInfo["nextOpenerState"]
openReward = closerReward[0]
dealsStatistics.append(closerReward[0])
break
"""if timeConstraint is not None:
currentUseTime = datetime.now() - startUseTime
if currentUseTime > timeConstraint:
closePos = 1
env.step(closePos)
done = True
break"""
closerState = next_state
self.printInfo(env)
else:
if openDone:
done = True
if timeConstraint is not None:
currentUseTime = datetime.now() - startUseTime
if currentUseTime > timeConstraint:
done = True
break
openState = next_state
self.printInfo(env)
score += openReward
self.nSteps["opener"] += 1
if done:
print("{}: ".format(env.iStep) + str(env.deposit))
for agentName in self.agents.keys():
self.agents[agentName].epsilon = savedEps[agentName]
return dealsStatistics
def printInfo(self, env):
if env.iStep % 100 == 0:
print("Step: {}".format(env.iStep) + " | Deposit: {}".format(str(env.deposit)))
######################################
print("oe: {}".format(self.agents["opener"].epsilon), end=" ")
print("be: {}".format(self.agents["buy"].epsilon), end=" ")
print("se: {}".format(self.agents["sell"].epsilon))
#######################################
print("om: {}".format(len(self.agents["opener"].memory)), end=" ")
print("bm: {}".format(len(self.agents["buy"].memory)), end=" ")
print("sm: {}".format(len(self.agents["sell"].memory)))
#######################################
def save_agent(self, path, name):
#import joblib
#with open(path + "/" + name + ".pkl", mode="wb") as agentFile:
# joblib.dump(self, agentFile)
for agentName in self.agents.keys():
self.agents[agentName].save_agent( path, name + "_" + agentName )
pass
def load_agent(self, path, name, dropSupportModel = False):
#import joblib
#loadedAgent = None
#with open(path + "/" + name + ".pkl", mode="rb") as agentFile:
# loadedAgent = joblib.load(agentFile)
for agentName in self.agents.keys():
self.agents[agentName] = self.agents[agentName].load_agent( path, name + "_" + agentName, dropSupportModel=dropSupportModel )
self.nSteps = {"opener": 0, "buy": 0, "sell": 0}
self.activeAgent = "opener"
return self | en | 0.31572 | # do opener action # if action == hold add experience to opener # else save state of open state and switch active agent to closer # while not close add experience to closer # if close position then add experience to closer, switch to opener # add saved obs, action and reward of closed position to opener too # repeat opener cycle #for key in self.nSteps.keys(): # self.nSteps[key] = 0 # get action for the current state and go one step in environment ################################################## ################################################## # buy/sell for closer #chosenCloser.append_sample(closerState, 0, closerReward[0], nextCloserState, closerDone) ###################################### ###################################### # every episode update the target model to be same with model #self.agents["opener"].append_sample(openState, openAction, openReward, openState ,openDone) # if hold up to end of history data then evaluate action like close position #closerAction = 1 #chosenCloser.append_sample(closerState, 1, closerReward[1], next_state, closerDone) ########################################### ########################################### ###################################### ###################################### #self.agents["opener"].append_sample(openState, openAction, openReward, openState, openDone) #memory_size? 
###################################### # every episode update the target model to be same with model ###################################### ###################################### # every episode, plot the play time # save the model #if self.maxBackTestReward < avgReward: # self.maxBackTestReward = avgReward ##################################### #self.agents["opener"].epsilon = 0.0 #self.agents["buy"].epsilon = 0.125 #self.agents["sell"].epsilon = 0.125 ###################################### ####################################### ######################################## # get action for the current state and go one step in environment # buy/sell for closer if timeConstraint is not None: currentUseTime = datetime.now() - startUseTime if currentUseTime > timeConstraint: closePos = 1 env.step(closePos) done = True break ###################################### ####################################### ####################################### #import joblib #with open(path + "/" + name + ".pkl", mode="wb") as agentFile: # joblib.dump(self, agentFile) #import joblib #loadedAgent = None #with open(path + "/" + name + ".pkl", mode="rb") as agentFile: # loadedAgent = joblib.load(agentFile) | 2.316909 | 2 |
project_tools/nrcan_p2/evaluation/bert_keyword_prediction.py | NRCan/Geoscience_Language_Models | 0 | 6623548 | # coding=utf-8
# Copyright (C) 2021 ServiceNow, Inc.
""" Finetuning for running PAIRING and MULTICLASS-MULTILABEL classification finetuning
on NRCan data and the original Glue benchmarks
This script is a SIGNIFICANTLY MODIFIED version of the run_glue.py script provided by
HuggingFace in the Transformers repo:
https://github.com/huggingface/transformers/blob/master/examples/text-classification/run_glue.py
It has been extended to perform multiclass sequence classification and sentence pairing
classification and to handle the format of our inputs.
It was originally made available by HuggingFace under the Apache 2.0 license.
"""
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.
import logging
import os
import numpy as np
import torch
import json
import shutil
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import pandas as pd
# HACK: hf datasets repo cannot detect disk space on a cluster,
# so let it know there's some space left
shutil.disk_usage = lambda x: shutil._ntuple_diskusage(1, 1, 1)
from datasets import load_dataset, load_metric
from sklearn.metrics import (
accuracy_score,
precision_recall_fscore_support,
multilabel_confusion_matrix,
confusion_matrix
)
import transformers
from transformers import (
BertTokenizer,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PretrainedConfig,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import is_main_process
from nrcan_p2.evaluation.hf_modeling_auto import AutoModelForMultiLabelSequenceClassification
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    # Column in the input files that holds the classification label(s).
    label_column_name: str = field(metadata={"help": "Which column contains the label"})
    # Optional GLUE task name; when given, the dataset is downloaded from the hub
    # and `train_file`/`validation_file` are ignored.
    task_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    dataset_cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the datasets models downloaded from huggingface.co"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        """Validate the task-name / data-file combination supplied on the command line.

        Either a known GLUE ``task_name`` must be given, or both ``train_file``
        and ``validation_file`` must be provided with matching csv/json extensions.

        Raises:
            ValueError: if the task name is unknown, if neither a task nor
                train/validation files were given, or if the file extensions are
                unsupported or inconsistent.
        """
        if self.task_name is not None:
            self.task_name = self.task_name.lower()
            if self.task_name not in task_to_keys.keys():
                raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys()))
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task or a training/validation file.")
        else:
            # Use explicit raises instead of `assert` so these validation checks
            # survive running Python with -O (assert statements are stripped then).
            train_extension = self.train_file.split(".")[-1]
            if train_extension not in ["csv", "json"]:
                raise ValueError("`train_file` should be a csv or a json file.")
            validation_extension = self.validation_file.split(".")[-1]
            if validation_extension != train_extension:
                raise ValueError(
                    "`validation_file` should have the same extension (csv or json) as `train_file`."
                )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    # Either a hub model identifier or a local directory containing model weights.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
    # When True, per-class weights are computed from the training-label frequencies
    # and attached to the model (see the pos_weights computation in main()).
    use_class_weights: bool = field(
        default=False, metadata={"help": "Use weights for each class in a multiclass or pairing setting"}
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
shutil.disk_usage = lambda x: shutil._ntuple_diskusage(1, 1, 1)
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
label_column_name = data_args.label_column_name
if data_args.task_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset("glue", data_args.task_name)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
train_extension = data_args.train_file.split(".")[-1]
test_extension = data_args.test_file.split(".")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
data_files["test"] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}")
if data_args.train_file.endswith(".csv"):
# Loading a dataset from local csv files
datasets = load_dataset("csv", data_files=data_files, cache_dir=data_args.dataset_cache_dir)
else:
# Loading a dataset from local json files
datasets = load_dataset("json", data_files=data_files, field='data', cache_dir=data_args.dataset_cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
if data_args.task_name is not None:
is_regression = data_args.task_name == "stsb"
is_multilabel = None
if not is_regression:
label_list = datasets["train"].features[label_column_name].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
print(list(datasets['train'].features.keys())[0:10])
is_regression = datasets["train"].features[label_column_name].dtype in ["float32", "float64"]
# if is_regression:
# num_labels = 1
# else:
is_multilabel = datasets["train"].features[label_column_name].dtype in [list, "list"]
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
if is_multilabel:
logger.warning("Found list type label, converting to pandas. This may be slow and memory intensive")
# this is inefficient
label_list = datasets["train"].data[label_column_name].to_pandas().explode().unique()
label_list = [x for x in label_list if not pd.isnull(x)]
print(len(label_list))
print(label_list[0:10])
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
else:
if is_regression:
num_labels = 1
else:
label_list = datasets["train"].unique(label_column_name)
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Load a BertTokenizer directly if we have passed it a tokenizer path,
# otherwise, use the AutoTokenizer
if os.path.isdir(model_args.model_name_or_path):
tokenizer = BertTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if not is_multilabel:
logging.info("Instantiating multiclass (not multilabel) classification model...")
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logging.info("Instantiating multilabel classification model...")
model = AutoModelForMultiLabelSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the datasets
if data_args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in datasets["train"].column_names if name != label_column_name]
print(non_label_column_names)
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
print(sentence1_key, sentence2_key)
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and data_args.task_name is not None
and is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
else:
logger.warn(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
)
elif data_args.task_name is None and not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
def preprocess_function(examples):
    """Tokenize a batch and map labels to IDs.

    Null text values (which may exist in the validation set) are replaced
    with "" before tokenization. List-valued labels (multilabel) are
    one-hot encoded over the sorted label keys; scalar labels are mapped
    through `label_to_id`. GLUE tasks skip the label mapping.
    """
    # Replace all null values with "" (these may exist in the validation set)
    texts_a = [s if s is not None else "" for s in examples[sentence1_key]]
    if sentence2_key is None:
        args = (texts_a,)
    else:
        args = (texts_a, [s if s is not None else "" for s in examples[sentence2_key]])
    result = tokenizer(*args, padding=padding, max_length=data_args.max_seq_length, truncation=True)
    # Map labels to IDs (not necessary for GLUE tasks)
    if label_to_id is not None and label_column_name in examples:
        # Idiom fix: isinstance instead of `type(...) == list`.
        if isinstance(examples[label_column_name][0], list):
            # Multilabel: one-hot encode against the deterministic key order.
            label_keys_sorted = sorted(label_to_id.keys())
            result[label_column_name] = [
                tuple(1 if key in l else 0 for key in label_keys_sorted)
                for l in examples[label_column_name]
            ]
        else:
            result[label_column_name] = [label_to_id[l] for l in examples[label_column_name]]
    return result
datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)
train_dataset = datasets["train"]
eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
if data_args.task_name is not None or data_args.test_file is not None:
test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Get the metric function
if data_args.task_name is not None:
metric = load_metric("glue", data_args.task_name)
if model_args.use_class_weights:
    if is_multilabel:
        # Positive-class weights: N_examples / N_positives per label,
        # consumed by the multilabel model's BCE loss.
        # BUG FIX: was hard-coded `"label"`; the rest of the file uses the
        # configurable `label_column_name`.
        label_np = datasets["train"].data[label_column_name].to_numpy()
        pos_weights = label_np.shape[0] / np.stack(label_np).sum(axis=0)
        model.pos_weights = torch.tensor(pos_weights, device=torch.device("cuda") if torch.cuda.is_available() else "cpu")
# TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from
# compute_metrics
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
    """Metrics for regression / single-label classification.

    GLUE tasks defer to the task metric (plus a combined score when the
    metric returns several values); regression returns MSE; otherwise
    returns accuracy, micro/macro precision/recall/F1, per-class scores,
    per-class support and the confusion matrix.
    """
    preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
    preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
    if data_args.task_name is not None:
        result = metric.compute(predictions=preds, references=p.label_ids)
        if len(result) > 1:
            result["combined_score"] = np.mean(list(result.values())).item()
        return result
    elif is_regression:
        return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
    else:
        # NOTE: a weighted average was previously computed here but never
        # reported; dropped to avoid the wasted pass.
        micro_precision, micro_recall, micro_fb1, _ = precision_recall_fscore_support(p.label_ids, preds, average='micro')
        macro_precision, macro_recall, macro_fb1, _ = precision_recall_fscore_support(p.label_ids, preds, average='macro')
        # average=None yields per-class arrays; its `support` is the one reported.
        sample_precision, sample_recall, sample_fb1, support = precision_recall_fscore_support(p.label_ids, preds, average=None)
        cfm = confusion_matrix(p.label_ids, preds)
        return {"accuracy_direct": (preds == p.label_ids).astype(np.float32).mean().item(),
                'accuracy': accuracy_score(p.label_ids, preds),
                'micro-precision': micro_precision,
                'micro-recall': micro_recall,
                'micro-fb1': micro_fb1,
                'macro-precision': macro_precision,
                'macro-recall': macro_recall,
                'macro-fb1': macro_fb1,
                'support': support.tolist(),
                'sample-precision': sample_precision.tolist(),
                'sample-recall': sample_recall.tolist(),
                'sample-fb1': sample_fb1.tolist(),
                'confusion_matrix': cfm.tolist()
                }
def compute_metrics_multiclass(p: EvalPrediction):
    """Metrics for the MULTILABEL case (name kept for the caller below).

    Logits are binarized at 0 (sigmoid threshold 0.5); returns subset
    accuracy, micro/macro precision/recall/F1, per-class scores, support
    and the per-label confusion matrices.
    """
    preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
    preds_binary = (preds > 0).astype(float)
    # NOTE: a weighted average was previously computed here but never
    # reported; dropped to avoid the wasted pass.
    micro_precision, micro_recall, micro_fb1, _ = precision_recall_fscore_support(p.label_ids, preds_binary, average='micro')
    macro_precision, macro_recall, macro_fb1, _ = precision_recall_fscore_support(p.label_ids, preds_binary, average='macro')
    sample_precision, sample_recall, sample_fb1, support = precision_recall_fscore_support(p.label_ids, preds_binary, average=None)
    # BUG FIX: local was named `confusion_matrix`, shadowing the
    # sklearn import of the same name.
    cfm = multilabel_confusion_matrix(p.label_ids, preds_binary)
    return {'accuracy': accuracy_score(p.label_ids, preds_binary),
            'micro-precision': micro_precision,
            'micro-recall': micro_recall,
            'micro-fb1': micro_fb1,
            'macro-precision': macro_precision,
            'macro-recall': macro_recall,
            'macro-fb1': macro_fb1,
            'support': support.tolist(),
            'sample-precision': sample_precision.tolist(),
            'sample-recall': sample_recall.tolist(),
            'sample-fb1': sample_fb1.tolist(),
            'confusion_matrix': cfm.tolist()
            }
metric_computation_function = compute_metrics if not is_multilabel else compute_metrics_multiclass
logging.info(f"Using metric_computation_function {metric_computation_function}")
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=metric_computation_function,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
data_collator=default_data_collator if data_args.pad_to_max_length else None,
)
# Extra info - write labels to file
output_label_file = os.path.join(training_args.output_dir, f"label_list.json")
with open(output_label_file, "w") as writer:
json.dump(label_to_id, writer, indent=4)
output_label_file = os.path.join(training_args.output_dir, f"training_args.json")
with open(output_label_file, "w") as writer:
writer.write(repr(training_args))
output_label_file = os.path.join(training_args.output_dir, f"model_args.json")
with open(output_label_file, "w") as writer:
writer.write(repr(model_args))
output_label_file = os.path.join(training_args.output_dir, f"data_args.json")
with open(output_label_file, "w") as writer:
writer.write(repr(data_args))
# Training
if training_args.do_train:
train_result = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
metrics = train_result.metrics
trainer.save_model() # Saves the tokenizer too for easy upload
# Save the training results to txt and json formats
output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
output_train_file_json = os.path.join(training_args.output_dir, "train_results.json")
if trainer.is_world_process_zero():
with open(output_train_file, "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(metrics.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
with open(output_train_file_json, "w") as writer:
json.dump(metrics, writer)
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
eval_results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
eval_datasets = [eval_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
eval_datasets.append(datasets["validation_mismatched"])
# Save the eval results to both txt and json formats
for eval_dataset, task in zip(eval_datasets, tasks):
eval_result = trainer.evaluate(eval_dataset=eval_dataset)
output_eval_file = os.path.join(training_args.output_dir, f"eval_results_{task}.txt")
output_eval_file_json = os.path.join(training_args.output_dir, f"eval_results_{task}.json")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info(f"***** Eval results {task} *****")
for key, value in sorted(eval_result.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
with open(output_eval_file_json, "w") as writer:
json.dump(eval_result, writer)
eval_results.update(eval_result)
if training_args.do_predict:
logger.info("*** Test ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
test_datasets = [test_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
test_datasets.append(datasets["test_mismatched"])
for test_dataset, task in zip(test_datasets, tasks):
# Removing the `label` columns because it contains -1 and Trainer won't like that.
test_dataset.remove_columns_(label_column_name)
predictions = trainer.predict(test_dataset=test_dataset).predictions
predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt")
if trainer.is_world_process_zero():
with open(output_test_file, "w") as writer:
logger.info(f"***** Test results {task} *****")
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if is_regression:
writer.write(f"{index}\t{item:3.3f}\n")
else:
item = label_list[item]
writer.write(f"{index}\t{item}\n")
return eval_results
def _mp_fn(index):
    """Per-process entry point for TPU spawning via `xla_spawn`; `index` is the process ordinal (unused)."""
    # For xla_spawn (TPUs)
    main()
# Script entry point.
# BUG FIX: the `main()` line carried fused dump residue ("| # coding=utf-8");
# stripped so the file parses.
if __name__ == "__main__":
    main()
# Copyright (C) 2021 ServiceNow, Inc.
""" Finetuning for running PAIRING and MULTICLASS-MULTILABEL classification finetuning
on NRCan data and the original Glue benchmarks
This script is a SIGNIFICANTLY MODIFIED version of the run_glue.py script provided by
HuggingFace in the Transformers repo:
https://github.com/huggingface/transformers/blob/master/examples/text-classification/run_glue.py
It has been extended to perform multiclass sequence classification and sentence pairing
classification and to handle the format of our inputs.
It was originally make accessible by hf using the apache 2.0 licence.
"""
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.
import logging
import os
import numpy as np
import torch
import json
import shutil
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import pandas as pd
# HACK: hf datasets repo cannot detect disk space on a cluster,
# so let it know there's some space left
shutil.disk_usage = lambda x: shutil._ntuple_diskusage(1, 1, 1)
from datasets import load_dataset, load_metric
from sklearn.metrics import (
accuracy_score,
precision_recall_fscore_support,
multilabel_confusion_matrix,
confusion_matrix
)
import transformers
from transformers import (
BertTokenizer,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PretrainedConfig,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import is_main_process
from nrcan_p2.evaluation.hf_modeling_auto import AutoModelForMultiLabelSequenceClassification
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """
    # Required: name of the dataset column holding the target label(s).
    label_column_name: str = field(metadata={"help": "Which column contains the label"})
    # Optional GLUE task; when set, train/validation files are ignored.
    task_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    dataset_cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the datasets models downloaded from huggingface.co"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    # When False, padding is done dynamically per batch by the data collator.
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
    def __post_init__(self):
        """Validate the argument combination: either a known GLUE task, or
        train/validation files with matching csv/json extensions."""
        if self.task_name is not None:
            self.task_name = self.task_name.lower()
            if self.task_name not in task_to_keys.keys():
                raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys()))
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task or a training/validation file.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    # Required: HF hub id or local path of the pretrained model.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
    # When True (and the task is multilabel), per-label positive weights are
    # computed from the training split and attached to the model's loss.
    use_class_weights: bool = field(
        default=False, metadata={"help": "Use weights for each class in a multiclass or pairing setting"}
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
shutil.disk_usage = lambda x: shutil._ntuple_diskusage(1, 1, 1)
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
label_column_name = data_args.label_column_name
if data_args.task_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset("glue", data_args.task_name)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
train_extension = data_args.train_file.split(".")[-1]
test_extension = data_args.test_file.split(".")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
data_files["test"] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}")
if data_args.train_file.endswith(".csv"):
# Loading a dataset from local csv files
datasets = load_dataset("csv", data_files=data_files, cache_dir=data_args.dataset_cache_dir)
else:
# Loading a dataset from local json files
datasets = load_dataset("json", data_files=data_files, field='data', cache_dir=data_args.dataset_cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
if data_args.task_name is not None:
is_regression = data_args.task_name == "stsb"
is_multilabel = None
if not is_regression:
label_list = datasets["train"].features[label_column_name].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
print(list(datasets['train'].features.keys())[0:10])
is_regression = datasets["train"].features[label_column_name].dtype in ["float32", "float64"]
# if is_regression:
# num_labels = 1
# else:
is_multilabel = datasets["train"].features[label_column_name].dtype in [list, "list"]
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
if is_multilabel:
logger.warning("Found list type label, converting to pandas. This may be slow and memory intensive")
# this is inefficient
label_list = datasets["train"].data[label_column_name].to_pandas().explode().unique()
label_list = [x for x in label_list if not pd.isnull(x)]
print(len(label_list))
print(label_list[0:10])
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
else:
if is_regression:
num_labels = 1
else:
label_list = datasets["train"].unique(label_column_name)
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Load a BertTokenizer directly if we have passed it a tokenizer path,
# otherwise, use the AutoTokenizer
if os.path.isdir(model_args.model_name_or_path):
tokenizer = BertTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if not is_multilabel:
logging.info("Instantiating multiclass (not multilabel) classification model...")
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logging.info("Instantiating multilabel classification model...")
model = AutoModelForMultiLabelSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the datasets
if data_args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in datasets["train"].column_names if name != label_column_name]
print(non_label_column_names)
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
print(sentence1_key, sentence2_key)
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
    model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
    and data_args.task_name is not None
    # BUG FIX: was `and is_regression`, which made this branch fire only for
    # regression (stsb), where `label_list` is never defined -> NameError.
    # Reusing the model's label2id only makes sense for classification.
    and not is_regression
):
    # Some have all caps in their config, some don't.
    label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
    if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
        label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
    else:
        # BUG FIX: the message was split across two positional args; `logging`
        # treats extra positional args as %-format arguments and would fail to
        # format at emit time. Concatenate into one string and use the
        # non-deprecated `warning`.
        logger.warning(
            "Your model seems to have been trained with labels, but they don't match the dataset: "
            f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
            "\nIgnoring the model labels as a result."
        )
elif data_args.task_name is None and not is_regression:
    # Local dataset: build a deterministic label -> id mapping.
    label_to_id = {v: i for i, v in enumerate(label_list)}
def preprocess_function(examples):
    """Tokenize a batch and map labels to IDs.

    Null text values (which may exist in the validation set) are replaced
    with "" before tokenization. List-valued labels (multilabel) are
    one-hot encoded over the sorted label keys; scalar labels are mapped
    through `label_to_id`. GLUE tasks skip the label mapping.
    """
    # Replace all null values with "" (these may exist in the validation set)
    texts_a = [s if s is not None else "" for s in examples[sentence1_key]]
    if sentence2_key is None:
        args = (texts_a,)
    else:
        args = (texts_a, [s if s is not None else "" for s in examples[sentence2_key]])
    result = tokenizer(*args, padding=padding, max_length=data_args.max_seq_length, truncation=True)
    # Map labels to IDs (not necessary for GLUE tasks)
    if label_to_id is not None and label_column_name in examples:
        # Idiom fix: isinstance instead of `type(...) == list`.
        if isinstance(examples[label_column_name][0], list):
            # Multilabel: one-hot encode against the deterministic key order.
            label_keys_sorted = sorted(label_to_id.keys())
            result[label_column_name] = [
                tuple(1 if key in l else 0 for key in label_keys_sorted)
                for l in examples[label_column_name]
            ]
        else:
            result[label_column_name] = [label_to_id[l] for l in examples[label_column_name]]
    return result
datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)
train_dataset = datasets["train"]
eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
if data_args.task_name is not None or data_args.test_file is not None:
test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Get the metric function
if data_args.task_name is not None:
metric = load_metric("glue", data_args.task_name)
if model_args.use_class_weights:
    if is_multilabel:
        # Positive-class weights: N_examples / N_positives per label,
        # consumed by the multilabel model's BCE loss.
        # BUG FIX: was hard-coded `"label"`; the rest of the file uses the
        # configurable `label_column_name`.
        label_np = datasets["train"].data[label_column_name].to_numpy()
        pos_weights = label_np.shape[0] / np.stack(label_np).sum(axis=0)
        model.pos_weights = torch.tensor(pos_weights, device=torch.device("cuda") if torch.cuda.is_available() else "cpu")
# TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from
# compute_metrics
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
    """Metrics for regression / single-label classification.

    GLUE tasks defer to the task metric (plus a combined score when the
    metric returns several values); regression returns MSE; otherwise
    returns accuracy, micro/macro precision/recall/F1, per-class scores,
    per-class support and the confusion matrix.
    """
    preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
    preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
    if data_args.task_name is not None:
        result = metric.compute(predictions=preds, references=p.label_ids)
        if len(result) > 1:
            result["combined_score"] = np.mean(list(result.values())).item()
        return result
    elif is_regression:
        return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
    else:
        # NOTE: a weighted average was previously computed here but never
        # reported; dropped to avoid the wasted pass.
        micro_precision, micro_recall, micro_fb1, _ = precision_recall_fscore_support(p.label_ids, preds, average='micro')
        macro_precision, macro_recall, macro_fb1, _ = precision_recall_fscore_support(p.label_ids, preds, average='macro')
        # average=None yields per-class arrays; its `support` is the one reported.
        sample_precision, sample_recall, sample_fb1, support = precision_recall_fscore_support(p.label_ids, preds, average=None)
        cfm = confusion_matrix(p.label_ids, preds)
        return {"accuracy_direct": (preds == p.label_ids).astype(np.float32).mean().item(),
                'accuracy': accuracy_score(p.label_ids, preds),
                'micro-precision': micro_precision,
                'micro-recall': micro_recall,
                'micro-fb1': micro_fb1,
                'macro-precision': macro_precision,
                'macro-recall': macro_recall,
                'macro-fb1': macro_fb1,
                'support': support.tolist(),
                'sample-precision': sample_precision.tolist(),
                'sample-recall': sample_recall.tolist(),
                'sample-fb1': sample_fb1.tolist(),
                'confusion_matrix': cfm.tolist()
                }
def compute_metrics_multiclass(p: EvalPrediction):
    """Metrics for the MULTILABEL case (name kept for the caller below).

    Logits are binarized at 0 (sigmoid threshold 0.5); returns subset
    accuracy, micro/macro precision/recall/F1, per-class scores, support
    and the per-label confusion matrices.
    """
    preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
    preds_binary = (preds > 0).astype(float)
    # NOTE: a weighted average was previously computed here but never
    # reported; dropped to avoid the wasted pass.
    micro_precision, micro_recall, micro_fb1, _ = precision_recall_fscore_support(p.label_ids, preds_binary, average='micro')
    macro_precision, macro_recall, macro_fb1, _ = precision_recall_fscore_support(p.label_ids, preds_binary, average='macro')
    sample_precision, sample_recall, sample_fb1, support = precision_recall_fscore_support(p.label_ids, preds_binary, average=None)
    # BUG FIX: local was named `confusion_matrix`, shadowing the
    # sklearn import of the same name.
    cfm = multilabel_confusion_matrix(p.label_ids, preds_binary)
    return {'accuracy': accuracy_score(p.label_ids, preds_binary),
            'micro-precision': micro_precision,
            'micro-recall': micro_recall,
            'micro-fb1': micro_fb1,
            'macro-precision': macro_precision,
            'macro-recall': macro_recall,
            'macro-fb1': macro_fb1,
            'support': support.tolist(),
            'sample-precision': sample_precision.tolist(),
            'sample-recall': sample_recall.tolist(),
            'sample-fb1': sample_fb1.tolist(),
            'confusion_matrix': cfm.tolist()
            }
metric_computation_function = compute_metrics if not is_multilabel else compute_metrics_multiclass
logging.info(f"Using metric_computation_function {metric_computation_function}")
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=metric_computation_function,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
data_collator=default_data_collator if data_args.pad_to_max_length else None,
)
# Extra info - write labels to file
output_label_file = os.path.join(training_args.output_dir, f"label_list.json")
with open(output_label_file, "w") as writer:
json.dump(label_to_id, writer, indent=4)
output_label_file = os.path.join(training_args.output_dir, f"training_args.json")
with open(output_label_file, "w") as writer:
writer.write(repr(training_args))
output_label_file = os.path.join(training_args.output_dir, f"model_args.json")
with open(output_label_file, "w") as writer:
writer.write(repr(model_args))
output_label_file = os.path.join(training_args.output_dir, f"data_args.json")
with open(output_label_file, "w") as writer:
writer.write(repr(data_args))
# Training
if training_args.do_train:
train_result = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
metrics = train_result.metrics
trainer.save_model() # Saves the tokenizer too for easy upload
# Save the training results to txt and json formats
output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
output_train_file_json = os.path.join(training_args.output_dir, "train_results.json")
if trainer.is_world_process_zero():
with open(output_train_file, "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(metrics.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
with open(output_train_file_json, "w") as writer:
json.dump(metrics, writer)
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
eval_results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
eval_datasets = [eval_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
eval_datasets.append(datasets["validation_mismatched"])
# Save the eval results to both txt and json formats
for eval_dataset, task in zip(eval_datasets, tasks):
eval_result = trainer.evaluate(eval_dataset=eval_dataset)
output_eval_file = os.path.join(training_args.output_dir, f"eval_results_{task}.txt")
output_eval_file_json = os.path.join(training_args.output_dir, f"eval_results_{task}.json")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info(f"***** Eval results {task} *****")
for key, value in sorted(eval_result.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
with open(output_eval_file_json, "w") as writer:
json.dump(eval_result, writer)
eval_results.update(eval_result)
if training_args.do_predict:
logger.info("*** Test ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
test_datasets = [test_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
test_datasets.append(datasets["test_mismatched"])
for test_dataset, task in zip(test_datasets, tasks):
# Removing the `label` columns because it contains -1 and Trainer won't like that.
test_dataset.remove_columns_(label_column_name)
predictions = trainer.predict(test_dataset=test_dataset).predictions
predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt")
if trainer.is_world_process_zero():
with open(output_test_file, "w") as writer:
logger.info(f"***** Test results {task} *****")
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if is_regression:
writer.write(f"{index}\t{item:3.3f}\n")
else:
item = label_list[item]
writer.write(f"{index}\t{item}\n")
return eval_results
def _mp_fn(index):
    # For xla_spawn (TPUs): per-process entry point spawned once per TPU
    # worker.  ``index`` is the worker ordinal supplied by xla_spawn and is
    # unused because main() reads its configuration from the command line.
    main()
if __name__ == "__main__":
main() | en | 0.820452 | # coding=utf-8 # Copyright (C) 2021 ServiceNow, Inc. Finetuning for running PAIRING and MULTICLASS-MULTILABEL classification finetuning on NRCan data and the original Glue benchmarks This script is a SIGNIFICANTLY MODIFIED version of the run_glue.py script provided by HuggingFace in the Transformers repo: https://github.com/huggingface/transformers/blob/master/examples/text-classification/run_glue.py It has been extended to perform multiclass sequence classification and sentence pairing classification and to handle the format of our inputs. It was originally make accessible by hf using the apache 2.0 licence. Finetuning the library models for sequence classification on GLUE. # You can also adapt this script on your own text classification task. Pointers for this are left as comments. # HACK: hf datasets repo cannot detect disk space on a cluster, # so let it know there's some space left Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. # Setup logging # Log on each process the small summary: # Set the verbosity to info of the Transformers logger (on main process only): # Set seed before initializing model. # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). 
# # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. # Downloading and loading a dataset from the hub. # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. # Loading a dataset from local csv files # Loading a dataset from local json files # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels # Trying to have good defaults here, don't hesitate to tweak to your needs. # if is_regression: # num_labels = 1 # else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique # this is inefficient # Let's sort it for determinism # Let's sort it for determinism # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. # Load a BertTokenizer directly if we have passed it a tokenizer path, # otherwise, use the AutoTokenizer # Preprocessing the datasets # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. 
# Padding strategy # We will pad later, dynamically at batch creation, to the max sequence length in each batch # Some models have set the order of the labels to use, so let's make sure we do use it. # Some have all caps in their config, some don't. # Tokenize the texts # Replace all null values with "" (these may exist in the validation set) # Map labels to IDs (not necessary for GLUE tasks) # Log a few random samples from the training set: # Get the metric function # TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from # compute_metrics # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. # Initialize our Trainer # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. # Extra info - write labels to file # Training # Saves the tokenizer too for easy upload # Save the training results to txt and json formats # Need to save the state, since Trainer.save_model saves only the tokenizer with the model # Evaluation # Loop to handle MNLI double evaluation (matched, mis-matched) # Save the eval results to both txt and json formats # Loop to handle MNLI double evaluation (matched, mis-matched) # Removing the `label` columns because it contains -1 and Trainer won't like that. # For xla_spawn (TPUs) | 2.181347 | 2 |
src/learners/DLGGTD.py | dylanashley/direct-lambda-greedy | 0 | 6623549 | <gh_stars>0
# -*- coding: ascii -*-
import numpy as np
from .GTD import GTD
class DLGGTD:
    """Direct-Lambda-Greedy GTD learner.

    Wraps a GTD learner and, at every step, chooses the trace-decay
    parameter lambda greedily by trading a learned estimate of the
    squared error of the current value function (``w_err``) against a
    learned estimate of the variance of the return (``w_var``); see
    ``lambda_greedy``.
    """

    # Floor used to keep divisions (and the optimistic initialization
    # denominator) away from zero.
    EPS = 1e-3

    def __init__(self, initial_x, initial_gamma, max_reward):
        """Set up the wrapped GTD learner and the two auxiliary estimators.

        initial_x     -- 1-D feature vector of the first observation
        initial_gamma -- discount factor at the first observation
        max_reward    -- bound on the per-step reward, used to seed the
                         error weights optimistically at (roughly) the
                         largest attainable return max_reward / (1 - gamma)
        """
        assert (len(initial_x.shape) == 1)
        n = len(initial_x)
        self._last_gamma = initial_gamma
        self._GTD = GTD(initial_x)
        # Optimistic initialization: start the auxiliary error estimate at
        # the largest return magnitude the reward bound allows.
        self.w_err = np.ones(n) * max_reward / (
            max(DLGGTD.EPS, 1 - initial_gamma))
        self.w_var = np.zeros(n)
        # Eligibility traces of the error (e_bar) and variance (z_bar)
        # estimators; lambda_greedy mutates them in place.
        self.e_bar = np.zeros(n)
        self.z_bar = np.zeros(n)
        self._gamma_bar = initial_gamma

    def predict(self, x):
        """Return the current prediction for a given set of features x."""
        return np.dot(self._GTD.w, x)

    def update(self, reward, gamma, x, alpha, eta, kappa, rho=1):
        """Perform one transition's worth of learning; return the lambda used.

        reward, gamma, x -- reward, discount, and features of the new step
        alpha            -- step size shared by all weight updates
        eta              -- secondary step-size parameter forwarded to GTD
        kappa            -- extra decay applied to the variance trace
        rho              -- importance-sampling ratio (1 = on-policy)
        """
        # NOTE(review): self._last_gamma is set in __init__ but never
        # refreshed here, so with a non-constant discount the trace decay
        # inside lambda_greedy uses a stale gamma -- confirm whether a
        # ``self._last_gamma = gamma`` assignment is missing.
        # ``self._GTD._last_x`` is presumably the previous step's feature
        # vector cached by GTD (a private attribute) -- TODO confirm.
        _lambda, self._gamma_bar = DLGGTD.lambda_greedy(
            self._last_gamma, self._GTD._last_x, reward, gamma, x, rho,
            self.w_err, self.w_var, self._GTD.w, self.e_bar, self.z_bar, alpha,
            self._gamma_bar, kappa)
        self._GTD.update(reward, gamma, x, alpha, eta, _lambda, rho)
        return _lambda

    @staticmethod
    def lambda_greedy(gamma, x, next_reward, next_gamma, next_x, rho, w_err,
                      w_var, w, last_e_bar, last_z_bar, alpha, gamma_bar,
                      kappa_bar):
        """Update the auxiliary estimators and return the greedy lambda.

        Runs two TD-style updates with accumulating, importance-weighted
        eligibility traces: one on ``w_err`` (value-error estimate) and
        one on ``w_var`` (return-variance estimate, whose "reward" is the
        squared TD error of the main weights ``w`` and whose discount is
        the squared next discount).  ``last_e_bar``, ``last_z_bar``,
        ``w_err`` and ``w_var`` are all mutated IN PLACE.

        Returns the pair ``(lambda, next_gamma_bar)`` where
        lambda = errsq / (varg + errsq): lambda tends to 1 when the
        estimated (squared) error dominates and to 0 when the estimated
        variance dominates.
        """
        # use GTD to update w_err
        delta_err = next_reward + next_gamma * np.dot(next_x, w_err) - np.dot(
            x, w_err)
        # Accumulating trace, decayed by the previous discount and
        # importance-weighted by rho.
        last_e_bar *= gamma
        last_e_bar += x
        last_e_bar *= rho
        w_err += alpha * delta_err * last_e_bar
        # use GTD to update w_var: the variance learner's "reward" is the
        # squared TD error and its discount is next_gamma squared.
        delta = next_reward + next_gamma * np.dot(next_x, w) - np.dot(x, w)
        next_reward_bar = delta**2
        next_gamma_bar = next_gamma**2
        delta_bar = next_reward_bar + next_gamma_bar * np.dot(
            next_x, w_var) - np.dot(x, w_var)
        last_z_bar *= kappa_bar * gamma_bar
        last_z_bar += x
        last_z_bar *= rho
        w_var += alpha * delta_bar * last_z_bar
        # compute lambda estimate: squared-error vs. variance trade-off,
        # guarded against a zero denominator by EPS.
        errsq = (np.dot(next_x, w_err) - np.dot(next_x, w))**2
        varg = max(0, np.dot(next_x, w_var))
        return errsq / max(DLGGTD.EPS, varg + errsq), next_gamma_bar
| # -*- coding: ascii -*-
import numpy as np
from .GTD import GTD
class DLGGTD:
EPS = 1e-3
    def __init__(self, initial_x, initial_gamma, max_reward):
        """Initialize the wrapped GTD learner plus the auxiliary
        squared-error (w_err) and variance (w_var) estimators and their
        eligibility traces.

        initial_x     -- 1-D feature vector of the first observation
        initial_gamma -- discount factor at the first observation
        max_reward    -- per-step reward bound; seeds w_err optimistically
                         near the largest return max_reward / (1 - gamma)
        """
        assert (len(initial_x.shape) == 1)
        n = len(initial_x)
        self._last_gamma = initial_gamma
        self._GTD = GTD(initial_x)
        # Optimistic initialization of the error estimate; EPS guards the
        # denominator when initial_gamma is close to 1.
        self.w_err = np.ones(n) * max_reward / (
            max(DLGGTD.EPS, 1 - initial_gamma))
        self.w_var = np.zeros(n)
        self.e_bar = np.zeros(n)  # trace of the error estimator
        self.z_bar = np.zeros(n)  # trace of the variance estimator
        self._gamma_bar = initial_gamma
    def predict(self, x):
        """Return the current prediction for a given set of features x.

        The prediction is the dot product of the wrapped GTD learner's
        weight vector with ``x``.
        """
        return np.dot(self._GTD.w, x)
    def update(self, reward, gamma, x, alpha, eta, kappa, rho=1):
        """Choose lambda greedily for this transition, run the wrapped GTD
        update with it, and return the chosen lambda.

        ``rho`` is the importance-sampling ratio (default 1 = on-policy).
        """
        # NOTE(review): self._last_gamma is never refreshed after __init__,
        # so a non-constant discount would be passed stale here -- confirm
        # whether ``self._last_gamma = gamma`` is missing.
        _lambda, self._gamma_bar = DLGGTD.lambda_greedy(
            self._last_gamma, self._GTD._last_x, reward, gamma, x, rho,
            self.w_err, self.w_var, self._GTD.w, self.e_bar, self.z_bar, alpha,
            self._gamma_bar, kappa)
        self._GTD.update(reward, gamma, x, alpha, eta, _lambda, rho)
        return _lambda
@staticmethod
def lambda_greedy(gamma, x, next_reward, next_gamma, next_x, rho, w_err,
w_var, w, last_e_bar, last_z_bar, alpha, gamma_bar,
kappa_bar):
# use GTD to update w_err
delta_err = next_reward + next_gamma * np.dot(next_x, w_err) - np.dot(
x, w_err)
last_e_bar *= gamma
last_e_bar += x
last_e_bar *= rho
w_err += alpha * delta_err * last_e_bar
# use GTD to update w_var
delta = next_reward + next_gamma * np.dot(next_x, w) - np.dot(x, w)
next_reward_bar = delta**2
next_gamma_bar = next_gamma**2
delta_bar = next_reward_bar + next_gamma_bar * np.dot(
next_x, w_var) - np.dot(x, w_var)
last_z_bar *= kappa_bar * gamma_bar
last_z_bar += x
last_z_bar *= rho
w_var += alpha * delta_bar * last_z_bar
# compute lambda estimate
errsq = (np.dot(next_x, w_err) - np.dot(next_x, w))**2
varg = max(0, np.dot(next_x, w_var))
return errsq / max(DLGGTD.EPS, varg + errsq), next_gamma_bar | en | 0.617801 | # -*- coding: ascii -*- Return the current prediction for a given set of features x. # use GTD to update w_err # use GTD to update w_var # compute lambda estimate | 2.775983 | 3 |
Chapter07.StringFundamentals/convert_binary_digit_to_integer.py | mindnhand/Learning-Python-5th | 0 | 6623550 | <reponame>mindnhand/Learning-Python-5th<filename>Chapter07.StringFundamentals/convert_binary_digit_to_integer.py
#!/usr/bin/env python3
#encoding=utf-8
#---------------------------------------------------------------
# Usage: python3 convert_binary_digit_to_integer.py binary_digit
# Description: convert the binary-digit command-line argument to a
#              decimal integer and print it on standard output
#---------------------------------------------------------------
import sys

'''
execution result in command line:
    ~]# python3 convert_binary_digit_to_integer.py 101110
    binary digit -> integer: 46
    ~]# python3 convert_binary_digit_to_integer.py 101111
    binary digit -> integer: 47
'''


def binary_to_int(binary_digits):
    """Return the integer value of a string of binary digits.

    Each character is folded into the accumulator as
    ``integer * 2 + digit`` -- the manual, character-by-character
    equivalent of ``int(binary_digits, 2)``.  An empty string yields 0,
    matching the original loop's behavior.
    """
    integer = 0
    for ch in binary_digits:
        # ord(ch) - ord('0') maps '0' -> 0 and '1' -> 1.
        integer = integer * 2 + (ord(ch) - ord('0'))
    return integer


if __name__ == '__main__':
    # Guarding the entry point keeps importing this module side-effect
    # free (the original dereferenced sys.argv[1] at import time) and
    # makes binary_to_int reusable and testable.
    print('binary digit -> integer: %d' % binary_to_int(sys.argv[1]))
| #!/usr/bin/env python3
#encoding=utf-8
#---------------------------------------------------------------
# Usage: python3 convert_binary_digit_to_integer.py binary_digit
# Description: 将二进制输入参数转换为十进制数并打印到标准输出
#---------------------------------------------------------------
import sys
'''
execution result in command line:
~]# python3 convert_binary_digit_to_integer.py 101110
binary digit -> integer: 46
~]# python3 convert_binary_digit_to_integer.py 101111
binary digit -> integer: 47
'''
input_binary = sys.argv[1]
integer = 0
while input_binary != '':
integer = integer * 2 + (ord(input_binary[0]) - ord('0'))
input_binary = input_binary[1:]
print('binary digit -> integer: %d' % integer) | en | 0.165276 | #!/usr/bin/env python3 #encoding=utf-8 #--------------------------------------------------------------- # Usage: python3 convert_binary_digit_to_integer.py binary_digit # Description: 将二进制输入参数转换为十进制数并打印到标准输出 #--------------------------------------------------------------- execution result in command line: ~]# python3 convert_binary_digit_to_integer.py 101110 binary digit -> integer: 46 ~]# python3 convert_binary_digit_to_integer.py 101111 binary digit -> integer: 47 | 4.168561 | 4 |