index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
22,936
|
kaefee/Agustin-Codazzi-Project
|
refs/heads/main
|
/apps/utils/utils_plots.py
|
import plotly.express as px
def Make_map(df):
    """Build a dark-themed Plotly scatter-mapbox of soil observation sites.

    :param df: DataFrame with at least LATITUD, LONGITUD, ORDEN and ALTITUD
        columns; rows missing any of those values are dropped.
    :return: a plotly Figure, points colored by taxonomic order (ORDEN).
    """
    # Keep only the columns the map needs, complete rows only.
    new_df=df[["LATITUD","LONGITUD","ORDEN","ALTITUD"]].dropna()
    # NOTE(review): the ALTITUD label "medal" looks like a leftover from an
    # example snippet — confirm the intended display name.
    fig=px.scatter_mapbox(new_df, lat="LATITUD", lon="LONGITUD", color= "ORDEN", size_max=15, zoom=7
        ,labels={"ORDEN": "ORDEN", "ALTITUD": "medal"},custom_data=["ORDEN","ALTITUD"],
        # Fixed color per soil order so the legend stays stable across filters.
        color_discrete_map={
            "Andisol": '#e74C3C',
            "Entisol": '#3498DB',
            "Histosol": '#00BC8C',
            "Inceptisol": '#375A7F',
            "Molisol": '#F39C12',
        }
    )
    # Dark layout to match the dashboard theme.
    fig.update_layout(
        plot_bgcolor="black",
        mapbox_style="satellite-streets",
        paper_bgcolor="#222222",
        font_color="#FFFFFF",
        margin=dict(l=0, r=2, t=0, b=0),
    )
    # Hover text reads the values packed into custom_data above.
    fig.update_traces(
        hovertemplate='Orden: %{customdata[0]}' + '<br> Altitud: %{customdata[1]} '
    )
    return fig
|
{"/callbacks.py": ["/apps/utils/utils_getdata.py", "/apps/utils/utils_plots.py", "/apps/utils/utils_filters.py"], "/apps/utils/utils_filters.py": ["/apps/utils/utils_getdata.py"], "/apps/home/layout_home.py": ["/apps/utils/utils_getdata.py"]}
|
22,937
|
kaefee/Agustin-Codazzi-Project
|
refs/heads/main
|
/apps/home/layout_home.py
|
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import pandas as pd
from apps.utils import utils_cardskpi
from apps.utils import utils_plots
from apps.utils import utils_filters
from apps.utils import utils_tree_map
from apps.utils import utils_cardskpi
from apps.utils import utils_pivot_table
from apps.utils.utils_getdata import get_data
# Columns required by the dashboard; rows with any missing value are dropped.
df=get_data(["CLIMA_AMBIENTAL", "PAISAJE",
             'TIPO_RELIEVE', 'FORMA_TERRENO',
             'MATERIAL_PARENTAL_LITOLOGIA', 'ORDEN',
             "LATITUD","LONGITUD","ALTITUD","CODIGO"]).dropna()
# Page layout: filter column (left) + map / KPI card / tree-map / pivot table.
layout= html.Div([
    #dbc.Row(dbc.Col(
    #    dbc.Spinner(children=[dcc.Graph(id="loading-output")], size="lg", color="primary", type="border", fullscreen=True,),
    #        spinner_style={"width": "10rem", "height": "10rem"}),
    #        spinnerClassName="spinner"),
    #    dcc.Loading(children=[dcc.Graph(id="loading-output")], color="#119DFF", type="dot", fullscreen=True,),
    #    width={'size': 12, 'offset': 0}),
    #),
    dbc.Row([html.Hr()]),  # first row intentionally left empty
    dbc.Row([
        dbc.Col([
            utils_filters.make_filters(df)
        ],lg=2,id="Filter_section"),
        dbc.Col([
            html.Div(id="main_alert", children=[]),
            html.H1("Resumen de clasificación Taxonómica", className='title ml-2',style={'textAlign': 'left', 'color': '#FFFFFF'}),
            dbc.Row([
                dbc.Col([dbc.Container([
                    dbc.Spinner(children=[
                        dcc.Graph(figure=utils_plots.Make_map(df),
                            id="Mapa",
                            config={
                                # NOTE(review): file handle opened at import
                                # time and never closed — confirm acceptable.
                                'mapboxAccessToken':open(".mapbox_token").read(),
                                'displayModeBar': False,
                                'staticPlot': False,
                                'fillFrame':False,
                                'frameMargins': 0,
                                'responsive': False,
                                'showTips':True
                            })], size="lg", color="primary", type="border", fullscreen=True,)
                ])
                ],lg='10'),
                dbc.Col([
                    dbc.ListGroup([
                        dbc.ListGroupItem(
                            [
                                dbc.ListGroupItemHeading("Numero de Observaciones",
                                    style={"font-size": "1.3em"}),
                                dbc.ListGroupItemText(len(df), style={"font-size": "2.5em",
                                    "align": "right"},
                                    id="carta_datos")
                            ],
                            id="carta_totales",color="#375A7F")
                    ])
                ],width={"size": 2, "offset": 0})
                # offset: space left from the left-hand edge
            ],no_gutters=True),
            dbc.Row([html.Hr()]),
            dbc.Row([
                dbc.Container([
                    html.H2("Desglose Taxonómico", className='title ml-2',style={'textAlign': 'left', 'color': '#FFFFFF'}),
                ],fluid=True),
                dbc.Col([
                    dbc.Container([
                        dcc.Graph(figure=utils_tree_map.Make_tree_map(df),
                            id="tree_map",
                            config={
                                'displayModeBar': False,
                                'fillFrame':False,
                                'frameMargins': 0,
                                'responsive': False
                            })]),], width={"size": 9, "offset": 0,})
            ],no_gutters=True)
        ],lg=10),
    ]),
    dbc.Row([html.Hr()]),
    dbc.Row([
        dbc.Container([
            html.H2("Tabla Dinamica", className='title ml-2',style={'textAlign': 'left', 'color': '#FFFFFF'}),
            utils_pivot_table.make_pivot_table(df)],id="Table_data")
    ]),
    dbc.Row([html.Hr()])
])
|
{"/callbacks.py": ["/apps/utils/utils_getdata.py", "/apps/utils/utils_plots.py", "/apps/utils/utils_filters.py"], "/apps/utils/utils_filters.py": ["/apps/utils/utils_getdata.py"], "/apps/home/layout_home.py": ["/apps/utils/utils_getdata.py"]}
|
22,939
|
jua16073/redes_lab1
|
refs/heads/master
|
/client.py
|
import socket
import capas as capas
import crc as receiver
import crc_sender as sender
import pickle
import random
HOST = '127.0.0.1'  # loopback — server runs on the same machine
PORT = 65432  # arbitrary non-privileged port; must match the server
def ruido(msg):
    """Flip one randomly-chosen bit of *msg* in place (simulated channel noise).

    Fix: the original used random.randint(0, len(msg)), whose inclusive upper
    bound can index one past the end and raise IndexError; randrange excludes
    the upper bound.
    :param msg: non-empty mutable sequence of bits/bools.
    :return: the same (mutated) sequence, for chaining.
    """
    index = random.randrange(len(msg))
    msg[index] = not msg[index]
    return msg
def mensaje():
    """Read a message from stdin and prepare the two pickled payloads to send.

    :return: (message, mensaje1) where
        message  - pickled CRC frame built over the CLEAN text
                   (presumably by crc_sender.crc — confirm), and
        mensaje1 - pickled binary string of the text after one random
                   bit-flip (simulated channel noise).
    """
    mensaje1 = input("Ingrese un mensaje a mandar\n")
    # Text -> '0'/'1' binary string.
    mensaje1 = capas.string_to_binary(mensaje1)
    # CRC is computed BEFORE the noise, so the receiver can detect corruption.
    message = sender.crc(mensaje1)
    message = pickle.dumps(message)
    # Corrupt exactly one bit of the outgoing message.
    mensaje1 = capas.to_bitarray(mensaje1)
    mensaje1 = ruido(mensaje1)
    mensaje1 = capas.bitarray_to_binary(mensaje1)
    mensaje1 = pickle.dumps(mensaje1)
    return message, mensaje1
def recibir(data):
    """Unpickle *data* and run the receiver-side CRC check on it.

    :param data: pickled payload (as produced by mensaje()).
    :return: whatever receiver.crc returns for the decoded payload.
    """
    # NOTE(review): pickle.loads on network data is unsafe for untrusted
    # peers; also this local name shadows the function itself.
    recibir = pickle.loads(data)
    resultado = receiver.crc(recibir)
    return resultado
# Script entry: build the payloads, send the noisy message, then compare the
# server's CRC result against the locally computed one.
comprobacion, mensaje_enviado = mensaje()
print(mensaje_enviado)
print(comprobacion)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    s.sendall(mensaje_enviado)
    # Large recv buffer so the whole pickled reply arrives in one call.
    data = s.recv(100000000)
    resultados1 = recibir(data)
    print(resultados1)
    # CRC of the clean message, computed locally for comparison.
    result = recibir(comprobacion)
    if result == resultados1:
        print('No Hay Errores')
    else:
        print('Error')
    print("Received", repr(result))
|
{"/client.py": ["/capas.py"], "/capas.py": ["/hamming.py"]}
|
22,940
|
jua16073/redes_lab1
|
refs/heads/master
|
/hamming.py
|
#Using even parity
def code(msg):
#print('hamming ', msg)
msg_8 = []
n_parts = []
#dividir el mensaje en bits de 8
temp = []
for x in range(len(msg)):
if x % 8 == 0 and x !=0:
msg_8.append(temp)
temp = []
temp.append(msg[x])
msg_8.append(temp)
n_message = []
for byte in msg_8:
n_parts.append(redundant(byte))
n_complete = []
for p in n_parts:
for b in p:
n_complete.append(b)
return n_complete
def redundant(part):
    """Expand an 8-bit data block to 12 bits with even-parity bits.

    Data bits fill positions 0,1,2,4,5,6,8,11; parity bits occupy positions
    3, 7, 9 and 10 and are computed over fixed index groups of the input.
    :param part: sequence of bits; the first 8 entries are used as data.
    :return: 12-element list of bits.
    """
    parity_slots = (3, 7, 9, 10)
    encoded = [0] * 12
    # Copy the data bits into every non-parity slot, in order.
    src = 0
    for pos in range(12):
        if pos in parity_slots:
            continue
        encoded[pos] = part[src]
        src += 1
    # Even parity over these input-index groups (same coverage as original):
    # positions 1,3,5,7 / 2,3,6,7-style masks expressed on the raw input.
    check_groups = ((1, 3, 5, 7), (1, 2, 5, 6), (1, 2, 3, 4), (0,))
    parity = [sum(1 for i in group if part[i]) % 2 for group in check_groups]
    encoded[10] = parity[0]
    encoded[9] = parity[1]
    encoded[7] = parity[2]
    encoded[3] = parity[3]
    return encoded
def receptor(msg):
    """Split a received bit sequence into 12-bit blocks and parity-check each.

    Fix: the original never appended the final accumulated block (the closing
    ``append`` was commented out), so the last 12 bits were silently skipped.
    :param msg: flat sequence of received (possibly corrupted) bits.
    :return: None — per-block results are printed by errores().
    """
    print("recibiendo", msg)
    blocks = [msg[i:i + 12] for i in range(0, len(msg), 12)]
    for block in blocks:
        errores(block)
def errores(part):
    """Parity-check a 12-bit block by re-encoding it and comparing.

    Prints whether the block matches its recomputed encoding.
    :param part: 12-bit block (data + parity) as received.
    :return: None — diagnostic output only.
    """
    # Split data and parity bits (parity lives at positions 3, 7, 9, 10).
    # NOTE(review): both lists are built but never used afterwards.
    original = []
    redundantes = []
    for b in range(len(part)):
        if b in [3,7,9,10]:
            redundantes.append(part[b])
        else:
            original.append(part[b])
    # NOTE(review): redundant() was written for 8 data bits but is called
    # here with the full 12-bit block — confirm this comparison is intended.
    comprobante = redundant(part)
    print('original', part)
    print('comprobante',comprobante)
    if part == comprobante:
        print("Todo nitido")
    else:
        print("malo fml")
import random
def ruido(msg):
    """Flip one randomly-chosen bit of *msg* in place (channel-noise helper).

    Fix: the original used random.randint(0, len(msg)), whose inclusive upper
    bound can index one past the end and raise IndexError. Also returns the
    sequence now, matching the client-side ruido().
    :param msg: non-empty mutable sequence of bits/bools.
    :return: the same (mutated) sequence.
    """
    index = random.randrange(len(msg))
    msg[index] = not msg[index]
    return msg
|
{"/client.py": ["/capas.py"], "/capas.py": ["/hamming.py"]}
|
22,941
|
jua16073/redes_lab1
|
refs/heads/master
|
/capas.py
|
from bitarray import *
import pickle
import unicodedata
import hamming
def string_to_binary(msg):
    """Encode *msg* as UTF-8 and return its bits as a '0'/'1' string.

    Leading zero bits of the first byte are not included.
    :param msg: text to convert.
    :return: binary-digit string.
    """
    as_int = int.from_bytes(msg.encode(), 'big')
    return format(as_int, 'b')
def binary_to_string(binary, encoding='utf-8', errors ='surrogatepass'):
    """Decode a '0'/'1' binary string back into text.

    Fix: the original computed the byte count as ``bit_length()+7 // 8``,
    where ``//`` binds tighter than ``+`` — it allocated ``bit_length()``
    bytes and prefixed the result with NUL characters. Parenthesized to the
    intended ceiling division.
    :param binary: binary-digit string (as produced by string_to_binary).
    :param encoding: text codec used to decode the raw bytes.
    :param errors: codec error handler.
    :return: the decoded string ('' for the all-zero/empty value).
    """
    value = int(binary, 2)
    n_bytes = (value.bit_length() + 7) // 8
    return value.to_bytes(n_bytes, 'big').decode(encoding, errors)
def bitarray_to_binary(bitarray):
    """Interpret the raw bytes of *bitarray* as a big-endian unsigned integer
    and return that value as a '0'/'1' string.

    :param bitarray: bytes-like object (e.g. a bitarray's buffer).
    :return: binary-digit string without leading zeros.
    """
    value = int.from_bytes(bitarray, 'big', signed=False)
    return format(value, 'b')
def to_bitarray(something):
    """Wrap *something* in a bitarray (thin delegate to bitarray.bitarray).

    :param something: any value the bitarray constructor accepts
        (e.g. a '0'/'1' string).
    :return: a new bitarray instance.
    """
    return bitarray(something)
|
{"/client.py": ["/capas.py"], "/capas.py": ["/hamming.py"]}
|
23,007
|
wh1teone/client_server_trivia_game
|
refs/heads/main
|
/server_side_trivia.py
|
import socket
import chatlib # protocol functions
import random # For random questions asked
import select # For enabling multiple connections of clients to server
import requests # For pulling random questions from the internet
import json # For handling the JSON requests received.
# to be added: 1. handle 2 answers wrong answers problem. 2. adding already used questions to list. 3. provide no_answers response.
# Shared server state and configuration.
users_information_dict = dict()  # username -> {'password', 'score', 'questions_asked'}
questions = dict()  # question number -> {'question', 'answers', 'correct'}
peer_name_tuple = tuple()  # (ip, port) of the client handled most recently
logged_users_dict = dict()  # peername tuple -> username, for logged-in clients
ERROR_MSG = 'Error! '
SERVER_PORT = 5631
SERVER_IP = '127.0.0.1'
messages_to_send = list()  # queue of (peername, framed bytes) awaiting send
MSG_MAX_LENGTH = 1024
QUESTIONS_AMOUNT = 2  # how many questions are fetched from the API
def add_answered_question_to_user(user, question_id):
    """Record that *user* has already been asked *question_id* (prevents
    repeat questions).

    :param user: username.
    :param question_id: question's id.
    :return: None.
    """
    asked = users_information_dict[user]['questions_asked']
    asked.append(question_id)
def build_and_send_message(conn, cmd, data):
    """Frame *cmd*/*data* with the chat protocol and send it to the client.

    :param conn: client socket to which we want to send the message.
    :param cmd: the command to send according to the trivia protocol.
    :param data: the message to send.
    :return: None.
    """
    try:
        data_to_send = chatlib.build_message(cmd, data).encode()
        conn.send(data_to_send)
        print('[SERVER]', data_to_send.decode()) # Debug print
        # NOTE(review): the message is sent directly above AND queued here,
        # while main() also drains this queue — looks like a double-send;
        # confirm the intended delivery path.
        messages_to_send.append((conn.getpeername(), data_to_send))
    except:
        # Bare except: any framing/send failure queues a plain error string.
        messages_to_send.append((conn.getpeername(), ERROR_MSG))
def recv_message_and_parse(conn):
    """Receive up to 1024 bytes from *conn* and parse them per the protocol.

    :param conn: client socket from which we receive & parse the message.
    :return: (cmd, data) on success, (None, None) on any receive/parse error.
    """
    try:
        raw = conn.recv(1024).decode()
        parsed_cmd, parsed_data = chatlib.parse_message(raw)
        print('[CLIENT]', parsed_cmd, parsed_data) # debug print
        return parsed_cmd, parsed_data
    except:
        return None, None
def load_questions():
    """Fetch QUESTIONS_AMOUNT easy questions from the Open Trivia DB API.

    The correct answer is shuffled in among the wrong ones and its 1-based
    position recorded under 'correct'.
    :return: dict {question_number: {'question', 'answers', 'correct'}}.
    """
    # NOTE(review): no timeout or HTTP error handling — a network failure
    # here raises and aborts server start-up.
    res = requests.get(f'https://opentdb.com/api.php?amount={QUESTIONS_AMOUNT}&difficulty=easy')
    json_res = res.text
    loaded = json.loads(json_res)
    question_dict = {}
    question_num = 1
    for question in loaded['results']:
        correct_answer = question['correct_answer']
        incorrect_answers = question['incorrect_answers']
        # Mix the right answer into the wrong ones, then remember where it landed.
        incorrect_answers.append(correct_answer)
        random.shuffle(incorrect_answers)
        correct_answer_updated_position = incorrect_answers.index(correct_answer)
        question_dict[question_num] = {'question': question['question'], 'answers': incorrect_answers,
                                       'correct': correct_answer_updated_position + 1}
        question_num += 1
    return question_dict
def fix_url_encoded_questions(string_question):
    """Replace common HTML entities in a question string with literal text.

    The Open Trivia DB API returns HTML-entity-encoded text (e.g. ``&quot;``).
    Fix: the original mapping had been corrupted — the entity names were
    decoded in place, leaving identity mappings and a syntax error — so the
    entity keys are restored here. The debug print was removed.
    :param string_question: the question string that we want to fix.
    :return: fixed question string.
    """
    # '&amp;' is replaced last so freshly produced '&' characters are not
    # re-interpreted.
    to_switch_dict = {'&#039;': "'",
                      '&quot;': '"',
                      '&amp;': '&'}
    for entity, literal in to_switch_dict.items():
        string_question = string_question.replace(entity, literal)
    return string_question
def load_user_database():
    """Return the hard-coded user database.

    :return: dict mapping username -> {'password', 'score', 'questions_asked'}.
    """
    credentials = (('test', 'test'), ('yossi', '123'), ('master', 'master'))
    return {
        name: {'password': password, 'score': 0, 'questions_asked': []}
        for name, password in credentials
    }
def setup_socket():
    """Create, bind and start listening on the server socket.

    :return: the listening socket object, or None (implicitly) when the
        address is already in use — callers must be prepared for None.
    """
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind((SERVER_IP, SERVER_PORT))
        sock.listen()
        print('Server is listening...')
        return sock
    except OSError:
        # Bind failed (port taken); falls through returning None.
        print(f'{ERROR_MSG} adress already in use.')
def send_error(conn, error_msg):
    """Send a raw (unframed) error string over the client socket.

    :param conn: client socket.
    :param error_msg: error message to be passed through client socket.
    :return: None.
    """
    payload = error_msg.encode()
    conn.send(payload)
def handle_getscore_message(conn, username):
    """Reply to *conn* with a YOUR_SCORE message carrying *username*'s score.

    :param conn: client socket object.
    :param username: the username of the client socket.
    :return: None.
    """
    score = users_information_dict[username]['score']
    print(score)
    build_and_send_message(conn, 'YOUR_SCORE', f'{score}')
def handle_highscore_message(conn):
    """Send an ALL_SCORE message listing the top three users by score.

    :param conn: client socket object.
    :return: None.
    """
    global users_information_dict
    user_list = list()
    for name, data in users_information_dict.items():
        tmp = {
            'name': name,
            'score': data['score']
        }
        user_list.append(tmp)
    # Highest score first.
    sorted_users = sorted(user_list, key=lambda k: k['score'], reverse=True)
    # NOTE(review): hard-codes exactly three entries — IndexError if fewer
    # than three users are registered (the built-in database has three).
    build_and_send_message(conn, 'ALL_SCORE', f"{sorted_users[0]['name']} : {sorted_users[0]['score']}\n{sorted_users[1]['name']} : {sorted_users[1]['score']}\n{sorted_users[2]['name']} : {sorted_users[2]['score']}")
def handle_logged_message(conn):
    """Send *conn* a LOGGED_ANSWER message listing logged-in usernames,
    one per line.

    :param conn: client socket object.
    :return: None.
    """
    try:
        lines = [f'{logged_users_dict[peer]}\n' for peer in logged_users_dict]
        build_and_send_message(conn, 'LOGGED_ANSWER', ''.join(lines))
    except:
        send_error(conn, ERROR_MSG)
def handle_logout_message(conn):
    """Remove *conn*'s peer from the logged-in registry.

    :param conn: client socket being logged out.
    :return: None.
    """
    peer = conn.getpeername()
    logged_users_dict.pop(peer)
    print(f' logged user list: {logged_users_dict}')
def handle_login_message(conn, data):
    """Validate 'username#password' credentials and reply LOGIN_OK or ERROR.

    On success the peer address is registered in logged_users_dict.
    :param conn: client socket object.
    :param data: client socket message, expected as 'username#password'.
    :return: None.
    """
    global users_information_dict # This is needed to access the same users_information_dict dictionary from all functions
    global logged_users_dict # To be used later
    # NOTE(review): split_data returns None for malformed payloads, making
    # login_cred[0] raise TypeError — confirm the client always sends
    # exactly one '#'.
    login_cred = chatlib.split_data(data, 1)
    if login_cred[0] in users_information_dict:
        if login_cred[1] == users_information_dict[login_cred[0]]['password']:
            build_and_send_message(conn, 'LOGIN_OK', '')
            logged_users_dict[conn.getpeername()] = login_cred[0]
            print(f' logged user list: {logged_users_dict}')
        else:
            build_and_send_message(conn, 'ERROR', 'Wrong password.')
    else:
        build_and_send_message(conn, 'ERROR', 'User does not exist.')
def handle_client_message(conn, cmd, data):
    """Dispatch a parsed client message to the matching handler.

    A None command (failed recv/parse, i.e. client went away) is treated as a
    logout. Unknown commands get an ERROR reply.
    :param conn: client socket object.
    :param cmd: client socket command.
    :param data: client message.
    :return: None.
    """
    global logged_users_dict
    if cmd == 'LOGIN':
        handle_login_message(conn, data)
    elif cmd == 'LOGOUT' or cmd is None:
        handle_logout_message(conn)
    elif cmd == 'MY_SCORE':
        handle_getscore_message(conn, logged_users_dict[conn.getpeername()])
    elif cmd == 'HIGHSCORE':
        handle_highscore_message(conn)
    elif cmd == 'LOGGED':
        handle_logged_message(conn)
    elif cmd == 'GET_QUESTION':
        handle_question_message(conn)
    elif cmd == 'SEND_ANSWER':
        handle_answer_message(conn, logged_users_dict[conn.getpeername()], data)
    else:
        build_and_send_message(conn, 'ERROR', 'Error! command does not exist.')
def create_random_question():
    """Pick a random entry from the global questions dict and format it as
    'id#question#answer1#...#answerN' with HTML entities decoded.

    :return: random question string to be forwarded to the client.
    """
    question_id, details = random.choice(list(questions.items()))
    parts = [str(question_id), details['question']] + details['answers']
    return fix_url_encoded_questions("#".join(parts))
def handle_question_message(conn):
    """Send the user a random question, or NO_QUESTIONS once the pool is used up.

    :param conn: client socket.
    :return: None.
    """
    global questions
    global users_information_dict
    global logged_users_dict
    # User has answered as many questions as were loaded -> nothing left.
    if len(users_information_dict[logged_users_dict[conn.getpeername()]]['questions_asked']) == QUESTIONS_AMOUNT:
        build_and_send_message(conn, 'NO_QUESTIONS', '')
    else:
        # NOTE(review): the random pick does not exclude ids already in
        # 'questions_asked', so repeats are possible before exhaustion.
        question_for_client = create_random_question()
        build_and_send_message(conn, 'YOUR_QUESTION', question_for_client)
def handle_answer_message(conn, username, data):
    """Grade the client's answer ('question_id#choice'), update the score and
    reply CORRECT_ANSWER / WRONG_ANSWER.

    Choices outside '1'-'4' trigger an UNACCEPTABLE_ANSWER round-trip until a
    new answer arrives. +5 points for a correct answer; either way the
    question is marked as asked for *username*.
    :param conn: client socket.
    :param username: client username.
    :param data: client answer.
    :return: none
    """
    global users_information_dict
    global questions
    acceptable_answer = ['1', '2', '3', '4']
    question_id, choice = chatlib.split_data(data, 1)
    while choice not in acceptable_answer:
        # Invalid choice: ask the client again and grade its next message.
        build_and_send_message(conn, 'UNACCEPTABLE_ANSWER', '')
        new_cmd, new_data = recv_message_and_parse(conn)
        question_id, choice = chatlib.split_data(new_data, 1)
        # NOTE(review): the re-prompted choice is graded without re-validating
        # it against acceptable_answer first — int(choice) can raise on a
        # second bad answer (see the TODO at the top of this file).
        if int(choice) == int(questions[int(question_id)]['correct']):
            build_and_send_message(conn, 'CORRECT_ANSWER', '')
            users_information_dict[username]['score'] += 5
            add_answered_question_to_user(username, question_id)
            return
        else:
            build_and_send_message(conn, 'WRONG_ANSWER', '')
            add_answered_question_to_user(username, question_id)
            return
    # Choice was valid on the first try.
    if int(choice) == int(questions[int(question_id)]['correct']):
        build_and_send_message(conn, 'CORRECT_ANSWER', '')
        users_information_dict[username]['score'] += 5
        add_answered_question_to_user(username, question_id)
    else:
        build_and_send_message(conn, 'WRONG_ANSWER', '')
        add_answered_question_to_user(username, question_id)
def print_client_sockets(socket_dict):
    """Print the (ip, port) of every connected client.

    :param socket_dict: the dictionary of client connected to the server,
        keyed by (ip, port) peer-name tuples.
    """
    print('CONNECTED CLIENT SOCKETS:')
    for address in socket_dict.keys():
        ip, port = address
        print(f'IP: {ip}, PORT: {port}')
def main():
    """Server entry point: load state, listen, and service clients via select().

    :return: None — loops until a fatal setup error breaks out.
    """
    global users_information_dict
    global questions
    global peer_name_tuple
    global messages_to_send
    users_information_dict = load_user_database()
    questions = load_questions()
    client_sockets = list()
    print('Welcome to Trivia Server!')
    server_socket = setup_socket()
    print('[SERVER] Listening for new clients...')
    while True:
        try:
            # Multiplex the listening socket and all client sockets.
            ready_to_read, ready_to_write, in_error = select.select([server_socket] + client_sockets, client_sockets, [])
            for current_socket in ready_to_read:
                if current_socket is server_socket:
                    # New connection on the listening socket.
                    (client_socket, client_address) = server_socket.accept()
                    print(f'[SERVER] New client has joined the server: {client_address}')
                    client_sockets.append(client_socket)
                    print_client_sockets(logged_users_dict)
                else:
                    try:
                        print('New data from client')
                        cmd, data = recv_message_and_parse(current_socket)
                        peer_name_tuple = current_socket.getpeername()
                        handle_client_message(current_socket, cmd, data)
                        # NOTE(review): queue entries are (peername, bytes),
                        # but this unpack rebinds current_socket to the
                        # peername tuple — it can never be in ready_to_write
                        # (a list of sockets), and data here is bytes, so
                        # data.encode() would raise. Confirm the intended
                        # delivery path; also removing from the list while
                        # iterating it skips entries.
                        for message in messages_to_send:
                            current_socket, data = message
                            if current_socket in ready_to_write:
                                current_socket.send(data.encode())
                                messages_to_send.remove(message)
                            else:
                                pass
                    except:
                        # Any failure on this client: drop the socket.
                        client_sockets.remove(current_socket)
                        print('[SERVER] Client socket closed.')
                        break
        except TypeError:
            # setup_socket() returned None (port in use) -> select raised.
            print(f'{ERROR_MSG} socket already open.')
            break
|
{"/server_side_trivia.py": ["/chatlib.py"], "/client_side_trivia.py": ["/chatlib.py"]}
|
23,008
|
wh1teone/client_server_trivia_game
|
refs/heads/main
|
/chatlib.py
|
# Protocol Constants
CMD_FIELD_LENGTH = 16  # Exact length of cmd field (in bytes)
LENGTH_FIELD_LENGTH = 4  # Exact length of length field (in bytes)
MAX_DATA_LENGTH = 10 ** LENGTH_FIELD_LENGTH - 1  # Max size of data field according to protocol
MSG_HEADER_LENGTH = CMD_FIELD_LENGTH + 1 + LENGTH_FIELD_LENGTH + 1  # Exact size of header (CMD+LENGTH fields)
MAX_MSG_LENGTH = MSG_HEADER_LENGTH + MAX_DATA_LENGTH  # Max size of total message
DELIMITER = "|"  # Delimiter character in protocol
DATA_DELIMITER = "#"  # Delimiter in the data part of the message
ACCEPTABLE_COMMANDS = ['LOGIN', 'LOGOUT', 'LOGGED', 'GET_QUESTION', 'SEND_ANSWER', 'MY_SCORE', 'HIGHSCORE', 'LOGIN_OK',
                       'LOGGED_ANSWER', 'YOUR_QUESTION', 'CORRECT_ANSWER', 'WRONG_ANSWER', 'UNACCEPTABLE_ANSWER',
                       'YOUR_SCORE', 'ALL_SCORE', 'ERROR', 'NO_QUESTIONS']
# Protocol Messages
PROTOCOL_CLIENT = {
    'login_msg': 'LOGIN',
    'logout_msg': 'LOGOUT',
    'my_score_msg': 'MY_SCORE',
    'highscore_msg': 'HIGHSCORE',
    'get_question_msg': 'GET_QUESTION',
    'logged_answer_msg': 'LOGGED',
    'send_answer_msg': 'SEND_ANSWER'
}
PROTOCOL_SERVER = {
    'login_ok_msg': 'LOGIN_OK',
    'login_failed_msg': 'ERROR'
}
# Other constants
ERROR_RETURN = None # What is returned in case of an error
################################################################################################
def build_message(cmd, data):
    """Assemble a valid protocol message: 'CMD<padded>|LLLL|data'.

    Improvements vs. original: uses the declared CMD_FIELD_LENGTH /
    LENGTH_FIELD_LENGTH constants instead of duplicated magic numbers 16/4,
    and rejects data longer than MAX_DATA_LENGTH (which previously produced a
    malformed over-long length field).
    :param cmd: command name; must be one of ACCEPTABLE_COMMANDS.
    :param data: data of the command.
    :return: protocol message string, or ERROR_RETURN (None) on error.
    """
    if cmd not in ACCEPTABLE_COMMANDS:
        return ERROR_RETURN
    if len(data) > MAX_DATA_LENGTH:
        # Data would overflow the fixed-width length field.
        return ERROR_RETURN
    cmd_field = cmd.ljust(CMD_FIELD_LENGTH)  # pad command with spaces to 16 chars
    length_field = str(len(data)).zfill(LENGTH_FIELD_LENGTH)  # zero-pad to 4 digits
    return f'{cmd_field}{DELIMITER}{length_field}{DELIMITER}{data}'
def parse_message(data):
    """Parse a protocol message into its command and data fields.

    Fix: split now uses maxsplit=2, so a data payload that itself contains
    the '|' delimiter no longer breaks the three-way unpack and get rejected.
    Also narrowed the bare except to the exceptions this body can raise.
    :param data: raw message string 'CMD|LLLL|data'.
    :return: (cmd, data) on success; (None, None) if the length field does
        not match the payload, the command is unknown, or parsing fails.
    """
    try:
        cmd, msg_len, msg = data.split("|", 2)
        stripped_cmd = cmd.strip()
        stripped_msg_len = msg_len.strip()
        # Declared length must match actual payload length.
        if int(stripped_msg_len) == len(msg) and stripped_cmd in ACCEPTABLE_COMMANDS:
            return stripped_cmd, msg
        return ERROR_RETURN, ERROR_RETURN
    except (ValueError, AttributeError):
        # ValueError: bad unpack / non-numeric length; AttributeError: non-str input.
        return ERROR_RETURN, ERROR_RETURN
def split_data(msg, expected_fields):
    """Split *msg* on the data delimiter and validate the field count.

    Improvement: replaces the manual character-counting loop with str.count
    and uses the declared DATA_DELIMITER constant instead of a literal '#'.
    :param msg: message received.
    :param expected_fields: number of '#' delimiters expected in the message.
    :return: list of fields when the count matches, else ERROR_RETURN (None).
    """
    if msg.count(DATA_DELIMITER) == expected_fields:
        return msg.split(DATA_DELIMITER)
    return ERROR_RETURN
def join_data(msg_fields):
    """Join the given fields into one '#'-delimited string.

    Returns: string that looks like cell1#cell2#cell3.
    :param msg_fields: list of values (stringified) to be joined.
    :return: one string with data delimiters between list values.
    """
    return "#".join(str(field) for field in msg_fields)
|
{"/server_side_trivia.py": ["/chatlib.py"], "/client_side_trivia.py": ["/chatlib.py"]}
|
23,009
|
wh1teone/client_server_trivia_game
|
refs/heads/main
|
/client_side_trivia.py
|
import socket
import chatlib
SERVER_IP = '127.0.0.1'
SERVER_PORT = 5631
# HELPER SOCKET METHODS
def build_and_send_message(conn, cmd, data):
    """Frame *cmd*/*data* with the chat protocol and send it to the server.

    :param conn: server socket object.
    :param cmd: command to be sent to server.
    :param data: data message to send.
    :return: None.
    """
    framed = chatlib.build_message(cmd, data)
    conn.send(framed.encode())
def recv_message_and_parse(conn):
    """Receive one protocol message from the server and parse it.

    :param conn: server socket object.
    :return: (cmd, data), or (None, None) on any receive/parse error.
    """
    try:
        raw = conn.recv(1024).decode()
        return chatlib.parse_message(raw)
    except:
        return chatlib.ERROR_RETURN, chatlib.ERROR_RETURN
def connect():
    """Create a TCP socket connected to the trivia server.

    :return: the connected client socket.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((SERVER_IP, SERVER_PORT))
    print('Connection established to server.\n')
    return sock
def error_and_exit(error_msg):
    """Print the given error message and terminate the client process.

    :param error_msg: error message to display.
    :return: does not return — calls exit().
    """
    print(f'the error: {error_msg} was received...\n exiting client')
    exit()
def login(conn):
    """Prompt for credentials and retry until the server replies LOGIN_OK.

    :param conn: server socket object.
    :return: None.
    """
    cmd = ''
    while cmd != 'LOGIN_OK':
        username = input('Please enter username: \n')
        password = input('Please enter the password \n')
        # NOTE(review): a '#' typed inside username/password would corrupt
        # the delimiter-based payload.
        build_and_send_message(conn, chatlib.PROTOCOL_CLIENT['login_msg'], f'{username}#{password}')
        cmd, data = recv_message_and_parse(conn)
        # Echo the server's reply data (error text on failure, '' on success).
        print(f'{data}')
    print('Logged in.\n')
def logout(conn):
    """Send the server a LOGOUT message.

    :param conn: server socket object.
    :return: None.
    """
    build_and_send_message(conn, chatlib.PROTOCOL_CLIENT['logout_msg'], '')
    print('Logging out...\n')
def build_send_recv_parse(conn, cmd, data):
    """Send *cmd*/*data* to the server and return its parsed reply.

    :param conn: server socket object.
    :param cmd: command message to be sent to server.
    :param data: data to be sent to server.
    :return: (msg_code, srv_data) — the server's reply command and payload.
    """
    build_and_send_message(conn, cmd, data)
    return recv_message_and_parse(conn)
def get_score(conn):
    """Request this user's score from the server and print it.

    Exits the client on any failure.
    :param conn: server socket object.
    :return: None.
    """
    try:
        _cmd, data = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT['my_score_msg'], '')
        print(f'Your score is: {data}\n')
    except:
        error_and_exit(chatlib.ERROR_RETURN)
def get_highscore(conn):
    """Request the current highscore table from the server and print it.

    Exits the client on any failure.
    :param conn: server socket object.
    :return: None.
    """
    try:
        _cmd, data = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT['highscore_msg'], '')
        print(f'The highscore table is:\n{data}\n')
    except:
        error_and_exit(chatlib.ERROR_RETURN)
def play_question(conn):
    """Request a question from the server, prompt the user, and report the result.

    Primary path assumes a 4-answer question (5 '#'-delimited fields); if
    split_data returns None (fewer fields), the TypeError path retries the
    parse as a 2-answer question.
    :param conn: server socket object.
    :return: None.
    """
    cmd, data = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT['get_question_msg'], '')
    try:
        if cmd == 'NO_QUESTIONS':
            print('There are no more questions to ask. game over.')
            return
        else:
            # 4-answer layout: id#question#a1#a2#a3#a4.
            question_list = chatlib.split_data(data, 5)
            user_answer = input(f'{question_list[1]}:\n1. {question_list[2]}\n2. {question_list[3]}\n3. {question_list[4]}\n4. {question_list[5]}\n')
            answer_cmd, answer_data = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT['send_answer_msg'],
                                                            f'{question_list[0]}#{user_answer}')
            try:
                # Keep re-prompting while the server rejects the answer format.
                while answer_cmd == 'UNACCEPTABLE_ANSWER':
                    new_answer = input('Please enter a valid answer (numbers) as options available.\n')
                    answer_cmd, answer_data = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT['send_answer_msg'],
                                                                    f'{question_list[0]}#{new_answer}')
                if answer_cmd == 'CORRECT_ANSWER':
                    print('The answer you provided is correct!')
                elif answer_cmd == 'WRONG_ANSWER':
                    print(f'the answer you provided is wrong.')
            except:
                error_and_exit(chatlib.ERROR_RETURN)
    except TypeError:
        # split_data returned None above -> 2-answer layout: id#question#a1#a2.
        question_list = chatlib.split_data(data, 3)
        user_answer = input(f'{question_list[1]}:\n1. {question_list[2]}\n2. {question_list[3]}\n')
        answer_cmd, answer_data = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT['send_answer_msg'],
                                                        f'{question_list[0]}#{user_answer}')
        try:
            if answer_cmd == 'CORRECT_ANSWER':
                print('The answer you provided is correct!')
            elif answer_cmd == 'WRONG_ANSWER':
                print(f'the answer you provided is wrong.')
            while answer_cmd == 'UNACCEPTABLE_ANSWER':
                new_answer = input('Please enter a valid answer (numbers) as options available.\n')
                answer_cmd, answer_data = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT['send_answer_msg'],
                                                                f'{question_list[0]}#{new_answer}')
                try:
                    if answer_cmd == 'CORRECT_ANSWER':
                        print('The answer you provided is correct!')
                    elif answer_cmd == 'WRONG_ANSWER':
                        print(f'the answer you provided is wrong.')
                    elif answer_cmd == 'NO_QUESTIONS':
                        print('There are no more questions to ask. game over.')
                except:
                    error_and_exit(chatlib.ERROR_RETURN)
        except:
            error_and_exit(chatlib.ERROR_RETURN)
def get_logged_users(conn):
    """Ask the server for the currently-logged-in users and print them.

    :param conn: server socket object.
    :return: None.
    """
    _cmd, data = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT['logged_answer_msg'], '')
    print(f'Connected users_information_dict at this time:\n {data}')
def main():
    """Client entry point: connect, log in, then loop over the menu until 'q'.

    :return: None.
    """
    client_socket = connect()
    login(client_socket)
    user_choice = ''
    while user_choice != 'q':
        user_choice = input('-----------------------------\nplease enter one of the above:\n'
                            'p Play a trivia question\ns Get my score\nh Get high score\n'
                            'q Quit\nl Get current logged users\n-----------------------------\n')
        # NOTE(review): a second invalid entry is accepted as-is — only one
        # re-prompt is performed.
        if user_choice not in ['s', 'h', 'q', 'p', 'l']:
            user_choice = input('-----------------------------\nplease enter one of the above:\n'
                                'p Play a trivia question\ns Get my score\nh Get high score\n'
                                'q Quit\nl Get current logged users\n-----------------------------\n')
        if user_choice == 'h':
            get_highscore(client_socket)
        elif user_choice == 's':
            get_score(client_socket)
        elif user_choice == 'p':
            play_question(client_socket)
        elif user_choice == 'l':
            get_logged_users(client_socket)
    logout(client_socket)
|
{"/server_side_trivia.py": ["/chatlib.py"], "/client_side_trivia.py": ["/chatlib.py"]}
|
23,014
|
ssloggett/verbMGL
|
refs/heads/master
|
/run_simulation.py
|
import corpus_functions
from corpus_functions import *
# Parse every CHILDES .parsed treebank file into NLTK trees.
# NOTE(review): '.*\.parsed' is a non-raw string with an escape sequence —
# consider r'.*\.parsed'.
myTrees = BracketParseCorpusReader(root = 'childes', fileids = '.*\.parsed').parsed_sents()
# Build the hierarchical training corpus and a POS-similarity matrix from it.
sentences = build_corpus(myTrees, flat_structure = False)
similarity = get_pos_similarity(myTrees)
# Run the MGL simulation: training sizes 50..300 in steps of 50, 4 repetitions,
# R-style output enabled, 100-item test set.
simulate(sentences, similarity_matrix = similarity, start =50, max = 300, by = 50, rep = 4, conf=[0], printR = True, test = 100)
|
{"/run_simulation.py": ["/corpus_functions.py"], "/corpus_functions.py": ["/verbMGL.py"]}
|
23,015
|
ssloggett/verbMGL
|
refs/heads/master
|
/corpus_functions.py
|
import nltk
from nltk import *
from nltk.corpus import treebank
import nltk.tree
from nltk.tree import *
import nltk.corpus.reader.bracket_parse
from nltk.corpus.reader.bracket_parse import *
import math
from math import log
import verbMGL
from verbMGL import *
def is_parent(node, subtree):
    """Return True iff *subtree*'s immediate parent exists and is labelled *node*.

    :param node: label string to match (e.g. 'S', 'SQ').
    :param subtree: a ParentedTree node.
    :return: bool.
    """
    parent = subtree.parent()
    return parent is not None and parent.label() == node
def is_grandparent(node, subtree):
    """Return True iff *subtree* has a grandparent labelled *node*.

    Fix: the original dereferenced subtree.parent().parent() without first
    checking that parent() is non-None, raising AttributeError for nodes at
    the tree root; such nodes now simply report False.
    :param node: label string to match (e.g. 'SQ').
    :param subtree: a ParentedTree node.
    :return: bool.
    """
    parent = subtree.parent()
    if parent is None:
        return False
    grandparent = parent.parent()
    return grandparent is not None and grandparent.label() == node
def subject_tag(tree):
    """Relabel subject NPs and their head nouns with '-SUBJ' suffixes.

    An NP immediately followed by a VP sibling, under S/SQ (directly or one
    level up), is treated as a subject; proper-noun heads are normalized to
    common-noun subject tags.
    :param tree: an nltk Tree (converted internally to ParentedTree).
    :return: a plain Tree with relabelled nodes.
    """
    tree = ParentedTree.convert(tree)
    subjects = []
    # Pass 1: collect subject NPs (NP with a VP right sibling under S/SQ).
    for subtree in [x for x in tree.subtrees()]:
        if subtree.right_sibling() is not None:
            if subtree.label() == 'NP' and subtree.right_sibling().label() == 'VP' and (is_parent('S', subtree) or is_parent('SQ', subtree) or is_grandparent('SQ', subtree)):
                subjects.append(subtree)
    # Pass 2: relabel each subject NP and its head preterminal(s).
    for subject in subjects:
        subject.set_label('NP-SUBJ')
        subj_heads = ['NN', 'NNS', 'PRP', 'NNP', 'NNPS']
        for preterminal in subject.subtrees():
            subj_head = False
            if preterminal.label() in subj_heads:
                if is_parent('NP-SUBJ', preterminal): subj_head = True
                elif is_grandparent('NP-SUBJ', preterminal) and preterminal.right_sibling() is None: subj_head = True
                # NOTE(review): this compares a tree object to the string
                # 'POS' (always unequal) — probably meant .label() != 'POS'.
                elif is_grandparent('NP-SUBJ', preterminal) and preterminal.right_sibling() != 'POS': subj_head = True
            if subj_head:
                # Proper nouns are folded into the common-noun subject tags.
                if preterminal.label() == 'NNP': preterminal.set_label('NN-SUBJ')
                elif preterminal.label() == 'NNPS': preterminal.set_label('NNS-SUBJ')
                else: preterminal.set_label(preterminal.label() + '-SUBJ')
    return Tree.convert(tree)
def convert_tree(tree):
    """
    Flatten *tree* into a bracketed token list.

    Produces a list of [token, label] pairs: internal nodes contribute
    ['[', label] (and later [']', label]) while preterminals contribute
    [word, tag].
    """
    tree = ParentedTree.convert(tree)
    subtrees = [x for x in tree.subtrees()]
    open_nodes, closed_nodes, new_tree = [], [], []
    for subtree in subtrees:
        sub_subtrees = [x for x in subtree.subtrees()]
        if len(sub_subtrees) > 1:
            # internal node: emit its opening bracket now; the closing
            # bracket is deferred until all descendants are closed
            open_nodes.insert(0,subtree.treeposition())
            new_tree.append(['[', subtree.label()])
        else:
            # preterminal: emit the word itself and mark it closed
            new_tree.append([subtree.leaves()[0], subtree.label()])
            closed_nodes.append(subtree.treeposition())
    # open_nodes is in reverse discovery order, so inner nodes close first
    for node in open_nodes:
        sub_nodes = [x.treeposition() for x in tree[node].subtrees() if x is not tree[node]]
        if close_check(sub_nodes, closed_nodes):
            # NOTE(review): closing brackets are appended after the token
            # stream rather than interleaved at their structural position —
            # confirm downstream compare() expects this layout.
            new_tree.append([']', tree[node].label()])
            closed_nodes.append(node)
    for node in closed_nodes:
        if node in open_nodes: open_nodes.remove(node)
    return new_tree
def close_check(node_list1, node_list2):
    """Return True iff every element of *node_list1* also occurs in *node_list2*."""
    return all(item in node_list2 for item in node_list1)
def build_corpus(tree_bank, flat_structure = True):
    """
    Convert each tree to a tagged 'word/TAG ...' string, then keep only the
    sentences that contain at least one finite (VBZ/VBP) verb.
    """
    processed = []
    for tree in tree_bank:
        #tree = subject_tag(tree)
        if flat_structure:
            # preterminals only: subtrees with exactly one subtree (itself)
            tagged = [[leaf.leaves()[0], leaf.label()]
                      for leaf in tree.subtrees()
                      if len([s for s in leaf.subtrees()]) == 1]
        else:
            tagged = convert_tree(tree)
        tagged = filter_sentence(tagged)
        if tagged != 'bad-tag':
            processed.append(tagged)
    # retain only sentences with a present-tense verb token
    finite = []
    for string in processed:
        if any(tok[-3:] in ('VBZ', 'VBP') for tok in string.split()):
            finite.append(string)
    return finite
def filter_sentence(sentence):
    """
    Normalize a [[word, tag], ...] sentence in place and serialize it.

    Proper nouns collapse to common nouns, bare VB becomes VBP, and BE/DO/
    HAVE auxiliaries are regularized to canonical lemma + finite tag.  If any
    token carries an unrecognized tag the string 'bad-tag' is returned;
    otherwise a 'word/TAG ...' string (punctuation dropped) is returned.
    """
    valid_tags = ['ROOT', 'FRAG', 'SBARQ', 'SBAR', 'SQ', 'S', 'SINV', 'WHNP', 'NP', 'VP', 'PRT', 'INTJ', 'WHPP', 'PP', 'WHADVP', 'ADVP', 'WHADJP', 'ADJP', 'NP-SUBJ',
                  'WP', 'NN', 'NNS', 'PRP', 'PRP$', 'CD', 'JJ', 'IN', 'VB', 'UH', 'TO', 'VBP', 'WRB', 'NOT', 'DT', 'RB', 'MD', 'RP', 'VBG', 'POS', 'VBZ',
                  'CC', 'VBD', 'COMP', 'EX', 'VBN', 'WDT', 'PDT', 'WP$', 'JJR', 'NN-SUBJ', 'NNS-SUBJ', 'PRP-SUBJ']
    skip_tags = [".", ",", ""]
    # word-keyed auxiliary regularizations (BE / DO / HAVE)
    aux_by_word = {
        'was': ('BE', 'VBZ'), 'is': ('BE', 'VBZ'),
        'were': ('BE', 'VBP'), 'are': ('BE', 'VBP'),
        'does': ('DO', 'VBZ'), 'do': ('DO', 'VBP'), 'did': ('DO', 'VBD'),
        'has': ('HAVE', 'VBZ'), 'have': ('HAVE', 'VBP'), 'had': ('HAVE', 'VBD'),
    }
    for token in sentence:
        # regularize noun phrases (remove proper-noun tags)
        if token[1] == 'NNP': token[1] = 'NN'
        if token[1] == 'NNPS': token[1] = 'NNS'
        # label all non-finite verbs VBP
        if token[1] == 'VB': token[1] = 'VBP'
        # clitic copulas
        if token[1] == 'COP' and token[0] == "'s": token[0], token[1] = 'BE', 'VBZ'
        if token[1] == 'COP' and token[0] == "'re": token[0], token[1] = 'BE', 'VBP'
        # full-form auxiliaries
        if token[0] in aux_by_word: token[0], token[1] = aux_by_word[token[0]]
        # any unrecognized tag invalidates the whole sentence
        if token[1] not in valid_tags and token[1] not in skip_tags: return 'bad-tag'
    return ' '.join('/'.join(tok) for tok in sentence if tok[1] not in skip_tags)
def get_pos_similarity(corpus):
    """
    Derive a category-similarity matrix from parent-node distributions.

    Each category's distribution over parent labels is estimated from
    *corpus*; similarity between two categories is the negated relative
    entropy (KL divergence) of those distributions.  Same-category entries
    are pinned to 20, and the special alignment symbols 'VB____', '*' and
    'XP' get fixed scores.  Returns sim[cat1][cat2] -> float.
    """
    from math import log
    pos = ['ROOT', 'FRAG', 'SBARQ', 'SBAR', 'SQ', 'S', 'SINV', 'WHNP', 'NP', 'VP', 'PRT', 'INTJ', 'WHPP', 'PP', 'WHADVP', 'ADVP', 'WHADJP', 'ADJP', 'NP-SUBJ',
           'WP', 'NN', 'NNS', 'PRP', 'PRP$', 'CD', 'JJ', 'IN', 'VB', 'UH', 'TO', 'VBP', 'WRB', 'NOT', 'DT', 'RB', 'MD', 'RP', 'VBG', 'POS', 'VBZ',
           'CC', 'VBD', 'COMP', 'EX', 'VBN', 'WDT', 'PDT', 'WP$', 'JJR', 'NN-SUBJ', 'NNS-SUBJ', 'PRP-SUBJ']
    pos_frequency_dict, pos_similarity_dict = {}, {}
    # Fill in default values for frequency and similarity dictionaries
    # (tiny non-zero pseudo-count avoids log(0) / division by zero below)
    for p in pos:
        pos_frequency_dict[p], pos_similarity_dict[p] = {}, {}
        for p2 in pos:
            pos_frequency_dict[p][p2], pos_similarity_dict[p][p2] = 0.0000000001, 0
    # Loop over trees in corpus and, for each subtree, increment the value for the subtree's parent
    for tree in corpus:
        #tree = subject_tag(tree)
        tree = ParentedTree.convert(tree)
        for subtree in tree.subtrees():
            current_pos = subtree.label()
            parent_node = subtree.parent()
            if parent_node is not None and current_pos in pos and parent_node .label() in pos: pos_frequency_dict[current_pos][parent_node.label()] += 1
    # Loop over frequency dictionary, changing frequency counts to proportions
    # NOTE(review): this loop rebinds `pos` (the category list) to dict keys
    for pos in pos_frequency_dict.keys():
        total = sum(pos_frequency_dict[pos].values())
        for pos2 in pos_frequency_dict[pos].keys():
            pos_frequency_dict[pos][pos2] = pos_frequency_dict[pos][pos2]/float(total)
    # Loop over entries in similiarity dictionary, calculating relative entropy for each category pair based on parent-node distributions
    for current_pos in pos_similarity_dict.keys():
        for compare_pos in pos_similarity_dict[current_pos].keys():
            #relative_entropy = []
            relative_entropy = 0
            for parent in pos_similarity_dict[current_pos].keys():
                p = pos_frequency_dict[current_pos][parent]
                q = pos_frequency_dict[compare_pos][parent]
                #relative_entropy.append(float(p)*log(float(p)/float(q), 2))
                relative_entropy += float(p)*log(float(p)/float(q), 2)
            #pos_similarity_dict[current_pos][compare_pos] = -sum(relative_entropy)
            pos_similarity_dict[current_pos][compare_pos] = -relative_entropy
        # fixed scores: identity bonus, gap penalty, neutral wildcards
        pos_similarity_dict[current_pos][current_pos] = 20
        pos_similarity_dict[current_pos]['VB____'] = -100
        pos_similarity_dict[current_pos]['*'] = 0
        pos_similarity_dict[current_pos]['XP'] = 0
    # Add in values for the gap position and the wild-card character
    pos_similarity_dict['VB____'] = {}
    pos_similarity_dict['*'] = {}
    pos_similarity_dict['XP'] = {}
    for pos in pos_similarity_dict.keys():
        pos_similarity_dict['VB____'][pos] = -100
        pos_similarity_dict['*'][pos] = 0
        pos_similarity_dict['XP'][pos] = 0
    pos_similarity_dict['VB____']['VB____'] = 100
    pos_similarity_dict['*']['*'] = 0
    pos_similarity_dict['XP']['XP'] = 0
    return pos_similarity_dict
def simulate(data, similarity_matrix, start = 50, max=50, by = 25, rep=5, conf=[0,.5], morphs = ['VBZ', 'VBP'], test=200, printR=False):
    """
    Run *rep* learning simulations over freshly shuffled *data*.

    Training grows from *start* to *max* sentences in steps of *by*; rules
    and contexts are carried across increments so each grammar extends the
    previous one.  At the final size the grammar is (optionally) written out
    and accuracy is measured on the next *test* held-out sentences.

    NOTE(review): parameters `max` and `test` shadow builtins, and `conf`
    is accepted but never read in this body — confirm both are intentional.
    """
    import time
    from random import shuffle
    for x in range(0,rep):
        shuffle(data)
        n=start
        rules,contexts = {},{}
        while n <= max:
            if n < max:
                # intermediate increment: grow the grammar silently
                grammar = generalize(data[0:n], similarity_matrix, rules, contexts, morphology = morphs, printRules = False)
                rules = grammar[0]
                contexts = grammar[1]
            else : rules = generalize(data[0:n], similarity_matrix, rules, contexts, morphology = morphs, printRules = printR, fileName = 'MGLgrammar.txt')[0]
            # evaluate on the held-out slice immediately after the training data
            a = accuracy(data[n:n+test], rules, morphology = morphs, printAcc = printR, fileName = 'MGLresults.txt', similarity_matrix=similarity_matrix,trainSize=n,grammar=x)
            n+=by
|
{"/run_simulation.py": ["/corpus_functions.py"], "/corpus_functions.py": ["/verbMGL.py"]}
|
23,016
|
ssloggett/verbMGL
|
refs/heads/master
|
/verbMGL.py
|
####################################################
# Module 1: generate idiosyncratic rules from corpus
####################################################
# Generate the first set of rules from the data:
# Find the first instance of 'VBZ' or 'VBP' in a sentence, replace it with 'VB____'
# Add a rule to the list of the form [Structural Change, Context]
def generate_idiosyncratic(training, morphology = ['VBZ', 'VBP']):
    """
    Build the initial rule contexts directly from the training data.

    For every target morpheme, each 'word/TAG ...' sentence is scanned for
    the first token bearing that tag; the tokenized sentence is recorded
    under the morpheme and the matching slot is blanked to the gap symbol
    'VB____' (the stored copy reflects the blanking).
    """
    by_morpheme = {}
    for morpheme in morphology:
        by_morpheme[morpheme] = []
    for morpheme in morphology:
        for line in training:
            tokens = [[w.split('/')[0], w.split('/')[1]] for w in line.split()]
            for token in tokens:
                if token[1] == morpheme:
                    by_morpheme[token[1]].append(tokens)
                    token[1] = 'VB____'
                    break
    return by_morpheme
# Given two contexts, compare their alignments and keep identical elements.
# Non-identical elements are collapsed into '*'
def compare(c1,c2,similarity):
    """
    Align contexts *c1* and *c2* and keep only their shared material.

    Tokens that disagree collapse to wildcards ('*'); bracket pairs whose
    labels differ collapse to the generic phrase label 'XP'.  Returns 'NA'
    when the result is not a valid single-gap context (mismatched brackets,
    missing 'VB____' gap, or more than one unclosed node).
    """
    from needleman_wunsch import align
    alignment = align(c1,c2,S=similarity)
    if alignment[0][0] == 'NA': return 'NA'
    c = []
    for i in range(0,len(alignment[0])):
        # brackets must align with brackets of the same orientation
        if alignment[0][i][0] in ['[',']'] and alignment[0][i][0] != alignment[1][i][0]:
            return 'NA'
        if alignment[0][i][0] == alignment[1][i][0] in ['[',']'] and (alignment[0][i][1] != alignment[1][i][1]):
            # same bracket, different category: generalize to XP
            c.append([alignment[0][i][0], 'XP'])
        elif alignment[0][i][1] == alignment[1][i][1]:
            if alignment[0][i][0] == alignment[1][i][0]:
                c.append(alignment[0][i])
            else: c.append(['*', alignment[0][i][1]])
        else: c.append(['*','*'])
    # collapse runs of consecutive full wildcards into a single '*'
    context = [c[0]]
    for i in range(1,len(c)):
        if c[i][1] != '*' or c[i-1][1] != '*': context.append(c[i])
    # validate bracket nesting and require exactly one gap symbol
    # NOTE(review): closed_nodes is collected but never read here
    open_nodes, closed_nodes = [], []
    hasGap = False
    for word in context:
        if word[0] == '[': open_nodes.append(word[1])
        if word[0] == ']':
            if len(open_nodes) == 0: return 'NA'
            elif open_nodes[len(open_nodes)-1] == word[1]:
                open_nodes = open_nodes[0:len(open_nodes)-1]
            else: return 'NA'
        if word[1] == 'VB____': hasGap = True
    if not hasGap or len(open_nodes)>1: return 'NA'
    return context
#######################################################
# Module 2: generalize by comparing rules with same LHS
#######################################################
# Main function for iteratively looping over rules to generalize and create new rules
def generalize_idiosyncratic(idiosyncratic, prior_rules, prior_contexts, similarity_matrix):
    """
    First-pass generalization over raw training contexts.

    All pairs of idiosyncratic contexts sharing the same structural change
    are aligned with compare(); each novel generalized context is stored in
    *prior_contexts* and appended to *prior_rules* together with its
    confidence.  Returns [prior_rules, prior_contexts] (mutated in place).
    """
    for change in idiosyncratic:
        for i in range(0,len(idiosyncratic[change])):
            for context in idiosyncratic[change][i+1:]:
                new_context = compare(idiosyncratic[change][i], context, similarity_matrix)
                if change in prior_contexts.keys():
                    # change already known: only record genuinely new contexts
                    if new_context != 'NA' and new_context not in prior_contexts[change]:
                        prior_contexts[change].append(new_context)
                        posterior = confidence(change, new_context, idiosyncratic, similarity_matrix)
                        prior_rules[change].append((new_context, posterior))
                        if len(prior_rules[change])%100 == 0: print str(len(prior_rules[change]))+' '+change+' rules created'
                elif new_context != 'NA':
                    # first context seen for this change: initialize both maps
                    prior_contexts[change] = [new_context]
                    posterior = confidence(change, new_context, idiosyncratic, similarity_matrix)
                    prior_rules[change] = [(new_context, posterior)]
                    if len(prior_rules[change])%100 == 0: print str(len(prior_rules[change]))+' '+change+' rules created'
    return [prior_rules, prior_contexts]
def generalize(data, similarity_matrix, rules = {}, contexts = {}, morphology = ['VBZ', 'VBP'], printRules = True, fileName = 'bayesMGL_rules.txt'):
    """
    Top-level grammar induction.

    Builds idiosyncratic rules from *data*, runs the first-pass
    generalization, then iteratively compares rule contexts that share a
    structural change to produce progressively more general rules.
    Optionally writes the grammar to *fileName*.  Returns [rules, contexts].

    NOTE(review): the mutable defaults ``rules={}`` / ``contexts={}`` are kept
    for interface compatibility; callers should pass fresh dicts per run (as
    ``simulate`` does) to avoid state leaking between calls.
    """
    # Generate idiosyncratic rules
    idiosyncratic = generate_idiosyncratic(data, morphology)
    # Do first-level generalization of idiosyncratic rules
    generalized = generalize_idiosyncratic(idiosyncratic, rules, contexts, similarity_matrix)
    rules = generalized[0]
    contexts = generalized[1]
    print('Idiosyncratic rules generalized.')
    for change in rules:
        for i in range(0,len(rules[change])):
            for context in rules[change][i+1:]:
                new_context = compare(rules[change][i][0], context[0], similarity_matrix)
                # BUG FIX: the original tested ``new_context is not 'NA'`` —
                # an identity comparison against a string literal that only
                # works by accident of CPython interning (and warns on 3.8+).
                # Use a value comparison instead.
                if new_context != 'NA' and new_context not in contexts[change]:
                    contexts[change].append(new_context)
                    posterior = confidence(change, new_context, idiosyncratic, similarity_matrix)
                    rules[change].append((new_context, posterior))
                    if len(rules[change])%100 == 0: print(str(len(rules[change]))+' '+change+' rules created')
        print(change+' rules generalized')
    if printRules: print_rules(rules, [[change, len(idiosyncratic[change])] for change in idiosyncratic], fileName)
    return [rules,contexts]
def confidence(change, context, train, similarity_matrix):
    """
    Bayesian-style confidence for a rule.

    prior      = relative frequency of *change* among all training examples
    likelihood = share of *change*'s sentences whose alignment with
                 *context* reproduces *context* exactly
    Returns prior * likelihood.
    """
    total_examples = sum(len(examples) for examples in train.values())
    prior = float(len(train[change])) / total_examples
    matched = sum(1 for example in train[change]
                  if compare(context, example, similarity_matrix) == context)
    likelihood = float(matched) / len(train[change])
    return prior * likelihood
def accuracy(data, rules, similarity_matrix, morphology = ['VBZ', 'VBP'], printAcc = True, fileName = 'bayesMGL_results.txt', trainSize = 'NA', grammar = 'NA'):
    """
    Score the induced *rules* on held-out *data*.

    For each test sentence, the rules of every morpheme are matched against
    the sentence's context; matched confidences are summed and the morpheme
    with the highest total becomes the prediction.  Returns a dict of
    accuracy proportions keyed by morpheme plus 'total'.
    """
    print 'Checking accuracy'
    test_data = generate_idiosyncratic(data, morphology)
    # Initialize a vector to store choice information
    # {sentence: {observed_morph:, morph1:, morph2:, max: }}
    results = {'sentence': [], 'observed':[], 'predicted':[], 'accuracy':[]}
    denoms, accs = {'total':0}, {'total':0}
    for morpheme in rules:
        results[morpheme] = []
        denoms[morpheme] = len(test_data[morpheme])
        accs[morpheme] = 0
    denoms['total'] = sum(denoms.values())
    for change in test_data:
        for context in test_data[change]:
            # NOTE(review): `max` shadows the builtin within this scope
            max = 0
            choice = 'NA'
            results['sentence'].append(context)
            results['observed'].append(change)
            for morpheme in rules:
                match = 0
                # sum the confidence of every rule whose context subsumes
                # this sentence's context
                for environment in rules[morpheme]:
                    if compare(context,environment[0],similarity_matrix) == environment[0]: match += environment[1]
                results[morpheme].append(match)
                if match > max:
                    choice = morpheme
                    max = match
            results['predicted'].append(choice)
            if choice == change:
                results['accuracy'].append(1)
                accs[change] += 1
                accs['total'] += 1
            else: results['accuracy'].append(0)
    # convert raw hit counts to proportions
    for key in denoms: accs[key] = float(accs[key])/denoms[key]
    if printAcc: print_accuracy(results, fileName, trainSize, grammar)
    return accs
#######################
# Convenience Functions
#######################
def print_rules(rules, training, fileName):
    """
    Append the induced grammar to *fileName* (creating it if necessary).

    *training* is a list of [change, count] pairs summarizing the training
    data; each rule line shows its confidence followed by the rewrite and
    its context.

    BUG FIX: the original opened the file without a context manager, leaking
    the handle if any write raised; ``with`` guarantees it is closed.
    """
    import os.path
    print('Writing to rule file')
    new_file = not os.path.exists(fileName)
    with open(fileName, 'w' if new_file else 'a') as rules_file:
        if not new_file:
            # separate this grammar from the previous one in the file
            rules_file.write('\n')
        rules_file.write('#########################\n')
        rules_file.write('Training sentences:\n'+'\t\t'.join([i[0]+': '+str(i[1]) for i in training]))
        rules_file.write('\nTotal rules:\n'+'\t\t'.join([change+': '+str(len(rules[change])) for change in rules]))
        rules_file.write('\n#########################\n')
        rules_file.write('confidence:\trule:\n')
        for change in rules:
            for context in rules[change]:
                rules_file.write(str(format(context[1],'.3f'))+'\t\t0-->'+change+'/ '+' '.join(['/'.join(i) for i in context[0]])+'\n')
def print_accuracy(results, fileName, train, grammar):
    """
    Append per-sentence prediction results as TSV rows to *fileName*,
    writing a header row when the file is first created.

    *results* must provide parallel lists under 'observed', 'predicted',
    'accuracy', 'VBZ' and 'VBP'.

    BUG FIX: the original opened the file without a context manager, leaking
    the handle if any write raised; ``with`` guarantees it is closed.
    """
    import os.path
    print('Writing to accuracy file')
    new_file = not os.path.exists(fileName)
    with open(fileName, 'w' if new_file else 'a') as accuracy_file:
        if new_file:
            accuracy_file.write('grammar\ttrainSize\tobserved\tpredicted\taccuracy\tVBZ\tVBP\n')
        for i in range(0,len(results['observed'])):
            line = [grammar,train, results['observed'][i], results['predicted'][i], results['accuracy'][i], results['VBZ'][i], results['VBP'][i]]
            accuracy_file.write('\t'.join([str(x) for x in line])+'\n')
|
{"/run_simulation.py": ["/corpus_functions.py"], "/corpus_functions.py": ["/verbMGL.py"]}
|
23,017
|
ssloggett/verbMGL
|
refs/heads/master
|
/needleman_wunsch.py
|
def align(seq1, seq2, S, insertion_penalty = -10, deletion_penalty = -10):
    """
    Find the optimum global alignment of `seq1` and `seq2` (lists of
    [symbol, tag] pairs) using the Needleman-Wunsch algorithm.

    NOTE(review): the original docstring described this as *local*
    Smith-Waterman alignment with -1 penalties; the code fills a full DP
    table and backtracks from the far corner — i.e. a global alignment —
    and the default gap penalties are -10.

    `insertion_penalty` penalty for an insertion (default: -10)
    `deletion_penalty` penalty for a deletion (default: -10)
    `S` nested dict giving the match score between element tags
    """
    import numpy
    DELETION, INSERTION, MATCH = range(3)
    m, n = len(seq1), len(seq2)
    # Construct the similarity matrix in p[i][j], and remember how
    # it was constructed it -- insertion, deletion or (mis)match -- in
    # q[i][j]
    p = numpy.zeros((m + 1, n + 1))
    q = numpy.zeros((m + 1, n + 1))
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            deletion = (p[i - 1][j] + deletion_penalty, DELETION)
            insertion = (p[i][j - 1] + insertion_penalty, INSERTION)
            # scoring compares the tag (second element) of each pair
            match = (p[i - 1][j - 1] + S[seq1[i-1][1]][seq2[j-1][1]], MATCH)
            p[i][j], q[i][j] = max(deletion, insertion, match)
    # Yield the aligned sequences one character at a time in reverse order.
    def backtrack():
        i, j = m, n
        while i > 0 or j > 0:
            # NOTE(review): the i == 1 / j == 1 branches dump the remainder
            # of the other sequence against wildcards instead of consulting
            # q[i][j]; confirm this boundary handling is intended.
            if i == 1:
                while j > 1:
                    j -= 1
                    yield ['*','*'], seq2[j]
                i,j=0,0
                yield seq1[i], seq2[j]
            elif j == 1:
                j = 0
                while i > 1:
                    i -= 1
                    yield seq1[i], ['*','*']
                i,j=0,0
                yield seq1[i], seq2[j]
            elif q[i][j] == MATCH:
                i -= 1
                j -= 1
                yield seq1[i], seq2[j]
            elif q[i][j] == INSERTION:
                j -= 1
                yield ['*','*'], seq2[j]
            elif q[i][j] == DELETION:
                i -= 1
                yield seq1[i], ['*','*']
    return [s[::-1] for s in zip(*backtrack())]
|
{"/run_simulation.py": ["/corpus_functions.py"], "/corpus_functions.py": ["/verbMGL.py"]}
|
23,033
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/tests/test_points.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.points functionality
"""
import numpy as np
import pytest
from brainnotation import points
def test_point_in_triangle():
    # triangle lying in the x = 0 plane; each case is (point, inside, dist)
    tri = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 1]])
    cases = [
        ((0, 0.5, 0.5), True, 0),          # on the triangle itself
        ((0.5, 0, 0), True, 0.5),          # off-plane, projects inside
        ((-0.5, -0.5, -0.5), False, 0.5),  # projects outside the triangle
    ]
    for coords, exp_inside, exp_dist in cases:
        inside, pdist = points.point_in_triangle(np.array(coords), tri)
        assert bool(inside) == exp_inside
        assert pdist == exp_dist
# NOTE(review): the graph/edge helpers below (triangle lookup, edge
# extraction, surface-graph construction, geodesic distance) are still
# untested; each stub is marked xfail so pytest records an expected failure
# instead of a hard error until real tests are written.
@pytest.mark.xfail
def test_which_triangle():
    assert False
@pytest.mark.xfail
def test_get_shared_triangles():
    assert False
@pytest.mark.xfail
def test_get_direct_edges():
    assert False
@pytest.mark.xfail
def test_get_indirect_edges():
    assert False
@pytest.mark.xfail
def test_make_surf_graph():
    assert False
@pytest.mark.xfail
def test_get_surface_distance():
    assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,034
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/datasets/utils.py
|
# -*- coding: utf-8 -*-
"""
Utilites for loading / creating datasets
"""
import json
import os
from pkg_resources import resource_filename
import requests
RESTRICTED = ["grh4d"]
def _osfify_urls(data, return_restricted=True):
    """
    Formats `data` object with OSF API URL

    Recursively walks nested dicts/lists, replacing each `url` entry (a
    [project, path] pair) with a fully formatted OSF storage URL.
    Restricted entries are dropped (returned as None and filtered out of
    lists) unless `return_restricted` is True.

    Parameters
    ----------
    data : object
        If dict with a `url` key, will format OSF_API with relevant values
    return_restricted : bool, optional
        Whether to return restricted annotations. These will only be accesible
        with a valid OSF token. Default: True

    Returns
    -------
    data : object
        Input data with all `url` dict keys formatted
    """
    OSF_API = "https://files.osf.io/v1/resources/{}/providers/osfstorage/{}"
    # leaves (strings / None) pass through unchanged
    if isinstance(data, str) or data is None:
        return data
    elif 'url' in data:
        # if url is None then we this is a malformed entry and we should ignore
        if data['url'] is None:
            return
        # if the url isn't a string assume we're supposed to format it
        elif not isinstance(data['url'], str):
            # first element of the url pair is the OSF project id
            if data['url'][0] in RESTRICTED and not return_restricted:
                return
            data['url'] = OSF_API.format(*data['url'])
    try:
        # dict: recurse into every value
        for key, value in data.items():
            data[key] = _osfify_urls(value, return_restricted)
    except AttributeError:
        # list (no .items()): recurse into every element
        for n, value in enumerate(data):
            data[n] = _osfify_urls(value, return_restricted)
        # drop the invalid entries
        data = [d for d in data if d is not None]
    return data
def get_dataset_info(name, return_restricted=True):
    """
    Look up OSF resource information for dataset *name*.

    Parameters
    ----------
    name : str
        Name of dataset
    return_restricted : bool, optional
        Whether to return restricted annotations. These will only be accesible
        with a valid OSF token. Default: True

    Returns
    -------
    dataset : dict or list-of-dict
        Information on requested data
    """
    resource_fn = resource_filename('brainnotation',
                                    os.path.join('datasets', 'data', 'osf.json'))
    with open(resource_fn) as src:
        resources = _osfify_urls(json.load(src), return_restricted)
    try:
        return resources[name]
    except KeyError:
        raise KeyError("Provided dataset '{}' is not valid. Must be one of: {}"
                       .format(name, sorted(resources.keys())))
def get_data_dir(data_dir=None):
    """
    Gets path to brainnotation data directory, creating it if needed

    Parameters
    ----------
    data_dir : str, optional
        Path to use as data directory. If not specified, will check for
        environmental variable 'BRAINNOTATION_DATA'; if that is not set, will
        use `~/brainnotation-data` instead. Default: None

    Returns
    -------
    data_dir : str
        Path to use as data directory
    """
    if data_dir is None:
        data_dir = os.environ.get('BRAINNOTATION_DATA',
                                  os.path.join('~', 'brainnotation-data'))
    data_dir = os.path.expanduser(data_dir)
    # exist_ok avoids the check-then-create race of the original
    # ``if not exists: makedirs`` sequence (two processes could both pass
    # the check and one would then crash in makedirs).
    os.makedirs(data_dir, exist_ok=True)
    return data_dir
def _get_token(token=None):
"""
Returns `token` if provided or set as environmental variable
Parameters
----------
token : str, optional
OSF personal access token for accessing restricted annotations. Will
also check the environmental variable 'BRAINNOTATION_OSF_TOKEN' if not
provided; if that is not set no token will be provided and restricted
annotations will be inaccessible. Default: None
Returns
-------
token : str
OSF token
"""
if token is None:
token = os.environ.get('BRAINNOTATION_OSF_TOKEN', None)
return token
def _get_session(token=None):
    """
    Build a requests.Session, adding a Bearer auth header when a token is
    available (either passed directly or via 'BRAINNOTATION_OSF_TOKEN').
    """
    sess = requests.Session()
    auth_token = _get_token(token)
    if auth_token is not None:
        sess.headers['Authorization'] = 'Bearer {}'.format(auth_token)
    return sess
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,035
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/tests/test_resampling.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.resampling functionality
"""
import pytest
from brainnotation import resampling
# NOTE(review): most resampling paths are still unimplemented as tests;
# those needing Connectome Workbench carry the `workbench` marker, and all
# stubs are xfail so pytest records expected failures rather than errors.
@pytest.mark.xfail
def test__estimate_density():
    assert False
@pytest.mark.xfail
@pytest.mark.workbench
def test_downsample_only():
    assert False
@pytest.mark.xfail
@pytest.mark.workbench
def test_transform_to_src():
    assert False
@pytest.mark.xfail
@pytest.mark.workbench
def test_transform_to_trg():
    assert False
@pytest.mark.xfail
@pytest.mark.workbench
def test_transform_to_alt():
    assert False
@pytest.mark.xfail
def test_mni_transform():
    assert False
def test__check_altspec():
    # a valid (template, density) pair is returned unchanged
    spec = ('fsaverage', '10k')
    assert resampling._check_altspec(spec) == spec
    # None, missing density, and unknown density must all raise
    for spec in (None, ('fsaverage',), ('fsaverage', '100k')):
        with pytest.raises(ValueError):
            resampling._check_altspec(spec)
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,036
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/images.py
|
# -*- coding: utf-8 -*-
"""
Functions for operating on images + surfaces
"""
import gzip
import os
from pathlib import Path
from typing import Iterable
import nibabel as nib
from nibabel.filebasedimages import ImageFileError
import numpy as np
from scipy.interpolate import griddata
PARCIGNORE = [
'unknown', 'corpuscallosum', 'Background+FreeSurfer_Defined_Medial_Wall',
'???'
]
def construct_surf_gii(vert, tri):
    """
    Build a surface GIFTI image from vertex and triangle arrays.

    Parameters
    ----------
    vert : (N, 3)
        Vertices of surface mesh
    tri : (T, 3)
        Triangles comprising surface mesh

    Returns
    -------
    img : nib.gifti.GiftiImage
        Surface image
    """
    vert_darr = nib.gifti.GiftiDataArray(
        vert, 'NIFTI_INTENT_POINTSET', 'NIFTI_TYPE_FLOAT32',
        coordsys=nib.gifti.GiftiCoordSystem(3, 3))
    tri_darr = nib.gifti.GiftiDataArray(
        tri, 'NIFTI_INTENT_TRIANGLE', 'NIFTI_TYPE_INT32')
    return nib.GiftiImage(darrays=[vert_darr, tri_darr])
def construct_shape_gii(data, names=None, intent='NIFTI_INTENT_SHAPE'):
    """
    Build a shape/label GIFTI image from `data`.

    Parameters
    ----------
    data : (N[, F]) array_like
        Input data (where `F` corresponds to different features, if applicable)
    names : list of str, optional
        Per-feature names stored in each data array's metadata
    intent : str, optional
        NIFTI intent code; 'NIFTI_INTENT_LABEL' stores int32, anything else
        float32. Default: 'NIFTI_INTENT_SHAPE'

    Returns
    -------
    img : nib.gifti.GiftiImage
        Shape image
    """
    dtype = 'int32' if intent == 'NIFTI_INTENT_LABEL' else 'float32'
    if data.ndim == 1:
        data = data[:, None]
    if names is None:
        metadata = [{} for _ in range(data.shape[1])]
    else:
        if len(names) != data.shape[1]:
            raise ValueError('Length of provided `names` does not match '
                             'number of features in `data`')
        metadata = [{'Name': name} for name in names]
    darrays = [
        nib.gifti.GiftiDataArray(column.astype(dtype), intent=intent,
                                 datatype=f'NIFTI_TYPE_{dtype.upper()}',
                                 meta=meta)
        for column, meta in zip(data.T, metadata)
    ]
    return nib.GiftiImage(darrays=darrays)
def fix_coordsys(fn, val=3):
    """
    Sets {xform,data}space of coordsys for GIFTI image `fn` to `val`
    Parameters
    ----------
    fn : str or os.PathLike
        Path to GIFTI image
    Returns
    -------
    fn : os.PathLike
        Path to GIFTI image
    """
    fn = Path(fn)
    img = nib.load(fn)
    # only the first data array's coordinate system is patched, in place
    coordsys = img.darrays[0].coordsys
    coordsys.dataspace = val
    coordsys.xformspace = val
    nib.save(img, fn)
    return fn
def load_nifti(img):
    """
    Loads nifti file `img`
    Parameters
    ----------
    img : os.PathLike or nib.Nifti1Image object
        Image to be loaded
    Returns
    -------
    img : nib.Nifti1Image
        Loaded NIFTI image
    """
    try:
        img = nib.load(img)
    except (TypeError) as err:
        # a pre-loaded Nifti1Image makes nib.load() raise a TypeError with
        # this exact message; in that case pass `img` through unchanged.
        # NOTE(review): matching on the message text is brittle — it depends
        # on os.stat()'s wording staying stable across Python versions
        msg = ('stat: path should be string, bytes, os.PathLike or integer, '
               'not Nifti1Image')
        if not str(err) == msg:
            raise err
    return img
def load_gifti(img):
    """
    Loads gifti file `img`
    Will try to gunzip `img` if gzip is detected, and will pass pre-loaded
    GiftiImage object
    Parameters
    ----------
    img : os.PathLike or nib.GiftiImage object
        Image to be loaded
    Returns
    -------
    img : nib.GiftiImage
        Loaded GIFTI images
    """
    try:
        img = nib.load(img)
    except (ImageFileError, TypeError) as err:
        # it's gzipped, so read the gzip and pipe it in
        if isinstance(err, ImageFileError) and str(err).endswith('.gii.gz"'):
            with gzip.GzipFile(img) as gz:
                img = nib.GiftiImage.from_bytes(gz.read())
        # it's not a pre-loaded GiftiImage so error out
        # NOTE(review): like `load_nifti`, this matches os.stat()'s exact
        # TypeError message to detect a pre-loaded image — brittle but
        # intentional; any other TypeError is re-raised
        elif (isinstance(err, TypeError)
                and not str(err) == 'stat: path should be string, bytes, os.'
                                    'PathLike or integer, not GiftiImage'):
            raise err
    return img
def load_data(data):
    """
    Small utility function to load + stack `data` images (gifti / nifti)
    Parameters
    ----------
    data : tuple-of-str or os.PathLike or nib.GiftiImage or nib.Nifti1Image
        Data to be loaded
    Returns
    -------
    out : np.ndarray
        Loaded `data`
    """
    # normalize a single path / image into a one-element tuple
    if isinstance(data, (str, os.PathLike)) or not isinstance(data, Iterable):
        data = (data,)
    loaded = []
    for img in data:
        # try GIFTI first; fall back to NIFTI when agg_data() is unavailable
        try:
            loaded.append(load_gifti(img).agg_data())
        except (AttributeError, TypeError):
            loaded.append(load_nifti(img).get_fdata())
    return np.hstack(loaded)
def obj_to_gifti(obj, fn=None):
    """
    Converts CIVET `obj` surface file to GIFTI format
    Parameters
    ----------
    obj : str or os.PathLike
        CIVET file to be converted
    fn : str or os.PathLike, None
        Output filename. If not supplied uses input `obj` filename (with
        appropriate suffix). Default: None
    Returns
    -------
    fn : os.PathLike
        Path to saved image file
    """
    # deferred import: brainnotation.civet depends (indirectly) on this
    # module, so a top-level import would be circular
    from brainnotation.civet import read_civet_surf
    img = construct_surf_gii(*read_civet_surf(Path(obj)))
    if fn is None:
        fn = obj
    fn = Path(fn).resolve()
    # swap the CIVET suffix for the conventional GIFTI surface suffix
    if fn.name.endswith('.obj'):
        fn = fn.parent / fn.name.replace('.obj', '.surf.gii')
    nib.save(img, fn)
    return fn
def fssurf_to_gifti(surf, fn=None):
    """
    Converts FreeSurfer `surf` surface file to GIFTI format
    Parameters
    ----------
    surf : str or os.PathLike
        FreeSurfer surface file to be converted
    fn : str or os.PathLike, None
        Output filename. If not supplied uses input `surf` filename (with
        appropriate suffix). Default: None
    Returns
    -------
    fn : os.PathLike
        Path to saved image file
    """
    img = construct_surf_gii(*nib.freesurfer.read_geometry(Path(surf)))
    if fn is None:
        # str() so os.PathLike inputs (accepted per docstring) don't raise
        # a TypeError on `+` concatenation
        fn = str(surf) + '.surf.gii'
    fn = Path(fn)
    nib.save(img, fn)
    return fn
def fsmorph_to_gifti(morph, fn=None, modifier=None):
    """
    Converts FreeSurfer `morph` data file to GIFTI format
    Parameters
    ----------
    morph : str or os.PathLike
        FreeSurfer morph file to be converted
    fn : str or os.PathLike, None
        Output filename. If not supplied uses input `morph` filename (with
        appropriate suffix). Default: None
    modifier : float, optional
        Scalar factor to modify (multiply) the morphometric data. Default: None
    Returns
    -------
    fn : os.PathLike
        Path to saved image file
    """
    data = nib.freesurfer.read_morph_data(Path(morph))
    if modifier is not None:
        data *= float(modifier)
    img = construct_shape_gii(data)
    if fn is None:
        # str() so os.PathLike inputs (accepted per docstring) don't raise
        # a TypeError on `+` concatenation
        fn = str(morph) + '.shape.gii'
    fn = Path(fn)
    nib.save(img, fn)
    return fn
def interp_surface(data, src, trg, method='nearest'):
    """
    Interpolate `data` on `src` surface to `trg` surface
    Parameters
    ----------
    data : str or os.PathLike
        Path to (gifti) data file defined on `src` surface
    src : str or os.PathLike
        Path to (gifti) file defining surface of `data`
    trg : str or os.PathLike
        Path to (gifti) file defining desired output surface
    method : {'nearest', 'linear'}
        Method for interpolation. Default {'nearest'}
    Returns
    -------
    interp : np.ndarray
        Input `data` interpolated to `trg` surface
    """
    if method not in ('nearest', 'linear'):
        raise ValueError(f'Provided method {method} invalid')
    src_coords = load_gifti(src).agg_data('NIFTI_INTENT_POINTSET')
    values = load_gifti(data).agg_data()
    # the data must supply one value per source vertex
    if len(src_coords) != len(values):
        raise ValueError('Provided `src` file has different number of '
                         'vertices from `data` file')
    trg_coords = load_gifti(trg).agg_data('NIFTI_INTENT_POINTSET')
    return griddata(src_coords, values, trg_coords, method=method)
def vertex_areas(surface):
    """
    Calculates vertex areas from `surface` file
    Vertex area is calculated as the sum of 1/3 the area of each triangle in
    which the vertex participates
    Parameters
    ----------
    surface : str or os.PathLike
        Path to (gifti) file defining surface for which areas should be
        computed
    Returns
    -------
    areas : np.ndarray
        Vertex areas
    """
    vert, tri = load_gifti(surface).agg_data()
    # two edge vectors per triangle; half the cross-product norm is the
    # triangle area, of which each participating vertex receives one third
    edges = np.diff(vert[tri], axis=1)
    normals = np.cross(edges[:, 0], edges[:, 1])
    tri_share = np.sqrt(np.sum(normals ** 2, axis=1)) * 0.5 / 3
    # accumulate each triangle's share onto its three vertices
    return np.bincount(tri.flatten(), weights=np.repeat(tri_share, 3))
def average_surfaces(*surfs):
    """
    Generates average surface from input `surfs`
    Parameters
    ----------
    surfs : str or os.PathLike
        Path to (gifti) surfaces to be averaged. Surfaces should be aligned!
    Returns
    -------
    average : nib.gifti.GiftiImage
        Averaged surface
    """
    vertices, triangles = None, None
    for surf in surfs:
        img = load_gifti(surf)
        coords = img.agg_data('NIFTI_INTENT_POINTSET')
        if vertices is None:
            # first surface: initialize accumulator + grab triangle topology
            # (assumed identical across aligned surfaces)
            vertices = np.zeros_like(coords)
            triangles = img.agg_data('NIFTI_INTENT_TRIANGLE')
        vertices += coords
    vertices /= len(surfs)
    return construct_surf_gii(vertices, triangles)
def _relabel(labels, minval=0, bgval=None):
"""
Relabels `labels` so that they're consecutive
Parameters
----------
labels : (N,) array_like
Labels to be re-labelled
minval : int, optional
What the new minimum value of the labels should be. Default: 0
bgval : int, optional
What the background value should be; the new labels will start at
`minval` but the first value of these labels (i.e., labels == `minval`)
will be set to `bgval`. Default: None
Returns
------
labels : (N,) np.ndarray
New labels
"""
labels = np.unique(labels, return_inverse=True)[-1] + minval
if bgval is not None:
labels[labels == minval] = bgval
return labels
def relabel_gifti(parcellation, background=None, offset=None):
    """
    Updates GIFTI images so label IDs are consecutive across hemispheres
    Parameters
    ----------
    parcellation : (2,) tuple-of-str
        Surface label files in GIFTI format (lh.label.gii, rh.label.gii)
    background : list-of-str, optional
        If provided, a list of IDs in `parcellation` that should be set to 0
        (the presumptive background value). Other IDs will be shifted so they
        are consecutive (i.e., 0--N). If not specified will use labels in
        `brainnotation.images.PARCIGNORE`. Default: None
    offset : int, optional
        What the lowest value in `parcellation[1]` should be not including
        background value. If not specified it will be purely consecutive from
        `parcellation[0]`. Default: None
    Returns
    -------
    relabelled : (2,) tuple-of-nib.gifti.GiftiImage
        Re-labelled `parcellation` files
    """
    relabelled = tuple()
    # `minval` carries the running label offset across hemispheres so the
    # second hemisphere continues where the first left off
    minval = 0
    if not isinstance(parcellation, tuple):
        parcellation = (parcellation,)
    if background is None:
        background = PARCIGNORE.copy()
    for hemi in parcellation:
        # get necessary info from file
        img = load_gifti(hemi)
        data = img.agg_data()
        labels = img.labeltable.labels
        # invert the label table so names can be looked up -> integer keys
        lt = {v: k for k, v in img.labeltable.get_labels_as_dict().items()}
        # get rid of labels we want to drop
        if background is not None and len(labels) > 0:
            for val in background:
                # names absent from this file map to 0 (already background)
                idx = lt.get(val, 0)
                if idx == 0:
                    continue
                data[data == idx] = 0
                labels = [f for f in labels if f.key != idx]
        # reset labels so they're consecutive and update label keys
        data = _relabel(data, minval=minval, bgval=0)
        ids = np.unique(data)
        new_labels = []
        if len(labels) > 0:
            # pair remaining label entries (in order) with the new ids
            for n, i in enumerate(ids):
                lab = labels[n]
                lab.key = i
                new_labels.append(lab)
        # next hemisphere starts one past this one's max (or at `offset`)
        minval = len(ids) - 1 if offset is None else int(offset) - 1
        # make new gifti image with updated information
        darr = nib.gifti.GiftiDataArray(data, intent='NIFTI_INTENT_LABEL',
                                        datatype='NIFTI_TYPE_INT32')
        labeltable = nib.gifti.GiftiLabelTable()
        labeltable.labels = new_labels
        img = nib.GiftiImage(darrays=[darr], labeltable=labeltable)
        relabelled += (img,)
    return relabelled
def annot_to_gifti(parcellation, background=None):
    """
    Converts FreeSurfer-style annotation `parcellation` files to GIFTI images
    Parameters
    ----------
    parcellation : tuple of str or os.PathLike
        Paths to surface annotation files (.annot)
    background : list-of-str, optional
        If provided, a list of IDs in `parcellation` that should be set to 0
        (the presumptive background value). Other IDs will be shifted so they
        are consecutive (i.e., 0--N). If not specified will use labels in
        `brainnotation.images.PARCIGNORE`. Default: None
    Returns
    -------
    gifti : tuple-of-nib.GiftiImage
        Converted GIFTI images
    """
    if not isinstance(parcellation, tuple):
        parcellation = (parcellation,)
    gifti = tuple()
    for atlas in parcellation:
        labels, ctab, names = nib.freesurfer.read_annot(atlas)
        darr = nib.gifti.GiftiDataArray(labels, intent='NIFTI_INTENT_LABEL',
                                        datatype='NIFTI_TYPE_INT32')
        # rebuild the color table as a GIFTI label table; colors are
        # rescaled from 0--255 to 0--1 and key 0 gets alpha 0 (background)
        labeltable = nib.gifti.GiftiLabelTable()
        for key, label in enumerate(names):
            (r, g, b), a = (ctab[key, :3] / 255), (1.0 if key != 0 else 0.0)
            glabel = nib.gifti.GiftiLabel(key, r, g, b, a)
            glabel.label = label.decode()
            labeltable.labels.append(glabel)
        gifti += (nib.GiftiImage(darrays=[darr], labeltable=labeltable),)
    # drop background labels + make IDs consecutive across hemispheres
    return relabel_gifti(gifti, background=background)
def dlabel_to_gifti(parcellation):
    """
    Converts CIFTI dlabel file to GIFTI images
    Parameters
    ----------
    parcellation : str or os.PathLike
        Path to CIFTI parcellation file (.dlabel.nii)
    Returns
    -------
    gifti : tuple-of-nib.GiftiImage
        Converted GIFTI images
    """
    # only cortical structures are extracted; subcortical models are skipped
    structures = ('CORTEX_LEFT', 'CORTEX_RIGHT')
    dlabel = nib.load(parcellation)
    parcdata = np.asarray(dlabel.get_fdata(), dtype='int32').squeeze()
    gifti = tuple()
    # label dict maps key -> (name, (r, g, b, a)) for the first label axis
    label_dict = dlabel.header.get_axis(index=0).label[0]
    for bm in dlabel.header.get_index_map(1).brain_models:
        structure = bm.brain_structure
        if structure.startswith('CIFTI_STRUCTURE_'):
            structure = structure[16:]
        if structure not in structures:
            continue
        # scatter the packed CIFTI values back onto a full-surface array;
        # vertices missing from the brain model stay 0 (background)
        labels = np.zeros(bm.surface_number_of_vertices, dtype='int32')
        idx = np.asarray(bm.vertex_indices)
        slicer = slice(bm.index_offset, bm.index_offset + bm.index_count)
        labels[idx] = parcdata[slicer]
        darr = nib.gifti.GiftiDataArray(labels, intent='NIFTI_INTENT_LABEL',
                                        datatype='NIFTI_TYPE_INT32')
        labeltable = nib.gifti.GiftiLabelTable()
        for key, (label, (r, g, b, a)) in label_dict.items():
            # only keep label entries actually present in this hemisphere
            if key not in labels:
                continue
            glabel = nib.gifti.GiftiLabel(key, r, g, b, a)
            glabel.label = label
            labeltable.labels.append(glabel)
        gifti += (nib.GiftiImage(darrays=[darr], labeltable=labeltable),)
    return gifti
def minc_to_nifti(img, fn=None):
    """
    Converts MINC `img` to NIfTI format (and re-orients to RAS)
    Parameters
    ----------
    img : str or os.PathLike
        Path to MINC file to be converted
    fn : str or os.PathLike, optional
        Filepath to where converted NIfTI image should be stored. If not
        supplied the converted image is not saved to disk and is returned.
        Default: None
    Returns
    -------
    out : nib.Nifti1Image or os.PathLike
        Converted image (if `fn` is None) or path to saved file on disk
    """
    mnc = nib.load(img)
    nifti = nib.Nifti1Image(np.asarray(mnc.dataobj), mnc.affine)
    # re-orient the converted image to RAS
    transform = nib.orientations.ornt_transform(
        nib.io_orientation(nifti.affine),
        nib.orientations.axcodes2ornt('RAS')
    )
    nifti = nifti.as_reoriented(transform)
    # no output path supplied: hand back the in-memory image
    if fn is None:
        return nifti
    fn = Path(fn).resolve()
    if fn.name.endswith('.mnc'):
        fn = fn.parent / fn.name.replace('.mnc', '.nii.gz')
    nib.save(nifti, fn)
    return fn
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,037
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/datasets/__init__.py
|
"""
Functions for fetching datasets
"""
__all__ = [
'fetch_all_atlases', 'fetch_atlas', 'fetch_civet', 'fetch_fsaverage',
'fetch_fslr', 'fetch_mni152', 'fetch_regfusion', 'get_atlas_dir',
'DENSITIES', 'ALIAS', 'available_annotations', 'available_tags',
'fetch_annotation'
]
# TODO: remove after nilearn v0.9 release
import warnings
warnings.filterwarnings('ignore', message='Fetchers from the nilearn.datasets',
category=FutureWarning)
from .atlases import (fetch_all_atlases, fetch_atlas, fetch_civet, # noqa
fetch_fsaverage, fetch_fslr, fetch_mni152,
fetch_regfusion, get_atlas_dir, DENSITIES, ALIAS)
from .annotations import (available_annotations, available_tags, # noqa
fetch_annotation)
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,038
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/resampling.py
|
# -*- coding: utf-8 -*-
"""
Functions for comparing data
"""
import nibabel as nib
import numpy as np
from brainnotation import transforms
from brainnotation.datasets import ALIAS, DENSITIES
from brainnotation.images import load_gifti, load_nifti
_resampling_docs = dict(
resample_in="""\
src, trg : str or os.PathLike or niimg_like or nib.GiftiImage or tuple
Input data to be resampled
src_space, trg_space : str
Template space of input data
method : {'nearest', 'linear'}, optional
Method for resampling. Specify 'nearest' if `data` are label images.
Default: 'linear'\
""",
hemi="""\
hemi : {'L', 'R'}, optional
If `src` and `trg` are not tuples this specifies the hemisphere the data
represent. Default: None\
""",
resample_out="""\
src, trg : tuple-of-nib.GiftiImage
Resampled images\
"""
)
def _estimate_density(data, hemi=None):
    """
    Tries to estimate standard density of `data`
    Parameters
    ----------
    data : (2,) tuple of str or os.PathLike or nib.GiftiImage or tuple
        Input data for (src, trg)
    hemi : {'L', 'R'}, optional
        If `data` is not a tuple this specifies the hemisphere the data are
        representing. Default: None
    Returns
    -------
    density : str
        String representing approximate density of data (e.g., '10k')
    Raises
    ------
    ValueError
        If density of `data` is not one of the standard expected values
    """
    # standard surface resolutions: n_vertices -> density string
    density_map = {
        2562: '3k', 4002: '4k', 7842: '8k', 10242: '10k',
        32492: '32k', 40692: '41k', 163842: '164k'
    }
    densities = tuple()
    for img in data:
        # entries may already be density strings (e.g. '41k'); pass through
        if img in density_map.values():
            densities += (img,)
            continue
        img, hemi = zip(*transforms._check_hemi(img, hemi))
        n_vert = [len(load_gifti(d).agg_data()) for d in img]
        if not all(n == n_vert[0] for n in n_vert):
            raise ValueError('Provided data have different resolutions across '
                             'hemispheres?')
        else:
            n_vert = n_vert[0]
        density = density_map.get(n_vert)
        if density is None:
            # BUG FIX: message lacked the f-string prefix, so `{n_vert}` was
            # emitted literally instead of being interpolated
            raise ValueError('Provided data resolution is non-standard. '
                             f'Number of vertices estimated in data: {n_vert}')
        densities += (density,)
    return densities
def downsample_only(src, trg, src_space, trg_space, method='linear',
                    hemi=None):
    src_den, trg_den = _estimate_density((src, trg), hemi)
    # densities are strings like '41k'; strip the trailing 'k' to compare
    src_num, trg_num = int(src_den[:-1]), int(trg_den[:-1])
    src_space, trg_space = src_space.lower(), trg_space.lower()
    # transformation functions are looked up by name on the `transforms`
    # module, e.g. `transforms.fsaverage_to_fslr`
    if src_num >= trg_num:  # resample to `trg`
        func = getattr(transforms, f'{src_space}_to_{trg_space}')
        src = func(src, src_den, trg_den, hemi=hemi, method=method)
    elif src_num < trg_num:  # resample to `src`
        func = getattr(transforms, f'{trg_space}_to_{src_space}')
        trg = func(trg, trg_den, src_den, hemi=hemi, method=method)
    return src, trg
downsample_only.__doc__ = """\
Resamples `src` and `trg` to match such that neither is upsampled
If density of `src` is greater than `trg` then `src` is resampled to
`trg`; otherwise, `trg` is resampled to `src`
Parameters
----------
{resample_in}
{hemi}
Returns
-------
{resample_out}
""".format(**_resampling_docs)
def transform_to_src(src, trg, src_space, trg_space, method='linear',
                     hemi=None):
    src_den, trg_den = _estimate_density((src, trg), hemi)
    # only `trg` is resampled; `src` is returned untouched
    func = getattr(transforms, f'{trg_space.lower()}_to_{src_space.lower()}')
    trg = func(trg, trg_den, src_den, hemi=hemi, method=method)
    return src, trg
transform_to_src.__doc__ = """\
Resamples `trg` to match space and density of `src`
Parameters
----------
{resample_in}
{hemi}
Returns
-------
{resample_out}
""".format(**_resampling_docs)
def transform_to_trg(src, trg, src_space, trg_space, hemi=None,
                     method='linear'):
    src_den, trg_den = _estimate_density((src, trg), hemi)
    # only `src` is resampled; `trg` is returned untouched
    func = getattr(transforms, f'{src_space.lower()}_to_{trg_space.lower()}')
    src = func(src, src_den, trg_den, hemi=hemi, method=method)
    return src, trg
transform_to_trg.__doc__ = """\
Resamples `src` to match space and density of `trg`
Parameters
----------
{resample_in}
{hemi}
Returns
-------
{resample_out}
""".format(**_resampling_docs)
def transform_to_alt(src, trg, src_space, trg_space, method='linear',
                     hemi=None, alt_space='fsaverage', alt_density='41k'):
    # resample each input to the alternate space; `alt_density` is passed as
    # the "target" argument and only the transformed image (first element of
    # the returned tuple) is kept
    src, _ = transform_to_trg(src, alt_density, src_space, alt_space,
                              hemi=hemi, method=method)
    trg, _ = transform_to_trg(trg, alt_density, trg_space, alt_space,
                              hemi=hemi, method=method)
    return src, trg
transform_to_alt.__doc__ = """\
Resamples `src` and `trg` to `alt_space` and `alt_density`
Parameters
----------
{resample_in}
{hemi}
alt_space : {{'fsaverage', 'fsLR', 'civet'}}, optional
    Alternative space to which `src` and `trg` should be transformed. Default:
    'fsaverage'
alt_density : str, optional
    Resolution to which `src` and `trg` should be resampled. Must be valid
    with `alt_space`. Default: '41k'
Returns
-------
{resample_out}
""".format(**_resampling_docs)
def mni_transform(src, trg, src_space, trg_space, method='linear', hemi=None):
    # this helper only handles volumetric-to-X transforms, so `src` must be
    # in MNI152 space
    if src_space != 'MNI152':
        raise ValueError('Cannot perform MNI transformation when src_space is '
                         f'not "MNI152." Received: {src_space}.')
    # when `trg` is a surface, estimate its density so the appropriate
    # mni152_to_<space> transform can be applied at matching resolution
    trg_den = trg
    if trg_space != 'MNI152':
        trg_den, = _estimate_density((trg_den,), hemi)
    func = getattr(transforms, f'mni152_to_{trg_space.lower()}')
    src = func(src, trg_den, method=method)
    return src, trg
mni_transform.__doc__ = """\
Resamples `src` in MNI152 to `trg` space
Parameters
----------
{resample_in}
hemi : {{'L', 'R'}}, optional
    If `trg_space` is not "MNI152' and `trg` is not a tuple this specifies the
    hemisphere the data represent. Default: None
Returns
-------
{resample_out}
""".format(**_resampling_docs)
def _check_altspec(spec):
    """
    Confirms that specified alternative `spec` is valid (space, density) format
    Parameters
    ----------
    spec : (2,) tuple-of-str
        Where entries are (space, density) of desired target space
    Returns
    -------
    spec : (2,) tuple-of-str
        Unmodified input `spec`
    Raises
    ------
    ValueError
        If `spec` is not valid format
    """
    valid_spec = spec is not None and len(spec) == 2
    if valid_spec:
        space, den = spec
        # resolve aliases (e.g. 'fsLR' -> canonical name) before validating
        space = ALIAS.get(space, space)
        allowed = DENSITIES.get(space)
        valid_spec = allowed is not None and den in allowed
    if not valid_spec:
        raise ValueError('Must provide valid alternative specification of '
                         f'format (space, density). Received: {spec}')
    return (space, den)
def resample_images(src, trg, src_space, trg_space, method='linear',
                    hemi=None, resampling='downsample_only', alt_spec=None):
    resamplings = ('downsample_only', 'transform_to_src', 'transform_to_trg',
                   'transform_to_alt')
    if resampling not in resamplings:
        raise ValueError(f'Invalid method: {resampling}')
    src_space = ALIAS.get(src_space, src_space)
    trg_space = ALIAS.get(trg_space, trg_space)
    # all this input handling just to deal with volumetric images :face_palm:
    opts, err = {}, None
    if resampling == 'transform_to_alt':
        opts['alt_space'], opts['alt_density'] = _check_altspec(alt_spec)
        # BUG FIX: this validation previously lived in an `elif` below the
        # `if resampling == 'transform_to_alt'` branch and was therefore
        # unreachable whenever it should have applied
        if (opts['alt_space'] == 'MNI152'
                and (src_space != 'MNI152' or trg_space != 'MNI152')):
            err = ('Specified `alt_space` cannot be "MNI152" when '
                   '`resampling` is "transform_to_alt"')
    elif (resampling == 'transform_to_src' and src_space == 'MNI152'
            and trg_space != 'MNI152'):
        err = ('Specified `src_space` cannot be "MNI152" when `resampling` is '
               '"transform_to_src"')
    elif (resampling == 'transform_to_trg' and src_space != 'MNI152'
            and trg_space == 'MNI152'):
        err = ('Specified `trg_space` cannot be "MNI152" when `resampling` is '
               '"transform_to_trg"')
    if err is not None:
        raise ValueError(err)
    # handling volumetric data is annoying...
    if ((src_space == "MNI152" or trg_space == "MNI152")
            and resampling == 'transform_to_alt'):
        # move each input to the (surface) alternate space independently
        func = mni_transform if src_space == 'MNI152' else transform_to_trg
        src = func(src, opts['alt_density'], src_space, opts['alt_space'],
                   method=method, hemi=hemi)[0]
        func = mni_transform if trg_space == 'MNI152' else transform_to_trg
        trg = func(trg, opts['alt_density'], trg_space, opts['alt_space'],
                   method=method, hemi=hemi)[0]
    elif src_space == 'MNI152' and trg_space != 'MNI152':
        src, trg = mni_transform(src, trg, src_space, trg_space,
                                 method=method, hemi=hemi)
    elif trg_space == 'MNI152' and src_space != 'MNI152':
        trg, src = mni_transform(trg, src, trg_space, src_space,
                                 method=method, hemi=hemi)
    elif src_space == 'MNI152' and trg_space == 'MNI152':
        # BUG FIX: condition previously tested `src_space` twice. both
        # inputs are volumetric: compare voxel volumes and resample in
        # MNI152 space according to the requested strategy
        src, trg = load_nifti(src), load_nifti(trg)
        srcres = np.prod(nib.affines.voxel_sizes(src.affine))
        trgres = np.prod(nib.affines.voxel_sizes(trg.affine))
        if ((resampling == 'downsample_only' and srcres > trgres)
                or resampling == 'transform_to_src'):
            trg, src = mni_transform(trg, src, trg_space, src_space,
                                     method=method)
        elif ((resampling == 'downsample_only' and srcres <= trgres)
                or resampling == 'transform_to_trg'):
            src, trg = mni_transform(src, trg, src_space, trg_space,
                                     method=method)
    else:
        # purely surface-based: dispatch to the module-level strategy
        func = globals()[resampling]
        src, trg = func(src, trg, src_space, trg_space, hemi=hemi,
                        method=method, **opts)
    src = tuple(load_gifti(s) for s in src)
    trg = tuple(load_gifti(t) for t in trg)
    return src, trg
resample_images.__doc__ = """\
Resamples images `src` and `trg` to same space/density with `resampling` method
Parameters
----------
{resample_in}
{hemi}
resampling : str, optional
    Name of resampling function to resample `src` and `trg`. Must be one of:
    'downsample_only', 'transform_to_src', 'transform_to_trg',
    'transform_to_alt'. See Notes for more info. Default: 'downsample_only'
alt_spec : (2,) tuple-of-str
    Where entries are (space, density) of desired target space. Only used if
    `resampling='transform_to_alt'`. Default: None
Returns
-------
{resample_out}
Notes
-----
The four available `resampling` strategies will control how `src` and/or `trg`
are resampled prior to correlation. Options include:
1. `resampling='downsample_only'`
Data from `src` and `trg` are resampled to the lower resolution of the two
input datasets
2. `resampling='transform_to_src'`
Data from `trg` are always resampled to match `src` space and resolution
3. `resampling='transform_to_trg'`
Data from `src` are always resampled to match `trg` space and resolution
4. `resampling='transform_to_alt'`
Data from `trg` and `src` are resampled to the space and resolution
specified by `alt_spec` (space, density)
""".format(**_resampling_docs)
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,039
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/tests/test_images.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.images functionality
"""
import nibabel as nib
import numpy as np
import pytest
from brainnotation import images
def test_construct_surf_gii():
    # minimal one-triangle mesh round-trips through the constructor
    coords = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 1]])
    faces = np.array([[0, 1, 2]])
    img = images.construct_surf_gii(coords, faces)
    assert isinstance(img, nib.GiftiImage)
    vertices, triangles = img.agg_data()
    assert np.allclose(vertices, coords)
    assert np.allclose(triangles, faces)
# Placeholder tests for the remaining public functions in
# `brainnotation.images`; each is marked xfail until a real test is written
@pytest.mark.xfail
def test_construct_shape_gii():
    assert False
@pytest.mark.xfail
def test_fix_coordsys():
    assert False
@pytest.mark.xfail
def test_load_nifti():
    assert False
@pytest.mark.xfail
def test_load_gifti():
    assert False
@pytest.mark.xfail
def test_load_data():
    assert False
@pytest.mark.xfail
def test_obj_to_gifti():
    assert False
@pytest.mark.xfail
def test_fssurf_to_gifti():
    assert False
@pytest.mark.xfail
def test_fsmorph_to_gifti():
    assert False
@pytest.mark.xfail
def test_interp_surface():
    assert False
@pytest.mark.xfail
def test_vertex_areas():
    assert False
@pytest.mark.xfail
def test_average_surfaces():
    assert False
@pytest.mark.xfail
def test_relabel_gifti():
    assert False
@pytest.mark.xfail
def test_annot_to_gifti():
    assert False
@pytest.mark.xfail
def test_dlabel_to_gifti():
    assert False
@pytest.mark.xfail
def test_minc_to_nifti():
    assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,040
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/nulls/tests/test_spins.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.nulls.spins functionality
"""
import numpy as np
import pytest
from brainnotation.nulls import spins
def test_load_spins():
    # an in-memory array should be passed through untouched (same object)...
    out = np.random.randint(1000, size=(100, 100), dtype='int32')
    assert out is spins.load_spins(out)
    # ...and trimmed to the first `n_perm` columns when requested
    assert np.allclose(out[:, :10], spins.load_spins(out, n_perm=10))
# The tests below are placeholders marked `xfail` pending real
# implementations.
@pytest.mark.xfail
def test_get_parcel_centroids():
    assert False
@pytest.mark.xfail
def test__gen_rotation():
    assert False
@pytest.mark.xfail
def test_gen_spinsamples():
    assert False
@pytest.mark.xfail
def test_spin_parcels():
    assert False
@pytest.mark.xfail
def test_parcels_to_vertices():
    assert False
@pytest.mark.xfail
def test_vertices_to_parcels():
    assert False
@pytest.mark.xfail
def test_spin_data():
    assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,041
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/stats.py
|
# -*- coding: utf-8 -*-
"""
Functions for statistical analyses
"""
import numpy as np
from scipy import special, stats as sstats
from scipy.stats.stats import _chk2_asarray
from sklearn.utils.validation import check_random_state
from brainnotation.images import load_data
def correlate_images(src, trg, corrtype='pearsonr', ignore_zero=True,
                     nulls=None):
    """
    Correlates images `src` and `trg`

    If `src` and `trg` represent data from multiple hemispheres the data are
    concatenated across hemispheres prior to correlation

    Parameters
    ----------
    src, trg : str or os.PathLike or nib.GiftiImage or niimg_like or tuple
        Images to be correlated
    corrtype : {'pearsonr', 'spearmanr'}, optional
        Type of correlation to perform. Default: 'pearsonr'
    ignore_zero : bool, optional
        Whether to drop all (near-)zero entries of `src` and `trg` before
        correlating. Default: True
    nulls : array_like, optional
        Null maps for `src`; when supplied a permutation-based p-value is
        returned alongside the correlation

    Returns
    -------
    correlation : float
        Correlation between `src` and `trg`
    """
    if corrtype not in ('pearsonr', 'spearmanr'):
        raise ValueError(f'Invalid method: {corrtype}')
    srcdata = load_data(src)
    trgdata = load_data(trg)
    # entries dropped from BOTH images: NaNs always, zeros only on request
    drop = np.logical_or(np.isnan(srcdata), np.isnan(trgdata))
    if ignore_zero:
        zeros = np.logical_or(np.isclose(srcdata, 0), np.isclose(trgdata, 0))
        drop = np.logical_or(drop, zeros)
    keep = np.logical_not(drop)
    srcdata, trgdata = srcdata[keep], trgdata[keep]
    # Spearman = Pearson on ranks
    if corrtype == 'spearmanr':
        srcdata = sstats.rankdata(srcdata)
        trgdata = sstats.rankdata(trgdata)
    if nulls is None:
        return efficient_pearsonr(srcdata, trgdata)
    # null maps are subset with the same mask as the data
    return permtest_pearsonr(srcdata, trgdata, n_perm=nulls.shape[-1],
                             nulls=nulls[keep])
def permtest_pearsonr(a, b, n_perm=1000, seed=0, nulls=None):
    """
    Non-parametric equivalent of :py:func:`scipy.stats.pearsonr`

    Generates a two-tailed p-value for the hypothesis that samples `a` and
    `b` are correlated, using permutation tests

    Parameters
    ----------
    a, b : (N[, M]) array_like
        Sample observations. These arrays must have the same length and
        either an equivalent number of columns or be broadcastable
    n_perm : int, optional
        Number of permutations to assess. Unless `a` and `b` are very small
        this approximates a randomization test via Monte Carlo simulation.
        Default: 1000
    seed : {int, np.random.RandomState instance, None}, optional
        Seed for random number generation. Set to None for "randomness".
        Default: 0
    nulls : (N, P) array_like, optional
        Null array used in place of a shuffled `a` to build the null
        distribution of correlations. Must have the same length as `a` and
        `b`; overrides `n_perm` when provided. Default: None

    Returns
    -------
    corr : float or numpy.ndarray
        Correlations
    pvalue : float or numpy.ndarray
        Non-parametric p-value

    Notes
    -----
    The lowest p-value this function can return equals 1 / (`n_perm` + 1).
    """
    a, b, axis = _chk2_asarray(a, b, 0)
    rng = check_random_state(seed)
    if len(a) != len(b):
        raise ValueError('Provided arrays do not have same length')
    if a.size == 0 or b.size == 0:
        return np.nan, np.nan
    # explicit nulls determine the number of permutations
    if nulls is not None:
        n_perm = nulls.shape[-1]
    # division by one coerces a 0-d array result to float
    true_corr = efficient_pearsonr(a, b)[0] / 1
    abs_true = np.abs(true_corr)
    # start the count at one so the observed statistic is part of the null
    exceeds = np.ones(true_corr.shape)
    for idx in range(n_perm):
        # draw from supplied nulls if given, otherwise shuffle `a`
        if nulls is None:
            surrogate = a[rng.permutation(len(a))]
        else:
            surrogate = nulls[:, idx]
        exceeds += np.abs(efficient_pearsonr(surrogate, b)[0]) >= abs_true
    # + 1 in the denominator accounts for the observed correlation
    return true_corr, exceeds / (n_perm + 1)
def efficient_pearsonr(a, b, ddof=1, nan_policy='propagate'):
    """
    Computes correlation of matching columns in `a` and `b`

    Parameters
    ----------
    a, b : array_like
        Sample observations. These arrays must have the same length and
        either an equivalent number of columns or be broadcastable
    ddof : int, optional
        Degrees of freedom correction in the calculation of the standard
        deviation. Default: 1
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default: 'propagate'

    Returns
    -------
    corr : float or numpy.ndarray
        Pearson's correlation coefficient between matching columns of inputs
    pval : float or numpy.ndarray
        Two-tailed p-values

    Notes
    -----
    If either input contains nan and nan_policy is set to 'omit', both
    arrays will be masked to omit the nan entries.
    """
    # coerce inputs to >=1-D arrays; this inlines the behavior of the
    # private scipy.stats.stats._chk2_asarray helper (removed in SciPy 1.8+)
    a, b = np.atleast_1d(np.asarray(a)), np.atleast_1d(np.asarray(b))
    if len(a) != len(b):
        raise ValueError('Provided arrays do not have same length')
    if a.size == 0 or b.size == 0:
        return np.nan, np.nan
    if nan_policy not in ('propagate', 'raise', 'omit'):
        raise ValueError(f'Value for nan_policy "{nan_policy}" not allowed')
    a, b = a.reshape(len(a), -1), b.reshape(len(b), -1)
    if a.shape[1] != b.shape[1]:
        a, b = np.broadcast_arrays(a, b)
    mask = np.logical_or(np.isnan(a), np.isnan(b))
    if nan_policy == 'raise' and np.any(mask):
        # fixed: message previously (incorrectly) referenced "omit"
        raise ValueError('Input cannot contain NaN when nan_policy is '
                         '"raise"')
    elif nan_policy == 'omit':
        # avoid making copies of the data, if possible
        a = np.ma.masked_array(a, mask, copy=False, fill_value=np.nan)
        b = np.ma.masked_array(b, mask, copy=False, fill_value=np.nan)
    with np.errstate(invalid='ignore'):
        corr = (sstats.zscore(a, ddof=ddof, nan_policy=nan_policy)
                * sstats.zscore(b, ddof=ddof, nan_policy=nan_policy))
    sumfunc, n_obs = np.sum, len(a)
    if nan_policy == 'omit':
        corr = corr.filled(np.nan)
        sumfunc = np.nansum
        # per-column count of valid (non-NaN) observations
        n_obs = np.squeeze(np.sum(np.logical_not(np.isnan(corr)), axis=0))
    corr = sumfunc(corr, axis=0) / (n_obs - 1)
    # clip fp noise outside [-1, 1]; division by one coerces 0-d to float
    corr = np.squeeze(np.clip(corr, -1, 1)) / 1
    # two-tailed p-value from the beta distribution (same formula as
    # scipy.stats.pearsonr); special.betainc is the supported replacement
    # for special.btdtr, deprecated in SciPy 1.12 and removed in 1.14
    ab = (n_obs / 2) - 1
    prob = 2 * special.betainc(ab, ab, 0.5 * (1 - np.abs(corr)))
    return corr, prob
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,042
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/tests/test_transforms.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.transforms functionality
"""
import pytest
from brainnotation import transforms
# Stub tests below are marked `xfail` pending real implementations.
@pytest.mark.xfail
def test__regfusion_project():
    assert False
@pytest.mark.xfail
def test__vol_to_surf():
    assert False
@pytest.mark.xfail
def test_mni152_to_civet():
    assert False
@pytest.mark.xfail
def test_mni152_to_fsaverage():
    assert False
@pytest.mark.xfail
def test_mni152_to_fslr():
    assert False
@pytest.mark.xfail
def test_mni152_to_mni152():
    assert False
def test__check_hemi():
    # a single (data, hemisphere) pair should round-trip unchanged
    d, h = zip(*transforms._check_hemi('test', 'L'))
    assert d == ('test',) and h == ('L',)
    # missing or unrecognized hemisphere designations must raise
    for d, h in (('test', None), ('test', 'invalid_hemi')):
        with pytest.raises(ValueError):
            transforms._check_hemi(d, h)
@pytest.mark.xfail
def test__surf_to_surf():
    assert False
@pytest.mark.xfail
def test_civet_to_fslr():
    assert False
@pytest.mark.xfail
def test_fslr_to_civet():
    assert False
@pytest.mark.xfail
def test_civet_to_fsaverage():
    assert False
@pytest.mark.xfail
def test_fsaverage_to_civet():
    assert False
@pytest.mark.xfail
def test_fslr_to_fsaverage():
    assert False
@pytest.mark.xfail
def test_fsaverage_to_fslr():
    assert False
@pytest.mark.xfail
def test_civet_to_civet():
    assert False
@pytest.mark.xfail
def test_fslr_to_fslr():
    assert False
@pytest.mark.xfail
def test_fsaverage_to_fsaverage():
    assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,043
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/tests/test_utils.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.utils functionality
"""
import os
import pytest
from brainnotation import utils
def test_tmpname(tmp_path):
    # generated name should be a path-like honoring prefix and suffix
    out = utils.tmpname('.nii.gz', prefix='test', directory=tmp_path)
    assert (isinstance(out, os.PathLike) and out.name.startswith('test')
            and out.name.endswith('.nii.gz'))
# Stub tests below are marked `xfail` pending real implementations.
@pytest.mark.xfail
def test_run():
    assert False
@pytest.mark.xfail
def test_check_fs_subjid():
    assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,044
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/datasets/tests/test_annotations.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.datasets.annotations functionality
"""
import pytest
from brainnotation.datasets import annotations
# Stub tests below are marked `xfail` pending real implementations.
@pytest.mark.xfail
def test__groupby_match():
    assert False
@pytest.mark.xfail
def test__match_annot():
    assert False
@pytest.mark.xfail
def test_available_annotations():
    assert False
def test_available_tags():
    # the restricted tag listing must be a superset of the unrestricted one
    unrestricted = annotations.available_tags()
    restricted = annotations.available_tags(return_restricted=True)
    assert isinstance(unrestricted, list) and isinstance(restricted, list)
    assert all(f in restricted for f in unrestricted)
@pytest.mark.xfail
def test_fetch_annotation():
    assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,045
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/datasets/annotations.py
|
# -*- coding: utf-8 -*-
"""
Functions for fetching annotations (from the internet, if necessary)
"""
from collections import defaultdict
from pathlib import Path
import re
import shutil
from nilearn.datasets.utils import _fetch_file
from brainnotation.datasets.utils import (get_data_dir, get_dataset_info,
_get_token, _get_session)
# Extracts the (source, desc, space, den/res) identifiers from annotation
# filenames of the form 'source-X_desc-Y_space-Z_den-10k_...'.
# NOTE(review): the character class `[k|m]` also matches a literal '|';
# presumably `[km]` was intended -- confirm before tightening.
MATCH = re.compile(
    r'source-(\S+)_desc-(\S+)_space-(\S+)_(?:den|res)-(\d+[k|m]{1,2})_'
)
def _groupby_match(fnames, return_single=False):
    """
    Groups files in `fnames` by (source, desc, space, res/den)

    Parameters
    ----------
    fnames : list-of-str
        Filenames to be grouped
    return_single : bool, optional
        If there is only one group of filenames return its value directly
        instead of a dict. Default: False

    Returns
    -------
    groups : dict
        Where keys are tuples (source, desc, space, res/den) and values are
        the matching filenames (a bare string for singleton groups, a list
        otherwise)
    """
    grouped = defaultdict(list)
    for fname in fnames:
        # key is the regex capture groups extracted from the filename
        grouped[MATCH.search(fname).groups()].append(fname)
    # collapse singleton lists down to the bare filename
    out = {key: files if len(files) > 1 else files[0]
           for key, files in grouped.items()}
    if return_single and len(out) == 1:
        out = next(iter(out.values()))
    return out
def _match_annot(info, **kwargs):
"""
Matches datasets in `info` to relevant keys
Parameters
----------
info : list-of-dict
Information on annotations
kwargs : key-value pairs
Values of data in `info` on which to match
Returns
-------
matched : list-of-dict
Annotations with specified values for keys
"""
# tags should always be a list
tags = kwargs.get('tags')
if tags is not None and isinstance(tags, str):
kwargs['tags'] = [tags]
# 'den' and 'res' are a special case because these are mutually exclusive
# values (only one will ever be set for a given annotation) so we want to
# match on _either_, not both, if and only if both are provided as keys.
# if only one is specified as a key then we should exclude the other!
denres = []
for vals in (kwargs.get('den'), kwargs.get('res')):
vals = [vals] if isinstance(vals, str) else vals
if vals is not None:
denres.extend(vals)
out = []
for dset in info:
match = True
for key in ('source', 'desc', 'space', 'hemi', 'tags', 'format'):
comp, value = dset.get(key), kwargs.get(key)
if value is None:
continue
elif value is not None and comp is None:
match = False
elif isinstance(value, str):
if value != 'all':
match = match and comp == value
else:
func = all if key == 'tags' else any
match = match and func(f in comp for f in value)
if len(denres) > 0:
match = match and (dset.get('den') or dset.get('res')) in denres
if match:
out.append(dset)
return out
def available_annotations(source=None, desc=None, space=None, den=None,
                          res=None, hemi=None, tags=None, format=None,
                          return_restricted=False):
    """
    Lists datasets available via :func:`~.fetch_annotation`

    Parameters
    ----------
    source, desc, space, den, res, hemi, tags, format : str or list-of-str
        Values on which to match annotations. If not specified annotations
        with any value for the relevant key will be matched. Default: None
    return_restricted : bool, optional
        Whether to return restricted annotations. These will only be
        accessible with a valid OSF token. Default: False

    Returns
    -------
    datasets : list-of-tuple
        Available annotations, as (source, desc, space, den/res) tuples
    """
    # docstring fixes vs. prior revision: default is False (not True) and
    # the returned entries are tuples (the _groupby_match keys), not str
    info = _match_annot(get_dataset_info('annotations', return_restricted),
                        source=source, desc=desc, space=space, den=den,
                        res=res, hemi=hemi, tags=tags, format=format)
    fnames = [dset['fname'] for dset in info]
    return list(_groupby_match(fnames, return_single=False).keys())
def available_tags(return_restricted=False):
    """
    Returns available tags for querying annotations

    Parameters
    ----------
    return_restricted : bool, optional
        Whether to include tags from restricted annotations. These will only
        be accessible with a valid OSF token. Default: False

    Returns
    -------
    tags : list-of-str
        Available tags, sorted alphabetically
    """
    # docstring fix vs. prior revision: default is False, not True
    tags = set()
    for dset in get_dataset_info('annotations', return_restricted):
        if dset['tags'] is not None:
            tags.update(dset['tags'])
    return sorted(tags)
def fetch_annotation(*, source=None, desc=None, space=None, den=None, res=None,
                     hemi=None, tags=None, format=None, return_single=False,
                     token=None, data_dir=None, verbose=1):
    """
    Downloads files for brain annotations matching requested variables

    Parameters
    ----------
    source, desc, space, den, res, hemi, tags, format : str or list-of-str
        Values on which to match annotations. If not specified annotations
        with any value for the relevant key will be matched. Default: None
    return_single : bool, optional
        If only one annotation is found matching input parameters return the
        list of filepaths instead of the standard dictionary. Default: False
    token : str, optional
        OSF personal access token for accessing restricted annotations. Will
        also check the environmental variable 'BRAINNOTATION_OSF_TOKEN' if
        not provided; if that is not set no token will be provided and
        restricted annotations will be inaccessible. Default: None
    data_dir : str, optional
        Path to use as data directory. If not specified, will check for
        environmental variable 'BRAINNOTATION_DATA'; if that is not set,
        will use `~/brainnotation-data` instead. Default: None
    verbose : int, optional
        Modifies verbosity of download, where higher numbers mean more
        updates. Default: 1

    Returns
    -------
    data : dict
        Dictionary of downloaded annotations where dictionary keys are
        tuples (source, desc, space, den/res) and values are lists of
        corresponding filenames
    """
    # check input parameters to ensure we're fetching _something_
    supplied = False
    for val in (source, desc, space, den, res, hemi, tags, format):
        if val is not None:
            supplied = True
            break
    if not supplied:
        raise ValueError('Must provide at least one parameters on which to '
                         'match annotations. If you want to fetch all '
                         'annotations set any of the parameters to "all".')
    # get info on datasets we need to fetch; restricted datasets are only
    # visible when a valid OSF token is available
    token = _get_token(token=token)
    return_restricted = bool(token)
    data_dir = get_data_dir(data_dir=data_dir)
    info = _match_annot(get_dataset_info('annotations', return_restricted),
                        source=source, desc=desc, space=space, den=den,
                        res=res, hemi=hemi, tags=tags, format=format)
    if verbose > 1:
        # fixed typo: 'datsets' -> 'datasets'
        print(f'Identified {len(info)} datasets matching specified '
              'parameters')
    # get session for requests
    session = _get_session(token=token)
    # TODO: current work-around to handle that _fetch_files() does not
    # support session instances. hopefully a future version will and we can
    # just use that function to handle this instead of calling
    # _fetch_file() directly
    data = []
    for dset in info:
        fn = Path(data_dir) / 'annotations' / dset['rel_path'] / dset['fname']
        if not fn.exists():
            # download to the parent directory, then move into place so a
            # partially-downloaded file never sits at the final path
            dl_file = _fetch_file(dset['url'], str(fn.parent), verbose=verbose,
                                  md5sum=dset['checksum'], session=session)
            shutil.move(dl_file, fn)
        data.append(str(fn))
    return _groupby_match(data, return_single=return_single)
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,046
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/utils.py
|
# -*- coding: utf-8 -*-
"""
Utility functions
"""
import os
from pathlib import Path
import tempfile
import subprocess
def tmpname(suffix, prefix=None, directory=None):
    """
    Creates a closed temporary file and returns its path
    Parameters
    ----------
    suffix : str
        Suffix of created filename
    prefix : str, optional
        Prefix of created filename. Default: None
    directory : str, optional
        Directory in which to create the file. Default: None
    Returns
    -------
    fn : pathlib.Path
        Temporary filename; user is responsible for deletion
    """
    # mkstemp returns an open OS-level handle; close it immediately since
    # callers only want the path
    handle, filename = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                        dir=directory)
    os.close(handle)
    return Path(filename)
def run(cmd, env=None, return_proc=False, quiet=False, **kwargs):
    """
    Runs `cmd` via shell subprocess with provided environment `env`
    Parameters
    ----------
    cmd : str
        Command to be run as single string
    env : dict, optional
        If provided, dictionary of key-value pairs to be added to base
        environment when running `cmd`. Default: None
    return_proc : bool, optional
        Whether to return CompletedProcess object. Default: false
    quiet : bool, optional
        Whether to suppress stdout/stderr from subprocess. Default: False
    Returns
    -------
    proc : subprocess.CompletedProcess
        Process output
    Raises
    ------
    subprocess.CalledProcessError
        If subprocess does not exit cleanly
    Examples
    --------
    >>> from brainnotation import utils
    >>> p = utils.run('echo "hello world"', return_proc=True, quiet=True)
    >>> p.returncode
    0
    >>> p.stdout  # doctest: +SKIP
    'hello world\\n'
    """
    merged_env = os.environ.copy()
    if env is not None:
        if not isinstance(env, dict):
            raise TypeError('Provided `env` must be a dictionary, not {}'
                            .format(type(env)))
        merged_env.update(env)
    opts = dict(check=True, shell=True, universal_newlines=True)
    opts.update(**kwargs)
    if quiet:
        opts.update(dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE))
    try:
        proc = subprocess.run(cmd, env=merged_env, **opts)
    except subprocess.CalledProcessError as err:
        # err.stderr is only captured when quiet=True (or the caller passed
        # stderr=PIPE); previously calling .strip() on None raised an
        # AttributeError that masked the real failure
        stderr = err.stderr.strip() if err.stderr is not None else ''
        # chain the original exception so the traceback keeps the root cause
        raise subprocess.SubprocessError(
            f'Command failed with non-zero exit status {err.returncode}. '
            f'Error traceback: "{stderr}"'
        ) from err
    if return_proc:
        return proc
def check_fs_subjid(subject_id, subjects_dir=None):
    """
    Checks that `subject_id` exists in provided FreeSurfer `subjects_dir`
    Parameters
    ----------
    subject_id : str
        FreeSurfer subject ID
    subjects_dir : str, optional
        Path to FreeSurfer subject directory. If not set, will inherit from
        the environmental variable $SUBJECTS_DIR. Default: None
    Returns
    -------
    subject_id : str
        FreeSurfer subject ID, as provided
    subjects_dir : str
        Full filepath to `subjects_dir`
    Raises
    ------
    FileNotFoundError
    """
    # resolve the subjects directory: explicit (existing) argument wins,
    # then $SUBJECTS_DIR, then the current working directory
    if subjects_dir is not None and os.path.isdir(subjects_dir):
        subjects_dir = os.path.abspath(subjects_dir)
    else:
        subjects_dir = os.environ.get('SUBJECTS_DIR', os.getcwd())
    subjdir = os.path.join(subjects_dir, subject_id)
    if not os.path.isdir(subjdir):
        raise FileNotFoundError('Cannot find specified subject id {} in '
                                'provided subject directory {}.'
                                .format(subject_id, subjects_dir))
    return subject_id, subjects_dir
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,047
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/nulls/nulls.py
|
# -*- coding: utf-8 -*-
"""
Contains functionality for running spatial null models
"""
import numpy as np
try:
from brainsmash.mapgen import Base, Sampled
_brainsmash_avail = True
except ImportError:
_brainsmash_avail = False
try:
from brainspace.null_models.moran import MoranRandomization
_brainspace_avail = True
except ImportError:
_brainspace_avail = False
from sklearn.utils.validation import check_random_state
from brainnotation.datasets import fetch_atlas
from brainnotation.images import load_gifti, PARCIGNORE
from brainnotation.points import get_surface_distance
from brainnotation.nulls.burt import batch_surrogates
from brainnotation.nulls.spins import (gen_spinsamples, get_parcel_centroids,
load_spins, spin_data, spin_parcels)
HEMI = dict(left='L', lh='L', right='R', rh='R')
_nulls_input_docs = dict(
data_or_none="""\
data : (N,) array_like
Input data from which to generate null maps. If None is provided then the
resampling array will be returned instead.\
""",
data="""\
data : (N,) array_like
Input data from which to generate null maps\
""",
atlas_density="""\
atlas : {'fsLR', 'fsaverage', 'civet'}, optional
Name of surface atlas on which `data` are defined. Default: 'fsaverage'
density : str, optional
Density of surface mesh on which `data` are defined. Must be
compatible with specified `atlas`. Default: '10k'\
""",
parcellation="""\
parcellation : tuple-of-str or os.PathLike, optional
Filepaths to parcellation images ([left, right] hemisphere) mapping `data`
to surface mesh specified by `atlas` and `density`. Should only be supplied
if `data` represents a parcellated null map. Default: None\
""",
n_perm="""\
n_perm : int, optional
Number of null maps or permutations to generate. Default: 1000\
""",
seed="""\
seed : {int, np.random.RandomState instance, None}, optional
Seed for random number generation. Default: None\
""",
spins="""\
spins : array_like or str or os.PathLike
Filepath to or pre-loaded resampling array. If not specified spins are
generated. Default: None\
""",
surfaces="""\
surfaces : tuple-of-str or os.PathLike, optional
Instead of specifying `atlas` and `density` this specifies the surface
files on which `data` are defined. Providing this will override arguments
supplied to `atlas` and `density`. Default: None
""",
n_proc="""\
n_proc : int, optional
Number of processors to use for parallelizing computations. If negative
will use max available processors plus 1 minus the specified number.
Default: 1 (no parallelization)\
""",
distmat="""\
distmat : tuple-of-str or os.PathLike, optional
Filepaths to pre-computed (left, right) surface distance matrices.
Providing this will cause `atlas`, `density`, and `parcellation` to be
ignored. Default: None\
""",
kwargs="""\
kwargs : key-value pairs
Other keyword arguments passed directly to the underlying null method
generator\
""",
nulls="""\
nulls : np.ndarray
Generated null distribution, where each column represents a unique null
map\
"""
)
def naive_nonparametric(data, atlas='fsaverage', density='10k',
                        parcellation=None, n_perm=1000, seed=None, spins=None,
                        surfaces=None):
    # See the formatted ``__doc__`` attached below for the full contract.
    rs = check_random_state(seed)
    if spins is None:
        # coordinates are only needed here to know how many elements each
        # permutation must cover
        if data is not None:
            coords = np.asarray(data)
        else:
            if surfaces is None:
                surfaces = fetch_atlas(atlas, density)['sphere']
            coords, _ = get_parcel_centroids(surfaces,
                                             parcellation=parcellation,
                                             method='surface')
        perms = [rs.permutation(len(coords)) for _ in range(n_perm)]
        spins = np.column_stack(perms)
    spins = load_spins(spins)
    if data is None:
        # no data: return the resampling indices themselves
        data = np.arange(len(spins))
    return np.asarray(data)[spins]
naive_nonparametric.__doc__ = """\
Generates null maps from `data` using naive non-parametric method
Method uses random permutations of `data` with no consideration for spatial
topology to generate null distribution
Parameters
----------
{data_or_none}
{atlas_density}
{parcellation}
{n_perm}
{seed}
{spins}
{surfaces}
Returns
-------
{nulls}
""".format(**_nulls_input_docs)
def alexander_bloch(data, atlas='fsaverage', density='10k', parcellation=None,
                    n_perm=1000, seed=None, spins=None, surfaces=None):
    # See the formatted ``__doc__`` attached below for the full contract.
    if spins is None:
        # no precomputed spins: derive them from the spherical surface
        if surfaces is None:
            surfaces = fetch_atlas(atlas, density)['sphere']
        coords, hemi = get_parcel_centroids(surfaces,
                                            parcellation=parcellation,
                                            method='surface')
        spins = gen_spinsamples(coords, hemi, n_rotate=n_perm, seed=seed)
    spins = load_spins(spins)
    # no data: return the resampling indices themselves
    y = np.arange(len(spins)) if data is None else np.asarray(data)
    return y[spins]
alexander_bloch.__doc__ = """\
Generates null maps from `data` using method from [SN1]_
Method projects data to a spherical surface and uses arbitrary rotations to
generate null distribution. If `data` are parcellated then parcel centroids
are projected to surface and parcels are reassigned based on minimum distances.
Parameters
----------
{data_or_none}
{atlas_density}
{parcellation}
{n_perm}
{seed}
{spins}
{surfaces}
Returns
-------
{nulls}
References
----------
.. [SN1] Alexander-Bloch, A., Shou, H., Liu, S., Satterthwaite, T. D.,
   Glahn, D. C., Shinohara, R. T., Vandekar, S. N., & Raznahan, A. (2018).
   On testing for spatial correspondence between maps of human brain
   structure and function. NeuroImage, 178, 540-51.
""".format(**_nulls_input_docs)
vazquez_rodriguez = alexander_bloch
def vasa(data, atlas='fsaverage', density='10k', parcellation=None,
         n_perm=1000, seed=None, spins=None, surfaces=None):
    # See the formatted ``__doc__`` attached below for the full contract.
    # BUG FIX: the guard was inverted (``is not None``), raising exactly when
    # a parcellation WAS supplied. This method requires parcellated data, so
    # the error belongs on the ``is None`` case (matching the message text).
    if parcellation is None:
        raise ValueError('Cannot use `vasa()` null method without specifying '
                         'a parcellation. Use `alexander_bloch() instead if '
                         'working with unparcellated data.')
    if spins is None:
        if surfaces is None:
            surfaces = fetch_atlas(atlas, density)['sphere']
        coords, hemi = get_parcel_centroids(surfaces,
                                            parcellation=parcellation,
                                            method='surface')
        spins = gen_spinsamples(coords, hemi, method='vasa', n_rotate=n_perm,
                                seed=seed)
    spins = load_spins(spins)
    if data is None:
        # no data: return the resampling indices themselves
        data = np.arange(len(spins))
    return np.asarray(data)[spins]
vasa.__doc__ = """\
Generates null maps for parcellated `data` using method from [SN2]_
Method projects parcels to a spherical surface and uses arbitrary rotations
with iterative reassignments to generate null distribution. All nulls are
"perfect" permutations of the input data (at the slight expense of spatial
topology)
Parameters
----------
{data_or_none}
{atlas_density}
{parcellation}
{n_perm}
{seed}
{spins}
{surfaces}
Returns
-------
{nulls}
References
----------
.. [SN2] Váša, F., Seidlitz, J., Romero-Garcia, R., Whitaker, K. J.,
   Rosenthal, G., Vértes, P. E., ... & Jones, P. B. (2018). Adolescent
   tuning of association cortex in human structural brain networks.
   Cerebral Cortex, 28(1), 281-294.
""".format(**_nulls_input_docs)
def hungarian(data, atlas='fsaverage', density='10k', parcellation=None,
              n_perm=1000, seed=None, spins=None, surfaces=None):
    # See the formatted ``__doc__`` attached below for the full contract.
    # BUG FIX: the guard was inverted (``is not None``), raising exactly when
    # a parcellation WAS supplied. This method requires parcellated data.
    if parcellation is None:
        raise ValueError('Cannot use `hungarian()` null method without '
                         'specifying a parcellation. Use `alexander_bloch() '
                         'instead if working with unparcellated data.')
    if spins is None:
        if surfaces is None:
            surfaces = fetch_atlas(atlas, density)['sphere']
        coords, hemi = get_parcel_centroids(surfaces,
                                            parcellation=parcellation,
                                            method='surface')
        spins = gen_spinsamples(coords, hemi, method='hungarian',
                                n_rotate=n_perm, seed=seed)
    spins = load_spins(spins)
    if data is None:
        # no data: return the resampling indices themselves
        data = np.arange(len(spins))
    return np.asarray(data)[spins]
hungarian.__doc__ = """\
Generates null maps for parcellated `data` using the Hungarian method ([SN3]_)
Method projects parcels to a spherical surface and uses arbitrary rotations
with reassignments based on optimization via the Hungarian method to generate
null distribution. All nulls are "perfect" permutations of the input data (at
the slight expense of spatial topology)
Parameters
----------
{data_or_none}
{atlas_density}
{parcellation}
{n_perm}
{seed}
{spins}
{surfaces}
Returns
-------
{nulls}
References
----------
.. [SN3] Kuhn, H. W. (1955). The Hungarian method for the assignment problem.
   Naval Research Logistics Quarterly, 2(1‐2), 83-97.
""".format(**_nulls_input_docs)
def baum(data, atlas='fsaverage', density='10k', parcellation=None,
         n_perm=1000, seed=None, spins=None, surfaces=None):
    # See the formatted ``__doc__`` attached below for the full contract.
    # BUG FIX: the guard was inverted (``is not None``), raising exactly when
    # a parcellation WAS supplied. This method requires parcellated data.
    if parcellation is None:
        raise ValueError('Cannot use `baum()` null method without specifying '
                         'a parcellation. Use `alexander_bloch() instead if '
                         'working with unparcellated data.')
    if surfaces is None:
        surfaces = fetch_atlas(atlas, density)['sphere']
    spins = spin_parcels(surfaces, parcellation,
                         n_rotate=n_perm, spins=spins, seed=seed)
    if data is None:
        # no data: resample the parcel indices themselves
        data = np.arange(len(spins))
    y = np.asarray(data)
    nulls = y[spins]
    # parcels that could not be reassigned during rotation are marked -1 by
    # spin_parcels(); flag the corresponding null entries as missing
    nulls[spins == -1] = np.nan
    return nulls
baum.__doc__ = """\
Generates null maps for parcellated `data` using method from [SN4]_
Method projects `data` to spherical surface and uses arbitrary rotations to
generate null distributions. Reassigned parcels are based on the most common
(i.e., modal) value of the vertices in each parcel within the the rotated data
Parameters
----------
{data_or_none}
{atlas_density}
{parcellation}
{n_perm}
{seed}
{spins}
{surfaces}
Returns
-------
{nulls}
References
----------
.. [SN4] Baum, G. L., Cui, Z., Roalf, D. R., Ciric, R., Betzel, R. F., Larsen,
   B., ... & Satterthwaite, T. D. (2020). Development of structure–function
   coupling in human brain networks during youth. Proceedings of the National
   Academy of Sciences, 117(1), 771-778.
""".format(**_nulls_input_docs)
def cornblath(data, atlas='fsaverage', density='10k', parcellation=None,
              n_perm=1000, seed=None, spins=None, surfaces=None):
    # See the formatted ``__doc__`` attached below for the full contract.
    # BUG FIX: the guard was inverted (``is not None``), raising exactly when
    # a parcellation WAS supplied. This method requires parcellated data.
    if parcellation is None:
        raise ValueError('Cannot use `cornblath()` null method without '
                         'specifying a parcellation. Use `alexander_bloch() '
                         'instead if working with unparcellated data.')
    y = np.asarray(data)
    if surfaces is None:
        surfaces = fetch_atlas(atlas, density)['sphere']
    # spin_data() projects parcels to vertices, rotates, and re-averages
    nulls = spin_data(y, surfaces, parcellation,
                      n_rotate=n_perm, spins=spins, seed=seed)
    return nulls
cornblath.__doc__ = """\
Generates null maps for parcellated `data` using method from [SN5]_
Method projects `data` to spherical surface and uses arbitrary rotations to
generate null distributions. Reassigned parcels are based on the average value
of the vertices in each parcel within the rotated data
Parameters
----------
{data}
{atlas_density}
{parcellation}
{n_perm}
{seed}
{spins}
{surfaces}
Returns
-------
{nulls}
References
----------
.. [SN5] Cornblath, E. J., Ashourvan, A., Kim, J. Z., Betzel, R. F., Ciric, R.,
   Adebimpe, A., ... & Bassett, D. S. (2020). Temporal sequences of brain
   activity at rest are constrained by white matter structure and modulated by
   cognitive demands. Communications biology, 3(1), 1-12.
""".format(**_nulls_input_docs)
def _get_distmat(hemisphere, atlas='fsaverage', density='10k',
                 parcellation=None, drop=None, n_proc=1):
    # See the formatted ``__doc__`` attached below for the full contract.
    # normalize common aliases ('left', 'lh', ...) to 'L' / 'R'
    hemi = HEMI.get(hemisphere, hemisphere)
    if hemi not in ('L', 'R'):
        # fixed typo in error message ("hemishere" -> "hemisphere")
        raise ValueError(f'Invalid hemisphere designation {hemisphere}')
    if drop is None:
        drop = PARCIGNORE
    atlas = fetch_atlas(atlas, density)
    # select the requested hemisphere's pial surface + medial wall mask
    surf, medial = getattr(atlas['pial'], hemi), getattr(atlas['medial'], hemi)
    if parcellation is None:
        dist = get_surface_distance(surf, medial=medial, n_proc=n_proc)
    else:
        dist = get_surface_distance(surf, parcellation=parcellation,
                                    medial_labels=drop, drop=drop,
                                    n_proc=n_proc)
    return dist
_get_distmat.__doc__ = """\
Generates surface distance matrix for specified `hemisphere`
If `parcellation` is provided then the returned distance matrix will be a
parcel-parcel matrix.
Parameters
----------
hemisphere : {{'L', 'R'}}
    Hemisphere of surface from which to generate distance matrix
{atlas_density}
{parcellation}
drop : list-of-str, optional
    If `parcellation` is not None, which parcels should be ignored / dropped
    from the generate distance matrix. If not specified will ignore parcels
    generally indicative of the medial wall. Default: None
{n_proc}
Returns
-------
dist : (N, N) np.ndarray
    Surface distance matrix between vertices. If a `parcellation` is specified
    then this will be the parcel-parcel distance matrix, where the distance
    between parcels is the average distance between all constituent vertices
""".format(**_nulls_input_docs)
def _make_surrogates(data, method, atlas='fsaverage', density='10k',
                     parcellation=None, n_perm=1000, seed=None, distmat=None,
                     n_proc=1, **kwargs):
    # See the formatted ``__doc__`` attached below for the full contract.
    if method not in ('burt2018', 'burt2020', 'moran'):
        raise ValueError(f'Invalid null method: {method}')
    darr = np.asarray(data)
    # global minimum (ignoring NaN) used to shift data positive for burt2018
    dmin = darr[np.logical_not(np.isnan(darr))].min()
    if parcellation is None:
        parcellation = (None, None)
    surrogates = np.zeros((len(data), n_perm))
    # each hemisphere is handled independently with its own distance matrix
    for n, (hemi, parc) in enumerate(zip(('L', 'R'), parcellation)):
        if distmat is None:
            dist = _get_distmat(hemi, atlas=atlas, density=density,
                                parcellation=parc, n_proc=n_proc)
        else:
            dist = distmat[n]
        if parc is None:
            # vertex-wise data: first half of `data` is L, second half is R
            idx = np.arange(n * (len(data) // 2), (n + 1) * (len(data) // 2))
        else:
            # parcellated data: drop label 0 (assumed background/medial wall
            # -- TODO confirm against parcellation conventions)
            idx = np.unique(load_gifti(parc).agg_data())[1:]
        hdata = np.squeeze(data[idx])
        # NaN entries are excluded from surrogate generation and propagated
        mask = np.logical_not(np.isnan(hdata))
        surrogates[idx[np.logical_not(mask)]] = np.nan
        hdata, dist, idx = hdata[mask], dist[np.ix_(mask, mask)], idx[mask]
        if method == 'burt2018':
            # Burt 2018 method requires strictly positive input data
            hdata += np.abs(dmin) + 0.1
            surrogates[idx] = batch_surrogates(dist, hdata, n_surr=n_perm,
                                               seed=seed)
        elif method == 'burt2020':
            if parc is None:
                # brainsmash Sampled expects a row-sorted distance matrix
                # plus the corresponding argsort index
                index = np.argsort(dist, axis=-1)
                dist = np.sort(dist, axis=-1)
                surrogates[idx] = \
                    Sampled(hdata, dist, index, n_jobs=n_proc,
                            seed=seed, **kwargs)(n_perm).T
            else:
                surrogates[idx] = \
                    Base(hdata, dist, seed=seed, **kwargs)(n_perm, 50).T
        elif method == 'moran':
            # convert distances to inverse-distance weights (diag set to 1
            # before inversion to avoid division by zero)
            dist = dist.astype('float64')
            np.fill_diagonal(dist, 1)
            dist **= -1
            opts = dict(joint=True, tol=1e-6, n_rep=n_perm, random_state=seed)
            opts.update(**kwargs)
            # BUG FIX: previously passed ``**kwargs`` here, silently dropping
            # the defaults (joint / tol / n_rep / random_state) built in opts
            mrs = MoranRandomization(**opts)
            surrogates[idx] = mrs.fit(dist).randomize(hdata).T
    return surrogates
_make_surrogates.__doc__ = """\
Generates null surrogates for specified `data` using `method`
Parameters
----------
{data}
method : {{'burt2018', 'burt2020', 'moran'}}
    Method by which to generate null surrogates
{atlas_density}
{parcellation}
{n_perm}
{seed}
{distmat}
{n_proc}
{kwargs}
Returns
-------
{nulls}
""".format(**_nulls_input_docs)
def burt2018(data, atlas='fsaverage', density='10k', parcellation=None,
             n_perm=1000, seed=None, distmat=None, n_proc=1, **kwargs):
    # Thin wrapper: validate the optional dependency, then delegate to the
    # shared surrogate generator. Full contract in ``__doc__`` below.
    if not _brainsmash_avail:
        raise ImportError('Cannot run burt2018 null model when `brainsmash` '
                          'is not installed. Please `pip install brainsmash` '
                          'and try again.')
    return _make_surrogates(data, 'burt2018', atlas=atlas,
                            density=density, parcellation=parcellation,
                            n_perm=n_perm, seed=seed, distmat=distmat,
                            n_proc=n_proc, **kwargs)
burt2018.__doc__ = """\
Generates null maps for `data` using method from [SN6]_
Method uses a spatial auto-regressive model to estimate distance-dependent
relationship of `data` and generates surrogate maps with similar properties
Parameters
----------
{data}
{atlas_density}
{parcellation}
{n_perm}
{seed}
{distmat}
{kwargs}
Returns
-------
{nulls}
References
----------
.. [SN6] Burt, J. B., Demirtaş, M., Eckner, W. J., Navejar, N. M., Ji, J. L.,
   Martin, W. J., ... & Murray, J. D. (2018). Hierarchy of transcriptomic
   specialization across human cortex captured by structural neuroimaging
   topography. Nature Neuroscience, 21(9), 1251-1259.
""".format(**_nulls_input_docs)
def burt2020(data, atlas='fsaverage', density='10k', parcellation=None,
             n_perm=1000, seed=None, distmat=None, n_proc=1, **kwargs):
    # Thin wrapper: validate the optional dependency, then delegate to the
    # shared surrogate generator. Full contract in ``__doc__`` below.
    if not _brainsmash_avail:
        raise ImportError('Cannot run burt2020 null model when `brainsmash` '
                          'is not installed. Please `pip install brainsmash` '
                          'and try again.')
    return _make_surrogates(data, 'burt2020', atlas=atlas,
                            density=density, parcellation=parcellation,
                            n_perm=n_perm, seed=seed, distmat=distmat,
                            n_proc=n_proc, **kwargs)
burt2020.__doc__ = """\
Generates null maps for `data` using method from [SN7]_ and [SN8]_
Method uses variograms to estimate spatial autocorrelation of `data` and
generates surrogate maps with similar variogram properties
Parameters
----------
{data}
{atlas_density}
{parcellation}
{n_perm}
{seed}
{n_proc}
{distmat}
{kwargs}
Returns
-------
{nulls}
References
----------
.. [SN7] Burt, J. B., Helmer, M., Shinn, M., Anticevic, A., & Murray, J. D.
   (2020). Generative modeling of brain maps with spatial autocorrelation.
   NeuroImage, 220, 117038.
.. [SN8] https://github.com/murraylab/brainsmash
""".format(**_nulls_input_docs)
def moran(data, atlas='fsaverage', density='10k', parcellation=None,
          n_perm=1000, seed=None, distmat=None, n_proc=1, **kwargs):
    # Thin wrapper: validate the optional dependency, then delegate to the
    # shared surrogate generator. Full contract in ``__doc__`` below.
    if not _brainspace_avail:
        raise ImportError('Cannot run moran null model when `brainspace` is '
                          'not installed. Please `pip install brainspace` and '
                          'try again.')
    return _make_surrogates(data, 'moran', atlas=atlas,
                            density=density, parcellation=parcellation,
                            n_perm=n_perm, seed=seed, distmat=distmat,
                            n_proc=n_proc, **kwargs)
moran.__doc__ = """\
Generates null maps for `data` using method from [SN9]_
Method uses a spatial decomposition of a distance-based weight matrix to
estimate eigenvectors that are used to generate surrogate maps by imposing a
similar spatial structure on randomized data. For a MATLAB implementation
refer to [SN10]_ and [SN11]_
Parameters
----------
{data}
{atlas_density}
{parcellation}
{n_perm}
{seed}
{n_proc}
{distmat}
{kwargs}
Returns
-------
{nulls}
References
----------
.. [SN9] Wagner, H. H., & Dray, S. (2015). Generating spatially constrained
   null models for irregularly spaced data using M oran spectral randomization
   methods. Methods in Ecology and Evolution, 6(10), 1169-1178.
.. [SN10] de Wael, R. V., Benkarim, O., Paquola, C., Lariviere, S., Royer, J.,
   Tavakol, S., ... & Bernhardt, B. C. (2020). BrainSpace: a toolbox for the
   analysis of macroscale gradients in neuroimaging and connectomics datasets.
   Communications Biology, 3(1), 1-10.
.. [SN11] https://github.com/MICA-MNI/BrainSpace/
""".format(**_nulls_input_docs)
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,048
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/nulls/tests/test_burt.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.nulls.burt functionality
"""
import numpy as np
import pytest
from brainnotation.nulls import burt
def test__make_weight_matrix():
    # the weight matrix should mirror the input's shape with a zeroed diagonal
    dist = np.random.rand(100, 100)
    weights = burt._make_weight_matrix(dist, 0.5)
    assert weights.shape == dist.shape
    assert np.allclose(np.diag(weights), 0)
# The stubs below intentionally fail; ``xfail`` records them as known-missing
# tests rather than passing no-ops until real assertions are written.
@pytest.mark.xfail
def test_estimate_rho_d0():
    # TODO: implement test for burt.estimate_rho_d0
    assert False
@pytest.mark.xfail
def test_make_surrogate():
    # TODO: implement test for burt.make_surrogate
    assert False
@pytest.mark.xfail
def test_batch_surrogates():
    # TODO: implement test for burt.batch_surrogates
    assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,049
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/nulls/__init__.py
|
"""
Functions for computing null models
"""
__all__ = [
'naive_nonparametric', 'alexander_bloch', 'vazquez_rodriguez', 'vasa',
'hungarian', 'baum', 'cornblath', 'burt2018', 'burt2020', 'moran'
]
from brainnotation.nulls.nulls import (
naive_nonparametric, alexander_bloch, vazquez_rodriguez, vasa,
hungarian, baum, cornblath, burt2018, burt2020, moran
)
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,050
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/nulls/tests/test_nulls.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.nulls.nulls functionality
"""
import numpy as np
import pytest
from brainnotation.nulls import nulls
def test_naive_nonparametric():
data = np.random.rand(50)
perms = nulls.naive_nonparametric(data, n_perm=100)
assert perms.shape == (50, 100)
assert np.all(np.sort(perms, axis=0) == np.sort(data, axis=0)[:, None])
resamples = nulls.naive_nonparametric(None, n_perm=100)
assert resamples.shape == (20484, 100)
assert np.all(np.sort(resamples, axis=0) == np.arange(20484)[:, None])
@pytest.mark.xfail
def test_alexander_bloch():
assert False
@pytest.mark.xfail
def test_vasa():
assert False
@pytest.mark.xfail
def test_hungarian():
assert False
@pytest.mark.xfail
def test_baum():
assert False
@pytest.mark.xfail
def test_cornblath():
assert False
@pytest.mark.xfail
def test__get_distmat():
assert False
@pytest.mark.xfail
def test__make_surrogates():
assert False
@pytest.mark.xfail
def test_burt2018():
assert False
@pytest.mark.xfail
def test_burt2020():
assert False
@pytest.mark.xfail
def test_moran():
assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,051
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/datasets/tests/test_utils.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.datasets.utils functionality
"""
import os
import pytest
from brainnotation.datasets import utils
@pytest.mark.xfail
def test__osfify_urls():
assert False
@pytest.mark.xfail
def test_get_dataset_info():
assert False
@pytest.mark.xfail
def test_get_data_dir():
assert False
def test__get_token():
orig = os.environ.pop('BRAINNOTATION_OSF_TOKEN', None)
assert utils._get_token(None) is None
assert utils._get_token('test') == 'test'
os.environ['BRAINNOTATION_OSF_TOKEN'] = 'test_env'
assert utils._get_token(None) == 'test_env'
assert utils._get_token('test') == 'test'
if orig is not None: # reset env variable
os.environ['BRAINNOTATION_OSF_TOKEN'] = orig
@pytest.mark.xfail
def test__get_session():
assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,052
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/points.py
|
# -*- coding: utf-8 -*-
"""
Functions for working with triangle meshes + surfaces
"""
from joblib import Parallel, delayed
import numpy as np
from scipy import ndimage, sparse
from brainnotation.images import load_gifti, relabel_gifti, PARCIGNORE
def point_in_triangle(point, triangle, return_pdist=True):
"""
Checks whether `point` falls inside `triangle`
Parameters
----------
point : (3,) array_like
Coordinates of point
triangle (3, 3) array_like
Coordinates of triangle
return_pdist : bool, optional
Whether to return planar distance (see outputs). Default: True
Returns
-------
inside : bool
Whether `point` is inside triangle
pdist : float
The approximate distance of the point to the plane of the triangle.
Only returned if `return_pdist` is True
"""
A, B, C = triangle
v0 = C - A
v1 = B - A
v2 = point - A
dot00 = np.dot(v0, v0)
dot01 = np.dot(v0, v1)
dot02 = np.dot(v0, v2)
dot11 = np.dot(v1, v1)
dot12 = np.dot(v1, v2)
denom = 1 / (dot00 * dot11 - dot01 * dot01)
u = (dot11 * dot02 - dot01 * dot12) * denom
v = (dot00 * dot12 - dot01 * dot02) * denom
inside = (u >= 0) and (v >= 0) and (u + v < 1)
if return_pdist:
return inside, np.abs(v2 @ np.cross(v1, v0))
return inside
def which_triangle(point, triangles):
"""
Determines which of `triangles` the provided `point` falls inside
Parameters
----------
point : (3,) array_like
Coordinates of point
triangles : (N, 3, 3) array_like
Coordinates of `N` triangles to check
Returns
-------
idx : int
Index of `triangles` that `point` is inside of. If `point` does not
fall within any of `triangles` then this will be None
"""
idx, planar = None, np.inf
for n, tri in enumerate(triangles):
inside, pdist = point_in_triangle(point, tri)
if pdist < planar and inside:
idx, planar = n, pdist
return idx
def _get_edges(faces):
"""
Gets set of edges from `faces`
Parameters
----------
faces : (F, 3) array_like
Set of indices creating triangular faces of a mesh
Returns
-------
edges : (F*3, 2) array_like
All edges in `faces`
"""
faces = np.asarray(faces)
edges = np.sort(faces[:, [0, 1, 1, 2, 2, 0]].reshape((-1, 2)), axis=1)
return edges
def get_shared_triangles(faces):
"""
Returns dictionary of triangles sharing edges from `faces`
Parameters
----------
faces : (N, 3)
Triangles comprising mesh
Returns
-------
shared : dict
Where keys are len-2 tuple of vertex ids for the shared edge and values
are the triangles that have this shared edge.
"""
# first generate the list of edges for the provided faces and the
# index for which face the edge is from (which is just the index of the
# face repeated thrice, since each face generates three direct edges)
edges = _get_edges(faces)
edges_face = np.repeat(np.arange(len(faces)), 3)
# every edge appears twice in a watertight surface, so we'll first get the
# indices for each duplicate edge in `edges` (this should, assuming all
# goes well, have rows equal to len(edges) // 2)
order = np.lexsort(edges.T[::-1])
edges_sorted = edges[order]
dupe = np.any(edges_sorted[1:] != edges_sorted[:-1], axis=1)
dupe_idx = np.append(0, np.nonzero(dupe)[0] + 1)
start_ok = np.diff(np.concatenate((dupe_idx, [len(edges_sorted)]))) == 2
groups = np.tile(dupe_idx[start_ok].reshape(-1, 1), 2)
edge_groups = order[groups + np.arange(2)]
# now, get the indices of the faces that participate in these duplicate
# edges, as well as the edges themselves
adjacency = edges_face[edge_groups]
nondegenerate = adjacency[:, 0] != adjacency[:, 1]
adjacency = np.sort(adjacency[nondegenerate], axis=1)
adjacency_edges = edges[edge_groups[:, 0][nondegenerate]]
# the non-shared vertex index is the same shape as adjacency, holding
# vertex indices vs face indices
indirect_edges = np.zeros(adjacency.shape, dtype=np.int32) - 1
# loop through the two columns of adjacency
for i, fid in enumerate(adjacency.T):
# faces from the current column of adjacency
face = faces[fid]
# get index of vertex not included in shared edge
unshared = np.logical_not(np.logical_or(
face == adjacency_edges[:, 0].reshape(-1, 1),
face == adjacency_edges[:, 1].reshape(-1, 1)))
# each row should have one "uncontained" vertex; ignore degenerates
row_ok = unshared.sum(axis=1) == 1
unshared[~row_ok, :] = False
indirect_edges[row_ok, i] = face[unshared]
# get vertex coordinates of triangles pairs with shared edges, ordered
# such that the non-shared vertex is always _last_ among the trio
shared = np.sort(face[np.logical_not(unshared)].reshape(-1, 1, 2), axis=-1)
shared = np.repeat(shared, 2, axis=1)
triangles = np.concatenate((shared, indirect_edges[..., None]), axis=-1)
return dict(zip(map(tuple, adjacency_edges), triangles))
def get_direct_edges(vertices, faces):
"""
Gets (unique) direct edges and weights in mesh describes by inputs.
Parameters
----------
vertices : (N, 3) array_like
Coordinates of `vertices` comprising mesh with `faces`
faces : (F, 3) array_like
Indices of `vertices` that compose triangular faces of mesh
Returns
-------
edges : (E, 2) array_like
Indices of `vertices` comprising direct edges (without duplicates)
weights : (E, 1) array_like
Distances between `edges`
"""
edges = np.unique(_get_edges(faces), axis=0)
weights = np.linalg.norm(np.diff(vertices[edges], axis=1), axis=-1)
return edges, weights.squeeze()
def get_indirect_edges(vertices, faces):
"""
Gets indirect edges and weights in mesh described by inputs
Indirect edges are between two vertices that participate in faces sharing
an edge
Parameters
----------
vertices : (N, 3) array_like
Coordinates of `vertices` comprising mesh with `faces`
faces : (F, 3) array_like
Indices of `vertices` that compose triangular faces of mesh
Returns
-------
edges : (E, 2) array_like
Indices of `vertices` comprising indirect edges (without duplicates)
weights : (E, 1) array_like
Distances between `edges` on surface
References
----------
https://github.com/mikedh/trimesh (MIT licensed)
"""
triangles = np.stack(list(get_shared_triangles(faces).values()), axis=0)
indirect_edges = triangles[..., -1]
# `A.shape`: (3, N, 2) corresponding to (xyz coords, edges, triangle pairs)
A, B, V = vertices[triangles].transpose(2, 3, 0, 1)
# calculate the xyz coordinates of the foot of each triangle, where the
# base is the shared edge
# that is, we're trying to calculate F in the equation `VF = VB - (w * BA)`
# where `VF`, `VB`, and `BA` are vectors, and `w = (AB * VB) / (AB ** 2)`
w = (np.sum((A - B) * (V - B), axis=0, keepdims=True)
/ np.sum((A - B) ** 2, axis=0, keepdims=True))
feet = B - (w * (B - A))
# calculate coordinates of midpoint b/w the feet of each pair of triangles
midpoints = (np.sum(feet.transpose(1, 2, 0), axis=1) / 2)[:, None]
# calculate Euclidean distance between non-shared vertices and midpoints
# and add distances together for each pair of triangles
norms = np.linalg.norm(vertices[indirect_edges] - midpoints, axis=-1)
weights = np.sum(norms, axis=-1)
# NOTE: weights won't be perfectly accurate for a small subset of triangle
# pairs where either triangle has angle >90 along the shared edge. in these
# the midpoint lies _outside_ the shared edge, so neighboring triangles
# would need to be taken into account. that said, this occurs in only a
# minority of cases and the difference tends to be in the ~0.001 mm range
return indirect_edges, weights
def make_surf_graph(vertices, faces, mask=None):
"""
Constructs adjacency graph from `surf`.
Parameters
----------
vertices : (N, 3) array_like
Coordinates of `vertices` comprising mesh with `faces`
faces : (F, 3) array_like
Indices of `vertices` that compose triangular faces of mesh
mask : (N,) array_like, optional (default None)
Boolean mask indicating which vertices should be removed from generated
graph. If not supplied, all vertices are used.
Returns
-------
graph : scipy.sparse.csr_matrix
Sparse matrix representing graph of `vertices` and `faces`
Raises
------
ValueError
Inconsistent number of vertices in `mask` and `vertices`
"""
if mask is not None and len(mask) != len(vertices):
raise ValueError('Supplied `mask` array has different number of '
'vertices than supplied `vertices`.')
# get all (direct + indirect) edges from surface
direct_edges, direct_weights = get_direct_edges(vertices, faces)
indirect_edges, indirect_weights = get_indirect_edges(vertices, faces)
edges = np.row_stack((direct_edges, indirect_edges))
weights = np.hstack((direct_weights, indirect_weights))
# remove edges that include a vertex in `mask`
if mask is not None:
idx, = np.where(mask)
mask = ~np.any(np.isin(edges, idx), axis=1)
edges, weights = edges[mask], weights[mask]
# construct our graph on which to calculate shortest paths
return sparse.csr_matrix((np.squeeze(weights), (edges[:, 0], edges[:, 1])),
shape=(len(vertices), len(vertices)))
def _get_graph_distance(vertex, graph, labels=None):
"""
Gets surface distance of `vertex` to all other vertices in `graph`
Parameters
----------
vertex : int
Index of vertex for which to calculate surface distance
graph : array_like
Graph along which to calculate shortest path distances
labels : array_like, optional
Labels indicating parcel to which each vertex belongs. If provided,
distances will be averaged within distinct labels
Returns
-------
dist : (N,) numpy.ndarray
Distance of `vertex` to all other vertices in `graph` (or to all
parcels in `labels`, if provided)
"""
dist = sparse.csgraph.dijkstra(graph, directed=False, indices=[vertex])
if labels is not None:
dist = ndimage.mean(input=np.delete(dist, vertex),
labels=np.delete(labels, vertex),
index=np.unique(labels))
return dist.astype('float32')
def get_surface_distance(surface, parcellation=None, medial=None,
medial_labels=None, drop=None, n_proc=1):
"""
Calculates surface distance for vertices in `surface`
Parameters
----------
surface : str or os.PathLike
Path to surface file on which to calculate distance
parcellation : str or os.PathLike, optional
Path to file with parcel labels for provided `surface`. If provided
will calculate parcel-parcel distances instead of vertex distances,
where parcel-parcel distance is the average distance between all
constituent vertices in two parcels. Default: None
medial : str or os.PathLike, optional
Path to file indicating which vertices correspond to the medial wall
(0 indicates medial wall). If provided will prohibit calculation of
surface distance along the medial wall. Superseded by `medial_labels`
if both are provided. Default: None
medial_labels : list of str, optional
List of parcel names that comprise the medial wall and through which
travel should be disallowed. Only valid if `parcellation` is provided;
supersedes `medial` if both are provided. Default: None
drop : list of str, optional
List of parcel names that should be dropped from the final distance
matrix (if `parcellation` is provided). If not specified, will ignore
parcels commonly used to reference the medial wall (e.g., 'unknown',
'corpuscallosum', '???', 'Background+FreeSurfer_Defined_Medial_Wall').
Default: None
n_proc : int, optional
Number of processors to use for parallelizing distance calculation. If
negative, will use max available processors plus 1 minus the specified
number. Default: 1 (no parallelization)
Returns
-------
distance : (N, N) numpy.ndarray
Surface distance between vertices/parcels on `surface`
"""
if drop is None:
drop = PARCIGNORE
if medial_labels is not None:
if isinstance(medial_labels, str):
medial_labels = [medial_labels]
drop = set(drop + list(medial_labels))
vert, faces = load_gifti(surface).agg_data()
n_vert = vert.shape[0]
labels, mask = None, np.zeros(n_vert, dtype=bool)
# get data from parcellation / medial wall files if provided
if medial is not None:
mask = np.logical_not(load_gifti(medial).agg_data().astype(bool))
if parcellation is not None:
parcellation, = relabel_gifti(parcellation, background=drop)
labels = load_gifti(parcellation).agg_data()
mask[labels == 0] = True
# calculate distance from each vertex to all other vertices
graph = make_surf_graph(vert, faces, mask=mask)
dist = np.row_stack(Parallel(n_jobs=n_proc, max_nbytes=None)(
delayed(_get_graph_distance)(n, graph, labels) for n in range(n_vert)
))
# average distance for all vertices within a parcel + set diagonal to 0
if labels is not None:
dist = np.row_stack([
dist[labels == lab].mean(axis=0) for lab in np.unique(labels)
])
dist[np.diag_indices_from(dist)] = 0
dist = dist[1:, 1:]
# remove distances for parcels that we aren't interested in
return dist
def _geodesic_parcel_centroid(vertices, faces, inds):
"""
Calculates parcel centroids based on surface distance
Parameters
----------
vertices : (N, 3)
Coordinates of vertices defining surface
faces : (F, 3)
Triangular faces defining surface
inds : (R,)
Indices of `vertices` that belong to parcel
Returns
--------
roi : (3,) numpy.ndarray
Vertex corresponding to centroid of parcel
"""
mask = np.ones(len(vertices), dtype=bool)
mask[inds] = False
mat = make_surf_graph(vertices, faces, mask=mask)
paths = sparse.csgraph.dijkstra(mat, directed=False, indices=inds)[:, inds]
# the selected vertex is the one with the minimum average shortest path
# to the other vertices in the parcel
roi = vertices[inds[paths.mean(axis=1).argmin()]]
return roi
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,053
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/__init__.py
|
__all__ = ['resample_images', 'correlate_images']
from brainnotation.resampling import resample_images
from brainnotation.stats import correlate_images
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,054
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/tests/test_plotting.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.plotting functionality
"""
import pytest
@pytest.mark.xfail
def test_plot_surf_template():
assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,055
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/datasets/tests/test__osf.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.datasets._osf functionality
"""
from pkg_resources import resource_filename
import pytest
from brainnotation.datasets import _osf
@pytest.mark.xfail
def test_parse_filename():
assert False
@pytest.mark.xfail
def test_parse_fname_list():
assert False
def test_parse_json():
osf = resource_filename('brainnotation', 'datasets/data/osf.json')
out = _osf.parse_json(osf)
assert isinstance(out, list) and all(isinstance(i, dict) for i in out)
@pytest.mark.xfail
def test_write_json():
assert False
@pytest.mark.xfail
def test_complete_json():
assert False
@pytest.mark.xfail
def test_check_missing_keys():
assert False
@pytest.mark.xfail
def test_generate_auto_keys():
assert False
@pytest.mark.xfail
def test_clean_minimal_keys():
assert False
@pytest.mark.xfail
def test_get_url():
assert False
@pytest.mark.xfail
def test_generate_release_json():
assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,056
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/transforms.py
|
# -*- coding: utf-8 -*-
"""
Functionality for transforming files between spaces
"""
import os
from pathlib import Path
import nibabel as nib
from nilearn import image as nimage
import numpy as np
from scipy.interpolate import interpn
from brainnotation.datasets import (ALIAS, DENSITIES, fetch_atlas,
fetch_regfusion, get_atlas_dir)
from brainnotation.images import construct_shape_gii, load_gifti, load_nifti
from brainnotation.utils import tmpname, run
METRICRESAMPLE = 'wb_command -metric-resample {metric} {src} {trg} ' \
'ADAP_BARY_AREA {out} -area-metrics {srcarea} {trgarea} ' \
'-current-roi {srcmask}'
LABELRESAMPLE = 'wb_command -label-resample {metric} {src} {trg} ' \
'ADAP_BARY_AREA {out} -area-metrics {srcarea} {trgarea} ' \
'-current-roi {srcmask}'
MASKSURF = 'wb_command -metric-mask {out} {trgmask} {out}'
SURFFMT = 'tpl-{space}{trg}_den-{den}_hemi-{hemi}_sphere.surf.gii'
VAFMT = 'tpl-{space}_den-{den}_hemi-{hemi}_desc-vaavg_midthickness.shape.gii'
MLFMT = 'tpl-{space}_den-{den}_hemi-{hemi}_desc-nomedialwall_dparc.label.gii'
def _regfusion_project(data, ras, affine, method='linear'):
"""
Project `data` to `ras` space using regfusion
Parameters
----------
data : (X, Y, Z[, V]) array_like
Input (volumetric) data to be projected to the surface
ras : (N, 3) array_like
Coordinates of surface points derived from registration fusion
affine (4, 4) array_like
Affine mapping `data` to `ras`-space coordinates
method : {'nearest', 'linear'}, optional
Method for projection. Default: 'linear'
Returns
-------
projected : (N, V) array_like
Input `data` projected to the surface
"""
data, ras, affine = np.asarray(data), np.asarray(ras), np.asarray(affine)
coords = nib.affines.apply_affine(np.linalg.inv(affine), ras)
volgrid = [range(data.shape[i]) for i in range(3)]
if data.ndim == 3:
projected = interpn(volgrid, data, coords, method=method)
elif data.ndim == 4:
projected = np.column_stack([
interpn(volgrid, data[..., n], coords, method=method)
for n in range(data.shape[-1])
])
return construct_shape_gii(projected.squeeze())
def _vol_to_surf(img, space, density, method='linear'):
"""
Projects `img` to the surface defined by `space` and `density`
Parameters
----------
img : niimg_like, str, or os.PathLike
Image to be projected to the surface
den : str
Density of desired output space
space : str
Desired output space
method : {'nearest', 'linear'}, optional
Method for projection. Default: 'linear'
Returns
-------
projected : (2,) tuple-of-nib.GiftiImage
Left [0] and right [1] hemisphere projected `image` data
"""
space = ALIAS.get(space, space)
if space not in DENSITIES:
raise ValueError(f'Invalid space argument: {space}')
if density not in DENSITIES[space]:
raise ValueError(f'Invalid density for {space} space: {density}')
if method not in ('nearest', 'linear'):
raise ValueError('Invalid method argument: {method}')
img = load_nifti(img)
out = ()
for ras in fetch_regfusion(space)[density]:
out += (_regfusion_project(img.get_fdata(), np.loadtxt(ras),
img.affine, method=method),)
return out
def mni152_to_civet(img, civet_density='41k', method='linear'):
"""
Projects `img` in MNI152 space to CIVET surface
Parameters
----------
img : str or os.PathLike or niimg_like
Image in MNI152 space to be projected
civet_density : {'41k'}, optional
Desired output density of CIVET surface. Default: '41k'
method : {'nearest', 'linear'}, optional
Method for projection. Specify 'nearest' if `img` is a label image.
Default: 'linear'
Returns
-------
civet : (2,) tuple-of-nib.GiftiImage
Projected `img` on CIVET surface
"""
if civet_density == '164k':
raise NotImplementedError('Cannot perform registration fusion to '
'CIVET 164k space yet.')
return _vol_to_surf(img, 'civet', civet_density, method)
def mni152_to_fsaverage(img, fsavg_density='41k', method='linear'):
"""
Projects `img` in MNI152 space to fsaverage surface
Parameters
----------
img : str or os.PathLike or niimg_like
Image in MNI152 space to be projected
fsavg_density : {'3k', '10k', '41k', '164k'}, optional
Desired output density of fsaverage surface. Default: '41k'
method : {'nearest', 'linear'}, optional
Method for projection. Specify 'nearest' if `img` is a label image.
Default: 'linear'
Returns
-------
fsaverage : (2,) tuple-of-nib.GiftiImage
Projected `img` on fsaverage surface
"""
return _vol_to_surf(img, 'fsaverage', fsavg_density, method)
def mni152_to_fslr(img, fslr_density='32k', method='linear'):
"""
Projects `img` in MNI152 space to fsLR surface
Parameters
----------
img : str or os.PathLike or niimg_like
Image in MNI152 space to be projected
fslr_density : {'32k', '164k'}, optional
Desired output density of fsLR surface. Default: '32k'
method : {'nearest', 'linear'}, optional
Method for projection. Specify 'nearest' if `img` is a label image.
Default: 'linear'
Returns
-------
fsLR : (2,) tuple-of-nib.GiftiImage
Projected `img` on fsLR surface
"""
if fslr_density in ('4k', '8k'):
raise NotImplementedError('Cannot perform registration fusion to '
f'fsLR {fslr_density} space yet.')
return _vol_to_surf(img, 'fsLR', fslr_density, method)
def mni152_to_mni152(img, target='1mm', method='linear'):
"""
Resamples `img` to `target` image (if supplied) or target `resolution`
Parameters
----------
img : str or os.PathLike or niimg_like
Image in MNI152 space to be resampled
target : {str, os.PathLike, niimg_like} or {'1mm', '2mm', '3mm'}, optional
Image in MNI152 space to which `img` should be resampled. Can
alternatively specify desired resolution of output resample image.
Default: None
method : {'nearest', 'linear'}, optional
Method for resampling. Specify 'nearest' if `img` is a label image.
Default: 'linear'
Returns
-------
resampled : nib.Nifti1Image
Resampled input `img`
"""
if target not in DENSITIES['MNI152']:
out = nimage.resample_to_img(img, target, interpolation=method)
else:
res = int(target[0])
out = nimage.resample_img(img, np.eye(3) * res, interpolation=method)
return out
def _check_hemi(data, hemi):
""" Utility to check that `data` and `hemi` jibe
Parameters
----------
data : str or os.PathLike or tuple
Input data
hemi : str
Hemisphere(s) corresponding to `data
Returns
-------
zipped : zip
Zipped instance of `data` and `hemi`
"""
if isinstance(data, (str, os.PathLike)) or not hasattr(data, '__len__'):
data = (data,)
if len(data) == 1 and hemi is None:
raise ValueError('Must specify `hemi` when only 1 data file supplied')
if hemi is not None and isinstance(hemi, str) and hemi not in ('L', 'R'):
raise ValueError(f'Invalid hemisphere designation: {hemi}')
elif hemi is not None and isinstance(hemi, str):
hemi = (hemi,)
elif hemi is not None and any(h not in ('L', 'R') for h in hemi):
raise ValueError(f'Invalid hemisphere designations: {hemi}')
else:
hemi = ('L', 'R')
return zip(data, hemi)
def _surf_to_surf(data, srcparams, trgparams, method='linear', hemi=None):
"""
Resamples surface `data` to another surface
Parameters
----------
data : str or os.Pathlike or tuple
Filepath(s) to data. If not a tuple then `hemi` must be specified. If
a tuple then it is assumed that files are ('left', 'right')
srcparams, trgparams : dict
Dictionary with keys ['space', 'den', 'trg']
method : {'nearest', 'linear'}, optional
Method for resampling. Default: 'linear'
hemi : str or None
Hemisphere of `data` if `data` is a single image. Default: None
Returns
-------
resampled : tuple-of-nib.GiftiImage
Input `data` resampled to new surface
"""
methods = ('nearest', 'linear')
if method not in methods:
raise ValueError(f'Invalid method: {method}. Must be one of {methods}')
keys = ('space', 'den', 'trg')
for key in keys:
if key not in srcparams:
raise KeyError(f'srcparams missing key: {key}')
if key not in trgparams:
raise KeyError(f'trgparams missing key: {key}')
for val in (srcparams, trgparams):
space, den = val['space'], val['den']
if den not in DENSITIES[space]:
raise ValueError(f'Invalid density for {space} space: {den}')
# if our source and target are identical just return the loaded data
if srcparams == trgparams:
data, _ = zip(*_check_hemi(data, hemi))
return tuple(load_gifti(d) for d in data)
# get required atlas / templates for transforming between spaces
for atl in (srcparams, trgparams):
fetch_atlas(atl['space'], atl['den'])
srcdir = get_atlas_dir(srcparams['space'])
trgdir = get_atlas_dir(trgparams['space'])
resampled = ()
func = METRICRESAMPLE if method == 'linear' else LABELRESAMPLE
for img, hemi in _check_hemi(data, hemi):
srcparams['hemi'] = trgparams['hemi'] = hemi
try:
img = Path(img).resolve()
tmpimg = None
except TypeError:
tmpimg = tmpname(suffix='.gii')
nib.save(img, tmpimg)
img = Path(tmpimg).resolve()
params = dict(
metric=img,
out=tmpname('.func.gii'),
src=srcdir / SURFFMT.format(**srcparams),
trg=trgdir / SURFFMT.format(**trgparams),
srcarea=srcdir / VAFMT.format(**srcparams),
trgarea=trgdir / VAFMT.format(**trgparams),
srcmask=srcdir / MLFMT.format(**srcparams),
trgmask=trgdir / MLFMT.format(**trgparams)
)
for fn in (func, MASKSURF):
run(fn.format(**params), quiet=True)
resampled += (construct_shape_gii(
load_gifti(params['out']).agg_data()
),)
params['out'].unlink()
if tmpimg is not None:
tmpimg.unlink()
return resampled
def civet_to_fslr(data, density, fslr_density='32k', hemi=None,
method='linear'):
"""
Resamples `data` on CIVET surface to the fsLR surface
Parameters
----------
data : str or os.PathLike or nib.GiftiImage or tuple
Input CIVET data to be resampled to fsLR surface
density : {'41k', '164k'}
Resolution of provided `data`
fslr_density : {'4k', '8k', '32k', '164k'}, optional
Desired density of output fsLR surface. Default: '32k'
hemi : {'L', 'R'}, optional
If `data` is not a tuple this specifies the hemisphere the data are
representing. Default: None
method : {'nearest', 'linear'}, optional
Method for resampling. Specify 'nearest' if `data` are label images.
Default: 'linear'
Returns
-------
resampled : tuple-of-nib.GiftiImage
Input `data` resampled to new surface
"""
srcparams = dict(space='civet', den=density, trg='_space-fsLR')
trgparams = dict(space='fsLR', den=fslr_density, trg='')
return _surf_to_surf(data, srcparams, trgparams, method, hemi)
def fslr_to_civet(data, density, civet_density='41k', hemi=None,
method='linear'):
"""
Resamples `data` on fsLR surface to the CIVET surface
Parameters
----------
data : str or os.PathLike or nib.GiftiImage or tuple
Input fsLR data to be resampled to CIVET surface
density : {'4k', '8k', '32k', '164k'}
Resolution of provided `data`
civet_density : {'41k', '164k'}, optional
Desired density of output CIVET surface. Default: '41k'
hemi : {'L', 'R'}, optional
If `data` is not a tuple this specifies the hemisphere the data are
representing. Default: None
method : {'nearest', 'linear'}, optional
Method for resampling. Specify 'nearest' if `data` are label images.
Default: 'linear'
Returns
-------
resampled : tuple-of-nib.GiftiImage
Input `data` resampled to new surface
"""
srcparams = dict(space='fsLR', den=density, trg='')
trgparams = dict(space='civet', den=civet_density, trg='_space-fsLR')
return _surf_to_surf(data, srcparams, trgparams, method, hemi)
def civet_to_fsaverage(data, density, fsavg_density='41k', hemi=None,
method='linear'):
"""
Resamples `data` on CIVET surface to the fsaverage surface
Parameters
----------
data : str or os.PathLike or nib.GiftiImage or tuple
Input CIVET data to be resampled to fsaverage surface
density : {'41k', '164k'}
Resolution of provided `data`
fsavg_density : {'3k', '10k', '41k', '164k'}, optional
Desired density of output fsaverage surface. Default: '32k'
hemi : {'L', 'R'}, optional
If `data` is not a tuple this specifies the hemisphere the data are
representing. Default: None
method : {'nearest', 'linear'}, optional
Method for resampling. Specify 'nearest' if `data` are label images.
Default: 'linear'
Returns
-------
resampled : tuple-of-nib.GiftiImage
Input `data` resampled to new surface
"""
srcparams = dict(space='civet', den=density, trg='_space-fsaverage')
trgparams = dict(space='fsaverage', den=fsavg_density, trg='')
return _surf_to_surf(data, srcparams, trgparams, method, hemi)
def fsaverage_to_civet(data, density, civet_density='41k', hemi=None,
method='linear'):
"""
Resamples `data` on fsaverage surface to the CIVET surface
Parameters
----------
data : str or os.PathLike or nib.GiftiImage or tuple
Input fsaverage data to be resampled to CIVET surface
density : {'3k', '10k', '41k', '164k'}
Resolution of provided `data`
civet_density : {'41k', '164k'}, optional
Desired density of output CIVET surface. Default: '41k'
hemi : {'L', 'R'}, optional
If `data` is not a tuple this specifies the hemisphere the data are
representing. Default: None
method : {'nearest', 'linear'}, optional
Method for resampling. Specify 'nearest' if `data` are label images.
Default: 'linear'
Returns
-------
resampled : tuple-of-nib.GiftiImage
Input `data` resampled to new surface
"""
srcparams = dict(space='fsaverage', den=density, trg='')
trgparams = dict(space='civet', den=civet_density, trg='_space-fsaverage')
return _surf_to_surf(data, srcparams, trgparams, method, hemi)
def fslr_to_fsaverage(data, density, fsavg_density='41k', hemi=None,
method='linear'):
"""
Resamples `data` on fsLR surface to the fsaverage surface
Parameters
----------
data : str or os.PathLike or nib.GiftiImage or tuple
Input fsLR data to be resampled to fsaverage surface
density : {'4k', '8k', '32k', '164k'}
Resolution of provided `data`
fsavg_density : {'3k', '10k', '41k', '164k'}, optional
Desired density of output fsaverage surface. Default: '41k'
hemi : {'L', 'R'}, optional
If `data` is not a tuple this specifies the hemisphere the data are
representing. Default: None
method : {'nearest', 'linear'}, optional
Method for resampling. Specify 'nearest' if `data` are label images.
Default: 'linear'
Returns
-------
resampled : tuple-of-nib.GiftiImage
Input `data` resampled to new surface
"""
srcparams = dict(space='fsLR', den=density, trg='_space-fsaverage')
trgparams = dict(space='fsaverage', den=fsavg_density, trg='')
return _surf_to_surf(data, srcparams, trgparams, method, hemi)
def fsaverage_to_fslr(data, density, fslr_density='32k', hemi=None,
method='linear'):
"""
Resamples `data` on fsaverage surface to the fsLR surface
Parameters
----------
data : str or os.PathLike or nib.GiftiImage or tuple
Input fsaverage data to be resampled to fsLR surface
density : {'3k', '10k', '41k', '164k'}
Resolution of provided `data`
fslr_density : {'4k', '8k', '32k', '164k'}, optional
Desired density of output fsLR surface. Default: '32k'
hemi : {'L', 'R'}, optional
If `data` is not a tuple this specifies the hemisphere the data are
representing. Default: None
method : {'nearest', 'linear'}, optional
Method for resampling. Specify 'nearest' if `data` are label images.
Default: 'linear'
Returns
-------
resampled : tuple-of-nib.GiftiImage
Input `data` resampled to new surface
"""
srcparams = dict(space='fsaverage', den=density, trg='')
trgparams = dict(space='fsLR', den=fslr_density, trg='_space-fsaverage')
return _surf_to_surf(data, srcparams, trgparams, method, hemi)
def civet_to_civet(data, density, civet_density='41k', hemi=None,
method='linear'):
"""
Resamples `data` on CIVET surface to new density
Parameters
----------
data : str or os.PathLike or nib.GiftiImage or tuple
Input CIVET data to be resampled
density : {'41k', '164k'}
Resolution of provided `data`
civet_density : {'41k', '164k'}, optional
Desired density of output surface. Default: '41k'
hemi : {'L', 'R'}, optional
If `data` is not a tuple this specifies the hemisphere the data are
representing. Default: None
method : {'nearest', 'linear'}, optional
Method for resampling. Specify 'nearest' if `data` are label images.
Default: 'linear'
Returns
-------
resampled : tuple-of-nib.GiftiImage
Input `data` resampled to new surface
"""
srcparams = dict(space='civet', den=density, trg='')
trgparams = dict(space='civet', den=civet_density, trg='')
return _surf_to_surf(data, srcparams, trgparams, method, hemi)
def fslr_to_fslr(data, density, fslr_density='32k', hemi=None,
method='linear'):
"""
Resamples `data` on fsLR surface to new density
Parameters
----------
data : str or os.PathLike or nib.GiftiImage or tuple
Input fsLR data to be resampled
density : {'4k', '8k', '32k', '164k'}
Resolution of provided `data`
fslr_density : {'4k', '8k', '32k', '164k'}, optional
Desired density of output surface. Default: '32k'
hemi : {'L', 'R'}, optional
If `data` is not a tuple this specifies the hemisphere the data are
representing. Default: None
method : {'nearest', 'linear'}, optional
Method for resampling. Specify 'nearest' if `data` are label images.
Default: 'linear'
Returns
-------
resampled : tuple-of-nib.GiftiImage
Input `data` resampled to new density
"""
srcparams = dict(space='fsLR', den=density, trg='')
trgparams = dict(space='fsLR', den=fslr_density, trg='')
return _surf_to_surf(data, srcparams, trgparams, method, hemi)
def fsaverage_to_fsaverage(data, density, fsavg_density='41k', hemi=None,
method='linear'):
"""
Resamples `data` on fsaverage surface to new density
Parameters
----------
data : str or os.PathLike or nib.GiftiImage or tuple
Input fsaverage data to be resampled
density : {'3k', '10k', '41k', '164k'}
Resolution of provided `data`
fsavg_density : {'3k', '10k', '41k', '164k'}, optional
Desired density of output surface. Default: '41k'
hemi : {'L', 'R'}, optional
If `data` is not a tuple this specifies the hemisphere the data are
representing. Default: None
method : {'nearest', 'linear'}, optional
Method for resampling. Specify 'nearest' if `data` are label images.
Default: 'linear'
Returns
-------
resampled : tuple-of-nib.GiftiImage
Input `data` resampled to new density
"""
srcparams = dict(space='fsaverage', den=density, trg='')
trgparams = dict(space='fsaverage', den=fsavg_density, trg='')
return _surf_to_surf(data, srcparams, trgparams, method, hemi)
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,057
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/datasets/_osf.py
|
# -*- coding: utf-8 -*-
"""
Functions for working with data/osf.json file
"""
import os
from pkg_resources import resource_filename
import json
from nilearn.datasets.utils import _md5_sum_file
from brainnotation.datasets.utils import _get_session
# uniquely identify each item ('hemi' can be None)
FNAME_KEYS = ['source', 'desc', 'space', 'den', 'res', 'hemi']
# auto-generated (checksum can be None if file doest not exist)
AUTO_KEYS = ['format', 'fname', 'rel_path', 'checksum']
# required keys but values are all optional
COND_KEYS = ['title', 'tags', 'redir', 'url']
# minimal keys for each item
MINIMAL_KEYS = FNAME_KEYS + AUTO_KEYS + COND_KEYS
# keys for redirection
REDIR_KEYS = ['space', 'den']
# keys for more metadata (unique for each source)
INFO_KEYS = ['source', 'refs', 'comments', 'demographics']
# distribution JSON
OSFJSON = resource_filename('brainnotation', 'data/osf.json')
def parse_filename(fname, return_ext=True, verbose=False):
"""
Parses `fname` (in BIDS-inspired format) and returns dictionary
Parameters
----------
fname : str os os.PathLike
Filename to parse
return_ext : bool, optional
Whether to return extension of `fname` in addition to key-value dict.
Default: False
verbose : bool, optional
Whether to print status messages. Default: False
Returns
-------
info : dict
Key-value pairs extracted from `fname`
ext : str
Extension of `fname`, only returned if `return_ext=True`
"""
try:
base, *ext = fname.split('.')
fname_dict = dict([
pair.split('-') for pair in base.split('_') if pair != 'feature'
])
except ValueError:
print('Wrong filename format!')
return
if verbose:
print(fname_dict)
if return_ext:
return fname_dict, '.'.join(ext)
return fname_dict
def parse_fname_list(fname, verbose=False):
    """
    Reads list of BIDS-inspired filenames from `fname` and parses their keys

    Parameters
    ----------
    fname : str or os.PathLike
        Path to a text file with one filename per line
    verbose : bool, optional
        Whether to print status messages. Default: False

    Returns
    -------
    data : list-of-dict
        Information about filenames in `fname`
    """
    # one filename per line; strip trailing newlines / whitespace
    with open(fname, 'r', encoding='utf-8') as src:
        names = [line.strip() for line in src]
    parsed = [
        parse_filename(name, return_ext=False, verbose=verbose)
        for name in names
    ]
    if verbose:
        print(names)
    return parsed
def parse_json(fname, root='annotations'):
    """
    Loads JSON from `fname` and returns the value stored under `root` key(s)

    Parameters
    ----------
    fname : str or os.PathLike
        Filepath to JSON file
    root : str or list-of-str, optional
        Root key(s) to query JSON file. Default: 'annotations'

    Returns
    -------
    data : dict
        Data from `fname` JSON file
    """
    keys = [root] if isinstance(root, str) else root
    with open(fname, 'r', encoding='utf-8') as src:
        data = json.load(src)
    # descend through nested keys; an empty sequence yields the whole file
    for key in keys:
        data = data[key]
    return data
def write_json(data, fname, root='annotations', indent=4):
    """
    Saves `data` under the `root` key of JSON file `fname`

    Parameters
    ----------
    data : JSON-compatible format
        Data to save to `fname`
    fname : str or os.PathLike
        Path to filename where `data` should be saved as JSON
    root : str, optional
        Key under which `data` is stored in `fname`. Default: 'annotations'
    indent : int, optional
        Indentation of JSON file. Default: 4

    Returns
    -------
    fname : str
        Path to saved file

    Raises
    ------
    ValueError
        If `root` is not a string
    """
    if not isinstance(root, str):
        raise ValueError(f'Provided `root` must be a str. Received: {root}')
    # start from the existing file contents (if any) so other root keys are
    # preserved; previously `output` was left undefined when `fname` did not
    # exist, raising NameError on the first-ever write
    output = {}
    if os.path.isfile(fname):
        with open(fname, 'r', encoding='utf-8') as src:
            output = json.load(src)
    output[root] = data
    # save to disk
    with open(fname, 'w', encoding='utf-8') as dest:
        json.dump(output, dest, indent=indent)
    return fname
def complete_json(input_data, ref_keys='minimal', input_root=None,
                  output_fname=None, output_root=None):
    """
    Ensures every annotation entry carries the full set of reference keys

    Parameters
    ----------
    input_data : str or os.PathLike or list-of-dict
        Filepath to JSON with data or list of dictionaries with information
        about annotations
    ref_keys : {'minimal', 'info'}, optional
        Which reference keys to check in `input_data`. Default: 'minimal'
    input_root : str, optional
        If `input_data` is a filename, the key in the file containing data
        about annotations. If not specified will be based on provided
        `ref_keys`. Default: None
    output_fname : str or os.PathLike, optional
        Filepath where complete JSON should be saved. If not specified the
        data are not saved to disk. Default: None
    output_root : str, optional
        If `output_fname` is not None, the key in the saved JSON where
        completed information should be stored. If not specified will be
        based on `input_root`. Default: None

    Returns
    -------
    output : list-of-dict
        Information about annotations from `input_data`, with missing keys
        padded with None
    """
    valid_keys = ['minimal', 'info']
    if ref_keys not in valid_keys:
        raise ValueError(f'Invalid ref_keys: {ref_keys}. Must be one of '
                         f'{valid_keys}')
    # map the requested key set onto the module-level reference list and the
    # default root key used when reading from file
    reference = {
        'minimal': (MINIMAL_KEYS, 'annotations'),
        'info': (INFO_KEYS, 'info'),
    }
    ref_keys, default_root = reference[ref_keys]
    if input_root is None:
        input_root = default_root
    # load from disk when a filepath (rather than a list) was provided
    if not isinstance(input_data, list):
        input_data = parse_json(input_data, root=input_root)
    # pad every entry so all reference keys are present (missing -> None)
    output = [
        {key: entry.get(key) for key in ref_keys} for entry in input_data
    ]
    if output_fname is not None:
        root = output_root if output_root is not None else input_root
        write_json(output, output_fname, root=root)
    return output
def check_missing_keys(fname, root='annotations'):
    """
    Checks whether entries in `fname` JSON are missing required keys

    Required keys are specified in
    ``brainnotation.datasets._osf.MINIMAL_KEYS``

    Parameters
    ----------
    fname : str or os.PathLike
        Filepath to JSON file to check
    root : str or list-of-str, optional
        Root key(s) to query JSON file. Default: 'annotations'

    Returns
    -------
    info : list of list-of-str
        Missing keys for each entry in `fname` (empty list per complete entry)

    Raises
    ------
    KeyError
        If one or more entries are missing required keys
    """
    required = set(MINIMAL_KEYS)
    info = [
        sorted(required - set(entry))
        for entry in parse_json(fname, root=root)
    ]
    # any() is True iff at least one entry has a non-empty "missing" list
    if any(info):
        raise KeyError('Data in provided `fname` are missing some keys. '
                       'Please use `brainnotation.datasets._osf.complete_json`'
                       ' to fill missing keys')
    return info
def generate_auto_keys(item):
    """
    Adds automatically-generated keys to `item`

    Generated keys include: ['format', 'fname', 'rel_path', 'checksum']

    Parameters
    ----------
    item : dict
        Information about annotation; must carry the FNAME_KEYS plus 'format'

    Returns
    -------
    item : dict
        Updated (copied) information about annotation
    """
    item = item.copy()
    pref = 'source-{source}_desc-{desc}_space-{space}'
    surffmt = pref + '_den-{den}_hemi-{hemi}_feature.func.gii'
    # BUGFIX: previously `volfmt = pref = '_res-...'` (assignment instead of
    # concatenation), which clobbered `pref` and produced volumetric fnames
    # missing the source/desc/space prefix
    volfmt = pref + '_res-{res}_feature.nii.gz'
    # infer format: surface files carry den/hemi keys, volumes carry res
    is_surface = item['den'] or item['hemi'] or item['format'] == 'surface'
    is_volume = item['res'] or item['format'] == 'volume'
    if is_surface:  # this is surface file
        item['format'] = 'surface'
        item['fname'] = surffmt.format(**item)
    elif is_volume:  # this is volume file
        item['format'] = 'volume'
        item['fname'] = volfmt.format(**item)
    else:
        print('Missing keys to determine surface/volumetric format of data; '
              'fname keys not generated')
    item['rel_path'] = os.path.join(*[
        item[key] for key in ['source', 'desc', 'space']
    ])
    # checksum only for files that exist on disk; guard the join so a None /
    # absent fname does not raise (previously the join ran unconditionally)
    fname = item.get('fname')
    if fname is not None:
        filepath = os.path.join(item['rel_path'], fname)
        if os.path.isfile(filepath):
            item['checksum'] = _md5_sum_file(filepath)
    return item
def clean_minimal_keys(item):
    """
    Removes incompatible keys from `item` based on `item['format']`

    Surface annotations carry no 'res' key; volumetric annotations carry no
    'den' / 'hemi' keys.

    Parameters
    ----------
    item : dict
        Information about annotation

    Returns
    -------
    item : dict
        Updated information about annotation (modified in place)
    """
    keys = {'surface': ['res'], 'volume': ['den', 'hemi']}
    fmt = item.get('format')
    if fmt is None:
        print('Invalid value for format key; setting to "null"')
        item['format'] = None
        # BUGFIX: previously a bare `return` handed callers None instead of
        # the item, crashing the release pipeline downstream
        return item
    for key in keys.get(fmt, []):
        item.pop(key, None)
    return item
def get_url(fname, project, token=None):
    """
    Gets OSF API URL path for `fname` in `project`

    Parameters
    ----------
    fname : str
        Filepath as it exists on OSF
    project : str
        Project ID on OSF
    token : str, optional
        OSF personal access token for accessing restricted annotations. Will
        also check the environmental variable 'BRAINNOTATION_OSF_TOKEN' if not
        provided; if that is not set no token will be provided and restricted
        annotations will be inaccessible. Default: None

    Returns
    -------
    path : str
        Path to `fname` on OSF project `project`
    """
    url = f'https://files.osf.io/v1/resources/{project}/providers/osfstorage/'
    session = _get_session(token=token)
    path = ''
    # walk the remote directory tree one path component at a time, resolving
    # each component to the opaque OSF storage path it lists
    for pathpart in fname.strip('/').split('/'):
        out = session.get(url + path)
        out.raise_for_status()
        for item in out.json()['data']:
            if item['attributes']['name'] == pathpart:
                break
        # NOTE(review): relies on the inner loop variable leaking out of the
        # `for`; if no listed entry matches `pathpart`, the last item is used
        # silently instead of raising — confirm this is intended
        path = item['attributes']['path'][1:]
    return path
def generate_release_json(fname, output=OSFJSON, root='annotations',
                          project=None, token=None):
    """
    Generates distribution-ready JSON file for fetching annotation data

    Parameters
    ----------
    fname : str or os.PathLike
        Path to filename where manually-edited JSON information is stored
    output : str or os.PathLike
        Path to filename where output JSON should be saved
    root : str, optional
        Key in `fname` where relevant data are stored. Default: 'annotations'
    project : str, optional
        Project ID on OSF where data files are stored. If not specified then
        the URL for the generated data will not be set. Default: None
    token : str, optional
        OSF personal access token for accessing restricted annotations. Will
        also check the environmental variable 'BRAINNOTATION_OSF_TOKEN' if not
        provided; if that is not set no token will be provided and restricted
        annotations will be inaccessible. Default: None

    Returns
    -------
    output : str
        Path to filename where output JSON was saved
    """
    # BUGFIX: the local annotation list previously re-used (and clobbered)
    # the `output` parameter, so the destination filename was lost and
    # write_json() received the data list as its filename
    annotations = []
    for item in parse_json(fname, root=root):
        item = clean_minimal_keys(generate_auto_keys(item))
        # fetch URL for file if needed (and project is specified)
        if (item.get('fname') is not None and item.get('url') is None
                and project is not None):
            fn = os.path.join(item['rel_path'], item['fname'])
            item['url'] = [project, get_url(fn, project=project, token=token)]
        annotations.append(
            {key: item[key] for key in MINIMAL_KEYS if key in item}
        )
    return write_json(annotations, output, root='annotations')
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,058
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/tests/test_caret.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.caret functionality
"""
import pytest
# Placeholder tests for brainnotation.caret: each is marked xfail until the
# corresponding function gains a real implementation and assertions.
@pytest.mark.xfail
def test_read_surface_shape():
    assert False


@pytest.mark.xfail
def test_read_coords():
    assert False


@pytest.mark.xfail
def test_read_topo():
    assert False


@pytest.mark.xfail
def test_read_deform_map():
    assert False


@pytest.mark.xfail
def test_apply_deform_map():
    assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,059
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/civet.py
|
# -*- coding: utf-8 -*-
"""
Functions for working with CIVET data
"""
import os
import numpy as np
from brainnotation.points import get_shared_triangles, which_triangle
def read_civet_surf(fname):
    """
    Reads a CIVET-style .obj geometry file

    Parameters
    ----------
    fname : str or os.PathLike
        Filepath to .obj file

    Returns
    -------
    vertices : (N, 3)
        Vertices of surface mesh
    triangles : (T, 3)
        Triangles comprising surface mesh
    """
    # `k` flags that the polygon-index section has started; `polygons`
    # accumulates a flat list of vertex indices (three per triangle)
    k, polygons = 0, []
    with open(fname, 'r') as src:
        # vertex count is the 7th field of the .obj header line
        n_vert = int(src.readline().split()[6])
        vertices = np.zeros((n_vert, 3))
        for i, line in enumerate(src):
            if i < n_vert:
                # first n_vert lines after the header are x/y/z coordinates
                vertices[i] = [float(i) for i in line.split()]
            elif i >= (2 * n_vert) + 5:
                # skip normals and colour info; the polygon indices begin
                # after the first blank line in this region
                # NOTE(review): the (2 * n_vert) + 5 offset is a CIVET .obj
                # layout assumption — confirm against the format spec
                if not line.strip():
                    k = 1
                elif k == 1:
                    polygons.extend([int(i) for i in line.split()])
    # regroup the flat index list into (T, 3) triangles
    triangles = np.reshape(np.asarray(polygons), (-1, 3))
    return vertices, triangles
def read_surfmap(surfmap):
    """
    Reads surface map from CIVET

    Parameters
    ----------
    surfmap : str or os.PathLike
        Surface mapping file to be loaded

    Returns
    -------
    control : (N,) array_like
        Control vertex IDs
    v0, v1 : (N,) array_like
        Target vertex IDs
    t : (N, 3) array_like
        Resampling weights
    """
    # first four lines are header; remaining rows are
    # (control, v0, v1, t1, t2) tuples
    columns = np.loadtxt(surfmap, skiprows=4).T
    control, v0, v1, t1, t2 = columns
    # weights sum to one, so the first weight is implied by the other two
    weights = np.column_stack((1 - t1 - t2, t1, t2))
    return control.astype(int), v0.astype(int), v1.astype(int), weights
def resample_surface_map(source, morph, target, surfmap):
    """
    Resamples `morph` data defined on `source` surface to `target` surface

    Uses `surfmap` to define mapping

    Parameters
    ----------
    source : str or os.PathLike
        Path to surface file on which `morph` is defined
    morph : str or os.PathLike
        Path to morphology data defined on `source` surface
    target : str or os.PathLike
        Path to surface file on which to resample `morph` data
    surfmap : str or os.PathLike
        Path to surface mapping file defining transformation (CIVET style)

    Returns
    -------
    resampled : np.ndarray
        Provided `morph` data resampled to `target` surface
    """
    # accept either pre-loaded objects or paths for every input
    if isinstance(source, (str, os.PathLike)):
        source = read_civet_surf(source)
    if isinstance(morph, (str, os.PathLike)):
        morph = np.loadtxt(morph)
    if len(morph) != len(source[0]):
        raise ValueError('Provided `morph` file has different number of '
                         'vertices from provided `source` surface')
    if isinstance(target, (str, os.PathLike)):
        target = read_civet_surf(target)
    if isinstance(surfmap, (str, os.PathLike)):
        surfmap = read_surfmap(surfmap)
    if len(surfmap[0]) != len(target[0]):
        raise ValueError('Provided `target` surface has different number of '
                         'vertices from provided `surfmap` transformation.')
    source_tris = get_shared_triangles(source[1])
    resampled = np.zeros_like(morph)
    # each target (control) vertex is interpolated from the source edge
    # (v0, v1) plus one vertex of a triangle containing the control point,
    # using the barycentric-style weights in `t`
    for (control, v0, v1, t) in zip(*surfmap):
        # shared-triangle lookup keys are sorted vertex pairs
        tris = source_tris[(v0, v1) if v0 < v1 else (v1, v0)]
        point, verts = target[0][control], source[0][tris]
        idx = which_triangle(point, verts)
        if idx is None:
            # point lies in neither candidate triangle (numerical edge case):
            # fall back to whichever triangle's third vertex is nearest
            idx = np.argmin(np.linalg.norm(point - verts[:, -1], axis=1))
        # weighted sum of morphology at the two edge vertices and the chosen
        # triangle's remaining vertex
        resampled[control] = np.sum(morph[[v0, v1, tris[idx][-1]]] * t)
    return resampled
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,060
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/datasets/tests/test_atlases.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.datasets.atlases functionality
"""
import pytest
from brainnotation.datasets import atlases
# _sanitize_atlas should map informal aliases onto canonical atlas names
@pytest.mark.parametrize('atlas, expected', [
    ('fslr', 'fsLR'), ('fsLR', 'fsLR'), ('fsavg', 'fsaverage'),
    ('fsaverage', 'fsaverage'), ('CIVET', 'civet'), ('civet', 'civet'),
    ('mni152', 'MNI152'), ('mni', 'MNI152'), ('MNI152', 'MNI152')
])
def test__sanitize_atlas(atlas, expected):
    assert atlases._sanitize_atlas(atlas) == expected


# unrecognized atlas names must be rejected with ValueError
def test__sanitize_atlas_errors():
    with pytest.raises(ValueError):
        atlases._sanitize_atlas('invalid')


# Placeholder tests below: marked xfail until real assertions are written.
@pytest.mark.xfail
def test__bunch_outputs():
    assert False


@pytest.mark.xfail
def test__fetch_atlas():
    assert False


@pytest.mark.xfail
def test_fetch_civet():
    assert False


@pytest.mark.xfail
def test_fetch_fsaverage():
    assert False


@pytest.mark.xfail
def test_fetch_mni152():
    assert False


@pytest.mark.xfail
def test_fetch_regfusion():
    assert False


@pytest.mark.xfail
def test_fetch_atlas():
    assert False


@pytest.mark.xfail
def test_fetch_all_atlases():
    assert False


@pytest.mark.xfail
def test_get_atlas_dir():
    assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,061
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/plotting.py
|
# -*- coding: utf-8 -*-
"""
Functionality for plotting
"""
from matplotlib import colors as mcolors, pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
from nilearn.plotting import plot_surf
import numpy as np
from brainnotation.datasets import ALIAS, fetch_atlas
from brainnotation.images import load_gifti
from brainnotation.transforms import _check_hemi
# map single-letter hemisphere codes onto nilearn's expected hemi strings
HEMI = dict(L='left', R='right')
# register the Caret-style blue/orange diverging colormap used for plotting
# NOTE(review): plt.cm.register_cmap is deprecated in newer matplotlib
# releases (matplotlib.colormaps.register is the replacement) — confirm the
# pinned matplotlib version still supports it
plt.cm.register_cmap(
    'caret_blueorange', mcolors.LinearSegmentedColormap.from_list('blend', [
        '#00d2ff', '#009eff', '#006cfe', '#0043fe',
        '#fd4604', '#fe6b01', '#ffd100', '#ffff04'
    ])
)
def plot_surf_template(data, template, density, surf='inflated', space=None,
                       hemi=None, data_dir=None, **kwargs):
    """
    Plots `data` on `template` surface

    Parameters
    ----------
    data : str or os.PathLike or tuple-of-str
        Path to data file(s) to be plotted. If tuple, assumes (left, right)
        hemisphere.
    template : {'civet', 'fsaverage', 'fsLR'}
        Template on which `data` is defined
    density : str
        Resolution of template
    surf : str, optional
        Surface on which `data` should be plotted. Must be valid for specified
        `space`. Default: 'inflated'
    space : str, optional
        Accepted but currently unused by this function. Default: None
    hemi : {'L', 'R'}, optional
        If `data` is not a tuple, which hemisphere it should be plotted on.
        Default: None
    data_dir : str or os.PathLike, optional
        Directory where atlas data are stored / downloaded. Default: None
    kwargs : key-value pairs
        Passed directly to `nilearn.plotting.plot_surf`

    Returns
    -------
    fig : matplotlib.Figure instance
        Plotted figure
    """
    atlas = fetch_atlas(template, density, data_dir=data_dir, verbose=0)
    # resolve informal template aliases (e.g. 'mni' -> 'MNI152')
    template = ALIAS.get(template, template)
    if template == 'MNI152':
        raise ValueError('Cannot plot MNI152 on the surface. Try performing '
                         'registration fusion to project data to the surface '
                         'and plotting the projection instead.')
    surf, medial = atlas[surf], atlas['medial']
    # default to fully opaque unless the caller supplied a background map
    # without an explicit alpha (then let nilearn auto-blend)
    opts = dict(alpha=1.0)
    opts.update(**kwargs)
    if kwargs.get('bg_map') is not None and kwargs.get('alpha') is None:
        opts['alpha'] = 'auto'
    # normalize `data` + `hemi` into parallel (file, hemisphere) sequences
    data, hemispheres = zip(*_check_hemi(data, hemi))
    n_surf = len(data)
    # one subplot row per hemisphere, two columns (lateral / medial views)
    fig, axes = plt.subplots(n_surf, 2, subplot_kw={'projection': '3d'})
    axes = (axes,) if n_surf == 1 else axes.T
    for row, hemi, img in zip(axes, hemispheres, data):
        geom = load_gifti(getattr(surf, hemi)).agg_data()
        img = load_gifti(img).agg_data().astype('float32')
        # set medial wall to NaN; this will avoid it being plotted
        med = load_gifti(getattr(medial, hemi)).agg_data().astype(bool)
        img[np.logical_not(med)] = np.nan
        for ax, view in zip(row, ['lateral', 'medial']):
            ax.disable_mouse_rotation()
            plot_surf(geom, img, hemi=HEMI[hemi], axes=ax, view=view, **opts)
            # re-shade the mesh facecolors so lighting matches the view
            poly = ax.collections[0]
            poly.set_facecolors(
                _fix_facecolors(ax, poly._original_facecolor,
                                *geom, view, hemi)
            )
    # tighten layout only when no colorbar competes for figure space
    if not opts.get('colorbar', False):
        fig.tight_layout()
    # pull the panels together horizontally (and vertically for two rows)
    if n_surf == 1:
        fig.subplots_adjust(wspace=-0.1)
    else:
        fig.subplots_adjust(wspace=-0.4, hspace=-0.15)
    return fig
def _fix_facecolors(ax, facecolors, vertices, faces, view, hemi):
    """
    Updates `facecolors` to reflect shading of mesh geometry

    Parameters
    ----------
    ax : plt.Axes3dSubplot
        Axis instance
    facecolors : (F,) array_like
        Original facecolors of plot
    vertices : (V, 3)
        Vertices of surface mesh
    faces : (F, 3)
        Triangles of surface mesh
    view : {'lateral', 'medial'}
        Plotted view of brain
    hemi : {'L', 'R'}
        Hemisphere being plotted; for 'R' the lateral/medial light sources
        are swapped

    Returns
    -------
    colors : (F,) array_like
        Updated facecolors with appropriate shading
    """
    hemi_view = {'R': {'lateral': 'medial', 'medial': 'lateral'}}
    # fixed light sources approximating nilearn's default shading per view
    views = {
        'lateral': plt.cm.colors.LightSource(azdeg=225, altdeg=19.4712),
        'medial': plt.cm.colors.LightSource(azdeg=45, altdeg=19.4712)
    }
    # reverse medial / lateral views if plotting right hemisphere
    view = hemi_view.get(hemi, {}).get(view, view)
    # re-shade colors
    # NOTE(review): relies on private matplotlib Axes3D APIs
    # (_generate_normals / _shade_colors) — may break across mpl versions
    normals = ax._generate_normals(vertices[faces])
    colors = ax._shade_colors(np.asarray(facecolors), normals, views[view])
    return colors
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,062
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/tests/test_parcellate.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.parcellate functionality
"""
import pytest
# Placeholder tests for brainnotation.parcellate: marked xfail until real
# assertions are written.
@pytest.mark.xfail
def test__gifti_to_array():
    assert False


@pytest.mark.xfail
def test__array_to_gifti():
    assert False


@pytest.mark.xfail
def test_Parcellater():
    assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,063
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/parcellate.py
|
# -*- coding: utf-8 -*-
"""
Functionality for parcellating data
"""
import nibabel as nib
from nilearn.input_data import NiftiLabelsMasker
import numpy as np
from brainnotation.datasets import ALIAS, DENSITIES
from brainnotation.images import construct_shape_gii, load_gifti
from brainnotation.resampling import resample_images
from brainnotation.transforms import _check_hemi
from brainnotation.nulls.spins import vertices_to_parcels, parcels_to_vertices
def _gifti_to_array(gifti):
    """ Stacks data arrays from the tuple of `gifti` images into one ndarray
    """
    arrays = [load_gifti(img).agg_data() for img in gifti]
    return np.hstack(arrays)
def _array_to_gifti(data):
    """ Splits numpy `data` into two halves and wraps each in a gifti image
    """
    left, right = np.split(data, 2)
    return construct_shape_gii(left), construct_shape_gii(right)
class Parcellater():
    """
    Class for parcellating arbitrary volumetric / surface data

    Parameters
    ----------
    parcellation : str or os.PathLike or Nifti1Image or GiftiImage or tuple
        Parcellation image or surfaces, where each region is identified by a
        unique integer ID. All regions with an ID of 0 are ignored.
    space : str
        The space in which `parcellation` is defined
    resampling_target : {'data', 'parcellation', None}, optional
        Gives which image gives the final shape/size. For example, if
        `resampling_target` is 'data', the `parcellation` is resampled to the
        space + resolution of the data, if needed. If it is 'parcellation' then
        any data provided to `.fit()` are transformed to the space + resolution
        of `parcellation`. Providing None means no resampling; if spaces +
        resolutions of the `parcellation` and data provided to `.fit()` do not
        match a ValueError is raised. Default: 'data'
    hemi : {'L', 'R'}, optional
        If provided `parcellation` represents only one hemisphere of a surface
        atlas then this specifies which hemisphere. If not specified it is
        assumed that `parcellation` is (L, R) hemisphere. Ignored if `space` is
        'MNI152'. Default: None
    """

    def __init__(self, parcellation, space, resampling_target='data',
                 hemi=None):
        self.parcellation = parcellation
        self.space = ALIAS.get(space, space)
        self.resampling_target = resampling_target
        self.hemi = hemi
        self._volumetric = self.space == 'MNI152'

        # direction of the resampling performed in `transform()`
        if self.resampling_target == 'parcellation':
            self._resampling = 'transform_to_trg'
        else:
            self._resampling = 'transform_to_src'

        # surface parcellations are normalized to a (parcellation, hemi) pair
        # per hemisphere
        if not self._volumetric:
            self.parcellation, self.hemi = zip(
                *_check_hemi(self.parcellation, self.hemi)
            )

        if self.resampling_target not in ('parcellation', 'data', None):
            raise ValueError('Invalid value for `resampling_target`: '
                             f'{resampling_target}')
        if self.space not in DENSITIES:
            raise ValueError(f'Invalid value for `space`: {space}')

    def fit(self):
        """ Prepare parcellation for data extraction
        """
        if not self._volumetric:
            self.parcellation = tuple(
                load_gifti(img) for img in self.parcellation
            )
        self._fit = True
        return self

    def transform(self, data, space, hemi=None):
        """
        Applies parcellation to `data` in `space`

        Parameters
        ----------
        data : str or os.PathLike or Nifti1Image or GiftiImage or tuple
            Data to parcellate
        space : str
            The space in which `data` is defined
        hemi : {'L', 'R'}, optional
            If provided `data` represents only one hemisphere of a surface
            dataset then this specifies which hemisphere. If not specified it
            is assumed that `data` is (L, R) hemisphere. Ignored if `space` is
            'MNI152'. Default: None

        Returns
        -------
        parcellated : np.ndarray
            Parcellated `data`
        """
        self._check_fitted()
        space = ALIAS.get(space, space)

        # guard against resampling directions that are impossible to satisfy
        if (self.resampling_target == 'data' and space == 'MNI152'
                and not self._volumetric):
            # BUGFIX: message previously said "MNI1512"
            raise ValueError('Cannot use resampling_target="data" when '
                             'provided parcellation is in surface space and '
                             'provided data are in MNI152 space.')
        elif (self.resampling_target == 'parcellation' and self._volumetric
                and space != 'MNI152'):
            raise ValueError('Cannot use resampling_target="parcellation" '
                             'when provided parcellation is in MNI152 space '
                             'and provided data are in surface space.')

        if hemi is not None and hemi not in self.hemi:
            # BUGFIX: previously not an f-string, so the placeholders printed
            # literally as "{hemi}" / "{self.hemi}"
            raise ValueError(f'Cannot parcellate data from {hemi} hemisphere '
                             'when parcellation was provided for incompatible '
                             f'hemisphere: {self.hemi}')

        # raw arrays are assumed to be surface data split across hemispheres
        if isinstance(data, np.ndarray):
            data = _array_to_gifti(data)

        data, parc = resample_images(data, self.parcellation,
                                     space, self.space, hemi=hemi,
                                     resampling=self._resampling,
                                     method='nearest')

        if ((self.resampling_target == 'data'
             and space.lower() == 'mni152')
                or (self.resampling_target == 'parcellation'
                    and self._volumetric)):
            # volumetric route: let nilearn handle label-based extraction
            data = nib.concat_images([nib.squeeze_image(data)])
            parcellated = NiftiLabelsMasker(
                parc, resampling_target=self.resampling_target
            ).fit_transform(data)
        else:
            if not self._volumetric:
                # resampling drops the label tables; restore them from the
                # fitted parcellation so parcel IDs stay meaningful
                for n, _ in enumerate(parc):
                    parc[n].labeltable.labels = \
                        self.parcellation[n].labeltable.labels
            data = _gifti_to_array(data)
            parcellated = vertices_to_parcels(data, parc)

        return parcellated

    def inverse_transform(self, data):
        """
        Project `data` to space + density of parcellation

        Parameters
        ----------
        data : array_like
            Parcellated data to be projected to the space of parcellation

        Returns
        -------
        data : Nifti1Image or tuple-of-nib.GiftiImage
            Provided `data` in space + resolution of parcellation
        """
        if not self._volumetric:
            # BUGFIX: previously called with a nonexistent `self.drop`
            # attribute; `parcels_to_vertices` accepts only
            # (data, parcellation)
            verts = parcels_to_vertices(data, self.parcellation)
            img = _array_to_gifti(verts)
        else:
            data = np.atleast_2d(data)
            img = NiftiLabelsMasker(self.parcellation).fit() \
                .inverse_transform(data)

        return img

    def fit_transform(self, data, space, hemi=None):
        """ Prepare and perform parcellation of `data`
        """
        return self.fit().transform(data, space, hemi)

    def _check_fitted(self):
        # `_fit` is only set by `.fit()`; absence means the user skipped it
        if not hasattr(self, '_fit'):
            raise ValueError(f'It seems that {self.__class__.__name__} has '
                             'not been fit. You must call `.fit()` before '
                             'calling `.transform()`')
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,064
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/nulls/spins.py
|
# -*- coding: utf-8 -*-
"""
Contains helper code for running spatial nulls models
"""
from pathlib import Path
import warnings
import numpy as np
from scipy import optimize, spatial
from scipy.ndimage.measurements import _stats, labeled_comprehension
from sklearn.utils.validation import check_random_state
from brainnotation.images import load_gifti, PARCIGNORE
from brainnotation.points import _geodesic_parcel_centroid
def load_spins(fn, n_perm=None):
    """
    Loads a spin resampling array from `fn`

    Parameters
    ----------
    fn : os.PathLike
        Filepath to file containing spins to load
    n_perm : int, optional
        Number of spins to retain (i.e., subset data)

    Returns
    -------
    spins : (N, P) array_like
        Loaded spins
    """
    try:
        binary = Path(fn).with_suffix('.npy')
        if binary.exists():
            # memory-map the binary copy rather than loading it wholesale
            spins = np.load(binary, allow_pickle=False, mmap_mode='c')
        else:
            spins = np.loadtxt(fn, delimiter=',', dtype='int32')
    except TypeError:
        # `fn` was already array-like rather than path-like
        spins = np.asarray(fn, dtype='int32')

    return spins if n_perm is None else spins[..., :n_perm]
def get_parcel_centroids(surfaces, parcellation=None, method='surface',
                         drop=None):
    """
    Returns vertex coordinates corresponding to parcel centroids

    If `parcellation` is not specified then returned `centroids` are vertex
    coordinates of `surfaces`

    Parameters
    ----------
    surfaces : (2,) list-of-str
        Surfaces on which to compute parcel centroids; generally spherical
        surfaces are recommended. Surfaces should be (left, right) hemisphere.
        If no parcellations are provided then returned `centroids` represent
        all vertices in `surfaces`
    parcellation : (2,) list-of-str, optional
        Path to GIFTI label files containing labels of parcels on the
        (left, right) hemisphere. If not specified then vertex coordinates from
        `surfaces` are returned instead. Default: None
    method : {'average', 'surface', 'geodesic'}, optional
        Method for calculation of parcel centroid. See Notes for more
        information. Default: 'surface'
    drop : list, optional
        Specifies regions in `parcellation` for which the parcel centroid
        should not be calculated. If not specified, centroids for parcels
        defined in `PARCIGNORE` are not calculated. Default: None

    Returns
    -------
    centroids : (N, 3) numpy.ndarray
        Coordinates of parcel centroids. If `parcellation` is not specified
        these are simply the vertex coordinates
    hemiid : (N,) numpy.ndarray
        Array denoting hemisphere designation of coordinates in `centroids`,
        where `hemiid=0` denotes the left and `hemiid=1` the right hemisphere

    Notes
    -----
    The following methods can be used for finding parcel centroids:

    1. ``method='average'``
       Uses the arithmetic mean of the coordinates for the vertices in each
       parcel. Note that in this case the calculated centroids will not
       actually fall on the surface of `surf`.
    2. ``method='surface'``
       Calculates the 'average' coordinates and then finds the closest vertex
       on `surf`, where closest is defined as the vertex with the minimum
       Euclidean distance.
    3. ``method='geodesic'``
       Uses the coordinates of the vertex with the minimum average geodesic
       distance to all other vertices in the parcel. Note that this is slightly
       more time-consuming than the other two methods, especially for
       high-resolution meshes.
    """
    methods = ['average', 'surface', 'geodesic']
    if method not in methods:
        # BUGFIX: the first placeholder previously received `methods` (the
        # list of valid options) instead of the offending `method`
        raise ValueError('Provided method for centroid calculation {} is '
                         'invalid. Must be one of {}'.format(method, methods))

    if drop is None:
        drop = PARCIGNORE
    if parcellation is None:
        parcellation = (None, None)

    centroids, hemiid = [], []
    for n, (parc, surf) in enumerate(zip(parcellation, surfaces)):
        vertices, faces = load_gifti(surf).agg_data()
        if parc is not None:
            # BUGFIX: load the parcellation once and use the loaded image for
            # both the labels and the label table; previously `.labeltable`
            # was accessed on the raw argument, which fails for filepaths
            img = load_gifti(parc)
            labels = img.agg_data()
            labeltable = img.labeltable.get_labels_as_dict()
            for lab in np.unique(labels):
                # skip regions the caller asked to drop (e.g., medial wall)
                if labeltable.get(lab) in drop:
                    continue
                mask = labels == lab
                if method in ('average', 'surface'):
                    roi = np.atleast_2d(vertices[mask].mean(axis=0))
                    if method == 'surface':  # find closest vertex on surf
                        idx = np.argmin(spatial.distance_matrix(vertices, roi),
                                        axis=0)[0]
                        roi = vertices[idx]
                elif method == 'geodesic':
                    inds, = np.where(mask)
                    roi = _geodesic_parcel_centroid(vertices, faces, inds)
                centroids.append(roi)
                hemiid.append(n)
        else:
            # no parcellation: every vertex is its own "centroid"
            centroids.append(vertices)
            hemiid.extend([n] * len(vertices))

    return np.row_stack(centroids), np.asarray(hemiid)
def _gen_rotation(seed=None):
    """
    Generates random matrix for rotating spherical coordinates

    Parameters
    ----------
    seed : {int, np.random.RandomState instance, None}, optional
        Seed for random number generation

    Returns
    -------
    rotate_{l,r} : (3, 3) numpy.ndarray
        Rotations for left and right hemisphere coordinates, respectively
    """
    rs = check_random_state(seed)

    # matrix that mirrors coordinates across the Y-Z plane
    mirror = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])

    # QR decomposition of a random normal matrix yields a random orthogonal
    # matrix once the signs of R's diagonal are folded back into Q
    q, r = np.linalg.qr(rs.normal(size=(3, 3)))
    rotate_l = q @ np.diag(np.sign(np.diag(r)))
    # force a proper rotation (det == +1) rather than a roto-reflection
    if np.linalg.det(rotate_l) < 0:
        rotate_l[:, 0] = -rotate_l[:, 0]

    # mirror the left-hemisphere rotation across the Y-Z plane for the right
    rotate_r = mirror @ rotate_l @ mirror

    return rotate_l, rotate_r
def gen_spinsamples(coords, hemiid, n_rotate=1000, check_duplicates=True,
                    method='original', seed=None, verbose=False,
                    return_cost=False):
    """
    Returns a resampling array for `coords` obtained from rotations / spins

    Using the method initially proposed in [ST1]_ (and later modified + updated
    based on findings in [ST2]_ and [ST3]_), this function applies random
    rotations to the user-supplied `coords` in order to generate a resampling
    array that preserves its spatial embedding. Rotations are generated for one
    hemisphere and mirrored for the other (see `hemiid` for more information).

    Due to irregular sampling of `coords` and the randomness of the rotations
    it is possible that some "rotations" may resample with replacement (i.e.,
    will not be a true permutation). The likelihood of this can be reduced by
    either increasing the sampling density of `coords` or changing the
    ``method`` parameter (see Notes for more information on the latter).

    Parameters
    ----------
    coords : (N, 3) array_like
        X, Y, Z coordinates of `N` nodes/parcels/regions/vertices defined on a
        sphere
    hemiid : (N,) array_like
        Array denoting hemisphere designation of coordinates in `coords`, where
        values should be {0, 1} denoting the different hemispheres. Rotations
        are generated for one hemisphere and mirrored across the y-axis for the
        other hemisphere.
    n_rotate : int, optional
        Number of rotations to generate. Default: 1000
    check_duplicates : bool, optional
        Whether to check for and attempt to avoid duplicate resamplings. A
        warning will be raised if duplicates cannot be avoided. Setting to
        True may increase the runtime of this function! Default: True
    method : {'original', 'vasa', 'hungarian'}, optional
        Method by which to match non- and rotated coordinates. Specifying
        'original' will use the method described in [ST1]_. Specifying 'vasa'
        will use the method described in [ST4]_. Specifying 'hungarian' will
        use the Hungarian algorithm to minimize the global cost of reassignment
        (will dramatically increase runtime). Default: 'original'
    seed : {int, np.random.RandomState instance, None}, optional
        Seed for random number generation. Default: None
    verbose : bool, optional
        Whether to print occasional status messages. Default: False
    return_cost : bool, optional
        Whether to return cost array (specified as Euclidean distance) for each
        coordinate for each rotation. Default: False

    Returns
    -------
    spinsamples : (N, `n_rotate`) numpy.ndarray
        Resampling matrix to use in permuting data based on supplied `coords`.
    cost : (N, `n_rotate`,) numpy.ndarray
        Cost (specified as Euclidean distance) of re-assigning each coordinate
        for every rotation in `spinsamples`. Only provided if `return_cost` is
        True.

    Notes
    -----
    By default, this function uses the minimum Euclidean distance between the
    original coordinates and the new, rotated coordinates to generate a
    resampling array after each spin. Unfortunately, this can (with some
    frequency) lead to multiple coordinates being re-assigned the same value:

        >>> from brainnotation.nulls.spins import gen_spinsamples
        >>> coords = [[0, 0, 1], [1, 0, 0], [0, 0, 1], [1, 0, 0]]
        >>> hemi = [0, 0, 1, 1]
        >>> gen_spinsamples(coords, hemi, n_rotate=1, seed=1,
        ...                 check_duplicates=False)
        array([[0],
               [0],
               [2],
               [3]])

    While this is reasonable in most circumstances, if you feel incredibly
    strongly about having a perfect "permutation" (i.e., all indices appear
    once and exactly once in the resampling), you can set the ``method``
    parameter to either 'vasa' or 'hungarian':

        >>> gen_spinsamples(coords, hemi, n_rotate=1, seed=1,
        ...                 method='vasa', check_duplicates=False)
        array([[1],
               [0],
               [2],
               [3]])
        >>> gen_spinsamples(coords, hemi, n_rotate=1, seed=1,
        ...                 method='hungarian', check_duplicates=False)
        array([[0],
               [1],
               [2],
               [3]])

    Note that setting this parameter may increase the runtime of the function
    (especially for `method='hungarian'`). Refer to [ST1]_ for information on
    why the default suffices in most cases.

    For the original MATLAB implementation of this function refer to [ST5]_.

    References
    ----------
    .. [ST1] Alexander-Bloch, A., Shou, H., Liu, S., Satterthwaite, T. D.,
       Glahn, D. C., Shinohara, R. T., Vandekar, S. N., & Raznahan, A. (2018).
       On testing for spatial correspondence between maps of human brain
       structure and function. NeuroImage, 178, 540-51.
    .. [ST2] Blaser, R., & Fryzlewicz, P. (2016). Random Rotation Ensembles.
       Journal of Machine Learning Research, 17(4), 1–26.
    .. [ST3] Lefèvre, J., Pepe, A., Muscato, J., De Guio, F., Girard, N.,
       Auzias, G., & Germanaud, D. (2018). SPANOL (SPectral ANalysis of Lobes):
       A Spectral Clustering Framework for Individual and Group Parcellation of
       Cortical Surfaces in Lobes. Frontiers in Neuroscience, 12, 354.
    .. [ST4] Váša, F., Seidlitz, J., Romero-Garcia, R., Whitaker, K. J.,
       Rosenthal, G., Vértes, P. E., ... & Jones, P. B. (2018). Adolescent
       tuning of association cortex in human structural brain networks.
       Cerebral Cortex, 28(1), 281-294.
    .. [ST5] https://github.com/spin-test/spin-test
    """
    methods = ['original', 'vasa', 'hungarian']
    if method not in methods:
        raise ValueError('Provided method "{}" invalid. Must be one of {}.'
                         .format(method, methods))
    seed = check_random_state(seed)
    coords = np.asanyarray(coords)
    hemiid = np.squeeze(np.asanyarray(hemiid, dtype='int8'))
    # check supplied coordinate shape
    if coords.shape[-1] != 3 or coords.squeeze().ndim != 2:
        raise ValueError('Provided `coords` must be of shape (N, 3), not {}'
                         .format(coords.shape))
    # ensure hemisphere designation array is correct
    if hemiid.ndim != 1:
        raise ValueError('Provided `hemiid` array must be one-dimensional.')
    if len(coords) != len(hemiid):
        raise ValueError('Provided `coords` and `hemiid` must have the same '
                         'length. Provided lengths: coords = {}, hemiid = {}'
                         .format(len(coords), len(hemiid)))
    if np.max(hemiid) > 1 or np.min(hemiid) < 0:
        raise ValueError('Hemiid must have values in {0, 1} denoting left and '
                         'right hemisphere coordinates, respectively. '
                         + 'Provided array contains values: {}'
                         .format(np.unique(hemiid)))
    # empty array to store resampling indices
    spinsamples = np.zeros((len(coords), n_rotate), dtype=int)
    cost = np.zeros((len(coords), n_rotate))
    inds = np.arange(len(coords), dtype=int)
    # generate rotations and resampling array!
    msg, warned = '', False
    for n in range(n_rotate):
        # rejection-sample rotations: retry (up to 500 times) whenever the
        # generated resampling duplicates a previous spin or the identity
        count, duplicated = 0, True
        if verbose:
            msg = 'Generating spin {:>5} of {:>5}'.format(n, n_rotate)
            print(msg, end='\r', flush=True)
        while duplicated and count < 500:
            count, duplicated = count + 1, False
            resampled = np.zeros(len(coords), dtype='int32')
            # rotate each hemisphere separately
            for h, rot in enumerate(_gen_rotation(seed=seed)):
                hinds = (hemiid == h)
                coor = coords[hinds]
                if len(coor) == 0:
                    continue
                # if we need an "exact" mapping (i.e., each node needs to be
                # assigned EXACTLY once) then we have to calculate the full
                # distance matrix which is a nightmare with respect to memory
                # for anything that isn't parcellated data.
                # that is, don't do this with vertex coordinates!
                if method == 'vasa':
                    dist = spatial.distance_matrix(coor, coor @ rot)
                    # min of max a la Vasa et al., 2018
                    col = np.zeros(len(coor), dtype='int32')
                    for r in range(len(dist)):
                        # find parcel whose closest neighbor is farthest away
                        # overall; assign to that
                        row = dist.min(axis=1).argmax()
                        col[row] = dist[row].argmin()
                        cost[inds[hinds][row], n] = dist[row, col[row]]
                        # set to -inf and inf so they can't be assigned again
                        dist[row] = -np.inf
                        dist[:, col[row]] = np.inf
                # optimization of total cost using Hungarian algorithm. this
                # may result in certain parcels having higher cost than with
                # `method='vasa'` but should always result in the total cost
                # being lower #tradeoffs
                elif method == 'hungarian':
                    dist = spatial.distance_matrix(coor, coor @ rot)
                    row, col = optimize.linear_sum_assignment(dist)
                    cost[hinds, n] = dist[row, col]
                # if nodes can be assigned multiple targets, we can simply use
                # the absolute minimum of the distances (no optimization
                # required) which is _much_ lighter on memory
                # huge thanks to https://stackoverflow.com/a/47779290 for this
                # memory-efficient method
                elif method == 'original':
                    dist, col = spatial.cKDTree(coor @ rot).query(coor, 1)
                    cost[hinds, n] = dist
                resampled[hinds] = inds[hinds][col]
            # if we want to check for duplicates ensure that we don't have any
            if check_duplicates:
                if np.any(np.all(resampled[:, None] == spinsamples[:, :n], 0)):
                    duplicated = True
                # if our "spin" is identical to the input then that's no good
                elif np.all(resampled == inds):
                    duplicated = True
        # if we broke out because we tried 500 rotations and couldn't generate
        # a new one, warn that we're using duplicate rotations and give up.
        # this should only be triggered if check_duplicates is set to True
        if count == 500 and not warned:
            warnings.warn('Duplicate rotations used. Check resampling array '
                          'to determine real number of unique permutations.')
            warned = True
        spinsamples[:, n] = resampled
    if verbose:
        # erase the progress message printed above
        print(' ' * len(msg) + '\b' * len(msg), end='', flush=True)
    if return_cost:
        return spinsamples, cost
    return spinsamples
def spin_parcels(surfaces, parcellation, method='surface', n_rotate=1000,
                 spins=None, verbose=False, **kwargs):
    """
    Rotates parcels in `parcellation` and re-assigns based on maximum overlap

    Vertex labels are rotated and a new label is assigned to each *parcel*
    based on the region maximally overlapping with its boundaries.

    Parameters
    ----------
    surfaces : (2,) list-of-str
        Surfaces to use for rotating parcels; generally spherical surfaces
        are recommended. Surfaces should be (left, right) hemisphere
    parcellation : (2,) list-of-str, optional
        Path to GIFTI label files containing parcel labels on the (left, right)
        hemisphere of `surfaces`
    method : {'average', 'surface', 'geodesic'}, optional
        Method for computing parcel centroids; passed to
        :func:`get_parcel_centroids`. Default: 'surface'
    n_rotate : int, optional
        Number of rotations to generate. Default: 1000
    spins : array_like, optional
        Pre-computed spins to use instead of generating them on the fly. If not
        provided will use other provided parameters to create them. Default:
        None
    seed : {int, np.random.RandomState instance, None}, optional
        Seed for random number generation. Default: None
    verbose : bool, optional
        Whether to print occasional status messages. Default: False
    return_cost : bool, optional
        Whether to return cost array (specified as Euclidean distance) for each
        coordinate for each rotation. Only available when `spins` are computed
        by this function. Default: False
    kwargs : key-value pairs
        Keyword arguments passed to :func:`~.gen_spinsamples`

    Returns
    -------
    spinsamples : (N, `n_rotate`) numpy.ndarray
        Resampling matrix to use in permuting data parcellated with labels from
        `parcellation`, where `N` is the number of parcels. Indices of -1
        indicate that the parcel's rotated location overlapped only background
        (label 0) vertices and should be ignored.
    """
    # BUGFIX: previously, requesting `return_cost` alongside pre-computed
    # `spins` raised a NameError (`cost` was never assigned); fail fast with
    # an informative message instead
    if kwargs.get('return_cost') and spins is not None:
        raise ValueError('Cannot use `return_cost` when pre-computed `spins` '
                         'are provided.')

    def overlap(vals):
        """ Returns most common positive value in `vals`; -1 if all negative
        """
        vals = np.asarray(vals)
        vals, counts = np.unique(vals[vals > 0], return_counts=True)
        try:
            # labels are 1-based; convert to a 0-based resampling index
            return vals[counts.argmax()] - 1
        except ValueError:
            return -1

    # get vertex-level labels, concatenated across hemispheres
    vertices = np.hstack([
        load_gifti(parc).agg_data() for parc in parcellation
    ])
    labels = np.unique(vertices)
    mask = labels != 0

    # get spins + cost (if requested)
    if spins is None:
        coords, hemiid = get_parcel_centroids(surfaces, method=method)
        spins = gen_spinsamples(coords, hemiid, n_rotate=n_rotate,
                                verbose=verbose, **kwargs)
        if kwargs.get('return_cost'):
            spins, cost = spins

    spins = load_spins(spins)
    if len(vertices) != len(spins):
        raise ValueError('Provided annotation files have a different '
                         'number of vertices than the specified fsaverage '
                         'surface.\n ANNOTATION: {} vertices\n '
                         'FSAVERAGE: {} vertices'
                         .format(len(vertices), len(spins)))

    # spin and assign regions based on max overlap
    regions = np.zeros((len(labels[mask]), n_rotate), dtype='int32')
    for n in range(n_rotate):
        if verbose:
            msg = f'Calculating parcel overlap: {n:>5}/{n_rotate}'
            print(msg, end='\b' * len(msg), flush=True)
        regions[:, n] = labeled_comprehension(vertices[spins[:, n]], vertices,
                                              labels, overlap, int, -1)[mask]

    if kwargs.get('return_cost'):
        return regions, cost

    return regions
def parcels_to_vertices(data, parcellation):
    """
    Projects parcellated `data` to vertices as defined by `parcellation`

    Parameters
    ----------
    data : (N,) numpy.ndarray
        Parcellated data to be projected to vertices
    parcellation : tuple-of-str or os.PathLike
        Filepaths to parcellation images to project `data` to vertices

    Returns
    -------
    projected : numpy.ndarray
        Vertex-level data
    """
    # ensure 2D (n_parcels, n_features); a 1D input becomes a column vector
    data = np.vstack(data).astype(float)

    # vertex-wise parcel labels, concatenated across hemispheres
    vertices = np.hstack([
        load_gifti(parc).agg_data() for parc in parcellation
    ])
    expected = np.unique(vertices)[1:].size
    n_vert = vertices.shape[0]
    if expected != len(data):
        raise ValueError('Number of parcels in provided annotation files '
                         'differs from size of parcellated data array.\n'
                         ' EXPECTED: {} parcels\n'
                         ' RECEIVED: {} parcels'
                         .format(expected, len(data)))

    projected = np.zeros((n_vert, data.shape[-1]), dtype=data.dtype)
    # prepend a NaN row so background vertices (label 0) map to NaN; the row
    # is a full NaN row rather than `[[np.nan]]`, which only worked for
    # single-column data (BUGFIX). This is loop-invariant, so build it once.
    currdata = np.vstack([np.full((1, data.shape[-1]), np.nan), data])
    n_vert = 0
    for parc in parcellation:
        labels = load_gifti(parc).agg_data().astype('int')
        # label L indexes row L of `currdata` (i.e., data row L - 1)
        projected[n_vert:n_vert + len(labels), :] = currdata[labels, :]
        n_vert += len(labels)

    return np.squeeze(projected)
def vertices_to_parcels(data, parcellation):
    """
    Reduces vertex-level `data` to parcels defined by `parcellation`

    Takes average of vertices within each parcel (excluding NaN values).
    Assigns NaN to parcels for which *all* vertices are NaN.

    Parameters
    ----------
    data : (N,) numpy.ndarray
        Vertex-level data to be reduced to parcels
    parcellation : tuple-of-str or os.PathLike
        Filepaths to parcellation images to parcellate `data`

    Returns
    -------
    reduced : numpy.ndarray
        Parcellated `data`
    """
    # ensure 2D (n_vertices, n_features); a 1D input becomes a column vector
    data = np.vstack(data)

    # vertex-wise parcel labels, concatenated across hemispheres
    vertices = np.hstack([
        load_gifti(parc).agg_data() for parc in parcellation
    ])
    n_parc = np.unique(vertices).size
    expected = vertices.shape[0]
    if expected != len(data):
        raise ValueError('Number of vertices in provided annotation files '
                         'differs from size of vertex-level data array.\n'
                         ' EXPECTED: {} vertices\n'
                         ' RECEIVED: {} vertices'
                         .format(expected, len(data)))

    # per-parcel running sums and valid (non-NaN) counts, accumulated across
    # hemispheres so parcels indexed by the same label value are pooled
    numerator = np.zeros((n_parc, data.shape[-1]), dtype=data.dtype)
    denominator = np.zeros((n_parc, data.shape[-1]), dtype=data.dtype)
    start = end = 0
    for parc in parcellation:
        labels = load_gifti(parc).agg_data().astype('int')
        indices = np.unique(labels)
        end += len(labels)
        for idx in range(data.shape[-1]):
            currdata = np.squeeze(data[start:end, idx])
            # per-label counts + sums, with NaNs zeroed so they don't
            # contaminate the sums
            counts, sums = _stats(np.nan_to_num(currdata), labels, indices)
            # subtract the number of NaN vertices per label so the
            # denominator reflects only valid observations
            _, nacounts = _stats(np.isnan(currdata), labels, indices)
            counts = (np.asanyarray(counts, dtype=float)
                      - np.asanyarray(nacounts, dtype=float))
            numerator[indices, idx] += sums
            denominator[indices, idx] += counts
        start = end

    # parcels with zero valid vertices divide 0/0 -> NaN; silence the warning.
    # The leading [1:] drops the background (label 0) entry.
    with np.errstate(divide='ignore', invalid='ignore'):
        reduced = np.squeeze(numerator / denominator)[1:]

    return reduced
def spin_data(data, surfaces, parcellation, method='surface', n_rotate=1000,
              spins=None, verbose=False, **kwargs):
    """
    Projects parcellated `data` to `surfaces`, rotates, and re-parcellates

    Projection of `data` to `surfaces` uses provided `parcellation` files.
    Re-parcellated data will not be exactly identical to original values due to
    re-averaging process. Parcels subsumed by regions in `drop` will be listed
    as NaN.

    Parameters
    ----------
    data : (N,) numpy.ndarray
        Parcellated data to be rotated. Parcels should be ordered by [left,
        right] hemisphere; ordering within hemisphere should correspond to the
        provided `parcellation` files.
    surfaces : (2,) list-of-str
        Surfaces to use for rotating parcels; generally spherical surfaces
        are recommended. Surfaces should be (left, right) hemisphere
    parcellation : (2,) list-of-str, optional
        Path to GIFTI label files containing parcel labels on the (left, right)
        hemisphere of `surfaces` mapping `data` to vertices in `surfaces`
    method : str, optional
        Method used to compute parcel centroids when generating spins; passed
        to `get_parcel_centroids`. Default: 'surface'
    n_rotate : int, optional
        Number of rotations to generate. Ignored when `spins` is provided, in
        which case the number of columns of `spins` is used. Default: 1000
    spins : array_like, optional
        Pre-computed spins to use instead of generating them on the fly. If not
        provided will use other provided parameters to create them. Default:
        None
    verbose : bool, optional
        Whether to print occasional status messages. Default: False
    kwargs : key-value pairs
        Keyword arguments passed to function used to generate rotations

    Returns
    -------
    rotated : (N, `n_rotate`) numpy.ndarray
        Rotated `data`

    Raises
    ------
    ValueError
        If `parcellation` and `spins` imply different numbers of vertices, or
        if `return_cost` is requested alongside pre-computed `spins`
    """
    cost = None  # only available when spins are generated here
    # get coordinates and hemisphere designation for spin generation
    vertices = parcels_to_vertices(data, parcellation)
    if spins is None:
        coords, hemiid = get_parcel_centroids(surfaces, method=method)
        spins = gen_spinsamples(coords, hemiid, n_rotate=n_rotate,
                                verbose=verbose, **kwargs)
        if kwargs.get('return_cost'):
            spins, cost = spins
    elif kwargs.get('return_cost'):
        # previously this path fell through to a NameError on `cost`
        raise ValueError('`return_cost` cannot be honored when pre-computed '
                         '`spins` are provided.')
    spins = load_spins(spins)
    # honor the actual number of (possibly pre-computed) rotations
    n_rotate = spins.shape[-1]
    if len(vertices) != len(spins):
        raise ValueError('Provided parcellation files have a different '
                         'number of vertices than the specified surfaces.\n'
                         '    ANNOTATION: {} vertices\n'
                         '    FSAVERAGE:  {} vertices'
                         .format(len(vertices), len(spins)))
    spun = np.zeros(data.shape + (n_rotate,))
    msg = ''  # guards the final print when verbose and n_rotate == 0
    for n in range(n_rotate):
        if verbose:
            msg = f'Reducing vertices to parcels: {n:>5}/{n_rotate}'
            print(msg, end='\b' * len(msg), flush=True)
        spun[..., n] = vertices_to_parcels(vertices[spins[:, n]], parcellation)
    if verbose:
        print(' ' * len(msg) + '\b' * len(msg), end='', flush=True)
    if kwargs.get('return_cost'):
        return spun, cost
    return spun
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,065
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/datasets/atlases.py
|
# -*- coding: utf-8 -*-
"""
Functions for fetching datasets (from the internet, if necessary)
"""
from collections import namedtuple
import os
from pathlib import Path
from nilearn.datasets.utils import _fetch_files
from sklearn.utils import Bunch
from brainnotation.datasets.utils import get_data_dir, get_dataset_info
SURFACE = namedtuple('Surface', ('L', 'R'))
ALIAS = dict(
fslr='fsLR', fsavg='fsaverage', mni152='MNI152', mni='MNI152',
FSLR='fsLR', CIVET='civet'
)
DENSITIES = dict(
civet=['41k', '164k'],
fsaverage=['3k', '10k', '41k', '164k'],
fsLR=['4k', '8k', '32k', '164k'],
MNI152=['1mm', '2mm', '3mm'],
)
_atlas_docs = dict(
url="""\
url : str, optional
URL from which to download data. Default: None\
""",
data_dir="""\
data_dir : str, optional
Path to use as data directory. If not specified, will check for
environmental variable 'BRAINNOTATION_DATA'; if that is not set, will
use `~/brainnotation-data` instead. Default: None\
""",
verbose="""\
verbose : int, optional
Modifies verbosity of download, where higher numbers mean more updates.
Default: 1\
""",
genericatlas="""\
atlas : dict
Dictionary where keys are atlas types and values are atlas files\
""",
surfatlas="""\
atlas : dict
Dictionary where keys are atlas types and values are tuples of atlas
files (L/R hemisphere)\
"""
)
def _sanitize_atlas(atlas):
    """ Resolves aliases of `atlas` and ensures it names a known atlas
    """
    resolved = ALIAS.get(atlas, atlas)
    if resolved in DENSITIES:
        return resolved
    raise ValueError(f'Invalid atlas: {resolved}.')
def _bunch_outputs(keys, values, surface=True):
    """ Zips `keys` with `values`, pairing values as (L, R) when `surface`
    """
    if surface:
        paired = []
        for idx in range(0, len(values), 2):
            paired.append(SURFACE(*values[idx:idx + 2]))
        values = paired
    return Bunch(**dict(zip(keys, values)))
def _fetch_atlas(atlas, density, keys, url=None, data_dir=None, verbose=1):
    """ Helper function to get requested `atlas`

    Downloads (if necessary) and returns filepaths for the `density` version
    of `atlas`, bunched under the provided `keys`.

    Parameters
    ----------
    atlas : str
        Atlas to fetch; aliases accepted (see `ALIAS`)
    density : str
        Density (surface) or resolution (MNI152) of `atlas`; must be one of
        `DENSITIES[atlas]`
    keys : list-of-str
        Names of atlas files to fetch; used as keys of the returned Bunch.
        NOTE(review): extended in place with ['medial', 'sulc', 'vaavg'] for
        surface atlases (see below), so callers should pass a fresh list
    url : str, optional
        URL from which to download data; falls back to the dataset info.
        Default: None
    data_dir : str, optional
        Path to use as data directory. Default: None
    verbose : int, optional
        Verbosity of download. Default: 1

    Returns
    -------
    atlas : sklearn.utils.Bunch
        Keys are atlas types; values are filepaths ((L, R) tuples for
        surface atlases)
    """
    atlas = _sanitize_atlas(atlas)
    densities = DENSITIES[atlas]
    if density not in densities:
        raise ValueError(f'Invalid density: {density}. Must be one of '
                         f'{densities}')
    data_dir = get_data_dir(data_dir=data_dir)
    # per-density dataset metadata (download URL + md5 checksum)
    info = get_dataset_info(atlas)[density]
    if url is None:
        url = info['url']
    # options handed to nilearn's _fetch_files for each file
    opts = {
        'uncompress': True,
        'md5sum': info['md5'],
        'move': f'{atlas}{density}.tar.gz'
    }
    if atlas == 'MNI152':
        # volumetric template: gzipped NIFTI image per contrast / tissue map
        filenames = [
            f'tpl-MNI152NLin2009cAsym_res-{density}{suff}.nii.gz'
            for suff in ('_T1w', '_T2w', '_PD', '_desc-brain_mask',
                         '_label-csf_probseg', '_label-gm_probseg',
                         '_label-wm_probseg')
        ]
    else:
        # surface atlas: per-hemisphere GIFTI surfaces plus auxiliary maps
        filenames = [
            'tpl-{}_den-{}_hemi-{}_{}.surf.gii'
            .format(atlas, density, hemi, surf)
            for surf in keys
            for hemi in ('L', 'R')
        ] + [
            'tpl-{}_den-{}_hemi-{}_desc-{}.gii'
            .format(atlas, density, hemi, desc)
            for desc in ('nomedialwall_dparc.label',
                         'sulc_midthickness.shape',
                         'vaavg_midthickness.shape')
            for hemi in ('L', 'R')
        ]
        # mutates the caller-supplied list (callers pass fresh lists)
        keys += ['medial', 'sulc', 'vaavg']
    filenames = [os.path.join('atlases', atlas, fn) for fn in filenames]
    data = [
        Path(fn) for fn in
        _fetch_files(data_dir, files=[(f, url, opts) for f in filenames],
                     verbose=verbose)
    ]
    # surface files get (L, R) pairing; MNI152 volumes do not
    return _bunch_outputs(keys, data, atlas != 'MNI152')
def fetch_civet(density='41k', url=None, data_dir=None, verbose=1):
    # surfaces shipped with the CIVET atlas, in canonical order
    surfs = ['white', 'midthickness', 'inflated', 'veryinflated', 'sphere']
    return _fetch_atlas('civet', density, surfs,
                        url=url, data_dir=data_dir, verbose=verbose)
fetch_civet.__doc__ = """
Fetches CIVET surface atlas
Parameters
----------
density : {{'{densities}'}}, optional
    Density of CIVET atlas to fetch. Default: '41k'
{url}
{data_dir}
{verbose}
Returns
-------
{surfatlas}
""".format(**_atlas_docs, densities="', '".join(DENSITIES['civet']))
def fetch_fsaverage(density='41k', url=None, data_dir=None, verbose=1):
    # surfaces shipped with the fsaverage atlas, in canonical order
    surfs = ['white', 'pial', 'inflated', 'sphere']
    return _fetch_atlas('fsaverage', density, surfs,
                        url=url, data_dir=data_dir, verbose=verbose)
fetch_fsaverage.__doc__ = """
Fetches fsaverage surface atlas
Parameters
----------
density : {{'{densities}'}}, optional
    Density of fsaverage atlas to fetch. Default: '41k'
{url}
{data_dir}
{verbose}
Returns
-------
{surfatlas}
""".format(**_atlas_docs, densities="', '".join(DENSITIES['fsaverage']))
def fetch_fslr(density='32k', url=None, data_dir=None, verbose=1):
    # surfaces shipped with the fsLR atlas, in canonical order
    surfs = ['midthickness', 'inflated', 'veryinflated', 'sphere']
    # the lowest-resolution meshes do not ship a 'veryinflated' surface
    if density in ('4k', '8k'):
        surfs = [surf for surf in surfs if surf != 'veryinflated']
    return _fetch_atlas('fsLR', density, surfs,
                        url=url, data_dir=data_dir, verbose=verbose)
fetch_fslr.__doc__ = """
Fetches fsLR surface atlas
Parameters
----------
density : {{'{densities}'}}, optional
    Density of fsLR atlas to fetch. Default: '32k'
{url}
{data_dir}
{verbose}
Returns
-------
{surfatlas}
""".format(**_atlas_docs, densities="', '".join(DENSITIES['fsLR']))
def fetch_mni152(density='1mm', url=None, data_dir=None, verbose=1):
    # volumetric images shipped with the MNI152 template, in canonical order
    volumes = ['T1w', 'T2w', 'PD', 'brainmask', 'CSF', 'GM', 'WM']
    return _fetch_atlas('MNI152', density, volumes,
                        url=url, data_dir=data_dir, verbose=verbose)
fetch_mni152.__doc__ = """
Fetches MNI152 atlas
Parameters
----------
density : {{'{densities}'}}, optional
    Resolution of MNI152 atlas to fetch. Default: '1mm'
{url}
{data_dir}
{verbose}
Returns
-------
{genericatlas}
""".format(**_atlas_docs, densities="', '".join(DENSITIES['MNI152']))
def fetch_regfusion(atlas, url=None, data_dir=None, verbose=1):
    atlas = _sanitize_atlas(atlas)
    # some densities have no regfusion mapping available; drop them
    densities = DENSITIES[atlas].copy()
    invalid = dict(civet=('164k',), fsLR=('4k', '8k'))
    for remove in invalid.get(atlas, []):
        densities.remove(remove)
    data_dir = get_data_dir(data_dir=data_dir)
    # dataset metadata (download URL + md5 checksum) for the regfusion files
    info = get_dataset_info('regfusion')
    if url is None:
        url = info['url']
    # options handed to nilearn's _fetch_files for each file
    opts = {
        'uncompress': True,
        'md5sum': info['md5'],
        'move': 'regfusion.tar.gz'
    }
    filenames = [
        'tpl-MNI152_space-{}_den-{}_hemi-{}_regfusion.txt'
        .format(atlas, density, hemi)
        for density in densities
        for hemi in ['L', 'R']
    ]
    filenames = [os.path.join('atlases', 'regfusion', fn) for fn in filenames]
    data = [
        Path(fn) for fn in
        _fetch_files(data_dir, files=[(f, url, opts) for f in filenames],
                     verbose=verbose)
    ]
    # keys are densities; values are (L, R) regfusion mapping files
    return _bunch_outputs(densities, data)
fetch_regfusion.__doc__ = """
Fetches regfusion inputs for mapping MNI152 to specified surface `atlas`
Parameters
----------
atlas : {{'civet', 'fsaverage', 'fsLR'}}
    Atlas to fetch
{url}
{data_dir}
{verbose}
Returns
-------
regfusion : dict
    Dictionary where keys are surface densities and values are regfusion inputs
""".format(**_atlas_docs)
def fetch_atlas(atlas, density, url=None, data_dir=None, verbose=1):
    # resolve aliases, then dispatch to the matching fetch_* function in
    # this module
    canonical = _sanitize_atlas(atlas)
    fetcher = globals()[f'fetch_{canonical.lower()}']
    return fetcher(density, url=url, data_dir=data_dir, verbose=verbose)
fetch_atlas.__doc__ = """
Fetches specified `atlas` and `density`
Parameters
----------
atlas : {{'{atlases}'}}
    Atlas to fetch
density : str
    Density (or resolution) of `atlas`. Must be valid for provided `atlas`
{url}
{data_dir}
{verbose}
Returns
-------
{genericatlas}
""".format(**_atlas_docs, atlases="', '".join(DENSITIES.keys()))
def fetch_all_atlases(data_dir=None, verbose=1):
    atlases = {'regfusion': {}}
    for key, resolutions in DENSITIES.items():
        atlases[key] = {}
        for res in resolutions:
            atlases[key][res] = \
                fetch_atlas(key, res, data_dir=data_dir, verbose=verbose)
        # regfusion mappings only exist for surface atlases
        if key != 'MNI152':
            atlases['regfusion'][key] = \
                fetch_regfusion(key, data_dir=data_dir, verbose=verbose)
    return atlases
# BUGFIX: the `.format(**_atlas_docs)` call was missing, leaving literal
# '{data_dir}' / '{verbose}' placeholders in the rendered docstring (every
# other fetcher in this module formats its templated docstring)
fetch_all_atlases.__doc__ = """
Fetches (and caches) all available atlases
Parameters
----------
{data_dir}
{verbose}
Returns
-------
atlases : dict
    Nested dictionaries containing all available atlases
""".format(**_atlas_docs)
def get_atlas_dir(atlas, data_dir=None):
    # 'regfusion' is a pseudo-atlas directory with no entry in DENSITIES;
    # _sanitize_atlas would reject it, so only validate other names
    if atlas != 'regfusion':
        atlas = _sanitize_atlas(atlas)
    return Path(get_data_dir(data_dir=data_dir)) / 'atlases' / atlas
get_atlas_dir.__doc__ = """
Returns filepath to specified `atlas`
Parameters
----------
atlas : str
    Atlas for which filepath should be returned
{data_dir}
Returns
-------
atlas_dir : os.PathLike
    Full filepath to `atlas` directory
Raises
------
ValueError
    If provided `atlas` is not valid
""".format(**_atlas_docs)
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,066
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/tests/test_civet.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.civet functionality
"""
import pytest
@pytest.mark.xfail
def test_read_civet_surf():
    # TODO: implement test for civet surface reading; placeholder is
    # expected to fail until then
    assert False
@pytest.mark.xfail
def test_read_surfmap():
    # TODO: implement test for surface-map reading; placeholder is expected
    # to fail until then
    assert False
@pytest.mark.xfail
def test_resample_surface_map():
    # TODO: implement test for surface-map resampling; placeholder is
    # expected to fail until then
    assert False
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,067
|
danjgale/brainnotation
|
refs/heads/main
|
/examples/plot_spatial_nulls.py
|
# -*- coding: utf-8 -*-
"""
Using spatial null models
=========================
This example demonstrates how to use spatial null models in
:mod:`brainnotation.nulls` to test the correlation between two brain
annotations.
"""
###############################################################################
# The brain—and most features derived from it—is spatially autocorrelated, and
# therefore when making comparisons between brain features we need to account
# for this spatial autocorrelation.
#
# Enter: spatial null models.
#
# Spatial null models need to be used whenever you're comparing brain maps. In
# order to demonstrate how to use them in ``brainnotation`` we need two
# annotations to compare. We'll use the first principal component of cognitive
# terms from NeuroSynth (Yarkoni et al., 2011, Nat Methods) and the first
# principal component of gene expression across the brain (from the Allen Human
# Brain Atlas).
#
# Note that we pass `return_single=True` to
# :func:`brainnotation.datasets.fetch_annotation` so that the returned data are
# a list of filepaths rather than the default dictionary format. (This only
# works since we know that there is only one annotation matching our query; a
# dictionary will always be returned if multiple annotations match our query.)
from brainnotation import datasets
nsynth = datasets.fetch_annotation(source='neurosynth', return_single=True)
genepc = datasets.fetch_annotation(desc='genepc1', return_single=True)
print('Neurosynth: ', nsynth)
print('Gene PC1: ', genepc)
###############################################################################
# These annotations are in different spaces so we first need to resample them
# to the same space. Here, we'll choose to resample them to the 'fsaverage'
# surface with a '10k' resolution (approx 10k vertices per hemisphere). Note
# that the `genepc1` is already in this space so no resampling will be
# performed for those data. (We could alternatively specify 'transform_to_trg'
# for the `resampling` parameter and achieve the same outcome.)
#
# The data returned will always be pre-loaded nibabel image instances:
from brainnotation import resampling
nsynth, genepc = resampling.resample_images(nsynth, genepc,
                                            'MNI152', 'fsaverage',
                                            resampling='transform_to_alt',
                                            alt_spec=('fsaverage', '10k'))
print(nsynth, genepc)
###############################################################################
# Once the images are resampled we can easily correlate them:
from brainnotation import stats
corr, pval = stats.correlate_images(nsynth, genepc)
print(f'Correlation: r = {corr:.02f}, p = {pval:.04f}')
###############################################################################
# The returned p-value here is generated from a spatially-naive parametric
# distribution, which is inappropriate for brain annotations. Instead, we can
# opt to use a null model from the :mod:`brainnotation.nulls` module.
#
# Here, we'll use the original null model proposed by Alexander-Bloch et al.,
# 2018, *NeuroImage*. We provide one of the maps we're comparing, the space +
# density of the map, and the number of permutations we want to generate. The
# return array will be vertices x permutations.
#
# (Note that we need to pass the loaded data from the provided map to the null
# function so we use the :func:`brainnotation.images.load_data` utility.)
from brainnotation import images, nulls
nsynth_data = images.load_data(nsynth)
rotated = nulls.alexander_bloch(nsynth_data, atlas='fsaverage', density='10k',
                                n_perm=100, seed=1234)
print(rotated.shape)
###############################################################################
# We can supply the generated null array to the
# :func:`brainnotation.stats.correlate_images` function and it will be used to
# generate a non-parametric p-value. Note that the correlation remains
# identical to that above but the p-value has now changed, revealing that the
# correlation is no longer significant:
corr, pval = stats.correlate_images(nsynth, genepc, nulls=rotated)
print(f'Correlation: r = {corr:.02f}, p = {pval:.04f}')
###############################################################################
# There are a number of different null functions that can be used to generate
# null maps; they have (nearly) identical function signatures, so refer to the
# :ref:`API reference <ref_nulls>` for more information.
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,068
|
danjgale/brainnotation
|
refs/heads/main
|
/brainnotation/tests/test_stats.py
|
# -*- coding: utf-8 -*-
"""
For testing brainnotation.stats functionality
"""
import numpy as np
import pytest
from brainnotation import stats
@pytest.mark.xfail
def test_correlate_images():
    # TODO: implement test for stats.correlate_images; placeholder is
    # expected to fail until then
    assert False
def test_permtest_pearsonr():
    """ Regression test for `stats.permtest_pearsonr` on seeded random data
    """
    rng = np.random.default_rng(12345678)
    x, y = rng.random(size=(2, 100))
    expected_r, expected_p = 0.0345815411043023, 0.7192807192807192
    r, p = stats.permtest_pearsonr(x, y)
    assert np.allclose([r, p], [expected_r, expected_p])
    # duplicated columns should yield the same statistics column-wise
    r, p = stats.permtest_pearsonr(np.column_stack([x, x]),
                                   np.column_stack([y, y]))
    assert np.allclose(r, [expected_r, expected_r])
    assert np.allclose(p, [expected_p, expected_p])
@pytest.mark.parametrize('x, y, expected', [
    # basic one-dimensional input
    (range(5), range(5), (1.0, 0.0)),
    # broadcasting occurs regardless of input order
    (np.stack([range(5), range(5, 0, -1)], 1), range(5),
     ([1.0, -1.0], [0.0, 0.0])),
    (range(5), np.stack([range(5), range(5, 0, -1)], 1),
     ([1.0, -1.0], [0.0, 0.0])),
    # correlation between matching columns
    (np.stack([range(5), range(5, 0, -1)], 1),
     np.stack([range(5), range(5, 0, -1)], 1),
     ([1.0, 1.0], [0.0, 0.0]))
])
def test_efficient_pearsonr(x, y, expected):
    """ Checks `stats.efficient_pearsonr` returns expected (r, p) values """
    assert np.allclose(stats.efficient_pearsonr(x, y), expected)
def test_efficient_pearsonr_errors():
    """ Mismatched lengths raise; empty inputs yield all-NaN outputs """
    with pytest.raises(ValueError):
        stats.efficient_pearsonr(range(4), range(5))
    outputs = stats.efficient_pearsonr([], [])
    assert all(np.isnan(value) for value in outputs)
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,069
|
danjgale/brainnotation
|
refs/heads/main
|
/examples/plot_fetch_datasets.py
|
# -*- coding: utf-8 -*-
"""
Fetching atlases and annotations
================================
This example demonstrates how to use :mod:`brainnotation.datasets` to fetch
atlases and annotations.
"""
###############################################################################
# Much of the functionality of the ``brainnotation`` toolbox relies on the
# atlases and atlas files provided with it. In many cases these atlases are
# fetched "behind-the-scenes" when you call functions that depend on them, but
# they can be accessed directly.
#
# There is a general purpose :func:`brainnotation.datasets.fetch_atlas`
# function that can fetch any of the atlases provided with ``brainnotation``:
from brainnotation import datasets
fslr = datasets.fetch_atlas('fslr', '32k')
print(fslr.keys())
###############################################################################
# The values corresponding to the keys of the atlas dictionary are length-2
# lists containing filepaths to the downloaded data. All surface atlas files
# are provided in gifti format (whereas MNI files are in gzipped nifti format).
#
# You can load them directly with ``nibabel`` to confirm their validity:
import nibabel as nib
lsphere, rsphere = fslr['sphere']
lvert, ltri = nib.load(lsphere).agg_data()
print(lvert.shape, ltri.shape)
###############################################################################
# The other datasets that are provided with ``brainnotation`` are annotations
# (i.e., brain maps!). While we are slowly making more and more of these openly
# available, for now only a subset are accessible to the general public; these
# are returned by default via :func:`datasets.available_annotations`.
annotations = datasets.available_annotations()
print(f'Available annotations: {len(annotations)}')
###############################################################################
# The :func:`~.available_annotations` function accepts a number of keyword
# arguments that you can use to query specific datasets. For example, providing
# the `format='volume'` argument will return only those annotations that
# are, by default, a volumetric image:
volume_annotations = datasets.available_annotations(format='volume')
print(f'Available volumetric annotations: {len(volume_annotations)}')
###############################################################################
# There are a number of keyword arguments we can specify to reduce the scope of
# the annotations returned. Here, `source` specifies where the annotation came
# from (i.e., a dataset from a manuscript or a data repository or toolbox),
# `desc` refers to a brief description of the annotation, `space` clarifies
# which space the annotation is in, and `den` (specific to surface annotations)
# clarifies the density of the surface on which the annotation is defined:
annot = datasets.available_annotations(source='abagen', desc='genepc1',
                                       space='fsaverage', den='10k')
print(annot)
###############################################################################
# Annotations also have tags to help sort them into categories. You can see
# what tags can be used to query annotations with the :func:`~.available_tags`
# functions:
tags = datasets.available_tags()
print(tags)
###############################################################################
# Tags can be used as a keyword argument with :func:`~.available_annotations`.
# You can supply either a single tag or a list of tags. Note that supplying a
# list will only return those annotations that match ALL supplied tags:
fmri_annotations = datasets.available_annotations(tags='fMRI')
print(fmri_annotations)
###############################################################################
# Once we have an annotation that we want we can use the
# :func:`brainnotation.datasets.fetch_annotation` to actually download the
# files. This has a very similar signature to the
# :func:`~.available_annotations` function, accepting almost all the same
# keyword arguments to specify which annotations are desired.
#
# Here, we'll grab the first principal component of gene expression across the
# brain (from the Allen Human Brain Atlas):
abagen = datasets.fetch_annotation(source='abagen', desc='genepc1')
print(abagen)
###############################################################################
# Notice that the returned annotation ``abagen`` is a dictionary. We can subset
# the dictionary with the appropriate key or, if we know that our query is
# going to return only one annotation, also provide the `return_single=True`
# argument to the fetch call:
abagen = datasets.fetch_annotation(source='abagen', desc='genepc1',
                                   return_single=True)
print(abagen)
###############################################################################
# And that's it! This example provided a quick overview on how to fetch the
# various atlases and datasets provided with ``brainnotation``. For more
# information please refer to the :ref:`API reference <ref_datasets>`.
|
{"/brainnotation/tests/test_points.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_resampling.py": ["/brainnotation/__init__.py"], "/brainnotation/images.py": ["/brainnotation/civet.py"], "/brainnotation/datasets/__init__.py": ["/brainnotation/datasets/atlases.py", "/brainnotation/datasets/annotations.py"], "/brainnotation/resampling.py": ["/brainnotation/__init__.py", "/brainnotation/datasets/__init__.py", "/brainnotation/images.py"], "/brainnotation/tests/test_images.py": ["/brainnotation/__init__.py"], "/brainnotation/nulls/tests/test_spins.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/stats.py": ["/brainnotation/images.py"], "/brainnotation/tests/test_transforms.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_utils.py": ["/brainnotation/__init__.py"], "/brainnotation/datasets/tests/test_annotations.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/datasets/annotations.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/nulls/nulls.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/points.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/tests/test_burt.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/nulls/__init__.py": ["/brainnotation/nulls/nulls.py"], "/brainnotation/nulls/tests/test_nulls.py": ["/brainnotation/nulls/__init__.py"], "/brainnotation/datasets/tests/test_utils.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/points.py": ["/brainnotation/images.py"], "/brainnotation/__init__.py": ["/brainnotation/resampling.py", "/brainnotation/stats.py"], "/brainnotation/datasets/tests/test__osf.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/transforms.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/utils.py"], "/brainnotation/datasets/_osf.py": ["/brainnotation/datasets/utils.py"], "/brainnotation/civet.py": ["/brainnotation/points.py"], 
"/brainnotation/datasets/tests/test_atlases.py": ["/brainnotation/datasets/__init__.py"], "/brainnotation/plotting.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/transforms.py"], "/brainnotation/parcellate.py": ["/brainnotation/datasets/__init__.py", "/brainnotation/images.py", "/brainnotation/resampling.py", "/brainnotation/transforms.py", "/brainnotation/nulls/spins.py"], "/brainnotation/nulls/spins.py": ["/brainnotation/images.py", "/brainnotation/points.py"], "/brainnotation/datasets/atlases.py": ["/brainnotation/datasets/utils.py"], "/examples/plot_spatial_nulls.py": ["/brainnotation/__init__.py"], "/brainnotation/tests/test_stats.py": ["/brainnotation/__init__.py"], "/examples/plot_fetch_datasets.py": ["/brainnotation/__init__.py"]}
|
23,092
|
jrd/bootsetup
|
refs/heads/master
|
/bootsetup/bootsetup_gtk.py
|
#!/usr/bin/env python
# coding: utf-8
# vim:et:sta:sts=2:sw=2:ts=2:tw=0:
"""
Graphical BootSetup.
"""
from __future__ import unicode_literals, print_function, division, absolute_import
import os
import sys
import gettext # noqa
import gtk
import gtk.glade
from .bootsetup import *
from .gathergui import *
class BootSetupGtk(BootSetup):
  """Graphical (GTK) front-end of BootSetup."""
  def _find_locale_dir(self):
    """Return the directory holding gettext translation catalogs.

    A per-user directory is used when running from a ~/.local install,
    otherwise the system-wide location.
    """
    if '.local' in __file__:
      return os.path.expanduser(os.path.join('~', '.local', 'share', 'locale'))
    else:
      # Bug fix: the system locale dir must be absolute ('/usr/share/locale');
      # a relative 'usr/share/locale' would resolve against the current
      # working directory and break translations.
      return os.path.join('/usr', 'share', 'locale')
  def run_setup(self):
    """Bind the translation domain, enforce root privileges, start the GUI."""
    gtk.glade.bindtextdomain(self._appName, self._find_locale_dir())
    gtk.glade.textdomain(self._appName)
    # Installing a bootloader needs root, except in pure test mode.
    if not (self._isTest and self._useTestData) and os.getuid() != 0:
      self.error_dialog(_("Root privileges are required to run this program."), _("Sorry!"))
      sys.exit(1)
    gg = GatherGui(self, self._bootloader, self._targetPartition, self._isTest, self._useTestData)
    gg.run()
  def _message_dialog(self, dialog_type, buttons, message, title=None, parent=None):
    # Shared implementation for info/error dialogs: build, run modally,
    # destroy, and return the user's response. (Previously duplicated in
    # info_dialog and error_dialog.)
    dialog = gtk.MessageDialog(parent=parent, type=dialog_type, buttons=buttons, flags=gtk.DIALOG_MODAL)
    if title:
      msg = "<b>{0}</b>\n\n{1}".format(unicode(title), unicode(message))
    else:
      msg = message
    dialog.set_markup(msg)
    result = dialog.run()
    dialog.destroy()
    return result
  def info_dialog(self, message, title=None, parent=None):
    """Show a modal information dialog; return the dialog response."""
    return self._message_dialog(gtk.MESSAGE_INFO, gtk.BUTTONS_OK, message, title, parent)
  def error_dialog(self, message, title=None, parent=None):
    """Show a modal error dialog; return the dialog response."""
    return self._message_dialog(gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, message, title, parent)
|
{"/bootsetup/bootsetup_gtk.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathergui.py"], "/bootsetup/gathergui.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"], "/bootsetup/bootsetup.py": ["/bootsetup/__init__.py", "/bootsetup/bootsetup_gtk.py", "/bootsetup/bootsetup_curses.py"], "/bootsetup/bootsetup_curses.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathercurses.py"], "/bootsetup/gathercurses.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"]}
|
23,093
|
jrd/bootsetup
|
refs/heads/master
|
/bootsetup/gathergui.py
|
#!/usr/bin/env python
# coding: utf-8
# vim:et:sta:sts=2:sw=2:ts=2:tw=0:
"""
Graphical BootSetup configuration gathering.
"""
from __future__ import unicode_literals, print_function, division, absolute_import
from .__init__ import __version__, __copyright__, __author__
import gettext # noqa
import gobject
import gtk
import gtk.glade
import os
import sys
import re
import libsalt as slt
from .config import Config
from .lilo import Lilo
from .grub2 import Grub2
class GatherGui:
  """
  GUI to gather information about the configuration to setup.
  """
  _lilo = None
  _grub2 = None
  _editing = False
  _custom_lilo = False
  # Editors tried in order when the user asks to edit a configuration file.
  _editors = ['leafpad', 'gedit', 'geany', 'kate', 'xterm -e nano']
  def __init__(self, bootsetup, bootloader=None, target_partition=None, is_test=False, use_test_data=False):
    """Load the glade UI, wire widgets and populate the data stores."""
    self._bootsetup = bootsetup
    self.cfg = Config(bootloader, target_partition, is_test, use_test_data)
    print("""
bootloader = {bootloader}
target partition = {partition}
MBR device = {mbr}
disks:{disks}
partitions:{partitions}
boot partitions:{boot_partitions}
""".format(bootloader=self.cfg.cur_bootloader, partition=self.cfg.cur_boot_partition, mbr=self.cfg.cur_mbr_device, disks="\n - " + "\n - ".join(map(" ".join, self.cfg.disks)), partitions="\n - " + "\n - ".join(map(" ".join, self.cfg.partitions)), boot_partitions="\n - " + "\n - ".join(map(" ".join, self.cfg.boot_partitions))))
    builder = gtk.Builder()
    if os.path.exists('bootsetup.glade'):
      builder.add_from_file('bootsetup.glade')
    else:
      raise Exception("bootsetup.glade not found")
    # Get a handle on the glade file widgets we want to interact with
    self.AboutDialog = builder.get_object("about_dialog")
    self.AboutDialog.set_version(__version__)
    self.AboutDialog.set_copyright(__copyright__)
    self.AboutDialog.set_authors(__author__)
    self.Window = builder.get_object("bootsetup_main")
    self.LabelContextHelp = builder.get_object("label_context_help")
    self.RadioNone = builder.get_object("radiobutton_none")
    self.RadioNone.hide()
    self.RadioLilo = builder.get_object("radiobutton_lilo")
    self.RadioGrub2 = builder.get_object("radiobutton_grub2")
    self.ComboBoxMbr = builder.get_object("combobox_mbr")
    self.ComboBoxMbrEntry = self.ComboBoxMbr.get_internal_child(builder, "entry")
    self._add_combobox_cell_renderer(self.ComboBoxMbr, 1)
    self.LiloPart = builder.get_object("part_lilo")
    self.BootPartitionTreeview = builder.get_object("boot_partition_treeview")
    self.LabelCellRendererCombo = builder.get_object("label_cellrenderercombo")
    self.PartitionTreeViewColumn = builder.get_object("partition_treeviewcolumn")
    self.FileSystemTreeViewColumn = builder.get_object("filesystem_treeviewcolumn")
    self.OsTreeViewColumn = builder.get_object("os_treeviewcolumn")
    self.LabelTreeViewColumn = builder.get_object("label_treeviewcolumn")
    self.UpButton = builder.get_object("up_button")
    self.DownButton = builder.get_object("down_button")
    self.LiloUndoButton = builder.get_object("lilo_undo_button")
    self.LiloEditButton = builder.get_object("lilo_edit_button")
    self.Grub2Part = builder.get_object("part_grub2")
    self.Grub2EditButton = builder.get_object("grub2_edit_button")
    self.ComboBoxPartition = builder.get_object("combobox_partition")
    self.ComboBoxPartitionEntry = self.ComboBoxPartition.get_internal_child(builder, "entry")
    self._add_combobox_cell_renderer(self.ComboBoxPartition, 2)
    self._add_combobox_cell_renderer(self.ComboBoxPartition, 1, padding=20)
    self.ExecuteButton = builder.get_object("execute_button")
    self.DiskListStore = builder.get_object("boot_disk_list_store")
    self.PartitionListStore = builder.get_object("boot_partition_list_store")
    self.BootPartitionListStore = builder.get_object("boot_bootpartition_list_store")
    self.BootLabelListStore = builder.get_object("boot_label_list_store")
    # Initialize the contextual help box
    self.context_intro = _("<b>BootSetup will install a new bootloader on your computer.</b> \n\
\n\
A bootloader is required to load the main operating system of a computer and will initially display \
a boot menu if several operating systems are available on the same computer.")
    self.on_leave_notify_event(None)
    self.build_data_stores()
    self.update_buttons()
    # Connect signals
    builder.connect_signals(self)
  def run(self):
    """Enter the GTK main loop (blocks until gtk_main_quit)."""
    # indicates to gtk (and gdk) that we will use threads
    gtk.gdk.threads_init()
    # start the main gtk loop
    gtk.main()
  def _add_combobox_cell_renderer(self, comboBox, modelPosition, start=False, expand=False, padding=0):
    # Attach a text cell renderer bound to the given model column.
    cell = gtk.CellRendererText()
    cell.set_property('xalign', 0)
    cell.set_property('xpad', padding)
    if start:
      comboBox.pack_start(cell, expand)
    else:
      comboBox.pack_end(cell, expand)
    comboBox.add_attribute(cell, 'text', modelPosition)
  # General contextual help
  def on_leave_notify_event(self, widget, data=None):
    self.LabelContextHelp.set_markup(self.context_intro)
  def on_about_button_enter_notify_event(self, widget, data=None):
    self.LabelContextHelp.set_text(_("About BootSetup."))
  def on_bootloader_type_enter_notify_event(self, widget, data=None):
    self.LabelContextHelp.set_markup(_("Here you can choose between LiLo or the Grub2 bootloader.\n\
Both will boot your Linux and (if applicable) Windows.\n\
LiLo is the old way but still works pretty well. A good choice if you have a simple setup.\n\
Grub2 is a full-featured bootloader and more robust (does not rely on blocklists)."))
  def on_combobox_mbr_enter_notify_event(self, widget, data=None):
    self.LabelContextHelp.set_markup(_("Select the device that will contain your bootloader.\n\
This is commonly the device you set your Bios to boot on."))
  def on_boot_partition_treeview_enter_notify_event(self, widget, data=None):
    self.LabelContextHelp.set_markup(_("Here you must define a boot menu label for each \
of the operating systems that will be displayed in your bootloader menu.\n\
Any partition for which you do not set a boot menu label will not be configured and will \
not be displayed in the bootloader menu.\n\
If several kernels are available within one partition, the label you have chosen for that \
partition will be appended numerically to create multiple menu entries for each of these kernels.\n\
Any of these settings can be edited manually in the configuration file."))
  def on_up_button_enter_notify_event(self, widget, data=None):
    self.LabelContextHelp.set_markup(_("Use this arrow if you want to move the \
selected Operating System up to a higher rank.\n\
The partition with the highest rank will be displayed on the first line of the bootloader menu.\n\
Any of these settings can be edited manually in the configuration file."))
  def on_down_button_enter_notify_event(self, widget, data=None):
    self.LabelContextHelp.set_markup(_("Use this arrow if you want to move the \
selected Operating System down to a lower rank.\n\
The partition with the lowest rank will be displayed on the last line of the bootloader menu.\n\
Any of these settings can be edited manually in the configuration file."))
  def on_lilo_undo_button_enter_notify_event(self, widget, data=None):
    self.LabelContextHelp.set_markup(_("This will undo all settings (even manual modifications)."))
  def on_lilo_edit_button_enter_notify_event(self, widget, data=None):
    self.LabelContextHelp.set_markup(_("Experienced users can \
manually edit the LiLo configuration file.\n\
Please do not tamper with this file unless you know what you are doing and you have \
read its commented instructions regarding chrooted paths."))
  def on_combobox_partition_enter_notify_event(self, widget, data=None):
    # NOTE(review): "recommanded" is a typo in this msgid; fixing it here
    # would break existing .po/.mo translation lookups — correct it together
    # with the translation catalogs.
    self.LabelContextHelp.set_markup(_("Select the partition that will contain the Grub2 files.\n\
These will be in /boot/grub/. This partition should be readable by Grub2.\n\
It is recommanded to use your / partition, or your /boot partition if you have one."))
  def on_grub2_edit_button_enter_notify_event(self, widget, data=None):
    self.LabelContextHelp.set_markup(_("You can edit the etc/default/grub file for \
adjusting the Grub2 settings.\n\
This will not let you choose the label or the order of the menu entries, \
it's automatically done by Grub2."))
  def on_button_quit_enter_notify_event(self, widget, data=None):
    self.LabelContextHelp.set_text(_("Exit BootSetup program."))
  def on_execute_button_enter_notify_event(self, widget, data=None):
    self.LabelContextHelp.set_markup(_("Once you have defined your settings, \
click on this button to install your bootloader."))
  def build_data_stores(self):
    """Populate the disk/partition list stores from the current Config."""
    print('Building choice lists…', end='')
    sys.stdout.flush()
    if self.cfg.cur_bootloader == 'lilo':
      self.RadioLilo.activate()
      self.Window.set_focus(self.RadioLilo)
    elif self.cfg.cur_bootloader == 'grub2':
      self.RadioGrub2.activate()
      self.Window.set_focus(self.RadioGrub2)
    else:
      self.RadioNone.activate()
      self._grub2 = None
      self._lilo = None
      self.LiloPart.hide()
      self.Grub2Part.hide()
      self.Window.set_focus(self.RadioLilo)
    self.DiskListStore.clear()
    self.PartitionListStore.clear()
    self.BootPartitionListStore.clear()
    for d in self.cfg.disks:
      self.DiskListStore.append([d[0], d[2]])
    for p in self.cfg.partitions:  # for grub2
      self.PartitionListStore.append(p)
    for p in self.cfg.boot_partitions:  # for lilo
      p2 = list(p)  # copy p
      del p2[2]  # discard boot type
      p2[3] = re.sub(r'[()]', '', re.sub(r'_\(loader\)', '', re.sub(' ', '_', p2[3])))  # lilo does not like spaces and pretty print the label
      p2.append('gtk-edit')  # add a visual
      self.BootPartitionListStore.append(p2)
    self.ComboBoxMbrEntry.set_text(self.cfg.cur_mbr_device)
    self.ComboBoxPartitionEntry.set_text(self.cfg.cur_boot_partition)
    self.LabelCellRendererCombo.set_property("model", self.BootLabelListStore)
    self.LabelCellRendererCombo.set_property('text-column', 0)
    self.LabelCellRendererCombo.set_property('editable', True)
    self.LabelCellRendererCombo.set_property('cell_background', '#CCCCCC')
    print(' Done')
    sys.stdout.flush()
  # What to do when BootSetup logo is clicked
  def on_about_button_clicked(self, widget, data=None):
    self.AboutDialog.show()
  # What to do when the about dialog quit button is clicked
  def on_about_dialog_close(self, widget, data=None):
    self.AboutDialog.hide()
    return True
  # What to do when the exit X on the main window upper right is clicked
  def gtk_main_quit(self, widget, data=None):
    if self._lilo:
      del self._lilo
    if self._grub2:
      del self._grub2
    print("Bye _o/")
    gtk.main_quit()
  def process_gui_events(self):
    """
    be sure to treat any pending GUI events before continue
    """
    while gtk.events_pending():
      gtk.main_iteration()
  def update_gui_async(self, fct, *args, **kwargs):
    # Schedule fct to run on the GTK main loop.
    gobject.idle_add(fct, *args, **kwargs)
  def on_bootloader_type_clicked(self, widget, data=None):
    # Switch the active bootloader backend and the matching settings pane.
    if widget.get_active():
      if widget == self.RadioLilo:
        self.cfg.cur_bootloader = 'lilo'
        if self._grub2:
          self._grub2 = None
        self._lilo = Lilo(self.cfg.is_test)
        self.LiloPart.show()
        self.Grub2Part.hide()
      else:
        self.cfg.cur_bootloader = 'grub2'
        if self._lilo:
          self._lilo = None
        self._grub2 = Grub2(self.cfg.is_test)
        self.LiloPart.hide()
        self.Grub2Part.show()
      self.update_buttons()
  def on_combobox_mbr_changed(self, widget, data=None):
    self.cfg.cur_mbr_device = self.ComboBoxMbrEntry.get_text()
    self.update_buttons()
  def set_editing_mode(self, is_edit):
    # Toggle label-editing mode and refresh widget sensitivity.
    self._editing = is_edit
    self.update_buttons()
  def on_label_cellrenderercombo_editing_started(self, widget, path, data):
    self.set_editing_mode(True)
  def on_label_cellrenderercombo_editing_canceled(self, widget):
    self.set_editing_mode(False)
  def on_label_cellrenderercombo_edited(self, widget, row_number, new_text):
    """Validate and store the boot-menu label the user typed for a row."""
    row_number = int(row_number)
    max_chars = 15
    if ' ' in new_text:
      self._bootsetup.error_dialog(_("\nAn Operating System label should not contain spaces.\n\nPlease check and correct.\n"))
    elif len(new_text) > max_chars:
      self._bootsetup.error_dialog(_("\nAn Operating System label should not be more than {max} characters long.\n\nPlease check and correct.\n".format(max=max_chars)))
    else:
      model, it = self.BootPartitionTreeview.get_selection().get_selected()
      # Reject a label already used by another row (placeholder rows excluded).
      found = False
      for i, line in enumerate(model):
        if i == row_number or line[3] == _("Set..."):
          continue
        if line[3] == new_text:
          found = True
          break
      if found:
        self._bootsetup.error_dialog(_("You have used the same label for different Operating Systems. Please check and correct.\n"))
      else:
        model.set_value(it, 3, new_text)
        if new_text == _("Set..."):
          model.set_value(it, 4, "gtk-edit")
        else:
          model.set_value(it, 4, "gtk-yes")
    self.set_editing_mode(False)
  def on_up_button_clicked(self, widget, data=None):
    """
    Move the row items upward.
    """
    # Obtain selection
    sel = self.BootPartitionTreeview.get_selection()
    # Get selected path
    (model, rows) = sel.get_selected_rows()
    if not rows:
      return
    # Get new path for each selected row and swap items.
    for path1 in rows:
      # Move path2 upward
      path2 = (path1[0] - 1,)
      # If path2 is negative, the user tried to move first path up.
      if path2[0] < 0:
        return
      # Obtain iters and swap items.
      iter1 = model.get_iter(path1)
      iter2 = model.get_iter(path2)
      model.swap(iter1, iter2)
  def on_down_button_clicked(self, widget, data=None):
    """
    Move the row items downward.
    """
    # Obtain selection
    sel = self.BootPartitionTreeview.get_selection()
    # Get selected path
    (model, rows) = sel.get_selected_rows()
    if not rows:
      return
    # Get new path for each selected row and swap items.
    for path1 in rows:
      # Move path2 downward
      path2 = (path1[0] + 1,)
      # Defensive leftover from the "up" handler: path1[0] is never negative,
      # so path2[0] >= 1 here and this guard never fires.
      if path2[0] < 0:
        return
      # Obtain iters and swap items.
      iter1 = model.get_iter(path1)
      # If the second iter is invalid, the user tried to move the last item down.
      try:
        iter2 = model.get_iter(path2)
      except ValueError:
        return
      model.swap(iter1, iter2)
  def _create_lilo_config(self):
    """Build the LiLo configuration from the rows the user labelled."""
    partitions = []
    self.cfg.cur_boot_partition = None
    for row in self.BootPartitionListStore:
      p = list(row)
      if p[4] == "gtk-yes":
        dev = p[0]
        fs = p[1]
        t = "chain"
        for p2 in self.cfg.boot_partitions:
          if p2[0] == dev:
            t = p2[2]
            break
        label = p[3]
        # First labelled Linux partition becomes the boot partition.
        if not self.cfg.cur_boot_partition and t == 'linux':
          self.cfg.cur_boot_partition = dev
        partitions.append([dev, fs, t, label])
    if self.cfg.cur_boot_partition:
      self._lilo.createConfiguration(self.cfg.cur_mbr_device, self.cfg.cur_boot_partition, partitions)
    else:
      # NOTE(review): "choosen" typo in this msgid; fix together with the
      # translation catalogs, not here.
      self._bootsetup.error_dialog(_("Sorry, BootSetup is unable to find a Linux filesystem on your choosen boot entries, so cannot install LiLo.\n"))
  def on_lilo_edit_button_clicked(self, widget, data=None):
    """Open the (possibly freshly generated) LiLo config in a text editor."""
    lilocfg = self._lilo.getConfigurationPath()
    if not os.path.exists(lilocfg):
      self._custom_lilo = True
      self.update_buttons()
      self._create_lilo_config()
    if os.path.exists(lilocfg):
      launched = False
      for editor in self._editors:
        try:
          # NOTE(review): passing a list together with shell=True is
          # suspicious — verify libsalt's execCall semantics.
          cmd = editor.split(' ') + [lilocfg]
          slt.execCall(cmd, shell=True, env=None)
          launched = True
          break
        except Exception:
          # Bug fix: was a bare "except:", which also swallowed
          # SystemExit/KeyboardInterrupt while probing editors.
          pass
      if not launched:
        self._custom_lilo = False
        self._bootsetup.error_dialog(_("Sorry, BootSetup is unable to find a suitable text editor in your system. You will not be able to manually modify the LiLo configuration.\n"))
  def on_lilo_undo_button_clicked(self, widget, data=None):
    # Discard any (manually edited) LiLo configuration file.
    lilocfg = self._lilo.getConfigurationPath()
    if os.path.exists(lilocfg):
      os.remove(lilocfg)
    self._custom_lilo = False
    self.update_buttons()
  def on_combobox_partition_changed(self, widget, data=None):
    self.cfg.cur_boot_partition = self.ComboBoxPartitionEntry.get_text()
    self.update_buttons()
  def on_grub2_edit_button_clicked(self, widget, data=None):
    """Mount the target partition if needed and edit etc/default/grub."""
    partition = os.path.join("/dev", self.cfg.cur_boot_partition)
    if slt.isMounted(partition):
      mp = slt.getMountPoint(partition)
      doumount = False
    else:
      mp = slt.mountDevice(partition)
      doumount = True
    grub2cfg = os.path.join(mp, "etc/default/grub")
    if os.path.exists(grub2cfg):
      launched = False
      for editor in self._editors:
        try:
          cmd = editor.split(' ') + [grub2cfg]
          slt.execCall(cmd, shell=True, env=None)
          launched = True
          break
        except Exception:
          # Bug fix: was a bare "except:" (see on_lilo_edit_button_clicked).
          pass
      if not launched:
        self._bootsetup.error_dialog(_("Sorry, BootSetup is unable to find a suitable text editor in your system. You will not be able to manually modify the Grub2 default configuration.\n"))
    if doumount:
      slt.umountDevice(mp)
  def update_buttons(self):
    """Recompute widget sensitivity from the current configuration state."""
    install_ok = False
    multiple = False
    grub2_edit_ok = False
    if self.cfg.cur_mbr_device and os.path.exists("/dev/{0}".format(self.cfg.cur_mbr_device)) and slt.getDiskInfo(self.cfg.cur_mbr_device):
      if self.cfg.cur_bootloader == 'lilo' and not self._editing:
        if len(self.BootPartitionListStore) > 1:
          multiple = True
        for bp in self.BootPartitionListStore:
          if bp[4] == "gtk-yes":
            install_ok = True
      elif self.cfg.cur_bootloader == 'grub2':
        if self.cfg.cur_boot_partition and os.path.exists("/dev/{0}".format(self.cfg.cur_boot_partition)) and slt.getPartitionInfo(self.cfg.cur_boot_partition):
          install_ok = True
        if install_ok:
          # Peek at the target partition to see if etc/default/grub exists.
          partition = os.path.join("/dev", self.cfg.cur_boot_partition)
          if slt.isMounted(partition):
            mp = slt.getMountPoint(partition)
            doumount = False
          else:
            mp = slt.mountDevice(partition)
            doumount = True
          grub2_edit_ok = os.path.exists(os.path.join(mp, "etc/default/grub"))
          if doumount:
            slt.umountDevice(mp)
    self.RadioLilo.set_sensitive(not self._editing)
    self.RadioGrub2.set_sensitive(not self._editing)
    self.ComboBoxMbr.set_sensitive(not self._editing)
    self.BootPartitionTreeview.set_sensitive(not self._custom_lilo)
    self.UpButton.set_sensitive(not self._editing and multiple)
    self.DownButton.set_sensitive(not self._editing and multiple)
    self.LiloUndoButton.set_sensitive(not self._editing and self._custom_lilo)
    self.LiloEditButton.set_sensitive(not self._editing and install_ok)
    self.Grub2EditButton.set_sensitive(grub2_edit_ok)
    self.ExecuteButton.set_sensitive(not self._editing and install_ok)
  def on_execute_button_clicked(self, widget, data=None):
    """Install the selected bootloader with the gathered settings."""
    if self.cfg.cur_bootloader == 'lilo':
      if not os.path.exists(self._lilo.getConfigurationPath()):
        self._create_lilo_config()
      self._lilo.install()
    elif self.cfg.cur_bootloader == 'grub2':
      self._grub2.install(self.cfg.cur_mbr_device, self.cfg.cur_boot_partition)
    self.installation_done()
  def installation_done(self):
    # Notify the user and leave the GTK main loop.
    print("Bootloader Installation Done.")
    msg = "<b>{0}</b>".format(_("Bootloader installation process completed."))
    self._bootsetup.info_dialog(msg)
    self.gtk_main_quit(self.Window)
|
{"/bootsetup/bootsetup_gtk.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathergui.py"], "/bootsetup/gathergui.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"], "/bootsetup/bootsetup.py": ["/bootsetup/__init__.py", "/bootsetup/bootsetup_gtk.py", "/bootsetup/bootsetup_curses.py"], "/bootsetup/bootsetup_curses.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathercurses.py"], "/bootsetup/gathercurses.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"]}
|
23,094
|
jrd/bootsetup
|
refs/heads/master
|
/bootsetup/__init__.py
|
#!/usr/bin/env python
# coding: utf-8
# vim:et:sta:sts=2:sw=2:ts=2:tw=0:
"""
BootSetup helps installing LiLo or Grub2 on your computer.
"""
from __future__ import unicode_literals, print_function, division, absolute_import
# Package metadata. gathergui.py imports __version__, __copyright__ and
# __author__ to populate the GTK "about" dialog.
__app__ = 'bootsetup'
__copyright__ = 'Copyright 2013-2014, Salix OS'
__author__ = 'Cyrille Pontvieux <jrd@salixos.org>, Pierrick Le Brun <akuna@salixos.org>'
__credits__ = ['Cyrille Pontvieux', 'Pierrick Le Brun']
__maintainer__ = 'Cyrille Pontvieux'
__email__ = 'jrd@salixos.org'
__license__ = 'GPLv2+'
__version__ = '0.1'
|
{"/bootsetup/bootsetup_gtk.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathergui.py"], "/bootsetup/gathergui.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"], "/bootsetup/bootsetup.py": ["/bootsetup/__init__.py", "/bootsetup/bootsetup_gtk.py", "/bootsetup/bootsetup_curses.py"], "/bootsetup/bootsetup_curses.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathercurses.py"], "/bootsetup/gathercurses.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"]}
|
23,095
|
jrd/bootsetup
|
refs/heads/master
|
/bootsetup/grub2.py
|
#!/usr/bin/env python
# coding: utf-8
# vim:et:sta:sts=2:sw=2:ts=2:tw=0:
"""
Grub2 for BootSetup.
"""
from __future__ import unicode_literals, print_function, division, absolute_import
import tempfile
import os
import sys
import codecs
import libsalt as slt
class Grub2:
isTest = False
_prefix = None
_tmp = None
_bootInBootMounted = False
_procInBootMounted = False
  def __init__(self, isTest):
    """Create the working temp directory for the Grub2 installation.

    isTest: when True, commands are only logged (see __debug), not executed.
    """
    self.isTest = isTest
    self._prefix = "bootsetup.grub2-"
    self._tmp = tempfile.mkdtemp(prefix=self._prefix)
    # Side effect: redirects libsalt's temporary mount directory into our
    # own temp dir so __del__ can clean everything up.
    slt.mounting._tempMountDir = os.path.join(self._tmp, 'mounts')
    self.__debug("tmp dir = " + self._tmp)
def __del__(self):
if self._tmp and os.path.exists(self._tmp):
self.__debug("cleanning " + self._tmp)
try:
if os.path.exists(slt.mounting._tempMountDir):
self.__debug("Remove " + slt.mounting._tempMountDir)
os.rmdir(slt.mounting._tempMountDir)
self.__debug("Remove " + self._tmp)
os.rmdir(self._tmp)
except:
pass
def __debug(self, msg):
if self.isTest:
print("Debug: " + msg)
with codecs.open("bootsetup.log", "a+", "utf-8") as fdebug:
fdebug.write("Debug: {0}\n".format(msg))
def _mountBootPartition(self, bootPartition):
"""
Return the mount point
"""
self.__debug("bootPartition = " + bootPartition)
if slt.isMounted(bootPartition):
self.__debug("bootPartition already mounted")
return slt.getMountPoint(bootPartition)
else:
self.__debug("bootPartition not mounted")
return slt.mountDevice(bootPartition)
def _mountBootInBootPartition(self, mountPoint):
# assume that if the mount_point is /, any /boot directory is already accessible/mounted
if mountPoint != '/' and os.path.exists(os.path.join(mountPoint, 'etc/fstab')):
self.__debug("mp != / and etc/fstab exists, will try to mount /boot by chrooting")
try:
self.__debug("grep -q /boot {mp}/etc/fstab && chroot {mp} /sbin/mount /boot".format(mp=mountPoint))
if slt.execCall("grep -q /boot {mp}/etc/fstab && chroot {mp} /sbin/mount /boot".format(mp=mountPoint)):
self.__debug("/boot mounted in " + mountPoint)
self._bootInBootMounted = True
except:
pass
def _bindProcSysDev(self, mountPoint):
"""
bind /proc /sys and /dev into the boot partition
"""
if mountPoint != "/":
self.__debug("mount point ≠ / so mount /dev, /proc and /sys in " + mountPoint)
self._procInBootMounted = True
slt.execCall('mount -o bind /dev {mp}/dev'.format(mp=mountPoint))
slt.execCall('mount -o bind /proc {mp}/proc'.format(mp=mountPoint))
slt.execCall('mount -o bind /sys {mp}/sys'.format(mp=mountPoint))
def _unbindProcSysDev(self, mountPoint):
"""
unbind /proc /sys and /dev into the boot partition
"""
if self._procInBootMounted:
self.__debug("mount point ≠ / so umount /dev, /proc and /sys in " + mountPoint)
slt.execCall('umount {mp}/dev'.format(mp=mountPoint))
slt.execCall('umount {mp}/proc'.format(mp=mountPoint))
slt.execCall('umount {mp}/sys'.format(mp=mountPoint))
def _copyAndInstallGrub2(self, mountPoint, device):
if self.isTest:
self.__debug("/usr/sbin/grub-install --boot-directory {bootdir} --no-floppy {dev}".format(bootdir=os.path.join(mountPoint, "boot"), dev=device))
return True
else:
return slt.execCall("/usr/sbin/grub-install --boot-directory {bootdir} --no-floppy {dev}".format(bootdir=os.path.join(mountPoint, "boot"), dev=device))
def _installGrub2Config(self, mountPoint):
if os.path.exists(os.path.join(mountPoint, 'etc/default/grub')) and os.path.exists(os.path.join(mountPoint, 'usr/sbin/update-grub')):
self.__debug("grub2 package is installed on the target partition, so it will be used to generate the grub.cfg file")
# assume everything is installed on the target partition, grub2 package included.
if self.isTest:
self.__debug("chroot {mp} /usr/sbin/update-grub".format(mp=mountPoint))
else:
slt.execCall("chroot {mp} /usr/sbin/update-grub".format(mp=mountPoint))
else:
self.__debug("grub2 not installed on the target partition, so grub_mkconfig will directly be used to generate the grub.cfg file")
# tiny OS installed on that mount point, so we cannot chroot on it to install grub2 config.
if self.isTest:
self.__debug("/usr/sbin/grub-mkconfig -o {cfg}".format(cfg=os.path.join(mountPoint, "boot/grub/grub.cfg")))
else:
slt.execCall("/usr/sbin/grub-mkconfig -o {cfg}".format(cfg=os.path.join(mountPoint, "boot/grub/grub.cfg")))
def _umountAll(self, mountPoint):
self.__debug("umountAll")
if mountPoint:
self.__debug("umounting main mount point " + mountPoint)
self._unbindProcSysDev(mountPoint)
if self._bootInBootMounted:
self.__debut("/boot mounted in " + mountPoint + ", so umount it")
slt.execCall("chroot {mp} /sbin/umount /boot".format(mp=mountPoint))
if mountPoint != '/':
self.__debug("umain mount point ≠ '/' → umount " + mountPoint)
slt.umountDevice(mountPoint)
self._bootInBootMounted = False
self._procInBootMounted = False
def install(self, mbrDevice, bootPartition):
mbrDevice = os.path.join("/dev", mbrDevice)
bootPartition = os.path.join("/dev", bootPartition)
self.__debug("mbrDevice = " + mbrDevice)
self.__debug("bootPartition = " + bootPartition)
self._bootInBootMounted = False
self._procInBootMounted = False
mp = None
try:
mp = self._mountBootPartition(bootPartition)
self.__debug("mp = " + unicode(mp))
self._mountBootInBootPartition(mp)
if self._copyAndInstallGrub2(mp, mbrDevice):
self._installGrub2Config(mp)
else:
sys.stderr.write("Grub2 cannot be installed on this disk [{0}]\n".format(mbrDevice))
finally:
self._umountAll(mp)
|
{"/bootsetup/bootsetup_gtk.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathergui.py"], "/bootsetup/gathergui.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"], "/bootsetup/bootsetup.py": ["/bootsetup/__init__.py", "/bootsetup/bootsetup_gtk.py", "/bootsetup/bootsetup_curses.py"], "/bootsetup/bootsetup_curses.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathercurses.py"], "/bootsetup/gathercurses.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"]}
|
23,096
|
jrd/bootsetup
|
refs/heads/master
|
/bootsetup/config.py
|
#!/usr/bin/env python
# coding: utf-8
# vim:et:sta:sts=2:sw=2:ts=2:tw=0:
"""
Config class helps storing the configuration for the bootloader setup.
"""
from __future__ import unicode_literals, print_function, division, absolute_import
import sys
import re
import codecs
import os
import libsalt as slt
class Config:
  """
  Configuration for BootSetup.

  Gathers the machine's disks, partitions and bootable partitions
  (or hard-coded fake data in test-data mode) and keeps the user's
  current choices: bootloader, boot partition and MBR device.
  """
  # Class-level defaults; _get_current_config() rebinds them per instance.
  disks = []            # [device, partition table type, human description]
  partitions = []       # [device, fstype, human description]
  boot_partitions = []  # [device, fstype, boot type, OS name, label]
  cur_bootloader = None
  cur_boot_partition = None
  cur_mbr_device = None
  is_test = False
  use_test_data = False
  is_live = False

  def __init__(self, bootloader, target_partition, is_test, use_test_data):
    """
    bootloader: 'lilo', 'grub2' or None.
    target_partition: e.g. '/dev/sda5' or None; stored without '/dev/'.
    is_test: enables debug logging and skips the SaLT live-env check.
    use_test_data: use fake disks/partitions instead of probing.
    """
    self.cur_bootloader = bootloader
    self.cur_boot_partition = target_partition and re.sub(r'/dev/', '', target_partition) or ''
    self.cur_mbr_device = ''
    self.is_test = is_test
    self.use_test_data = use_test_data
    self._get_current_config()

  def __debug(self, msg):
    # In test mode, echo debug messages and append them to bootsetup.log.
    if self.is_test:
      print("Debug: " + msg)
      with codecs.open("bootsetup.log", "a+", "utf-8") as fdebug:
        fdebug.write("Debug: {0}\n".format(msg))

  def _get_current_config(self):
    """Fill disks/partitions/boot_partitions and pick a default MBR device."""
    print('Gathering current configuration…', end='')
    if self.is_test:
      print('')
    sys.stdout.flush()
    if self.is_test:
      self.is_live = False
    else:
      self.is_live = slt.isSaLTLiveEnv()
    if self.use_test_data:
      self.disks = [
        ['sda', 'msdos', 'WDC100 (100GB)'],
        ['sdb', 'gpt', 'SGT350 (350GB)']
      ]
      self.partitions = [
        ['sda1', 'ntfs', 'WinVista (20GB)'],
        ['sda5', 'ext2', 'Salix (80GB)'],
        ['sdb1', 'fat32', 'Data (300GB)'],
        ['sdb2', 'ext4', 'Debian (50GB)']
      ]
      self.boot_partitions = [
        ['sda5', 'ext2', 'linux', 'Salix', 'Salix 14.0'],
        ['sda1', 'ntfs', 'chain', 'Windows', 'Vista'],
        ['sdb2', 'ext4', 'linux', 'Debian', 'Debian 7']
      ]
      if not self.cur_boot_partition:
        # Fixed: this used to assign to 'cut_boot_partition' (typo), so
        # cur_boot_partition stayed empty in test-data mode.
        self.cur_boot_partition = 'sda5'
    else:
      self.disks = []
      self.partitions = []
      for disk_device in slt.getDisks():
        di = slt.getDiskInfo(disk_device)
        self.disks.append([disk_device, di['type'], "{0} ({1})".format(di['model'], di['sizeHuman'])])
        for p in slt.getPartitions(disk_device):
          pi = slt.getPartitionInfo(p)
          self.partitions.append([p, pi['fstype'], "{0} ({1})".format(pi['label'], pi['sizeHuman'])])
      self.boot_partitions = []
      probes = []
      if not self.is_live:
        # os-prober doesn't want to probe for /
        slashDevice = slt.execGetOutput(r"readlink -f $(df / | tail -n 1 | cut -d' ' -f1)")[0]
        slashFS = slt.getFsType(re.sub(r'^/dev/', '', slashDevice))
        osProbesPath = None
        for p in ("/usr/lib64/os-probes/mounted/90linux-distro", "/usr/lib/os-probes/mounted/90linux-distro"):
          if os.path.exists(p):
            osProbesPath = p
            break
        if osProbesPath:
          try:
            os.remove("/var/lib/os-prober/labels")  # ensure there is no previous labels
          except:
            pass
          self.__debug("Root device {0} ({1})".format(slashDevice, slashFS))
          self.__debug(osProbesPath + " " + slashDevice + " / " + slashFS)
          slashDistro = slt.execGetOutput([osProbesPath, slashDevice, '/', slashFS])
          if slashDistro:
            probes = slashDistro
            self.__debug("Probes: " + unicode(probes))
      osProberPath = None
      for p in ('/usr/bin/os-prober', '/usr/sbin/os-prober'):
        if os.path.exists(p):
          osProberPath = p
          break
      if osProberPath:
        probes.extend(slt.execGetOutput(osProberPath, shell=False))
        self.__debug("Probes: " + unicode(probes))
      for probe in probes:
        probe = unicode(probe).strip()  # ensure clean line
        # Skip empty lines too: probe[0] on '' would raise IndexError.
        if not probe or probe[0] != '/':
          continue
        probe_info = probe.split(':')
        probe_dev = re.sub(r'/dev/', '', probe_info[0])
        probe_os = probe_info[1]
        probe_label = probe_info[2]
        probe_boottype = probe_info[3]
        if probe_boottype == 'efi':  # skip efi entry
          continue
        try:
          probe_fstype = [p[1] for p in self.partitions if p[0] == probe_dev][0]
        except IndexError:
          probe_fstype = ''
        self.boot_partitions.append([probe_dev, probe_fstype, probe_boottype, probe_os, probe_label])
    if self.cur_boot_partition:
      # use the disk of that partition.
      self.cur_mbr_device = re.sub(r'^(.+?)[0-9]*$', r'\1', self.cur_boot_partition)
    elif len(self.disks) > 0:
      # use the first disk.
      self.cur_mbr_device = self.disks[0][0]
    print(' Done')
    sys.stdout.flush()
|
{"/bootsetup/bootsetup_gtk.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathergui.py"], "/bootsetup/gathergui.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"], "/bootsetup/bootsetup.py": ["/bootsetup/__init__.py", "/bootsetup/bootsetup_gtk.py", "/bootsetup/bootsetup_curses.py"], "/bootsetup/bootsetup_curses.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathercurses.py"], "/bootsetup/gathercurses.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"]}
|
23,097
|
jrd/bootsetup
|
refs/heads/master
|
/bootsetup/bootsetup.py
|
#!/usr/bin/env python
# coding: utf-8
# vim:et:sta:sts=2:sw=2:ts=2:tw=0:
"""
BootSetup helps installing LiLo or Grub2 on your computer.
This is the launcher.
"""
from __future__ import unicode_literals, print_function, division, absolute_import
from .__init__ import __app__, __copyright__, __author__, __license__, __version__
import abc
import os
import sys
import gettext
class BootSetup:
  """
  Abstract base class for the BootSetup user interfaces (GTK and curses).

  Stores the launch parameters and defines the contract every concrete
  UI must implement: run_setup, info_dialog and error_dialog.
  """
  # Python 2 style abstract class declaration (file targets Python 2).
  __metaclass__ = abc.ABCMeta
  def __init__(self, appName, bootloader, targetPartition, isTest, useTestData):
    # appName: gettext domain / application name.
    # bootloader: 'lilo', 'grub2' or None (undecided).
    # targetPartition: partition path or None (let the UI guess).
    self._appName = appName
    self._bootloader = bootloader
    self._targetPartition = targetPartition
    self._isTest = isTest
    self._useTestData = useTestData
    print("BootSetup v{ver}".format(ver=__version__))
  @abc.abstractmethod
  def run_setup(self):
    """
    Launch the UI, exit at the end of the program
    """
    raise NotImplementedError()
  @abc.abstractmethod
  def info_dialog(self, message, title=None, parent=None):
    """
    Displays an information message.
    """
    raise NotImplementedError()
  @abc.abstractmethod
  def error_dialog(self, message, title=None, parent=None):
    """
    Displays an error message.
    """
    raise NotImplementedError()
def usage():
  """Print the command-line help (version, license and options) on stdout."""
  print("""BootSetup v{ver}
{copyright}
{license}
{author}
bootsetup.py [--help] [--version] [--test [--data]] [bootloader] [partition]
Parameters:
--help: Show this help message
--version: Show the BootSetup version
--test: Run it in test mode
--data: Run it with some pre-filled data
bootloader: could be lilo or grub2, by default nothing is proposed. You could use "_" to tell it's undefined.
partition: target partition to install the bootloader.
The disk of that partition is, by default, where the bootloader will be installed
The partition will be guessed by default if not specified:
⋅ First Linux selected partition of the selected disk for LiLo.
⋅ First Linux partition, in order, of the selected disk for Grub2. This could be changed in the UI.
""".format(ver=__version__, copyright=__copyright__, license=__license__, author=__author__))
def print_err(*args):
  """Write the arguments to stderr, space-separated, UTF-8 encoded, newline-terminated."""
  message = ' '.join(unicode(a) for a in args) + "\n"
  sys.stderr.write(message.encode('utf-8'))
def die(s, exit=1):
  """Print *s* on stderr, then exit with status *exit* (0/false disables exiting)."""
  print_err(s)
  if exit:
    sys.exit(exit)
def find_locale_dir():
  """
  Locate the gettext locale directory: the per-user one when this
  module lives under ~/.local, the system-wide one otherwise.
  """
  if '.local' in __file__:
    return os.path.expanduser(os.path.join('~', '.local', 'share', 'locale'))
  return os.path.join('usr', 'share', 'locale')
def main(args=sys.argv[1:]):
  """
  Parse the command line then launch the GTK UI (when DISPLAY is set)
  or the curses UI.

  Recognized arguments: --help, --version, --test, --data (honored only
  after --test), then an optional bootloader ('lilo', 'grub2' or '_')
  and an optional target partition.
  """
  script_dir = os.path.dirname(__file__)
  if script_dir:
    os.chdir(script_dir)
  # A graphical session is assumed as soon as DISPLAY is set.
  is_graphic = bool(os.environ.get('DISPLAY'))
  is_test = False
  use_test_data = False
  bootloader = None
  target_partition = None
  gettext.install(domain=__app__, localedir=find_locale_dir(), unicode=True)
  for option in args:
    if not option:
      continue
    if option == '--help':
      usage()
      sys.exit(0)
    elif option == '--version':
      print(__version__)
      sys.exit(0)
    elif option == '--test':
      is_test = True
      print_err("*** Testing mode ***")
    elif is_test and option == '--data':
      use_test_data = True
      print_err("*** Test data mode ***")
    elif option[0] == '-':
      die(_("Unrecognized parameter '{0}'.").format(option))
    elif bootloader is None:
      bootloader = option
    elif target_partition is None:
      target_partition = option
    else:
      die(_("Unrecognized parameter '{0}'.").format(option))
  if bootloader not in ('lilo', 'grub2', '_', None):
    die(_("bootloader parameter should be lilo, grub2 or '_', given {0}.").format(bootloader))
  if bootloader == '_':
    bootloader = None  # '_' is the explicit placeholder for "undefined"
  if target_partition and not os.path.exists(target_partition):
    die(_("Partition {0} not found.").format(target_partition))
  if is_graphic:
    from .bootsetup_gtk import BootSetupGtk as BootSetupImpl
  else:
    from .bootsetup_curses import BootSetupCurses as BootSetupImpl
  bootsetup = BootSetupImpl(__app__, bootloader, target_partition, is_test, use_test_data)
  bootsetup.run_setup()
# Allow running this module directly as a script.
if __name__ == '__main__':
  main()
|
{"/bootsetup/bootsetup_gtk.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathergui.py"], "/bootsetup/gathergui.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"], "/bootsetup/bootsetup.py": ["/bootsetup/__init__.py", "/bootsetup/bootsetup_gtk.py", "/bootsetup/bootsetup_curses.py"], "/bootsetup/bootsetup_curses.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathercurses.py"], "/bootsetup/gathercurses.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"]}
|
23,098
|
jrd/bootsetup
|
refs/heads/master
|
/bootsetup/lilo.py
|
#!/usr/bin/env python
# coding: utf-8
# vim:et:sta:sts=2:sw=2:ts=2:tw=0:
"""
LiLo for BootSetup.
"""
from __future__ import unicode_literals, print_function, division, absolute_import
import sys
import tempfile
import shutil
import os
import glob
import codecs
import libsalt as slt
from subprocess import CalledProcessError
from operator import itemgetter
class Lilo:
  """
  Generates a lilo.conf for the detected operating systems and runs
  lilo on the chosen MBR device, mounting/unmounting target partitions
  as needed (possibly from a LiveCD environment).
  """
  isTest = False        # test mode: log commands, run lilo with -t
  _prefix = None        # prefix for the temporary working directory
  _tmp = None           # temporary working directory path
  _mbrDevice = None     # /dev/... device receiving the bootloader
  _bootPartition = None # /dev/... partition holding /boot files
  _partitions = None    # [[device, fstype, boot type, label], ...]
  # NOTE(review): mutable class-level default; instance methods rebind it
  # before use, so it is not shared in practice.
  _bootsMounted = []
  # lilo.conf header; {boot}, {mp} and {vga} are filled at generation time.
  _cfgTemplate = """# LILO configuration file
# Generated by BootSetup
#
# Start LILO global section
# Append any additional kernel parameters:
append = "vt.default_utf8=1 "
boot = {boot}
lba32
compact
# Boot BMP Image.
# Bitmap in BMP format: 640x480x8
bitmap = {mp}/boot/salix.bmp
# Menu colors (foreground, background, shadow, highlighted
# foreground, highlighted background, highlighted shadow):
bmp-colors = 255,20,255,20,255,20
# Location of the option table: location x, location y, number of
# columns, lines per column (max 15), "spill" this is how many
# entries must be in the first column before the next begins to
# be used. We do not specify it here, as there is just one column.
bmp-table = 60,6,1,16
# Timer location x, timer location y, foreground color,
# background color, shadow color.
bmp-timer = 65,29,0,255
# Standard menu.
# Or, you can comment out the bitmap menu above and
# use a boot message with the standard menu:
# message = /boot/boot_message.txt
# Wait until the timeout to boot (if commented out, boot the
# first entry immediately):
prompt
# Timeout before the first entry boots.
# This is given in tenths of a second, so 600 for every minute:
timeout = 50
# Override dangerous defaults that rewrite the partition table:
change-rules
reset
# Normal VGA console
# vga = normal
vga = {vga}
# End LILO global section
#
# BootSetup can be executed from a LiveCD. This means that lilo
# could be issued from a 'chrooted' Linux partition, which would
# happen to be the first Linux partition listed below.
# Therefore the following paths are relevant only when viewed
# from that 'chrooted' partition's perspective. Please take this
# constraint into consideration if you must modify this file
# or else BootSetup will fail.
#
# If later on you want to use this configuration file directly
# with lilo in a command line, use the following syntax:
# "lilo -v -C /etc/bootsetup/lilo.conf" instead of the traditional
# "lilo -v" command. You must of course issue that command from
# the operating system holding /etc/bootsetup/lilo.conf and ensure that
# all partitions referenced in it are mounted on the appropriate
# mountpoints.
"""
  def __init__(self, isTest):
    """Create the temporary working directory and point libsalt's mount dir into it."""
    self.isTest = isTest
    self._prefix = "bootsetup.lilo-"
    self._tmp = tempfile.mkdtemp(prefix=self._prefix)
    # NOTE(review): pokes a private of libsalt so device mounts land under our tmp dir.
    slt.mounting._tempMountDir = os.path.join(self._tmp, 'mounts')
    self.__debug("tmp dir = " + self._tmp)
  def __del__(self):
    """Best-effort cleanup of the generated config and temporary directories."""
    if self._tmp and os.path.exists(self._tmp):
      self.__debug("cleanning " + self._tmp)
      try:
        # Remove the config file first, then the (empty) directories,
        # innermost first: rmdir fails on non-empty directories.
        cfgPath = self.getConfigurationPath()
        if os.path.exists(cfgPath):
          self.__debug("Remove " + cfgPath)
          os.remove(cfgPath)
        if os.path.exists(slt.mounting._tempMountDir):
          self.__debug("Remove " + slt.mounting._tempMountDir)
          os.rmdir(slt.mounting._tempMountDir)
        self.__debug("Remove " + self._tmp)
        os.rmdir(self._tmp)
      except:
        # deliberately silent: never raise from a destructor
        pass
def __debug(self, msg):
if self.isTest:
print("Debug: " + msg)
with codecs.open("bootsetup.log", "a+", "utf-8") as fdebug:
fdebug.write("Debug: {0}\n".format(msg))
def getConfigurationPath(self):
return os.path.join(self._tmp, "lilo.conf")
def _mountBootPartition(self):
"""
Return the mount point
"""
self.__debug("bootPartition = " + self._bootPartition)
if slt.isMounted(self._bootPartition):
self.__debug("bootPartition already mounted")
mp = slt.getMountPoint(self._bootPartition)
else:
self.__debug("bootPartition not mounted")
mp = slt.mountDevice(self._bootPartition)
if mp:
self._mountBootInPartition(mp)
return mp
  def _mountBootInPartition(self, mountPoint):
    """
    If the partition's fstab declares a separate /boot, mount it inside
    the partition and record it in self._bootsMounted. Best-effort.
    """
    # assume that if the mount_point is /, any /boot directory is already accessible/mounted
    fstab = os.path.join(mountPoint, 'etc/fstab')
    bootdir = os.path.join(mountPoint, 'boot')
    if mountPoint != '/' and os.path.exists(fstab) and os.path.exists(bootdir):
      self.__debug("mp != / and etc/fstab + boot exists, will try to mount /boot by reading fstab")
      try:
        # Shell trick: 'set --' splits the fstab line so $1 is the device
        # and $3 the filesystem type.
        self.__debug('set -- $(grep /boot {fstab}) && echo "$1,$3"'.format(fstab=fstab))
        (bootDev, bootType) = slt.execGetOutput('set -- $(grep /boot {fstab}) && echo "$1,$3"'.format(fstab=fstab), shell=True)[0].split(',')
        if bootDev and not os.path.ismount(bootdir):
          mp = slt.mountDevice(bootDev, fsType=bootType, mountPoint=bootdir)
          if mp:
            self._bootsMounted.append(mp)
            self.__debug("/boot mounted in " + mp)
      except:
        # deliberate best-effort: no /boot entry or a failed mount is not fatal
        pass
def _mountPartitions(self, mountPointList):
"""
Fill a list of mount points for each partition
"""
if self._partitions:
partitionsToMount = [p for p in self._partitions if p[2] == "linux"]
self.__debug("mount partitions: " + unicode(partitionsToMount))
for p in partitionsToMount:
dev = os.path.join("/dev", p[0])
self.__debug("mount partition " + dev)
if slt.isMounted(dev):
mp = slt.getMountPoint(dev)
else:
mp = slt.mountDevice(dev)
self.__debug("mount partition " + dev + " => " + unicode(mp))
if mp:
mountPointList[p[0]] = mp
self._mountBootInPartition(mp)
else:
raise Exception("Cannot mount {d}".format(d=dev))
def _umountAll(self, mountPoint, mountPointList):
self.__debug("umountAll")
if mountPoint:
for mp in self._bootsMounted:
self.__debug("umounting " + unicode(mp))
slt.umountDevice(mp, deleteMountPoint=False)
self._bootsMounted = []
if mountPointList:
self.__debug("umount other mount points: " + unicode(mountPointList))
for mp in mountPointList.values():
if mp == mountPoint:
continue # skip it, will be unmounted just next
self.__debug("umount " + unicode(mp))
slt.umountDevice(mp)
if mountPoint != '/':
self.__debug("main mount point ≠ '/' → umount " + mountPoint)
slt.umountDevice(mountPoint)
def _createLiloSections(self, mountPointList):
"""
Return a list of lilo section string for each partition.
There could be more section than partitions if there are multiple kernels.
"""
sections = []
if self._partitions:
for p in self._partitions:
device = os.path.join("/dev", p[0])
fs = p[1]
bootType = p[2]
label = p[3]
if bootType == 'chain':
sections.append(self._getChainLiloSection(device, label))
elif bootType == 'linux':
mp = mountPointList[p[0]]
sections.extend(self._getLinuxLiloSections(device, fs, mp, label))
else:
sys.err.write("The boot type {type} is not supported.\n".format(type=bootType))
return sections
def _getChainLiloSection(self, device, label):
"""
Returns a string for a chainloaded section
"""
self.__debug("Section 'chain' for " + device + " with label: " + label)
return """# {label} chain section
other = {device}
label = {label}
""".format(device=device, label=label)
def _getLinuxLiloSections(self, device, fs, mp, label):
"""
Returns a list of string sections, one for each kernel+initrd
"""
sections = []
self.__debug("Section 'linux' for " + device + "/" + fs + ", mounted on " + mp + " with label: " + label)
kernelList = sorted(glob.glob("{mp}/boot/vmlinuz*".format(mp=mp)))
initrdList = sorted(glob.glob("{mp}/boot/initr*".format(mp=mp)))
for l in (kernelList, initrdList):
for el in l:
if os.path.isdir(el) or os.path.islink(el):
l.remove(el)
self.__debug("kernelList: " + unicode(kernelList))
self.__debug("initrdList: " + unicode(initrdList))
uuid = slt.execGetOutput(['/sbin/blkid', '-s', 'UUID', '-o', 'value', device], shell=False)
if uuid:
rootDevice = "/dev/disk/by-uuid/{uuid}".format(uuid=uuid[0])
else:
rootDevice = device
self.__debug("rootDevice = " + rootDevice)
for (k, i, l) in self._getKernelInitrdCouples(kernelList, initrdList, label):
self.__debug("kernel, initrd, label found: " + unicode(k) + "," + unicode(i) + "," + unicode(l))
section = None
if i:
section = """# {label} Linux section
image = {image}
initrd = {initrd}
root = {root}
""".format(image=k, initrd=i, root=rootDevice, label=l)
else:
section = """# {label} Linux section
image = {image}
root = {root}
""".format(image=k, root=rootDevice, label=l)
if fs == 'ext4':
section += ' append = "{append} "\n'.format(append='rootfstype=ext4')
section += " read-only\n label = {label}\n".format(label=l)
sections.append(section)
return sections
def _getKernelInitrdCouples(self, kernelList, initrdList, labelRef):
ret = []
if kernelList:
if len(kernelList) == 1:
initrd = None
if initrdList:
initrd = initrdList[0] # assume the only initrd match the only kernel
ret.append([kernelList[0], initrd, labelRef])
else:
labelBase = labelRef[0:15 - 2] + "-"
n = 0
for kernel in kernelList:
n += 1
kernelSuffix = os.path.basename(kernel).replace("vmlinuz", "")
initrd = None
for i in initrdList:
if kernelSuffix in i: # find the matching initrd
initrd = i
break
ret.append((kernel, initrd, labelBase + unicode(n)))
return ret
  def _getFrameBufferConf(self):
    """
    Return the frame buffer configuration for this hardware.
    Format: (fb, label)
    fb is a VESA mode number (or 'normal'), label a human string like
    '1024x768x16' (or 'text' when no mode could be determined).
    """
    try:
      fbGeometry = slt.execGetOutput("/usr/sbin/fbset | grep -w geometry")
    except CalledProcessError:
      self.__debug("Impossible to determine frame buffer mode, default to text.")
      fbGeometry = None
    mode = None
    label = None
    if fbGeometry:
      # (width, height, color depth, VESA mode number or None when the
      # combination has no standard VESA number)
      vesaModes = [
        (320, 200, 4, None),
        (640, 400, 4, None),
        (640, 480, 4, None),
        (800, 500, 4, None),
        (800, 600, 4, 770),
        (1024, 640, 4, None),
        (896, 672, 4, None),
        (1152, 720, 4, None),
        (1024, 768, 4, 772),
        (1440, 900, 4, None),
        (1280, 1024, 4, 774),
        (1400, 1050, 4, None),
        (1600, 1200, 4, None),
        (1920, 1200, 4, None),
        (320, 200, 8, None),
        (640, 400, 8, 768),
        (640, 480, 8, 769),
        (800, 500, 8, 879),
        (800, 600, 8, 771),
        (1024, 640, 8, 874),
        (896, 672, 8, 815),
        (1152, 720, 8, 869),
        (1024, 768, 8, 773),
        (1440, 900, 8, 864),
        (1280, 1024, 8, 775),
        (1400, 1050, 8, 835),
        (1600, 1200, 8, 796),
        (1920, 1200, 8, 893),
        (320, 200, 15, 781),
        (640, 400, 15, 801),
        (640, 480, 15, 784),
        (800, 500, 15, 880),
        (800, 600, 15, 787),
        (1024, 640, 15, 875),
        (896, 672, 15, 816),
        (1152, 720, 15, 870),
        (1024, 768, 15, 790),
        (1440, 900, 15, 865),
        (1280, 1024, 15, 793),
        (1400, 1050, 15, None),
        (1600, 1200, 15, 797),
        (1920, 1200, 15, None),
        (320, 200, 16, 782),
        (640, 400, 16, 802),
        (640, 480, 16, 785),
        (800, 500, 16, 881),
        (800, 600, 16, 788),
        (1024, 640, 16, 876),
        (896, 672, 16, 817),
        (1152, 720, 16, 871),
        (1024, 768, 16, 791),
        (1440, 900, 16, 866),
        (1280, 1024, 16, 794),
        (1400, 1050, 16, 837),
        (1600, 1200, 16, 798),
        (1920, 1200, 16, None),
        (320, 200, 24, 783),
        (640, 400, 24, 803),
        (640, 480, 24, 786),
        (800, 500, 24, 882),
        (800, 600, 24, 789),
        (1024, 640, 24, 877),
        (896, 672, 24, 818),
        (1152, 720, 24, 872),
        (1024, 768, 24, 792),
        (1440, 900, 24, 867),
        (1280, 1024, 24, 795),
        (1400, 1050, 24, 838),
        (1600, 1200, 24, 799),
        (1920, 1200, 24, None),
        (320, 200, 32, None),
        (640, 400, 32, 804),
        (640, 480, 32, 809),
        (800, 500, 32, 883),
        (800, 600, 32, 814),
        (1024, 640, 32, 878),
        (896, 672, 32, 819),
        (1152, 720, 32, 873),
        (1024, 768, 32, 824),
        (1440, 900, 32, 868),
        (1280, 1024, 32, 829),
        (1400, 1050, 32, None),
        (1600, 1200, 32, 834),
        (1920, 1200, 32, None),
      ]
      # fbset prints e.g. 'geometry 1024 768 1024 768 16'; we keep the
      # visible resolution (fields 1-2) and the color depth (last field).
      values = fbGeometry[0].strip().split(' ')
      self.__debug("FB Values: " + unicode(values))
      xRes = int(values[1])
      yRes = int(values[2])
      deep = int(values[-1])
      xMax = None
      yMax = None
      dMax = None
      # order the vesa modes by vertical size desc, horizontal size desc, color depth desc.
      for vesaMode in sorted(vesaModes, key=itemgetter(1, 0, 2), reverse=True):
        (x, y, d, m) = vesaMode
        if m:
          self.__debug("trying {0} for y, {1} for x and {2} for d".format(y, x, d))
          # pick the largest standard mode not exceeding the hardware's
          if y <= yRes and x <= xRes and d <= deep:
            xMax = x
            yMax = y
            dMax = d
            mode = m
            break
      if mode:
        self.__debug("Max mode found: {x}×{y}×{d}".format(x=xMax, y=yMax, d=dMax))
        label = "{x}x{y}x{d}".format(x=xMax, y=yMax, d=dMax)
    if not mode:
      mode = 'normal'
      label = 'text'
    return (mode, label)
  def createConfiguration(self, mbrDevice, bootPartition, partitions):
    """
    Generate the lilo.conf file in the temporary directory.
    partitions format: [device, filesystem, boot type, label]
    mbrDevice / bootPartition are bare device names (e.g. 'sda', 'sda1').
    Mounts whatever is needed to inspect the partitions, and always
    unmounts everything again in the finally block.
    """
    self._mbrDevice = os.path.join("/dev", mbrDevice)
    self._bootPartition = os.path.join("/dev", bootPartition)
    self._partitions = partitions
    self._bootsMounted = []
    self.__debug("partitions: " + unicode(self._partitions))
    mp = None
    mpList = None
    try:
      mp = self._mountBootPartition()
      if not mp:
        raise Exception("Cannot mount the main boot partition.")
      self.__debug("mp = " + unicode(mp))
      mpList = {}
      self._mountPartitions(mpList)
      self.__debug("mount point lists: " + unicode(mpList))
      liloSections = self._createLiloSections(mpList)
      self.__debug("lilo sections: " + unicode(liloSections))
      (fb, fbLabel) = self._getFrameBufferConf()
      self.__debug("frame buffer mode = " + unicode(fb) + " " + unicode(fbLabel))
      # Write the global header, then one section per detected OS/kernel.
      f = open(self.getConfigurationPath(), "w")
      f.write(self._cfgTemplate.format(boot=self._mbrDevice, mp=mp, vga="{0} # {1}".format(fb, fbLabel)))
      for s in liloSections:
        f.write(s)
        f.write("\n")
      f.close()
    finally:
      self._umountAll(mp, mpList)
def install(self):
"""
Assuming that last configuration editing didn't modified mount point.
"""
if self._mbrDevice:
self._bootsMounted = []
mp = None
mpList = None
try:
mp = self._mountBootPartition()
if not mp:
raise Exception("Cannot mount the main boot partition.")
self.__debug("mp = " + unicode(mp))
mpList = {}
self._mountPartitions(mpList)
self.__debug("mount point lists: " + unicode(mpList))
# copy the configuration to the boot_partition
try:
self.__debug("create etc/bootsetup directory in " + mp)
os.makedirs(os.path.join(mp, 'etc/bootsetup'))
except os.error:
pass
self.__debug("copy lilo.conf to etc/bootsetup")
shutil.copyfile(self.getConfigurationPath(), os.path.join(mp, '/etc/bootsetup/lilo.conf'))
# run lilo
if self.isTest:
self.__debug('/sbin/lilo -t -v -C {mp}/etc/bootsetup/lilo.conf'.format(mp=mp))
slt.execCall('/sbin/lilo -t -v -C {mp}/etc/bootsetup/lilo.conf'.format(mp=mp))
else:
slt.execCall('/sbin/lilo -C {mp}/etc/bootsetup/lilo.conf'.format(mp=mp))
finally:
self._umountAll(mp, mpList)
|
{"/bootsetup/bootsetup_gtk.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathergui.py"], "/bootsetup/gathergui.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"], "/bootsetup/bootsetup.py": ["/bootsetup/__init__.py", "/bootsetup/bootsetup_gtk.py", "/bootsetup/bootsetup_curses.py"], "/bootsetup/bootsetup_curses.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathercurses.py"], "/bootsetup/gathercurses.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"]}
|
23,099
|
jrd/bootsetup
|
refs/heads/master
|
/bootsetup/bootsetup_curses.py
|
#!/usr/bin/env python
# coding: utf-8
# vim:et:sta:sts=2:sw=2:ts=2:tw=0:
"""
Curses BootSetup.
"""
from __future__ import unicode_literals, print_function, division, absolute_import
import os
import sys
import gettext # noqa
import urwidm
from .bootsetup import *
from .gathercurses import *
class BootSetupCurses(BootSetup):
  """
  Curses (urwid) implementation of the BootSetup UI.
  Requires root privileges; delegates the real work to GatherCurses.
  """
  gc = None  # the GatherCurses instance, created in run_setup
  # (name, foreground, background, attributes) palette entries for dialogs
  _palette = [
    ('important', 'yellow', 'black', 'bold'),
    ('info', 'white', 'dark blue', 'bold'),
    ('error', 'white', 'dark red', 'bold'),
  ]
  def run_setup(self):
    urwidm.set_encoding('utf8')
    if os.getuid() != 0:
      self.error_dialog(_("Root privileges are required to run this program."), _("Sorry!"))
      sys.exit(1)
    self.gc = GatherCurses(self, self._bootloader, self._targetPartition, self._isTest, self._useTestData)
    self.gc.run()
  def _show_ui_dialog(self, dialog, parent=None):
    # Show *dialog* on the running UI screen when one exists, otherwise
    # on a temporary raw screen that is started and stopped around it.
    if not parent:
      parent = urwidm.Filler(urwidm.Divider(), 'top')
    uiToStop = False
    if self.gc and self.gc._loop:
      ui = self.gc._loop.screen
    else:
      ui = urwidm.raw_display.Screen()
      ui.register_palette(self._palette)
    # NOTE(review): relies on urwid's private _started flag to detect a
    # not-yet-running screen.
    if not ui._started:
      uiToStop = True
      ui.start()
    dialog.run(ui, parent)
    if uiToStop:
      ui.stop()
  def info_dialog(self, message, title=None, parent=None):
    """Displays an information message."""
    if not title:
      title = _("INFO")
    dialog = urwidm.TextDialog(('info', unicode(message)), 10, 60, ('important', unicode(title)))
    self._show_ui_dialog(dialog, parent)
  def error_dialog(self, message, title=None, parent=None):
    """Displays an error message."""
    if not title:
      title = "/!\ " + _("ERROR")
    dialog = urwidm.TextDialog(('error', unicode(message)), 10, 60, ('important', unicode(title)))
    self._show_ui_dialog(dialog, parent)
|
{"/bootsetup/bootsetup_gtk.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathergui.py"], "/bootsetup/gathergui.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"], "/bootsetup/bootsetup.py": ["/bootsetup/__init__.py", "/bootsetup/bootsetup_gtk.py", "/bootsetup/bootsetup_curses.py"], "/bootsetup/bootsetup_curses.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathercurses.py"], "/bootsetup/gathercurses.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"]}
|
23,100
|
jrd/bootsetup
|
refs/heads/master
|
/setup.py
|
#!/bin/env python
# coding: utf-8
# vim:et:sta:sw=2:sts=2:ts=2:tw=0:
from __future__ import division, print_function, absolute_import
from setuptools import setup
from distutils import cmd
from distutils.command.build import build as build_class
from distutils.command.install import install as install_class
from distutils.command.install_data import install_data as install_data_class
import os
import codecs
import re
from glob import glob
import polib
import subprocess as sp
import shutil
# Package directory name; used for version lookup and install paths below.
MODULE_NAME = 'bootsetup'
def read(*paths):
  """Build a file path from *paths* and return the contents (UTF-8 encoded bytes)."""
  file_path = os.path.join(*paths)
  with codecs.EncodedFile(open(file_path, 'rb'), 'utf-8') as stream:
    return stream.read()
def find_info(info, *file_paths):
  """
  Extract the __<info>__ metadata value from the __init__.py found
  under *file_paths*.

  Handles a simple string assignment (__x__ = '...') and a list of
  strings (__x__ = ['...', '...']), returned comma-joined.
  Raises RuntimeError when no assignment is found.
  """
  file_paths = list(file_paths)
  file_paths.append('__init__.py')
  info_file = read(*file_paths)
  # Pieces of a regex matching a Python string literal with escapes.
  python_simple_string = r"(?:[^'\"\\]*)"
  python_escapes = r"(?:\\['\"\\])"
  python_string = r"{delim}((?:{simple}{esc}?)*){delim}".format(delim=r"['\"]", simple=python_simple_string, esc=python_escapes)
  info_match = re.search(r"^__{0}__ = {1}".format(info, python_string), info_file, re.M)
  if info_match:
    return info_match.group(1)
  else:
    python_arrays = r"\[(?:{ps})?((?:, {ps})*)\]".format(ps=python_string)
    info_match = re.search(r"^__{0}__ = {1}".format(info, python_arrays), info_file, re.M)
    if info_match:
      matches = [info_match.group(1)]
      # Fixed: this used to call info_match.groups(2); Match.groups()
      # takes a *default* argument and always returns a (truthy) tuple,
      # so the test was always true. group(2) — the ", ..." tail of the
      # list, empty for a single-element list — is the intended call.
      if info_match.group(2):
        matches.extend(re.findall(r", {0}".format(python_string), info_match.group(2)))
      return ', '.join(matches)
  raise RuntimeError("Unable to find {0} string.".format(info))
def find_version(*file_paths):
  """Return the __version__ string of the package located under *file_paths*."""
  return find_info('version', *file_paths)
class build_trans(cmd.Command):
  """
  Compile .po files to .mo files and .desktop.in to .desktop
  """
  description = __doc__

  def initialize_options(self):
    pass

  def finalize_options(self):
    pass

  def run(self):
    po_dir = os.path.join('resources', 'po')
    pot_names = [os.path.basename(filename)[:-4] for filename in glob(os.path.join(po_dir, '*.pot'))]
    for pot_name in pot_names:
      print('* Compiling po files for {0}'.format(pot_name))
      for po_file in glob(os.path.join(po_dir, '*.po')):
        lang = os.path.basename(po_file)[:-3]  # len('.po') == 3
        mo_file = os.path.join('build', 'locale', lang, 'LC_MESSAGES', '{0}.mo'.format(pot_name))
        mo_dir = os.path.dirname(mo_file)
        if not os.path.exists(mo_dir):
          os.makedirs(mo_dir)
        # Recompile when the .mo is missing or older than its .po
        # (index 8 of os.stat is st_mtime).
        if os.path.exists(mo_file):
          create_mo = os.stat(po_file)[8] > os.stat(mo_file)[8]
        else:
          create_mo = True
        if create_mo:
          print('** Compiling {0}'.format(po_file))
          polib.pofile(po_file).save_as_mofile(mo_file)
    # Merge translations into the .desktop launcher files.
    for in_file in glob(os.path.join('resources', '*.desktop.in')):
      out_file = os.path.join('build', os.path.basename(in_file)[:-3])  # len('.in') == 3
      sp.check_call(['intltool-merge', po_dir, '-d', '-u', in_file, out_file])
class build_icons(cmd.Command):
  """
  Copy icons files to the build directory.
  """
  description = __doc__

  def initialize_options(self):
    pass

  def finalize_options(self):
    pass

  def run(self):
    icons_dir = os.path.join('resources', 'icons')
    # Sized PNGs: 'name-SIZE.png' → build/icons/hicolor/SIZExSIZE/apps/name.png
    for icon in glob(os.path.join(icons_dir, '*.png')):
      match = re.search(r'^(.+)-([0-9]+)\.png', os.path.basename(icon))
      if not match:
        continue
      target_name = '{0}.png'.format(match.group(1))
      size = match.group(2)
      target_dir = os.path.join('build', 'icons', 'hicolor', '{0}x{0}'.format(size), 'apps')
      if not os.path.exists(target_dir):
        os.makedirs(target_dir)
      shutil.copyfile(icon, os.path.join(target_dir, target_name))
    # SVGs all go to the 'scalable' theme directory, name unchanged.
    svg_icon_dir = os.path.join('build', 'icons', 'hicolor', 'scalable', 'apps')
    for icon in glob(os.path.join(icons_dir, '*.svg')):
      if not os.path.exists(svg_icon_dir):
        os.makedirs(svg_icon_dir)
      shutil.copyfile(icon, os.path.join(svg_icon_dir, os.path.basename(icon)))
class build(build_class):
  """
  Add 'build_trans' as a sub-command.
  """
  # Run the stock build steps, then compile translations and stage icons.
  sub_commands = build_class.sub_commands + [('build_trans', None), ('build_icons', None)]
class install_data(install_data_class):
  """
  Install custom data, like .mo files and icons.
  """
  def run(self):
    """Register compiled .mo files, .desktop entries, icons and docs in
    self.data_files, then run the stock install_data."""
    po_dir = os.path.join('resources', 'po')
    for pot_name in [os.path.basename(filename)[:-4] for filename in glob(os.path.join(po_dir, '*.pot'))]:  # [:-4] strips '.pot'
      for lang in os.listdir(os.path.join('build', 'locale')):
        lang_dir = os.path.join('share', 'locale', lang, 'LC_MESSAGES')
        lang_file = os.path.join('build', 'locale', lang, 'LC_MESSAGES', '{0}.mo'.format(pot_name))
        self.data_files.append((lang_dir, [lang_file]))
    app_files = glob(os.path.join('build', '*.desktop'))
    if app_files:
      self.data_files.append((os.path.join('share', 'applications'), app_files))
    for icon in glob(os.path.join('build', 'icons', 'hicolor', '*', '*', '*')):
      # Install each staged icon under share/ at the same relative path it has
      # under build/ (replaces the original string-reversal dirname trick,
      # which computed exactly this but unreadably).
      icon_dest = os.path.join('share', os.path.relpath(os.path.dirname(icon), 'build'))
      self.data_files.append((icon_dest, [icon]))
    doc_dir = os.path.join('doc', '{0}-{1}'.format(MODULE_NAME, find_version(MODULE_NAME)))
    self.data_files.append((doc_dir, glob(os.path.join('docs', '*'))))
    print('data_files', self.data_files)  # debug trace kept from original
    install_data_class.run(self)
class install(install_class):
  """
  Hack for having install_data run even if there is no data listed.
  """
  def initialize_options(self):
    install_class.initialize_options(self)
    # Force the 'has data' predicate so the install_data sub-command always
    # runs, and make sure data_files is at least a list it can append to.
    self.distribution.has_data_files = lambda: True
    if not self.distribution.data_files:
      self.distribution.data_files = []
# Package metadata and custom command wiring handed to distutils/setuptools.
config = {
  'name': 'BootSetup',
  'description': 'Helps installing a bootloader like LiLo or Grub2 on your computer',
  'long_description': read('README.rst'),
  'license': find_info('license', MODULE_NAME),
  'author': find_info('credits', MODULE_NAME),
  'author_email': find_info('email', MODULE_NAME),
  'version': find_version(MODULE_NAME),
  'url': 'https://github.com/jrd/bootsetup/',
  'download_url': 'https://github.com/jrd/bootsetup/archive/master.zip',
  'packages': [MODULE_NAME],
  'include_package_data': True,
  'package_data': {MODULE_NAME: ['*.glade', '*.png']},
  'entry_points': {'console_scripts': ['bootsetup = {0}.bootsetup:main'.format(MODULE_NAME)]},
  # custom commands defined above: translations, icons, forced install_data
  'cmdclass': {'build': build, 'build_trans': build_trans, 'build_icons': build_icons, 'install': install, 'install_data': install_data},
  'classifiers': [  # https://pypi.python.org/pypi?:action=list_classifiers
    'Development Status :: 4 - Beta',
    'Environment :: Console',
    'Environment :: Console :: Curses',
    'Environment :: X11 Applications',
    'Environment :: X11 Applications :: GTK',
    'Intended Audience :: End Users/Desktop',
    'Intended Audience :: System Administrators',
    'Natural Language :: English',
    'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
    'Operating System :: POSIX :: Linux',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.3',
    'Topic :: System :: Boot',
    'Topic :: System :: Recovery Tools',
  ],
}
setup(**config)
|
{"/bootsetup/bootsetup_gtk.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathergui.py"], "/bootsetup/gathergui.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"], "/bootsetup/bootsetup.py": ["/bootsetup/__init__.py", "/bootsetup/bootsetup_gtk.py", "/bootsetup/bootsetup_curses.py"], "/bootsetup/bootsetup_curses.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathercurses.py"], "/bootsetup/gathercurses.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"]}
|
23,101
|
jrd/bootsetup
|
refs/heads/master
|
/bootsetup/gathercurses.py
|
#!/usr/bin/env python
# coding: utf-8
# vim:et:sta:sts=2:sw=2:ts=2:tw=0:
"""
Curses (urwid) BootSetup configuration gathering.
"""
from __future__ import unicode_literals, print_function, division, absolute_import
from .__init__ import __version__, __copyright__, __license__, __author__
import gettext # noqa
import urwidm
import re
import os
import libsalt as slt
from .config import Config
from .lilo import Lilo
from .grub2 import Grub2
class GatherCurses:
  """
  UI in curses/urwid to gather information about the configuration to setup.
  """
  # Other potential color schemes can be found at:
  # http://excess.org/urwid/wiki/RecommendedPalette
  _palette = [
    ('body', 'light gray', 'black'),
    ('header', 'white', 'dark blue'),
    ('footer', 'light green', 'black'),
    ('footer_key', 'yellow', 'black'),
    ('strong', 'white', 'black'),
    ('copyright', 'light blue', 'black'),
    ('authors', 'light cyan', 'black'),
    ('translators', 'light green', 'black'),
    ('focusable', 'light green', 'black'),
    ('unfocusable', 'dark blue', 'black'),
    ('focus', 'black', 'dark green'),
    ('focus_edit', 'yellow', 'black'),
    ('focus_icon', 'yellow', 'black'),
    ('focus_radio', 'yellow', 'black'),
    ('focus_combo', 'black', 'dark green'),
    ('combobody', 'light gray', 'dark blue'),
    ('combofocus', 'black', 'brown'),
    ('error', 'white', 'dark red'),
    ('focus_error', 'light red', 'black'),
  ]
  # View/state holders, filled in by run() and the _create* methods.
  _mainView = None
  _helpView = None
  _aboutView = None
  _mode = 'main'  # one of 'main' | 'help' | 'about'
  _loop = None
  _helpCtx = ''  # contextual-help key of the currently focused widget
  _labelPerDevice = {}  # device name -> lilo boot menu label
  _lilo = None
  _grub2 = None
  _editing = False
  _custom_lilo = False  # True once the user hand-edited lilo.conf
  _grub2_cfg = False
  _liloMaxChars = 15  # lilo label length limit
  _editors = ['vim', 'nano']  # tried in order for manual config edition
  def __init__(self, bootsetup, bootloader=None, target_partition=None, is_test=False, use_test_data=False):
    # bootsetup: owning application object (provides dialogs and extra palette).
    self._bootsetup = bootsetup
    self.cfg = Config(bootloader, target_partition, is_test, use_test_data)
    # Debug dump of the detected configuration.
    print("""
bootloader = {bootloader}
target partition = {partition}
MBR device = {mbr}
disks:{disks}
partitions:{partitions}
boot partitions:{boot_partitions}
""".format(bootloader=self.cfg.cur_bootloader, partition=self.cfg.cur_boot_partition, mbr=self.cfg.cur_mbr_device, disks="\n - " + "\n - ".join(map(" ".join, self.cfg.disks)), partitions="\n - " + "\n - ".join(map(" ".join, self.cfg.partitions)), boot_partitions="\n - " + "\n - ".join(map(" ".join, self.cfg.boot_partitions))))
    self.ui = urwidm.raw_display.Screen()
    self.ui.set_mouse_tracking()
    self._palette.extend(bootsetup._palette)
  def run(self):
    """Build the three views, start the urwid main loop and block until exit."""
    self._createMainView()
    self._createHelpView()
    self._createAboutView()
    self._changeBootloaderSection()
    self._loop = urwidm.MainLoop(self._mainView, self._palette, handle_mouse=True, unhandled_input=self._handleKeys, pop_ups=True)
    # Pre-select the bootloader radio according to the initial Config.
    if self.cfg.cur_bootloader == 'lilo':
      self._radioLiLo.set_state(True)
      self._mainView.body.set_focus(self._mbrDeviceSectionPosition)
    elif self.cfg.cur_bootloader == 'grub2':
      self._radioGrub2.set_state(True)
      self._mainView.body.set_focus(self._mbrDeviceSectionPosition)
    self._loop.run()
  def _infoDialog(self, message):
    # Modal dialogs are delegated to the owning bootsetup application.
    self._bootsetup.info_dialog(message, parent=self._loop.widget)
  def _errorDialog(self, message):
    self._bootsetup.error_dialog(message, parent=self._loop.widget)
  def _updateScreen(self):
    # Redraw only when the loop is actually running.
    if self._loop and self._loop.screen._started:
      self._loop.draw_screen()
  def _onHelpFocusGain(self, widget, context):
    """Remember which help context the focused widget belongs to."""
    self._helpCtx = context
    return True
  def _onHelpFocusLost(self, widget):
    self._helpCtx = ''
    return True
  def _installHelpContext(self, widget, context):
    # Wire focus events so h/F2 shows help matching the focused widget.
    urwidm.connect_signal(widget, 'focusgain', self._onHelpFocusGain, context)
    urwidm.connect_signal(widget, 'focuslost', self._onHelpFocusLost)
  def _createComboBox(self, label, elements):
    """Read-only combo; list elements become multi-value rows."""
    l = [urwidm.TextMultiValues(el) if isinstance(el, list) else el for el in elements]
    comboBox = urwidm.ComboBox(label, l)
    comboBox.set_combo_attrs('combobody', 'combofocus')
    comboBox.cbox.sensitive_attr = ('focusable', 'focus_combo')
    return comboBox
  def _createComboBoxEdit(self, label, elements):
    """Editable variant of _createComboBox."""
    l = [urwidm.TextMultiValues(el) if isinstance(el, list) else el for el in elements]
    comboBox = urwidm.ComboBoxEdit(label, l)
    comboBox.set_combo_attrs('combobody', 'combofocus')
    comboBox.cbox.sensitive_attr = ('focusable', 'focus_edit')
    return comboBox
  def _createEdit(self, caption='', edit_text='', multiline=False, align='left', wrap='space', allow_tab=False, edit_pos=None, layout=None, mask=None):
    # Thin factory over EditMore; signature mirrors urwid.Edit.
    edit = urwidm.EditMore(caption, edit_text, multiline, align, wrap, allow_tab, edit_pos, layout, mask)
    return edit
  def _createButton(self, label, on_press=None, user_data=None):
    btn = urwidm.ButtonMore(label, on_press, user_data)
    return btn
  def _createRadioButton(self, group, label, state="first True", on_state_change=None, user_data=None):
    radio = urwidm.RadioButtonMore(group, label, state, on_state_change, user_data)
    return radio
def _createCenterButtonsWidget(self, buttons, h_sep=2, v_sep=0):
maxLen = reduce(max, [len(b.label) for b in buttons], 0) + len("< >")
return urwidm.GridFlowMore(buttons, maxLen, h_sep, v_sep, "center")
def _createMainView(self):
"""
+=======================================+
| Title |
+=======================================+
| Introduction text |
+---------------------------------------+
| Bootloader: (×) LiLo (_) Grub2 |
| MBR Device: |_____________ ↓| | <== ComboBox thanks to wicd
| Grub2 files: |_____________ ↓| | --
| <Edit config> | --}- <== Grub2 only
| |
| +-----------------------------------+ | --
| |Dev.|FS |Type |Label |Actions| | |
| |sda1|ext4|Salix|Salix14____|<↑><↓> | | |
| |sda5|xfs |Arch |ArchLinux__|<↑><↓> | | +- <== LiLo only
| +-----------------------------------+ | |
| <Edit config> <Undo custom config> | --
| <Install> |
+=======================================+
| H: Help, A: About, Q: Quit | <== Action keyboard thanks to wicd
+=======================================+
"""
# header
txtTitle = urwidm.Text(_("BootSetup curses, version {ver}").format(ver=__version__), align="center")
header = urwidm.PileMore([urwidm.Divider(), txtTitle, urwidm.Text('─' * (len(txtTitle.text) + 2), align="center")])
header.attr = 'header'
# footer
keys = [
(('h', 'f2'), _("Help")),
(('a', 'ctrl a'), _("About")),
(('q', 'f10'), _("Quit")),
]
keysColumns = urwidm.OptCols(keys, self._handleKeys, attrs=('footer_key', 'footer'))
keysColumns.attr = 'footer'
footer = urwidm.PileMore([urwidm.Divider('⎽'), keysColumns])
footer.attr = 'footer'
# intro
introHtml = _("<b>BootSetup will install a new bootloader on your computer.</b> \n\
\n\
A bootloader is required to load the main operating system of a computer and will initially display \
a boot menu if several operating systems are available on the same computer.")
intro = map(lambda line: ('strong', line.replace("<b>", "").replace("</b>", "") + "\n") if line.startswith("<b>") else line, introHtml.split("\n"))
intro[-1] = intro[-1].strip() # remove last "\n"
txtIntro = urwidm.Text(intro)
# bootloader type section
lblBootloader = urwidm.Text(_("Bootloader:"))
radioGroupBootloader = []
self._radioLiLo = self._createRadioButton(radioGroupBootloader, "LiLo", state=False, on_state_change=self._onLiLoChange)
self._radioGrub2 = self._createRadioButton(radioGroupBootloader, "Grub2", state=False, on_state_change=self._onGrub2Change)
bootloaderTypeSection = urwidm.ColumnsMore([lblBootloader, self._radioLiLo, self._radioGrub2], focus_column=1)
self._installHelpContext(bootloaderTypeSection, 'type')
# mbr device section
mbrDeviceSection = self._createMbrDeviceSectionView()
# bootloader section
self._bootloaderSection = urwidm.WidgetPlaceholderMore(urwidm.Text(""))
# install section
btnInstall = self._createButton(_("_Install bootloader").replace("_", ""), on_press=self._onInstall)
self._installHelpContext(btnInstall, 'install')
installSection = self._createCenterButtonsWidget([btnInstall])
# body
bodyList = [urwidm.Divider(), txtIntro, urwidm.Divider('─', bottom=1), bootloaderTypeSection, mbrDeviceSection, urwidm.Divider(), self._bootloaderSection, urwidm.Divider('─', top=1, bottom=1), installSection]
self._mbrDeviceSectionPosition = 4
body = urwidm.ListBoxMore(urwidm.SimpleListWalker(bodyList))
body.attr = 'body'
frame = urwidm.FrameMore(body, header, footer, focus_part='body')
frame.attr = 'body'
self._mainView = frame
  def _createHelpView(self):
    """Build the help frame; its text is filled later by _switchToContextualHelp."""
    bodyPile = urwidm.PileMore([urwidm.Divider(), urwidm.TextMore("Help")])
    bodyPile.attr = 'body'
    body = urwidm.FillerMore(bodyPile, valign="top")
    body.attr = 'body'
    txtTitle = urwidm.Text(_("Help"), align="center")
    header = urwidm.PileMore([urwidm.Divider(), txtTitle, urwidm.Text('─' * (len(txtTitle.text) + 2), align="center")])
    header.attr = 'header'
    keys = [
      (('q', 'esc', 'enter'), _("Close")),
    ]
    keysColumns = urwidm.OptCols(keys, self._handleKeys, attrs=('footer_key', 'footer'))
    keysColumns.attr = 'footer'
    footer = urwidm.PileMore([urwidm.Divider('⎽'), keysColumns])
    footer.attr = 'footer'
    frame = urwidm.FrameMore(body, header, footer, focus_part='body')
    frame.attr = 'body'
    self._helpView = frame
  def _createAboutView(self):
    """Build the about frame: version, license, authors, translators."""
    divider = urwidm.Divider()
    name = urwidm.TextMore(('strong', _("BootSetup curses, version {ver}").format(ver=__version__)), align="center")
    comments = urwidm.TextMore(('body', _("Helps set up a bootloader like LiLo or Grub2.")), align="center")
    # NOTE: `copyright` and `license` shadow builtins; harmless in this scope.
    copyright = urwidm.TextMore(('copyright', __copyright__), align="center")
    license = urwidm.TextMore(('copyright', __license__), align="center")
    url = urwidm.TextMore(('strong', "http://salixos.org"), align="center")
    authors = urwidm.TextMore(('authors', _("Authors:") + "\n" + __author__.replace(', ', '\n')), align="center")
    translators = urwidm.TextMore(('translators', _("Translators:") + "\n" + _("translator_name <translator@email.com>")), align="center")
    bodyPile = urwidm.PileMore([divider, name, comments, divider, copyright, license, divider, url, divider, authors, translators])
    bodyPile.attr = 'body'
    body = urwidm.FillerMore(bodyPile, valign="top")
    body.attr = 'body'
    txtTitle = urwidm.Text(_("About BootSetup"), align="center")
    header = urwidm.PileMore([urwidm.Divider(), txtTitle, urwidm.Text('─' * (len(txtTitle.text) + 2), align="center")])
    header.attr = 'header'
    keys = [
      (('q', 'esc', 'enter'), _("Close")),
    ]
    keysColumns = urwidm.OptCols(keys, self._handleKeys, attrs=('footer_key', 'footer'))
    keysColumns.attr = 'footer'
    footer = urwidm.PileMore([urwidm.Divider('⎽'), keysColumns])
    footer.attr = 'footer'
    frame = urwidm.FrameMore(body, header, footer, focus_part='body')
    frame.attr = 'body'
    self._aboutView = frame
  def _createMbrDeviceSectionView(self):
    """Combo listing the detected disks as the MBR install target."""
    comboBox = self._createComboBoxEdit(_("Install bootloader on:"), self.cfg.disks)
    urwidm.connect_signal(comboBox, 'change', self._onMBRChange)
    self._installHelpContext(comboBox, 'mbr')
    return comboBox
  def _createBootloaderSectionView(self):
    """Build the bootloader-specific section: the lilo label table, the Grub2
    partition combo, or an empty text when no bootloader is selected yet."""
    if self.cfg.cur_bootloader == 'lilo':
      listDevTitle = _("Partition")
      listFSTitle = _("File system")
      listLabelTitle = _("Boot menu label")
      # Row 0 of each column is its header cell.
      listDev = [urwidm.TextMore(listDevTitle)]
      listFS = [urwidm.TextMore(listFSTitle)]
      listType = [urwidm.TextMore(_("Operating system"))]
      listLabel = [urwidm.TextMore(listLabelTitle)]
      listActionUp = [urwidm.TextMore("")]
      listActionDown = [urwidm.TextMore("")]
      for l in (listDev, listFS, listType, listLabel, listActionUp, listActionDown):
        l[0].sensitive_attr = 'strong'
      self._labelPerDevice = {}
      # One table row per bootable partition; p is a tuple:
      # p[0]=device, p[1]=fs, p[3]=os type, p[4]=suggested label.
      for p in self.cfg.boot_partitions:
        dev = p[0]
        fs = p[1]
        ostype = p[3]
        label = re.sub(r'[()]', '', re.sub(r'_\(loader\)', '', re.sub(' ', '_', p[4])))  # lilo does not like spaces and pretty print the label
        listDev.append(urwidm.TextMore(dev))
        listFS.append(urwidm.TextMore(fs))
        listType.append(urwidm.TextMore(ostype))
        self._labelPerDevice[dev] = label
        editLabel = self._createEdit(edit_text=label, wrap=urwidm.CLIP)
        urwidm.connect_signal(editLabel, 'change', self._onLabelChange, dev)
        urwidm.connect_signal(editLabel, 'focusgain', self._onHelpFocusGain, 'lilotable')
        urwidm.connect_signal(editLabel, 'focuslost', self._onLabelFocusLost, dev)
        listLabel.append(editLabel)
        btnUp = self._createButton("↑", on_press=self._moveLineUp, user_data=p[0])
        self._installHelpContext(btnUp, 'liloup')
        listActionUp.append(btnUp)
        btnDown = self._createButton("↓", on_press=self._moveLineDown, user_data=p[0])
        self._installHelpContext(btnDown, 'lilodown')
        listActionDown.append(btnDown)
      colDev = urwidm.PileMore(listDev)
      colFS = urwidm.PileMore(listFS)
      colType = urwidm.PileMore(listType)
      colLabel = urwidm.PileMore(listLabel)
      colActionUp = urwidm.PileMore(listActionUp)
      colActionDown = urwidm.PileMore(listActionDown)
      # Keep the focused row aligned across the three focusable columns.
      urwidm.connect_signal(colLabel, 'focuslost', self._onLiloColumnFocusLost, [colLabel, colActionUp, colActionDown])
      urwidm.connect_signal(colActionUp, 'focuslost', self._onLiloColumnFocusLost, [colLabel, colActionUp, colActionDown])
      urwidm.connect_signal(colActionDown, 'focuslost', self._onLiloColumnFocusLost, [colLabel, colActionUp, colActionDown])
      self._liloTable = urwidm.ColumnsMore([('fixed', max(6, len(listDevTitle)), colDev), ('fixed', max(6, len(listFSTitle)), colFS), colType, ('fixed', max(self._liloMaxChars + 1, len(listLabelTitle)), colLabel), ('fixed', 5, colActionUp), ('fixed', 5, colActionDown)], dividechars=1)
      self._liloTableLines = urwidm.LineBoxMore(self._liloTable)
      self._liloTableLines.sensitive_attr = "strong"
      self._liloTableLines.unsensitive_attr = "unfocusable"
      self._liloBtnEdit = self._createButton(_("_Edit configuration").replace("_", ""), on_press=self._editLiLoConf)
      self._installHelpContext(self._liloBtnEdit, 'liloedit')
      self._liloBtnCancel = self._createButton(_("_Undo configuration").replace("_", ""), on_press=self._cancelLiLoConf)
      self._installHelpContext(self._liloBtnCancel, 'lilocancel')
      self._liloButtons = self._createCenterButtonsWidget([self._liloBtnEdit, self._liloBtnCancel])
      pile = urwidm.PileMore([self._liloTableLines, self._liloButtons])
      self._updateLiLoButtons()
      return pile
    elif self.cfg.cur_bootloader == 'grub2':
      comboBox = self._createComboBox(_("Install Grub2 files on:"), self.cfg.partitions)
      urwidm.connect_signal(comboBox, 'change', self._onGrub2FilesChange)
      self._installHelpContext(comboBox, 'partition')
      self._grub2BtnEdit = self._createButton(_("_Edit configuration").replace("_", ""), on_press=self._editGrub2Conf)
      self._installHelpContext(self._grub2BtnEdit, 'grub2edit')
      pile = urwidm.PileMore([comboBox, self._createCenterButtonsWidget([self._grub2BtnEdit])])
      # Initialize the edit button state from the pre-selected partition.
      self._onGrub2FilesChange(comboBox, comboBox.selected_item[0], None)
      return pile
    else:
      return urwidm.Text("")
  def _onLiloColumnFocusLost(self, widget, columnWidgets):
    """Propagate the focused row index to the sibling table columns."""
    pos = widget.get_focus_pos()
    for cw in columnWidgets:
      cw.focus_item = cw.widget_list[pos]  # set focus item directly without using set_focus method to prevent FG/FL events
    return True
  def _changeBootloaderSection(self):
    # Swap the middle of the main view for the LiLo- or Grub2-specific widgets.
    self._bootloaderSection.original_widget = self._createBootloaderSectionView()
def _handleKeys(self, key):
if not isinstance(key, tuple): # only keyboard input
key = key.lower()
if self._mode == 'main':
if key in ('h', 'f2'):
self._switchToContextualHelp()
elif key in ('a', 'ctrl a'):
self._switchToAbout()
if key in ('q', 'f10'):
self.main_quit()
elif self._mode == 'help':
if key in ('q', 'esc', 'enter'):
self._mode = 'main'
self._loop.widget = self._mainView
elif self._mode == 'about':
if key in ('q', 'esc', 'enter'):
self._mode = 'main'
self._loop.widget = self._mainView
  def _switchToContextualHelp(self):
    """Fill the help view with text for the current _helpCtx and show it.

    Every context registered via _installHelpContext (plus '' as default)
    is covered below, so `txt` is always bound before use.
    """
    self._mode = 'help'
    if self._helpCtx == '':
      txt = _("<b>BootSetup will install a new bootloader on your computer.</b> \n\
\n\
A bootloader is required to load the main operating system of a computer and will initially display \
a boot menu if several operating systems are available on the same computer.").replace("<b>", "").replace("</b>", "")
    elif self._helpCtx == 'type':
      txt = _("Here you can choose between LiLo or Grub2 bootloader.\n\
Both will boot your Linux and (if applicable) Windows.\n\
LiLo is the old way but still works pretty well. A good choice if you have a simple setup.\n\
Grub2 is a full-featured bootloader and more robust (does not rely on blocklists).")
    elif self._helpCtx == 'mbr':
      txt = _("Select the device that will contain your bootloader.\n\
This is commonly the device you set your Bios to boot on.")
    elif self._helpCtx == 'lilotable':
      txt = _("Here you must define a boot menu label for each \
of the operating systems that will be displayed in your bootloader menu.\n\
Any partition for which you do not set a boot menu label will not be configured and will \
not be displayed in the bootloader menu.\n\
If several kernels are available within one partition, the label you have chosen for that \
partition will be appended numerically to create multiple menu entries for each of these kernels.\n\
Any of these settings can be edited manually in the configuration file.")
    elif self._helpCtx == 'liloup':
      txt = _("Use this arrow if you want to move the \
selected Operating System up to a higher rank.\n\
The partition with the highest rank will be displayed on the first line of the bootloader menu.\n\
Any of these settings can be edited manually in the configuration file.")
    elif self._helpCtx == 'lilodown':
      txt = _("Use this arrow if you want to move the \
selected Operating System down to a lower rank.\n\
The partition with the lowest rank will be displayed on the last line of the bootloader menu.\n\
Any of these settings can be edited manually in the configuration file.")
    elif self._helpCtx == 'liloedit':
      txt = _("Experienced users can \
manually edit the LiLo configuration file.\n\
Please do not tamper with this file unless you know what you are doing and you have \
read its commented instructions regarding chrooted paths.")
    elif self._helpCtx == 'lilocancel':
      txt = _("This will undo all settings (even manual modifications).")
    elif self._helpCtx == 'partition':
      txt = _("Select the partition that will contain the Grub2 files.\n\
These will be in /boot/grub/. This partition should be readable by Grub2.\n\
It is recommanded to use your / partition, or your /boot partition if you have one.")
    elif self._helpCtx == 'grub2edit':
      txt = _("You can edit the etc/default/grub file for \
adjusting the Grub2 settings.\n\
This will not let you choose the label or the order of the menu entries, \
it's automatically done by Grub2.")
    elif self._helpCtx == 'install':
      txt = _("Once you have defined your settings, \
click on this button to install your bootloader.")
    # widget_list[1] is the TextMore placeholder built in _createHelpView.
    self._helpView.body._original_widget.widget_list[1].set_text(('strong', txt))
    self._loop.widget = self._helpView
  def _switchToAbout(self):
    """Show the about view."""
    self._mode = 'about'
    self._loop.widget = self._aboutView
  def _onLiLoChange(self, radioLiLo, newState):
    """Radio callback: make LiLo the active bootloader and rebuild its section."""
    if newState:
      self.cfg.cur_bootloader = 'lilo'
      if self._grub2:
        self._grub2 = None
      self._lilo = Lilo(self.cfg.is_test)
      self._changeBootloaderSection()
  def _onGrub2Change(self, radioGrub2, newState):
    """Radio callback: make Grub2 the active bootloader and rebuild its section."""
    if newState:
      self.cfg.cur_bootloader = 'grub2'
      if self._lilo:
        self._lilo = None
      self._grub2 = Grub2(self.cfg.is_test)
      self._changeBootloaderSection()
def _isDeviceValid(self, device):
return not device.startswith("/") and os.path.exists(os.path.join("/dev", device))
def _onMBRChange(self, combo, disk, pos):
if self._isDeviceValid(disk):
self.cfg.cur_mbr_device = disk
return True
else:
return False
def _isLabelValid(self, label):
if ' ' in label:
return 'space'
elif len(label) > self._liloMaxChars:
return 'max'
else:
return 'ok'
  def _showLabelError(self, errorType, editLabel):
    """Show a label error if the errorType is 'space' or 'max' and return True, else return False."""
    if errorType == 'space':
      self._errorDialog(_("\nAn Operating System label should not contain spaces.\n\nPlease check and correct.\n"))
      editLabel.sensitive_attr = ('error', 'focus_error')
      return True
    elif errorType == 'max':
      self._errorDialog(_("\nAn Operating System label should not be more than {max} characters long.\n\nPlease check and correct.\n".format(max=self._liloMaxChars)))
      editLabel.sensitive_attr = ('error', 'focus_error')
      return True
    elif errorType == 'pass':
      # 'pass': text was already flagged invalid; stay quiet to avoid dialog spam.
      return False
    else:  # == 'ok'
      editLabel.sensitive_attr = ('focusable', 'focus_edit')
      return False
  def _onLabelChange(self, editLabel, newText, device):
    """Record the new label unless invalid; warn only when the previous text was valid."""
    validOld = self._isLabelValid(editLabel.edit_text)
    if validOld == 'ok':
      validNew = self._isLabelValid(newText)
    else:
      validNew = 'pass'  # already complained while typing; don't re-show the dialog
    if not self._showLabelError(validNew, editLabel):
      self._labelPerDevice[device] = newText
  def _onLabelFocusLost(self, editLabel, device):
    # Deny focus loss while the label is invalid (dialog already shown).
    return not self._showLabelError(self._isLabelValid(editLabel.edit_text), editLabel)
def _findDevPosition(self, device):
colDevice = self._liloTable.widget_list[0]
for i, line in enumerate(colDevice.widget_list):
if i == 0: # skip header
continue
if line.text == device:
return i
return None
def _moveLineUp(self, button, device):
pos = self._findDevPosition(device)
if pos > 1: # 0 = header
for col, types in self._liloTable.contents:
old = col.widget_list[pos]
del col.widget_list[pos]
col.widget_list.insert(pos - 1, old)
def _moveLineDown(self, button, device):
pos = self._findDevPosition(device)
if pos < len(self._liloTable.widget_list[0].item_types) - 1:
for col, types in self._liloTable.contents:
old = col.widget_list[pos]
del col.widget_list[pos]
col.widget_list.insert(pos + 1, old)
  def _create_lilo_config(self):
    """Build the lilo configuration from the current per-device labels.

    The first partition of type 'linux' becomes the boot partition; without
    one, lilo cannot be installed and an error dialog is shown instead.
    """
    partitions = []
    self.cfg.cur_boot_partition = None
    for p in self.cfg.boot_partitions:
      dev = p[0]
      fs = p[1]
      t = p[2]
      label = self._labelPerDevice[dev]
      if not self.cfg.cur_boot_partition and t == 'linux':
        self.cfg.cur_boot_partition = dev
      partitions.append([dev, fs, t, label])
    if self.cfg.cur_boot_partition:
      self._lilo.createConfiguration(self.cfg.cur_mbr_device, self.cfg.cur_boot_partition, partitions)
    else:
      self._errorDialog(_("Sorry, BootSetup is unable to find a Linux filesystem on your choosen boot entries, so cannot install LiLo.\n"))
def _editLiLoConf(self, button):
lilocfg = self._lilo.getConfigurationPath()
if not os.path.exists(lilocfg):
self._custom_lilo = True
self._create_lilo_config()
if os.path.exists(lilocfg):
launched = False
for editor in self._editors:
try:
slt.execCall([editor, lilocfg], shell=True, env=None)
launched = True
break
except:
pass
if not launched:
self._custom_lilo = False
self._errorDialog(_("Sorry, BootSetup is unable to find a suitable text editor in your system. You will not be able to manually modify the LiLo configuration.\n"))
self._updateLiLoButtons()
def _cancelLiLoConf(self, button):
lilocfg = self._lilo.getConfigurationPath()
if os.path.exists(lilocfg):
os.remove(lilocfg)
self._custom_lilo = False
self._updateLiLoButtons()
def _set_sensitive_rec(self, w, state):
w.sensitive = state
if hasattr(w, "widget_list"):
for w2 in w.widget_list:
self._set_sensitive_rec(w2, state)
elif hasattr(w, "cells"):
for w2 in w.cells:
self._set_sensitive_rec(w2, state)
  def _updateLiLoButtons(self):
    # Grey out the label table while a hand-edited configuration is active.
    self._set_sensitive_rec(self._liloTable, not self._custom_lilo)
    self._liloTableLines.sensitive = not self._custom_lilo
    self._updateScreen()
  def _onGrub2FilesChange(self, combo, partition, pos):
    """Combo callback: record where the Grub2 files will live and refresh the edit button."""
    if self._isDeviceValid(partition):
      self.cfg.cur_boot_partition = partition
      self._updateGrub2EditButton()
      return True
    else:
      self._updateGrub2EditButton(False)
      return False
  def _updateGrub2EditButton(self, doTest=True):
    """Enable the Grub2 'Edit configuration' button only when etc/default/grub
    exists on the chosen partition (mounted temporarily when necessary)."""
    if doTest:
      partition = os.path.join("/dev", self.cfg.cur_boot_partition)
      if slt.isMounted(partition):
        mp = slt.getMountPoint(partition)
        doumount = False
      else:
        mp = slt.mountDevice(partition)
        doumount = True  # we mounted it, so we unmount it afterwards
      self._grub2_conf = os.path.exists(os.path.join(mp, "etc/default/grub"))
      if doumount:
        slt.umountDevice(mp)
    else:
      self._grub2_conf = False
    self._grub2BtnEdit.sensitive = self._grub2_conf
    self._updateScreen()
def _editGrub2Conf(self, button):
partition = os.path.join("/dev", self.cfg.cur_boot_partition)
if slt.isMounted(partition):
mp = slt.getMountPoint(partition)
doumount = False
else:
mp = slt.mountDevice(partition)
doumount = True
grub2cfg = os.path.join(mp, "etc/default/grub")
launched = False
for editor in self._editors:
try:
slt.execCall([editor, grub2cfg], shell=True, env=None)
launched = True
break
except:
pass
if not launched:
self._errorDialog(_("Sorry, BootSetup is unable to find a suitable text editor in your system. You will not be able to manually modify the Grub2 default configuration.\n"))
if doumount:
slt.umountDevice(mp)
  def _onInstall(self, btnInstall):
    """Install the selected bootloader, generating the lilo config first if needed."""
    if self.cfg.cur_bootloader == 'lilo':
      if not os.path.exists(self._lilo.getConfigurationPath()):
        self._create_lilo_config()
      self._lilo.install()
    elif self.cfg.cur_bootloader == 'grub2':
      self._grub2.install(self.cfg.cur_mbr_device, self.cfg.cur_boot_partition)
    self.installation_done()
  def installation_done(self):
    """Notify the user that installation finished, then leave the UI."""
    print("Bootloader Installation Done.")
    msg = _("Bootloader installation process completed.")
    self._infoDialog(msg)
    self.main_quit()
  def main_quit(self):
    """Release the bootloader helpers and stop the urwid main loop."""
    if self._lilo:
      del self._lilo
    if self._grub2:
      del self._grub2
    print("Bye _o/")
    # ExitMainLoop is urwid's normal way to terminate MainLoop.run().
    raise urwidm.ExitMainLoop()
|
{"/bootsetup/bootsetup_gtk.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathergui.py"], "/bootsetup/gathergui.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"], "/bootsetup/bootsetup.py": ["/bootsetup/__init__.py", "/bootsetup/bootsetup_gtk.py", "/bootsetup/bootsetup_curses.py"], "/bootsetup/bootsetup_curses.py": ["/bootsetup/bootsetup.py", "/bootsetup/gathercurses.py"], "/bootsetup/gathercurses.py": ["/bootsetup/__init__.py", "/bootsetup/config.py", "/bootsetup/lilo.py", "/bootsetup/grub2.py"]}
|
23,109
|
Anishaagr/Features
|
refs/heads/master
|
/features/utilities/jsonreader.py
|
import json
import os

# NOTE(review): machine-specific absolute path, kept for backward
# compatibility — consider deriving it from __file__ so the suite also
# runs on other machines / CI.
BASE_PATH = "C:\\Users\\anisha.agarwal\\PycharmProjects\\cortex\\features\\"
country_file = os.path.join(BASE_PATH, "data\\country.json")


def read_json(path=country_file):
    """Load and return the JSON document at *path* (default: the country fixture)."""
    with open(path, encoding="utf-8") as file:
        return json.load(file)
|
{"/features/steps/step_imp_context_search.py": ["/features/utilities/jsonreader.py"], "/features/environment.py": ["/features/steps/step_imp_context_search.py"], "/features/steps/step_def_context_search.py": ["/features/steps/step_imp_context_search.py"]}
|
23,110
|
Anishaagr/Features
|
refs/heads/master
|
/features/steps/step_imp_context_search.py
|
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import *
from selenium.webdriver import Chrome
from features.utilities.jsonreader import read_json
from selenium.webdriver.common.action_chains import ActionChains
import time
chromeDriverEXE = '/utilities/chromedriver.exe'  # NOTE(review): verify this path resolves from the working directory on the test machine
country_data = read_json()  # shared country fixture, loaded once at import time
list_of_country_from_ui = []
# XPath locators for the advanced-search page.
ADVANCE_SEARCH = "//a[text()='Advanced search']"
APPLIANCE_MODEL_DROP_DOWN = "//input[@id='vdl-input-7']"
APPLIANCE_MODEL_3340 = "//vdl-checkbox//div[text()= '3340 ']"
APPLIANCE_MODEL_5220 = "//vdl-checkbox//div[text()= '5220 ']"
ADD_FILTER = "//lib-string-filter//div[text()='Add Filter']"
CLICK_FILTER_DROP_DOWN = "//input[@id='vdl-input-2']"
ADD_COUNTRY_FILTER = "//vdl-checkbox//div[text()='Country ']"
ADD_STATE_FILTER = "//vdl-checkbox//div[text()='State ']"
ADD_CITY_FILTER = "//vdl-checkbox//div[text()='City ']"
ADD_ACCOUNTNAME_FILTER = "//vdl-checkbox//div[text()='Account Name ']"
ADD_HOSTNAME_FILTER = "//vdl-checkbox//div[text()='Hostname ']"
COUNTRY_DROP_DOWN = "//input[@id='vdl-input-17']"
COUNTRY_COLUMN = "//lib-string-filter//div[text()='Country']"
# Fix: the closing "']" was missing (compare LIST_OF_COUNTRY below), making this XPath invalid.
LIST_OF_COUNTRY_IN_COUNTRY_DROP_DOWN = "//*[@class='vdl-checkbox-label-text ng-star-inserted']"
CLEAR_SELECTED = "//div[@class='cdk-overlay-pane']//div[text()='Clear selected items']"
VERSION_2_7_1 = "//vdl-checkbox//div[text()='2.7.1 ']"
VERSION_DROP_DOWN = "//input[@id='vdl-input-9']"
NO_SEARCH_RESULTS = "//b[text()=' No Search Results']"
NOT_FOUND = "//div[contains(@id,'cdk-overlay-')]//vdl-checkbox"
STATE_DROP_DOWN = "//input[@id='vdl-input-19']"
CITY_DROP_DOWN = "//input[@id='vdl-input-21']"
ACCOUNTNAME_DROP_DOWN = "//input[@id='vdl-input-23']"
HOSTNAME_DROP_DOWN = "//input[@id='vdl-input-25']"
LIST_OF_COUNTRY = "//*[@class='vdl-checkbox-label-text ng-star-inserted']"
ERROR_ICON = "//vdl-icon[@class='invalid-selection-icon vdl-icon fa fa-exclamation-circle ng-star-inserted']"
ERROR_MESSAGE = "//div[@class='vdl-tooltip ng-trigger ng-trigger-state']"
# Transient DOM states WebDriverWait should retry through instead of failing.
IGNORED_EXCEPTIONS = [NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException,
                      ElementClickInterceptedException, StaleElementReferenceException]
def cortex_ui_page(url):
    """Launch Chrome, open *url* maximized and publish the driver globally.

    The module-level ``driver`` is shared by every other helper in this file.
    """
    global driver
    driver = Chrome(executable_path=chromeDriverEXE)
    driver.get(url)
    driver.maximize_window()
    print(driver)
    return driver
def webdriver_shutdown():
    """Close the current window and quit the shared browser session."""
    driver.close()
    driver.quit()
def xpath(element):
    """Wait (up to 20s, polling every 2s) for *element* and return it."""
    wait = WebDriverWait(driver, 20, poll_frequency=2, ignored_exceptions=IGNORED_EXCEPTIONS)
    return wait.until(EC.presence_of_element_located((By.XPATH, element)))
def navigate_to_advance_serach_page():
    """Click the 'Advanced search' link and give the page time to load."""
    driver.find_element_by_xpath(ADVANCE_SEARCH).click()
    time.sleep(5)
def clear_selected(drop_down_filter_name):
    """Open the named primary filter and click 'Clear selected items'.

    The //body click first dismisses any overlay that is still open.
    Only 'Version' and 'Appliance Model' are supported; any other name
    is silently a no-op.
    """
    if drop_down_filter_name == 'Version':
        driver.find_element_by_xpath("//body").click()
        xpath(VERSION_DROP_DOWN).click()
        time.sleep(1)
        xpath(CLEAR_SELECTED).click()
    elif drop_down_filter_name == "Appliance Model":
        driver.find_element_by_xpath("//body").click()
        xpath(APPLIANCE_MODEL_DROP_DOWN).click()
        xpath(CLEAR_SELECTED).click()
def select_appliance_model(model_no):
    """Tick the checkbox for appliance model '3340' or '5220'.

    NOTE(review): only the 3340 branch clears a previous selection first —
    confirm whether 5220 is intentionally additive.
    """
    if model_no == "3340":
        driver.find_element_by_xpath("//body").click()
        xpath(APPLIANCE_MODEL_DROP_DOWN).click()
        # Clear any leftover selection only if the option is present.
        if len(driver.find_elements_by_xpath(CLEAR_SELECTED)):
            xpath(CLEAR_SELECTED).click()
        xpath(APPLIANCE_MODEL_3340).click()
    elif model_no == "5220":
        driver.find_element_by_xpath("//body").click()
        xpath(APPLIANCE_MODEL_DROP_DOWN).click()
        xpath(APPLIANCE_MODEL_5220).click()
def select_version(version):
    """Open the Version drop-down and tick the checkbox for *version*.

    Fix: the original ignored the *version* argument and always clicked the
    hard-coded 2.7.1 checkbox.  The locator is now built from the argument;
    the trailing space matches the page markup (see VERSION_2_7_1), so
    select_version("2.7.1") behaves exactly as before.
    """
    driver.find_element_by_xpath("//body").click()
    xpath(VERSION_DROP_DOWN).click()
    xpath(f"//vdl-checkbox//div[text()='{version} ']").click()
def add_filters():
    """Add the five secondary filters (Country/State/City/Account/Hostname).

    Each click toggles a checkbox in the 'Add Filter' drop-down; the short
    sleep lets the UI render the newly added filters.
    """
    xpath(CLICK_FILTER_DROP_DOWN).click()
    xpath(ADD_COUNTRY_FILTER).click()
    xpath(ADD_STATE_FILTER).click()
    xpath(ADD_CITY_FILTER).click()
    xpath(ADD_ACCOUNTNAME_FILTER).click()
    xpath(ADD_HOSTNAME_FILTER).click()
    time.sleep(1)
def verify_context_search_for_country(COUNTRY_LIST_FOR_APPLIANCE_MODEL, appliance_model):
    """Open the Country drop-down and diff the UI entries against the fixture.

    Removes every country found in the UI from the caller's expected list
    (the argument is mutated in place) and returns the leftovers — an empty
    list means the UI showed exactly the expected countries.
    NOTE(review): results are also appended to the module-level
    ``list_of_country_from_ui``, which is never cleared between calls.
    """
    driver.find_element_by_xpath("//body").click()
    xpath(COUNTRY_DROP_DOWN).click()
    count_of_country = len(driver.find_elements_by_xpath(LIST_OF_COUNTRY))
    print(f"SELECTED APPLIANCE MODEL ------> {appliance_model}")
    print(f"COUNT OF COUNTRY LISTED IS: {count_of_country}")
    time.sleep(1)
    countries = driver.find_elements_by_xpath(LIST_OF_COUNTRY)
    for a in countries:
        list_of_country_from_ui.append(a.text)
        if a.text in COUNTRY_LIST_FOR_APPLIANCE_MODEL:
            COUNTRY_LIST_FOR_APPLIANCE_MODEL.remove(a.text)
    print(f"LIST OF COUNTRY FETCHED FROM UI: \n{list_of_country_from_ui}")
    print("COUNTRY LIST IN COMMON METHOD", COUNTRY_LIST_FOR_APPLIANCE_MODEL)
    return COUNTRY_LIST_FOR_APPLIANCE_MODEL
def click_on_country_drop_down_for_appliance_model(model_no):
    """Verify the Country drop-down contents for a supported appliance model.

    Fix: both original branches were byte-identical; they are collapsed into
    a single membership test.  Returns the list of expected countries the UI
    failed to show, or None for an unsupported model (same as before).
    """
    if model_no in ("3340", "5220"):
        return verify_context_search_for_country(country_data["appliance_model"][model_no], model_no)
def verify_context_search_for_secondary_drop_down_filters(drop_down):
    """Open the named secondary filter and count the entries it offers.

    Waits for the 'No Search Results' banner first, dismisses any overlay,
    then clicks the matching drop-down and returns how many checkbox
    entries the overlay contains (0 means the drop-down is empty).
    """
    xpath(NO_SEARCH_RESULTS)
    time.sleep(2)
    driver.find_element_by_xpath("//body").click()
    # Map the human-readable filter name to its input locator.
    locators = {
        "Country": COUNTRY_DROP_DOWN,
        "State": STATE_DROP_DOWN,
        "City": CITY_DROP_DOWN,
        "Account Name": ACCOUNTNAME_DROP_DOWN,
        "Hostname": HOSTNAME_DROP_DOWN,
    }
    locator = locators.get(drop_down)
    if locator is not None:
        driver.find_element_by_xpath(locator).click()
    return len(driver.find_elements_by_xpath(NOT_FOUND))
def select_countries(count):
    """Tick the first *count* entries of the Country drop-down.

    *count* may be a string (it comes straight from the feature file).
    """
    driver.find_element_by_xpath("//body").click()
    xpath(COUNTRY_DROP_DOWN).click()
    time.sleep(1)
    countries = driver.find_elements_by_xpath(LIST_OF_COUNTRY)
    for country in countries[:int(count)]:
        time.sleep(1)
        country.click()
def error_indicator():
    """Hover the invalid-selection icon and return its tooltip text."""
    time.sleep(1)
    driver.find_element_by_xpath("//body").click()
    error_icon = xpath(ERROR_ICON)
    # Hovering is required for the tooltip element to appear in the DOM.
    ActionChains(driver).move_to_element(error_icon).perform()
    time.sleep(1)
    return xpath(ERROR_MESSAGE).text
|
{"/features/steps/step_imp_context_search.py": ["/features/utilities/jsonreader.py"], "/features/environment.py": ["/features/steps/step_imp_context_search.py"], "/features/steps/step_def_context_search.py": ["/features/steps/step_imp_context_search.py"]}
|
23,111
|
Anishaagr/Features
|
refs/heads/master
|
/features/runnerfile.py
|
import sys
from behave import __main__ as runnerfile
# Entry point: run behave with the Allure formatter writing to allure/results
# and keep stdout uncaptured so step prints are visible.
if __name__ == '__main__':
    sys.stdout.flush()
    report_generation = "-f allure_behave.formatter:AllureFormatter -o allure/results "
    command_line_args = ' --no-capture'
    runnerfile.main(report_generation + command_line_args)
|
{"/features/steps/step_imp_context_search.py": ["/features/utilities/jsonreader.py"], "/features/environment.py": ["/features/steps/step_imp_context_search.py"], "/features/steps/step_def_context_search.py": ["/features/steps/step_imp_context_search.py"]}
|
23,112
|
Anishaagr/Features
|
refs/heads/master
|
/features/environment.py
|
from features.steps.step_imp_context_search import webdriver_shutdown, cortex_ui_page, add_filters, clear_selected, navigate_to_advance_serach_page
# Application under test; behave hooks below match on feature/scenario names.
URL = "http://localhost:4201/"
def before_feature(context, feature):
    """Launch the browser once for the context-search feature."""
    if "Context Search between primary filters and secondary drop-down filters" in str(feature):
        cortex_ui_page(URL)
def before_scenario(context, scenario):
    """Navigate to advanced search and add the secondary filters."""
    if "Select Appliance model 3340 and verify context search in Country drop-down filter" in str(scenario):
        navigate_to_advance_serach_page()
        add_filters()
def before_step(context, step):
    """Reset primary-filter selections before the country-selection step."""
    if 'Select 1st "8" countries from Country drop-down' in str(step):
        clear_selected("Version")
        clear_selected("Appliance Model")
def after_feature(context, feature):
    """Tear the browser down after the context-search feature."""
    if "Context Search between primary filters and secondary drop-down filters" in str(feature):
        webdriver_shutdown()
|
{"/features/steps/step_imp_context_search.py": ["/features/utilities/jsonreader.py"], "/features/environment.py": ["/features/steps/step_imp_context_search.py"], "/features/steps/step_def_context_search.py": ["/features/steps/step_imp_context_search.py"]}
|
23,113
|
Anishaagr/Features
|
refs/heads/master
|
/features/steps/step_def_context_search.py
|
from behave import given, when, then
from features.steps.step_imp_context_search import *
# Exact tooltip text expected when invalid countries are selected for the
# current Appliance Model / Version combination (mixed quoting is verbatim
# from the application).
expected_error_message = """The selected Country(s) 'Argentina", "Austria", "Belgium", "Canada", "Chile", "China' may not be valid for your current Appliance Model and/or Version selection."""
# @given('On Advance Search page')
# def step_impl(context):
#     print("On advance search page")
#     cortex_ui_page("http://localhost:4201/")
#     navigate_to_advance_serach_page()
#     add_filters()
@when('Select Appliance Model "{model_no}" from Appliance Model drop-down')
def step_impl(context, model_no):
    """Select the given Appliance Model in the primary filter."""
    select_appliance_model(model_no)
@then('Get list of Country for "{model_no}" appliance model')
def step_imp(context, model_no):
    """Fetch the Country drop-down values and diff them against the fixture."""
    count = click_on_country_drop_down_for_appliance_model(model_no)
    # An empty leftover list means every expected country was shown.
    assert len(count) == 0, "Country list verification failed"
@when('Select Version "{version}" from Version drop-down')
def step_impl(context, version):
    """Select the given version in the Version primary filter."""
    select_version(version)
@then('Verify "{drop_down}" drop-down has no values')
def step_impl(context, drop_down):
    """Verify a secondary drop-down is empty when the primary filters have no related data."""
    values_in_drop_down = verify_context_search_for_secondary_drop_down_filters(drop_down)
    assert values_in_drop_down == 0, f"{drop_down} drop-down is not empty"
@when('Select 1st "{count}" countries from Country drop-down')
def step_impl(context, count):
    """Select the first *count* values from the Country drop-down."""
    select_countries(count)
    print("Select 1st eight countries from Country drop-down")
@then('Verify error indicator over Country drop-down')
def step_impl(context):
    """Verify the error tooltip shown for invalid country selections."""
    error_message = error_indicator()
    print("returned error message is : ", '\n', error_message)
    assert error_message == expected_error_message, "error message is not displayed"
|
{"/features/steps/step_imp_context_search.py": ["/features/utilities/jsonreader.py"], "/features/environment.py": ["/features/steps/step_imp_context_search.py"], "/features/steps/step_def_context_search.py": ["/features/steps/step_imp_context_search.py"]}
|
23,201
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Proyecto/clienteAsist.py
|
from PySide6 import QtWidgets
from estudiante import Estudiante
import sys
import socket
import pickle
# Remote assignment server address.
host = '3.16.226.150'
port = 9997
class Menu(QtWidgets.QWidget):
    """Small form (name/mail/password) that uploads a ZIP file over a socket.

    Protocol (as implemented in submitAlumno): send a pickled Estudiante,
    then b'INI', then the pickled ZIP bytes in 1024-byte chunks (awaiting an
    ACK after each), then b'FIN'.
    """
    def __init__(self, parent=None):
        super(Menu, self).__init__(parent)
        nameLabel1 = QtWidgets.QLabel("Nombre:")
        self.nameLine = QtWidgets.QLineEdit()
        nameLabel2 = QtWidgets.QLabel("Correo:")
        self.mailLine = QtWidgets.QLineEdit()
        nameLabel3 = QtWidgets.QLabel("Contraseña:")
        self.passLine = QtWidgets.QLineEdit()
        self.submitButton = QtWidgets.QPushButton("&Buscar y enviar")
        self.submitButton.setToolTip("Cargar archivo .zip")
        self.submitButton.clicked.connect(self.submitAlumno)
        buttonLayout1 = QtWidgets.QVBoxLayout()
        buttonLayout1.addWidget(self.submitButton)
        mainLayout = QtWidgets.QGridLayout()
        mainLayout.addWidget(nameLabel1, 0, 0)
        mainLayout.addWidget(self.nameLine, 0, 1)
        mainLayout.addWidget(nameLabel2, 1, 0)
        mainLayout.addWidget(self.mailLine, 1, 1)
        mainLayout.addWidget(nameLabel3, 2, 0)
        mainLayout.addWidget(self.passLine, 2, 1)
        mainLayout.addLayout(buttonLayout1, 1, 2)
        self.setLayout(mainLayout)
        self.setWindowTitle("Proyecto")
    def submitAlumno(self):
        """Connect to the server, send the student record, then stream a ZIP.

        NOTE(review): pickle over the network is unsafe against untrusted
        peers; the socket and the opened file are never explicitly closed on
        early return / error paths.
        """
        s = socket.socket()
        # Port = 9997 final project, 9998 testing
        s.connect((host, port))
        estudiante = Estudiante(self.nameLine.text(), self.mailLine.text(), self.passLine.text())
        estudiante_seriado = pickle.dumps(estudiante)
        s.send(estudiante_seriado)
        res = s.recv(1024)
        print(f'Respuesta: \n\t{res.decode()}')
        # Signal the start of the file transfer.
        s.send(b'INI')
        res = s.recv(1024)
        print(f'Respuesta: \n\t{res.decode()}')
        fileName, _ = QtWidgets.QFileDialog.getOpenFileName(self,
                                                            "Open ZIP file",
                                                            '',
                                                            "Zip file (*.zip);;All Files (*)")
        if not fileName:
            return
        # NOTE(review): fileName2 is never closed — consider a with-block.
        fileName2 = open(fileName, 'rb')
        fileName_seriado = pickle.dumps(fileName2.read())
        i = True
        j = 0
        # Send the pickled payload in 1 KiB slices, waiting for an ACK each time.
        while i:
            chunk = fileName_seriado[j: j + 1024]
            if not chunk:
                i = False
                continue
            s.send(chunk)
            res = s.recv(1024)
            print(f'Respuesta: \n\t{res.decode()}')
            j += 1024
        # Signal the end of the transfer.
        s.send(b'FIN')
        res = s.recv(1024)
        print(f'Respuesta: \n\t{res.decode()}')
        s.close()
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    # Fix: the original did `Menu = Menu()`, shadowing the class with its own
    # instance; use a distinct lowercase name for the instance.
    menu = Menu()
    menu.show()
    sys.exit(app.exec_())
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,202
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Pack/Tarea2_Modulo.py
|
import re
def validacion(correo, telefono, curp, rfc):
    """Validate an email, a phone number, a CURP and an RFC, printing a
    'valido/no valido' line for each.

    Checks (intentionally weak, see the [PV] review notes):
      - correo: must contain '@' and a '.' in the domain part.
      - telefono: must contain exactly 10 digit characters.
      - curp: 18 characters and at least one non-whitespace character.
      - rfc: 13 characters and at least one non-whitespace character.

    Fix: the invalid-RFC branch printed a CURP message; it now reports the
    RFC.  Regex patterns use raw strings.
    """
    # [PV] Validation is very weak: entering just '@.' counts as valid.
    # A simple stronger pattern: '[a-z.]+@([a-z.]+){1,2}[a-z]{2-3}'
    valida_correo = re.search(r"@", correo)  # looks for the @
    valida_correo_dominio = re.split(r"@", correo)  # splits the email in two
    # [PV] Checks there are 10 digits but not the grouping, e.g.
    # r'\([0-9]{3}\) [0-9]{3}-[0-9]{4}'
    valida_telefono = re.findall(r"\d", telefono)  # digit characters only
    # [PV] All-numeric input is accepted as a valid CURP; a proper pattern is
    # [A-Z]{4}[0-9]{6}[H|M][A-Z]{5}[A-Z0-9]{2}
    valida_curp = re.search(r"\S", curp)  # any non-whitespace character
    # [PV] All-numeric input is accepted as a valid RFC; a proper pattern is
    # [A-Z]{4}[0-9]{6}[A-Z0-9]{3}
    valida_rfc = re.search(r"\S", rfc)  # any non-whitespace character
    if valida_correo:
        if re.search(r"[.]", valida_correo_dominio[1]):  # dot in the domain
            print(f'Correo {correo} valido.')
        else:
            print(f'Correo {correo} no valido.')
    else:
        print(f'Correo {correo} no valido.')
    if len(valida_telefono) == 10:  # exactly 10 digits
        print(f'Numero {telefono} valido.')
    else:
        print(f'Numero {telefono} no valido.')
    if len(curp) == 18:  # CURPs are 18 characters long
        if valida_curp:
            print(f'CURP {curp} valida.')
        else:
            print(f'CURP {curp} no valida.')
    else:
        print(f'CURP {curp} no valida.')
    if len(rfc) == 13:  # RFCs are 13 characters long
        if valida_rfc:
            print(f'RFC {rfc} valida.')
        else:
            # Fix: used to print a CURP message here.
            print(f'RFC {rfc} no valida.')
    else:
        print(f'RFC {rfc} no valida.')
# elliotruizs@ieee.org
# 4775531264
# RUSE960823HGTZNL03
# RUSE9608231H0
# Para validar de una manera mas precisa el rfc y el curp
# se debe validar los diferentes grupos de datos para su construccion
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,203
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Tarea5/main.py
|
import sys
from PySide6 import QtWidgets, QtCore
from PySide6.QtWidgets import QLineEdit
from PySide6.QtWidgets import *
from mongoengine import *
# Open the default mongoengine connection to the local 'IECA' database.
connect('IECA', host='Localhost', port=27017)
class estudiantes(Document):
    """Mongoengine document for a student record (collection 'estudiantes')."""
    # All fields are required plain strings.
    Nombre_estudiante = StringField(required=True, max_length=200)
    Correo_estudiantil = StringField(required=True)
    Contrasenia = StringField(required=True)
    Materias = StringField(required=True)
# def escritura(student):
# def lectura():
# def modificacion():
class Menu(QtWidgets.QWidget):
    """Single-window CRUD menu over the 'estudiantes' Mongo collection.

    NOTE(review): escritura() reads the QLineEdit *widgets* instead of their
    text and discards the comparison expressions, so the duplicate check and
    the saved document are both broken — see inline notes.  Verify intent
    against the working console version in Tarea4.py before fixing.
    """
    class Estudiantes:
        """Plain in-memory student value object."""
        nombre = ""
        correo = ""
        contrasenia = ""
        materias = ""
        def __init__(self, nombre, correo, contrasenia, materias):
            self.nombre = nombre
            self.correo = correo
            self.contrasenia = contrasenia
            self.materias = materias
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Tarea 5")
        self.layout = QtWidgets.QVBoxLayout(self)
        # The same attribute (t1) is reused for every label to use less memory.
        self.t1 = QtWidgets.QLabel("TAREA 5", alignment=QtCore.Qt.AlignCenter)
        self.layout.addWidget(self.t1)
        self.B1 = QtWidgets.QPushButton("1. Ingresar estudiante")
        self.layout.addWidget(self.B1)
        self.B2 = QtWidgets.QPushButton("2. Modificar estudiantes")
        self.layout.addWidget(self.B2)
        self.B3 = QtWidgets.QPushButton("3. Mostrar Estudiantes")
        self.layout.addWidget(self.B3)
        self.B4 = QtWidgets.QPushButton("4. Salir")
        self.layout.addWidget(self.B4)
        self.B1.clicked.connect(self.escritura)
        self.B2.clicked.connect(self.modificacion)
        self.B3.clicked.connect(self.lectura)
        self.B4.clicked.connect(quit)
        # NOTE(review): this re-creates the layout assigned above — confirm
        # whether the duplicate line is intentional.
        self.layout = QtWidgets.QVBoxLayout(self)
    def escritura(self):
        """Build the input fields and (attempt to) insert a student."""
        self.t1 = QtWidgets.QLabel("Ingresa nombre del estudiante: ")
        self.layout.addWidget(self.t1)
        self.e1 = QLineEdit()
        self.layout.addWidget(self.e1)
        self.t1 = QtWidgets.QLabel("Ingresa correo del estudiante: ")
        self.layout.addWidget(self.t1)
        self.e2 = QLineEdit()
        self.layout.addWidget(self.e2)
        self.t1 = QtWidgets.QLabel("Ingresa contrasenia del estudiante: ")
        self.layout.addWidget(self.t1)
        self.e3 = QLineEdit()
        self.layout.addWidget(self.e3)
        self.t1 = QtWidgets.QLabel("Ingresa materias del estudiante: ")
        self.layout.addWidget(self.t1)
        self.e4 = QLineEdit()
        self.layout.addWidget(self.e4)
        # NOTE(review): these hold the QLineEdit widgets, not their .text();
        # the widgets were also just created, so the user has typed nothing yet.
        comp_nom = self.e1
        comp_cor = self.e2
        comp_con = self.e3
        comp_mat = self.e4
        aceptado = True
        repetido = None
        for datos in estudiantes.objects:
            # NOTE(review): these four comparisons are bare expressions whose
            # results are discarded — the duplicate check below always sees
            # truthy widget objects.
            comp_nom != datos.Nombre_estudiante
            comp_cor != datos.Correo_estudiantil
            comp_con != datos.Contrasenia
            comp_mat != datos.Materias
            if comp_nom and comp_cor and comp_con and comp_mat:
                aceptado = True
                repetido = False
            else:
                print("Estudiante ya ingresado")
                repetido = True
                input("Presione enter para continuar")
        if aceptado:
            # NOTE(review): widgets (not strings) are stored in the document.
            datos = estudiantes(
                Nombre_estudiante=comp_nom,
                Correo_estudiantil=comp_cor,
                Contrasenia=comp_con,
                Materias=comp_mat)
            if repetido is True:
                pass
            else:
                datos.save()
    def modificacion(self):
        """Rename the hard-coded 'Cesar' record and change its Materias."""
        p = estudiantes.objects(Nombre_estudiante="Cesar")
        estudiantes.objects(Nombre_estudiante=p[0].Nombre_estudiante).update_one(set__Nombre_estudiante="Hola")
        estudiantes.objects(Materias=p[0].Materias).update_one(set__Materias="Adios")
        # NOTE(review): the field is named Contrasenia (see the Document
        # definition); 'Contraseña' will raise AttributeError.
        print(p[0].Contraseña)
        p[0].save()
    def lectura(self):
        """Append one group of labels per stored student to the layout."""
        i = 1
        for Datos in estudiantes.objects:
            self.t1 = QtWidgets.QLabel(f"Estudiante {i}")
            self.layout.addWidget(self.t1)
            self.t1 = QtWidgets.QLabel(estudiantes.Nombre_estudiante.name)
            self.layout.addWidget(self.t1)
            self.t1 = QtWidgets.QLabel(Datos.Nombre_estudiante)
            self.layout.addWidget(self.t1)
            self.t1 = QtWidgets.QLabel(estudiantes.Correo_estudiantil.name)
            self.layout.addWidget(self.t1)
            self.t1 = QtWidgets.QLabel(Datos.Correo_estudiantil)
            self.layout.addWidget(self.t1)
            self.t1 = QtWidgets.QLabel(estudiantes.Contrasenia.name)
            self.layout.addWidget(self.t1)
            self.t1 = QtWidgets.QLabel(Datos.Contrasenia)
            self.layout.addWidget(self.t1)
            self.t1 = QtWidgets.QLabel(estudiantes.Materias.name)
            self.layout.addWidget(self.t1)
            self.t1 = QtWidgets.QLabel(Datos.Materias)
            self.layout.addWidget(self.t1)
            i += 1
        # i is still 1 only when the loop body never ran (empty collection).
        if i == 1:
            self.t1 = QtWidgets.QLabel("Base de datos vacias.")
            self.layout.addWidget(self.t1)
# Entry point: build the menu window and start the Qt event loop.
if __name__ == '__main__':
    app = QtWidgets.QApplication([])
    widget = Menu()
    widget.resize(600, 450)
    widget.show()
    sys.exit(app.exec_())
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,204
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Tarea4.py
|
from mongoengine import *
# Open the default mongoengine connection to the local 'IECA' database.
connect('IECA', host='Localhost', port=27017)
# Pagina mas especifica de mongo engine https://www.tutorialspoint.com/mongoengine/mongoengine_atomic_updates.htm
class estudiantes(Document):
    """Mongoengine document for a student record (collection 'estudiantes')."""
    # All fields are required plain strings.
    Nombre_estudiante = StringField(required=True, max_length=200)
    Correo_estudiantil = StringField(required=True)
    Contrasenia = StringField(required=True)
    Materias = StringField(required=True)
# class estudiantesCopia(Document):
#     Nombre_estudiante = StringField(required=True,max_length=200)
#     Correo_estudiantil = StringField(required=True)
#     Contraseña = StringField(required=True)
#     Materias = StringField(required=True)
class Estudiantes:
    """Plain in-memory student value object used as input for escritura()."""

    # Class-level defaults are kept for backward compatibility with any code
    # that reads the attributes before __init__ runs.
    nombre = ""
    correo = ""
    contrasenia = ""
    materias = ""

    def __init__(self, nombre, correo, contrasenia, materias):
        self.nombre, self.correo = nombre, correo
        self.contrasenia, self.materias = contrasenia, materias
# def base_Datos:
def escritura(student):
    """Insert *student* into the collection unless it clashes with an
    existing document (same per-record rule as before: a stored record in
    which ANY of the four fields matches counts as a clash).

    Fix: the original recomputed the duplicate flag on every document, so
    only the LAST document in the collection actually decided whether the
    save happened.  We now stop scanning as soon as a clash is found.
    """
    repetido = False
    for datos in estudiantes.objects:
        if (student.nombre != datos.Nombre_estudiante
                and student.correo != datos.Correo_estudiantil
                and student.contrasenia != datos.Contrasenia
                and student.materias != datos.Materias):
            continue
        print("Estudiante ya ingresado")
        repetido = True
        input("Presione enter para continuar")
        break
    if not repetido:
        estudiantes(
            Nombre_estudiante=student.nombre,
            Correo_estudiantil=student.correo,
            Contrasenia=student.contrasenia,
            Materias=student.materias).save()
def lectura():
    """Print every stored student, or a notice when the collection is empty."""
    total = 0
    for total, registro in enumerate(estudiantes.objects, start=1):
        print(f"\t****Estudiante{total}****")
        print(f"\t{estudiantes.Nombre_estudiante.name}:{registro.Nombre_estudiante}")
        print(f"\t{estudiantes.Correo_estudiantil.name}:{registro.Correo_estudiantil}")
        print(f"\t{estudiantes.Contrasenia.name}:{registro.Contrasenia}")
        print(f"\t{estudiantes.Materias.name}:{registro.Materias}")
        print("")
    # total is still 0 only when the loop body never ran.
    if total == 0:
        print("Base de datos esta vacia")
def eliminacion():
    """Delete every document in the collection.

    [PV] Only a single user should be deleted here, not the whole collection.
    """
    estudiantes.objects.delete()
    print("La base de datos fue vaciada")
    print("")
def modificacion():
    """Rename the hard-coded 'Elliot' record and change its Materias.

    [PV] To modify, first fetch an object, apply the changes, then save again.
    """
    # User = input("Ingresa nombre de usuario a modificar: ")
    # User_nuev = input("Ingresa el nuevo nombre el usuario");
    # estudiantes.objects(Nombre_estudiante = User).update_one(set__Nombre_estudiante = User_nuev)
    # estudiantes.objects(Nombre_estudiante="Elliot").delete()
    # Nombre = input("Nuevo nombre usuario: ")
    # Correo = input("Nuevo Correo: ")
    # Contra = input("Nueva contraseña: ")
    # Materias = input("Nuevas Materias: ")
    # Modify= estudiantes.objects(Nombre_estudiante=Nombre,
    #                              Correo_estudiantil=Correo,
    #                              Contraseña=Contra,
    #                              Materias=Materias)
    # Eliminar[0].delete()
    # Eliminar[0].deleted()
    p = estudiantes.objects(Nombre_estudiante="Elliot")
    # Atomic updates keyed off the fetched document's current field values.
    estudiantes.objects(Nombre_estudiante=p[0].Nombre_estudiante).update_one(set__Nombre_estudiante="Hola")
    estudiantes.objects(Materias=p[0].Materias).update_one(set__Materias="Adios")
    print(p[0].Contrasenia)
    # NOTE(review): p[0] is the stale pre-update object; saving it may write
    # the old field values back — confirm this is intended.
    p[0].save()
def menu():
    """Console loop: dispatch CRUD actions until the user picks option 5."""
    while True:
        print("\n\t\t\tTAREA 4\n")
        print("Bienvenido al menu de opciones.\n")
        print("1. Ingresar estudiante.")
        print("2. Modificar estudiante.")
        print("3. Mostrar estudiantes.")
        print("4. Eliminar estudiantes.")
        print("5. Salir")
        opcion = input("Seleciona un opcion: ")
        if opcion == "1":
            print("")
            nombre = input("Ingresa nombre del estudiante: ")
            correo = input("Ingresa correo del estudiante: ")
            contrasenia = input("Ingresa contrasenia del estudiante: ")
            materias = input("Ingresa materias del estudiante: ")
            escritura(Estudiantes(nombre, correo, contrasenia, materias))
        elif opcion == "2":
            modificacion()
        elif opcion == "3":
            lectura()
        elif opcion == "4":
            eliminacion()
        elif opcion == "5":
            break


if __name__ == '__main__':
    menu()
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,205
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Pack/Funciones.py
|
# Un modulo es un archivo con funciones
def funcion(nombre='Elliot', apellido='Ruiz', lista=None):
    """Greet *nombre* *apellido*, print *lista*, set lista[1] = 14, return it.

    Fix: the original used a mutable list literal as the default, so one
    shared list was mutated across every call.  A None sentinel gives each
    call a fresh ['a', 'b', 'c', 'd'].
    """
    if lista is None:
        lista = ['a', 'b', 'c', 'd']
    print('Hello', nombre, apellido)
    print(f'lista: {lista}')
    lista[1] = 14
    return lista
if __name__ == '__main__':
    lista = [1, 2, 3]
    # Pass a copy so the caller's list is not mutated by the function.
    l = funcion(lista=lista.copy())
    print(f'main: {lista}')
    print(l)
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,206
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/ciclos.py
|
# Interactive demo of while/for loops with continue and break.
continuar = True
contador = 0
# Count up while the user keeps answering 's'.
while continuar:
    contador +=1
    print(f'{contador}')
    print('¿Deseas continuar?')
    print('s = Si')
    print('Cualquier caracter = Si')
    respuesta = input()
    if respuesta == 's':
        continue
    else:
        continuar = False
# Iterate 1, 3, 5, 7, 9; stop early via break.
for i in range(1 , 10 , 2):
    print(f'Numero: {i + 1}')
    # The 'f' before the text marks an f-string: variables are interpolated
    # inside the braces within the print call.
    if i == 4:
        break
print('FIN')
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,207
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/clientUDP.py
|
import socket
# UDP client: send one datagram to localhost:12345 and print the reply.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = 'localhost'
puerto = 12345
msg = 'Hello world!'.encode()
sock.sendto(msg, (ip, puerto))
# Blocks until the server answers (up to 1024 bytes).
info, direccion = sock.recvfrom(1024)
print(f"Recibido: {info.decode()} desde {direccion}")
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,208
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/ejemplo/gui.py
|
from PySide2.QtWidgets import QApplication
from PySide2.QtWidgets import QMainWindow
import sys
from main import Ejemplo
# Windows -> designer.exe
# GUI initialization
app = QApplication(sys.argv)
# Main window initialization
window = QMainWindow()
# window = Ejemplo()
# Show the created window
window.show()
# Run the GUI event loop until the window is closed.
sys.exit(app.exec_())
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,209
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/serverUDP.py
|
import socket
# UDP echo-ack server: print each datagram and reply 'Recibido'.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = 'localhost'
puerto = 12345
sock.bind((ip, puerto))
while True:
    print("Esperando paquetes...")
    # Blocks until a datagram arrives (up to 1024 bytes).
    info, direccion = sock.recvfrom(1024)
    print(f"Mensaje: {info.decode()} desde {direccion}")
    sock.sendto('Recibido'.encode(), direccion)
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,210
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Herencia.py
|
class Vehiculo:
    """Base vehicle: wheel/passenger counts and a lights on/off flag."""

    # Name-mangled to _Vehiculo__*, so subclasses cannot reach these directly.
    __llantas = 4
    __personas = 2
    __lucesencendidas = False

    def acelera(self):
        pass

    def enciendeluces(self):
        """Turn the lights on."""
        self.__lucesencendidas = True

    def apagarluces(self):
        """Turn the lights off."""
        self.__lucesencendidas = False

    def lucesencendidas(self):
        """Return whether the lights are on.

        Fix: Motocicleta.__init__ calls self.lucesencendidas(), but no such
        method existed — the flag is name-mangled and unreachable from the
        subclass, so construction raised AttributeError.  This public
        accessor restores the intended behavior.
        """
        return self.__lucesencendidas

    def cuantasllantas(self):
        pass
class Motocicleta(Vehiculo):
    """Motorcycle that turns its lights on at construction time."""
    def __init__(self):
        self.enciendeluces()
        # NOTE(review): Vehiculo defines no lucesencendidas() method and the
        # __lucesencendidas attribute is name-mangled to _Vehiculo__*, so
        # this call raises AttributeError unless an accessor is added to
        # Vehiculo — confirm and fix there.
        print(f'Luces encendidas: {self.lucesencendidas()}')
        pass
if __name__ == '__main__':
    m = Motocicleta()
    print(m)
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,211
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Expresiones.py
|
import re
def suma(a, b):
    """Return ``a + b`` when both arguments look like non-negative integers.

    Each argument is stringified and must consist entirely of one or more
    digits; otherwise None is returned (signs, decimals and blanks are
    rejected, as before).

    Fix: the original pattern ``'[0-9]*$'`` also matched the empty string,
    so ``suma('', 1)`` crashed on ``int('')``; ``+`` requires at least one
    digit.  The pattern is now a raw string.
    """
    patron = r'[0-9]+$'
    if re.match(patron, str(a)) and re.match(patron, str(b)):
        return int(a) + int(b)
    return None
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,212
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/serverTCP.py
|
import socket
# Distinguish client and host
# TCP greeting server: accept one client at a time, send a greeting, close.
server_sock = socket.socket()
host = socket.gethostname()
print(server_sock)
print(host)
port = 9999
server_sock.bind((host, port))
print('Esperando conexiones')
# Backlog of 1 pending connection.
server_sock.listen(1)
while True:
    client_sock, addr = server_sock.accept()
    print(addr)
    print(f'Cliente conectado de la direccion: {addr}')
    msg = 'Hola' + addr[0] + ':' + str(addr[1])
    client_sock.send(msg.encode())
    client_sock.close()
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,213
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/ejemplo/main.py
|
# This Python file uses the following encoding: utf-8
import sys
import os
from PySide2.QtWidgets import QApplication, QWidget
from PySide2.QtCore import QFile
from PySide2.QtUiTools import QUiLoader
class Ejemplo(QWidget):
    """Widget whose layout is loaded at runtime from ejemplo.ui."""
    def __init__(self):
        super(Ejemplo, self).__init__()
        self.load_ui()
        #self.pushButton.clicked.connect(slot1)
    def load_ui(self):
        """Load ejemplo.ui (next to this file) into this widget."""
        loader = QUiLoader()
        path = os.path.join(os.path.dirname(__file__), "ejemplo.ui")
        ui_file = QFile(path)
        ui_file.open(QFile.ReadOnly)
        loader.load(ui_file, self)
        ui_file.close()
    def slot1(self):
        # Button-press handler (currently not connected; see __init__).
        print('Boton presionado!')
    def fun1 (self):
        pass
# Entry point: build the widget and start the Qt event loop.
if __name__ == "__main__":
    app = QApplication([])
    widget = Ejemplo()
    widget.show()
    sys.exit(app.exec_())
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,214
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Pack/StudentIO.py
|
import pickle
class Estudiante:
    """Student record with interactive setters (they read from stdin) and
    plain getters for each field."""

    def __init__(self, nombre, carrera, correo, num_control, promedio):
        self.nombre = nombre
        self.carrera = carrera
        self.correo = correo
        self.num_control = num_control
        self.promedio = promedio

    # Each setter reads the new value straight from standard input.
    def setnombre(self):
        self.nombre = input()

    def getnombre(self):
        return self.nombre

    def setcarrera(self):
        self.carrera = input()

    def getcarrera(self):
        return self.carrera

    def setcorreo(self):
        self.correo = input()

    def getcorreo(self):
        return self.correo

    def setnum_control(self):
        self.num_control = input()

    def getnum_control(self):
        return self.num_control

    def setpromedio(self):
        self.promedio = input()

    def getpromedio(self):
        return self.promedio
# e = Estudiante(nombre, carrera, correo, num_control, promedio)
# Five module-level sample students; agregar/lectura/actualizar below only
# ever touch `e`, the first one (see the [PV] notes).
e = Estudiante("Elliot", "MECATRONICA", "elliotruizs@iee.org", "16240056", "82.47")
e1 = Estudiante("DIEGO", "ACTUARIA", "diegovo@iee.org", "16240057", "83.47")
e2 = Estudiante("RENE", "SISTEMAS", "reneva@iee.org", "16240058", "84.47")
e3 = Estudiante("SERGIO", "GESTION", "sergiorv@iee.org", "16240059", "85.47")
e4 = Estudiante("KARLA", "ADMINISTRACION", "karlahe@iee.org", "16240060", "86.47")
def agregar():
    """Interactively fill in every field of the first sample student.

    [PV] Only the first object (`e`) is ever changed.
    """
    print("Ingresa tu nombre completo: ")
    e.setnombre()
    print("Ingresa tu carrera: ")
    e.setcarrera()
    print("Ingresa tu correo: ")
    e.setcorreo()
    print("Ingresa tu numero de control: ")
    e.setnum_control()
    print("Ingresa tu promedio: ")
    e.setpromedio()
def lectura():
    """Print every field of the first sample student.

    [PV] A loop could be used to show all of the students.
    """
    print("Nombre:")
    print(e.getnombre())
    print('Carrera:')
    print(e.getcarrera())
    print('Correo:')
    print(e.getcorreo())
    print('Numero de control:')
    print(e.getnum_control())
    print('Promedio:')
    print(e.getpromedio())
def actualizar():
    """Interactively overwrite every field of the first sample student.

    [PV] Always edits the first object (`e`) — identical to agregar().
    """
    print("Ingresa tu nombre completo: ")
    e.setnombre()
    print("Ingresa tu carrera: ")
    e.setcarrera()
    print("Ingresa tu correo: ")
    e.setcorreo()
    print("Ingresa tu numero de control: ")
    e.setnum_control()
    print("Ingresa tu promedio: ")
    e.setpromedio()
def pickle1():
    """Pickle the five sample students to dict.db, then read back and print
    the first record.

    Fix: file handles are now managed with context managers — the original
    closed pickle_out manually but never closed pickle_in.
    """
    ej_dict = {1: e.nombre, 2: e.carrera, 3: e.correo, 4: e.num_control, 5: e.promedio}
    ej_dict1 = {1: e1.nombre, 2: e1.carrera, 3: e1.correo, 4: e1.num_control, 5: e1.promedio}
    ej_dict2 = {1: e2.nombre, 2: e2.carrera, 3: e2.correo, 4: e2.num_control, 5: e2.promedio}
    ej_dict3 = {1: e3.nombre, 2: e3.carrera, 3: e3.correo, 4: e3.num_control, 5: e3.promedio}
    ej_dict4 = {1: e4.nombre, 2: e4.carrera, 3: e4.correo, 4: e4.num_control, 5: e4.promedio}
    # One dump per student, in order, into a single file.
    with open("dict.db", "wb") as pickle_out:
        for registro in (ej_dict, ej_dict1, ej_dict2, ej_dict3, ej_dict4):
            pickle.dump(registro, pickle_out)
    # Reading once only yields the first pickled dict.
    with open("dict.db", "rb") as pickle_in:
        example_dict = pickle.load(pickle_in)
    print(example_dict)
    print(example_dict[2])
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,215
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Tarea5/main2_1.py
|
from PySide6 import QtCore, QtWidgets
# from PySide6 import QtCore, QtWidgets
from PySide2 import QtCore, QtWidgets
from mongoengine import *
import sys
# Open the default mongoengine connection to the local 'IECA' database.
# NOTE(review): the imports above pull QtCore/QtWidgets from BOTH PySide6 and
# PySide2 — the later PySide2 import wins; confirm which binding is intended.
connect('IECA', host='Localhost', port=27017)
class estudiantes(Document):
    """Mongoengine document for a student record (collection 'estudiantes')."""
    # All fields are required plain strings.
    Nombre_estudiante = StringField(required=True, max_length=200)
    Correo_estudiantil = StringField(required=True)
    Contrasenia = StringField(required=True)
    Materias = StringField(required=True)
class Estudiantes:
    """Plain in-memory student value object."""

    # Class-level defaults are kept for backward compatibility with any code
    # that reads the attributes before __init__ runs.
    nombre = ""
    correo = ""
    contrasenia = ""
    materias = ""

    def __init__(self, nombre, correo, contrasenia, materias):
        self.nombre, self.correo = nombre, correo
        self.contrasenia, self.materias = contrasenia, materias
# Clase padre para el menu
class Menu(QtWidgets.QWidget):
ModoNavegar, ModoIngresar, ModoEditar = range(3)
# Funcion para declarar e inicializar los widgets
def __init__(self, parent=None):
super(Menu, self).__init__(parent)
self.database = estudiantes()
self.oldName = ''
self.oldMail = ''
self.oldPass = ''
self.oldSubj = ''
self.ModoActual = self.ModoNavegar
nameLabel1 = QtWidgets.QLabel("Nombre:")
self.nameLine = QtWidgets.QLineEdit()
self.nameLine.setReadOnly(True)
nameLabel2 = QtWidgets.QLabel("Correo:")
self.mailLine = QtWidgets.QLineEdit()
self.mailLine.setReadOnly(True)
nameLabel3 = QtWidgets.QLabel("Contraseña:")
self.passLine = QtWidgets.QLineEdit()
self.passLine.setReadOnly(True)
nameLabel4 = QtWidgets.QLabel("Materia:")
self.subjLine = QtWidgets.QLineEdit()
self.subjLine.setReadOnly(True)
# Botones para funcion de menu
self.addButton = QtWidgets.QPushButton("&Ingresa")
self.editButton = QtWidgets.QPushButton("&Modifica")
self.editButton.setEnabled(False)
self.removeButton = QtWidgets.QPushButton("&Elimina")
self.removeButton.setEnabled(False)
self.submitButton = QtWidgets.QPushButton("&Confirma")
self.submitButton.hide()
self.cancelButton = QtWidgets.QPushButton("&Cancela")
self.cancelButton.hide()
# Botones para mostrar
self.nextButton = QtWidgets.QPushButton("&Siguiente")
self.nextButton.setEnabled(False)
self.previousButton = QtWidgets.QPushButton("&Anterior")
self.previousButton.setEnabled(False)
# Definir la conecion a funciones
self.addButton.clicked.connect(self.addAlumno)
self.editButton.clicked.connect(self.editAlumno)
self.removeButton.clicked.connect(self.removeAlumno)
self.submitButton.clicked.connect(self.submitAlumno)
self.cancelButton.clicked.connect(self.cancelAlumno)
self.nextButton.clicked.connect(self.nextAlumno)
self.previousButton.clicked.connect(self.previousAlumno)
# Layout de funciones principales
buttonLayout1 = QtWidgets.QVBoxLayout()
buttonLayout1.addWidget(self.addButton)
buttonLayout1.addWidget(self.editButton)
buttonLayout1.addWidget(self.removeButton)
buttonLayout1.addWidget(self.cancelButton)
buttonLayout1.addWidget(self.submitButton)
buttonLayout1.addStretch()
# Layout de funciones mostrar
buttonLayout2 = QtWidgets.QHBoxLayout()
buttonLayout2.addWidget(self.nextButton)
buttonLayout2.addWidget(self.previousButton)
# Layout principal con coordenadas
mainLayout = QtWidgets.QGridLayout()
mainLayout.addWidget(nameLabel1, 0, 0)
mainLayout.addWidget(self.nameLine, 0, 1)
mainLayout.addWidget(nameLabel2, 1, 0)
mainLayout.addWidget(self.mailLine, 1, 1)
mainLayout.addWidget(nameLabel3, 2, 0)
mainLayout.addWidget(self.passLine, 2, 1)
mainLayout.addWidget(nameLabel4, 3, 0)
mainLayout.addWidget(self.subjLine, 3, 1)
mainLayout.addLayout(buttonLayout1, 1, 2)
mainLayout.addLayout(buttonLayout2, 4, 1)
self.setLayout(mainLayout)
self.setWindowTitle("Tarea 5")
def addAlumno(self):
self.oldName = self.nameLine.text()
self.oldMail = self.mailLine.text()
self.oldPass = self.passLine.text()
self.oldSubj = self.subjLine.text()
self.nameLine.clear()
self.mailLine.clear()
self.passLine.clear()
self.subjLine.clear()
self.updateGUI(self.ModoIngresar)
def editAlumno(self):
self.oldName = self.nameLine.text()
self.oldMail = self.mailLine.text()
self.oldPass = self.passLine.text()
self.oldSubj = self.subjLine.text()
self.updateGUI(self.ModoEditar)
def removeAlumno(self):
nombre = self.nameLine.text()
# [PV] No se interactua con la BD
if nombre in self.database:
boton = QtWidgets.QMessageBox.question(self, "Confirmar", "Estas seguro de quitar a \"%s\"?" % nombre,
QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if boton == QtWidgets.QMessageBox.Yes:
self.previous()
del self.database[nombre]
QtWidgets.QMessageBox.information(self, "Operacion exitosa", "\"%s\" ha sido eliminado" % nombre)
self.updateGUI(self.ModoNavegar)
def submitAlumno(self):
nombre = self.nameLine.text()
correo = self.mailLine.text()
contra = self.passLine.text()
materi = self.subjLine.text()
if nombre == "" or correo == "" or contra == "" or materi == "":
QtWidgets.QMessageBox.information(self, "Campo Vacio", "Por favor ingrese todos lo campos.")
return
if self.ModoActual == self.ModoIngresar:
if nombre not in self.database:
estudiantes(
Nombre_estudiante=nombre,
Correo_estudiantil=correo,
Contrasenia=contra,
Materias=materi)
QtWidgets.QMessageBox.information(self, "Operacion exitosa", "\%s\" ha sido añadido." % nombre)
# [PV] No se guarda a la BD
else:
QtWidgets.QMessageBox.information(self, "Operacion fallida", "\%s\" ya ha sido añadido antes." % nombre)
return
elif self.ModoActual == self.ModoEditar:
if self.oldName != nombre:
if nombre not in self.database:
QtWidgets.QMessageBox.information(self, "Operacion exitosa", "\"%s\" ha sido añadido."
% self.oldName)
# [PV] No se interactua con la BD
del self.database[self.oldName]
self.database[nombre] = correo
self.database[nombre] = contra
self.database[nombre] = materi
else:
QtWidgets.QMessageBox.information(self, "Operacion fallida", "\%s\" ya ha sido añadido antes."
% nombre)
return
elif self.oldMail != correo:
QtWidgets.QMessageBox.information(self, "Operacion exitosa", "\"%s\" ha sido añadido." % nombre)
self.database[nombre] = correo
self.database[nombre] = contra
self.database[nombre] = materi
elif self.oldPass != contra:
QtWidgets.QMessageBox.information(self, "Operacion exitosa", "\"%s\" ha sido añadido." % nombre)
self.database[nombre] = correo
self.database[nombre] = contra
self.database[nombre] = materi
elif self.oldSubj != materi:
QtWidgets.QMessageBox.information(self, "Operacion exitosa", "\"%s\" ha sido añadido." % nombre)
self.database[nombre] = correo
self.database[nombre] = contra
self.database[nombre] = materi
self.updateGUI(self.ModoNavegar)
def cancelAlumno(self):
self.nameLine.setText(self.oldName)
self.mailLine.setText(self.oldMail)
self.passLine.setText(self.oldPass)
self.subjLine.setText(self.oldSubj)
self.updateGUI(self.ModoNavegar)
    def nextAlumno(self):
        """Advance the form to the entry after the currently shown one,
        wrapping back to the first entry when the end is reached."""
        nombre = self.nameLine.text()
        it = iter(self.database)
        try:
            while True:
                # NOTE(review): `it.next()` is Python 2 syntax — under
                # Python 3 this raises AttributeError (should be next(it)).
                # Also, iterating a dict yields keys only, so the 2-/4-name
                # unpacking here assumes a different storage layout for
                # self.database — TODO confirm what it actually holds.
                this_name, _ = it.next()
                if this_name == nombre:
                    next_nombre, next_correo, next_contra, next_materi = it.next()
                    break
        except StopIteration:
            # Ran off the end: wrap around to the first entry.
            next_nombre, next_correo, next_contra, next_materi = iter(self.database).next()
        self.nameLine.setText(next_nombre)
        self.mailLine.setText(next_correo)
        self.passLine.setText(next_contra)
        self.subjLine.setText(next_materi)
    def previousAlumno(self):
        """Step the form back to the entry before the current one, wrapping
        to the last entry when already at the first."""
        nombre = self.nameLine.text()
        prev_nombre = prev_correo = prev_contra = prev_materi = None
        # NOTE(review): iterating a dict yields keys only; the 4-name
        # unpacking below assumes each element is a 4-tuple — TODO confirm
        # the real layout of self.database.
        for this_name, this_correo, this_contra, this_materi in self.database:
            if this_name == nombre:
                break
            prev_nombre = this_name
            prev_correo = this_correo
            prev_contra = this_contra
            prev_materi = this_materi
        else:
            # Current name not found at all: blank the form and bail out.
            self.nameLine.clear()
            self.mailLine.clear()
            self.passLine.clear()
            self.subjLine.clear()
            return
        if prev_nombre is None:
            # Already at the first entry: walk to the last one (wrap-around).
            for prev_nombre, prev_correo, prev_contra, prev_materi in self.database:
                pass
        self.nameLine.setText(prev_nombre)
        self.mailLine.setText(prev_correo)
        self.passLine.setText(prev_contra)
        self.subjLine.setText(prev_materi)
def updateGUI(self, modo):
self.ModoActual = modo
if self.ModoActual in (self.ModoIngresar, self.ModoEditar):
self.nameLine.setReadOnly(False)
self.nameLine.setFocus(QtCore.Qt.OtherFocusReason)
self.mailLine.setReadOnly(False)
self.passLine.setReadOnly(False)
self.subjLine.setReadOnly(False)
self.addButton.setEnabled(False)
self.editButton.setEnabled(False)
self.removeButton.setEnabled(False)
self.nextButton.setEnabled(False)
self.previousButton.setEnabled(False)
self.submitButton.show()
self.cancelButton.show()
elif self.ModoActual == self.ModoNavegar:
if not self.database:
self.nameLine.clear()
self.mailLine.clear()
self.passLine.clear()
self.subjLine.clear()
self.nameLine.setReadOnly(True)
self.mailLine.setReadOnly(True)
self.passLine.setReadOnly(True)
self.subjLine.setReadOnly(True)
self.addButton.setEnabled(True)
number = len(self.database)
self.editButton.setEnabled(number >= 1)
self.removeButton.setEnabled(number >= 1)
self.findButton.setEnabled(number > 2)
# [PV] El boton findButton no existe en otro lugar del programa
# self.findButton.setEnabled(number > 2)
self.nextButton.setEnabled(number > 1)
self.previousButton.setEnabled(number > 1)
self.submitButton.hide()
self.cancelButton.hide()
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    # Fix: the original did `Menu = Menu()`, rebinding (shadowing) the class
    # name with the instance; use a distinct variable instead.
    window = Menu()
    window.show()
    sys.exit(app.exec_())
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,216
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Diccionario.py
|
# https://devcode.la/tutoriales/diccionarios-en-python/
# Demo of dict basics: literal vs dict(), heterogeneous key types, and the
# items()/keys()/values() views.
diccionario = {}
diccionario2 = dict()
print(f'Diccionario: {diccionario}')
print(f'Tipo: {type(diccionario)}')
# Keys may be any hashable type: int, float, str, bool.
diccionario[1] = 'uno'
diccionario[3.4] = 'tres punto cuatro'
diccionario['uno'] = 'uno'
diccionario[False] = 'Falso'
print(f'diccionario[1]: {diccionario[1]}')
print(f'diccionario[3.4]: {diccionario[3.4]}')
print(f'diccionario["uno"]: {diccionario["uno"]}')
print(f'diccionario[False]: {diccionario[False]}')
print(f'Diccionario: {diccionario}')
# A dict can also be built directly from a literal.
diccionario2 = {1 : 'uno' , 2.0 : 'dos punto cero'}
print(diccionario2)
print('\n\n')
print(diccionario.items())
print(diccionario.keys())
print(diccionario.values())
# Iterate the keys and index back into the dict for each value.
for key in diccionario.keys():
    print(f'Key: {key}')
    print(f'Valor: {diccionario[key]}')
# `key` keeps the last value assigned by the loop above.
print(f'key: {key}')
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,217
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Herencia_Division.py
|
class Division:
    """Holds the two operands of a division; subclasses implement dividir()."""
    divisor = 0
    dividendo = 0
    resultado = 0
    residuo = 0

    def __init__(self, dividendo, divisor):
        self.divisor = divisor
        self.dividendo = dividendo

    def dividir(self):
        """Abstract-by-convention: the base class performs no division."""
        pass


class Divisonentera(Division):
    """Integer division: dividir() returns a (quotient, remainder) pair."""
    # No __init__ needed — the inherited one already takes (dividendo, divisor).

    def dividir(self):
        # divmod() yields exactly the (//, %) pair the original computed.
        return divmod(self.dividendo, self.divisor)


class Divisondecimal(Division):
    """True (floating-point) division."""

    def dividir(self):
        return self.dividendo / self.divisor


if __name__ == '__main__':
    de = Divisonentera(15, 3)
    res = de.dividir()
    print(res)
    dd = Divisondecimal(16, 3)
    res2 = dd.dividir()
    print(res2)
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,218
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Tarea1.py
|
from Pack.Tarea1_Modulo import funcion
import random
def main():
    """Play one round of rock-paper-scissors: read the player's choice,
    draw the computer's at random, show both and print the outcome."""
    print('Bienvenido al juego de piedra, papel o tijera')
    print('Escribe tu eleccion: ')
    s_user = input()
    s_pc = random.choice(['piedra', 'papel', 'tijera'])
    print('Eleccion de usuario: ', s_user)
    print('Eleccion de PC: ', s_pc)
    # Fix: funcion() previously read module globals that were only created
    # by top-level code after main's definition; the whole round now lives
    # inside main(), so main() works when called on its own.
    funcion(s_user, s_pc)


main()
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,219
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/clienteAsistencia.py
|
import socket
import pickle
from estudiante import Estudiante
def main():
    """Pickle an Estudiante and send it to the remote attendance server,
    then print the server's decoded reply."""
    s = socket.socket()
    host = '3.16.226.150'  # NOTE(review): hard-coded server IP — confirm still valid
    port = 9999
    s.connect((host, port))
    estudiante = Estudiante("Elliot Ruiz Sanchez", "elliotruizs@ieee.org", "IECA8")
    # Serialize the object so it can travel over the socket as bytes.
    estudiante_seriado = pickle.dumps(estudiante)
    s.send(estudiante_seriado)
    res = s.recv(1024)
    print(f'Respuesta: \n\t{res.decode()}')
    s.close()
if __name__ == '__main__':
    main()
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,220
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Pickle.py
|
import pickle

# Serialize a small list to disk with pickle.
# Fix: the original opened the file without ever closing it; `with`
# guarantees the handle is flushed and closed even on error.
animals = ['python', 'monkey', 'camel']
with open('data.dat', 'wb') as file:
    pickle.dump(animals, file, 2)  # protocol 2 kept from the original
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,221
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/server.py
|
import pickle
# Demo comparing str.encode() (UTF-8 bytes) with pickle.dumps() (pickle
# byte stream), then deliberately feeding the wrong bytes to pickle.loads().
s = 'Hola mundo'
print(s)
print(type(s))
se = s.encode()
print(se)
print(type(se))
sp = pickle.dumps(s)
print(sp)
print(type(sp))
# NOTE(review): `se` holds plain UTF-8 text, not pickle data, so this call
# raises UnpicklingError and the two prints below never run.
ss2 = pickle.loads(se)
print(ss2)
print(type(ss2))
# It does not print because pickle finds an 'h' first instead of the \x80 opcode
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,222
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Pack/Tarea1_Modulo.py
|
# import random
# s_user = input()
# s_pc = random.choice(['piedra', 'papel', 'tijera'])
# s_user = 'piedra', s_pc = 'tijera'
def funcion(s_user, s_pc):
    """Print the outcome of one rock-paper-scissors round.

    `s_user` is the player's choice, `s_pc` the computer's.  Prints
    'Empate!' on a tie, 'Seleccion no valida!' for an unknown player
    choice, otherwise 'Ganaste!'/'Perdiste!'.
    """
    # For each valid choice, the choice that defeats it.
    loses_to = {'piedra': 'papel', 'papel': 'tijera', 'tijera': 'piedra'}
    if s_user == s_pc:
        print('Empate!')
    elif s_user not in loses_to:
        print('Seleccion no valida!')
    elif s_pc == loses_to[s_user]:
        print('Perdiste!')
    else:
        print('Ganaste!')
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,223
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/POO.py
|
class Persona:
    """Minimal OOP demo: a person with name, e-mail and age."""
    # Class-level defaults, shadowed by the instance attributes set in __init__.
    nombre = ''
    correo = ''
    def __init__(self):
        # Hard-coded demo values.
        self.edad = 24
        self.nombre = 'Elliot'
        self.correo = 'elliotruizs@ieee.org'
    def saludar(self, nombre):
        """Greet `nombre`, then print this person's own details."""
        print('Hola', nombre)
        print(self.nombre, '\n', self.correo, '\n', self.edad)
# __name__ equals '__main__' only when the file is executed directly,
# so the demo does not run on import.
if __name__ == '__main__':
    p = Persona()
    p.saludar('ERS')
    print(p)
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,224
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Tupla.py
|
# Tuples are immutable: not a single value can change after creation.
# A negative index (e.g. tupla[-1]) counts from the end toward the start.
# Indices start at 0.
# https://recursospython.com/guias-y-manuales/listas-y-tuplas/
tupla = 'Hola' , 2 , 3.4 , False , [ 1 , 'test' ] , 2 , 2
tupla2 = tuple()
print(f'Tupla: {tupla[0]}')
print(f'Tupla: {tupla[1]}')
print(f'Tupla: {tupla[2]}')
print(f'Tupla: {tupla[3]}')
print(f'Tupla: {tupla[4][1]}')
print(f'Tupla(-1): {tupla[-1]}')
print(f'Tupla2: {tupla2}')
print(type(tupla2))
# count() tallies how many elements equal the value 2.
conteo = tupla.count(2)
print('Conteo: ' , conteo)
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,225
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Regex.py
|
import re
# Regex demo: match vs search vs findall over a fixed sentence.
texto = 'Buenas tardes a todos y todas'
patron = 'B.*t.*a'
patron2 = 'd[aeo]s'
coincidencia = re.match(patron, texto)    # anchored at the start of texto
coincidencia2 = re.search(patron, texto)  # anywhere in texto
encontrar = re.findall(patron2, texto)    # every non-overlapping hit
lista = ['unos', 'dos', 'tres']
for item in lista:
    # Fix: the original did `m = re.search`, binding the function object
    # without calling it; the pattern is now actually applied to each item.
    m = re.search(patron2, item)
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,226
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/PruebasQT/Hello World.py
|
import sys
# Import the Qt classes we need.
from PySide6.QtWidgets import QApplication, QLabel
# Create the QApplication instance (receives the command-line args).
app = QApplication(sys.argv)
label = QLabel("Hello World")
label.show()
# Fix: PySide6 renamed exec_() to exec(); exec_ is a deprecated
# PySide2-era alias removed in current PySide6 releases.
app.exec()
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,227
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Tarea3.py
|
from Pack.StudentIO import agregar
from Pack.StudentIO import lectura
from Pack.StudentIO import actualizar
from Pack.StudentIO import pickle1
def menu():
    """Show the Tarea 3 console menu in a loop and dispatch the chosen
    action until option 4 (quit) is selected.  The add/read/update
    operations are delegated to Pack.StudentIO helpers."""
    encabezado = (
        "Bienvenido a la tarea 3.",
        "1. Agregar nuevo alumno.",
        "2. Lectura de alumno.",
        "3. Actualizar alumno.",
        "4. Salir del programa",
        "Ingrese su eleccion: ",
    )
    while True:
        for linea in encabezado:
            print(linea)
        choice = input()
        if choice == '1':
            agregar()
        elif choice == '2':
            lectura()
            pickle1()
        elif choice == '3':
            actualizar()
        elif choice == '4':
            print("Adios!")
            break


menu()
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,228
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Lista.py
|
# Lists are mutable: values can be changed after the list is created.
# A negative index (e.g. lista[-1]) counts from the end toward the start.
# Indices start at 0.
lista = [ ]
lista2 = [ ]
print(f'Lista: {lista}')
print(f'Lista 2: {lista2}')
print(type(lista))
# Adding elements:
# append()
# insert()
# Removing elements
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,229
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/clientTCP.py
|
import socket
# Minimal TCP client: connect to a server on this machine, print the
# message it sends, and close the connection.
s = socket.socket()
host = socket.gethostname()  # connect to the local host by name
port = 9999
s.connect((host, port))
msg_recv = s.recv(1024).decode()  # blocks until up to 1024 bytes arrive
print(msg_recv)
s.close()
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,230
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Tarea2.py
|
from Pack.Tarea2_Modulo import validacion
def main():
    """Prompt for email, phone number, CURP and RFC, then validate them
    via Pack.Tarea2_Modulo.validacion."""
    print("Ingresa tu email: ")
    correo = input()
    print("Ingresa tu numero celular: ")
    numero = input()
    print("Ingresa tu CURP: ")
    curp = input()
    print("Ingresa tu RCF: ")
    rfc = input()
    # Fix: validacion() previously ran inside main() against module globals
    # defined only after main's definition; gathering the inputs here makes
    # main() self-contained.
    validacion(correo, numero, curp, rfc)


main()
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,231
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/condicionales.py
|
# Demo of if/elif/else driven by boolean variables.
verdadero = 3 < 5   # True
# Fix: the variable is named "falso" but `5 > 3` evaluates to True, so the
# demo never reached the elif/else branches; the comparison is inverted so
# the name matches the value.
falso = 5 < 3       # False
if falso:
    print('Buenas')
elif 4 > 6:
    print('4 > 6')
else:
    print('Ciao')
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,232
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Pack/Tarea3_Clase.py
|
class Estudiante:
    """Student record with interactive (input()-based) setters and plain
    getters, pre-populated with demo values."""

    def __init__(self):
        # Default demo values.
        self.nombre = "Elliot Ruiz"
        self.carrera = "Mecatronica"
        self.correo = "elliotruizs@iee.org"
        self.num_control = "16240056"
        self.promedio = "82.47"

    def setnombre(self):
        """Read the student's name from stdin."""
        self.nombre = input()

    def getnombre(self):
        return self.nombre

    def setcarrera(self):
        """Read the degree programme from stdin."""
        self.carrera = input()

    def getcarrera(self):
        return self.carrera

    def setcorreo(self):
        """Read the e-mail address from stdin."""
        self.correo = input()

    def getcorreo(self):
        return self.correo

    def setnum_control(self):
        """Read the control number from stdin."""
        self.num_control = input()

    def getnum_control(self):
        return self.num_control

    def setpromedio(self):
        """Read the grade average from stdin."""
        self.promedio = input()

    def getpromedio(self):
        return self.promedio


e = Estudiante()
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,233
|
Elliot-Ruiz96/CursoPython
|
refs/heads/main
|
/Archivo.py
|
# File-handling demo: open two files in read/write mode, probe their
# capabilities, and write a line to one of them.
# Fix: the original opened both files and never closed them; `with`
# guarantees the handles are flushed and closed even on error.
with open("./DarthPlagueis.txt", 'w+') as f, open("./Pruebas.txt", 'w+') as g:
    ret = f.read()            # '' — mode 'w+' truncates the file first
    readable = f.readable()   # True for 'w+'
    writable = f.writable()   # True for 'w+'
    ret2 = g.write('Hola mundo\n')  # returns the number of chars written
    g.seek(0)
print(readable)
print(writable)
|
{"/Tarea1.py": ["/Pack/Tarea1_Modulo.py"], "/Tarea3.py": ["/Pack/StudentIO.py"], "/Tarea2.py": ["/Pack/Tarea2_Modulo.py"]}
|
23,235
|
cellcounter/cellcounter
|
refs/heads/master
|
/cellcounter/accounts/test_views.py
|
from urllib.parse import urlparse
from django.contrib.auth.forms import PasswordChangeForm, SetPasswordForm
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.tokens import default_token_generator
from django.core import mail
from django.core.cache import cache
from django.urls import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from cellcounter.cc_kapi.factories import UserFactory, KeyboardFactory
from .forms import EmailUserCreationForm, PasswordResetForm
from .utils import read_signup_email
from .views import PasswordResetConfirmView
class TestRegistrationView(TestCase):
    """Tests for the "register" view: form rendering, valid/invalid
    submissions, and rate limiting."""
    def setUp(self):
        self.request_factory = RequestFactory()
    def test_get(self):
        """GET renders the registration form."""
        response = self.client.get(reverse("register"))
        self.assertEqual(response.status_code, 200)
        self.assertIsInstance(response.context["form"], EmailUserCreationForm)
    def test_valid(self):
        """A valid signup redirects, creates the user and logs them in."""
        data = {
            "username": "123",
            "email": "joe@example.org",
            "password1": "test",
            "password2": "test",
            "tos": True,
        }
        response = self.client.post(reverse("register"), data=data, follow=True)
        self.assertRedirects(response, reverse("new_count"))
        user = User.objects.get(username="123")
        messages = list(response.context["messages"])
        self.assertEqual(
            "Successfully registered, you are now logged in! <a href='/accounts/%s/'>View your profile</a>"
            % user.id,
            messages[0].message,
        )
        self.assertEqual(user, response.context["user"])
    def test_invalid(self):
        """Refusing the ToS re-renders the form with an error; no login."""
        data = {
            "username": "123",
            "email": "joe@example.org",
            "password1": "test",
            "password2": "test",
            "tos": False,
        }
        response = self.client.post(reverse("register"), data=data)
        self.assertEqual(response.status_code, 200)
        self.assertFormError(
            response, "form", "tos", "You must agree our Terms of Service"
        )
        self.assertEqual(AnonymousUser(), response.context["user"])
    @override_settings(RATELIMIT_ENABLE=True)
    def test_ratelimit_registration(self):
        """The third registration attempt in a row is rate limited."""
        cache.clear()
        data = {
            "username": "123",
            "email": "joe@example.org",
            "password1": "test",
            "password2": "test",
            "tos": True,
        }
        self.client.post(reverse("register"), data)
        self.client.logout()
        data["username"] = "Another"
        self.client.post(reverse("register"), data, follow=True)
        self.client.logout()
        data["username"] = "Another2"
        response = self.client.post(reverse("register"), data, follow=True)
        messages = list(response.context["messages"])
        self.assertEqual(1, len(messages))
        self.assertEqual("You have been rate limited", messages[0].message)
    @override_settings(RATELIMIT_ENABLE=True)
    def test_ratelimit_invalid_form(self):
        """Invalid submissions do not count toward the rate limit."""
        cache.clear()
        data = {
            "username": "123",
            "email": "1234",
            "password1": "test",
            "password2": "test",
            "tos": True,
        }
        self.client.post(reverse("register"), data)
        response = self.client.post(reverse("register"), data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertNotIn("You have been rate limited", response.content.decode("utf-8"))
class TestPasswordChangeView(TestCase):
    """Tests for the "change-password" view: login-required redirects,
    form rendering, and valid/invalid POSTs."""
    def setUp(self):
        self.factory = RequestFactory()
        self.user = UserFactory()
        # Matching new_password1/2 with the correct old password.
        self.valid_data = {
            "old_password": "test",
            "new_password1": "new",
            "new_password2": "new",
        }
        # Mismatched new passwords.
        self.invalid_data = {
            "old_password": "test",
            "new_password1": "test",
            "new_password2": "1234",
        }
    def test_logged_out_get_redirect(self):
        response = self.client.get(reverse("change-password"))
        self.assertRedirects(
            response, "%s?next=%s" % (reverse("login"), reverse("change-password"))
        )
    def test_logged_out_post_redirect(self):
        response = self.client.post(reverse("change-password"), self.valid_data)
        self.assertRedirects(
            response, "%s?next=%s" % (reverse("login"), reverse("change-password"))
        )
    def test_logged_in_to_form(self):
        self.client.force_login(self.user)
        response = self.client.get(reverse("change-password"))
        self.assertEqual(response.status_code, 200)
        self.assertIsInstance(response.context["form"], PasswordChangeForm)
    def test_post_valid(self):
        self.client.force_login(self.user)
        response = self.client.post(
            reverse("change-password"), data=self.valid_data, follow=True
        )
        self.assertRedirects(response, reverse("new_count"))
        messages = list(response.context["messages"])
        self.assertEqual("Password changed successfully", messages[0].message)
    def test_post_invalid(self):
        self.client.force_login(self.user)
        response = self.client.post(reverse("change-password"), data=self.invalid_data)
        self.assertFormError(
            response, "form", "new_password2", "The two password fields didn’t match."
        )
class TestUserDetailView(TestCase):
    """Tests for "user-detail": anonymous redirect, own profile access,
    and 403 for other users' profiles."""
    def setUp(self):
        self.keyboard = KeyboardFactory()
    def test_get_anonymous(self):
        user2 = UserFactory()
        response = self.client.get(reverse("user-detail", kwargs={"pk": user2.id}))
        self.assertRedirects(
            response,
            "%s?next=%s"
            % (reverse("login"), reverse("user-detail", kwargs={"pk": user2.id})),
        )
    def test_get_self(self):
        self.client.force_login(self.keyboard.user)
        response = self.client.get(
            reverse("user-detail", kwargs={"pk": self.keyboard.user.id})
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context["user_detail"], self.keyboard.user)
        # NOTE(review): the expected count of 3 comes from KeyboardFactory's
        # setup — confirm against /cellcounter/cc_kapi/factories.py.
        self.assertEqual(len(response.context["keyboards"]), 3)
    def test_get_someone_else(self):
        user2 = UserFactory()
        self.client.force_login(self.keyboard.user)
        response = self.client.get(reverse("user-detail", kwargs={"pk": user2.id}))
        self.assertEqual(response.status_code, 403)
class TestUserDeleteView(TestCase):
    """Tests for "user-delete": anonymous redirect, confirmation page,
    self-deletion, and 403 when targeting another user."""
    def setUp(self):
        self.user = UserFactory()
    def test_get_delete_anonymous(self):
        response = self.client.get(reverse("user-delete", kwargs={"pk": self.user.id}))
        self.assertRedirects(
            response,
            "%s?next=%s"
            % (reverse("login"), reverse("user-delete", kwargs={"pk": self.user.id})),
        )
    def test_delete_anonymous(self):
        user2 = UserFactory()
        response = self.client.delete(reverse("user-delete", kwargs={"pk": user2.id}))
        self.assertRedirects(
            response,
            "%s?next=%s"
            % (reverse("login"), reverse("user-delete", kwargs={"pk": user2.id})),
        )
    def test_get_delete_self(self):
        """GET shows the confirmation template before deleting."""
        self.client.force_login(self.user)
        response = self.client.get(reverse("user-delete", kwargs={"pk": self.user.id}))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "accounts/user_check_delete.html")
    def test_delete_self(self):
        self.client.force_login(self.user)
        response = self.client.delete(
            reverse("user-delete", kwargs={"pk": self.user.id}), follow=True
        )
        self.assertRedirects(response, reverse("new_count"))
        self.assertEqual(
            "User account deleted", list(response.context["messages"])[0].message
        )
    def test_get_delete_someone_else(self):
        user2 = UserFactory()
        self.client.force_login(self.user)
        response = self.client.get(reverse("user-delete", kwargs={"pk": user2.id}))
        self.assertEqual(response.status_code, 403)
    def test_delete_someone_else(self):
        user2 = UserFactory()
        self.client.force_login(self.user)
        response = self.client.delete(reverse("user-delete", kwargs={"pk": user2.id}))
        self.assertEqual(response.status_code, 403)
class TestUserUpdateView(TestCase):
    """Tests for "user-update": anonymous redirects, valid updates,
    ignored extra fields (username), invalid email, and 403 for others."""
    def setUp(self):
        self.valid_data = {
            "first_name": "Jack",
            "last_name": "Example",
            "email": "test@example.org",
        }
        # Includes a "username" key the form should silently ignore.
        self.extra_data = {
            "first_name": "Joe",
            "last_name": "Example",
            "email": "test@example.org",
            "username": "invalid",
        }
        self.invalid_data = {
            "first_name": "Joe",
            "last_name": "Example",
            "email": "1234",
        }
    def test_get_update_when_anonymous(self):
        user = UserFactory()
        response = self.client.get(reverse("user-update", kwargs={"pk": user.id}))
        self.assertRedirects(
            response,
            "%s?next=%s"
            % (reverse("login"), reverse("user-update", kwargs={"pk": user.id})),
        )
    def test_post_update_when_anonymous(self):
        user = UserFactory()
        response = self.client.post(
            reverse("user-update", kwargs={"pk": user.id}), data=self.valid_data
        )
        self.assertRedirects(
            response,
            "%s?next=%s"
            % (reverse("login"), reverse("user-update", kwargs={"pk": user.id})),
        )
    def test_update_self_valid(self):
        user = UserFactory()
        self.client.force_login(user)
        response = self.client.post(
            reverse("user-update", kwargs={"pk": user.id}),
            data=self.valid_data,
            follow=True,
        )
        self.assertRedirects(response, reverse("user-detail", kwargs={"pk": user.id}))
        self.assertEqual(
            "User details updated", list(response.context["messages"])[0].message
        )
        # The stored values should all differ from the factory-generated ones.
        updated_user = User.objects.get(username=user.username)
        self.assertNotEqual(updated_user.first_name, user.first_name)
        self.assertNotEqual(updated_user.last_name, user.last_name)
        self.assertNotEqual(updated_user.email, user.email)
    def test_update_self_extra(self):
        """The extra "username" field must not change the stored username."""
        user = UserFactory()
        self.client.force_login(user)
        response = self.client.post(
            reverse("user-update", kwargs={"pk": user.id}),
            data=self.extra_data,
            follow=True,
        )
        self.assertRedirects(response, reverse("user-detail", kwargs={"pk": user.id}))
        self.assertEqual(
            "User details updated", list(response.context["messages"])[0].message
        )
        updated_user = User.objects.get(username=user.username)
        self.assertNotEqual(updated_user.first_name, user.first_name)
        self.assertNotEqual(updated_user.last_name, user.last_name)
        self.assertNotEqual(updated_user.email, user.email)
        self.assertEqual(updated_user.username, user.username)
    def test_update_self_invalid(self):
        user = UserFactory()
        self.client.force_login(user)
        response = self.client.post(
            reverse("user-update", kwargs={"pk": user.id}), data=self.invalid_data
        )
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, "form", "email", "Enter a valid email address.")
    def test_update_someone_else(self):
        user = UserFactory()
        user2 = UserFactory()
        self.client.force_login(user)
        response = self.client.post(reverse("user-update", kwargs={"pk": user2.id}))
        self.assertEqual(response.status_code, 403)
class TestPasswordResetView(TestCase):
    """Tests for "password-reset": form rendering, email dispatch for
    known/unknown addresses, and rate limiting."""
    def setUp(self):
        self.factory = RequestFactory()
        self.user = UserFactory()
    def test_get_form(self):
        response = self.client.get(reverse("password-reset"))
        self.assertEqual(response.status_code, 200)
        self.assertIsInstance(response.context["form"], PasswordResetForm)
        self.assertTemplateUsed(response, "accounts/reset_form.html")
    def test_post_valid_email(self):
        """A known address sends one email whose link targets the
        password-reset-confirm URL."""
        data = {"email": self.user.email}
        response = self.client.post(reverse("password-reset"), data=data, follow=True)
        self.assertRedirects(response, reverse("new_count"))
        self.assertEqual(
            "Reset email sent", list(response.context["messages"])[0].message
        )
        self.assertEqual(1, len(mail.outbox))
        url, path = read_signup_email(mail.outbox[0])
        # Extract uidb64 and token from the emailed URL's path segments.
        uidb64, token = urlparse(url).path.split("/")[-3:-1]
        self.assertEqual(
            path,
            reverse(
                "password-reset-confirm", kwargs={"uidb64": uidb64, "token": token}
            ),
        )
    def test_post_invalid_email(self):
        """An unknown address redirects but sends nothing."""
        data = {"email": "invalid@example.org"}
        response = self.client.post(reverse("password-reset"), data=data, follow=True)
        self.assertRedirects(response, reverse("new_count"))
        self.assertEqual(0, len(mail.outbox))
    @override_settings(RATELIMIT_ENABLE=True)
    def test_post_ratelimit(self):
        """The sixth reset request in quick succession is rate limited."""
        for n in range(0, 5):
            self.client.post(
                reverse("password-reset"), data={"email": self.user.email}, follow=True
            )
        response = self.client.post(
            reverse("password-reset"), data={"email": self.user.email}, follow=True
        )
        self.assertEqual(
            list(response.context["messages"])[0].message, "You have been rate limited"
        )
        cache.clear()
class TestPasswordResetConfirmView(TestCase):
    """Tests for "password-reset-confirm": the valid_user/valid_token
    helpers and the GET/POST flows with valid and invalid links."""
    def setUp(self):
        self.user = UserFactory()
        self.valid_uidb64 = urlsafe_base64_encode(force_bytes(self.user.pk))
        self.valid_data = {"new_password1": "newpwd", "new_password2": "newpwd"}
        self.invalid_data = {"new_password1": "newpwd", "new_password2": "1234"}
    def _generate_token(self, user):
        # Produce a token the reset view will accept for `user`.
        return default_token_generator.make_token(user)
    def test_valid_user_valid(self):
        """valid_user() with valid uidb64"""
        self.assertEqual(
            PasswordResetConfirmView().valid_user(self.valid_uidb64), self.user
        )
    def test_valid_user_invalid(self):
        """valid_user() with invalid uidb64"""
        # pk 2 is assumed not to exist since only one user was created.
        uidb64 = urlsafe_base64_encode(force_bytes(2))
        self.assertIsNone(PasswordResetConfirmView().valid_user(uidb64))
    def test_valid_token_valid(self):
        """valid_token() with valid user and token"""
        self.assertTrue(
            PasswordResetConfirmView().valid_token(
                self.user, self._generate_token(self.user)
            )
        )
    def test_valid_token_invalid_token(self):
        """valid_token() with valid user and invalid token"""
        token = "AAA-AAAAAAAAAAAAAAAAAAAA"
        self.assertFalse(PasswordResetConfirmView().valid_token(self.user, token))
    def test_valid_token_invalid_both(self):
        """valid_token() with invalid user and invalid token"""
        # NOTE(review): `token` is unused, and the call actually passes a
        # *valid* token with user=None — the name/docstring say "invalid
        # token"; confirm the intended case.
        token = "AAA-AAAAAAAAAAAAAAAAAAAA"
        self.assertFalse(
            PasswordResetConfirmView().valid_token(
                None, self._generate_token(self.user)
            )
        )
    def test_get_invalid_token(self):
        response = self.client.get(
            reverse(
                "password-reset-confirm",
                kwargs={"uidb64": self.valid_uidb64, "token": token},
            )
        )
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.context["validlink"])
        self.assertIn(
            "The password reset link was invalid, possibly because it has already been used."
            " Please request a new password reset.",
            response.content.decode("utf-8"),
        )
    def test_get_invalid_user(self):
        response = self.client.get(
            reverse(
                "password-reset-confirm",
                kwargs={
                    "uidb64": urlsafe_base64_encode(force_bytes(2)),
                    "token": self._generate_token(self.user),
                },
            )
        )
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.context["validlink"])
        self.assertIn(
            "The password reset link was invalid, possibly because it has already been used."
            " Please request a new password reset.",
            response.content.decode("utf-8"),
        )
    def test_post_invalid_token(self):
        token = "AAA-AAAAAAAAAAAAAAAAAAAA"
        response = self.client.post(
            reverse(
                "password-reset-confirm",
                kwargs={"uidb64": self.valid_uidb64, "token": token},
            ),
            data=self.valid_data,
        )
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.context["validlink"])
        self.assertIn(
            "The password reset link was invalid, possibly because it has already been used."
            " Please request a new password reset.",
            response.content.decode("utf-8"),
        )
    def test_get_valid(self):
        token = self._generate_token(self.user)
        response = self.client.get(
            reverse(
                "password-reset-confirm",
                kwargs={"uidb64": self.valid_uidb64, "token": token},
            )
        )
        self.assertEqual(response.status_code, 200)
        self.assertIsInstance(response.context["form"], SetPasswordForm)
    def test_post_valid(self):
        token = self._generate_token(self.user)
        response = self.client.post(
            reverse(
                "password-reset-confirm",
                kwargs={"uidb64": self.valid_uidb64, "token": token},
            ),
            data=self.valid_data,
            follow=True,
        )
        self.assertRedirects(response, reverse("new_count"))
        self.assertEqual(
            "Password reset successfully", list(response.context["messages"])[0].message
        )
    def test_post_invalid(self):
        token = self._generate_token(self.user)
        response = self.client.post(
            reverse(
                "password-reset-confirm",
                kwargs={"uidb64": self.valid_uidb64, "token": token},
            ),
            data=self.invalid_data,
        )
        self.assertEqual(response.status_code, 200)
        self.assertFormError(
            response, "form", "new_password2", "The two password fields didn’t match."
        )
|
{"/cellcounter/accounts/test_views.py": ["/cellcounter/cc_kapi/factories.py", "/cellcounter/accounts/forms.py", "/cellcounter/accounts/utils.py", "/cellcounter/accounts/views.py"], "/cellcounter/cc_kapi/routers.py": ["/cellcounter/cc_kapi/models.py"], "/cellcounter/cc_kapi/models.py": ["/cellcounter/main/models.py"], "/cellcounter/cc_kapi/serializers.py": ["/cellcounter/cc_kapi/models.py"], "/cellcounter/cc_kapi/defaults.py": ["/cellcounter/cc_kapi/models.py"], "/cellcounter/main/management/commands/fix_database.py": ["/cellcounter/cc_kapi/models.py", "/cellcounter/main/models.py"], "/cellcounter/statistics/urls.py": ["/cellcounter/statistics/views.py"], "/cellcounter/cc_kapi/test_builtin_keyboards.py": ["/cellcounter/cc_kapi/marshalls.py", "/cellcounter/cc_kapi/factories.py", "/cellcounter/cc_kapi/models.py"], "/cellcounter/main/views.py": ["/cellcounter/main/models.py"], "/cellcounter/statistics/views.py": ["/cellcounter/statistics/models.py"], "/cellcounter/cc_kapi/migrations/0002_v2api.py": ["/cellcounter/cc_kapi/defaults.py", "/cellcounter/cc_kapi/models.py"], "/cellcounter/cc_kapi/urls.py": ["/cellcounter/cc_kapi/views.py", "/cellcounter/cc_kapi/routers.py"], "/cellcounter/cc_kapi/marshalls.py": ["/cellcounter/cc_kapi/models.py", "/cellcounter/cc_kapi/serializers.py", "/cellcounter/cc_kapi/defaults.py"], "/cellcounter/accounts/test_forms.py": ["/cellcounter/cc_kapi/factories.py", "/cellcounter/accounts/forms.py"], "/cellcounter/cc_kapi/test_db_migration.py": ["/cellcounter/cc_kapi/factories.py", "/cellcounter/cc_kapi/serializers.py", "/cellcounter/cc_kapi/models.py"], "/cellcounter/cc_kapi/tests.py": ["/cellcounter/main/models.py", "/cellcounter/cc_kapi/defaults.py", "/cellcounter/cc_kapi/factories.py", "/cellcounter/cc_kapi/models.py", "/cellcounter/cc_kapi/serializers.py"], "/cellcounter/cc_kapi/views.py": ["/cellcounter/cc_kapi/models.py", "/cellcounter/cc_kapi/serializers.py", "/cellcounter/cc_kapi/defaults.py", "/cellcounter/cc_kapi/marshalls.py"], 
"/cellcounter/urls.py": ["/cellcounter/main/views.py"], "/cellcounter/main/admin.py": ["/cellcounter/main/models.py"], "/cellcounter/accounts/views.py": ["/cellcounter/cc_kapi/marshalls.py", "/cellcounter/accounts/forms.py"], "/cellcounter/cc_kapi/factories.py": ["/cellcounter/main/models.py", "/cellcounter/cc_kapi/models.py", "/cellcounter/cc_kapi/defaults.py", "/cellcounter/cc_kapi/marshalls.py"], "/cellcounter/accounts/test_utils.py": ["/cellcounter/cc_kapi/factories.py", "/cellcounter/accounts/forms.py", "/cellcounter/accounts/utils.py"], "/cellcounter/statistics/tests.py": ["/cellcounter/statistics/views.py", "/cellcounter/statistics/middleware.py", "/cellcounter/statistics/models.py"]}
|
23,236
|
cellcounter/cellcounter
|
refs/heads/master
|
/cellcounter/cc_kapi/routers.py
|
from rest_framework.routers import Route, SimpleRouter
from .models import Keyboard
def _build_device_routes(segment, label, device_type):
    """Return the list/detail/set_default Route triple for one device type.

    ``segment`` is the URL path component ("desktop" or "mobile"), ``label``
    the human-readable suffix prefix ("Desktop" or "Mobile"), and
    ``device_type`` the Keyboard device constant forwarded to the view
    through ``initkwargs``.
    """
    return [
        Route(
            url=r"^{prefix}/" + segment + "/$",
            mapping={"get": "list", "post": "create"},
            name="{basename}-" + segment + "-list",
            detail=False,
            initkwargs={"suffix": label + " List", "device_type": device_type},
        ),
        Route(
            url=r"^{prefix}/" + segment + "/{lookup}/$",
            mapping={"get": "retrieve", "put": "update", "delete": "destroy"},
            name="{basename}-" + segment + "-detail",
            detail=True,
            initkwargs={"suffix": label + " Detail", "device_type": device_type},
        ),
        Route(
            url=r"^{prefix}/" + segment + "/{lookup}/set_default$",
            mapping={"put": "set_default"},
            name="{basename}-" + segment + "-set_default",
            detail=True,
            initkwargs={
                "suffix": label + " Set Default",
                "device_type": device_type,
            },
        ),
    ]


class KeyboardAPIRouter(SimpleRouter):
    """
    A router for the keyboard API, which splits desktop and mobile.
    """

    # One shared list route, then identical list/detail/set_default triples
    # for each device type.
    routes = (
        [
            Route(
                url=r"^{prefix}/$",
                mapping={"get": "list"},
                name="{basename}-list",
                detail=False,
                initkwargs={"suffix": "List"},
            )
        ]
        + _build_device_routes("desktop", "Desktop", Keyboard.DESKTOP)
        + _build_device_routes("mobile", "Mobile", Keyboard.MOBILE)
    )
|
{"/cellcounter/accounts/test_views.py": ["/cellcounter/cc_kapi/factories.py", "/cellcounter/accounts/forms.py", "/cellcounter/accounts/utils.py", "/cellcounter/accounts/views.py"], "/cellcounter/cc_kapi/routers.py": ["/cellcounter/cc_kapi/models.py"], "/cellcounter/cc_kapi/models.py": ["/cellcounter/main/models.py"], "/cellcounter/cc_kapi/serializers.py": ["/cellcounter/cc_kapi/models.py"], "/cellcounter/cc_kapi/defaults.py": ["/cellcounter/cc_kapi/models.py"], "/cellcounter/main/management/commands/fix_database.py": ["/cellcounter/cc_kapi/models.py", "/cellcounter/main/models.py"], "/cellcounter/statistics/urls.py": ["/cellcounter/statistics/views.py"], "/cellcounter/cc_kapi/test_builtin_keyboards.py": ["/cellcounter/cc_kapi/marshalls.py", "/cellcounter/cc_kapi/factories.py", "/cellcounter/cc_kapi/models.py"], "/cellcounter/main/views.py": ["/cellcounter/main/models.py"], "/cellcounter/statistics/views.py": ["/cellcounter/statistics/models.py"], "/cellcounter/cc_kapi/migrations/0002_v2api.py": ["/cellcounter/cc_kapi/defaults.py", "/cellcounter/cc_kapi/models.py"], "/cellcounter/cc_kapi/urls.py": ["/cellcounter/cc_kapi/views.py", "/cellcounter/cc_kapi/routers.py"], "/cellcounter/cc_kapi/marshalls.py": ["/cellcounter/cc_kapi/models.py", "/cellcounter/cc_kapi/serializers.py", "/cellcounter/cc_kapi/defaults.py"], "/cellcounter/accounts/test_forms.py": ["/cellcounter/cc_kapi/factories.py", "/cellcounter/accounts/forms.py"], "/cellcounter/cc_kapi/test_db_migration.py": ["/cellcounter/cc_kapi/factories.py", "/cellcounter/cc_kapi/serializers.py", "/cellcounter/cc_kapi/models.py"], "/cellcounter/cc_kapi/tests.py": ["/cellcounter/main/models.py", "/cellcounter/cc_kapi/defaults.py", "/cellcounter/cc_kapi/factories.py", "/cellcounter/cc_kapi/models.py", "/cellcounter/cc_kapi/serializers.py"], "/cellcounter/cc_kapi/views.py": ["/cellcounter/cc_kapi/models.py", "/cellcounter/cc_kapi/serializers.py", "/cellcounter/cc_kapi/defaults.py", "/cellcounter/cc_kapi/marshalls.py"], 
"/cellcounter/urls.py": ["/cellcounter/main/views.py"], "/cellcounter/main/admin.py": ["/cellcounter/main/models.py"], "/cellcounter/accounts/views.py": ["/cellcounter/cc_kapi/marshalls.py", "/cellcounter/accounts/forms.py"], "/cellcounter/cc_kapi/factories.py": ["/cellcounter/main/models.py", "/cellcounter/cc_kapi/models.py", "/cellcounter/cc_kapi/defaults.py", "/cellcounter/cc_kapi/marshalls.py"], "/cellcounter/accounts/test_utils.py": ["/cellcounter/cc_kapi/factories.py", "/cellcounter/accounts/forms.py", "/cellcounter/accounts/utils.py"], "/cellcounter/statistics/tests.py": ["/cellcounter/statistics/views.py", "/cellcounter/statistics/middleware.py", "/cellcounter/statistics/models.py"]}
|
23,237
|
cellcounter/cellcounter
|
refs/heads/master
|
/cellcounter/cc_kapi/models.py
|
from django.db import models
from django.contrib.auth.models import User
from cellcounter.main.models import CellType
from django.utils import timezone
class Keyboard(models.Model):
    """Represents a Keyboard mapping between users and keys."""

    # Constants for the ``device_type`` field.
    DESKTOP = 1
    MOBILE = 2
    DEVICE_TYPES = (
        (DESKTOP, "desktop"),
        (MOBILE, "mobile"),
    )
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    label = models.CharField(max_length=25)
    created = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
    device_type = models.PositiveIntegerField(choices=DEVICE_TYPES, default=DESKTOP)

    # In-memory flag only (not a model field, never persisted): marks this
    # instance as the user's default keyboard for its device type.
    _is_default = False

    def _set_default(self):
        """Mark this instance as a default keyboard (in memory only)."""
        self._is_default = True

    @property
    def is_default(self):
        """Whether this instance has been marked as a default keyboard."""
        return self._is_default

    @is_default.setter
    def is_default(self, value):
        # NOTE(review): ``value`` is ignored -- assigning *any* value marks
        # this keyboard as the default for its device type; confirm callers
        # never assign a falsy value expecting it to clear the default.
        self._set_default()
        to_set = self
        if not self.user:
            # NOTE(review): with no user the attribute accesses below would
            # raise AttributeError; presumably only reached for user-owned
            # keyboards -- verify against callers.
            to_set = None
        if self.device_type == self.DESKTOP:
            self.user.defaultkeyboards.desktop = to_set
        elif self.device_type == self.MOBILE:
            self.user.defaultkeyboards.mobile = to_set

    def __str__(self):
        # Fixed: was Python-2-only ``__unicode__``, so ``str(keyboard)`` fell
        # back to the default Model repr on Python 3; now consistent with
        # DefaultKeyboards.__str__.
        if self.user is None:
            return "Builtin Keyboard '%s'" % (self.label)
        else:
            return "Keyboard '%s' for user '%s'" % (self.label, self.user.username)

    # Kept for any legacy Python 2 callers; mirrors __str__.
    __unicode__ = __str__

    def _sync_keymaps(self, new_mapping_list):
        """Expects a list of KeyMap objects.

        Brings the M2M ``mappings`` relation in line with
        ``new_mapping_list``: removes mappings no longer wanted and adds the
        missing ones.
        """
        current_mappings = self.mappings.all()
        # Plain loops instead of side-effect list comprehensions; the first
        # iteration also populates the queryset cache used by the second loop.
        for mapping in current_mappings:
            if mapping not in new_mapping_list:
                self.mappings.remove(mapping)
        for mapping in new_mapping_list:
            if mapping not in current_mappings:
                self.mappings.add(mapping)

    def set_keymaps(self, new_mapping_list):
        """new_mapping_list is a list of KeyMap objects"""
        self._sync_keymaps(new_mapping_list)

    def save(
        self, force_insert=False, force_update=False, using=None, update_fields=None
    ):
        """Save the keyboard, stamping ``last_modified`` for user keyboards.

        BUG FIX: previously the hard-coded defaults were forwarded to
        ``super().save()`` instead of the caller's arguments, silently
        discarding ``force_insert``/``force_update``/``using``/
        ``update_fields``.
        """
        if self.user:
            # auto_now=True also stamps this on save; kept for parity with
            # the original flow.
            self.last_modified = timezone.now()
        super(Keyboard, self).save(
            force_insert=force_insert,
            force_update=force_update,
            using=using,
            update_fields=update_fields,
        )
class KeyMap(models.Model):
    """A single cell-type-to-key binding, shared across keyboards via M2M."""

    # The cell type this key triggers.
    cellid = models.ForeignKey(CellType, on_delete=models.CASCADE)
    # The single keyboard character bound to that cell type.
    key = models.CharField(max_length=1)
    # Keyboards using this binding; reverse accessor is Keyboard.mappings.
    keyboards = models.ManyToManyField(Keyboard, related_name="mappings")
class DefaultKeyboards(models.Model):
    """Maps the default keyboard settings (desktop and mobile) to the user"""

    # One row per user; the user is also the primary key.
    user = models.OneToOneField(User, primary_key=True, on_delete=models.CASCADE)
    # Default desktop keyboard; null means no default has been chosen.
    desktop = models.ForeignKey(
        Keyboard,
        default=None,
        related_name="desktop_default",
        null=True,
        on_delete=models.CASCADE,
    )
    # Default mobile keyboard; null means no default has been chosen.
    mobile = models.ForeignKey(
        Keyboard,
        default=None,
        related_name="mobile_default",
        null=True,
        on_delete=models.CASCADE,
    )

    def __str__(self):  # __unicode__ on Python 2
        return "%s default keyboard mappings" % self.user.username
|
{"/cellcounter/accounts/test_views.py": ["/cellcounter/cc_kapi/factories.py", "/cellcounter/accounts/forms.py", "/cellcounter/accounts/utils.py", "/cellcounter/accounts/views.py"], "/cellcounter/cc_kapi/routers.py": ["/cellcounter/cc_kapi/models.py"], "/cellcounter/cc_kapi/models.py": ["/cellcounter/main/models.py"], "/cellcounter/cc_kapi/serializers.py": ["/cellcounter/cc_kapi/models.py"], "/cellcounter/cc_kapi/defaults.py": ["/cellcounter/cc_kapi/models.py"], "/cellcounter/main/management/commands/fix_database.py": ["/cellcounter/cc_kapi/models.py", "/cellcounter/main/models.py"], "/cellcounter/statistics/urls.py": ["/cellcounter/statistics/views.py"], "/cellcounter/cc_kapi/test_builtin_keyboards.py": ["/cellcounter/cc_kapi/marshalls.py", "/cellcounter/cc_kapi/factories.py", "/cellcounter/cc_kapi/models.py"], "/cellcounter/main/views.py": ["/cellcounter/main/models.py"], "/cellcounter/statistics/views.py": ["/cellcounter/statistics/models.py"], "/cellcounter/cc_kapi/migrations/0002_v2api.py": ["/cellcounter/cc_kapi/defaults.py", "/cellcounter/cc_kapi/models.py"], "/cellcounter/cc_kapi/urls.py": ["/cellcounter/cc_kapi/views.py", "/cellcounter/cc_kapi/routers.py"], "/cellcounter/cc_kapi/marshalls.py": ["/cellcounter/cc_kapi/models.py", "/cellcounter/cc_kapi/serializers.py", "/cellcounter/cc_kapi/defaults.py"], "/cellcounter/accounts/test_forms.py": ["/cellcounter/cc_kapi/factories.py", "/cellcounter/accounts/forms.py"], "/cellcounter/cc_kapi/test_db_migration.py": ["/cellcounter/cc_kapi/factories.py", "/cellcounter/cc_kapi/serializers.py", "/cellcounter/cc_kapi/models.py"], "/cellcounter/cc_kapi/tests.py": ["/cellcounter/main/models.py", "/cellcounter/cc_kapi/defaults.py", "/cellcounter/cc_kapi/factories.py", "/cellcounter/cc_kapi/models.py", "/cellcounter/cc_kapi/serializers.py"], "/cellcounter/cc_kapi/views.py": ["/cellcounter/cc_kapi/models.py", "/cellcounter/cc_kapi/serializers.py", "/cellcounter/cc_kapi/defaults.py", "/cellcounter/cc_kapi/marshalls.py"], 
"/cellcounter/urls.py": ["/cellcounter/main/views.py"], "/cellcounter/main/admin.py": ["/cellcounter/main/models.py"], "/cellcounter/accounts/views.py": ["/cellcounter/cc_kapi/marshalls.py", "/cellcounter/accounts/forms.py"], "/cellcounter/cc_kapi/factories.py": ["/cellcounter/main/models.py", "/cellcounter/cc_kapi/models.py", "/cellcounter/cc_kapi/defaults.py", "/cellcounter/cc_kapi/marshalls.py"], "/cellcounter/accounts/test_utils.py": ["/cellcounter/cc_kapi/factories.py", "/cellcounter/accounts/forms.py", "/cellcounter/accounts/utils.py"], "/cellcounter/statistics/tests.py": ["/cellcounter/statistics/views.py", "/cellcounter/statistics/middleware.py", "/cellcounter/statistics/models.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.