text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'alena'
from model.group import Group
def test_modify_group_name(app):
    """Rename the first group and verify the list is unchanged except for that name."""
    # Make sure there is at least one group to modify.
    if app.group.count() == 0:
        app.group.create(Group(name="test"))
    groups_before = app.group.get_group_list()
    updated = Group(name="New group")
    # Keep the id of the group being edited so the comparison below matches it up.
    updated.id = groups_before[0].id
    app.group.modify_first_group(updated)
    groups_after = app.group.get_group_list()
    assert len(groups_before) == len(groups_after)
    # Substitute the expected new state of the first group, then compare sets.
    groups_before[0] = updated
    assert sorted(groups_before, key=Group.id_or_max) == sorted(groups_after, key=Group.id_or_max)
# def test_modify_group_header(app):
# if app.group.count() == 0:
# app.group.create(Group(name="test"))
# old_groups = app.group.get_group_list()
# app.group.modify_first_group(Group(header="New header"))
# new_groups = app.group.get_group_list()
# assert len(old_groups) == len(new_groups)
#
#
# def test_modify_group_footer(app):
# if app.group.count() == 0:
# app.group.create(Group(name="test"))
# old_groups = app.group.get_group_list()
# app.group.modify_first_group(Group(footer="New footer"))
# new_groups = app.group.get_group_list()
# assert len(old_groups) == len(new_groups)
#
#
| {
"repo_name": "alenasf/Pythontest",
"path": "test/test_modify_group.py",
"copies": "1",
"size": "1175",
"license": "apache-2.0",
"hash": -1599455714431597000,
"line_mean": 31.6388888889,
"line_max": 93,
"alpha_frac": 0.6357446809,
"autogenerated": false,
"ratio": 2.8658536585365852,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4001598339436585,
"avg_score": null,
"num_lines": null
} |
__author__ = "alena"
from sys import maxsize
class Contact:
    """Data holder for one address-book contact; every field is optional."""

    def __init__(self,firstname=None, middlename=None, lastname=None, nickname=None, title=None, company=None, address=None, home=None,
                 mobile=None, work=None, fax=None, email=None, email2=None, email3=None, homepage=None, address2=None, phone2=None,
                 notes=None, id=None):
        # Store every provided field on the instance under the same name.
        fields = dict(firstname=firstname, middlename=middlename, lastname=lastname,
                      nickname=nickname, title=title, company=company, address=address,
                      home=home, mobile=mobile, work=work, fax=fax, email=email,
                      email2=email2, email3=email3, homepage=homepage,
                      address2=address2, phone2=phone2, notes=notes, id=id)
        for attr, value in fields.items():
            setattr(self, attr, value)

    def __repr__(self):
        """Compact "id:firstname:lastname" form for test-failure output."""
        return f"{self.id}:{self.firstname}:{self.lastname}"

    def __eq__(self, other):
        """Contacts match on first/last name; ids must match unless one is unset."""
        same_id = self.id is None or other.id is None or self.id == other.id
        return same_id and (self.firstname, self.lastname) == (other.firstname, other.lastname)

    def id_or_max(self):
        """Sort key: numeric id when present, otherwise sys.maxsize (sorts last)."""
        return int(self.id) if self.id else maxsize
| {
"repo_name": "alenasf/Pythontest",
"path": "model/contact.py",
"copies": "1",
"size": "1409",
"license": "apache-2.0",
"hash": 3447115992871491600,
"line_mean": 28.9787234043,
"line_max": 149,
"alpha_frac": 0.5748757984,
"autogenerated": false,
"ratio": 3.8602739726027395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.493514977100274,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alena'
from model.contact import Contact
class ContactHelper:
    """Page-object style helper for the contact pages of the address-book app."""
    def __init__(self, app):
        # `app` is the application fixture that owns the webdriver (`app.wd`).
        self.app = app
    def open_contact_page(self):
        """Open the "add new" contact form unless it is already displayed."""
        wd = self.app.wd
        # Already on the edit form with a Submit button -> nothing to do.
        if not (wd.current_url.endswith("/edit.php") and len(wd.find_elements_by_name("Submit")) > 0):
            wd.find_element_by_link_text("add new").click()
    def create(self, contact):
        """Create `contact` via the new-contact form and return to the home page."""
        wd = self.app.wd
        self.open_contact_page()
        self.fill_contact_form(contact)
        # submit contact creation
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
        # return to home page
        self.return_to_homepage()
    def change_contact_field_value(self, contact_field_name, text):
        """Overwrite one form field; a None value leaves the field untouched."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(contact_field_name).click()
            wd.find_element_by_name(contact_field_name).clear()
            wd.find_element_by_name(contact_field_name).send_keys(text)
    def fill_contact_form(self, contact):
        """Copy every non-None attribute of `contact` into the open form."""
        wd = self.app.wd
        self.change_contact_field_value("firstname", contact.firstname)
        self.change_contact_field_value("middlename", contact.middlename)
        self.change_contact_field_value("lastname", contact.lastname)
        self.change_contact_field_value("nickname", contact.nickname)
        self.change_contact_field_value("title", contact.title)
        self.change_contact_field_value("company", contact.company)
        self.change_contact_field_value("address", contact.address)
        self.change_contact_field_value("home", contact.home)
        self.change_contact_field_value("mobile", contact.mobile)
        self.change_contact_field_value("work", contact.work)
        self.change_contact_field_value("fax", contact.fax)
        self.change_contact_field_value("email", contact.email)
        self.change_contact_field_value("email2", contact.email2)
        self.change_contact_field_value("email3", contact.email3)
        self.change_contact_field_value("homepage", contact.homepage)
        self.change_contact_field_value("address2", contact.address2)
        self.change_contact_field_value("phone2", contact.phone2)
        self.change_contact_field_value("notes", contact.notes)
        # Birthday/anniversary dropdowns are intentionally not filled (see
        # the commented-out code below kept for reference).
        # wd.find_element_by_name("email2").click()
        # if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[4]").is_selected():
        #     wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[4]").click()
        # if not wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[10]").is_selected():
        #     wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[10]").click()
        # wd.find_element_by_name("byear").click()
        # wd.find_element_by_name("byear").clear()
        # wd.find_element_by_name("byear").send_keys(contact.byear)
        # if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[5]").is_selected():
        #     wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[5]").click()
        # if not wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[3]").is_selected():
        #     wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[3]").click()
        # wd.find_element_by_name("ayear").click()
        # wd.find_element_by_name("ayear").clear()
        # wd.find_element_by_name("ayear").send_keys(contact.ayear)
    def select_first_contact(self):
        """Tick the checkbox of the first contact row."""
        wd = self.app.wd
        wd.find_element_by_name("selected[]").click()
    def delete_first_contact(self):
        """Delete the first contact and confirm the browser alert."""
        wd = self.app.wd
        self.select_first_contact()
        # init deletion
        wd.find_element_by_xpath("//input[@value='Delete']").click()
        # submit deletion
        # NOTE(review): switch_to_alert() is the legacy Selenium API
        # (modern bindings use switch_to.alert) — confirm the pinned version.
        wd.switch_to_alert().accept()
        # self.return_to_homepage()
    def modify_first_contact(self, new_contact_data):
        """Edit the first contact, filling it with `new_contact_data`."""
        wd = self.app.wd
        # self.open_contact_page()
        self.select_first_contact()
        # open modification form
        wd.find_element_by_xpath("//img[@alt='Edit']").click()
        # fill contact form
        self.fill_contact_form(new_contact_data)
        # submit modification
        wd.find_element_by_name("update").click()
        # self.return_to_homepage()
    def return_to_homepage(self):
        """Navigate back to the contact list ("home page" link)."""
        wd = self.app.wd
        wd.find_element_by_link_text("home page").click()
    def count_contact(self):
        """Return the number of contact rows (one checkbox per row)."""
        wd = self.app.wd
        self.open_contact_page()
        return len(wd.find_elements_by_name("selected[]"))
    def get_contact_list(self):
        """Scrape the contact table into a list of Contact(id, firstname, lastname)."""
        wd = self.app.wd
        # self.open_contact_page()
        contacts = []
        for element in wd.find_elements_by_name("entry"):
            cells = element.find_elements_by_tag_name("td")
            # Column layout: [checkbox, lastname, firstname, ...]
            lastname = cells[1].text
            firstname = cells[2].text
            id = cells[0].find_element_by_tag_name("input").get_attribute("value")
            contacts.append(Contact(id=id, firstname=firstname, lastname=lastname))
        return contacts
| {
"repo_name": "alenasf/Pythontest",
"path": "fixture/contact.py",
"copies": "1",
"size": "5084",
"license": "apache-2.0",
"hash": 49778657035755360,
"line_mean": 42.0847457627,
"line_max": 107,
"alpha_frac": 0.6174272227,
"autogenerated": false,
"ratio": 3.4561522773623388,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9521513050815784,
"avg_score": 0.010413289849310756,
"num_lines": 118
} |
__author__ = 'alena'
from model.group import Group
class GroupHelper:
    """Page-object style helper for the group pages of the address-book app."""
    def __init__(self, app):
        # `app` is the application fixture that owns the webdriver (`app.wd`).
        self.app = app
    def open_groups_page(self):
        """Open the groups page unless it is already displayed."""
        wd = self.app.wd
        # Already on group.php with the "new" button visible -> nothing to do.
        if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
            wd.find_element_by_link_text("groups").click()
    def change_field_value(self, field_name, text):
        """Overwrite one form field; a None value leaves the field untouched."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)
    def fill_group_form(self, group):
        """Copy name/header/footer of `group` into the open group form."""
        wd = self.app.wd
        self.change_field_value("group_name", group.name)
        self.change_field_value("group_header", group.header)
        self.change_field_value("group_footer", group.footer)
    def create(self, group):
        """Create `group` via the group form and return to the groups page."""
        wd = self.app.wd
        self.open_groups_page()
        # init group creation
        wd.find_element_by_name("new").click()
        self.fill_group_form(group)
        # submit group creation
        wd.find_element_by_name("submit").click()
        self.return_to_groups_page()
    def select_first_group(self):
        """Tick the checkbox of the first group row."""
        wd = self.app.wd
        wd.find_element_by_name("selected[]").click()
    def delete_first_group(self):
        """Delete the first group on the groups page."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_first_group()
        # submit deletion
        wd.find_element_by_name("delete").click()
        self.return_to_groups_page()
    def modify_first_group(self, new_group_data):
        """Edit the first group, filling it with `new_group_data`."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_first_group()
        # open modification form
        wd.find_element_by_name("edit").click()
        # fill group form
        self.fill_group_form(new_group_data)
        # submit modification
        wd.find_element_by_name("update").click()
        self.return_to_groups_page()
    def return_to_groups_page(self):
        """Navigate back to the group list ("group page" link)."""
        wd = self.app.wd
        wd.find_element_by_link_text("group page").click()
    def count(self):
        """Return the number of group rows (one checkbox per row)."""
        wd = self.app.wd
        self.open_groups_page()
        return len(wd.find_elements_by_name("selected[]"))
    def get_group_list(self):
        """Scrape the groups page into a list of Group(name, id)."""
        wd = self.app.wd
        self.open_groups_page()
        groups = []
        for element in wd.find_elements_by_css_selector("span.group"):
            text = element.text
            id = element.find_element_by_name("selected[]").get_attribute("value")
            groups.append(Group(name=text, id=id))
        return groups
| {
"repo_name": "alenasf/Pythontest",
"path": "fixture/group.py",
"copies": "1",
"size": "2596",
"license": "apache-2.0",
"hash": -8587110011450003000,
"line_mean": 27.2173913043,
"line_max": 100,
"alpha_frac": 0.5866718028,
"autogenerated": false,
"ratio": 3.512855209742896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4599527012542896,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alena'
class SessionHelper:
    """Helpers for logging in and out of the web application."""
    def __init__(self, app):
        # `app` is the application fixture that owns the webdriver (`app.wd`).
        self.app = app
    def login(self, username, password):
        """Open the home page and submit the login form with the given credentials."""
        wd = self.app.wd
        self.app.open_home_page()
        wd.find_element_by_name("user").click()
        wd.find_element_by_name("user").clear()
        wd.find_element_by_name("user").send_keys(username)
        wd.find_element_by_name("pass").click()
        wd.find_element_by_name("pass").clear()
        wd.find_element_by_name("pass").send_keys(password)
        wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
    def logout(self):
        """Click the Logout link (assumes a session is active)."""
        wd = self.app.wd
        wd.find_element_by_link_text("Logout").click()
    def ensure_logout(self):
        """Log out only if a session is currently active."""
        wd = self.app.wd
        if self.is_logged_in():
            self.logout()
    def is_logged_in(self):
        """Return True when a Logout link is present, i.e. someone is logged in."""
        wd = self.app.wd
        return len(wd.find_elements_by_link_text("Logout")) > 0
    def is_logged_in_as(self, username):
        """Return True when the header shows "(username)" for the current session."""
        wd = self.app.wd
        return wd.find_element_by_xpath("//div/div[1]/form/b").text == "("+username+")"
    def ensure_login(self, username, password):
        """Log in as `username`, switching accounts first if someone else is active."""
        wd = self.app.wd
        if self.is_logged_in():
            if self.is_logged_in_as(username):
                # Correct user already logged in: nothing to do.
                return
            else:
                self.logout()
        self.login(username, password)
| {
"repo_name": "alenasf/Pythontest",
"path": "fixture/session.py",
"copies": "1",
"size": "1344",
"license": "apache-2.0",
"hash": 4000602737178500600,
"line_mean": 26.4285714286,
"line_max": 87,
"alpha_frac": 0.5572916667,
"autogenerated": false,
"ratio": 3.3768844221105527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44341760888105525,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alesandro.Esquiva'
import urllib.request
import urllib.parse
import json
class AARConnector:
    """Minimal client for an AutomaticApiRest `getData.php` endpoint.

    All configuration is passed via keyword arguments; when no explicit
    `url` is given, one is built from the remaining fields.
    """

    def __init__(self, **kwargs):
        # Pull every supported option out of kwargs, with its default.
        option_defaults = (
            ("url", ""),
            ("domain", "http://automaticapirest.info/demo/"),
            ("table", ""),
            ("columns", ""),
            ("orderby", ""),
            ("way", "ASC"),
            ("limit", ""),
            ("where", ""),
            ("opt", ""),
        )
        for option, fallback in option_defaults:
            setattr(self, option, kwargs.get(option, fallback))
        if self.url == "":
            # The computed URL is echoed to stdout, as in the original tool.
            print(self.formatURL())
            self.url = self.formatURL()

    def getRawData(self):
        """Fetch the endpoint and return the response body decoded as UTF-8."""
        response = urllib.request.urlopen(urllib.request.Request(self.url))
        return response.read().decode('utf-8')

    def getJson(self):
        """Fetch and parse the endpoint's JSON payload."""
        return json.loads(self.getRawData())

    def printJson(self):
        """Pretty-print the full JSON payload."""
        print(json.dumps(self.getJson(), indent=1, sort_keys=True))

    def getData(self):
        """Return only the "data" section of the payload."""
        return self.getJson()["data"]

    def printData(self):
        """Pretty-print the "data" section of the payload."""
        print(json.dumps(self.getData(), indent=1, sort_keys=True))

    def getDBInfo(self):
        """Return only the "dbInfo" section of the payload."""
        return self.getJson()["dbInfo"]

    def printDBInfo(self):
        """Pretty-print the "dbInfo" section of the payload."""
        print(json.dumps(self.getDBInfo(), indent=1, sort_keys=True))

    def formatURL(self):
        """Assemble the getData.php query URL from the configured fields."""
        parts = [self.domain, "getData.php?t=", self.table]
        if self.columns != "":
            parts += ["&c=", self.columns]
        if self.orderby != "":
            parts += ["&o=", self.orderby]
        # A sort direction is always emitted ("ASC" by default).
        parts += ["&s=", self.way]
        if self.limit != "":
            parts += ["&l=", self.limit]
        if self.where != "":
            # Only the WHERE clause may contain characters needing escaping.
            parts += ["&w=", urllib.parse.quote(self.where)]
        if self.opt != "":
            parts += ["&opt=", self.opt]
        return "".join(parts)
| {
"repo_name": "alejandroesquiva/AutomaticApiRest-PythonConnector",
"path": "build/lib/aarpy/AARConnector.py",
"copies": "1",
"size": "2091",
"license": "mit",
"hash": -226404345322028220,
"line_mean": 27.2567567568,
"line_max": 79,
"alpha_frac": 0.543758967,
"autogenerated": false,
"ratio": 3.61139896373057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9634102416215753,
"avg_score": 0.004211102902963368,
"num_lines": 74
} |
__author__ = 'alesha'
from apiclient.discovery import build
from apiclient.errors import HttpError
import re
# Regex for ISO-8601 durations as returned by the YouTube API, e.g. "PT1H2M3S";
# every component is optional.
duration_reg = re.compile(u"PT((?P<hours>\d+)H)?((?P<minutes>\d+)M)?((?P<seconds>\d+)S)?")
# NOTE(review): hard-coded API credential committed to source — should live in
# configuration/environment, and be rotated if this repository is public.
DEVELOPER_KEY = "AIzaSyALPCgnpIM6KcJsilUsi1VxO5A7xgLujPQ"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# Module-level YouTube API client shared by the functions below.
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                developerKey=DEVELOPER_KEY)
# Convert a parse_time() result dict to a total number of seconds
# (missing components count as zero).
to_seconds = lambda x: 3600 * int(x['hours'] or 0) + \
                       60 * int(x['minutes'] or 0) + \
                       int(x['seconds'] or 0)
def parse_time(duration_str):
    """Parse an ISO-8601 duration ("PT#H#M#S") into a dict.

    Returns {'hours': int|None, 'minutes': int|None, 'seconds': int|None}
    for the first match, or None when the string contains no duration.
    """
    match = duration_reg.search(duration_str)
    if match is None:
        return None
    return {unit: int(value) if value else None
            for unit, value in match.groupdict().items()}
def get_time(video_id):
    """Return the parsed duration dict of a YouTube video, or None.

    Queries the API for contentDetails and returns the first item whose
    duration string can be parsed.
    """
    video_response = youtube.videos().list(
        id=video_id,
        part='contentDetails'
    ).execute()
    for item in video_response.get("items", []):
        parsed = parse_time(item["contentDetails"]["duration"])
        if parsed:
            return parsed
    return None
def get_video_info(video_id):
    """Return a dict of YouTube statistics plus length in seconds for `video_id`.

    Keys: yt_comments, yt_likes, yt_dislikes, yt_views, yt_favorites,
    video_length. Returns None when the API yields no item with both a
    parsable duration and statistics.
    """
    video_response = youtube.videos().list(
        id=video_id,
        part='statistics,contentDetails'
    ).execute()
    for video_result in video_response.get("items", []):
        duration_str = video_result["contentDetails"]["duration"]
        statistic_data = video_result["statistics"]
        video_length = parse_time(duration_str)
        if video_length and statistic_data:
            # Counters missing from the API response default to 0.
            return {"yt_comments": int(statistic_data.get('commentCount', 0)),
                    "yt_likes": int(statistic_data.get('likeCount', 0)),
                    "yt_dislikes": int(statistic_data.get('dislikeCount', 0)),
                    "yt_views": int(statistic_data.get('viewCount', 0)),
                    "yt_favorites": int(statistic_data.get('favoriteCount', 0)),
                    "video_length": to_seconds(video_length)
                    }
    return None
if __name__ == '__main__':
    # Smoke test: fetch and display the stats of a known video.
    result = get_video_info("9O5jf3CWTiA")
    # BUG FIX: `print result` is Python-2-only statement syntax; the
    # parenthesized call works under both Python 2 and 3.
    print(result)
| {
"repo_name": "AlexeyProskuryakov/rr",
"path": "wsgi/youtube.py",
"copies": "1",
"size": "2240",
"license": "mit",
"hash": -4695879740626117000,
"line_mean": 32.9393939394,
"line_max": 90,
"alpha_frac": 0.5977678571,
"autogenerated": false,
"ratio": 3.462132921174652,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4559900778274652,
"avg_score": null,
"num_lines": null
} |
from Bio import pairwise2, Entrez, SeqIO
from Bio.SubsMat import MatrixInfo as matlist
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML
import tensorflow as tf
from urllib.request import urlopen
from urllib.parse import urlparse
from subprocess import call, check_output, run
from pyensembl import EnsemblRelease
from bs4 import BeautifulSoup
from collections import OrderedDict
from operator import itemgetter
from itertools import islice
from threading import Thread
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import argrelextrema
import pandas as pd
import regex
import re
import datetime, math, sys, hashlib, pickle, time, random, string, json, glob, os, signal
import httplib2 as http
from urllib.request import urlopen
from pyliftover import LiftOver
from PIL import Image
class TimeoutError(Exception):
    """Raised by the Timeout context manager when the alarm fires."""
    pass
class Timeout:
    """Context manager raising TimeoutError after a wall-clock limit.

    Built on UNIX SIGALRM, so it only works in the main thread on
    POSIX systems.
    """

    def __init__(self, seconds=1, error_message='Timeout'):
        self.seconds = seconds
        self.error_message = error_message

    def handle_timeout(self, signum, frame):
        # Installed as the SIGALRM handler while the context is active.
        raise TimeoutError(self.error_message)

    def __enter__(self):
        signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Cancel any pending alarm on the way out.
        signal.alarm(0)
def random_walk(lenght):
    '''int => np.array
    Return a random walk of the given number of unit steps,
    starting from 0 (the starting point itself is not included).
    '''
    # One coin flip per step: heads -> +1, tails -> -1.
    steps = [1 if random.randint(0, 1) else -1 for _ in range(lenght)]
    return np.cumsum(steps)
def find_min_max(array):
    '''np.array => dict
    Locate the local extrema of `array`.
    Return {'maxima': ..., 'minima': ...}, each value being the index
    tuple produced by scipy's argrelextrema.
    '''
    return {
        'maxima': argrelextrema(array, np.greater),
        'minima': argrelextrema(array, np.less),
    }
def smooth(array, window_len=10, window='hanning'):
    '''np.array, int, str => np.array
    Smooth the data using a window of the requested size.

    Based on the convolution of a scaled window with the signal; reflected
    copies of the signal (window-sized) are prepended/appended so transient
    parts are minimized at both ends of the output.

    input:
        array: the input signal, a 1-D numpy array
        window_len: the dimension of the smoothing window
        window: one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman';
                'flat' produces a moving average.
    output:
        the smoothed signal, trimmed to the same length as the input.
    raises:
        ValueError: for non-1-D input, input shorter than the window,
                    or an unknown window name.
    example:
        t = linspace(-2,2,0.1)
        x = sin(t)+randn(len(t))*0.1
        y = smooth(x)
    see also:
        numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman,
        numpy.convolve, scipy.signal.lfilter
    '''
    if array.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if array.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # Window too small to smooth anything: return the input unchanged.
        # BUG FIX: this used to `return x`, which raised NameError.
        return array
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Pad with mirrored edges so the 'valid' convolution covers every sample.
    s = np.r_[array[window_len-1:0:-1], array, array[-2:-window_len-1:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        # Look the window function up on the numpy module instead of eval().
        w = getattr(np, window)(window_len)
    y = np.convolve(w/w.sum(), s, mode='valid')
    y = y[int(window_len/2-1):-int(window_len/2)]
    # Align output with input in case the lengths still differ by one.
    offset = len(y) - len(array)
    assert len(array) == len(y[offset:])
    return y[offset:]
def cohen_effect_size(group1, group2):
    '''(np.array, np.array) => float
    Compute Cohen's effect size (d) between two groups: the difference of
    the group means expressed in units of the pooled standard deviation.
    '''
    assert type(group1) == np.ndarray
    assert type(group2) == np.ndarray
    n1, n2 = len(group1), len(group2)
    # Size-weighted pooled variance of the two groups.
    pooled_var = (n1 * group1.var() + n2 * group2.var()) / (n1 + n2)
    return (group1.mean() - group2.mean()) / np.sqrt(pooled_var)
def gen_ascii_symbols(input_file, chars):
    '''
    Return a dict mapping each character in `chars` to the corresponding
    ascii-art representation read from `input_file`.

    You can use http://www.network-science.de/ascii/ to generate the
    ascii-art for each symbol. Symbols in the file are separated by at
    least one empty line ("\n"); the characters in `chars` are paired
    with the symbols in file order.

    Example:
        symbols = gen_ascii_symbols('ascii_symbols.txt',
                                    string.ascii_letters + '0123456789')
    '''
    symbols = []
    current = ''
    with open(input_file, 'r') as f:
        for line in f:
            if line == '\n':
                # Blank line terminates the current symbol (if any).
                if len(current):
                    symbols.append(current)
                    current = ''
            else:
                current += line
    # BUG FIX: flush the final symbol even when the file does not end
    # with a blank line (it used to be silently dropped).
    if len(current):
        symbols.append(current)
    return dict(zip(chars, symbols))
def gen_ascii_captcha(symbols, length=6, max_h=10, noise_level=0, noise_char='.'):
    '''
    Return a random captcha string of `length` distinct symbols and print
    its ascii-art representation.

    Arguments:
        symbols: dict of char -> ascii-art (see gen_ascii_symbols)
        length: number of symbols sampled for the captcha
        max_h: height (in text lines) of the printed banner
        noise_level: probability in [0, 1] of replacing each printed char
        noise_char: replacement character(s) used as noise

    Example:
        symbols = gen_ascii_symbols('ascii_symbols.txt', string.ascii_letters)
        captcha = gen_ascii_captcha(symbols, noise_level=0.2)
    '''
    assert noise_level <= 1
    # BUG FIX: sample from the keys of `symbols` instead of the undefined
    # module-global name `chars` (which raised NameError).
    captcha = ''.join(random.sample(list(symbols), length))
    pool = [symbols[c].split('\n') for c in captcha]
    # Print the banner top-down; rows above a short symbol are space-padded.
    for n in range(max_h, 0, -1):
        line = ''
        for item in pool:
            try:
                next_line = item[-n]
            except IndexError:
                # Symbol shorter than max_h: pad with spaces to its width.
                next_line = ' ' * max(len(part) for part in item)
            if noise_level:
                next_line = ''.join(c if random.random() > noise_level
                                    else random.choice(noise_char) for c in next_line)
            line += next_line
        print(line)
    return captcha
def rnd_sample_df(df, n=1, slice_size=1):
    '''
    Yield `n` dataframes obtained by slicing `df` at random offsets.
    Unlike pandas.DataFrame.sample(), each yielded frame is a contiguous
    slice of `slice_size` rows.
    '''
    assert n > 0 and slice_size > 0
    last_start = len(df) - slice_size
    for _ in range(n):
        start = random.randint(0, last_start)
        yield df.iloc[start:start + slice_size]
def date_to_stamp(d='2012-12-31'):
    '''
    Return the UNIX timestamp (local time, midnight) of an
    ISO "YYYY-MM-DD" date string.
    '''
    year, month, day = (int(part) for part in d.split('-'))
    return time.mktime(datetime.date(year, month, day).timetuple())
def rolling_normalize_df(df, method='min-max', size=30, overlap=5):
    '''
    Return a new df whose datapoints are normalized window by window,
    using a sliding window of `size` rows with `overlap` rows of overlap.
    Useful to obtain local (window by window) normalization of the values;
    rows covered by several windows get the mean of their normalized values.
    '''
    # NOTE(review): `split_overlap_long` is not defined in this file —
    # presumably provided elsewhere in the project; verify before use.
    to_merge = []
    for item in split_overlap_long(df, size, overlap, is_dataframe=True):
        to_merge.append(normalize_df(item, method))
    new_df = pd.concat(to_merge)
    # Overlapping windows produce duplicate index entries: average them.
    return new_df.groupby(new_df.index).mean()
def normalize_df(df, method='min-max'):
    '''
    Return normalized data.

    max, min, mean and std are computed over ALL values of the df, not
    column by column (i.e. mean = df.values.mean(), not df.mean()), which
    makes this suitable for frames whose columns hold non-independent
    values. Non-DataFrame input is first wrapped in a DataFrame.

    Methods implemented:
        'raw'      No normalization
        'min-max'  Default
        'norm'     ...
        'z-norm'   ...
        'sigmoid'  ...
        'decimal'  ...
        'softmax'  It's a transformation rather than a normalization
        'tanh'     ...

    Raises:
        ValueError: when `method` is not one of the above.
    '''
    # isinstance (rather than a strict type check) also accepts DataFrame
    # subclasses and is the idiomatic type test.
    if not isinstance(df, pd.DataFrame):
        df = pd.DataFrame(df)
    if method == 'min-max':
        return (df - df.values.min()) / (df.values.max() - df.values.min())
    if method == 'norm':
        return (df - df.values.mean()) / (df.values.max() - df.values.mean())
    if method == 'z-norm':
        return (df - df.values.mean()) / df.values.std()
    if method == 'sigmoid':
        _max = df.values.max()
        return df.apply(lambda x: 1 / (1 + np.exp(-x / _max)))
    if method == 'decimal':
        # Shift the decimal point by the digit count of the maximum value.
        scale = 10 ** len(str(int(df.values.max())))
        return df.apply(lambda x: x / scale)
    if method == 'tanh':
        return 0.5 * (np.tanh(0.01 * (df - df.values.mean())) / df.values.std() + 1)
    if method == 'softmax':
        return np.exp(df) / np.sum(np.exp(df))
    if method == 'raw':
        return df
    raise ValueError(f'"method" not found: {method}')
def merge_dict(dictA, dictB):
    '''(dict, dict) => dict
    Merge two dicts; values stored under keys present in both are summed,
    and nested dicts are merged the same way, recursively.
    Return the merged dict.

    Example:
        dictA = {'any key':1, 'point':{'x':2, 'y':3}, 'something':'aaaa'}
        dictB = {'any key':1, 'point':{'x':2, 'y':3, 'z':0, 'even more nested':{'w':99}}, 'extra':8}
        merge_dict(dictA, dictB)
        {'any key': 2,
         'point': {'x': 4, 'y': 6, 'z': 0, 'even more nested': {'w': 99}},
         'something': 'aaaa',
         'extra': 8}
    '''
    shared = set(dictA) & set(dictB)
    merged = {}
    for key, value in dictA.items():
        if key not in shared:
            # Keys unique to dictA are copied over unchanged.
            merged[key] = value
        elif type(value) is dict:
            # Both sides hold a dict under this key: merge recursively.
            merged[key] = merge_dict(value, dictB[key])
        else:
            # Plain values under a shared key are summed.
            merged[key] = value + dictB[key]
    # Finally copy over the keys unique to dictB.
    for key, value in dictB.items():
        if key not in shared:
            merged[key] = value
    return merged
def png_to_flat_array(img_file):
    """Load an image file as RGBA and return its pixels as a flat 1-D array."""
    rgba = Image.open(img_file).convert('RGBA')
    pixels = np.array(rgba)
    return pixels.ravel()
def png_to_vector_matrix(img_file):
    """Return the flattened RGBA pixels of `img_file` as a 1xN numpy matrix."""
    flat = png_to_flat_array(img_file)
    return np.matrix(flat)
def TFKMeansCluster(vectors, noofclusters, datatype="uint8"):
    '''
    K-Means Clustering using TensorFlow.
    'vectors' should be a n*k 2-D NumPy array, where n is the number
    of vectors of dimensionality k.
    'noofclusters' should be an integer.
    'datatype' is the TF dtype string used for the centroid placeholder.
    Returns (centroids, assignments) as plain NumPy/Python values.

    NOTE(review): written against the TensorFlow 1.x graph/session API
    (tf.Session, tf.placeholder, tf.assign) — will not run unmodified on
    TF 2.x without compat.v1.
    '''
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    #Find out the dimensionality
    dim = len(vectors[0])
    #Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    random.shuffle(vector_indices)
    #GRAPH OF COMPUTATION
    #We initialize a new graph and set it as the default during each run
    #of this algorithm. This ensures that as this function is called
    #multiple times, the default graph doesn't keep getting crowded with
    #unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        #SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable((vectors[vector_indices[i]])) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder(datatype, [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment,
                                             assignment_value))
        ##Now lets construct the node that will compute the mean
        #The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        #The Node/op takes the input and computes a mean along the 0th
        #dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        #Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        #Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer() #deprecated tf.initialize_all_variables()
        #Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        #Now perform the Expectation-Maximization steps of K-Means clustering
        #iterations. To keep things simple, we will only do a set number of
        #iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for iteration_n in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            #Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                #Compute Euclidean distance between this vector and each
                #centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                #cluster assignment node.
                distances = [sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                             for centroid in centroids]
                #Now use the cluster assignment node, with the distances
                #as the input
                assignment = sess.run(cluster_assignment, feed_dict = {
                    centroid_distances: distances})
                #Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={
                    assignment_value: assignment})
            ##MAXIMIZATION STEP
            #Based on the expected state computed from the Expectation Step,
            #compute the locations of the centroids so as to maximize the
            #overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                #Collect all the vectors assigned to this cluster
                assigned_vects = [vectors[i] for i in range(len(vectors))
                                  if sess.run(assignments[i]) == cluster_n]
                #Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: np.array(assigned_vects)})
                #Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        #Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
def xna_calc(sequence, t='dsDNA', p=0):
    '''str => dict
    BETA version, works only for dsDNA and ssDNA.
    Return basic "biomath" calculations based on the input sequence.
    Arguments:
        t (type): 'ssDNA' or 'dsDNA'
        p (phosphates): 0, 1 or 2
            # in case of ssDNA having a 3'P you should pass 2,
            # i.e. 2 phosphates present in 1 dsDNA molecule
    Raises ValueError on invalid characters, type or phosphate count.
    Fixes vs. the original:
      * the "p" error message now matches the actual 0-2 range;
      * 'moles per ng' is 1e-9 g divided by the molar mass
        (the original multiplied two masses together);
      * the unreachable `return ValueError(...)` branch was removed
        (t is already validated above).
    '''
    r = {}
    # --- validate inputs -------------------------------------------------
    c = Counter(sequence.upper())
    if any(k not in 'ACGNT' for k in c):
        raise ValueError('Wrong sequence passed: "sequence" contains invalid characters, only "ATCGN" are allowed.')
    if t not in ['ssDNA', 'dsDNA']:
        raise ValueError(f'Wrong DNA type passed: "t" can be "ssDNA" or "dsDNA". "{t}" was passed instead.')
    if not 0 <= p <= 2:
        raise ValueError(f'Wrong number of 5\'-phosphates passed: "p" must be an integer from 0 to 2. {p} was passed instead.')
    # --- length ----------------------------------------------------------
    r['len'] = len(sequence)
    # --- molecular weight (OligoCalc-style, anhydrous) -------------------
    # Anhydrous MW = A*313.21 + T*304.2 + C*289.18 + G*329.21 - 61.96.
    # The -61.96 accounts for removal of HPO2 (63.98) and addition of two
    # hydrogens (2.02), i.e. 5'/3'-hydroxylated oligos; each 5' phosphate
    # adds 79.0 per strand. 'N' uses 303.7 (average of the four bases).
    def _strand_mw(counts):
        # molecular weight of one strand, without phosphate adjustment
        return ((counts['A'] * 313.21) + (counts['T'] * 304.2) +
                (counts['C'] * 289.18) + (counts['G'] * 329.21) +
                (counts['N'] * 303.7) - 61.96)
    if t == 'ssDNA':
        mw = _strand_mw(c) + (p * 79.0)
    else:  # dsDNA: forward strand + its complement strand
        d = Counter(complement(sequence.upper()))
        mw = (_strand_mw(c) + (p * 79.0)) + (_strand_mw(d) + (p * 79.0))
    r['MW in Daltons'] = mw
    # 1 Dalton = 1.6605402e-24 g = 1.6605402e-15 ng
    r['MW in ng'] = mw * 1.6605402e-15
    # molecules in 1 ng
    r['molecules per ng'] = 1 / r['MW in ng']
    # mass (ng) of 1e9 molecules
    r['ng per billion molecules'] = (10**9) / r['molecules per ng']
    # moles contained in 1 ng: 1e-9 g / molar mass (g/mol)
    r['moles per ng'] = 1e-9 / mw
    return r
def occur(string, sub):
    '''(str, str) => int
    Count the occurrences of `sub` in `string`, counting overlaps too
    (unlike str.count, which skips past each match).
    Example:
        >> s = 'ACTGGGACGGGGGG'
        >> s.count('GGG')
        3
        >> occur(s, 'GGG')
        5
    '''
    hits = 0
    pos = string.find(sub)
    while pos != -1:
        hits += 1
        # advance by one character only, so overlapping matches count
        pos = string.find(sub, pos + 1)
    return hits
def get_prime(n):
    '''int => generator
    Yield every prime number strictly below n, in increasing order.
    Fix: the original iterated range(2, n, 2) — even numbers only —
    so it could never yield any odd prime.
    '''
    if n > 2:
        yield 2
    for num in range(3, n, 2):
        # trial division by odd candidates up to sqrt(num)
        if all(num % i != 0 for i in range(3, int(math.sqrt(num)) + 1, 2)):
            yield num
def ssl_fencrypt(infile, outfile):
    '''(file_path, file_path) => encrypted_file
    Use openssl to encrypt a file (AES-256-CBC, base64 armoured, salted).
    Prompts twice for the password; does nothing if the two entries differ.
    Security fix: the command is now passed as an argument list with
    shell=False, so the password and the file paths can no longer be
    interpreted (or injected) by a shell.
    '''
    pwd = getpass('enter encryption pwd:')
    if getpass('repeat pwd:') == pwd:
        run(['openssl', 'enc', '-aes-256-cbc', '-a', '-salt',
             '-pass', f'pass:{pwd}', '-in', str(infile), '-out', str(outfile)])
    else:
        print("passwords don't match.")
def ssl_fdecrypt(infile, outfile):
    '''(file_path, file_path) => decrypted_file
    Use openssl to decrypt a file produced by ssl_fencrypt().
    Security fix: the command is now passed as an argument list with
    shell=False, so the password and the file paths can no longer be
    interpreted (or injected) by a shell.
    '''
    pwd = getpass('enter decryption pwd:')
    run(['openssl', 'enc', '-d', '-aes-256-cbc', '-a',
         '-pass', f'pass:{pwd}', '-in', str(infile), '-out', str(outfile)])
def loop_zip(strA, strB):
    '''(str, str) => zip
    Return a zip object pairing each letter of strA with the letters of
    strB; when strB is shorter, its letters are recycled from the start.
    Example:
        >>> list(loop_zip('ABCDEF', '123'))
        [('A', '1'), ('B', '2'), ('C', '3'), ('D', '1'), ('E', '2'), ('F', '3')]
    '''
    assert len(strA) >= len(strB)
    recycled = []
    idx = 0
    for _ in strA:
        if idx >= len(strB):
            idx = 0  # wrap around and reuse strB from its first letter
        recycled.append(strB[idx])
        idx += 1
    return zip(list(strA), recycled)
def encrypt(msg, pwd):
    '''(str, str) => list
    Simple encryption/decryption tool: each message character is encoded
    as string_to_number(char) + string_to_number(password char), with the
    password recycled over the message (see loop_zip).
    WARNING:
    This is NOT cryptographically secure!!
    '''
    if len(pwd) > len(msg):
        raise ValueError('The password is longer than the message. This is not allowed.')
    pairs = loop_zip(msg, pwd)
    return [string_to_number(m) + string_to_number(k) for m, k in pairs]
def decrypt(encr, pwd):
    '''(list, str) => str
    Simple encryption/decryption tool; inverse of encrypt(): subtracts the
    recycled password codes and maps the numbers back to characters.
    WARNING:
    This is NOT cryptographically secure!!
    '''
    chars = [number_to_string(code - string_to_number(key))
             for code, key in loop_zip(encr, pwd)]
    return ''.join(chars)
def convert_mw(mw, to='g'):
    '''(int_or_float, str) => float
    Convert a molecular weight (in dalton) to g, mg, ug, ng or pg.
    Example:
        >> diploid_human_genome_mw = 6_469.66e6 * 660  # length * average weight of nucleotide
        >> convert_mw(diploid_human_genome_mw, to="ng")
        0.0070904661368191195
    '''
    # 1 Da = 1.6605402e-24 g; the other factors just shift the decimal point.
    dalton_to = {'g': 1.6605402e-24,
                 'mg': 1.6605402e-21,
                 'ug': 1.6605402e-18,
                 'ng': 1.6605402e-15,
                 'pg': 1.6605402e-12}
    factor = dalton_to.get(to)
    if factor is None:
        raise ValueError(f"'to' must be one of ['g','mg','ug','ng','pg'] but '{to}' was passed instead.")
    return mw * factor
def snp237(snp_number):
    '''int => tuple
    Return ('chrN', position) of a SNP on the GRCh37 reference genome,
    scraped from the SNPedia page for rs<snp_number>.
    Requires network access; LiftOver presumably comes from the
    pyliftover package imported elsewhere in this module — confirm.
    '''
    query = f'https://www.snpedia.com/index.php/Rs{snp_number}'
    html = urlopen(query).read().decode("utf-8")
    # Scrape the SNPedia info box line by line.
    for line in html.split('\n'):
        if line.startswith('<tr><td width="90">Reference</td>'):
            reference = line.split('"')[-2]
        elif line.startswith('<tr><td width="90">Chromosome</td>'):
            chromosome = line.split('<td>')[1].split('<')[0]
        elif line.startswith('<tr><td width="90">Position</td>'):
            position = int(line.split('<td>')[1].split('<')[0])
            break
    # NOTE(review): if the page lacks a "Reference"/"Chromosome"/"Position"
    # row, the names below are unbound and a NameError is raised — confirm
    # SNPedia always emits those rows.
    if 'GRCh38' in reference:
        # Position reported against GRCh38: lift it over to hg19 (GRCh37).
        lo = LiftOver('hg38', 'hg19')
        return lo.convert_coordinate(f'chr{chromosome}', position)[0][:2]
    else:
        return f'chr{chromosome}', position
def is_prime(n):
    '''int => bool
    Return True if n is a prime number.
    Fix: values below 1 (0 and negatives) previously fell through the
    checks and were reported as prime.
    '''
    if n < 2:
        return False  # primes are >= 2; also covers 0 and negatives
    if n == 2:
        return True
    if n % 2 == 0:
        return False  # even and not 2 -> composite
    # trial division by odd candidates up to sqrt(n)
    max_divisor = math.floor(math.sqrt(n))
    for d in range(3, 1 + max_divisor, 2):
        if n % d == 0:
            return False
    return True
def flatmap(f, items):
    '''(callable, iterable) => iterator
    Lazily apply f to every item and chain the resulting iterables into
    one flat iterator.
    Fix: the original called `imap`, which was Python 2's itertools.imap
    and no longer exists — the builtin map is already lazy in Python 3.
    '''
    return chain.from_iterable(map(f, items))
def parse_fasta(fasta_file):
    '''file_path => dict
    Return a dict of {id: sequence} parsed from a fasta file; the id is
    the header line without the leading '>'.
    Fixes: an empty file no longer produces a bogus {False: ''} entry,
    and a header that follows a record with no sequence no longer
    corrupts the running id.
    '''
    d = {}
    _id = False  # id of the record currently being accumulated
    seq = ''
    with open(fasta_file, 'r') as f:
        for line in f:
            if line.startswith('\n'):
                continue  # skip blank lines
            if line.startswith('>'):
                if _id and seq:
                    d[_id] = seq  # flush the previous record
                _id = line[1:].strip()
                seq = ''
            else:
                seq += line.strip()
    if _id and seq:  # flush the last record (guards against empty files)
        d[_id] = seq
    return d
def get_fasta_stats(fasta_file):
    '''file_path => dict
    Return length and base counts (A/T/C/G/N) of each sequence found in
    the fasta file, keyed by record id.
    Fix: the original stored the raw sequence string (instead of the
    stats dict) for every record except the last one, and produced a
    bogus {False: ...} entry for empty files.
    '''
    def _stats(seq):
        # per-sequence summary; sequences are upper-cased during parsing
        return {'length': len(seq),
                'A': seq.count('A'),
                'T': seq.count('T'),
                'C': seq.count('C'),
                'G': seq.count('G'),
                'N': seq.count('N')}
    d = {}
    _id = False  # id of the record currently being accumulated
    seq = ''
    with open(fasta_file, 'r') as f:
        for line in f:
            if line.startswith('\n'):
                continue  # skip blank lines
            if line.startswith('>'):
                if _id and seq:
                    d[_id] = _stats(seq)  # flush the previous record
                _id = line[1:].strip()
                seq = ''
            else:
                seq += line.strip().upper()
    if _id and seq:  # flush the last record (guards against empty files)
        d[_id] = _stats(seq)
    return d
def quick_align(reference, sample, matrix=matlist.blosum62, gap_open=-10, gap_extend=-0.5):
    '''
    Return a binary score list for a pairwise global alignment:
    1 where the aligned sample character matches the reference, 0 elsewhere.
    Uses Biopython's pairwise2 with the given substitution matrix and
    affine gap penalties; only the top-scoring alignment is considered.
    '''
    alignments = pairwise2.align.globalds(reference, sample, matrix, gap_open, gap_extend)
    aln_reference, aln_sample = alignments[0][0], alignments[0][1]
    return [1 if ref_char == aln_sample[i] else 0
            for i, ref_char in enumerate(aln_reference)]
def vp(var_name, var_dict=None, sep=' : '):
    '''(str, dict, str) => None
    Variable Print, a fast way to print out a variable's value.
    var_dict defaults to this module's globals(), resolved at call time;
    pass e.g. locals() or any mapping to look the name up elsewhere.
    >>> scale = 0.35
    >>> vp('scale')
    scale : 0.35
    >>> vp('mass', sep='=')
    mass=71 Kg
    Fixes: the lookup now uses the var_dict parameter (the original
    referenced an undefined name "g", so it always printed "not found"),
    and the default mapping is no longer frozen at import time.
    '''
    if var_dict is None:
        var_dict = globals()
    try:
        print(f'{var_name}{sep}{var_dict[var_name]}')
    except KeyError:
        print(f'{var_name} not found!')
def view_matrix(arrays):
    '''list_of_arrays => None
    Print the matrix row by row, then a separator, then each row's index
    and length, and finally the row count and the length of the last row
    as the column count.
    '''
    for row in arrays:
        print(row)
    print('=========')
    for n, r in enumerate(arrays):
        print(n, len(r))
    # `r` is intentionally the last row from the loop above
    print(f'row:{len(arrays)}\ncol:{len(r)}')
def fill_matrix(arrays, z=0):
    '''(list_of_arrays, any) => None
    Pad, in place, every array shorter than the longest one with z so
    that all arrays end up with the same length.
    '''
    target = max(len(a) for a in arrays)
    for idx, arr in enumerate(arrays):
        missing = target - len(arr)
        if missing:
            arrays[idx] = np.append(arr, [z] * missing)
def get_size(obj_0):
    '''obj => int
    Recursively iterate to sum size of object & members (in bytes).
    Adapted from http://stackoverflow.com/questions/449560/how-do-i-determine-the-size-of-an-object-in-python
    NOTE(review): `zero_depth_bases` and `iteritems` are module-level
    names defined elsewhere in this file (iteritems is presumably the
    string 'items') — confirm before relying on this function.
    NOTE(review): `_seen_ids` is a mutable default, so it persists across
    calls: calling get_size twice on the same object returns 0 the
    second time.
    '''
    def inner(obj, _seen_ids = set()):
        # Each object is counted once, by identity.
        obj_id = id(obj)
        if obj_id in _seen_ids:
            return 0
        _seen_ids.add(obj_id)
        size = sys.getsizeof(obj)
        if isinstance(obj, zero_depth_bases):
            pass # bypass remaining control flow and return
        elif isinstance(obj, (tuple, list, Set, deque)):
            # sequences/sets: add the size of every contained item
            size += sum(inner(i) for i in obj)
        elif isinstance(obj, Mapping) or hasattr(obj, iteritems):
            # mappings: add the size of every key and value
            size += sum(inner(k) + inner(v) for k, v in getattr(obj, iteritems)())
        # Check for custom object instances - may subclass above too
        if hasattr(obj, '__dict__'):
            size += inner(vars(obj))
        if hasattr(obj, '__slots__'): # can have __slots__ with __dict__
            size += sum(inner(getattr(obj, s)) for s in obj.__slots__ if hasattr(obj, s))
        return size
    return inner(obj_0)
def total_size(o, handlers={}, verbose=False):
    '''(object, dict, bool) => int
    Return the approximate memory footprint of an object and all of its
    contents, in bytes. The contents of the builtin containers (tuple,
    list, deque, dict, set, frozenset) and their subclasses are found
    automatically; to search other containers, pass handlers that map a
    class to a function yielding its contents, e.g.
        handlers = {SomeContainerClass: iter,
                    OtherContainerClass: OtherContainerClass.get_elements}
    With verbose=True every visited object is printed as
    "<size> <type> <repr>".
    '''
    def iter_dict_items(d):
        # flatten a dict into the stream "key, value, key, value, ..."
        return chain.from_iterable(d.items())
    all_handlers = {tuple: iter,
                    list: iter,
                    deque: iter,
                    dict: iter_dict_items,
                    set: iter,
                    frozenset: iter,
                    }
    all_handlers.update(handlers)        # user handlers take precedence
    seen = set()                         # object ids already counted
    default_size = sys.getsizeof(0)      # fallback when __sizeof__ is missing
    def sizeof(obj):
        obj_id = id(obj)
        if obj_id in seen:               # never count the same object twice
            return 0
        seen.add(obj_id)
        total = sys.getsizeof(obj, default_size)
        if verbose:
            print(total, type(obj), repr(obj))
        for container_type, handler in all_handlers.items():
            if isinstance(obj, container_type):
                total += sum(map(sizeof, handler(obj)))
                break
        return total
    return sizeof(o)
def center(pattern):
    '''np.array => np.array
    Return the centered pattern: every value shifted so the mean is 0,
    i.e. [(value - mean) for value in pattern].
    >>> array = np.array([681.7, 682.489, 681.31, 682.001, 682.001, 682.499, 682.001])
    >>> center(array)
    array([-0.30014286, 0.48885714, -0.69014286, 0.00085714, 0.00085714, 0.49885714, 0.00085714])
    '''
    return pattern - pattern.mean()
def rescale(pattern):
    '''np.array => np.array
    Min-max rescale: map each point of the array to a float in [0, 1].
    >>> a = np.array([1,2,3,4,5,6,5,4,3,2,1])
    >>> rescale(a)
    array([ 0. , 0.2, 0.4, 0.6, 0.8, 1. , 0.8, 0.6, 0.4, 0.2, 0. ])
    '''
    lo = pattern.min()
    hi = pattern.max()
    return (pattern - lo) / (hi - lo)
def standardize(pattern):
    '''np.array => np.array
    Return the standardized pattern: (value - mean) / std for each point.
    >>> a = np.array([1,2,3,4,5,6,5,4,3,2,1])
    >>> standardize(a)
    array([-1.41990459, -0.79514657, -0.17038855, 0.45436947, 1.07912749,
    1.7038855 , 1.07912749, 0.45436947, -0.17038855, -0.79514657,
    -1.41990459])
    '''
    return (pattern - pattern.mean()) / pattern.std()
def normalize(pattern):
    '''np.array => np.array
    Return the pattern divided by its Euclidean norm (np.linalg.norm),
    so the result has unit length.
    >>> a = np.array([1,2,3,4,5,6,5,4,3,2,1])
    >>> normalize(a)
    '''
    norm = np.linalg.norm(pattern)
    return pattern / norm
def gen_patterns(data, length, ptype='all'):
    '''(array, int) => dict
    Generate all possible patterns of a given length by transforming
    consecutive slices of data with standardize/normalize/rescale/center.
    Return a dict of {pattern_type: {start_index: transformed_slice}}.
    >>> data = [1,2,3,4,5,4,3,2,1]
    >>> gen_patterns(data,len(data))
    {'center': {0: array([-1.77777778, -0.77777778, 0.22222222, 1.22222222, 2.22222222, 1.22222222, 0.22222222, -0.77777778, -1.77777778])},
    'norm': {0: array([ 0.10846523, 0.21693046, 0.32539569, 0.43386092, 0.54232614, 0.43386092, 0.32539569, 0.21693046, 0.10846523])},
    'scale': {0: array([ 0. , 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25, 0. ])},
    'std': {0: array([-1.35224681, -0.59160798, 0.16903085, 0.92966968, 1.69030851, 0.92966968, 0.16903085, -0.59160798, -1.35224681])}}
    >>> gen_patterns(data,3)
    {'center': {0: array([-1., 0., 1.]), 1: array([-1., 0., 1.]), 2: array([-1., 0., 1.])},
    'norm': {0: array([ 0.26726124, 0.53452248, 0.80178373]), 1: array([ 0.37139068, 0.55708601, 0.74278135]), 2: array([ 0.42426407, 0.56568542, 0.70710678])},
    'scale': {0: array([ 0. , 0.5, 1. ]), 1: array([ 0. , 0.5, 1. ]), 2: array([ 0. , 0.5, 1. ])},
    'std': {0: array([-1.22474487, 0. , 1.22474487]), 1: array([-1.22474487, 0. , 1.22474487]), 2: array([-1.22474487, 0. , 1.22474487])}}
    '''
    results = {}
    transforms = ['std', 'norm', 'scale', 'center']
    if ptype == 'all':  # to do: select specific ptypes
        for name in transforms:
            results[name] = {}
    for start in range(length):
        if start + length > len(data):
            break  # the window would run past the end of data
        window = np.array(data[start:start + length])
        computed = {'std': standardize(window),
                    'norm': normalize(window),
                    'scale': rescale(window),
                    'center': center(window)}
        for name in transforms:
            results[name].update({start: computed[name]})
    return results
def delta_percent(a, b, warnings=False):
    '''(float, float) => float
    Return the difference in percentage between a and b.
    If the result is 0.0 (or the computation fails, e.g. a == 0),
    return 1e-09 instead, to avoid -inf downstream.
    >>> delta_percent(20,22)
    10.0
    >>> delta_percent(2,20)
    900.0
    >>> delta_percent(1,1)
    1e-09
    >>> delta_percent(10,9)
    -10.0
    '''
    eps = 0.000000001  # sentinel returned instead of 0.0 / on failure
    try:
        change = ((float(b) - a) / abs(a)) * 100
    except Exception as exc:
        if warnings:
            print(f'Exception raised by delta_percent(): {exc}')
        return eps
    return change if change != 0.0 else eps
def is_similar(array1, array2, t=0.1):
    '''(array, array, float) => bool
    Return True when the two arrays have the same length and every pair
    of corresponding points differs by at most t.
    '''
    if len(array1) != len(array2):
        return False
    return all(abs(value - array2[i]) <= t for i, value in enumerate(array1))
def cluster_patterns(pattern_list, t):
    ''' ([array, array, ...], float) => dict
    Return a dict mapping each pattern's index in pattern_list to the
    indices of the patterns similar to it (see is_similar).
    "t" is the inverse of a similarity threshold, i.e. the maximum
    allowed discrepancy between array1[i] and array2[i].
    If no similar patterns are found, the value is an empty list.
    >>> a = [1,2,3,4,5,6,5,4,3,2,1]
    >>> a1 = [n+1 for n in a]
    >>> a2 = [n+5 for n in a]
    >>> a3 = [n+6 for n in a]
    >>> patterns = [a,a1,a2,a3]
    >>> cluster_patterns(patterns,t=2)
    {0: [1], 1: [0], 2: [3], 3: [2]}
    >>> cluster_patterns(patterns,t=5)
    {0: [1, 2], 1: [0, 2, 3], 2: [0, 1, 3], 3: [1, 2]}
    >>> cluster_patterns(patterns,t=0.2)
    {0: [], 1: [], 2: [], 3: []}
    '''
    result = {}
    for idx, reference in enumerate(pattern_list):
        result[idx] = [i for i, candidate in enumerate(pattern_list)
                       if i != idx and is_similar(reference, candidate, t=t)]
    return result
def stamp_to_date(stamp, time='utc'):
    '''(int_or_float, str) => datetime.datetime
    Convert a UNIX timestamp to a naive UTC or local-time datetime.
    >>> stamp = 1477558868.93
    >>> stamp_to_date(stamp, time='utc')
    datetime.datetime(2016, 10, 27, 9, 1, 8, 930000)
    >>> stamp_to_date(int(stamp), time='utc')
    datetime.datetime(2016, 10, 27, 9, 1, 8)
    >>> stamp_to_date(stamp, time='local')  # e.g. UTC+2
    datetime.datetime(2016, 10, 27, 11, 1, 8, 930000)
    '''
    mode = time.lower()
    if mode == 'utc':
        return datetime.datetime.utcfromtimestamp(stamp)
    if mode == 'local':
        return datetime.datetime.fromtimestamp(stamp)
    raise ValueError('"time" must be "utc" or "local"')
def future_value(interest, period, cash):
    '''(float, int, int_or_float) => float
    Return the future value of an amount of cash growing by a fixed
    compound interest over a number of periods.
    Raises ValueError when interest is outside [0, 1].
    >>> future_value(0.5,1,1)
    1.5
    >>> future_value(0.1,10,100)
    259.37424601
    '''
    if not 0 <= interest <= 1:
        raise ValueError('"interest" must be a float between 0 and 1')
    balance = cash
    for _ in range(period):
        balance += balance * interest  # compound once per period
    return balance
def entropy(sequence, verbose=False):
    '''(string, bool) => float
    Return the Shannon entropy of a string: the minimum average number
    of bits per symbol required to encode it. The theoretical limit for
    data compression is entropy * len(sequence) bits.
    With verbose=True, also print the alphabet, the symbol frequencies
    and the rounded-up bits-per-symbol figure.
    '''
    symbols = list(sequence)
    alphabet = list(set(symbols))  # distinct symbols in the string
    # frequency of each symbol in the string
    frequencies = [symbols.count(symbol) / len(symbols) for symbol in alphabet]
    # Shannon entropy: -sum(p * log2(p))
    ent = 0.0
    for freq in frequencies:
        ent = ent + freq * math.log(freq, 2)
    ent = -ent
    if verbose:
        print('Input string:')
        print(sequence)
        print()
        print('Alphabet of symbols in the string:')
        print(alphabet)
        print()
        print('Frequencies of alphabet symbols:')
        print(frequencies)
        print()
        print('Shannon entropy:')
        print(ent)
        print('Minimum number of bits required to encode each symbol:')
        print(int(math.ceil(ent)))
    return ent
def quick_entropy(sequence):
    '''(string) => float
    Return the Shannon entropy of a string; compact version of entropy().
    Calculated as the minimum average number of bits per symbol required
    for encoding the string. The theoretical limit for data compression
    is entropy * len(sequence) bits.
    '''
    alphabet = set(sequence)  # distinct symbols in the string
    # frequency of each symbol in the string
    frequencies = [sequence.count(symbol) / len(sequence) for symbol in alphabet]
    # Shannon entropy: -sum(p * log2(p))
    ent = 0.0
    for freq in frequencies:
        ent -= freq * math.log(freq, 2)
    return ent
def percent_of(total, fraction):
    '''(int_or_float, int_or_float) => float
    Return 'fraction' expressed as a percentage of 'total'.
    Asserts total > 0; NaN inputs propagate as nan.
    Examples:
    percent_of(150, 75)
    >>> 50.0
    percent_of(30, 90)
    >>> 300.0
    '''
    assert total > 0
    has_nan = np.isnan(total) or np.isnan(fraction)
    if has_nan:
        return nan
    return (100 * fraction) / total
def buzz(sequence, noise=0.01):
    '''(string, float) => string
    Return a copy of sequence with random noise: with probability `noise`
    each character is replaced by a random character of the sequence,
    deleted ('del') or duplicated ('dup'). noise=0 returns the input
    unchanged.
    Fix: random.sample() on a set was deprecated in Python 3.9 and
    removed in 3.11, so the mutation alphabet is now a list and drawn
    with random.choice().
    '''
    if not noise:
        return sequence
    # unique characters of the sequence plus the two structural mutations
    bits = list(set(sequence)) + ['del', 'dup']
    r = ''
    for char in sequence:
        if random.random() <= noise:
            b = random.choice(bits)
            if b == 'del':
                continue          # drop this character
            elif b == 'dup':
                r += 2 * char     # duplicate this character
            else:
                r += b            # substitute with a random character
        else:
            r += char
    return r
def simple_consensus(aligned_sequences_file):
    '''file => string
    Return the consensus of a series of fasta sequences aligned with
    muscle: per-column majority vote, with gap characters ('-') removed
    from the final string. All sequences must have the same length.
    '''
    sequences = []
    current = False  # False until the first header has been seen
    with open(aligned_sequences_file, 'r') as handle:
        for raw in handle:
            if raw.startswith('\n'):
                continue
            if raw.startswith('>'):
                if current:
                    sequences.append(current)
                current = ''
            else:
                current += raw.strip()
    sequences.append(current)
    # all aligned sequences must have the same length
    for s in sequences:
        assert len(s) == len(sequences[0])
    # majority vote, column by column
    consensus = ''.join(
        Counter(s[i] for s in sequences).most_common(1)[0][0]
        for i in range(len(sequences[0]))
    )
    return consensus.replace('-', '')
def print_sbar(n, m, s='|#.|', size=30, message=''):
    '''(int, int, string, int, string) => None
    Print (in place, via carriage return) a progress bar using the four
    symbols in `s`: s[0]/s[3] are the borders, s[1] the filled tick and
    s[2] the empty tick.
    Example:
        range_limit = 1000
        for n in range(range_limit):
            print_sbar(n+1, m=range_limit)
            time.sleep(0.1)
    '''
    # rescale n to the bar size
    if m != size:
        n = (n * size) / m
        m = size
    # build the tick string and the percentage
    filled = int(n) * s[1]
    empty = (int(m) - int(n)) * s[2]
    pct = round(n / (int(m)) * 100, 1)
    if pct >= 100:  # clamp rounding overflow
        pct = 100.0
    # write in place on stdout
    sys.stdout.write(f'\r{message}{s[0]}{filled}{empty}{s[3]} {pct}% ')
    sys.stdout.flush()
def get_hash(a_string, algorithm='md5'):
    '''str => str
    Return the hex digest of a string, computed with the requested
    algorithm ('md5' or 'sha256'); raises ValueError otherwise.
    .. code-block:: python
        >>> get_hash('prova','md5')
        '189bbbb00c5f1fb7fba9ad9285f193d1'
        >>> get_hash('prova','sha256')
        '6258a5e0eb772911d4f92be5b5db0e14511edbe01d1d0ddd1d5a2cb9db9a56ba'
    '''
    hashers = {'md5': hashlib.md5, 'sha256': hashlib.sha256}
    if algorithm not in hashers:
        raise ValueError('algorithm {} not found'.format(algorithm))
    return hashers[algorithm](a_string.encode()).hexdigest()
def get_first_transcript_by_gene_name(gene_name):
    '''str => str
    Return the id of the main transcript for a given gene.
    The data is from http://grch37.ensembl.org/
    Requires network access and the pyensembl package (EnsemblRelease),
    imported elsewhere in this module.
    '''
    data = EnsemblRelease(75)  # Ensembl release 75 corresponds to GRCh37
    gene = data.genes_by_name(gene_name)
    # The gene id and location are scraped from the repr() of the first
    # matching Gene object — presumably "Gene(gene_id=..., ..., location=...)";
    # confirm against the installed pyensembl version.
    gene_id = str(gene[0]).split(',')[0].split('=')[-1]
    gene_location = str(gene[0]).split('=')[-1].strip(')')
    url = 'http://grch37.ensembl.org/Homo_sapiens/Gene/Summary?db=core;g={};r={}'.format(gene_id,gene_location)
    for line in urlopen(url):
        # NOTE(review): urlopen yields bytes in Python 3, so this substring
        # test on a str pattern likely needs line.decode() — confirm.
        if '<tbody><tr><td class="bold">' in line:
            return line.split('">')[2].split('</a>')[0]
def get_exons_coord_by_gene_name(gene_name):
    '''str => OrderedDict({'exon_id':[coordinates]})
    Return an OrderedDict having as k the exon_id
    and as value a tuple containing the genomic coordinates ('chr',start,stop).
    NOTE(review): this function reads a module-level `data` (presumably an
    EnsemblRelease created elsewhere) instead of building its own like its
    siblings do — confirm `data` exists at import time.
    NOTE(review): a later function in this module re-defines the same name,
    so this definition is shadowed in the module namespace.
    '''
    gene = data.genes_by_name(gene_name)
    # gene id / location scraped from the Gene object's repr()
    gene_id = str(gene[0]).split(',')[0].split('=')[-1]
    gene_location = str(gene[0]).split('=')[-1].strip(')')
    # NOTE(review): gene_location and gene_transcript are computed but
    # never used below.
    gene_transcript = get_first_transcript_by_gene_name(gene_name).split('.')[0]
    table = OrderedDict()
    for exon_id in data.exon_ids_of_gene_id(gene_id):
        exon = data.exon_by_id(exon_id)
        coordinates = (exon.contig, exon.start, exon.end)
        table.update({exon_id:coordinates})
    return table
def get_exons_coord_by_gene_name(gene_name):
    '''string => OrderedDict
    Return an OrderedDict mapping each exon id of the gene's main
    transcript to the coordinate strings scraped from the Ensembl
    (GRCh37) transcript-exons web page.
    Requires network access, the pyensembl package (EnsemblRelease) and
    the get_html helper defined elsewhere in this module.
    NOTE(review): this definition shadows the earlier function with the
    same name in this module.
    .. code-block:: python
        >>> table = get_exons_coord_by_gene_name('TP53')
        >>> for k,v in table.items():
        ...    print(k,v)
        ENSE00002419584 ['7,579,721', '7,579,700']
    '''
    data = EnsemblRelease(75)  # Ensembl release 75 corresponds to GRCh37
    gene = data.genes_by_name(gene_name)
    # gene id / location scraped from the Gene object's repr()
    gene_id = str(gene[0]).split(',')[0].split('=')[-1]
    gene_location = str(gene[0]).split('=')[-1].strip(')')
    gene_transcript = get_first_transcript_by_gene_name(gene_name).split('.')[0]
    url = 'http://grch37.ensembl.org/Homo_sapiens/Transcript/Exons?db=core;g={};r={};t={}'.format(gene_id,gene_location,gene_transcript)
    str_html = get_html(url)
    # Re-join the page, dropping lines that cannot be encoded.
    html = ''
    for line in str_html.split('\n'):
        try:
            #print line
            html += str(line)+'\n'
        except UnicodeEncodeError:
            pass
    blocks = html.split('\n')
    table = OrderedDict()
    # For every exon id, find the HTML block mentioning it and collect the
    # coordinate table cells.
    for exon_id in data.exon_ids_of_gene_id(gene_id):
        for i,txt in enumerate(blocks):
            if exon_id in txt:
                if exon_id not in table:
                    table.update({exon_id:[]})
                for item in txt.split('<td style="width:10%;text-align:left">')[1:-1]:
                    table[exon_id].append(item.split('</td>')[0])
    return table
def split_overlap(seq, size, overlap, is_dataframe=False):
    '''(seq, int, int) => [[...],[...],...]
    Split a sequence into chunks of a specific size and overlap.
    Works also on strings! Very efficient for short sequences
    (len(seq) <= 100). Set "is_dataframe=True" to split a pandas.DataFrame.
    Fix: overlap >= size now raises ValueError — previously the slice
    step was <= 0 and the loop never terminated.
    Examples:
        >>> split_overlap(seq=list(range(10)),size=3,overlap=2)
        [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]]
        >>> split_overlap(seq=range(10),size=3,overlap=2)
        [range(0, 3), range(1, 4), range(2, 5), range(3, 6), range(4, 7), range(5, 8), range(6, 9), range(7, 10)]
    '''
    if size < 1 or overlap < 0:
        raise ValueError('size must be >= 1 and overlap >= 0')
    if overlap >= size:
        raise ValueError('overlap must be smaller than size, otherwise the split cannot advance')
    step = size - overlap  # how far the window advances each iteration
    result = []
    if is_dataframe:
        while len(seq) > size:
            result.append(seq.iloc[:size])
            seq = seq.iloc[step:]
        result.append(seq)  # the (possibly shorter) tail chunk
    else:
        while len(seq) > size:
            result.append(seq[:size])
            seq = seq[step:]
        result.append(seq)  # the (possibly shorter) tail chunk
    return result
def split_overlap_long(seq, size, overlap, is_dataframe=False):
    '''(seq, int, int) => generator
    Split a sequence into chunks of a specific size and overlap,
    returning a generator; very efficient for long sequences
    (len(seq) > 100). Set "is_dataframe=True" to split a pandas.DataFrame.
    https://stackoverflow.com/questions/48381870/a-better-way-to-split-a-sequence-in-chunks-with-overlaps
    Examples:
        >>> split_overlap_long(seq=list(range(10)),size=3,overlap=2)
        <generator object split_overlap_long at 0x10bc49d58>
        >>> list(split_overlap_long(seq=list(range(10)),size=3,overlap=2))
        [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]]
        >>> list(split_overlap_long(seq=range(10),size=3,overlap=2))
        [range(0, 3), range(1, 4), range(2, 5), range(3, 6), range(4, 7), range(5, 8), range(6, 9), range(7, 10)]
    '''
    if size < 1 or overlap < 0:
        raise ValueError('size must be >= 1 and overlap >= 0')
    starts = range(0, len(seq) - overlap, size - overlap)
    if is_dataframe:
        for start in starts:
            yield seq.iloc[start:start + size]
    else:
        for start in starts:
            yield seq[start:start + size]
def itr_split_overlap(iterable, size, overlap):
    '''(iterable,int,int) => generator
    Similar to long_split_overlap() but it works on any iterable.
    In case of long sequences, long_split_overlap() is more efficient
    but this function can handle potentially infinite iterables using deque().
    https://stackoverflow.com/questions/48381870/a-better-way-to-split-a-sequence-in-chunks-with-overlaps
    Warning: for range() and symilar, it behaves differently than split_overlap() and split_overlap_long()
    Examples:
        >>> list(itr_split_overlap(iterable=range(10),size=3,overlap=2))
        [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6), (5, 6, 7), (6, 7, 8), (7, 8, 9)]
    '''
    if size < 1 or overlap < 0:
        raise ValueError('size must be >= 1 and overlap >= 0')
    itr = iter(iterable)
    # Sliding window of at most `size` elements; old items fall off the left.
    buf = deque(islice(itr, size), maxlen=size)
    chunk = None
    # Each iteration pulls the next `size - overlap` items; iter() with a
    # sentinel stops when the pull comes back empty (iterator exhausted).
    for chunk in iter(lambda: tuple(islice(itr, size - overlap)), ()):
        yield tuple(buf)
        buf.extend(chunk)
    rest = tuple(buf)
    if chunk:
        # The last pull may have been shorter than size - overlap; trim the
        # already-yielded prefix so the tail is not repeated.
        rest = rest[size - overlap - len(chunk):]
    yield rest
def reorder_dict(d, keys):
    '''(dict, list) => OrderedDict
    Rebuild a dictionary with its keys in the given order WITHOUT
    copying the values twice: each entry is popped out of `d` (which is
    emptied in the process, saving RAM) and moved into the result.
    '''
    ordered = OrderedDict()
    for key in keys:
        ordered[key] = d.pop(key)  # pop removes the entry from d (saves RAM)
    return ordered
#test = OrderedDict({'1':1,'2':2,'4':4,'3':3})
#print(test)
#test2 = reorder_dict(test,['1','2','3','4'])
#print(test)
#print(test2)
#>>> OrderedDict([('2', 2), ('3', 3), ('4', 4), ('1', 1)])
#>>> OrderedDict()
#>>> OrderedDict([('1', 1), ('2', 2), ('3', 3), ('4', 4)])
def in_between(one_number, two_numbers):
    '''(number, list) => bool
    Return True if a number lies between (inclusive) two other numbers,
    regardless of the order in which the two bounds are given.
    '''
    lo, hi = sorted(two_numbers)
    return lo <= one_number <= hi
def is_overlapping(svA, svB, limit=0.9):
    '''(list, list, float) => bool
    Check whether two structural variants (SVs) overlap for at least
    limit*100 percent of the longer one (default 90%).
    svX = [chr1, brk1, chr2, brk2]; breakpoints may be int or str.
    Only SVs whose breakpoints are both on one and the same chromosome
    are comparable; anything else returns False.
    Fixes vs. the original:
      * the swapped-breakpoints branch for svA did not cast to int,
        so string coordinates crashed later arithmetic;
      * when the two SVs shared exactly one breakpoint, a dangling else
        returned True/False without ever applying the percentage limit;
      * the overlap length is uniformly min(ends) - max(starts), i.e.
        points[2] - points[1] of the four sorted breakpoints.
    '''
    # Step 1. Normalize each SV so the lower coordinate comes first.
    if int(svA[1]) <= int(svA[3]):
        chr1_A, brk1_A, chr2_A, brk2_A = svA[0], int(svA[1]), svA[2], int(svA[3])
    else:
        chr1_A, brk1_A, chr2_A, brk2_A = svA[2], int(svA[3]), svA[0], int(svA[1])
    if int(svB[1]) <= int(svB[3]):
        chr1_B, brk1_B, chr2_B, brk2_B = svB[0], int(svB[1]), svB[2], int(svB[3])
    else:
        chr1_B, brk1_B, chr2_B, brk2_B = svB[2], int(svB[3]), svB[0], int(svB[1])
    # Step 2. This computation is reasonable only for SVs on the same
    # chromosome; return False otherwise.
    if not (chr1_A == chr2_A and chr1_B == chr2_B and chr1_A == chr1_B):
        return False
    len_A = brk2_A - brk1_A
    len_B = brk2_B - brk1_B
    len_reference = max(len_A, len_B)
    len_sample = min(len_A, len_B)
    # Minimum overlap (in bp) the two SVs must share.
    min_overlap = round(len_reference * limit)
    if len_sample < min_overlap:
        # The shorter SV can never reach the required overlap.
        return False
    # Step 3. Overlap test: at least one breakpoint of one SV must lie
    # between the two breakpoints of the other. Breakpoints are already
    # ordered, so plain chained comparisons suffice.
    overlapping = (brk1_B <= brk1_A <= brk2_B or brk1_B <= brk2_A <= brk2_B or
                   brk1_A <= brk1_B <= brk2_A or brk1_A <= brk2_B <= brk2_A)
    if not overlapping:
        return False
    # Step 4. Length of the shared region.
    points = sorted([brk1_A, brk2_A, brk1_B, brk2_B])
    if len(set(points)) == 2:
        return True  # both breakpoints coincide: the very same SV
    if points[1] == points[2]:
        return False  # the SVs merely touch at a single point
    overlap = points[2] - points[1]  # min(ends) - max(starts)
    return overlap >= min_overlap
def load_obj(file):
    '''
    Load a pickled object; return False on any failure (missing file,
    corrupt data, ...).
    Be aware that pickle is version dependent, i.e. objects dumped in
    Py3 cannot be loaded with Py2.
    '''
    try:
        with open(file, 'rb') as handle:
            return pickle.load(handle)
    except:
        return False
def save_obj(obj, file):
    '''
    Dump an object with pickle; print a confirmation and return True on
    success, print an error and return False on any failure.
    Be aware that pickle is version dependent, i.e. objects dumped in
    Py3 cannot be loaded with Py2.
    '''
    try:
        with open(file, 'wb') as handle:
            pickle.dump(obj, handle)
        print('Object saved to {}'.format(file))
        return True
    except:
        print('Error: Object not saved...')
        return False
#save_obj(hotspots_review,'hotspots_review_CIS.txt')
def query_encode(chromosome, start, end):
    '''
    Queries ENCODE via http://promoter.bx.psu.edu/ENCODE/search_human.php
    Parses the output and returns a dictionary of CIS elements found and the relative location.
    Requires network access; returns False (after printing the error)
    when the parsing of the response fails.
    '''
    ## Regex setup
    # Matches coordinate strings like "chr7:123-456" for this chromosome.
    re1='(chr{})'.format(chromosome) # The specific chromosome
    re2='(:)' # Any Single Character ':'
    re3='(\\d+)' # Integer
    re4='(-)' # Any Single Character '-'
    re5='(\\d+)' # Integer
    rg = re.compile(re1+re2+re3+re4+re5,re.IGNORECASE|re.DOTALL)
    ## Query ENCODE
    std_link = 'http://promoter.bx.psu.edu/ENCODE/get_human_cis_region.php?assembly=hg19&'
    query = std_link + 'chr=chr{}&start={}&end={}'.format(chromosome,start,end)
    print(query)
    html_doc = urlopen(query)
    html_txt = BeautifulSoup(html_doc, 'html.parser').get_text()
    data = html_txt.split('\n')
    ## Parse the output
    parsed = {}
    # Lines equal to "Coordinate" mark the start of each element's table;
    # the element name sits two lines above each marker.
    coordinates = [i for i, item_ in enumerate(data) if item_.strip() == 'Coordinate']
    elements = [data[i-2].split(' ')[-1].replace(': ','') for i in coordinates]
    # Coordinate rows start with "chr".
    blocks = [item for item in data if item[:3] == 'chr']
    print(elements)
    try:
        i = 0
        # Pair each element name with its i-th coordinate block.
        for item in elements:
            #print(i)
            try:
                txt = blocks[i]
                #print(txt)
                m = rg.findall(txt)
                # Re-join each regex match tuple into "chrN:start-end".
                bins = [''.join(item) for item in m]
                parsed.update({item:bins})
                i += 1
                print('found {}'.format(item))
            except:
                # NOTE(review): the bare except assumes the only failure mode
                # is a missing/empty block for this element — confirm.
                print('the field {} was empty'.format(item))
        return parsed
    except Exception as e:
        print('ENCODE query falied on chr{}:{}-{}'.format(chromosome, start, end))
        print(e)
        return False
def compare_patterns(pattA, pattB):
    '''(np.array, np.array) => float
    Compare two equal-length arrays point by point and return a
    "raw similarity score": the mean of (100 - |percent delta|) over
    all paired points.  Identical arrays score ~100.

    You may want to center the two patterns before comparing them.

    Raises ValueError when the inputs differ in length.
    '''
    if len(pattA) != len(pattB):
        raise ValueError('"pattA" and "pattB" must have same length.')
    point_scores = [100 - abs(delta_percent(a, b)) for a, b in zip(pattA, pattB)]
    return sum(point_scores) / len(pattA)
def compare_bins(dict_A, dict_B):
    '''(dict,dict) => dict, dict, dict
    Compares two dicts of bins keyed by chromosome name ('1'..'22','X','Y').
    Returns (shared, unique_A, unique_B), each an OrderedDict with one
    (possibly empty) list per chromosome.
    The input dicts' shape is supposed to be like:
        {'1': ['23280000-23290000', ...], '2': [...], ...}

    Bug fix: a chromosome missing from BOTH inputs previously raised
    KeyError (the old `elif k not in dict_A` branch unconditionally read
    dict_B[k]); now all three result lists simply stay empty for it.
    '''
    chrms = [str(x) for x in range(1, 23)] + ['X', 'Y']
    shared = OrderedDict()
    unique_A = OrderedDict()
    unique_B = OrderedDict()
    for k in chrms:
        shared[k] = []
        unique_A[k] = []
        unique_B[k] = []
        in_A = k in dict_A
        in_B = k in dict_B
        if in_A and in_B:
            for bin_ in dict_A[k]:
                if bin_ in dict_B[k]:
                    shared[k].append(bin_)
                else:
                    unique_A[k].append(bin_)
            for bin_ in dict_B[k]:
                if bin_ not in shared[k]:
                    unique_B[k].append(bin_)
        elif in_B:  # chromosome only present in dict_B
            unique_B[k] = list(dict_B[k])
        elif in_A:  # chromosome only present in dict_A
            unique_A[k] = list(dict_A[k])
        # chromosome in neither input: all three lists stay empty
    return shared, unique_A, unique_B
#To manage heavy files
def yield_file(infile):
    '''Lazily yield the stripped lines of *infile*, skipping any line that
    starts with '#', a newline (blank line) or a space.  Keeps memory flat
    for heavy files.'''
    with open(infile, 'r') as handle:
        for raw_line in handle:
            if raw_line[0] not in ('#', '\n', ' ', ''):
                yield raw_line.strip()
#Downaload sequence from ensembl
def sequence_from_coordinates(chromosome, strand, start, end, ref_genome=37):
    '''
    Download the nucleotide sequence for the given genomic coordinates
    from NCBI Entrez (db="nucleotide") and return it as a plain string.

    chromosome: '1'..'22', 'X' or 'Y' (key into the accession table).
    strand:     "1" for the plus strand, "2" for the minus strand.
    ref_genome: 37 (GRCh37) or 38 (GRCh38) — selects the accession set.
    Returns False when NCBI returns no sequence (ValueError from SeqIO).
    NOTE(review): an unsupported ref_genome leaves NCBI_IDS undefined and
    raises NameError/UnboundLocalError — TODO confirm intended.
    '''
    Entrez.email = "a.marcozzi@umcutrecht.nl" # Always tell NCBI who you are
    if int(ref_genome) == 37:
        #GRCh37 from http://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.25/#/def_asm_Primary_Assembly
        NCBI_IDS = {'1':'NC_000001.10','2':'NC_000002.11','3':'NC_000003.11','4':'NC_000004.11',
                    '5':'NC_000005.9','6':'NC_000006.11','7':'NC_000007.13','8':'NC_000008.10',
                    '9':'NC_000009.11','10':'NC_000010.10','11':'NC_000011.9','12':'NC_000012.11',
                    '13':'NC_000013.10','14':'NC_000014.8','15':'NC_000015.9','16':'NC_000016.9',
                    '17':'NC_000017.10','18':'NC_000018.9','19':'NC_000019.9','20':'NC_000020.10',
                    '21':'NC_000021.8','22':'NC_000022.10','X':'NC_000023.10','Y':'NC_000024.9'}
    elif int(ref_genome) == 38:
        #GRCh38 from https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.38
        NCBI_IDS = {'1':'NC_000001.11','2':'NC_000002.12','3':'NC_000003.12','4':'NC_000004.12',
                    '5':'NC_000005.10','6':'NC_000006.12','7':'NC_000007.14','8':'NC_000008.11',
                    '9':'NC_000009.12','10':'NC_000010.11','11':'NC_000011.10','12':'NC_000012.12',
                    '13':'NC_000013.11','14':'NC_000014.9','15':'NC_000015.10','16':'NC_000016.10',
                    '17':'NC_000017.11','18':'NC_000018.10','19':'NC_000019.10','20':'NC_000020.11',
                    '21':'NC_000021.9','22':'NC_000022.11','X':'NC_000023.11','Y':'NC_000024.10'}
    try:
        handle = Entrez.efetch(db="nucleotide",
                               id=NCBI_IDS[str(chromosome)],
                               rettype="fasta",
                               strand=strand, #"1" for the plus strand and "2" for the minus strand.
                               seq_start=start,
                               seq_stop=end)
        record = SeqIO.read(handle, "fasta")
        handle.close()
        sequence = str(record.seq)
        return sequence
    except ValueError:
        print('ValueError: no sequence found in NCBI')
        return False
#GC content calculator
def gc_content(sequence, percent=True):
    '''
    Return the GC content of a sequence.

    Only A, C, G and T are counted (case-insensitive); any other symbol,
    e.g. N, is ignored.  With percent=True (default) the result is in
    [0, 100], otherwise a fraction in [0, 1].
    Returns None (with a message) when no countable bases are present.

    Bug fix: previously, if the fraction computation raised, the error was
    printed and then `gc_fraction` was referenced anyway -> NameError; the
    dead try/except has been removed (the zero-denominator case is already
    handled by the early return).
    '''
    sequence = sequence.upper()
    gc_count = sequence.count("G") + sequence.count("C")
    total_bases_count = gc_count + sequence.count("A") + sequence.count("T")
    if total_bases_count == 0:
        print('Error in gc_content(sequence): sequence may contain only Ns')
        return None
    gc_fraction = gc_count / total_bases_count
    return gc_fraction * 100 if percent else gc_fraction
##Flexibility calculator##
#requires stabflex3.py
#Endpoint function to calculate the flexibility of a given sequence
def dna_flex(sequence, window_size=500, step_zize=100, verbose=False):
    '''(str,int,int,bool) => list_of_tuples
    Calculate the flexibility index of a sequence.
    Return a list of tuples.
    Each tuple contains the bin's coordinates
    and the calculated flexibility of that bin.
    Example:
    dna_flex(seq_a,500,100)
    >>> [('0-500', 9.7),('100-600', 9.77),...]

    NOTE: the misspelled parameter name "step_zize" is kept for backward
    compatibility with existing keyword-argument callers.
    Requires stabflex3.py (`myFlex`, `flexibility_data`).
    '''
    if verbose:
        print("Algorithm window size : %d" % window_size)
        print("Algorithm window step : %d" % step_zize)
        # Bug fix: this used len(self.seq), but there is no `self` in a
        # module-level function -> NameError whenever verbose=True.
        print("Sequence has {} bases".format(len(sequence)))
    algorithm = myFlex(sequence, window_size, step_zize)
    flexibility_result = algorithm.analyse(flexibility_data)
    return flexibility_result.report(verbose)
##Repeats scanner##
#G-quadruplex
def g4_scanner(sequence):
    '''
    G-quadruplex motif scanner.

    Scans *sequence* for the canonical G4 regex motif
    [G]{3,5}[ACGT]{1,7}[G]{3,5}[ACGT]{1,7}[G]{3,5}[ACGT]{1,7}[G]{3,5}
    Reference: http://www.ncbi.nlm.nih.gov/pmc/articles/PMC1636468/

    Returns two callable iterators:
    the first yields G4 matches on the + strand,
    the second yields the complementary (C-run) matches on the + strand,
    i.e. a G4 sitting on the - strand.
    '''
    linker = '[ACGT]{1,7}'
    g_run = '[G]{3,5}'
    c_run = '[C]{3,5}'
    # Four runs separated by three linkers — identical to the literal patterns.
    forward_pattern = g_run + (linker + g_run) * 3
    reverse_pattern = c_run + (linker + c_run) * 3
    forward_hits = re.finditer(forward_pattern, sequence)
    reverse_hits = re.finditer(reverse_pattern, sequence)
    return forward_hits, reverse_hits
#Repeat-masker
def parse_RepeatMasker(infile="RepeatMasker.txt", rep_type='class'):
    '''
    Parse RepeatMasker.txt and return, per chromosome, a dict of bins with
    the set of repeats found on each bin:
        {'chromosome': {'start-end': set(repeats)}}

    rep_type selects which annotation column is collected:
    'name' (repName), 'class' (repClass) or 'family' (repFamily).
    Raises NameError for any other value.
    Note: RepeatMasker.txt is around 500MB — lines are streamed lazily.
    '''
    valid_chromosomes = set(str(c) for c in range(1, 23)) | {'X', 'Y'}
    column_for = {'name': 10, 'class': 11, 'family': 12}
    if rep_type not in column_for:
        raise NameError('Invalid rep_type "{}". Expected "class","family" or "name"'.format(rep_type))
    idx = column_for[rep_type]
    result = {}
    for line in yield_file(infile):
        fields = line.split('\t')
        chromosome = fields[5].replace('chr', '')
        if chromosome not in valid_chromosomes:
            continue
        bin_ = '{}-{}'.format(fields[6], fields[7])
        repeat = fields[idx].replace('?', '')
        result.setdefault(chromosome, {}).setdefault(bin_, set()).add(repeat)
    return result
def next_day(d='2012-12-04'):
    '''Return the next calendar day as a 'YYYY-MM-DD' string.'''
    year, month, day = (int(part) for part in d.split('-'))
    tomorrow = datetime.date(year, month, day) + datetime.timedelta(days=1)
    return str(tomorrow)
# next_day('2012-12-31')
# >>> '2013-01-01'
def previous_day(d='2012-12-04'):
    '''Return the previous calendar day as a 'YYYY-MM-DD' string.'''
    year, month, day = (int(part) for part in d.split('-'))
    yesterday = datetime.date(year, month, day) - datetime.timedelta(days=1)
    return str(yesterday)
# previous_day('2013-01-01')
# >>> '2012-12-31'
def intersect(list1, list2):
    '''(list,list) => list
    Return the items of list2 that also appear in list1.
    Order and duplicates follow list2; items need not be hashable.
    '''
    return list(filter(lambda element: element in list1, list2))
def annotate_fusion_genes(dataset_file):
    '''
    Uses FusionGenes_Annotation.pl to find fusion genes in the dataset.
    Generates a new file ('<dataset>_annotated.txt') containing all the
    annotations, and prints a short timing summary.
    NOTE(review): relies on the sibling helpers run_perl() and
    list_to_line(), and on the optional module-level `running_threads`
    counter used by the multithreaded driver code.
    '''
    start = time.time()
    print('annotating', dataset_file, '...')
    raw_output = run_perl('FusionGenes_Annotation.pl', dataset_file)
    # raw_output looks like a bytes repr ("b'...'"); [2:] strips the b' prefix
    # and the literal '\\n' separators are split on — TODO confirm run_perl output shape.
    raw_list = str(raw_output)[2:].split('\\n')
    outfile = dataset_file[:-4] + '_annotated.txt'
    with open(outfile, 'w') as outfile:
        line_counter = 0
        header = ['##ID', 'ChrA', 'StartA', 'EndA', 'ChrB', 'StartB', 'EndB', 'CnvType', 'Orientation',
                  'GeneA', 'StrandA', 'LastExonA', 'TotalExonsA', 'PhaseA',
                  'GeneB', 'StrandB', 'LastExonB', 'TotalExonsB', 'PhaseB',
                  'InFrame', 'InPhase']
        outfile.write(list_to_line(header, '\t') + '\n')
        for item in raw_list:
            cleaned_item = item.split('\\t')
            if len(cleaned_item) > 10: # FusionGenes_Annotation.pl returns the data twice. We keep the annotated one.
                outfile.write(list_to_line(cleaned_item, '\t') + '\n')
                line_counter += 1
    print('succesfully annotated',line_counter, 'breakpoints from', dataset_file, 'in', time.time()-start, 'seconds')
    # track threads: decrement the global counter if the multithreaded
    # driver defined it; outside that context the NameError is ignored.
    try:
        global running_threads
        running_threads -= 1
    except:
        pass
# dataset_file = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks/Decipher-DeletionsOnly.txt'
# annotate_fusion_genes(dataset_file)
def blastn(input_fasta_file, db_path='/Users/amarcozzi/Desktop/BLAST_DB/',db_name='human_genomic',out_file='blastn_out.xml'):
    '''
    Run blastn on the local machine using a local database.
    Requires NCBI BLAST+ to be installed. http://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Web&PAGE_TYPE=BlastDocs&DOC_TYPE=Download
    Takes a fasta file as input and writes the output in an XML file
    (outfmt=5 is BLAST XML; e-value cutoff fixed at 0.001).
    Returns None; results land in *out_file*.
    '''
    db = db_path + db_name
    # Build the command line, echo it, then execute it (blocks until done).
    blastn_cline = NcbiblastnCommandline(query=input_fasta_file, db=db, evalue=0.001, outfmt=5, out=out_file)
    print(blastn_cline)
    stdout, stderr = blastn_cline()
# to be tested
def check_line(line, unexpected_char=('\n', '', ' ', '#')):
    '''
    Check whether *line* starts with any unexpected character.
    Return False if it does (or if the line is empty), True otherwise.

    Bug fix: the default list contained '' and `line.startswith('')` is
    True for EVERY string, so the function used to reject every line.
    An empty entry now means "reject empty lines" instead.
    (Default is a tuple now — never mutate a shared default.)
    '''
    for item in unexpected_char:
        if item == '':
            if line == '':
                return False
        elif line.startswith(item):
            return False
    return True
def dice_coefficient(sequence_a, sequence_b):
    '''(str, str) => float
    Return the Dice coefficient of the two sequences, computed over
    character bigrams: 2 * |shared bigrams| / (|bigrams_a| + |bigrams_b|).
    Identical inputs score 1.0; empty or single-character inputs (when the
    strings differ) score 0.0.
    '''
    # Fast paths.
    if not len(sequence_a) or not len(sequence_b):
        return 0.0
    if sequence_a == sequence_b:
        return 1.0
    # Different single chars share no bigram.
    if len(sequence_a) == 1 or len(sequence_b) == 1:
        return 0.0
    bigrams_a = [sequence_a[i:i + 2] for i in range(len(sequence_a) - 1)]
    bigrams_b = [sequence_b[i:i + 2] for i in range(len(sequence_b) - 1)]
    # Multiset intersection via a count table (replaces the sorted-merge walk).
    available = {}
    for bigram in bigrams_a:
        available[bigram] = available.get(bigram, 0) + 1
    matches = 0
    for bigram in bigrams_b:
        if available.get(bigram, 0) > 0:
            available[bigram] -= 1
            matches += 2
    return float(matches) / float(len(bigrams_a) + len(bigrams_b))
def find_path(graph, start, end, path=None):
    '''
    Find a path between two nodes in a graph (depth-first, first found).
    Returns the path as a list of nodes, or None when no path exists.
    Works on graphs like this:
    graph ={'A': ['B', 'C'],
            'B': ['C', 'D'],
            'C': ['D'],
            'D': ['C'],
            'E': ['F'],
            'F': ['C']}

    Bug fix: `graph.has_key(start)` is Python 2 only and raised
    AttributeError on Python 3 — replaced with `start not in graph`.
    (path=None also avoids the mutable-default pitfall.)
    '''
    if path is None:
        path = []
    path = path + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    for node in graph[start]:
        if node not in path:
            newpath = find_path(graph, node, end, path)
            if newpath:
                return newpath
    return None
def find_all_paths(graph, start, end, path=None):
    '''
    Find all simple paths between two nodes of a graph (depth-first).
    Returns a list of paths (each a list of nodes); empty list if none.
    Works on graphs like this:
    graph ={'A': ['B', 'C'],
            'B': ['C', 'D'],
            'C': ['D'],
            'D': ['C'],
            'E': ['F'],
            'F': ['C']}

    Bug fix: `graph.has_key(start)` is Python 2 only and raised
    AttributeError on Python 3 — replaced with `start not in graph`.
    (path=None also avoids the mutable-default pitfall.)
    '''
    if path is None:
        path = []
    path = path + [start]
    if start == end:
        return [path]
    if start not in graph:
        return []
    paths = []
    for node in graph[start]:
        if node not in path:
            for newpath in find_all_paths(graph, node, end, path):
                paths.append(newpath)
    return paths
def find_shortest_path(graph, start, end, path=None):
    '''
    Find the shortest path between two nodes of a graph (exhaustive DFS;
    among equal-length paths the first one found wins).
    Returns the path as a list of nodes, or None when no path exists.
    Works on graphs like this:
    graph ={'A': ['B', 'C'],
            'B': ['C', 'D'],
            'C': ['D'],
            'D': ['C'],
            'E': ['F'],
            'F': ['C']}

    Bug fix: `graph.has_key(start)` is Python 2 only and raised
    AttributeError on Python 3 — replaced with `start not in graph`.
    (path=None also avoids the mutable-default pitfall.)
    '''
    if path is None:
        path = []
    path = path + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    shortest = None
    for node in graph[start]:
        if node not in path:
            newpath = find_shortest_path(graph, node, end, path)
            if newpath:
                if not shortest or len(newpath) < len(shortest):
                    shortest = newpath
    return shortest
# ##
# graph = {'A': ['B', 'C'],
# 'B': ['C', 'D'],
# 'C': ['D'],
# 'D': ['C'],
# 'E': ['F'],
# 'F': ['C']}
# >>> find_path(graph, 'A', 'D')
# ['A', 'B', 'C', 'D']
# >>> find_all_paths(graph, 'A', 'D')
# [['A', 'B', 'C', 'D'], ['A', 'B', 'D'], ['A', 'C', 'D']]
# >>> find_shortest_path(graph, 'A', 'D')
# ['A', 'C', 'D']
def gen_rnd_string(length):
    '''
    Return a random string of *length* distinct ASCII letters
    (upper- and lowercase).  Since letters are drawn without replacement
    from the 52-letter alphabet, lengths above 52 yield only 52 characters.
    '''
    pool = list(string.ascii_letters)
    random.shuffle(pool)
    return ''.join(pool[:length])
def gene_synonyms(gene_name):
    '''str => list()
    Queries http://rest.genenames.org and returns a list of synonyms of gene_name.
    Returns None if no synonym was found (non-200 HTTP status).
    NOTE(review): this function is redefined later in this module; at
    import time the later definition shadows this one.
    '''
    result = []
    headers = {'Accept': 'application/json'}
    uri = 'http://rest.genenames.org'
    path = '/search/{}'.format(gene_name)
    target = urlparse(uri+path)
    method = 'GET'
    body = ''
    # httplib2-style client: request() returns (response, content)
    h = http.Http()
    response, content = h.request(
        target.geturl(),
        method,
        body,
        headers )
    if response['status'] == '200':
        # assume that content is a json reply
        # parse content with the json module
        data = json.loads(content.decode('utf8'))
        for item in data['response']['docs']:
            result.append(item['symbol'])
        return result
    else:
        print('Error detected: ' + response['status'])
        return None
#print(gene_synonyms('MLL3'))
def string_to_number(s):
    '''
    Convert a string into a single integer (little-endian interpretation
    of its UTF-8 bytes).  Inverse of number_to_string().
    Example:
    >>> string_to_number('foo bar baz')
    147948829660780569073512294
    '''
    encoded = s.encode()
    return int.from_bytes(encoded, byteorder='little')
def number_to_string(n):
    '''
    Convert an integer back into the string it encodes (little-endian
    UTF-8 bytes).  Inverse of string_to_number().
    Example:
    >>> number_to_string(147948829660780569073512294)
    'foo bar baz'
    '''
    byte_length = math.ceil(n.bit_length() / 8)
    raw = n.to_bytes(byte_length, byteorder='little')
    return raw.decode()
#x = 147948829660780569073512294
#number_to_string(x)
#>>> 'foo bar baz'
def determine_average_breaks_distance(dataset): # tested only for deletion/duplication
    '''
    Evaluate the average distance among breaks in a dataset.
    Only intra-chromosomal events (ChrA == ChrB) contribute.
    '''
    rows = extract_data(dataset, columns=[1, 2, 4, 5], verbose=False)
    distances = [int(end) - int(start)
                 for chr_a, start, chr_b, end in rows
                 if chr_a == chr_b]
    return sum(distances) / len(distances)
#print(determine_average_breaks_distance('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/random/sorted/rnd_dataset_100_annotated_sorted.txt'))
def dict_overview(dictionary, how_many_keys, indent=False):
    '''
    Print the first *how_many_keys* entries of *dictionary*, one per line.
    With indent=True the value goes on its own indented line.
    Useful for a quick look at the structure of a large dictionary.
    '''
    separator = '\n\t' if indent else '\t'
    for key in islice(dictionary, how_many_keys):
        print(f'{key}{separator}{dictionary[key]}')
def download_human_genome(build='GRCh37', entrez_usr_email="A.E.vanvlimmeren@students.uu.nl"):
    '''
    Download the human genome from Entrez, one chromosome at a time.
    Each chromosome sequence is saved in a separate file
    'sequence_<accession>.txt'.  Only genome build GRCh37 is supported.
    Returns False for any other build.

    Bug fix: the GRCh37 accessions were stored in a *set* while the
    lengths dict was indexed with a running integer, so the first
    iteration raised KeyError.  Accessions are now a dict keyed by
    chromosome name, paired with the matching length.  Also, a failed
    fetch no longer attempts to write `False` to the output file.
    '''
    Entrez.email = entrez_usr_email  # Always tell NCBI who you are
    if build != 'GRCh37':
        print('This function only work with genome build GRCh37 fow now...')
        return False
    # GRCh37 accessions, keyed by chromosome, from
    # http://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.25/#/def_asm_Primary_Assembly
    NCBI_IDS = {'1':'NC_000001.10','2':'NC_000002.11','3':'NC_000003.11','4':'NC_000004.11',
                '5':'NC_000005.9','6':'NC_000006.11','7':'NC_000007.13','8':'NC_000008.10',
                '9':'NC_000009.11','10':'NC_000010.10','11':'NC_000011.9','12':'NC_000012.11',
                '13':'NC_000013.10','14':'NC_000014.8','15':'NC_000015.9','16':'NC_000016.9',
                '17':'NC_000017.10','18':'NC_000018.9','19':'NC_000019.9','20':'NC_000020.10',
                '21':'NC_000021.8','22':'NC_000022.10','X':'NC_000023.10','Y':'NC_000024.9'}
    # GRCh37 chromosome lengths (bp)
    CHR_LENGTHS = {'1':249250621,'2' :243199373,'3' :198022430,'4' :191154276,
                   '5' :180915260,'6' :171115067,'7' :159138663,'8' :146364022,
                   '9' :141213431,'10':135534747,'11':135006516,'12':133851895,
                   '13':115169878,'14':107349540,'15':102531392,'16':90354753,
                   '17':81195210,'18':78077248,'19':59128983,'20':63025520,
                   '21':48129895,'22':51304566,'X' :155270560,'Y' :59373566}
    for chromosome, accession in NCBI_IDS.items():
        length = CHR_LENGTHS[chromosome]
        sequence = False
        try:
            handle = Entrez.efetch(db="nucleotide",
                                   id=accession,
                                   rettype="fasta",
                                   strand=1,
                                   seq_start=0,  # fetch from the chromosome start
                                   seq_stop=length)  # to the end of the chromosome
            record = SeqIO.read(handle, "fasta")
            handle.close()
            sequence = str(record.seq)
        except ValueError:
            print('ValueError: no sequence found in NCBI')
        if sequence:
            with open('sequence_{}.txt'.format(accession), 'w') as f:
                f.write(sequence)
def exponential_range(start=0, end=10000, base=10):
    '''
    Generator of integers that grow exponentially: the first sub-range runs
    from *start* to base**2 in steps of *base*; each subsequent sub-range
    multiplies both its bounds and its step by *base*, until *end* is reached.
    Raises ValueError unless end >= base**2.
    Example: list(exponential_range(0, 100000, 2))
    -> [0, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192,
        16384, 32768, 65536]
    '''
    if end / base < base:
        raise ValueError('"end" must be at least "base**2"')
    lower, upper, step = start, base ** 2, base
    while lower < end:
        yield from range(lower, upper, step)
        lower, upper, step = upper, upper * base, step * base
##list(exp_range(0,100000,10))
def extract_data(infile, columns=[3,0,1,2,5], header='##', skip_lines_starting_with='#', data_separator='\t', verbose=False ):
    '''
    Extract data from a file. Returns a list of tuples.
    Each tuple contains the data extracted from one line of the file,
    taken from the indicated columns and in the indicated order.
    A line starting with *header* is parsed as the column header (at most
    one allowed, else ValueError); lines starting with
    *skip_lines_starting_with*, empty lines and bare newlines are skipped.

    Bug fix: the file handle used to be bound to the name `infile`
    (shadowing the path), so the verbose summary printed a closed-file
    repr instead of the file name.
    '''
    extracted_data = []
    header_list = []
    header_flag = 0
    line_counter = 0
    with open(infile) as handle:
        lines = handle.readlines()
    for line in lines:  # yield_file(infile) can be used instead
        line_counter += 1
        if line[:len(header)] == header:  # checks the header
            header_list = line_to_list(line[len(header):], data_separator)
            header_flag += 1
            if header_flag > 1:
                raise ValueError('More than one line seems to contain the header identificator "' + header + '".')
        elif line[0] == skip_lines_starting_with or line == '' or line == '\n':  # skips comments and blank lines
            pass
        else:
            row = line_to_list(line, data_separator)
            extracted_data.append(tuple(row[i] for i in columns))
    if verbose:  # Prints out a brief summary
        print('Data extracted from', infile)
        print('Header =', header_list)
        print('Total lines =', line_counter)
    return extracted_data
# extract_data('tables/clinvarCnv.txt', columns=[3,0,1,2,5], header='##', skip_lines_starting_with='#', data_separator='\t', verbose=True)
def extract_Toronto(infile, outfile):
    '''
    Ad hoc function to extract deletions and duplications out of the
    Toronto Genetic Variants Database.
    Writes a file ready to be annotated with FusionGenes_Annotation.pl .
    '''
    # Extract ID, Chr, Start, End, CNV_Type
    raw_data = extract_data(infile, columns=[0, 1, 2, 3, 5], verbose=True)
    # Keep only deletions and duplications
    filtered_data = [row for row in raw_data
                     if 'deletion' in row or 'duplication' in row]
    print('len(row_data) :', len(raw_data))
    print('len(filtered_data) :', len(filtered_data))
    # Write filtered_data to a text file
    header = ['##ID', 'ChrA', 'StartA', 'EndA', 'ChrB', 'StartB', 'EndB', 'CnvType', 'Orientation']
    with open(outfile, 'w') as out:
        out.write(list_to_line(header, '\t') + '\n')
        for row in filtered_data:
            if row[-1] == 'duplication':
                orientation = 'HT'
            elif row[-1] == 'deletion':
                orientation = 'TH'
            else:
                print('ERROR: unable to determine "Orientation"...')
            record = [row[0], row[1], row[2], row[2], row[1], row[3], row[3], row[-1].upper(), orientation]
            out.write(list_to_line(record, '\t') + '\n')
    print('Done')
# infile = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/GRCh37_hg19_variants_2014-10-16.txt'
# outfile = infile[:-4]+'_DelDupOnly.txt'
# extract_Toronto(infile, outfile)
def extract_Decipher(infile, outfile):
    '''
    Ad hoc function to extract deletions and duplications out of the Decipher Database.
    Writes a file ready to be annotated with FusionGenes_Annotation.pl .
    The CNV type is inferred from the mean_ratio column: >0 duplication,
    <0 deletion.

    Bug fix: a row with mean_ratio == 0 used to print an error and then
    silently reuse CnvType/orientation from the PREVIOUS row (or raise
    NameError on the first row).  Such rows are now skipped.
    '''
    # Extract ID, Chr, Start, End, CNV_Type (here expressed as "mean_ratio")
    raw_data = extract_data(infile, columns=[0,3,1,2,4], verbose=True )
    header = ['##ID','ChrA','StartA','EndA','ChrB','StartB','EndB','CnvType','Orientation']
    with open(outfile, 'w') as outfile:
        outfile.write(list_to_line(header, '\t') + '\n')
        for item in raw_data:
            # Convert mean_ratio to CnvType
            if float(item[-1]) > 0:
                CnvType = 'DUPLICATION'
                orientation = 'HT'
            elif float(item[-1]) < 0:
                CnvType = 'DELETION'
                orientation = 'TH'
            else:
                print('ERROR: unable to determine "Orientation"...')
                continue  # skip the ambiguous row instead of mislabeling it
            # Write output
            list_ = [item[0],item[1],item[2],item[2],item[1],item[3],item[3],CnvType,orientation]
            outfile.write(list_to_line(list_, '\t') + '\n')
    print('Done')
# infile = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/decipher-hg19_15-01-30.txt'
# outfile = infile[:-4]+'_DelDupOnly.txt'
# extract_Decipher(infile, outfile)
def extract_dgvMerged(infile, outfile):
    '''
    Ad hoc function to extract deletions and losses out of the dgvMerged
    database.  Writes a file ready to be annotated with
    FusionGenes_Annotation.pl .
    Source columns: name=4, chrom=1, chromStart=2, chromEnd=3, varType=10
    (the 'chr' prefix is stripped from chromosome names on output).
    '''
    raw_data = extract_data(infile, columns=[4, 1, 2, 3, 10], header='##',
                            skip_lines_starting_with='#', data_separator='\t', verbose=False)
    # Keep only deletions and losses
    filtered_data = [row for row in raw_data
                     if 'Deletion' in row or 'Loss' in row]
    print('len(row_data) :', len(raw_data))
    print('len(filtered_data) :', len(filtered_data))
    # Write filtered_data to a text file
    header = ['##ID', 'ChrA', 'StartA', 'EndA', 'ChrB', 'StartB', 'EndB', 'CnvType', 'Orientation']
    with open(outfile, 'w') as out:
        out.write(list_to_line(header, '\t') + '\n')
        for row in filtered_data:
            if row[-1] in ('Deletion', 'Loss'):
                cnv_type = 'DELETION'
                orientation = 'HT'
            else:
                # unreachable given the filter above; kept as a safety net
                print('ERROR: unable to determine "Orientation"...')
            chromosome = row[1][3:]  # drop the 'chr' prefix
            record = [row[0], chromosome, row[2], row[2], chromosome, row[3], row[3], cnv_type, orientation]
            out.write(list_to_line(record, '\t') + '\n')
    print('Done')
# ## Extract deletions and Losses from dgvMerged
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks'
# file_name = 'dgvMerged.txt'
# infile = folder + '/' + file_name
# outfile = folder + '/' + 'dgvMerged-DeletionsOnly.txt'
# extract_dgvMerged(infile, outfile)
# ## annotate
# dataset_file = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks/dgvMerged-DeletionsOnly.txt'
# annotate_fusion_genes(dataset_file)
def fill_and_sort(pandas_chrSeries):
    '''incomplete pandas.Series => complete and sorted pandas.Series
    Given a pandas.Series indexed by chromosome name with a count per
    chromosome, e.g. [('1', 61), ('3', 28), ..., ('X', 29)], return a new
    Series covering ALL chromosomes '1'..'22','X','Y' in order, with 0
    filled in for any missing chromosome.
    This is useful when creating series out of subsets grouped by Chr:
    a Chr with no events is absent from the subset, but for plotting you
    usually want ('Chr', 0) instead of a missing Chr.
    Index labels outside the standard chromosome set are dropped.

    Fix: Series.iteritems() was removed in pandas 2.0 — use items().
    '''
    CHROMOSOMES = [str(c) for c in range(1, 23)] + ['X', 'Y']
    counts = dict(pandas_chrSeries.items())
    ordered_counts = [counts.get(chromosome, 0) for chromosome in CHROMOSOMES]
    return pd.Series(ordered_counts, index=CHROMOSOMES)
# counts = [50,9,45,6]
# pandas_chrSeries = pd.Series(counts, index=['1','4','X','10'])
# print(pandas_chrSeries)
# good_series = fill_and_sort(pandas_chrSeries)
# print(good_series)
def find(string, char):
    '''
    Return the indexes of ALL occurrences of *char* in *string*.
    (str.find, by contrast, returns only the first occurrence.)
    '''
    hits = []
    for position, symbol in enumerate(string):
        if symbol == char:
            hits.append(position)
    return hits
# print(find('alessio', 's'))
def filter_out(word, infile, outfile):
    '''
    Copy *infile* to *outfile* keeping only the lines that do NOT
    contain *word*.
    '''
    print('Filtering out lines containing', word, '...')
    with open(infile, 'r') as source:
        content = source.readlines()
    surviving = (line for line in content if word not in line)
    with open(outfile, 'w') as sink:
        sink.writelines(surviving)
    print('Done')
# infile = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks/Decipher_DelDupOnly.txt'
# outfile = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks/Decipher-DeletionsOnly.txt'
# filter_out('DUPLICATION',infile, outfile)
def flatten2(l):
    '''
    Flatten an irregular nested iterable into a flat stream of items.
    Strings and bytes are yielded whole, not split into characters.

    Bug fixes (Python 3 port): `basestring` does not exist in Py3 and
    `collections.Iterable` was removed in Py3.10 — use
    collections.abc.Iterable and (str, bytes) instead.  Recursion now
    calls flatten2 itself rather than the sibling flatten().
    '''
    from collections.abc import Iterable  # local: avoids relying on file-level collections import
    for item in l:
        if isinstance(item, Iterable) and not isinstance(item, (str, bytes)):
            for sub in flatten2(item):
                yield sub
        else:
            yield item
def flatten(l):
    '''
    Flatten an irregular nested iterable into a flat stream of items.
    Python >= 3.3 version (uses `yield from`).

    Bug fix: strings/bytes are now yielded whole.  Previously a string
    caused infinite recursion (RecursionError), because iterating a
    1-character string yields that same string forever.
    '''
    for item in l:
        if isinstance(item, (str, bytes)):
            yield item
        else:
            try:
                yield from flatten(item)
            except TypeError:
                yield item
def gene_synonyms(gene_name):
    '''str => list()
    Queries http://rest.genenames.org and http://www.ncbi.nlm.nih.gov/ to figure out the best synonym of gene_name.
    Returns the genenames symbols filtered down to those that also appear
    on the NCBI gene page (when more than one candidate is found), or the
    raw candidate list otherwise; None on a non-200 HTTP status.
    NOTE(review): this redefines (and shadows) the earlier gene_synonyms
    in this module.
    '''
    result = []
    tmp = []
    headers = {'Accept': 'application/json'}
    uri = 'http://rest.genenames.org'
    path = '/search/{}'.format(gene_name)
    # NCBI page text is used below to cross-check candidate symbols
    html_doc = urlopen('http://www.ncbi.nlm.nih.gov/gene/?term={}[sym]'.format(gene_name))
    html_txt = BeautifulSoup(html_doc, 'html.parser').get_text()
    target = urlparse(uri+path)
    method = 'GET'
    body = ''
    # httplib2-style client: request() returns (response, content)
    h = http.Http()
    response, content = h.request(
        target.geturl(),
        method,
        body,
        headers )
    if response['status'] == '200':
        # assume that content is a json reply
        # parse content with the json module
        data = json.loads(content.decode('utf8'))
        for item in data['response']['docs']:
            tmp.append(item['symbol'])
    else:
        print('Error detected: ' + response['status'])
        return None
    if len(tmp) > 1:
        # keep only the symbols confirmed by the NCBI page text
        for gene in tmp:
            if gene in html_txt:
                result.append(gene)
        return result
    else:
        return tmp
#print(gene_synonyms('MLL3'))
def gen_controls(how_many, chromosome, GapTable_file, outfile):
    '''Generate *how_many* random single breakpoints on *chromosome*
    (avoiding regions listed in *GapTable_file*) and write them to
    *outfile*, one tab-separated line each.  Decrements the module-level
    `running_threads` counter when done (multithreaded driver bookkeeping).
    '''
    global running_threads  # in case of multithreading
    breakpoints = gen_rnd_single_break(how_many, chromosome, GapTable_file, verbose=False)
    with open(outfile, 'w') as out:
        out.writelines(list_to_line(bp, '\t') + '\n' for bp in breakpoints)
    running_threads -= 1  # in case of multithreading
# # Generate controls
# import time
# from threading import Thread
# threads = 0
# running_threads = 0
# max_simultaneous_threads = 20
# how_many=9045
# chromosome='9'
# GapTable_file='/Users/alec/Desktop/UMCU_Backup/Projects/Anne_Project/current_brkps_DB/out_ALL_gap.txt'
# while threads < 100:
# while running_threads >= max_simultaneous_threads:
# time.sleep(1)
# running_threads += 1
# outfile = '/Users/alec/Desktop/UMCU_Backup/Projects/Anne_Project/current_brkps_DB/out_chr9_control_'+str(threads)+'.txt'
# print('thread', threads, '|', 'running threads:',running_threads)
# Thread(target=gen_controls, args=(how_many,chromosome,GapTable_file,outfile)).start()
# threads += 1
def gen_control_dataset(real_dataset, suffix='_control.txt'):# tested only for deletion/duplication
    '''
    Generates a control dataset ad hoc.
    Takes as input an existing dataset and generates breaks
    in the same chromosomes and with the same distance (+-1bp),
    the position are however randomized.
    Output is written next to the input as '<real_dataset minus .txt><suffix>'.
    Relies on the sibling helpers extract_data(), gen_rnd_id(),
    gen_rnd_breaks() and list_to_line(), and on 'tables/gap.txt'.
    '''
    # columns: ChrA, StartA, ChrB, StartB, CnvType, Orientation
    real_data = extract_data(real_dataset, columns=[1,2,4,5,7,8], verbose=False)
    control_data = []
    _id_list = []
    for item in real_data:
        if item[0] == item[2]: # ChrA == ChrB
            # generate a unique id (retry on collision)
            _id = gen_rnd_id(16)
            while _id in _id_list:
                _id = gen_rnd_id(16)
            _id_list.append(_id)
            chromosome = item[0]
            distance = int(item[3])-int(item[1]) #
            cnv_type = item[4]
            orientation = item[5]
            # one random break pair with (almost) the same span as the real event
            breaks = gen_rnd_breaks(how_many=1, chromosome=chromosome,
                                    min_distance=distance-1, max_distance=distance+1,
                                    GapTable_file='tables/gap.txt')
            print(breaks)
            control_data.append([_id,chromosome,breaks[0][1],breaks[0][1],chromosome,breaks[0][2],
                                 breaks[0][2],cnv_type,orientation])
        else:
            # inter-chromosomal events cannot be mirrored by this scheme
            print(item[0],'is no equal to',item[2],'I am skipping these breaks')
    header = ['##ID', 'ChrA', 'StartA', 'EndA', 'ChrB', 'StartB', 'EndB', 'CnvType', 'Orientation']
    filename = real_dataset[:-4]+ suffix
    with open(filename,'w') as outfile:
        outfile.write(list_to_line(header, '\t') + '\n')
        for item in control_data:
            line = list_to_line(item, '\t')
            print(line)
            outfile.write(line + '\n')
    print('Data written in',filename)
# gen_control_dataset('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/raw/clinvarCnv-DeletionsOnly.txt')
def gen_gap_table(infile='/Users/amarcozzi/Desktop/All_breakpoints_HG19_final.txt', outfile='/Users/amarcozzi/Desktop/All_breakpoints_HG19_gap.txt', resolution=10000):
    '''
    Generates a file containing a list of coordinates
    for wich no brakpoints have been found in the input file.

    The genome is binned at *resolution* bases per chromosome; every bin
    with zero breakpoints is written to *outfile* as
    'chrN<TAB>binStart<TAB>binEnd'.
    Expects input lines shaped like 'chrom<TAB>position...'
    (see the parsing loop below).
    '''
    # Global constants
    CHROMOSOMES = [str(c) for c in range(1,23)]+['X','Y']
    # length of chromosomes based on GRCh37 (Data source: Ensembl genome browser release 68, July 2012)
    # http://jul2012.archive.ensembl.org/Homo_sapiens/Location/Chromosome?r=1:1-1000000
    # http://grch37.ensembl.org/Homo_sapiens/Location/Chromosome?r=1:24626643-24726643
    CHR_LENGTHS = {'1':249250621,'2' :243199373,'3' :198022430,'4' :191154276,
                   '5' :180915260,'6' :171115067,'7' :159138663,'8' :146364022,
                   '9' :141213431,'10':135534747,'11':135006516,'12':133851895,
                   '13':115169878,'14':107349540,'15':102531392,'16':90354753,
                   '17':81195210,'18':78077248,'19':59128983,'20':63025520,
                   '21':48129895,'22':51304566,'X' :155270560,'Y' :59373566}
    gap_list = []
    for Chr in CHROMOSOMES:
        print('-----------------------------------------------------')
        print('Analyzing breakpoints in chromosome',Chr)
        length = CHR_LENGTHS[Chr]
        # determine the intervals given the chromosome length and the resolution
        x_ax = [] # data holder
        y_ax = [] # stores breakpoint counts per inteval
        breakpoint_list = []
        # # Extract data from infile, chromosome by chromosome
        # with open(infile, 'r') as f:
        #     lines = f.readlines()
        #     for line in lines: # yield_file(infile) can be used instead
        #         if line.startswith('chr'+Chr+':'):
        #             tmp = line.split(':')
        #             breakpoint = tmp[1].split('-')[0]
        #             breakpoint_list.append(int(breakpoint))
        #     print(len(breakpoint_list),'breakpoints found...')
        with open(infile, 'r') as f:
            #lines = f.readlines()
            for line in f:#lines: # yield_file(infile) can be used instead
                if line.startswith(Chr+'\t'):
                    tmp = line_to_list(line,'\t')
                    breakpoint = tmp[1]
                    breakpoint_list.append(int(breakpoint))
            print(len(breakpoint_list),'breakpoints found...')
        # Bin upper bounds: resolution, 2*resolution, ... up to chromosome end
        for item in range(resolution,length+resolution,resolution):
            x_ax.append(item)
        print('Interval list:',len(x_ax), 'at',resolution,'bases resolution')
        # Count breakpoints falling below each successive bound; counted
        # breakpoints are removed so each is attributed to a single bin.
        for interval in x_ax:
            count = 0
            to_remove = []
            for breakpoint in breakpoint_list:
                if breakpoint <= interval:
                    count += 1
                    to_remove.append(breakpoint)
            y_ax.append(count)
            for item in to_remove:
                try:
                    breakpoint_list.remove(item)
                except:
                    # duplicate positions were collected more than once;
                    # the second removal attempt fails and is reported
                    print('Error',item)
        # Every zero-count bin is a gap
        counter = 0
        for idx,count_ in enumerate(y_ax):
            if count_ == 0:
                gap = x_ax[idx]
                gap_list.append((Chr,gap))
                counter += 1
        print('Found', counter,'gaps in chromosome',Chr,'\n')
    with open(outfile, 'w') as f:
        f.write('#Gap table at '+str(resolution)+' bases resolution based on '+infile+'\n')
        f.write('##chrom'+'\t'+'chromStart'+'\t'+'chromEnd'+'\n')
        for item in gap_list:
            # each gap is written as the full [end-resolution, end] bin
            line = 'chr'+str(item[0])+'\t'+str(item[1]-resolution)+'\t'+str(item[1])
            f.write(line+'\n')
# import time
# start = time.time()
# gen_gap_table()
# print('Done in',time.time()-start,'seconds')
## Generate a gap table file
# import time
# start = time.time()
# gen_gap_table(infile='/Users/amarcozzi/Desktop/current_brkps_DB/out_ALL.txt', outfile='/Users/amarcozzi/Desktop/current_brkps_DB/out_ALL_gap.txt', resolution=10000)
# print('Done in',time.time()-start,'seconds')
def gen_multiple_controls(real_dataset, how_many):
    '''
    Generate *how_many* control datasets for *real_dataset*, each written
    with a distinct '_control_<i>.txt' suffix.
    '''
    for index in range(how_many):
        gen_control_dataset(real_dataset, '_control_' + str(index) + '.txt')
    print(how_many,'datasets have been generated')
# gen_multiple_controls('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/raw/dataset_4.txt',1000)
# ## Generate multiple controls of datasets found in a folder
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/random'
# for item in list_of_files(folder,'txt'):
# gen_multiple_controls(item,1000)
def gen_deletion_dataset_from_breaks(list_of_breaks, outfile, ID_already=False):
    '''Generate a deletion dataset file out of a list of breaks.

    list_of_breaks: tuples of ('chrN', start, end) — or, when ID_already
    is True, (ID, 'chrN', start, end).
    outfile: path of the tab-separated dataset to write.
    '''
    # Var names are not pythonic but I think it is better for readibility
    header = ['##ID', 'ChrA', 'StartA', 'EndA', 'ChrB', 'StartB', 'EndB', 'CnvType', 'Orientation']
    ID_list = [] # to check if the ID is already present
    print('writing breakpoints to', outfile, '..........')
    with open(outfile, 'w') as outfile:
        outfile.write(list_to_line(header, '\t') + '\n')
        for item in list_of_breaks:
            if ID_already == False: # the braks do not have an ID
                while True: # keep drawing random IDs until a fresh one is found
                    ID = gen_rnd_id(8)
                    if ID not in ID_list:
                        ID_list.append(ID)
                        break
                ChrA = ChrB = item[0][3:]  # strip the 'chr' prefix
                StartA = EndA = item[1]
                StartB = EndB = item[2]
            else: # the break do have an ID
                ID = item[0] # the ID is supposed to be the first entry
                ChrA = ChrB = item[1][3:]
                StartA = EndA = item[2]
                StartB = EndB = item[3]
            CnvType = 'DELETION'
            Orientation = 'TH'
            line = list_to_line([ID, ChrA, StartA, EndA, ChrB, StartB, EndB, CnvType, Orientation], '\t')
            outfile.write(line + '\n')
    print('OK')
# list_of_breaks = gen_rnd_breaks(how_many=100, chromosome='Y', min_distance=1000, max_distance=15000, GapTable_file='tables/gap.txt')
# gen_deletion_dataset_from_breaks(list_of_breaks, 'test_deletion_dataset.txt')
# ## Generate (m) RANDOM datasets of different length (n)
# for m in range(1000):
# for n in [100,1000,10000,100000,1000000]:
# outfile = 'rnd_dataset_'+ str(n)+'_'+str(m)+'.txt'
# breaks = list()
# for chromosome in CHROMOSOMES:
# breaks.extend(gen_rnd_breaks(how_many=500, chromosome=chromosome, min_distance=0, max_distance=n))
# gen_deletion_dataset_from_breaks(breaks, outfile)
def gen_rnd_breaks(how_many=100, chromosome='Y', min_distance=1000, max_distance=15000, GapTable_file='tables/gap.txt'):
    '''Returns tuples containing 1)the chromosome, 2)first breakpoint, 3)second breakpoint
    Keeps only the points that do not appear in the gap table.
    gen_rnd_breaks(int, string, int, int, filepath) => [(chrX, int, int), ...]
    valid chromosomes inputs are "1" to "22" ; "Y" ; "X"
    The chromosome length is based on the build GRCh37/hg19.'''
    # CHR_LENGTHS is based on GRCh37
    CHR_LENGTHS = {'1':249250621,'2' :243199373,'3' :198022430,'4' :191154276,
                    '5' :180915260,'6' :171115067,'7' :159138663,'8' :146364022,
                    '9' :141213431,'10':135534747,'11':135006516,'12':133851895,
                    '13':115169878,'14':107349540,'15':102531392,'16':90354753,
                    '17':81195210,'18':78077248,'19':59128983,'20':63025520,
                    '21':48129895,'22':51304566,'X' :155270560,'Y' :59373566}
    # Generates a chromosome-specific gap list
    print('generating', how_many, 'breakpoints in Chr', chromosome, '..........')
    with open(GapTable_file,'r') as infile:
        lines = infile.readlines()
    full_gap_list = []
    chr_specific_gap = []
    for line in lines:
        if '#' not in line: # skip comments
            full_gap_list.append(line_to_list(line, '\t'))
    for item in full_gap_list:
        if 'chr' + chromosome in item:
            # Database/browser start coordinates differ by 1 base
            # NOTE(review): columns 2-3 are used here while
            # gen_rnd_single_break() reads columns 1-2 — confirm the
            # expected gap-table layout for this function.
            chr_specific_gap.append((item[2],item[3]))
    # Merge contiguous gaps
    merged_gaps = []
    n = 0
    left_tick = False
    while n < len(chr_specific_gap):
        if left_tick == False:
            left_tick = chr_specific_gap[n][0]
        try:
            if chr_specific_gap[n][1] == chr_specific_gap[n+1][0]:
                n += 1
            else:
                right_tick = chr_specific_gap[n][1]
                merged_gaps.append((left_tick,right_tick))
                left_tick = False
                n += 1
        except:
            # last element has no successor; NOTE(review): the final open
            # gap (left_tick) is never appended here — confirm intent.
            n += 1
    # Generates breakpoint list
    list_of_breakpoints = []
    while len(list_of_breakpoints) < how_many:
        try:
            start = random.randint(0,CHR_LENGTHS[chromosome])
        except KeyError:
            if chromosome == '23':
                chromosome = 'X'
                start = random.randint(0,CHR_LENGTHS[chromosome])
            elif chromosome == '24':
                chromosome = 'Y'
                start = random.randint(0,CHR_LENGTHS[chromosome])
            else:
                # NOTE(review): `start` is left unbound here, so the next
                # line raises NameError for an invalid chromosome name.
                print('ERROR: Wrong chromosome name!!')
        end = random.randint(start+min_distance, start+max_distance)
        are_points_ok = True # assumes that the points are ok
        for item in merged_gaps:
            # checks whether the points are ok for real
            if start < int(item[0]) or start > int(item[1]):
                if end < int(item[0]) or end > int(item[1]):
                    pass
                else: are_points_ok = False
            else: are_points_ok = False
        if are_points_ok == True:
            list_of_breakpoints.append(('chr'+chromosome, start, end))
    print('OK')
    return list_of_breakpoints
# print(gen_rnd_breaks(how_many=100, chromosome='Y', min_distance=1000, max_distance=15000, GapTable_file='tables/gap.txt'))
def gen_rnd_id(length):
    '''Return a random ID string of *length* uppercase ASCII letters and digits.'''
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
# print(gen_rnd_id(16))
#@profile
def gen_rnd_single_break(how_many=100, chromosome='1', GapTable_file='/Users/amarcozzi/Desktop/All_breakpoints_HG19_gap_10k.txt', verbose=False):
    '''Returns tuples containing 1)the chromosome, 2)the breakpoint
    Keeps only the points that do not appear in the gap table.
    gen_rnd_breaks(int, string, filepath) => [(chrX, int), ...]
    valid chromosomes inputs are "1" to "22" ; "Y" ; "X"
    The chromosome length is based on the build GRCh37/hg19.
    Prerequisites: The gap_list file is in the form:
    ##chrom    chromStart    chromEnd
    chr1    0    10000
    chr1    30000    40000
    chr1    40000    50000
    chr1    50000    60000
    '''
    if verbose == True:
        start_time = time.time()
    # CHR_LENGTHS is based on GRCh37
    CHR_LENGTHS = {'1':249250621,'2' :243199373,'3' :198022430,'4' :191154276,
                    '5' :180915260,'6' :171115067,'7' :159138663,'8' :146364022,
                    '9' :141213431,'10':135534747,'11':135006516,'12':133851895,
                    '13':115169878,'14':107349540,'15':102531392,'16':90354753,
                    '17':81195210,'18':78077248,'19':59128983,'20':63025520,
                    '21':48129895,'22':51304566,'X' :155270560,'Y' :59373566}
    # Generates a chromosome-specific gap list
    with open(GapTable_file, 'r') as infile:
        lines = infile.readlines()
    full_gap_list = []
    chr_specific_gap = []
    for line in lines:
        if '#' not in line: # skip comments
            full_gap_list.append(line_to_list(line, '\t'))
    for item in full_gap_list:
        if 'chr' + chromosome in item:
            chr_specific_gap.append((item[1],item[2]))  # (chromStart, chromEnd)
    # Merge contiguous gaps
    merged_gaps = merge_gaps(chr_specific_gap)
    # merged_gaps = []
    # while len(chr_specific_gap) > 0:
    #     try:
    #         if chr_specific_gap[0][1] == chr_specific_gap[1][0]:
    #             tmp = (chr_specific_gap[0][0],chr_specific_gap[1][1])
    #             chr_specific_gap.pop(0)
    #             chr_specific_gap[0] = tmp
    #         else:
    #             merged_gaps.append(chr_specific_gap.pop(0))
    #     except:
    #         merged_gaps.append(chr_specific_gap.pop(0))
    # Generates breakpoint list
    if verbose == True: print('generating', how_many, 'breakpoints in Chr', chromosome)
    list_of_breakpoints = []
    while len(list_of_breakpoints) < how_many:
        try:
            start = random.randint(0,CHR_LENGTHS[chromosome])
            # if verbose == True: print(start)
        except KeyError:
            if chromosome == '23':
                chromosome = 'X'
                start = random.randint(0,CHR_LENGTHS[chromosome])
            elif chromosome == '24':
                chromosome = 'Y'
                start = random.randint(0,CHR_LENGTHS[chromosome])
            else:
                # NOTE(review): `start` stays unbound here, so the gap
                # check below raises NameError for a bad chromosome name.
                print('ERROR: Wrong chromosome name!!')
        #end = random.randint(start+min_distance, start+max_distance)
        are_points_ok = True # assumes that the points are ok
        for item in merged_gaps:
            # checks whether the points are ok for real
            if start <= int(item[0]) or start >= int(item[1]):
                pass
            else:
                are_points_ok = False
                if verbose == True: print(start,'is in a gap and will be discarded')
        if are_points_ok == True:
            list_of_breakpoints.append((chromosome, start))
            if verbose == True: print(start,'is OK',len(list_of_breakpoints),'good breaks generated out of',how_many)
    if verbose == True: print(how_many,'breakpoint have been generated in chromosome',chromosome,'in',time.time()-start_time,'seconds')
    return list_of_breakpoints
# gen_rnd_single_break(verbose=True)
# ## Generate single breaks dataset
# import time
# start = time.time()
# breaks_on_1 = gen_rnd_single_break(how_many=19147,verbose=False)
# for item in breaks_on_1:
# print(str(item[0])+'\t'+str(item[1]))
# print('Done in', time.time()-start,'seconds..')
# ## Generate a control file
# list_brkps = gen_rnd_single_break(how_many=20873, chromosome='1', GapTable_file='/Users/amarcozzi/Desktop/current_brkps_DB/out_ALL_gap.txt', verbose=True)
# with open('/Users/amarcozzi/Desktop/current_brkps_DB/out_chr1_control.txt','w') as f:
# for item in list_brkps:
# f.write(list_to_line(item,'\t')+'\n')
# ## Generate multiple controls
# import time
# from threading import Thread
# start_time = time.time()
# threads = 0
# running_threads = 0
# max_simultaneous_threads = 20
# GapTable_file = '/Users/amarcozzi/Desktop/Projects/Anne_Project/current_brkps_DB/out_ALL_gap.txt'
# chromosome = 'Y'
# infile = '/Users/amarcozzi/Desktop/Projects/Anne_Project/current_brkps_DB/out_chr'+chromosome+'.txt'
# how_many = 0
# for line in yield_file(infile):
# if line.startswith(chromosome+'\t'):
# how_many += 1
# print('found',how_many,'breakpoints in chromosome',chromosome)
# while threads < 100:
# while running_threads >= max_simultaneous_threads:
# time.sleep(1)
# running_threads += 1
# outfile = '/Users/amarcozzi/Desktop/Projects/Anne_Project/current_brkps_DB/controls/out_chr'+chromosome+'_control_'+str(threads)+'.txt'
# print('thread', threads, '|', 'running threads:',running_threads)
# Thread(target=gen_controls, args=(how_many,chromosome,GapTable_file,outfile)).start()
# threads += 1
# print('Waiting for threads to finish...')
# while running_threads > 0:
# time.sleep(1)
# end_time = time.time()
# print('\nDone in',(end_time-start_time)/60,'minutes')
def kmers_finder(sequence_dict, motif_length, min_repetition):
    '''(dict, int, int) => OrderedDict(sorted(list))
    Find all motifs of length *motif_length* repeated at least
    *min_repetition* times across the sequences of *sequence_dict*.
    Return an OrderedDict of motif:count sorted by count, descending.
    '''
    counts = {}
    for _id, seq in sequence_dict.items():
        # slide a window of motif_length over every sequence
        for offset in range(len(seq) - motif_length + 1):
            kmer = seq[offset:offset + motif_length]
            counts[kmer] = counts.get(kmer, 0) + 1
    # keep only motifs repeated at least min_repetition times
    frequent = {kmer: total for kmer, total in counts.items() if total >= min_repetition}
    return OrderedDict(sorted(frequent.items(), key=itemgetter(1), reverse=True))
def kmers_finder_with_mismatches(sequence, motif_length, max_mismatches, most_common=False):
    '''(str, int, int) => OrderedDict
    Find the most frequent k-mers with mismatches in a string.
    Input: A sequence and a pair of integers: motif_length (<=12) and max_mismatch (<= 3).
    Output: An OrderedDict mapping each k-mer to the summed count of all
    k-mers within max_mismatches of it, sorted by count descending.
    If most_common is True, only the entries tied for the highest count
    are returned.
    Sample Input: ACGTTGCATGTCGCATGATGCATGAGAGCT 4 1
    Sample Output: OrderedDict([('ATGC', 5), ('ATGT', 5), ('GATG', 5),...])
    '''
    # Validate arguments. The original used `not a and b`, which, by
    # operator precedence, never enforced the lower bound.
    if not 1 <= motif_length <= 12:
        raise ValueError("motif_length must be between 0 and 12. {} was passed.".format(motif_length))
    if not 0 <= max_mismatches <= 3:
        raise ValueError("max_mismatch must be between 0 and 3. {} was passed.".format(max_mismatches))
    # Count exact k-mer occurrences.
    motif_dict = {}
    for i in range(len(sequence) - motif_length + 1):
        motif = sequence[i:i + motif_length]
        motif_dict[motif] = motif_dict.get(motif, 0) + 1
    # For every k-mer, sum the counts of all k-mers within max_mismatches.
    totals = {}
    for kmer in motif_dict:
        total = 0
        for other, count in motif_dict.items():
            mismatches = sum(1 for a, b in zip(kmer, other) if a != b)
            if mismatches <= max_mismatches:
                total += count
        totals[kmer] = total
    result = OrderedDict(sorted(totals.items(), key=itemgetter(1), reverse=True))
    if most_common:
        # Keep only the entries tied for the maximum total. The original
        # indexed result.items()[0][1] (a TypeError on Python 3) and
        # returned the full dict when every entry tied for the maximum.
        top = next(iter(result.values()), None)
        return OrderedDict((k, v) for k, v in result.items() if v == top)
    return result
def line_to_list(line, char):
    '''Make a list of strings out of a line, splitting at *char*.
    Newlines/carriage returns are stripped and empty cells are dropped
    (except for a possibly-empty final cell).

    Relies on the module-level helper find() to locate the split points.
    '''
    # Allows for more customization compared with string.split()
    split_indexes = find(line, char)
    list_ = []
    n = 0
    for index in split_indexes:
        item = line[n:index].replace('\n','').replace('\r','') # cleans up the line
        if item != '': # skips empty 'cells'
            list_.append(item)
        n = index + 1
    list_.append(line[n:].replace('\n','').replace('\r','')) # append the last item
    return list_
# print(line_to_list('Makes a list of string out of a line. Splits the word at char.', ' '))
def list_to_line(list_, char):
    '''Join the items of *list_* into one string separated by *char*.'''
    # Allows for more customization compared with string.split()
    joined = ''.join(str(item) + char for item in list_)
    return joined.rstrip(char)  # drop the trailing separator
#print(list_to_line(['prova', '1', '2', '3', 'prova'], '---'))
def list_of_files(path, extension, recursive=False):
    '''
    Yield a filepath for each file under *path* with the target extension.
    If recursive, subfolders are walked as well.
    '''
    pattern = '/*.' + extension
    if recursive:
        for root, _dirs, _files in os.walk(path):
            yield from glob.iglob(root + pattern)
    else:
        yield from glob.iglob(path + pattern)
def merge_gaps(gap_list):
    '''
    Merge overlapping/contiguous gaps in a gap list.
    The gap list is in the form: [('3','4'),('5','6'),('6','7'),('8','9'),('10','11'),('15','16'),('17','18'),('18','19')]
    Returns a new list containing the merged gaps: [('3','4'),('5','7'),('8','9'),('10','11'),('15','16'),('17','19')]

    Note: the input list is consumed (emptied) in the process.
    '''
    merged_gaps = []
    while len(gap_list) > 0:
        try:
            if int(gap_list[0][1]) >= int(gap_list[1][0]):
                # current gap touches/overlaps the next one: fuse them
                tmp = (gap_list[0][0],gap_list[1][1])
                gap_list.pop(0)
                gap_list[0] = tmp
            else:
                merged_gaps.append(gap_list.pop(0))
        except IndexError:
            # last remaining gap has no successor to compare with.
            # (The original used a bare `except`, which also silently
            # swallowed ValueError from malformed, non-numeric bounds.)
            merged_gaps.append(gap_list.pop(0))
    return merged_gaps
# gap_list = [('3','4'),('5','6'),('6','7'),('8','9'),('10','11'),('15','16'),('17','18'),('18','19')]
# expected = [('3','4'),('5','7'),('8','9'),('10','11'),('15','16'),('17','19')]
# prova = merge_gaps(gap_list)
# print(prova)
# print(expected)
def merge_sort(intervals):
    '''
    Sort the intervals by lower bound, then merge any that overlap.
    Alternative to merge_gaps() that sorts before merging.
    Returns a list of (low, high) tuples.
    '''
    result = []
    for interval in sorted(intervals, key=lambda tup: tup[0]):
        if result and interval[0] <= result[-1][1]:
            # overlaps the previous interval: extend it in place
            low, high = result[-1]
            result[-1] = (low, max(high, interval[1]))
        else:
            result.append(interval)
    return result
def multi_threads_fusion_genes_annotation(folder_path, extension, max_simultaneous_threads):
    ''' Executes annotate_fusion_genes() for each dataset file in a folder.
    Each execution run on a different thread, throttled to at most
    max_simultaneous_threads concurrent workers.'''
    # Shared counter. NOTE(review): it is only ever incremented here, so it
    # is presumably decremented inside annotate_fusion_genes() — confirm,
    # otherwise the throttle loop below would block forever.
    global running_threads
    dataset_files = list_of_files(folder_path, extension)
    threads = 0
    running_threads = 0
    for file_ in dataset_files:
        while running_threads >= max_simultaneous_threads:
            time.sleep(1)  # busy-wait until a worker slot frees up
        threads += 1
        running_threads += 1
        print('thread', threads, '|', 'running threads:',running_threads)
        Thread(target=annotate_fusion_genes, args=(file_,)).start() # with multithreading
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public'
# multi_threads_fusion_genes_annotation(folder, 'txt',50)
def pandize_dataset(annotated_dataset, verbose=True):
    '''
    Prepares a dataset to be "pandas ready".

    annotated_dataset: path to a tab-separated file; the first column is
    used as index, and ChrA/ChrB columns are kept as strings.
    Returns a DataFrame with 'In Frame'/'In Phase' mapped to 1 and
    'Not in Frame'/'Not in Phase' mapped to 0.
    '''
    # Parse
    if verbose == True:
        message = 'parsing ' + annotated_dataset.split('/')[-1]
        spacer = (100-len(message))*'.'
        print(message, spacer)
    # pd.read_table is the supported public API; the original called the
    # private pd.io.parsers.read_table, removed from modern pandas.
    dataset = pd.read_table(annotated_dataset, dtype={'ChrA':'str','ChrB':'str'}, sep='\t', index_col=0)
    if verbose == True:
        print('OK')
    # Clean
    if verbose == True:
        message = 'cleaning ' + annotated_dataset.split('/')[-1]
        spacer = (100-len(message))*'.'
        print(message, spacer)
    dataset = dataset.replace('In Frame', 1)
    dataset = dataset.replace('Not in Frame', 0)
    dataset = dataset.replace('In Phase', 1)
    dataset = dataset.replace('Not in Phase', 0)
    if verbose == True:
        print('OK')
    return dataset
# pandize_dataset('test_data_annotated.txt')
# pandize_dataset('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/control_dataset_100-1000-150000_annotated.txt')
def parse_blastXML(infile):
    '''
    Parse a BLAST XML output file and print, for every HSP of every
    alignment, its title, length, e-value and the aligned sequences.
    '''
    # NOTE(review): the file handle passed to NCBIXML.parse is never closed.
    for blast_record in NCBIXML.parse(open(infile)):
        for alignment in blast_record.alignments:
            for hsp in alignment.hsps:
                print("*****Alignment****")
                print("sequence:", alignment.title)
                print("length:", alignment.length)
                print("e-value:", hsp.expect)
                print(hsp.query)
                print(hsp.match)
                print(hsp.sbjct)
# to be tested
# to be tested
def reverse(sequence):
    '''Return the items of *sequence* concatenated in reverse order as a string.'''
    return ''.join(reversed(sequence))
def complement(sequence):
    '''Return the DNA complement of *sequence*; output is upper-case.
    Raises KeyError for bases other than A/T/C/G.'''
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(pairs[base] for base in sequence.upper())
def get_mismatches(template, primer, maxerr, overlapped=False):
    '''Return all fuzzy matches of *primer* in *template*, allowing up to
    *maxerr* errors, using the third-party `regex` module's fuzzy-matching
    syntax `(pattern){e<=N}`.'''
    error = 'e<={}'.format(maxerr)
    return regex.findall(f'({primer}){{{error}}}', template, overlapped=overlapped)
def pcr(template,primer_F,primer_R,circular=False):
    '''Simulate a PCR and return the product of primer_F/primer_R on template.

    NOTE(review): assumes each primer (or a unique 3'/5' part of it)
    occurs in the template; `ix_R` may be referenced before assignment if
    the reverse-complemented primer_R is never found — confirm inputs.
    '''
    if circular: ##works only with primers without 5' overhang
        # rotate the template so that the product region is contiguous
        i = template.upper().find(primer_F.upper())
        template = template[i:]+template[:i]
    #Find primer_F, or the largest 3'part of it, in template
    for n in range(len(primer_F)):
        ix_F = [m.end() for m in re.finditer(primer_F[n:].upper(),
                                             template.upper())]
        if len(ix_F) == 1: #it's unique
            #print(ix_F)
            #print(primer_F[n:])
            break
        n += 1  # NOTE(review): no effect; the for-loop rebinds n each pass
    #print(ix_F)
    #Find primer_R, or the largest 5'part of it, in template
    rc_R = reverse(complement(primer_R))
    for n in range(len(primer_R)):
        ix = [m.start() for m in re.finditer(rc_R[:n].upper(),
                                             template.upper())]
        if len(ix) == 1: #it's unique
            ix_R = ix[:]
        if len(ix) < 1: #it's the largest possible
            #print(ix_R)
            #print(rc_R[:n])
            break
        n += 1  # NOTE(review): no effect; the for-loop rebinds n each pass
    #Build the product
    return primer_F + template[ix_F[0]:ix_R[0]] + rc_R
##template = 'CTAGAGAGGGCCTATTTCCCATGATT--something--GCCAATTCTGCAGACAAATGGGGTACCCG'
##primer_F = 'GACAAATGGCTCTAGAGAGGGCCTATTTCCCATGATT'
##primer_R = 'TTATGTAACGGGTACCCCATTTGTCTGCAGAATTGGC'
##product = pcr(template,primer_F,primer_R)
##expected = 'GACAAATGGCTCTAGAGAGGGCCTATTTCCCATGATT--something--GCCAATTCTGCAGACAAATGGGGTACCCGTTACATAA'
##expected == result
def pip_upgrade_all(executable=False):
    '''
    Upgrades all pip-installed packages.
    Requires a bash shell (uses a grep/cut/xargs pipeline).

    executable: optional path/name of a specific interpreter whose pip
    should be upgraded; when falsy, python/python2/python3/pypy/pypy3 are
    each attempted in turn.
    '''
    # The shell strings are raw so that '\-' is not parsed as a Python
    # escape sequence (a SyntaxWarning on modern Python); the bytes sent
    # to the shell are unchanged.
    if executable:
        print('upgrading pip...')
        call(f'{executable} -m pip install --upgrade pip',
            shell=True)
        call(rf"{executable} -m pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 {executable} -m pip install -U",
            shell=True)
        print('done')
    else:
        #pip
        print('upgrading pip...')
        call('python -m pip install --upgrade pip', shell=True)
        call(r"python -m pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 python -m pip install -U", shell=True)
        #pip2
        print('upgrading pip2...')
        call('python2 -m pip install --upgrade pip', shell=True)
        call(r"python2 -m pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 python2 -m pip install -U", shell=True)
        #pip3
        print('upgrading pip3...')
        call('python3 -m pip install --upgrade pip', shell=True)
        call(r"python3 -m pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 python3 -m pip install -U", shell=True)
        #pypy
        print('upgrading pypy-pip...')
        call('pypy -m pip install --upgrade pip',shell=True)
        call(r"pypy -m pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 pypy -m pip install -U", shell=True)
        #pypy3
        print('upgrading pypy3-pip...')
        call('pypy3 -m pip install --upgrade pip',shell=True)
        call(r"pypy3 -m pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 pypy3 -m pip install -U", shell=True)
def probability(p,n,k):
    '''
    Simple binomial probability calculator.
    Calculates the probability that k events occur in n trials.
    Each event has probability p of occurring once.
    Example: What is the probability of having 3 Heads by flipping a coin 10 times?
    probability = prob(0.5,10,3)
    print(probability) => (15/128) = 0.1171875
    '''
    p = float(p)
    # n and k must be integers: math.factorial rejects floats on modern
    # Python (the original converted them to float and then crashed).
    n = int(n)
    k = int(k)
    # binomial coefficient C(n, k), computed exactly in integer arithmetic
    C = math.factorial(n) // (math.factorial(k) * math.factorial(n-k))
    probability = C * (p**k) * (1-p)**(n-k)
    return probability
#from math import factorial
#print(probability(0.5,10,3))
#print(probability(0.5,1,1))
def process(real_dataset):
    '''
    Generates, annotates and sorts a control dataset for the given real dataset.
    The control file is derived from *real_dataset* by suffixing
    '_control.txt', then '_annotated.txt' after annotation.
    '''
    gen_control_dataset(real_dataset)
    control_filename = real_dataset[:-4]+'_control.txt'
    #annotate_fusion_genes(real_dataset)
    annotate_fusion_genes(control_filename)
    control_filename = control_filename[:-4]+'_annotated.txt'
    #dataset_filename = real_dataset[:-4]+'_annotated.txt'
    #sort_dataset(dataset_filename)
    sort_dataset(control_filename)
    print(real_dataset,'processed. All OK.')
#process('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/clinvarCnv-DeletionsOnly.txt')
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/random'
# for item in list_of_files(folder,'txt'):
# process(item)
def query_encode(chromosome, start, end):
    '''
    Queries ENCODE via http://promoter.bx.psu.edu/ENCODE/search_human.php
    Parses the output and returns a dictionary of CIS elements found and the relative location.
    '''
    ## Regex setup: matches coordinate strings like 'chr2:10000-20000'
    re1='(chr{})'.format(chromosome) # The specific chromosome
    re2='(:)' # Any Single Character ':'
    re3='(\\d+)' # Integer
    re4='(-)' # Any Single Character '-'
    re5='(\\d+)' # Integer
    rg = re.compile(re1+re2+re3+re4+re5,re.IGNORECASE|re.DOTALL)
    ## Query ENCODE
    std_link = 'http://promoter.bx.psu.edu/ENCODE/get_human_cis_region.php?assembly=hg19&'
    query = std_link + 'chr=chr{}&start={}&end={}'.format(chromosome,start,end)
    print(query)
    html_doc = urlopen(query)
    html_txt = BeautifulSoup(html_doc, 'html.parser').get_text()
    data = html_txt.split('\n')
    ## Parse the output
    parsed = {}
    # lines reading 'Coordinate' mark the start of a result section;
    # the element name sits two lines above each of them
    coordinates = [i for i, item_ in enumerate(data) if item_.strip() == 'Coordinate']
    elements = [data[i-2].split(' ')[-1].replace(': ','') for i in coordinates]
    blocks = [item for item in data if item[:3] == 'chr']
    #if len(elements) == len(blocks):
    # NOTE(review): assumes elements and blocks line up one-to-one —
    # the commented-out length check above suggests this is unverified.
    i = 0
    for item in elements:
        txt = blocks[i]
        m = rg.findall(txt)
        bins = [''.join(item) for item in m]
        parsed.update({item:bins})
        i += 1
    return parsed
#cis_elements = query_encode(2,10000,20000)
def run_perl(perl_script_file, input_perl_script):
    '''
    Execute an external Perl script and return its raw output (bytes).
    '''
    command = ["perl", perl_script_file, input_perl_script]
    return check_output(command)
#print(run_perl('FusionGenes_Annotation.pl', 'test_data.txt'))
def run_py(code, interp='python3'):
    '''Run a block of python code using the target interpreter.

    The code is written to ``tmp.py`` in the current directory and
    executed with *interp*; returns the interpreter's stdout as bytes.

    Bug fix: the original referenced an undefined name ``interpr``
    (the parameter is ``interp``), raising NameError on every call.
    '''
    with open('tmp.py', 'w') as f:
        for line in code.split('\n'):
            f.write(line+'\n')
    return check_output([interp, 'tmp.py'])
def run_pypy(code, interpr='pypy3'):
    '''Run a block of python code with PyPy and return its stdout (bytes).'''
    with open('tmp.py', 'w') as tmp:
        # write the code with a guaranteed trailing newline
        tmp.write('\n'.join(code.split('\n')) + '\n')
    return check_output([interpr, 'tmp.py'])
def sequence_from_coordinates(chromosome,strand,start,end): #beta hg19 only
    '''
    Download from NCBI the nucleotide sequence at the given hg19/GRCh37
    coordinates. Returns the sequence string, or False if NCBI returns
    no sequence.
    '''
    Entrez.email = "a.marcozzi@umcutrecht.nl" # Always tell NCBI who you are
    #GRCh37 from http://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.25/#/def_asm_Primary_Assembly
    NCBI_IDS = {'1':'NC_000001.10','2':'NC_000002.11','3':'NC_000003.11','4':'NC_000004.11',
                '5':'NC_000005.9','6':'NC_000006.11','7':'NC_000007.13','8':'NC_000008.10',
                '9':'NC_000009.11','10':'NC_000010.10','11':'NC_000011.9','12':'NC_000012.11',
                '13':'NC_000013.10','14':'NC_000014.8','15':'NC_000015.9','16':'NC_000016.9',
                '17':'NC_000017.10','18':'NC_000018.9','19':'NC_000019.9','20':'NC_000020.10',
                '21':'NC_000021.8','22':'NC_000022.10','X':'NC_000023.10','Y':'NC_000024.9'}
    try:
        handle = Entrez.efetch(db="nucleotide",
                               id=NCBI_IDS[str(chromosome)],
                               rettype="fasta",
                               strand=strand, #"1" for the plus strand and "2" for the minus strand.
                               seq_start=start,
                               seq_stop=end)
        record = SeqIO.read(handle, "fasta")
        handle.close()
        sequence = str(record.seq)
        return sequence
    except ValueError:
        print('ValueError: no sequence found in NCBI')
        return False
#a = sequence_from_coordinates(9,'-',21967751,21994490)
#print(a)
def sequence_from_gene(gene_name): #beta
    '''
    Download from NCBI the nucleotide sequence of *gene_name*, locating
    the gene's coordinates via pyensembl (Ensembl release 75 / GRCh37).
    Returns the sequence string, or False if NCBI returns no sequence.
    '''
    data = EnsemblRelease(75)
    Entrez.email = "a.marcozzi@umcutrecht.nl" # Always tell NCBI who you are
    NCBI_IDS = {'1':"NC_000001", '2':"NC_000002",'3':"NC_000003",'4':"NC_000004",
                '5':"NC_000005",'6':"NC_000006",'7':"NC_000007", '8':"NC_000008",
                '9':"NC_000009", '10':"NC_000010", '11':"NC_000011", '12':"NC_000012",
                '13':"NC_000013",'14':"NC_000014", '15':"NC_000015", '16':"NC_000016",
                '17':"NC_000017", '18':"NC_000018", '19':"NC_000019", '20':"NC_000020",
                '21':"NC_000021", '22':"NC_000022", 'X':"NC_000023", 'Y':"NC_000024"}
    # use the first gene object matching the name
    gene_obj = data.genes_by_name(gene_name)
    target_chromosome = NCBI_IDS[gene_obj[0].contig]
    seq_start = int(gene_obj[0].start)
    seq_stop = int(gene_obj[0].end)
    strand = 1 if gene_obj[0].strand == '+' else 2
    try:
        handle = Entrez.efetch(db="nucleotide",
                               id=target_chromosome,
                               rettype="fasta",
                               strand=strand, #"1" for the plus strand and "2" for the minus strand.
                               seq_start=seq_start,
                               seq_stop=seq_stop)
        record = SeqIO.read(handle, "fasta")
        handle.close()
        sequence = str(record.seq)
        return sequence
    except ValueError:
        print('ValueError: no sequence found in NCBI')
        return False
def sortby_chr(string):
    '''
    Key function for sorting chromosome labels numerically.
    To use with sorted().

    Plain string sorting yields ['1','10','11',...,'2','20',...,'X','Y'];
    this key makes sorted() return ['1','2',...,'10','11',...,'X','Y'],
    mapping 'X' to 23 and 'Y' to 24.
    '''
    special = {'X': 23, 'Y': 24}
    if string in special:
        return special[string]
    return int(string)
# prova = ['1','10','11','9','2','20','3','X','Y']
# print('sorted()', sorted(prova))
# print('sortby_chr()', sorted(prova, key=sortby_chr))
def sort_dataset(dataset_file, overwrite=False):
    '''
    Sort a dataset file by chromosome A (column 2); helps during plotting.

    Expects exactly one '##'-prefixed header line. Writes the result to
    '<name>_sorted.txt', or back to the input file when overwrite=True.
    Returns None (and prints a warning) if the header is missing or
    duplicated.
    '''
    text = []
    header_counter = 0
    header = False
    print('Sorting...')
    with open(dataset_file, 'r') as infile:
        #lines = infile.readlines()
        for line in infile:
            list_ = line_to_list(line, '\t')
            if line[:2] == '##':
                header = list_
                header_counter += 1
            else:
                text.append(list_)
    #checkpoint: exactly one header line must have been seen
    if header == False or header_counter > 1:
        print('Something is wrong with the header line...', header_counter, header)
        return None
    # sort by the second element of the list i.e. 'ChrA'
    text.sort(key=lambda x: sortby_chr(itemgetter(1)(x)))
    # Write output
    if overwrite == False:
        outfile = dataset_file[:-4]+'_sorted.txt'
    else:
        # bug fix: the original read `dataset_files` (undefined name),
        # so overwrite=True always raised NameError
        outfile = dataset_file
    with open(outfile, 'w') as outfile:
        outfile.write(list_to_line(header, '\t') + '\n')
        for list_ in text:
            outfile.write(list_to_line(list_, '\t') + '\n')
    print('Done!')
# sort_dataset('test_data.txt')
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public'
# for item in list_of_files(folder, 'txt'):
# sort_dataset(item)
# sort_dataset('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/annotated/dgvMerged-DeletionsOnly_annotated.txt')
def split_fasta_file(infile): #beta
    '''
    Split a fasta file containing multiple sequences
    into multiple files containing one sequence each.
    One sequence per file; each output file is named '<header>.txt'
    after the text following '>'.

    NOTE(review): assumes the file starts with a '>' header line and is
    non-empty; otherwise `lines`/`outfile` are referenced before
    assignment — confirm expected inputs.
    '''
    flag = False
    length = 0
    with open(infile,'r') as f:
        for line in f:
            if line.startswith('>'):
                if flag == False:
                    # first header encountered: just start buffering
                    flag = True
                    outfile = '{}.txt'.format(line[1:].strip())
                    print('writing {}'.format(outfile))
                    lines = [line]
                else:
                    # new header: flush the previous sequence to disk
                    with open(outfile, 'w') as out:
                        for _ in lines:
                            out.write(_)
                        print('{} bases written'.format(length))
                    length = 0
                    outfile = '{}.txt'.format(line[1:].strip())
                    print('writing {}'.format(outfile))
                    lines = [line]
            else:
                lines.append(line)
                length += len(line.strip())
    #Write last file
    with open(outfile, 'w') as out:
        for _ in lines:
            out.write(_)
        print('{} bases written'.format(length))
def substract_datasets(infile_1, infile_2, outfile, header=True):
    '''
    Takes two files containing tab delimited data, compares them and writes a
    file containing the data that is present in infile_2 but not in infile_1.
    When header=True the first data line of each file is treated as a header:
    infile_1's header is copied to the output, infile_2's is skipped.
    '''
    header2 = False
    comment_line = '# dataset generated by substracting ' + infile_1 + ' to ' + infile_2 + '\n'
    with open(infile_1) as infile_1:
        lines_1 = infile_1.readlines()
    with open(infile_2) as infile_2:
        lines_2 = infile_2.readlines()
    # rows of infile_1: any of these found in infile_2 will be dropped
    row_to_removes = []
    for line in lines_1:
        if line[0] != '#': # skips comments
            if header == True:
                header2 = True # to use for the second file
                header = False # set back header to false since the first line will be skipped
                first_line = line  # NOTE(review): stays unbound if infile_1 has no data rows
                pass
            else:
                item = line_to_list(line, '\t')
                row_to_removes.append(item)
    result_list = []
    for line in lines_2:
        if line[0] != '#': # skips comments
            if header2 == True:
                header2 = False # set back header to false since the first line will be skipped
                pass
            else:
                item = line_to_list(line, '\t')
                if item not in row_to_removes:
                    result_list.append(item)
    with open(outfile, 'w') as outfile:
        outfile.write(comment_line)
        outfile.write(first_line)
        for item in result_list:
            outfile.write(list_to_line(item, '\t') + '\n')
    print('substraction of two datasets DONE')
# substract_datasets('dataset_1_b.txt', 'dataset_1.txt', 'dataset_1-1b.txt', header=True)
def yield_file(filepath):
    '''
    A simple generator that yields the lines of a file one by one.
    Good for reading large files without running out of memory.
    '''
    with open(filepath, 'r') as handle:
        yield from handle
# for line in yield_file('GRCh37_hg19_variants_2014-10-16.txt'):
# print(line[:20]) | {
"repo_name": "alec-djinn/alefuncs",
"path": "alefuncs.py",
"copies": "1",
"size": "136740",
"license": "mit",
"hash": -7891461489791813000,
"line_mean": 34.5818371064,
"line_max": 288,
"alpha_frac": 0.5649700161,
"autogenerated": false,
"ratio": 3.4436385614989424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9422218160876228,
"avg_score": 0.01727808334454285,
"num_lines": 3843
} |
from Bio import pairwise2, Entrez, SeqIO
from Bio.SubsMat import MatrixInfo as matlist
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML
import tensorflow as tf
from urllib.request import urlopen
from urllib.parse import urlparse
from subprocess import call, check_output, run
from pyensembl import EnsemblRelease
from bs4 import BeautifulSoup
from collections import OrderedDict, Set, Mapping, deque, Counter
from operator import itemgetter
from itertools import islice, chain, tee
from threading import Thread
from numbers import Number
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import argrelextrema
import pandas as pd
import regex
import re
import datetime, math, sys, hashlib, pickle, time, random, string, json, glob, os, signal, warnings, decimal
import httplib2 as http
from urllib.request import urlopen
from pyliftover import LiftOver
from PIL import Image
# Named colour palette (hex RGB strings) for plotting.
ale_palette = {
    "purple": "#9b59b6",
    "blue": "#3498db",
    "gray": "#95a5a6",
    "red": "#e74c3c",
    "black": "#34495e",
    "green": "#2ecc71",
}
def pairwise(iterable):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    first, second = tee(iterable)
    next(second, None)  # advance the second iterator by one step
    return zip(first, second)
def is_int(txt):
    '''
    Return True when the string can be parsed as an integer.
    '''
    try:
        int(txt)
    except ValueError:
        return False
    return True
def count_atoms(formula):
    '''
    Count atoms in a chemical formula.

    Element symbols are an uppercase letter optionally followed by
    lowercase letters; a missing count means 1.
    (The original docstring examples called the function `atomize`,
    which does not exist; fixed. Characters that cannot start an
    element symbol are now ignored instead of raising ValueError.)

    >>> count_atoms('C6H12O7')
    Counter({'H': 12, 'O': 7, 'C': 6})
    >>> count_atoms('C6H12O7MgCl2')
    Counter({'H': 12, 'O': 7, 'C': 6, 'Cl': 2, 'Mg': 1})
    '''
    counts = Counter()
    # one match per element: symbol ([A-Z][a-z]*) plus optional digits
    for symbol, digits in re.findall(r'([A-Z][a-z]*)(\d*)', formula):
        counts.update({symbol: int(digits) if digits else 1})
    return counts
def float_range(start, stop, step):
    '''
    range()-like generator yielding floats from start (inclusive) up to
    stop (exclusive). The increment goes through decimal.Decimal, which
    limits accumulated floating-point drift; `step` may be a string
    like '0.1'.
    '''
    current = start
    while current < stop:
        yield float(current)
        current = current + decimal.Decimal(step)
def rnd_candle(start):
    """int => list
    Return one random [open, high, low, close] candle whose open is `start`.
    Each of the three moves draws a coin flip for direction and a second
    uniform sample for the size of the move.
    """
    prices = [start]
    value = start
    for _ in range(3):
        # first draw decides direction, second draw the magnitude
        if random.random() > 0.5:
            value += random.random()
        else:
            value -= random.random()
        prices.append(value)
    return [prices[0], max(prices), min(prices), prices[-1]]
def make_rnd_walk_candles(start):
    """int => list_of_lists
    Return a 100-step random-walk path of [open, high, low, close] candles,
    each candle opening where the previous one closed.

    NOTE(review): the original line read ``def make rnd_walk_dandles(start):``
    -- a SyntaxError, so no caller could ever have used it; renamed to a
    valid identifier.
    """
    candles = []
    for _ in range(100):
        candle = rnd_candle(start)
        candles.append(candle)
        start = candle[-1]  # next candle opens at this close
    return candles
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
    '''
    Warning handler that also prints the current stack trace.
    How to use: warnings.showwarning = warn_with_traceback
    https://stackoverflow.com/questions/22373927/get-traceback-of-warnings

    Writes to `file` when it has a write() method, else to sys.stderr.
    '''
    import traceback  # not imported at module level: keep the dependency local
    log = file if hasattr(file, 'write') else sys.stderr
    traceback.print_stack(file=log)
    log.write(warnings.formatwarning(message, category, filename, lineno, line))
def stretch(arr, factor=False, length=False):
    '''
    Stretch an array along the x-axis by linear interpolation.

    Give either `factor` (multiply the number of points) or `length`
    (absolute number of output points); `factor` wins if both are set.
    '''
    assert factor or length, '"factor" or "length" must be specified.'
    size = len(arr)
    target = factor * size if factor else length
    return np.interp(np.linspace(0, size, target), np.arange(size), arr)
def install_ssl_certificates():
    '''
    Fix for [SSL: CERTIFICATE_VERIFY_FAILED]

    Upgrades the certifi package via pip, then replaces the interpreter's
    default OpenSSL CA file with a symlink to certifi's bundle.
    Side effects: changes the process working directory, deletes any
    existing CA file, creates a symlink and chmods it to 0o775.
    '''
    # sample script to install or update a set of default Root Certificates
    # for the ssl module. Uses the certificates provided by the certifi package:
    # https://pypi.python.org/pypi/certifi
    import os
    import os.path
    import ssl
    import stat
    import subprocess
    import sys
    # rwxrwxr-x permissions for the resulting CA file
    STAT_0o775 = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                 | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
                 | stat.S_IROTH | stat.S_IXOTH )
    openssl_dir, openssl_cafile = os.path.split(
        ssl.get_default_verify_paths().openssl_cafile)
    print(" -- pip install --upgrade certifi")
    subprocess.check_call([sys.executable,
        "-E", "-s", "-m", "pip", "install", "--upgrade", "certifi"])
    import certifi
    # change working directory to the default SSL directory
    os.chdir(openssl_dir)
    relpath_to_certifi_cafile = os.path.relpath(certifi.where())
    print(" -- removing any existing file or link")
    try:
        os.remove(openssl_cafile)
    except FileNotFoundError:
        pass
    print(" -- creating symlink to certifi certificate bundle")
    os.symlink(relpath_to_certifi_cafile, openssl_cafile)
    print(" -- setting permissions")
    os.chmod(openssl_cafile, STAT_0o775)
    print(" -- update complete")
def make_df_from_files(files):
    """
    Concatenate several csv files into a single DataFrame
    with a fresh 0..n-1 index.
    """
    frames = (pd.read_csv(path) for path in files)
    return pd.concat(frames, ignore_index=True)
def move_terminal_cursor(x, y):
    """
    Move the terminal cursor to column x, row y via an ANSI escape code.
    """
    escape = f"\033[{y};{x}H"
    print(escape)
def print_at(x, y, txt):
    """
    Print txt at column x, row y of the terminal via an ANSI escape code.
    """
    positioned = f"\033[{y};{x}H{txt}"
    print(positioned)
def clear_terminal_output():
    """
    Clear the terminal and reset the cursor at the top left corner.

    Queries the terminal size with `stty size`, overwrites every row
    with spaces via print_at, then homes the cursor.
    NOTE(review): `stty size` needs a real tty -- this fails when stdout
    is redirected or on non-POSIX systems.
    """
    rows, columns = map(int, os.popen("stty size", "r").read().split())
    txt = " " * columns
    for r in range(rows):
        print_at(0, r, txt)
    move_terminal_cursor(0, 0)
def move_terminal_cursor(x, y):
    '''
    Move the terminal cursor to a specific position.

    NOTE(review): exact duplicate of an identical definition earlier in
    this file; being later, this one wins at import time.
    '''
    print(f"\033[{y};{x}H")
def print_at(x, y, txt):
    '''
    Print txt on a specific coordinate of the terminal screen.

    NOTE(review): exact duplicate of an identical definition earlier in
    this file; being later, this one wins at import time.
    '''
    print(f"\033[{y};{x}H{txt}")
def clear_terminal_output():
    '''
    Clear the terminal and reset the cursor at the top left corner.

    NOTE(review): duplicate (up to quoting/spacing) of a definition
    earlier in this file; being later, this one wins at import time.
    Requires a real tty for `stty size`.
    '''
    rows, columns = map(int,os.popen('stty size', 'r').read().split())
    txt = ' '*columns
    for r in range(rows):
        print_at(0,r,txt)
    move_terminal_cursor(0,0)
def in_ipynb():
    '''
    Return True when running inside an IPython notebook kernel,
    False otherwise (including plain CPython, where get_ipython
    is undefined).
    '''
    try:
        app = get_ipython().config['IPKernelApp']
        return app['parent_appname'] == 'ipython-notebook'
    except NameError:
        # get_ipython only exists inside IPython
        return False
def compare_dict(d1, d2):
    """
    Compare two dicts. Return (added, removed, modified, same) where
    `added`/`removed` are the key sets present only in d1/d2,
    `modified` maps shared keys to their differing (d1, d2) value pairs,
    and `same` is the set of shared keys whose values are equal.
    """
    keys1, keys2 = set(d1), set(d2)
    shared = keys1 & keys2
    added = keys1 - keys2
    removed = keys2 - keys1
    modified = {k: (d1[k], d2[k]) for k in shared if d1[k] != d2[k]}
    same = {k for k in shared if d1[k] == d2[k]}
    return added, removed, modified, same
class DotNotDict:
    """
    Wrap a dictionary so its keys become attributes, enabling
    dot-notation access to the data.
    Example:
        >> d = DotNotDict({'alpha': 0, 'beta': 1, 'gamma': 3.5})
        >> d.gamma
        3.5
    """
    def __init__(self, dictionary):
        for key, value in dictionary.items():
            setattr(self, key, value)
    def __repr__(self):
        # print every public attribute, then return an empty string so
        # the interactive echo stays clean
        public = (name for name in dir(self) if not name.startswith("__"))
        for name in public:
            print(f"{name:>50} : {getattr(self, name)}")
        return ""
def fake_rsi(length):
    """Generate a list simulating an RSI trace: a rescaled, smoothed
    random walk with values pulled back inside the 10..90 band."""
    def clamp(value):
        # real RSIs hardly go over 90 or below 10
        if value > 90:
            return value - 20
        if value < 10:
            return value + 20
        return value
    trace = smooth(rescale(random_walk(length)) * 100, 5)
    return [clamp(value) for value in trace]
def drop(arr, p=0.1):
    """
    General "dropout" function for numeric arrays of any shape.

    Each element is independently set to 0 with probability `p`.

    Parameters:
        arr : list or np.ndarray of numbers, any shape.
        p   : probability of dropping each element.

    Returns:
        np.ndarray of the same shape as `arr`.
    """
    if isinstance(arr, list):
        arr = np.array(arr)
    # arr.size works for every rank; the original multiplied the shape
    # tuple with np.multiply(*arr.shape), which crashed on arrays with
    # more than two dimensions.
    mask = np.random.binomial(1, 1 - p, arr.size).reshape(arr.shape)
    return np.multiply(arr, mask)
def md5(fname):
    """
    Return the hex md5 digest of a file, read in 4 KiB chunks so that
    large files never have to fit in memory.
    """
    digest = hashlib.md5()
    with open(fname, "rb") as handle:
        while True:
            chunk = handle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def viterbi(pi, a, b, obs):
    """
    The Viterbi algorithm: most probable hidden-state path of an HMM.

    Parameters:
        pi  : (nStates,) initial state probabilities.
        a   : (nStates, nStates) transition matrix.
        b   : (nStates, nSymbols) emission matrix.
        obs : (T,) integer observation indices.

    Returns:
        (path, delta, phi): the best state sequence, the best-path
        probabilities delta[s, t], and the argmax backpointers phi[s, t].

    # code adapted from Stephen Marsland's, Machine Learning An Algorthmic Perspective, Vol. 2
    # https://github.com/alexsosn/MarslandMLAlgo/blob/master/Ch16/HMM.py
    # http://www.blackarbs.com/blog/introduction-hidden-markov-models-python-networkx-sklearn/2/9/2017
    """
    nStates = np.shape(b)[0]
    T = np.shape(obs)[0]
    # path and phi must hold integer state indices: the original used float
    # arrays, and indexing phi with a float (and a 1-element list) fails on
    # modern numpy during the backtrace.
    path = np.zeros(T, dtype=int)
    # delta --> highest probability of any path that reaches state i
    delta = np.zeros((nStates, T))
    # phi --> argmax by time step for each state
    phi = np.zeros((nStates, T), dtype=int)
    # init delta and phi
    delta[:, 0] = pi * b[:, obs[0]]
    phi[:, 0] = 0
    print("\nStart Walk Forward\n")
    # the forward algorithm extension
    for t in range(1, T):
        for s in range(nStates):
            delta[s, t] = np.max(delta[:, t - 1] * a[:, s]) * b[s, obs[t]]
            phi[s, t] = np.argmax(delta[:, t - 1] * a[:, s])
            print(
                "s={s} and t={t}: phi[{s}, {t}] = {phi}".format(s=s, t=t, phi=phi[s, t])
            )
    # find optimal path
    print("-" * 50)
    print("Start Backtrace\n")
    path[T - 1] = np.argmax(delta[:, T - 1])
    for t in range(T - 2, -1, -1):
        # scalar indexing keeps the assignment a plain scalar store
        path[t] = phi[path[t + 1], t + 1]
        print("path[{}] = {}".format(t, path[t]))
    return path, delta, phi
def gauss_func(x, amp, x0, sigma):
    """Gaussian of amplitude `amp`, centre `x0` and width `sigma`, at x."""
    exponent = -((x - x0) ** 2.0) / (2.0 * sigma ** 2.0)
    return amp * np.exp(exponent)
def call_python(Version, Module, Function, ArgumentList):
    """
    Call a function defined for PythonX from PythonY, by spawning a
    `python<Version>` subprocess through an execnet gateway, importing
    `Function` from `Module` there and sending `ArgumentList` over the
    channel.

    NOTE(review): relies on the third-party `execnet` package, which is
    never imported in this file -- callers must import it themselves or
    this raises NameError.
    """
    gw = execnet.makegateway("popen//python=python%s" % Version)
    channel = gw.remote_exec(
        """
    from %s import %s as the_function
    channel.send(the_function(*channel.receive()))
    """
        % (Module, Function)
    )
    channel.send(ArgumentList)
    return channel.receive()
def print_attrs(name, obj):
    """
    Print an HDF5 node name followed by its attribute key/value pairs;
    meant to be passed to h5py's visititems for a quick file overview.
    Example:
        f = h5py.File(fast5_read,'r')
        f.visititems(print_attrs)
    """
    print(name)
    for key, value in obj.attrs.items():
        print(key, value)
def scaled_tanh(x):
    """LeCun-style scaled tanh activation: 1.7159 * tanh(2x/3).
    https://stackoverflow.com/questions/13632976/neural-network-with-tanh-wrong-saturation-with-normalized-data
    """
    return 1.7159 * np.tanh(2 / 3 * x)
def scaled_tanh_deriv(x):
    """Derivative of the scaled tanh activation: 1.14393 * (1 - tanh(2x/3)^2).
    https://stackoverflow.com/questions/13632976/neural-network-with-tanh-wrong-saturation-with-normalized-data
    # 1.14399053 * (1 - np.tanh(2/3 *x)) * (1 + np.tanh(2/3 * x)))
    """
    # the original called a bare `tanh`, a name that is not defined
    # anywhere in this module (NameError at runtime); np.tanh is what
    # the sibling activation helpers use.
    return 1.14393 * (1 - np.power(np.tanh(2 / 3 * x), 2))
def scaled_tanh_error(expected, output):
    """Backprop error term for the 1.7159*tanh(2x/3) activation.
    https://stackoverflow.com/questions/13632976/neural-network-with-tanh-wrong-saturation-with-normalized-data
    """
    gain = 2 / 3
    return gain * (1.7159 - output ** 2) * (expected - output)
def tanh_deriv(x):
    """
    Derivative of the hyperbolic tangent: 1 - tanh(x)^2.
    Handy when computing gradients (local minima) in
    machine-learning regression problems.
    https://towardsdatascience.com/activation-functions-neural-networks-1cbd9f8d91d6
    """
    t = np.tanh(x)
    return 1.0 - np.power(t, 2)
def fancy_relu(x):
    """
    np.array => np.array
    In-place ReLU using a boolean mask: negative entries become 0.
    NOTE: mutates (and also returns) its argument.
    """
    negative = x < 0
    x[negative] = 0
    return x
def factorial(n):
    """
    Return n! .
    Teaching implementation; for production code use math.factorial(n).
    """
    # `reduce` moved to functools in Python 3 and is not imported at
    # module level, so the original raised NameError when called.
    from functools import reduce
    return reduce(lambda x, y: x * y, [1] + list(range(1, n + 1)))
def jitter(n, mu=0, sigma=0.1):
    """Return n plus one Gaussian N(mu, sigma) sample, as a 1-element array."""
    noise = np.random.normal(mu, sigma, 1)
    return n + noise
class TimeoutError(Exception):
    """
    Error raised by the Timeout context manager below.

    NOTE(review): shadows the builtin TimeoutError within this module.
    """
    pass
class Timeout:
    """
    Context manager that raises TimeoutError once `seconds` elapse
    inside the with-block. Built on UNIX SIGALRM, so it only works on
    the main thread of POSIX systems.
    """
    def __init__(self, seconds=1, error_message="Timeout"):
        self.seconds = seconds
        self.error_message = error_message
    def handle_timeout(self, signum, frame):
        # installed as the SIGALRM handler while the block runs
        raise TimeoutError(self.error_message)
    def __enter__(self):
        signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)
    def __exit__(self, type, value, traceback):
        # leaving the block (normally or not) cancels the pending alarm
        signal.alarm(0)
def random_walk(lenght):
    """int => np.array
    Return a +/-1-step random walk of the requested length.
    (Parameter spelling kept as-is for backward compatibility.)
    """
    position = 0
    steps = []
    for _ in range(lenght):
        position += 1 if random.randint(0, 1) else -1
        steps.append(position)
    return np.array(steps)
def find_min_max(array):
    """np.array => dict
    Return {'maxima': ..., 'minima': ...}: the index tuples of the
    array's local extrema, found with scipy's argrelextrema.
    """
    return {
        "maxima": argrelextrema(array, np.greater),
        "minima": argrelextrema(array, np.less),
    }
def smooth(array, window_len=10, window="hanning"):
    """np.array, int, str => np.array
    Smooth the data using a window with requested size.
    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the begining and end part of the output signal.
    input:
        array: the input signal (1-D np.array)
        window_len: the dimension of the smoothing window; should be an odd integer
        window: one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman';
            a flat window produces a moving-average smoothing
    output:
        the smoothed signal, same length as the input
    raises:
        ValueError for non-1-D input, input shorter than the window,
        or an unknown window name
    see also:
        numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
        scipy.signal.lfilter
    """
    if array.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if array.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # window too small to smooth anything: return the input unchanged
        # (the original returned the undefined name `x` here -> NameError)
        return array
    if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]:
        raise ValueError(
            "Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
        )
    # reflect the signal at both ends to soften boundary transients
    s = np.r_[array[window_len - 1 : 0 : -1], array, array[-2 : -window_len - 1 : -1]]
    if window == "flat":  # moving average
        w = np.ones(window_len, "d")
    else:
        w = getattr(np, window)(window_len)  # safer than eval()
    y = np.convolve(w / w.sum(), s, mode="valid")
    # trim the reflected padding back off
    y = y[int(window_len / 2 - 1) : -int(window_len / 2)]
    offset = len(y) - len(array)  # guard against off-by-one in the trimming
    assert len(array) == len(y[offset:])
    return y[offset:]
def cohen_effect_size(group1, group2):
    """(np.array, np.array) => float
    Cohen's d: the difference between the two group means expressed in
    units of the (size-weighted) pooled standard deviation.
    """
    assert type(group1) == np.ndarray
    assert type(group2) == np.ndarray
    n1, n2 = len(group1), len(group2)
    pooled_var = (n1 * group1.var() + n2 * group2.var()) / (n1 + n2)
    mean_diff = group1.mean() - group2.mean()
    return mean_diff / np.sqrt(pooled_var)
def gen_ascii_symbols(input_file, chars):
    """
    Parse a file of ascii-art glyphs and return a dict mapping each
    character of `chars` to its glyph.

    The input file holds one multi-line glyph per symbol, with glyphs
    separated by at least one blank line; glyphs can be generated with
    e.g. http://www.network-science.de/ascii/
    """
    # input_file = 'ascii_symbols.txt'
    # chars = string.ascii_lowercase+string.ascii_uppercase+'0123456789'
    glyphs = []
    current = ""
    with open(input_file, "r") as handle:
        for line in handle:
            if line != "\n":
                current += line
            elif current:
                # blank line closes the glyph being accumulated
                glyphs.append(current)
                current = ""
    return dict(zip(chars, glyphs))
def gen_ascii_captcha(symbols, length=6, max_h=10, noise_level=0, noise_char="."):
    """
    Pick `length` distinct random symbols, print their ascii-art glyphs
    side by side (optionally peppered with noise characters) and return
    the plain captcha string.

    Parameters:
        symbols     : dict char -> ascii-art glyph (see gen_ascii_symbols).
        length      : number of characters in the captcha.
        max_h       : number of rows rendered.
        noise_level : probability (0..1) of replacing each printed char
                      with a character drawn from `noise_char`.
        noise_char  : character(s) noise is drawn from.
    Example:
        symbols = gen_ascii_symbols(input_file='ascii_symbols.txt',
                    chars = string.ascii_lowercase+string.ascii_uppercase+'0123456789')
        while True:
            captcha = gen_ascii_captcha(symbols, noise_level=0.2)
            x = input('captcha: ')
            if x == captcha:
                print('\ncorrect')
                break
            print('\ninvalid captcha, please retry')
    """
    assert noise_level <= 1
    # sample from the keys of `symbols`: the original sampled from an
    # undefined global `chars`, which raised NameError unless the caller
    # happened to define it.
    captcha = "".join(random.sample(list(symbols), length))
    pool = [symbols[c].split("\n") for c in captcha]
    for n in range(max_h, 0, -1):
        line = ""
        for item in pool:
            try:
                next_line = item[-n]
            except IndexError:
                # glyph shorter than max_h: pad with spaces as wide as the glyph
                next_line = " " * max(len(row) for row in item)
            if noise_level:
                next_line = "".join(
                    c if random.random() > noise_level else random.choice(noise_char)
                    for c in next_line
                )
            line += next_line
        print(line)
    return captcha
def rnd_sample_df(df, n=1, slice_size=1):
    """
    Yield `n` random contiguous slices of `slice_size` rows from df.
    Unlike pandas.DataFrame.sample(), each yielded frame keeps
    consecutive rows.
    """
    assert n > 0 and slice_size > 0
    last_start = len(df) - slice_size
    for _ in range(n):
        start = random.randint(0, last_start)
        yield df.iloc[start : start + slice_size]
def date_to_stamp(d="2012-12-31"):
    """
    Return the UNIX timestamp (local midnight) of an ISO 'YYYY-MM-DD' date.
    """
    year, month, day = (int(part) for part in d.split("-"))
    return time.mktime(datetime.date(year, month, day).timetuple())
def rolling_normalize_df(df, method="min-max", size=30, overlap=5):
    """
    Normalize df window-by-window: slide a window of `size` rows with
    `overlap`, normalize each window independently with normalize_df,
    then average the rows that appear in more than one window.
    Gives a local (per-window) normalization of the values.
    """
    normalized = [
        normalize_df(window, method)
        for window in split_overlap_long(df, size, overlap, is_dataframe=True)
    ]
    merged = pd.concat(normalized)
    return merged.groupby(merged.index).mean()
def normalize_df(df, method="min-max"):
    """
    Return df normalized over ALL of its values at once (not column by
    column): max/min/mean/std come from df.values, which suits frames
    whose columns hold non-independent values.

    Methods implemented:
        'raw'      no normalization
        'min-max'  default
        'norm', 'z-norm', 'sigmoid', 'decimal', 'tanh'
        'softmax'  a transformation rather than a normalization
    Raises ValueError for an unknown method name.
    """
    if type(df) is not pd.core.frame.DataFrame:
        df = pd.DataFrame(df)
    values = df.values
    if method == "raw":
        return df
    if method == "min-max":
        return (df - values.min()) / (values.max() - values.min())
    if method == "norm":
        return (df - values.mean()) / (values.max() - values.mean())
    if method == "z-norm":
        return (df - values.mean()) / values.std()
    if method == "sigmoid":
        peak = values.max()
        return df.apply(lambda x: 1 / (1 + np.exp(-x / peak)))
    if method == "decimal":
        # shift by the number of digits of the largest value
        scale = 10 ** len(str(int(values.max())))
        return df.apply(lambda x: x / scale)
    if method == "tanh":
        return 0.5 * (np.tanh(0.01 * (df - values.mean())) / values.std() + 1)
    if method == "softmax":
        return np.exp(df) / np.sum(np.exp(df))
    raise ValueError(f'"method" not found: {method}')
def merge_dict(dictA, dictB):
    """(dict, dict) => dict
    Recursively merge two dicts: keys unique to either side are copied,
    shared non-dict values are summed, and shared dict values are merged
    recursively.
    Example:
        dictA = {'any key':1, 'point':{'x':2, 'y':3}, 'something':'aaaa'}
        dictB = {'any key':1, 'point':{'x':2, 'y':3, 'z':0, 'even more nested':{'w':99}}, 'extra':8}
        merge_dict(dictA, dictB)
        {'any key': 2,
         'point': {'x': 4, 'y': 6, 'z': 0, 'even more nested': {'w': 99}},
         'something': 'aaaa',
         'extra': 8}
    """
    merged = {}
    for key, value in dictA.items():
        if key not in dictB:
            # unique to dictA
            merged[key] = value
        elif type(value) is dict:
            # shared dict values merge recursively
            merged[key] = merge_dict(value, dictB[key])
        else:
            # shared plain values are summed
            merged[key] = value + dictB[key]
    for key, value in dictB.items():
        if key not in dictA:
            # unique to dictB
            merged[key] = value
    return merged
def png_to_flat_array(img_file):
    """Load an image file as RGBA and return its pixel data as a flat
    1-D np.array view."""
    rgba = Image.open(img_file).convert("RGBA")
    pixels = np.array(rgba)
    return pixels.ravel()
def png_to_vector_matrix(img_file):
    """Load an image file and return its flat RGBA pixel data as a
    1-row np.matrix."""
    flat = png_to_flat_array(img_file)
    return np.matrix(flat)
def TFKMeansCluster(vectors, noofclusters, datatype="uint8"):
    """
    K-Means Clustering using TensorFlow.
    'vectors' should be a n*k 2-D NumPy array, where n is the number
    of vectors of dimensionality k.
    'noofclusters' should be an integer.

    Returns (centroids, assignments) as evaluated NumPy values after a
    fixed 100 EM iterations (no convergence criterion).
    NOTE(review): written against the TensorFlow 1.x graph/session API
    (tf.Session, tf.placeholder, tf.assign) -- presumably needs
    tf.compat.v1 with v2 behaviour disabled on TF 2.x; confirm before use.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    random.shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable((vectors[vector_indices[i]])) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder(datatype, [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = (
            tf.global_variables_initializer()
        )  # deprecated tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for iteration_n in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: np.array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
def xna_calc(sequence, t="dsDNA", p=0):
    """str => dict
    BETA version, works only for dsDNA and ssDNA.
    Return basic "biomath" calculations based on the input sequence:
    keys 'len', 'MW in Daltons', 'MW in ng', 'molecules per ng',
    'ng per billion molecules' and 'moles per ng'.
    Arguments:
        t (type): 'ssDNA' or 'dsDNA'
        p (phosphates): 0, 1, 2
        #in case if ssDNA having 3'P, you should pass 2 i.e., 2 phospates present in 1 dsDNA molecule
    Raises:
        ValueError for characters outside "ATCGN", an unknown t,
        or p outside 0..2.
    NOTE(review): the p error message says "from 0 to 4" although the
    check allows only 0..2. The 'moles per ng' value is computed as
    (MW in ng) * mw, which does not look dimensionally like moles per
    ng -- verify before relying on it. `complement()` used below is
    defined elsewhere in this module.
    """
    r = {}
    # check inputs
    c = Counter(sequence.upper())
    for k in c.keys():
        if k in "ACGNT":
            pass
        else:
            raise ValueError(
                f'Wrong sequence passed: "sequence" contains invalid characters, only "ATCGN" are allowed.'
            )
    if t not in ["ssDNA", "dsDNA"]:
        raise ValueError(
            f'Wrong DNA type passed: "t" can be "ssDNA" or "dsDNA". "{t}" was passed instead.'
        )
    if not 0 <= p <= 2:
        raise ValueError(
            f'Wrong number of 5\'-phosphates passed: "p" must be an integer from 0 to 4. {p} was passed instead.'
        )
    ##Calculate:
    # length
    r["len"] = len(sequence)
    # molecular weight
    # still unsure about what is the best method to do this
    # s = 'ACTGACTGACTATATTCGCGATCGATGCGCTAGCTCGTACGC'
    # bioinformatics.org  : 25986.8  Da
    # Thermo              : 25854.8  Da
    # Promega             : 27720.0  Da
    # MolBioTools         : 25828.77 Da
    # This function       : 25828.86 Da  #Similar to OligoCalc implementation
    # DNA Molecular Weight (typically for synthesized DNA oligonucleotides.
    # The OligoCalc DNA MW calculations assume that there is not a 5' monophosphate)
    # Anhydrous Molecular Weight = (An x 313.21) + (Tn x 304.2) + (Cn x 289.18) + (Gn x 329.21) - 61.96
    # An, Tn, Cn, and Gn are the number of each respective nucleotide within the polynucleotide.
    # The subtraction of 61.96 gm/mole from the oligonucleotide molecular weight takes into account
    # the removal of HPO2 (63.98) and the addition of two hydrogens (2.02).
    # Alternatively, you could think of this of the removal of a phosphate and the addition of a hydroxyl,
    # since this formula calculates the molecular weight of 5' and 3' hydroxylated oligonucleotides.
    # Please note: this calculation works well for synthesized oligonucleotides.
    # If you would like an accurate MW for restriction enzyme cut DNA, please use:
    # Molecular Weight = (An x 313.21) + (Tn x 304.2) + (Cn x 289.18) + (Gn x 329.21) - 61.96 + 79.0
    # The addition of 79.0 gm/mole to the oligonucleotide molecular weight takes into account the 5' monophosphate
    # left by most restriction enzymes.
    # No phosphate is present at the 5' end of strands made by primer extension,
    # so no adjustment to the OligoCalc DNA MW calculation is necessary for primer extensions.
    # That means that for ssDNA, you need to add 79.0 to the value calculated by OligoCalc
    # to get the weight with a 5' monophosphate.
    # Finally, if you need to calculate the molecular weight of phosphorylated dsDNA,
    # don't forget to adjust both strands. You can automatically perform either addition
    # by selecting the Phosphorylated option from the 5' modification select list.
    # Please note that the chemical modifications are only valid for DNA and may not be valid for RNA
    # due to differences in the linkage chemistry, and also due to the lack of the 5' phosphates
    # from synthetic RNA molecules. RNA Molecular Weight (for instance from an RNA transcript).
    # The OligoCalc RNA MW calculations assume that there is a 5' triphosphate on the molecule)
    # Molecular Weight = (An x 329.21) + (Un x 306.17) + (Cn x 305.18) + (Gn x 345.21) + 159.0
    # An, Un, Cn, and Gn are the number of each respective nucleotide within the polynucleotide.
    # Addition of 159.0 gm/mole to the molecular weight takes into account the 5' triphosphate.
    if t == "ssDNA":
        mw = (
            (c["A"] * 313.21)
            + (c["T"] * 304.2)
            + (c["C"] * 289.18)
            + (c["G"] * 329.21)
            + (c["N"] * 303.7)
            - 61.96
        ) + (p * 79.0)
    elif t == "dsDNA":
        mw_F = (
            (c["A"] * 313.21)
            + (c["T"] * 304.2)
            + (c["C"] * 289.18)
            + (c["G"] * 329.21)
            + (c["N"] * 303.7)
            - 61.96
        ) + (p * 79.0)
        d = Counter(complement(sequence.upper()))  # complement sequence
        mw_R = (
            (d["A"] * 313.21)
            + (d["T"] * 304.2)
            + (d["C"] * 289.18)
            + (d["G"] * 329.21)
            + (d["N"] * 303.7)
            - 61.96
        ) + (p * 79.0)
        mw = mw_F + mw_R
    # NOTE(review): the RNA branches below are unreachable -- t was already
    # restricted to ssDNA/dsDNA by the validation above -- and the final
    # branch would *return* (not raise) the ValueError.
    elif t == "ssRNA":
        pass
    elif t == "dsRNA":
        pass
    else:
        return ValueError(f'Nucleic acid type not understood: "{t}"')
    r["MW in Daltons"] = mw
    # in ng
    r["MW in ng"] = mw * 1.6605402e-15
    # molecules in 1ng
    r["molecules per ng"] = 1 / r["MW in ng"]
    # ng for 10e10 molecules
    r["ng per billion molecules"] = (10 ** 9) / r["molecules per ng"]  # (1 billions)
    # moles per ng
    r["moles per ng"] = r["MW in ng"] * mw
    return r
def occur(string, sub):
    """
    Count occurrences of `sub` in `string`, counting overlapping
    matches (unlike str.count).
    Example:
        >> s = 'ACTGGGACGGGGGG'
        >> s.count('GGG')
        3
        >> occur(s,'GGG')
        5
    """
    count = 0
    position = string.find(sub)
    while position != -1:
        count += 1
        # restart the search one character later to allow overlaps
        position = string.find(sub, position + 1)
    return count
def get_prime(n):
    """
    Yield the prime numbers strictly below n, in increasing order.

    The original stepped through candidates two at a time starting
    from 2 -- visiting only EVEN numbers -- so it could never yield
    anything but 2. Now 2 is yielded explicitly and the odd numbers
    are trial-divided up to their square root.
    """
    if n > 2:
        yield 2
    for num in range(3, n, 2):
        if all(num % i != 0 for i in range(2, int(math.sqrt(num)) + 1)):
            yield num
def ssl_fencrypt(infile, outfile):
    """(file_path, file_path) => encrypted_file
    Encrypt `infile` into `outfile` with openssl (AES-256-CBC, base64),
    prompting twice for the password and aborting on mismatch.

    NOTE(review): the password is interpolated into the shell command
    line, so it is visible in the process list while openssl runs.
    """
    from getpass import getpass  # not imported at module level -> NameError before
    pwd = getpass("enter encryption pwd:")
    if getpass("repeat pwd:") == pwd:
        run(
            f"openssl enc -aes-256-cbc -a -salt -pass pass:{pwd} -in {infile} -out {outfile}",
            shell=True,
        )
    else:
        print("passwords don't match.")
def ssl_fdecrypt(infile, outfile):
    """(file_path, file_path) => decrypted_file
    Decrypt `infile` (as produced by ssl_fencrypt) into `outfile` with
    openssl, prompting once for the password.

    NOTE(review): the password is interpolated into the shell command
    line, so it is visible in the process list while openssl runs.
    """
    from getpass import getpass  # not imported at module level -> NameError before
    pwd = getpass("enter decryption pwd:")
    run(
        f"openssl enc -d -aes-256-cbc -a -pass pass:{pwd} -in {infile} -out {outfile}",
        shell=True,
    )
def loop_zip(strA, strB):
    """(str, str) => zip()
    Pair each letter of strA with a letter of strB, recycling strB from
    the start whenever it runs out.
    Example:
        >>> list(loop_zip('ABCDEF', '123'))
        [('A', '1'), ('B', '2'), ('C', '3'), ('D', '1'), ('E', '2'), ('F', '3')]
    """
    assert len(strA) >= len(strB)
    recycled = ""
    idx = 0
    for _ in strA:
        if idx >= len(strB):
            idx = 0  # wrap around and reuse strB from its first letter
        recycled += strB[idx]
        idx += 1
    return zip(list(strA), list(recycled))
def encrypt(msg, pwd):
    """(str, str) => list
    Toy cipher: add the numeric value of each (recycled) password letter
    to each message letter.
    WARNING:
        This is NOT cryptographically secure!!
    """
    if len(msg) < len(pwd):
        raise ValueError(
            "The password is longer than the message. This is not allowed."
        )
    pairs = loop_zip(msg, pwd)
    return [string_to_number(m) + string_to_number(p) for m, p in pairs]
def decrypt(encr, pwd):
    """(list, str) => str
    Inverse of encrypt: subtract the (recycled) password letters' numeric
    values and map the results back to characters.
    WARNING:
        This is NOT cryptographically secure!!
    """
    letters = [
        number_to_string(code - string_to_number(key))
        for code, key in loop_zip(encr, pwd)
    ]
    return "".join(letters)
def convert_mw(mw, to="g"):
    """(int_or_float, str) => float
    Convert a molecular weight in daltons to g, mg, ug, ng or pg.
    Example:
        >> diploid_human_genome_mw = 6_469.66e6 * 660  # length * avg nucleotide weight
        >> convert_mw(diploid_human_genome_mw, to="ng")
        0.0070904661368191195
    """
    # one dalton expressed directly in each target unit
    factors = {
        "g": 1.6605402e-24,
        "mg": 1.6605402e-21,
        "ug": 1.6605402e-18,
        "ng": 1.6605402e-15,
        "pg": 1.6605402e-12,
    }
    if to not in factors:
        raise ValueError(
            f"'to' must be one of ['g','mg','ug','ng','pg'] but '{to}' was passed instead."
        )
    return mw * factors[to]
def snp237(snp_number):
    """int_or_str => tuple
    Return ('chrN', position) of a SNP on GRCh37/hg19, scraped from
    snpedia.com (network access required). Accepts a bare rs number or
    an 'rs'-prefixed string; GRCh38 coordinates are lifted over to hg19
    with pyliftover.

    NOTE(review): if SNPedia's HTML layout changes or the expected rows
    are missing, `reference`/`chromosome`/`position` stay unbound and
    this raises UnboundLocalError.
    """
    # strip an optional leading 'rs' (case-insensitive)
    if type(snp_number) is str \
        and snp_number.lower().startswith('rs'):
        snp_number = snp_number[2:]
    query = f"https://www.snpedia.com/index.php/Rs{snp_number}"
    html = urlopen(query).read().decode("utf-8")
    for line in html.split("\n"):
        if line.startswith('<tbody><tr><td width="90">Reference</td>'):
            reference = line.split('"')[-2]
        elif line.startswith('<tbody><tr><td width="90">Chromosome</td>'):
            chromosome = line.split("<td>")[1].split("<")[0]
        elif line.startswith('<tbody><tr><td width="90">Position</td>'):
            position = int(line.split("<td>")[1].split("<")[0])
            break
    if "GRCh38" in reference:
        # SNPedia reported GRCh38 coordinates: lift them over to hg19
        lo = LiftOver("hg38", "hg19")
        return lo.convert_coordinate(f"chr{chromosome}", position)[0][:2]
    else:
        return f"chr{chromosome}", position
def is_prime(n):
    """Return True if n is a prime number.

    Trial division by odd numbers up to sqrt(n).
    Fix: the original only rejected n == 1, so 0 and every negative
    number were reported as prime; any n < 2 now returns False.
    """
    if n < 2:
        return False  # 0, 1 and negatives are not prime
    if n == 2:
        return True
    if n % 2 == 0:
        return False  # even numbers > 2 are not prime
    max_divisor = math.floor(math.sqrt(n))
    for d in range(3, 1 + max_divisor, 2):
        if n % d == 0:
            return False
    return True
def flatmap(f, items):
    """(callable, iterable) => iterator
    Apply f to every item and chain the resulting iterables together.

    Fix: the original called itertools.imap(), which only exists in
    Python 2 (this file uses f-strings, so it runs on Python 3 where
    the call raised NameError); the builtin map() is the equivalent.
    """
    return chain.from_iterable(map(f, items))
def parse_fasta(fasta_file):
    """file_path => dict
    Return a dict of id:sequence parsed from a FASTA file.

    Ids are the header text after '>'; sequence lines belonging to one
    record are concatenated. Fix: an empty input file now returns {}
    instead of storing a bogus {False: ''} entry.
    NOTE(review): as in the original, a header immediately followed by
    another header (empty record) is silently merged into the previous
    record — confirm inputs never contain empty records.
    """
    d = {}
    _id = False
    seq = ""
    with open(fasta_file, "r") as f:
        for line in f:
            if line.startswith("\n"):
                continue
            if line.startswith(">"):
                if not _id:
                    _id = line.strip()[1:]
                elif _id and seq:
                    # flush the finished record, start the next one
                    d.update({_id: seq})
                    _id = line.strip()[1:]
                    seq = ""
            else:
                seq += line.strip()
    if _id:  # guard: empty files produce an empty dict
        d.update({_id: seq})
    return d
def get_fasta_stats(fasta_file):
    """file_path => dict
    Return length and base counts for every sequence in a FASTA file.

    Result maps sequence id -> {'length','A','T','C','G','N'}; sequence
    lines are upper-cased while parsing.
    Fix: the original stored every sequence except the LAST one as a
    raw string (the stats dict was only built at the final flush);
    stats are now computed uniformly for all records. Also, an empty
    input file returns {} instead of a bogus {False: ...} entry.
    """

    def _stats(seq):
        # per-sequence summary used for every flushed record
        return {
            "length": len(seq),
            "A": seq.count("A"),
            "T": seq.count("T"),
            "C": seq.count("C"),
            "G": seq.count("G"),
            "N": seq.count("N"),
        }

    d = {}
    _id = False
    seq = ""
    with open(fasta_file, "r") as f:
        for line in f:
            if line.startswith("\n"):
                continue
            if line.startswith(">"):
                if not _id:
                    _id = line[1:].strip()
                elif _id and seq:
                    d.update({_id: _stats(seq)})
                    _id = line[1:].strip()
                    seq = ""
            else:
                seq += line.strip().upper()
    if _id:  # guard against empty input
        d.update({_id: _stats(seq)})
    return d
def quick_align(
    reference, sample, matrix=matlist.blosum62, gap_open=-10, gap_extend=-0.5
):
    """
    Return a binary score list for the top pairwise global alignment:
    1 where the aligned sample base matches the aligned reference
    base, 0 otherwise.
    """
    alignments = pairwise2.align.globalds(reference, sample, matrix, gap_open, gap_extend)
    top = alignments[0]
    aligned_reference, aligned_sample = top[0], top[1]
    return [
        1 if ref_base == smp_base else 0
        for ref_base, smp_base in zip(aligned_reference, aligned_sample)
    ]
def vp(var_name, var_dict=globals(), sep=" : "):
    """(str, dict, str) => None
    Variable Print, a fast way to print out a variable's value.

    Looks up *var_name* in *var_dict* (this module's globals by
    default; note the default is captured once, at definition time)
    and prints "name<sep>value", or "<name> not found!" when missing.

    Fix: the original indexed an undefined name 'g' instead of
    'var_dict', so every call fell into the except branch; the bare
    'except:' is also narrowed to KeyError.

    >>> scale = 0.35
    >>> vp('scale')
    scale : 0.35
    """
    try:
        print(f"{var_name}{sep}{var_dict[var_name]}")
    except KeyError:
        print(f"{var_name} not found!")
def view_matrix(arrays):
    """list_of_arrays => print
    Pretty-print a list of rows: each row, a separator line, the index
    and length of every row, and finally the matrix dimensions (the
    column count is taken from the last row).
    """
    for row in arrays:
        print(row)
    print("=========")
    for idx, row in enumerate(arrays):
        print(idx, len(row))
    print(f"row:{len(arrays)}\ncol:{len(row)}")
def fill_matrix(arrays, z=0):
    """(list_of_arrays, any) => None
    Pad, in place, every array shorter than the longest one with the
    filler value *z*, so all rows end up the same length.
    """
    target = max(len(row) for row in arrays)
    for idx, row in enumerate(arrays):
        shortfall = target - len(row)
        if shortfall:
            arrays[idx] = np.append(row, [z] * shortfall)
def get_size(obj_0):
    """obj => int
    Recursively iterate to sum size of object & members (in bytes).

    Walks tuples/lists/sets/deques, mappings (or anything exposing
    .items()), plus instance __dict__ and __slots__ attributes; str,
    bytes, numbers, range and bytearray are treated as leaves. Objects
    reachable through several references are counted once per call.
    Adapted from http://stackoverflow.com/questions/449560/how-do-i-determine-the-size-of-an-object-in-python
    """

    def inner(obj, _seen_ids=set()):
        # NOTE: the mutable default is safe here — 'inner' is redefined
        # on every get_size() call, so each call gets a fresh set.
        zero_depth_bases = (str, bytes, Number, range, bytearray)
        obj_id = id(obj)
        if obj_id in _seen_ids:
            return 0  # already counted via another reference
        _seen_ids.add(obj_id)
        size = sys.getsizeof(obj)
        if isinstance(obj, zero_depth_bases):
            pass  # bypass remaining control flow and return
        elif isinstance(obj, (tuple, list, Set, deque)):
            size += sum(inner(i) for i in obj)
        elif isinstance(obj, Mapping) or hasattr(obj, "items"):
            size += sum(inner(k) + inner(v) for k, v in getattr(obj, "items")())
        # Check for custom object instances - may subclass above too
        if hasattr(obj, "__dict__"):
            size += inner(vars(obj))
        if hasattr(obj, "__slots__"):  # can have __slots__ with __dict__
            size += sum(
                inner(getattr(obj, s)) for s in obj.__slots__ if hasattr(obj, s)
            )
        return size

    return inner(obj_0)
def total_size(o, handlers={}, verbose=False):
    """(object, dict, bool) => int
    Approximate the total memory footprint of *o* and everything it
    contains, recursing through tuple, list, deque, dict, set and
    frozenset (and their subclasses). Support extra container types by
    passing ``handlers={SomeContainerClass: iter_contents}``; user
    handlers take precedence over the built-in ones. Shared objects
    are counted once. With verbose=True every visited object is
    printed. Based on Raymond Hettinger's classic recipe.
    """
    builtin_handlers = {
        tuple: iter,
        list: iter,
        deque: iter,
        dict: lambda d: chain.from_iterable(d.items()),
        set: iter,
        frozenset: iter,
    }
    builtin_handlers.update(handlers)  # user handlers take precedence
    visited = set()  # ids of objects already counted
    fallback = sys.getsizeof(0)  # estimate for objects without __sizeof__

    def sizeof(obj):
        if id(obj) in visited:  # never count the same object twice
            return 0
        visited.add(id(obj))
        total = sys.getsizeof(obj, fallback)
        if verbose:
            print(total, type(obj), repr(obj))
        for container_type, get_contents in builtin_handlers.items():
            if isinstance(obj, container_type):
                total += sum(map(sizeof, get_contents(obj)))
                break
        return total

    return sizeof(o)
def center(pattern):
    """np.array => np.array
    Subtract the mean from every point, yielding a zero-mean pattern.
    >>> center(np.array([1., 2., 3.]))
    array([-1.,  0.,  1.])
    """
    mean_value = np.mean(pattern)
    return pattern - mean_value
def rescale(pattern):
    """np.array => np.array
    Linearly map the array onto [0, 1]: the minimum becomes 0 and the
    maximum becomes 1.
    >>> rescale(np.array([1, 2, 3]))
    array([ 0. ,  0.5,  1. ])
    """
    low = pattern.min()
    high = pattern.max()
    return (pattern - low) / (high - low)
def standardize(pattern):
    """np.array => np.array
    Return the z-scores of the array: (value - mean) / std.
    """
    mean_value = np.mean(pattern)
    std_value = np.std(pattern)
    return (pattern - mean_value) / std_value
def normalize(pattern):
    """np.array => np.array
    Scale the array to unit Euclidean length (L2 norm == 1).
    >>> normalize(np.array([3., 4.]))
    array([0.6, 0.8])
    """
    norm = np.linalg.norm(pattern)
    return pattern / norm
def gen_patterns(data, length, ptype="all"):
    """(array, int, str) => dict
    Generate patterns of a given length by normalizing consecutive
    slices of data (the window slides one position at a time).
    Return {pattern_type: {start_index: np.array}} for the four types
    'std', 'norm', 'scale' and 'center'.

    NOTE(review): only ptype="all" is implemented; any other value
    leaves 'results' without the type keys and the update loop raises
    KeyError. Also, window start positions run over range(length), not
    over the whole data — confirm this limit is intentional.

    >>> gen_patterns([1,2,3,4,5,4,3,2,1], 3)['scale'][0]
    array([ 0. ,  0.5,  1. ])
    """
    results = {}
    ptypes = ["std", "norm", "scale", "center"]
    if ptype == "all":  # to do: select specific ptypes
        for t in ptypes:
            results.update({t: {}})
    for n in range(length):
        if n + length > len(data):
            break  # the window would run past the end of the data
        raw = np.array(data[n : n + length])
        # each slice is normalized in four different ways
        partial = {
            "std": standardize(raw),
            "norm": normalize(raw),
            "scale": rescale(raw),
            "center": center(raw),
        }
        for t in ptypes:
            results[t].update({n: partial[t]})
    return results
def delta_percent(a, b, warnings=False):
    """(float, float, bool) => float
    Return the percent change from a to b, using |a| as the base.
    A result of exactly 0.0 — and any computation error, e.g. a == 0 —
    is reported as 1e-09 so downstream log/ratio code never sees -inf.
    >>> delta_percent(20, 22)
    10.0
    >>> delta_percent(1, 1)
    1e-09
    >>> delta_percent(10, 9)
    -10.0
    """
    try:
        change = ((float(b) - a) / abs(a)) * 100
    except Exception as e:
        if warnings:
            print(f"Exception raised by delta_percent(): {e}")
        return 0.000000001  # avoid -inf
    return change if change != 0.0 else 0.000000001
def is_similar(array1, array2, t=0.1):
    """(array, array, float) => bool
    True when the arrays have equal length and every pair of
    corresponding points differs by at most t.
    """
    if len(array1) != len(array2):
        return False
    return all(abs(p - q) <= t for p, q in zip(array1, array2))
def cluster_patterns(pattern_list, t):
    """([array, array, ...], float) => dict
    Map each pattern index to the indices of all OTHER patterns that
    are similar to it (see is_similar); "t" is the inverse of a
    similarity threshold, i.e. the max point-wise discrepancy allowed.
    Indices with no similar pattern map to an empty list.
    >>> a = [1,2,3,4,5,6,5,4,3,2,1]
    >>> patterns = [a, [n+1 for n in a], [n+5 for n in a], [n+6 for n in a]]
    >>> cluster_patterns(patterns, t=2)
    {0: [1], 1: [0], 2: [3], 3: [2]}
    """
    result = {}
    for idx, candidate in enumerate(pattern_list):
        matches = [
            other_idx
            for other_idx, other in enumerate(pattern_list)
            if other_idx != idx and is_similar(candidate, other, t=t)
        ]
        result[idx] = matches
    return result
def stamp_to_date(stamp, time="utc"):
    """(int_or_float, str) => datetime.datetime
    Convert a UNIX timestamp to a naive datetime, either in UTC
    (time='utc') or in the local timezone (time='local').
    Raises ValueError for any other mode.
    """
    mode = time.lower()
    if mode == "utc":
        return datetime.datetime.utcfromtimestamp(stamp)
    if mode == "local":
        return datetime.datetime.fromtimestamp(stamp)
    raise ValueError('"time" must be "utc" or "local"')
def future_value(interest, period, cash):
    """(float, int, int_or_float) => float
    Compound *cash* once per period at the fixed *interest* rate,
    which must lie in [0, 1].
    >>> future_value(0.5, 1, 1)
    1.5
    """
    if not 0 <= interest <= 1:
        raise ValueError('"interest" must be a float between 0 and 1')
    balance = cash
    for _ in range(period):
        balance += balance * interest
    return balance
def entropy(sequence, verbose=False):
    """(string, bool) => float
    Return the Shannon entropy of a string: the minimum average number
    of bits per symbol required to encode it. The theoretical limit
    for data compression is entropy * len(sequence) bits.
    With verbose=True the intermediate values are printed.
    """
    letters = list(sequence)
    alphabet = list(set(letters))  # distinct symbols in the string
    # relative frequency of each symbol, in alphabet order
    frequencies = []
    for symbol in alphabet:
        occurrences = letters.count(symbol)
        frequencies.append(float(occurrences) / len(letters))
    # Shannon entropy: -sum(p * log2(p))
    ent = 0.0
    for freq in frequencies:
        ent += freq * math.log(freq, 2)
    ent = -ent
    if verbose:
        print("Input string:")
        print(sequence)
        print()
        print("Alphabet of symbols in the string:")
        print(alphabet)
        print()
        print("Frequencies of alphabet symbols:")
        print(frequencies)
        print()
        print("Shannon entropy:")
        print(ent)
        print("Minimum number of bits required to encode each symbol:")
        print(int(math.ceil(ent)))
    return ent
def quick_entropy(sequence):
    """string => float
    Return the Shannon entropy of a string (compact version of
    entropy()): the minimum average number of bits per symbol required
    for encoding; entropy * len(sequence) is the theoretical limit for
    data compression.
    """
    alphabet = set(sequence)  # distinct symbols in the string
    frequencies = [sequence.count(symbol) / len(sequence) for symbol in alphabet]
    return -sum(freq * math.log(freq, 2) for freq in frequencies)
def percent_of(total, fraction):
    """(int_or_float, int_or_float) => float
    Return 'fraction' expressed as a percentage of 'total'.

    NaN inputs propagate: if either argument is NaN the result is NaN.
    Fix: the NaN check now runs BEFORE the positivity assert — in the
    original, a NaN total failed ``assert total > 0`` first (NaN
    comparisons are False), making the NaN branch unreachable; the
    bare name 'nan' is also replaced with np.nan.

    Examples:
    percent_of(150, 75)
    >>> 50.0
    percent_of(30, 90)
    >>> 300.0
    """
    if np.isnan(total) or np.isnan(fraction):
        return np.nan
    assert total > 0
    return (100 * fraction) / total
def buzz(sequence, noise=0.01):
    """(string, float) => string
    Return *sequence* with random point mutations: with probability
    *noise* per character, the character is replaced by a random pick
    from the sequence's own alphabet extended with the special actions
    'del' (drop the character) and 'dup' (duplicate it).
    A falsy noise returns the sequence unchanged.

    Fix: random.sample() on a set is a TypeError on Python 3.11+; the
    candidate symbols are now a sorted list (sorting also makes seeded
    runs reproducible across processes).
    """
    if not noise:
        return sequence
    bits = sorted(set(sequence) | {"del", "dup"})
    mutated = []
    for char in sequence:
        if random.random() <= noise:
            pick = random.sample(bits, 1)[0]
            if pick == "del":
                continue
            elif pick == "dup":
                mutated.append(2 * char)
            else:
                mutated.append(pick)
        else:
            mutated.append(char)
    return "".join(mutated)
def simple_consensus(aligned_sequences_file):
    """file => string
    Build a majority-vote consensus from a FASTA file of pre-aligned
    (equal-length) sequences, e.g. a muscle alignment; gap characters
    ('-') are stripped from the returned consensus.
    Raises AssertionError if the sequences differ in length.
    """
    sequences = []
    current = False
    with open(aligned_sequences_file, "r") as handle:
        for line in handle:
            if line.startswith("\n"):
                continue
            if line.startswith(">"):
                if current:
                    sequences.append(current)
                current = ""
            else:
                current += line.strip()
    sequences.append(current)
    # all aligned sequences must have the same length
    for aligned in sequences:
        assert len(aligned) == len(sequences[0])
    # majority vote, column by column
    consensus_chars = []
    for column in range(len(sequences[0])):
        votes = Counter()
        for aligned in sequences:
            votes.update(aligned[column])
        consensus_chars.append(votes.most_common()[0][0])
    return "".join(consensus_chars).replace("-", "")
def print_sbar(n, m, s="|#.|", size=30, message=""):
    """(int, int, string, int, string) => None
    Draw an in-place progress bar on stdout. The four characters of
    *s* are: left border, filled tick, empty tick, right border.
    Example:
        range_limit = 1000
        for n in range(range_limit):
            print_sbar(n+1, m=range_limit)
            time.sleep(0.1)
    """
    # rescale progress n/m onto the bar width
    if m != size:
        n = (n * size) / m
        m = size
    filled = int(n) * s[1]
    empty = (int(m) - int(n)) * s[2]
    percent = round(n / (int(m)) * 100, 1)
    if percent >= 100:  # clamp rounding overflow
        percent = 100.0
    sys.stdout.write(f"\r{message}{s[0]}{filled}{empty}{s[3]} {percent}% ")
    sys.stdout.flush()
def get_hash(a_string, algorithm="md5"):
    """str => str
    Return the hex digest of a string under the chosen algorithm,
    either 'md5' or 'sha256'; any other name raises ValueError.
    .. code-block:: python
        >>> get_hash('prova','md5')
        '189bbbb00c5f1fb7fba9ad9285f193d1'
    """
    digests = {
        "md5": hashlib.md5,
        "sha256": hashlib.sha256,
    }
    if algorithm not in digests:
        raise ValueError("algorithm {} not found".format(algorithm))
    return digests[algorithm](a_string.encode()).hexdigest()
def get_first_transcript_by_gene_name(gene_name, data=EnsemblRelease(75), genome_id=None):
    """str => str
    Return the id of the main trascript for a given gene.

    Scrapes the Ensembl gene summary page for the chosen assembly.
    NOTE(review): the 'data' default (EnsemblRelease(75)) is evaluated
    once at import time; 'genome_id' has no usable default and must be
    passed explicitly ('grch37' or 'grch38'). Returns None if no table
    row matches — confirm against the live page layout.
    """
    assert genome_id in ['grch37','grch38'], 'please specify genome_id'
    gene = data.genes_by_name(gene_name)
    # gene id and location are parsed from the string representation of
    # the pyensembl Gene object — presumably "Gene(gene_id=..., ...)";
    # verify against the installed pyensembl version
    gene_id = str(gene[0]).split(",")[0].split("=")[-1]
    gene_location = str(gene[0]).split("=")[-1].strip(")")
    url = f"http://{genome_id}.ensembl.org/Homo_sapiens/Gene/Summary?db=core;g={gene_id};r={gene_location}"
    for line in urlopen(url):
        if '<tbody><tr><td class="bold">' in line:
            return line.split('">')[2].split("</a>")[0]
def get_exons_coord_by_gene_name(gene_name, data=EnsemblRelease(75)):
    """str => OrderedDict({'exon_id': (contig, start, end)})
    Return an OrderedDict mapping each exon id of the gene to its
    genomic coordinates taken from the pyensembl database.

    NOTE(review): this definition is shadowed by the second
    get_exons_coord_by_gene_name defined immediately below, so module
    callers only ever see that later version. 'gene_location' and
    'gene_transcript' are computed here but never used.
    """
    gene = data.genes_by_name(gene_name)
    # gene id parsed from the repr of the pyensembl Gene object
    gene_id = str(gene[0]).split(",")[0].split("=")[-1]
    gene_location = str(gene[0]).split("=")[-1].strip(")")
    gene_transcript = get_first_transcript_by_gene_name(gene_name).split(".")[0]
    table = OrderedDict()
    for exon_id in data.exon_ids_of_gene_id(gene_id):
        exon = data.exon_by_id(exon_id)
        coordinates = (exon.contig, exon.start, exon.end)
        table.update({exon_id: coordinates})
    return table
def get_exons_coord_by_gene_name(gene_name, data=EnsemblRelease(75), genome_id=None):
    """string => OrderedDict
    Return an OrderedDict mapping each exon id of the gene to the
    table cells scraped for it from the Ensembl transcript Exons page.

    NOTE(review): this redefinition shadows the identically-named
    function above. 'data' default is evaluated once at import time;
    genome_id must be passed explicitly. Also,
    get_first_transcript_by_gene_name is called WITHOUT genome_id,
    which its own assert requires — verify this call path.
    .. code-block:: python
        >>> table = get_exons_coord_by_gene_name('TP53')
        >>> for k,v in table.items():
        ...    print(k,v)
        ENSE00002419584 ['7,579,721', '7,579,700']
    """
    assert genome_id in ['grch37','grch38'], 'please specify genome_id'
    gene = data.genes_by_name(gene_name)
    # gene id and location parsed from the repr of the pyensembl Gene
    gene_id = str(gene[0]).split(",")[0].split("=")[-1]
    gene_location = str(gene[0]).split("=")[-1].strip(")")
    gene_transcript = get_first_transcript_by_gene_name(gene_name).split(".")[0]
    url = f"http://{genome_id}.ensembl.org/Homo_sapiens/Transcript/Exons?db=core;g={gene_id};r={gene_location};t={gene_transcript}"
    str_html = get_html(url)
    # rebuild the page line by line, skipping lines that cannot be
    # represented as str (legacy Python 2 guard)
    html = ""
    for line in str_html.split("\n"):
        try:
            # print line
            html += str(line) + "\n"
        except UnicodeEncodeError:
            pass
    blocks = html.split("\n")
    table = OrderedDict()
    # collect, for every exon id, the table cells of any row mentioning it
    for exon_id in data.exon_ids_of_gene_id(gene_id):
        for i, txt in enumerate(blocks):
            if exon_id in txt:
                if exon_id not in table:
                    table.update({exon_id: []})
                for item in txt.split('<td style="width:10%;text-align:left">')[1:-1]:
                    table[exon_id].append(item.split("</td>")[0])
    return table
def split_overlap(seq, size, overlap, is_dataframe=False):
    """(seq, int, int, bool) => [[...], [...], ...]
    Split a sequence into chunks of *size* elements, consecutive
    chunks sharing *overlap* elements. Works on strings too; set
    ``is_dataframe=True`` to split a pandas.DataFrame. Efficient for
    short sequences (len(seq) <= 100).

    Fix: overlap must now be smaller than size — the original looped
    forever when overlap >= size because the remaining sequence never
    shrank; the stray quote in the size/overlap error message is also
    removed.

    Examples:
    >>> split_overlap(seq=list(range(10)),size=3,overlap=2)
    [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]]
    """
    if size < 1 or overlap < 0:
        raise ValueError("size must be >= 1 and overlap >= 0")
    if overlap >= size:
        raise ValueError("overlap must be smaller than size")
    step = size - overlap
    result = []
    if is_dataframe:
        while len(seq) > size:
            result.append(seq.iloc[:size])
            seq = seq.iloc[step:]
        result.append(seq)  # final (possibly short) chunk
    else:
        while len(seq) > size:
            result.append(seq[:size])
            seq = seq[step:]
        result.append(seq)  # final (possibly short) chunk
    return result
def split_overlap_long(seq, size, overlap, is_dataframe=False):
    """(seq, int, int, bool) => generator
    Lazily split a sequence into chunks of *size* elements with
    *overlap* shared elements; very efficient for long sequences
    (len(seq) > 100). Set ``is_dataframe=True`` for pandas DataFrames.
    https://stackoverflow.com/questions/48381870/a-better-way-to-split-a-sequence-in-chunks-with-overlaps

    Fix: overlap >= size now raises a clear ValueError (the original
    surfaced as the cryptic "range() arg 3 must not be zero", or
    silently yielded nothing when overlap > size).

    Examples:
    >>> list(split_overlap_long(seq=list(range(10)),size=3,overlap=2))
    [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]]
    """
    if size < 1 or overlap < 0:
        raise ValueError("size must be >= 1 and overlap >= 0")
    if overlap >= size:
        raise ValueError("overlap must be smaller than size")
    step = size - overlap
    if is_dataframe:
        for start in range(0, len(seq) - overlap, step):
            yield seq.iloc[start : start + size]
    else:
        for start in range(0, len(seq) - overlap, step):
            yield seq[start : start + size]
def itr_split_overlap(iterable, size, overlap):
    """(iterable, int, int) => generator
    Similar to split_overlap_long() but works on ANY iterable; chunks
    are emitted as tuples. Less efficient for long concrete sequences,
    but handles potentially infinite iterables via a deque window.
    https://stackoverflow.com/questions/48381870/a-better-way-to-split-a-sequence-in-chunks-with-overlaps
    Warning: for range() and symilar, it behaves differently than split_overlap() and split_overlap_long()
    Examples:
    >>> list(itr_split_overlap(iterable=range(10),size=3,overlap=2))
    [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6), (5, 6, 7), (6, 7, 8), (7, 8, 9)]
    """
    if size < 1 or overlap < 0:
        raise ValueError("size must be >= 1 and overlap >= 0")
    itr = iter(iterable)
    # prime the sliding window with the first 'size' elements
    buf = deque(islice(itr, size), maxlen=size)
    chunk = None
    # each iteration pulls the next non-overlapping slice; the previous
    # full window is emitted before the window advances
    for chunk in iter(lambda: tuple(islice(itr, size - overlap)), ()):
        yield tuple(buf)
        buf.extend(chunk)
    # flush the tail: trim the final window so only the yet-unseen part
    # (plus the required overlap) is emitted
    rest = tuple(buf)
    if chunk:
        rest = rest[size - overlap - len(chunk) :]
    yield rest
def split_overlap_df(df, size, overlap):
    """(df, int, int) => generator
    Yield successive DataFrame chunks of *size* rows, with consecutive
    chunks sharing *overlap* rows.
    """
    if size < 1 or overlap < 0:
        raise ValueError("size must be >= 1 and overlap >= 0")
    step = size - overlap
    for start in range(0, len(df) - overlap, step):
        yield df.iloc[start : start + size]
def reorder_dict(d, keys):
    """(dict, list) => OrderedDict
    Move the entries of *d* into a new OrderedDict following the order
    of *keys*. Entries are popped out of *d* one at a time (emptying it
    as a side effect) so no second copy of the values is held (save RAM!).
    """
    reordered = OrderedDict()
    for key in keys:
        reordered[key] = d.pop(key)
    return reordered
# test = OrderedDict({'1':1,'2':2,'4':4,'3':3})
# print(test)
# test2 = reorder_dict(test,['1','2','3','4'])
# print(test)
# print(test2)
# >>> OrderedDict([('2', 2), ('3', 3), ('4', 4), ('1', 1)])
# >>> OrderedDict()
# >>> OrderedDict([('1', 1), ('2', 2), ('3', 3), ('4', 4)])
def in_between(one_number, two_numbers):
    """(number, list) => bool
    True when one_number lies within the closed interval spanned by
    the first two values of two_numbers (accepted in either order).
    """
    if two_numbers[0] >= two_numbers[1]:
        two_numbers = sorted(two_numbers)
    return two_numbers[0] <= one_number <= two_numbers[1]
def is_overlapping(svA, svB, limit=0.9):
    """(list, list, float) => bool
    Check if two SV ovelaps for at least 90% (limit=0.9).
    svX = [chr1, brk1, chr2, brk2]

    Only intra-chromosomal SVs on the same chromosome are compared;
    everything else returns False immediately.
    NOTE(review): in the brk1 > brk2 branch for svA (below) the
    breakpoints are NOT converted with int(), unlike every other
    branch — string inputs in that order would break the arithmetic;
    confirm callers always pass ints.
    """
    # Step 1.
    # Select the breaks in order to have lower coordinates first
    if int(svA[1]) <= int(svA[3]):
        chr1_A = svA[0]
        brk1_A = int(svA[1])
        chr2_A = svA[2]
        brk2_A = int(svA[3])
    else:
        chr2_A = svA[0]
        brk2_A = svA[1]
        chr1_A = svA[2]
        brk1_A = svA[3]
    if int(svB[1]) <= int(svB[3]):
        chr1_B = svB[0]
        brk1_B = int(svB[1])
        chr2_B = svB[2]
        brk2_B = int(svB[3])
    else:
        chr2_B = svB[0]
        brk2_B = int(svB[1])
        chr1_B = svB[2]
        brk1_B = int(svB[3])
    # Step 2.
    # Determine who is the longest
    # Return False immediately if the chromosomes are not the same.
    # This computation is reasonable only for sv on the same chormosome.
    if chr1_A == chr2_A and chr1_B == chr2_B and chr1_A == chr1_B:
        len_A = brk2_A - brk1_A
        len_B = brk2_B - brk1_B
        if len_A >= len_B:
            len_reference = len_A
            len_sample = len_B
        else:
            len_reference = len_B
            len_sample = len_A
        limit = round(
            len_reference * limit
        )  # this is the minimum overlap the two sv need to share
        # to be considered overlapping
        # if the sample is smaller then the limit then there is no need to go further.
        # the sample segment will never share enough similarity with the reference.
        if len_sample < limit:
            return False
    else:
        return False
    # Step 3.
    # Determine if there is an overlap
    # >> There is an overlap if a least one of the break of an sv is in beetween the two breals of the other sv.
    overlapping = False
    for b in [brk1_A, brk2_A]:
        if in_between(b, [brk1_B, brk2_B]):
            overlapping = True
    for b in [brk1_B, brk2_B]:
        if in_between(b, [brk1_A, brk2_A]):
            overlapping = True
    if not overlapping:
        return False
    # Step 4.
    # Determine the lenght of the ovelapping part
    # easy case: if the points are all different then, if I sort the points,
    # the overlap is the region between points[1] and points[2]
    #    |-----------------|                    |---------------------|
    #        |--------------|              |-------------|
    points = sorted([brk1_A, brk2_A, brk1_B, brk2_B])
    if len(set(points)) == 4:  # the points are all different
        overlap = points[2] - points[1]
    elif len(set(points)) == 3:  # one point is in common
        # |-----------------|
        # |--------------|
        if points[0] == points[1]:
            overlap = points[3] - points[2]
        # |---------------------|
        #          |-------------|
        if points[2] == points[3]:
            overlap = points[2] - points[1]
        # |-----------------|
        #                   |-------------|
        if points[1] == points[2]:
            return False  # there is no overlap
    else:
        # |-----------------|
        # |-----------------|
        return True  # if two points are in common, then it is the very same sv
    if overlap >= limit:
        return True
    else:
        return False
def load_obj(file):
    """
    Load a pickled object from *file*; return False on any failure
    (missing file, corrupt data, ...).
    Be aware that pickle is version dependent,
    i.e. objects dumped in Py3 cannot be loaded with Py2.

    Fix: the bare 'except:' also swallowed KeyboardInterrupt and
    SystemExit; it is narrowed to Exception.
    """
    try:
        with open(file, "rb") as f:
            return pickle.load(f)
    except Exception:
        return False
def save_obj(obj, file):
    """
    Dump *obj* to *file* with pickle; return True on success, False
    (with an error message) otherwise.
    Be aware that pickle is version dependent,
    i.e. objects dumped in Py3 cannot be loaded with Py2.

    Fix: the bare 'except:' also swallowed KeyboardInterrupt and
    SystemExit (Ctrl-C was reported as a failed save); it is narrowed
    to Exception.
    """
    try:
        with open(file, "wb") as f:
            pickle.dump(obj, f)
        print("Object saved to {}".format(file))
        return True
    except Exception:
        print("Error: Object not saved...")
        return False
# save_obj(hotspots_review,'hotspots_review_CIS.txt')
def query_encode(chromosome, start, end):
    """
    Queries ENCODE via http://promoter.bx.psu.edu/ENCODE/search_human.php
    Parses the output and returns a dictionary of CIS elements found and the relative location.

    Returns {element_name: ['chrN:start-end', ...]} on success, False
    on any parsing/network failure (the failure is printed).
    NOTE(review): the scrape assumes hg19 and a fixed page layout —
    verify against the live service before relying on the output.
    """
    ## Regex setup
    re1 = "(chr{})".format(chromosome)  # The specific chromosome
    re2 = "(:)"  # Any Single Character ':'
    re3 = "(\\d+)"  # Integer
    re4 = "(-)"  # Any Single Character '-'
    re5 = "(\\d+)"  # Integer
    rg = re.compile(re1 + re2 + re3 + re4 + re5, re.IGNORECASE | re.DOTALL)
    ## Query ENCODE
    std_link = (
        "http://promoter.bx.psu.edu/ENCODE/get_human_cis_region.php?assembly=hg19&"
    )
    query = std_link + "chr=chr{}&start={}&end={}".format(chromosome, start, end)
    print(query)
    html_doc = urlopen(query)
    html_txt = BeautifulSoup(html_doc, "html.parser").get_text()
    data = html_txt.split("\n")
    ## Parse the output
    parsed = {}
    # the line two rows above each "Coordinate" header names the element
    coordinates = [i for i, item_ in enumerate(data) if item_.strip() == "Coordinate"]
    elements = [data[i - 2].split(" ")[-1].replace(": ", "") for i in coordinates]
    blocks = [item for item in data if item[:3] == "chr"]
    print(elements)
    try:
        i = 0
        for item in elements:
            # print(i)
            try:
                txt = blocks[i]
                # print(txt)
                # every chrN:start-end coordinate in the block
                m = rg.findall(txt)
                bins = ["".join(item) for item in m]
                parsed.update({item: bins})
                i += 1
                print("found {}".format(item))
            except:
                # element with no coordinate block: leave it out
                print("the field {} was empty".format(item))
        return parsed
    except Exception as e:
        print("ENCODE query falied on chr{}:{}-{}".format(chromosome, start, end))
        print(e)
        return False
def compare_patterns(pattA, pattB):
    """(np.array, np.array) => float
    Point-by-point "raw similarity score": the mean over all positions
    of (100 - |percent change from pattA[i] to pattB[i]|).
    You may want to center the two patterns before comparing them.
    Raises ValueError when the patterns differ in length.
    >>> a = np.array([1,2,3,4,5,6,5,4,3,2,1])
    >>> compare_patterns(a, a)
    99.999999999
    """
    if len(pattA) != len(pattB):
        raise ValueError('"pattA" and "pattB" must have same length.')
    deltas = [100 - abs(delta_percent(a, b)) for a, b in zip(pattA, pattB)]
    return sum(deltas) / len(pattA)
def compare_bins(dict_A, dict_B):
    """(dict, dict) => (OrderedDict, OrderedDict, OrderedDict)
    Compare two {chromosome: [bins]} dicts over chromosomes 1-22, X, Y
    and return the shared bins, the bins unique to A and the bins
    unique to B, each as an OrderedDict keyed on chromosome.

    Fix: a chromosome missing from BOTH inputs no longer raises
    KeyError (the original's 'elif k not in dict_A' branch indexed
    dict_B[k] unconditionally); such chromosomes now map to [] in all
    three results.

    The input dicts are shaped like:
    OrderedDict([('1', ['23280000-23290000', ...]),
                 ('2', ['15970000-15980000', ...]), ...])
    """
    chrms = [str(x) for x in range(1, 23)] + ["X", "Y"]
    shared = OrderedDict()
    unique_A = OrderedDict()
    unique_B = OrderedDict()
    for k in chrms:
        shared[k] = []
        unique_A[k] = []
        unique_B[k] = []
        in_A = k in dict_A
        in_B = k in dict_B
        if in_A and in_B:
            # bins present in both chromosome lists are shared
            for bin_ in dict_A[k]:
                if bin_ in dict_B[k]:
                    shared[k].append(bin_)
                else:
                    unique_A[k].append(bin_)
            for bin_ in dict_B[k]:
                if bin_ not in shared[k]:
                    unique_B[k].append(bin_)
        elif in_B:  # chromosome present only in B
            unique_B[k] = list(dict_B[k])
        elif in_A:  # chromosome present only in A
            unique_A[k] = list(dict_A[k])
    return shared, unique_A, unique_B
# To manage heavy files
def yield_file(infile):
    """file_path => generator
    Yield stripped lines from *infile*, skipping comment lines ('#'),
    blank lines and lines that start with a space.
    """
    with open(infile, "r") as handle:
        for raw in handle:
            if raw[0] in ["#", "\n", " ", ""]:
                continue
            yield raw.strip()
# Downaload sequence from ensembl
def sequence_from_coordinates(chromosome, strand, start, end, ref_genome=37):
    """
    Download the nucleotide sequence from the gene_name.

    Fetches the region chromosome:start-end from NCBI Entrez using the
    RefSeq accession of the chromosome for the chosen assembly
    (ref_genome 37 or 38); strand is "1" for plus, "2" for minus.
    Returns the sequence as a str, or False when NCBI has no sequence.
    NOTE(review): requires network access; a ref_genome other than
    37/38 leaves NCBI_IDS unbound and raises NameError.
    """
    Entrez.email = "a.marcozzi@umcutrecht.nl"  # Always tell NCBI who you are
    if int(ref_genome) == 37:
        # GRCh37 from http://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.25/#/def_asm_Primary_Assembly
        NCBI_IDS = {
            "1": "NC_000001.10",
            "2": "NC_000002.11",
            "3": "NC_000003.11",
            "4": "NC_000004.11",
            "5": "NC_000005.9",
            "6": "NC_000006.11",
            "7": "NC_000007.13",
            "8": "NC_000008.10",
            "9": "NC_000009.11",
            "10": "NC_000010.10",
            "11": "NC_000011.9",
            "12": "NC_000012.11",
            "13": "NC_000013.10",
            "14": "NC_000014.8",
            "15": "NC_000015.9",
            "16": "NC_000016.9",
            "17": "NC_000017.10",
            "18": "NC_000018.9",
            "19": "NC_000019.9",
            "20": "NC_000020.10",
            "21": "NC_000021.8",
            "22": "NC_000022.10",
            "X": "NC_000023.10",
            "Y": "NC_000024.9",
        }
    elif int(ref_genome) == 38:
        # GRCh38 from https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.38
        NCBI_IDS = {
            "1": "NC_000001.11",
            "2": "NC_000002.12",
            "3": "NC_000003.12",
            "4": "NC_000004.12",
            "5": "NC_000005.10",
            "6": "NC_000006.12",
            "7": "NC_000007.14",
            "8": "NC_000008.11",
            "9": "NC_000009.12",
            "10": "NC_000010.11",
            "11": "NC_000011.10",
            "12": "NC_000012.12",
            "13": "NC_000013.11",
            "14": "NC_000014.9",
            "15": "NC_000015.10",
            "16": "NC_000016.10",
            "17": "NC_000017.11",
            "18": "NC_000018.10",
            "19": "NC_000019.10",
            "20": "NC_000020.11",
            "21": "NC_000021.9",
            "22": "NC_000022.11",
            "X": "NC_000023.11",
            "Y": "NC_000024.10",
        }
    try:
        handle = Entrez.efetch(
            db="nucleotide",
            id=NCBI_IDS[str(chromosome)],
            rettype="fasta",
            strand=strand,  # "1" for the plus strand and "2" for the minus strand.
            seq_start=start,
            seq_stop=end,
        )
        record = SeqIO.read(handle, "fasta")
        handle.close()
        sequence = str(record.seq)
        return sequence
    except ValueError:
        print("ValueError: no sequence found in NCBI")
        return False
# GC content calculator
def gc_content(sequence, percent=True):
    """
    Return the GC content of a DNA sequence, as a percentage by
    default or as a fraction with percent=False. Only A, C, G and T
    are counted (N and other symbols are ignored in the denominator);
    returns None and prints an error if no ACGT bases are present.
    """
    sequence = sequence.upper()
    counts = {base: sequence.count(base) for base in "GCTA"}
    gc_count = counts["G"] + counts["C"]
    total_bases_count = gc_count + counts["T"] + counts["A"]
    if total_bases_count == 0:
        print("Error in gc_content(sequence): sequence may contain only Ns")
        return None
    try:
        gc_fraction = float(gc_count) / total_bases_count
    except Exception as e:
        print(e)
        print(sequence)
    return gc_fraction * 100 if percent else gc_fraction
##Flexibility calculator##
# requires stabflex3.py
# Endpoint function to calculate the flexibility of a given sequence
def dna_flex(sequence, window_size=500, step_zize=100, verbose=False):
    """(str, int, int, bool) => list_of_tuples
    Calculate the flexibility index of a sequence with a sliding
    window (requires stabflex3.py: myFlex and flexibility_data).
    Each returned tuple contains the bin's coordinates and the
    calculated flexibility of that bin.

    Fix: the verbose branch printed len(self.seq), but this is a plain
    function with no 'self' — it raised NameError; it now reports
    len(sequence).

    Example:
    dna_flex(seq_a,500,100)
    >>> [('0-500', 9.7),('100-600', 9.77),...]
    """
    if verbose:
        print("Algorithm window size : %d" % window_size)
        print("Algorithm window step : %d" % step_zize)
        print("Sequence has {} bases".format(len(sequence)))
    algorithm = myFlex(sequence, window_size, step_zize)
    flexibility_result = algorithm.analyse(flexibility_data)
    return flexibility_result.report(verbose)
##Repeats scanner##
# G-quadruplex
def g4_scanner(sequence):
    """
    G-quadruplex motif scanner.

    Scan *sequence* for the canonical G4 regex motif
    [G]{3,5}[ACGT]{1,7}[G]{3,5}[ACGT]{1,7}[G]{3,5}[ACGT]{1,7}[G]{3,5}
    (reference: http://www.ncbi.nlm.nih.gov/pmc/articles/PMC1636468/).

    Returns two callable iterators: matches of the motif on the + strand,
    and matches of its C-run complement on the + strand (i.e. a G4 lying
    on the - strand).
    """
    g4_forward = "[G]{3,5}[ACGT]{1,7}[G]{3,5}[ACGT]{1,7}[G]{3,5}[ACGT]{1,7}[G]{3,5}"
    g4_reverse = "[C]{3,5}[ACGT]{1,7}[C]{3,5}[ACGT]{1,7}[C]{3,5}[ACGT]{1,7}[C]{3,5}"
    return re.finditer(g4_forward, sequence), re.finditer(g4_reverse, sequence)
# Repeat-masker
def parse_RepeatMasker(infile="RepeatMasker.txt", rep_type="class"):
    """
    Parse RepeatMasker.txt and return a dict of bins for each chromosome
    and a set of repeats found on that bin.
    dict = {'chromosome':{'bin':set(repeats)}}

    rep_type selects which annotation column is collected:
    'name' -> repName (10), 'class' -> repClass (11), 'family' -> repFamily (12).
    """
    valid_chromosomes = {str(c) for c in range(1, 23)} | {"X", "Y"}
    column_by_type = {"name": 10, "class": 11, "family": 12}
    if rep_type not in column_by_type:
        raise NameError(
            'Invalid rep_type "{}". Expected "class","family" or "name"'.format(
                rep_type
            )
        )
    idx = column_by_type[rep_type]
    result = {}
    # RepeatMasker.txt is around 500MB! -- stream it line by line.
    for line in yield_file(infile):
        fields = line.split("\t")
        chromosome = fields[5].replace("chr", "")
        if chromosome not in valid_chromosomes:
            continue
        bin_ = "{}-{}".format(fields[6], fields[7])
        repeat = fields[idx].replace("?", "")
        result.setdefault(chromosome, {}).setdefault(bin_, set()).add(repeat)
    return result
def next_day(d="2012-12-04"):
    """Return the next day in the calendar (ISO 'YYYY-MM-DD' in and out)."""
    year, month, day = (int(part) for part in d.split("-"))
    return str(datetime.date(year, month, day) + datetime.timedelta(days=1))
# next_day('2012-12-31')
# >>> '2013-01-01'
def previous_day(d="2012-12-04"):
    """Return the previous day in the calendar (ISO 'YYYY-MM-DD' in and out)."""
    year, month, day = (int(part) for part in d.split("-"))
    return str(datetime.date(year, month, day) - datetime.timedelta(days=1))
# previous_day('2013-01-01')
# >>> '2012-12-31'
def intersect(list1, list2):
    """(list,list) => list
    Return the intersection of two lists, i.e. the item in common.

    Order and duplicates follow list2: every element of list2 that also
    occurs in list1 is kept.
    """
    common = []
    for element in list2:
        if element in list1:
            common.append(element)
    return common
def annotate_fusion_genes(dataset_file):
    """
    Uses FusionGenes_Annotation.pl to find fusion genes in the dataset.
    Generates a new file containing all the annotations.

    Writes '<dataset_file minus .txt>_annotated.txt': a header line
    followed by one tab-separated record per annotated breakpoint.
    """
    start = time.time()
    print("annotating", dataset_file, "...")
    raw_output = run_perl("FusionGenes_Annotation.pl", dataset_file)
    # The perl output arrives as the repr of a bytes object: strip the
    # leading "b'" and split on the literal '\n' escape sequences.
    raw_list = str(raw_output)[2:].split("\\n")
    out_path = dataset_file[:-4] + "_annotated.txt"
    header = [
        "##ID",
        "ChrA",
        "StartA",
        "EndA",
        "ChrB",
        "StartB",
        "EndB",
        "CnvType",
        "Orientation",
        "GeneA",
        "StrandA",
        "LastExonA",
        "TotalExonsA",
        "PhaseA",
        "GeneB",
        "StrandB",
        "LastExonB",
        "TotalExonsB",
        "PhaseB",
        "InFrame",
        "InPhase",
    ]
    line_counter = 0
    # Fix: the original shadowed the path variable with the file handle;
    # use a distinct name for the handle.
    with open(out_path, "w") as out:
        out.write(list_to_line(header, "\t") + "\n")
        for item in raw_list:
            cleaned_item = item.split("\\t")
            # FusionGenes_Annotation.pl returns the data twice; keep only
            # the annotated records (those with more than 10 fields).
            if len(cleaned_item) > 10:
                out.write(list_to_line(cleaned_item, "\t") + "\n")
                line_counter += 1
    print(
        "succesfully annotated",
        line_counter,
        "breakpoints from",
        dataset_file,
        "in",
        time.time() - start,
        "seconds",
    )
    # track threads
    # Bug fix: the original bare `except: pass` swallowed every exception
    # (including KeyboardInterrupt).  The only expected failure is the
    # global counter not existing outside the threaded context, so only
    # NameError is silenced now.
    try:
        global running_threads
        running_threads -= 1
    except NameError:
        pass
# dataset_file = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks/Decipher-DeletionsOnly.txt'
# annotate_fusion_genes(dataset_file)
def blastn(
    input_fasta_file,
    db_path="/Users/amarcozzi/Desktop/BLAST_DB/",
    db_name="human_genomic",
    out_file="blastn_out.xml",
):
    """
    Run blastn on the local machine using a local database.
    Requires NCBI BLAST+ to be installed. http://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Web&PAGE_TYPE=BlastDocs&DOC_TYPE=Download
    Takes a fasta file as input and writes the output in an XML file.
    """
    command_line = NcbiblastnCommandline(
        query=input_fasta_file,
        db=db_path + db_name,
        evalue=0.001,
        outfmt=5,
        out=out_file,
    )
    print(command_line)
    stdout, stderr = command_line()
# to be tested
def check_line(line, unexpected_char=("\n", "", " ", "#")):
    """
    Check if the line starts with an unexpected character.
    If so, return False, else True.

    The empty string entry means "reject empty lines".

    Bug fix: the original used ``line.startswith("")``, which is True for
    EVERY string, so the function always returned False.  The empty
    string now matches only a genuinely empty line.  The default is a
    tuple instead of a list to avoid a shared mutable default argument.
    """
    for item in unexpected_char:
        if item == "":
            if line == "":
                return False
        elif line.startswith(item):
            return False
    return True
def dice_coefficient(sequence_a, sequence_b):
    """(str, str) => float
    Return the dice cofficient of two sequences.

    Score = 2 * |shared bigrams| / (|bigrams a| + |bigrams b|),
    with shared bigrams counted with multiplicity.
    """
    # Degenerate cases first: empty input, exact duplicates and
    # single-character strings (which have no bigrams).
    if not sequence_a or not sequence_b:
        return 0.0
    if sequence_a == sequence_b:
        return 1.0
    if len(sequence_a) == 1 or len(sequence_b) == 1:
        return 0.0
    bigrams_a = sorted(sequence_a[k : k + 2] for k in range(len(sequence_a) - 1))
    bigrams_b = sorted(sequence_b[k : k + 2] for k in range(len(sequence_b) - 1))
    total = len(bigrams_a) + len(bigrams_b)
    # Merge-walk the two sorted bigram lists, counting shared bigrams
    # with multiplicity; each shared pair contributes 2 to the numerator.
    shared = 0
    pos_a = pos_b = 0
    while pos_a < len(bigrams_a) and pos_b < len(bigrams_b):
        if bigrams_a[pos_a] == bigrams_b[pos_b]:
            shared += 2
            pos_a += 1
            pos_b += 1
        elif bigrams_a[pos_a] < bigrams_b[pos_b]:
            pos_a += 1
        else:
            pos_b += 1
    return float(shared) / float(total)
def find_path(graph, start, end, path=[]):
    """
    Find a path between two nodes in a graph (DFS, first path found).
    Works on graphs like this:
    graph ={'A': ['B', 'C'],
            'B': ['C', 'D'],
            'C': ['D'],
            'D': ['C'],
            'E': ['F'],
            'F': ['C']}

    Returns the path as a list of nodes, or None if no path exists.

    Bug fix: ``graph.has_key(start)`` is Python 2 only and raises
    AttributeError on Python 3; replaced with the ``in`` operator.
    Note: the default ``path=[]`` is never mutated (a new list is built
    with ``path + [start]``), so the shared default is harmless.
    """
    path = path + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    for node in graph[start]:
        if node not in path:
            newpath = find_path(graph, node, end, path)
            if newpath:
                return newpath
    return None
def find_all_paths(graph, start, end, path=[]):
    """
    Find all paths between two nodes of a graph.
    Works on graphs like this:
    graph ={'A': ['B', 'C'],
            'B': ['C', 'D'],
            'C': ['D'],
            'D': ['C'],
            'E': ['F'],
            'F': ['C']}

    Returns a list of paths (each a list of nodes); empty list if none.

    Bug fix: ``graph.has_key(start)`` is Python 2 only and raises
    AttributeError on Python 3; replaced with the ``in`` operator.
    Note: the default ``path=[]`` is never mutated (a new list is built
    with ``path + [start]``), so the shared default is harmless.
    """
    path = path + [start]
    if start == end:
        return [path]
    if start not in graph:
        return []
    paths = []
    for node in graph[start]:
        if node not in path:
            newpaths = find_all_paths(graph, node, end, path)
            for newpath in newpaths:
                paths.append(newpath)
    return paths
def find_shortest_path(graph, start, end, path=[]):
    """
    Find the shortest path between two nodes of a graph.
    Works on graphs like this:
    graph ={'A': ['B', 'C'],
            'B': ['C', 'D'],
            'C': ['D'],
            'D': ['C'],
            'E': ['F'],
            'F': ['C']}

    Returns the shortest path as a list of nodes, or None if none exists.

    Bug fix: ``graph.has_key(start)`` is Python 2 only and raises
    AttributeError on Python 3; replaced with the ``in`` operator.
    Note: the default ``path=[]`` is never mutated (a new list is built
    with ``path + [start]``), so the shared default is harmless.
    """
    path = path + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    shortest = None
    for node in graph[start]:
        if node not in path:
            newpath = find_shortest_path(graph, node, end, path)
            if newpath:
                if not shortest or len(newpath) < len(shortest):
                    shortest = newpath
    return shortest
# ##
# graph = {'A': ['B', 'C'],
# 'B': ['C', 'D'],
# 'C': ['D'],
# 'D': ['C'],
# 'E': ['F'],
# 'F': ['C']}
# >>> find_path(graph, 'A', 'D')
# ['A', 'B', 'C', 'D']
# >>> find_all_paths(graph, 'A', 'D')
# [['A', 'B', 'C', 'D'], ['A', 'B', 'D'], ['A', 'C', 'D']]
# >>> find_shortest_path(graph, 'A', 'D')
# ['A', 'C', 'D']
def gen_rnd_string(length):
    """
    Return a string of uppercase and lowercase ascii letters.

    The result is drawn without replacement from the 52 ASCII letters,
    so at most 52 characters can be returned.
    """
    letters = list(string.ascii_letters)
    random.shuffle(letters)
    return "".join(letters[:length])
def gene_synonyms(gene_name):
    """str => list()
    Queries http://rest.genenames.org and returns a list of synonyms of gene_name.
    Returns None if no synonym was found.

    NOTE(review): performs a network request via httplib2 (`http.Http`)
    with no timeout or retry handling.  A second `gene_synonyms`
    definition later in this module shadows this one at import time --
    confirm which version is intended to be used.
    """
    result = []
    # genenames.org returns JSON when asked via the Accept header.
    headers = {"Accept": "application/json"}
    uri = "http://rest.genenames.org"
    path = "/search/{}".format(gene_name)
    target = urlparse(uri + path)
    method = "GET"
    body = ""
    h = http.Http()
    response, content = h.request(target.geturl(), method, body, headers)
    if response["status"] == "200":
        # assume that content is a json reply
        # parse content with the json module
        data = json.loads(content.decode("utf8"))
        # Collect every symbol returned by the search.
        for item in data["response"]["docs"]:
            result.append(item["symbol"])
        return result
    else:
        print("Error detected: " + response["status"])
        return None
# print(gene_synonyms('MLL3'))
def string_to_number(s):
    """
    Convert a bytes string into a single number.
    Example:
    >>> string_to_number('foo bar baz')
    147948829660780569073512294
    """
    encoded = s.encode()
    return int.from_bytes(encoded, "little")
def number_to_string(n):
    """
    Convert a number into a bytes string.
    Example:
    >>> number_to_string(147948829660780569073512294)
    'foo bar baz'
    """
    byte_length = math.ceil(n.bit_length() / 8)
    return n.to_bytes(byte_length, "little").decode()
# x = 147948829660780569073512294
# number_to_string(x)
# >>> 'foo bar baz'
def determine_average_breaks_distance(dataset):  # tested only for deletion/duplication
    """
    Evaluate the average distance among breaks in a dataset.

    Only intra-chromosomal events (ChrA == ChrB) contribute to the mean.
    """
    rows = extract_data(dataset, columns=[1, 2, 4, 5], verbose=False)
    distances = [int(row[3]) - int(row[1]) for row in rows if row[0] == row[2]]
    return sum(distances) / len(distances)
# print(determine_average_breaks_distance('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/random/sorted/rnd_dataset_100_annotated_sorted.txt'))
def dict_overview(dictionary, how_many_keys, indent=False):
    """
    Prints out how_many_keys entries of the target dictionary.
    Useful to have a quick look at the structure of a dictionary.
    """
    layout = "{}\n\t{}" if indent else "{}\t{}"
    for key in islice(dictionary, how_many_keys):
        print(layout.format(key, dictionary[key]))
def download_human_genome(build="hg19"):
    """
    Download a gzipped human genome build from UCSC via wget.

    Parameters:
        build (str): 'hg19' or 'hg38'.

    Bug fix: the original printed 'Invalid build...' unconditionally --
    even after launching a valid download -- because the message was not
    in an ``else`` branch.  It is now printed only for unknown builds.
    """
    if build == "hg19":
        run('wget -O hg19.fa.gz -r https://hgdownload.soe.ucsc.edu/goldenPath/hg19/bigZips/latest/hg19.fa.gz', shell=True)
    elif build == "hg38":
        run('wget -O hg38.fa.gz -r https://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/latest/hg38.fa.gz', shell=True)
    else:
        print('Invalid build. Accepted values are: hg19, hg38')
def download_human_genome_old(
    build = "GRCh37",
    entrez_usr_email = "a.marcozzi@umcutrecht.nl"
):
    """
    Download the Human genome from enterez.

    Fetches every chromosome (1-22, X, Y) of the requested assembly from
    NCBI Entrez, one efetch request per chromosome, and appends them to a
    single FASTA file named 'Homo_sapiens_assembly<build>.fasta'.

    Parameters:
        build: genome assembly, either "GRCh37" or "GRCh38".
        entrez_usr_email: e-mail address registered with NCBI Entrez
            (NCBI requires identification for efetch requests).

    Returns:
        False for an unsupported build; otherwise None.

    NOTE(review): requires network access and Biopython (Entrez, SeqIO).
    Chromosomes that raise ValueError are reported and skipped, leaving
    them out of the output file.
    """
    Entrez.email = entrez_usr_email
    # Last available version
    if build == "GRCh37":
        # GRCh37 from http://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.25/#/def_asm_Primary_Assembly
        # RefSeq accession for each chromosome of GRCh37.
        NCBI_IDS_GRCh37 = {
            "1": "NC_000001.10",
            "2": "NC_000002.11",
            "3": "NC_000003.11",
            "4": "NC_000004.11",
            "5": "NC_000005.9",
            "6": "NC_000006.11",
            "7": "NC_000007.13",
            "8": "NC_000008.10",
            "9": "NC_000009.11",
            "10": "NC_000010.10",
            "11": "NC_000011.9",
            "12": "NC_000012.11",
            "13": "NC_000013.10",
            "14": "NC_000014.8",
            "15": "NC_000015.9",
            "16": "NC_000016.9",
            "17": "NC_000017.10",
            "18": "NC_000018.9",
            "19": "NC_000019.9",
            "20": "NC_000020.10",
            "21": "NC_000021.8",
            "22": "NC_000022.10",
            "X": "NC_000023.10",
            "Y": "NC_000024.9",
        }
        # Chromosome lengths in bases, used as the seq_stop of each fetch.
        CHR_LENGTHS_GRCh37 = {
            "1": 249250621,
            "2": 243199373,
            "3": 198022430,
            "4": 191154276,
            "5": 180915260,
            "6": 171115067,
            "7": 159138663,
            "8": 146364022,
            "9": 141213431,
            "10": 135534747,
            "11": 135006516,
            "12": 133851895,
            "13": 115169878,
            "14": 107349540,
            "15": 102531392,
            "16": 90354753,
            "17": 81195210,
            "18": 78077248,
            "19": 59128983,
            "20": 63025520,
            "21": 48129895,
            "22": 51304566,
            "X": 155270560,
            "Y": 59373566,
        }
        NCBI_IDS = NCBI_IDS_GRCh37
        CHR_LENGTHS = CHR_LENGTHS_GRCh37
    elif build == "GRCh38":
        # RefSeq accession for each chromosome of GRCh38.
        NCBI_IDS_GRCh38 = {
            "1": "NC_000001.11",
            "2": "NC_000002.12",
            "3": "NC_000003.12",
            "4": "NC_000004.12",
            "5": "NC_000005.10",
            "6": "NC_000006.12",
            "7": "NC_000007.14",
            "8": "NC_000008.11",
            "9": "NC_000009.12",
            "10": "NC_000010.11",
            "11": "NC_000011.10",
            "12": "NC_000012.12",
            "13": "NC_000013.11",
            "14": "NC_000014.9",
            "15": "NC_000015.10",
            "16": "NC_000016.10",
            "17": "NC_000017.11",
            "18": "NC_000018.10",
            "19": "NC_000019.10",
            "20": "NC_000020.11",
            "21": "NC_000021.9",
            "22": "NC_000022.11",
            "X": "NC_000023.11",
            "Y": "NC_000024.10",
        }
        # Chromosome lengths in bases, used as the seq_stop of each fetch.
        CHR_LENGTHS_GRCh38 = {
            "1": 248_956_422,
            "2": 242_193_529,
            "3": 198_295_559,
            "4": 190_214_555,
            "5": 181_538_259,
            "6": 170_805_979,
            "7": 159_345_973,
            "8": 145_138_636,
            "9": 138_394_717,
            "10": 133_797_422,
            "11": 135_086_622,
            "12": 133_275_309,
            "13": 114_364_328,
            "14": 107_043_718,
            "15": 101_991_189,
            "16": 90_338_345,
            "17": 83_257_441,
            "18": 80_373_285,
            "19": 58_617_616,
            "20": 64_444_167,
            "21": 46_709_983,
            "22": 50_818_468,
            "X": 156_040_895,
            "Y": 57_227_415,
        }
        NCBI_IDS = NCBI_IDS_GRCh38
        CHR_LENGTHS = CHR_LENGTHS_GRCh38
    else:
        print("This function only work with the genome builds GRCh37 & GRCh38 fow now...")
        return False
    with open(f"Homo_sapiens_assembly{build}.fasta", "w") as f:
        for chromosome, nc_id in NCBI_IDS.items():
            print(f"downloading {nc_id}")
            length = CHR_LENGTHS[chromosome]
            sequence = False
            try:
                # Always tell NCBI who you are
                handle = Entrez.efetch(
                    db="nucleotide",
                    id=nc_id,
                    rettype="fasta",
                    strand=1,
                    seq_start=0,  # this is to obtain actual start coordinates from the index
                    seq_stop=length,
                )  # this is the end of the chromosome
                record = SeqIO.read(handle, "fasta")
                handle.close()
                sequence = str(record.seq)
                # Ensembl-style FASTA header for the chromosome.
                header = f'>{chromosome} dna:chromosome chromosome:{build}:{chromosome}:1:{length}:1'
                f.write(f'{header}\n{sequence}\n')
            except ValueError:
                print("ValueError: no sequence found in NCBI")
def exponential_range(start=0, end=10000, base=10):
    """
    Generate a range of integers whose step grows exponentially.

    Yields range(start, base**2, base), then range(base**2, base**3, base**2),
    and so on, stopping once the lower bound reaches `end`.

    Example: list(exponential_range(0,100000,2))
    Output : [0, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096,
              8192, 16384, 32768, 65536]

    Raises:
        ValueError: if end < base**2 (raised lazily, on first iteration).
    """
    if end / base < base:
        raise ValueError('"end" must be at least "base**2"')
    lower, upper, step = start, base ** 2, base
    while lower < end:
        yield from range(lower, upper, step)
        lower, upper, step = upper, upper * base, step * base
##list(exp_range(0,100000,10))
def extract_data(
    infile,
    columns=[3, 0, 1, 2, 5],
    header="##",
    skip_lines_starting_with="#",
    data_separator="\t",
    verbose=False,
):
    """
    Extract data from a file. Returns a list of tuples.
    Each tuple contains the data extracted from one line of the file
    in the indicated columns and with the indicated order.

    Parameters:
        infile: path of the input file.
        columns: indices (and order) of the columns to keep.
        header: prefix identifying the single allowed header line.
        skip_lines_starting_with: prefix marking comment lines to skip.
        data_separator: field separator passed to line_to_list().
        verbose: print a short summary when True.

    Raises:
        ValueError: if more than one header line is found.

    Bug fix: the original shadowed the `infile` path with the open file
    handle (``with open(infile) as infile``), so the verbose summary
    printed the repr of the closed file object instead of the file name.
    """
    extracted_data = []
    header_list = []
    header_flag = 0
    line_counter = 0
    with open(infile) as handle:
        lines = handle.readlines()
    for line in lines:  # yield_file(infile) can be used instead
        line_counter += 1
        if line[: len(header)] == header:  # checks the header
            header_list = line_to_list(line[len(header) :], data_separator)
            header_flag += 1
            if header_flag > 1:
                raise ValueError(
                    'More than one line seems to contain the header identificator "'
                    + header
                    + '".'
                )
        elif (
            line[0] == skip_lines_starting_with or line == "" or line == "\n"
        ):  # skips comments and blank lines
            pass
        else:
            fields = line_to_list(line, data_separator)
            extracted_data.append(tuple(fields[i] for i in columns))
    if verbose:  # Prints out a brief summary
        print("Data extracted from", infile)
        print("Header =", header_list)
        print("Total lines =", line_counter)
    return extracted_data
# extract_data('tables/clinvarCnv.txt', columns=[3,0,1,2,5], header='##', skip_lines_starting_with='#', data_separator='\t', verbose=True)
def extract_Toronto(infile, outfile):
    """
    Ad hoc function to extract deletions and duplications out of the Toronto Genetic Variants Database.
    Returns a file ready to be annotated with FusionGenes_Annotation.pl .

    Reads `infile` with extract_data(), keeps only records whose type is
    'deletion' or 'duplication', and writes them to `outfile` as
    tab-separated lines in the ##ID/ChrA/StartA/... breakpoint format.
    """
    # Extract data from infile
    # Columns are: ID, Chr, Start, End, CNV_Type
    raw_data = extract_data(infile, columns=[0, 1, 2, 3, 5], verbose=True)
    # Take only deletions and duplications
    filtered_data = []
    for data in raw_data:
        if "deletion" in data or "duplication" in data:
            filtered_data.append(data)
    print("len(row_data) :", len(raw_data))
    print("len(filtered_data) :", len(filtered_data))
    # Write filtered_data to a text file
    header = [
        "##ID",
        "ChrA",
        "StartA",
        "EndA",
        "ChrB",
        "StartB",
        "EndB",
        "CnvType",
        "Orientation",
    ]
    with open(outfile, "w") as outfile:
        outfile.write(list_to_line(header, "\t") + "\n")
        for item in filtered_data:
            if item[-1] == "duplication":
                orientation = "HT"
            elif item[-1] == "deletion":
                orientation = "TH"
            else:
                # NOTE(review): this branch leaves `orientation` unbound
                # (NameError on the write below).  It is unreachable while
                # filtered_data only holds deletions/duplications, but a
                # new CNV type would crash here -- confirm intended.
                print('ERROR: unable to determine "Orientation"...')
            # Breakpoint A = (Chr, Start, Start); breakpoint B = (Chr, End, End).
            list_ = [
                item[0],
                item[1],
                item[2],
                item[2],
                item[1],
                item[3],
                item[3],
                item[-1].upper(),
                orientation,
            ]
            outfile.write(list_to_line(list_, "\t") + "\n")
    print("Done")
# infile = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/GRCh37_hg19_variants_2014-10-16.txt'
# outfile = infile[:-4]+'_DelDupOnly.txt'
# extract_Toronto(infile, outfile)
def extract_Decipher(infile, outfile):
    """
    Ad hoc function to extract deletions and duplications out of the Decipher Database.
    Returns a file ready to be annotated with FusionGenes_Annotation.pl .

    The CNV type is derived from the sign of the 'mean_ratio' column:
    positive -> DUPLICATION (HT), negative -> DELETION (TH).

    Bug fix: when mean_ratio == 0 the original only printed an error and
    fell through, so `CnvType`/`orientation` were unbound (NameError on
    the first such record, stale values from the previous iteration
    afterwards).  Undeterminable records are now skipped explicitly.
    """
    # Extract data from infile
    # Columns are: ID, Chr, Start, End, CNV_Type(here expressed as "mean_ratio")
    raw_data = extract_data(infile, columns=[0, 3, 1, 2, 4], verbose=True)
    header = [
        "##ID",
        "ChrA",
        "StartA",
        "EndA",
        "ChrB",
        "StartB",
        "EndB",
        "CnvType",
        "Orientation",
    ]
    with open(outfile, "w") as out:
        out.write(list_to_line(header, "\t") + "\n")
        for item in raw_data:
            # Convert mean_ratio to CnvType
            mean_ratio = float(item[-1])
            if mean_ratio > 0:
                CnvType = "DUPLICATION"
                orientation = "HT"
            elif mean_ratio < 0:
                CnvType = "DELETION"
                orientation = "TH"
            else:
                print('ERROR: unable to determine "Orientation"...')
                continue  # skip records we cannot classify
            # Write output
            list_ = [
                item[0],
                item[1],
                item[2],
                item[2],
                item[1],
                item[3],
                item[3],
                CnvType,
                orientation,
            ]
            out.write(list_to_line(list_, "\t") + "\n")
    print("Done")
# infile = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/decipher-hg19_15-01-30.txt'
# outfile = infile[:-4]+'_DelDupOnly.txt'
# extract_Decipher(infile, outfile)
def extract_dgvMerged(infile, outfile):
    """
    Ad hoc function to extract deletions and losses out of the dgvMerged database.
    Returns a file ready to be annotated with FusionGenes_Annotation.pl .

    Reads `infile` with extract_data(), keeps only 'Deletion'/'Loss'
    records, strips the 'chr' prefix from chromosome names, and writes
    tab-separated lines in the ##ID/ChrA/StartA/... breakpoint format.
    """
    # original_header = '##bin	chrom	chromStart	chromEnd	name	score	strand	thickStart	thickEnd	itemRgb	varType	reference	pubMedId	method	platform	mergedVariants	supportingVariants	sampleSize	observedGains	observedLosses	cohortDescription	genes	samples'
    #                     [0]    [1]        [2]          [3]     [4]    [5]      [6]      [7]          [8]      [9]      [10]      [11]       [12]      [13]     [14]         [15]              [16]            [17]        [18]            [19]            [20]        [21]    [22]
    raw_data = extract_data(
        infile,
        columns=[4, 1, 2, 3, 10],
        header="##",
        skip_lines_starting_with="#",
        data_separator="\t",
        verbose=False,
    )
    # Take only deletions and losses
    filtered_data = []
    for data in raw_data:
        if "Deletion" in data or "Loss" in data:
            filtered_data.append(data)
    print("len(row_data) :", len(raw_data))
    print("len(filtered_data) :", len(filtered_data))
    # Write filtered_data to a text file
    header = [
        "##ID",
        "ChrA",
        "StartA",
        "EndA",
        "ChrB",
        "StartB",
        "EndB",
        "CnvType",
        "Orientation",
    ]
    with open(outfile, "w") as outfile:
        outfile.write(list_to_line(header, "\t") + "\n")
        for item in filtered_data:
            if item[-1] == "Deletion" or item[-1] == "Loss":
                cnv_type = "DELETION"
                # NOTE(review): deletions are written with orientation "HT"
                # here, while extract_Toronto writes "TH" for deletions --
                # confirm which value is the intended one.
                orientation = "HT"
            # elif item[-1] == 'deletion':
            #    orientation = 'TH'
            else:
                # NOTE(review): unreachable while filtered_data only holds
                # Deletion/Loss records; would leave cnv_type/orientation
                # unbound otherwise.
                print('ERROR: unable to determine "Orientation"...')
            # item[1] is like 'chrN'; [3:] strips the 'chr' prefix.
            list_ = [
                item[0],
                item[1][3:],
                item[2],
                item[2],
                item[1][3:],
                item[3],
                item[3],
                cnv_type,
                orientation,
            ]
            outfile.write(list_to_line(list_, "\t") + "\n")
    print("Done")
# ## Extract deletions and Losses from dgvMerged
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks'
# file_name = 'dgvMerged.txt'
# infile = folder + '/' + file_name
# outfile = folder + '/' + 'dgvMerged-DeletionsOnly.txt'
# extract_dgvMerged(infile, outfile)
# ## annotate
# dataset_file = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks/dgvMerged-DeletionsOnly.txt'
# annotate_fusion_genes(dataset_file)
def fill_and_sort(pandas_chrSeries):
    """incomplete pandas.Series => complete and sorted pandas.Series
    Given a pandas.Series in which the first argument is the chromosome name
    and the second argument is a count " [('1', 61), ('3', 28), ..., ('X', 29)]"
    This function returns a new (sorted by chromosome) series with the missing chromosome included as ('Chr_name',0).
    This is useful when creating series out of subsets grouped by Chr.
    If the Chr does not contains any event, then it will be excluded from the subset.
    However, especially for plotting reasons, you may want to have ('Chr',0) in you list instead of a missing Chr.
    Example.
    > series = [('1', 61), ('3', 28), ..., ('X', 29)] # in this Series Chr_2 and Chr_Y are missing.
    > fill_and_sort(series)
    >>> [('1', 61), ('2',0), ('3', 28), ..., ('X', 29), ('Y',0)] # this Series have all the chromosomes

    Bug fix: ``Series.iteritems()`` was removed in pandas 2.0;
    ``Series.items()`` is the drop-in equivalent and is available in all
    modern pandas versions.
    """
    # add missing chromosomes with a zero count
    CHROMOSOMES = [str(c) for c in range(1, 23)] + ["X", "Y"]
    chr_list = CHROMOSOMES[:]
    complete_series = []
    for item in pandas_chrSeries.items():
        chr_list.remove(item[0])
        complete_series.append(item)
    for missing in chr_list:
        complete_series.append((missing, 0))
    # sort by canonical chromosome order (keys are unique, so a dict
    # lookup replaces the original nested scan)
    counts = dict(complete_series)
    sorted_ = [counts[chromosome] for chromosome in CHROMOSOMES]
    return pd.Series(sorted_, index=CHROMOSOMES)
# counts = [50,9,45,6]
# pandas_chrSeries = pd.Series(counts, index=['1','4','X','10'])
# print(pandas_chrSeries)
# good_series = fill_and_sort(pandas_chrSeries)
# print(good_series)
def find(string, char):
    """
    Look for a character in a string and return its indexes.

    Unlike str.find(), ALL matching indexes are returned, not only the
    first one.
    """
    positions = []
    for index, letter in enumerate(string):
        if letter == char:
            positions.append(index)
    return positions
# print(find('alessio', 's'))
def filter_out(word, infile, outfile):
    """
    Reads a file line by line
    and writes an output file containing only
    the lines that DO NOT contains 'word'.
    """
    print("Filtering out lines containing", word, "...")
    with open(infile, "r") as source:
        kept_lines = [line for line in source.readlines() if word not in line]
    with open(outfile, "w") as sink:
        sink.writelines(kept_lines)
    print("Done")
# infile = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks/Decipher_DelDupOnly.txt'
# outfile = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks/Decipher-DeletionsOnly.txt'
# filter_out('DUPLICATION',infile, outfile)
def flatten(l):
    """Flatten one level of nesting: [[1, 2], [3]] -> [1, 2, 3].

    Converted from a lambda assignment (PEP 8 E731) to a def so the
    function has a proper name in tracebacks; behavior is unchanged.
    """
    return [item for sublist in l for item in sublist]
def gene_synonyms(gene_name):
    """str => list()
    Queries http://rest.genenames.org and http://www.ncbi.nlm.nih.gov/ to figure out the best synonym of gene_name.

    NOTE(review): this redefines the earlier gene_synonyms() in this
    module and shadows it at import time -- confirm this cross-checked
    version is the one meant to survive.  Performs two network requests
    (httplib2 and urlopen) with no timeout handling.
    """
    result = []
    tmp = []
    headers = {"Accept": "application/json"}
    uri = "http://rest.genenames.org"
    path = "/search/{}".format(gene_name)
    # Fetch the NCBI gene page text; used below to pick which of several
    # genenames.org symbols actually appears on NCBI.
    html_doc = urlopen(
        "http://www.ncbi.nlm.nih.gov/gene/?term={}[sym]".format(gene_name)
    )
    html_txt = BeautifulSoup(html_doc, "html.parser").get_text()
    target = urlparse(uri + path)
    method = "GET"
    body = ""
    h = http.Http()
    response, content = h.request(target.geturl(), method, body, headers)
    if response["status"] == "200":
        # assume that content is a json reply
        # parse content with the json module
        data = json.loads(content.decode("utf8"))
        for item in data["response"]["docs"]:
            tmp.append(item["symbol"])
    else:
        print("Error detected: " + response["status"])
        return None
    if len(tmp) > 1:
        # Several candidate symbols: keep only those NCBI also mentions.
        for gene in tmp:
            if gene in html_txt:
                result.append(gene)
        return result
    else:
        return tmp
# print(gene_synonyms('MLL3'))
def gen_controls(how_many, chromosome, GapTable_file, outfile):
    """
    Generate `how_many` random single breakpoints on `chromosome`
    (gen_rnd_single_break, avoiding regions from `GapTable_file`) and
    write them tab-separated, one per line, to `outfile`.

    NOTE(review): decrements the module-level `running_threads` counter
    unconditionally, so it raises NameError when that global is not set
    up by the caller (see the threading snippet below this function).
    """
    global running_threads  # in case of multithreading
    list_brkps = gen_rnd_single_break(
        how_many, chromosome, GapTable_file, verbose=False
    )
    with open(outfile, "w") as f:
        for item in list_brkps:
            f.write(list_to_line(item, "\t") + "\n")
    running_threads -= 1  # in case of multithreading
# # Generate controls
# import time
# from threading import Thread
# threads = 0
# running_threads = 0
# max_simultaneous_threads = 20
# how_many=9045
# chromosome='9'
# GapTable_file='/Users/alec/Desktop/UMCU_Backup/Projects/Anne_Project/current_brkps_DB/out_ALL_gap.txt'
# while threads < 100:
# while running_threads >= max_simultaneous_threads:
# time.sleep(1)
# running_threads += 1
# outfile = '/Users/alec/Desktop/UMCU_Backup/Projects/Anne_Project/current_brkps_DB/out_chr9_control_'+str(threads)+'.txt'
# print('thread', threads, '|', 'running threads:',running_threads)
# Thread(target=gen_controls, args=(how_many,chromosome,GapTable_file,outfile)).start()
# threads += 1
def gen_control_dataset(
    real_dataset, suffix="_control.txt"
):  # tested only for deletion/duplication
    """
    Generates a control dataset ad hoc.
    Takes as input an existing dataset and generates breaks
    in the same chromosomes and with the same distance (+-1bp),
    the position are however randomized.

    The output file is named '<real_dataset minus .txt><suffix>' and
    uses the ##ID/ChrA/StartA/... breakpoint format.  Inter-chromosomal
    records (ChrA != ChrB) are reported and skipped.
    """
    # Extracted columns: ChrA, StartA, ChrB, StartB, CnvType, Orientation.
    real_data = extract_data(real_dataset, columns=[1, 2, 4, 5, 7, 8], verbose=False)
    control_data = []
    _id_list = []
    for item in real_data:
        if item[0] == item[2]:  # ChrA == ChrB
            # generate a unique id
            _id = gen_rnd_id(16)
            while _id in _id_list:
                _id = gen_rnd_id(16)
            _id_list.append(_id)
            chromosome = item[0]
            distance = int(item[3]) - int(item[1])  # StartB - StartA
            cnv_type = item[4]
            orientation = item[5]
            # One random break pair on the same chromosome, at roughly
            # the same distance (+-1bp) as the real event.
            breaks = gen_rnd_breaks(
                how_many=1,
                chromosome=chromosome,
                min_distance=distance - 1,
                max_distance=distance + 1,
                GapTable_file="tables/gap.txt",
            )
            print(breaks)
            control_data.append(
                [
                    _id,
                    chromosome,
                    breaks[0][1],
                    breaks[0][1],
                    chromosome,
                    breaks[0][2],
                    breaks[0][2],
                    cnv_type,
                    orientation,
                ]
            )
        else:
            print(item[0], "is no equal to", item[2], "I am skipping these breaks")
    header = [
        "##ID",
        "ChrA",
        "StartA",
        "EndA",
        "ChrB",
        "StartB",
        "EndB",
        "CnvType",
        "Orientation",
    ]
    filename = real_dataset[:-4] + suffix
    with open(filename, "w") as outfile:
        outfile.write(list_to_line(header, "\t") + "\n")
        for item in control_data:
            line = list_to_line(item, "\t")
            print(line)
            outfile.write(line + "\n")
    print("Data written in", filename)
# gen_control_dataset('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/raw/clinvarCnv-DeletionsOnly.txt')
def gen_gap_table(
    infile="/Users/amarcozzi/Desktop/All_breakpoints_HG19_final.txt",
    outfile="/Users/amarcozzi/Desktop/All_breakpoints_HG19_gap.txt",
    resolution=10000,
):
    """
    Generates a file containing a list of coordinates
    for which no breakpoints have been found in the input file.

    Each chromosome is divided into fixed-size bins of `resolution`
    bases; every bin containing no breakpoint from `infile` is written
    to `outfile` as 'chrN<TAB>start<TAB>end'.

    The input is expected to contain tab-separated lines beginning with
    '<chromosome>\\t<breakpoint>' (see the parsing loop below).
    """
    # Global constants
    CHROMOSOMES = [str(c) for c in range(1, 23)] + ["X", "Y"]
    # length of chromosomes based on GRCh37 (Data source: Ensembl genome browser release 68, July 2012)
    # http://jul2012.archive.ensembl.org/Homo_sapiens/Location/Chromosome?r=1:1-1000000
    # http://grch37.ensembl.org/Homo_sapiens/Location/Chromosome?r=1:24626643-24726643
    CHR_LENGTHS = {
        "1": 249250621,
        "2": 243199373,
        "3": 198022430,
        "4": 191154276,
        "5": 180915260,
        "6": 171115067,
        "7": 159138663,
        "8": 146364022,
        "9": 141213431,
        "10": 135534747,
        "11": 135006516,
        "12": 133851895,
        "13": 115169878,
        "14": 107349540,
        "15": 102531392,
        "16": 90354753,
        "17": 81195210,
        "18": 78077248,
        "19": 59128983,
        "20": 63025520,
        "21": 48129895,
        "22": 51304566,
        "X": 155270560,
        "Y": 59373566,
    }
    gap_list = []
    for Chr in CHROMOSOMES:
        print("-----------------------------------------------------")
        print("Analyzing breakpoints in chromosome", Chr)
        length = CHR_LENGTHS[Chr]
        # determine the intervals given the chromosome length and the resolution
        x_ax = []  # data holder (bin upper bounds)
        y_ax = []  # stores breakpoint counts per interval
        breakpoint_list = []
        # # Extract data from infile, chromosome by chromosome
        # with open(infile, 'r') as f:
        #    lines = f.readlines()
        #    for line in lines: # yield_file(infile) can be used instead
        #        if line.startswith('chr'+Chr+':'):
        #            tmp = line.split(':')
        #            breakpoint = tmp[1].split('-')[0]
        #            breakpoint_list.append(int(breakpoint))
        # print(len(breakpoint_list),'breakpoints found...')
        with open(infile, "r") as f:
            # lines = f.readlines()
            for line in f:  # lines: # yield_file(infile) can be used instead
                if line.startswith(Chr + "\t"):
                    tmp = line_to_list(line, "\t")
                    breakpoint = tmp[1]
                    breakpoint_list.append(int(breakpoint))
        print(len(breakpoint_list), "breakpoints found...")
        # Bin upper bounds: resolution, 2*resolution, ... up to the
        # chromosome end (inclusive of a final partial bin).
        for item in range(resolution, length + resolution, resolution):
            x_ax.append(item)
        print("Interval list:", len(x_ax), "at", resolution, "bases resolution")
        # Count breakpoints per bin; counted breakpoints are removed from
        # the pool so each one is assigned to a single (lowest) bin.
        for interval in x_ax:
            count = 0
            to_remove = []
            for breakpoint in breakpoint_list:
                if breakpoint <= interval:
                    count += 1
                    to_remove.append(breakpoint)
            y_ax.append(count)
            for item in to_remove:
                try:
                    breakpoint_list.remove(item)
                except:
                    print("Error", item)
        counter = 0
        # Every zero-count bin is recorded as a gap.
        for idx, count_ in enumerate(y_ax):
            if count_ == 0:
                gap = x_ax[idx]
                gap_list.append((Chr, gap))
                counter += 1
        print("Found", counter, "gaps in chromosome", Chr, "\n")
    with open(outfile, "w") as f:
        f.write(
            "#Gap table at "
            + str(resolution)
            + " bases resolution based on "
            + infile
            + "\n"
        )
        f.write("##chrom" + "\t" + "chromStart" + "\t" + "chromEnd" + "\n")
        for item in gap_list:
            # item is (chromosome, bin upper bound); the gap spans one bin.
            line = (
                "chr"
                + str(item[0])
                + "\t"
                + str(item[1] - resolution)
                + "\t"
                + str(item[1])
            )
            f.write(line + "\n")
# import time
# start = time.time()
# gen_gap_table()
# print('Done in',time.time()-start,'seconds')
## Generate a gap table file
# import time
# start = time.time()
# gen_gap_table(infile='/Users/amarcozzi/Desktop/current_brkps_DB/out_ALL.txt', outfile='/Users/amarcozzi/Desktop/current_brkps_DB/out_ALL_gap.txt', resolution=10000)
# print('Done in',time.time()-start,'seconds')
def gen_multiple_controls(real_dataset, how_many):
    """
    Generates how_many control datasets.

    Each dataset is produced by gen_control_dataset() with the suffix
    '_control_<n>.txt', n in [0, how_many).
    """
    for n in range(how_many):
        gen_control_dataset(real_dataset, "_control_" + str(n) + ".txt")
    print(how_many, "datasets have been generated")
# gen_multiple_controls('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/raw/dataset_4.txt',1000)
# ## Generate multiple controls of datasets found in a folder
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/random'
# for item in list_of_files(folder,'txt'):
# gen_multiple_controls(item,1000)
def gen_deletion_dataset_from_breaks(list_of_breaks, outfile, ID_already=False):
    """Generate a proper deletion dataset file out of a list of breaks.

    Parameters:
        list_of_breaks: tuples of ('chrN', start, end) when ID_already is
            False, or (ID, 'chrN', start, end) when True.
        outfile: path of the tab-separated dataset to write.
        ID_already: whether each break already carries its own ID.

    Every break is written as one DELETION record with orientation 'TH';
    unique random 8-character IDs are generated when needed.
    """
    # Var names are not pythonic but I think it is better for readibility
    header = [
        "##ID",
        "ChrA",
        "StartA",
        "EndA",
        "ChrB",
        "StartB",
        "EndB",
        "CnvType",
        "Orientation",
    ]
    ID_list = []  # to check if the ID is already present
    print("writing breakpoints to", outfile, "..........")
    with open(outfile, "w") as outfile:
        outfile.write(list_to_line(header, "\t") + "\n")
        for item in list_of_breaks:
            if ID_already == False:  # the braks do not have an ID
                while True:  # keep drawing until the ID is unique
                    ID = gen_rnd_id(8)
                    if ID not in ID_list:
                        ID_list.append(ID)
                        break
                # item[0] is like 'chrN'; [3:] strips the 'chr' prefix.
                ChrA = ChrB = item[0][3:]
                StartA = EndA = item[1]
                StartB = EndB = item[2]
            else:  # the break do have an ID
                ID = item[0]  # the ID is supposed to be the first entry
                ChrA = ChrB = item[1][3:]
                StartA = EndA = item[2]
                StartB = EndB = item[3]
            CnvType = "DELETION"
            Orientation = "TH"
            line = list_to_line(
                [ID, ChrA, StartA, EndA, ChrB, StartB, EndB, CnvType, Orientation], "\t"
            )
            outfile.write(line + "\n")
    print("OK")
# list_of_breaks = gen_rnd_breaks(how_many=100, chromosome='Y', min_distance=1000, max_distance=15000, GapTable_file='tables/gap.txt')
# gen_deletion_dataset_from_breaks(list_of_breaks, 'test_deletion_dataset.txt')
# ## Generate (m) RANDOM datasets of different length (n)
# for m in range(1000):
# for n in [100,1000,10000,100000,1000000]:
# outfile = 'rnd_dataset_'+ str(n)+'_'+str(m)+'.txt'
# breaks = list()
# for chromosome in CHROMOSOMES:
# breaks.extend(gen_rnd_breaks(how_many=500, chromosome=chromosome, min_distance=0, max_distance=n))
# gen_deletion_dataset_from_breaks(breaks, outfile)
def gen_rnd_breaks(
    how_many=100,
    chromosome="Y",
    min_distance=1000,
    max_distance=15000,
    GapTable_file="tables/gap.txt",
):
    """Returns tuples containing 1)the chromosome, 2)first breakpoint, 3)second breakpoint.

    Keeps only the points that do not fall inside a gap of the gap table.
    gen_rnd_breaks(int, string, int, int, filepath) => [(chrX, int, int), ...]
    Valid chromosome inputs are "1" to "22" ; "Y" ; "X" (and "23"/"24" as
    aliases for X/Y).  The chromosome length is based on the build GRCh37/hg19.
    """
    # CHR_LENGTHS is based on GRCh37
    CHR_LENGTHS = {
        "1": 249250621,
        "2": 243199373,
        "3": 198022430,
        "4": 191154276,
        "5": 180915260,
        "6": 171115067,
        "7": 159138663,
        "8": 146364022,
        "9": 141213431,
        "10": 135534747,
        "11": 135006516,
        "12": 133851895,
        "13": 115169878,
        "14": 107349540,
        "15": 102531392,
        "16": 90354753,
        "17": 81195210,
        "18": 78077248,
        "19": 59128983,
        "20": 63025520,
        "21": 48129895,
        "22": 51304566,
        "X": 155270560,
        "Y": 59373566,
    }
    # Generate a chromosome-specific gap list from the UCSC-style gap table
    print("generating", how_many, "breakpoints in Chr", chromosome, "..........")
    with open(GapTable_file, "r") as infile:
        lines = infile.readlines()
    full_gap_list = []
    chr_specific_gap = []
    for line in lines:
        if "#" not in line:  # skip comments
            full_gap_list.append(line_to_list(line, "\t"))
    for item in full_gap_list:
        if "chr" + chromosome in item:
            # Database/browser start coordinates differ by 1 base
            chr_specific_gap.append((item[2], item[3]))
    # Merge contiguous gaps (pairs whose end equals the next pair's start)
    merged_gaps = []
    n = 0
    left_tick = False
    while n < len(chr_specific_gap):
        if left_tick == False:
            left_tick = chr_specific_gap[n][0]
        try:
            if chr_specific_gap[n][1] == chr_specific_gap[n + 1][0]:
                n += 1
            else:
                right_tick = chr_specific_gap[n][1]
                merged_gaps.append((left_tick, right_tick))
                left_tick = False
                n += 1
        # NOTE(review): bare except presumably catches the IndexError on the
        # last element; that path drops a still-open gap without appending
        # it -- confirm this is intended
        except:
            n += 1
    # Generate the breakpoint list; retry until `how_many` good pairs found
    list_of_breakpoints = []
    while len(list_of_breakpoints) < how_many:
        try:
            start = random.randint(0, CHR_LENGTHS[chromosome])
        except KeyError:
            if chromosome == "23":
                chromosome = "X"
                start = random.randint(0, CHR_LENGTHS[chromosome])
            elif chromosome == "24":
                chromosome = "Y"
                start = random.randint(0, CHR_LENGTHS[chromosome])
            else:
                # NOTE(review): falls through with `start` unset, so an
                # invalid chromosome name raises NameError on the next line
                print("ERROR: Wrong chromosome name!!")
        end = random.randint(start + min_distance, start + max_distance)
        are_points_ok = True  # assumes that the points are ok
        for item in merged_gaps:
            # checks whether both points fall outside every merged gap
            if start < int(item[0]) or start > int(item[1]):
                if end < int(item[0]) or end > int(item[1]):
                    pass
                else:
                    are_points_ok = False
            else:
                are_points_ok = False
        if are_points_ok == True:
            list_of_breakpoints.append(("chr" + chromosome, start, end))
    print("OK")
    return list_of_breakpoints
# print(gen_rnd_breaks(how_many=100, chromosome='Y', min_distance=1000, max_distance=15000, GapTable_file='tables/gap.txt'))
def gen_rnd_id(length):
    """Return a random ID of *length* chars drawn from A-Z and digits.

    Note: uses `random`, so the IDs are not cryptographically secure.
    """
    chars = string.ascii_uppercase + string.digits
    # `_` instead of the previously unused loop variable `char`
    return "".join(random.choice(chars) for _ in range(length))
# print(gen_rnd_id(16))
# @profile
def gen_rnd_single_break(
    how_many=100,
    chromosome="1",
    GapTable_file="/Users/amarcozzi/Desktop/All_breakpoints_HG19_gap_10k.txt",
    verbose=False,
):
    """Returns tuples containing 1)the chromosome, 2)the breakpoint.

    Keeps only the points that do not fall inside a gap of the gap table.
    gen_rnd_single_break(int, string, filepath) => [(chromosome, int), ...]
    Valid chromosome inputs are "1" to "22" ; "Y" ; "X".
    The chromosome length is based on the build GRCh37/hg19.
    Prerequisites: The gap_list file is in the form:
    ##chrom	chromStart	chromEnd
    chr1	0	10000
    chr1	30000	40000
    chr1	40000	50000
    chr1	50000	60000
    """
    if verbose == True:
        start_time = time.time()
    # CHR_LENGTHS is based on GRCh37
    CHR_LENGTHS = {
        "1": 249250621,
        "2": 243199373,
        "3": 198022430,
        "4": 191154276,
        "5": 180915260,
        "6": 171115067,
        "7": 159138663,
        "8": 146364022,
        "9": 141213431,
        "10": 135534747,
        "11": 135006516,
        "12": 133851895,
        "13": 115169878,
        "14": 107349540,
        "15": 102531392,
        "16": 90354753,
        "17": 81195210,
        "18": 78077248,
        "19": 59128983,
        "20": 63025520,
        "21": 48129895,
        "22": 51304566,
        "X": 155270560,
        "Y": 59373566,
    }
    # Generate a chromosome-specific gap list from the gap table
    with open(GapTable_file, "r") as infile:
        lines = infile.readlines()
    full_gap_list = []
    chr_specific_gap = []
    for line in lines:
        if "#" not in line:  # skip comments
            full_gap_list.append(line_to_list(line, "\t"))
    for item in full_gap_list:
        if "chr" + chromosome in item:
            # columns here are (chromStart, chromEnd) -- note this differs
            # from gen_rnd_breaks, which reads columns 2 and 3
            chr_specific_gap.append((item[1], item[2]))
    # Merge contiguous gaps (delegated to the dedicated helper)
    merged_gaps = merge_gaps(chr_specific_gap)
    # merged_gaps = []
    # while len(chr_specific_gap) > 0:
    #     try:
    #         if chr_specific_gap[0][1] == chr_specific_gap[1][0]:
    #             tmp = (chr_specific_gap[0][0],chr_specific_gap[1][1])
    #             chr_specific_gap.pop(0)
    #             chr_specific_gap[0] = tmp
    #         else:
    #             merged_gaps.append(chr_specific_gap.pop(0))
    #     except:
    #         merged_gaps.append(chr_specific_gap.pop(0))
    # Generate the breakpoint list; retry until `how_many` good points found
    if verbose == True:
        print("generating", how_many, "breakpoints in Chr", chromosome)
    list_of_breakpoints = []
    while len(list_of_breakpoints) < how_many:
        try:
            start = random.randint(0, CHR_LENGTHS[chromosome])
        except KeyError:
            if chromosome == "23":
                chromosome = "X"
                start = random.randint(0, CHR_LENGTHS[chromosome])
            elif chromosome == "24":
                chromosome = "Y"
                start = random.randint(0, CHR_LENGTHS[chromosome])
            else:
                # NOTE(review): `start` stays unset here, so an invalid
                # chromosome name raises NameError further down
                print("ERROR: Wrong chromosome name!!")
        are_points_ok = True  # assumes that the point is ok
        for item in merged_gaps:
            # the point is ok only if it lies strictly outside every gap
            if start <= int(item[0]) or start >= int(item[1]):
                pass
            else:
                are_points_ok = False
                if verbose == True:
                    print(start, "is in a gap and will be discarded")
        if are_points_ok == True:
            # NOTE(review): unlike gen_rnd_breaks, the tuple holds the bare
            # chromosome name without the 'chr' prefix
            list_of_breakpoints.append((chromosome, start))
            if verbose == True:
                print(
                    start,
                    "is OK",
                    len(list_of_breakpoints),
                    "good breaks generated out of",
                    how_many,
                )
    if verbose == True:
        print(
            how_many,
            "breakpoint have been generated in chromosome",
            chromosome,
            "in",
            time.time() - start_time,
            "seconds",
        )
    return list_of_breakpoints
# gen_rnd_single_break(verbose=True)
# ## Generate single breaks dataset
# import time
# start = time.time()
# breaks_on_1 = gen_rnd_single_break(how_many=19147,verbose=False)
# for item in breaks_on_1:
# print(str(item[0])+'\t'+str(item[1]))
# print('Done in', time.time()-start,'seconds..')
# ## Generate a control file
# list_brkps = gen_rnd_single_break(how_many=20873, chromosome='1', GapTable_file='/Users/amarcozzi/Desktop/current_brkps_DB/out_ALL_gap.txt', verbose=True)
# with open('/Users/amarcozzi/Desktop/current_brkps_DB/out_chr1_control.txt','w') as f:
# for item in list_brkps:
# f.write(list_to_line(item,'\t')+'\n')
# ## Generate multiple controls
# import time
# from threading import Thread
# start_time = time.time()
# threads = 0
# running_threads = 0
# max_simultaneous_threads = 20
# GapTable_file = '/Users/amarcozzi/Desktop/Projects/Anne_Project/current_brkps_DB/out_ALL_gap.txt'
# chromosome = 'Y'
# infile = '/Users/amarcozzi/Desktop/Projects/Anne_Project/current_brkps_DB/out_chr'+chromosome+'.txt'
# how_many = 0
# for line in yield_file(infile):
# if line.startswith(chromosome+'\t'):
# how_many += 1
# print('found',how_many,'breakpoints in chromosome',chromosome)
# while threads < 100:
# while running_threads >= max_simultaneous_threads:
# time.sleep(1)
# running_threads += 1
# outfile = '/Users/amarcozzi/Desktop/Projects/Anne_Project/current_brkps_DB/controls/out_chr'+chromosome+'_control_'+str(threads)+'.txt'
# print('thread', threads, '|', 'running threads:',running_threads)
# Thread(target=gen_controls, args=(how_many,chromosome,GapTable_file,outfile)).start()
# threads += 1
# print('Waiting for threads to finish...')
# while running_threads > 0:
# time.sleep(1)
# end_time = time.time()
# print('\nDone in',(end_time-start_time)/60,'minutes')
def kmers_finder(sequence_dict, motif_length, min_repetition):
    """(dict, int, int) => OrderedDict(sorted(list))
    Find all the motifs of length `motif_length` repeated at least
    `min_repetition` times across all sequences in `sequence_dict`.
    Return an OrderedDict with motif:count pairs sorted by count
    (descending).
    """
    counts = {}
    for _id, sequence in sequence_dict.items():
        # slide a window of `motif_length` over each sequence
        for i in range(len(sequence) - motif_length + 1):
            motif = sequence[i : i + motif_length]
            counts[motif] = counts.get(motif, 0) + 1
    # single-pass filter instead of collecting keys and deleting them
    frequent = {m: c for m, c in counts.items() if c >= min_repetition}
    # Return a sorted dictionary
    return OrderedDict(sorted(frequent.items(), key=itemgetter(1), reverse=True))
def kmers_finder_with_mismatches(
    sequence, motif_length, max_mismatches, most_common=False
):
    """(str, int, int) => OrderedDict
    Find the most frequent k-mers with mismatches in a string.
    Input: A sequence and a pair of integers: motif_length (<=12) and max_mismatch (<= 3).
    Output: An OrderedDict mapping each observed k-mer to the total count of
    observed k-mers within `max_mismatches` substitutions of it, sorted by
    that total (descending).  With most_common=True, only the k-mers tied
    for the maximum total are returned.
    Sample Input: ACGTTGCATGTCGCATGATGCATGAGAGCT 4 1
    Sample Output: OrderedDict([('ATGC', 5), ('ATGT', 5), ('GATG', 5),...])
    """
    # Validate arguments.  The original tests had a precedence bug
    # (`not a <= b and a >= c`) and never rejected values below range.
    if not 1 <= motif_length <= 12:
        raise ValueError(
            "motif_length must be between 0 and 12. {} was passed.".format(motif_length)
        )
    if not 0 <= max_mismatches <= 3:
        raise ValueError(
            "max_mismatch must be between 0 and 3. {} was passed.".format(
                max_mismatches
            )
        )
    # Count every observed k-mer
    counts = {}
    for i in range(len(sequence) - motif_length + 1):
        motif = sequence[i : i + motif_length]
        counts[motif] = counts.get(motif, 0) + 1
    # For each observed k-mer, sum the counts of all observed k-mers that
    # lie within `max_mismatches` substitutions of it
    totals = {}
    for kmer in counts:
        total = 0
        for other_kmer, n in counts.items():
            mismatches = sum(1 for a, b in zip(kmer, other_kmer) if a != b)
            if mismatches <= max_mismatches:
                total += n
        totals[kmer] = total
    result = OrderedDict(sorted(totals.items(), key=itemgetter(1), reverse=True))
    if most_common:
        # py3 fix: dict views are not indexable, so take the first (= max)
        # value via an iterator; also fixes the case where *all* entries
        # tie for the maximum (the old loop fell through without returning)
        top = next(iter(result.values()))
        return OrderedDict((k, v) for k, v in result.items() if v == top)
    return result
def line_to_list(line, char):
    """Split *line* at the positions of *char* and return the pieces.

    Newline characters are stripped from every piece and empty interior
    pieces are dropped; the tail after the last separator is always
    appended.  Allows more customization than a plain str.split().
    """
    pieces = []
    previous = 0
    for index in find(line, char):  # `find` yields the split positions
        cleaned = line[previous:index].replace("\n", "").replace("\r", "")
        if cleaned != "":  # skips empty 'cells'
            pieces.append(cleaned)
        previous = index + 1
    # the last item is appended unconditionally, even when empty
    pieces.append(line[previous:].replace("\n", "").replace("\r", ""))
    return pieces
# print(line_to_list('Makes a list of string out of a line. Splits the word at char.', ' '))
def list_to_line(list_, char):
    """Join the items of *list_* into a single string separated by *char*.

    Bug fix: the previous implementation appended the separator after
    every item and then used rstrip(char), which also stripped trailing
    separator characters that belonged to the last item itself.
    """
    return char.join(str(item) for item in list_)
# print(list_to_line(['prova', '1', '2', '3', 'prova'], '---'))
def list_of_files(path, extension, recursive=False):
    """
    Yield the path of every file under *path* with the target *extension*.
    When *recursive* is true, subfolders are scanned as well.
    """
    if recursive:
        for root, dirs, files in os.walk(path):
            yield from glob.iglob(root + "/*." + extension)
    else:
        yield from glob.iglob(path + "/*." + extension)
def merge_gaps(gap_list):
    """
    Merge overlapping/contiguous gaps in a gap list.
    The gap list is in the form: [('3','4'),('5','6'),('6','7'),('8','9'),('10','11'),('15','16'),('17','18'),('18','19')]
    Returns a new list containing the merged gaps: [('3','4'),('5','7'),('8','9'),('10','11'),('15','16'),('17','19')]

    Bounds are compared as ints, so string tuples work too.
    Bug fixes: no longer destructively empties the caller's list, and the
    end-of-list condition is handled explicitly instead of via a bare
    except that would also have hidden ValueError from bad bounds.
    """
    remaining = list(gap_list)  # work on a copy; do not mutate the input
    merged_gaps = []
    while remaining:
        if len(remaining) > 1 and int(remaining[0][1]) >= int(remaining[1][0]):
            # current gap touches/overlaps the next one: fuse and rescan
            remaining[1] = (remaining[0][0], remaining[1][1])
            remaining.pop(0)
        else:
            merged_gaps.append(remaining.pop(0))
    return merged_gaps
# gap_list = [('3','4'),('5','6'),('6','7'),('8','9'),('10','11'),('15','16'),('17','18'),('18','19')]
# expected = [('3','4'),('5','7'),('8','9'),('10','11'),('15','16'),('17','19')]
# prova = merge_gaps(gap_list)
# print(prova)
# print(expected)
def merge_sort(intervals):
    """
    Sort *intervals* by their lower bound, then merge every overlap.
    Alternative to merge_gaps() that sorts the list before merging.
    Should be faster but the two have not been benchmarked yet.
    """
    merged = []
    for interval in sorted(intervals, key=lambda tup: tup[0]):
        # sorting guarantees merged[-1][0] <= interval[0]
        if merged and interval[0] <= merged[-1][1]:
            # overlap with the last merged interval: extend its upper bound
            low, high = merged[-1]
            merged[-1] = (low, max(high, interval[1]))
        else:
            merged.append(interval)
    return merged
def multi_threads_fusion_genes_annotation(
    folder_path, extension, max_simultaneous_threads
):
    """ Executes annotate_fusion_genes() for each dataset file in a folder.
    Each execution run on a different thread, throttled so that at most
    `max_simultaneous_threads` run at the same time."""
    # NOTE(review): `running_threads` is a module-level counter; it is
    # incremented here and presumably decremented by annotate_fusion_genes
    # when it finishes -- confirm, otherwise the throttle never releases.
    global running_threads
    dataset_files = list_of_files(folder_path, extension)
    threads = 0
    running_threads = 0
    for file_ in dataset_files:
        # throttle: busy-wait until a slot frees up
        while running_threads >= max_simultaneous_threads:
            time.sleep(1)
        threads += 1
        running_threads += 1
        print("thread", threads, "|", "running threads:", running_threads)
        Thread(
            target=annotate_fusion_genes, args=(file_,)
        ).start()  # with multithreading
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public'
# multi_threads_fusion_genes_annotation(folder, 'txt',50)
def pandize_dataset(annotated_dataset, verbose=True):
    """
    Prepare a dataset file to be "pandas ready".

    Parses a tab-separated file (first column as index, ChrA/ChrB forced
    to str) and maps the frame/phase labels to 1/0.
    Takes a file path as input and returns the resulting DataFrame.
    """

    def _log(action):
        # build the dotted progress banner in one place
        message = action + " " + annotated_dataset.split("/")[-1]
        spacer = (100 - len(message)) * "."
        print(message, spacer)

    # Parse
    if verbose:
        _log("parsing")
    # pd.io.parsers.read_table was deprecated and removed in pandas 2.0;
    # pd.read_csv with sep='\t' is the supported equivalent
    dataset = pd.read_csv(
        annotated_dataset, dtype={"ChrA": "str", "ChrB": "str"}, sep="\t", index_col=0
    )
    if verbose:
        print("OK")
    # Clean: encode the frame/phase labels as 1/0 in a single pass
    if verbose:
        _log("cleaning")
    dataset = dataset.replace(
        {"In Frame": 1, "Not in Frame": 0, "In Phase": 1, "Not in Phase": 0}
    )
    if verbose:
        print("OK")
    return dataset
# pandize_dataset('test_data_annotated.txt')
# pandize_dataset('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/control_dataset_100-1000-150000_annotated.txt')
def parse_blastXML(infile):
    """
    Parse a BLAST XML output file and print every alignment HSP.

    Fix: the file handle is now closed via a context manager instead of
    being leaked by the inline open().
    """
    with open(infile) as handle:
        for blast_record in NCBIXML.parse(handle):
            for alignment in blast_record.alignments:
                for hsp in alignment.hsps:
                    print("*****Alignment****")
                    print("sequence:", alignment.title)
                    print("length:", alignment.length)
                    print("e-value:", hsp.expect)
                    print(hsp.query)
                    print(hsp.match)
                    print(hsp.sbjct)
# to be tested
def reverse(sequence):
    """Return *sequence* reversed, joined into a string."""
    return "".join(reversed(sequence))
def complement(sequence):
    """Return the complement of a DNA *sequence*, preserving case.

    Raises KeyError on characters other than A/C/G/T/N (either case).
    Bug fix: the previous version upper-cased the input before the table
    lookup, which made the lowercase table entries unreachable and forced
    the output to upper case.
    """
    table = {
        "A": "T", "a": "t", "T": "A", "t": "a",
        "C": "G", "c": "g", "G": "C", "g": "c",
        "N": "N", "n": "n",
    }
    return "".join(table[base] for base in sequence)
def get_mismatches(template, primer, maxerr, overlapped=False):
    """Find every occurrence of *primer* in *template* allowing up to
    *maxerr* errors (fuzzy matching via the third-party `regex` module)."""
    pattern = f"({primer}){{e<={maxerr}}}"
    return regex.findall(pattern, template, overlapped=overlapped)
def pcr(template, primer_F, primer_R, circular=False):
    """Simulate a PCR and return the product string.

    Locates primer_F (or its largest unique 3' part) and the reverse
    complement of primer_R (or its largest unique 5' part) in *template*,
    then concatenates primer_F + the spanned template + the reverse
    complement of primer_R.  Relies on re, reverse() and complement().
    """
    if circular:  ##works only with primers without 5' overhang
        # rotate the circular template so it starts at primer_F
        i = template.upper().find(primer_F.upper())
        template = template[i:] + template[:i]
    # Find primer_F, or the largest 3'part of it, in template
    for n in range(len(primer_F)):
        ix_F = [m.end() for m in re.finditer(primer_F[n:].upper(), template.upper())]
        if len(ix_F) == 1:  # it's unique
            break
        n += 1  # NOTE(review): no-op -- `for` rebinds n on each iteration
    # Find primer_R, or the largest 5'part of it, in template
    rc_R = reverse(complement(primer_R))
    for n in range(len(primer_R)):
        ix = [m.start() for m in re.finditer(rc_R[:n].upper(), template.upper())]
        if len(ix) == 1:  # it's unique
            ix_R = ix[:]
        # NOTE(review): if no unique match is ever seen, ix_R stays unbound
        # and the return below raises NameError -- confirm inputs always match
        if len(ix) < 1:  # it's the largest possible
            break
        n += 1
    # Build the product
    return primer_F + template[ix_F[0] : ix_R[0]] + rc_R
##template = 'CTAGAGAGGGCCTATTTCCCATGATT--something--GCCAATTCTGCAGACAAATGGGGTACCCG'
##primer_F = 'GACAAATGGCTCTAGAGAGGGCCTATTTCCCATGATT'
##primer_R = 'TTATGTAACGGGTACCCCATTTGTCTGCAGAATTGGC'
##product = pcr(template,primer_F,primer_R)
##expected = 'GACAAATGGCTCTAGAGAGGGCCTATTTCCCATGATT--something--GCCAATTCTGCAGACAAATGGGGTACCCGTTACATAA'
##expected == result
def pip_upgrade_all(executable=False):
    """
    Upgrade pip itself and then every pip-installed package.
    Requires a bash shell (uses grep/cut/xargs).

    If *executable* is given (path/name of a Python interpreter), only
    that interpreter's pip is used; otherwise python, python2, python3,
    pypy and pypy3 are upgraded in turn.
    """

    def _upgrade(interp, label):
        # single implementation of the upgrade recipe (the original
        # repeated this block five times)
        print("upgrading {}...".format(label))
        call("{} -m pip install --upgrade pip".format(interp), shell=True)
        call(
            "{0} -m pip freeze --local | grep -v '^\\-e' | cut -d = -f 1 "
            "| xargs -n1 {0} -m pip install -U".format(interp),
            shell=True,
        )

    if executable:
        _upgrade(executable, "pip")
        print("done")
    else:
        _upgrade("python", "pip")
        _upgrade("python2", "pip2")
        _upgrade("python3", "pip3")
        _upgrade("pypy", "pypy-pip")
        _upgrade("pypy3", "pypy3-pip")
def probability(p, n, k):
    """
    Simple binomial probability calculator.
    Calculates the probability that exactly *k* events occur in *n*
    trials when each event has probability *p* of occurring once.
    Example: What is the probability of having 3 Heads by flipping a coin 10 times?
    probability = probability(0.5, 10, 3)
    print(probability) => (15/128) = 0.1171875
    """
    p = float(p)
    # math.factorial() rejects floats on Python >= 3.10, so keep n and k
    # as ints; math.comb computes C(n, k) without huge intermediates
    n = int(n)
    k = int(k)
    C = math.comb(n, k)
    return C * (p ** k) * (1 - p) ** (n - k)
# from math import factorial
# print(probability(0.5,10,3))
# print(probability(0.5,1,1))
def process(real_dataset):
    """
    Generate, annotate and sort a control dataset for *real_dataset*.

    The control file is derived from the input name ('_control.txt'),
    annotated ('_annotated.txt') and finally sorted in place by
    sort_dataset().
    """
    gen_control_dataset(real_dataset)
    control = real_dataset[:-4] + "_control.txt"
    annotate_fusion_genes(control)
    sort_dataset(control[:-4] + "_annotated.txt")
    print(real_dataset, "processed. All OK.")
# process('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/clinvarCnv-DeletionsOnly.txt')
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/random'
# for item in list_of_files(folder,'txt'):
# process(item)
def query_encode(chromosome, start, end):
    """
    Queries ENCODE via http://promoter.bx.psu.edu/ENCODE/search_human.php
    Parses the output and returns a dictionary of CIS elements found and the relative location.
    Requires network access; HTML is parsed with BeautifulSoup.
    """
    ## Regex setup: matches coordinate strings like 'chr2:10000-20000'
    re1 = "(chr{})".format(chromosome)  # The specific chromosome
    re2 = "(:)"  # Any Single Character ':'
    re3 = "(\\d+)"  # Integer
    re4 = "(-)"  # Any Single Character '-'
    re5 = "(\\d+)"  # Integer
    rg = re.compile(re1 + re2 + re3 + re4 + re5, re.IGNORECASE | re.DOTALL)
    ## Query ENCODE
    std_link = (
        "http://promoter.bx.psu.edu/ENCODE/get_human_cis_region.php?assembly=hg19&"
    )
    query = std_link + "chr=chr{}&start={}&end={}".format(chromosome, start, end)
    print(query)
    html_doc = urlopen(query)
    html_txt = BeautifulSoup(html_doc, "html.parser").get_text()
    data = html_txt.split("\n")
    ## Parse the output
    parsed = {}
    # element names appear two lines above each 'Coordinate' marker
    coordinates = [i for i, item_ in enumerate(data) if item_.strip() == "Coordinate"]
    elements = [data[i - 2].split(" ")[-1].replace(": ", "") for i in coordinates]
    blocks = [item for item in data if item[:3] == "chr"]
    # NOTE(review): assumes `elements` and `blocks` stay aligned 1:1 --
    # the sanity check below was left commented out, so a mismatch would
    # raise IndexError instead of being reported
    # if len(elements) == len(blocks):
    i = 0
    for item in elements:
        txt = blocks[i]
        m = rg.findall(txt)
        bins = ["".join(item) for item in m]
        parsed.update({item: bins})
        i += 1
    return parsed
# cis_elements = query_encode(2,10000,20000)
def run_perl(perl_script_file, input_perl_script):
    """
    Run an external Perl script on the given input file and return its
    raw stdout (bytes).
    """
    command = ["perl", perl_script_file, input_perl_script]
    return check_output(command)
# print(run_perl('FusionGenes_Annotation.pl', 'test_data.txt'))
def run_py(code, interp="python3"):
    """Run a block of Python *code* using the target interpreter.

    The code is written to ./tmp.py and executed; returns raw stdout
    (bytes).  Bug fix: the body referenced the undefined name `interpr`
    while the parameter is called `interp`, so every call raised
    NameError (compare run_pypy, which is consistent).
    """
    with open("tmp.py", "w") as f:
        for line in code.split("\n"):
            f.write(line + "\n")
    return check_output([interp, "tmp.py"])
def run_pypy(code, interpr="pypy3"):
    """Run a block of Python *code* with PyPy.

    The code is written to ./tmp.py and executed with *interpr*;
    returns raw stdout (bytes).
    """
    lines = code.split("\n")
    with open("tmp.py", "w") as f:
        f.writelines(line + "\n" for line in lines)
    return check_output([interpr, "tmp.py"])
def sequence_from_gene(gene_name):  # beta
    """
    Download the nucleotide sequence of *gene_name* from NCBI.

    Gene coordinates come from pyensembl (Ensembl release 75, GRCh37);
    the sequence itself is fetched from NCBI Entrez.  Requires network
    access.  Returns the sequence as a string, or False when NCBI has no
    sequence for the resolved coordinates.
    """
    data = EnsemblRelease(75)
    Entrez.email = "a.marcozzi@umcutrecht.nl"  # Always tell NCBI who you are
    # RefSeq accessions of the GRCh37 chromosome records
    NCBI_IDS = {
        "1": "NC_000001",
        "2": "NC_000002",
        "3": "NC_000003",
        "4": "NC_000004",
        "5": "NC_000005",
        "6": "NC_000006",
        "7": "NC_000007",
        "8": "NC_000008",
        "9": "NC_000009",
        "10": "NC_000010",
        "11": "NC_000011",
        "12": "NC_000012",
        "13": "NC_000013",
        "14": "NC_000014",
        "15": "NC_000015",
        "16": "NC_000016",
        "17": "NC_000017",
        "18": "NC_000018",
        "19": "NC_000019",
        "20": "NC_000020",
        "21": "NC_000021",
        "22": "NC_000022",
        "X": "NC_000023",
        "Y": "NC_000024",
    }
    # NOTE(review): only the first gene object returned for the name is
    # used -- confirm that ambiguous gene names are not a concern here
    gene_obj = data.genes_by_name(gene_name)
    target_chromosome = NCBI_IDS[gene_obj[0].contig]
    seq_start = int(gene_obj[0].start)
    seq_stop = int(gene_obj[0].end)
    strand = 1 if gene_obj[0].strand == "+" else 2
    try:
        handle = Entrez.efetch(
            db="nucleotide",
            id=target_chromosome,
            rettype="fasta",
            strand=strand,  # "1" for the plus strand and "2" for the minus strand.
            seq_start=seq_start,
            seq_stop=seq_stop,
        )
        record = SeqIO.read(handle, "fasta")
        handle.close()
        sequence = str(record.seq)
        return sequence
    except ValueError:
        print("ValueError: no sequence found in NCBI")
        return False
def sortby_chr(string):
    """
    Sort key for chromosome labels: '1'..'22' sort numerically, then
    X maps to 23 and Y to 24.  To be used as sorted(..., key=sortby_chr).

    Plain sorted() on the string labels would yield
    ['1','10','11',...,'2','20',...,'X','Y'] instead of the natural order.
    """
    special = {"X": 23, "Y": 24}
    if string in special:
        return special[string]
    return int(string)
# prova = ['1','10','11','9','2','20','3','X','Y']
# print('sorted()', sorted(prova))
# print('sortby_chr()', sorted(prova, key=sortby_chr))
def sort_dataset(dataset_file, overwrite=False):
    """
    Sort a tab-separated dataset by its ChrA column (second column).
    It helps during plotting.

    Expects exactly one '##'-prefixed header line.  Writes the sorted
    data to <name>_sorted.txt, or over the input file when *overwrite*
    is true.  Returns None (with a message) if the header is missing or
    duplicated.
    """
    text = []
    header_counter = 0
    header = False
    print("Sorting...")
    with open(dataset_file, "r") as infile:
        for line in infile:
            list_ = line_to_list(line, "\t")
            if line[:2] == "##":
                header = list_
                header_counter += 1
            else:
                text.append(list_)
    # checkpoint: exactly one header line must have been found
    if header == False or header_counter > 1:
        print("Something is wrong with the header line...", header_counter, header)
        return None
    # sort by the second element of each row, i.e. 'ChrA'
    text.sort(key=lambda x: sortby_chr(x[1]))
    # Write output
    if not overwrite:
        outpath = dataset_file[:-4] + "_sorted.txt"
    else:
        # bug fix: previously referenced the undefined name `dataset_files`,
        # so overwrite=True always raised NameError
        outpath = dataset_file
    with open(outpath, "w") as outfile:
        outfile.write(list_to_line(header, "\t") + "\n")
        for list_ in text:
            outfile.write(list_to_line(list_, "\t") + "\n")
    print("Done!")
# sort_dataset('test_data.txt')
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public'
# for item in list_of_files(folder, 'txt'):
# sort_dataset(item)
# sort_dataset('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/annotated/dgvMerged-DeletionsOnly_annotated.txt')
def split_fasta_file(infile):  # beta
    """
    Split a FASTA file containing multiple sequences into multiple files
    containing one sequence each (one sequence per file).
    Output files are named '<header>.txt' and written to the current
    working directory.
    """

    def _dump(outfile, lines, length):
        # write one buffered record to its own file
        with open(outfile, "w") as out:
            out.writelines(lines)
        print("{} bases written".format(length))

    outfile = None
    lines = []
    length = 0
    with open(infile, "r") as f:
        for line in f:
            if line.startswith(">"):
                if outfile is not None:
                    # flush the previous record before starting a new one
                    _dump(outfile, lines, length)
                    length = 0
                outfile = "{}.txt".format(line[1:].strip())
                print("writing {}".format(outfile))
                lines = [line]
            else:
                lines.append(line)
                length += len(line.strip())
    # Write the last record; the guard fixes a NameError that the old
    # version raised on an empty input file (outfile/lines were unbound)
    if outfile is not None:
        _dump(outfile, lines, length)
def substract_datasets(infile_1, infile_2, outfile, header=True):
    """
    Takes two files containing tab delimited data, compares them and
    writes to *outfile* the rows that are present in infile_2 but not in
    infile_1.  Comment lines starting with '#' are skipped.  When
    *header* is true, the first data line of infile_1 is treated as a
    shared header: it is copied to the output and the first data line of
    infile_2 is skipped as well.
    """
    comment_line = (
        "# dataset generated by substracting " + infile_1 + " to " + infile_2 + "\n"
    )
    # bug fix: first_line was unbound when header=False, which made the
    # final write raise NameError
    first_line = ""
    skip_header_2 = False
    with open(infile_1) as f1:
        lines_1 = f1.readlines()
    with open(infile_2) as f2:
        lines_2 = f2.readlines()
    rows_to_remove = []
    for line in lines_1:
        if line[0] == "#":  # skips comments
            continue
        if header:
            header = False  # only the first data line is the header
            skip_header_2 = True  # skip the same header in the second file
            first_line = line
        else:
            rows_to_remove.append(line_to_list(line, "\t"))
    result_list = []
    for line in lines_2:
        if line[0] == "#":  # skips comments
            continue
        if skip_header_2:
            skip_header_2 = False
        else:
            item = line_to_list(line, "\t")
            if item not in rows_to_remove:
                result_list.append(item)
    with open(outfile, "w") as out:
        out.write(comment_line)
        out.write(first_line)
        for item in result_list:
            out.write(list_to_line(item, "\t") + "\n")
    print("substraction of two datasets DONE")
# substract_datasets('dataset_1_b.txt', 'dataset_1.txt', 'dataset_1-1b.txt', header=True)
def yield_file(filepath):
    """
    A simple generator that yields the lines of a file one at a time.
    Good for reading large files without running out of memory.
    """
    with open(filepath, "r") as f:
        yield from f
# for line in yield_file('GRCh37_hg19_variants_2014-10-16.txt'):
# print(line[:20])
def read_in_chunks(file_object, chunk_size=1024):
    """
    Lazy generator that reads *file_object* piece by piece.
    Default chunk size: 1k.  Stops at end of file (empty read).
    """
    chunk = file_object.read(chunk_size)
    while chunk:
        yield chunk
        chunk = file_object.read(chunk_size)
| {
"repo_name": "25shmeckles/alefuncs",
"path": "alefuncs.py",
"copies": "1",
"size": "153788",
"license": "mit",
"hash": 2794238588809867300,
"line_mean": 30.5981097185,
"line_max": 289,
"alpha_frac": 0.5483587796,
"autogenerated": false,
"ratio": 3.434147648608816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44825064282088156,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alessio Rocchi'
import argparse
from lxml import etree
class SoftHandLoader(object):
    """Parse a SoftHand URDF file and extract the parameters of its
    adaptive-synergy transmission.

    Attributes:
        handParameters: dict finger -> joint -> {'r': reduction, 'e': elasticity}
        jointToLink: dict revolute (non-mimic) joint name -> first non-fake child link
    NOTE(review): Python 2 only -- uses file(), basestring, dict.has_key
    and dict.iteritems.
    """
    def __init__(self,filename):
        self.handParameters = dict()
        self.jointToLink = dict()
        # NOTE(review): takes a file *path*; the file is opened here
        self.urdf = etree.fromstring(file(filename).read())
        for transmission_el in self.urdf.iter('transmission'):
            for transmission_type_el in transmission_el.iter('type'):
                if isinstance(transmission_type_el.tag, basestring):
                    if transmission_type_el.text == 'transmission_interface/AdaptiveSynergyTransmission':
                        self.handParameters = self.parseTransmission(transmission_el)
        self.jointToLink = self.parseJointToLink()
    def parseTransmission(self, transmission_el):
        """Return {finger: {joint: {'r': reduction, 'e': elasticity}}} for
        every joint of the given transmission element."""
        handParams = dict()
        for joint_el in transmission_el.iter('joint'):
            if isinstance(joint_el.tag, basestring):
                joint_name = joint_el.get('name')
                # joint names follow the pattern soft_hand_<finger>_<joint>_joint
                _,_,finger,joint,_ = joint_name.split('_')
                for R_el in joint_el.iter('mechanicalReduction'):
                    if not handParams.has_key(finger):
                        handParams[finger] = dict()
                    handParams[finger][joint] = {'r':float(R_el.text)}
                for E_el in joint_el.iter('mechanicalElasticity'):
                    handParams[finger][joint]['e']=float(E_el.text)
        return handParams
    def parseJointToLink(self):
        """Map every revolute, non-mimic joint name to its child link."""
        jointToLink = dict()
        for joint_el in self.urdf.iter('joint'):
            if isinstance(joint_el.tag, basestring):
                if 'type' in joint_el.keys() and joint_el.get('type') == 'revolute':
                    joint_name = joint_el.get('name')
                    is_mimic = (joint_name.split('_')[-1]=='mimic')
                    if is_mimic:
                        continue
                    jointToLink[joint_name] = self.parseJointChildLink(joint_name)
        return jointToLink
    def parseJointChildLink(self,joint_name):
        """Return the first non-fake child link of *joint_name*,
        recursing through fake links via their child joints."""
        for joint_el in self.urdf.iter('joint'):
            if isinstance(joint_el.tag, basestring):
                if 'name' in joint_el.keys() and joint_el.get('name') == joint_name:
                    if 'type' in joint_el.keys() and joint_el.get('type') == 'revolute':
                        for child_link_el in joint_el.iter('child'):
                            if isinstance(joint_el.tag, basestring):
                                if 'link' in child_link_el.keys():
                                    link_name = child_link_el.get('link')
                                    # link names: soft_hand_<finger>_<phalanx-or-fake>_link
                                    _,_,_,fake,_ = link_name.split('_')
                                    is_fake = (fake == 'fake')
                                    if not is_fake:
                                        return link_name
                                    else:
                                        # fake link: follow the joint hanging off it
                                        childLinkChildJoint = self.parseChildWithParentLink(link_name)
                                        return self.parseJointChildLink(childLinkChildJoint)
        raise Exception('could not find child link for joint %s'%joint_name)
    def parseChildWithParentLink(self,link_name):
        """Return the name of the revolute joint whose parent link is *link_name*."""
        for joint_el in self.urdf.iter('joint'):
            if isinstance(joint_el.tag, basestring):
                if 'type' in joint_el.keys() and joint_el.get('type') == 'revolute':
                    for child_link_el in joint_el.iter('parent'):
                        if isinstance(joint_el.tag, basestring):
                            if 'link' in child_link_el.keys():
                                if child_link_el.get('link') == link_name:
                                    return joint_el.get('name')
        raise Exception('could not joint with parent link %s'%link_name)
    def jointToPhalanx(self, finger, joint_position):
        """Return the phalanx name driven by soft_hand_<finger>_<joint_position>_joint."""
        _,_,_,phalanx,_= self.jointToLink['soft_hand_%s_%s_joint'%(finger,joint_position)].split('_')
        return phalanx
    def phalanxToJoint(self, finger, phalanx):
        """Return the joint name whose child link is soft_hand_<finger>_<phalanx>_link."""
        for key,val in self.jointToLink.iteritems():
            if val == 'soft_hand_%s_%s_link'%(finger,phalanx):
                joint_name = key.split('_')[3]
                return joint_name
        raise Exception('could not find parent joint for link soft_hand_%s_%s_link'%(finger,phalanx))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(usage='SoftHandLoader soft_hand_urdf_file\nLoad an URDF file and gets transmission information out of it')
    parser.add_argument('urdf_file', type=argparse.FileType('r'), nargs='?',
                        default=None, help='URDF file. Use - for stdin')
    args = parser.parse_args()
    # Extract robot name and directory
    if args.urdf_file is None:
        print("Error! no urdf_file provided")
        exit()
    else:
        # SoftHandLoader expects a *path* (its __init__ re-opens the file),
        # so pass the name of the parsed file object, not the object itself
        loader = SoftHandLoader(args.urdf_file.name)
        # bug fix: the attribute is `handParameters`; `handParams` raised
        # AttributeError
        print(loader.handParameters)
| {
"repo_name": "lia2790/grasp_learning",
"path": "python/plugins/loaders/soft_hand_loader.py",
"copies": "2",
"size": "4871",
"license": "bsd-3-clause",
"hash": 7794803465480979000,
"line_mean": 45.3904761905,
"line_max": 143,
"alpha_frac": 0.5440361322,
"autogenerated": false,
"ratio": 4.082984073763621,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5627020205963621,
"avg_score": null,
"num_lines": null
} |
__author__ = "Alexander [Amper] Marshalov"
__email__ = "alone.amper+cityhash@gmail.com"
__icq__ = "87-555-3"
__jabber__ = "alone.amper@gmail.com"
__twitter__ = "amper"
__url__ = "http://amper.github.com/cityhash"
from setuptools import setup
from setuptools.extension import Extension
from setuptools.dist import Distribution
from os.path import join, dirname
# Detect SSE4.2 support so the hardware-accelerated flags can be added to
# CXXFLAGS below; fall back to a plain build when the optional py-cpuinfo
# package is missing or probing fails for any reason.
try:
    from cpuinfo import get_cpu_info
    cpu_info = get_cpu_info()
    have_sse42 = 'sse4.2' in cpu_info['flags']
except Exception:
    have_sse42 = False
# Prefer building the extension from the Cython source when Cython is
# installed; otherwise the pre-generated C++ file is compiled instead.
try:
    from Cython.Distutils import build_ext
except ImportError:
    USE_CYTHON = False
else:
    USE_CYTHON = True
class BinaryDistribution(Distribution):
    """
    Subclass the setuptools Distribution to flip the purity flag to false.
    See http://lucumr.pocoo.org/2014/1/27/python-on-wheels/
    """
    def is_pure(self):
        # Always report an impure (platform-specific) distribution so that
        # bdist_wheel builds platform wheels for the compiled extension.
        # TODO: check if this is still necessary with Python v2.7
        return False
# Base C++ compiler flags; the -Wno-* flags silence warnings coming from the
# vendored third-party CityHash sources.
CXXFLAGS = """
-O3
-Wno-unused-value
-Wno-unused-function
""".split()
# Enable the SSE4.2 CRC32 intrinsics when the build host supports them
# (have_sse42 was probed above via py-cpuinfo).
if have_sse42:
    CXXFLAGS.append('-msse4.2')
INCLUDE_DIRS = ['include']
CMDCLASS = {}
EXT_MODULES = []
if USE_CYTHON:
    # Cython available: build from the .pyx source and register the
    # cythonizing build_ext command.
    EXT_MODULES.append(
        Extension("cityhash", ["src/city.cc", "src/cityhash.pyx"],
                  language="c++",
                  extra_compile_args=CXXFLAGS,
                  include_dirs=INCLUDE_DIRS)
    )
    CMDCLASS['build_ext'] = build_ext
else:
    # No Cython: compile the checked-in, pre-generated C++ translation.
    EXT_MODULES.append(
        Extension("cityhash", ["src/city.cc", "src/cityhash.cpp"],
                  language="c++",
                  extra_compile_args=CXXFLAGS,
                  include_dirs=INCLUDE_DIRS)
    )
VERSION = '0.2.3.post9'
URL = "https://github.com/escherba/python-cityhash"
# Fallback long description, used when README.rst cannot be read.
LONG_DESCRIPTION = """
"""
def long_description():
    """Return the contents of README.rst next to this file.

    Falls back to the module-level LONG_DESCRIPTION when the file is
    missing or unreadable, so the build never fails on the description.
    """
    readme_path = join(dirname(__file__), 'README.rst')
    try:
        with open(readme_path, 'rb') as handle:
            return handle.read().decode('utf-8')
    except Exception:
        return LONG_DESCRIPTION
# Package metadata and build wiring; CMDCLASS/EXT_MODULES were prepared
# above depending on Cython availability, and BinaryDistribution forces
# platform (non-pure) wheels for the compiled extension.
setup(
    version=VERSION,
    description="Python-bindings for CityHash, a fast non-cryptographic hash algorithm",
    author="Alexander [Amper] Marshalov",
    author_email="alone.amper+cityhash@gmail.com",
    url=URL,
    download_url=URL + "/tarball/master/" + VERSION,
    name='cityhash',
    license='MIT',
    cmdclass=CMDCLASS,
    ext_modules=EXT_MODULES,
    keywords=['hash', 'hashing', 'cityhash'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: C++',
        'Programming Language :: Cython',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities'
    ],
    long_description=long_description(),
    distclass=BinaryDistribution,
)
| {
"repo_name": "escherba/python-cityhash",
"path": "setup.py",
"copies": "2",
"size": "3280",
"license": "mit",
"hash": -9014873458279009000,
"line_mean": 26.1074380165,
"line_max": 88,
"alpha_frac": 0.6176829268,
"autogenerated": false,
"ratio": 3.664804469273743,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0020950773342944054,
"num_lines": 121
} |
__author__ = 'Alexander Black'
import socket
import select
import sys
def prompt():
    """Print the '> ' input prompt (no newline) and flush immediately."""
    stream = sys.stdout
    stream.write("> ")
    stream.flush()
class Client(object):
    """Minimal TCP chat client (Python 2).

    Reads host and port from sys.argv, connects on construction, then
    relays lines between stdin and the server socket forever.
    """
    def __init__(self):
        # Host/port come straight from the command line: client.py HOST PORT.
        self.host = sys.argv[1]
        self.port = int(sys.argv[2])
        self.sock = None
        self.connect_to_server()
    def connect_to_server(self):
        """Open the TCP connection and hand off to the message loop."""
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # 2-second timeout so an unreachable server fails fast.
        self.sock.settimeout(2)
        # connect to remote host
        try:
            self.sock.connect((self.host, self.port))
        except:
            print 'Unable to connect'
            sys.exit()
        print 'Connected to remote host. Start sending messages'
        prompt()
        self.wait_for_messages()
    def wait_for_messages(self):
        """Multiplex stdin and the server socket with select(), forever."""
        while 1:
            socket_list = [sys.stdin, self.sock]
            # Get the list sockets which are readable
            read_sockets, write_sockets, error_sockets = select.select(socket_list, [], [])
            for sock in read_sockets:
                # incoming message from remote server
                if sock == self.sock:
                    data = sock.recv(4096)
                    if not data:
                        # Empty recv() means the server closed the connection.
                        print '\nDisconnected from chat server'
                        sys.exit()
                    else:
                        # print data
                        sys.stdout.write(data)
                        prompt()
                # user entered a message
                else:
                    msg = sys.stdin.readline()
                    self.sock.send(msg)
                    prompt()
if __name__ == '__main__':
    # Constructing the client connects and blocks in the message loop.
    client = Client()
| {
"repo_name": "alexwhb/simple-python-chat-server",
"path": "client.py",
"copies": "1",
"size": "1667",
"license": "mit",
"hash": -8102219238538165000,
"line_mean": 26.7833333333,
"line_max": 91,
"alpha_frac": 0.4949010198,
"autogenerated": false,
"ratio": 4.481182795698925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5476083815498924,
"avg_score": null,
"num_lines": null
} |
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
# Includes a further addition of SemiSupervision via partial labelling of the data
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.neighbors import BallTree
from sklearn.base import BaseEstimator
from sklearn.utils import check_array
from sklearn.utils import check_random_state
from sklearn.utils.extmath import _ravel
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from sklearn.utils.fixes import astype
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, labels, label_importance, rep_sample,
                         desired_perplexity, verbose):
    """Turn condensed pairwise distances into symmetrized joint
    probabilities p_ij whose conditional perplexity matches
    ``desired_perplexity``.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed distance matrix (diagonal and duplicates omitted).
    labels : array, shape (n_samples,)
        Integer label per sample; unlabelled samples carry -1.
    label_importance : float
        Strength of the label-induced deviation from a uniform prior.
    rep_sample : boolean
        Whether the partial labelling represents the full (unknown)
        labelling's class proportions.
    desired_perplexity : float
        Target perplexity of the conditional distributions.
    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    distances = astype(distances, np.float32, copy=False)
    labels = astype(labels, np.int64, copy=False)
    cond_P = _utils._binary_search_perplexity(
        distances, None, labels, label_importance,
        rep_sample, desired_perplexity, verbose)
    # Symmetrize, then normalize into a joint distribution; clamp at machine
    # epsilon so later log() calls stay finite.
    sym_P = cond_P + cond_P.T
    total = np.maximum(np.sum(sym_P), MACHINE_EPSILON)
    return np.maximum(squareform(sym_P) / total, MACHINE_EPSILON)
def _joint_probabilities_nn(distances, neighbors, labels, label_importance,
                            rep_sample, desired_perplexity, verbose):
    """Nearest-neighbor variant of :func:`_joint_probabilities`.

    Restricting each conditional distribution to the K nearest neighbors
    brings the cost from O(N^2) down toward O(uN) while remaining
    approximately equal to the dense computation.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed distance matrix (diagonal and duplicates omitted).
    neighbors : array, shape (n_samples, K)
        Indices of each sample's K nearest neighbors.
    labels : array, shape (n_samples,)
        Integer label per sample; unlabelled samples carry -1.
    label_importance : float
        Strength of the label-induced deviation from a uniform prior.
    rep_sample : boolean
        Whether the partial labelling represents the full (unknown)
        labelling's class proportions.
    desired_perplexity : float
        Target perplexity of the conditional distributions.
    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    distances = astype(distances, np.float32, copy=False)
    labels = astype(labels, np.int64, copy=False)
    neighbors = astype(neighbors, np.int64, copy=False)
    cond_P = _utils._binary_search_perplexity(
        distances, neighbors, labels, label_importance,
        rep_sample, desired_perplexity, verbose)
    assert np.all(np.isfinite(cond_P)), "All probabilities should be finite"
    # Symmetrize and normalize; clamp at machine epsilon for finite logs.
    sym_P = cond_P + cond_P.T
    total = np.maximum(np.sum(sym_P), MACHINE_EPSILON)
    joint_P = np.maximum(squareform(sym_P) / total, MACHINE_EPSILON)
    assert np.all(np.abs(joint_P) <= 1.0)
    return joint_P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
                   skip_num_points=0):
    """t-SNE objective function: KL divergence of p_ijs and q_ijs and
    its gradient.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.
    skip_num_points : int (optional, default:0)
        This does not compute the gradient for points with indices below
        `skip_num_points`. This is useful when computing transforms of new
        data where you'd like to keep the old data fixed.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the Kullback-Leibler divergence with respect to
        the embedding.
    """
    X_embedded = params.reshape(n_samples, n_components)
    # Q is a heavy-tailed distribution: Student's t-distribution
    n = pdist(X_embedded, "sqeuclidean")
    n += 1.
    n /= degrees_of_freedom
    n **= (degrees_of_freedom + 1.0) / -2.0
    Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
    # Optimization trick below: np.dot(x, y) is faster than
    # np.sum(x * y) because it calls BLAS
    # Objective: C (Kullback-Leibler divergence of P and Q)
    kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
    # Gradient: dC/dY
    grad = np.ndarray((n_samples, n_components))
    PQd = squareform((P - Q) * n)
    for i in range(skip_num_points, n_samples):
        # FIX: ndarray.ravel() replaces the private sklearn helper `_ravel`
        # (removed from sklearn.utils.extmath); a row of a squareform matrix
        # is a plain ndarray, so this is equivalent.
        np.dot(PQd[i].ravel(), X_embedded[i] - X_embedded, out=grad[i])
    grad = grad.ravel()
    c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    grad *= c
    return kl_divergence, grad
def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples,
                         n_components):
    """t-SNE objective function: only the KL-divergence value (no gradient).

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : array
        Joint probability matrix, condensed or square.
    neighbors : array (n_samples, K)
        Unused; present only so the signature matches the gradient
        functions.
    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    """
    X_embedded = params.reshape(n_samples, n_components)
    # Student-t kernel over squared euclidean distances in the embedding.
    kernel = pdist(X_embedded, "sqeuclidean")
    kernel += 1.
    kernel /= degrees_of_freedom
    kernel **= (degrees_of_freedom + 1.0) / -2.0
    Q = np.maximum(kernel / (2.0 * np.sum(kernel)), MACHINE_EPSILON)
    # np.dot instead of np.sum(x * y): it dispatches to BLAS.
    if len(P.shape) == 2:
        P = squareform(P)
    return 2.0 * np.dot(P, np.log(P / Q))
def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples,
                      n_components, angle=0.5, skip_num_points=0,
                      verbose=False):
    """t-SNE objective function: KL divergence of p_ijs and q_ijs,
    computed with Barnes-Hut tree methods in O(N log N) instead of O(N^2).

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : array
        Joint probability matrix, condensed or square.
    neighbors : int64 array, shape (n_samples, K)
        Element [i, j] is the index of the jth closest neighbor of point i.
    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.
    angle : float (default: 0.5)
        Barnes-Hut speed/accuracy trade-off (theta in the reference paper):
        distant nodes of angular size below `angle` are summarized by a
        single point. Values in 0.2 - 0.8 behave similarly.
    skip_num_points : int (optional, default:0)
        Do not compute gradients for points with indices below this value
        (useful to keep previously embedded data fixed).
    verbose : int
        Verbosity level.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the divergence w.r.t. the embedding.
    """
    params = astype(params, np.float32, copy=False)
    X_embedded = params.reshape(n_samples, n_components)
    neighbors = astype(neighbors, np.int64, copy=False)
    # The C routine expects a dense float32 square probability matrix.
    if len(P.shape) == 1:
        P_matrix = squareform(P).astype(np.float32)
    else:
        P_matrix = P.astype(np.float32)
    grad = np.zeros(X_embedded.shape, dtype=np.float32)
    error = _barnes_hut_tsne.gradient(P_matrix, X_embedded, neighbors,
                                      grad, angle, n_components, verbose,
                                      dof=degrees_of_freedom)
    scale = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    flat_grad = grad.ravel()
    flat_grad *= scale
    return error, flat_grad
def _gradient_descent(objective, p0, it, n_iter, objective_error=None,
                      n_iter_check=1, n_iter_without_progress=50,
                      momentum=0.5, learning_rate=1000.0, min_gain=0.01,
                      min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
                      args=None, kwargs=None):
    """Batch gradient descent with momentum and individual gains.

    Parameters
    ----------
    objective : function or callable
        Should return a tuple of cost and gradient for a given parameter
        vector. When expensive to compute, the cost can optionally
        be None and can be computed every n_iter_check steps using
        the objective_error function.
    p0 : array-like, shape (n_params,)
        Initial parameter vector.
    it : int
        Current number of iterations (this function will be called more than
        once during the optimization).
    n_iter : int
        Maximum number of gradient descent iterations.
    objective_error : function or callable
        Should return the cost for a given parameter vector; only used when
        `objective` returned None as the cost.
    n_iter_check : int
        Number of iterations before evaluating the global error. If the error
        is sufficiently low, we abort the optimization.
    n_iter_without_progress : int, optional (default: 50)
        Maximum number of iterations without progress before we abort the
        optimization.
    momentum : float, within (0.0, 1.0), optional (default: 0.5)
        The momentum generates a weight for previous gradients that decays
        exponentially.
    learning_rate : float, optional (default: 1000.0)
        The learning rate should be extremely high for t-SNE! Values in the
        range [100.0, 1000.0] are common.
    min_gain : float, optional (default: 0.01)
        Minimum individual gain for each parameter.
    min_grad_norm : float, optional (default: 1e-7)
        If the gradient norm is below this threshold, the optimization will
        be aborted.
    min_error_diff : float, optional (default: 1e-7)
        If the absolute difference of two successive cost function values
        is below this threshold, the optimization will be aborted.
    verbose : int, optional (default: 0)
        Verbosity level.
    args : sequence
        Arguments to pass to objective function.
    kwargs : dict
        Keyword arguments to pass to objective function.

    Returns
    -------
    p : array, shape (n_params,)
        Optimum parameters.
    error : float
        Optimum.
    i : int
        Last iteration.
    """
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    p = p0.copy().ravel()
    update = np.zeros_like(p)
    gains = np.ones_like(p)
    # `np.float` was removed in numpy 1.24; the builtin `float` is the
    # documented, identical replacement.
    error = np.finfo(float).max
    best_error = np.finfo(float).max
    best_iter = 0
    # Initialize in case the loop body never runs (n_iter <= it), so the
    # trailing new_error check and the return statement stay well-defined.
    i = it - 1
    new_error = None
    for i in range(it, n_iter):
        new_error, grad = objective(p, *args, **kwargs)
        grad_norm = linalg.norm(grad)
        # Per-parameter adaptive gains: grow while the update keeps its
        # sign relative to the gradient, shrink on a sign change.
        inc = update * grad >= 0.0
        dec = np.invert(inc)
        gains[inc] += 0.05
        gains[dec] *= 0.95
        # BUG FIX: np.clip without `out=` returns a new array and leaves
        # `gains` untouched, so min_gain was never enforced. Clip in place.
        np.clip(gains, min_gain, np.inf, out=gains)
        grad *= gains
        update = momentum * update - learning_rate * grad
        p += update
        if (i + 1) % n_iter_check == 0:
            if new_error is None:
                # Objective skipped the (expensive) cost; compute it now.
                new_error = objective_error(p, *args)
            error_diff = np.abs(new_error - error)
            error = new_error
            if verbose >= 2:
                m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
                print(m % (i + 1, error, grad_norm))
            if error < best_error:
                best_error = error
                best_iter = i
            elif i - best_iter > n_iter_without_progress:
                if verbose >= 2:
                    print("[t-SNE] Iteration %d: did not make any progress "
                          "during the last %d episodes. Finished."
                          % (i + 1, n_iter_without_progress))
                break
            if grad_norm <= min_grad_norm:
                if verbose >= 2:
                    print("[t-SNE] Iteration %d: gradient norm %f. Finished."
                          % (i + 1, grad_norm))
                break
            if error_diff <= min_error_diff:
                if verbose >= 2:
                    m = "[t-SNE] Iteration %d: error difference %f. Finished."
                    print(m % (i + 1, error_diff))
                break
    if new_error is not None:
        error = new_error
    return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
    r"""Expresses to what extent the local structure is retained.

    The trustworthiness is within [0, 1]. It is defined as

    .. math::

        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)

    where :math:`r(i, j)` is the rank of the embedded datapoint j
    according to the pairwise distances between the embedded datapoints,
    :math:`U^{(k)}_i` is the set of points that are in the k nearest
    neighbors in the embedded space but not in the original space.

    * "Neighborhood Preservation in Nonlinear Projection Methods: An
      Experimental Study"
      J. Venna, S. Kaski
    * "Learning a Parametric Embedding by Preserving Local Structure"
      L.J.P. van der Maaten

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.
    X_embedded : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.
    n_neighbors : int, optional (default: 5)
        Number of neighbors k that will be considered.
    precomputed : bool, optional (default: False)
        Set this flag if X is a precomputed square distance matrix.

    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
    if precomputed:
        dist_X = X
    else:
        dist_X = pairwise_distances(X, squared=True)
    dist_X_embedded = pairwise_distances(X_embedded, squared=True)
    # Neighbor rankings in the original and the embedded space; column 0 of
    # the embedded sort is the point itself, hence the 1: slice.
    ind_X = np.argsort(dist_X, axis=1)
    ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
    n_samples = X.shape[0]
    t = 0.0
    ranks = np.zeros(n_neighbors)
    for i in range(n_samples):
        for j in range(n_neighbors):
            # Rank (in the original space) of the j-th embedded neighbor.
            ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
        ranks -= n_neighbors
        # Only neighbors whose original rank exceeds k (positive excess
        # rank) count as violations and are penalized.
        t += np.sum(ranks[ranks > 0])
    t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
                          (2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
    return t
class SemiSupervisedTSNE(BaseEstimator):
"""Semi Supervised t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
This semi-supervised version of t-SNE supports an incomplete labelling
being supplied. This labelling is then used to inform the dimension
reduction such that samples with the same label are more likely to
be close, while samples with different labels are more likely to be
separated.
Read more in the sklearn :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
label_importance : float, optional (default: 1.0)
How much to weight the importance of the labels when determining
the transformation. In practice this determines how far from
a uniform distribution to make the label based prior.
class_sizes_are_representative : boolean, optional (default: False)
If label class sizes are representative of the full/true labelling
then we can weight the prior using class sizes, which can account
    for significant variance in class sizes well. Unless you know that
you have a representative sample labelled it is best to leave this
False.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> np.set_printoptions(suppress=True)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 0.00017599, 0.00003993],
[ 0.00009891, 0.00021913],
[ 0.00018554, -0.00009357],
[ 0.00009528, -0.00001407]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
def __init__(self, n_components=2, label_importance=1.0,
class_sizes_are_representative=False, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
if init not in ["pca", "random"] or isinstance(init, np.ndarray):
msg = "'init' must be 'pca', 'random' or a NumPy array"
raise ValueError(msg)
self.n_components = n_components
self.label_importance = label_importance
self.class_sizes_are_representative = class_sizes_are_representative
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
self.embedding_ = None
    def _fit(self, X, y, skip_num_points=0):
        """Fit the model using X as training data, and y
        as the (partial) labelling.
        Note that sparse arrays can only be handled by method='exact'.
        It is recommended that you convert your sparse array to dense
        (e.g. `X.toarray()`) if it fits in memory, or otherwise using a
        dimensionality reduction technique (e.g. TruncatedSVD).
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that this
            when method='barnes_hut', X cannot be a sparse array and if need be
            will be converted to a 32 bit float array. Method='exact' allows
            sparse arrays and 64bit floating point inputs.
        y : array, shape (n_samples,)
            Labels must be integers, with unlabelled points given the label -1.
        skip_num_points : int (optional, default:0)
            This does not compute the gradient for points with indices below
            `skip_num_points`. This is useful when computing transforms of new
            data where you'd like to keep the old data fixed.
        Returns
        -------
        X_embedded : array, shape (n_samples, n_components)
            The optimized embedding, as returned by `_tsne`.
        """
        # --- Parameter validation -------------------------------------------
        if self.method not in ['barnes_hut', 'exact']:
            raise ValueError("'method' must be 'barnes_hut' or 'exact'")
        if self.angle < 0.0 or self.angle > 1.0:
            raise ValueError("'angle' must be between 0.0 - 1.0")
        if self.method == 'barnes_hut' and sp.issparse(X):
            raise TypeError('A sparse matrix was passed, but dense '
                            'data is required for method="barnes_hut". Use '
                            'X.toarray() to convert to a dense numpy array if '
                            'the array is small enough for it to fit in '
                            'memory. Otherwise consider dimensionality '
                            'reduction techniques (e.g. TruncatedSVD)')
        else:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                            dtype=np.float64)
        random_state = check_random_state(self.random_state)
        if self.early_exaggeration < 1.0:
            raise ValueError("early_exaggeration must be at least 1, but is "
                             "%f" % self.early_exaggeration)
        if self.n_iter < 200:
            raise ValueError("n_iter should be at least 200")
        # --- Pairwise distances ---------------------------------------------
        if self.metric == "precomputed":
            if self.init == 'pca':
                raise ValueError("The parameter init=\"pca\" cannot be used "
                                 "with metric=\"precomputed\".")
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square distance matrix")
            distances = X
        else:
            if self.verbose:
                print("[t-SNE] Computing pairwise distances...")
            if self.metric == "euclidean":
                # Squared distances: they feed the Gaussian-kernel
                # perplexity search directly.
                distances = pairwise_distances(X, metric=self.metric,
                                               squared=True)
            else:
                distances = pairwise_distances(X, metric=self.metric)
            if not np.all(distances >= 0):
                raise ValueError("All distances should be positive, either "
                                 "the metric or precomputed distances given "
                                 "as X are not correct")
        # Degrees of freedom of the Student's t-distribution. The suggestion
        # degrees_of_freedom = n_components - 1 comes from
        # "Learning a Parametric Embedding by Preserving Local Structure"
        # Laurens van der Maaten, 2009.
        degrees_of_freedom = max(self.n_components - 1.0, 1)
        n_samples = X.shape[0]
        # the number of nearest neighbors to find
        k = min(n_samples - 1, int(3. * self.perplexity + 1))
        # --- Joint probabilities (label-informed) ---------------------------
        neighbors_nn = None
        if self.method == 'barnes_hut':
            if self.verbose:
                print("[t-SNE] Computing %i nearest neighbors..." % k)
            if self.metric == 'precomputed':
                # Use the precomputed distances to find
                # the k nearest neighbors and their distances
                neighbors_nn = np.argsort(distances, axis=1)[:, :k]
            else:
                # Find the nearest neighbors for every point
                bt = BallTree(X)
                # LvdM uses 3 * perplexity as the number of neighbors
                # And we add one to not count the data point itself
                # In the event that we have very small # of points
                # set the neighbors to n - 1
                distances_nn, neighbors_nn = bt.query(X, k=k + 1)
                neighbors_nn = neighbors_nn[:, 1:]
            P = _joint_probabilities_nn(distances, neighbors_nn, y,
                                        self.label_importance,
                                        self.class_sizes_are_representative,
                                        self.perplexity, self.verbose)
        else:
            P = _joint_probabilities(distances, y,
                                     self.label_importance,
                                     self.class_sizes_are_representative,
                                     self.perplexity, self.verbose)
        assert np.all(np.isfinite(P)), "All probabilities should be finite"
        assert np.all(P >= 0), "All probabilities should be zero or positive"
        assert np.all(P <= 1), ("All probabilities should be less "
                                "or then equal to one")
        # --- Initial embedding ----------------------------------------------
        if self.init == 'pca':
            pca = PCA(n_components=self.n_components, svd_solver='randomized',
                      random_state=random_state)
            X_embedded = pca.fit_transform(X)
        elif isinstance(self.init, np.ndarray):
            X_embedded = self.init
        elif self.init == 'random':
            # Deferred: `_tsne` draws the random initialization itself.
            X_embedded = None
        else:
            raise ValueError("Unsupported initialization scheme: %s"
                             % self.init)
        return self._tsne(P, degrees_of_freedom, n_samples, random_state,
                          X_embedded=X_embedded,
                          neighbors=neighbors_nn,
                          skip_num_points=skip_num_points)
def _tsne(self, P, degrees_of_freedom, n_samples, random_state,
X_embedded=None, neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
opt_args = {}
opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0,
"learning_rate": self.learning_rate,
"verbose": self.verbose, "n_iter_check": 25,
"kwargs": dict(skip_num_points=skip_num_points)}
if self.method == 'barnes_hut':
m = "Must provide an array of neighbors to use Barnes-Hut"
assert neighbors is not None, m
obj_func = _kl_divergence_bh
objective_error = _kl_divergence_error
sP = squareform(P).astype(np.float32)
neighbors = neighbors.astype(np.int64)
args = [sP, neighbors, degrees_of_freedom, n_samples,
self.n_components]
opt_args['args'] = args
opt_args['min_grad_norm'] = 1e-3
opt_args['n_iter_without_progress'] = 30
# Don't always calculate the cost since that calculation
# can be nearly as expensive as the gradient
opt_args['objective_error'] = objective_error
opt_args['kwargs']['angle'] = self.angle
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
opt_args['args'] = [P, degrees_of_freedom, n_samples,
self.n_components]
opt_args['min_error_diff'] = 0.0
opt_args['min_grad_norm'] = 0.0
# Early exaggeration
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
opt_args['n_iter'] = 100
opt_args['momentum'] = 0.8
opt_args['it'] = it + 1
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] KL divergence after %d iterations with early "
"exaggeration: %f" % (it + 1, kl_divergence))
# Save the final number of iterations
self.n_iter_final = it
# Final optimization
P /= self.early_exaggeration
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
params, error, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations: %f"
% (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
def fit_transform(self, X, y):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
y : array, shape (n_samples,)
A (partial) labelling of the samples. The array should provide
a label value for each sample. Labels must be integers, with
unlabelled points given the label -1.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X, y)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
y : array, shape (n_samples,)
A (partial) labelling of the samples. The array should provide
a label value for each sample. Labels must be integers, with
unlabelled points given the label -1.
"""
self.fit_transform(X, y)
return self
| {
"repo_name": "lmcinnes/sstsne",
"path": "sstsne/ss_t_sne.py",
"copies": "1",
"size": "38282",
"license": "bsd-2-clause",
"hash": 2770489963970443300,
"line_mean": 38.7941787942,
"line_max": 82,
"alpha_frac": 0.6145708166,
"autogenerated": false,
"ratio": 3.9981201044386423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5112690921038642,
"avg_score": null,
"num_lines": null
} |
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# https://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from time import time
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix, issparse
from ..neighbors import NearestNeighbors
from ..base import BaseEstimator
from ..utils import check_random_state
from ..utils._openmp_helpers import _openmp_effective_n_threads
from ..utils.validation import check_non_negative
from ..utils.validation import _deprecate_positional_args
from ..decomposition import PCA
from ..metrics.pairwise import pairwise_distances
# mypy error: Module 'sklearn.manifold' has no attribute '_utils'
from . import _utils # type: ignore
# mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne'
from . import _barnes_hut_tsne # type: ignore
# Smallest positive double-precision spacing; used throughout as a floor
# for probabilities so that log/division never sees an exact zero.
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
    """Convert pairwise distances into symmetric joint probabilities p_ij.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed pairwise distance matrix (diagonal and duplicate
        entries omitted, stored as a one-dimensional array).
    desired_perplexity : float
        Target perplexity of the conditional probability distributions.
    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    dist = distances.astype(np.float32, copy=False)
    # Binary-search per-sample bandwidths until every conditional
    # distribution approximately matches the desired perplexity.
    cond_P = _utils._binary_search_perplexity(
        dist, desired_perplexity, verbose)
    # Symmetrize (p_j|i + p_i|j) and renormalize, flooring at machine
    # epsilon to keep every probability strictly positive.
    joint = cond_P + cond_P.T
    normalizer = np.maximum(np.sum(joint), MACHINE_EPSILON)
    return np.maximum(squareform(joint) / normalizer, MACHINE_EPSILON)
def _joint_probabilities_nn(distances, desired_perplexity, verbose):
    """Joint probabilities p_ij restricted to nearest neighbors.

    Approximately equivalent to :func:`_joint_probabilities`, but only
    the provided nearest-neighbor distances are considered, reducing the
    cost from O(N^2) to O(uN).

    Parameters
    ----------
    distances : CSR sparse matrix, shape (n_samples, n_samples)
        Distances of samples to their n_neighbors nearest neighbors; all
        other entries are implicit zeros and never materialized.
    desired_perplexity : float
        Target perplexity of the conditional probability distributions.
    verbose : int
        Verbosity level.

    Returns
    -------
    P : csr sparse matrix, shape (n_samples, n_samples)
        Condensed joint probability matrix with only nearest neighbors.
    """
    t_start = time()
    distances.sort_indices()
    n_samples = distances.shape[0]

    # Binary-search per-sample bandwidths on the dense (n_samples, k)
    # view of the neighbor distances.
    neighbor_distances = distances.data.reshape(n_samples, -1)
    neighbor_distances = neighbor_distances.astype(np.float32, copy=False)
    cond_P = _utils._binary_search_perplexity(
        neighbor_distances, desired_perplexity, verbose)
    assert np.all(np.isfinite(cond_P)), \
        "All probabilities should be finite"

    # Symmetrize and normalize using sparse operations only.
    P = csr_matrix((cond_P.ravel(), distances.indices, distances.indptr),
                   shape=(n_samples, n_samples))
    P = P + P.T
    P /= np.maximum(P.sum(), MACHINE_EPSILON)
    assert np.all(np.abs(P.data) <= 1.0)

    if verbose >= 2:
        duration = time() - t_start
        print("[t-SNE] Computed conditional probabilities in {:.3f}s"
              .format(duration))
    return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
                   skip_num_points=0, compute_error=True):
    """Exact t-SNE objective: KL(P || Q) and its gradient.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    degrees_of_freedom : int
        Degrees of freedom of the Student's t-distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.
    skip_num_points : int (optional, default:0)
        Gradient is not computed for points with indices below this
        value; useful when embedding new data while old data is fixed.
    compute_error: bool (optional, default:True)
        If False, the KL divergence is not computed and NaN is returned.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the KL divergence with respect to the
        embedding.
    """
    embedding = params.reshape(n_samples, n_components)

    # Heavy-tailed Q: Student's t kernel over squared distances in the
    # embedding, floored at machine epsilon.
    kernel = pdist(embedding, "sqeuclidean")
    kernel /= degrees_of_freedom
    kernel += 1.
    kernel **= (degrees_of_freedom + 1.0) / -2.0
    Q = np.maximum(kernel / (2.0 * np.sum(kernel)), MACHINE_EPSILON)

    if compute_error:
        # np.dot instead of np.sum(x * y): it dispatches to BLAS.
        kl_divergence = 2.0 * np.dot(
            P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
    else:
        kl_divergence = np.nan

    # Gradient dC/dY. pdist always returns double precision, so the
    # result is written into a buffer preallocated with params' dtype.
    grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
    PQd = squareform((P - Q) * kernel)
    for point in range(skip_num_points, n_samples):
        grad[point] = np.dot(np.ravel(PQd[point], order='K'),
                             embedding[point] - embedding)
    grad = grad.ravel()
    grad *= 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    return kl_divergence, grad
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components,
                      angle=0.5, skip_num_points=0, verbose=False,
                      compute_error=True, num_threads=1):
    """Barnes-Hut approximation of the t-SNE objective and gradient.

    Computes the KL divergence of p_ij and q_ij and its gradient in
    O(N log N) instead of O(N^2) via the Cython Barnes-Hut tree kernel.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : csr sparse matrix, shape (n_samples, n_sample)
        Sparse approximate joint probability matrix, computed only for
        the k nearest-neighbors and symmetrized.
    degrees_of_freedom : int
        Degrees of freedom of the Student's t-distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.
    angle : float (default: 0.5)
        Speed/accuracy trade-off (theta): distant nodes whose angular
        size falls below this value are summarized by a single node.
        Reasonable values lie in 0.2 - 0.8; smaller is slower, larger
        is less accurate.
    skip_num_points : int (optional, default:0)
        Gradient is not computed for points with indices below this
        value; useful when embedding new data while old data is fixed.
    verbose : int
        Verbosity level.
    compute_error: bool (optional, default:True)
        If False, the KL divergence is not computed and NaN is returned.
    num_threads : int (optional, default:1)
        Number of threads used by the gradient kernel; resolved once by
        the caller to avoid per-step lookups.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the KL divergence with respect to the
        embedding.
    """
    params = params.astype(np.float32, copy=False)
    embedding = params.reshape(n_samples, n_components)

    # Hand the sparse structure (data / indices / indptr) to the Cython
    # kernel with the exact dtypes it expects; ``grad`` is filled
    # in place.
    val_P = P.data.astype(np.float32, copy=False)
    neighbors = P.indices.astype(np.int64, copy=False)
    indptr = P.indptr.astype(np.int64, copy=False)

    grad = np.zeros(embedding.shape, dtype=np.float32)
    error = _barnes_hut_tsne.gradient(val_P, embedding, neighbors, indptr,
                                      grad, angle, n_components, verbose,
                                      dof=degrees_of_freedom,
                                      compute_error=compute_error,
                                      num_threads=num_threads)
    grad = grad.ravel()
    grad *= 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    return error, grad
def _gradient_descent(objective, p0, it, n_iter,
n_iter_check=1, n_iter_without_progress=300,
momentum=0.8, learning_rate=200.0, min_gain=0.01,
min_grad_norm=1e-7, verbose=0, args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int, default=1
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, default=300
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), default=0.8
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, default=200.0
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, default=0.01
Minimum individual gain for each parameter.
min_grad_norm : float, default=1e-7
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, default=0
Verbosity level.
args : sequence, default=None
Arguments to pass to objective function.
kwargs : dict, default=None
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(float).max
best_error = np.finfo(float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
check_convergence = (i + 1) % n_iter_check == 0
# only compute the error when needed
kwargs['compute_error'] = check_convergence or i == n_iter - 1
error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if check_convergence:
toc = time()
duration = toc - tic
tic = toc
if verbose >= 2:
print("[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
return p, error, i
@_deprecate_positional_args
def trustworthiness(X, X_embedded, *, n_neighbors=5, metric='euclidean'):
    r"""Expresses to what extent the local structure is retained.

    The trustworthiness lies within [0, 1] and is defined as

    .. math::

        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in \mathcal{N}_{i}^{k}} \max(0, (r(i, j) - k))

    where for each sample i, :math:`\mathcal{N}_{i}^{k}` are its k nearest
    neighbors in the output space, and every sample j is its
    :math:`r(i, j)`-th nearest neighbor in the input space. In other
    words, any unexpected nearest neighbor in the output space is
    penalised in proportion to its rank in the input space.

    * "Neighborhood Preservation in Nonlinear Projection Methods: An
      Experimental Study"
      J. Venna, S. Kaski
    * "Learning a Parametric Embedding by Preserving Local Structure"
      L.J.P. van der Maaten

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.
    X_embedded : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.
    n_neighbors : int, default=5
        Number of neighbors k that will be considered.
    metric : string, or callable, default='euclidean'
        Metric used for pairwise distances in the original input space.
        If 'precomputed', X must be a matrix of pairwise distances or
        squared distances. Otherwise, see the documentation of argument
        metric in sklearn.pairwise.pairwise_distances for a list of
        available metrics.
        .. versionadded:: 0.20

    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
    dist_input = pairwise_distances(X, metric=metric)
    if metric == 'precomputed':
        dist_input = dist_input.copy()
    # A point must never count as its own neighbor: push the diagonal to
    # infinity before ranking.
    np.fill_diagonal(dist_input, np.inf)
    # ind_input[i] lists the other samples ordered by distance from i.
    ind_input = np.argsort(dist_input, axis=1)
    ind_embedded = NearestNeighbors(n_neighbors=n_neighbors).fit(
        X_embedded).kneighbors(return_distance=False)

    n_samples = X.shape[0]
    # Inverted index over the input-space ordering:
    # rank_in_input[i][ind_input[i]] = 1..n, i.e. rank_in_input[i, j] is
    # the (1-based) rank of sample j among i's input-space neighbors.
    rank_in_input = np.zeros((n_samples, n_samples), dtype=int)
    ordered = np.arange(n_samples + 1)
    rank_in_input[ordered[:-1, np.newaxis], ind_input] = ordered[1:]

    ranks = rank_in_input[ordered[:-1, np.newaxis],
                          ind_embedded] - n_neighbors
    penalty = np.sum(ranks[ranks > 0])
    return 1.0 - penalty * (2.0 / (n_samples * n_neighbors *
                                   (2.0 * n_samples - 3.0 * n_neighbors
                                    - 1.0)))
class TSNE(BaseEstimator):
    """t-distributed Stochastic Neighbor Embedding.
    t-SNE [1] is a tool to visualize high-dimensional data. It converts
    similarities between data points to joint probabilities and tries
    to minimize the Kullback-Leibler divergence between the joint
    probabilities of the low-dimensional embedding and the
    high-dimensional data. t-SNE has a cost function that is not convex,
    i.e. with different initializations we can get different results.
    It is highly recommended to use another dimensionality reduction
    method (e.g. PCA for dense data or TruncatedSVD for sparse data)
    to reduce the number of dimensions to a reasonable amount (e.g. 50)
    if the number of features is very high. This will suppress some
    noise and speed up the computation of pairwise distances between
    samples. For more tips see Laurens van der Maaten's FAQ [2].
    Read more in the :ref:`User Guide <t_sne>`.
    Parameters
    ----------
    n_components : int, default=2
        Dimension of the embedded space.
    perplexity : float, default=30
        The perplexity is related to the number of nearest neighbors that
        is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
        between 5 and 50. Different values can result in significantly
        different results.
    early_exaggeration : float, default=12.0
        Controls how tight natural clusters in the original space are in
        the embedded space and how much space will be between them. For
        larger values, the space between natural clusters will be larger
        in the embedded space. Again, the choice of this parameter is not
        very critical. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high.
    learning_rate : float, default=200.0
        The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
        the learning rate is too high, the data may look like a 'ball' with any
        point approximately equidistant from its nearest neighbours. If the
        learning rate is too low, most points may look compressed in a dense
        cloud with few outliers. If the cost function gets stuck in a bad local
        minimum increasing the learning rate may help.
    n_iter : int, default=1000
        Maximum number of iterations for the optimization. Should be at
        least 250.
    n_iter_without_progress : int, default=300
        Maximum number of iterations without progress before we abort the
        optimization, used after 250 initial iterations with early
        exaggeration. Note that progress is only checked every 50 iterations so
        this value is rounded to the next multiple of 50.
        .. versionadded:: 0.17
           parameter *n_iter_without_progress* to control stopping criteria.
    min_grad_norm : float, default=1e-7
        If the gradient norm is below this threshold, the optimization will
        be stopped.
    metric : string or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them. The default is "euclidean" which is
        interpreted as squared euclidean distance.
    init : string or numpy array, default="random"
        Initialization of embedding. Possible options are 'random', 'pca',
        and a numpy array of shape (n_samples, n_components).
        PCA initialization cannot be used with precomputed distances and is
        usually more globally stable than random initialization.
    verbose : int, default=0
        Verbosity level.
    random_state : int, RandomState instance, default=None
        Determines the random number generator. Pass an int for reproducible
        results across multiple function calls. Note that different
        initializations might result in different local minima of the cost
        function. See :term: `Glossary <random_state>`.
    method : string, default='barnes_hut'
        By default the gradient calculation algorithm uses Barnes-Hut
        approximation running in O(NlogN) time. method='exact'
        will run on the slower, but exact, algorithm in O(N^2) time. The
        exact algorithm should be used when nearest-neighbor errors need
        to be better than 3%. However, the exact method cannot scale to
        millions of examples.
        .. versionadded:: 0.17
           Approximate optimization *method* via the Barnes-Hut.
    angle : float, default=0.5
        Only used if method='barnes_hut'
        This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
        'angle' is the angular size (referred to as theta in [3]) of a distant
        node as measured from a point. If this size is below 'angle' then it is
        used as a summary node of all points contained within it.
        This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater 0.8 has quickly increasing error.
    n_jobs : int or None, default=None
        The number of parallel jobs to run for neighbors search. This parameter
        has no impact when ``metric="precomputed"`` or
        (``metric="euclidean"`` and ``method="exact"``).
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
        .. versionadded:: 0.22
    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.
    kl_divergence_ : float
        Kullback-Leibler divergence after optimization.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.manifold import TSNE
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> X_embedded = TSNE(n_components=2).fit_transform(X)
    >>> X_embedded.shape
    (4, 2)
    References
    ----------
    [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
        Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
    [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
        https://lvdmaaten.github.io/tsne/
    [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
        Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
        https://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
    """
    # Control the number of exploration iterations with early_exaggeration on
    _EXPLORATION_N_ITER = 250
    # Control the number of iterations between progress checks
    _N_ITER_CHECK = 50
    @_deprecate_positional_args
    def __init__(self, n_components=2, *, perplexity=30.0,
                 early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
                 n_iter_without_progress=300, min_grad_norm=1e-7,
                 metric="euclidean", init="random", verbose=0,
                 random_state=None, method='barnes_hut', angle=0.5,
                 n_jobs=None):
        # Hyper-parameters are stored untouched; validation is deferred to
        # _fit, per scikit-learn convention.
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.n_iter_without_progress = n_iter_without_progress
        self.min_grad_norm = min_grad_norm
        self.metric = metric
        self.init = init
        self.verbose = verbose
        self.random_state = random_state
        self.method = method
        self.angle = angle
        self.n_jobs = n_jobs
    def _fit(self, X, skip_num_points=0):
        """Private function to fit the model using X as training data.

        Validates the hyper-parameters and the input, builds the joint
        probability matrix P (dense condensed for method='exact', sparse
        nearest-neighbor for 'barnes_hut'), chooses the initial embedding,
        and hands everything to :meth:`_tsne`.

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            Training data, or a square distance matrix / sparse distance
            graph when ``metric='precomputed'``.
        skip_num_points : int, default=0
            Points with an index below this value keep their initial
            embedding coordinates fixed during optimization.

        Returns
        -------
        X_embedded : array, shape (n_samples, n_components)
            Optimized embedding returned by :meth:`_tsne`.
        """
        if self.method not in ['barnes_hut', 'exact']:
            raise ValueError("'method' must be 'barnes_hut' or 'exact'")
        if self.angle < 0.0 or self.angle > 1.0:
            raise ValueError("'angle' must be between 0.0 - 1.0")
        if self.method == 'barnes_hut':
            X = self._validate_data(X, accept_sparse=['csr'],
                                    ensure_min_samples=2,
                                    dtype=[np.float32, np.float64])
        else:
            X = self._validate_data(X, accept_sparse=['csr', 'csc', 'coo'],
                                    dtype=[np.float32, np.float64])
        if self.metric == "precomputed":
            if isinstance(self.init, str) and self.init == 'pca':
                raise ValueError("The parameter init=\"pca\" cannot be "
                                 "used with metric=\"precomputed\".")
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square distance matrix")
            check_non_negative(X, "TSNE.fit(). With metric='precomputed', X "
                                  "should contain positive distances.")
            if self.method == "exact" and issparse(X):
                raise TypeError(
                    'TSNE with method="exact" does not accept sparse '
                    'precomputed distance matrix. Use method="barnes_hut" '
                    'or provide the dense distance matrix.')
        if self.method == 'barnes_hut' and self.n_components > 3:
            raise ValueError("'n_components' should be inferior to 4 for the "
                             "barnes_hut algorithm as it relies on "
                             "quad-tree or oct-tree.")
        random_state = check_random_state(self.random_state)
        if self.early_exaggeration < 1.0:
            raise ValueError("early_exaggeration must be at least 1, but is {}"
                             .format(self.early_exaggeration))
        if self.n_iter < 250:
            raise ValueError("n_iter should be at least 250")
        n_samples = X.shape[0]
        neighbors_nn = None
        if self.method == "exact":
            # Retrieve the distance matrix, either using the precomputed one or
            # computing it.
            if self.metric == "precomputed":
                distances = X
            else:
                if self.verbose:
                    print("[t-SNE] Computing pairwise distances...")
                if self.metric == "euclidean":
                    distances = pairwise_distances(X, metric=self.metric,
                                                   squared=True)
                else:
                    distances = pairwise_distances(X, metric=self.metric,
                                                   n_jobs=self.n_jobs)
                if np.any(distances < 0):
                    raise ValueError("All distances should be positive, the "
                                     "metric given is not correct")
            # compute the joint probability distribution for the input space
            P = _joint_probabilities(distances, self.perplexity, self.verbose)
            assert np.all(np.isfinite(P)), "All probabilities should be finite"
            assert np.all(P >= 0), "All probabilities should be non-negative"
            assert np.all(P <= 1), ("All probabilities should be less "
                                    "or then equal to one")
        else:
            # Compute the number of nearest neighbors to find.
            # LvdM uses 3 * perplexity as the number of neighbors.
            # In the event that we have very small # of points
            # set the neighbors to n - 1.
            n_neighbors = min(n_samples - 1, int(3. * self.perplexity + 1))
            if self.verbose:
                print("[t-SNE] Computing {} nearest neighbors..."
                      .format(n_neighbors))
            # Find the nearest neighbors for every point
            knn = NearestNeighbors(algorithm='auto',
                                   n_jobs=self.n_jobs,
                                   n_neighbors=n_neighbors,
                                   metric=self.metric)
            t0 = time()
            knn.fit(X)
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
                    n_samples, duration))
            t0 = time()
            distances_nn = knn.kneighbors_graph(mode='distance')
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Computed neighbors for {} samples "
                      "in {:.3f}s...".format(n_samples, duration))
            # Free the memory used by the ball_tree
            del knn
            if self.metric == "euclidean":
                # knn return the euclidean distance but we need it squared
                # to be consistent with the 'exact' method. Note that the
                # the method was derived using the euclidean method as in the
                # input space. Not sure of the implication of using a different
                # metric.
                distances_nn.data **= 2
            # compute the joint probability distribution for the input space
            P = _joint_probabilities_nn(distances_nn, self.perplexity,
                                        self.verbose)
        if isinstance(self.init, np.ndarray):
            X_embedded = self.init
        elif self.init == 'pca':
            pca = PCA(n_components=self.n_components, svd_solver='randomized',
                      random_state=random_state)
            X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
        elif self.init == 'random':
            # The embedding is initialized with iid samples from Gaussians with
            # standard deviation 1e-4.
            X_embedded = 1e-4 * random_state.randn(
                n_samples, self.n_components).astype(np.float32)
        else:
            raise ValueError("'init' must be 'pca', 'random', or "
                             "a numpy array")
        # Degrees of freedom of the Student's t-distribution. The suggestion
        # degrees_of_freedom = n_components - 1 comes from
        # "Learning a Parametric Embedding by Preserving Local Structure"
        # Laurens van der Maaten, 2009.
        degrees_of_freedom = max(self.n_components - 1, 1)
        return self._tsne(P, degrees_of_freedom, n_samples,
                          X_embedded=X_embedded,
                          neighbors=neighbors_nn,
                          skip_num_points=skip_num_points)
    def _tsne(self, P, degrees_of_freedom, n_samples, X_embedded,
              neighbors=None, skip_num_points=0):
        """Runs t-SNE.

        Parameters
        ----------
        P : array or csr sparse matrix
            Joint probabilities of the input space (condensed array for
            the exact method, CSR matrix for Barnes-Hut).
        degrees_of_freedom : int
            Degrees of freedom of the Student's t-distribution.
        n_samples : int
            Number of samples.
        X_embedded : array, shape (n_samples, n_components)
            Initial embedding.
        neighbors : ignored
            Not referenced in this method; the Barnes-Hut gradient reads
            the neighborhood structure directly from the sparse ``P``.
        skip_num_points : int, default=0
            Gradient is not computed for points below this index.

        Returns
        -------
        X_embedded : array, shape (n_samples, n_components)
            Optimized embedding.
        """
        # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
        # and the Student's t-distributions Q. The optimization algorithm that
        # we use is batch gradient descent with two stages:
        # * initial optimization with early exaggeration and momentum at 0.5
        # * final optimization with momentum at 0.8
        params = X_embedded.ravel()
        opt_args = {
            "it": 0,
            "n_iter_check": self._N_ITER_CHECK,
            "min_grad_norm": self.min_grad_norm,
            "learning_rate": self.learning_rate,
            "verbose": self.verbose,
            "kwargs": dict(skip_num_points=skip_num_points),
            "args": [P, degrees_of_freedom, n_samples, self.n_components],
            "n_iter_without_progress": self._EXPLORATION_N_ITER,
            "n_iter": self._EXPLORATION_N_ITER,
            "momentum": 0.5,
        }
        if self.method == 'barnes_hut':
            obj_func = _kl_divergence_bh
            opt_args['kwargs']['angle'] = self.angle
            # Repeat verbose argument for _kl_divergence_bh
            opt_args['kwargs']['verbose'] = self.verbose
            # Get the number of threads for gradient computation here to
            # avoid recomputing it at each iteration.
            opt_args['kwargs']['num_threads'] = _openmp_effective_n_threads()
        else:
            obj_func = _kl_divergence
        # Learning schedule (part 1): do 250 iteration with lower momentum but
        # higher learning rate controlled via the early exaggeration parameter
        P *= self.early_exaggeration
        params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                      **opt_args)
        if self.verbose:
            print("[t-SNE] KL divergence after %d iterations with early "
                  "exaggeration: %f" % (it + 1, kl_divergence))
        # Learning schedule (part 2): disable early exaggeration and finish
        # optimization with a higher momentum at 0.8
        P /= self.early_exaggeration
        remaining = self.n_iter - self._EXPLORATION_N_ITER
        if it < self._EXPLORATION_N_ITER or remaining > 0:
            opt_args['n_iter'] = self.n_iter
            opt_args['it'] = it + 1
            opt_args['momentum'] = 0.8
            opt_args['n_iter_without_progress'] = self.n_iter_without_progress
            params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                          **opt_args)
        # Save the final number of iterations
        self.n_iter_ = it
        if self.verbose:
            print("[t-SNE] KL divergence after %d iterations: %f"
                  % (it + 1, kl_divergence))
        X_embedded = params.reshape(n_samples, self.n_components)
        self.kl_divergence_ = kl_divergence
        return X_embedded
    def fit_transform(self, X, y=None):
        """Fit X into an embedded space and return that transformed
        output.
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. If the method
            is 'exact', X may be a sparse matrix of type 'csr', 'csc'
            or 'coo'. If the method is 'barnes_hut' and the metric is
            'precomputed', X may be a precomputed sparse graph.
        y : Ignored
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Embedding of the training data in low-dimensional space.
        """
        embedding = self._fit(X)
        self.embedding_ = embedding
        return self.embedding_
    def fit(self, X, y=None):
        """Fit X into an embedded space.
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. If the method
            is 'exact', X may be a sparse matrix of type 'csr', 'csc'
            or 'coo'. If the method is 'barnes_hut' and the metric is
            'precomputed', X may be a precomputed sparse graph.
        y : Ignored
        """
        self.fit_transform(X)
        return self
| {
"repo_name": "huzq/scikit-learn",
"path": "sklearn/manifold/_t_sne.py",
"copies": "1",
"size": "36870",
"license": "bsd-3-clause",
"hash": -4075654696369325000,
"line_mean": 39.4720087816,
"line_max": 81,
"alpha_frac": 0.6130458367,
"autogenerated": false,
"ratio": 4.050313083598813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 911
} |
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
# Smallest representable positive double; used throughout to clamp
# probabilities away from zero before log/division.
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Pairwise distances stored in condensed form, i.e. the diagonal
        and duplicate entries are omitted and everything is kept in a
        one-dimensional array.
    desired_perplexity : float
        Desired perplexity of the joint probability distributions.
    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Binary-search the per-sample bandwidths so that every conditional
    # distribution approximately matches the desired perplexity.
    cond_P = _utils._binary_search_perplexity(
        distances, desired_perplexity, verbose)
    # Symmetrize the conditional probabilities and normalize to obtain the
    # joint distribution; clamp with MACHINE_EPSILON so later log/division
    # stays numerically safe.
    sym_P = cond_P + cond_P.T
    normalizer = np.maximum(np.sum(sym_P), MACHINE_EPSILON)
    return np.maximum(squareform(sym_P) / normalizer, MACHINE_EPSILON)
def _kl_divergence(params, P, alpha, n_samples, n_components):
    """t-SNE objective function: KL divergence of p_ijs and q_ijs.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    alpha : float
        Degrees of freedom of the Student's-t distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the Kullback-Leibler divergence with respect to
        the embedding.
    """
    X_embedded = params.reshape(n_samples, n_components)

    # Q is a heavy-tailed distribution: Student's t-distribution.
    # n holds (1 + d_ij^2 / alpha)^(-(alpha+1)/2), computed in place on the
    # condensed squared-distance vector.
    n = pdist(X_embedded, "sqeuclidean")
    n += 1.
    n /= alpha
    n **= (alpha + 1.0) / -2.0
    # Normalize to a probability distribution; factor 2 accounts for the
    # condensed (upper-triangle only) storage.
    Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)

    # Optimization trick below: np.dot(x, y) is faster than
    # np.sum(x * y) because it calls BLAS

    # Objective: C (Kullback-Leibler divergence of P and Q)
    kl_divergence = 2.0 * np.dot(P, np.log(P / Q))

    # Gradient: dC/dY
    grad = np.ndarray((n_samples, n_components))
    PQd = squareform((P - Q) * n)
    for i in range(n_samples):
        # Row i of the gradient is a weighted sum of displacement vectors
        # from point i to every other embedded point.
        np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
    grad = grad.ravel()
    c = 2.0 * (alpha + 1.0) / alpha
    grad *= c

    return kl_divergence, grad
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=[]):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i+1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
    # BUG FIX: the docstring is now a raw string -- it contains LaTeX
    # (\frac, \sum) which otherwise produces invalid escape sequences.
    r"""Expresses to what extent the local structure is retained.

    The trustworthiness is within [0, 1]. It is defined as

    .. math::

        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)

    where :math:`r(i, j)` is the rank of the embedded datapoint j
    according to the pairwise distances between the embedded datapoints,
    :math:`U^{(k)}_i` is the set of points that are in the k nearest
    neighbors in the embedded space but not in the original space.

    * "Neighborhood Preservation in Nonlinear Projection Methods: An
      Experimental Study"
      J. Venna, S. Kaski
    * "Learning a Parametric Embedding by Preserving Local Structure"
      L.J.P. van der Maaten

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.
    X_embedded : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.
    n_neighbors : int, optional (default: 5)
        Number of neighbors k that will be considered.
    precomputed : bool, optional (default: False)
        Set this flag if X is a precomputed square distance matrix.

    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
    if precomputed:
        dist_X = X
    else:
        dist_X = pairwise_distances(X, squared=True)
    dist_X_embedded = pairwise_distances(X_embedded, squared=True)
    # Neighbor orderings; column 0 of the embedded ranking is each point
    # itself, so the k nearest neighbors are columns 1..n_neighbors.
    ind_X = np.argsort(dist_X, axis=1)
    ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]

    n_samples = X.shape[0]
    t = 0.0
    ranks = np.zeros(n_neighbors)
    for i in range(n_samples):
        for j in range(n_neighbors):
            # Rank (in the original space) of the j-th embedded neighbor.
            ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
        # Only neighbors that intruded from beyond the original
        # k-neighborhood (positive excess rank) are penalized.
        ranks -= n_neighbors
        t += np.sum(ranks[ranks > 0])
    t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
                          (2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
    return t
class TSNE(BaseEstimator):
    """t-distributed Stochastic Neighbor Embedding.

    t-SNE [1] is a tool to visualize high-dimensional data. It converts
    similarities between data points to joint probabilities and tries
    to minimize the Kullback-Leibler divergence between the joint
    probabilities of the low-dimensional embedding and the
    high-dimensional data. t-SNE has a cost function that is not convex,
    i.e. with different initializations we can get different results.

    It is highly recommended to use another dimensionality reduction
    method (e.g. PCA for dense data or TruncatedSVD for sparse data)
    to reduce the number of dimensions to a reasonable amount (e.g. 50)
    if the number of features is very high. This will suppress some
    noise and speed up the computation of pairwise distances between
    samples. For more tips see Laurens van der Maaten's FAQ [2].

    Parameters
    ----------
    n_components : int, optional (default: 2)
        Dimension of the embedded space.

    perplexity : float, optional (default: 30)
        The perplexity is related to the number of nearest neighbors that
        is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
        between 5 and 50. The choice is not extremely critical since t-SNE
        is quite insensitive to this parameter.

    early_exaggeration : float, optional (default: 4.0)
        Controls how tight natural clusters in the original space are in
        the embedded space and how much space will be between them. For
        larger values, the space between natural clusters will be larger
        in the embedded space. Again, the choice of this parameter is not
        very critical. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high.

    learning_rate : float, optional (default: 1000)
        The learning rate can be a critical parameter. It should be
        between 100 and 1000. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high. If the cost function gets stuck in a bad local
        minimum increasing the learning rate helps sometimes.

    n_iter : int, optional (default: 1000)
        Maximum number of iterations for the optimization. Should be at
        least 200.

    metric : string or callable, (default: "euclidean")
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.

    init : string, optional (default: "random")
        Initialization of embedding. Possible options are 'random' and 'pca'.
        PCA initialization cannot be used with precomputed distances and is
        usually more globally stable than random initialization.

    verbose : int, optional (default: 0)
        Verbosity level.

    random_state : int or RandomState instance or None (default)
        Pseudo Random Number generator seed control. If None, use the
        numpy.random singleton. Note that different initializations
        might result in different local minima of the cost function.

    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.

    training_data_ : array-like, shape (n_samples, n_features)
        Stores the training data.

    Examples
    --------

    >>> import numpy as np
    >>> from sklearn.manifold import TSNE
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> model = TSNE(n_components=2, random_state=0)
    >>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    array([[  887.28...,   238.61...],
           [ -714.79...,  3243.34...],
           [  957.30..., -2505.78...],
           [-1130.28...,  -974.78...]])

    References
    ----------

    [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
        Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.

    [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
        http://homepage.tudelft.nl/19j49/t-SNE.html
    """

    def __init__(self, n_components=2, perplexity=30.0,
                 early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
                 metric="euclidean", init="random", verbose=0,
                 random_state=None):
        # `init` is validated eagerly; every other parameter is checked in
        # _fit, following the scikit-learn convention.
        if init not in ["pca", "random"]:
            raise ValueError("'init' must be either 'pca' or 'random'")
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.metric = metric
        self.init = init
        self.verbose = verbose
        self.random_state = random_state

    def _fit(self, X):
        """Fit the model using X as training data.

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row.
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
        random_state = check_random_state(self.random_state)

        if self.early_exaggeration < 1.0:
            raise ValueError("early_exaggeration must be at least 1, but is "
                             "%f" % self.early_exaggeration)

        if self.n_iter < 200:
            raise ValueError("n_iter should be at least 200")

        if self.metric == "precomputed":
            if self.init == 'pca':
                raise ValueError("The parameter init=\"pca\" cannot be used "
                                 "with metric=\"precomputed\".")
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square distance matrix")
            distances = X
        else:
            if self.verbose:
                print("[t-SNE] Computing pairwise distances...")
            # Squared distances feed the Gaussian kernel used in
            # _joint_probabilities.
            distances = pairwise_distances(X, metric=self.metric, squared=True)

        # Degrees of freedom of the Student's t-distribution. The suggestion
        # alpha = n_components - 1 comes from "Learning a Parametric Embedding
        # by Preserving Local Structure" Laurens van der Maaten, 2009.
        alpha = self.n_components - 1.0
        n_samples = X.shape[0]
        self.training_data_ = X

        P = _joint_probabilities(distances, self.perplexity, self.verbose)

        if self.init == 'pca':
            pca = RandomizedPCA(n_components=self.n_components,
                                random_state=random_state)
            X_embedded = pca.fit_transform(X)
        elif self.init == 'random':
            # None signals _tsne to draw a random initial embedding.
            X_embedded = None
        else:
            raise ValueError("Unsupported initialization scheme: %s"
                             % self.init)

        self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
                                     X_embedded=X_embedded)

    def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
        """Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
        # and the Student's t-distributions Q. The optimization algorithm that
        # we use is batch gradient descent with three stages:
        # * early exaggeration with momentum 0.5
        # * early exaggeration with momentum 0.8
        # * final optimization with momentum 0.8
        # The embedding is initialized with iid samples from Gaussians with
        # standard deviation 1e-4.

        if X_embedded is None:
            # Initialize embedding randomly
            X_embedded = 1e-4 * random_state.randn(n_samples,
                                                   self.n_components)
        params = X_embedded.ravel()

        # Early exaggeration: multiplying P makes clusters form tighter and
        # leaves more room between them during the first iterations.
        P *= self.early_exaggeration
        params, error, it = _gradient_descent(
            _kl_divergence, params, it=0, n_iter=50, momentum=0.5,
            min_grad_norm=0.0, min_error_diff=0.0,
            learning_rate=self.learning_rate, verbose=self.verbose,
            args=[P, alpha, n_samples, self.n_components])
        params, error, it = _gradient_descent(
            _kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
            min_grad_norm=0.0, min_error_diff=0.0,
            learning_rate=self.learning_rate, verbose=self.verbose,
            args=[P, alpha, n_samples, self.n_components])
        if self.verbose:
            print("[t-SNE] Error after %d iterations with early "
                  "exaggeration: %f" % (it + 1, error))

        # Final optimization: restore the true P and run until n_iter or an
        # internal stopping criterion of _gradient_descent fires.
        P /= self.early_exaggeration
        params, error, it = _gradient_descent(
            _kl_divergence, params, it=it + 1, n_iter=self.n_iter,
            momentum=0.8, learning_rate=self.learning_rate,
            verbose=self.verbose, args=[P, alpha, n_samples,
                                        self.n_components])
        if self.verbose:
            print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))

        X_embedded = params.reshape(n_samples, self.n_components)
        return X_embedded

    def fit_transform(self, X):
        """Transform X to the embedded space.

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Embedding of the training data in low-dimensional space.
        """
        self._fit(X)
        return self.embedding_
| {
"repo_name": "Garrett-R/scikit-learn",
"path": "sklearn/manifold/t_sne.py",
"copies": "5",
"size": "19694",
"license": "bsd-3-clause",
"hash": 5105412433231380000,
"line_mean": 36.8730769231,
"line_max": 80,
"alpha_frac": 0.6161267391,
"autogenerated": false,
"ratio": 3.8828864353312302,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00011108745975574063,
"num_lines": 520
} |
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
    """Dummy classifier to test the learning curve"""
    def __init__(self, n_max_train_sizes):
        # Largest training-set size the learning curve will request.
        self.n_max_train_sizes = n_max_train_sizes
        self.train_sizes = 0
        self.X_subset = None

    def fit(self, X_subset, y_subset=None):
        """Record the training subset and its size; no real fitting."""
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test error better (0 -> 1)
        if self._is_training_data(X):
            return 2. - float(self.train_sizes) / self.n_max_train_sizes
        else:
            return float(self.train_sizes) / self.n_max_train_sizes

    def _is_training_data(self, X):
        # Identity check: learning_curve passes the same array object back.
        return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
    """Dummy classifier that provides partial_fit"""
    def __init__(self, n_max_train_sizes):
        super(MockIncrementalImprovingEstimator,
              self).__init__(n_max_train_sizes)
        self.x = None

    def _is_training_data(self, X):
        # Incremental fitting sees several batches; remember one training
        # sample and treat any array containing it as training data.
        return self.x in X

    def partial_fit(self, X, y=None, **params):
        """Accumulate the total number of samples seen so far."""
        self.train_sizes += X.shape[0]
        self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
    """Dummy classifier to test the validation curve"""
    def __init__(self, param=0.5):
        self.X_subset = None
        self.param = param

    def fit(self, X_subset, y_subset):
        """Record the training subset and its size; no real fitting."""
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, y=None):
        # Training score equals `param`, validation score its complement, so
        # validation_curve output reflects `param` directly.
        return self.param if self._is_training_data(X) else 1 - self.param

    def _is_training_data(self, X):
        return X is self.X_subset
def test_learning_curve():
    # The mock's scores vary linearly with train size, so the averaged
    # curves must match the expected linear ramps exactly and no warnings
    # may be raised.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    with warnings.catch_warnings(record=True) as w:
        train_sizes, train_scores, test_scores = learning_curve(
            estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)
    assert_equal(train_scores.shape, (10, 3))
    assert_equal(test_scores.shape, (10, 3))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
    # learning_curve must also work with y=None (unsupervised estimators).
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
    # verbose=1 must emit a "[learning_curve]" progress message on stdout;
    # stdout is captured and restored even if the call raises.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        train_sizes, train_scores, test_scores = \
            learning_curve(estimator, X, y, cv=3, verbose=1)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
    # Requesting incremental learning from an estimator without partial_fit
    # must raise a ValueError.
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    # The mockup does not have partial_fit()
    estimator = MockImprovingEstimator(1)
    assert_raises(ValueError, learning_curve, estimator, X, y,
                  exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
    # With partial_fit available, the incremental path must produce the same
    # linear curves as the batch path.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
    # Incremental learning path combined with y=None (unsupervised).
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
    # For a real estimator (single-pass PassiveAggressive, no shuffling) the
    # batch and incremental code paths must agree on sizes and mean scores.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    train_sizes = np.linspace(0.2, 1.0, 5)
    estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)

    train_sizes_inc, train_scores_inc, test_scores_inc = \
        learning_curve(
            estimator, X, y, train_sizes=train_sizes,
            cv=3, exploit_incremental_learning=True)
    train_sizes_batch, train_scores_batch, test_scores_batch = \
        learning_curve(
            estimator, X, y, cv=3, train_sizes=train_sizes,
            exploit_incremental_learning=False)

    assert_array_equal(train_sizes_inc, train_sizes_batch)
    assert_array_almost_equal(train_scores_inc.mean(axis=1),
                              train_scores_batch.mean(axis=1))
    assert_array_almost_equal(test_scores_inc.mean(axis=1),
                              test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
    # train_sizes outside (0, 1] as fractions or [1, n_max] as absolute
    # counts must raise ValueError.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0, 1])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0.0, 1.0])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0.1, 1.1])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0, 20])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
    # Fractional sizes that round to the same integer must be deduplicated
    # and a RuntimeWarning must be emitted.
    X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(2)
    train_sizes, _, _ = assert_warns(
        RuntimeWarning, learning_curve, estimator, X, y, cv=3,
        train_sizes=np.linspace(0.33, 1.0, 3))
    assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
    # An explicit CV object (KFold with boolean masks) must give the same
    # curves as the default integer-index splitting.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    cv = KFold(n=30, n_folds=3)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_validation_curve():
    # The mock scores `param` on training data and 1 - `param` on validation
    # data, so the averaged curves must reproduce the parameter range.
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    param_range = np.linspace(0, 1, 10)
    with warnings.catch_warnings(record=True) as w:
        train_scores, test_scores = validation_curve(
            MockEstimatorWithParameter(), X, y, param_name="param",
            param_range=param_range, cv=2
        )
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)

    assert_array_almost_equal(train_scores.mean(axis=1), param_range)
    assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| {
"repo_name": "Obus/scikit-learn",
"path": "sklearn/tests/test_learning_curve.py",
"copies": "225",
"size": "10791",
"license": "bsd-3-clause",
"hash": 5173862076298208000,
"line_mean": 41.3176470588,
"line_max": 75,
"alpha_frac": 0.6113427857,
"autogenerated": false,
"ratio": 3.452015355086372,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
    """Dummy classifier to test the learning curve"""
    def __init__(self, n_max_train_sizes):
        # Largest training-set size the learning curve will request.
        self.n_max_train_sizes = n_max_train_sizes
        self.train_sizes = 0
        self.X_subset = None

    def fit(self, X_subset, y_subset=None):
        """Record the training subset and its size; no real fitting."""
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test error better (0 -> 1)
        if self._is_training_data(X):
            return 2. - float(self.train_sizes) / self.n_max_train_sizes
        else:
            return float(self.train_sizes) / self.n_max_train_sizes

    def _is_training_data(self, X):
        # Identity check: learning_curve passes the same array object back.
        return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
    """Dummy classifier that provides partial_fit"""
    def __init__(self, n_max_train_sizes):
        super(MockIncrementalImprovingEstimator,
              self).__init__(n_max_train_sizes)
        self.x = None

    def _is_training_data(self, X):
        # Incremental fitting sees several batches; remember one training
        # sample and treat any array containing it as training data.
        return self.x in X

    def partial_fit(self, X, y=None, **params):
        """Accumulate the total number of samples seen so far."""
        self.train_sizes += X.shape[0]
        self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
    """Dummy classifier to test the validation curve"""
    def __init__(self, param=0.5):
        self.X_subset = None
        self.param = param

    def fit(self, X_subset, y_subset):
        """Record the training subset and its size; no real fitting."""
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, y=None):
        # Training score equals `param`, validation score its complement, so
        # validation_curve output reflects `param` directly.
        return self.param if self._is_training_data(X) else 1 - self.param

    def _is_training_data(self, X):
        return X is self.X_subset
def test_learning_curve():
    # The mock's scores vary linearly with train size, so the averaged
    # curves must match the expected linear ramps exactly and no warnings
    # may be raised.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    with warnings.catch_warnings(record=True) as w:
        train_sizes, train_scores, test_scores = learning_curve(
            estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)
    assert_equal(train_scores.shape, (10, 3))
    assert_equal(test_scores.shape, (10, 3))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
    # learning_curve must also work with y=None (unsupervised estimators).
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
    # verbose=1 must emit a "[learning_curve]" progress message on stdout;
    # stdout is captured and restored even if the call raises.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        train_sizes, train_scores, test_scores = \
            learning_curve(estimator, X, y, cv=3, verbose=1)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
    # Requesting incremental learning from an estimator without partial_fit
    # must raise a ValueError.
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    # The mockup does not have partial_fit()
    estimator = MockImprovingEstimator(1)
    assert_raises(ValueError, learning_curve, estimator, X, y,
                  exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
    """Curves computed via partial_fit() must match the mock's expectations."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    incremental_clf = MockIncrementalImprovingEstimator(20)
    sizes, scores_train, scores_test = learning_curve(
        incremental_clf, X, y, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(scores_train.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(scores_test.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
    """Incremental mode must also work without a target vector (y=None)."""
    data, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                                  n_redundant=0, n_classes=2,
                                  n_clusters_per_class=1, random_state=0)
    incremental_clf = MockIncrementalImprovingEstimator(20)
    sizes, scores_train, scores_test = learning_curve(
        incremental_clf, data, y=None, cv=3,
        exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(scores_train.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(scores_test.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
    """Batch and partial_fit based curves must agree for a linear model."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    requested_sizes = np.linspace(0.2, 1.0, 5)
    clf = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
    sizes_inc, train_inc, test_inc = learning_curve(
        clf, X, y, train_sizes=requested_sizes,
        cv=3, exploit_incremental_learning=True)
    sizes_batch, train_batch, test_batch = learning_curve(
        clf, X, y, cv=3, train_sizes=requested_sizes,
        exploit_incremental_learning=False)
    assert_array_equal(sizes_inc, sizes_batch)
    assert_array_almost_equal(train_inc.mean(axis=1),
                              train_batch.mean(axis=1))
    assert_array_almost_equal(test_inc.mean(axis=1),
                              test_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
    """Invalid train_sizes specifications must raise ValueError."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    clf = MockImprovingEstimator(20)
    # Fractions outside (0, 1] and absolute counts outside the valid
    # range are all rejected the same way.
    bad_size_specs = ([0, 1], [0.0, 1.0], [0.1, 1.1], [0, 20], [1, 21])
    for sizes in bad_size_specs:
        assert_raises(ValueError, learning_curve, clf, X, y, cv=3,
                      train_sizes=sizes)
def test_learning_curve_remove_duplicate_sample_sizes():
    """Duplicate integer train sizes collapse, with a RuntimeWarning."""
    X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    clf = MockImprovingEstimator(2)
    # Three fractions of a 2-sample training split produce duplicate
    # integer sizes, which learning_curve deduplicates and warns about.
    sizes, _, _ = assert_warns(
        RuntimeWarning, learning_curve, clf, X, y, cv=3,
        train_sizes=np.linspace(0.33, 1.0, 3))
    assert_array_equal(sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
    """learning_curve must accept CV iterators such as KFold."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    clf = MockImprovingEstimator(20)
    splitter = KFold(n=30, n_folds=3)
    sizes, scores_train, scores_test = learning_curve(
        clf, X, y, cv=splitter, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(scores_train.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(scores_test.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_validation_curve():
    """validation_curve scores must track the mock's parameter directly."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    param_range = np.linspace(0, 1, 10)
    with warnings.catch_warnings(record=True) as caught:
        train_scores, test_scores = validation_curve(
            MockEstimatorWithParameter(), X, y, param_name="param",
            param_range=param_range, cv=2
        )
        if caught:
            raise RuntimeError("Unexpected warning: %r" % caught[0].message)
    # The mock scores `param` on train and `1 - param` on test.
    assert_array_almost_equal(train_scores.mean(axis=1), param_range)
    assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| {
"repo_name": "PatrickOReilly/scikit-learn",
"path": "sklearn/tests/test_learning_curve.py",
"copies": "59",
"size": "10869",
"license": "bsd-3-clause",
"hash": 6121548704753379000,
"line_mean": 40.9652509653,
"line_max": 75,
"alpha_frac": 0.6116478057,
"autogenerated": false,
"ratio": 3.4581609926821506,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 259
} |
__author__ = 'Alexander Gorkun'
__email__ = 'mindkilleralexs@gmail.com'
from django.contrib.auth.models import User, Group
from django.contrib.auth import authenticate as django_auth
from django.db import transaction
from django.db.models import Q
from django.core.cache import cache
from xanderhorkunspider import domain
from xanderhorkunspider.web.websites import models
from xanderhorkunspider.web.config import settings
from xanderhorkunspider import spider
# Module-level DAO singletons shared by the domain services below.
__websites_dao = models.WebsitesDBDao()
__pages_dao = models.PagesDBDao()
__loading_dao = models.LoadingDBDao()
class Websites(domain.Websites):
    """
    Web-layer business logic related to websites, pages and loadings.

    Extends the core ``domain.Websites`` service with a cached query for
    the most recently created loadings.
    """

    # Seconds a cached "last loadings" list stays valid.
    _LAST_LOADINGS_TTL = 600

    def find_last_loadings(self, limit=10):
        """
        Gets a list of most recently created loadings.

        :param limit: Max amount of items to return.
        :return: List of Loading models.
        """
        # The key must include the actual limit; the original hard-coded
        # str(10), so every limit shared (and clobbered) one cache entry.
        cache_key = 'last_loadings_' + str(limit)
        # Single get() instead of `in cache` + get(): avoids the second
        # lookup and the race between the contains check and the read.
        cached = cache.get(cache_key)
        if cached is not None:
            return cached
        loadings = self._loading_dao.find_all(limit=limit, order_by='time',
                                              order_asc=False)
        cache.set(cache_key, loadings, self._LAST_LOADINGS_TTL)
        return loadings
websites_domain = Websites(__pages_dao, __websites_dao, __loading_dao)
class SpiderFactory(object):
    """
    Creates and stores spiders.
    """
    # NOTE: class-level list -- every SpiderFactory instance shares the
    # same spider registry, and spiders are never removed from it.
    spiders = []
    websites = None
    max_processes = None
    def __init__(self, def_websites, def_max_p=10):
        """
        Setting default parameters for future spiders.
        :param def_websites: Default Websites domain object to create spiders.
        :param def_max_p: Default max processes amount for spiders.
        """
        assert isinstance(def_websites, domain.Websites)
        assert def_max_p > 0
        self.websites = def_websites
        self.max_processes = def_max_p
    def create_spider(self, websites=None, max_p=None, autostart=False):
        """
        Creates a spider.
        :param websites: Websites domain obj, if none given default will be used.
        :param max_p: Max amount of processes, if not given default will be used.
        :param autostart: Will the spider be already started? False by default.
        :return: SpiderManager instance.
        """
        if not websites:
            websites = self.websites
        if not max_p:
            max_p = self.max_processes
        spdr = spider.SpiderManager(websites, max_p=max_p, autostart=autostart)
        # Register the spider so it can later be found by id().
        self.spiders.append(spdr)
        return spdr
    def find_spider_by_id(self, sid):
        """
        Finds spider by it's ID received by "id" function.
        :param sid: Integer
        :return: None or spider.
        """
        sid = int(sid)
        # Linear scan over the shared registry, matching on CPython id().
        for spdr in self.spiders:
            if id(spdr) == sid:
                return spdr
        return None
spider_factory = SpiderFactory(websites_domain)
class Users(object):
    """
    Business logic for user accounts: registration and authentication.
    """

    # Lazily-filled memo of the groups every new user joins.
    __default_groups = None

    def __init__(self):
        """
        Class is responsible for maintaining users (creation, authorization etc.)
        """

    def _fetch_default_groups(self):
        """Load (once) and return the set of default Group objects."""
        if self.__default_groups is None:
            group_filter = Q(name__in=settings.DEFAULT_GROUPS)
            self.__default_groups = set(Group.objects.filter(group_filter))
        return self.__default_groups

    def create(self, username, email, password):
        """
        Creates user based on credentials, checks if user with such credentials already exists.
        :param username: User's name.
        :param email: E-mail.
        :param password: User's password.
        :raises ValueError: If user with such credentials already exists.
        :return: User instance.
        """
        if (User.objects.filter(email=email).exists()
                or User.objects.filter(username=username).exists()):
            raise ValueError("Such user already exists")
        with transaction.atomic():
            new_user = User.objects.create_user(username, email=email,
                                                password=password)
            if new_user is None:
                raise RuntimeError("Unable to create user for some reasons")
            for default_group in self._fetch_default_groups():
                new_user.groups.add(default_group)
        return new_user

    def authenticate(self, username, password):
        """
        Authenticates user.
        :param username: User's name.
        :param password: Password.
        :return: User instance, or None for bad credentials / inactive users.
        """
        account = django_auth(username=username, password=password)
        if account and not account.is_active:
            account = None
        return account
users = Users() | {
"repo_name": "AlexMaxHorkun/xanderh-spider",
"path": "xanderhorkunspider/web/websites/domain.py",
"copies": "1",
"size": "4618",
"license": "apache-2.0",
"hash": 2990174486019816000,
"line_mean": 30.8551724138,
"line_max": 104,
"alpha_frac": 0.6307925509,
"autogenerated": false,
"ratio": 4.02615518744551,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.515694773834551,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alexander Gorkun'
__email__ = 'mindkilleralexs@gmail.com'
from django import shortcuts
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from xanderhorkunspider.web.websites.domain import users
from xanderhorkunspider.web.websites import forms
# Auth related views
def signup_view(request):
    """
    This page allows user to register, contains sign up form.
    :param request: HTTP request.
    :type request: django.http.HttpRequest
    :return: HTTP response.
    :rtype: django.http.HttpResponse
    """
    # Already-authenticated users have nothing to do here.
    if request.user.is_authenticated():
        return shortcuts.redirect('index')
    if request.method == 'POST':
        form = forms.SignupForm(request.POST)
        if form.is_valid():
            user = None
            try:
                user = users.create(form.cleaned_data['username'], email=form.cleaned_data['email'],
                                    password=form.cleaned_data['password'])
            except ValueError:
                # User already exists
                form.add_error("username", "User with such email/username already exists")
            if user:
                # Log the freshly created account in right away.
                user = users.authenticate(username=form.cleaned_data['username'],
                                          password=form.cleaned_data['password'])
                if user:
                    login(request, user)
                    if not form.cleaned_data['rememberme']:
                        # Expire the session when the browser closes.
                        request.session.set_expiry(0)
                    return shortcuts.redirect('index')
                else:
                    raise RuntimeError("Unable to create user or authenticate")
    else:
        form = forms.SignupForm()
    # Invalid POSTs fall through with a bound form carrying the errors.
    return shortcuts.render(request, "websites/auth/signup.html", {'form': form})
@login_required()
def logout_view(request):
    """
    Logs the current user out, then redirects to the index page.
    Does not contain any HTML output.

    :param request: HTTP request.
    :type request: django.http.HttpRequest
    :return: HTTP response.
    :rtype: django.http.HttpResponse
    """
    # Call is_authenticated() like the sibling views (signup_view,
    # login_view) do: on this Django version it is a method, and the bare
    # attribute access is a bound-method object that is always truthy, so
    # the original `if request.user.is_authenticated:` never took the
    # False branch.
    if request.user.is_authenticated():
        logout(request)
    return shortcuts.redirect('index')
def login_view(request):
    """
    This page allows user to login, contains login form.
    :param request: HTTP request.
    :type request: django.http.HttpRequest
    :return: HTTP response.
    :rtype: django.http.HttpResponse
    """
    # Already-authenticated users are simply redirected home.
    if request.user.is_authenticated():
        return shortcuts.redirect('index')
    bad_credentials_error = False
    if request.method == 'POST':
        if not ('username' in request.POST and 'password' in request.POST):
            # Malformed POST (missing fields) is reported as bad credentials.
            bad_credentials_error = True
        else:
            user = users.authenticate(request.POST['username'], request.POST['password'])
            if user:
                login(request, user)
                if not request.POST.get('rememberme', None):
                    # Expire the session when the browser closes.
                    request.session.set_expiry(0)
                return shortcuts.redirect('index')
            else:
                bad_credentials_error = True
    return shortcuts.render(request, "websites/auth/login.html", {'bad_credentials': bad_credentials_error}) | {
"repo_name": "AlexMaxHorkun/xanderh-spider",
"path": "xanderhorkunspider/web/websites/views/auth.py",
"copies": "1",
"size": "3180",
"license": "apache-2.0",
"hash": -8798578542268508000,
"line_mean": 35.988372093,
"line_max": 108,
"alpha_frac": 0.6119496855,
"autogenerated": false,
"ratio": 4.3561643835616435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001012848797340803,
"num_lines": 86
} |
__author__ = 'Alexander Gorkun'
__email__ = 'mindkilleralexs@gmail.com'
from xanderhorkunspider import dao
class InMemoryPageDao(dao.PageDao):
    """In-memory PageDao: keeps pages in a dict, no real persistence."""

    # Class-level store, shared by every instance of this DAO.
    __pages = {}

    def find_by_url(self, url):
        """Return the stored page with the given url, or None."""
        return next(
            (candidate for candidate in self.__pages.values()
             if candidate.url == url),
            None)

    def persist(self, page):
        """Assign the new page an id and remember it."""
        assert page.id == 0
        page.id = id(page)
        self.__pages[page.id] = page
class InMemoryLoadingDao(dao.LoadingDao):
    """
    Keeps Loadings in a plain dict; no real persistence.
    """
    # Class-level store, shared by every instance of this DAO.
    __loadings = {}

    def persist(self, loading):
        """
        Stores a new loading and assigns it an id.

        :param loading: Loading entity with id == 0 (not yet persisted).
        """
        assert loading.id == 0
        # Mirror InMemoryPageDao.persist / LoadingDBDao.persist: the entity
        # must leave persist() with a non-zero id, otherwise
        # domain.Websites.save_loading (which dispatches on id == 0) would
        # re-persist the same loading on every save.
        loading.id = id(loading)
        self.__loadings[loading.id] = loading

    def save(self, loading):
        """No need to do anything: stored objects are mutated in place."""
        pass

    def find_all(self, limit=0, offset=0):
        """
        Lists stored loadings.

        :param limit: Max number of items to return (0 = no limit).
        :param offset: Number of items to skip first.
        :return: List of Loading entities.
        """
        loadings = list(self.__loadings.values())
        if offset > 0:
            loadings = loadings[offset:]
        if limit > 0:
            loadings = loadings[:limit]
        return loadings
class InMemoryWebsiteDao(dao.WebsiteDao):
    """
    Keeps Websites in array.
    """
    # NOTE: class-level dict -- the store is shared by all instances.
    __websites = {}
    def persist(self, website):
        # NOTE(review): unlike InMemoryPageDao.persist, the entity's id is
        # never assigned here, so callers keep seeing id == 0 -- confirm
        # whether that is intended.
        assert website.id == 0
        self.__websites[id(website)] = website
    def save(self, website):
        """No need to do anything"""
        pass | {
"repo_name": "AlexMaxHorkun/xanderh-spider",
"path": "xanderhorkunspider/inmemory_dao.py",
"copies": "1",
"size": "1381",
"license": "apache-2.0",
"hash": -5496088601378433000,
"line_mean": 21.2903225806,
"line_max": 46,
"alpha_frac": 0.5488776249,
"autogenerated": false,
"ratio": 3.692513368983957,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9734670563776431,
"avg_score": 0.0013440860215053762,
"num_lines": 62
} |
__author__ = 'Alexander Gorkun'
__email__ = 'mindkilleralexs@gmail.com'
from xanderhorkunspider import models
class Websites(object):
    """
    Facade over the page/website/loading DAOs: the business logic the
    spider and the web layer use to work with websites, pages and loadings.
    """
    _page_dao = None
    _website_dao = None
    _loading_dao = None

    def __init__(self, page_dao, website_dao, loading_dao):
        """
        :param page_dao: PageDao impl.
        :param website_dao: WebsiteDao impl.
        :param loading_dao: LoadingDao impl.
        """
        self._page_dao = page_dao
        self._website_dao = website_dao
        self._loading_dao = loading_dao

    def persist(self, website):
        """
        Saves a new Website entity, its pages and the pages' loadings.

        :param website: Website obj.
        """
        self._website_dao.persist(website)
        for page in website.pages:
            self._page_dao.persist(page)
            for loading in page.loadings:
                self._loading_dao.persist(loading)

    def find(self, wid):
        """Finds a website by ID."""
        return self._website_dao.find(wid)

    def save_loading(self, loading):
        """
        Persists a new loading (and, if necessary, its page) or saves an
        existing one.

        :param loading: Loading entity; must reference a page.
        :raises ValueError: If the loading has no page.
        """
        if not loading.page:
            raise ValueError("Loading must have link to page")
        if loading.page.id == 0:
            try:
                self._page_dao.persist(loading.page)
            except Exception as exc:
                # Maybe a page with this URL already exists -- reuse it.
                page = self._page_dao.find_by_url(loading.page.url)
                if page:
                    loading.page = page
                else:
                    raise exc
        if loading.id == 0:
            self._loading_dao.persist(loading)
        else:
            self._loading_dao.save(loading)

    def find_page_by_url(self, url):
        """Finds a page by its URL (None if absent)."""
        return self._page_dao.find_by_url(url)

    def find_loadings(self, l=0, o=0):
        """
        Gets list of loadings.

        :param l: Max amount of items.
        :param o: Offset.
        :return: List.
        """
        return self._loading_dao.find_all(limit=l, offset=o)

    def find_websites(self, l=0, o=0):
        """
        Gets list of websites.

        :param l: Max amount of items.
        :param o: Offset.
        :return: List.
        """
        return self._website_dao.find_all(limit=l, offset=o)

    def save(self, website):
        """
        Saves changes to an existing website.

        :param website: Website entity.
        """
        self._website_dao.save(website)

    def remove(self, website):
        """
        Deletes a website.

        :param website: Website entity or ID.
        """
        if isinstance(website, models.Website):
            wid = website.id
        else:
            wid = int(website)
        self._website_dao.delete(wid)

    def find_page(self, pid):
        """
        Finds a page by ID.

        :param pid: Page's ID.
        :return: Page.
        """
        return self._page_dao.find(pid)

    def save_page(self, page):
        """
        Persists a new page or saves an existing one.

        :param page: Page entity.
        """
        # BUG FIX: the branches were swapped -- a page that already had an
        # id was handed to persist() (which the DAOs only accept for
        # id == 0), while a brand-new page went to save(), which expects
        # an existing row. Dispatch now matches save_loading() above.
        if page.id:
            self._page_dao.save(page)
        else:
            self._page_dao.persist(page)

    def remove_page(self, page):
        """
        Deletes a page.

        :param page: Page entity or ID.
        """
        if isinstance(page, models.Page):
            pid = page.id
        else:
            pid = page
        self._page_dao.delete(pid)
"repo_name": "AlexMaxHorkun/xanderh-spider",
"path": "xanderhorkunspider/domain.py",
"copies": "1",
"size": "3297",
"license": "apache-2.0",
"hash": 1563321288986279700,
"line_mean": 26.7142857143,
"line_max": 81,
"alpha_frac": 0.533515317,
"autogenerated": false,
"ratio": 3.8879716981132075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49214870151132073,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alexander Gorkun'
__email__ = 'mindkilleralexs@gmail.com'
import json
import base64
from django import shortcuts
from django import http
from django.contrib.auth.decorators import permission_required
from xanderhorkunspider.web.websites import models
from xanderhorkunspider.web.websites import forms
from xanderhorkunspider.web.websites import domain
def index_view(request):
    """
    Home page of the "websites" module: lists websites (with some of
    their pages) alongside the most recent loadings.

    :type request: django.http.HttpRequest
    :rtype: django.http.HttpResponse
    """
    service = domain.websites_domain
    context = {
        'websites': service.find_websites(),
        'last_loadings': service.find_last_loadings(),
    }
    return shortcuts.render(request, 'websites/index.html', context)
@permission_required('websites.edit_websites')
def edit_website_view(request, wid=None):
    """
    Page allows to create/edit a website entity.
    :type request: django.http.HttpRequest
    :param wid: Website's ID, it's an edit form when given, create form otherwise.
    :rtype: django.http.HttpResponse
    """
    template = 'websites/add_website.html'
    websites = domain.websites_domain
    website = None
    if wid is not None:
        website = websites.find(wid)
    if not website:
        # No wid given: start from a blank website (create mode).
        website = models.WebsitesModel()
    if request.method == 'POST':
        form = forms.WebsiteForm(request.POST)
        if form.is_valid():
            website.host = form.cleaned_data['host']
            website.name = form.cleaned_data['name']
            # Unsaved entities have a falsy id -> insert; otherwise update.
            if not website.id:
                websites.persist(website)
            else:
                websites.save(website)
            return shortcuts.redirect(shortcuts.resolve_url('index'))
    else:
        form_data = None
        if website.id:
            # Pre-fill the form with the existing website's data.
            form_data = dict()
            form_data['name'] = website.name
            form_data['host'] = website.host
        form = forms.WebsiteForm(form_data)
    return shortcuts.render(request, template, {'form': form, 'website': website})
@permission_required('websites.edit_websites')
def delete_website_view(request, wid):
    """
    Confirmation page for deleting a website; a POST performs the delete.

    :param wid: Website's ID.
    :type request: django.http.HttpRequest
    :rtype: django.http.HttpResponse
    """
    service = domain.websites_domain
    target = service.find(wid)
    if not target:
        raise http.Http404
    if request.method != 'POST':
        # GET: show the confirmation page.
        return shortcuts.render(request, 'websites/delete_website.html',
                                {'website': target})
    service.remove(target)
    return shortcuts.redirect(shortcuts.resolve_url('index'))
@permission_required('websites.edit_websites')
def edit_page_view(request, pid=None, wid=None):
    """
    Page allows to create/edit a website's page entity.
    :type request: django.http.HttpRequest
    :param wid: Website's ID, when given the would be able to be assigned only to this website.
    :param pid: Page's ID, if given it's an edit form, create form otherwise.
    :rtype: django.http.HttpResponse
    """
    template = 'websites/edit_page.html'
    websites = domain.websites_domain
    if pid:
        # Edit mode: the page must exist.
        page = websites.find_page(pid)
        if not page:
            raise http.Http404()
    else:
        # Create mode: optionally pre-bind the page to a website.
        page = models.PageModel()
        if wid:
            website = websites.find(wid)
            if not website:
                raise http.Http404()
            page.website = website
    if request.method == 'POST':
        form = forms.PageForm(request.POST, instance=page)
        if form.is_valid():
            websites.save_page(page)
            return shortcuts.redirect(shortcuts.resolve_url('index'))
    else:
        form = forms.PageForm(instance=page)
    return shortcuts.render(request, template, {'page': page, 'form': form})
@permission_required('websites.edit_websites')
def delete_page_view(request, pid):
    """
    Confirmation page for deleting a page; a POST performs the delete.

    :param pid: Page's ID.
    :type request: django.http.HttpRequest
    :rtype: django.http.HttpResponse
    """
    service = domain.websites_domain
    target = service.find_page(pid)
    if not target:
        raise http.Http404()
    if request.method != 'POST':
        # GET: show the confirmation page.
        return shortcuts.render(request, 'websites/delete_page.html',
                                {'page': target})
    service.remove_page(target)
    return shortcuts.redirect(shortcuts.resolve_url('index'))
@permission_required('websites.run_spider_sessions')
def spider_session_view(request, wid):
    """
    Page for starting and/or monitoring a spider session on one website.
    A spider loads the website's pages, saves the content and follows
    newly discovered links.

    :param wid: Website's ID.
    :type request: django.http.HttpRequest
    :rtype: django.http.HttpResponse
    """
    target = domain.websites_domain.find(wid)
    if not target:
        raise http.Http404()
    # Resume an existing spider when the client passes its id; 0 means
    # "no spider yet".
    if 'spider_id' in request.GET:
        spider_id = int(request.GET['spider_id'])
    else:
        spider_id = 0
    context = {
        'website': target,
        'spider_id': spider_id,
        'default_max_process_count': domain.spider_factory.max_processes,
    }
    return shortcuts.render(request, 'websites/spider_session.html', context)
@permission_required('websites.run_spider_sessions')
def start_spider_session_view(request):
    """
    This is a block at "spider session" page. It shows current processes and stuff.
    :type request: django.http.HttpRequest
    :rtype: django.http.HttpResponse
    """
    wid = request.GET.get('website')
    sid = request.GET.get('spider_id')
    if request.GET.get('max_processes'):
        max_processes = int(request.GET.get('max_processes'))
    else:
        max_processes = 5
    if not wid:
        raise http.Http404()
    website = domain.websites_domain.find(wid)
    if sid:
        # Resume the spider the client already started.
        spider = domain.spider_factory.find_spider_by_id(request.GET['spider_id'])
        if not spider:
            raise ValueError("No spider found with ID %s" % request.GET['spider_id'])
    else:
        spider = domain.spider_factory.create_spider()
    if max_processes:
        spider.max_process_count = max_processes
    # NOTE(review): this validation runs after create_spider(), so a 404
    # here leaves an idle spider registered in the factory's list.
    if (not website) or (not len(website.pages)):
        raise http.Http404()
    # Queue every known page of the website for crawling.
    for p in website.pages:
        spider.crawl(p)
    if not spider.is_alive():
        spider.start()
    return shortcuts.render(request, "websites/start_spider_session.html",
                            {'spider': spider, 'website': website, 'spider_id': id(spider)})
@permission_required('websites.run_spider_sessions')
def spider_status_view(request, sid):
    """
    Gets information about spider and it's processes. Returns json.
    :type request: django.http.HttpRequest
    :rtype: django.http.HttpResponse
    """
    spider_manager = domain.spider_factory.find_spider_by_id(sid)
    if not spider_manager:
        raise ValueError("No spider with ID '%s' found" % sid)
    info = spider_manager.active_processes_info()
    response_data = {'is_alive': spider_manager.is_alive(), 'loadings': list()}
    for crawling in info:
        crawling_data = {
            'url': crawling.page.url,
            'website': {'name': crawling.page.website.name},
            # URL-safe base64 of the page URL serves as a client-side id.
            'id': base64.urlsafe_b64encode(str.encode(crawling.page.url)).decode(),
            'started': crawling.started.strftime("%y,%m,%d,%H,%M,%S")
        }
        response_data['loadings'].append(crawling_data)
    if 'website_id' in request.GET:
        website = domain.websites_domain.find(request.GET['website_id'])
        if website:
            response_data['pages_count'] = website.pages.count()
    if 'stop_when_done' in request.GET:
        # Any non-zero value asks the spider to shut down once idle.
        if int(request.GET['stop_when_done']) != 0:
            spider_manager.stop_when_done = True
    return http.HttpResponse(json.dumps(response_data), content_type="application/json") | {
"repo_name": "AlexMaxHorkun/xanderh-spider",
"path": "xanderhorkunspider/web/websites/views/websites.py",
"copies": "1",
"size": "8017",
"license": "apache-2.0",
"hash": 3547741031125777400,
"line_mean": 34.3215859031,
"line_max": 115,
"alpha_frac": 0.6466259199,
"autogenerated": false,
"ratio": 3.742763772175537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9885666395316554,
"avg_score": 0.0007446593517967383,
"num_lines": 227
} |
__author__ = 'Alexander Gorkun'
__email__ = 'mindkilleralexs@gmail.com'
import json
from django.db import models
from xanderhorkunspider.models import Website
from xanderhorkunspider.models import Page
from xanderhorkunspider.models import Loading
from xanderhorkunspider.dao import WebsiteDao
from xanderhorkunspider.dao import PageDao
from xanderhorkunspider.dao import LoadingDao
class WebsitesModel(models.Model, Website):
    """
    Represents websites, a list of pages (urls) with a host to determine if a page belongs to it and a name for UI.
    """
    id = models.AutoField(primary_key=True)
    # Host used to decide whether a crawled URL belongs to this website.
    host = models.CharField(max_length=128)
    # Human-readable name shown in the UI.
    name = models.CharField(max_length=255)
    def __str__(self):
        return self.name
    class Meta:
        db_table = "websites"
    @property
    def pages(self):
        """All pages of this website (reverse FK, see PageModel.website)."""
        return self.pages_set.all()
    def pages_count(self):
        """
        Number of pages belongs.
        :return: int
        """
        return self.pages_set.count()
class WebsitesDBDao(WebsiteDao):
    """
    Implementation of DAO interface for website entities for spider to use.
    """
    def find_all(self, offset=0, limit=0):
        """
        Lists websites.

        :param offset: Number of rows to skip.
        :param limit: Max rows to return (0 = unlimited).
        :return: List of WebsitesModel.
        """
        query = WebsitesModel.objects.all()
        # Apply the offset before the limit. The original sliced the limit
        # first and then the offset within that slice, returning at most
        # (limit - offset) rows instead of `limit` rows starting at offset.
        if offset > 0:
            query = query[offset:]
        if limit > 0:
            query = query[:limit]
        return list(query)

    def persist(self, website):
        """
        Inserts a new website; plain entities get their id filled in.
        """
        if isinstance(website, WebsitesModel):
            website.save()
        else:
            website_model = WebsitesModel()
            WebsitesDBDao._entity_to_model(website, website_model)
            website_model.save()
            website.id = website_model.id

    def save(self, website):
        """
        Updates an existing website.

        :raises RuntimeError: If the stored row cannot be loaded.
        """
        if isinstance(website, WebsitesModel):
            website.save()
        else:
            website_model = WebsitesModel.objects.get(pk=website.id)
            if not website_model:
                raise RuntimeError()
            WebsitesDBDao._entity_to_model(website, website_model)
            website_model.save()

    def find(self, wid):
        """Fetches one website by primary key."""
        return WebsitesModel.objects.get(pk=wid)

    @classmethod
    def _entity_to_model(cls, entity, model):
        """
        Copies field values from simple entityt to model.
        :param entity: Simple website entity.
        :param model: Website model that extends django's model.
        """
        if (not entity.id is None) and entity.id > 0:
            model.id = entity.id
        model.host = entity.host
        model.name = entity.name
        # The original also assigned model.pages, but WebsitesModel.pages
        # is a getter-only property, so that assignment always raised
        # AttributeError. Pages are persisted separately via the PageDao.

    def delete(self, wid):
        """
        Deletes a website by ID.

        :raises ValueError: If no such website exists.
        """
        website = WebsitesModel.objects.get(pk=wid)
        if not website:
            raise ValueError("Website with id = %d not found" % wid)
        website.delete()
class PageModel(models.Model, Page):
    """
    Represents pages (urls) of a website.
    """
    id = models.AutoField(primary_key=True)
    # URLs are globally unique across all websites.
    url = models.CharField(max_length=255, unique=True)
    # Owning website; deleting it cascades to its pages.
    website = models.ForeignKey(WebsitesModel, related_name='pages_set', on_delete=models.CASCADE)
    def __str__(self):
        return self.url
    class Meta:
        db_table = "pages"
class PagesDBDao(PageDao):
    """
    Implementation of DAO interface for page entities for spider to use.
    """
    def find(self, pid):
        """Fetches one page by primary key."""
        return PageModel.objects.get(pk=pid)
    @classmethod
    def _entity_to_model(cls, entity, model):
        """
        Copies field values from simple entity to model.
        :param entity: Simple page entity.
        :param model: Page model that extends django's model.
        """
        if (not entity.id is None) and entity.id > 0:
            model.id = entity.id
        model.url = entity.url
        model.website = entity.website
    def persist(self, page):
        """Inserts a new page; plain entities get their id filled in."""
        if not isinstance(page, PageModel):
            page_model = PageModel()
            PagesDBDao._entity_to_model(page, page_model)
            page_model.save()
            # Propagate the generated primary key back to the entity.
            page.id = page_model.id
        else:
            page.save()
    def save(self, page):
        """Saves a page (the entity's id is copied by _entity_to_model)."""
        if not isinstance(page, PageModel):
            page_model = PageModel()
            PagesDBDao._entity_to_model(page, page_model)
            page_model.save()
        else:
            page.save()
    def delete(self, page):
        """Deletes a page by ID; raises ValueError if it does not exist."""
        page_model = PageModel.objects.get(pk=page)
        if page_model:
            page_model.delete()
        else:
            raise ValueError("Page with ID %d not found" % page)
    def find_by_url(self, url):
        """Returns the page with the given unique URL, or None."""
        try:
            page = PageModel.objects.get(url=url)
        except PageModel.DoesNotExist:
            page = None
        return page
class LoadingModel(models.Model, Loading):
    """
    Represents loading result of a page.
    """
    id = models.AutoField(primary_key=True)
    page = models.ForeignKey(PageModel, related_name='loadings_set', on_delete=models.CASCADE)
    success = models.BooleanField(default=False)
    # NOTE(review): class-level mutable dict, shared by all instances until
    # an instance assignment (LoadingDBDao._entity_to_model) shadows it.
    headers = {}
    # JSON-serialized copy of headers, the column actually stored in DB.
    headers_serialized = models.CharField(max_length=4096)
    content = models.TextField()
    time = models.DateTimeField(auto_now_add=True)
    # Duration of the load; see Spider.crawl_on_page for how it is filled.
    loading_time = models.PositiveIntegerField()
    def __str__(self):
        return self.content
    class Meta:
        db_table = "loadings"
class LoadingDBDao(LoadingDao):
    """
    Implementation of DAO interface for loading entity for spider to use.
    """
    @classmethod
    def _entity_to_model(cls, entity, model):
        """
        Copy attributes from entity to django's model.
        :param entity: Loading entity.
        :param model: LoadingModel instance.
        """
        if entity.id > 0:
            model.id = entity.id
        page = entity.page
        # Resolve a plain page entity to its stored model row.
        if (not isinstance(page, PageModel)) and page.id > 0:
            page = PageModel.objects.get(pk=page.id)
        model.page = page
        model.success = entity.success
        model.headers = entity.headers
        # Headers are persisted as JSON in the headers_serialized column.
        model.headers_serialized = json.dumps(entity.headers)
        model.content = entity.content
        model.time = entity.time
        model.loading_time = entity.loading_time
    def persist(self, loading):
        """Inserts a new loading; plain entities get their id filled in."""
        if isinstance(loading, Loading):
            loading_model = LoadingModel()
            LoadingDBDao._entity_to_model(loading, loading_model)
            loading_model.save()
            loading.id = loading_model.id
        elif isinstance(loading, LoadingModel):
            loading.save()
        else:
            raise ValueError()
    def save(self, loading):
        """Saves a loading (entity id is carried over by _entity_to_model)."""
        if isinstance(loading, Loading):
            loading_model = LoadingModel()
            LoadingDBDao._entity_to_model(loading, loading_model)
            loading_model.save()
        elif isinstance(loading, LoadingModel):
            loading.save()
        else:
            raise ValueError()
    def find_all(self, limit=0, offset=0, order_by='time', order_asc=False):
        """
        Gets list of Loadings from storage.
        :param limit: Max amount of entities to return.
        :param offset: Start index.
        :param order_by: Order loadings by property, time or id.
        :param order_asc: True for ascending, false for descending ordering.
        :return: List of Loadings.
        """
        query = LoadingModel.objects.select_related('page').all()
        if order_by in ('time', 'id'):
            if not order_asc:
                order_by = '-' + order_by
            query = query.order_by(order_by)
        # NOTE(review): limit is applied before offset, so a non-zero
        # offset shrinks the result to (limit - offset) rows instead of
        # returning `limit` rows starting at `offset` -- confirm intended.
        if limit > 0:
            query = query[:limit]
        if offset > 0:
            query = query[offset:]
        return list(query) | {
"repo_name": "AlexMaxHorkun/xanderh-spider",
"path": "xanderhorkunspider/web/websites/models.py",
"copies": "1",
"size": "7561",
"license": "apache-2.0",
"hash": 7462365176562864000,
"line_mean": 29.0079365079,
"line_max": 115,
"alpha_frac": 0.6021690253,
"autogenerated": false,
"ratio": 4.009013785790032,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5111182811090031,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alexander Gorkun'
__email__ = 'mindkilleralexs@gmail.com'
import requests
class LoadResult(object):
    """
    Value object holding the outcome of one page load: the URL plus the
    response headers and body.
    """

    # Class-level defaults; instances overwrite them in __init__.
    url = ""
    headers = {}
    body = ""

    def __init__(self, url, headers, body):
        """
        Record the loaded page's data.

        :type url: str
        :type headers: dict
        :type body: str
        """
        self.url = url
        self.headers = headers
        self.body = body
class Loader(object):
    """
    Used to get a web page's content.
    """
    # Default max time (seconds) allowed for one request.
    timeout = 5
    def __init__(self, timeout=0):
        """
        :param timeout: max amount of time to load pages
        """
        if timeout:
            self.timeout = int(timeout)
    def load(self, url, timeout=None):
        """
        Loads page content.
        :param url: URL address.
        :param timeout: max amount of time to load page within, optional.
        :return: page headers and body in LoadResult obj or None if loading failed.
        """
        if timeout is None and self.timeout is not None:
            timeout = self.timeout
        try:
            http_response = requests.get(url, timeout=timeout)
        except:
            # NOTE(review): bare except swallows every failure (DNS, TLS,
            # timeout, even KeyboardInterrupt) and reports it as None --
            # consider narrowing to requests.RequestException.
            return None
        if not http_response:
            # Presumably relies on requests.Response truthiness, which is
            # False for error status codes -- confirm.
            return None
        return LoadResult(url, http_response.headers, http_response.text) | {
"repo_name": "AlexMaxHorkun/xanderh-spider",
"path": "xanderhorkunspider/loader.py",
"copies": "1",
"size": "1398",
"license": "apache-2.0",
"hash": 8182295433516886000,
"line_mean": 22.3166666667,
"line_max": 83,
"alpha_frac": 0.5586552217,
"autogenerated": false,
"ratio": 4.275229357798165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5333884579498165,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alexander Gorkun'
__email__ = 'mindkilleralexs@gmail.com'
import threading
import time
import datetime
from xanderhorkunspider import loader
from xanderhorkunspider import parser
from xanderhorkunspider import models
class LoadingEvaluator(object):
    """Decides whether a loading's content is worth persisting."""

    def evaluate_loading(self, loading):
        """
        Determines whether page content is worth while.

        :param loading: Loading object.
        :return: Bool — True only for plain ``text/html`` responses.
        """
        return loading.headers.get('content-type') == 'text/html'
class Spider(object):
    """
    Gets page content and parses it for new links.

    When ``use_existing`` is True and the page already has a successful
    loading, that loading is reused instead of fetching the page again.
    """

    page_loader = loader.Loader()
    links_parser = parser.LinksParser()
    use_existing = True

    def __init__(self, ldr=None, links_parser=None):
        """
        :param ldr: Custom Loader impl if needed.
        :param links_parser: Custom LinksParser impl if needed.
        """
        if ldr is not None:
            self.page_loader = ldr
        if links_parser is not None:
            self.links_parser = links_parser

    def crawl_on_page(self, page):
        """
        Loads a page and extracts its content and new links.

        :param page: Page entity.
        :return: Tuple of (resulting Loading entity, links or None).
        """
        if self.use_existing and page.isloaded():
            # Reuse the most recent successful loading.
            loading = page.get_last_successful_loading()
        else:
            loading = self._fetch(page)
        links = self.links_parser.parse(loading) if len(loading.content) else None
        return loading, links

    def _fetch(self, page):
        """Fetch *page* over HTTP and record the attempt in a new Loading."""
        started = datetime.datetime.utcnow()
        result = self.page_loader.load(page.url)
        loading = models.Loading()
        loading.page = page
        loading.success = result is not None
        loading.headers = dict(getattr(result, "headers", {}))
        loading.content = getattr(result, "body", "")
        loading.time = started
        loading.loading_time = (datetime.datetime.utcnow() - started).total_seconds()
        return loading
class CrawlingInfo(object):
    """Lightweight snapshot of a CrawlingProcess's state."""

    __slots__ = ['loading', 'page', 'links', 'started', 'finished']

    def __init__(self, loading, page, links, started, finished):
        """
        Holds info about a CrawlingProcess.

        :param loading: Loading created during crawling (or None).
        :param page: Page entity that was loaded.
        :param links: Set of links received from the page content (or None).
        :param started: When started (datetime) or None.
        :param finished: When finished (datetime) or None.
        """
        self.loading = loading
        self.page = page
        self.links = links
        self.started = started
        self.finished = finished

    def time_took(self):
        """
        Seconds between start and finish, or 0 when either is unknown.

        :return: int/float seconds.
        """
        if self.finished and self.started:
            return (self.finished - self.started).total_seconds()
        return 0
class CrawlingProcess(threading.Thread):
    """
    Worker thread: crawls a single page and persists the resulting Loading.
    """

    page = None
    resulting_loading = None
    resulting_links = None
    finished = None
    spider = None
    websites = None
    evaluator = None
    started = None

    def __init__(self, page, spider, websites, ev):
        """
        :param page: Page to crawl on.
        :param spider: Spider.
        :param websites: Websites domain service used for persistence.
        :param ev: LoadingEvaluator deciding whether a brand-new loading
            (page and loading both unsaved, ID=0) is worth saving.
        """
        super().__init__()
        self.page = page
        self.spider = spider
        self.websites = websites
        self.evaluator = ev

    def run(self):
        """
        Runs the spider's crawl and saves results.

        A loading is persisted when either its page is already stored
        (page.id != 0), or both loading and page are new AND the
        evaluator approves the content.
        """
        self.started = datetime.datetime.utcnow()
        loading, links = self.spider.crawl_on_page(self.page)
        self.resulting_links = links
        self.resulting_loading = loading
        # Bug fix: the old condition read ``loading.page, id == 0`` which
        # built an always-truthy tuple (and referenced the ``id`` builtin),
        # so every loading was saved regardless of the evaluator's verdict.
        is_new = loading.id == 0 and loading.page.id == 0
        if (is_new and self.evaluator.evaluate_loading(loading)) \
                or loading.page.id != 0:
            self.websites.save_loading(loading)
        self.finished = datetime.datetime.utcnow()
class SpiderManager(threading.Thread):
    """
    Background thread owning a pool of CrawlingProcess workers: starts
    them while capacity allows, harvests their results and schedules
    newly discovered links for crawling.
    """

    spider = Spider()
    max_process_count = 50
    websites = None
    evaluator = LoadingEvaluator()
    # Re-crawl pages that already have a successful loading.
    update_existing = False
    # When True the manager thread terminates once all work is done.
    stop_when_done = False

    def __init__(self, websites, spider=None, max_p=None, loading_evaluator=None, autostart=False):
        """
        :param websites: Websites domain.
        :param spider: Custom Spider impl if needed.
        :param max_p: Maximum amount of processes to run.
        :param loading_evaluator: LoadingEvaluator custom impl if needed.
        :param autostart: Start the manager thread immediately.
        """
        super().__init__()
        # Bug fix: both lists used to be class attributes, so every
        # SpiderManager instance shared one process queue and one
        # finished-results list.
        self.__processes = []
        self.finished_processes = list()
        if spider is not None:
            self.spider = spider
        self.spider.use_existing = not self.update_existing
        if isinstance(max_p, int) and max_p > 0:
            self.max_process_count = max_p
        self.websites = websites
        if loading_evaluator is not None:
            self.evaluator = loading_evaluator
        if autostart:
            self.start()

    def running_count(self):
        """
        Finds how many crawling processes are running.

        :return: number
        """
        return sum(1 for p in self.__processes if p.is_alive())

    def is_done(self):
        """
        Checks whether all processes are finished.

        :return: bool.
        """
        return len(self.__processes) == 0

    def crawl(self, page):
        """
        Queues a crawling process for *page*; run() starts it later.

        :param page: Page.
        """
        self.__processes.append(CrawlingProcess(page, self.spider, self.websites, self.evaluator))

    def _start_process(self, crawling_process):
        """
        Starts a queued crawling process if capacity allows.

        :param crawling_process: CrawlingProcess.
        """
        if self.running_count() < self.max_process_count:
            crawling_process.start()

    def _process_crawling_result(self, crawling_process):
        """
        Queues crawling of every link a finished process discovered.

        :param crawling_process: CrawlingProcess
        """
        if crawling_process.resulting_links is None or self.stop_when_done:
            return
        for link in crawling_process.resulting_links:
            page = self.websites.find_page_by_url(link)
            if page is None:
                page = models.Page()
                page.url = link
                page.website = crawling_process.page.website
            if not page.isloaded() or self.update_existing:
                self.crawl(page)

    def run(self):
        while True:
            # Bug fix: iterate over a snapshot — the body removes finished
            # entries from self.__processes, and removing from a list while
            # iterating it makes the iterator skip elements.
            for p in list(self.__processes):
                if p.is_alive():
                    continue
                if not p.finished and not self.stop_when_done:
                    self._start_process(p)
                else:
                    self._process_crawling_result(p)
                    self.finished_processes.append(
                        CrawlingInfo(p.resulting_loading, p.page, p.resulting_links, p.started, p.finished))
                    self.__processes.remove(p)
            if self.stop_when_done and self.is_done():
                break
            time.sleep(1)

    def active_processes_info(self):
        """
        Gets info about running processes.

        :return: List of CrawlingInfo.
        """
        return [CrawlingInfo(None, p.page, None, p.started, None)
                for p in self.__processes if p.is_alive()]

    def waiting_processes_info(self):
        """
        Gets info about queued, not yet started processes.

        :return: List of CrawlingInfo.
        """
        return [CrawlingInfo(None, p.page, None, None, None)
                for p in self.__processes
                if not p.is_alive() and not p.finished]

    def crawling_info(self):
        """
        Gets info about all processes, finished ones included.

        :return: List of CrawlingInfo.
        """
        info = [CrawlingInfo(p.resulting_loading, p.page, p.resulting_links, p.started, p.finished)
                for p in self.__processes]
        info += self.finished_processes
        return info
"repo_name": "AlexMaxHorkun/xanderh-spider",
"path": "xanderhorkunspider/spider.py",
"copies": "1",
"size": "8635",
"license": "apache-2.0",
"hash": -2719488578277907500,
"line_mean": 32.2153846154,
"line_max": 112,
"alpha_frac": 0.5790387956,
"autogenerated": false,
"ratio": 4.15743861338469,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010263663967722241,
"num_lines": 260
} |
__author__ = 'Alexander Gorkun'
__email__ = 'mindkilleralexs@gmail.com'
import unittest
import time
import httpretty
from xanderhorkunspider import loader
from xanderhorkunspider import models
from xanderhorkunspider import parser
from xanderhorkunspider import spider
from xanderhorkunspider import domain
from xanderhorkunspider import inmemory_dao
class TestLoader(unittest.TestCase):
    """Checks that Loader wraps an httpretty-mocked response in a LoadResult."""

    mock_url = "http://www.example.com/"
    mock_content = "<html><head><title>Some title</title></head><body><h1>Hello you!</h1></body></html>"
    mock_contenttype = "text/html"
    mock_headers = {'age': '19'}

    @httpretty.activate
    def test_load(self):
        # Fix: the keyword used to be misspelled ``ccontent_type``, so the
        # mocked response never carried the intended Content-Type value.
        httpretty.register_uri(httpretty.GET, self.mock_url,
                               body=self.mock_content, content_type=self.mock_contenttype,
                               adding_headers=self.mock_headers)
        l = loader.Loader(2)
        response = l.load(self.mock_url)
        self.assertIsInstance(response, loader.LoadResult, "Loader did not return LoadResult")
        for key, value in self.mock_headers.items():
            self.assertTrue(key in response.headers, "Response does not contain needed header")
            self.assertTrue(response.headers[key] == value, "Response header's '%s' value is invalid" % key)
        self.assertTrue(response.url == self.mock_url)
        self.assertTrue(response.body == self.mock_content)
class TestLinksParser(unittest.TestCase):
    """Covers LinksParser: host-relative, absolute and page-relative links,
    plus rejection of links pointing outside the website's host."""

    # Host the mocked website owns; links on other hosts must be dropped.
    mock_host = "example.com"
    mock_text = "<html><head><title>Some title</title></head><body><h1>Hello you!</h1>" \
                "<a someattr=\"someshit\" href=\"/relative-link/23\" otherattr>AAA</a><p>someshit</p>" \
                "<a href=\"http://ohersubdomain.example.com/someshit\">DDD</a>" \
                "<p>Some text <a dd=\"jdsh\" href=\"http://example.com/gofuckyourself\"></a>" \
                "<a href=\"http://fuckthislink.com/someshit2\">SDaa</a>" \
                "<a hh=\"dsad\" href=\"gofurther\">SSSS</a></p></body></html>"
    mock_page_url = "http://somesubdomain.example.com"
    mock_url = "/someshit"

    def test_parse(self):
        # Build a minimal Website -> Page -> Loading chain around the fixture.
        website = models.Website()
        website.name = "Some site"
        website.host = self.mock_host
        page = models.Page()
        page.website = website
        page.url = self.mock_page_url + self.mock_url
        loading = models.Loading()
        loading.page = page
        loading.success = True
        loading.content = self.mock_text
        p = parser.LinksParser()
        links = p.parse(loading)
        # Expected: relative links resolved against the page URL; the
        # fuckthislink.com URL is off-host and must be absent from the set.
        testlinks = {self.mock_page_url + '/relative-link/23', 'http://ohersubdomain.example.com/someshit',
                     "http://" + self.mock_host + '/gofuckyourself',
                     self.mock_page_url + self.mock_url + '/gofurther'}
        self.assertTrue(links == testlinks)
class TestSpider(unittest.TestCase):
    """Spider integration test against an httpretty-mocked HTTP endpoint."""

    mock_host = "example.com"
    mock_url = "http://www." + mock_host
    mock_content = "<html><head><title>Some title</title></head><body><h1>Hello you!</h1>" \
                   "<a href=\"someotherpage\">Some link</a></body></html>"

    @httpretty.activate
    def test_crawl(self):
        # Serve the fixture HTML at the mocked URL.
        httpretty.register_uri(httpretty.GET, self.mock_url,
                               body=self.mock_content)
        website = models.Website()
        website.name = "Test"
        website.host = self.mock_host
        page = models.Page()
        page.website = website
        page.url = self.mock_url
        spdr = spider.Spider()
        loading, links = spdr.crawl_on_page(page)
        # The crawl must record a successful Loading bound to the input page
        # and resolve the page-relative anchor into an absolute URL.
        self.assertTrue(isinstance(loading, models.Loading))
        self.assertTrue(loading.success)
        self.assertTrue(loading.content == self.mock_content)
        self.assertTrue(page is loading.page)
        self.assertTrue(links == {self.mock_url + "/someotherpage", })
class TestSpiderManager(unittest.TestCase):
    """End-to-end test: SpiderManager must follow links across three mocked pages."""

    mock_host = "example.com"
    mock_base_url = "http://somesub." + mock_host + "/somepage"

    @httpretty.activate
    def test_threepages(self):
        website = models.Website()
        website.name = "some site"
        website.host = self.mock_host
        page1 = models.Page()
        page1.website = website
        page1.url = self.mock_base_url
        page2 = models.Page()
        page2.website = website
        page2.url = self.mock_base_url + '/page2'
        # page1 has no links; page2 links to page3, which the manager has to
        # discover and crawl on its own.
        httpretty.register_uri(httpretty.GET, page1.url,
                               body="<p>somestuff</p>")
        httpretty.register_uri(httpretty.GET, page2.url,
                               body="<p>sup</p><a href=\"page3\">link to 3</a>")
        httpretty.register_uri(httpretty.GET, page2.url + "/page3",
                               body="<div>enough</div>")
        websites = domain.Websites(inmemory_dao.InMemoryPageDao(),
                                   inmemory_dao.InMemoryWebsiteDao(), inmemory_dao.InMemoryLoadingDao())
        spdr = spider.Spider(loader.Loader(), parser.LinksParser())
        spider_manager = spider.SpiderManager(websites, spdr)
        spider_manager.crawl(page1)
        spider_manager.crawl(page2)
        spider_manager.start()
        """Giving 5 sec for spider to run"""
        # NOTE(review): this sleep makes the test timing-dependent — on a
        # slow machine page3 may not be crawled within 5 seconds; confirm.
        time.sleep(5)
        spider_manager.stop_when_done = True
        spider_manager.join()
        received_links = set()
        for l in websites.find_loadings():
            received_links.add(l.page.url)
        self.assertTrue(received_links == {self.mock_base_url,
                                           self.mock_base_url + '/page2', page2.url + "/page3"})
"repo_name": "AlexMaxHorkun/xanderh-spider",
"path": "xanderhorkunspider/tests.py",
"copies": "1",
"size": "5559",
"license": "apache-2.0",
"hash": -7925175582289580000,
"line_mean": 41.7692307692,
"line_max": 108,
"alpha_frac": 0.6037057025,
"autogenerated": false,
"ratio": 3.652431011826544,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47561367143265443,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alexander Gorkun'
__email__ = 'mindkilleralexs@gmail.com'
import urllib.parse
import re
class LinksParser(object):
    """
    Extracts anchor links out of HTML and keeps only those that belong
    to the same website as the loaded page.
    """

    def _validateurl(self, loading, url):
        """
        Validates received urls.

        :param loading: Loading object.
        :param url: absolute link.
        :return: True when the scheme is http(s) and the host belongs to
            the loading's website.
        """
        parsed = urllib.parse.urlparse(url)
        if parsed.scheme not in ('http', 'https'):
            return False
        return parsed.netloc.endswith(loading.page.website.host)

    def parse(self, loading):
        """
        Collects absolute, validated links from the loading's HTML.

        :param loading: Loading whose ``content`` is scanned.
        :return: set of absolute URLs.
        """
        base = urllib.parse.urlparse(loading.page.url)
        found = set()
        for href in re.findall(r'<a\s.*?href="([^#]*?)[#"]', loading.content):
            if href.startswith("/"):
                # Host-relative link: prepend the page's scheme and host.
                absolute = "{0}://{1}{2}".format(base.scheme, base.netloc, href)
            elif href.startswith(('http://', 'https://')):
                absolute = href
            else:
                # Page-relative link: append to the page's own URL.
                separator = "" if loading.page.url.endswith('/') else "/"
                absolute = loading.page.url + separator + href
            if self._validateurl(loading, absolute):
                found.add(absolute)
        return found
"repo_name": "AlexMaxHorkun/xanderh-spider",
"path": "xanderhorkunspider/parser.py",
"copies": "1",
"size": "1320",
"license": "apache-2.0",
"hash": 6960002280209434000,
"line_mean": 32.025,
"line_max": 90,
"alpha_frac": 0.5643939394,
"autogenerated": false,
"ratio": 3.8823529411764706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9937357525225228,
"avg_score": 0.0018778710702485658,
"num_lines": 40
} |
__author__ = 'Alexander Gorkun'
__email__ = 'mindkilleralexs@gmail.com'
class PageDao(object):
    """
    Abstract DAO contract for Page entities; concrete storage backends
    must override every method.
    """

    def persist(self, page):
        """Store a brand-new Page entity."""
        raise NotImplementedError()

    def find_by_url(self, url):
        """Return the Page whose URL equals *url*, or None."""
        raise NotImplementedError()

    def find(self, id):
        """Return the Page with the given ID, or None."""
        raise NotImplementedError()

    def save(self, page):
        """Flush modifications of an already-persisted Page."""
        raise NotImplementedError()

    def delete(self, page):
        """Remove the given Page entity from storage."""
        raise NotImplementedError()
class WebsiteDao(object):
    """
    Abstract DAO contract for Website entities; concrete storage backends
    must override every method.
    """

    def persist(self, website):
        """Store a brand-new Website entity."""
        raise NotImplementedError()

    def find_by_url(self, url):
        """Return the Website that owns *url*, or None."""
        raise NotImplementedError()

    def find(self, id):
        """Return the Website with the given ID, or None."""
        raise NotImplementedError()

    def find_all(self, offset=0, limit=0):
        """
        List stored Websites.

        :param offset: Start index.
        :param limit: Max amount to return (0 means no limit).
        :return: List.
        """
        raise NotImplementedError()

    def save(self, website):
        """Flush modifications of an already-persisted Website."""
        raise NotImplementedError()

    def delete(self, wid):
        """Remove the Website with ID *wid* from storage."""
        raise NotImplementedError()
class LoadingDao(object):
    """
    Abstract DAO contract for Loading entities; concrete storage backends
    must override every method.
    """

    def persist(self, loading):
        """Store a brand-new Loading entity."""
        raise NotImplementedError()

    def find_by_url(self, url):
        """Return the Loading whose Page has URL *url*, or None."""
        raise NotImplementedError()

    def find(self, id):
        """Return the Loading with the given ID, or None."""
        raise NotImplementedError()

    def save(self, loading):
        """Flush modifications of an already-persisted Loading."""
        raise NotImplementedError()

    def find_all(self, limit=0, offset=0):
        """
        List stored Loadings.

        :param limit: Max amount of entities to return (0 means no limit).
        :param offset: Start index.
        :return: List of Loadings.
        """
        raise NotImplementedError()
"repo_name": "AlexMaxHorkun/xanderh-spider",
"path": "xanderhorkunspider/dao.py",
"copies": "1",
"size": "3537",
"license": "apache-2.0",
"hash": 225009541938311040,
"line_mean": 23.4,
"line_max": 72,
"alpha_frac": 0.5482046932,
"autogenerated": false,
"ratio": 4.482889733840304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5531094427040304,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alexander Gorkun'
__email__ = 'mindkilleralexs@gmail.com'
class Website(object):
    """
    Holds data about added websites to parse.
    """

    # 0 means the entity has not been persisted yet.
    id = 0
    name = ""
    # Host name, e.g. "example.com".
    host = ""

    def __init__(self):
        # Bug fix: ``pages`` used to be a single class-level set shared by
        # every Website instance; make it per-instance.
        self.pages = set()
class Page(object):
    """
    Holds info about loaded pages.
    """

    # 0 means the entity has not been persisted yet.
    id = 0
    url = ""
    website = None

    def __init__(self):
        # Bug fix: ``loadings`` used to be one class-level list shared by
        # every Page instance; make it per-instance.
        self.loadings = []

    def isloaded(self):
        """
        Checks if the page's content was successfully loaded at least once.

        :return: bool
        """
        return any(loading.success for loading in self.loadings)

    def getcontent(self):
        """
        Returns content of the last successful loading.

        :return: Content (HTML) or None
        """
        loading = self.get_last_successful_loading()
        return loading.content if loading is not None else None

    def get_last_successful_loading(self):
        """
        :return: the most recent successful Loading, or None.
        """
        last_loading = None
        for l in self.loadings:
            if l.success and (last_loading is None or last_loading.time < l.time):
                last_loading = l
        return last_loading
class Loading(object):
    """
    Holds info about a loading attempt.
    """

    # 0 means the entity has not been persisted yet.
    id = 0
    page = None
    success = False
    content = ""
    # datetime when the attempt started.
    time = None
    # Seconds the attempt took.
    loading_time = 0

    def __init__(self):
        # Bug fix: ``headers`` used to be one class-level dict shared by
        # every Loading instance; make it per-instance.
        self.headers = {}
"repo_name": "AlexMaxHorkun/xanderh-spider",
"path": "xanderhorkunspider/models.py",
"copies": "1",
"size": "1431",
"license": "apache-2.0",
"hash": 2461449725393940500,
"line_mean": 20.3731343284,
"line_max": 82,
"alpha_frac": 0.535290007,
"autogenerated": false,
"ratio": 4.196480938416422,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5231770945416422,
"avg_score": null,
"num_lines": null
} |
__author__ = "Alexander Gorokhov"
__email__ = "sashgorokhov@gmail.com"
import urllib.request, urllib.parse, json, threading
__enable_requests__ = True
try:
import requests
except ImportError:
__enable_requests__ = False
class VKError(Exception):
    """Raised when the VK API responds with an ``error`` payload."""

    def __init__(self, error):
        """
        :param error: the ``error`` dict from a VK API response.
        """
        super().__init__(error)
        self.vkerror = error                           # raw error dict
        self.error_code = int(error['error_code'])
        self.error_msg = str(error['error_msg'])
        self.request_params = error['request_params']
class VKApi:
    """Thin wrapper around the VK REST API."""

    def __init__(self, access_token=None):
        """
        :param access_token: OAuth token appended to every request, or
            None for unauthenticated calls.
        """
        self.access_token = access_token
        self.api_version = '5.21'
        # Serializes outgoing HTTP calls across threads.
        self._lock = threading.Lock()

    def _compile_params(self, params_dict):
        """
        Turn a kwargs dict into a list of (name, value) string pairs.

        Empty values are dropped, lists are joined with commas, and the
        access token / API version are appended at the end.
        """
        params = list()
        for key in params_dict:
            if len(str(params_dict[key])) != 0:
                if isinstance(params_dict[key], list):
                    params.append((key, ','.join(map(str, params_dict[key]))))
                else:
                    params.append((key, str(params_dict[key])))
        if self.access_token:
            params.append(("access_token", str(self.access_token)))
        params.append(('v', str(self.api_version)))
        return params

    def call(self, method, **params_dict):
        """
        Invoke API *method* with the given parameters.

        :return: the ``response`` payload.
        :raises VKError: when the API answers with an ``error`` payload.
        """
        params = self._compile_params(params_dict)
        url = 'https://api.vk.com/method/{0}?{1}'.format(method, urllib.parse.urlencode(params))
        with self._lock:
            response = urllib.request.urlopen(url).read()
            response = json.loads(response.decode())
        if 'error' in response:
            raise VKError(response['error'])
        return response['response']

    def download(self, link, filename=None, reportHook=None):
        """
        Download *link*; save to *filename*, or to a temp file whose
        path is returned when *filename* is omitted.
        """
        if filename:
            urllib.request.urlretrieve(link, filename, reportHook)
        else:
            return urllib.request.urlretrieve(link, reporthook=reportHook)[0]

    def upload(self, link, filename, res_type):
        """
        POST *filename* to *link* as a multipart field named *res_type*.

        :raises RuntimeError: when the ``requests`` package is unavailable.
        """
        if not __enable_requests__:
            raise RuntimeError('api.upload(): Requests are disabled(import error)')
        # Fix: close the file handle after the request instead of leaking it.
        with open(filename, 'rb') as source:
            return requests.post(link, files={res_type: source}).json()
def test_connection(access_token):
    """
    Probe whether *access_token* is accepted by the VK API.

    :param access_token: token to check.
    :return: True when a cheap API call succeeds, False otherwise.
    """
    try:
        VKApi(access_token).call("users.get")
    except VKError:
        return False
    return True
"repo_name": "sashgorokhov/VK-P-P-Music-Project",
"path": "modules/vk/api.py",
"copies": "1",
"size": "2356",
"license": "mit",
"hash": -5855609339441957000,
"line_mean": 31.7361111111,
"line_max": 96,
"alpha_frac": 0.5955008489,
"autogenerated": false,
"ratio": 3.824675324675325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9911922024786342,
"avg_score": 0.0016508297577966255,
"num_lines": 72
} |
__author__ = "Alexander Gorokhov"
__email__ = "sashgorokhov@gmail.com"
from PySide import QtCore, QtWebKit, QtGui
from urllib.parse import urlparse
DESCTIPTION = "VK Qt auth window"
class __QtAuthWindow(QtWebKit.QWebView):
    """
    Embedded browser that drives the VK OAuth implicit flow; the token is
    scraped from the URL fragment of the final blank.html redirect.
    """

    def __init__(self, appId, scope):
        super().__init__()
        base = 'http://oauth.vk.com/oauth/authorize?' \
               'redirect_uri=oauth.vk.com/blank.html&' \
               'response_type=token&'
        auth_url = base + 'client_id={0}&scope={1}&'.format(appId,
                                                           ','.join(scope)) + 'display=wap&revoke=1'
        # Filled in by webUrlChanged once VK redirects to blank.html.
        self.accessToken = None
        self.userId = None
        self.expires = None
        self.setWindowTitle(str(DESCTIPTION))
        self.urlChanged.connect(self.webUrlChanged)
        self.load(QtCore.QUrl(auth_url))

    def webUrlChanged(self, newUrl):
        """Slot: when redirected to blank.html, parse the fragment and close."""
        current = newUrl.toString()
        if urlparse(current).path != '/blank.html':
            return
        token_data = {}
        for piece in current.split('#')[1].split('&'):
            name, value = piece.split('=')[0], piece.split('=')[1]
            token_data[name] = value
        self.accessToken = token_data['access_token']
        self.userId = token_data['user_id']
        self.expires = token_data['expires_in']
        self.close()
def show_browser(appId, scope):
    """
    Open a blocking Qt window for VK OAuth and return the credentials.

    :param appId: VK application id.
    :param scope: iterable of permission names.
    :return: (access_token, user_id, expires_in); all None when the user
        closed the window without authorizing.
    """
    qt_app = QtGui.QApplication([])
    window = __QtAuthWindow(str(appId), scope)
    window.show()
    qt_app.exit(qt_app.exec_())
    qt_app.quit()
    return window.accessToken, window.userId, window.expires
| {
"repo_name": "sashgorokhov/VK-P-P-Music-Project",
"path": "modules/vk/qt/auth.py",
"copies": "1",
"size": "1476",
"license": "mit",
"hash": -8932796446229069000,
"line_mean": 31.8,
"line_max": 68,
"alpha_frac": 0.5616531165,
"autogenerated": false,
"ratio": 3.4976303317535544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45592834482535544,
"avg_score": null,
"num_lines": null
} |
__author__ = "Alexander Gorokhov"
__email__ = "sashgorokhov@gmail.com"
import http.cookiejar, urllib.request, urllib.parse, html.parser
from . import accesstokener
_noqt = False
try:
import PySide
except ImportError:
_noqt = True
def quickauth_qt(appid, permissions_scope=list()):
    """
    Return cached VK credentials, opening the Qt auth window only when
    the stored token is missing or stale.

    :param appid: VK application id.
    :param permissions_scope: list of permission names to request.
    :return: (access_token, user_id, expires_in)
    """
    if accesstokener.good():
        return accesstokener.get()
    from .qt.auth import show_browser
    access_token, user_id, expires_in = show_browser(appid, permissions_scope)
    if access_token:
        accesstokener.new(access_token, user_id, expires_in)
    return access_token, user_id, expires_in
def quickauth_nogui(login, passwd, appid, permissions_scope=list()):
    """
    Return cached VK credentials, performing a headless web login only
    when the stored token is missing or stale.

    :param login: account email.
    :param passwd: account password.
    :param appid: VK application id.
    :param permissions_scope: list of permission names to request.
    :return: (access_token, user_id, expires_in)
    """
    if accesstokener.good():
        return accesstokener.get()
    access_token, user_id, expires_in = auth(login, passwd, appid, permissions_scope)
    if access_token:
        accesstokener.new(access_token, user_id, expires_in)
    return access_token, user_id, expires_in
class VKAuthError(Exception): pass
class _FormParser(html.parser.HTMLParser):
def __init__(self):
html.parser.HTMLParser.__init__(self)
self.url = None
self.params = {}
self.in_form = False
self.form_parsed = False
self.method = 'GET'
def handle_starttag(self, tag, attrs):
tag = tag.lower()
if tag == 'form':
if self.form_parsed:
raise VKAuthError('Second form on page')
if self.in_form:
raise VKAuthError('Already in form')
self.in_form = True
if not self.in_form:
return
attrs = dict((name.lower(), value) for name, value in attrs)
if tag == 'form':
self.url = attrs['action']
if 'method' in attrs:
self.method = attrs['method']
elif tag == 'input' and 'type' in attrs and 'name' in attrs:
if attrs['type'] in ['hidden', 'text', 'password']:
self.params[attrs['name']] = attrs['value'] if 'value' in attrs else ''
def handle_endtag(self, tag):
tag = tag.lower()
if tag == 'form':
if not self.in_form:
raise VKAuthError('Unexpected end of <form>')
self.in_form = False
self.form_parsed = True
def auth(login, passwd, appid, scope):
    """
    Log into VK with email/password and complete the OAuth implicit flow
    without a browser.

    :param login: account email.
    :param passwd: account password.
    :param appid: VK application id.
    :param scope: permission name or list of permission names.
    :return: (access_token, user_id, expires_in)
    :raises VKAuthError: on network failure, unexpected page layout,
        or wrong credentials.
    """
    if not isinstance(scope, list):
        scope = [scope]
    _opener = urllib.request.build_opener(
        urllib.request.HTTPCookieProcessor(http.cookiejar.CookieJar()),
        urllib.request.HTTPRedirectHandler())
    # Bug fix: the format args previously used ``app_id`` — an undefined
    # name (NameError on every call); the parameter is called ``appid``.
    auth_url = ('http://oauth.vk.com/oauth/authorize?'
                'redirect_uri=oauth.vk.com/blank.html&response_type=token&'
                'client_id={0}&scope={1}&display=wap').format(appid, ','.join(scope))
    try:
        response = _opener.open(auth_url)
    except urllib.error.URLError:
        raise VKAuthError('Cant connect to vk.com or app_id is invalid.')
    except Exception as e:
        raise VKAuthError('Unhandled exception: ' + str(e))
    doc = response.read().decode()
    parser = _FormParser()
    parser.feed(doc)
    parser.close()
    if not parser.form_parsed or parser.url is None or 'pass' not in parser.params or 'email' not in parser.params:
        raise VKAuthError('Unexpected response page o_O')
    parser.params['email'] = login
    parser.params['pass'] = passwd
    parser.method = 'POST'
    # Simplification: the old code re-encoded every key and value to bytes
    # by hand before urlencode; urlencode handles str pairs directly and
    # produces the same percent-encoded body.
    response = _opener.open(parser.url, urllib.parse.urlencode(parser.params).encode())
    doc = response.read()
    url = response.geturl()
    if urllib.parse.urlparse(url).path != '/blank.html':
        # An extra confirmation page was shown (e.g. permission grant):
        # submit its single form as well.
        parser = _FormParser()
        parser.feed(str(doc))
        parser.close()
        if not parser.form_parsed or parser.url is None:
            raise VKAuthError('Invalid email or password')
        if parser.method == 'post':
            response = _opener.open(parser.url, urllib.parse.urlencode(parser.params).encode())
        else:
            raise VKAuthError('Unexpected method: ' + parser.method)
    url = response.geturl()
    if urllib.parse.urlparse(url).path != "/blank.html":
        raise VKAuthError('Invalid email or password')
    # The credentials arrive in the URL fragment as key=value pairs.
    answer = dict(tuple(kv_pair.split('=')) for kv_pair in urllib.parse.urlparse(url).fragment.split('&'))
    if 'access_token' not in answer or 'user_id' not in answer:
        raise VKAuthError('Missing some values in answer')
    return answer['access_token'], answer['user_id'], answer['expires_in']
# :)
# If PySide failed to import (see the _noqt flag above), remove the
# Qt-based helper from the module namespace so callers get an immediate
# AttributeError instead of a deferred ImportError inside quickauth_qt.
if _noqt:
    del globals()['quickauth_qt']
| {
"repo_name": "sashgorokhov/VK-P-P-Music-Project",
"path": "modules/vk/__init__.py",
"copies": "1",
"size": "5071",
"license": "mit",
"hash": 8232950022209926000,
"line_mean": 33.4965986395,
"line_max": 115,
"alpha_frac": 0.601656478,
"autogenerated": false,
"ratio": 3.6221428571428573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9714228979724931,
"avg_score": 0.001914071083585097,
"num_lines": 147
} |
__author__ = "Alexander Metzner"
import sys
from pyfix.testcollector import TestCollector
from pyfix.testrunner import TestRunner, TestRunListener
from pybuilder.errors import BuildFailedException
from pybuilder.utils import discover_modules_matching, render_report
def run_unit_tests(project, logger):
    """
    PyBuilder task body: discover and execute pyfix unit test modules.

    Honours the deprecated ``pyfix_unittest_file_suffix`` property by
    translating it into ``pyfix_unittest_module_glob``.

    :param project: PyBuilder project supplying paths and properties.
    :param logger: PyBuilder logger.
    :raises BuildFailedException: when tests fail or cannot be imported.
    """
    sys.path.append(project.expand_path("$dir_source_main_python"))
    test_dir = project.expand_path("$dir_source_unittest_python")
    sys.path.append(test_dir)

    suffix = project.get_property("pyfix_unittest_file_suffix")
    if suffix is not None:
        logger.warn("pyfix_unittest_file_suffix is deprecated, please use pyfix_unittest_module_glob")
        module_glob = "*{0}".format(suffix)
        if module_glob.endswith(".py"):
            module_glob = module_glob[:-3]
        project.set_property("pyfix_unittest_module_glob", module_glob)
    else:
        module_glob = project.get_property("pyfix_unittest_module_glob")

    logger.info("Executing pyfix unittest Python modules in %s", test_dir)
    logger.debug("Including files matching '%s.py'", module_glob)

    try:
        result = execute_tests_matching(logger, test_dir, module_glob)
        if not result.number_of_tests_executed:
            logger.warn("No pyfix executed")
        else:
            logger.info("Executed %d pyfix unittests", result.number_of_tests_executed)
        write_report(project, result)
        if not result.success:
            raise BuildFailedException("%d pyfix unittests failed", result.number_of_failures)
        logger.info("All pyfix unittests passed")
    except ImportError as e:
        logger.error("Error importing pyfix unittest: %s", e)
        raise BuildFailedException("Unable to execute unit tests.")
class TestListener(TestRunListener):
    """Bridges pyfix test-run events to the PyBuilder logger."""

    def __init__(self, logger):
        self._logger = logger

    def before_suite(self, test_definitions):
        self._logger.info("Running %d pyfix tests", len(test_definitions))

    def before_test(self, test_definition):
        self._logger.debug("Running pyfix test '%s'", test_definition.name)

    def after_test(self, test_results):
        failed = [r for r in test_results if not r.success]
        for result in failed:
            self._logger.warn("Test '%s' failed: %s", result.test_definition.name, result.message)
def import_modules(test_modules):
    """Import each named module and return the module objects in order."""
    modules = []
    for module_name in test_modules:
        modules.append(__import__(module_name))
    return modules
def execute_tests(logger, test_source, suffix):
    """Compatibility wrapper: run test modules whose names end in *suffix*."""
    module_glob = "*{0}".format(suffix)
    return execute_tests_matching(logger, test_source, module_glob)
def execute_tests_matching(logger, test_source, module_glob):
    """
    Discover, import and run every pyfix test module under *test_source*
    whose name matches *module_glob*.

    :return: pyfix test-run results.
    """
    module_names = discover_modules_matching(test_source, module_glob)
    collector = TestCollector()
    for module in import_modules(module_names):
        collector.collect_tests(module)
    runner = TestRunner()
    runner.add_test_run_listener(TestListener(logger))
    return runner.run_tests(collector.test_suite)
def write_report(project, test_results):
    """Serialize the pyfix run results into the pyfix_unittest.json report."""
    failures = [
        {"test": result.test_definition.name,
         "message": result.message,
         "traceback": result.traceback_as_string}
        for result in test_results.test_results
        if not result.success
    ]
    report = {"tests-run": test_results.number_of_tests_executed,
              "time_in_millis": test_results.execution_time,
              "failures": failures}
    project.write_report("pyfix_unittest.json", render_report(report))
| {
"repo_name": "shakamunyi/pybuilder",
"path": "src/main/python/pybuilder/plugins/python/pyfix_plugin_impl.py",
"copies": "1",
"size": "3654",
"license": "apache-2.0",
"hash": 2699642763252847600,
"line_mean": 36.6701030928,
"line_max": 112,
"alpha_frac": 0.6830870279,
"autogenerated": false,
"ratio": 3.782608695652174,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49656957235521737,
"avg_score": null,
"num_lines": null
} |
from msvcrt import getch
from clint.textui import colored
from math import ceil
from random import uniform
from time import sleep, clock
from os import system, name
from sys import exit
# Define values received when ord(getchar()) is used, i.e. ASCII values.
RIGHT_KEY = 77
LEFT_KEY = 75
SPECIAL_CHAR = 224
EXIT_CHAR = 3
# Define values for the char dimensions in the terminal.
CHAR_HEIGHT = 12
CHAR_WIDTH = 7
# Amount the colored cube is indented.
CUBE_INDENT = 5
# Side lengths of the colored cube in characters.
CUBE_SIDE_LEN = 8
# Define chances for different colors. Chances are RELATIVE! i.e. if
# colored.green : 0.2,
# colored.red : 0.2,
# colored.blue: 0.2
# then each color has a third chance of getting picked, whereas if it were
# 0.2, 0.1, 0.1, then green would have 50% chance, while the other two have
# 25% chance each.
COLOR_CHANCES = {
colored.green : 0.2,
colored.red : 0.2,
colored.blue : 0.2,
}
SUM_CHANCE = sum(list(COLOR_CHANCES.values()))
# Number of colors that are tested each round.
NUM_TRIALS = 10
def clear_screen():
    """Clears the window, ensures DOS and Unix compatibility"""
    command = "cls" if name == "nt" else "clear"
    system(command)
def draw_cube(color, char, indent, row_num):
    """
    Draws a colored cube with row_num rows, widening the column count so
    the cube looks square despite terminal characters being taller than
    they are wide.

    :param color: callable wrapping a string in a terminal color.
    :param char: fill character.
    :param indent: number of spaces printed before each row.
    :param row_num: number of rows (side length).
    """
    # Compensate for the terminal's character aspect ratio.
    col_num = ceil(CHAR_HEIGHT / CHAR_WIDTH) * row_num
    row = " " * indent + char * col_num
    for _ in range(row_num):
        print(color(row))
def get_random_color():
    """Using uniform probability, gets a color under the probabilities defined
    in COLOR_CHANCES. Returns a 'colored' method e.g. colored.red.

    Chances are relative weights: each color is picked with probability
    weight / SUM_CHANCE.
    """
    rand_num = uniform(0, SUM_CHANCE)
    color_chance_items = list(COLOR_CHANCES.items())
    # Walk the cumulative distribution; keeping a running total avoids the
    # original's repeated prefix-sum recomputation.
    cumulative = 0.0
    for color, chance in color_chance_items:
        cumulative += chance
        if rand_num < cumulative:
            return color
    # BUGFIX: random.uniform(a, b) may return exactly b due to floating-point
    # rounding, in which case no bucket above matched and the original
    # returned None (which later crashes draw_cube). Fall back to the last
    # color so a callable is always returned.
    return color_chance_items[-1][0]
# Initial screen-clear.
clear_screen()
# Prepare score value to hold the number of correct inputs.
score = 0
# Not correct or incorrect yet (sentinel distinct from True/False).
prev_correct = -1
# Print the first color such that for the second color the player can start
# playing. Show the color for three seconds before going to the first input.
print("\nFirst color:")
prev_color = get_random_color()
draw_cube(prev_color, "#", CUBE_INDENT, CUBE_SIDE_LEN)
for i in range(3):
    # Countdown dots, one per second.
    print(". ", end="")
    sleep(1)
clear_screen()
# Start the number of trials as defined by NUM_TRIALS as well as the timer.
# Each loop first tells whether the previous input was correct or incorrect,
# then shows a colored cube and waits for an input.
# NOTE(review): time.clock() was removed in Python 3.8; time.perf_counter()
# is the modern equivalent - confirm the target interpreter version.
time_start = clock()
trial = 0
while trial < NUM_TRIALS:
    # Print the result of the previous trial.
    if prev_correct == True:
        print(colored.green("CORRECT!"))
    elif prev_correct == False:
        print(colored.red("WRONG!"))
    else:
        print("")
        # prev_correct must be -1 i.e. first trial,
        # so can't be correct or wrong yet.
        pass
    # Print the trial number.
    print("#{}".format(trial+1))
    # Randomly generate a color and draw a corresponding cube.
    color = get_random_color()
    draw_cube(color, "#", CUBE_INDENT, CUBE_SIDE_LEN)
    # Get user input (msvcrt.getch returns raw console bytes, Windows only).
    key_press = ord(getch())
    if key_press == SPECIAL_CHAR:
        # If key_press is the special character, get the next one for the
        # specific special character (arrow keys arrive as a two-byte pair).
        key_press = ord(getch())
    elif key_press == EXIT_CHAR:
        # If KeyboardInterrupt has been entered (Ctrl-C byte value 3).
        print("Keyboard Interrupt, exiting...")
        exit()
    # Left key = SAME as previous color ####CONTROLS####
    # Right key = DIFFERENT from previous color ####CONTROLS####
    if (key_press == LEFT_KEY):
        if (prev_color == color): # If CORRECT
            score += 1
            prev_correct = True
        else:
            prev_correct = False
    elif (key_press == RIGHT_KEY):
        if (prev_color != color): # If CORRECT
            score += 1
            prev_correct = True
        else:
            prev_correct = False
    # Prepare for the next round by saving the current color.
    prev_color = color
    trial += 1
clear_screen()
time_taken = clock() - time_start
print("You got a score of {}/{} in {:.3f} seconds!".format(score, NUM_TRIALS,
time_taken)) | {
"repo_name": "Terpal47/misc-programs",
"path": "Games/Prev-color Game/game.py",
"copies": "1",
"size": "5193",
"license": "mit",
"hash": -6106615080087967000,
"line_mean": 31.8734177215,
"line_max": 78,
"alpha_frac": 0.6383593299,
"autogenerated": false,
"ratio": 3.6803685329553506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48187278628553504,
"avg_score": null,
"num_lines": null
} |
import csv
from math import ceil
# Used to help user in case they enter information incorrectly.
from window import status
# Define global macro values.
SINGLE_HARVEST_CROPS_CSV_FILE = "csv_files/single_harvest_crops.csv"
REGENERATIVE_CROPS_CSV_FILE = "csv_files/regenerative_crops.csv"
DAYS_IN_SEASON = 28
DEBUG = 0
class Crop():
    """
    Defines a class for a general crop. Contains properties shared by all
    crops, whether they are a one-time harvest or multiple-time harvest However,
    contains all properties of a single-harvest crop.
    - name:             Name for the crop as referred to in-game.
    - buy_price:        The cheapest cost of an individual seed.
    - sell_price:       The regular sales price of one unit of produce.
    - growth_time:      The number of days it takes for the crop to
                        become harvestable.
    - harvest_yield:    The average amount of produce per harvest.
    """
    def __init__(self, name, buy_price, sell_price, growth_time, harvest_yield):
        self.name = name
        self.buy_price = buy_price
        # BUGFIX: the original assigned self.sell_price twice; the redundant
        # second assignment has been removed.
        self.sell_price = sell_price
        self.growth_time = growth_time
        self.harvest_yield = harvest_yield
        # Regenerative crops are defined using the RegenerativeCrop class,
        # which will set this property as True instead.
        self.regenerative = False
        # Initialize net gold income per day (ngid) property. Will be changed
        # with future functions.
        self.ngid = None
class RegenerativeCrop(Crop):
    """
    Defines a class for a regenerative crop, such as a strawberry crop.
    - regrowth_time:    The number of days it takes for a mature crop to
                        regrow and become harvestable again after being
                        harvested.
    - max_harvests:     The number of times a crop can be harvested in a
                        season. A value of -1 means unlimited harvests
                        (see the harvest loop in get_net_income).
    """
    def __init__(self, name, buy_price, sell_price, growth_time,
                 harvest_yield, regrowth_time, max_harvests):
        # Initialise the fields shared with single-harvest crops.
        Crop.__init__(self, name, buy_price, sell_price, growth_time,
                      harvest_yield)
        self.regrowth_time = regrowth_time
        self.max_harvests = max_harvests
        # Overrides the False set by Crop.__init__.
        self.regenerative = True
class data:
    """ Define an object to hold widely used information. Will allow for easy
    argument-passing for functions. """
    # Maps crop name -> Crop / RegenerativeCrop instance. This is a
    # class-level attribute, so it is shared by every reference to `data`.
    crops = {}
def import_crops(data):
    """ Reads in data from csv files and populates the data object with crop
    objects.

    :param data: object exposing a `crops` dict (name -> crop instance).
    """
    def _load(path, make_crop):
        # Shared loader: build a crop from each csv row, attach its season
        # list (one or two seasons depending on whether "season2" is set),
        # and store it on the data object keyed by crop name.
        with open(path) as csvfile:
            for row in csv.DictReader(csvfile):
                crop = make_crop(row)
                crop.seasons = ([row["season1"], row["season2"]] if
                                row["season2"] else [row["season1"]])
                data.crops[row["name"]] = crop

    # Single-harvest crops: name, prices, growth time and yield.
    _load(SINGLE_HARVEST_CROPS_CSV_FILE, lambda row: Crop(
        row["name"], int(row["buy_price"]), int(row["sell_price"]),
        int(row["growth_time"]), float(row["harvest_yield"])))
    # Regenerative crops carry two extra integer fields.
    _load(REGENERATIVE_CROPS_CSV_FILE, lambda row: RegenerativeCrop(
        row["name"], int(row["buy_price"]), int(row["sell_price"]),
        int(row["growth_time"]), float(row["harvest_yield"]),
        int(row["regrowth_time"]), int(row["max_harvests"])))
def get_net_income(season, date, budget, max_num_seeds, data, recursive=0):
    """ Given inputs taken from the GUI, returns all possible crops that can be
    bought and also calculates the amount of gold that the player would have at
    the end of the season if only that crop were harvested and returns that
    number too. Recursive is equal to 1 when this instance of the function is
    called recursively one layer lower on the stack. This occurs at the end of
    the first call to calculate final gold for crops that can span two seasons.
    """
    # Ensure given arguments are valid. ---------------------------------------
    # NOTE(review): the str(x).isdigit() checks assume the GUI already passes
    # non-negative ints; a numeric *string* would pass isdigit() but then hit
    # a str-vs-int comparison below - confirm the input types from the GUI.
    # Check that season is valid.
    if season not in ["spring", "summer", "fall"]:
        status.config(text = "Error, invalid input (season)")
        return
    # Check valid date.
    if not (str(date).isdigit() and date >= 1 and date <= 28):
        status.config(text = "Error, invalid input (date)")
        return
    # Check valid budget.
    if not (str(budget).isdigit() and budget >= 1):
        status.config(text = "Error, invalid input (budget)")
        return
    # Check valid max_num_seeds.
    if not (str(max_num_seeds).isdigit() and max_num_seeds >= 1):
        status.config(text = "Error, invalid input (Max # seeds)")
        return
    # Argument checking finished. ---------------------------------------------
    if not recursive:
        # Must be first call of the function.
        num_days = DAYS_IN_SEASON - date
        available_crops = [crop for crop in list(data.crops.values()) if
                           season in crop.seasons and budget >= crop.buy_price]
    else:
        # Must be calculating for crops that span two seasons.
        num_days = DAYS_IN_SEASON * 2 - date
        # Eliminate crops that do not span two seasons.
        # NOTE(review): this branch affords a crop with 'budget >
        # crop.buy_price' while the branch above uses '>=' - confirm which
        # comparison is intended.
        available_crops = [crop for crop in list(data.crops.values()) if
                           len(crop.seasons) == 2 and budget > crop.buy_price]
    possible_paths = []
    for crop in available_crops:
        if DEBUG: print(crop.name)
        if not crop.regenerative:
            # Crop is single-harvest.
            # Calculate the number of harvesting cycles there's time for.
            num_cycles = num_days // crop.growth_time
            gold = budget
            for i in range(num_cycles):
                # Reinvest all gold into seeds each cycle.
                buy_amount = gold // crop.buy_price
                # Make sure we're not buying more than we have room for.
                if (buy_amount > max_num_seeds):
                    buy_amount = max_num_seeds
                gold -= buy_amount * crop.buy_price
                gold += buy_amount * crop.sell_price * crop.harvest_yield
            # Only add this crop if it's profitable i.e. player ends with more
            # gold than they started with.
            if gold > budget:
                possible_paths.append( [crop.name, gold - budget] )
        elif crop.regenerative:
            # Crop is regenerative.
            # Prepare an array that will contain the amount of harvests for each
            # day.
            planner = [0] * num_days
            total_crops = 0
            gold = budget
            for i in range(num_days):
                # Collect today's scheduled harvests first.
                gold += planner[i] * crop.sell_price * crop.harvest_yield
                if i <= (num_days - (crop.growth_time + crop.regrowth_time *
                         ceil(crop.buy_price / crop.sell_price)) ):
                    # If pass conditional, must still have time to plant and
                    # harvest a crop such that it's profitable.
                    # Calculate how many can be bought.
                    buy_amount = gold // crop.buy_price
                    # Make sure we're not buying more than we have room for.
                    if (total_crops + buy_amount > max_num_seeds):
                        buy_amount = max_num_seeds - total_crops
                    # Do logistics (delta gold, total amount of crops).
                    total_crops += buy_amount
                    gold -= buy_amount * crop.buy_price
                    # Calculate which days this crop will be harvestable.
                    num_harvests = 0
                    for j in range(i + crop.growth_time, num_days,
                                   crop.regrowth_time):
                        planner[j] += int(buy_amount)
                        num_harvests += 1
                        # If the crop has a maximum number of harvests and we
                        # have passed it, stop the loop as it can't be harvested
                        # more.
                        if (crop.max_harvests != -1 and num_harvests >=
                                crop.max_harvests):
                            break
            if DEBUG == 2: print(planner)
            # Only add this crop if it's profitable i.e. player ends with more
            # gold than they started with.
            if gold > budget:
                possible_paths.append( [crop.name, gold - budget] )
    # Now check if there are crops that can span two seasons, including the
    # current season.
    if season == "summer" and not recursive:
        long_possible_paths = get_net_income("summer", date, budget,
                                             max_num_seeds, data, 1)
        if DEBUG == 2:
            print(possible_paths)
            print(long_possible_paths)
            print("")
        # Merge long-term crops' final golds with their short-term. Put long
        # term gold in front of short term gold.
        for path in long_possible_paths:
            path_names = [short_path[0] for short_path in possible_paths]
            try:
                index = path_names.index(path[0])
                # NOTE(review): insert(1, ...) places the two-season gold at
                # index 1 and pushes the single-season gold to index 2, which
                # contradicts the index description in the closing comment
                # below - confirm which ordering the GUI expects.
                possible_paths[index].insert(1, path[1])
            except ValueError:
                # Occurs when crop is in long_possible_paths but not (short)
                # possible_paths. E.g. Corn in summer with date=12, budget=222,
                # max seeds=12. Corn is not profitable if grown just for the
                # rest of summer, so not added to possible_paths. However, if
                # grown in spring too, it's profitable and so is added to the
                # list.
                possible_paths.append(path)
    # Sort by gold gained, highest first (reverse=1 is treated as True).
    possible_paths = sorted(possible_paths, key=lambda x: x[1], reverse=1)
    # Return an array of arrays where each inner array contains:
    # 0: The name of the crop.
    # 1: The gold gained if the player were to plant this all season.
    # 2: The gold gained if the player were to plant this for the current and
    #    next season (if applicable). Otherwise, there's no [2] index.
    # The array is sorted by descending gold gained values.
return possible_paths | {
"repo_name": "Terpal47/stardew-valley-assistant",
"path": "GUI Edition/functions.py",
"copies": "1",
"size": "10625",
"license": "mit",
"hash": -5949941528973469000,
"line_mean": 40.6705882353,
"line_max": 80,
"alpha_frac": 0.5702588235,
"autogenerated": false,
"ratio": 3.9794007490636703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005894816679574618,
"num_lines": 255
} |
import sublime, sublime_plugin
class FileOffsetCommand(sublime_plugin.TextCommand):
    """Report the byte offset(s) of the current selection(s).

    For a saved file the offsets are computed against the on-disk bytes,
    honouring the view's encoding; for an unsaved buffer the in-memory
    character points are reported instead.
    """
    def run(self, edit):
        positions = self._collect_positions()
        offsets = self._calc_offsets(positions)
        is_long = (len(offsets) > 1)
        text = self._format_result(offsets, positions, is_long)
        # BUGFIX: the original passed len(offsets) here, which is truthy for
        # any non-empty selection set, so the message-dialog branch of
        # _show_result was unreachable. Pass the boolean it expects.
        self._show_result(text, is_long)

    def _collect_positions(self):
        """Return [(begin (row, col, point)), (end (row, col, point))] for
        each selection region in the view."""
        ret = []
        for sel in self.view.sel():
            (bpos, epos) = (sel.begin(), sel.end())
            (brow, bcol) = self.view.rowcol(bpos)
            if (sel.empty()):
                (erow, ecol) = (brow, bcol)
            else:
                (erow, ecol) = self.view.rowcol(epos)
            ret.append([(brow, bcol, bpos), (erow, ecol, epos)])
        return ret

    def _calc_offsets(self, positions):
        """Translate (row, col) positions into byte offsets.

        Saved views are re-read from disk so offsets account for the file
        encoding and line endings; unsaved views fall back to the in-buffer
        character points.

        NOTE(review): _offsets_by_file reads the file strictly forward, so
        it relies on *positions* arriving in ascending order (View.sel()
        yields regions in order) - confirm.
        """
        def _offsets_by_file():
            def _get_offset(l, c, state):
                # Advance until row l has been read, remembering where that
                # row starts on disk.
                while (state["line_num"] <= l):
                    state["line_offset"] = state["file"].tell()
                    state["line"] = state["file"].readline()
                    state["line_num"] += 1
                # Byte length of the row's first c characters in the file's
                # own encoding.
                col_offset = len(state["line"][:c].encode(self.view.encoding()))
                return state["line_offset"] + col_offset
            ret = []
            # BUGFIX: the original opened the file without ever closing it;
            # 'with' guarantees the handle is released even on error.
            with open(self.view.file_name(), "rt",
                      encoding=self.view.encoding()) as f:
                state = { "file": f, "line_num": 0, "line": "", }
                for ((brow, bcol, bpos), (erow, ecol, epos)) in positions:
                    bo = _get_offset(brow, bcol, state)
                    if ((brow, bcol) == (erow, ecol)):
                        ret.append((bo, bo))
                    else:
                        eo = _get_offset(erow, ecol, state)
                        ret.append((bo, eo))
            return ret
        def _offsets_by_pos():
            # Unsaved buffer: the view's character points are the best
            # available approximation of offsets.
            ret = []
            for pos in positions:
                ((brow, bcol, bpos), (erow, ecol, epos)) = pos
                ret.append((bpos, epos))
            return ret
        if (self.view.file_name()):
            return _offsets_by_file()
        else:
            return _offsets_by_pos()

    def _get_substring_at_pos(self, apos, l):
        """Return up to *l* characters of buffer text starting at the begin
        position of *apos*, truncated at the first newline."""
        (brow, bcol, bsel) = apos[0]
        pos = self.view.text_point(brow, bcol)
        substr = self.view.substr(sublime.Region(pos, pos + l))
        return substr.split("\n")[0]

    def _format_result(self, offsets, positions, show_substr):
        """Build the human-readable report; when *show_substr* is set a short
        text preview is appended to each ranged entry."""
        text = "File name: %s\n\n" % self.view.file_name()
        for i in range(len(offsets)):
            (bo, eo) = offsets[i]
            if (bo != eo):
                text += "0x%08X (%d) - 0x%08X (%d)" % (bo, bo, eo, eo)
                if (show_substr):
                    text += "\t" + self._get_substring_at_pos(positions[i], 16)
            else:
                text += "0x%08X (%d)" % (bo, bo)
            text += "\n"
        return text

    def _show_result(self, text, is_long):
        """Long reports open in a scratch buffer; short ones use a dialog."""
        if (is_long):
            self.view.run_command("file_offset_show_result", {"text":text})
        else:
            sublime.message_dialog(text)
class FileOffsetShowResultCommand(sublime_plugin.TextCommand):
    """Helper command: open a new scratch buffer and fill it with *text*.

    Invoked by FileOffsetCommand (via view.run_command) when the offsets
    report is too long for a message dialog.
    """
    def run(self, edit, text):
        n = sublime.active_window().new_file()
        # Scratch buffers never prompt to save when closed.
        n.set_scratch(True)
        n.insert(edit, 0, text)
| {
"repo_name": "AlexNk/Sublime-FileOffset",
"path": "FileOffset.py",
"copies": "1",
"size": "3335",
"license": "mit",
"hash": 2842456616398841300,
"line_mean": 37.3333333333,
"line_max": 80,
"alpha_frac": 0.4962518741,
"autogenerated": false,
"ratio": 3.477580813347237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.945200875700759,
"avg_score": 0.004364786087929439,
"num_lines": 87
} |
__author__ = 'Alexander'
from datawarehouse.models import LutInterventionItnCoveragesAdmin1, LutInterventionIrsCoveragesAdmin1
from django.core.management.base import BaseCommand
import csv
class Command(BaseCommand):
    """
    This class defines the ETL command. The ETL command is used
    to ingest data given an input file and a mapping file. It is
    important to note that location is hardcoded due to the complicated
    nature of gis data. The rest of the class is dynamic and could be
    easily reused in other projects.
    """
    def handle(self, *args, **options):
        """This method is responsible for 'handling' the inputs.
        This method is the heart of the management command.
        It uses the given inputs, along with other methods, to ingest
        data.

        Expected csv columns (every row is processed; there is no
        header-skipping logic): 0 = gaul code, 1 = province name,
        2 = country, 3 = percent of population protected by IRS,
        4 = year, 5 = source.

        :param *args: Argument list.
        :param **options: Command line options list.
        """
        if len(args) == 0:
            print "Please specify filename"
            # NOTE(review): exits with status 0 (success) even though no
            # filename was supplied - confirm whether non-zero is wanted.
            exit(0)
        filename = args[0]
        with open(filename, 'rb') as csvfile:
            irsdata = csv.reader(csvfile, delimiter=",")
            for row in irsdata:
                print row[1], row[0]
                #print ".%s." % float(row[4])
                irs = LutInterventionIrsCoveragesAdmin1()
                irs.gaul_code = int(row[0])
                irs.country = row[2]
                irs.province_name = row[1]
                try:
                    irs.percent_of_the_population_protected_by_irs = float(row[3])
                except ValueError:
                    # Non-numeric coverage is stored as NULL.
                    irs.percent_of_the_population_protected_by_irs = None
                try:
                    irs.year = int(row[4])
                except ValueError:
                    # A row without a parsable year is skipped entirely.
                    print "ValueError: Year"
                    continue
                try:
                    # NOTE(review): a missing column raises IndexError, not
                    # ValueError, so this except will not catch it - confirm.
                    irs.source = row[5]
                except ValueError:
                    print "ValueError: source"
                    continue
                irs.save()
        print options
pass | {
"repo_name": "tph-thuering/vnetsource",
"path": "datawarehouse/management/commands/upload_irs_data.py",
"copies": "2",
"size": "2037",
"license": "mpl-2.0",
"hash": 2761300517046714000,
"line_mean": 34.7543859649,
"line_max": 101,
"alpha_frac": 0.5552282769,
"autogenerated": false,
"ratio": 4.380645161290323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5935873438190322,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alexander'
from datawarehouse.models import LutInterventionItnCoveragesAdmin1
from django.core.management.base import BaseCommand
import csv
class Command(BaseCommand):
    """
    This class defines the ETL command. The ETL command is used
    to ingest data given an input file and a mapping file. It is
    important to note that location is hardcoded due to the complicated
    nature of gis data. The rest of the class is dynamic and could be
    easily reused in other projects.
    """
    def handle(self, *args, **options):
        """This method is responsible for 'handling' the inputs.
        This method is the heart of the management command.
        It uses the given inputs, along with other methods, to ingest
        data.

        Expected csv columns (every row is processed; no header row is
        skipped): 0 = gaul code, 1 = province name, 2 = country,
        4 = pct of under-5s sleeping under a bednet, 5 = pct households
        with ITN, 6 = pct ITN all, 7 = year, 8 = source.
        Column 3 is not read.

        :param *args: Argument list.
        :param **options: Command line options list.
        """
        if len(args) == 0:
            print "Please specify filename"
            # NOTE(review): exits with status 0 (success) on a usage error.
            exit(0)
        filename = args[0]
        with open(filename, 'rb') as csvfile:
            itndata = csv.reader(csvfile, delimiter=",")
            for row in itndata:
                print row[1], row[0]
                #print ".%s." % float(row[4])
                itn = LutInterventionItnCoveragesAdmin1()
                itn.gaul_code = int(row[0])
                itn.country = row[2]
                itn.province_name = row[1]
                try:
                    itn.percent_of_children_under_5_years_sleeping_under_a_bednet = float(row[4])
                except ValueError:
                    # Non-numeric values are stored as NULL.
                    itn.percent_of_children_under_5_years_sleeping_under_a_bednet = None
                try:
                    itn.the_estimated_percent_households_with_itn= float(row[5])
                except ValueError:
                    itn.the_estimated_percent_households_with_itn = None
                try:
                    #print float(ronow[6])
                    itn.percent_itn_all = float(row[6])
                    #print "itn_all_good"
                except ValueError:
                    #print "itn_all"
                    itn.percent_itn_all = None
                try:
                    itn.year = int(row[7])
                except ValueError:
                    # A row without a parsable year is skipped entirely.
                    continue
                try:
                    # NOTE(review): a missing column raises IndexError, which
                    # this ValueError handler will not catch - confirm.
                    itn.source = row[8]
                except ValueError:
                    continue
                itn.save()
        print options
pass | {
"repo_name": "tph-thuering/vnetsource",
"path": "datawarehouse/management/commands/upload_itn_data.py",
"copies": "2",
"size": "2432",
"license": "mpl-2.0",
"hash": -3366788097872826000,
"line_mean": 35.3134328358,
"line_max": 97,
"alpha_frac": 0.5337171053,
"autogenerated": false,
"ratio": 4.222222222222222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5755939327522223,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alexander Ponomarev'
from pyevolve import Util
from random import randint as rand_randint, gauss as rand_gauss
from pyevolve import Consts
def G1DListMutatorIntegerGaussian(genome, **args):
    """ A gaussian mutator for G1DList of Integers
    Accepts the *rangemin* and *rangemax* genome parameters, both optional. Also
    accepts the parameter *gauss_mu* and the *gauss_sigma* which respectively
    represents the mean and the std. dev. of the random distribution.

    NOTE(review): rangemin/rangemax are indexed per gene below
    (getParam("rangemax")[it]), so both are expected to be sequences of
    per-gene bounds rather than scalars - confirm with callers.
    """
    if args["pmut"] <= 0.0: return 0
    listSize = len(genome)
    # Expected number of mutations across the whole chromosome.
    mutations = args["pmut"] * (listSize)
    mu = genome.getParam("gauss_mu")
    sigma = genome.getParam("gauss_sigma")
    if mu is None:
        mu = Consts.CDefG1DListMutIntMU
    if sigma is None:
        sigma = Consts.CDefG1DListMutIntSIGMA
    if mutations < 1.0:
        # Fewer than one expected mutation: flip a biased coin per gene.
        mutations = 0
        for it in xrange(listSize):
            if Util.randomFlipCoin(args["pmut"]):
                final_value = genome[it] + int(rand_gauss(mu, sigma))
                # Clamp the mutated value into the gene's allowed range.
                final_value = min(final_value, genome.getParam("rangemax")[it])
                final_value = max(final_value, genome.getParam("rangemin")[it])
                genome[it] = final_value
                mutations += 1
    else:
        # One or more expected mutations: mutate that many randomly chosen
        # genes (the same gene may be picked more than once).
        for it in xrange(int(round(mutations))):
            which_gene = rand_randint(0, listSize - 1)
            final_value = genome[which_gene] + int(rand_gauss(mu, sigma))
            final_value = min(final_value, genome.getParam("rangemax")[which_gene])
            final_value = max(final_value, genome.getParam("rangemin")[which_gene])
            genome[which_gene] = final_value
return int(mutations) | {
"repo_name": "lamerman/gatool",
"path": "ev/Mutators.py",
"copies": "1",
"size": "1679",
"license": "bsd-3-clause",
"hash": -167220860815788700,
"line_mean": 32.6,
"line_max": 83,
"alpha_frac": 0.6301369863,
"autogenerated": false,
"ratio": 3.610752688172043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4740889674472043,
"avg_score": null,
"num_lines": null
} |
from optparse import OptionParser
from importlib import import_module
from tests_common import setup_logger
from fnmatch import fnmatch
import traceback
import time
import sys
import os
logger = setup_logger("tests_runner")
def get_available_tests():
    """Import every python module under ./tests and return the list of test
    classes, i.e. all subclasses of tests_common.TestBase found after the
    imports have run."""
    test_base = getattr(sys.modules["tests_common"], "TestBase")
    logger.info("Looking for available tests")
    files = os.listdir(os.path.join(os.getcwd(), "tests"))
    for file in files:
        if file.endswith(".py") and file != "__init__.py":
            logger.debug("+ Found python file %s" % file)
            # Strip the ".py" extension to obtain the module name.
            name = ".".join(file.split(".")[:-1])
            # Importing the module registers its TestBase subclasses.
            import_module("tests.%s" % name)
    tests = test_base.__subclasses__()
    logger.info("Loaded %d tests:" % len(tests))
    for test in tests:
        logger.info("+ %s" % test.__name__)
    return tests
def filter_tests(tests, pattern):
    """Return the subset of *tests* whose class name matches any of the
    comma-separated glob patterns in *pattern* (fnmatch syntax).

    Returns an empty list when no pattern is supplied.
    """
    filtered = list()
    if not pattern:
        logger.info("No filtering pattern was supplied")
        return filtered
    patterns = [p.strip() for p in pattern.split(",")]
    for t in tests:
        for p in patterns:
            if fnmatch(t.__name__, p):
                filtered.append(t)
                # BUGFIX: the original used 'continue' here, which only moved
                # on to the next pattern - a test matching several patterns
                # was appended (and later executed) once per match. 'break'
                # selects each test at most once.
                break
    logger.info("After filtering tests by given pattern, found %d tests to execute" % len(filtered))
    for t in filtered:
        logger.info("+ %s" % t.__name__)
    return filtered
def run_single_test(test):
    """Run one test class through setup / run_test / tear_down.

    Returns True when the test completed without raising, False otherwise.
    tear_down always runs, even after a failure.
    """
    ok = True
    try:
        logger.info("Starting test: %s" % test.__name__)
        test.setup()
        test.run_test()
        logger.info("Test execution completed successfully!")
    except Exception as e:
        # NOTE(review): e.message exists only on Python 2 exceptions; on
        # Python 3 this line itself raises AttributeError - confirm the
        # target interpreter.
        logger.error("Test execution failed: %s" % e.message)
        logger.error("-" * 60)
        trace = traceback.format_exc().splitlines()
        for line in trace:
            logger.error(line)
        logger.error("-" * 60)
        ok = False
    finally:
        test.tear_down()
    return ok
if __name__ == "__main__":
    # NOTE(review): optparse is deprecated in favour of argparse - consider
    # migrating when convenient.
    parser = OptionParser()
    parser.add_option("-t", "--tests", dest="tests", help="Comma separated list of tests")
    parser.add_option("-c", "--continue-after-failure", dest="continue_after_failure", action="store_true", help="Continue to the next test in case of test failure")
    (options, args) = parser.parse_args()
    # Load available tests from "tests/" directory
    tests = get_available_tests()
    if len(tests) == 0:
        logger.error("No tests were found, aborting execution!")
        sys.exit(1)
    # Filter the tests we want to run
    to_run = filter_tests(tests, options.tests)
    # Run filtered tests
    passed_tests = 0
    failed_tests = 0
    total_duration = 0
    for test in to_run:
        # Run test and measure execution time
        start_time = time.time()
        ok = run_single_test(test)
        test_duration = time.time() - start_time
        logger.info("Test execution took %.2f seconds" % test_duration)
        total_duration += test_duration
        if ok:
            passed_tests += 1
        else:
            failed_tests += 1
            if not(options.continue_after_failure):
                logger.error("Discarding other tests due to failure")
                break
    logger.info("=" * 60)
    logger.info("Ran %d tests (%d passed, %d failed) in %.2f seconds" % (passed_tests + failed_tests, passed_tests, failed_tests, total_duration))
    # Process exit code: 0 only when every executed test passed.
    sys.exit(1 if (failed_tests > 0) else 0)
| {
"repo_name": "sirotin/at-a-glance-of-a-key",
"path": "simple-python-testing-framework/tests_runner.py",
"copies": "1",
"size": "3661",
"license": "mit",
"hash": -1280112956590273500,
"line_mean": 30.8347826087,
"line_max": 165,
"alpha_frac": 0.6121278339,
"autogenerated": false,
"ratio": 3.8946808510638298,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5006808684963829,
"avg_score": null,
"num_lines": null
} |
from selenium import webdriver
import base64
def load_url_and_read_values(url, driver):
    """Navigate to *url* and print the 'demo' cookie value twice: as rendered
    in the page text, and as stored in the browser's cookie jar (base64
    decoded)."""
    driver.get(url)
    # Value as rendered inside the page itself.
    shown = driver.find_element_by_id("current_cookie").text
    print("Current value (from webpage text): %s" % shown)
    # Value as stored in the actual browser cookie.
    cookie = driver.get_cookie("demo")
    if not cookie:
        print("Cookie does not exist!")
        return
    raw = cookie["value"]
    print("Current cookie value is: %s (decoded: %s)" % (raw, base64.b64decode(raw)))
if __name__ == "__main__":
    # Start chrome web browser
    url = "http://localhost:55438/Default.aspx"
    driver = webdriver.Chrome()
    # Check current page values
    print("==== Demo 1: Read initial web-site values ====")
    load_url_and_read_values(url, driver)
    # Update the cookie using the web-page form
    print("==== Demo 2: Automate web-page form filling ====")
    driver.find_element_by_id("cookie_value").send_keys("form submit")
    driver.find_element_by_id("form_submit").click()
    # Check current page values
    load_url_and_read_values(url, driver)
    # Update the cookie
    print("==== Demo 3: Update cookie ====")
    cookie = driver.get_cookie("demo")
    if cookie:
        # NOTE(review): b64encode("set cookie") with a str argument only
        # works on Python 2; Python 3 requires bytes - confirm interpreter.
        cookie["value"] = base64.b64encode("set cookie")
    else:
        # Cookie missing: build a complete cookie dict from scratch.
        cookie = { "domain" : "localhost",
                   "expiry" : None,
                   "httpOnly" : False,
                   "name" : "demo",
                   "path" : "/",
                   "secure" : False,
                   "value" : base64.b64encode("set cookie") }
    # Replace the old cookie with the updated one.
    driver.delete_cookie("demo")
    driver.add_cookie(cookie)
    # Check current page values
    load_url_and_read_values(url, driver)
    # Close the web browser
    driver.quit()
| {
"repo_name": "sirotin/at-a-glance-of-a-key",
"path": "using-selenium-with-python-example/demo.py",
"copies": "1",
"size": "2102",
"license": "mit",
"hash": 1544020560031860700,
"line_mean": 32.3650793651,
"line_max": 93,
"alpha_frac": 0.6151284491,
"autogenerated": false,
"ratio": 3.7137809187279154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4828909367827915,
"avg_score": null,
"num_lines": null
} |
import logging
import paramiko
import time
logger = logging.getLogger("builder")
class SSHWrapper(object):
    """Convenience wrapper around paramiko.SSHClient: key-based connect,
    command execution with logged output, and cleanup on deletion."""
    def __init__(self):
        self.ssh = paramiko.SSHClient()
        # SECURITY NOTE(review): AutoAddPolicy trusts any unknown host key,
        # which permits man-in-the-middle attacks - acceptable only for
        # throwaway build hosts.
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.connected = False
    def __del__(self):
        # Best-effort cleanup when the wrapper is garbage-collected.
        if self.connected:
            self.disconnect()
    def connect(self, hostname, username, pem, timeout=60):
        """Connect using the RSA private key file *pem*.

        Returns True on success, False on any failure (never raises).
        """
        key = paramiko.RSAKey.from_private_key_file(pem)
        try:
            logger.debug("Connecting to %s" % hostname)
            self.ssh.connect(hostname, username=username, pkey=key, timeout=timeout)
            logger.debug("Successfully connected to %s" % hostname)
            self.hostname = hostname
            self.connected = True
            return True
        except Exception as e:
            # NOTE(review): the message mentions a retry limit, but no retry
            # loop exists in this method - confirm whether retries were
            # intended here or are handled by the caller.
            logger.warn("Failed connecting to %s - retry limit exceeded" % hostname)
            self.hostname = ""
            self.connected = False
            return False
    def disconnect(self):
        # Safe to call repeatedly; a no-op when not connected.
        if self.connected:
            logger.debug("Disconnecting from %s" % self.hostname)
            self.ssh.close()
            self.hostname = ""
            self.connected = False
    def execute(self, cmd, show_output=True, throw_on_error=True):
        """Run *cmd* on the remote host and return its exit code.

        Logs stdout/stderr when *show_output* is set; raises on a non-zero
        exit code when *throw_on_error* is set. Returns 255 when called
        while disconnected.
        """
        if not(self.connected):
            logger.error("Cannot execute ssh command - not connected")
            return 255
        logger.info("Running on remote host %s: '%s'" % (self.hostname, cmd))
        _, stdout, stderr = self.ssh.exec_command(cmd)
        # Blocks until the remote command finishes.
        rc = stdout.channel.recv_exit_status()
        logger.info("Command returned rc=%d" % rc)
        if show_output:
            for line in stdout:
                logger.debug("STDOUT - %s" % line)
            for line in stderr:
                logger.debug("STDERR - %s" % line)
        if throw_on_error and rc != 0:
            raise Exception("Command failed, rc=%d" % rc)
        return rc
| {
"repo_name": "sirotin/at-a-glance-of-a-key",
"path": "compile-using-aws-spot-instance/ssh_helper.py",
"copies": "1",
"size": "1898",
"license": "mit",
"hash": 7030119807167040000,
"line_mean": 26.5072463768,
"line_max": 91,
"alpha_frac": 0.6975763962,
"autogenerated": false,
"ratio": 3.25,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44475763962000003,
"avg_score": null,
"num_lines": null
} |
import pickledb
import logging
import base64
import json
from functools import wraps
from flask import Flask, request, redirect, abort
app = Flask(__name__)
logger = logging.getLogger()
hdlr = logging.FileHandler("redirections.log")
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
class RedirectionsStore(object):
    """Thin persistence layer over a pickledb file mapping redirection keys
    to their target URLs."""

    DATABASE_PATH = "redirections.db"

    def __init__(self):
        # True asks the database to dump every change directly to the underlying file
        self.db = pickledb.load(RedirectionsStore.DATABASE_PATH, True)

    def add_record(self, key, value):
        """Store key -> value; refuses to overwrite an existing key."""
        if self.db.get(key) is not None:
            return False
        self.db.set(key, value)
        return True

    def get_record(self, key):
        """Return the value stored under *key* (None when absent)."""
        return self.db.get(key)

    def delete_record(self, key):
        """Remove the record stored under *key*."""
        self.db.rem(key)

    def get_all_records(self):
        """Return every stored key -> value pair as a plain dict."""
        return {key: self.db.get(key) for key in self.db.getall()}
class ServerImplementation(object):
    """Handlers for the /mgmt endpoint. Both expect base64-encoded 'key'
    (and for add, 'value') fields in the request's JSON body."""
    @staticmethod
    def add_new_redirection_request():
        """Add a key -> value redirection: 201 on success, 403 when the key
        already exists, 400 on malformed input."""
        # Try parsing base64 key and value
        try:
            key = base64.b64decode(request.json["key"])
            value = base64.b64decode(request.json["value"])
        except Exception as e:
            # NOTE(review): e.message is Python 2 only - confirm interpreter.
            logger.error("Failed parsing data: %s" % e.message)
            abort(400) # Bad request
        logger.info("Adding redirection with key=%s, value=%s" % (key, value))
        db = RedirectionsStore()
        added = db.add_record(key, value)
        if not(added):
            logger.error("Key already exists - discarding request (key=%s)" % key)
            abort(403) # Forbidden
        return ("", 201) # Created
    @staticmethod
    def delete_redirection_request():
        """Delete the redirection stored under 'key': 204 after deletion,
        400 on malformed input."""
        # Try parsing base64 key
        try:
            key = base64.b64decode(request.json["key"])
        except Exception as e:
            logger.error("Failed parsing data: %s" % e.message)
            abort(400) # Bad request
        logger.info("Deleting redirection with key=%s" % key)
        db = RedirectionsStore()
        db.delete_record(key)
        return ("", 204)
def secure_api(f):
    """Decorator enforcing HTTP basic authentication on a Flask view.

    SECURITY NOTE(review): credentials are hard-coded ("admin"/"password")
    and compared in plain text, and basic auth over plain HTTP sends them
    base64-encoded only - move credentials to configuration and serve over
    HTTPS before any real deployment.
    """
    @wraps(f)
    def implementation(*args, **kwargs):
        auth = request.authorization
        if not(auth):
            logger.error("No authorization supplied, discarding request!")
            abort(401) # Unauthorized
        if (auth.username != "admin" or auth.password != "password"):
            # SECURITY NOTE(review): logging the attempted password leaks
            # secrets into the log file - consider removing it.
            logger.error("Bad user name or password (username=%s, password=%s)" % (auth.username, auth.password))
            abort(401) # Unauthorized
        return f(*args, **kwargs)
    return implementation
@app.route("/mgmt", methods=[ "POST", "DELETE" ])
@secure_api
def api_mgmt():
    """Management endpoint: POST adds a redirection, DELETE removes one.

    Requires basic auth (secure_api) and a JSON request body; the actual
    work is delegated to ServerImplementation.
    """
    # Make sure we receive arguments with json format
    if not(request.json):
        logger.warn("Got mgmt API request not in json format - discarding!")
        abort(415) # Unsupported media type
    if request.method == "POST":
        logger.debug("Handling mgmt POST request")
        return ServerImplementation.add_new_redirection_request()
    elif request.method == "DELETE":
        logger.debug("Handling mgmt DELETE request")
        return ServerImplementation.delete_redirection_request()
    # Defensive fallback: Flask only routes the two methods listed above.
    logger.warn("Got mgmt request that cannot be handled")
    abort(400) # Bad request
@app.route("/redirections", methods=[ "GET" ])
@secure_api
def api_redirections():
    """Admin endpoint: dump every stored redirection as a JSON object."""
    logger.info("Got a request to list all redirections from database")
    payload = json.dumps(RedirectionsStore().get_all_records())
    return (payload, 200)
@app.route("/redirect/<path:key>")
def redirect_request(key):
    """Public endpoint: look up *key* and issue a 302 redirect to its target.

    Responds 400 when the key has no stored redirection.
    """
    logger.info("Got a redirection request with key=%s" % key)
    db = RedirectionsStore()
    result = db.get_record(key)
    if result == None:
        logger.error("Key %s has no redirection defined" % key)
        abort(400) # Bad request
    logger.debug("Redirecting to %s" % result)
    return redirect(result, 302)
if __name__ == "__main__":
    # Flask's built-in development server; use a production WSGI server
    # (e.g. gunicorn) for real deployments.
    app.run()
| {
"repo_name": "sirotin/at-a-glance-of-a-key",
"path": "simple-url-redirection-using-flask/app.py",
"copies": "1",
"size": "4432",
"license": "mit",
"hash": -1904149332818174700,
"line_mean": 30.2112676056,
"line_max": 113,
"alpha_frac": 0.6383122744,
"autogenerated": false,
"ratio": 3.8809106830122593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5019222957412259,
"avg_score": null,
"num_lines": null
} |
import time
import logging
import boto.ec2
from aws_helper import AwsHelper
from ssh_helper import SSHWrapper
logger = logging.getLogger("builder")
class AwsWrapper:
    """High-level orchestration around boto/EC2: requests a spot instance,
    waits for it to come up, then prepares it for builds over ssh."""
    def __init__(self, settings):
        self.settings = settings
        self.ec2 = boto.ec2.connect_to_region(self.settings.Region,
                                              aws_access_key_id=self.settings.Id,
                                              aws_secret_access_key=self.settings.Secret)
        self.helper = AwsHelper(self.ec2, settings)
        self.instance = None
        self.ssh = SSHWrapper()
    def createInstance(self):
        """Request a spot instance; raises when the request fails.

        Waiting for AWS health checks is best-effort only (warns, does not
        raise).
        """
        self.instance = self.helper.requestSpotInstance()
        if not(self.instance):
            raise Exception("Failed creating spot instance")
        # Note: This is optional, the instance is ready for ssh almost 2 minutes before AWS decides it's healthy
        result = self.helper.waitForHealthchecks(self.instance, timeout=300)
        if not(result):
            logger.warn("Failed waiting for instance healthchecks to pass")
    def prepareInstance(self, ip_address=None):
        """Connect over ssh (defaults to the instance's private IP), fix the
        hostname mapping in /etc/hosts and mount the data volume.

        Raises when the ssh connection cannot be established.
        """
        if ip_address == None:
            ip_address = self.instance.private_ip_address
        result = self.ssh.connect(hostname=ip_address,
                                  username=self.settings.UserName,
                                  pem="%s.pem" % self.settings.KeyPair,
                                  timeout=120)
        if not(result):
            raise Exception("Failed connecting to ssh port for host %s" % ip_address)
        # Some cool-down time
        time.sleep(2)
        # Update host name and mount data volume
        self.ssh.execute("sudo sed -i \"s|127.0.0.1.*|127.0.0.1 localhost `hostname`|g\" /etc/hosts")
        self.ssh.execute("sudo mkdir -p /mnt/data")
        self.ssh.execute("sudo mount /dev/xvdf /mnt/data")
| {
"repo_name": "sirotin/at-a-glance-of-a-key",
"path": "compile-using-aws-spot-instance/aws_wrapper.py",
"copies": "1",
"size": "1885",
"license": "mit",
"hash": -6019239523778815000,
"line_mean": 34.5660377358,
"line_max": 106,
"alpha_frac": 0.6970822281,
"autogenerated": false,
"ratio": 3.3540925266903914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45511747547903914,
"avg_score": null,
"num_lines": null
} |
import time
import logging
import boto.ec2
logger = logging.getLogger("builder")
class AwsHelper:
    """Spot-instance lifecycle helper on top of a boto EC2 connection.

    Covers requesting a spot instance, polling request/instance state,
    tagging, data-volume attachment, and best-effort cleanup on failure.
    """

    def __init__(self, ec2, settings):
        self.ec2 = ec2
        self.settings = settings

    def checkSpotRequestState(self, sir_id):
        """Return the spot request object whose id equals *sir_id*, or None."""
        logger.debug("Checking spot request: %s" % sir_id)
        active_requests = self.ec2.get_all_spot_instance_requests()
        for sir in active_requests:
            if (sir_id == sir.id):
                logger.debug("Spot request %s is in state %s" % (sir_id, sir.state))
                return sir
        logger.warn("Cannot find an active request for %s" % sir_id)
        return None

    def getInstanceIdFromSpotRequest(self, sir_id, timeout):
        """Poll every 2 seconds (up to ~*timeout* seconds) until the spot
        request reaches the 'active' state; return its instance id or None."""
        instance_id = None
        retries = timeout
        while (retries > 0):
            sir = self.checkSpotRequestState(sir_id)
            if (sir != None):
                if (sir.state.lower() == "active"):
                    instance_id = sir.instance_id
                    break
            retries -= 2
            time.sleep(2)
        return instance_id

    def prepareInstance(self, instance_id):
        """Wait for the instance to run, tag it and attach the data volume.

        Returns the instance object, or None (after terminating the
        instance) if any step fails.
        """
        instance = self.getInstanceObject(instance_id)
        logger.info("Waiting for instance %s to be running" % instance_id)
        is_running = self.waitForRunningState(instance, timeout=120)
        if not(is_running):
            logger.warn("Instance %s is not running, terminating the instance" % instance_id)
            self.terminateInstance(instance_id)
            return None
        logger.info("Tagging instance %s" % instance_id)
        self.tagInstance(instance, self.settings.InstanceName)
        logger.info("Attaching volume %s" % self.settings.DataVolume)
        attached = self.ec2.attach_volume(volume_id=self.settings.DataVolume, instance_id=instance_id, device="/dev/sdf")
        if not(attached):
            logger.warn("Failed attaching volume %s to instance %s" % (self.settings.DataVolume, instance_id))
            self.terminateInstance(instance_id)
            return None
        logger.info("Instance %s was successfully created (ip: %s)" % (instance_id, instance.private_ip_address))
        return instance

    def terminateInstance(self, instance_id):
        """Terminate *instance_id*, logging (not raising) on failure.

        Fixed: prepareInstance called this method but it did not exist, so
        every failure path raised AttributeError instead of cleaning up.
        """
        try:
            logger.warn("Terminating instance %s" % instance_id)
            self.ec2.terminate_instances([instance_id])
        except Exception as e:
            logger.error("Failed terminating instance %s: %s" % (instance_id, e))
        return None

    def cancelSpotRequest(self, sir_id):
        """Cancel the spot request *sir_id*; errors are logged, not raised."""
        try:
            logger.warn("Canceling spot request %s" % sir_id)
            self.ec2.cancel_spot_instance_requests([sir_id])
        except Exception as e:
            # NOTE(review): assumes a boto EC2ResponseError-like exception
            # exposing status/error_code/reason; another exception type would
            # crash this handler -- TODO confirm.
            logger.error("Failed canceling spot request: %d - %s (%s)" % (e.status, e.error_code, e.reason))
        return None

    def getInstanceObject(self, instance_id):
        """Fetch the boto instance object for *instance_id*, or None."""
        try:
            result = self.ec2.get_all_instances([instance_id])
            instance = result[0].instances[0]
            return instance
        except Exception as e:
            # NOTE(review): same assumption about boto exception attributes
            # as in cancelSpotRequest -- TODO confirm.
            logger.error("Failed getting instance: %d - %s (%s)" % (e.status, e.error_code, e.reason))
            return None

    def waitForRunningState(self, instance, timeout):
        """Poll every 5 seconds (up to ~*timeout* seconds) until the instance
        state is 'running'; return True on success."""
        if (instance == None):
            return False
        status = ""
        retries = timeout
        while (retries > 0):
            status = instance.update()
            if (status.lower() == "running"):
                logger.info("Instance %s is in running state" % instance.id)
                return True
            retries -= 5
            time.sleep(5)
        logger.warn("Failed waiting for instance %s to be in running state (state: %s)" % (instance.id, status))
        return False

    def tagInstance(self, instance, name):
        """Add Name/Owner tags to a running instance; return True on success."""
        if (instance == None):
            return False
        status = instance.update()
        if (status.lower() != "running"):
            logger.warn("Cannot mark instance %s in status: %s" % (instance.id, status))
            return False
        instance.add_tag("Name", name)
        instance.add_tag("Owner", self.settings.InstanceOwner)
        return True

    def waitForHealthchecks(self, instance, timeout):
        """Poll instance+system status every 5 seconds until both are 'ok'."""
        if (instance == None):
            return False
        logger.debug("Waiting until instance %s will become healthy (timeout: %d seconds)" % (instance.id, timeout))
        # Initialized so the timeout log below is well-defined even if the
        # loop body never runs (e.g. timeout <= 0).
        instance_status = system_status = "unknown"
        retries = timeout
        while (retries > 0):
            health = self.ec2.get_all_instance_status([instance.id])
            instance_status = health[0].instance_status.status.lower()
            system_status = health[0].system_status.status.lower()
            if (instance_status == "ok") and (system_status == "ok"):
                logger.info("Instance %s health is ok" % instance.id)
                return True
            retries -= 5
            time.sleep(5)
        logger.warn("Instance %s health is not ok after the given timeout (instance: %s, system: %s)" % (instance.id, instance_status, system_status))
        return False

    def requestSpotInstance(self):
        """Request a spot instance bound to the configured ENI; return the
        prepared instance object, or None on failure."""
        eni = boto.ec2.networkinterface.NetworkInterfaceSpecification(network_interface_id=self.settings.EniId,
                                                                      device_index=0,
                                                                      delete_on_termination=False)
        network_interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(eni)
        logger.info("Requesting spot instance of type %s" % self.settings.InstanceType)
        req = self.ec2.request_spot_instances(price=self.settings.Price,
                                              image_id=self.settings.AmiId,
                                              instance_type=self.settings.InstanceType,
                                              availability_zone_group=self.settings.Region,
                                              placement=self.settings.AvailabilityZone,
                                              key_name=self.settings.KeyPair,
                                              network_interfaces=network_interfaces)
        sir_id = req[0].id
        instance_id = self.getInstanceIdFromSpotRequest(sir_id, timeout=120)
        if (instance_id == None):
            logger.warn("Spot request %s completed with failure, canceling the request" % sir_id)
            # Fixed: was self.helper.cancelSpotRequest(sir_id) -- AwsHelper
            # has no 'helper' attribute, so the failure path itself raised
            # AttributeError instead of canceling the request.
            self.cancelSpotRequest(sir_id)
            return None
        logger.info("Spot request completed, instance id: %s" % instance_id)
        instance = self.prepareInstance(instance_id)
        return instance
| {
"repo_name": "sirotin/at-a-glance-of-a-key",
"path": "compile-using-aws-spot-instance/aws_helper.py",
"copies": "1",
"size": "5550",
"license": "mit",
"hash": -2697873670567798300,
"line_mean": 33.0490797546,
"line_max": 144,
"alpha_frac": 0.6859459459,
"autogenerated": false,
"ratio": 3.2840236686390534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44699696145390533,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alexander "Yukikaze" Putin'
__email__ = 'yukikaze (at) modxvm.com'
import struct
class Replay(object):
    """Parser/serializer for replay files with a JSON-block header.

    Layout (as read by __init__/__read_blocks): two native uint32 values
    (magic, block count), then ``block_count`` length-prefixed JSON
    documents, then an opaque binary payload kept verbatim.

    Fixed for Python 2/3 compatibility: ``xrange`` -> ``range``, explicit
    UTF-8 encoding when re-serializing (``bytes + str`` failed on Py3),
    and ``with`` blocks so file handles are closed on error.
    """

    def __init__(self, raw_data):
        # raw_data must be bytes (Py3) / str (Py2).
        self._raw_data = raw_data
        self._blocks = []
        self._magic, block_count = struct.unpack('II', raw_data[:8])  # 2 native uint32
        self._last_block_end = self.__read_blocks(block_count)

    @classmethod
    def from_file(cls, path):
        """Build a Replay from the binary content of the file at *path*."""
        with open(path, 'rb') as stream:
            data = stream.read()
        return cls(data)

    @property
    def block_count(self):
        return len(self._blocks)

    @property
    def magic(self):
        return self._magic

    @property
    def blocks(self):
        # Shallow copy so callers cannot mutate the internal list.
        return self._blocks[:]

    @property
    def match_start(self):
        # First header block.
        return self._blocks[0]

    @property
    def battle_result(self):
        # Second header block; raises IndexError if absent.
        return self._blocks[1]

    def add_block(self, data):
        self._blocks.append(data)

    def remove_block(self, i):
        # NOTE(review): removes by *value*, not by index, despite the
        # parameter name -- preserved as-is since callers may rely on it.
        self._blocks.remove(i)

    def to_string(self):
        """Serialize back to raw bytes: header, JSON blocks, original tail."""
        import json
        result = struct.pack('II', self._magic, len(self._blocks))
        for block in self._blocks:
            # json.dumps defaults to ensure_ascii=True, so the UTF-8
            # encoding below is a pure ascii pass-through on both Py2/Py3.
            payload = json.dumps(block).encode('utf-8')
            result += struct.pack('I', len(payload))
            result += payload
        result += self._raw_data[self._last_block_end:]
        return result

    def to_file(self, path):
        """Write the serialized replay to *path*."""
        with open(path, 'wb') as stream:
            stream.write(self.to_string())

    def save(self, path):
        # Alias kept for backward compatibility.
        self.to_file(path)

    def __read_blocks(self, count):
        """Parse *count* length-prefixed JSON blocks; return the end offset."""
        import json
        pos = 8
        for i in range(count):
            size = struct.unpack('I', self._raw_data[pos: pos + 4])[0]
            start = pos + 4
            end = pos + 4 + size
            json_string = self._raw_data[start: end]
            # json.loads accepts bytes on Py3.6+ and str on Py2.
            self._blocks.append(json.loads(json_string))
            pos = end
        return pos
def load(file_name):
    """Parse the replay file at *file_name* and return a Replay instance."""
    return Replay.from_file(file_name)
| {
"repo_name": "AlexYukikaze/wot-replay",
"path": "replay/__init__.py",
"copies": "1",
"size": "2081",
"license": "mit",
"hash": -6521766839666575000,
"line_mean": 24.3780487805,
"line_max": 85,
"alpha_frac": 0.5482940894,
"autogenerated": false,
"ratio": 3.6444833625218913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4692777451921891,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alexandre Cloquet'
from django.contrib.auth.models import User, Group
from django.db import models
from django.utils.translation import ugettext as _
from uuid import uuid4
from Portal.models import Category
class GuildSettings(models.Model):
    '''
    Handle all settings about guild and SuperPortal

    Holds the guild identity, feature toggles, social/streaming links and
    the auth groups granted each site permission.
    '''
    # --- Guild identity ---
    guild_name = models.CharField(_('Guild name'), max_length=120)
    guild_motto = models.CharField(_('Guild motto'), max_length=256)
    guild_chief = models.ForeignKey(User)
    short_guild_description = models.CharField(_("Short description about your guild"), max_length=120, default="")
    guild_description = models.TextField(_("Description about your guild"), default="")
    tag = models.CharField(_('Tag'), max_length=10, default="")
    # --- Feature toggles ---
    forum_active = models.BooleanField(_('Forum active'), default=True)
    slider_active = models.BooleanField(_('Slider active'), default=False)
    # --- Social / streaming links (all optional) ---
    youtube_channel = models.URLField(_('Youtube channel'), default="", blank=True, null=True)
    twitch_channel = models.CharField(_('Twitch channel'), default="", blank=True, null=True, max_length=50)
    facebook_page = models.URLField(_('Facebook Page'), default="", blank=True, null=True)
    twitter_page = models.URLField(_('Twitter page'), default="", blank=True, null=True)
    # Category presumably used as the slider's content source when
    # slider_active is set -- verify against the view code.
    category_slider = models.ForeignKey(Category, blank=True, null=True)
    # --- Permission groups ---
    group_can_vote = models.ManyToManyField(Group, blank=True, related_name='group_can_vote')
    group_can_write_news = models.ManyToManyField(Group, blank=True, related_name='group_can_write_news')
    group_can_write_wiki = models.ManyToManyField(Group, blank=True, related_name='group_can_write_wiki')
    icon_guild = models.ImageField(upload_to='superportal/setting', blank=True, null=True)

    class Meta:
        verbose_name = _('Guild Settings')
        verbose_name_plural = _('Guild Settings')

    def __str__(self):
        # e.g. "[TAG][Guild name] Motto - chief_username"
        return u"[%s][%s] %s - %s" % (self.tag, self.guild_name, self.guild_motto, self.guild_chief.username)
"repo_name": "elryndir/GuildPortal",
"path": "SuperPortal/models/settings.py",
"copies": "1",
"size": "2020",
"license": "mit",
"hash": 4257580109426372000,
"line_mean": 50.8205128205,
"line_max": 115,
"alpha_frac": 0.702970297,
"autogenerated": false,
"ratio": 3.733826247689464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9921315291224984,
"avg_score": 0.0030962506928959754,
"num_lines": 39
} |
__author__ = 'Alexandre Cloquet'
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from django.db import models
class Game(models.Model):
    """A supported game, referenced by raids and by user profiles."""

    name = models.CharField(max_length=128)
    image = models.ImageField(upload_to='game/', blank=True)
    url_api = models.URLField(blank=True, null=True)
    image_thumbnail = models.ImageField(upload_to='game/thumbnail/', blank=True)

    class Meta:
        verbose_name = _('Game')
        verbose_name_plural = _('Games')

    def __str__(self):
        return self.name

    def get_out_raid(self):
        """Flatten the 'out raid' entries of every raid linked to this game."""
        list_out_raid = []
        for raid in self.raid_set.all():
            list_out_raid.extend(raid.get_out_raid())
        return list_out_raid

    def percent_of_player(self):
        """Percentage of users whose profile references this game.

        One user is excluded from the total (presumably an admin/system
        account -- TODO confirm). Fixed: uses COUNT queries instead of
        materializing whole querysets with len(), and returns 0.0 instead
        of raising ZeroDivisionError when no countable user exists.
        """
        all_user_for_game = self.userprofile_set.count()
        all_user = User.objects.count() - 1
        if all_user <= 0:
            return 0.0
        # Division kept exactly as before to preserve Py2/Py3 semantics.
        return float((all_user_for_game * 100) / all_user)
class Class(models.Model):
    """A playable character class belonging to one game."""

    name = models.CharField(max_length=64)
    game = models.ForeignKey(Game)

    class Meta:
        verbose_name = _('Class')
        # NOTE(review): plural label is singular ('Class') -- possibly
        # intentional, possibly a typo for 'Classes'.
        verbose_name_plural = _('Class')

    def __str__(self):
        return self.name
class FieldValue(models.Model):
    """A single attribute *value*, paired with a TypeValue through
    CharacterAttribute."""

    field_value = models.CharField(_('value'), max_length=64)

    def __str__(self):
        return self.field_value

    class Meta:
        verbose_name = _('Table entry')
        verbose_name_plural = _('Tables entries')
class TypeValue(models.Model):
    """A single attribute *name*, optionally scoped to one game."""

    field_value = models.CharField(_('value'), max_length=64)
    game = models.ForeignKey(Game, null=True)

    def __str__(self):
        return self.field_value

    class Meta:
        verbose_name = _('Table name')
        verbose_name_plural = _('Tables names')
class CharacterAttribute(models.Model):
    """Joins an attribute name (TypeValue) to a value (FieldValue) for a game."""

    attribute_name = models.ForeignKey(TypeValue)
    attribute_value = models.ForeignKey(FieldValue)
    for_game = models.ForeignKey(Game, null=True)

    def __str__(self):
        # Fixed: for_game is nullable (null=True), so dereferencing
        # self.for_game.name raised AttributeError for rows without a game.
        game_name = self.for_game.name if self.for_game is not None else '?'
        return '[' + game_name + '] ' + self.attribute_name.field_value + ' - ' + self.attribute_value.field_value

    class Meta:
        verbose_name = _('Table relation')
        verbose_name_plural = _('Tables relations')
"repo_name": "elryndir/GuildPortal",
"path": "Portal/models/enrollment.py",
"copies": "1",
"size": "2250",
"license": "mit",
"hash": -5742032601725762000,
"line_mean": 27.1375,
"line_max": 124,
"alpha_frac": 0.6262222222,
"autogenerated": false,
"ratio": 3.6885245901639343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9801558487834725,
"avg_score": 0.0026376649058416925,
"num_lines": 80
} |
from math import sqrt
import numpy as np
from scipy import linalg
from .mxne_debiasing import compute_bias
from ..utils import logger, verbose, sum_squared, warn, dgemm
from ..time_frequency._stft import stft_norm1, stft_norm2, stft, istft
def groups_norm2(A, n_orient):
    """Squared L2 norm of each group of ``n_orient`` consecutive rows.

    WARNING: squares ``A`` in place (callers pass copies on purpose).
    """
    n_groups = A.shape[0] // n_orient
    np.square(A, out=A)  # in-place, as callers expect
    return A.reshape(n_groups, -1).sum(axis=1)
def norm_l2inf(A, n_orient, copy=True):
    """L2-inf norm: max over groups of ``n_orient`` rows of the group L2 norm.

    With ``copy=False`` the input is squared in place.
    """
    if A.size == 0:
        return 0.0
    if copy:
        A = A.copy()
    # Inlined groups_norm2: squares A in place, then sums per group.
    grp = np.power(A, 2, A).reshape(A.shape[0] // n_orient, -1).sum(axis=1)
    return sqrt(grp.max())
def norm_l21(A, n_orient, copy=True):
    """L21 norm: sum over groups of ``n_orient`` rows of the group L2 norm.

    With ``copy=False`` the input is squared in place.
    """
    if A.size == 0:
        return 0.0
    if copy:
        A = A.copy()
    # Inlined groups_norm2: squares A in place, then sums per group.
    grp = np.power(A, 2, A).reshape(A.shape[0] // n_orient, -1).sum(axis=1)
    return np.sqrt(grp).sum()
def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False):
    """Proximity operator for the L21 mixed norm.

    Groups of ``n_orient`` rows are shrunk jointly: an L2 norm is taken over
    each group (across all coefficients) and soft-thresholded by ``alpha``.
    With ``is_stft=True`` the group norm accounts for the TF transform's
    negative frequencies via ``stft_norm2``.

    Parameters
    ----------
    Y : array, shape (n_sources, n_coefs)
        The input data.
    alpha : float
        The regularization parameter.
    n_orient : int
        Number of dipoles per location (typically 1 or 3).
    shape : None | tuple
        Shape of TF coefficients matrix.
    is_stft : bool
        If True, Y contains TF coefficients.

    Returns
    -------
    Y : array, shape (n_active, n_coefs)
        The shrunk data, restricted to active sources.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources.
    """
    if len(Y) == 0:
        # Nothing to shrink: empty result, empty active set.
        return np.zeros_like(Y), np.zeros((0,), dtype=bool)
    if shape is not None:
        full_shape = Y.shape
        Y = Y.reshape(*shape)
    n_positions = Y.shape[0] // n_orient

    if is_stft:
        group_norms = np.sqrt(stft_norm2(Y).reshape(n_positions, -1).sum(axis=1))
    else:
        squared = (Y * Y.conj()).real
        group_norms = np.sqrt(squared.reshape(n_positions, -1).sum(axis=1))

    # Ensure shrink is >= 0 while avoiding any division by zero
    shrink = np.maximum(1.0 - alpha / np.maximum(group_norms, alpha), 0.0)
    active_set = shrink > 0.0
    if n_orient > 1:
        # Expand position-level mask/factors to one entry per orientation.
        active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
        shrink = np.tile(shrink[:, None], [1, n_orient]).ravel()
    Y = Y[active_set]
    if shape is None:
        Y *= shrink[active_set][:, np.newaxis]
    else:
        Y *= shrink[active_set][:, np.newaxis, np.newaxis]
        Y = Y.reshape(-1, *full_shape[1:])
    return Y, active_set
def prox_l1(Y, alpha, n_orient):
    """Proximity operator for the l1 norm with multiple-orientation support.

    Plain soft-thresholding when ``n_orient == 1``; block soft-thresholding
    (L2 over orientations, L1 over space and time) when ``n_orient > 1``.
    See also [1]_.

    Parameters
    ----------
    Y : array, shape (n_sources, n_coefs)
        The input data.
    alpha : float
        The regularization parameter.
    n_orient : int
        Number of dipoles per location (typically 1 or 3).

    Returns
    -------
    Y : array, shape (n_active, n_coefs)
        The shrunk data, restricted to active sources.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources.

    References
    ----------
    .. [1] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hämäläinen, M. Kowalski
       "Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
       non-stationary source activations",
       Neuroimage, Volume 70, pp. 410-422, 15 April 2013.
       DOI: 10.1016/j.neuroimage.2012.12.051
    """
    n_positions = Y.shape[0] // n_orient
    # Per-(coefficient, position) L2 norm over the n_orient orientations.
    squared = (Y * Y.conj()).real
    norms = np.sqrt(squared.T.reshape(-1, n_orient).sum(axis=1))
    # Ensure shrink is >= 0 while avoiding any division by zero
    shrink = np.maximum(1.0 - alpha / np.maximum(norms, alpha), 0.0)
    shrink = shrink.reshape(-1, n_positions).T
    active_set = np.any(shrink > 0.0, axis=1)
    shrink = shrink[active_set]
    if n_orient > 1:
        active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
    Y = Y[active_set]
    if len(Y) > 0:
        # Apply the per-position factors to every orientation row.
        for o in range(n_orient):
            Y[o::n_orient] *= shrink
    return Y, active_set
def dgap_l21(M, G, X, active_set, alpha, n_orient):
    """Duality gap for the L21 mixed-norm inverse problem.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_sources)
        The gain matrix a.k.a. lead field.
    X : array, shape (n_active, n_times)
        Sources (rows matching ``active_set``).
    active_set : array of bool, shape (n_sources, )
        Mask of active sources.
    alpha : float
        The regularization parameter.
    n_orient : int
        Number of dipoles per location (typically 1 or 3).

    Returns
    -------
    gap : float
        Dual gap (p_obj - d_obj).
    p_obj : float
        Primal objective.
    d_obj : float
        Dual objective.
    R : array, shape (n_sensors, n_times)
        Current residual (M - G[:, active_set] @ X).

    References
    ----------
    .. [1] A. Gramfort, M. Kowalski, M. Hämäläinen,
       "Mixed-norm estimates for the M/EEG inverse problem using accelerated
       gradient methods", Physics in Medicine and Biology, 2012.
       https://doi.org/10.1088/0031-9155/57/7/1937
    """
    GX = np.dot(G[:, active_set], X)
    R = M - GX
    nR2 = sum_squared(R)
    penalty = norm_l21(X, n_orient, copy=True)
    p_obj = 0.5 * nR2 + alpha * penalty
    # Feasible dual point obtained by rescaling the residual.
    dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False)
    scaling = min(alpha / dual_norm, 1.0)
    d_obj = (scaling - 0.5 * scaling ** 2) * nR2 + scaling * np.sum(R * GX)
    gap = p_obj - d_obj
    return gap, p_obj, d_obj, R
@verbose
def _mixed_norm_solver_prox(M, G, alpha, lipschitz_constant, maxit=200,
                            tol=1e-8, verbose=None, init=None, n_orient=1,
                            dgap_freq=10):
    """Solve L21 inverse problem with proximal iterations and FISTA.

    Returns ``(X, active_set, E)``: the active sources, a boolean mask over
    all sources, and the primal objective values recorded every
    ``dgap_freq`` iterations.
    """
    n_sensors, n_times = M.shape
    _, n_sources = G.shape

    # When there are fewer sources than sensors, precompute the Gram matrix
    # and G.T @ M so each iteration works in source space.
    if n_sources < n_sensors:
        gram = np.dot(G.T, G)
        GTM = np.dot(G.T, M)
    else:
        gram = None

    if init is None:
        # X starts as scalar 0.0; it is replaced by an array returned from
        # prox_l21 on the first iteration.
        X = 0.0
        R = M.copy()
        if gram is not None:
            R = np.dot(G.T, R)
    else:
        X = init
        if gram is None:
            R = M - np.dot(G, X)
        else:
            R = GTM - np.dot(gram, X)

    t = 1.0
    Y = np.zeros((n_sources, n_times))  # FISTA aux variable
    E = []  # track primal objective function
    highest_d_obj = - np.inf
    active_set = np.ones(n_sources, dtype=bool)  # start with full AS

    for i in range(maxit):
        X0, active_set_0 = X, active_set  # store previous values
        if gram is None:
            Y += np.dot(G.T, R) / lipschitz_constant  # ISTA step
        else:
            Y += R / lipschitz_constant  # ISTA step
        X, active_set = prox_l21(Y, alpha / lipschitz_constant, n_orient)

        # FISTA momentum update: extrapolate Y from the last two iterates.
        t0 = t
        t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
        Y.fill(0.0)
        dt = ((t0 - 1.0) / t)
        Y[active_set] = (1.0 + dt) * X
        Y[active_set_0] -= dt * X0
        Y_as = active_set_0 | active_set

        # Residual recomputed only over the union of the two active sets.
        if gram is None:
            R = M - np.dot(G[:, Y_as], Y[Y_as])
        else:
            R = GTM - np.dot(gram[:, Y_as], Y[Y_as])

        # Duality-gap convergence check, evaluated every dgap_freq iters.
        if (i + 1) % dgap_freq == 0:
            _, p_obj, d_obj, _ = dgap_l21(M, G, X, active_set, alpha,
                                          n_orient)
            highest_d_obj = max(d_obj, highest_d_obj)
            gap = p_obj - highest_d_obj
            E.append(p_obj)
            logger.debug("p_obj : %s -- gap : %s" % (p_obj, gap))
            if gap < tol:
                logger.debug('Convergence reached ! (gap: %s < %s)'
                             % (gap, tol))
                break
    return X, active_set, E
@verbose
def _mixed_norm_solver_cd(M, G, alpha, lipschitz_constant, maxit=10000,
                          tol=1e-8, verbose=None, init=None, n_orient=1,
                          dgap_freq=10):
    """Solve the L21 inverse problem via sklearn coordinate descent.

    Note: unlike the prox/bcd solvers, the third returned value is the
    final primal objective (a float), not a list of objective values.
    """
    from sklearn.linear_model import MultiTaskLasso

    assert M.ndim == G.ndim and M.shape[0] == G.shape[0]

    # sklearn scales its objective by n_samples; rescale alpha/tol to match.
    clf = MultiTaskLasso(alpha=alpha / len(M), tol=tol / sum_squared(M),
                         normalize=False, fit_intercept=False, max_iter=maxit,
                         warm_start=True)
    clf.coef_ = init.T if init is not None else np.zeros((G.shape[1], M.shape[1])).T
    clf.fit(G, M)

    X = clf.coef_.T
    active_set = np.any(X, axis=1)
    X = X[active_set]
    _, p_obj, _, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
    return X, active_set, p_obj
@verbose
def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200,
                           tol=1e-8, verbose=None, init=None, n_orient=1,
                           dgap_freq=10):
    """Solve L21 inverse problem with block coordinate descent.

    Delegates the per-position updates to :func:`_bcd` and checks the
    duality gap every ``dgap_freq`` passes. ``lipschitz_constant`` is
    indexed per position inside ``_bcd`` (one value per source position).
    """
    n_sensors, n_times = M.shape
    n_sensors, n_sources = G.shape
    n_positions = n_sources // n_orient

    if init is None:
        X = np.zeros((n_sources, n_times))
        R = M.copy()
    else:
        X = init
        R = M - np.dot(G, X)

    E = []  # track primal objective function
    highest_d_obj = - np.inf
    # Starts empty here (contrast with the prox solver); _bcd fills it in.
    active_set = np.zeros(n_sources, dtype=bool)  # start with full AS

    alpha_lc = alpha / lipschitz_constant

    # First make G fortran for faster access to blocks of columns
    G = np.asfortranarray(G)
    # Ensure these are correct for dgemm
    assert R.dtype == np.float64
    assert G.dtype == np.float64
    one_ovr_lc = 1. / lipschitz_constant

    # assert that all the multiplied matrices are fortran contiguous
    assert X.T.flags.f_contiguous
    assert R.T.flags.f_contiguous
    assert G.flags.f_contiguous
    # storing list of contiguous arrays
    list_G_j_c = []
    for j in range(n_positions):
        idx = slice(j * n_orient, (j + 1) * n_orient)
        list_G_j_c.append(np.ascontiguousarray(G[:, idx]))

    for i in range(maxit):
        # One full BCD pass; updates X, R and active_set in place.
        _bcd(G, X, R, active_set, one_ovr_lc, n_orient, n_positions,
             alpha_lc, list_G_j_c)

        if (i + 1) % dgap_freq == 0:
            _, p_obj, d_obj, _ = dgap_l21(M, G, X[active_set], active_set,
                                          alpha, n_orient)
            highest_d_obj = max(d_obj, highest_d_obj)
            gap = p_obj - highest_d_obj
            E.append(p_obj)
            logger.debug("Iteration %d :: p_obj %f :: dgap %f :: n_active %d" %
                         (i + 1, p_obj, gap, np.sum(active_set) / n_orient))

            if gap < tol:
                logger.debug('Convergence reached ! (gap: %s < %s)'
                             % (gap, tol))
                break

    X = X[active_set]
    return X, active_set, E
def _bcd(G, X, R, active_set, one_ovr_lc, n_orient, n_positions,
         alpha_lc, list_G_j_c):
    """Implement one full pass of BCD.

    BCD stands for Block Coordinate Descent.
    This function make use of scipy.linalg.get_blas_funcs to speed reasons.

    Parameters
    ----------
    G : array, shape (n_sensors, n_active)
        The gain matrix a.k.a. lead field.
    X : array, shape (n_sources, n_times)
        Sources, modified in place.
    R : array, shape (n_sensors, n_times)
        The residuals: R = M - G @ X, modified in place.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources, modified in place.
    one_ovr_lc : array, shape (n_positions, )
        One over the lipschitz constants.
    n_orient : int
        Number of dipoles per positions (typically 1 or 3).
    n_positions : int
        Number of source positions.
    alpha_lc: array, shape (n_positions, )
        alpha * (Lipschitz constants).
    list_G_j_c : list of array
        Pre-extracted C-contiguous column blocks of G, one per position
        (used as dgemm operands).
    """
    # Scratch buffer reused for every position's candidate update.
    X_j_new = np.zeros_like(X[0:n_orient, :], order='C')

    for j, G_j_c in enumerate(list_G_j_c):
        idx = slice(j * n_orient, (j + 1) * n_orient)
        G_j = G[:, idx]
        X_j = X[idx]
        # Gradient step for this block, written straight into X_j_new.
        dgemm(alpha=one_ovr_lc[j], beta=0., a=R.T, b=G_j, c=X_j_new.T,
              overwrite_c=True)
        # X_j_new = G_j.T @ R
        # Mathurin's trick to avoid checking all the entries
        was_non_zero = X_j[0, 0] != 0
        # was_non_zero = np.any(X_j)
        if was_non_zero:
            # Temporarily remove this block's contribution from the residual.
            dgemm(alpha=1., beta=1., a=X_j.T, b=G_j_c.T, c=R.T,
                  overwrite_c=True)
            # R += np.dot(G_j, X_j)
            X_j_new += X_j
        block_norm = sqrt(sum_squared(X_j_new))
        if block_norm <= alpha_lc[j]:
            # Below threshold: block is shrunk to zero and deactivated.
            X_j.fill(0.)
            active_set[idx] = False
        else:
            # Soft-threshold the block and fold it back into the residual.
            shrink = max(1.0 - alpha_lc[j] / block_norm, 0.0)
            X_j_new *= shrink
            dgemm(alpha=-1., beta=1., a=X_j_new.T, b=G_j_c.T, c=R.T,
                  overwrite_c=True)
            # R -= np.dot(G_j, X_j_new)
            X_j[:] = X_j_new
            active_set[idx] = True
@verbose
def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
                      active_set_size=50, debias=True, n_orient=1,
                      solver='auto', return_gap=False, dgap_freq=10):
    """Solve L1/L2 mixed-norm inverse problem with active set strategy.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_dipoles)
        The gain matrix a.k.a. lead field.
    alpha : float
        The regularization parameter. It should be between 0 and 100.
        A value of 100 will lead to an empty active set (no active source).
    maxit : int
        The number of iterations.
    tol : float
        Tolerance on dual gap for convergence checking.
    %(verbose)s
    active_set_size : int
        Size of active set increase at each iteration.
    debias : bool
        Debias source estimates.
    n_orient : int
        The number of orientation (1 : fixed or 3 : free or loose).
    solver : 'prox' | 'cd' | 'bcd' | 'auto'
        The algorithm to use for the optimization.
    return_gap : bool
        Return final duality gap.
    dgap_freq : int
        The duality gap is computed every dgap_freq iterations of the solver on
        the active set.

    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates.
    active_set : array
        The mask of active sources.
    E : list
        The value of the objective function over the iterations.
    gap : float
        Final duality gap. Returned only if return_gap is True.

    References
    ----------
    .. [1] A. Gramfort, M. Kowalski, M. Hämäläinen,
       "Mixed-norm estimates for the M/EEG inverse problem using accelerated
       gradient methods", Physics in Medicine and Biology, 2012.
       https://doi.org/10.1088/0031-9155/57/7/1937

    .. [2] D. Strohmeier, Y. Bekhti, J. Haueisen, A. Gramfort,
       "The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal
       MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging,
       Volume 35 (10), pp. 2218-2228, 15 April 2013.
    """
    n_dipoles = G.shape[1]
    n_positions = n_dipoles // n_orient
    n_sensors, n_times = M.shape
    # Smallest alpha yielding an empty active set (logged for reference).
    alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
    logger.info("-- ALPHA MAX : %s" % alpha_max)
    alpha = float(alpha)

    # The 'cd' solver needs sklearn's MultiTaskLasso.
    has_sklearn = True
    try:
        from sklearn.linear_model import MultiTaskLasso  # noqa: F401
    except ImportError:
        has_sklearn = False

    if solver == 'auto':
        if has_sklearn and (n_orient == 1):
            solver = 'cd'
        else:
            solver = 'bcd'

    # 'cd' only supports fixed orientation; fall back to 'bcd' otherwise.
    if solver == 'cd':
        if n_orient == 1 and not has_sklearn:
            warn('Scikit-learn >= 0.12 cannot be found. Using block coordinate'
                 ' descent instead of coordinate descent.')
            solver = 'bcd'
        if n_orient > 1:
            warn('Coordinate descent is only available for fixed orientation. '
                 'Using block coordinate descent instead of coordinate '
                 'descent')
            solver = 'bcd'

    # Pick the low-level solver and its Lipschitz constant(s):
    # 'cd' needs none, 'bcd' uses one per position, 'prox' a single scalar.
    if solver == 'cd':
        logger.info("Using coordinate descent")
        l21_solver = _mixed_norm_solver_cd
        lc = None
    elif solver == 'bcd':
        logger.info("Using block coordinate descent")
        l21_solver = _mixed_norm_solver_bcd
        G = np.asfortranarray(G)
        if n_orient == 1:
            lc = np.sum(G * G, axis=0)
        else:
            lc = np.empty(n_positions)
            for j in range(n_positions):
                G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
                lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
    else:
        logger.info("Using proximal iterations")
        l21_solver = _mixed_norm_solver_prox
        lc = 1.01 * linalg.norm(G, ord=2) ** 2

    if active_set_size is not None:
        E = list()
        highest_d_obj = - np.inf
        X_init = None
        active_set = np.zeros(n_dipoles, dtype=bool)
        # Seed the active set with the positions most correlated with M.
        idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient))
        new_active_idx = idx_large_corr[-active_set_size:]
        if n_orient > 1:
            # Expand position indices to one index per orientation.
            new_active_idx = (n_orient * new_active_idx[:, None] +
                              np.arange(n_orient)[None, :]).ravel()
        active_set[new_active_idx] = True
        as_size = np.sum(active_set)
        for k in range(maxit):
            # Lipschitz constants restricted to the current active set.
            if solver == 'bcd':
                lc_tmp = lc[active_set[::n_orient]]
            elif solver == 'cd':
                lc_tmp = None
            else:
                lc_tmp = 1.01 * linalg.norm(G[:, active_set], ord=2) ** 2
            X, as_, _ = l21_solver(M, G[:, active_set], alpha, lc_tmp,
                                   maxit=maxit, tol=tol, init=X_init,
                                   n_orient=n_orient, dgap_freq=dgap_freq)
            # Keep only the sources the sub-solver left active.
            active_set[active_set] = as_.copy()
            idx_old_active_set = np.where(active_set)[0]

            _, p_obj, d_obj, R = dgap_l21(M, G, X, active_set, alpha,
                                          n_orient)
            highest_d_obj = max(d_obj, highest_d_obj)
            gap = p_obj - highest_d_obj
            E.append(p_obj)
            logger.info("Iteration %d :: p_obj %f :: dgap %f ::"
                        "n_active_start %d :: n_active_end %d" % (
                            k + 1, p_obj, gap, as_size // n_orient,
                            np.sum(active_set) // n_orient))
            if gap < tol:
                logger.info('Convergence reached ! (gap: %s < %s)'
                            % (gap, tol))
                break

            # add sources if not last iteration
            if k < (maxit - 1):
                # Grow the active set with the positions most correlated
                # with the current residual R.
                idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R),
                                                         n_orient))
                new_active_idx = idx_large_corr[-active_set_size:]
                if n_orient > 1:
                    new_active_idx = (n_orient * new_active_idx[:, None] +
                                      np.arange(n_orient)[None, :])
                    new_active_idx = new_active_idx.ravel()
                active_set[new_active_idx] = True
                idx_active_set = np.where(active_set)[0]
                as_size = np.sum(active_set)
                # Warm-start: place previous X rows at their new positions.
                X_init = np.zeros((as_size, n_times), dtype=X.dtype)
                idx = np.searchsorted(idx_active_set, idx_old_active_set)
                X_init[idx] = X
        else:
            # for/else: only reached if the loop exhausted maxit iterations
            # without hitting the convergence break above.
            warn('Did NOT converge ! (gap: %s > %s)' % (gap, tol))
    else:
        # No active set strategy: solve over all dipoles at once.
        X, active_set, E = l21_solver(M, G, alpha, lc, maxit=maxit,
                                      tol=tol, n_orient=n_orient, init=None)
        if return_gap:
            gap = dgap_l21(M, G, X, active_set, alpha, n_orient)[0]

    if np.any(active_set) and debias:
        # Rescale sources to compensate for the shrinkage bias.
        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
        X *= bias[:, np.newaxis]

    logger.info('Final active set size: %s' % (np.sum(active_set) // n_orient))

    if return_gap:
        return X, active_set, E, gap
    else:
        return X, active_set, E
@verbose
def iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter, maxit=3000,
                                tol=1e-8, verbose=None, active_set_size=50,
                                debias=True, n_orient=1, dgap_freq=10,
                                solver='auto'):
    """Solve L0.5/L2 mixed-norm inverse problem with active set strategy.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_dipoles)
        The gain matrix a.k.a. lead field.
    alpha : float
        The regularization parameter. It should be between 0 and 100.
        A value of 100 will lead to an empty active set (no active source).
    n_mxne_iter : int
        The number of MxNE iterations. If > 1, iterative reweighting
        is applied.
    maxit : int
        The number of iterations.
    tol : float
        Tolerance on dual gap for convergence checking.
    %(verbose)s
    active_set_size : int
        Size of active set increase at each iteration.
    debias : bool
        Debias source estimates.
    n_orient : int
        The number of orientation (1 : fixed or 3 : free or loose).
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations.
    solver : 'prox' | 'cd' | 'bcd' | 'auto'
        The algorithm to use for the optimization.

    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates.
    active_set : array
        The mask of active sources.
    E : list
        The value of the objective function over the iterations.

    References
    ----------
    .. [1] D. Strohmeier, Y. Bekhti, J. Haueisen, A. Gramfort,
       "The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal
       MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging,
       Volume 35 (10), pp. 2218-2228, 2016.
    """
    def g(w):
        # Reweighting function: sqrt of the group L2 norms (-> L0.5 penalty).
        return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient)))

    def gprime(w):
        # Derivative-based weights, repeated once per orientation.
        return 2. * np.repeat(g(w), n_orient).ravel()

    E = list()

    active_set = np.ones(G.shape[1], dtype=bool)
    weights = np.ones(G.shape[1])
    X = np.zeros((G.shape[1], M.shape[1]))
    for k in range(n_mxne_iter):
        X0 = X.copy()
        active_set_0 = active_set.copy()
        # Reweighted gain restricted to the surviving sources.
        G_tmp = G[:, active_set] * weights[np.newaxis, :]

        if active_set_size is not None:
            if np.sum(active_set) > (active_set_size * n_orient):
                X, _active_set, _ = mixed_norm_solver(
                    M, G_tmp, alpha, debias=False, n_orient=n_orient,
                    maxit=maxit, tol=tol, active_set_size=active_set_size,
                    dgap_freq=dgap_freq, solver=solver, verbose=verbose)
            else:
                X, _active_set, _ = mixed_norm_solver(
                    M, G_tmp, alpha, debias=False, n_orient=n_orient,
                    maxit=maxit, tol=tol, active_set_size=None,
                    dgap_freq=dgap_freq, solver=solver, verbose=verbose)
        else:
            X, _active_set, _ = mixed_norm_solver(
                M, G_tmp, alpha, debias=False, n_orient=n_orient,
                maxit=maxit, tol=tol, active_set_size=None,
                dgap_freq=dgap_freq, solver=solver, verbose=verbose)

        logger.info('active set size %d' % (_active_set.sum() / n_orient))

        if _active_set.sum() > 0:
            active_set[active_set] = _active_set

            # Reapply weights to have correct unit
            X *= weights[_active_set][:, np.newaxis]
            weights = gprime(X)
            p_obj = 0.5 * linalg.norm(M - np.dot(G[:, active_set], X),
                                      'fro') ** 2. + alpha * np.sum(g(X))
            E.append(p_obj)

            # Check convergence
            if ((k >= 1) and np.all(active_set == active_set_0) and
                    np.all(np.abs(X - X0) < tol)):
                # Fixed: was a bare print(); every other message in this
                # module goes through the module logger.
                logger.info('Convergence reached after %d reweightings!' % k)
                break
        else:
            # Everything was pruned: empty solution, objective is data norm.
            active_set = np.zeros_like(active_set)
            p_obj = 0.5 * linalg.norm(M) ** 2.
            E.append(p_obj)
            break

    if np.any(active_set) and debias:
        # Rescale sources to compensate for the shrinkage bias.
        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
        X *= bias[:, np.newaxis]

    return X, active_set, E
###############################################################################
# TF-MxNE
@verbose
def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None):
    """Estimate the Lipschitz constant used by FISTA via power iteration.

    Iterates v <- phi(G.T G phiT(v)) / ||.||_inf until the dominant value
    stabilizes within ``tol`` (at most 100 iterations).
    """
    n_times = M.shape[1]
    n_points = G.shape[1]
    lipschitz = 1e100  # start absurdly large so the first ratio test fails
    vec = phi(np.ones((n_points, n_times), dtype=np.float64))
    for it in range(100):
        previous = lipschitz
        logger.info('Lipschitz estimation: iteration = %d' % it)
        x = np.real(phiT(vec))
        w = phi(np.dot(G.T, np.dot(G, x)))
        lipschitz = np.max(np.abs(w))  # l_inf norm
        vec = w / lipschitz
        if abs((lipschitz - previous) / previous) < tol:
            break
    return lipschitz
def safe_max_abs(A, ia):
    """Return np.max(np.abs(A[ia])), or 0. when the mask selects nothing."""
    if not np.sum(ia):  # empty selection: np.max on it would raise
        return 0.
    return np.max(np.abs(A[ia]))
def safe_max_abs_diff(A, ia, B, ib):
    """Compute np.max(np.abs(A[ia] - B[ib])), tolerating empty selections.

    An empty selection on either side is treated as 0. Bug fix: the guard
    for ``B`` previously tested ``np.sum(ia)`` (A's mask) instead of
    ``np.sum(ib)``, so B was zeroed out whenever A's selection was empty,
    regardless of B's own mask.
    """
    A = A[ia] if np.sum(ia) else 0.0
    B = B[ib] if np.sum(ib) else 0.0
    return np.max(np.abs(A - B))
class _Phi(object):
    """Have phi stft as callable w/o using a lambda that does not pickle."""
    def __init__(self, wsize, tstep, n_coefs):  # noqa: D102
        self.wsize = np.atleast_1d(wsize)
        self.tstep = np.atleast_1d(tstep)
        self.n_coefs = np.atleast_1d(n_coefs)
        # One TF dictionary per (wsize, tstep) pair; > 1 means multiscale.
        self.n_dicts = len(tstep)
        self.n_freqs = wsize // 2 + 1
        self.n_steps = self.n_coefs // self.n_freqs
    def __call__(self, x):  # noqa: D105
        # Apply the STFT(s): rows are sources, columns TF coefficients.
        if self.n_dicts == 1:
            return stft(x, self.wsize[0], self.tstep[0],
                        verbose=False).reshape(-1, self.n_coefs[0])
        else:
            # Stack the dictionaries horizontally. The 1/sqrt(n_dicts)
            # factor mirrors the same scaling in _PhiT.__call__ so the
            # round trip keeps consistent amplitude.
            return np.hstack(
                [stft(x, self.wsize[i], self.tstep[i], verbose=False).reshape(
                    -1, self.n_coefs[i]) for i in range(self.n_dicts)]) / np.sqrt(
                self.n_dicts)
    def norm(self, z, ord=2):
        """Squared L2 norm if ord == 2 and L1 norm if order == 1."""
        if ord not in (1, 2):
            raise ValueError('Only supported norm order are 1 and 2. '
                             'Got ord = %s' % ord)
        stft_norm = stft_norm1 if ord == 1 else stft_norm2
        norm = 0.
        if len(self.n_coefs) > 1:
            # Split the horizontally-stacked coefficients back into their
            # per-dictionary parts before computing each norm.
            z_ = np.array_split(np.atleast_2d(z), np.cumsum(self.n_coefs)[:-1],
                                axis=1)
        else:
            z_ = [np.atleast_2d(z)]
        for i in range(len(z_)):
            norm += stft_norm(
                z_[i].reshape(-1, self.n_freqs[i], self.n_steps[i]))
        return norm
class _PhiT(object):
    """Have phi.T istft as callable w/o using a lambda that does not pickle."""
    def __init__(self, tstep, n_freqs, n_steps, n_times):  # noqa: D102
        self.tstep = tstep
        self.n_freqs = n_freqs
        self.n_steps = n_steps
        self.n_times = n_times
        # A plain scalar tstep means a single TF dictionary.
        self.n_dicts = len(tstep) if isinstance(tstep, np.ndarray) else 1
        self.n_coefs = self.n_freqs * self.n_steps
    def __call__(self, z):  # noqa: D105
        # Inverse STFT back to the time domain.
        if self.n_dicts == 1:
            return istft(z.reshape(-1, self.n_freqs[0], self.n_steps[0]),
                         self.tstep[0], self.n_times)
        else:
            # Sum the reconstructions of all dictionaries; the
            # 1/sqrt(n_dicts) factor mirrors the scaling in _Phi.__call__.
            x_out = np.zeros((z.shape[0], self.n_times))
            z_ = np.array_split(z, np.cumsum(self.n_coefs)[:-1], axis=1)
            for i in range(self.n_dicts):
                x_out += istft(z_[i].reshape(-1, self.n_freqs[i],
                                             self.n_steps[i]),
                               self.tstep[i], self.n_times)
            return x_out / np.sqrt(self.n_dicts)
def norm_l21_tf(Z, phi, n_orient, w_space=None):
    """L21 norm of TF coefficients: L2 within each n_orient group, L1 across.

    Optionally weighted per position by ``w_space``.
    """
    if not Z.shape[0]:  # no active source: the norm is zero
        return 0.
    per_position = np.sqrt(
        phi.norm(Z, ord=2).reshape(-1, n_orient).sum(axis=1))
    if w_space is not None:
        per_position = per_position * w_space
    return per_position.sum()
def norm_l1_tf(Z, phi, n_orient, w_time):
    """L1 norm of TF coefficients, after an L2 across orientations.

    Optionally weighted per coefficient by ``w_time``.
    """
    if not Z.shape[0]:  # empty input -> zero norm
        return 0.
    n_positions = Z.shape[0] // n_orient
    # L2 amplitude over the n_orient dipoles of each location, per coef.
    amplitude = np.sqrt(np.sum(
        (np.abs(Z) ** 2.).reshape((n_orient, -1), order='F'), axis=0))
    amplitude = amplitude.reshape((n_positions, -1), order='F')
    if w_time is not None:
        amplitude = amplitude * w_time
    return phi.norm(amplitude, ord=1).sum()
def norm_epsilon(Y, l1_ratio, phi, w_space=1., w_time=None):
    """Weighted epsilon norm.
    The weighted epsilon norm is the dual norm of::
    w_{space} * (1. - l1_ratio) * ||Y||_2 + l1_ratio * ||Y||_{1, w_{time}}.
    where `||Y||_{1, w_{time}} = (np.abs(Y) * w_time).sum()`
    Warning: it takes into account the fact that Y only contains coefficients
    corresponding to the positive frequencies (see `stft_norm2()`): some
    entries will be counted twice. It is also assumed that all entries of both
    Y and w_time are non-negative.
    Parameters
    ----------
    Y : array, shape (n_coefs,)
        The input data.
    l1_ratio : float between 0 and 1
        Tradeoff between L2 and L1 regularization. When it is 0, no temporal
        regularization is applied.
    phi : instance of _Phi
        The TF operator.
    w_space : float
        Scalar weight of the L2 norm. By default, it is taken equal to 1.
    w_time : array, shape (n_coefs, ) | None
        Weights of each TF coefficient in the L1 norm. If None, weights equal
        to 1 are used.
    Returns
    -------
    nu : float
        The value of the dual norm evaluated at Y.
    References
    ----------
    .. [1] E. Ndiaye, O. Fercoq, A. Gramfort, J. Salmon,
       "GAP Safe Screening Rules for Sparse-Group Lasso", Advances in Neural
       Information Processing Systems (NIPS), 2016.
    .. [2] O. Burdakov, B. Merkulov,
       "On a new norm for data fitting and optimization problems",
       LiTH-MAT, 2001.
    """
    # since the solution is invariant to flipped signs in Y, all entries
    # of Y are assumed positive
    # Add negative freqs: count all freqs twice except first and last:
    freqs_count = np.full(len(Y), 2)
    for i, fc in enumerate(np.array_split(freqs_count,
                                          np.cumsum(phi.n_coefs)[:-1])):
        # First (DC) and last (Nyquist) frequency rows are counted once.
        fc[:phi.n_steps[i]] = 1
        fc[-phi.n_steps[i]:] = 1
    # exclude 0 weights:
    if w_time is not None:
        nonzero_weights = (w_time != 0.0)
        Y = Y[nonzero_weights]
        freqs_count = freqs_count[nonzero_weights]
        w_time = w_time[nonzero_weights]
    norm_inf_Y = np.max(Y / w_time) if w_time is not None else np.max(Y)
    if l1_ratio == 1.:
        # dual norm of L1 weighted is Linf with inverse weights
        return norm_inf_Y
    elif l1_ratio == 0.:
        # dual norm of L2 is L2
        return np.sqrt(phi.norm(Y[None, :], ord=2).sum())
    if norm_inf_Y == 0.:
        return 0.
    # ignore some values of Y by lower bound on dual norm:
    if w_time is None:
        idx = Y > l1_ratio * norm_inf_Y
    else:
        idx = Y > l1_ratio * np.max(Y / (w_space * (1. - l1_ratio) +
                                         l1_ratio * w_time))
    if idx.sum() == 1:
        return norm_inf_Y
    # sort both Y / w_time and freqs_count at the same time
    if w_time is not None:
        idx_sort = np.argsort(Y[idx] / w_time[idx])[::-1]
        w_time = w_time[idx][idx_sort]
    else:
        idx_sort = np.argsort(Y[idx])[::-1]
    Y = Y[idx][idx_sort]
    freqs_count = freqs_count[idx][idx_sort]
    # Duplicate entries that stand for a positive/negative frequency pair.
    Y = np.repeat(Y, freqs_count)
    if w_time is not None:
        w_time = np.repeat(w_time, freqs_count)
    K = Y.shape[0]
    # Partial sums used in the closed-form root of the epsilon-norm
    # equation (see reference [2]); the unweighted branch is the special
    # case w_time == 1.
    if w_time is None:
        p_sum_Y2 = np.cumsum(Y ** 2)
        p_sum_w2 = np.arange(1, K + 1)
        p_sum_Yw = np.cumsum(Y)
        upper = p_sum_Y2 / Y ** 2 - 2. * p_sum_Yw / Y + p_sum_w2
    else:
        p_sum_Y2 = np.cumsum(Y ** 2)
        p_sum_w2 = np.cumsum(w_time ** 2)
        p_sum_Yw = np.cumsum(Y * w_time)
        upper = (p_sum_Y2 / (Y / w_time) ** 2 -
                 2. * p_sum_Yw / (Y / w_time) + p_sum_w2)
    upper_greater = np.where(upper > w_space ** 2 * (1. - l1_ratio) ** 2 /
                             l1_ratio ** 2)[0]
    # i0 indexes the last prefix for which the root stays in range.
    i0 = upper_greater[0] - 1 if upper_greater.size else K - 1
    p_sum_Y2 = p_sum_Y2[i0]
    p_sum_w2 = p_sum_w2[i0]
    p_sum_Yw = p_sum_Yw[i0]
    denom = l1_ratio ** 2 * p_sum_w2 - w_space ** 2 * (1. - l1_ratio) ** 2
    if np.abs(denom) < 1e-10:
        # Quadratic degenerates to a linear equation.
        return p_sum_Y2 / (2. * l1_ratio * p_sum_Yw)
    else:
        delta = (l1_ratio * p_sum_Yw) ** 2 - p_sum_Y2 * denom
        return (l1_ratio * p_sum_Yw - np.sqrt(delta)) / denom
def norm_epsilon_inf(G, R, phi, l1_ratio, n_orient, w_space=None, w_time=None):
    """Weighted epsilon-inf norm of phi(np.dot(G.T, R)).

    Parameters
    ----------
    G : array, shape (n_sensors, n_sources)
        Gain matrix a.k.a. lead field.
    R : array, shape (n_sensors, n_times)
        Residual.
    phi : instance of _Phi
        The TF operator.
    l1_ratio : float between 0 and 1
        Parameter controlling the tradeoff between L21 and L1 regularization.
        0 corresponds to an absence of temporal regularization, ie MxNE.
    n_orient : int
        Number of dipoles per location (typically 1 or 3).
    w_space : array, shape (n_positions,) or None.
        Weights for the L2 term of the epsilon norm. If None, weights are
        all equal to 1.
    w_time : array, shape (n_positions, n_coefs) or None
        Weights for the L1 term of the epsilon norm. If None, weights are
        all equal to 1.

    Returns
    -------
    nu : float
        The maximum value of the epsilon norms over groups of n_orient dipoles
        (consecutive rows of phi(np.dot(G.T, R))).
    """
    n_positions = G.shape[1] // n_orient
    coefs = np.abs(phi(np.dot(G.T, R)))
    # Collapse the n_orient dipoles of each location with an L2 norm.
    coefs = np.linalg.norm(coefs.reshape((n_orient, -1), order='F'), axis=0)
    coefs = coefs.reshape((n_positions, -1), order='F')
    nu = 0.
    for pos in range(n_positions):
        w_t = None if w_time is None else w_time[pos]
        w_s = 1. if w_space is None else w_space[pos]
        nu = max(nu, norm_epsilon(coefs[pos], l1_ratio, phi,
                                  w_space=w_s, w_time=w_t))
    return nu
def dgap_l21l1(M, G, Z, active_set, alpha_space, alpha_time, phi, phiT,
               n_orient, highest_d_obj, w_space=None, w_time=None):
    """Duality gap for the time-frequency mixed norm inverse problem.
    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_sources)
        Gain matrix a.k.a. lead field.
    Z : array, shape (n_active, n_coefs)
        Sources in TF domain.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources.
    alpha_space : float
        The spatial regularization parameter.
    alpha_time : float
        The temporal regularization parameter. The higher it is the smoother
        will be the estimated time series.
    phi : instance of _Phi
        The TF operator.
    phiT : instance of _PhiT
        The transpose of the TF operator.
    n_orient : int
        Number of dipoles per locations (typically 1 or 3).
    highest_d_obj : float
        The highest value of the dual objective so far.
    w_space : array, shape (n_positions, )
        Array of spatial weights.
    w_time : array, shape (n_positions, n_coefs)
        Array of TF weights.
    Returns
    -------
    gap : float
        Dual gap
    p_obj : float
        Primal objective
    d_obj : float
        Dual objective. gap = p_obj - d_obj
    R : array, shape (n_sensors, n_times)
        Current residual (M - G * X)
    References
    ----------
    .. [1] A. Gramfort, M. Kowalski, M. Hämäläinen,
       "Mixed-norm estimates for the M/EEG inverse problem using accelerated
       gradient methods", Physics in Medicine and Biology, 2012.
       https://doi.org/10.1088/0031-9155/57/7/1937
    .. [2] E. Ndiaye, O. Fercoq, A. Gramfort, J. Salmon,
       "GAP Safe Screening Rules for Sparse-Group Lasso", Advances in Neural
       Information Processing Systems (NIPS), 2016.
    """
    X = phiT(Z)
    GX = np.dot(G[:, active_set], X)
    R = M - GX
    # some functions need w_time only on active_set, other need it completely
    if w_time is not None:
        w_time_as = w_time[active_set[::n_orient]]
    else:
        w_time_as = None
    if w_space is not None:
        w_space_as = w_space[active_set[::n_orient]]
    else:
        w_space_as = None
    penaltyl1 = norm_l1_tf(Z, phi, n_orient, w_time_as)
    penaltyl21 = norm_l21_tf(Z, phi, n_orient, w_space_as)
    nR2 = sum_squared(R)
    p_obj = 0.5 * nR2 + alpha_space * penaltyl21 + alpha_time * penaltyl1
    l1_ratio = alpha_time / (alpha_space + alpha_time)
    dual_norm = norm_epsilon_inf(G, R, phi, l1_ratio, n_orient,
                                 w_space=w_space, w_time=w_time)
    # Scale the residual so it becomes dual-feasible before evaluating
    # the dual objective.
    scaling = min(1., (alpha_space + alpha_time) / dual_norm)
    d_obj = (scaling - 0.5 * (scaling ** 2)) * nR2 + scaling * np.sum(R * GX)
    # Keep the best dual objective seen so far (the gap is then monotone).
    d_obj = max(d_obj, highest_d_obj)
    gap = p_obj - d_obj
    return gap, p_obj, d_obj, R
def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space,
                               alpha_time, lipschitz_constant, phi, phiT,
                               w_space=None, w_time=None, n_orient=1,
                               maxit=200, tol=1e-8, dgap_freq=10, perc=None,
                               timeit=True, verbose=None):
    """Run block coordinate descent sweeps for the TF L21+L1 problem.

    Z is a dict mapping position index -> TF coefficients (or 0.0 when
    inactive); it and ``active_set`` are updated in place as positions are
    visited. Returns ``(Z, active_set, E, converged)``.

    NOTE(review): ``timeit`` is accepted but never used in this body.
    """
    # First make G fortran for faster access to blocks of columns
    G = np.asfortranarray(G)
    n_sources = G.shape[1]
    n_positions = n_sources // n_orient
    Gd = G.copy()
    # Per-position column blocks of G for fast access in the BCD loop.
    G = dict(zip(np.arange(n_positions), np.hsplit(G, n_positions)))
    R = M.copy()  # residual
    active = np.where(active_set[::n_orient])[0]
    for idx in active:
        R -= np.dot(G[idx], phiT(Z[idx]))
    E = []  # track primal objective function
    # Pre-scale both regularization parameters by the per-position
    # Lipschitz constants (and the optional reweighting weights).
    if w_time is None:
        alpha_time_lc = alpha_time / lipschitz_constant
    else:
        alpha_time_lc = alpha_time * w_time / lipschitz_constant[:, None]
    if w_space is None:
        alpha_space_lc = alpha_space / lipschitz_constant
    else:
        alpha_space_lc = alpha_space * w_space / lipschitz_constant
    converged = False
    d_obj = - np.inf
    for i in range(maxit):
        for jj in candidates:
            ids = jj * n_orient
            ide = ids + n_orient
            G_j = G[jj]
            Z_j = Z[jj]
            active_set_j = active_set[ids:ide]
            was_active = np.any(active_set_j)
            # gradient step
            GTR = np.dot(G_j.T, R) / lipschitz_constant[jj]
            X_j_new = GTR.copy()
            if was_active:
                # Add back this position's contribution to the residual
                # before re-estimating it.
                X_j = phiT(Z_j)
                R += np.dot(G_j, X_j)
                X_j_new += X_j
            rows_norm = linalg.norm(X_j_new, 'fro')
            if rows_norm <= alpha_space_lc[jj]:
                # Cheap time-domain test: whole position is shrunk to zero.
                if was_active:
                    Z[jj] = 0.0
                    active_set_j[:] = False
            else:
                if was_active:
                    Z_j_new = Z_j + phi(GTR)
                else:
                    Z_j_new = phi(GTR)
                col_norm = np.sqrt(np.sum(np.abs(Z_j_new) ** 2, axis=0))
                if np.all(col_norm <= alpha_time_lc[jj]):
                    Z[jj] = 0.0
                    active_set_j[:] = False
                else:
                    # l1
                    shrink = np.maximum(1.0 - alpha_time_lc[jj] / np.maximum(
                        col_norm, alpha_time_lc[jj]), 0.0)
                    if w_time is not None:
                        # Coefficients with zero weight are forced inactive.
                        shrink[w_time[jj] == 0.0] = 0.0
                    Z_j_new *= shrink[np.newaxis, :]
                    # l21
                    shape_init = Z_j_new.shape
                    row_norm = np.sqrt(phi.norm(Z_j_new, ord=2).sum())
                    if row_norm <= alpha_space_lc[jj]:
                        Z[jj] = 0.0
                        active_set_j[:] = False
                    else:
                        shrink = np.maximum(
                            1.0 - alpha_space_lc[jj] /
                            np.maximum(row_norm, alpha_space_lc[jj]), 0.0)
                        Z_j_new *= shrink
                        Z[jj] = Z_j_new.reshape(-1, *shape_init[1:]).copy()
                        active_set_j[:] = True
                        R -= np.dot(G_j, phiT(Z[jj]))
        # Periodically evaluate the duality gap (it is expensive).
        if (i + 1) % dgap_freq == 0:
            Zd = np.vstack([Z[pos] for pos in range(n_positions)
                            if np.any(Z[pos])])
            gap, p_obj, d_obj, _ = dgap_l21l1(
                M, Gd, Zd, active_set, alpha_space, alpha_time, phi, phiT,
                n_orient, d_obj, w_space=w_space, w_time=w_time)
            converged = (gap < tol)
            E.append(p_obj)
            logger.info("\n    Iteration %d :: n_active %d" % (
                i + 1, np.sum(active_set) / n_orient))
            logger.info("    dgap %.2e :: p_obj %f :: d_obj %f" % (
                gap, p_obj, d_obj))
        if converged:
            break
        # Early exit once the active set has shrunk below perc of positions.
        if perc is not None:
            if np.sum(active_set) / float(n_orient) <= perc * n_positions:
                break
    return Z, active_set, E, converged
@verbose
def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time,
                                         lipschitz_constant, phi, phiT,
                                         Z_init=None, w_space=None,
                                         w_time=None, n_orient=1, maxit=200,
                                         tol=1e-8, dgap_freq=10,
                                         verbose=None):
    """Run the TF mixed-norm BCD solver with an active-set strategy.

    Alternates a single BCD pass over all positions (to let new positions
    enter the active set) with many BCD passes restricted to the current
    active set, until the duality gap falls below ``tol``.
    Returns ``(X, Z, active_set, E, gap)``.
    """
    n_sensors, n_times = M.shape
    n_sources = G.shape[1]
    n_positions = n_sources // n_orient
    # Z maps position index -> TF coefficients (0.0 while inactive).
    Z = dict.fromkeys(np.arange(n_positions), 0.0)
    active_set = np.zeros(n_sources, dtype=bool)
    active = []
    if Z_init is not None:
        if Z_init.shape != (n_sources, phi.n_coefs.sum()):
            raise Exception('Z_init must be None or an array with shape '
                            '(n_sources, n_coefs).')
        for ii in range(n_positions):
            if np.any(Z_init[ii * n_orient:(ii + 1) * n_orient]):
                active_set[ii * n_orient:(ii + 1) * n_orient] = True
                active.append(ii)
        if len(active):
            Z.update(dict(zip(active,
                              np.vsplit(Z_init[active_set], len(active)))))
    E = []
    candidates = range(n_positions)
    d_obj = -np.inf
    while True:
        # single BCD pass on all positions:
        Z_init = dict.fromkeys(np.arange(n_positions), 0.0)
        Z_init.update(dict(zip(active, Z.values())))
        Z, active_set, E_tmp, _ = _tf_mixed_norm_solver_bcd_(
            M, G, Z_init, active_set, candidates, alpha_space, alpha_time,
            lipschitz_constant, phi, phiT, w_space=w_space, w_time=w_time,
            n_orient=n_orient, maxit=1, tol=tol, perc=None, verbose=verbose)
        E += E_tmp
        # multiple BCD pass on active positions:
        active = np.where(active_set[::n_orient])[0]
        Z_init = dict(zip(range(len(active)), [Z[idx] for idx in active]))
        candidates_ = range(len(active))
        # Restrict the weights to the active positions for the inner solve.
        if w_space is not None:
            w_space_as = w_space[active_set[::n_orient]]
        else:
            w_space_as = None
        if w_time is not None:
            w_time_as = w_time[active_set[::n_orient]]
        else:
            w_time_as = None
        Z, as_, E_tmp, converged = _tf_mixed_norm_solver_bcd_(
            M, G[:, active_set], Z_init,
            np.ones(len(active) * n_orient, dtype=bool),
            candidates_, alpha_space, alpha_time,
            lipschitz_constant[active_set[::n_orient]], phi, phiT,
            w_space=w_space_as, w_time=w_time_as,
            n_orient=n_orient, maxit=maxit, tol=tol,
            dgap_freq=dgap_freq, perc=0.5,
            verbose=verbose)
        active = np.where(active_set[::n_orient])[0]
        active_set[active_set] = as_.copy()
        E += E_tmp
        # NOTE(review): the inner solver's `converged` flag is discarded
        # here, so a full duality-gap check runs after every outer pass;
        # only that gap decides termination. Confirm this is intentional.
        converged = True
        if converged:
            Zd = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])])
            gap, p_obj, d_obj, _ = dgap_l21l1(
                M, G, Zd, active_set, alpha_space, alpha_time,
                phi, phiT, n_orient, d_obj, w_space, w_time)
            logger.info("\ndgap %.2e :: p_obj %f :: d_obj %f :: n_active %d"
                        % (gap, p_obj, d_obj, np.sum(active_set) / n_orient))
            if gap < tol:
                logger.info("\nConvergence reached!\n")
                break
    if active_set.sum():
        Z = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])])
        X = phiT(Z)
    else:
        # Empty solution: return empty arrays with consistent shapes.
        Z = np.zeros((0, phi.n_coefs.sum()), dtype=np.complex128)
        X = np.zeros((0, n_times))
    return X, Z, active_set, E, gap
@verbose
def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
                         n_orient=1, maxit=200, tol=1e-8,
                         active_set_size=None, debias=True, return_gap=False,
                         dgap_freq=10, verbose=None):
    """Solve TF L21+L1 inverse solver with BCD and active set approach.
    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_dipoles)
        The gain matrix a.k.a. lead field.
    alpha_space : float
        The spatial regularization parameter.
    alpha_time : float
        The temporal regularization parameter. The higher it is the smoother
        will be the estimated time series.
    wsize: int or array-like
        Length of the STFT window in samples (must be a multiple of 4).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep) and each entry of wsize must be a multiple
        of 4.
    tstep: int or array-like
        Step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep), and each entry of tstep must be a multiple
        of 2 and divide the corresponding entry of wsize.
    n_orient : int
        The number of orientation (1 : fixed or 3 : free or loose).
    maxit : int
        The number of iterations.
    tol : float
        If absolute difference between estimates at 2 successive iterations
        is lower than tol, the convergence is reached.
    debias : bool
        Debias source estimates.
    return_gap : bool
        Return final duality gap.
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations.
    %(verbose)s
    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates.
    active_set : array
        The mask of active sources.
    E : list
        The value of the objective function every dgap_freq iteration. If
        log_objective is False or dgap_freq is np.inf, it will be empty.
    gap : float
        Final duality gap. Returned only if return_gap is True.
    References
    ----------
    .. [1] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hämäläinen, M. Kowalski
       "Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
       non-stationary source activations",
       Neuroimage, Volume 70, pp. 410-422, 15 April 2013.
       DOI: 10.1016/j.neuroimage.2012.12.051
    .. [2] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hämäläinen, M. Kowalski
       "Functional Brain Imaging with M/EEG Using Structured Sparsity in
       Time-Frequency Dictionaries",
       Proceedings Information Processing in Medical Imaging
       Lecture Notes in Computer Science, Volume 6801/2011, pp. 600-611, 2011.
       DOI: 10.1007/978-3-642-22092-0_49
    .. [3] Y. Bekhti, D. Strohmeier, M. Jas, R. Badeau, A. Gramfort.
       "M/EEG source localization with multiscale time-frequency dictionaries",
       6th International Workshop on Pattern Recognition in Neuroimaging
       (PRNI), 2016.
       DOI: 10.1109/PRNI.2016.7552337
    """
    # NOTE(review): `active_set_size` is accepted but unused in this body.
    n_sensors, n_times = M.shape
    n_sensors, n_sources = G.shape
    n_positions = n_sources // n_orient
    tstep = np.atleast_1d(tstep)
    wsize = np.atleast_1d(wsize)
    if len(tstep) != len(wsize):
        raise ValueError('The same number of window sizes and steps must be '
                         'passed. Got tstep = %s and wsize = %s' %
                         (tstep, wsize))
    n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs)
    phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
    # Per-position Lipschitz constants of the data-fit gradient.
    if n_orient == 1:
        lc = np.sum(G * G, axis=0)
    else:
        lc = np.empty(n_positions)
        for j in range(n_positions):
            G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
            lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
    logger.info("Using block coordinate descent with active set approach")
    # NOTE(review): verbose=None is hard-coded for the inner solver call
    # (the caller's `verbose` is not forwarded) — confirm intentional.
    X, Z, active_set, E, gap = _tf_mixed_norm_solver_bcd_active_set(
        M, G, alpha_space, alpha_time, lc, phi, phiT,
        Z_init=None, n_orient=n_orient, maxit=maxit, tol=tol,
        dgap_freq=dgap_freq, verbose=None)
    if np.any(active_set) and debias:
        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
        X *= bias[:, np.newaxis]
    if return_gap:
        return X, active_set, E, gap
    else:
        return X, active_set, E
@verbose
def iterative_tf_mixed_norm_solver(M, G, alpha_space, alpha_time,
                                   n_tfmxne_iter, wsize=64, tstep=4,
                                   maxit=3000, tol=1e-8, debias=True,
                                   n_orient=1, dgap_freq=10, verbose=None):
    """Solve TF L0.5/L1 + L0.5 inverse problem with BCD + active set approach.
    Parameters
    ----------
    M: array, shape (n_sensors, n_times)
        The data.
    G: array, shape (n_sensors, n_dipoles)
        The gain matrix a.k.a. lead field.
    alpha_space: float
        The spatial regularization parameter. The higher it is the less there
        will be active sources.
    alpha_time : float
        The temporal regularization parameter. The higher it is the smoother
        will be the estimated time series. 0 means no temporal regularization,
        a.k.a. irMxNE.
    n_tfmxne_iter : int
        Number of TF-MxNE iterations. If > 1, iterative reweighting is applied.
    wsize : int or array-like
        Length of the STFT window in samples (must be a multiple of 4).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep) and each entry of wsize must be a multiple
        of 4.
    tstep : int or array-like
        Step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep), and each entry of tstep must be a multiple
        of 2 and divide the corresponding entry of wsize.
    maxit : int
        The maximum number of iterations for each TF-MxNE problem.
    tol : float
        If absolute difference between estimates at 2 successive iterations
        is lower than tol, the convergence is reached. Also used as criterion
        on duality gap for each TF-MxNE problem.
    debias : bool
        Debias source estimates.
    n_orient : int
        The number of orientation (1 : fixed or 3 : free or loose).
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).
    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates.
    active_set : array
        The mask of active sources.
    E : list
        The value of the objective function over iterations.
    """
    n_sensors, n_times = M.shape
    n_sources = G.shape[1]
    n_positions = n_sources // n_orient
    tstep = np.atleast_1d(tstep)
    wsize = np.atleast_1d(wsize)
    if len(tstep) != len(wsize):
        raise ValueError('The same number of window sizes and steps must be '
                         'passed. Got tstep = %s and wsize = %s' %
                         (tstep, wsize))
    n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs)
    phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
    # Per-position Lipschitz constants of the data-fit gradient.
    if n_orient == 1:
        lc = np.sum(G * G, axis=0)
    else:
        lc = np.empty(n_positions)
        for j in range(n_positions):
            G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
            lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
    # space and time penalties, and inverse of their derivatives:
    def g_space(Z):
        return np.sqrt(np.sqrt(phi.norm(Z, ord=2).reshape(
            -1, n_orient).sum(axis=1)))
    def g_space_prime_inv(Z):
        return 2. * g_space(Z)
    def g_time(Z):
        return np.sqrt(np.sqrt(np.sum((np.abs(Z) ** 2.).reshape(
            (n_orient, -1), order='F'), axis=0)).reshape(
            (-1, Z.shape[1]), order='F'))
    def g_time_prime_inv(Z):
        return 2. * g_time(Z)
    E = list()  # primal objective after each reweighting iteration
    active_set = np.ones(n_sources, dtype=bool)
    Z = np.zeros((n_sources, phi.n_coefs.sum()), dtype=np.complex128)
    for k in range(n_tfmxne_iter):
        active_set_0 = active_set.copy()
        Z0 = Z.copy()
        if k == 0:
            # First pass: plain (unweighted) TF-MxNE.
            w_space = None
            w_time = None
        else:
            w_space = 1. / g_space_prime_inv(Z)
            w_time = g_time_prime_inv(Z)
            # Coefficients with zero derivative get weight 0 (the -1
            # placeholder avoids a division by zero before being zeroed).
            w_time[w_time == 0.0] = -1.
            w_time = 1. / w_time
            w_time[w_time < 0.0] = 0.0
        X, Z, active_set_, E_, _ = _tf_mixed_norm_solver_bcd_active_set(
            M, G[:, active_set], alpha_space, alpha_time,
            lc[active_set[::n_orient]], phi, phiT,
            Z_init=Z, w_space=w_space, w_time=w_time, n_orient=n_orient,
            maxit=maxit, tol=tol, dgap_freq=dgap_freq, verbose=None)
        active_set[active_set] = active_set_
        if active_set.sum() > 0:
            l21_penalty = np.sum(g_space(Z.copy()))
            l1_penalty = phi.norm(g_time(Z.copy()), ord=1).sum()
            p_obj = (0.5 * linalg.norm(M - np.dot(G[:, active_set], X),
                     'fro') ** 2. + alpha_space * l21_penalty +
                     alpha_time * l1_penalty)
            E.append(p_obj)
            logger.info('Iteration %d: active set size=%d, E=%f' % (
                k + 1, active_set.sum() / n_orient, p_obj))
            # Check convergence
            if np.array_equal(active_set, active_set_0):
                max_diff = np.amax(np.abs(Z - Z0))
                if (max_diff < tol):
                    print('Convergence reached after %d reweightings!' % k)
                    break
        else:
            # Everything was shrunk to zero: the solution is empty.
            p_obj = 0.5 * linalg.norm(M) ** 2.
            E.append(p_obj)
            logger.info('Iteration %d: as_size=%d, E=%f' % (
                k + 1, active_set.sum() / n_orient, p_obj))
            break
    if debias:
        if active_set.sum() > 0:
            bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
            X *= bias[:, np.newaxis]
    return X, active_set, E
| {
"repo_name": "larsoner/mne-python",
"path": "mne/inverse_sparse/mxne_optim.py",
"copies": "6",
"size": "57847",
"license": "bsd-3-clause",
"hash": -3749142048177160700,
"line_mean": 35.030529595,
"line_max": 79,
"alpha_frac": 0.536927839,
"autogenerated": false,
"ratio": 3.2319342759738445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6768862114973845,
"avg_score": null,
"num_lines": null
} |
import functools
from math import sqrt
import numpy as np
from .mxne_debiasing import compute_bias
from ..utils import logger, verbose, sum_squared, warn, _get_blas_funcs
from ..time_frequency._stft import stft_norm1, stft_norm2, stft, istft
@functools.lru_cache(maxsize=None)
def _get_dgemm():
    """Return the double-precision BLAS gemm routine (cached after first use)."""
    return _get_blas_funcs(np.float64, 'gemm')
def groups_norm2(A, n_orient):
    """Compute squared L2 norms of groups inplace.

    Note: ``A`` is squared in place as a side effect (callers that need the
    original pass a copy).
    """
    squared = np.power(A, 2, A)  # in-place square, reuses A's buffer
    return squared.reshape(A.shape[0] // n_orient, -1).sum(axis=1)
def norm_l2inf(A, n_orient, copy=True):
    """L2-inf norm: largest L2 norm over the groups of n_orient rows."""
    if A.size == 0:
        return 0.0
    # groups_norm2 squares its input in place, hence the optional copy.
    work = A.copy() if copy else A
    return sqrt(groups_norm2(work, n_orient).max())
def norm_l21(A, n_orient, copy=True):
    """L21 norm: sum over groups of n_orient rows of each group's L2 norm."""
    if A.size == 0:
        return 0.0
    # groups_norm2 squares its input in place, hence the optional copy.
    work = A.copy() if copy else A
    return np.sqrt(groups_norm2(work, n_orient)).sum()
def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False):
    """Proximity operator for l21 norm.

    L2 over columns and L1 over rows => groups contain n_orient rows.
    It can eventually take into account the negative frequencies
    when a complex value is passed and is_stft=True.

    Parameters
    ----------
    Y : array, shape (n_sources, n_coefs)
        The input data.
    alpha : float
        The regularization parameter.
    n_orient : int
        Number of dipoles per locations (typically 1 or 3).
    shape : None | tuple
        Shape of TF coefficients matrix.
    is_stft : bool
        If True, Y contains TF coefficients.

    Returns
    -------
    Y : array, shape (n_sources, n_coefs)
        The output data.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources

    Examples
    --------
    >>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=np.float64), (2, 1))
    >>> Y = np.r_[Y, np.zeros_like(Y)]
    >>> print(Y)  # doctest:+SKIP
    [[ 0.  4.  3.  0.  0.]
     [ 0.  4.  3.  0.  0.]
     [ 0.  0.  0.  0.  0.]
     [ 0.  0.  0.  0.  0.]]
    >>> Yp, active_set = prox_l21(Y, 2, 2)
    >>> print(Yp)  # doctest:+SKIP
    [[0.         2.86862915 2.15147186 0.         0.        ]
     [0.         2.86862915 2.15147186 0.         0.        ]]
    >>> print(active_set)
    [ True  True False False]
    """
    if len(Y) == 0:
        return np.zeros_like(Y), np.zeros((0,), dtype=bool)
    if shape is not None:
        shape_init = Y.shape
        Y = Y.reshape(*shape)
    n_positions = Y.shape[0] // n_orient
    # L2 norm of each group of n_orient rows.
    if is_stft:
        rows_norm = np.sqrt(
            stft_norm2(Y).reshape(n_positions, -1).sum(axis=1))
    else:
        rows_norm = np.sqrt(
            (Y * Y.conj()).real.reshape(n_positions, -1).sum(axis=1))
    # Shrinkage factor in [0, 1); the inner maximum avoids division by zero.
    denom = np.maximum(rows_norm, alpha)
    shrink = np.maximum(1.0 - alpha / denom, 0.0)
    active_set = shrink > 0.0
    if n_orient > 1:
        # Expand per-position values to one entry per row.
        active_set = np.repeat(active_set, n_orient)
        shrink = np.repeat(shrink, n_orient)
    out = Y[active_set]
    if shape is None:
        out *= shrink[active_set][:, np.newaxis]
    else:
        out *= shrink[active_set][:, np.newaxis, np.newaxis]
        out = out.reshape(-1, *shape_init[1:])
    return out, active_set
def prox_l1(Y, alpha, n_orient):
    """Proximity operator for l1 norm with multiple orientation support.

    Please note that this function computes a soft-thresholding if
    n_orient == 1 and a block soft-thresholding (L2 over orientation and
    L1 over position (space + time)) if n_orient == 3. See also
    :footcite:`GramfortEtAl2013b`.

    Parameters
    ----------
    Y : array, shape (n_sources, n_coefs)
        The input data.
    alpha : float
        The regularization parameter.
    n_orient : int
        Number of dipoles per locations (typically 1 or 3).

    Returns
    -------
    Y : array, shape (n_sources, n_coefs)
        The output data.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources.

    References
    ----------
    .. footbibliography::

    Examples
    --------
    >>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=np.float64), (2, 1))
    >>> Y = np.r_[Y, np.zeros_like(Y)]
    >>> print(Y)  # doctest:+SKIP
    [[ 1.  2.  3.  2.  0.]
     [ 1.  2.  3.  2.  0.]
     [ 0.  0.  0.  0.  0.]
     [ 0.  0.  0.  0.  0.]]
    >>> Yp, active_set = prox_l1(Y, 2, 2)
    >>> print(Yp)  # doctest:+SKIP
    [[0.         0.58578644 1.58578644 0.58578644 0.        ]
     [0.         0.58578644 1.58578644 0.58578644 0.        ]]
    >>> print(active_set)
    [ True  True False False]
    """
    n_positions = Y.shape[0] // n_orient
    # L2 amplitude across orientations for every (position, coef) pair.
    norms = np.sqrt((Y * Y.conj()).real.T.reshape(-1, n_orient).sum(axis=1))
    # Shrinkage factor in [0, 1); the inner maximum avoids division by zero.
    shrink = np.maximum(1.0 - alpha / np.maximum(norms, alpha), 0.0)
    shrink = shrink.reshape(-1, n_positions).T
    # A position stays active if any of its coefficients survives.
    active_set = np.any(shrink > 0.0, axis=1)
    shrink = shrink[active_set]
    if n_orient > 1:
        active_set = np.repeat(active_set, n_orient)
    out = Y[active_set]
    if len(out) > 0:
        for o in range(n_orient):
            out[o::n_orient] *= shrink
    return out, active_set
def _primal_l21(M, G, X, active_set, alpha, n_orient):
    """Primal objective for the mixed-norm inverse problem.

    See :footcite:`GramfortEtAl2012`.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_active)
        The gain matrix a.k.a. lead field.
    X : array, shape (n_active, n_times)
        Sources.
    active_set : array of bool, shape (n_sources,)
        Mask of active sources.
    alpha : float
        The regularization parameter.
    n_orient : int
        Number of dipoles per locations (typically 1 or 3).

    Returns
    -------
    p_obj : float
        Primal objective.
    R : array, shape (n_sensors, n_times)
        Current residual (M - G * X).
    nR2 : float
        Data-fitting term.
    GX : array, shape (n_sensors, n_times)
        Forward prediction.
    """
    # forward prediction restricted to the active sources
    GX = G[:, active_set] @ X
    R = M - GX
    nR2 = sum_squared(R)
    penalty = norm_l21(X, n_orient, copy=True)
    # 0.5 * data fit + alpha * mixed-norm penalty
    p_obj = 0.5 * nR2 + alpha * penalty
    return p_obj, R, nR2, GX
def dgap_l21(M, G, X, active_set, alpha, n_orient):
    """Duality gap for the mixed norm inverse problem.

    See :footcite:`GramfortEtAl2012`.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_active)
        The gain matrix a.k.a. lead field.
    X : array, shape (n_active, n_times)
        Sources.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources.
    alpha : float
        The regularization parameter.
    n_orient : int
        Number of dipoles per locations (typically 1 or 3).

    Returns
    -------
    gap : float
        Dual gap.
    p_obj : float
        Primal objective.
    d_obj : float
        Dual objective. gap = p_obj - d_obj.
    R : array, shape (n_sensors, n_times)
        Current residual (M - G * X).

    References
    ----------
    .. footbibliography::
    """
    p_obj, R, nR2, GX = _primal_l21(M, G, X, active_set, alpha, n_orient)
    # A dual feasible point is obtained by scaling the residual so that
    # the dual constraint ||G.T R||_{2,inf} <= alpha is satisfied
    dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False)
    scaling = alpha / dual_norm
    scaling = min(scaling, 1.0)
    d_obj = (scaling - 0.5 * (scaling ** 2)) * nR2 + scaling * np.sum(R * GX)
    gap = p_obj - d_obj
    return gap, p_obj, d_obj, R
@verbose
def _mixed_norm_solver_prox(M, G, alpha, lipschitz_constant, maxit=200,
                            tol=1e-8, verbose=None, init=None, n_orient=1,
                            dgap_freq=10):
    """Solve L21 inverse problem with proximal iterations and FISTA.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_sources)
        The gain matrix a.k.a. lead field.
    alpha : float
        The regularization parameter.
    lipschitz_constant : float
        Upper bound on the spectral norm squared of G, used as step size.
    maxit : int
        Maximum number of FISTA iterations.
    tol : float
        Tolerance on the duality gap for convergence.
    %(verbose)s
    init : array, shape (n_sources, n_times) | None
        Initial sources. If None, sources are initialized at zero.
    n_orient : int
        Number of dipoles per location (typically 1 or 3).
    dgap_freq : int
        Check the duality gap every dgap_freq iterations.

    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates restricted to the active set.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources.
    E : list of float
        Primal objective values at each duality-gap evaluation.
    """
    n_sensors, n_times = M.shape
    _, n_sources = G.shape
    if n_sources < n_sensors:
        # over-determined case: precompute the Gram matrix so iterations
        # work in source space (cheaper matrix products)
        gram = np.dot(G.T, G)
        GTM = np.dot(G.T, M)
    else:
        gram = None
    if init is None:
        X = 0.0
        R = M.copy()
        if gram is not None:
            R = np.dot(G.T, R)
    else:
        X = init
        if gram is None:
            R = M - np.dot(G, X)
        else:
            R = GTM - np.dot(gram, X)
    t = 1.0
    Y = np.zeros((n_sources, n_times))  # FISTA aux variable
    E = []  # track primal objective function
    highest_d_obj = - np.inf
    active_set = np.ones(n_sources, dtype=bool)  # start with full AS
    for i in range(maxit):
        X0, active_set_0 = X, active_set  # store previous values
        if gram is None:
            Y += np.dot(G.T, R) / lipschitz_constant  # ISTA step
        else:
            Y += R / lipschitz_constant  # ISTA step
        X, active_set = prox_l21(Y, alpha / lipschitz_constant, n_orient)
        # FISTA momentum update of t and the auxiliary variable Y
        t0 = t
        t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
        Y.fill(0.0)
        dt = ((t0 - 1.0) / t)
        Y[active_set] = (1.0 + dt) * X
        Y[active_set_0] -= dt * X0
        Y_as = active_set_0 | active_set
        if gram is None:
            R = M - np.dot(G[:, Y_as], Y[Y_as])
        else:
            R = GTM - np.dot(gram[:, Y_as], Y[Y_as])
        if (i + 1) % dgap_freq == 0:
            _, p_obj, d_obj, _ = dgap_l21(M, G, X, active_set, alpha,
                                          n_orient)
            # keep the best dual objective seen so far for a tighter gap
            highest_d_obj = max(d_obj, highest_d_obj)
            gap = p_obj - highest_d_obj
            E.append(p_obj)
            logger.debug("p_obj : %s -- gap : %s" % (p_obj, gap))
            if gap < tol:
                logger.debug('Convergence reached ! (gap: %s < %s)'
                             % (gap, tol))
                break
    return X, active_set, E
@verbose
def _mixed_norm_solver_cd(M, G, alpha, lipschitz_constant, maxit=10000,
                          tol=1e-8, verbose=None, init=None, n_orient=1,
                          dgap_freq=10):
    """Solve L21 inverse problem with coordinate descent.

    Delegates the optimization to scikit-learn's MultiTaskLasso
    (fixed orientation, i.e. n_orient == 1, is assumed by callers).

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_sources)
        The gain matrix a.k.a. lead field.
    alpha : float
        The regularization parameter.
    lipschitz_constant : None
        Unused; present for API compatibility with the other solvers.
    maxit : int
        Maximum number of coordinate descent iterations.
    tol : float
        Duality-gap tolerance (rescaled for sklearn below).
    %(verbose)s
    init : array, shape (n_sources, n_times) | None
        Initial coefficients for warm starting. If None, zeros are used.
    n_orient : int
        Number of dipoles per location.
    dgap_freq : int
        Unused; present for API compatibility with the other solvers.

    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates restricted to the active set.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources.
    p_obj : float
        Final primal objective value (NOTE: a scalar, not a list as in
        the other solvers; callers discard this third value here).
    """
    from sklearn.linear_model import MultiTaskLasso
    assert M.ndim == G.ndim and M.shape[0] == G.shape[0]
    # sklearn scales its objective by 1 / n_samples and its tol relative
    # to ||y||^2, hence the rescaling of alpha and tol below
    clf = MultiTaskLasso(alpha=alpha / len(M), tol=tol / sum_squared(M),
                         fit_intercept=False, max_iter=maxit, warm_start=True)
    if init is not None:
        # warm_start=True makes fit() start from the preset coef_
        clf.coef_ = init.T
    else:
        clf.coef_ = np.zeros((G.shape[1], M.shape[1])).T
    clf.fit(G, M)
    X = clf.coef_.T
    active_set = np.any(X, axis=1)
    X = X[active_set]
    gap, p_obj, d_obj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
    return X, active_set, p_obj
@verbose
def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200,
                           tol=1e-8, verbose=None, init=None, n_orient=1,
                           dgap_freq=10, use_accel=True, K=5):
    """Solve L21 inverse problem with block coordinate descent.

    Optionally applies Anderson acceleration (extrapolation over the last
    K iterates) to speed up convergence.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_sources)
        The gain matrix a.k.a. lead field.
    alpha : float
        The regularization parameter.
    lipschitz_constant : array, shape (n_positions,)
        Per-position Lipschitz constants (block step sizes).
    maxit : int
        Maximum number of BCD passes.
    tol : float
        Tolerance on the duality gap for convergence.
    %(verbose)s
    init : array, shape (n_sources, n_times) | None
        Initial sources. If None, sources are initialized at zero.
    n_orient : int
        Number of dipoles per location (typically 1 or 3).
    dgap_freq : int
        Check the duality gap every dgap_freq iterations.
    use_accel : bool
        If True, use Anderson acceleration.
    K : int
        Number of iterates used for the Anderson extrapolation.

    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates restricted to the active set.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources.
    E : list of float
        Primal objective values at each duality-gap evaluation.
    """
    _, n_times = M.shape
    _, n_sources = G.shape
    n_positions = n_sources // n_orient
    if init is None:
        X = np.zeros((n_sources, n_times))
        R = M.copy()
    else:
        X = init
        R = M - np.dot(G, X)
    E = []  # track primal objective function
    highest_d_obj = - np.inf
    active_set = np.zeros(n_sources, dtype=bool)  # start with empty AS
    alpha_lc = alpha / lipschitz_constant
    if use_accel:
        last_K_X = np.empty((K + 1, n_sources, n_times))
        U = np.zeros((K, n_sources * n_times))
    # First make G fortran for faster access to blocks of columns
    G = np.asfortranarray(G)
    # Ensure these are correct for dgemm
    assert R.dtype == np.float64
    assert G.dtype == np.float64
    one_ovr_lc = 1. / lipschitz_constant
    # assert that all the multiplied matrices are fortran contiguous
    assert X.T.flags.f_contiguous
    assert R.T.flags.f_contiguous
    assert G.flags.f_contiguous
    # storing list of contiguous arrays
    list_G_j_c = []
    for j in range(n_positions):
        idx = slice(j * n_orient, (j + 1) * n_orient)
        list_G_j_c.append(np.ascontiguousarray(G[:, idx]))
    for i in range(maxit):
        # one full in-place BCD pass updates X, R, active_set
        _bcd(G, X, R, active_set, one_ovr_lc, n_orient, alpha_lc, list_G_j_c)
        if (i + 1) % dgap_freq == 0:
            _, p_obj, d_obj, _ = dgap_l21(M, G, X[active_set], active_set,
                                          alpha, n_orient)
            highest_d_obj = max(d_obj, highest_d_obj)
            gap = p_obj - highest_d_obj
            E.append(p_obj)
            logger.debug("Iteration %d :: p_obj %f :: dgap %f :: n_active %d" %
                         (i + 1, p_obj, gap, np.sum(active_set) / n_orient))
            if gap < tol:
                logger.debug('Convergence reached ! (gap: %s < %s)'
                             % (gap, tol))
                break
        # using Anderson acceleration of the primal variable for faster
        # convergence
        if use_accel:
            last_K_X[i % (K + 1)] = X
            if i % (K + 1) == K:
                # build the matrix of successive differences and solve for
                # the extrapolation coefficients c (sum to 1)
                for k in range(K):
                    U[k] = last_K_X[k + 1].ravel() - last_K_X[k].ravel()
                C = U @ U.T
                one_vec = np.ones(K)
                try:
                    z = np.linalg.solve(C, one_vec)
                except np.linalg.LinAlgError:
                    # Matrix C is not always expected to be non-singular. If C
                    # is singular, acceleration is not used at this iteration
                    # and the solver proceeds with the non-sped-up code.
                    logger.debug("Iteration %d: LinAlg Error" % (i + 1))
                else:
                    c = z / z.sum()
                    X_acc = np.sum(
                        last_K_X[:-1] * c[:, None, None], axis=0
                    )
                    _grp_norm2_acc = groups_norm2(X_acc, n_orient)
                    active_set_acc = _grp_norm2_acc != 0
                    if n_orient > 1:
                        active_set_acc = np.kron(
                            active_set_acc, np.ones(n_orient, dtype=bool)
                        )
                    # only keep the extrapolated point if it improves the
                    # primal objective
                    p_obj = _primal_l21(M, G, X[active_set], active_set, alpha,
                                        n_orient)[0]
                    p_obj_acc = _primal_l21(M, G, X_acc[active_set_acc],
                                            active_set_acc, alpha, n_orient)[0]
                    if p_obj_acc < p_obj:
                        X = X_acc
                        active_set = active_set_acc
                        R = M - G[:, active_set] @ X[active_set]
    X = X[active_set]
    return X, active_set, E
def _bcd(G, X, R, active_set, one_ovr_lc, n_orient, alpha_lc, list_G_j_c):
    """Implement one full pass of BCD.

    BCD stands for Block Coordinate Descent.
    This function makes use of scipy.linalg.get_blas_funcs (dgemm) for
    speed reasons: products are computed in place via BLAS.

    Parameters
    ----------
    G : array, shape (n_sensors, n_active)
        The gain matrix a.k.a. lead field.
    X : array, shape (n_sources, n_times)
        Sources, modified in place.
    R : array, shape (n_sensors, n_times)
        The residuals: R = M - G @ X, modified in place.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources, modified in place.
    one_ovr_lc : array, shape (n_positions, )
        One over the lipschitz constants.
    n_orient : int
        Number of dipoles per positions (typically 1 or 3).
    alpha_lc : array, shape (n_positions, )
        alpha / Lipschitz constants (per-position thresholds).
    list_G_j_c : list of array
        Pre-extracted C-contiguous column blocks of G, one per position.
    """
    X_j_new = np.zeros_like(X[:n_orient, :], order='C')
    dgemm = _get_dgemm()
    for j, G_j_c in enumerate(list_G_j_c):
        idx = slice(j * n_orient, (j + 1) * n_orient)
        G_j = G[:, idx]
        X_j = X[idx]
        # gradient step for this block, computed in place into X_j_new
        dgemm(alpha=one_ovr_lc[j], beta=0., a=R.T, b=G_j, c=X_j_new.T,
              overwrite_c=True)
        # X_j_new = G_j.T @ R
        # Mathurin's trick to avoid checking all the entries
        was_non_zero = X_j[0, 0] != 0
        # was_non_zero = np.any(X_j)
        if was_non_zero:
            # add the block's contribution back into the residual before
            # recomputing it
            dgemm(alpha=1., beta=1., a=X_j.T, b=G_j_c.T, c=R.T,
                  overwrite_c=True)
            # R += np.dot(G_j, X_j)
            X_j_new += X_j
        block_norm = sqrt(sum_squared(X_j_new))
        if block_norm <= alpha_lc[j]:
            # block falls below threshold: zero it out
            X_j.fill(0.)
            active_set[idx] = False
        else:
            # block soft-thresholding (shrink), then update the residual
            shrink = max(1.0 - alpha_lc[j] / block_norm, 0.0)
            X_j_new *= shrink
            dgemm(alpha=-1., beta=1., a=X_j_new.T, b=G_j_c.T, c=R.T,
                  overwrite_c=True)
            # R -= np.dot(G_j, X_j_new)
            X_j[:] = X_j_new
            active_set[idx] = True
@verbose
def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
                      active_set_size=50, debias=True, n_orient=1,
                      solver='auto', return_gap=False, dgap_freq=10,
                      active_set_init=None, X_init=None):
    """Solve L1/L2 mixed-norm inverse problem with active set strategy.

    See references :footcite:`GramfortEtAl2012,StrohmeierEtAl2016,
    BertrandEtAl2020`.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_dipoles)
        The gain matrix a.k.a. lead field.
    alpha : float
        The regularization parameter. It should be between 0 and 100.
        A value of 100 will lead to an empty active set (no active source).
    maxit : int
        The number of iterations.
    tol : float
        Tolerance on dual gap for convergence checking.
    %(verbose)s
    active_set_size : int
        Size of active set increase at each iteration.
    debias : bool
        Debias source estimates.
    n_orient : int
        The number of orientation (1 : fixed or 3 : free or loose).
    solver : 'prox' | 'cd' | 'bcd' | 'auto'
        The algorithm to use for the optimization. Block Coordinate Descent
        (BCD) uses Anderson acceleration for faster convergence.
    return_gap : bool
        Return final duality gap.
    dgap_freq : int
        The duality gap is computed every dgap_freq iterations of the solver on
        the active set.
    active_set_init : array, shape (n_dipoles,) or None
        The initial active set (boolean array) used at the first iteration.
        If None, the usual active set strategy is applied.
    X_init : array, shape (n_dipoles, n_times) or None
        The initial weight matrix used for warm starting the solver. If None,
        the weights are initialized at zero.

    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates.
    active_set : array, shape (new_active_set_size,)
        The mask of active sources. Note that new_active_set_size is the size
        of the active set after convergence of the solver.
    E : list
        The value of the objective function over the iterations.
    gap : float
        Final duality gap. Returned only if return_gap is True.

    References
    ----------
    .. footbibliography::
    """
    n_dipoles = G.shape[1]
    n_positions = n_dipoles // n_orient
    _, n_times = M.shape
    alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
    logger.info("-- ALPHA MAX : %s" % alpha_max)
    alpha = float(alpha)
    has_sklearn = True
    try:
        from sklearn.linear_model import MultiTaskLasso  # noqa: F401
    except ImportError:
        has_sklearn = False
    # resolve 'auto' and downgrade 'cd' when it is not applicable
    if solver == 'auto':
        if has_sklearn and (n_orient == 1):
            solver = 'cd'
        else:
            solver = 'bcd'
    if solver == 'cd':
        if n_orient == 1 and not has_sklearn:
            warn('Scikit-learn >= 0.12 cannot be found. Using block coordinate'
                 ' descent instead of coordinate descent.')
            solver = 'bcd'
        if n_orient > 1:
            warn('Coordinate descent is only available for fixed orientation. '
                 'Using block coordinate descent instead of coordinate '
                 'descent')
            solver = 'bcd'
    # pick the low-level solver and the matching Lipschitz constant(s)
    if solver == 'cd':
        logger.info("Using coordinate descent")
        l21_solver = _mixed_norm_solver_cd
        lc = None
    elif solver == 'bcd':
        logger.info("Using block coordinate descent")
        l21_solver = _mixed_norm_solver_bcd
        G = np.asfortranarray(G)
        if n_orient == 1:
            lc = np.sum(G * G, axis=0)
        else:
            lc = np.empty(n_positions)
            for j in range(n_positions):
                G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
                lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
    else:
        logger.info("Using proximal iterations")
        l21_solver = _mixed_norm_solver_prox
        lc = 1.01 * np.linalg.norm(G, ord=2) ** 2
    if active_set_size is not None:
        E = list()
        highest_d_obj = - np.inf
        if X_init is not None and X_init.shape != (n_dipoles, n_times):
            raise ValueError('Wrong dim for initialized coefficients.')
        active_set = (active_set_init if active_set_init is not None else
                      np.zeros(n_dipoles, dtype=bool))
        # seed the active set with the sources most correlated with the data
        idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient))
        new_active_idx = idx_large_corr[-active_set_size:]
        if n_orient > 1:
            new_active_idx = (n_orient * new_active_idx[:, None] +
                              np.arange(n_orient)[None, :]).ravel()
        active_set[new_active_idx] = True
        as_size = np.sum(active_set)
        for k in range(maxit):
            if solver == 'bcd':
                lc_tmp = lc[active_set[::n_orient]]
            elif solver == 'cd':
                lc_tmp = None
            else:
                lc_tmp = 1.01 * np.linalg.norm(G[:, active_set], ord=2) ** 2
            X, as_, _ = l21_solver(M, G[:, active_set], alpha, lc_tmp,
                                   maxit=maxit, tol=tol, init=X_init,
                                   n_orient=n_orient, dgap_freq=dgap_freq)
            active_set[active_set] = as_.copy()
            idx_old_active_set = np.where(active_set)[0]
            _, p_obj, d_obj, R = dgap_l21(M, G, X, active_set, alpha,
                                          n_orient)
            highest_d_obj = max(d_obj, highest_d_obj)
            gap = p_obj - highest_d_obj
            E.append(p_obj)
            logger.info("Iteration %d :: p_obj %f :: dgap %f ::"
                        "n_active_start %d :: n_active_end %d" % (
                            k + 1, p_obj, gap, as_size // n_orient,
                            np.sum(active_set) // n_orient))
            if gap < tol:
                logger.info('Convergence reached ! (gap: %s < %s)'
                            % (gap, tol))
                break
            # add sources if not last iteration
            if k < (maxit - 1):
                # grow the active set with the sources most correlated
                # with the current residual, and warm start from X
                idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R),
                                                         n_orient))
                new_active_idx = idx_large_corr[-active_set_size:]
                if n_orient > 1:
                    new_active_idx = (n_orient * new_active_idx[:, None] +
                                      np.arange(n_orient)[None, :])
                    new_active_idx = new_active_idx.ravel()
                active_set[new_active_idx] = True
                idx_active_set = np.where(active_set)[0]
                as_size = np.sum(active_set)
                X_init = np.zeros((as_size, n_times), dtype=X.dtype)
                idx = np.searchsorted(idx_active_set, idx_old_active_set)
                X_init[idx] = X
        else:
            # for-else: loop exhausted maxit without hitting the gap tol
            warn('Did NOT converge ! (gap: %s > %s)' % (gap, tol))
    else:
        # no active set strategy: solve on the full source space at once
        X, active_set, E = l21_solver(M, G, alpha, lc, maxit=maxit,
                                      tol=tol, n_orient=n_orient, init=None)
        if return_gap:
            gap = dgap_l21(M, G, X, active_set, alpha, n_orient)[0]
    if np.any(active_set) and debias:
        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
        X *= bias[:, np.newaxis]
    logger.info('Final active set size: %s' % (np.sum(active_set) // n_orient))
    if return_gap:
        return X, active_set, E, gap
    else:
        return X, active_set, E
@verbose
def iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter, maxit=3000,
                                tol=1e-8, verbose=None, active_set_size=50,
                                debias=True, n_orient=1, dgap_freq=10,
                                solver='auto', weight_init=None):
    """Solve L0.5/L2 mixed-norm inverse problem with active set strategy.

    See reference :footcite:`StrohmeierEtAl2016`.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_dipoles)
        The gain matrix a.k.a. lead field.
    alpha : float
        The regularization parameter. It should be between 0 and 100.
        A value of 100 will lead to an empty active set (no active source).
    n_mxne_iter : int
        The number of MxNE iterations. If > 1, iterative reweighting
        is applied.
    maxit : int
        The number of iterations.
    tol : float
        Tolerance on dual gap for convergence checking.
    %(verbose)s
    active_set_size : int
        Size of active set increase at each iteration.
    debias : bool
        Debias source estimates.
    n_orient : int
        The number of orientation (1 : fixed or 3 : free or loose).
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations.
    solver : 'prox' | 'cd' | 'bcd' | 'auto'
        The algorithm to use for the optimization.
    weight_init : array, shape (n_dipoles,) or None
        The initial weight used for reweighting the gain matrix. If None, the
        weights are initialized with ones.

    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates.
    active_set : array
        The mask of active sources.
    E : list
        The value of the objective function over the iterations.

    References
    ----------
    .. footbibliography::
    """
    def g(w):
        # sqrt of the group L2 norms: yields the L0.5 group penalty
        return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient)))

    def gprime(w):
        # reweighting factors, one value per dipole component
        return 2. * np.repeat(g(w), n_orient).ravel()
    E = list()
    if weight_init is not None and weight_init.shape != (G.shape[1],):
        raise ValueError('Wrong dimension for weight initialization. Got %s. '
                         'Expected %s.' % (weight_init.shape, (G.shape[1],)))
    weights = weight_init if weight_init is not None else np.ones(G.shape[1])
    active_set = (weights != 0)
    weights = weights[active_set]
    X = np.zeros((G.shape[1], M.shape[1]))
    for k in range(n_mxne_iter):
        X0 = X.copy()
        active_set_0 = active_set.copy()
        # reweight the gain matrix columns before each MxNE solve
        G_tmp = G[:, active_set] * weights[np.newaxis, :]
        if active_set_size is not None:
            if np.sum(active_set) > (active_set_size * n_orient):
                X, _active_set, _ = mixed_norm_solver(
                    M, G_tmp, alpha, debias=False, n_orient=n_orient,
                    maxit=maxit, tol=tol, active_set_size=active_set_size,
                    dgap_freq=dgap_freq, solver=solver, verbose=verbose)
            else:
                X, _active_set, _ = mixed_norm_solver(
                    M, G_tmp, alpha, debias=False, n_orient=n_orient,
                    maxit=maxit, tol=tol, active_set_size=None,
                    dgap_freq=dgap_freq, solver=solver, verbose=verbose)
        else:
            X, _active_set, _ = mixed_norm_solver(
                M, G_tmp, alpha, debias=False, n_orient=n_orient,
                maxit=maxit, tol=tol, active_set_size=None,
                dgap_freq=dgap_freq, solver=solver, verbose=verbose)
        logger.info('active set size %d' % (_active_set.sum() / n_orient))
        if _active_set.sum() > 0:
            active_set[active_set] = _active_set
            # Reapply weights to have correct unit
            X *= weights[_active_set][:, np.newaxis]
            weights = gprime(X)
            p_obj = 0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X),
                                         'fro') ** 2. + alpha * np.sum(g(X))
            E.append(p_obj)
            # Check convergence
            if ((k >= 1) and np.all(active_set == active_set_0) and
                    np.all(np.abs(X - X0) < tol)):
                # use the module logger (was a bare print()) for consistency
                # with every other message emitted by these solvers
                logger.info('Convergence reached after %d reweightings!' % k)
                break
        else:
            # everything was shrunk to zero: empty solution
            active_set = np.zeros_like(active_set)
            p_obj = 0.5 * np.linalg.norm(M) ** 2.
            E.append(p_obj)
            break
    if np.any(active_set) and debias:
        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
        X *= bias[:, np.newaxis]
    return X, active_set, E
###############################################################################
# TF-MxNE
@verbose
def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None):
    """Compute lipschitz constant for FISTA.

    It uses a power iteration method.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_sources)
        The gain matrix a.k.a. lead field.
    phi : instance of _Phi
        The TF operator.
    phiT : instance of _PhiT
        The transpose of the TF operator.
    tol : float
        Relative tolerance on successive estimates for stopping.
    %(verbose)s

    Returns
    -------
    L : float
        The estimated Lipschitz constant.
    """
    n_times = M.shape[1]
    n_points = G.shape[1]
    iv = np.ones((n_points, n_times), dtype=np.float64)
    v = phi(iv)
    L = 1e100  # large initial value so the first relative change is ~1
    for it in range(100):  # hard cap on the number of power iterations
        L_old = L
        logger.info('Lipschitz estimation: iteration = %d' % it)
        iv = np.real(phiT(v))
        Gv = np.dot(G, iv)
        GtGv = np.dot(G.T, Gv)
        w = phi(GtGv)
        L = np.max(np.abs(w))  # l_inf norm
        v = w / L
        if abs((L - L_old) / L_old) < tol:
            break
    return L
def safe_max_abs(A, ia):
    """Compute np.max(np.abs(A[ia])), returning 0. when the mask is empty."""
    if not np.sum(ia):
        # nothing selected: the max over an empty set is defined as 0
        return 0.
    return np.max(np.abs(A[ia]))
def safe_max_abs_diff(A, ia, B, ib):
    """Compute np.max(np.abs(A[ia] - B[ib])) possible with empty selections.

    Parameters
    ----------
    A, B : array
        The input arrays.
    ia, ib : array of bool
        Masks selecting entries of A and B respectively. An empty
        selection is treated as the scalar 0.
    """
    A = A[ia] if np.sum(ia) else 0.0
    # bug fix: the guard previously tested np.sum(ia) instead of
    # np.sum(ib), so B was wrongly zeroed whenever ia was empty
    B = B[ib] if np.sum(ib) else 0.0
    return np.max(np.abs(A - B))
class _Phi(object):
    """Have phi stft as callable w/o using a lambda that does not pickle.

    Applies one or several STFT dictionaries to time series; each
    dictionary is materialized as a dense (n_times, n_coefs) operator so
    the transform is a plain matrix product.
    """

    def __init__(self, wsize, tstep, n_coefs, n_times):  # noqa: D102
        # per-dictionary window sizes, steps and coefficient counts
        self.wsize = np.atleast_1d(wsize)
        self.tstep = np.atleast_1d(tstep)
        self.n_coefs = np.atleast_1d(n_coefs)
        self.n_dicts = len(tstep)
        # NOTE(review): uses the raw ``wsize`` (not self.wsize); norm()
        # indexes self.n_freqs[i], so callers appear to pass an array
        # already -- confirm at call sites
        self.n_freqs = wsize // 2 + 1
        self.n_steps = self.n_coefs // self.n_freqs
        self.n_times = n_times
        # ravel freq+time here
        self.ops = list()
        for ws, ts in zip(self.wsize, self.tstep):
            self.ops.append(
                stft(np.eye(n_times), ws, ts,
                     verbose=False).reshape(n_times, -1))

    def __call__(self, x):  # noqa: D105
        if self.n_dicts == 1:
            return x @ self.ops[0]
        else:
            # concatenate the dictionaries, normalized so the stacked
            # operator keeps a comparable scale
            return np.hstack(
                [x @ op for op in self.ops]) / np.sqrt(self.n_dicts)

    def norm(self, z, ord=2):
        """Squared L2 norm if ord == 2 and L1 norm if order == 1."""
        if ord not in (1, 2):
            raise ValueError('Only supported norm order are 1 and 2. '
                             'Got ord = %s' % ord)
        stft_norm = stft_norm1 if ord == 1 else stft_norm2
        norm = 0.
        if len(self.n_coefs) > 1:
            # split the flat coefficient vector back into per-dictionary
            # chunks before reshaping to (epochs, freqs, steps)
            z_ = np.array_split(np.atleast_2d(z), np.cumsum(self.n_coefs)[:-1],
                                axis=1)
        else:
            z_ = [np.atleast_2d(z)]
        for i in range(len(z_)):
            norm += stft_norm(
                z_[i].reshape(-1, self.n_freqs[i], self.n_steps[i]))
        return norm
class _PhiT(object):
    """Have phi.T istft as callable w/o using a lambda that does not pickle.

    Inverse of _Phi: maps TF coefficients back to time series. Real and
    imaginary parts get separate dense ISTFT operators so the inverse is
    two real matrix products.
    """

    def __init__(self, tstep, n_freqs, n_steps, n_times):  # noqa: D102
        self.tstep = tstep
        self.n_freqs = n_freqs
        self.n_steps = n_steps
        self.n_times = n_times
        self.n_dicts = len(tstep) if isinstance(tstep, np.ndarray) else 1
        self.n_coefs = list()
        self.op_re = list()  # ISTFT applied to the real part
        self.op_im = list()  # ISTFT applied to the imaginary part
        for nf, ns, ts in zip(self.n_freqs, self.n_steps, self.tstep):
            nc = nf * ns
            self.n_coefs.append(nc)
            # identity over coefficients, reshaped so istft sees each
            # basis coefficient separately
            eye = np.eye(nc).reshape(nf, ns, nf, ns)
            self.op_re.append(istft(
                eye, ts, n_times).reshape(nc, n_times))
            self.op_im.append(istft(
                eye * 1j, ts, n_times).reshape(nc, n_times))

    def __call__(self, z):  # noqa: D105
        if self.n_dicts == 1:
            return z.real @ self.op_re[0] + z.imag @ self.op_im[0]
        else:
            # sum the contributions of all dictionaries, mirroring the
            # 1/sqrt(n_dicts) normalization of the forward operator
            x_out = np.zeros((z.shape[0], self.n_times))
            z_ = np.array_split(z, np.cumsum(self.n_coefs)[:-1], axis=1)
            for this_z, op_re, op_im in zip(z_, self.op_re, self.op_im):
                x_out += this_z.real @ op_re + this_z.imag @ op_im
            return x_out / np.sqrt(self.n_dicts)
def norm_l21_tf(Z, phi, n_orient, w_space=None):
    """Mixed L21 norm in the time-frequency domain.

    Squared TF norms are summed over the n_orient components of each
    position, square-rooted, optionally weighted by ``w_space``, then
    summed over positions.
    """
    if not Z.shape[0]:
        return 0.
    per_position = np.sqrt(
        phi.norm(Z, ord=2).reshape(-1, n_orient).sum(axis=1))
    if w_space is not None:
        per_position = per_position * w_space
    return per_position.sum()
def norm_l1_tf(Z, phi, n_orient, w_time):
    """L1 norm in the time-frequency domain.

    Coefficient magnitudes are first combined (L2) across the n_orient
    components of each position, optionally weighted by ``w_time``, then
    summed through the L1 norm of the TF operator.
    """
    if not Z.shape[0]:
        return 0.
    n_positions = Z.shape[0] // n_orient
    # L2 over orientations: components are stacked column-wise, hence the
    # Fortran-order reshapes
    mags = np.sqrt(np.sum(
        (np.abs(Z) ** 2.).reshape((n_orient, -1), order='F'), axis=0))
    mags = mags.reshape((n_positions, -1), order='F')
    if w_time is not None:
        mags = mags * w_time
    return phi.norm(mags, ord=1).sum()
def norm_epsilon(Y, l1_ratio, phi, w_space=1., w_time=None):
    """Weighted epsilon norm.

    The weighted epsilon norm is the dual norm of::

        w_{space} * (1. - l1_ratio) * ||Y||_2 + l1_ratio * ||Y||_{1, w_{time}}.

    where ``||Y||_{1, w_{time}} = (np.abs(Y) * w_time).sum()``

    Warning: it takes into account the fact that Y only contains coefficients
    corresponding to the positive frequencies (see `stft_norm2()`): some
    entries will be counted twice. It is also assumed that all entries of both
    Y and w_time are non-negative. See
    :footcite:`NdiayeEtAl2016,BurdakovMerkulov2001`.

    Parameters
    ----------
    Y : array, shape (n_coefs,)
        The input data.
    l1_ratio : float between 0 and 1
        Tradeoff between L2 and L1 regularization. When it is 0, no temporal
        regularization is applied.
    phi : instance of _Phi
        The TF operator.
    w_space : float
        Scalar weight of the L2 norm. By default, it is taken equal to 1.
    w_time : array, shape (n_coefs, ) | None
        Weights of each TF coefficient in the L1 norm. If None, weights equal
        to 1 are used.

    Returns
    -------
    nu : float
        The value of the dual norm evaluated at Y.

    References
    ----------
    .. footbibliography::
    """
    # since the solution is invariant to flipped signs in Y, all entries
    # of Y are assumed positive
    # Add negative freqs: count all freqs twice except first and last:
    freqs_count = np.full(len(Y), 2)
    for i, fc in enumerate(np.array_split(freqs_count,
                                          np.cumsum(phi.n_coefs)[:-1])):
        fc[:phi.n_steps[i]] = 1
        fc[-phi.n_steps[i]:] = 1
    # exclude 0 weights:
    if w_time is not None:
        nonzero_weights = (w_time != 0.0)
        Y = Y[nonzero_weights]
        freqs_count = freqs_count[nonzero_weights]
        w_time = w_time[nonzero_weights]
    norm_inf_Y = np.max(Y / w_time) if w_time is not None else np.max(Y)
    if l1_ratio == 1.:
        # dual norm of L1 weighted is Linf with inverse weights
        return norm_inf_Y
    elif l1_ratio == 0.:
        # dual norm of L2 is L2
        return np.sqrt(phi.norm(Y[None, :], ord=2).sum())
    if norm_inf_Y == 0.:
        return 0.
    # ignore some values of Y by lower bound on dual norm:
    if w_time is None:
        idx = Y > l1_ratio * norm_inf_Y
    else:
        idx = Y > l1_ratio * np.max(Y / (w_space * (1. - l1_ratio) +
                                         l1_ratio * w_time))
    if idx.sum() == 1:
        return norm_inf_Y
    # sort both Y / w_time and freqs_count at the same time
    if w_time is not None:
        idx_sort = np.argsort(Y[idx] / w_time[idx])[::-1]
        w_time = w_time[idx][idx_sort]
    else:
        idx_sort = np.argsort(Y[idx])[::-1]
    Y = Y[idx][idx_sort]
    freqs_count = freqs_count[idx][idx_sort]
    # duplicate the entries that stand for both a positive and a negative
    # frequency before forming the partial sums
    Y = np.repeat(Y, freqs_count)
    if w_time is not None:
        w_time = np.repeat(w_time, freqs_count)
    K = Y.shape[0]
    # partial sums used by the closed-form root of the dual problem
    if w_time is None:
        p_sum_Y2 = np.cumsum(Y ** 2)
        p_sum_w2 = np.arange(1, K + 1)
        p_sum_Yw = np.cumsum(Y)
        upper = p_sum_Y2 / Y ** 2 - 2. * p_sum_Yw / Y + p_sum_w2
    else:
        p_sum_Y2 = np.cumsum(Y ** 2)
        p_sum_w2 = np.cumsum(w_time ** 2)
        p_sum_Yw = np.cumsum(Y * w_time)
        upper = (p_sum_Y2 / (Y / w_time) ** 2 -
                 2. * p_sum_Yw / (Y / w_time) + p_sum_w2)
    # find the last index where the threshold condition still holds
    upper_greater = np.where(upper > w_space ** 2 * (1. - l1_ratio) ** 2 /
                             l1_ratio ** 2)[0]
    i0 = upper_greater[0] - 1 if upper_greater.size else K - 1
    p_sum_Y2 = p_sum_Y2[i0]
    p_sum_w2 = p_sum_w2[i0]
    p_sum_Yw = p_sum_Yw[i0]
    # solve the quadratic equation for the dual norm; the linear fallback
    # covers the near-degenerate denominator
    denom = l1_ratio ** 2 * p_sum_w2 - w_space ** 2 * (1. - l1_ratio) ** 2
    if np.abs(denom) < 1e-10:
        return p_sum_Y2 / (2. * l1_ratio * p_sum_Yw)
    else:
        delta = (l1_ratio * p_sum_Yw) ** 2 - p_sum_Y2 * denom
        return (l1_ratio * p_sum_Yw - np.sqrt(delta)) / denom
def norm_epsilon_inf(G, R, phi, l1_ratio, n_orient, w_space=None, w_time=None):
    """Weighted epsilon-inf norm of phi(np.dot(G.T, R)).

    Parameters
    ----------
    G : array, shape (n_sensors, n_sources)
        Gain matrix a.k.a. lead field.
    R : array, shape (n_sensors, n_times)
        Residual.
    phi : instance of _Phi
        The TF operator.
    l1_ratio : float between 0 and 1
        Parameter controlling the tradeoff between L21 and L1 regularization.
        0 corresponds to an absence of temporal regularization, ie MxNE.
    n_orient : int
        Number of dipoles per location (typically 1 or 3).
    w_space : array, shape (n_positions,) or None.
        Weights for the L2 term of the epsilon norm. If None, weights are
        all equal to 1.
    w_time : array, shape (n_positions, n_coefs) or None
        Weights for the L1 term of the epsilon norm. If None, weights are
        all equal to 1.

    Returns
    -------
    nu : float
        The maximum value of the epsilon norms over groups of n_orient dipoles
        (consecutive rows of phi(np.dot(G.T, R))).
    """
    n_positions = G.shape[1] // n_orient
    coefs = np.abs(phi(np.dot(G.T, R)))
    # combine orientation components with an L2 norm (Fortran-order
    # reshapes keep components of a position adjacent)
    coefs = coefs.reshape((n_orient, -1), order='F')
    coefs = np.linalg.norm(coefs, axis=0)
    coefs = coefs.reshape((n_positions, -1), order='F')
    # take the maximum of the per-position epsilon norms
    nu = 0.
    for pos in range(n_positions):
        w_t = None if w_time is None else w_time[pos]
        w_s = 1. if w_space is None else w_space[pos]
        nu = max(nu, norm_epsilon(coefs[pos], l1_ratio, phi,
                                  w_space=w_s, w_time=w_t))
    return nu
def dgap_l21l1(M, G, Z, active_set, alpha_space, alpha_time, phi, phiT,
               n_orient, highest_d_obj, w_space=None, w_time=None):
    """Duality gap for the time-frequency mixed norm inverse problem.

    See :footcite:`GramfortEtAl2012,NdiayeEtAl2016`

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_sources)
        Gain matrix a.k.a. lead field.
    Z : array, shape (n_active, n_coefs)
        Sources in TF domain.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources.
    alpha_space : float
        The spatial regularization parameter.
    alpha_time : float
        The temporal regularization parameter. The higher it is the smoother
        will be the estimated time series.
    phi : instance of _Phi
        The TF operator.
    phiT : instance of _PhiT
        The transpose of the TF operator.
    n_orient : int
        Number of dipoles per locations (typically 1 or 3).
    highest_d_obj : float
        The highest value of the dual objective so far.
    w_space : array, shape (n_positions, )
        Array of spatial weights.
    w_time : array, shape (n_positions, n_coefs)
        Array of TF weights.

    Returns
    -------
    gap : float
        Dual gap
    p_obj : float
        Primal objective
    d_obj : float
        Dual objective. gap = p_obj - d_obj
    R : array, shape (n_sensors, n_times)
        Current residual (M - G * X)

    References
    ----------
    .. footbibliography::
    """
    X = phiT(Z)
    GX = np.dot(G[:, active_set], X)
    R = M - GX
    # some functions need w_time only on active_set, other need it completely
    if w_time is not None:
        w_time_as = w_time[active_set[::n_orient]]
    else:
        w_time_as = None
    if w_space is not None:
        w_space_as = w_space[active_set[::n_orient]]
    else:
        w_space_as = None
    penaltyl1 = norm_l1_tf(Z, phi, n_orient, w_time_as)
    penaltyl21 = norm_l21_tf(Z, phi, n_orient, w_space_as)
    nR2 = sum_squared(R)
    p_obj = 0.5 * nR2 + alpha_space * penaltyl21 + alpha_time * penaltyl1
    # scale the residual to obtain a dual feasible point, then evaluate
    # the dual objective at it
    l1_ratio = alpha_time / (alpha_space + alpha_time)
    dual_norm = norm_epsilon_inf(G, R, phi, l1_ratio, n_orient,
                                 w_space=w_space, w_time=w_time)
    scaling = min(1., (alpha_space + alpha_time) / dual_norm)
    d_obj = (scaling - 0.5 * (scaling ** 2)) * nR2 + scaling * np.sum(R * GX)
    # keep the best dual objective seen so far for a monotone gap
    d_obj = max(d_obj, highest_d_obj)
    gap = p_obj - d_obj
    return gap, p_obj, d_obj, R
def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space,
                               alpha_time, lipschitz_constant, phi, phiT,
                               w_space=None, w_time=None, n_orient=1,
                               maxit=200, tol=1e-8, dgap_freq=10, perc=None,
                               timeit=True, verbose=None):
    """Run BCD passes of the TF mixed-norm solver over given candidates.

    For each candidate position: gradient step on the time courses,
    L1 soft-thresholding of the TF coefficients, then L21 (block)
    soft-thresholding; Z, active_set and the residual are updated in
    place/in the returned values.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_sources)
        Gain matrix a.k.a. lead field.
    Z : dict of array
        TF coefficients, one entry per position (0.0 for inactive ones).
    active_set : array of bool, shape (n_sources, )
        Mask of active sources, modified in place.
    candidates : iterable of int
        Position indices visited in each BCD pass.
    alpha_space : float
        The spatial regularization parameter.
    alpha_time : float
        The temporal regularization parameter.
    lipschitz_constant : array, shape (n_positions, )
        Per-position Lipschitz constants.
    phi : instance of _Phi
        The TF operator.
    phiT : instance of _PhiT
        The transpose of the TF operator.
    w_space : array, shape (n_positions, ) | None
        Spatial weights.
    w_time : array, shape (n_positions, n_coefs) | None
        TF weights.
    n_orient : int
        Number of dipoles per location (typically 1 or 3).
    maxit : int
        Maximum number of BCD passes.
    tol : float
        Tolerance on the duality gap.
    dgap_freq : int
        Check the duality gap every dgap_freq passes.
    perc : float | None
        If not None, stop early once the active set shrinks to at most
        perc * n_positions positions.
    timeit : bool
        NOTE(review): unused in this function body — likely vestigial.
    %(verbose)s

    Returns
    -------
    Z : dict of array
        Updated TF coefficients.
    active_set : array of bool, shape (n_sources, )
        Updated mask of active sources.
    E : list of float
        Primal objective values at each duality-gap evaluation.
    converged : bool
        Whether the duality gap dropped below tol.
    """
    n_sources = G.shape[1]
    n_positions = n_sources // n_orient
    # First make G fortran for faster access to blocks of columns
    Gd = np.asfortranarray(G)
    G = np.ascontiguousarray(
        Gd.T.reshape(n_positions, n_orient, -1).transpose(0, 2, 1))
    R = M.copy()  # residual
    active = np.where(active_set[::n_orient])[0]
    for idx in active:
        R -= np.dot(G[idx], phiT(Z[idx]))
    E = []  # track primal objective function
    # pre-divide the thresholds by the per-position Lipschitz constants
    if w_time is None:
        alpha_time_lc = alpha_time / lipschitz_constant
    else:
        alpha_time_lc = alpha_time * w_time / lipschitz_constant[:, None]
    if w_space is None:
        alpha_space_lc = alpha_space / lipschitz_constant
    else:
        alpha_space_lc = alpha_space * w_space / lipschitz_constant
    converged = False
    d_obj = - np.inf
    for i in range(maxit):
        for jj in candidates:
            ids = jj * n_orient
            ide = ids + n_orient
            G_j = G[jj]
            Z_j = Z[jj]
            active_set_j = active_set[ids:ide]
            was_active = np.any(active_set_j)
            # gradient step
            GTR = np.dot(G_j.T, R) / lipschitz_constant[jj]
            X_j_new = GTR.copy()
            if was_active:
                X_j = phiT(Z_j)
                R += np.dot(G_j, X_j)
                X_j_new += X_j
            rows_norm = np.linalg.norm(X_j_new, 'fro')
            if rows_norm <= alpha_space_lc[jj]:
                if was_active:
                    Z[jj] = 0.0
                    active_set_j[:] = False
            else:
                GTR_phi = phi(GTR)
                if was_active:
                    Z_j_new = Z_j + GTR_phi
                else:
                    Z_j_new = GTR_phi
                col_norm = np.linalg.norm(Z_j_new, axis=0)
                if np.all(col_norm <= alpha_time_lc[jj]):
                    Z[jj] = 0.0
                    active_set_j[:] = False
                else:
                    # l1
                    shrink = np.maximum(1.0 - alpha_time_lc[jj] / np.maximum(
                        col_norm, alpha_time_lc[jj]), 0.0)
                    if w_time is not None:
                        shrink[w_time[jj] == 0.0] = 0.0
                    Z_j_new *= shrink[np.newaxis, :]
                    # l21
                    shape_init = Z_j_new.shape
                    row_norm = np.sqrt(phi.norm(Z_j_new, ord=2).sum())
                    if row_norm <= alpha_space_lc[jj]:
                        Z[jj] = 0.0
                        active_set_j[:] = False
                    else:
                        shrink = np.maximum(
                            1.0 - alpha_space_lc[jj] /
                            np.maximum(row_norm, alpha_space_lc[jj]), 0.0)
                        Z_j_new *= shrink
                        Z[jj] = Z_j_new.reshape(-1, *shape_init[1:]).copy()
                        active_set_j[:] = True
                        Z_j_phi_T = phiT(Z[jj])
                        R -= np.dot(G_j, Z_j_phi_T)
        if (i + 1) % dgap_freq == 0:
            # stack the non-zero blocks to evaluate the duality gap
            Zd = np.vstack([Z[pos] for pos in range(n_positions)
                            if np.any(Z[pos])])
            gap, p_obj, d_obj, _ = dgap_l21l1(
                M, Gd, Zd, active_set, alpha_space, alpha_time, phi, phiT,
                n_orient, d_obj, w_space=w_space, w_time=w_time)
            converged = (gap < tol)
            E.append(p_obj)
            logger.info("\n    Iteration %d :: n_active %d" % (
                i + 1, np.sum(active_set) / n_orient))
            logger.info("  dgap %.2e :: p_obj %f :: d_obj %f" % (
                gap, p_obj, d_obj))
        if converged:
            break
        if perc is not None:
            # early exit once the active set is small enough
            if np.sum(active_set) / float(n_orient) <= perc * n_positions:
                break
    return Z, active_set, E, converged
@verbose
def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time,
                                         lipschitz_constant, phi, phiT,
                                         Z_init=None, w_space=None,
                                         w_time=None, n_orient=1, maxit=200,
                                         tol=1e-8, dgap_freq=10,
                                         verbose=None):
    """Run the TF-MxNE BCD solver with an active-set strategy.

    Alternates a single BCD pass over all candidate positions (letting new
    sources enter the active set) with a full BCD solve restricted to the
    current active set, until the duality gap computed by ``dgap_l21l1``
    drops below ``tol``.

    Returns ``X`` (source time courses), ``Z`` (stacked TF coefficients of
    the active sources), the boolean ``active_set`` over all sources, the
    list ``E`` of primal objective values, and the final duality ``gap``.
    """
    n_sensors, n_times = M.shape
    n_sources = G.shape[1]
    n_positions = n_sources // n_orient
    # Z maps position index -> TF coefficient block (scalar 0.0 when inactive)
    Z = dict.fromkeys(np.arange(n_positions), 0.0)
    active_set = np.zeros(n_sources, dtype=bool)
    active = []
    if Z_init is not None:
        if Z_init.shape != (n_sources, phi.n_coefs.sum()):
            raise Exception('Z_init must be None or an array with shape '
                            '(n_sources, n_coefs).')
        # seed the active set with the positions that are non-zero in Z_init
        for ii in range(n_positions):
            if np.any(Z_init[ii * n_orient:(ii + 1) * n_orient]):
                active_set[ii * n_orient:(ii + 1) * n_orient] = True
                active.append(ii)
        if len(active):
            Z.update(dict(zip(active,
                              np.vsplit(Z_init[active_set], len(active)))))
    E = []  # primal objective values accumulated over iterations
    candidates = range(n_positions)
    d_obj = -np.inf
    while True:
        # single BCD pass on all positions:
        Z_init = dict.fromkeys(np.arange(n_positions), 0.0)
        Z_init.update(dict(zip(active, Z.values())))
        Z, active_set, E_tmp, _ = _tf_mixed_norm_solver_bcd_(
            M, G, Z_init, active_set, candidates, alpha_space, alpha_time,
            lipschitz_constant, phi, phiT, w_space=w_space, w_time=w_time,
            n_orient=n_orient, maxit=1, tol=tol, perc=None, verbose=verbose)
        E += E_tmp
        # multiple BCD pass on active positions:
        active = np.where(active_set[::n_orient])[0]
        Z_init = dict(zip(range(len(active)), [Z[idx] for idx in active]))
        candidates_ = range(len(active))
        # restrict the (optional) reweighting vectors to the active positions
        if w_space is not None:
            w_space_as = w_space[active_set[::n_orient]]
        else:
            w_space_as = None
        if w_time is not None:
            w_time_as = w_time[active_set[::n_orient]]
        else:
            w_time_as = None
        Z, as_, E_tmp, converged = _tf_mixed_norm_solver_bcd_(
            M, G[:, active_set], Z_init,
            np.ones(len(active) * n_orient, dtype=bool),
            candidates_, alpha_space, alpha_time,
            lipschitz_constant[active_set[::n_orient]], phi, phiT,
            w_space=w_space_as, w_time=w_time_as,
            n_orient=n_orient, maxit=maxit, tol=tol,
            dgap_freq=dgap_freq, perc=0.5,
            verbose=verbose)
        active = np.where(active_set[::n_orient])[0]
        # map the restricted solver's active set back into full source space
        active_set[active_set] = as_.copy()
        E += E_tmp
        # NOTE(review): this unconditionally overrides the inner solver's
        # converged flag so that the duality gap below is evaluated on every
        # outer iteration -- confirm this is intentional.
        converged = True
        if converged:
            Zd = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])])
            gap, p_obj, d_obj, _ = dgap_l21l1(
                M, G, Zd, active_set, alpha_space, alpha_time,
                phi, phiT, n_orient, d_obj, w_space, w_time)
            logger.info("\ndgap %.2e :: p_obj %f :: d_obj %f :: n_active %d"
                        % (gap, p_obj, d_obj, np.sum(active_set) / n_orient))
            if gap < tol:
                logger.info("\nConvergence reached!\n")
                break
    if active_set.sum():
        # stack the non-zero blocks and transform back to the time domain
        Z = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])])
        X = phiT(Z)
    else:
        # no active source left: return empty arrays of consistent shape
        Z = np.zeros((0, phi.n_coefs.sum()), dtype=np.complex128)
        X = np.zeros((0, n_times))
    return X, Z, active_set, E, gap
@verbose
def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
                         n_orient=1, maxit=200, tol=1e-8,
                         active_set_size=None, debias=True, return_gap=False,
                         dgap_freq=10, verbose=None):
    """Solve TF L21+L1 inverse solver with BCD and active set approach.

    See :footcite:`GramfortEtAl2013b,GramfortEtAl2011,BekhtiEtAl2016`.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_dipoles)
        The gain matrix a.k.a. lead field.
    alpha_space : float
        The spatial regularization parameter.
    alpha_time : float
        The temporal regularization parameter. The higher it is the smoother
        will be the estimated time series.
    wsize: int or array-like
        Length of the STFT window in samples (must be a multiple of 4).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep) and each entry of wsize must be a multiple
        of 4.
    tstep: int or array-like
        Step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep), and each entry of tstep must be a multiple
        of 2 and divide the corresponding entry of wsize.
    n_orient : int
        The number of orientation (1 : fixed or 3 : free or loose).
    maxit : int
        The number of iterations.
    tol : float
        If absolute difference between estimates at 2 successive iterations
        is lower than tol, the convergence is reached.
    debias : bool
        Debias source estimates.
    return_gap : bool
        Return final duality gap.
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations.
    %(verbose)s

    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates.
    active_set : array
        The mask of active sources.
    E : list
        The value of the objective function every dgap_freq iteration. If
        log_objective is False or dgap_freq is np.inf, it will be empty.
    gap : float
        Final duality gap. Returned only if return_gap is True.

    References
    ----------
    .. footbibliography::
    """
    # NOTE(review): active_set_size is accepted but never used in this body;
    # presumably kept for API symmetry with mixed_norm_solver -- confirm.
    n_sensors, n_times = M.shape
    n_sensors, n_sources = G.shape
    n_positions = n_sources // n_orient
    tstep = np.atleast_1d(tstep)
    wsize = np.atleast_1d(wsize)
    if len(tstep) != len(wsize):
        raise ValueError('The same number of window sizes and steps must be '
                         'passed. Got tstep = %s and wsize = %s' %
                         (tstep, wsize))
    # one STFT dictionary per (wsize, tstep) pair
    n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs, n_times)
    phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
    # Lipschitz constants: per-column squared norms for fixed orientation,
    # spectral norm of each position's local Gram matrix otherwise
    if n_orient == 1:
        lc = np.sum(G * G, axis=0)
    else:
        lc = np.empty(n_positions)
        for j in range(n_positions):
            G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
            lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
    logger.info("Using block coordinate descent with active set approach")
    # NOTE(review): verbose=None is passed down instead of the caller's
    # verbose; the @verbose decorator has already set the logging level,
    # but confirm this silencing of the inner solver is intentional.
    X, Z, active_set, E, gap = _tf_mixed_norm_solver_bcd_active_set(
        M, G, alpha_space, alpha_time, lc, phi, phiT,
        Z_init=None, n_orient=n_orient, maxit=maxit, tol=tol,
        dgap_freq=dgap_freq, verbose=None)
    # remove the amplitude bias introduced by the sparsity penalties
    if np.any(active_set) and debias:
        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
        X *= bias[:, np.newaxis]
    if return_gap:
        return X, active_set, E, gap
    else:
        return X, active_set, E
@verbose
def iterative_tf_mixed_norm_solver(M, G, alpha_space, alpha_time,
                                   n_tfmxne_iter, wsize=64, tstep=4,
                                   maxit=3000, tol=1e-8, debias=True,
                                   n_orient=1, dgap_freq=10, verbose=None):
    """Solve TF L0.5/L1 + L0.5 inverse problem with BCD + active set approach.

    Parameters
    ----------
    M: array, shape (n_sensors, n_times)
        The data.
    G: array, shape (n_sensors, n_dipoles)
        The gain matrix a.k.a. lead field.
    alpha_space: float
        The spatial regularization parameter. The higher it is the less there
        will be active sources.
    alpha_time : float
        The temporal regularization parameter. The higher it is the smoother
        will be the estimated time series. 0 means no temporal regularization,
        a.k.a. irMxNE.
    n_tfmxne_iter : int
        Number of TF-MxNE iterations. If > 1, iterative reweighting is applied.
    wsize : int or array-like
        Length of the STFT window in samples (must be a multiple of 4).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep) and each entry of wsize must be a multiple
        of 4.
    tstep : int or array-like
        Step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep), and each entry of tstep must be a multiple
        of 2 and divide the corresponding entry of wsize.
    maxit : int
        The maximum number of iterations for each TF-MxNE problem.
    tol : float
        If absolute difference between estimates at 2 successive iterations
        is lower than tol, the convergence is reached. Also used as criterion
        on duality gap for each TF-MxNE problem.
    debias : bool
        Debias source estimates.
    n_orient : int
        The number of orientation (1 : fixed or 3 : free or loose).
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut-logging>` for more).

    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates.
    active_set : array
        The mask of active sources.
    E : list
        The value of the objective function over iterations.
    """
    n_sensors, n_times = M.shape
    n_sources = G.shape[1]
    n_positions = n_sources // n_orient
    tstep = np.atleast_1d(tstep)
    wsize = np.atleast_1d(wsize)
    if len(tstep) != len(wsize):
        raise ValueError('The same number of window sizes and steps must be '
                         'passed. Got tstep = %s and wsize = %s' %
                         (tstep, wsize))
    # build the TF dictionary operators (one per (wsize, tstep) pair)
    n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs, n_times)
    phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
    # Lipschitz constants for the BCD updates
    if n_orient == 1:
        lc = np.sum(G * G, axis=0)
    else:
        lc = np.empty(n_positions)
        for j in range(n_positions):
            G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
            lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)

    # space and time penalties, and inverse of their derivatives:
    def g_space(Z):
        return np.sqrt(np.sqrt(phi.norm(Z, ord=2).reshape(
            -1, n_orient).sum(axis=1)))

    def g_space_prime_inv(Z):
        return 2. * g_space(Z)

    def g_time(Z):
        return np.sqrt(np.sqrt(np.sum((np.abs(Z) ** 2.).reshape(
            (n_orient, -1), order='F'), axis=0)).reshape(
            (-1, Z.shape[1]), order='F'))

    def g_time_prime_inv(Z):
        return 2. * g_time(Z)

    E = list()
    active_set = np.ones(n_sources, dtype=bool)
    Z = np.zeros((n_sources, phi.n_coefs.sum()), dtype=np.complex128)
    for k in range(n_tfmxne_iter):
        active_set_0 = active_set.copy()
        Z0 = Z.copy()
        # first pass is unweighted; later passes reweight by the previous
        # solution (inverse of the penalty derivatives)
        if k == 0:
            w_space = None
            w_time = None
        else:
            w_space = 1. / g_space_prime_inv(Z)
            w_time = g_time_prime_inv(Z)
            # sign trick: entries with zero derivative get weight 0 (instead
            # of a division by zero) so those coefficients stay at zero
            w_time[w_time == 0.0] = -1.
            w_time = 1. / w_time
            w_time[w_time < 0.0] = 0.0
        X, Z, active_set_, E_, _ = _tf_mixed_norm_solver_bcd_active_set(
            M, G[:, active_set], alpha_space, alpha_time,
            lc[active_set[::n_orient]], phi, phiT,
            Z_init=Z, w_space=w_space, w_time=w_time, n_orient=n_orient,
            maxit=maxit, tol=tol, dgap_freq=dgap_freq, verbose=None)
        # map the restricted active set back into the full source space
        active_set[active_set] = active_set_
        if active_set.sum() > 0:
            l21_penalty = np.sum(g_space(Z.copy()))
            l1_penalty = phi.norm(g_time(Z.copy()), ord=1).sum()
            p_obj = (0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X),
                     'fro') ** 2. + alpha_space * l21_penalty +
                     alpha_time * l1_penalty)
            E.append(p_obj)
            logger.info('Iteration %d: active set size=%d, E=%f' % (
                k + 1, active_set.sum() / n_orient, p_obj))
            # Check convergence
            if np.array_equal(active_set, active_set_0):
                max_diff = np.amax(np.abs(Z - Z0))
                if (max_diff < tol):
                    # consistency fix: was print(); every other message in
                    # this function goes through logger
                    logger.info('Convergence reached after %d reweightings!'
                                % k)
                    break
        else:
            # everything was shrunk to zero: objective is just data power
            p_obj = 0.5 * np.linalg.norm(M) ** 2.
            E.append(p_obj)
            logger.info('Iteration %d: as_size=%d, E=%f' % (
                k + 1, active_set.sum() / n_orient, p_obj))
            break
    # remove the amplitude bias introduced by the sparsity penalties
    if debias:
        if active_set.sum() > 0:
            bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
            X *= bias[:, np.newaxis]
    return X, active_set, E
| {
"repo_name": "mne-tools/mne-python",
"path": "mne/inverse_sparse/mxne_optim.py",
"copies": "2",
"size": "60067",
"license": "bsd-3-clause",
"hash": -6502757440756734000,
"line_mean": 34.7967818832,
"line_max": 79,
"alpha_frac": 0.5312234671,
"autogenerated": false,
"ratio": 3.2664636467453367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9797687113845337,
"avg_score": 0,
"num_lines": 1678
} |
import numpy as np
from scipy import linalg
from ..source_estimate import SourceEstimate, _BaseSourceEstimate, _make_stc
from ..minimum_norm.inverse import (combine_xyz, _prepare_forward,
_check_reference, _log_exp_var)
from ..forward import is_fixed_orient
from ..io.pick import pick_channels_evoked
from ..io.proj import deactivate_proj
from ..utils import logger, verbose, _check_depth, _check_option, sum_squared
from ..dipole import Dipole
from .mxne_optim import (mixed_norm_solver, iterative_mixed_norm_solver, _Phi,
tf_mixed_norm_solver, iterative_tf_mixed_norm_solver,
norm_l2inf, norm_epsilon_inf)
def _check_ori(pick_ori, forward):
    """Validate ``pick_ori`` against the forward solution."""
    # Only None and 'vector' are accepted here.
    _check_option('pick_ori', pick_ori, [None, 'vector'])
    # A vector orientation is meaningless for a fixed-orientation forward.
    if pick_ori != 'vector':
        return
    if is_fixed_orient(forward):
        raise ValueError('pick_ori="vector" cannot be combined with a fixed '
                         'orientation forward solution.')
def _prepare_weights(forward, gain, source_weighting, weights, weights_min):
    """Apply sparsity weights to the gain and source weighting.

    Returns the (possibly column-masked) gain, the updated source weighting
    (mutated in place) and the boolean column mask, or None if no masking
    was applied.
    """
    mask = None
    # A source estimate is collapsed to one scalar weight per source
    # (maximum absolute value over time).
    if isinstance(weights, _BaseSourceEstimate):
        weights = np.max(np.abs(weights.data), axis=1)
    weights_max = np.max(weights)
    if weights_min > weights_max:
        raise ValueError('weights_min > weights_max (%s > %s)' %
                         (weights_min, weights_max))
    # Normalize so the largest weight is 1.
    weights_min = weights_min / weights_max
    weights = weights / weights_max
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    # Duplicate each positional weight for every dipole at that position.
    weights = np.repeat(weights, n_dip_per_pos)
    if len(weights) != gain.shape[1]:
        raise ValueError('weights do not have the correct dimension '
                         ' (%d != %d)' % (len(weights), gain.shape[1]))
    # Scale in place so callers holding references see the update.
    if source_weighting.ndim == 1:
        source_weighting *= weights
    else:
        source_weighting *= weights[:, None]
    gain *= weights[np.newaxis, :]
    if weights_min is not None:
        # Drop gain columns whose weight is below the threshold.
        mask = weights > weights_min
        gain = gain[:, mask]
        n_sources = mask.sum() // n_dip_per_pos
        logger.info("Reducing source space to %d sources" % n_sources)
    return gain, source_weighting, mask
def _prepare_gain(forward, info, noise_cov, pca, depth, loose, rank,
                  weights=None, weights_min=None):
    """Whiten the forward operator and optionally apply sparsity weights."""
    depth = _check_depth(depth, 'depth_sparse')
    prepared = _prepare_forward(forward, info, noise_cov, 'auto', loose, rank,
                                pca, use_cps=True, **depth)
    # Only a subset of _prepare_forward's outputs is needed here.
    forward, gain_info, gain = prepared[0], prepared[1], prepared[2]
    source_weighting, whitener = prepared[5], prepared[8]
    mask = None
    if weights is not None:
        gain, source_weighting, mask = _prepare_weights(
            forward, gain, source_weighting, weights, weights_min)
    return forward, gain, gain_info, whitener, source_weighting, mask
def _reapply_source_weighting(X, source_weighting, active_set):
X *= source_weighting[active_set][:, None]
return X
def _compute_residual(forward, evoked, X, active_set, info):
    """Subtract the sensor-space prediction of the active sources
    (forward['sol'] @ X, with active projections re-applied) from a copy of
    the evoked data restricted to the channels in ``info``."""
    # OK, picking based on row_names is safe
    sel = [forward['sol']['row_names'].index(c) for c in info['ch_names']]
    residual = evoked.copy()
    residual = pick_channels_evoked(residual, include=info['ch_names'])
    # predicted sensor data from the active sources only
    r_tmp = residual.copy()
    r_tmp.data = np.dot(forward['sol']['data'][sel, :][:, active_set], X)
    # Take care of proj
    active_projs = list()
    non_active_projs = list()
    for p in evoked.info['projs']:
        if p['active']:
            active_projs.append(p)
        else:
            non_active_projs.append(p)
    if len(active_projs) > 0:
        # re-apply the projections that were active on the measured data so
        # prediction and data live in the same (projected) space, then
        # restore the inactive projs on the prediction's info
        r_tmp.info['projs'] = deactivate_proj(active_projs, copy=True,
                                              verbose=False)
        r_tmp.apply_proj(verbose=False)
        r_tmp.add_proj(non_active_projs, remove_existing=False, verbose=False)
    residual.data -= r_tmp.data
    return residual
@verbose
def _make_sparse_stc(X, active_set, forward, tmin, tstep,
                     active_is_idx=False, pick_ori=None, verbose=None):
    """Build a (possibly vector) source estimate from sparse solver output.

    ``active_set`` is either a boolean mask over all sources or, when
    ``active_is_idx`` is True, an array of source indices.
    """
    source_nn = forward['source_nn']
    vector = False
    if not is_fixed_orient(forward):
        if pick_ori != 'vector':
            # collapse the 3 orientations per position into one amplitude
            logger.info('combining the current components...')
            X = combine_xyz(X)
        else:
            vector = True
            source_nn = np.reshape(source_nn, (-1, 3, 3))
    if not active_is_idx:
        active_idx = np.where(active_set)[0]
    else:
        active_idx = active_set
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    if n_dip_per_pos > 1:
        # dipole indices -> unique position indices
        active_idx = np.unique(active_idx // n_dip_per_pos)
    src = forward['src']
    vertices = []
    # walk the source spaces, assigning each active position to the source
    # space whose vertex range contains it (offsets tracked by
    # n_points_so_far)
    n_points_so_far = 0
    for this_src in src:
        this_n_points_so_far = n_points_so_far + len(this_src['vertno'])
        this_active_idx = active_idx[(n_points_so_far <= active_idx) &
                                     (active_idx < this_n_points_so_far)]
        this_active_idx -= n_points_so_far
        this_vertno = this_src['vertno'][this_active_idx]
        n_points_so_far = this_n_points_so_far
        vertices.append(this_vertno)
    source_nn = source_nn[active_idx]
    return _make_stc(
        X, vertices, src.kind, tmin, tstep, src[0]['subject_his_id'],
        vector=vector, source_nn=source_nn)
def _split_gof(M, X, gain):
# parse out the variance explained using an orthogonal basis
# assuming x is estimated using elements of gain, with residual res
# along the first axis
assert M.ndim == X.ndim == gain.ndim == 2, (M.ndim, X.ndim, gain.ndim)
assert gain.shape == (M.shape[0], X.shape[0])
assert M.shape[1] == X.shape[1]
norm = (M * M.conj()).real.sum(0, keepdims=True)
norm[norm == 0] = np.inf
M_est = gain @ X
assert M.shape == M_est.shape
res = M - M_est
assert gain.shape[0] == M.shape[0], (gain.shape, M.shape)
# find an orthonormal basis for our matrices that spans the actual data
U, s, _ = np.linalg.svd(gain, full_matrices=False)
U = U[:, s >= s[0] * 1e-6]
# the part that gets explained
fit_orth = U.T @ M
# the part that got over-explained (landed in residual)
res_orth = U.T @ res
# determine the weights by projecting each one onto this basis
w = (U.T @ gain)[:, :, np.newaxis] * X
w_norm = np.linalg.norm(w, axis=1, keepdims=True)
w_norm[w_norm == 0] = 1.
w /= w_norm
# our weights are now unit-norm positive (will presrve power)
fit_back = np.linalg.norm(fit_orth[:, np.newaxis] * w, axis=0) ** 2
res_back = np.linalg.norm(res_orth[:, np.newaxis] * w, axis=0) ** 2
# and the resulting goodness of fits
gof_back = 100 * (fit_back - res_back) / norm
assert gof_back.shape == X.shape, (gof_back.shape, X.shape)
return gof_back
@verbose
def _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M,
                         gain_active, active_is_idx=False,
                         verbose=None):
    """Turn sparse solver output into a list of Dipole objects.

    One Dipole per active position, each carrying the time series of
    position (fixed), amplitude, orientation, and its share of the goodness
    of fit against the whitened data ``M``.
    """
    times = tmin + tstep * np.arange(X.shape[1])
    if not active_is_idx:
        active_idx = np.where(active_set)[0]
    else:
        active_idx = active_set
    # Compute the GOF split amongst the dipoles
    assert M.shape == (gain_active.shape[0], len(times))
    assert gain_active.shape[1] == len(active_idx) == X.shape[0]
    gof_split = _split_gof(M, X, gain_active)
    assert gof_split.shape == (len(active_idx), len(times))
    assert X.shape[0] in (len(active_idx), 3 * len(active_idx))
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    if n_dip_per_pos > 1:
        # collapse dipole indices to unique positions, preserving the
        # original ordering, and sum the GOF over the 3 orientations
        active_idx = active_idx // n_dip_per_pos
        _, keep = np.unique(active_idx, return_index=True)
        keep.sort()  # maintain old order
        active_idx = active_idx[keep]
        gof_split.shape = (len(active_idx), n_dip_per_pos, len(times))
        gof_split = gof_split.sum(1)
        assert (gof_split < 100).all()
    assert gof_split.shape == (len(active_idx), len(times))
    dipoles = []
    for k, i_dip in enumerate(active_idx):
        # constant position repeated over time
        i_pos = forward['source_rr'][i_dip][np.newaxis, :]
        i_pos = i_pos.repeat(len(times), axis=0)
        X_ = X[k * n_dip_per_pos: (k + 1) * n_dip_per_pos]
        if n_dip_per_pos == 1:
            amplitude = X_[0]
            i_ori = forward['source_nn'][i_dip][np.newaxis, :]
            i_ori = i_ori.repeat(len(times), axis=0)
        else:
            if forward['surf_ori']:
                # rotate back from surface-based to head coordinates
                X_ = np.dot(forward['source_nn'][
                    i_dip * n_dip_per_pos:(i_dip + 1) * n_dip_per_pos].T, X_)
            amplitude = np.linalg.norm(X_, axis=0)
            # orientation is the unit vector; zero where amplitude is zero
            i_ori = np.zeros((len(times), 3))
            i_ori[amplitude > 0.] = (X_[:, amplitude > 0.] /
                                     amplitude[amplitude > 0.]).T
        dipoles.append(Dipole(times, i_pos, amplitude, i_ori, gof_split[k]))
    return dipoles
@verbose
def make_stc_from_dipoles(dipoles, src, verbose=None):
    """Convert a list of spatio-temporal dipoles into a SourceEstimate.

    Parameters
    ----------
    dipoles : Dipole | list of instances of Dipole
        The dipoles to convert.
    src : instance of SourceSpaces
        The source space used to generate the forward operator.
    %(verbose)s

    Returns
    -------
    stc : SourceEstimate
        The source estimate.
    """
    logger.info('Converting dipoles into a SourceEstimate.')
    if isinstance(dipoles, Dipole):
        dipoles = [dipoles]
    if not isinstance(dipoles, list):
        raise ValueError('Dipoles must be an instance of Dipole or '
                         'a list of instances of Dipole. '
                         'Got %s!' % type(dipoles))
    tmin = dipoles[0].times[0]
    tstep = dipoles[0].times[1] - tmin
    X = np.zeros((len(dipoles), len(dipoles[0].times)))
    # stack all source positions (both hemispheres) in order
    source_rr = np.concatenate([_src['rr'][_src['vertno'], :] for _src in src],
                               axis=0)
    n_lh_points = len(src[0]['vertno'])
    lh_vertno = list()
    rh_vertno = list()
    for i in range(len(dipoles)):
        if not np.all(dipoles[i].pos == dipoles[i].pos[0]):
            raise ValueError('Only dipoles with fixed position over time '
                             'are supported!')
        X[i] = dipoles[i].amplitude
        # NOTE(review): exact float equality match -- assumes the dipole
        # positions were taken verbatim from src['rr']; confirm upstream.
        idx = np.all(source_rr == dipoles[i].pos[0], axis=1)
        idx = np.where(idx)[0][0]
        # indices below n_lh_points belong to the first (left) source space
        if idx < n_lh_points:
            lh_vertno.append(src[0]['vertno'][idx])
        else:
            rh_vertno.append(src[1]['vertno'][idx - n_lh_points])
    vertices = [np.array(lh_vertno).astype(int),
                np.array(rh_vertno).astype(int)]
    stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep,
                         subject=src._subject)
    logger.info('[done]')
    return stc
@verbose
def mixed_norm(evoked, forward, noise_cov, alpha, loose='auto', depth=0.8,
               maxit=3000, tol=1e-4, active_set_size=10,
               debias=True, time_pca=True, weights=None, weights_min=0.,
               solver='auto', n_mxne_iter=1, return_residual=False,
               return_as_dipoles=False, dgap_freq=10, rank=None, pick_ori=None,
               verbose=None):
    """Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE).

    Compute L1/L2 mixed-norm solution [1]_ or L0.5/L2 [2]_ mixed-norm
    solution on evoked data.

    Parameters
    ----------
    evoked : instance of Evoked or list of instances of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    alpha : float in range [0, 100)
        Regularization parameter. 0 means no regularization, 100 would give 0
        active dipole.
    %(loose)s
    %(depth)s
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    active_set_size : int | None
        Size of active set increment. If None, no active set strategy is used.
    debias : bool
        Remove coefficient amplitude bias due to L1 penalty.
    time_pca : bool or int
        If True the rank of the concatenated epochs is reduced to
        its true dimension. If is 'int' the rank is limited to this value.
    weights : None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None, a
        1d array with shape (n_sources,), or a SourceEstimate (e.g. obtained
        with wMNE, dSPM, or fMRI).
    weights_min : float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    solver : 'prox' | 'cd' | 'bcd' | 'auto'
        The algorithm to use for the optimization. 'prox' stands for
        proximal iterations using the FISTA algorithm, 'cd' uses
        coordinate descent, and 'bcd' applies block coordinate descent.
        'cd' is only available for fixed orientation.
    n_mxne_iter : int
        The number of MxNE iterations. If > 1, iterative reweighting
        is applied.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    return_as_dipoles : bool
        If True, the sources are returned as a list of Dipole instances.
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations. Ignored if
        solver is 'cd'.
    %(rank_None)s

        .. versionadded:: 0.18
    %(pick_ori)s
    %(verbose)s

    Returns
    -------
    stc : SourceEstimate | list of SourceEstimate
        Source time courses for each evoked data passed as input.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.

    See Also
    --------
    tf_mixed_norm

    References
    ----------
    .. [1] A. Gramfort, M. Kowalski, M. Hämäläinen,
       "Mixed-norm estimates for the M/EEG inverse problem using accelerated
       gradient methods", Physics in Medicine and Biology, 2012.
       https://doi.org/10.1088/0031-9155/57/7/1937
    .. [2] D. Strohmeier, Y. Bekhti, J. Haueisen, A. Gramfort,
       "The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal
       MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging,
       Volume 35 (10), pp. 2218-2228, 2016.
    """
    if not (0. <= alpha < 100.):
        raise ValueError('alpha must be in [0, 100). '
                         'Got alpha = %s' % alpha)
    if n_mxne_iter < 1:
        raise ValueError('MxNE has to be computed at least 1 time. '
                         'Requires n_mxne_iter >= 1, got %d' % n_mxne_iter)
    if dgap_freq <= 0.:
        raise ValueError('dgap_freq must be a positive integer.'
                         ' Got dgap_freq = %s' % dgap_freq)
    pca = True
    if not isinstance(evoked, list):
        evoked = [evoked]
    _check_reference(evoked[0])
    # all evoked datasets must share the same channel set
    all_ch_names = evoked[0].ch_names
    if not all(all_ch_names == evoked[i].ch_names
               for i in range(1, len(evoked))):
        raise Exception('All the datasets must have the same good channels.')
    forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked[0].info, noise_cov, pca, depth, loose, rank,
        weights, weights_min)
    _check_ori(pick_ori, forward)
    sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
    # datasets are concatenated along time
    M = np.concatenate([e.data[sel] for e in evoked], axis=1)
    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)
    if time_pca:
        # solve in the (possibly truncated) SVD time basis; Vh is used below
        # to rotate the solution back
        U, s, Vh = linalg.svd(M, full_matrices=False)
        if not isinstance(time_pca, bool) and isinstance(time_pca, int):
            U = U[:, :time_pca]
            s = s[:time_pca]
            Vh = Vh[:time_pca]
        M = U * s
    # Scaling to make setting of tol and alpha easy
    tol *= sum_squared(M)
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    # alpha is expressed as a percentage of alpha_max (the smallest value
    # that zeroes out all sources)
    alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting /= alpha_max
    if n_mxne_iter == 1:
        X, active_set, E = mixed_norm_solver(
            M, gain, alpha, maxit=maxit, tol=tol,
            active_set_size=active_set_size, n_orient=n_dip_per_pos,
            debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)
    else:
        X, active_set, E = iterative_mixed_norm_solver(
            M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,
            n_orient=n_dip_per_pos, active_set_size=active_set_size,
            debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)
    if time_pca:
        # rotate back from the SVD time basis to real time
        X = np.dot(X, Vh)
        M = np.dot(M, Vh)
    gain_active = gain[:, active_set]
    if mask is not None:
        # expand the active set back to the unmasked source space
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp
    if active_set.sum() == 0:
        raise Exception("No active dipoles found. alpha is too big.")
    # Reapply weights to have correct unit
    X = _reapply_source_weighting(X, source_weighting, active_set)
    source_weighting[source_weighting == 0] = 1  # zeros
    gain_active /= source_weighting[active_set]
    del source_weighting
    M_estimate = np.dot(gain_active, X)
    outs = list()
    residual = list()
    cnt = 0
    # split the concatenated solution back into one output per evoked
    for e in evoked:
        tmin = e.times[0]
        tstep = 1.0 / e.info['sfreq']
        Xe = X[:, cnt:(cnt + len(e.times))]
        if return_as_dipoles:
            out = _make_dipoles_sparse(
                Xe, active_set, forward, tmin, tstep,
                M[:, cnt:(cnt + len(e.times))],
                gain_active)
        else:
            out = _make_sparse_stc(
                Xe, active_set, forward, tmin, tstep, pick_ori=pick_ori)
        outs.append(out)
        cnt += len(e.times)
        if return_residual:
            residual.append(_compute_residual(forward, e, Xe, active_set,
                                              gain_info))
    _log_exp_var(M, M_estimate, prefix='')
    logger.info('[done]')
    # unwrap single-dataset results for convenience
    if len(outs) == 1:
        out = outs[0]
        if return_residual:
            residual = residual[0]
    else:
        out = outs
    if return_residual:
        out = out, residual
    return out
def _window_evoked(evoked, size):
"""Window evoked (size in seconds)."""
if isinstance(size, (float, int)):
lsize = rsize = float(size)
else:
lsize, rsize = size
evoked = evoked.copy()
sfreq = float(evoked.info['sfreq'])
lsize = int(lsize * sfreq)
rsize = int(rsize * sfreq)
lhann = np.hanning(lsize * 2)[:lsize]
rhann = np.hanning(rsize * 2)[-rsize:]
window = np.r_[lhann, np.ones(len(evoked.times) - lsize - rsize), rhann]
evoked.data *= window[None, :]
return evoked
@verbose
def tf_mixed_norm(evoked, forward, noise_cov,
                  loose='auto', depth=0.8, maxit=3000,
                  tol=1e-4, weights=None, weights_min=0., pca=True,
                  debias=True, wsize=64, tstep=4, window=0.02,
                  return_residual=False, return_as_dipoles=False, alpha=None,
                  l1_ratio=None, dgap_freq=10, rank=None, pick_ori=None,
                  n_tfmxne_iter=1, verbose=None):
    """Time-Frequency Mixed-norm estimate (TF-MxNE).

    Compute L1/L2 + L1 mixed-norm solution on time-frequency
    dictionary. Works with evoked data
    :footcite:`GramfortEtAl2013,GramfortEtAl2011`.

    Parameters
    ----------
    evoked : instance of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    %(loose)s
    %(depth)s
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    weights : None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None or
        1d array of length n_sources or a SourceEstimate e.g. obtained
        with wMNE or dSPM or fMRI.
    weights_min : float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    pca : bool
        If True the rank of the data is reduced to true dimension.
    debias : bool
        Remove coefficient amplitude bias due to L1 penalty.
    wsize : int or array-like
        Length of the STFT window in samples (must be a multiple of 4).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep) and each entry of wsize must be a multiple
        of 4. See :footcite:`BekhtiEtAl2016`.
    tstep : int or array-like
        Step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep), and each entry of tstep must be a multiple
        of 2 and divide the corresponding entry of wsize. See
        :footcite:`BekhtiEtAl2016`.
    window : float or (float, float)
        Length of time window used to take care of edge artifacts in seconds.
        It can be one float or float if the values are different for left
        and right window length.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    return_as_dipoles : bool
        If True, the sources are returned as a list of Dipole instances.
    alpha : float in [0, 100) or None
        Overall regularization parameter.
        If alpha and l1_ratio are not None, alpha_space and alpha_time are
        overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
        * l1_ratio. 0 means no regularization, 100 would give 0 active dipole.
    l1_ratio : float in [0, 1] or None
        Proportion of temporal regularization.
        If l1_ratio and alpha are not None, alpha_space and alpha_time are
        overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
        * l1_ratio. 0 means no time regularization a.k.a. MxNE.
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations.
    %(rank_None)s

        .. versionadded:: 0.18
    %(pick_ori)s
    n_tfmxne_iter : int
        Number of TF-MxNE iterations. If > 1, iterative reweighting is applied.
    %(verbose)s

    Returns
    -------
    stc : instance of SourceEstimate
        Source time courses.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.

    See Also
    --------
    mixed_norm

    References
    ----------
    .. footbibliography::
    """
    _check_reference(evoked)
    all_ch_names = evoked.ch_names
    info = evoked.info
    # NOTE(review): alpha=None (the parameter default) fails this comparison
    # with a TypeError; presumably callers must always pass alpha and
    # l1_ratio together -- confirm.
    if not (0. <= alpha < 100.):
        raise ValueError('alpha must be in [0, 100). '
                         'Got alpha = %s' % alpha)
    if not (0. <= l1_ratio <= 1.):
        raise ValueError('l1_ratio must be in range [0, 1].'
                         ' Got l1_ratio = %s' % l1_ratio)
    # split the overall regularization between space and time
    alpha_space = alpha * (1. - l1_ratio)
    alpha_time = alpha * l1_ratio
    if n_tfmxne_iter < 1:
        raise ValueError('TF-MxNE has to be computed at least 1 time. '
                         'Requires n_tfmxne_iter >= 1, got %s' % n_tfmxne_iter)
    if dgap_freq <= 0.:
        raise ValueError('dgap_freq must be a positive integer.'
                         ' Got dgap_freq = %s' % dgap_freq)
    tstep = np.atleast_1d(tstep)
    wsize = np.atleast_1d(wsize)
    if len(tstep) != len(wsize):
        raise ValueError('The same number of window sizes and steps must be '
                         'passed. Got tstep = %s and wsize = %s' %
                         (tstep, wsize))
    forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked.info, noise_cov, pca, depth, loose, rank,
        weights, weights_min)
    _check_ori(pick_ori, forward)
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    # taper the edges to limit STFT edge artifacts
    if window is not None:
        evoked = _window_evoked(evoked, window)
    sel = [all_ch_names.index(name) for name in gain_info["ch_names"]]
    M = evoked.data[sel]
    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)
    n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    # NOTE(review): _Phi is constructed with 3 arguments here but with an
    # extra n_times argument in the solver module -- confirm the signature
    # matches this version of _Phi.
    phi = _Phi(wsize, tstep, n_coefs)
    # Scaling to make setting of tol and alpha easy
    tol *= sum_squared(M)
    # alpha is expressed as a percentage of alpha_max
    alpha_max = norm_epsilon_inf(gain, M, phi, l1_ratio, n_dip_per_pos)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting /= alpha_max
    if n_tfmxne_iter == 1:
        X, active_set, E = tf_mixed_norm_solver(
            M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep,
            maxit=maxit, tol=tol, verbose=verbose, n_orient=n_dip_per_pos,
            dgap_freq=dgap_freq, debias=debias)
    else:
        X, active_set, E = iterative_tf_mixed_norm_solver(
            M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep,
            n_tfmxne_iter=n_tfmxne_iter, maxit=maxit, tol=tol, verbose=verbose,
            n_orient=n_dip_per_pos, dgap_freq=dgap_freq, debias=debias)
    if active_set.sum() == 0:
        raise Exception("No active dipoles found. "
                        "alpha_space/alpha_time are too big.")
    # Compute estimated whitened sensor data for each dipole (dip, ch, time)
    gain_active = gain[:, active_set]
    if mask is not None:
        # expand the active set back to the unmasked source space
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp
    X = _reapply_source_weighting(X, source_weighting, active_set)
    gain_active /= source_weighting[active_set]
    if return_residual:
        residual = _compute_residual(
            forward, evoked, X, active_set, gain_info)
    if return_as_dipoles:
        out = _make_dipoles_sparse(
            X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'],
            M, gain_active)
    else:
        out = _make_sparse_stc(
            X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'],
            pick_ori=pick_ori)
    logger.info('[done]')
    if return_residual:
        out = out, residual
    return out
| {
"repo_name": "olafhauk/mne-python",
"path": "mne/inverse_sparse/mxne_inverse.py",
"copies": "4",
"size": "26416",
"license": "bsd-3-clause",
"hash": 3104678548255727000,
"line_mean": 36.2014084507,
"line_max": 79,
"alpha_frac": 0.5957293757,
"autogenerated": false,
"ratio": 3.379782469609725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 710
} |
import numpy as np
from ..source_estimate import SourceEstimate, _BaseSourceEstimate, _make_stc
from ..minimum_norm.inverse import (combine_xyz, _prepare_forward,
_check_reference, _log_exp_var)
from ..forward import is_fixed_orient
from ..io.pick import pick_channels_evoked
from ..io.proj import deactivate_proj
from ..utils import logger, verbose, _check_depth, _check_option, sum_squared
from ..dipole import Dipole
from .mxne_optim import (mixed_norm_solver, iterative_mixed_norm_solver, _Phi,
tf_mixed_norm_solver, iterative_tf_mixed_norm_solver,
norm_l2inf, norm_epsilon_inf)
def _check_ori(pick_ori, forward):
    """Validate ``pick_ori`` against the forward solution orientation."""
    _check_option('pick_ori', pick_ori, [None, 'vector'])
    if pick_ori == 'vector':
        # vector output requires free-orientation sources
        if is_fixed_orient(forward):
            raise ValueError('pick_ori="vector" cannot be combined with a fixed '
                             'orientation forward solution.')
def _prepare_weights(forward, gain, source_weighting, weights, weights_min):
    """Apply a priori source weights to the gain matrix.

    Normalizes the weights to a maximum of 1, multiplies them into both
    ``gain`` and ``source_weighting`` (in place for the latter), and, when
    ``weights_min`` is given, masks out sources whose normalized weight
    falls at or below the threshold.  Returns the (possibly column-reduced)
    gain, the updated source weighting, and the boolean mask (or None).
    """
    mask = None
    if isinstance(weights, _BaseSourceEstimate):
        # Collapse an STC to one scalar weight per source (max over time)
        weights = np.max(np.abs(weights.data), axis=1)
    weights_max = np.max(weights)
    if weights_min > weights_max:
        raise ValueError('weights_min > weights_max (%s > %s)' %
                         (weights_min, weights_max))
    # Normalize so that the largest weight is exactly 1
    weights_min = weights_min / weights_max
    weights = weights / weights_max
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    # Repeat each positional weight for every dipole orientation
    weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T)
    if len(weights) != gain.shape[1]:
        raise ValueError('weights do not have the correct dimension '
                         ' (%d != %d)' % (len(weights), gain.shape[1]))
    if len(source_weighting.shape) == 1:
        source_weighting *= weights
    else:
        source_weighting *= weights[:, None]
    gain *= weights[None, :]
    if weights_min is not None:
        # Keep only sources with normalized weight strictly above threshold
        mask = (weights > weights_min)
        gain = gain[:, mask]
        n_sources = np.sum(mask) // n_dip_per_pos
        logger.info("Reducing source space to %d sources" % n_sources)
    return gain, source_weighting, mask
def _prepare_gain(forward, info, noise_cov, pca, depth, loose, rank,
                  weights=None, weights_min=None):
    """Prepare the whitened, depth-weighted gain matrix for sparse solvers.

    Returns the (possibly re-oriented) forward, the gain matrix, the gain
    info, the whitener, the source weighting, and the weight mask (None
    when no a priori weights are given).
    """
    depth = _check_depth(depth, 'depth_sparse')
    (forward, gain_info, gain, _, _, source_weighting,
     _, _, whitener) = _prepare_forward(
        forward, info, noise_cov, 'auto', loose, rank, pca,
        use_cps=True, **depth)
    mask = None
    if weights is not None:
        gain, source_weighting, mask = _prepare_weights(
            forward, gain, source_weighting, weights, weights_min)
    return forward, gain, gain_info, whitener, source_weighting, mask
def _reapply_source_weighting(X, source_weighting, active_set):
X *= source_weighting[active_set][:, None]
return X
def _compute_residual(forward, evoked, X, active_set, info):
    """Compute the evoked data left unexplained by the source estimate X."""
    # OK, picking based on row_names is safe
    sel = [forward['sol']['row_names'].index(c) for c in info['ch_names']]
    residual = evoked.copy()
    residual = pick_channels_evoked(residual, include=info['ch_names'])
    r_tmp = residual.copy()
    # Predicted (unwhitened) sensor data from the active sources only
    r_tmp.data = np.dot(forward['sol']['data'][sel, :][:, active_set], X)
    # Take care of proj
    active_projs = list()
    non_active_projs = list()
    for p in evoked.info['projs']:
        if p['active']:
            active_projs.append(p)
        else:
            non_active_projs.append(p)
    if len(active_projs) > 0:
        # Apply the projections that were active on the measured data to the
        # prediction as well, so the subtraction happens in the same space;
        # inactive projs are then restored on the temporary copy
        r_tmp.info['projs'] = deactivate_proj(active_projs, copy=True,
                                              verbose=False)
        r_tmp.apply_proj(verbose=False)
        r_tmp.add_proj(non_active_projs, remove_existing=False, verbose=False)
    residual.data -= r_tmp.data
    return residual
@verbose
def _make_sparse_stc(X, active_set, forward, tmin, tstep,
                     active_is_idx=False, pick_ori=None, verbose=None):
    """Build a (possibly vector) sparse source estimate from solver output."""
    source_nn = forward['source_nn']
    vector = False
    if not is_fixed_orient(forward):
        if pick_ori != 'vector':
            logger.info('combining the current components...')
            X = combine_xyz(X)
        else:
            vector = True
            source_nn = np.reshape(source_nn, (-1, 3, 3))
    if not active_is_idx:
        active_idx = np.where(active_set)[0]
    else:
        active_idx = active_set
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    if n_dip_per_pos > 1:
        # Collapse per-orientation indices down to unique source positions
        active_idx = np.unique(active_idx // n_dip_per_pos)
    src = forward['src']
    vertices = []
    n_points_so_far = 0
    # Partition the global active indices across the source spaces
    # (e.g. left/right hemisphere), converting to per-space vertex numbers
    for this_src in src:
        this_n_points_so_far = n_points_so_far + len(this_src['vertno'])
        this_active_idx = active_idx[(n_points_so_far <= active_idx) &
                                     (active_idx < this_n_points_so_far)]
        this_active_idx -= n_points_so_far
        this_vertno = this_src['vertno'][this_active_idx]
        n_points_so_far = this_n_points_so_far
        vertices.append(this_vertno)
    source_nn = source_nn[active_idx]
    return _make_stc(
        X, vertices, src.kind, tmin, tstep, src[0]['subject_his_id'],
        vector=vector, source_nn=source_nn)
def _split_gof(M, X, gain):
# parse out the variance explained using an orthogonal basis
# assuming x is estimated using elements of gain, with residual res
# along the first axis
assert M.ndim == X.ndim == gain.ndim == 2, (M.ndim, X.ndim, gain.ndim)
assert gain.shape == (M.shape[0], X.shape[0])
assert M.shape[1] == X.shape[1]
norm = (M * M.conj()).real.sum(0, keepdims=True)
norm[norm == 0] = np.inf
M_est = gain @ X
assert M.shape == M_est.shape
res = M - M_est
assert gain.shape[0] == M.shape[0], (gain.shape, M.shape)
# find an orthonormal basis for our matrices that spans the actual data
U, s, _ = np.linalg.svd(gain, full_matrices=False)
U = U[:, s >= s[0] * 1e-6]
# the part that gets explained
fit_orth = U.T @ M
# the part that got over-explained (landed in residual)
res_orth = U.T @ res
# determine the weights by projecting each one onto this basis
w = (U.T @ gain)[:, :, np.newaxis] * X
w_norm = np.linalg.norm(w, axis=1, keepdims=True)
w_norm[w_norm == 0] = 1.
w /= w_norm
# our weights are now unit-norm positive (will presrve power)
fit_back = np.linalg.norm(fit_orth[:, np.newaxis] * w, axis=0) ** 2
res_back = np.linalg.norm(res_orth[:, np.newaxis] * w, axis=0) ** 2
# and the resulting goodness of fits
gof_back = 100 * (fit_back - res_back) / norm
assert gof_back.shape == X.shape, (gof_back.shape, X.shape)
return gof_back
@verbose
def _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M,
                         gain_active, active_is_idx=False,
                         verbose=None):
    """Convert sparse solver output into a list of Dipole instances.

    One Dipole is created per active source position; the per-dipole
    goodness of fit over time is computed with ``_split_gof``.
    """
    times = tmin + tstep * np.arange(X.shape[1])
    if not active_is_idx:
        active_idx = np.where(active_set)[0]
    else:
        active_idx = active_set
    # Compute the GOF split amongst the dipoles
    assert M.shape == (gain_active.shape[0], len(times))
    assert gain_active.shape[1] == len(active_idx) == X.shape[0]
    gof_split = _split_gof(M, X, gain_active)
    assert gof_split.shape == (len(active_idx), len(times))
    assert X.shape[0] in (len(active_idx), 3 * len(active_idx))
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    if n_dip_per_pos > 1:
        # Collapse orientation triplets to positions, keeping original order,
        # and sum the GOF contributions of the orientations at each position
        active_idx = active_idx // n_dip_per_pos
        _, keep = np.unique(active_idx, return_index=True)
        keep.sort()  # maintain old order
        active_idx = active_idx[keep]
        gof_split.shape = (len(active_idx), n_dip_per_pos, len(times))
        gof_split = gof_split.sum(1)
        assert (gof_split < 100).all()
    assert gof_split.shape == (len(active_idx), len(times))
    dipoles = []
    for k, i_dip in enumerate(active_idx):
        i_pos = forward['source_rr'][i_dip][np.newaxis, :]
        i_pos = i_pos.repeat(len(times), axis=0)
        X_ = X[k * n_dip_per_pos: (k + 1) * n_dip_per_pos]
        if n_dip_per_pos == 1:
            amplitude = X_[0]
            i_ori = forward['source_nn'][i_dip][np.newaxis, :]
            i_ori = i_ori.repeat(len(times), axis=0)
        else:
            if forward['surf_ori']:
                # NOTE(review): presumably transforms the surface-oriented
                # components back to 3D coordinates — confirm
                X_ = np.dot(forward['source_nn'][
                    i_dip * n_dip_per_pos:(i_dip + 1) * n_dip_per_pos].T, X_)
            amplitude = np.linalg.norm(X_, axis=0)
            # unit orientation where amplitude is non-zero, zeros elsewhere
            i_ori = np.zeros((len(times), 3))
            i_ori[amplitude > 0.] = (X_[:, amplitude > 0.] /
                                     amplitude[amplitude > 0.]).T
        dipoles.append(Dipole(times, i_pos, amplitude, i_ori, gof_split[k]))
    return dipoles
@verbose
def make_stc_from_dipoles(dipoles, src, verbose=None):
    """Convert a list of spatio-temporal dipoles into a SourceEstimate.
    Parameters
    ----------
    dipoles : Dipole | list of instances of Dipole
        The dipoles to convert.
    src : instance of SourceSpaces
        The source space used to generate the forward operator.
    %(verbose)s
    Returns
    -------
    stc : SourceEstimate
        The source estimate.
    """
    logger.info('Converting dipoles into a SourceEstimate.')
    if isinstance(dipoles, Dipole):
        dipoles = [dipoles]
    if not isinstance(dipoles, list):
        raise ValueError('Dipoles must be an instance of Dipole or '
                         'a list of instances of Dipole. '
                         'Got %s!' % type(dipoles))
    tmin = dipoles[0].times[0]
    tstep = dipoles[0].times[1] - tmin
    X = np.zeros((len(dipoles), len(dipoles[0].times)))
    # Positions of all source-space vertices, lh then rh
    source_rr = np.concatenate([_src['rr'][_src['vertno'], :] for _src in src],
                               axis=0)
    n_lh_points = len(src[0]['vertno'])
    lh_vertno = list()
    rh_vertno = list()
    for i in range(len(dipoles)):
        if not np.all(dipoles[i].pos == dipoles[i].pos[0]):
            raise ValueError('Only dipoles with fixed position over time '
                             'are supported!')
        X[i] = dipoles[i].amplitude
        # Locate this dipole's (fixed) position in the source space and
        # assign its vertex to the matching hemisphere
        idx = np.all(source_rr == dipoles[i].pos[0], axis=1)
        idx = np.where(idx)[0][0]
        if idx < n_lh_points:
            lh_vertno.append(src[0]['vertno'][idx])
        else:
            rh_vertno.append(src[1]['vertno'][idx - n_lh_points])
    vertices = [np.array(lh_vertno).astype(int),
                np.array(rh_vertno).astype(int)]
    stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep,
                         subject=src._subject)
    logger.info('[done]')
    return stc
@verbose
def mixed_norm(evoked, forward, noise_cov, alpha, loose='auto', depth=0.8,
               maxit=3000, tol=1e-4, active_set_size=10,
               debias=True, time_pca=True, weights=None, weights_min=0.,
               solver='auto', n_mxne_iter=1, return_residual=False,
               return_as_dipoles=False, dgap_freq=10, rank=None, pick_ori=None,
               verbose=None):
    """Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE).
    Compute L1/L2 mixed-norm solution :footcite:`GramfortEtAl2012` or L0.5/L2
    :footcite:`StrohmeierEtAl2016` mixed-norm solution on evoked data.
    Parameters
    ----------
    evoked : instance of Evoked or list of instances of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    alpha : float in range [0, 100)
        Regularization parameter. 0 means no regularization, 100 would give 0
        active dipole.
    %(loose)s
    %(depth)s
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    active_set_size : int | None
        Size of active set increment. If None, no active set strategy is used.
    debias : bool
        Remove coefficient amplitude bias due to L1 penalty.
    time_pca : bool or int
        If True the rank of the concatenated epochs is reduced to
        its true dimension. If is 'int' the rank is limited to this value.
    weights : None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None, a
        1d array with shape (n_sources,), or a SourceEstimate (e.g. obtained
        with wMNE, dSPM, or fMRI).
    weights_min : float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    solver : 'prox' | 'cd' | 'bcd' | 'auto'
        The algorithm to use for the optimization. 'prox' stands for
        proximal iterations using the FISTA algorithm, 'cd' uses
        coordinate descent, and 'bcd' applies block coordinate descent.
        'cd' is only available for fixed orientation.
    n_mxne_iter : int
        The number of MxNE iterations. If > 1, iterative reweighting
        is applied.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    return_as_dipoles : bool
        If True, the sources are returned as a list of Dipole instances.
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations. Ignored if
        solver is 'cd'.
    %(rank_None)s
        .. versionadded:: 0.18
    %(pick_ori)s
    %(verbose)s
    Returns
    -------
    stc : SourceEstimate | list of SourceEstimate
        Source time courses for each evoked data passed as input.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.
    See Also
    --------
    tf_mixed_norm
    References
    ----------
    .. footbibliography::
    """
    from scipy import linalg
    if not (0. <= alpha < 100.):
        raise ValueError('alpha must be in [0, 100). '
                         'Got alpha = %s' % alpha)
    if n_mxne_iter < 1:
        raise ValueError('MxNE has to be computed at least 1 time. '
                         'Requires n_mxne_iter >= 1, got %d' % n_mxne_iter)
    if dgap_freq <= 0.:
        raise ValueError('dgap_freq must be a positive integer.'
                         ' Got dgap_freq = %s' % dgap_freq)
    pca = True
    if not isinstance(evoked, list):
        evoked = [evoked]
    _check_reference(evoked[0])
    all_ch_names = evoked[0].ch_names
    if not all(all_ch_names == evoked[i].ch_names
               for i in range(1, len(evoked))):
        raise Exception('All the datasets must have the same good channels.')
    forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked[0].info, noise_cov, pca, depth, loose, rank,
        weights, weights_min)
    _check_ori(pick_ori, forward)
    sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
    # Concatenate all evoked datasets along time
    M = np.concatenate([e.data[sel] for e in evoked], axis=1)
    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)
    # Optionally reduce the temporal dimension via SVD ("time PCA")
    if time_pca:
        U, s, Vh = linalg.svd(M, full_matrices=False)
        if not isinstance(time_pca, bool) and isinstance(time_pca, int):
            U = U[:, :time_pca]
            s = s[:time_pca]
            Vh = Vh[:time_pca]
        M = U * s
    # Scaling to make setting of tol and alpha easy
    tol *= sum_squared(M)
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    # alpha is expressed as a percentage of alpha_max (see docstring), so
    # rescale gain/source_weighting accordingly
    alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting /= alpha_max
    if n_mxne_iter == 1:
        X, active_set, E = mixed_norm_solver(
            M, gain, alpha, maxit=maxit, tol=tol,
            active_set_size=active_set_size, n_orient=n_dip_per_pos,
            debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)
    else:
        X, active_set, E = iterative_mixed_norm_solver(
            M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,
            n_orient=n_dip_per_pos, active_set_size=active_set_size,
            debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)
    # Project back from the PCA basis to the original time axis
    if time_pca:
        X = np.dot(X, Vh)
        M = np.dot(M, Vh)
    gain_active = gain[:, active_set]
    if mask is not None:
        # Re-express the active set in the full (unmasked) source space
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp
    if active_set.sum() == 0:
        raise Exception("No active dipoles found. alpha is too big.")
    # Reapply weights to have correct unit
    X = _reapply_source_weighting(X, source_weighting, active_set)
    source_weighting[source_weighting == 0] = 1  # zeros
    gain_active /= source_weighting[active_set]
    del source_weighting
    M_estimate = np.dot(gain_active, X)
    outs = list()
    residual = list()
    cnt = 0
    # Split the concatenated solution back into one output per evoked
    for e in evoked:
        tmin = e.times[0]
        tstep = 1.0 / e.info['sfreq']
        Xe = X[:, cnt:(cnt + len(e.times))]
        if return_as_dipoles:
            out = _make_dipoles_sparse(
                Xe, active_set, forward, tmin, tstep,
                M[:, cnt:(cnt + len(e.times))],
                gain_active)
        else:
            out = _make_sparse_stc(
                Xe, active_set, forward, tmin, tstep, pick_ori=pick_ori)
        outs.append(out)
        cnt += len(e.times)
        if return_residual:
            residual.append(_compute_residual(forward, e, Xe, active_set,
                                              gain_info))
    _log_exp_var(M, M_estimate, prefix='')
    logger.info('[done]')
    if len(outs) == 1:
        out = outs[0]
        if return_residual:
            residual = residual[0]
    else:
        out = outs
    if return_residual:
        out = out, residual
    return out
def _window_evoked(evoked, size):
"""Window evoked (size in seconds)."""
if isinstance(size, (float, int)):
lsize = rsize = float(size)
else:
lsize, rsize = size
evoked = evoked.copy()
sfreq = float(evoked.info['sfreq'])
lsize = int(lsize * sfreq)
rsize = int(rsize * sfreq)
lhann = np.hanning(lsize * 2)[:lsize]
rhann = np.hanning(rsize * 2)[-rsize:]
window = np.r_[lhann, np.ones(len(evoked.times) - lsize - rsize), rhann]
evoked.data *= window[None, :]
return evoked
@verbose
def tf_mixed_norm(evoked, forward, noise_cov,
                  loose='auto', depth=0.8, maxit=3000,
                  tol=1e-4, weights=None, weights_min=0., pca=True,
                  debias=True, wsize=64, tstep=4, window=0.02,
                  return_residual=False, return_as_dipoles=False, alpha=None,
                  l1_ratio=None, dgap_freq=10, rank=None, pick_ori=None,
                  n_tfmxne_iter=1, verbose=None):
    """Time-Frequency Mixed-norm estimate (TF-MxNE).
    Compute L1/L2 + L1 mixed-norm solution on time-frequency
    dictionary. Works with evoked data
    :footcite:`GramfortEtAl2013b,GramfortEtAl2011`.
    Parameters
    ----------
    evoked : instance of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    %(loose)s
    %(depth)s
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    weights : None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None or
        1d array of length n_sources or a SourceEstimate e.g. obtained
        with wMNE or dSPM or fMRI.
    weights_min : float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    pca : bool
        If True the rank of the data is reduced to true dimension.
    debias : bool
        Remove coefficient amplitude bias due to L1 penalty.
    wsize : int or array-like
        Length of the STFT window in samples (must be a multiple of 4).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep) and each entry of wsize must be a multiple
        of 4. See :footcite:`BekhtiEtAl2016`.
    tstep : int or array-like
        Step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep), and each entry of tstep must be a multiple
        of 2 and divide the corresponding entry of wsize. See
        :footcite:`BekhtiEtAl2016`.
    window : float or (float, float)
        Length of time window used to take care of edge artifacts in seconds.
        It can be one float or float if the values are different for left
        and right window length.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    return_as_dipoles : bool
        If True, the sources are returned as a list of Dipole instances.
    alpha : float in [0, 100) or None
        Overall regularization parameter.
        If alpha and l1_ratio are not None, alpha_space and alpha_time are
        overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
        * l1_ratio. 0 means no regularization, 100 would give 0 active dipole.
    l1_ratio : float in [0, 1] or None
        Proportion of temporal regularization.
        If l1_ratio and alpha are not None, alpha_space and alpha_time are
        overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
        * l1_ratio. 0 means no time regularization a.k.a. MxNE.
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations.
    %(rank_None)s
        .. versionadded:: 0.18
    %(pick_ori)s
    n_tfmxne_iter : int
        Number of TF-MxNE iterations. If > 1, iterative reweighting is applied.
    %(verbose)s
    Returns
    -------
    stc : instance of SourceEstimate
        Source time courses.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.
    See Also
    --------
    mixed_norm
    References
    ----------
    .. footbibliography::
    """
    _check_reference(evoked)
    all_ch_names = evoked.ch_names
    info = evoked.info
    # NOTE(review): ``alpha`` and ``l1_ratio`` default to None, but the
    # comparisons below raise TypeError for None — confirm callers always
    # pass numeric values, or add an explicit None check with a clear error
    if not (0. <= alpha < 100.):
        raise ValueError('alpha must be in [0, 100). '
                         'Got alpha = %s' % alpha)
    if not (0. <= l1_ratio <= 1.):
        raise ValueError('l1_ratio must be in range [0, 1].'
                         ' Got l1_ratio = %s' % l1_ratio)
    # Split the overall regularization into spatial and temporal parts
    alpha_space = alpha * (1. - l1_ratio)
    alpha_time = alpha * l1_ratio
    if n_tfmxne_iter < 1:
        raise ValueError('TF-MxNE has to be computed at least 1 time. '
                         'Requires n_tfmxne_iter >= 1, got %s' % n_tfmxne_iter)
    if dgap_freq <= 0.:
        raise ValueError('dgap_freq must be a positive integer.'
                         ' Got dgap_freq = %s' % dgap_freq)
    tstep = np.atleast_1d(tstep)
    wsize = np.atleast_1d(wsize)
    if len(tstep) != len(wsize):
        raise ValueError('The same number of window sizes and steps must be '
                         'passed. Got tstep = %s and wsize = %s' %
                         (tstep, wsize))
    forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked.info, noise_cov, pca, depth, loose, rank,
        weights, weights_min)
    _check_ori(pick_ori, forward)
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    # Taper the data edges to limit STFT edge artifacts
    if window is not None:
        evoked = _window_evoked(evoked, window)
    sel = [all_ch_names.index(name) for name in gain_info["ch_names"]]
    M = evoked.data[sel]
    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)
    # Build the TF dictionary operator (one entry per (wsize, tstep) pair)
    n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs, evoked.data.shape[1])
    # Scaling to make setting of tol and alpha easy
    tol *= sum_squared(M)
    alpha_max = norm_epsilon_inf(gain, M, phi, l1_ratio, n_dip_per_pos)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting /= alpha_max
    if n_tfmxne_iter == 1:
        X, active_set, E = tf_mixed_norm_solver(
            M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep,
            maxit=maxit, tol=tol, verbose=verbose, n_orient=n_dip_per_pos,
            dgap_freq=dgap_freq, debias=debias)
    else:
        X, active_set, E = iterative_tf_mixed_norm_solver(
            M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep,
            n_tfmxne_iter=n_tfmxne_iter, maxit=maxit, tol=tol, verbose=verbose,
            n_orient=n_dip_per_pos, dgap_freq=dgap_freq, debias=debias)
    if active_set.sum() == 0:
        raise Exception("No active dipoles found. "
                        "alpha_space/alpha_time are too big.")
    # Compute estimated whitened sensor data for each dipole (dip, ch, time)
    gain_active = gain[:, active_set]
    if mask is not None:
        # Re-express the active set in the full (unmasked) source space
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp
    X = _reapply_source_weighting(X, source_weighting, active_set)
    gain_active /= source_weighting[active_set]
    if return_residual:
        residual = _compute_residual(
            forward, evoked, X, active_set, gain_info)
    if return_as_dipoles:
        out = _make_dipoles_sparse(
            X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'],
            M, gain_active)
    else:
        out = _make_sparse_stc(
            X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'],
            pick_ori=pick_ori)
    logger.info('[done]')
    if return_residual:
        out = out, residual
    return out
| {
"repo_name": "kambysese/mne-python",
"path": "mne/inverse_sparse/mxne_inverse.py",
"copies": "6",
"size": "26013",
"license": "bsd-3-clause",
"hash": 1627604500459531000,
"line_mean": 36.0555555556,
"line_max": 79,
"alpha_frac": 0.5948948603,
"autogenerated": false,
"ratio": 3.3888744137571654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 702
} |
import pytest
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_array_less)
from mne.inverse_sparse.mxne_optim import (mixed_norm_solver,
tf_mixed_norm_solver,
iterative_mixed_norm_solver,
iterative_tf_mixed_norm_solver,
norm_epsilon_inf, norm_epsilon,
_Phi, _PhiT, dgap_l21l1)
from mne.time_frequency._stft import stft_norm2
def _generate_tf_data():
n, p, t = 30, 40, 64
rng = np.random.RandomState(0)
G = rng.randn(n, p)
G /= np.std(G, axis=0)[None, :]
X = np.zeros((p, t))
active_set = [0, 4]
times = np.linspace(0, 2 * np.pi, t)
X[0] = np.sin(times)
X[4] = -2 * np.sin(4 * times)
X[4, times <= np.pi / 2] = 0
X[4, times >= np.pi] = 0
M = np.dot(G, X)
M += 1 * rng.randn(*M.shape)
return M, G, active_set
def test_l21_mxne():
    """Test convergence of MxNE solver.

    Noiseless data with two active sources (rows 0 and 4): every solver and
    active-set configuration must recover the true support, and the
    solutions must agree across solvers.
    """
    n, p, t, alpha = 30, 40, 20, 1.
    rng = np.random.RandomState(0)
    G = rng.randn(n, p)
    G /= np.std(G, axis=0)[None, :]
    X = np.zeros((p, t))
    X[0] = 3
    X[4] = -2
    M = np.dot(G, X)
    args = (M, G, alpha, 1000, 1e-8)
    # --- no active-set strategy: prox, cd, bcd must agree -----------------
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = mixed_norm_solver(
            *args, active_set_size=None,
            debias=True, solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_cd, active_set, _, gap_cd = mixed_norm_solver(
            *args, active_set_size=None,
            debias=True, solver='cd', return_gap=True)
    assert_array_less(gap_cd, 1e-8)
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, E, gap_bcd = mixed_norm_solver(
            M, G, alpha, maxit=1000, tol=1e-8, active_set_size=None,
            debias=True, solver='bcd', return_gap=True)
    assert_array_less(gap_bcd, 9.6e-9)
    assert_array_equal(np.where(active_set)[0], [0, 4])
    assert_allclose(X_hat_prox, X_hat_cd, rtol=1e-2)
    assert_allclose(X_hat_prox, X_hat_bcd, rtol=1e-2)
    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
    # --- with active-set strategy (size 2) --------------------------------
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_cd, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
    # --- grouped orientations: n_orient=2 selects whole source groups -----
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, n_orient=2, solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, n_orient=2, solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    # suppress a coordinate-descent warning here
    with pytest.warns(RuntimeWarning, match='descent'):
        X_hat_cd, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, n_orient=2, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
    # --- n_orient=5 groups both true sources into one block --------------
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, n_orient=5, solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, n_orient=5, solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    with pytest.warns(RuntimeWarning, match='descent'):
        X_hat_cd, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, n_orient=5, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    assert_array_equal(X_hat_bcd, X_hat_cd)
    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
def test_tf_mxne():
    """Test convergence of TF-MxNE solver."""
    space_reg, time_reg = 10., 5.
    M, G, active_set = _generate_tf_data()
    with pytest.warns(None):  # CD
        solution = tf_mixed_norm_solver(
            M, G, space_reg, time_reg, maxit=200, tol=1e-8, verbose=True,
            n_orient=1, tstep=4, wsize=32, return_gap=True)
    X_hat_tf, active_set_hat_tf, E, gap_tfmxne = solution
    # a tiny duality gap certifies convergence; the recovered support
    # must match the simulated one
    assert_array_less(gap_tfmxne, 1e-8)
    assert_array_equal(np.where(active_set_hat_tf)[0], active_set)
def test_norm_epsilon():
    """Test computation of epsilon norm on TF coefficients."""
    tstep = np.array([2])
    wsize = np.array([4])
    n_times = 10
    n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs, n_times)
    Y = np.zeros(n_steps * n_freqs)
    l1_ratio = 0.03
    # all-zero coefficients -> zero norm
    assert_allclose(norm_epsilon(Y, l1_ratio, phi), 0.)
    # a single non-zero coefficient -> norm equals its magnitude
    Y[0] = 2.
    assert_allclose(norm_epsilon(Y, l1_ratio, phi), np.max(Y))
    l1_ratio = 1.
    assert_allclose(norm_epsilon(Y, l1_ratio, phi), np.max(Y))
    # dummy value without random:
    Y = np.arange(n_steps * n_freqs).reshape(-1, )
    l1_ratio = 0.0
    # with no temporal penalty the epsilon norm reduces to the STFT L2 norm
    assert_allclose(norm_epsilon(Y, l1_ratio, phi) ** 2,
                    stft_norm2(Y.reshape(-1, n_freqs[0], n_steps[0])))
    l1_ratio = 0.03
    # test that vanilla epsilon norm = weights equal to 1
    w_time = np.ones(n_coefs[0])
    Y = np.abs(np.random.randn(n_coefs[0]))
    assert_allclose(norm_epsilon(Y, l1_ratio, phi),
                    norm_epsilon(Y, l1_ratio, phi, w_time=w_time))
    # scaling w_time and w_space by the same amount should divide
    # epsilon norm by the same amount
    Y = np.arange(n_coefs) + 1
    mult = 2.
    assert_allclose(
        norm_epsilon(Y, l1_ratio, phi, w_space=1,
                     w_time=np.ones(n_coefs)) / mult,
        norm_epsilon(Y, l1_ratio, phi, w_space=mult,
                     w_time=mult * np.ones(n_coefs)))
@pytest.mark.slowtest  # slow-ish on Travis OSX
@pytest.mark.timeout(60)  # ~30 sec on Travis OSX and Linux OpenBLAS
def test_dgapl21l1():
    """Test duality gap for L21 + L1 regularization."""
    n_orient = 2
    M, G, active_set = _generate_tf_data()
    n_times = M.shape[1]
    n_sources = G.shape[1]
    tstep, wsize = np.array([4, 2]), np.array([64, 16])
    n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs, n_times)
    phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
    for l1_ratio in [0.05, 0.1]:
        alpha_max = norm_epsilon_inf(G, M, phi, l1_ratio, n_orient)
        alpha_space = (1. - l1_ratio) * alpha_max
        alpha_time = l1_ratio * alpha_max
        Z = np.zeros([n_sources, phi.n_coefs.sum()])
        # for alpha = alpha_max, Z = 0 is the solution so the dgap is 0
        gap = dgap_l21l1(M, G, Z, np.ones(n_sources, dtype=bool),
                         alpha_space, alpha_time, phi, phiT,
                         n_orient, -np.inf)[0]
        assert_allclose(0., gap)
        # check that solution for alpha smaller than alpha_max is non 0:
        X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver(
            M, G, alpha_space / 1.01, alpha_time / 1.01, maxit=200, tol=1e-8,
            verbose=True, debias=False, n_orient=n_orient, tstep=tstep,
            wsize=wsize, return_gap=True)
        # allow possible small numerical errors (negative gap)
        assert_array_less(-1e-10, gap)
        assert_array_less(gap, 1e-8)
        assert_array_less(1, len(active_set_hat_tf))
        # much smaller regularization still converges, with a larger support
        X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver(
            M, G, alpha_space / 5., alpha_time / 5., maxit=200, tol=1e-8,
            verbose=True, debias=False, n_orient=n_orient, tstep=tstep,
            wsize=wsize, return_gap=True)
        assert_array_less(-1e-10, gap)
        assert_array_less(gap, 1e-8)
        assert_array_less(1, len(active_set_hat_tf))
def test_tf_mxne_vs_mxne():
    """Test equivalence of TF-MxNE (with alpha_time=0) and MxNE."""
    alpha_space = 60.
    # zero temporal penalty makes TF-MxNE degenerate to plain MxNE
    alpha_time = 0.
    M, G, active_set = _generate_tf_data()
    X_hat_tf, active_set_hat_tf, E = tf_mixed_norm_solver(
        M, G, alpha_space, alpha_time, maxit=200, tol=1e-8,
        verbose=True, debias=False, n_orient=1, tstep=4, wsize=32)
    # Also run L21 and check that we get the same
    X_hat_l21, _, _ = mixed_norm_solver(
        M, G, alpha_space, maxit=200, tol=1e-8, verbose=False, n_orient=1,
        active_set_size=None, debias=False)
    assert_allclose(X_hat_tf, X_hat_l21, rtol=1e-1)
@pytest.mark.slowtest  # slow-ish on Travis OSX
def test_iterative_reweighted_mxne():
    """Test convergence of irMxNE solver.

    Simulated noiseless data with two active sources (rows 0 and 4): all
    solvers and active-set strategies must agree and recover the support.
    """
    n, p, t, alpha = 30, 40, 20, 1
    rng = np.random.RandomState(0)
    G = rng.randn(n, p)
    G /= np.std(G, axis=0)[None, :]
    X = np.zeros((p, t))
    X[0] = 3
    X[4] = -2
    M = np.dot(G, X)
    with pytest.warns(None):  # CD
        X_hat_l21, _, _ = mixed_norm_solver(
            M, G, alpha, maxit=1000, tol=1e-8, verbose=False, n_orient=1,
            active_set_size=None, debias=False, solver='bcd')
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None,
            debias=False, solver='bcd')
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None,
            debias=False, solver='prox')
    # a single reweighting iteration must match plain MxNE
    assert_allclose(X_hat_bcd, X_hat_l21, rtol=1e-3)
    assert_allclose(X_hat_prox, X_hat_l21, rtol=1e-3)
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None,
            debias=True, solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
            debias=True, solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None,
            debias=True, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    assert_array_almost_equal(X_hat_prox, X_hat_cd, 5)
    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
            debias=True, n_orient=2, solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    # suppress a coordinate-descent warning here
    with pytest.warns(RuntimeWarning, match='descent'):
        X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
            debias=True, n_orient=2, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    # bugfix: assert_array_equal's third positional parameter is err_msg,
    # so assert_array_equal(..., 5) silently compared for exact equality;
    # use assert_array_almost_equal with 5 decimals as intended (matching
    # the prox/bcd comparisons above)
    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, debias=True,
        n_orient=5)
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    with pytest.warns(RuntimeWarning, match='descent'):
        X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
            debias=True, n_orient=5, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
@pytest.mark.slowtest
def test_iterative_reweighted_tfmxne():
    """Test convergence of irTF-MxNE solver."""
    meas, gain, expected_active = _generate_tf_data()
    space_reg = 38.
    time_reg = 0.5
    tstep, wsize = [4, 2], [64, 16]
    # With a single reweighting pass, irTF-MxNE must match plain TF-MxNE.
    sol_tf, _, _ = tf_mixed_norm_solver(
        meas, gain, space_reg, time_reg, maxit=1000, tol=1e-4, wsize=wsize,
        tstep=tstep, verbose=False, n_orient=1, debias=False)
    sol_ir, active_set, _ = iterative_tf_mixed_norm_solver(
        meas, gain, space_reg, time_reg, 1, wsize=wsize, tstep=tstep,
        maxit=1000, tol=1e-4, debias=False, verbose=False)
    assert_allclose(sol_tf, sol_ir, rtol=1e-3)
    assert_array_equal(np.where(active_set)[0], expected_active)
    # n_orient=5: the first five sources should come out active together.
    space_reg = 50.
    sol_ir, active_set, _ = iterative_tf_mixed_norm_solver(
        meas, gain, space_reg, time_reg, 3, wsize=wsize, tstep=tstep,
        n_orient=5, maxit=1000, tol=1e-4, debias=False, verbose=False)
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    # n_orient=2: sources should activate in pairs (0, 1) and (4, 5).
    space_reg = 40.
    sol_ir, active_set, _ = iterative_tf_mixed_norm_solver(
        meas, gain, space_reg, time_reg, 2, wsize=wsize, tstep=tstep,
        n_orient=2, maxit=1000, tol=1e-4, debias=False, verbose=False)
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
| {
"repo_name": "kambysese/mne-python",
"path": "mne/inverse_sparse/tests/test_mxne_optim.py",
"copies": "6",
"size": "14142",
"license": "bsd-3-clause",
"hash": 7207271965778474000,
"line_mean": 40.7168141593,
"line_max": 77,
"alpha_frac": 0.587328525,
"autogenerated": false,
"ratio": 2.7594146341463412,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 339
} |
import os.path as op
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
import pytest
import mne
from mne.datasets import testing
from mne.label import read_label
from mne import (read_cov, read_forward_solution, read_evokeds,
convert_forward_solution)
from mne.inverse_sparse import mixed_norm, tf_mixed_norm
from mne.inverse_sparse.mxne_inverse import make_stc_from_dipoles
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.utils import assert_stcs_equal, run_tests_if_main
from mne.dipole import Dipole
from mne.source_estimate import VolSourceEstimate
# Paths into the MNE "testing" dataset (empty string if not downloaded;
# tests below are gated by @testing.requires_testing_data).
data_path = testing.data_path(download=False)
# NOTE: These use the ave and cov from sample dataset (no _trunc)
fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
# Right-hemisphere auditory label used to sanity-check source locations.
label = 'Aud-rh'
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
@pytest.mark.timeout(150)  # ~30 sec on Travis Linux
@pytest.mark.slowtest
@testing.requires_testing_data
def test_mxne_inverse_standard():
    """Test (TF-)MxNE inverse computation.

    Runs the MxNE solvers ('prox', 'cd', 'bcd') on the same data and checks
    they agree, then exercises vector orientations, dipole output, residual
    return, irMxNE, and finally TF-MxNE on the same dataset.
    """
    # Read noise covariance matrix
    cov = read_cov(fname_cov)
    # Handling average file
    loose = 0.0
    depth = 0.9
    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
    evoked.crop(tmin=-0.05, tmax=0.2)
    # Short window around the peak for the L21 (MxNE) tests.
    evoked_l21 = evoked.copy()
    evoked_l21.crop(tmin=0.081, tmax=0.1)
    label = read_label(fname_label)
    assert label.hemi == 'rh'
    forward = read_forward_solution(fname_fwd)
    forward = convert_forward_solution(forward, surf_ori=True)
    # Reduce source space to make test computation faster
    inverse_operator = make_inverse_operator(evoked_l21.info, forward, cov,
                                             loose=loose, depth=depth,
                                             fixed=True, use_cps=True)
    stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9.,
                             method='dSPM')
    # Threshold the dSPM estimate into a binary weights map
    # (0 below |12|, 1 at or above) used as MxNE spatial weights.
    stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0
    stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1.
    weights_min = 0.5
    # MxNE tests: all three solvers must produce the same estimate.
    alpha = 70  # spatial regularization parameter
    stc_prox = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                          depth=depth, maxit=300, tol=1e-8,
                          active_set_size=10, weights=stc_dspm,
                          weights_min=weights_min, solver='prox')
    with pytest.warns(None):  # CD
        stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                            depth=depth, maxit=300, tol=1e-8,
                            active_set_size=10, weights=stc_dspm,
                            weights_min=weights_min, solver='cd')
    stc_bcd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                         depth=depth, maxit=300, tol=1e-8, active_set_size=10,
                         weights=stc_dspm, weights_min=weights_min,
                         solver='bcd')
    assert_array_almost_equal(stc_prox.times, evoked_l21.times, 5)
    assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5)
    assert_array_almost_equal(stc_bcd.times, evoked_l21.times, 5)
    assert_allclose(stc_prox.data, stc_cd.data, rtol=1e-3, atol=0.0)
    assert_allclose(stc_prox.data, stc_bcd.data, rtol=1e-3, atol=0.0)
    assert_allclose(stc_cd.data, stc_bcd.data, rtol=1e-3, atol=0.0)
    # Active sources should fall inside the auditory label.
    assert stc_prox.vertices[1][0] in label.vertices
    assert stc_cd.vertices[1][0] in label.vertices
    assert stc_bcd.vertices[1][0] in label.vertices
    # vector
    with pytest.warns(None):  # no convergence
        stc = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2)
    with pytest.warns(None):  # no convergence
        stc_vec = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2,
                             pick_ori='vector')
    assert_stcs_equal(stc_vec.magnitude(), stc)
    # pick_ori='vector' is only valid with free orientation (loose != 0).
    with pytest.warns(None), pytest.raises(ValueError, match='pick_ori='):
        mixed_norm(evoked_l21, forward, cov, alpha, loose=0, maxit=2,
                   pick_ori='vector')
    # Dipole output must be equivalent to the source-estimate output.
    with pytest.warns(None):  # CD
        dips = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                          depth=depth, maxit=300, tol=1e-8, active_set_size=10,
                          weights=stc_dspm, weights_min=weights_min,
                          solver='cd', return_as_dipoles=True)
    stc_dip = make_stc_from_dipoles(dips, forward['src'])
    assert isinstance(dips[0], Dipole)
    assert stc_dip.subject == "sample"
    assert_stcs_equal(stc_cd, stc_dip)
    with pytest.warns(None):  # CD
        stc, _ = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                            depth=depth, maxit=300, tol=1e-8,
                            weights=stc_dspm,  # gh-6382
                            active_set_size=10, return_residual=True,
                            solver='cd')
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
    assert stc.vertices[1][0] in label.vertices
    # irMxNE tests
    with pytest.warns(None):  # CD
        stc = mixed_norm(evoked_l21, forward, cov, alpha,
                         n_mxne_iter=5, loose=loose, depth=depth,
                         maxit=300, tol=1e-8, active_set_size=10,
                         solver='cd')
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
    assert stc.vertices[1][0] in label.vertices
    # Reweighting should sparsify down to exactly these two vertices.
    assert stc.vertices == [[63152], [79017]]
    # Do with TF-MxNE for test memory savings
    alpha = 60.  # overall regularization parameter
    l1_ratio = 0.01  # temporal regularization proportion
    stc, _ = tf_mixed_norm(evoked, forward, cov,
                           loose=loose, depth=depth, maxit=100, tol=1e-4,
                           tstep=4, wsize=16, window=0.1, weights=stc_dspm,
                           weights_min=weights_min, return_residual=True,
                           alpha=alpha, l1_ratio=l1_ratio)
    assert_array_almost_equal(stc.times, evoked.times, 5)
    assert stc.vertices[1][0] in label.vertices
    # vector
    stc_nrm = tf_mixed_norm(
        evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4,
        tstep=4, wsize=16, window=0.1, weights=stc_dspm,
        weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio)
    stc_vec = tf_mixed_norm(
        evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4,
        tstep=4, wsize=16, window=0.1, weights=stc_dspm,
        weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio,
        pick_ori='vector')
    assert_stcs_equal(stc_vec.magnitude(), stc_nrm)
    # Out-of-range regularization parameters must be rejected.
    pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
                  alpha=101, l1_ratio=0.03)
    pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
                  alpha=50., l1_ratio=1.01)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_mxne_vol_sphere():
    """Test (TF-)MxNE with a sphere forward and volumic source space."""
    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
    evoked.crop(tmin=-0.05, tmax=0.2)
    cov = read_cov(fname_cov)
    # Short window around the peak for the L21 (MxNE) tests.
    evoked_l21 = evoked.copy()
    evoked_l21.crop(tmin=0.081, tmax=0.1)
    info = evoked.info
    # Build a sphere model + volumetric grid source space (MEG only).
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
    src = mne.setup_volume_source_space(subject=None, pos=15., mri=None,
                                        sphere=(0.0, 0.0, 0.0, 0.08),
                                        bem=None, mindist=5.0,
                                        exclude=2.0, sphere_units='m')
    fwd = mne.make_forward_solution(info, trans=None, src=src,
                                    bem=sphere, eeg=False, meg=True)
    alpha = 80.
    # loose != 1 is invalid for a volume source space.
    pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
                  loose=0.0, return_residual=False,
                  maxit=3, tol=1e-8, active_set_size=10)
    pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
                  loose=0.2, return_residual=False,
                  maxit=3, tol=1e-8, active_set_size=10)
    # irMxNE tests
    stc = mixed_norm(evoked_l21, fwd, cov, alpha,
                     n_mxne_iter=1, maxit=30, tol=1e-8,
                     active_set_size=10)
    assert isinstance(stc, VolSourceEstimate)
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
    # Compare orientation obtained using fit_dipole and gamma_map
    # for a simulated evoked containing a single dipole
    stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4),
                                vertices=[stc.vertices[0][:1]],
                                tmin=stc.tmin,
                                tstep=stc.tstep)
    evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9,
                                                use_cps=True)
    dip_mxne = mixed_norm(evoked_dip, fwd, cov, alpha=80,
                          n_mxne_iter=1, maxit=30, tol=1e-8,
                          active_set_size=10, return_as_dipoles=True)
    # Keep only the dipole with the largest amplitude.
    amp_max = [np.max(d.amplitude) for d in dip_mxne]
    dip_mxne = dip_mxne[np.argmax(amp_max)]
    assert dip_mxne.pos[0] in src[0]['rr'][stc.vertices[0]]
    dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0]
    # Orientations from both methods should be (anti-)parallel...
    assert np.abs(np.dot(dip_fit.ori[0], dip_mxne.ori[0])) > 0.99
    # ...and positions should be close.
    dist = 1000 * np.linalg.norm(dip_fit.pos[0] - dip_mxne.pos[0])
    assert dist < 4.  # within 4 mm
    # Do with TF-MxNE for test memory savings
    alpha = 60.  # overall regularization parameter
    l1_ratio = 0.01  # temporal regularization proportion
    stc, _ = tf_mixed_norm(evoked, fwd, cov, maxit=3, tol=1e-4,
                           tstep=16, wsize=32, window=0.1, alpha=alpha,
                           l1_ratio=l1_ratio, return_residual=True)
    assert isinstance(stc, VolSourceEstimate)
    assert_array_almost_equal(stc.times, evoked.times, 5)
# Allow this test module to be run directly as a script.
run_tests_if_main()
| {
"repo_name": "cjayb/mne-python",
"path": "mne/inverse_sparse/tests/test_mxne_inverse.py",
"copies": "2",
"size": "10276",
"license": "bsd-3-clause",
"hash": -1911820511172548900,
"line_mean": 43.2931034483,
"line_max": 79,
"alpha_frac": 0.5985792137,
"autogenerated": false,
"ratio": 3.111111111111111,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9709690324811111,
"avg_score": 0,
"num_lines": 232
} |
from copy import deepcopy
import os.path as op
import pickle
import numpy as np
from scipy import fftpack
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
import pytest
from mne import (equalize_channels, pick_types, read_evokeds, write_evokeds,
combine_evoked, create_info, read_events,
Epochs, EpochsArray)
from mne.evoked import _get_peak, Evoked, EvokedArray
from mne.io import read_raw_fif
from mne.io.constants import FIFF
from mne.utils import requires_pandas, grand_average
# Small fixture files shipped with the mne.io test data.
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname = op.join(base_dir, 'test-ave.fif')
fname_gz = op.join(base_dir, 'test-ave.fif.gz')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
def test_decim():
    """Test evoked decimation.

    Checks decimation of synthetic EvokedArray data (data, first/last/times
    bookkeeping, offsets, chained decimation) and then that decimating
    Epochs-then-average equals average-then-decimate on real data.
    """
    rng = np.random.RandomState(0)
    n_channels, n_times = 10, 20
    dec_1, dec_2 = 2, 3
    decim = dec_1 * dec_2
    sfreq = 10.
    sfreq_new = sfreq / decim
    data = rng.randn(n_channels, n_times)
    info = create_info(n_channels, sfreq, 'eeg')
    # Keep lowpass well below the decimated Nyquist to avoid warnings.
    info['lowpass'] = sfreq_new / float(decim)
    evoked = EvokedArray(data, info, tmin=-1)
    evoked_dec = evoked.copy().decimate(decim)
    evoked_dec_2 = evoked.copy().decimate(decim, offset=1)
    # NOTE(review): this last call is not on a copy — decimate() appears to
    # operate in place on `evoked`, which is not reused afterwards; confirm.
    evoked_dec_3 = evoked.decimate(dec_1).decimate(dec_2)
    assert_array_equal(evoked_dec.data, data[:, ::decim])
    assert_array_equal(evoked_dec_2.data, data[:, 1::decim])
    # Decimating by 6 in one step must equal decimating by 2 then 3.
    assert_array_equal(evoked_dec.data, evoked_dec_3.data)
    # Check proper updating of various fields
    assert evoked_dec.first == -2
    assert evoked_dec.last == 1
    assert_array_equal(evoked_dec.times, [-1, -0.4, 0.2, 0.8])
    assert evoked_dec_2.first == -2
    assert evoked_dec_2.last == 1
    assert_array_equal(evoked_dec_2.times, [-0.9, -0.3, 0.3, 0.9])
    assert evoked_dec_3.first == -2
    assert evoked_dec_3.last == 1
    assert_array_equal(evoked_dec_3.times, [-1, -0.4, 0.2, 0.8])
    # make sure the time nearest zero is also sample number 0.
    for ev in (evoked_dec, evoked_dec_2, evoked_dec_3):
        lowest_index = np.argmin(np.abs(np.arange(ev.first, ev.last)))
        idxs_of_times_nearest_zero = \
            np.where(np.abs(ev.times) == np.min(np.abs(ev.times)))[0]
        # we use `in` here in case two times are equidistant from 0.
        assert lowest_index in idxs_of_times_nearest_zero
        assert len(idxs_of_times_nearest_zero) in (1, 2)
    # Now let's do it with some real data
    raw = read_raw_fif(raw_fname)
    events = read_events(event_name)
    sfreq_new = raw.info['sfreq'] / decim
    raw.info['lowpass'] = sfreq_new / 4.  # suppress aliasing warnings
    picks = pick_types(raw.info, meg=True, eeg=True, exclude=())
    epochs = Epochs(raw, events, 1, -0.2, 0.5, picks=picks, preload=True)
    for offset in (0, 1):
        # decimate-then-average must equal average-then-decimate
        ev_ep_decim = epochs.copy().decimate(decim, offset).average()
        ev_decim = epochs.average().decimate(decim, offset)
        expected_times = epochs.times[offset::decim]
        assert_allclose(ev_decim.times, expected_times)
        assert_allclose(ev_ep_decim.times, expected_times)
        expected_data = epochs.get_data()[:, :, offset::decim].mean(axis=0)
        assert_allclose(ev_decim.data, expected_data)
        assert_allclose(ev_ep_decim.data, expected_data)
        assert_equal(ev_decim.info['sfreq'], sfreq_new)
        assert_array_equal(ev_decim.times, expected_times)
def test_savgol_filter():
    """Test Savitzky-Golay low-pass filtering of evoked data."""
    cutoff = 10.
    evoked = read_evokeds(fname, 0)
    freqs = fftpack.fftfreq(len(evoked.times), 1. / evoked.info['sfreq'])
    spectrum = np.abs(fftpack.fft(evoked.data))
    passband = (freqs >= 0) & (freqs <= cutoff / 2.)
    stopband = (freqs >= cutoff * 2) & (freqs < 50.)
    # a cutoff at the sampling frequency itself must be rejected
    with pytest.raises(ValueError):
        evoked.savgol_filter(evoked.info['sfreq'])
    filtered = evoked.copy().savgol_filter(cutoff)
    spectrum_filt = np.abs(fftpack.fft(filtered.data))
    # pass-band energy should be nearly unchanged
    assert_allclose(np.mean(spectrum[:, passband], 0),
                    np.mean(spectrum_filt[:, passband], 0),
                    rtol=1e-4, atol=1e-2)
    # stop-band energy should be strongly attenuated
    assert (np.mean(spectrum[:, stopband]) >
            np.mean(spectrum_filt[:, stopband]) * 5)
    # filtering a copy must leave the original data intact
    assert_allclose(spectrum, np.abs(fftpack.fft(evoked.data)), atol=1e-16)
def test_hash_evoked():
    """Test that evoked hashing reflects the underlying data."""
    first = read_evokeds(fname, 0)
    second = read_evokeds(fname, 0)
    # two independent reads of the same file are equal and hash equal
    assert hash(first) == hash(second)
    assert first == second
    # do NOT use assert_equal here, failing output is terrible
    assert pickle.dumps(first) == pickle.dumps(second)
    # any change to the data must change the hash
    second.data[0, 0] -= 1
    assert hash(first) != hash(second)
def _aspect_kinds():
    """Return all FIFF aspect-kind constants (FIFFV_ASPECT_*).

    Returns a list (the original docstring said "Yield" but the function
    has always returned a list) so it can be consumed eagerly by
    ``pytest.mark.parametrize`` at collection time.
    """
    # Iterating FIFF yields its attribute-name strings, so no str() needed.
    return [getattr(FIFF, key) for key in FIFF
            if key.startswith('FIFFV_ASPECT_')]
@pytest.mark.parametrize('aspect_kind', _aspect_kinds())
def test_evoked_aspects(aspect_kind, tmpdir):
    """Test handling of evoked aspects."""
    # gh-6359
    evoked = read_evokeds(fname, 0)
    evoked._aspect_kind = aspect_kind
    # repr() must work for every aspect kind
    assert 'Evoked' in repr(evoked)
    # the aspect kind must survive a save/load round-trip
    round_trip = op.join(str(tmpdir), 'test-ave.fif')
    evoked.save(round_trip)
    reloaded = read_evokeds(round_trip, condition=0)
    assert_allclose(evoked.data, reloaded.data)
    assert evoked.kind == reloaded.kind
@pytest.mark.slowtest
def test_io_evoked(tmpdir):
    """Test IO for evoked data (fif + gz) with integer and str args.

    Covers: multi-condition write/read round-trips, gzip reading, string
    condition access, complex data, bad-filename warnings, mismatched bads
    ordering, the Evoked constructor guard, and MaxShield handling.
    """
    ave = read_evokeds(fname, 0)
    ave_double = ave.copy()
    ave_double.comment = ave.comment + ' doubled nave'
    ave_double.nave = ave.nave * 2
    write_evokeds(tmpdir.join('evoked-ave.fif'), [ave, ave_double])
    ave2, ave_double = read_evokeds(op.join(tmpdir, 'evoked-ave.fif'))
    assert ave2.nave * 2 == ave_double.nave
    # This not being assert_array_equal due to windows rounding
    assert (np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave.times, ave2.times)
    assert_equal(ave.nave, ave2.nave)
    assert_equal(ave._aspect_kind, ave2._aspect_kind)
    assert_equal(ave.kind, ave2.kind)
    assert_equal(ave.last, ave2.last)
    assert_equal(ave.first, ave2.first)
    assert (repr(ave))
    # test compressed i/o
    ave2 = read_evokeds(fname_gz, 0)
    assert (np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))
    # test str access
    condition = 'Left Auditory'
    pytest.raises(ValueError, read_evokeds, fname, condition, kind='stderr')
    pytest.raises(ValueError, read_evokeds, fname, condition,
                  kind='standard_error')
    ave3 = read_evokeds(fname, condition)
    assert_array_almost_equal(ave.data, ave3.data, 19)
    # test read_evokeds and write_evokeds
    aves1 = read_evokeds(fname)[1::2]
    aves2 = read_evokeds(fname, [1, 3])
    aves3 = read_evokeds(fname, ['Right Auditory', 'Right visual'])
    write_evokeds(tmpdir.join('evoked-ave.fif'), aves1)
    aves4 = read_evokeds(tmpdir.join('evoked-ave.fif'))
    # integer, string, and round-trip access must all agree
    for aves in [aves2, aves3, aves4]:
        for [av1, av2] in zip(aves1, aves):
            assert_array_almost_equal(av1.data, av2.data)
            assert_array_almost_equal(av1.times, av2.times)
            assert_equal(av1.nave, av2.nave)
            assert_equal(av1.kind, av2.kind)
            assert_equal(av1._aspect_kind, av2._aspect_kind)
            assert_equal(av1.last, av2.last)
            assert_equal(av1.first, av2.first)
            assert_equal(av1.comment, av2.comment)
    # test saving and reading complex numbers in evokeds
    ave_complex = ave.copy()
    ave_complex._data = 1j * ave_complex.data
    fname_temp = str(tmpdir.join('complex-ave.fif'))
    ave_complex.save(fname_temp)
    ave_complex = read_evokeds(fname_temp)[0]
    assert_allclose(ave.data, ave_complex.data.imag)
    # test warnings on bad filenames
    fname2 = tmpdir.join('test-bad-name.fif')
    with pytest.warns(RuntimeWarning, match='-ave.fif'):
        write_evokeds(fname2, ave)
    with pytest.warns(RuntimeWarning, match='-ave.fif'):
        read_evokeds(fname2)
    # test writing when order of bads doesn't match
    fname3 = tmpdir.join('test-bad-order-ave.fif')
    condition = 'Left Auditory'
    ave4 = read_evokeds(fname, condition)
    ave4.info['bads'] = ave4.ch_names[:3]
    ave5 = ave4.copy()
    ave5.info['bads'] = ave4.info['bads'][::-1]
    write_evokeds(fname3, [ave4, ave5])
    # constructor
    pytest.raises(TypeError, Evoked, fname)
    # MaxShield
    fname_ms = tmpdir.join('test-ave.fif')
    assert (ave.info['maxshield'] is False)
    ave.info['maxshield'] = True
    ave.save(fname_ms)
    # reading MaxShield data requires explicit opt-in
    pytest.raises(ValueError, read_evokeds, fname_ms)
    with pytest.warns(RuntimeWarning, match='Elekta'):
        aves = read_evokeds(fname_ms, allow_maxshield=True)
    assert all(ave.info['maxshield'] is True for ave in aves)
    aves = read_evokeds(fname_ms, allow_maxshield='yes')
    assert (all(ave.info['maxshield'] is True for ave in aves))
def test_shift_time_evoked(tmpdir):
    """Test for shifting of time scale.

    A -0.1 s shift followed by +0.2 s followed by -0.1 s (with write/read
    round-trips in between) must restore the original timeline; absolute and
    sub-sample shifts are checked as well.
    """
    tempdir = str(tmpdir)
    # Shift backward
    ave = read_evokeds(fname, 0).shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    # Shift forward twice the amount
    ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_bshift.shift_time(0.2, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)
    # Shift backward again
    ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_fshift.shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)
    # The net shift is zero, so data and times must match the original.
    ave_normal = read_evokeds(fname, 0)
    ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    assert_allclose(ave_normal.data, ave_relative.data, atol=1e-16, rtol=1e-3)
    assert_array_almost_equal(ave_normal.times, ave_relative.times, 8)
    assert_equal(ave_normal.last, ave_relative.last)
    assert_equal(ave_normal.first, ave_relative.first)
    # Absolute time shift
    ave = read_evokeds(fname, 0)
    ave.shift_time(-0.3, relative=False)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    assert_allclose(ave_normal.data, ave_absolute.data, atol=1e-16, rtol=1e-3)
    assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
    # subsample shift
    shift = 1e-6  # 1 µs, should be well below 1/sfreq
    ave = read_evokeds(fname, 0)
    times = ave.times
    ave.shift_time(shift)
    assert_allclose(times + shift, ave.times, atol=1e-16, rtol=1e-12)
    # test handling of Evoked.first, Evoked.last
    ave = read_evokeds(fname, 0)
    first_last = np.array([ave.first, ave.last])
    # should shift by 0 samples
    ave.shift_time(1e-6)
    assert_array_equal(first_last, np.array([ave.first, ave.last]))
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_loaded = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    assert_array_almost_equal(ave.times, ave_loaded.times, 8)
    # should shift by 57 samples
    ave.shift_time(57. / ave.info['sfreq'])
    assert_array_equal(first_last + 57, np.array([ave.first, ave.last]))
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_loaded = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    assert_array_almost_equal(ave.times, ave_loaded.times, 8)
def test_tmin_tmax():
    """Check that tmin/tmax mirror the first and last time points."""
    ev = read_evokeds(fname, 0)
    first_time, last_time = ev.times[0], ev.times[-1]
    assert ev.tmin == first_time
    assert ev.tmax == last_time
def test_evoked_resample(tmpdir):
    """Test resampling evoked data.

    Upsamples by 2x, round-trips through disk, downsamples back, and checks
    the result matches the original; also checks lowpass bookkeeping when
    downsampling below the original lowpass.
    """
    tempdir = str(tmpdir)
    # upsample, write it out, read it in
    ave = read_evokeds(fname, 0)
    orig_lp = ave.info['lowpass']
    sfreq_normal = ave.info['sfreq']
    ave.resample(2 * sfreq_normal, npad=100)
    # upsampling must not change the recorded lowpass
    assert ave.info['lowpass'] == orig_lp
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    # compare it to the original
    ave_normal = read_evokeds(fname, 0)
    # and compare the original to the downsampled upsampled version
    ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_new.resample(sfreq_normal, npad=100)
    # NOTE(review): this re-checks `ave`; `ave_new` was probably the
    # intended target here — confirm.
    assert ave.info['lowpass'] == orig_lp
    assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
    assert_array_almost_equal(ave_normal.times, ave_new.times)
    assert_equal(ave_normal.nave, ave_new.nave)
    assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
    assert_equal(ave_normal.kind, ave_new.kind)
    assert_equal(ave_normal.last, ave_new.last)
    assert_equal(ave_normal.first, ave_new.first)
    # for the above to work, the upsampling just about had to, but
    # we'll add a couple extra checks anyway
    assert (len(ave_up.times) == 2 * len(ave_normal.times))
    assert (ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
    # downsampling to 50 Hz must cap lowpass at the new Nyquist (25 Hz)
    ave_new.resample(50)
    assert ave_new.info['sfreq'] == 50.
    assert ave_new.info['lowpass'] == 25.
def test_evoked_filter():
    """Smoke test low-pass filtering of evoked data."""
    # the Epochs and Raw filtering tests are more complete; here we only
    # check that filtering runs, updates info, and leaves the source intact
    evoked = read_evokeds(fname, 0).pick_types(meg='grad')
    evoked.data[:] = 1.
    assert round(evoked.info['lowpass']) == 172
    filtered = evoked.copy().filter(None, 40., fir_design='firwin')
    assert filtered.info['lowpass'] == 40.
    # the unfiltered instance must keep its constant data
    assert_allclose(evoked.data, 1., atol=1e-6)
def test_evoked_detrend():
    """Test for detrending evoked data."""
    detrended = read_evokeds(fname, 0)
    manual = read_evokeds(fname, 0)
    # order-0 detrending is just per-channel mean removal
    detrended.detrend(0)
    manual.data -= np.mean(manual.data, axis=1)[:, np.newaxis]
    picks = pick_types(detrended.info, meg=True, eeg=True, exclude='bads')
    assert_allclose(detrended.data[picks], manual.data[picks],
                    rtol=1e-8, atol=1e-16)
@requires_pandas
def test_to_data_frame():
    """Test evoked Pandas exporter.

    Checks index validation, index setting, wide vs. long output formats,
    and that channel scalings are applied to the exported values.
    """
    ave = read_evokeds(fname, 0)
    # test index checking
    with pytest.raises(ValueError, match='options. Valid index options are'):
        ave.to_data_frame(index=['foo', 'bar'])
    with pytest.raises(ValueError, match='"qux" is not a valid option'):
        ave.to_data_frame(index='qux')
    with pytest.raises(TypeError, match='index must be `None` or a string or'):
        ave.to_data_frame(index=np.arange(400))
    # test setting index: 'time' moves from columns to the index
    df = ave.to_data_frame(index='time')
    assert 'time' not in df.columns
    assert 'time' in df.index.names
    # test wide and long formats
    df_wide = ave.to_data_frame()
    assert all(np.in1d(ave.ch_names, df_wide.columns))
    df_long = ave.to_data_frame(long_format=True)
    expected = ('time', 'channel', 'ch_type', 'value')
    assert set(expected) == set(df_long.columns)
    assert set(ave.ch_names) == set(df_long['channel'])
    # long format has one row per (channel, time) sample
    assert(len(df_long) == ave.data.size)
    del df_wide, df_long
    # test scalings
    df = ave.to_data_frame(index='time')
    assert ((df.columns == ave.ch_names).all())
    assert_array_equal(df.values[:, 0], ave.data[0] * 1e13)
    assert_array_equal(df.values[:, 2], ave.data[2] * 1e15)
@requires_pandas
@pytest.mark.parametrize('time_format', (None, 'ms', 'timedelta'))
def test_to_data_frame_time_format(time_format):
    """Check the dtype of the 'time' column for each time_format option."""
    from pandas import Timedelta
    evoked = read_evokeds(fname, 0)
    frame = evoked.to_data_frame(time_format=time_format)
    expected_type = {None: np.float64, 'ms': np.int64,
                     'timedelta': Timedelta}[time_format]
    assert isinstance(frame['time'].iloc[0], expected_type)
def test_evoked_proj():
    """Test SSP proj operations.

    Reads with proj applied and not applied, checks add/del behavior in
    each state, and verifies apply_proj() multiplies by the projector.
    """
    for proj in [True, False]:
        ave = read_evokeds(fname, condition=0, proj=proj)
        assert (all(p['active'] == proj for p in ave.info['projs']))
        # test adding / deleting proj
        if proj:
            # active (already-applied) projs cannot be removed or replaced
            pytest.raises(ValueError, ave.add_proj, [],
                          {'remove_existing': True})
            pytest.raises(ValueError, ave.del_proj, 0)
        else:
            projs = deepcopy(ave.info['projs'])
            n_proj = len(ave.info['projs'])
            ave.del_proj(0)
            assert (len(ave.info['projs']) == n_proj - 1)
            # Test that already existing projections are not added.
            ave.add_proj(projs, remove_existing=False)
            assert (len(ave.info['projs']) == n_proj)
            ave.add_proj(projs[:-1], remove_existing=True)
            assert (len(ave.info['projs']) == n_proj - 1)
    # apply_proj() must be equivalent to left-multiplying by the projector
    ave = read_evokeds(fname, condition=0, proj=False)
    data = ave.data.copy()
    ave.apply_proj()
    assert_allclose(np.dot(ave._projector, data), ave.data)
def test_get_peak():
    """Test peak getter."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    # invalid time windows and modes must be rejected
    with pytest.raises(ValueError):
        evoked.get_peak(ch_type='mag', tmin=1)
    with pytest.raises(ValueError):
        evoked.get_peak(ch_type='mag', tmax=0.9)
    with pytest.raises(ValueError):
        evoked.get_peak(ch_type='mag', tmin=0.02, tmax=0.01)
    with pytest.raises(ValueError):
        evoked.get_peak(ch_type='mag', mode='foo')
    with pytest.raises(RuntimeError):
        evoked.get_peak(ch_type=None, mode='foo')
    with pytest.raises(ValueError):
        evoked.get_peak(ch_type='misc', mode='foo')
    # by default the latency comes back as a time value, not an index
    ch_name, peak_time = evoked.get_peak(ch_type='mag')
    assert ch_name in evoked.ch_names
    assert peak_time in evoked.times
    # with time_as_index=True it is an index into times
    ch_name, peak_idx, max_amp = evoked.get_peak(ch_type='mag',
                                                 time_as_index=True,
                                                 return_amplitude=True)
    assert peak_idx < len(evoked.times)
    assert_equal(ch_name, 'MEG 1421')
    assert_allclose(max_amp, 7.17057e-13, rtol=1e-5)
    # merge_grads is only valid for gradiometers
    with pytest.raises(ValueError):
        evoked.get_peak(ch_type='mag', merge_grads=True)
    ch_name, peak_time = evoked.get_peak(ch_type='grad', merge_grads=True)
    assert_equal(ch_name, 'MEG 244X')
    # exercise the private helper directly on a tiny array
    data = np.array([[0., 1., 2.],
                     [0., -3., 0]])
    times = np.array([.1, .2, .3])
    ch_idx, time_idx, max_amp = _get_peak(data, times, mode='abs')
    assert_equal(ch_idx, 1)
    assert_equal(time_idx, 1)
    assert_allclose(max_amp, -3.)
    ch_idx, time_idx, max_amp = _get_peak(data * -1, times, mode='neg')
    assert_equal(ch_idx, 0)
    assert_equal(time_idx, 2)
    assert_allclose(max_amp, -2.)
    ch_idx, time_idx, max_amp = _get_peak(data, times, mode='pos')
    assert_equal(ch_idx, 0)
    assert_equal(time_idx, 2)
    assert_allclose(max_amp, 2.)
    # all-positive data has no negative peak, and vice versa
    with pytest.raises(ValueError):
        _get_peak(data + 1e3, times, mode='neg')
    with pytest.raises(ValueError):
        _get_peak(data - 1e3, times, mode='pos')
def test_drop_channels_mixin():
    """Test channels-dropping functionality."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    to_drop = evoked.ch_names[:3]
    remaining = evoked.ch_names[3:]
    all_names = evoked.ch_names
    # dropping on a copy leaves the original untouched
    copy_dropped = evoked.copy().drop_channels(to_drop)
    assert_equal(remaining, copy_dropped.ch_names)
    assert_equal(all_names, evoked.ch_names)
    assert_equal(len(all_names), len(evoked.data))
    # dropping a single channel
    single_dropped = evoked.copy().drop_channels([to_drop[0]])
    assert_equal(single_dropped.ch_names, all_names[1:])
    # in-place drop
    evoked.drop_channels(to_drop)
    assert_equal(remaining, evoked.ch_names)
    assert_equal(len(remaining), len(evoked.data))
    # invalid channel specifications raise
    for bad_names in ([1, 2], "fake", ["fake"]):
        pytest.raises(ValueError, evoked.drop_channels, bad_names)
def test_pick_channels_mixin():
    """Test channel-picking functionality."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    wanted = evoked.ch_names[:3]
    all_names = evoked.ch_names
    # picking on a copy leaves the original untouched
    subset = evoked.copy().pick_channels(wanted)
    assert_equal(wanted, subset.ch_names)
    assert_equal(all_names, evoked.ch_names)
    assert_equal(len(all_names), len(evoked.data))
    # in-place pick
    evoked.pick_channels(wanted)
    assert_equal(wanted, evoked.ch_names)
    assert_equal(len(wanted), len(evoked.data))
    # type-based picking and __contains__
    evoked = read_evokeds(fname, condition=0, proj=True)
    assert 'meg' in evoked
    assert 'eeg' in evoked
    evoked.pick_types(meg=False, eeg=True)
    assert 'meg' not in evoked
    assert 'eeg' in evoked
    assert len(evoked.ch_names) == 60
def test_equalize_channels():
    """Test equalization of channels."""
    first = read_evokeds(fname, condition=0, proj=True)
    second = first.copy()
    # channels both instances will still share after the drops below
    common_names = first.ch_names[2:]
    first.drop_channels(first.ch_names[:1])
    second.drop_channels(second.ch_names[1:2])
    equalized = equalize_channels([first, second])
    # every output must be restricted to the common channel set
    for inst in equalized:
        assert_equal(common_names, inst.ch_names)
def test_arithmetic():
    """Test evoked arithmetic.

    Checks combine_evoked subtraction, the 'nave'/'equal'/list weighting
    schemes (including the resulting effective nave), comment handling,
    invalid weights, grand_average, and channel reordering on combine.
    """
    ev = read_evokeds(fname, condition=0)
    # two all-ones evokeds differing only in nave
    ev20 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
    ev30 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=30)
    tol = dict(rtol=1e-9, atol=0)
    # test subtraction
    sub1 = combine_evoked([ev, ev], weights=[1, -1])
    sub2 = combine_evoked([ev, -ev], weights=[1, 1])
    assert np.allclose(sub1.data, np.zeros_like(sub1.data), atol=1e-20)
    assert np.allclose(sub2.data, np.zeros_like(sub2.data), atol=1e-20)
    # test nave weighting. Expect signal ampl.: 1*(20/50) + 1*(30/50) == 1
    # and expect nave == ev1.nave + ev2.nave
    ev = combine_evoked([ev20, ev30], weights='nave')
    assert np.allclose(ev.nave, ev20.nave + ev30.nave)
    assert np.allclose(ev.data, np.ones_like(ev.data), **tol)
    # test equal-weighted sum. Expect signal ampl. == 2
    # and expect nave == 1/sum(1/naves) == 1/(1/20 + 1/30) == 12
    ev = combine_evoked([ev20, ev30], weights=[1, 1])
    assert np.allclose(ev.nave, 12.)
    assert np.allclose(ev.data, ev20.data + ev30.data, **tol)
    # test equal-weighted average. Expect signal ampl. == 1
    # and expect nave == 1/sum(weights²/naves) == 1/(0.5²/20 + 0.5²/30) == 48
    ev = combine_evoked([ev20, ev30], weights='equal')
    assert np.allclose(ev.nave, 48.)
    assert np.allclose(ev.data, np.mean([ev20.data, ev30.data], axis=0), **tol)
    # test zero weights
    ev = combine_evoked([ev20, ev30], weights=[1, 0])
    assert ev.nave == ev20.nave
    assert np.allclose(ev.data, ev20.data, **tol)
    # default comment behavior if evoked.comment is None
    old_comment1 = ev20.comment
    ev20.comment = None
    ev = combine_evoked([ev20, -ev30], weights=[1, -1])
    assert_equal(ev.comment.count('unknown'), 2)
    assert ev.comment == 'unknown + unknown'
    ev20.comment = old_comment1
    # invalid weights specifications must be rejected
    with pytest.raises(ValueError, match="Invalid value for the 'weights'"):
        combine_evoked([ev20, ev30], weights='foo')
    with pytest.raises(ValueError, match='weights must be the same size as'):
        combine_evoked([ev20, ev30], weights=[1])
    # grand average
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    ch_names = evoked1.ch_names[2:]
    evoked1.info['bads'] = ['EEG 008']  # test interpolation
    evoked1.drop_channels(evoked1.ch_names[:1])
    evoked2.drop_channels(evoked2.ch_names[1:2])
    gave = grand_average([evoked1, evoked2])
    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
    assert_equal(ch_names, gave.ch_names)
    assert_equal(gave.nave, 2)
    with pytest.raises(TypeError, match='All elements must be an instance of'):
        grand_average([1, evoked1])
    gave = grand_average([ev20, ev20, -ev30])  # (1 + 1 + -1) / 3 = 1/3
    assert_allclose(gave.data, np.full_like(gave.data, 1. / 3.))
    # test channel (re)ordering
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    data2 = evoked2.data  # assumes everything is ordered to the first evoked
    data = (evoked1.data + evoked2.data) / 2.
    evoked2.reorder_channels(evoked2.ch_names[::-1])
    assert not np.allclose(data2, evoked2.data)
    # combine must warn but still reorder to match the first input
    with pytest.warns(RuntimeWarning, match='reordering'):
        evoked3 = combine_evoked([evoked1, evoked2], weights=[0.5, 0.5])
    assert np.allclose(evoked3.data, data)
    assert evoked1.ch_names != evoked2.ch_names
    assert evoked1.ch_names == evoked3.ch_names
def test_array_epochs(tmpdir):
    """Test creating evoked from array.

    Round-trips an EvokedArray through FIF I/O, compares it with the
    average of an equivalent single-epoch EpochsArray, and checks error
    handling for invalid ``kind`` values and info/data shape mismatches.
    """
    tempdir = str(tmpdir)
    # creating
    rng = np.random.RandomState(42)  # fixed seed -> deterministic data
    data1 = rng.randn(20, 60)
    sfreq = 1e3
    ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
    types = ['eeg'] * 20
    info = create_info(ch_names, sfreq, types)
    evoked1 = EvokedArray(data1, info, tmin=-0.01)
    # save, read, and compare evokeds
    tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
    evoked1.save(tmp_fname)
    evoked2 = read_evokeds(tmp_fname)[0]
    data2 = evoked2.data
    assert_allclose(data1, data2)
    assert_array_almost_equal(evoked1.times, evoked2.times, 8)
    assert_equal(evoked1.first, evoked2.first)
    assert_equal(evoked1.last, evoked2.last)
    assert_equal(evoked1.kind, evoked2.kind)
    assert_equal(evoked1.nave, evoked2.nave)
    # now compare with EpochsArray (with single epoch)
    data3 = data1[np.newaxis, :, :]  # add an epochs axis of length 1
    events = np.c_[10, 0, 1]
    evoked3 = EpochsArray(data3, info, events=events, tmin=-0.01).average()
    assert_allclose(evoked1.data, evoked3.data)
    assert_allclose(evoked1.times, evoked3.times)
    assert_equal(evoked1.first, evoked3.first)
    assert_equal(evoked1.last, evoked3.last)
    assert_equal(evoked1.kind, evoked3.kind)
    assert_equal(evoked1.nave, evoked3.nave)
    # test kind check
    with pytest.raises(ValueError, match='Invalid value'):
        EvokedArray(data1, info, tmin=0, kind=1)
    with pytest.raises(ValueError, match='Invalid value'):
        EvokedArray(data1, info, kind='mean')
    # test match between channels info and data
    # (19-channel info vs. 20-channel data must raise)
    ch_names = ['EEG %03d' % (i + 1) for i in range(19)]
    types = ['eeg'] * 19
    info = create_info(ch_names, sfreq, types)
    pytest.raises(ValueError, EvokedArray, data1, info, tmin=-0.01)
def test_time_as_index_and_crop():
    """Test conversion of times to sample indices and cropping behavior."""
    tmin, tmax = -0.1, 0.1
    evoked = read_evokeds(fname, condition=0).crop(tmin, tmax)
    sample_period = 1. / evoked.info['sfreq']
    half_sample = 0.5 * sample_period
    # The cropped endpoints land within half a sample of the request.
    assert_allclose(evoked.times[[0, -1]], [tmin, tmax], atol=half_sample)
    last_idx = len(evoked.times) - 1
    assert_array_equal(
        evoked.time_as_index([-.1, .1], use_rounding=True), [0, last_idx])
    evoked.crop(evoked.tmin, evoked.tmax, include_tmax=False)
    n_kept = len(evoked.times)
    # Re-cropping with include_tmax=False warns but keeps the size stable.
    with pytest.warns(RuntimeWarning, match='tmax is set to'):
        evoked.crop(tmin, tmax, include_tmax=False)
    assert len(evoked.times) == n_kept
    assert_allclose(evoked.times[[0, -1]], [tmin, tmax - sample_period],
                    atol=half_sample)
def test_add_channels():
    """Test evoked splitting / re-appending channel types.

    Splits an Evoked into per-type copies, re-appends them with
    ``add_channels``, and checks channel membership, data equality, and
    the error paths for incompatible inputs.
    """
    evoked = read_evokeds(fname, condition=0)
    hpi_coils = [{'event_bits': []},
                 {'event_bits': np.array([256, 0, 256, 256])},
                 {'event_bits': np.array([512, 0, 512, 512])}]
    evoked.info['hpi_subsystem'] = dict(hpi_coils=hpi_coils, ncoil=2)
    evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_meg = evoked.copy().pick_types(meg=True)
    evoked_stim = evoked.copy().pick_types(meg=False, stim=True)
    evoked_eeg_meg = evoked.copy().pick_types(meg=True, eeg=True)
    evoked_new = evoked_meg.copy().add_channels([evoked_eeg, evoked_stim])
    assert (all(ch in evoked_new.ch_names
                for ch in evoked_stim.ch_names + evoked_meg.ch_names))
    evoked_new = evoked_meg.copy().add_channels([evoked_eeg])
    # BUG FIX: the original line asserted a bare generator expression,
    # which is always truthy, so membership was never actually checked.
    # Wrap in all() over the channels that should be present (EEG + MEG).
    assert all(ch in evoked_new.ch_names
               for ch in evoked_eeg.ch_names + evoked_meg.ch_names)
    assert_array_equal(evoked_new.data, evoked_eeg_meg.data)
    assert (all(ch not in evoked_new.ch_names
                for ch in evoked_stim.ch_names))
    # Now test errors
    evoked_badsf = evoked_eeg.copy()
    evoked_badsf.info['sfreq'] = 3.1415927  # mismatched sampling rate
    evoked_eeg = evoked_eeg.crop(-.1, .1)  # mismatched time window
    pytest.raises(RuntimeError, evoked_meg.add_channels, [evoked_badsf])
    pytest.raises(AssertionError, evoked_meg.add_channels, [evoked_eeg])
    pytest.raises(ValueError, evoked_meg.add_channels, [evoked_meg])
    pytest.raises(TypeError, evoked_meg.add_channels, evoked_badsf)
def test_evoked_baseline(tmpdir):
    """Test evoked baseline.

    Checks that the ``.baseline`` attribute is recorded at construction,
    updated by ``apply_baseline``, set by ``read_evokeds(baseline=...)``,
    survives an I/O round-trip, and cannot be removed once applied.
    """
    evoked = read_evokeds(fname, condition=0, baseline=None)
    # Here we create a data_set with constant data.
    evoked = EvokedArray(np.ones_like(evoked.data), evoked.info,
                         evoked.times[0])
    assert evoked.baseline is None
    # Passing baseline= at construction records the interval on the object.
    evoked_baselined = EvokedArray(np.ones_like(evoked.data), evoked.info,
                                   evoked.times[0], baseline=(None, 0))
    assert_allclose(evoked_baselined.baseline, (evoked_baselined.tmin, 0))
    del evoked_baselined
    # Mean baseline correction is applied, since the data is equal to its mean
    # the resulting data should be a matrix of zeroes.
    baseline = (None, None)
    evoked.apply_baseline(baseline)
    assert_allclose(evoked.baseline, (evoked.tmin, evoked.tmax))
    assert_allclose(evoked.data, np.zeros_like(evoked.data))
    # Test that the .baseline attribute changes if we apply a different
    # baseline now.
    baseline = (None, 0)
    evoked.apply_baseline(baseline)
    assert_allclose(evoked.baseline, (evoked.tmin, 0))
    # By default for our test file, no baseline should be set upon reading
    evoked = read_evokeds(fname, condition=0)
    assert evoked.baseline is None
    # Test that the .baseline attribute is set when we call read_evokeds()
    # with a `baseline` parameter.
    baseline = (-0.2, -0.1)
    evoked = read_evokeds(fname, condition=0, baseline=baseline)
    assert_allclose(evoked.baseline, baseline)
    # Test that the .baseline attribute survives an I/O roundtrip.
    evoked = read_evokeds(fname, condition=0)
    baseline = (-0.2, -0.1)
    evoked.apply_baseline(baseline)
    assert_allclose(evoked.baseline, baseline)
    tmp_fname = tmpdir / 'test-ave.fif'
    evoked.save(tmp_fname)
    evoked_read = read_evokeds(tmp_fname, condition=0)
    assert_allclose(evoked_read.baseline, evoked.baseline)
    # We shouldn't be able to remove a baseline correction after it has been
    # applied.
    evoked = read_evokeds(fname, condition=0)
    baseline = (-0.2, -0.1)
    evoked.apply_baseline(baseline)
    with pytest.raises(ValueError, match='already been baseline-corrected'):
        evoked.apply_baseline(None)
def test_hilbert():
    """Test hilbert on raw, epochs, and evoked.

    Verifies the preload requirement, consistency of the transform across
    the three container types, and the ``envelope=True`` mode.
    """
    raw = read_raw_fif(raw_fname).load_data()
    raw.del_proj()
    raw.pick_channels(raw.ch_names[:2])  # two channels keep the test fast
    events = read_events(event_name)
    epochs = Epochs(raw, events)
    # apply_hilbert requires preloaded epochs data
    with pytest.raises(RuntimeError, match='requires epochs data to be load'):
        epochs.apply_hilbert()
    epochs.load_data()
    evoked = epochs.average()
    raw_hilb = raw.apply_hilbert()
    epochs_hilb = epochs.apply_hilbert()
    evoked_hilb = evoked.copy().apply_hilbert()
    # transforming the average equals averaging the transformed epochs
    evoked_hilb_2_data = epochs_hilb.get_data().mean(0)
    assert_allclose(evoked_hilb.data, evoked_hilb_2_data)
    # This one is only approximate because of edge artifacts
    evoked_hilb_3 = Epochs(raw_hilb, events).average()
    corr = np.corrcoef(np.abs(evoked_hilb_3.data.ravel()),
                       np.abs(evoked_hilb.data.ravel()))[0, 1]
    assert 0.96 < corr < 0.98
    # envelope=True mode
    evoked_hilb_env = evoked.apply_hilbert(envelope=True)
    assert_allclose(evoked_hilb_env.data, np.abs(evoked_hilb.data))
def test_apply_function_evk():
    """Check the apply_function method for evoked data."""
    # Build a synthetic Evoked object to exercise apply_function.
    n_ch, n_times = 10, 1000
    values = np.random.rand(n_ch, n_times)
    info = create_info(n_ch, 1000., 'eeg')
    evoked = EvokedArray(values, info)
    original = evoked.data.copy()

    def scale(arr, multiplier):
        # channel-wise callback: multiply the data by a constant factor
        return arr * multiplier

    factor = -1
    applied = evoked.apply_function(scale, n_jobs=1, multiplier=factor)
    # Shape is preserved and every sample is scaled by the factor.
    assert np.shape(applied.data) == np.shape(original)
    assert np.equal(applied.data, original * factor).all()
| {
"repo_name": "drammock/mne-python",
"path": "mne/tests/test_evoked.py",
"copies": "4",
"size": "32784",
"license": "bsd-3-clause",
"hash": 9215672245024772000,
"line_mean": 38.9756097561,
"line_max": 79,
"alpha_frac": 0.6488102502,
"autogenerated": false,
"ratio": 2.941757156959526,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5590567407159527,
"avg_score": null,
"num_lines": null
} |
import os.path as op
import itertools as itt
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
import pytest
import numpy as np
from scipy import linalg
from mne.cov import (regularize, whiten_evoked,
_auto_low_rank_model,
prepare_noise_cov, compute_whitener,
_regularized_covariance)
from mne import (read_cov, write_cov, Epochs, merge_events,
find_events, compute_raw_covariance,
compute_covariance, read_evokeds, compute_proj_raw,
pick_channels_cov, pick_types, make_ad_hoc_cov,
make_fixed_length_events, create_info, compute_rank)
from mne.channels import equalize_channels
from mne.datasets import testing
from mne.fixes import _get_args
from mne.io import read_raw_fif, RawArray, read_raw_ctf, read_info
from mne.io.pick import _DATA_CH_TYPES_SPLIT, pick_info
from mne.preprocessing import maxwell_filter
from mne.rank import _compute_rank_int
from mne.utils import requires_sklearn, catch_logging, assert_snr
# Paths to the small fixture recordings shipped with the mne.io test suite.
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
cov_fname = op.join(base_dir, 'test-cov.fif')  # reference noise covariance
cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')  # gzipped variant
cov_km_fname = op.join(base_dir, 'test-km-cov.fif')  # keep-sample-mean cov
raw_fname = op.join(base_dir, 'test_raw.fif')
ave_fname = op.join(base_dir, 'test-ave.fif')
erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')  # empty-room cov
# presumably cHPI raw data after SSS, per the filename -- TODO confirm
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
# CTF data lives in the (optionally downloaded) mne testing dataset.
ctf_fname = op.join(testing.data_path(download=False), 'CTF',
                    'testdata_ctf.ds')
@pytest.mark.parametrize('proj', (True, False))
@pytest.mark.parametrize('pca', (True, 'white', False))
def test_compute_whitener(proj, pca):
    """Test properties of compute_whitener.

    For each projection/PCA mode, checks the whitener/colorer shapes and
    that their product round-trips to (a possibly rank-reduced) identity.
    """
    raw = read_raw_fif(raw_fname).crop(0, 3).load_data()
    raw.pick_types(meg=True, eeg=True, exclude=())
    if proj:
        raw.apply_proj()
    else:
        raw.del_proj()
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_raw_covariance(raw)
    W, _, C = compute_whitener(cov, raw.info, pca=pca, return_colorer=True,
                               verbose='error')
    n_channels = len(raw.ch_names)
    # Each projector removes one degree of freedom from the data.
    rank = n_channels - len(raw.info['projs'])
    # pca=True reduces the output dimension to the rank; otherwise the
    # whitener stays square.  (A dead pre-assignment of n_reduced that was
    # immediately overwritten here has been removed.)
    n_reduced = rank if pca is True else n_channels
    assert W.shape == C.shape[::-1] == (n_reduced, n_channels)
    # round-trip mults
    round_trip = np.dot(W, C)
    if pca is True:
        assert_allclose(round_trip, np.eye(n_reduced), atol=1e-7)
    elif pca == 'white':
        # Our first few rows/cols are zeroed out in the white space
        assert_allclose(round_trip[-rank:, -rank:],
                        np.eye(rank), atol=1e-7)
    else:
        assert pca is False
        assert_allclose(round_trip, np.eye(n_channels), atol=0.05)
def test_cov_mismatch():
    """Test estimation with MEG<->Head mismatch.

    Combining epochs whose ``dev_head_t`` transforms differ (shifted or
    missing) must raise by default, pass with ``on_mismatch='ignore'``,
    and warn with ``on_mismatch='warn'``.
    """
    raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
    events = find_events(raw, stim_channel='STI 014')
    raw.pick_channels(raw.ch_names[:5])
    raw.add_proj([], remove_existing=True)
    epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)
    for kind in ('shift', 'None'):
        epochs_2 = epochs.copy()
        # This should be fine
        compute_covariance([epochs, epochs_2])
        if kind == 'shift':
            # slightly translate the device-to-head transform
            epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001
        else:  # None
            epochs_2.info['dev_head_t'] = None
        pytest.raises(ValueError, compute_covariance, [epochs, epochs_2])
        compute_covariance([epochs, epochs_2], on_mismatch='ignore')
        # BUG FIX: on_mismatch='warn' emits a RuntimeWarning; catch it with
        # pytest.warns.  pytest.raises only worked when the test config
        # globally promoted warnings to errors.
        with pytest.warns(RuntimeWarning, match='transform mismatch'):
            compute_covariance([epochs, epochs_2], on_mismatch='warn')
        with pytest.raises(ValueError, match='Invalid value'):
            compute_covariance(epochs, on_mismatch='x')
    # This should work
    epochs.info['dev_head_t'] = None
    epochs_2.info['dev_head_t'] = None
    compute_covariance([epochs, epochs_2], method=None)
def test_cov_order():
    """Test covariance ordering.

    Builds a randomly permuted copy of a covariance and checks that
    ``regularize``, ``prepare_noise_cov``, ``compute_whitener``, and
    ``whiten_evoked`` give equivalent results for both orderings.
    """
    raw = read_raw_fif(raw_fname)
    raw.set_eeg_reference(projection=True)
    info = raw.info
    # add MEG channel with low enough index number to affect EEG if
    # order is incorrect
    info['bads'] += ['MEG 0113']
    ch_names = [info['ch_names'][pick]
                for pick in pick_types(info, meg=False, eeg=True)]
    cov = read_cov(cov_fname)
    # no avg ref present warning
    prepare_noise_cov(cov, info, ch_names, verbose='error')
    # big reordering
    cov_reorder = cov.copy()
    order = np.random.RandomState(0).permutation(np.arange(len(cov.ch_names)))
    cov_reorder['names'] = [cov['names'][ii] for ii in order]
    cov_reorder['data'] = cov['data'][order][:, order]
    # Make sure we did this properly
    _assert_reorder(cov_reorder, cov, order)
    # Now check some functions that should get the same result for both
    # regularize
    with pytest.raises(ValueError, match='rank, if str'):
        regularize(cov, info, rank='foo')
    with pytest.raises(TypeError, match='rank must be'):
        regularize(cov, info, rank=False)
    with pytest.raises(TypeError, match='rank must be'):
        regularize(cov, info, rank=1.)
    cov_reg = regularize(cov, info, rank='full')
    cov_reg_reorder = regularize(cov_reorder, info, rank='full')
    _assert_reorder(cov_reg_reorder, cov_reg, order)
    # prepare_noise_cov
    cov_prep = prepare_noise_cov(cov, info, ch_names)
    # BUG FIX: use the reordered covariance here -- the original passed
    # ``cov`` twice, so the comparison below was trivially true and never
    # exercised reordering.  prepare_noise_cov sorts to ch_names order, so
    # the identity permutation is the correct expectation.
    cov_prep_reorder = prepare_noise_cov(cov_reorder, info, ch_names)
    _assert_reorder(cov_prep, cov_prep_reorder,
                    order=np.arange(len(cov_prep['names'])))
    # compute_whitener
    whitener, w_ch_names, n_nzero = compute_whitener(
        cov, info, return_rank=True)
    assert whitener.shape[0] == whitener.shape[1]
    whitener_2, w_ch_names_2, n_nzero_2 = compute_whitener(
        cov_reorder, info, return_rank=True)
    assert_array_equal(w_ch_names_2, w_ch_names)
    assert_allclose(whitener_2, whitener, rtol=1e-6)
    assert n_nzero == n_nzero_2
    # with pca
    assert n_nzero < whitener.shape[0]
    whitener_pca, w_ch_names_pca, n_nzero_pca = compute_whitener(
        cov, info, pca=True, return_rank=True)
    assert_array_equal(w_ch_names_pca, w_ch_names)
    assert n_nzero_pca == n_nzero
    assert whitener_pca.shape == (n_nzero_pca, len(w_ch_names))
    # whiten_evoked
    evoked = read_evokeds(ave_fname)[0]
    evoked_white = whiten_evoked(evoked, cov)
    evoked_white_2 = whiten_evoked(evoked, cov_reorder)
    assert_allclose(evoked_white_2.data, evoked_white.data, atol=1e-7)
def _assert_reorder(cov_new, cov_orig, order):
"""Check that we get the same result under reordering."""
inv_order = np.argsort(order)
assert_array_equal([cov_new['names'][ii] for ii in inv_order],
cov_orig['names'])
assert_allclose(cov_new['data'][inv_order][:, inv_order],
cov_orig['data'], atol=1e-20)
def test_ad_hoc_cov(tmpdir):
    """Test ad hoc cov creation and I/O.

    Round-trips ad hoc covariances (default and custom per-type standard
    deviations) through FIF I/O and checks the diag/data consistency
    validation in ``_get_square``.
    """
    out_fname = tmpdir.join('test-cov.fif')
    evoked = read_evokeds(ave_fname)[0]
    # default standard deviations
    cov = make_ad_hoc_cov(evoked.info)
    cov.save(out_fname)
    assert 'Covariance' in repr(cov)
    cov2 = read_cov(out_fname)
    assert_array_almost_equal(cov['data'], cov2['data'])
    # custom per-channel-type standard deviations must also round-trip
    std = dict(grad=2e-13, mag=10e-15, eeg=0.1e-6)
    cov = make_ad_hoc_cov(evoked.info, std)
    cov.save(out_fname)
    assert 'Covariance' in repr(cov)
    cov2 = read_cov(out_fname)
    assert_array_almost_equal(cov['data'], cov2['data'])
    # making the stored data 2D while the diag flag is unchanged is
    # inconsistent and must be rejected
    cov['data'] = np.diag(cov['data'])
    with pytest.raises(RuntimeError, match='attributes inconsistent'):
        cov._get_square()
    cov['diag'] = False
    cov._get_square()
    # same inconsistency in the other direction (diag=False, 1D data)
    cov['data'] = np.diag(cov['data'])
    with pytest.raises(RuntimeError, match='attributes inconsistent'):
        cov._get_square()
def test_io_cov(tmpdir):
    """Test IO for noise covariance matrices.

    Covers plain and gzipped FIF round-trips, metadata preservation,
    channel selection with bads excluded, and bad-filename warnings.
    """
    cov = read_cov(cov_fname)
    cov['method'] = 'empirical'
    cov['loglik'] = -np.inf
    # method/loglik metadata must survive the round-trip
    cov.save(tmpdir.join('test-cov.fif'))
    cov2 = read_cov(tmpdir.join('test-cov.fif'))
    assert_array_almost_equal(cov.data, cov2.data)
    assert_equal(cov['method'], cov2['method'])
    assert_equal(cov['loglik'], cov2['loglik'])
    assert 'Covariance' in repr(cov)
    # gzipped FIF round-trip
    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2.save(tmpdir.join('test-cov.fif.gz'))
    cov2 = read_cov(tmpdir.join('test-cov.fif.gz'))
    assert_array_almost_equal(cov.data, cov2.data)
    # channel selection with bads excluded
    cov['bads'] = ['EEG 039']
    cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
    assert cov_sel['dim'] == (len(cov['data']) - len(cov['bads']))
    assert cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim'])
    cov_sel.save(tmpdir.join('test-cov.fif'))
    # NOTE(review): the next five lines duplicate the gzip round-trip above
    # and re-read cov_gz_fname rather than the cov_sel file just saved --
    # looks like a copy/paste leftover; confirm intent.
    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2.save(tmpdir.join('test-cov.fif.gz'))
    cov2 = read_cov(tmpdir.join('test-cov.fif.gz'))
    assert_array_almost_equal(cov.data, cov2.data)
    # test warnings on bad filenames
    cov_badname = tmpdir.join('test-bad-name.fif.gz')
    with pytest.warns(RuntimeWarning, match='-cov.fif'):
        write_cov(cov_badname, cov)
    with pytest.warns(RuntimeWarning, match='-cov.fif'):
        read_cov(cov_badname)
@pytest.mark.parametrize('method', (None, 'empirical', 'shrunk'))
def test_cov_estimation_on_raw(method, tmpdir):
    """Test estimation from raw (typically empty room).

    Compares the Python estimate against a reference covariance and
    against ``np.cov``, then checks I/O, channel subsets, and the
    warning/error paths for short segments and rejection.
    """
    if method == 'shrunk':
        try:
            import sklearn  # noqa: F401
        except Exception as exp:
            pytest.skip('sklearn is required, got %s' % (exp,))
    raw = read_raw_fif(raw_fname, preload=True)
    cov_mne = read_cov(erm_cov_fname)
    method_params = dict(shrunk=dict(shrinkage=[0]))
    # The pure-string uses the more efficient numpy-based method, while
    # the list gets triaged to compute_covariance (should be equivalent
    # but use more memory)
    with pytest.warns(None):  # can warn about EEG ref
        cov = compute_raw_covariance(
            raw, tstep=None, method=method, rank='full',
            method_params=method_params)
    assert_equal(cov.ch_names, cov_mne.ch_names)
    assert_equal(cov.nfree, cov_mne.nfree)
    assert_snr(cov.data, cov_mne.data, 1e6)
    # test equivalence with np.cov
    cov_np = np.cov(raw.copy().pick_channels(cov['names']).get_data(), ddof=1)
    if method != 'shrunk':  # can check all
        off_diag = np.triu_indices(cov_np.shape[0])
    else:
        # We explicitly zero out off-diag entries between channel types,
        # so let's just check MEG off-diag entries
        off_diag = np.triu_indices(len(pick_types(raw.info, meg=True,
                                                  exclude=())))
    for other in (cov_mne, cov):
        assert_allclose(np.diag(cov_np), np.diag(other.data), rtol=5e-6)
        assert_allclose(cov_np[off_diag], other.data[off_diag], rtol=4e-3)
        assert_snr(cov.data, other.data, 1e6)
    # tstep=0.2 (default)
    with pytest.warns(None):  # can warn about EEG ref
        cov = compute_raw_covariance(raw, method=method, rank='full',
                                     method_params=method_params)
    assert_equal(cov.nfree, cov_mne.nfree - 120)  # cutoff some samples
    assert_snr(cov.data, cov_mne.data, 170)
    # test IO when computation done in Python
    cov.save(tmpdir.join('test-cov.fif'))  # test saving
    cov_read = read_cov(tmpdir.join('test-cov.fif'))
    assert cov_read.ch_names == cov.ch_names
    assert cov_read.nfree == cov.nfree
    assert_array_almost_equal(cov.data, cov_read.data)
    # test with a subset of channels
    raw_pick = raw.copy().pick_channels(raw.ch_names[:5])
    raw_pick.info.normalize_proj()
    cov = compute_raw_covariance(raw_pick, tstep=None, method=method,
                                 rank='full', method_params=method_params)
    assert cov_mne.ch_names[:5] == cov.ch_names
    assert_snr(cov.data, cov_mne.data[:5, :5], 5e6)
    cov = compute_raw_covariance(raw_pick, method=method, rank='full',
                                 method_params=method_params)
    assert_snr(cov.data, cov_mne.data[:5, :5], 90)  # cutoff samps
    # make sure we get a warning with too short a segment
    raw_2 = read_raw_fif(raw_fname).crop(0, 1)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_raw_covariance(raw_2, method=method,
                                     method_params=method_params)
    # no epochs found due to rejection
    pytest.raises(ValueError, compute_raw_covariance, raw, tstep=None,
                  method='empirical', reject=dict(eog=200e-6))
    # but this should work
    with pytest.warns(None):  # sklearn
        cov = compute_raw_covariance(
            raw.copy().crop(0, 10.), tstep=None, method=method,
            reject=dict(eog=1000e-6), method_params=method_params,
            verbose='error')
@pytest.mark.slowtest
@requires_sklearn
def test_cov_estimation_on_raw_reg():
    """Test estimation from raw with regularization."""
    raw = read_raw_fif(raw_fname, preload=True)
    # Decimate by 10x to keep the test fast; adjust sfreq to match.
    raw.info['sfreq'] /= 10.
    raw = RawArray(raw._data[:, ::10].copy(), raw.info)
    cov_expected = read_cov(erm_cov_fname)
    # "diagonal_fixed" is much faster; long epochs also help speed.
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov_est = compute_raw_covariance(raw, tstep=5.,
                                         method='diagonal_fixed')
    assert_snr(cov_est.data, cov_expected.data, 5)
def _assert_cov(cov, cov_desired, tol=0.005, nfree=True):
assert_equal(cov.ch_names, cov_desired.ch_names)
err = (linalg.norm(cov.data - cov_desired.data, ord='fro') /
linalg.norm(cov.data, ord='fro'))
assert err < tol, '%s >= %s' % (err, tol)
if nfree:
assert_equal(cov.nfree, cov_desired.nfree)
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None))
def test_cov_estimation_with_triggers(rank, tmpdir):
    """Test estimation from raw with triggers.

    Exercises ``compute_covariance`` with merged events, single epochs
    objects, lists of epochs, keep_sample_mean variants, mismatched
    projectors, and dict-style event_ids.
    """
    raw = read_raw_fif(raw_fname)
    raw.set_eeg_reference(projection=True).load_data()
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
                    baseline=(-0.2, -0.1), proj=True,
                    reject=reject, preload=True)
    cov = compute_covariance(epochs, keep_sample_mean=True)
    cov_km = read_cov(cov_km_fname)
    # adjust for nfree bug
    cov_km['nfree'] -= 1
    _assert_cov(cov, cov_km)
    # Test with tmin and tmax (different but not too much)
    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
    assert np.all(cov.data != cov_tmin_tmax.data)
    err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
           linalg.norm(cov_tmin_tmax.data, ord='fro'))
    assert err < 0.05
    # cov using a list of epochs and keep_sample_mean=True
    epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True, reject=reject)
              for ev_id in event_ids]
    cov2 = compute_covariance(epochs, keep_sample_mean=True)
    assert_array_almost_equal(cov.data, cov2.data)
    assert cov.ch_names == cov2.ch_names
    # cov with keep_sample_mean=False using a list of epochs
    cov = compute_covariance(epochs, keep_sample_mean=False)
    assert cov_km.nfree == cov.nfree
    _assert_cov(cov, read_cov(cov_fname), nfree=False)
    # keep_sample_mean=False is incompatible with these options
    method_params = {'empirical': {'assume_centered': False}}
    pytest.raises(ValueError, compute_covariance, epochs,
                  keep_sample_mean=False, method_params=method_params)
    pytest.raises(ValueError, compute_covariance, epochs,
                  keep_sample_mean=False, method='shrunk', rank=rank)
    # test IO when computation done in Python
    cov.save(tmpdir.join('test-cov.fif'))  # test saving
    cov_read = read_cov(tmpdir.join('test-cov.fif'))
    _assert_cov(cov, cov_read, 1e-5)
    # cov with list of epochs with different projectors
    epochs = [Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True),
              Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=False)]
    # these should fail
    pytest.raises(ValueError, compute_covariance, epochs)
    pytest.raises(ValueError, compute_covariance, epochs, projs=None)
    # these should work, but won't be equal to above
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_covariance(epochs, projs=[])
    # test new dict support
    epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01, tmax=0,
                    proj=True, reject=reject, preload=True)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        compute_covariance(epochs)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        compute_covariance(epochs, projs=[])
    # projs must be a list of Projection objects (or None)
    pytest.raises(TypeError, compute_covariance, epochs, projs='foo')
    pytest.raises(TypeError, compute_covariance, epochs, projs=['foo'])
def test_arithmetic_cov():
    """Test arithmetic with noise covariance matrices."""
    cov = read_cov(cov_fname)
    # Out-of-place addition doubles both the data and the nfree count.
    doubled = cov + cov
    assert_array_almost_equal(2 * cov.nfree, doubled.nfree)
    assert_array_almost_equal(2 * cov.data, doubled.data)
    assert cov.ch_names == doubled.ch_names
    # In-place addition must produce the same result.
    cov += cov
    assert_array_almost_equal(doubled.nfree, cov.nfree)
    assert_array_almost_equal(doubled.data, cov.data)
    assert doubled.ch_names == cov.ch_names
def test_regularize_cov():
    """Test cov regularization."""
    raw = read_raw_fif(raw_fname)
    # Mark one channel bad to exercise the exclude='bads' path.
    raw.info['bads'].append(raw.ch_names[0])
    noise_cov = read_cov(cov_fname)
    regularized = regularize(noise_cov, raw.info,
                             mag=0.1, grad=0.1, eeg=0.1, proj=True,
                             exclude='bads', rank='full')
    # Dimensionality and shape are preserved by regularization.
    assert noise_cov['dim'] == regularized['dim']
    assert noise_cov['data'].shape == regularized['data'].shape
    # Only a small fraction of entries may grow.
    assert np.mean(noise_cov['data'] < regularized['data']) < 0.08
    # Every data channel type must be a keyword argument of regularize().
    assert set(_DATA_CH_TYPES_SPLIT) - set(_get_args(regularize)) == set()
def test_whiten_evoked():
    """Test whitening of evoked data.

    After whitening with a regularized covariance, the mean absolute
    baseline amplitude per channel should be of order one; also checks
    the error path for a covariance missing channels.
    """
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=True)
    cov = read_cov(cov_fname)
    ###########################################################################
    # Show result
    picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
                       exclude='bads')
    noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1,
                           exclude='bads', rank='full')
    evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)
    whiten_baseline_data = evoked_white.data[picks][:, evoked.times < 0]
    mean_baseline = np.mean(np.abs(whiten_baseline_data), axis=1)
    # whitened baseline noise should have roughly unit scale
    assert np.all(mean_baseline < 1.)
    assert np.all(mean_baseline > 0.2)
    # degenerate: covariance covering only a subset of the picked channels
    cov_bad = pick_channels_cov(cov, include=evoked.ch_names[:10])
    pytest.raises(RuntimeError, whiten_evoked, evoked, cov_bad, picks)
def test_regularized_covariance():
    """Test unchanged data with regularized_covariance."""
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=True)
    # Work on a copy so in-place modification can be detected (gh-5698).
    snapshot = evoked.data.copy()
    _regularized_covariance(snapshot)
    assert_allclose(snapshot, evoked.data, atol=1e-20)
@requires_sklearn
def test_auto_low_rank():
    """Test probabilistic low rank estimators.

    Simulates data confined to a known low-dimensional subspace plus
    heteroscedastic noise and checks that cross-validated factor
    analysis recovers the true rank, plus the too-many-components
    warning path.
    """
    n_samples, n_features, rank = 400, 10, 5
    sigma = 0.1
    def get_data(n_samples, n_features, rank, sigma):
        # Data in a `rank`-dim subspace + per-feature noise levels.
        rng = np.random.RandomState(42)
        W = rng.randn(n_features, n_features)
        X = rng.randn(n_samples, rank)
        U, _, _ = linalg.svd(W.copy())
        X = np.dot(X, U[:, :rank].T)
        sigmas = sigma * rng.rand(n_features) + sigma / 2.
        X += rng.randn(n_samples, n_features) * sigmas
        return X
    X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
                 sigma=sigma)
    method_params = {'iter_n_components': [4, 5, 6]}
    cv = 3
    n_jobs = 1
    mode = 'factor_analysis'
    # large rescaling should not affect rank selection
    rescale = 1e8
    X *= rescale
    est, info = _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
                                     method_params=method_params,
                                     cv=cv)
    assert_equal(info['best'], rank)
    # requesting more components than features must warn
    X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
                 sigma=sigma)
    method_params = {'iter_n_components': [n_features + 5]}
    msg = ('You are trying to estimate %i components on matrix '
           'with %i features.') % (n_features + 5, n_features)
    with pytest.warns(RuntimeWarning, match=msg):
        _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
                             method_params=method_params, cv=cv)
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None, 'info'))
@requires_sklearn
def test_compute_covariance_auto_reg(rank):
    """Test automated regularization.

    Runs several estimators on a small set of epochs and checks that
    regularization changes the matrices in a structured way, that the
    log-likelihoods are ordered and within expected bounds, and that
    invalid methods/scalings raise.
    """
    raw = read_raw_fif(raw_fname, preload=True)
    raw.resample(100, npad='auto')  # much faster estimation
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(mag=4e-12)
    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    # we need a few channels for numerical reasons in PCA/FA
    picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
    raw.pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    epochs = Epochs(
        raw, events_merged, 1234, tmin=-0.2, tmax=0,
        baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
    epochs = epochs.crop(None, 0)[:5]
    method_params = dict(factor_analysis=dict(iter_n_components=[3]),
                         pca=dict(iter_n_components=[3]))
    covs = compute_covariance(epochs, method='auto',
                              method_params=method_params,
                              return_estimators=True, rank=rank)
    # make sure regularization produces structured differences
    diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
    off_diag_mask = np.invert(diag_mask)
    for cov_a, cov_b in itt.combinations(covs, 2):
        if (cov_a['method'] == 'diagonal_fixed' and
                # here we have diagonal or no regularization.
                cov_b['method'] == 'empirical' and rank == 'full'):
            assert not np.any(cov_a['data'][diag_mask] ==
                              cov_b['data'][diag_mask])
            # but the rest is the same
            assert_allclose(cov_a['data'][off_diag_mask],
                            cov_b['data'][off_diag_mask], rtol=1e-12)
        else:
            # and here we have shrinkage everywhere.
            assert not np.any(cov_a['data'][diag_mask] ==
                              cov_b['data'][diag_mask])
            # BUG FIX: this assertion previously duplicated the diagonal
            # check; compare the off-diagonal entries instead so that
            # "shrinkage everywhere" is actually verified.
            assert not np.any(cov_a['data'][off_diag_mask] ==
                              cov_b['data'][off_diag_mask])
    logliks = [c['loglik'] for c in covs]
    assert np.diff(logliks).max() <= 0  # descending order
    methods = ['empirical', 'ledoit_wolf', 'oas', 'shrunk', 'shrinkage']
    if rank == 'full':
        methods.extend(['factor_analysis', 'pca'])
    with catch_logging() as log:
        cov3 = compute_covariance(epochs, method=methods,
                                  method_params=method_params, projs=None,
                                  return_estimators=True, rank=rank,
                                  verbose=True)
    log = log.getvalue().split('\n')
    if rank is None:
        assert '    Setting small MAG eigenvalues to zero (without PCA)' in log
        assert 'Reducing data rank from 10 -> 7' in log
    else:
        assert 'Reducing' not in log
    method_names = [cov['method'] for cov in cov3]
    best_bounds = [-45, -35]
    bounds = [-55, -45] if rank == 'full' else best_bounds
    for method in set(methods) - {'empirical', 'shrunk'}:
        this_lik = cov3[method_names.index(method)]['loglik']
        assert bounds[0] < this_lik < bounds[1]
    this_lik = cov3[method_names.index('shrunk')]['loglik']
    assert best_bounds[0] < this_lik < best_bounds[1]
    this_lik = cov3[method_names.index('empirical')]['loglik']
    bounds = [-110, -100] if rank == 'full' else best_bounds
    assert bounds[0] < this_lik < bounds[1]
    assert_equal({c['method'] for c in cov3}, set(methods))
    cov4 = compute_covariance(epochs, method=methods,
                              method_params=method_params, projs=None,
                              return_estimators=False, rank=rank)
    assert cov3[0]['method'] == cov4['method']  # ordering
    # invalid prespecified method
    pytest.raises(ValueError, compute_covariance, epochs, method='pizza')
    # invalid scalings
    pytest.raises(ValueError, compute_covariance, epochs, method='shrunk',
                  scalings=dict(misc=123))
def _cov_rank(cov, info, proj=True):
    """Compute the integer rank of ``cov``, suppressing mismatch warnings.

    Rank-mismatch warnings are expected here: some tests intentionally
    violate the computed/info assumption, e.g. SSS data with
    ``rank='full'``.
    """
    with pytest.warns(None):
        rank = _compute_rank_int(cov, info=info, proj=proj)
    return rank
@pytest.fixture(scope='module')
def raw_epochs_events():
    """Create raw, epochs, and events for tests.

    Module-scoped: built once and shared by the low-rank covariance
    tests below.
    """
    raw = read_raw_fif(raw_fname).set_eeg_reference(projection=True).crop(0, 3)
    raw = maxwell_filter(raw, regularize=None)  # heavily reduce the rank
    assert raw.info['bads'] == []  # no bads
    events = make_fixed_length_events(raw)
    epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
    return (raw, epochs, events)
@requires_sklearn
@pytest.mark.parametrize('rank', (None, 'full', 'info'))
def test_low_rank_methods(rank, raw_epochs_events):
    """Test low-rank covariance matrix estimation.

    For each estimator and rank mode, checks both the effective rank of
    the result and that its log-likelihood falls in the expected range.
    """
    epochs = raw_epochs_events[1]
    sss_proj_rank = 139  # 80 MEG + 60 EEG - 1 proj
    n_ch = 366
    methods = ('empirical', 'diagonal_fixed', 'oas')
    # expected loglik ranges per (rank mode, method)
    bounds = {
        'None': dict(empirical=(-15000, -5000),
                     diagonal_fixed=(-1500, -500),
                     oas=(-700, -600)),
        'full': dict(empirical=(-18000, -8000),
                     diagonal_fixed=(-2000, -1600),
                     oas=(-1600, -1000)),
        'info': dict(empirical=(-15000, -5000),
                     diagonal_fixed=(-700, -600),
                     oas=(-700, -600)),
    }
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        covs = compute_covariance(
            epochs, method=methods, return_estimators=True, rank=rank,
            verbose=True)
    for cov in covs:
        method = cov['method']
        these_bounds = bounds[str(rank)][method]
        this_rank = _cov_rank(cov, epochs.info, proj=(rank != 'full'))
        # only rank='full' with a regularized method restores full rank
        if rank == 'full' and method != 'empirical':
            assert this_rank == n_ch
        else:
            assert this_rank == sss_proj_rank
        assert these_bounds[0] < cov['loglik'] < these_bounds[1], \
            (rank, method)
@requires_sklearn
def test_low_rank_cov(raw_epochs_events):
    """Test additional properties of low rank computations."""
    raw, epochs, events = raw_epochs_events
    sss_proj_rank = 139  # 80 MEG + 60 EEG - 1 proj
    n_ch = 366
    proj_rank = 365  # one EEG proj
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        emp_cov = compute_covariance(epochs)
    # Test equivalence with mne.cov.regularize subspace
    with pytest.raises(ValueError, match='are dependent.*must equal'):
        regularize(emp_cov, epochs.info, rank=None, mag=0.1, grad=0.2)
    assert _cov_rank(emp_cov, epochs.info) == sss_proj_rank
    reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
    assert _cov_rank(reg_cov, epochs.info) == proj_rank
    # rank='full' regularization intentionally exceeds the data-driven rank
    with pytest.warns(RuntimeWarning, match='exceeds the theoretical'):
        _compute_rank_int(reg_cov, info=epochs.info)
    del reg_cov
    with catch_logging() as log:
        reg_r_cov = regularize(emp_cov, epochs.info, proj=True, rank=None,
                               verbose=True)
    log = log.getvalue()
    assert 'jointly' in log
    assert _cov_rank(reg_r_cov, epochs.info) == sss_proj_rank
    # proj=False with rank=None should give the same data as proj=True
    reg_r_only_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
    assert _cov_rank(reg_r_only_cov, epochs.info) == sss_proj_rank
    assert_allclose(reg_r_only_cov['data'], reg_r_cov['data'])
    del reg_r_only_cov, reg_r_cov
    # test that rank=306 is same as rank='full'
    epochs_meg = epochs.copy().pick_types(meg=True)
    assert len(epochs_meg.ch_names) == 306
    epochs_meg.info.update(bads=[], projs=[])
    cov_full = compute_covariance(epochs_meg, method='oas',
                                  rank='full', verbose='error')
    assert _cov_rank(cov_full, epochs_meg.info) == 306
    with pytest.warns(RuntimeWarning, match='few samples'):
        cov_dict = compute_covariance(epochs_meg, method='oas',
                                      rank=dict(meg=306))
    assert _cov_rank(cov_dict, epochs_meg.info) == 306
    assert_allclose(cov_full['data'], cov_dict['data'])
    cov_dict = compute_covariance(epochs_meg, method='oas',
                                  rank=dict(meg=306), verbose='error')
    assert _cov_rank(cov_dict, epochs_meg.info) == 306
    assert_allclose(cov_full['data'], cov_dict['data'])
    # Work with just EEG data to simplify projection / rank reduction
    raw = raw.copy().pick_types(meg=False, eeg=True)
    n_proj = 2
    raw.add_proj(compute_proj_raw(raw, n_eeg=n_proj))
    n_ch = len(raw.ch_names)
    rank = n_ch - n_proj - 1  # plus avg proj
    assert len(raw.info['projs']) == 3
    epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
    assert len(raw.ch_names) == n_ch
    emp_cov = compute_covariance(epochs, rank='full', verbose='error')
    assert _cov_rank(emp_cov, epochs.info) == rank
    reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
    assert _cov_rank(reg_cov, epochs.info) == rank
    reg_r_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
    assert _cov_rank(reg_r_cov, epochs.info) == rank
    dia_cov = compute_covariance(epochs, rank=None, method='diagonal_fixed',
                                 verbose='error')
    assert _cov_rank(dia_cov, epochs.info) == rank
    assert_allclose(dia_cov['data'], reg_cov['data'])
    epochs.pick_channels(epochs.ch_names[:103])
    # degenerate: low-rank options are incompatible with these methods
    with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
        compute_covariance(epochs, rank=None, method='pca')
    with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
        compute_covariance(epochs, rank=None, method='factor_analysis')
@testing.requires_testing_data
@requires_sklearn
def test_cov_ctf():
    """Test basic cov computation on ctf data with/without compensation."""
    raw = read_raw_ctf(ctf_fname).crop(0., 2.).load_data()
    events = make_fixed_length_events(raw, 99999)
    assert len(events) == 2
    ch_names = [raw.info['ch_names'][pick]
                for pick in pick_types(raw.info, meg=True, eeg=False,
                                       ref_meg=False)]
    # Covariance should be computable at either compensation grade.
    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
        with pytest.warns(RuntimeWarning, match='Too few samples'):
            noise_cov = compute_covariance(epochs, tmax=0.,
                                           method=['empirical'])
        prepare_noise_cov(noise_cov, raw.info, ch_names)
    # Compute at grade 0, then switch the raw to grade 1: the grades now
    # mismatch between cov and raw.
    raw.apply_gradient_compensation(0)
    epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        noise_cov = compute_covariance(epochs, tmax=0., method=['empirical'])
    raw.apply_gradient_compensation(1)
    # TODO This next call in principle should fail.
    prepare_noise_cov(noise_cov, raw.info, ch_names)
    # make sure comps matrices was not removed from raw
    assert raw.info['comps'], 'Comps matrices removed'
def test_equalize_channels():
    """Equalizing Covariance instances keeps only the shared channels."""
    info_a = create_info(['CH1', 'CH2', 'CH3', 'CH4'], sfreq=1.0,
                         ch_types='eeg')
    info_b = create_info(['CH5', 'CH1', 'CH2'], sfreq=1.0, ch_types='eeg')
    cov_a = make_ad_hoc_cov(info_a)
    cov_b = make_ad_hoc_cov(info_b)
    cov_a, cov_b = equalize_channels([cov_a, cov_b])
    # Only the channels present in both inputs survive, in matching order.
    for cov in (cov_a, cov_b):
        assert cov.ch_names == ['CH1', 'CH2']
def test_compute_whitener_rank():
    """Test risky rank options."""
    info = read_info(ave_fname)
    info = pick_info(info, pick_types(info, meg=True))
    info['projs'] = []
    # need a square version because the diag one takes shortcuts in
    # compute_whitener (users shouldn't even need this function so it's
    # private)
    cov = make_ad_hoc_cov(info)._as_square()
    assert len(cov['names']) == 306
    _, _, rank = compute_whitener(cov, info, rank=None, return_rank=True)
    assert rank == 306
    assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank)
    # Shrink the last diagonal entry so the matrix becomes rank-deficient.
    cov['data'][-1] *= 1e-14  # trivially rank-deficient
    _, _, rank = compute_whitener(cov, info, rank=None, return_rank=True)
    assert rank == 305
    assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank)
    # Forcing rank above the estimated one should emit a warning but be
    # honored.
    with pytest.warns(RuntimeWarning, match='exceeds the estimated'):
        _, _, rank = compute_whitener(cov, info, rank=dict(meg=306),
                                      return_rank=True)
    assert rank == 306
| {
"repo_name": "bloyl/mne-python",
"path": "mne/tests/test_cov.py",
"copies": "4",
"size": "34609",
"license": "bsd-3-clause",
"hash": 7960692879079456000,
"line_mean": 41.9392059553,
"line_max": 79,
"alpha_frac": 0.6172671848,
"autogenerated": false,
"ratio": 3.2929590865842053,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5910226271384206,
"avg_score": null,
"num_lines": null
} |
import pytest
import numpy as np
from scipy import sparse
from scipy import linalg
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils import check_random_state
from sklearn.utils.fixes import parse_version
from sklearn.linear_model import LinearRegression
from sklearn.linear_model._base import _deprecate_normalize
from sklearn.linear_model._base import _preprocess_data
from sklearn.linear_model._base import _rescale_data
from sklearn.linear_model._base import make_dataset
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_regression
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
# Module-wide fixed-seed RNG and tolerance shared by the tests below.
rng = np.random.RandomState(0)
rtol = 1e-6
def test_linear_regression():
    """Fit LinearRegression on trivial data and check the exact solution."""
    # Perfect line through the origin: slope 1, intercept 0.
    X, Y = [[1], [2]], [1, 2]
    reg = LinearRegression().fit(X, Y)
    assert_array_almost_equal(reg.coef_, [1])
    assert_array_almost_equal(reg.intercept_, [0])
    assert_array_almost_equal(reg.predict(X), [1, 2])
    # Degenerate single-sample input: everything collapses to zero.
    X, Y = [[1]], [0]
    reg = LinearRegression().fit(X, Y)
    assert_array_almost_equal(reg.coef_, [0])
    assert_array_almost_equal(reg.intercept_, [0])
    assert_array_almost_equal(reg.predict(X), [0])
def test_linear_regression_sample_weights():
    """Weighted fits must match the closed-form weighted least squares."""
    # TODO: loop over sparse data as well
    rng = np.random.RandomState(0)
    # It would not work with under-determined systems
    for n_samples, n_features in ((6, 5), ):
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        sample_weight = 1.0 + rng.rand(n_samples)
        for intercept in (True, False):
            # LinearRegression with explicit sample_weight
            reg = LinearRegression(fit_intercept=intercept)
            reg.fit(X, y, sample_weight=sample_weight)
            coefs1 = reg.coef_
            inter1 = reg.intercept_
            assert reg.coef_.shape == (X.shape[1], )  # sanity checks
            assert reg.score(X, y) > 0.5
            # Closed form of the weighted least square
            # theta = (X^T W X)^(-1) * X^T W y
            W = np.diag(sample_weight)
            # With an intercept, augment X with a column of ones so the
            # closed form solves for [intercept, coefs] jointly.
            if intercept is False:
                X_aug = X
            else:
                dummy_column = np.ones(shape=(n_samples, 1))
                X_aug = np.concatenate((dummy_column, X), axis=1)
            coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
                                  X_aug.T.dot(W).dot(y))
            if intercept is False:
                assert_array_almost_equal(coefs1, coefs2)
            else:
                assert_array_almost_equal(coefs1, coefs2[1:])
                assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_positive_and_sparse():
    """``positive=True`` must reject sparse X with a TypeError."""
    # Sparse design matrices are unsupported when positive == True.
    reg = LinearRegression(positive=True)
    error_msg = ('A sparse matrix was passed, '
                 'but dense data is required.')
    with pytest.raises(TypeError, match=error_msg):
        reg.fit(sparse.eye(10), np.ones(10))
def test_raises_value_error_if_sample_weights_greater_than_1d():
    """Sample weights must be either scalar or 1-D; 2-D must raise.

    The original body only exercised the accepted (scalar / 1-D) cases and
    never asserted the rejection its name promises; the ``pytest.raises``
    check below adds that missing assertion.
    """
    n_sampless = [2, 3]
    n_featuress = [3, 2]
    for n_samples, n_features in zip(n_sampless, n_featuress):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        sample_weights_OK = rng.randn(n_samples) ** 2 + 1
        sample_weights_OK_1 = 1.
        sample_weights_OK_2 = 2.
        reg = LinearRegression()
        # make sure the "OK" sample weights actually work
        reg.fit(X, y, sample_weights_OK)
        reg.fit(X, y, sample_weights_OK_1)
        reg.fit(X, y, sample_weights_OK_2)
        # a 2-D weight array must be rejected
        sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
        with pytest.raises(ValueError):
            reg.fit(X, y, sample_weights_not_OK)
def test_fit_intercept():
    """Coefficient shapes must not depend on ``fit_intercept``."""
    X2 = np.array([[0.38349978, 0.61650022],
                   [0.58853682, 0.41146318]])
    X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
                   [0.08385139, 0.20692515, 0.70922346]])
    y = np.array([1, 1])
    fitted = {}
    for key, X in (('X2', X2), ('X3', X3)):
        fitted[key] = (
            LinearRegression(fit_intercept=False).fit(X, y),
            LinearRegression().fit(X, y),
        )
    # With and without an intercept, coef_ has the same shape per dataset.
    for no_intercept, with_intercept in fitted.values():
        assert with_intercept.coef_.shape == no_intercept.coef_.shape
    # And the dimensionality is consistent across feature counts.
    assert (fitted['X2'][0].coef_.ndim ==
            fitted['X3'][0].coef_.ndim)
def test_error_on_wrong_normalize():
    """A non-boolean ``normalize`` value must raise a helpful ValueError."""
    with pytest.raises(ValueError, match="Leave 'normalize' to its default"):
        _deprecate_normalize('wrong', True, 'estimator')
@pytest.mark.parametrize('normalize', [True, False, 'deprecated'])
@pytest.mark.parametrize('default', [True, False])
# FIXME update test in 1.2 for new versions
def test_deprecate_normalize(normalize, default):
    """Exhaustively check the ``normalize`` deprecation state machine.

    ``output`` is the value ``_deprecate_normalize`` should return,
    ``expected`` the warning class (or None), and ``warning_msg`` the
    substrings that must all appear in the warning message.
    """
    # test all possible case of the normalize parameter deprecation
    if not default:
        if normalize == 'deprecated':
            # no warning
            output = default
            expected = None
            warning_msg = []
        else:
            # explicit user value: FutureWarning mentioning the removal
            # version, plus guidance depending on the value passed
            output = normalize
            expected = FutureWarning
            warning_msg = ['1.2']
            if not normalize:
                warning_msg.append('default value')
            else:
                warning_msg.append('StandardScaler(')
    elif default:
        if normalize == 'deprecated':
            # warning to pass False and use StandardScaler
            output = default
            expected = FutureWarning
            warning_msg = ['False', '1.2', 'StandardScaler(']
        else:
            # no warning
            output = normalize
            expected = None
            warning_msg = []
    with pytest.warns(expected) as record:
        _normalize = _deprecate_normalize(normalize, default, 'estimator')
    assert _normalize == output
    n_warnings = 0 if expected is None else 1
    assert len(record) == n_warnings
    if n_warnings:
        assert all([
            warning in str(record[0].message)
            for warning in warning_msg
        ])
def test_linear_regression_sparse(random_state=0):
    """LinearRegression must recover an identity mapping on sparse data."""
    random_state = check_random_state(random_state)
    n = 100
    for _ in range(10):
        X = sparse.eye(n, n)
        beta = random_state.rand(n)
        y = X * beta[:, np.newaxis]
        ols = LinearRegression()
        ols.fit(X, y.ravel())
        # With X = I, coefficients plus intercept reproduce beta exactly.
        assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
        assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
# FIXME: 'normalize' to be removed in 1.2 in LinearRegression
@pytest.mark.filterwarnings("ignore:'normalize' was deprecated")
@pytest.mark.parametrize('normalize', [True, False])
@pytest.mark.parametrize('fit_intercept', [True, False])
def test_linear_regression_sparse_equal_dense(normalize, fit_intercept):
    """Sparse and dense fits must agree for every parameter combination."""
    rng = check_random_state(0)
    n_samples, n_features = 200, 2
    X = rng.randn(n_samples, n_features)
    X[X < 0.1] = 0.  # sparsify so the CSR version is genuinely sparse
    y = rng.rand(n_samples)
    params = dict(normalize=normalize, fit_intercept=fit_intercept)
    clf_dense = LinearRegression(**params).fit(X, y)
    clf_sparse = LinearRegression(**params).fit(sparse.csr_matrix(X), y)
    assert clf_dense.intercept_ == pytest.approx(clf_sparse.intercept_)
    assert_allclose(clf_dense.coef_, clf_sparse.coef_)
def test_linear_regression_multiple_outcome(random_state=0):
    """Multi-output fits must match two stacked single-output fits."""
    X, y = make_regression(random_state=random_state)
    Y = np.vstack((y, y)).T
    model = LinearRegression().fit(X, Y)
    # One coefficient row per output column.
    assert model.coef_.shape == (2, X.shape[1])
    Y_pred = model.predict(X)
    single_pred = LinearRegression().fit(X, y).predict(X)
    assert_array_almost_equal(np.vstack((single_pred, single_pred)).T, Y_pred,
                              decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    """Sparse multi-output regression must match stacked single fits."""
    rs = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=rs)
    X = sparse.coo_matrix(X)
    Y = np.vstack((y, y)).T
    model = LinearRegression().fit(X, Y)
    # One coefficient row per output column.
    assert model.coef_.shape == (2, X.shape[1])
    Y_pred = model.predict(X)
    single_pred = LinearRegression().fit(X, y.ravel()).predict(X)
    assert_array_almost_equal(np.vstack((single_pred, single_pred)).T, Y_pred,
                              decimal=3)
def test_linear_regression_positive():
    """``positive=True`` on toy data should recover the exact solution."""
    # Perfect nonnegative line: slope 1, intercept 0.
    X, y = [[1], [2]], [1, 2]
    reg = LinearRegression(positive=True).fit(X, y)
    assert_array_almost_equal(reg.coef_, [1])
    assert_array_almost_equal(reg.intercept_, [0])
    assert_array_almost_equal(reg.predict(X), [1, 2])
    # Degenerate single-sample case collapses to all zeros.
    X, y = [[1]], [0]
    reg = LinearRegression(positive=True).fit(X, y)
    assert_allclose(reg.coef_, [0])
    assert_allclose(reg.intercept_, [0])
    assert_allclose(reg.predict(X), [0])
def test_linear_regression_positive_multiple_outcome(random_state=0):
    """Nonnegative multi-output fits must match stacked single fits."""
    rs = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=rs)
    Y = np.vstack((y, y)).T
    model = LinearRegression(positive=True).fit(X, Y)
    assert model.coef_.shape == (2, X.shape[1])
    # The positivity constraint must hold for every output.
    assert np.all(model.coef_ >= 0.)
    Y_pred = model.predict(X)
    single_pred = LinearRegression(positive=True).fit(X, y.ravel()).predict(X)
    assert_allclose(np.vstack((single_pred, single_pred)).T, Y_pred)
def test_linear_regression_positive_vs_nonpositive():
    """Constraining coefficients to be nonnegative changes the solution."""
    X, y = make_sparse_uncorrelated(random_state=0)
    reg_pos = LinearRegression(positive=True).fit(X, y)
    reg_free = LinearRegression(positive=False).fit(X, y)
    # On this dataset the constrained fit must differ measurably.
    assert np.mean((reg_pos.coef_ - reg_free.coef_) ** 2) > 1e-3
def test_linear_regression_positive_vs_nonpositive_when_positive():
    """Both solvers agree when the true coefficients are nonnegative."""
    n_samples, n_features = 200, 4
    X = rng.rand(n_samples, n_features)
    y = X[:, 0] + 2 * X[:, 1] + 3 * X[:, 2] + 1.5 * X[:, 3]
    reg_pos = LinearRegression(positive=True).fit(X, y)
    reg_free = LinearRegression(positive=False).fit(X, y)
    assert np.mean((reg_pos.coef_ - reg_free.coef_) ** 2) < 1e-6
def test_linear_regression_pd_sparse_dataframe_warning():
    """Fitting a partially sparse DataFrame warns; a fully sparse one does not.

    Cleanup: the loop variable ``col`` ranges over 1..3, so the original
    ``if col != 0`` guard was always true and has been removed.
    """
    pd = pytest.importorskip('pandas')
    # restrict the pd versions < '0.24.0' as they have a bug in is_sparse func
    if parse_version(pd.__version__) < parse_version('0.24.0'):
        pytest.skip("pandas 0.24+ required.")
    # Warning is raised only when some of the columns is sparse
    df = pd.DataFrame({'0': np.random.randn(10)})
    for col in range(1, 4):
        arr = np.random.randn(10)
        arr[:8] = 0
        # all columns but the first column is sparse
        arr = pd.arrays.SparseArray(arr, fill_value=0)
        df[str(col)] = arr
    msg = "pandas.DataFrame with sparse columns found."
    reg = LinearRegression()
    with pytest.warns(UserWarning, match=msg):
        reg.fit(df.iloc[:, 0:2], df.iloc[:, 3])
    # does not warn when the whole dataframe is sparse
    df['0'] = pd.arrays.SparseArray(df['0'], fill_value=0)
    assert hasattr(df, "sparse")
    with pytest.warns(None) as record:
        reg.fit(df.iloc[:, 0:2], df.iloc[:, 3])
    assert not record
def test_preprocess_data():
    """Check centering/scaling returned by ``_preprocess_data`` (dense).

    Covers the three combinations: no intercept, intercept only, and
    intercept plus normalization.
    """
    n_samples = 200
    n_features = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    expected_X_mean = np.mean(X, axis=0)
    # normalize=True uses the feature-wise euclidean norm of centered X,
    # i.e. std * sqrt(n_samples).
    expected_X_scale = np.std(X, axis=0) * np.sqrt(X.shape[0])
    expected_y_mean = np.mean(y, axis=0)
    # fit_intercept=False: data passes through untouched.
    Xt, yt, X_mean, y_mean, X_scale = \
        _preprocess_data(X, y, fit_intercept=False, normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_scale, np.ones(n_features))
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)
    # fit_intercept=True: X and y are centered, not scaled.
    Xt, yt, X_mean, y_mean, X_scale = \
        _preprocess_data(X, y, fit_intercept=True, normalize=False)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_scale, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
    # normalize=True additionally divides centered X by its scale.
    Xt, yt, X_mean, y_mean, X_scale = \
        _preprocess_data(X, y, fit_intercept=True, normalize=True)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_scale, expected_X_scale)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_scale)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_preprocess_data_multioutput():
    """Check y handling of ``_preprocess_data`` for multi-output targets."""
    n_samples = 200
    n_features = 3
    n_outputs = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)
    # Run the same checks for dense and sparse X.
    args = [X, sparse.csc_matrix(X)]
    for X in args:
        # fit_intercept=False: y untouched.
        _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False,
                                               normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)
        # fit_intercept=True: y centered per output column.
        _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
                                               normalize=False)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
        # normalize only affects X, so y is still just centered.
        _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
                                               normalize=True)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
@pytest.mark.parametrize("is_sparse", [False, True])
def test_preprocess_data_weighted(is_sparse):
n_samples = 200
n_features = 4
# Generate random data with 50% of zero values to make sure
# that the sparse variant of this test is actually sparse. This also
# shifts the mean value for each columns in X further away from
# zero.
X = rng.rand(n_samples, n_features)
X[X < 0.5] = 0.
# Scale the first feature of X to be 10 larger than the other to
# better check the impact of feature scaling.
X[:, 0] *= 10
# Constant non-zero feature.
X[:, 2] = 1.
# Constant zero feature (non-materialized in the sparse case)
X[:, 3] = 0.
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
X_sample_weight_avg = np.average(X, weights=sample_weight, axis=0)
X_sample_weight_var = np.average((X - X_sample_weight_avg)**2,
weights=sample_weight,
axis=0)
constant_mask = X_sample_weight_var < 10 * np.finfo(X.dtype).eps
assert_array_equal(constant_mask, [0, 0, 1, 1])
expected_X_scale = (
np.sqrt(X_sample_weight_var) * np.sqrt(sample_weight.sum())
)
# near constant features should not be scaled
expected_X_scale[constant_mask] = 1
if is_sparse:
X = sparse.csr_matrix(X)
# normalize is False
Xt, yt, X_mean, y_mean, X_scale = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
sample_weight=sample_weight, return_mean=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_scale, np.ones(n_features))
if is_sparse:
assert_array_almost_equal(Xt.toarray(), X.toarray())
else:
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
# normalize is True
Xt, yt, X_mean, y_mean, X_scale = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
sample_weight=sample_weight, return_mean=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_scale, expected_X_scale)
if is_sparse:
# X is not centered
assert_array_almost_equal(
Xt.toarray(), X.toarray() / expected_X_scale
)
else:
assert_array_almost_equal(
Xt, (X - expected_X_mean) / expected_X_scale
)
# _preprocess_data with normalize=True scales the data by the feature-wise
# euclidean norms while StandardScaler scales the data by the feature-wise
# standard deviations.
# The two are equivalent up to a ratio of np.sqrt(n_samples) if unweighted
# or np.sqrt(sample_weight.sum()) if weighted.
if is_sparse:
scaler = StandardScaler(with_mean=False).fit(
X, sample_weight=sample_weight)
# Non-constant features are scaled similarly with np.sqrt(n_samples)
assert_array_almost_equal(
scaler.transform(X).toarray()[:, :2]
/ np.sqrt(sample_weight.sum()),
Xt.toarray()[:, :2]
)
# Constant features go through un-scaled.
assert_array_almost_equal(
scaler.transform(X).toarray()[:, 2:],
Xt.toarray()[:, 2:]
)
else:
scaler = StandardScaler(with_mean=True).fit(
X, sample_weight=sample_weight)
assert_array_almost_equal(scaler.mean_, X_mean)
assert_array_almost_equal(
scaler.transform(X) / np.sqrt(sample_weight.sum()),
Xt,
)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_preprocess_data_with_return_mean():
    """Check ``_preprocess_data(..., return_mean=True)`` on sparse input.

    Sparse X is never materialized as centered data; only the means and
    scales are returned, while Xt keeps the original (possibly rescaled)
    sparsity pattern.
    """
    n_samples = 200
    n_features = 2
    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    expected_X_scale = np.std(XA, axis=0) * np.sqrt(X.shape[0])
    # fit_intercept=False: nothing changes.
    Xt, yt, X_mean, y_mean, X_scale = \
        _preprocess_data(X, y, fit_intercept=False, normalize=False,
                         return_mean=True)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_scale, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y)
    # fit_intercept=True: means reported, but sparse X stays uncentered.
    Xt, yt, X_mean, y_mean, X_scale = \
        _preprocess_data(X, y, fit_intercept=True, normalize=False,
                         return_mean=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_scale, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
    # normalize=True: X is rescaled (still uncentered), y is centered.
    Xt, yt, X_mean, y_mean, X_scale = \
        _preprocess_data(X, y, fit_intercept=True, normalize=True,
                         return_mean=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_scale, expected_X_scale)
    assert_array_almost_equal(Xt.A, XA / expected_X_scale)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_preprocess_data():
    """``_preprocess_data`` must keep CSR input in CSR format."""
    X, y = make_regression()
    X[X < 2.5] = 0.0  # sparsify before converting
    X_csr = sparse.csr_matrix(X)
    X_out, y, _, _, _ = _preprocess_data(X_csr, y, True)
    assert X_out.getformat() == 'csr'
@pytest.mark.parametrize('is_sparse', (True, False))
@pytest.mark.parametrize('to_copy', (True, False))
def test_preprocess_copy_data_no_checks(is_sparse, to_copy):
    """copy=True must duplicate the data buffer; copy=False must share it."""
    X, y = make_regression()
    X[X < 2.5] = 0.0
    if is_sparse:
        X = sparse.csr_matrix(X)
    X_, y_, _, _, _ = _preprocess_data(X, y, True,
                                       copy=to_copy, check_input=False)
    # Compare the underlying buffer: ``.data`` for CSR, the array itself
    # otherwise. Sharing must happen exactly when no copy was requested.
    buf_in = X.data if is_sparse else X
    buf_out = X_.data if is_sparse else X_
    assert np.may_share_memory(buf_out, buf_in) == (not to_copy)
def test_dtype_preprocess_data():
    """Check dtype propagation of ``_preprocess_data``.

    All outputs must follow X's dtype (float32 in, float32 out; float64 in,
    float64 out), and 32/64-bit results must agree numerically.
    """
    n_samples = 200
    n_features = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    X_32 = np.asarray(X, dtype=np.float32)
    y_32 = np.asarray(y, dtype=np.float32)
    X_64 = np.asarray(X, dtype=np.float64)
    y_64 = np.asarray(y, dtype=np.float64)
    for fit_intercept in [True, False]:
        for normalize in [True, False]:
            # Homogeneous dtypes plus the two mixed X/y combinations.
            Xt_32, yt_32, X_mean_32, y_mean_32, X_scale_32 = _preprocess_data(
                X_32, y_32, fit_intercept=fit_intercept, normalize=normalize,
                return_mean=True)
            Xt_64, yt_64, X_mean_64, y_mean_64, X_scale_64 = _preprocess_data(
                X_64, y_64, fit_intercept=fit_intercept, normalize=normalize,
                return_mean=True)
            Xt_3264, yt_3264, X_mean_3264, y_mean_3264, X_scale_3264 = (
                _preprocess_data(X_32, y_64, fit_intercept=fit_intercept,
                                 normalize=normalize, return_mean=True))
            Xt_6432, yt_6432, X_mean_6432, y_mean_6432, X_scale_6432 = (
                _preprocess_data(X_64, y_32, fit_intercept=fit_intercept,
                                 normalize=normalize, return_mean=True))
            # X's dtype wins for all outputs.
            assert Xt_32.dtype == np.float32
            assert yt_32.dtype == np.float32
            assert X_mean_32.dtype == np.float32
            assert y_mean_32.dtype == np.float32
            assert X_scale_32.dtype == np.float32
            assert Xt_64.dtype == np.float64
            assert yt_64.dtype == np.float64
            assert X_mean_64.dtype == np.float64
            assert y_mean_64.dtype == np.float64
            assert X_scale_64.dtype == np.float64
            assert Xt_3264.dtype == np.float32
            assert yt_3264.dtype == np.float32
            assert X_mean_3264.dtype == np.float32
            assert y_mean_3264.dtype == np.float32
            assert X_scale_3264.dtype == np.float32
            assert Xt_6432.dtype == np.float64
            assert yt_6432.dtype == np.float64
            assert X_mean_6432.dtype == np.float64
            assert y_mean_6432.dtype == np.float64
            assert X_scale_6432.dtype == np.float64
            # Inputs are not modified in place.
            assert X_32.dtype == np.float32
            assert y_32.dtype == np.float32
            assert X_64.dtype == np.float64
            assert y_64.dtype == np.float64
            # 32- and 64-bit results agree to float32 precision.
            assert_array_almost_equal(Xt_32, Xt_64)
            assert_array_almost_equal(yt_32, yt_64)
            assert_array_almost_equal(X_mean_32, X_mean_64)
            assert_array_almost_equal(y_mean_32, y_mean_64)
            assert_array_almost_equal(X_scale_32, X_scale_64)
@pytest.mark.parametrize('n_targets', [None, 2])
def test_rescale_data_dense(n_targets):
    """``_rescale_data`` must multiply rows by ``sqrt(sample_weight)``."""
    n_samples, n_features = 200, 2
    sample_weight = 1.0 + rng.rand(n_samples)
    X = rng.rand(n_samples, n_features)
    if n_targets is None:
        y = rng.rand(n_samples)
    else:
        y = rng.rand(n_samples, n_targets)
    rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
    sqrt_w = np.sqrt(sample_weight)
    expected_X = X * sqrt_w[:, np.newaxis]
    # 1-D y broadcasts directly; 2-D y needs a column vector of weights.
    if n_targets is None:
        expected_y = y * sqrt_w
    else:
        expected_y = y * sqrt_w[:, np.newaxis]
    assert_array_almost_equal(rescaled_X, expected_X)
    assert_array_almost_equal(rescaled_y, expected_y)
def test_fused_types_make_dataset():
    """Check dtype handling of ``make_dataset`` for dense and CSR inputs.

    The float32 and float64 dataset variants must yield matching samples,
    and the CSR path must agree with the dense path.
    """
    iris = load_iris()
    X_32 = iris.data.astype(np.float32)
    y_32 = iris.target.astype(np.float32)
    X_csr_32 = sparse.csr_matrix(X_32)
    sample_weight_32 = np.arange(y_32.size, dtype=np.float32)
    X_64 = iris.data.astype(np.float64)
    y_64 = iris.target.astype(np.float64)
    X_csr_64 = sparse.csr_matrix(X_64)
    sample_weight_64 = np.arange(y_64.size, dtype=np.float64)
    # array
    dataset_32, _ = make_dataset(X_32, y_32, sample_weight_32)
    dataset_64, _ = make_dataset(X_64, y_64, sample_weight_64)
    # _next_py yields (x, y, weight, index); x is (data, indices, indptr).
    xi_32, yi_32, _, _ = dataset_32._next_py()
    xi_64, yi_64, _, _ = dataset_64._next_py()
    xi_data_32, _, _ = xi_32
    xi_data_64, _, _ = xi_64
    assert xi_data_32.dtype == np.float32
    assert xi_data_64.dtype == np.float64
    assert_allclose(yi_64, yi_32, rtol=rtol)
    # csr
    datasetcsr_32, _ = make_dataset(X_csr_32, y_32, sample_weight_32)
    datasetcsr_64, _ = make_dataset(X_csr_64, y_64, sample_weight_64)
    xicsr_32, yicsr_32, _, _ = datasetcsr_32._next_py()
    xicsr_64, yicsr_64, _, _ = datasetcsr_64._next_py()
    xicsr_data_32, _, _ = xicsr_32
    xicsr_data_64, _, _ = xicsr_64
    assert xicsr_data_32.dtype == np.float32
    assert xicsr_data_64.dtype == np.float64
    assert_allclose(xicsr_data_64, xicsr_data_32, rtol=rtol)
    assert_allclose(yicsr_64, yicsr_32, rtol=rtol)
    # Dense and CSR paths must produce identical samples.
    assert_array_equal(xi_data_32, xicsr_data_32)
    assert_array_equal(xi_data_64, xicsr_data_64)
    assert_array_equal(yi_32, yicsr_32)
    assert_array_equal(yi_64, yicsr_64)
| {
"repo_name": "kevin-intel/scikit-learn",
"path": "sklearn/linear_model/tests/test_base.py",
"copies": "2",
"size": "26110",
"license": "bsd-3-clause",
"hash": 9191682616168708000,
"line_mean": 34.1886792453,
"line_max": 79,
"alpha_frac": 0.6164304864,
"autogenerated": false,
"ratio": 3.2198791466272043,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4836309633027205,
"avg_score": null,
"num_lines": null
} |
from math import log
import numpy as np
from scipy.linalg import pinvh
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import check_random_state
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn.linear_model import Ridge
from sklearn import datasets
from sklearn.utils.extmath import fast_logdet
# Shared regression dataset used by several of the Bayesian tests below.
diabetes = datasets.load_diabetes()
def test_n_iter():
    """``n_iter`` below 1 must be rejected at fit time."""
    y = np.array([1, 2, 6, 8, 10])
    X = y.reshape(-1, 1)
    msg = "n_iter should be greater than or equal to 1."
    assert_raise_message(ValueError, msg, BayesianRidge(n_iter=0).fit, X, y)
def test_bayesian_ridge_scores():
    """``scores_`` holds one entry per iteration plus the final score."""
    clf = BayesianRidge(compute_score=True)
    clf.fit(diabetes.data, diabetes.target)
    assert clf.scores_.shape == (clf.n_iter_ + 1,)
def test_bayesian_ridge_score_values():
    """Check value of score on toy example.

    Compute log marginal likelihood with equation (36) in Sparse Bayesian
    Learning and the Relevance Vector Machine (Tipping, 2001):

        - 0.5 * (log |Id/alpha + X.X^T/lambda| +
                 y^T.(Id/alpha + X.X^T/lambda).y + n * log(2 * pi))
        + lambda_1 * log(lambda) - lambda_2 * lambda
        + alpha_1 * log(alpha) - alpha_2 * alpha

    and check equality with the score computed during training.
    """
    X, y = diabetes.data, diabetes.target
    n_samples = X.shape[0]
    # check with initial values of alpha and lambda (see code for the values)
    eps = np.finfo(np.float64).eps
    alpha_ = 1. / (np.var(y) + eps)
    lambda_ = 1.
    # value of the parameters of the Gamma hyperpriors
    alpha_1 = 0.1
    alpha_2 = 0.1
    lambda_1 = 0.1
    lambda_2 = 0.1
    # compute score using formula of docstring
    score = lambda_1 * log(lambda_) - lambda_2 * lambda_
    score += alpha_1 * log(alpha_) - alpha_2 * alpha_
    # M is the marginal covariance Id/alpha + X.X^T/lambda
    M = 1. / alpha_ * np.eye(n_samples) + 1. / lambda_ * np.dot(X, X.T)
    M_inv = pinvh(M)
    score += - 0.5 * (fast_logdet(M) + np.dot(y.T, np.dot(M_inv, y)) +
                      n_samples * log(2 * np.pi))
    # compute score with BayesianRidge; n_iter=1 so scores_[0] corresponds
    # to the initial hyperparameter values used above
    clf = BayesianRidge(alpha_1=alpha_1, alpha_2=alpha_2,
                        lambda_1=lambda_1, lambda_2=lambda_2,
                        n_iter=1, fit_intercept=False, compute_score=True)
    clf.fit(X, y)
    assert_almost_equal(clf.scores_[0], score, decimal=9)
def test_bayesian_ridge_parameter():
    """lambda_/alpha_ ratio must match an equivalent plain Ridge (GH #8224)."""
    X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
    y = np.array([1, 2, 3, 2, 0, 4, 5]).T
    bayesian = BayesianRidge(compute_score=True).fit(X, y)
    # A Ridge with alpha = lambda_ / alpha_ solves the same penalized
    # least-squares problem, so the fits must coincide.
    equivalent_alpha = bayesian.lambda_ / bayesian.alpha_
    ridge = Ridge(alpha=equivalent_alpha).fit(X, y)
    assert_array_almost_equal(ridge.coef_, bayesian.coef_)
    assert_almost_equal(ridge.intercept_, bayesian.intercept_)
def test_bayesian_sample_weights():
    """Weighted BayesianRidge must match a weighted Ridge at the same alpha."""
    X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
    y = np.array([1, 2, 3, 2, 0, 4, 5]).T
    w = np.array([4, 3, 3, 1, 1, 2, 3]).T
    bayesian = BayesianRidge(compute_score=True).fit(X, y, sample_weight=w)
    # A Ridge with alpha = lambda_ / alpha_ solves the same weighted
    # penalized problem, so the fits must coincide.
    equivalent_alpha = bayesian.lambda_ / bayesian.alpha_
    ridge = Ridge(alpha=equivalent_alpha).fit(X, y, sample_weight=w)
    assert_array_almost_equal(ridge.coef_, bayesian.coef_)
    assert_almost_equal(ridge.intercept_, bayesian.intercept_)
def test_toy_bayesian_ridge_object():
    """BayesianRidge should approximately learn the identity on a toy set."""
    X = np.array([[1], [2], [6], [8], [10]])
    Y = np.array([1, 2, 6, 8, 10])
    model = BayesianRidge(compute_score=True)
    model.fit(X, Y)
    unseen = [[1], [3], [4]]
    assert_array_almost_equal(model.predict(unseen), [1, 3, 4], 2)
def test_bayesian_initial_params():
    """BayesianRidge must honour user-supplied alpha_init / lambda_init."""
    X = np.vander(np.linspace(0, 4, 5), 4)
    y = np.array([0., 1., 0., -1., 0.])  # y = (x^3 - 6x^2 + 8x) / 3
    # With the default initial values the fitted curve is biased on this
    # data, so a small lambda_init is required to recover it: the R2 score
    # should then be essentially 1.
    reg = BayesianRidge(alpha_init=1., lambda_init=1e-3).fit(X, y)
    assert_almost_equal(reg.score(X, y), 1.)
def test_prediction_bayesian_ridge_ard_with_constant_input():
    """Predictions for a constant target vector must equal that constant."""
    n_samples, n_features = 4, 5
    rng = check_random_state(42)
    constant_value = rng.rand()
    X = rng.random_sample((n_samples, n_features))
    y = np.full(n_samples, constant_value,
                dtype=np.array(constant_value).dtype)
    expected = y.copy()
    for estimator in (BayesianRidge(), ARDRegression()):
        predictions = estimator.fit(X, y).predict(X)
        assert_array_almost_equal(predictions, expected)
def test_std_bayesian_ridge_ard_with_constant_input():
    """Predictive standard deviation for a constant target should be small
    (below 0.01) for both Bayesian regressors."""
    n_samples, n_features = 4, 5
    rng = check_random_state(42)
    constant_value = rng.rand()
    X = rng.random_sample((n_samples, n_features))
    y = np.full(n_samples, constant_value,
                dtype=np.array(constant_value).dtype)
    upper_bound = 0.01
    for estimator in (BayesianRidge(), ARDRegression()):
        _, y_std = estimator.fit(X, y).predict(X, return_std=True)
        assert_array_less(y_std, upper_bound)
def test_update_of_sigma_in_ard():
    """`sigma_` must be updated after ARD's final iteration (issue #10128)."""
    X = np.array([[1, 0],
                  [0, 0]])
    y = np.array([0, 0])
    model = ARDRegression(n_iter=1)
    model.fit(X, y)
    # One of the two coefficients is pruned during the first (and only)
    # iteration, so sigma_ must have shrunk to a (1, 1) matrix.
    assert model.sigma_.shape == (1, 1)
    # Prediction with return_std must not raise after pruning.
    model.predict(X, return_std=True)
def test_toy_ard_object():
    """ARDRegression should approximately learn the identity on a toy set."""
    X = np.array([[1], [2], [3]])
    Y = np.array([1, 2, 3])
    model = ARDRegression(compute_score=True)
    model.fit(X, Y)
    unseen = [[1], [3], [4]]
    assert_array_almost_equal(model.predict(unseen), [1, 3, 4], 2)
def test_return_std():
    """Both Bayesian regressors must report, via return_std=True, a
    predictive standard deviation close to the injected noise level."""
    def noisy_target(X, noise_mult):
        # Linear ground truth plus i.i.d. Gaussian noise of scale noise_mult.
        return np.dot(X, w) + b + np.random.randn(X.shape[0]) * noise_mult

    d, n_train, n_test = 5, 50, 10
    w = np.array([1.0, 0.0, 1.0, -1.0, 0.0])
    b = 1.0
    X = np.random.random((n_train, d))
    X_test = np.random.random((n_test, d))
    # Smaller noise -> tighter tolerance (more decimals) on the estimate.
    for decimal, noise_mult in enumerate([1, 0.1, 0.01]):
        y = noisy_target(X, noise_mult)
        for model in (BayesianRidge(), ARDRegression()):
            model.fit(X, y)
            _, y_std = model.predict(X_test, return_std=True)
            assert_array_almost_equal(y_std, noise_mult, decimal=decimal)
| {
"repo_name": "chrsrds/scikit-learn",
"path": "sklearn/linear_model/tests/test_bayes.py",
"copies": "1",
"size": "8265",
"license": "bsd-3-clause",
"hash": -8418597883932554000,
"line_mean": 34.625,
"line_max": 78,
"alpha_frac": 0.6318209316,
"autogenerated": false,
"ratio": 3.047566371681416,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9179387303281417,
"avg_score": 0,
"num_lines": 232
} |
import numpy as np
from scipy import sparse
import warnings
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
    """
    Fit LinearRegression on trivial data and check exact recovery.
    """
    # Two points on the identity line: slope 1, intercept 0.
    clf = LinearRegression()
    clf.fit([[1], [2]], [1, 2])
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.predict([[1], [2]]), [1, 2])
    # Degenerate single-sample input: everything collapses to zero.
    clf = LinearRegression()
    clf.fit([[1]], [0])
    assert_array_almost_equal(clf.coef_, [0])
    assert_array_almost_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.predict([[1]]), [0])
def test_linear_regression_n_jobs():
    """
    The n_jobs argument accepted by fit must mirror the constructor value.
    """
    X, Y = [[1], [2]], [1, 2]
    clf = LinearRegression()
    # Passing n_jobs through fit() triggers a warning, which is captured
    # here so the test output stays clean (presumably a deprecation —
    # confirm against the LinearRegression.fit signature of this version).
    with warnings.catch_warnings(record=True):
        fitted = clf.fit(X, Y, 4)
    assert_equal(fitted.n_jobs, clf.n_jobs)
    assert_equal(clf.n_jobs, 1)
def test_fit_intercept():
    """
    coef_ must have the same shape with and without an intercept.
    """
    X2 = np.array([[0.38349978, 0.61650022],
                   [0.58853682, 0.41146318]])
    X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
                   [0.08385139, 0.20692515, 0.70922346]])
    y = np.array([1, 1])
    lr2_no_icept = LinearRegression(fit_intercept=False).fit(X2, y)
    lr2_icept = LinearRegression(fit_intercept=True).fit(X2, y)
    lr3_no_icept = LinearRegression(fit_intercept=False).fit(X3, y)
    lr3_icept = LinearRegression(fit_intercept=True).fit(X3, y)
    assert_equal(lr2_icept.coef_.shape, lr2_no_icept.coef_.shape)
    assert_equal(lr3_icept.coef_.shape, lr3_no_icept.coef_.shape)
    # Dimensionality must also be consistent across feature counts.
    assert_equal(lr2_no_icept.coef_.ndim, lr3_no_icept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
    """LinearRegression must also handle sparse design matrices."""
    rng = check_random_state(random_state)
    for _ in range(10):
        n = 100
        X = sparse.eye(n, n)
        beta = rng.rand(n)
        y = X * beta[:, np.newaxis]
        ols = LinearRegression()
        ols.fit(X, y.ravel())
        # With an identity design, coef_ + intercept_ recovers beta exactly
        # and the residual sum of squares is zero.
        assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
        assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
    """Multi-output fits must match the stacked single-output predictions."""
    X, y = make_regression(random_state=random_state)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]
    reg = LinearRegression(fit_intercept=True)
    reg.fit(X, Y)
    assert_equal(reg.coef_.shape, (2, n_features))
    Y_pred = reg.predict(X)
    # Refit on a single output: each stacked column must agree with it.
    reg.fit(X, y)
    y_pred = reg.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    """Sparse multi-output fits must match stacked single-output predictions."""
    rng = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=rng)
    X = sparse.coo_matrix(X)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]
    ols = LinearRegression()
    ols.fit(X, Y)
    assert_equal(ols.coef_.shape, (2, n_features))
    Y_pred = ols.predict(X)
    # Refit on a single output: each stacked column must agree with it.
    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
    # Verify center_data for every fit_intercept / normalize combination.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    expected_X_mean = np.mean(X, axis=0)
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
    expected_y_mean = np.mean(y, axis=0)
    # No centering requested: data must pass through unchanged, with
    # zero means and unit scales reported.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
                                                normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)
    # Centering only: column means subtracted, scale left at one.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
    # Centering + normalization: X additionally divided by its std.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
    """Dense and sparse centering must treat multi-output y identically."""
    n_samples, n_features, n_outputs = 200, 3, 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)
    for center, data in ((center_data, X),
                         (sparse_center_data, sparse.csc_matrix(X))):
        # fit_intercept=False: y passes through untouched.
        _, yt, _, y_mean, _ = center(data, y, fit_intercept=False,
                                     normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)
        # fit_intercept=True: per-column means are subtracted from y.
        _, yt, _, y_mean, _ = center(data, y, fit_intercept=True,
                                     normalize=False)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
        # normalize=True affects X only; y handling is unchanged.
        _, yt, _, y_mean, _ = center(data, y, fit_intercept=True,
                                     normalize=True)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
    # Verify weighted centering: means are sample_weight-weighted averages.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)
    expected_X_mean = np.average(X, axis=0, weights=sample_weight)
    expected_y_mean = np.average(y, axis=0, weights=sample_weight)
    # XXX: if normalize=True, should we expect a weighted standard deviation?
    # Currently not weighted, but calculated with respect to weighted mean
    # XXX: currently scaled to variance=n_samples
    expected_X_std = (np.sqrt(X.shape[0]) *
                      np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
    # Weighted centering without normalization: weighted means subtracted,
    # scale reported as ones.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
    # With normalization: X additionally divided by the (unweighted) std
    # computed about the weighted mean.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
    # sparse_center_data must report means/stds without shifting X itself
    # (shifting would destroy sparsity).
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
    # No centering requested: everything passes through unchanged.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=False,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y)
    # fit_intercept=True: means are reported but X is NOT shifted
    # (Xt.A == XA below); only y is centered.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
    # normalize=True additionally rescales X columns by their std.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt.A, XA / expected_X_std)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
    """sparse_center_data must keep CSR input in CSR format."""
    X, y = make_regression()
    X[X < 2.5] = 0.0  # sparsify the dense output of make_regression
    csr = sparse.csr_matrix(X)
    csr_, y, _, _, _ = sparse_center_data(csr, y, True)
    assert_equal(csr_.getformat(), 'csr')
| {
"repo_name": "mehdidc/scikit-learn",
"path": "sklearn/linear_model/tests/test_base.py",
"copies": "7",
"size": "10479",
"license": "bsd-3-clause",
"hash": 4989755894596540000,
"line_mean": 37.1054545455,
"line_max": 79,
"alpha_frac": 0.6073098578,
"autogenerated": false,
"ratio": 3.1658610271903322,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7273170884990332,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
    # Fit LinearRegression on trivial data and check exact recovery.
    # Two points on the identity line: slope 1, intercept 0.
    clf = LinearRegression()
    clf.fit([[1], [2]], [1, 2])
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.predict([[1], [2]]), [1, 2])
    # Degenerate single-sample input: everything collapses to zero.
    clf = LinearRegression()
    clf.fit([[1]], [0])
    assert_array_almost_equal(clf.coef_, [0])
    assert_array_almost_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.predict([[1]]), [0])
def test_fit_intercept():
    # coef_ must have the same shape with and without an intercept.
    X2 = np.array([[0.38349978, 0.61650022],
                   [0.58853682, 0.41146318]])
    X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
                   [0.08385139, 0.20692515, 0.70922346]])
    y = np.array([1, 1])
    lr2_no_icept = LinearRegression(fit_intercept=False).fit(X2, y)
    lr2_icept = LinearRegression(fit_intercept=True).fit(X2, y)
    lr3_no_icept = LinearRegression(fit_intercept=False).fit(X3, y)
    lr3_icept = LinearRegression(fit_intercept=True).fit(X3, y)
    assert_equal(lr2_icept.coef_.shape, lr2_no_icept.coef_.shape)
    assert_equal(lr3_icept.coef_.shape, lr3_no_icept.coef_.shape)
    # Dimensionality must also be consistent across feature counts.
    assert_equal(lr2_no_icept.coef_.ndim, lr3_no_icept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
    """LinearRegression must also handle sparse design matrices."""
    rng = check_random_state(random_state)
    for _ in range(10):
        n = 100
        X = sparse.eye(n, n)
        beta = rng.rand(n)
        y = X * beta[:, np.newaxis]
        ols = LinearRegression()
        ols.fit(X, y.ravel())
        # With an identity design, coef_ + intercept_ recovers beta exactly
        # and the residual sum of squares is zero.
        assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
        assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
    """Multi-output fits must match the stacked single-output predictions."""
    X, y = make_regression(random_state=random_state)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]
    reg = LinearRegression(fit_intercept=True)
    reg.fit(X, Y)
    assert_equal(reg.coef_.shape, (2, n_features))
    Y_pred = reg.predict(X)
    # Refit on a single output: each stacked column must agree with it.
    reg.fit(X, y)
    y_pred = reg.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    """Sparse multi-output fits must match stacked single-output predictions."""
    rng = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=rng)
    X = sparse.coo_matrix(X)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]
    ols = LinearRegression()
    ols.fit(X, Y)
    assert_equal(ols.coef_.shape, (2, n_features))
    Y_pred = ols.predict(X)
    # Refit on a single output: each stacked column must agree with it.
    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
    # Verify center_data for every fit_intercept / normalize combination.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    expected_X_mean = np.mean(X, axis=0)
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
    expected_y_mean = np.mean(y, axis=0)
    # No centering requested: data must pass through unchanged, with
    # zero means and unit scales reported.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
                                                normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)
    # Centering only: column means subtracted, scale left at one.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
    # Centering + normalization: X additionally divided by its std.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
    """Dense and sparse centering must treat multi-output y identically."""
    n_samples, n_features, n_outputs = 200, 3, 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)
    for center, data in ((center_data, X),
                         (sparse_center_data, sparse.csc_matrix(X))):
        # fit_intercept=False: y passes through untouched.
        _, yt, _, y_mean, _ = center(data, y, fit_intercept=False,
                                     normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)
        # fit_intercept=True: per-column means are subtracted from y.
        _, yt, _, y_mean, _ = center(data, y, fit_intercept=True,
                                     normalize=False)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
        # normalize=True affects X only; y handling is unchanged.
        _, yt, _, y_mean, _ = center(data, y, fit_intercept=True,
                                     normalize=True)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
    # Verify weighted centering: means are sample_weight-weighted averages.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)
    expected_X_mean = np.average(X, axis=0, weights=sample_weight)
    expected_y_mean = np.average(y, axis=0, weights=sample_weight)
    # XXX: if normalize=True, should we expect a weighted standard deviation?
    # Currently not weighted, but calculated with respect to weighted mean
    # XXX: currently scaled to variance=n_samples
    expected_X_std = (np.sqrt(X.shape[0]) *
                      np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
    # Weighted centering without normalization: weighted means subtracted,
    # scale reported as ones.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
    # With normalization: X additionally divided by the (unweighted) std
    # computed about the weighted mean.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
    # sparse_center_data must report means/stds without shifting X itself
    # (shifting would destroy sparsity).
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
    # No centering requested: everything passes through unchanged.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=False,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y)
    # fit_intercept=True: means are reported but X is NOT shifted
    # (Xt.A == XA below); only y is centered.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
    # normalize=True additionally rescales X columns by their std.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt.A, XA / expected_X_std)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
    # sparse_center_data must keep CSR input in CSR format.
    X, y = make_regression()
    X[X < 2.5] = 0.0  # sparsify the dense output of make_regression
    csr = sparse.csr_matrix(X)
    csr_, y, _, _, _ = sparse_center_data(csr, y, True)
    assert_equal(csr_.getformat(), 'csr')
| {
"repo_name": "smartscheduling/scikit-learn-categorical-tree",
"path": "sklearn/linear_model/tests/test_base.py",
"copies": "120",
"size": "10082",
"license": "bsd-3-clause",
"hash": -5016893403683557000,
"line_mean": 38.2295719844,
"line_max": 79,
"alpha_frac": 0.6092045229,
"autogenerated": false,
"ratio": 3.1644695543000627,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
    """
    BayesianRidge scores should increase at each iteration on diabetes.
    """
    raise SkipTest("XFailed Test")
    diabetes = datasets.load_diabetes()
    X, y = diabetes.data, diabetes.target
    clf = BayesianRidge(compute_score=True)
    # Case 1: more samples than features.
    clf.fit(X, y)
    assert_array_equal(np.diff(clf.scores_) > 0, True)
    # Case 2: more features than samples.
    X = X[:5, :]
    y = y[:5]
    clf.fit(X, y)
    assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
    """
    BayesianRidge should approximately learn the identity on a toy set.
    """
    X = np.array([[1], [2], [6], [8], [10]])
    Y = np.array([1, 2, 6, 8, 10])
    model = BayesianRidge(compute_score=True)
    model.fit(X, Y)
    unseen = [[1], [3], [4]]
    assert_array_almost_equal(model.predict(unseen), [1, 3, 4], 2)
def test_toy_ard_object():
    """
    ARDRegression should approximately learn the identity on a toy set.
    """
    X = np.array([[1], [2], [3]])
    Y = np.array([1, 2, 3])
    model = ARDRegression(compute_score=True)
    model.fit(X, Y)
    unseen = [[1], [3], [4]]
    assert_array_almost_equal(model.predict(unseen), [1, 3, 4], 2)
| {
"repo_name": "rseubert/scikit-learn",
"path": "sklearn/linear_model/tests/test_bayes.py",
"copies": "30",
"size": "1812",
"license": "bsd-3-clause",
"hash": 7597844669144402000,
"line_mean": 27.3125,
"line_max": 74,
"alpha_frac": 0.6534216336,
"autogenerated": false,
"ratio": 3.1845342706502637,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..externals.six.moves import xrange
from . import cd_fast
from .base import LinearModel, _pre_fit
from .base import _preprocess_data
from ..base import RegressorMixin
from ..exceptions import ConvergenceWarning
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..utils import check_array, check_X_y, deprecated
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import check_random_state
from ..utils.validation import column_or_1d
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
                eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
    """ Compute the grid of alpha values for elastic net parameter search

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication

    y : ndarray, shape (n_samples,)
        Target values

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed.

    l1_ratio : float
        The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
        l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
        1``, the penalty is a combination of L1 and L2.

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    fit_intercept : boolean, default True
        Whether to fit an intercept or not

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Returns
    -------
    alphas : ndarray, shape (n_alphas,)
        Grid of alpha values, log-spaced from ``alpha_max`` down to
        ``alpha_max * eps`` in decreasing order (constant grid in the
        degenerate ``alpha_max ~ 0`` case).
    """
    n_samples = len(y)

    sparse_center = False
    if Xy is None:
        X_sparse = sparse.isspmatrix(X)
        # For sparse X, centering/normalizing X in place would destroy its
        # sparsity, so the adjustment is applied to Xy further below instead.
        sparse_center = X_sparse and (fit_intercept or normalize)
        X = check_array(X, 'csc',
                        copy=(copy_X and fit_intercept and not X_sparse))
        if not X_sparse:
            # X can be touched inplace thanks to the above line
            X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
                                             normalize, copy=False)
        Xy = safe_sparse_dot(X.T, y, dense_output=True)

        if sparse_center:
            # Workaround to find alpha_max for sparse matrices.
            # since we should not destroy the sparsity of such matrices.
            _, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
                                                          normalize,
                                                          return_mean=True)
            mean_dot = X_offset * np.sum(y)

    # Normalize Xy to 2-D so the row-norm computation below works for both
    # single- and multi-output targets.
    if Xy.ndim == 1:
        Xy = Xy[:, np.newaxis]

    if sparse_center:
        # Equivalent of centering/scaling X, applied to X.T @ y instead.
        if fit_intercept:
            Xy -= mean_dot[:, np.newaxis]
        if normalize:
            Xy /= X_scale[:, np.newaxis]

    # alpha_max = (largest row norm of X.T @ y) / (n_samples * l1_ratio).
    alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
                 (n_samples * l1_ratio))

    if alpha_max <= np.finfo(float).resolution:
        # Degenerate case (alpha_max numerically zero): return a flat grid
        # at the smallest representable resolution instead of log-spacing
        # down from ~0.
        alphas = np.empty(n_alphas)
        alphas.fill(np.finfo(float).resolution)
        return alphas

    return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                       num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, copy_X=True, coef_init=None,
               verbose=False, return_n_iter=False, positive=False, **params):
    r"""Compute Lasso path with coordinate descent

    The Lasso optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
        Target values

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    params : kwargs
        keyword arguments passed to the coordinate descent solver.

    positive : bool, default False
        If set to True, forces coefficients to be positive.

    return_n_iter : bool
        whether to return the number of iterations or not.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    Note that in certain cases, the Lars solver may be significantly
    faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path

    Examples
    ---------

    Comparing lasso_path and lars_path with interpolation:

    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> # Use lasso_path to compute a coefficient path
    >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
    >>> print(coef_path)
    [[ 0.          0.          0.46874778]
     [ 0.2159048   0.4425765   0.23689075]]

    >>> # Now use lars_path and 1D linear interpolation to compute the
    >>> # same path
    >>> from sklearn.linear_model import lars_path
    >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
    >>> from scipy import interpolate
    >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
    ...                                             coef_path_lars[:, ::-1])
    >>> print(coef_path_continuous([5., 1., .5]))
    [[ 0.          0.          0.46915237]
     [ 0.2159048   0.4425765   0.23668876]]

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # The Lasso is the elastic net with l1_ratio fixed at 1; delegate to
    # enet_path.  BUGFIX: return_n_iter must be forwarded explicitly —
    # it is consumed by this function's signature (so it never reaches
    # **params) and was previously dropped, meaning callers requesting
    # n_iters silently never received them.
    return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     positive=positive, return_n_iter=return_n_iter, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, copy_X=True, coef_init=None,
              verbose=False, return_n_iter=False, positive=False,
              check_input=True, **params):
    """Compute elastic net path with coordinate descent
    The elastic net optimization function varies for mono and multi-outputs.
    For mono-output tasks it is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
    Where::
        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <elastic_net>`.
    Parameters
    ----------
    X : {array-like}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.
    y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
        Target values
    l1_ratio : float, optional
        float between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.
    verbose : bool or integer
        Amount of verbosity.
    params : kwargs
        keyword arguments passed to the coordinate descent solver.
    return_n_iter : bool
        whether to return the number of iterations or not.
    positive : bool, default False
        If set to True, forces coefficients to be positive.
    check_input : bool, default True
        Skip input validation checks, including the Gram matrix when provided
        assuming they are handled by the caller when check_input=False.
    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.
    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.
    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.
    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).
    Notes
    -----
    See examples/plot_lasso_coordinate_descent_path.py for an example.
    See also
    --------
    MultiTaskElasticNet
    MultiTaskElasticNetCV
    ElasticNet
    ElasticNetCV
    """
    # We expect X and y to be already float64 Fortran ordered when bypassing
    # checks
    if check_input:
        X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
        y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
                        ensure_2d=False)
        if Xy is not None:
            # Xy should be a 1d contiguous array or a 2D C ordered array
            Xy = check_array(Xy, dtype=np.float64, order='C', copy=False,
                             ensure_2d=False)
    n_samples, n_features = X.shape
    multi_output = False
    if y.ndim != 1:
        multi_output = True
        _, n_outputs = y.shape
    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.isspmatrix(X):
        if 'X_offset' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_offset'] / params['X_scale']
        else:
            X_sparse_scaling = np.zeros(n_features)
    # X should be normalized and fit already if function is called
    # from ElasticNet.fit
    if check_input:
        X, y, X_offset, y_offset, X_scale, precompute, Xy = \
            _pre_fit(X, y, Xy, precompute, normalize=False,
                     fit_intercept=False, copy=False)
    if alphas is None:
        # No need to normalize or fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered
    n_alphas = len(alphas)
    tol = params.get('tol', 1e-4)
    max_iter = params.get('max_iter', 1000)
    dual_gaps = np.empty(n_alphas)
    n_iters = []
    rng = check_random_state(params.get('random_state', None))
    selection = params.get('selection', 'cyclic')
    if selection not in ['random', 'cyclic']:
        raise ValueError("selection should be either random or cyclic.")
    random = (selection == 'random')
    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=np.float64)
    else:
        coefs = np.empty((n_outputs, n_features, n_alphas),
                         dtype=np.float64)
    if coef_init is None:
        coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
    else:
        coef_ = np.asfortranarray(coef_init)
    # Solve for each alpha in decreasing order; coef_ carries over between
    # iterations, so each solve is warm-started from the previous solution.
    for i, alpha in enumerate(alphas):
        # NOTE(review): penalties are pre-scaled by n_samples for the
        # cd_fast solvers -- confirm against cd_fast's objective convention.
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        if not multi_output and sparse.isspmatrix(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, rng, random, positive)
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
        elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already Fortran ordered when bypassing
            # checks
            # NOTE(review): check_array below enforces C order, which reads as
            # inconsistent with the comment above; the Gram matrix is
            # symmetric, so both layouts hold identical bytes -- confirm
            # intent.
            if check_input:
                precompute = check_array(precompute, dtype=np.float64,
                                         order='C')
            model = cd_fast.enet_coordinate_descent_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
                tol, rng, random, positive)
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
                positive)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like")
        coef_, dual_gap_, eps_, n_iter_ = model
        coefs[..., i] = coef_
        dual_gaps[i] = dual_gap_
        n_iters.append(n_iter_)
        # A dual gap above the solver's own tolerance eps_ means max_iter was
        # exhausted before convergence.
        if dual_gap_ > eps_:
            warnings.warn('Objective did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations',
                          ConvergenceWarning)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')
    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters
    return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
    """Linear regression with combined L1 and L2 priors as regularizer.
    Minimizes the objective function::
            1 / (2 * n_samples) * ||y - Xw||^2_2
            + alpha * l1_ratio * ||w||_1
            + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::
            a * L1 + b * L2
    where::
            alpha = a + b and l1_ratio = a / (a + b)
    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.
    Read more in the :ref:`User Guide <elastic_net>`.
    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty terms. Defaults to 1.0
        See the notes for the exact mathematical meaning of this
        parameter.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.
    l1_ratio : float
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.
    fit_intercept : bool
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.
    precompute : True | False | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
    max_iter : int, optional
        The maximum number of iterations
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``
    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.
    n_iter_ : array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    See also
    --------
    SGDRegressor: implements elastic net regression with incremental training.
    SGDClassifier: implements logistic regression with elastic net penalty
        (``SGDClassifier(loss="log", penalty="elasticnet")``).
    """
    path = staticmethod(enet_path)
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute=False, max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        # coef_ stays None until fit; warm_start uses this as its sentinel.
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.intercept_ = 0.0
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y, check_input=True):
        """Fit model with coordinate descent.
        Parameters
        ----------
        X : ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples,) or (n_samples, n_targets)
            Target
        Returns
        -------
        self : ElasticNet
            The fitted estimator, to allow chaining fit and predict calls.
        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)
        if (isinstance(self.precompute, six.string_types) and
                self.precompute == 'auto'):
            warnings.warn("Setting precompute to 'auto', was found to be "
                          "slower even when n_samples > n_features. Hence "
                          "it will be removed in 0.18.",
                          DeprecationWarning, stacklevel=2)
        # We expect X and y to be already float64 Fortran ordered arrays
        # when bypassing checks
        if check_input:
            y = np.asarray(y, dtype=np.float64)
            X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
                             order='F',
                             copy=self.copy_X and self.fit_intercept,
                             multi_output=True, y_numeric=True)
            y = check_array(y, dtype=np.float64, order='F', copy=False,
                            ensure_2d=False)
        X, y, X_offset, y_offset, X_scale, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=False)
        # Promote mono-output data to 2-D so that the per-target loop below
        # handles both cases uniformly.
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]
        n_samples, n_features = X.shape
        n_targets = y.shape[1]
        if self.selection not in ['cyclic', 'random']:
            raise ValueError("selection should be either random or cyclic.")
        if not self.warm_start or self.coef_ is None:
            coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
                             order='F')
        else:
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]
        dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
        self.n_iter_ = []
        # One coordinate-descent run per output column, each warm-started
        # from the corresponding row of coef_.
        # NOTE(review): xrange presumably comes from a six/py2 compat import
        # at module top -- confirm.
        for k in xrange(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            # eps/n_alphas are irrelevant here: the explicit grid is the
            # single value [self.alpha].
            _, this_coef, this_dual_gap, this_iter = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[self.alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_offset=X_offset, X_scale=X_scale, return_n_iter=True,
                          coef_init=coef_[k], max_iter=self.max_iter,
                          random_state=self.random_state,
                          selection=self.selection,
                          check_input=False)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
            self.n_iter_.append(this_iter[0])
        if n_targets == 1:
            self.n_iter_ = self.n_iter_[0]
        self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
        self._set_intercept(X_offset, y_offset, X_scale)
        # return self for chaining fit and predict calls
        return self
    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted coef """
        return sparse.csr_matrix(self.coef_)
    @deprecated(" and will be removed in 0.19")
    def decision_function(self, X):
        """Decision function of the linear model
        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        return self._decision_function(X)
    def _decision_function(self, X):
        """Decision function of the linear model
        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        check_is_fitted(self, 'n_iter_')
        # Sparse X needs an explicit dot product; the dense case defers to
        # the LinearModel base implementation.
        if sparse.isspmatrix(X):
            return np.ravel(safe_sparse_dot(self.coef_, X.T,
                                            dense_output=True) +
                            self.intercept_)
        else:
            return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear model trained with an L1 prior as regularizer (the Lasso).

    The objective minimized by this estimator is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    which is exactly the Elastic Net objective with ``l1_ratio=1.0``
    (i.e. no L2 term); the class is therefore a thin specialization of
    :class:`ElasticNet`.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Multiplier of the L1 term; defaults to 1.0. ``alpha = 0`` would
        reduce the model to ordinary least squares, but that case is
        numerically fragile here -- prefer :class:`LinearRegression`.
    fit_intercept : boolean
        Whether to estimate an intercept. If ``False`` no intercept is used
        and the data is expected to be centered already.
    normalize : boolean, optional, default False
        Normalize the regressors X before fitting. Ignored when
        ``fit_intercept`` is ``False``. Normalizing makes the learnt
        hyperparameters more robust and nearly independent of the sample
        count; the same does not hold for standardized data. To
        standardize, apply `preprocessing.StandardScaler` yourself before
        ``fit`` and keep ``normalize=False``.
    copy_X : boolean, optional, default True
        If ``True``, X is copied; otherwise it may be overwritten.
    precompute : True | False | array-like, default=False
        Whether to use a precomputed Gram matrix to speed up calculations;
        the Gram matrix may also be passed directly. For sparse input this
        option is always ``True`` to preserve sparsity.
    max_iter : int, optional
        Maximum number of coordinate-descent iterations.
    tol : float, optional
        Optimization tolerance: once updates fall below ``tol`` the dual
        gap is checked for optimality, and optimization continues until
        the gap is smaller than ``tol``.
    warm_start : bool, optional
        If ``True``, start from the solution of the previous ``fit`` call
        instead of erasing it.
    positive : bool, optional
        If ``True``, constrain all coefficients to be positive.
    selection : str, default 'cyclic'
        'random' updates a random coefficient per iteration instead of
        sweeping features in order, which often converges noticeably
        faster when tol is above 1e-4.
    random_state : int, RandomState instance, or None (default)
        Seed for the generator that picks the feature to update; only
        relevant when ``selection`` is 'random'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Weight vector(s) (``w`` in the objective above).
    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        Read-only sparse view derived from ``coef_``.
    intercept_ : float | array, shape (n_targets,)
        Independent term in the decision function.
    n_iter_ : int | array-like, shape (n_targets,)
        Iterations run by the coordinate-descent solver to reach ``tol``.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute=False, random_state=None,
       selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [ 0.85  0.  ]
    >>> print(clf.intercept_)
    0.15

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    Fitting uses coordinate descent. Pass X as a Fortran-contiguous numpy
    array to avoid an internal memory copy.
    """

    # Regularization-path function shared with ElasticNet.
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute=False, copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # Delegate to ElasticNet with the mixing ratio pinned at 1.0,
        # which turns the combined penalty into a pure L1 (Lasso) penalty.
        enet_params = dict(
            alpha=alpha,
            l1_ratio=1.0,  # pure L1 penalty == Lasso
            fit_intercept=fit_intercept,
            normalize=normalize,
            precompute=precompute,
            copy_X=copy_X,
            max_iter=max_iter,
            tol=tol,
            warm_start=warm_start,
            positive=positive,
            random_state=random_state,
            selection=selection,
        )
        super(Lasso, self).__init__(**enet_params)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values
    train : list of indices
        The indices of the train set
    test : list of indices
        The indices of the test set
    path : callable
        function returning a list of models on the path. See
        enet_path for an example of signature
    path_params : dictionary
        Parameters passed to the path function
    alphas : array-like, optional
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2
    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies
    dtype : a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies
    Returns
    -------
    this_mses : array, shape (n_alphas,)
        Mean squared prediction error on the test fold for each alpha.
    """
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']
    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False
    X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)
    # Copy before mutating: path_params is shared across parallel folds.
    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_offset'] = X_offset
    path_params['X_scale'] = X_scale
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas
    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio
    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    # Drop the training fold early; only test-fold scoring remains.
    del X_train, y_train
    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_offset = np.atleast_1d(y_offset)
        y_test = y_test[:, np.newaxis]
    if normalize:
        nonzeros = np.flatnonzero(X_scale)
        coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
    # y_offset - X_offset.dot(coefs) yields one intercept per
    # (target, alpha) pair.
    intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
    if sparse.issparse(X_test):
        n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matrices since coefs is a 3-D numpy array.
        coefs_feature_major = np.rollaxis(coefs, 1)
        feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
        X_test_coefs = safe_sparse_dot(X_test, feature_2d)
        X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
    else:
        X_test_coefs = safe_sparse_dot(X_test, coefs)
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    # Mean over samples, then over targets, leaving one MSE per alpha.
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
    return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path

    Concrete subclasses (LassoCV, ElasticNetCV and the multi-task variants)
    supply a ``path`` static method; ``fit`` cross-validates over a grid of
    alphas (and optionally l1_ratios) and then refits the matching non-CV
    model with the best parameters found.
    """
    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y):
        """Fit linear model with coordinate descent
        Fit is on grid of alphas and best alpha estimated by cross-validation.
        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data. Pass directly as float64, Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values
        Returns
        -------
        self : object
            The fitted estimator, with ``alpha_``, ``alphas_``, ``coef_``,
            ``intercept_`` and ``mse_path_`` populated.
        """
        y = np.asarray(y, dtype=np.float64)
        if y.shape[0] == 0:
            raise ValueError("y has 0 samples: %r" % y)
        # The presence of an l1_ratio parameter distinguishes the ElasticNet
        # family of subclasses from the Lasso family.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
            if model_str == 'ElasticNet':
                model = ElasticNet()
            else:
                model = Lasso()
            if y.ndim > 1 and y.shape[1] > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%sCV" % (model_str))
            y = column_or_1d(y, warn=True)
        else:
            if sparse.isspmatrix(X):
                # BUGFIX: the two literals previously concatenated to
                # "...was" + "passed" == "waspassed" (missing space).
                raise TypeError("X should be dense but a sparse matrix was "
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % (model_str))
            if model_str == 'ElasticNet':
                model = MultiTaskElasticNet()
            else:
                model = MultiTaskLasso()
        if self.selection not in ["random", "cyclic"]:
            raise ValueError("selection should be either random or cyclic.")
        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept
        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering or float64 so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = check_array(X, 'csc', copy=False)
            if sparse.isspmatrix(X):
                if (hasattr(reference_to_old_X, "data") and
                        not np.may_share_memory(reference_to_old_X.data, X.data)):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
            copy_X = False
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            # Build one alpha grid per l1_ratio.
            alphas = []
            for l1_ratio in l1_ratios:
                alphas.append(_alpha_grid(
                    X, y, l1_ratio=l1_ratio,
                    fit_intercept=self.fit_intercept,
                    eps=self.eps, n_alphas=self.n_alphas,
                    normalize=self.normalize,
                    copy_X=self.copy_X))
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})
        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if not (self.n_jobs == 1 or self.n_jobs is None):
            path_params['copy_X'] = False
        # init cross-validation generator
        cv = check_cv(self.cv)
        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv.split(X))
        best_mse = np.inf
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=np.float64)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(jobs)
        # One row of per-alpha MSEs per (l1_ratio, fold) pair; average over
        # the folds axis to rank alphas.
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        mean_mse = np.mean(mse_paths, axis=1)
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse
        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])
        # Refit the model with the parameters selected
        common_params = dict((name, value)
                             for name, value in self.get_params().items()
                             if name in model.get_params())
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        model.precompute = False
        model.fit(X, y)
        # Copy the refit results onto self so the CV estimator can predict
        # directly.
        if not hasattr(self, 'l1_ratio'):
            del self.l1_ratio_
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        self.n_iter_ = model.n_iter_
        return self
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path
    The best model is selected by cross-validation.
    The optimization objective for Lasso is::
        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    Read more in the :ref:`User Guide <lasso>`.
    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : numpy array, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.
    positive : bool, optional
        If positive, restrict regression coefficients to be positive
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    fit_intercept : boolean, default True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.
    mse_path_ : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha
    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting
    dual_gap_ : ndarray, shape ()
        The dual gap at the end of the optimization for the optimal alpha
        (``alpha_``).
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.
    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # Regularization-path function invoked by LinearModelCV.fit via
    # self.path.
    path = staticmethod(lasso_path)
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        # All parameters are forwarded unchanged; fitting logic lives in
        # LinearModelCV.fit.
        super(LassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
            random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    l1_ratio : float or array of floats, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0``
        the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2.
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path, used for each l1_ratio.

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation

    l1_ratio_ : float
        The compromise between l1 and l2 penalization chosen by
        cross validation

    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula)

    intercept_ : float | array, shape (n_targets,)
        Independent term in the decision function.

    mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.

    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and l1_ratio = a / (a + b).

    See also
    --------
    enet_path
    ElasticNet
    """
    # Regularization-path function for this penalty, used by the inherited
    # cross-validation machinery.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, positive=False, random_state=None,
                 selection='cyclic'):
        # Plain attribute storage; the inherited CV fit reads these when it
        # builds the (l1_ratio, alpha) grid and runs the search.
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
    r"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.

    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
            l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
            selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [[ 0.45663524  0.45612256]
     [ 0.45663524  0.45612256]]
    >>> print(clf.intercept_)
    [ 0.0872422  0.0872422]

    See also
    --------
    ElasticNet, MultiTaskLasso

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """

    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False, random_state=None, selection='cyclic'):
        # Hyper-parameters are stored as-is; ``coef_`` starts at None so that
        # fit() knows there is no previous solution for warm starts.
        self.l1_ratio = l1_ratio
        self.alpha = alpha
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y):
        """Fit MultiTaskElasticNet model with coordinate descent

        (This method is also inherited and used by MultiTaskLasso.)

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples, n_tasks)
            Target

        Returns
        -------
        self : object
            The fitted estimator.

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # X and y must be of type float64
        X = check_array(X, dtype=np.float64, order='F',
                        copy=self.copy_X and self.fit_intercept)
        y = check_array(y, dtype=np.float64, ensure_2d=False)

        # Multi-task solvers require a 2D y; point mono-task users at the
        # single-task estimator.  NOTE(review): MultiTaskLasso.__init__ also
        # sets ``l1_ratio``, so this hasattr check always reports
        # 'ElasticNet' -- confirm the intended message for the Lasso subclass.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if y.ndim == 1:
            raise ValueError("For mono-task outputs, use %s" % model_str)

        n_samples, n_features = X.shape
        _, n_tasks = y.shape

        if n_samples != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (n_samples, y.shape[0]))

        # Center/scale in place; copy=False is safe because check_array above
        # already copied when a copy was required.
        X, y, X_offset, y_offset, X_scale = _preprocess_data(
            X, y, self.fit_intercept, self.normalize, copy=False)

        # Start from zeros unless warm-starting from a previous solution.
        if not self.warm_start or self.coef_ is None:
            self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
                                  order='F')

        # The n_samples factor matches the 1 / (2 * n_samples) scaling of the
        # data-fit term in the objective documented on the class.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples

        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory

        if self.selection not in ['random', 'cyclic']:
            raise ValueError("selection should be either random or cyclic.")
        random = (self.selection == 'random')

        # The Cython solver updates ``coef_`` in place and reports the final
        # duality gap, tolerance threshold and iteration count.
        self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
                check_random_state(self.random_state), random)

        self._set_intercept(X_offset, y_offset, X_scale)

        # A dual gap above the tolerance means the solver stopped at max_iter
        # without certifying optimality.
        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')

        # return self for chaining fit and predict calls
        return self
class MultiTaskLasso(MultiTaskElasticNet):
    r"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_tasks, n_features)
        parameter vector (W in the cost function formula)

    intercept_ : array, shape (n_tasks,)
        independent term in decision function.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
            normalize=False, random_state=None, selection='cyclic', tol=0.0001,
            warm_start=False)
    >>> print(clf.coef_)
    [[ 0.89393398  0.        ]
     [ 0.89393398  0.        ]]
    >>> print(clf.intercept_)
    [ 0.10606602  0.10606602]

    See also
    --------
    Lasso, MultiTaskElasticNet

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
                 random_state=None, selection='cyclic'):
        # Store hyper-parameters; ``coef_`` starts at None so the inherited
        # fit() knows there is no previous solution for warm starts.
        self.alpha = alpha
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        # Fixing l1_ratio at 1.0 makes the inherited MultiTaskElasticNet.fit
        # apply a pure L1/L2 (group) penalty, i.e. the multi-task Lasso.
        self.l1_ratio = 1.0
        self.random_state = random_state
        self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
    r"""Multi-task L1/L2 ElasticNet with built-in cross-validation.

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.

    n_alphas : int, optional
        Number of alphas along the regularization path

    l1_ratio : float or array of floats
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.

    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).

    alpha_ : float
        The amount of penalization chosen by cross validation

    mse_path_ : array, shape (n_alphas, n_folds) or (n_l1_ratio, n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio

    l1_ratio_ : float
        best l1_ratio obtained by cross-validation.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNetCV()
    >>> clf.fit([[0,0], [1, 1], [2, 2]],
    ...         [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
           fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
           n_jobs=1, normalize=False, random_state=None, selection='cyclic',
           tol=0.0001, verbose=0)
    >>> print(clf.coef_)
    [[ 0.52875032  0.46958558]
     [ 0.52875032  0.46958558]]
    >>> print(clf.intercept_)
    [ 0.00166409  0.00166409]

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskLassoCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Regularization-path function for this penalty, used by the inherited
    # cross-validation machinery.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False,
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
        # Plain attribute storage; the inherited CV fit reads these when it
        # builds the (l1_ratio, alpha) grid and runs the search.
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
    r"""Multi-task L1/L2 Lasso with built-in cross-validation.

    The optimization objective for MultiTaskLasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21

    where ``||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}``, i.e. the sum of the
    norms of the rows of W.  The regularization strength ``alpha`` is chosen
    automatically by cross-validation over a grid of candidate values.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path.
    alphas : array-like, optional
        Explicit list of alphas where to compute the models; computed
        automatically when not provided.
    fit_intercept : boolean
        Whether to calculate the intercept for this model.  If set to False,
        no intercept is used (data is expected to be already centered).
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        Ignored when ``fit_intercept`` is False.  If you wish to standardize
        instead, use `preprocessing.StandardScaler` before calling `fit`
        on an estimator with ``normalize=False``.
    max_iter : int, optional
        The maximum number of iterations.
    tol : float, optional
        Optimization tolerance: once updates fall below ``tol``, the dual gap
        is checked for optimality and iteration continues until it is also
        smaller than ``tol``.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy: None for the default 3-fold
        cross-validation, an integer number of folds (:class:`KFold`), a CV
        generator object, or an iterable of train/test splits.  Refer to the
        :ref:`User Guide <cross_validation>` for the available strategies.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation (``-1`` uses all
        the CPUs).
    random_state : int, RandomState instance, or None (default)
        Seed of the pseudo random number generator that picks the feature to
        update when ``selection`` is 'random'.
    selection : str, default 'cyclic'
        'cyclic' sweeps the features in order; 'random' updates a random
        coefficient every iteration, which often converges significantly
        faster when ``tol`` is higher than 1e-4.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
    alpha_ : float
        The amount of penalization chosen by cross validation.
    mse_path_ : array, shape (n_alphas, n_folds)
        Mean square error for the test set on each fold, varying alpha.
    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting.
    n_iter_ : int
        Number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskElasticNetCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
                 cv=None, verbose=False, n_jobs=1, random_state=None,
                 selection='cyclic'):
        # Bundle all hyper-parameters and hand them unchanged to the shared
        # LinearModelCV constructor, which drives the CV path search.
        shared = dict(
            eps=eps,
            n_alphas=n_alphas,
            alphas=alphas,
            fit_intercept=fit_intercept,
            normalize=normalize,
            max_iter=max_iter,
            tol=tol,
            copy_X=copy_X,
            cv=cv,
            verbose=verbose,
            n_jobs=n_jobs,
            random_state=random_state,
            selection=selection,
        )
        super(MultiTaskLassoCV, self).__init__(**shared)
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/scikit-learn-master/sklearn/linear_model/coordinate_descent.py",
"copies": "1",
"size": "81083",
"license": "mit",
"hash": 2159661821378754600,
"line_mean": 36.5037002775,
"line_max": 82,
"alpha_frac": 0.6067733064,
"autogenerated": false,
"ratio": 4.04686564184468,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019076021592560007,
"num_lines": 2162
} |
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from joblib import Parallel, delayed, effective_n_jobs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin, MultiOutputMixin
from .base import _preprocess_data
from ..utils import check_array, check_X_y
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..utils.extmath import safe_sparse_dot
from ..utils.fixes import _joblib_parallel_args
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
                eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
    """ Compute the grid of alpha values for elastic net parameter search

    The grid is log-spaced between ``alpha_max`` (the smallest penalty for
    which all coefficients are zero) and ``alpha_max * eps``, and is returned
    in decreasing order.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication

    y : ndarray, shape (n_samples,)
        Target values

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed.

    l1_ratio : float
        The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not
        supported) ``For l1_ratio = 1`` it is an L1 penalty. For
        ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2.

    fit_intercept : boolean, default True
        Whether to fit an intercept or not

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Returns
    -------
    alphas : ndarray, shape (n_alphas,)
        Decreasing grid of alpha values.
    """
    # alpha_max is undefined when the penalty has no L1 component.
    if l1_ratio == 0:
        raise ValueError("Automatic alpha grid generation is not supported for"
                         " l1_ratio=0. Please supply a grid by providing "
                         "your estimator with the appropriate `alphas=` "
                         "argument.")
    n_samples = len(y)

    sparse_center = False
    if Xy is None:
        X_sparse = sparse.isspmatrix(X)
        # A sparse X cannot be centered without densifying it, so the
        # centering/scaling is applied to the precomputed Xy instead (below).
        sparse_center = X_sparse and (fit_intercept or normalize)
        X = check_array(X, 'csc',
                        copy=(copy_X and fit_intercept and not X_sparse))
        if not X_sparse:
            # X can be touched inplace thanks to the above line
            X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
                                             normalize, copy=False)
        Xy = safe_sparse_dot(X.T, y, dense_output=True)

        if sparse_center:
            # Workaround to find alpha_max for sparse matrices.
            # since we should not destroy the sparsity of such matrices.
            _, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
                                                          normalize,
                                                          return_mean=True)
            mean_dot = X_offset * np.sum(y)

    if Xy.ndim == 1:
        # Promote to 2D so the multi-output reduction below works unchanged.
        Xy = Xy[:, np.newaxis]

    if sparse_center:
        # Centering applied to Xy rather than X:
        # (X - mean).T @ y == X.T @ y - mean * sum(y).
        if fit_intercept:
            Xy -= mean_dot[:, np.newaxis]
        if normalize:
            Xy /= X_scale[:, np.newaxis]

    # Smallest alpha that drives all coefficients to zero: the largest
    # per-feature norm of Xy across outputs, scaled by the L1 share of the
    # penalty and the number of samples.
    alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
                 (n_samples * l1_ratio))

    if alpha_max <= np.finfo(float).resolution:
        # Degenerate case (e.g. y is all zeros): return a flat, tiny grid
        # instead of feeding log10(0) to logspace below.
        alphas = np.empty(n_alphas)
        alphas.fill(np.finfo(float).resolution)
        return alphas

    # Log-spaced grid, reversed so alphas run from alpha_max down to
    # alpha_max * eps.
    return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                       num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, copy_X=True, coef_init=None,
               verbose=False, return_n_iter=False, positive=False, **params):
    """Compute the Lasso regularization path with coordinate descent.

    For a single output the objective optimized along the path is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    and for multiple outputs::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    where ``||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}``, i.e. the sum of
    the norms of the rows of ``W``.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. ``X`` may be sparse only when
        ``y`` is mono-output.

    y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
        Target values.

    eps : float, optional
        Length of the path: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path.

    alphas : ndarray, optional
        Values of alpha at which to compute the models; chosen
        automatically when ``None``.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up calculations
        (``'auto'`` lets the implementation decide; the matrix itself may
        also be passed).

    Xy : array-like, optional
        Precomputed ``np.dot(X.T, y)``; only useful together with a
        precomputed Gram matrix.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        Initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    return_n_iter : bool
        Whether to also return the per-alpha iteration counts.

    positive : bool, default False
        If set to True, forces coefficients to be positive
        (only allowed when ``y.ndim == 1``).

    **params : kwargs
        Extra keyword arguments forwarded to the coordinate descent
        solver.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gap at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        Number of coordinate-descent iterations for each alpha
        (returned only when ``return_n_iter`` is True).

    Notes
    -----
    To avoid unnecessary memory duplication, pass the ``X`` argument as a
    Fortran-contiguous numpy array. For an example, see
    :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.

    In certain cases the Lars solver may be significantly faster: linear
    interpolation of the ``lars_path`` output recovers coefficients
    between the alpha values it reports.

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # The Lasso is the elastic net with a pure L1 penalty, so delegate to
    # enet_path with l1_ratio pinned at 1.  Unknown solver options travel
    # through **params; a keyword duplicated there still raises TypeError
    # at the call site, exactly as a direct keyword collision would.
    solver_args = dict(
        l1_ratio=1., eps=eps, n_alphas=n_alphas, alphas=alphas,
        precompute=precompute, Xy=Xy, copy_X=copy_X, coef_init=coef_init,
        verbose=verbose, positive=positive, return_n_iter=return_n_iter,
    )
    return enet_path(X, y, **solver_args, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, copy_X=True, coef_init=None,
              verbose=False, return_n_iter=False, positive=False,
              check_input=True, **params):
    """Compute elastic net path with coordinate descent
    The elastic net optimization function varies for mono and multi-outputs.
    For mono-output tasks it is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
    Where::
        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <elastic_net>`.
    Parameters
    ----------
    X : {array-like}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.
    y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
        Target values
    l1_ratio : float, optional
        float between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.
    verbose : bool or integer
        Amount of verbosity.
    return_n_iter : bool
        whether to return the number of iterations or not.
    positive : bool, default False
        If set to True, forces coefficients to be positive.
        (Only allowed when ``y.ndim == 1``).
    check_input : bool, default True
        Skip input validation checks, including the Gram matrix when provided
        assuming there are handled by the caller when check_input=False.
    **params : kwargs
        keyword arguments passed to the coordinate descent solver.
    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.
    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.
    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.
    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).
    Notes
    -----
    For an example, see
    :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.
    See also
    --------
    MultiTaskElasticNet
    MultiTaskElasticNetCV
    ElasticNet
    ElasticNetCV
    """
    # We expect X and y to be already Fortran ordered when bypassing
    # checks
    if check_input:
        X = check_array(X, 'csc', dtype=[np.float64, np.float32],
                        order='F', copy=copy_X)
        y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,
                        ensure_2d=False)
        if Xy is not None:
            # Xy should be a 1d contiguous array or a 2D C ordered array
            Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
                             ensure_2d=False)
    n_samples, n_features = X.shape
    # A 2-D y selects the multi-task solver variant in the loop below.
    multi_output = False
    if y.ndim != 1:
        multi_output = True
        _, n_outputs = y.shape
    if multi_output and positive:
        raise ValueError('positive=True is not allowed for multi-output'
                         ' (y.ndim != 1)')
    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.isspmatrix(X):
        if 'X_offset' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_offset'] / params['X_scale']
            X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
        else:
            X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
    # X should be normalized and fit already if function is called
    # from ElasticNet.fit
    if check_input:
        X, y, X_offset, y_offset, X_scale, precompute, Xy = \
            _pre_fit(X, y, Xy, precompute, normalize=False,
                     fit_intercept=False, copy=False, check_input=check_input)
    if alphas is None:
        # No need to normalize of fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered
    n_alphas = len(alphas)
    # Solver options arrive through **params with the documented defaults.
    tol = params.get('tol', 1e-4)
    max_iter = params.get('max_iter', 1000)
    dual_gaps = np.empty(n_alphas)
    n_iters = []
    rng = check_random_state(params.get('random_state', None))
    selection = params.get('selection', 'cyclic')
    if selection not in ['random', 'cyclic']:
        raise ValueError("selection should be either random or cyclic.")
    random = (selection == 'random')
    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
    else:
        coefs = np.empty((n_outputs, n_features, n_alphas),
                         dtype=X.dtype)
    if coef_init is None:
        coef_ = np.zeros(coefs.shape[:-1], dtype=X.dtype, order='F')
    else:
        coef_ = np.asfortranarray(coef_init, dtype=X.dtype)
    # Solve one problem per alpha (largest alpha first); coef_ is carried
    # over between iterations, so each solve warm-starts from the previous
    # alpha's solution.
    for i, alpha in enumerate(alphas):
        # The compiled solvers expect penalties pre-scaled by n_samples.
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        # Dispatch to the solver matching the setting: sparse X,
        # multi-task y, precomputed Gram matrix, or plain dense X.
        if not multi_output and sparse.isspmatrix(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, rng, random, positive)
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
        elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already Fortran ordered when bypassing
            # checks
            if check_input:
                precompute = check_array(precompute, dtype=X.dtype.type,
                                         order='C')
            model = cd_fast.enet_coordinate_descent_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
                tol, rng, random, positive)
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
                positive)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like. Got %r" % precompute)
        # Each solver returns a 4-tuple; only the coefficients, dual gap
        # and iteration count are kept (eps_ is unused here).
        coef_, dual_gap_, eps_, n_iter_ = model
        coefs[..., i] = coef_
        dual_gaps[i] = dual_gap_
        n_iters.append(n_iter_)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')
    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters
    return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin, MultiOutputMixin):
    """Linear regression with combined L1 and L2 priors as regularizer.
    Minimizes the objective function::
            1 / (2 * n_samples) * ||y - Xw||^2_2
            + alpha * l1_ratio * ||w||_1
            + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::
            a * L1 + b * L2
    where::
            alpha = a + b and l1_ratio = a / (a + b)
    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.
    Read more in the :ref:`User Guide <elastic_net>`.
    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the penalty terms. Defaults to 1.0.
        See the notes for the exact mathematical meaning of this
        parameter.``alpha = 0`` is equivalent to an ordinary least square,
        solved by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
        Given this, you should use the :class:`LinearRegression` object.
    l1_ratio : float
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.
    fit_intercept : bool
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.
    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    precompute : True | False | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. The Gram matrix can also be passed as argument.
        For sparse input this option is always ``True`` to preserve sparsity.
    max_iter : int, optional
        The maximum number of iterations
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.
    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.
    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator that selects a random
        feature to update. If int, random_state is the seed used by the random
        number generator; If RandomState instance, random_state is the random
        number generator; If None, the random number generator is the
        RandomState instance used by `np.random`. Used when ``selection`` ==
        'random'.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``
    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.
    n_iter_ : array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    Examples
    --------
    >>> from sklearn.linear_model import ElasticNet
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=2, random_state=0)
    >>> regr = ElasticNet(random_state=0)
    >>> regr.fit(X, y)
    ElasticNet(random_state=0)
    >>> print(regr.coef_)
    [18.83816048 64.55968825]
    >>> print(regr.intercept_)
    1.451...
    >>> print(regr.predict([[0, 0]]))
    [1.451...]
    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    See also
    --------
    ElasticNetCV : Elastic net model with best model selection by
        cross-validation.
    SGDRegressor: implements elastic net regression with incremental training.
    SGDClassifier: implements logistic regression with elastic net penalty
        (``SGDClassifier(loss="log", penalty="elasticnet")``).
    """
    # Regularization-path solver used by fit(); subclasses reuse the same
    # static method.
    path = staticmethod(enet_path)
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute=False, max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # Per scikit-learn convention, __init__ only stores hyper-parameters;
        # all validation and work happens in fit().
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y, check_input=True):
        """Fit model with coordinate descent.
        Parameters
        ----------
        X : ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples,) or (n_samples, n_targets)
            Target. Will be cast to X's dtype if necessary
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)
        if isinstance(self.precompute, str):
            raise ValueError('precompute should be one of True, False or'
                             ' array-like. Got %r' % self.precompute)
        # Remember if X is copied
        X_copied = False
        # We expect X and y to be float64 or float32 Fortran ordered arrays
        # when bypassing checks
        if check_input:
            X_copied = self.copy_X and self.fit_intercept
            X, y = check_X_y(X, y, accept_sparse='csc',
                             order='F', dtype=[np.float64, np.float32],
                             copy=X_copied, multi_output=True, y_numeric=True)
            y = check_array(y, order='F', copy=False, dtype=X.dtype.type,
                            ensure_2d=False)
        # Ensure copying happens only once, don't do it again if done above
        should_copy = self.copy_X and not X_copied
        X, y, X_offset, y_offset, X_scale, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=should_copy,
                     check_input=check_input)
        # Work with 2-D targets internally so the single- and multi-target
        # cases share one code path.
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]
        n_samples, n_features = X.shape
        n_targets = y.shape[1]
        if self.selection not in ['cyclic', 'random']:
            raise ValueError("selection should be either random or cyclic.")
        # Warm start: reuse the previous fit's coefficients when requested
        # and available; otherwise start from zeros.
        if not self.warm_start or not hasattr(self, "coef_"):
            coef_ = np.zeros((n_targets, n_features), dtype=X.dtype,
                             order='F')
        else:
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]
        dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
        self.n_iter_ = []
        # Solve one coordinate-descent problem per target, each on the
        # single-alpha "path" [self.alpha] (self.path is enet_path).
        # fit_intercept/normalize are False here because _pre_fit above has
        # already applied the centering/scaling.
        for k in range(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap, this_iter = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[self.alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_offset=X_offset, X_scale=X_scale,
                          return_n_iter=True, coef_init=coef_[k],
                          max_iter=self.max_iter,
                          random_state=self.random_state,
                          selection=self.selection,
                          check_input=False)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
            self.n_iter_.append(this_iter[0])
        # Collapse results back to scalars/1-D arrays for the common
        # single-target case.
        if n_targets == 1:
            self.n_iter_ = self.n_iter_[0]
            self.coef_ = coef_[0]
            self.dual_gap_ = dual_gaps_[0]
        else:
            self.coef_ = coef_
            self.dual_gap_ = dual_gaps_
        self._set_intercept(X_offset, y_offset, X_scale)
        # workaround since _set_intercept will cast self.coef_ into X.dtype
        self.coef_ = np.asarray(self.coef_, dtype=X.dtype)
        # return self for chaining fit and predict calls
        return self
    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted ``coef_`` """
        return sparse.csr_matrix(self.coef_)
    def _decision_function(self, X):
        """Decision function of the linear model
        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        check_is_fitted(self, 'n_iter_')
        # For sparse X, compute X @ coef_.T + intercept_ directly so the
        # sparse matrix is never densified; otherwise defer to the base
        # implementation.
        if sparse.isspmatrix(X):
            return safe_sparse_dot(X, self.coef_.T,
                                   dense_output=True) + self.intercept_
        else:
            return super()._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear regression with an L1 penalty on the coefficients (the Lasso).

    The objective minimized by coordinate descent is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    This is exactly the elastic net objective with ``l1_ratio=1.0`` (no L2
    term), so the implementation simply fixes that parameter on the
    :class:`ElasticNet` base class.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Multiplier of the L1 term; defaults to 1.0. ``alpha = 0`` reduces
        the objective to ordinary least squares; for numerical reasons,
        using ``alpha = 0`` with the ``Lasso`` object is not advised —
        use :class:`LinearRegression` instead.
    fit_intercept : boolean, optional, default True
        If True, estimate an intercept for this model; if False, the data
        is expected to be centered already.
    normalize : boolean, optional, default False
        Ignored when ``fit_intercept`` is set to False. When True, the
        regressors X are normalized before regression by subtracting the
        mean and dividing by the l2-norm. To standardize instead, use
        :class:`sklearn.preprocessing.StandardScaler` before calling
        ``fit`` with ``normalize=False``.
    precompute : True | False | array-like, default=False
        Whether to use a precomputed Gram matrix to speed up calculations
        (the matrix itself may also be passed). For sparse input this
        option is always ``True`` to preserve sparsity.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        Maximum number of coordinate-descent iterations.
    tol : float, optional
        Optimization tolerance: when the updates fall below ``tol``, the
        dual gap is checked for optimality and optimization continues
        until the gap is smaller than ``tol``.
    warm_start : bool, optional
        When True, reuse the previous call's solution as initialization;
        otherwise, erase the previous solution.
        See :term:`the Glossary <warm_start>`.
    positive : bool, optional
        When ``True``, force the coefficients to be positive.
    random_state : int, RandomState instance or None, optional, default None
        Seed or generator controlling which coefficient is updated when
        ``selection`` == 'random'. An int seeds the random number
        generator; a RandomState instance is used directly; None uses the
        RandomState instance behind `np.random`.
    selection : str, default 'cyclic'
        'cyclic' sweeps the features in order; 'random' updates a random
        coefficient every iteration, which often converges noticeably
        faster, especially when tol is higher than 1e-4.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        Read-only sparse view derived from ``coef_``.
    intercept_ : float | array, shape (n_targets,)
        Independent term in the decision function.
    n_iter_ : int | array-like, shape (n_targets,)
        Number of coordinate-descent iterations run to reach the
        specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1)
    >>> print(clf.coef_)
    [0.85 0. ]
    >>> print(clf.intercept_)
    0.15...

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    Fitting uses coordinate descent. To avoid unnecessary memory
    duplication, pass the ``X`` argument of ``fit`` as a
    Fortran-contiguous numpy array.
    """

    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute=False, copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # A Lasso is an elastic net with the mixing parameter pinned to a
        # pure L1 penalty; everything else is inherited unchanged.
        super().__init__(
            alpha=alpha,
            l1_ratio=1.0,
            fit_intercept=fit_intercept,
            normalize=normalize,
            precompute=precompute,
            copy_X=copy_X,
            max_iter=max_iter,
            tol=tol,
            warm_start=warm_start,
            positive=positive,
            random_state=random_state,
            selection=selection,
        )
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values
    train : list of indices
        The indices of the train set
    test : list of indices
        The indices of the test set
    path : callable
        function returning a list of models on the path. See
        enet_path for an example of signature
    path_params : dictionary
        Parameters passed to the path function
    alphas : array-like, optional
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2
    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies
    dtype : a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies
    """
    # Materialize this fold's train/test split.
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']
    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False
    # Preprocess the training fold (centering/scaling and optional
    # Gram/Xy precomputation) before running the path.
    X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)
    # Work on a copy so the caller's path_params dict is not mutated.
    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_offset'] = X_offset
    path_params['X_scale'] = X_scale
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas
    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio
    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    # Release the training fold before scoring on the test fold.
    del X_train, y_train
    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_offset = np.atleast_1d(y_offset)
        y_test = y_test[:, np.newaxis]
    if normalize:
        # Undo the l2-norm scaling so coefficients apply to raw features.
        nonzeros = np.flatnonzero(X_scale)
        coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
    # Intercepts implied by the centering offsets used during fitting.
    intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
    if sparse.issparse(X_test):
        n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matrices since coefs is a 3-D numpy array.
        coefs_feature_major = np.rollaxis(coefs, 1)
        feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
        X_test_coefs = safe_sparse_dot(X_test, feature_2d)
        X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
    else:
        X_test_coefs = safe_sparse_dot(X_test, coefs)
    # Mean squared prediction error on the test fold, one value per alpha.
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
    return this_mses
class LinearModelCV(LinearModel, MultiOutputMixin, metaclass=ABCMeta):
    """Base class for iterative model fitting along a regularization path.
    ``fit`` cross-validates over a grid of alphas (and, when the subclass
    defines it, ``l1_ratio`` values), picks the pair with the lowest mean
    MSE across folds, and refits the corresponding estimator on the full
    data.
    """
    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=None,
                 positive=False, random_state=None, selection='cyclic'):
        # Hyper-parameters only; all work happens in fit().
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y):
        """Fit linear model with coordinate descent
        Fit is on grid of alphas and best alpha estimated by cross-validation.
        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data. Pass directly as Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values
        """
        y = check_array(y, copy=False, dtype=[np.float64, np.float32],
                        ensure_2d=False)
        if y.shape[0] == 0:
            raise ValueError("y has 0 samples: %r" % y)
        # Choose the estimator that will be refit at the end: the presence
        # of an l1_ratio hyper-parameter distinguishes ElasticNet-style
        # subclasses from Lasso-style ones.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        # Mono-task CV classes accept 1-D y (and sparse X); multi-task
        # variants require dense X and 2-D y.
        if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
            if model_str == 'ElasticNet':
                model = ElasticNet()
            else:
                model = Lasso()
            if y.ndim > 1 and y.shape[1] > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%sCV" % (model_str))
            y = column_or_1d(y, warn=True)
        else:
            if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was"
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % (model_str))
            if model_str == 'ElasticNet':
                model = MultiTaskElasticNet()
            else:
                model = MultiTaskLasso()
        if self.selection not in ["random", "cyclic"]:
            raise ValueError("selection should be either random or cyclic.")
        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept
        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = check_array(X, 'csc', copy=False)
            if sparse.isspmatrix(X):
                if (hasattr(reference_to_old_X, "data") and
                        not np.may_share_memory(reference_to_old_X.data,
                                                X.data)):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            X = check_array(X, 'csc', dtype=[np.float64, np.float32],
                            order='F', copy=copy_X)
            copy_X = False
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            # One alpha grid per l1_ratio, since alpha_max depends on it.
            alphas = [_alpha_grid(X, y, l1_ratio=l1_ratio,
                                  fit_intercept=self.fit_intercept,
                                  eps=self.eps, n_alphas=self.n_alphas,
                                  normalize=self.normalize, copy_X=self.copy_X)
                      for l1_ratio in l1_ratios]
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})
        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if effective_n_jobs(self.n_jobs) > 1:
            path_params['copy_X'] = False
        # init cross-validation generator
        cv = check_cv(self.cv)
        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv.split(X, y))
        best_mse = np.inf
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=X.dtype.type)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             **_joblib_parallel_args(prefer="threads"))(jobs)
        # Shape the flat job results into (n_l1_ratio, n_folds, n_alphas).
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        mean_mse = np.mean(mse_paths, axis=1)
        # mse_path_ is stored as (n_l1_ratio, n_alphas, n_folds), squeezed
        # when there is a single l1_ratio.
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
        # Select the (l1_ratio, alpha) pair with the lowest mean CV MSE.
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse
        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])
        # Refit the model with the parameters selected
        common_params = {name: value
                         for name, value in self.get_params().items()
                         if name in model.get_params()}
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        model.precompute = False
        model.fit(X, y)
        if not hasattr(self, 'l1_ratio'):
            del self.l1_ratio_
        # Expose the refit model's results on this CV estimator.
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        self.n_iter_ = model.n_iter_
        return self
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path.

    See glossary entry for :term:`cross-validation estimator`.

    The best model is selected by cross-validation.  The optimization
    objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path.
    alphas : numpy array, optional
        Explicit list of alphas at which to compute the models.  If
        ``None``, the alphas are set automatically.
    fit_intercept : boolean, default True
        Whether to calculate the intercept for this model.  If set to
        False, no intercept will be used in calculations (e.g. the data
        is expected to be already centered).
    normalize : boolean, optional, default False
        Ignored when ``fit_intercept`` is False.  If True, the
        regressors X are normalized before regression by subtracting the
        mean and dividing by the l2-norm.  Prefer
        :class:`sklearn.preprocessing.StandardScaler` with
        ``normalize=False`` if you wish to standardize.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations ('auto' lets the estimator decide).  The Gram
        matrix may also be passed directly.
    max_iter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Optimization tolerance: when the updates are smaller than
        ``tol``, the dual gap is checked for optimality and iteration
        continues until the gap is smaller than ``tol``.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy.  Possible inputs:

        - None, for the default 5-fold cross-validation,
        - an integer number of folds,
        - a :term:`CV splitter`,
        - an iterable of (train, test) index arrays.

        For integer/None inputs, :class:`KFold` is used.  Refer to the
        :ref:`User Guide <cross_validation>` for the available
        strategies.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : int or None, optional (default=None)
        Number of CPUs used during cross validation.  ``None`` means 1
        unless in a :obj:`joblib.parallel_backend` context; ``-1`` uses
        all processors.  See :term:`Glossary <n_jobs>` for details.
    positive : bool, optional
        If positive, restrict regression coefficients to be positive.
    random_state : int, RandomState instance or None, optional, default None
        Seed of the pseudo random number generator that selects a random
        feature to update; an int seeds the generator, a RandomState
        instance is used directly, None falls back to the `np.random`
        singleton.  Only used when ``selection`` == 'random'.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every
        iteration instead of looping over features sequentially, which
        often leads to significantly faster convergence, especially when
        tol is higher than 1e-4.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation.
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.
    mse_path_ : array, shape (n_alphas, n_folds)
        Mean square error for the test set on each fold, varying alpha.
    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting.
    dual_gap_ : ndarray, shape ()
        The dual gap at the end of the optimization for the optimal
        alpha (``alpha_``).
    n_iter_ : int
        Number of iterations run by the coordinate descent solver to
        reach the specified tolerance for the optimal alpha.

    Examples
    --------
    >>> from sklearn.linear_model import LassoCV
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(noise=4, random_state=0)
    >>> reg = LassoCV(cv=5, random_state=0).fit(X, y)
    >>> reg.score(X, y)
    0.9993...
    >>> reg.predict(X[:1,])
    array([-78.4951...])

    Notes
    -----
    For an example, see
    :ref:`examples/linear_model/plot_lasso_model_selection.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.

    To avoid unnecessary memory duplication the X argument of the fit
    method should be directly passed as a Fortran-contiguous numpy
    array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # Regularization-path function consumed by the inherited
    # LinearModelCV fitting machinery.
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=None,
                 positive=False, random_state=None, selection='cyclic'):
        # Forward every hyper-parameter unchanged to LinearModelCV, which
        # stores them as public attributes for get_params/set_params.
        params = dict(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol,
            copy_X=copy_X, cv=cv, verbose=verbose, n_jobs=n_jobs,
            positive=positive, random_state=random_state,
            selection=selection)
        super().__init__(**params)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path.
    See glossary entry for :term:`cross-validation estimator`.
    Read more in the :ref:`User Guide <elastic_net>`.
    Parameters
    ----------
    l1_ratio : float or array of floats, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0``
        the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path, used for each l1_ratio.
    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For integer/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : int or None, optional (default=None)
        Number of CPUs to use during the cross validation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.
    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator that selects a random
        feature to update. If int, random_state is the seed used by the random
        number generator; If RandomState instance, random_state is the random
        number generator; If None, the random number generator is the
        RandomState instance used by `np.random`. Used when ``selection`` ==
        'random'.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation
    l1_ratio_ : float
        The compromise between l1 and l2 penalization chosen by
        cross validation
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    intercept_ : float | array, shape (n_targets,)
        Independent term in the decision function.
    mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.
    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.
    Examples
    --------
    >>> from sklearn.linear_model import ElasticNetCV
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=2, random_state=0)
    >>> regr = ElasticNetCV(cv=5, random_state=0)
    >>> regr.fit(X, y)
    ElasticNetCV(cv=5, random_state=0)
    >>> print(regr.alpha_)
    0.199...
    >>> print(regr.intercept_)
    0.398...
    >>> print(regr.predict([[0, 0]]))
    [0.398...]
    Notes
    -----
    For an example, see
    :ref:`examples/linear_model/plot_lasso_model_selection.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::
        a * L1 + b * L2
    for::
        alpha = a + b and l1_ratio = a / (a + b).
    See also
    --------
    enet_path
    ElasticNet
    """
    # Regularization-path function consumed by the inherited
    # LinearModelCV fitting machinery.
    path = staticmethod(enet_path)
    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=None, positive=False, random_state=None,
                 selection='cyclic'):
        # Only store the hyper-parameters here; the class defines no fit of
        # its own, so fitting happens in the inherited LinearModelCV.fit.
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
    """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
    The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
    Where::
        ||W||_21 = sum_i sqrt(sum_j w_ij ^ 2)
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <multi_task_elastic_net>`.
    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0
    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.
    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator that selects a random
        feature to update. If int, random_state is the seed used by the random
        number generator; If RandomState instance, random_state is the random
        number generator; If None, the random number generator is the
        RandomState instance used by `np.random`. Used when ``selection`` ==
        'random'.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array.
        Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskElasticNet(alpha=0.1)
    >>> print(clf.coef_)
    [[0.45663524 0.45612256]
     [0.45663524 0.45612256]]
    >>> print(clf.intercept_)
    [0.0872422 0.0872422]
    See also
    --------
    MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in
        cross-validation.
    ElasticNet
    MultiTaskLasso
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False, random_state=None, selection='cyclic'):
        # Hyper-parameters are stored verbatim; validation (e.g. of
        # ``selection``) happens in fit.
        self.l1_ratio = l1_ratio
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y):
        """Fit MultiTaskElasticNet model with coordinate descent
        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples, n_tasks)
            Target. Will be cast to X's dtype if necessary
        Returns
        -------
        self : object
            The fitted estimator.
        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        X = check_array(X, dtype=[np.float64, np.float32], order='F',
                        copy=self.copy_X and self.fit_intercept)
        y = check_array(y, dtype=X.dtype.type, ensure_2d=False)
        # Choose the class name to mention in the mono-task error message.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        # The multi-task solver requires a 2D target; single-output users
        # are redirected to the corresponding mono-task estimator.
        if y.ndim == 1:
            raise ValueError("For mono-task outputs, use %s" % model_str)
        n_samples, n_features = X.shape
        _, n_tasks = y.shape
        if n_samples != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (n_samples, y.shape[0]))
        X, y, X_offset, y_offset, X_scale = _preprocess_data(
            X, y, self.fit_intercept, self.normalize, copy=False)
        # Start from zeros unless warm-starting from a previous fit.
        if not self.warm_start or not hasattr(self, "coef_"):
            self.coef_ = np.zeros((n_tasks, n_features), dtype=X.dtype.type,
                                  order='F')
        # Penalties are pre-scaled by n_samples before being handed to the
        # Cython coordinate-descent solver.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory
        if self.selection not in ['random', 'cyclic']:
            raise ValueError("selection should be either random or cyclic.")
        random = (self.selection == 'random')
        self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
                check_random_state(self.random_state), random)
        self._set_intercept(X_offset, y_offset, X_scale)
        # return self for chaining fit and predict calls
        return self
    def _more_tags(self):
        # This estimator only supports multi-output targets (see the
        # y.ndim check in fit).
        return {'multioutput_only': True}
class MultiTaskLasso(MultiTaskElasticNet):
    """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.
    The optimization objective for Lasso is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
    Where::
        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <multi_task_lasso>`.
    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.
    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator that selects a random
        feature to update. If int, random_state is the seed used by the random
        number generator; If RandomState instance, random_state is the random
        number generator; If None, the random number generator is the
        RandomState instance used by `np.random`. Used when ``selection`` ==
        'random'.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4
    Attributes
    ----------
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
        Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
    intercept_ : array, shape (n_tasks,)
        independent term in decision function.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1)
    >>> print(clf.coef_)
    [[0.89393398 0.        ]
     [0.89393398 0.        ]]
    >>> print(clf.intercept_)
    [0.10606602 0.10606602]
    See also
    --------
    MultiTaskLassoCV : Multi-task L1/L2 Lasso with built-in cross-validation
    Lasso
    MultiTaskElasticNet
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        # A MultiTaskLasso is a MultiTaskElasticNet restricted to a pure
        # L1/L2 penalty, so the mixing ratio is fixed at 1.0 (it is not a
        # constructor parameter).
        self.l1_ratio = 1.0
        self.random_state = random_state
        self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
    """Multi-task L1/L2 ElasticNet with built-in cross-validation.

    See glossary entry for :term:`cross-validation estimator`.

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_elastic_net>`.

    Parameters
    ----------
    l1_ratio : float or array of floats
        ElasticNet mixing parameter, with ``0 < l1_ratio <= 1``.  With
        ``l1_ratio = 1`` the penalty is a pure L1/L2 penalty; with
        ``l1_ratio = 0`` it is a pure L2 penalty; values in between mix
        the two.  A list of values may be given, in which case each is
        tried by cross-validation and the best-scoring one is kept.  A
        good list usually puts more values close to 1 (i.e. Lasso) and
        fewer close to 0 (i.e. Ridge), as in
        ``[.1, .5, .7, .9, .95, .99, 1]``.
    eps : float, optional
        Length of the path: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path.
    alphas : array-like, optional
        Explicit list of alphas at which to compute the models; set
        automatically when not provided.
    fit_intercept : boolean
        Whether to calculate the intercept for this model.  If set to
        False, no intercept will be used in calculations (e.g. the data
        is expected to be already centered).
    normalize : boolean, optional, default False
        Ignored when ``fit_intercept`` is False.  If True, the
        regressors X are normalized before regression by subtracting the
        mean and dividing by the l2-norm.  Prefer
        :class:`sklearn.preprocessing.StandardScaler` with
        ``normalize=False`` if you wish to standardize.
    max_iter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Optimization tolerance: when the updates are smaller than
        ``tol``, the dual gap is checked for optimality and iteration
        continues until the gap is smaller than ``tol``.
    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy.  Possible inputs:

        - None, for the default 5-fold cross-validation,
        - an integer number of folds,
        - a :term:`CV splitter`,
        - an iterable of (train, test) index arrays.

        For integer/None inputs, :class:`KFold` is used.  Refer to the
        :ref:`User Guide <cross_validation>` for the available
        strategies.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : int or None, optional (default=None)
        Number of CPUs used during cross validation; only relevant when
        multiple values of l1_ratio are given.  ``None`` means 1 unless
        in a :obj:`joblib.parallel_backend` context; ``-1`` uses all
        processors.  See :term:`Glossary <n_jobs>` for details.
    random_state : int, RandomState instance or None, optional, default None
        Seed of the pseudo random number generator that selects a random
        feature to update; an int seeds the generator, a RandomState
        instance is used directly, None falls back to the `np.random`
        singleton.  Only used when ``selection`` == 'random'.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every
        iteration instead of looping over features sequentially, which
        often leads to significantly faster convergence, especially when
        tol is higher than 1e-4.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).  Note that
        ``coef_`` stores the transpose of ``W``, ``W.T``.
    alpha_ : float
        The amount of penalization chosen by cross validation.
    mse_path_ : array, shape (n_alphas, n_folds) or \
                (n_l1_ratio, n_alphas, n_folds)
        Mean square error for the test set on each fold, varying alpha.
    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.
    l1_ratio_ : float
        Best l1_ratio obtained by cross-validation.
    n_iter_ : int
        Number of iterations run by the coordinate descent solver to
        reach the specified tolerance for the optimal alpha.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNetCV(cv=3)
    >>> clf.fit([[0,0], [1, 1], [2, 2]],
    ...         [[0, 0], [1, 1], [2, 2]])
    MultiTaskElasticNetCV(cv=3)
    >>> print(clf.coef_)
    [[0.52875032 0.46958558]
     [0.52875032 0.46958558]]
    >>> print(clf.intercept_)
    [0.00166409 0.00166409]

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskLassoCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit
    method should be directly passed as a Fortran-contiguous numpy
    array.
    """
    # Regularization-path function consumed by the inherited
    # LinearModelCV fitting machinery.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False,
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=None, random_state=None,
                 selection='cyclic'):
        # Store each constructor argument verbatim under its own name, as
        # required by the scikit-learn get_params/set_params contract.
        for _name, _value in (('l1_ratio', l1_ratio), ('eps', eps),
                              ('n_alphas', n_alphas), ('alphas', alphas),
                              ('fit_intercept', fit_intercept),
                              ('normalize', normalize),
                              ('max_iter', max_iter), ('tol', tol),
                              ('cv', cv), ('copy_X', copy_X),
                              ('verbose', verbose), ('n_jobs', n_jobs),
                              ('random_state', random_state),
                              ('selection', selection)):
            setattr(self, _name, _value)

    def _more_tags(self):
        # Only multi-output targets are supported.
        return dict(multioutput_only=True)
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
    """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.

    See glossary entry for :term:`cross-validation estimator`.

    The optimization objective for MultiTaskLasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path.
    alphas : array-like, optional
        Explicit list of alphas at which to compute the models; set
        automatically when not provided.
    fit_intercept : boolean
        Whether to calculate the intercept for this model.  If set to
        False, no intercept will be used in calculations (e.g. the data
        is expected to be already centered).
    normalize : boolean, optional, default False
        Ignored when ``fit_intercept`` is False.  If True, the
        regressors X are normalized before regression by subtracting the
        mean and dividing by the l2-norm.  Prefer
        :class:`sklearn.preprocessing.StandardScaler` with
        ``normalize=False`` if you wish to standardize.
    max_iter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Optimization tolerance: when the updates are smaller than
        ``tol``, the dual gap is checked for optimality and iteration
        continues until the gap is smaller than ``tol``.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy.  Possible inputs:

        - None, for the default 5-fold cross-validation,
        - an integer number of folds,
        - a :term:`CV splitter`,
        - an iterable of (train, test) index arrays.

        For integer/None inputs, :class:`KFold` is used.  Refer to the
        :ref:`User Guide <cross_validation>` for the available
        strategies.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : int or None, optional (default=None)
        Number of CPUs used during cross validation; only relevant when
        multiple values of l1_ratio are given.  ``None`` means 1 unless
        in a :obj:`joblib.parallel_backend` context; ``-1`` uses all
        processors.  See :term:`Glossary <n_jobs>` for details.
    random_state : int, RandomState instance or None, optional, default None
        Seed of the pseudo random number generator that selects a random
        feature to update; an int seeds the generator, a RandomState
        instance is used directly, None falls back to the `np.random`
        singleton.  Only used when ``selection`` == 'random'.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every
        iteration instead of looping over features sequentially, which
        often leads to significantly faster convergence, especially when
        tol is higher than 1e-4.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).  Note that
        ``coef_`` stores the transpose of ``W``, ``W.T``.
    alpha_ : float
        The amount of penalization chosen by cross validation.
    mse_path_ : array, shape (n_alphas, n_folds)
        Mean square error for the test set on each fold, varying alpha.
    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting.
    n_iter_ : int
        Number of iterations run by the coordinate descent solver to
        reach the specified tolerance for the optimal alpha.

    Examples
    --------
    >>> from sklearn.linear_model import MultiTaskLassoCV
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.metrics import r2_score
    >>> X, y = make_regression(n_targets=2, noise=4, random_state=0)
    >>> reg = MultiTaskLassoCV(cv=5, random_state=0).fit(X, y)
    >>> r2_score(y, reg.predict(X))
    0.9994...
    >>> reg.alpha_
    0.5713...
    >>> reg.predict(X[:1,])
    array([[153.7971..., 94.9015...]])

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskElasticNetCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit
    method should be directly passed as a Fortran-contiguous numpy
    array.
    """
    # Regularization-path function consumed by the inherited
    # LinearModelCV fitting machinery.
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
                 cv=None, verbose=False, n_jobs=None, random_state=None,
                 selection='cyclic'):
        # Forward every hyper-parameter unchanged to LinearModelCV, which
        # stores them as public attributes for get_params/set_params.
        params = dict(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            max_iter=max_iter, tol=tol, copy_X=copy_X, cv=cv,
            verbose=verbose, n_jobs=n_jobs, random_state=random_state,
            selection=selection)
        super().__init__(**params)

    def _more_tags(self):
        # Only multi-output targets are supported.
        return dict(multioutput_only=True)
| {
"repo_name": "chrsrds/scikit-learn",
"path": "sklearn/linear_model/coordinate_descent.py",
"copies": "2",
"size": "85449",
"license": "bsd-3-clause",
"hash": 5724805391785200000,
"line_mean": 36.7258278146,
"line_max": 81,
"alpha_frac": 0.6065723414,
"autogenerated": false,
"ratio": 4.008490875826805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5615063217226806,
"avg_score": null,
"num_lines": null
} |
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import array2d, atleast2d_or_csc
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
                eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
    """ Compute the grid of alpha values for elastic net parameter search

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication
    y : ndarray, shape = (n_samples,)
        Target values
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed.
    l1_ratio : float
        The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
        l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
        1``, the penalty is a combination of L1 and L2.
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``
    n_alphas : int, optional
        Number of alphas along the regularization path
    fit_intercept : bool
        Fit or not an intercept
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Returns
    -------
    alphas : ndarray, shape (n_alphas,)
        Geometric grid of alphas, in decreasing order, from alpha_max
        (smallest alpha for which all coefficients are zero) down to
        alpha_max * eps.
    """
    n_samples = len(y)
    sparse_center = False
    if Xy is None:
        X_sparse = sparse.isspmatrix(X)
        sparse_center = X_sparse and (fit_intercept or normalize)
        X = atleast2d_or_csc(X, copy=(copy_X and fit_intercept and not
                                      X_sparse))
        if not X_sparse:
            # X can be touched inplace thanks to the above line
            X, y, _, _, _ = center_data(X, y, fit_intercept,
                                        normalize, copy=False)
        Xy = safe_sparse_dot(X.T, y, dense_output=True)
        if sparse_center:
            # Workaround to find alpha_max for sparse matrices.
            # since we should not destroy the sparsity of such matrices.
            _, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
                                                        normalize)
            mean_dot = np.sum(X_mean[:, np.newaxis] * y, axis=1)
    if Xy.ndim == 1:
        Xy = Xy[:, np.newaxis]
    if sparse_center:
        # Apply centering/scaling to the precomputed Gram vector instead of
        # to the (sparse) X itself.
        if fit_intercept:
            Xy -= mean_dot[:, np.newaxis]
        if normalize:
            Xy /= X_std[:, np.newaxis]
    alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
                 (n_samples * l1_ratio))
    if alpha_max <= np.finfo(float).resolution:
        # Degenerate case (e.g. y is all zeros): log10(0) would produce
        # -inf and the grid would be full of nan/inf. Return a constant
        # grid of tiny alphas instead.
        alphas = np.empty(n_alphas)
        alphas.fill(np.finfo(float).resolution)
        return alphas
    alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                         num=n_alphas)[::-1]
    return alphas
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, fit_intercept=None,
               normalize=None, copy_X=True, coef_init=None,
               verbose=False, return_models=False,
               **params):
    r"""Compute Lasso path with coordinate descent
    The Lasso optimization function varies for mono and multi-outputs.
    For mono-output tasks it is::
        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
    Where::
        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.
    y : ndarray, shape = (n_samples,), or (n_samples, n_outputs)
        Target values
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : ndarray, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.
    fit_intercept : bool
        Fit or not an intercept.
        WARNING : deprecated, will be removed in 0.16.
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
        WARNING : deprecated, will be removed in 0.16.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.
    verbose : bool or integer
        Amount of verbosity.
    return_models : boolean, optional, default False
        If ``True``, the function will return list of models. Setting it
        to ``False`` will change the function output returning the values
        of the alphas and the coefficients along the path. Returning the
        model list will be removed in version 0.16.
    params : kwargs
        keyword arguments passed to the coordinate descent solver.
    Returns
    -------
    models : a list of models along the regularization path
        (Is returned if ``return_models`` is set ``True``.)
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.
        (Is returned, along with ``coefs``, when ``return_models`` is set
        to ``False``)
    coefs : array, shape (n_features, n_alphas) or
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.
        (Is returned, along with ``alphas``, when ``return_models`` is set
        to ``False``).
    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.
        (Is returned, along with ``alphas``, when ``return_models`` is set
        to ``False``).
    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path
    Deprecation Notice: Setting ``return_models`` to ``False`` will make
    the Lasso Path return an output in the style used by :func:`lars_path`.
    This will be become the norm as of version 0.16. Leaving ``return_models``
    set to `True` will let the function return a list of models as before.
    Examples
    --------
    Comparing lasso_path and lars_path with interpolation:
    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> # Use lasso_path to compute a coefficient path
    >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5],
    ...                              fit_intercept=False)
    >>> print(coef_path)
    [[ 0.          0.          0.46874778]
     [ 0.2159048   0.4425765   0.23689075]]
    >>> # Now use lars_path and 1D linear interpolation to compute the
    >>> # same path
    >>> from sklearn.linear_model import lars_path
    >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
    >>> from scipy import interpolate
    >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
    ...                                             coef_path_lars[:, ::-1])
    >>> print(coef_path_continuous([5., 1., .5]))
    [[ 0.          0.          0.46915237]
     [ 0.2159048   0.4425765   0.23668876]]
    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # Pure delegation: the Lasso path is the elastic net path with
    # l1_ratio fixed to 1 (pure L1 penalty).
    return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     fit_intercept=fit_intercept, normalize=normalize,
                     copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     return_models=return_models, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, fit_intercept=True,
              normalize=False, copy_X=True, coef_init=None,
              verbose=False, return_models=False,
              **params):
    r"""Compute elastic net path with coordinate descent
    The elastic net optimization function varies for mono and multi-outputs.
    For mono-output tasks it is::
        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
    Where::
        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Parameters
    ----------
    X : {array-like}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.
    y : ndarray, shape = (n_samples,) or (n_samples, n_outputs)
        Target values
    l1_ratio : float, optional
        float between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.
    fit_intercept : bool
        Fit or not an intercept.
        WARNING : deprecated, will be removed in 0.16.
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
        WARNING : deprecated, will be removed in 0.16.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.
    verbose : bool or integer
        Amount of verbosity.
    return_models : boolean, optional, default False
        If ``True``, the function will return list of models. Setting it
        to ``False`` will change the function output returning the values
        of the alphas and the coefficients along the path. Returning the
        model list will be removed in version 0.16.
    params : kwargs
        keyword arguments passed to the coordinate descent solver.
    Returns
    -------
    models : a list of models along the regularization path
        (Is returned if ``return_models`` is set ``True``.)
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.
        (Is returned, along with ``coefs``, when ``return_models`` is set
        to ``False``)
    coefs : array, shape (n_features, n_alphas) or
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.
        (Is returned, along with ``alphas``, when ``return_models`` is set
        to ``False``).
    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.
        (Is returned, along with ``alphas``, when ``return_models`` is set
        to ``False``).
    Notes
    -----
    See examples/plot_lasso_coordinate_descent_path.py for an example.
    Deprecation Notice: Setting ``return_models`` to ``False`` will make
    the Lasso Path return an output in the style used by :func:`lars_path`.
    This will be become the norm as of version 0.16. Leaving ``return_models``
    set to `True` will let the function return a list of models as before.
    See also
    --------
    MultiTaskElasticNet
    MultiTaskElasticNetCV
    ElasticNet
    ElasticNetCV
    """
    # --- Deprecation handling for return_models / normalize / fit_intercept.
    if return_models:
        warnings.warn("Use enet_path(return_models=False), as it returns the"
                      " coefficients and alphas instead of just a list of"
                      " models as previously `lasso_path`/`enet_path` did."
                      " `return_models` will eventually be removed in 0.16,"
                      " after which, returning alphas and coefs"
                      " will become the norm.",
                      DeprecationWarning, stacklevel=2)
    if normalize is True:
        warnings.warn("normalize param will be removed in 0.16."
                      " Intercept fitting and feature normalization will be"
                      " done in estimators.",
                      DeprecationWarning, stacklevel=2)
    else:
        # None (coming from lasso_path) is treated as "no normalization".
        normalize = False
    if fit_intercept is True or fit_intercept is None:
        warnings.warn("fit_intercept param will be removed in 0.16."
                      " Intercept fitting and feature normalization will be"
                      " done in estimators.",
                      DeprecationWarning, stacklevel=2)
    if fit_intercept is None:
        fit_intercept = True
    # Fortran order: the coordinate descent solver walks X column by column.
    X = atleast2d_or_csc(X, dtype=np.float64, order='F',
                         copy=copy_X and fit_intercept)
    n_samples, n_features = X.shape
    multi_output = False
    if y.ndim != 1:
        multi_output = True
        _, n_outputs = y.shape
    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.isspmatrix(X):
        if 'X_mean' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_mean'] / params['X_std']
        else:
            X_sparse_scaling = np.ones(n_features)
    X, y, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy=False)
    if alphas is None:
        # No need to normalize of fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered
    n_alphas = len(alphas)
    # Solver options may be forwarded through **params.
    tol = params.get('tol', 1e-4)
    positive = params.get('positive', False)
    max_iter = params.get('max_iter', 1000)
    dual_gaps = np.empty(n_alphas)
    models = []
    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=np.float64)
    else:
        coefs = np.empty((n_outputs, n_features, n_alphas),
                         dtype=np.float64)
    if coef_init is None:
        coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
    else:
        coef_ = np.asfortranarray(coef_init)
    # Walk alphas from largest to smallest; coef_ from the previous alpha
    # warm-starts the next solve.
    for i, alpha in enumerate(alphas):
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        # Dispatch to the appropriate Cython coordinate descent variant.
        if not multi_output and sparse.isspmatrix(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, positive)
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol)
        elif isinstance(precompute, np.ndarray):
            model = cd_fast.enet_coordinate_descent_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
                tol, positive)
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, positive)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like")
        coef_, dual_gap_, eps_ = model
        coefs[..., i] = coef_
        dual_gaps[i] = dual_gap_
        if dual_gap_ > eps_:
            warnings.warn('Objective did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations',
                          ConvergenceWarning)
        if return_models:
            # Deprecated output mode: wrap the raw path result into
            # estimator objects.
            if not multi_output:
                model = ElasticNet(
                    alpha=alpha, l1_ratio=l1_ratio,
                    fit_intercept=fit_intercept
                    if sparse.isspmatrix(X) else False,
                    precompute=precompute)
            else:
                model = MultiTaskElasticNet(
                    alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False)
            model.dual_gap_ = dual_gaps[i]
            model.coef_ = coefs[..., i]
            if (fit_intercept and not sparse.isspmatrix(X)) or multi_output:
                model.fit_intercept = True
                model._set_intercept(X_mean, y_mean, X_std)
            models.append(model)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')
    if return_models:
        return models
    else:
        return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
    """Linear regression with combined L1 and L2 priors as regularizer.
    Minimizes the objective function::
        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::
        a * L1 + b * L2
    where::
        alpha = a + b and l1_ratio = a / (a + b)
    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.
    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty terms. Defaults to 1.0
        See the notes for the exact mathematical meaning of this
        parameter.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.
    l1_ratio : float
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.
    fit_intercept: bool
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
    max_iter : int, optional
        The maximum number of iterations
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    tol: float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    positive: bool, optional
        When set to ``True``, forces the coefficients to be positive.
    Attributes
    ----------
    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    ``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``
    ``intercept_`` : float | array, shape = (n_targets,)
        independent term in decision function.
    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    See also
    --------
    SGDRegressor: implements elastic net regression with incremental training.
    SGDClassifier: implements logistic regression with elastic net penalty
        (``SGDClassifier(loss="log", penalty="elasticnet")``).
    """
    # Path function used by fit(); staticmethod so subclasses (Lasso) reuse it.
    path = staticmethod(enet_path)
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False):
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        # coef_ is None until fit() is called; also the warm-start sentinel.
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.intercept_ = 0.0
    def fit(self, X, y):
        """Fit model with coordinate descent.
        Parameters
        -----------
        X : ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data
        y : ndarray, shape = (n_samples,) or (n_samples, n_targets)
            Target
        Returns
        -------
        self : object
            The fitted estimator, to allow chaining.
        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)
        X = atleast2d_or_csc(X, dtype=np.float64, order='F',
                             copy=self.copy_X and self.fit_intercept)
        # From now on X can be touched inplace
        y = np.asarray(y, dtype=np.float64)
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=True)
        # Internally always work with 2-D y (one column per target).
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]
        n_samples, n_features = X.shape
        n_targets = y.shape[1]
        if not self.warm_start or self.coef_ is None:
            coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
                             order='F')
        else:
            # Warm start: reuse the previous solution as initialization.
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]
        dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
        # Solve each target independently via the path function with a
        # single-alpha grid.
        for k in xrange(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[self.alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_mean=X_mean, X_std=X_std,
                          coef_init=coef_[k], max_iter=self.max_iter)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
        self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
        self._set_intercept(X_mean, y_mean, X_std)
        # return self for chaining fit and predict calls
        return self
    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted coef """
        return sparse.csr_matrix(self.coef_)
    def decision_function(self, X):
        """Decision function of the linear model
        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
        Returns
        -------
        T : array, shape = (n_samples,)
            The predicted decision function
        """
        # Sparse X needs an explicit sparse-aware dot product; dense input
        # defers to the base class implementation.
        if sparse.isspmatrix(X):
            return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
                            + self.intercept_)
        else:
            return super(ElasticNet, self).decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)
    The optimization objective for Lasso is::
        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    Technically the Lasso model is optimizing the same objective function as
    the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.
    Attributes
    ----------
    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    ``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``
    ``intercept_`` : float | array, shape = (n_targets,)
        independent term in decision function.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute='auto', tol=0.0001,
       warm_start=False)
    >>> print(clf.coef_)
    [ 0.85  0.  ]
    >>> print(clf.intercept_)
    0.15
    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Same path function as ElasticNet; l1_ratio is pinned to 1.0 below.
    path = staticmethod(enet_path)
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute='auto', copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False):
        # A Lasso is an ElasticNet with a pure L1 penalty (l1_ratio=1.0).
        super(Lasso, self).__init__(
            alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start,
            positive=positive)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values
    train : list of indices
        The indices of the train set
    test : list of indices
        The indices of the test set
    path : callable
        function returning a list of models on the path. See
        enet_path for an example of signature
    path_params : dictionary
        Parameters passed to the path function
    alphas: array-like, optional
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2
    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies
    dtype: a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies
    Returns
    -------
    this_mses : ndarray, shape (n_alphas,)
        Mean squared error on the test fold for each alpha (averaged
        over samples and targets).
    """
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']
    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False
    # Center/scale the training fold once here, then tell the path function
    # not to repeat the work (fit_intercept/normalize set to False below).
    X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)
    path_params = path_params.copy()
    path_params['fit_intercept'] = False
    path_params['normalize'] = False
    path_params['Xy'] = Xy
    path_params['X_mean'] = X_mean
    path_params['X_std'] = X_std
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas
    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio
    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = atleast2d_or_csc(X_train, dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    del X_train, y_train
    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_mean = np.atleast_1d(y_mean)
        y_test = y_test[:, np.newaxis]
    if normalize:
        # Undo the feature scaling so coefs apply to the raw test data.
        nonzeros = np.flatnonzero(X_std)
        coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
    # Recover the intercept for every (target, alpha) pair from the
    # training-fold means.
    intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
    if sparse.issparse(X_test):
        n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matices since coefs is a 3-D numpy array.
        coefs_feature_major = np.rollaxis(coefs, 1)
        feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
        X_test_coefs = safe_sparse_dot(X_test, feature_2d)
        X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
    else:
        X_test_coefs = safe_sparse_dot(X_test, coefs)
    # residues has shape (n_test_samples, n_targets, n_alphas).
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
    return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path.

    Subclasses (LassoCV, ElasticNetCV, MultiTaskLassoCV,
    MultiTaskElasticNetCV) supply a ``path`` static method that computes
    coefficients along a grid of alphas; ``fit`` cross-validates over that
    grid (and over ``l1_ratio`` values when present) and refits the best
    model on the full data.
    """
    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False):
        # Hyper-parameters are stored unmodified; ``fit`` interprets them.
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
    def fit(self, X, y):
        """Fit linear model with coordinate descent

        Fit is on grid of alphas and best alpha estimated by cross-validation.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data. Pass directly as float64, Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values

        Returns
        -------
        self : object
            Fitted estimator with ``alpha_`` (and ``l1_ratio_`` for
            elastic-net variants) selected by cross-validation.
        """
        y = np.asarray(y, dtype=np.float64)
        # Only the elastic-net CV subclasses declare an ``l1_ratio``
        # parameter in their __init__, so this attribute check tells the
        # two families apart.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        # Choose the estimator class that will be refit at the end, and
        # validate that y's dimensionality matches the chosen family.
        if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
            if model_str == 'ElasticNet':
                model = ElasticNet()
            else:
                model = Lasso()
            if y.ndim > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%sCV" % (model_str))
        else:
            # Multi-task solvers only support dense X and 2-D y.
            if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was"
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % (model_str))
            if model_str == 'ElasticNet':
                model = MultiTaskElasticNet()
            else:
                model = MultiTaskLasso()
        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept
        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering or float64 so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = atleast2d_or_csc(X, copy=False)
            if sparse.isspmatrix(X):
                if not np.may_share_memory(reference_to_old_X.data, X.data):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            # Non-array input: conversion below already produced a private
            # copy, so downstream code never needs to copy again.
            X = atleast2d_or_csc(X, dtype=np.float64, order='F',
                                 copy=copy_X)
            copy_X = False
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            # Lasso corresponds to a fixed l1_ratio of 1.
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            # One automatic alpha grid per candidate l1_ratio.
            alphas = []
            for l1_ratio in l1_ratios:
                alphas.append(_alpha_grid(
                    X, y, l1_ratio=l1_ratio,
                    fit_intercept=self.fit_intercept,
                    eps=self.eps, n_alphas=self.n_alphas,
                    normalize=self.normalize,
                    copy_X=self.copy_X))
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})
        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if not (self.n_jobs == 1 or self.n_jobs is None):
            path_params['copy_X'] = False
        # init cross-validation generator
        cv = check_cv(self.cv, X)
        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv)
        best_mse = np.inf
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=np.float64)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(jobs)
        # Shape: (n_l1_ratio, n_folds, n_alphas); averaged over folds below.
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        mean_mse = np.mean(mse_paths, axis=1)
        # Expose per-fold errors as (n_l1_ratio, n_alphas, n_folds),
        # squeezing the l1_ratio axis away for the pure-Lasso case.
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
        # Scan all (l1_ratio, alpha) pairs for the lowest mean CV error.
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse
        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])
        # Refit the model with the parameters selected
        common_params = dict((name, value)
                             for name, value in self.get_params().items()
                             if name in model.get_params())
        model.set_params(**common_params)
        model.alpha = best_alpha
        # Harmless on Lasso models (l1_ratio is simply an unused attribute).
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        model.fit(X, y)
        # Lasso variants have no l1_ratio hyper-parameter, so do not expose
        # the (always 1) fitted value.
        if not hasattr(self, 'l1_ratio'):
            del self.l1_ratio_
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        return self
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Parameters
    ----------
    eps : float, optional
        Path length: ``eps=1e-3`` means ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        How many alphas to place along the regularization path.
    alphas : numpy array, optional
        Explicit alphas at which to compute the models; determined
        automatically when ``None``.
    precompute : True | False | 'auto' | array-like
        Whether a precomputed Gram matrix should be used to speed up
        calculations (``'auto'`` lets the estimator decide); a Gram matrix
        may also be passed directly.
    max_iter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Optimization tolerance: once updates drop below ``tol`` the dual
        gap is checked for optimality, and iteration continues until the
        gap itself is below ``tol``.
    cv : integer or cross-validation generator, optional
        Number of folds (default 3), or a cross-validation object from
        :mod:`sklearn.cross_validation`.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        CPUs used during cross validation (``-1`` uses all). Only relevant
        when multiple l1_ratio values are searched.
    positive : bool, optional
        If positive, restrict regression coefficients to be positive.

    Attributes
    ----------
    ``alpha_`` : float
        Penalization strength chosen by cross validation.
    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    ``intercept_`` : float | array, shape = (n_targets,)
        Independent term in the decision function.
    ``mse_path_`` : array, shape = (n_alphas, n_folds)
        Test-set mean square error on each fold, varying alpha.
    ``alphas_`` : numpy array, shape = (n_alphas,)
        The grid of alphas used for fitting.
    ``dual_gap_`` : numpy array, shape = (n_alphas,)
        Dual gap at the end of the optimization for the optimal alpha
        (``alpha_``).

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit
    method should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # The cross-validation machinery in LinearModelCV.fit calls this to
    # compute the coefficient path on each fold.
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False):
        # LassoCV adds no hyper-parameters of its own: everything is
        # forwarded verbatim to the LinearModelCV base class.
        hyper_params = dict(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol,
            copy_X=copy_X, cv=cv, verbose=verbose, n_jobs=n_jobs,
            positive=positive)
        super(LassoCV, self).__init__(**hyper_params)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    Parameters
    ----------
    l1_ratio : float, optional
        Mixing parameter in [0, 1] passed to ElasticNet (scaling between
        the l1 and l2 penalties): ``l1_ratio = 0`` gives a pure L2 penalty,
        ``l1_ratio = 1`` a pure L1 penalty, and intermediate values a
        combination of both. A list may be given, in which case each value
        is tested by cross-validation and the best-scoring one is kept.
        A good list usually puts more values near 1 (Lasso-like) than near
        0 (Ridge-like), e.g. ``[.1, .5, .7, .9, .95, .99, 1]``.
    eps : float, optional
        Path length: ``eps=1e-3`` means ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path, per l1_ratio.
    alphas : numpy array, optional
        Explicit alphas at which to compute the models; determined
        automatically when None.
    precompute : True | False | 'auto' | array-like
        Whether a precomputed Gram matrix should be used to speed up
        calculations (``'auto'`` lets the estimator decide); a Gram matrix
        may also be passed directly.
    max_iter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Optimization tolerance: once updates drop below ``tol`` the dual
        gap is checked for optimality, and iteration continues until the
        gap itself is below ``tol``.
    cv : integer or cross-validation generator, optional
        Number of folds (default 3), or a cross-validation object from
        :mod:`sklearn.cross_validation`.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        CPUs used during cross validation (``-1`` uses all). Only relevant
        when multiple l1_ratio values are searched.
    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    Attributes
    ----------
    ``alpha_`` : float
        Penalization strength chosen by cross validation.
    ``l1_ratio_`` : float
        The l1/l2 compromise chosen by cross validation.
    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    ``intercept_`` : float | array, shape = (n_targets, n_features)
        Independent term in the decision function.
    ``mse_path_`` : array, shape = (n_l1_ratio, n_alpha, n_folds)
        Test-set mean square error on each fold, varying l1_ratio and alpha.
    ``alphas_`` : numpy array, shape = (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit
    method should be directly passed as a Fortran-contiguous numpy array.

    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and l1_ratio = a / (a + b).

    See also
    --------
    enet_path
    ElasticNet
    """
    # Coefficient-path routine used by LinearModelCV.fit on each fold.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, positive=False):
        # Grid definition.
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        # Data preprocessing.
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        # Solver behaviour.
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.positive = positive
        # Cross-validation and parallelism.
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
    """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is a pure L1/L2 (row-wise mixed-norm)
        penalty; for l1_ratio = 0 it would reduce to a pure L2 (Frobenius)
        penalty, as can be read off the objective above.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    Attributes
    ----------
    ``intercept_`` : array, shape = (n_tasks,)
        Independent term in decision function.

    ``coef_`` : array, shape = (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is \
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
            l1_ratio=0.5, max_iter=1000, normalize=False, tol=0.0001,
            warm_start=False)
    >>> print(clf.coef_)
    [[ 0.45663524  0.45612256]
     [ 0.45663524  0.45612256]]
    >>> print(clf.intercept_)
    [ 0.0872422  0.0872422]

    See also
    --------
    ElasticNet, MultiTaskLasso

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False):
        self.l1_ratio = l1_ratio
        self.alpha = alpha
        # coef_ is None until the first fit; warm_start checks this.
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
    def fit(self, X, y):
        """Fit multi-task ElasticNet model with coordinate descent

        Parameters
        -----------
        X : ndarray, shape = (n_samples, n_features)
            Data
        y : ndarray, shape = (n_samples, n_tasks)
            Target

        Returns
        -------
        self : returns an instance of self.

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # X and y must be of type float64; Fortran ordering lets the
        # coordinate-descent loop walk columns contiguously.
        X = array2d(X, dtype=np.float64, order='F',
                    copy=self.copy_X and self.fit_intercept)
        y = np.asarray(y, dtype=np.float64)
        # NOTE(review): MultiTaskLasso sets self.l1_ratio = 1.0 in its
        # __init__, so this hasattr check always yields 'ElasticNet' here —
        # the error message below may name the wrong class for a
        # MultiTaskLasso instance; confirm intended.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if y.ndim == 1:
            raise ValueError("For mono-task outputs, use %s" % model_str)
        n_samples, n_features = X.shape
        _, n_tasks = y.shape
        if n_samples != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (n_samples, y.shape[0]))
        # Center (and optionally normalize) in place; the copy above
        # protects the caller's array when required.
        X, y, X_mean, y_mean, X_std = center_data(
            X, y, self.fit_intercept, self.normalize, copy=False)
        if not self.warm_start or self.coef_ is None:
            self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
                                  order='F')
        # Regularization constants are rescaled by n_samples because the
        # Cython solver works on the unnormalized least-squares objective.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory
        self.coef_, self.dual_gap_, self.eps_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol)
        self._set_intercept(X_mean, y_mean, X_std)
        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')
        # return self for chaining fit and predict calls
        return self
class MultiTaskLasso(MultiTaskElasticNet):
    """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set to
        false, no intercept will be used in calculations (e.g. the data
        is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations.
    tol : float, optional
        Optimization tolerance: once updates drop below ``tol`` the dual
        gap is checked for optimality, and iteration continues until the
        gap itself is below ``tol``.
    warm_start : bool, optional
        When ``True``, reuse the solution of the previous call to fit as
        initialization; otherwise, just erase the previous solution.

    Attributes
    ----------
    ``coef_`` : array, shape = (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
    ``intercept_`` : array, shape = (n_tasks,)
        Independent term in the decision function.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
            normalize=False, tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [[ 0.89393398  0.        ]
     [ 0.89393398  0.        ]]
    >>> print(clf.intercept_)
    [ 0.10606602  0.10606602]

    See also
    --------
    Lasso, MultiTaskElasticNet

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.warm_start = warm_start
        # No solution yet; warm_start inspects this in the parent's fit().
        self.coef_ = None
        # A multi-task Lasso is the l1_ratio == 1 corner of the multi-task
        # elastic net: fixing the ratio here lets the parent's fit() do all
        # of the work.
        self.l1_ratio = 1.0
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
    """Multi-task L1/L2 ElasticNet with built-in cross-validation.

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.

    n_alphas : int, optional
        Number of alphas along the regularization path

    l1_ratio : float or array of floats
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is a pure L1/L2 (row-wise mixed-norm)
        penalty; for l1_ratio = 0 it would reduce to a pure L2 (Frobenius)
        penalty, as can be read off the objective above.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.

    Attributes
    ----------
    ``intercept_`` : array, shape (n_tasks,)
        Independent term in decision function.

    ``coef_`` : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).

    ``alpha_`` : float
        The amount of penalization chosen by cross validation

    ``mse_path_`` : array, shape (n_alphas, n_folds) or
                    (n_l1_ratio, n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    ``alphas_`` : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio

    ``l1_ratio_`` : float
        best l1_ratio obtained by cross-validation.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNetCV()
    >>> clf.fit([[0,0], [1, 1], [2, 2]],
    ...         [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
           fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
           n_jobs=1, normalize=False, tol=0.0001, verbose=0)
    >>> print(clf.coef_)
    [[ 0.52875032  0.46958558]
     [ 0.52875032  0.46958558]]
    >>> print(clf.intercept_)
    [ 0.00166409  0.00166409]

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskLassoCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Coefficient-path routine used by LinearModelCV.fit on each fold.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False,
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1):
        # Note: unlike the mono-task CV estimators, no 'precompute' or
        # 'positive' parameters are exposed here (the multi-task solver
        # does not support them).
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
    """Multi-task L1/L2 Lasso with built-in cross-validation.

    The optimization objective for MultiTaskLasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.

    n_alphas : int, optional
        Number of alphas along the regularization path

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.

    Attributes
    ----------
    ``intercept_`` : array, shape (n_tasks,)
        Independent term in decision function.

    ``coef_`` : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).

    ``alpha_`` : float
        The amount of penalization chosen by cross validation

    ``mse_path_`` : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    ``alphas_`` : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting.

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskElasticNetCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Coefficient-path routine used by LinearModelCV.fit on each fold.
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
                 cv=None, verbose=False, n_jobs=1):
        # 'precompute' and 'positive' are deliberately not forwarded: the
        # multi-task solver does not support them, so the base-class
        # defaults are used.
        super(MultiTaskLassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs)
| {
"repo_name": "chaluemwut/fbserver",
"path": "venv/lib/python2.7/site-packages/sklearn/linear_model/coordinate_descent.py",
"copies": "2",
"size": "67549",
"license": "apache-2.0",
"hash": -196371892571954530,
"line_mean": 35.1031533939,
"line_max": 79,
"alpha_frac": 0.5896904469,
"autogenerated": false,
"ratio": 3.918382736817681,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00020661555745926213,
"num_lines": 1871
} |
import sys
import warnings
import itertools
import operator
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data
from ..utils import array2d, atleast2d_or_csc, deprecated
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : bool
Fit or not an intercept
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
if Xy is None:
X = atleast2d_or_csc(X, copy=(copy_X and fit_intercept and not
sparse.isspmatrix(X)))
if not sparse.isspmatrix(X):
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
n_samples = X.shape[0]
else:
n_samples = len(y)
alpha_max = np.abs(Xy).max() / (n_samples * l1_ratio)
alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
return alphas
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, fit_intercept=None,
               normalize=None, copy_X=True, verbose=False, return_models=True,
               **params):
    """Compute Lasso path with coordinate descent

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    This is a thin convenience wrapper: the Lasso is the ``l1_ratio=1``
    special case of the elastic net, so all work is delegated to
    :func:`enet_path`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication.
    y : ndarray, shape = (n_samples,)
        Target values.
    eps : float, optional
        Path length: ``eps=1e-3`` means ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path.
    alphas : ndarray, optional
        Explicit alphas at which to compute the models; set automatically
        when ``None``.
    precompute : True | False | 'auto' | array-like
        Whether a precomputed Gram matrix should be used to speed up
        calculations (``'auto'`` lets the function decide); a Gram matrix
        may also be passed directly.
    Xy : array-like, optional
        Precomputed ``np.dot(X.T, y)``; useful only when the Gram matrix
        is precomputed as well.
    fit_intercept : bool
        Fit or not an intercept.
        WARNING : will be deprecated in 0.15
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
        WARNING : will be deprecated in 0.15
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    verbose : bool or integer
        Amount of verbosity.
    return_models : boolean, optional, default True
        If ``True``, return a list of fitted models (removed in 0.15).
        If ``False``, return ``(alphas, coefs, dual_gaps)`` instead.
    params : kwargs
        Keyword arguments passed on to the Lasso objects.

    Returns
    -------
    models : a list of models along the regularization path
        (Returned when ``return_models`` is ``True``, the default.)
    alphas : array, shape: [n_alphas + 1]
        The alphas along the path where models are computed.
        (Returned, with ``coefs``, when ``return_models`` is ``False``.)
    coefs : shape (n_features, n_alphas + 1)
        Coefficients along the path.
        (Returned, with ``alphas``, when ``return_models`` is ``False``.)
    dual_gaps : shape (n_alphas + 1)
        The dual gaps at the end of the optimization for each alpha.
        (Returned, with ``alphas``, when ``return_models`` is ``False``.)

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit
    method should be directly passed as a Fortran-contiguous numpy array.

    Note that in certain cases, the Lars solver may be significantly
    faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficents between the
    values output by lars_path.

    Deprecation Notice: Setting ``return_models`` to ``False`` will make
    the Lasso Path return an output in the style used by :func:`lars_path`.
    This will be become the norm as of version 0.15. Leaving
    ``return_models`` set to `True` will let the function return a list of
    models as before.

    Examples
    ---------

    Comparing lasso_path and lars_path with interpolation:

    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> # Use lasso_path to compute a coefficient path
    >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5],
    ...                              return_models=False, fit_intercept=False)
    >>> print(coef_path)
    [[ 0.          0.          0.46874778]
     [ 0.2159048   0.4425765   0.23689075]]

    >>> # Now use lars_path and 1D linear interpolation to compute the
    >>> # same path
    >>> from sklearn.linear_model import lars_path
    >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
    >>> from scipy import interpolate
    >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
    ...                                             coef_path_lars[:, ::-1])
    >>> print(coef_path_continuous([5., 1., .5]))
    [[ 0.          0.          0.46915237]
     [ 0.2159048   0.4425765   0.23668876]]

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # Delegate to the elastic-net path with the L1-only mixing ratio.
    return enet_path(
        X, y,
        l1_ratio=1.,
        eps=eps,
        n_alphas=n_alphas,
        alphas=alphas,
        precompute=precompute,
        Xy=Xy,
        fit_intercept=fit_intercept,
        normalize=normalize,
        copy_X=copy_X,
        verbose=verbose,
        return_models=return_models,
        **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, fit_intercept=True,
              normalize=False, copy_X=True, verbose=False, rho=None,
              return_models=True, **params):
    """Compute Elastic-Net path with coordinate descent

    The Elastic Net optimization function is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication

    y : ndarray, shape = (n_samples,)
        Target values

    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso

    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    fit_intercept : bool
        Fit or not an intercept.
        WARNING : will be deprecated in 0.15

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
        WARNING : will be deprecated in 0.15

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    verbose : bool or integer
        Amount of verbosity

    return_models : boolean, optional, default True
        If ``True``, the function will return list of models. Setting it
        to ``False`` will change the function output returning the values
        of the alphas and the coefficients along the path. Returning the
        model list will be removed in version 0.15.

    params : kwargs
        keyword arguments passed to the Lasso objects

    Returns
    -------
    models : a list of models along the regularization path
        (Is returned if ``return_models`` is set ``True`` (default).

    alphas : array, shape: [n_alphas + 1]
        The alphas along the path where models are computed.
        (Is returned, along with ``coefs``, when ``return_models`` is set
        to ``False``)

    coefs : shape (n_features, n_alphas + 1)
        Coefficients along the path.
        (Is returned, along with ``alphas``, when ``return_models`` is set
        to ``False``).

    dual_gaps : shape (n_alphas + 1)
        The dual gaps at the end of the optimization for each alpha.
        (Is returned, along with ``alphas``, when ``return_models`` is set
        to ``False``).

    Notes
    -----
    See examples/plot_lasso_coordinate_descent_path.py for an example.

    Deprecation Notice: Setting ``return_models`` to ``False`` will make
    the Lasso Path return an output in the style used by :func:`lars_path`.
    This will become the norm as of version 0.15. Leaving ``return_models``
    set to `True` will let the function return a list of models as before.

    See also
    --------
    ElasticNet
    ElasticNetCV
    """
    if return_models:
        warnings.warn("Use enet_path(return_models=False), as it returns the"
                      " coefficients and alphas instead of just a list of"
                      " models as previously `lasso_path`/`enet_path` did."
                      " `return_models` will eventually be removed in 0.15,"
                      " after which, returning alphas and coefs"
                      " will become the norm.",
                      DeprecationWarning, stacklevel=2)
    if normalize is True:
        warnings.warn("normalize param will be removed in 0.15."
                      " Intercept fitting and feature normalization will be"
                      " done in estimators.",
                      DeprecationWarning, stacklevel=2)
    else:
        normalize = False
    if fit_intercept is True or fit_intercept is None:
        warnings.warn("fit_intercept param will be removed in 0.15."
                      " Intercept fitting and feature normalization will be"
                      " done in estimators.",
                      DeprecationWarning, stacklevel=2)
    if fit_intercept is None:
        # Preserve the historical default while the parameter is deprecated.
        fit_intercept = True
    if rho is not None:
        l1_ratio = rho
        warnings.warn("rho was renamed to l1_ratio and will be removed "
                      "in 0.15", DeprecationWarning)

    # The coordinate-descent solver iterates over columns: ask for
    # Fortran-contiguous float64 data to avoid copies inside the solver.
    X = atleast2d_or_csc(X, dtype=np.float64, order='F',
                         copy=copy_X and fit_intercept)
    n_samples, n_features = X.shape

    if sparse.isspmatrix(X):
        if 'X_mean' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_mean'] / params['X_std']
        else:
            X_sparse_scaling = np.ones(n_features)

    X, y, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy=False)
    n_samples = X.shape[0]

    if alphas is None:
        # No need to normalize of fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered
    n_alphas = len(alphas)

    # Warm start: coef_ from the previous (larger) alpha seeds the next one.
    coef_ = np.zeros(n_features, dtype=np.float64)
    models = []
    coefs = np.empty((n_features, n_alphas), dtype=np.float64)
    dual_gaps = np.empty(n_alphas)

    tol = params.get('tol', 1e-4)
    positive = params.get('positive', False)
    max_iter = params.get('max_iter', 1000)

    for i, alpha in enumerate(alphas):
        # The cython solvers expect the penalties scaled by n_samples.
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        if sparse.isspmatrix(X):
            coef_, dual_gap_, eps_ = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, positive)
        else:
            coef_, dual_gap_, eps_ = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, positive)
        if dual_gap_ > eps_:
            warnings.warn('Objective did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations')
        coefs[:, i] = coef_
        dual_gaps[i] = dual_gap_
        if return_models:
            model = ElasticNet(
                alpha=alpha, l1_ratio=l1_ratio,
                fit_intercept=fit_intercept if sparse.isspmatrix(X) else False,
                precompute=precompute)
            model.coef_ = coefs[:, i]
            # Bug fix: store the dual gap of *this* alpha. The previous code
            # read ``dual_gaps[-1]``, which is uninitialized memory
            # (``np.empty``) for every iteration except the last one.
            model.dual_gap_ = dual_gaps[i]
            if fit_intercept and not sparse.isspmatrix(X):
                model.fit_intercept = True
                model._set_intercept(X_mean, y_mean, X_std)
            models.append(model)
        if verbose:
            if verbose > 2 and return_models:
                # ``model`` only exists when models are collected; without
                # this guard verbose > 2 raised NameError when
                # return_models=False.
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')

    if return_models:
        return models
    else:
        return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
    """Linear Model trained with L1 and L2 prior as regularizer

    Minimizes the objective function::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    where::

        alpha = a + b and l1_ratio = a / (a + b)

    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty terms. Defaults to 1.0
        See the notes for the exact mathematical meaning of this
        parameter.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.

    l1_ratio : float
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.

    max_iter : int, optional
        The maximum number of iterations

    copy_X : boolean, optional, default False
        If ``True``, X will be copied; else, it may be overwritten.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    Attributes
    ----------
    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    ``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``

    ``intercept_`` : float | array, shape = (n_targets,)
        independent term in decision function.

    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Path solver used by ``fit``; overridden in subclasses (e.g. Lasso keeps
    # enet_path but pins l1_ratio=1.0 in __init__).
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False,
                 rho=None):
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        # ``rho`` is the deprecated spelling of ``l1_ratio``; it wins when
        # explicitly given so legacy callers keep their behavior.
        if rho is not None:
            self.l1_ratio = rho
            warnings.warn("rho was renamed to l1_ratio and will be removed "
                          "in 0.15", DeprecationWarning)
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        # Default intercept until ``fit`` computes the real one.
        self.intercept_ = 0.0

    def fit(self, X, y, Xy=None, coef_init=None):
        """Fit model with coordinate descent.

        Parameters
        -----------
        X : ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data

        y : ndarray, shape = (n_samples,) or (n_samples, n_targets)
            Target

        Xy : array-like, optional
            Xy = np.dot(X.T, y) that can be precomputed. It is useful
            only when the Gram matrix is precomputed.
            WARNING : ignored and will be deprecated in 0.15

        coef_init : ndarray of shape n_features or (n_targets, n_features)
            The initial coeffients to warm-start the optimization
            WARNING : ignored and will be deprecated in 0.15

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        if Xy is not None:
            warnings.warn("Xy param is now ignored and will be removed in "
                          "0.15. See enet_path function.",
                          DeprecationWarning, stacklevel=2)
        if coef_init is not None:
            warnings.warn("coef_init is now ignored and will be removed in "
                          "0.15. See enet_path function.",
                          DeprecationWarning, stacklevel=2)
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)
        # Copy only when we are about to center the data in place.
        X = atleast2d_or_csc(X, dtype=np.float64, order='F',
                             copy=self.copy_X and self.fit_intercept)
        # From now on X can be touched inplace
        y = np.asarray(y, dtype=np.float64)
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, Xy, self.precompute, self.normalize,
                     self.fit_intercept, copy=True)
        # Promote 1-D targets to a single-column 2-D view so the per-target
        # loop below handles both cases uniformly.
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]

        n_samples, n_features = X.shape
        n_targets = y.shape[1]

        coef_ = np.zeros((n_targets, n_features), dtype=np.float64)
        dual_gaps_ = np.zeros(n_targets, dtype=np.float64)

        # One independent coordinate-descent solve per output column, each
        # run on a "path" of exactly one alpha (this model's alpha).
        for k in xrange(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[self.alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          return_models=False, X_mean=X_mean, X_std=X_std)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]

        # For a single target squeeze back to the 1-D / scalar form.
        self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
        self._set_intercept(X_mean, y_mean, X_std)

        # return self for chaining fit and predict calls
        return self

    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted coef """
        return sparse.csr_matrix(self.coef_)

    def decision_function(self, X):
        """Decision function of the linear model

        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)

        Returns
        -------
        T : array, shape = (n_samples,)
            The predicted decision function
        """
        # Sparse inputs need safe_sparse_dot; dense inputs reuse the
        # LinearModel implementation.
        if sparse.isspmatrix(X):
            return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
                            + self.intercept_)
        else:
            return super(ElasticNet, self).decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Technically the Lasso model is optimizing the same objective function as
    the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    Attributes
    ----------
    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    ``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``

    ``intercept_`` : float | array, shape = (n_targets,)
        independent term in decision function.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute='auto', tol=0.0001,
       warm_start=False)
    >>> print(clf.coef_)
    [ 0.85  0.  ]
    >>> print(clf.intercept_)
    0.15

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Lasso reuses the ElasticNet machinery; it is simply ElasticNet with
    # l1_ratio pinned to 1.0 (pure L1 penalty).
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute='auto', copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False):
        super(Lasso, self).__init__(
            alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start,
            positive=positive)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, l1_ratio=1,
                    X_order=None, dtype=None):
    """Return the per-alpha MSE on one CV fold for the models along `path`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : narray, shape (n_samples,) or (n_samples, n_targets)
        Target values

    train : list of indices
        The indices of the train set

    test : list of indices
        The indices of the test set

    path : callable
        function returning a list of models on the path. See
        enet_path for an example of signature

    path_params : dictionary
        Parameters passed to the path function

    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2

    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies

    dtype : a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies
    """
    # Split the fold.
    X_sub = X[train]
    y_sub = y[train]
    X_held = X[test]
    y_held = y[test]

    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']
    precompute = path_params['precompute']

    # Center / normalize the training part once, up front; the path
    # function is then told *not* to redo it.
    X_sub, y_sub, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X_sub, y_sub, None, precompute, normalize,
                 fit_intercept, copy=False)

    # Work on a private copy so the caller's dict is never mutated.
    path_params = dict(path_params)
    path_params.update({
        'return_models': False,
        'fit_intercept': False,
        'normalize': False,
        'Xy': Xy,
        'X_mean': X_mean,
        'X_std': X_std,
        'precompute': precompute,
        'copy_X': False,
    })
    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio

    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_sub = atleast2d_or_csc(X_sub, dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_sub, y[train], **path_params)
    del X_sub

    # Undo the normalization so the coefficients apply to raw held-out data.
    if normalize:
        nonzeros = np.flatnonzero(X_std)
        coefs[nonzeros] /= X_std[nonzeros][:, np.newaxis]

    # Intercept per alpha, then residuals on the held-out fold.
    intercepts = y_mean - np.dot(X_mean, coefs)
    residues = safe_sparse_dot(X_held, coefs) - y_held[:, np.newaxis]
    residues += intercepts[np.newaxis, :]

    return (residues ** 2).mean(axis=0), l1_ratio
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path.

    Subclasses provide a ``path`` static method (e.g. lasso_path/enet_path)
    and an ``n_jobs`` setting; ``fit`` cross-validates over the alpha grid
    (and, for ElasticNetCV, over l1_ratio) and refits the best model.
    """

    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False):
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose

    def fit(self, X, y):
        """Fit linear model with coordinate descent

        Fit is on grid of alphas and best alpha estimated by cross-validation.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data. Pass directly as float64, Fortran-contiguous data
            to avoid unnecessary memory duplication

        y : narray, shape (n_samples,) or (n_samples, n_targets)
            Target values
        """
        # Dealing right with copy_X is important in the following:
        # multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept

        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering or float64 so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = atleast2d_or_csc(X, copy=False)
            if sparse.isspmatrix(X):
                if not np.may_share_memory(reference_to_old_X.data, X.data):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            # Non-array input: conversion already produced a fresh copy,
            # so downstream code may overwrite it freely.
            X = atleast2d_or_csc(X, dtype=np.float64, order='F',
                                 copy=copy_X)
            copy_X = False

        y = np.asarray(y, dtype=np.float64)
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))

        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            # Plain Lasso CV: a single, implicit l1_ratio of 1.
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)

        alphas = self.alphas
        if alphas is None:
            # Build the alpha grid from the data; use the mean l1_ratio
            # when several are cross-validated.
            mean_l1_ratio = 1.
            if hasattr(self, 'l1_ratio'):
                mean_l1_ratio = np.mean(self.l1_ratio)
            alphas = _alpha_grid(X, y, l1_ratio=mean_l1_ratio,
                                 fit_intercept=self.fit_intercept,
                                 eps=self.eps, n_alphas=self.n_alphas,
                                 normalize=self.normalize,
                                 copy_X=self.copy_X)
        n_alphas = len(alphas)
        path_params.update({'alphas': alphas, 'n_alphas': n_alphas})

        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        # NOTE: ``self.n_jobs`` is supplied by subclasses (class attribute
        # on LassoCV, __init__ parameter on ElasticNetCV).
        if not (self.n_jobs == 1 or self.n_jobs is None):
            path_params['copy_X'] = False

        # init cross-validation generator
        cv = check_cv(self.cv, X)

        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv)
        best_mse = np.inf
        all_mse_paths = list()

        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        # _path_residuals returns (mse_per_alpha, l1_ratio); jobs are emitted
        # ordered by l1_ratio, so groupby on item 1 regroups each l1_ratio's
        # consecutive fold results.
        for l1_ratio, mse_alphas in itertools.groupby(
                Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
                    delayed(_path_residuals)(
                        X, y, train, test, self.path, path_params,
                        l1_ratio=l1_ratio, X_order='F',
                        dtype=np.float64)
                    for l1_ratio in l1_ratios for train, test in folds
                ), operator.itemgetter(1)):

            mse_alphas = [m[0] for m in mse_alphas]
            mse_alphas = np.array(mse_alphas)
            mse = np.mean(mse_alphas, axis=0)
            i_best_alpha = np.argmin(mse)
            this_best_mse = mse[i_best_alpha]
            all_mse_paths.append(mse_alphas.T)
            if this_best_mse < best_mse:
                best_alpha = alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse

        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        self.alphas_ = np.asarray(alphas)
        self.mse_path_ = np.squeeze(all_mse_paths)

        # Refit the model with the parameters selected
        # (only the hyper-parameters this estimator shares with ElasticNet
        # are forwarded).
        model = ElasticNet()
        common_params = dict((name, value)
                             for name, value in self.get_params().items()
                             if name in model.get_params())
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        model.fit(X, y)
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        return self

    @property
    def rho_(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn("rho was renamed to ``l1_ratio_`` and will be removed "
                      "in 0.15", DeprecationWarning)
        return self.l1_ratio_
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : integer or crossvalidation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific crossvalidation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.

    verbose : bool or integer
        amount of verbosity

    Attributes
    ----------
    ``alpha_`` : float
        The amount of penalization chosen by cross validation

    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    ``intercept_`` : float | array, shape = (n_targets,)
        independent term in decision function.

    ``mse_path_`` : array, shape = (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    ``alphas_`` : numpy array
        The grid of alphas used for fitting

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # Path solver used by LinearModelCV.fit for every CV fold.
    path = staticmethod(lasso_path)
    # With a single (implicit) l1_ratio of 1.0 there is nothing to
    # parallelize over, so the CV loop always runs sequentially.
    n_jobs = 1

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False):
        # Everything is forwarded unchanged to the common CV base class.
        super(LassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol,
            copy_X=copy_X, cv=cv, verbose=verbose)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    Parameters
    ----------
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0``
        the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : integer or crossvalidation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific crossvalidation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.

    verbose : bool or integer
        amount of verbosity

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.

    Attributes
    ----------
    ``alpha_`` : float
        The amount of penalization chosen by cross validation

    ``l1_ratio_`` : float
        The compromise between l1 and l2 penalization chosen by
        cross validation

    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula),

    ``intercept_`` : float | array, shape = (n_targets,)
        Independent term in the decision function.

    ``mse_path_`` : array, shape = (n_l1_ratio, n_alpha, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and l1_ratio = a / (a + b).

    See also
    --------
    enet_path
    ElasticNet
    """
    # Path solver used by LinearModelCV.fit for every CV fold.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, rho=None):
        self.l1_ratio = l1_ratio
        # ``rho`` is the deprecated spelling of ``l1_ratio``; it wins when
        # explicitly given so legacy callers keep their behavior.
        if rho is not None:
            self.l1_ratio = rho
            warnings.warn("rho was renamed to l1_ratio and will be removed "
                          "in 0.15", DeprecationWarning)
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs

    # Deprecated read-only alias for ``l1_ratio_``.
    @property
    @deprecated("rho was renamed to ``l1_ratio_`` and will be removed "
                "in 0.15")
    def rho(self):
        return self.l1_ratio_
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of earch row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
``intercept_`` : array, shape = (n_tasks,)
Independent term in decision function.
``coef_`` : array, shape = (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, rho=None, tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, rho=None):
self.l1_ratio = l1_ratio
if rho is not None:
self.l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
    def fit(self, X, y, Xy=None, coef_init=None):
        """Fit MultiTaskLasso model with coordinate descent

        Parameters
        -----------
        X: ndarray, shape = (n_samples, n_features)
            Data
        y: ndarray, shape = (n_samples, n_tasks)
            Target
        Xy: array-like, optional
            Not used by this method; present for signature compatibility.
        coef_init: ndarray of shape n_features
            The initial coefficients to warm-start the optimization

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # X and y must be of type float64
        X = array2d(X, dtype=np.float64, order='F',
                    copy=self.copy_X and self.fit_intercept)
        y = np.asarray(y, dtype=np.float64)

        # Accept 1D y (single-task usage) by promoting it to a column
        # vector; remembered so coef_ can be squeezed back to 1D after
        # fitting, keeping predictions shaped like the input y.
        squeeze_me = False
        if y.ndim == 1:
            squeeze_me = True
            y = y[:, np.newaxis]

        n_samples, n_features = X.shape
        _, n_tasks = y.shape

        X, y, X_mean, y_mean, X_std = center_data(
            X, y, self.fit_intercept, self.normalize, copy=False)

        # Start from zeros unless warm-starting from a previous fit or an
        # explicit coef_init was given.
        if coef_init is None:
            if not self.warm_start or self.coef_ is None:
                self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
                                      order='F')
        else:
            self.coef_ = coef_init

        # Scale the penalties by n_samples because the Cython solver
        # minimizes the un-normalized objective.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples

        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory

        self.coef_, self.dual_gap_, self.eps_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol)

        self._set_intercept(X_mean, y_mean, X_std)

        # Make sure that the coef_ have the same shape as the given 'y',
        # to predict with the same shape
        if squeeze_me:
            self.coef_ = self.coef_.squeeze()

        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')

        # return self for chaining fit and predict calls
        return self
class MultiTaskLasso(MultiTaskElasticNet):
    """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    Attributes
    ----------
    ``coef_`` : array, shape = (n_tasks, n_features)
        parameter vector (W in the cost function formula)

    ``intercept_`` : array, shape = (n_tasks,)
        independent term in decision function.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
            normalize=False, tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [[ 0.89393398  0.        ]
     [ 0.89393398  0.        ]]
    >>> print(clf.intercept_)
    [ 0.10606602  0.10606602]

    See also
    --------
    Lasso, MultiTaskElasticNet

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False):
        self.alpha = alpha
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        # Pin the mixing parameter to 1.0 so the inherited
        # MultiTaskElasticNet.fit applies a pure L1/L2 mixed-norm penalty
        # (no plain L2 component); fit itself is inherited unchanged.
        self.l1_ratio = 1.0
| {
"repo_name": "fspaolo/scikit-learn",
"path": "sklearn/linear_model/coordinate_descent.py",
"copies": "1",
"size": "54482",
"license": "bsd-3-clause",
"hash": -7032359904602488000,
"line_mean": 35.0569159497,
"line_max": 79,
"alpha_frac": 0.5900297346,
"autogenerated": false,
"ratio": 3.947398927691639,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00008103837166898527,
"num_lines": 1511
} |
import sys
import warnings
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from joblib import Parallel, delayed, effective_n_jobs
from ._base import LinearModel, _pre_fit
from ..base import RegressorMixin, MultiOutputMixin
from ._base import _preprocess_data
from ..utils import check_array
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..utils.extmath import safe_sparse_dot
from ..utils.fixes import _astype_copy_false, _joblib_parallel_args
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.validation import column_or_1d
from ..utils.validation import _deprecate_positional_args
# mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast'
from . import _cd_fast as cd_fast # type: ignore
def _set_order(X, y, order='C'):
"""Change the order of X and y if necessary.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
order : {None, 'C', 'F'}
If 'C', dense arrays are returned as C-ordered, sparse matrices in csr
format. If 'F', dense arrays are return as F-ordered, sparse matrices
in csc format.
Returns
-------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data with guaranteed order.
y : ndarray of shape (n_samples,)
Target values with guaranteed order.
"""
if order not in [None, 'C', 'F']:
raise ValueError("Unknown value for order. Got {} instead of "
"None, 'C' or 'F'.".format(order))
sparse_X = sparse.issparse(X)
sparse_y = sparse.issparse(y)
if order is not None:
sparse_format = "csc" if order == "F" else "csr"
if sparse_X:
# As of scipy 1.1.0, new argument copy=False by default.
# This is what we want.
X = X.asformat(sparse_format, **_astype_copy_false(X))
else:
X = np.asarray(X, order=order)
if sparse_y:
y = y.asformat(sparse_format)
else:
y = np.asarray(y, order=order)
return X, y
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray of shape (n_samples,)
Target values
Xy : array-like of shape (n_features,), default=None
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float, default=1.0
The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not
supported) ``For l1_ratio = 1`` it is an L1 penalty. For
``0 < l1_ratio <1``, the penalty is a combination of L1 and L2.
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, default=100
Number of alphas along the regularization path
fit_intercept : bool, default=True
Whether to fit an intercept or not
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
"""
if l1_ratio == 0:
raise ValueError("Automatic alpha grid generation is not supported for"
" l1_ratio=0. Please supply a grid by providing "
"your estimator with the appropriate `alphas=` "
"argument.")
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, accept_sparse='csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize,
return_mean=True)
mean_dot = X_offset * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_scale[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
@_deprecate_positional_args
def lasso_path(X, y, *, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, copy_X=True, coef_init=None,
               verbose=False, return_n_iter=False, positive=False, **params):
    """Compute the Lasso path with coordinate descent.

    The optimization objective for the mono-output Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    and for multi-output tasks::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    where ``||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}``, i.e. the sum of
    the norms of each row.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : {array-like, sparse matrix} of shape (n_samples,) or \
            (n_samples, n_outputs)
        Target values.

    eps : float, default=1e-3
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, default=100
        Number of alphas along the regularization path.

    alphas : ndarray, default=None
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically.

    precompute : 'auto', bool or array-like of shape \
            (n_features, n_features), default='auto'
        Whether to use a precomputed Gram matrix to speed up calculations.
        If set to ``'auto'`` let us decide. The Gram matrix can also be
        passed as argument.

    Xy : array-like of shape (n_features,) or (n_features, n_outputs),\
            default=None
        Xy = np.dot(X.T, y) that can be precomputed. It is useful only
        when the Gram matrix is precomputed.

    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : ndarray of shape (n_features, ), default=None
        The initial values of the coefficients.

    verbose : bool or int, default=False
        Amount of verbosity.

    return_n_iter : bool, default=False
        Whether to return the number of iterations or not.

    positive : bool, default=False
        If set to True, forces coefficients to be positive.
        (Only allowed when ``y.ndim == 1``).

    **params : kwargs
        Keyword arguments passed to the coordinate descent solver.

    Returns
    -------
    alphas : ndarray of shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : ndarray of shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : ndarray of shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : list of int
        The number of iterations taken by the coordinate descent optimizer
        to reach the specified tolerance for each alpha.
        (Returned only when ``return_n_iter`` is set to True.)

    See Also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    For an example, see
    :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.

    To avoid unnecessary memory duplication the X argument of the fit
    method should be directly passed as a Fortran-contiguous numpy array.

    Note that in certain cases, the Lars solver may be significantly
    faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path.
    """
    # The Lasso objective is the elastic net objective with a pure L1
    # penalty, so the whole computation is delegated to enet_path with
    # the mixing parameter pinned at l1_ratio=1.
    return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     positive=positive, return_n_iter=return_n_iter,
                     **params)
@_deprecate_positional_args
def enet_path(X, y, *, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, copy_X=True, coef_init=None,
              verbose=False, return_n_iter=False, positive=False,
              check_input=True, **params):
    """
    Compute elastic net path with coordinate descent.

    The elastic net optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : {array-like, sparse matrix} of shape (n_samples,) or \
            (n_samples, n_outputs)
        Target values.

    l1_ratio : float, default=0.5
        Number between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso.

    eps : float, default=1e-3
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, default=100
        Number of alphas along the regularization path.

    alphas : ndarray, default=None
        List of alphas where to compute the models.
        If None alphas are set automatically.

    precompute : 'auto', bool or array-like of shape \
            (n_features, n_features), default='auto'
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like of shape (n_features,) or (n_features, n_outputs),\
            default=None
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : ndarray of shape (n_features, ), default=None
        The initial values of the coefficients.

    verbose : bool or int, default=False
        Amount of verbosity.

    return_n_iter : bool, default=False
        Whether to return the number of iterations or not.

    positive : bool, default=False
        If set to True, forces coefficients to be positive.
        (Only allowed when ``y.ndim == 1``).

    check_input : bool, default=True
        If set to False, the input validation checks are skipped (including the
        Gram matrix when provided). It is assumed that they are handled
        by the caller.

    **params : kwargs
        Keyword arguments passed to the coordinate descent solver.

    Returns
    -------
    alphas : ndarray of shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : ndarray of shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : ndarray of shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : list of int
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).

    See Also
    --------
    MultiTaskElasticNet
    MultiTaskElasticNetCV
    ElasticNet
    ElasticNetCV

    Notes
    -----
    For an example, see
    :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.
    """
    # We expect X and y to be already Fortran ordered when bypassing
    # checks
    if check_input:
        X = check_array(X, accept_sparse='csc', dtype=[np.float64, np.float32],
                        order='F', copy=copy_X)
        y = check_array(y, accept_sparse='csc', dtype=X.dtype.type,
                        order='F', copy=False, ensure_2d=False)
        if Xy is not None:
            # Xy should be a 1d contiguous array or a 2D C ordered array
            Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
                             ensure_2d=False)

    n_samples, n_features = X.shape

    multi_output = False
    if y.ndim != 1:
        multi_output = True
        _, n_outputs = y.shape

    if multi_output and positive:
        raise ValueError('positive=True is not allowed for multi-output'
                         ' (y.ndim != 1)')

    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.isspmatrix(X):
        if 'X_offset' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_offset'] / params['X_scale']
            X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
        else:
            X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)

    # X should be normalized and fit already if function is called
    # from ElasticNet.fit
    if check_input:
        X, y, X_offset, y_offset, X_scale, precompute, Xy = \
            _pre_fit(X, y, Xy, precompute, normalize=False,
                     fit_intercept=False, copy=False, check_input=check_input)
    if alphas is None:
        # No need to normalize or fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered

    n_alphas = len(alphas)
    tol = params.get('tol', 1e-4)
    max_iter = params.get('max_iter', 1000)
    dual_gaps = np.empty(n_alphas)
    n_iters = []

    # Random feature selection needs a seeded RNG; 'cyclic' sweeps features
    # in order.
    rng = check_random_state(params.get('random_state', None))
    selection = params.get('selection', 'cyclic')
    if selection not in ['random', 'cyclic']:
        raise ValueError("selection should be either random or cyclic.")
    random = (selection == 'random')

    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
    else:
        coefs = np.empty((n_outputs, n_features, n_alphas),
                         dtype=X.dtype)

    if coef_init is None:
        coef_ = np.zeros(coefs.shape[:-1], dtype=X.dtype, order='F')
    else:
        coef_ = np.asfortranarray(coef_init, dtype=X.dtype)

    # coef_ is warm-started from one alpha to the next (alphas are sorted
    # decreasing), which makes the whole path much cheaper than
    # independent fits.
    for i, alpha in enumerate(alphas):
        # Penalties are scaled by n_samples because the Cython solvers
        # minimize the un-normalized objective.
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        if not multi_output and sparse.isspmatrix(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, rng, random, positive)
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
        elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already Fortran ordered when bypassing
            # checks
            if check_input:
                precompute = check_array(precompute, dtype=X.dtype.type,
                                         order='C')
            model = cd_fast.enet_coordinate_descent_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
                tol, rng, random, positive)
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
                positive)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like. Got %r" % precompute)
        coef_, dual_gap_, eps_, n_iter_ = model
        coefs[..., i] = coef_
        dual_gaps[i] = dual_gap_
        n_iters.append(n_iter_)

        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')

    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters
    return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(MultiOutputMixin, RegressorMixin, LinearModel):
    """Linear regression with combined L1 and L2 priors as regularizer.

    Minimizes the objective function::

            1 / (2 * n_samples) * ||y - Xw||^2_2
            + alpha * l1_ratio * ||w||_1
            + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

            a * L1 + b * L2

    where::

            alpha = a + b and l1_ratio = a / (a + b)

    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    alpha : float, default=1.0
        Constant that multiplies the penalty terms. Defaults to 1.0.
        See the notes for the exact mathematical meaning of this
        parameter. ``alpha = 0`` is equivalent to an ordinary least square,
        solved by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
        Given this, you should use the :class:`LinearRegression` object.

    l1_ratio : float, default=0.5
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.

    fit_intercept : bool, default=True
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.

    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    precompute : bool or array-like of shape (n_features, n_features),\
                 default=False
        Whether to use a precomputed Gram matrix to speed up
        calculations. The Gram matrix can also be passed as argument.
        For sparse input this option is always ``True`` to preserve sparsity.

    max_iter : int, default=1000
        The maximum number of iterations

    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.

    tol : float, default=1e-4
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.

    positive : bool, default=False
        When set to ``True``, forces the coefficients to be positive.

    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator that selects a random
        feature to update. Used when ``selection`` == 'random'.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    selection : {'cyclic', 'random'}, default='cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    Attributes
    ----------
    coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
        parameter vector (w in the cost function formula)

    sparse_coef_ : sparse matrix of shape (n_features,) or \
            (n_tasks, n_features)
        sparse representation of the `coef_`.

    intercept_ : float or ndarray of shape (n_targets,)
        independent term in decision function.

    n_iter_ : list of int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    dual_gap_ : float or ndarray of shape (n_targets,)
        Given param alpha, the dual gaps at the end of the optimization,
        same shape as each observation of y.

    Examples
    --------
    >>> from sklearn.linear_model import ElasticNet
    >>> from sklearn.datasets import make_regression

    >>> X, y = make_regression(n_features=2, random_state=0)
    >>> regr = ElasticNet(random_state=0)
    >>> regr.fit(X, y)
    ElasticNet(random_state=0)
    >>> print(regr.coef_)
    [18.83816048 64.55968825]
    >>> print(regr.intercept_)
    1.451...
    >>> print(regr.predict([[0, 0]]))
    [1.451...]

    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    ElasticNetCV : Elastic net model with best model selection by
        cross-validation.
    SGDRegressor: implements elastic net regression with incremental training.
    SGDClassifier: implements logistic regression with elastic net penalty
        (``SGDClassifier(loss="log", penalty="elasticnet")``).
    """
    # enet_path is exposed as a static method so fit() and subclasses can
    # share the same path computation.
    path = staticmethod(enet_path)

    @_deprecate_positional_args
    def __init__(self, alpha=1.0, *, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute=False, max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y, sample_weight=None, check_input=True):
        """Fit model with coordinate descent.

        Parameters
        ----------
        X : {ndarray, sparse matrix} of (n_samples, n_features)
            Data

        y : {ndarray, sparse matrix} of shape (n_samples,) or \
            (n_samples, n_targets)
            Target. Will be cast to X's dtype if necessary

        sample_weight : float or array-like of shape (n_samples,), default=None
            Sample weight.

        check_input : bool, default=True
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)

        if isinstance(self.precompute, str):
            raise ValueError('precompute should be one of True, False or'
                             ' array-like. Got %r' % self.precompute)

        if (not isinstance(self.l1_ratio, numbers.Number) or
                self.l1_ratio < 0 or self.l1_ratio > 1):
            raise ValueError("l1_ratio must be between 0 and 1; "
                             f"got l1_ratio={self.l1_ratio}")

        # Remember if X is copied
        X_copied = False
        # We expect X and y to be float64 or float32 Fortran ordered arrays
        # when bypassing checks
        if check_input:
            X_copied = self.copy_X and self.fit_intercept
            X, y = self._validate_data(X, y, accept_sparse='csc',
                                       order='F',
                                       dtype=[np.float64, np.float32],
                                       copy=X_copied, multi_output=True,
                                       y_numeric=True)
            y = check_array(y, order='F', copy=False, dtype=X.dtype.type,
                            ensure_2d=False)

        n_samples, n_features = X.shape
        alpha = self.alpha

        if isinstance(sample_weight, numbers.Number):
            # A scalar weight applies identically to every sample, which is
            # equivalent to no weighting, so it is dropped.
            sample_weight = None
        if sample_weight is not None:
            if check_input:
                if sparse.issparse(X):
                    raise ValueError("Sample weights do not (yet) support "
                                     "sparse matrices.")
                sample_weight = _check_sample_weight(sample_weight, X,
                                                     dtype=X.dtype)
            # simplify things by rescaling sw to sum up to n_samples
            # => np.average(x, weights=sw) = np.mean(sw * x)
            sample_weight *= (n_samples / np.sum(sample_weight))
            # Objective function is:
            # 1/2 * np.average(squared error, weights=sw) + alpha * penalty
            # but coordinate descent minimizes:
            # 1/2 * sum(squared error) + alpha * penalty
            # enet_path therefore sets alpha = n_samples * alpha
            # With sw, enet_path should set alpha = sum(sw) * alpha
            # Therefore, we rescale alpha = sum(sw) / n_samples * alpha
            # Note: As we rescaled sample_weights to sum up to n_samples,
            # we don't need this
            # alpha *= np.sum(sample_weight) / n_samples

        # Ensure copying happens only once, don't do it again if done above.
        # X and y will be rescaled if sample_weight is not None, order='F'
        # ensures that the returned X and y are still F-contiguous.
        should_copy = self.copy_X and not X_copied
        X, y, X_offset, y_offset, X_scale, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=should_copy,
                     check_input=check_input, sample_weight=sample_weight)
        # coordinate descent needs F-ordered arrays and _pre_fit might have
        # called _rescale_data
        if check_input or sample_weight is not None:
            X, y = _set_order(X, y, order='F')
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]

        n_targets = y.shape[1]

        if self.selection not in ['cyclic', 'random']:
            raise ValueError("selection should be either random or cyclic.")

        # Warm-start from the previous coef_ when requested; otherwise
        # start the solver from zeros.
        if not self.warm_start or not hasattr(self, "coef_"):
            coef_ = np.zeros((n_targets, n_features), dtype=X.dtype,
                             order='F')
        else:
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]

        dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
        self.n_iter_ = []

        # One coordinate-descent path per target, evaluated at the single
        # requested penalty (alphas=[alpha]).
        for k in range(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap, this_iter = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_offset=X_offset, X_scale=X_scale,
                          return_n_iter=True, coef_init=coef_[k],
                          max_iter=self.max_iter,
                          random_state=self.random_state,
                          selection=self.selection,
                          check_input=False)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
            self.n_iter_.append(this_iter[0])

        # Squeeze mono-target results back to 1D for the common case.
        if n_targets == 1:
            self.n_iter_ = self.n_iter_[0]
            self.coef_ = coef_[0]
            self.dual_gap_ = dual_gaps_[0]
        else:
            self.coef_ = coef_
            self.dual_gap_ = dual_gaps_

        self._set_intercept(X_offset, y_offset, X_scale)

        # workaround since _set_intercept will cast self.coef_ into X.dtype
        self.coef_ = np.asarray(self.coef_, dtype=X.dtype)

        # return self for chaining fit and predict calls
        return self

    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted ``coef_`` """
        return sparse.csr_matrix(self.coef_)

    def _decision_function(self, X):
        """Decision function of the linear model

        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)

        Returns
        -------
        T : ndarray of shape (n_samples,)
            The predicted decision function
        """
        check_is_fitted(self)
        if sparse.isspmatrix(X):
            # Keep X sparse: compute the sparse dot product plus intercept
            # directly instead of densifying.
            return safe_sparse_dot(X, self.coef_.T,
                                   dense_output=True) + self.intercept_
        else:
            return super()._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)
    The optimization objective for Lasso is::
        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    Technically the Lasso model is optimizing the same objective function as
    the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
    Read more in the :ref:`User Guide <lasso>`.
    Parameters
    ----------
    alpha : float, default=1.0
        Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
        Given this, you should use the :class:`LinearRegression` object.
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to False, no intercept will be used in calculations
        (i.e. data is expected to be centered).
    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    precompute : 'auto', bool or array-like of shape (n_features, n_features),\
                 default=False
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, default=1000
        The maximum number of iterations
    tol : float, default=1e-4
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.
    positive : bool, default=False
        When set to ``True``, forces the coefficients to be positive.
    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator that selects a random
        feature to update. Used when ``selection`` == 'random'.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    selection : {'cyclic', 'random'}, default='cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    Attributes
    ----------
    coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
        parameter vector (w in the cost function formula)
    dual_gap_ : float or ndarray of shape (n_targets,)
        Given param alpha, the dual gaps at the end of the optimization,
        same shape as each observation of y.
    sparse_coef_ : sparse matrix of shape (n_features, 1) or \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``
    intercept_ : float or ndarray of shape (n_targets,)
        independent term in decision function.
    n_iter_ : int or list of int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1)
    >>> print(clf.coef_)
    [0.85 0.  ]
    >>> print(clf.intercept_)
    0.15...
    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # The Lasso is the l1_ratio=1.0 special case of the Elastic Net, so it
    # reuses the ElasticNet coordinate-descent path function unchanged.
    path = staticmethod(enet_path)
    @_deprecate_positional_args
    def __init__(self, alpha=1.0, *, fit_intercept=True, normalize=False,
                 precompute=False, copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # Delegate everything to ElasticNet with the mixing parameter pinned
        # to pure L1 (l1_ratio=1.0); no extra state is stored here.
        super().__init__(
            alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start,
            positive=positive, random_state=random_state,
            selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'
    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.
    y : array-like of shape (n_samples,) or (n_samples, n_targets)
        Target values
    train : list of indices
        The indices of the train set
    test : list of indices
        The indices of the test set
    path : callable
        function returning a list of models on the path. See
        enet_path for an example of signature
    path_params : dictionary
        Parameters passed to the path function
    alphas : array-like, default=None
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'.
    l1_ratio : float, default=1
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2.
    X_order : {'F', 'C'}, default=None
        The order of the arrays expected by the path function to
        avoid memory copies
    dtype : a numpy dtype, default=None
        The dtype of the arrays expected by the path function to
        avoid memory copies
    Returns
    -------
    this_mses : ndarray of shape (n_alphas,)
        Mean squared prediction error on the test fold, one value per
        alpha on the path.
    """
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    if not sparse.issparse(X):
        for array, array_input in ((X_train, X), (y_train, y),
                                   (X_test, X), (y_test, y)):
            if array.base is not array_input and not array.flags['WRITEABLE']:
                # fancy indexing should create a writable copy but it doesn't
                # for read-only memmaps (cf. numpy#14132).
                array.setflags(write=True)
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']
    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False
    # Center/scale the training fold and (optionally) compute the Gram
    # matrix; copy=False because the fancy-indexed folds above are already
    # private copies.
    X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)
    # Work on a copy so the caller's path_params dict is not mutated.
    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_offset'] = X_offset
    path_params['X_scale'] = X_scale
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas
    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio
    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, accept_sparse='csc', dtype=dtype,
                          order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    # Free the training fold as early as possible to limit peak memory.
    del X_train, y_train
    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_offset = np.atleast_1d(y_offset)
        y_test = y_test[:, np.newaxis]
    if normalize:
        nonzeros = np.flatnonzero(X_scale)
        coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
    # Coefficients were fit on centered data; recover the intercept per
    # output and per alpha from the centering offsets.
    intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
    X_test_coefs = safe_sparse_dot(X_test, coefs)
    # residues has shape (n_test_samples, n_outputs, n_alphas); averaging
    # over samples then outputs yields one MSE per alpha.
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
    return this_mses
class LinearModelCV(MultiOutputMixin, LinearModel, metaclass=ABCMeta):
    """Base class for iterative model fitting along a regularization path.

    Subclasses provide the ``path`` static method plus the two abstract
    hooks below; ``fit`` then cross-validates over the alpha grid (and
    l1_ratio grid when the subclass exposes one) and refits the chosen
    estimator on the full data.
    """
    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=None,
                 positive=False, random_state=None, selection='cyclic'):
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
    @abstractmethod
    def _get_estimator(self):
        """Model to be fitted after the best alpha has been determined."""
    @abstractmethod
    def _is_multitask(self):
        """Bool indicating if class is meant for multidimensional target."""
    def fit(self, X, y):
        """Fit linear model with coordinate descent
        Fit is on grid of alphas and best alpha estimated by cross-validation.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data. Pass directly as Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.
        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values
        Returns
        -------
        self : object
            Fitted estimator.
        """
        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept
        check_y_params = dict(copy=False, dtype=[np.float64, np.float32],
                              ensure_2d=False)
        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            # Need to validate separately here.
            # We can't pass multi_ouput=True because that would allow y to be
            # csr. We also want to allow y to be 64 or 32 but check_X_y only
            # allows to convert for 64.
            check_X_params = dict(accept_sparse='csc',
                                  dtype=[np.float64, np.float32], copy=False)
            X, y = self._validate_data(X, y,
                                       validate_separately=(check_X_params,
                                                            check_y_params))
            if sparse.isspmatrix(X):
                if (hasattr(reference_to_old_X, "data") and
                        not np.may_share_memory(reference_to_old_X.data, X.data)):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            # Need to validate separately here.
            # We can't pass multi_ouput=True because that would allow y to be
            # csr. We also want to allow y to be 64 or 32 but check_X_y only
            # allows to convert for 64.
            check_X_params = dict(accept_sparse='csc',
                                  dtype=[np.float64, np.float32], order='F',
                                  copy=copy_X)
            X, y = self._validate_data(X, y,
                                       validate_separately=(check_X_params,
                                                            check_y_params))
            copy_X = False
        if y.shape[0] == 0:
            raise ValueError("y has 0 samples: %r" % y)
        if not self._is_multitask():
            if y.ndim > 1 and y.shape[1] > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%s" % self.__class__.__name__)
            y = column_or_1d(y, warn=True)
        else:
            if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was"
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % self.__class__.__name__[9:])
        model = self._get_estimator()
        if self.selection not in ["random", "cyclic"]:
            raise ValueError("selection should be either random or cyclic.")
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            # Subclasses without an l1_ratio parameter (e.g. Lasso-type)
            # behave as pure L1: a single implicit ratio of 1.
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            # One automatically-computed alpha grid per l1_ratio.
            alphas = [_alpha_grid(X, y, l1_ratio=l1_ratio,
                                  fit_intercept=self.fit_intercept,
                                  eps=self.eps, n_alphas=self.n_alphas,
                                  normalize=self.normalize, copy_X=self.copy_X)
                      for l1_ratio in l1_ratios]
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})
        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if effective_n_jobs(self.n_jobs) > 1:
            path_params['copy_X'] = False
        # init cross-validation generator
        cv = check_cv(self.cv)
        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv.split(X, y))
        best_mse = np.inf
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=X.dtype.type)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             **_joblib_parallel_args(prefer="threads"))(jobs)
        # Reshape flat per-(l1_ratio, fold) results back into a
        # (n_l1_ratio, n_folds, n_alphas) array, then average over folds.
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        mean_mse = np.mean(mse_paths, axis=1)
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
        # Pick the (l1_ratio, alpha) pair with the lowest mean CV error.
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse
        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])
        # Refit the model with the parameters selected
        common_params = {name: value
                         for name, value in self.get_params().items()
                         if name in model.get_params()}
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        precompute = getattr(self, "precompute", None)
        if isinstance(precompute, str) and precompute == "auto":
            model.precompute = False
        model.fit(X, y)
        if not hasattr(self, 'l1_ratio'):
            # Lasso-type subclasses expose no l1_ratio parameter, so do not
            # expose the internally-used l1_ratio_ attribute either.
            del self.l1_ratio_
        # Mirror the refit model's learned attributes on the CV estimator.
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        self.n_iter_ = model.n_iter_
        return self
class LassoCV(RegressorMixin, LinearModelCV):
    """Lasso linear model with iterative fitting along a regularization path.
    See glossary entry for :term:`cross-validation estimator`.
    The best model is selected by cross-validation.
    The optimization objective for Lasso is::
        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    Read more in the :ref:`User Guide <lasso>`.
    Parameters
    ----------
    eps : float, default=1e-3
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, default=100
        Number of alphas along the regularization path
    alphas : ndarray, default=None
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically
    fit_intercept : bool, default=True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).
    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    precompute : 'auto', bool or array-like of shape (n_features, n_features),\
        default='auto'
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : int, default=1000
        The maximum number of iterations
    tol : float, default=1e-4
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.
    cv : int, cross-validation generator or iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross-validation,
        - int, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For int/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    verbose : bool or int, default=False
        Amount of verbosity.
    n_jobs : int, default=None
        Number of CPUs to use during the cross validation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    positive : bool, default=False
        If positive, restrict regression coefficients to be positive
    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator that selects a random
        feature to update. Used when ``selection`` == 'random'.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    selection : {'cyclic', 'random'}, default='cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation
    coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
        parameter vector (w in the cost function formula)
    intercept_ : float or ndarray of shape (n_targets,)
        independent term in decision function.
    mse_path_ : ndarray of shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha
    alphas_ : ndarray of shape (n_alphas,)
        The grid of alphas used for fitting
    dual_gap_ : float or ndarray of shape (n_targets,)
        The dual gap at the end of the optimization for the optimal alpha
        (``alpha_``).
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.
    Examples
    --------
    >>> from sklearn.linear_model import LassoCV
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(noise=4, random_state=0)
    >>> reg = LassoCV(cv=5, random_state=0).fit(X, y)
    >>> reg.score(X, y)
    0.9993...
    >>> reg.predict(X[:1,])
    array([-78.4951...])
    Notes
    -----
    For an example, see
    :ref:`examples/linear_model/plot_lasso_model_selection.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # Path function used by LinearModelCV.fit for the CV search.
    path = staticmethod(lasso_path)
    @_deprecate_positional_args
    def __init__(self, *, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=None,
                 positive=False, random_state=None, selection='cyclic'):
        super().__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
            random_state=random_state, selection=selection)
    def _get_estimator(self):
        """Estimator refit with the CV-selected alpha (a default Lasso)."""
        return Lasso()
    def _is_multitask(self):
        """LassoCV only supports single-target ``y``."""
        return False
    def _more_tags(self):
        # Estimator-check tag: this CV model does not support multioutput y.
        return {'multioutput': False}
class ElasticNetCV(RegressorMixin, LinearModelCV):
    """Elastic Net model with iterative fitting along a regularization path.
    See glossary entry for :term:`cross-validation estimator`.
    Read more in the :ref:`User Guide <elastic_net>`.
    Parameters
    ----------
    l1_ratio : float or list of float, default=0.5
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0``
        the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``
    eps : float, default=1e-3
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, default=100
        Number of alphas along the regularization path, used for each l1_ratio.
    alphas : ndarray, default=None
        List of alphas where to compute the models.
        If None alphas are set automatically
    fit_intercept : bool, default=True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).
    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    precompute : 'auto', bool or array-like of shape (n_features, n_features),\
        default='auto'
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : int, default=1000
        The maximum number of iterations
    tol : float, default=1e-4
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : int, cross-validation generator or iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross-validation,
        - int, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For int/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.
    verbose : bool or int, default=0
        Amount of verbosity.
    n_jobs : int, default=None
        Number of CPUs to use during the cross validation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    positive : bool, default=False
        When set to ``True``, forces the coefficients to be positive.
    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator that selects a random
        feature to update. Used when ``selection`` == 'random'.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    selection : {'cyclic', 'random'}, default='cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation
    l1_ratio_ : float
        The compromise between l1 and l2 penalization chosen by
        cross validation
    coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the cost function formula),
    intercept_ : float or ndarray of shape (n_targets,)
        Independent term in the decision function.
    mse_path_ : ndarray of shape (n_l1_ratio, n_alpha, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.
    alphas_ : ndarray of shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.
    dual_gap_ : float
        The dual gaps at the end of the optimization for the optimal alpha.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.
    Examples
    --------
    >>> from sklearn.linear_model import ElasticNetCV
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=2, random_state=0)
    >>> regr = ElasticNetCV(cv=5, random_state=0)
    >>> regr.fit(X, y)
    ElasticNetCV(cv=5, random_state=0)
    >>> print(regr.alpha_)
    0.199...
    >>> print(regr.intercept_)
    0.398...
    >>> print(regr.predict([[0, 0]]))
    [0.398...]
    Notes
    -----
    For an example, see
    :ref:`examples/linear_model/plot_lasso_model_selection.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::
        a * L1 + b * L2
    for::
        alpha = a + b and l1_ratio = a / (a + b).
    See also
    --------
    enet_path
    ElasticNet
    """
    # Path function used by LinearModelCV.fit for the CV search.
    path = staticmethod(enet_path)
    @_deprecate_positional_args
    def __init__(self, *, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=None, positive=False, random_state=None,
                 selection='cyclic'):
        # Parameters are assigned directly (rather than via super().__init__)
        # because l1_ratio is not part of the LinearModelCV signature.
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
    def _get_estimator(self):
        """Estimator refit with the CV-selected parameters (an ElasticNet)."""
        return ElasticNet()
    def _is_multitask(self):
        """ElasticNetCV only supports single-target ``y``."""
        return False
    def _more_tags(self):
        # Estimator-check tag: this CV model does not support multioutput y.
        return {'multioutput': False}
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = sum_i sqrt(sum_j W_ij ^ 2)
i.e. the sum of norms of each row.
Read more in the :ref:`User Guide <multi_task_elastic_net>`.
Parameters
----------
alpha : float, default=1.0
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float, default=0.5
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, default=1000
The maximum number of iterations
tol : float, default=1e-4
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator that selects a random
feature to update. Used when ``selection`` == 'random'.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
selection : {'cyclic', 'random'}, default='cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
intercept_ : ndarray of shape (n_tasks,)
Independent term in decision function.
coef_ : ndarray of shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array.
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
dual_gap_ : float
The dual gaps at the end of the optimization.
eps_ : float
The tolerance scaled scaled by the variance of the target `y`.
sparse_coef_ : sparse matrix of shape (n_features,) or \
(n_tasks, n_features)
sparse representation of the `coef_`.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskElasticNet(alpha=0.1)
>>> print(clf.coef_)
[[0.45663524 0.45612256]
[0.45663524 0.45612256]]
>>> print(clf.intercept_)
[0.0872422 0.0872422]
See also
--------
MultiTaskElasticNet : Multi-task L1/L2 ElasticNet with built-in
cross-validation.
ElasticNet
MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X and y arguments of the fit
method should be directly passed as Fortran-contiguous numpy arrays.
"""
@_deprecate_positional_args
def __init__(self, alpha=1.0, *, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
    """Fit MultiTaskElasticNet model with coordinate descent

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data
    y : ndarray of shape (n_samples, n_tasks)
        Target. Will be cast to X's dtype if necessary

    Returns
    -------
    self : object
        Fitted estimator.

    Notes
    -----
    Coordinate descent is an algorithm that considers each column of
    data at a time hence it will automatically convert the X input
    as a Fortran-contiguous numpy array if necessary.

    To avoid memory re-allocation it is advised to allocate the
    initial data in memory directly using that format.
    """
    # Need to validate X and y separately here: we cannot pass
    # multi_output=True to a joint check because that would allow y to
    # be csr, which the multi-task solver does not handle.
    check_X_params = dict(dtype=[np.float64, np.float32], order='F',
                          copy=self.copy_X and self.fit_intercept)
    check_y_params = dict(ensure_2d=False, order='F')
    X, y = self._validate_data(X, y, validate_separately=(check_X_params,
                                                          check_y_params))
    y = y.astype(X.dtype)

    # Pick the single-task estimator name for the error message below;
    # subclasses without an ``l1_ratio`` attribute report 'Lasso'.
    if hasattr(self, 'l1_ratio'):
        model_str = 'ElasticNet'
    else:
        model_str = 'Lasso'
    if y.ndim == 1:
        raise ValueError("For mono-task outputs, use %s" % model_str)

    n_samples, n_features = X.shape
    _, n_tasks = y.shape

    if n_samples != y.shape[0]:
        raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                         % (n_samples, y.shape[0]))

    # Center (and optionally scale) X and y; the offsets are used after
    # the solve to reconstruct intercept_.
    X, y, X_offset, y_offset, X_scale = _preprocess_data(
        X, y, self.fit_intercept, self.normalize, copy=False)

    # Start from zeros unless warm-starting from a previous fit.
    if not self.warm_start or not hasattr(self, "coef_"):
        self.coef_ = np.zeros((n_tasks, n_features), dtype=X.dtype.type,
                              order='F')

    # The Cython objective is n_samples * the docstring objective, hence
    # the n_samples factor in the regularization strengths.
    l1_reg = self.alpha * self.l1_ratio * n_samples
    l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples

    self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory

    if self.selection not in ['random', 'cyclic']:
        raise ValueError("selection should be either random or cyclic.")
    random = (self.selection == 'random')

    self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
        cd_fast.enet_coordinate_descent_multi_task(
            self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
            check_random_state(self.random_state), random)

    self._set_intercept(X_offset, y_offset, X_scale)

    # return self for chaining fit and predict calls
    return self
def _more_tags(self):
return {'multioutput_only': True}
class MultiTaskLasso(MultiTaskElasticNet):
    """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    alpha : float, default=1.0
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    fit_intercept : bool, default=True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).

    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, default=1000
        The maximum number of iterations

    tol : float, default=1e-4
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.

    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator that selects a random
        feature to update. Used when ``selection`` == 'random'.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    selection : {'cyclic', 'random'}, default='cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    Attributes
    ----------
    coef_ : ndarray of shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
        Note that ``coef_`` stores the transpose of ``W``, ``W.T``.

    intercept_ : ndarray of shape (n_tasks,)
        independent term in decision function.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    dual_gap_ : float
        The dual gap at the end of the optimization.

    eps_ : float
        The tolerance scaled by the variance of the target `y`.

    sparse_coef_ : sparse matrix of shape (n_features,) or \
            (n_tasks, n_features)
        sparse representation of the `coef_`.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0, 1], [1, 2], [2, 4]], [[0, 0], [1, 1], [2, 3]])
    MultiTaskLasso(alpha=0.1)
    >>> print(clf.coef_)
    [[0. 0.60809415]
    [0. 0.94592424]]
    >>> print(clf.intercept_)
    [-0.41888636 -0.87382323]

    See also
    --------
    MultiTaskLassoCV : Multi-task L1/L2 Lasso with built-in cross-validation.
    Lasso
    MultiTaskElasticNet

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X and y arguments of the fit
    method should be directly passed as Fortran-contiguous numpy arrays.
    """
    @_deprecate_positional_args
    def __init__(self, alpha=1.0, *, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        # Hard-wire l1_ratio to 1.0 so that the inherited
        # MultiTaskElasticNet.fit applies a pure L1/L2 mixed-norm penalty
        # (the Frobenius term vanishes).
        self.l1_ratio = 1.0
        self.random_state = random_state
        self.selection = selection
class MultiTaskElasticNetCV(RegressorMixin, LinearModelCV):
    """Multi-task L1/L2 ElasticNet with built-in cross-validation.

    See glossary entry for :term:`cross-validation estimator`.

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_elastic_net>`.

    .. versionadded:: 0.15

    Parameters
    ----------
    l1_ratio : float or list of float, default=0.5
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``

    eps : float, default=1e-3
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, default=100
        Number of alphas along the regularization path

    alphas : array-like, default=None
        List of alphas where to compute the models.
        If not provided, set automatically.

    fit_intercept : bool, default=True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).

    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    max_iter : int, default=1000
        The maximum number of iterations

    tol : float, default=1e-4
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : int, cross-validation generator or iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross-validation,
        - int, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For int/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.

    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.

    verbose : bool or int, default=0
        Amount of verbosity.

    n_jobs : int, default=None
        Number of CPUs to use during the cross validation. Note that this is
        used only if multiple values for l1_ratio are given.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator that selects a random
        feature to update. Used when ``selection`` == 'random'.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    selection : {'cyclic', 'random'}, default='cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    Attributes
    ----------
    intercept_ : ndarray of shape (n_tasks,)
        Independent term in decision function.

    coef_ : ndarray of shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
        Note that ``coef_`` stores the transpose of ``W``, ``W.T``.

    alpha_ : float
        The amount of penalization chosen by cross validation.

    mse_path_ : ndarray of shape (n_alphas, n_folds) or \
            (n_l1_ratio, n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : ndarray of shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio

    l1_ratio_ : float
        best l1_ratio obtained by cross-validation.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    dual_gap_ : float
        The dual gap at the end of the optimization for the optimal alpha.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNetCV(cv=3)
    >>> clf.fit([[0,0], [1, 1], [2, 2]],
    ... [[0, 0], [1, 1], [2, 2]])
    MultiTaskElasticNetCV(cv=3)
    >>> print(clf.coef_)
    [[0.52875032 0.46958558]
    [0.52875032 0.46958558]]
    >>> print(clf.intercept_)
    [0.00166409 0.00166409]

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskLassoCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X and y arguments of the fit
    method should be directly passed as Fortran-contiguous numpy arrays.
    """
    # Regularization-path function used by the LinearModelCV base class
    # when computing the per-fold coefficient paths.
    path = staticmethod(enet_path)

    @_deprecate_positional_args
    def __init__(self, *, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False,
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=None, random_state=None,
                 selection='cyclic'):
        # Store constructor arguments verbatim (scikit-learn estimator API).
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.selection = selection

    def _get_estimator(self):
        # Template estimator refit by the CV machinery once the best
        # (alpha, l1_ratio) pair has been selected.
        return MultiTaskElasticNet()

    def _is_multitask(self):
        # This CV variant fits 2D (multi-task) targets.
        return True

    def _more_tags(self):
        # Tag consumed by the common estimator checks: 2D targets only.
        return {'multioutput_only': True}
class MultiTaskLassoCV(RegressorMixin, LinearModelCV):
    """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.

    See glossary entry for :term:`cross-validation estimator`.

    The optimization objective for MultiTaskLasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    .. versionadded:: 0.15

    Parameters
    ----------
    eps : float, default=1e-3
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, default=100
        Number of alphas along the regularization path

    alphas : array-like, default=None
        List of alphas where to compute the models.
        If not provided, set automatically.

    fit_intercept : bool, default=True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).

    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    max_iter : int, default=1000
        The maximum number of iterations.

    tol : float, default=1e-4
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.

    cv : int, cross-validation generator or iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross-validation,
        - int, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For int/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.

    verbose : bool or int, default=False
        Amount of verbosity.

    n_jobs : int, default=None
        Number of CPUs to use during the cross validation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator that selects a random
        feature to update. Used when ``selection`` == 'random'.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    selection : {'cyclic', 'random'}, default='cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    Attributes
    ----------
    intercept_ : ndarray of shape (n_tasks,)
        Independent term in decision function.

    coef_ : ndarray of shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
        Note that ``coef_`` stores the transpose of ``W``, ``W.T``.

    alpha_ : float
        The amount of penalization chosen by cross validation.

    mse_path_ : ndarray of shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : ndarray of shape (n_alphas,)
        The grid of alphas used for fitting.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    dual_gap_ : float
        The dual gap at the end of the optimization for the optimal alpha.

    Examples
    --------
    >>> from sklearn.linear_model import MultiTaskLassoCV
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.metrics import r2_score
    >>> X, y = make_regression(n_targets=2, noise=4, random_state=0)
    >>> reg = MultiTaskLassoCV(cv=5, random_state=0).fit(X, y)
    >>> r2_score(y, reg.predict(X))
    0.9994...
    >>> reg.alpha_
    0.5713...
    >>> reg.predict(X[:1,])
    array([[153.7971..., 94.9015...]])

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskElasticNetCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X and y arguments of the fit
    method should be directly passed as Fortran-contiguous numpy arrays.
    """
    # Regularization-path function used by the LinearModelCV base class
    # when computing the per-fold coefficient paths.
    path = staticmethod(lasso_path)

    @_deprecate_positional_args
    def __init__(self, *, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True,
                 normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
                 cv=None, verbose=False, n_jobs=None, random_state=None,
                 selection='cyclic'):
        # All hyper-parameters are forwarded unchanged to the base class;
        # this subclass only fixes the path function (lasso_path above).
        super().__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
            selection=selection)

    def _get_estimator(self):
        # Template estimator refit by the CV machinery on the best alpha.
        return MultiTaskLasso()

    def _is_multitask(self):
        # This CV variant fits 2D (multi-task) targets.
        return True

    def _more_tags(self):
        # Tag consumed by the common estimator checks: 2D targets only.
        return {'multioutput_only': True}
| {
"repo_name": "bnaul/scikit-learn",
"path": "sklearn/linear_model/_coordinate_descent.py",
"copies": "2",
"size": "92547",
"license": "bsd-3-clause",
"hash": 7834911798739709000,
"line_mean": 36.6973523422,
"line_max": 81,
"alpha_frac": 0.6007109901,
"autogenerated": false,
"ratio": 3.989438744719372,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 2455
} |
import sys
import warnings
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from joblib import Parallel, effective_n_jobs
from ._base import LinearModel, _pre_fit
from ..base import RegressorMixin, MultiOutputMixin
from ._base import _preprocess_data
from ..utils import check_array
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..utils.extmath import safe_sparse_dot
from ..utils.fixes import _astype_copy_false, _joblib_parallel_args
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.validation import column_or_1d
from ..utils.fixes import delayed
# mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast'
from . import _cd_fast as cd_fast # type: ignore
def _set_order(X, y, order='C'):
"""Change the order of X and y if necessary.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
order : {None, 'C', 'F'}
If 'C', dense arrays are returned as C-ordered, sparse matrices in csr
format. If 'F', dense arrays are return as F-ordered, sparse matrices
in csc format.
Returns
-------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data with guaranteed order.
y : ndarray of shape (n_samples,)
Target values with guaranteed order.
"""
if order not in [None, 'C', 'F']:
raise ValueError("Unknown value for order. Got {} instead of "
"None, 'C' or 'F'.".format(order))
sparse_X = sparse.issparse(X)
sparse_y = sparse.issparse(y)
if order is not None:
sparse_format = "csc" if order == "F" else "csr"
if sparse_X:
# As of scipy 1.1.0, new argument copy=False by default.
# This is what we want.
X = X.asformat(sparse_format, **_astype_copy_false(X))
else:
X = np.asarray(X, order=order)
if sparse_y:
y = y.asformat(sparse_format)
else:
y = np.asarray(y, order=order)
return X, y
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
                eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
    """ Compute the grid of alpha values for elastic net parameter search

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication

    y : ndarray of shape (n_samples,)
        Target values

    Xy : array-like of shape (n_features,), default=None
        Xy = np.dot(X.T, y) that can be precomputed.

    l1_ratio : float, default=1.0
        The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not
        supported) ``For l1_ratio = 1`` it is an L1 penalty. For
        ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2.

    eps : float, default=1e-3
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, default=100
        Number of alphas along the regularization path

    fit_intercept : bool, default=True
        Whether to fit an intercept or not

    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.

    Returns
    -------
    alphas : ndarray of shape (n_alphas,)
        Decreasing, log-spaced grid of alpha values.
    """
    if l1_ratio == 0:
        raise ValueError("Automatic alpha grid generation is not supported for"
                         " l1_ratio=0. Please supply a grid by providing "
                         "your estimator with the appropriate `alphas=` "
                         "argument.")
    n_samples = len(y)

    sparse_center = False
    if Xy is None:
        X_sparse = sparse.isspmatrix(X)
        sparse_center = X_sparse and (fit_intercept or normalize)
        X = check_array(X, accept_sparse='csc',
                        copy=(copy_X and fit_intercept and not X_sparse))
        if not X_sparse:
            # X can be touched inplace thanks to the above line
            X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
                                             normalize, copy=False)
        Xy = safe_sparse_dot(X.T, y, dense_output=True)

        if sparse_center:
            # Workaround to find alpha_max for sparse matrices.
            # since we should not destroy the sparsity of such matrices.
            _, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
                                                          normalize,
                                                          return_mean=True)
            mean_dot = X_offset * np.sum(y)

    if Xy.ndim == 1:
        # Promote to 2D so the row-norm reduction below also covers the
        # single-output case.
        Xy = Xy[:, np.newaxis]

    if sparse_center:
        # Apply centering/scaling to Xy instead of X, which preserves the
        # sparsity of X while giving the same Xy as centered data would.
        if fit_intercept:
            Xy -= mean_dot[:, np.newaxis]
        if normalize:
            Xy /= X_scale[:, np.newaxis]

    # Largest penalty on the grid: the smallest alpha for which all
    # coefficients are zero, computed from the row norms of X^T y.
    alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
                 (n_samples * l1_ratio))

    if alpha_max <= np.finfo(float).resolution:
        # Degenerate case (e.g. y numerically zero): return a constant
        # grid at machine resolution instead of log-spacing from ~0.
        alphas = np.empty(n_alphas)
        alphas.fill(np.finfo(float).resolution)
        return alphas

    # Log-spaced grid from alpha_max down to alpha_max * eps, returned in
    # decreasing order (largest penalty first).
    return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                       num=n_alphas)[::-1]
def lasso_path(X, y, *, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, copy_X=True, coef_init=None,
               verbose=False, return_n_iter=False, positive=False, **params):
    """Compute the Lasso regularization path with coordinate descent.

    For a single target the minimized objective reads::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    and for multiple targets::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    where ``||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}``, i.e. the sum of
    the Euclidean norms of the rows of ``W``.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data. Pass Fortran-contiguous data directly to avoid
        unnecessary memory duplication. ``X`` may be sparse only when
        ``y`` is mono-output.

    y : {array-like, sparse matrix} of shape (n_samples,) or \
        (n_samples, n_outputs)
        Target values.

    eps : float, default=1e-3
        Length of the path: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, default=100
        Number of alphas along the regularization path.

    alphas : ndarray, default=None
        Alphas at which to compute the models; chosen automatically when
        ``None``.

    precompute : 'auto', bool or array-like of shape (n_features, n_features),\
        default='auto'
        Whether to use a precomputed Gram matrix to speed up calculations.
        ``'auto'`` lets the implementation decide; the Gram matrix itself
        may also be passed.

    Xy : array-like of shape (n_features,) or (n_features, n_outputs),\
        default=None
        Precomputed ``np.dot(X.T, y)``; useful only together with a
        precomputed Gram matrix.

    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : ndarray of shape (n_features, ), default=None
        Initial values of the coefficients.

    verbose : bool or int, default=False
        Amount of verbosity.

    return_n_iter : bool, default=False
        Whether to additionally return the iteration counts.

    positive : bool, default=False
        If set to True, forces coefficients to be positive.
        (Only allowed when ``y.ndim == 1``).

    **params : kwargs
        Keyword arguments forwarded to the coordinate descent solver.

    Returns
    -------
    alphas : ndarray of shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : ndarray of shape (n_features, n_alphas) or \
        (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : ndarray of shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : list of int
        Iterations taken by the coordinate descent optimizer to reach the
        specified tolerance for each alpha (only when ``return_n_iter``).

    Notes
    -----
    For an example, see
    :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    Note that in certain cases, the Lars solver may be significantly faster
    to implement this functionality; linear interpolation can then be used
    to retrieve model coefficients between the values output by lars_path.

    Examples
    --------
    Comparing lasso_path and lars_path with interpolation:

    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> # Use lasso_path to compute a coefficient path
    >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
    >>> print(coef_path)
    [[0. 0. 0.46874778]
    [0.2159048 0.4425765 0.23689075]]

    >>> # Now use lars_path and 1D linear interpolation to compute the
    >>> # same path
    >>> from sklearn.linear_model import lars_path
    >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
    >>> from scipy import interpolate
    >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
    ... coef_path_lars[:, ::-1])
    >>> print(coef_path_continuous([5., 1., .5]))
    [[0. 0. 0.46915237]
    [0.2159048 0.4425765 0.23668876]]

    See Also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # A lasso path is an elastic net path with a pure L1 penalty, so
    # delegate to enet_path with the mixing parameter pinned at 1.
    return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     positive=positive, return_n_iter=return_n_iter,
                     **params)
def enet_path(X, y, *, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, copy_X=True, coef_init=None,
              verbose=False, return_n_iter=False, positive=False,
              check_input=True, **params):
    """
    Compute elastic net path with coordinate descent.

    The elastic net optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : {array-like, sparse matrix} of shape (n_samples,) or \
        (n_samples, n_outputs)
        Target values.

    l1_ratio : float, default=0.5
        Number between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso.

    eps : float, default=1e-3
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, default=100
        Number of alphas along the regularization path.

    alphas : ndarray, default=None
        List of alphas where to compute the models.
        If None alphas are set automatically.

    precompute : 'auto', bool or array-like of shape (n_features, n_features),\
        default='auto'
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like of shape (n_features,) or (n_features, n_outputs),\
        default=None
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : ndarray of shape (n_features, ), default=None
        The initial values of the coefficients.

    verbose : bool or int, default=False
        Amount of verbosity.

    return_n_iter : bool, default=False
        Whether to return the number of iterations or not.

    positive : bool, default=False
        If set to True, forces coefficients to be positive.
        (Only allowed when ``y.ndim == 1``).

    check_input : bool, default=True
        If set to False, the input validation checks are skipped (including the
        Gram matrix when provided). It is assumed that they are handled
        by the caller.

    **params : kwargs
        Keyword arguments passed to the coordinate descent solver.

    Returns
    -------
    alphas : ndarray of shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : ndarray of shape (n_features, n_alphas) or \
        (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : ndarray of shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : list of int
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).

    See Also
    --------
    MultiTaskElasticNet
    MultiTaskElasticNetCV
    ElasticNet
    ElasticNetCV

    Notes
    -----
    For an example, see
    :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.
    """
    # We expect X and y to be already Fortran ordered when bypassing
    # checks (check_input=False is used by ElasticNet.fit, which has
    # already validated its inputs).
    if check_input:
        X = check_array(X, accept_sparse='csc', dtype=[np.float64, np.float32],
                        order='F', copy=copy_X)
        y = check_array(y, accept_sparse='csc', dtype=X.dtype.type,
                        order='F', copy=False, ensure_2d=False)
        if Xy is not None:
            # Xy should be a 1d contiguous array or a 2D C ordered array
            Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
                             ensure_2d=False)

    n_samples, n_features = X.shape

    multi_output = False
    if y.ndim != 1:
        multi_output = True
        _, n_outputs = y.shape

    if multi_output and positive:
        raise ValueError('positive=True is not allowed for multi-output'
                         ' (y.ndim != 1)')

    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.isspmatrix(X):
        if 'X_offset' in params:
            # As sparse matrices are not actually centered we need this
            # offset/scale vector to be passed to the CD solver.
            X_sparse_scaling = params['X_offset'] / params['X_scale']
            X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
        else:
            X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)

    # X should be normalized and fit already if function is called
    # from ElasticNet.fit
    if check_input:
        X, y, X_offset, y_offset, X_scale, precompute, Xy = \
            _pre_fit(X, y, Xy, precompute, normalize=False,
                     fit_intercept=False, copy=False, check_input=check_input)
    if alphas is None:
        # No need to normalize or fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered

    n_alphas = len(alphas)
    # Optional solver settings are plumbed through **params.
    tol = params.get('tol', 1e-4)
    max_iter = params.get('max_iter', 1000)
    dual_gaps = np.empty(n_alphas)
    n_iters = []

    rng = check_random_state(params.get('random_state', None))
    selection = params.get('selection', 'cyclic')
    if selection not in ['random', 'cyclic']:
        raise ValueError("selection should be either random or cyclic.")
    random = (selection == 'random')

    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
    else:
        coefs = np.empty((n_outputs, n_features, n_alphas),
                         dtype=X.dtype)

    if coef_init is None:
        coef_ = np.zeros(coefs.shape[:-1], dtype=X.dtype, order='F')
    else:
        coef_ = np.asfortranarray(coef_init, dtype=X.dtype)

    # Alphas are visited from largest to smallest so that each solution
    # warm-starts the next (coef_ carries over between iterations).
    for i, alpha in enumerate(alphas):
        # account for n_samples scaling in objectives between here and cd_fast
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        # Dispatch to the appropriate Cython solver: sparse X, multi-task,
        # precomputed Gram, or plain dense coordinate descent.
        if not multi_output and sparse.isspmatrix(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, rng, random, positive)
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
        elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already Fortran ordered when bypassing
            # checks
            if check_input:
                precompute = check_array(precompute, dtype=X.dtype.type,
                                         order='C')
            model = cd_fast.enet_coordinate_descent_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
                tol, rng, random, positive)
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
                positive)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like. Got %r" % precompute)
        # Every solver returns the same 4-tuple.
        coef_, dual_gap_, eps_, n_iter_ = model
        coefs[..., i] = coef_
        # we correct the scale of the returned dual gap, as the objective
        # in cd_fast is n_samples * the objective in this docstring.
        dual_gaps[i] = dual_gap_ / n_samples
        n_iters.append(n_iter_)

        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')

    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters
    return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(MultiOutputMixin, RegressorMixin, LinearModel):
    """Linear regression with combined L1 and L2 priors as regularizer.
    Minimizes the objective function::
        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::
        a * ||w||_1 + 0.5 * b * ||w||_2^2
    where::
        alpha = a + b and l1_ratio = a / (a + b)
    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.
    Read more in the :ref:`User Guide <elastic_net>`.
    Parameters
    ----------
    alpha : float, default=1.0
        Constant that multiplies the penalty terms. Defaults to 1.0.
        See the notes for the exact mathematical meaning of this
        parameter. ``alpha = 0`` is equivalent to an ordinary least square,
        solved by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
        Given this, you should use the :class:`LinearRegression` object.
    l1_ratio : float, default=0.5
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.
    fit_intercept : bool, default=True
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.
    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    precompute : bool or array-like of shape (n_features, n_features),\
                 default=False
        Whether to use a precomputed Gram matrix to speed up
        calculations. The Gram matrix can also be passed as argument.
        For sparse input this option is always ``False`` to preserve sparsity.
    max_iter : int, default=1000
        The maximum number of iterations.
    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.
    tol : float, default=1e-4
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.
    positive : bool, default=False
        When set to ``True``, forces the coefficients to be positive.
    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator that selects a random
        feature to update. Used when ``selection`` == 'random'.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    selection : {'cyclic', 'random'}, default='cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    Attributes
    ----------
    coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    sparse_coef_ : sparse matrix of shape (n_features,) or \
            (n_tasks, n_features)
        Sparse representation of the `coef_`.
    intercept_ : float or ndarray of shape (n_targets,)
        Independent term in decision function.
    n_iter_ : list of int
        Number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    dual_gap_ : float or ndarray of shape (n_targets,)
        Given param alpha, the dual gaps at the end of the optimization,
        same shape as each observation of y.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    Examples
    --------
    >>> from sklearn.linear_model import ElasticNet
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=2, random_state=0)
    >>> regr = ElasticNet(random_state=0)
    >>> regr.fit(X, y)
    ElasticNet(random_state=0)
    >>> print(regr.coef_)
    [18.83816048 64.55968825]
    >>> print(regr.intercept_)
    1.451...
    >>> print(regr.predict([[0, 0]]))
    [1.451...]
    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    See Also
    --------
    ElasticNetCV : Elastic net model with best model selection by
        cross-validation.
    SGDRegressor : Implements elastic net regression with incremental training.
    SGDClassifier : Implements logistic regression with elastic net penalty
        (``SGDClassifier(loss="log", penalty="elasticnet")``).
    """
    # The path function used by fit(); subclasses (e.g. Lasso) reuse it.
    path = staticmethod(enet_path)
    def __init__(self, alpha=1.0, *, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute=False, max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # scikit-learn convention: __init__ only stores hyper-parameters
        # verbatim; all validation happens in fit().
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y, sample_weight=None, check_input=True):
        """Fit model with coordinate descent.
        Parameters
        ----------
        X : {ndarray, sparse matrix} of (n_samples, n_features)
            Data.
        y : {ndarray, sparse matrix} of shape (n_samples,) or \
            (n_samples, n_targets)
            Target. Will be cast to X's dtype if necessary.
        sample_weight : float or array-like of shape (n_samples,), default=None
            Sample weight. Internally, the `sample_weight` vector will be
            rescaled to sum to `n_samples`.
            .. versionadded:: 0.23
        check_input : bool, default=True
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        self : object
            Fitted estimator.
        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # alpha=0 removes the penalty entirely; coordinate descent is a poor
        # solver for plain least squares, so point users at LinearRegression.
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)
        # Unlike the CV estimators, plain ElasticNet does not accept
        # precompute='auto'; only True/False/explicit Gram matrix.
        if isinstance(self.precompute, str):
            raise ValueError('precompute should be one of True, False or'
                             ' array-like. Got %r' % self.precompute)
        if (not isinstance(self.l1_ratio, numbers.Number) or
                self.l1_ratio < 0 or self.l1_ratio > 1):
            raise ValueError("l1_ratio must be between 0 and 1; "
                             f"got l1_ratio={self.l1_ratio}")
        # Remember if X is copied
        X_copied = False
        # We expect X and y to be float64 or float32 Fortran ordered arrays
        # when bypassing checks
        if check_input:
            X_copied = self.copy_X and self.fit_intercept
            X, y = self._validate_data(X, y, accept_sparse='csc',
                                       order='F',
                                       dtype=[np.float64, np.float32],
                                       copy=X_copied, multi_output=True,
                                       y_numeric=True)
            y = check_array(y, order='F', copy=False, dtype=X.dtype.type,
                            ensure_2d=False)
        n_samples, n_features = X.shape
        alpha = self.alpha
        # A scalar sample_weight is equivalent to uniform weights, i.e. no
        # weighting at all.
        if isinstance(sample_weight, numbers.Number):
            sample_weight = None
        if sample_weight is not None:
            if check_input:
                if sparse.issparse(X):
                    raise ValueError("Sample weights do not (yet) support "
                                     "sparse matrices.")
                sample_weight = _check_sample_weight(sample_weight, X,
                                                     dtype=X.dtype)
            # simplify things by rescaling sw to sum up to n_samples
            # => np.average(x, weights=sw) = np.mean(sw * x)
            sample_weight = sample_weight * (n_samples / np.sum(sample_weight))
            # Objective function is:
            # 1/2 * np.average(squared error, weights=sw) + alpha * penalty
            # but coordinate descent minimizes:
            # 1/2 * sum(squared error) + alpha * penalty
            # enet_path therefore sets alpha = n_samples * alpha
            # With sw, enet_path should set alpha = sum(sw) * alpha
            # Therefore, we rescale alpha = sum(sw) / n_samples * alpha
            # Note: As we rescaled sample_weights to sum up to n_samples,
            # we don't need this
            # alpha *= np.sum(sample_weight) / n_samples
        # Ensure copying happens only once, don't do it again if done above.
        # X and y will be rescaled if sample_weight is not None, order='F'
        # ensures that the returned X and y are still F-contiguous.
        should_copy = self.copy_X and not X_copied
        X, y, X_offset, y_offset, X_scale, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=should_copy,
                     check_input=check_input, sample_weight=sample_weight)
        # coordinate descent needs F-ordered arrays and _pre_fit might have
        # called _rescale_data
        if check_input or sample_weight is not None:
            X, y = _set_order(X, y, order='F')
        # Promote y (and Xy) to 2D so the single- and multi-target cases
        # share one per-column loop below.
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]
        n_targets = y.shape[1]
        if self.selection not in ['cyclic', 'random']:
            raise ValueError("selection should be either random or cyclic.")
        # Warm start: reuse the previous coefficients as initialization,
        # otherwise start from zeros.
        if not self.warm_start or not hasattr(self, "coef_"):
            coef_ = np.zeros((n_targets, n_features), dtype=X.dtype,
                             order='F')
        else:
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]
        dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
        self.n_iter_ = []
        # Each target column is solved independently with a one-point
        # "path" containing only self.alpha.
        for k in range(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap, this_iter = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_offset=X_offset, X_scale=X_scale,
                          return_n_iter=True, coef_init=coef_[k],
                          max_iter=self.max_iter,
                          random_state=self.random_state,
                          selection=self.selection,
                          check_input=False)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
            self.n_iter_.append(this_iter[0])
        # Collapse back to the 1D representation for a single target.
        if n_targets == 1:
            self.n_iter_ = self.n_iter_[0]
            self.coef_ = coef_[0]
            self.dual_gap_ = dual_gaps_[0]
        else:
            self.coef_ = coef_
            self.dual_gap_ = dual_gaps_
        self._set_intercept(X_offset, y_offset, X_scale)
        # workaround since _set_intercept will cast self.coef_ into X.dtype
        self.coef_ = np.asarray(self.coef_, dtype=X.dtype)
        # return self for chaining fit and predict calls
        return self
    @property
    def sparse_coef_(self):
        """Sparse representation of the fitted `coef_`."""
        return sparse.csr_matrix(self.coef_)
    def _decision_function(self, X):
        """Decision function of the linear model.
        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
        Returns
        -------
        T : ndarray of shape (n_samples,)
            The predicted decision function.
        """
        check_is_fitted(self)
        if sparse.isspmatrix(X):
            # Sparse input takes the sparse-aware dot-product path;
            # dense input falls through to the parent implementation.
            return safe_sparse_dot(X, self.coef_.T,
                                   dense_output=True) + self.intercept_
        else:
            return super()._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    A Lasso is exactly an Elastic Net with ``l1_ratio=1.0``, i.e. with the
    L2 part of the penalty switched off.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    alpha : float, default=1.0
        Constant multiplying the L1 term. ``alpha = 0`` amounts to an
        ordinary least square; in that case prefer
        :class:`LinearRegression`, since coordinate descent is numerically
        unreliable without a penalty.

    fit_intercept : bool, default=True
        If False, no intercept is used in calculations and the data is
        expected to be centered already.

    normalize : bool, default=False
        Ignored when ``fit_intercept`` is False. If True, the regressors X
        are centered and divided by their l2-norm before the fit. For
        standardization, prefer
        :class:`~sklearn.preprocessing.StandardScaler` together with
        ``normalize=False``.

    precompute : bool or array-like of shape (n_features, n_features),\
                 default=False
        Use (or directly pass) a precomputed Gram matrix to speed up
        calculations. For sparse input this option is always ``False`` to
        preserve sparsity.

    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, default=1000
        The maximum number of iterations.

    tol : float, default=1e-4
        Optimization tolerance: once the updates fall below ``tol``, the
        dual gap is checked and iteration continues until it too drops
        below ``tol``.

    warm_start : bool, default=False
        When True, the solution of the previous ``fit`` call seeds the next
        one; otherwise the previous solution is erased.
        See :term:`the Glossary <warm_start>`.

    positive : bool, default=False
        When set to ``True``, forces the coefficients to be positive.

    random_state : int, RandomState instance, default=None
        Seed of the generator that picks the feature to update when
        ``selection`` == 'random'. Pass an int for reproducible output
        across multiple function calls.
        See :term:`Glossary <random_state>`.

    selection : {'cyclic', 'random'}, default='cyclic'
        With 'random', a random coefficient is updated at each iteration
        instead of sweeping the features in order, which often converges
        markedly faster when tol is above 1e-4.

    Attributes
    ----------
    coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the cost function formula).

    dual_gap_ : float or ndarray of shape (n_targets,)
        Dual gap(s) at the end of the optimization for the given alpha,
        same shape as each observation of y.

    sparse_coef_ : sparse matrix of shape (n_features, 1) or \
            (n_targets, n_features)
        Readonly property derived from ``coef_``.

    intercept_ : float or ndarray of shape (n_targets,)
        Independent term in decision function.

    n_iter_ : int or list of int
        Iterations run by the coordinate descent solver to reach the
        specified tolerance.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1)
    >>> print(clf.coef_)
    [0.85 0.  ]
    >>> print(clf.intercept_)
    0.15...

    See Also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The model is fitted by coordinate descent. To avoid unnecessary memory
    duplication, pass the X argument of ``fit`` directly as a
    Fortran-contiguous numpy array.
    """

    # Same solver path as ElasticNet; l1_ratio is pinned below.
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, *, fit_intercept=True, normalize=False,
                 precompute=False, copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # Delegate everything to ElasticNet with the mixing parameter
        # hard-wired to the pure-L1 end of the range.
        super().__init__(
            alpha=alpha, l1_ratio=1.0,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, copy_X=copy_X, max_iter=max_iter,
            tol=tol, warm_start=warm_start, positive=positive,
            random_state=random_state, selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'.
    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.
    y : array-like of shape (n_samples,) or (n_samples, n_targets)
        Target values.
    train : list of indices
        The indices of the train set.
    test : list of indices
        The indices of the test set.
    path : callable
        Function returning a list of models on the path. See
        enet_path for an example of signature.
    path_params : dictionary
        Parameters passed to the path function.
    alphas : array-like, default=None
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'.
    l1_ratio : float, default=1
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2.
    X_order : {'F', 'C'}, default=None
        The order of the arrays expected by the path function to
        avoid memory copies.
    dtype : a numpy dtype, default=None
        The dtype of the arrays expected by the path function to
        avoid memory copies.
    Returns
    -------
    this_mses : ndarray
        Mean squared error on the test fold for each alpha on the path,
        averaged over samples and targets.
    """
    # Split into train/test folds via fancy indexing.
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    if not sparse.issparse(X):
        for array, array_input in ((X_train, X), (y_train, y),
                                   (X_test, X), (y_test, y)):
            if array.base is not array_input and not array.flags['WRITEABLE']:
                # fancy indexing should create a writable copy but it doesn't
                # for read-only memmaps (cf. numpy#14132).
                array.setflags(write=True)
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']
    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False
    # Center/scale the training fold and (possibly) build Gram/Xy once.
    X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)
    # Copy before mutating: path_params is shared across parallel folds.
    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_offset'] = X_offset
    path_params['X_scale'] = X_scale
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas
    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio
    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, accept_sparse='csc', dtype=dtype,
                          order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    # Free the training fold before computing test predictions.
    del X_train, y_train
    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_offset = np.atleast_1d(y_offset)
        y_test = y_test[:, np.newaxis]
    if normalize:
        # Undo the feature scaling so coefs apply to the raw test fold.
        nonzeros = np.flatnonzero(X_scale)
        coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
    # Recover intercepts from the centering offsets, then score every
    # alpha at once: residues has shape (n_samples, n_targets, n_alphas).
    intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
    X_test_coefs = safe_sparse_dot(X_test, coefs)
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    # Mean over samples, then over targets -> one MSE per alpha.
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
    return this_mses
class LinearModelCV(MultiOutputMixin, LinearModel, metaclass=ABCMeta):
    """Base class for iterative model fitting along a regularization path.
    Concrete subclasses (e.g. LassoCV, ElasticNetCV) provide the path
    function, the refit estimator and whether the target is multi-task.
    """
    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=None,
                 positive=False, random_state=None, selection='cyclic'):
        # Hyper-parameters are stored verbatim (sklearn convention:
        # validation is deferred to fit()).
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
    @abstractmethod
    def _get_estimator(self):
        """Model to be fitted after the best alpha has been determined."""
    @abstractmethod
    def _is_multitask(self):
        """Bool indicating if class is meant for multidimensional target."""
    @staticmethod
    @abstractmethod
    def path(X, y, **kwargs):
        """Compute path with coordinate descent."""
    def fit(self, X, y):
        """Fit linear model with coordinate descent.
        Fit is on grid of alphas and best alpha estimated by cross-validation.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data. Pass directly as Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.
        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept
        check_y_params = dict(copy=False, dtype=[np.float64, np.float32],
                              ensure_2d=False)
        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            # Need to validate separately here.
            # We can't pass multi_ouput=True because that would allow y to be
            # csr. We also want to allow y to be 64 or 32 but check_X_y only
            # allows to convert for 64.
            check_X_params = dict(accept_sparse='csc',
                                  dtype=[np.float64, np.float32], copy=False)
            X, y = self._validate_data(X, y,
                                       validate_separately=(check_X_params,
                                                            check_y_params))
            if sparse.isspmatrix(X):
                if (hasattr(reference_to_old_X, "data") and
                        not np.may_share_memory(reference_to_old_X.data, X.data)):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            # Need to validate separately here.
            # We can't pass multi_ouput=True because that would allow y to be
            # csr. We also want to allow y to be 64 or 32 but check_X_y only
            # allows to convert for 64.
            check_X_params = dict(accept_sparse='csc',
                                  dtype=[np.float64, np.float32], order='F',
                                  copy=copy_X)
            X, y = self._validate_data(X, y,
                                       validate_separately=(check_X_params,
                                                            check_y_params))
            copy_X = False
        if y.shape[0] == 0:
            raise ValueError("y has 0 samples: %r" % y)
        # Reject target shapes that belong to the other (mono/multi task)
        # family of estimators.
        if not self._is_multitask():
            if y.ndim > 1 and y.shape[1] > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%s" % self.__class__.__name__)
            y = column_or_1d(y, warn=True)
        else:
            if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was"
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % self.__class__.__name__[9:])
        model = self._get_estimator()
        if self.selection not in ["random", "cyclic"]:
            raise ValueError("selection should be either random or cyclic.")
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            # One automatic alpha grid per l1_ratio.
            alphas = [_alpha_grid(X, y, l1_ratio=l1_ratio,
                                  fit_intercept=self.fit_intercept,
                                  eps=self.eps, n_alphas=self.n_alphas,
                                  normalize=self.normalize, copy_X=self.copy_X)
                      for l1_ratio in l1_ratios]
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})
        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if effective_n_jobs(self.n_jobs) > 1:
            path_params['copy_X'] = False
        # init cross-validation generator
        cv = check_cv(self.cv)
        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv.split(X, y))
        best_mse = np.inf
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=X.dtype.type)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             **_joblib_parallel_args(prefer="threads"))(jobs)
        # Reshape flat job results back into (l1_ratio, fold, alpha), then
        # average over folds to score each (l1_ratio, alpha) pair.
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        mean_mse = np.mean(mse_paths, axis=1)
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
        # Pick the (l1_ratio, alpha) pair with the lowest mean CV error.
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse
        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])
        # Refit the model with the parameters selected
        common_params = {name: value
                         for name, value in self.get_params().items()
                         if name in model.get_params()}
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        precompute = getattr(self, "precompute", None)
        if isinstance(precompute, str) and precompute == "auto":
            # The refit estimator does not understand 'auto'.
            model.precompute = False
        model.fit(X, y)
        if not hasattr(self, 'l1_ratio'):
            # l1_ratio_ only makes sense for estimators exposing l1_ratio.
            del self.l1_ratio_
        # Expose the refitted model's solution on this CV estimator.
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        self.n_iter_ = model.n_iter_
        return self
class LassoCV(RegressorMixin, LinearModelCV):
    """Lasso linear model with iterative fitting along a regularization path.

    See glossary entry for :term:`cross-validation estimator`.

    The best alpha is chosen by cross-validation over a (given or
    automatically computed) grid. The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    eps : float, default=1e-3
        Length of the path: ``eps=1e-3`` means
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, default=100
        Number of alphas along the regularization path.

    alphas : ndarray, default=None
        Explicit list of alphas to evaluate. If ``None``, the grid is set
        automatically.

    fit_intercept : bool, default=True
        If False, no intercept is used in calculations and the data is
        expected to be centered already.

    normalize : bool, default=False
        Ignored when ``fit_intercept`` is False. If True, the regressors X
        are centered and divided by their l2-norm before the fit. For
        standardization, prefer
        :class:`~sklearn.preprocessing.StandardScaler` together with
        ``normalize=False``.

    precompute : 'auto', bool or array-like of shape (n_features, n_features),\
                 default='auto'
        Use (or directly pass) a precomputed Gram matrix to speed up
        calculations; ``'auto'`` lets the estimator decide.

    max_iter : int, default=1000
        The maximum number of iterations.

    tol : float, default=1e-4
        Optimization tolerance: once the updates fall below ``tol``, the
        dual gap is checked and iteration continues until it too drops
        below ``tol``.

    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.

    cv : int, cross-validation generator or iterable, default=None
        Cross-validation splitting strategy. Accepted values:

        - None, to use the default 5-fold cross-validation,
        - int, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For int/None inputs, :class:`KFold` is used. See the
        :ref:`User Guide <cross_validation>` for the available strategies.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.

    verbose : bool or int, default=False
        Amount of verbosity.

    n_jobs : int, default=None
        Number of CPUs used during cross validation. ``None`` means 1
        unless in a :obj:`joblib.parallel_backend` context; ``-1`` uses all
        processors. See :term:`Glossary <n_jobs>`.

    positive : bool, default=False
        If positive, restrict regression coefficients to be positive.

    random_state : int, RandomState instance, default=None
        Seed of the generator that picks the feature to update when
        ``selection`` == 'random'. Pass an int for reproducible output
        across multiple function calls.
        See :term:`Glossary <random_state>`.

    selection : {'cyclic', 'random'}, default='cyclic'
        With 'random', a random coefficient is updated at each iteration
        instead of sweeping the features in order, which often converges
        markedly faster when tol is above 1e-4.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation.

    coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the cost function formula).

    intercept_ : float or ndarray of shape (n_targets,)
        Independent term in decision function.

    mse_path_ : ndarray of shape (n_alphas, n_folds)
        Mean square error for the test set on each fold, varying alpha.

    alphas_ : ndarray of shape (n_alphas,)
        The grid of alphas used for fitting.

    dual_gap_ : float or ndarray of shape (n_targets,)
        Dual gap at the end of the optimization for the optimal alpha
        (``alpha_``).

    n_iter_ : int
        Iterations run by the coordinate descent solver to reach the
        specified tolerance for the optimal alpha.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    Examples
    --------
    >>> from sklearn.linear_model import LassoCV
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(noise=4, random_state=0)
    >>> reg = LassoCV(cv=5, random_state=0).fit(X, y)
    >>> reg.score(X, y)
    0.9993...
    >>> reg.predict(X[:1,])
    array([-78.4951...])

    Notes
    -----
    For an example, see
    :ref:`examples/linear_model/plot_lasso_model_selection.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.
    To avoid unnecessary memory duplication, pass the X argument of ``fit``
    directly as a Fortran-contiguous numpy array.

    See Also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """

    # The regularization path used for cross-validation and refitting.
    path = staticmethod(lasso_path)

    def __init__(self, *, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=None,
                 positive=False, random_state=None, selection='cyclic'):
        # Every hyper-parameter is forwarded unchanged to LinearModelCV.
        super().__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol,
            copy_X=copy_X, cv=cv, verbose=verbose, n_jobs=n_jobs,
            positive=positive, random_state=random_state,
            selection=selection)

    def _get_estimator(self):
        # Refit estimator once the best alpha has been selected.
        return Lasso()

    def _is_multitask(self):
        # LassoCV handles mono-task targets only.
        return False

    def _more_tags(self):
        return {'multioutput': False}
class ElasticNetCV(RegressorMixin, LinearModelCV):
    """Elastic Net model with iterative fitting along a regularization path.
    See glossary entry for :term:`cross-validation estimator`.
    Read more in the :ref:`User Guide <elastic_net>`.
    Parameters
    ----------
    l1_ratio : float or list of float, default=0.5
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0``
        the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``.
    eps : float, default=1e-3
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, default=100
        Number of alphas along the regularization path, used for each l1_ratio.
    alphas : ndarray, default=None
        List of alphas where to compute the models.
        If None alphas are set automatically.
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).
    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    precompute : 'auto', bool or array-like of shape (n_features, n_features),\
        default='auto'
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : int, default=1000
        The maximum number of iterations.
    tol : float, default=1e-4
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : int, cross-validation generator or iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross-validation,
        - int, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For int/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.
    verbose : bool or int, default=0
        Amount of verbosity.
    n_jobs : int, default=None
        Number of CPUs to use during the cross validation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    positive : bool, default=False
        When set to ``True``, forces the coefficients to be positive.
    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator that selects a random
        feature to update. Used when ``selection`` == 'random'.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    selection : {'cyclic', 'random'}, default='cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation.
    l1_ratio_ : float
        The compromise between l1 and l2 penalization chosen by
        cross validation.
    coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    intercept_ : float or ndarray of shape (n_targets,)
        Independent term in the decision function.
    mse_path_ : ndarray of shape (n_l1_ratio, n_alpha, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.
    alphas_ : ndarray of shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.
    dual_gap_ : float
        The dual gap at the end of the optimization for the optimal alpha.
    n_iter_ : int
        Number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    Examples
    --------
    >>> from sklearn.linear_model import ElasticNetCV
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=2, random_state=0)
    >>> regr = ElasticNetCV(cv=5, random_state=0)
    >>> regr.fit(X, y)
    ElasticNetCV(cv=5, random_state=0)
    >>> print(regr.alpha_)
    0.199...
    >>> print(regr.intercept_)
    0.398...
    >>> print(regr.predict([[0, 0]]))
    [0.398...]
    Notes
    -----
    For an example, see
    :ref:`examples/linear_model/plot_lasso_model_selection.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::
        a * L1 + b * L2
    for::
        alpha = a + b and l1_ratio = a / (a + b).
    See Also
    --------
    enet_path
    ElasticNet
    """
    # Static path function used to compute the elastic-net
    # regularization path.
    path = staticmethod(enet_path)
    def __init__(self, *, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=None, positive=False, random_state=None,
                 selection='cyclic'):
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
    def _get_estimator(self):
        """Return a fresh single-task ElasticNet estimator."""
        return ElasticNet()
    def _is_multitask(self):
        """This estimator handles single-output targets only."""
        return False
    def _more_tags(self):
        """Estimator tags: single-output regression only."""
        return {'multioutput': False}
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
    """Multi-task ElasticNet model trained with L1/L2 mixed-norm as
    regularizer.
    The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
    Where::
        ||W||_21 = sum_i sqrt(sum_j W_ij ^ 2)
    i.e. the sum of norms of each row.
    Read more in the :ref:`User Guide <multi_task_elastic_net>`.
    Parameters
    ----------
    alpha : float, default=1.0
        Constant that multiplies the L1/L2 term. Defaults to 1.0.
    l1_ratio : float, default=0.5
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).
    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, default=1000
        The maximum number of iterations.
    tol : float, default=1e-4
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.
    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator that selects a random
        feature to update. Used when ``selection`` == 'random'.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    selection : {'cyclic', 'random'}, default='cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    Attributes
    ----------
    intercept_ : ndarray of shape (n_tasks,)
        Independent term in decision function.
    coef_ : ndarray of shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array.
        Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
    n_iter_ : int
        Number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    dual_gap_ : float
        The dual gap at the end of the optimization.
    eps_ : float
        The tolerance scaled by the variance of the target `y`.
    sparse_coef_ : sparse matrix of shape (n_features,) or \
        (n_tasks, n_features)
        Sparse representation of the `coef_`.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskElasticNet(alpha=0.1)
    >>> print(clf.coef_)
    [[0.45663524 0.45612256]
     [0.45663524 0.45612256]]
    >>> print(clf.intercept_)
    [0.0872422 0.0872422]
    See Also
    --------
    MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in
        cross-validation.
    ElasticNet
    MultiTaskLasso
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X and y arguments of the fit
    method should be directly passed as Fortran-contiguous numpy arrays.
    """
    def __init__(self, alpha=1.0, *, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False, random_state=None, selection='cyclic'):
        self.l1_ratio = l1_ratio
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y):
        """Fit MultiTaskElasticNet model with coordinate descent
        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Data.
        y : ndarray of shape (n_samples, n_tasks)
            Target. Will be cast to X's dtype if necessary.
        Returns
        -------
        self : object
            Fitted estimator.
        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # Need to validate separately here.
        # We can't pass multi_output=True because that would allow y to be csr.
        check_X_params = dict(dtype=[np.float64, np.float32], order='F',
                              copy=self.copy_X and self.fit_intercept)
        check_y_params = dict(ensure_2d=False, order='F')
        X, y = self._validate_data(X, y, validate_separately=(check_X_params,
                                                              check_y_params))
        y = y.astype(X.dtype)
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if y.ndim == 1:
            raise ValueError("For mono-task outputs, use %s" % model_str)
        n_samples, n_features = X.shape
        _, n_tasks = y.shape
        if n_samples != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (n_samples, y.shape[0]))
        X, y, X_offset, y_offset, X_scale = _preprocess_data(
            X, y, self.fit_intercept, self.normalize, copy=False)
        if not self.warm_start or not hasattr(self, "coef_"):
            self.coef_ = np.zeros((n_tasks, n_features), dtype=X.dtype.type,
                                  order='F')
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory
        if self.selection not in ['random', 'cyclic']:
            raise ValueError("selection should be either random or cyclic.")
        random = (self.selection == 'random')
        self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
                check_random_state(self.random_state), random)
        # account for different objective scaling here and in cd_fast
        self.dual_gap_ /= n_samples
        self._set_intercept(X_offset, y_offset, X_scale)
        # return self for chaining fit and predict calls
        return self
    def _more_tags(self):
        """Estimator tags: this model requires multi-output targets."""
        return {'multioutput_only': True}
class MultiTaskLasso(MultiTaskElasticNet):
    """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.
    The optimization objective for Lasso is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
    Where::
        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <multi_task_lasso>`.
    Parameters
    ----------
    alpha : float, default=1.0
        Constant that multiplies the L1/L2 term. Defaults to 1.0.
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).
    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, default=1000
        The maximum number of iterations.
    tol : float, default=1e-4
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.
    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator that selects a random
        feature to update. Used when ``selection`` == 'random'.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    selection : {'cyclic', 'random'}, default='cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    Attributes
    ----------
    coef_ : ndarray of shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
        Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
    intercept_ : ndarray of shape (n_tasks,)
        Independent term in decision function.
    n_iter_ : int
        Number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    dual_gap_ : float
        The dual gap at the end of the optimization.
    eps_ : float
        The tolerance scaled by the variance of the target `y`.
    sparse_coef_ : sparse matrix of shape (n_features,) or \
        (n_tasks, n_features)
        Sparse representation of the `coef_`.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0, 1], [1, 2], [2, 4]], [[0, 0], [1, 1], [2, 3]])
    MultiTaskLasso(alpha=0.1)
    >>> print(clf.coef_)
    [[0. 0.60809415]
     [0. 0.94592424]]
    >>> print(clf.intercept_)
    [-0.41888636 -0.87382323]
    See Also
    --------
    MultiTaskLassoCV : Multi-task L1/L2 Lasso with built-in cross-validation.
    Lasso
    MultiTaskElasticNet
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X and y arguments of the fit
    method should be directly passed as Fortran-contiguous numpy arrays.
    """
    def __init__(self, alpha=1.0, *, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        # Fixing l1_ratio at 1.0 reduces the parent's elastic-net penalty
        # to a pure L1/L2 mixed-norm (Lasso) penalty.
        self.l1_ratio = 1.0
        self.random_state = random_state
        self.selection = selection
class MultiTaskElasticNetCV(RegressorMixin, LinearModelCV):
    """Multi-task L1/L2 ElasticNet with built-in cross-validation.
    See glossary entry for :term:`cross-validation estimator`.
    The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
    Where::
        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <multi_task_elastic_net>`.
    .. versionadded:: 0.15
    Parameters
    ----------
    l1_ratio : float or list of float, default=0.5
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``.
    eps : float, default=1e-3
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, default=100
        Number of alphas along the regularization path.
    alphas : array-like, default=None
        List of alphas where to compute the models.
        If not provided, set automatically.
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).
    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    max_iter : int, default=1000
        The maximum number of iterations.
    tol : float, default=1e-4
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : int, cross-validation generator or iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross-validation,
        - int, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For int/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.
    verbose : bool or int, default=0
        Amount of verbosity.
    n_jobs : int, default=None
        Number of CPUs to use during the cross validation. Note that this is
        used only if multiple values for l1_ratio are given.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator that selects a random
        feature to update. Used when ``selection`` == 'random'.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    selection : {'cyclic', 'random'}, default='cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    Attributes
    ----------
    intercept_ : ndarray of shape (n_tasks,)
        Independent term in decision function.
    coef_ : ndarray of shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
        Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
    alpha_ : float
        The amount of penalization chosen by cross validation.
    mse_path_ : ndarray of shape (n_alphas, n_folds) or \
        (n_l1_ratio, n_alphas, n_folds)
        Mean square error for the test set on each fold, varying alpha.
    alphas_ : ndarray of shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.
    l1_ratio_ : float
        Best l1_ratio obtained by cross-validation.
    n_iter_ : int
        Number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.
    dual_gap_ : float
        The dual gap at the end of the optimization for the optimal alpha.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNetCV(cv=3)
    >>> clf.fit([[0,0], [1, 1], [2, 2]],
    ...         [[0, 0], [1, 1], [2, 2]])
    MultiTaskElasticNetCV(cv=3)
    >>> print(clf.coef_)
    [[0.52875032 0.46958558]
     [0.52875032 0.46958558]]
    >>> print(clf.intercept_)
    [0.00166409 0.00166409]
    See Also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskLassoCV
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X and y arguments of the fit
    method should be directly passed as Fortran-contiguous numpy arrays.
    """
    # Static path function used to compute the elastic-net
    # regularization path.
    path = staticmethod(enet_path)
    def __init__(self, *, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False,
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=None, random_state=None,
                 selection='cyclic'):
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.selection = selection
    def _get_estimator(self):
        """Return a fresh MultiTaskElasticNet estimator."""
        return MultiTaskElasticNet()
    def _is_multitask(self):
        """This estimator fits multi-output (multi-task) targets."""
        return True
    def _more_tags(self):
        """Estimator tags: this model requires multi-output targets."""
        return {'multioutput_only': True}
class MultiTaskLassoCV(RegressorMixin, LinearModelCV):
    """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.
    See glossary entry for :term:`cross-validation estimator`.
    The optimization objective for MultiTaskLasso is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + alpha * ||W||_21
    Where::
        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <multi_task_lasso>`.
    .. versionadded:: 0.15
    Parameters
    ----------
    eps : float, default=1e-3
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, default=100
        Number of alphas along the regularization path.
    alphas : array-like, default=None
        List of alphas where to compute the models.
        If not provided, set automatically.
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).
    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    max_iter : int, default=1000
        The maximum number of iterations.
    tol : float, default=1e-4
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.
    cv : int, cross-validation generator or iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross-validation,
        - int, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For int/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    verbose : bool or int, default=False
        Amount of verbosity.
    n_jobs : int, default=None
        Number of CPUs to use during the cross validation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator that selects a random
        feature to update. Used when ``selection`` == 'random'.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    selection : {'cyclic', 'random'}, default='cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    Attributes
    ----------
    intercept_ : ndarray of shape (n_tasks,)
        Independent term in decision function.
    coef_ : ndarray of shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
        Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
    alpha_ : float
        The amount of penalization chosen by cross validation.
    mse_path_ : ndarray of shape (n_alphas, n_folds)
        Mean square error for the test set on each fold, varying alpha.
    alphas_ : ndarray of shape (n_alphas,)
        The grid of alphas used for fitting.
    n_iter_ : int
        Number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.
    dual_gap_ : float
        The dual gap at the end of the optimization for the optimal alpha.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    Examples
    --------
    >>> from sklearn.linear_model import MultiTaskLassoCV
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.metrics import r2_score
    >>> X, y = make_regression(n_targets=2, noise=4, random_state=0)
    >>> reg = MultiTaskLassoCV(cv=5, random_state=0).fit(X, y)
    >>> r2_score(y, reg.predict(X))
    0.9994...
    >>> reg.alpha_
    0.5713...
    >>> reg.predict(X[:1,])
    array([[153.7971..., 94.9015...]])
    See Also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskElasticNetCV
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X and y arguments of the fit
    method should be directly passed as Fortran-contiguous numpy arrays.
    """
    # Static path function used to compute the Lasso regularization path.
    path = staticmethod(lasso_path)
    def __init__(self, *, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True,
                 normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
                 cv=None, verbose=False, n_jobs=None, random_state=None,
                 selection='cyclic'):
        super().__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
            selection=selection)
    def _get_estimator(self):
        """Return a fresh MultiTaskLasso estimator."""
        return MultiTaskLasso()
    def _is_multitask(self):
        """This estimator fits multi-output (multi-task) targets."""
        return True
    def _more_tags(self):
        """Estimator tags: this model requires multi-output targets."""
        return {'multioutput_only': True}
| {
"repo_name": "kevin-intel/scikit-learn",
"path": "sklearn/linear_model/_coordinate_descent.py",
"copies": "2",
"size": "93685",
"license": "bsd-3-clause",
"hash": 502482693768072700,
"line_mean": 36.5040032026,
"line_max": 81,
"alpha_frac": 0.5997758446,
"autogenerated": false,
"ratio": 3.984052732298533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5583828576898533,
"avg_score": null,
"num_lines": null
} |
import sys
import warnings
import itertools
import operator
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel
from ..base import RegressorMixin
from .base import sparse_center_data, center_data
from ..utils import array2d, atleast2d_or_csc, deprecated
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from . import cd_fast
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear Model trained with L1 and L2 prior as regularizer
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept: bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter: int, optional
The maximum number of iterations
copy_X : boolean, optional, default False
If ``True``, X will be copied; else, it may be overwritten.
tol: float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive: bool, optional
When set to ``True``, forces the coefficients to be positive.
Attributes
----------
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
``intercept_`` : float | array, shape = (n_targets,)
independent term in decision function.
``dual_gap_`` : float | array, shape = (n_targets,)
the current fit is guaranteed to be epsilon-suboptimal with
epsilon := ``dual_gap_``
``eps_`` : float | array, shape = (n_targets,)
``eps_`` is used to check if the fit converged to the requested
``tol``
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
rho=None):
self.alpha = alpha
self.l1_ratio = l1_ratio
if rho is not None:
self.l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
    def fit(self, X, y, Xy=None, coef_init=None):
        """Fit model with coordinate descent

        Parameters
        -----------
        X: ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data
        y: ndarray, shape = (n_samples,) or (n_samples, n_targets)
            Target
        Xy : array-like, optional
            Xy = np.dot(X.T, y) that can be precomputed. It is useful
            only when the Gram matrix is precomputed.
        coef_init: ndarray of shape n_features or (n_targets, n_features)
            The initial coefficients to warm-start the optimization

        Returns
        -------
        self : returns an instance of self.

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # alpha == 0 is unpenalized least squares; the coordinate-descent
        # solver is a poor fit for it, so steer users to LinearRegression.
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)
        # Fortran order is what the Cython solver iterates efficiently over;
        # copy only when we will center X in place (fit_intercept) and the
        # caller asked for copy_X.
        X = atleast2d_or_csc(X, dtype=np.float64, order='F',
                             copy=self.copy_X and self.fit_intercept)
        # From now on X can be touched inplace
        y = np.asarray(y, dtype=np.float64)
        # now all computation with X can be done inplace
        # Dispatch on sparsity; both paths share the same contract and set
        # coef_, dual_gap_, eps_ and intercept_ on self.
        fit = self._sparse_fit if sparse.isspmatrix(X) else self._dense_fit
        fit(X, y, Xy, coef_init)
        return self
    def _dense_fit(self, X, y, Xy=None, coef_init=None):
        """Coordinate-descent fit for dense X: one solver run per target.

        Sets ``coef_``, ``dual_gap_``, ``eps_`` and ``intercept_`` on self.
        """
        # copy was done in fit if necessary
        X, y, X_mean, y_mean, X_std = center_data(
            X, y, self.fit_intercept, self.normalize, copy=False)
        # Work internally with 2-d y / Xy (one column per target).
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]
        n_samples, n_features = X.shape
        n_targets = y.shape[1]
        precompute = self.precompute
        # A user-supplied Gram matrix is stale once X was centered/scaled:
        # fall back to recomputing it (and discard the matching Xy).
        if hasattr(precompute, '__array__') \
                and not np.allclose(X_mean, np.zeros(n_features)) \
                and not np.allclose(X_std, np.ones(n_features)):
            # recompute Gram
            precompute = 'auto'
            Xy = None
        coef_ = self._init_coef(coef_init, n_features, n_targets)
        dual_gap_ = np.empty(n_targets)
        eps_ = np.empty(n_targets)
        # Regularization strengths scaled by n_samples, as the Cython
        # solvers expect.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
        # precompute if n_samples > n_features
        if precompute == "auto" and n_samples > n_features:
            precompute = True
        if hasattr(precompute, '__array__'):
            Gram = precompute
        elif precompute:
            Gram = np.dot(X.T, X)
        else:
            Gram = None
        for k in xrange(n_targets):
            if Gram is None:
                coef_[k, :], dual_gap_[k], eps_[k] = \
                    cd_fast.enet_coordinate_descent(
                        coef_[k, :], l1_reg, l2_reg, X, y[:, k], self.max_iter,
                        self.tol, self.positive)
            else:
                # Copy per target: the solver may modify Gram in place
                # — presumably; confirm against cd_fast's contract.
                Gram = Gram.copy()
                if Xy is None:
                    this_Xy = np.dot(X.T, y[:, k])
                else:
                    this_Xy = Xy[:, k]
                coef_[k, :], dual_gap_[k], eps_[k] = \
                    cd_fast.enet_coordinate_descent_gram(
                        coef_[k, :], l1_reg, l2_reg, Gram, this_Xy, y[:, k],
                        self.max_iter, self.tol, self.positive)
            # Solver stopped at max_iter without closing the duality gap.
            if dual_gap_[k] > eps_[k]:
                warnings.warn('Objective did not converge for ' +
                              'target %d, you might want' % k +
                              ' to increase the number of iterations')
        # Squeeze back to 1-d for single-target fits.
        self.coef_, self.dual_gap_, self.eps_ = (np.squeeze(a) for a in
                                                 (coef_, dual_gap_, eps_))
        self._set_intercept(X_mean, y_mean, X_std)
        # return self for chaining fit and predict calls
        return self
    def _sparse_fit(self, X, y, Xy=None, coef_init=None):
        """Coordinate-descent fit for sparse (CSC) X, never densifying it.

        Sets ``coef_``, ``dual_gap_``, ``eps_`` and ``intercept_`` on self.
        ``Xy`` is accepted for signature parity with ``_dense_fit`` but is
        not used on this path.
        """
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have incompatible shapes.\n" +
                             "Note: Sparse matrices cannot be indexed w/" +
                             "boolean masks (use `indices=True` in CV).")
        # NOTE: we are explicitly not centering the data the naive way to
        # avoid breaking the sparsity of X
        X_data, y, X_mean, y_mean, X_std = sparse_center_data(
            X, y, self.fit_intercept, self.normalize)
        if y.ndim == 1:
            y = y[:, np.newaxis]
        n_samples, n_features = X.shape[0], X.shape[1]
        n_targets = y.shape[1]
        coef_ = self._init_coef(coef_init, n_features, n_targets)
        dual_gap_ = np.empty(n_targets)
        eps_ = np.empty(n_targets)
        # Regularization strengths scaled by n_samples, as the solver expects.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
        for k in xrange(n_targets):
            # X_mean / X_std is passed so the solver can account for the
            # centering/scaling without densifying X — presumably; confirm
            # against cd_fast.sparse_enet_coordinate_descent.
            coef_[k, :], dual_gap_[k], eps_[k] = \
                cd_fast.sparse_enet_coordinate_descent(
                    coef_[k, :], l1_reg, l2_reg, X_data, X.indices,
                    X.indptr, y[:, k], X_mean / X_std,
                    self.max_iter, self.tol, self.positive)
            if dual_gap_[k] > eps_[k]:
                warnings.warn('Objective did not converge for ' +
                              'target %d, you might want' % k +
                              ' to increase the number of iterations')
        # Squeeze back to 1-d for single-target fits.
        self.coef_, self.dual_gap_, self.eps_ = (np.squeeze(a) for a in
                                                 (coef_, dual_gap_, eps_))
        self._set_intercept(X_mean, y_mean, X_std)
        # return self for chaining fit and predict calls
        return self
def _init_coef(self, coef_init, n_features, n_targets):
if coef_init is None:
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64)
else:
coef_ = self.coef_
else:
coef_ = coef_init
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
if coef_.shape != (n_targets, n_features):
raise ValueError("X and coef_init have incompatible "
"shapes (%s != %s)."
% (coef_.shape, (n_targets, n_features)))
return coef_
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape = (n_samples,)
The predicted decision function
"""
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self).decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Technically the Lasso model is optimizing the same objective function as
    the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
    max_iter: int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    Attributes
    ----------
    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    ``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``
    ``intercept_`` : float | array, shape = (n_targets,)
        independent term in decision function.
    ``dual_gap_`` : float | array, shape = (n_targets,)
        the current fit is guaranteed to be epsilon-suboptimal with
        epsilon := ``dual_gap_``
    ``eps_`` : float | array, shape = (n_targets,)
        ``eps_`` is used to check if the fit converged to the requested
        ``tol``

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute='auto', tol=0.0001,
       warm_start=False)
    >>> print(clf.coef_)
    [ 0.85 0. ]
    >>> print(clf.intercept_)
    0.15

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute='auto', copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False):
        # Lasso is ElasticNet with l1_ratio pinned to 1.0 (pure L1 penalty).
        super(Lasso, self).__init__(
            alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start,
            positive=positive)
###############################################################################
# Classes to store linear models along a regularization path
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, fit_intercept=True,
               normalize=False, copy_X=True, verbose=False,
               **params):
    """Compute Lasso path with coordinate descent

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Parameters
    ----------
    X : ndarray, shape = (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication
    y : ndarray, shape = (n_samples,)
        Target values
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : ndarray, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.
    fit_intercept : bool
        Fit or not an intercept
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    verbose : bool or integer
        Amount of verbosity
    params : kwargs
        keyword arguments passed to the Lasso objects

    Returns
    -------
    models : a list of models along the regularization path

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path

    Examples
    ---------
    Comparing lasso_path and lars_path with interpolation:
    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> # Use lasso_path to compute a coefficient path
    >>> coef_path = [e.coef_ for e in lasso_path(X, y, alphas=[5., 1., .5], fit_intercept=False)]
    >>> print(np.array(coef_path).T)
    [[ 0. 0. 0.46874778]
    [ 0.2159048 0.4425765 0.23689075]]
    >>> # Now use lars_path and 1D linear interpolation to compute the
    >>> # same path
    >>> from sklearn.linear_model import lars_path
    >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
    >>> from scipy import interpolate
    >>> coef_path_continuous = interpolate.interp1d(alphas[::-1], coef_path_lars[:, ::-1])
    >>> print(coef_path_continuous([5., 1., .5]))
    [[ 0. 0. 0.46915237]
    [ 0.2159048 0.4425765 0.23668876]]

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # Thin wrapper: a Lasso path is an Elastic-Net path with l1_ratio=1
    # (pure L1 penalty); all other arguments are forwarded unchanged.
    return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     fit_intercept=fit_intercept, normalize=normalize,
                     copy_X=copy_X, verbose=verbose, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, fit_intercept=True,
              normalize=False, copy_X=True, verbose=False, rho=None,
              **params):
    """Compute Elastic-Net path with coordinate descent

    The Elastic Net optimization function is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    Parameters
    ----------
    X : ndarray, shape = (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication
    y : ndarray, shape = (n_samples,)
        Target values
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.
    fit_intercept : bool
        Fit or not an intercept
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    verbose : bool or integer
        Amount of verbosity
    rho : float, optional
        Deprecated alias for ``l1_ratio``; overrides it when given.
    params : kwargs
        keyword arguments passed to the Lasso objects

    Returns
    -------
    models : a list of models along the regularization path

    Notes
    -----
    See examples/plot_lasso_coordinate_descent_path.py for an example.

    See also
    --------
    ElasticNet
    ElasticNetCV
    """
    if rho is not None:
        l1_ratio = rho
        warnings.warn("rho was renamed to l1_ratio and will be removed "
                      "in 0.15", DeprecationWarning)
    X = atleast2d_or_csc(X, dtype=np.float64, order='F',
                         copy=copy_X and fit_intercept)
    # From now on X can be touched inplace
    if not sparse.isspmatrix(X):
        X, y, X_mean, y_mean, X_std = center_data(X, y, fit_intercept,
                                                  normalize, copy=False)
        # XXX : in the sparse case the data will be centered
        # at each fit...
        n_samples, n_features = X.shape
        # A user-supplied Gram matrix is stale once X was centered/scaled:
        # fall back to recomputing it and discard the matching Xy.
        if (hasattr(precompute, '__array__')
                and not np.allclose(X_mean, np.zeros(n_features))
                and not np.allclose(X_std, np.ones(n_features))):
            # recompute Gram
            precompute = 'auto'
            Xy = None
    # NOTE(review): when precompute is the default 'auto', the non-empty
    # string is truthy, so the left operand of this `or` is True and the
    # `(n_samples > n_features)` clause is never evaluated — dead code.
    # That short-circuit is also what keeps the sparse path from hitting
    # the unbound n_samples/n_features (only assigned in the dense branch).
    # Confirm intent before changing; behavior is relied on below.
    if precompute or ((precompute == 'auto') and (n_samples > n_features)):
        if sparse.isspmatrix(X):
            warnings.warn("precompute is ignored for sparse data")
            precompute = False
        else:
            precompute = np.dot(X.T, X)
    if Xy is None:
        Xy = safe_sparse_dot(X.T, y, dense_output=True)
    n_samples = X.shape[0]
    if alphas is None:
        # Build a decreasing, log-spaced grid from alpha_max (derived from
        # max |X^T y|) down to eps * alpha_max.
        alpha_max = np.abs(Xy).max() / (n_samples * l1_ratio)
        alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                             num=n_alphas)[::-1]
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered
    coef_ = None  # init coef_
    models = []
    n_alphas = len(alphas)
    for i, alpha in enumerate(alphas):
        # For dense X the data was already centered above, so each model is
        # fitted without an intercept and the intercept is restored below.
        model = ElasticNet(
            alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept if sparse.isspmatrix(X) else False,
            precompute=precompute)
        model.set_params(**params)
        # Warm-start each fit with the previous (larger) alpha's solution.
        model.fit(X, y, coef_init=coef_, Xy=Xy)
        if fit_intercept and not sparse.isspmatrix(X):
            model.fit_intercept = True
            model._set_intercept(X_mean, y_mean, X_std)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')
        coef_ = model.coef_.copy()
        models.append(model)
    return models
def _path_residuals(X, y, train, test, path, path_params, l1_ratio=1):
    """Fit a regularization path on one CV fold and score it on the test rows.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Full design matrix; indexed with ``train``/``test``.
    y : array-like, shape (n_samples,)
        Full target vector.
    train, test : array-like of indices
        Row indices defining the cross-validation fold.
    path : callable
        Path function (e.g. ``lasso_path`` / ``enet_path``) returning a
        list of fitted models, one per alpha.
    path_params : dict
        Keyword arguments forwarded to ``path``.  NOTE: mutated in place —
        its 'l1_ratio' entry, when present, is overwritten with ``l1_ratio``.
    l1_ratio : float, optional
        L1/L2 mixing parameter used for this call.

    Returns
    -------
    this_mses : ndarray, shape (n_models,)
        Mean squared error on the test fold for each model along the path.
    l1_ratio : float
        Echoed back so parallel callers can group results by l1_ratio.
    """
    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio
    models_train = path(X[train], y[train], **path_params)
    # (Removed a dead `this_mses = list()` that was immediately overwritten.)
    this_mses = np.empty(len(models_train))
    for i_model, model in enumerate(models_train):
        y_ = model.predict(X[test])
        this_mses[i_model] = ((y_ - y[test]) ** 2).mean()
    return this_mses, l1_ratio
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path

    Subclasses supply a ``path`` static method (e.g. ``lasso_path`` or
    ``enet_path``) and an ``n_jobs`` attribute; ``fit`` sweeps the alpha
    grid on each CV fold and keeps the model with the lowest test MSE.
    """
    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False):
        # Plain parameter storage; abstract so only subclasses instantiate.
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
    def fit(self, X, y):
        """Fit linear model with coordinate descent

        Fit is on grid of alphas and best alpha estimated by cross-validation.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data. Pass directly as Fortran-contiguous data to avoid
            unnecessary memory duplication
        y : ndarray, shape (n_samples,) or (n_samples, n_targets)
            Target values

        Returns
        -------
        self : object
        """
        # We avoid copying X for now to save memory; X will be copied
        # inside the cross-validation loop when needed (see copy_X below).
        X = atleast2d_or_csc(X, dtype=np.float64, order='F',
                             copy=self.copy_X and self.fit_intercept)
        # From now on X can be touched inplace
        y = np.asarray(y, dtype=np.float64)
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        # We can modify X inplace
        path_params['copy_X'] = False
        # Start to compute path on full data
        # XXX: is this really useful: we are fitting models that we won't
        # use later
        models = self.path(X, y, **path_params)
        # Update the alphas list
        alphas = [model.alpha for model in models]
        n_alphas = len(alphas)
        path_params.update({'alphas': alphas, 'n_alphas': n_alphas})
        # If we are not computing in parallel, we don't want to modify X
        # inplace in the folds
        if self.n_jobs == 1 or self.n_jobs is None:
            path_params['copy_X'] = True
        # init cross-validation generator
        cv = check_cv(self.cv, X)
        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv)
        best_mse = np.inf
        all_mse_paths = list()
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds.
        # Each worker returns (mse_per_alpha, l1_ratio); itemgetter(1)
        # groups the ordered results by their l1_ratio value.
        for l1_ratio, mse_alphas in itertools.groupby(
                Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
                    delayed(_path_residuals)(
                        X, y, train, test, self.path, path_params,
                        l1_ratio=l1_ratio)
                    for l1_ratio in l1_ratios for train, test in folds
                ), operator.itemgetter(1)):
            # Keep only the MSE arrays (index 0 of each worker tuple).
            mse_alphas = [m[0] for m in mse_alphas]
            mse_alphas = np.array(mse_alphas)
            # Average over folds, then pick the best alpha for this l1_ratio.
            mse = np.mean(mse_alphas, axis=0)
            i_best_alpha = np.argmin(mse)
            this_best_mse = mse[i_best_alpha]
            all_mse_paths.append(mse_alphas.T)
            if this_best_mse < best_mse:
                model = models[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse
        if hasattr(model, 'l1_ratio'):
            if model.l1_ratio != best_l1_ratio:
                # Need to refit the model
                model.l1_ratio = best_l1_ratio
                model.fit(X, y)
            self.l1_ratio_ = model.l1_ratio
        # Expose the winning model's parameters and the full search record.
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.alpha_ = model.alpha
        self.alphas_ = np.asarray(alphas)
        self.coef_path_ = np.asarray([model.coef_ for model in models])
        self.mse_path_ = np.squeeze(all_mse_paths)
        return self
    @property
    def rho_(self):
        # Deprecated alias kept for backward compatibility with 'rho'.
        warnings.warn("rho was renamed to ``l1_ratio_`` and will be removed "
                      "in 0.15", DeprecationWarning)
        return self.l1_ratio_
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : numpy array, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter: int, optional
        The maximum number of iterations
    tol: float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : integer or crossvalidation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific crossvalidation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible objects.
    verbose : bool or integer
        amount of verbosity

    Attributes
    ----------
    ``alpha_`` : float
        The amount of penalization chosen by cross validation
    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    ``intercept_`` : float | array, shape = (n_targets,)
        independent term in decision function.
    ``mse_path_`` : array, shape = (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha
    ``alphas_`` : numpy array
        The grid of alphas used for fitting

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # Path function swept by LinearModelCV.fit.  Lasso has no l1_ratio
    # grid to parallelize over, so n_jobs is fixed at the class level.
    path = staticmethod(lasso_path)
    n_jobs = 1
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False):
        super(LassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    Parameters
    ----------
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0``
        the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : integer or crossvalidation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific crossvalidation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible objects.
    verbose : bool or integer
        amount of verbosity
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.

    Attributes
    ----------
    ``alpha_`` : float
        The amount of penalization chosen by cross validation
    ``l1_ratio_`` : float
        The compromise between l1 and l2 penalization chosen by
        cross validation
    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula),
    ``intercept_`` : float | array, shape = (n_targets, n_features)
        Independent term in the decision function.
    ``mse_path_`` : array, shape = (n_l1_ratio, n_alpha, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and l1_ratio = a / (a + b).

    See also
    --------
    enet_path
    ElasticNet
    """
    # Path function swept by LinearModelCV.fit over the alpha grid.
    path = staticmethod(enet_path)
    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, rho=None):
        self.l1_ratio = l1_ratio
        # 'rho' is the deprecated alias of 'l1_ratio'; honour it with a warning.
        if rho is not None:
            self.l1_ratio = rho
            warnings.warn("rho was renamed to l1_ratio and will be removed "
                          "in 0.15", DeprecationWarning)
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
    @property
    @deprecated("rho was renamed to ``l1_ratio_`` and will be removed "
                "in 0.15")
    def rho(self):
        # Backward-compatible alias for the fitted ``l1_ratio_`` attribute.
        return self.l1_ratio_
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
    """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0
    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    Attributes
    ----------
    ``intercept_`` : array, shape = (n_tasks,)
        Independent term in decision function.
    ``coef_`` : array, shape = (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is \
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
            l1_ratio=0.5, max_iter=1000, normalize=False, rho=None, tol=0.0001,
            warm_start=False)
    >>> print(clf.coef_)
    [[ 0.45663524 0.45612256]
    [ 0.45663524 0.45612256]]
    >>> print(clf.intercept_)
    [ 0.0872422 0.0872422]

    See also
    --------
    ElasticNet, MultiTaskLasso

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False, rho=None):
        # NOTE(review): deliberately does not call super().__init__ —
        # only the parameters used by the multi-task solver are stored.
        self.l1_ratio = l1_ratio
        # 'rho' is the deprecated alias of 'l1_ratio'.
        if rho is not None:
            self.l1_ratio = rho
            warnings.warn("rho was renamed to l1_ratio and will be removed "
                          "in 0.15", DeprecationWarning)
        self.alpha = alpha
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
    def fit(self, X, y, Xy=None, coef_init=None):
        """Fit multi-task model with coordinate descent

        Parameters
        -----------
        X: ndarray, shape = (n_samples, n_features)
            Data
        y: ndarray, shape = (n_samples, n_tasks)
            Target
        Xy : ignored
            Accepted for signature compatibility with ElasticNet.fit;
            not used on this code path.
        coef_init: ndarray of shape n_features
            The initial coefficients to warm-start the optimization

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # X and y must be of type float64
        X = array2d(X, dtype=np.float64, order='F',
                    copy=self.copy_X and self.fit_intercept)
        y = np.asarray(y, dtype=np.float64)
        # A 1-d y is treated as a single task; remember to squeeze back.
        squeeze_me = False
        if y.ndim == 1:
            squeeze_me = True
            y = y[:, np.newaxis]
        n_samples, n_features = X.shape
        _, n_tasks = y.shape
        X, y, X_mean, y_mean, X_std = center_data(
            X, y, self.fit_intercept, self.normalize, copy=False)
        # Starting point: explicit coef_init, previous solution (warm
        # start), or zeros.
        if coef_init is None:
            if not self.warm_start or self.coef_ is None:
                self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
                                      order='F')
        else:
            self.coef_ = coef_init
        # Regularization strengths scaled by n_samples, as the solver expects.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory
        self.coef_, self.dual_gap_, self.eps_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol)
        self._set_intercept(X_mean, y_mean, X_std)
        # Make sure that the coef_ have the same shape as the given 'y',
        # to predict with the same shape
        if squeeze_me:
            self.coef_ = self.coef_.squeeze()
        # Solver stopped at max_iter without closing the duality gap.
        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')
        # return self for chaining fit and predict calls
        return self
class MultiTaskLasso(MultiTaskElasticNet):
    """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    Attributes
    ----------
    ``coef_`` : array, shape = (n_tasks, n_features)
        parameter vector (W in the cost function formula)
    ``intercept_`` : array, shape = (n_tasks,)
        independent term in decision function.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
            normalize=False, tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [[ 0.89393398  0.        ]
     [ 0.89393398  0.        ]]
    >>> print(clf.intercept_)
    [ 0.10606602  0.10606602]

    See also
    --------
    Lasso, MultiTaskElasticNet

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False):
        # Penalty strength and (empty) warm-start state.
        self.alpha = alpha
        self.coef_ = None
        # Data preprocessing options.
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        # Solver options.
        self.max_iter = max_iter
        self.tol = tol
        self.warm_start = warm_start
        # The multi-task lasso is the l1_ratio == 1 special case of the
        # multi-task elastic net: a pure L1/L2 mixed-norm penalty.
        self.l1_ratio = 1.0
| {
"repo_name": "kmike/scikit-learn",
"path": "sklearn/linear_model/coordinate_descent.py",
"copies": "2",
"size": "47509",
"license": "bsd-3-clause",
"hash": -4343959595309814300,
"line_mean": 34.5872659176,
"line_max": 97,
"alpha_frac": 0.585741649,
"autogenerated": false,
"ratio": 3.9018561103810776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00012609245710726004,
"num_lines": 1335
} |
import sys
import warnings
import itertools
import operator
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import LinearModel
from ..base import RegressorMixin
from .base import sparse_center_data
from ..utils import as_float_array
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..utils.extmath import safe_sparse_dot
from . import cd_fast
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
    """Linear Model trained with L1 and L2 prior as regularizer

    Minimizes the objective function::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * rho * ||w||_1 + 0.5 * alpha * (1 - rho) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    where::

        alpha = a + b and rho = a / (a + b)

    The parameter rho corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, rho =
    1 is the lasso penalty. Currently, rho <= 0.01 is not reliable, unless
    you supply your own sequence of alpha.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty terms. Defaults to 1.0
        See the notes for the exact mathematical meaning of this
        parameter
    rho : float
        The ElasticNet mixing parameter, with 0 < rho <= 1. For rho = 0
        the penalty is an L1 penalty. For rho = 1 it is an L2 penalty.
        For 0 < rho < 1, the penalty is a combination of L1 and L2
    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered.
    normalize : boolean, optional
        If True, the regressors X are normalized
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always True to preserve sparsity.
    max_iter : int, optional
        The maximum number of iterations
    copy_X : boolean, optional, default False
        If True, X will be copied; else, it may be overwritten.
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than 'tol', the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than tol.
    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    positive : bool, optional
        When set to True, forces the coefficients to be positive.

    Attributes
    ----------
    coef_ : array, shape = [n_features]
        parameter vector (w in the cost function formula)
    sparse_coef_ : scipy.sparse matrix, shape = [n_features, 1]
        readonly property derived from coef_
    intercept_ : float
        independent term in decision function.

    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a fortran contiguous numpy array.
    """
    def __init__(self, alpha=1.0, rho=0.5, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False):
        self.alpha = alpha
        self.rho = rho
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.intercept_ = 0.0

    def fit(self, X, y, Xy=None, coef_init=None):
        """Fit Elastic Net model with coordinate descent

        Parameters
        ----------
        X: ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data
        y: ndarray, (n_samples)
            Target
        Xy : array-like, optional
            Xy = np.dot(X.T, y) that can be precomputed. It is useful
            only when the Gram matrix is precomputed.
        coef_init: ndarray of shape n_features
            The initial coefficients to warm-start the optimization

        Returns
        -------
        self : object
            The fitted estimator.

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a fortran contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # Sparse input gets a dedicated solver that preserves sparsity.
        fit = self._sparse_fit if sp.isspmatrix(X) else self._dense_fit
        fit(X, y, Xy, coef_init)
        return self

    def _dense_fit(self, X, y, Xy=None, coef_init=None):
        # X and y must be of type float64
        X = np.asanyarray(X, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        n_samples, n_features = X.shape
        X_init = X
        X, y, X_mean, y_mean, X_std = self._center_data(X, y,
                self.fit_intercept, self.normalize, copy=self.copy_X)
        precompute = self.precompute
        if X_init is not X and hasattr(precompute, '__array__'):
            # Centering/copying changed X, so a user-supplied Gram matrix
            # is stale: recompute it.
            # FIXME: it could be updated from precompute and X_mean
            # instead of recomputed
            precompute = 'auto'
        if X_init is not X and Xy is not None:
            Xy = None  # recompute Xy
        if coef_init is None:
            # Start from zeros unless warm-starting from a previous fit.
            if not self.warm_start or self.coef_ is None:
                self.coef_ = np.zeros(n_features, dtype=np.float64)
        else:
            if coef_init.shape[0] != X.shape[1]:
                raise ValueError("X and coef_init have incompatible " +
                                 "shapes.")
            self.coef_ = coef_init
        # Penalties are pre-scaled by n_samples, as the Cython solver expects.
        alpha = self.alpha * self.rho * n_samples
        beta = self.alpha * (1.0 - self.rho) * n_samples
        X = np.asfortranarray(X)  # make data contiguous in memory
        # precompute the Gram matrix if requested, or automatically when
        # n_samples > n_features (then X.T * X is the smaller problem)
        if hasattr(precompute, '__array__'):
            Gram = precompute
        elif precompute is True or \
                (precompute == 'auto' and n_samples > n_features):
            Gram = np.dot(X.T, X)
        else:
            Gram = None
        if Gram is None:
            self.coef_, self.dual_gap_, self.eps_ = \
                cd_fast.enet_coordinate_descent(self.coef_, alpha, beta,
                        X, y, self.max_iter, self.tol, self.positive)
        else:
            if Xy is None:
                Xy = np.dot(X.T, y)
            self.coef_, self.dual_gap_, self.eps_ = \
                cd_fast.enet_coordinate_descent_gram(self.coef_, alpha,
                        beta, Gram, Xy, y, self.max_iter, self.tol,
                        self.positive)
        self._set_intercept(X_mean, y_mean, X_std)
        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')
        # return self for chaining fit and predict calls
        return self

    def _sparse_fit(self, X, y, Xy=None, coef_init=None):
        # BUGFIX: this test used to read np.issubdtype(np.float64, X),
        # which passes the operands in the wrong order; check the dtype
        # of the matrix instead.
        if not sp.isspmatrix_csc(X) or not np.issubdtype(X.dtype,
                                                         np.float64):
            X = sp.csc_matrix(X, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have incompatible shapes.\n" +
                             "Note: Sparse matrices cannot be indexed w/" +
                             "boolean masks (use `indices=True` in CV).")
        # NOTE: we are explicitly not centering the data the naive way to
        # avoid breaking the sparsity of X
        n_samples, n_features = X.shape[0], X.shape[1]
        # BUGFIX: the previous combined condition fell through to
        # coef_init.shape when coef_init was None but a warm start was
        # available, raising AttributeError; mirror _dense_fit instead.
        if coef_init is None:
            if not self.warm_start or self.coef_ is None:
                self.coef_ = np.zeros(n_features, dtype=np.float64)
        else:
            if coef_init.shape[0] != X.shape[1]:
                raise ValueError("X and coef_init have incompatible " +
                                 "shapes.")
            self.coef_ = coef_init
        alpha = self.alpha * self.rho * n_samples
        beta = self.alpha * (1.0 - self.rho) * n_samples
        X_data, y, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                        self.fit_intercept,
                                                        self.normalize)
        self.coef_, self.dual_gap_, self.eps_ = \
            cd_fast.sparse_enet_coordinate_descent(
                self.coef_, alpha, beta, X_data, X.indices,
                X.indptr, y, X_mean / X_std,
                self.max_iter, self.tol, self.positive)
        self._set_intercept(X_mean, y_mean, X_std)
        if self.dual_gap_ > self.eps_:
            # BUGFIX: restore the missing space between the two string
            # fragments of the warning message.
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')
        # return self for chaining fit and predict calls
        return self

    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted coef """
        return sp.csr_matrix(self.coef_)

    def decision_function(self, X):
        """Decision function of the linear model

        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape [n_samples, n_features]

        Returns
        -------
        array, shape = [n_samples] with the predicted real values
        """
        if sp.isspmatrix(X):
            # Sparse path: avoid densifying X; coef_ * X.T is computed
            # with a sparse-aware dot product.
            return np.ravel(safe_sparse_dot(self.coef_, X.T,
                                            dense_output=True)
                            + self.intercept_)
        else:
            return super(ElasticNet, self).decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Technically the Lasso model is optimizing the same objective function as
    the Elastic Net with rho=1.0 (no L2 penalty).

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional
        If True, the regressors X are normalized
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always True to preserve sparsity.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than 'tol', the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than tol.
    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    positive : bool, optional
        When set to True, forces the coefficients to be positive.

    Attributes
    ----------
    `coef_` : array, shape = [n_features]
        parameter vector (w in the cost function formula)
    sparse_coef_ : scipy.sparse matrix, shape = [n_features, 1]
        readonly property derived from coef_
    `intercept_` : float
        independent term in decision function.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute='auto', tol=0.0001,
       warm_start=False)
    >>> print(clf.coef_)
    [ 0.85  0.  ]
    >>> print(clf.intercept_)
    0.15

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a fortran contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute='auto', copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False):
        # The lasso is exactly the elastic net with a pure L1 penalty,
        # i.e. the rho == 1.0 special case: delegate everything.
        super(Lasso, self).__init__(
            alpha=alpha, rho=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start,
            positive=positive)
###############################################################################
# Classes to store linear models along a regularization path
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, fit_intercept=True,
               normalize=False, copy_X=True, verbose=False,
               **params):
    """Compute Lasso path with coordinate descent

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Parameters
    ----------
    X : numpy array of shape [n_samples, n_features]
        Training data. Pass directly as fortran contiguous data to avoid
        unnecessary memory duplication
    y : numpy array of shape [n_samples]
        Target values
    eps : float, optional
        Length of the path. eps=1e-3 means that
        alpha_min / alpha_max = 1e-3
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument.
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.
    fit_intercept : bool
        Fit or not an intercept
    normalize : boolean, optional
        If True, the regressors X are normalized
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    verbose : bool or integer
        Amount of verbosity
    params : kwargs
        keyword arguments passed to the Lasso objects

    Returns
    -------
    models : a list of models along the regularization path

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a fortran contiguous numpy array.

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # The lasso path is the elastic net path with a pure L1 penalty
    # (rho == 1); everything else is forwarded untouched.
    return enet_path(X, y, rho=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     fit_intercept=fit_intercept, normalize=normalize,
                     copy_X=copy_X, verbose=verbose, **params)
def enet_path(X, y, rho=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, fit_intercept=True,
              normalize=False, copy_X=True, verbose=False,
              **params):
    """Compute Elastic-Net path with coordinate descent

    The Elastic Net optimization function is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * rho * ||w||_1 + 0.5 * alpha * (1 - rho) * ||w||^2_2

    Parameters
    ----------
    X : numpy array of shape [n_samples, n_features]
        Training data. Pass directly as fortran contiguous data to avoid
        unnecessary memory duplication
    y : numpy array of shape [n_samples]
        Target values
    rho : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). rho=1 corresponds to the Lasso
    eps : float
        Length of the path. eps=1e-3 means that
        alpha_min / alpha_max = 1e-3
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument.
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.
    fit_intercept : bool
        Fit or not an intercept
    normalize : boolean, optional
        If True, the regressors X are normalized
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    verbose : bool or integer
        Amount of verbosity
    params : kwargs
        keyword arguments passed to the ElasticNet objects

    Returns
    -------
    models : a list of models along the regularization path

    Notes
    -----
    See examples/plot_lasso_coordinate_descent_path.py for an example.

    See also
    --------
    ElasticNet
    ElasticNetCV
    """
    X = as_float_array(X, copy_X)
    X_init = X
    X, y, X_mean, y_mean, X_std = LinearModel._center_data(X, y,
                                                           fit_intercept,
                                                           normalize,
                                                           copy=False)
    X = np.asfortranarray(X)  # make data contiguous in memory
    n_samples, n_features = X.shape
    # If centering/conversion produced a new X, any user-supplied Gram
    # matrix or Xy product is stale and must be recomputed.
    if X_init is not X and hasattr(precompute, '__array__'):
        precompute = 'auto'
    if X_init is not X and Xy is not None:
        Xy = None
    # BUGFIX: this condition used to read "'precompute' is True", i.e. it
    # compared the *string literal* 'precompute' (always False), so an
    # explicit precompute=True was silently ignored.
    if precompute is True or \
            (precompute == 'auto' and n_samples > n_features):
        precompute = np.dot(X.T, X)
    if Xy is None:
        Xy = np.dot(X.T, y)
    if alphas is None:
        # Geometric grid from alpha_max (smallest alpha zeroing all
        # coefficients) down to eps * alpha_max, visited in decreasing order.
        alpha_max = np.abs(Xy).max() / (n_samples * rho)
        alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                             num=n_alphas)[::-1]
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered
    coef_ = None  # init coef_
    models = []
    n_alphas = len(alphas)
    for i, alpha in enumerate(alphas):
        model = ElasticNet(alpha=alpha, rho=rho, fit_intercept=False,
                           precompute=precompute)
        model.set_params(**params)
        # Warm-start each fit from the previous alpha's solution.
        model.fit(X, y, coef_init=coef_, Xy=Xy)
        if fit_intercept:
            model.fit_intercept = True
            model._set_intercept(X_mean, y_mean, X_std)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')
        coef_ = model.coef_.copy()
        models.append(model)
    return models
def _path_residuals(X, y, train, test, path, path_params, rho=1):
this_mses = list()
if 'rho' in path_params:
path_params['rho'] = rho
models_train = path(X[train], y[train], **path_params)
this_mses = np.empty(len(models_train))
for i_model, model in enumerate(models_train):
y_ = model.predict(X[test])
this_mses[i_model] = ((y_ - y[test]) ** 2).mean()
return this_mses, rho
class LinearModelCV(LinearModel):
    """Base class for iterative model fitting along a regularization path

    Subclasses provide a ``path`` static method (e.g. ``lasso_path``) that
    computes a list of fitted models along a grid of alphas; ``fit`` then
    selects the best alpha (and rho, when applicable) by cross-validation.
    """
    # Py2-style abstract base class declaration.
    __metaclass__ = ABCMeta
    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False):
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
    def fit(self, X, y):
        """Fit linear model with coordinate descent along decreasing alphas
        using cross-validation

        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data. Pass directly as fortran contiguous data to avoid
            unnecessary memory duplication
        y : numpy array of shape [n_samples]
            Target values

        Returns
        -------
        self : object
            The fitted estimator, with ``coef_``, ``intercept_``, ``alpha``
            and ``mse_path_`` populated.
        """
        X = np.asfortranarray(X, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'rho' in path_params:
            # 'rho' may be a list of candidate values to cross-validate over.
            rhos = np.atleast_1d(path_params['rho'])
            # For the first path, we need to set rho
            path_params['rho'] = rhos[0]
        else:
            rhos = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        # Start to compute path on full data
        # XXX: is this really useful: we are fitting models that we won't
        # use later
        models = self.path(X, y, **path_params)
        # Update the alphas list
        alphas = [model.alpha for model in models]
        n_alphas = len(alphas)
        path_params.update({'alphas': alphas, 'n_alphas': n_alphas})
        # init cross-validation generator
        cv = check_cv(self.cv, X)
        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv)
        best_mse = np.inf
        all_mse_paths = list()
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on rho and folds.
        # _path_residuals returns (mse_per_alpha, rho); grouping on item 1
        # collects the consecutive results of all folds sharing one rho.
        for rho, mse_alphas in itertools.groupby(
                Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
                    delayed(_path_residuals)(X, y, train, test,
                                self.path, path_params, rho=rho)
                        for rho in rhos for train, test in folds
                ), operator.itemgetter(1)):
            # Average the per-fold MSE curves and locate the best alpha
            # for this rho.
            mse_alphas = [m[0] for m in mse_alphas]
            mse_alphas = np.array(mse_alphas)
            mse = np.mean(mse_alphas, axis=0)
            i_best_alpha = np.argmin(mse)
            this_best_mse = mse[i_best_alpha]
            all_mse_paths.append(mse_alphas.T)
            if this_best_mse < best_mse:
                model = models[i_best_alpha]
                best_rho = rho
        if hasattr(model, 'rho'):
            if model.rho != best_rho:
                # Need to refit the model (the full-data path above was
                # computed with rhos[0] only)
                model.rho = best_rho
                model.fit(X, y)
            self.rho_ = model.rho
        # Expose the winning model's parameters on this estimator.
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.alpha = model.alpha
        self.alphas = np.asarray(alphas)
        self.coef_path_ = np.asarray([model.coef_ for model in models])
        self.mse_path_ = np.squeeze(all_mse_paths)
        return self
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Parameters
    ----------
    eps : float, optional
        Length of the path. eps=1e-3 means that
        alpha_min / alpha_max = 1e-3.
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than 'tol', the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than tol.
    cv : integer or crossvalidation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific crossvalidation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects
    verbose : bool or integer
        amount of verbosity

    Attributes
    ----------
    `alpha_` : float
        The amount of penalization chosen by cross validation
    `coef_` : array, shape = [n_features]
        parameter vector (w in the cost function formula)
    `intercept_` : float
        independent term in decision function.
    `mse_path_` : array, shape = [n_alphas, n_folds]
        mean square error for the test set on each fold, varying alpha

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a fortran contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # The regularization path is computed with the lasso-specific helper.
    path = staticmethod(lasso_path)
    # There is a single rho value, hence nothing to parallelize over.
    n_jobs = 1
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False):
        # All the work happens in the shared CV base class; just forward.
        super(LassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol,
            copy_X=copy_X, cv=cv, verbose=verbose)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    Parameters
    ----------
    rho : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For rho = 0
        the penalty is an L1 penalty. For rho = 1 it is an L2 penalty.
        For 0 < rho < 1, the penalty is a combination of L1 and L2
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for rho is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in [.1, .5, .7,
        .9, .95, .99, 1]
    eps : float, optional
        Length of the path. eps=1e-3 means that
        alpha_min / alpha_max = 1e-3.
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than 'tol', the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than tol.
    cv : integer or crossvalidation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific crossvalidation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects
    verbose : bool or integer
        amount of verbosity
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If '-1', use
        all the CPUs. Note that this is used only if multiple values for
        rho are given.

    Attributes
    ----------
    `alpha_` : float
        The amount of penalization chosen by cross validation
    `rho_` : float
        The compromise between l1 and l2 penalization chosen by
        cross validation
    `coef_` : array, shape = [n_features]
        parameter vector (w in the cost function formula)
    `intercept_` : float
        independent term in decision function.
    `mse_path_` : array, shape = [n_rho, n_alpha, n_folds]
        mean square error for the test set on each fold, varying rho and
        alpha

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a fortran contiguous numpy array.

    The parameter rho corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * rho * ||w||_1 + 0.5 * alpha * (1 - rho) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and rho = a / (a + b)

    See also
    --------
    enet_path
    ElasticNet
    """
    # Regularization path computed with the generic elastic-net helper.
    path = staticmethod(enet_path)
    def __init__(self, rho=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1):
        # Penalty mixing (possibly a list of candidates to cross-validate).
        self.rho = rho
        # Alpha grid definition.
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        # Data preprocessing options.
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.copy_X = copy_X
        # Solver options.
        self.max_iter = max_iter
        self.tol = tol
        # Cross-validation setup.
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
| {
"repo_name": "sgenoud/scikit-learn",
"path": "sklearn/linear_model/coordinate_descent.py",
"copies": "1",
"size": "31607",
"license": "bsd-3-clause",
"hash": -4527553504032378000,
"line_mean": 33.0592672414,
"line_max": 79,
"alpha_frac": 0.5922738634,
"autogenerated": false,
"ratio": 4.057902169726537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5150176033126537,
"avg_score": null,
"num_lines": null
} |
import sys
import warnings
import numpy as np
from .base import LinearModel
from ..utils import as_float_array
from ..cross_validation import check_cv
from . import cd_fast
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel):
"""Linear Model trained with L1 and L2 prior as regularizer
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * rho * ||w||_1 + 0.5 * alpha * (1 - rho) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and rho = a / (a + b)
The parameter rho corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, rho =
1 is the lasso penalty. Currently, rho <= 0.01 is not reliable, unless
you supply your own sequence of alpha.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter
rho : float
The ElasticNet mixing parameter, with 0 < rho <= 1. For rho = 0
the penalty is an L1 penalty. For rho = 1 it is an L2 penalty.
For 0 < rho < 1, the penalty is a combination of L1 and L2
fit_intercept: bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
normalize : boolean, optional
If True, the regressors X are normalized
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter: int, optional
The maximum number of iterations
copy_X : boolean, optional, default False
If True, X will be copied; else, it may be overwritten.
tol: float, optional
The tolerance for the optimization: if the updates are
smaller than 'tol', the optimization code checks the
dual gap for optimality and continues until it is smaller
than tol.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a fortran contiguous numpy array.
"""
    def __init__(self, alpha=1.0, rho=0.5, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False):
        # Store hyper-parameters verbatim; validation and all data-dependent
        # work happen in fit (scikit-learn estimator convention).
        self.alpha = alpha
        # l1/l2 mixing: the penalty is alpha * (rho * L1 + 0.5*(1-rho) * L2).
        self.rho = rho
        # None signals "not fitted yet"; fit may warm-start from this.
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
def fit(self, X, y, Xy=None, coef_init=None):
    """Fit Elastic Net model with coordinate descent

    Parameters
    -----------
    X: ndarray, (n_samples, n_features)
        Data
    y: ndarray, (n_samples)
        Target
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.
    coef_init: ndarray of shape n_features
        The initial coeffients to warm-start the optimization

    Notes
    -----
    Coordinate descent is an algorithm that considers each column of
    data at a time hence it will automatically convert the X input
    as a fortran contiguous numpy array if necessary.

    To avoid memory re-allocation it is advised to allocate the
    initial data in memory directly using that format.
    """
    # X and y must be of type float64
    X = np.asanyarray(X, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)
    n_samples, n_features = X.shape

    # Keep a reference to the raw input so we can detect below whether
    # centering/normalization allocated a new array.
    X_init = X
    X, y, X_mean, y_mean, X_std = self._center_data(X, y,
            self.fit_intercept, self.normalize, copy=self.copy_X)
    precompute = self.precompute
    if X_init is not X and hasattr(precompute, '__array__'):
        # A user-supplied Gram matrix was computed on the original X;
        # it no longer matches the centered data.
        # recompute Gram
        # FIXME: it could be updated from precompute and X_mean
        # instead of recomputed
        precompute = 'auto'
    if X_init is not X and Xy is not None:
        Xy = None  # recompute Xy

    if coef_init is None:
        # Start from zeros unless warm-starting from a previous fit.
        if not self.warm_start or self.coef_ is None:
            self.coef_ = np.zeros(n_features, dtype=np.float64)
    else:
        self.coef_ = coef_init

    # Scale the penalties the way the cython solver expects them:
    # alpha is the L1 weight, beta the L2 weight, both times n_samples.
    alpha = self.alpha * self.rho * n_samples
    beta = self.alpha * (1.0 - self.rho) * n_samples

    X = np.asfortranarray(X)  # make data contiguous in memory

    # precompute if n_samples > n_features
    if hasattr(precompute, '__array__'):
        Gram = precompute
    elif precompute == True or \
            (precompute == 'auto' and n_samples > n_features):
        Gram = np.dot(X.T, X)
    else:
        Gram = None

    # Dispatch to the appropriate cython coordinate-descent routine.
    if Gram is None:
        self.coef_, self.dual_gap_, self.eps_ = \
                cd_fast.enet_coordinate_descent(self.coef_, alpha, beta,
                                                X, y, self.max_iter,
                                                self.tol)
    else:
        if Xy is None:
            Xy = np.dot(X.T, y)
        self.coef_, self.dual_gap_, self.eps_ = \
                cd_fast.enet_coordinate_descent_gram(self.coef_, alpha,
                            beta, Gram, Xy, y, self.max_iter, self.tol)

    self._set_intercept(X_mean, y_mean, X_std)

    # A dual gap larger than the tolerance means the solver stopped at
    # max_iter without reaching the requested precision.
    if self.dual_gap_ > self.eps_:
        warnings.warn('Objective did not converge, you might want'
                      ' to increase the number of iterations')

    # return self for chaining fit and predict calls
    return self
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Technically the Lasso model is optimizing the same objective function as
    the Elastic Net with rho=1.0 (no L2 penalty).

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional
        If True, the regressors X are normalized

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than 'tol', the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than tol.

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    Attributes
    ----------
    `coef_` : array, shape = [n_features]
        parameter vector (w in the formulation formula)

    `intercept_` : float
        independent term in decision function.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, precompute='auto', tol=0.0001, warm_start=False)
    >>> print clf.coef_
    [ 0.85  0.  ]
    >>> print clf.intercept_
    0.15

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a fortran contiguous numpy array.
    """

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute='auto', copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False):
        # A Lasso is exactly an elastic net with a pure L1 penalty:
        # delegate to the parent constructor with rho pinned to 1.0.
        super(Lasso, self).__init__(
            alpha=alpha, rho=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start)
###############################################################################
# Classes to store linear models along a regularization path
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, fit_intercept=True,
               normalize=False, copy_X=True, verbose=False,
               **params):
    """Compute Lasso path with coordinate descent

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Parameters
    ----------
    X : numpy array of shape [n_samples,n_features]
        Training data. Pass directly as fortran contiguous data to avoid
        unnecessary memory duplication

    y : numpy array of shape [n_samples]
        Target values

    eps : float, optional
        Length of the path. eps=1e-3 means that
        alpha_min / alpha_max = 1e-3

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    fit_intercept : bool
        Fit or not an intercept

    normalize : boolean, optional
        If True, the regressors X are normalized

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    verbose : bool or integer
        Amount of verbosity

    params : kwargs
        keyword arguments passed to the Lasso objects

    Returns
    -------
    models : a list of models along the regularization path

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a fortran contiguous numpy array.

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # The lasso is the rho=1 special case of the elastic net, so this
    # function is a thin wrapper around enet_path.
    return enet_path(X, y, rho=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     fit_intercept=fit_intercept, normalize=normalize,
                     copy_X=copy_X, verbose=verbose, **params)
def enet_path(X, y, rho=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, fit_intercept=True,
              normalize=False, copy_X=True, verbose=False,
              **params):
    """Compute Elastic-Net path with coordinate descent

    The Elastic Net optimization function is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * rho * ||w||_1 + 0.5 * alpha * (1 - rho) * ||w||^2_2

    Parameters
    ----------
    X : numpy array of shape [n_samples, n_features]
        Training data. Pass directly as fortran contiguous data to avoid
        unnecessary memory duplication

    y : numpy array of shape [n_samples]
        Target values

    rho : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). rho=1 corresponds to the Lasso

    eps : float
        Length of the path. eps=1e-3 means that
        alpha_min / alpha_max = 1e-3

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    fit_intercept : bool
        Fit or not an intercept

    normalize : boolean, optional
        If True, the regressors X are normalized

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    verbose : bool or integer
        Amount of verbosity

    params : kwargs
        keyword arguments passed to the ElasticNet objects

    Returns
    -------
    models : a list of models along the regularization path

    Notes
    -----
    See examples/plot_lasso_coordinate_descent_path.py for an example.

    See also
    --------
    ElasticNet
    ElasticNetCV
    """
    X = as_float_array(X, copy_X)
    X_init = X
    X, y, X_mean, y_mean, X_std = LinearModel._center_data(X, y,
                                                           fit_intercept,
                                                           normalize,
                                                           copy=False)
    X = np.asfortranarray(X)  # make data contiguous in memory
    n_samples, n_features = X.shape

    # If centering/normalization allocated a new X, any user-supplied Gram
    # matrix or Xy product no longer matches the centered data: drop them
    # so that they are recomputed below.
    if X_init is not X and hasattr(precompute, '__array__'):
        precompute = 'auto'
    if X_init is not X and Xy is not None:
        Xy = None

    # BUG FIX: this condition used to read ``if 'precompute' is True`` --
    # a string literal that is never ``True`` -- so an explicit
    # ``precompute=True`` was silently ignored and the Gram matrix was
    # recomputed from scratch by every ElasticNet fit along the path.
    # The hasattr guard also avoids comparing a user-supplied Gram array
    # against the string 'auto'.
    if not hasattr(precompute, '__array__'):
        if precompute is True or \
                (precompute == 'auto' and n_samples > n_features):
            precompute = np.dot(X.T, X)
    if Xy is None:
        Xy = np.dot(X.T, y)

    if alphas is None:
        # Build a log-spaced grid from alpha_max (the smallest alpha for
        # which the solution is all-zero) down to eps * alpha_max.
        alpha_max = np.abs(Xy).max() / (n_samples * rho)
        alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                             num=n_alphas)[::-1]
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered

    coef_ = None  # warm-start coefficients carried along the path
    models = []
    n_alphas = len(alphas)
    for i, alpha in enumerate(alphas):
        model = ElasticNet(alpha=alpha, rho=rho, fit_intercept=False,
                           precompute=precompute)
        model.set_params(**params)
        # Warm-start from the previous alpha's solution for speed.
        model.fit(X, y, coef_init=coef_, Xy=Xy)
        if fit_intercept:
            model.fit_intercept = True
            model._set_intercept(X_mean, y_mean, X_std)
        if verbose:
            # print() form works identically under Python 2 and 3 here.
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')
        coef_ = model.coef_.copy()
        models.append(model)
    return models
class LinearModelCV(LinearModel):
    """Base class for iterative model fitting along a regularization path"""

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False):
        # Plain parameter storage (scikit-learn convention: no logic in
        # __init__ so get_params / set_params round-trip cleanly).
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose

    def fit(self, X, y):
        """Fit linear model with coordinate descent along decreasing alphas
        using cross-validation

        Parameters
        ----------

        X : numpy array of shape [n_samples,n_features]
            Training data. Pass directly as fortran contiguous data to avoid
            unnecessary memory duplication

        y : numpy array of shape [n_samples]
            Target values
        """
        X = np.asfortranarray(X, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)

        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        del path_params['cv']

        # Start to compute path on full data
        # (self.path is lasso_path or enet_path, set by the subclass).
        models = self.path(X, y, **path_params)

        # Update the alphas list with the grid actually used on the full
        # data, so that every fold is evaluated on the same alphas.
        alphas = [model.alpha for model in models]
        n_alphas = len(alphas)
        path_params.update({'alphas': alphas, 'n_alphas': n_alphas})

        # init cross-validation generator
        cv = check_cv(self.cv, X)

        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv)
        n_folds = len(folds)
        mse_alphas = np.zeros((n_folds, n_alphas))
        for i, (train, test) in enumerate(folds):
            if self.verbose:
                print '%s: fold % 2i out of % 2i' % (
                        self.__class__.__name__, i, n_folds),
                sys.stdout.flush()
            models_train = self.path(X[train], y[train], **path_params)
            # Accumulate held-out mean squared error per alpha.
            for i_alpha, model in enumerate(models_train):
                y_ = model.predict(X[test])
                mse_alphas[i, i_alpha] += ((y_ - y[test]) ** 2).mean()
        if self.verbose == 1:
            print ''

        # Pick the alpha minimizing the mean MSE across folds, and expose
        # the corresponding full-data model's parameters.
        i_best_alpha = np.argmin(np.mean(mse_alphas, axis=0))
        model = models[i_best_alpha]

        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.alpha = model.alpha
        self.alphas = np.asarray(alphas)
        self.coef_path_ = np.asarray([model.coef_ for model in models])
        self.mse_path_ = mse_alphas.T
        return self
class LassoCV(LinearModelCV):
    """Lasso linear model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Parameters
    ----------
    eps : float, optional
        Length of the path. eps=1e-3 means that
        alpha_min / alpha_max = 1e-3.

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than 'tol', the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than tol.

    cv : integer or crossvalidation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific crossvalidation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects

    verbose : bool or integer
        amount of verbosity

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a fortran contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # Hook used by LinearModelCV.fit to compute the regularization path.
    path = staticmethod(lasso_path)
class ElasticNetCV(LinearModelCV):
    """Elastic Net model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    Parameters
    ----------
    rho : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For rho = 1
        the penalty is an L1 penalty. For rho = 0 it is an L2 penalty.
        For 0 < rho < 1, the penalty is a combination of L1 and L2

    eps : float, optional
        Length of the path. eps=1e-3 means that
        alpha_min / alpha_max = 1e-3.

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than 'tol', the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than tol.

    cv : integer or crossvalidation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific crossvalidation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects

    verbose : bool or integer
        amount of verbosity

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a fortran contiguous numpy array.

    The parameter rho corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * rho * ||w||_1 + 0.5 * alpha * (1 - rho) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and rho = a / (a + b)

    See also
    --------
    enet_path
    ElasticNet
    """
    # Hook used by LinearModelCV.fit to compute the regularization path.
    path = staticmethod(enet_path)

    def __init__(self, rho=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0):
        # Plain parameter storage (scikit-learn convention: no logic in
        # __init__ so get_params / set_params round-trip cleanly).
        self.rho = rho
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
| {
"repo_name": "cdegroc/scikit-learn",
"path": "sklearn/linear_model/coordinate_descent.py",
"copies": "1",
"size": "24053",
"license": "bsd-3-clause",
"hash": -8644280071132476000,
"line_mean": 32.0852819807,
"line_max": 79,
"alpha_frac": 0.5943541346,
"autogenerated": false,
"ratio": 4.123607063260757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5217961197860758,
"avg_score": null,
"num_lines": null
} |
import itertools
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
from sklearn.covariance import fast_mcd
# Shared fixtures for the tests below: the iris data matrix, its first
# column as a 1d view, and its dimensions.
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
    # Tests the FastMCD algorithm implementation
    # Each case: (n_samples, n_features, n_outliers,
    #             tol_loc, tol_cov, tol_support)
    cases = [
        # small data set, no outliers (random independent normal data)
        (100, 5, 0, 0.01, 0.1, 80),
        # small data set, medium contamination
        (100, 5, 20, 0.01, 0.01, 70),
        # small data set, strong contamination
        (100, 5, 40, 0.1, 0.1, 50),
        # medium data set
        (1000, 5, 450, 0.1, 0.1, 540),
        # large data set
        (1700, 5, 800, 0.1, 0.1, 870),
        # 1D data set
        (500, 1, 100, 0.001, 0.001, 350),
    ]
    for case in cases:
        launch_mcd_on_dataset(*case)
def test_fast_mcd_on_invalid_input():
    # fast_mcd requires a 2D array: 1D input must be rejected with a
    # helpful error message.
    data = np.arange(100)
    assert_raise_message(ValueError,
                         'Expected 2D array, got 1D array instead',
                         fast_mcd, data)
def test_mcd_class_on_invalid_input():
    # The MinCovDet estimator must reject 1D input the same way as the
    # fast_mcd function.
    data = np.arange(100)
    estimator = MinCovDet()
    assert_raise_message(ValueError,
                         'Expected 2D array, got 1D array instead',
                         estimator.fit, data)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    """Fit MinCovDet on synthetic contaminated Gaussian data and check it.

    Parameters
    ----------
    n_samples, n_features : int
        Shape of the generated Gaussian data.
    n_outliers : int
        Number of samples shifted away from the bulk to act as outliers.
    tol_loc, tol_cov : float
        Maximum admissible squared error of the location / covariance
        estimates, compared to estimates computed from the inliers only.
    tol_support : int
        Minimum number of samples expected in the MCD support.
    """
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    # each outlier is shifted by +/-5 independently per feature
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False

    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(H) >= tol_support)
    # the Mahalanobis distances on training data must match dist_
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
    # Regression test: MinCovDet used to break when X.shape = (3, 1),
    # i.e. when n_support equals n_samples.
    rng = np.random.RandomState(0)
    tiny = rng.normal(size=(3, 1))
    MinCovDet().fit(tiny)
def test_mcd_issue3367():
    # Check that MCD completes when the covariance matrix is singular
    # i.e. one of the rows and columns are all zeros
    rand_gen = np.random.RandomState(0)

    # Think of these as the values for X and Y -> 10 values between -5 and 5
    data_values = np.linspace(-5, 5, 10).tolist()
    # Get the cartesian product of all possible coordinate pairs from above set
    data = np.array(list(itertools.product(data_values, data_values)))

    # Add a third column that's all zeros to make our data a set of point
    # within a plane, which means that the covariance matrix will be singular
    data = np.hstack((data, np.zeros((data.shape[0], 1))))

    # The below line of code should raise an exception if the covariance matrix
    # is singular. As a further test, since we have points in XYZ, the
    # principle components (Eigenvectors) of these directly relate to the
    # geometry of the points. Since it's a plane, we should be able to test
    # that the Eigenvector that corresponds to the smallest Eigenvalue is the
    # plane normal, specifically [0, 0, 1], since everything is in the XY plane
    # (as I've set it up above). To do this one would start by:
    #
    #     evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
    #     normal = evecs[:, np.argmin(evals)]
    #
    # After which we need to assert that our `normal` is equal to [0, 0, 1].
    # Do note that there is floating point error associated with this, so it's
    # best to subtract the two and then compare some small tolerance (e.g.
    # 1e-12).
    # Only completion (no exception) is asserted here; the eigenvector
    # check described above is left as a suggestion.
    MinCovDet(random_state=rand_gen).fit(data)
def test_mcd_support_covariance_is_zero():
    # MCD must raise a ValueError with an informative message when the
    # covariance of the support data is 0 (all support values identical).
    msg = ('The covariance matrix of the support data is equal to 0, try to '
           'increase support_fraction')
    # Two degenerate 1d datasets differing only in the repeated filler value.
    base = [0.5, None, None, None, 0.957, None, None, None, 0.4285, None]
    for filler in (0.1, 0.3):
        values = [filler if v is None else v for v in base]
        data = np.array(values).reshape(-1, 1)
        assert_raise_message(ValueError, msg, MinCovDet().fit, data)
def test_outlier_detection():
    # EllipticEnvelope end-to-end: unfitted estimators must raise, and the
    # decision / mahalanobis / score outputs must be mutually consistent.
    rnd = np.random.RandomState(0)
    X = rnd.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    # using the estimator before fit() must raise NotFittedError
    assert_raises(NotFittedError, clf.predict, X)
    assert_raises(NotFittedError, clf.decision_function, X)
    clf.fit(X)
    y_pred = clf.predict(X)
    # raw values are the Mahalanobis distances themselves
    decision = clf.decision_function(X, raw_values=True)
    decision_transformed = clf.decision_function(X, raw_values=False)

    assert_array_almost_equal(
        decision, clf.mahalanobis(X))
    assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
    # score against an all-inlier labelling equals the inlier fraction
    assert_almost_equal(clf.score(X, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
    # the shifted decision is negative exactly for predicted outliers
    assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| {
"repo_name": "herilalaina/scikit-learn",
"path": "sklearn/covariance/tests/test_robust_covariance.py",
"copies": "9",
"size": "6059",
"license": "bsd-3-clause",
"hash": 1376543601366235600,
"line_mean": 37.3481012658,
"line_max": 79,
"alpha_frac": 0.6638059086,
"autogenerated": false,
"ratio": 3.1167695473251027,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 158
} |
import numpy as np
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
from sklearn.covariance import fast_mcd
from sklearn.exceptions import NotFittedError
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
# Shared fixtures for the tests below: the iris data matrix, its first
# column as a 1d view, and its dimensions.
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
    # Tests the FastMCD algorithm implementation
    # Each case: (n_samples, n_features, n_outliers,
    #             tol_loc, tol_cov, tol_support)
    cases = [
        # small data set, no outliers (random independent normal data)
        (100, 5, 0, 0.01, 0.1, 80),
        # small data set, medium contamination
        (100, 5, 20, 0.01, 0.01, 70),
        # small data set, strong contamination
        (100, 5, 40, 0.1, 0.1, 50),
        # medium data set
        (1000, 5, 450, 0.1, 0.1, 540),
        # large data set
        (1700, 5, 800, 0.1, 0.1, 870),
        # 1D data set
        (500, 1, 100, 0.001, 0.001, 350),
    ]
    for case in cases:
        launch_mcd_on_dataset(*case)
def test_fast_mcd_on_invalid_input():
    # fast_mcd needs at least two samples in a 2D layout; a bare 1D
    # range must be rejected with the documented message.
    data = np.arange(100)
    assert_raise_message(ValueError,
                         'fast_mcd expects at least 2 samples',
                         fast_mcd, data)
def test_mcd_class_on_invalid_input():
    # The MinCovDet estimator must reject the same invalid 1D input.
    data = np.arange(100)
    estimator = MinCovDet()
    assert_raise_message(ValueError,
                         'MinCovDet expects at least 2 samples',
                         estimator.fit, data)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    """Fit MinCovDet on synthetic contaminated Gaussian data and check it.

    Parameters
    ----------
    n_samples, n_features : int
        Shape of the generated Gaussian data.
    n_outliers : int
        Number of samples shifted away from the bulk to act as outliers.
    tol_loc, tol_cov : float
        Maximum admissible squared error of the location / covariance
        estimates, compared to estimates computed from the inliers only.
    tol_support : int
        Minimum number of samples expected in the MCD support.
    """
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    # each outlier is shifted by +/-5 independently per feature
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False

    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert (error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert (error_cov < tol_cov)
    assert (np.sum(H) >= tol_support)
    # the Mahalanobis distances on training data must match dist_
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
    # Regression test: MinCovDet used to break when X.shape = (3, 1),
    # i.e. when n_support equals n_samples.
    rng = np.random.RandomState(0)
    tiny = rng.normal(size=(3, 1))
    MinCovDet().fit(tiny)
def test_outlier_detection():
    # EllipticEnvelope end-to-end: unfitted estimators must raise, and the
    # decision / mahalanobis / score outputs must be mutually consistent.
    rnd = np.random.RandomState(0)
    X = rnd.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    # using the estimator before fit() must raise NotFittedError
    assert_raises(NotFittedError, clf.predict, X)
    assert_raises(NotFittedError, clf.decision_function, X)
    clf.fit(X)
    y_pred = clf.predict(X)
    # raw values are the Mahalanobis distances themselves
    decision = clf.decision_function(X, raw_values=True)
    decision_transformed = clf.decision_function(X, raw_values=False)

    assert_array_almost_equal(
        decision, clf.mahalanobis(X))
    assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
    # score against an all-inlier labelling equals the inlier fraction
    assert_almost_equal(clf.score(X, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
    # the shifted decision is negative exactly for predicted outliers
    assert (sum(y_pred == -1) == sum(decision_transformed < 0))
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/scikit-learn-master/sklearn/covariance/tests/test_robust_covariance.py",
"copies": "1",
"size": "3840",
"license": "mit",
"hash": 5247051714583605000,
"line_mean": 34.5555555556,
"line_max": 80,
"alpha_frac": 0.6614583333,
"autogenerated": false,
"ratio": 3.0236220472440944,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9183937262117026,
"avg_score": 0.00022862368541380886,
"num_lines": 108
} |
import numpy as np
import pytest
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
# Shared fixtures for the tests below: the diabetes data matrix, its first
# column as a 1d view, and its dimensions.
X, _ = datasets.load_diabetes(return_X_y=True)
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
    # Tests Covariance module on a simple dataset.
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    # error norm of an estimate against itself must be zero under every
    # supported norm and option combination
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, squared=False), 0)
    # an unknown norm name must be rejected
    with pytest.raises(NotImplementedError):
        cov.error_norm(emp_cov, norm='foo')
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    assert np.amin(mahal_dist) > 0

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)

    # test with one sample
    # Create X with 1 sample and 5 features
    X_1sample = np.arange(5).reshape(1, 5)
    cov = EmpiricalCovariance()
    warn_msg = (
        "Only one sample available. You may want to reshape your data array"
    )
    with pytest.warns(UserWarning, match=warn_msg):
        cov.fit(X_1sample)

    # a single sample yields a zero covariance matrix
    assert_array_almost_equal(cov.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))

    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)

    # test centered case
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
    # Tests ShrunkCovariance module on a simple dataset.
    # compare shrunk covariance obtained from data and from MLE estimate
    cov = ShrunkCovariance(shrinkage=0.5)
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
        cov.covariance_, 4)

    # same test with shrinkage not provided
    cov = ShrunkCovariance()
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)

    # same test with shrinkage = 0 (<==> empirical_covariance)
    cov = ShrunkCovariance(shrinkage=0.)
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = ShrunkCovariance(shrinkage=0.3)
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
    cov.fit(X)
    # with store_precision=False no precision matrix is kept
    assert(cov.precision_ is None)
def test_ledoit_wolf():
    # Tests LedoitWolf module on a simple dataset.
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_centered)
    shrinkage_ = lw.shrinkage_

    score_ = lw.score(X_centered)
    # the estimator and the standalone shrinkage function must agree,
    # independently of the block size used
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
                                              assume_centered=True),
                        shrinkage_)
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
                                              block_size=6),
                        shrinkage_)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_centered,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    # 1d centered case: covariance reduces to the mean of squares
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False, assume_centered=True)
    lw.fit(X_centered)
    assert_almost_equal(lw.score(X_centered), score_, 4)
    assert(lw.precision_ is None)

    # Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    lw = LedoitWolf()
    lw.fit(X)
    assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
    assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
    assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
    assert_almost_equal(lw.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf()
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)

    # test with one sample
    # warning should be raised when using only 1 sample
    X_1sample = np.arange(5).reshape(1, 5)
    lw = LedoitWolf()

    warn_msg = (
        "Only one sample available. You may want to reshape your data array"
    )
    with pytest.warns(UserWarning, match=warn_msg):
        lw.fit(X_1sample)

    assert_array_almost_equal(lw.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))

    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False)
    lw.fit(X)
    assert_almost_equal(lw.score(X), score_, 4)
    assert(lw.precision_ is None)
def _naive_ledoit_wolf_shrinkage(X):
    """Unblocked reference implementation of the Ledoit-Wolf shrinkage.

    Follows beta and delta as given at the beginning of section 3.2 of
    "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for
    Large-Dimensional Covariance Matrices".
    """
    n, p = X.shape
    cov = empirical_covariance(X, assume_centered=False)
    mu = np.trace(cov) / p
    # delta: normalized squared distance between cov and mu * identity
    centered = cov.copy()
    centered.flat[::p + 1] -= mu
    delta = (centered ** 2).sum() / p
    squared = X ** 2
    beta_raw = 1. / (p * n) \
        * np.sum(np.dot(squared.T, squared) / n - cov ** 2)
    # beta is capped by delta so the returned coefficient stays in [0, 1]
    return min(beta_raw, delta) / delta
def test_ledoit_wolf_small():
    """Blocked LedoitWolf shrinkage must match the naive reference."""
    subset = X[:, :4]
    estimator = LedoitWolf().fit(subset)
    assert_almost_equal(estimator.shrinkage_,
                        _naive_ledoit_wolf_shrinkage(subset))
def test_ledoit_wolf_large():
    """LedoitWolf works on data wider than block_size; blocking is a no-op."""
    random_state = np.random.RandomState(0)
    # 20 features with block_size=10 forces the blocked code path
    data = random_state.normal(size=(10, 20))
    blocked = LedoitWolf(block_size=10).fit(data)
    # independent normal noise: covariance should be roughly the identity
    assert_almost_equal(blocked.covariance_, np.eye(20), 0)
    reference = blocked.covariance_
    # a block size exceeding n_features disables the splitting entirely
    unblocked = LedoitWolf(block_size=25).fit(data)
    assert_almost_equal(unblocked.covariance_, reference)
def test_oas():
    """Tests the OAS estimator against oas() and ShrunkCovariance."""
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    oa = OAS(assume_centered=True)
    oa.fit(X_centered)
    # reference values reused below for the non-centered code path
    shrinkage_ = oa.shrinkage_
    score_ = oa.score(X_centered)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered,
                                                 assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0:1]
    oa = OAS(assume_centered=True)
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # with centered 1-d data the covariance is the mean of squares
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False, assume_centered=True)
    oa.fit(X_centered)
    assert_almost_equal(oa.score(X_centered), score_, 4)
    assert(oa.precision_ is None)
    # Same tests without assuming centered data--------------------------------
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X)
    # must reproduce the reference values computed on the centered run
    assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
    assert_almost_equal(oa.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS()
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
    # test with one sample
    # warning should be raised when using only 1 sample
    X_1sample = np.arange(5).reshape(1, 5)
    oa = OAS()
    warn_msg = (
        "Only one sample available. You may want to reshape your data array"
    )
    with pytest.warns(UserWarning, match=warn_msg):
        oa.fit(X_1sample)
    # degenerate fit yields an all-zero covariance
    assert_array_almost_equal(oa.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X)
    assert_almost_equal(oa.score(X), score_, 4)
    assert(oa.precision_ is None)
def test_EmpiricalCovariance_validates_mahalanobis():
    """Checks that EmpiricalCovariance validates data with mahalanobis."""
    fitted = EmpiricalCovariance().fit(X)
    expected_msg = (f"X has 2 features, but \\w+ is expecting {X.shape[1]} "
                    "features as input")
    # fewer features than seen at fit time must be rejected
    with pytest.raises(ValueError, match=expected_msg):
        fitted.mahalanobis(X[:, :2])
| {
"repo_name": "kevin-intel/scikit-learn",
"path": "sklearn/covariance/tests/test_covariance.py",
"copies": "3",
"size": "12809",
"license": "bsd-3-clause",
"hash": -5066063345063005000,
"line_mean": 37.8151515152,
"line_max": 79,
"alpha_frac": 0.6588336326,
"autogenerated": false,
"ratio": 3.199050949050949,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 330
} |
import numpy as np
import warnings
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
# Shared test fixture: full diabetes feature matrix, its first column,
# and the matrix dimensions used throughout the tests below.
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
    """Tests Covariance module on a simple dataset.

    Fits EmpiricalCovariance on the module-level data and checks that it
    matches ``empirical_covariance``, that ``error_norm`` against its own
    estimate is zero under every supported option, and that single-feature,
    single-sample, integer and assume_centered edge cases behave.
    """
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    # error_norm of the fit vs. its own estimate must be 0 for each norm
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, squared=False), 0)
    # unknown norms are rejected
    assert_raises(NotImplementedError,
                  cov.error_norm, emp_cov, norm='foo')
    # Mahalanobis distances computation test
    # (a leftover debugging print of min/max distances was removed here)
    mahal_dist = cov.mahalanobis(X)
    assert np.amin(mahal_dist) > 0
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
    # test with one sample; fitting emits a warning, silenced here
    X_1sample = np.arange(5)
    cov = EmpiricalCovariance()
    with warnings.catch_warnings(record=True):
        cov.fit(X_1sample)
    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)
    # test centered case: location_ must stay pinned to the origin
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
    """Sanity checks for the ShrunkCovariance estimator."""
    # the estimator must agree with shrinking the MLE covariance directly
    model = ShrunkCovariance(shrinkage=0.5).fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
        model.covariance_, 4)
    # default shrinkage matches shrunk_covariance's default
    model = ShrunkCovariance().fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X)), model.covariance_, 4)
    # zero shrinkage degenerates to the empirical covariance
    model = ShrunkCovariance(shrinkage=0.).fit(X)
    assert_array_almost_equal(empirical_covariance(X), model.covariance_, 4)
    # a single feature: shrinkage leaves the 1x1 covariance unchanged
    single_column = X[:, 0].reshape((-1, 1))
    model = ShrunkCovariance(shrinkage=0.3).fit(single_column)
    assert_array_almost_equal(
        empirical_covariance(single_column), model.covariance_, 4)
    # precision matrix is not kept when store_precision=False
    model = ShrunkCovariance(shrinkage=0.5, store_precision=False).fit(X)
    assert model.precision_ is None
def test_ledoit_wolf():
    """Tests LedoitWolf module on a simple dataset.

    Checks agreement between the LedoitWolf estimator, the ledoit_wolf
    function and ledoit_wolf_shrinkage, with and without the centered-data
    assumption, plus 1-feature, 1-sample and memory-limit edge cases.
    Local variables were renamed from the misspelled "shinkrage" to
    "shrinkage" for consistency with the rest of the file.
    """
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_centered)
    shrinkage_ = lw.shrinkage_
    score_ = lw.score(X_centered)
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
                                              assume_centered=True),
                        shrinkage_)
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
                                              block_size=6),
                        shrinkage_)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_centered,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False, assume_centered=True)
    lw.fit(X_centered)
    assert_almost_equal(lw.score(X_centered), score_, 4)
    assert lw.precision_ is None
    # (too) large data set: must fail loudly instead of exhausting memory
    X_large = np.ones((20, 200))
    assert_raises(MemoryError, ledoit_wolf, X_large, block_size=100)
    # Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    lw = LedoitWolf()
    lw.fit(X)
    assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
    assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
    assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
    assert_almost_equal(lw.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf()
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
    # test with one sample; fitting emits a warning, silenced here
    X_1sample = np.arange(5)
    lw = LedoitWolf()
    with warnings.catch_warnings(record=True):
        lw.fit(X_1sample)
    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False)
    lw.fit(X)
    assert_almost_equal(lw.score(X), score_, 4)
    assert lw.precision_ is None
def test_oas():
    """Tests OAS module on a simple dataset.

    Checks agreement between the OAS estimator, the oas function and
    ShrunkCovariance, with and without the centered-data assumption.
    Local variables were renamed from the misspelled "shinkrage" to
    "shrinkage" for consistency with the rest of the file.
    """
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    oa = OAS(assume_centered=True)
    oa.fit(X_centered)
    shrinkage_ = oa.shrinkage_
    score_ = oa.score(X_centered)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered,
                                                 assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS(assume_centered=True)
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False, assume_centered=True)
    oa.fit(X_centered)
    assert_almost_equal(oa.score(X_centered), score_, 4)
    assert oa.precision_ is None
    # Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X)
    assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
    assert_almost_equal(oa.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS()
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
    # test with one sample; fitting emits a warning, silenced here
    X_1sample = np.arange(5)
    oa = OAS()
    with warnings.catch_warnings(record=True):
        oa.fit(X_1sample)
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X)
    assert_almost_equal(oa.score(X), score_, 4)
    assert oa.precision_ is None
| {
"repo_name": "B3AU/waveTree",
"path": "sklearn/covariance/tests/test_covariance.py",
"copies": "5",
"size": "10155",
"license": "bsd-3-clause",
"hash": 6598631652302148000,
"line_mean": 37.4659090909,
"line_max": 79,
"alpha_frac": 0.6629246677,
"autogenerated": false,
"ratio": 3.118857493857494,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00007575757575757576,
"num_lines": 264
} |
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
# Shared test fixture: full diabetes feature matrix, its first column,
# and the matrix dimensions used throughout the tests below.
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
    """EmpiricalCovariance vs. empirical_covariance, norms and edge cases."""
    # Tests Covariance module on a simple dataset.
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    # error_norm of the fit against its own estimate must be 0
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, squared=False), 0)
    # unknown norms are rejected
    assert_raises(NotImplementedError,
                  cov.error_norm, emp_cov, norm='foo')
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    assert_greater(np.amin(mahal_dist), 0)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
    # test with one sample
    # Create X with 1 sample and 5 features
    X_1sample = np.arange(5).reshape(1, 5)
    cov = EmpiricalCovariance()
    assert_warns(UserWarning, cov.fit, X_1sample)
    # degenerate fit yields an all-zero covariance
    assert_array_almost_equal(cov.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))
    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)
    # test centered case: location_ must stay pinned to the origin
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
    """ShrunkCovariance agrees with shrunk_covariance() and edge cases."""
    # Tests ShrunkCovariance module on a simple dataset.
    # compare shrunk covariance obtained from data and from MLE estimate
    cov = ShrunkCovariance(shrinkage=0.5)
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
        cov.covariance_, 4)
    # same test with shrinkage not provided
    cov = ShrunkCovariance()
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
    # same test with shrinkage = 0 (<==> empirical_covariance)
    cov = ShrunkCovariance(shrinkage=0.)
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
    # test with n_features = 1 (a 1x1 covariance is unchanged by shrinkage)
    X_1d = X[:, 0].reshape((-1, 1))
    cov = ShrunkCovariance(shrinkage=0.3)
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
    cov.fit(X)
    assert(cov.precision_ is None)
def test_ledoit_wolf():
    """LedoitWolf estimator vs. ledoit_wolf()/ledoit_wolf_shrinkage()."""
    # Tests LedoitWolf module on a simple dataset.
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_centered)
    # reference values reused below for the non-centered code path
    shrinkage_ = lw.shrinkage_
    score_ = lw.score(X_centered)
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
                                              assume_centered=True),
                        shrinkage_)
    # blocked computation (block_size=6) must give the same coefficient
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
                                              block_size=6),
                        shrinkage_)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_centered,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    # with centered 1-d data the covariance is the mean of squares
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False, assume_centered=True)
    lw.fit(X_centered)
    assert_almost_equal(lw.score(X_centered), score_, 4)
    assert(lw.precision_ is None)
    # Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    lw = LedoitWolf()
    lw.fit(X)
    # must reproduce the reference values computed on the centered run
    assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
    assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
    assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
    assert_almost_equal(lw.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf()
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
    # test with one sample
    # warning should be raised when using only 1 sample
    X_1sample = np.arange(5).reshape(1, 5)
    lw = LedoitWolf()
    assert_warns(UserWarning, lw.fit, X_1sample)
    # degenerate fit yields an all-zero covariance
    assert_array_almost_equal(lw.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))
    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False)
    lw.fit(X)
    assert_almost_equal(lw.score(X), score_, 4)
    assert(lw.precision_ is None)
def _naive_ledoit_wolf_shrinkage(X):
    """Unblocked reference for the Ledoit-Wolf shrinkage coefficient."""
    # A simple implementation of the formulas from Ledoit & Wolf
    # The computation below achieves the following computations of the
    # "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for
    # Large-Dimensional Covariance Matrices"
    # beta and delta are given in the beginning of section 3.2
    n_samples, n_features = X.shape
    emp_cov = empirical_covariance(X, assume_centered=False)
    # mu scales the identity shrinkage target (mean of the diagonal)
    mu = np.trace(emp_cov) / n_features
    delta_ = emp_cov.copy()
    delta_.flat[::n_features + 1] -= mu
    delta = (delta_ ** 2).sum() / n_features
    X2 = X ** 2
    beta_ = 1. / (n_features * n_samples) \
        * np.sum(np.dot(X2.T, X2) / n_samples - emp_cov ** 2)
    # beta is capped by delta, keeping the shrinkage within [0, 1]
    beta = min(beta_, delta)
    shrinkage = beta / delta
    return shrinkage
def test_ledoit_wolf_small():
    """The blocked shrinkage computation matches the naive formula."""
    narrow = X[:, :4]
    model = LedoitWolf()
    model.fit(narrow)
    assert_almost_equal(model.shrinkage_,
                        _naive_ledoit_wolf_shrinkage(narrow))
def test_ledoit_wolf_large():
    """Blocked computation must work when n_features > block_size."""
    # test that ledoit_wolf doesn't error on data that is wider than block_size
    rng = np.random.RandomState(0)
    # use a number of features that is larger than the block-size
    X = rng.normal(size=(10, 20))
    lw = LedoitWolf(block_size=10).fit(X)
    # check that covariance is about diagonal (random normal noise)
    assert_almost_equal(lw.covariance_, np.eye(20), 0)
    cov = lw.covariance_
    # check that the result is consistent with not splitting data into blocks.
    lw = LedoitWolf(block_size=25).fit(X)
    assert_almost_equal(lw.covariance_, cov)
def test_oas():
    """OAS estimator vs. the oas() function and ShrunkCovariance."""
    # Tests OAS module on a simple dataset.
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    oa = OAS(assume_centered=True)
    oa.fit(X_centered)
    # reference values reused below for the non-centered code path
    shrinkage_ = oa.shrinkage_
    score_ = oa.score(X_centered)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered,
                                                 assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0:1]
    oa = OAS(assume_centered=True)
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # with centered 1-d data the covariance is the mean of squares
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False, assume_centered=True)
    oa.fit(X_centered)
    assert_almost_equal(oa.score(X_centered), score_, 4)
    assert(oa.precision_ is None)
    # Same tests without assuming centered data--------------------------------
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X)
    # must reproduce the reference values computed on the centered run
    assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
    assert_almost_equal(oa.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS()
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
    # test with one sample
    # warning should be raised when using only 1 sample
    X_1sample = np.arange(5).reshape(1, 5)
    oa = OAS()
    assert_warns(UserWarning, oa.fit, X_1sample)
    # degenerate fit yields an all-zero covariance
    assert_array_almost_equal(oa.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X)
    assert_almost_equal(oa.score(X), score_, 4)
    assert(oa.precision_ is None)
| {
"repo_name": "Vimos/scikit-learn",
"path": "sklearn/covariance/tests/test_covariance.py",
"copies": "59",
"size": "12193",
"license": "bsd-3-clause",
"hash": -8137810715484464000,
"line_mean": 38.8464052288,
"line_max": 79,
"alpha_frac": 0.6627573198,
"autogenerated": false,
"ratio": 3.186046511627907,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
# Shared test fixture: iris feature matrix, its first column, and its shape.
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
    """Exercise the FastMCD implementation over several data regimes."""
    # Each row: (n_samples, n_features, n_outliers, tol_loc, tol_cov,
    # tol_support) forwarded verbatim to launch_mcd_on_dataset.
    scenarios = [
        # small set, no outliers (random independent normal data)
        (100, 5, 0, 0.01, 0.1, 80),
        # small set, medium contamination
        (100, 5, 20, 0.01, 0.01, 70),
        # small set, strong contamination
        (100, 5, 40, 0.1, 0.1, 50),
        # medium data set
        (1000, 5, 450, 0.1, 0.1, 540),
        # large data set
        (1700, 5, 800, 0.1, 0.1, 870),
        # one-dimensional data set
        (500, 1, 100, 0.001, 0.001, 350),
    ]
    for scenario in scenarios:
        launch_mcd_on_dataset(*scenario)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    """Fit MinCovDet on contaminated Gaussian data and check its estimates.

    Parameters
    ----------
    n_samples, n_features : size of the generated data set.
    n_outliers : number of points shifted away from the main cloud.
    tol_loc, tol_cov : upper bounds on the mean squared error of the
        location / covariance estimates vs. the inlier-only estimates.
    tol_support : minimum number of points the MCD support must retain.
    """
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False
    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(H) >= tol_support)
    # reported dist_ must equal the Mahalanobis distances on training data
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
    """Non-regression: MinCovDet must not break when X.shape = (3, 1)."""
    # In this degenerate configuration n_support equals n_samples.
    random_state = np.random.RandomState(0)
    tiny = random_state.normal(size=(3, 1))
    MinCovDet().fit(tiny)
def test_outlier_detection():
    """EllipticEnvelope: unfitted errors, decision values and scoring."""
    rnd = np.random.RandomState(0)
    X = rnd.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    # predicting / scoring before fit must raise NotFittedError
    assert_raises(NotFittedError, clf.predict, X)
    assert_raises(NotFittedError, clf.decision_function, X)
    clf.fit(X)
    y_pred = clf.predict(X)
    decision = clf.decision_function(X, raw_values=True)
    decision_transformed = clf.decision_function(X, raw_values=False)
    # raw decision values equal the Mahalanobis distances
    assert_array_almost_equal(
        decision, clf.mahalanobis(X))
    assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
    # accuracy against an all-inlier target equals the inlier fraction
    assert_almost_equal(clf.score(X, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
    # transformed decision is negative exactly for predicted outliers
    assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| {
"repo_name": "mehdidc/scikit-learn",
"path": "sklearn/covariance/tests/test_robust_covariance.py",
"copies": "7",
"size": "3377",
"license": "bsd-3-clause",
"hash": 3132634646285445000,
"line_mean": 33.4591836735,
"line_max": 78,
"alpha_frac": 0.6633106307,
"autogenerated": false,
"ratio": 2.967486818980668,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7130797449680668,
"avg_score": null,
"num_lines": null
} |
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import numpy as np
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, LedoitWolf, ledoit_wolf, OAS, oas
# Shared test fixture: iris feature matrix, its first column, and its shape.
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
    """Tests Covariance module on a simple dataset.
    """
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
    # error_norm of the fit against its own estimate must be 0
    assert_almost_equal(cov.error_norm(empirical_covariance(X)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X), norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X), norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X), scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X), squared=False), 0)
    # Mahalanobis distances computation test
    # (bounds are empirical sanity limits for this fixed dataset)
    mahal_dist = cov.mahalanobis(X)
    assert(np.amax(mahal_dist) < 250)
    assert(np.amin(mahal_dist) > 50)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)
def test_shrunk_covariance():
    """Tests ShrunkCovariance module on a simple dataset.
    """
    # compare shrunk covariance obtained from data and from MLE estimate
    cov = ShrunkCovariance(shrinkage=0.5)
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
        cov.covariance_, 4)
    # same test with shrinkage not provided
    cov = ShrunkCovariance()
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
    # same test with shrinkage = 0 (<==> empirical_covariance)
    cov = ShrunkCovariance(shrinkage=0.)
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
    # test with n_features = 1 (a 1x1 covariance is unchanged by shrinkage)
    X_1d = X[:, 0].reshape((-1, 1))
    cov = ShrunkCovariance(shrinkage=0.3)
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
    cov.fit(X)
    assert(cov.precision_ is None)
def test_ledoit_wolf():
    """Tests LedoitWolf module on a simple dataset.

    Exercises both the centered (``assume_centered=True``) and
    non-centered code paths: the estimator is compared against the
    functional ``ledoit_wolf`` API, against ShrunkCovariance fitted with
    the same shrinkage, and against edge cases (single feature,
    ``store_precision=False``).
    """
    # test shrinkage coeff on a simple data set
    lw = LedoitWolf()
    lw.fit(X, assume_centered=True)
    # reference values below are regression values for this data set
    assert_almost_equal(lw.shrinkage_, 0.00192, 4)
    assert_almost_equal(lw.score(X, assume_centered=True), -2.89795, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
    scov.fit(X, assume_centered=True)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf()
    lw.fit(X_1d, assume_centered=True)
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False)
    lw.fit(X, assume_centered=True)
    assert_almost_equal(lw.score(X, assume_centered=True), -2.89795, 4)
    assert(lw.precision_ is None)
    # Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    lw = LedoitWolf()
    lw.fit(X)
    assert_almost_equal(lw.shrinkage_, 0.007582, 4)
    assert_almost_equal(lw.score(X), 2.243483, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf()
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False)
    lw.fit(X)
    assert_almost_equal(lw.score(X), 2.2434839, 4)
    assert(lw.precision_ is None)
def test_oas():
    """Tests OAS module on a simple dataset.

    Mirrors ``test_ledoit_wolf``: the OAS estimator is checked against
    the functional ``oas`` API and against ShrunkCovariance fitted with
    the same shrinkage, for both the centered and non-centered paths,
    plus the single-feature and ``store_precision=False`` edge cases.
    """
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X, assume_centered=True)
    # reference values below are regression values for this data set
    assert_almost_equal(oa.shrinkage_, 0.018740, 4)
    assert_almost_equal(oa.score(X, assume_centered=True), -5.03605, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X, assume_centered=True)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS()
    oa.fit(X_1d, assume_centered=True)
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X, assume_centered=True)
    assert_almost_equal(oa.score(X, assume_centered=True), -5.03605, 4)
    assert(oa.precision_ is None)
    ### Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X)
    assert_almost_equal(oa.shrinkage_, 0.020236, 4)
    assert_almost_equal(oa.score(X), 2.079025, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS()
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X)
    assert_almost_equal(oa.score(X), 2.079025, 4)
    assert(oa.precision_ is None)
| {
"repo_name": "sgenoud/scikit-learn",
"path": "sklearn/covariance/tests/test_covariance.py",
"copies": "3",
"size": "8673",
"license": "bsd-3-clause",
"hash": 7348934390458435000,
"line_mean": 38.7844036697,
"line_max": 79,
"alpha_frac": 0.6756600945,
"autogenerated": false,
"ratio": 3.0093684941013183,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001505528111032698,
"num_lines": 218
} |
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import numpy as np
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
# Shared fixtures for all tests in this module; X_1d exercises the
# single-feature edge case.
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
    """Tests the FastMCD algorithm implementation

    Runs ``launch_mcd_on_dataset`` over several synthetic configurations:
    (n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support).
    """
    ### Small data set
    # test without outliers (random independent normal data)
    launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
    # test with a contaminated data set (medium contamination)
    launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
    # test with a contaminated data set (strong contamination)
    launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
    ### Medium data set
    launch_mcd_on_dataset(1000, 5, 450, 1e-3, 1e-3, 540)
    ### Large data set
    launch_mcd_on_dataset(1700, 5, 800, 1e-3, 1e-3, 870)
    ### 1D data set
    launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(
        n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support):
    """Fit MinCovDet on synthetic contaminated Gaussian data and check it.

    Generates ``n_samples`` standard-normal points, offsets ``n_outliers``
    of them, then asserts that the MCD location/covariance estimates stay
    within ``tol_loc``/``tol_cov`` (squared error) of the estimates
    computed on the inliers only, and that at least ``tol_support``
    points end up in the support.
    """
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False
    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(H) >= tol_support)
def test_outlier_detection():
    """Smoke-test EllipticEnvelope on random Gaussian data.

    Checks that the raw decision function equals the Mahalanobis
    distances of the centered data, and that the accuracy score matches
    the fraction of points predicted as inliers.
    """
    rnd = np.random.RandomState(0)
    X = rnd.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    clf.fit(X)
    y_pred = clf.predict(X)
    assert_array_almost_equal(
        clf.decision_function(X, raw_mahalanobis=True),
        clf.mahalanobis(X - clf.location_))
    assert_almost_equal(clf.score(X, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
| {
"repo_name": "sgenoud/scikit-learn",
"path": "sklearn/covariance/tests/test_robust_covariance.py",
"copies": "1",
"size": "2616",
"license": "bsd-3-clause",
"hash": -7775778065667585000,
"line_mean": 31.7,
"line_max": 72,
"alpha_frac": 0.6548165138,
"autogenerated": false,
"ratio": 2.9034406215316317,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40582571353316316,
"avg_score": null,
"num_lines": null
} |
import numpy as np
import warnings
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
# Shared fixtures for all tests in this module; X_1d exercises the
# single-feature edge case.
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
    """Tests Covariance module on a simple dataset.

    Verifies that EmpiricalCovariance matches the functional
    ``empirical_covariance`` API, that all supported error norms of the
    fit against itself are zero, that an unknown norm raises, and covers
    edge cases: single feature, single sample, integer input, and the
    ``assume_centered=True`` path.
    """
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, squared=False), 0)
    assert_raises(NotImplementedError,
                  cov.error_norm, emp_cov, norm='foo')
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    print(np.amin(mahal_dist), np.amax(mahal_dist))
    assert(np.amin(mahal_dist) > 0)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
    # test with one sample
    # (should only warn, not raise)
    X_1sample = np.arange(5)
    cov = EmpiricalCovariance()
    with warnings.catch_warnings(record=True):
        cov.fit(X_1sample)
    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)
    # test centered case
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
    """Tests ShrunkCovariance module on a simple dataset.

    Checks that the ShrunkCovariance estimator agrees with the functional
    ``shrunk_covariance`` API for explicit, default and zero shrinkage,
    that a single-feature input is handled, and that ``precision_`` is
    None when ``store_precision=False``.
    """
    # compare shrunk covariance obtained from data and from MLE estimate
    cov = ShrunkCovariance(shrinkage=0.5)
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
        cov.covariance_, 4)
    # same test with shrinkage not provided
    cov = ShrunkCovariance()
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
    # same test with shrinkage = 0 (<==> empirical_covariance)
    cov = ShrunkCovariance(shrinkage=0.)
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
    # test with n_features = 1
    # (for a 1x1 covariance, shrinking toward the scaled identity is a
    # no-op, so the result equals the empirical covariance)
    X_1d = X[:, 0].reshape((-1, 1))
    cov = ShrunkCovariance(shrinkage=0.3)
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
    cov.fit(X)
    assert(cov.precision_ is None)
def test_ledoit_wolf():
    """Tests LedoitWolf module on a simple dataset.

    Exercises the ``assume_centered=True`` constructor path first (saving
    the resulting shrinkage and score as references), then repeats the
    checks on non-centered data, comparing against the functional
    ``ledoit_wolf``/``ledoit_wolf_shrinkage`` API and against a
    ShrunkCovariance fitted with the same shrinkage.  Edge cases: single
    feature, single sample, ``store_precision=False``, and an input too
    large for the requested ``block_size`` (MemoryError).
    """
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_centered)
    shrinkage_ = lw.shrinkage_
    score_ = lw.score(X_centered)
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
                                              assume_centered=True),
                        shrinkage_)
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
                                              block_size=6),
                        shrinkage_)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False, assume_centered=True)
    lw.fit(X_centered)
    assert_almost_equal(lw.score(X_centered), score_, 4)
    assert(lw.precision_ is None)
    # (too) large data set
    X_large = np.ones((20, 200))
    assert_raises(MemoryError, ledoit_wolf, X_large, block_size=100)
    # Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    lw = LedoitWolf()
    lw.fit(X)
    assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
    assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
    assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
    assert_almost_equal(lw.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf()
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
    # test with one sample
    # (should only warn, not raise)
    X_1sample = np.arange(5)
    lw = LedoitWolf()
    with warnings.catch_warnings(record=True):
        lw.fit(X_1sample)
    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False)
    lw.fit(X)
    assert_almost_equal(lw.score(X), score_, 4)
    assert(lw.precision_ is None)
def test_oas():
    """Tests OAS module on a simple dataset.

    Mirrors ``test_ledoit_wolf``: the OAS estimator is checked against
    the functional ``oas`` API and against ShrunkCovariance fitted with
    the same shrinkage, first on centered data (saving shrinkage and
    score as references), then on non-centered data, plus the
    single-feature, single-sample and ``store_precision=False`` cases.
    """
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    oa = OAS(assume_centered=True)
    oa.fit(X_centered)
    shrinkage_ = oa.shrinkage_
    score_ = oa.score(X_centered)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
                                                 assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS(assume_centered=True)
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False, assume_centered=True)
    oa.fit(X_centered)
    assert_almost_equal(oa.score(X_centered), score_, 4)
    assert(oa.precision_ is None)
    ### Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X)
    assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
    assert_almost_equal(oa.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS()
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
    # test with one sample
    # (should only warn, not raise)
    X_1sample = np.arange(5)
    oa = OAS()
    with warnings.catch_warnings(record=True):
        oa.fit(X_1sample)
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X)
    assert_almost_equal(oa.score(X), score_, 4)
    assert(oa.precision_ is None)
| {
"repo_name": "kmike/scikit-learn",
"path": "sklearn/covariance/tests/test_covariance.py",
"copies": "4",
"size": "10153",
"license": "bsd-3-clause",
"hash": 5667032455597067000,
"line_mean": 37.4583333333,
"line_max": 79,
"alpha_frac": 0.6628582685,
"autogenerated": false,
"ratio": 3.1182432432432434,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00007575757575757576,
"num_lines": 264
} |
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
# Shared fixtures for all tests in this module; X_1d exercises the
# single-feature edge case.
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
    """Tests the FastMCD algorithm implementation

    Runs ``launch_mcd_on_dataset`` over several synthetic configurations:
    (n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support).
    """
    ### Small data set
    # test without outliers (random independent normal data)
    launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
    # test with a contaminated data set (medium contamination)
    launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
    # test with a contaminated data set (strong contamination)
    launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
    ### Medium data set
    launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
    ### Large data set
    launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
    ### 1D data set
    launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    """Fit MinCovDet on synthetic contaminated Gaussian data and check it.

    Generates ``n_samples`` standard-normal points, offsets ``n_outliers``
    of them, then asserts that the MCD location/covariance estimates stay
    within ``tol_loc``/``tol_cov`` (squared error) of the estimates
    computed on the inliers only, that at least ``tol_support`` points
    end up in the support, and that ``mahalanobis`` on the training data
    reproduces the fitted ``dist_`` attribute.
    """
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False
    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(H) >= tol_support)
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
    """Non-regression test for issue #1127: degenerate input shape."""
    # Check that the code does not break with X.shape = (3, 1)
    # (i.e. n_support = n_samples)
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(3, 1))
    mcd = MinCovDet()
    mcd.fit(X)
def test_outlier_detection():
    """Test EllipticEnvelope on random Gaussian data.

    Before fitting, ``predict`` and ``decision_function`` must raise;
    after fitting, the raw decision values equal the Mahalanobis
    distances (and ``dist_``), the accuracy score matches the inlier
    fraction, and the transformed decision function is negative exactly
    for the points predicted as outliers.
    """
    rnd = np.random.RandomState(0)
    X = rnd.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    print(clf.threshold)
    assert_raises(Exception, clf.predict, X)
    assert_raises(Exception, clf.decision_function, X)
    clf.fit(X)
    y_pred = clf.predict(X)
    decision = clf.decision_function(X, raw_values=True)
    decision_transformed = clf.decision_function(X, raw_values=False)
    assert_array_almost_equal(
        decision, clf.mahalanobis(X))
    assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
    assert_almost_equal(clf.score(X, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
    assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| {
"repo_name": "florian-f/sklearn",
"path": "sklearn/covariance/tests/test_robust_covariance.py",
"copies": "4",
"size": "3338",
"license": "bsd-3-clause",
"hash": -3109847551201528300,
"line_mean": 33.0612244898,
"line_max": 78,
"alpha_frac": 0.6587777112,
"autogenerated": false,
"ratio": 2.961845607808341,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5620623319008341,
"avg_score": null,
"num_lines": null
} |
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the cluster subpackage.

    Declares the compiled extension modules (_dbscan_inner,
    _hierarchical, _k_means) together with their include paths and
    BLAS / libm link requirements.
    """
    from numpy.distutils.misc_util import Configuration
    cblas_libs, blas_info = get_blas_info()
    libraries = []
    # libm must be linked explicitly on POSIX platforms
    if os.name == 'posix':
        cblas_libs.append('m')
        libraries.append('m')
    config = Configuration('cluster', parent_package, top_path)
    config.add_extension('_dbscan_inner',
                         sources=['_dbscan_inner.cpp'],
                         include_dirs=[numpy.get_include()],
                         language="c++")
    config.add_extension('_hierarchical',
                         sources=['_hierarchical.cpp'],
                         language="c++",
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    config.add_extension(
        '_k_means',
        libraries=cblas_libs,
        sources=['_k_means.c'],
        include_dirs=[join('..', 'src', 'cblas'),
                      numpy.get_include(),
                      blas_info.pop('include_dirs', [])],
        extra_compile_args=blas_info.pop('extra_compile_args', []),
        **blas_info
    )
    return config
# Allow building this subpackage standalone (python setup.py build_ext ...).
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/scikit-learn-master/sklearn/cluster/setup.py",
"copies": "1",
"size": "1450",
"license": "mit",
"hash": 4701870351377395000,
"line_mean": 28.5918367347,
"line_max": 67,
"alpha_frac": 0.5496551724,
"autogenerated": false,
"ratio": 4.119318181818182,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5168973354218181,
"avg_score": null,
"num_lines": null
} |
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the cluster subpackage.

    Declares the compiled extension modules (_dbscan_inner,
    _hierarchical, _k_means_elkan, _k_means) together with their include
    paths and BLAS / libm link requirements.
    """
    from numpy.distutils.misc_util import Configuration
    cblas_libs, blas_info = get_blas_info()
    libraries = []
    # libm must be linked explicitly on POSIX platforms
    if os.name == 'posix':
        cblas_libs.append('m')
        libraries.append('m')
    config = Configuration('cluster', parent_package, top_path)
    config.add_extension('_dbscan_inner',
                         sources=['_dbscan_inner.cpp'],
                         include_dirs=[numpy.get_include()],
                         language="c++")
    config.add_extension('_hierarchical',
                         sources=['_hierarchical.cpp'],
                         language="c++",
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    config.add_extension('_k_means_elkan',
                         sources=['_k_means_elkan.c'],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    config.add_extension(
        '_k_means',
        libraries=cblas_libs,
        sources=['_k_means.c'],
        include_dirs=[join('..', 'src', 'cblas'),
                      numpy.get_include(),
                      blas_info.pop('include_dirs', [])],
        extra_compile_args=blas_info.pop('extra_compile_args', []),
        **blas_info
    )
    return config
# Allow building this subpackage standalone (python setup.py build_ext ...).
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| {
"repo_name": "mjudsp/Tsallis",
"path": "sklearn/cluster/setup.py",
"copies": "5",
"size": "1654",
"license": "bsd-3-clause",
"hash": -727766178050549200,
"line_mean": 30.8076923077,
"line_max": 67,
"alpha_frac": 0.5374848851,
"autogenerated": false,
"ratio": 4.1767676767676765,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007122507122507122,
"num_lines": 52
} |
import os
import numpy
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the cluster subpackage.

    Declares the Cython extension modules (built from .pyx sources) and
    registers the tests subpackage.  Unlike older versions, no CBLAS
    include/link information is needed here.
    """
    from numpy.distutils.misc_util import Configuration
    libraries = []
    # libm must be linked explicitly on POSIX platforms
    if os.name == 'posix':
        libraries.append('m')
    config = Configuration('cluster', parent_package, top_path)
    config.add_extension('_dbscan_inner',
                         sources=['_dbscan_inner.pyx'],
                         include_dirs=[numpy.get_include()],
                         language="c++")
    config.add_extension('_hierarchical_fast',
                         sources=['_hierarchical_fast.pyx'],
                         language="c++",
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    config.add_extension('_k_means_fast',
                         sources=['_k_means_fast.pyx'],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    config.add_extension('_k_means_lloyd',
                         sources=['_k_means_lloyd.pyx'],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    config.add_extension('_k_means_elkan',
                         sources=['_k_means_elkan.pyx'],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    config.add_subpackage('tests')
    return config
# Allow building this subpackage standalone (python setup.py build_ext ...).
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| {
"repo_name": "bnaul/scikit-learn",
"path": "sklearn/cluster/setup.py",
"copies": "12",
"size": "1616",
"license": "bsd-3-clause",
"hash": -4976089820001635000,
"line_mean": 31.32,
"line_max": 63,
"alpha_frac": 0.5198019802,
"autogenerated": false,
"ratio": 4.564971751412429,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from ..utils import logger, verbose
@verbose
def is_equal(first, second, verbose=None):
    """Check if 2 python structures are the same.

    Designed to handle dict, list, np.ndarray etc.  The comparison is
    recursive for dicts and lists; ndarrays are compared with
    ``np.allclose``.  Differences found are reported via ``logger``.

    Parameters
    ----------
    first, second : object
        The two structures to compare.
    verbose : bool | str | int | None
        Verbosity level (handled by the ``@verbose`` decorator).

    Returns
    -------
    all_equal : bool
        True if the two structures are equivalent.
    """
    all_equal = True
    # Objects of different types can never be equal.  Return early so
    # the type-specific comparisons below cannot raise on mixed types
    # (e.g. `key in second` when `second` is not a container).
    if type(first) != type(second):
        logger.info('%s and\n%s have different types' % (first, second))
        return False
    if isinstance(first, dict):
        for key in first.keys():
            if (key not in second):
                logger.info("Missing key %s in %s" % (key, second))
                all_equal = False
            else:
                if not is_equal(first[key], second[key]):
                    all_equal = False
        # also detect keys that only exist in `second` (the loop above is
        # one-sided and would miss them)
        for key in second.keys():
            if (key not in first):
                logger.info("Missing key %s in %s" % (key, first))
                all_equal = False
    elif isinstance(first, np.ndarray):
        # compare shapes explicitly: np.allclose would happily broadcast
        # arrays of different shapes and report them as equal
        if first.shape != second.shape or not np.allclose(first, second):
            all_equal = False
    elif isinstance(first, list):
        # zip() silently stops at the shorter list, so lists of different
        # lengths sharing a common prefix must be rejected explicitly
        if len(first) != len(second):
            logger.info('%s and\n%s have different lengths' % (first, second))
            all_equal = False
        for a, b in zip(first, second):
            if not is_equal(a, b):
                logger.info('%s and\n%s are different' % (a, b))
                all_equal = False
    else:
        if first != second:
            logger.info('%s and\n%s are different' % (first, second))
            all_equal = False
    return all_equal
| {
"repo_name": "wmvanvliet/mne-python",
"path": "mne/io/diff.py",
"copies": "14",
"size": "1219",
"license": "bsd-3-clause",
"hash": -7213742225305804000,
"line_mean": 29.475,
"line_max": 69,
"alpha_frac": 0.5578342904,
"autogenerated": false,
"ratio": 3.8454258675078865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""Implementation of coordinate descent for the Elastic Net with sparse data.
"""
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import LinearModel
from . import cd_fast_sparse
class ElasticNet(LinearModel):
    """Linear Model trained with L1 and L2 prior as regularizer

    This implementation works on scipy.sparse X and dense coef_.
    rho=1 is the lasso penalty. Currently, rho <= 0.01 is not
    reliable, unless you supply your own sequence of alpha.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the L1 term. Defaults to 1.0
    rho : float
        The ElasticNet mixing parameter, with 0 < rho <= 1.
    coef_ : ndarray of shape n_features
        The initial coefficients to warm-start the optimization
    fit_intercept: bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered.
        TODO: fit_intercept=True is not yet implemented

    Notes
    -----
    The parameter rho corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    """
    def __init__(self, alpha=1.0, rho=0.5, fit_intercept=False,
                 normalize=False, max_iter=1000, tol=1e-4):
        if fit_intercept:
            raise NotImplementedError("fit_intercept=True is not implemented")
        self.alpha = alpha
        self.rho = rho
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.intercept_ = 0.0
        self.max_iter = max_iter
        self.tol = tol
        self._set_coef(None)

    def _set_coef(self, coef_):
        """Store coef_ and keep its sparse representation in sync."""
        self.coef_ = coef_
        if coef_ is None:
            self.sparse_coef_ = None
        else:
            # sparse representation of the fitted coef for the predict method
            self.sparse_coef_ = sp.csr_matrix(coef_)

    def fit(self, X, y):
        """Fit current model with coordinate descent

        X is expected to be a sparse matrix. For maximum efficiency, use a
        sparse matrix in CSC format (scipy.sparse.csc_matrix)
        """
        X = sp.csc_matrix(X)
        y = np.asanyarray(y, dtype=np.float64)
        # NOTE: we are explicitly not centering the data the naive way to
        # avoid breaking the sparsity of X
        n_samples, n_features = X.shape[0], X.shape[1]
        if self.coef_ is None:
            self.coef_ = np.zeros(n_features, dtype=np.float64)
        # rescale penalties by n_samples, as expected by the cython routine
        alpha = self.alpha * self.rho * n_samples
        beta = self.alpha * (1.0 - self.rho) * n_samples
        X_data = np.array(X.data, np.float64)
        # TODO: add support for non centered data
        coef_, self.dual_gap_, self.eps_ = \
                cd_fast_sparse.enet_coordinate_descent(
                    self.coef_, alpha, beta, X_data, X.indices, X.indptr, y,
                    self.max_iter, self.tol)
        # update self.coef_ and self.sparse_coef_ consistently
        self._set_coef(coef_)
        if self.dual_gap_ > self.eps_:
            # fixed: the two literals previously joined without a space
            # ("...wantto increase...")
            warnings.warn('Objective did not converge, you might want '
                          'to increase the number of iterations')
        # XXX TODO: implement intercept_ fitting
        # return self for chaining fit and predict calls
        return self

    def predict(self, X):
        """Predict using the linear model

        Parameters
        ----------
        X : scipy.sparse matrix of shape [n_samples, n_features]

        Returns
        -------
        array, shape = [n_samples] with the predicted real values
        """
        # np.dot only works correctly if both arguments are sparse matrices
        if not sp.issparse(X):
            X = sp.csr_matrix(X)
        return np.ravel(np.dot(self.sparse_coef_, X.T).todense()
                        + self.intercept_)
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer

    Sparse-input Lasso: works on scipy.sparse X and dense coef_.
    Technically this is an ElasticNet with the L2 share of the penalty
    fixed to zero (rho=1.0).

    Parameters
    ----------
    alpha : float
        Constant that multiplies the L1 term. Defaults to 1.0
    coef_ : ndarray of shape n_features
        The initial coefficients to warm-start the optimization
    fit_intercept: bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered.
    """
    def __init__(self, alpha=1.0, fit_intercept=False, normalize=False,
                 max_iter=1000, tol=1e-4):
        # A Lasso is an ElasticNet whose mixing parameter rho is pinned
        # to 1.0 (pure L1); everything else is delegated to the parent.
        super(Lasso, self).__init__(alpha=alpha, rho=1.0,
                                    fit_intercept=fit_intercept,
                                    normalize=normalize,
                                    max_iter=max_iter, tol=tol)
| {
"repo_name": "ominux/scikit-learn",
"path": "sklearn/linear_model/sparse/coordinate_descent.py",
"copies": "2",
"size": "4827",
"license": "bsd-3-clause",
"hash": 4640861534838178000,
"line_mean": 33.2340425532,
"line_max": 78,
"alpha_frac": 0.6154961674,
"autogenerated": false,
"ratio": 4.052896725440806,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5668392892840806,
"avg_score": null,
"num_lines": null
} |
"""Implementation of coordinate descent for the Elastic Net with sparse data.
"""
import warnings
import numpy as np
import scipy.sparse as sp
from ...utils.extmath import safe_sparse_dot
from ..base import LinearModel
from . import cd_fast_sparse
class ElasticNet(LinearModel):
    """Linear Model trained with L1 and L2 prior as regularizer

    This implementation works on scipy.sparse X and dense `coef_`.
    rho=1 is the lasso penalty. Currently, rho <= 0.01 is not
    reliable, unless you supply your own sequence of alpha.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the L1 term. Defaults to 1.0
    rho : float
        The ElasticNet mixing parameter, with 0 < rho <= 1.
    fit_intercept: bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered.
        TODO: fit_intercept=True is not yet implemented

    Notes
    -----
    The parameter rho corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    """
    def __init__(self, alpha=1.0, rho=0.5, fit_intercept=False,
                 normalize=False, max_iter=1000, tol=1e-4):
        if fit_intercept:
            raise NotImplementedError("fit_intercept=True is not implemented")
        self.alpha = alpha
        self.rho = rho
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.intercept_ = 0.0
        self.max_iter = max_iter
        self.tol = tol
        self._set_coef(None)

    def _set_coef(self, coef_):
        """Store coef_ and keep its sparse representation in sync."""
        self.coef_ = coef_
        if coef_ is None:
            self.sparse_coef_ = None
        else:
            # sparse representation of the fitted coef for the predict method
            self.sparse_coef_ = sp.csr_matrix(coef_)

    def fit(self, X, y):
        """Fit current model with coordinate descent

        X is expected to be a sparse matrix. For maximum efficiency, use a
        sparse matrix in CSC format (scipy.sparse.csc_matrix)
        """
        X = sp.csc_matrix(X)
        y = np.asarray(y, dtype=np.float64)
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have incompatible shapes.\n" +
                             "Note: Sparse matrices cannot be indexed w/" +
                             "boolean masks (use `indices=True` in CV).")
        # NOTE: we are explicitly not centering the data the naive way to
        # avoid breaking the sparsity of X
        n_samples, n_features = X.shape[0], X.shape[1]
        if self.coef_ is None:
            self.coef_ = np.zeros(n_features, dtype=np.float64)
        # rescale penalties by n_samples, as expected by the cython routine
        alpha = self.alpha * self.rho * n_samples
        beta = self.alpha * (1.0 - self.rho) * n_samples
        X_data = np.array(X.data, np.float64)
        # TODO: add support for non centered data
        coef_, self.dual_gap_, self.eps_ = \
                cd_fast_sparse.enet_coordinate_descent(
                    self.coef_, alpha, beta, X_data, X.indices, X.indptr, y,
                    self.max_iter, self.tol)
        # update self.coef_ and self.sparse_coef_ consistently
        self._set_coef(coef_)
        if self.dual_gap_ > self.eps_:
            # fixed: the two literals previously joined without a space
            # ("...wantto increase...")
            warnings.warn('Objective did not converge, you might want '
                          'to increase the number of iterations')
        # XXX TODO: implement intercept_ fitting
        # return self for chaining fit and predict calls
        return self

    def decision_function(self, X):
        """Decision function of the linear model

        Parameters
        ----------
        X : scipy.sparse matrix of shape [n_samples, n_features]

        Returns
        -------
        array, shape = [n_samples] with the predicted real values
        """
        return np.ravel(safe_sparse_dot(self.sparse_coef_, X.T,
                                        dense_output=True) + self.intercept_)
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer.

    This implementation works on scipy.sparse X and dense `coef_`.
    Technically this is the same as Elastic Net with the L2 penalty
    set to zero.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the L1 term. Defaults to 1.0
    `coef_` : ndarray of shape n_features
        The initial coeffients to warm-start the optimization
    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered.
    """

    def __init__(self, alpha=1.0, fit_intercept=False, normalize=False,
                 max_iter=1000, tol=1e-4):
        # A Lasso is an ElasticNet with mixing parameter rho fixed at 1,
        # i.e. a pure L1 penalty and no L2 component.
        super(Lasso, self).__init__(alpha=alpha,
                                    rho=1.0,
                                    fit_intercept=fit_intercept,
                                    normalize=normalize,
                                    max_iter=max_iter,
                                    tol=tol)
| {
"repo_name": "cdegroc/scikit-learn",
"path": "sklearn/linear_model/sparse/coordinate_descent.py",
"copies": "1",
"size": "4941",
"license": "bsd-3-clause",
"hash": -6102850043187879000,
"line_mean": 33.7957746479,
"line_max": 78,
"alpha_frac": 0.6071645416,
"autogenerated": false,
"ratio": 4.059983566146261,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022356360384529397,
"num_lines": 142
} |
from copy import deepcopy
import numpy as np
from scipy import linalg, signal
from ..source_estimate import SourceEstimate
from ..minimum_norm.inverse import combine_xyz, _prepare_forward
from ..minimum_norm.inverse import _check_reference
from ..forward import compute_orient_prior, is_fixed_orient, _to_fixed_ori
from ..io.pick import pick_channels_evoked
from ..io.proj import deactivate_proj
from ..utils import logger, verbose
from ..dipole import Dipole
from ..externals.six.moves import xrange as range
from .mxne_optim import (mixed_norm_solver, iterative_mixed_norm_solver,
norm_l2inf, tf_mixed_norm_solver)
@verbose
def _prepare_weights(forward, gain, source_weighting, weights, weights_min):
    """Apply per-source weights to the gain matrix and source weighting.

    ``weights`` may be a SourceEstimate (reduced to its per-source maximum
    absolute value over time) or a plain array.  Both ``gain`` and
    ``source_weighting`` are scaled *in place*.  When ``weights_min`` prunes
    sources, a boolean ``mask`` over the original columns is returned,
    otherwise ``mask`` is None.
    """
    mask = None
    if isinstance(weights, SourceEstimate):
        # weights = np.sqrt(np.sum(weights.data ** 2, axis=1))
        weights = np.max(np.abs(weights.data), axis=1)
    weights_max = np.max(weights)
    if weights_min > weights_max:
        raise ValueError('weights_min > weights_max (%s > %s)' %
                         (weights_min, weights_max))
    # Normalize so the largest weight is 1; rescale weights_min so the
    # threshold stays comparable after normalization.
    weights_min = weights_min / weights_max
    weights = weights / weights_max
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    # Replicate each positional weight across its dipole orientations so
    # the weight vector lines up with the gain columns.
    weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T)
    if len(weights) != gain.shape[1]:
        raise ValueError('weights do not have the correct dimension '
                         ' (%d != %d)' % (len(weights), gain.shape[1]))
    if len(source_weighting.shape) == 1:
        source_weighting *= weights
    else:
        source_weighting *= weights[:, None]
    gain *= weights[None, :]

    if weights_min is not None:
        # Drop sources whose (normalized) weight is at or below threshold.
        mask = (weights > weights_min)
        gain = gain[:, mask]
        n_sources = np.sum(mask) // n_dip_per_pos
        logger.info("Reducing source space to %d sources" % n_sources)

    return gain, source_weighting, mask
@verbose
def _prepare_gain_column(forward, info, noise_cov, pca, depth, loose, weights,
                         weights_min, verbose=None):
    """Whiten the lead field and apply column-wise priors.

    Returns the whitened (and possibly weighted/masked) gain matrix, the
    matching channel info, the whitening matrix, the per-column
    ``source_weighting`` needed to map solver output back to physical
    units, and the source mask (None unless ``weights_min`` pruned sources).
    """
    gain_info, gain, _, whitener, _ = _prepare_forward(forward, info,
                                                       noise_cov, pca)

    logger.info('Whitening lead field matrix.')
    gain = np.dot(whitener, gain)

    if depth is not None:
        # Depth prior: columns with large norms (superficial sources) are
        # down-weighted; source_weighting later undoes this scaling.
        depth_prior = np.sum(gain ** 2, axis=0) ** depth
        source_weighting = np.sqrt(depth_prior ** -1.)
    else:
        source_weighting = np.ones(gain.shape[1], dtype=gain.dtype)

    if loose is not None and loose != 1.0:
        # Loose orientation prior shrinks the tangential components.
        source_weighting *= np.sqrt(compute_orient_prior(forward, loose))

    # NOTE: gain is rescaled in place column-by-column.
    gain *= source_weighting[None, :]

    if weights is None:
        mask = None
    else:
        gain, source_weighting, mask = _prepare_weights(forward, gain,
                                                        source_weighting,
                                                        weights, weights_min)

    return gain, gain_info, whitener, source_weighting, mask
def _prepare_gain(forward, info, noise_cov, pca, depth, loose, weights,
weights_min, verbose=None):
if not isinstance(depth, float):
raise ValueError('Invalid depth parameter. '
'A float is required (got %s).'
% type(depth))
elif depth < 0.0:
raise ValueError('Depth parameter must be positive (got %s).'
% depth)
gain, gain_info, whitener, source_weighting, mask = \
_prepare_gain_column(forward, info, noise_cov, pca, depth,
loose, weights, weights_min)
return gain, gain_info, whitener, source_weighting, mask
def _reapply_source_weighting(X, source_weighting, active_set,
n_dip_per_pos):
X *= source_weighting[active_set][:, None]
return X
def _compute_residual(forward, evoked, X, active_set, info):
    """Return ``evoked`` minus the sensor field predicted by the sources.

    The prediction uses the *unwhitened* forward operator restricted to
    the channels in ``info`` and to the active source columns.
    """
    # OK, picking based on row_names is safe
    sel = [forward['sol']['row_names'].index(c) for c in info['ch_names']]
    residual = evoked.copy()
    residual = pick_channels_evoked(residual, include=info['ch_names'])
    r_tmp = residual.copy()

    # Predicted sensor data from the forward model and source estimates.
    r_tmp.data = np.dot(forward['sol']['data'][sel, :][:, active_set], X)

    # Take care of proj
    active_projs = list()
    non_active_projs = list()
    for p in evoked.info['projs']:
        if p['active']:
            active_projs.append(p)
        else:
            non_active_projs.append(p)

    if len(active_projs) > 0:
        # Apply to the prediction the same projections that were applied
        # to the measured data, so the subtraction below is consistent.
        r_tmp.info['projs'] = deactivate_proj(active_projs, copy=True)
        r_tmp.apply_proj()
        r_tmp.add_proj(non_active_projs, remove_existing=False)

    residual.data -= r_tmp.data

    return residual
@verbose
def _make_sparse_stc(X, active_set, forward, tmin, tstep,
                     active_is_idx=False, verbose=None):
    """Build a SourceEstimate from solver output on the active sources."""
    if not is_fixed_orient(forward):
        logger.info('combining the current components...')
        # Collapse xyz orientation triplets into one amplitude per location.
        X = combine_xyz(X)

    if not active_is_idx:
        active_idx = np.where(active_set)[0]
    else:
        active_idx = active_set

    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    if n_dip_per_pos > 1:
        # Convert orientation-level indices to position-level indices.
        active_idx = np.unique(active_idx // n_dip_per_pos)

    src = forward['src']

    # Split the position indices between left and right hemispheres.
    n_lh_points = len(src[0]['vertno'])
    lh_vertno = src[0]['vertno'][active_idx[active_idx < n_lh_points]]
    rh_vertno = src[1]['vertno'][active_idx[active_idx >= n_lh_points] -
                                 n_lh_points]
    vertices = [lh_vertno, rh_vertno]
    stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
    return stc
@verbose
def _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M, M_est,
                         active_is_idx=False, verbose=None):
    """Convert solver output into one Dipole per active source position.

    ``M`` and ``M_est`` are the measured and estimated sensor data; they
    are used only to compute a per-sample goodness of fit (in percent),
    shared by all returned dipoles.
    """
    times = tmin + tstep * np.arange(X.shape[1])

    if not active_is_idx:
        active_idx = np.where(active_set)[0]
    else:
        active_idx = active_set

    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    if n_dip_per_pos > 1:
        # Orientation-level indices -> position-level indices.
        active_idx = np.unique(active_idx // n_dip_per_pos)

    # Goodness of fit: fraction of sensor variance explained per sample,
    # guarded against division by zero when the data column is all zero.
    gof = np.zeros(M_est.shape[1])
    M_norm2 = np.sum(M ** 2, axis=0)
    R_norm2 = np.sum((M - M_est) ** 2, axis=0)
    gof[M_norm2 > 0.0] = 1. - R_norm2[M_norm2 > 0.0] / M_norm2[M_norm2 > 0.0]
    gof *= 100.

    dipoles = []
    for k, i_dip in enumerate(active_idx):
        i_pos = forward['source_rr'][i_dip][np.newaxis, :]
        i_pos = i_pos.repeat(len(times), axis=0)
        X_ = X[k * n_dip_per_pos: (k + 1) * n_dip_per_pos]
        if n_dip_per_pos == 1:
            # Fixed orientation: signed amplitude, constant orientation.
            amplitude = X_[0]
            i_ori = forward['source_nn'][i_dip][np.newaxis, :]
            i_ori = i_ori.repeat(len(times), axis=0)
        else:
            if forward['surf_ori']:
                # NOTE(review): presumably rotates the surface-oriented
                # components back using the source_nn block — confirm the
                # transform direction against the forward conventions.
                X_ = np.dot(forward['source_nn'][i_dip *
                            n_dip_per_pos:(i_dip + 1) * n_dip_per_pos].T, X_)

            amplitude = np.sqrt(np.sum(X_ ** 2, axis=0))
            i_ori = np.zeros((len(times), 3))
            # Orientation is only defined where the amplitude is non-zero.
            i_ori[amplitude > 0.] = (X_[:, amplitude > 0.] /
                                     amplitude[amplitude > 0.]).T

        dipoles.append(Dipole(times, i_pos, amplitude, i_ori, gof))

    return dipoles
@verbose
def make_stc_from_dipoles(dipoles, src, verbose=None):
    """Convert a list of spatio-temporal dipoles into a SourceEstimate.

    Parameters
    ----------
    dipoles : Dipole | list of instances of Dipole
        The dipoles to convert.
    src : instance of SourceSpaces
        The source space used to generate the forward operator.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    stc : SourceEstimate
        The source estimate.
    """
    logger.info('Converting dipoles into a SourceEstimate.')
    if isinstance(dipoles, Dipole):
        dipoles = [dipoles]
    if not isinstance(dipoles, list):
        raise ValueError('Dipoles must be an instance of Dipole or '
                         'a list of instances of Dipole. '
                         'Got %s!' % type(dipoles))
    # All dipoles are assumed to share the same time axis.
    tmin = dipoles[0].times[0]
    tstep = dipoles[0].times[1] - tmin
    X = np.zeros((len(dipoles), len(dipoles[0].times)))
    # Positions of every used vertex in the source space, in vertno order.
    source_rr = np.concatenate([_src['rr'][_src['vertno'], :] for _src in src],
                               axis=0)
    n_lh_points = len(src[0]['vertno'])
    lh_vertno = list()
    rh_vertno = list()
    for i in range(len(dipoles)):
        if not np.all(dipoles[i].pos == dipoles[i].pos[0]):
            raise ValueError('Only dipoles with fixed position over time '
                             'are supported!')
        X[i] = dipoles[i].amplitude
        # Locate the dipole's vertex by matching its position exactly.
        idx = np.all(source_rr == dipoles[i].pos[0], axis=1)
        idx = np.where(idx)[0][0]
        if idx < n_lh_points:
            lh_vertno.append(src[0]['vertno'][idx])
        else:
            rh_vertno.append(src[1]['vertno'][idx - n_lh_points])
    vertices = [np.array(lh_vertno).astype(int),
                np.array(rh_vertno).astype(int)]
    stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
    logger.info('[done]')
    return stc
@verbose
def mixed_norm(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
               maxit=3000, tol=1e-4, active_set_size=10, pca=True,
               debias=True, time_pca=True, weights=None, weights_min=None,
               solver='auto', n_mxne_iter=1, return_residual=False,
               return_as_dipoles=False, verbose=None):
    """Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE).

    Compute L1/L2 mixed-norm solution [1]_ or L0.5/L2 [2]_ mixed-norm
    solution on evoked data.

    Parameters
    ----------
    evoked : instance of Evoked or list of instances of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    alpha : float
        Regularization parameter.
    loose : float in [0, 1]
        Value that weights the source variances of the dipole components
        that are parallel (tangential) to the cortical surface. If loose
        is 0 or None then the solution is computed with fixed orientation.
        If loose is 1, it corresponds to free orientations.
    depth : None | float in [0, 1]
        Depth weighting coefficients. If None, no depth weighting is performed.
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    active_set_size : int | None
        Size of active set increment. If None, no active set strategy is used.
    pca : bool
        If True the rank of the data is reduced to true dimension.
    debias : bool
        Remove coefficient amplitude bias due to L1 penalty.
    time_pca : bool or int
        If True the rank of the concatenated epochs is reduced to
        its true dimension. If is 'int' the rank is limited to this value.
    weights : None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None, a
        1d array with shape (n_sources,), or a SourceEstimate (e.g. obtained
        with wMNE, dSPM, or fMRI).
    weights_min : float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    solver : 'prox' | 'cd' | 'bcd' | 'auto'
        The algorithm to use for the optimization. 'prox' stands for
        proximal interations using the FISTA algorithm, 'cd' uses
        coordinate descent, and 'bcd' applies block coordinate descent.
        'cd' is only available for fixed orientation.
    n_mxne_iter : int
        The number of MxNE iterations. If > 1, iterative reweighting
        is applied.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    return_as_dipoles : bool
        If True, the sources are returned as a list of Dipole instances.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    stc : SourceEstimate | list of SourceEstimate
        Source time courses for each evoked data passed as input.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.

    See Also
    --------
    tf_mixed_norm

    References
    ----------
    .. [1] A. Gramfort, M. Kowalski, M. Hamalainen,
       "Mixed-norm estimates for the M/EEG inverse problem using accelerated
       gradient methods", Physics in Medicine and Biology, 2012.
       http://dx.doi.org/10.1088/0031-9155/57/7/1937

    .. [2] D. Strohmeier, Y. Bekhti, J. Haueisen, A. Gramfort,
       "The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal
       MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging,
       Volume 35 (10), pp. 2218-2228, 2016.
    """
    if n_mxne_iter < 1:
        raise ValueError('MxNE has to be computed at least 1 time. '
                         'Requires n_mxne_iter >= 1, got %d' % n_mxne_iter)

    if not isinstance(evoked, list):
        evoked = [evoked]

    _check_reference(evoked[0])

    all_ch_names = evoked[0].ch_names
    if not all(all_ch_names == evoked[i].ch_names
               for i in range(1, len(evoked))):
        raise Exception('All the datasets must have the same good channels.')

    # put the forward solution in fixed orientation if it's not already
    if loose is None and not is_fixed_orient(forward):
        forward = deepcopy(forward)
        _to_fixed_ori(forward)

    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked[0].info, noise_cov, pca, depth, loose, weights,
        weights_min)

    sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
    M = np.concatenate([e.data[sel] for e in evoked], axis=1)

    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)

    if time_pca:
        # Optionally reduce the temporal dimension via SVD; the solution
        # is projected back onto Vh after the solver runs.
        U, s, Vh = linalg.svd(M, full_matrices=False)
        if not isinstance(time_pca, bool) and isinstance(time_pca, int):
            U = U[:, :time_pca]
            s = s[:time_pca]
            Vh = Vh[:time_pca]
        M = U * s

    # Scaling to make setting of alpha easy
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting /= alpha_max

    if n_mxne_iter == 1:
        X, active_set, E = mixed_norm_solver(
            M, gain, alpha, maxit=maxit, tol=tol,
            active_set_size=active_set_size, n_orient=n_dip_per_pos,
            debias=debias, solver=solver, verbose=verbose)
    else:
        X, active_set, E = iterative_mixed_norm_solver(
            M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,
            n_orient=n_dip_per_pos, active_set_size=active_set_size,
            debias=debias, solver=solver, verbose=verbose)

    if time_pca:
        # Project back from the reduced temporal basis.
        X = np.dot(X, Vh)
        M = np.dot(M, Vh)

    # Compute estimated whitened sensor data
    M_estimated = np.dot(gain[:, active_set], X)

    if mask is not None:
        # Expand the active set back to the full (unmasked) source space.
        # BUG FIX: dtype=np.bool was removed from NumPy in 1.24; the
        # builtin bool is the supported spelling.
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp

    if active_set.sum() == 0:
        raise Exception("No active dipoles found. alpha is too big.")

    # Reapply weights to have correct unit
    X = _reapply_source_weighting(X, source_weighting,
                                  active_set, n_dip_per_pos)

    outs = list()
    residual = list()
    cnt = 0
    for e in evoked:
        tmin = e.times[0]
        tstep = 1.0 / e.info['sfreq']
        Xe = X[:, cnt:(cnt + len(e.times))]
        if return_as_dipoles:
            out = _make_dipoles_sparse(
                Xe, active_set, forward, tmin, tstep,
                M[:, cnt:(cnt + len(e.times))],
                M_estimated[:, cnt:(cnt + len(e.times))], verbose=None)
        else:
            out = _make_sparse_stc(Xe, active_set, forward, tmin, tstep)
        outs.append(out)
        cnt += len(e.times)

        if return_residual:
            residual.append(_compute_residual(forward, e, Xe, active_set,
                                              gain_info))

    logger.info('[done]')

    if len(outs) == 1:
        out = outs[0]
        if return_residual:
            residual = residual[0]
    else:
        out = outs

    if return_residual:
        out = out, residual

    return out
def _window_evoked(evoked, size):
    """Window evoked (size in seconds).

    Tapers the first ``lsize`` and last ``rsize`` seconds of the data with
    half Hann windows to reduce edge artifacts; a copy of ``evoked`` is
    returned, the input is not modified.
    """
    if isinstance(size, (float, int)):
        lsize = rsize = float(size)
    else:
        lsize, rsize = size
    evoked = evoked.copy()
    sfreq = float(evoked.info['sfreq'])
    lsize = int(lsize * sfreq)
    rsize = int(rsize * sfreq)
    # COMPAT FIX: the ``scipy.signal.hann`` alias was removed in SciPy
    # 1.15; the canonical location is ``scipy.signal.windows.hann``.
    # Fall back to the old name on very old SciPy without the windows
    # namespace.
    hann = getattr(signal, 'windows', signal).hann
    lhann = hann(lsize * 2)
    rhann = hann(rsize * 2)
    window = np.r_[lhann[:lsize],
                   np.ones(len(evoked.times) - lsize - rsize),
                   rhann[-rsize:]]
    evoked.data *= window[None, :]
    return evoked
@verbose
def tf_mixed_norm(evoked, forward, noise_cov, alpha_space, alpha_time,
                  loose=0.2, depth=0.8, maxit=3000, tol=1e-4,
                  weights=None, weights_min=None, pca=True, debias=True,
                  wsize=64, tstep=4, window=0.02, return_residual=False,
                  return_as_dipoles=False, verbose=None):
    """Time-Frequency Mixed-norm estimate (TF-MxNE).

    Compute L1/L2 + L1 mixed-norm solution on time-frequency
    dictionary. Works with evoked data [1]_ [2]_.

    Parameters
    ----------
    evoked : instance of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    alpha_space : float in [0, 100]
        Regularization parameter for spatial sparsity. If larger than 100,
        then no source will be active.
    alpha_time : float in [0, 100]
        Regularization parameter for temporal sparsity. If set to 0,
        no temporal regularization is applied. In this case, TF-MxNE is
        equivalent to MxNE with L21 norm.
    loose : float in [0, 1]
        Value that weights the source variances of the dipole components
        that are parallel (tangential) to the cortical surface. If loose
        is 0 or None then the solution is computed with fixed orientation.
        If loose is 1, it corresponds to free orientations.
    depth : None | float in [0, 1]
        Depth weighting coefficients. If None, no depth weighting is performed.
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    weights : None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None or
        1d array of length n_sources or a SourceEstimate e.g. obtained
        with wMNE or dSPM or fMRI.
    weights_min : float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    pca : bool
        If True the rank of the data is reduced to true dimension.
    debias : bool
        Remove coefficient amplitude bias due to L1 penalty.
    wsize : int
        Length of the STFT window in samples (must be a multiple of 4).
    tstep : int
        Step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2).
    window : float or (float, float)
        Length of time window used to take care of edge artifacts in seconds.
        It can be one float, or a tuple of two floats if the values are
        different for the left and right window lengths.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    return_as_dipoles : bool
        If True, the sources are returned as a list of Dipole instances.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    stc : instance of SourceEstimate
        Source time courses.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.

    See Also
    --------
    mixed_norm

    References
    ----------
    .. [1] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
       "Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
       non-stationary source activations",
       Neuroimage, Volume 70, pp. 410-422, 15 April 2013.
       DOI: 10.1016/j.neuroimage.2012.12.051

    .. [2] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
       "Functional Brain Imaging with M/EEG Using Structured Sparsity in
       Time-Frequency Dictionaries",
       Proceedings Information Processing in Medical Imaging
       Lecture Notes in Computer Science, Volume 6801/2011, pp. 600-611, 2011.
       DOI: 10.1007/978-3-642-22092-0_49
    """
    _check_reference(evoked)

    all_ch_names = evoked.ch_names
    info = evoked.info

    if (alpha_space < 0.) or (alpha_space > 100.):
        raise Exception('alpha_space must be in range [0, 100].'
                        ' Got alpha_space = %f' % alpha_space)

    if (alpha_time < 0.) or (alpha_time > 100.):
        raise Exception('alpha_time must be in range [0, 100].'
                        ' Got alpha_time = %f' % alpha_time)

    # put the forward solution in fixed orientation if it's not already
    if loose is None and not is_fixed_orient(forward):
        forward = deepcopy(forward)
        _to_fixed_ori(forward)

    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3

    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked.info, noise_cov, pca, depth, loose, weights,
        weights_min)

    if window is not None:
        # Taper the data edges to limit STFT edge artifacts.
        evoked = _window_evoked(evoked, window)

    sel = [all_ch_names.index(name) for name in gain_info["ch_names"]]
    M = evoked.data[sel]

    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)

    # Scaling to make setting of alpha easy
    alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting /= alpha_max

    X, active_set, E = tf_mixed_norm_solver(
        M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep,
        maxit=maxit, tol=tol, verbose=verbose, n_orient=n_dip_per_pos,
        log_objective=True, debias=debias)

    if active_set.sum() == 0:
        raise Exception("No active dipoles found. "
                        "alpha_space/alpha_time are too big.")

    # Compute estimated whitened sensor data
    M_estimated = np.dot(gain[:, active_set], X)

    if mask is not None:
        # Expand the active set back to the full (unmasked) source space.
        # BUG FIX: dtype=np.bool was removed from NumPy in 1.24; the
        # builtin bool is the supported spelling.
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp

    X = _reapply_source_weighting(
        X, source_weighting, active_set, n_dip_per_pos)

    if return_residual:
        residual = _compute_residual(
            forward, evoked, X, active_set, gain_info)

    if return_as_dipoles:
        out = _make_dipoles_sparse(
            X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'],
            M, M_estimated, verbose=None)
    else:
        out = _make_sparse_stc(
            X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'])

    logger.info('[done]')

    if return_residual:
        out = out, residual

    return out
| {
"repo_name": "jaeilepp/mne-python",
"path": "mne/inverse_sparse/mxne_inverse.py",
"copies": "1",
"size": "23868",
"license": "bsd-3-clause",
"hash": 1507655731444313000,
"line_mean": 35.8333333333,
"line_max": 79,
"alpha_frac": 0.6026059997,
"autogenerated": false,
"ratio": 3.4681778552746296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9570783854974629,
"avg_score": 0,
"num_lines": 648
} |
from math import sqrt
import numpy as np
from scipy import linalg
from .mxne_debiasing import compute_bias
from ..utils import logger, verbose, sum_squared, warn
from ..time_frequency.stft import stft_norm1, stft_norm2, stft, istft
def groups_norm2(A, n_orient):
    """Return the squared L2 norm of each orientation group of ``A``.

    Rows are grouped in consecutive blocks of ``n_orient``.  NOTE: ``A``
    is squared **in place** as a side effect (the historical contract of
    this helper — callers pass a copy when they need the original).
    """
    n_positions = A.shape[0] // n_orient
    np.multiply(A, A, out=A)  # square in place
    return A.reshape(n_positions, -1).sum(axis=1)
def norm_l2inf(A, n_orient, copy=True):
    """L2-inf mixed norm: the largest group-wise L2 norm of ``A``.

    With ``copy=False`` the input array is squared in place by the
    underlying group-norm computation.
    """
    if not A.size:
        return 0.0
    work = A.copy() if copy else A
    return sqrt(groups_norm2(work, n_orient).max())
def norm_l21(A, n_orient, copy=True):
    """L21 mixed norm: sum over groups of the group-wise L2 norm.

    With ``copy=False`` the input array is squared in place by the
    underlying group-norm computation.
    """
    if not A.size:
        return 0.0
    work = A.copy() if copy else A
    return np.sqrt(groups_norm2(work, n_orient)).sum()
def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False):
    """Proximity operator for l21 norm.

    L2 over columns and L1 over rows => groups contain n_orient rows.

    It can eventually take into account the negative frequencies
    when a complex value is passed and is_stft=True.

    Parameters
    ----------
    Y : array, shape (n_sources, n_coefs)
        The input data.
    alpha : float
        The regularization parameter.
    n_orient : int
        Number of dipoles per locations (typically 1 or 3).
    shape : None | tuple
        Shape of TF coefficients matrix.
    is_stft : bool
        If True, Y contains TF coefficients.

    Returns
    -------
    Y : array, shape (n_sources, n_coefs)
        The output data.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources

    Example
    -------
    >>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=float), (2, 1))
    >>> Y = np.r_[Y, np.zeros_like(Y)]
    >>> print(Y)  # doctest:+SKIP
    [[ 0.  4.  3.  0.  0.]
     [ 0.  4.  3.  0.  0.]
     [ 0.  0.  0.  0.  0.]
     [ 0.  0.  0.  0.  0.]]
    >>> Yp, active_set = prox_l21(Y, 2, 2)
    >>> print(Yp)  # doctest:+SKIP
    [[0.         2.86862915 2.15147186 0.         0.        ]
     [0.         2.86862915 2.15147186 0.         0.        ]]
    >>> print(active_set)
    [ True  True False False]
    """
    if len(Y) == 0:
        # BUG FIX: dtype=np.bool was removed from NumPy in 1.24; the
        # builtin bool is the supported spelling.
        return np.zeros_like(Y), np.zeros((0,), dtype=bool)
    if shape is not None:
        shape_init = Y.shape
        Y = Y.reshape(*shape)
    n_positions = Y.shape[0] // n_orient

    if is_stft:
        rows_norm = np.sqrt(stft_norm2(Y).reshape(n_positions, -1).sum(axis=1))
    else:
        rows_norm = np.sqrt((Y * Y.conj()).real.reshape(n_positions,
                                                        -1).sum(axis=1))
    # Ensure shrink is >= 0 while avoiding any division by zero
    shrink = np.maximum(1.0 - alpha / np.maximum(rows_norm, alpha), 0.0)
    active_set = shrink > 0.0
    if n_orient > 1:
        # Replicate the per-position decisions over the orientations.
        active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
        shrink = np.tile(shrink[:, None], [1, n_orient]).ravel()
    Y = Y[active_set]
    if shape is None:
        Y *= shrink[active_set][:, np.newaxis]
    else:
        Y *= shrink[active_set][:, np.newaxis, np.newaxis]
        Y = Y.reshape(-1, *shape_init[1:])
    return Y, active_set
def prox_l1(Y, alpha, n_orient):
    """Proximity operator for the l1 norm with orientation support.

    For ``n_orient == 1`` this is plain soft-thresholding; for
    ``n_orient > 1`` it is block soft-thresholding, i.e. L2 across the
    orientations of a position and L1 over (position, time).  See
    Gramfort et al., Neuroimage 2013 (TF-MxNE),
    DOI: 10.1016/j.neuroimage.2012.12.051.

    Parameters
    ----------
    Y : array, shape (n_sources, n_coefs)
        The input data.
    alpha : float
        The regularization parameter.
    n_orient : int
        Number of dipoles per location (typically 1 or 3).

    Returns
    -------
    Y : array, shape (n_active, n_coefs)
        The thresholded coefficients, restricted to the active rows.
    active_set : array of bool, shape (n_sources, )
        Mask of rows kept (a position is kept when any of its
        time bins survives thresholding).
    """
    n_positions = Y.shape[0] // n_orient
    # L2 magnitude of each (position, time) group across orientations.
    sq = (Y * Y.conj()).real
    grp_norms = np.sqrt(sq.T.reshape(-1, n_orient).sum(axis=1))
    # Soft-threshold factor in [0, 1); max(norm, alpha) avoids divide-by-0.
    scale = np.maximum(1.0 - alpha / np.maximum(grp_norms, alpha), 0.0)
    scale = scale.reshape(-1, n_positions).T
    keep = np.any(scale > 0.0, axis=1)
    scale = scale[keep]
    if n_orient > 1:
        # Position-level mask -> row-level mask.
        keep = np.repeat(keep, n_orient)
    Y = Y[keep]
    if len(Y) > 0:
        for orient in range(n_orient):
            Y[orient::n_orient] *= scale
    return Y, keep
def dgap_l21(M, G, X, active_set, alpha, n_orient):
    """Duality gap for the mixed norm inverse problem.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_active)
        The gain matrix a.k.a. lead field.
    X : array, shape (n_active, n_times)
        Sources.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources.
    alpha : float
        The regularization parameter.
    n_orient : int
        Number of dipoles per locations (typically 1 or 3).

    Returns
    -------
    gap : float
        Dual gap.
    p_obj : float
        Primal objective.
    d_obj : float
        Dual objective. gap = p_obj - d_obj.
    R : array, shape (n_sensors, n_times)
        Current residual (M - G * X).

    References
    ----------
    .. [1] A. Gramfort, M. Kowalski, M. Hamalainen,
       "Mixed-norm estimates for the M/EEG inverse problem using accelerated
       gradient methods", Physics in Medicine and Biology, 2012.
       https://doi.org/10.1088/0031-9155/57/7/1937
    """
    GX = np.dot(G[:, active_set], X)
    R = M - GX
    penalty = norm_l21(X, n_orient, copy=True)
    nR2 = sum_squared(R)
    p_obj = 0.5 * nR2 + alpha * penalty

    dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False)
    # BUG FIX: guard against dual_norm == 0 (e.g. a perfectly explained M,
    # where R is all zeros): the historical ``alpha / dual_norm`` raised
    # ZeroDivisionError.  Any feasible scaling is valid then; use 1.0.
    if dual_norm > 0.0:
        scaling = min(alpha / dual_norm, 1.0)
    else:
        scaling = 1.0
    d_obj = (scaling - 0.5 * (scaling ** 2)) * nR2 + scaling * np.sum(R * GX)

    gap = p_obj - d_obj
    return gap, p_obj, d_obj, R
@verbose
def _mixed_norm_solver_prox(M, G, alpha, lipschitz_constant, maxit=200,
                            tol=1e-8, verbose=None, init=None, n_orient=1,
                            dgap_freq=10):
    """Solve L21 inverse problem with proximal iterations and FISTA.

    Returns the active sources ``X``, the boolean ``active_set`` and the
    list ``E`` of primal objective values sampled every ``dgap_freq``
    iterations.
    """
    n_sensors, n_times = M.shape
    _, n_sources = G.shape

    if n_sources < n_sensors:
        # Work in source space when it is the smaller dimension: the
        # iterations then use the precomputed Gram matrix.
        gram = np.dot(G.T, G)
        GTM = np.dot(G.T, M)
    else:
        gram = None

    if init is None:
        X = 0.0
        R = M.copy()
        if gram is not None:
            R = np.dot(G.T, R)
    else:
        X = init
        if gram is None:
            R = M - np.dot(G, X)
        else:
            R = GTM - np.dot(gram, X)

    t = 1.0
    Y = np.zeros((n_sources, n_times))  # FISTA aux variable
    E = []  # track primal objective function
    highest_d_obj = - np.inf
    # BUG FIX: dtype=np.bool was removed from NumPy in 1.24; the builtin
    # bool is the supported spelling.  Start with the full active set.
    active_set = np.ones(n_sources, dtype=bool)

    for i in range(maxit):
        X0, active_set_0 = X, active_set  # store previous values
        if gram is None:
            Y += np.dot(G.T, R) / lipschitz_constant  # ISTA step
        else:
            Y += R / lipschitz_constant  # ISTA step
        X, active_set = prox_l21(Y, alpha / lipschitz_constant, n_orient)

        # FISTA momentum: extrapolate from the two last iterates.
        t0 = t
        t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
        Y.fill(0.0)
        dt = ((t0 - 1.0) / t)
        Y[active_set] = (1.0 + dt) * X
        Y[active_set_0] -= dt * X0
        Y_as = active_set_0 | active_set

        if gram is None:
            R = M - np.dot(G[:, Y_as], Y[Y_as])
        else:
            R = GTM - np.dot(gram[:, Y_as], Y[Y_as])

        if (i + 1) % dgap_freq == 0:
            # Duality-gap based stopping criterion.
            _, p_obj, d_obj, _ = dgap_l21(M, G, X, active_set, alpha,
                                          n_orient)
            highest_d_obj = max(d_obj, highest_d_obj)
            gap = p_obj - highest_d_obj
            E.append(p_obj)
            logger.debug("p_obj : %s -- gap : %s" % (p_obj, gap))
            if gap < tol:
                logger.debug('Convergence reached ! (gap: %s < %s)'
                             % (gap, tol))
                break
    return X, active_set, E
@verbose
def _mixed_norm_solver_cd(M, G, alpha, lipschitz_constant, maxit=10000,
                          tol=1e-8, verbose=None, init=None, n_orient=1,
                          dgap_freq=10):
    """Solve L21 inverse problem with coordinate descent.

    Delegates to scikit-learn's MultiTaskLasso (only valid for
    ``n_orient == 1``); returns the active sources, the active set and
    the final primal objective value.
    """
    # COMPAT FIX: the private module path
    # ``sklearn.linear_model.coordinate_descent`` was removed in
    # scikit-learn 1.0; import from the public namespace instead.
    from sklearn.linear_model import MultiTaskLasso

    assert M.ndim == G.ndim and M.shape[0] == G.shape[0]

    # COMPAT FIX: the ``normalize`` parameter was removed in scikit-learn
    # 1.2.  The historical call passed normalize=False, which matches the
    # old default, so omitting it preserves behavior.
    clf = MultiTaskLasso(alpha=alpha / len(M), tol=tol / sum_squared(M),
                         fit_intercept=False, max_iter=maxit,
                         warm_start=True)
    if init is not None:
        clf.coef_ = init.T
    else:
        clf.coef_ = np.zeros((G.shape[1], M.shape[1])).T
    clf.fit(G, M)

    X = clf.coef_.T
    active_set = np.any(X, axis=1)
    X = X[active_set]
    gap, p_obj, d_obj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
    return X, active_set, p_obj
@verbose
def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200,
                           tol=1e-8, verbose=None, init=None, n_orient=1,
                           dgap_freq=10):
    """Solve L21 inverse problem with block coordinate descent.

    Returns the active sources ``X``, the boolean ``active_set`` and the
    list ``E`` of primal objective values sampled every ``dgap_freq``
    iterations.
    """
    n_sensors, n_times = M.shape
    n_sensors, n_sources = G.shape
    n_positions = n_sources // n_orient

    if init is None:
        X = np.zeros((n_sources, n_times))
        R = M.copy()
    else:
        X = init
        R = M - np.dot(G, X)

    E = []  # track primal objective function
    highest_d_obj = - np.inf
    # BUG FIX: dtype=np.bool was removed from NumPy in 1.24; use the
    # builtin bool.  NOTE: unlike the proximal solver, the active set
    # starts *empty* here (np.zeros) — _bcd switches positions on as it
    # sweeps them; the historical "start with full AS" comment was wrong.
    active_set = np.zeros(n_sources, dtype=bool)

    alpha_lc = alpha / lipschitz_constant

    # First make G fortran for faster access to blocks of columns
    G = np.asfortranarray(G)
    # It is better to call gemm here
    # so it is called only once
    gemm = linalg.get_blas_funcs("gemm", [R.T, G[:, 0:n_orient]])
    one_ovr_lc = 1. / lipschitz_constant

    # assert that all the multiplied matrices are fortran contiguous
    assert X.T.flags.f_contiguous
    assert R.T.flags.f_contiguous
    assert G.flags.f_contiguous
    # storing list of contiguous arrays
    list_G_j_c = []
    for j in range(n_positions):
        idx = slice(j * n_orient, (j + 1) * n_orient)
        list_G_j_c.append(np.ascontiguousarray(G[:, idx]))

    for i in range(maxit):
        _bcd(G, X, R, active_set, one_ovr_lc, n_orient, n_positions,
             alpha_lc, gemm, list_G_j_c)

        if (i + 1) % dgap_freq == 0:
            # Duality-gap based stopping criterion.
            _, p_obj, d_obj, _ = dgap_l21(M, G, X[active_set], active_set,
                                          alpha, n_orient)
            highest_d_obj = max(d_obj, highest_d_obj)
            gap = p_obj - highest_d_obj
            E.append(p_obj)
            logger.debug("Iteration %d :: p_obj %f :: dgap %f :: n_active %d" %
                         (i + 1, p_obj, gap, np.sum(active_set) / n_orient))

            if gap < tol:
                logger.debug('Convergence reached ! (gap: %s < %s)'
                             % (gap, tol))
                break

    X = X[active_set]

    return X, active_set, E
def _bcd(G, X, R, active_set, one_ovr_lc, n_orient, n_positions,
         alpha_lc, gemm, list_G_j_c):
    """Implement one full pass of BCD.

    BCD stands for Block Coordinate Descent.
    This function makes use of scipy.linalg.get_blas_funcs for speed reasons.

    Parameters
    ----------
    G : array, shape (n_sensors, n_active)
        The gain matrix a.k.a. lead field.
    X : array, shape (n_sources, n_times)
        Sources, modified in place.
    R : array, shape (n_sensors, n_times)
        The residuals: R = M - G @ X, modified in place.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources, modified in place.
    one_ovr_lc : array, shape (n_positions, )
        One over the lipschitz constants.
    n_orient : int
        Number of dipoles per positions (typically 1 or 3).
    n_positions : int
        Number of source positions.
    alpha_lc: array, shape (n_positions, )
        alpha divided by the Lipschitz constants (alpha / lc).
    gemm: callable
        Low level blas function to fastly multiply matrix time matrix.
    """
    # Scratch buffer reused for every position block.
    X_j_new = np.zeros_like(X[0:n_orient, :], order='C')
    for j, G_j_c in enumerate(list_G_j_c):
        idx = slice(j * n_orient, (j + 1) * n_orient)
        G_j = G[:, idx]
        X_j = X[idx]
        # Gradient step written in place into X_j_new:
        # X_j_new = one_ovr_lc[j] * np.dot(G_j.T, R)
        gemm(alpha=one_ovr_lc[j], beta=0., a=R.T, b=G_j, c=X_j_new.T,
             overwrite_c=True)
        # Mathurin's trick to avoid checking all the entries
        was_non_zero = X_j[0, 0] != 0
        # was_non_zero = np.any(X_j)
        if was_non_zero:
            # Remove this block's contribution from the residual (beta=1.
            # accumulates into R): R += np.dot(G_j, X_j)
            gemm(alpha=1., beta=1., a=X_j.T, b=G_j_c.T, c=R.T,
                 overwrite_c=True)
            X_j_new += X_j
        block_norm = sqrt(sum_squared(X_j_new))
        if block_norm <= alpha_lc[j]:
            # Proximal operator zeroes the whole block.
            X_j.fill(0.)
            active_set[idx] = False
        else:
            # Group soft-thresholding (L21 prox) of the block.
            shrink = max(1.0 - alpha_lc[j] / block_norm, 0.0)
            X_j_new *= shrink
            # Put the updated block back into the residual:
            # R -= np.dot(G_j, X_j_new)
            gemm(alpha=-1., beta=1., a=X_j_new.T, b=G_j_c.T, c=R.T,
                 overwrite_c=True)
            X_j[:] = X_j_new
            active_set[idx] = True
@verbose
def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
                      active_set_size=50, debias=True, n_orient=1,
                      solver='auto', return_gap=False, dgap_freq=10):
    """Solve L1/L2 mixed-norm inverse problem with active set strategy.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_dipoles)
        The gain matrix a.k.a. lead field.
    alpha : float
        The regularization parameter. It should be between 0 and 100.
        A value of 100 will lead to an empty active set (no active source).
    maxit : int
        The number of iterations.
    tol : float
        Tolerance on dual gap for convergence checking.
    %(verbose)s
    active_set_size : int
        Size of active set increase at each iteration.
    debias : bool
        Debias source estimates.
    n_orient : int
        The number of orientation (1 : fixed or 3 : free or loose).
    solver : 'prox' | 'cd' | 'bcd' | 'auto'
        The algorithm to use for the optimization.
    return_gap : bool
        Return final duality gap.
    dgap_freq : int
        The duality gap is computed every dgap_freq iterations of the solver on
        the active set.

    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates.
    active_set : array
        The mask of active sources.
    E : list
        The value of the objective function over the iterations.
    gap : float
        Final duality gap. Returned only if return_gap is True.

    References
    ----------
    .. [1] A. Gramfort, M. Kowalski, M. Hamalainen,
       "Mixed-norm estimates for the M/EEG inverse problem using accelerated
       gradient methods", Physics in Medicine and Biology, 2012.
       https://doi.org/10.1088/0031-9155/57/7/1937
    .. [2] D. Strohmeier, Y. Bekhti, J. Haueisen, A. Gramfort,
       "The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal
       MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging,
       Volume 35 (10), pp. 2218-2228, 2016.
    """
    n_dipoles = G.shape[1]
    n_positions = n_dipoles // n_orient
    n_sensors, n_times = M.shape
    alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
    logger.info("-- ALPHA MAX : %s" % alpha_max)
    alpha = float(alpha)
    # The 'cd' solver requires scikit-learn's MultiTaskLasso.
    has_sklearn = True
    try:
        from sklearn.linear_model.coordinate_descent import MultiTaskLasso  # noqa: F401,E501
    except ImportError:
        has_sklearn = False
    # Resolve 'auto' and fall back to 'bcd' when 'cd' is not usable.
    if solver == 'auto':
        if has_sklearn and (n_orient == 1):
            solver = 'cd'
        else:
            solver = 'bcd'
    if solver == 'cd':
        if n_orient == 1 and not has_sklearn:
            warn('Scikit-learn >= 0.12 cannot be found. Using block coordinate'
                 ' descent instead of coordinate descent.')
            solver = 'bcd'
        if n_orient > 1:
            warn('Coordinate descent is only available for fixed orientation. '
                 'Using block coordinate descent instead of coordinate '
                 'descent')
            solver = 'bcd'
    if solver == 'cd':
        logger.info("Using coordinate descent")
        l21_solver = _mixed_norm_solver_cd
        lc = None
    elif solver == 'bcd':
        logger.info("Using block coordinate descent")
        l21_solver = _mixed_norm_solver_bcd
        G = np.asfortranarray(G)
        # One Lipschitz constant per position block.
        if n_orient == 1:
            lc = np.sum(G * G, axis=0)
        else:
            lc = np.empty(n_positions)
            for j in range(n_positions):
                G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
                lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
    else:
        logger.info("Using proximal iterations")
        l21_solver = _mixed_norm_solver_prox
        # Single global Lipschitz constant (spectral norm of G, squared).
        lc = 1.01 * linalg.norm(G, ord=2) ** 2
    if active_set_size is not None:
        E = list()
        highest_d_obj = - np.inf
        X_init = None
        # NOTE(review): np.bool is deprecated (removed in NumPy >= 1.24);
        # the builtin bool behaves identically here.
        active_set = np.zeros(n_dipoles, dtype=np.bool)
        # Seed the active set with the sources most correlated with the data.
        idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient))
        new_active_idx = idx_large_corr[-active_set_size:]
        if n_orient > 1:
            # Expand position indices to all their orientation columns.
            new_active_idx = (n_orient * new_active_idx[:, None] +
                              np.arange(n_orient)[None, :]).ravel()
        active_set[new_active_idx] = True
        as_size = np.sum(active_set)
        for k in range(maxit):
            # Restrict the Lipschitz constants to the current active set.
            if solver == 'bcd':
                lc_tmp = lc[active_set[::n_orient]]
            elif solver == 'cd':
                lc_tmp = None
            else:
                lc_tmp = 1.01 * linalg.norm(G[:, active_set], ord=2) ** 2
            X, as_, _ = l21_solver(M, G[:, active_set], alpha, lc_tmp,
                                   maxit=maxit, tol=tol, init=X_init,
                                   n_orient=n_orient, dgap_freq=dgap_freq)
            # Keep only the sources that survived the subproblem.
            active_set[active_set] = as_.copy()
            idx_old_active_set = np.where(active_set)[0]
            _, p_obj, d_obj, R = dgap_l21(M, G, X, active_set, alpha,
                                          n_orient)
            highest_d_obj = max(d_obj, highest_d_obj)
            gap = p_obj - highest_d_obj
            E.append(p_obj)
            logger.info("Iteration %d :: p_obj %f :: dgap %f ::"
                        "n_active_start %d :: n_active_end %d" % (
                            k + 1, p_obj, gap, as_size // n_orient,
                            np.sum(active_set) // n_orient))
            if gap < tol:
                logger.info('Convergence reached ! (gap: %s < %s)'
                            % (gap, tol))
                break
            # add sources if not last iteration
            if k < (maxit - 1):
                # Candidates most correlated with the current residual.
                idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R),
                                                         n_orient))
                new_active_idx = idx_large_corr[-active_set_size:]
                if n_orient > 1:
                    new_active_idx = (n_orient * new_active_idx[:, None] +
                                      np.arange(n_orient)[None, :])
                    new_active_idx = new_active_idx.ravel()
                active_set[new_active_idx] = True
                idx_active_set = np.where(active_set)[0]
                as_size = np.sum(active_set)
                # Warm start: place the previous solution at the rows it
                # occupies inside the enlarged active set.
                X_init = np.zeros((as_size, n_times), dtype=X.dtype)
                idx = np.searchsorted(idx_active_set, idx_old_active_set)
                X_init[idx] = X
        else:
            # for-else: reached only when the loop ran out without a break.
            warn('Did NOT converge ! (gap: %s > %s)' % (gap, tol))
    else:
        # No active set strategy: solve on the full source space at once.
        X, active_set, E = l21_solver(M, G, alpha, lc, maxit=maxit,
                                      tol=tol, n_orient=n_orient, init=None)
        if return_gap:
            gap = dgap_l21(M, G, X, active_set, alpha, n_orient)[0]
    if np.any(active_set) and debias:
        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
        X *= bias[:, np.newaxis]
    logger.info('Final active set size: %s' % (np.sum(active_set) // n_orient))
    if return_gap:
        return X, active_set, E, gap
    else:
        return X, active_set, E
@verbose
def iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter, maxit=3000,
                                tol=1e-8, verbose=None, active_set_size=50,
                                debias=True, n_orient=1, dgap_freq=10,
                                solver='auto'):
    """Solve L0.5/L2 mixed-norm inverse problem with active set strategy.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_dipoles)
        The gain matrix a.k.a. lead field.
    alpha : float
        The regularization parameter. It should be between 0 and 100.
        A value of 100 will lead to an empty active set (no active source).
    n_mxne_iter : int
        The number of MxNE iterations. If > 1, iterative reweighting
        is applied.
    maxit : int
        The number of iterations.
    tol : float
        Tolerance on dual gap for convergence checking.
    %(verbose)s
    active_set_size : int
        Size of active set increase at each iteration.
    debias : bool
        Debias source estimates.
    n_orient : int
        The number of orientation (1 : fixed or 3 : free or loose).
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations.
    solver : 'prox' | 'cd' | 'bcd' | 'auto'
        The algorithm to use for the optimization.

    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates.
    active_set : array
        The mask of active sources.
    E : list
        The value of the objective function over the iterations.

    References
    ----------
    .. [1] D. Strohmeier, Y. Bekhti, J. Haueisen, A. Gramfort,
       "The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal
       MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging,
       Volume 35 (10), pp. 2218-2228, 2016.
    """
    def g(w):
        # Reweighting norm: fourth root of the squared group norms.
        return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient)))

    def gprime(w):
        # Weights for the next iteration, one entry per orientation.
        return 2. * np.repeat(g(w), n_orient).ravel()

    E = list()
    # Start from the full source space with unit weights.
    # (bool replaces the deprecated np.bool alias, removed in NumPy >= 1.24.)
    active_set = np.ones(G.shape[1], dtype=bool)
    weights = np.ones(G.shape[1])
    X = np.zeros((G.shape[1], M.shape[1]))
    for k in range(n_mxne_iter):
        X0 = X.copy()
        active_set_0 = active_set.copy()
        # Solve a weighted L21 problem restricted to the active sources.
        G_tmp = G[:, active_set] * weights[np.newaxis, :]
        if active_set_size is not None:
            if np.sum(active_set) > (active_set_size * n_orient):
                X, _active_set, _ = mixed_norm_solver(
                    M, G_tmp, alpha, debias=False, n_orient=n_orient,
                    maxit=maxit, tol=tol, active_set_size=active_set_size,
                    dgap_freq=dgap_freq, solver=solver, verbose=verbose)
            else:
                X, _active_set, _ = mixed_norm_solver(
                    M, G_tmp, alpha, debias=False, n_orient=n_orient,
                    maxit=maxit, tol=tol, active_set_size=None,
                    dgap_freq=dgap_freq, solver=solver, verbose=verbose)
        else:
            X, _active_set, _ = mixed_norm_solver(
                M, G_tmp, alpha, debias=False, n_orient=n_orient,
                maxit=maxit, tol=tol, active_set_size=None,
                dgap_freq=dgap_freq, solver=solver, verbose=verbose)
        logger.info('active set size %d' % (_active_set.sum() / n_orient))
        if _active_set.sum() > 0:
            active_set[active_set] = _active_set
            # Reapply weights to have correct unit
            X *= weights[_active_set][:, np.newaxis]
            weights = gprime(X)
            p_obj = 0.5 * linalg.norm(M - np.dot(G[:, active_set], X),
                                      'fro') ** 2. + alpha * np.sum(g(X))
            E.append(p_obj)
            # Check convergence
            if ((k >= 1) and np.all(active_set == active_set_0) and
                    np.all(np.abs(X - X0) < tol)):
                # Use the module logger (was a bare print) so the message
                # respects MNE's verbosity settings like every other message.
                logger.info('Convergence reached after %d reweightings!' % k)
                break
        else:
            # No source survived the reweighted problem: all-zero solution.
            active_set = np.zeros_like(active_set)
            p_obj = 0.5 * linalg.norm(M) ** 2.
            E.append(p_obj)
            break
    if np.any(active_set) and debias:
        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
        X *= bias[:, np.newaxis]
    return X, active_set, E
###############################################################################
# TF-MxNE
@verbose
def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None):
    """Compute lipschitz constant for FISTA.

    It uses a power iteration method. The iteration stops when the
    relative change of the estimate falls below ``tol`` or after 100
    iterations.
    """
    n_times = M.shape[1]
    n_points = G.shape[1]
    # np.float was a deprecated alias for the builtin float and was removed
    # in NumPy >= 1.24; plain float is equivalent (float64).
    iv = np.ones((n_points, n_times), dtype=float)
    v = phi(iv)
    L = 1e100  # sentinel so the first relative-change test cannot pass
    for it in range(100):
        L_old = L
        logger.info('Lipschitz estimation: iteration = %d' % it)
        # Apply phiT . G.T . G . phi to the current iterate.
        iv = np.real(phiT(v))
        Gv = np.dot(G, iv)
        GtGv = np.dot(G.T, Gv)
        w = phi(GtGv)
        L = np.max(np.abs(w))  # l_inf norm
        v = w / L
        if abs((L - L_old) / L_old) < tol:
            break
    return L
def safe_max_abs(A, ia):
    """Return max(abs(A[ia])), or 0. when the mask selects nothing."""
    if not np.sum(ia):  # empty selection: avoid np.max on an empty array
        return 0.
    return np.abs(A[ia]).max()
def safe_max_abs_diff(A, ia, B, ib):
    """Return np.max(np.abs(A[ia] - B[ib])), tolerating empty selections.

    A selection whose mask picks nothing is treated as the scalar 0.
    """
    A = A[ia] if np.sum(ia) else 0.0
    # BUG FIX: guard B with its own mask ``ib`` (the original tested
    # ``ia`` here, so B could be indexed by an empty/mismatched mask).
    B = B[ib] if np.sum(ib) else 0.0
    return np.max(np.abs(A - B))
class _Phi(object):
    """Picklable STFT operator (phi) supporting several TF dictionaries.

    A lambda would not pickle, hence this small callable class.
    """

    def __init__(self, wsize, tstep, n_coefs):  # noqa: D102
        self.wsize = np.atleast_1d(wsize)
        self.tstep = np.atleast_1d(tstep)
        self.n_coefs = np.atleast_1d(n_coefs)
        self.n_dicts = len(tstep)
        self.n_freqs = wsize // 2 + 1
        self.n_steps = self.n_coefs // self.n_freqs

    def __call__(self, x):  # noqa: D105
        # Single dictionary: one STFT, flattened over (freqs, steps).
        if self.n_dicts == 1:
            return stft(x, self.wsize[0], self.tstep[0],
                        verbose=False).reshape(-1, self.n_coefs[0])
        # Several dictionaries: concatenate the flattened transforms,
        # scaled by 1/sqrt(n_dicts) (mirrored by _PhiT).
        parts = [stft(x, self.wsize[k], self.tstep[k], verbose=False).reshape(
            -1, self.n_coefs[k]) for k in range(self.n_dicts)]
        return np.hstack(parts) / np.sqrt(self.n_dicts)

    def norm(self, z, ord=2):
        """Squared L2 norm when ord == 2 and L1 norm when ord == 1."""
        if ord not in (1, 2):
            raise ValueError('Only supported norm order are 1 and 2. '
                             'Got ord = %s' % ord)
        stft_norm = stft_norm1 if ord == 1 else stft_norm2
        z2d = np.atleast_2d(z)
        # Split the coefficients per dictionary before computing the norm.
        if len(self.n_coefs) > 1:
            blocks = np.array_split(z2d, np.cumsum(self.n_coefs)[:-1],
                                    axis=1)
        else:
            blocks = [z2d]
        total = 0.
        for k, blk in enumerate(blocks):
            total += stft_norm(
                blk.reshape(-1, self.n_freqs[k], self.n_steps[k]))
        return total
class _PhiT(object):
    """Picklable inverse STFT operator (phi.T), i.e. istft.

    A lambda would not pickle, hence this small callable class.
    """

    def __init__(self, tstep, n_freqs, n_steps, n_times):  # noqa: D102
        self.tstep = tstep
        self.n_freqs = n_freqs
        self.n_steps = n_steps
        self.n_times = n_times
        self.n_dicts = len(tstep) if isinstance(tstep, np.ndarray) else 1
        self.n_coefs = self.n_freqs * self.n_steps

    def __call__(self, z):  # noqa: D105
        if self.n_dicts == 1:
            coefs = z.reshape(-1, self.n_freqs[0], self.n_steps[0])
            return istft(coefs, self.tstep[0], self.n_times)
        # Sum the inverse transforms of each dictionary, with the same
        # 1/sqrt(n_dicts) scaling used by _Phi.
        out = np.zeros((z.shape[0], self.n_times))
        blocks = np.array_split(z, np.cumsum(self.n_coefs)[:-1], axis=1)
        for k in range(self.n_dicts):
            out += istft(blocks[k].reshape(-1, self.n_freqs[k],
                                           self.n_steps[k]),
                         self.tstep[k], self.n_times)
        return out / np.sqrt(self.n_dicts)
def norm_l21_tf(Z, phi, n_orient):
    """Mixed L21 norm of TF coefficients.

    Sums, over positions, the L2 norm pooled across orientations
    and TF atoms. Returns 0. for an empty Z.
    """
    if not Z.shape[0]:
        return 0.
    # phi.norm(..., ord=2) yields the squared energy per source row;
    # pool the n_orient rows of each position before the square root.
    energy = phi.norm(Z, ord=2).reshape(-1, n_orient).sum(axis=1)
    return np.sqrt(energy).sum()
def norm_l1_tf(Z, phi, n_orient):
    """L1 norm of TF coefficients, pooled (L2) across orientations.

    Returns 0. for an empty Z.
    """
    if not Z.shape[0]:
        return 0.
    n_positions = Z.shape[0] // n_orient
    # Collapse the n_orient rows of each position into one magnitude row.
    pooled = np.sqrt(
        (np.abs(Z) ** 2.).reshape((n_orient, -1), order='F').sum(axis=0))
    pooled = pooled.reshape((n_positions, -1), order='F')
    return phi.norm(pooled, ord=1).sum()
def norm_epsilon(Y, l1_ratio, phi):
    """Dual norm of (1. - l1_ratio) * L2 norm + l1_ratio * L1 norm, at Y.

    This is the unique solution in nu of
    norm(prox_l1(Y, nu * l1_ratio), ord=2) = (1. - l1_ratio) * nu.

    Warning: it takes into account the fact that Y only contains coefficients
    corresponding to the positive frequencies (see `stft_norm2()`).

    Parameters
    ----------
    Y : array, shape (n_freqs * n_steps,)
        The input data.
    l1_ratio : float between 0 and 1
        Tradeoff between L2 and L1 regularization. When it is 0, no temporal
        regularization is applied.
    phi : instance of _Phi
        The TF operator.

    Returns
    -------
    nu : float
        The value of the dual norm evaluated at Y.

    References
    ----------
    .. [1] E. Ndiaye, O. Fercoq, A. Gramfort, J. Salmon,
       "GAP Safe Screening Rules for Sparse-Group Lasso", Advances in Neural
       Information Processing Systems (NIPS), 2016.
    """
    # since the solution is invariant to flipped signs in Y, all entries
    # of Y are assumed positive
    norm_inf_Y = np.max(Y)
    if l1_ratio == 1.:
        # dual norm of L1 is Linf
        return norm_inf_Y
    elif l1_ratio == 0.:
        # dual norm of L2 is L2
        return np.sqrt(phi.norm(Y[None, :], ord=2).sum())
    if norm_inf_Y == 0.:
        return 0.
    # get K largest values of Y:
    idx = Y > l1_ratio * norm_inf_Y
    K = idx.sum()
    if K == 1:
        # Only one coefficient survives the L1 prox: nu is the sup norm.
        return norm_inf_Y
    # Add negative freqs: count all freqs twice except first and last:
    # (rows of the (n_freqs, n_steps) grid are flattened freq-major, so
    # the first/last n_steps entries are the first/last frequency rows)
    weights = np.empty(len(Y), dtype=int)
    weights.fill(2)
    for i, w in enumerate(np.array_split(weights,
                                         np.cumsum(phi.n_coefs)[:-1])):
        w[:phi.n_steps[i]] = 1
        w[-phi.n_steps[i]:] = 1
    # sort both Y and weights at the same time
    idx_sort = np.argsort(Y[idx])[::-1]
    Y = Y[idx][idx_sort]
    weights = weights[idx][idx_sort]
    # Duplicate entries according to their frequency multiplicity.
    Y = np.repeat(Y, weights)
    K = Y.shape[0]
    # Cumulative sums used to locate the segment on which the piecewise
    # quadratic in nu has its root (see [1]).
    p_sum = np.cumsum(Y[:(K - 1)])
    p_sum_2 = np.cumsum(Y[:(K - 1)] ** 2)
    upper = p_sum_2 / Y[1:] ** 2 - 2. * p_sum / Y[1:] + np.arange(1, K)
    in_lower_upper = np.where(upper > (1. - l1_ratio) ** 2 / l1_ratio ** 2)[0]
    if in_lower_upper.size > 0:
        # Root lies inside the first segment where the bound is exceeded.
        j = in_lower_upper[0] + 1
        p_sum = p_sum[in_lower_upper[0]]
        p_sum_2 = p_sum_2[in_lower_upper[0]]
    else:
        # Root lies beyond the last breakpoint: use all K values.
        j = K
        p_sum = p_sum[-1] + Y[K - 1]
        p_sum_2 = p_sum_2[-1] + Y[K - 1] ** 2
    denom = l1_ratio ** 2 * j - (1. - l1_ratio) ** 2
    if np.abs(denom) < 1e-10:
        # denom ~ 0: the quadratic degenerates into a linear equation in nu.
        return p_sum_2 / (2. * l1_ratio * p_sum)
    else:
        # Smaller root of the quadratic in nu.
        delta = (l1_ratio * p_sum) ** 2 - p_sum_2 * denom
        return (l1_ratio * p_sum - np.sqrt(delta)) / denom
def norm_epsilon_inf(G, R, phi, l1_ratio, n_orient):
    """Maximum of the epsilon norms of phi(G.T @ R) over positions.

    Parameters
    ----------
    G : array, shape (n_sensors, n_sources)
        Gain matrix a.k.a. lead field.
    R : array, shape (n_sensors, n_times)
        Residual.
    phi : instance of _Phi
        The TF operator.
    l1_ratio : float between 0 and 1
        Parameter controlling the tradeoff between L21 and L1 regularization.
        0 corresponds to an absence of temporal regularization, ie MxNE.
    n_orient : int
        Number of dipoles per location (typically 1 or 3).

    Returns
    -------
    nu : float
        The largest epsilon norm over the groups of n_orient dipoles
        (consecutive rows of phi(np.dot(G.T, R))).
    """
    n_positions = G.shape[1] // n_orient
    coefs = np.abs(phi(np.dot(G.T, R)))
    # Collapse the n_orient rows of every position into one magnitude row.
    coefs = np.linalg.norm(coefs.reshape((n_orient, -1), order='F'), axis=0)
    coefs = coefs.reshape((n_positions, -1), order='F')
    nu = 0.
    for row in coefs:
        nu = max(nu, norm_epsilon(row, l1_ratio, phi))
    return nu
def dgap_l21l1(M, G, Z, active_set, alpha_space, alpha_time, phi, phiT,
               n_orient, highest_d_obj):
    """Duality gap for the time-frequency mixed-norm inverse problem.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_sources)
        Gain matrix a.k.a. lead field.
    Z : array, shape (n_active, n_coefs)
        Sources in the time-frequency domain.
    active_set : array of bool, shape (n_sources, )
        Mask of active sources.
    alpha_space : float
        The spatial regularization parameter.
    alpha_time : float
        The temporal regularization parameter. The higher it is the smoother
        the estimated time series.
    phi : instance of _Phi
        The TF operator.
    phiT : instance of _PhiT
        The transpose of the TF operator.
    n_orient : int
        Number of dipoles per location (typically 1 or 3).
    highest_d_obj : float
        Best dual objective reached so far; the returned dual objective
        is never below this value.

    Returns
    -------
    gap : float
        Dual gap (p_obj - d_obj).
    p_obj : float
        Primal objective.
    d_obj : float
        Dual objective.
    R : array, shape (n_sensors, n_times)
        Current residual (M - G * X).

    References
    ----------
    .. [1] A. Gramfort, M. Kowalski, M. Hamalainen,
       "Mixed-norm estimates for the M/EEG inverse problem using accelerated
       gradient methods", Physics in Medicine and Biology, 2012.
       https://doi.org/10.1088/0031-9155/57/7/1937
    .. [2] E. Ndiaye, O. Fercoq, A. Gramfort, J. Salmon,
       "GAP Safe Screening Rules for Sparse-Group Lasso", Advances in Neural
       Information Processing Systems (NIPS), 2016.
    """
    X = phiT(Z)
    GX = np.dot(G[:, active_set], X)
    R = M - GX
    # Primal: data fit + spatial (L21) + temporal (L1) penalties.
    pen_l1 = norm_l1_tf(Z, phi, n_orient)
    pen_l21 = norm_l21_tf(Z, phi, n_orient)
    nR2 = sum_squared(R)
    p_obj = 0.5 * nR2 + alpha_space * pen_l21 + alpha_time * pen_l1
    # Dual: evaluate at the residual rescaled into the dual-feasible set.
    l1_ratio = alpha_time / (alpha_space + alpha_time)
    dual_norm = norm_epsilon_inf(G, R, phi, l1_ratio, n_orient)
    scaling = min(1., (alpha_space + alpha_time) / dual_norm)
    d_obj = ((scaling - 0.5 * scaling ** 2) * nR2 +
             scaling * np.sum(R * GX))
    d_obj = max(d_obj, highest_d_obj)
    return p_obj - d_obj, p_obj, d_obj, R
def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space,
                               alpha_time, lipschitz_constant, phi, phiT,
                               n_orient=1, maxit=200, tol=1e-8, dgap_freq=10,
                               perc=None, timeit=True, verbose=None):
    """Run BCD passes over candidate positions for the TF-MxNE problem.

    ``Z`` (dict mapping position index -> TF coefficients, or 0.0) and
    ``active_set`` are updated in place; the residual
    R = M - G @ phiT(Z) is maintained incrementally.

    Stops when the duality gap drops below ``tol``, after ``maxit``
    passes, or (if ``perc`` is given) when the active set shrinks to
    at most ``perc * n_positions`` positions.

    Returns (Z, active_set, E, converged) where E holds the primal
    objective at each duality-gap evaluation.

    NOTE(review): the ``timeit`` parameter is never used in this body —
    confirm whether it can be dropped from the signature.
    """
    # First make G fortran for faster access to blocks of columns
    G = np.asfortranarray(G)
    n_sources = G.shape[1]
    n_positions = n_sources // n_orient
    # Keep a dense copy for the duality-gap computation, then split G into
    # per-position blocks for fast column access.
    Gd = G.copy()
    G = dict(zip(np.arange(n_positions), np.hsplit(G, n_positions)))
    R = M.copy()  # residual
    active = np.where(active_set[::n_orient])[0]
    for idx in active:
        R -= np.dot(G[idx], phiT(Z[idx]))
    E = []  # track primal objective function
    alpha_time_lc = alpha_time / lipschitz_constant
    alpha_space_lc = alpha_space / lipschitz_constant
    converged = False
    d_obj = -np.Inf
    ii = -1
    while True:
        ii += 1
        for jj in candidates:
            ids = jj * n_orient
            ide = ids + n_orient
            G_j = G[jj]
            Z_j = Z[jj]
            active_set_j = active_set[ids:ide]
            was_active = np.any(active_set_j)
            # gradient step
            GTR = np.dot(G_j.T, R) / lipschitz_constant[jj]
            X_j_new = GTR.copy()
            if was_active:
                # Temporarily remove this block's contribution from R.
                X_j = phiT(Z_j)
                R += np.dot(G_j, X_j)
                X_j_new += X_j
            rows_norm = linalg.norm(X_j_new, 'fro')
            if rows_norm <= alpha_space_lc[jj]:
                # Spatial prox zeroes the whole position.
                if was_active:
                    Z[jj] = 0.0
                    active_set_j[:] = False
            else:
                if was_active:
                    Z_j_new = Z_j + phi(GTR)
                else:
                    Z_j_new = phi(GTR)
                col_norm = np.sqrt(np.sum(np.abs(Z_j_new) ** 2, axis=0))
                if np.all(col_norm <= alpha_time_lc[jj]):
                    # Temporal prox zeroes every TF atom.
                    Z[jj] = 0.0
                    active_set_j[:] = False
                else:
                    # l1
                    shrink = np.maximum(1.0 - alpha_time_lc[jj] / np.maximum(
                        col_norm, alpha_time_lc[jj]), 0.0)
                    Z_j_new *= shrink[np.newaxis, :]
                    # l21
                    shape_init = Z_j_new.shape
                    row_norm = np.sqrt(phi.norm(Z_j_new, ord=2).sum())
                    if row_norm <= alpha_space_lc[jj]:
                        Z[jj] = 0.0
                        active_set_j[:] = False
                    else:
                        shrink = np.maximum(
                            1.0 - alpha_space_lc[jj] /
                            np.maximum(row_norm, alpha_space_lc[jj]), 0.0)
                        Z_j_new *= shrink
                        Z[jj] = Z_j_new.reshape(-1, *shape_init[1:]).copy()
                        active_set_j[:] = True
                        # Put the updated block back into the residual.
                        R -= np.dot(G_j, phiT(Z[jj]))
        if (ii + 1) % dgap_freq == 0:
            # Stack the non-zero blocks to evaluate the duality gap.
            Zd = np.vstack([Z[pos] for pos in range(n_positions)
                            if np.any(Z[pos])])
            gap, p_obj, d_obj, _ = dgap_l21l1(
                M, Gd, Zd, active_set, alpha_space, alpha_time, phi, phiT,
                n_orient, d_obj)
            converged = (gap < tol)
            E.append(p_obj)
            logger.info("\n    Iteration %d :: n_active %d" % (
                ii + 1, np.sum(active_set) / n_orient))
            logger.info("    dgap %.2e :: p_obj %f :: d_obj %f" % (
                gap, p_obj, d_obj))
        if converged:
            break
        if (ii == maxit - 1):
            converged = False
            break
        if perc is not None:
            # Early exit once the active set is small enough.
            if np.sum(active_set) / float(n_orient) <= perc * n_positions:
                break
    return Z, active_set, E, converged
@verbose
def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time,
                                         lipschitz_constant, phi, phiT,
                                         Z_init=None, n_orient=1, maxit=200,
                                         tol=1e-8, dgap_freq=10,
                                         verbose=None):
    """Solve TF-MxNE with BCD, alternating full and restricted passes.

    Each outer iteration runs one BCD pass over all positions (to let new
    sources enter), then many BCD passes restricted to the current active
    set, and finally checks the duality gap on the full problem.

    Returns (X, Z, active_set, E, gap) with X = phiT(Z).
    """
    n_sensors, n_times = M.shape
    n_sources = G.shape[1]
    n_positions = n_sources // n_orient
    # Z maps position index -> TF coefficient array (0.0 means inactive).
    Z = dict.fromkeys(np.arange(n_positions), 0.0)
    # NOTE(review): np.bool / np.complex below are deprecated NumPy aliases
    # (removed in NumPy >= 1.24); bool and complex behave identically.
    active_set = np.zeros(n_sources, dtype=np.bool)
    active = []
    if Z_init is not None:
        if Z_init.shape != (n_sources, phi.n_coefs.sum()):
            raise Exception('Z_init must be None or an array with shape '
                            '(n_sources, n_coefs).')
        # Seed Z and the active set from the non-zero rows of Z_init.
        for ii in range(n_positions):
            if np.any(Z_init[ii * n_orient:(ii + 1) * n_orient]):
                active_set[ii * n_orient:(ii + 1) * n_orient] = True
                active.append(ii)
        if len(active):
            Z.update(dict(zip(active,
                              np.vsplit(Z_init[active_set], len(active)))))
    E = []
    candidates = range(n_positions)
    d_obj = -np.inf
    while True:
        # One pass over every position so new sources can activate.
        Z_init = dict.fromkeys(np.arange(n_positions), 0.0)
        Z_init.update(dict(zip(active, Z.values())))
        Z, active_set, E_tmp, _ = _tf_mixed_norm_solver_bcd_(
            M, G, Z_init, active_set, candidates, alpha_space, alpha_time,
            lipschitz_constant, phi, phiT, n_orient=n_orient, maxit=1, tol=tol,
            perc=None, verbose=verbose)
        E += E_tmp
        active = np.where(active_set[::n_orient])[0]
        # Re-index Z/candidates to the restricted (active-only) problem.
        Z_init = dict(zip(range(len(active)), [Z[idx] for idx in active]))
        candidates_ = range(len(active))
        Z, as_, E_tmp, converged = _tf_mixed_norm_solver_bcd_(
            M, G[:, active_set], Z_init,
            np.ones(len(active) * n_orient, dtype=np.bool),
            candidates_, alpha_space, alpha_time,
            lipschitz_constant[active_set[::n_orient]], phi, phiT,
            n_orient=n_orient, maxit=maxit, tol=tol,
            dgap_freq=dgap_freq, perc=0.5,
            verbose=verbose)
        active = np.where(active_set[::n_orient])[0]
        active_set[active_set] = as_.copy()
        E += E_tmp
        # NOTE(review): this overrides the inner solver's ``converged`` flag,
        # so the duality gap below is evaluated after *every* outer pass —
        # confirm this is intentional.
        converged = True
        if converged:
            Zd = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])])
            gap, p_obj, d_obj, _ = dgap_l21l1(
                M, G, Zd, active_set, alpha_space, alpha_time,
                phi, phiT, n_orient, d_obj)
            logger.info("\ndgap %.2e :: p_obj %f :: d_obj %f :: n_active %d"
                        % (gap, p_obj, d_obj, np.sum(active_set) / n_orient))
            if gap < tol:
                logger.info("\nConvergence reached!\n")
                break
    if active_set.sum():
        # Stack the per-position blocks into a single coefficient array.
        Z = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])])
        X = phiT(Z)
    else:
        Z = np.zeros((0, phi.n_coefs.sum()), dtype=np.complex)
        X = np.zeros((0, n_times))
    return X, Z, active_set, E, gap
@verbose
def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
                         n_orient=1, maxit=200, tol=1e-8,
                         active_set_size=None, debias=True, return_gap=False,
                         dgap_freq=10, verbose=None):
    """Solve TF L21+L1 inverse solver with BCD and active set approach.

    Parameters
    ----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_dipoles)
        The gain matrix a.k.a. lead field.
    alpha_space : float
        The spatial regularization parameter.
    alpha_time : float
        The temporal regularization parameter. The higher it is the smoother
        will be the estimated time series.
    wsize: int or array-like
        Length of the STFT window in samples (must be a multiple of 4).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep) and each entry of wsize must be a multiple
        of 4.
    tstep: int or array-like
        Step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep), and each entry of tstep must be a multiple
        of 2 and divide the corresponding entry of wsize.
    n_orient : int
        The number of orientation (1 : fixed or 3 : free or loose).
    maxit : int
        The number of iterations.
    tol : float
        If absolute difference between estimates at 2 successive iterations
        is lower than tol, the convergence is reached.
    active_set_size : int | None
        Currently unused by this solver (the active set strategy is
        handled internally by the BCD solver).
    debias : bool
        Debias source estimates.
    return_gap : bool
        Return final duality gap.
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations.
    %(verbose)s

    Returns
    -------
    X : array, shape (n_active, n_times)
        The source estimates.
    active_set : array
        The mask of active sources.
    E : list
        The value of the objective function every dgap_freq iteration. If
        log_objective is False or dgap_freq is np.inf, it will be empty.
    gap : float
        Final duality gap. Returned only if return_gap is True.

    References
    ----------
    .. [1] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
       "Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
       non-stationary source activations",
       Neuroimage, Volume 70, pp. 410-422, 15 April 2013.
       DOI: 10.1016/j.neuroimage.2012.12.051

    .. [2] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
       "Functional Brain Imaging with M/EEG Using Structured Sparsity in
       Time-Frequency Dictionaries",
       Proceedings Information Processing in Medical Imaging
       Lecture Notes in Computer Science, Volume 6801/2011, pp. 600-611, 2011.
       DOI: 10.1007/978-3-642-22092-0_49

    .. [3] Y. Bekhti, D. Strohmeier, M. Jas, R. Badeau, A. Gramfort.
       "M/EEG source localization with multiscale time-frequency dictionaries",
       6th International Workshop on Pattern Recognition in Neuroimaging
       (PRNI), 2016.
       DOI: 10.1109/PRNI.2016.7552337
    """
    n_sensors, n_times = M.shape
    n_sensors, n_sources = G.shape
    n_positions = n_sources // n_orient
    tstep = np.atleast_1d(tstep)
    wsize = np.atleast_1d(wsize)
    if len(tstep) != len(wsize):
        raise ValueError('The same number of window sizes and steps must be '
                         'passed. Got tstep = %s and wsize = %s' %
                         (tstep, wsize))
    # Sizes of every TF dictionary and the associated operators.
    n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs)
    phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
    # One Lipschitz constant per position block.
    if n_orient == 1:
        lc = np.sum(G * G, axis=0)
    else:
        lc = np.empty(n_positions)
        for j in range(n_positions):
            G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
            lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
    logger.info("Using block coordinate descent with active set approach")
    # NOTE(review): the inner solver is called with verbose=None rather than
    # the user-supplied ``verbose`` — confirm this is intentional.
    X, Z, active_set, E, gap = _tf_mixed_norm_solver_bcd_active_set(
        M, G, alpha_space, alpha_time, lc, phi, phiT,
        Z_init=None, n_orient=n_orient, maxit=maxit, tol=tol,
        dgap_freq=dgap_freq, verbose=None)
    if np.any(active_set) and debias:
        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
        X *= bias[:, np.newaxis]
    if return_gap:
        return X, active_set, E, gap
    else:
        return X, active_set, E
| {
"repo_name": "adykstra/mne-python",
"path": "mne/inverse_sparse/mxne_optim.py",
"copies": "1",
"size": "48338",
"license": "bsd-3-clause",
"hash": -5424002858027106000,
"line_mean": 34.4904552129,
"line_max": 93,
"alpha_frac": 0.5343208242,
"autogenerated": false,
"ratio": 3.224037884346028,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42583587085460284,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from scipy import linalg, signal
from ..source_estimate import (SourceEstimate, VolSourceEstimate,
_BaseSourceEstimate)
from ..minimum_norm.inverse import (combine_xyz, _prepare_forward,
_check_reference, _check_loose_forward)
from ..forward import (compute_orient_prior, is_fixed_orient,
convert_forward_solution)
from ..io.pick import pick_channels_evoked
from ..io.proj import deactivate_proj
from ..utils import logger, verbose
from ..dipole import Dipole
from ..externals.six.moves import xrange as range
from .mxne_optim import (mixed_norm_solver, iterative_mixed_norm_solver, _Phi,
norm_l2inf, tf_mixed_norm_solver, norm_epsilon_inf)
@verbose
def _prepare_weights(forward, gain, source_weighting, weights, weights_min):
    """Scale gain and source_weighting by normalized weights.

    ``gain`` and ``source_weighting`` are scaled in place (the caller's
    arrays are mutated); when ``weights_min`` is given, the columns of
    gain whose weight falls at or below the threshold are dropped and
    the corresponding boolean mask is returned.

    Returns
    -------
    gain : array
        The (possibly column-pruned) scaled gain matrix.
    source_weighting : array
        The scaled source weighting (same object as the input).
    mask : array of bool | None
        Mask of retained sources, or None if no pruning was done.
    """
    mask = None
    # Accept a source estimate: use the peak absolute amplitude per source.
    if isinstance(weights, _BaseSourceEstimate):
        weights = np.max(np.abs(weights.data), axis=1)
    weights_max = np.max(weights)
    if weights_min > weights_max:
        raise ValueError('weights_min > weights_max (%s > %s)' %
                         (weights_min, weights_max))
    # Normalize so the largest weight is 1; the threshold follows suit.
    # NOTE(review): this assumes weights_min is a number; a None value
    # would already have failed at the comparison above — confirm callers
    # guard this.
    weights_min = weights_min / weights_max
    weights = weights / weights_max
    # Repeat each per-position weight for every dipole orientation.
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T)
    if len(weights) != gain.shape[1]:
        raise ValueError('weights do not have the correct dimension '
                         ' (%d != %d)' % (len(weights), gain.shape[1]))
    # In-place scaling: mutates the caller's arrays.
    if len(source_weighting.shape) == 1:
        source_weighting *= weights
    else:
        source_weighting *= weights[:, None]
    gain *= weights[None, :]
    if weights_min is not None:
        # Drop sources whose weight is at or below the threshold.
        mask = (weights > weights_min)
        gain = gain[:, mask]
        n_sources = np.sum(mask) // n_dip_per_pos
        logger.info("Reducing source space to %d sources" % n_sources)
    return gain, source_weighting, mask
@verbose
def _prepare_gain_column(forward, info, noise_cov, pca, depth, loose, weights,
                         weights_min, verbose=None):
    """Whiten the gain matrix and apply depth/loose/weights column scaling.

    Returns the scaled gain, its channel info, the whitener, the per-column
    ``source_weighting`` (needed to undo the scaling later) and the weight
    mask (None unless weights with weights_min were given).
    """
    gain_info, gain, _, whitener, _ = _prepare_forward(forward, info,
                                                       noise_cov, pca)
    logger.info('Whitening lead field matrix.')
    gain = np.dot(whitener, gain)
    is_fixed_ori = is_fixed_orient(forward)
    if depth is not None:
        # Depth prior: per-position gain power (summed over orientations),
        # raised to the ``depth`` exponent.
        depth_prior = np.sum(gain ** 2, axis=0)
        if not is_fixed_ori:
            depth_prior = depth_prior.reshape(-1, 3).sum(axis=1)
        # Spherical leadfield can be zero at the center
        depth_prior[depth_prior == 0.] = np.min(
            depth_prior[depth_prior != 0.])
        depth_prior **= depth
        if not is_fixed_ori:
            depth_prior = np.repeat(depth_prior, 3)
        source_weighting = np.sqrt(1. / depth_prior)
    else:
        source_weighting = np.ones(gain.shape[1], dtype=gain.dtype)
    assert (is_fixed_ori or (0 <= loose <= 1))
    if loose is not None and loose < 1.:
        # Down-weight tangential components for a loose orientation prior.
        source_weighting *= np.sqrt(compute_orient_prior(forward, loose))
    gain *= source_weighting[None, :]
    if weights is None:
        mask = None
    else:
        gain, source_weighting, mask = _prepare_weights(forward, gain,
                                                        source_weighting,
                                                        weights, weights_min)
    return gain, gain_info, whitener, source_weighting, mask
def _prepare_gain(forward, info, noise_cov, pca, depth, loose, weights,
weights_min, verbose=None):
if not isinstance(depth, float):
raise ValueError('Invalid depth parameter. '
'A float is required (got %s).'
% type(depth))
elif depth < 0.0:
raise ValueError('Depth parameter must be positive (got %s).'
% depth)
gain, gain_info, whitener, source_weighting, mask = \
_prepare_gain_column(forward, info, noise_cov, pca, depth,
loose, weights, weights_min)
return gain, gain_info, whitener, source_weighting, mask
def _reapply_source_weighting(X, source_weighting, active_set):
X *= source_weighting[active_set][:, None]
return X
def _compute_residual(forward, evoked, X, active_set, info):
    """Compute the data residual: evoked minus forward-projection of X.

    The model data is passed through the already-active SSP projectors so
    the residual is computed in the same (projected) space as the evoked
    data.
    """
    # OK, picking based on row_names is safe
    sel = [forward['sol']['row_names'].index(c) for c in info['ch_names']]
    residual = evoked.copy()
    residual = pick_channels_evoked(residual, include=info['ch_names'])
    r_tmp = residual.copy()
    # Predicted sensor data from the active sources only.
    r_tmp.data = np.dot(forward['sol']['data'][sel, :][:, active_set], X)
    # Take care of proj
    active_projs = list()
    non_active_projs = list()
    for p in evoked.info['projs']:
        if p['active']:
            active_projs.append(p)
        else:
            non_active_projs.append(p)
    if len(active_projs) > 0:
        # Re-apply the active projections to the model data (they must be
        # deactivated first so apply_proj will run them), then restore the
        # inactive ones so the info matches the input.
        r_tmp.info['projs'] = deactivate_proj(active_projs, copy=True)
        r_tmp.apply_proj()
        r_tmp.add_proj(non_active_projs, remove_existing=False)
    residual.data -= r_tmp.data
    return residual
@verbose
def _make_sparse_stc(X, active_set, forward, tmin, tstep,
                     active_is_idx=False, verbose=None):
    """Build a (Vol)SourceEstimate from the active sources in X."""
    if not is_fixed_orient(forward):
        logger.info('combining the current components...')
        # Collapse the xyz orientation triplets to one amplitude per source.
        X = combine_xyz(X)
    if not active_is_idx:
        active_idx = np.where(active_set)[0]
    else:
        active_idx = active_set
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    if n_dip_per_pos > 1:
        # Map per-orientation indices back to source positions.
        active_idx = np.unique(active_idx // n_dip_per_pos)
    src = forward['src']
    if src.kind != 'surface':
        # Volume / discrete source space: single vertex array.
        vertices = src[0]['vertno'][active_idx]
        stc = VolSourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
    else:
        # Surface source space: split the global indices per hemisphere.
        vertices = []
        n_points_so_far = 0
        for this_src in src:
            this_n_points_so_far = n_points_so_far + len(this_src['vertno'])
            this_active_idx = active_idx[(n_points_so_far <= active_idx) &
                                         (active_idx < this_n_points_so_far)]
            this_active_idx -= n_points_so_far
            this_vertno = this_src['vertno'][this_active_idx]
            n_points_so_far = this_n_points_so_far
            vertices.append(this_vertno)
        stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
    return stc
@verbose
def _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M, M_est,
                         active_is_idx=False, verbose=None):
    """Build one Dipole per active source position from X.

    The goodness of fit (in %) is the fraction of sensor-space power of M
    explained by M_est, computed per time point and shared by all dipoles.
    """
    times = tmin + tstep * np.arange(X.shape[1])
    if not active_is_idx:
        active_idx = np.where(active_set)[0]
    else:
        active_idx = active_set
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    if n_dip_per_pos > 1:
        # Map per-orientation indices back to source positions.
        active_idx = np.unique(active_idx // n_dip_per_pos)
    # GOF per time point: 1 - residual power / data power (0 where M is 0).
    gof = np.zeros(M_est.shape[1])
    M_norm2 = np.sum(M ** 2, axis=0)
    R_norm2 = np.sum((M - M_est) ** 2, axis=0)
    gof[M_norm2 > 0.0] = 1. - R_norm2[M_norm2 > 0.0] / M_norm2[M_norm2 > 0.0]
    gof *= 100.
    dipoles = []
    for k, i_dip in enumerate(active_idx):
        i_pos = forward['source_rr'][i_dip][np.newaxis, :]
        i_pos = i_pos.repeat(len(times), axis=0)
        X_ = X[k * n_dip_per_pos: (k + 1) * n_dip_per_pos]
        if n_dip_per_pos == 1:
            # Fixed orientation: signed amplitude, constant orientation.
            amplitude = X_[0]
            i_ori = forward['source_nn'][i_dip][np.newaxis, :]
            i_ori = i_ori.repeat(len(times), axis=0)
        else:
            if forward['surf_ori']:
                # Rotate from surface-oriented back to head coordinates.
                X_ = np.dot(forward['source_nn'][i_dip *
                            n_dip_per_pos:(i_dip + 1) * n_dip_per_pos].T, X_)
            amplitude = np.sqrt(np.sum(X_ ** 2, axis=0))
            # Unit orientation where amplitude is nonzero, zeros elsewhere.
            i_ori = np.zeros((len(times), 3))
            i_ori[amplitude > 0.] = (X_[:, amplitude > 0.] /
                                     amplitude[amplitude > 0.]).T
        dipoles.append(Dipole(times, i_pos, amplitude, i_ori, gof))
    return dipoles
@verbose
def make_stc_from_dipoles(dipoles, src, verbose=None):
    """Convert a list of spatio-temporal dipoles into a SourceEstimate.

    Parameters
    ----------
    dipoles : Dipole | list of instances of Dipole
        The dipoles to convert.
    src : instance of SourceSpaces
        The source space used to generate the forward operator.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    stc : SourceEstimate
        The source estimate.
    """
    logger.info('Converting dipoles into a SourceEstimate.')
    if isinstance(dipoles, Dipole):
        dipoles = [dipoles]
    if not isinstance(dipoles, list):
        raise ValueError('Dipoles must be an instance of Dipole or '
                         'a list of instances of Dipole. '
                         'Got %s!' % type(dipoles))
    tmin = dipoles[0].times[0]
    tstep = dipoles[0].times[1] - tmin
    X = np.zeros((len(dipoles), len(dipoles[0].times)))
    # Coordinates of all source-space vertices (lh first, then rh).
    source_rr = np.concatenate([_src['rr'][_src['vertno'], :] for _src in src],
                               axis=0)
    n_lh_points = len(src[0]['vertno'])
    lh_vertno = list()
    rh_vertno = list()
    for i in range(len(dipoles)):
        if not np.all(dipoles[i].pos == dipoles[i].pos[0]):
            raise ValueError('Only dipoles with fixed position over time '
                             'are supported!')
        X[i] = dipoles[i].amplitude
        # Find the source-space vertex exactly matching the dipole position.
        idx = np.all(source_rr == dipoles[i].pos[0], axis=1)
        idx = np.where(idx)[0][0]
        if idx < n_lh_points:
            lh_vertno.append(src[0]['vertno'][idx])
        else:
            rh_vertno.append(src[1]['vertno'][idx - n_lh_points])
    vertices = [np.array(lh_vertno).astype(int),
                np.array(rh_vertno).astype(int)]
    stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
    logger.info('[done]')
    return stc
@verbose
def mixed_norm(evoked, forward, noise_cov, alpha, loose='auto', depth=0.8,
               maxit=3000, tol=1e-4, active_set_size=10, pca=True,
               debias=True, time_pca=True, weights=None, weights_min=None,
               solver='auto', n_mxne_iter=1, return_residual=False,
               return_as_dipoles=False, dgap_freq=10, verbose=None):
    """Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE).

    Compute L1/L2 mixed-norm solution [1]_ or L0.5/L2 [2]_ mixed-norm
    solution on evoked data.

    Parameters
    ----------
    evoked : instance of Evoked or list of instances of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    alpha : float in range [0, 100)
        Regularization parameter. 0 means no regularization, 100 would give 0
        active dipole.
    loose : float in [0, 1] | 'auto'
        Value that weights the source variances of the dipole components
        that are parallel (tangential) to the cortical surface. If loose
        is 0 then the solution is computed with fixed orientation.
        If loose is 1, it corresponds to free orientations.
        The default value ('auto') is set to 0.2 for surface-oriented source
        space and set to 1.0 for volumic or discrete source space.
    depth: None | float in [0, 1]
        Depth weighting coefficients. If None, no depth weighting is performed.
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    active_set_size : int | None
        Size of active set increment. If None, no active set strategy is used.
    pca : bool
        If True the rank of the data is reduced to true dimension.
    debias : bool
        Remove coefficient amplitude bias due to L1 penalty.
    time_pca : bool or int
        If True the rank of the concatenated epochs is reduced to
        its true dimension. If is 'int' the rank is limited to this value.
    weights : None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None, a
        1d array with shape (n_sources,), or a SourceEstimate (e.g. obtained
        with wMNE, dSPM, or fMRI).
    weights_min : float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    solver : 'prox' | 'cd' | 'bcd' | 'auto'
        The algorithm to use for the optimization. 'prox' stands for
        proximal iterations using the FISTA algorithm, 'cd' uses
        coordinate descent, and 'bcd' applies block coordinate descent.
        'cd' is only available for fixed orientation.
    n_mxne_iter : int
        The number of MxNE iterations. If > 1, iterative reweighting
        is applied.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    return_as_dipoles : bool
        If True, the sources are returned as a list of Dipole instances.
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations. Ignored if
        solver is 'cd'.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    stc : SourceEstimate | list of SourceEstimate
        Source time courses for each evoked data passed as input.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.

    See Also
    --------
    tf_mixed_norm

    References
    ----------
    .. [1] A. Gramfort, M. Kowalski, M. Hamalainen,
       "Mixed-norm estimates for the M/EEG inverse problem using accelerated
       gradient methods", Physics in Medicine and Biology, 2012.
       https://doi.org/10.1088/0031-9155/57/7/1937
    .. [2] D. Strohmeier, Y. Bekhti, J. Haueisen, A. Gramfort,
       "The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal
       MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging,
       Volume 35 (10), pp. 2218-2228, 2016.
    """
    if not (0. <= alpha < 100.):
        raise ValueError('alpha must be in [0, 100). '
                         'Got alpha = %s' % alpha)
    if n_mxne_iter < 1:
        raise ValueError('MxNE has to be computed at least 1 time. '
                         'Requires n_mxne_iter >= 1, got %d' % n_mxne_iter)
    if dgap_freq <= 0.:
        raise ValueError('dgap_freq must be a positive integer.'
                         ' Got dgap_freq = %s' % dgap_freq)

    if not isinstance(evoked, list):
        evoked = [evoked]

    _check_reference(evoked[0])

    all_ch_names = evoked[0].ch_names
    if not all(all_ch_names == evoked[i].ch_names
               for i in range(1, len(evoked))):
        raise Exception('All the datasets must have the same good channels.')

    loose, forward = _check_loose_forward(loose, forward)

    # put the forward solution in fixed orientation if it's not already
    if loose == 0. and not is_fixed_orient(forward):
        forward = convert_forward_solution(
            forward, surf_ori=True, force_fixed=True, copy=True, use_cps=True)

    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked[0].info, noise_cov, pca, depth, loose, weights,
        weights_min)

    sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
    M = np.concatenate([e.data[sel] for e in evoked], axis=1)

    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)

    if time_pca:
        # Reduce the temporal dimension via SVD (optionally truncated).
        U, s, Vh = linalg.svd(M, full_matrices=False)
        if not isinstance(time_pca, bool) and isinstance(time_pca, int):
            U = U[:, :time_pca]
            s = s[:time_pca]
            Vh = Vh[:time_pca]
        M = U * s

    # Scaling to make setting of alpha easy
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting /= alpha_max

    if n_mxne_iter == 1:
        X, active_set, E = mixed_norm_solver(
            M, gain, alpha, maxit=maxit, tol=tol,
            active_set_size=active_set_size, n_orient=n_dip_per_pos,
            debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)
    else:
        X, active_set, E = iterative_mixed_norm_solver(
            M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,
            n_orient=n_dip_per_pos, active_set_size=active_set_size,
            debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)

    if time_pca:
        X = np.dot(X, Vh)
        M = np.dot(M, Vh)

    # Compute estimated whitened sensor data
    M_estimated = np.dot(gain[:, active_set], X)

    if mask is not None:
        # Expand the active set back to the full (unmasked) source space.
        # Fix: np.bool is a removed alias (NumPy >= 1.24); use builtin bool.
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp

    if active_set.sum() == 0:
        raise Exception("No active dipoles found. alpha is too big.")

    # Reapply weights to have correct unit
    X = _reapply_source_weighting(X, source_weighting, active_set)

    outs = list()
    residual = list()
    cnt = 0
    for e in evoked:
        tmin = e.times[0]
        tstep = 1.0 / e.info['sfreq']
        Xe = X[:, cnt:(cnt + len(e.times))]
        if return_as_dipoles:
            out = _make_dipoles_sparse(
                Xe, active_set, forward, tmin, tstep,
                M[:, cnt:(cnt + len(e.times))],
                M_estimated[:, cnt:(cnt + len(e.times))], verbose=None)
        else:
            out = _make_sparse_stc(Xe, active_set, forward, tmin, tstep)
        outs.append(out)
        cnt += len(e.times)

        if return_residual:
            residual.append(_compute_residual(forward, e, Xe, active_set,
                                              gain_info))

    logger.info('[done]')

    if len(outs) == 1:
        out = outs[0]
        if return_residual:
            residual = residual[0]
    else:
        out = outs

    if return_residual:
        out = out, residual

    return out
def _window_evoked(evoked, size):
    """Taper the edges of a copy of evoked with half-Hann ramps.

    ``size`` is in seconds; a scalar applies to both edges, a pair gives
    (left, right) lengths separately.
    """
    if isinstance(size, (float, int)):
        left = right = float(size)
    else:
        left, right = size
    evoked = evoked.copy()
    sfreq = float(evoked.info['sfreq'])
    n_left = int(left * sfreq)
    n_right = int(right * sfreq)
    # Rising half-Hann, flat middle, falling half-Hann.
    ramp_l = signal.hann(n_left * 2)[:n_left]
    ramp_r = signal.hann(n_right * 2)[-n_right:]
    flat = np.ones(len(evoked.times) - n_left - n_right)
    window = np.concatenate([ramp_l, flat, ramp_r])
    evoked.data *= window[None, :]
    return evoked
@verbose
def tf_mixed_norm(evoked, forward, noise_cov,
                  loose='auto', depth=0.8, maxit=3000,
                  tol=1e-4, weights=None, weights_min=None, pca=True,
                  debias=True, wsize=64, tstep=4, window=0.02,
                  return_residual=False, return_as_dipoles=False,
                  alpha=None, l1_ratio=None, dgap_freq=10, verbose=None):
    """Time-Frequency Mixed-norm estimate (TF-MxNE).

    Compute L1/L2 + L1 mixed-norm solution on time-frequency
    dictionary. Works with evoked data [1]_ [2]_.

    Parameters
    ----------
    evoked : instance of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    loose : float in [0, 1] | 'auto'
        Value that weights the source variances of the dipole components
        that are parallel (tangential) to the cortical surface. If loose
        is 0 then the solution is computed with fixed orientation.
        If loose is 1, it corresponds to free orientations.
        The default value ('auto') is set to 0.2 for surface-oriented source
        space and set to 1.0 for volumic or discrete source space.
    depth: None | float in [0, 1]
        Depth weighting coefficients. If None, no depth weighting is performed.
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    weights: None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None or
        1d array of length n_sources or a SourceEstimate e.g. obtained
        with wMNE or dSPM or fMRI.
    weights_min: float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    pca: bool
        If True the rank of the data is reduced to true dimension.
    debias: bool
        Remove coefficient amplitude bias due to L1 penalty.
    wsize: int or array-like
        Length of the STFT window in samples (must be a multiple of 4).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep) and each entry of wsize must be a multiple
        of 4. See [3]_.
    tstep: int or array-like
        Step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep), and each entry of tstep must be a multiple
        of 2 and divide the corresponding entry of wsize. See [3]_.
    window : float or (float, float)
        Length of time window used to take care of edge artifacts in seconds.
        It can be one float or float if the values are different for left
        and right window length.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    return_as_dipoles : bool
        If True, the sources are returned as a list of Dipole instances.
    alpha : float in [0, 100) or None
        Overall regularization parameter.
        If alpha and l1_ratio are not None, alpha_space and alpha_time are
        overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
        * l1_ratio. 0 means no regularization, 100 would give 0 active dipole.
    l1_ratio : float in [0, 1] or None
        Proportion of temporal regularization.
        If l1_ratio and alpha are not None, alpha_space and alpha_time are
        overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
        * l1_ratio. 0 means no time regularization aka MxNE.
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    stc : instance of SourceEstimate
        Source time courses.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.

    See Also
    --------
    mixed_norm

    References
    ----------
    .. [1] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
       "Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
       non-stationary source activations",
       Neuroimage, Volume 70, pp. 410-422, 15 April 2013.
       DOI: 10.1016/j.neuroimage.2012.12.051
    .. [2] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
       "Functional Brain Imaging with M/EEG Using Structured Sparsity in
       Time-Frequency Dictionaries",
       Proceedings Information Processing in Medical Imaging
       Lecture Notes in Computer Science, Volume 6801/2011, pp. 600-611, 2011.
       DOI: 10.1007/978-3-642-22092-0_49
    .. [3] Y. Bekhti, D. Strohmeier, M. Jas, R. Badeau, A. Gramfort.
       "M/EEG source localization with multiscale time-frequency dictionaries",
       6th International Workshop on Pattern Recognition in Neuroimaging
       (PRNI), 2016.
       DOI: 10.1109/PRNI.2016.7552337
    """
    _check_reference(evoked)

    all_ch_names = evoked.ch_names
    info = evoked.info

    # Fix: with the None defaults, the range checks below raised an opaque
    # TypeError on Python 3; fail early with an explicit message instead.
    if alpha is None or l1_ratio is None:
        raise ValueError('alpha and l1_ratio must be floats. Got '
                         'alpha = %s and l1_ratio = %s' % (alpha, l1_ratio))
    if not (0. <= alpha < 100.):
        raise ValueError('alpha must be in [0, 100). '
                         'Got alpha = %s' % alpha)

    if not (0. <= l1_ratio <= 1.):
        raise ValueError('l1_ratio must be in range [0, 1].'
                         ' Got l1_ratio = %s' % l1_ratio)
    alpha_space = alpha * (1. - l1_ratio)
    alpha_time = alpha * l1_ratio

    if dgap_freq <= 0.:
        raise ValueError('dgap_freq must be a positive integer.'
                         ' Got dgap_freq = %s' % dgap_freq)

    tstep = np.atleast_1d(tstep)
    wsize = np.atleast_1d(wsize)
    if len(tstep) != len(wsize):
        raise ValueError('The same number of window sizes and steps must be '
                         'passed. Got tstep = %s and wsize = %s' %
                         (tstep, wsize))

    loose, forward = _check_loose_forward(loose, forward)

    # put the forward solution in fixed orientation if it's not already
    if loose == 0. and not is_fixed_orient(forward):
        forward = convert_forward_solution(
            forward, surf_ori=True, force_fixed=True, copy=True, use_cps=True)

    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3

    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked.info, noise_cov, pca, depth, loose, weights,
        weights_min)

    if window is not None:
        # Taper the edges to reduce STFT edge artifacts.
        evoked = _window_evoked(evoked, window)

    sel = [all_ch_names.index(name) for name in gain_info["ch_names"]]
    M = evoked.data[sel]

    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)

    # Scaling to make setting of alpha easy
    n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs)

    alpha_max = norm_epsilon_inf(gain, M, phi, l1_ratio, n_dip_per_pos)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting /= alpha_max

    X, active_set, E = tf_mixed_norm_solver(
        M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep,
        maxit=maxit, tol=tol, verbose=verbose, n_orient=n_dip_per_pos,
        dgap_freq=dgap_freq, debias=debias)

    if active_set.sum() == 0:
        raise Exception("No active dipoles found. "
                        "alpha_space/alpha_time are too big.")

    # Compute estimated whitened sensor data
    M_estimated = np.dot(gain[:, active_set], X)

    if mask is not None:
        # Expand the active set back to the full (unmasked) source space.
        # Fix: np.bool is a removed alias (NumPy >= 1.24); use builtin bool.
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp

    X = _reapply_source_weighting(X, source_weighting, active_set)

    if return_residual:
        residual = _compute_residual(
            forward, evoked, X, active_set, gain_info)

    if return_as_dipoles:
        out = _make_dipoles_sparse(
            X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'],
            M, M_estimated, verbose=None)
    else:
        out = _make_sparse_stc(
            X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'])

    logger.info('[done]')

    if return_residual:
        out = out, residual

    return out
| {
"repo_name": "teonlamont/mne-python",
"path": "mne/inverse_sparse/mxne_inverse.py",
"copies": "3",
"size": "27459",
"license": "bsd-3-clause",
"hash": 7035603888333090000,
"line_mean": 37.0318559557,
"line_max": 79,
"alpha_frac": 0.6011872246,
"autogenerated": false,
"ratio": 3.470112473145457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5571299697745458,
"avg_score": null,
"num_lines": null
} |
import pytest
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_array_less)
from mne.inverse_sparse.mxne_optim import (mixed_norm_solver,
tf_mixed_norm_solver,
iterative_mixed_norm_solver,
norm_epsilon_inf, norm_epsilon,
_Phi, _PhiT, dgap_l21l1)
from mne.time_frequency.stft import stft_norm2
def _generate_tf_data():
n, p, t = 30, 40, 64
rng = np.random.RandomState(0)
G = rng.randn(n, p)
G /= np.std(G, axis=0)[None, :]
X = np.zeros((p, t))
active_set = [0, 4]
times = np.linspace(0, 2 * np.pi, t)
X[0] = np.sin(times)
X[4] = -2 * np.sin(4 * times)
X[4, times <= np.pi / 2] = 0
X[4, times >= np.pi] = 0
M = np.dot(G, X)
M += 1 * rng.randn(*M.shape)
return M, G, active_set
def test_l21_mxne():
    """Test convergence of MxNE solver."""
    n, p, t, alpha = 30, 40, 20, 1.
    rng = np.random.RandomState(0)
    G = rng.randn(n, p)
    G /= np.std(G, axis=0)[None, :]
    X = np.zeros((p, t))
    X[0] = 3
    X[4] = -2
    # Noise-free data with two active sources (rows 0 and 4).
    M = np.dot(G, X)
    args = (M, G, alpha, 1000, 1e-8)
    # Full active set: prox, cd, and bcd must agree and recover {0, 4}.
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = mixed_norm_solver(
            *args, active_set_size=None,
            debias=True, solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_cd, active_set, _, gap_cd = mixed_norm_solver(
            *args, active_set_size=None,
            debias=True, solver='cd', return_gap=True)
    assert_array_less(gap_cd, 1e-8)
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, E, gap_bcd = mixed_norm_solver(
            M, G, alpha, maxit=1000, tol=1e-8, active_set_size=None,
            debias=True, solver='bcd', return_gap=True)
    assert_array_less(gap_bcd, 9.6e-9)
    assert_array_equal(np.where(active_set)[0], [0, 4])
    assert_allclose(X_hat_prox, X_hat_cd, rtol=1e-2)
    assert_allclose(X_hat_prox, X_hat_bcd, rtol=1e-2)
    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
    # Same checks with the active-set strategy enabled.
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_cd, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
    # With n_orient=2, active sources select orientation pairs.
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, n_orient=2, solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, n_orient=2, solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    # suppress a coordinate-descent warning here
    with pytest.warns(RuntimeWarning, match='descent'):
        X_hat_cd, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, n_orient=2, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
    # With n_orient=5, both active sources fall in the same group of 5.
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, n_orient=5, solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, n_orient=5, solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    with pytest.warns(RuntimeWarning, match='descent'):
        X_hat_cd, active_set, _ = mixed_norm_solver(
            *args, active_set_size=2, debias=True, n_orient=5, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    assert_array_equal(X_hat_bcd, X_hat_cd)
    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
def test_tf_mxne():
    """Test convergence of TF-MxNE solver."""
    M, G, true_active = _generate_tf_data()
    alpha_space, alpha_time = 10., 5.
    with pytest.warns(None):  # CD
        X_hat_tf, active_set_hat_tf, E, gap_tfmxne = tf_mixed_norm_solver(
            M, G, alpha_space, alpha_time, maxit=200, tol=1e-8, verbose=True,
            n_orient=1, tstep=4, wsize=32, return_gap=True)
    # Solver must converge (tiny duality gap) and recover the true support.
    assert_array_less(gap_tfmxne, 1e-8)
    assert_array_equal(np.where(active_set_hat_tf)[0], true_active)
def test_norm_epsilon():
    """Test computation of espilon norm on TF coefficients."""
    tstep = np.array([2])
    wsize = np.array([4])
    n_times = 10
    n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs)
    Y = np.zeros(n_steps * n_freqs)
    l1_ratio = 0.5
    # Zero coefficients give a zero norm.
    assert_allclose(norm_epsilon(Y, l1_ratio, phi), 0.)
    Y[0] = 2.
    # A single nonzero coefficient gives its magnitude, for any l1_ratio.
    assert_allclose(norm_epsilon(Y, l1_ratio, phi), np.max(Y))
    l1_ratio = 1.
    assert_allclose(norm_epsilon(Y, l1_ratio, phi), np.max(Y))
    # dummy value without random:
    Y = np.arange(n_steps * n_freqs).reshape(-1, )
    l1_ratio = 0.
    # With no L1 part, the epsilon norm reduces to the STFT L2 norm.
    assert_allclose(norm_epsilon(Y, l1_ratio, phi) ** 2,
                    stft_norm2(Y.reshape(-1, n_freqs[0], n_steps[0])))
def test_dgapl21l1():
    """Test duality gap for L21 + L1 regularization."""
    n_orient = 2
    M, G, active_set = _generate_tf_data()
    n_times = M.shape[1]
    n_sources = G.shape[1]
    tstep, wsize = np.array([4, 2]), np.array([64, 16])
    n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    # Multi-dictionary STFT operator and its adjoint.
    phi = _Phi(wsize, tstep, n_coefs)
    phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
    for l1_ratio in [0.05, 0.1]:
        alpha_max = norm_epsilon_inf(G, M, phi, l1_ratio, n_orient)
        alpha_space = (1. - l1_ratio) * alpha_max
        alpha_time = l1_ratio * alpha_max
        Z = np.zeros([n_sources, phi.n_coefs.sum()])
        # for alpha = alpha_max, Z = 0 is the solution so the dgap is 0
        gap = dgap_l21l1(M, G, Z, np.ones(n_sources, dtype=bool),
                         alpha_space, alpha_time, phi, phiT,
                         n_orient, -np.inf)[0]
        assert_allclose(0., gap)
        # check that solution for alpha smaller than alpha_max is non 0:
        X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver(
            M, G, alpha_space / 1.01, alpha_time / 1.01, maxit=200, tol=1e-8,
            verbose=True, debias=False, n_orient=n_orient, tstep=tstep,
            wsize=wsize, return_gap=True)
        # allow possible small numerical errors (negative gap)
        assert_array_less(-1e-10, gap)
        assert_array_less(gap, 1e-8)
        assert_array_less(1, len(active_set_hat_tf))
        # Even smaller regularization: still converged, still non-trivial.
        X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver(
            M, G, alpha_space / 5., alpha_time / 5., maxit=200, tol=1e-8,
            verbose=True, debias=False, n_orient=n_orient, tstep=tstep,
            wsize=wsize, return_gap=True)
        assert_array_less(-1e-10, gap)
        assert_array_less(gap, 1e-8)
        assert_array_less(1, len(active_set_hat_tf))
def test_tf_mxne_vs_mxne():
    """Test equivalence of TF-MxNE (with alpha_time=0) and MxNE."""
    M, G, active_set = _generate_tf_data()
    alpha_space, alpha_time = 60., 0.
    X_hat_tf, active_set_hat_tf, E = tf_mixed_norm_solver(
        M, G, alpha_space, alpha_time, maxit=200, tol=1e-8,
        verbose=True, debias=False, n_orient=1, tstep=4, wsize=32)
    # Also run L21 and check that we get the same
    X_hat_l21, _, _ = mixed_norm_solver(
        M, G, alpha_space, maxit=200, tol=1e-8, verbose=False, n_orient=1,
        active_set_size=None, debias=False)
    assert_allclose(X_hat_tf, X_hat_l21, rtol=1e-1)
def test_iterative_reweighted_mxne():
    """Test convergence of irMxNE solver.

    irMxNE with a single iteration must match plain MxNE; prox/bcd/cd
    solvers must agree and recover the simulated support.
    """
    n, p, t, alpha = 30, 40, 20, 1
    rng = np.random.RandomState(0)
    G = rng.randn(n, p)
    G /= np.std(G, axis=0)[None, :]
    X = np.zeros((p, t))
    X[0] = 3
    X[4] = -2
    M = np.dot(G, X)
    with pytest.warns(None):  # CD
        X_hat_l21, _, _ = mixed_norm_solver(
            M, G, alpha, maxit=1000, tol=1e-8, verbose=False, n_orient=1,
            active_set_size=None, debias=False, solver='bcd')
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None,
            debias=False, solver='bcd')
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None,
            debias=False, solver='prox')
    assert_allclose(X_hat_bcd, X_hat_l21, rtol=1e-3)
    assert_allclose(X_hat_prox, X_hat_l21, rtol=1e-3)
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None,
            debias=True, solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
            debias=True, solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None,
            debias=True, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    assert_array_almost_equal(X_hat_prox, X_hat_cd, 5)
    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
            debias=True, n_orient=2, solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    # suppress a coordinate-descent warning here
    with pytest.warns(RuntimeWarning, match='descent'):
        X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
            debias=True, n_orient=2, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    # Fix: assert_array_equal takes no decimal argument (the third positional
    # is err_msg), so the original "5" was silently used as a message; use
    # assert_array_almost_equal as done above.
    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, debias=True,
        n_orient=5)
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    with pytest.warns(RuntimeWarning, match='descent'):
        X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
            debias=True, n_orient=5, solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
| {
"repo_name": "teonlamont/mne-python",
"path": "mne/inverse_sparse/tests/test_mxne_optim.py",
"copies": "3",
"size": "11932",
"license": "bsd-3-clause",
"hash": -8632989572145968000,
"line_mean": 40.574912892,
"line_max": 77,
"alpha_frac": 0.583221589,
"autogenerated": false,
"ratio": 2.7537502884837295,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.483697187748373,
"avg_score": null,
"num_lines": null
} |
import os.path as op
import copy
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_true
from mne.datasets import sample
from mne.label import read_label
from mne import read_cov, read_forward_solution, read_evokeds
from mne.inverse_sparse import mixed_norm, tf_mixed_norm
from mne.minimum_norm import apply_inverse, make_inverse_operator
# Paths into the MNE "sample" dataset used by the test below.  With
# download=False the dataset is only available if already fetched locally.
data_path = sample.data_path(download=False)
fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis-meg-oct-6-fwd.fif')
label = 'Aud-rh'
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
@sample.requires_sample_data
def test_mxne_inverse():
    """Test (TF-)MxNE inverse computation.

    Runs the MxNE solver with both the 'prox' and 'cd' backends, checks
    that they agree and that the strongest source falls inside the
    expected label, then exercises TF-MxNE on the same evoked data.
    """
    # Read noise covariance matrix
    cov = read_cov(fname_cov)
    # Inverse-operator parameters (fixed orientation, depth weighting)
    loose = None
    depth = 0.9
    # NOTE: the original code first read condition=1 and immediately
    # overwrote the result with this condition=0 read; the redundant
    # first read has been removed.
    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
    evoked.crop(tmin=-0.1, tmax=0.4)
    evoked_l21 = copy.deepcopy(evoked)
    evoked_l21.crop(tmin=0.08, tmax=0.1)
    label = read_label(fname_label)
    weights_min = 0.5
    forward = read_forward_solution(fname_fwd, force_fixed=False,
                                    surf_ori=True)
    # Build a dSPM solution and binarize it, to be used as a spatial
    # weighting mask for MxNE (1 where dSPM is strongly active, else 0).
    inverse_operator = make_inverse_operator(evoked.info, forward, cov,
                                             loose=loose, depth=depth,
                                             fixed=True)
    stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9.,
                             method='dSPM')
    stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0
    stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1.
    # MxNE tests
    alpha = 60  # spatial regularization parameter
    stc_prox = mixed_norm(evoked_l21, forward, cov, alpha, loose=None,
                          depth=0.9, maxit=1000, tol=1e-8, active_set_size=10,
                          solver='prox')
    stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=None,
                        depth=0.9, maxit=1000, tol=1e-8, active_set_size=10,
                        solver='cd')
    # Both solvers must agree and localize inside the label.
    assert_array_almost_equal(stc_prox.times, evoked_l21.times, 5)
    assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5)
    assert_array_almost_equal(stc_prox.data, stc_cd.data, 5)
    assert_true(stc_prox.vertno[1][0] in label.vertices)
    assert_true(stc_cd.vertno[1][0] in label.vertices)
    # Same solve with dSPM weights and the residual returned.
    stc, _ = mixed_norm(evoked_l21, forward, cov, alpha, loose=None,
                        depth=depth, maxit=500, tol=1e-4, active_set_size=10,
                        weights=stc_dspm, weights_min=weights_min,
                        return_residual=True)
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
    assert_true(stc.vertno[1][0] in label.vertices)
    # Do with TF-MxNE for test memory savings
    alpha_space = 60.  # spatial regularization parameter
    alpha_time = 1.  # temporal regularization parameter
    stc, _ = tf_mixed_norm(evoked, forward, cov, alpha_space, alpha_time,
                           loose=loose, depth=depth, maxit=100, tol=1e-4,
                           tstep=4, wsize=16, window=0.1, weights=stc_dspm,
                           weights_min=weights_min, return_residual=True)
    assert_array_almost_equal(stc.times, evoked.times, 5)
    assert_true(stc.vertno[1][0] in label.vertices)
| {
"repo_name": "jaeilepp/eggie",
"path": "mne/inverse_sparse/tests/test_mxne_inverse.py",
"copies": "2",
"size": "3823",
"license": "bsd-2-clause",
"hash": 1998038223554223000,
"line_mean": 40.1075268817,
"line_max": 79,
"alpha_frac": 0.6269945069,
"autogenerated": false,
"ratio": 3.136177194421657,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47631717013216573,
"avg_score": null,
"num_lines": null
} |
import os.path as op
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
from nose.tools import assert_true, assert_equal
from mne.datasets import testing
from mne.label import read_label
from mne import read_cov, read_forward_solution, read_evokeds
from mne.inverse_sparse import mixed_norm, tf_mixed_norm
from mne.inverse_sparse.mxne_inverse import make_stc_from_dipoles
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.utils import run_tests_if_main, slow_test
from mne.dipole import Dipole
# Paths into the MNE "testing" dataset (only present when downloaded).
data_path = testing.data_path(download=False)
# NOTE: These use the ave and cov from sample dataset (no _trunc)
fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
label = 'Aud-rh'
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
def _check_stcs(stc1, stc2):
"""Helper to check correctness"""
assert_allclose(stc1.times, stc2.times)
assert_allclose(stc1.data, stc2.data)
assert_allclose(stc1.vertices[0], stc2.vertices[0])
assert_allclose(stc1.vertices[1], stc2.vertices[1])
assert_allclose(stc1.tmin, stc2.tmin)
assert_allclose(stc1.tstep, stc2.tstep)
@slow_test
@testing.requires_testing_data
def test_mxne_inverse():
    """Test (TF-)MxNE inverse computation.

    Checks that the 'prox', 'cd' and 'bcd' MxNE solvers agree, that the
    dipole/stc conversions round-trip, and runs irMxNE and TF-MxNE on the
    same data.
    """
    # Read noise covariance matrix
    cov = read_cov(fname_cov)
    # Handling average file
    loose = None
    depth = 0.9
    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
    evoked.crop(tmin=-0.05, tmax=0.2)
    # Short L21 window around the peak for the plain MxNE solves
    evoked_l21 = evoked.copy()
    evoked_l21.crop(tmin=0.081, tmax=0.1)
    label = read_label(fname_label)
    forward = read_forward_solution(fname_fwd, force_fixed=False,
                                    surf_ori=True)
    # Reduce source space to make test computation faster
    inverse_operator = make_inverse_operator(evoked_l21.info, forward, cov,
                                             loose=loose, depth=depth,
                                             fixed=True)
    stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9.,
                             method='dSPM')
    # Binarize the dSPM map to use as a spatial weighting mask for MxNE
    stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0
    stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1.
    weights_min = 0.5
    # MxNE tests
    alpha = 70  # spatial regularization parameter
    # Solve with all three backends; they should give the same result.
    stc_prox = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                          depth=depth, maxit=300, tol=1e-8,
                          active_set_size=10, weights=stc_dspm,
                          weights_min=weights_min, solver='prox')
    stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                        depth=depth, maxit=300, tol=1e-8, active_set_size=10,
                        weights=stc_dspm, weights_min=weights_min,
                        solver='cd')
    stc_bcd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                         depth=depth, maxit=300, tol=1e-8, active_set_size=10,
                         weights=stc_dspm, weights_min=weights_min,
                         solver='bcd')
    assert_array_almost_equal(stc_prox.times, evoked_l21.times, 5)
    assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5)
    assert_array_almost_equal(stc_bcd.times, evoked_l21.times, 5)
    assert_allclose(stc_prox.data, stc_cd.data, rtol=1e-3, atol=0.0)
    assert_allclose(stc_prox.data, stc_bcd.data, rtol=1e-3, atol=0.0)
    assert_allclose(stc_cd.data, stc_bcd.data, rtol=1e-3, atol=0.0)
    # The strongest right-hemisphere source must fall inside the label
    assert_true(stc_prox.vertices[1][0] in label.vertices)
    assert_true(stc_cd.vertices[1][0] in label.vertices)
    assert_true(stc_bcd.vertices[1][0] in label.vertices)
    # Returning dipoles and converting back must reproduce the stc exactly
    dips = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                      depth=depth, maxit=300, tol=1e-8, active_set_size=10,
                      weights=stc_dspm, weights_min=weights_min,
                      solver='cd', return_as_dipoles=True)
    stc_dip = make_stc_from_dipoles(dips, forward['src'])
    assert_true(isinstance(dips[0], Dipole))
    _check_stcs(stc_cd, stc_dip)
    # Solve without weights, with the residual returned
    stc, _ = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                        depth=depth, maxit=300, tol=1e-8,
                        active_set_size=10, return_residual=True,
                        solver='cd')
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
    assert_true(stc.vertices[1][0] in label.vertices)
    # irMxNE tests
    stc = mixed_norm(evoked_l21, forward, cov, alpha,
                     n_mxne_iter=5, loose=loose, depth=depth,
                     maxit=300, tol=1e-8, active_set_size=10,
                     solver='cd')
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
    assert_true(stc.vertices[1][0] in label.vertices)
    # Regression check: irMxNE must select exactly these two vertices
    assert_equal(stc.vertices, [[63152], [79017]])
    # Do with TF-MxNE for test memory savings
    alpha_space = 60.  # spatial regularization parameter
    alpha_time = 1.  # temporal regularization parameter
    stc, _ = tf_mixed_norm(evoked, forward, cov, alpha_space, alpha_time,
                           loose=loose, depth=depth, maxit=100, tol=1e-4,
                           tstep=4, wsize=16, window=0.1, weights=stc_dspm,
                           weights_min=weights_min, return_residual=True)
    assert_array_almost_equal(stc.times, evoked.times, 5)
    assert_true(stc.vertices[1][0] in label.vertices)
# Run the tests when this module is executed as a script.
run_tests_if_main()
| {
"repo_name": "jaeilepp/mne-python",
"path": "mne/inverse_sparse/tests/test_mxne_inverse.py",
"copies": "1",
"size": "5797",
"license": "bsd-3-clause",
"hash": 6430297442701142000,
"line_mean": 42.9166666667,
"line_max": 79,
"alpha_frac": 0.6256684492,
"autogenerated": false,
"ratio": 3.0720720720720722,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9195846581878133,
"avg_score": 0.0003787878787878788,
"num_lines": 132
} |
from copy import deepcopy
import os.path as op
import pickle
import numpy as np
from scipy import fftpack
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
import pytest
from mne import (equalize_channels, pick_types, read_evokeds, write_evokeds,
grand_average, combine_evoked, create_info, read_events,
Epochs, EpochsArray)
from mne.evoked import _get_peak, Evoked, EvokedArray
from mne.io import read_raw_fif
from mne.io.constants import FIFF
from mne.utils import (_TempDir, requires_pandas, requires_version,
run_tests_if_main)
# Test data shipped with mne (io/tests/data): an evoked file (plain and
# gzipped), a short raw recording, and its event list.
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname = op.join(base_dir, 'test-ave.fif')
fname_gz = op.join(base_dir, 'test-ave.fif.gz')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
def test_decim():
    """Test evoked decimation.

    First on synthetic EpochsArray data (decimation must equal plain
    slicing, and two chained decimations must equal one by their product),
    then on real data (decimate-then-average must match
    average-then-decimate for either sample offset).
    """
    rng = np.random.RandomState(0)
    n_epochs, n_channels, n_times = 5, 10, 20
    dec_1, dec_2 = 2, 3
    decim = dec_1 * dec_2
    sfreq = 1000.
    sfreq_new = sfreq / decim
    data = rng.randn(n_epochs, n_channels, n_times)
    events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
    info = create_info(n_channels, sfreq, 'eeg')
    # low enough lowpass to avoid aliasing warnings from decimate
    info['lowpass'] = sfreq_new / float(decim)
    epochs = EpochsArray(data, info, events)
    data_epochs = epochs.copy().decimate(decim).get_data()
    data_epochs_2 = epochs.copy().decimate(decim, offset=1).get_data()
    # NB: this chained call decimates `epochs` in place
    data_epochs_3 = epochs.decimate(dec_1).decimate(dec_2).get_data()
    assert_array_equal(data_epochs, data[:, :, ::decim])
    assert_array_equal(data_epochs_2, data[:, :, 1::decim])
    assert_array_equal(data_epochs, data_epochs_3)
    # Now let's do it with some real data
    raw = read_raw_fif(raw_fname)
    events = read_events(event_name)
    sfreq_new = raw.info['sfreq'] / decim
    raw.info['lowpass'] = sfreq_new / 4.  # suppress aliasing warnings
    picks = pick_types(raw.info, meg=True, eeg=True, exclude=())
    epochs = Epochs(raw, events, 1, -0.2, 0.5, picks=picks, preload=True)
    for offset in (0, 1):
        # decimating epochs then averaging must equal averaging then
        # decimating the evoked
        ev_ep_decim = epochs.copy().decimate(decim, offset).average()
        ev_decim = epochs.average().decimate(decim, offset)
        expected_times = epochs.times[offset::decim]
        assert_allclose(ev_decim.times, expected_times)
        assert_allclose(ev_ep_decim.times, expected_times)
        expected_data = epochs.get_data()[:, :, offset::decim].mean(axis=0)
        assert_allclose(ev_decim.data, expected_data)
        assert_allclose(ev_ep_decim.data, expected_data)
        assert_equal(ev_decim.info['sfreq'], sfreq_new)
        assert_array_equal(ev_decim.times, expected_times)
@requires_version('scipy', '0.14')
def test_savgol_filter():
    """Test savgol filtering.

    Compares spectra before/after filtering: the pass-band must be
    preserved and the stop-band suppressed, without touching the original.
    """
    h_freq = 10.
    evoked = read_evokeds(fname, 0)
    freqs = fftpack.fftfreq(len(evoked.times), 1. / evoked.info['sfreq'])
    data = np.abs(fftpack.fft(evoked.data))
    # frequency masks below / well above the cutoff
    match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
    mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
    # a cutoff at the sampling frequency is invalid
    pytest.raises(ValueError, evoked.savgol_filter, evoked.info['sfreq'])
    evoked_sg = evoked.copy().savgol_filter(h_freq)
    data_filt = np.abs(fftpack.fft(evoked_sg.data))
    # decent in pass-band
    assert_allclose(np.mean(data[:, match_mask], 0),
                    np.mean(data_filt[:, match_mask], 0),
                    rtol=1e-4, atol=1e-2)
    # suppression in stop-band
    assert (np.mean(data[:, mismatch_mask]) >
            np.mean(data_filt[:, mismatch_mask]) * 5)
    # original preserved
    assert_allclose(data, np.abs(fftpack.fft(evoked.data)), atol=1e-16)
def test_hash_evoked():
    """Two identical reads must hash/compare/pickle equal; a change must not."""
    first = read_evokeds(fname, 0)
    second = read_evokeds(fname, 0)
    assert hash(first) == hash(second)
    assert first == second
    # do NOT use assert_equal here, failing output is terrible
    assert pickle.dumps(first) == pickle.dumps(second)
    # perturb a single sample; the hashes must diverge
    second.data[0, 0] -= 1
    assert hash(first) != hash(second)
def _aspect_kinds():
    """Return all FIFF aspect-kind constants (FIFFV_ASPECT_*)."""
    prefix = 'FIFFV_ASPECT_'
    return [getattr(FIFF, key) for key in FIFF if key.startswith(prefix)]
@pytest.mark.parametrize('aspect_kind', _aspect_kinds())
def test_evoked_aspects(aspect_kind, tmpdir):
    """Every FIFF aspect kind must repr and round-trip to disk (gh-6359)."""
    evoked = read_evokeds(fname, 0)
    evoked._aspect_kind = aspect_kind
    assert 'Evoked' in repr(evoked)
    # write/read cycle must preserve data and the aspect-derived kind
    temp_fname = op.join(str(tmpdir), 'test-ave.fif')
    evoked.save(temp_fname)
    reloaded = read_evokeds(temp_fname, condition=0)
    assert_allclose(evoked.data, reloaded.data)
    assert evoked.kind == reloaded.kind
@pytest.mark.slowtest
def test_io_evoked():
    """Test IO for evoked data (fif + gz) with integer and str args."""
    tempdir = _TempDir()
    ave = read_evokeds(fname, 0)
    # round-trip a single evoked through a temporary file
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
    # This not being assert_array_equal due to windows rounding
    assert (np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave.times, ave2.times)
    assert_equal(ave.nave, ave2.nave)
    assert_equal(ave._aspect_kind, ave2._aspect_kind)
    assert_equal(ave.kind, ave2.kind)
    assert_equal(ave.last, ave2.last)
    assert_equal(ave.first, ave2.first)
    assert (repr(ave))
    # test compressed i/o
    ave2 = read_evokeds(fname_gz, 0)
    assert (np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))
    # test str access
    condition = 'Left Auditory'
    # invalid `kind` values must be rejected
    pytest.raises(ValueError, read_evokeds, fname, condition, kind='stderr')
    pytest.raises(ValueError, read_evokeds, fname, condition,
                  kind='standard_error')
    ave3 = read_evokeds(fname, condition)
    assert_array_almost_equal(ave.data, ave3.data, 19)
    # test read_evokeds and write_evokeds
    # index-slice, integer-list and name-list access must all agree
    aves1 = read_evokeds(fname)[1::2]
    aves2 = read_evokeds(fname, [1, 3])
    aves3 = read_evokeds(fname, ['Right Auditory', 'Right visual'])
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
    aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
    for aves in [aves2, aves3, aves4]:
        for [av1, av2] in zip(aves1, aves):
            assert_array_almost_equal(av1.data, av2.data)
            assert_array_almost_equal(av1.times, av2.times)
            assert_equal(av1.nave, av2.nave)
            assert_equal(av1.kind, av2.kind)
            assert_equal(av1._aspect_kind, av2._aspect_kind)
            assert_equal(av1.last, av2.last)
            assert_equal(av1.first, av2.first)
            assert_equal(av1.comment, av2.comment)
    # test warnings on bad filenames
    fname2 = op.join(tempdir, 'test-bad-name.fif')
    with pytest.warns(RuntimeWarning, match='-ave.fif'):
        write_evokeds(fname2, ave)
    with pytest.warns(RuntimeWarning, match='-ave.fif'):
        read_evokeds(fname2)
    # constructor
    pytest.raises(TypeError, Evoked, fname)
    # MaxShield
    # files flagged as MaxShield must be refused unless explicitly allowed
    fname_ms = op.join(tempdir, 'test-ave.fif')
    assert (ave.info['maxshield'] is False)
    ave.info['maxshield'] = True
    ave.save(fname_ms)
    pytest.raises(ValueError, read_evokeds, fname_ms)
    with pytest.warns(RuntimeWarning, match='Elekta'):
        aves = read_evokeds(fname_ms, allow_maxshield=True)
    assert all(ave.info['maxshield'] is True for ave in aves)
    aves = read_evokeds(fname_ms, allow_maxshield='yes')
    assert (all(ave.info['maxshield'] is True for ave in aves))
def test_shift_time_evoked():
    """Test for shifting of time scale.

    Applies relative shifts of -0.1, +0.2, -0.1 s (net zero) through
    write/read cycles, then an absolute shift to -0.3 s.
    """
    tempdir = _TempDir()
    # Shift backward
    ave = read_evokeds(fname, 0)
    ave.shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    # Shift forward twice the amount
    ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_bshift.shift_time(0.2, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)
    # Shift backward again
    ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_fshift.shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)
    ave_normal = read_evokeds(fname, 0)
    ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    # net relative shift is zero, so everything must match the original
    assert_allclose(ave_normal.data, ave_relative.data, atol=1e-16, rtol=1e-3)
    assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)
    assert_equal(ave_normal.last, ave_relative.last)
    assert_equal(ave_normal.first, ave_relative.first)
    # Absolute time shift
    ave = read_evokeds(fname, 0)
    ave.shift_time(-0.3, relative=False)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    # data unchanged; first sample now corresponds to t = -0.3 s
    assert_allclose(ave_normal.data, ave_absolute.data, atol=1e-16, rtol=1e-3)
    assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
def test_evoked_resample():
    """Test resampling evoked data.

    Upsamples by 2x, writes/reads, downsamples back, and checks the result
    against the original.
    """
    tempdir = _TempDir()
    # upsample, write it out, read it in
    ave = read_evokeds(fname, 0)
    sfreq_normal = ave.info['sfreq']
    ave.resample(2 * sfreq_normal, npad=100)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    # compare it to the original
    ave_normal = read_evokeds(fname, 0)
    # and compare the original to the downsampled upsampled version
    ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_new.resample(sfreq_normal, npad=100)
    assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
    assert_array_almost_equal(ave_normal.times, ave_new.times)
    assert_equal(ave_normal.nave, ave_new.nave)
    assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
    assert_equal(ave_normal.kind, ave_new.kind)
    assert_equal(ave_normal.last, ave_new.last)
    assert_equal(ave_normal.first, ave_new.first)
    # for the above to work, the upsampling just about had to, but
    # we'll add a couple extra checks anyway
    assert (len(ave_up.times) == 2 * len(ave_normal.times))
    assert (ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
def test_evoked_filter():
    """Smoke-test Evoked.filter (Epochs/Raw filter tests are more thorough)."""
    evoked = read_evokeds(fname, 0).pick_types('grad')
    evoked.data[:] = 1.
    assert round(evoked.info['lowpass']) == 172
    # low-pass a copy at 40 Hz: the info must record the new cutoff ...
    low_passed = evoked.copy().filter(None, 40., fir_design='firwin')
    assert low_passed.info['lowpass'] == 40.
    # ... and the original (filtered via a copy) must stay untouched
    assert_allclose(evoked.data, 1., atol=1e-6)
def test_evoked_detrend():
    """Order-0 detrending must equal manual per-channel mean removal."""
    detrended = read_evokeds(fname, 0)
    reference = read_evokeds(fname, 0)
    detrended.detrend(0)
    # manual equivalent of order-0 detrending: subtract each channel's mean
    reference.data -= np.mean(reference.data, axis=1)[:, np.newaxis]
    good = pick_types(detrended.info, meg=True, eeg=True, exclude='bads')
    assert_allclose(detrended.data[good], reference.data[good],
                    rtol=1e-8, atol=1e-16)
@requires_pandas
def test_to_data_frame():
    """Test evoked Pandas exporter."""
    ave = read_evokeds(fname, 0)
    # out-of-range picks must be rejected
    pytest.raises(ValueError, ave.to_data_frame, picks=np.arange(400))
    df = ave.to_data_frame()
    assert ((df.columns == ave.ch_names).all())
    df = ave.to_data_frame(index=None).reset_index()
    assert ('time' in df.columns)
    # NOTE(review): the 1e13 / 1e15 factors are presumably per-channel-type
    # unit scalings applied by to_data_frame — confirm against its docs
    assert_array_equal(df.values[:, 1], ave.data[0] * 1e13)
    assert_array_equal(df.values[:, 3], ave.data[2] * 1e15)
    # long format: one row per (channel, time) observation
    df = ave.to_data_frame(long_format=True)
    assert(len(df) == ave.data.size)
    assert("time" in df.columns)
    assert("channel" in df.columns)
    assert("ch_type" in df.columns)
    assert("observation" in df.columns)
def test_evoked_proj():
    """Test SSP proj operations."""
    for proj in [True, False]:
        ave = read_evokeds(fname, condition=0, proj=proj)
        # the `proj` read argument controls whether projs come back active
        assert (all(p['active'] == proj for p in ave.info['projs']))
        # test adding / deleting proj
        if proj:
            # active projections cannot be added or deleted
            pytest.raises(ValueError, ave.add_proj, [],
                          {'remove_existing': True})
            pytest.raises(ValueError, ave.del_proj, 0)
        else:
            projs = deepcopy(ave.info['projs'])
            n_proj = len(ave.info['projs'])
            ave.del_proj(0)
            assert (len(ave.info['projs']) == n_proj - 1)
            # Test that already existing projections are not added.
            ave.add_proj(projs, remove_existing=False)
            assert (len(ave.info['projs']) == n_proj)
            ave.add_proj(projs[:-1], remove_existing=True)
            assert (len(ave.info['projs']) == n_proj - 1)
    ave = read_evokeds(fname, condition=0, proj=False)
    data = ave.data.copy()
    ave.apply_proj()
    # applying the projector must equal multiplying the data by it
    assert_allclose(np.dot(ave._projector, data), ave.data)
def test_get_peak():
    """Test peak getter."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    # invalid time windows / modes / channel types must be rejected
    pytest.raises(ValueError, evoked.get_peak, ch_type='mag', tmin=1)
    pytest.raises(ValueError, evoked.get_peak, ch_type='mag', tmax=0.9)
    pytest.raises(ValueError, evoked.get_peak, ch_type='mag', tmin=0.02,
                  tmax=0.01)
    pytest.raises(ValueError, evoked.get_peak, ch_type='mag', mode='foo')
    pytest.raises(RuntimeError, evoked.get_peak, ch_type=None, mode='foo')
    pytest.raises(ValueError, evoked.get_peak, ch_type='misc', mode='foo')
    # NOTE(review): despite the name, without time_as_index=True the second
    # return value is the peak latency in seconds (checked via evoked.times)
    ch_name, time_idx = evoked.get_peak(ch_type='mag')
    assert (ch_name in evoked.ch_names)
    assert (time_idx in evoked.times)
    ch_name, time_idx, max_amp = evoked.get_peak(ch_type='mag',
                                                 time_as_index=True,
                                                 return_amplitude=True)
    assert (time_idx < len(evoked.times))
    assert_equal(ch_name, 'MEG 1421')
    assert_allclose(max_amp, 7.17057e-13, rtol=1e-5)
    # merge_grads is only valid for gradiometers
    pytest.raises(ValueError, evoked.get_peak, ch_type='mag', merge_grads=True)
    ch_name, time_idx = evoked.get_peak(ch_type='grad', merge_grads=True)
    assert_equal(ch_name, 'MEG 244X')
    # exercise the private helper on a tiny hand-made array
    data = np.array([[0., 1., 2.],
                     [0., -3., 0]])
    times = np.array([.1, .2, .3])
    ch_idx, time_idx, max_amp = _get_peak(data, times, mode='abs')
    assert_equal(ch_idx, 1)
    assert_equal(time_idx, 1)
    assert_allclose(max_amp, -3.)
    ch_idx, time_idx, max_amp = _get_peak(data * -1, times, mode='neg')
    assert_equal(ch_idx, 0)
    assert_equal(time_idx, 2)
    assert_allclose(max_amp, -2.)
    ch_idx, time_idx, max_amp = _get_peak(data, times, mode='pos')
    assert_equal(ch_idx, 0)
    assert_equal(time_idx, 2)
    assert_allclose(max_amp, 2.)
    # 'neg'/'pos' modes require data of the matching sign
    pytest.raises(ValueError, _get_peak, data + 1e3, times, mode='neg')
    pytest.raises(ValueError, _get_peak, data - 1e3, times, mode='pos')
def test_drop_channels_mixin():
    """Dropping channels must work on copies, in place, and validate input."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    to_drop = evoked.ch_names[:3]
    kept = evoked.ch_names[3:]
    all_names = evoked.ch_names
    # dropping on a copy leaves the original untouched
    copied = evoked.copy().drop_channels(to_drop)
    assert_equal(kept, copied.ch_names)
    assert_equal(all_names, evoked.ch_names)
    assert_equal(len(all_names), len(evoked.data))
    # a single-channel drop removes exactly that channel
    single = evoked.copy().drop_channels([to_drop[0]])
    assert_equal(single.ch_names, all_names[1:])
    # in-place drop shrinks both the name list and the data
    evoked.drop_channels(to_drop)
    assert_equal(kept, evoked.ch_names)
    assert_equal(len(kept), len(evoked.data))
    # non-string and unknown channel names are rejected
    for bad_names in ([1, 2], "fake", ["fake"]):
        pytest.raises(ValueError, evoked.drop_channels, bad_names)
def test_pick_channels_mixin():
    """Picking channels must work on a copy, in place, and by type."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    wanted = evoked.ch_names[:3]
    all_names = evoked.ch_names
    # picking on a copy keeps the original intact
    subset = evoked.copy().pick_channels(wanted)
    assert_equal(wanted, subset.ch_names)
    assert_equal(all_names, evoked.ch_names)
    assert_equal(len(all_names), len(evoked.data))
    # in-place pick reduces names and data alike
    evoked.pick_channels(wanted)
    assert_equal(wanted, evoked.ch_names)
    assert_equal(len(wanted), len(evoked.data))
    # type-based picking; membership is checked via __contains__
    evoked = read_evokeds(fname, condition=0, proj=True)
    assert 'meg' in evoked
    assert 'eeg' in evoked
    evoked.pick_types(meg=False, eeg=True)
    assert 'meg' not in evoked
    assert 'eeg' in evoked
    assert len(evoked.ch_names) == 60
def test_equalize_channels():
    """equalize_channels must reduce all instances to the common channels."""
    first = read_evokeds(fname, condition=0, proj=True)
    second = first.copy()
    common = first.ch_names[2:]
    # remove a different leading channel from each copy ...
    first.drop_channels(first.ch_names[:1])
    second.drop_channels(second.ch_names[1:2])
    # ... equalization must leave only the channels present in both
    instances = [first, second]
    equalize_channels(instances)
    for inst in instances:
        assert_equal(common, inst.ch_names)
def test_arithmetic():
    """Test evoked arithmetic.

    Covers combine_evoked weighting schemes ('nave', 'equal', explicit),
    comment handling, grand_average, and channel reordering.
    """
    ev = read_evokeds(fname, condition=0)
    # synthetic evokeds with constant data +1 / -1 and different nave
    ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
    ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)
    # combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
    # data should be added according to their `nave` weights
    # nave = ev1.nave + ev2.nave
    ev = combine_evoked([ev1, ev2], weights='nave')
    assert_equal(ev.nave, ev1.nave + ev2.nave)
    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))
    # with same trial counts, a bunch of things should be equivalent
    for weights in ('nave', 'equal', [0.5, 0.5]):
        ev = combine_evoked([ev1, ev1], weights=weights)
        assert_allclose(ev.data, ev1.data)
        assert_equal(ev.nave, 2 * ev1.nave)
        ev = combine_evoked([ev1, -ev1], weights=weights)
        assert_allclose(ev.data, 0., atol=1e-20)
        assert_equal(ev.nave, 2 * ev1.nave)
    ev = combine_evoked([ev1, -ev1], weights='equal')
    assert_allclose(ev.data, 0., atol=1e-20)
    assert_equal(ev.nave, 2 * ev1.nave)
    # effective nave for an equal-weight combination with unequal inputs
    ev = combine_evoked([ev1, -ev2], weights='equal')
    expected = int(round(1. / (0.25 / ev1.nave + 0.25 / ev2.nave)))
    assert_equal(expected, 27)  # this is reasonable
    assert_equal(ev.nave, expected)
    # default comment behavior if evoked.comment is None
    old_comment1 = ev1.comment
    old_comment2 = ev2.comment
    ev1.comment = None
    ev = combine_evoked([ev1, -ev2], weights=[1, -1])
    assert_equal(ev.comment.count('unknown'), 2)
    assert ('-unknown' in ev.comment)
    assert (' + ' in ev.comment)
    ev1.comment = old_comment1
    ev2.comment = old_comment2
    # equal weighting
    ev = combine_evoked([ev1, ev2], weights='equal')
    assert_allclose(ev.data, np.zeros_like(ev1.data))
    # combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
    ev = combine_evoked([ev1, ev2], weights=[1, 0])
    assert_equal(ev.nave, ev1.nave)
    assert_allclose(ev.data, ev1.data)
    # simple subtraction (like in oddball)
    ev = combine_evoked([ev1, ev2], weights=[1, -1])
    assert_allclose(ev.data, 2 * np.ones_like(ev1.data))
    pytest.raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
    pytest.raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])
    # grand average
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    ch_names = evoked1.ch_names[2:]
    evoked1.info['bads'] = ['EEG 008']  # test interpolation
    evoked1.drop_channels(evoked1.ch_names[:1])
    evoked2.drop_channels(evoked2.ch_names[1:2])
    gave = grand_average([evoked1, evoked2])
    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
    assert_equal(ch_names, gave.ch_names)
    assert_equal(gave.nave, 2)
    # non-Evoked inputs must be rejected
    pytest.raises(TypeError, grand_average, [1, evoked1])
    # test channel (re)ordering
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    data2 = evoked2.data  # assumes everything is ordered to the first evoked
    data = (evoked1.data + evoked2.data) / 2
    evoked2.reorder_channels(evoked2.ch_names[::-1])
    assert not np.allclose(data2, evoked2.data)
    # grand_average must warn about, yet handle, mismatched channel order
    with pytest.warns(RuntimeWarning, match='reordering'):
        ev3 = grand_average((evoked1, evoked2))
    assert np.allclose(ev3.data, data)
    assert evoked1.ch_names != evoked2.ch_names
    assert evoked1.ch_names == ev3.ch_names
def test_array_epochs():
    """Test creating evoked from array.

    Round-trips an EvokedArray through disk, compares it with a
    single-epoch EpochsArray average, and checks constructor validation.
    """
    tempdir = _TempDir()
    # creating
    rng = np.random.RandomState(42)
    data1 = rng.randn(20, 60)
    sfreq = 1e3
    ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
    types = ['eeg'] * 20
    info = create_info(ch_names, sfreq, types)
    evoked1 = EvokedArray(data1, info, tmin=-0.01)
    # save, read, and compare evokeds
    tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
    evoked1.save(tmp_fname)
    evoked2 = read_evokeds(tmp_fname)[0]
    data2 = evoked2.data
    assert_allclose(data1, data2)
    assert_allclose(evoked1.times, evoked2.times)
    assert_equal(evoked1.first, evoked2.first)
    assert_equal(evoked1.last, evoked2.last)
    assert_equal(evoked1.kind, evoked2.kind)
    assert_equal(evoked1.nave, evoked2.nave)
    # now compare with EpochsArray (with single epoch)
    data3 = data1[np.newaxis, :, :]
    events = np.c_[10, 0, 1]
    evoked3 = EpochsArray(data3, info, events=events, tmin=-0.01).average()
    assert_allclose(evoked1.data, evoked3.data)
    assert_allclose(evoked1.times, evoked3.times)
    assert_equal(evoked1.first, evoked3.first)
    assert_equal(evoked1.last, evoked3.last)
    assert_equal(evoked1.kind, evoked3.kind)
    assert_equal(evoked1.nave, evoked3.nave)
    # test kind check
    with pytest.raises(ValueError, match='Invalid value'):
        EvokedArray(data1, info, tmin=0, kind=1)
    with pytest.raises(ValueError, match='Invalid value'):
        EvokedArray(data1, info, kind='mean')
    # test match between channels info and data
    # 19-channel info vs 20-channel data must be rejected
    ch_names = ['EEG %03d' % (i + 1) for i in range(19)]
    types = ['eeg'] * 19
    info = create_info(ch_names, sfreq, types)
    pytest.raises(ValueError, EvokedArray, data1, info, tmin=-0.01)
def test_time_as_index():
    """Crop boundaries must map onto the first and last sample indices."""
    evoked = read_evokeds(fname, condition=0).crop(-.1, .1)
    last = len(evoked.times) - 1
    # with rounding, the crop edges map exactly onto the edge samples
    assert_array_equal(evoked.time_as_index([-.1, .1], use_rounding=True),
                       [0, last])
def test_add_channels():
    """Test evoked splitting / re-appending channel types."""
    evoked = read_evokeds(fname, condition=0)
    # give the info a (fake) HPI subsystem so that add_channels also has
    # to merge non-trivial info structures
    hpi_coils = [{'event_bits': []},
                 {'event_bits': np.array([256, 0, 256, 256])},
                 {'event_bits': np.array([512, 0, 512, 512])}]
    evoked.info['hpi_subsystem'] = dict(hpi_coils=hpi_coils, ncoil=2)
    # split by channel type ...
    evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_meg = evoked.copy().pick_types(meg=True)
    evoked_stim = evoked.copy().pick_types(meg=False, stim=True)
    evoked_eeg_meg = evoked.copy().pick_types(meg=True, eeg=True)
    # ... and re-append: MEG + (EEG, stim) must contain all of them
    evoked_new = evoked_meg.copy().add_channels([evoked_eeg, evoked_stim])
    assert (all(ch in evoked_new.ch_names
                for ch in evoked_stim.ch_names + evoked_meg.ch_names))
    evoked_new = evoked_meg.copy().add_channels([evoked_eeg])
    # BUG FIX: this used to assert a bare generator expression, which is
    # always truthy and therefore checked nothing.  The intended check is
    # that every MEG+EEG channel is present (stim channels are explicitly
    # absent, see below), so compare against evoked_eeg_meg.
    assert all(ch in evoked_new.ch_names for ch in evoked_eeg_meg.ch_names)
    assert_array_equal(evoked_new.data, evoked_eeg_meg.data)
    assert (all(ch not in evoked_new.ch_names
                for ch in evoked_stim.ch_names))
    # Now test errors
    evoked_badsf = evoked_eeg.copy()
    evoked_badsf.info['sfreq'] = 3.1415927
    evoked_eeg = evoked_eeg.crop(-.1, .1)
    # mismatched sfreq, mismatched times, duplicate channels, non-list arg
    pytest.raises(RuntimeError, evoked_meg.add_channels, [evoked_badsf])
    pytest.raises(AssertionError, evoked_meg.add_channels, [evoked_eeg])
    pytest.raises(ValueError, evoked_meg.add_channels, [evoked_meg])
    pytest.raises(TypeError, evoked_meg.add_channels, evoked_badsf)
def test_evoked_baseline():
    """Baselining constant data over the full window must zero it out."""
    evoked = read_evokeds(fname, condition=0, baseline=None)
    # replace the data with all ones so the mean equals every sample
    evoked = EvokedArray(np.ones_like(evoked.data), evoked.info,
                         evoked.times[0])
    # mean-baseline over (None, None): subtracting the mean of a constant
    # signal leaves exactly zeros
    evoked.apply_baseline((None, None))
    assert_allclose(evoked.data, np.zeros_like(evoked.data))
def test_hilbert():
    """Test hilbert on raw, epochs, and evoked.

    Applying hilbert then averaging must equal averaging then hilbert
    (exactly for epochs, approximately for raw due to edge artifacts).
    """
    raw = read_raw_fif(raw_fname).load_data()
    raw.del_proj()
    # keep only two channels to make the test fast
    raw.pick_channels(raw.ch_names[:2])
    events = read_events(event_name)
    epochs = Epochs(raw, events)
    # hilbert on unloaded epochs must fail with a clear message
    with pytest.raises(RuntimeError, match='requires epochs data to be load'):
        epochs.apply_hilbert()
    epochs.load_data()
    evoked = epochs.average()
    # NB: apply_hilbert on raw/epochs here operates in place
    raw_hilb = raw.apply_hilbert()
    epochs_hilb = epochs.apply_hilbert()
    evoked_hilb = evoked.copy().apply_hilbert()
    evoked_hilb_2_data = epochs_hilb.get_data().mean(0)
    assert_allclose(evoked_hilb.data, evoked_hilb_2_data)
    # This one is only approximate because of edge artifacts
    evoked_hilb_3 = Epochs(raw_hilb, events).average()
    corr = np.corrcoef(np.abs(evoked_hilb_3.data.ravel()),
                       np.abs(evoked_hilb.data.ravel()))[0, 1]
    assert 0.96 < corr < 0.98
    # envelope=True mode
    evoked_hilb_env = evoked.apply_hilbert(envelope=True)
    assert_allclose(evoked_hilb_env.data, np.abs(evoked_hilb.data))
# Run the tests when this module is executed as a script.
run_tests_if_main()
| {
"repo_name": "adykstra/mne-python",
"path": "mne/tests/test_evoked.py",
"copies": "1",
"size": "25901",
"license": "bsd-3-clause",
"hash": 2639158505766476300,
"line_mean": 38.0075301205,
"line_max": 79,
"alpha_frac": 0.6448399676,
"autogenerated": false,
"ratio": 2.924683830171635,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4069523797771635,
"avg_score": null,
"num_lines": null
} |
import os.path as op
from copy import deepcopy
import warnings
import numpy as np
from scipy import fftpack
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
from nose.tools import assert_true, assert_raises, assert_not_equal
from mne import (equalize_channels, pick_types, read_evokeds, write_evokeds,
grand_average, combine_evoked, create_info)
from mne.evoked import _get_peak, Evoked, EvokedArray
from mne.epochs import EpochsArray
from mne.tests.common import assert_naming
from mne.utils import (_TempDir, requires_pandas, slow_test, requires_version,
run_tests_if_main)
from mne.externals.six.moves import cPickle as pickle
# Always surface warnings so the tests below can capture and assert on them.
warnings.simplefilter('always')
# Test data shipped with the mne.io tests: plain and gzipped evoked files.
fname = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
                'test-ave.fif')
fname_gz = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
                   'test-ave.fif.gz')
@requires_version('scipy', '0.14')
def test_savgol_filter():
    """Test Savitzky-Golay low-pass filtering of evoked data."""
    cutoff = 10.
    evoked = read_evokeds(fname, 0)
    sfreq = evoked.info['sfreq']
    freqs = fftpack.fftfreq(len(evoked.times), 1. / sfreq)
    spectrum = np.abs(fftpack.fft(evoked.data))
    pass_band = (freqs >= 0) & (freqs <= cutoff / 2.)
    stop_band = (freqs >= cutoff * 2) & (freqs < 50.)
    # Passing the sampling frequency itself as cutoff must raise.
    assert_raises(ValueError, evoked.savgol_filter, sfreq)
    filtered = evoked.copy().savgol_filter(cutoff)
    spectrum_filt = np.abs(fftpack.fft(filtered.data))
    # Pass-band energy is essentially untouched.
    assert_allclose(np.mean(spectrum[:, pass_band], 0),
                    np.mean(spectrum_filt[:, pass_band], 0),
                    rtol=1e-4, atol=1e-2)
    # Stop-band energy is suppressed at least five-fold.
    assert_true(np.mean(spectrum[:, stop_band]) >
                np.mean(spectrum_filt[:, stop_band]) * 5)
    # Filtering ran on a copy, so the input evoked itself is unchanged.
    assert_allclose(spectrum, np.abs(fftpack.fft(evoked.data)), atol=1e-16)
def test_hash_evoked():
    """Test hashing of evoked instances."""
    first = read_evokeds(fname, 0)
    second = read_evokeds(fname, 0)
    # Two reads of the same condition must hash (and pickle) identically.
    assert_equal(hash(first), hash(second))
    # do NOT use assert_equal here, failing output is terrible
    assert_true(pickle.dumps(first) == pickle.dumps(second))
    # Any modification of the data must change the hash.
    second.data[0, 0] -= 1
    assert_not_equal(hash(first), hash(second))
@slow_test
def test_io_evoked():
    """Test IO for evoked data (fif + gz) with integer and str args
    """
    tempdir = _TempDir()
    ave = read_evokeds(fname, 0)
    # Round-trip through a temp file and compare all public attributes.
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
    # This not being assert_array_equal due to windows rounding
    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave.times, ave2.times)
    assert_equal(ave.nave, ave2.nave)
    assert_equal(ave._aspect_kind, ave2._aspect_kind)
    assert_equal(ave.kind, ave2.kind)
    assert_equal(ave.last, ave2.last)
    assert_equal(ave.first, ave2.first)
    assert_true(repr(ave))
    # test compressed i/o
    ave2 = read_evokeds(fname_gz, 0)
    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))
    # test str access
    condition = 'Left Auditory'
    # These kind arguments must raise ValueError for this file/condition.
    assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr')
    assert_raises(ValueError, read_evokeds, fname, condition,
                  kind='standard_error')
    ave3 = read_evokeds(fname, condition)
    assert_array_almost_equal(ave.data, ave3.data, 19)
    # test read_evokeds and write_evokeds
    # Selection by index list and by condition-name list must both agree
    # with slicing the full read, before and after a disk round-trip.
    aves1 = read_evokeds(fname)[1::2]
    aves2 = read_evokeds(fname, [1, 3])
    aves3 = read_evokeds(fname, ['Right Auditory', 'Right visual'])
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
    aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
    for aves in [aves2, aves3, aves4]:
        for [av1, av2] in zip(aves1, aves):
            assert_array_almost_equal(av1.data, av2.data)
            assert_array_almost_equal(av1.times, av2.times)
            assert_equal(av1.nave, av2.nave)
            assert_equal(av1.kind, av2.kind)
            assert_equal(av1._aspect_kind, av2._aspect_kind)
            assert_equal(av1.last, av2.last)
            assert_equal(av1.first, av2.first)
            assert_equal(av1.comment, av2.comment)
    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        fname2 = op.join(tempdir, 'test-bad-name.fif')
        # A filename without the conventional suffix should warn on both
        # write and read (two warnings expected by assert_naming).
        write_evokeds(fname2, ave)
        read_evokeds(fname2)
        assert_naming(w, 'test_evoked.py', 2)
    # constructor
    # Evoked must not be instantiated directly from a filename.
    assert_raises(TypeError, Evoked, fname)
def test_shift_time_evoked():
    """ Test for shifting of time scale
    """
    tempdir = _TempDir()
    # Shift backward
    ave = read_evokeds(fname, 0)
    ave.shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    # Shift forward twice the amount
    ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_bshift.shift_time(0.2, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)
    # Shift backward again
    ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_fshift.shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)
    # The net relative shift is -0.1 + 0.2 - 0.1 = 0, so after three
    # disk round-trips the result must match the unshifted original.
    ave_normal = read_evokeds(fname, 0)
    ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    assert_true(np.allclose(ave_normal.data, ave_relative.data,
                            atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)
    assert_equal(ave_normal.last, ave_relative.last)
    assert_equal(ave_normal.first, ave_relative.first)
    # Absolute time shift
    # relative=False: the first sample lands at the given time (-0.3 s),
    # i.e. first == int(-0.3 * sfreq), while the data stay untouched.
    ave = read_evokeds(fname, 0)
    ave.shift_time(-0.3, relative=False)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    assert_true(np.allclose(ave_normal.data, ave_absolute.data,
                            atol=1e-16, rtol=1e-3))
    assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
def test_evoked_resample():
    """Test for resampling of evoked data
    """
    tempdir = _TempDir()
    # upsample, write it out, read it in
    ave = read_evokeds(fname, 0)
    sfreq_normal = ave.info['sfreq']
    ave.resample(2 * sfreq_normal, npad=100)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    # compare it to the original
    ave_normal = read_evokeds(fname, 0)
    # and compare the original to the downsampled upsampled version
    ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_new.resample(sfreq_normal, npad=100)
    # Up- then down-sampling approximately recovers the original; only
    # 2 decimals because resampling is not perfectly invertible.
    assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
    assert_array_almost_equal(ave_normal.times, ave_new.times)
    assert_equal(ave_normal.nave, ave_new.nave)
    assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
    assert_equal(ave_normal.kind, ave_new.kind)
    assert_equal(ave_normal.last, ave_new.last)
    assert_equal(ave_normal.first, ave_new.first)
    # for the above to work, the upsampling just about had to, but
    # we'll add a couple extra checks anyway
    assert_true(len(ave_up.times) == 2 * len(ave_normal.times))
    assert_true(ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
def test_evoked_detrend():
    """Test order-0 detrending of evoked data."""
    detrended = read_evokeds(fname, 0)
    manual = read_evokeds(fname, 0)
    # Order-0 detrend is just per-channel mean removal.
    detrended.detrend(0)
    manual.data = manual.data - manual.data.mean(axis=1, keepdims=True)
    picks = pick_types(detrended.info, meg=True, eeg=True, exclude='bads')
    assert_true(np.allclose(detrended.data[picks], manual.data[picks],
                            rtol=1e-8, atol=1e-16))
@requires_pandas
def test_to_data_frame():
    """Test the evoked-to-DataFrame exporter."""
    evoked = read_evokeds(fname, 0)
    # Out-of-range picks must be rejected.
    assert_raises(ValueError, evoked.to_data_frame, picks=np.arange(400))
    frame = evoked.to_data_frame()
    # One DataFrame column per channel.
    assert_true((frame.columns == evoked.ch_names).all())
    frame = evoked.to_data_frame(index=None).reset_index('time')
    assert_true('time' in frame.columns)
    # Exported values are scaled copies of the channel data
    # (1e13 / 1e15 unit-conversion factors).
    assert_array_equal(frame.values[:, 1], evoked.data[0] * 1e13)
    assert_array_equal(frame.values[:, 3], evoked.data[2] * 1e15)
def test_evoked_proj():
    """Test SSP proj operations
    """
    # Read with and without applying projections; each stored projector's
    # `active` flag must reflect the `proj` argument.
    for proj in [True, False]:
        ave = read_evokeds(fname, condition=0, proj=proj)
        assert_true(all(p['active'] == proj for p in ave.info['projs']))
        # test adding / deleting proj
        if proj:
            # Once applied, projections can be neither extended nor removed.
            assert_raises(ValueError, ave.add_proj, [],
                          {'remove_existing': True})
            assert_raises(ValueError, ave.del_proj, 0)
        else:
            projs = deepcopy(ave.info['projs'])
            n_proj = len(ave.info['projs'])
            ave.del_proj(0)
            assert_true(len(ave.info['projs']) == n_proj - 1)
            # Test that already existing projections are not added.
            ave.add_proj(projs, remove_existing=False)
            assert_true(len(ave.info['projs']) == n_proj)
            ave.add_proj(projs[:-1], remove_existing=True)
            assert_true(len(ave.info['projs']) == n_proj - 1)
    # apply_proj must be equivalent to left-multiplying the data by the
    # projector matrix.
    ave = read_evokeds(fname, condition=0, proj=False)
    data = ave.data.copy()
    ave.apply_proj()
    assert_allclose(np.dot(ave._projector, data), ave.data)
def test_get_peak():
    """Test peak getter
    """
    evoked = read_evokeds(fname, condition=0, proj=True)
    # Invalid time windows, modes, and channel types must be rejected.
    assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=1)
    assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmax=0.9)
    assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=0.02,
                  tmax=0.01)
    assert_raises(ValueError, evoked.get_peak, ch_type='mag', mode='foo')
    assert_raises(RuntimeError, evoked.get_peak, ch_type=None, mode='foo')
    assert_raises(ValueError, evoked.get_peak, ch_type='misc', mode='foo')
    # By default the peak is returned as (channel name, peak time).
    ch_name, time_idx = evoked.get_peak(ch_type='mag')
    assert_true(ch_name in evoked.ch_names)
    assert_true(time_idx in evoked.times)
    # With time_as_index=True the second value is a sample index instead.
    ch_name, time_idx = evoked.get_peak(ch_type='mag',
                                        time_as_index=True)
    assert_true(time_idx < len(evoked.times))
    assert_equal(ch_name, 'MEG 1421')
    # Exercise the private helper directly on a tiny hand-made array.
    data = np.array([[0., 1., 2.],
                     [0., -3., 0]])
    times = np.array([.1, .2, .3])
    # abs mode: largest magnitude is -3 at channel 1, sample 1.
    ch_idx, time_idx = _get_peak(data, times, mode='abs')
    assert_equal(ch_idx, 1)
    assert_equal(time_idx, 1)
    ch_idx, time_idx = _get_peak(data * -1, times, mode='neg')
    assert_equal(ch_idx, 0)
    assert_equal(time_idx, 2)
    ch_idx, time_idx = _get_peak(data, times, mode='pos')
    assert_equal(ch_idx, 0)
    assert_equal(time_idx, 2)
    # 'neg'/'pos' modes require values of the matching sign to exist.
    assert_raises(ValueError, _get_peak, data + 1e3, times, mode='neg')
    assert_raises(ValueError, _get_peak, data - 1e3, times, mode='pos')
def test_drop_channels_mixin():
    """Test the channel-dropping mixin on evoked data."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    to_drop = evoked.ch_names[:3]
    remaining = evoked.ch_names[3:]
    all_names = evoked.ch_names
    # Dropping on a copy must leave the original untouched.
    pruned_copy = evoked.copy().drop_channels(to_drop)
    assert_equal(remaining, pruned_copy.ch_names)
    assert_equal(all_names, evoked.ch_names)
    assert_equal(len(all_names), len(evoked.data))
    # In-place drop removes both names and the matching data rows.
    evoked.drop_channels(to_drop)
    assert_equal(remaining, evoked.ch_names)
    assert_equal(len(remaining), len(evoked.data))
def test_pick_channels_mixin():
    """Test the channel-picking mixin on evoked data."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    keep = evoked.ch_names[:3]
    all_names = evoked.ch_names
    # Picking on a copy must leave the original untouched.
    subset = evoked.copy().pick_channels(keep)
    assert_equal(keep, subset.ch_names)
    assert_equal(all_names, evoked.ch_names)
    assert_equal(len(all_names), len(evoked.data))
    # In-place pick keeps only the requested names and data rows.
    evoked.pick_channels(keep)
    assert_equal(keep, evoked.ch_names)
    assert_equal(len(keep), len(evoked.data))
    # pick_types restricted to EEG must remove all MEG channels.
    evoked = read_evokeds(fname, condition=0, proj=True)
    assert_true('meg' in evoked)
    assert_true('eeg' in evoked)
    evoked.pick_types(meg=False, eeg=True)
    assert_true('meg' not in evoked)
    assert_true('eeg' in evoked)
    assert_true(len(evoked.ch_names) == 60)
def test_equalize_channels():
    """Test equalization of channel sets across evoked instances."""
    first = read_evokeds(fname, condition=0, proj=True)
    second = first.copy()
    shared = first.ch_names[2:]
    # Drop a different channel from each copy; their intersection is
    # exactly `shared`.
    first.drop_channels(first.ch_names[:1])
    second.drop_channels(second.ch_names[1:2])
    pair = [first, second]
    equalize_channels(pair)
    for inst in pair:
        assert_equal(shared, inst.ch_names)
def test_evoked_arithmetic():
    """Test evoked arithmetic
    """
    ev = read_evokeds(fname, condition=0)
    # Two constant-data evokeds of opposite sign with different naves.
    ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
    ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)
    # combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
    # data should be added according to their `nave` weights
    # nave = ev1.nave + ev2.nave
    ev = ev1 + ev2
    assert_equal(ev.nave, ev1.nave + ev2.nave)
    # nave-weighted mean: (20 * 1 + 10 * -1) / 30 == 1/3.
    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))
    ev = ev1 - ev2
    assert_equal(ev.nave, ev1.nave + ev2.nave)
    assert_equal(ev.comment, ev1.comment + ' - ' + ev2.comment)
    assert_allclose(ev.data, np.ones_like(ev1.data))
    # default comment behavior if evoked.comment is None
    old_comment1 = ev1.comment
    old_comment2 = ev2.comment
    ev1.comment = None
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        ev = ev1 - ev2
        assert_equal(ev.comment, 'unknown')
    ev1.comment = old_comment1
    ev2.comment = old_comment2
    # equal weighting
    ev = combine_evoked([ev1, ev2], weights='equal')
    assert_allclose(ev.data, np.zeros_like(ev1.data))
    # combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
    ev = combine_evoked([ev1, ev2], weights=[1, 0])
    assert_equal(ev.nave, ev1.nave)
    assert_allclose(ev.data, ev1.data)
    # simple subtraction (like in oddball)
    ev = combine_evoked([ev1, ev2], weights=[1, -1])
    assert_allclose(ev.data, 2 * np.ones_like(ev1.data))
    # Invalid weight specifications must raise.
    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])
    # grand average
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    ch_names = evoked1.ch_names[2:]
    evoked1.info['bads'] = ['EEG 008'] # test interpolation
    # Drop a different channel from each so grand_average must equalize.
    evoked1.drop_channels(evoked1.ch_names[:1])
    evoked2.drop_channels(evoked2.ch_names[1:2])
    gave = grand_average([evoked1, evoked2])
    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
    assert_equal(ch_names, gave.ch_names)
    assert_equal(gave.nave, 2)
def test_array_epochs():
    """Test creating evoked from array
    """
    tempdir = _TempDir()
    # creating
    rng = np.random.RandomState(42)
    data1 = rng.randn(20, 60)
    sfreq = 1e3
    ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
    types = ['eeg'] * 20
    info = create_info(ch_names, sfreq, types)
    evoked1 = EvokedArray(data1, info, tmin=-0.01)
    # save, read, and compare evokeds
    # Disk round-trip must preserve data and metadata.
    tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
    evoked1.save(tmp_fname)
    evoked2 = read_evokeds(tmp_fname)[0]
    data2 = evoked2.data
    assert_allclose(data1, data2)
    assert_allclose(evoked1.times, evoked2.times)
    assert_equal(evoked1.first, evoked2.first)
    assert_equal(evoked1.last, evoked2.last)
    assert_equal(evoked1.kind, evoked2.kind)
    assert_equal(evoked1.nave, evoked2.nave)
    # now compare with EpochsArray (with single epoch)
    # Averaging a single-epoch EpochsArray must reproduce the EvokedArray.
    data3 = data1[np.newaxis, :, :]
    events = np.c_[10, 0, 1]
    evoked3 = EpochsArray(data3, info, events=events, tmin=-0.01).average()
    assert_allclose(evoked1.data, evoked3.data)
    assert_allclose(evoked1.times, evoked3.times)
    assert_equal(evoked1.first, evoked3.first)
    assert_equal(evoked1.last, evoked3.last)
    assert_equal(evoked1.kind, evoked3.kind)
    assert_equal(evoked1.nave, evoked3.nave)
    # test kind check
    assert_raises(TypeError, EvokedArray, data1, info, tmin=0, kind=1)
    assert_raises(ValueError, EvokedArray, data1, info, tmin=0, kind='mean')
    # test match between channels info and data
    # 19 channels of info for 20 rows of data must raise.
    ch_names = ['EEG %03d' % (i + 1) for i in range(19)]
    types = ['eeg'] * 19
    info = create_info(ch_names, sfreq, types)
    assert_raises(ValueError, EvokedArray, data1, info, tmin=-0.01)
def test_time_as_index():
    """Test conversion of times to sample indices."""
    evoked = read_evokeds(fname, condition=0).crop(-.1, .1)
    # The crop edges must map onto the first and last samples.
    first_last = [0, len(evoked.times) - 1]
    assert_array_equal(
        evoked.time_as_index([-.1, .1], use_rounding=True), first_last)
def test_add_channels():
    """Test evoked splitting / re-appending channel types."""
    evoked = read_evokeds(fname, condition=0)
    evoked.info['buffer_size_sec'] = None
    hpi_coils = [{'event_bits': []},
                 {'event_bits': np.array([256, 0, 256, 256])},
                 {'event_bits': np.array([512, 0, 512, 512])}]
    evoked.info['hpi_subsystem'] = dict(hpi_coils=hpi_coils, ncoil=2)
    # Split the evoked by channel type, then re-assemble it.
    evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_meg = evoked.copy().pick_types(meg=True)
    evoked_stim = evoked.copy().pick_types(meg=False, stim=True)
    evoked_eeg_meg = evoked.copy().pick_types(meg=True, eeg=True)
    evoked_new = evoked_meg.copy().add_channels([evoked_eeg, evoked_stim])
    assert_true(all(ch in evoked_new.ch_names
                    for ch in evoked_stim.ch_names + evoked_meg.ch_names))
    evoked_new = evoked_meg.copy().add_channels([evoked_eeg])
    # BUG FIX: the original passed a bare generator expression to
    # assert_true(), which is always truthy, so nothing was checked.
    # The intended check (confirmed by the data/stim assertions below) is
    # that every MEG+EEG channel made it into the recombined object.
    assert_true(all(ch in evoked_new.ch_names
                    for ch in evoked_eeg_meg.ch_names))
    assert_array_equal(evoked_new.data, evoked_eeg_meg.data)
    # Stim channels were not added back, so they must be absent.
    assert_true(all(ch not in evoked_new.ch_names
                    for ch in evoked_stim.ch_names))
    # Now test errors
    # Mismatched sampling rate, mismatched times, duplicate channels, and
    # a non-list argument must all be rejected.
    evoked_badsf = evoked_eeg.copy()
    evoked_badsf.info['sfreq'] = 3.1415927
    evoked_eeg = evoked_eeg.crop(-.1, .1)
    assert_raises(RuntimeError, evoked_meg.add_channels, [evoked_badsf])
    assert_raises(AssertionError, evoked_meg.add_channels, [evoked_eeg])
    assert_raises(ValueError, evoked_meg.add_channels, [evoked_meg])
    assert_raises(AssertionError, evoked_meg.add_channels, evoked_badsf)
# Allow running this test module directly as a script (mne.utils helper).
run_tests_if_main()
| {
"repo_name": "wronk/mne-python",
"path": "mne/tests/test_evoked.py",
"copies": "2",
"size": "18919",
"license": "bsd-3-clause",
"hash": 662986066439100700,
"line_mean": 36.4633663366,
"line_max": 78,
"alpha_frac": 0.6428458164,
"autogenerated": false,
"ratio": 2.9083781706379708,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9551223987037971,
"avg_score": 0,
"num_lines": 505
} |
import os.path as op
from copy import deepcopy
import numpy as np
from scipy import fftpack
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
import pytest
from mne import (equalize_channels, pick_types, read_evokeds, write_evokeds,
grand_average, combine_evoked, create_info, read_events,
Epochs, EpochsArray)
from mne.evoked import _get_peak, Evoked, EvokedArray
from mne.io import read_raw_fif
from mne.utils import (_TempDir, requires_pandas, requires_version,
run_tests_if_main)
from mne.externals.six.moves import cPickle as pickle
# Test data shipped with the mne.io tests: evoked (plain + gzipped), raw,
# and event files.
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname = op.join(base_dir, 'test-ave.fif')
fname_gz = op.join(base_dir, 'test-ave.fif.gz')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
def test_decim():
    """Test evoked decimation."""
    rng = np.random.RandomState(0)
    n_epochs, n_channels, n_times = 5, 10, 20
    dec_1, dec_2 = 2, 3
    decim = dec_1 * dec_2
    sfreq = 1000.
    sfreq_new = sfreq / decim
    data = rng.randn(n_epochs, n_channels, n_times)
    events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
    info = create_info(n_channels, sfreq, 'eeg')
    # Keep the recorded lowpass low so decimation emits no aliasing warning.
    info['lowpass'] = sfreq_new / float(decim)
    epochs = EpochsArray(data, info, events)
    # Decimation must be plain subsampling (with optional offset), and
    # decimating in two stages must equal one decimation by the product.
    data_epochs = epochs.copy().decimate(decim).get_data()
    data_epochs_2 = epochs.copy().decimate(decim, offset=1).get_data()
    data_epochs_3 = epochs.decimate(dec_1).decimate(dec_2).get_data()
    assert_array_equal(data_epochs, data[:, :, ::decim])
    assert_array_equal(data_epochs_2, data[:, :, 1::decim])
    assert_array_equal(data_epochs, data_epochs_3)
    # Now let's do it with some real data
    raw = read_raw_fif(raw_fname)
    events = read_events(event_name)
    sfreq_new = raw.info['sfreq'] / decim
    raw.info['lowpass'] = sfreq_new / 4. # suppress aliasing warnings
    picks = pick_types(raw.info, meg=True, eeg=True, exclude=())
    epochs = Epochs(raw, events, 1, -0.2, 0.5, picks=picks, preload=True)
    # Decimating the average must equal averaging decimated epochs, for
    # either decimation offset.
    for offset in (0, 1):
        ev_ep_decim = epochs.copy().decimate(decim, offset).average()
        ev_decim = epochs.average().decimate(decim, offset)
        expected_times = epochs.times[offset::decim]
        assert_allclose(ev_decim.times, expected_times)
        assert_allclose(ev_ep_decim.times, expected_times)
        expected_data = epochs.get_data()[:, :, offset::decim].mean(axis=0)
        assert_allclose(ev_decim.data, expected_data)
        assert_allclose(ev_ep_decim.data, expected_data)
        assert_equal(ev_decim.info['sfreq'], sfreq_new)
        assert_array_equal(ev_decim.times, expected_times)
@requires_version('scipy', '0.14')
def test_savgol_filter():
    """Test savgol filtering."""
    h_freq = 10.
    evoked = read_evokeds(fname, 0)
    freqs = fftpack.fftfreq(len(evoked.times), 1. / evoked.info['sfreq'])
    data = np.abs(fftpack.fft(evoked.data))
    # Frequency bins well below the cutoff vs. well above it.
    match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
    mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
    # Passing the sampling frequency itself as cutoff must raise.
    pytest.raises(ValueError, evoked.savgol_filter, evoked.info['sfreq'])
    evoked_sg = evoked.copy().savgol_filter(h_freq)
    data_filt = np.abs(fftpack.fft(evoked_sg.data))
    # decent in pass-band
    assert_allclose(np.mean(data[:, match_mask], 0),
                    np.mean(data_filt[:, match_mask], 0),
                    rtol=1e-4, atol=1e-2)
    # suppression in stop-band
    assert (np.mean(data[:, mismatch_mask]) >
            np.mean(data_filt[:, mismatch_mask]) * 5)
    # original preserved
    assert_allclose(data, np.abs(fftpack.fft(evoked.data)), atol=1e-16)
def test_hash_evoked():
    """Test hashing of evoked instances."""
    first = read_evokeds(fname, 0)
    second = read_evokeds(fname, 0)
    # Two reads of the same condition must hash (and pickle) identically.
    assert_equal(hash(first), hash(second))
    # do NOT use assert_equal here, failing output is terrible
    assert pickle.dumps(first) == pickle.dumps(second)
    # Any modification of the data must change the hash.
    second.data[0, 0] -= 1
    assert hash(first) != hash(second)
@pytest.mark.slowtest
def test_io_evoked():
    """Test IO for evoked data (fif + gz) with integer and str args."""
    tempdir = _TempDir()
    ave = read_evokeds(fname, 0)
    # Round-trip through a temp file and compare all public attributes.
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
    # This not being assert_array_equal due to windows rounding
    assert (np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave.times, ave2.times)
    assert_equal(ave.nave, ave2.nave)
    assert_equal(ave._aspect_kind, ave2._aspect_kind)
    assert_equal(ave.kind, ave2.kind)
    assert_equal(ave.last, ave2.last)
    assert_equal(ave.first, ave2.first)
    assert (repr(ave))
    # test compressed i/o
    ave2 = read_evokeds(fname_gz, 0)
    assert (np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))
    # test str access
    condition = 'Left Auditory'
    # These kind arguments must raise ValueError for this file/condition.
    pytest.raises(ValueError, read_evokeds, fname, condition, kind='stderr')
    pytest.raises(ValueError, read_evokeds, fname, condition,
                  kind='standard_error')
    ave3 = read_evokeds(fname, condition)
    assert_array_almost_equal(ave.data, ave3.data, 19)
    # test read_evokeds and write_evokeds
    # Selection by index list and by condition-name list must both agree
    # with slicing the full read, before and after a disk round-trip.
    aves1 = read_evokeds(fname)[1::2]
    aves2 = read_evokeds(fname, [1, 3])
    aves3 = read_evokeds(fname, ['Right Auditory', 'Right visual'])
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
    aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
    for aves in [aves2, aves3, aves4]:
        for [av1, av2] in zip(aves1, aves):
            assert_array_almost_equal(av1.data, av2.data)
            assert_array_almost_equal(av1.times, av2.times)
            assert_equal(av1.nave, av2.nave)
            assert_equal(av1.kind, av2.kind)
            assert_equal(av1._aspect_kind, av2._aspect_kind)
            assert_equal(av1.last, av2.last)
            assert_equal(av1.first, av2.first)
            assert_equal(av1.comment, av2.comment)
    # test warnings on bad filenames
    # A filename without the conventional '-ave.fif' suffix warns on both
    # write and read.
    fname2 = op.join(tempdir, 'test-bad-name.fif')
    with pytest.warns(RuntimeWarning, match='-ave.fif'):
        write_evokeds(fname2, ave)
    with pytest.warns(RuntimeWarning, match='-ave.fif'):
        read_evokeds(fname2)
    # constructor
    # Evoked must not be instantiated directly from a filename.
    pytest.raises(TypeError, Evoked, fname)
    # MaxShield
    # Files flagged as MaxShield are refused unless the caller opts in
    # via allow_maxshield (True or 'yes'), which also warns.
    fname_ms = op.join(tempdir, 'test-ave.fif')
    assert (ave.info['maxshield'] is False)
    ave.info['maxshield'] = True
    ave.save(fname_ms)
    pytest.raises(ValueError, read_evokeds, fname_ms)
    with pytest.warns(RuntimeWarning, match='Elekta'):
        aves = read_evokeds(fname_ms, allow_maxshield=True)
    assert all(ave.info['maxshield'] is True for ave in aves)
    aves = read_evokeds(fname_ms, allow_maxshield='yes')
    assert (all(ave.info['maxshield'] is True for ave in aves))
def test_shift_time_evoked():
    """Test for shifting of time scale."""
    tempdir = _TempDir()
    # Shift backward
    ave = read_evokeds(fname, 0)
    ave.shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    # Shift forward twice the amount
    ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_bshift.shift_time(0.2, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)
    # Shift backward again
    ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_fshift.shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)
    # The net relative shift is -0.1 + 0.2 - 0.1 = 0, so after three
    # disk round-trips the result must match the unshifted original.
    ave_normal = read_evokeds(fname, 0)
    ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    assert_allclose(ave_normal.data, ave_relative.data, atol=1e-16, rtol=1e-3)
    assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)
    assert_equal(ave_normal.last, ave_relative.last)
    assert_equal(ave_normal.first, ave_relative.first)
    # Absolute time shift
    # relative=False: the first sample lands at the given time (-0.3 s),
    # i.e. first == int(-0.3 * sfreq), while the data stay untouched.
    ave = read_evokeds(fname, 0)
    ave.shift_time(-0.3, relative=False)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    assert_allclose(ave_normal.data, ave_absolute.data, atol=1e-16, rtol=1e-3)
    assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
def test_evoked_resample():
    """Test resampling evoked data."""
    tempdir = _TempDir()
    # upsample, write it out, read it in
    ave = read_evokeds(fname, 0)
    sfreq_normal = ave.info['sfreq']
    ave.resample(2 * sfreq_normal, npad=100)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    # compare it to the original
    ave_normal = read_evokeds(fname, 0)
    # and compare the original to the downsampled upsampled version
    ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_new.resample(sfreq_normal, npad=100)
    # Up- then down-sampling approximately recovers the original; only
    # 2 decimals because resampling is not perfectly invertible.
    assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
    assert_array_almost_equal(ave_normal.times, ave_new.times)
    assert_equal(ave_normal.nave, ave_new.nave)
    assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
    assert_equal(ave_normal.kind, ave_new.kind)
    assert_equal(ave_normal.last, ave_new.last)
    assert_equal(ave_normal.first, ave_new.first)
    # for the above to work, the upsampling just about had to, but
    # we'll add a couple extra checks anyway
    assert (len(ave_up.times) == 2 * len(ave_normal.times))
    assert (ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
def test_evoked_filter():
    """Test filtering evoked data."""
    # Mostly a smoke test; the Epochs and Raw filtering tests are thorough.
    evoked = read_evokeds(fname, 0).pick_types('grad')
    evoked.data.fill(1.)
    assert round(evoked.info['lowpass']) == 172
    low_passed = evoked.copy().filter(None, 40., fir_design='firwin')
    # The new low-pass edge is recorded in the measurement info.
    assert low_passed.info['lowpass'] == 40.
    # filter() ran on a copy, so the original constant data is untouched.
    assert_allclose(evoked.data, 1., atol=1e-6)
def test_evoked_detrend():
    """Test order-0 detrending of evoked data."""
    detrended = read_evokeds(fname, 0)
    manual = read_evokeds(fname, 0)
    # Order-0 detrend is just per-channel mean removal.
    detrended.detrend(0)
    manual.data = manual.data - manual.data.mean(axis=1, keepdims=True)
    picks = pick_types(detrended.info, meg=True, eeg=True, exclude='bads')
    assert_allclose(detrended.data[picks], manual.data[picks],
                    rtol=1e-8, atol=1e-16)
@requires_pandas
def test_to_data_frame():
    """Test evoked Pandas exporter."""
    ave = read_evokeds(fname, 0)
    # Out-of-range picks must be rejected.
    pytest.raises(ValueError, ave.to_data_frame, picks=np.arange(400))
    df = ave.to_data_frame()
    # One DataFrame column per channel.
    assert ((df.columns == ave.ch_names).all())
    df = ave.to_data_frame(index=None).reset_index()
    assert ('time' in df.columns)
    # Exported values are scaled copies of the channel data
    # (1e13 / 1e15 unit-conversion factors).
    assert_array_equal(df.values[:, 1], ave.data[0] * 1e13)
    assert_array_equal(df.values[:, 3], ave.data[2] * 1e15)
def test_evoked_proj():
    """Test SSP proj operations."""
    # Read with and without applying projections; each stored projector's
    # `active` flag must reflect the `proj` argument.
    for proj in [True, False]:
        ave = read_evokeds(fname, condition=0, proj=proj)
        assert (all(p['active'] == proj for p in ave.info['projs']))
        # test adding / deleting proj
        if proj:
            # Once applied, projections can be neither extended nor removed.
            pytest.raises(ValueError, ave.add_proj, [],
                          {'remove_existing': True})
            pytest.raises(ValueError, ave.del_proj, 0)
        else:
            projs = deepcopy(ave.info['projs'])
            n_proj = len(ave.info['projs'])
            ave.del_proj(0)
            assert (len(ave.info['projs']) == n_proj - 1)
            # Test that already existing projections are not added.
            ave.add_proj(projs, remove_existing=False)
            assert (len(ave.info['projs']) == n_proj)
            ave.add_proj(projs[:-1], remove_existing=True)
            assert (len(ave.info['projs']) == n_proj - 1)
    # apply_proj must be equivalent to left-multiplying the data by the
    # projector matrix.
    ave = read_evokeds(fname, condition=0, proj=False)
    data = ave.data.copy()
    ave.apply_proj()
    assert_allclose(np.dot(ave._projector, data), ave.data)
def test_get_peak():
    """Test peak getter."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    # Invalid time windows, modes, and channel types must be rejected.
    pytest.raises(ValueError, evoked.get_peak, ch_type='mag', tmin=1)
    pytest.raises(ValueError, evoked.get_peak, ch_type='mag', tmax=0.9)
    pytest.raises(ValueError, evoked.get_peak, ch_type='mag', tmin=0.02,
                  tmax=0.01)
    pytest.raises(ValueError, evoked.get_peak, ch_type='mag', mode='foo')
    pytest.raises(RuntimeError, evoked.get_peak, ch_type=None, mode='foo')
    pytest.raises(ValueError, evoked.get_peak, ch_type='misc', mode='foo')
    # By default the peak is returned as (channel name, peak time).
    ch_name, time_idx = evoked.get_peak(ch_type='mag')
    assert (ch_name in evoked.ch_names)
    assert (time_idx in evoked.times)
    # time_as_index returns a sample index; return_amplitude adds the
    # peak value as a third element.
    ch_name, time_idx, max_amp = evoked.get_peak(ch_type='mag',
                                                 time_as_index=True,
                                                 return_amplitude=True)
    assert (time_idx < len(evoked.times))
    assert_equal(ch_name, 'MEG 1421')
    assert_allclose(max_amp, 7.17057e-13, rtol=1e-5)
    # merge_grads is only valid for gradiometer channel pairs.
    pytest.raises(ValueError, evoked.get_peak, ch_type='mag', merge_grads=True)
    ch_name, time_idx = evoked.get_peak(ch_type='grad', merge_grads=True)
    assert_equal(ch_name, 'MEG 244X')
    # Exercise the private helper directly on a tiny hand-made array.
    data = np.array([[0., 1., 2.],
                     [0., -3., 0]])
    times = np.array([.1, .2, .3])
    # abs mode: largest magnitude is -3 at channel 1, sample 1.
    ch_idx, time_idx, max_amp = _get_peak(data, times, mode='abs')
    assert_equal(ch_idx, 1)
    assert_equal(time_idx, 1)
    assert_allclose(max_amp, -3.)
    ch_idx, time_idx, max_amp = _get_peak(data * -1, times, mode='neg')
    assert_equal(ch_idx, 0)
    assert_equal(time_idx, 2)
    assert_allclose(max_amp, -2.)
    ch_idx, time_idx, max_amp = _get_peak(data, times, mode='pos')
    assert_equal(ch_idx, 0)
    assert_equal(time_idx, 2)
    assert_allclose(max_amp, 2.)
    # 'neg'/'pos' modes require values of the matching sign to exist.
    pytest.raises(ValueError, _get_peak, data + 1e3, times, mode='neg')
    pytest.raises(ValueError, _get_peak, data - 1e3, times, mode='pos')
def test_drop_channels_mixin():
    """Test the channel-dropping mixin on evoked data."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    to_drop = evoked.ch_names[:3]
    remaining = evoked.ch_names[3:]
    all_names = evoked.ch_names
    # Dropping on a copy must leave the original untouched.
    pruned_copy = evoked.copy().drop_channels(to_drop)
    assert_equal(remaining, pruned_copy.ch_names)
    assert_equal(all_names, evoked.ch_names)
    assert_equal(len(all_names), len(evoked.data))
    # Dropping a single channel works with a one-element list too.
    single_drop = evoked.copy().drop_channels([to_drop[0]])
    assert_equal(single_drop.ch_names, all_names[1:])
    # In-place drop removes both names and the matching data rows.
    evoked.drop_channels(to_drop)
    assert_equal(remaining, evoked.ch_names)
    assert_equal(len(remaining), len(evoked.data))
    # Non-string entries and unknown channel names must raise.
    for bad_arg in ([1, 2], "fake", ["fake"]):
        pytest.raises(ValueError, evoked.drop_channels, bad_arg)
def test_pick_channels_mixin():
    """Test the channel-picking mixin on evoked data."""
    evoked = read_evokeds(fname, condition=0, proj=True)
    keep = evoked.ch_names[:3]
    all_names = evoked.ch_names
    # Picking on a copy must leave the original untouched.
    subset = evoked.copy().pick_channels(keep)
    assert_equal(keep, subset.ch_names)
    assert_equal(all_names, evoked.ch_names)
    assert_equal(len(all_names), len(evoked.data))
    # In-place pick keeps only the requested names and data rows.
    evoked.pick_channels(keep)
    assert_equal(keep, evoked.ch_names)
    assert_equal(len(keep), len(evoked.data))
    # pick_types restricted to EEG must remove all MEG channels.
    evoked = read_evokeds(fname, condition=0, proj=True)
    assert 'meg' in evoked
    assert 'eeg' in evoked
    evoked.pick_types(meg=False, eeg=True)
    assert 'meg' not in evoked
    assert 'eeg' in evoked
    assert len(evoked.ch_names) == 60
def test_equalize_channels():
    """Test equalization of channel sets across evoked instances."""
    first = read_evokeds(fname, condition=0, proj=True)
    second = first.copy()
    shared = first.ch_names[2:]
    # Drop a different channel from each copy; their intersection is
    # exactly `shared`.
    first.drop_channels(first.ch_names[:1])
    second.drop_channels(second.ch_names[1:2])
    pair = [first, second]
    equalize_channels(pair)
    for inst in pair:
        assert_equal(shared, inst.ch_names)
def test_arithmetic():
    """Test evoked arithmetic."""
    ev = read_evokeds(fname, condition=0)
    # two synthetic evokeds: all +1 (nave=20) and all -1 (nave=10)
    ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
    ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)
    # combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
    # data should be added according to their `nave` weights
    # nave = ev1.nave + ev2.nave
    ev = combine_evoked([ev1, ev2], weights='nave')
    assert_equal(ev.nave, ev1.nave + ev2.nave)
    # weighted mean: (20 * 1 + 10 * (-1)) / 30 == 1 / 3
    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))
    # with same trial counts, a bunch of things should be equivalent
    for weights in ('nave', 'equal', [0.5, 0.5]):
        ev = combine_evoked([ev1, ev1], weights=weights)
        assert_allclose(ev.data, ev1.data)
        assert_equal(ev.nave, 2 * ev1.nave)
        ev = combine_evoked([ev1, -ev1], weights=weights)
        assert_allclose(ev.data, 0., atol=1e-20)
        assert_equal(ev.nave, 2 * ev1.nave)
    ev = combine_evoked([ev1, -ev1], weights='equal')
    assert_allclose(ev.data, 0., atol=1e-20)
    assert_equal(ev.nave, 2 * ev1.nave)
    # unequal nave with 'equal' weights: effective nave follows the
    # inverse-variance formula below
    ev = combine_evoked([ev1, -ev2], weights='equal')
    expected = int(round(1. / (0.25 / ev1.nave + 0.25 / ev2.nave)))
    assert_equal(expected, 27)  # this is reasonable
    assert_equal(ev.nave, expected)
    # default comment behavior if evoked.comment is None
    old_comment1 = ev1.comment
    old_comment2 = ev2.comment
    ev1.comment = None
    ev = combine_evoked([ev1, -ev2], weights=[1, -1])
    assert_equal(ev.comment.count('unknown'), 2)
    assert ('-unknown' in ev.comment)
    assert (' + ' in ev.comment)
    ev1.comment = old_comment1
    ev2.comment = old_comment2
    # equal weighting
    ev = combine_evoked([ev1, ev2], weights='equal')
    assert_allclose(ev.data, np.zeros_like(ev1.data))
    # combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
    ev = combine_evoked([ev1, ev2], weights=[1, 0])
    assert_equal(ev.nave, ev1.nave)
    assert_allclose(ev.data, ev1.data)
    # simple subtraction (like in oddball)
    ev = combine_evoked([ev1, ev2], weights=[1, -1])
    assert_allclose(ev.data, 2 * np.ones_like(ev1.data))
    # invalid weight specifications must raise
    pytest.raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
    pytest.raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])
    # grand average
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    ch_names = evoked1.ch_names[2:]
    evoked1.info['bads'] = ['EEG 008']  # test interpolation
    evoked1.drop_channels(evoked1.ch_names[:1])
    evoked2.drop_channels(evoked2.ch_names[1:2])
    gave = grand_average([evoked1, evoked2])
    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
    assert_equal(ch_names, gave.ch_names)
    assert_equal(gave.nave, 2)
    pytest.raises(TypeError, grand_average, [1, evoked1])
    # test channel (re)ordering
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    data2 = evoked2.data  # assumes everything is ordered to the first evoked
    data = (evoked1.data + evoked2.data) / 2
    evoked2.reorder_channels(evoked2.ch_names[::-1])
    assert not np.allclose(data2, evoked2.data)
    # grand_average must warn about reordering and still average correctly
    with pytest.warns(RuntimeWarning, match='reordering'):
        ev3 = grand_average((evoked1, evoked2))
    assert np.allclose(ev3.data, data)
    assert evoked1.ch_names != evoked2.ch_names
    assert evoked1.ch_names == ev3.ch_names
def test_array_epochs():
    """Test creating evoked from array."""
    tempdir = _TempDir()
    # creating: 20 EEG channels, 60 samples of random data
    rng = np.random.RandomState(42)
    data1 = rng.randn(20, 60)
    sfreq = 1e3
    ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
    types = ['eeg'] * 20
    info = create_info(ch_names, sfreq, types)
    evoked1 = EvokedArray(data1, info, tmin=-0.01)
    # save, read, and compare evokeds
    tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
    evoked1.save(tmp_fname)
    evoked2 = read_evokeds(tmp_fname)[0]
    data2 = evoked2.data
    # round-trip through disk must preserve data and metadata
    assert_allclose(data1, data2)
    assert_allclose(evoked1.times, evoked2.times)
    assert_equal(evoked1.first, evoked2.first)
    assert_equal(evoked1.last, evoked2.last)
    assert_equal(evoked1.kind, evoked2.kind)
    assert_equal(evoked1.nave, evoked2.nave)
    # now compare with EpochsArray (with single epoch)
    data3 = data1[np.newaxis, :, :]
    events = np.c_[10, 0, 1]
    evoked3 = EpochsArray(data3, info, events=events, tmin=-0.01).average()
    # averaging a single epoch must reproduce the EvokedArray exactly
    assert_allclose(evoked1.data, evoked3.data)
    assert_allclose(evoked1.times, evoked3.times)
    assert_equal(evoked1.first, evoked3.first)
    assert_equal(evoked1.last, evoked3.last)
    assert_equal(evoked1.kind, evoked3.kind)
    assert_equal(evoked1.nave, evoked3.nave)
    # test kind check
    pytest.raises(TypeError, EvokedArray, data1, info, tmin=0, kind=1)
    pytest.raises(ValueError, EvokedArray, data1, info, kind='mean')
    # test match between channels info and data
    ch_names = ['EEG %03d' % (i + 1) for i in range(19)]
    types = ['eeg'] * 19
    info = create_info(ch_names, sfreq, types)
    # 19-channel info paired with 20-channel data must be rejected
    pytest.raises(ValueError, EvokedArray, data1, info, tmin=-0.01)
def test_time_as_index():
    """Test time as index."""
    evoked = read_evokeds(fname, condition=0).crop(-.1, .1)
    # with rounding, the crop limits map onto the first and last samples
    first_last = evoked.time_as_index([-.1, .1], use_rounding=True)
    assert_array_equal(first_last, [0, len(evoked.times) - 1])
def test_add_channels():
    """Test evoked splitting / re-appending channel types."""
    evoked = read_evokeds(fname, condition=0)
    # fake an HPI subsystem entry so add_channels must carry it along
    hpi_coils = [{'event_bits': []},
                 {'event_bits': np.array([256, 0, 256, 256])},
                 {'event_bits': np.array([512, 0, 512, 512])}]
    evoked.info['hpi_subsystem'] = dict(hpi_coils=hpi_coils, ncoil=2)
    evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_meg = evoked.copy().pick_types(meg=True)
    evoked_stim = evoked.copy().pick_types(meg=False, stim=True)
    evoked_eeg_meg = evoked.copy().pick_types(meg=True, eeg=True)
    evoked_new = evoked_meg.copy().add_channels([evoked_eeg, evoked_stim])
    assert (all(ch in evoked_new.ch_names
                for ch in evoked_stim.ch_names + evoked_meg.ch_names))
    evoked_new = evoked_meg.copy().add_channels([evoked_eeg])
    # BUG FIX: the original line asserted a bare generator expression,
    # which is always truthy, so the membership check never ran.  Wrap it
    # in all() and check against the MEG+EEG channel set (the original
    # iterable, evoked.ch_names, also contains stim channels, which the
    # assertions below verify are absent from evoked_new).
    assert all(ch in evoked_new.ch_names for ch in evoked_eeg_meg.ch_names)
    assert_array_equal(evoked_new.data, evoked_eeg_meg.data)
    assert (all(ch not in evoked_new.ch_names
                for ch in evoked_stim.ch_names))
    # Now test errors
    evoked_badsf = evoked_eeg.copy()
    evoked_badsf.info['sfreq'] = 3.1415927  # mismatched sampling rate
    evoked_eeg = evoked_eeg.crop(-.1, .1)  # mismatched time span
    pytest.raises(RuntimeError, evoked_meg.add_channels, [evoked_badsf])
    pytest.raises(AssertionError, evoked_meg.add_channels, [evoked_eeg])
    pytest.raises(ValueError, evoked_meg.add_channels, [evoked_meg])
    pytest.raises(TypeError, evoked_meg.add_channels, evoked_badsf)
def test_evoked_baseline():
    """Test evoked baseline."""
    template = read_evokeds(fname, condition=0, baseline=None)
    # Constant (all-ones) data equals its own mean, so subtracting the
    # full-window mean baseline must leave a matrix of zeros.
    ones = np.ones_like(template.data)
    constant = EvokedArray(ones, template.info, template.times[0])
    constant.apply_baseline((None, None))
    assert_allclose(constant.data, np.zeros_like(constant.data))
# NOTE(review): mne test helper — presumably executes this module's tests
# when the file is run directly as a script.
run_tests_if_main()
| {
"repo_name": "teonlamont/mne-python",
"path": "mne/tests/test_evoked.py",
"copies": "2",
"size": "23775",
"license": "bsd-3-clause",
"hash": 6466565471980801000,
"line_mean": 38.4933554817,
"line_max": 79,
"alpha_frac": 0.6449211356,
"autogenerated": false,
"ratio": 2.917535893974721,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4562457029574721,
"avg_score": null,
"num_lines": null
} |
from copy import deepcopy
from functools import partial
import glob
import itertools as itt
import os
import os.path as op
import warnings
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from nose.tools import assert_true, assert_raises, assert_not_equal
from mne.datasets import testing
from mne.io.constants import FIFF
from mne.io import RawArray, concatenate_raws, read_raw_fif
from mne.io.tests.test_raw import _test_concat, _test_raw_reader
from mne import (concatenate_events, find_events, equalize_channels,
compute_proj_raw, pick_types, pick_channels, create_info)
from mne.utils import (_TempDir, requires_pandas, slow_test,
requires_mne, run_subprocess, run_tests_if_main)
from mne.externals.six.moves import zip, cPickle as pickle
from mne.io.proc_history import _get_sss_rank
from mne.io.pick import _picks_by_type
from mne.annotations import Annotations
from mne.tests.common import assert_naming
warnings.simplefilter('always')  # enable b/c these tests throw warnings
# Files from the (optionally downloaded) mne testing dataset
testing_path = testing.data_path(download=False)
data_dir = op.join(testing_path, 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
ms_fname = op.join(testing_path, 'SSS', 'test_move_anon_raw.fif')
# Small fixture files shipped inside the package's tests/data directory
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
test_fif_fname = op.join(base_dir, 'test_raw.fif')
test_fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif')
bad_file_works = op.join(base_dir, 'test_bads.txt')
bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt')
hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
def test_fix_types():
    """Test fixing of channel types."""
    # (filename, whether magnetometer coil types should be changed) pairs
    for fname, change in ((hp_fif_fname, True), (test_fif_fname, False),
                          (ctf_fname, False)):
        raw = read_raw_fif(fname, add_eeg_ref=False)
        mag_picks = pick_types(raw.info, meg='mag')
        other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), mag_picks)
        # we don't actually have any files suffering from this problem, so
        # fake it
        if change:
            for ii in mag_picks:
                raw.info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T2
        orig_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
        raw.fix_mag_coil_types()
        new_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
        if not change:
            # nothing needed fixing: all types must be untouched
            assert_array_equal(orig_types, new_types)
        else:
            # only the magnetometer coil types change, and all become T3
            assert_array_equal(orig_types[other_picks], new_types[other_picks])
            assert_true((orig_types[mag_picks] != new_types[mag_picks]).all())
            assert_true((new_types[mag_picks] ==
                         FIFF.FIFFV_COIL_VV_MAG_T3).all())
def test_concat():
    """Test RawFIF concatenation."""
    # crop to a tiny span first to keep memory usage and runtime low
    tempdir = _TempDir()
    raw = read_raw_fif(test_fif_fname, add_eeg_ref=False)
    raw.crop(0, 2., copy=False)
    short_fname = op.join(tempdir, 'test_raw.fif')
    raw.save(short_fname)
    # delegate the actual checks to the shared concatenation helper
    reader = partial(read_raw_fif, add_eeg_ref=False)
    _test_concat(reader, short_fname)
@testing.requires_testing_data
def test_hash_raw():
    """Test hashing raw objects."""
    raw = read_raw_fif(fif_fname, add_eeg_ref=False)
    # hashing an unloaded Raw must fail
    assert_raises(RuntimeError, raw.__hash__)
    raw = read_raw_fif(fif_fname, add_eeg_ref=False).crop(0, 0.5, copy=False)
    size_lazy = raw._size
    raw.load_data()
    size_loaded = raw._size
    # loading the data grows the in-memory footprint
    assert_true(size_lazy < size_loaded)
    other = read_raw_fif(fif_fname, add_eeg_ref=False).crop(0, 0.5,
                                                            copy=False)
    other.load_data()
    assert_equal(hash(raw), hash(other))
    # do NOT use assert_equal here, failing output is terrible
    assert_equal(pickle.dumps(raw), pickle.dumps(other))
    # any data change must change the hash
    other._data[0, 0] -= 1
    assert_not_equal(hash(raw), hash(other))
@testing.requires_testing_data
def test_maxshield():
    """Test maxshield warning."""
    with warnings.catch_warnings(record=True) as recorded:
        warnings.simplefilter('always')
        read_raw_fif(ms_fname, allow_maxshield=True, add_eeg_ref=False)
        # exactly one warning, attributed to this test module
        assert_equal(len(recorded), 1)
        assert_true('test_raw_fiff.py' in recorded[0].filename)
@testing.requires_testing_data
def test_subject_info():
    """Test reading subject information."""
    tempdir = _TempDir()
    raw = read_raw_fif(fif_fname, add_eeg_ref=False).crop(0, 1, copy=False)
    assert_true(raw.info['subject_info'] is None)
    # fabricate subject data and round-trip it through save/read
    subject_info = dict(id=1, his_id='foobar', last_name='bar',
                        first_name='foo', birthday=(1901, 2, 3),
                        sex=0, hand=1)
    raw.info['subject_info'] = subject_info
    out_fname = op.join(tempdir, 'test_subj_info_raw.fif')
    raw.save(out_fname, overwrite=True)
    raw_read = read_raw_fif(out_fname, add_eeg_ref=False)
    for key, val in subject_info.items():
        assert_equal(val, raw_read.info['subject_info'][key])
    # measurement date must survive the round trip too
    assert_equal(raw.info['meas_date'], raw_read.info['meas_date'])
@testing.requires_testing_data
def test_copy_append():
    """Test raw copying and appending combinations."""
    preloaded = read_raw_fif(fif_fname, preload=True,
                             add_eeg_ref=False).copy()
    full = read_raw_fif(fif_fname, add_eeg_ref=False)
    full.append(preloaded)
    combined = full[:, :][0]
    # appending a full copy doubles the number of samples
    assert_equal(combined.shape[1], 2 * preloaded._data.shape[1])
@slow_test
@testing.requires_testing_data
def test_rank_estimation():
    """Test raw rank estimation."""
    # test a plain and a maxfiltered (SSS) file, with/without scalings
    iter_tests = itt.product(
        [fif_fname, hp_fif_fname],  # sss
        ['norm', dict(mag=1e11, grad=1e9, eeg=1e5)]
    )
    for fname, scalings in iter_tests:
        raw = read_raw_fif(fname, add_eeg_ref=False)
        (_, picks_meg), (_, picks_eeg) = _picks_by_type(raw.info,
                                                        meg_combined=True)
        n_meg = len(picks_meg)
        n_eeg = len(picks_eeg)
        raw = read_raw_fif(fname, preload=True, add_eeg_ref=False)
        if 'proc_history' not in raw.info:
            expected_rank = n_meg + n_eeg
        else:
            # SSS reduces the MEG rank; read it from the processing history
            mf = raw.info['proc_history'][0]['max_info']
            expected_rank = _get_sss_rank(mf) + n_eeg
        assert_array_equal(raw.estimate_rank(scalings=scalings), expected_rank)
        # EEG-only estimate
        assert_array_equal(raw.estimate_rank(picks=picks_eeg,
                                             scalings=scalings),
                           n_eeg)
        raw = read_raw_fif(fname, preload=False, add_eeg_ref=False)
        if 'sss' in fname:
            tstart, tstop = 0., 30.
            raw.add_proj(compute_proj_raw(raw))
            raw.apply_proj()
        else:
            tstart, tstop = 10., 20.
            raw.apply_proj()
        n_proj = len(raw.info['projs'])
        # each applied projector removes one dimension (the SSS case
        # loses exactly one here)
        assert_array_equal(raw.estimate_rank(tstart=tstart, tstop=tstop,
                                             scalings=scalings),
                           expected_rank - (1 if 'sss' in fname else n_proj))
@testing.requires_testing_data
def test_output_formats():
    """Test saving and loading raw data using multiple formats."""
    tempdir = _TempDir()
    # tolerances matched to each on-disk sample format's precision
    formats = ['short', 'int', 'single', 'double']
    tols = [1e-4, 1e-7, 1e-7, 1e-15]
    # let's fake a raw file with different formats
    raw = read_raw_fif(test_fif_fname,
                       add_eeg_ref=False).crop(0, 1, copy=False)
    temp_file = op.join(tempdir, 'raw.fif')
    for ii, (fmt, tol) in enumerate(zip(formats, tols)):
        # Let's test the overwriting error throwing while we're at it
        if ii > 0:
            assert_raises(IOError, raw.save, temp_file, fmt=fmt)
        raw.save(temp_file, fmt=fmt, overwrite=True)
        raw2 = read_raw_fif(temp_file, add_eeg_ref=False)
        raw2_data = raw2[:, :][0]
        # round-trip must agree within the format's precision
        assert_allclose(raw2_data, raw[:, :][0], rtol=tol, atol=1e-25)
        assert_equal(raw2.orig_format, fmt)
def _compare_combo(raw, new, times, n_times):
"""Compare data."""
for ti in times: # let's do a subset of points for speed
orig = raw[:, ti % n_times][0]
# these are almost_equals because of possible dtype differences
assert_allclose(orig, new[:, ti][0])
@slow_test
@testing.requires_testing_data
def test_multiple_files():
    """Test loading multiple files simultaneously."""
    # split file
    tempdir = _TempDir()
    raw = read_raw_fif(fif_fname, add_eeg_ref=False).crop(0, 10, copy=False)
    raw.load_data()
    raw.load_data()  # test no operation
    split_size = 3.  # in seconds
    sfreq = raw.info['sfreq']
    nsamp = (raw.last_samp - raw.first_samp)
    # boundaries of each split, first in samples then converted to seconds
    tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
    tmaxs /= sfreq
    tmins /= sfreq
    assert_equal(raw.n_times, len(raw.times))
    # going in reverse order so the last fname is the first file (need later)
    raws = [None] * len(tmins)
    for ri in range(len(tmins) - 1, -1, -1):
        fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
        raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
        raws[ri] = read_raw_fif(fname, add_eeg_ref=False)
        assert_equal(len(raws[ri].times),
                     int(round((tmaxs[ri] - tmins[ri]) *
                               raw.info['sfreq'])) + 1)  # + 1 b/c inclusive
    events = [find_events(r, stim_channel='STI 014') for r in raws]
    last_samps = [r.last_samp for r in raws]
    first_samps = [r.first_samp for r in raws]
    # test concatenation of split file
    assert_raises(ValueError, concatenate_raws, raws, True, events[1:])
    all_raw_1, events1 = concatenate_raws(raws, preload=False,
                                          events_list=events)
    # concatenation must reproduce the original timing and data
    assert_allclose(all_raw_1.times, raw.times)
    assert_equal(raw.first_samp, all_raw_1.first_samp)
    assert_equal(raw.last_samp, all_raw_1.last_samp)
    assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
    raws[0] = read_raw_fif(fname, add_eeg_ref=False)
    all_raw_2 = concatenate_raws(raws, preload=True)
    assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
    # test proper event treatment for split files
    events2 = concatenate_events(events, first_samps, last_samps)
    events3 = find_events(all_raw_2, stim_channel='STI 014')
    assert_array_equal(events1, events2)
    assert_array_equal(events1, events3)
    # test various methods of combining files
    raw = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)
    n_times = raw.n_times
    # make sure that all our data match
    times = list(range(0, 2 * n_times, 999))
    # add potentially problematic points
    times.extend([n_times - 1, n_times, 2 * n_times - 1])
    raw_combo0 = concatenate_raws([read_raw_fif(f, add_eeg_ref=False)
                                   for f in [fif_fname, fif_fname]],
                                  preload=True)
    _compare_combo(raw, raw_combo0, times, n_times)
    raw_combo = concatenate_raws([read_raw_fif(f, add_eeg_ref=False)
                                  for f in [fif_fname, fif_fname]],
                                 preload=False)
    _compare_combo(raw, raw_combo, times, n_times)
    # memmap filenames must be unique within the temp dir
    raw_combo = concatenate_raws([read_raw_fif(f, add_eeg_ref=False)
                                  for f in [fif_fname, fif_fname]],
                                 preload='memmap8.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    with warnings.catch_warnings(record=True):  # deprecated
        assert_raises(ValueError, read_raw_fif, [fif_fname, ctf_fname])
        assert_raises(ValueError, read_raw_fif,
                      [fif_fname, fif_bad_marked_fname])
    assert_equal(raw[:, :][0].shape[1] * 2, raw_combo0[:, :][0].shape[1])
    assert_equal(raw_combo0[:, :][0].shape[1], raw_combo0.n_times)
    # with all data preloaded, result should be preloaded
    raw_combo = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)
    raw_combo.append(read_raw_fif(fif_fname, preload=True, add_eeg_ref=False))
    assert_true(raw_combo.preload is True)
    assert_equal(raw_combo.n_times, raw_combo._data.shape[1])
    _compare_combo(raw, raw_combo, times, n_times)
    # with any data not preloaded, don't set result as preloaded
    raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=True,
                                               add_eeg_ref=False),
                                  read_raw_fif(fif_fname, preload=False,
                                               add_eeg_ref=False)])
    assert_true(raw_combo.preload is False)
    assert_array_equal(find_events(raw_combo, stim_channel='STI 014'),
                       find_events(raw_combo0, stim_channel='STI 014'))
    _compare_combo(raw, raw_combo, times, n_times)
    # user should be able to force data to be preloaded upon concat
    raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=False,
                                               add_eeg_ref=False),
                                  read_raw_fif(fif_fname, preload=True,
                                               add_eeg_ref=False)],
                                 preload=True)
    assert_true(raw_combo.preload is True)
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=False,
                                               add_eeg_ref=False),
                                  read_raw_fif(fif_fname, preload=True,
                                               add_eeg_ref=False)],
                                 preload='memmap3.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = concatenate_raws([
        read_raw_fif(fif_fname, preload=True, add_eeg_ref=False),
        read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)],
        preload='memmap4.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = concatenate_raws([
        read_raw_fif(fif_fname, preload=False, add_eeg_ref=False),
        read_raw_fif(fif_fname, preload=False, add_eeg_ref=False)],
        preload='memmap5.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    # verify that combining raws with different projectors throws an exception
    raw.add_proj([], remove_existing=True)
    assert_raises(ValueError, raw.append,
                  read_raw_fif(fif_fname, preload=True, add_eeg_ref=False))
    # now test event treatment for concatenated raw files
    events = [find_events(raw, stim_channel='STI 014'),
              find_events(raw, stim_channel='STI 014')]
    last_samps = [raw.last_samp, raw.last_samp]
    first_samps = [raw.first_samp, raw.first_samp]
    events = concatenate_events(events, first_samps, last_samps)
    events2 = find_events(raw_combo0, stim_channel='STI 014')
    assert_array_equal(events, events2)
    # check out the len method
    assert_equal(len(raw), raw.n_times)
    assert_equal(len(raw), raw.last_samp - raw.first_samp + 1)
@testing.requires_testing_data
def test_split_files():
    """Test writing and reading of split raw files."""
    tempdir = _TempDir()
    raw_1 = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)
    # Test a very close corner case
    raw_crop = raw_1.copy().crop(0, 1., copy=False)
    assert_allclose(raw_1.info['buffer_size_sec'], 10., atol=1e-2)  # samp rate
    split_fname = op.join(tempdir, 'split_raw.fif')
    raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')
    raw_2 = read_raw_fif(split_fname, add_eeg_ref=False)
    assert_allclose(raw_2.info['buffer_size_sec'], 1., atol=1e-2)  # samp rate
    # reading back the split must reproduce data and times exactly
    data_1, times_1 = raw_1[:, :]
    data_2, times_2 = raw_2[:, :]
    assert_array_equal(data_1, data_2)
    assert_array_equal(times_1, times_2)
    # test the case where the silly user specifies the split files
    fnames = [split_fname]
    fnames.extend(sorted(glob.glob(op.join(tempdir, 'split_raw-*.fif'))))
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        raw_2 = read_raw_fif(fnames, add_eeg_ref=False)  # deprecated list
    data_2, times_2 = raw_2[:, :]
    assert_array_equal(data_1, data_2)
    assert_array_equal(times_1, times_2)
    # test the case where we only end up with one buffer to write
    # (GH#3210). These tests rely on writing meas info and annotations
    # taking up a certain number of bytes, so if we change those functions
    # somehow, the numbers below for e.g. split_size might need to be
    # adjusted.
    raw_crop = raw_1.copy().crop(0, 5, copy=False)
    try:
        raw_crop.save(split_fname, split_size='1MB',  # too small a size
                      buffer_size_sec=1., overwrite=True)
    except ValueError as exp:
        assert_true('after writing measurement information' in str(exp), exp)
    try:
        raw_crop.save(split_fname,
                      split_size=3002276,  # still too small, now after Info
                      buffer_size_sec=1., overwrite=True)
    except ValueError as exp:
        assert_true('too large for the given split size' in str(exp), exp)
    # just barely big enough here; the right size to write exactly one buffer
    # at a time so we hit GH#3210 if we aren't careful
    raw_crop.save(split_fname, split_size='4.5MB',
                  buffer_size_sec=1., overwrite=True)
    raw_read = read_raw_fif(split_fname, add_eeg_ref=False)
    assert_allclose(raw_crop[:][0], raw_read[:][0], atol=1e-20)
    # Check our buffer arithmetic
    # 1 buffer required
    raw_crop = raw_1.copy().crop(0, 1, copy=False)
    raw_crop.save(split_fname, buffer_size_sec=1., overwrite=True)
    raw_read = read_raw_fif(split_fname, add_eeg_ref=False)
    assert_equal(len(raw_read._raw_extras[0]), 1)
    assert_equal(raw_read._raw_extras[0][0]['nsamp'], 301)
    assert_allclose(raw_crop[:][0], raw_read[:][0])
    # 2 buffers required
    raw_crop.save(split_fname, buffer_size_sec=0.5, overwrite=True)
    raw_read = read_raw_fif(split_fname, add_eeg_ref=False)
    assert_equal(len(raw_read._raw_extras[0]), 2)
    assert_equal(raw_read._raw_extras[0][0]['nsamp'], 151)
    assert_equal(raw_read._raw_extras[0][1]['nsamp'], 150)
    assert_allclose(raw_crop[:][0], raw_read[:][0])
    # 2 buffers required: buffer one sample short of the full span
    raw_crop.save(split_fname,
                  buffer_size_sec=1. - 1.01 / raw_crop.info['sfreq'],
                  overwrite=True)
    raw_read = read_raw_fif(split_fname, add_eeg_ref=False)
    assert_equal(len(raw_read._raw_extras[0]), 2)
    assert_equal(raw_read._raw_extras[0][0]['nsamp'], 300)
    assert_equal(raw_read._raw_extras[0][1]['nsamp'], 1)
    assert_allclose(raw_crop[:][0], raw_read[:][0])
    # two samples short: second buffer grows to two samples
    raw_crop.save(split_fname,
                  buffer_size_sec=1. - 2.01 / raw_crop.info['sfreq'],
                  overwrite=True)
    raw_read = read_raw_fif(split_fname, add_eeg_ref=False)
    assert_equal(len(raw_read._raw_extras[0]), 2)
    assert_equal(raw_read._raw_extras[0][0]['nsamp'], 299)
    assert_equal(raw_read._raw_extras[0][1]['nsamp'], 2)
    assert_allclose(raw_crop[:][0], raw_read[:][0])
def test_load_bad_channels():
    """Test reading/writing of bad channels."""
    tempdir = _TempDir()
    # Load correctly marked file (manually done in mne_process_raw)
    raw_marked = read_raw_fif(fif_bad_marked_fname, add_eeg_ref=False)
    correct_bads = raw_marked.info['bads']
    raw = read_raw_fif(test_fif_fname, add_eeg_ref=False)
    # Make sure it starts clean
    assert_array_equal(raw.info['bads'], [])
    # Test normal case
    raw.load_bad_channels(bad_file_works)
    # Write it out, read it in, and check
    raw.save(op.join(tempdir, 'foo_raw.fif'))
    raw_new = read_raw_fif(op.join(tempdir, 'foo_raw.fif'), add_eeg_ref=False)
    assert_equal(correct_bads, raw_new.info['bads'])
    # Reset it
    raw.info['bads'] = []
    # Test bad case: unknown channel names must raise without force
    assert_raises(ValueError, raw.load_bad_channels, bad_file_wrong)
    # Test forcing the bad case
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw.load_bad_channels(bad_file_wrong, force=True)
        # exactly one warning about the single unrecognized channel name
        n_found = sum(['1 bad channel' in str(ww.message) for ww in w])
        assert_equal(n_found, 1)  # there could be other irrelevant errors
        # write it out, read it in, and check
        raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
        raw_new = read_raw_fif(op.join(tempdir, 'foo_raw.fif'),
                               add_eeg_ref=False)
        assert_equal(correct_bads, raw_new.info['bads'])
    # Check that bad channels are cleared
    raw.load_bad_channels(None)
    raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
    raw_new = read_raw_fif(op.join(tempdir, 'foo_raw.fif'), add_eeg_ref=False)
    assert_equal([], raw_new.info['bads'])
@slow_test
@testing.requires_testing_data
def test_io_raw():
    """Test IO for raw data (Neuromag + CTF + gz)."""
    rng = np.random.RandomState(0)
    tempdir = _TempDir()
    # test unicode io: a non-ASCII and an ASCII description string
    for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']:
        with read_raw_fif(fif_fname, add_eeg_ref=False) as r:
            assert_true('Raw' in repr(r))
            assert_true(op.basename(fif_fname) in repr(r))
            desc1 = r.info['description'] = chars.decode('utf-8')
            temp_file = op.join(tempdir, 'raw.fif')
            r.save(temp_file, overwrite=True)
        with read_raw_fif(temp_file, add_eeg_ref=False) as r2:
            desc2 = r2.info['description']
        assert_equal(desc1, desc2)
    # Let's construct a simple test for IO first
    raw = read_raw_fif(fif_fname, add_eeg_ref=False).crop(0, 3.5, copy=False)
    raw.load_data()
    # put in some data that we know the values of
    data = rng.randn(raw._data.shape[0], raw._data.shape[1])
    raw._data[:, :] = data
    # save it somewhere
    fname = op.join(tempdir, 'test_copy_raw.fif')
    raw.save(fname, buffer_size_sec=1.0)
    # read it in, make sure the whole thing matches
    raw = read_raw_fif(fname, add_eeg_ref=False)
    assert_allclose(data, raw[:, :][0], rtol=1e-6, atol=1e-20)
    # let's read portions across the 1-sec tag boundary, too
    inds = raw.time_as_index([1.75, 2.25])
    sl = slice(inds[0], inds[1])
    assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20)
    # now let's do some real I/O
    fnames_in = [fif_fname, test_fif_gz_fname, ctf_fname]
    fnames_out = ['raw.fif', 'raw.fif.gz', 'raw.fif']
    for fname_in, fname_out in zip(fnames_in, fnames_out):
        fname_out = op.join(tempdir, fname_out)
        raw = read_raw_fif(fname_in, add_eeg_ref=False)
        nchan = raw.info['nchan']
        ch_names = raw.info['ch_names']
        # select up to 100 MEG channels (names starting with 'M')
        meg_channels_idx = [k for k in range(nchan)
                            if ch_names[k][0] == 'M']
        n_channels = 100
        meg_channels_idx = meg_channels_idx[:n_channels]
        start, stop = raw.time_as_index([0, 5], use_rounding=True)
        data, times = raw[meg_channels_idx, start:(stop + 1)]
        meg_ch_names = [ch_names[k] for k in meg_channels_idx]
        # Set up pick list: MEG + STI 014 - bad channels
        include = ['STI 014']
        include += meg_ch_names
        picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
                           misc=True, ref_meg=True, include=include,
                           exclude='bads')
        # Writing with drop_small_buffer True
        raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3,
                 drop_small_buffer=True, overwrite=True)
        raw2 = read_raw_fif(fname_out, add_eeg_ref=False)
        sel = pick_channels(raw2.ch_names, meg_ch_names)
        data2, times2 = raw2[sel, :]
        # the trailing partial buffer was dropped, so times stop at 3 s
        assert_true(times2.max() <= 3)
        # Writing
        raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True)
        if fname_in == fif_fname or fname_in == fif_fname + '.gz':
            assert_equal(len(raw.info['dig']), 146)
        raw2 = read_raw_fif(fname_out, add_eeg_ref=False)
        sel = pick_channels(raw2.ch_names, meg_ch_names)
        data2, times2 = raw2[sel, :]
        assert_allclose(data, data2, rtol=1e-6, atol=1e-20)
        assert_allclose(times, times2)
        assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5)
        # check transformations
        for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']:
            if raw.info[trans] is None:
                assert_true(raw2.info[trans] is None)
            else:
                assert_array_equal(raw.info[trans]['trans'],
                                   raw2.info[trans]['trans'])
                # check transformation 'from' and 'to'
                if trans.startswith('dev'):
                    from_id = FIFF.FIFFV_COORD_DEVICE
                else:
                    from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
                if trans[4:8] == 'head':
                    to_id = FIFF.FIFFV_COORD_HEAD
                else:
                    to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
                for raw_ in [raw, raw2]:
                    assert_equal(raw_.info[trans]['from'], from_id)
                    assert_equal(raw_.info[trans]['to'], to_id)
        if fname_in == fif_fname or fname_in == fif_fname + '.gz':
            assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])
    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        raw_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        raw.save(raw_badname)
        read_raw_fif(raw_badname, add_eeg_ref=False)
    assert_naming(w, 'test_raw_fiff.py', 2)
@testing.requires_testing_data
def test_io_complex():
    """Test IO with complex data types."""
    rng = np.random.RandomState(0)
    tempdir = _TempDir()
    dtypes = [np.complex64, np.complex128]
    raw = _test_raw_reader(partial(read_raw_fif, add_eeg_ref=False),
                           fname=fif_fname)
    picks = np.arange(5)
    start, stop = raw.time_as_index([0, 5])
    data_orig, _ = raw[picks, start:stop]
    for di, dtype in enumerate(dtypes):
        # inject an imaginary component into the first few channels
        imag_rand = np.array(1j * rng.randn(data_orig.shape[0],
                             data_orig.shape[1]), dtype)
        raw_cp = raw.copy()
        raw_cp._data = np.array(raw_cp._data, dtype)
        raw_cp._data[picks, start:stop] += imag_rand
        # this should throw an error because it's complex
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            raw_cp.save(op.join(tempdir, 'raw.fif'), picks, tmin=0, tmax=5,
                        overwrite=True)
            # warning gets thrown on every instance b/c simplifilter('always')
            assert_equal(len(w), 1)
        raw2 = read_raw_fif(op.join(tempdir, 'raw.fif'), add_eeg_ref=False)
        raw2_data, _ = raw2[picks, :]
        n_samp = raw2_data.shape[1]
        # complex values must survive the round trip
        assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
        # with preloading
        raw2 = read_raw_fif(op.join(tempdir, 'raw.fif'), preload=True,
                            add_eeg_ref=False)
        raw2_data, _ = raw2[picks, :]
        n_samp = raw2_data.shape[1]
        assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
@testing.requires_testing_data
def test_getitem():
    """Test getitem/indexing of Raw."""
    # exercise all three preload modes: lazy, in-memory, memmapped
    for preload in [False, True, 'memmap.dat']:
        raw = read_raw_fif(fif_fname, preload=preload, add_eeg_ref=False)
        # omitting the time slice is equivalent to taking all samples
        data, times = raw[0, :]
        data1, times1 = raw[0]
        assert_array_equal(data, data1)
        assert_array_equal(times, times1)
        data, times = raw[0:2, :]
        data1, times1 = raw[0:2]
        assert_array_equal(data, data1)
        assert_array_equal(times, times1)
        # a list of channel indices works like a slice
        data1, times1 = raw[[0, 1]]
        assert_array_equal(data, data1)
        assert_array_equal(times, times1)
        # negative channel indices count from the end
        assert_array_equal(raw[-10:, :][0],
                           raw[len(raw.ch_names) - 10:, :][0])
        # out-of-range negative slice start must raise
        assert_raises(ValueError, raw.__getitem__,
                      (slice(-len(raw.ch_names) - 1), slice(None)))
@testing.requires_testing_data
def test_proj():
    """Test SSP proj operations.

    Covers: add/delete of projectors, equivalence of apply_proj() with a
    manual projector multiplication, on-save application of projs, and a
    tiny EEG-only round-trip through disk.
    """
    tempdir = _TempDir()
    for proj in [True, False]:
        raw = read_raw_fif(fif_fname, preload=False, add_eeg_ref=False)
        if proj:
            raw.apply_proj()
        # after apply_proj() all projs are active; before, none are
        assert_true(all(p['active'] == proj for p in raw.info['projs']))
        data, times = raw[0:2, :]
        data1, times1 = raw[0:2]
        assert_array_equal(data, data1)
        assert_array_equal(times, times1)
        # test adding / deleting proj
        if proj:
            # NOTE(review): the dict below is passed positionally, so it
            # lands in add_proj's second parameter as a truthy value rather
            # than as keyword arguments; the ValueError is still raised, but
            # keyword syntax would be clearer -- confirm intent.
            assert_raises(ValueError, raw.add_proj, [],
                          {'remove_existing': True})
            assert_raises(ValueError, raw.del_proj, 0)
        else:
            projs = deepcopy(raw.info['projs'])
            n_proj = len(raw.info['projs'])
            raw.del_proj(0)
            assert_equal(len(raw.info['projs']), n_proj - 1)
            raw.add_proj(projs, remove_existing=False)
            # Test that already existing projections are not added.
            assert_equal(len(raw.info['projs']), n_proj)
            raw.add_proj(projs[:-1], remove_existing=True)
            assert_equal(len(raw.info['projs']), n_proj - 1)
    # test apply_proj() with and without preload
    for preload in [True, False]:
        raw = read_raw_fif(fif_fname, preload=preload, proj=False,
                           add_eeg_ref=False)
        data, times = raw[:, 0:2]
        raw.apply_proj()
        # reference: project the raw data manually with the projector matrix
        data_proj_1 = np.dot(raw._projector, data)
        # load the file again without proj
        raw = read_raw_fif(fif_fname, preload=preload, proj=False,
                           add_eeg_ref=False)
        # write the file with proj. activated, make sure proj has been applied
        raw.save(op.join(tempdir, 'raw.fif'), proj=True, overwrite=True)
        raw2 = read_raw_fif(op.join(tempdir, 'raw.fif'), proj=False,
                            add_eeg_ref=False)
        data_proj_2, _ = raw2[:, 0:2]
        assert_allclose(data_proj_1, data_proj_2)
        assert_true(all(p['active'] for p in raw2.info['projs']))
        # read orig file with proj. active
        raw2 = read_raw_fif(fif_fname, preload=preload, add_eeg_ref=False)
        raw2.apply_proj()
        data_proj_2, _ = raw2[:, 0:2]
        assert_allclose(data_proj_1, data_proj_2)
        assert_true(all(p['active'] for p in raw2.info['projs']))
        # test that apply_proj works
        raw.apply_proj()
        data_proj_2, _ = raw[:, 0:2]
        assert_allclose(data_proj_1, data_proj_2)
        # projection is idempotent: re-projecting changes nothing
        assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2))
    tempdir = _TempDir()
    out_fname = op.join(tempdir, 'test_raw.fif')
    raw = read_raw_fif(test_fif_fname, preload=True,
                       add_eeg_ref=False).crop(0, 0.002, copy=False)
    raw.pick_types(meg=False, eeg=True)
    raw.info['projs'] = [raw.info['projs'][-1]]
    raw._data.fill(0)
    raw._data[-1] = 1.
    raw.save(out_fname)
    raw = read_raw_fif(out_fname, preload=False, add_eeg_ref=False)
    raw.apply_proj()
    assert_allclose(raw[:, :][0][:1], raw[0, :][0])
@testing.requires_testing_data
def test_preload_modify():
    """Test preloading and modifying data.

    Writes random data into the first half of the gradiometer channels,
    saves, re-reads, and verifies the round-trip. Assignment must fail
    with RuntimeError when the data are not preloaded.
    """
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    for preload in [False, True, 'memmap.dat']:
        raw = read_raw_fif(fif_fname, preload=preload, add_eeg_ref=False)
        nsamp = raw.last_samp - raw.first_samp + 1
        picks = pick_types(raw.info, meg='grad', exclude='bads')
        data = rng.randn(len(picks), nsamp // 2)
        try:
            raw[picks, :nsamp // 2] = data
        except RuntimeError as err:
            if not preload:
                # assignment requires preloaded data; this failure is expected
                continue
            else:
                raise err
        tmp_fname = op.join(tempdir, 'raw.fif')
        raw.save(tmp_fname, overwrite=True)
        raw_new = read_raw_fif(tmp_fname, add_eeg_ref=False)
        # bugfix: use floor division here -- ``nsamp / 2`` is a float on
        # Python 3 and a float slice index raises TypeError (the write
        # above already used ``//``)
        data_new, _ = raw_new[picks, :nsamp // 2]
        assert_allclose(data, data_new)
@slow_test
@testing.requires_testing_data
def test_filter():
    """Test filtering (FIR and IIR) and Raw.apply_function interface.

    Checks that complementary FIR/IIR filters sum back to the original
    signal, that unpicked channels are untouched, that notch filtering
    matches an equivalent band-stop, and that filtering updates the
    info['lowpass']/info['highpass'] fields.
    """
    raw = read_raw_fif(fif_fname, add_eeg_ref=False).crop(0, 7, copy=False)
    raw.load_data()
    sig_dec_notch = 12
    sig_dec_notch_fit = 12
    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
    picks = picks_meg[:4]
    trans = 2.0
    filter_params = dict(picks=picks, filter_length='auto',
                         h_trans_bandwidth=trans, l_trans_bandwidth=trans,
                         phase='zero', fir_window='hamming')
    raw_lp = raw.copy().filter(None, 8.0, **filter_params)
    raw_hp = raw.copy().filter(16.0, None, **filter_params)
    raw_bp = raw.copy().filter(8.0 + trans, 16.0 - trans, **filter_params)
    # l_freq > h_freq requests a band-stop filter
    raw_bs = raw.copy().filter(16.0, 8.0, **filter_params)
    data, _ = raw[picks, :]
    lp_data, _ = raw_lp[picks, :]
    hp_data, _ = raw_hp[picks, :]
    bp_data, _ = raw_bp[picks, :]
    bs_data, _ = raw_bs[picks, :]
    tols = dict(atol=1e-20, rtol=1e-5)
    # complementary filters must reconstruct the original data
    assert_allclose(bs_data, lp_data + hp_data, **tols)
    assert_allclose(data, lp_data + bp_data + hp_data, **tols)
    assert_allclose(data, bp_data + bs_data, **tols)
    filter_params_iir = dict(picks=picks, n_jobs=2, method='iir',
                             iir_params=dict(output='ba'))
    raw_lp_iir = raw.copy().filter(None, 4.0, **filter_params_iir)
    raw_hp_iir = raw.copy().filter(8.0, None, **filter_params_iir)
    raw_bp_iir = raw.copy().filter(4.0, 8.0, **filter_params_iir)
    del filter_params_iir
    lp_data_iir, _ = raw_lp_iir[picks, :]
    hp_data_iir, _ = raw_hp_iir[picks, :]
    bp_data_iir, _ = raw_bp_iir[picks, :]
    summation = lp_data_iir + hp_data_iir + bp_data_iir
    # ignore the filter edges when comparing
    assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100], 11)
    # make sure we didn't touch other channels
    data, _ = raw[picks_meg[4:], :]
    bp_data, _ = raw_bp[picks_meg[4:], :]
    assert_array_equal(data, bp_data)
    bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :]
    assert_array_equal(data, bp_data_iir)
    # ... and that inplace changes are inplace
    raw_copy = raw.copy()
    raw_copy.filter(None, 20., n_jobs=2, **filter_params)
    assert_true(raw._data[0, 0] != raw_copy._data[0, 0])
    assert_equal(raw.copy().filter(None, 20., **filter_params)._data,
                 raw_copy._data)
    # do a very simple check on line filtering
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        raw_bs = raw.copy().filter(60.0 + trans, 60.0 - trans, **filter_params)
        data_bs, _ = raw_bs[picks, :]
        raw_notch = raw.copy().notch_filter(
            60.0, picks=picks, n_jobs=2, method='fir', filter_length='auto',
            trans_bandwidth=2 * trans)
    data_notch, _ = raw_notch[picks, :]
    # notch at 60 Hz should match the equivalent band-stop filter
    assert_array_almost_equal(data_bs, data_notch, sig_dec_notch)
    # now use the sinusoidal fitting
    raw_notch = raw.copy().notch_filter(
        None, picks=picks, n_jobs=2, method='spectrum_fit')
    data_notch, _ = raw_notch[picks, :]
    data, _ = raw[picks, :]
    assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
    # filter should set the "lowpass" and "highpass" parameters
    raw = RawArray(np.random.randn(3, 1000),
                   create_info(3, 1000., ['eeg'] * 2 + ['stim']))
    raw.info['lowpass'] = raw.info['highpass'] = None
    for kind in ('none', 'lowpass', 'highpass', 'bandpass', 'bandstop'):
        print(kind)
        h_freq = l_freq = None
        if kind in ('lowpass', 'bandpass'):
            h_freq = 70
        if kind in ('highpass', 'bandpass'):
            l_freq = 30
        if kind == 'bandstop':
            l_freq, h_freq = 70, 30
        assert_true(raw.info['lowpass'] is None)
        assert_true(raw.info['highpass'] is None)
        kwargs = dict(l_trans_bandwidth=20, h_trans_bandwidth=20,
                      filter_length='auto', phase='zero', fir_window='hann')
        # filtering only a subset of data channels must NOT update info
        raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(1),
                                     **kwargs)
        assert_true(raw.info['lowpass'] is None)
        assert_true(raw.info['highpass'] is None)
        raw_filt = raw.copy().filter(l_freq, h_freq, **kwargs)
        # band-stop filtering does not change lowpass/highpass
        wanted_h = h_freq if kind != 'bandstop' else None
        wanted_l = l_freq if kind != 'bandstop' else None
        assert_equal(raw_filt.info['lowpass'], wanted_h)
        assert_equal(raw_filt.info['highpass'], wanted_l)
        # Using all data channels should still set the params (GH#3259)
        raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(2),
                                     **kwargs)
        assert_equal(raw_filt.info['lowpass'], wanted_h)
        assert_equal(raw_filt.info['highpass'], wanted_l)
def test_filter_picks():
    """Test filtering default channel picks.

    Filtering must work when exactly one data channel type is present,
    and must raise when only non-data (misc/stim) channels remain.
    """
    ch_types = ['mag', 'grad', 'eeg', 'seeg', 'misc', 'stim', 'ecog', 'hbo',
                'hbr']
    info = create_info(ch_names=ch_types, ch_types=ch_types, sfreq=256)
    raw = RawArray(data=np.zeros((len(ch_types), 1000)), info=info)
    # -- Deal with meg mag grad and fnirs exceptions
    ch_types = ('misc', 'stim', 'meg', 'eeg', 'seeg', 'ecog')
    # -- Filter data channels
    for ch_type in ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'hbo', 'hbr'):
        # build pick_types kwargs that select exactly one channel type;
        # mag/grad go through 'meg' and hbo/hbr through 'fnirs'
        picks = dict((ch, ch == ch_type) for ch in ch_types)
        picks['meg'] = ch_type if ch_type in ('mag', 'grad') else False
        picks['fnirs'] = ch_type if ch_type in ('hbo', 'hbr') else False
        raw_ = raw.copy().pick_types(**picks)
        raw_.filter(10, 30, l_trans_bandwidth='auto',
                    h_trans_bandwidth='auto', filter_length='auto',
                    phase='zero', fir_window='hamming')
    # -- Error if no data channel
    for ch_type in ('misc', 'stim'):
        picks = dict((ch, ch == ch_type) for ch in ch_types)
        raw_ = raw.copy().pick_types(**picks)
        assert_raises(RuntimeError, raw_.filter, 10, 30)
@testing.requires_testing_data
def test_crop():
    """Test cropping raw files.

    Splits a concatenated recording into pieces (including an awkward
    off-by-one split), re-concatenates, and checks the result matches
    the original exactly; also checks cropped output shapes.
    """
    # split a concatenated file to test a difficult case
    raw = concatenate_raws([read_raw_fif(f, add_eeg_ref=False)
                            for f in [fif_fname, fif_fname]])
    split_size = 10.  # in seconds
    sfreq = raw.info['sfreq']
    nsamp = (raw.last_samp - raw.first_samp + 1)
    # do an annoying case (off-by-one splitting)
    tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))]
    tmins = np.sort(tmins)
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
    tmaxs /= sfreq
    tmins /= sfreq
    raws = [None] * len(tmins)
    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
        raws[ri] = raw.copy().crop(tmin, tmax, copy=False)
    all_raw_2 = concatenate_raws(raws, preload=False)
    # re-concatenating the crops must reproduce the original exactly
    assert_equal(raw.first_samp, all_raw_2.first_samp)
    assert_equal(raw.last_samp, all_raw_2.last_samp)
    assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
    tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
    tmaxs /= sfreq
    tmins /= sfreq
    # going in reverse order so the last fname is the first file (need later)
    raws = [None] * len(tmins)
    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
        raws[ri] = raw.copy().crop(tmin, tmax, copy=False)
    # test concatenation of split file
    all_raw_1 = concatenate_raws(raws, preload=False)
    all_raw_2 = raw.copy().crop(0, None, copy=False)
    for ar in [all_raw_1, all_raw_2]:
        assert_equal(raw.first_samp, ar.first_samp)
        assert_equal(raw.last_samp, ar.last_samp)
        assert_array_equal(raw[:, :][0], ar[:, :][0])
    # test shape consistency of cropped raw
    data = np.zeros((1, 1002001))
    info = create_info(1, 1000)
    raw = RawArray(data, info)
    for tmin in range(0, 1001, 100):
        raw1 = raw.copy().crop(tmin=tmin, tmax=tmin + 2, copy=False)
        # a 2 s crop at 1000 Hz spans 2001 samples (inclusive bounds)
        assert_equal(raw1[:][0].shape, (1, 2001))
@testing.requires_testing_data
def test_resample():
    """Test resample (with I/O and multiple files).

    Covers up/downsample round-trips (parallel and serial), interaction
    of resampling with concatenation, stim-channel-aware decimation,
    event resampling, the copy flag, and info['lowpass'] updates.
    """
    tempdir = _TempDir()
    raw = read_raw_fif(fif_fname, add_eeg_ref=False).crop(0, 3, copy=False)
    raw.load_data()
    raw_resamp = raw.copy()
    sfreq = raw.info['sfreq']
    # test parallel on upsample
    raw_resamp.resample(sfreq * 2, n_jobs=2, npad='auto')
    assert_equal(raw_resamp.n_times, len(raw_resamp.times))
    raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
    raw_resamp = read_raw_fif(op.join(tempdir, 'raw_resamp-raw.fif'),
                              preload=True, add_eeg_ref=False)
    assert_equal(sfreq, raw_resamp.info['sfreq'] / 2)
    assert_equal(raw.n_times, raw_resamp.n_times / 2)
    assert_equal(raw_resamp._data.shape[1], raw_resamp.n_times)
    assert_equal(raw._data.shape[0], raw_resamp._data.shape[0])
    # test non-parallel on downsample
    raw_resamp.resample(sfreq, n_jobs=1, npad='auto')
    assert_equal(raw_resamp.info['sfreq'], sfreq)
    assert_equal(raw._data.shape, raw_resamp._data.shape)
    assert_equal(raw.first_samp, raw_resamp.first_samp)
    # bugfix: compare against the resampled instance (this line previously
    # compared raw.last_samp with itself, asserting nothing)
    assert_equal(raw.last_samp, raw_resamp.last_samp)
    # upsampling then downsampling doubles resampling error, but this still
    # works (hooray). Note that the stim channels had to be sub-sampled
    # without filtering to be accurately preserved
    # note we have to treat MEG and EEG+STIM channels differently (tols)
    assert_allclose(raw._data[:306, 200:-200],
                    raw_resamp._data[:306, 200:-200],
                    rtol=1e-2, atol=1e-12)
    assert_allclose(raw._data[306:, 200:-200],
                    raw_resamp._data[306:, 200:-200],
                    rtol=1e-2, atol=1e-7)
    # now check multiple file support w/resampling, as order of operations
    # (concat, resample) should not affect our data
    raw1 = raw.copy()
    raw2 = raw.copy()
    raw3 = raw.copy()
    raw4 = raw.copy()
    raw1 = concatenate_raws([raw1, raw2])
    raw1.resample(10., npad='auto')
    raw3.resample(10., npad='auto')
    raw4.resample(10., npad='auto')
    raw3 = concatenate_raws([raw3, raw4])
    assert_array_equal(raw1._data, raw3._data)
    assert_array_equal(raw1._first_samps, raw3._first_samps)
    assert_array_equal(raw1._last_samps, raw3._last_samps)
    assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
    assert_equal(raw1.first_samp, raw3.first_samp)
    assert_equal(raw1.last_samp, raw3.last_samp)
    assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])
    # test resampling of stim channel
    # basic decimation
    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    assert_allclose(raw.resample(8., npad='auto')._data,
                    [[1, 1, 0, 0, 1, 1, 0, 0]])
    # decimation of multiple stim channels
    raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim']))
    assert_allclose(raw.resample(8., npad='auto')._data,
                    [[1, 1, 0, 0, 1, 1, 0, 0],
                     [1, 1, 0, 0, 1, 1, 0, 0]])
    # decimation that could potentially drop events if the decimation is
    # done naively
    stim = [0, 0, 0, 1, 1, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    assert_allclose(raw.resample(4., npad='auto')._data,
                    [[0, 1, 1, 0]])
    # two events are merged in this case (warning)
    stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw.resample(8., npad='auto')
        assert_true(len(w) == 1)
    # events are dropped in this case (warning)
    stim = [0, 1, 1, 0, 0, 1, 1, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw.resample(4., npad='auto')
        assert_true(len(w) == 1)
    # test resampling events: this should no longer give a warning
    stim = [0, 1, 1, 0, 0, 1, 1, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    events = find_events(raw)
    raw, events = raw.resample(4., events=events, npad='auto')
    assert_equal(events, np.array([[0, 0, 1], [2, 0, 1]]))
    # test copy flag
    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    raw_resampled = raw.copy().resample(4., npad='auto')
    assert_true(raw_resampled is not raw)
    raw_resampled = raw.resample(4., npad='auto')
    assert_true(raw_resampled is raw)
    # resample should still work even when no stim channel is present
    raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg']))
    raw.info['lowpass'] = 50.
    raw.resample(10, npad='auto')
    # lowpass gets clipped to the new Nyquist frequency
    assert_equal(raw.info['lowpass'], 5.)
    assert_equal(len(raw), 10)
@testing.requires_testing_data
def test_hilbert():
    """Test computation of analytic signal using hilbert.

    Checks that 'auto' and explicit n_fft agree, that too-small n_fft
    is rejected, and that envelope=True matches the magnitude of the
    analytic signal.
    """
    raw = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)
    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
    picks = picks_meg[:4]
    raw_filt = raw.copy()
    raw_filt.filter(10, 20, picks=picks, l_trans_bandwidth='auto',
                    h_trans_bandwidth='auto', filter_length='auto',
                    phase='zero', fir_window='blackman')
    raw_filt_2 = raw_filt.copy()
    raw2 = raw.copy()
    raw3 = raw.copy()
    raw.apply_hilbert(picks, n_fft='auto')
    raw2.apply_hilbert(picks, n_fft='auto', envelope=True)
    # Test custom n_fft
    raw_filt.apply_hilbert(picks, n_fft='auto')
    # next power of two past n_times + 1000 samples of padding
    n_fft = 2 ** int(np.ceil(np.log2(raw_filt_2.n_times + 1000)))
    raw_filt_2.apply_hilbert(picks, n_fft=n_fft)
    assert_equal(raw_filt._data.shape, raw_filt_2._data.shape)
    # ignore edges where the transform differs most
    assert_allclose(raw_filt._data[:, 50:-50], raw_filt_2._data[:, 50:-50],
                    atol=1e-13, rtol=1e-2)
    # n_fft smaller than the data length is invalid
    assert_raises(ValueError, raw3.apply_hilbert, picks,
                  n_fft=raw3.n_times - 100)
    # envelope=True should equal |analytic signal|
    env = np.abs(raw._data[picks, :])
    assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13)
@testing.requires_testing_data
def test_raw_copy():
    """Test Raw copy."""
    # a copy must carry the same data and the same set of attributes,
    # whether or not the original was preloaded
    for preload in (True, False):
        original = read_raw_fif(fif_fname, preload=preload,
                                add_eeg_ref=False)
        orig_data, _ = original[:, :]
        duplicate = original.copy()
        dup_data, _ = duplicate[:, :]
        assert_array_equal(orig_data, dup_data)
        assert_equal(sorted(original.__dict__.keys()),
                     sorted(duplicate.__dict__.keys()))
@requires_pandas
def test_to_data_frame():
    """Test raw Pandas exporter.

    One column per channel; the default index is time in milliseconds,
    and channel data are multiplied by fixed scaling factors on export.
    """
    raw = read_raw_fif(test_fif_fname, preload=True, add_eeg_ref=False)
    _, times = raw[0, :10]
    df = raw.to_data_frame()
    assert_true((df.columns == raw.ch_names).all())
    # default index is time in milliseconds
    assert_array_equal(np.round(times * 1e3), df.index.values[:10])
    df = raw.to_data_frame(index=None)
    assert_true('time' in df.index.names)
    # exported values are scaled relative to the internal data
    # (factors 1e13 / 1e15 here; presumably channel-type dependent --
    # confirm against to_data_frame's scalings)
    assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
    assert_array_equal(df.values[:, 2], raw._data[2] * 1e15)
def test_add_channels():
    """Test raw splitting / re-appending channel types.

    Splits a recording by channel type, re-appends the pieces, and
    verifies channel membership, data equality, and the various error
    conditions of add_channels.
    """
    rng = np.random.RandomState(0)
    raw = read_raw_fif(test_fif_fname,
                       add_eeg_ref=False).crop(0, 1, copy=False).load_data()
    raw_nopre = read_raw_fif(test_fif_fname, preload=False, add_eeg_ref=False)
    raw_eeg_meg = raw.copy().pick_types(meg=True, eeg=True)
    raw_eeg = raw.copy().pick_types(meg=False, eeg=True)
    raw_meg = raw.copy().pick_types(meg=True, eeg=False)
    raw_stim = raw.copy().pick_types(meg=False, eeg=False, stim=True)
    raw_new = raw_meg.copy().add_channels([raw_eeg, raw_stim])
    assert_true(
        all(ch in raw_new.ch_names
            for ch in list(raw_stim.ch_names) + list(raw_meg.ch_names))
    )
    raw_new = raw_meg.copy().add_channels([raw_eeg])
    # bugfix: the original passed a bare generator to assert_true, which is
    # always truthy and asserted nothing; wrap in all() and check against
    # the channels actually present here (MEG + EEG -- stim was not added)
    assert_true(all(ch in raw_new.ch_names for ch in raw_eeg_meg.ch_names))
    assert_array_equal(raw_new[:, :][0], raw_eeg_meg[:, :][0])
    assert_array_equal(raw_new[:, :][1], raw[:, :][1])
    assert_true(all(ch not in raw_new.ch_names for ch in raw_stim.ch_names))
    # Testing force updates
    raw_arr_info = create_info(['1', '2'], raw_meg.info['sfreq'], 'eeg')
    orig_head_t = raw_arr_info['dev_head_t']
    raw_arr = rng.randn(2, raw_eeg.n_times)
    raw_arr = RawArray(raw_arr, raw_arr_info)
    # This should error because of conflicts in Info
    assert_raises(ValueError, raw_meg.copy().add_channels, [raw_arr])
    raw_meg.copy().add_channels([raw_arr], force_update_info=True)
    # Make sure that values didn't get overwritten
    assert_true(raw_arr.info['dev_head_t'] is orig_head_t)
    # Now test errors
    raw_badsf = raw_eeg.copy()
    raw_badsf.info['sfreq'] = 3.1415927
    raw_eeg.crop(.5, copy=False)
    assert_raises(AssertionError, raw_meg.add_channels, [raw_nopre])
    assert_raises(RuntimeError, raw_meg.add_channels, [raw_badsf])
    assert_raises(AssertionError, raw_meg.add_channels, [raw_eeg])
    assert_raises(ValueError, raw_meg.add_channels, [raw_meg])
    assert_raises(AssertionError, raw_meg.add_channels, raw_badsf)
@testing.requires_testing_data
def test_save():
    """Test saving raw.

    Covers refusal to overwrite (the source file, or without
    overwrite=True), absolute-path saving, and annotation round-trip,
    cropping/concatenation sync, and clipping behavior.
    """
    tempdir = _TempDir()
    raw = read_raw_fif(fif_fname, preload=False, add_eeg_ref=False)
    # can't write over file being read
    assert_raises(ValueError, raw.save, fif_fname)
    raw = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)
    # can't overwrite file without overwrite=True
    assert_raises(IOError, raw.save, fif_fname)
    # test abspath support and annotations
    sfreq = raw.info['sfreq']
    annot = Annotations([10], [5], ['test'],
                        raw.info['meas_date'] + raw.first_samp / sfreq)
    raw.annotations = annot
    # new_fname is absolute, so op.join(tempdir, new_fname) == new_fname
    new_fname = op.join(op.abspath(op.curdir), 'break-raw.fif')
    raw.save(op.join(tempdir, new_fname), overwrite=True)
    new_raw = read_raw_fif(op.join(tempdir, new_fname), preload=False,
                           add_eeg_ref=False)
    assert_raises(ValueError, new_raw.save, new_fname)
    # annotations must survive the save/load round-trip
    assert_array_equal(annot.onset, new_raw.annotations.onset)
    assert_array_equal(annot.duration, new_raw.annotations.duration)
    assert_array_equal(annot.description, new_raw.annotations.description)
    assert_equal(annot.orig_time, new_raw.annotations.orig_time)
    # test that annotations are in sync after cropping and concatenating
    annot = Annotations([5., 11., 15.], [2., 1., 3.], ['test', 'test', 'test'])
    raw.annotations = annot
    with warnings.catch_warnings(record=True) as w:
        r1 = raw.copy().crop(2.5, 7.5)
        r2 = raw.copy().crop(12.5, 17.5)
        r3 = raw.copy().crop(10., 12.)
    assert_true(all('data range' in str(ww.message) for ww in w))
    raw = concatenate_raws([r1, r2, r3])  # segments reordered
    onsets = raw.annotations.onset
    durations = raw.annotations.duration
    # 2*5s clips combined with annotations at 2.5s + 2s clip, annotation at 1s
    assert_array_almost_equal([2.5, 7.5, 11.], onsets, decimal=2)
    assert_array_almost_equal([2., 2.5, 1.], durations, decimal=2)
    # test annotation clipping
    annot = Annotations([0., raw.times[-1]], [2., 2.], 'test',
                        raw.info['meas_date'] + raw.first_samp / sfreq - 1.)
    with warnings.catch_warnings(record=True) as w:  # outside range
        raw.annotations = annot
    assert_true(all('data range' in str(ww.message) for ww in w))
    # out-of-range annotations get clipped to half their duration here
    assert_array_almost_equal(raw.annotations.duration, [1., 1.], decimal=3)
    # make sure we can overwrite the file we loaded when preload=True
    new_raw = read_raw_fif(op.join(tempdir, new_fname), preload=True,
                           add_eeg_ref=False)
    new_raw.save(op.join(tempdir, new_fname), overwrite=True)
    os.remove(new_fname)
@testing.requires_testing_data
def test_with_statement():
    """Test with statement."""
    # Raw must work as a context manager (and its repr must not raise),
    # regardless of preloading
    for preload in [True, False]:
        with read_raw_fif(fif_fname, preload=preload,
                          add_eeg_ref=False) as raw_cm:
            print(raw_cm)
def test_compensation_raw():
    """Test Raw compensation.

    The CTF test file ships with gradient compensation grade 3. This
    verifies that changing grades (3 -> 0 -> 1 -> 3) alters / restores
    the data as expected, with and without preloading, and that the
    grade survives save/load round-trips.
    """
    tempdir = _TempDir()
    raw_3 = read_raw_fif(ctf_comp_fname, add_eeg_ref=False)
    assert_equal(raw_3.compensation_grade, 3)
    data_3, times = raw_3[:, :]
    # data come with grade 3
    for ii in range(2):
        # applying the grade already in effect must be a no-op
        raw_3_new = raw_3.copy()
        if ii == 0:
            raw_3_new.load_data()
        raw_3_new.apply_gradient_compensation(3)
        assert_equal(raw_3_new.compensation_grade, 3)
        data_new, times_new = raw_3_new[:, :]
        assert_array_equal(times, times_new)
        assert_array_equal(data_3, data_new)
        # deprecated way
        preload = True if ii == 0 else False
        raw_3_new = read_raw_fif(ctf_comp_fname, compensation=3,
                                 preload=preload, verbose='error',
                                 add_eeg_ref=False)
        assert_equal(raw_3_new.compensation_grade, 3)
        data_new, times_new = raw_3_new[:, :]
        assert_array_equal(times, times_new)
        assert_array_equal(data_3, data_new)
    # change to grade 0
    raw_0 = raw_3.copy().apply_gradient_compensation(0)
    assert_equal(raw_0.compensation_grade, 0)
    data_0, times_new = raw_0[:, :]
    assert_array_equal(times, times_new)
    # changing the grade must actually change the data
    assert_true(np.mean(np.abs(data_0 - data_3)) > 1e-12)
    # change to grade 1
    raw_1 = raw_0.copy().apply_gradient_compensation(1)
    assert_equal(raw_1.compensation_grade, 1)
    data_1, times_new = raw_1[:, :]
    assert_array_equal(times, times_new)
    assert_true(np.mean(np.abs(data_1 - data_3)) > 1e-12)
    # an unknown compensation grade must be rejected
    assert_raises(ValueError, read_raw_fif, ctf_comp_fname, compensation=33,
                  verbose='error', add_eeg_ref=False)
    # compensation cannot be changed once projectors have been applied
    raw_bad = raw_0.copy()
    raw_bad.add_proj(compute_proj_raw(raw_0, duration=0.5, verbose='error'))
    raw_bad.apply_proj()
    assert_raises(RuntimeError, raw_bad.apply_gradient_compensation, 1)
    # with preload
    tols = dict(rtol=1e-12, atol=1e-25)
    raw_1_new = raw_3.copy().load_data().apply_gradient_compensation(1)
    assert_equal(raw_1_new.compensation_grade, 1)
    data_1_new, times_new = raw_1_new[:, :]
    assert_array_equal(times, times_new)
    assert_true(np.mean(np.abs(data_1_new - data_3)) > 1e-12)
    assert_allclose(data_1, data_1_new, **tols)
    # deprecated way
    for preload in (True, False):
        raw_1_new = read_raw_fif(ctf_comp_fname, compensation=1,
                                 verbose='error', preload=preload,
                                 add_eeg_ref=False)
        assert_equal(raw_1_new.compensation_grade, 1)
        data_1_new, times_new = raw_1_new[:, :]
        assert_array_equal(times, times_new)
        assert_true(np.mean(np.abs(data_1_new - data_3)) > 1e-12)
        assert_allclose(data_1, data_1_new, **tols)
    # change back
    raw_3_new = raw_1.copy().apply_gradient_compensation(3)
    data_3_new, times_new = raw_3_new[:, :]
    assert_allclose(data_3, data_3_new, **tols)
    raw_3_new = raw_1.copy().load_data().apply_gradient_compensation(3)
    data_3_new, times_new = raw_3_new[:, :]
    assert_allclose(data_3, data_3_new, **tols)
    # returning to grade 3 from either grade 0 or 1 must restore the data
    for load in (False, True):
        for raw in (raw_0, raw_1):
            raw_3_new = raw.copy()
            if load:
                raw_3_new.load_data()
            raw_3_new.apply_gradient_compensation(3)
            assert_equal(raw_3_new.compensation_grade, 3)
            data_3_new, times_new = raw_3_new[:, :]
            assert_array_equal(times, times_new)
            assert_true(np.mean(np.abs(data_3_new - data_1)) > 1e-12)
            assert_allclose(data_3, data_3_new, **tols)
    # Try IO with compensation
    temp_file = op.join(tempdir, 'raw.fif')
    raw_3.save(temp_file, overwrite=True)
    for preload in (True, False):
        raw_read = read_raw_fif(temp_file, preload=preload, add_eeg_ref=False)
        assert_equal(raw_read.compensation_grade, 3)
        data_read, times_new = raw_read[:, :]
        assert_array_equal(times, times_new)
        assert_allclose(data_3, data_read, **tols)
        raw_read.apply_gradient_compensation(1)
        data_read, times_new = raw_read[:, :]
        assert_array_equal(times, times_new)
        assert_allclose(data_1, data_read, **tols)
    # Now save the file that has modified compensation
    # and make sure the compensation is the same as it was,
    # but that we can undo it
    # These channels have norm 1e-11/1e-12, so atol=1e-18 isn't awesome,
    # but it's due to the single precision of the info['comps'] leading
    # to inexact inversions with saving/loading (casting back to single)
    # in between (e.g., 1->3->1 will degrade like this)
    looser_tols = dict(rtol=1e-6, atol=1e-18)
    raw_1.save(temp_file, overwrite=True)
    for preload in (True, False):
        raw_read = read_raw_fif(temp_file, preload=preload, verbose=True,
                                add_eeg_ref=False)
        assert_equal(raw_read.compensation_grade, 1)
        data_read, times_new = raw_read[:, :]
        assert_array_equal(times, times_new)
        assert_allclose(data_1, data_read, **looser_tols)
        raw_read.apply_gradient_compensation(3, verbose=True)
        data_read, times_new = raw_read[:, :]
        assert_array_equal(times, times_new)
        assert_allclose(data_3, data_read, **looser_tols)
@requires_mne
def test_compensation_raw_mne():
    """Test Raw compensation by comparing with MNE-C.

    For grades 0, 2, and 3 the Python result must match the output of
    the ``mne_process_raw`` command-line tool numerically and in the
    channel metadata.
    """
    tempdir = _TempDir()
    def compensate_mne(fname, grad):
        # apply gradient compensation with the MNE-C command-line tool
        tmp_fname = op.join(tempdir, 'mne_ctf_test_raw.fif')
        cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname,
               '--grad', str(grad), '--projoff', '--filteroff']
        run_subprocess(cmd)
        return read_raw_fif(tmp_fname, preload=True, add_eeg_ref=False)
    for grad in [0, 2, 3]:
        with warnings.catch_warnings(record=True):  # deprecated param
            raw_py = read_raw_fif(ctf_comp_fname, preload=True,
                                  compensation=grad, add_eeg_ref=False)
        raw_c = compensate_mne(ctf_comp_fname, grad)
        assert_allclose(raw_py._data, raw_c._data, rtol=1e-6, atol=1e-17)
        assert_equal(raw_py.info['nchan'], raw_c.info['nchan'])
        # per-channel metadata must also agree between implementations
        for ch_py, ch_c in zip(raw_py.info['chs'], raw_c.info['chs']):
            for key in ('ch_name', 'coil_type', 'scanno', 'logno', 'unit',
                        'coord_frame', 'kind'):
                assert_equal(ch_py[key], ch_c[key])
            for key in ('loc', 'unit_mul', 'range', 'cal'):
                assert_allclose(ch_py[key], ch_c[key])
@testing.requires_testing_data
def test_drop_channels_mixin():
    """Test channels-dropping functionality."""
    raw = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)
    to_drop = raw.ch_names[:3]
    kept_names = raw.ch_names[3:]
    all_names = raw.ch_names
    # dropping on a copy leaves the original untouched
    copied = raw.copy().drop_channels(to_drop)
    assert_equal(kept_names, copied.ch_names)
    assert_equal(all_names, raw.ch_names)
    assert_equal(len(all_names), raw._data.shape[0])
    # in-place drop shrinks names, calibrations, and the data array alike
    raw.drop_channels(to_drop)
    assert_equal(kept_names, raw.ch_names)
    assert_equal(len(kept_names), len(raw._cals))
    assert_equal(len(kept_names), raw._data.shape[0])
@testing.requires_testing_data
def test_pick_channels_mixin():
    """Test channel-picking functionality."""
    # preload is True
    raw = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)
    ch_names = raw.ch_names[:3]
    ch_names_orig = raw.ch_names
    dummy = raw.copy().pick_channels(ch_names)
    # picking on a copy must not modify the original
    assert_equal(ch_names, dummy.ch_names)
    assert_equal(ch_names_orig, raw.ch_names)
    assert_equal(len(ch_names_orig), raw._data.shape[0])
    raw.pick_channels(ch_names)  # copy is False
    # in-place pick shrinks names, calibrations, and the data array alike
    assert_equal(ch_names, raw.ch_names)
    assert_equal(len(ch_names), len(raw._cals))
    assert_equal(len(ch_names), raw._data.shape[0])
    # a bare string (single channel name) must be rejected
    assert_raises(ValueError, raw.pick_channels, ch_names[0])
    # picking/dropping requires preloaded data
    raw = read_raw_fif(fif_fname, preload=False, add_eeg_ref=False)
    assert_raises(RuntimeError, raw.pick_channels, ch_names)
    assert_raises(RuntimeError, raw.drop_channels, ch_names)
@testing.requires_testing_data
def test_equalize_channels():
    """Test equalization of channels."""
    first = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)
    second = first.copy()
    # drop a different channel from each instance; the intersection that
    # equalize_channels must converge on is everything from index 2 on
    expected = first.ch_names[2:]
    first.drop_channels(first.ch_names[:1])
    second.drop_channels(second.ch_names[1:2])
    pair = [first, second]
    equalize_channels(pair)
    for inst in pair:
        assert_equal(expected, inst.ch_names)
run_tests_if_main()  # run this module's tests when executed directly
| {
"repo_name": "jmontoyam/mne-python",
"path": "mne/io/fiff/tests/test_raw_fiff.py",
"copies": "2",
"size": "61567",
"license": "bsd-3-clause",
"hash": -8099600252995738000,
"line_mean": 41.0827067669,
"line_max": 79,
"alpha_frac": 0.6010525119,
"autogenerated": false,
"ratio": 3.1006748589846898,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47017273708846896,
"avg_score": null,
"num_lines": null
} |
from copy import deepcopy
from functools import partial
import itertools as itt
import os
import os.path as op
import warnings
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from nose.tools import assert_true, assert_raises, assert_not_equal
from mne.datasets import testing
from mne.io.constants import FIFF
from mne.io import RawArray, concatenate_raws, read_raw_fif
from mne.io.tests.test_raw import _test_concat, _test_raw_reader
from mne import (concatenate_events, find_events, equalize_channels,
compute_proj_raw, pick_types, pick_channels, create_info)
from mne.utils import (_TempDir, requires_pandas, slow_test, object_diff,
requires_mne, run_subprocess, run_tests_if_main)
from mne.externals.six.moves import zip, cPickle as pickle
from mne.io.proc_history import _get_sss_rank
from mne.io.pick import _picks_by_type
from mne.annotations import Annotations
from mne.tests.common import assert_naming
warnings.simplefilter('always')  # enable b/c these tests throw warnings
# Paths into the optional testing dataset (not downloaded here).
testing_path = testing.data_path(download=False)
data_dir = op.join(testing_path, 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
ms_fname = op.join(testing_path, 'SSS', 'test_move_anon_raw.fif')
skip_fname = op.join(testing_path, 'misc', 'intervalrecording_raw.fif')
# Small fixture files shipped with the package's own tests/data directory.
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
test_fif_fname = op.join(base_dir, 'test_raw.fif')
test_fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif')
bad_file_works = op.join(base_dir, 'test_bads.txt')
bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt')
hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
@testing.requires_testing_data
def test_acq_skip():
    """Test treatment of acquisition skips."""
    raw_with_skips = read_raw_fif(skip_fname)
    assert_equal(len(raw_with_skips.times), 17000)
    # the recording contains 3 acquisition skips
    assert_equal(len(raw_with_skips.annotations), 3)
def test_fix_types():
    """Test fixing of channel types.

    Forces magnetometer coil types to T2 in one file and checks that
    fix_mag_coil_types upgrades only those channels to T3, leaving files
    without the problem untouched.
    """
    for fname, change in ((hp_fif_fname, True), (test_fif_fname, False),
                          (ctf_fname, False)):
        raw = read_raw_fif(fname)
        mag_picks = pick_types(raw.info, meg='mag')
        other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), mag_picks)
        # we don't actually have any files suffering from this problem, so
        # fake it
        if change:
            for ii in mag_picks:
                raw.info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T2
        orig_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
        raw.fix_mag_coil_types()
        new_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
        if not change:
            # nothing to fix: coil types must be untouched
            assert_array_equal(orig_types, new_types)
        else:
            # only magnetometers change, and all of them become T3 coils
            assert_array_equal(orig_types[other_picks], new_types[other_picks])
            assert_true((orig_types[mag_picks] != new_types[mag_picks]).all())
            assert_true((new_types[mag_picks] ==
                         FIFF.FIFFV_COIL_VV_MAG_T3).all())
def test_concat():
    """Test RawFIF concatenation."""
    # Trim the file first to keep memory use and runtime reasonable, then
    # run the shared concatenation test helper on the trimmed copy.
    tempdir = _TempDir()
    short_fname = op.join(tempdir, 'test_raw.fif')
    raw_short = read_raw_fif(test_fif_fname)
    raw_short.crop(0, 2.)
    raw_short.save(short_fname)
    _test_concat(partial(read_raw_fif), short_fname)
@testing.requires_testing_data
def test_hash_raw():
    """Test hashing raw objects.

    Hashing requires preloaded data: equal preloaded Raw objects must hash
    (and pickle) equal, and any in-place data change must change the hash.
    """
    raw = read_raw_fif(fif_fname)
    # hashing a non-preloaded Raw is an error
    assert_raises(RuntimeError, raw.__hash__)
    raw = read_raw_fif(fif_fname).crop(0, 0.5)
    raw_size = raw._size
    raw.load_data()
    raw_load_size = raw._size
    # loading the data must grow the in-memory footprint
    assert_true(raw_size < raw_load_size)
    raw_2 = read_raw_fif(fif_fname).crop(0, 0.5)
    raw_2.load_data()
    assert_equal(hash(raw), hash(raw_2))
    # do NOT use assert_equal here, failing output is terrible
    # (BUG FIX: the code previously used assert_equal anyway, which on
    # failure dumps the full pickled byte strings)
    assert_true(pickle.dumps(raw) == pickle.dumps(raw_2))
    raw_2._data[0, 0] -= 1
    assert_not_equal(hash(raw), hash(raw_2))
@testing.requires_testing_data
def test_maxshield():
    """Test maxshield warning."""
    # Reading MaxShield data with allow_maxshield=True should emit exactly
    # one warning, and it should be attributed to this test module.
    with warnings.catch_warnings(record=True) as recorded:
        warnings.simplefilter('always')
        read_raw_fif(ms_fname, allow_maxshield=True)
    assert_equal(len(recorded), 1)
    assert_true('test_raw_fiff.py' in recorded[0].filename)
@testing.requires_testing_data
def test_subject_info():
    """Test reading subject information."""
    tempdir = _TempDir()
    raw = read_raw_fif(fif_fname).crop(0, 1)
    # the sample file carries no subject info
    assert_true(raw.info['subject_info'] is None)
    # fabricate subject data and round-trip it through save/read
    keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex',
            'hand']
    vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1]
    subject_info = dict(zip(keys, vals))
    raw.info['subject_info'] = subject_info
    out_fname = op.join(tempdir, 'test_subj_info_raw.fif')
    raw.save(out_fname, overwrite=True)
    raw_read = read_raw_fif(out_fname)
    for key in keys:
        assert_equal(subject_info[key], raw_read.info['subject_info'][key])
    # the measurement date must survive the round trip as well
    assert_equal(raw.info['meas_date'], raw_read.info['meas_date'])
@testing.requires_testing_data
def test_copy_append():
    """Test raw copying and appending combinations."""
    raw = read_raw_fif(fif_fname, preload=True).copy()
    raw_full = read_raw_fif(fif_fname)
    raw_full.append(raw)
    # appending a full copy doubles the number of samples
    appended = raw_full[:, :][0]
    assert_equal(appended.shape[1], 2 * raw._data.shape[1])
@slow_test
@testing.requires_testing_data
def test_rank_estimation():
    """Test raw rank estimation."""
    iter_tests = itt.product(
        [fif_fname, hp_fif_fname],  # sss
        ['norm', dict(mag=1e11, grad=1e9, eeg=1e5)]
    )
    for fname, scalings in iter_tests:
        raw = read_raw_fif(fname).crop(0, 4.).load_data()
        (_, picks_meg), (_, picks_eeg) = _picks_by_type(raw.info,
                                                        meg_combined=True)
        n_meg = len(picks_meg)
        n_eeg = len(picks_eeg)
        # without Maxwell-filter history the expected rank is the channel
        # count; with SSS it drops to the SSS basis rank (+ EEG channels)
        if len(raw.info['proc_history']) == 0:
            expected_rank = n_meg + n_eeg
        else:
            mf = raw.info['proc_history'][0]['max_info']
            expected_rank = _get_sss_rank(mf) + n_eeg
        assert_array_equal(raw.estimate_rank(scalings=scalings), expected_rank)
        assert_array_equal(raw.estimate_rank(picks=picks_eeg,
                                             scalings=scalings), n_eeg)
        if 'sss' in fname:
            raw.add_proj(compute_proj_raw(raw))
            raw.apply_proj()
            n_proj = len(raw.info['projs'])
        # NOTE(review): ``n_proj`` is only bound inside the SSS branch above,
        # yet the non-SSS arm of the conditional below reads it -- on the
        # first (non-SSS) iteration this looks like a NameError; confirm the
        # intended expression.
        assert_array_equal(raw.estimate_rank(tstart=0, tstop=3.,
                                             scalings=scalings),
                           expected_rank - (1 if 'sss' in fname else n_proj))
@testing.requires_testing_data
def test_output_formats():
    """Test saving and loading raw data using multiple formats."""
    tempdir = _TempDir()
    # precision tolerance matched to each on-disk sample format
    formats = ['short', 'int', 'single', 'double']
    tols = [1e-4, 1e-7, 1e-7, 1e-15]
    # fake a raw file saved in each of the formats in turn
    raw = read_raw_fif(test_fif_fname).crop(0, 1)
    temp_file = op.join(tempdir, 'raw.fif')
    for ii, (fmt, tol) in enumerate(zip(formats, tols)):
        # after the first pass the file exists, so saving without
        # overwrite=True must raise
        if ii > 0:
            assert_raises(IOError, raw.save, temp_file, fmt=fmt)
        raw.save(temp_file, fmt=fmt, overwrite=True)
        reread = read_raw_fif(temp_file)
        assert_allclose(reread[:, :][0], raw[:, :][0], rtol=tol, atol=1e-25)
        assert_equal(reread.orig_format, fmt)
def _compare_combo(raw, new, times, n_times):
"""Compare data."""
for ti in times: # let's do a subset of points for speed
orig = raw[:, ti % n_times][0]
# these are almost_equals because of possible dtype differences
assert_allclose(orig, new[:, ti][0])
@slow_test
@testing.requires_testing_data
def test_multiple_files():
    """Test loading multiple files simultaneously."""
    # split file
    tempdir = _TempDir()
    raw = read_raw_fif(fif_fname).crop(0, 10)
    raw.load_data()
    raw.load_data()  # test no operation
    split_size = 3.  # in seconds
    sfreq = raw.info['sfreq']
    nsamp = (raw.last_samp - raw.first_samp)
    # split boundaries in samples, then converted to seconds
    tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
    tmaxs /= sfreq
    tmins /= sfreq
    assert_equal(raw.n_times, len(raw.times))
    # going in reverse order so the last fname is the first file (need later)
    raws = [None] * len(tmins)
    for ri in range(len(tmins) - 1, -1, -1):
        fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
        raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
        raws[ri] = read_raw_fif(fname)
        assert_equal(len(raws[ri].times),
                     int(round((tmaxs[ri] - tmins[ri]) *
                               raw.info['sfreq'])) + 1)  # + 1 b/c inclusive
    events = [find_events(r, stim_channel='STI 014') for r in raws]
    last_samps = [r.last_samp for r in raws]
    first_samps = [r.first_samp for r in raws]
    # test concatenation of split file
    assert_raises(ValueError, concatenate_raws, raws, True, events[1:])
    all_raw_1, events1 = concatenate_raws(raws, preload=False,
                                          events_list=events)
    assert_allclose(all_raw_1.times, raw.times)
    assert_equal(raw.first_samp, all_raw_1.first_samp)
    assert_equal(raw.last_samp, all_raw_1.last_samp)
    assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
    # ``fname`` still points at split 0 (the save loop ran in reverse), so
    # this re-reads the first piece for the preloaded concatenation
    raws[0] = read_raw_fif(fname)
    all_raw_2 = concatenate_raws(raws, preload=True)
    assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
    # test proper event treatment for split files
    events2 = concatenate_events(events, first_samps, last_samps)
    events3 = find_events(all_raw_2, stim_channel='STI 014')
    assert_array_equal(events1, events2)
    assert_array_equal(events1, events3)
    # test various methods of combining files
    raw = read_raw_fif(fif_fname, preload=True)
    n_times = raw.n_times
    # make sure that all our data match
    times = list(range(0, 2 * n_times, 999))
    # add potentially problematic points
    times.extend([n_times - 1, n_times, 2 * n_times - 1])
    raw_combo0 = concatenate_raws([read_raw_fif(f)
                                   for f in [fif_fname, fif_fname]],
                                  preload=True)
    _compare_combo(raw, raw_combo0, times, n_times)
    raw_combo = concatenate_raws([read_raw_fif(f)
                                  for f in [fif_fname, fif_fname]],
                                 preload=False)
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = concatenate_raws([read_raw_fif(f)
                                  for f in [fif_fname, fif_fname]],
                                 preload='memmap8.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    assert_equal(raw[:, :][0].shape[1] * 2, raw_combo0[:, :][0].shape[1])
    assert_equal(raw_combo0[:, :][0].shape[1], raw_combo0.n_times)
    # with all data preloaded, result should be preloaded
    raw_combo = read_raw_fif(fif_fname, preload=True)
    raw_combo.append(read_raw_fif(fif_fname, preload=True))
    assert_true(raw_combo.preload is True)
    assert_equal(raw_combo.n_times, raw_combo._data.shape[1])
    _compare_combo(raw, raw_combo, times, n_times)
    # with any data not preloaded, don't set result as preloaded
    raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=True),
                                  read_raw_fif(fif_fname, preload=False)])
    assert_true(raw_combo.preload is False)
    assert_array_equal(find_events(raw_combo, stim_channel='STI 014'),
                       find_events(raw_combo0, stim_channel='STI 014'))
    _compare_combo(raw, raw_combo, times, n_times)
    # user should be able to force data to be preloaded upon concat
    raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=False),
                                  read_raw_fif(fif_fname, preload=True)],
                                 preload=True)
    assert_true(raw_combo.preload is True)
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=False),
                                  read_raw_fif(fif_fname, preload=True)],
                                 preload='memmap3.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = concatenate_raws([
        read_raw_fif(fif_fname, preload=True),
        read_raw_fif(fif_fname, preload=True)], preload='memmap4.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = concatenate_raws([
        read_raw_fif(fif_fname, preload=False),
        read_raw_fif(fif_fname, preload=False)], preload='memmap5.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    # verify that combining raws with different projectors throws an exception
    raw.add_proj([], remove_existing=True)
    assert_raises(ValueError, raw.append,
                  read_raw_fif(fif_fname, preload=True))
    # now test event treatment for concatenated raw files
    events = [find_events(raw, stim_channel='STI 014'),
              find_events(raw, stim_channel='STI 014')]
    last_samps = [raw.last_samp, raw.last_samp]
    first_samps = [raw.first_samp, raw.first_samp]
    events = concatenate_events(events, first_samps, last_samps)
    events2 = find_events(raw_combo0, stim_channel='STI 014')
    assert_array_equal(events, events2)
    # check out the len method
    assert_equal(len(raw), raw.n_times)
    assert_equal(len(raw), raw.last_samp - raw.first_samp + 1)
@testing.requires_testing_data
def test_split_files():
    """Test writing and reading of split raw files.

    Covers: annotation/data round-trip through a split save, the single
    remaining buffer corner case (GH#3210), and the buffer-count arithmetic
    for several buffer_size_sec values.
    """
    tempdir = _TempDir()
    raw_1 = read_raw_fif(fif_fname, preload=True)
    # Test a very close corner case
    raw_crop = raw_1.copy().crop(0, 1.)
    assert_allclose(raw_1.info['buffer_size_sec'], 10., atol=1e-2)  # samp rate
    split_fname = op.join(tempdir, 'split_raw.fif')
    raw_1.annotations = Annotations([2.], [5.5], 'test')
    raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')
    # buffer size, annotations, data, and times must all survive the split
    raw_2 = read_raw_fif(split_fname)
    assert_allclose(raw_2.info['buffer_size_sec'], 1., atol=1e-2)  # samp rate
    assert_array_equal(raw_1.annotations.onset, raw_2.annotations.onset)
    assert_array_equal(raw_1.annotations.duration, raw_2.annotations.duration)
    assert_array_equal(raw_1.annotations.description,
                       raw_2.annotations.description)
    data_1, times_1 = raw_1[:, :]
    data_2, times_2 = raw_2[:, :]
    assert_array_equal(data_1, data_2)
    assert_array_equal(times_1, times_2)
    # test the case where we only end up with one buffer to write
    # (GH#3210). These tests rely on writing meas info and annotations
    # taking up a certain number of bytes, so if we change those functions
    # somehow, the numbers below for e.g. split_size might need to be
    # adjusted.
    raw_crop = raw_1.copy().crop(0, 5)
    try:
        raw_crop.save(split_fname, split_size='1MB',  # too small a size
                      buffer_size_sec=1., overwrite=True)
    except ValueError as exp:
        assert_true('after writing measurement information' in str(exp), exp)
    else:
        # BUG FIX: the test previously passed silently if save() did not
        # raise at all
        raise AssertionError('save with split_size=1MB should have raised '
                             'ValueError')
    try:
        raw_crop.save(split_fname,
                      split_size=3002276,  # still too small, now after Info
                      buffer_size_sec=1., overwrite=True)
    except ValueError as exp:
        assert_true('too large for the given split size' in str(exp), exp)
    else:
        # BUG FIX: same silent-pass problem as above
        raise AssertionError('save with split_size=3002276 should have '
                             'raised ValueError')
    # just barely big enough here; the right size to write exactly one buffer
    # at a time so we hit GH#3210 if we aren't careful
    raw_crop.save(split_fname, split_size='4.5MB',
                  buffer_size_sec=1., overwrite=True)
    raw_read = read_raw_fif(split_fname)
    assert_allclose(raw_crop[:][0], raw_read[:][0], atol=1e-20)
    # Check our buffer arithmetic
    # 1 buffer required
    raw_crop = raw_1.copy().crop(0, 1)
    raw_crop.save(split_fname, buffer_size_sec=1., overwrite=True)
    raw_read = read_raw_fif(split_fname)
    assert_equal(len(raw_read._raw_extras[0]), 1)
    assert_equal(raw_read._raw_extras[0][0]['nsamp'], 301)
    assert_allclose(raw_crop[:][0], raw_read[:][0])
    # 2 buffers required
    raw_crop.save(split_fname, buffer_size_sec=0.5, overwrite=True)
    raw_read = read_raw_fif(split_fname)
    assert_equal(len(raw_read._raw_extras[0]), 2)
    assert_equal(raw_read._raw_extras[0][0]['nsamp'], 151)
    assert_equal(raw_read._raw_extras[0][1]['nsamp'], 150)
    assert_allclose(raw_crop[:][0], raw_read[:][0])
    # 2 buffers required: buffer just barely under one second leaves a
    # 1-sample remainder buffer
    raw_crop.save(split_fname,
                  buffer_size_sec=1. - 1.01 / raw_crop.info['sfreq'],
                  overwrite=True)
    raw_read = read_raw_fif(split_fname)
    assert_equal(len(raw_read._raw_extras[0]), 2)
    assert_equal(raw_read._raw_extras[0][0]['nsamp'], 300)
    assert_equal(raw_read._raw_extras[0][1]['nsamp'], 1)
    assert_allclose(raw_crop[:][0], raw_read[:][0])
    raw_crop.save(split_fname,
                  buffer_size_sec=1. - 2.01 / raw_crop.info['sfreq'],
                  overwrite=True)
    raw_read = read_raw_fif(split_fname)
    assert_equal(len(raw_read._raw_extras[0]), 2)
    assert_equal(raw_read._raw_extras[0][0]['nsamp'], 299)
    assert_equal(raw_read._raw_extras[0][1]['nsamp'], 2)
    assert_allclose(raw_crop[:][0], raw_read[:][0])
def test_load_bad_channels():
    """Test reading/writing of bad channels."""
    tempdir = _TempDir()
    out_fname = op.join(tempdir, 'foo_raw.fif')
    # reference bads were marked manually in mne_process_raw
    correct_bads = read_raw_fif(fif_bad_marked_fname).info['bads']
    raw = read_raw_fif(test_fif_fname)
    assert_array_equal(raw.info['bads'], [])  # must start clean
    # a valid bad-channel file round-trips through save/read
    raw.load_bad_channels(bad_file_works)
    raw.save(out_fname)
    assert_equal(correct_bads, read_raw_fif(out_fname).info['bads'])
    raw.info['bads'] = []  # reset
    # unknown channel names are rejected...
    assert_raises(ValueError, raw.load_bad_channels, bad_file_wrong)
    # ...unless forced, in which case a single warning is emitted
    with warnings.catch_warnings(record=True) as recorded:
        warnings.simplefilter('always')
        raw.load_bad_channels(bad_file_wrong, force=True)
        n_found = sum(['1 bad channel' in str(ww.message)
                       for ww in recorded])
        assert_equal(n_found, 1)  # there could be other irrelevant errors
        # the forced result still round-trips through save/read
        raw.save(out_fname, overwrite=True)
        assert_equal(correct_bads, read_raw_fif(out_fname).info['bads'])
    # passing None clears the bads list entirely
    raw.load_bad_channels(None)
    raw.save(out_fname, overwrite=True)
    assert_equal([], read_raw_fif(out_fname).info['bads'])
@slow_test
@testing.requires_testing_data
def test_io_raw():
    """Test IO for raw data (Neuromag + CTF + gz)."""
    rng = np.random.RandomState(0)
    tempdir = _TempDir()
    # test unicode io
    for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']:
        with read_raw_fif(fif_fname) as r:
            assert_true('Raw' in repr(r))
            assert_true(op.basename(fif_fname) in repr(r))
            # non-ASCII description must survive a save/read round trip
            desc1 = r.info['description'] = chars.decode('utf-8')
            temp_file = op.join(tempdir, 'raw.fif')
            r.save(temp_file, overwrite=True)
            with read_raw_fif(temp_file) as r2:
                desc2 = r2.info['description']
            assert_equal(desc1, desc2)
    # Let's construct a simple test for IO first
    raw = read_raw_fif(fif_fname).crop(0, 3.5)
    raw.load_data()
    # put in some data that we know the values of
    data = rng.randn(raw._data.shape[0], raw._data.shape[1])
    raw._data[:, :] = data
    # save it somewhere
    fname = op.join(tempdir, 'test_copy_raw.fif')
    raw.save(fname, buffer_size_sec=1.0)
    # read it in, make sure the whole thing matches
    raw = read_raw_fif(fname)
    assert_allclose(data, raw[:, :][0], rtol=1e-6, atol=1e-20)
    # let's read portions across the 1-sec tag boundary, too
    inds = raw.time_as_index([1.75, 2.25])
    sl = slice(inds[0], inds[1])
    assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20)
    # now let's do some real I/O
    fnames_in = [fif_fname, test_fif_gz_fname, ctf_fname]
    fnames_out = ['raw.fif', 'raw.fif.gz', 'raw.fif']
    for fname_in, fname_out in zip(fnames_in, fnames_out):
        fname_out = op.join(tempdir, fname_out)
        raw = read_raw_fif(fname_in)
        nchan = raw.info['nchan']
        ch_names = raw.info['ch_names']
        # MEG channel names start with 'M' in these files
        meg_channels_idx = [k for k in range(nchan)
                            if ch_names[k][0] == 'M']
        n_channels = 100
        meg_channels_idx = meg_channels_idx[:n_channels]
        start, stop = raw.time_as_index([0, 5], use_rounding=True)
        data, times = raw[meg_channels_idx, start:(stop + 1)]
        meg_ch_names = [ch_names[k] for k in meg_channels_idx]
        # Set up pick list: MEG + STI 014 - bad channels
        include = ['STI 014']
        include += meg_ch_names
        picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
                           misc=True, ref_meg=True, include=include,
                           exclude='bads')
        # Writing with drop_small_buffer True
        raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3,
                 drop_small_buffer=True, overwrite=True)
        raw2 = read_raw_fif(fname_out)
        sel = pick_channels(raw2.ch_names, meg_ch_names)
        data2, times2 = raw2[sel, :]
        # the trailing partial buffer was dropped, so <= 3 s remain
        assert_true(times2.max() <= 3)
        # Writing
        raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True)
        if fname_in == fif_fname or fname_in == fif_fname + '.gz':
            assert_equal(len(raw.info['dig']), 146)
        raw2 = read_raw_fif(fname_out)
        sel = pick_channels(raw2.ch_names, meg_ch_names)
        data2, times2 = raw2[sel, :]
        assert_allclose(data, data2, rtol=1e-6, atol=1e-20)
        assert_allclose(times, times2)
        assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5)
        # check transformations
        for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']:
            if raw.info[trans] is None:
                assert_true(raw2.info[trans] is None)
            else:
                assert_array_equal(raw.info[trans]['trans'],
                                   raw2.info[trans]['trans'])
                # check transformation 'from' and 'to'
                if trans.startswith('dev'):
                    from_id = FIFF.FIFFV_COORD_DEVICE
                else:
                    from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
                if trans[4:8] == 'head':
                    to_id = FIFF.FIFFV_COORD_HEAD
                else:
                    to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
                for raw_ in [raw, raw2]:
                    assert_equal(raw_.info[trans]['from'], from_id)
                    assert_equal(raw_.info[trans]['to'], to_id)
        if fname_in == fif_fname or fname_in == fif_fname + '.gz':
            assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])
    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        raw_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        raw.save(raw_badname)
        read_raw_fif(raw_badname)
    assert_naming(w, 'test_raw_fiff.py', 2)
@testing.requires_testing_data
def test_io_complex():
    """Test IO with complex data types."""
    rng = np.random.RandomState(0)
    tempdir = _TempDir()
    dtypes = [np.complex64, np.complex128]
    raw = _test_raw_reader(partial(read_raw_fif),
                           fname=fif_fname)
    picks = np.arange(5)
    start, stop = raw.time_as_index([0, 5])
    data_orig, _ = raw[picks, start:stop]
    for di, dtype in enumerate(dtypes):
        # inject an imaginary component so the data are genuinely complex
        imag_rand = np.array(1j * rng.randn(data_orig.shape[0],
                                            data_orig.shape[1]), dtype)
        raw_cp = raw.copy()
        raw_cp._data = np.array(raw_cp._data, dtype)
        raw_cp._data[picks, start:stop] += imag_rand
        # saving complex data should emit a warning
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            raw_cp.save(op.join(tempdir, 'raw.fif'), picks, tmin=0, tmax=5,
                        overwrite=True)
            # warning gets thrown on every instance b/c simplefilter('always')
            assert_equal(len(w), 1)
        raw2 = read_raw_fif(op.join(tempdir, 'raw.fif'))
        raw2_data, _ = raw2[picks, :]
        n_samp = raw2_data.shape[1]
        assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
        # with preloading
        raw2 = read_raw_fif(op.join(tempdir, 'raw.fif'), preload=True)
        raw2_data, _ = raw2[picks, :]
        n_samp = raw2_data.shape[1]
        assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
@testing.requires_testing_data
def test_getitem():
    """Test getitem/indexing of Raw."""

    def _check_same(pair_a, pair_b):
        # each pair is (data, times); both elements must match exactly
        assert_array_equal(pair_a[0], pair_b[0])
        assert_array_equal(pair_a[1], pair_b[1])

    # exercise all preload modes: lazy, in-memory, and memmapped
    for preload in [False, True, 'memmap.dat']:
        raw = read_raw_fif(fif_fname, preload=preload)
        # a bare channel index is shorthand for [idx, :]
        _check_same(raw[0, :], raw[0])
        # slices and explicit index lists agree
        _check_same(raw[0:2, :], raw[0:2])
        _check_same(raw[0:2, :], raw[[0, 1]])
        # negative channel indices count from the end
        assert_array_equal(raw[-10:, :][0],
                           raw[len(raw.ch_names) - 10:, :][0])
        # a slice reaching before channel 0 must raise
        assert_raises(ValueError, raw.__getitem__,
                      (slice(-len(raw.ch_names) - 1), slice(None)))
@testing.requires_testing_data
def test_proj():
    """Test SSP proj operations."""
    tempdir = _TempDir()
    for proj in [True, False]:
        raw = read_raw_fif(fif_fname, preload=False)
        if proj:
            raw.apply_proj()
        # all projs active iff they were applied
        assert_true(all(p['active'] == proj for p in raw.info['projs']))
        data, times = raw[0:2, :]
        data1, times1 = raw[0:2]
        assert_array_equal(data, data1)
        assert_array_equal(times, times1)
        # test adding / deleting proj
        if proj:
            # applied projs can no longer be replaced or removed
            assert_raises(ValueError, raw.add_proj, [],
                          {'remove_existing': True})
            assert_raises(ValueError, raw.del_proj, 0)
        else:
            projs = deepcopy(raw.info['projs'])
            n_proj = len(raw.info['projs'])
            raw.del_proj(0)
            assert_equal(len(raw.info['projs']), n_proj - 1)
            raw.add_proj(projs, remove_existing=False)
            # Test that already existing projections are not added.
            assert_equal(len(raw.info['projs']), n_proj)
            raw.add_proj(projs[:-1], remove_existing=True)
            assert_equal(len(raw.info['projs']), n_proj - 1)
    # test apply_proj() with and without preload
    for preload in [True, False]:
        raw = read_raw_fif(fif_fname, preload=preload)
        data, times = raw[:, 0:2]
        raw.apply_proj()
        # reference: applying the projector matrix by hand
        data_proj_1 = np.dot(raw._projector, data)
        # load the file again without proj
        raw = read_raw_fif(fif_fname, preload=preload)
        # write the file with proj. activated, make sure proj has been applied
        raw.save(op.join(tempdir, 'raw.fif'), proj=True, overwrite=True)
        raw2 = read_raw_fif(op.join(tempdir, 'raw.fif'))
        data_proj_2, _ = raw2[:, 0:2]
        assert_allclose(data_proj_1, data_proj_2)
        assert_true(all(p['active'] for p in raw2.info['projs']))
        # read orig file with proj. active
        raw2 = read_raw_fif(fif_fname, preload=preload)
        raw2.apply_proj()
        data_proj_2, _ = raw2[:, 0:2]
        assert_allclose(data_proj_1, data_proj_2)
        assert_true(all(p['active'] for p in raw2.info['projs']))
        # test that apply_proj works
        raw.apply_proj()
        data_proj_2, _ = raw[:, 0:2]
        assert_allclose(data_proj_1, data_proj_2)
        # applying twice must be idempotent
        assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2))
    tempdir = _TempDir()
    out_fname = op.join(tempdir, 'test_raw.fif')
    # single-proj EEG corner case: lazy indexing must match full read
    raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002)
    raw.pick_types(meg=False, eeg=True)
    raw.info['projs'] = [raw.info['projs'][-1]]
    raw._data.fill(0)
    raw._data[-1] = 1.
    raw.save(out_fname)
    raw = read_raw_fif(out_fname, preload=False)
    raw.apply_proj()
    assert_allclose(raw[:, :][0][:1], raw[0, :][0])
@testing.requires_testing_data
def test_preload_modify():
    """Test preloading and modifying data."""
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    for preload in [False, True, 'memmap.dat']:
        raw = read_raw_fif(fif_fname, preload=preload)
        nsamp = raw.last_samp - raw.first_samp + 1
        picks = pick_types(raw.info, meg='grad', exclude='bads')
        new_data = rng.randn(len(picks), nsamp // 2)
        try:
            raw[picks, :nsamp // 2] = new_data
        except RuntimeError:
            # assignment without preloaded data is expected to fail
            if not preload:
                continue
            raise
        # modified data must survive a save/read round trip
        tmp_fname = op.join(tempdir, 'raw.fif')
        raw.save(tmp_fname, overwrite=True)
        raw_new = read_raw_fif(tmp_fname)
        data_new, _ = raw_new[picks, :nsamp // 2]
        assert_allclose(new_data, data_new)
@slow_test
@testing.requires_testing_data
def test_filter():
    """Test filtering (FIR and IIR) and Raw.apply_function interface."""
    raw = read_raw_fif(fif_fname).crop(0, 7)
    raw.load_data()
    sig_dec_notch = 12
    sig_dec_notch_fit = 12
    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
    picks = picks_meg[:4]
    trans = 2.0
    filter_params = dict(picks=picks, filter_length='auto',
                         h_trans_bandwidth=trans, l_trans_bandwidth=trans,
                         fir_design='firwin')
    raw_lp = raw.copy().filter(None, 8.0, **filter_params)
    raw_hp = raw.copy().filter(16.0, None, **filter_params)
    raw_bp = raw.copy().filter(8.0 + trans, 16.0 - trans, **filter_params)
    raw_bs = raw.copy().filter(16.0, 8.0, **filter_params)
    data, _ = raw[picks, :]
    lp_data, _ = raw_lp[picks, :]
    hp_data, _ = raw_hp[picks, :]
    bp_data, _ = raw_bp[picks, :]
    bs_data, _ = raw_bs[picks, :]
    # complementary-band identities: the filtered bands must sum back to
    # the stop band / the original signal
    tols = dict(atol=1e-20, rtol=1e-5)
    assert_allclose(bs_data, lp_data + hp_data, **tols)
    assert_allclose(data, lp_data + bp_data + hp_data, **tols)
    assert_allclose(data, bp_data + bs_data, **tols)
    filter_params_iir = dict(picks=picks, n_jobs=2, method='iir',
                             iir_params=dict(output='ba'))
    raw_lp_iir = raw.copy().filter(None, 4.0, **filter_params_iir)
    raw_hp_iir = raw.copy().filter(8.0, None, **filter_params_iir)
    raw_bp_iir = raw.copy().filter(4.0, 8.0, **filter_params_iir)
    del filter_params_iir
    lp_data_iir, _ = raw_lp_iir[picks, :]
    hp_data_iir, _ = raw_hp_iir[picks, :]
    bp_data_iir, _ = raw_bp_iir[picks, :]
    # same identity for IIR, away from the edges (IIR has edge transients)
    summation = lp_data_iir + hp_data_iir + bp_data_iir
    assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100], 11)
    # make sure we didn't touch other channels
    data, _ = raw[picks_meg[4:], :]
    bp_data, _ = raw_bp[picks_meg[4:], :]
    assert_array_equal(data, bp_data)
    bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :]
    assert_array_equal(data, bp_data_iir)
    # ... and that inplace changes are inplace
    raw_copy = raw.copy()
    raw_copy.filter(None, 20., n_jobs=2, **filter_params)
    assert_true(raw._data[0, 0] != raw_copy._data[0, 0])
    assert_equal(raw.copy().filter(None, 20., **filter_params)._data,
                 raw_copy._data)
    # do a very simple check on line filtering
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        raw_bs = raw.copy().filter(60.0 + trans, 60.0 - trans, **filter_params)
        data_bs, _ = raw_bs[picks, :]
        raw_notch = raw.copy().notch_filter(
            60.0, picks=picks, n_jobs=2, method='fir',
            trans_bandwidth=2 * trans)
    data_notch, _ = raw_notch[picks, :]
    # band-stop around 60 Hz should match an FIR notch at 60 Hz
    assert_array_almost_equal(data_bs, data_notch, sig_dec_notch)
    # now use the sinusoidal fitting
    raw_notch = raw.copy().notch_filter(
        None, picks=picks, n_jobs=2, method='spectrum_fit')
    data_notch, _ = raw_notch[picks, :]
    data, _ = raw[picks, :]
    assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
    # filter should set the "lowpass" and "highpass" parameters
    raw = RawArray(np.random.randn(3, 1000),
                   create_info(3, 1000., ['eeg'] * 2 + ['stim']))
    raw.info['lowpass'] = raw.info['highpass'] = None
    for kind in ('none', 'lowpass', 'highpass', 'bandpass', 'bandstop'):
        print(kind)
        h_freq = l_freq = None
        if kind in ('lowpass', 'bandpass'):
            h_freq = 70
        if kind in ('highpass', 'bandpass'):
            l_freq = 30
        if kind == 'bandstop':
            l_freq, h_freq = 70, 30
        assert_true(raw.info['lowpass'] is None)
        assert_true(raw.info['highpass'] is None)
        kwargs = dict(l_trans_bandwidth=20, h_trans_bandwidth=20,
                      filter_length='auto', phase='zero', fir_design='firwin')
        # filtering only a subset of channels must NOT update info
        raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(1),
                                     **kwargs)
        assert_true(raw.info['lowpass'] is None)
        assert_true(raw.info['highpass'] is None)
        # filtering all (default picks) must update info; bandstop leaves
        # the pass-band edges untouched
        raw_filt = raw.copy().filter(l_freq, h_freq, **kwargs)
        wanted_h = h_freq if kind != 'bandstop' else None
        wanted_l = l_freq if kind != 'bandstop' else None
        assert_equal(raw_filt.info['lowpass'], wanted_h)
        assert_equal(raw_filt.info['highpass'], wanted_l)
        # Using all data channels should still set the params (GH#3259)
        raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(2),
                                     **kwargs)
        assert_equal(raw_filt.info['lowpass'], wanted_h)
        assert_equal(raw_filt.info['highpass'], wanted_l)
def test_filter_picks():
    """Test filtering default channel picks."""
    ch_types = ['mag', 'grad', 'eeg', 'seeg', 'misc', 'stim', 'ecog', 'hbo',
                'hbr']
    info = create_info(ch_names=ch_types, ch_types=ch_types, sfreq=256)
    raw = RawArray(data=np.zeros((len(ch_types), 1000)), info=info)
    # pick_types uses meg=/fnirs= selectors rather than mag/grad/hbo/hbr keys
    pick_keys = ('misc', 'stim', 'meg', 'eeg', 'seeg', 'ecog')
    # -- every data channel type must filter without complaint
    for target in ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'hbo', 'hbr'):
        kwargs = {key: key == target for key in pick_keys}
        kwargs['meg'] = target if target in ('mag', 'grad') else False
        kwargs['fnirs'] = target if target in ('hbo', 'hbr') else False
        raw.copy().pick_types(**kwargs).filter(10, 30, fir_design='firwin')
    # -- with no data channel at all, filtering is an error
    for target in ('misc', 'stim'):
        kwargs = {key: key == target for key in pick_keys}
        only_non_data = raw.copy().pick_types(**kwargs)
        assert_raises(RuntimeError, only_non_data.filter, 10, 30)
@testing.requires_testing_data
def test_crop():
    """Test cropping raw files."""
    # split a concatenated file to test a difficult case
    raw = concatenate_raws([read_raw_fif(f)
                            for f in [fif_fname, fif_fname]])
    split_size = 10.  # in seconds
    sfreq = raw.info['sfreq']
    nsamp = (raw.last_samp - raw.first_samp + 1)
    # do an annoying case (off-by-one splitting)
    tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))]
    tmins = np.sort(tmins)
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
    tmaxs /= sfreq
    tmins /= sfreq
    raws = [None] * len(tmins)
    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
        raws[ri] = raw.copy().crop(tmin, tmax)
    # re-concatenating the crops must reproduce the original exactly
    all_raw_2 = concatenate_raws(raws, preload=False)
    assert_equal(raw.first_samp, all_raw_2.first_samp)
    assert_equal(raw.last_samp, all_raw_2.last_samp)
    assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
    tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
    tmaxs /= sfreq
    tmins /= sfreq
    # going in reverse order so the last fname is the first file (need later)
    raws = [None] * len(tmins)
    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
        raws[ri] = raw.copy().crop(tmin, tmax)
    # test concatenation of split file
    all_raw_1 = concatenate_raws(raws, preload=False)
    # crop(0, None) must be a no-op copy
    all_raw_2 = raw.copy().crop(0, None)
    for ar in [all_raw_1, all_raw_2]:
        assert_equal(raw.first_samp, ar.first_samp)
        assert_equal(raw.last_samp, ar.last_samp)
        assert_array_equal(raw[:, :][0], ar[:, :][0])
    # test shape consistency of cropped raw
    data = np.zeros((1, 1002001))
    info = create_info(1, 1000)
    raw = RawArray(data, info)
    for tmin in range(0, 1001, 100):
        raw1 = raw.copy().crop(tmin=tmin, tmax=tmin + 2)
        # a 2-second crop at 1 kHz is 2001 samples (inclusive endpoints)
        assert_equal(raw1[:][0].shape, (1, 2001))
@testing.requires_testing_data
def test_resample():
    """Test resample (with I/O and multiple files).

    Covers: up/down-sampling round trip, concat-then-resample equivalence,
    stim-channel-preserving decimation, event resampling, and the copy flag.
    """
    tempdir = _TempDir()
    raw = read_raw_fif(fif_fname).crop(0, 3)
    raw.load_data()
    raw_resamp = raw.copy()
    sfreq = raw.info['sfreq']
    # test parallel on upsample
    raw_resamp.resample(sfreq * 2, n_jobs=2, npad='auto')
    assert_equal(raw_resamp.n_times, len(raw_resamp.times))
    raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
    raw_resamp = read_raw_fif(op.join(tempdir, 'raw_resamp-raw.fif'),
                              preload=True)
    assert_equal(sfreq, raw_resamp.info['sfreq'] / 2)
    assert_equal(raw.n_times, raw_resamp.n_times / 2)
    assert_equal(raw_resamp._data.shape[1], raw_resamp.n_times)
    assert_equal(raw._data.shape[0], raw_resamp._data.shape[0])
    # test non-parallel on downsample
    raw_resamp.resample(sfreq, n_jobs=1, npad='auto')
    assert_equal(raw_resamp.info['sfreq'], sfreq)
    assert_equal(raw._data.shape, raw_resamp._data.shape)
    assert_equal(raw.first_samp, raw_resamp.first_samp)
    # BUG FIX: this previously compared raw.last_samp with itself
    assert_equal(raw.last_samp, raw_resamp.last_samp)
    # upsampling then downsampling doubles resampling error, but this still
    # works (hooray). Note that the stim channels had to be sub-sampled
    # without filtering to be accurately preserved
    # note we have to treat MEG and EEG+STIM channels differently (tols)
    assert_allclose(raw._data[:306, 200:-200],
                    raw_resamp._data[:306, 200:-200],
                    rtol=1e-2, atol=1e-12)
    assert_allclose(raw._data[306:, 200:-200],
                    raw_resamp._data[306:, 200:-200],
                    rtol=1e-2, atol=1e-7)
    # now check multiple file support w/resampling, as order of operations
    # (concat, resample) should not affect our data
    raw1 = raw.copy()
    raw2 = raw.copy()
    raw3 = raw.copy()
    raw4 = raw.copy()
    raw1 = concatenate_raws([raw1, raw2])
    raw1.resample(10., npad='auto')
    raw3.resample(10., npad='auto')
    raw4.resample(10., npad='auto')
    raw3 = concatenate_raws([raw3, raw4])
    assert_array_equal(raw1._data, raw3._data)
    assert_array_equal(raw1._first_samps, raw3._first_samps)
    assert_array_equal(raw1._last_samps, raw3._last_samps)
    assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
    assert_equal(raw1.first_samp, raw3.first_samp)
    assert_equal(raw1.last_samp, raw3.last_samp)
    assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])
    # test resampling of stim channel
    # basic decimation
    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    assert_allclose(raw.resample(8., npad='auto')._data,
                    [[1, 1, 0, 0, 1, 1, 0, 0]])
    # decimation of multiple stim channels
    raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim']))
    assert_allclose(raw.resample(8., npad='auto')._data,
                    [[1, 1, 0, 0, 1, 1, 0, 0],
                     [1, 1, 0, 0, 1, 1, 0, 0]])
    # decimation that could potentially drop events if the decimation is
    # done naively
    stim = [0, 0, 0, 1, 1, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    assert_allclose(raw.resample(4., npad='auto')._data,
                    [[0, 1, 1, 0]])
    # two events are merged in this case (warning)
    stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw.resample(8., npad='auto')
    assert_true(len(w) == 1)
    # events are dropped in this case (warning)
    stim = [0, 1, 1, 0, 0, 1, 1, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw.resample(4., npad='auto')
    assert_true(len(w) == 1)
    # test resampling events: this should no longer give a warning
    stim = [0, 1, 1, 0, 0, 1, 1, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    events = find_events(raw)
    raw, events = raw.resample(4., events=events, npad='auto')
    assert_equal(events, np.array([[0, 0, 1], [2, 0, 1]]))
    # test copy flag
    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    raw_resampled = raw.copy().resample(4., npad='auto')
    assert_true(raw_resampled is not raw)
    raw_resampled = raw.resample(4., npad='auto')
    assert_true(raw_resampled is raw)
    # resample should still work even when no stim channel is present
    raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg']))
    raw.info['lowpass'] = 50.
    raw.resample(10, npad='auto')
    # lowpass is reduced to the new Nyquist frequency
    assert_equal(raw.info['lowpass'], 5.)
    assert_equal(len(raw), 10)
@testing.requires_testing_data
def test_hilbert():
    """Test computation of analytic signal using hilbert.

    Checks that ``n_fft='auto'`` matches an explicitly computed FFT length,
    that too-small ``n_fft`` raises, and that ``envelope=True`` equals the
    magnitude of the analytic signal.
    """
    raw = read_raw_fif(fif_fname, preload=True)
    # restrict to 4 good MEG channels for speed
    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
    picks = picks_meg[:4]
    raw_filt = raw.copy()
    raw_filt.filter(10, 20, picks=picks, l_trans_bandwidth='auto',
                    h_trans_bandwidth='auto', filter_length='auto',
                    phase='zero', fir_window='blackman', fir_design='firwin')
    raw_filt_2 = raw_filt.copy()
    raw2 = raw.copy()
    raw3 = raw.copy()
    raw.apply_hilbert(picks, n_fft='auto')
    raw2.apply_hilbert(picks, n_fft='auto', envelope=True)
    # Test custom n_fft
    raw_filt.apply_hilbert(picks, n_fft='auto')
    # explicit length: next power of two after n_times + 1000 samples of pad
    n_fft = 2 ** int(np.ceil(np.log2(raw_filt_2.n_times + 1000)))
    raw_filt_2.apply_hilbert(picks, n_fft=n_fft)
    assert_equal(raw_filt._data.shape, raw_filt_2._data.shape)
    # compare away from the edges, where padding choices differ
    assert_allclose(raw_filt._data[:, 50:-50], raw_filt_2._data[:, 50:-50],
                    atol=1e-13, rtol=1e-2)
    # n_fft shorter than the signal is invalid
    assert_raises(ValueError, raw3.apply_hilbert, picks,
                  n_fft=raw3.n_times - 100)
    # envelope=True must equal |analytic signal|
    env = np.abs(raw._data[picks, :])
    assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13)
@testing.requires_testing_data
def test_raw_copy():
    """Test Raw copy.

    For both preloaded and non-preloaded reads, a copy must reproduce the
    data and carry the same set of instance attributes.
    """
    for preload in (True, False):
        original = read_raw_fif(fif_fname, preload=preload)
        duplicate = original.copy()
        orig_data, _ = original[:, :]
        dup_data, _ = duplicate[:, :]
        assert_array_equal(orig_data, dup_data)
        assert_equal(sorted(original.__dict__.keys()),
                     sorted(duplicate.__dict__.keys()))
@requires_pandas
def test_to_data_frame():
    """Test raw Pandas exporter."""
    raw = read_raw_fif(test_fif_fname, preload=True)
    _, times = raw[0, :10]
    df = raw.to_data_frame()
    # columns are the channel names, in order
    assert_true((df.columns == raw.ch_names).all())
    # default index is time in milliseconds
    assert_array_equal(np.round(times * 1e3), df.index.values[:10])
    df = raw.to_data_frame(index=None)
    assert_true('time' in df.index.names)
    # exported values are rescaled by 1e13 / 1e15 relative to the internal
    # SI data -- presumably the grad/mag unit scalings applied by
    # to_data_frame; TODO confirm against its scaling defaults
    assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
    assert_array_equal(df.values[:, 2], raw._data[2] * 1e15)
def test_add_channels():
    """Test raw splitting / re-appending channel types.

    Splits a recording into MEG / EEG / stim subsets, re-combines them with
    ``add_channels``, and verifies channel names and data integrity, the
    ``force_update_info`` behavior, and the error paths.
    """
    rng = np.random.RandomState(0)
    raw = read_raw_fif(test_fif_fname).crop(0, 1).load_data()
    raw_nopre = read_raw_fif(test_fif_fname, preload=False)
    raw_eeg_meg = raw.copy().pick_types(meg=True, eeg=True)
    raw_eeg = raw.copy().pick_types(meg=False, eeg=True)
    raw_meg = raw.copy().pick_types(meg=True, eeg=False)
    raw_stim = raw.copy().pick_types(meg=False, eeg=False, stim=True)
    raw_new = raw_meg.copy().add_channels([raw_eeg, raw_stim])
    assert_true(
        all(ch in raw_new.ch_names
            for ch in list(raw_stim.ch_names) + list(raw_meg.ch_names))
    )
    raw_new = raw_meg.copy().add_channels([raw_eeg])
    # BUG FIX: this used to assert on a bare generator expression, which is
    # always truthy, so nothing was actually checked.  The intended check is
    # that every MEG+EEG channel made it into the combined object (stim
    # channels are deliberately absent here, see below).
    assert_true(all(ch in raw_new.ch_names for ch in raw_eeg_meg.ch_names))
    assert_array_equal(raw_new[:, :][0], raw_eeg_meg[:, :][0])
    assert_array_equal(raw_new[:, :][1], raw[:, :][1])
    assert_true(all(ch not in raw_new.ch_names for ch in raw_stim.ch_names))
    # Testing force updates
    raw_arr_info = create_info(['1', '2'], raw_meg.info['sfreq'], 'eeg')
    orig_head_t = raw_arr_info['dev_head_t']
    raw_arr = rng.randn(2, raw_eeg.n_times)
    raw_arr = RawArray(raw_arr, raw_arr_info)
    # This should error because of conflicts in Info
    assert_raises(ValueError, raw_meg.copy().add_channels, [raw_arr])
    raw_meg.copy().add_channels([raw_arr], force_update_info=True)
    # Make sure that values didn't get overwritten
    assert_equal(object_diff(raw_arr.info['dev_head_t'], orig_head_t), '')
    # Now test errors
    raw_badsf = raw_eeg.copy()
    raw_badsf.info['sfreq'] = 3.1415927
    raw_eeg.crop(.5)
    # unloaded input, bad sfreq, mismatched n_times, duplicate channels,
    # and a non-list argument must all raise
    assert_raises(AssertionError, raw_meg.add_channels, [raw_nopre])
    assert_raises(RuntimeError, raw_meg.add_channels, [raw_badsf])
    assert_raises(AssertionError, raw_meg.add_channels, [raw_eeg])
    assert_raises(ValueError, raw_meg.add_channels, [raw_meg])
    assert_raises(AssertionError, raw_meg.add_channels, raw_badsf)
@testing.requires_testing_data
def test_save():
    """Test saving raw.

    Covers overwrite protection, absolute-path saving, and annotation
    round-trip / synchronization through crop and concatenation.
    """
    tempdir = _TempDir()
    raw = read_raw_fif(fif_fname, preload=False)
    # can't write over file being read
    assert_raises(ValueError, raw.save, fif_fname)
    raw = read_raw_fif(fif_fname, preload=True)
    # can't overwrite file without overwrite=True
    assert_raises(IOError, raw.save, fif_fname)
    # test abspath support and annotations
    sfreq = raw.info['sfreq']
    annot = Annotations([10], [5], ['test'],
                        raw.info['meas_date'] + raw.first_samp / sfreq)
    raw.annotations = annot
    # new_fname is absolute, so op.join(tempdir, new_fname) resolves to
    # new_fname itself -- that is exactly the abspath behavior under test
    new_fname = op.join(op.abspath(op.curdir), 'break-raw.fif')
    raw.save(op.join(tempdir, new_fname), overwrite=True)
    new_raw = read_raw_fif(op.join(tempdir, new_fname), preload=False)
    assert_raises(ValueError, new_raw.save, new_fname)
    # annotations must round-trip through save/load
    assert_array_equal(annot.onset, new_raw.annotations.onset)
    assert_array_equal(annot.duration, new_raw.annotations.duration)
    assert_array_equal(annot.description, new_raw.annotations.description)
    assert_equal(annot.orig_time, new_raw.annotations.orig_time)
    # test that annotations are in sync after cropping and concatenating
    annot = Annotations([5., 11., 15.], [2., 1., 3.], ['test', 'test', 'test'])
    raw.annotations = annot
    with warnings.catch_warnings(record=True) as w:
        r1 = raw.copy().crop(2.5, 7.5)
        r2 = raw.copy().crop(12.5, 17.5)
        r3 = raw.copy().crop(10., 12.)
    assert_true(all('data range' in str(ww.message) for ww in w))
    raw = concatenate_raws([r1, r2, r3])  # segments reordered
    onsets = raw.annotations.onset
    durations = raw.annotations.duration
    # 2*5s clips combined with annotations at 2.5s + 2s clip, annotation at 1s
    assert_array_almost_equal([2.5, 7.5, 11.], onsets[:3], decimal=2)
    assert_array_almost_equal([2., 2.5, 1.], durations[:3], decimal=2)
    # test annotation clipping
    annot = Annotations([0., raw.times[-1]], [2., 2.], 'test',
                        raw.info['meas_date'] + raw.first_samp / sfreq - 1.)
    with warnings.catch_warnings(record=True) as w:  # outside range
        raw.annotations = annot
    assert_true(all('data range' in str(ww.message) for ww in w))
    # out-of-range annotations get clipped to 1 s each
    assert_array_almost_equal(raw.annotations.duration, [1., 1.], decimal=3)
    # make sure we can overwrite the file we loaded when preload=True
    new_raw = read_raw_fif(op.join(tempdir, new_fname), preload=True)
    new_raw.save(op.join(tempdir, new_fname), overwrite=True)
    os.remove(new_fname)
@testing.requires_testing_data
def test_with_statement():
    """Test that Raw objects support the context-manager protocol."""
    for preload in (True, False):
        raw_cm = read_raw_fif(fif_fname, preload=preload)
        with raw_cm as raw_:
            print(raw_)
def test_compensation_raw():
    """Test Raw compensation.

    Round-trips a CTF recording between gradient-compensation grades
    0/1/3, with and without preloading, and checks that data and grades
    survive saving/loading.
    """
    tempdir = _TempDir()
    raw_3 = read_raw_fif(ctf_comp_fname)
    assert_equal(raw_3.compensation_grade, 3)
    data_3, times = raw_3[:, :]
    # data come with grade 3
    for ii in range(2):
        # applying the already-active grade must be a no-op, both with
        # (ii == 0) and without preloaded data
        raw_3_new = raw_3.copy()
        if ii == 0:
            raw_3_new.load_data()
        raw_3_new.apply_gradient_compensation(3)
        assert_equal(raw_3_new.compensation_grade, 3)
        data_new, times_new = raw_3_new[:, :]
        assert_array_equal(times, times_new)
        assert_array_equal(data_3, data_new)
    # change to grade 0
    raw_0 = raw_3.copy().apply_gradient_compensation(0)
    assert_equal(raw_0.compensation_grade, 0)
    data_0, times_new = raw_0[:, :]
    assert_array_equal(times, times_new)
    # grade change must actually modify the data
    assert_true(np.mean(np.abs(data_0 - data_3)) > 1e-12)
    # change to grade 1
    raw_1 = raw_0.copy().apply_gradient_compensation(1)
    assert_equal(raw_1.compensation_grade, 1)
    data_1, times_new = raw_1[:, :]
    assert_array_equal(times, times_new)
    assert_true(np.mean(np.abs(data_1 - data_3)) > 1e-12)
    # invalid grade
    assert_raises(ValueError, raw_1.apply_gradient_compensation, 33)
    # compensation is not allowed once projectors have been applied
    raw_bad = raw_0.copy()
    raw_bad.add_proj(compute_proj_raw(raw_0, duration=0.5, verbose='error'))
    raw_bad.apply_proj()
    assert_raises(RuntimeError, raw_bad.apply_gradient_compensation, 1)
    # with preload
    tols = dict(rtol=1e-12, atol=1e-25)
    raw_1_new = raw_3.copy().load_data().apply_gradient_compensation(1)
    assert_equal(raw_1_new.compensation_grade, 1)
    data_1_new, times_new = raw_1_new[:, :]
    assert_array_equal(times, times_new)
    assert_true(np.mean(np.abs(data_1_new - data_3)) > 1e-12)
    assert_allclose(data_1, data_1_new, **tols)
    # change back
    raw_3_new = raw_1.copy().apply_gradient_compensation(3)
    data_3_new, times_new = raw_3_new[:, :]
    assert_allclose(data_3, data_3_new, **tols)
    raw_3_new = raw_1.copy().load_data().apply_gradient_compensation(3)
    data_3_new, times_new = raw_3_new[:, :]
    assert_allclose(data_3, data_3_new, **tols)
    # grade 0 -> 3 and 1 -> 3 both recover the original grade-3 data
    for load in (False, True):
        for raw in (raw_0, raw_1):
            raw_3_new = raw.copy()
            if load:
                raw_3_new.load_data()
            raw_3_new.apply_gradient_compensation(3)
            assert_equal(raw_3_new.compensation_grade, 3)
            data_3_new, times_new = raw_3_new[:, :]
            assert_array_equal(times, times_new)
            assert_true(np.mean(np.abs(data_3_new - data_1)) > 1e-12)
            assert_allclose(data_3, data_3_new, **tols)
    # Try IO with compensation
    temp_file = op.join(tempdir, 'raw.fif')
    raw_3.save(temp_file, overwrite=True)
    for preload in (True, False):
        raw_read = read_raw_fif(temp_file, preload=preload)
        assert_equal(raw_read.compensation_grade, 3)
        data_read, times_new = raw_read[:, :]
        assert_array_equal(times, times_new)
        assert_allclose(data_3, data_read, **tols)
        raw_read.apply_gradient_compensation(1)
        data_read, times_new = raw_read[:, :]
        assert_array_equal(times, times_new)
        assert_allclose(data_1, data_read, **tols)
    # Now save the file that has modified compensation
    # and make sure the compensation is the same as it was,
    # but that we can undo it
    # These channels have norm 1e-11/1e-12, so atol=1e-18 isn't awesome,
    # but it's due to the single precision of the info['comps'] leading
    # to inexact inversions with saving/loading (casting back to single)
    # in between (e.g., 1->3->1 will degrade like this)
    looser_tols = dict(rtol=1e-6, atol=1e-18)
    raw_1.save(temp_file, overwrite=True)
    for preload in (True, False):
        raw_read = read_raw_fif(temp_file, preload=preload, verbose=True)
        assert_equal(raw_read.compensation_grade, 1)
        data_read, times_new = raw_read[:, :]
        assert_array_equal(times, times_new)
        assert_allclose(data_1, data_read, **looser_tols)
        raw_read.apply_gradient_compensation(3, verbose=True)
        data_read, times_new = raw_read[:, :]
        assert_array_equal(times, times_new)
        assert_allclose(data_3, data_read, **looser_tols)
@requires_mne
def test_compensation_raw_mne():
    """Test Raw compensation by comparing with MNE-C.

    Applies each compensation grade in Python and via the external
    ``mne_process_raw`` binary, then compares data and channel metadata.
    """
    tempdir = _TempDir()

    def compensate_mne(fname, grad):
        # Run MNE-C's compensation on the same file and read the result back.
        tmp_fname = op.join(tempdir, 'mne_ctf_test_raw.fif')
        cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname,
               '--grad', str(grad), '--projoff', '--filteroff']
        run_subprocess(cmd)
        return read_raw_fif(tmp_fname, preload=True)

    for grad in [0, 2, 3]:
        raw_py = read_raw_fif(ctf_comp_fname, preload=True)
        raw_py.apply_gradient_compensation(grad)
        raw_c = compensate_mne(ctf_comp_fname, grad)
        assert_allclose(raw_py._data, raw_c._data, rtol=1e-6, atol=1e-17)
        assert_equal(raw_py.info['nchan'], raw_c.info['nchan'])
        # channel metadata must agree field-by-field as well
        for ch_py, ch_c in zip(raw_py.info['chs'], raw_c.info['chs']):
            for key in ('ch_name', 'coil_type', 'scanno', 'logno', 'unit',
                        'coord_frame', 'kind'):
                assert_equal(ch_py[key], ch_c[key])
            for key in ('loc', 'unit_mul', 'range', 'cal'):
                assert_allclose(ch_py[key], ch_c[key])
@testing.requires_testing_data
def test_drop_channels_mixin():
    """Test channels-dropping functionality.

    ``drop_channels`` on a copy must leave the source untouched; in-place
    dropping must shrink names, calibrations, and data consistently.
    """
    raw = read_raw_fif(fif_fname, preload=True)
    to_drop = raw.ch_names[:3]
    kept_names = raw.ch_names[3:]
    all_names = raw.ch_names
    # dropping on a copy leaves the original intact
    dummy = raw.copy().drop_channels(to_drop)
    assert_equal(kept_names, dummy.ch_names)
    assert_equal(all_names, raw.ch_names)
    assert_equal(len(all_names), raw._data.shape[0])
    # in-place drop shrinks names, cals, and data together
    raw.drop_channels(to_drop)
    assert_equal(kept_names, raw.ch_names)
    assert_equal(len(kept_names), len(raw._cals))
    assert_equal(len(kept_names), raw._data.shape[0])
@testing.requires_testing_data
def test_pick_channels_mixin():
    """Test channel-picking functionality."""
    # preload is True
    raw = read_raw_fif(fif_fname, preload=True)
    ch_names = raw.ch_names[:3]
    ch_names_orig = raw.ch_names
    # picking on a copy leaves the original untouched
    dummy = raw.copy().pick_channels(ch_names)
    assert_equal(ch_names, dummy.ch_names)
    assert_equal(ch_names_orig, raw.ch_names)
    assert_equal(len(ch_names_orig), raw._data.shape[0])
    # in-place pick shrinks names, cals, and data together
    raw.pick_channels(ch_names)  # copy is False
    assert_equal(ch_names, raw.ch_names)
    assert_equal(len(ch_names), len(raw._cals))
    assert_equal(len(ch_names), raw._data.shape[0])
    # a bare string (not a list of names) is rejected
    assert_raises(ValueError, raw.pick_channels, ch_names[0])
    # picking/dropping requires preloaded data
    raw = read_raw_fif(fif_fname, preload=False)
    assert_raises(RuntimeError, raw.pick_channels, ch_names)
    assert_raises(RuntimeError, raw.drop_channels, ch_names)
@testing.requires_testing_data
def test_equalize_channels():
    """Test equalization of channels.

    Drop a different channel from each of two copies; equalize_channels
    must reduce both to the common set (everything from index 2 on).
    """
    raw1 = read_raw_fif(fif_fname, preload=True)
    raw2 = raw1.copy()
    expected_names = raw1.ch_names[2:]
    # drop channel 0 from one copy and channel 1 from the other
    raw1.drop_channels(raw1.ch_names[:1])
    raw2.drop_channels(raw2.ch_names[1:2])
    pair = [raw1, raw2]
    equalize_channels(pair)
    for inst in pair:
        assert_equal(expected_names, inst.ch_names)
# Allow running this test module directly as a script.
run_tests_if_main()
| {
"repo_name": "jaeilepp/mne-python",
"path": "mne/io/fiff/tests/test_raw_fiff.py",
"copies": "1",
"size": "57420",
"license": "bsd-3-clause",
"hash": -3511585142599957000,
"line_mean": 39.78125,
"line_max": 79,
"alpha_frac": 0.605276907,
"autogenerated": false,
"ratio": 3.100766821471001,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9206043728471001,
"avg_score": 0,
"num_lines": 1408
} |
from copy import deepcopy
from functools import partial
import itertools as itt
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
import pytest
from mne.datasets import testing
from mne.filter import filter_data
from mne.io.constants import FIFF
from mne.io import RawArray, concatenate_raws, read_raw_fif
from mne.io.tests.test_raw import _test_concat, _test_raw_reader
from mne import (concatenate_events, find_events, equalize_channels,
compute_proj_raw, pick_types, pick_channels, create_info)
from mne.utils import (_TempDir, requires_pandas, object_diff,
requires_mne, run_subprocess, run_tests_if_main)
from mne.externals.six.moves import zip, cPickle as pickle
from mne.io.proc_history import _get_rank_sss
from mne.io.pick import _picks_by_type
from mne.annotations import Annotations
# Paths into the (optional) mne-testing-data set; download=False means the
# path may not exist, in which case @testing.requires_testing_data skips.
testing_path = testing.data_path(download=False)
data_dir = op.join(testing_path, 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
ms_fname = op.join(testing_path, 'SSS', 'test_move_anon_raw.fif')
skip_fname = op.join(testing_path, 'misc', 'intervalrecording_raw.fif')
# Small fixture files that ship with the package itself.
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
test_fif_fname = op.join(base_dir, 'test_raw.fif')
test_fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif')
bad_file_works = op.join(base_dir, 'test_bads.txt')
bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt')
hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
@testing.requires_testing_data
def test_acq_skip():
    """Test treatment of acquisition skips.

    Skipped buffers must show up as annotations, be honored by
    ``reject_by_annotation`` and filtering, and not inflate saved files.
    """
    raw = read_raw_fif(skip_fname, preload=True)
    picks = [1, 2, 10]
    assert_equal(len(raw.times), 17000)
    annotations = raw.annotations
    assert_equal(len(annotations), 3)  # there are 3 skips
    assert_allclose(annotations.onset, [2, 7, 11])
    assert_allclose(annotations.duration, [2., 2., 3.])  # inclusive!
    data, times = raw.get_data(
        picks, reject_by_annotation='omit', return_times=True)
    # omitting the annotated spans must leave exactly these four segments
    expected_data, expected_times = zip(raw[picks, :2000],
                                        raw[picks, 4000:7000],
                                        raw[picks, 9000:11000],
                                        raw[picks, 14000:17000])
    expected_times = np.concatenate(list(expected_times), axis=-1)
    assert_allclose(times, expected_times)
    expected_data = list(expected_data)
    assert_allclose(data, np.concatenate(expected_data, axis=-1), atol=1e-22)

    # Check that acquisition skips are handled properly in filtering
    kwargs = dict(l_freq=None, h_freq=50., fir_design='firwin')
    raw_filt = raw.copy().filter(picks=picks, **kwargs)
    for data in expected_data:
        # filter each kept segment independently for the reference result
        filter_data(data, raw.info['sfreq'], copy=False, **kwargs)
    data = raw_filt.get_data(picks, reject_by_annotation='omit')
    assert_allclose(data, np.concatenate(expected_data, axis=-1), atol=1e-22)

    # Check that acquisition skips are handled properly during I/O
    tempdir = _TempDir()
    fname = op.join(tempdir, 'test_raw.fif')
    raw.save(fname, fmt=raw.orig_format)
    # first: file size should not increase much (orig data is missing
    # 7 of 17 buffers, so if we write them out it should increase the file
    # size quite a bit.
    orig_size = op.getsize(skip_fname)
    new_size = op.getsize(fname)
    max_size = int(1.05 * orig_size)  # almost the same + annotations
    assert new_size < max_size, (new_size, max_size)
    raw_read = read_raw_fif(fname)
    assert raw_read.annotations is not None
    assert_allclose(raw.times, raw_read.times)
    assert_allclose(raw_read[:][0], raw[:][0], atol=1e-17)
    # Saving with a bad buffer length emits warning
    raw.pick_channels(raw.ch_names[:2])
    with pytest.warns(None) as w:
        raw.save(fname, buffer_size_sec=0.5, overwrite=True)
    assert len(w) == 0
    with pytest.warns(RuntimeWarning, match='did not fit evenly'):
        raw.save(fname, buffer_size_sec=2., overwrite=True)
def test_fix_types():
    """Test fixing of channel types.

    ``fix_mag_coil_types`` must rewrite faked T2 magnetometer coil types to
    T3 while leaving all other channels untouched, and be a no-op on files
    that don't need fixing.
    """
    for fname, change in ((hp_fif_fname, True), (test_fif_fname, False),
                          (ctf_fname, False)):
        raw = read_raw_fif(fname)
        mag_picks = pick_types(raw.info, meg='mag')
        other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), mag_picks)
        # we don't actually have any files suffering from this problem, so
        # fake it
        if change:
            for ii in mag_picks:
                raw.info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T2
        orig_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
        raw.fix_mag_coil_types()
        new_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
        if not change:
            # nothing to fix -> nothing changed
            assert_array_equal(orig_types, new_types)
        else:
            # only magnetometer coil types change, and all become T3
            assert_array_equal(orig_types[other_picks], new_types[other_picks])
            assert ((orig_types[mag_picks] != new_types[mag_picks]).all())
            assert ((new_types[mag_picks] ==
                     FIFF.FIFFV_COIL_VV_MAG_T3).all())
def test_concat():
    """Test RawFIF concatenation."""
    # we trim the file to save lots of memory and some time
    tempdir = _TempDir()
    raw = read_raw_fif(test_fif_fname)
    raw.crop(0, 2.)
    test_name = op.join(tempdir, 'test_raw.fif')
    raw.save(test_name)
    # now run the standard test
    _test_concat(partial(read_raw_fif), test_name)
@testing.requires_testing_data
def test_hash_raw():
    """Test hashing raw objects."""
    raw = read_raw_fif(fif_fname)
    # hashing requires preloaded data
    pytest.raises(RuntimeError, raw.__hash__)
    raw = read_raw_fif(fif_fname).crop(0, 0.5)
    raw_size = raw._size
    raw.load_data()
    raw_load_size = raw._size
    # loading the data grows the in-memory size
    assert (raw_size < raw_load_size)
    raw_2 = read_raw_fif(fif_fname).crop(0, 0.5)
    raw_2.load_data()
    # equal content -> equal hash and equal pickle
    assert_equal(hash(raw), hash(raw_2))
    # do NOT use assert_equal here, failing output is terrible
    assert_equal(pickle.dumps(raw), pickle.dumps(raw_2))
    # a single-sample change must change the hash
    raw_2._data[0, 0] -= 1
    assert hash(raw) != hash(raw_2)
@testing.requires_testing_data
def test_maxshield():
    """Test maxshield warning."""
    with pytest.warns(RuntimeWarning, match='Internal Active Shielding') as w:
        read_raw_fif(ms_fname, allow_maxshield=True)
    # the warning should be attributed to this (calling) file
    assert ('test_raw_fiff.py' in w[0].filename)
@testing.requires_testing_data
def test_subject_info():
    """Test reading subject information.

    Fakes a ``subject_info`` dict, saves, reloads, and verifies that the
    subject fields and measurement identifiers round-trip through FIF.
    """
    tempdir = _TempDir()
    raw = read_raw_fif(fif_fname).crop(0, 1)
    assert (raw.info['subject_info'] is None)
    # fake some subject data
    keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex',
            'hand']
    vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1]
    # idiomatic replacement for the old manual key/value assignment loop
    subject_info = dict(zip(keys, vals))
    raw.info['subject_info'] = subject_info
    out_fname = op.join(tempdir, 'test_subj_info_raw.fif')
    raw.save(out_fname, overwrite=True)
    raw_read = read_raw_fif(out_fname)
    for key in keys:
        assert_equal(subject_info[key], raw_read.info['subject_info'][key])
    # measurement date and id must survive the round-trip too
    assert_equal(raw.info['meas_date'], raw_read.info['meas_date'])
    for key in ['secs', 'usecs', 'version']:
        assert_equal(raw.info['meas_id'][key], raw_read.info['meas_id'][key])
    assert_array_equal(raw.info['meas_id']['machid'],
                       raw_read.info['meas_id']['machid'])
@testing.requires_testing_data
def test_copy_append():
    """Test raw copying and appending combinations."""
    raw_preloaded = read_raw_fif(fif_fname, preload=True).copy()
    raw_full = read_raw_fif(fif_fname)
    raw_full.append(raw_preloaded)
    # appending doubles the number of samples
    appended_data, _ = raw_full[:, :]
    assert_equal(appended_data.shape[1], 2 * raw_preloaded._data.shape[1])
@pytest.mark.slowtest
@testing.requires_testing_data
def test_rank_estimation():
    """Test raw rank estimation.

    Compares ``estimate_rank`` against the expected rank for plain and
    SSS-processed files, both full-channel and EEG-only, with and without
    projectors applied.
    """
    iter_tests = itt.product(
        [fif_fname, hp_fif_fname],  # sss
        ['norm', dict(mag=1e11, grad=1e9, eeg=1e5)]
    )
    for fname, scalings in iter_tests:
        raw = read_raw_fif(fname).crop(0, 4.).load_data()
        (_, picks_meg), (_, picks_eeg) = _picks_by_type(raw.info,
                                                        meg_combined=True)
        n_meg = len(picks_meg)
        n_eeg = len(picks_eeg)
        if len(raw.info['proc_history']) == 0:
            # unprocessed data: full rank
            expected_rank = n_meg + n_eeg
        else:
            # SSS reduces the MEG rank; EEG is unaffected
            expected_rank = _get_rank_sss(raw.info) + n_eeg
        assert_array_equal(raw.estimate_rank(scalings=scalings), expected_rank)
        assert_array_equal(raw.estimate_rank(picks=picks_eeg,
                                             scalings=scalings), n_eeg)
        if 'sss' in fname:
            raw.add_proj(compute_proj_raw(raw))
        raw.apply_proj()
        n_proj = len(raw.info['projs'])
        # each applied projector removes one dimension (non-SSS case)
        assert_array_equal(raw.estimate_rank(tstart=0, tstop=3.,
                                             scalings=scalings),
                           expected_rank - (0 if 'sss' in fname else n_proj))
@testing.requires_testing_data
def test_output_formats():
    """Test saving and loading raw data using multiple formats."""
    tempdir = _TempDir()
    formats = ['short', 'int', 'single', 'double']
    # per-format round-trip tolerances (coarser for lower-precision formats)
    tols = [1e-4, 1e-7, 1e-7, 1e-15]

    # let's fake a raw file with different formats
    raw = read_raw_fif(test_fif_fname).crop(0, 1)
    temp_file = op.join(tempdir, 'raw.fif')
    for ii, (fmt, tol) in enumerate(zip(formats, tols)):
        # Let's test the overwriting error throwing while we're at it
        if ii > 0:
            pytest.raises(IOError, raw.save, temp_file, fmt=fmt)
        raw.save(temp_file, fmt=fmt, overwrite=True)
        raw2 = read_raw_fif(temp_file)
        raw2_data = raw2[:, :][0]
        assert_allclose(raw2_data, raw[:, :][0], rtol=tol, atol=1e-25)
        # format string should be preserved on the loaded object
        assert_equal(raw2.orig_format, fmt)
def _compare_combo(raw, new, times, n_times):
"""Compare data."""
for ti in times: # let's do a subset of points for speed
orig = raw[:, ti % n_times][0]
# these are almost_equals because of possible dtype differences
assert_allclose(orig, new[:, ti][0])
@pytest.mark.slowtest
@testing.requires_testing_data
def test_multiple_files():
    """Test loading multiple files simultaneously.

    Splits a raw file into pieces, reassembles it via ``concatenate_raws``
    with every preload combination (False / True / memmapped), and checks
    data, events, and sample bookkeeping.
    """
    # split file
    tempdir = _TempDir()
    raw = read_raw_fif(fif_fname).crop(0, 10)
    raw.load_data()
    raw.load_data()  # test no operation
    split_size = 3.  # in seconds
    sfreq = raw.info['sfreq']
    nsamp = (raw.last_samp - raw.first_samp)
    # per-piece boundaries, computed in samples then converted to seconds
    tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
    tmaxs /= sfreq
    tmins /= sfreq
    assert_equal(raw.n_times, len(raw.times))

    # going in reverse order so the last fname is the first file (need later)
    raws = [None] * len(tmins)
    for ri in range(len(tmins) - 1, -1, -1):
        fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
        raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
        raws[ri] = read_raw_fif(fname)
        assert_equal(len(raws[ri].times),
                     int(round((tmaxs[ri] - tmins[ri]) *
                               raw.info['sfreq'])) + 1)  # + 1 b/c inclusive
    events = [find_events(r, stim_channel='STI 014') for r in raws]
    last_samps = [r.last_samp for r in raws]
    first_samps = [r.first_samp for r in raws]

    # test concatenation of split file
    # events_list must have the same length as raws
    pytest.raises(ValueError, concatenate_raws, raws, True, events[1:])
    all_raw_1, events1 = concatenate_raws(raws, preload=False,
                                          events_list=events)
    assert_allclose(all_raw_1.times, raw.times)
    assert_equal(raw.first_samp, all_raw_1.first_samp)
    assert_equal(raw.last_samp, all_raw_1.last_samp)
    assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
    # concatenate_raws mutated raws[0]; re-read it (fname is the first piece)
    raws[0] = read_raw_fif(fname)
    all_raw_2 = concatenate_raws(raws, preload=True)
    assert_allclose(raw[:, :][0], all_raw_2[:, :][0])

    # test proper event treatment for split files
    events2 = concatenate_events(events, first_samps, last_samps)
    events3 = find_events(all_raw_2, stim_channel='STI 014')
    assert_array_equal(events1, events2)
    assert_array_equal(events1, events3)

    # test various methods of combining files
    raw = read_raw_fif(fif_fname, preload=True)
    n_times = raw.n_times
    # make sure that all our data match
    times = list(range(0, 2 * n_times, 999))
    # add potentially problematic points
    times.extend([n_times - 1, n_times, 2 * n_times - 1])

    raw_combo0 = concatenate_raws([read_raw_fif(f)
                                   for f in [fif_fname, fif_fname]],
                                  preload=True)
    _compare_combo(raw, raw_combo0, times, n_times)
    raw_combo = concatenate_raws([read_raw_fif(f)
                                  for f in [fif_fname, fif_fname]],
                                 preload=False)
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = concatenate_raws([read_raw_fif(f)
                                  for f in [fif_fname, fif_fname]],
                                 preload='memmap8.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    assert_equal(raw[:, :][0].shape[1] * 2, raw_combo0[:, :][0].shape[1])
    assert_equal(raw_combo0[:, :][0].shape[1], raw_combo0.n_times)

    # with all data preloaded, result should be preloaded
    raw_combo = read_raw_fif(fif_fname, preload=True)
    raw_combo.append(read_raw_fif(fif_fname, preload=True))
    assert (raw_combo.preload is True)
    assert_equal(raw_combo.n_times, raw_combo._data.shape[1])
    _compare_combo(raw, raw_combo, times, n_times)

    # with any data not preloaded, don't set result as preloaded
    raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=True),
                                  read_raw_fif(fif_fname, preload=False)])
    assert (raw_combo.preload is False)
    assert_array_equal(find_events(raw_combo, stim_channel='STI 014'),
                       find_events(raw_combo0, stim_channel='STI 014'))
    _compare_combo(raw, raw_combo, times, n_times)

    # user should be able to force data to be preloaded upon concat
    raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=False),
                                  read_raw_fif(fif_fname, preload=True)],
                                 preload=True)
    assert (raw_combo.preload is True)
    _compare_combo(raw, raw_combo, times, n_times)

    raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=False),
                                  read_raw_fif(fif_fname, preload=True)],
                                 preload='memmap3.dat')
    _compare_combo(raw, raw_combo, times, n_times)

    raw_combo = concatenate_raws([
        read_raw_fif(fif_fname, preload=True),
        read_raw_fif(fif_fname, preload=True)], preload='memmap4.dat')
    _compare_combo(raw, raw_combo, times, n_times)

    raw_combo = concatenate_raws([
        read_raw_fif(fif_fname, preload=False),
        read_raw_fif(fif_fname, preload=False)], preload='memmap5.dat')
    _compare_combo(raw, raw_combo, times, n_times)

    # verify that combining raws with different projectors throws an exception
    raw.add_proj([], remove_existing=True)
    pytest.raises(ValueError, raw.append,
                  read_raw_fif(fif_fname, preload=True))

    # now test event treatment for concatenated raw files
    events = [find_events(raw, stim_channel='STI 014'),
              find_events(raw, stim_channel='STI 014')]
    last_samps = [raw.last_samp, raw.last_samp]
    first_samps = [raw.first_samp, raw.first_samp]
    events = concatenate_events(events, first_samps, last_samps)
    events2 = find_events(raw_combo0, stim_channel='STI 014')
    assert_array_equal(events, events2)

    # check out the len method
    assert_equal(len(raw), raw.n_times)
    assert_equal(len(raw), raw.last_samp - raw.first_samp + 1)
@testing.requires_testing_data
def test_split_files():
    """Test writing and reading of split raw files.

    Checks annotation round-trip across split files, split-size error
    handling (GH#3210), and the buffer-count arithmetic for various
    ``buffer_size_sec`` values.
    """
    tempdir = _TempDir()
    raw_1 = read_raw_fif(fif_fname, preload=True)
    # Test a very close corner case
    # NOTE(review): the result of this crop is unused (raw_crop is
    # reassigned below before use); presumably the crop(0, 1.) call itself
    # is the corner case being exercised -- confirm
    raw_crop = raw_1.copy().crop(0, 1.)
    assert_allclose(raw_1.buffer_size_sec, 10., atol=1e-2)  # samp rate
    split_fname = op.join(tempdir, 'split_raw.fif')
    raw_1.set_annotations(Annotations([2.], [5.5], 'test'))
    raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')

    raw_2 = read_raw_fif(split_fname)
    assert_allclose(raw_2.buffer_size_sec, 1., atol=1e-2)  # samp rate
    # annotations must survive the split save/load
    assert_array_equal(raw_1.annotations.onset, raw_2.annotations.onset)
    assert_array_equal(raw_1.annotations.duration, raw_2.annotations.duration)
    assert_array_equal(raw_1.annotations.description,
                       raw_2.annotations.description)

    data_1, times_1 = raw_1[:, :]
    data_2, times_2 = raw_2[:, :]
    assert_array_equal(data_1, data_2)
    assert_array_equal(times_1, times_2)

    # test the case where we only end up with one buffer to write
    # (GH#3210). These tests rely on writing meas info and annotations
    # taking up a certain number of bytes, so if we change those functions
    # somehow, the numbers below for e.g. split_size might need to be
    # adjusted.
    raw_crop = raw_1.copy().crop(0, 5)
    with pytest.raises(ValueError,
                       match='after writing measurement information'):
        raw_crop.save(split_fname, split_size='1MB',  # too small a size
                      buffer_size_sec=1., overwrite=True)
    with pytest.raises(ValueError,
                       match='too large for the given split size'):
        raw_crop.save(split_fname,
                      split_size=3002276,  # still too small, now after Info
                      buffer_size_sec=1., overwrite=True)
    # just barely big enough here; the right size to write exactly one buffer
    # at a time so we hit GH#3210 if we aren't careful
    raw_crop.save(split_fname, split_size='4.5MB',
                  buffer_size_sec=1., overwrite=True)
    raw_read = read_raw_fif(split_fname)
    assert_allclose(raw_crop[:][0], raw_read[:][0], atol=1e-20)

    # Check our buffer arithmetic
    # 1 buffer required
    raw_crop = raw_1.copy().crop(0, 1)
    raw_crop.save(split_fname, buffer_size_sec=1., overwrite=True)
    raw_read = read_raw_fif(split_fname)
    assert_equal(len(raw_read._raw_extras[0]), 1)
    assert_equal(raw_read._raw_extras[0][0]['nsamp'], 301)
    assert_allclose(raw_crop[:][0], raw_read[:][0])
    # 2 buffers required
    raw_crop.save(split_fname, buffer_size_sec=0.5, overwrite=True)
    raw_read = read_raw_fif(split_fname)
    assert_equal(len(raw_read._raw_extras[0]), 2)
    assert_equal(raw_read._raw_extras[0][0]['nsamp'], 151)
    assert_equal(raw_read._raw_extras[0][1]['nsamp'], 150)
    assert_allclose(raw_crop[:][0], raw_read[:][0])
    # 2 buffers required
    # buffer one sample shorter than the data -> a trailing 1-sample buffer
    raw_crop.save(split_fname,
                  buffer_size_sec=1. - 1.01 / raw_crop.info['sfreq'],
                  overwrite=True)
    raw_read = read_raw_fif(split_fname)
    assert_equal(len(raw_read._raw_extras[0]), 2)
    assert_equal(raw_read._raw_extras[0][0]['nsamp'], 300)
    assert_equal(raw_read._raw_extras[0][1]['nsamp'], 1)
    assert_allclose(raw_crop[:][0], raw_read[:][0])
    # two samples shorter -> a trailing 2-sample buffer
    raw_crop.save(split_fname,
                  buffer_size_sec=1. - 2.01 / raw_crop.info['sfreq'],
                  overwrite=True)
    raw_read = read_raw_fif(split_fname)
    assert_equal(len(raw_read._raw_extras[0]), 2)
    assert_equal(raw_read._raw_extras[0][0]['nsamp'], 299)
    assert_equal(raw_read._raw_extras[0][1]['nsamp'], 2)
    assert_allclose(raw_crop[:][0], raw_read[:][0])
def test_load_bad_channels():
    """Test reading/writing of bad channels.

    Exercises the normal, invalid, forced, and clearing paths of
    ``load_bad_channels`` and checks bads persist through save/load.
    """
    tempdir = _TempDir()
    # Load correctly marked file (manually done in mne_process_raw)
    raw_marked = read_raw_fif(fif_bad_marked_fname)
    correct_bads = raw_marked.info['bads']
    raw = read_raw_fif(test_fif_fname)
    # Make sure it starts clean
    assert_array_equal(raw.info['bads'], [])

    # Test normal case
    raw.load_bad_channels(bad_file_works)
    # Write it out, read it in, and check
    raw.save(op.join(tempdir, 'foo_raw.fif'))
    raw_new = read_raw_fif(op.join(tempdir, 'foo_raw.fif'))
    assert_equal(correct_bads, raw_new.info['bads'])
    # Reset it
    raw.info['bads'] = []

    # Test bad case
    # a file naming unknown channels must raise unless forced
    pytest.raises(ValueError, raw.load_bad_channels, bad_file_wrong)

    # Test forcing the bad case
    with pytest.warns(RuntimeWarning, match='1 bad channel'):
        raw.load_bad_channels(bad_file_wrong, force=True)
        # write it out, read it in, and check
        raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
        raw_new = read_raw_fif(op.join(tempdir, 'foo_raw.fif'))
        assert correct_bads == raw_new.info['bads']

    # Check that bad channels are cleared
    raw.load_bad_channels(None)
    raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
    raw_new = read_raw_fif(op.join(tempdir, 'foo_raw.fif'))
    assert_equal([], raw_new.info['bads'])
@pytest.mark.slowtest
@testing.requires_testing_data
def test_io_raw():
    """Test IO for raw data (Neuromag + CTF + gz).

    Round-trips raw files through save/read and checks data, times,
    coordinate transforms, and digitization points; also exercises
    unicode descriptions and bad-filename warnings.
    """
    rng = np.random.RandomState(0)
    tempdir = _TempDir()
    # test unicode io
    for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']:
        with read_raw_fif(fif_fname) as r:
            assert ('Raw' in repr(r))
            assert (op.basename(fif_fname) in repr(r))
            # a (possibly non-ASCII) description must survive save/read
            desc1 = r.info['description'] = chars.decode('utf-8')
            temp_file = op.join(tempdir, 'raw.fif')
            r.save(temp_file, overwrite=True)
            with read_raw_fif(temp_file) as r2:
                desc2 = r2.info['description']
            assert_equal(desc1, desc2)
    # Let's construct a simple test for IO first
    raw = read_raw_fif(fif_fname).crop(0, 3.5)
    raw.load_data()
    # put in some data that we know the values of
    data = rng.randn(raw._data.shape[0], raw._data.shape[1])
    raw._data[:, :] = data
    # save it somewhere
    fname = op.join(tempdir, 'test_copy_raw.fif')
    raw.save(fname, buffer_size_sec=1.0)
    # read it in, make sure the whole thing matches
    raw = read_raw_fif(fname)
    assert_allclose(data, raw[:, :][0], rtol=1e-6, atol=1e-20)
    # let's read portions across the 1-sec tag boundary, too
    inds = raw.time_as_index([1.75, 2.25])
    sl = slice(inds[0], inds[1])
    assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20)
    # now let's do some real I/O
    fnames_in = [fif_fname, test_fif_gz_fname, ctf_fname]
    fnames_out = ['raw.fif', 'raw.fif.gz', 'raw.fif']
    for fname_in, fname_out in zip(fnames_in, fnames_out):
        fname_out = op.join(tempdir, fname_out)
        raw = read_raw_fif(fname_in)
        nchan = raw.info['nchan']
        ch_names = raw.info['ch_names']
        # channels whose name starts with 'M' (first 100 of them)
        meg_channels_idx = [k for k in range(nchan)
                            if ch_names[k][0] == 'M']
        n_channels = 100
        meg_channels_idx = meg_channels_idx[:n_channels]
        start, stop = raw.time_as_index([0, 5], use_rounding=True)
        data, times = raw[meg_channels_idx, start:(stop + 1)]
        meg_ch_names = [ch_names[k] for k in meg_channels_idx]
        # Set up pick list: MEG + STI 014 - bad channels
        include = ['STI 014']
        include += meg_ch_names
        picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
                           misc=True, ref_meg=True, include=include,
                           exclude='bads')
        # Writing with drop_small_buffer True
        raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3,
                 drop_small_buffer=True, overwrite=True)
        raw2 = read_raw_fif(fname_out)
        sel = pick_channels(raw2.ch_names, meg_ch_names)
        data2, times2 = raw2[sel, :]
        # dropping the trailing partial buffer must shorten the file
        assert (times2.max() <= 3)
        # Writing
        raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True)
        if fname_in == fif_fname or fname_in == fif_fname + '.gz':
            assert_equal(len(raw.info['dig']), 146)
        raw2 = read_raw_fif(fname_out)
        sel = pick_channels(raw2.ch_names, meg_ch_names)
        data2, times2 = raw2[sel, :]
        assert_allclose(data, data2, rtol=1e-6, atol=1e-20)
        assert_allclose(times, times2)
        assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5)
        # check transformations
        for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']:
            if raw.info[trans] is None:
                assert (raw2.info[trans] is None)
            else:
                assert_array_equal(raw.info[trans]['trans'],
                                   raw2.info[trans]['trans'])
                # check transformation 'from' and 'to'
                if trans.startswith('dev'):
                    from_id = FIFF.FIFFV_COORD_DEVICE
                else:
                    from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
                if trans[4:8] == 'head':
                    to_id = FIFF.FIFFV_COORD_HEAD
                else:
                    to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
                for raw_ in [raw, raw2]:
                    assert_equal(raw_.info[trans]['from'], from_id)
                    assert_equal(raw_.info[trans]['to'], to_id)
        if fname_in == fif_fname or fname_in == fif_fname + '.gz':
            assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])
    # test warnings on bad filenames
    raw_badname = op.join(tempdir, 'test-bad-name.fif.gz')
    with pytest.warns(RuntimeWarning, match='raw.fif'):
        raw.save(raw_badname)
    with pytest.warns(RuntimeWarning, match='raw.fif'):
        read_raw_fif(raw_badname)
@testing.requires_testing_data
def test_io_complex():
    """Test IO with complex data types."""
    rng = np.random.RandomState(0)
    tempdir = _TempDir()
    out_fname = op.join(tempdir, 'raw.fif')
    raw = _test_raw_reader(partial(read_raw_fif),
                           fname=fif_fname)
    picks = np.arange(5)
    start, stop = raw.time_as_index([0, 5])
    data_orig, _ = raw[picks, start:stop]
    for dtype in (np.complex64, np.complex128):
        # inject a random imaginary component into a copy of the data
        imag_rand = np.array(1j * rng.randn(data_orig.shape[0],
                                            data_orig.shape[1]), dtype)
        raw_cp = raw.copy()
        raw_cp._data = np.array(raw_cp._data, dtype)
        raw_cp._data[picks, start:stop] += imag_rand
        with pytest.warns(RuntimeWarning, match='Saving .* complex data.'):
            raw_cp.save(out_fname, picks, tmin=0, tmax=5,
                        overwrite=True)
        # the complex values must round-trip with and without preloading
        for preload in (False, True):
            raw2 = read_raw_fif(out_fname, preload=preload)
            raw2_data, _ = raw2[picks, :]
            n_samp = raw2_data.shape[1]
            assert_allclose(raw2_data[:, :n_samp],
                            raw_cp._data[picks, :n_samp])
@testing.requires_testing_data
def test_getitem():
    """Test getitem/indexing of Raw."""
    for preload in (False, True, 'memmap.dat'):
        raw = read_raw_fif(fif_fname, preload=preload)
        # one channel: explicit and implicit time slices must agree
        sig_a, t_a = raw[0, :]
        sig_b, t_b = raw[0]
        assert_array_equal(sig_a, sig_b)
        assert_array_equal(t_a, t_b)
        # a channel range, three equivalent spellings
        sig_a, t_a = raw[0:2, :]
        sig_b, t_b = raw[0:2]
        assert_array_equal(sig_a, sig_b)
        assert_array_equal(t_a, t_b)
        sig_b, t_b = raw[[0, 1]]
        assert_array_equal(sig_a, sig_b)
        assert_array_equal(t_a, t_b)
        # negative channel indices wrap around
        n_ch = len(raw.ch_names)
        assert_array_equal(raw[-10:-1, :][0],
                           raw[n_ch - 10:n_ch - 1, :][0])
        # out-of-range channel and time indices must raise
        pytest.raises(ValueError, raw.__getitem__,
                      (slice(-n_ch - 1), slice(None)))
        with pytest.raises(ValueError, match='start must be'):
            raw[-1000:]
        with pytest.raises(ValueError, match='stop must be'):
            raw[:-1000]
@testing.requires_testing_data
def test_proj():
    """Test SSP proj operations."""
    tempdir = _TempDir()
    for proj in [True, False]:
        raw = read_raw_fif(fif_fname, preload=False)
        if proj:
            raw.apply_proj()
        # every projector must be marked active iff it was applied
        assert (all(p['active'] == proj for p in raw.info['projs']))
        data, times = raw[0:2, :]
        data1, times1 = raw[0:2]
        assert_array_equal(data, data1)
        assert_array_equal(times, times1)
        # test adding / deleting proj
        if proj:
            # applied projections can no longer be removed or replaced
            # NOTE(review): the dict here is passed *positionally* to
            # add_proj, not as **kwargs -- confirm this is intended
            pytest.raises(ValueError, raw.add_proj, [],
                          {'remove_existing': True})
            pytest.raises(ValueError, raw.del_proj, 0)
        else:
            projs = deepcopy(raw.info['projs'])
            n_proj = len(raw.info['projs'])
            raw.del_proj(0)
            assert_equal(len(raw.info['projs']), n_proj - 1)
            raw.add_proj(projs, remove_existing=False)
            # Test that already existing projections are not added.
            assert_equal(len(raw.info['projs']), n_proj)
            raw.add_proj(projs[:-1], remove_existing=True)
            assert_equal(len(raw.info['projs']), n_proj - 1)
    # test apply_proj() with and without preload
    for preload in [True, False]:
        raw = read_raw_fif(fif_fname, preload=preload)
        data, times = raw[:, 0:2]
        raw.apply_proj()
        # reference result: manual application of the projector matrix
        data_proj_1 = np.dot(raw._projector, data)
        # load the file again without proj
        raw = read_raw_fif(fif_fname, preload=preload)
        # write the file with proj. activated, make sure proj has been applied
        raw.save(op.join(tempdir, 'raw.fif'), proj=True, overwrite=True)
        raw2 = read_raw_fif(op.join(tempdir, 'raw.fif'))
        data_proj_2, _ = raw2[:, 0:2]
        assert_allclose(data_proj_1, data_proj_2)
        assert (all(p['active'] for p in raw2.info['projs']))
        # read orig file with proj. active
        raw2 = read_raw_fif(fif_fname, preload=preload)
        raw2.apply_proj()
        data_proj_2, _ = raw2[:, 0:2]
        assert_allclose(data_proj_1, data_proj_2)
        assert (all(p['active'] for p in raw2.info['projs']))
        # test that apply_proj works
        raw.apply_proj()
        data_proj_2, _ = raw[:, 0:2]
        assert_allclose(data_proj_1, data_proj_2)
        # applying the projector twice must be idempotent
        assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2))
    tempdir = _TempDir()
    out_fname = op.join(tempdir, 'test_raw.fif')
    # build a tiny EEG-only file keeping only the last projector
    raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002)
    raw.pick_types(meg=False, eeg=True)
    raw.info['projs'] = [raw.info['projs'][-1]]
    raw._data.fill(0)
    raw._data[-1] = 1.
    raw.save(out_fname)
    raw = read_raw_fif(out_fname, preload=False)
    raw.apply_proj()
    # row 0 of the full-array access must match single-row access
    assert_allclose(raw[:, :][0][:1], raw[0, :][0])
@testing.requires_testing_data
def test_preload_modify():
    """Test preloading and modifying data."""
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    tmp_fname = op.join(tempdir, 'raw.fif')
    for preload in (False, True, 'memmap.dat'):
        raw = read_raw_fif(fif_fname, preload=preload)
        n_samp = raw.last_samp - raw.first_samp + 1
        picks = pick_types(raw.info, meg='grad', exclude='bads')
        new_data = rng.randn(len(picks), n_samp // 2)
        try:
            raw[picks, :n_samp // 2] = new_data
        except RuntimeError:
            # assigning into a non-preloaded Raw is expected to fail
            if preload:
                raise
            continue
        # the assigned values must survive a save/read round-trip
        raw.save(tmp_fname, overwrite=True)
        reloaded = read_raw_fif(tmp_fname)
        read_back, _ = reloaded[picks, :n_samp // 2]
        assert_allclose(new_data, read_back)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_filter():
    """Test filtering (FIR and IIR) and Raw.apply_function interface."""
    raw = read_raw_fif(fif_fname).crop(0, 7)
    raw.load_data()
    # decimal places expected to match in the notch-filter comparisons
    sig_dec_notch = 12
    sig_dec_notch_fit = 12
    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
    picks = picks_meg[:4]
    trans = 2.0
    filter_params = dict(picks=picks, filter_length='auto',
                         h_trans_bandwidth=trans, l_trans_bandwidth=trans,
                         fir_design='firwin')
    raw_lp = raw.copy().filter(None, 8.0, **filter_params)
    raw_hp = raw.copy().filter(16.0, None, **filter_params)
    raw_bp = raw.copy().filter(8.0 + trans, 16.0 - trans, **filter_params)
    raw_bs = raw.copy().filter(16.0, 8.0, **filter_params)
    data, _ = raw[picks, :]
    lp_data, _ = raw_lp[picks, :]
    hp_data, _ = raw_hp[picks, :]
    bp_data, _ = raw_bp[picks, :]
    bs_data, _ = raw_bs[picks, :]
    tols = dict(atol=1e-20, rtol=1e-5)
    # complementary FIR bands must sum back to the original signal
    assert_allclose(bs_data, lp_data + hp_data, **tols)
    assert_allclose(data, lp_data + bp_data + hp_data, **tols)
    assert_allclose(data, bp_data + bs_data, **tols)
    filter_params_iir = dict(picks=picks, n_jobs=2, method='iir',
                             iir_params=dict(output='ba'))
    raw_lp_iir = raw.copy().filter(None, 4.0, **filter_params_iir)
    raw_hp_iir = raw.copy().filter(8.0, None, **filter_params_iir)
    raw_bp_iir = raw.copy().filter(4.0, 8.0, **filter_params_iir)
    del filter_params_iir
    lp_data_iir, _ = raw_lp_iir[picks, :]
    hp_data_iir, _ = raw_hp_iir[picks, :]
    bp_data_iir, _ = raw_bp_iir[picks, :]
    summation = lp_data_iir + hp_data_iir + bp_data_iir
    # exclude the edges, where IIR transients dominate
    assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100], 11)
    # make sure we didn't touch other channels
    data, _ = raw[picks_meg[4:], :]
    bp_data, _ = raw_bp[picks_meg[4:], :]
    assert_array_equal(data, bp_data)
    bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :]
    assert_array_equal(data, bp_data_iir)
    # ... and that inplace changes are inplace
    raw_copy = raw.copy()
    # NOTE(review): comparing raw._data with itself is trivially True;
    # presumably meant as a sanity check of may_share_memory -- confirm
    assert np.may_share_memory(raw._data, raw._data)
    assert not np.may_share_memory(raw_copy._data, raw._data)
    # this could be assert_array_equal but we do this to mirror the call below
    assert (raw._data[0] == raw_copy._data[0]).all()
    raw_copy.filter(None, 20., n_jobs=2, **filter_params)
    assert not (raw._data[0] == raw_copy._data[0]).all()
    assert_equal(raw.copy().filter(None, 20., **filter_params)._data,
                 raw_copy._data)
    # do a very simple check on line filtering
    raw_bs = raw.copy().filter(60.0 + trans, 60.0 - trans, **filter_params)
    data_bs, _ = raw_bs[picks, :]
    raw_notch = raw.copy().notch_filter(
        60.0, picks=picks, n_jobs=2, method='fir',
        trans_bandwidth=2 * trans)
    data_notch, _ = raw_notch[picks, :]
    assert_array_almost_equal(data_bs, data_notch, sig_dec_notch)
    # now use the sinusoidal fitting
    raw_notch = raw.copy().notch_filter(
        None, picks=picks, n_jobs=2, method='spectrum_fit')
    data_notch, _ = raw_notch[picks, :]
    data, _ = raw[picks, :]
    assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
    # filter should set the "lowpass" and "highpass" parameters
    raw = RawArray(np.random.randn(3, 1000),
                   create_info(3, 1000., ['eeg'] * 2 + ['stim']))
    raw.info['lowpass'] = raw.info['highpass'] = None
    for kind in ('none', 'lowpass', 'highpass', 'bandpass', 'bandstop'):
        print(kind)
        h_freq = l_freq = None
        if kind in ('lowpass', 'bandpass'):
            h_freq = 70
        if kind in ('highpass', 'bandpass'):
            l_freq = 30
        if kind == 'bandstop':
            l_freq, h_freq = 70, 30
        assert (raw.info['lowpass'] is None)
        assert (raw.info['highpass'] is None)
        kwargs = dict(l_trans_bandwidth=20, h_trans_bandwidth=20,
                      filter_length='auto', phase='zero', fir_design='firwin')
        raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(1),
                                     **kwargs)
        # filtering a subset of channels must not touch the original info
        assert (raw.info['lowpass'] is None)
        assert (raw.info['highpass'] is None)
        raw_filt = raw.copy().filter(l_freq, h_freq, **kwargs)
        wanted_h = h_freq if kind != 'bandstop' else None
        wanted_l = l_freq if kind != 'bandstop' else None
        assert_equal(raw_filt.info['lowpass'], wanted_h)
        assert_equal(raw_filt.info['highpass'], wanted_l)
        # Using all data channels should still set the params (GH#3259)
        raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(2),
                                     **kwargs)
        assert_equal(raw_filt.info['lowpass'], wanted_h)
        assert_equal(raw_filt.info['highpass'], wanted_l)
def test_filter_picks():
    """Test filtering default channel picks."""
    all_types = ['mag', 'grad', 'eeg', 'seeg', 'misc', 'stim', 'ecog', 'hbo',
                 'hbr']
    info = create_info(ch_names=all_types, ch_types=all_types, sfreq=256)
    raw = RawArray(data=np.zeros((len(all_types), 1000)), info=info)
    # pick_types kwargs: mag/grad fold into 'meg', hbo/hbr into 'fnirs'
    kwarg_types = ('misc', 'stim', 'meg', 'eeg', 'seeg', 'ecog')
    # -- filtering any single data-channel type must succeed
    for target in ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'hbo', 'hbr'):
        sel = {kw: kw == target for kw in kwarg_types}
        sel['meg'] = target if target in ('mag', 'grad') else False
        sel['fnirs'] = target if target in ('hbo', 'hbr') else False
        raw_sub = raw.copy().pick_types(**sel)
        raw_sub.filter(10, 30, fir_design='firwin')
    # -- filtering with no data channel present must raise
    for target in ('misc', 'stim'):
        sel = {kw: kw == target for kw in kwarg_types}
        raw_sub = raw.copy().pick_types(**sel)
        pytest.raises(RuntimeError, raw_sub.filter, 10, 30)
@testing.requires_testing_data
def test_crop():
    """Test cropping raw files."""
    # split a concatenated file to test a difficult case
    raw = concatenate_raws([read_raw_fif(f)
                            for f in [fif_fname, fif_fname]])
    split_size = 10.  # in seconds
    sfreq = raw.info['sfreq']
    nsamp = (raw.last_samp - raw.first_samp + 1)
    # do an annoying case (off-by-one splitting)
    tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))]
    tmins = np.sort(tmins)
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
    tmaxs /= sfreq
    tmins /= sfreq
    raws = [None] * len(tmins)
    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
        raws[ri] = raw.copy().crop(tmin, tmax)
    all_raw_2 = concatenate_raws(raws, preload=False)
    # re-concatenating the pieces must reproduce the original exactly
    assert_equal(raw.first_samp, all_raw_2.first_samp)
    assert_equal(raw.last_samp, all_raw_2.last_samp)
    assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
    tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
    tmaxs /= sfreq
    tmins /= sfreq
    # going in reverse order so the last fname is the first file
    # (need it later)
    raws = [None] * len(tmins)
    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
        raws[ri] = raw.copy().crop(tmin, tmax)
    # test concatenation of split file
    all_raw_1 = concatenate_raws(raws, preload=False)
    # crop(0, None) keeps the full time range
    all_raw_2 = raw.copy().crop(0, None)
    for ar in [all_raw_1, all_raw_2]:
        assert_equal(raw.first_samp, ar.first_samp)
        assert_equal(raw.last_samp, ar.last_samp)
        assert_array_equal(raw[:, :][0], ar[:, :][0])
    # test shape consistency of cropped raw
    data = np.zeros((1, 1002001))
    info = create_info(1, 1000)
    raw = RawArray(data, info)
    for tmin in range(0, 1001, 100):
        raw1 = raw.copy().crop(tmin=tmin, tmax=tmin + 2)
        assert_equal(raw1[:][0].shape, (1, 2001))
@testing.requires_testing_data
def test_resample():
    """Test resample (with I/O and multiple files).

    Checks up/down-sampling round-trips through disk, resampling before
    vs. after concatenation, stim-channel-preserving decimation, event
    resampling, the copy flag, and the lowpass info update.
    """
    tempdir = _TempDir()
    raw = read_raw_fif(fif_fname).crop(0, 3)
    raw.load_data()
    raw_resamp = raw.copy()
    sfreq = raw.info['sfreq']
    # test parallel on upsample
    raw_resamp.resample(sfreq * 2, n_jobs=2, npad='auto')
    assert_equal(raw_resamp.n_times, len(raw_resamp.times))
    raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
    raw_resamp = read_raw_fif(op.join(tempdir, 'raw_resamp-raw.fif'),
                              preload=True)
    assert_equal(sfreq, raw_resamp.info['sfreq'] / 2)
    assert_equal(raw.n_times, raw_resamp.n_times / 2)
    assert_equal(raw_resamp._data.shape[1], raw_resamp.n_times)
    assert_equal(raw._data.shape[0], raw_resamp._data.shape[0])
    # test non-parallel on downsample
    raw_resamp.resample(sfreq, n_jobs=1, npad='auto')
    assert_equal(raw_resamp.info['sfreq'], sfreq)
    assert_equal(raw._data.shape, raw_resamp._data.shape)
    assert_equal(raw.first_samp, raw_resamp.first_samp)
    # bug fix: this used to compare raw.last_samp with itself (always
    # true); compare against the resampled instance instead
    assert_equal(raw.last_samp, raw_resamp.last_samp)
    # upsampling then downsampling doubles resampling error, but this still
    # works (hooray). Note that the stim channels had to be sub-sampled
    # without filtering to be accurately preserved
    # note we have to treat MEG and EEG+STIM channels differently (tols)
    assert_allclose(raw._data[:306, 200:-200],
                    raw_resamp._data[:306, 200:-200],
                    rtol=1e-2, atol=1e-12)
    assert_allclose(raw._data[306:, 200:-200],
                    raw_resamp._data[306:, 200:-200],
                    rtol=1e-2, atol=1e-7)
    # now check multiple file support w/resampling, as order of operations
    # (concat, resample) should not affect our data
    raw1 = raw.copy()
    raw2 = raw.copy()
    raw3 = raw.copy()
    raw4 = raw.copy()
    raw1 = concatenate_raws([raw1, raw2])
    raw1.resample(10., npad='auto')
    raw3.resample(10., npad='auto')
    raw4.resample(10., npad='auto')
    raw3 = concatenate_raws([raw3, raw4])
    assert_array_equal(raw1._data, raw3._data)
    assert_array_equal(raw1._first_samps, raw3._first_samps)
    assert_array_equal(raw1._last_samps, raw3._last_samps)
    assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
    assert_equal(raw1.first_samp, raw3.first_samp)
    assert_equal(raw1.last_samp, raw3.last_samp)
    assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])
    # test resampling of stim channel
    # basic decimation
    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    assert_allclose(raw.resample(8., npad='auto')._data,
                    [[1, 1, 0, 0, 1, 1, 0, 0]])
    # decimation of multiple stim channels
    raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim']))
    assert_allclose(raw.resample(8., npad='auto', verbose='error')._data,
                    [[1, 1, 0, 0, 1, 1, 0, 0],
                     [1, 1, 0, 0, 1, 1, 0, 0]])
    # decimation that could potentially drop events if the decimation is
    # done naively
    stim = [0, 0, 0, 1, 1, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    assert_allclose(raw.resample(4., npad='auto')._data,
                    [[0, 1, 1, 0]])
    # two events are merged in this case (warning)
    stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    with pytest.warns(RuntimeWarning, match='become unreliable'):
        raw.resample(8., npad='auto')
    # events are dropped in this case (warning)
    stim = [0, 1, 1, 0, 0, 1, 1, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    with pytest.warns(RuntimeWarning, match='become unreliable'):
        raw.resample(4., npad='auto')
    # test resampling events: this should no longer give a warning
    # we often have first_samp != 0, include it here too
    stim = [0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]
    # test is on half the sfreq, but should work with trickier ones too
    o_sfreq, sfreq_ratio = len(stim), 0.5
    n_sfreq = o_sfreq * sfreq_ratio
    first_samp = len(stim) // 2
    raw = RawArray([stim], create_info(1, o_sfreq, ['stim']),
                   first_samp=first_samp)
    events = find_events(raw)
    raw, events = raw.resample(n_sfreq, events=events, npad='auto')
    n_fsamp = int(first_samp * sfreq_ratio)  # how it's calc'd in base.py
    # NB np.round used for rounding event times, which has 0.5 as corner case:
    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.around.html
    assert_equal(events,
                 np.array([[np.round(1 * sfreq_ratio) + n_fsamp, 0, 1],
                           [np.round(10 * sfreq_ratio) + n_fsamp, 0, 1]]))
    # test copy flag
    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    raw_resampled = raw.copy().resample(4., npad='auto')
    assert (raw_resampled is not raw)
    raw_resampled = raw.resample(4., npad='auto')
    assert (raw_resampled is raw)
    # resample should still work even when no stim channel is present
    raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg']))
    raw.info['lowpass'] = 50.
    raw.resample(10, npad='auto')
    # lowpass is clipped to the new Nyquist frequency
    assert_equal(raw.info['lowpass'], 5.)
    assert_equal(len(raw), 10)
@testing.requires_testing_data
def test_hilbert():
    """Test computation of the analytic signal using hilbert."""
    raw = read_raw_fif(fif_fname, preload=True)
    picks = pick_types(raw.info, meg=True, exclude='bads')[:4]
    # band-pass filtered copies for the n_fft comparison below
    raw_filt = raw.copy()
    raw_filt.filter(10, 20, picks=picks, l_trans_bandwidth='auto',
                    h_trans_bandwidth='auto', filter_length='auto',
                    phase='zero', fir_window='blackman', fir_design='firwin')
    raw_filt_2 = raw_filt.copy()
    raw_env = raw.copy()
    raw_err = raw.copy()
    raw.apply_hilbert(picks, n_fft='auto')
    raw_env.apply_hilbert(picks, n_fft='auto', envelope=True)
    # an explicit (larger) n_fft must agree closely with n_fft='auto'
    raw_filt.apply_hilbert(picks, n_fft='auto')
    n_fft = 2 ** int(np.ceil(np.log2(raw_filt_2.n_times + 1000)))
    raw_filt_2.apply_hilbert(picks, n_fft=n_fft)
    assert_equal(raw_filt._data.shape, raw_filt_2._data.shape)
    assert_allclose(raw_filt._data[:, 50:-50], raw_filt_2._data[:, 50:-50],
                    atol=1e-13, rtol=1e-2)
    # an n_fft smaller than the signal length is invalid
    pytest.raises(ValueError, raw_err.apply_hilbert, picks,
                  n_fft=raw_err.n_times - 100)
    # envelope=True must equal the magnitude of the analytic signal
    env = np.abs(raw._data[picks, :])
    assert_allclose(env, raw_env._data[picks, :], rtol=1e-2, atol=1e-13)
@testing.requires_testing_data
def test_raw_copy():
    """Test Raw copy."""
    # copying must preserve the data and the full attribute set,
    # both with and without preloading
    for preload in (True, False):
        raw = read_raw_fif(fif_fname, preload=preload)
        orig_data, _ = raw[:, :]
        dup = raw.copy()
        dup_data, _ = dup[:, :]
        assert_array_equal(orig_data, dup_data)
        assert_equal(sorted(raw.__dict__.keys()),
                     sorted(dup.__dict__.keys()))
@requires_pandas
def test_to_data_frame():
    """Test raw Pandas exporter."""
    raw = read_raw_fif(test_fif_fname, preload=True)
    _, times = raw[0, :10]
    frame = raw.to_data_frame()
    # one column per channel; default index is the time in rounded ms
    assert (frame.columns == raw.ch_names).all()
    assert_array_equal(np.round(times * 1e3), frame.index.values[:10])
    frame = raw.to_data_frame(index=None)
    assert 'time' in frame.index.names
    # exported values are scaled copies of the underlying data
    assert_array_equal(frame.values[:, 0], raw._data[0] * 1e13)
    assert_array_equal(frame.values[:, 2], raw._data[2] * 1e15)
def test_add_channels():
    """Test raw splitting / re-appending channel types.

    Splits a raw instance into per-type sub-instances, re-combines them
    with ``add_channels``, and checks the success paths as well as the
    error conditions (info conflicts, preload state, sfreq, times).
    """
    rng = np.random.RandomState(0)
    raw = read_raw_fif(test_fif_fname).crop(0, 1).load_data()
    raw_nopre = read_raw_fif(test_fif_fname, preload=False)
    raw_eeg_meg = raw.copy().pick_types(meg=True, eeg=True)
    raw_eeg = raw.copy().pick_types(meg=False, eeg=True)
    raw_meg = raw.copy().pick_types(meg=True, eeg=False)
    raw_stim = raw.copy().pick_types(meg=False, eeg=False, stim=True)
    raw_new = raw_meg.copy().add_channels([raw_eeg, raw_stim])
    assert (
        all(ch in raw_new.ch_names
            for ch in list(raw_stim.ch_names) + list(raw_meg.ch_names))
    )
    raw_new = raw_meg.copy().add_channels([raw_eeg])
    # bug fix: this previously asserted a bare generator expression, which
    # is always truthy; actually verify every MEG+EEG channel is present
    assert all(ch in raw_new.ch_names for ch in raw_eeg_meg.ch_names)
    assert_array_equal(raw_new[:, :][0], raw_eeg_meg[:, :][0])
    assert_array_equal(raw_new[:, :][1], raw[:, :][1])
    assert (all(ch not in raw_new.ch_names for ch in raw_stim.ch_names))
    # Testing force updates
    raw_arr_info = create_info(['1', '2'], raw_meg.info['sfreq'], 'eeg')
    orig_head_t = raw_arr_info['dev_head_t']
    raw_arr = rng.randn(2, raw_eeg.n_times)
    raw_arr = RawArray(raw_arr, raw_arr_info)
    # This should error because of conflicts in Info
    pytest.raises(ValueError, raw_meg.copy().add_channels, [raw_arr])
    raw_meg.copy().add_channels([raw_arr], force_update_info=True)
    # Make sure that values didn't get overwritten
    assert_equal(object_diff(raw_arr.info['dev_head_t'], orig_head_t), '')
    # Now test errors: mixed preload state, mismatched sfreq, mismatched
    # times, adding an instance to itself, and a non-list argument
    raw_badsf = raw_eeg.copy()
    raw_badsf.info['sfreq'] = 3.1415927
    raw_eeg.crop(.5)
    pytest.raises(RuntimeError, raw_meg.add_channels, [raw_nopre])
    pytest.raises(RuntimeError, raw_meg.add_channels, [raw_badsf])
    pytest.raises(AssertionError, raw_meg.add_channels, [raw_eeg])
    pytest.raises(ValueError, raw_meg.add_channels, [raw_meg])
    pytest.raises(TypeError, raw_meg.add_channels, raw_badsf)
@testing.requires_testing_data
def test_save():
    """Test saving raw."""
    tempdir = _TempDir()
    raw = read_raw_fif(fif_fname, preload=False)
    # overwriting the file currently being read is forbidden
    pytest.raises(ValueError, raw.save, fif_fname)
    raw = read_raw_fif(fif_fname, preload=True)
    # overwriting any existing file requires overwrite=True
    pytest.raises(IOError, raw.save, fif_fname)
    # test abspath support and annotation round-trip
    orig_time = (raw.info['meas_date'] +
                 raw.first_samp / raw.info['sfreq'])
    annot = Annotations([10], [5], ['test'], orig_time)
    raw.set_annotations(annot)
    new_fname = op.join(op.abspath(op.curdir), 'break_raw.fif')
    raw.save(op.join(tempdir, new_fname), overwrite=True)
    new_raw = read_raw_fif(op.join(tempdir, new_fname), preload=False)
    pytest.raises(ValueError, new_raw.save, new_fname)
    # the annotations must survive the save/read round-trip
    assert_array_equal(annot.onset, new_raw.annotations.onset)
    assert_array_equal(annot.duration, new_raw.annotations.duration)
    assert_array_equal(annot.description, new_raw.annotations.description)
    assert_equal(annot.orig_time, new_raw.annotations.orig_time)
@testing.requires_testing_data
def test_annotation_crop():
    """Test annotation sync after cropping and concatenating."""
    tempdir = _TempDir()
    # NOTE(review): new_fname is absolute, so op.join(tempdir, new_fname)
    # resolves to new_fname itself -- the file read at the end is the one
    # written by test_save() above (test-order coupling); confirm intent
    new_fname = op.join(op.abspath(op.curdir), 'break_raw.fif')
    annot = Annotations([5., 11., 15.], [2., 1., 3.], ['test', 'test', 'test'])
    raw = read_raw_fif(fif_fname, preload=False)
    raw.set_annotations(annot)
    r1 = raw.copy().crop(2.5, 7.5)
    r2 = raw.copy().crop(12.5, 17.5)
    r3 = raw.copy().crop(10., 12.)
    raw = concatenate_raws([r1, r2, r3])  # segments reordered
    onsets = raw.annotations.onset
    durations = raw.annotations.duration
    # 2*5s clips combined with annotations at 2.5s + 2s clip, annotation at 1s
    assert_array_almost_equal([2.5, 7.5, 11.], onsets[:3], decimal=2)
    assert_array_almost_equal([2., 2.5, 1.], durations[:3], decimal=2)
    # test annotation clipping
    sfreq = raw.info['sfreq']
    annot = Annotations([0., raw.times[-1]], [2., 2.], 'test',
                        raw.info['meas_date'] + raw.first_samp / sfreq - 1.)
    # annotations extending outside the data range get clipped (warning)
    with pytest.warns(RuntimeWarning, match='Limited .* expanding outside'):
        raw.set_annotations(annot)
    assert_allclose(raw.annotations.duration,
                    [1., 1. + 1. / raw.info['sfreq']], atol=1e-3)
    # make sure we can overwrite the file we loaded when preload=True
    new_raw = read_raw_fif(op.join(tempdir, new_fname), preload=True)
    new_raw.save(op.join(tempdir, new_fname), overwrite=True)
@testing.requires_testing_data
def test_with_statement():
    """Test that Raw works as a context manager."""
    for preload in (True, False):
        with read_raw_fif(fif_fname, preload=preload) as raw_cm:
            print(raw_cm)
def test_compensation_raw():
    """Test Raw compensation."""
    tempdir = _TempDir()
    raw_3 = read_raw_fif(ctf_comp_fname)
    assert_equal(raw_3.compensation_grade, 3)
    data_3, times = raw_3[:, :]
    # data come with grade 3
    for ii in range(2):
        raw_3_new = raw_3.copy()
        if ii == 0:
            raw_3_new.load_data()
        # re-applying the already-active grade must be a no-op
        raw_3_new.apply_gradient_compensation(3)
        assert_equal(raw_3_new.compensation_grade, 3)
        data_new, times_new = raw_3_new[:, :]
        assert_array_equal(times, times_new)
        assert_array_equal(data_3, data_new)
    # change to grade 0
    raw_0 = raw_3.copy().apply_gradient_compensation(0)
    assert_equal(raw_0.compensation_grade, 0)
    data_0, times_new = raw_0[:, :]
    assert_array_equal(times, times_new)
    # changing the grade must actually change the data
    assert (np.mean(np.abs(data_0 - data_3)) > 1e-12)
    # change to grade 1
    raw_1 = raw_0.copy().apply_gradient_compensation(1)
    assert_equal(raw_1.compensation_grade, 1)
    data_1, times_new = raw_1[:, :]
    assert_array_equal(times, times_new)
    assert (np.mean(np.abs(data_1 - data_3)) > 1e-12)
    # an unknown grade is rejected
    pytest.raises(ValueError, raw_1.apply_gradient_compensation, 33)
    # the grade can no longer be changed once projectors were applied
    raw_bad = raw_0.copy()
    raw_bad.add_proj(compute_proj_raw(raw_0, duration=0.5, verbose='error'))
    raw_bad.apply_proj()
    pytest.raises(RuntimeError, raw_bad.apply_gradient_compensation, 1)
    # with preload
    tols = dict(rtol=1e-12, atol=1e-25)
    raw_1_new = raw_3.copy().load_data().apply_gradient_compensation(1)
    assert_equal(raw_1_new.compensation_grade, 1)
    data_1_new, times_new = raw_1_new[:, :]
    assert_array_equal(times, times_new)
    assert (np.mean(np.abs(data_1_new - data_3)) > 1e-12)
    assert_allclose(data_1, data_1_new, **tols)
    # change back
    raw_3_new = raw_1.copy().apply_gradient_compensation(3)
    data_3_new, times_new = raw_3_new[:, :]
    assert_allclose(data_3, data_3_new, **tols)
    raw_3_new = raw_1.copy().load_data().apply_gradient_compensation(3)
    data_3_new, times_new = raw_3_new[:, :]
    assert_allclose(data_3, data_3_new, **tols)
    # grade 0->3 and 1->3 must both recover the original grade-3 data,
    # with and without preloading
    for load in (False, True):
        for raw in (raw_0, raw_1):
            raw_3_new = raw.copy()
            if load:
                raw_3_new.load_data()
            raw_3_new.apply_gradient_compensation(3)
            assert_equal(raw_3_new.compensation_grade, 3)
            data_3_new, times_new = raw_3_new[:, :]
            assert_array_equal(times, times_new)
            assert (np.mean(np.abs(data_3_new - data_1)) > 1e-12)
            assert_allclose(data_3, data_3_new, **tols)
    # Try IO with compensation
    temp_file = op.join(tempdir, 'raw.fif')
    raw_3.save(temp_file, overwrite=True)
    for preload in (True, False):
        raw_read = read_raw_fif(temp_file, preload=preload)
        assert_equal(raw_read.compensation_grade, 3)
        data_read, times_new = raw_read[:, :]
        assert_array_equal(times, times_new)
        assert_allclose(data_3, data_read, **tols)
        raw_read.apply_gradient_compensation(1)
        data_read, times_new = raw_read[:, :]
        assert_array_equal(times, times_new)
        assert_allclose(data_1, data_read, **tols)
    # Now save the file that has modified compensation
    # and make sure the compensation is the same as it was,
    # but that we can undo it
    # These channels have norm 1e-11/1e-12, so atol=1e-18 isn't awesome,
    # but it's due to the single precision of the info['comps'] leading
    # to inexact inversions with saving/loading (casting back to single)
    # in between (e.g., 1->3->1 will degrade like this)
    looser_tols = dict(rtol=1e-6, atol=1e-18)
    raw_1.save(temp_file, overwrite=True)
    for preload in (True, False):
        raw_read = read_raw_fif(temp_file, preload=preload, verbose=True)
        assert_equal(raw_read.compensation_grade, 1)
        data_read, times_new = raw_read[:, :]
        assert_array_equal(times, times_new)
        assert_allclose(data_1, data_read, **looser_tols)
        raw_read.apply_gradient_compensation(3, verbose=True)
        data_read, times_new = raw_read[:, :]
        assert_array_equal(times, times_new)
        assert_allclose(data_3, data_read, **looser_tols)
@requires_mne
def test_compensation_raw_mne():
    """Test Raw compensation by comparing with MNE-C."""
    tempdir = _TempDir()
    def compensate_mne(fname, grad):
        """Apply CTF compensation via the MNE-C mne_process_raw binary."""
        tmp_fname = op.join(tempdir, 'mne_ctf_test_raw.fif')
        cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname,
               '--grad', str(grad), '--projoff', '--filteroff']
        run_subprocess(cmd)
        return read_raw_fif(tmp_fname, preload=True)
    for grad in [0, 2, 3]:
        raw_py = read_raw_fif(ctf_comp_fname, preload=True)
        raw_py.apply_gradient_compensation(grad)
        raw_c = compensate_mne(ctf_comp_fname, grad)
        # Python and MNE-C must agree on the data and channel metadata
        assert_allclose(raw_py._data, raw_c._data, rtol=1e-6, atol=1e-17)
        assert_equal(raw_py.info['nchan'], raw_c.info['nchan'])
        for ch_py, ch_c in zip(raw_py.info['chs'], raw_c.info['chs']):
            for key in ('ch_name', 'coil_type', 'scanno', 'logno', 'unit',
                        'coord_frame', 'kind'):
                assert_equal(ch_py[key], ch_c[key])
            for key in ('loc', 'unit_mul', 'range', 'cal'):
                assert_allclose(ch_py[key], ch_c[key])
@testing.requires_testing_data
def test_drop_channels_mixin():
    """Test channels-dropping functionality."""
    raw = read_raw_fif(fif_fname, preload=True)
    to_drop = raw.ch_names[:3]
    remaining = raw.ch_names[3:]
    all_names = raw.ch_names
    # dropping on a copy must leave the original untouched
    clone = raw.copy().drop_channels(to_drop)
    assert_equal(remaining, clone.ch_names)
    assert_equal(all_names, raw.ch_names)
    assert_equal(len(all_names), raw._data.shape[0])
    # dropping in place updates names, calibrations and data together
    raw.drop_channels(to_drop)
    assert_equal(remaining, raw.ch_names)
    assert_equal(len(remaining), len(raw._cals))
    assert_equal(len(remaining), raw._data.shape[0])
@testing.requires_testing_data
def test_pick_channels_mixin():
    """Test channel-picking functionality."""
    raw = read_raw_fif(fif_fname, preload=True)
    keep = raw.ch_names[:3]
    all_names = raw.ch_names
    # picking on a copy must leave the original intact
    clone = raw.copy().pick_channels(keep)
    assert_equal(keep, clone.ch_names)
    assert_equal(all_names, raw.ch_names)
    assert_equal(len(all_names), raw._data.shape[0])
    # in-place pick trims names, calibrations and data consistently
    raw.pick_channels(keep)  # copy is False
    assert_equal(keep, raw.ch_names)
    assert_equal(len(keep), len(raw._cals))
    assert_equal(len(keep), raw._data.shape[0])
    # a bare string (instead of a list) is rejected
    pytest.raises(ValueError, raw.pick_channels, keep[0])
    # picking/dropping requires preloaded data
    raw = read_raw_fif(fif_fname, preload=False)
    pytest.raises(RuntimeError, raw.pick_channels, keep)
    pytest.raises(RuntimeError, raw.drop_channels, keep)
@testing.requires_testing_data
def test_equalize_channels():
    """Test equalization of channels."""
    first = read_raw_fif(fif_fname, preload=True)
    second = first.copy()
    # after dropping a different channel from each instance, only the
    # channels from index 2 onward are common to both
    expected = first.ch_names[2:]
    first.drop_channels(first.ch_names[:1])
    second.drop_channels(second.ch_names[1:2])
    instances = [first, second]
    equalize_channels(instances)
    for inst in instances:
        assert_equal(expected, inst.ch_names)
run_tests_if_main()
| {
"repo_name": "teonlamont/mne-python",
"path": "mne/io/fiff/tests/test_raw_fiff.py",
"copies": "2",
"size": "60041",
"license": "bsd-3-clause",
"hash": -7504805733982241000,
"line_mean": 40.152159013,
"line_max": 79,
"alpha_frac": 0.6053196982,
"autogenerated": false,
"ratio": 3.1112550523370297,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4716574750537029,
"avg_score": null,
"num_lines": null
} |
import os.path as op
from copy import deepcopy
from nose.tools import (assert_true, assert_equal, assert_raises,
assert_not_equal)
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose)
import numpy as np
import copy as cp
import warnings
from scipy import fftpack
import matplotlib
from mne import (Epochs, Annotations, read_events, pick_events, read_epochs,
equalize_channels, pick_types, pick_channels, read_evokeds,
write_evokeds, create_info, make_fixed_length_events,
combine_evoked)
from mne.baseline import rescale
from mne.preprocessing import maxwell_filter
from mne.epochs import (
bootstrap, equalize_epoch_counts, combine_event_ids, add_channels_epochs,
EpochsArray, concatenate_epochs, _BaseEpochs, average_movements)
from mne.utils import (_TempDir, requires_pandas, slow_test,
clean_warning_registry, run_tests_if_main,
requires_version)
from mne.chpi import read_head_pos, head_pos_to_trans_rot_t
from mne.io import RawArray, Raw
from mne.io.proj import _has_eeg_average_ref_proj
from mne.event import merge_events
from mne.io.constants import FIFF
from mne.externals.six import text_type
from mne.externals.six.moves import zip, cPickle as pickle
from mne.datasets import testing
from mne.tests.common import assert_meg_snr, assert_naming
matplotlib.use('Agg')  # for testing don't use X server
warnings.simplefilter('always')  # enable b/c these tests throw warnings
# Locations of the test fixture files (downloaded testing dataset + the
# small files shipped in mne/io/tests/data).
data_path = testing.data_path(download=False)
fname_raw_move = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
fname_raw_movecomp_sss = op.join(
    data_path, 'SSS', 'test_move_anon_movecomp_raw_sss.fif')
fname_raw_move_pos = op.join(data_path, 'SSS', 'test_move_anon_raw.pos')
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
# Default epoching window and event ids shared by the tests below.
event_id, tmin, tmax = 1, -0.2, 0.5
event_id_2 = 2
rng = np.random.RandomState(42)  # seeded for reproducible shuffles/data
def _get_data(preload=False):
    """Load the test raw file, its events, and a standard channel pick."""
    raw = Raw(raw_fname, preload=preload, add_eeg_ref=False, proj=False)
    events = read_events(event_name)
    picks = pick_types(
        raw.info, meg=True, eeg=True, stim=True, ecg=True, eog=True,
        include=['STI 014'], exclude='bads')
    return raw, events, picks
# Peak-to-peak rejection thresholds per channel type (shared by tests).
reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
# Flatness thresholds per channel type.
flat = dict(grad=1e-15, mag=1e-15)
clean_warning_registry()  # really clean warning stack
@slow_test
@testing.requires_testing_data
def test_average_movements():
    """Test movement averaging algorithm

    Compares movement-compensated averages against a static average,
    a static Maxwell-filtered average, and an MNE movecomp reference.
    """
    # usable data
    crop = 0., 10.
    origin = (0., 0., 0.04)
    raw = Raw(fname_raw_move, allow_maxshield='yes')
    raw.info['bads'] += ['MEG2443']  # mark some bad MEG channel
    raw.crop(*crop, copy=False).load_data()
    raw.filter(None, 20, method='iir')
    events = make_fixed_length_events(raw, event_id)
    picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
                       ecg=True, eog=True, exclude=())
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, proj=False,
                    preload=True)
    epochs_proj = Epochs(raw, events[:1], event_id, tmin, tmax, picks=picks,
                         proj=True, preload=True)
    # static Maxwell-filtered reference (no movement compensation)
    raw_sss_stat = maxwell_filter(raw, origin=origin, regularize=None,
                                  bad_condition='ignore')
    del raw
    epochs_sss_stat = Epochs(raw_sss_stat, events, event_id, tmin, tmax,
                             picks=picks, proj=False)
    evoked_sss_stat = epochs_sss_stat.average()
    del raw_sss_stat, epochs_sss_stat
    head_pos = read_head_pos(fname_raw_move_pos)
    trans = epochs.info['dev_head_t']['trans']
    # a "static" head position built from the recording's dev_head_t
    head_pos_stat = (np.array([trans[:3, 3]]),
                     np.array([trans[:3, :3]]),
                     np.array([0.]))
    # SSS-based
    assert_raises(TypeError, average_movements, epochs, None)
    evoked_move_non = average_movements(epochs, head_pos=head_pos,
                                        weight_all=False, origin=origin)
    evoked_move_all = average_movements(epochs, head_pos=head_pos,
                                        weight_all=True, origin=origin)
    evoked_stat_all = average_movements(epochs, head_pos=head_pos_stat,
                                        weight_all=True, origin=origin)
    evoked_std = epochs.average()
    for ev in (evoked_move_non, evoked_move_all, evoked_stat_all):
        assert_equal(ev.nave, evoked_std.nave)
        # SSS reconstruction repairs the marked bad channel
        assert_equal(len(ev.info['bads']), 0)
    # substantial changes to MEG data
    for ev in (evoked_move_non, evoked_stat_all):
        assert_meg_snr(ev, evoked_std, 0., 0.1)
        assert_raises(AssertionError, assert_meg_snr,
                      ev, evoked_std, 1., 1.)
    meg_picks = pick_types(evoked_std.info, meg=True, exclude=())
    assert_allclose(evoked_move_non.data[meg_picks],
                    evoked_move_all.data[meg_picks], atol=1e-20)
    # compare to averaged movecomp version (should be fairly similar)
    raw_sss = Raw(fname_raw_movecomp_sss).crop(*crop, copy=False).load_data()
    raw_sss.filter(None, 20, method='iir')
    picks_sss = pick_types(raw_sss.info, meg=True, eeg=True, stim=True,
                           ecg=True, eog=True, exclude=())
    assert_array_equal(picks, picks_sss)
    epochs_sss = Epochs(raw_sss, events, event_id, tmin, tmax,
                        picks=picks_sss, proj=False)
    evoked_sss = epochs_sss.average()
    assert_equal(evoked_std.nave, evoked_sss.nave)
    # this should break the non-MEG channels
    assert_raises(AssertionError, assert_meg_snr,
                  evoked_sss, evoked_move_all, 0., 0.)
    assert_meg_snr(evoked_sss, evoked_move_non, 0.02, 2.6)
    assert_meg_snr(evoked_sss, evoked_stat_all, 0.05, 3.2)
    # these should be close to numerical precision
    assert_allclose(evoked_sss_stat.data, evoked_stat_all.data, atol=1e-20)
    # pos[0] > epochs.events[0] uses dev_head_t, so make it equivalent
    destination = deepcopy(epochs.info['dev_head_t'])
    x = head_pos_to_trans_rot_t(head_pos[1])
    epochs.info['dev_head_t']['trans'][:3, :3] = x[1]
    epochs.info['dev_head_t']['trans'][:3, 3] = x[0]
    assert_raises(AssertionError, assert_allclose,
                  epochs.info['dev_head_t']['trans'],
                  destination['trans'])
    # averaging with a truncated position list should recover the same
    # result and the requested destination transform
    evoked_miss = average_movements(epochs, head_pos=head_pos[2:],
                                    origin=origin, destination=destination)
    assert_allclose(evoked_miss.data, evoked_move_all.data,
                    atol=1e-20)
    assert_allclose(evoked_miss.info['dev_head_t']['trans'],
                    destination['trans'])
    # degenerate cases
    destination['to'] = destination['from']  # bad dest
    assert_raises(RuntimeError, average_movements, epochs, head_pos,
                  origin=origin, destination=destination)
    assert_raises(TypeError, average_movements, 'foo', head_pos=head_pos)
    assert_raises(RuntimeError, average_movements, epochs_proj,
                  head_pos=head_pos)  # prj
def test_reject():
    """Test epochs rejection

    Exercises reject/flat parameter validation, drop_bad bookkeeping
    (selection / drop_log), repeated rejection with tightening thresholds,
    and annotation-based rejection.
    """
    raw, events, picks = _get_data()
    # cull the list just to contain the relevant event
    events = events[events[:, 2] == event_id, :]
    # expected surviving-epoch indices and per-epoch drop reasons
    selection = np.arange(3)
    drop_log = [[]] * 3 + [['MEG 2443']] * 4
    assert_raises(TypeError, pick_types, raw)
    picks_meg = pick_types(raw.info, meg=True, eeg=False)
    # invalid reject/flat arguments must fail at construction time
    assert_raises(TypeError, Epochs, raw, events, event_id, tmin, tmax,
                  picks=picks, preload=False, reject='foo')
    assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
                  picks=picks_meg, preload=False, reject=dict(eeg=1.))
    # this one is okay because it's not actually requesting rejection
    Epochs(raw, events, event_id, tmin, tmax, picks=picks_meg,
           preload=False, reject=dict(eeg=np.inf))
    for val in (None, -1):  # protect against older MNE-C types
        for kwarg in ('reject', 'flat'):
            assert_raises(ValueError, Epochs, raw, events, event_id,
                          tmin, tmax, picks=picks_meg, preload=False,
                          **{kwarg: dict(grad=val)})
    assert_raises(KeyError, Epochs, raw, events, event_id, tmin, tmax,
                  picks=picks, preload=False, reject=dict(foo=1.))
    data_7 = dict()  # reference data for all 7 epochs, keyed by proj mode
    keep_idx = [0, 1, 2]
    for preload in (True, False):
        for proj in (True, False, 'delayed'):
            # no rejection
            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                            preload=preload)
            assert_raises(ValueError, epochs.drop_bad, reject='foo')
            epochs.drop_bad()
            assert_equal(len(epochs), len(events))
            assert_array_equal(epochs.selection, np.arange(len(events)))
            assert_array_equal(epochs.drop_log, [[]] * 7)
            if proj not in data_7:
                data_7[proj] = epochs.get_data()
            assert_array_equal(epochs.get_data(), data_7[proj])
            # with rejection
            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                            reject=reject, preload=preload)
            epochs.drop_bad()
            assert_equal(len(epochs), len(events) - 4)
            assert_array_equal(epochs.selection, selection)
            assert_array_equal(epochs.drop_log, drop_log)
            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
            # rejection post-hoc
            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                            preload=preload)
            epochs.drop_bad()
            assert_equal(len(epochs), len(events))
            assert_array_equal(epochs.get_data(), data_7[proj])
            epochs.drop_bad(reject)
            assert_equal(len(epochs), len(events) - 4)
            assert_equal(len(epochs), len(epochs.get_data()))
            assert_array_equal(epochs.selection, selection)
            assert_array_equal(epochs.drop_log, drop_log)
            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
            # rejection twice
            reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                            reject=reject_part, preload=preload)
            epochs.drop_bad()
            assert_equal(len(epochs), len(events) - 1)
            epochs.drop_bad(reject)
            assert_equal(len(epochs), len(events) - 4)
            assert_array_equal(epochs.selection, selection)
            assert_array_equal(epochs.drop_log, drop_log)
            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
            # ensure that thresholds must become more stringent, not less
            assert_raises(ValueError, epochs.drop_bad, reject_part)
            assert_equal(len(epochs), len(events) - 4)
            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
            # an absurdly high flat threshold drops everything
            epochs.drop_bad(flat=dict(mag=1.))
            assert_equal(len(epochs), 0)
            assert_raises(ValueError, epochs.drop_bad,
                          flat=dict(mag=0.))
            # rejection of subset of trials (ensure array ownership)
            reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                            reject=None, preload=preload)
            epochs = epochs[:-1]
            epochs.drop_bad(reject=reject)
            assert_equal(len(epochs), len(events) - 4)
            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
    # rejection on annotations
    # NOTE: `preload` below is the leftover value from the loop above
    raw.annotations = Annotations([events[0][0] / raw.info['sfreq']],
                                  [1], ['BAD'])
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=[0],
                    reject=None, preload=preload)
    epochs.drop_bad()
    assert_equal(len(events) - 1, len(epochs.events))
    assert_equal(epochs.drop_log[0][0], 'BAD')
    raw.annotations = None
def test_decim():
    """Test epochs decimation

    Covers EpochsArray decimation (with offsets and chained decimation),
    argument validation, offset alignment on real data, and equivalence of
    decimating at init / after init / after preloading.
    """
    # First with EpochsArray
    dec_1, dec_2 = 2, 3
    decim = dec_1 * dec_2
    n_epochs, n_channels, n_times = 5, 10, 20
    sfreq = 1000.
    sfreq_new = sfreq / decim
    data = rng.randn(n_epochs, n_channels, n_times)
    events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
    info = create_info(n_channels, sfreq, 'eeg')
    # set lowpass low enough that decimation does not warn about aliasing
    info['lowpass'] = sfreq_new / float(decim)
    epochs = EpochsArray(data, info, events)
    data_epochs = epochs.copy().decimate(decim).get_data()
    data_epochs_2 = epochs.copy().decimate(decim, offset=1).get_data()
    # chained decimation by the factors equals one-shot decimation
    data_epochs_3 = epochs.decimate(dec_1).decimate(dec_2).get_data()
    assert_array_equal(data_epochs, data[:, :, ::decim])
    assert_array_equal(data_epochs_2, data[:, :, 1::decim])
    assert_array_equal(data_epochs, data_epochs_3)
    # Now let's do it with some real data
    raw, events, picks = _get_data()
    events = events[events[:, 2] == 1][:2]
    raw.load_data().pick_channels([raw.ch_names[pick] for pick in picks[::30]])
    raw.info.normalize_proj()
    del picks
    sfreq_new = raw.info['sfreq'] / decim
    raw.info['lowpass'] = sfreq_new / 12.  # suppress aliasing warnings
    # invalid decimation arguments
    assert_raises(ValueError, epochs.decimate, -1)
    assert_raises(ValueError, epochs.decimate, 2, offset=-1)
    assert_raises(ValueError, epochs.decimate, 2, offset=2)
    for this_offset in range(decim):
        epochs = Epochs(raw, events, event_id,
                        tmin=-this_offset / raw.info['sfreq'],
                        tmax=tmax, preload=False)
        idx_offsets = np.arange(decim) + this_offset
        for offset, idx_offset in zip(np.arange(decim), idx_offsets):
            expected_times = epochs.times[idx_offset::decim]
            expected_data = epochs.get_data()[:, :, idx_offset::decim]
            # the sample at time `must_have` must survive decimation
            must_have = offset / float(epochs.info['sfreq'])
            assert_true(np.isclose(must_have, expected_times).any())
            ep_decim = epochs.copy().decimate(decim, offset)
            assert_true(np.isclose(must_have, ep_decim.times).any())
            assert_allclose(ep_decim.times, expected_times)
            assert_allclose(ep_decim.get_data(), expected_data)
            assert_equal(ep_decim.info['sfreq'], sfreq_new)
    # More complex cases
    epochs = Epochs(raw, events, event_id, tmin, tmax, preload=False)
    expected_data = epochs.get_data()[:, :, ::decim]
    expected_times = epochs.times[::decim]
    # NOTE: get_data() is asserted twice on purpose below — presumably to
    # also exercise the repeated-access path; confirm against history.
    for preload in (True, False):
        # at init
        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=decim,
                        preload=preload)
        assert_allclose(epochs.get_data(), expected_data)
        assert_allclose(epochs.get_data(), expected_data)
        assert_equal(epochs.info['sfreq'], sfreq_new)
        assert_array_equal(epochs.times, expected_times)
        # split between init and afterward
        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
                        preload=preload).decimate(dec_2)
        assert_allclose(epochs.get_data(), expected_data)
        assert_allclose(epochs.get_data(), expected_data)
        assert_equal(epochs.info['sfreq'], sfreq_new)
        assert_array_equal(epochs.times, expected_times)
        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
                        preload=preload).decimate(dec_1)
        assert_allclose(epochs.get_data(), expected_data)
        assert_allclose(epochs.get_data(), expected_data)
        assert_equal(epochs.info['sfreq'], sfreq_new)
        assert_array_equal(epochs.times, expected_times)
        # split between init and afterward, with preload in between
        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
                        preload=preload)
        epochs.load_data()
        epochs = epochs.decimate(dec_2)
        assert_allclose(epochs.get_data(), expected_data)
        assert_allclose(epochs.get_data(), expected_data)
        assert_equal(epochs.info['sfreq'], sfreq_new)
        assert_array_equal(epochs.times, expected_times)
        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
                        preload=preload)
        epochs.load_data()
        epochs = epochs.decimate(dec_1)
        assert_allclose(epochs.get_data(), expected_data)
        assert_allclose(epochs.get_data(), expected_data)
        assert_equal(epochs.info['sfreq'], sfreq_new)
        assert_array_equal(epochs.times, expected_times)
        # decimate afterward
        epochs = Epochs(raw, events, event_id, tmin, tmax,
                        preload=preload).decimate(decim)
        assert_allclose(epochs.get_data(), expected_data)
        assert_allclose(epochs.get_data(), expected_data)
        assert_equal(epochs.info['sfreq'], sfreq_new)
        assert_array_equal(epochs.times, expected_times)
        # decimate afterward, with preload in between
        epochs = Epochs(raw, events, event_id, tmin, tmax,
                        preload=preload)
        epochs.load_data()
        epochs.decimate(decim)
        assert_allclose(epochs.get_data(), expected_data)
        assert_allclose(epochs.get_data(), expected_data)
        assert_equal(epochs.info['sfreq'], sfreq_new)
        assert_array_equal(epochs.times, expected_times)
def test_base_epochs():
    """Test the abstract base epochs class."""
    info = _get_data()[0].info
    valid_events = np.ones((1, 3), int)
    epochs = _BaseEpochs(info, None, valid_events, event_id, tmin, tmax)
    # the base class provides no concrete data access
    assert_raises(NotImplementedError, epochs.get_data)
    # events must be an integer array of shape (n_events, 3)
    for bad_events in (np.ones((1, 3), float),      # non-integer dtype
                       np.ones((1, 3, 2), int)):    # wrong dimensionality
        assert_raises(ValueError, _BaseEpochs, info, None, bad_events,
                      event_id, tmin, tmax)
@requires_version('scipy', '0.14')
def test_savgol_filter():
    """Test savgol filtering

    Checks that Savitzky-Golay filtering preserves the pass-band spectrum
    and attenuates the stop-band, and that it requires preloaded data.
    """
    h_freq = 10.
    raw, events = _get_data()[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax)
    # filtering requires preloaded data
    assert_raises(RuntimeError, epochs.savgol_filter, 10.)
    epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
    freqs = fftpack.fftfreq(len(epochs.times), 1. / epochs.info['sfreq'])
    data = np.abs(fftpack.fft(epochs.get_data()))
    # frequency masks well inside the pass-band / stop-band of the filter
    match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
    mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
    epochs.savgol_filter(h_freq)
    data_filt = np.abs(fftpack.fft(epochs.get_data()))
    # decent in pass-band
    assert_allclose(np.mean(data[:, :, match_mask], 0),
                    np.mean(data_filt[:, :, match_mask], 0),
                    rtol=1e-4, atol=1e-2)
    # suppression in stop-band
    assert_true(np.mean(data[:, :, mismatch_mask]) >
                np.mean(data_filt[:, :, mismatch_mask]) * 5)
def test_epochs_hash():
    """Test epoch hashing."""
    raw, events = _get_data()[:2]
    # hashing is only defined once the data are loaded
    unloaded = Epochs(raw, events, event_id, tmin, tmax)
    assert_raises(RuntimeError, unloaded.__hash__)
    epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
    assert_equal(hash(epochs), hash(epochs))
    twin = Epochs(raw, events, event_id, tmin, tmax, preload=True)
    assert_equal(hash(epochs), hash(twin))
    # do NOT use assert_equal here, failing output is terrible
    assert_true(pickle.dumps(epochs) == pickle.dumps(twin))
    # any change to the underlying data must change the hash
    twin._data[0, 0, 0] -= 1
    assert_not_equal(hash(epochs), hash(twin))
def test_event_ordering():
    """Test event order."""
    raw, events = _get_data()[:2]
    shuffled = events.copy()
    rng.shuffle(shuffled)
    # chronologically ordered events warn zero times; shuffled ones once
    for n_expected, eve in ((0, events), (1, shuffled)):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            Epochs(raw, eve, event_id, tmin, tmax,
                   baseline=(None, 0), reject=reject, flat=flat)
            assert_equal(len(w), n_expected)
            if n_expected > 0:
                assert_true('chronologically' in '%s' % w[-1].message)
def test_epochs_bad_baseline():
    """Test Epochs initialization with bad baseline parameters."""
    raw, events = _get_data()[:2]
    # baselines extending beyond, or inverted w.r.t., the epoch window
    bad_combos = [(-0.1, 0.3, (-0.2, 0)),
                  (-0.1, 0.3, (0, 0.4)),
                  (-0.1, 0.3, (0.1, 0)),
                  (0.1, 0.3, (None, 0)),
                  (-0.3, -0.1, (0, None))]
    for t_start, t_stop, bl in bad_combos:
        assert_raises(ValueError, Epochs, raw, events, None,
                      t_start, t_stop, bl)
    epochs = Epochs(raw, events, None, 0.1, 0.3, baseline=None)
    # applying a baseline requires loaded data
    assert_raises(RuntimeError, epochs.apply_baseline, (0.1, 0.2))
    epochs.load_data()
    # baseline interval must lie inside the epoch window
    assert_raises(ValueError, epochs.apply_baseline, (None, 0))
    assert_raises(ValueError, epochs.apply_baseline, (0, None))
    # put some rescale options here, too
    data = np.arange(100, dtype=float)
    assert_raises(ValueError, rescale, data, times=data, baseline=(-2, -1))
    rescale(data.copy(), times=data, baseline=(2, 2))  # ok
    assert_raises(ValueError, rescale, data, times=data, baseline=(2, 1))
    assert_raises(ValueError, rescale, data, times=data, baseline=(100, 101))
def test_epoch_combine_ids():
    """Test combining event ids in epochs compared to events."""
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events,
                    {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 32},
                    tmin, tmax, picks=picks, preload=False)
    # merging ids in the raw events should match combining them in epochs
    merged_events = merge_events(events, [1, 2], 12)
    combined = combine_event_ids(epochs, ['a', 'b'], {'ab': 12})
    assert_equal(combined['ab'].name, 'ab')
    assert_array_equal(merged_events, combined.events)
    # should probably add test + functionality for non-replacement XXX
def test_epoch_multi_ids():
    """Test epoch selection via multiple/partial keys."""
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events,
                    {'a/b/a': 1, 'a/b/b': 2, 'a/c': 3, 'b/d': 4, 'a_b': 5},
                    tmin, tmax, picks=picks, preload=False)
    # partial-tag selection ['a', 'b'] must match the same epochs as the
    # fully-qualified tag list
    via_partial = epochs[['a', 'b']]
    via_full = epochs[['a/b/a', 'a/b/b']]
    assert_array_equal(via_partial.events, via_full.events)
def test_read_epochs_bad_events():
    """Test epochs when events are at the beginning or the end of the file

    Epochs whose windows extend past the recording edges should still
    construct, average (possibly with warnings), and repr cleanly.
    """
    raw, events, picks = _get_data()
    # Event at the beginning
    epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
                    event_id, tmin, tmax, picks=picks, baseline=(None, 0))
    with warnings.catch_warnings(record=True):
        evoked = epochs.average()
    epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
                    event_id, tmin, tmax, picks=picks, baseline=(None, 0))
    assert_true(repr(epochs))  # test repr
    epochs.drop_bad()
    assert_true(repr(epochs))
    with warnings.catch_warnings(record=True):
        evoked = epochs.average()
    # Event at the end
    epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]),
                    event_id, tmin, tmax, picks=picks, baseline=(None, 0))
    with warnings.catch_warnings(record=True):
        evoked = epochs.average()
    assert evoked
    warnings.resetwarnings()
@slow_test
def test_read_write_epochs():
    """Test epochs from raw files with IO as fif file

    Covers construction/baseline validation, channel dropping, the decim
    kwarg (with warning counts), proj handling across save/read, file
    descriptor hygiene, round-trip IO with and without baseline, split
    files, and single-time-point epochs.
    """
    raw, events, picks = _get_data(preload=True)
    tempdir = _TempDir()
    temp_fname = op.join(tempdir, 'test-epo.fif')
    temp_fname_no_bl = op.join(tempdir, 'test_no_bl-epo.fif')
    baseline = (None, 0)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=baseline, preload=True)
    epochs_orig = epochs.copy()
    epochs_no_bl = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                          baseline=None, preload=True)
    assert_true(epochs_no_bl.baseline is None)
    evoked = epochs.average()
    data = epochs.get_data()
    # Bad tmin/tmax parameters
    assert_raises(ValueError, Epochs, raw, events, event_id, tmax, tmin,
                  baseline=None)
    # passing event_id=None with pre-filtered events gives the same data
    epochs_no_id = Epochs(raw, pick_events(events, include=event_id),
                          None, tmin, tmax, picks=picks,
                          baseline=(None, 0))
    assert_array_equal(data, epochs_no_id.get_data())
    eog_picks = pick_types(raw.info, meg=False, eeg=False, stim=False,
                           eog=True, exclude='bads')
    eog_ch_names = [raw.ch_names[k] for k in eog_picks]
    epochs.drop_channels(eog_ch_names)
    assert_true(len(epochs.info['chs']) == len(epochs.ch_names) ==
                epochs.get_data().shape[1])
    data_no_eog = epochs.get_data()
    assert_true(data.shape[1] == (data_no_eog.shape[1] + len(eog_picks)))
    # test decim kwarg
    with warnings.catch_warnings(record=True) as w:
        # decim with lowpass
        warnings.simplefilter('always')
        epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                            baseline=(None, 0), decim=2)
        assert_equal(len(w), 1)
        # decim without lowpass
        epochs_dec.info['lowpass'] = None
        epochs_dec.decimate(2)
        assert_equal(len(w), 2)
    data_dec = epochs_dec.get_data()
    assert_allclose(data[:, :, epochs_dec._decim_slice], data_dec, rtol=1e-7,
                    atol=1e-12)
    evoked_dec = epochs_dec.average()
    assert_allclose(evoked.data[:, epochs_dec._decim_slice],
                    evoked_dec.data, rtol=1e-12, atol=1e-17)
    n = evoked.data.shape[1]
    n_dec = evoked_dec.data.shape[1]
    n_dec_min = n // 4
    assert_true(n_dec_min <= n_dec <= n_dec_min + 1)
    assert_true(evoked_dec.info['sfreq'] == evoked.info['sfreq'] / 4)
    # Test event access on non-preloaded data (#2345)
    # due to reapplication of the proj matrix, this is our quality limit
    # for some tests
    tols = dict(atol=1e-3, rtol=1e-20)
    raw, events, picks = _get_data()
    events[::2, 1] = 1
    events[1::2, 2] = 2
    event_ids = dict(a=1, b=2)
    for proj in (True, 'delayed', False):
        epochs = Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
                        baseline=(None, 0), proj=proj, reject=reject,
                        add_eeg_ref=True)
        assert_equal(epochs.proj, proj if proj != 'delayed' else False)
        data1 = epochs.get_data()
        epochs2 = epochs.copy().apply_proj()
        assert_equal(epochs2.proj, True)
        data2 = epochs2.get_data()
        assert_allclose(data1, data2, **tols)
        epochs.save(temp_fname)
        epochs_read = read_epochs(temp_fname, preload=False)
        assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)
        assert_allclose(epochs['a'].get_data(),
                        epochs_read['a'].get_data(), **tols)
        assert_allclose(epochs['b'].get_data(),
                        epochs_read['b'].get_data(), **tols)
    # ensure we don't leak file descriptors
    epochs_read = read_epochs(temp_fname, preload=False)
    epochs_copy = epochs_read.copy()
    del epochs_read
    epochs_copy.get_data()
    with warnings.catch_warnings(record=True) as w:
        del epochs_copy
    assert_equal(len(w), 0)
    # test IO
    for preload in (False, True):
        epochs = epochs_orig.copy()
        epochs.save(temp_fname)
        epochs_no_bl.save(temp_fname_no_bl)
        epochs_read = read_epochs(temp_fname, preload=preload)
        # NOTE(review): the save/read below repeat the two lines above and
        # the re-read drops the `preload` argument — looks like a merge
        # artifact; confirm against upstream history.
        epochs_no_bl.save(temp_fname_no_bl)
        epochs_read = read_epochs(temp_fname)
        epochs_no_bl_read = read_epochs(temp_fname_no_bl)
        assert_raises(ValueError, epochs.apply_baseline, baseline=[1, 2, 3])
        epochs_with_bl = epochs_no_bl_read.copy().apply_baseline(baseline)
        assert_true(isinstance(epochs_with_bl, _BaseEpochs))
        assert_true(epochs_with_bl.baseline == baseline)
        assert_true(epochs_no_bl_read.baseline != baseline)
        assert_true(str(epochs_read).startswith('<Epochs'))
        epochs_no_bl_read.apply_baseline(baseline)
        assert_array_equal(epochs_no_bl_read.times, epochs.times)
        assert_array_almost_equal(epochs_read.get_data(), epochs.get_data())
        assert_array_almost_equal(epochs.get_data(),
                                  epochs_no_bl_read.get_data())
        assert_array_equal(epochs_read.times, epochs.times)
        assert_array_almost_equal(epochs_read.average().data, evoked.data)
        assert_equal(epochs_read.proj, epochs.proj)
        # normalize open-ended baseline endpoints for the comparison
        bmin, bmax = epochs.baseline
        if bmin is None:
            bmin = epochs.times[0]
        if bmax is None:
            bmax = epochs.times[-1]
        baseline = (bmin, bmax)
        assert_array_almost_equal(epochs_read.baseline, baseline)
        assert_array_almost_equal(epochs_read.tmin, epochs.tmin, 2)
        assert_array_almost_equal(epochs_read.tmax, epochs.tmax, 2)
        assert_equal(epochs_read.event_id, epochs.event_id)
        epochs.event_id.pop('1')
        epochs.event_id.update({'a:a': 1})  # test allow for ':' in key
        epochs.save(op.join(tempdir, 'foo-epo.fif'))
        epochs_read2 = read_epochs(op.join(tempdir, 'foo-epo.fif'),
                                   preload=preload)
        assert_equal(epochs_read2.event_id, epochs.event_id)
        # add reject here so some of the epochs get dropped
        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                        baseline=(None, 0), reject=reject)
        epochs.save(temp_fname)
        # ensure bad events are not saved
        epochs_read3 = read_epochs(temp_fname, preload=preload)
        assert_array_equal(epochs_read3.events, epochs.events)
        data = epochs.get_data()
        assert_true(epochs_read3.events.shape[0] == data.shape[0])
        # test copying loaded one (raw property)
        epochs_read4 = epochs_read3.copy()
        assert_array_almost_equal(epochs_read4.get_data(), data)
        # test equalizing loaded one (drop_log property)
        epochs_read4.equalize_event_counts(epochs.event_id, copy=False)
        epochs.drop([1, 2], reason='can we recover orig ID?')
        epochs.save(temp_fname)
        epochs_read5 = read_epochs(temp_fname, preload=preload)
        assert_array_equal(epochs_read5.selection, epochs.selection)
        assert_equal(len(epochs_read5.selection), len(epochs_read5.events))
        assert_array_equal(epochs_read5.drop_log, epochs.drop_log)
        if preload:
            # Test that one can drop channels on read file
            epochs_read5.drop_channels(epochs_read5.ch_names[:1])
        # test warnings on bad filenames
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            epochs_badname = op.join(tempdir, 'test-bad-name.fif.gz')
            epochs.save(epochs_badname)
            read_epochs(epochs_badname, preload=preload)
        assert_naming(w, 'test_epochs.py', 2)
        # test loading epochs with missing events
        epochs = Epochs(raw, events, dict(foo=1, bar=999), tmin, tmax,
                        picks=picks, on_missing='ignore')
        epochs.save(temp_fname)
        epochs_read = read_epochs(temp_fname, preload=preload)
        assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)
        assert_array_equal(epochs.events, epochs_read.events)
        assert_equal(set(epochs.event_id.keys()),
                     set(text_type(x) for x in epochs_read.event_id.keys()))
        # test saving split epoch files
        epochs.save(temp_fname, split_size='7MB')
        epochs_read = read_epochs(temp_fname, preload=preload)
        assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)
        assert_array_equal(epochs.events, epochs_read.events)
        assert_array_equal(epochs.selection, epochs_read.selection)
        assert_equal(epochs.drop_log, epochs_read.drop_log)
        # Test that having a single time point works
        epochs.load_data().crop(0, 0)
        assert_equal(len(epochs.times), 1)
        assert_equal(epochs.get_data().shape[-1], 1)
        epochs.save(temp_fname)
        epochs_read = read_epochs(temp_fname, preload=preload)
        assert_equal(len(epochs_read.times), 1)
        assert_equal(epochs.get_data().shape[-1], 1)
def test_epochs_proj():
    """Test handling projection (apply proj in Raw or in Epochs)

    Applying projections at the Raw stage and at the Epochs stage should
    give the same data; also covers average-EEG-reference handling,
    save/read with late-added projs (GH#2200), and GH#2727.
    """
    tempdir = _TempDir()
    raw, events, picks = _get_data()
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
    this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
                            eog=True, exclude=exclude)
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
                    baseline=(None, 0), proj=True)
    assert_true(all(p['active'] is True for p in epochs.info['projs']))
    evoked = epochs.average()
    assert_true(all(p['active'] is True for p in evoked.info['projs']))
    data = epochs.get_data()
    # projections already applied in Raw -> proj=False in Epochs
    raw_proj = Raw(raw_fname, proj=True)
    epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax,
                            picks=this_picks, baseline=(None, 0), proj=False)
    data_no_proj = epochs_no_proj.get_data()
    assert_true(all(p['active'] is True for p in epochs_no_proj.info['projs']))
    evoked_no_proj = epochs_no_proj.average()
    assert_true(all(p['active'] is True for p in evoked_no_proj.info['projs']))
    assert_true(epochs_no_proj.proj is True)  # as projs are active from Raw
    assert_array_almost_equal(data, data_no_proj, decimal=8)
    # make sure we can exclude avg ref
    this_picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
                            eog=True, exclude=exclude)
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
                    baseline=(None, 0), proj=True, add_eeg_ref=True)
    assert_true(_has_eeg_average_ref_proj(epochs.info['projs']))
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
                    baseline=(None, 0), proj=True, add_eeg_ref=False)
    assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
    # make sure we don't add avg ref when a custom ref has been applied
    raw.info['custom_ref_applied'] = True
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
                    baseline=(None, 0), proj=True)
    assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
    # From GH#2200:
    # This has no problem
    proj = raw.info['projs']
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
                    baseline=(None, 0), proj=False)
    epochs.info['projs'] = []
    data = epochs.copy().add_proj(proj).apply_proj().get_data()
    # save and reload data
    fname_epo = op.join(tempdir, 'temp-epo.fif')
    epochs.save(fname_epo)  # Save without proj added
    epochs_read = read_epochs(fname_epo)
    epochs_read.add_proj(proj)
    epochs_read.apply_proj()  # This used to bomb
    data_2 = epochs_read.get_data()  # Let's check the result
    assert_allclose(data, data_2, atol=1e-15, rtol=1e-3)
    # adding EEG ref (GH #2727)
    raw = Raw(raw_fname)
    raw.add_proj([], remove_existing=True)
    raw.info['bads'] = ['MEG 2443', 'EEG 053']
    picks = pick_types(raw.info, meg=False, eeg=True, stim=True, eog=False,
                       exclude='bads')
    epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                    baseline=(None, 0), preload=True, add_eeg_ref=False)
    epochs.pick_channels(['EEG 001', 'EEG 002'])
    assert_equal(len(epochs), 7)  # sufficient for testing
    temp_fname = op.join(tempdir, 'test-epo.fif')
    epochs.save(temp_fname)
    for preload in (True, False):
        # with avg ref applied, each epoch's channel mean is ~zero
        epochs = read_epochs(temp_fname, add_eeg_ref=True, proj=True,
                             preload=preload)
        assert_allclose(epochs.get_data().mean(axis=1), 0, atol=1e-15)
        # with proj=False the ref exists but is not applied yet
        epochs = read_epochs(temp_fname, add_eeg_ref=True, proj=False,
                             preload=preload)
        assert_raises(AssertionError, assert_allclose,
                      epochs.get_data().mean(axis=1), 0., atol=1e-15)
        epochs.add_eeg_average_proj()
        assert_raises(AssertionError, assert_allclose,
                      epochs.get_data().mean(axis=1), 0., atol=1e-15)
        epochs.apply_proj()
        assert_allclose(epochs.get_data().mean(axis=1), 0, atol=1e-15)
def test_evoked_arithmetic():
    """Test combining Evoked data via nave-weighted sum and difference."""
    raw, events, picks = _get_data()
    # Average the first and second halves of the events separately.
    ave_first = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                       baseline=(None, 0)).average()
    ave_second = Epochs(raw, events[4:8], event_id, tmin, tmax, picks=picks,
                        baseline=(None, 0)).average()
    # Average over all eight events at once for comparison.
    ave_all = Epochs(raw, events[:8], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0)).average()
    # nave-weighted combination of the halves must equal the full average.
    combined = combine_evoked([ave_first, ave_second], weights='nave')
    assert_array_equal(ave_all.data, combined.data)
    assert_array_equal(ave_all.times, combined.times)
    assert_equal(combined.nave, ave_first.nave + ave_second.nave)
    # Subtracting an Evoked from itself must give all zeros.
    difference = combine_evoked([ave_first, ave_first], weights=[1, -1])
    assert_array_equal(np.zeros_like(ave_all.data), difference.data)
def test_evoked_io_from_epochs():
    """Test round-trip save/read of Evoked data computed from Epochs.

    Exercises decimation (with a warning expected when decimating
    unfiltered data), the ``proj_name`` empty-string -> None shortcut,
    and I/O of evokeds with shifted/cropped time axes.
    """
    tempdir = _TempDir()
    raw, events, picks = _get_data()
    # offset our tmin so we don't get exactly a zero value when decimating
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax,
                        picks=picks, baseline=(None, 0), decim=5)
        assert_true(len(w) == 1)
    evoked = epochs.average()
    evoked.info['proj_name'] = ''  # Test that empty string shortcuts to None.
    evoked.save(op.join(tempdir, 'evoked-ave.fif'))
    evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
    assert_equal(evoked2.info['proj_name'], None)
    assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
    # times tolerance is one sample because decimation shifts the grid
    assert_allclose(evoked.times, evoked2.times, rtol=1e-4,
                    atol=1 / evoked.info['sfreq'])
    # now let's do one with negative time
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        epochs = Epochs(raw, events[:4], event_id, 0.1, tmax,
                        picks=picks, baseline=(0.1, 0.2), decim=5)
    evoked = epochs.average()
    evoked.save(op.join(tempdir, 'evoked-ave.fif'))
    evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
    assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
    assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
    # should be equivalent to a cropped original
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,
                        picks=picks, baseline=(0.1, 0.2), decim=5)
    evoked = epochs.average()
    evoked.crop(0.099, None)
    assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
    assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
def test_evoked_standard_error():
    """Test writing and reading back average + standard-error Evokeds."""
    raw, events, picks = _get_data()
    tempdir = _TempDir()
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    originals = [epochs.average(), epochs.standard_error()]
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), originals)
    # read back by index, and by condition name + kind
    by_index = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), [0, 1])
    by_name = [read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown'),
               read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown',
                            kind='standard_error')]
    for loaded in [by_index, by_name]:
        # aspect kinds must round-trip correctly
        assert_true(loaded[0]._aspect_kind ==
                    FIFF.FIFFV_ASPECT_AVERAGE)
        assert_true(loaded[0].kind == 'average')
        assert_true(loaded[1]._aspect_kind ==
                    FIFF.FIFFV_ASPECT_STD_ERR)
        assert_true(loaded[1].kind == 'standard_error')
        # data and metadata must match what was written
        for written, read_back in zip(originals, loaded):
            assert_array_almost_equal(written.data, read_back.data)
            assert_array_almost_equal(written.times, read_back.times)
            assert_equal(written.nave, read_back.nave)
            assert_equal(written._aspect_kind, read_back._aspect_kind)
            assert_equal(written.kind, read_back.kind)
            assert_equal(written.last, read_back.last)
            assert_equal(written.first, read_back.first)
def test_reject_epochs():
    """Test epochs rejection via peak-to-peak (reject) and flatness (flat).

    Checks drop_log bookkeeping, that bad channels are excluded from
    rejection, reject_tmin/reject_tmax validation and windowing, and the
    ``_is_good_epoch`` edge cases.
    """
    raw, events, picks = _get_data()
    events1 = events[events[:, 2] == event_id]
    epochs = Epochs(raw, events1,
                    event_id, tmin, tmax, baseline=(None, 0),
                    reject=reject, flat=flat)
    # len() is undefined before bad epochs have been dropped/loaded
    assert_raises(RuntimeError, len, epochs)
    n_events = len(epochs.events)
    data = epochs.get_data()
    n_clean_epochs = len(data)
    # Should match
    # mne_process_raw --raw test_raw.fif --projoff \
    #   --saveavetag -ave --ave test.ave --filteroff
    assert_true(n_events > n_clean_epochs)
    assert_true(n_clean_epochs == 3)
    assert_true(epochs.drop_log == [[], [], [], ['MEG 2443'], ['MEG 2443'],
                                    ['MEG 2443'], ['MEG 2443']])
    # Ensure epochs are not dropped based on a bad channel
    raw_2 = raw.copy()
    raw_2.info['bads'] = ['MEG 2443']
    reject_crazy = dict(grad=1000e-15, mag=4e-15, eeg=80e-9, eog=150e-9)
    epochs = Epochs(raw_2, events1, event_id, tmin, tmax, baseline=(None, 0),
                    reject=reject_crazy, flat=flat)
    epochs.drop_bad()
    # with impossible thresholds everything is rejected, but never because
    # of the channel marked bad
    assert_true(all('MEG 2442' in e for e in epochs.drop_log))
    assert_true(all('MEG 2443' not in e for e in epochs.drop_log))
    # Invalid reject_tmin/reject_tmax/detrend
    assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
                  reject_tmin=1., reject_tmax=0)
    assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
                  reject_tmin=tmin - 1, reject_tmax=1.)
    assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
                  reject_tmin=0., reject_tmax=tmax + 1)
    # restricting the rejection window keeps more epochs
    epochs = Epochs(raw, events1, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=reject, flat=flat,
                    reject_tmin=0., reject_tmax=.1)
    data = epochs.get_data()
    n_clean_epochs = len(data)
    assert_true(n_clean_epochs == 7)
    assert_true(len(epochs) == 7)
    assert_true(epochs.times[epochs._reject_time][0] >= 0.)
    assert_true(epochs.times[epochs._reject_time][-1] <= 0.1)
    # Invalid data for _is_good_epoch function
    epochs = Epochs(raw, events1, event_id, tmin, tmax, reject=None, flat=None)
    assert_equal(epochs._is_good_epoch(None), (False, ['NO_DATA']))
    assert_equal(epochs._is_good_epoch(np.zeros((1, 1))),
                 (False, ['TOO_SHORT']))
    data = epochs[0].get_data()[0]
    assert_equal(epochs._is_good_epoch(data), (True, None))
def test_preload_epochs():
    """Test that preloaded and lazily-loaded epochs yield identical data."""
    raw, events, picks = _get_data()
    eager = Epochs(raw, events[:16], event_id, tmin, tmax,
                   picks=picks, baseline=(None, 0), preload=True,
                   reject=reject, flat=flat)
    eager_data = eager.get_data()
    lazy = Epochs(raw, events[:16], event_id, tmin, tmax, picks=picks,
                  baseline=(None, 0), preload=False,
                  reject=reject, flat=flat)
    lazy_data = lazy.get_data()
    # identical epoch data regardless of preloading
    assert_array_equal(eager_data, lazy_data)
    # and (nearly) identical averages
    assert_array_almost_equal(eager.average().data,
                              lazy.average().data, 18)
def test_indexing_slicing():
    """Test indexing and slicing of Epochs.

    Compares slices, single-int indexing, ndarray indexing, and
    list-of-int indexing against the data obtained via ``get_data``,
    both with and without preloading.
    """
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=False,
                    reject=reject, flat=flat)
    data_normal = epochs.get_data()
    n_good_events = data_normal.shape[0]
    # indices for slicing
    start_index = 1
    end_index = n_good_events - 1
    assert((end_index - start_index) > 0)
    for preload in [True, False]:
        epochs2 = Epochs(raw, events[:20], event_id, tmin, tmax,
                         picks=picks, baseline=(None, 0), preload=preload,
                         reject=reject, flat=flat)
        if not preload:
            epochs2.drop_bad()
        # using slicing
        epochs2_sliced = epochs2[start_index:end_index]
        data_epochs2_sliced = epochs2_sliced.get_data()
        assert_array_equal(data_epochs2_sliced,
                           data_normal[start_index:end_index])
        # using indexing
        pos = 0
        for idx in range(start_index, end_index):
            data = epochs2_sliced[pos].get_data()
            assert_array_equal(data[0], data_normal[idx])
            pos += 1
        # using indexing with an int
        # NOTE: after the loop, idx == end_index - 1, i.e. the last index
        data = epochs2[data_epochs2_sliced.shape[0]].get_data()
        assert_array_equal(data, data_normal[[idx]])
        # using indexing with an array
        idx = rng.randint(0, data_epochs2_sliced.shape[0], 10)
        data = epochs2[idx].get_data()
        assert_array_equal(data, data_normal[idx])
        # using indexing with a list of indices
        idx = [0]
        data = epochs2[idx].get_data()
        assert_array_equal(data, data_normal[idx])
        idx = [0, 1]
        data = epochs2[idx].get_data()
        assert_array_equal(data, data_normal[idx])
def test_comparision_with_c():
    """Compare the Python-computed average against the C reference output."""
    # NOTE: the function name keeps its historical spelling so that test
    # selection by name keeps working.
    raw, events = _get_data()[:2]
    reference = read_evokeds(evoked_nf_name, condition=0)
    epochs = Epochs(raw, events, event_id, tmin, tmax,
                    baseline=None, preload=True,
                    reject=None, flat=None)
    computed = epochs.average()
    # align the reference channel ordering with ours before comparing
    sel = pick_channels(reference.ch_names, computed.ch_names)
    reference_data = reference.data[sel]
    assert_true(computed.nave == reference.nave)
    assert_array_almost_equal(computed.data, reference_data, 10)
    assert_array_almost_equal(computed.times, reference.times, 12)
def test_crop():
    """Test cropping of Epochs.

    Covers the preload requirement, out-of-bounds warnings, equivalence
    of copy-then-crop vs in-place crop, time-index bookkeeping, and
    rejection of nonsensical crop windows.
    """
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=False,
                    reject=reject, flat=flat)
    assert_raises(RuntimeError, epochs.crop, None, 0.2)  # not preloaded
    data_normal = epochs.get_data()
    epochs2 = Epochs(raw, events[:5], event_id, tmin, tmax,
                     picks=picks, baseline=(None, 0), preload=True,
                     reject=reject, flat=flat)
    with warnings.catch_warnings(record=True) as w:
        # cropping beyond the data bounds warns (once per side)
        epochs2.crop(-20, 200)
        assert_true(len(w) == 2)
    # indices for slicing
    tmin_window = tmin + 0.1
    tmax_window = tmax - 0.1
    tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)
    assert_true(tmin_window > tmin)
    assert_true(tmax_window < tmax)
    # copy-crop and in-place crop must agree with boolean-mask slicing
    epochs3 = epochs2.copy().crop(tmin_window, tmax_window)
    data3 = epochs3.get_data()
    epochs2.crop(tmin_window, tmax_window)
    data2 = epochs2.get_data()
    assert_array_equal(data2, data_normal[:, :, tmask])
    assert_array_equal(data3, data_normal[:, :, tmask])
    assert_array_equal(epochs.time_as_index([tmin, tmax], use_rounding=True),
                       [0, len(epochs.times) - 1])
    assert_array_equal(epochs3.time_as_index([tmin_window, tmax_window],
                                             use_rounding=True),
                       [0, len(epochs3.times) - 1])
    # test time info is correct
    epochs = EpochsArray(np.zeros((1, 1, 1000)), create_info(1, 1000., 'eeg'),
                         np.ones((1, 3), int), tmin=-0.2)
    epochs.crop(-.200, .700)
    last_time = epochs.times[-1]
    with warnings.catch_warnings(record=True):  # not LP filtered
        epochs.decimate(10)
    assert_allclose(last_time, epochs.times[-1])
    epochs = Epochs(raw, events[:5], event_id, -1, 1,
                    picks=picks, baseline=(None, 0), preload=True,
                    reject=reject, flat=flat)
    # We include nearest sample, so actually a bit beyond our bounds here
    assert_allclose(epochs.tmin, -1.0006410259015925, rtol=1e-12)
    assert_allclose(epochs.tmax, 1.0006410259015925, rtol=1e-12)
    epochs_crop = epochs.copy().crop(-1, 1)
    assert_allclose(epochs.times, epochs_crop.times, rtol=1e-12)
    # Ensure we don't allow silly crops
    with warnings.catch_warnings(record=True):  # tmin/tmax out of bounds
        assert_raises(ValueError, epochs.crop, 1000, 2000)
        assert_raises(ValueError, epochs.crop, 0.1, 0)
def test_resample():
    """Test resampling of Epochs.

    Covers the preload requirement, an upsample/downsample round trip,
    parallel jobs, copy semantics, correct time axes after
    concatenation (GH#2645), and cropping after resampling (GH#3296).
    """
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=False,
                    reject=reject, flat=flat)
    assert_raises(RuntimeError, epochs.resample, 100)
    epochs_o = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
                      baseline=(None, 0), preload=True,
                      reject=reject, flat=flat)
    epochs = epochs_o.copy()
    data_normal = cp.deepcopy(epochs.get_data())
    times_normal = cp.deepcopy(epochs.times)
    sfreq_normal = epochs.info['sfreq']
    # upsample by 2
    epochs = epochs_o.copy()
    epochs.resample(sfreq_normal * 2, npad=0)
    data_up = cp.deepcopy(epochs.get_data())
    times_up = cp.deepcopy(epochs.times)
    sfreq_up = epochs.info['sfreq']
    # downsample by 2, which should match
    epochs.resample(sfreq_normal, npad=0)
    data_new = cp.deepcopy(epochs.get_data())
    times_new = cp.deepcopy(epochs.times)
    sfreq_new = epochs.info['sfreq']
    assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
    assert_true(sfreq_up == 2 * sfreq_normal)
    assert_true(sfreq_new == sfreq_normal)
    assert_true(len(times_up) == 2 * len(times_normal))
    assert_array_almost_equal(times_new, times_normal, 10)
    assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
    assert_array_almost_equal(data_new, data_normal, 5)
    # use parallel
    epochs = epochs_o.copy()
    epochs.resample(sfreq_normal * 2, n_jobs=2, npad=0)
    assert_true(np.allclose(data_up, epochs._data, rtol=1e-8, atol=1e-16))
    # test copy flag
    epochs = epochs_o.copy()
    epochs_resampled = epochs.copy().resample(sfreq_normal * 2, npad=0)
    assert_true(epochs_resampled is not epochs)
    epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0)
    assert_true(epochs_resampled is epochs)
    # test proper setting of times (#2645)
    n_trial, n_chan, n_time, sfreq = 1, 1, 10, 1000.
    data = np.zeros((n_trial, n_chan, n_time))
    events = np.zeros((n_trial, 3), int)
    info = create_info(n_chan, sfreq, 'eeg')
    epochs1 = EpochsArray(data, deepcopy(info), events)
    epochs2 = EpochsArray(data, deepcopy(info), events)
    epochs = concatenate_epochs([epochs1, epochs2])
    epochs1.resample(epochs1.info['sfreq'] // 2, npad='auto')
    epochs2.resample(epochs2.info['sfreq'] // 2, npad='auto')
    epochs = concatenate_epochs([epochs1, epochs2])
    for e in epochs1, epochs2, epochs:
        assert_equal(e.times[0], epochs.tmin)
        assert_equal(e.times[-1], epochs.tmax)
    # test that cropping after resampling works (#3296)
    this_tmin = -0.002
    epochs = EpochsArray(data, deepcopy(info), events, tmin=this_tmin)
    for times in (epochs.times, epochs._raw_times):
        assert_allclose(times, np.arange(n_time) / sfreq + this_tmin)
    epochs.resample(info['sfreq'] * 2.)
    for times in (epochs.times, epochs._raw_times):
        assert_allclose(times, np.arange(2 * n_time) / (sfreq * 2) + this_tmin)
    # crop at 0: the two samples before t=0 are removed
    epochs.crop(0, None)
    for times in (epochs.times, epochs._raw_times):
        assert_allclose(times, np.arange((n_time - 2) * 2) / (sfreq * 2))
    epochs.resample(sfreq)
    for times in (epochs.times, epochs._raw_times):
        assert_allclose(times, np.arange(n_time - 2) / sfreq)
def test_detrend():
    """Test detrending of Epochs.

    First-order detrending of epochs must match detrending the averaged
    Evoked; zeroth-order detrending must match full-window baselining
    on data channels (but not on non-M/EEG channels).
    """
    raw, events, picks = _get_data()
    # test first-order
    epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                      baseline=None, detrend=1)
    epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                      baseline=None, detrend=None)
    data_picks = pick_types(epochs_1.info, meg=True, eeg=True,
                            exclude='bads')
    evoked_1 = epochs_1.average()
    evoked_2 = epochs_2.average()
    evoked_2.detrend(1)
    # Due to roundoff these won't be exactly equal, but they should be close
    assert_true(np.allclose(evoked_1.data, evoked_2.data,
                            rtol=1e-8, atol=1e-20))
    # test zeroth-order case
    for preload in [True, False]:
        epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                          baseline=(None, None), preload=preload)
        epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                          baseline=None, preload=preload, detrend=0)
        a = epochs_1.get_data()
        b = epochs_2.get_data()
        # All data channels should be almost equal
        assert_true(np.allclose(a[:, data_picks, :], b[:, data_picks, :],
                                rtol=1e-16, atol=1e-20))
        # There are non-M/EEG channels that should not be equal:
        assert_true(not np.allclose(a, b))
    # only detrend orders 0 and 1 are supported
    assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
                  detrend=2)
def test_bootstrap():
    """Test that bootstrapping preserves epoch count and data shape."""
    raw, events, picks = _get_data()
    original = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
                      baseline=(None, 0), preload=True,
                      reject=reject, flat=flat)
    resampled = bootstrap(original, random_state=0)
    # resampling with replacement keeps the same number of epochs
    assert_true(len(resampled.events) == len(original.events))
    assert_true(original._data.shape == resampled._data.shape)
def test_epochs_copy():
    """Test copying Epochs with and without preloaded data."""
    raw, events, picks = _get_data()
    # preloaded: the copy must carry identical in-memory data
    preloaded = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
                       baseline=(None, 0), preload=True,
                       reject=reject, flat=flat)
    duplicate = preloaded.copy()
    assert_array_equal(preloaded._data, duplicate._data)
    # not preloaded: copy first, then load both and compare
    lazy = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
                  baseline=(None, 0), preload=False,
                  reject=reject, flat=flat)
    duplicate = lazy.copy()
    lazy_data = lazy.get_data()
    duplicate_data = duplicate.get_data()
    assert_array_equal(lazy_data, duplicate_data)
def test_iter_evoked():
    """Test that iter_evoked yields one Evoked per epoch, in order."""
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    for idx, evoked in enumerate(epochs.iter_evoked()):
        # each yielded Evoked must equal the corresponding epoch's data
        expected = epochs.get_data()[idx, :, :]
        assert_array_equal(evoked.data, expected)
def test_subtract_evoked():
    """Test subtraction of Evoked from Epochs.

    Verifies the channel-consistency check, the default (self-average)
    subtraction, equivalence between subtract-then-apply-proj and
    proj-from-the-start pipelines, and that subtracting the evoked
    leaves a zero average.
    """
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    # make sure subtraction fails if data channels are missing
    assert_raises(ValueError, epochs.subtract_evoked,
                  epochs.average(picks[:5]))
    # do the subtraction using the default argument
    epochs.subtract_evoked()
    # apply SSP now
    epochs.apply_proj()
    # use preloading and SSP from the start
    epochs2 = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=True, proj=True)
    evoked = epochs2.average()
    epochs2.subtract_evoked(evoked)
    # this gives the same result
    assert_allclose(epochs.get_data(), epochs2.get_data())
    # if we compute the evoked response after subtracting it we get zero
    zero_evoked = epochs.average()
    data = zero_evoked.data
    assert_allclose(data, np.zeros_like(data), atol=1e-15)
def test_epoch_eq():
    """Test epoch count equalization and condition combining.

    Covers ``equalize_epoch_counts`` across Epochs objects,
    ``equalize_event_counts`` within one object (including hierarchical
    tags and empty conditions), and ``combine_event_ids``.
    """
    raw, events, picks = _get_data()
    # equalizing epochs objects
    epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
    epochs_1.drop_bad()  # make sure drops are logged
    assert_true(len([l for l in epochs_1.drop_log if not l]) ==
                len(epochs_1.events))
    drop_log1 = epochs_1.drop_log = [[] for _ in range(len(epochs_1.events))]
    drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
                 epochs_1.drop_log]
    assert_true(drop_log1 == drop_log2)
    assert_true(len([l for l in epochs_1.drop_log if not l]) ==
                len(epochs_1.events))
    assert_true(epochs_1.events.shape[0] != epochs_2.events.shape[0])
    equalize_epoch_counts([epochs_1, epochs_2], method='mintime')
    assert_true(epochs_1.events.shape[0] == epochs_2.events.shape[0])
    epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
    equalize_epoch_counts([epochs_3, epochs_4], method='truncate')
    assert_true(epochs_1.events.shape[0] == epochs_3.events.shape[0])
    assert_true(epochs_3.events.shape[0] == epochs_4.events.shape[0])
    # equalizing conditions
    epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
                    tmin, tmax, picks=picks, reject=reject)
    epochs.drop_bad()  # make sure drops are logged
    assert_true(len([l for l in epochs.drop_log if not l]) ==
                len(epochs.events))
    drop_log1 = deepcopy(epochs.drop_log)
    old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
    epochs.equalize_event_counts(['a', 'b'], copy=False)
    # undo the eq logging
    drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
                 epochs.drop_log]
    assert_true(drop_log1 == drop_log2)
    assert_true(len([l for l in epochs.drop_log if not l]) ==
                len(epochs.events))
    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
    assert_true(new_shapes[0] == new_shapes[1])
    # FIX: equalizing 'a' and 'b' must leave 'c' and 'd' untouched; the
    # previous assertions here were tautologies (new vs. new).
    assert_true(new_shapes[2] == old_shapes[2])
    assert_true(new_shapes[3] == old_shapes[3])
    # now with two conditions collapsed
    old_shapes = new_shapes
    epochs.equalize_event_counts([['a', 'b'], 'c'], copy=False)
    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
    assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2])
    assert_true(new_shapes[3] == old_shapes[3])
    assert_raises(KeyError, epochs.equalize_event_counts, [1, 'a'], copy=False)
    # now let's combine conditions
    old_shapes = new_shapes
    epochs.equalize_event_counts([['a', 'b'], ['c', 'd']], copy=False)
    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
    assert_true(old_shapes[0] + old_shapes[1] == new_shapes[0] + new_shapes[1])
    assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2] + new_shapes[3])
    # combining into an already-used event id must fail
    assert_raises(ValueError, combine_event_ids, epochs, ['a', 'b'],
                  {'ab': 1}, copy=False)
    combine_event_ids(epochs, ['a', 'b'], {'ab': 12}, copy=False)
    caught = 0
    for key in ['a', 'b']:
        try:
            epochs[key]
        except KeyError:
            caught += 1
    assert_equal(caught, 2)
    assert_true(not np.any(epochs.events[:, 2] == 1))
    assert_true(not np.any(epochs.events[:, 2] == 2))
    epochs = combine_event_ids(epochs, ['c', 'd'], {'cd': 34})
    assert_true(np.all(np.logical_or(epochs.events[:, 2] == 12,
                                     epochs.events[:, 2] == 34)))
    assert_true(epochs['ab'].events.shape[0] == old_shapes[0] + old_shapes[1])
    assert_true(epochs['ab'].events.shape[0] == epochs['cd'].events.shape[0])
    # equalizing with hierarchical tags
    epochs = Epochs(raw, events, {'a/x': 1, 'b/x': 2, 'a/y': 3, 'b/y': 4},
                    tmin, tmax, picks=picks, reject=reject)
    cond1, cond2 = ['a', ['b/x', 'b/y']], [['a/x', 'a/y'], 'b']
    es = [epochs.copy().equalize_event_counts(c, copy=False)[0]
          for c in (cond1, cond2)]
    assert_array_equal(es[0].events[:, 0], es[1].events[:, 0])
    cond1, cond2 = ['a', ['b', 'b/y']], [['a/x', 'a/y'], 'x']
    for c in (cond1, cond2):  # error b/c tag and id mix/non-orthogonal tags
        assert_raises(ValueError, epochs.equalize_event_counts, c, copy=False)
    assert_raises(KeyError, epochs.equalize_event_counts,
                  ["a/no_match", "b"], copy=False)
    # test equalization with no events of one type
    epochs.drop(np.arange(10))
    assert_equal(len(epochs['a/x']), 0)
    assert_true(len(epochs['a/y']) > 0)
    epochs.equalize_event_counts(['a/x', 'a/y'], copy=False)
    assert_equal(len(epochs['a/x']), 0)
    assert_equal(len(epochs['a/y']), 0)
def test_access_by_name():
    """Test accessing epochs by event name and ``on_missing`` behavior.

    Covers invalid event_id inputs, missing event ids, on_missing
    modes, list-of-int event ids, name-based selection (including
    tuples and lists of names) and its round trip through save/read.
    """
    tempdir = _TempDir()
    raw, events, picks = _get_data()
    # Test various invalid inputs
    assert_raises(ValueError, Epochs, raw, events, {1: 42, 2: 42}, tmin,
                  tmax, picks=picks)
    assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
                  tmin, tmax, picks=picks)
    # NOTE(review): this assertion duplicates the previous one verbatim
    assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
                  tmin, tmax, picks=picks)
    assert_raises(ValueError, Epochs, raw, events, 'foo', tmin, tmax,
                  picks=picks)
    assert_raises(ValueError, Epochs, raw, events, ['foo'], tmin, tmax,
                  picks=picks)
    # Test accessing non-existent events (assumes 12345678 does not exist)
    event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
    assert_raises(ValueError, Epochs, raw, events, event_id_illegal,
                  tmin, tmax)
    # Test on_missing
    assert_raises(ValueError, Epochs, raw, events, 1, tmin, tmax,
                  on_missing='foo')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='warning')
        nw = len(w)
        assert_true(1 <= nw <= 2)
        # 'ignore' must not add any new warnings
        Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='ignore')
        assert_equal(len(w), nw)
    # Test constructing epochs with a list of ints as events
    epochs = Epochs(raw, events, [1, 2], tmin, tmax, picks=picks)
    for k, v in epochs.event_id.items():
        assert_equal(int(k), v)
    epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
    assert_raises(KeyError, epochs.__getitem__, 'bar')
    data = epochs['a'].get_data()
    event_a = events[events[:, 2] == 1]
    assert_true(len(data) == len(event_a))
    epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks,
                    preload=True)
    assert_raises(KeyError, epochs.__getitem__, 'bar')
    temp_fname = op.join(tempdir, 'test-epo.fif')
    epochs.save(temp_fname)
    epochs2 = read_epochs(temp_fname)
    # name-based selection must survive the save/read round trip
    for ep in [epochs, epochs2]:
        data = ep['a'].get_data()
        event_a = events[events[:, 2] == 1]
        assert_true(len(data) == len(event_a))
    assert_array_equal(epochs2['a'].events, epochs['a'].events)
    epochs3 = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
                     tmin, tmax, picks=picks, preload=True)
    assert_equal(list(sorted(epochs3[('a', 'b')].event_id.values())),
                 [1, 2])
    epochs4 = epochs['a']
    epochs5 = epochs3['a']
    assert_array_equal(epochs4.events, epochs5.events)
    # 20 is our tolerance because epochs are written out as floats
    assert_array_almost_equal(epochs4.get_data(), epochs5.get_data(), 20)
    epochs6 = epochs3[['a', 'b']]
    assert_true(all(np.logical_or(epochs6.events[:, 2] == 1,
                                  epochs6.events[:, 2] == 2)))
    assert_array_equal(epochs.events, epochs6.events)
    assert_array_almost_equal(epochs.get_data(), epochs6.get_data(), 20)
    # Make sure we preserve names
    assert_equal(epochs['a'].name, 'a')
    assert_equal(epochs[['a', 'b']]['a'].name, 'a')
@requires_pandas
def test_to_data_frame():
    """Test the epochs Pandas exporter.

    Checks index validation, default vs explicit index/picks, column
    names, scaling of MEG/EEG channels, and that non-indexed metadata
    columns remain available.
    """
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
    # invalid index arguments must be rejected
    assert_raises(ValueError, epochs.to_data_frame, index=['foo', 'bar'])
    assert_raises(ValueError, epochs.to_data_frame, index='qux')
    assert_raises(ValueError, epochs.to_data_frame, np.arange(400))
    df = epochs.to_data_frame(index=['condition', 'epoch', 'time'],
                              picks=list(range(epochs.info['nchan'])))
    # Default index and picks
    df2 = epochs.to_data_frame()
    assert_equal(df.index.names, df2.index.names)
    assert_array_equal(df.columns.values, epochs.ch_names)
    data = np.hstack(epochs.get_data())
    assert_true((df.columns == epochs.ch_names).all())
    # exported values are scaled (1e13 for grad, 1e15 for mag)
    assert_array_equal(df.values[:, 0], data[0] * 1e13)
    assert_array_equal(df.values[:, 2], data[2] * 1e15)
    for ind in ['time', ['condition', 'time'], ['condition', 'time', 'epoch']]:
        df = epochs.to_data_frame(index=ind)
        assert_true(df.index.names == ind if isinstance(ind, list) else [ind])
        # test that non-indexed data were present as categorial variables
        assert_array_equal(sorted(df.reset_index().columns[:3]),
                           sorted(['time', 'condition', 'epoch']))
def test_epochs_proj_mixin():
    """Test SSP proj methods from the ProjMixin class.

    Covers proj activation state, add_proj/del_proj validation,
    'delayed' projection equivalence with proj=True/False pipelines,
    and manual projector application.
    """
    raw, events, picks = _get_data()
    for proj in [True, False]:
        epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                        baseline=(None, 0), proj=proj)
        assert_true(all(p['active'] == proj for p in epochs.info['projs']))
        # test adding / deleting proj
        if proj:
            epochs.get_data()
            assert_true(all(p['active'] == proj for p in epochs.info['projs']))
            # active projs cannot be modified or removed
            assert_raises(ValueError, epochs.add_proj, epochs.info['projs'][0],
                          {'remove_existing': True})
            assert_raises(ValueError, epochs.add_proj, 'spam')
            assert_raises(ValueError, epochs.del_proj, 0)
        else:
            projs = deepcopy(epochs.info['projs'])
            n_proj = len(epochs.info['projs'])
            epochs.del_proj(0)
            assert_true(len(epochs.info['projs']) == n_proj - 1)
            # Test that already existing projections are not added.
            epochs.add_proj(projs, remove_existing=False)
            assert_true(len(epochs.info['projs']) == n_proj)
            epochs.add_proj(projs[:-1], remove_existing=True)
            assert_true(len(epochs.info['projs']) == n_proj - 1)
    # catch no-gos.
    # wrong proj argument
    assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
                  picks=picks, baseline=(None, 0), proj='crazy')
    for preload in [True, False]:
        epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                        baseline=(None, 0), proj='delayed', preload=preload,
                        add_eeg_ref=True, reject=reject)
        epochs_proj = Epochs(
            raw, events[:4], event_id, tmin, tmax, picks=picks,
            baseline=(None, 0), proj=True, preload=preload, add_eeg_ref=True,
            reject=reject)
        epochs_noproj = Epochs(
            raw, events[:4], event_id, tmin, tmax, picks=picks,
            baseline=(None, 0), proj=False, preload=preload, add_eeg_ref=True,
            reject=reject)
        # delayed + apply_proj == proj=True; delayed alone == proj=False
        assert_allclose(epochs.copy().apply_proj().get_data(),
                        epochs_proj.get_data(), rtol=1e-10, atol=1e-25)
        assert_allclose(epochs.get_data(),
                        epochs_noproj.get_data(), rtol=1e-10, atol=1e-25)
        # make sure data output is constant across repeated calls
        # e.g. drop bads
        assert_array_equal(epochs.get_data(), epochs.get_data())
        assert_array_equal(epochs_proj.get_data(), epochs_proj.get_data())
        assert_array_equal(epochs_noproj.get_data(), epochs_noproj.get_data())
    # test epochs.next calls
    data = epochs.get_data().copy()
    data2 = np.array([e for e in epochs])
    assert_array_equal(data, data2)
    # cross application from processing stream 1 to 2
    epochs.apply_proj()
    assert_array_equal(epochs._projector, epochs_proj._projector)
    assert_allclose(epochs._data, epochs_proj.get_data())
    # test mixin against manual application
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=None, proj=False, add_eeg_ref=True)
    data = epochs.get_data().copy()
    epochs.apply_proj()
    assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])
def test_delayed_epochs():
    """Test delayed projection on Epochs.

    For each combination of decimation, preloading, and proj mode
    (True / False / 'delayed'), checks that Epochs and EpochsArray
    give the expected (no)proj data and averages.
    """
    raw, events, picks = _get_data()
    events = events[:10]
    # a sparse subset of data channels plus all ECG/EOG channels
    picks = np.concatenate([pick_types(raw.info, meg=True, eeg=True)[::22],
                            pick_types(raw.info, meg=False, eeg=False,
                                       ecg=True, eog=True)])
    picks = np.sort(picks)
    raw.load_data().pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    del picks
    n_epochs = 2  # number we expect after rejection
    raw.info['lowpass'] = 40.  # fake the LP info so no warnings
    for decim in (1, 3):
        # reference data with and without projection applied
        proj_data = Epochs(raw, events, event_id, tmin, tmax, proj=True,
                           reject=reject, decim=decim)
        use_tmin = proj_data.tmin
        proj_data = proj_data.get_data()
        noproj_data = Epochs(raw, events, event_id, tmin, tmax, proj=False,
                             reject=reject, decim=decim).get_data()
        assert_equal(proj_data.shape, noproj_data.shape)
        assert_equal(proj_data.shape[0], n_epochs)
        for preload in (True, False):
            for proj in (True, False, 'delayed'):
                for ii in range(3):
                    # ii 0/1: Epochs (1 also tests load_data);
                    # ii 2: EpochsArray built from the reference data
                    print(decim, preload, proj, ii)
                    comp = proj_data if proj is True else noproj_data
                    if ii in (0, 1):
                        epochs = Epochs(raw, events, event_id, tmin, tmax,
                                        proj=proj, reject=reject,
                                        preload=preload, decim=decim)
                    else:
                        fake_events = np.zeros((len(comp), 3), int)
                        fake_events[:, 0] = np.arange(len(comp))
                        fake_events[:, 2] = 1
                        epochs = EpochsArray(comp, raw.info, tmin=use_tmin,
                                             event_id=1, events=fake_events,
                                             proj=proj)
                        epochs.info['sfreq'] /= decim
                    assert_equal(len(epochs), n_epochs)
                    assert_true(raw.proj is False)
                    assert_true(epochs.proj is
                                (True if proj is True else False))
                    if ii == 1:
                        epochs.load_data()
                    picks_data = pick_types(epochs.info, meg=True, eeg=True)
                    evoked = epochs.average(picks=picks_data)
                    assert_equal(evoked.nave, n_epochs, epochs.drop_log)
                    if proj is True:
                        evoked.apply_proj()
                    else:
                        assert_true(evoked.proj is False)
                    assert_array_equal(evoked.ch_names,
                                       np.array(epochs.ch_names)[picks_data])
                    assert_allclose(evoked.times, epochs.times)
                    epochs_data = epochs.get_data()
                    assert_allclose(evoked.data,
                                    epochs_data.mean(axis=0)[picks_data],
                                    rtol=1e-5, atol=1e-20)
                    assert_allclose(epochs_data, comp, rtol=1e-5, atol=1e-20)
def test_drop_epochs():
    """Test dropping of epochs.

    Checks bound validation of ``drop``, the ``selection`` attribute,
    the IGNORED entries for events not in event_id, and drop_log
    bookkeeping for explicitly dropped epochs.
    """
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    events1 = events[events[:, 2] == event_id]
    # Bound checks
    assert_raises(IndexError, epochs.drop, [len(epochs.events)])
    assert_raises(IndexError, epochs.drop, [-1])
    assert_raises(ValueError, epochs.drop, [[1, 2], [3, 4]])
    # Test selection attribute
    assert_array_equal(epochs.selection,
                       np.where(events[:, 2] == event_id)[0])
    assert_equal(len(epochs.drop_log), len(events))
    # events of other ids are logged as IGNORED
    assert_true(all(epochs.drop_log[k] == ['IGNORED']
                for k in set(range(len(events))) - set(epochs.selection)))
    selection = epochs.selection.copy()
    n_events = len(epochs.events)
    epochs.drop([2, 4], reason='d')
    assert_equal(epochs.drop_log_stats(), 2. / n_events * 100)
    assert_equal(len(epochs.drop_log), len(events))
    # dropped epochs carry the given reason in the log
    assert_equal([epochs.drop_log[k]
                  for k in selection[[2, 4]]], [['d'], ['d']])
    assert_array_equal(events[epochs.selection], events1[[0, 1, 3, 5, 6]])
    assert_array_equal(events[epochs[3:].selection], events1[[5, 6]])
    assert_array_equal(events[epochs['1'].selection], events1[[0, 1, 3, 5, 6]])
def test_drop_epochs_mult():
    """Test that subselecting epochs or making fewer epochs is equivalent.

    Selecting condition 'a' from a two-condition Epochs must match an
    Epochs built with only condition 'a', modulo IGNORED drop-log
    entries in the preload case.
    """
    raw, events, picks = _get_data()
    for preload in [True, False]:
        epochs1 = Epochs(raw, events, {'a': 1, 'b': 2},
                         tmin, tmax, picks=picks, reject=reject,
                         preload=preload)['a']
        epochs2 = Epochs(raw, events, {'a': 1},
                         tmin, tmax, picks=picks, reject=reject,
                         preload=preload)
        if preload:
            # In the preload case you cannot know the bads if already ignored
            assert_equal(len(epochs1.drop_log), len(epochs2.drop_log))
            for d1, d2 in zip(epochs1.drop_log, epochs2.drop_log):
                if d1 == ['IGNORED']:
                    assert_true(d2 == ['IGNORED'])
                if d1 != ['IGNORED'] and d1 != []:
                    assert_true((d2 == d1) or (d2 == ['IGNORED']))
                if d1 == []:
                    assert_true(d2 == [])
            assert_array_equal(epochs1.events, epochs2.events)
            assert_array_equal(epochs1.selection, epochs2.selection)
        else:
            # In the non preload is should be exactly the same
            assert_equal(epochs1.drop_log, epochs2.drop_log)
            assert_array_equal(epochs1.events, epochs2.events)
            assert_array_equal(epochs1.selection, epochs2.selection)
def test_contains():
    """Test the channel-type membership API (``'eeg' in epochs``)."""
    raw, events = _get_data(True)[:2]
    # Append one sEEG channel so that all tested channel types exist.
    seeg_raw = RawArray(np.zeros((1, len(raw.times))),
                        create_info(['SEEG 001'], raw.info['sfreq'], 'seeg'))
    copied_keys = ('dev_head_t', 'buffer_size_sec', 'highpass', 'lowpass',
                   'filename', 'dig', 'description', 'acq_pars',
                   'experimenter', 'proj_name')
    for key in copied_keys:
        seeg_raw.info[key] = raw.info[key]
    raw.add_channels([seeg_raw])
    # Each case: (meg, eeg, seeg) pick_types args -> types that must NOT
    # report membership in the resulting Epochs.
    cases = [(('mag', False, False), ('grad', 'eeg', 'seeg')),
             (('grad', False, False), ('mag', 'eeg', 'seeg')),
             ((False, True, False), ('grad', 'mag', 'seeg')),
             ((False, False, True), ('grad', 'mag', 'eeg'))]
    for (meg, eeg, seeg), absent in cases:
        sel = pick_types(raw.info, meg=meg, eeg=eeg, seeg=seeg)
        epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax,
                        picks=sel, reject=None, preload=False)
        present = 'eeg' if eeg else ('seeg' if seeg else meg)
        assert_true(present in epochs)
        assert_true(not any(kind in epochs for kind in absent))
    # Unknown channel-type names and non-string keys are rejected.
    assert_raises(ValueError, epochs.__contains__, 'foo')
    assert_raises(ValueError, epochs.__contains__, 1)
def test_drop_channels_mixin():
    """Test in-place and copy-based channel dropping on Epochs."""
    raw, events = _get_data()[:2]
    # picks=None on purpose: exercises the no-picks construction path.
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=None,
                    baseline=(None, 0), preload=True)
    to_drop = epochs.ch_names[:3]
    kept = epochs.ch_names[3:]
    all_names = epochs.ch_names
    # Dropping on a copy must leave the source object untouched.
    clone = epochs.copy().drop_channels(to_drop)
    assert_equal(kept, clone.ch_names)
    assert_equal(all_names, epochs.ch_names)
    assert_equal(len(all_names), epochs.get_data().shape[1])
    # In-place drop must shrink both ch_names and the data array.
    epochs.drop_channels(to_drop)
    assert_equal(kept, epochs.ch_names)
    assert_equal(len(kept), epochs.get_data().shape[1])
def test_pick_channels_mixin():
    """Test in-place and copy-based channel picking on Epochs."""
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    wanted = epochs.ch_names[:3]
    # Channel operations must refuse to run on non-preloaded data.
    epochs.preload = False
    assert_raises(RuntimeError, epochs.drop_channels, ['foo'])
    epochs.preload = True
    before = epochs.ch_names
    # Picking on a copy must leave the source object untouched.
    clone = epochs.copy().pick_channels(wanted)
    assert_equal(wanted, clone.ch_names)
    assert_equal(before, epochs.ch_names)
    assert_equal(len(before), epochs.get_data().shape[1])
    # In-place pick must shrink both ch_names and the data array.
    epochs.pick_channels(wanted)
    assert_equal(wanted, epochs.ch_names)
    assert_equal(len(wanted), epochs.get_data().shape[1])
    # An empty picks list is rejected at construction time.
    assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
                  picks=[])
def test_equalize_channels():
    """Test that equalize_channels keeps only the common channel set."""
    raw, events, picks = _get_data()
    first = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                   baseline=(None, 0), proj=False, preload=True)
    second = first.copy()
    # One object loses channel 0, the other channel 1; the intersection
    # is therefore everything from channel 2 onwards.
    expected = first.ch_names[2:]
    first.drop_channels(first.ch_names[:1])
    second.drop_channels(second.ch_names[1:2])
    pair = [first, second]
    equalize_channels(pair)
    for inst in pair:
        assert_equal(expected, inst.ch_names)
def test_illegal_event_id():
    """Test that an event id absent from the events array is rejected."""
    raw, events, picks = _get_data()
    # 12345678 does not occur in the events array -> must raise.
    bad_event_id = dict(aud_l=1, does_not_exist=12345678)
    assert_raises(ValueError, Epochs, raw, events, bad_event_id, tmin,
                  tmax, picks=picks, baseline=(None, 0), proj=False)
def test_add_channels_epochs():
    """Test adding channels.

    Splits the data into MEG-only and EEG-only Epochs, recombines them via
    ``add_channels_epochs``, checks equality with the original MEG+EEG
    Epochs, then exercises the consistency checks (events, sfreq, channel
    names, dev_head_t, preload state, times, baseline, event_id).
    """
    raw, events, picks = _get_data()

    def make_epochs(picks, proj):
        # Helper: preloaded Epochs over the given picks, with/without SSP.
        return Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
                      reject=None, preload=True, proj=proj, picks=picks)

    picks = pick_types(raw.info, meg=True, eeg=True, exclude='bads')
    picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')
    picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    for proj in (False, True):
        # Recombining the two single-type objects must reproduce the
        # MEG+EEG object: same number of projs/info keys, identical data.
        epochs = make_epochs(picks=picks, proj=proj)
        epochs_meg = make_epochs(picks=picks_meg, proj=proj)
        epochs_eeg = make_epochs(picks=picks_eeg, proj=proj)
        epochs.info._check_consistency()
        epochs_meg.info._check_consistency()
        epochs_eeg.info._check_consistency()
        epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
        assert_equal(len(epochs.info['projs']), len(epochs2.info['projs']))
        assert_equal(len(epochs.info.keys()), len(epochs_meg.info.keys()))
        assert_equal(len(epochs.info.keys()), len(epochs_eeg.info.keys()))
        assert_equal(len(epochs.info.keys()), len(epochs2.info.keys()))

        data1 = epochs.get_data()
        data2 = epochs2.get_data()
        data3 = np.concatenate([e.get_data() for e in
                                [epochs_meg, epochs_eeg]], axis=1)
        assert_array_equal(data1.shape, data2.shape)
        assert_allclose(data1, data3, atol=1e-25)
        assert_allclose(data1, data2, atol=1e-25)

    # A differing meas_date alone must NOT prevent recombination.
    epochs_meg2 = epochs_meg.copy()
    epochs_meg2.info['meas_date'] += 10
    add_channels_epochs([epochs_meg2, epochs_eeg])

    # Filename case differences are likewise tolerated.
    epochs_meg2 = epochs_meg.copy()
    epochs2.info['filename'] = epochs2.info['filename'].upper()
    epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])

    # Mismatched event codes must be rejected.
    epochs_meg2 = epochs_meg.copy()
    epochs_meg2.events[3, 2] -= 1
    assert_raises(ValueError, add_channels_epochs,
                  [epochs_meg2, epochs_eeg])
    # Different numbers of epochs must be rejected.
    assert_raises(ValueError, add_channels_epochs,
                  [epochs_meg, epochs_eeg[:2]])

    # An internally inconsistent info must be rejected.  NOTE: this mutates
    # epochs_meg itself, so all later copies inherit the popped channel.
    epochs_meg.info['chs'].pop(0)
    epochs_meg.info._update_redundant()
    assert_raises(RuntimeError, add_channels_epochs,
                  [epochs_meg, epochs_eeg])

    # A missing sampling frequency must be rejected.
    epochs_meg2 = epochs_meg.copy()
    epochs_meg2.info['sfreq'] = None
    assert_raises(RuntimeError, add_channels_epochs,
                  [epochs_meg2, epochs_eeg])

    # A differing sampling frequency must be rejected.
    epochs_meg2 = epochs_meg.copy()
    epochs_meg2.info['sfreq'] += 10
    assert_raises(RuntimeError, add_channels_epochs,
                  [epochs_meg2, epochs_eeg])

    # Duplicate channel names across the inputs must be rejected.
    epochs_meg2 = epochs_meg.copy()
    epochs_meg2.info['chs'][1]['ch_name'] = epochs_meg2.info['ch_names'][0]
    epochs_meg2.info._update_redundant()
    assert_raises(RuntimeError, add_channels_epochs,
                  [epochs_meg2, epochs_eeg])

    # Differing device->head transforms must be rejected.  (This block
    # appears twice verbatim -- presumably an accidental duplicate.)
    epochs_meg2 = epochs_meg.copy()
    epochs_meg2.info['dev_head_t']['to'] += 1
    assert_raises(ValueError, add_channels_epochs,
                  [epochs_meg2, epochs_eeg])

    epochs_meg2 = epochs_meg.copy()
    epochs_meg2.info['dev_head_t']['to'] += 1
    assert_raises(ValueError, add_channels_epochs,
                  [epochs_meg2, epochs_eeg])

    # NOTE(review): 'expimenter' looks like a typo for 'experimenter'; as
    # written it introduces an unknown info key, which also makes the infos
    # inconsistent and triggers the RuntimeError -- confirm intended
    # spelling before changing it, since the fix alters which check fires.
    epochs_meg2 = epochs_meg.copy()
    epochs_meg2.info['expimenter'] = 'foo'
    assert_raises(RuntimeError, add_channels_epochs,
                  [epochs_meg2, epochs_eeg])

    # Non-preloaded inputs must be rejected.
    epochs_meg2 = epochs_meg.copy()
    epochs_meg2.preload = False
    assert_raises(ValueError, add_channels_epochs,
                  [epochs_meg2, epochs_eeg])

    # Shifted time axes are not supported.
    epochs_meg2 = epochs_meg.copy()
    epochs_meg2.times += 0.4
    assert_raises(NotImplementedError, add_channels_epochs,
                  [epochs_meg2, epochs_eeg])

    epochs_meg2 = epochs_meg.copy()
    epochs_meg2.times += 0.5
    assert_raises(NotImplementedError, add_channels_epochs,
                  [epochs_meg2, epochs_eeg])

    # Differing baselines are not supported.
    epochs_meg2 = epochs_meg.copy()
    epochs_meg2.baseline = None
    assert_raises(NotImplementedError, add_channels_epochs,
                  [epochs_meg2, epochs_eeg])

    # Differing event_id dicts are not supported.
    epochs_meg2 = epochs_meg.copy()
    epochs_meg2.event_id['b'] = 2
    assert_raises(NotImplementedError, add_channels_epochs,
                  [epochs_meg2, epochs_eeg])
def test_array_epochs():
    """Test creating epochs from a plain NumPy array (EpochsArray)."""
    import matplotlib.pyplot as plt
    tempdir = _TempDir()

    # creating: 10 epochs x 20 EEG channels x 300 samples at 1 kHz
    data = rng.random_sample((10, 20, 300))
    sfreq = 1e3
    ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
    types = ['eeg'] * 20
    info = create_info(ch_names, sfreq, types)
    events = np.c_[np.arange(1, 600, 60),
                   np.zeros(10, int),
                   [1, 2] * 5]
    event_id = {'a': 1, 'b': 2}
    epochs = EpochsArray(data, info, events, tmin, event_id)
    assert_true(str(epochs).startswith('<EpochsArray'))
    # From GH#1963: shape/event mismatches must raise at construction.
    assert_raises(ValueError, EpochsArray, data[:-1], info, events, tmin,
                  event_id)
    assert_raises(ValueError, EpochsArray, data, info, events, tmin,
                  dict(a=1))

    # saving: FIF round-trip must preserve data, times, event_id, events
    temp_fname = op.join(tempdir, 'test-epo.fif')
    epochs.save(temp_fname)
    epochs2 = read_epochs(temp_fname)
    data2 = epochs2.get_data()
    assert_allclose(data, data2)
    assert_allclose(epochs.times, epochs2.times)
    assert_equal(epochs.event_id, epochs2.event_id)
    assert_array_equal(epochs.events, epochs2.events)

    # plotting: smoke test only
    epochs[0].plot()
    plt.close('all')

    # indexing by condition name and by slice
    assert_array_equal(np.unique(epochs['a'].events[:, 2]), np.array([1]))
    assert_equal(len(epochs[:2]), 2)

    # rejection: corrupt four epochs; with reject_tmin/tmax = [0.1, 0.2] s
    # only the spikes inside that window (epoch 0) and the all-flat epoch 1
    # should be dropped -- epochs 2 and 3 are modified outside the window.
    data[0, 5, 150] = 3000
    data[1, :, :] = 0
    data[2, 5, 210] = 3000
    data[3, 5, 260] = 0
    epochs = EpochsArray(data, info, events=events, event_id=event_id,
                         tmin=0, reject=dict(eeg=1000), flat=dict(eeg=1e-1),
                         reject_tmin=0.1, reject_tmax=0.2)
    assert_equal(len(epochs), len(events) - 2)
    # channel index 5 is 'EEG 006'
    assert_equal(epochs.drop_log[0], ['EEG 006'])
    assert_equal(len(epochs.drop_log), 10)
    assert_equal(len(epochs.events), len(epochs.selection))

    # baseline: constant data minus its baseline mean must be all zeros
    data = np.ones((10, 20, 300))
    epochs = EpochsArray(data, info, events=events, event_id=event_id,
                         tmin=-.2, baseline=(None, 0))
    ep_data = epochs.get_data()
    assert_array_equal(np.zeros_like(ep_data), ep_data)

    # one time point: degenerate length-1 epochs must construct and save
    epochs = EpochsArray(data[:, :, :1], info, events=events,
                         event_id=event_id, tmin=0., baseline=None)
    assert_allclose(epochs.times, [0.])
    assert_allclose(epochs.get_data(), data[:, :, :1])
    epochs.save(temp_fname)
    epochs_read = read_epochs(temp_fname)
    assert_allclose(epochs_read.times, [0.])
    assert_allclose(epochs_read.get_data(), data[:, :, :1])

    # event as integer (#2435): a scalar event_id must be accepted
    mask = (events[:, 2] == 1)
    data_1 = data[mask]
    events_1 = events[mask]
    epochs = EpochsArray(data_1, info, events=events_1, event_id=1,
                         tmin=-0.2, baseline=(None, 0))

    # default events: omitting events yields consecutive ids of 1
    epochs = EpochsArray(data_1, info)
    assert_array_equal(epochs.events[:, 0], np.arange(len(data_1)))
    assert_array_equal(epochs.events[:, 1], np.zeros(len(data_1), int))
    assert_array_equal(epochs.events[:, 2], np.ones(len(data_1), int))
def test_concatenate_epochs():
    """Test concatenate epochs.

    Checks that concatenation doubles the data/drop log with monotonically
    increasing event samples, and that mismatched channels, times,
    baselines, and dev_head_t transforms are rejected.
    """
    raw, events, picks = _get_data()
    epochs = Epochs(
        raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
        picks=picks)
    epochs2 = epochs.copy()
    epochs_list = [epochs, epochs2]
    epochs_conc = concatenate_epochs(epochs_list)
    # Event samples must be strictly increasing (hence unique) afterwards.
    assert_array_equal(
        epochs_conc.events[:, 0], np.unique(epochs_conc.events[:, 0]))

    # Twice the epochs, same channels/times; drop log simply repeats.
    expected_shape = list(epochs.get_data().shape)
    expected_shape[0] *= 2
    expected_shape = tuple(expected_shape)
    assert_equal(epochs_conc.get_data().shape, expected_shape)
    assert_equal(epochs_conc.drop_log, epochs.drop_log * 2)

    # Differing channel sets must be rejected.
    epochs2 = epochs.copy()
    epochs2._data = epochs2.get_data()
    epochs2.preload = True
    assert_raises(
        ValueError, concatenate_epochs,
        [epochs, epochs2.copy().drop_channels(epochs2.ch_names[:1])])

    # Differing time axes must be rejected.
    epochs2.times = np.delete(epochs2.times, 1)
    assert_raises(
        ValueError,
        concatenate_epochs, [epochs, epochs2])

    # The concatenated object no longer references the raw data.
    assert_equal(epochs_conc._raw, None)

    # check if baseline is same for all epochs
    epochs2.baseline = (-0.1, None)
    assert_raises(ValueError, concatenate_epochs, [epochs, epochs2])

    # check if dev_head_t is same
    epochs2 = epochs.copy()
    concatenate_epochs([epochs, epochs2])  # should work
    epochs2.info['dev_head_t']['trans'][:3, 3] += 0.0001
    assert_raises(ValueError, concatenate_epochs, [epochs, epochs2])
    # Non-list / non-Epochs inputs must be rejected with TypeError.
    assert_raises(TypeError, concatenate_epochs, 'foo')
    assert_raises(TypeError, concatenate_epochs, [epochs, 'foo'])
    # One missing transform is an error; both missing is allowed.
    epochs2.info['dev_head_t'] = None
    assert_raises(ValueError, concatenate_epochs, [epochs, epochs2])
    epochs.info['dev_head_t'] = None
    concatenate_epochs([epochs, epochs2])  # should work
def test_add_channels():
    """Test epoch splitting / re-appending channel types.

    Splits a mixed-type Epochs into per-type objects, re-appends them with
    ``add_channels``, and verifies both the recombined channel sets/data
    and the error conditions (non-preloaded input, mismatched sfreq or
    time range, self-addition, wrong argument type).
    """
    raw, events, picks = _get_data()
    epoch_nopre = Epochs(
        raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
        picks=picks)
    epoch = Epochs(
        raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
        picks=picks, preload=True)
    epoch_eeg = epoch.copy().pick_types(meg=False, eeg=True)
    epoch_meg = epoch.copy().pick_types(meg=True)
    epoch_stim = epoch.copy().pick_types(meg=False, stim=True)
    epoch_eeg_meg = epoch.copy().pick_types(meg=True, eeg=True)

    # MEG + EEG + stim: every stim/MEG channel must be present.
    epoch_new = epoch_meg.copy().add_channels([epoch_eeg, epoch_stim])
    assert_true(all(ch in epoch_new.ch_names
                    for ch in epoch_stim.ch_names + epoch_meg.ch_names))

    # MEG + EEG only: data must equal the directly-picked MEG+EEG object.
    epoch_new = epoch_meg.copy().add_channels([epoch_eeg])
    # Bug fix: the original passed a bare generator to assert_true, which
    # is always truthy; actually verify every recombined channel is there.
    assert_true(all(ch in epoch_new.ch_names
                    for ch in epoch_eeg.ch_names + epoch_meg.ch_names))
    assert_array_equal(epoch_new._data, epoch_eeg_meg._data)
    assert_true(all(ch not in epoch_new.ch_names
                    for ch in epoch_stim.ch_names))

    # Now test errors
    epoch_badsf = epoch_eeg.copy()
    epoch_badsf.info['sfreq'] = 3.1415927
    epoch_eeg = epoch_eeg.crop(-.1, .1)

    assert_raises(AssertionError, epoch_meg.add_channels, [epoch_nopre])
    assert_raises(RuntimeError, epoch_meg.add_channels, [epoch_badsf])
    assert_raises(AssertionError, epoch_meg.add_channels, [epoch_eeg])
    assert_raises(ValueError, epoch_meg.add_channels, [epoch_meg])
    # A bare Epochs (not wrapped in a list) must also be rejected.
    assert_raises(AssertionError, epoch_meg.add_channels, epoch_badsf)
def test_seeg_ecog():
    """Test the compatibility of the Epoch object with SEEG and ECoG data."""
    n_epochs, n_channels, n_times, sfreq = 5, 10, 20, 1000.
    signal = np.ones((n_epochs, n_channels, n_times))
    trig = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
    base_kwargs = dict(meg=False, exclude=[])
    for ch_type in ('seeg', 'ecog'):
        info = create_info(n_channels, sfreq, ch_type)
        epochs = EpochsArray(signal, info, trig)
        # pick_types with only this type enabled must select all channels.
        kwargs = dict(base_kwargs)
        kwargs[ch_type] = True
        sel = pick_types(epochs.info, **kwargs)
        assert_equal(len(sel), n_channels)
def test_default_values():
    """Test that default event_id/tmin/tmax equal the documented values."""
    raw, events = _get_data()[:2]
    # Construct once with defaults and once fully explicit; the Epochs
    # hash covers data and metadata, so equal hashes mean equal objects.
    implicit = Epochs(raw, events[:1], preload=True)
    explicit = Epochs(raw, events[:1], event_id=None, tmin=-0.2, tmax=0.5,
                      preload=True)
    assert_equal(hash(implicit), hash(explicit))
# Run this module's tests when it is executed directly as a script.
run_tests_if_main()
| {
"repo_name": "alexandrebarachant/mne-python",
"path": "mne/tests/test_epochs.py",
"copies": "1",
"size": "91689",
"license": "bsd-3-clause",
"hash": 6598657369602859000,
"line_mean": 41.8653576438,
"line_max": 79,
"alpha_frac": 0.5993630643,
"autogenerated": false,
"ratio": 3.285519762066865,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4384882826366865,
"avg_score": null,
"num_lines": null
} |
import os.path as op
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from mne import Epochs, read_evokeds, pick_types
from mne.io.compensator import make_compensator, get_current_comp
from mne.io import Raw
from mne.utils import _TempDir, requires_mne, run_subprocess
# Path to the small CTF test recording shipped alongside these tests.
base_dir = op.join(op.dirname(__file__), 'data')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
def test_compensation():
    """Test CTF gradient compensation matrices and grade round-trips."""
    tempdir = _TempDir()
    raw = Raw(ctf_comp_fname, compensation=None)
    # The test file is recorded at compensation grade 3.
    assert_equal(get_current_comp(raw.info), 3)
    # 340 channels total; 311 once the compensation channels are excluded.
    comp1 = make_compensator(raw.info, 3, 1, exclude_comp_chs=False)
    assert_true(comp1.shape == (340, 340))
    comp2 = make_compensator(raw.info, 3, 1, exclude_comp_chs=True)
    assert_true(comp2.shape == (311, 340))

    # round-trip: compensating A->B then B->A must be the identity
    desired = np.eye(340)
    for from_ in range(3):
        for to in range(3):
            if from_ == to:
                continue
            comp1 = make_compensator(raw.info, from_, to)
            comp2 = make_compensator(raw.info, to, from_)
            # To get 1e-12 here (instead of 1e-6) we must use the linalg.inv
            # method mentioned in compensator.py
            assert_allclose(np.dot(comp1, comp2), desired, atol=1e-12)
            assert_allclose(np.dot(comp2, comp1), desired, atol=1e-12)

    # make sure that changing the comp doesn't modify the original data;
    # apply 2, save/reload, then go back to 3 and compare against `raw`.
    raw2 = Raw(ctf_comp_fname).apply_gradient_compensation(2)
    assert_equal(get_current_comp(raw2.info), 2)
    fname = op.join(tempdir, 'ctf-raw.fif')
    raw2.save(fname)
    raw2 = Raw(fname)
    assert_equal(raw2.compensation_grade, 2)
    raw2.apply_gradient_compensation(3)
    assert_equal(raw2.compensation_grade, 3)
    data, _ = raw[:, :]
    data2, _ = raw2[:, :]
    # channels have norm ~1e-12, hence the tight atol with loose rtol
    assert_allclose(data, data2, rtol=1e-9, atol=1e-18)
    for ch1, ch2 in zip(raw.info['chs'], raw2.info['chs']):
        assert_true(ch1['coil_type'] == ch2['coil_type'])
@requires_mne
def test_compensation_mne():
    """Test compensation by comparing with the MNE-C command-line tool."""
    tempdir = _TempDir()

    def make_evoked(fname, comp):
        # Average a single dummy epoch at the requested compensation grade.
        raw = Raw(fname)
        if comp is not None:
            raw.apply_gradient_compensation(comp)
        picks = pick_types(raw.info, meg=True, ref_meg=True)
        # NOTE(review): np.int is a deprecated alias (removed in
        # NumPy >= 1.24); plain int would be the modern spelling.
        events = np.array([[0, 0, 1]], dtype=np.int)
        evoked = Epochs(raw, events, 1, 0, 20e-3, picks=picks).average()
        return evoked

    def compensate_mne(fname, comp):
        # Run mne_compensate_data on the saved evoked and read it back.
        tmp_fname = '%s-%d-ave.fif' % (fname[:-4], comp)
        cmd = ['mne_compensate_data', '--in', fname,
               '--out', tmp_fname, '--grad', str(comp)]
        run_subprocess(cmd)
        return read_evokeds(tmp_fname)[0]

    # save evoked response with default compensation
    fname_default = op.join(tempdir, 'ctf_default-ave.fif')
    make_evoked(ctf_comp_fname, None).save(fname_default)

    # Python and MNE-C results must agree for every grade, both in data
    # values and in the recorded coil types.
    for comp in [0, 1, 2, 3]:
        evoked_py = make_evoked(ctf_comp_fname, comp)
        evoked_c = compensate_mne(fname_default, comp)
        picks_py = pick_types(evoked_py.info, meg=True, ref_meg=True)
        picks_c = pick_types(evoked_c.info, meg=True, ref_meg=True)
        assert_allclose(evoked_py.data[picks_py], evoked_c.data[picks_c],
                        rtol=1e-3, atol=1e-17)
        chs_py = [evoked_py.info['chs'][ii] for ii in picks_py]
        chs_c = [evoked_c.info['chs'][ii] for ii in picks_c]
        for ch_py, ch_c in zip(chs_py, chs_c):
            assert_equal(ch_py['coil_type'], ch_c['coil_type'])
| {
"repo_name": "alexandrebarachant/mne-python",
"path": "mne/io/tests/test_compensator.py",
"copies": "2",
"size": "3682",
"license": "bsd-3-clause",
"hash": 5782539379675446000,
"line_mean": 38.170212766,
"line_max": 76,
"alpha_frac": 0.6265616513,
"autogenerated": false,
"ratio": 2.974151857835218,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46007135091352175,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.