text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'demi'
#Finishing the page ranking algorithm.
def compute_ranks(graph):
    """Run 10 rounds of the PageRank algorithm over *graph*.

    graph maps url -> list of urls that page links to.  Returns a dict
    mapping each url to its rank; ranks across all pages sum to ~1.
    """
    damping = 0.8      # probability of following a link rather than jumping
    rounds = 10
    total = len(graph)
    # Every page starts with an equal share of rank.
    ranks = {url: 1.0 / total for url in graph}
    for _ in range(rounds):
        updated = {}
        for url in graph:
            rank = (1 - damping) / total
            # Each page linking to *url* donates a share of its own rank.
            for source in graph:
                if url in graph[source]:
                    rank += damping * (ranks[source] / len(graph[source]))
            updated[url] = rank
        ranks = updated
    return ranks
# Simulated web: maps url -> raw HTML, consumed by get_page() in place of
# real network fetches.  Six-page "cooking" site from the Udacity course;
# string contents (including the 'tablesppons' typo) are part of the data
# and must not be edited.
cache = {
'http://udacity.com/cs101x/urank/index.html': """<html>
<body>
<h1>Dave's Cooking Algorithms</h1>
<p>
Here are my favorite recipies:
<ul>
<li> <a href="http://udacity.com/cs101x/urank/hummus.html">Hummus Recipe</a>
<li> <a href="http://udacity.com/cs101x/urank/arsenic.html">World's Best Hummus</a>
<li> <a href="http://udacity.com/cs101x/urank/kathleen.html">Kathleen's Hummus Recipe</a>
</ul>
For more expert opinions, check out the
<a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>
and <a href="http://udacity.com/cs101x/urank/zinc.html">Zinc Chef</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/zinc.html': """<html>
<body>
<h1>The Zinc Chef</h1>
<p>
I learned everything I know from
<a href="http://udacity.com/cs101x/urank/nickel.html">the Nickel Chef</a>.
</p>
<p>
For great hummus, try
<a href="http://udacity.com/cs101x/urank/arsenic.html">this recipe</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/nickel.html': """<html>
<body>
<h1>The Nickel Chef</h1>
<p>
This is the
<a href="http://udacity.com/cs101x/urank/kathleen.html">
best Hummus recipe!
</a>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/kathleen.html': """<html>
<body>
<h1>
Kathleen's Hummus Recipe
</h1>
<p>
<ol>
<li> Open a can of garbonzo beans.
<li> Crush them in a blender.
<li> Add 3 tablesppons of tahini sauce.
<li> Squeeze in one lemon.
<li> Add salt, pepper, and buttercream frosting to taste.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/arsenic.html': """<html>
<body>
<h1>
The Arsenic Chef's World Famous Hummus Recipe
</h1>
<p>
<ol>
<li> Kidnap the <a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>.
<li> Force her to make hummus for you.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/hummus.html': """<html>
<body>
<h1>
Hummus Recipe
</h1>
<p>
<ol>
<li> Go to the store and buy a container of hummus.
<li> Open it.
</ol>
</body>
</html>
""",
}
def crawl_web(seed):
    """Crawl all pages reachable from *seed*; return (index, graph).

    index maps keyword -> list of urls containing it; graph maps
    url -> list of urls that page links to.
    """
    to_visit = [seed]
    visited = []
    graph = {}  # <url>, [list of pages it links to]
    index = {}
    while to_visit:
        url = to_visit.pop()
        if url in visited:
            continue
        html = get_page(url)
        add_page_to_index(index, url, html)
        links = get_all_links(html)
        graph[url] = links
        union(to_visit, links)
        visited.append(url)
    return index, graph
def get_page(url):
    """Return the cached HTML for *url*, or None for unknown pages."""
    return cache.get(url)
def get_next_target(page):
    """Locate the first '<a href=' link in *page*.

    Returns (url, index_of_closing_quote), or (None, 0) when no more
    links remain.
    """
    anchor = page.find('<a href=')
    if anchor == -1:
        return None, 0
    open_quote = page.find('"', anchor)
    close_quote = page.find('"', open_quote + 1)
    return page[open_quote + 1:close_quote], close_quote
def get_all_links(page):
    """Return every href url in *page*, in the order found."""
    links = []
    url, endpos = get_next_target(page)
    while url:
        links.append(url)
        page = page[endpos:]
        url, endpos = get_next_target(page)
    return links
def union(a, b):
    """Append to list *a*, in place, each element of *b* not already present.

    Preserves order of first appearance; like list.append, returns None.
    """
    for candidate in b:
        if candidate in a:
            continue
        a.append(candidate)
def add_page_to_index(index, url, content):
    """Register *url* under every whitespace-separated word of *content*."""
    for token in content.split():
        add_to_index(index, token, url)
def add_to_index(index, keyword, url):
    """Record an occurrence of *keyword* at *url*.

    Duplicates are kept: the url is appended once per occurrence,
    matching the behaviour of the original lesson code.
    """
    index.setdefault(keyword, []).append(url)
def lookup(index, keyword):
    """Return the url list for *keyword*, or None if never indexed."""
    return index.get(keyword)
# Demo: crawl the simulated six-page site, then rank its pages.
index, graph = crawl_web('http://udacity.com/cs101x/urank/index.html')
ranks = compute_ranks(graph)
print(ranks)  # expected output (dict order may vary) shown below
#>>> {'http://udacity.com/cs101x/urank/kathleen.html': 0.11661866666666663,
#'http://udacity.com/cs101x/urank/zinc.html': 0.038666666666666655,
#'http://udacity.com/cs101x/urank/hummus.html': 0.038666666666666655,
#'http://udacity.com/cs101x/urank/arsenic.html': 0.054133333333333325,
#'http://udacity.com/cs101x/urank/index.html': 0.033333333333333326,
#'http://udacity.com/cs101x/urank/nickel.html': 0.09743999999999997}
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson06/Finishing Urank.py",
"copies": "1",
"size": "4692",
"license": "mit",
"hash": 8061965079611727000,
"line_mean": 19.4890829694,
"line_max": 89,
"alpha_frac": 0.6148763853,
"autogenerated": false,
"ratio": 2.6613726602382304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.377624904553823,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# Memoization is a way to make code run faster by saving
# previously computed results. Instead of needing to recompute the value of an
# expression, a memoized computation first looks for the value in a cache of
# pre-computed values.
# Define a procedure, cached_execution(cache, proc, proc_input), that takes in
# three inputs: a cache, which is a Dictionary that maps inputs to proc to
# their previously computed values, a procedure, proc, which can be called by
# just writing proc(proc_input), and proc_input which is the input to proc.
# Your procedure should return the value of the proc with input proc_input,
# but should only evaluate it if it has not been previously called.
def cached_execution(cache, proc, proc_input):
    """Return proc(proc_input), evaluating proc only on a cache miss.

    *cache* maps previously seen inputs to their results and is updated
    in place when a new input is evaluated.
    """
    if proc_input not in cache:
        cache[proc_input] = proc(proc_input)
    return cache[proc_input]
# Here is an example showing the desired behavior of cached_execution:
def factorial(n):
    """Return n! iteratively, printing a marker on each real evaluation."""
    print("Running factorial")
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
# Demo: the first call computes 50! (and prints the marker); the second
# call is answered straight from the cache.
cache = {} # start cache as an empty dictionary
### first execution (should print out Running factorial and the result)
print(cached_execution(cache, factorial, 50))
print("Second time:")
### second execution (should only print out the result)
print(cached_execution(cache, factorial, 50))
# Here is a more interesting example using cached_execution
# (do not worry if you do not understand this, though,
# it will be clearer after Unit 6):
def cached_fibo(n):
    """Return the nth Fibonacci number.

    Sub-results go through cached_execution and the module-level cache,
    so the recursion runs in linear rather than exponential time.
    """
    if n == 0 or n == 1:
        return n
    previous = cached_execution(cache, cached_fibo, n - 1)
    before_that = cached_execution(cache, cached_fibo, n - 2)
    return previous + before_that
# Without the cache, naive fib(100) would take ~2**100 calls; memoized it
# needs only ~100.
cache = {} # new cache for this procedure
# do not try this at home...at least without a cache!
print(cached_execution(cache, cached_fibo, 100))
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson05/Memoization.py",
"copies": "1",
"size": "1916",
"license": "mit",
"hash": -8748342581801935000,
"line_mean": 33.2142857143,
"line_max": 79,
"alpha_frac": 0.7004175365,
"autogenerated": false,
"ratio": 3.832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0030120569402624434,
"num_lines": 56
} |
__author__ = 'demi'
# Modify the crawl_web procedure so that instead of just returning the
# index, it returns an index and a graph. The graph should be a
# Dictionary where the key:value entries are:
# url: [list of pages url links to]
def crawl_web(seed):
    """Crawl everything reachable from *seed*; return (index, graph).

    graph is a dict of url -> [list of pages that url links to];
    index maps keyword -> [urls containing it].
    """
    frontier = [seed]
    seen = []
    graph = {}  # <url>:[list of pages it links to]
    index = {}
    while frontier:
        url = frontier.pop()
        if url in seen:
            continue
        html = get_page(url)
        add_page_to_index(index, url, html)
        links = get_all_links(html)
        graph[url] = links
        union(frontier, links)
        seen.append(url)
    return index, graph
# Simulated web for this lesson: url -> raw HTML, read by get_page()
# instead of performing network fetches.  Content is course data —
# do not edit the strings.
cache = {
'http://udacity.com/cs101x/urank/index.html': """<html>
<body>
<h1>Dave's Cooking Algorithms</h1>
<p>
Here are my favorite recipies:
<ul>
<li> <a href="http://udacity.com/cs101x/urank/hummus.html">Hummus Recipe</a>
<li> <a href="http://udacity.com/cs101x/urank/arsenic.html">World's Best Hummus</a>
<li> <a href="http://udacity.com/cs101x/urank/kathleen.html">Kathleen's Hummus Recipe</a>
</ul>
For more expert opinions, check out the
<a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>
and <a href="http://udacity.com/cs101x/urank/zinc.html">Zinc Chef</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/zinc.html': """<html>
<body>
<h1>The Zinc Chef</h1>
<p>
I learned everything I know from
<a href="http://udacity.com/cs101x/urank/nickel.html">the Nickel Chef</a>.
</p>
<p>
For great hummus, try
<a href="http://udacity.com/cs101x/urank/arsenic.html">this recipe</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/nickel.html': """<html>
<body>
<h1>The Nickel Chef</h1>
<p>
This is the
<a href="http://udacity.com/cs101x/urank/kathleen.html">
best Hummus recipe!
</a>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/kathleen.html': """<html>
<body>
<h1>
Kathleen's Hummus Recipe
</h1>
<p>
<ol>
<li> Open a can of garbonzo beans.
<li> Crush them in a blender.
<li> Add 3 tablesppons of tahini sauce.
<li> Squeeze in one lemon.
<li> Add salt, pepper, and buttercream frosting to taste.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/arsenic.html': """<html>
<body>
<h1>
The Arsenic Chef's World Famous Hummus Recipe
</h1>
<p>
<ol>
<li> Kidnap the <a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>.
<li> Force her to make hummus for you.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/hummus.html': """<html>
<body>
<h1>
Hummus Recipe
</h1>
<p>
<ol>
<li> Go to the store and buy a container of hummus.
<li> Open it.
</ol>
</body>
</html>
""",
}
def get_page(url):
    """Return the cached HTML for *url*, or None when it is unknown."""
    return cache.get(url)
def get_next_target(page):
    """Return (url, closing_quote_index) for the first link in *page*,
    or (None, 0) if the page contains no '<a href=' anchor."""
    anchor = page.find('<a href=')
    if anchor == -1:
        return None, 0
    open_quote = page.find('"', anchor)
    close_quote = page.find('"', open_quote + 1)
    return page[open_quote + 1:close_quote], close_quote
def get_all_links(page):
    """Collect every linked url in *page*, preserving document order."""
    found = []
    url, pos = get_next_target(page)
    while url:
        found.append(url)
        page = page[pos:]
        url, pos = get_next_target(page)
    return found
def union(a, b):
    """Extend list *a* in place with the elements of *b* it lacks."""
    for item in b:
        if item in a:
            continue
        a.append(item)
def add_page_to_index(index, url, content):
    """Index *url* under each whitespace-separated word of *content*."""
    for word in content.split():
        add_to_index(index, word, url)
def add_to_index(index, keyword, url):
    """Append *url* to the entry for *keyword*, creating it if needed.

    Duplicate urls are kept, matching the original lesson behaviour.
    """
    index.setdefault(keyword, []).append(url)
def lookup(index, keyword):
    """Return the urls indexed for *keyword*, or None if absent."""
    return index.get(keyword)
# Demo: crawl the simulated site and show the outlinks of the seed page.
index, graph = crawl_web('http://udacity.com/cs101x/urank/index.html')
if 'http://udacity.com/cs101x/urank/index.html' in graph:
    print(graph['http://udacity.com/cs101x/urank/index.html'])
#>>> ['http://udacity.com/cs101x/urank/hummus.html',
#'http://udacity.com/cs101x/urank/arsenic.html',
#'http://udacity.com/cs101x/urank/kathleen.html',
#'http://udacity.com/cs101x/urank/nickel.html',
#'http://udacity.com/cs101x/urank/zinc.html']
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson06/Implementing Urank.py",
"copies": "1",
"size": "4261",
"license": "mit",
"hash": -5788446812684716000,
"line_mean": 19.4855769231,
"line_max": 89,
"alpha_frac": 0.6280215912,
"autogenerated": false,
"ratio": 2.6077111383108935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37357327295108933,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# One Gold Star
# Question 1-star: Stirling and Bell Numbers
# The number of ways of splitting n items in k non-empty sets is called
# the Stirling number, S(n,k), of the second kind. For example, the group
# of people Dave, Sarah, Peter and Andy could be split into two groups in
# the following ways.
# 1. Dave, Sarah, Peter Andy
# 2. Dave, Sarah, Andy Peter
# 3. Dave, Andy, Peter Sarah
# 4. Sarah, Andy, Peter Dave
# 5. Dave, Sarah Andy, Peter
# 6. Dave, Andy Sarah, Peter
# 7. Dave, Peter Andy, Sarah
# so S(4,2) = 7
# If instead we split the group into one group, we have just one way to
# do it.
# 1. Dave, Sarah, Peter, Andy
# so S(4,1) = 1
# or into four groups, there is just one way to do it as well
# 1. Dave Sarah Peter Andy
# so S(4,4) = 1
# If we try to split into more groups than we have people, there are no
# ways to do it.
# The formula for calculating the Stirling numbers is
# S(n, k) = k*S(n-1, k) + S(n-1, k-1)
# Furthermore, the Bell number B(n) is the number of ways of splitting n
# into any number of parts, that is,
# B(n) is the sum of S(n,k) for k =1,2, ... , n.
# Write two procedures, stirling and bell. The first procedure, stirling
# takes as its inputs two positive integers of which the first is the
# number of items and the second is the number of sets into which those
# items will be split. The second procedure, bell, takes as input a
# positive integer n and returns the Bell number B(n).
def stirling(n, k):
    """Return S(n, k), the Stirling number of the second kind.

    S(n, k) counts the ways to split n items into k non-empty sets,
    using the recurrence S(n, k) = k*S(n-1, k) + S(n-1, k-1).
    """
    if k == 1 or k == n:
        return 1
    if k > n:
        return 0
    return k * stirling(n - 1, k) + stirling(n - 1, k - 1)
def bell(n):
    """Return the Bell number B(n) = sum of S(n, k) for k = 1..n."""
    return sum(stirling(n, k) for k in range(1, n + 1))
# print(stirling(1, 1))
#>>> 1
# print(stirling(2, 1))
#>>> 1
# print(stirling(2, 2))
#>>> 1
# print(stirling(2, 3))
#>>>0
# print(stirling(3, 1))
#>>> 1
# print(stirling(3, 2))
#>>> 3
# print(stirling(3, 3))
#>>> 1
# print(stirling(4, 1))
#>>> 1
# print(stirling(4, 2))
#>>> 7
# print(stirling(4, 3))
#>>> 6
# print(stirling(4, 4))
#>>> 1
# print(stirling(5, 1))
#>>> 1
# print(stirling(5, 2))
#>>> 15
# print(stirling(5, 3))
#>>> 25
# print(stirling(5, 4))
#>>> 10
# print(stirling(5, 5))
#>>> 1
# print(stirling(20, 15))
#>>> 452329200
# Spot-check bell() against the known sequence 1, 2, 5, 15, 52, ...
print(bell(1))
#>>> 1
print(bell(2))
#>>> 2
print(bell(3))
#>>> 5
print(bell(4))
#>>> 15
print(bell(5))
#>>> 52
print(bell(15))
#>>> 1382958545
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Challenging Practice Problems/Stirling and Bell.py",
"copies": "1",
"size": "2547",
"license": "mit",
"hash": 8536943927500074000,
"line_mean": 20.7692307692,
"line_max": 73,
"alpha_frac": 0.588535532,
"autogenerated": false,
"ratio": 2.5495495495495497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8501021824293984,
"avg_score": 0.02741265145111299,
"num_lines": 117
} |
__author__ = 'demi'
# Question 5: Date Converter
# Write a procedure date_converter which takes two inputs. The first is
# a dictionary and the second a string. The string is a valid date in
# the format month/day/year. The procedure should return
# the date written in the form <day> <name of month> <year>.
# For example , if the
# dictionary is in English,
# Month-number -> month-name tables consumed by date_converter below.
english = {1:"January", 2:"February", 3:"March", 4:"April", 5:"May",
6:"June", 7:"July", 8:"August", 9:"September",10:"October",
11:"November", 12:"December"}
# then "5/11/2012" should be converted to "11 May 2012".
# If the dictionary is in Swedish
swedish = {1:"januari", 2:"februari", 3:"mars", 4:"april", 5:"maj",
6:"juni", 7:"juli", 8:"augusti", 9:"september",10:"oktober",
11:"november", 12:"december"}
# then "5/11/2012" should be converted to "11 maj 2012".
# Hint: int('12') converts the string '12' to the integer 12.
def date_converter(settings, date_string):
    """Convert 'month/day/year' into '<day> <month name> <year>'.

    *settings* maps month number (1-12) to a month name; the year part
    is passed through verbatim.
    """
    parts = date_string.split('/')
    month_name = settings[int(parts[0])]
    return '%d %s %s' % (int(parts[1]), month_name, parts[2])
# Demo: same date rendered from the English and Swedish tables.
print(date_converter(english, '5/11/2012'))
#>>> 11 May 2012
print(date_converter(english, '5/11/12'))
#>>> 11 May 12
print(date_converter(swedish, '5/11/2012'))
#>>> 11 maj 2012
print(date_converter(swedish, '12/5/1791'))
#>>> 5 december 1791
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Cumulative Practice Problems/Date Converter.py",
"copies": "1",
"size": "1315",
"license": "mit",
"hash": -4356717933915748000,
"line_mean": 28.8863636364,
"line_max": 71,
"alpha_frac": 0.6547528517,
"autogenerated": false,
"ratio": 2.7742616033755274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39290144550755274,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# Question 7: Find and Replace
# For this question you need to define two procedures:
# make_converter(match, replacement)
# Takes as input two strings and returns a converter. It doesn't have
# to make a specific type of thing. It can
# return anything you would find useful in apply_converter.
# apply_converter(converter, string)
# Takes as input a converter (produced by make_converter), and
# a string, and returns the result of applying the converter to the
# input string. This replaces all occurrences of the match used to
# build the converter, with the replacement. It keeps doing
# replacements until there are no more opportunities for replacements.
def make_converter(match, replacement):
    """Build a converter for apply_converter: the (match, replacement) pair."""
    converter = (match, replacement)
    return converter
def apply_converter(converter, string):
    """Repeatedly replace the first occurrence of the converter's match
    with its replacement until no occurrence remains.

    May loop forever if the replacement reintroduces the match text.
    """
    match, replacement = converter
    while match in string:
        string = string.replace(match, replacement, 1)
    return string
# For example,
# 'aaaa' -> 'aaa' -> 'aa' -> 'a' (each pass removes one 'a').
c1 = make_converter('aa', 'a')
print(apply_converter(c1, 'aaaa'))
#>>> a
c = make_converter('aba', 'b')
print(apply_converter(c, 'aaaaaabaaaaa'))
#>>> ab
# Note that this process is not guaranteed to terminate for all inputs
# (for example, apply_converter(make_converter('a', 'aa'), 'a') would
# run forever).
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Cumulative Practice Problems/Find and Replace.py",
"copies": "1",
"size": "1288",
"license": "mit",
"hash": 697854384534577000,
"line_mean": 31.2,
"line_max": 74,
"alpha_frac": 0.7065217391,
"autogenerated": false,
"ratio": 3.6695156695156697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9839430265758526,
"avg_score": 0.007321428571428571,
"num_lines": 40
} |
__author__ = 'demi'
# Question 9: Deep Reverse
# Define a procedure, deep_reverse, that takes as input a list,
# and returns a new list that is the deep reverse of the input list.
# This means it reverses all the elements in the list, and if any
# of those elements are lists themselves, reverses all the elements
# in the inner list, all the way down.
# Note: The procedure must not change the input list.
# The procedure is_list below is from Homework 6. It returns True if
# p is a list and False if it is not.
def is_list(p):
    """Return True if *p* is a list (including list subclasses)."""
    return isinstance(p, list)
def deep_reverse(l):
    """Return a new list that is *l* reversed at every nesting level.

    The top-level list is reversed and every nested list is recursively
    deep-reversed.  Non-list values are returned unchanged, and the
    input (including its nested lists) is never mutated.
    """
    if not isinstance(l, list):
        return l
    # Reverse this level and recurse into each element; building a fresh
    # list guarantees the input is left untouched.
    return [deep_reverse(element) for element in reversed(l)]
#For example,
# The input list must be left unchanged by deep_reverse.
p = [1, [2, 3], [4, 5]]
print(deep_reverse(p))
#>>> [[5, 4], [3, 2], 1]
# print(p)
# p = [1, [2, 3, [4, [5, 6]]]]
# print(deep_reverse(p))
# #>>> [[[[6, 5], 4], 3, 2], 1]
# print(p)
# #>>> [1, [2, 3, [4, [5, 6]]]]
#
# q = [1, [2,3], 4, [5,6]]
# print(deep_reverse(q))
# #>>> [ [6,5], 4, [3, 2], 1]
# print(q)
#>>> [1, [2,3], 4, [5,6]]
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Cumulative Practice Problems/Deep Reverse.py",
"copies": "1",
"size": "1466",
"license": "mit",
"hash": -8925999895285623000,
"line_mean": 25.6545454545,
"line_max": 68,
"alpha_frac": 0.5402455662,
"autogenerated": false,
"ratio": 2.8355899419729207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8849763247100659,
"avg_score": 0.005214452214452214,
"num_lines": 55
} |
__author__ = 'demi'
# Rabbits Multiplying
# A (slightly) more realistic model of rabbit multiplication than the Fibonacci
# model, would assume that rabbits eventually die. For this question, some
# rabbits die from month 6 onwards.
#
# Thus, we can model the number of rabbits as:
#
# rabbits(1) = 1 # There is one pair of immature rabbits in Month 1
# rabbits(2) = 1 # There is one pair of mature rabbits in Month 2
#
# For months 3-5:
# Same as Fibonacci model, no rabbits dying yet
# rabbits(n) = rabbits(n - 1) + rabbits(n - 2)
#
#
# For months > 5:
# All the rabbits that are over 5 months old die along with a few others
# so that the number that die is equal to the number alive 5 months ago.
# Before dying, the bunnies reproduce.
# rabbits(n) = rabbits(n - 1) + rabbits(n - 2) - rabbits(n - 5)
#
# This produces the rabbit sequence: 1, 1, 2, 3, 5, 7, 11, 16, 24, 35, 52, ...
#
# Define a procedure rabbits that takes as input a number n, and returns a
# number that is the value of the nth number in the rabbit sequence.
# For example, rabbits(10) -> 35. (It is okay if your procedure takes too
# long to run on inputs above 30.)
def rabbits(n):
    """Return the nth term of the mortal-rabbit sequence.

    r(1) = r(2) = 1; r(k) = r(k-1) + r(k-2) for k = 3..5; from month 6
    on, deaths kick in: r(k) = r(k-1) + r(k-2) - r(k-5).
    Returns 0 for n < 1, matching the original loop's behaviour.
    """
    if n < 1:
        return 0
    history = []  # history[i-1] holds r(i)
    for month in range(1, n + 1):
        if month <= 2:
            value = 1
        elif month <= 5:
            value = history[-1] + history[-2]
        else:
            value = history[-1] + history[-2] - history[-5]
        history.append(value)
    return history[-1]
print(rabbits(10))
#>>> 35
# Print the first 11 terms of the sequence, space-separated.
s = ""
for i in range(1, 12):
    s = s + str(rabbits(i)) + " "
print(s)
#>>> 1 1 2 3 5 7 11 16 24 35 52
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson06/Rabbits Multiplying.py",
"copies": "1",
"size": "1613",
"license": "mit",
"hash": 7079236464327326000,
"line_mean": 26.8103448276,
"line_max": 79,
"alpha_frac": 0.6249225046,
"autogenerated": false,
"ratio": 2.9925788497217067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41175013543217065,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# Single Gold Star
# Family Trees
# In the lecture, we showed a recursive definition for your ancestors. For this
# question, your goal is to define a procedure that finds someone's ancestors,
# given a Dictionary that provides the parent relationships.
# Here's an example of an input Dictionary:
# Genealogy data: name -> list of that person's (known) parents.
# NOTE(review): 'Captain John Byron' appears as a parent value but the
# key here is 'John Byron', so his own ancestors are never found —
# looks intentional for the exercise; confirm against the course data.
ada_family = { 'Judith Blunt-Lytton': ['Anne Isabella Blunt', 'Wilfrid Scawen Blunt'],
'Ada King-Milbanke': ['Ralph King-Milbanke', 'Fanny Heriot'],
'Ralph King-Milbanke': ['Augusta Ada King', 'William King-Noel'],
'Anne Isabella Blunt': ['Augusta Ada King', 'William King-Noel'],
'Byron King-Noel': ['Augusta Ada King', 'William King-Noel'],
'Augusta Ada King': ['Anne Isabella Milbanke', 'George Gordon Byron'],
'George Gordon Byron': ['Catherine Gordon', 'Captain John Byron'],
'John Byron': ['Vice-Admiral John Byron', 'Sophia Trevannion'] }
# Define a procedure, ancestors(genealogy, person), that takes as its first input
# a Dictionary in the form given above, and as its second input the name of a
# person. It should return a list giving all the known ancestors of the input
# person (this should be the empty list if there are none). The order of the list
# does not matter and duplicates will be ignored.
def ancestors(genealogy, person):
    """Return a list of all known ancestors of *person*.

    *genealogy* maps a name to a list of that person's parents.  Each
    parent is listed followed by that parent's own ancestors; names with
    no entry contribute nothing (unknown people yield []).
    """
    found = []
    for parent in genealogy.get(person, []):
        if parent == person:
            continue  # guard against a self-referential entry
        found.append(parent)
        found += ancestors(genealogy, parent)
    return found
# Here are some examples:
# (list order follows depth-first traversal of the parent lists)
print(ancestors(ada_family, 'Augusta Ada King'))
#>>> ['Anne Isabella Milbanke', 'George Gordon Byron',
# 'Catherine Gordon','Captain John Byron']
print(ancestors(ada_family, 'Judith Blunt-Lytton'))
#>>> ['Anne Isabella Blunt', 'Wilfrid Scawen Blunt', 'Augusta Ada King',
# 'William King-Noel', 'Anne Isabella Milbanke', 'George Gordon Byron',
# 'Catherine Gordon', 'Captain John Byron']
print(ancestors(ada_family, 'Dave'))
#>>> []
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson06/Family Trees.py",
"copies": "1",
"size": "2087",
"license": "mit",
"hash": -5819783552943610000,
"line_mean": 38.3773584906,
"line_max": 86,
"alpha_frac": 0.6602779109,
"autogenerated": false,
"ratio": 3.105654761904762,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4265932672804762,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# The current index includes a url in the list of urls
# for a keyword multiple times if the keyword appears
# on that page more than once.
# It might be better to only include the same url
# once in the url list for a keyword, even if it appears
# many times.
# Modify add_to_index so that a given url is only
# included once in the url list for a keyword,
# no matter how many times that keyword appears.
def add_to_index(index, keyword, url):
    """Record *url* under *keyword* in a list-of-[keyword, urls] index.

    Unlike the earlier dict version, each url appears at most once per
    keyword, no matter how often the keyword occurs on the page.
    """
    for kw, urls in index:
        if kw == keyword:
            if url not in urls:
                urls.append(url)
            return
    # keyword not present yet: start a fresh entry
    index.append([keyword, [url]])
# Hard-coded four-page test site: returns the HTML for a known url,
# "" otherwise.  NOTE(review): the bare except looks unreachable — the
# body is only string comparisons and literals; presumably kept to match
# the course template.
def get_page(url):
    try:
        if url == "http://www.udacity.com/cs101x/index.html":
            return '''<html> <body> This is a test page for learning to crawl!
<p> It is a good idea to
<a href="http://www.udacity.com/cs101x/crawling.html">
learn to crawl</a> before you try to
<a href="http://www.udacity.com/cs101x/walking.html">walk</a> or
<a href="http://www.udacity.com/cs101x/flying.html">fly</a>.</p></body>
</html>'''
        elif url == "http://www.udacity.com/cs101x/crawling.html":
            return '''<html> <body> I have not learned to crawl yet, but I am
quite good at <a href="http://www.udacity.com/cs101x/kicking.html">kicking</a>.
</body> </html>'''
        elif url == "http://www.udacity.com/cs101x/walking.html":
            return '''<html> <body> I cant get enough
<a href="http://www.udacity.com/cs101x/index.html">crawling</a></body></html>'''
        elif url == "http://www.udacity.com/cs101x/flying.html":
            return '''<html>
<body>The magic words are Squeamish Ossifrage!</body></html>'''
    except:
        return ""
    # unknown url: empty page
    return ""
def union(a, b):
    """Append to list *a*, in place, each element of *b* it lacks."""
    for entry in b:
        if entry in a:
            continue
        a.append(entry)
def get_next_target(page):
    """Find the first '<a href=' link in *page*.

    Returns (url, index_of_closing_quote), or (None, 0) when there are
    no more links.
    """
    anchor = page.find('<a href=')
    if anchor == -1:
        return None, 0
    open_quote = page.find('"', anchor)
    close_quote = page.find('"', open_quote + 1)
    return page[open_quote + 1:close_quote], close_quote
def get_all_links(page):
    """Return the urls of all links in *page*, in order of appearance."""
    found = []
    url, pos = get_next_target(page)
    while url:
        found.append(url)
        page = page[pos:]
        url, pos = get_next_target(page)
    return found
def crawl_web(seed):
    """Crawl every page reachable from *seed*; return the keyword index.

    The index here is a list of [keyword, [urls]] entries, matching the
    list-based add_to_index of this lesson.
    """
    frontier = [seed]
    seen = []
    index = []
    while frontier:
        url = frontier.pop()
        if url in seen:
            continue
        html = get_page(url)
        add_page_to_index(index, url, html)
        union(frontier, get_all_links(html))
        seen.append(url)
    return index
def add_page_to_index(index, url, content):
    """Index *url* under every whitespace-separated word in *content*."""
    for token in content.split():
        add_to_index(index, token, url)
def lookup(index, keyword):
    """Return the url list stored for *keyword*, or None if absent."""
    for kw, urls in index:
        if kw == keyword:
            return urls
    return None
#index = crawl_web("http://www.udacity.com/cs101x/index.html")
#print lookup(index,"is")
#>>> ['http://www.udacity.com/cs101x/index.html'] | {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson04/Improving the Index.py",
"copies": "1",
"size": "3117",
"license": "mit",
"hash": -2485578707613509000,
"line_mean": 28.9807692308,
"line_max": 80,
"alpha_frac": 0.6025024062,
"autogenerated": false,
"ratio": 3.2267080745341614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43292104807341614,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
#
# This question explores a different way (from the previous question)
# to limit the pages that it can crawl.
#
#######
# THREE GOLD STARS #
# Yes, we really mean it! This is really tough (but doable) unless
# you have some previous experience before this course.
# Modify the crawl_web procedure to take a second parameter,
# max_depth, that limits the depth of the search. We can
# define the depth of a page as the number of links that must
# be followed to reach that page starting from the seed page,
# that is, the length of the shortest path from the seed to
# the page. No pages whose depth exceeds max_depth should be
# included in the crawl.
#
# For example, if max_depth is 0, the only page that should
# be crawled is the seed page. If max_depth is 1, the pages
# that should be crawled are the seed page and every page that
# it links to directly. If max_depth is 2, the crawl should
# also include all pages that are linked to by these pages.
#
# Note that the pages in the crawl may be in any order.
#
# The following definition of get_page provides an interface
# to the website found at http://www.udacity.com/cs101x/index.html
# The function output order does not affect grading.
# Hard-coded test web for the max_depth exercise: three disjoint site
# graphs (cs101x pages, top.contributors pages, and the A1..F1 chain).
# Returns "" for any unknown url.  NOTE(review): the bare except looks
# unreachable (body is only comparisons and literals); presumably kept
# from the course template.
def get_page(url):
    try:
        if url == "http://www.udacity.com/cs101x/index.html":
            return ('<html> <body> This is a test page for learning to crawl! '
                    '<p> It is a good idea to '
                    '<a href="http://www.udacity.com/cs101x/crawling.html">learn to '
                    'crawl</a> before you try to '
                    '<a href="http://www.udacity.com/cs101x/walking.html">walk</a> '
                    'or <a href="http://www.udacity.com/cs101x/flying.html">fly</a>. '
                    '</p> </body> </html> ')
        elif url == "http://www.udacity.com/cs101x/crawling.html":
            return ('<html> <body> I have not learned to crawl yet, but I '
                    'am quite good at '
                    '<a href="http://www.udacity.com/cs101x/kicking.html">kicking</a>.'
                    '</body> </html>')
        elif url == "http://www.udacity.com/cs101x/walking.html":
            return ('<html> <body> I cant get enough '
                    '<a href="http://www.udacity.com/cs101x/index.html">crawling</a>! '
                    '</body> </html>')
        elif url == "http://www.udacity.com/cs101x/flying.html":
            return ('<html> <body> The magic words are Squeamish Ossifrage! '
                    '</body> </html>')
        elif url == "http://top.contributors/velak.html":
            return ('<a href="http://top.contributors/jesyspa.html">'
                    '<a href="http://top.contributors/forbiddenvoid.html">')
        elif url == "http://top.contributors/jesyspa.html":
            return ('<a href="http://top.contributors/elssar.html">'
                    '<a href="http://top.contributors/kilaws.html">')
        elif url == "http://top.contributors/forbiddenvoid.html":
            return ('<a href="http://top.contributors/charlzz.html">'
                    '<a href="http://top.contributors/johang.html">'
                    '<a href="http://top.contributors/graemeblake.html">')
        elif url == "http://top.contributors/kilaws.html":
            return ('<a href="http://top.contributors/tomvandenbosch.html">'
                    '<a href="http://top.contributors/mathprof.html">')
        elif url == "http://top.contributors/graemeblake.html":
            return ('<a href="http://top.contributors/dreyescat.html">'
                    '<a href="http://top.contributors/angel.html">')
        elif url == "A1":
            return '<a href="B1"> <a href="C1"> '
        elif url == "B1":
            return '<a href="E1">'
        elif url == "C1":
            return '<a href="D1">'
        elif url == "D1":
            return '<a href="E1"> '
        elif url == "E1":
            return '<a href="F1"> '
    except:
        return ""
    # unknown url: empty page
    return ""
def get_next_target(page):
    """Return (url, closing_quote_index) for the first '<a href=' link
    in *page*, or (None, 0) when the page has no more links."""
    anchor = page.find('<a href=')
    if anchor == -1:
        return None, 0
    open_quote = page.find('"', anchor)
    close_quote = page.find('"', open_quote + 1)
    return page[open_quote + 1:close_quote], close_quote
def union(p, q):
    """Append to list *p*, in place, each element of *q* not already in it."""
    for candidate in q:
        if candidate in p:
            continue
        p.append(candidate)
def get_all_links(page):
    """Return every linked url in *page*, in the order encountered."""
    links = []
    url, endpos = get_next_target(page)
    while url:
        links.append(url)
        page = page[endpos:]
        url, endpos = get_next_target(page)
    return links
def crawl_web(seed, max_depth):
    """Crawl outward from seed, visiting only pages within max_depth
    link-hops; returns the crawled pages in visit order."""
    frontier = [seed]       # pages at the current depth, popped from the end
    next_frontier = []      # links discovered for the next depth level
    visited = []
    depth = 0
    while frontier and depth <= max_depth:
        page = frontier.pop()
        if page not in visited:
            union(next_frontier, get_all_links(get_page(page)))
            visited.append(page)
        if not frontier:
            # Current level exhausted: step one hop deeper.
            frontier, next_frontier = next_frontier, []
            depth += 1
    return visited
# Demo runs; the '#>>>' lines are the expected outputs from the course.
# print(crawl_web("http://www.udacity.com/cs101x/index.html", 0))
#>>> ['http://www.udacity.com/cs101x/index.html']
print(crawl_web("http://www.udacity.com/cs101x/index.html", 1))
#>>> ['http://www.udacity.com/cs101x/index.html',
#>>> 'http://www.udacity.com/cs101x/flying.html',
#>>> 'http://www.udacity.com/cs101x/walking.html',
#>>> 'http://www.udacity.com/cs101x/crawling.html']
# print(crawl_web("http://www.udacity.com/cs101x/index.html", 50))
#>>> ['http://www.udacity.com/cs101x/index.html',
#>>> 'http://www.udacity.com/cs101x/flying.html',
#>>> 'http://www.udacity.com/cs101x/walking.html',
#>>> 'http://www.udacity.com/cs101x/crawling.html',
#>>> 'http://www.udacity.com/cs101x/kicking.html']
# print(crawl_web("http://top.contributors/forbiddenvoid.html", 2))
#>>> ['http://top.contributors/forbiddenvoid.html',
#>>> 'http://top.contributors/graemeblake.html',
#>>> 'http://top.contributors/angel.html',
#>>> 'http://top.contributors/dreyescat.html',
#>>> 'http://top.contributors/johang.html',
#>>> 'http://top.contributors/charlzz.html']
# print(crawl_web("A1", 3))
#>>> ['A1', 'C1', 'B1', 'E1', 'D1', 'F1']
# (May be in any order) | {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson03/max_depth.py",
"copies": "1",
"size": "5993",
"license": "mit",
"hash": -5735248165166022000,
"line_mean": 37.9220779221,
"line_max": 79,
"alpha_frac": 0.601368263,
"autogenerated": false,
"ratio": 3.1642027455121435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9226789525395598,
"avg_score": 0.007756296623309161,
"num_lines": 154
} |
__author__ = 'demi'
# THREE GOLD STARS
# Question 3-star: Elementary Cellular Automaton
# Please see the video for additional explanation.
# A one-dimensional cellular automata takes in a string, which in our
# case, consists of the characters '.' and 'x', and changes it according
# to some predetermined rules. The rules consider three characters, which
# are a character at position k and its two neighbours, and determine
# what the character at the corresponding position k will be in the new
# string.
# For example, if the character at position k in the string is '.' and
# its neighbours are '.' and 'x', then the pattern is '..x'. We look up
# '..x' in the table below. In the table, '..x' corresponds to 'x' which
# means that in the new string, 'x' will be at position k.
# Rules:
# pattern in position k in contribution to
# Value current string new string pattern number
# is 0 if replaced by '.'
# and value if replaced
# by 'x'
# 1 '...' '.' 1 * 0
# 2 '..x' 'x' 2 * 1
# 4 '.x.' 'x' 4 * 1
# 8 '.xx' 'x' 8 * 1
# 16 'x..' '.' 16 * 0
# 32 'x.x' '.' 32 * 0
# 64 'xx.' '.' 64 * 0
# 128 'xxx' 'x' 128 * 1
# ----------
# 142
# To calculate the patterns which will have the central character x, work
# out the values required to sum to the pattern number. For example,
# 32 = 32 so only pattern 32 which is x.x changes the central position to
# an x. All the others have a . in the next line.
# 23 = 16 + 4 + 2 + 1 which means that 'x..', '.x.', '..x' and '...' all
# lead to an 'x' in the next line and the rest have a '.'
# For pattern 142, and starting string
# ...........x...........
# the new strings created will be
# ..........xx........... (generations = 1)
# .........xx............ (generations = 2)
# ........xx............. (generations = 3)
# .......xx.............. (generations = 4)
# ......xx............... (generations = 5)
# .....xx................ (generations = 6)
# ....xx................. (generations = 7)
# ...xx.................. (generations = 8)
# ..xx................... (generations = 9)
# .xx.................... (generations = 10)
# Note that the first position of the string is next to the last position
# in the string.
# Define a procedure, cellular_automaton, that takes three inputs:
# a non-empty string,
# a pattern number which is an integer between 0 and 255 that
# represents a set of rules, and
# a positive integer, n, which is the number of generations.
# The procedure should return a string which is the result of
# applying the rules generated by the pattern to the string n times.
def cellular_automaton(field, ptn_number, gen_number):
    """Evolve field for gen_number generations under the elementary
    cellular-automaton rule encoded by ptn_number (0-255)."""
    return calculate_field(field, build_patterns(ptn_number), gen_number)
def build_patterns(pattern_number):
    """Decode a Wolfram-style rule number (0-255) into a mapping from each
    3-cell neighbourhood string to its successor cell ('x' or '.')."""
    neighbourhoods = {
        1: '...', 2: '..x', 4: '.x.', 8: '.xx',
        16: 'x..', 32: 'x.x', 64: 'xx.', 128: 'xxx',
    }
    rules = {}
    remaining = pattern_number
    # Greedy subtraction of the largest powers of two: a bit set in the
    # rule number means that neighbourhood produces a live cell.
    for value in sorted(neighbourhoods, reverse=True):
        if remaining >= value:
            rules[neighbourhoods[value]] = 'x'
            remaining -= value
        else:
            rules[neighbourhoods[value]] = '.'
    return rules
def calculate_field(field, patterns, gen_number):
    """Apply the rule table to field for gen_number generations.

    The string wraps around: the first and last cells are neighbours.
    """
    length = len(field)
    for _ in range(gen_number):
        field = ''.join(
            patterns[field[i - 1] + field[i] + field[(i + 1) % length]]
            for i in range(length)
        )
    return field
# Demo runs; the commented lines show the expected outputs.
print(cellular_automaton('.', 21, 1))
print(cellular_automaton('.x.x.x.x.', 17, 2))
# #>>> xxxxxxx..
print(cellular_automaton('.x.x.x.x.', 249, 3))
# #>>> .x..x.x.x
print(cellular_automaton('...x....', 125, 1))
# #>>> xx.xxxxx
print(cellular_automaton('...x....', 125, 2))
# #>>> .xxx....
print(cellular_automaton('...x....', 125, 3))
# #>>> .x.xxxxx
print(cellular_automaton('...x....', 125, 4))
# #>>> xxxx...x
print(cellular_automaton('...x....', 125, 5))
# #>>> ...xxx.x
print(cellular_automaton('...x....', 125, 6))
# #>>> xx.x.xxx
print(cellular_automaton('...x....', 125, 7))
# #>>> .xxxxx..
print(cellular_automaton('...x....', 125, 8))
# #>>> .x...xxx
print(cellular_automaton('...x....', 125, 9))
# #>>> xxxx.x.x
print(cellular_automaton('...x....', 125, 10))
#>>> ...xxxxx
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Challenging Practice Problems/Elementary Cellular Automaton.py",
"copies": "1",
"size": "5056",
"license": "mit",
"hash": 1025203493057566800,
"line_mean": 36.4518518519,
"line_max": 74,
"alpha_frac": 0.4994066456,
"autogenerated": false,
"ratio": 3.525801952580195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45252085981801954,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# Triple Gold Star
# Only A Little Lucky
# The Feeling Lucky question (from the regular homework) assumed it was enough
# to find the best-ranked page for a given query. For most queries, though, we
# don't just want the best page (according to the page ranking algorithm), we
# want a list of many pages that match the query, ordered from the most likely
# to be useful to the least likely.
# Your goal for this question is to define a procedure, ordered_search(index,
# ranks, keyword), that takes the same inputs as lucky_search from Question 5,
# but returns an ordered list of all the URLs that match the query.
# To order the pages, use the quicksort algorithm, invented by Sir Tony Hoare in
# 1959. Quicksort provides a way to sort any list of data, using an expected
# number of comparisons that scales as n log n where n is the number of elements
# in the list.
# The idea of quicksort is quite simple:
# If the list has zero or one elements, it is already sorted.
# Otherwise, pick a pivot element, and split the list into two partitions: one
# contains all the elements equal to or lower than the value of the pivot
# element, and the other contains all the elements that are greater than the
# pivot element. Recursively sort each of the sub-lists, and then return the
# result of concatenating the sorted left sub-list, the pivot element, and the
# sorted right sub-list.
# For simplicity, use the first element in the list as your pivot element (this
# is not usually a good choice, since it means if the input list is already
# nearly sorted, the actual work will be much worse than expected).
def ordered_search(index, ranks, keyword):
    """Return the URLs matching keyword, best-ranked first, or None when
    nothing matches.

    Fix: the previous implementation inverted ranks into a dict keyed by
    rank value, so two distinct URLs that happened to share a rank collided
    and one was silently dropped.  Sorting (rank, url) pairs keeps every
    match (and no longer shadows the builtin ``dict``).
    """
    urls = lookup(index, keyword)
    if not urls:
        return None
    # The index may list a URL once per keyword occurrence; keep one copy
    # each, preserving first-seen order (the old dict de-duplicated too).
    unique_urls = []
    for url in urls:
        if url not in unique_urls:
            unique_urls.append(url)
    # quicksort returns highest-first, which is the order we want.
    pairs = quicksort([(ranks[url], url) for url in unique_urls])
    return [url for (_rank, url) in pairs]
def quicksort(l):
    """Sort l in DESCENDING order via quicksort (first element as pivot)."""
    if not l or len(l) <= 1:
        return l
    pivot, rest = l[0], l[1:]
    above = [x for x in rest if x > pivot]
    below = [x for x in rest if not x > pivot]  # ties stay with the pivot's right side
    return quicksort(above) + [pivot] + quicksort(below)
# Offline fixture: canned HTML pages keyed by URL, so the crawler/ranker
# below runs without network access (see get_page).
cache = {
   'http://udacity.com/cs101x/urank/index.html': """<html>
<body>
<h1>Dave's Cooking Algorithms</h1>
<p>
Here are my favorite recipies:
<ul>
<li> <a href="http://udacity.com/cs101x/urank/hummus.html">Hummus Recipe</a>
<li> <a href="http://udacity.com/cs101x/urank/arsenic.html">World's Best Hummus</a>
<li> <a href="http://udacity.com/cs101x/urank/kathleen.html">Kathleen's Hummus Recipe</a>
</ul>
For more expert opinions, check out the
<a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>
and <a href="http://udacity.com/cs101x/urank/zinc.html">Zinc Chef</a>.
</body>
</html>
""",
   'http://udacity.com/cs101x/urank/zinc.html': """<html>
<body>
<h1>The Zinc Chef</h1>
<p>
I learned everything I know from
<a href="http://udacity.com/cs101x/urank/nickel.html">the Nickel Chef</a>.
</p>
<p>
For great hummus, try
<a href="http://udacity.com/cs101x/urank/arsenic.html">this recipe</a>.
</body>
</html>
""",
   'http://udacity.com/cs101x/urank/nickel.html': """<html>
<body>
<h1>The Nickel Chef</h1>
<p>
This is the
<a href="http://udacity.com/cs101x/urank/kathleen.html">
best Hummus recipe!
</a>
</body>
</html>
""",
   'http://udacity.com/cs101x/urank/kathleen.html': """<html>
<body>
<h1>
Kathleen's Hummus Recipe
</h1>
<p>
<ol>
<li> Open a can of garbonzo beans.
<li> Crush them in a blender.
<li> Add 3 tablesppons of tahini sauce.
<li> Squeeze in one lemon.
<li> Add salt, pepper, and buttercream frosting to taste.
</ol>
</body>
</html>
""",
   'http://udacity.com/cs101x/urank/arsenic.html': """<html>
<body>
<h1>
The Arsenic Chef's World Famous Hummus Recipe
</h1>
<p>
<ol>
<li> Kidnap the <a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>.
<li> Force her to make hummus for you.
</ol>
</body>
</html>
""",
   'http://udacity.com/cs101x/urank/hummus.html': """<html>
<body>
<h1>
Hummus Recipe
</h1>
<p>
<ol>
<li> Go to the store and buy a container of hummus.
<li> Open it.
</ol>
</body>
</html>
""",
}
def get_page(url):
    """Return the cached HTML for url, or '' for unknown pages."""
    return cache.get(url, "")
def get_next_target(page):
    """Return (url, end_quote_index) for the first '<a href=' link in page,
    or (None, 0) when no link remains."""
    anchor = page.find('<a href=')
    if anchor == -1:
        return None, 0
    first_quote = page.find('"', anchor)
    second_quote = page.find('"', first_quote + 1)
    return page[first_quote + 1:second_quote], second_quote
def get_all_links(page):
    """Return every href URL in page, in the order encountered."""
    links = []
    url, endpos = get_next_target(page)
    while url:
        links.append(url)
        page = page[endpos:]
        url, endpos = get_next_target(page)
    return links
def union(a, b):
    """Extend a (in place) with the elements of b it does not yet contain."""
    for element in b:
        if element not in a:
            a.append(element)
def add_page_to_index(index, url, content):
    """Register url under every whitespace-separated word of content."""
    for word in content.split():
        add_to_index(index, word, url)
def add_to_index(index, keyword, url):
    """Append url to keyword's posting list, creating the list on first use."""
    index.setdefault(keyword, []).append(url)
def lookup(index, keyword):
    """Return the URL list indexed under keyword, or None if absent."""
    return index.get(keyword)
def crawl_web(seed):
    """Crawl every page reachable from seed.

    Returns (index, graph): index maps keyword -> [urls containing it];
    graph maps url -> [urls it links to].
    """
    frontier = [seed]
    visited = []
    graph = {}
    index = {}
    while frontier:
        page = frontier.pop()
        if page in visited:
            continue
        content = get_page(page)
        add_page_to_index(index, page, content)
        outlinks = get_all_links(content)
        graph[page] = outlinks
        union(frontier, outlinks)
        visited.append(page)
    return index, graph
def compute_ranks(graph):
    """Run 10 iterations of the PageRank recurrence (damping factor 0.8).

    graph maps each url to the list of urls it links to; returns a dict
    mapping each url to its rank score.
    """
    damping = 0.8
    npages = len(graph)
    ranks = {page: 1.0 / npages for page in graph}
    for _ in range(10):
        newranks = {}
        for page in graph:
            rank = (1 - damping) / npages
            # Credit from every page linking here, split among its outlinks.
            for source in graph:
                if page in graph[source]:
                    rank += damping * (ranks[source] / len(graph[source]))
            newranks[page] = rank
        ranks = newranks
    return ranks
# Demo: crawl the canned cache, rank it, and run ordered_search.
# Here are some example showing what ordered_search should do:
# Observe that the result list is sorted so the highest-ranking site is at the
# beginning of the list.
# Note: the intent of this question is for students to write their own sorting
# code, not to use the built-in sort procedure.
index, graph = crawl_web('http://udacity.com/cs101x/urank/index.html')
ranks = compute_ranks(graph)
print(ordered_search(index, ranks, 'Hummus'))
#>>> ['http://udacity.com/cs101x/urank/kathleen.html',
#    'http://udacity.com/cs101x/urank/nickel.html',
#    'http://udacity.com/cs101x/urank/arsenic.html',
#    'http://udacity.com/cs101x/urank/hummus.html',
#    'http://udacity.com/cs101x/urank/index.html']
print(ordered_search(index, ranks, 'the'))
#>>> ['http://udacity.com/cs101x/urank/nickel.html',
#    'http://udacity.com/cs101x/urank/arsenic.html',
#    'http://udacity.com/cs101x/urank/hummus.html',
#    'http://udacity.com/cs101x/urank/index.html']
print(ordered_search(index, ranks, 'babaganoush'))
#>>> None
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson06/Only a Little Lucky.py",
"copies": "1",
"size": "7433",
"license": "mit",
"hash": -2953047044619158500,
"line_mean": 23.5313531353,
"line_max": 89,
"alpha_frac": 0.6391766447,
"autogenerated": false,
"ratio": 2.942596991290578,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4081773635990578,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# Write a procedure, convert_seconds, which takes as input a non-negative
# number of seconds and returns a string of the form
# '<integer> hours, <integer> minutes, <number> seconds' but
# where if <integer> is 1 for the number of hours or minutes,
# then it should be hour/minute. Further, <number> may be an integer
# or decimal, and if it is 1, then it should be followed by second.
# You might need to use int() to turn a decimal into a float depending
# on how you code this. int(3.0) gives 3
#
# Note that English uses the plural when talking about 0 items, so
# it should be "0 minutes".
#
def convert_seconds(seconds):
    """Format a non-negative duration as '<h> hours, <m> minutes, <s> seconds'.

    Each unit is singularised when its value is exactly 1.  Integer input
    keeps integer seconds; float input renders the leftover seconds with
    one decimal place.
    """
    hours = minutes = 0
    if seconds > 0:
        minutes = int(seconds / 60)
        seconds -= minutes * 60
        hours = int(minutes / 60)
        minutes -= hours * 60
    parts = ['%d hour' % hours if hours == 1 else '%d hours' % hours,
             '%d minute' % minutes if minutes == 1 else '%d minutes' % minutes]
    number = '%d' if isinstance(seconds, int) else '%.1f'
    unit = ' second' if seconds == 1 else ' seconds'
    parts.append((number + unit) % seconds)
    return ', '.join(parts)
print(convert_seconds(3600))
print(convert_seconds(3661))
#>>> 1 hour, 1 minute, 1 second
print(convert_seconds(7325))
#>>> 2 hours, 2 minutes, 5 seconds
print(convert_seconds(7261.7))
#>>> 2 hours, 1 minute, 1.7 seconds | {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson04/Converting Seconds.py",
"copies": "1",
"size": "1662",
"license": "mit",
"hash": -4056722186968291300,
"line_mean": 25.3968253968,
"line_max": 73,
"alpha_frac": 0.5734055355,
"autogenerated": false,
"ratio": 3.5137420718816066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9575128000688354,
"avg_score": 0.0024039213386504023,
"num_lines": 63
} |
__author__ = 'demi'
# Write a procedure download_time which takes as inputs a file size, the
# units that file size is given in, bandwidth and the units for
# bandwidth (excluding per second) and returns the time taken to download
# the file.
# Your answer should be a string in the form
# "<number> hours, <number> minutes, <number> seconds"
# Some information you might find useful is the number of bits
# in kilobits (kb), kilobytes (kB), megabits (Mb), megabytes (MB),
# gigabits (Gb), gigabytes (GB) and terabits (Tb), terabytes (TB).
#print 2 ** 10 # one kilobit, kb
#print 2 ** 10 * 8 # one kilobyte, kB
#print 2 ** 20 # one megabit, Mb
#print 2 ** 20 * 8 # one megabyte, MB
#print 2 ** 30 # one gigabit, Gb
#print 2 ** 30 * 8 # one gigabyte, GB
#print 2 ** 40 # one terabit, Tb
#print 2 ** 40 * 8 # one terabyte, TB
# Often bandwidth is given in megabits (Mb) per second whereas file size
# is given in megabytes (MB).
def convert_seconds(seconds):
    """Format a non-negative duration as '<h> hours, <m> minutes, <s> seconds'.

    Units of exactly 1 are singularised.  Integer seconds print as '%d';
    float seconds print with full '%f' precision.
    """
    hours = minutes = 0
    if seconds > 0:
        minutes = int(seconds / 60)
        seconds -= minutes * 60
        hours = int(minutes / 60)
        minutes -= hours * 60
    parts = ['%d hour' % hours if hours == 1 else '%d hours' % hours,
             '%d minute' % minutes if minutes == 1 else '%d minutes' % minutes]
    number = '%d' if isinstance(seconds, int) else '%f'
    unit = ' second' if seconds == 1 else ' seconds'
    parts.append((number + unit) % seconds)
    return ', '.join(parts)
def download_time(file_size, size_units, bandwidth, bw_units):
    """Return 'H hours, M minutes, S seconds' needed to transfer file_size
    (in size_units) over a link of bandwidth (bw_units per second)."""
    total_bits = getBytes(file_size, size_units)
    bits_per_second = getBytes(bandwidth, bw_units)
    return convert_seconds(float(total_bits) / bits_per_second)
def getBytes(size, units):
    """Scale size into bits according to its unit string.

    Lowercase-b suffixes are bits, uppercase-B suffixes are bytes (x8);
    an unrecognised unit leaves size unchanged.  (Despite the name, the
    common scale here is bits, matching the table in the comments above.)
    """
    factors = {
        'kb': 2 ** 10, 'kB': 2 ** 10 * 8,
        'Mb': 2 ** 20, 'MB': 2 ** 20 * 8,
        'Gb': 2 ** 30, 'GB': 2 ** 30 * 8,
        'Tb': 2 ** 40, 'TB': 2 ** 40 * 8,
    }
    return size * factors.get(units, 1)
# Demo runs; the '#>>>' lines show the expected outputs.
print(download_time(1,'kB',3,'MB'))
print(download_time(11,'GB', 5, 'MB'))
print(download_time(1024, 'kB', 1, 'MB'))
#>>> 0 hours, 0 minutes, 1 second
print(download_time(1024, 'kB', 1, 'Mb'))
#>>> 0 hours, 0 minutes, 8 seconds # 8.0 seconds is also acceptable
print(download_time(13, 'GB', 5.6, 'MB'))
#>>> 0 hours, 39 minutes, 37.1428571429 seconds
print(download_time(13, 'GB', 5.6, 'Mb'))
#>>> 5 hours, 16 minutes, 57.1428571429 seconds
print(download_time(10, 'MB', 2, 'kB'))
#>>> 1 hour, 25 minutes, 20 seconds # 20.0 seconds is also acceptable
print(download_time(10, 'MB', 2, 'kb'))
#>>> 11 hours, 22 minutes, 40 seconds # 40.0 seconds is also acceptable
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson04/Download Calculator.py",
"copies": "1",
"size": "3124",
"license": "mit",
"hash": 200906764860253400,
"line_mean": 25.7008547009,
"line_max": 73,
"alpha_frac": 0.5611395647,
"autogenerated": false,
"ratio": 3.1365461847389557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4197685749438956,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# THREE GOLD STARS
# Sudoku [http://en.wikipedia.org/wiki/Sudoku]
# is a logic puzzle where a game
# is defined by a partially filled
# 9 x 9 square of digits where each square
# contains one of the digits 1,2,3,4,5,6,7,8,9.
# For this question we will generalize
# and simplify the game.
# Define a procedure, check_sudoku,
# that takes as input a square list
# of lists representing an n x n
# sudoku puzzle solution and returns the boolean
# True if the input is a valid
# sudoku square and returns the boolean False
# otherwise.
# A valid sudoku square satisfies these
# two properties:
# 1. Each column of the square contains
# each of the whole numbers from 1 to n exactly once.
# 2. Each row of the square contains each
# of the whole numbers from 1 to n exactly once.
# You may assume the input is square and contains at
# least one row and column.
# Fixture boards for check_sudoku (exercised by the prints further below):
# one valid square and five invalid ones (duplicates, bad column, out-of-range
# values, non-integer entries).
correct = [[1,2,3],
[2,3,1],
[3,1,2]]
incorrect = [[1,2,3,4],
[2,3,1,3],
[3,1,2,3],
[4,4,4,4]]
incorrect2 = [[1,2,3,4],
[2,3,1,4],
[4,1,2,3],
[3,4,1,2]]
incorrect3 = [[1,2,3,4,5],
[2,3,1,5,6],
[4,5,2,1,3],
[3,4,5,2,1],
[5,6,4,3,2]]
incorrect4 = [['a','b','c'],
['b','c','a'],
['c','a','b']]
incorrect5 = [ [1, 1.5],
[1.5, 1]]
def check_sudoku(lists):
    """Return True iff lists is a valid n x n sudoku square: every row and
    every column contains each of the whole numbers 1..n exactly once.

    Fix: the previous version hard-coded its digit counters to 1..9, so any
    valid square larger than 9x9 was wrongly rejected.  Comparing each row
    and column against the set {1..n} generalises to any n.
    """
    n = len(lists)
    expected = set(range(1, n + 1))
    for row in lists:
        # set() collapses duplicates, so also require the row length to be
        # n; non-integer entries (floats like 1.5, strings) fail the
        # equality check.
        if len(row) != n or set(row) != expected:
            return False
    # zip(*lists) transposes the square so columns can be checked the same way.
    for column in zip(*lists):
        if set(column) != expected:
            return False
    return True
def clear_nums(nums):
    """Zero every counter in nums (mutated in place) and return it."""
    nums.update(dict.fromkeys(nums, 0))
    return nums
def has_correct_digits(lists):
    """True iff every entry is one of the whole numbers 1..n (n = width)."""
    allowed = range(1, len(lists[0]) + 1)
    return all(value in allowed for row in lists for value in row)
# Demo runs against the fixture boards above.
print(check_sudoku(incorrect))
#>>> False
print(check_sudoku(correct))
#>>> True
print(check_sudoku(incorrect2))
#>>> False
print(check_sudoku(incorrect3))
#>>> False
print(check_sudoku(incorrect4))
#>>> False
print(check_sudoku(incorrect5))
#>>> False | {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson03/sudoku.py",
"copies": "1",
"size": "2729",
"license": "mit",
"hash": 6922848631838889000,
"line_mean": 21.0161290323,
"line_max": 59,
"alpha_frac": 0.5272993771,
"autogenerated": false,
"ratio": 3.122425629290618,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4149725006390618,
"avg_score": null,
"num_lines": null
} |
from aliyunsdkcore import client
from aliyunsdkcms.request.v20170301 import QueryMetricListRequest
from aliyunsdkecs.request.v20140526 import DescribeInstancesRequest
import time
import json
import shutil
import sys
from multiprocessing import Process
from multiprocessing import cpu_count,Pool
reload(sys)
sys.setdefaultencoding('utf8')
#https://help.aliyun.com/document_detail/28619.html?spm=5176.doc51936.6.664.Q5sQGo
def GetTraffic(AliInstance,traffic_type):
    """Query Aliyun CloudMonitor for the last ~10 minutes of the given
    traffic metric and print one Prometheus-style sample per instance.

    AliInstance entries are lists: [instance_id, serial_number, region,
    acs_client] plus an optional 5th element (hostname) appended by main().
    Python 2 code: uses print statements and dict.has_key.

    NOTE(review): metric_name is only bound for the three expected
    traffic_type values; any other value raises UnboundLocalError below.
    """
    if traffic_type == 'VPC_PublicIP_InternetOutRate':
        metric_name = 'duimonitor_traffic_outrate'
    elif traffic_type == 'VPC_PublicIP_InternetInRate':
        metric_name = 'duimonitor_traffic_inrate'
    elif traffic_type == "VPC_PublicIP_InternetOutRate_Percent":
        metric_name = 'duimonitor_traffic_outrate_percent'
    for instance in AliInstance:
        clt = instance[3]
        request = QueryMetricListRequest.QueryMetricListRequest()
        request.set_accept_format('json')
        request.set_Project('acs_ecs_dashboard')
        request.set_Metric(traffic_type)
        # Window start: 10 minutes ago, as epoch milliseconds.
        timestamp = int(time.time()) - 600
        tmp = time.localtime(timestamp)
        start_time = time.strftime('%Y-%m-%d %H:%M:%S',tmp)
        timestamp_start = int(time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))) * 1000
        request.set_StartTime(timestamp_start)
        request.set_Dimensions("{'instanceId': '%s'}" % instance[0])
        request.set_Period('60')
        response = clt.do_action_with_exception(request)
        result = json.loads(response)
        # Hostname known (5 fields) and datapoints returned: print latest average.
        if result.has_key("Datapoints") and result["Datapoints"] and len(instance) == 5:
            print ("%s{machine=\"%s\",region=\"%s\"} %s" %(metric_name,instance[4],instance[2],result['Datapoints'][-1]['Average'])).strip()
        # Hostname known but no datapoints: report zero.
        elif result.has_key("Datapoints") and len(instance) == 5:
            print ("%s{machine=\"%s\",region=\"%s\"} %s" %(metric_name,instance[4],instance[2],0)).strip()
        # No hostname mapping in opsmind: fall back to the instance id.
        elif result.has_key("Datapoints") and result["Datapoints"]:
            print ("%s{machine=\"%s\",region=\"%s\"} %s" %(metric_name,instance[0],instance[2],result['Datapoints'][-1]['Average'])).strip()
        # No hostname and no traffic data from Aliyun: report zero under the id.
        elif result.has_key("Datapoints"):
            print ("%s{machine=\"%s\",region=\"%s\"} %s" %(metric_name,instance[0],instance[2],0)).strip()
def GetInstance():
    """List ECS instances across the configured Aliyun regions.

    Returns a list of [instance_id, serial_number_uppercased, region_name,
    acs_client] records; the client is kept so callers can reuse it for
    per-region metric queries.

    NOTE(review): credentials are placeholders ('****'), and one Hangzhou
    host (d1-public-001) is appended by hand - keep both in sync with the
    production config.
    """
    key = '****'
    secret = '****'
    instances = []
    for region in ['cn-hangzhou','cn-shenzhen','cn-beijing','cn-shanghai']:
        clt = client.AcsClient(key, secret, region)
        request = DescribeInstancesRequest.DescribeInstancesRequest()
        request.set_PageSize(100)
        payload = json.loads(clt.do_action_with_exception(request))
        for item in payload['Instances']['Instance']:
            instances.append([item['InstanceId'], item['SerialNumber'].upper(), region, clt])
        if region == 'cn-hangzhou':
            instances.append(['i-bp1bqn54uq1tyjd5oeco','D4B04214-BD2A-4554-A2C0-847DDD5D375F','cn-hangzhou',clt])#d1-public-001
    return instances
def main():
    """Join Aliyun instances with opsmind hostnames, then export the three
    traffic metrics concurrently (one process per metric)."""
    # /tmp/opsmind_hostlist maps serial numbers -> hostnames.
    with open('/tmp/opsmind_hostlist') as f:
        OpsmindInstance = json.load(f)
    AliInstance = GetInstance()
    # Shallow copy on purpose: AliInstanceCopy shares the inner lists with
    # AliInstance, so the append below mutates the shared record in place.
    AliInstanceCopy = AliInstance[:]
    for instance in AliInstance:
        if OpsmindInstance.has_key(instance[1]):
            # Attach the hostname as a 5th field (GetTraffic checks len == 5).
            hostname = OpsmindInstance[instance[1]]
            index = AliInstance.index(instance)
            AliInstanceCopy[index].append(hostname)
        else:
            pass
            #print instance[0],instance[1],instance[2]
    p1 = Process(target=GetTraffic,args=(AliInstanceCopy,"VPC_PublicIP_InternetOutRate"))
    p2 = Process(target=GetTraffic,args=(AliInstanceCopy,"VPC_PublicIP_InternetInRate"))
    p3 = Process(target=GetTraffic,args=(AliInstanceCopy,"VPC_PublicIP_InternetOutRate_Percent"))
    p1.start()
    p2.start()
    p3.start()
    p1.join()
    p2.join()
    p3.join()
if __name__ == '__main__':
main()
| {
"repo_name": "dengxiangyu768/dengxytools",
"path": "promethues/monitor_traffic.py",
"copies": "1",
"size": "3876",
"license": "apache-2.0",
"hash": 3990517754242985000,
"line_mean": 40.2340425532,
"line_max": 134,
"alpha_frac": 0.689628483,
"autogenerated": false,
"ratio": 3.1848808545603946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9215914975705402,
"avg_score": 0.031718872370998505,
"num_lines": 94
} |
__author__ = 'dengzhihong'
from numpy import *
import numpy as np
from sklearn.decomposition import *
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier
import pylab
import matplotlib.cm as cm
import matplotlib.pyplot as plt
def randomTrail(vectors, labels):
    """Write a random 50/50 train/test split of the 4000 sample indices to
    ./Output/random_train.txt and ./Output/random_test.txt (one
    'index index' pair per line, train indices sorted ascending).

    Fixes: numpy's randint excludes its upper bound, so the old
    randint(0, 3999) could never draw index 3999 into the training split;
    it also used O(n^2) list-membership rejection sampling.  A set with an
    exclusive bound of 4000 fixes both.  The vectors/labels arguments are
    unused (kept for interface compatibility).
    """
    train = open("./Output/random_train.txt","w")
    test = open("./Output/random_test.txt","w")
    chosen = set()
    while len(chosen) < 2000:
        chosen.add(random.randint(0, 4000))  # draws 0..3999 inclusive
    TrainIndice = sorted(chosen)
    TestIndice = [i for i in range(4000) if i not in chosen]
    for i in range(2000):
        train.write(str(TrainIndice[i]) + " " + str(TrainIndice[i]) + "\n")
        test.write(str(TestIndice[i]) + " " + str(TestIndice[i]) + "\n")
    train.close()
    test.close()
    return 0
def toFloatList(stringlist):
    """Convert every element to float, preserving order."""
    return [float(item) for item in stringlist]
def toStrList(floatlist):
    """Convert every element to its str() form, preserving order."""
    return [str(item) for item in floatlist]
def prepareData(Data, Label, index):
    """Gather the rows of Data and Label selected by index.

    index is an (m, 2) integer array: column 0 selects the Data row,
    column 1 the Label entry.  Returns (OutData, OutLabel) as new float
    arrays of shapes (m, Data.shape[1]) and (m,).
    """
    count = index.shape[0]
    width = Data.shape[1]
    OutData = zeros((count, width))
    OutLabel = zeros(count)
    for row in range(count):
        OutData[row] = Data[index[row][0]]
        OutLabel[row] = Label[index[row][1]]
    return OutData, OutLabel
def prepareSvmClassifier(TrainData, TrainLabel, N, Kernel, c=1.0, Gamma=0.0):
    """Fit N one-vs-rest SVCs (one per digit 0..N-1) with the given kernel.

    For each digit the labels are binarised to +1 (that digit) / -1 (rest)
    before fitting.  Returns the list of fitted classifiers.
    """
    classifiers = []
    samples = TrainData.shape[0]
    for digit in range(N):
        # One-vs-rest labels: +1 for the current digit, -1 otherwise.
        TempLabel = TrainLabel.copy()
        for row in range(samples):
            TempLabel[row] = 1 if TrainLabel[row] == digit else -1
        if Kernel == 'linear':
            clf = SVC(kernel='linear', C=c)
        elif Kernel == 'poly':
            clf = SVC(kernel='poly', C=c, gamma=Gamma)
        elif Kernel == 'rbf':
            clf = SVC(kernel='rbf', C=c, gamma=Gamma)
        clf.fit(TrainData, TempLabel)
        classifiers.append(clf)
    return classifiers
def prepareLrClassifier(TrainData, TrainLabel, N):
    """Fit N one-vs-rest LogisticRegression classifiers, one per digit.

    Labels are binarised per digit (+1 for the digit, -1 for the rest)
    before fitting; returns the list of fitted classifiers.
    """
    classifiers = []
    samples = TrainData.shape[0]
    for digit in range(N):
        # One-vs-rest labels: +1 for the current digit, -1 otherwise.
        TempLabel = TrainLabel.copy()
        for row in range(samples):
            TempLabel[row] = 1 if TrainLabel[row] == digit else -1
        clf = LogisticRegression()
        clf.fit(TrainData, TempLabel)
        classifiers.append(clf)
    return classifiers
def processLabel(Label, target):
    """Return a binarised copy of Label: +1 where the entry equals target,
    -1 everywhere else.  The input array is left untouched."""
    ResultLabel = Label.copy()
    for pos in range(ResultLabel.shape[0]):
        ResultLabel[pos] = 1 if ResultLabel[pos] == target else -1
    return ResultLabel
def showDigit(digit, title = "Digit"):
    """Display a flattened 28x28 digit image in a matplotlib window (blocks
    until the window is closed)."""
    fig = pylab.figure()
    pylab.title(title)
    fig.add_subplot(1,1,1)
    # The vectors appear to be stored transposed, hence the .T before
    # display -- NOTE(review): confirm against the data loader.
    pylab.imshow(digit.reshape(28, 28).T, cmap = cm.Greys_r)
    pylab.show()
def outputTrainingData(TrainData, TrainLabel, PCA_K):
    """Dump the training set in libsvm-style format ('label idx:value ...')
    to ./Output/Trial2_<PCA_K>.txt, one sample per line."""
    output = open("./Output/Trial2_" + str(PCA_K) + ".txt", "w")
    rows = TrainData.shape[0]
    cols = TrainData.shape[1]
    for r in range(rows):
        fields = [str(int(TrainLabel[r]))]
        for c in range(cols):
            fields.append(str(c) + ":" + str(TrainData[r][c]))
        # Trailing space before the newline matches the original format.
        output.write(" ".join(fields) + " \n")
    output.close()
def normalization(data):
    """Map raw pixel values in [0, 255] onto [-1.0, 1.0]."""
    scaled = data / 255.0
    return scaled * 2 - 1
def testWithSVM(labels, vectors, testset, trainset, Kernel='linear', C=1.0, gamma=0.0, PCA_K=0):
    """Train one-vs-rest SVMs on half the 4000-digit set and score the rest.

    labels/vectors are flat string lists (4000 labels, 4000*784 pixels);
    testset/trainset are flat 1-based 'data-index label-index' pairs.
    PCA_K != 0 first reduces the data to PCA_K principal components.
    Prints per-digit accuracy and returns
    (RetainRate, Correctness, CorrectNum).  Python 2 code.
    """
    Label = array(toFloatList(labels))
    OriginData = array(toFloatList(vectors)).reshape(4000, 784)
    TestNum = len(testset)
    # Index pairs are 1-based in the files; shift to 0-based here.
    test = array(toFloatList(testset)).reshape(TestNum/2,2) - 1
    TrainNum = len(trainset)
    train = array(toFloatList(trainset)).reshape(TrainNum/2,2) - 1
    OriginData = normalization(OriginData)
    Data = OriginData
    RetainRate = 100
    if(PCA_K != 0):
        k = PCA_K
        pca = PCA(n_components=k)
        Data = pca.fit_transform(OriginData)
        # Explained variance retained by the first k components.
        sum = 0
        for i in range(k):
            sum += pca.explained_variance_ratio_[i]
        RetainRate = sum * 100
        print "retain " + str(RetainRate) +"% of the variance"
    TrainData, TrainLabel = prepareData(Data, Label, train)
    TestData, TestLabel = prepareData(Data, Label, test)
    #outputTrainingData(TrainData, TrainLabel, PCA_K)
    print 'SVM with ', Kernel, ' Kernel'
    if(Kernel == 'linear'):
        ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, Kernel, C)
    elif(Kernel == 'poly'):
        ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, Kernel, C, Gamma=gamma)
    elif(Kernel == 'rbf'):
        ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, Kernel, C, Gamma=gamma)
    else:
        ClfSet = []
        print "Please Choose a kernel"
        exit()
    N = test.shape[0]
    correct = 0.0
    CorrectNum = []
    for i in range(10):
        CorrectNum.append(0)
    # Classify each test sample as the digit whose one-vs-rest classifier
    # reports the highest decision-function value.
    for i in range(N):
        confidence = -999
        classification = -1
        for j in range(10):
            temp = ClfSet[j].decision_function(TestData[i])
            if(confidence < temp):
                confidence = temp
                classification = j
        if(classification == TestLabel[i]):
            CorrectNum[classification] += 1
            correct += 1
    Correctness = correct/N * 100
    print "Accuracy: ", Correctness, "%"
    for i in range(10):
        print "digit ", i , ": ", CorrectNum[i], "/200"
    return RetainRate, Correctness, CorrectNum
def testWithLR(labels, vectors, testset, trainset, PCA_K=0):
    """Train a multinomial LogisticRegression on half the digit set and
    score the other half; optional PCA reduction to PCA_K components.

    Same input conventions as testWithSVM.  Prints per-digit accuracy and
    returns (RetainRate, Correctness, CorrectNum).  Python 2 code.
    """
    Label = array(toFloatList(labels))
    OriginData = array(toFloatList(vectors)).reshape(4000,784)
    TestNum = len(testset)
    # Index pairs are 1-based in the files; shift to 0-based here.
    test = array(toFloatList(testset)).reshape(TestNum/2,2) - 1
    TrainNum = len(trainset)
    train = array(toFloatList(trainset)).reshape(TrainNum/2,2) - 1
    OriginData = normalization(OriginData)
    Data = OriginData
    RetainRate = 100
    if(PCA_K != 0):
        k = PCA_K
        pca = PCA(n_components=k)
        Data = pca.fit_transform(OriginData)
        sum = 0
        for i in range(k):
            sum += pca.explained_variance_ratio_[i]
        RetainRate = sum * 100
        print "retain " + str(sum * 100) +"% of the variance"
    TrainData, TrainLabel = prepareData(Data, Label, train)
    TestData, TestLabel = prepareData(Data, Label, test)
    print 'Logistic Regression With Dimension Reduced to ', PCA_K
    clf = LogisticRegression()
    clf.fit(TrainData, TrainLabel)
    result = clf.predict(TestData)
    N = TestLabel.shape[0]
    correct = 0.0
    CorrectNum = []
    for i in range(10):
        CorrectNum.append(0)
    for i in range(N):
        #print result[i], " - ", TestLabel[i]
        if(result[i] == TestLabel[i]):
            CorrectNum[int(result[i])] += 1
            correct += 1
    Correctness = correct/N * 100
    print "Accuracy: ", Correctness
    for i in range(10):
        print "digit ", i , ": ", CorrectNum[i], "/200"
    return RetainRate, Correctness, CorrectNum
def testWithKNN(labels, vectors, testset, trainset, PCA_K=0):
    """Evaluate a 1-nearest-neighbour classifier on the digit data.

    labels/vectors are flat string lists (4000 samples x 784 pixels);
    testset/trainset are flat index-pair lists (shifted down by one, so
    presumably stored one-based -- confirm against the data files).
    PCA_K != 0 first projects the data onto PCA_K principal components.
    Returns (RetainRate, Correctness, CorrectNum): retained variance in
    percent, overall accuracy in percent, and per-digit correct counts.
    """
    Label = array(toFloatList(labels))
    OriginData = array(toFloatList(vectors)).reshape(4000,784)
    TestNum = len(testset)
    # Flat index list -> (pairs, 2), converted to zero-based indices.
    test = array(toFloatList(testset)).reshape(TestNum/2,2) - 1
    TrainNum = len(trainset)
    train = array(toFloatList(trainset)).reshape(TrainNum/2,2) - 1
    OriginData = normalization(OriginData)
    Data = OriginData
    RetainRate = 100
    if(PCA_K != 0):
        k = PCA_K
        pca = PCA(n_components=k)
        Data = pca.fit_transform(OriginData)
        # NOTE: `sum` shadows the builtin; it accumulates retained variance.
        sum = 0
        for i in range(k):
            sum += pca.explained_variance_ratio_[i]
        RetainRate = sum * 100
        print "retain " + str(sum * 100) +"% of the variance"
    print '1NN With Dimension Reduced to ', PCA_K
    TrainData, TrainLabel = prepareData(Data, Label, train)
    TestData, TestLabel = prepareData(Data, Label, test)
    knn = KNeighborsClassifier(n_neighbors=1)
    knn.fit(TrainData, TrainLabel)
    result = knn.predict(TestData)
    N = TestLabel.shape[0]
    correct = 0.0
    CorrectNum = []
    for i in range(10):
        CorrectNum.append(0)
    for i in range(N):
        #print result[i], " - ", TestLabel[i]
        if(result[i] == TestLabel[i]):
            CorrectNum[int(result[i])] += 1
            correct += 1
    Correctness = correct/N * 100
    print "Accuracy: ", Correctness
    # "/200" assumes 200 test samples per digit -- TODO confirm the splits.
    for i in range(10):
        print "digit ", i , ": ", CorrectNum[i], "/200"
    return RetainRate, Correctness, CorrectNum
from test import *
from numpy import *
def outputDataForCrossValidation(labels, vectors, testset, trainset, PCA_K=0):
Label = array(toFloatList(labels))
OriginData = array(toFloatList(vectors)).reshape(4000,784)
TestNum = len(testset)
test = array(toFloatList(testset)).reshape(TestNum/2,2) - 1
TrainNum = len(trainset)
train = array(toFloatList(trainset)).reshape(TrainNum/2,2) - 1
OriginData = normalization(OriginData)
Data = OriginData
RetainRate = 100
if(PCA_K != 0):
k = PCA_K
pca = PCA(n_components=k)
Data = pca.fit_transform(OriginData)
sum = 0
for i in range(k):
sum += pca.explained_variance_ratio_[i]
RetainRate = sum * 100
print "retain " + str(RetainRate) +"% of the variance"
TrainData, TrainLabel = prepareData(Data, Label, train)
TestData, TestLabel = prepareData(Data, Label, test)
outputTrainingData(TrainData, TrainLabel, PCA_K)
def accuracyAndDimension(labels, vectors, testset, trainset):
    """Plot average accuracy vs. PCA dimension for five classifiers.

    Runs two trials (the second swaps the train/test splits, i.e. a
    2-fold style evaluation), averages the (5, 1, 6) accuracy arrays
    returned by getTrialAccuracyResult and draws one curve per method.
    """
    TrialResult = zeros((5,1,6))
    # ------------------------------------------------------------------------------------------------
    # Trail 1
    # ------------------------------------------------------------------------------------------------
    print "Trial 1"
    # Per-dimension SVM hyper-parameters (C and gamma for the rbf kernel).
    TestC_1 = [2, 8, 2, 2, 2, 2]
    TestGamma_1 = [0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125]
    # Candidate PCA dimensions under test.
    TestK = [20, 50, 84, 150, 300, 400]
    #drawCombinedDiagram(labels, vectors, testset, trainset, 'Trial 1', TestK, TestC, TestGamma)
    Result_1 = getTrialAccuracyResult(labels, vectors, testset, trainset, TestK, TestC_1, TestGamma_1)
    #print TrialResult
    #print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
    TrialResult += Result_1
    print TrialResult
    # ------------------------------------------------------------------------------------------------
    # Trail 2
    # Trial 2 swaps the roles of the two splits.
    print "Trial 2"
    TestC_2 = [2, 8, 8, 2, 2, 2]
    TestGamma_2 = [0.03125, 0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125]
    #drawCombinedDiagram(labels, vectors, trainset, testset, 'Trial 2', TestK, TestC, TestGamma)
    Result_2 = getTrialAccuracyResult(labels, vectors, trainset, testset, TestK, TestC_2, TestGamma_2)
    #print TrialResult
    #print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
    TrialResult += Result_2
    print TrialResult
    # ------------------------------------------------------------------------------------------------
    #Random Trial
    '''
    print "Trial 3"
    #testWithSVM(labels, vectors, random_testset, random_trainset, Kernel='linear', PCA_K=20)
    TestC_3 = [2, 8, 8, 2, 2, 2]
    TestGamma_3 = [0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125]
    #drawCombinedDiagram(labels, vectors, random_testset, random_trainset, 'Random Trial', TestK, TestC, TestGamma)
    Result_3 = getTrialResult(labels, vectors, random_testset, random_trainset, TestK, TestC_3, TestGamma_3)
    TrialResult += Result_3
    '''
    #print TrialResult
    #print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
    # ------------------------------------------------------------------------------------------------
    # Average over the two trials that actually ran.
    TrialResult = TrialResult/2.0
    print TrialResult
    print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
    # Rows are ordered: svm-linear, svm-poly, svm-rbf, LR, 1-NN.
    C_SVM_LINEAR = TrialResult[0][0].tolist()
    C_SVM_POLY = TrialResult[1][0].tolist()
    C_SVM_RBF = TrialResult[2][0].tolist()
    C_LR = TrialResult[3][0].tolist()
    C_1NN = TrialResult[4][0].tolist()
    plt.figure(1)
    plt.title("Average Accuracy for all trials")
    plt.xlabel("dimension")
    plt.ylabel("accuracy(%)")
    plt.axis([0,450,83,97])
    plt.grid(True)
    plt.plot(TestK, C_SVM_LINEAR, 'ro--')
    plt.plot(TestK, C_SVM_POLY, 'b<--')
    plt.plot(TestK, C_SVM_RBF, 'gs--')
    plt.plot(TestK, C_LR, 'yd--')
    plt.plot(TestK, C_1NN, 'co--')
    # Annotate every curve near its 5th data point.
    P = 4
    plt.annotate("svm linear", xy=(TestK[P], C_SVM_LINEAR[P]),xytext=(TestK[P]-18, C_SVM_LINEAR[P]+0.2))
    plt.annotate("svm poly", xy=(TestK[P], C_SVM_POLY[P]),xytext=(TestK[P]-18, C_SVM_POLY[P]+0.5))
    plt.annotate("svm rbf", xy=(TestK[P], C_SVM_RBF[P]),xytext=(TestK[P]-18, C_SVM_RBF[P]+0.2))
    plt.annotate("lr", xy=(TestK[P], C_LR[P]),xytext=(TestK[P]-18, C_LR[P]+0.2))
    plt.annotate("1nn", xy=(TestK[P], C_1NN[P]),xytext=(TestK[P]-18, C_1NN[P]+0.2))
    plt.show()
def recognitionNumberAndDigit(labels, vectors, testset, trainset):
    """Plot average per-digit recognition counts for five classifiers.

    Runs two trials (the second swaps the train/test splits), averages
    the (5, 1, 10) count arrays from getTrialDigitResult, and draws one
    curve per method over the ten digits.
    """
    TrialResult = zeros((5,1,10))
    Digit = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    # ------------------------------------------------------------------------------------------------
    # Trail 1
    # ------------------------------------------------------------------------------------------------
    print "Trial 1"
    # Per-dimension SVM hyper-parameters (C and gamma for the rbf kernel).
    TestC_1 = [2, 8, 2, 2, 2, 2]
    TestGamma_1 = [0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125]
    TestK = [20, 50, 84, 150, 300, 400]
    #drawCombinedDiagram(labels, vectors, testset, trainset, 'Trial 1', TestK, TestC, TestGamma)
    Result_1 = getTrialDigitResult(labels, vectors, testset, trainset, TestK, TestC_1, TestGamma_1)
    #print TrialResult
    #print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
    TrialResult += Result_1
    print TrialResult
    # ------------------------------------------------------------------------------------------------
    # Trail 2
    # Trial 2 swaps the roles of the two splits.
    print "Trial 2"
    TestC_2 = [2, 8, 8, 2, 2, 2]
    TestGamma_2 = [0.03125, 0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125]
    #drawCombinedDiagram(labels, vectors, trainset, testset, 'Trial 2', TestK, TestC, TestGamma)
    Result_2 = getTrialDigitResult(labels, vectors, trainset, testset, TestK, TestC_2, TestGamma_2)
    #print TrialResult
    #print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
    TrialResult += Result_2
    print TrialResult
    # ------------------------------------------------------------------------------------------------
    # Average over the two trials, rounded for display.
    TrialResult = TrialResult/2.0
    TrialResult = TrialResult.round(2)
    print TrialResult
    print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
    # Rows are ordered: svm-linear, svm-poly, svm-rbf, LR, 1-NN.
    C_SVM_LINEAR = TrialResult[0][0].tolist()
    C_SVM_POLY = TrialResult[1][0].tolist()
    C_SVM_RBF = TrialResult[2][0].tolist()
    C_LR = TrialResult[3][0].tolist()
    C_1NN = TrialResult[4][0].tolist()
    plt.figure(1)
    plt.title("Average Recognition Number for all digits")
    plt.xlabel("digit")
    plt.ylabel("num of correctly recognition")
    plt.axis([-1,10,140,205])
    plt.grid(True)
    plt.plot(Digit, C_SVM_LINEAR, 'ro--')
    plt.plot(Digit, C_SVM_POLY, 'b<--')
    plt.plot(Digit, C_SVM_RBF, 'gs--')
    plt.plot(Digit, C_LR, 'yd--')
    plt.plot(Digit, C_1NN, 'co--')
    # Annotate every curve next to the digit-9 data point.
    P = 9
    plt.annotate("svm linear", xy=(Digit[P], C_SVM_LINEAR[P]),xytext=(Digit[P]+0.1, C_SVM_LINEAR[P]))
    plt.annotate("svm poly", xy=(Digit[P], C_SVM_POLY[P]),xytext=(Digit[P]+0.1, C_SVM_POLY[P]))
    plt.annotate("svm rbf", xy=(Digit[P], C_SVM_RBF[P]),xytext=(Digit[P]+0.1, C_SVM_RBF[P]))
    plt.annotate("lr", xy=(Digit[P], C_LR[P]),xytext=(Digit[P]+0.1, C_LR[P]))
    plt.annotate("1nn", xy=(Digit[P], C_1NN[P]),xytext=(Digit[P]+0.1, C_1NN[P]))
    plt.show()
def getTestResult(vectors, labels, testset, trainset, PCA_K=0, C=1, GAMMA=0.0, method=''):
    """Train one classifier chosen by `method` and return its accuracy (%).

    method is one of 'svm_linear', 'svm_poly', 'svm_rbf', 'lr', '1nn';
    anything else prints a message and exits the process. C and GAMMA are
    only used by the rbf SVM. PCA_K != 0 reduces the data first.
    """
    Label = array(toFloatList(labels))
    OriginData = array(toFloatList(vectors)).reshape(4000,784)
    TestNum = len(testset)
    # Flat index pairs -> (pairs, 2), converted to zero-based indices.
    test = array(toFloatList(testset)).reshape(TestNum/2,2) - 1
    TrainNum = len(trainset)
    train = array(toFloatList(trainset)).reshape(TrainNum/2,2) - 1
    OriginData = normalization(OriginData)
    Data = OriginData
    RetainRate = 100
    if(PCA_K != 0):
        k = PCA_K
        pca = PCA(n_components=k)
        Data = pca.fit_transform(OriginData)
        # NOTE: `sum` shadows the builtin; accumulates retained variance.
        sum = 0
        for i in range(k):
            sum += pca.explained_variance_ratio_[i]
        RetainRate = sum * 100
        #print "retain " + str(RetainRate) +"% of the variance"
    TrainData, TrainLabel = prepareData(Data, Label, train)
    TestData, TestLabel = prepareData(Data, Label, test)
    #outputTrainingData(TrainData, TrainLabel, PCA_K)
    clf = 0
    ClfSet = []
    #print 'Method: ', method
    # SVM variants produce a set of 10 one-vs-rest classifiers; LR/1NN
    # produce a single multi-class classifier.
    if(method == 'svm_linear'):
        ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, 'linear')
    elif(method == 'svm_poly'):
        ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, 'poly')
    elif(method == 'svm_rbf'):
        # NOTE(review): keyword `c` (lowercase) here, while other call sites
        # pass C positionally -- confirm prepareSvmClassifier's signature.
        ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, 'rbf', c=C, Gamma=GAMMA)
    elif(method == 'lr'):
        clf = LogisticRegression()
        clf.fit(TrainData, TrainLabel)
    elif(method == '1nn'):
        clf = KNeighborsClassifier(n_neighbors=1)
        clf.fit(TrainData, TrainLabel)
    else:
        print "Please Choose a kernel"
        exit()
    N = TestData.shape[0]
    #print "Totoal ", N, " test data"
    correct = 0.0
    CorrectNum = []
    for i in range(10):
        CorrectNum.append(0)
    if(method[0:3] == 'svm'):
        # One-vs-rest decoding: pick the digit with the highest margin.
        for i in range(N):
            confidence = -999
            classification = -1
            for j in range(10):
                temp = ClfSet[j].decision_function(TestData[i])
                if(confidence < temp):
                    confidence = temp
                    classification = j
            if(classification == TestLabel[i]):
                CorrectNum[classification] += 1
                correct += 1
    else:
        result = clf.predict(TestData)
        for i in range(N):
            if(result[i] == TestLabel[i]):
                CorrectNum[int(result[i])] += 1
                correct += 1
    Correctness = correct/N * 100
    '''
    print "Accuracy: ", Correctness, "%"
    for i in range(10):
        print "digit ", i , ": ", CorrectNum[i], "/10"
    '''
    return Correctness
def getTrialAccuracyResult(labels, vectors, testset, trainset, TestK, TestC, TestGamma):
N = len(TestK)
C_SVM_LINEAR = []
C_SVM_POLY = []
C_SVM_RBF = []
C_LR = []
C_1NN = []
AccuracyResult = zeros((5,1,N))
for i in range(N):
print "Test ", i
print "--------------------------------------------------------------------------------------------------"
print "K = ", TestK[i]
Rate, Correct, C_NUM = testWithSVM(labels, vectors, testset, trainset, Kernel='linear', PCA_K=TestK[i])
C_SVM_LINEAR.append(Correct)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithSVM(labels, vectors, testset, trainset, Kernel='poly', PCA_K=TestK[i])
C_SVM_POLY.append(Correct)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithSVM(labels, vectors, testset, trainset, Kernel='rbf', C=TestC[i], gamma=TestGamma[i], PCA_K=TestK[i])
C_SVM_RBF.append(Correct)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithLR(labels, vectors, testset, trainset, PCA_K=TestK[i])
C_LR.append(Correct)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithKNN(labels, vectors, testset, trainset, PCA_K=TestK[i])
C_1NN.append(Correct)
print "--------------------------------------------------------------------------------------------------"
AccuracyResult[0][0] = array(C_SVM_LINEAR)
AccuracyResult[1][0] = array(C_SVM_POLY)
AccuracyResult[2][0] = array(C_SVM_RBF)
AccuracyResult[3][0] = array(C_LR)
AccuracyResult[4][0] = array(C_1NN)
#print "Return:"
#print AccuracyResult
#print "\n\n"
return AccuracyResult
def getTrialDigitResult(labels, vectors, testset, trainset, TestK, TestC, TestGamma):
N = len(TestK)
Num_linear = zeros((1, 10))
Num_poly = zeros((1, 10))
Num_rbf = zeros((1, 10))
Num_lr = zeros((1, 10))
Num_1nn = zeros((1, 10))
RecognitionNum = zeros((5,1,10))
for i in range(len(TestK)):
print "Test ", i
print "--------------------------------------------------------------------------------------------------"
print "K = ", TestK[i]
Rate, Correct, C_NUM = testWithSVM(labels, vectors, testset, trainset, Kernel='linear', PCA_K=TestK[i])
Num_linear += array(C_NUM)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithSVM(labels, vectors, testset, trainset, Kernel='poly', PCA_K=TestK[i])
Num_poly += array(C_NUM)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithSVM(labels, vectors, testset, trainset, Kernel='rbf', C=TestC[i], gamma=TestGamma[i], PCA_K=TestK[i])
Num_rbf += array(C_NUM)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithLR(labels, vectors, testset, trainset, PCA_K=TestK[i])
Num_lr += array(C_NUM)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithKNN(labels, vectors, testset, trainset, PCA_K=TestK[i])
Num_1nn += array(C_NUM)
RecognitionNum[0][0] = array(Num_linear)
RecognitionNum[1][0] = array(Num_poly)
RecognitionNum[2][0] = array(Num_rbf)
RecognitionNum[3][0] = array(Num_lr)
RecognitionNum[4][0] = array(Num_1nn)
return RecognitionNum/N | {
"repo_name": "dzh123xt/DigitRecognition",
"path": "src/Methods/TestMethods.py",
"copies": "1",
"size": "23664",
"license": "mit",
"hash": 3214576971996793300,
"line_mean": 38.3106312292,
"line_max": 140,
"alpha_frac": 0.53469405,
"autogenerated": false,
"ratio": 3.4385353095030515,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44732293595030514,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dengzhihong'
from src.Cluster.base import *
import numpy as np
from src.Methods.math_methods import *
from src.Methods.process_data import *
from src.Methods.draw_diagram import *
class KMeans(ClusterBase):
@staticmethod
def clusterAssignment(data, Mean):
D = data.shape[1]
K = Mean.shape[0]
N = data.shape[0]
min = np.inf
Z = np.zeros((N, 1))
index = -1
for i in range(N):
for k in range(K):
norm = getSquareNorm(data[i], Mean[k])
if(norm < min):
index = k
min = norm
min = np.inf
Z[i] = index
index = -1
return Z
@staticmethod
def estimateCenter(data, label, K):
D = data.shape[1]
Mean = np.mat(np.zeros((K,D)))
for j in range(K):
DataInClusterJ = data[np.nonzero(label[:,0] == j)]
Mean[j,:] = np.mean(DataInClusterJ, axis=0)
return Mean
@staticmethod
def runKmeans(data, K):
D = data.shape[1]
N = data.shape[0]
Z = np.zeros((N, 1))
Mean = ClusterBase.genRandMean(data, K)
print 'Initial Mean: '
print Mean
count = 0
Mean_Old = np.mat(np.zeros((K, D)))
while(True):
print '++++++++++++++++++++++++++++++++'
print count
Z = KMeans.clusterAssignment(data, Mean)
Mean = KMeans.estimateCenter(data, Z, K)
print Mean
count +=1
if(KMeans.isVectorConverge(Mean, Mean_Old, 0.0001)):
print 'Converge'
break
Mean_Old = Mean
return Z.reshape(-1).astype(int).tolist()
@staticmethod
def testWithKmeans(data, title, K):
X = RawData2XYArray(data)
Z = KMeans.runKmeans(X, K)
showDiagramInCluster(X, Z, "Kmeans_" + title) | {
"repo_name": "dzh123xt/pythonML",
"path": "src/Cluster/kmeans.py",
"copies": "1",
"size": "1929",
"license": "mit",
"hash": -4313603193568317400,
"line_mean": 28.6923076923,
"line_max": 64,
"alpha_frac": 0.5064800415,
"autogenerated": false,
"ratio": 3.639622641509434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9633738783671659,
"avg_score": 0.002472779867554904,
"num_lines": 65
} |
__author__ = 'dengzhihong'
from src.Regression.base import *
from scipy import optimize
from numpy import *
class RR(RegressionBase):
    """Robust (L1) polynomial regression solved as a linear program.

    The absolute-residual objective is linearised with one slack variable
    t_i per sample: minimise sum(t) subject to -PHI'theta - t <= -y and
    PHI'theta - t <= y, then handed to SciPy's SLSQP minimiser.

    Fix: dataset-export residue ("| {") fused onto the final return line
    has been removed; logic is otherwise unchanged.
    """
    @staticmethod
    def run(sampx, sampy, K):
        """Fit a degree-K polynomial minimising absolute residuals.

        Returns the (K+1) x 1 coefficient column vector (matrix type).
        """
        y = RegressionBase.strlistToFloatvector(sampy)
        fai_matrix_trans = transpose(RegressionBase.constructFaiMartix(sampx, K))
        n = len(sampx)
        D = K + 1  # number of polynomial coefficients
        I_n = eye(n)
        # Stacked inequality system A x <= b with x = [theta; t].
        A_matrix = array(row_stack( (column_stack( (-fai_matrix_trans,-I_n) ), column_stack( (fai_matrix_trans,-I_n) )) ))
        # Objective f' x only penalises the slack block.
        f_matrix = array(row_stack( ( zeros( (D,1) ), ones( (n,1) ) ) ))
        b_matrix = array(row_stack( (-y,y) ))
        # Arbitrary initial value for theta and t.
        x_matrix = array(row_stack( (ones( (D,1) ), ones((n,1)) ) ))
        def constraintFunc(x):
            # SciPy 'ineq' convention: return b - A x, required to be >= 0.
            b_list = []
            c = b_matrix.tolist()
            for i in c:
                b_list.append(i[0])
            B = array(b_list)
            result = B - dot(A_matrix,x)
            return result
        def objFunc(x):
            # Linear objective f' x; SLSQP needs a scalar float back.
            x = array(transpose(mat(x)))
            result = dot(transpose(f_matrix), x)
            return float(result)
        con = ({'type': 'ineq', 'fun': constraintFunc})
        res = optimize.minimize(objFunc, x_matrix, method='SLSQP', constraints=con)
        # Drop the slack entries; keep only the D polynomial coefficients.
        return transpose(mat(res.x[:D]))
"repo_name": "dzh123xt/pythonML",
"path": "src/Regression/rr.py",
"copies": "1",
"size": "1511",
"license": "mit",
"hash": 1648903748403743200,
"line_mean": 35.8780487805,
"line_max": 122,
"alpha_frac": 0.5129053607,
"autogenerated": false,
"ratio": 3.4263038548752833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9369145688597045,
"avg_score": 0.01401270539564771,
"num_lines": 41
} |
__author__ = 'dengzhihong'
from src.Regression.base import *
from scipy import optimize
class LASSO(RegressionBase):
    """L1-regularised least squares (LASSO) solved as a QP via SLSQP.

    theta is split into non-negative halves (theta = x+ - x-), which turns
    the non-smooth L1 penalty into a linear term of the doubled variable.

    Fixes: dataset-export residue ("| {") fused onto the final return line
    removed; unused local `n` dropped; the x-halving index hoisted out of
    the recombination loop.
    """
    @staticmethod
    def run(sampx, sampy, K):
        """Fit a degree-K polynomial with an L1 penalty.

        Returns a plain list of K+1 coefficients.
        """
        y = RegressionBase.strlistToFloatvector(sampy)
        fai_matrix = RegressionBase.constructFaiMartix(sampx, K)
        product_fai = np.dot(fai_matrix, np.transpose(fai_matrix))
        D = K + 1
        Lambda = 0.18  # fixed regularisation strength
        # Quadratic term for the doubled variable x = [x+; x-].
        H_matrix = np.array(np.row_stack( (np.column_stack( (product_fai,-product_fai) ), np.column_stack( (-product_fai,product_fai) )) ))
        f_matrix = np.array(np.row_stack( (np.dot(fai_matrix,y), - np.dot(fai_matrix, y) ) ))
        # The L1 penalty is absorbed into the linear term.
        f_matrix = -f_matrix + Lambda
        x_matrix = np.array(np.row_stack( (np.ones( (D,1) ), np.ones((D,1)) ) ))
        def constraintFunc(x):
            # Non-negativity of both halves ('ineq' means the return >= 0).
            return x
        def objFunc(x):
            # 0.5 x'Hx + f'x, returned as a scalar float for SLSQP.
            result = np.dot(np.dot(np.transpose(x), H_matrix), x)/2 + np.dot(np.transpose(f_matrix), x)
            return float(result)
        con = ({'type': 'ineq', 'fun': constraintFunc})
        res = optimize.minimize(objFunc, x_matrix, method='SLSQP', constraints=con)
        # Recombine the halves: theta_i = x+_i - x-_i.
        half = res.x.shape[0]/2
        theta = []
        for i in range(half):
            theta.append(res.x[i] - res.x[i+half])
        return theta
"repo_name": "dzh123xt/pythonML",
"path": "src/Regression/lasso.py",
"copies": "1",
"size": "1462",
"license": "mit",
"hash": 4171919664997881300,
"line_mean": 39.6388888889,
"line_max": 139,
"alpha_frac": 0.5451436389,
"autogenerated": false,
"ratio": 3.1508620689655173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9139340277845411,
"avg_score": 0.011333086004021343,
"num_lines": 36
} |
__author__ = 'dengzhihong'
from src.Regression.base import *
class BR(RegressionBase):
    """Bayesian linear regression with a zero-mean Gaussian prior on theta.

    Fix: dataset-export residue ("| {") fused onto the final return line
    has been removed; logic is otherwise unchanged.
    """
    @staticmethod
    def getPredictionVariance(star_scalar_x, Sigma, theta, K):
        """Return phi(x)' Sigma theta for one input x.

        NOTE(review): the textbook predictive variance would be
        phi' Sigma phi; this multiplies by `theta` instead. Kept as
        implemented -- confirm against the callers' intent.
        """
        Fai = np.mat(RegressionBase.getFaiList(star_scalar_x, K))
        return float(np.dot(np.dot(Fai,Sigma),theta))
    @staticmethod
    def getPredictionVarianceList(star_list_x, Sigma, theta, K):
        """Elementwise getPredictionVariance over a list of inputs."""
        x = RegressionBase.toFloatList(star_list_x)
        y = []
        for element in x:
            y.append(BR.getPredictionVariance(element, Sigma, theta, K))
        return y
    @staticmethod
    def run(sampx, sampy, K):
        """Return the posterior (mean, covariance) of the coefficients.

        Prior scale alpha and noise variance are fixed constants below.
        """
        alpha = 0.6
        variance = 5
        y = RegressionBase.strlistToFloatvector(sampy)
        fai_matrix = RegressionBase.constructFaiMartix(sampx, K)
        # Posterior covariance: (PHI PHI' / var + I / alpha)^-1
        sigma_theta_head = np.dot(fai_matrix, fai_matrix.transpose()) * (1.0/variance)
        sigma_theta_head += (1.0/alpha) * np.eye(sigma_theta_head.shape[0], sigma_theta_head.shape[1])
        sigma_theta_head = sigma_theta_head.I
        # Posterior mean: Sigma PHI y / var
        miu_theta_head = np.dot(sigma_theta_head,fai_matrix)
        miu_theta_head = np.dot(miu_theta_head, y) * (1.0/variance)
        return miu_theta_head, sigma_theta_head
"repo_name": "dzh123xt/pythonML",
"path": "src/Regression/br.py",
"copies": "1",
"size": "1182",
"license": "mit",
"hash": -3971948756549478400,
"line_mean": 34.8484848485,
"line_max": 102,
"alpha_frac": 0.6429780034,
"autogenerated": false,
"ratio": 3.143617021276596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42865950246765955,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dengzhihong'
from src.Regression.ls import *
from src.Regression.rls import *
from src.Regression.rr import *
from src.Regression.br import *
from src.Regression.lasso import *
import numpy as np
from src.Methods.draw_diagram import *
import random
def testWithRegression(sampx, sampy, polyx, polyy, K, MethodList):
y = RegressionBase.toFloatList(polyy)
for method in MethodList:
SquareMean = 0
prediction = Regression(sampx, sampy, polyx, polyy, K, method)
for i in range(len(prediction)):
SquareMean += (prediction[i] - y[i])**2
SquareMean /= len(prediction)
print method
print 'Square Mean Error = ', SquareMean
def testWithReduction(sampx, sampy, polyx, polyy, K, MethodList):
ResultList = []
TestTimes = 1
ReductionRateList = [0, 15, 30, 45, 60, 75, 90]
for rate in ReductionRateList:
for method in MethodList:
ResultList.append( regressionWithReduction(sampx, sampy, polyx, polyy, K, TestTimes, rate, method) )
x = np.array(ResultList).reshape(len(ReductionRateList), len(MethodList)).tolist()
for row in x:
for col in row:
print str(col) + '\t',
print
for j in range(len(MethodList)):
MeanError = []
for i in range(len(ReductionRateList)):
MeanError.append(x[i][j])
showMeanErrorDiagram(ReductionRateList, MeanError, 'Average MeanError, Method = ', MethodList[j])
def testWithLargeValue(sampx, sampy, polyx, polyy, K, MethodList):
    """Corrupt a few random samples with a large offset and re-run every
    regression method, to compare robustness to outliers.

    Mutates `sampy` in place: Num randomly chosen entries get +200.

    Improvement: the hand-rolled rejection-sampling while-loop was replaced
    by random.sample, which draws the same uniform set of distinct indices
    and raises instead of looping forever when len(sampx) < Num.
    """
    N = len(sampx)
    Num = 5  # how many samples to corrupt
    for i in random.sample(range(N), Num):
        sampy[i] = str(float(sampy[i]) + 200)
    for method in MethodList:
        Regression(sampx, sampy, polyx, polyy, K, method)
def testWithHigherK(sampx, sampy, polyx, polyy, K, MethodList):
    """Run every requested regression method at polynomial order K."""
    for name in MethodList:
        Regression(sampx, sampy, polyx, polyy, K, name)
def Regression(sampx, sampy, polyx, polyy, K, method):
sigma = 0
if(method == 'LS'):
theta = LS.run(sampx, sampy, K)
elif(method == 'RLS'):
theta = RLS.run(sampx, sampy, K)
elif(method == 'LASSO'):
theta = LASSO.run(sampx, sampy, K)
elif(method == 'RR'):
theta = RR.run(sampx, sampy, K)
elif(method == 'BR'):
theta, sigma = BR.run(sampx, sampy, K)
else:
print 'No method'
return 0
prediction = RegressionBase.getPredictionValueList(polyx, theta, K)
if(method == 'BR'):
showRegressionDiagramBR(sampx, sampy, polyx, polyy, prediction, theta, sigma, K, method)
else:
showRegressionDiagramExceptBR(sampx, sampy, polyx, polyy, prediction, method)
return prediction
def regressionWithReduction(sampx, sampy, polyx, polyy, K, TestTimes, ReducitionRate, method):
    """Average mean-squared error of `method` fitted on reduced samples.

    Repeats TestTimes times: discards ReducitionRate percent of the
    samples, fits, predicts the poly curve, and accumulates the squared
    error; the first repetition is also plotted. Returns the averaged
    scalar error.
    """
    print str(K) + 'th ' + 'TestTimes = ' + str(TestTimes) + ' ReductionRate = ' + str(ReducitionRate) + '% Method = ' + method
    MeanErrorScalar = 0.0
    PredictionSum = np.zeros(len(polyx))
    for i in range(TestTimes):
        # Work on copies so the caller's sample lists are not consumed.
        x = sampx[:]
        y = sampy[:]
        Theta, x, y = getThetaWithReduction(x, y, K, ReducitionRate, method)
        Prediction = BR.getPredictionValueList(polyx, Theta, K)
        #PredictionSum += Prediction
        if(i == 0):
            showPredictionDiagramWithReduction(x, y, polyx, polyy, Prediction, K, ReducitionRate, method)
        MeanErrorVector = np.array(RegressionBase.strlistToFloatvector(Prediction) - RegressionBase.strlistToFloatvector(polyy))
        MeanErrorScalar += np.dot(MeanErrorVector.T, MeanErrorVector)
    #PredictionSum /= TestTimes
    MeanErrorScalar /= (TestTimes * len(polyy))
    MeanErrorScalar = float(MeanErrorScalar)
    return MeanErrorScalar
def getThetaWithReduction(sampx, sampy, K, ReductionRate, method):
N = len(sampx)
reduction = int(N * ReductionRate * 0.01)
rest = N - reduction
randlist = []
new_x = []
new_y = []
for i in range(rest):
r = random.randint(0, N-1)
while(randlist.count(r) != 0):
r = random.randint(0,N-1)
randlist.append(r)
for i in randlist:
new_x.append(sampx[i])
new_y.append(sampy[i])
if(method == 'LS'):
theta = LS.run(new_x, new_y, K)
elif(method == 'RLS'):
theta = RLS.run(new_x, new_y, K)
elif(method == 'LASSO'):
theta = LASSO.run(new_x, new_y, K)
elif(method == 'RR'):
theta = RR.run(new_x, new_y, K)
elif(method == 'BR'):
theta, variance = BR.run(new_x, new_y, K)
else:
print 'No method'
return 0
return theta, new_x, new_y
| {
"repo_name": "dzh123xt/pythonML",
"path": "src/Methods/regression_methods.py",
"copies": "1",
"size": "4750",
"license": "mit",
"hash": 1238776215997315300,
"line_mean": 34.447761194,
"line_max": 128,
"alpha_frac": 0.6210526316,
"autogenerated": false,
"ratio": 3.2072923700202565,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4328345001620256,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dengzhihong'
import matplotlib.pyplot as plt
from src.Methods.process_data import *
from src.Regression.br import *
def showDiagram(x, y, title="", MethodName=""):
    """Scatter-plot the points (x, y) on a fixed [-20, 20] grid."""
    axes = plt.figure().add_subplot(111)
    axes.set_title(title + ' Algorithm: ' + MethodName, fontsize=18)
    plt.axis([-20, 20, -20, 20])
    # Major grid lines on both axes.
    axes.xaxis.grid(True, which='major')
    axes.yaxis.grid(True, which='major')
    axes.set_xlabel('x1')
    axes.set_ylabel('x2')
    plt.plot(x, y, 'b.')
    plt.show()
def showDiagramInCluster(data, Z, title=""):
    """Scatter-plot clustered 2-D points, one color series per cluster.

    Z is a sequence of integer cluster indices, one per row of data.

    Generalisation: the bucket lists were hard-coded to 4 clusters, which
    raised IndexError for K > 4; they are now sized from the labels (up to
    the 7 available color patterns) while keeping the old 4-series layout
    for K <= 4.
    """
    N = data.shape[0]
    X = np.array(data)
    ColorPattern = ['r.', 'g.', 'b.', 'y.', 'k.', 'm.', 'c.']
    # At least 4 buckets (backward-compatible plot layout), grown to cover
    # the largest label actually present.
    buckets = max(4, max(Z) + 1 if N else 4)
    ClusterX = [[] for _ in range(buckets)]
    ClusterY = [[] for _ in range(buckets)]
    for i in range(N):
        ClusterX[Z[i]].append(X[i][0])
        ClusterY[Z[i]].append(X[i][1])
    K = len(ClusterX)
    ax = plt.figure().add_subplot(111)
    ax.set_title(title , fontsize = 18)
    plt.axis([-20,20,-20,20])
    ax.xaxis.grid(True, which='major')
    ax.yaxis.grid(True, which='major')
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    for k in range(K):
        plt.plot(ClusterX[k], ClusterY[k], ColorPattern[k], label = 'c' + str(k))
    plt.show()
def showLabeledDiagram(dataA_X, dataA_Y, title):
    """Scatter-plot ground-truth labelled 2-D points, one color per class.

    Generalisation: the bucket lists were hard-coded to 4 classes, which
    raised IndexError for more; they are now sized from the labels (up to
    the 7 available color patterns) while keeping the old 4-series layout
    for smaller label sets.
    """
    X, Y = RawData2FloatXYList(dataA_X)
    Z = RawLabel2IntList(dataA_Y)
    N = len(Z)
    ColorPattern = ['r.', 'g.', 'b.', 'y.', 'k.', 'm.', 'c.']
    buckets = max(4, max(Z) + 1 if N else 4)
    ClusterX = [[] for _ in range(buckets)]
    ClusterY = [[] for _ in range(buckets)]
    for i in range(N):
        ClusterX[Z[i]].append(X[i])
        ClusterY[Z[i]].append(Y[i])
    K = len(ClusterX)
    ax = plt.figure().add_subplot(111)
    ax.set_title('Real Labeled diagram of ' + title , fontsize = 18)
    plt.axis([-20,20,-20,20])
    ax.xaxis.grid(True, which='major')
    ax.yaxis.grid(True, which='major')
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    for k in range(K):
        plt.plot(ClusterX[k], ClusterY[k], ColorPattern[k], label = 'c' + str(k))
    plt.show()
def showMeanErrorDiagram(x, y, title, method):
    """Plot mean error (y) against reduction rate (x) for one method."""
    subplot = plt.figure().add_subplot(111)
    subplot.set_title(title + method, fontsize=18)
    plt.plot(x, y, 'bo-', label='MeanError')
    plt.axis([0, 100, 0, 20])
    plt.show()
    # Clear the figure so successive calls start from a blank canvas.
    plt.clf()
def showPredictionDiagramWithReduction(sampx, sampy, polyx, polyy, prediction, K, ReducitionRate, method):
    """Plot prediction vs. ground truth after fitting on reduced samples."""
    # NOTE: the title keeps the historical 'ReducitionRate' spelling.
    header = 'K = ' + str(K) + ' ReducitionRate = ' + str(ReducitionRate) + '% Method = ' + method
    plt.figure().add_subplot(111).set_title(header, fontsize=18)
    curve_x = map(float, polyx)
    plt.plot(curve_x, prediction, 'r-', label='prediction', linewidth=1)
    plt.plot(curve_x, map(float, polyy), 'g-', label='real', linewidth=0.8)
    plt.plot(map(float, sampx), map(float, sampy), 'ko', label='sample')
    plt.legend()
    plt.show()
def showRegressionDiagramExceptBR(sampx, sampy, polyx, polyy, prediction, title):
    """Overlay prediction, true curve and raw samples (non-Bayesian fits)."""
    plt.figure().add_subplot(111).set_title(title, fontsize=18)
    curve_x = map(float, polyx)
    plt.plot(curve_x, prediction, 'r-', label='prediction', linewidth=1)
    plt.plot(curve_x, map(float, polyy), 'g-', label='real', linewidth=0.8)
    plt.plot(map(float, sampx), map(float, sampy), 'ko', label='sample')
    plt.legend()
    plt.show()
def showRegressionDiagramBR(sampx, sampy, polyx, polyy, prediction, theta, sigma, K, title):
    """Plot a Bayesian-regression fit together with its +/- variance band.

    Fixes: the lower band was computed with '+' (so both bands were
    identical) despite being labelled 'prediction - variance'; it now
    subtracts. Dataset-export residue fused onto the last line removed.
    """
    plt.figure().add_subplot(111).set_title(title, fontsize=18)
    plt.plot(map(float, polyx), prediction, 'r-', label='prediction', linewidth=1)
    plt.plot(map(float, polyx), map(float, polyy), 'g-', label='real', linewidth=0.8)
    plt.plot(map(float, sampx), map(float, sampy), 'ko', label='sample')
    variance = BR.getPredictionVarianceList(polyx, sigma, theta, K)
    add_variance = (np.array(prediction) + np.array(variance)).tolist()
    # Was '+': the lower band duplicated the upper one.
    sub_variance = (np.array(prediction) - np.array(variance)).tolist()
    plt.plot(map(float, polyx), add_variance, 'b-', label='prediction + variance', linewidth=1)
    plt.plot(map(float, polyx), sub_variance, 'g--', label='prediction - variance', linewidth=2)
    plt.legend()
    plt.show()
"repo_name": "dzh123xt/pythonML",
"path": "src/Methods/draw_diagram.py",
"copies": "1",
"size": "3966",
"license": "mit",
"hash": -3467962224327320600,
"line_mean": 39.0707070707,
"line_max": 106,
"alpha_frac": 0.6129601614,
"autogenerated": false,
"ratio": 2.8843636363636365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8891209509834495,
"avg_score": 0.021222857585828335,
"num_lines": 99
} |
__author__ = 'dengzhihong'
import numpy as np
from src.Cluster.base import *
from src.Methods.math_methods import *
from src.Methods.draw_diagram import *
from src.Methods.process_data import *
import random
class EM(ClusterBase):
@staticmethod
def E_Step(X, Mean, Cov, Pai):
K = Mean.shape[0]
N = X.shape[0]
Z = np.zeros((N, K))
Temp = 0
Sum = []
for i in range(N):
for k in range(K):
'''
print k
print 'X[i]: ', X[i]
print 'Mean[k]: ', Mean[k]
print 'Cov[k]: ', Cov[k]
print 'Pai[k]: ', Pai[k]
'''
#a = Pai[k] * multivariateGaussian( transpose(X[i]), transpose(Mean[k]), Cov[k])
a = Pai[k] * multivariateGaussian( X[i], Mean[k], Cov[k])
Temp += a
Sum.append(Temp)
Temp = 0
for i in range(N):
for j in range(K):
#b = Pai[j] * multivariateGaussian( transpose(X[i]), transpose(Mean[j]), Cov[j])
b = Pai[j] * multivariateGaussian( X[i], Mean[j], Cov[j])
#print 'Z[i][j]: ', Z[i][j]
#print 'Sum[i]: ', Sum[i]
#print 'b: ', b
Z[i][j] = b / Sum[i]
for i in range(N):
if(EM.sumLine(Z[i]) - 1.0 > 0.00001):
print 'False'
print EM.sumLine(Z[i])
return Z
@staticmethod
def sumLine(array):
N = 4
sum = 0
for i in range(N):
sum += float(array[i])
return sum
@staticmethod
def M_Step(X, Z):
N = Z.shape[0]
K = Z.shape[1]
D = X.shape[1]
Mean = np.zeros((K, 1, D))
Cov = np.zeros((K, D, D))
TempCov = np.zeros((D, D))
Pai = []
Meanj = np.zeros((1, D))
Nj = []
TempNj = 0
Covj = np.zeros((D,D))
# Get Nj
for j in range(K):
for i in range(N):
TempNj += Z[i][j]
Nj.append(TempNj)
TempNj = 0
#print 'Nj:'
#print Nj
# Get Paij
for j in range(K):
Pai.append(Nj[j]/N)
#print 'Pai:'
#print Pai
# Get Meanj
for j in range(K):
for i in range(N):
Meanj += Z[i][j] * X[i]
Meanj /= Nj[j]
Mean[j] = Meanj
Meanj = np.zeros((1, D))
#print 'Mean: '
# Get Cov
for j in range(K):
for i in range(N):
#print '---'
#print TempCov
TempCov += Z[i][j] * np.dot(np.transpose(X[i] - Mean[j]), X[i] - Mean[j])
Cov[j] = TempCov/Nj[j]
TempCov = np.zeros((D, D))
#print Cov
return Mean, Cov, Pai
@staticmethod
def getQ(X, Z, Mean, Cov, Pai):
N = Z.shape[0]
K = Z.shape[1]
Q = 0
for i in range(N):
for j in range(K):
#Q += Z[i][j] * math.log(multivariateGaussian(X[i], Mean[j], Cov[j]) * Pai[j])
Q += Z[i][j] * multivariateGaussian(X[i], Mean[j], Cov[j]) * Pai[j]
return Q
@staticmethod
def genRandCov(data, K):
D = data.shape[1]
o_cov = np.cov(data.transpose())
Cov = np.zeros((K, D, D))
for k in range(K):
Cov[k] = o_cov + np.random.rand(2,2)
return Cov
@staticmethod
def genPai(K):
Pai = []
P = 0
for k in range(K):
Pai.append(1.0/K)
for i in range(K):
p = random.uniform(0, 1.0/K - 0.05)
Pai[i] -= p
Pai[K - 1 - i] += p
return np.array(Pai)
    @staticmethod
    def runEM(data, K, iteration):
        """Run EM clustering on *data* with K Gaussian components.

        Alternates E and M steps for *iteration* rounds (the convergence
        test on Q is commented out below), then hard-assigns every point
        to its most responsible component.

        Parameters
        ----------
        data : ndarray, shape (N, D)
            Data points, one per row.
        K : int
            Number of mixture components.
        iteration : int
            Number of EM rounds to perform before stopping.

        Returns
        -------
        list of int
            Cluster index (0..K-1) for each of the N data points.
        """
        D = data.shape[1]
        N = data.shape[0]
        Z = np.zeros((N,1))
        # Random initial means (delegated to the shared cluster base class).
        Mean = ClusterBase.genRandMean(data, K)
        # Random initial covariances and mixing proportions.
        Cov = EM.genRandCov(data, K)
        print 'Mean: \n', Mean
        print 'Cov: \n', Cov
        Pai = EM.genPai(K)
        print 'Pai: \n', Pai
        count = 0
        Q_old = -9999999
        Q_new = 0
        while(True):
            print count
            print '+++++++++++++++++++++++++++++++++++++++++'
            #displayData(Z, Mean, Cov, Pai, 'Old Parameter')
            print 'E-Step'
            Z = EM.E_Step(data, Mean, Cov, Pai)
            #print Z
            print 'M-Step'
            Mean, Cov, Pai = EM.M_Step(data, Z)
            #displayData(Z, Mean, Cov, Pai, 'New Parameter')
            Q_new = EM.getQ(data, Z, Mean, Cov, Pai)
            print 'Q = ', Q_new
            '''
            if(Q_new > Q_old and (Q_new - Q_old < 0.000000000001)):
                print 'Converge!!'
                break
            '''
            Q_old = Q_new
            # Stop purely on the iteration budget (convergence check disabled).
            if(count == iteration):
                print 'Iteration Reached'
                break
            count += 1
        # Hard assignment: pick the component with the largest responsibility.
        Z_Normal = np.zeros((N,1))
        for i in range(N):
            index = EM.findMax(Z[i])
            Z_Normal[i] = index
        return Z_Normal.astype(int).reshape(-1).tolist()
@staticmethod
def findMax(list):
MaxIndex = -1
Max = -999999
#print list
for i in range(len(list)):
if(list[i] > Max):
Max = list[i]
MaxIndex = i
return MaxIndex
    @staticmethod
    def testWithEM(data, title, K):
        # Convert the raw samples into an array of 2-D points, then cluster
        # them with EM for a fixed budget of 100 rounds; the labeled result
        # is plotted by the line below under the name "EM_<title>".
        X = RawData2XYArray(data)
        Z = EM.runEM(X, K, 100)
showDiagramInCluster(X, Z, "EM_" + title) | {
"repo_name": "dzh123xt/pythonML",
"path": "src/Cluster/em.py",
"copies": "1",
"size": "5685",
"license": "mit",
"hash": -6593262899877286000,
"line_mean": 28.158974359,
"line_max": 96,
"alpha_frac": 0.4335971856,
"autogenerated": false,
"ratio": 3.2747695852534564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4208366770853456,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dengzhihong'
import numpy as np
class RegressionBase(object):
    """Shared helpers for polynomial regression.

    Provides conversions between string-valued samples and numpy column
    vectors, polynomial feature ("fai") construction, and prediction
    helpers used by the concrete regression implementations.
    """

    @staticmethod
    def strlistToFloatvector(strlist):
        """Turn a list of numeric strings into an (n, 1) column matrix."""
        # np.asmatrix replaces the deprecated/removed np.mat alias;
        # behavior is identical.
        return np.transpose(np.asmatrix([float(s) for s in strlist]))

    @staticmethod
    def getFaiList(scalar_x, K):
        """Return the polynomial feature list [x**0, x**1, ..., x**K]."""
        return [scalar_x ** i for i in range(0, K + 1)]

    @staticmethod
    def constructFaiMartix(strlist_x, K):
        """Build the (K+1, n) design matrix whose columns are fai(x).

        (The historical 'Martix' spelling is kept because callers use it.)
        """
        rows = [RegressionBase.getFaiList(float(s), K) for s in strlist_x]
        return np.transpose(np.asmatrix(rows))

    @staticmethod
    def getPredictionValue(star_scalar_x, theta, K):
        """Predict y for a single float x given the weight vector theta."""
        Fai = np.asmatrix(RegressionBase.getFaiList(star_scalar_x, K))
        return float(np.dot(Fai, theta))

    @staticmethod
    def getPredictionValueList(star_list_x, theta, K):
        """Predict y for every x in a list of numeric strings."""
        xs = RegressionBase.toFloatList(star_list_x)
        return [RegressionBase.getPredictionValue(x, theta, K) for x in xs]

    @staticmethod
    def toFloatList(stringlist):
        """Turn a list of numeric strings into a list of floats.

        The original class defined this method twice with identical
        bodies (the second silently shadowed the first); the duplicate
        has been removed.
        """
        return [float(s) for s in stringlist]

    @staticmethod
    def run(sampx, sampy, K):
        """Template hook implemented by concrete regression classes."""
        pass
"repo_name": "dzh123xt/pythonML",
"path": "src/Regression/base.py",
"copies": "1",
"size": "2214",
"license": "mit",
"hash": -422624193242089400,
"line_mean": 32.5606060606,
"line_max": 92,
"alpha_frac": 0.6377597109,
"autogenerated": false,
"ratio": 3.739864864864865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.98574773604392,
"avg_score": 0.004029443065132839,
"num_lines": 66
} |
__author__ = 'dengzhihong'
from src.Methods.TestMethods import *
def testWithChallenge(train_vectors, train_labels, test_vectors, test_labels, trainset, PCA_K, C, GAMMA, method):
    """Evaluate a digit classifier on the challenge split and return accuracy.

    Builds the training set (4000 vectors of 784 raw pixel features) and
    test set (50 vectors) — sizes are hard-coded — optionally reduces
    dimensionality with PCA, trains the classifier selected by *method*,
    and returns the test accuracy in percent.

    Parameters (all data lists arrive as strings and are converted here):
      train_vectors/train_labels : flattened training pixels and labels
      test_vectors/test_labels   : flattened test pixels and labels
      trainset : auxiliary (row, col) pairs — currently unused, see NOTE
      PCA_K    : number of PCA components; 0 disables PCA
      C, GAMMA : SVM hyper-parameters (used only for the 'svm_rbf' kernel)
      method   : one of 'svm_linear', 'svm_poly', 'svm_rbf', 'lr', '1nn'
    """
    TrainVectors = array(toFloatList(train_vectors)).reshape(4000, 784)
    TrainLabel = array(toFloatList(train_labels))
    TestVectors = array(toFloatList(test_vectors)).reshape(50, 784)
    TestLabel = array(toFloatList(test_labels))
    TrainNum = len(trainset)
    # NOTE(review): `train` is computed but no longer used — the
    # prepareData call below is commented out.
    train = array(toFloatList(trainset)).reshape(TrainNum/2,2) - 1
    # Normalize train and test together so they share the same scaling,
    # then (optionally) project both through the same PCA basis.
    DataVectors = vstack((TrainVectors, TestVectors))
    DataVectors = normalization(DataVectors)
    if(PCA_K != 0):
        k = PCA_K
        pca = PCA(n_components=k)
        DataVectors = pca.fit_transform(DataVectors)
        sum = 0
        for i in range(k):
            sum += pca.explained_variance_ratio_[i]
        RetainRate = sum * 100
        #print "retain " + str(RetainRate) +"% of the variance"
    TrainData = DataVectors[0:4000]
    TestData = DataVectors[4000:4050]
    #TrainData, TrainLabel = prepareData(TrainData, TrainLabel, train)
    #outputTrainingData(TrainData, TrainLabel, PCA_K)
    clf = 0
    ClfSet = []
    # Select and train the classifier.  The SVM variants train 10
    # one-vs-rest classifiers (one per digit); 'lr' and '1nn' use a
    # single multiclass estimator.
    if(method == 'svm_linear'):
        ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, 'linear')
    elif(method == 'svm_poly'):
        ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, 'poly')
    elif(method == 'svm_rbf'):
        ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, 'rbf', c=C, Gamma=GAMMA)
    elif(method == 'lr'):
        clf = LogisticRegression()
        clf.fit(TrainData, TrainLabel)
    elif(method == '1nn'):
        clf = KNeighborsClassifier(n_neighbors=1)
        clf.fit(TrainData, TrainLabel)
    else:
        print "Please Choose a kernel"
        exit()
    N = TestData.shape[0]
    correct = 0.0
    # Per-digit correct counts (index = digit 0..9).
    CorrectNum = []
    for i in range(10):
        CorrectNum.append(0)
    if(method[0:3] == 'svm'):
        # One-vs-rest: predict the digit whose classifier is most confident.
        for i in range(N):
            confidence = -999
            classification = -1
            for j in range(10):
                temp = ClfSet[j].decision_function(TestData[i])
                if(confidence < temp):
                    confidence = temp
                    classification = j
            if(classification == TestLabel[i]):
                CorrectNum[classification] += 1
                correct += 1
    else:
        result = clf.predict(TestData)
        for i in range(N):
            if(result[i] == TestLabel[i]):
                CorrectNum[int(result[i])] += 1
                correct += 1
    Correctness = correct/N * 100
    '''
    print "Accuracy: ", Correctness, "%"
    for i in range(10):
        print "digit ", i , ": ", CorrectNum[i], "/10"
    '''
    return Correctness
| {
"repo_name": "dzh123xt/DigitRecognition",
"path": "src/Methods/ChallengeTest.py",
"copies": "1",
"size": "2811",
"license": "mit",
"hash": 5843190010953209000,
"line_mean": 31.6860465116,
"line_max": 113,
"alpha_frac": 0.5862682319,
"autogenerated": false,
"ratio": 3.617760617760618,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4704028849660618,
"avg_score": null,
"num_lines": null
} |
import os.path as op
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
import pytest
from mne import io, Epochs, read_events, pick_types
from mne.utils import requires_sklearn
from mne.decoding import compute_ems, EMS
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
curdir = op.join(op.dirname(__file__))
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
@requires_sklearn
def test_ems():
    """Test event-matched spatial filters (compute_ems and the EMS class)."""
    from sklearn.model_selection import StratifiedKFold
    raw = io.read_raw_fif(raw_fname, preload=False)
    # create unequal number of events so equalize_event_counts has work to do
    events = read_events(event_name)
    events[-2, 2] = 3
    # Subsample channels to keep the test fast.
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    # Unequal event counts must raise before equalization ...
    pytest.raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])
    epochs.equalize_event_counts(epochs.event_id)
    # ... and unknown condition names must raise afterwards.
    pytest.raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])
    surrogates, filters, conditions = compute_ems(epochs)
    assert_equal(list(set(conditions)), [1, 3])

    # With three conditions, compute_ems needs an explicit pair.
    events = read_events(event_name)
    event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)
    epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs.equalize_event_counts(epochs.event_id)
    n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])
    pytest.raises(ValueError, compute_ems, epochs)
    surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l'])
    assert_equal(n_expected, len(surrogates))
    assert_equal(n_expected, len(conditions))
    assert_equal(list(set(conditions)), [2, 3])

    # test compute_ems cv
    epochs = epochs['aud_r', 'vis_l']
    epochs.equalize_event_counts(epochs.event_id)
    cv = StratifiedKFold(n_splits=3)
    compute_ems(epochs, cv=cv)
    compute_ems(epochs, cv=2)
    pytest.raises(ValueError, compute_ems, epochs, cv='foo')
    pytest.raises(ValueError, compute_ems, epochs, cv=len(epochs) + 1)
    raw.close()

    # EMS transformer, check that identical to compute_ems
    X = epochs.get_data()
    y = epochs.events[:, 2]
    X = X / np.std(X)  # X scaled outside cv in compute_ems
    Xt, coefs = list(), list()
    ems = EMS()
    assert_equal(ems.__repr__(), '<EMS: not fitted.>')
    # manual leave-one-out to avoid sklearn version problem
    for test in range(len(y)):
        train = np.setdiff1d(range(len(y)), np.atleast_1d(test))
        ems.fit(X[train], y[train])
        coefs.append(ems.filters_)
        Xt.append(ems.transform(X[[test]]))
    assert_equal(ems.__repr__(), '<EMS: fitted with 4 filters on 2 classes.>')
    # Averaged per-fold filters/surrogates must match compute_ems output.
    assert_array_almost_equal(filters, np.mean(coefs, axis=0))
    assert_array_almost_equal(surrogates, np.vstack(Xt))
| {
"repo_name": "drammock/mne-python",
"path": "mne/decoding/tests/test_ems.py",
"copies": "4",
"size": "3154",
"license": "bsd-3-clause",
"hash": 2193042298242847200,
"line_mean": 36.5476190476,
"line_max": 78,
"alpha_frac": 0.6490171211,
"autogenerated": false,
"ratio": 3.0095419847328246,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 84
} |
import os.path as op
from nose.tools import assert_equal, assert_raises
from mne import io, Epochs, read_events, pick_types
from mne.utils import requires_sklearn
from mne.decoding import compute_ems
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
curdir = op.join(op.dirname(__file__))
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
@requires_sklearn
def test_ems():
    """Test event-matched spatial filters (nose-based legacy version)."""
    raw = io.Raw(raw_fname, preload=False)
    # create unequal number of events so equalize_event_counts has work to do
    events = read_events(event_name)
    events[-2, 2] = 3
    # Subsample channels to keep the test fast.
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    # Unequal counts must raise before equalization; unknown names after.
    assert_raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])
    epochs.equalize_event_counts(epochs.event_id, copy=False)
    assert_raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])
    surrogates, filters, conditions = compute_ems(epochs)
    assert_equal(list(set(conditions)), [1, 3])

    # With three conditions, compute_ems needs an explicit pair.
    events = read_events(event_name)
    event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)
    epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs.equalize_event_counts(epochs.event_id, copy=False)
    n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])
    assert_raises(ValueError, compute_ems, epochs)
    surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l'])
    assert_equal(n_expected, len(surrogates))
    assert_equal(n_expected, len(conditions))
    assert_equal(list(set(conditions)), [2, 3])
    raw.close()
| {
"repo_name": "Odingod/mne-python",
"path": "mne/decoding/tests/test_ems.py",
"copies": "19",
"size": "1969",
"license": "bsd-3-clause",
"hash": -4770249439056926000,
"line_mean": 34.1607142857,
"line_max": 77,
"alpha_frac": 0.6480446927,
"autogenerated": false,
"ratio": 2.952023988005997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 56
} |
import os.path as op
from nose.tools import assert_equal, assert_raises
from mne import io, Epochs, read_events, pick_types
from mne.utils import _TempDir, requires_sklearn
from mne.decoding import compute_ems
tempdir = _TempDir()
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
curdir = op.join(op.dirname(__file__))
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
@requires_sklearn
def test_ems():
    """Test event-matched spatial filters (nose + tempdir legacy version)."""
    raw = io.Raw(raw_fname, preload=False)
    # create unequal number of events so equalize_event_counts has work to do
    events = read_events(event_name)
    events[-2, 2] = 3
    # Subsample channels to keep the test fast.
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    # Unequal counts must raise before equalization; unknown names after.
    assert_raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])
    epochs.equalize_event_counts(epochs.event_id, copy=False)
    assert_raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])
    surrogates, filters, conditions = compute_ems(epochs)
    assert_equal(list(set(conditions)), [1, 3])

    # With three conditions, compute_ems needs an explicit pair.
    events = read_events(event_name)
    event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)
    epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs.equalize_event_counts(epochs.event_id, copy=False)
    n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])
    assert_raises(ValueError, compute_ems, epochs)
    surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l'])
    assert_equal(n_expected, len(surrogates))
    assert_equal(n_expected, len(conditions))
    assert_equal(list(set(conditions)), [2, 3])
    raw.close()
| {
"repo_name": "jaeilepp/eggie",
"path": "mne/decoding/tests/test_ems.py",
"copies": "2",
"size": "2006",
"license": "bsd-2-clause",
"hash": 3233105237989224400,
"line_mean": 33.5862068966,
"line_max": 77,
"alpha_frac": 0.646560319,
"autogenerated": false,
"ratio": 2.9630723781388477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4609632697138848,
"avg_score": null,
"num_lines": null
} |
from copy import deepcopy
import numpy as np
from mne.report import Report
from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
from mne import pick_types
from mne.utils import logger
from mne.defaults import _handle_default
from .viz import _prepare_filter_plot, _render_components_table
from .utils import get_data_picks
def check_apply_filter(raw, subject, filter_params=None,
                       notch_filter_params=None, plot_fmin=None,
                       plot_fmax=None, n_jobs=1, figsize=None, show=True,
                       report=None, img_scale=1.0):
    """Apply filtering in place and save diagnostic before/after PSD plots.

    Parameters
    ----------
    raw : instance of Raw
        Raw measurements to be filtered (modified in place).
    subject : str
        The name of the subject.
    filter_params : dict | list of dict | None
        The parameters passed to raw.filter. If list, raw.filter will be
        invoked len(filter_params) times. Defaults to None. If None, expands
        to:
            dict(l_freq=0.5, h_freq=200, n_jobs=n_jobs,
                 method='fft', l_trans_bandwidth=0.1, h_trans_bandwidth=0.5)
    notch_filter_params : dict | None
        The parameters passed to raw.notch_filter. Defaults to None.
        If None, expands to:
            dict(freqs=(50, 100, 150, 200, 250), method='fft')
    plot_fmin : float | None
        Lower frequency bound of the PSD plots. Defaults to 0.
    plot_fmax : float | None
        Upper frequency bound of the PSD plots. Defaults to the raw
        low-pass frequency + 20 Hz.
    n_jobs : int
        The number of CPUs to use in parallel.
    figsize : tuple of int
        The figsize in inches. See matplotlib documentation.
    show : bool
        Show figure if True
    img_scale : float
        The scaling factor for the report. Defaults to 1.0.
    report : instance of Report | None
        The report object. If None, a new report will be generated.

    Returns
    -------
    fig : matplotlib figure
        The figure with the before/after PSD plots.
    report : instance of Report
        The report with the figure added to its 'FILTER' section.
    """
    _default_filter_params = dict(l_freq=0.5, h_freq=200, n_jobs=n_jobs,
                                  method='fft',
                                  l_trans_bandwidth=0.1, h_trans_bandwidth=0.5)
    if filter_params is None:
        filter_params = _default_filter_params
    if not isinstance(filter_params, (list, tuple)):
        filter_params = [filter_params]
    if notch_filter_params is None:
        notch_filter_params = dict(freqs=(50, 100, 150, 200, 250,),
                                   method='fft')
    if report is None:
        report = Report(subject)
    notch_filter_params.update(n_jobs=n_jobs)
    picks_list, n_rows, fig, axes = _prepare_filter_plot(raw, figsize)
    iter_plot = zip(axes, picks_list)
    fmin, fmax = plot_fmin or 0, plot_fmax or raw.info['lowpass'] + 20
    ###########################################################################
    # plot before filter
    for ax, (picks, ch_type) in iter_plot:
        raw.plot_psd(fmin=fmin, fmax=fmax, ax=ax,
                     picks=picks, color='black', show=show)
        first_line = ax.get_lines()[0]
        first_line.set_label('{} - raw'.format(ch_type))
        ax.set_ylabel('Power (dB)')
        ax.grid(True)
        ax.set_title(ch_type)
    ###########################################################################
    # filter
    # Note. It turns out to be safer to first run the notch filter.
    # Ohterwise crazy notch resonance with some filter settings.
    raw.notch_filter(**notch_filter_params)
    for filter_params_ in filter_params:
        # Merge user params over the defaults so partial dicts work.
        final_filter_params_ = deepcopy(_default_filter_params)
        final_filter_params_.update(filter_params_)
        final_filter_params_.update({'n_jobs': n_jobs})
        raw.filter(**final_filter_params_)
    ###########################################################################
    # plot after filter
    for ax, (picks, ch_type) in iter_plot:
        raw.plot_psd(fmin=fmin, fmax=fmax, ax=ax,
                     picks=picks, color='red', show=show)
        second_line = ax.get_lines()[1]
        second_line.set_label('{} - filtered'.format(ch_type))
        ax.legend(loc='best')
    fig.suptitle('Multitaper PSD')
    report.add_figs_to_section(fig, 'filter PSD spectra {}'.format(subject),
                               'FILTER', scale=img_scale)
    return fig, report
def _put_artifact_range(info, evoked, kind):
    """Record the scaled peak-to-peak amplitude of *evoked* in *info*.

    For every data channel type present, stores (max - min) of the evoked
    data, scaled by the default plotting scaling for that channel type,
    under the key '<kind>_amp_range_<ch_type>'.
    """
    scalings = _handle_default('scalings')
    for picks, ch_type in get_data_picks(evoked, meg_combined=False):
        segment = evoked.data[picks]
        peak_to_peak = (segment.max() - segment.min()) * scalings[ch_type]
        info['%s_amp_range_%s' % (kind, ch_type)] = peak_to_peak
def compute_ica(raw, subject, n_components=0.99, picks=None, decim=None,
                reject=None, ecg_tmin=-0.5, ecg_tmax=0.5, eog_tmin=-0.5,
                eog_tmax=0.5, n_max_ecg=3, n_max_eog=1,
                n_max_ecg_epochs=200, show=True, img_scale=1.0,
                random_state=None, report=None, artifact_stats=None):
    """Run ICA on raw data and mark ECG/EOG artifact components.

    Parameters
    ----------
    raw : instance of Raw
        Raw measurements to be decomposed.
    subject : str
        The name of the subject.
    picks : array-like of int, shape(n_channels, ) | None
        Channels to be included. This selection remains throughout the
        initialized ICA solution. If None only good data channels are used.
        Defaults to None.
    n_components : int | float | None | 'rank'
        The number of components used for ICA decomposition. If int, it must
        be smaller then max_pca_components. If None, all PCA components will
        be used. If float between 0 and 1 components can will be selected by
        the cumulative percentage of explained variance.
        If 'rank', the number of components equals the rank estimate.
        Defaults to 0.99.
    decim : int | None
        Increment for selecting each nth time slice. If None, all samples
        within ``start`` and ``stop`` are used. Defaults to None.
    reject : dict | None
        Rejection parameters based on peak to peak amplitude.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
        If reject is None then no rejection is done. You should
        use such parameters to reject big measurement artifacts
        and not EOG for example. It only applies if `inst` is of type Raw.
        Defaults to {'mag': 5e-12}
    ecg_tmin : float
        Start time before ECG event. Defaults to -0.5.
    ecg_tmax : float
        End time after ECG event. Defaults to 0.5.
    eog_tmin : float
        Start time before EOG event. Defaults to -0.5.
    eog_tmax : float
        End time after EOG event. Defaults to 0.5.
    n_max_ecg : int | None
        The maximum number of ECG components to exclude. Defaults to 3.
    n_max_eog : int | None
        The maximum number of EOG components to exclude. Defaults to 1.
    n_max_ecg_epochs : int
        The maximum number of ECG epochs to use for phase-consistency
        estimation. Defaults to 200.
    show : bool
        Show figure if True
    img_scale : float
        The scaling factor for the report. Defaults to 1.0.
    random_state : None | int | instance of np.random.RandomState
        np.random.RandomState to initialize the FastICA estimation.
        As the estimation is non-deterministic it can be useful to
        fix the seed to have reproducible results. Defaults to None.
    report : instance of Report | None
        The report object. If None, a new report will be generated.
    artifact_stats : None | dict
        A dict that contains info on amplitude ranges of artifacts and
        numbers of events, etc. by channel type.

    Returns
    -------
    ica : instance of ICA
        The ICA solution.
    report : dict
        A dict with an html report ('html') and artifact statistics ('stats').
    """
    if report is None:
        report = Report(subject=subject, title='ICA preprocessing')
    if n_components == 'rank':
        n_components = raw.estimate_rank(picks=picks)
    ica = ICA(n_components=n_components, max_pca_components=None,
              random_state=random_state, max_iter=256)
    ica.fit(raw, picks=picks, decim=decim, reject=reject)
    # Build a section label from the channel types present in the solution.
    comment = []
    for ch in ('mag', 'grad', 'eeg'):
        if ch in ica:
            comment += [ch.upper()]
    if len(comment) > 0:
        comment = '+'.join(comment) + ' '
    else:
        comment = ''
    # Pick the channel type used for topomap plots.
    topo_ch_type = 'mag'
    if 'GRAD' in comment and 'MAG' not in comment:
        topo_ch_type = 'grad'
    elif 'EEG' in comment:
        topo_ch_type = 'eeg'
    ###########################################################################
    # 2) identify bad components by analyzing latent sources.
    # NOTE(review): mixed formatting — .format fills the {} with the subject
    # now; the two %s slots are filled later via `title % (..., ...)`.
    title = '%s related to %s artifacts (red) ({})'.format(subject)
    # generate ECG epochs use detection via phase statistics
    reject_ = {'mag': 5e-12, 'grad': 5000e-13, 'eeg': 300e-6}
    if reject is not None:
        reject_.update(reject)
    for ch_type in ['mag', 'grad', 'eeg']:
        if ch_type not in ica:
            reject_.pop(ch_type)
    picks_ = np.array([raw.ch_names.index(k) for k in ica.ch_names])
    if 'eeg' in ica:
        if 'ecg' in raw:
            picks_ = np.append(picks_,
                               pick_types(raw.info, meg=False, ecg=True)[0])
        else:
            logger.info('There is no ECG channel, trying to guess ECG from '
                        'magnetormeters')
    if artifact_stats is None:
        artifact_stats = dict()
    ecg_epochs = create_ecg_epochs(raw, tmin=ecg_tmin, tmax=ecg_tmax,
                                   keep_ecg=True, picks=picks_, reject=reject_)

    n_ecg_epochs_found = len(ecg_epochs.events)
    artifact_stats['ecg_n_events'] = n_ecg_epochs_found
    n_max_ecg_epochs = min(n_max_ecg_epochs, n_ecg_epochs_found)
    artifact_stats['ecg_n_used'] = n_max_ecg_epochs

    # Randomly subsample the ECG epochs (fixed seed for reproducibility).
    sel_ecg_epochs = np.arange(n_ecg_epochs_found)
    rng = np.random.RandomState(42)
    rng.shuffle(sel_ecg_epochs)
    ecg_ave = ecg_epochs.average()
    report.add_figs_to_section(ecg_ave.plot(), 'ECG-full', 'artifacts')
    ecg_epochs = ecg_epochs[sel_ecg_epochs[:n_max_ecg_epochs]]
    ecg_ave = ecg_epochs.average()
    report.add_figs_to_section(ecg_ave.plot(), 'ECG-used', 'artifacts')
    _put_artifact_range(artifact_stats, ecg_ave, kind='ecg')

    ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, method='ctps')
    if len(ecg_inds) > 0:
        ecg_evoked = ecg_epochs.average()
        del ecg_epochs
        fig = ica.plot_scores(scores, exclude=ecg_inds, labels='ecg',
                              title='', show=show)
        report.add_figs_to_section(fig, 'scores ({})'.format(subject),
                                   section=comment + 'ECG',
                                   scale=img_scale)
        current_exclude = [e for e in ica.exclude]  # issue #2608 MNE
        fig = ica.plot_sources(raw, ecg_inds, exclude=ecg_inds,
                               title=title % ('components', 'ecg'), show=show)
        report.add_figs_to_section(fig, 'sources ({})'.format(subject),
                                   section=comment + 'ECG',
                                   scale=img_scale)
        ica.exclude = current_exclude
        fig = ica.plot_components(ecg_inds, ch_type=topo_ch_type,
                                  title='', colorbar=True, show=show)
        report.add_figs_to_section(fig, title % ('sources', 'ecg'),
                                   section=comment + 'ECG', scale=img_scale)
        ica.exclude = current_exclude

        # Only exclude up to n_max_ecg components.
        ecg_inds = ecg_inds[:n_max_ecg]
        ica.exclude += ecg_inds
        fig = ica.plot_sources(ecg_evoked, exclude=ecg_inds, show=show)
        report.add_figs_to_section(fig, 'evoked sources ({})'.format(subject),
                                   section=comment + 'ECG',
                                   scale=img_scale)
        fig = ica.plot_overlay(ecg_evoked, exclude=ecg_inds, show=show)
        report.add_figs_to_section(fig,
                                   'rejection overlay ({})'.format(subject),
                                   section=comment + 'ECG',
                                   scale=img_scale)

    # detect EOG by correlation
    picks_eog = np.concatenate(
        [picks_, pick_types(raw.info, meg=False, eeg=False, ecg=False,
                            eog=True)])

    eog_epochs = create_eog_epochs(raw, tmin=eog_tmin, tmax=eog_tmax,
                                   picks=picks_eog, reject=reject_)
    artifact_stats['eog_n_events'] = len(eog_epochs.events)
    artifact_stats['eog_n_used'] = artifact_stats['eog_n_events']
    eog_ave = eog_epochs.average()
    report.add_figs_to_section(eog_ave.plot(), 'EOG-used', 'artifacts')
    _put_artifact_range(artifact_stats, eog_ave, kind='eog')

    eog_inds = None
    if len(eog_epochs.events) > 0:
        eog_inds, scores = ica.find_bads_eog(eog_epochs)
    if eog_inds is not None and len(eog_epochs.events) > 0:
        fig = ica.plot_scores(scores, exclude=eog_inds, labels='eog',
                              show=show, title='')
        report.add_figs_to_section(fig, 'scores ({})'.format(subject),
                                   section=comment + 'EOG',
                                   scale=img_scale)
        current_exclude = [e for e in ica.exclude]  # issue #2608 MNE
        # NOTE(review): exclude=ecg_inds here looks like a copy-paste bug in
        # the EOG section — should presumably be exclude=eog_inds; confirm
        # before changing since it only affects the diagnostic figure.
        fig = ica.plot_sources(raw, eog_inds, exclude=ecg_inds,
                               title=title % ('sources', 'eog'), show=show)
        report.add_figs_to_section(fig, 'sources', section=comment + 'EOG',
                                   scale=img_scale)
        ica.exclude = current_exclude
        fig = ica.plot_components(eog_inds, ch_type=topo_ch_type,
                                  title='', colorbar=True, show=show)
        report.add_figs_to_section(fig, title % ('components', 'eog'),
                                   section=comment + 'EOG', scale=img_scale)
        ica.exclude = current_exclude

        # Only exclude up to n_max_eog components.
        eog_inds = eog_inds[:n_max_eog]
        ica.exclude += eog_inds
        eog_evoked = eog_epochs.average()
        fig = ica.plot_sources(eog_evoked, exclude=eog_inds, show=show)
        report.add_figs_to_section(
            fig, 'evoked sources ({})'.format(subject),
            section=comment + 'EOG', scale=img_scale)

        fig = ica.plot_overlay(eog_evoked, exclude=eog_inds, show=show)
        report.add_figs_to_section(
            fig, 'rejection overlay({})'.format(subject),
            section=comment + 'EOG', scale=img_scale)
    else:
        del eog_epochs

    # check the amplitudes do not change
    if len(ica.exclude) > 0:
        fig = ica.plot_overlay(raw, show=show)  # EOG artifacts remain
        html = _render_components_table(ica)
        # NOTE(review): ch_type here is the leftover loop variable from the
        # reject_ pruning loop above (its last iterated value), not a
        # deliberately chosen type.
        report.add_htmls_to_section(
            html, captions='excluded components',
            section='ICA rejection summary (%s)' % ch_type)
        report.add_figs_to_section(
            fig, 'rejection overlay({})'.format(subject),
            section=comment + 'RAW', scale=img_scale)
    return ica, dict(html=report, stats=artifact_stats)
| {
"repo_name": "dengemann/meeg-preprocessing",
"path": "meeg_preprocessing/preprocessing.py",
"copies": "1",
"size": "14982",
"license": "bsd-2-clause",
"hash": -1507489948678972400,
"line_mean": 40.5013850416,
"line_max": 79,
"alpha_frac": 0.5762915499,
"autogenerated": false,
"ratio": 3.585926280516994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46622178304169937,
"avg_score": null,
"num_lines": null
} |
from copy import deepcopy
import numpy as np
from mne.report import Report
from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
from mne import pick_types
from .viz import _prepare_filter_plot
def check_apply_filter(raw, subject, filter_params=None,
                       notch_filter_params=None, plot_fmin=None,
                       plot_fmax=None, n_jobs=1, figsize=None, show=True,
                       report=None, img_scale=1.0):
    """Apply filtering in place and save diagnostic before/after PSD plots.

    NOTE(review): this variant applies the band-pass filter(s) before the
    notch filter; the other copy of this function in this source states
    that notch-first is safer (notch resonance with some settings).

    Parameters
    ----------
    raw : instance of Raw
        Raw measurements to be filtered (modified in place).
    subject : str
        The name of the subject.
    filter_params : dict | list of dict | None
        The parameters passed to raw.filter. If list, raw.filter will be
        invoked len(filter_params) times. Defaults to None. If None, expands
        to:
            dict(l_freq=0.5, h_freq=200, n_jobs=n_jobs,
                 method='fft', l_trans_bandwidth=0.1, h_trans_bandwidth=0.5)
    notch_filter_params : dict | None
        The parameters passed to raw.notch_filter. Defaults to None.
        If None, expands to:
            dict(freqs=(50, 100, 150, 200, 250), method='fft')
    plot_fmin : float | None
        Lower frequency bound of the PSD plots. Defaults to 0.
    plot_fmax : float | None
        Upper frequency bound of the PSD plots. Defaults to the raw
        low-pass frequency + 20 Hz.
    n_jobs : int
        The number of CPUs to use in parallel.
    figsize : tuple of int
        The figsize in inches. See matplotlib documentation.
    show : bool
        Show figure if True
    img_scale : float
        The scaling factor for the report. Defaults to 1.0.
    report : instance of Report | None
        The report object. If None, a new report will be generated.

    Returns
    -------
    fig : matplotlib figure
        The figure with the before/after PSD plots.
    report : instance of Report
        The report with the figure added to its 'FILTER' section.
    """
    _default_filter_params = dict(l_freq=0.5, h_freq=200, n_jobs=n_jobs,
                                  method='fft',
                                  l_trans_bandwidth=0.1, h_trans_bandwidth=0.5)
    if filter_params is None:
        filter_params = _default_filter_params
    if not isinstance(filter_params, (list, tuple)):
        filter_params = [filter_params]
    if notch_filter_params is None:
        notch_filter_params = dict(freqs=(50, 100, 150, 200, 250,),
                                   method='fft')
    if report is None:
        report = Report(subject)
    notch_filter_params.update(n_jobs=n_jobs)
    picks_list, n_rows, fig, axes = _prepare_filter_plot(raw, figsize)
    iter_plot = zip(axes, picks_list)
    fmin, fmax = plot_fmin or 0, plot_fmax or raw.info['lowpass'] + 20
    ###########################################################################
    # plot before filter
    for ax, (picks, ch_type) in iter_plot:
        raw.plot_psd(fmin=fmin, fmax=fmax, ax=ax,
                     picks=picks, color='black', show=show)
        first_line = ax.get_lines()[0]
        first_line.set_label('{} - raw'.format(ch_type))
        ax.set_ylabel('Power (dB)')
        ax.grid(True)
        ax.set_title(ch_type)
    ###########################################################################
    # filter
    for filter_params_ in filter_params:
        # Merge user params over the defaults so partial dicts work.
        final_filter_params_ = deepcopy(_default_filter_params)
        final_filter_params_.update(filter_params_)
        final_filter_params_.update({'n_jobs': n_jobs})
        raw.filter(**final_filter_params_)
    raw.notch_filter(**notch_filter_params)
    ###########################################################################
    # plot after filter
    for ax, (picks, ch_type) in iter_plot:
        raw.plot_psd(fmin=fmin, fmax=fmax, ax=ax,
                     picks=picks, color='red', show=show)
        second_line = ax.get_lines()[1]
        second_line.set_label('{} - filtered'.format(ch_type))
        ax.legend(loc='best')
    fig.suptitle('Multitaper PSD')
    report.add_figs_to_section(fig, 'filter PSD spectra {}'.format(subject),
                               'FILTER', scale=img_scale)
    return fig, report
def compute_ica(raw, subject, n_components=0.99, picks=None, decim=None,
                reject=None, ecg_tmin=-0.5, ecg_tmax=0.5, eog_tmin=-0.5,
                eog_tmax=0.5, n_max_ecg=3, n_max_eog=1,
                n_max_ecg_epochs=200, show=True, img_scale=1.0,
                report=None):
    """Run ICA in raw data

    Parameters
    ----------
    raw : instance of Raw
        Raw measurements to be decomposed.
    subject : str
        The name of the subject.
    n_components : int | float | None | 'rank'
        The number of components used for ICA decomposition. If int, it must
        be smaller than max_pca_components. If None, all PCA components will
        be used. If float between 0 and 1, components will be selected by the
        cumulative percentage of explained variance.
        If 'rank', the number of components equals the rank estimate.
        Defaults to 0.99.
    picks : array-like of int, shape(n_channels, ) | None
        Channels to be included. This selection remains throughout the
        initialized ICA solution. If None only good data channels are used.
        Defaults to None.
    decim : int | None
        Increment for selecting each nth time slice. If None, all samples
        within ``start`` and ``stop`` are used. Defaults to None.
    reject : dict | None
        Rejection parameters based on peak to peak amplitude.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
        If reject is None then no rejection is done. You should
        use such parameters to reject big measurement artifacts
        and not EOG for example. It only applies if `inst` is of type Raw.
        Defaults to {'mag': 5e-12}.
    ecg_tmin : float
        Start time before ECG event. Defaults to -0.5.
    ecg_tmax : float
        End time after ECG event. Defaults to 0.5.
    eog_tmin : float
        Start time before EOG event. Defaults to -0.5.
    eog_tmax : float
        End time after EOG event. Defaults to 0.5.
    n_max_ecg : int | None
        The maximum number of ECG components to exclude. Defaults to 3.
    n_max_eog : int | None
        The maximum number of EOG components to exclude. Defaults to 1.
    n_max_ecg_epochs : int
        The maximum number of ECG epochs to use for phase-consistency
        estimation. Defaults to 200.
    show : bool
        Show figure if True.
    img_scale : float
        The scaling factor for the report. Defaults to 1.0.
    report : instance of Report | None
        The report object. If None, a new report will be generated.

    Returns
    -------
    ica : instance of ICA
        The ICA solution.
    report : instance of Report
        The report object.
    """
    if report is None:
        report = Report(subject=subject, title='ICA preprocessing')
    if n_components == 'rank':
        n_components = raw.estimate_rank(picks=picks)
    ica = ICA(n_components=n_components, max_pca_components=None,
              max_iter=256)
    ica.fit(raw, picks=picks, decim=decim, reject=reject)

    # summarize which channel types entered the decomposition, e.g. 'MAG+EEG '
    comment = []
    for ch in ('mag', 'grad', 'eeg'):
        if ch in ica:
            comment += [ch.upper()]
    if len(comment) > 0:
        comment = '+'.join(comment) + ' '
    else:
        comment = ''

    # pick a channel type that exists in the decomposition for topographies
    topo_ch_type = 'mag'
    if 'GRAD' in comment and 'MAG' not in comment:
        topo_ch_type = 'grad'
    elif 'EEG' in comment:
        topo_ch_type = 'eeg'

    ###########################################################################
    # 2) identify bad components by analyzing latent sources.

    # `.format` fills in the subject now; the two %s slots are filled per plot
    title = '%s related to %s artifacts (red) ({})'.format(subject)

    # generate ECG epochs, use detection via phase statistics
    reject_ = {'mag': 5e-12, 'grad': 5000e-13, 'eeg': 300e-6}
    if reject is not None:
        reject_.update(reject)
    for ch_type in ['mag', 'grad', 'eeg']:
        if ch_type not in ica:
            reject_.pop(ch_type)

    picks_ = np.array([raw.ch_names.index(k) for k in ica.ch_names])
    if 'eeg' in ica:
        if 'ecg' in raw:
            picks_ = np.append(picks_,
                               pick_types(raw.info, meg=False, ecg=True)[0])
        else:
            raise ValueError('There is no ECG channel')

    ecg_epochs = create_ecg_epochs(raw, tmin=ecg_tmin, tmax=ecg_tmax,
                                   picks=picks_, reject=reject_)

    # subsample the ECG epochs randomly (fixed seed for reproducibility)
    n_ecg_epochs_found = len(ecg_epochs.events)
    n_max_ecg_epochs = min(n_max_ecg_epochs, n_ecg_epochs_found)
    sel_ecg_epochs = np.arange(n_ecg_epochs_found)
    rng = np.random.RandomState(42)
    rng.shuffle(sel_ecg_epochs)
    ecg_epochs = ecg_epochs[sel_ecg_epochs[:n_max_ecg_epochs]]

    ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, method='ctps')
    if len(ecg_inds) > 0:
        ecg_evoked = ecg_epochs.average()
        del ecg_epochs
        fig = ica.plot_scores(scores, exclude=ecg_inds,
                              title=title % ('scores', 'ecg'), show=show)
        report.add_figs_to_section(fig, 'scores ({})'.format(subject),
                                   section=comment + 'ECG',
                                   scale=img_scale)
        fig = ica.plot_sources(raw, ecg_inds, exclude=ecg_inds,
                               title=title % ('components', 'ecg'), show=show)
        report.add_figs_to_section(fig, 'sources ({})'.format(subject),
                                   section=comment + 'ECG',
                                   scale=img_scale)
        fig = ica.plot_components(ecg_inds, ch_type=topo_ch_type,
                                  title='', colorbar=True, show=show)
        report.add_figs_to_section(fig, title % ('sources', 'ecg'),
                                   section=comment + 'ECG', scale=img_scale)

        # only mark the strongest components for exclusion
        ecg_inds = ecg_inds[:n_max_ecg]
        ica.exclude += ecg_inds
        fig = ica.plot_sources(ecg_evoked, exclude=ecg_inds, show=show)
        report.add_figs_to_section(fig, 'evoked sources ({})'.format(subject),
                                   section=comment + 'ECG',
                                   scale=img_scale)
        fig = ica.plot_overlay(ecg_evoked, exclude=ecg_inds, show=show)
        report.add_figs_to_section(fig,
                                   'rejection overlay ({})'.format(subject),
                                   section=comment + 'ECG',
                                   scale=img_scale)

    # detect EOG by correlation
    eog_inds, scores = ica.find_bads_eog(raw)
    if len(eog_inds) > 0:
        fig = ica.plot_scores(scores, exclude=eog_inds,
                              title=title % ('scores', 'eog'), show=show)
        report.add_figs_to_section(fig, 'scores ({})'.format(subject),
                                   section=comment + 'EOG',
                                   scale=img_scale)
        # fix: highlight the EOG components here — previously `ecg_inds`
        # was passed as ``exclude``, marking the wrong components
        fig = ica.plot_sources(raw, eog_inds, exclude=eog_inds,
                               title=title % ('sources', 'eog'), show=show)
        report.add_figs_to_section(fig, 'sources', section=comment + 'EOG',
                                   scale=img_scale)
        fig = ica.plot_components(eog_inds, ch_type=topo_ch_type,
                                  title='', colorbar=True, show=show)
        report.add_figs_to_section(fig, title % ('components', 'eog'),
                                   section=comment + 'EOG', scale=img_scale)

        eog_inds = eog_inds[:n_max_eog]
        ica.exclude += eog_inds

        # estimate average artifact
        eog_epochs = create_eog_epochs(raw, tmin=eog_tmin, tmax=eog_tmax,
                                       picks=picks_, reject=reject_)
        if len(eog_epochs.events) > 1:
            eog_evoked = eog_epochs.average()
            fig = ica.plot_sources(eog_evoked, exclude=eog_inds, show=show)
            report.add_figs_to_section(
                fig, 'evoked sources ({})'.format(subject),
                section=comment + 'EOG', scale=img_scale)
            fig = ica.plot_overlay(eog_evoked, exclude=eog_inds, show=show)
            report.add_figs_to_section(
                fig, 'rejection overlay({})'.format(subject),
                section=comment + 'EOG', scale=img_scale)
        else:
            del eog_epochs

    # check the amplitudes do not change
    if len(ica.exclude) > 0:
        fig = ica.plot_overlay(raw, show=show)  # EOG artifacts remain
        report.add_figs_to_section(
            fig, 'rejection overlay({})'.format(subject),
            section=comment + 'RAW', scale=img_scale)
    return ica, report
| {
"repo_name": "cmoutard/meeg-preprocessing",
"path": "meeg_preprocessing/preprocessing.py",
"copies": "1",
"size": "12345",
"license": "bsd-2-clause",
"hash": 5954098116188855000,
"line_mean": 40.0132890365,
"line_max": 79,
"alpha_frac": 0.5566626164,
"autogenerated": false,
"ratio": 3.663204747774481,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4719867364174481,
"avg_score": null,
"num_lines": null
} |
from .utils import get_data_picks
def _prepare_filter_plot(raw, figsize):
    """Create one shared-axis subplot per data-channel group in ``raw``."""
    import matplotlib.pyplot as plt
    picks_list = get_data_picks(raw)
    n_rows = len(picks_list)
    # default width scales with the number of channel groups
    if figsize is None:
        figsize = (6 * n_rows, 6)
    fig, axes = plt.subplots(1, n_rows, sharey=True, sharex=True,
                             figsize=figsize)
    # normalize: a single Axes becomes a list, a grid becomes a flat array
    axes = [axes] if n_rows == 1 else axes.flatten()
    return picks_list, n_rows, fig, axes
def plot_psd_ica_overlay(raw, ica, fmin=None, fmax=None, n_jobs=1,
                         figsize=None, show=True, copy=True):
    """Plot raw power spectrum before and after ICA

    Note. Additional arguments can be passed to raw.plot_psd
    Using **kwargs.

    Parameters
    ----------
    raw : instance of Raw
        Raw measurements to be decomposed.
    ica : instance of ICA
        The ICA solution.
    fmin : float
        Start frequency to consider. Is passed to `raw.plot_psd`. Defaults
        to None.
    fmax : float
        End frequency to consider. Is passed to `raw.plot_psd`. Defaults to
        None.
    n_jobs : int
        The number of CPUs to use in parallel. Is passed to `raw.plot_psd`.
    figsize : tuple of int
        The figsize in inches. See matplotlib documentation.
    show : bool
        Show figure if True.
    copy : bool
        If True, apply the ICA solution to a copy of ``raw`` so the caller's
        object is left untouched. Defaults to True.

    Returns
    -------
    fig : matplotlib.figure.Figure.
        The figure object.
    """
    picks_list, n_rows, fig, axes = _prepare_filter_plot(raw, figsize)

    # materialize the (axes, picks) pairs: a bare zip object is an iterator
    # on Python 3 and would be exhausted after the first loop, leaving the
    # post-ICA loop with nothing to draw
    iter_plot = list(zip(axes, picks_list))

    fmin, fmax = fmin or 0, fmax or raw.info['lowpass'] + 20

    ###########################################################################
    # plot before ICA
    for ax, (picks, ch_type) in iter_plot:
        # n_jobs is now actually forwarded as the docstring promises
        raw.plot_psd(fmin=fmin, fmax=fmax, ax=ax, n_jobs=n_jobs,
                     picks=picks, color='black', show=show)
        first_line = ax.get_lines()[0]
        first_line.set_label('{} - raw'.format(ch_type))
        ax.set_ylabel('Power (dB)')
        ax.grid(True)
        ax.set_title(ch_type)

    ###########################################################################
    # ICA
    if copy is True:
        raw = raw.copy()
    ica.apply(raw)

    ###########################################################################
    # plot after ICA
    for ax, (picks, ch_type) in iter_plot:
        raw.plot_psd(fmin=fmin, fmax=fmax, ax=ax, n_jobs=n_jobs,
                     picks=picks, color='red', show=show)
        second_line = ax.get_lines()[1]
        second_line.set_label('{} - ICA applied'.format(ch_type))
        ax.legend(loc='best')

    fig.suptitle('Multitaper PSD')
    return fig
| {
"repo_name": "cmoutard/meeg-preprocessing",
"path": "meeg_preprocessing/viz.py",
"copies": "1",
"size": "2805",
"license": "bsd-2-clause",
"hash": -8180266110916603000,
"line_mean": 30.1666666667,
"line_max": 79,
"alpha_frac": 0.5308377897,
"autogenerated": false,
"ratio": 3.6907894736842106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47216272633842105,
"avg_score": null,
"num_lines": null
} |
import itertools as itt
import os.path as op
import re
import numpy as np
import scipy.io as scio
from scipy import linalg
from mne import (EpochsArray, EvokedArray, pick_info,
rename_channels)
from mne.io.bti.bti import _get_bti_info, read_raw_bti
from mne.io import _loc_to_coil_trans
from mne.utils import logger
from .file_mapping import get_file_paths
# Channel label list for HCP MEG data files: trigger/response channels,
# magnetometer channels ('A*', 'M*'), reference/'G*' channels, then
# current/ECG/EOG/EMG channels. Presumably this is the native 4D/BTi
# storage order of the HCP files — TODO confirm against the reader code.
_data_labels = [
    'TRIGGER',
    'RESPONSE',
    'MLzA',
    'MLyA',
    'MLzaA',
    'MLyaA',
    'MLxA',
    'A22',
    'MLxaA',
    'A2',
    'MRzA',
    'MRxA',
    'MRzaA',
    'MRxaA',
    'MRyA',
    'MCzA',
    'MRyaA',
    'MCzaA',
    'MCyA',
    'GzxA',
    'MCyaA',
    'A104',
    'SA1',
    'A241',
    'MCxA',
    'A138',
    'MCxaA',
    'A214',
    'SA2',
    'SA3',
    'A71',
    'A26',
    'A93',
    'A39',
    'A125',
    'A20',
    'A65',
    'A9',
    'A8',
    'A95',
    'A114',
    'A175',
    'A16',
    'A228',
    'A35',
    'A191',
    'A37',
    'A170',
    'A207',
    'A112',
    'A224',
    'A82',
    'A238',
    'A202',
    'A220',
    'A28',
    'A239',
    'A13',
    'A165',
    'A204',
    'A233',
    'A98',
    'A25',
    'A70',
    'A72',
    'A11',
    'A47',
    'A160',
    'A64',
    'A3',
    'A177',
    'A63',
    'A155',
    'A10',
    'A127',
    'A67',
    'A115',
    'A247',
    'A174',
    'A194',
    'A5',
    'A242',
    'A176',
    'A78',
    'A168',
    'A31',
    'A223',
    'A245',
    'A219',
    'A12',
    'A186',
    'A105',
    'A222',
    'A76',
    'A50',
    'A188',
    'A231',
    'A45',
    'A180',
    'A99',
    'A234',
    'A215',
    'A235',
    'A181',
    'A38',
    'A230',
    'A91',
    'A212',
    'A24',
    'A66',
    'A42',
    'A96',
    'A57',
    'A86',
    'A56',
    'A116',
    'A151',
    'A141',
    'A120',
    'A189',
    'A80',
    'A210',
    'A143',
    'A113',
    'A27',
    'A137',
    'A135',
    'A167',
    'A75',
    'A240',
    'A206',
    'A107',
    'A130',
    'A100',
    'A43',
    'A200',
    'A102',
    'A132',
    'A183',
    'A199',
    'A122',
    'A19',
    'A62',
    'A21',
    'A229',
    'A84',
    'A213',
    'A55',
    'A32',
    'A85',
    'A146',
    'A58',
    'A60',
    'GyyA',
    'A88',
    'A79',
    'GzyA',
    'GxxA',
    'A169',
    'A54',
    'GyxA',
    'A203',
    'A145',
    'A103',
    'A163',
    'A139',
    'A49',
    'A166',
    'A156',
    'A128',
    'A68',
    'A159',
    'A236',
    'A161',
    'A121',
    'A4',
    'A61',
    'A6',
    'A126',
    'A14',
    'A94',
    'A15',
    'A193',
    'A150',
    'A227',
    'A59',
    'A36',
    'A225',
    'A195',
    'A30',
    'A109',
    'A172',
    'A108',
    'A81',
    'A171',
    'A218',
    'A173',
    'A201',
    'A74',
    'A29',
    'A164',
    'A205',
    'A232',
    'A69',
    'A157',
    'A97',
    'A217',
    'A101',
    'A124',
    'A40',
    'A123',
    'A153',
    'A178',
    'A1',
    'A179',
    'A33',
    'A147',
    'A117',
    'A148',
    'A87',
    'A89',
    'A243',
    'A119',
    'A52',
    'A142',
    'A211',
    'A190',
    'A53',
    'A192',
    'A73',
    'A226',
    'A136',
    'A184',
    'A51',
    'A237',
    'A77',
    'A129',
    'A131',
    'A198',
    'A197',
    'A182',
    'A46',
    'A92',
    'A41',
    'A90',
    'A7',
    'A23',
    'A83',
    'A154',
    'A34',
    'A17',
    'A18',
    'A248',
    'A149',
    'A118',
    'A208',
    'A152',
    'A140',
    'A144',
    'A209',
    'A110',
    'A111',
    'A244',
    'A185',
    'A246',
    'A162',
    'A106',
    'A187',
    'A48',
    'A221',
    'A196',
    'A133',
    'A158',
    'A44',
    'A134',
    'A216',
    'UACurrent',
    'ECG+',
    'VEOG+',
    'HEOG+',
    'EMG_LF',
    'EMG_LH',
    'ECG-',
    'VEOG-',
    'HEOG-',
    'EMG_RF',
    'EMG_RH'
]
# Mapping from native electrode names ('E*') to the physiological channel
# names used above; consumed via ``dict(_label_mapping)`` in read_info.
_label_mapping = [
    ('E1', 'ECG+'),
    ('E3', 'VEOG+'),
    ('E5', 'HEOG+'),
    ('E63', 'EMG_LF'),
    ('E31', 'EMG_LH'),
    ('E2', 'ECG-'),
    ('E4', 'VEOG-'),
    ('E6', 'HEOG-'),
    ('E64', 'EMG_RF'),
    ('E32', 'EMG_RH')
]
# HCP time-lock names mapped onto the two onset categories used by the
# epochs readers: everything that is not a response counts as 'stim'.
_time_lock_mapping = dict(
    TRESP='resp',
    TEMG='resp',
    TIM='stim',
    TEV='stim',
    TFLA='stim',
    BSENT='stim',
    BU='stim'
)
def _parse_trans(string):
"""helper to parse transforms"""
return np.array(string.replace('\n', '')
.strip('[] ')
.split(' '), dtype=float).reshape(4, 4)
def _parse_hcp_trans(fid, transforms, convert_to_meter):
    """Parse HCP transform definitions from an open file into ``transforms``.

    Parameters
    ----------
    fid : file-like
        Open handle on a transform definition file.
    transforms : dict
        Filled in place; maps transform name to a 4x4 ndarray.
    convert_to_meter : bool
        If True, scale the translation column from mm to m.

    Raises
    ------
    RuntimeError
        If no transform could be parsed.
    """
    contents = fid.read()
    for trans in contents.split(';'):
        if 'filename' in trans or trans == '\n':
            continue
        key, trans = trans.split(' = ')
        # bug fix: ``key.lstrip('\ntransform.')`` stripped any leading
        # characters from the set {t, r, a, n, s, f, o, m, .}, which would
        # mangle names such as 'transform.temp' -> 'emp'. Strip whitespace
        # and remove the exact 'transform.' prefix instead.
        key = key.strip()
        if key.startswith('transform.'):
            key = key[len('transform.'):]
        transforms[key] = _parse_trans(trans)
        if convert_to_meter:
            transforms[key][:3, 3] *= 1e-3  # mm to m
    if not transforms:
        raise RuntimeError('Could not parse the transforms.')
def _read_trans_hcp(fname, convert_to_meter):
    """Read and parse ``subject_MEG_anatomy_transform.txt``.

    Returns a dict mapping transform names to 4x4 arrays.
    """
    out = dict()
    with open(fname) as fid:
        _parse_hcp_trans(fid, out, convert_to_meter)
    return out
def _read_landmarks_hcp(fname):
"""XXX parse landmarks currently not used"""
out = dict()
with open(fname) as fid:
for line in fid:
kind, data = line.split(' = ')
kind = kind.split('.')[1]
if kind == 'coordsys':
out['coord_frame'] = data.split(';')[0].replace("'", "")
else:
data = data.split()
for c in ('[', '];'):
if c in data:
data.remove(c)
out[kind] = np.array(data, dtype=int) * 1e-3 # mm to m
return out
def _get_head_model(head_model_fname):
    """Load head-model points and faces from an HCP MATLAB file."""
    mat = scio.loadmat(head_model_fname, squeeze_me=False)
    # descend into the nested MATLAB struct: headmodel.bnd holds the mesh
    bnd = mat['headmodel']['bnd'][0][0][0][0]
    pnts = bnd[0]
    faces = bnd[1]
    faces -= 1  # correct for Matlab's 1-based index
    return pnts, faces
def _read_bti_info(raw_fid, config):
    """Read only the measurement info from a BTi pdf/config pair."""
    meas_info, _ = _get_bti_info(
        pdf_fname=raw_fid, config_fname=config, head_shape_fname=None,
        rotation_x=0.0, translation=(0.0, 0.02, 0.11),
        ecg_ch='E31', eog_ch=('E63', 'E64'),
        convert=False,  # no conversion to neuromag coordinates
        rename_channels=False,  # keep native channel names
        sort_by_ch_name=False)  # do not change native order
    return meas_info
def _read_raw_bti(raw_fid, config_fid, convert, verbose=None):
    """Read a raw BTi file from HCP input.

    No coordinate conversion and no channel renaming, for HCP compatibility.
    """
    return read_raw_bti(
        raw_fid, config_fid, convert=convert, head_shape_fname=None,
        sort_by_ch_name=False, rename_channels=False, preload=False,
        verbose=verbose)
def _check_raw_config_runs(raws, configs):
"""XXX this goes to tests later, currently not used """
for raw, config in zip(raws, configs):
assert op.split(raw)[0] == op.split(config)[0]
run_str = set([configs[0].split('/')[-3]])
for config in configs[1:]:
assert set(configs[0].split('/')) - set(config.split('/')) == run_str
def _check_infos_trans(infos):
    """XXX this goes to tests later, currently not used

    Check that the device transforms of several measurement infos agree
    for the channels they have in common.
    """
    # use the info with the most channels as the reference channel set
    chan_max_idx = np.argmax([c['nchan'] for c in infos])
    chan_template = infos[chan_max_idx]['ch_names']
    channels = [c['ch_names'] for c in infos]
    # channels present in every info
    common_channels = set(chan_template).intersection(*channels)
    common_chs = [[c['chs'][c['ch_names'].index(ch)] for ch in common_channels]
                  for c in infos]
    dev_ctf_trans = [i['dev_ctf_t']['trans'] for i in infos]
    # the shared channel names must line up across infos
    cns = [[c['ch_name'] for c in cc] for cc in common_chs]
    for cn1, cn2 in itt.combinations(cns, 2):
        assert cn1 == cn2
    # BTI stores data in head coords, as a consequence the coordinates
    # change across run, we apply the ctf->ctf_head transform here
    # to check that all transforms are correct.
    cts = [np.array([linalg.inv(_loc_to_coil_trans(c['loc'])).dot(t)
                     for c in cc])
           for t, cc in zip(dev_ctf_trans, common_chs)]
    for ct1, ct2 in itt.combinations(cts, 2):
        np.testing.assert_array_almost_equal(ct1, ct2, 12)
def read_raw(subject, data_type, run_index=0, hcp_path=op.curdir,
             verbose=None):
    """Read HCP raw data

    Parameters
    ----------
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
        'noise_empty_room'
        'noise_subject'
    run_index : int
        The run index. For the first run, use 0, for the second, use 1.
        Also see HCP documentation for the number of runs for a given data
        type.
    hcp_path : str
        The HCP directory, defaults to op.curdir.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    raw : instance of mne.io.Raw
        The MNE raw object.
    """
    pdf_fname, config_fname = get_file_paths(
        subject=subject, data_type=data_type, output='raw',
        run_index=run_index, hcp_path=hcp_path)
    return _read_raw_bti(pdf_fname, config_fname, convert=False,
                         verbose=verbose)
def read_info(subject, data_type, run_index=0, hcp_path=op.curdir):
    """Read measurement info from unprocessed HCP data

    Parameters
    ----------
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
        'noise_empty_room'
        'noise_subject'
    run_index : int
        The run index. For the first run, use 0, for the second, use 1.
        Also see HCP documentation for the number of runs for a given data
        type.
    hcp_path : str
        The HCP directory, defaults to op.curdir.

    Returns
    -------
    info : instance of mne.io.meas_info.Info
        The MNE channel info object.

    .. note::
        HCP MEG delivers only 3 of the 5 task packages from MRI HCP.
    """
    pdf_fname, config_fname = get_file_paths(
        subject=subject, data_type=data_type, output='raw',
        run_index=run_index, hcp_path=hcp_path)
    # the raw data file is optional; the config alone yields a usable info
    if not op.exists(pdf_fname):
        pdf_fname = None
    meg_info = _read_bti_info(pdf_fname, config_fname)
    if pdf_fname is None:
        logger.info('Did not find Raw data. Guessing EMG, ECG and EOG '
                    'channels')
        rename_channels(meg_info, dict(_label_mapping))
    return meg_info
def read_epochs(subject, data_type, onset='stim', run_index=0,
                hcp_path=op.curdir, return_fixations_motor=False):
    """Read HCP processed epochs

    Parameters
    ----------
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
    onset : {'stim', 'resp', 'sentence', 'block'}
        The event onset. The mapping is generous, everything that is not a
        response is a stimulus, in the sense of internal or external events.
        `sentence` and `block` are specific to task_story_math.
    run_index : int
        The run index. For the first run, use 0, for the second, use 1.
        Also see HCP documentation for the number of runs for a given data
        type.
    hcp_path : str
        The HCP directory, defaults to op.curdir.
    return_fixations_motor : bool
        Whether to return fixations or regular trials. For motor data only.
        Defaults to False.

    Returns
    -------
    epochs : instance of mne.Epochs
        The MNE epochs. Note, these are pseudo-epochs in the case of
        onset == 'rest'.
    """
    info = read_info(subject=subject, data_type=data_type,
                     run_index=run_index, hcp_path=hcp_path)
    epochs_mat_fname = get_file_paths(
        subject=subject, data_type=data_type, output='epochs', onset=onset,
        run_index=run_index, hcp_path=hcp_path)[0]

    # fixation selection only applies to the motor paradigm
    is_motor = data_type == 'task_motor'
    epochs = _read_epochs(
        epochs_mat_fname=epochs_mat_fname, info=info,
        return_fixations_motor=return_fixations_motor if is_motor else None)
    if is_motor:
        emg_types = {ch: 'emg' for ch in epochs.ch_names if 'EMG' in ch}
        epochs.set_channel_types(emg_types)
    return epochs
def _read_epochs(epochs_mat_fname, info, return_fixations_motor):
    """Build an EpochsArray from an HCP FieldTrip-style epochs mat file.

    Parameters
    ----------
    epochs_mat_fname : str
        Path to the epochs mat file.
    info : instance of mne.Info
        Measurement info; subset to the channels found in the mat file.
    return_fixations_motor : bool | None
        If not None, select fixation trials (True) or non-fixation trials
        (False); only meaningful for motor data. If None, keep all trials.

    Returns
    -------
    epochs : instance of mne.EpochsArray
        The epochs, in trial-info order (not chronological).
    """
    data = scio.loadmat(epochs_mat_fname,
                        squeeze_me=True)['data']
    ch_names = [ch for ch in data['label'].tolist()]
    info['sfreq'] = data['fsample'].tolist()
    times = data['time'].tolist()[0]
    # deal with different event lengths
    if return_fixations_motor is not None:
        # trialinfo column 1 == 6 marks fixation trials — TODO confirm
        fixation_mask = data['trialinfo'].tolist()[:, 1] == 6
        if return_fixations_motor is False:
            fixation_mask = ~fixation_mask
        data = np.array(data['trial'].tolist()[fixation_mask].tolist())
    else:
        data = np.array(data['trial'].tolist().tolist())

    # warning: data are not chronologically ordered but
    # match the trial info
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here
    events = np.zeros((len(data), 3), dtype=int)
    events[:, 0] = np.arange(len(data))
    events[:, 2] = 99  # all trials share one placeholder event id
    # we leave it to the user to construct his events
    # as from the data['trialinfo'] arbitrary events can be constructed.
    # and it is task specific.
    this_info = _hcp_pick_info(info, ch_names)
    epochs = EpochsArray(data=data, info=this_info, events=events,
                         tmin=times.min())
    # XXX hack for now due to issue with EpochsArray constructor
    # cf https://github.com/mne-tools/mne-hcp/issues/9
    epochs.times = times
    return epochs
def _hcp_pick_info(info, ch_names):
    """Return a copy of ``info`` restricted to ``ch_names``."""
    sel = [info['ch_names'].index(ch) for ch in ch_names]
    return pick_info(info, sel, copy=True)
def read_trial_info(subject, data_type, run_index=0, hcp_path=op.curdir):
    """Read information about trials

    Parameters
    ----------
    subject : str
        The HCP subject.
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
    run_index : int
        The run index. For the first run, use 0, for the second, use 1.
        Also see HCP documentation for the number of runs for a given data
        type.
    hcp_path : str
        The HCP directory, defaults to op.curdir.

    Returns
    -------
    trial_info : dict
        The trial info including event labels, indices and times.
    """
    fname = get_file_paths(
        subject=subject, data_type=data_type, output='trial_info',
        run_index=run_index, hcp_path=hcp_path)[0]
    return _read_trial_info(trial_info_mat_fname=fname)
def _read_trial_info(trial_info_mat_fname):
    """Read the trial info mat file into a dict keyed by onset category."""
    # XXX FIXME index -1
    mat = scio.loadmat(trial_info_mat_fname, squeeze_me=True)['trlInfo']
    out = dict()
    for idx, lock_name in enumerate(mat['lockNames'].tolist()):
        onset_key = _time_lock_mapping[lock_name]
        out[onset_key] = dict(
            comments=mat['trlColDescr'].tolist()[idx],
            codes=mat['lockTrl'].tolist().tolist()[idx])
    return out
def _check_sorting_runs(candidates, id_char):
"""helper to ensure correct run-parsing and mapping"""
run_idx = [f.find(id_char) for f in candidates]
for config, idx in zip(candidates, run_idx):
assert config[idx - 1].isdigit()
assert not config[idx - 2].isdigit()
runs = [int(f[idx - 1]) for f, idx in zip(candidates, run_idx)]
return runs, candidates
def _parse_annotations_segments(segment_strings):
"""Read bad segments defintions from text file"""
for char in '}]': # multi line array definitions
segment_strings = segment_strings.replace(
char + ';', 'splitme'
)
split = segment_strings.split('splitme')
out = dict()
for entry in split:
if len(entry) == 1 or entry == '\n':
continue
key, rest = entry.split(' = ')
val = np.array(
[k for k in [''.join([c for c in e if c.isdigit()])
for e in rest.split()] if k.isdigit()], dtype=int)
# reshape and map to Python index
val = val.reshape(-1, 2) - 1
out[key.split('.')[1]] = val
return out
def read_annot(subject, data_type, run_index=0, hcp_path=op.curdir):
    """Read annotations for bad data and ICA.

    Parameters
    ----------
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
    run_index : int
        The run index. For the first run, use 0, for the second, use 1.
        Also see HCP documentation for the number of runs for a given data
        type.
    hcp_path : str
        The HCP directory, defaults to op.curdir.

    Returns
    -------
    out : dict
        The annotations, with keys 'channels', 'segments' and 'ica'.
    """
    def _first_with_suffix(fnames, suffix):
        # pick the unique annotation file of the requested kind
        return [k for k in fnames if k.endswith(suffix)][0]

    bads_files = get_file_paths(
        subject=subject, data_type=data_type, output='bads',
        run_index=run_index, hcp_path=hcp_path)
    ica_files = get_file_paths(
        subject=subject, data_type=data_type, output='ica',
        run_index=run_index, hcp_path=hcp_path)

    parsers = [
        ('channels', _parse_annotations_bad_channels,
         _first_with_suffix(bads_files, 'baddata_badchannels.txt')),
        ('segments', _parse_annotations_segments,
         _first_with_suffix(bads_files, 'baddata_badsegments.txt')),
        ('ica', _parse_annotations_ica,
         _first_with_suffix(ica_files, 'icaclass_vs.txt'))]

    out = dict()
    for subtype, parser, fname in parsers:
        with open(fname, 'r') as fid:
            out[subtype] = parser(fid.read())
    return out
def read_ica(subject, data_type, run_index=0, hcp_path=op.curdir):
    """Read precomputed independent components from subject

    Parameters
    ----------
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
    run_index : int
        The run index. For the first run, use 0, for the second, use 1.
        Also see HCP documentation for the number of runs for a given data
        type.
    hcp_path : str
        The HCP directory, defaults to op.curdir.

    Returns
    -------
    mat : numpy structured array
        The ICA mat struct.
    """
    ica_files = get_file_paths(
        subject=subject, data_type=data_type, output='ica',
        run_index=run_index, hcp_path=hcp_path)
    fname = [k for k in ica_files if k.endswith('icaclass.mat')][0]
    return scio.loadmat(fname, squeeze_me=True)['comp_class']
def _parse_annotations_bad_channels(bads_strings):
"""Read bad channel definitions from text file"""
for char in '}]':
bads_strings = bads_strings.replace(
char + ';', 'splitme'
)
split = bads_strings.split('splitme')
out = dict()
for entry in split:
if len(entry) == 1 or entry == '\n':
continue
key, rest = entry.split(' = ')
val = [ch for ch in rest.split("'") if ch.isalnum()]
out[key.split('.')[1]] = val
return out
def _parse_annotations_ica(ica_strings):
"""Read bad channel definitions from text file"""
# prepare splitting
for char in '}]': # multi line array definitions
ica_strings = ica_strings.replace(
char + ';', 'splitme'
)
# scalar variables
match_inds = list()
for match in re.finditer(';', ica_strings):
ii = match.start()
if ica_strings[ii - 1].isalnum():
match_inds.append(ii)
ica_strings = list(ica_strings)
for ii in match_inds:
ica_strings[ii] = 'splitme'
ica_strings = ''.join(ica_strings)
split = ica_strings.split('splitme')
out = dict()
for entry in split:
if len(entry) == 1 or entry == '\n':
continue
key, rest = entry.split(' = ')
if '[' in rest:
sep = ' '
else:
sep = "'"
val = [ch for ch in rest.split(sep) if ch.isalnum()]
if all(v.isdigit() for v in val):
val = [int(v) - 1 for v in val] # map to Python index
out[key.split('.')[1]] = val
return out
def read_evokeds(subject, data_type, onset='stim', sensor_mode='mag',
                 hcp_path=op.curdir, kind='average'):
    """Read HCP processed evoked data

    Parameters
    ----------
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
    onset : {'stim', 'resp'}
        The event onset. The mapping is generous, everything that is not a
        response is a stimulus, in the sense of internal or external events.
    sensor_mode : {'mag', 'planar'}
        The sensor projection. Defaults to 'mag'. Only relevant for
        evoked output.
    hcp_path : str
        The HCP directory, defaults to op.curdir.
    kind : {'average', 'standard_error'}
        The averaging mode. Defaults to 'average'.

    Returns
    -------
    evokeds : list of mne.Evoked
        The evoked responses read from all matching files.
    """
    info = read_info(subject=subject, data_type=data_type,
                     hcp_path=hcp_path, run_index=0)
    fnames = get_file_paths(
        subject=subject, data_type=data_type, onset=onset,
        output='evoked', sensor_mode=sensor_mode, hcp_path=hcp_path)
    evokeds = list()
    for fname in fnames:
        evokeds.extend(_read_evoked(fname, sensor_mode, info, kind))
    return evokeds
def _read_evoked(fname, sensor_mode, info, kind):
    """helper to read evokeds

    Builds one EvokedArray per requested statistic ('average' or
    'standard_error') from an HCP eravg mat file.
    """
    data = scio.loadmat(fname, squeeze_me=True)['data']
    ch_names = [ch for ch in data['label'].tolist()]
    times = data['time'].tolist()
    sfreq = 1. / np.diff(times)[0]  # sampling rate from the time axis
    info = _hcp_pick_info(info, ch_names)
    info['sfreq'] = sfreq
    out = list()
    # derive the condition comment from the file name tail, dropping the
    # extension, the 'eravg' marker and any brackets
    comment = ('_'.join(fname.split('/')[-1].split('_')[2:])
               .replace('.mat', '')
               .replace('_eravg_', '_')
               .replace('[', '')
               .replace(']', ''))
    # 'dof' is expected to be constant over channels — used as nave
    nave = np.unique(data['dof'].tolist())
    assert len(nave) == 1
    nave = nave[0]
    for key, this_kind in (('var', 'standard_error'), ('avg', 'average')):
        if this_kind != kind:  # only build the requested statistic
            continue
        evoked = EvokedArray(
            data=data[key].tolist(), info=info, tmin=min(times),
            kind=this_kind, comment=comment, nave=nave)
        out.append(evoked)
    return out
| {
"repo_name": "RPGOne/Skynet",
"path": "mne-hcp-master/hcp/io/read.py",
"copies": "3",
"size": "23686",
"license": "bsd-3-clause",
"hash": 9217707194220262000,
"line_mean": 24.9714912281,
"line_max": 79,
"alpha_frac": 0.5468631259,
"autogenerated": false,
"ratio": 3.280155103171306,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5327018229071306,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from nose.tools import assert_equal
import mne
import os
import os.path as op
import subprocess
import json
import warnings
from nose.tools import (assert_true, assert_equals, assert_not_equals,
assert_raises)
from mne.utils import _TempDir
from mne import io
from mne import read_events, pick_types
from mne.io.constants import FIFF
from meeg_preprocessing.utils import (
_get_git_head, get_versions, setup_provenance, set_eog_ecg_channels,
get_data_picks)
warnings.simplefilter('always')  # enable b/c these tests throw warnings

# Paths to the test fixtures shipped in the ``data`` directory next to
# this module.
base_dir = op.join(op.dirname(__file__), 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')  # raw MEG/EEG fixture
event_name = op.join(base_dir, 'test-eve.fif')  # events fixture
evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')  # not used in this chunk
def _get_data():
    """Load the fixture raw file, its events and a broad channel selection."""
    raw = io.Raw(raw_fname, add_eeg_ref=False)
    picks = pick_types(raw.info, meg=True, eeg=True, stim=True, ecg=True,
                       eog=True, include=['STI 014'], exclude='bads')
    return raw, read_events(event_name), picks
def test_get_git_hash():
    """Test reading git hashes"""
    assert_raises(ValueError, _get_git_head, 1e15)
    assert_raises(ValueError, _get_git_head, 'foofoo')

    def my_call(cmd):
        """Run ``cmd`` in a shell and return its decoded, stripped stdout."""
        process = subprocess.Popen(cmd,
                                   stdout=subprocess.PIPE,
                                   shell=True)
        out = process.communicate()[0].strip()
        del process
        # communicate() returns bytes on Python 3; decode so the substring
        # checks below work on both Python 2 and 3
        if not isinstance(out, str):
            out = out.decode('utf-8')
        return out

    tmp_dir = _TempDir()
    command = 'cd {}; git init'.format(tmp_dir)
    my_call(command)
    get_head = 'cd {gitpath}; git rev-parse --verify HEAD'.format(
        gitpath=tmp_dir
    )
    git_out = my_call('cd {}; '
                      'echo 123 >> tmp.txt; '
                      'git add tmp.txt; '
                      'git commit -am "blub"'.format(tmp_dir))
    assert_true('1 file changed' in git_out)
    assert_true('1 insertion' in git_out)
    assert_true('create mode 100644 tmp.txt' in git_out)
    assert_true('tmp.txt' in os.listdir(tmp_dir))
    head1 = _get_git_head(tmp_dir)
    head2 = my_call(get_head)
    assert_equals(head1, head2)
    git_out = my_call('cd {};'
                      'echo -n 123 >> tmp.txt; '
                      'git add tmp.txt; '
                      'git commit -am "blub2"'.format(tmp_dir))
    head3 = _get_git_head(tmp_dir)
    head4 = my_call(get_head)
    assert_equals(head3, head4)
    assert_not_equals(head1, head3)
    # `long` only exists on Python 2 and made this raise NameError on
    # Python 3; checking the head parses as non-negative hex is equivalent
    assert_true(int(head3, 16) >= 0)
def test_get_version():
    """Test version checks"""
    import sys
    from meeg_preprocessing import __path__ as cpath
    from meeg_preprocessing import __version__ as cversion
    pkg_name = 'meeg_preprocessing'
    pkg_dir = cpath[0]

    # the reported version must embed both the git head and __version__
    versions = get_versions(sys)
    assert_true(_get_git_head(pkg_dir) in versions[pkg_name])
    assert_true(cversion in versions[pkg_name])

    # a patched __version__ must be picked up on the next call
    import meeg_preprocessing
    meeg_preprocessing.__version__ = '0.2.git'
    versions = get_versions(sys)
    assert_true('0.2.git' in versions[pkg_name])
def test_setup_provenance():
    """Test provenance tracking"""
    # exercise three ways of passing the config: default lookup next to the
    # script, an absolute path to a .py file, and a non-Python text file
    for config_opt in ['abs_py', 'default', 'other']:
        tmp_dir = _TempDir()
        if config_opt == 'default':
            config_fname = op.join(op.dirname(__file__), 'config.py')
            config_content = 'import this'
            config_param = None
        elif config_opt == 'abs_py':
            config_fname = op.join(tmp_dir, 'config.py')
            config_content = 'import antigravity'
            config_param = config_fname
        elif config_opt == 'other':
            config_fname = op.join(tmp_dir, 'config.txt')
            config_content = 'my_config :: 42'
            config_param = config_fname
        with open(config_fname, 'w') as fid_config:
            fid_config.write(config_content)
        report, run_id, results_dir, logger = setup_provenance(
            script=__file__, results_dir=tmp_dir, config=config_param)
        logging_dir = op.join(results_dir, run_id)
        # the provenance dir must contain run info, the log and a script copy
        assert_true(op.isdir(logging_dir))
        assert_true(op.isfile(op.join(logging_dir, 'run_time.json')))
        assert_true(op.isfile(op.join(logging_dir, 'run_output.log')))
        assert_true(op.isfile(op.join(logging_dir, 'script.py')))
        config_basename = op.split(config_fname)[-1]
        # the config file must be copied verbatim into the provenance dir
        with open(op.join(results_dir, run_id, config_basename)) as config_fid:
            config_code = config_fid.read()
        assert_equal(config_code, config_content)
        with open(__file__) as fid:
            this_file_code = fid.read()
        # the executed script itself must be copied verbatim as 'script.py'
        with open(op.join(results_dir, run_id, 'script.py')) as fid:
            other_file_code = fid.read()
        assert_equals(this_file_code, other_file_code)
        with open(op.join(results_dir, run_id, 'run_time.json')) as fid:
            modules = json.load(fid)
        # version info of this package must be tracked in run_time.json
        assert_true('meeg_preprocessing' in modules)
        assert_equals(report.title, op.splitext(op.split(__file__)[1])[0])
        assert_equals(report.data_path, logging_dir)
        if config_opt == 'default':
            os.remove(config_fname)  # don't leave the fixture config behind
def test_set_eog_ecg_channels():
    """Test recoding of EEG channels as EOG/ECG channels."""
    raw, _, _ = _get_data()
    set_eog_ecg_channels(raw, 'EEG 001', 'EEG 002')
    eog_idx, ecg_idx = [raw.ch_names.index(k) for k in ['EEG 001', 'EEG 002']]
    # the channel kinds must have been rewritten in the measurement info
    assert_equal(raw.info['chs'][eog_idx]['kind'], FIFF.FIFFV_EOG_CH)
    assert_equal(raw.info['chs'][ecg_idx]['kind'], FIFF.FIFFV_ECG_CH)
def test_get_data_picks():
    """Test creating pick_lists"""
    rng = np.random.RandomState(909)
    # mixed sensor types: one grad, two mag, one eeg
    data = rng.random_sample((4, 2000))
    names = ['MEG %03d' % idx for idx in (1, 2, 3, 4)]
    info = mne.create_info(ch_names=names, sfreq=250.0,
                           ch_types=['grad', 'mag', 'mag', 'eeg'])
    raw = mne.io.RawArray(data, info)
    # default: one entry per channel type, mag listed first
    pick_list = get_data_picks(raw)
    assert_equal(len(pick_list), 3)
    assert_equal(pick_list[0][1], 'mag')
    # meg_combined=False must match the default behavior
    pick_list2 = get_data_picks(raw, meg_combined=False)
    assert_equal(len(pick_list), len(pick_list2))
    assert_equal(pick_list2[0][1], 'mag')
    # combining MEG merges grad + mag into a single 'meg' entry
    pick_list2 = get_data_picks(raw, meg_combined=True)
    assert_equal(len(pick_list), len(pick_list2) + 1)
    assert_equal(pick_list2[0][1], 'meg')
    # homogeneous recording (mag only): combining cannot shrink the list
    data = rng.random_sample((4, 2000))
    info = mne.create_info(ch_names=names, sfreq=250.0,
                           ch_types=['mag'] * 4)
    raw = mne.io.RawArray(data, info)
    pick_list = get_data_picks(raw)
    assert_equal(len(pick_list), 1)
    assert_equal(pick_list[0][1], 'mag')
    pick_list2 = get_data_picks(raw, meg_combined=True)
    assert_equal(len(pick_list), len(pick_list2))
    assert_equal(pick_list2[0][1], 'mag')
if __name__ == "__main__":
    # allow running this test module directly via nose
    import nose
    nose.run(defaultTest=__name__)
| {
"repo_name": "dengemann/meeg-preprocessing",
"path": "meeg_preprocessing/tests/test_utils.py",
"copies": "1",
"size": "7036",
"license": "bsd-2-clause",
"hash": -2216791777537629200,
"line_mean": 32.345971564,
"line_max": 79,
"alpha_frac": 0.6050312678,
"autogenerated": false,
"ratio": 3.1298932384341636,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4234924506234164,
"avg_score": null,
"num_lines": null
} |
import numpy as np
import mne
from mne.io import set_bipolar_reference
from mne.io.bti.bti import (
_convert_coil_trans, _coil_trans_to_loc, _get_bti_dev_t,
_loc_to_coil_trans)
from mne.transforms import Transform
from mne.utils import logger
from .io import read_info
from .io.read import _hcp_pick_info
from .io.read import _data_labels
def set_eog_ecg_channels(raw):
    """Set the HCP ECG and EOG channels

    .. note::
        Operates in place.

    Parameters
    ----------
    raw : instance of Raw
        the hcp raw data.
    """
    # rewire each artifact electrode pair into a single bipolar channel
    for name in ('ECG', 'VEOG', 'HEOG'):
        set_bipolar_reference(raw, anode=name + '-', cathode=name + '+',
                              ch_name=name, copy=False)
    # tag the freshly created channels with their proper sensor types
    raw.set_channel_types({'ECG': 'ecg', 'VEOG': 'eog', 'HEOG': 'eog'})
def apply_ica_hcp(raw, ica_mat, exclude):
    """Apply the HCP ICA.

    .. note::
        Operates in place and data must be loaded.

    Parameters
    ----------
    raw : instance of Raw
        the hcp raw data.
    ica_mat : numpy structured array
        The hcp ICA solution
    exclude : array-like
        the components to be excluded.
    """
    if not raw.preload:
        raise RuntimeError('raw data must be loaded, use raw.load_data()')
    # the double .tolist() unwraps nested object arrays — presumably the
    # structure produced by scipy.io.loadmat for MATLAB cell arrays; verify
    ch_names = ica_mat['topolabel'].tolist().tolist()
    picks = mne.pick_channels(raw.info['ch_names'], include=ch_names)
    # pick_channels preserves raw order, so the labels must line up exactly
    assert ch_names == [raw.ch_names[p] for p in picks]
    unmixing_matrix = np.array(ica_mat['unmixing'].tolist())
    n_components, n_channels = unmixing_matrix.shape
    mixing = np.array(ica_mat['topo'].tolist())
    # projection removing the excluded components: I - A[:, ex] @ W[ex]
    proj_mat = (np.eye(n_channels) - np.dot(
        mixing[:, exclude], unmixing_matrix[exclude]))
    # scale the full data up before projecting the picked rows, then back —
    # NOTE(review): presumably for numerical conditioning (T -> fT); confirm
    raw._data *= 1e15
    raw._data[picks] = np.dot(proj_mat, raw._data[picks])
    raw._data /= 1e15
def apply_ref_correction(raw, decim_fit=100):
    """Regress out MEG ref channels

    Computes linear models from MEG reference channels
    on each sensors, predicts the MEG data and subtracts
    and computes the residual by subtracting the predictions.

    .. note::
        Operates in place.

    .. note::
        Can be memory demanding. To alleviate this problem the model can be fit
        on decimated data. This is legitimate because the linear model does
        not have any representation of time, only the distributions
        matter.

    Parameters
    ----------
    raw : instance of Raw
        The BTi/4D raw data.
    decim_fit : int
        The decimation factor used for fitting the model.
        Defaults to 100.
    """
    from sklearn.linear_model import LinearRegression
    meg_picks = mne.pick_types(raw.info, ref_meg=False, meg=True)
    ref_picks = mne.pick_types(raw.info, ref_meg=True, meg=False)
    if len(ref_picks) == 0:
        raise ValueError('Could not find meg ref channels.')
    # NOTE(review): `normalize=True` was deprecated/removed in modern
    # scikit-learn; this code targets an older sklearn — confirm pinning.
    estimator = LinearRegression(normalize=True)  # ref MAG + GRAD
    # fit on every decim_fit-th sample, but predict on the full-rate data
    Y_pred = estimator.fit(
        raw[ref_picks][0][:, ::decim_fit].T,
        raw[meg_picks][0][:, ::decim_fit].T).predict(
        raw[ref_picks][0].T)
    # subtract the reference-explained part in place
    raw._data[meg_picks] -= Y_pred.T
def map_ch_coords_to_mne(inst):
    """Transform sensors to MNE coordinates

    .. note::
        operates in place

    .. warning::
        For several reasons we do not use the MNE coordinates for the inverse
        modeling. This however won't always play nicely with visualization.

    Parameters
    ----------
    inst : MNE data containers
        Raw, Epochs, Evoked.
    """
    bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t())
    dev_ctf_t = inst.info['dev_ctf_t']
    for ch in inst.info['chs']:
        # work on a copy of the location vector
        loc = ch['loc'][:]
        if loc is None:
            continue
        logger.debug('converting %s' % ch['ch_name'])
        # loc -> coil transform -> converted transform -> loc
        coil_trans = _convert_coil_trans(
            _loc_to_coil_trans(loc), dev_ctf_t, bti_dev_t)
        ch['loc'] = _coil_trans_to_loc(coil_trans)
def interpolate_missing(inst, subject, data_type, hcp_path,
                        run_index=0, mode='fast'):
    """Interpolate all MEG channels that are missing

    .. warning::
        This function may require some memory.

    Parameters
    ----------
    inst : MNE data containers
        Raw, Epochs, Evoked.
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
        'noise_empty_room'
        'noise_subject'
    run_index : int
        The run index. For the first run, use 0, for the second, use 1.
        Also see HCP documentation for the number of runs for a given data
        type.
    hcp_path : str
        The HCP directory, defaults to op.curdir.
    mode : str
        Either `'accurate'` or `'fast'`, determines the quality of the
        Legendre polynomial expansion used for interpolation of MEG
        channels.

    Returns
    -------
    out : MNE data containers
        Raw, Epochs, Evoked but with missing channels interpolated.
    """
    try:
        # NOTE(review): the conditional below is a no-op — both branches
        # evaluate to run_index; probably a leftover. Confirm intent.
        info = read_info(
            subject=subject, data_type=data_type, hcp_path=hcp_path,
            run_index=run_index if run_index is None else run_index)
    except (ValueError, IOError):
        raise ValueError(
            'could not find config to complete info.'
            'reading only channel positions without '
            'transforms.')
    # full BTI MEG channels
    bti_meg_channel_names = ['A%i' % ii for ii in range(1, 249, 1)]
    # figure out which channels are missing
    bti_meg_channel_missing_names = [
        ch for ch in bti_meg_channel_names if ch not in inst.ch_names]
    # get meg picks
    picks_meg = mne.pick_types(inst.info, meg=True, ref_meg=False)
    # some non-contiguous block in the middle so let's try to invert
    picks_other = [ii for ii in range(len(inst.ch_names)) if ii not in
                   picks_meg]
    other_chans = [inst.ch_names[po] for po in picks_other]
    # compute new n channels: present MEG + missing MEG + everything else
    n_channels = (len(picks_meg) +
                  len(bti_meg_channel_missing_names) +
                  len(other_chans))
    # restrict info to final channels
    # ! info read from config file is not sorted like inst.info
    # ! therefore picking order matters, but we don't know it.
    # ! so far we will rely on the consistent layout for raw files
    final_names = [ch for ch in _data_labels if ch in bti_meg_channel_names or
                   ch in other_chans]
    info = _hcp_pick_info(info, final_names)
    assert len(info['ch_names']) == n_channels
    # rows of the output array that already have data in `inst`
    existing_channels_index = [ii for ii, ch in enumerate(info['ch_names']) if
                               ch in inst.ch_names]
    info['sfreq'] = inst.info['sfreq']
    # compute shape of data to be added
    is_raw = isinstance(inst, (mne.io.Raw,
                               mne.io.RawArray,
                               mne.io.bti.bti.RawBTi))
    is_epochs = isinstance(inst, mne.BaseEpochs)
    is_evoked = isinstance(inst, (mne.Evoked, mne.EvokedArray))
    if is_raw:
        shape = (n_channels,
                 (inst.last_samp - inst.first_samp) + 1)
        data = inst._data
    elif is_epochs:
        # transpose to (channels, epochs, times) so channel fill works below
        shape = (n_channels, len(inst.events), len(inst.times))
        data = np.transpose(inst.get_data(), (1, 0, 2))
    elif is_evoked:
        shape = (n_channels, len(inst.times))
        data = inst.data
    else:
        raise ValueError('instance must be Raw, Epochs '
                         'or Evoked')
    # scatter existing data into the enlarged array; missing rows stay
    # uninitialized until interpolate_bads overwrites them
    out_data = np.empty(shape, dtype=data.dtype)
    out_data[existing_channels_index] = data
    if is_raw:
        out = mne.io.RawArray(out_data, info)
        if inst.annotations is not None:
            out.annotations = inst.annotations
    elif is_epochs:
        out = mne.EpochsArray(data=np.transpose(out_data, (1, 0, 2)),
                              info=info, events=inst.events,
                              tmin=inst.times.min(), event_id=inst.event_id)
    elif is_evoked:
        out = mne.EvokedArray(
            data=out_data, info=info, tmin=inst.times.min(),
            comment=inst.comment, nave=inst.nave, kind=inst.kind)
    else:
        raise ValueError('instance must be Raw, Epochs '
                         'or Evoked')
    # set "bad" channels and interpolate.
    out.info['bads'] = bti_meg_channel_missing_names
    out.interpolate_bads(mode=mode)
    return out
| {
"repo_name": "RPGOne/Skynet",
"path": "mne-hcp-master/hcp/preprocessing.py",
"copies": "3",
"size": "8494",
"license": "bsd-3-clause",
"hash": -4397618080943197700,
"line_mean": 32.4409448819,
"line_max": 78,
"alpha_frac": 0.5990110666,
"autogenerated": false,
"ratio": 3.5128205128205128,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 254
} |
import os.path as op
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_array_equal
import mne
from meeg_preprocessing.preprocessing import (check_apply_filter, compute_ica,
_prepare_filter_plot)
# Module-level fixtures: load the bundled test recording once for all tests
# and use a fixed seed so simulated data is reproducible.
test_raw_fname = op.join(op.dirname(__file__), 'data', 'test_raw.fif')
raw = mne.io.Raw(test_raw_fname, preload=True)
rng = np.random.RandomState(909)
def test_check_apply_filter():
    """Test filtering and the figure layout of the filter plot."""
    import matplotlib as mpl
    test_data = rng.random_sample((4, 2000))
    ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]]
    ch_types = ['grad', 'mag', 'mag', 'eeg']
    sfreq = 250.0
    info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
    test_raw1 = mne.io.RawArray(test_data, info)
    # second variant: a single-channel raw to exercise the 1-row layout
    test_raw2 = test_raw1.copy()
    test_raw2.pick_channels(['MEG 001'])
    # expected (picks, ch_type) pairs for the multi-type and single-type case
    expected_picks_lists = [
        [(np.array([1, 2]), 'mag'),
         (np.array([0]), 'grad'),
         (np.array([3]), 'eeg')],
        [(np.array([0]), 'grad')]
    ]
    test_raws = [
        test_raw1,
        test_raw2
    ]
    test_nrows = (3, 1)
    # a multi-row figure yields an ndarray of axes, a single row a list
    test_axes = (np.ndarray, list)
    iter_tests = zip(expected_picks_lists, test_raws, test_nrows, test_axes)
    for expected_picks_list, test_raw, this_test_nrows, this_test_axes in \
            iter_tests:
        picks_list, n_rows, fig, axes = _prepare_filter_plot(test_raw, None)
        assert_equal(n_rows, this_test_nrows)
        assert_true(isinstance(fig, mpl.figure.Figure))
        assert_true(isinstance(axes, this_test_axes))
        for (picks1, ch1), (picks2, ch2) in zip(expected_picks_list,
                                                picks_list):
            assert_equal(ch1, ch2)
            assert_array_equal(picks1, picks2)
    # test filtering
    raw2 = raw.pick_channels(['MEG 1113'], copy=True)
    lp_before = raw2.info['lowpass']
    filter_params = dict(l_freq=0.5, h_freq=20, n_jobs=1,
                         method='fft', l_trans_bandwidth=0.1,
                         h_trans_bandwidth=0.5)
    fig, report = check_apply_filter(raw2, 'test-subject',
                                     filter_params=filter_params)
    lp_after = raw2.info['lowpass']
    assert_true(isinstance(fig, mpl.figure.Figure))
    assert_equal(report.sections, ['FILTER'])
    assert_equal(len(report.html), 1)
    # low-pass filtering must have lowered the recorded lowpass edge
    assert_true(lp_after < lp_before)
    # a custom figsize must actually change the produced figure size
    fig2, _ = check_apply_filter(raw2, 'test-subject', figsize=(12, 12))
    assert_true(np.any(fig.get_size_inches() != fig2.get_size_inches()))
def test_preprocessing_ica():
    """Test ICA preprocessing"""
    n_max_ecg = 1
    n_max_eog = 0
    # expected report size — presumably 5 figures per detected artifact
    # family plus one overview figure; mirrors compute_ica's plotting
    n_plots = (5 if n_max_ecg > 0 else 0) + (5 if n_max_eog > 0 else 0)
    n_plots += (1 if n_plots != 0 else 0)
    ica, report = compute_ica(raw, n_components=4, picks=[0, 1, 2, 3, 5],
                              subject='test-subject', decim=2,
                              n_max_ecg=n_max_ecg, n_max_eog=n_max_eog)
    assert_equal(len(ica.exclude), n_max_ecg)
    assert_equal(report.sections, ['MAG+GRAD ECG', 'MAG+GRAD RAW'])
    assert_equal(len(report.html), n_plots)
    picks = np.array([0, 1, 2, 3, 5])
    rank = raw.estimate_rank(picks=picks)
    # n_components='rank' must fall back to the estimated data rank
    ica, report = compute_ica(raw, n_components='rank', picks=picks,
                              subject='test-subject', decim=2,
                              n_max_ecg=n_max_ecg, n_max_eog=n_max_eog)
    assert_equal(ica.n_components_, rank)
if __name__ == "__main__":
    # allow running this test module directly via nose
    import nose
    nose.run(defaultTest=__name__)
| {
"repo_name": "cmoutard/meeg-preprocessing",
"path": "meeg_preprocessing/tests/test_preprocessing.py",
"copies": "1",
"size": "3693",
"license": "bsd-2-clause",
"hash": 8498376648605337000,
"line_mean": 34.1714285714,
"line_max": 78,
"alpha_frac": 0.5786623341,
"autogenerated": false,
"ratio": 3.044517724649629,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9114454594879069,
"avg_score": 0.0017450927741119765,
"num_lines": 105
} |
"""
=========================================
Load and process previously saved records
=========================================
In this example previously downloaded records will be loaded.
We explore how to access and print single records.
Subsequently, we will explore filtering and combining records.
A last section shows how to save and export results for usage
with bibliographic software.
"""
import pymed as pm
import numpy as np
print(__doc__)
# load records
recs = pm.read_records('sample_records_dki.json')
###############################################################################
# Access records in different ways
# ... read one record, nicely printed.
print(recs[12].to_ascii(width=100))
# ... get the publication year as integer
print(recs[12].year)
# ... get contents as corpus (concatenated as one string)
# this is particularly useful for search in terms in records.
print(recs[12].as_corpus())
# ... resolve digital object identifier of a record
# (requires network connection --- uncomment if you're connected).
# print recs[12].resolve_doi()
# Uncomment the following line to read through your records and discard
# uninteresting ones. Hit 'n' to drop, hit any other key to see the next
# record.
# recs.browse()
# Note. records are special cases of lists and a single records are special
# cases of dictionaries.
last_rec = recs.pop(-1)
print(last_rec.keys())
###############################################################################
# Filter and combine records
# get all records that have an abstract and are related to brains.
recs = pm.Records(r for r in recs if 'AB' in r and r.match('brain'))
print(recs)
# remove all records published before 2010.
recs = pm.Records(r for r in recs if r.year > 2010)
print(recs)
# Because of the PubMed ID records are unique and can therefore be hashed.
# This means you can use records as keys in dictionaries or use set logic
# to remove duplicates, take differences, etc.
# In the following example we will create two overlapping collections
# using random indices and then apply set operations to uniquely combine them.
n_rec = len(recs)
inds1, inds2, = np.random.randint(n_rec / 2, size=(2, n_rec))
recs1 = pm.Records(rec for ii, rec in enumerate(recs) if ii in inds1)
recs2 = pm.Records(rec for ii, rec in enumerate(recs) if ii in inds2)
# Now print unique records.
print(pm.Records(set(recs1 + recs2)))
###############################################################################
# Save and export results.
# Finally we can export the records to a BibTex file. Another valid choice is
# the Medline nbib format using he `save_as_nbib` method.
# ... print single record in BibTex format
print(recs[12].to_bibtex())
# ... save all records
recs.save_as_bibtex('mybib.bib')
| {
"repo_name": "PyMed/PyMed",
"path": "examples/load_and_work_with_records.py",
"copies": "1",
"size": "2862",
"license": "bsd-3-clause",
"hash": -7188274343648773000,
"line_mean": 31.1573033708,
"line_max": 79,
"alpha_frac": 0.6565338924,
"autogenerated": false,
"ratio": 3.8313253012048194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.998785919360482,
"avg_score": 0,
"num_lines": 89
} |
import numpy as np
from scipy.linalg import eigh
from ..filter import filter_data
from ..cov import _regularized_covariance
from . import TransformerMixin, BaseEstimator
from ..time_frequency import psd_array_welch
from ..utils import _time_mask, fill_doc, _validate_type, _check_option
from ..io.pick import _get_channel_types, _picks_to_idx
@fill_doc
class SSD(BaseEstimator, TransformerMixin):
    """
    M/EEG signal decomposition using the Spatio-Spectral Decomposition (SSD).

    SSD seeks to maximize the power at a frequency band of interest while
    simultaneously minimizing it at the flanking (surrounding) frequency bins
    (considered noise). It extremizes the covariance matrices associated with
    signal and noise :footcite:`NikulinEtAl2011`.

    SSD can either be used as a dimensionality reduction method or a
    ‘denoised’ low rank factorization method :footcite:`HaufeEtAl2014b`.

    Parameters
    ----------
    info : instance of mne.Info
        The info object containing the channel and sampling information.
        It must match the input data.
    filt_params_signal : dict
        Filtering for the frequencies of interest.
    filt_params_noise : dict
        Filtering for the frequencies of non-interest.
    reg : float | str | None (default)
        Which covariance estimator to use.
        If not None (same as 'empirical'), allow regularization for
        covariance estimation. If float, shrinkage is used
        (0 <= shrinkage <= 1). For str options, reg will be passed to
        method to :func:`mne.compute_covariance`.
    n_components : int | None (default None)
        The number of components to extract from the signal.
        If n_components is None, no dimensionality reduction is applied.
    picks : array of int | None (default None)
        The indices of good channels.
    sort_by_spectral_ratio : bool (default False)
        If set to True, the components are sorted accordingly
        to the spectral ratio.
        See Eq. (24) in :footcite:`NikulinEtAl2011`.
    return_filtered : bool (default True)
        If return_filtered is True, data is bandpassed and projected onto
        the SSD components.
    n_fft : int (default None)
        If sort_by_spectral_ratio is set to True, then the SSD sources will be
        sorted accordingly to their spectral ratio which is calculated based on
        :func:`mne.time_frequency.psd_array_welch` function. The n_fft parameter
        set the length of FFT used.
        See :func:`mne.time_frequency.psd_array_welch` for more information.
    cov_method_params : dict | None (default None)
        As in :class:`mne.decoding.SPoC`
        The default is None.
    rank : None | dict | ‘info’ | ‘full’
        As in :class:`mne.decoding.SPoC`
        This controls the rank computation that can be read from the
        measurement info or estimated from the data.
        See Notes of :func:`mne.compute_rank` for details.
        We recommend to use 'full' when working with epoched data.

    Attributes
    ----------
    filters_ : array, shape (n_channels, n_components)
        The spatial filters to be multiplied with the signal.
    patterns_ : array, shape (n_components, n_channels)
        The patterns for reconstructing the signal from the filtered data.

    References
    ----------
    .. footbibliography::
    """
    def __init__(self, info, filt_params_signal, filt_params_noise,
                 reg=None, n_components=None, picks=None,
                 sort_by_spectral_ratio=True, return_filtered=False,
                 n_fft=None, cov_method_params=None, rank=None):
        """Initialize instance."""
        dicts = {"signal": filt_params_signal, "noise": filt_params_noise}
        # validate l_freq/h_freq presence and type for both filter dicts
        for param, dd in [('l', 0), ('h', 0), ('l', 1), ('h', 1)]:
            key = ('signal', 'noise')[dd]
            if param + '_freq' not in dicts[key]:
                raise ValueError(
                    '%s must be defined in filter parameters for %s'
                    % (param + '_freq', key))
            val = dicts[key][param + '_freq']
            if not isinstance(val, (int, float)):
                # _validate_type raises a TypeError with a helpful message;
                # plain ints/floats short-circuit above
                _validate_type(val, ('numeric',), f'{key} {param}_freq')
        # check freq bands: the signal band must lie within the noise band
        if (filt_params_noise['l_freq'] > filt_params_signal['l_freq'] or
                filt_params_signal['h_freq'] > filt_params_noise['h_freq']):
            raise ValueError('Wrongly specified frequency bands!\n'
                             'The signal band-pass must be within the noise '
                             'band-pass!')
        self.picks_ = _picks_to_idx(info, picks, none='data', exclude='bads')
        del picks
        ch_types = _get_channel_types(info, picks=self.picks_, unique=True)
        if len(ch_types) > 1:
            raise ValueError('At this point SSD only supports fitting '
                             'single channel types. Your info has %i types' %
                             (len(ch_types)))
        self.info = info
        self.freqs_signal = (filt_params_signal['l_freq'],
                             filt_params_signal['h_freq'])
        self.freqs_noise = (filt_params_noise['l_freq'],
                            filt_params_noise['h_freq'])
        self.filt_params_signal = filt_params_signal
        self.filt_params_noise = filt_params_noise
        self.sort_by_spectral_ratio = sort_by_spectral_ratio
        # default FFT length: one second of data
        if n_fft is None:
            self.n_fft = int(self.info['sfreq'])
        else:
            self.n_fft = int(n_fft)
        self.return_filtered = return_filtered
        self.reg = reg
        self.n_components = n_components
        self.rank = rank
        self.cov_method_params = cov_method_params
    def _check_X(self, X):
        """Check input data shape against the stored info."""
        _validate_type(X, np.ndarray, 'X')
        _check_option('X.ndim', X.ndim, (2, 3))
        n_chan = X.shape[-2]
        if n_chan != self.info['nchan']:
            raise ValueError('Info must match the input data.'
                             'Found %i channels but expected %i.' %
                             (n_chan, self.info['nchan']))
    def fit(self, X, y=None):
        """Estimate the SSD decomposition on raw or epoched data.

        Parameters
        ----------
        X : array, shape ([n_epochs, ]n_channels, n_times)
            The input data from which to estimate the SSD. Either 2D array
            obtained from continuous data or 3D array obtained from epoched
            data.
        y : None | array, shape (n_samples,)
            Used for scikit-learn compatibility.

        Returns
        -------
        self : instance of SSD
            Returns the modified instance.
        """
        self._check_X(X)
        X_aux = X[..., self.picks_, :]
        X_signal = filter_data(
            X_aux, self.info['sfreq'], **self.filt_params_signal)
        X_noise = filter_data(
            X_aux, self.info['sfreq'], **self.filt_params_noise)
        # remove the signal band from the noise band-passed data so the two
        # covariance estimates do not overlap spectrally
        X_noise -= X_signal
        if X.ndim == 3:
            # concatenate epochs along time before covariance estimation
            X_signal = np.hstack(X_signal)
            X_noise = np.hstack(X_noise)
        cov_signal = _regularized_covariance(
            X_signal, reg=self.reg, method_params=self.cov_method_params,
            rank=self.rank, info=self.info)
        cov_noise = _regularized_covariance(
            X_noise, reg=self.reg, method_params=self.cov_method_params,
            rank=self.rank, info=self.info)
        # generalized eigenvalue problem: maximize signal vs. noise power
        eigvals_, eigvects_ = eigh(cov_signal, cov_noise)
        # sort in descending order
        ix = np.argsort(eigvals_)[::-1]
        self.eigvals_ = eigvals_[ix]
        self.filters_ = eigvects_[:, ix]
        self.patterns_ = np.linalg.pinv(self.filters_)
        return self
    def transform(self, X):
        """Estimate epochs sources given the SSD filters.

        Parameters
        ----------
        X : array, shape ([n_epochs, ]n_channels, n_times)
            The input data from which to estimate the SSD. Either 2D array
            obtained from continuous data or 3D array obtained from epoched
            data.

        Returns
        -------
        X_ssd : array, shape ([n_epochs, ]n_components, n_times)
            The processed data.
        """
        self._check_X(X)
        # NOTE(review): before fit, self.filters_ does not exist, so this
        # raises AttributeError (not the RuntimeError below); the companion
        # tests rely on AttributeError — confirm before "fixing".
        if self.filters_ is None:
            raise RuntimeError('No filters available. Please first call fit')
        X_ssd = self.filters_.T @ X[..., self.picks_, :]
        # We assume that ordering by spectral ratio is more important
        # than the initial ordering. This is why we apply component picks
        # after ordering.
        sorter_spec = Ellipsis
        if self.sort_by_spectral_ratio:
            _, sorter_spec = self.get_spectral_ratio(ssd_sources=X_ssd)
        if X.ndim == 2:
            X_ssd = X_ssd[sorter_spec][:self.n_components]
        else:
            X_ssd = X_ssd[:, sorter_spec, :][:, :self.n_components, :]
        return X_ssd
    def get_spectral_ratio(self, ssd_sources):
        """Get the spectal signal-to-noise ratio for each spatial filter.

        Spectral ratio measure for best n_components selection
        See :footcite:`NikulinEtAl2011`, Eq. (24).

        Parameters
        ----------
        ssd_sources : array
            Data projectded to SSD space.

        Returns
        -------
        spec_ratio : array, shape (n_channels)
            Array with the sprectal ratio value for each component.
        sorter_spec : array, shape (n_channels)
            Array of indices for sorting spec_ratio.

        References
        ----------
        .. footbibliography::
        """
        psd, freqs = psd_array_welch(
            ssd_sources, sfreq=self.info['sfreq'], n_fft=self.n_fft)
        # _time_mask works on any monotonic axis, here the frequency axis
        sig_idx = _time_mask(freqs, *self.freqs_signal)
        noise_idx = _time_mask(freqs, *self.freqs_noise)
        if psd.ndim == 3:
            # epoched input: average over frequencies, then over epochs
            mean_sig = psd[:, :, sig_idx].mean(axis=2).mean(axis=0)
            mean_noise = psd[:, :, noise_idx].mean(axis=2).mean(axis=0)
            spec_ratio = mean_sig / mean_noise
        else:
            mean_sig = psd[:, sig_idx].mean(axis=1)
            mean_noise = psd[:, noise_idx].mean(axis=1)
            spec_ratio = mean_sig / mean_noise
        sorter_spec = spec_ratio.argsort()[::-1]
        return spec_ratio, sorter_spec
    def inverse_transform(self):
        """Not implemented yet."""
        raise NotImplementedError('inverse_transform is not yet available.')
    def apply(self, X):
        """Remove selected components from the signal.

        This procedure will reconstruct M/EEG signals from which the dynamics
        described by the excluded components is subtracted
        (denoised by low-rank factorization).
        See :footcite:`HaufeEtAl2014b` for more information.

        .. note:: Unlike in other classes with an apply method,
           only NumPy arrays are supported (not instances of MNE objects).

        Parameters
        ----------
        X : array, shape ([n_epochs, ]n_channels, n_times)
            The input data from which to estimate the SSD. Either 2D array
            obtained from continuous data or 3D array obtained from epoched
            data.

        Returns
        -------
        X : array, shape ([n_epochs, ]n_channels, n_times)
            The processed data.
        """
        X_ssd = self.transform(X)
        sorter_spec = Ellipsis
        # NOTE(review): transform() may already have sorted/truncated the
        # sources; the ratio is recomputed here on its output and used to
        # index the unsorted patterns_ — verify this double sorting is
        # intended (it is a no-op when sort_by_spectral_ratio is False).
        if self.sort_by_spectral_ratio:
            _, sorter_spec = self.get_spectral_ratio(ssd_sources=X_ssd)
        pick_patterns = self.patterns_[sorter_spec, :self.n_components].T
        X = pick_patterns @ X_ssd
        return X
| {
"repo_name": "larsoner/mne-python",
"path": "mne/decoding/ssd.py",
"copies": "4",
"size": "11726",
"license": "bsd-3-clause",
"hash": -6750656417355844000,
"line_mean": 39.8153310105,
"line_max": 79,
"alpha_frac": 0.5909168516,
"autogenerated": false,
"ratio": 3.909879839786382,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6500796691386382,
"avg_score": null,
"num_lines": null
} |
import numpy as np
import pytest
from numpy.testing import (assert_array_almost_equal, assert_array_equal)
from mne import io
from mne.time_frequency import psd_array_welch
from mne.decoding.ssd import SSD
from mne.utils import requires_sklearn
from mne.filter import filter_data
from mne import create_info
from mne.decoding import CSP
# Signal band of interest and the flanking noise band (Hz), shared by tests.
freqs_sig = 9, 12
freqs_noise = 8, 13
def simulate_data(freqs_sig=(9, 12), n_trials=100, n_channels=20,
                  n_samples=500, samples_per_second=250,
                  n_components=5, SNR=0.05, random_state=42):
    """Simulate data according to an instantaneous mixin model.

    Data are simulated in the statistical source space, where n=n_components
    sources contain the peak of interest.

    Parameters
    ----------
    freqs_sig : tuple of float
        Low and high edge (Hz) of the band-pass applied to the signal
        sources. A tuple default avoids the mutable-default-argument trap.
    n_trials, n_samples : int
        Number of trials and samples per trial; the sources span
        n_trials * n_samples time points.
    n_channels : int
        Number of simulated sensors.
    samples_per_second : float
        Sampling frequency passed to the band-pass filter.
    n_components : int
        Number of band-limited signal sources; the rest is broadband noise.
    SNR : float
        Linear mixing weight of the signal part (0 < SNR < 1).
    random_state : int
        Seed for the random generator, for reproducibility.

    Returns
    -------
    X : array, shape (n_channels, n_trials * n_samples)
        The mixed sensor data.
    mixing_mat : array, shape (n_channels, n_channels)
        The orthogonal mixing matrix.
    S : array, shape (n_channels, n_trials * n_samples)
        The source time series (signal sources first).
    """
    rng = np.random.RandomState(random_state)
    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
                              l_trans_bandwidth=1, h_trans_bandwidth=1,
                              fir_design='firwin')
    # generate an orthogonal mixin matrix
    mixing_mat = np.linalg.svd(rng.randn(n_channels, n_channels))[0]
    # define sources
    S_s = rng.randn(n_trials * n_samples, n_components)
    # filter source in the specific freq. band of interest
    S_s = filter_data(S_s.T, samples_per_second, **filt_params_signal).T
    S_n = rng.randn(n_trials * n_samples, n_channels - n_components)
    S = np.hstack((S_s, S_n))
    # mix data
    X_s = np.dot(mixing_mat[:, :n_components], S_s.T).T
    X_n = np.dot(mixing_mat[:, n_components:], S_n.T).T
    # add noise: normalize both parts before weighting by SNR
    X_s = X_s / np.linalg.norm(X_s, 'fro')
    X_n = X_n / np.linalg.norm(X_n, 'fro')
    X = SNR * X_s + (1 - SNR) * X_n
    X = X.T
    S = S.T
    return X, mixing_mat, S
@pytest.mark.slowtest
def test_ssd():
    """Test Common Spatial Patterns algorithm on raw data."""
    X, A, S = simulate_data()
    sf = 250
    n_channels = X.shape[0]
    info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg')
    n_components_true = 5
    # Init
    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
                              l_trans_bandwidth=1, h_trans_bandwidth=1)
    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
                             l_trans_bandwidth=1, h_trans_bandwidth=1)
    ssd = SSD(info, filt_params_signal, filt_params_noise)
    # freq no int: non-numeric band edges must be rejected
    freq = 'foo'
    filt_params_signal = dict(l_freq=freq, h_freq=freqs_sig[1],
                              l_trans_bandwidth=1, h_trans_bandwidth=1)
    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
                             l_trans_bandwidth=1, h_trans_bandwidth=1)
    with pytest.raises(TypeError, match='must be an instance '):
        ssd = SSD(info, filt_params_signal, filt_params_noise)
    # Wrongly specified noise band: signal band not inside noise band
    freq = 2
    filt_params_signal = dict(l_freq=freq, h_freq=freqs_sig[1],
                              l_trans_bandwidth=1, h_trans_bandwidth=1)
    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
                             l_trans_bandwidth=1, h_trans_bandwidth=1)
    with pytest.raises(ValueError, match='Wrongly specified '):
        ssd = SSD(info, filt_params_signal, filt_params_noise)
    # filt param no dict
    filt_params_signal = freqs_sig
    filt_params_noise = freqs_noise
    with pytest.raises(ValueError, match='must be defined'):
        ssd = SSD(info, filt_params_signal, filt_params_noise)
    # Data type: fit expects a NumPy array, not an MNE object
    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
                              l_trans_bandwidth=1, h_trans_bandwidth=1)
    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
                             l_trans_bandwidth=1, h_trans_bandwidth=1)
    ssd = SSD(info, filt_params_signal, filt_params_noise)
    raw = io.RawArray(X, info)
    pytest.raises(TypeError, ssd.fit, raw)
    # check non-boolean return_filtered
    with pytest.raises(ValueError, match='return_filtered'):
        ssd = SSD(info, filt_params_signal, filt_params_noise,
                  return_filtered=0)
    # check non-boolean sort_by_spectral_ratio
    with pytest.raises(ValueError, match='sort_by_spectral_ratio'):
        ssd = SSD(info, filt_params_signal, filt_params_noise,
                  sort_by_spectral_ratio=0)
    # More than 1 channel type is not supported
    ch_types = np.reshape([['mag'] * 10, ['eeg'] * 10], n_channels)
    info_2 = create_info(ch_names=n_channels, sfreq=sf, ch_types=ch_types)
    with pytest.raises(ValueError, match='At this point SSD'):
        ssd = SSD(info_2, filt_params_signal, filt_params_noise)
    # Number of channels mismatch between info and data
    info_3 = create_info(ch_names=n_channels + 1, sfreq=sf, ch_types='eeg')
    ssd = SSD(info_3, filt_params_signal, filt_params_noise)
    pytest.raises(ValueError, ssd.fit, X)
    # Fit
    n_components = 10
    ssd = SSD(info, filt_params_signal, filt_params_noise,
              n_components=n_components)
    # Call transform before fit: filters_ not yet set -> AttributeError
    pytest.raises(AttributeError, ssd.transform, X)
    # Check outputs
    ssd.fit(X)
    assert (ssd.filters_.shape == (n_channels, n_channels))
    assert (ssd.patterns_.shape == (n_channels, n_channels))
    # Transform
    X_ssd = ssd.fit_transform(X)
    assert (X_ssd.shape[0] == n_components)
    # back and forward: without truncation apply() must reconstruct X
    ssd = SSD(info, filt_params_signal, filt_params_noise,
              n_components=None, sort_by_spectral_ratio=False)
    ssd.fit(X)
    X_denoised = ssd.apply(X)
    assert_array_almost_equal(X_denoised, X)
    # denoised by low-rank-factorization
    ssd = SSD(info, filt_params_signal, filt_params_noise,
              n_components=n_components, sort_by_spectral_ratio=True)
    ssd.fit(X)
    X_denoised = ssd.apply(X)
    assert (np.linalg.matrix_rank(X_denoised) == n_components)
    # Power ratio ordering
    ssd = SSD(info, filt_params_signal, filt_params_noise,
              n_components=None, sort_by_spectral_ratio=False)
    ssd.fit(X)
    spec_ratio, sorter_spec = ssd.get_spectral_ratio(ssd.transform(X))
    # since we now that the number of true components is 5, the relative
    # difference should be low for the first 5 components and then increases
    index_diff = np.argmax(-np.diff(spec_ratio))
    assert index_diff == n_components_true - 1
    # Check detected peaks
    # fit ssd
    n_components = n_components_true
    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
                              l_trans_bandwidth=1, h_trans_bandwidth=1)
    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
                             l_trans_bandwidth=1, h_trans_bandwidth=1)
    ssd = SSD(info, filt_params_signal, filt_params_noise,
              n_components=n_components, sort_by_spectral_ratio=False)
    ssd.fit(X)
    out = ssd.transform(X)
    # recovered source spectrum should strongly correlate with the true one
    psd_out, _ = psd_array_welch(out[0], sfreq=250, n_fft=250)
    psd_S, _ = psd_array_welch(S[0], sfreq=250, n_fft=250)
    corr = np.abs(np.corrcoef((psd_out, psd_S))[0, 1])
    assert np.abs(corr) > 0.95
    # Check pattern estimation
    # Since there is no exact ordering of the recovered patterns
    # a pair-wise greedy search will be done
    error = list()
    for ii in range(n_channels):
        corr = np.abs(np.corrcoef(ssd.patterns_[ii, :].T, A[:, 0])[0, 1])
        error.append(1 - corr)
    min_err = np.min(error)
    assert min_err < 0.3  # threshold taken from SSD original paper
def test_ssd_epoched_data():
    """Test SSD on epoched data.

    Fit SSD on epoched data and on the equivalent continuous (raw) data
    and check that the leading components agree.
    """
    X, A, S = simulate_data(n_trials=100, n_channels=20, n_samples=500)
    sf = 250
    n_channels = X.shape[0]
    info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg')
    n_components_true = 5
    # Build epochs as consecutive windows over the continuous data:
    # 100 epochs x 20 channels x 500 samples.
    X_e = np.reshape(X, (100, 20, 500))
    # Fit
    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
                              l_trans_bandwidth=4, h_trans_bandwidth=4)
    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
                             l_trans_bandwidth=4, h_trans_bandwidth=4)
    # SSD fitted on epochs
    ssd_e = SSD(info, filt_params_signal, filt_params_noise)
    ssd_e.fit(X_e)
    # SSD fitted on raw (continuous) data
    ssd = SSD(info, filt_params_signal, filt_params_noise)
    ssd.fit(X)
    # Check that the first n_components_true components are the same
    # for both fits.
    _, sorter_spec_e = ssd_e.get_spectral_ratio(ssd_e.transform(X_e))
    _, sorter_spec = ssd.get_spectral_ratio(ssd.transform(X))
    assert_array_equal(sorter_spec_e[:n_components_true],
                       sorter_spec[:n_components_true])
@requires_sklearn
def test_ssd_pipeline():
    """Test that SSD can be chained inside a scikit-learn Pipeline."""
    from sklearn.pipeline import Pipeline
    sfreq = 250
    X, A, S = simulate_data(n_trials=100, n_channels=20, n_samples=500)
    epochs_data = np.reshape(X, (100, 20, 500))
    # Binary labels drawn at random (classification target for CSP).
    labels = np.random.randint(2, size=100)
    info = create_info(ch_names=20, sfreq=sfreq, ch_types='eeg')
    params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
                         l_trans_bandwidth=4, h_trans_bandwidth=4)
    params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
                        l_trans_bandwidth=4, h_trans_bandwidth=4)
    pipe = Pipeline([('SSD', SSD(info, params_signal, params_noise)),
                     ('CSP', CSP())])
    pipe.set_params(SSD__n_components=5)
    pipe.set_params(CSP__n_components=2)
    out = pipe.fit_transform(epochs_data, labels)
    assert out.shape == (100, 2)
    assert pipe.get_params()['SSD__n_components'] == 5
def test_sorting():
    """Test that the spectral-ratio ordering is learned during fit."""
    X, _, _ = simulate_data(n_trials=100, n_channels=20, n_samples=500)
    # Reshape into 100 epochs of 500 samples each.
    X = np.reshape(X, (100, 20, 500))
    # Train / test split.
    X_train, X_test = X[:80], X[80:]
    sfreq = 250
    n_channels = X_train.shape[1]
    info = create_info(ch_names=n_channels, sfreq=sfreq, ch_types='eeg')
    sig_params = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
                      l_trans_bandwidth=4, h_trans_bandwidth=4)
    noise_params = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
                        l_trans_bandwidth=4, h_trans_bandwidth=4)
    # With sort_by_spectral_ratio=False the ordering is not learned, so
    # the orders computed on train and test data may disagree.
    ssd = SSD(info, sig_params, noise_params,
              n_components=None, sort_by_spectral_ratio=False)
    ssd.fit(X_train)
    _, order_train = ssd.get_spectral_ratio(ssd.transform(X_train))
    _, order_test = ssd.get_spectral_ratio(ssd.transform(X_test))
    assert any(order_train != order_test)
    # With sort_by_spectral_ratio=True the sorter is stored at fit time...
    ssd = SSD(info, sig_params, noise_params,
              n_components=None, sort_by_spectral_ratio=True)
    ssd.fit(X_train)
    learned_order = ssd.sorter_spec
    # ...and matches the order recomputed from the training data.
    ssd = SSD(info, sig_params, noise_params,
              n_components=None, sort_by_spectral_ratio=False)
    ssd.fit(X_train)
    _, recomputed_order = ssd.get_spectral_ratio(ssd.transform(X_train))
    assert all(learned_order == recomputed_order)
def test_return_filtered():
    """Test the return_filtered option of SSD."""
    # Simulate noisier data with broader frequency content than the
    # signal band of interest.
    X, _, _ = simulate_data(SNR=0.9, freqs_sig=[4, 13])
    sfreq = 250
    n_channels = X.shape[0]
    info = create_info(ch_names=n_channels, sfreq=sfreq, ch_types='eeg')
    sig_params = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
                      l_trans_bandwidth=1, h_trans_bandwidth=1)
    noise_params = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
                        l_trans_bandwidth=1, h_trans_bandwidth=1)
    # With return_filtered=True the output spectrum is confined to the
    # signal band.
    ssd = SSD(info, sig_params, noise_params,
              sort_by_spectral_ratio=False, return_filtered=True)
    ssd.fit(X)
    out = ssd.transform(X)
    psd_out, freqs = psd_array_welch(out[0], sfreq=250, n_fft=250)
    band = int(freqs[psd_out > 0.5][0]), int(freqs[psd_out > 0.5][-1])
    assert band == freqs_sig
    # With return_filtered=False it is not.
    ssd = SSD(info, sig_params, noise_params,
              sort_by_spectral_ratio=False, return_filtered=False)
    ssd.fit(X)
    out = ssd.transform(X)
    psd_out, freqs = psd_array_welch(out[0], sfreq=250, n_fft=250)
    band = int(freqs[psd_out > 0.5][0]), int(freqs[psd_out > 0.5][-1])
    assert band != freqs_sig
| {
"repo_name": "kambysese/mne-python",
"path": "mne/decoding/tests/test_ssd.py",
"copies": "8",
"size": "12817",
"license": "bsd-3-clause",
"hash": -7216064647460166000,
"line_mean": 38.5586419753,
"line_max": 76,
"alpha_frac": 0.6232347663,
"autogenerated": false,
"ratio": 3.0516666666666667,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 324
} |
import numpy as np
from ..filter import filter_data
from ..cov import _regularized_covariance
from . import TransformerMixin, BaseEstimator
from ..time_frequency import psd_array_welch
from ..utils import _time_mask, fill_doc, _validate_type, _check_option
from ..io.pick import _get_channel_types, _picks_to_idx
@fill_doc
class SSD(BaseEstimator, TransformerMixin):
    """
    M/EEG signal decomposition using the Spatio-Spectral Decomposition (SSD).

    SSD seeks to maximize the power at a frequency band of interest while
    simultaneously minimizing it at the flanking (surrounding) frequency bins
    (considered noise). It extremizes the covariance matrices associated with
    signal and noise :footcite:`NikulinEtAl2011`.

    SSD can either be used as a dimensionality reduction method or a
    ‘denoised’ low rank factorization method :footcite:`HaufeEtAl2014b`.

    Parameters
    ----------
    info : instance of mne.Info
        The info object containing the channel and sampling information.
        It must match the input data.
    filt_params_signal : dict
        Filtering for the frequencies of interest.
    filt_params_noise : dict
        Filtering for the frequencies of non-interest.
    reg : float | str | None (default)
        Which covariance estimator to use.
        If not None (same as 'empirical'), allow regularization for
        covariance estimation. If float, shrinkage is used
        (0 <= shrinkage <= 1). For str options, reg will be passed to
        method to :func:`mne.compute_covariance`.
    n_components : int | None (default None)
        The number of components to extract from the signal.
        If n_components is None, no dimensionality reduction is applied.
    picks : array of int | None (default None)
        The indices of good channels.
    sort_by_spectral_ratio : bool (default True)
        If set to True, the components are sorted accordingly
        to the spectral ratio.
        See Eq. (24) in :footcite:`NikulinEtAl2011`.
    return_filtered : bool (default False)
        If return_filtered is True, data is bandpassed and projected onto
        the SSD components.
    n_fft : int (default None)
        If sort_by_spectral_ratio is set to True, then the SSD sources will be
        sorted accordingly to their spectral ratio which is calculated based on
        :func:`mne.time_frequency.psd_array_welch` function. The n_fft parameter
        set the length of FFT used.
        See :func:`mne.time_frequency.psd_array_welch` for more information.
    cov_method_params : dict | None (default None)
        As in :class:`mne.decoding.SPoC`
        The default is None.
    rank : None | dict | ‘info’ | ‘full’
        As in :class:`mne.decoding.SPoC`
        This controls the rank computation that can be read from the
        measurement info or estimated from the data.
        See Notes of :func:`mne.compute_rank` for details.
        We recommend to use 'full' when working with epoched data.

    Attributes
    ----------
    filters_ : array, shape (n_channels, n_components)
        The spatial filters to be multiplied with the signal.
    patterns_ : array, shape (n_components, n_channels)
        The patterns for reconstructing the signal from the filtered data.

    References
    ----------
    .. footbibliography::
    """

    def __init__(self, info, filt_params_signal, filt_params_noise,
                 reg=None, n_components=None, picks=None,
                 sort_by_spectral_ratio=True, return_filtered=False,
                 n_fft=None, cov_method_params=None, rank=None):
        """Initialize instance."""
        dicts = {"signal": filt_params_signal, "noise": filt_params_noise}
        # Require 'l_freq' and 'h_freq' in both filter-parameter dicts and
        # validate that their values are numeric.
        for param, dd in [('l', 0), ('h', 0), ('l', 1), ('h', 1)]:
            key = ('signal', 'noise')[dd]
            if param + '_freq' not in dicts[key]:
                raise ValueError(
                    '%s must be defined in filter parameters for %s'
                    % (param + '_freq', key))
            val = dicts[key][param + '_freq']
            if not isinstance(val, (int, float)):
                # Defer to MNE's validator for a consistent error message.
                _validate_type(val, ('numeric',), f'{key} {param}_freq')
        # check freq bands: the signal band must lie within the noise band
        if (filt_params_noise['l_freq'] > filt_params_signal['l_freq'] or
                filt_params_signal['h_freq'] > filt_params_noise['h_freq']):
            raise ValueError('Wrongly specified frequency bands!\n'
                             'The signal band-pass must be within the noise '
                             'band-pass!')
        self.picks_ = _picks_to_idx(info, picks, none='data', exclude='bads')
        del picks
        # Only a single channel type is supported for fitting.
        ch_types = _get_channel_types(info, picks=self.picks_, unique=True)
        if len(ch_types) > 1:
            raise ValueError('At this point SSD only supports fitting '
                             'single channel types. Your info has %i types' %
                             (len(ch_types)))
        self.info = info
        self.freqs_signal = (filt_params_signal['l_freq'],
                             filt_params_signal['h_freq'])
        self.freqs_noise = (filt_params_noise['l_freq'],
                            filt_params_noise['h_freq'])
        self.filt_params_signal = filt_params_signal
        self.filt_params_noise = filt_params_noise
        # check if boolean
        if not isinstance(sort_by_spectral_ratio, (bool)):
            raise ValueError('sort_by_spectral_ratio must be boolean')
        self.sort_by_spectral_ratio = sort_by_spectral_ratio
        # Default FFT length: one second of data (sfreq samples).
        if n_fft is None:
            self.n_fft = int(self.info['sfreq'])
        else:
            self.n_fft = int(n_fft)
        # check if boolean
        if not isinstance(return_filtered, (bool)):
            raise ValueError('return_filtered must be boolean')
        self.return_filtered = return_filtered
        self.reg = reg
        self.n_components = n_components
        self.rank = rank
        self.cov_method_params = cov_method_params

    def _check_X(self, X):
        """Check that X is a 2D/3D array whose channel axis matches info."""
        _validate_type(X, np.ndarray, 'X')
        _check_option('X.ndim', X.ndim, (2, 3))
        n_chan = X.shape[-2]
        if n_chan != self.info['nchan']:
            raise ValueError('Info must match the input data.'
                             'Found %i channels but expected %i.' %
                             (n_chan, self.info['nchan']))

    def fit(self, X, y=None):
        """Estimate the SSD decomposition on raw or epoched data.

        Parameters
        ----------
        X : array, shape ([n_epochs, ]n_channels, n_times)
            The input data from which to estimate the SSD. Either 2D array
            obtained from continuous data or 3D array obtained from epoched
            data.
        y : None | array, shape (n_samples,)
            Used for scikit-learn compatibility.

        Returns
        -------
        self : instance of SSD
            Returns the modified instance.
        """
        from scipy.linalg import eigh
        self._check_X(X)
        X_aux = X[..., self.picks_, :]
        # Band-pass at the signal band and at the (wider) noise band.
        X_signal = filter_data(
            X_aux, self.info['sfreq'], **self.filt_params_signal)
        X_noise = filter_data(
            X_aux, self.info['sfreq'], **self.filt_params_noise)
        # Subtract the signal band so only the flanking bins remain as noise.
        X_noise -= X_signal
        if X.ndim == 3:
            # Concatenate epochs along time for covariance estimation.
            X_signal = np.hstack(X_signal)
            X_noise = np.hstack(X_noise)
        cov_signal = _regularized_covariance(
            X_signal, reg=self.reg, method_params=self.cov_method_params,
            rank=self.rank, info=self.info)
        cov_noise = _regularized_covariance(
            X_noise, reg=self.reg, method_params=self.cov_method_params,
            rank=self.rank, info=self.info)
        # Generalized eigendecomposition cov_signal @ v = lambda cov_noise @ v:
        # maximizes signal-band power relative to noise-band power.
        eigvals_, eigvects_ = eigh(cov_signal, cov_noise)
        # sort in descending order
        ix = np.argsort(eigvals_)[::-1]
        self.eigvals_ = eigvals_[ix]
        self.filters_ = eigvects_[:, ix]
        self.patterns_ = np.linalg.pinv(self.filters_)
        # We assume that ordering by spectral ratio is more important
        # than the initial ordering. This ordering should also be learned
        # when fitting.
        X_ssd = self.filters_.T @ X[..., self.picks_, :]
        # Ellipsis acts as a no-op index when no sorting is requested.
        sorter_spec = Ellipsis
        if self.sort_by_spectral_ratio:
            _, sorter_spec = self.get_spectral_ratio(ssd_sources=X_ssd)
        self.sorter_spec = sorter_spec
        return self

    def transform(self, X):
        """Estimate epochs sources given the SSD filters.

        Parameters
        ----------
        X : array, shape ([n_epochs, ]n_channels, n_times)
            The input data from which to estimate the SSD. Either 2D array
            obtained from continuous data or 3D array obtained from epoched
            data.

        Returns
        -------
        X_ssd : array, shape ([n_epochs, ]n_components, n_times)
            The processed data.
        """
        self._check_X(X)
        # NOTE(review): filters_ is only assigned in fit(); on an unfitted
        # instance this attribute access raises AttributeError before the
        # RuntimeError below can trigger — confirm intended.
        if self.filters_ is None:
            raise RuntimeError('No filters available. Please first call fit')
        if self.return_filtered:
            # Band-pass at the signal band before projecting.
            X_aux = X[..., self.picks_, :]
            X = filter_data(X_aux, self.info['sfreq'],
                            **self.filt_params_signal)
        X_ssd = self.filters_.T @ X[..., self.picks_, :]
        # Apply the learned ordering and optional dimensionality reduction;
        # the component axis is 0 for 2D input and 1 for 3D (epoched) input.
        if X.ndim == 2:
            X_ssd = X_ssd[self.sorter_spec][:self.n_components]
        else:
            X_ssd = X_ssd[:, self.sorter_spec, :][:, :self.n_components, :]
        return X_ssd

    def get_spectral_ratio(self, ssd_sources):
        """Get the spectral signal-to-noise ratio for each spatial filter.

        Spectral ratio measure for best n_components selection
        See :footcite:`NikulinEtAl2011`, Eq. (24).

        Parameters
        ----------
        ssd_sources : array
            Data projected to SSD space.

        Returns
        -------
        spec_ratio : array, shape (n_channels)
            Array with the spectral ratio value for each component.
        sorter_spec : array, shape (n_channels)
            Array of indices for sorting spec_ratio.

        References
        ----------
        .. footbibliography::
        """
        psd, freqs = psd_array_welch(
            ssd_sources, sfreq=self.info['sfreq'], n_fft=self.n_fft)
        # _time_mask is reused here on the frequency axis to select the
        # PSD bins falling in each band.
        sig_idx = _time_mask(freqs, *self.freqs_signal)
        noise_idx = _time_mask(freqs, *self.freqs_noise)
        # Average power over frequency bins (and over epochs for 3D input).
        if psd.ndim == 3:
            mean_sig = psd[:, :, sig_idx].mean(axis=2).mean(axis=0)
            mean_noise = psd[:, :, noise_idx].mean(axis=2).mean(axis=0)
            spec_ratio = mean_sig / mean_noise
        else:
            mean_sig = psd[:, sig_idx].mean(axis=1)
            mean_noise = psd[:, noise_idx].mean(axis=1)
            spec_ratio = mean_sig / mean_noise
        # Descending order: highest signal-to-noise ratio first.
        sorter_spec = spec_ratio.argsort()[::-1]
        return spec_ratio, sorter_spec

    def inverse_transform(self):
        """Not implemented yet."""
        raise NotImplementedError('inverse_transform is not yet available.')

    def apply(self, X):
        """Remove selected components from the signal.

        This procedure will reconstruct M/EEG signals from which the dynamics
        described by the excluded components is subtracted
        (denoised by low-rank factorization).
        See :footcite:`HaufeEtAl2014b` for more information.

        .. note:: Unlike in other classes with an apply method,
           only NumPy arrays are supported (not instances of MNE objects).

        Parameters
        ----------
        X : array, shape ([n_epochs, ]n_channels, n_times)
            The input data from which to estimate the SSD. Either 2D array
            obtained from continuous data or 3D array obtained from epoched
            data.

        Returns
        -------
        X : array, shape ([n_epochs, ]n_channels, n_times)
            The processed data.
        """
        X_ssd = self.transform(X)
        # Select the patterns matching the kept (sorted) components and
        # project the sources back to sensor space.
        pick_patterns = self.patterns_[self.sorter_spec][:self.n_components].T
        X = pick_patterns @ X_ssd
        return X
| {
"repo_name": "pravsripad/mne-python",
"path": "mne/decoding/ssd.py",
"copies": "8",
"size": "12181",
"license": "bsd-3-clause",
"hash": -355547508855911900,
"line_mean": 40.3911564626,
"line_max": 79,
"alpha_frac": 0.5886268387,
"autogenerated": false,
"ratio": 3.9229529335912314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 294
} |
class Bunch(dict):
    """Dictionary whose items are also accessible as attributes."""

    def __init__(self, *args, **kwargs):
        super(Bunch, self).__init__(*args, **kwargs)
        # Aliasing the attribute dict to the mapping itself makes every
        # key readable/writable as an attribute (and vice versa).
        self.__dict__ = self
# PMD maps PubMed/MEDLINE field tags (two/three/four-letter codes) to their
# human-readable names, plus a few parsing defaults used by this package.
PMD = Bunch()
# Parsing defaults.
PMD.PT_ARTICLE = 'journal article'
PMD.DEF_FIELDS = ['TI', 'AU', 'DP', 'AB', 'JT', 'TA', 'PT', 'MH', 'PMID']
PMD.SEP_PAGES_ENTRY = ';'
PMD.AGES_RANGE = '-'
# Record field tags.
PMD.AB = 'Abstract'
PMD.CI = 'Copyright Information'
PMD.AD = 'Affiliation'
PMD.IRAD = 'Investigator Affiliation'
PMD.AID = 'Article Identifier'
PMD.AU = 'Author'
PMD.FAU = 'Full Author'
PMD.CN = 'Corporate Author'
PMD.DCOM = 'Date Completed'
PMD.DA = 'Date Created'
PMD.LR = 'Date Last Revised'
PMD.DEP = 'Date of Electronic Publication'
PMD.DP = 'Date of Publication'
PMD.EDAT = 'Entrez Date'
PMD.GS = 'Gene Symbol'
PMD.GN = 'General Note'
PMD.GR = 'Grant Number'
PMD.IR = 'Investigator Name'
PMD.FIR = 'Full Investigator Name'
PMD.IS = 'ISSN'
PMD.IP = 'Issue'
PMD.TA = 'Journal Title Abbreviation'
PMD.JT = 'Journal Title'
PMD.LA = 'Language'
PMD.LID = 'Location Identifier'
PMD.MID = 'Manuscript Identifier'
PMD.MHDA = 'MeSH Date'
PMD.MH = 'MeSH Terms'
PMD.JID = 'NLM Unique ID'
PMD.RF = 'Number of References'
PMD.OAB = 'Other Abstract'
PMD.OCI = 'Other Copyright Information'
PMD.OID = 'Other ID'
PMD.OT = 'Other Term'
PMD.OTO = 'Other Term Owner'
PMD.OWN = 'Owner'
PMD.PG = 'Pagination'
PMD.PS = 'Personal Name as Subject'
PMD.FPS = 'Full Personal Name as Subject'
PMD.PL = 'Place of Publication'
PMD.PHST = 'Publication History Status'
PMD.PST = 'Publication Status'
PMD.PT = 'Publication Type'
PMD.PUBM = 'Publishing Model'
PMD.PMC = 'PubMed Central Identifier'
PMD.PMID = 'PubMed Unique Identifier'
PMD.RN = 'Registry Number/EC Number'
PMD.NM = 'Substance Name'
PMD.SI = 'Secondary Source ID'
PMD.SO = 'Source'
PMD.SFM = 'Space Flight Mission'
PMD.STAT = 'Status'
PMD.SB = 'Subset'
PMD.TI = 'Title'
PMD.TT = 'Transliterated Title'
PMD.VI = 'Volume'
# Cross-reference / linkage tags (relations between records).
PMD.CON = 'Comment on'
PMD.CIN = 'Comment in'
PMD.EIN = 'Erratum in'
PMD.EFR = 'Erratum for'
PMD.CRI = 'Corrected and Republished in'
PMD.CRF = 'Corrected and Republished from'
PMD.PRIN = 'Partial retraction in'
PMD.PROF = 'Partial retraction of'
PMD.RPI = 'Republished in'
PMD.RPF = 'Republished from'
PMD.RIN = 'Retraction in'
PMD.ROF = 'Retraction of'
PMD.UIN = 'Update in'
PMD.UOF = 'Update of'
PMD.SPIN = 'Summary for patient'
PMD.ORI = 'Original report' | {
"repo_name": "PyMed/PyMed",
"path": "pymed/constants.py",
"copies": "1",
"size": "3365",
"license": "bsd-3-clause",
"hash": 6841277360898987000,
"line_mean": 36.4,
"line_max": 78,
"alpha_frac": 0.5001485884,
"autogenerated": false,
"ratio": 3.1273234200743496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9019745391657193,
"avg_score": 0.021545323363431217,
"num_lines": 90
} |
__author__ = 'denisbalyko'
def checkio(labyrinth):
queue, answer = [], ""
xn, yn = 1, 1 #start_position
start_value = 10 #(any greater than 0 and 1)
queue.append([xn, yn])
labyrinth[xn][yn] = start_value
"""Create path"""
while queue:
xn, yn = queue.pop(0)
for x, y in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
xk, yk = xn + x, yn + y
if (xk > 0) and (xk < 12) and (yk > 0) and (yk < 12) and (labyrinth[xk][yk] == 0):
queue.append([xk, yk])
labyrinth[xk][yk] = labyrinth[xn][yn] + 1
"""Back to home"""
xn, yn = 10, 10 #end_position
while not (labyrinth[xn][yn] == start_value):
for x, y, direct in [(1, 0, "N"), (-1, 0, "S"), (0, 1, "W"), (0, -1, "E")]:
xk, yk = xn + x, yn + y
if labyrinth[xn][yn] == labyrinth[xk][yk] + 1:
xn, yn = xk, yk
answer = direct + answer
break
return answer
# Demo / smoke test (Python 2 print statements): the first call prints the
# solution string; the second compares it against the known answer and
# should print True.
print checkio([[1,1,1,1,1,1,1,1,1,1,1,1],
               [1,0,0,0,0,0,0,0,0,0,0,1],
               [1,0,1,1,1,1,1,1,0,1,1,1],
               [1,0,1,0,0,0,0,0,0,0,0,1],
               [1,0,1,0,1,1,1,1,1,1,0,1],
               [1,0,1,0,1,0,0,0,0,0,0,1],
               [1,0,0,0,1,1,0,1,1,1,0,1],
               [1,0,1,0,0,0,0,1,0,1,1,1],
               [1,0,1,1,0,1,0,0,0,0,0,1],
               [1,0,1,0,0,1,1,1,1,1,0,1],
               [1,0,0,0,1,1,0,0,0,0,0,1],
               [1,1,1,1,1,1,1,1,1,1,1,1]])
print checkio([[1,1,1,1,1,1,1,1,1,1,1,1],
               [1,0,0,0,0,0,0,0,0,0,0,1],
               [1,0,1,1,1,1,1,1,0,1,1,1],
               [1,0,1,0,0,0,0,0,0,0,0,1],
               [1,0,1,0,1,1,1,1,1,1,0,1],
               [1,0,1,0,1,0,0,0,0,0,0,1],
               [1,0,0,0,1,1,0,1,1,1,0,1],
               [1,0,1,0,0,0,0,1,0,1,1,1],
               [1,0,1,1,0,1,0,0,0,0,0,1],
               [1,0,1,0,0,1,1,1,1,1,0,1],
               [1,0,0,0,1,1,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1]]) == "SSSSSEENNNEEEEEEESSWWWWSSSEEEESS" | {
"repo_name": "denisbalyko/checkio-solution",
"path": "Open Labyrinth.py",
"copies": "1",
"size": "2066",
"license": "mit",
"hash": -5097396239516195000,
"line_mean": 36.5818181818,
"line_max": 94,
"alpha_frac": 0.3814133591,
"autogenerated": false,
"ratio": 2.029469548133595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.2910882907233595,
"avg_score": null,
"num_lines": null
} |
from collections import Counter
import numpy as np
from .mixin import TransformerMixin, EstimatorMixin
from .base import _set_cv
from ..io.pick import _picks_to_idx
from ..parallel import parallel_func
from ..utils import logger, verbose
from .. import pick_types, pick_info
class EMS(TransformerMixin, EstimatorMixin):
    """Transformer to compute event-matched spatial filters.

    This version of EMS [1]_ operates on the entire time course. No time
    window needs to be specified. The result is a spatial filter at each
    time point and a corresponding time course. Intuitively, the result
    gives the similarity between the filter at each time point and the
    data vector (sensors) at that time point.

    .. note:: EMS only works for binary classification.

    Attributes
    ----------
    filters_ : ndarray, shape (n_channels, n_times)
        The set of spatial filters.
    classes_ : ndarray, shape (n_classes,)
        The target classes.

    References
    ----------
    .. [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
       multi-sensor data to a single time course that reveals experimental
       effects", BMC Neuroscience 2013, 14:122
    """

    def __repr__(self):  # noqa: D105
        if not hasattr(self, 'filters_'):
            return '<EMS: not fitted.>'
        return '<EMS: fitted with %i filters on %i classes.>' % (
            len(self.filters_), len(self.classes_))

    def fit(self, X, y):
        """Fit the spatial filters.

        .. note : EMS is fitted on data normalized by channel type before the
                  fitting of the spatial filters.

        Parameters
        ----------
        X : array, shape (n_epochs, n_channels, n_times)
            The training data.
        y : array of int, shape (n_epochs)
            The target classes.

        Returns
        -------
        self : instance of EMS
            Returns self.
        """
        classes = np.unique(y)
        if classes.size != 2:
            raise ValueError('EMS only works for binary classification.')
        self.classes_ = classes
        # Difference of the two class-conditional means...
        diff = X[y == classes[0]].mean(0) - X[y == classes[1]].mean(0)
        # ...normalized across channels at every time point.
        diff /= np.linalg.norm(diff, axis=0)[None, :]
        self.filters_ = diff
        return self

    def transform(self, X):
        """Transform the data by the spatial filters.

        Parameters
        ----------
        X : array, shape (n_epochs, n_channels, n_times)
            The input data.

        Returns
        -------
        X : array, shape (n_epochs, n_times)
            The input data transformed by the spatial filters.
        """
        # Project each epoch onto the per-time-point filters.
        return np.sum(X * self.filters_, axis=1)
@verbose
def compute_ems(epochs, conditions=None, picks=None, n_jobs=1, cv=None,
                verbose=None):
    """Compute event-matched spatial filter on epochs.

    This version of EMS [1]_ operates on the entire time course. No time
    window needs to be specified. The result is a spatial filter at each
    time point and a corresponding time course. Intuitively, the result
    gives the similarity between the filter at each time point and the
    data vector (sensors) at that time point.

    .. note : EMS only works for binary classification.

    .. note : The present function applies a leave-one-out cross-validation,
              following Schurger et al's paper. However, we recommend using
              a stratified k-fold cross-validation. Indeed, leave-one-out tends
              to overfit and cannot be used to estimate the variance of the
              prediction within a given fold.

    .. note : Because of the leave-one-out, this function needs an equal
              number of epochs in each of the two conditions.

    Parameters
    ----------
    epochs : instance of mne.Epochs
        The epochs.
    conditions : list of str | None, default None
        If a list of strings, strings must match the epochs.event_id's key as
        well as the number of conditions supported by the objective_function.
        If None keys in epochs.event_id are used.
    %(picks_good_data)s
    %(n_jobs)s
    cv : cross-validation object | str | None, default LeaveOneOut
        The cross-validation scheme.
    %(verbose)s

    Returns
    -------
    surrogate_trials : ndarray, shape (n_trials // 2, n_times)
        The trial surrogates.
    mean_spatial_filter : ndarray, shape (n_channels, n_times)
        The set of spatial filters.
    conditions : ndarray, shape (n_classes,)
        The conditions used. Values correspond to original event ids.

    References
    ----------
    .. [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
       multi-sensor data to a single time course that reveals experimental
       effects", BMC Neuroscience 2013, 14:122
    """
    logger.info('...computing surrogate time series. This can take some time')
    # Default to leave-one-out cv
    cv = 'LeaveOneOut' if cv is None else cv
    picks = _picks_to_idx(epochs.info, picks)
    # The leave-one-out scheme below requires balanced event counts.
    if not len(set(Counter(epochs.events[:, 2]).values())) == 1:
        raise ValueError('The same number of epochs is required by '
                         'this function. Please consider '
                         '`epochs.equalize_event_counts`')
    if conditions is None:
        conditions = epochs.event_id.keys()
        epochs = epochs.copy()
    else:
        epochs = epochs[conditions]
    epochs.drop_bad()
    if len(conditions) != 2:
        raise ValueError('Currently this function expects exactly 2 '
                         'conditions but you gave me %i' %
                         len(conditions))
    ev = epochs.events[:, 2]
    # Special care to avoid path dependent mappings and orders
    conditions = list(sorted(conditions))
    # Per-condition epoch indices into the (possibly reduced) epochs.
    cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]
    info = pick_info(epochs.info, picks)
    data = epochs.get_data(picks=picks)
    # Scale (z-score) the data by channel type
    # XXX the z-scoring is applied outside the CV, which is not standard.
    for ch_type in ['mag', 'grad', 'eeg']:
        if ch_type in epochs:
            # FIXME should be applied to all sort of data channels
            if ch_type == 'eeg':
                this_picks = pick_types(info, meg=False, eeg=True)
            else:
                this_picks = pick_types(info, meg=ch_type, eeg=False)
            data[:, this_picks] /= np.std(data[:, this_picks])
    # Setup cross-validation. Need to use _set_cv to deal with sklearn
    # deprecation of cv objects.
    y = epochs.events[:, 2]
    _, cv_splits = _set_cv(cv, 'classifier', X=y, y=y)
    parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs)
    # FIXME this parallelization should be removed.
    #   1) it's numpy computation so it's already efficient,
    #   2) it duplicates the data in RAM,
    #   3) the computation is already super fast.
    out = parallel(p_func(_ems_diff, data, cond_idx, train, test)
                   for train, test in cv_splits)
    surrogate_trials, spatial_filter = zip(*out)
    surrogate_trials = np.array(surrogate_trials)
    # Average the per-fold filters into a single spatial filter.
    spatial_filter = np.mean(spatial_filter, axis=0)
    return surrogate_trials, spatial_filter, epochs.events[:, 2]
def _ems_diff(data0, data1):
"""Compute the default diff objective function."""
return np.mean(data0, axis=0) - np.mean(data1, axis=0)
def _run_ems(objective_function, data, cond_idx, train, test):
"""Run EMS."""
d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]
# compute surrogates
return np.sum(data[test[0]] * d, axis=0), d
| {
"repo_name": "cjayb/mne-python",
"path": "mne/decoding/ems.py",
"copies": "2",
"size": "7930",
"license": "bsd-3-clause",
"hash": -4423936390918831000,
"line_mean": 35.2100456621,
"line_max": 79,
"alpha_frac": 0.619924338,
"autogenerated": false,
"ratio": 3.900639449090015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5520563787090015,
"avg_score": null,
"num_lines": null
} |
from collections import Counter
import numpy as np
from .mixin import TransformerMixin, EstimatorMixin
from .base import _set_cv
from ..utils import logger, verbose
from ..parallel import parallel_func
from .. import pick_types, pick_info
class EMS(TransformerMixin, EstimatorMixin):
    """Transformer to compute event-matched spatial filters.

    This version of EMS [1]_ operates on the entire time course. No time
    window needs to be specified. The result is a spatial filter at each
    time point and a corresponding time course. Intuitively, the result
    gives the similarity between the filter at each time point and the
    data vector (sensors) at that time point.

    .. note:: EMS only works for binary classification.

    Attributes
    ----------
    filters_ : ndarray, shape (n_channels, n_times)
        The set of spatial filters.
    classes_ : ndarray, shape (n_classes,)
        The target classes.

    References
    ----------
    .. [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
       multi-sensor data to a single time course that reveals experimental
       effects", BMC Neuroscience 2013, 14:122
    """

    def __repr__(self):  # noqa: D105
        if hasattr(self, 'filters_'):
            return '<EMS: fitted with %i filters on %i classes.>' % (
                len(self.filters_), len(self.classes_))
        else:
            return '<EMS: not fitted.>'

    def fit(self, X, y):
        """Fit the spatial filters.

        .. note:: EMS is fitted on data normalized by channel type before
                  the fitting of the spatial filters.

        Parameters
        ----------
        X : array, shape (n_epochs, n_channels, n_times)
            The training data.
        y : array of int, shape (n_epochs)
            The target classes.

        Returns
        -------
        self : instance of EMS
            Returns an instance of self.
        """
        classes = np.unique(y)
        if len(classes) != 2:
            raise ValueError('EMS only works for binary classification.')
        self.classes_ = classes
        # Difference of class means, normalized across channels at each
        # time point.
        filters = X[y == classes[0]].mean(0) - X[y == classes[1]].mean(0)
        filters /= np.linalg.norm(filters, axis=0)[None, :]
        self.filters_ = filters
        return self

    def transform(self, X):
        """Transform the data by the spatial filters.

        Parameters
        ----------
        X : array, shape (n_epochs, n_channels, n_times)
            The input data.

        Returns
        -------
        X : array, shape (n_epochs, n_times)
            The input data transformed by the spatial filters.
        """
        # Project each epoch onto the per-time-point filters.
        Xt = np.sum(X * self.filters_, axis=1)
        return Xt
@verbose
def compute_ems(epochs, conditions=None, picks=None, n_jobs=1, verbose=None,
                cv=None):
    """Compute event-matched spatial filter on epochs.

    This version of EMS [1]_ operates on the entire time course. No time
    window needs to be specified. The result is a spatial filter at each
    time point and a corresponding time course. Intuitively, the result
    gives the similarity between the filter at each time point and the
    data vector (sensors) at that time point.

    .. note:: EMS only works for binary classification.

    .. note:: The present function applies a leave-one-out cross-validation,
              following Schurger et al's paper. However, we recommend using
              a stratified k-fold cross-validation. Indeed, leave-one-out
              tends to overfit and cannot be used to estimate the variance
              of the prediction within a given fold.

    .. note:: Because of the leave-one-out, this function needs an equal
              number of epochs in each of the two conditions.

    Parameters
    ----------
    epochs : instance of mne.Epochs
        The epochs.
    conditions : list of str | None, defaults to None
        If a list of strings, strings must match the epochs.event_id's key as
        well as the number of conditions supported by the objective_function.
        If None keys in epochs.event_id are used.
    picks : array-like of int | None, defaults to None
        Channels to be included. If None only good data channels are used.
    n_jobs : int, defaults to 1
        Number of jobs to run in parallel.
    verbose : bool, str, int, or None, defaults to self.verbose
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).
    cv : cross-validation object | str | None, defaults to LeaveOneOut
        The cross-validation scheme.

    Returns
    -------
    surrogate_trials : ndarray, shape (n_trials // 2, n_times)
        The trial surrogates.
    mean_spatial_filter : ndarray, shape (n_channels, n_times)
        The set of spatial filters.
    conditions : ndarray, shape (n_classes,)
        The conditions used. Values correspond to original event ids.

    References
    ----------
    .. [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
       multi-sensor data to a single time course that reveals experimental
       effects", BMC Neuroscience 2013, 14:122
    """
    logger.info('...computing surrogate time series. This can take some time')
    # Default to leave-one-out cv
    cv = 'LeaveOneOut' if cv is None else cv
    if picks is None:
        picks = pick_types(epochs.info, meg=True, eeg=True)
    # The leave-one-out scheme below requires balanced event counts.
    if not len(set(Counter(epochs.events[:, 2]).values())) == 1:
        raise ValueError('The same number of epochs is required by '
                         'this function. Please consider '
                         '`epochs.equalize_event_counts`')
    if conditions is None:
        conditions = epochs.event_id.keys()
        epochs = epochs.copy()
    else:
        epochs = epochs[conditions]
    epochs.drop_bad()
    if len(conditions) != 2:
        raise ValueError('Currently this function expects exactly 2 '
                         'conditions but you gave me %i' %
                         len(conditions))
    ev = epochs.events[:, 2]
    # Special care to avoid path dependent mappings and orders
    conditions = list(sorted(conditions))
    # Per-condition epoch indices into the (possibly reduced) epochs.
    cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]
    info = pick_info(epochs.info, picks)
    data = epochs.get_data()[:, picks]
    # Scale (z-score) the data by channel type
    # XXX the z-scoring is applied outside the CV, which is not standard.
    for ch_type in ['mag', 'grad', 'eeg']:
        if ch_type in epochs:
            # FIXME should be applied to all sort of data channels
            if ch_type == 'eeg':
                this_picks = pick_types(info, meg=False, eeg=True)
            else:
                this_picks = pick_types(info, meg=ch_type, eeg=False)
            data[:, this_picks] /= np.std(data[:, this_picks])
    # Setup cross-validation. Need to use _set_cv to deal with sklearn
    # deprecation of cv objects.
    y = epochs.events[:, 2]
    _, cv_splits = _set_cv(cv, 'classifier', X=y, y=y)
    parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs)
    # FIXME this parallelization should be removed.
    #   1) it's numpy computation so it's already efficient,
    #   2) it duplicates the data in RAM,
    #   3) the computation is already super fast.
    out = parallel(p_func(_ems_diff, data, cond_idx, train, test)
                   for train, test in cv_splits)
    surrogate_trials, spatial_filter = zip(*out)
    surrogate_trials = np.array(surrogate_trials)
    # Average the per-fold filters into a single spatial filter.
    spatial_filter = np.mean(spatial_filter, axis=0)
    return surrogate_trials, spatial_filter, epochs.events[:, 2]
def _ems_diff(data0, data1):
"""Compute the default diff objective function."""
return np.mean(data0, axis=0) - np.mean(data1, axis=0)
def _run_ems(objective_function, data, cond_idx, train, test):
"""Run EMS."""
d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]
# compute surrogates
return np.sum(data[test[0]] * d, axis=0), d
| {
"repo_name": "teonlamont/mne-python",
"path": "mne/decoding/ems.py",
"copies": "4",
"size": "8295",
"license": "bsd-3-clause",
"hash": 3823947811729582600,
"line_mean": 36.197309417,
"line_max": 79,
"alpha_frac": 0.6233875829,
"autogenerated": false,
"ratio": 3.935009487666034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6558397070566034,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from .mixin import TransformerMixin, EstimatorMixin
from .base import _set_cv
from ..utils import logger, verbose
from ..fixes import Counter
from ..parallel import parallel_func
from .. import pick_types, pick_info
class EMS(TransformerMixin, EstimatorMixin):
    """Transformer to compute event-matched spatial filters.

    This version operates on the entire time course. The result is a spatial
    filter at each time point and a corresponding time course. Intuitively,
    the result gives the similarity between the filter at each time point and
    the data vector (sensors) at that time point.

    .. note : EMS only works for binary classification.

    References
    ----------
    [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
        multi-sensor data to a single time course that reveals experimental
        effects", BMC Neuroscience 2013, 14:122

    Attributes
    ----------
    filters_ : ndarray, shape (n_channels, n_times)
        The set of spatial filters.
    classes_ : ndarray, shape (n_classes,)
        The target classes.
    """

    def __repr__(self):
        # A fitted instance always carries ``filters_``.
        if not hasattr(self, 'filters_'):
            return '<EMS: not fitted.>'
        return '<EMS: fitted with %i filters on %i classes.>' % (
            len(self.filters_), len(self.classes_))

    def fit(self, X, y):
        """Fit the spatial filters.

        .. note : EMS is fitted on data normalized by channel type before the
                  fitting of the spatial filters.

        Parameters
        ----------
        X : array, shape (n_epochs, n_channels, n_times)
            The training data.
        y : array of int, shape (n_epochs)
            The target classes.

        Returns
        -------
        self : returns an instance of self.
        """
        classes = np.unique(y)
        if len(classes) != 2:
            raise ValueError('EMS only works for binary classification.')
        self.classes_ = classes
        # Filter = difference of the two class means, normalized to unit
        # length across channels at each time point.
        diff = X[y == classes[0]].mean(0) - X[y == classes[1]].mean(0)
        diff /= np.linalg.norm(diff, axis=0)[None, :]
        self.filters_ = diff
        return self

    def transform(self, X):
        """Transform the data by the spatial filters.

        Parameters
        ----------
        X : array, shape (n_epochs, n_channels, n_times)
            The input data.

        Returns
        -------
        X : array, shape (n_epochs, n_times)
            The input data transformed by the spatial filters.
        """
        # Per-epoch projection onto the fitted filters (sum over channels).
        return np.sum(X * self.filters_, axis=1)
@verbose
def compute_ems(epochs, conditions=None, picks=None, n_jobs=1, verbose=None,
                cv=None):
    """Compute event-matched spatial filter on epochs.

    This version operates on the entire time course. No time window needs to
    be specified. The result is a spatial filter at each time point and a
    corresponding time course. Intuitively, the result gives the similarity
    between the filter at each time point and the data vector (sensors) at
    that time point.

    .. note : EMS only works for binary classification.

    .. note : The present function applies a leave-one-out cross-validation,
              following Schurger et al's paper. However, we recommend using
              a stratified k-fold cross-validation. Indeed, leave-one-out
              tends to overfit and cannot be used to estimate the variance
              of the prediction within a given fold.

    .. note : Because of the leave-one-out, this function needs an equal
              number of epochs in each of the two conditions.

    References
    ----------
    [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
        multi-sensor data to a single time course that reveals experimental
        effects", BMC Neuroscience 2013, 14:122

    Parameters
    ----------
    epochs : instance of mne.Epochs
        The epochs.
    conditions : list of str | None, defaults to None
        If a list of strings, strings must match the epochs.event_id's key as
        well as the number of conditions supported by the objective_function.
        If None keys in epochs.event_id are used.
    picks : array-like of int | None, defaults to None
        Channels to be included. If None only good data channels are used.
    n_jobs : int, defaults to 1
        Number of jobs to run in parallel.
    verbose : bool, str, int, or None, defaults to self.verbose
        If not None, override default verbose level (see mne.verbose).
    cv : cross-validation object | str | None, defaults to LeaveOneOut
        The cross-validation scheme.

    Returns
    -------
    surrogate_trials : ndarray, shape (n_trials // 2, n_times)
        The trial surrogates.
    mean_spatial_filter : ndarray, shape (n_channels, n_times)
        The set of spatial filters.
    conditions : ndarray, shape (n_classes,)
        The conditions used. Values correspond to original event ids.
    """
    logger.info('...computing surrogate time series. This can take some time')
    # Default to leave-one-out cv
    cv = 'LeaveOneOut' if cv is None else cv
    if picks is None:
        picks = pick_types(epochs.info, meg=True, eeg=True)
    # Equal class counts are required by the leave-one-out scheme (see note).
    if not len(set(Counter(epochs.events[:, 2]).values())) == 1:
        raise ValueError('The same number of epochs is required by '
                         'this function. Please consider '
                         '`epochs.equalize_event_counts`')
    if conditions is None:
        conditions = epochs.event_id.keys()
        epochs = epochs.copy()
    else:
        epochs = epochs[conditions]
    epochs.drop_bad()
    if len(conditions) != 2:
        raise ValueError('Currently this function expects exactly 2 '
                         'conditions but you gave me %i' %
                         len(conditions))
    ev = epochs.events[:, 2]
    # Special care to avoid path dependent mappings and orders
    conditions = list(sorted(conditions))
    cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]
    info = pick_info(epochs.info, picks)
    data = epochs.get_data()[:, picks]
    # Scale (z-score) the data by channel type
    # XXX the z-scoring is applied outside the CV, which is not standard.
    for ch_type in ['mag', 'grad', 'eeg']:
        if ch_type in epochs:
            # FIXME should be applied to all sort of data channels
            if ch_type == 'eeg':
                this_picks = pick_types(info, meg=False, eeg=True)
            else:
                this_picks = pick_types(info, meg=ch_type, eeg=False)
            data[:, this_picks] /= np.std(data[:, this_picks])
    # Setup cross-validation. Need to use _set_cv to deal with sklearn
    # deprecation of cv objects.
    y = epochs.events[:, 2]
    _, cv_splits = _set_cv(cv, 'classifier', X=y, y=y)
    parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs)
    # FIXME this parallelization should be removed.
    #   1) it's numpy computation so it's already efficient,
    #   2) it duplicates the data in RAM,
    #   3) the computation is already super fast.
    out = parallel(p_func(_ems_diff, data, cond_idx, train, test)
                   for train, test in cv_splits)
    surrogate_trials, spatial_filter = zip(*out)
    surrogate_trials = np.array(surrogate_trials)
    # Average the per-fold filters into one mean spatial filter.
    spatial_filter = np.mean(spatial_filter, axis=0)
    return surrogate_trials, spatial_filter, epochs.events[:, 2]
def _ems_diff(data0, data1):
"""Aux. function to compute_ems that computes the default diff
objective function."""
return np.mean(data0, axis=0) - np.mean(data1, axis=0)
def _run_ems(objective_function, data, cond_idx, train, test):
d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]
# compute surrogates
return np.sum(data[test[0]] * d, axis=0), d
| {
"repo_name": "alexandrebarachant/mne-python",
"path": "mne/decoding/ems.py",
"copies": "1",
"size": "8137",
"license": "bsd-3-clause",
"hash": 6649807584610984000,
"line_mean": 36.3256880734,
"line_max": 79,
"alpha_frac": 0.625414772,
"autogenerated": false,
"ratio": 3.9385285575992257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5063943329599225,
"avg_score": null,
"num_lines": null
} |
from itertools import product
import os
import os.path as op
from unittest import SkipTest
import pytest
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from scipy import stats
import matplotlib.pyplot as plt
from mne import (Epochs, read_events, pick_types, create_info, EpochsArray,
EvokedArray, Annotations, pick_channels_regexp)
from mne.cov import read_cov
from mne.preprocessing import (ICA, ica_find_ecg_events, ica_find_eog_events,
read_ica, run_ica)
from mne.preprocessing.ica import (get_score_funcs, corrmap, _sort_components,
_ica_explained_variance)
from mne.io import read_raw_fif, Info, RawArray, read_raw_ctf, read_raw_eeglab
from mne.io.meas_info import _kind_dict
from mne.io.pick import _DATA_CH_TYPES_SPLIT
from mne.rank import _compute_rank_int
from mne.utils import (catch_logging, _TempDir, requires_sklearn,
run_tests_if_main)
from mne.datasets import testing
from mne.event import make_fixed_length_events
# Small test recordings shipped with the mne.io test suite.
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
test_cov_name = op.join(data_dir, 'test-cov.fif')

# Larger optional datasets; download=False means the path may be empty when
# the testing dataset has not been fetched.
test_base_dir = testing.data_path(download=False)
ctf_fname = op.join(test_base_dir, 'CTF', 'testdata_ctf.ds')
fif_fname = op.join(test_base_dir, 'MEG', 'sample',
                    'sample_audvis_trunc_raw.fif')
eeglab_fname = op.join(test_base_dir, 'EEGLAB', 'test_raw.set')
eeglab_montage = op.join(test_base_dir, 'EEGLAB', 'test_chans.locs')
ctf_fname2 = op.join(test_base_dir, 'CTF', 'catch-alp-good-f.ds')

# Epoching parameters shared by the tests below.
event_id, tmin, tmax = 1, -0.2, 0.2
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 6

# Score functions skipped by the score-func loops below (see the
# `if name in score_funcs_unsuited: continue` checks).
score_funcs_unsuited = ['pointbiserialr', 'ansari']
def _skip_check_picard(method):
if method == 'picard':
try:
import picard # noqa, analysis:ignore
except Exception as exp:
raise SkipTest("Picard is not installed (%s)." % (exp,))
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_ica_full_data_recovery(method):
    """Test recovery of full data when no source is rejected.

    With n_pca_components == n_channels and an empty exclude list, applying
    the ICA must reconstruct raw/epochs/evoked data exactly; with half the
    PCA components the reconstruction must differ measurably.
    """
    # Most basic recovery
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    with pytest.warns(RuntimeWarning, match='projection'):
        epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                        baseline=(None, 0), preload=True)
    evoked = epochs.average()
    n_channels = 5
    # Snapshot the original data to compare against after round-trips.
    data = raw._data[:n_channels].copy()
    data_epochs = epochs.get_data()
    data_evoked = evoked.data
    raw.set_annotations(Annotations([0.5], [0.5], ['BAD']))
    methods = [method]
    for method in methods:
        # (n_components, n_pca_components, reconstruction expected exact?)
        stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
        for n_components, n_pca_components, ok in stuff:
            ica = ICA(n_components=n_components, random_state=0,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components,
                      method=method, max_iter=1)
            with pytest.warns(UserWarning, match=None):  # sometimes warns
                ica.fit(raw, picks=list(range(n_channels)))
            raw2 = ica.apply(raw.copy(), exclude=[])
            if ok:
                assert_allclose(data[:n_channels], raw2._data[:n_channels],
                                rtol=1e-10, atol=1e-15)
            else:
                diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
                assert (np.max(diff) > 1e-14)

            ica = ICA(n_components=n_components, method=method,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components, random_state=0)
            with pytest.warns(None):  # sometimes warns
                ica.fit(epochs, picks=list(range(n_channels)))
            epochs2 = ica.apply(epochs.copy(), exclude=[])
            data2 = epochs2.get_data()[:, :n_channels]
            if ok:
                assert_allclose(data_epochs[:, :n_channels], data2,
                                rtol=1e-10, atol=1e-15)
            else:
                diff = np.abs(data_epochs[:, :n_channels] - data2)
                assert (np.max(diff) > 1e-14)

            evoked2 = ica.apply(evoked.copy(), exclude=[])
            data2 = evoked2.data[:n_channels]
            if ok:
                assert_allclose(data_evoked[:n_channels], data2,
                                rtol=1e-10, atol=1e-15)
            else:
                diff = np.abs(evoked.data[:n_channels] - data2)
                assert (np.max(diff) > 1e-14)
    # Unknown method names must be rejected at construction time.
    pytest.raises(ValueError, ICA, method='pizza-decomposision')
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_ica_simple(method):
    """Test that ICA recovers the unmixing matrix in a simple case.

    Laplacian sources mixed by a random matrix should be separated well
    enough that the Amari distance of (unmixing @ pca) @ A is small.
    """
    if method == "fastica":
        try:
            import sklearn  # noqa: F401
        except ImportError:
            raise SkipTest("scikit-learn not installed")
    _skip_check_picard(method)
    n_components = 3
    n_samples = 1000
    rng = np.random.RandomState(0)
    # Super-Gaussian (Laplacian) sources are identifiable by ICA.
    S = rng.laplace(size=(n_components, n_samples))
    A = rng.randn(n_components, n_components)
    data = np.dot(A, S)
    ica = ICA(n_components=n_components, method=method, random_state=0)
    ica._fit(data, n_components, 0)
    # Perfect recovery would make `transform` a scaled permutation matrix,
    # i.e. an Amari distance of 0.
    transform = np.dot(np.dot(ica.unmixing_matrix_, ica.pca_components_), A)
    amari_distance = np.mean(np.sum(np.abs(transform), axis=1) /
                             np.max(np.abs(transform), axis=1) - 1.)
    assert amari_distance < 0.1
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_ica_rank_reduction(method):
    """Test recovery ICA rank reduction.

    Applying ICA with fewer PCA components than channels must reduce the
    rank of the cleaned data accordingly.
    """
    _skip_check_picard(method)
    # Most basic recovery
    raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    n_components = 5
    max_pca_components = len(picks)
    for n_pca_components in [6, 10]:
        with pytest.warns(UserWarning, match='did not converge'):
            ica = ICA(n_components=n_components,
                      max_pca_components=max_pca_components,
                      n_pca_components=n_pca_components,
                      method=method, max_iter=1).fit(raw, picks=picks)

        rank_before = _compute_rank_int(raw.copy().pick(picks), proj=False)
        assert_equal(rank_before, len(picks))
        raw_clean = ica.apply(raw.copy())
        rank_after = _compute_rank_int(raw_clean.copy().pick(picks),
                                       proj=False)
        # interaction between ICA rejection and PCA components difficult
        # to predict. Rank_after often seems to be 1 higher than
        # n_pca_components
        assert (n_components < n_pca_components <= rank_after <=
                rank_before)
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_ica_reset(method):
    """Test ICA resetting.

    Fitting must create the run-time attributes; `_reset` must remove all of
    them while preserving `labels_`.
    """
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]

    # Attributes that only exist on a fitted ICA instance.
    run_time_attrs = (
        'pre_whitener_',
        'unmixing_matrix_',
        'mixing_matrix_',
        'n_components_',
        'n_samples_',
        'pca_components_',
        'pca_explained_variance_',
        'pca_mean_'
    )
    with pytest.warns(UserWarning, match='did not converge'):
        ica = ICA(
            n_components=3, max_pca_components=3, n_pca_components=3,
            method=method, max_iter=1).fit(raw, picks=picks)

    assert (all(hasattr(ica, attr) for attr in run_time_attrs))
    assert ica.labels_ is not None
    ica._reset()
    assert (not any(hasattr(ica, attr) for attr in run_time_attrs))
    # labels_ must survive a reset.
    assert ica.labels_ is not None
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_ica_core(method):
    """Test ICA on raw and epochs.

    Exercises construction errors, fit/refit determinism, source extraction,
    plotting scale, preload checks, and bad-input handling.
    """
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    # XXX. The None cases helped revealing bugs but are time consuming.
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    noise_cov = [None, test_cov]
    # removed None cases to speed up...
    n_components = [2, 1.0]  # for future dbg add cases
    max_pca_components = [3]
    picks_ = [picks]
    methods = [method]

    # All parameter combinations tested below.
    iter_ica_params = product(noise_cov, n_components, max_pca_components,
                              picks_, methods)

    # # test init catchers
    pytest.raises(ValueError, ICA, n_components=3, max_pca_components=2)
    pytest.raises(ValueError, ICA, n_components=2.3, max_pca_components=2)

    # test essential core functionality
    for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
        # Test ICA raw
        ica = ICA(noise_cov=n_cov, n_components=n_comp,
                  max_pca_components=max_n, n_pca_components=max_n,
                  random_state=0, method=method, max_iter=1)
        pytest.raises(ValueError, ica.__contains__, 'mag')

        print(ica)  # to test repr

        # test fit checker
        pytest.raises(RuntimeError, ica.get_sources, raw)
        pytest.raises(RuntimeError, ica.get_sources, epochs)

        # test decomposition
        with pytest.warns(UserWarning, match='did not converge'):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
        repr(ica)  # to test repr
        assert ('mag' in ica)  # should now work without error

        # test re-fit: a second fit with identical inputs must reproduce
        # the same unmixing matrix (random_state is fixed).
        unmixing1 = ica.unmixing_matrix_
        with pytest.warns(UserWarning, match='did not converge'):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
        assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)

        raw_sources = ica.get_sources(raw)
        # test for #3804
        assert_equal(raw_sources._filenames, [None])
        print(raw_sources)

        # test for gh-6271 (scaling of ICA traces)
        fig = raw_sources.plot()
        assert len(fig.axes[0].lines) in (4, 5)
        for line in fig.axes[0].lines[1:-1]:  # first and last are markers
            y = line.get_ydata()
            assert np.ptp(y) < 10
        plt.close('all')

        sources = raw_sources[:, :][0]
        assert (sources.shape[0] == ica.n_components_)

        # test preload filter
        raw3 = raw.copy()
        raw3.preload = False
        pytest.raises(RuntimeError, ica.apply, raw3,
                      include=[1, 2])

        #######################################################################
        # test epochs decomposition
        ica = ICA(noise_cov=n_cov, n_components=n_comp,
                  max_pca_components=max_n, n_pca_components=max_n,
                  random_state=0, method=method)
        with pytest.warns(None):  # sometimes warns
            ica.fit(epochs, picks=picks)
        data = epochs.get_data()[:, 0, :]
        n_samples = np.prod(data.shape)
        assert_equal(ica.n_samples_, n_samples)
        print(ica)  # to test repr

        sources = ica.get_sources(epochs).get_data()
        assert (sources.shape[1] == ica.n_components_)

        pytest.raises(ValueError, ica.score_sources, epochs,
                      target=np.arange(1))

        # test preload filter
        epochs3 = epochs.copy()
        epochs3.preload = False
        pytest.raises(RuntimeError, ica.apply, epochs3,
                      include=[1, 2])

    # test for bug with whitener updating
    _pre_whitener = ica.pre_whitener_.copy()
    epochs._data[:, 0, 10:15] *= 1e12
    ica.apply(epochs.copy())
    assert_array_equal(_pre_whitener, ica.pre_whitener_)

    # test expl. var threshold leading to empty sel
    ica.n_components = 0.1
    pytest.raises(RuntimeError, ica.fit, epochs)

    # Non-MNE objects must be rejected with clear errors.
    offender = 1, 2, 3,
    pytest.raises(ValueError, ica.get_sources, offender)
    pytest.raises(TypeError, ica.fit, offender)
    pytest.raises(TypeError, ica.apply, offender)
@requires_sklearn
@pytest.mark.slowtest
@pytest.mark.parametrize("method", ["picard", "fastica"])
def test_ica_additional(method):
    """Test additional ICA functionality.

    Covers corrmap, save/read round-trips, decimation, explained variance,
    component sorting, artifact detection/scoring, and fiff export.
    """
    _skip_check_picard(method)
    tempdir = _TempDir()
    stop2 = 500
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    raw.del_proj()  # avoid warnings
    raw.set_annotations(Annotations([0.5], [0.5], ['BAD']))
    # XXX This breaks the tests :(
    # raw.info['bads'] = [raw.ch_names[1]]
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[1::2]
    epochs = Epochs(raw, events, None, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True, proj=False)
    epochs.decimate(3, verbose='error')
    assert len(epochs) == 4

    # test if n_components=None works
    ica = ICA(n_components=None, max_pca_components=None,
              n_pca_components=None, random_state=0, method=method,
              max_iter=1)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(epochs)
    # for testing eog functionality
    picks2 = np.concatenate([picks, pick_types(raw.info, False, eog=True)])
    epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
                        baseline=(None, 0), preload=True)
    del picks2

    test_cov2 = test_cov.copy()
    ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
              n_pca_components=4, method=method)
    assert (ica.info is None)
    with pytest.warns(RuntimeWarning, match='normalize_proj'):
        ica.fit(raw, picks[:5])
    assert (isinstance(ica.info, Info))
    assert (ica.n_components_ < 5)

    ica = ICA(n_components=3, max_pca_components=4, method=method,
              n_pca_components=4, random_state=0)
    # Saving before fitting must fail.
    pytest.raises(RuntimeError, ica.save, '')

    ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)

    # check passing a ch_name to find_bads_ecg
    with pytest.warns(RuntimeWarning, match='longer'):
        _, scores_1 = ica.find_bads_ecg(raw)
        _, scores_2 = ica.find_bads_ecg(raw, raw.ch_names[1])
    assert scores_1[0] != scores_2[0]

    # test corrmap
    ica2 = ica.copy()
    ica3 = ica.copy()
    corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True,
            ch_type="mag")
    corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False)
    assert (ica.labels_["blinks"] == ica2.labels_["blinks"])
    assert (0 in ica.labels_["blinks"])
    # test retrieval of component maps as arrays
    components = ica.get_components()
    template = components[:, 0]
    EvokedArray(components, ica.info, tmin=0.).plot_topomap([0],
                                                            time_unit='s')

    corrmap([ica, ica3], template, threshold='auto', label='blinks',
            plot=True, ch_type="mag")
    assert (ica2.labels_["blinks"] == ica3.labels_["blinks"])

    plt.close('all')

    # corrmap across ICAs fitted on different channel sets must fail.
    ica_different_channels = ICA(n_components=2, random_state=0).fit(
        raw, picks=[2, 3, 4, 5])
    pytest.raises(ValueError, corrmap, [ica_different_channels, ica], (0, 0))

    # test warnings on bad filenames
    ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
    with pytest.warns(RuntimeWarning, match='-ica.fif'):
        ica.save(ica_badname)
    with pytest.warns(RuntimeWarning, match='-ica.fif'):
        read_ica(ica_badname)

    # test decim
    ica = ICA(n_components=3, max_pca_components=4,
              n_pca_components=4, method=method, max_iter=1)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw, picks=picks[:5], decim=3)
    # Fitting with decim must not modify the data in place.
    assert raw_._data.shape[1] == n_samples

    # test expl var
    ica = ICA(n_components=1.0, max_pca_components=4,
              n_pca_components=4, method=method, max_iter=1)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw, picks=None, decim=3)
    assert (ica.n_components_ == 4)
    ica_var = _ica_explained_variance(ica, raw, normalize=True)
    # Explained variance must be non-increasing across components.
    assert (np.all(ica_var[:-1] >= ica_var[1:]))

    # test ica sorting
    ica.exclude = [0]
    ica.labels_ = dict(blink=[0], think=[1])
    ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True)
    assert_equal(ica_sorted.exclude, [3])
    assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2]))

    # epochs extraction from raw fit
    pytest.raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
                  n_pca_components=4, method=method, max_iter=1)
        with pytest.warns(None):  # ICA does not converge
            ica.fit(raw, picks=picks[:10], start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert (ica.mixing_matrix_.shape == (2, 2))
        assert (ica.unmixing_matrix_.shape == (2, 2))
        assert (ica.pca_components_.shape == (4, 10))
        assert (sources.shape[1] == ica.n_components_)

        # Save/read round-trip must preserve exclude and labels_.
        for exclude in [[], [0], np.array([1, 2, 3])]:
            ica.exclude = exclude
            ica.labels_ = {'foo': [0]}
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert (list(ica.exclude) == ica_read.exclude)
            assert_equal(ica.labels_, ica_read.labels_)
            ica.apply(raw)
            ica.exclude = []
            ica.apply(raw, exclude=[1])
            assert (ica.exclude == [])

            ica.exclude = [0, 1]
            ica.apply(raw, exclude=[1])
            assert (ica.exclude == [0, 1])
            ica_raw = ica.get_sources(raw)
            assert (ica.exclude == [ica_raw.ch_names.index(e) for e in
                                    ica_raw.info['bads']])

        # test filtering
        d1 = ica_raw._data[0].copy()
        ica_raw.filter(4, 20, fir_design='firwin2')
        assert_equal(ica_raw.info['lowpass'], 20.)
        assert_equal(ica_raw.info['highpass'], 4.)
        assert ((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        ica_raw.notch_filter([10], trans_bandwidth=10, fir_design='firwin')
        assert ((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.method = 'fake'
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert (ica.n_pca_components == ica_read.n_pca_components)
        assert_equal(ica.method, ica_read.method)
        assert_equal(ica.labels_, ica_read.labels_)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ pre_whitener_')

        def f(x, y):
            # Helper: dtype of attribute `y` on object `x`.
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

        ica.n_pca_components = 4
        ica_read.n_pca_components = 4

        ica.exclude = []
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                     'pca_mean_', 'pca_explained_variance_',
                     'pre_whitener_']:
            assert_array_almost_equal(getattr(ica, attr),
                                      getattr(ica_read, attr))

        assert (ica.ch_names == ica_read.ch_names)
        assert (isinstance(ica_read.info, Info))

        # Read-back ICA must produce the same sources and cleaned data.
        sources = ica.get_sources(raw)[:, :][0]
        sources2 = ica_read.get_sources(raw)[:, :][0]
        assert_array_almost_equal(sources, sources2)

        _raw1 = ica.apply(raw, exclude=[1])
        _raw2 = ica_read.apply(raw, exclude=[1])
        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw, target='EOG 061', score_func=func,
                                   start=0, stop=10)
        assert (ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, start=0, stop=50, score_func=stats.skew)

    # check exception handling
    pytest.raises(ValueError, ica.score_sources, raw,
                  target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
                             eog_ch=ch_name, skew_criterion=idx,
                             var_criterion=idx, kurt_criterion=idx)

    # Make sure detect_artifacts marks the right components.
    # For int criterion, the doc says "E.g. range(2) would return the two
    # sources with the highest score". Assert that's what it does.
    # Only test for skew, since it's always the same code.
    ica.exclude = []
    ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=None,
                         eog_ch=None, skew_criterion=0,
                         var_criterion=None, kurt_criterion=None)
    assert np.abs(scores[ica.exclude]) == np.max(np.abs(scores))

    evoked = epochs.average()
    evoked_data = evoked.data.copy()
    raw_data = raw[:][0].copy()
    epochs_data = epochs.get_data().copy()

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
    assert_equal(len(scores), ica.n_components_)
    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
    assert_equal(len(scores), ica.n_components_)

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert_equal(len(scores), ica.n_components_)

    idx, scores = ica.find_bads_ecg(epochs, method='ctps')
    assert_equal(len(scores), ica.n_components_)
    pytest.raises(ValueError, ica.find_bads_ecg, epochs.average(),
                  method='ctps')
    pytest.raises(ValueError, ica.find_bads_ecg, raw,
                  method='crazy-coupling')

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert_equal(len(scores), ica.n_components_)

    # Relabel a channel as EOG-like (kind 202) so two EOG channels exist;
    # scores then come back as a list, one array per channel.
    raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert (isinstance(scores, list))
    assert_equal(len(scores[0]), ica.n_components_)

    idx, scores = ica.find_bads_eog(evoked, ch_name='MEG 1441')
    assert_equal(len(scores), ica.n_components_)

    idx, scores = ica.find_bads_ecg(evoked, method='correlation')
    assert_equal(len(scores), ica.n_components_)

    # None of the scoring calls may have mutated the input data.
    assert_array_equal(raw_data, raw[:][0])
    assert_array_equal(epochs_data, epochs.get_data())
    assert_array_equal(evoked_data, evoked.data)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog, target='EOG 061',
                                   score_func=func)
        assert (ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    pytest.raises(ValueError, ica.score_sources, epochs,
                  target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw, target='MEG 1531',
                                   score_func='pearsonr')

    with pytest.warns(RuntimeWarning, match='longer'):
        ecg_events = ica_find_ecg_events(
            raw, sources[np.abs(ecg_scores).argmax()])
    assert (ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw, target='EOG 061',
                                   score_func='pearsonr')
    with pytest.warns(RuntimeWarning, match='longer'):
        eog_events = ica_find_eog_events(
            raw, sources[np.abs(eog_scores).argmax()])
    assert (eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert (ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_equal(len(ica_raw._filenames), 1)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert (ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = read_raw_fif(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert (ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert (ica.n_components_ == len(ica_chans))
    assert (ica.n_components_ == ica_epochs.get_data().shape[1])
    assert (ica_epochs._raw is None)
    assert (ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert (ncomps_ == expected)

    ica = ICA(method=method)
    with pytest.warns(None):  # sometimes does not converge
        ica.fit(raw, picks=picks[:5])
    with pytest.warns(RuntimeWarning, match='longer'):
        ica.find_bads_ecg(raw)
    ica.find_bads_eog(epochs, ch_name='MEG 0121')
    assert_array_equal(raw_data, raw[:][0])

    # Dropping a channel the ICA was fitted on must make detection fail.
    raw.drop_channels(['MEG 0122'])
    pytest.raises(RuntimeError, ica.find_bads_eog, raw)
    with pytest.warns(RuntimeWarning, match='longer'):
        pytest.raises(RuntimeError, ica.find_bads_ecg, raw)
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_run_ica(method):
    """Test run_ica function.

    Smoke-tests the convenience wrapper for several criterion/channel
    parameter combinations.
    """
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        run_ica(raw, n_components=2, start=0, stop=0.5, start_find=0,
                stop_find=5, ecg_ch=ch_name, eog_ch=ch_name, method=method,
                skew_criterion=idx, var_criterion=idx, kurt_criterion=idx)
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_ica_reject_buffer(method):
    """Test ICA data raw buffer rejection.

    An artificial amplitude spike must cause exactly one buffer to be
    dropped during fitting, reducing ``n_samples_`` accordingly.
    """
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    # Inject a spike exceeding the reject threshold below.
    raw._data[2, 1000:1005] = 5e-12
    ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4,
              method=method)
    with catch_logging() as drop_log:
        ica.fit(raw, picks[:5], reject=dict(mag=2.5e-12), decim=2,
                tstep=0.01, verbose=True, reject_by_annotation=False)
        assert (raw._data[:5, ::2].shape[1] - 4 == ica.n_samples_)
    # Exactly one rejected-buffer message must have been logged.
    log = [l for l in drop_log.getvalue().split('\n') if 'detected' in l]
    assert_equal(len(log), 1)
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_ica_twice(method):
    """Test running ICA twice.

    A second ICA fitted on data cleaned by a first ICA must select the same
    number of components.
    """
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    picks = pick_types(raw.info, meg='grad', exclude='bads')
    n_components = 0.9
    max_pca_components = None
    n_pca_components = 1.1
    ica1 = ICA(n_components=n_components, method=method,
               max_pca_components=max_pca_components,
               n_pca_components=n_pca_components, random_state=0)

    ica1.fit(raw, picks=picks, decim=3)
    raw_new = ica1.apply(raw, n_pca_components=n_pca_components)
    ica2 = ICA(n_components=n_components, method=method,
               max_pca_components=max_pca_components,
               n_pca_components=1.0, random_state=0)
    ica2.fit(raw_new, picks=picks, decim=3)
    assert_equal(ica1.n_components_, ica2.n_components_)
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_fit_params(method):
    """Test fit_params for ICA."""
    _skip_check_picard(method)
    # Constructing an ICA must not mutate the dict passed as fit_params.
    user_params = dict()
    ICA(fit_params=user_params, method=method)  # test no side effects
    assert_equal(user_params, dict())
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
@pytest.mark.parametrize("allow_ref_meg", [True, False])
def test_bad_channels(method, allow_ref_meg):
    """Test exception when unsupported channels are used."""
    _skip_check_picard(method)
    chs = [i for i in _kind_dict]
    info = create_info(len(chs), 500, chs)
    rng = np.random.RandomState(0)
    data = rng.rand(len(chs), 50)
    raw = RawArray(data, info)
    data = rng.rand(100, len(chs), 50)
    epochs = EpochsArray(data, info)
    n_components = 0.9
    data_chs = list(_DATA_CH_TYPES_SPLIT + ('eog',))
    if allow_ref_meg:
        data_chs.append('ref_meg')
    # Every channel kind that is *not* an accepted data type must make
    # ICA.fit raise a ValueError.
    chs_bad = list(set(chs) - set(data_chs))
    ica = ICA(n_components=n_components, method=method,
              allow_ref_meg=allow_ref_meg)
    for inst in [raw, epochs]:
        for ch in chs_bad:
            if allow_ref_meg:
                # Test case for only bad channels
                picks_bad1 = pick_types(inst.info, meg=False,
                                        ref_meg=False,
                                        **{str(ch): True})
                # Test case for good and bad channels
                picks_bad2 = pick_types(inst.info, meg=True,
                                        ref_meg=True,
                                        **{str(ch): True})
            else:
                # Test case for only bad channels
                picks_bad1 = pick_types(inst.info, meg=False,
                                        **{str(ch): True})
                # Test case for good and bad channels
                picks_bad2 = pick_types(inst.info, meg=True,
                                        **{str(ch): True})
            pytest.raises(ValueError, ica.fit, inst, picks=picks_bad1)
            pytest.raises(ValueError, ica.fit, inst, picks=picks_bad2)
    # An empty picks selection is invalid, too.
    pytest.raises(ValueError, ica.fit, inst, picks=[])
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_eog_channel(method):
    """Test that EOG channel is included when performing ICA."""
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname, preload=True)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=True, ecg=False,
                       eog=True, exclude='bads')
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    n_components = 0.9
    ica = ICA(n_components=n_components, method=method)
    # Test case for MEG and EOG data. Should have EOG channel
    for inst in [raw, epochs]:
        # Combine a few MEG channels with all EOG channels.
        picks1a = pick_types(inst.info, meg=True, stim=False, ecg=False,
                             eog=False, exclude='bads')[:4]
        picks1b = pick_types(inst.info, meg=False, stim=False, ecg=False,
                             eog=True, exclude='bads')
        picks1 = np.append(picks1a, picks1b)
        ica.fit(inst, picks=picks1)
        assert (any('EOG' in ch for ch in ica.ch_names))
    # Test case for MEG data. Should have no EOG channel
    for inst in [raw, epochs]:
        picks1 = pick_types(inst.info, meg=True, stim=False, ecg=False,
                            eog=False, exclude='bads')[:5]
        ica.fit(inst, picks=picks1)
        assert not any('EOG' in ch for ch in ica.ch_names)
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_max_pca_components_none(method):
    """Test max_pca_components=None."""
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    events = read_events(event_name)
    picks = pick_types(raw.info, eeg=True, meg=False)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    max_pca_components = None
    n_components = 10
    random_state = 12345
    tempdir = _TempDir()
    output_fname = op.join(tempdir, 'test_ica-ica.fif')
    ica = ICA(max_pca_components=max_pca_components, method=method,
              n_components=n_components, random_state=random_state)
    with pytest.warns(None):
        ica.fit(epochs)
    # Round-trip through disk to verify the resolved value is persisted.
    ica.save(output_fname)
    ica = read_ica(output_fname)
    # ICA.fit() replaced max_pca_components, which was previously None,
    # with the appropriate integer value.
    assert_equal(ica.max_pca_components, epochs.info['nchan'])
    assert_equal(ica.n_components, 10)
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_n_components_none(method):
    """Test n_components=None."""
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    events = read_events(event_name)
    picks = pick_types(raw.info, eeg=True, meg=False)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    max_pca_components = 10
    n_components = None
    random_state = 12345
    tempdir = _TempDir()
    output_fname = op.join(tempdir, 'test_ica-ica.fif')
    ica = ICA(max_pca_components=max_pca_components, method=method,
              n_components=n_components, random_state=random_state)
    with pytest.warns(None):
        ica.fit(epochs)
    # Round-trip through disk to verify the values are persisted.
    ica.save(output_fname)
    ica = read_ica(output_fname)
    # max_pca_components was given explicitly (10), so fitting and the
    # save/load round-trip must preserve it; n_components stays None.
    assert_equal(ica.max_pca_components, 10)
    assert ica.n_components is None
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_n_components_and_max_pca_components_none(method):
    """Test n_components and max_pca_components=None."""
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    events = read_events(event_name)
    picks = pick_types(raw.info, eeg=True, meg=False)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    max_pca_components = None
    n_components = None
    random_state = 12345
    tempdir = _TempDir()
    output_fname = op.join(tempdir, 'test_ica-ica.fif')
    ica = ICA(max_pca_components=max_pca_components, method=method,
              n_components=n_components, random_state=random_state)
    with pytest.warns(None):  # convergence
        ica.fit(epochs)
    # Round-trip through disk to verify the resolved value is persisted.
    ica.save(output_fname)
    ica = read_ica(output_fname)
    # ICA.fit() replaced max_pca_components, which was previously None,
    # with the appropriate integer value; n_components stays None.
    assert_equal(ica.max_pca_components, epochs.info['nchan'])
    assert ica.n_components is None
@requires_sklearn
@testing.requires_testing_data
def test_ica_ctf():
    """Test run ICA computation on ctf data with/without compensation."""
    method = 'fastica'
    raw = read_raw_ctf(ctf_fname, preload=True)
    events = make_fixed_length_events(raw, 99999)
    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
        evoked = epochs.average()

        # test fit
        for inst in [raw, epochs]:
            ica = ICA(n_components=2, random_state=0, max_iter=2,
                      method=method)
            with pytest.warns(UserWarning, match='did not converge'):
                ica.fit(inst)

        # test apply and get_sources
        for inst in [raw, epochs, evoked]:
            ica.apply(inst)
            ica.get_sources(inst)

    # test mixed compensation case: an ICA fitted on grade-0 data must
    # refuse to operate on grade-1 data.
    raw.apply_gradient_compensation(0)
    ica = ICA(n_components=2, random_state=0, max_iter=2, method=method)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw)
    raw.apply_gradient_compensation(1)
    epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
    evoked = epochs.average()
    for inst in [raw, epochs, evoked]:
        with pytest.raises(RuntimeError, match='Compensation grade of ICA'):
            ica.apply(inst)
        with pytest.raises(RuntimeError, match='Compensation grade of ICA'):
            ica.get_sources(inst)
@requires_sklearn
@testing.requires_testing_data
def test_ica_labels():
    """Test ICA labels."""
    # The CTF data are uniquely well suited to testing the ICA.find_bads_
    # methods
    raw = read_raw_ctf(ctf_fname, preload=True)
    # derive reference ICA components and append them to raw
    icarf = ICA(n_components=2, random_state=0, max_iter=2, allow_ref_meg=True)
    with pytest.warns(UserWarning, match='did not converge'):
        icarf.fit(raw.copy().pick_types(meg=False, ref_meg=True))
    icacomps = icarf.get_sources(raw)
    # rename components so they are auto-detected by find_bads_ref
    icacomps.rename_channels({c: 'REF_' + c for c in icacomps.ch_names})
    # and add them to raw
    raw.add_channels([icacomps])
    # set the appropriate EEG channels to EOG and ECG
    raw.set_channel_types({'EEG057': 'eog', 'EEG058': 'eog', 'EEG059': 'ecg'})
    ica = ICA(n_components=4, random_state=0, max_iter=2, method='fastica')
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw)

    # Each find_bads_* call must add only its own keys to ica.labels_,
    # never keys for artifact types that were not searched yet.
    ica.find_bads_eog(raw, l_freq=None, h_freq=None)
    picks = list(pick_types(raw.info, meg=False, eog=True))
    for idx, ch in enumerate(picks):
        assert '{}/{}/{}'.format('eog', idx, raw.ch_names[ch]) in ica.labels_
    assert 'eog' in ica.labels_
    for key in ('ecg', 'ref_meg', 'ecg/ECG-MAG'):
        assert key not in ica.labels_

    ica.find_bads_ecg(raw, l_freq=None, h_freq=None, method='correlation')
    picks = list(pick_types(raw.info, meg=False, ecg=True))
    for idx, ch in enumerate(picks):
        assert '{}/{}/{}'.format('ecg', idx, raw.ch_names[ch]) in ica.labels_
    for key in ('ecg', 'eog'):
        assert key in ica.labels_
    for key in ('ref_meg', 'ecg/ECG-MAG'):
        assert key not in ica.labels_

    ica.find_bads_ref(raw, l_freq=None, h_freq=None)
    picks = pick_channels_regexp(raw.ch_names, 'REF_ICA*')
    for idx, ch in enumerate(picks):
        assert '{}/{}/{}'.format('ref_meg', idx,
                                 raw.ch_names[ch]) in ica.labels_
    for key in ('ecg', 'eog', 'ref_meg'):
        assert key in ica.labels_
    assert 'ecg/ECG-MAG' not in ica.labels_

    # The default (CTF-based) ECG detection adds the remaining key.
    ica.find_bads_ecg(raw, l_freq=None, h_freq=None)
    for key in ('ecg', 'eog', 'ref_meg', 'ecg/ECG-MAG'):
        assert key in ica.labels_
@requires_sklearn
@testing.requires_testing_data
def test_ica_eeg():
    """Test ICA on EEG (FIF, EEGLAB and CTF readers)."""
    method = 'fastica'
    raw_fif = read_raw_fif(fif_fname, preload=True)
    raw_eeglab = read_raw_eeglab(input_fname=eeglab_fname,
                                 montage=eeglab_montage, preload=True)
    for raw in [raw_fif, raw_eeglab]:
        events = make_fixed_length_events(raw, 99999, start=0, stop=0.3,
                                          duration=0.1)
        picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2]
        picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2]
        picks_all = []
        picks_all.extend(picks_meg)
        picks_all.extend(picks_eeg)
        epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True)
        evoked = epochs.average()

        for picks in [picks_meg, picks_eeg, picks_all]:
            if len(picks) == 0:
                continue
            # test fit
            for inst in [raw, epochs]:
                ica = ICA(n_components=2, random_state=0, max_iter=2,
                          method=method)
                with pytest.warns(None):
                    ica.fit(inst, picks=picks)

            # test apply and get_sources
            for inst in [raw, epochs, evoked]:
                ica.apply(inst)
                ica.get_sources(inst)

    with pytest.warns(RuntimeWarning, match='MISC channel'):
        raw = read_raw_ctf(ctf_fname2, preload=True)
    events = make_fixed_length_events(raw, 99999, start=0, stop=0.2,
                                      duration=0.1)
    picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2]
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2]
    # BUG FIX: the original ``picks_meg + picks_eeg`` added the two index
    # *arrays* elementwise, producing bogus channel indices instead of
    # their union (contrast with the extend-based construction above).
    picks_all = np.concatenate([picks_meg, picks_eeg])
    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True)
        evoked = epochs.average()

        for picks in [picks_meg, picks_eeg, picks_all]:
            if len(picks) == 0:
                continue
            # test fit
            for inst in [raw, epochs]:
                ica = ICA(n_components=2, random_state=0, max_iter=2,
                          method=method)
                with pytest.warns(None):
                    # NOTE(review): unlike the FIF/EEGLAB loop, ``picks``
                    # is not forwarded here — possibly because of CTF
                    # compensation channels; confirm before changing.
                    ica.fit(inst)

            # test apply and get_sources
            for inst in [raw, epochs, evoked]:
                ica.apply(inst)
                ica.get_sources(inst)
run_tests_if_main()
| {
"repo_name": "adykstra/mne-python",
"path": "mne/preprocessing/tests/test_ica.py",
"copies": "1",
"size": "42637",
"license": "bsd-3-clause",
"hash": 7685854663471898000,
"line_mean": 39.0723684211,
"line_max": 79,
"alpha_frac": 0.6005582006,
"autogenerated": false,
"ratio": 3.2389091461561836,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43394673467561834,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from ..utils import logger, verbose
from ..fixes import Counter
from ..parallel import parallel_func
from .. import pick_types, pick_info
@verbose
def compute_ems(epochs, conditions=None, picks=None, n_jobs=1, verbose=None):
    """Compute event-matched spatial filter on epochs.

    This version operates on the entire time course. No time window needs to
    be specified. The result is a spatial filter at each time point and a
    corresponding time course. Intuitively, the result gives the similarity
    between the filter at each time point and the data vector (sensors) at
    that time point.

    References
    ----------
    [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
        multi-sensor data to a single time course that reveals experimental
        effects", BMC Neuroscience 2013, 14:122

    Parameters
    ----------
    epochs : instance of mne.Epochs
        The epochs.
    conditions : list of str | None
        If a list of strings, strings must match the
        epochs.event_id's key as well as the number of conditions supported
        by the objective_function. If None keys in epochs.event_id are used.
    picks : array-like of int | None
        Channels to be included. If None only good data channels are used.
        Defaults to None
    n_jobs : int
        Number of jobs to run in parallel.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
        Defaults to self.verbose.

    Returns
    -------
    surrogate_trials : ndarray, shape (trials, n_trials, n_time_points)
        The trial surrogates.
    mean_spatial_filter : ndarray, shape (n_channels, n_times)
        The set of spatial filters.
    conditions : ndarray, shape (n_epochs,)
        The conditions used. Values correspond to original event ids.

    Raises
    ------
    ValueError
        If the conditions have unequal epoch counts, or if the number of
        conditions is not exactly 2.
    """
    logger.info('...computing surrogate time series. This can take some time')
    if picks is None:
        picks = pick_types(epochs.info, meg=True, eeg=True)
    # The leave-one-out scheme below requires balanced condition counts.
    if not len(set(Counter(epochs.events[:, 2]).values())) == 1:
        raise ValueError('The same number of epochs is required by '
                         'this function. Please consider '
                         '`epochs.equalize_event_counts`')
    if conditions is None:
        conditions = epochs.event_id.keys()
        epochs = epochs.copy()
    else:
        epochs = epochs[conditions]
    epochs.drop_bad()
    if len(conditions) != 2:
        raise ValueError('Currently this function expects exactly 2 '
                         'conditions but you gave me %i' %
                         len(conditions))
    ev = epochs.events[:, 2]
    # special care to avoid path dependent mappings and orders
    conditions = list(sorted(conditions))
    cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]
    info = pick_info(epochs.info, picks)
    data = epochs.get_data()[:, picks]

    # Scale (z-score) the data by channel type
    for ch_type in ['mag', 'grad', 'eeg']:
        if ch_type in epochs:
            if ch_type == 'eeg':
                this_picks = pick_types(info, meg=False, eeg=True)
            else:
                this_picks = pick_types(info, meg=ch_type, eeg=False)
            data[:, this_picks] /= np.std(data[:, this_picks])

    # BUG FIX: the bare ``except:`` here also swallowed KeyboardInterrupt
    # and masked unrelated failures; only the ImportError fallback for old
    # scikit-learn versions is intended.
    try:
        from sklearn.model_selection import LeaveOneOut
    except ImportError:  # XXX support sklearn < 0.18
        from sklearn.cross_validation import LeaveOneOut

    def _iter_cv(n):  # XXX support sklearn < 0.18
        # New-style LeaveOneOut (>= 0.18) needs an explicit split(); the
        # old-style object is itself iterable.
        if hasattr(LeaveOneOut, 'split'):
            cv = LeaveOneOut()
            return cv.split(np.zeros((n, 1)))
        else:
            cv = LeaveOneOut(len(data))
            return cv

    # One filter / surrogate per left-out trial, computed in parallel.
    parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs)
    out = parallel(p_func(_ems_diff, data, cond_idx, train, test)
                   for train, test in _iter_cv(len(data)))
    surrogate_trials, spatial_filter = zip(*out)
    surrogate_trials = np.array(surrogate_trials)
    spatial_filter = np.mean(spatial_filter, axis=0)
    return surrogate_trials, spatial_filter, epochs.events[:, 2]
def _ems_diff(data0, data1):
    """Default diff objective: difference of the two condition means."""
    return data0.mean(axis=0) - data1.mean(axis=0)
def _run_ems(objective_function, data, cond_idx, train, test):
    """Fit one spatial filter on the training trials and project the
    left-out trial onto it.
    """
    # Restrict each condition's trial indices to the training set.
    per_cond = [data[np.intersect1d(idx, train)] for idx in cond_idx]
    spatial_filter = objective_function(*per_cond)
    # Normalize each time point's filter to unit sensor-space norm.
    norms = np.sqrt(np.sum(spatial_filter ** 2, axis=0))
    spatial_filter = spatial_filter / norms[None, :]
    # Surrogate time course: similarity of the held-out trial to the filter.
    surrogate = np.sum(data[test[0]] * spatial_filter, axis=0)
    return surrogate, spatial_filter
| {
"repo_name": "ARudiuk/mne-python",
"path": "mne/decoding/ems.py",
"copies": "3",
"size": "4695",
"license": "bsd-3-clause",
"hash": -6935707497395011000,
"line_mean": 35.6796875,
"line_max": 79,
"alpha_frac": 0.6319488818,
"autogenerated": false,
"ratio": 3.7590072057646116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00018601190476190475,
"num_lines": 128
} |
__author__ = 'denis_makogon'
from gigaspace.common import remote
exceptions = remote.cinderclient.client.exceptions
class BaseCinderActions(remote.RemoteServices):
    """Thin wrapper around the Cinder volumes API.

    Uses the authenticated ``cinderclient`` provided by
    :class:`remote.RemoteServices`. Client exceptions
    (``ClientException``) propagate to the caller unchanged — the
    redundant ``try/except ...: raise`` wrappers of the original, which
    only re-raised, were removed as dead code.
    """

    def create_volume(self, size, name):
        """Create a volume with the given size and display name.

        :param size: volume size (coerced with ``int()``)
        :type size: basestring
        :param name: volume display name
        :type name: basestring
        :return: volume info
        :rtype: dict
        """
        return self.cinderclient.volumes.create(int(size), name=name)

    def show_volume(self, id_or_name):
        """Return volume info for the given ID or name.

        :param id_or_name: volume ID or name
        :type id_or_name: basestring
        :return: volume info
        :rtype: dict
        """
        return self.cinderclient.volumes.get(id_or_name)

    def list_volumes(self):
        """List all volumes.

        :return: list of volumes
        :rtype: list
        """
        return self.cinderclient.volumes.list()
| {
"repo_name": "denismakogon/gigaspace-test-task",
"path": "gigaspace/cinder_workflow/base.py",
"copies": "1",
"size": "1460",
"license": "apache-2.0",
"hash": 5868636643810426000,
"line_mean": 25.5454545455,
"line_max": 62,
"alpha_frac": 0.5705479452,
"autogenerated": false,
"ratio": 4.548286604361371,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 55
} |
__author__ = 'denis_makogon'
import argparse
import six
def args(*args, **kwargs):
    """Attach commandline argument specs to an action function.

    Each decorated application prepends ``(args, kwargs)`` to the
    function's ``args`` attribute, which the command parser later turns
    into ``argparse`` arguments.

    :param args: sub-category commandline arguments
    :param kwargs: sub-category commandline arguments
    :return: decorator: object attribute setter
    :rtype: callable
    """
    def _decorator(func):
        spec_list = func.__dict__.setdefault('args', [])
        spec_list.insert(0, (args, kwargs))
        return func
    return _decorator
def methods_of(obj):
    """Collect the public callable attributes of *obj*.

    Attributes whose names start with an underscore (private attributes)
    are skipped.

    :param obj: object to get callable attributes from
    :type obj: object
    :return: a list of tuples of the form (method_name, method)
    :rtype: list
    """
    return [(name, getattr(obj, name)) for name in dir(obj)
            if not name.startswith('_') and callable(getattr(obj, name))]
def add_command_parsers(categories):
    """
    Parses actions commandline arguments from each category
    :param categories: commandline categories
    :type categories: dict
    :return: _subparser: commandline subparser
    """
    def _subparser(subparsers):
        """
        Iterates over categories and registers action
        commandline arguments for each category
        :param subparsers: commandline subparser
        :return: None
        :rtype: None
        """
        for category in categories:
            command_object = categories[category]()
            desc = getattr(command_object, 'description', None)
            parser = subparsers.add_parser(category, description=desc)
            parser.set_defaults(command_object=command_object)
            category_subparsers = parser.add_subparsers(dest='action')
            for (action, action_fn) in methods_of(command_object):
                # NOTE: ``parser`` is deliberately rebound to the
                # per-action subparser from here on.
                parser = category_subparsers.add_parser(
                    action, description=desc)
                action_kwargs = []
                # Argument specs were attached by the @args decorator.
                for args, kwargs in getattr(action_fn, 'args', []):
                    # Derive dest from the long option ('--foo' -> 'foo').
                    kwargs.setdefault('dest', args[0][2:])
                    if kwargs['dest'].startswith('action_kwarg_'):
                        action_kwargs.append(
                            kwargs['dest'][len('action_kwarg_'):])
                    else:
                        action_kwargs.append(kwargs['dest'])
                    # Namespace the dest so action kwargs cannot collide
                    # with parser-internal defaults.
                    kwargs['dest'] = 'action_kwarg_' + kwargs['dest']
                    parser.add_argument(*args, **kwargs)
                parser.set_defaults(action_fn=action_fn)
                parser.set_defaults(action_kwargs=action_kwargs)
                # Remaining positionals are passed through to the action.
                parser.add_argument('action_args', nargs='*',
                                    help=argparse.SUPPRESS)
    return _subparser
def _main(global_conf, local_conf, category_opt, cli_args):
    """Parse CLI arguments and dispatch to the selected action function.

    :param global_conf: staged CONF
    :param local_conf: tool conf
    :param category_opt: subparser category options
    :param cli_args: tool CLI arguments
    :return: whatever the action function returns, or None on error
    """
    global_conf.register_cli_opt(category_opt)
    local_conf.parse_args(cli_args)
    fn = global_conf.category.action_fn
    # BUG FIX: ``str.decode`` does not exist on Python 3 (and the
    # ``six.string_types`` check matched text, not raw bytes). Decode
    # only ``bytes`` and pass text through untouched.
    fn_args = [arg.decode('utf-8') if isinstance(arg, bytes) else arg
               for arg in global_conf.category.action_args]
    fn_kwargs = {}
    for k in global_conf.category.action_kwargs:
        v = getattr(global_conf.category, 'action_kwarg_' + k)
        if v is None:
            continue
        if isinstance(v, bytes):
            v = v.decode('utf-8')
        fn_kwargs[k] = v
    try:
        return fn(*fn_args, **fn_kwargs)
    except Exception as e:
        # Deliberate catch-all: a CLI tool should report the error
        # instead of crashing with a traceback.
        print(str(e))
| {
"repo_name": "denismakogon/gigaspace-test-task",
"path": "gigaspace/cmd/common.py",
"copies": "1",
"size": "3647",
"license": "apache-2.0",
"hash": -1406792483494136000,
"line_mean": 31.2743362832,
"line_max": 79,
"alpha_frac": 0.5889772416,
"autogenerated": false,
"ratio": 4.3108747044917255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5399851946091725,
"avg_score": null,
"num_lines": null
} |
__author__ = 'denis_makogon'
import proboscis
from proboscis import asserts
from proboscis import decorators
from gigaspace.cinder_workflow import base as cinder_workflow
from gigaspace.nova_workflow import base as nova_workflow
from gigaspace.common import cfg
from gigaspace.common import utils
GROUP_WORKFLOW = 'gigaspace.cinder.volumes.api'
CONF = cfg.CONF
@proboscis.test(groups=[GROUP_WORKFLOW])
class TestWorkflow(object):
    """
    This is a test suite that represents the described workflow:
        - create volume:
            - check while it will reach 'available' status
        - list volumes
        - get volume:
            - by id
            - by name

    Attachment workflow was implemented in two different manners:

    Main workflow:
        - boot an instance:
            - format volume using cloudinit
        - poll until instance would reach ACTIVE state
        - check volume and server attachments
        - delete an instance
        - poll until instance would gone away
        - check if volume was deleted

    Alternative workflow:
        - boot volume
        - poll until volume reach 'available' state
        - boot an instance without volume and userdata
        - poll until instance would reach ACTIVE state
        - use Nova volumes API to attach volume
        - check volume attachments
        - reboot server (required, to let operating system to
          discover volume during reboot)
        - poll until instance would reach ACTIVE state
        - check server attachments
    """

    def __init__(self):
        self.cinder_actions = cinder_workflow.BaseCinderActions()
        self.nova_actions = nova_workflow.BaseNovaActions()
        # Shared mutable state threaded through the ordered test methods.
        self.volume = None
        self.server = None
        self.volume_size, self.display_name, self.expected_status = (
            "1", "test_volume", "available")
        self.server_name, self.flavor_id, self.image_id, = (
            "test_server",
            CONF.test_config.test_flavor_id,
            CONF.test_config.test_image_id
        )

    def _poll_volume_status(self, expected_status):
        # Build a zero-arg pollster suitable for utils.poll_until.
        def _pollster():
            volume = self.cinder_actions.show_volume(self.volume.id)
            if volume.status in ("error", "failed"):
                raise Exception("Volume is not in valid state")
            return volume.status == expected_status
        return _pollster

    def _create_volume(self):
        # Create a volume, wait for 'available', then verify its fields.
        self.volume = self.cinder_actions.create_volume(
            self.volume_size, self.display_name)
        utils.poll_until(self._poll_volume_status(self.expected_status),
                         expected_result=True,
                         sleep_time=1)
        asserts.assert_equal(self.volume.size, int(self.volume_size))
        volume = self.cinder_actions.show_volume(self.volume.id)
        asserts.assert_equal(volume.status, self.expected_status)

    @proboscis.test
    @decorators.time_out(300)
    def test_create_volume(self):
        """
        - create volume:
            - check while it will reach 'available' status
        """
        self._create_volume()

    @proboscis.test(depends_on=[test_create_volume])
    def test_list_volumes(self):
        """
        - list volumes
        """
        volumes = self.cinder_actions.list_volumes()
        asserts.assert_equal(len(volumes), 1)

    @proboscis.test(depends_on=[test_list_volumes])
    def test_get_volume_by_its_name_or_id(self):
        """
        - get volume:
            - by name
            - by ID
        """
        try:
            volume = self.cinder_actions.show_volume(self.display_name)
        except Exception as e:
            # Fall back to lookup by ID when name lookup is unsupported.
            print("Can't get volume by its display name. %s" % str(e))
            volume = self.cinder_actions.show_volume(self.volume.id)
            pass
        asserts.assert_equal(volume.status, self.expected_status)

    def _poll_until_server_is_active(self, expected_status):
        # Build a zero-arg pollster suitable for utils.poll_until.
        def _pollster():
            server = self.nova_actions.get(self.server.id)
            if server.status.upper() in ["ERROR", "FAILED"]:
                raise Exception("Failed to spawn compute instance.")
            return server.status == expected_status
        return _pollster

    def _boot(self, volume_id):
        # Boot a server (optionally with a volume) and wait for ACTIVE;
        # skip the remaining workflow if the spawn fails.
        try:
            self.server = self.nova_actions.boot(self.server_name,
                                                 self.flavor_id,
                                                 self.image_id,
                                                 volume_id=volume_id)
            utils.poll_until(self._poll_until_server_is_active("ACTIVE"),
                             expected_result=True,
                             sleep_time=1)
            self.server = self.nova_actions.get(self.server.id)
            asserts.assert_equal(self.server.status, "ACTIVE")
        except Exception as e:
            print(str(e))
            raise proboscis.SkipTest("Failed to spawn an instance.")

    @decorators.time_out(300)
    @proboscis.test(depends_on=[test_get_volume_by_its_name_or_id])
    def test_boot_instance(self):
        """
        - boot an instance:
            - poll until instance would reach ACTIVE state
            - check attachments
        """
        self._boot(self.volume.id)

    def _check_attachments(self):
        # Verify the server->volume and volume->server references agree.
        server = self.nova_actions.get(self.server.id)
        server_attachment = getattr(
            server, 'os-extended-volumes:volumes_attached').pop(0)
        volume_id = server_attachment['id']
        volume = self.cinder_actions.show_volume(self.volume.id)
        volume_attachment = volume.attachments.pop(0)
        server_id = volume_attachment['server_id']
        asserts.assert_equal(server.id, server_id)
        asserts.assert_equal(volume.id, volume_id)

    @proboscis.test(depends_on=[test_boot_instance])
    def test_server_and_volume_attachments(self):
        """
        - checks volume and server attachments
        """
        self._check_attachments()

    def _poll_until_server_is_gone(self, server_id=None):
        # Deletion raising is interpreted as "the server no longer exists".
        def _pollster():
            try:
                _server_id = (server_id if server_id
                              else self.server.id)
                self.nova_actions.delete(_server_id)
            except Exception:
                print("\nInstance has gone.")
                return True
        return _pollster

    def _poll_until_volume_is_gone(self, volume_id=None):
        # Deletion raising is interpreted as "the volume no longer exists".
        def _pollster():
            try:
                _volume_id = (volume_id if volume_id
                              else self.volume.id)
                self.cinder_actions.cinderclient.volumes.delete(
                    _volume_id)
            except Exception:
                print("Volume has gone.")
                return True
        return _pollster

    @decorators.time_out(300)
    @proboscis.test(runs_after=[test_server_and_volume_attachments])
    def test_boot_without_volume(self):
        """
        - boot instance without volume
        """
        self._boot(None)

    @proboscis.test(depends_on=[test_boot_without_volume])
    def test_volume_create(self):
        """
        - create volume
        """
        self._create_volume()

    @proboscis.test(depends_on=[test_volume_create])
    def test_attach_volume(self):
        """
        - attach volume via the Nova volumes API
        - poll until volume reaches 'in-use'
        - check attachments on both sides
        """
        self.nova_actions.create_server_volume(self.volume.id, self.server.id)
        utils.poll_until(self._poll_volume_status("in-use"),
                         expected_result=True,
                         sleep_time=1)
        self._check_attachments()

    @decorators.time_out(300)
    @proboscis.test(depends_on=[test_attach_volume])
    def test_server_reboot_for_volume_discovery(self):
        """
        - reboot the server so the guest OS discovers the attached volume
        """
        self.nova_actions.novaclient.servers.reboot(self.server.id)
        utils.poll_until(self._poll_until_server_is_active("ACTIVE"),
                         expected_result=True,
                         sleep_time=1)
        self._check_attachments()

    @proboscis.after_class
    def test_delete_resources(self):
        """
        - delete instance
        - delete volumes
        """
        for server in self.nova_actions.novaclient.servers.list():
            server.delete()
            utils.poll_until(
                self._poll_until_server_is_gone(server_id=server.id),
                expected_result=True,
                sleep_time=1)
        for volume in self.cinder_actions.cinderclient.volumes.list():
            # one of the volumes was bootstraped with delete flag in block
            # mapping device, so Cinder API service would reject request
            # because of volume status that is 'deleting' at this stage
            if volume.status in ['available', 'error']:
                volume.delete()
                utils.poll_until(
                    self._poll_until_volume_is_gone(volume_id=volume.id),
                    expected_result=True,
                    sleep_time=1)
| {
"repo_name": "denismakogon/gigaspace-test-task",
"path": "gigaspace/tests/functional/test_workflow.py",
"copies": "1",
"size": "8918",
"license": "apache-2.0",
"hash": 2692308321273690000,
"line_mean": 35.4,
"line_max": 78,
"alpha_frac": 0.5808477237,
"autogenerated": false,
"ratio": 4.119168591224018,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 245
} |
__author__ = "denis_makogon"
import sys
from oslo_config import cfg
from gigaspace.cmd import common
from gigaspace.common import cfg as config
from gigaspace.common import utils
from gigaspace.cinder_workflow import (
base as cinder_workflow)
from gigaspace.nova_workflow import (
base as nova_workflow)
CONF = cfg.CONF
class Volumes(object):
    """CLI category exposing Cinder volume actions."""

    def list(self, *args, **kwargs):
        """Print a table of all volumes.

        :return: None (output goes to stdout)
        :rtype: None
        """
        actions = cinder_workflow.BaseCinderActions()
        columns = ['id', 'size', 'name', 'status', 'bootable', 'attachments']
        utils.print_list(actions.list_volumes(), columns)

    @common.args("--size", dest="size",
                 help='Volume size')
    @common.args("--display-name", dest="name",
                 help='Volume name')
    def create(self, size, name):
        """Create a volume and print its details.

        :param size: volume size
        :type size: basestring
        :param name: volume display name
        :type name: basestring
        :return: None (output goes to stdout)
        :rtype: None
        """
        actions = cinder_workflow.BaseCinderActions()
        info = actions.create_volume(size, name)._info
        info.pop('links')
        utils.print_dict(info)

    @common.args("--id-or-name", dest='id_or_name',
                 help='Volume ID or name')
    def show(self, id_or_name):
        """Look up a volume by ID or name and print its details.

        :param id_or_name: volume ID or name
        :type id_or_name: basestring
        :return: None (output goes to stdout)
        :rtype: None
        """
        actions = cinder_workflow.BaseCinderActions()
        info = actions.show_volume(id_or_name)._info
        info.pop('links')
        utils.print_dict(info)
class Instances(object):
    # CLI category exposing Nova server actions.

    def _boot(self, name, flavor, image_id, volume_id=None):
        """
        CLI representation of 'boot instance'
        :param name: instance name
        :type name: basestring
        :param flavor: flavor
        :type flavor: basestring
        :param image_id: Glance image id
        :type image_id: basestring
        :param volume_id: optional volume to attach at boot
        :return server
        :rtype: dict
        """
        nova = nova_workflow.BaseNovaActions()
        server = nova.boot(
            name, flavor, image_id, volume_id)._info
        # 'links' is API plumbing, not useful in CLI output.
        del server['links']
        return server

    @common.args('--name', dest='name',
                 help='Instance name')
    @common.args('--flavor', dest='flavor',
                 help='Flavor id')
    @common.args('--image-id', dest='image_id',
                 help='Glance image ID')
    @common.args('--volume-id', dest='volume_id',
                 help='Volume ID')
    def boot_with_volume(self, name, flavor, image_id, volume_id):
        """
        CLI representation of 'boot instance' (with a volume attached)
        :param name: instance name
        :type name: basestring
        :param flavor: flavor
        :type flavor: basestring
        :param image_id: Glance image id
        :type image_id: basestring
        :param volume_id: volume to attach at boot
        :return: CLI representation of 'boot instance'
        :rtype: None
        """
        server = self._boot(name, flavor, image_id,
                            volume_id=volume_id)
        utils.print_dict(server)

    @common.args('--name', dest='name',
                 help='Instance name')
    @common.args('--flavor', dest='flavor',
                 help='Flavor id')
    @common.args('--image-id', dest='image_id',
                 help='Glance image ID')
    def boot_without_volume(self, name, flavor, image_id):
        """
        CLI representation of 'boot instance' (no volume)
        :param name: instance name
        :type name: basestring
        :param flavor: flavor
        :type flavor: basestring
        :param image_id: Glance image id
        :type image_id: basestring
        :return: CLI representation of 'boot instance'
        :rtype: None
        """
        server = self._boot(name, flavor, image_id)
        utils.print_dict(server)

    @common.args('--server-id', dest="server_id")
    def delete(self, server_id):
        """Request deletion of the given server."""
        nova = nova_workflow.BaseNovaActions()
        nova.delete(server_id)
        print(str("Server accepted for deletion."))

    @common.args('--volume-id', dest="volume_id")
    @common.args('--server-id', dest="server_id")
    def attach_volume(self, volume_id, server_id):
        """Attach a volume to a server via the Nova volumes API."""
        nova = nova_workflow.BaseNovaActions()
        nova.create_server_volume(volume_id, server_id)
        print("Server requires:"
              "\n - restart to discover new block storage"
              "\n - manual volume formatting")

    @common.args('--volume-id', dest="volume_id")
    @common.args('--server-id', dest="server_id")
    def detach_volume(self, volume_id, server_id):
        """Detach a volume from a server.

        NOTE(review): the argument order passed here (server_id,
        volume_id) is the reverse of attach_volume's call — confirm the
        delete_server_volume signature before changing anything.
        """
        nova = nova_workflow.BaseNovaActions()
        nova.delete_server_volume(server_id, volume_id)
# Mapping of CLI category name -> handler class for sub-command dispatch.
CATS = {
    'volumes': Volumes,
    'instances': Instances
}

# Sub-command option: exposes each category above as a CLI sub-command.
category_opt = cfg.SubCommandOpt('category',
                                 title='Command categories',
                                 help='Available categories',
                                 handler=common.add_command_parsers(CATS))
def main():
    """Parse options and call the appropriate class/method."""
    common._main(CONF, config, category_opt, sys.argv)


# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_name": "denismakogon/gigaspace-test-task",
"path": "gigaspace/cmd/gigaspace_tool.py",
"copies": "1",
"size": "5523",
"license": "apache-2.0",
"hash": -129697867108429280,
"line_mean": 31.1104651163,
"line_max": 75,
"alpha_frac": 0.577403585,
"autogenerated": false,
"ratio": 4.073008849557522,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5150412434557522,
"avg_score": null,
"num_lines": null
} |
__author__ = 'denis_makogon'
import uuid
# NOTE(fix): the aliases were swapped (cinderclient imported as
# 'novaclient' and vice versa), so nova_exceptions actually pointed at
# cinderclient's exception classes and vice versa.
from cinderclient import client as cinderclient
from novaclient import client as novaclient

cinder_exceptions = cinderclient.exceptions
nova_exceptions = novaclient.exceptions
class FakeServer(object):
    """Minimal in-memory stand-in for a nova server object."""

    def __init__(self, id, name, image_id, flavor_ref,
                 block_device_mapping, volumes):
        self.id = id
        self.name = name
        self.image_id = image_id
        self.flavor_ref = flavor_ref
        self.old_flavor_ref = None
        self._current_status = "BUILDING"
        self.volumes = volumes
        self.bdm = block_device_mapping
        attached = []
        for vol in self.volumes:
            attached.append({'id': vol.id})
            vol.set_attachment(id)
        # Mirror the real API's extension attribute name (contains a colon,
        # hence setattr rather than a plain assignment).
        setattr(self,
                'os-extended-volumes:volumes_attached',
                attached)

    def delete(self):
        """Release every attached volume, then mark the server down."""
        for vol in self.volumes:
            vol._current_status = 'deleting'
            vol.delete_attachment(self.id)
            Cinder().delete(vol.id)
        self._current_status = "SHUTDOWN"

    @property
    def status(self):
        """Current lifecycle status string."""
        return self._current_status
class FakeBlockDeviceMappingInfo(object):
    """Plain record describing one parsed block-device-mapping entry."""

    def __init__(self, id, device, _type, size, delete_on_terminate):
        # 'id' is stored as volume_id to match the real API's field name.
        self.volume_id = id
        self.device = device
        self.type = _type
        self.size = size
        self.delete_on_terminate = delete_on_terminate
# Module-level registry shared by every Nova() instance (id -> FakeServer).
FAKE_SERVERS_DB = {}
class Nova(object):
    """In-memory fake of the nova servers manager used by the tests."""

    def __init__(self):
        # Shared module-level dict: every Nova() instance sees the same servers.
        self.db = FAKE_SERVERS_DB
        self.volumes = Cinder()

    def reboot(self, server_id, **kwargs):
        """Simulate a reboot: REBOOT for five seconds, then ACTIVE."""
        import time
        server = self.get(server_id)
        server._current_status = "REBOOT"
        time.sleep(5)
        server._current_status = "ACTIVE"

    def create(self, name, image_id, flavor_ref, userdata=None,
               block_device_mapping=None):
        """Create a FakeServer, mark mapped volumes in-use, return it ACTIVE."""
        id = "FAKE_%s" % uuid.uuid4()
        volumes = self._get_volumes_from_bdm(block_device_mapping)
        for volume in volumes:
            volume._current_status = 'in-use'
        server = FakeServer(id, name, image_id, flavor_ref,
                            block_device_mapping, volumes)
        self.db[id] = server
        server._current_status = "ACTIVE"
        return server

    def _get_volumes_from_bdm(self, block_device_mapping):
        """Resolve 'device -> id:type:size:delete_on_terminate' to volumes."""
        volumes = []
        if block_device_mapping is not None:
            for device in block_device_mapping:
                mapping = block_device_mapping[device]
                (id, _type, size, delete_on_terminate) = mapping.split(":")
                volume = self.volumes.get(id)
                volume.mapping = FakeBlockDeviceMappingInfo(
                    id, device, _type, size, delete_on_terminate)
                volumes.append(volume)
        return volumes

    def get(self, id):
        """Return the server with *id* or raise nova NotFound."""
        # Idiom fix: membership test directly on the dict, not .keys().
        if id not in self.db:
            raise nova_exceptions.NotFound(404)
        return self.db[id]

    def list(self):
        """Return all known servers."""
        return list(self.db.values())

    def delete(self, id):
        """Remove the server with *id* or raise nova NotFound."""
        if id not in self.db:
            raise nova_exceptions.NotFound("HTTP 404. Not Found.")
        del self.db[id]
class FakeVolume(object):
    """In-memory fake of a cinder volume object."""

    def __init__(self, id, size, name):
        self.attachments = []
        self.id = id
        self.size = size
        self.name = name
        self._current_status = "building"
        self.device = "vdb"

    def __repr__(self):
        msg = ("FakeVolume(id=%s, size=%s, name=%s, "
               "_current_status=%s)")
        params = (self.id, self.size, self.name,
                  self._current_status)
        return (msg % params)

    @property
    def created_at(self):
        # Fixed timestamp is sufficient for the fakes.
        return "2020-01-01-12:59:59"

    def get(self, key):
        """Dict-style attribute access used by the CLI printers."""
        return getattr(self, key)

    def set_attachment(self, server_id):
        """Record an attachment to *server_id* (idempotent per server)."""
        for attachment in self.attachments:
            if attachment['server_id'] == server_id:
                return  # Do nothing
        self.attachments.append({'server_id': server_id,
                                 'device': self.device,
                                 'id': self.id,
                                 'volume_id': self.id})

    def delete_attachment(self, server_id):
        """Drop all attachment records for *server_id*.

        NOTE(fix): the original popped from the list while iterating it,
        which skips the element following each removal; rebuild the list
        instead.
        """
        self.attachments = [a for a in self.attachments
                            if a['server_id'] != server_id]

    @property
    def status(self):
        """Current lifecycle status string."""
        return self._current_status

    def delete(self):
        """Remove this volume from the fake cinder database."""
        Cinder().delete(self.id)
# Module-level registry shared by every Cinder() instance (id -> FakeVolume).
FAKE_VOLUMES_DB = {}
class Cinder(object):
    """In-memory fake of the cinder volumes manager used by the tests."""

    def __init__(self):
        # Shared module-level dict: every Cinder() instance sees the same volumes.
        self.db = FAKE_VOLUMES_DB

    def get(self, id):
        """Return the volume with *id* or raise cinder NotFound."""
        if id not in self.db:
            raise cinder_exceptions.NotFound(
                404, message="Volume not found.")
        return self.db[id]

    def create(self, size, name=None, **kwargs):
        """Create a fake volume, register it, and mark it available."""
        id = "FAKE_VOL_%s" % uuid.uuid4()
        volume = FakeVolume(id, size, name)
        self.db[id] = volume
        volume._current_status = "available"
        return volume

    def list(self):
        """Return all known volumes."""
        return list(self.db.values())

    def delete(self, id):
        """Remove the volume with *id* or raise cinder NotFound.

        NOTE(fix): raises NotFound for consistency with get(); it is an
        Exception subclass, so callers catching Exception still work.
        """
        if id not in self.db:
            raise cinder_exceptions.NotFound(
                404, message="Volume not found.")
        del self.db[id]

    def create_server_volume(self, server_id, volume_id, device):
        """Attach *volume_id* to *server_id* at *device*.

        NOTE(fix): validates the volume is 'available' BEFORE mutating any
        state; the original attached first and raised afterwards, leaving
        server and volume inconsistent on failure.
        """
        server = Nova().get(server_id)
        volume = self.get(volume_id)
        if volume._current_status != "available":
            raise Exception("Invalid volume status: "
                            "expected 'available' but was '%s'" %
                            volume._current_status)
        volumes = getattr(server, 'os-extended-volumes:volumes_attached')
        volumes.append({'id': volume.id})
        setattr(server,
                'os-extended-volumes:volumes_attached',
                volumes)
        volume.device = device
        volume.set_attachment(server_id)
        volume._current_status = "in-use"

    def delete_server_volume(self, server_id, volume_id):
        """Detach *volume_id* from *server_id*, validating status first."""
        server = Nova().get(server_id)
        volume = self.get(volume_id)
        if volume._current_status != 'in-use':
            raise Exception("Invalid volume status: "
                            "expected 'in-use' but was '%s'" %
                            volume._current_status)
        volumes = getattr(
            server,
            'os-extended-volumes:volumes_attached')
        volumes.pop(volumes.index({'id': volume_id}))
        setattr(server,
                'os-extended-volumes:volumes_attached',
                volumes)
        volume.delete_attachment(server_id)
        volume._current_status = "available"
def fake_create_nova_client():
    """Return a stub nova client exposing .servers and .volumes managers."""
    class _fake_nova():
        def __init__(self):
            self.servers = Nova()
            self.volumes = Cinder()
    client = _fake_nova()
    return client
def fake_create_cinder_client():
    """Return a stub cinder client exposing a .volumes manager."""
    class _fake_cinder():
        def __init__(self):
            self.volumes = Cinder()
    client = _fake_cinder()
    return client
| {
"repo_name": "denismakogon/gigaspace-test-task",
"path": "gigaspace/tests/fakes/openstack.py",
"copies": "1",
"size": "7078",
"license": "apache-2.0",
"hash": -7405878629647948000,
"line_mean": 29.1191489362,
"line_max": 75,
"alpha_frac": 0.5528397853,
"autogenerated": false,
"ratio": 4.001130582249859,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5053970367549858,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Denis'
def merge_two_lists(list_one, list_two):
    """
    Merge two sorted lists into a new sorted list.
    Input lists don't change.
    :rtype: list
    :return: sorted list containing every element of both inputs
    """
    # Two-pointer merge: O(n + m) instead of the O(n^2) cost of the
    # original, which repeatedly popped from the front of list copies.
    merged = []
    i = j = 0
    len_one, len_two = len(list_one), len(list_two)
    while i < len_one and j < len_two:
        if list_one[i] <= list_two[j]:
            merged.append(list_one[i])
            i += 1
        else:
            merged.append(list_two[j])
            j += 1
    # At most one of these extends with anything.
    merged.extend(list_one[i:])
    merged.extend(list_two[j:])
    return merged
def find_median(list_one, list_two):
    """
    Merge two sorted lists in sorted order, then return the median of
    the merged list.

    Generalized: an even total length yields the mean of the two middle
    elements (unchanged behavior); an odd total length now yields the
    middle element instead of an error. Empty input still prints an
    error and returns None.
    :rtype: float
    """
    sorted_list = merge_two_lists(list_one, list_two)
    length = len(sorted_list)
    if length == 0:
        print("Error! Invalid input list_one, list_two")
        return
    if length % 2 == 0:
        center_one = sorted_list[length // 2 - 1]
        center_two = sorted_list[length // 2]
        return (center_one + center_two) / 2.0
    # Odd total length: the single middle element is the median.
    return float(sorted_list[length // 2])
def main():
    """
    Given two sorted numeric arrays of the same length N, find the median
    of the numerical array of length 2N containing all the numbers of the
    two data sets.
    Sample input:
    1 2 3 4
    1 4 5 6
    Example output:
    3.5
    """
    # Defaults used when the input file is absent.
    list_one = [1, 2, 3, 4]
    list_two = [1, 4, 5, 6]
    file_name = "median_input.txt"
    try:
        # NOTE(review): 'file' shadows the builtin name.
        with open(file_name) as file:
            list_one = file.readline().split(",")
            list_two = file.readline().split(",")
    # ValueError, TypeError
    except FileNotFoundError:
        print("File '{}' doesn't exist, using default arrays ".format(file_name))
    # int() tolerates surrounding whitespace, including a trailing newline.
    list_one = list(map(int, list_one))
    list_two = list(map(int, list_two))
    print(list_one)
    print(list_two)
    # print("The correct result: 3.5, my result: {}".format(find_median(list_one, list_two)))
    print("The median is: {}".format(find_median(list_one, list_two)))
    print("Done")


# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_name": "VDenis/hh_school",
"path": "median.py",
"copies": "1",
"size": "2273",
"license": "mit",
"hash": 8724024646603064000,
"line_mean": 23.1808510638,
"line_max": 109,
"alpha_frac": 0.5908490981,
"autogenerated": false,
"ratio": 3.443939393939394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4534788492039393,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Deniz'
import argparse, re, os
def main():
    """Parse the input-file option and run both summoner-name parse passes."""
    parser = argparse.ArgumentParser(description='Attempt to generate X number'
                                                 ' of random summoners.')
    parser.add_argument('-in', metavar='i', type=str)
    args = parser.parse_args()
    # Python 2 idiom: dict.values() is a list; with exactly one option this
    # is the -in value. NOTE(review): order would be unreliable with more
    # than one option.
    inputLocation = vars(args).values()[0]
    summoner_name_parse_pass0(inputLocation)
    summoner_name_parse_pass1(inputLocation)
# Given an input file, read and parse for the summoner names
def summoner_name_parse_pass0(inputLocation):
    """First parse pass: extract names from absolute mobafire profile links."""
    # Read input txt file ('with' closes the handle; the original leaked it).
    with open(inputLocation, 'r') as f:
        lines = f.readlines()
    # Search for summoner string after the profile URL prefix, before ' class='.
    start = '<a href="http://www.mobafire.com/profile/'
    end = '" class="'
    array_summoners = []  # Keep an array of summoners, write it at the end
    lineNum = 0           # Count the total number of lines read
    summonerFoundNum = 0  # Count number of summoners found
    for line in lines:
        lineNum += 1
        try:
            summoner_name = re.search("%s(.*)%s" % (start, end), str(line)).group(1)
            summonerFoundNum += 1
            # Lop off the last 7 characters (useless hyphen and random digits).
            array_summoners.append(summoner_name[:-7])
        except AttributeError:
            pass  # no summoner link on this line
    print("OUT: " + str(lineNum) + " lines parsed, " + str(summonerFoundNum) + " summoners found.")
    # NOTE(fix): close the output handle (previously leaked) and avoid
    # shadowing the builtin 'file'. Output path kept byte-identical.
    with open(os.curdir+"\_outSummonerParser\summonerParserOutput"+inputLocation+".txt", 'w') as out:
        for summoner in array_summoners:
            out.writelines(str(summoner)+"\n")
def summoner_name_parse_pass1(inputLocation):
    """Second parse pass: extract names from relative '/profile/' links."""
    # Read input txt file ('with' closes the handle; the original leaked it).
    with open(inputLocation, 'r') as f:
        lines = f.readlines()
    # Search for summoner string after the relative profile prefix.
    start = '<a href="/profile/'
    end = '" class="'
    array_summoners = []  # Keep an array of summoners, write it at the end
    lineNum = 0           # Count the total number of lines read
    summonerFoundNum = 0  # Count number of summoners found
    for line in lines:
        lineNum += 1
        try:
            summoner_name = re.search("%s(.*)%s" % (start, end), str(line)).group(1)
            summonerFoundNum += 1
            # Lop off the last 7 characters (useless hyphen and random digits).
            array_summoners.append(summoner_name[:-7])
        except AttributeError:
            pass  # no summoner link on this line
    print("OUT: " + str(lineNum) + " lines parsed, " + str(summonerFoundNum) + " summoners found.")
    # NOTE(fix): close the output handle (previously leaked) and avoid
    # shadowing the builtin 'file'. Output path kept byte-identical.
    with open(os.curdir+"\_outSummonerParser\summonerParserOutput"+inputLocation+".txt", 'w') as out:
        for summoner in array_summoners:
            out.writelines(str(summoner)+"\n")
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/WebPasrer.py",
"copies": "1",
"size": "3187",
"license": "mit",
"hash": -451471648545641000,
"line_mean": 38.85,
"line_max": 101,
"alpha_frac": 0.5961719485,
"autogenerated": false,
"ratio": 3.6464530892448512,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4742625037744851,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Deniz'
from bs4 import BeautifulSoup
from splinter import Browser
import argparse, os, re, time
# NOTE(review): declared and referenced via 'global' in main(), but never
# actually populated anywhere in this module.
mmr_filepath_Dict = {}
def main():
    """Parse the input-directory option and kick off the op.gg MMR scrape."""
    global mmr_filepath_Dict
    BASE_URL = "http://na.op.gg/"
    parser = argparse.ArgumentParser(description='Attempt to search op.gg with the summoner names in every file in the'
                                                 'given directory location. Scrap html to find mmr and avg mmr.')
    parser.add_argument('-in', metavar='i', type=str)
    args = parser.parse_args()
    # Python 2 idiom: dict.values() is a list; with one option this is -in.
    inputLocation = vars(args).values()[0]
    get_summoner_ids_names(inputLocation, BASE_URL)
# For every file in the input dir, get the summoner names and ids
def get_summoner_ids_names(inputLocation, BASE_URL):
    """Walk *inputLocation*, extract one summoner id/name per file, and
    drive a browser to look each summoner up on op.gg."""
    # Define search terms for summoner id
    start_summoner_id = "': "
    end_summoner_id = ", u'matchHistoryUri"
    # Define search terms for summoner name
    start_summoner_name = "u'summonerName': u'"
    end_summoner_name = "'}"
    for root, __, files in os.walk(inputLocation):
        for f in files:
            fullpath = os.path.join(root, f)
            # NOTE(review): 'f' is rebound from filename to file handle here.
            f = open(fullpath, 'r')
            read_file = f.readlines()
            f.close()
            # Split up the match history (which is all 1 line) by the summonerId string
            match_history_split = str(read_file).split("u'summonerId")
            # Pop off first element of array (doesn't contain any summoner ID's)
            match_history_split.pop(0)
            #print match_history_split[0]
            # Get first element of array
            match = match_history_split[0]
            # Lop off everything past 135 characters
            match = match[:135]
            #print "MATCH: " + match
            # Find the summoner id
            tmp_id = re.search("%s(.*)%s" % (start_summoner_id, end_summoner_id), str(match)).group(1)
            # Find the summoner name
            tmp_name = re.search("%s(.*)%s" % (start_summoner_name, end_summoner_name), str(match)).group(1)
            # Strip the summoner name of all whitespace
            tmp_name = tmp_name.replace(' ', '')
            # Pass the name, id to splinter to search on op.gg
            browser = navigate_to_summoner_page(BASE_URL, tmp_name)
            # Attempt to click Check MMR button
            click_check_mmr(browser, fullpath, tmp_name)
            # Parse the webpage to find mmr
            # NOTE(review): click_check_mmr also calls find_mmr on success,
            # so the page may be parsed twice for the same summoner.
            find_mmr(browser, fullpath, tmp_name)
def navigate_to_summoner_page(BASE_URL, summonerName):
    """Open a Chrome browser on the op.gg page for *summonerName*."""
    browser = Browser('chrome')
    print "BROWSER visiting URL: " + str(BASE_URL+'summoner/userName='+summonerName)
    browser.visit(BASE_URL+'summoner/userName='+summonerName)
    return browser
def click_check_mmr(browser, fullpath, summonerName):
    """Click op.gg's 'Check MMR' button and, on success, parse the MMR."""
    # Declare boolean switch
    summonerFound = False
    # Find the second button with the css .opButton.small (the first button is Ingame Info, second is Check MMR)
    try:
        button = browser.find_by_css('.opButton.small')[1]
        button.click()
        summonerFound = True
    except Exception:
        summonerFound = False
        print "ERR: Summoner " + summonerName + " NOT found."
    if summonerFound:
        # Parse the webpage to find mmr
        find_mmr(browser, fullpath, summonerName)
# Global attempt counter shared across find_mmr retries.
atmpt_cnt = 0
def find_mmr(browser, fullpath, summonerName):
    """Scrape the MMR widget from the current page, retrying up to 5 times.

    On success, writes the MMR / average-league-MMR into the data file's
    name via write_mmr() and closes the browser.
    """
    global atmpt_cnt
    # Wait 2 seconds before searching for MMR data
    time.sleep(2)
    # Declare boolean switch
    mmr_found = False
    # Find the MMR by css
    try:
        get_mmr = browser.find_by_css('div.InnerSummonerMMR').first.value
        mmr_found = True
    except Exception:
        if atmpt_cnt < 5:
            print "ERR: MMR CSS NOT FOUND"
            print "ATTEMPT " + str(atmpt_cnt) + ": Waiting 5 seconds before trying again..."
            time.sleep(5)
            atmpt_cnt += 1
            mmr_found = False
            # Recursively try to click MMR button and parse again, up to 5 times.
            click_check_mmr(browser, fullpath, summonerName)
        else:
            mmr_found = False
            print "ERR: MMR CSS NOT FOUND, ALL ATTEMPTS EXHAUSTED"
    if mmr_found:
        # Define search terms
        start = 'MMR for this league is '
        end = 'beta'
        # Define regex to search for mmr
        mmr = re.findall("%s(.*)%s" % (start, end), str(get_mmr), re.S)
        # Split mmr to get avg mmr and mmr
        mmr = str(mmr).split(r'.\n')
        try:
            # Strip the leading "['"" characters off avg mmr
            mmr[0] = mmr[0][2:]
            _avg_mmr = mmr[0]
        except IndexError:
            # This means avg mmr wasn't found properly
            print 'ERR: AVG MMR NOT FOUND PROPERLY'
            _avg_mmr = 'NONE'
        try:
            # Strip the trailing "']" characters off mmr
            mmr[1] = mmr[1][:-2]
            _mmr = mmr[1]
        except IndexError:
            # This means mmr wasn't found properly
            print 'ERR: MMR NOT FOUND PROPERLY'
            _mmr = 'NONE'
        print "MMR: " + _mmr
        print "AVERAGE LEAGUE MMR: " + _avg_mmr
        atmpt_cnt = 0  # Reset attempt counter
        # Close the browser, kills chromedriver.exe as well
        browser.quit()
        # Write MMR info to filenames
        write_mmr(_avg_mmr, _mmr, fullpath)
# Given avg mmr, mmr, and a filepath slice the last 4 characters off the filepath (.txt) append
# the avg mmr and mmr, add .txt back in and replace the old filepath with the new one.
def write_mmr(avg_mmr, mmr, fullpath):
    """Encode the scraped MMR values into the data file's name.

    NOTE(review): the backslash path separators assume Windows.
    """
    # Rename the file
    os.rename(os.curdir+'\\'+fullpath, os.curdir+'\\'+fullpath[:-4]+"_mmr="+str(mmr)+"_avg="+str(avg_mmr)+'.txt')
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/Scrape_mmr_opgg.py",
"copies": "1",
"size": "5728",
"license": "mit",
"hash": 8366531034229682000,
"line_mean": 34.5838509317,
"line_max": 119,
"alpha_frac": 0.59375,
"autogenerated": false,
"ratio": 3.4926829268292683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4586432926829268,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Deniz'
from bs4 import BeautifulSoup
from splinter import Browser
import argparse, os, re, time, sys
# NOTE(review): declared and referenced via 'global' in main(), but never
# actually populated anywhere in this module.
mmr_filepath_Dict = {}
def main():
    """Parse the summoner-count option and start scraping op.gg rankings."""
    global mmr_filepath_Dict
    BASE_URL = "http://na.op.gg/ranking/ladder/"
    parser = argparse.ArgumentParser(description='Attempt to scrape op.gg rankings to get X number of summoner names '
                                                 'in gold and plat.')
    # NOTE(review): type=str, so numSummoners stays a string downstream.
    parser.add_argument('-numSum', metavar='n', type=str)
    args = parser.parse_args()
    numSummoners = vars(args).values()[0]
    #get_summoner_ids_names(inputLocation, BASE_URL)
    navigate_to_platgold_rankings_page(BASE_URL, numSummoners)
def navigate_to_platgold_rankings_page(BASE_URL, numSummoners):
    """Open the op.gg ladder and keep paging down until enough summoners
    are collected (check_ranking exits the process when done)."""
    browser = Browser('chrome')
    print "BROWSER visiting URL: " + str(BASE_URL)
    browser.visit(BASE_URL)
    # Keep scrolling down until we've reached the right league
    # NOTE(review): effectively an unbounded loop; termination relies on
    # check_ranking() calling sys.exit().
    for i in range(1, 100000):
        # Scroll to bottom of page
        browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        # Wait 1 second
        time.sleep(1)
        # Find and click "See More" button if it exists
        click_see_more_btn(browser, numSummoners)
        # Wait 1 second
        time.sleep(1)
    return browser
# Retry state shared across click_see_more_btn calls.
numAttempts = 0
btnFound = False
def click_see_more_btn(browser, numSummoners):
    """Click the ladder's 'See More' button, retrying up to 4 times."""
    global numAttempts
    global btnFound
    # Find the button with the id "moreLadderRakingTable"
    if numAttempts < 4:
        try:
            # NOTE: op.gg has a typo in the button id, Raking NOT Ranking
            button = browser.find_by_id('moreLadderRakingTable')
            button.click()
            numAttempts = 0 #Reset num attempts
            btnFound = True
            check_ranking(browser, numSummoners)
        except Exception:
            btnFound = False
            print "ERR: See More button NOT found."
    if not btnFound:
        # Try again (recursive retry bounded by numAttempts < 4)
        numAttempts += 1
        click_see_more_btn(browser, numSummoners)
# Names collected so far; also used to enforce the -numSum limit.
loSummonerNames = []
def check_ranking(browser, numSummoners):
    """Scan the current rankings page and record Gold/Platinum summoners.

    Exits the process (sys.exit) once *numSummoners* names are collected.
    """
    try:
        # Find every summoner row on the current page by xpath
        xpath = '//tr[@class]'
        summoners = browser.find_by_xpath(xpath)
        # Check the LAST summoner on the current page to see if it's in Plat
        # or Gold: a quick way to tell whether the whole page is worth scanning.
        last_summoner_league = summoners[-1].text.split('\n')[2]
        if 'Platinum' in last_summoner_league or 'Gold' in last_summoner_league:
            print('Acceptable Rank Found in Last Element.')
            # If the LAST summoner is good, check the rest of the the page
            for summoner in summoners:
                split_summoner = summoner.text.split('\n')
                summoner_name = split_summoner[1]
                league = split_summoner[2]
                if 'Platinum' in str(league) or 'Gold' in str(league):
                    print("Acceptable league found")
                    # NOTE(fix): numSummoners arrives as a *string* from
                    # argparse (type=str); compare as int so the limit is
                    # actually enforced.
                    if int(numSummoners) > len(loSummonerNames):
                        print("Appended summoner " + summoner_name)
                        # NOTE(fix): also record the name in memory -- the
                        # original only wrote the file, so len(loSummonerNames)
                        # stayed 0 forever and the scraper never terminated.
                        loSummonerNames.append(summoner_name)
                        write_summoner_name(summoner_name)
                    else:
                        print("DONE: " + str(len(loSummonerNames)) + " summoners found.")
                        sys.exit(0)
                else:
                    print("League not acceptable")
    except Exception:
        print("ERR: Rank NOT found.")
def write_summoner_name(summoner_name):
    """Append *summoner_name* to the output file (one name per line)."""
    # NOTE(fix): 'with' guarantees the handle is flushed and closed (the
    # original leaked it) and avoids shadowing the builtin 'file'.
    with open(os.curdir+"_out_opgg_summonerNames.txt", 'a') as out:
        out.write(str(summoner_name)+"\n")
'''
def click_check_mmr(browser, fullpath, summonerName):
# Declare boolean switch
summonerFound = False
# Find the second button with the css .opButton.small (the first button is Ingame Info, second is Check MMR)
try:
button = browser.find_by_css('.opButton.small')[1]
button.click()
summonerFound = True
except Exception:
summonerFound = False
print "ERR: Summoner " + summonerName + " NOT found."
if summonerFound:
# Parse the webpage to find mmr
find_mmr(browser, fullpath, summonerName)
# Declare attempt counter
atmpt_cnt = 0
def find_mmr(browser, fullpath, summonerName):
global atmpt_cnt
# Wait 2 seconds before searching for MMR data
time.sleep(2)
# Declare boolean switch
mmr_found = False
# Find the MMR by css
try:
get_mmr = browser.find_by_css('div.InnerSummonerMMR').first.value
mmr_found = True
except Exception:
if atmpt_cnt < 5:
print "ERR: MMR CSS NOT FOUND"
print "ATTEMPT " + str(atmpt_cnt) + ": Waiting 5 seconds before trying again..."
time.sleep(5)
atmpt_cnt += 1
mmr_found = False
# Recursively try to click MMR button and parse again, up to 5 times.
click_check_mmr(browser, fullpath, summonerName)
else:
mmr_found = False
print "ERR: MMR CSS NOT FOUND, ALL ATTEMPTS EXHAUSTED"
if mmr_found:
# Define search terms
start = 'MMR for this league is '
end = 'beta'
# Define regex to search for mmr
mmr = re.findall("%s(.*)%s" % (start, end), str(get_mmr), re.S)
# Split mmr to get avg mmr and mmr
mmr = str(mmr).split(r'.\n')
try:
# Strip last 2 characters off avg mmr ']
mmr[0] = mmr[0][2:]
_avg_mmr = mmr[0]
except IndexError:
# This means avg mmr wasn't found properly
print 'ERR: AVG MMR NOT FOUND PROPERLY'
_avg_mmr = 'NONE'
try:
# Strip first 2 characters off mmr ['
mmr[1] = mmr[1][:-2]
_mmr = mmr[1]
except IndexError:
# This means mmr wasn't found properly
print 'ERR: MMR NOT FOUND PROPERLY'
_mmr = 'NONE'
print "MMR: " + _mmr
print "AVERAGE LEAGUE MMR: " + _avg_mmr
atmpt_cnt = 0 # Reset attempt counter
# Close the browser, kills chromedriver.exe as well
browser.quit()
# Write MMR info to filenames
write_mmr(_avg_mmr, _mmr, fullpath)
# Given avg mmr, mmr, and a filepath slice the last 4 characters off the filepath (.txt) append
# the avg mmr and mmr, add .txt back in and replace the old filepath with the new one.
def write_mmr(avg_mmr, mmr, fullpath):
# Rename the file
os.rename(os.curdir+'\\'+fullpath, os.curdir+'\\'+fullpath[:-4]+"_mmr="+str(mmr)+"_avg="+str(avg_mmr)+'.txt')
'''
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/Scrape_opgg_summoner_rankings.py",
"copies": "1",
"size": "6740",
"license": "mit",
"hash": -4446037052760742400,
"line_mean": 33.3928571429,
"line_max": 118,
"alpha_frac": 0.5910979228,
"autogenerated": false,
"ratio": 3.545502367175171,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9620921021363831,
"avg_score": 0.003135853722268194,
"num_lines": 196
} |
__author__ = 'Deniz'
from RiotWatcher.riotwatcher import RiotWatcher
from RiotWatcher.riotwatcher import LoLException
import re, argparse
from operator import itemgetter
# Setup RiotWatcher object with api key
f = open('apikey.txt', 'r')
api = RiotWatcher(f.read())
# NOTE(fix): close the api-key handle; it was previously rebound below
# without ever being closed.
f.close()

# Accumulates [totalSessionsPlayed, championId] pairs per summoner.
allChampionsUsed = []

# Champion id -> title lookup data, one "id | title" pair per line.
f = open('loChampionPairs', 'r')
champions = f.read()
champions = champions.splitlines()
f.close()
def main():
    """Read the generated-summoner file and find each one's most used champion."""
    global random_summoners_1k
    # Command line parsing
    global outputLocation
    parser = argparse.ArgumentParser(description='Attempt to generate X number'
                                                 ' of random summoners.')
    parser.add_argument('-out', metavar='o', type=str)
    args = parser.parse_args()
    print vars(args).values()
    # Python 2 idiom: dict.values() is a list; with one option this is -out.
    outputLocation = vars(args).values()[0]
    with open(outputLocation, 'r') as file:
        # read a list of lines into data
        random_summoners_1k = file.readlines()
    # Check if we have API calls remaining
    if(api.can_make_request()):
        getSummoners()
    # Keyword: u'id':
    # Keyword: u'totalSessionsPlayed':
    print "MOST USED CHAMPIONS FOUND"
def getSummoners():
    """Extract each summoner id from the output file and fetch its stats."""
    f = open(outputLocation, 'r')
    summoners = f.read()
    summoners = summoners.splitlines()
    for s in summoners:
        # Lots of info we don't care about, get just the key (champion name)
        start = "'id': "
        end = ", '"
        result = re.search("%s(.*)%s" % (start, end), str(s)).group(1)
        getSummonerStats(result)
# Get the ranked stats of the given summoner
def getSummonerStats(summoner_id):
    """Fetch ranked stats for *summoner_id*; on failure, substitute a dummy
    stats blob and tag the id with 'XX' so later stages can detect it."""
    try:
        summoner_stats = api.get_ranked_stats(summoner_id, region=None, season=None)
    except LoLException:
        #print "GAME DATA NOT FOUND FOR SUMMONER: " + str(summoner_id)
        summoner_stats = "{u'modifyDate': 1406927571000L, u'summonerId': 0000, u'champions': [{u'stats': {u'totalPhysicalDamageDealt': 152101, u'totalTurretsKilled': 1, u'totalSessionsPlayed': 1000, u'totalAssists': 10, u'totalDamageDealt': 158764, u'mostChampionKillsPerSession': 2, u'totalPentaKills': 0, u'mostSpellsCast': 0, u'totalDoubleKills': 0, u'maxChampionsKilled': 2, u'totalDeathsPerSession': 8, u'totalSessionsWon': 0, u'totalGoldEarned': 12405, u'totalTripleKills': 0, u'totalChampionKills': 2, u'maxNumDeaths': 8, u'totalMinionKills': 199, u'totalMagicDamageDealt': 5315, u'totalQuadraKills': 0, u'totalUnrealKills': 0, u'totalDamageTaken': 17519, u'totalSessionsLost': 1, u'totalFirstBlood': 0}, u'id': XX}, 2]}"
        summoner_id += "XX"
    parseSummonerStats(summoner_stats, summoner_id)
# Given the ranked stats, parse it to get the totalSessionsPlayed and
# corresponding champion id value
def parseSummonerStats(summoner_stats, summoner_id):
    """Regex-mine [totalSessionsPlayed, championId] pairs from the raw
    stats text into the allChampionsUsed global, then trigger sorting."""
    summoner_stats = str(summoner_stats).split(', {')
    start = "'totalSessionsPlayed': "
    end = ", u'totalAssists"
    start1 = "u'id': "
    end1 = "}"
    for s in summoner_stats:
        # Get the number of totalSessionsPlayed
        result = re.search("%s(.*)%s" % (start, end), str(s)).group(1)
        # And the corresponding champion
        result1 = re.search("%s(.*)%s" % (start1, end1), str(s)).group(1)
        # And create a pair [totalSessionsPlayed, id]
        # NOTE(review): both values are captured as strings here.
        allChampionsUsed.append([result, result1])
    sortChampions(summoner_id)
# Sort the list of all champions used by this summoner based on the number
# of totalSessionsPlayed (which is the first value in the pair)
def sortChampions(summoner_id):
    """Sort the accumulated pairs by play count and report the most used."""
    # NOTE(fix): totalSessionsPlayed was captured by a regex, so it is a
    # *string*; sort numerically -- lexicographic order puts '9' above '10'.
    allChampionsUsed_sorted = sorted(allChampionsUsed,
                                     key=lambda pair: int(pair[0]))
    # Pass only the LAST pair to getChampionTitle (most used champion!)
    getChampionTitle(allChampionsUsed_sorted[-1], summoner_id)
    #print "Most used champ:" + str(allChampionsUsed_sorted[-1])
# Given a champion ID, look at the loChampionPairs file to get its
# corresponding champion title
def getChampionTitle(mostUsedCHampionPair, summoner_id):
    """Resolve the champion id in *mostUsedCHampionPair* to a title and
    write it back into the summoner file."""
    # For every champion in loChampionPairs, get the most used champ's title
    for line in champions:
        line = line.split(' | ')
        if line[0] == mostUsedCHampionPair[1]:
            # An id tagged 'XX' marks a summoner whose stats fetch failed.
            if summoner_id.__contains__("XX"):
                mostUsedChampion = "GAME DATA NOT FOUND"
            else:
                mostUsedChampion = line[1]
            #print "MOST USED CHAMPION FOR ID #" + str(summoner_id).strip("XX") +\
            #      " IS: " + str(mostUsedChampion)
            writeMostUsedChampion(summoner_id, mostUsedChampion)
# Write the most used champion back into Random_Summoners_1000.txt
def writeMostUsedChampion(summoner_id, mostUsedChampion):
    """Annotate the matching summoner's line with its most used champion
    and rewrite the output file."""
    start = "'id': "
    end = ", '"
    lineCnt = 0
    # For every random summoner
    for line in random_summoners_1k:
        # Find the summoner ID
        result = re.search("%s(.*)%s" % (start, end), str(line)).group(1)
        # Once we've found the line where the passed in summoner id resides,
        if result == str(summoner_id).strip("XX"):
            # Strip out newlines so we can append most used champ at end,
            # append most used champion at end, then write newline back in.
            random_summoners_1k[lineCnt] = ''.join([line.strip('\n'),
                                                    (" 'mostUsedChampion': " +
                                                     str(mostUsedChampion) + " "), '\n'])
            # Write the new version of the line into the file
            # NOTE(review): rewrites the ENTIRE file (mode 'w') on every
            # match, inside the loop -- correct but wasteful.
            with open(outputLocation+"_CHAMPS_OUT.txt", 'w') as file:
                file.writelines(random_summoners_1k)
        lineCnt += 1
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/Get_Most_Used_Champion.py",
"copies": "1",
"size": "5563",
"license": "mit",
"hash": -878080424210730400,
"line_mean": 37.9090909091,
"line_max": 728,
"alpha_frac": 0.6433579004,
"autogenerated": false,
"ratio": 3.1934557979334097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.433681369833341,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Deniz'
from RiotWatcher.riotwatcher import RiotWatcher
from RiotWatcher.riotwatcher import LoLException
import re, argparse
from random import randint
import subprocess
# Setup RiotWatcher object with api key
f = open('apikey.txt', 'r')
api = RiotWatcher(f.read())
# NOTE(fix): close the api-key handle; the original leaked it.
f.close()

# A global counter used by Generate_Summoner_ID
cnt = 0
numSummonersWritten = 0
def main():
    """Parse CLI options and generate the requested number of summoners."""
    # Command line parsing
    global outputLocation
    parser = argparse.ArgumentParser(description='Attempt to generate X number'
                                                 ' of random summoners.')
    parser.add_argument('-out', metavar='o', type=str)
    parser.add_argument('-num', metavar='n', type=int)
    args = parser.parse_args()
    print(vars(args).values())
    # NOTE(fix): dict.values() ordering is arbitrary, so indexing it could
    # swap the two options; read each option by its argparse dest instead.
    numSummoners = args.num
    outputLocation = args.out
    # Check if we have API calls remaining
    if api.can_make_request():
        # Generate a bunch of random summoners (cnt is advanced globally
        # by Check_Summoner via Generate_Summoner_ID).
        while cnt < numSummoners:
            Generate_Summoner_ID()
    print(str(numSummonersWritten) + " SUMMONERS GENERATED")
# Generate a random single summoner
def Generate_Summoner_ID():
    """Try one random summoner id; write it out if it passes all checks."""
    try:
        # Generate a random summoner ID between 4 to 7 digits
        random_id = random_with_N_digits(randint(4, 7))
        random_summoner = api.get_summoner(None, random_id)
        # Check if the summoner exists
        if Check_Summoner(random_id, random_summoner):
            #print "Summoner Exists"
            # Check the summoner's level
            if Check_Level(random_id) == True:
                #print "Level checked"
                # Check the summoner's ranking
                if Check_Rank(random_id) == True:
                    print "Rank checked"
                    # Write the summoner to a text doc
                    Write_Summoner(random_summoner)
    # If the random summoner ID isn't a real summoner, catch error
    except LoLException:
        print "Summoner ID " + str(random_id) + " Does Not Exist"
        return
# Helper method to create random numbers of a certain digit length
def random_with_N_digits(n):
    """Return a uniformly random integer with exactly *n* digits."""
    low = 10 ** (n - 1)
    high = 10 ** n - 1
    return randint(low, high)
# Check if the summoner name is good
# This is to avoid summoners like: IS148be5be2f2d180191298
def Check_Summoner(random_id, random_summoner):
    """Return True for a real summoner; cull auto-generated 'IS1...' names.

    Side effect: increments the global counter *cnt* on success.
    """
    check_name = str(random_summoner).split("name': u'", 1)[1]
    if(check_name[0:3].__contains__("IS1")):
        print("Summoner ID " + str(random_id) + " has been culled (fake)")
        return False
    else:
        # Increment the counter if a real summoner is found
        global cnt
        cnt = cnt + 1
        print "Summoner ID " + str(random_id) + " Exists!"
        return True
def Check_Level(random_id):
    """Return True iff the summoner with *random_id* is level 30."""
    check_level = api.get_summoner(name=None, id=random_id, region=None)
    # Mine the level out of the response's repr string.
    start = "u'summonerLevel': "
    end = ", u'revisionDate"
    result = re.search("%s(.*)%s" % (start, end), str(check_level)).group(1)
    # Idiom: return the comparison directly instead of if/else True/False.
    return str(result) == "30"
# Checks if the given summoner ID is in Gold or Platinum
def Check_Rank(random_id):
    """Return True iff the summoner is in a GOLD or PLATINUM league.

    Returns None when the summoner is in no league (callers compare the
    result with ``== True``, so None behaves as False).
    """
    try:
        check_rank = api.get_league_entry(str(random_id), team_ids=None, region=None)
    except LoLException:
        print("Summoner ID " + str(random_id) + " is not in any league")
        return
    # Only check the first 40 characters (in case GOLD is in username/elsewhere)
    # NOTE(fix): use boolean 'or' (short-circuiting) rather than bitwise '|',
    # which always evaluates both operands; also 'in' over __contains__.
    if ("GOLD" in str(check_rank)[0:40] or
            "PLATINUM" in str(check_rank)[0:40]):
        print("RANK: " + str(check_rank))
        return True
    else:
        print("Summoner ID " + str(random_id) + " is not in GOLD or PLATINUM")
        return False
# Write this summoner to a text document
def Write_Summoner(summoner):
    """Append a readable repr of *summoner* to the output file.

    Side effect: increments the global numSummonersWritten counter.
    """
    # Write a newline after each } to make output readable
    summoner = re.sub(r'([}])', r'}\n', str(summoner))
    # Cull all extraneous u' characters (leaving ' character)
    summoner = re.sub(r"([u]+['])", r"'", str(summoner))
    f = open(outputLocation+".txt", 'a')
    f.write(str(summoner))
    global numSummonersWritten
    numSummonersWritten = numSummonersWritten + 1
    f.close()
# Method to write a readable text file of the *crapton* of league data returned
def Write_League_Data(_id):
    """Fetch league data for _id and dump a human-readable copy to 'League_Data'."""
    my_league = api.get_league(_id)
    # Write a newline after each } to make output readable
    my_league = re.sub(r'([}])', r'}\n', str(my_league))
    # Cull all extraneous u' characters (leaving ' character)
    my_league = re.sub(r"([u]+['])", r"'", str(my_league))
    # 'with' replaces the manual open/close pair so the handle cannot leak.
    with open('League_Data', 'w') as f:
        f.write(str(my_league))
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/DEPRECATED/Generate_Summoners.py",
"copies": "1",
"size": "4717",
"license": "mit",
"hash": -6228846367567909000,
"line_mean": 30.6644295302,
"line_max": 85,
"alpha_frac": 0.6217935128,
"autogenerated": false,
"ratio": 3.3430191353649894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44648126481649897,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Deniz'
from RiotWatcher.riotwatcher import RiotWatcher
from RiotWatcher.riotwatcher import LoLException
import re, argparse, os
# Setup RiotWatcher object with api key.
# BUG FIX: the original called f.read() a second time when constructing
# RiotWatcher; with the file pointer already at EOF that read returns '',
# so the client was built with an EMPTY api key. Reuse the key read once.
with open('apikey.txt', 'r') as f:
    api_key = f.read()
api = RiotWatcher(api_key)
# Buffers filled by get_summoner_data()/main().
match_history_data = []
match_data = []
def main():
    """Read summoner records from '-in' (a .txt basename) and fetch match
    histories for each, writing one output file per summoner.
    """
    global match_history_data
    global match_data
    parser = argparse.ArgumentParser(description='Attempt to grab data for given list of summoners.')
    # '-in' collides with the Python keyword, hence the vars() lookup below.
    parser.add_argument('-in', metavar='i', type=str)
    args = parser.parse_args()
    # NOTE(review): relies on vars(args) exposing exactly one value; fragile
    # if more arguments are ever added.
    inputLocation = vars(args).values()[0]
    # Read a list of lines into data
    file = open(inputLocation+".txt", 'r')
    summoners_list = file.readlines()
    # Check if we have API calls remaining
    if(api.can_make_request()):
        get_summoner_data(summoners_list)
        write_summoner_data(inputLocation)
    print "DATA FOR " + str(len(match_history_data)) + " SUMMONERS ACQUIRED"
def get_summoner_data(summoners_list):
    """Fetch match history (games 0-15) for each summoner line.

    Each input line must contain "'id': <summonerId>, '".  Successful
    responses are buffered in match_history_data; API errors are logged
    and the summoner skipped.
    """
    start = "'id': "
    end = ", '"
    for summoner in summoners_list:
        # Get the summoner ID
        tmp_id = re.search("%s(.*)%s" % (start, end), str(summoner)).group(1)
        # Try to retrieve the summoner data for the ID we just got
        try:
            #s_data = api.get_match_history(summoner_id=tmp_id, region=None, champion_ids=None, ranked_queues=None, begin_index=0, end_index=15)
            # HACK: calls RiotWatcher's private request helper directly and
            # appends the api key by hand -- presumably a workaround for
            # get_match_history; confirm against the library before changing.
            s_data = api._match_history_request(end_url=tmp_id+"?includeTimeline=true&beginIndex=0&endIndex=15&api_key="+str(api_key), region=None)
            match_history_data.append("'id': "+tmp_id+", ' "+str(s_data))
        except LoLException:
            print 'DATA NOT FOUND FOR SUMMONER ' + str(tmp_id)
# Write the summoner data, writing a new file for each summoner
def write_summoner_data(inputLocation):
    """Write each buffered match-history entry to its own per-summoner file."""
    start = "'id': "
    end = ", '"
    for summoner in match_history_data:
        # Get the summoner id again, used to write individual files for each summoner
        tmp_id = re.search("%s(.*)%s" % (start, end), str(summoner)).group(1)
        # Context manager closes each per-summoner file; the original leaked
        # one open handle per summoner.
        with open(os.curdir+"\_outGendered\_"+tmp_id+"_"+inputLocation+".txt", 'w') as out:
            out.writelines(str(summoner)+"\n\n\n")
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/Get_Summoner_MatchHistory.py",
"copies": "1",
"size": "2290",
"license": "mit",
"hash": -4645159424516854000,
"line_mean": 32.2028985507,
"line_max": 148,
"alpha_frac": 0.6401746725,
"autogenerated": false,
"ratio": 3.1629834254143647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9272291682063448,
"avg_score": 0.006173283170183323,
"num_lines": 69
} |
__author__ = 'Deniz'
from RiotWatcher.riotwatcher import RiotWatcher
from RiotWatcher.riotwatcher import LoLException
import re, argparse, os, time
import urllib2
# Setup RiotWatcher object with api key.
# BUG FIX: the original constructed RiotWatcher from a SECOND f.read(),
# which returns '' at EOF -- the client received an empty key.
with open('apikey.txt', 'r') as f:
    api_key = f.read()
api = RiotWatcher(api_key)
# Buffers filled during main().
match_history_data = []
match_ids = []
match_data = []
def main():
    """Read one summoner's match-history file, extract its 15 match ids,
    and download the full data for each match over HTTPS.
    """
    global match_history_data
    global match_ids
    global match_data
    parser = argparse.ArgumentParser(description='Attempt to grab match data for given list of summoners.')
    parser.add_argument('-in', metavar='i', type=str)
    args = parser.parse_args()
    inputLocation = vars(args).values()[0]
    # Read a list of lines into data
    file = open(inputLocation, 'r')
    match_history_data = file.readlines()
    # First find the summoner ID for this match
    summoner_id = get_summoner_id(inputLocation)
    # Split up the match history (which is all 1 line) by the matchID string
    match_history_split = str(match_history_data).split("u'matchId")
    # Pop off first element of array (doesn't contain any match ID's)
    match_history_split.pop(0)
    # Get the 15 match ID's in the given match history file
    get_match_ids(match_history_split)
    # Get the match data!
    get_match_data_HTTPS(match_ids, summoner_id)
    # Write match data
    #write_summoner_data(inputLocation, match_data, summoner_id)
    print "DATA FOR " + str(len(match_data)) + " MATCHES ACQUIRED"
# Given the input location, search for the top level folder and summoner
# summoner id. Lop off the top level folder, leaving just the summoner id.
def get_summoner_id(inputLocation):
    """Extract the summoner id embedded in the input file path."""
    # Capture everything between the first '_' and the trailing '__out'
    # marker, then drop the fixed 23-character folder prefix.
    matched = re.search("%s(.*)%s" % ("\_", "__out"), str(inputLocation))
    return matched.group(1)[23:]
# Given match history data, find all 15 matchID's and store in array
def get_match_ids(match_history_data):
# Define search terms to get match ID
start = "': "
end = ", u'mapId':"
# For each element of the match history array
for match in match_history_data:
# Lop off everything past 25 characters (doesn't include matchID!)
match = match[:25]
# Search for the match ID
tmp_id = re.search("%s(.*)%s" % (start, end), str(match)).group(1)
#print "MATCH ID: " + str(tmp_id)
match_ids.append(tmp_id)
if not (len(match_ids) == 15):
print "WARN: " + str(len(match_ids)) + " MATCH IDs FOUND"
else:
print str(len(match_ids)) + " MATCH IDs FOUND"
# An ugly workaround for the 401 unauthorized error Riot API is giving get_match() call
# Open the match https URL and read the data, feed it to write_summoner_data()
def get_match_data_HTTPS(match_ids, summoner_id):
    """Fetch each match directly over HTTPS and pass the raw body on.

    NOTE(review): no error handling -- a non-2xx response raises and aborts
    the whole loop, leaving partial output; confirm this is acceptable.
    """
    for id in match_ids:
        response = urllib2.urlopen("https://na.api.pvp.net/api/lol/na/v2.2/match/"+str(id)+"?includeTimeline=true&api_key="+str(api_key))
        write_summoner_data(response.read(), summoner_id)
'''
This function, for some reason, returns a 401 unauthorized error. It should be a problem
with the API key or URL being passed, but neither seem to be incorrect. API key works fine,
URL could be wrong if there's a bug in RiotWatcher but otherwise should be good. See hack
function above that replaces this one.
def get_match_data(match_ids):
for id in match_ids:
# Try to retrieve the match data for each id
if api.can_make_request():
#try:
m_data = api.get_match(match_id=id, region=None, include_timeline=True)
match_data.append(str(m_data))
print 'DATA FOUND FOR MATCH ID ' + str(id)
#except LoLException:
# print 'ERR: DATA NOT FOUND FOR MATCH ID ' + str(id)
else:
print "API: Waiting 5 seconds for more calls"
time.sleep(5)
'''
# Write the match data, writing a new file for each summoner ID
def write_summoner_data(match_data, summoner_id):
    """Append this match's raw data to the per-summoner output file."""
    out_path = os.curdir + "\_outMatchData_0to15\_" + summoner_id
    # Append mode: one file per summoner accumulates all of their matches.
    with open(out_path, 'a') as sink:
        sink.writelines(str(match_data))
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/Get_Summoner_MatchData.py",
"copies": "1",
"size": "4196",
"license": "mit",
"hash": 903568000999268500,
"line_mean": 33.6859504132,
"line_max": 137,
"alpha_frac": 0.6620591039,
"autogenerated": false,
"ratio": 3.3301587301587303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44922178340587304,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Deniz'
from RiotWatcher.riotwatcher import RiotWatcher
from RiotWatcher.riotwatcher import LoLException
import re, argparse, os, time
# Setup RiotWatcher object with api key.
# BUG FIX: the original built RiotWatcher from a second f.read(), which
# returns '' once the file pointer is at EOF -- empty api key.
with open('apikey.txt', 'r') as f:
    api_key = f.read()
api = RiotWatcher(api_key)
# Buffer filled by get_summoner_data().
match_history_data = []
def main():
    """Read summoner records from '-in' and fetch match-history games 16-31."""
    global match_history_data
    parser = argparse.ArgumentParser(description='Attempt to grab data for given list of summoners.')
    parser.add_argument('-in', metavar='i', type=str)
    args = parser.parse_args()
    inputLocation = vars(args).values()[0]
    # Read a list of lines into data
    file = open(inputLocation, 'r')
    read_input = file.readlines()
    # Check if we have API calls remaining
    if(api.can_make_request()):
        get_summoner_data(read_input)
        write_summoner_data(inputLocation)
    print "DATA FOR " + str(len(match_history_data)) + " SUMMONERS ACQUIRED"
def get_summoner_data(summoners_list):
    """Fetch match history (games 16-31) for each backtick-delimited line.

    Input lines look like `<id>`:...; successes are buffered in
    match_history_data as "`<id>`:<raw history>".
    """
    start = "`"
    end = ":"
    for summoner in summoners_list:
        # Get the summoner ID
        tmp_id = re.search("%s(.*)%s" % (start, end), str(summoner)).group(1)
        # Try to retrieve the summoner data for the ID we just got
        if api.can_make_request():
            try:
                # HACK: private helper + hand-appended api key (see 0to15 variant).
                s_data = api._match_history_request(end_url=tmp_id+"?includeTimeline=true&beginIndex=16&endIndex=31&api_key="+str(api_key), region=None)
                match_history_data.append("`"+str(tmp_id)+"`:"+str(s_data))
                print 'DATA FOUND FOR SUMMONER ' + str(tmp_id)
            except LoLException:
                print 'ERR: DATA NOT FOUND FOR SUMMONER ' + str(tmp_id)
        else:
            # NOTE(review): after sleeping, the current summoner is NOT
            # retried -- it falls through and is dropped; confirm intended.
            print "API: Waiting 5 seconds for more calls"
            time.sleep(5)
# Write the summoner data, writing a new file for each summoner
def write_summoner_data(inputLocation):
    """Write each buffered match-history entry to a per-summoner output file."""
    start = "`"
    end = "`:"
    for summoner in match_history_data:
        # Get the summoner id again, used to write individual files for each summoner
        tmp_id = re.search("%s(.*)%s" % (start, end), str(summoner)).group(1)
        # Write summoner data to output folder
        with open(os.curdir+"\_outMatchHistory_16to31\_"+tmp_id+"_"+inputLocation, 'w') as writeFile:
            # NOTE(review): r"\n" appends the two literal characters
            # backslash + n, not a newline (the 0to15 variant writes real
            # newlines) -- confirm which is intended before changing.
            writeFile.writelines(str(summoner)+r"\n")
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/Get_Summoner_MatchHistory_Modified.py",
"copies": "1",
"size": "2320",
"license": "mit",
"hash": 3600259230790982700,
"line_mean": 31.6901408451,
"line_max": 152,
"alpha_frac": 0.6288793103,
"autogenerated": false,
"ratio": 3.2814710042432815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9386682480411777,
"avg_score": 0.004733566826301071,
"num_lines": 71
} |
__author__ = 'Deniz'
from RiotWatcher.riotwatcher import RiotWatcher
from RiotWatcher.riotwatcher import LoLException
import re, argparse, time
from operator import itemgetter
# Setup RiotWatcher object with api key.
# Context managers close both file handles promptly; the original left
# them open for the life of the process.
with open('apikey.txt', 'r') as f:
    api = RiotWatcher(f.read())
allChampionsUsed = []
# Static "id | ChampionName" pairs, one per line.
with open('loChampionPairs', 'r') as f:
    champions = f.read().splitlines()
summoner_name_id_dict = {}
summoner_most_used_champ_dict = {}
def main():
    """Find each input summoner's most-used champion and record it."""
    global summoner_name_id_dict
    global summoner_most_used_champ_dict
    global allChampionsUsed
    # Command line parsing
    global inputLocation
    global outputLocation
    parser = argparse.ArgumentParser(description='Attempt to find every given summoners most used champion.')
    parser.add_argument('-in', metavar='i', type=str)
    parser.add_argument('-out', metavar='o', type=str)
    args = parser.parse_args()
    print vars(args).values()
    # NOTE(review): maps values()[0] -> '-out' and values()[1] -> '-in';
    # this depends on vars(args) dict ordering -- verify on the target
    # interpreter before trusting.
    outputLocation = vars(args).values()[0]
    inputLocation = vars(args).values()[1]
    # Check if we have API calls remaining
    createDictOfSummoners()
    print len(summoner_name_id_dict)
    # For every key in the dict, get the summoner stats
    for k, v in summoner_name_id_dict.iteritems():
        getSummonerStats(k, v)
    print "MOST USED CHAMPIONS FOUND"
# Read the input txt file and create a dictionary of summoners.
# Dict format is {summonerId: summonerName} since summoners cannot have the same ID
# but could possibly have the same username across different region servers.
def createDictOfSummoners():
    """Populate summoner_name_id_dict from '<name>:<id>' lines of the input file."""
    # 'with' closes the handle (the original leaked it); the unused
    # enumerate() index has also been dropped.
    with open(inputLocation, 'r') as f:
        for line in f.readlines():
            # [:-1] drops the single trailing '\n' from the id field.
            summoner_name_id_dict.update({line.split(":")[1][:-1]: line.split(":")[0]})
# Get the ranked stats of the given summoner ID
def getSummonerStats(summoner_id, summoner_name):
    """Fetch ranked stats for one summoner and hand them to parseSummonerStats.

    On LoLException a canned placeholder blob is substituted and the id is
    tagged with "XX" so downstream code reports 'GAME DATA NOT FOUND'.
    """
    if api.can_make_request():
        try:
            summoner_stats = api.get_ranked_stats(summoner_id, region=None, season=None)
        except LoLException:
            print "GAME DATA NOT FOUND FOR SUMMONER: " + str(summoner_id)
            # Placeholder shaped like a real response so the parser still works.
            summoner_stats = "{u'modifyDate': 1406927571000L, u'summonerId': 0000, u'champions': [{u'stats': {u'totalPhysicalDamageDealt': 152101, u'totalTurretsKilled': 1, u'totalSessionsPlayed': 1000, u'totalAssists': 10, u'totalDamageDealt': 158764, u'mostChampionKillsPerSession': 2, u'totalPentaKills': 0, u'mostSpellsCast': 0, u'totalDoubleKills': 0, u'maxChampionsKilled': 2, u'totalDeathsPerSession': 8, u'totalSessionsWon': 0, u'totalGoldEarned': 12405, u'totalTripleKills': 0, u'totalChampionKills': 2, u'maxNumDeaths': 8, u'totalMinionKills': 199, u'totalMagicDamageDealt': 5315, u'totalQuadraKills': 0, u'totalUnrealKills': 0, u'totalDamageTaken': 17519, u'totalSessionsLost': 1, u'totalFirstBlood': 0}, u'id': XX}, 2]}"
            summoner_id += "XX"
        parseSummonerStats(summoner_stats, summoner_id, summoner_name)
    else:
        print "Not enough API calls for ID: " + summoner_id + " waiting 4 seconds"
        time.sleep(4)
        # NOTE(review): the retry's result is discarded; harmless only
        # because callers ignore this function's return value.
        getSummonerStats(summoner_id, summoner_name)
# Given the ranked stats, parse it to get the totalSessionsPlayed and
# corresponding champion id value
def parseSummonerStats(summoner_stats, summoner_id, summoner_name):
    """Extract (totalSessionsPlayed, championId) pairs and pass them on.

    Both values are captured as STRINGS via regex over the response repr.
    """
    championsUsed = []
    # Break up the stats by summoner
    summoner_stats = str(summoner_stats).split(', {')
    # Throw away the last summoner (id = 0 this is the combined stats)
    summoner_stats = summoner_stats[:-1]
    start = "'totalSessionsPlayed': "
    end = ", u'totalAssists"
    start1 = "u'id': "
    end1 = "}"
    for s in summoner_stats:
        # Get the number of totalSessionsPlayed
        result = re.search("%s(.*)%s" % (start, end), str(s)).group(1)
        # And the corresponding champion
        result1 = re.search("%s(.*)%s" % (start1, end1), str(s)).group(1)
        # And create a pair [totalSessionsPlayed, id]
        championsUsed.append([result, result1])
    sortChampions(summoner_id, summoner_name, championsUsed)
# Sort the list of all champions used by this summoner based on the number
# of totalSessionsPlayed (which is the first value in the pair)
def sortChampions(summoner_id, summoner_name, championsUsed):
    """Pick the most-played champion pair and forward it to getChampionTitle."""
    if len(championsUsed) > 0:
        # totalSessionsPlayed was captured as a STRING; convert for the sort
        # key, otherwise '9' ranks above '10' (lexicographic comparison) and
        # the wrong champion can be reported as most-used.
        allChampionsUsed_sorted = sorted(championsUsed, key=lambda pair: int(pair[0]))
        # Pass only the LAST pair to getChampionTitle (most used champion!)
        getChampionTitle(allChampionsUsed_sorted[-1], summoner_id, summoner_name)
    else:
        # No ranked games at all: sentinel pair (0 sessions, champion id 1).
        getChampionTitle([0, 1], summoner_id, summoner_name)
# Given a champion ID, look at the loChampionPairs file to get it's
# corresponding champion title
def getChampionTitle(mostUsedChampionPair, summoner_id, summoner_name):
    """Resolve the champion id to its name and write the result.

    Ids tagged "XX" (API failure upstream) are reported as
    'GAME DATA NOT FOUND' instead of a champion name.
    """
    # For every champion in loChampionPairs, get the most used champ's title
    for line in champions:
        line = line.split(' | ')
        if line[0] == mostUsedChampionPair[1]:
            if summoner_id.__contains__("XX"):
                mostUsedChampion = "GAME DATA NOT FOUND"
            else:
                mostUsedChampion = line[1]
            writeMostUsedChampion(summoner_id, str(summoner_name)+":"+mostUsedChampion)
            print "MOST USED CHAMPION FOR ID #" + str(summoner_id).strip("XX") +\
                  " IS: " + str(mostUsedChampion)
# Write the most used champion to output file
def writeMostUsedChampion(summoner_id, summoner_name_and_champ):
    """Append one 'id:name:champion' record to the output file."""
    record = summoner_id + ":" + summoner_name_and_champ + "\n"
    with open(outputLocation, 'a') as sink:
        sink.writelines(record)
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/Get_Most_Used_Champion_Modified.py",
"copies": "1",
"size": "5665",
"license": "mit",
"hash": -4577323289710996000,
"line_mean": 40.3576642336,
"line_max": 732,
"alpha_frac": 0.6833186231,
"autogenerated": false,
"ratio": 3.087193460490463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4270512083590463,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Deniz'
from RiotWatcher.riotwatcher import RiotWatcher
from RiotWatcher.riotwatcher import LoLException
import re, argparse, time
# Setup RiotWatcher object with api key.
# 'with' closes the key-file handle promptly; the original left it open.
with open('apikey.txt', 'r') as f:
    api = RiotWatcher(f.read())
numSummonersWritten = 0
summonerDict = {}
def main():
    """Convert summoner names ('-in') to ids, filter by validity and level,
    and append survivors to the '-out' file.
    """
    # Command line parsing
    global outputLocation
    global summonerDict
    parser = argparse.ArgumentParser(description='Attempt to generate X number'
                                                 ' of random summoners.')
    parser.add_argument('-in', metavar='i', type=str)
    parser.add_argument('-out', metavar='o', type=str)
    args = parser.parse_args()
    # NOTE(review): argument-to-index mapping relies on vars(args) ordering.
    inputLocation = vars(args).values()[1]
    outputLocation = vars(args).values()[0]
    f = open(inputLocation, "r")
    summoners = f.readlines()
    # For every summoner in the text file (NOTE: We slice the last 2 chars of each line '\n' newline)
    for s in summoners:
        # Check if we have API calls remaining
        if(api.can_make_request()):
            # Get it's ID, append it to dict. Returns true if id is found, false otherwise
            if Get_Summoner_Ids(s[:-1]) == True:
                # Check to make sure it's a real summoner
                if Check_Summoner(s[:-1]) == True:
                    # Check the summoner's level is 30
                    if Check_Level(s[:-1]) == True:
                        Write_Summoner(s[:-1])
                        '''
                        # Check the summoner's rank's is gold/plat
                        if Check_Rank(s[:-1]) == True:
                            # If all tests pass, write summoner to out
                            Write_Summoner(s[:-1])
                        # If not right rank, remove it from dict
                        else:
                            summonerDict.pop(str(s[:-1]))
                        '''
                    # If not lvl 30 remove it from dict
                    else:
                        summonerDict.pop(str(s[:-1]))
                # If not real summoner remove it from dict
                else:
                    summonerDict.pop(str(s[:-1]))
            # If no data is returned, do nothing
            else:
                pass
        # If no api call remain, wait 5 seconds
        else:
            print "API: Waiting 10 seconds for more API calls..."
            time.sleep(10)
    print "OUT: " + str(numSummonersWritten) + " summoner IDs retrieved."
# Given a summoner string name, get the summoner id
def Get_Summoner_Ids(summoner):
    """Look up the summoner's id by name and record it in summonerDict.

    Returns True on success, False when the API reports no such summoner.
    """
    # Get the summoner object
    try:
        summonerObj = api.get_summoner(name=summoner, _id=None, region=None)
    except LoLException:
        print "Summoner " + summoner + " not found."
        return False
    # Define search terms
    start = "u'id': "
    end = ", u'name':"
    # Search json for summoner id
    summoner_id = re.search("%s(.*)%s" % (start, end), str(summonerObj)).group(1)
    # Append summoner string and id to a dict
    summonerDict.update({str(summoner): str(summoner_id)})
    return True
# Check if the summoner name is good
# This is to avoid summoners like: IS148be5be2f2d180191298
def Check_Summoner(summoner):
if summoner[0:3].__contains__("IS1"):
print("Summoner " + str(summoner) + ":" + str(summonerDict[str(summoner)]) + " has been culled (fake)")
return False
else:
print "Summoner " + str(summoner) + ":" + str(summonerDict[str(summoner)]) + " Exists!"
return True
# Check if summoner is level 30
def Check_Level(summoner):
start = "u'summonerLevel': "
end = ", u'revisionDate"
if(api.can_make_request()):
check_level = api.get_summoner(name=None, _id=summonerDict[str(summoner)], region=None)
result = re.search("%s(.*)%s" % (start, end), str(check_level)).group(1)
if (str(result) == "30"):
return True
else:
return False
else:
print "API: Wating 10 seconds for more API calls..."
time.sleep(10)
Check_Level(summoner) # Recursive call to try again
# Checks if the given summoner is in Gold or Platinum
def Check_Rank(summoner):
try:
print "Checking: " + str(summonerDict[str(summoner)])
check_rank = api.get_league_entry(str(summonerDict[str(summoner)]), team_ids=None, region=None)
except LoLException:
print "Summoner ID " + str(summonerDict[str(summoner)]) + " is not in any leagues."
return False
# Only check the first 40 characters (in case GOLD is in username/elsewhere)
if str(check_rank)[0:40].__contains__("GOLD") | \
str(check_rank)[0:40].__contains__("PLATINUM"):
print "RANK: " + str(check_rank)
return True
else:
print("Summoner ID " + str(summonerDict[str(summoner)]) + " is not in GOLD or PLATINUM")
return False
# Write the dict of summoners to a text document
def Write_Summoner(summoner):
    """Append 'name:id' for the given summoner and bump the written counter."""
    global numSummonersWritten
    # Append mode so repeated runs accumulate rather than overwrite; the
    # context manager replaces the manual open/close pair.
    with open(outputLocation, 'a') as f:
        f.write(str(summoner)+":"+str(summonerDict[str(summoner)])+"\n")
    numSummonersWritten += 1
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/Get_Summoner_Ids.py",
"copies": "1",
"size": "5219",
"license": "mit",
"hash": 6514884125351027000,
"line_mean": 33.5695364238,
"line_max": 111,
"alpha_frac": 0.5767388389,
"autogenerated": false,
"ratio": 3.535907859078591,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9592759518238112,
"avg_score": 0.003977435948095736,
"num_lines": 151
} |
__author__ = 'Deniz'
from RiotWatcher.riotwatcher import RiotWatcher
from RiotWatcher.riotwatcher import LoLException
import re
# Setup RiotWatcher object with api key.
# 'with' closes the key-file handle promptly; the original left it open.
with open('apikey.txt', 'r') as f:
    api = RiotWatcher(f.read())
list_of_champion_ids = []
def main():
    """Entry point: run the sort/pair pipeline when API quota remains."""
    # Check if we have API calls remaining
    if api.can_make_request():
        # getListOfIDs() only needs to run once to seed 'loChampionIDs';
        # after that, sorting and pairing is the whole job.
        sortIDs()
# Gets a list of Champion ID's from riot's API (unsorted strings)
def getListOfIDs():
    """Fetch all champions and append each champion's id to 'loChampionIDs'."""
    # Split the raw response so each chunk holds one champion's fields.
    champion_chunks = str(api.get_all_champions(region=None, free_to_play=False)).split(", {")
    # The id value sits between these two markers in each chunk.
    start = "id': "
    end = "}"
    out = open('loChampionIDs', 'a')
    for chunk in champion_chunks:
        champ_id = re.search("%s(.*)%s" % (start, end), str(chunk)).group(1)
        print(champ_id)
        out.write(str(champ_id) + '\n')
    out.close()
# Look through the list of unsorted ID strings, sort them as ints
def sortIDs():
f = open('loChampionIDs', 'r')
champs = f.read()
champs = champs.split('\n')
champs = [int(x) for x in champs]
champs.sort()
f = open('loChampionIDs_sorted', 'a')
f.write(str(champs))
print champs
pairChampions(champs)
# Look through the list of sorted ID strings, get the static champion info
# based on the ID, and create a text file of pairs: ID | Champion Name
def pairChampions(champs):
f = open('loChampionPairs', 'a')
for champ in champs:
champ_info = api.static_get_champion(champ)
# Lots of info we don't care about, get just the key (champion name)
start = "'key': u'"
end = "',"
result = re.search("%s(.*)%s" % (start, end), str(champ_info)).group(1)
print champ_info
f.write(str(champ) + " | " + str(result) + "\n")
f.close()
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/Get_Champions.py",
"copies": "1",
"size": "1983",
"license": "mit",
"hash": 4040895551298573300,
"line_mean": 26.1780821918,
"line_max": 79,
"alpha_frac": 0.6147251639,
"autogenerated": false,
"ratio": 2.990950226244344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4105675390144344,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Deniz'
import argparse, csv, os, re
from collections import OrderedDict
summoner_match_history_arryOfDicts = []
def main():
    """Walk the input directory, parse each summoner file, and emit CSVs."""
    # NOTE(review): 'summoner_match_history' is never defined at module
    # level (the module array is summoner_match_history_arryOfDicts);
    # this global declaration looks vestigial.
    global summoner_match_history
    parser = argparse.ArgumentParser(description='Parse input directory and write summoner data to CSV file.')
    parser.add_argument('-in', metavar='i', type=str)
    args = parser.parse_args()
    inputLocation = vars(args).values()[0]
    # Loop through every file in the given input dir
    for root, __, files in os.walk(inputLocation):
        for f in files:
            fullpath = os.path.join(root, f)
            parse_input(fullpath, f)
            # NOTE(review): this result is discarded and parse_input()
            # already calls get_mmr() itself -- looks redundant.
            get_mmr(f)
            write_csv(f)
# Given an input filepath, read and parse the summoner data into a dict
def parse_input(filepath, filename):
    """Parse one summoner's file into per-match OrderedDicts.

    Rebuilds summoner_match_history_arryOfDicts in place: one dict per
    match, keyed by the stat field names found in the file, with the
    filename-derived MMR and AVG-MMR values appended to every dict.
    """
    # Read input file
    f = open(filepath, 'r')
    read_file = f.read()
    f.close()
    # Split the file into each match
    split_file = read_file.split("u'matchId': ")
    # Define search terms
    start = ", u'"
    end = ", u'"
    # Save the match history info like matchId, mapId,
    match_history_header = split_file[0]
    # Wipe the array of dicts
    summoner_match_history_arryOfDicts[:] = []
    # For every match find the field and it's value, add them to dict
    for match in split_file:
        # Split based on commas
        split_match = match.split(',')
        # Pop off the first element RANKED_TEAM_5x5 we don't care about
        split_match.pop(0)
        # Declare temp dict
        tmp_dict = OrderedDict()
        for field in split_match:
            # Strip out all unneeded characters
            field = field.replace('participants:', '')
            field = field.replace('stats:', '')
            field = field.replace('{', '')
            field = field.replace('}', '')
            field = field.replace('[', '')
            field = field.replace(']', '')
            field = field.replace(" u'", '')
            field = field.replace("u'", '')
            field = field.replace("'", '')
            # If the field is empty (due to above characters being stripped) skip it
            if ' ' == field:
                pass
            # Otherwise, split this field based on : to get key and value for dict
            else:
                split_field = field.split(":")
                # Remove any spaces from the value
                split_field[1] = split_field[1].replace(' ', '')
                # Populate the tmp dict with fields
                tmp_dict[split_field[0]] = split_field[1]
        if len(tmp_dict.keys()) > 0:
            # Get the MMR and AVG mmr keys and values
            mmr_k, mmr_v = get_mmr(filename=filename)
            avg_mmr_k, avg_mmr_v = get_avg_mmr(filename=filename)
            # Append them at the end of the tmp dict
            tmp_dict[mmr_k] = mmr_v
            tmp_dict[avg_mmr_k] = avg_mmr_v
            # Append tmp dict to the array of dicts
            summoner_match_history_arryOfDicts.append(tmp_dict)
def get_mmr(filename):
    """Return ('mmr', value) parsed from a '..._mmr=x,xxx_avg=x,xxx.txt' name."""
    # The final 23 characters before '.txt' hold 'mmr=x,xxx_avg=x,xxx';
    # the mmr token is the part before the underscore, commas stripped.
    mmr_token = filename[-23:-4].split('_')[0].replace(',', '')
    # First 3 chars are the label 'mmr'; the value follows the '='.
    return mmr_token[:3], mmr_token[4:]
def get_avg_mmr(filename):
    """Return ('avg', value) parsed from a '..._mmr=x,xxx_avg=x,xxx.txt' name."""
    # The final 23 characters before '.txt' hold 'mmr=x,xxx_avg=x,xxx';
    # the avg token is the part after the underscore, commas stripped.
    avg_token = filename[-23:-4].split('_')[1].replace(',', '')
    # First 3 chars are the label 'avg'; the value follows the '='.
    return avg_token[:3], avg_token[4:]
def write_csv(filename):
    """Write the parsed match dicts for one summoner to a CSV file.

    Uses the FIRST match's keys as the header row; rows with extra keys
    rely on extrasaction='ignore'.  Mutates the module array by popping
    the leading empty dict.
    """
    #for dictionary in summoner_match_history_arryOfDicts:
    #    for k, v in dictionary.iteritems():
    #        print str(k) + " : " + str(v)
    # Pop the first empty dict from array of dicts
    summoner_match_history_arryOfDicts.pop(0)
    # Open CSV file at output folder + filename minus _mmr=x,xxx_avg=x,xxx.txt + .csv
    with open('.\_outMatchHistoryCSV_0to15\\'+filename[:-4]+'.csv', 'a') as csvfile:
        # Assign fieldnames (headers)
        fieldnames = summoner_match_history_arryOfDicts[0].keys()
        print len(fieldnames)
        print len(summoner_match_history_arryOfDicts)
        # Create CSV writer object
        writer = csv.DictWriter(csvfile, quoting=csv.QUOTE_NONE, fieldnames=fieldnames, extrasaction='ignore')
        # Write headers
        writer.writeheader()
        # Every every dict in the array of dicts, write it as a row
        for dictionary in summoner_match_history_arryOfDicts:
            writer.writerow(dictionary)
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/CSV_Data_Formatter.py",
"copies": "1",
"size": "5088",
"license": "mit",
"hash": -1056203335726594200,
"line_mean": 32.701986755,
"line_max": 118,
"alpha_frac": 0.5998427673,
"autogenerated": false,
"ratio": 3.670995670995671,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4770838438295671,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Deniz'
import argparse
# Declare an empty list of summoners
lo_summoners = []
def main():
# Command line parsing
global outputLocation
parser = argparse.ArgumentParser(description='Attempt to generate X number'
' of random summoners.')
parser.add_argument('-out', metavar='o', type=str)
args = parser.parse_args()
print vars(args).values()
outputLocation = vars(args).values()[0]
global lo_summoners
f = open(outputLocation+'.txt', 'r')
champions = f.read()
champions = champions.splitlines()
scrubCnt = 0
cnt = 0
# For every line in the file, if the most used
# champ is "GAME DATA NOT FOUND", erase it
for line in champions:
try:
if str(line.split("'mostUsedChampion': ", 1)[1]).__contains__("GAME DATA NOT FOUND"):
champions[cnt] = '' # Erase that summoner line
scrubCnt += 1
else:
champions[cnt] += "\n"
except IndexError:
"Most used champion not found"
champions[cnt] = '' # Erase that summoner line
scrubCnt += 1
cnt += 1
# Close file and reopen in write mode (to overwrite it)
#f.close()
f = open(outputLocation+'.txt', 'w')
# Write the new list sans duplicates
for summoner in champions:
f.write(summoner)
print str(scrubCnt) + ' SUMMONERS WITH NO DATA SCRUBBED'
f.close()
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/Scrub_Useless_Summoners.py",
"copies": "1",
"size": "1533",
"license": "mit",
"hash": 851370026436835300,
"line_mean": 24.1475409836,
"line_max": 97,
"alpha_frac": 0.5688193085,
"autogenerated": false,
"ratio": 3.693975903614458,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9737065065775412,
"avg_score": 0.005146029267809127,
"num_lines": 61
} |
__author__ = 'Deniz'
import argparse, os, os.path, sys
input_dir0_filenames = []
input_dir1_filenames = []
unlike_filenames = []
def main():
    """Compare two directories and reconcile files present in only one."""
    global input_dir0_filenames
    global input_dir1_filenames
    global unlike_filenames
    parser = argparse.ArgumentParser(description="Given dir0 and dir1 locations, search for files in dir0 that "
                                                 "don't have the same corresponding filename in dir1")
    parser.add_argument('-dir0', metavar='d0', type=str)
    parser.add_argument('-dir1', metavar='d1', type=str)
    args = parser.parse_args()
    # NOTE(review): maps values()[1] -> dir0 and values()[0] -> dir1; this
    # depends on vars(args) dict ordering -- verify before trusting.
    dir0 = vars(args).values()[1]
    dir1 = vars(args).values()[0]
    read_filenames(dir0, dir1)
    find_unlike_filenames(dir0, dir1)
# Read the filename of every file in each input directory, store in array
def read_filenames(dir0, dir1):
    """Walk both directories, collecting full paths into the module arrays."""
    for target_dir, sink in ((dir0, input_dir0_filenames), (dir1, input_dir1_filenames)):
        for root, __, files in os.walk(target_dir):
            for name in files:
                sink.append(os.path.join(root, name))
# Search for files in dir0 that don't have the same corresponding filename in dir1
def find_unlike_filenames(dir0, dir1):
    """Cross-compare the cached filename lists and delete every file that
    has no counterpart in the other directory.

    Relies on the module-level lists ``input_dir0_filenames`` and
    ``input_dir1_filenames`` being populated by ``read_filenames`` first.
    The magic slice offsets (23/-24/26/24) strip fixed-length directory
    prefixes/suffixes from the full paths -- TODO(review): confirm they
    match the actual directory layout (note the 26 vs 24 asymmetry).
    """
    # First loop through every filename in dir0 and compare against every filename in dir1.
    # (Leftover debug print/sys.exit() removed here: it aborted the whole
    # program on the first file and made everything below unreachable.)
    for filename in input_dir0_filenames:
        fileFound = False
        cnt = 0
        while fileFound == False:
            try:
                # If filenames are the same, we've found a match and can
                # stop the inner loop.
                if str(filename)[23:-24]+'.txt' == str(input_dir1_filenames[cnt])[26:]:
                    fileFound = True
                # Otherwise check if this is the last file of dir1. If it is
                # this must be an unlike file.
                elif(input_dir1_filenames[cnt] == input_dir1_filenames[-1]):
                    print("UNLIKE FILE FOUND: ./dir0/" + filename[23:-24]+'.txt')
                    os.remove(dir0+'/'+(filename[23:]))
                # Otherwise pass and continue the inner loop.
                else:
                    pass
                cnt += 1
            except IndexError:
                # Ran past the end of dir1's list: stop scanning this file.
                fileFound = True
    # Next loop through every filename in dir1 and compare against every filename in dir0.
    for filename in input_dir1_filenames:
        fileFound = False
        cnt = 0
        while fileFound == False:
            try:
                if str(filename)[24:] == str(input_dir0_filenames[cnt])[23:-24]+'.txt':
                    fileFound = True
                elif(input_dir0_filenames[cnt] == input_dir0_filenames[-1]):
                    print("UNLIKE FILE FOUND: ./dir1/" + filename[24:])
                    os.remove(dir1+'/'+(filename[24:]))
                else:
                    pass
                cnt += 1
            except IndexError:
                fileFound = True
# Given an array of unlike filenames, delete those files
def delete_unlike_files():
    """Placeholder stub: deletion already happens inline inside
    find_unlike_filenames, so this intentionally does nothing."""
    return
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/Find_Like_Filenames.py",
"copies": "1",
"size": "4168",
"license": "mit",
"hash": 4475653152394823700,
"line_mean": 37.9626168224,
"line_max": 112,
"alpha_frac": 0.5616602687,
"autogenerated": false,
"ratio": 4.098328416912488,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006418938069692428,
"num_lines": 107
} |
__author__ = 'Deniz'
import os.path
import shutil
def main():
    """Copy .txt files that exist in _outControl_0to15 but not in
    _outControl_16to30 into the merged _outControl directory.

    NOTE(review): the backslash-literal relative paths are Windows-only;
    confirm before running elsewhere.
    """
    source0 = os.curdir + "\_outControl_0to15\\"
    source1 = os.curdir + "\_outControl_16to30\\"
    dest = os.curdir + "\_outControl\\"
    # Base names (without extension) of every .txt file in each source dir.
    source0files = {os.path.splitext(x)[0] for x in os.listdir(source0) if os.path.splitext(x)[1] == '.txt'}
    source1files = {os.path.splitext(x)[0] for x in os.listdir(source1) if os.path.splitext(x)[1] == '.txt'}
    # Set difference: names present in 0to15 only.
    for missing in source0files - source1files:
        # (Removed an unused 'source1file' computation here -- dead code.)
        shutil.copy(os.path.join(source0, missing + '.txt'), dest)
    #deleteMissingFiles(source0, dest)
    #deleteMissingFiles(source1, dest)
def deleteMissingFiles(source, dest):
    """Delete, from BOTH hard-coded source directories, every file whose
    base name appears as a .txt in *dest*.

    NOTE(review): the ``source`` parameter is never used and both
    directories are hard-coded below, so the two commented-out call sites
    in main() would each delete from both dirs -- confirm intent before use.
    """
    # Base names of all .txt files already present in dest.
    filesToDelete = {os.path.splitext(x)[0] for x in os.listdir(dest) if os.path.splitext(x)[1] == '.txt'}
    for file in filesToDelete:
        #print file
        if os.path.exists(os.curdir+"\_outControl_0to15\\"+file+".txt"):
            os.remove(os.curdir+"\_outControl_0to15\\"+file+".txt")
        if os.path.exists(os.curdir+"\_outControl_16to30\\"+file+".txt"):
            os.remove(os.curdir+"\_outControl_16to30\\"+file+".txt")
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/Duplicate_File_Finder.py",
"copies": "1",
"size": "1259",
"license": "mit",
"hash": -8627436113358236000,
"line_mean": 38.375,
"line_max": 108,
"alpha_frac": 0.6298649722,
"autogenerated": false,
"ratio": 3.100985221674877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9153105425187935,
"avg_score": 0.015548953737388424,
"num_lines": 32
} |
__author__ = 'Deniz'
import re, argparse
# Declare an empty list of summoners
lo_summoners = []
lo_ids = []
no_dups_lo_summoners = []
def main():
    """Read a summoner list file, find entries with duplicate IDs, write the
    duplicates to ``<input>_DUPLICATES.txt`` and append the non-duplicates
    back to the input file (Python 2 script).

    NOTE(review): ``vars(args).values()[0]`` relies on dict ordering (and is
    not indexable on Python 3); the reader handle and ``g`` are never closed.
    """
    # Command line parsing
    global inputLocation
    parser = argparse.ArgumentParser(description='Attempt to generate X number'
                                                 ' of random summoners.')
    parser.add_argument('-out', metavar='o', type=str)
    args = parser.parse_args()
    print vars(args).values()
    inputLocation = vars(args).values()[0]
    global lo_summoners
    global lo_ids
    global no_dups_lo_summoners
    f = open(inputLocation, 'r')
    read_lines = f.readlines()
    duplicate_cnt = 0
    # Delimiters around the summoner ID in each serialized dict line.
    start = "'id': "
    end = ", 'name': '"
    # For every line in the file, get the summoner ID
    for line in read_lines:
        result = re.search("%s(.*)%s" % (start, end), str(line)).group(1)
        lo_ids.append(result)
    duplicate_ids = []
    seen_ids = []
    # Get all the duplicate IDs (IDs seen more than once).
    for id in lo_ids:
        if id not in seen_ids:
            seen_ids.append(id)
        else:
            duplicate_ids.append(id)
    duplicate_summoners = []
    # For every line
    for line in read_lines:
        # Get this line's ID
        result = re.search("%s(.*)%s" % (start, end), str(line)).group(1)
        # Check if the ID is a duplicate, if so add it to list of dups
        if result in duplicate_ids:
            duplicate_summoners.append(line)
            duplicate_cnt += 1
        # Otherwise add it to non-duplicate list
        else:
            no_dups_lo_summoners.append(line)
    # Write the duplicates to a separate text file
    g = open(inputLocation+"_DUPLICATES.txt", 'a')
    for summoner in duplicate_summoners:
        g.write(summoner)
    # Write the non-duplicates to main file
    f = open(inputLocation, 'a')
    f.write("===============================================================\n")
    for summoner in no_dups_lo_summoners:
        f.write(summoner)
    print str(duplicate_cnt) + ' DUPLICATE SUMMONERS DELETED'
    f.close()
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "Summoner_Data_Retrieval/DEPRECATED/Check_Duplicate_Summoners.py",
"copies": "1",
"size": "2112",
"license": "mit",
"hash": -4079504935165581000,
"line_mean": 25.4125,
"line_max": 80,
"alpha_frac": 0.5662878788,
"autogenerated": false,
"ratio": 3.597955706984668,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46642435857846676,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Deniz'
import re, argparse
def main():
    """Parse the -in argument and compute the win/loss percentage of the
    match-history file it points to."""
    parser = argparse.ArgumentParser(description='Attempt to generate X number'
                                                 ' of random summoners.')
    parser.add_argument('-in', metavar='i', type=str)
    args = parser.parse_args()
    # Look the argument up by name instead of vars(args).values()[0], which
    # relied on dict ordering (and .values() is not indexable on Python 3).
    # 'in' is a keyword, so attribute access is impossible; use vars().
    inputLocation = vars(args)['in']
    calcWinLoss(inputLocation)
# Given a txt file with 15 games worth of match history, calc win loss percentage
def calcWinLoss(inputLocation):
    """Return the win percentage (0..100) over the 15 games recorded in the
    text file at *inputLocation*.

    Each game line is expected to contain ``u'winner': True`` or
    ``u'winner': False`` followed by ``, u'``; lines without that pattern
    are skipped. Fixes over the original: f.readline() read a single line
    and the loop then iterated its characters (use the file's lines);
    integer division made the percentage 0 under Python 2 (use 15.0);
    the file is now closed via a with-block.
    """
    start = "u'winner': "
    end = ", u'"
    numWins = 0
    numLosses = 0
    with open(inputLocation, 'r') as f:
        for line in f:
            match = re.search("%s(.*)%s" % (start, end), str(line))
            if match is None:
                # Line without a winner field: ignore rather than crash.
                continue
            winner = match.group(1)
            if winner == "True":
                numWins += 1
            elif winner == "False":
                numLosses += 1
    winPercentage = (numWins / 15.0) * 100
    return winPercentage
# Given the ELO rating of two players, calculate the expected outcome using:
# 1 / 1 + 10^((rb - ra) / 400)
def calcExpectedOutcome(ra, rb):
    """Return the expected score (0..1) for the player rated *ra* against
    the player rated *rb*, per the standard ELO expectation formula.

    Float literals are used throughout so the division is never truncated:
    under Python 2 the original integer division collapsed the exponent to
    a whole number (and 1/(1+x) to 0) for typical integer ratings.
    """
    expectedOutcome = 1.0 / (1.0 + 10.0 ** ((rb - ra) / 400.0))
    return expectedOutcome
if __name__ == "__main__":
main() | {
"repo_name": "Murkantilism/LoL_API_Research",
"path": "WinLossPredictionModel/CalcWinLossELO.py",
"copies": "1",
"size": "1240",
"license": "mit",
"hash": 8893454287152562000,
"line_mean": 24.3265306122,
"line_max": 81,
"alpha_frac": 0.5862903226,
"autogenerated": false,
"ratio": 3.5632183908045976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46495087134045976,
"avg_score": null,
"num_lines": null
} |
__author__='Dennis Hafemann, https://github.com/dennishafemann/python-TerminalColors'
# Severals
ESCAPE_SEQUENCE="\033[%sm"
# Styles
RESET = 0
BOLD = 1
UNDERLINE = 4
BLINK = 5
REVERSE_VIDEO = 7
# Colors
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
def _createColoredString(*args):
    """
    Internal helper: build a terminal string from the mixed sequence in
    args[0]. Integer tokens are expanded to ANSI escape sequences; every
    other token is appended verbatim. A RESET escape is appended whenever
    any output was produced.
    """
    pieces = []
    for token in args[0]:
        if type(token) == int:
            pieces.append(ESCAPE_SEQUENCE % str(token))
        else:
            pieces.append(token)
    value = "".join(pieces)
    # If args produced anything, append reset so styling doesn't leak.
    if value:
        value += ESCAPE_SEQUENCE % str(RESET)
    return value
def cprint(*args):
    """
    Print the color-string generated from *args* straight to stdout.
    """
    colored = _createColoredString(args)
    print(colored)
def rcprint(*args):
    """
    Generate and return (rather than print) the color-string for *args*.
    """
    return _createColoredString(args)
def error(message):
    """
    Print *message* as a simple, small red error notice.
    """
    cprint(RED, message)
if __name__=='__main__':
# Example
# Prints directly
cprint('This text is', RED, ' red', RESET, ' and ', RED, BOLD, 'bold', RESET, '.')
# Prints via a variable
colorString=rcprint('This text is', RED, ' red', RESET, ' and ', RED, BOLD, 'bold', RESET, ', returned.')
print(colorString)
# Error
error('This is a simple, small error message.') | {
"repo_name": "danoan/image-processing",
"path": "ext/TerminalColors/__init__.py",
"copies": "1",
"size": "1442",
"license": "mit",
"hash": 388734929736581600,
"line_mean": 19.6142857143,
"line_max": 109,
"alpha_frac": 0.6151178918,
"autogenerated": false,
"ratio": 3.4170616113744074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4532179503174407,
"avg_score": null,
"num_lines": null
} |
"A metamorphosis client for python"
import socket
import struct
import sys
import time
import threading
from zkclient import ZKClient, zookeeper, watchmethod
from urlparse import urlparse
from threading import Timer
_DEAD_RETRY = 5 # number of seconds before retrying a dead server.
_SOCKET_TIMEOUT = 10 # number of seconds before sockets timeout.
_IDLE_TIMEOUT = 5 # number of seconds to consider connection is idle.
class _Error(Exception):
    """Generic metaq client error (bad arguments, no available partition, ...)."""
    pass
class _ConnectionDeadError(Exception):
    """Raised when the underlying broker connection is (or becomes) broken."""
    pass
class Partition:
    """A broker partition: a (broker id, partition number) pair."""

    def __init__(self, broker_id, partition):
        self.broker_id = broker_id
        self.partition = partition

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "{0}-{1}".format(self.broker_id, self.partition)

    def partition_comp(x, y):
        # Comparator used as list.sort(cmp=Partition.partition_comp):
        # order primarily by broker id, then by partition number.
        # (Python 2 only: relies on the builtin cmp().)
        if x.broker_id != y.broker_id:
            return cmp(x.broker_id, y.broker_id)
        return cmp(x.partition, y.partition)
def get_round_robin_selector():
    """
    Build and return a partition selector that cycles through the available
    partitions in round-robin order (state lives in a JavaInt counter
    captured by the closure).
    """
    counter = JavaInt()

    def round_robin_select(topic, partitions, msg):
        if not partitions:
            raise _Error("There is no available parition for topic %s right now" % (topic))
        return partitions[counter.increase_and_get() % len(partitions)]

    return round_robin_select
class Broker:
    """A metaq broker: its numeric id plus the URI it is reachable at."""

    def __init__(self, broker_id, broker_uri):
        self.broker_id = broker_id
        self.broker_uri = broker_uri

    def __repr__(self):
        return str(self)

    def __str__(self):
        return self.broker_uri
class Message:
    """A metaq message: a topic plus a payload, with an optional attribute."""

    def __init__(self, topic, data, attribute=None):
        """
        Create a new message with topic and data; attribute is not
        supported right now.
        @param topic: the topic of the message
        @param data: the payload of the message
        """
        self.id = None
        self.topic = topic
        self.data = data
        self.attribute = attribute
        self.flag = 0
        self.partition = -1

    def _encode_payload(self):
        # Without an attribute the payload is just the raw data. With one,
        # the big-endian attribute length and the attribute are prepended
        # and the low bit of the flag is set to mark its presence.
        if self.attribute is None:
            return self.data
        attr_len = struct.pack(">i", len(self.attribute))
        self.flag = self.flag & 0xFFFFFFFE | 1
        return "%s%s%s" % (attr_len, self.attribute, self.data)

    def encode(self, partition, opaque):
        """
        Encode message to a wire-format 'put' command.
        @param partition: the partition of the message will be sent
        @param opaque: request's opaque
        """
        payload = self._encode_payload()
        if payload is None:
            payload = ""
        return "put %s %d %d %d %d\r\n%s" % (self.topic, partition, len(payload), self.flag, opaque, payload)
class JavaInt:
    """A counter that wraps around like a signed 32-bit Java integer."""

    _MIN = -2147483648
    _MAX = 2147483647

    def __init__(self):
        # Start at the minimum so the full positive range is available.
        self.value = JavaInt._MIN

    def increase_and_get(self):
        """Increment the counter, wrapping back to _MIN at _MAX, and
        return the new value."""
        self.value += 1
        if self.value >= JavaInt._MAX:
            self.value = JavaInt._MIN
        return self.value
class HttpStatus:
    """HTTP-style status codes used by the broker's wire-protocol responses."""
    BadRequest = 400
    NotFound = 404
    Forbidden = 403
    Unauthorized = 401
    InternalServerError = 500
    ServiceUnavilable = 503
    GatewayTimeout = 504
    Success = 200
    Moved = 301
class Conn:
    """A single TCP connection to a broker with dead-host backoff and idle
    tracking (Python 2 socket code; note the `except X, e` syntax)."""

    def __init__(self, uri, dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT, idle_timeout=_IDLE_TIMEOUT, debug=True):
        self.uri = uri
        self.debug = debug
        self.socket = None
        self.fd = None                 # buffered file object over the socket
        self.dead_retry = dead_retry   # seconds to blacklist a dead server
        self.socket_timeout = socket_timeout
        self.deaduntil = 0             # timestamp until which the host is blacklisted
        self.idle_timeout = idle_timeout
        self.io_timestamp = 0          # time of last I/O, for idle detection
        self.connect()

    def _update_io_timestamp(self):
        # Record the last moment any I/O happened on this connection.
        self.io_timestamp = time.time()

    def _config_socket(self, s):
        # Disable Nagle's algorithm and apply the configured timeout.
        s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY,1)
        if hasattr(s, 'settimeout'):
            s.settimeout(self.socket_timeout)

    def send_msg(self,msg):
        """Send raw bytes; raises _ConnectionDeadError if no live socket."""
        if not self._get_socket():
            raise _ConnectionDeadError("Connection was broken:%s"% (self.uri))
        self._update_io_timestamp()
        self.socket.sendall(msg)

    def recv(self, rlen):
        """Read exactly *rlen* bytes from the buffered file object."""
        if not self._get_socket():
            raise _ConnectionDeadError("Connection was broken:%s"% (self.uri))
        self._update_io_timestamp()
        return self.fd.read(rlen)

    def readline(self):
        """Read one line; an empty string means the peer closed the socket."""
        if not self._get_socket():
            raise _ConnectionDeadError("Connection was broken:%s"% (self.uri))
        line = self.fd.readline()
        self._update_io_timestamp()
        if line == '':
            raise _ConnectionDeadError("Connection was broken:%s"% (self.uri))
        return line

    def _check_dead(self):
        # True while the host is still inside its blacklist window.
        if self.deaduntil and self.deaduntil > time.time():
            return True
        self.deaduntil = 0
        return False

    def _check_idle(self):
        # Dead connections count as idle so heartbeats will probe them.
        return self.deaduntil != 0 or time.time() - self.io_timestamp > self.idle_timeout

    def connect(self):
        """Ensure a socket exists; returns True on success."""
        if self._get_socket():
            return True
        return False

    def debuglog(self, str):
        # NOTE: parameter shadows the builtin `str`.
        if self.debug:
            sys.stderr.write("MessageProducer: %s\n" % str)

    def mark_dead(self, reason):
        """Blacklist this host for dead_retry seconds and drop the socket."""
        self.debuglog("%s: %s. marking dead." % (self.uri, reason))
        self.deaduntil = time.time() + self.dead_retry
        self.close()

    def _get_socket(self):
        """Return the live socket, connecting lazily; None if dead."""
        if self._check_dead():
            return None
        if self.socket:
            return self.socket
        s = socket.socket()
        self._config_socket(s)
        parse_rt=urlparse(self.uri)
        try:
            s.connect((parse_rt.hostname, parse_rt.port))
        except socket.timeout, msg:
            self.mark_dead("connect: %s" % msg)
            return None
        except socket.error, msg:
            if isinstance(msg, tuple): msg = msg[1]
            self.mark_dead("connect: %s" % msg[1])
            return None
        self.debuglog("Connect to %s successfully." % self.uri)
        self._update_io_timestamp()
        self.socket = s
        self.fd = s.makefile()
        return self.socket

    def close(self):
        # Drop both the socket and its buffered wrapper.
        if self.socket:
            self.socket.close()
            self.fd.close()
            self.socket = None
            self.fd = None
class SendResult:
    """Outcome of a send: success flag, target partition, offset, error."""

    def __init__(self, success, partition, offset, error=None):
        self.success = success
        self.partition = partition
        self.offset = offset
        self.error = error

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "SendResult[success=%s, partition=%s, offset=%s, error=%s]" % (self.success, self.partition, self.offset, self.error)
class MessageProducer:
    """Producer that discovers brokers/partitions for one topic via
    zookeeper and sends messages over per-broker Conn objects.

    Python 2 code (uses ``<>``, ``except X, e``). Thread-safety: broker
    bookkeeping is guarded by ``self._lock``; heartbeats run on a Timer.
    NOTE(review): the ``partition_selector=get_round_robin_selector()``
    default is evaluated once at import time, so producers created without
    an explicit selector share one round-robin counter -- confirm intended.
    """
    def __init__(self, topic, zk_servers="localhost:2181", partition_selector=get_round_robin_selector(), zk_timeout=10000,zk_root="/meta",
                 dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT, idle_timeout=_IDLE_TIMEOUT, debug=True):
        """
        Create a new message producer to send messages to metamorphosis broker
        @param topic: the topic to be sent by this producer
        @param zk_servers: the zookeeper server list,in the form of 'server1:port,server2:port...'
        @param partition_selector: the function to determin which partion the message will be sent
        @param zk_timeout: zookeeper timeout in mills,default is 10000
        @param zk_root: the metamorphosis broker root path in zookeeper,default is '/meta'
        @param dead_retry:number of seconds before retrying a blacklisted server. Default to 30 s.
        @param socket_timeout: timeout in seconds for all calls to a server. Defaults to 3 seconds.
        @param idle_timeout: timeout in seconds for marking connection is idle to send heartbeats,default is 5 seconds.
        @param debug: whether to debug producer,default is True.
        """
        self.partition_selector = partition_selector
        self.topic = topic
        self.debug = debug
        self.dead_retry = dead_retry
        self.socket_timeout = socket_timeout
        self.zk_root = zk_root
        self.zk_servers = zk_servers
        self.zk_timeout = zk_timeout
        self.idle_timeout = idle_timeout
        # Zookeeper paths where topics and broker ids are registered.
        self._broker_topic_path = "%s/brokers/topics" % (self.zk_root)
        self._broker_ids_of_path = "%s/brokers/ids" % (self.zk_root)
        self.zk = ZKClient(zk_servers, timeout=zk_timeout)
        self._opaque = JavaInt()        # request sequence numbers
        self._broker_dict = {}          # {broker_id: Broker}
        self._conn_dict = {}            # {broker_id: Conn}
        self._partition_list = []       # sorted list of Partition
        self._lock = threading.Lock()
        if self.topic is None:
            raise _Error("Topic is none")
        if self.zk_root is None:
            raise _Error("Zookeeper root path is none")
        if self.zk_servers is None:
            raise _Error("Zookeeper servers is none")
        if self.dead_retry is None or self.dead_retry < 0:
            raise _Error("Invalid dead retry times %s" % self.dead_retry)
        self._start_check_idle_timer()
        self._update_broker_infos()

    def _safe_zk_close(self):
        # Best-effort close; errors during teardown are deliberately ignored.
        try:
            if self.zk is not None:
                self.zk.close()
        except:
            pass

    def _safe_zk_get(self, path, count=0):
        "Get path's data from zookeeper in safe mode"
        # Retries up to 3 times, reconnecting the zk client between tries.
        try:
            return self.zk.get(path)
        except Exception, e:
            if count > 3:
                raise
            else:
                self._safe_zk_close()
                self.zk = ZKClient(self.zk_servers, timeout=self.zk_timeout)
                return self._safe_zk_get(path, count+1)

    def _safe_zk_get_children(self, path, watcher, count=0):
        "Get path's children from zookeeper in safe mode"
        # Same retry/reconnect strategy as _safe_zk_get.
        try:
            return self.zk.get_children(path, watcher)
        except Exception, e:
            if count > 3:
                raise
            else:
                self._safe_zk_close()
                self.zk = ZKClient(self.zk_servers, timeout=self.zk_timeout)
                return self._safe_zk_get_children(path, watcher, count+1)

    def _debug(self,msg):
        if self.debug:
            sys.stderr.write("[meta-producer-debug]:%s\n" % (msg))

    def _update_broker_infos(self):
        """ Update broker infos from zookeeper"""
        self._lock.acquire()
        try:
            self._debug("begin to update broker infos from zookeeper with topic %s" % (self.topic))
            # Re-registered as the zk watcher so changes re-trigger this method.
            @watchmethod
            def watcher(event):
                self._update_broker_infos()
            topic_path = "%s/%s"%(self._broker_topic_path, self.topic)
            children = self._safe_zk_get_children(topic_path, watcher)
            broker_dict = {}
            partition_list = []
            if children is not None:
                for child in children:
                    # Only master brokers ("<id>-m" children) are considered.
                    if child is not None and child.endswith("-m"):
                        broker_id = int(child[0:child.index("-m")])
                        broker = self._get_broker_by_id(broker_id)
                        partition_list.extend(self._get_parts(child, broker_id))
                        broker_dict[broker_id] = broker
            partition_list.sort(cmp=Partition.partition_comp)
            self._update_conn_dict(broker_dict)
            self._broker_dict = broker_dict
            self._partition_list = partition_list
            self._debug("New broker dict for topic %s:%s" % (self.topic, str(self._broker_dict)))
            self._debug("New partition list for topic %s:%s" % (self.topic, str(self._partition_list)))
            self._debug("End to update broker infos from zookeeper with topic %s" % (self.topic))
        finally:
            self._lock.release()

    def _update_conn_dict(self,new_broker_dict):
        # Reconcile live connections with the freshly-read broker set.
        for broker_id in self._broker_dict.keys():
            #broker is both in old dict and new dict
            if new_broker_dict.get(broker_id) <> None:
                #if broker uri changed
                if new_broker_dict.get(broker_id).broker_uri <> self._broker_dict.get(broker_id).broker_uri:
                    conn = self._conn_dict.get(broker_id)
                    #close old connection
                    if conn is not None:
                        del self._conn_dict[broker_id]
                        self._debug("Closing %s" % (conn.uri))
                        conn.close()
                    new_uri = new_broker_dict.get(broker_id).broker_uri
                    #connect to new broker
                    self._debug("connecting to %s" % (new_uri))
                    self._conn_dict[broker_id] = Conn(new_uri, self.dead_retry, self.socket_timeout, self.debug)
            else:
                #Broker is not in new dict,close it.
                conn = self._conn_dict.get(broker_id)
                if conn is not None:
                    del self._conn_dict[broker_id]
                    self._debug("Closing %s" % (conn.uri))
                    conn.close()
        for broker_id in new_broker_dict.keys():
            #A new broker,we must connect it.
            if self._broker_dict.get(broker_id) is None:
                new_uri = new_broker_dict.get(broker_id).broker_uri
                self._debug("connecting to %s" % (new_uri))
                self._conn_dict[broker_id] = Conn(new_uri, self.dead_retry, self.socket_timeout, self.debug)

    def _get_parts(self,child,broker_id):
        # The zk node's data holds the partition count for this broker/topic.
        n_parts = int(self._safe_zk_get("%s/%s/%s" % (self._broker_topic_path,self.topic,child))[0])
        rt = []
        for n in range(0, n_parts):
            rt.append(Partition(broker_id, n))
        return rt

    def _get_broker_by_id(self,broker_id):
        # Resolve the master URI registered for this broker id.
        broker_uri,_ = self._safe_zk_get("%s/%s/master" % (self._broker_ids_of_path, broker_id))
        return Broker(broker_id, broker_uri)

    def send(self,msg):
        """
        Send message to broker
        @param msg: message to be sent,it's topic must be equals to producer's topic,and it's data must not be none
        """
        if msg is None:
            raise _Error("Message is none")
        topic = msg.topic
        if topic <> self.topic:
            raise _Error("Expect topic %s,but was %s" % (self.topic, topic))
        data = msg.data
        if data == None:
            raise _Error("message data is none")
        # Pick a target partition, then the connection for its broker.
        partition = self.partition_selector(topic, self._partition_list, msg)
        if partition is None:
            raise _Error("There is no avaiable partition for topic %s" % (topic))
        conn = self._conn_dict.get(partition.broker_id)
        if conn is None:
            raise _Error("There is no avaiable server right now for topic % and partition %d" % (topic, partition.partition))
        opaque = self._opaque.increase_and_get()
        cmd = msg.encode(partition.partition,opaque)
        def _unsafe_send(cmd, message):
            # One attempt; network problems mark the connection dead and
            # are reported as a failed SendResult rather than raised.
            try:
                conn.send_msg(cmd)
                head = conn.readline()
                # Response header: "<tag> <status> <bodylen> <opaque>"
                _, status, bodylen, resp_opaque = head.split(" ")
                status = int(status)
                bodylen = int(bodylen)
                resp_opaque = int(resp_opaque)
                body = conn.recv(bodylen)
                if len(body) <> bodylen:
                    conn.mark_dead("Response format error,expect body length is %s,but is %s" % (bodylen,len(body)))
                    return SendResult(False, None, -1, error="network error")
                if resp_opaque <> opaque:
                    conn.mark_dead("Response opaque is not equals to request opaque")
                    return SendResult(False, None, -1, error="network error")
                if status == HttpStatus.Success:
                    # Success body: "<message id> <code> <offset>"
                    msg_id, _, offset = body.split(" ")
                    message.id = long(msg_id)
                    message.partition = partition
                    return SendResult(True, partition, long(offset))
                else:
                    return SendResult(False, None, -1, error=body)
            except (_Error, socket.error), msg:
                if isinstance(msg, tuple): msg = msg[1]
                conn.mark_dead(msg)
                return SendResult(False, None, -1, error=msg)
        try:
            return _unsafe_send(cmd, msg)
        except _ConnectionDeadError:
            # retry once
            try:
                if conn.connect():
                    return _unsafe_send(cmd, msg)
                return SendResult(False, None, -1, error="Connection was broken")
            except (_ConnectionDeadError, socket.error), msg:
                conn.mark_dead(msg)
                return SendResult(False, None, -1, error=msg)

    def _send_heartbeats(self):
        # Timer callback: probe every idle connection with a "version"
        # request; any protocol mismatch or error marks it dead.
        self._lock.acquire()
        try:
            for conn in self._conn_dict.values():
                if conn._check_idle():
                    try:
                        opaque = self._opaque.increase_and_get()
                        conn.send_msg("version %d\r\n" % opaque)
                        head = conn.readline()
                        _, status, bodylen, resp_opaque = head.split(" ")
                        status = int(status)
                        bodylen = int(bodylen)
                        resp_opaque = int(resp_opaque)
                        body = conn.recv(bodylen)
                        if len(body) <> bodylen:
                            conn.mark_dead("Response format error,expect body length is %s,but is %s" % (bodylen,len(body)))
                        if resp_opaque <> opaque:
                            conn.mark_dead("Response opaque is not equals to request opaque")
                        if status != HttpStatus.Success:
                            conn.mark_dead("Heartbeat failure")
                    except:
                        conn.mark_dead("Heartbeat failure")
        finally:
            # Re-arm the timer before releasing the lock.
            self._start_check_idle_timer()
            self._lock.release()

    def _start_check_idle_timer(self):
        # Heartbeat at half the idle timeout so idleness is caught in time.
        self.idle_timer = Timer(self.idle_timeout/2, self._send_heartbeats)
        self.idle_timer.start()

    def close(self):
        """ Close message producer"""
        if self.zk is not None:
            self.zk.close()
        self._lock.acquire()
        try:
            if self.idle_timer:
                self.idle_timer.cancel()
            for conn in self._conn_dict.values():
                conn.close()
            self._conn_dict = {}
        finally:
            self._lock.release()
if __name__ == '__main__':
p = MessageProducer("avos-fetch-tasks",zk_root="/avos-fetch-meta")
message = Message("avos-fetch-tasks","http://www.taobao.com")
print p.send(message)
p.close()
| {
"repo_name": "272029252/Metamorphosis",
"path": "contrib/python/meta-python/metaq/producer.py",
"copies": "13",
"size": "18962",
"license": "apache-2.0",
"hash": -4381477689598403600,
"line_mean": 37.3846153846,
"line_max": 139,
"alpha_frac": 0.5562704356,
"autogenerated": false,
"ratio": 3.988641144299537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dennis.lutter'
from functools import partial
import logging
from cachecontrol import CacheControl
import requests
from models import model_from_item
from models import TYPE_MAP
BASE_URL = "https://api-v2launch.trakt.tv"
logger = logging.getLogger("easytrakt")
class Client(object):
    """Minimal trakt.tv v2 API client over a cache-controlled requests
    session.

    For every model type in TYPE_MAP a convenience search shortcut is
    attached (e.g. ``client.shows("query")``) via functools.partial.
    """

    def __init__(self, session=None, verify_ssl=True, url=BASE_URL):
        session = session or requests.session()
        # Trakt requires the client id and API version on every request.
        # Header values must be strings: requests rejects non-str values,
        # so the version is the string "2" (was the int 2 -- bug fix).
        session.headers = {
            "Content-type": "application/json",
            "trakt-api-key": getattr(session, "client_id", ""),
            "trakt-api-version": "2"
        }
        self.session = CacheControl(session)
        self.logger = logger
        self.verify_ssl = verify_ssl
        self.base_url = url
        # Expose one pluralized search shortcut per model type.
        for type_ in TYPE_MAP:
            setattr(self, type_ + "s", partial(self.search, type=type_))

    def search(self, query, type=None, year=None):
        """Run a /search query, optionally filtered by type/year, and wrap
        each raw result item in its model class."""
        params = {
            "query": query,
            "type": type,
            "year": year
        }
        return [model_from_item(self, item)
                for item in self.request("search", params)]

    def request(self, uri_path, params=None):
        """GET ``{base_url}/{uri_path}``, raise for HTTP errors, return the
        decoded JSON body."""
        uri = "{}/{}".format(self.base_url, uri_path)
        self.logger.info("call to: %s with %s", uri, params)
        result = self.session.get(
            uri,
            params=params,
            verify=self.verify_ssl
        )
        result.raise_for_status()
        return result.json()
| {
"repo_name": "lad1337/easytrakt",
"path": "easytrakt/__init__.py",
"copies": "1",
"size": "1463",
"license": "unlicense",
"hash": 1210142257291646500,
"line_mean": 27.1346153846,
"line_max": 72,
"alpha_frac": 0.5789473684,
"autogenerated": false,
"ratio": 3.8,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.98789473684,
"avg_score": 0,
"num_lines": 52
} |
__author__ = 'dennis.lutter'
import models
from attrdict import AttrDict
from dateutil.parser import parse as date_parser
def attrdict(client, data, parent):
    # Generator hook (client, data, parent): wrap raw response data in an
    # attribute-accessible dict.
    return AttrDict(data)
def images(client, images, parent, expected=()):
    """Flatten the trakt ``images`` mapping ({type: {size: url}}) into an
    AttrDict of AttrDicts keyed the same way.

    Raises GeneratorExit when *expected* names image types that are not all
    present (exception type kept for backward compatibility with callers,
    although a ValueError would be more conventional).
    """
    if expected and not all(type_ in images for type_ in expected):
        # Fixed: the message previously passed `expected` as a second
        # exception argument instead of %-formatting it into the string.
        raise GeneratorExit(
            "not all expected image types found %s" % (expected,))
    out = AttrDict()
    for type_, sizes in images.items():
        if not sizes:
            # Preserve original behavior: no entry for an empty size map.
            continue
        out[type_] = AttrDict()  # hoisted: was re-checked on every size
        for size, url in sizes.items():
            out[type_][size] = url
    return out
def date(client, data, parent):
    """Generator hook: parse *data* into a datetime via dateutil; falsy
    input (None/empty string) stays None."""
    if not data:
        return None
    return date_parser(data)
def watchlist_items(client, items, parent):
    """Turn raw watchlist entries into model objects, attaching each
    entry's ``listed_at`` timestamp and registering the date generator so
    it is parsed lazily."""
    client.logger.debug("watchlist_items: %s", items)
    result = []  # renamed: the original local shadowed this function's name
    for item in items:
        model = models.model_from_item(client, item)
        model._data["listed_at"] = item["listed_at"]
        model.nested["listed_at"] = date
        client.logger.debug("watchlist_items model: %s", model)
        result.append(model)
    return result
| {
"repo_name": "lad1337/easytrakt",
"path": "easytrakt/generator.py",
"copies": "1",
"size": "1128",
"license": "unlicense",
"hash": -4717426986297931000,
"line_mean": 26.512195122,
"line_max": 67,
"alpha_frac": 0.6312056738,
"autogenerated": false,
"ratio": 3.76,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48912056738,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dennis'
from copy import deepcopy
import csv
from world import World
from log import Log
class SimulationSetting(object):
    """Everything that defines one simulation run: the condition name, its
    triggers (initial plus timed additions/removals), the entity factories,
    and the chosen controller per entity."""

    def __init__(self):
        self.condition = ""          # condition name
        self.initial_triggers = []   # triggers installed at the start
        self.trigger_additions = {}  # triggers added during the run
        self.trigger_removals = {}   # triggers removed during the run
        self.entities = {}           # {entity name: factory}
        self.controllers = {}        # {entity name: (label, factory)}
class SimulationSuite(object):
    """
    Make it easier to run several simulations in succession.
    Attributes
    ----------
    visualizer : Visualizer
        Visualizer that will be used for the simulations.
    simulations : [SimulationSettings]
    Methods
    -------
    run_simulations
        Runs all configured simulations.
    """
    def __init__(self):
        self.visualizer = None
        self.simulation_length = 10          # ticks per run
        self.constant_entities = {}          # {name: entity factory}
        self.controllers = {}                # {entity name: {label: factory}}
        self.initial_triggers = {}           # {condition: [trigger]}
        self.conditional_trigger_changes = {}  # {condition: {trig_cond: (add, remove)}}
        self.constant_data_collection = {}   # {attribute: column label}
        self.bins = -1                       # bin count for Log.make_bins

    def set_visualizer(self, visualizer):
        self.visualizer = visualizer

    def set_simulation_length(self, length):
        self.simulation_length = length

    def set_data_bins(self, bins):
        self.bins = bins

    def run_single(self, condition, trigger_condition, controllers):
        # Run the one setting matching the given condition/controllers.
        self.run_simulations(condition, trigger_condition, controllers)

    def run_multiple(self, condition, trigger_condition, controllers, n):
        # Repeat the same configuration n times, numbering the output files.
        for i in range(n):
            self.run_simulations(condition, trigger_condition, controllers, number=i)

    @staticmethod
    def make_average(name, n):
        """Average the n per-run '<name>_<i>_bins.csv' files cell-wise and
        write the result via Log.write_data as '<name>_avg'."""
        data_meta = {}
        c = []
        for i in range(n):
            f = open(name + "_{0}_bins".format(str(i)) + ".csv", "rt")
            try:
                # Skip header
                header = f.readline()
                c = [x.rstrip() for x in header.split(' ')[1:]]
                reader = csv.reader(f, delimiter=' ')
                current_data = []
                for row in reader:
                    current_data.append([int(x) for x in row[1:]])
                data_meta[i] = current_data
            finally:
                f.close()
        # Calculate totals and averages
        totals = deepcopy(data_meta[0])
        for i in range(1, n):
            for j in range(len(totals)):
                for k in range(len(c)):
                    totals[j][k] = totals[j][k] + data_meta[i][j][k]
        for i in range(len(totals)):
            for k in range(len(c)):
                totals[i][k] /= float(n)
        Log.write_data(name + "_avg", c, totals)

    def run_simulations(self, only_condition=None, only_trigger_condition=None, only_controllers=None, number=None):
        """Run every configured setting, skipping those that do not match
        the optional only_* filters, and collect data/bins per run.

        NOTE(review): settings built without conditional trigger changes
        never get a ``trigger_condition`` attribute, yet it is read below --
        confirm all conditions appear in conditional_trigger_changes.
        """
        for setting in self.create_all_settings():
            print "running {0}-{1}".format(setting.condition, setting.trigger_condition)
            # Skip if this is not one of the conditions we want to simulate
            if only_condition is not None and setting.condition != only_condition:
                print "skipping {0}".format(only_condition)
                continue
            if only_controllers is not None:
                is_match = True
                for e in only_controllers:
                    if only_controllers[e] != setting.controllers[e][0]:
                        is_match = False
                if not is_match:
                    print "skipping {0}, {1}".format(only_condition, only_controllers)
                    continue
            world = World(self.visualizer)
            file_name = setting.condition + "-" + setting.trigger_condition
            for entity_name in setting.entities:
                entity = setting.entities[entity_name]()
                if entity_name in setting.controllers:
                    entity.set_agent(setting.controllers[entity_name][1]())
                    file_name += "-{0}-{1}".format(entity_name, setting.controllers[entity_name][0])
                world.add_entity(entity)
            for trigger in setting.initial_triggers:
                world.add_trigger(*trigger)
            if only_trigger_condition is not None and setting.trigger_condition != only_trigger_condition:
                print "skipping {0}".format(only_trigger_condition)
                continue
            world.run(self.simulation_length, add_triggers=setting.trigger_additions, remove_triggers=setting.trigger_removals)
            log = world.log
            log.make_data(file_name, self.constant_data_collection, number)
            Log.make_bins(file_name, self.constant_data_collection.values(), self.bins, number)

    def create_all_settings(self):
        """Expand the cartesian product of conditions, trigger-change
        variants, and per-entity controllers into SimulationSettings."""
        settings = []
        updated_settings = []
        for condition in self.initial_triggers:
            setting = SimulationSetting()
            setting.condition = condition
            setting.initial_triggers = self.initial_triggers[condition]
            setting.entities = {}
            for entity_name in self.constant_entities:
                setting.entities[entity_name] = self.constant_entities[entity_name]
            if condition in self.conditional_trigger_changes:
                # One variant per trigger-change condition, cloned from a
                # common base so variants don't share mutable state.
                original = deepcopy(setting)
                for trigger_condition in self.conditional_trigger_changes[condition]:
                    setting = deepcopy(original)
                    setting.trigger_condition = trigger_condition
                    setting.trigger_additions.update(self.conditional_trigger_changes[condition][trigger_condition][0])
                    setting.trigger_removals.update(self.conditional_trigger_changes[condition][trigger_condition][1])
                    settings.append(setting)
            else:
                settings.append(setting)
        # Expand each setting once per controller choice per entity.
        for i in range(len(settings)):
            setting = settings.pop()
            for entity_name in setting.entities:
                for controller in self.controllers[entity_name]:
                    updated = deepcopy(setting)
                    updated.controllers[entity_name] = (controller, self.controllers[entity_name][controller])
                    updated_settings.append(updated)
        settings = updated_settings
        return settings

    def add_constant_entities(self, entities):
        """
        Parameters
        ----------
        entities : {string: function : Entity}
        """
        self.constant_entities.update(entities)
        for entity in self.constant_entities:
            self.controllers[entity] = {}

    def add_controllers(self, entity_name, controllers):
        """
        Parameters
        ----------
        entity_name : string
        controllers : {string; function : Controller}
        """
        self.controllers[entity_name].update(controllers)

    def add_constant_data_collection(self, attribute_names, column_labels):
        """
        Parameters
        ----------
        entity_name : string
        attribute_names : [string]
        column_labels : [string]
            Should be same size as attribute names list.
        """
        self.constant_data_collection.update(dict(zip(attribute_names, column_labels)))

    def add_initial_triggers(self, triggers):
        self.initial_triggers.update(triggers)

    def add_conditional_trigger_changes(self, changes):
        """
        Parameters
        ----------
        condition_name : string
        changes : {int: ([trigger], [trigger])}
            Lists for every condition, for every time what the triggers are that are
            added and removed respectively.
        """
        self.conditional_trigger_changes.update(changes)
| {
"repo_name": "Luciden/easl",
"path": "easl/simulation_suite.py",
"copies": "1",
"size": "7634",
"license": "mit",
"hash": -570171954082711200,
"line_mean": 33.5429864253,
"line_max": 127,
"alpha_frac": 0.5759758973,
"autogenerated": false,
"ratio": 4.541344437834622,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5617320335134622,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dennis'
from mechanism import Mechanism
from easl import *
from easl.visualize import *
import random
class NewSimpleVisual(Visual):
    """Visualizes a mechanism's motor-signal probabilities as trees.

    Builds one tree per motor signal; each value's weight is the total
    probability mass of all full signal combinations containing that value.
    """
    @staticmethod
    def visualize(self):
        # NOTE: called as self.visual.visualize(mechanism), so `self` here is
        # the mechanism being visualized, not the visual object itself.
        signals = self.motor_signals_and_domains
        # Start every action/value pair at zero weight.
        trees = {}
        for action in signals:
            trees[action] = dict.fromkeys(signals[action], 0.0)
        # Fold each full combination's probability into the pairs it contains.
        for combination in self.all_possibilities(signals):
            mass = self.probabilities.get_value(combination)
            for k, v in combination.iteritems():
                trees[k][v] += mass
        group = Group("simple")
        for action in trees:
            group.add_element(Tree(action, trees[action]))
        return group
class OperantConditioningMechanism(Mechanism):
    """ Reinforces motor signals that are followed by a 'reward' state.

    Attributes
    ----------
    observations : {name: value}
        Stores current, new, observations.
    rule : LearningRule
    action : (name, value)
        The action that was performed last.
    counts : {name: {value: int}}
        Maintains a count for any action/value pair.
    rewards : [(name, value)]
        List of sensory stimuli that are considered as rewarding.
    """
    def __init__(self, rewards):
        """
        Parameters
        ----------
        rewards : [(name, value)]
            List of sensory stimuli that are considered as rewarding.
        """
        super(OperantConditioningMechanism, self).__init__()
        self.visual = NewSimpleVisual()
        self.observations = {}
        self.rewards = rewards
        self.action = None
        # Joint probability table over all motor-signal combinations;
        # created in init_internal once the signal domains are known.
        self.probabilities = None
        # Valuation biases action selection; the default weights all
        # combinations equally.
        self.motor_signal_valuation = lambda x: 1.0
        self.motor_signal_bias = 1.0
        # Learning step sizes: reward increment, no-reward decrement, and the
        # floor below which a combination's probability is not pushed.
        self.delta_pos = 0.1
        self.delta_neg = 0.05
        self.min_probability = 0.01

    def init_internal(self, entity):
        """Set up a uniform probability table over all signal combinations."""
        super(OperantConditioningMechanism, self).init_internal(entity)
        # Initialize the probability table
        self.probabilities = utils.FullTable(self.motor_signals_and_domains)
        # Initialize with uniform distribution
        # Count total possibilities
        n = len(self.all_possibilities(self.motor_signals_and_domains))
        p = 1 / float(n)
        self.probabilities.map_function_over_all_values(lambda x: p)
        print self.probabilities.table

    def set_motor_signal_bias(self, valuation, bias):
        """Install a valuation function and bias that skew action selection."""
        self.motor_signal_valuation = valuation
        self.motor_signal_bias = bias

    def sense(self, observation):
        """Store one (name, value) observation; consumed in act()."""
        name, value = observation
        self.observations[name] = value

    def act(self):
        # Change the counts according to selected action and whether a
        # reward is present
        if self.action is not None:
            self.__update_probabilities(self.__got_reward())
        # Select a new action (max probability)
        self.action = self.__select_action()
        # The selected combination dict is flattened into (name, value) pairs.
        return [(x, y) for x, y in self.action.iteritems()]

    def __select_action(self):
        """
        Select the combination of actions with the maximum likelihood of
        resulting in a reward.
        """
        # Roulette-wheel draw weighted by probability times valuation.
        values = []
        total = 0.0
        print self.probabilities.table
        possibilities = self.all_possibilities(self.motor_signals_and_domains)
        for combination in possibilities:
            v = self.probabilities.get_value(combination) * self.motor_signal_valuation(combination)
            values.append(v)
            total += v
        r = random.random() * total
        cumulative = 0.0
        # NOTE(review): if floating point rounding leaves the final cumulative
        # just below r this falls through and returns None -- confirm callers
        # tolerate that.
        for i in range(len(values)):
            cumulative += values[i]
            if cumulative >= r:
                print "Selected {0}, which had probability {1}".format(possibilities[i], values[i] / float(total))
                return possibilities[i]

    def __update_probabilities(self, rewarded):
        # Shift the last action's probability up (reward) or down (no reward),
        # then renormalize the whole table by the net change.
        old = self.probabilities.get_value(self.action)
        new = 0
        # Change probability of one particular
        if rewarded:
            print "Rewarded"
            new = old + self.delta_pos
        else:
            new = max(old - self.delta_neg, self.min_probability)
        self.probabilities.set_value(self.action, new)
        # Renormalize
        self.__normalize(1.0 + (new - old))
        print "Old: {0}, New {1}, Normalized {2}".format(old, new, self.probabilities.get_value(self.action))

    def __normalize(self, new_total):
        # Divide every cell by the (pre-normalization) total mass.
        self.probabilities.map_function_over_all_values(lambda x: x / float(new_total))

    def __increase_probability(self, combination):
        # NOTE(review): reads the old value from self.action but writes the
        # incremented value to `combination` -- presumably `combination` was
        # intended on both lines; confirm before changing.
        old = self.probabilities.get_value(self.action)
        # Change probability of one particular
        new = old + self.delta_pos
        self.probabilities.set_value(combination, new)
        return new - old

    def __update_probabilities_subsets(self, rewarded):
        # Variant update: on reward, boost every combination sharing at least
        # one signal value with the performed action.
        if rewarded:
            print "Rewarded"
            new_total = 1.0
            for combination in self.all_possibilities(self.motor_signals_and_domains):
                match = False
                for k, v in self.action.iteritems():
                    if combination[k] == v:
                        match = True
                if match:
                    new_total += self.__increase_probability(combination)
            print "New {0}".format(new_total)
            self.__normalize(new_total)
        else:
            self.__update_probabilities(rewarded)

    def __got_reward(self):
        """
        Returns
        -------
        got_reward : boolean
            True if a rewarding stimulus is present, False otherwise.
        """
        for (name, value) in self.rewards:
            if name in self.observations and self.observations[name] == value:
                return True
        return False
| {
"repo_name": "Luciden/easl",
"path": "easl/mechanisms/operant_conditioning.py",
"copies": "1",
"size": "5827",
"license": "mit",
"hash": -7030115209243571000,
"line_mean": 30.1604278075,
"line_max": 114,
"alpha_frac": 0.5927578514,
"autogenerated": false,
"ratio": 4.243991260014567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5336749111414567,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dennis'
import itertools
class Mechanism(object):
    """ Abstract class for a (learning) mechanism to be used in the simulator.

    All mechanisms have a set of motor signals that can be sent, with the
    respective domains.

    Attributes
    ----------
    log : Log
    all_variables_and_domains : {str: [str]}
    motor_signals_and_domains : {str: [str]}
        Motor signal names and their possible values. Subset of all variables.
    sensory_variables_and_domains : {name: [value]}
        Sensory symbols and possible values. Subset of all variables.
    signals : {name: [value]}
        All signals and possible values that can be sensed by the entity.
    default_action : {name: value}
    default_signal : {name: value}
    """
    def __init__(self, visual=None):
        self.log = None
        self.visual = visual
        self.all_variables_and_domains = {}
        self.motor_signals_and_domains = {}
        self.sensory_variables_and_domains = {}
        self.signals = {}
        self.default_action = {}
        self.default_signal = {}

    def set_log(self, log):
        """Attach the Log used to record this mechanism's activity."""
        self.log = log

    def init_internal(self, entity):
        """
        Called at the beginning of simulation to initialize internal representation.

        Builds the motor/sensory variable bookkeeping from the given Entity.

        Parameters
        ----------
        entity : Entity
        """
        # Keep only the value domains of the entity's actions; the action
        # callbacks themselves are not needed here.
        self.motor_signals_and_domains = {}
        for name in entity.actions:
            self.motor_signals_and_domains[name] = entity.actions[name][1]
        # Collect signal domains and default signals from every sensor.
        collected = {}
        for sensor in entity.sensors:
            collected.update(sensor.signals)
            self.signals.update(sensor.signals)
            self.default_signal.update(sensor.default_signals)
        self.default_action = entity.default_action
        # Sensory variables are the entity's attributes plus sensed signals.
        self.sensory_variables_and_domains.update(entity.attribute_values)
        self.sensory_variables_and_domains.update(collected)
        # The full variable set is the union of motor and sensory variables.
        self.all_variables_and_domains.update(self.motor_signals_and_domains)
        self.all_variables_and_domains.update(self.sensory_variables_and_domains)

    def sense(self, observation):
        """
        Receive a part of the observable world.

        Called once for every piece of information in every time step,
        covering both the entity's own body and the external world.

        Parameters
        ----------
        observation : (name, value)
            name/value pair of what was observed.
        """
        raise NotImplementedError("Hmm.")

    def act(self):
        """
        Use the current internal model to decide which actions to perform.

        Returns
        -------
        []
            All actions that should be performed in this time step.
        """
        raise NotImplementedError("Hmm.")

    def visualize(self):
        """Create a Visualization from the attributes, or None without a visual."""
        if self.visual is None:
            return None
        return self.visual.visualize(self)

    @staticmethod
    def all_possibilities(actions):
        """Enumerate every full assignment of values to the given variables.

        Parameters
        ----------
        actions : {string: [string]}

        Returns
        -------
        [{string: string}]
        """
        names = list(actions)
        domains = [actions[name] for name in names]
        return [dict(zip(names, combo)) for combo in itertools.product(*domains)]
| {
"repo_name": "Luciden/easl",
"path": "easl/mechanisms/mechanism.py",
"copies": "1",
"size": "3534",
"license": "mit",
"hash": -5411551331783718000,
"line_mean": 29.4655172414,
"line_max": 115,
"alpha_frac": 0.6066779853,
"autogenerated": false,
"ratio": 4.490470139771284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002808665300753868,
"num_lines": 116
} |
__author__ = 'Dennis'
from controller import Controller
from easl.visualize import *
import random
class LearningRule(object):
    """Interface for rules that adjust action counts after a reward signal."""

    @staticmethod
    def update_counts(counts, action, has_reward):
        """Mutate `counts` in place to reflect that `action` was performed.

        Describes how the counts/probability change given that an action was
        contiguous with a reward; implementations change counts as a
        side-effect and return nothing.

        Parameters
        ----------
        counts : {name: {value: int}}
            See SimpleController.counts.
        action : (name, value)
        has_reward : bool
            True if the reward is present, False otherwise.
        """
        raise NotImplementedError("The interface method should be overridden.")
class ActionSelection(object):
    """Interface for strategies that pick an action from a count table."""

    @staticmethod
    def select_action(counts):
        """Select an action with the highest count.

        When multiple actions share the highest count, implementations
        select one of those at random.

        Parameters
        ----------
        counts : {name: {value: int}}

        Returns
        -------
        action : (name, value)
        """
        raise NotImplementedError("The interface method should be overridden.")
class SimpleLearningRule(LearningRule):
    """Rewarded actions gain a fixed bonus; unrewarded ones decay toward 1.

    Increments the performed action's count by 3 when and only when a reward
    followed it; otherwise decrements by 1, never dropping below 1.
    """

    @staticmethod
    def update_counts(counts, action, has_reward):
        name, value = action
        current = counts[name][value]
        if has_reward:
            # Reinforce: reward makes the action more likely to be reselected.
            counts[name][value] = current + 3
        else:
            # Extinguish, but keep a floor of 1 so the action can still
            # occasionally be selected.
            counts[name][value] = max(1, current - 1)
class SimpleSelection(ActionSelection):
    """Greedy selection: choose uniformly among the highest-count actions."""

    @staticmethod
    def select_action(counts):
        """Return an (action, value) pair with the maximal count.

        Ties are broken uniformly at random.

        Parameters
        ----------
        counts : {name: {value: int}}

        Returns
        -------
        (name, value)
        """
        # Single pass: track the best count seen and all pairs achieving it.
        # (Fix: the previous version also printed the number of tied choices
        # on every call -- leftover debug output, now removed.)
        choices = []
        best = None
        for action in counts:
            for value in counts[action]:
                c = counts[action][value]
                if best is None or c > best:
                    best = c
                    choices = [(action, value)]
                elif c == best:
                    choices.append((action, value))
        # Break ties uniformly at random.
        return random.choice(choices)
class BetterLearningRule(LearningRule):
    """Uses information that some actions might be related.

    On reward, the performed action's count grows by 3 and every other value
    of the same action symbol grows by 2. Without reward, the performed
    action shrinks by 2 and its siblings by 1, each floored at 1.
    """

    @staticmethod
    def update_counts(counts, action, has_reward):
        name, value = action
        siblings = [v for v in counts[name] if v != value]
        if has_reward:
            # Increase the probability of choosing the specific action again,
            # and spill some credit onto related actions of the same type.
            counts[name][value] += 3
            for v in siblings:
                counts[name][v] += 2
        else:
            # Decrease the performed action the most, related actions less;
            # never drop a count below 1.
            counts[name][value] = max(1, counts[name][value] - 2)
            for v in siblings:
                counts[name][v] = max(1, counts[name][v] - 1)
class RouletteWheelSelection(ActionSelection):
    """Fitness-proportionate selection over action counts.

    Each (action, value) pair occupies a slice of a roulette wheel whose
    width is proportional to its count; a uniform spin picks the winner.
    NOTE(review): when floating point rounding leaves the top threshold
    just below the spun value this returns None; the caller (see
    SimpleController.act) is expected to cope with that.
    """

    @staticmethod
    def select_action(counts):
        # Total mass on the wheel.
        total = 0
        for action in counts:
            for value in counts[action]:
                total += counts[action][value]
        # Map each cumulative fraction of the total to the pair owning
        # the slice that ends there.
        wheel = {}
        running = 0
        for action in counts:
            for value in counts[action]:
                weight = counts[action][value]
                if weight == 0:
                    # Zero-count pairs get no slice at all.
                    continue
                running += weight
                wheel[running / float(total)] = (action, value)
        # Spin, then return the first slice whose upper threshold exceeds it.
        spin = random.random()
        lower = 0.0
        for threshold in sorted(wheel):
            if lower <= spin < threshold:
                return wheel[threshold]
class SimpleVisual(Visual):
    """Renders a controller's action counts as a single tree in a group."""

    @staticmethod
    def visualize(self):
        # `self` is the controller being visualized (see SimpleController.visual).
        group = Group("simple")
        group.add_element(Tree("counts", self.counts))
        return group
class SimpleController(Controller):
    """
    Learns based on operant conditioning.

    Probability of action increases if action is followed
    by reinforcer.

    Attributes
    ----------
    observations : {name: value}
        Stores current, new, observations.
    rule : LearningRule
    action : (name, value)
        The action that was performed last.
    counts : {name: {value: int}}
        Maintains a count for any action/value pair.
    rewards : [(name, value)]
        List of sensory stimuli that are considered as rewarding.
    """
    def __init__(self, rewards):
        """
        Parameters
        ----------
        rewards : [(name, value)]
            List of sensory stimuli that are considered as rewarding.
        """
        super(SimpleController, self).__init__()
        self.visual = SimpleVisual()
        self.rule = SimpleLearningRule()
        self.selection = RouletteWheelSelection()
        self.observations = {}
        self.rewards = rewards
        self.action = None
        self.counts = {}

    def init_internal(self, entity):
        """Initialize counts for every action/value pair and pick a first action."""
        super(SimpleController, self).init_internal(entity)
        # Every action/value pair starts at count 1 so each gets a non-zero
        # slice on the roulette wheel.
        for action in self.actions:
            self.counts[action] = {}
            for value in self.actions[action]:
                self.counts[action][value] = 1
        # All counts are equal, so this first selection is effectively uniform.
        self.action = self.selection.select_action(self.counts)

    def sense(self, observation):
        """Store one (name, value) observation; consumed in act()."""
        name, value = observation
        self.observations[name] = value

    def act(self):
        # Change the counts according to selected action and whether a
        # reward is present
        self.rule.update_counts(self.counts, self.action, self.__got_reward())
        # Select a new action (max probability)
        self.action = self.selection.select_action(self.counts)
        # Roulette selection may return None (rounding fall-through);
        # fall back to a uniformly random action.
        if self.action is None:
            a = random.choice(self.actions.keys())
            v = random.choice(self.actions[a])
            self.action = (a, v)
        return [self.action]

    def __got_reward(self):
        """
        Returns
        -------
        got_reward : boolean
            True if a rewarding stimulus is present, False otherwise.
        """
        for (name, value) in self.rewards:
            if name in self.observations and self.observations[name] == value:
                return True
        return False
| {
"repo_name": "Luciden/easl",
"path": "easl/controller/simple_controller.py",
"copies": "1",
"size": "7956",
"license": "mit",
"hash": -3741950987237589000,
"line_mean": 30.2186234818,
"line_max": 91,
"alpha_frac": 0.554927099,
"autogenerated": false,
"ratio": 4.764071856287425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00004353315049410126,
"num_lines": 247
} |
__author__ = 'Dennis'
from copy import copy
class Entity(object):
    """
    The basic component in the simulation.

    An Entity can perform actions and be acted on itself, and it can observe
    It can observe other Entities.

    An Entity is a self-contained unit and should not have any references
    directly (in its Physical State) to other Entities in a possible World.
    If an Entity has a reference at all, it is one that is in its Internal
    State, grounded in experience through its Senses.

    Actions are local to Entities: They change their internal (physical)
    state.
    The consequences of this, among with the consequences of the entity's
    physics, are used to have interactions between Entities.

    Attributes
    ----------
    name : string
        A name for identifying purposes (for example, in the log).
    log : Log
        Log to use to document changes in this Entity.
    attributes : {name: value}
        The attributes constituting the physical representation of the Entity.
    attribute_values : {name: []}
        List of possible values for every attribute.
    sensors : [Sensor]
    observations : {name: value}
    physics : function
        a function that changes the state using only the state's
        attributes
    emission : function
        Returns a list of signals to be emitted in this frame, based on the
        Entity's internal state.
    actions : {name: (function, [value])}
        All possible actions identified by their name, with the function that
        describes how its parameters influence the internal state,
        a list/generator of all possible values.
    default_action : {name: value}
        A default action that is considered to be equivalent to the absence
        of the action.
    events : {name: function(old, new)}
        Specifies for every attribute what events it triggers when it changes.
        The functions return an event.
        An event is a tuple of (name, {name: value}) of event name and its
        parameters name/value pairs.
    triggers : {name: function(self, ...)}
        callback functions that change the attributes when called
    mechanisms : Agent
    motor_signal_queue : [(name, value)]
        All action/parameter pairs that are queued to be executed.
        Both name and its parameter name/value pairs are provided.
    """
    def __init__(self, name, agent=None, visual=None):
        self.name = name
        self.log = None
        self.attributes = {}
        # Short alias for the attribute dict, used by action/trigger callbacks.
        self.a = self.attributes
        self.attribute_values = {}
        self.sensors = []
        self.observations = {}
        # Default physics/emission do nothing until set via the setters.
        self.physics = lambda x: None
        self.emission = lambda x: []
        self.actions = {}
        self.default_action = {}
        self.events = {}
        self.triggers = {}
        self.agent = agent
        self.visual = visual
        self.motor_signal_queue = []
        self.signal_queue = []
        self.event_queue = []

    def start(self):
        """
        Called when the experiment starts.
        """
        if self.agent is not None:
            self.agent.init_internal(self)

    def try_change(self, attribute, value):
        """
        Checks to see if setting the specified attribute's value is different from the
        current value, sets the attribute and notifies.

        Parameters
        ----------
        attribute : string
        value : value

        Returns
        -------
        bool
            True if the attribute changes, False otherwise
        """
        # The emission function is obscure.
        # When attributes change, the modality these attributes are in should
        # determine whether events/signals are sent or not.
        if self.a[attribute] != value:
            old = self.a[attribute]
            self.a[attribute] = value
            # Call the event for this change
            event = None
            if self.events[attribute] is not None:
                event = self.events[attribute](old, value)
            self.log.do_log("event", {"name": self.name, "attribute": attribute, "old": old, "new": value})
            # Queue the resulting event for later dispatch, if any.
            if event is not None:
                e, params = event
                self.event_queue.append((attribute, e, params))
            return True
        return False

    def set_log(self, log):
        """
        Parameters
        ----------
        log : Log
            Log to use. Also forwarded to the agent, if present.
        """
        self.log = log
        if self.agent is not None:
            self.agent.set_log(log)

    def add_observation(self, observation):
        # Merge a {name: value} dict into the pending observations.
        self.observations.update(observation)

    def queue_motor_signals(self):
        """
        Queues actions to be executed by consulting associated Agent, if available.

        See Also
        --------
        easl.mechanisms.Agent.act : Functionality delegated to Agent.
        """
        if self.agent is None:
            self.motor_signal_queue = []
            return
        # pass all observations to mechanisms and have it convert to internal representation
        for observation in self.observations:
            self.log.do_log("observation",
                            {"entity": self.name, "observation": observation, "value": self.observations[observation]})
            self.agent.sense((observation, self.observations[observation]))
        self.observations = {}
        # Also add internal representation as observations
        for observation in self.attributes:
            self.log.do_log("observation",
                            {"entity": self.name, "observation": observation, "value": self.attributes[observation]})
            self.agent.sense((observation, self.attributes[observation]))
        # ask mechanisms to give actions
        self.motor_signal_queue = self.agent.act()

    def add_attribute(self, name, initial_value, values, event):
        """
        Parameters
        ----------
        name : string
            Name to identify the attribute by.
        initial_value : value
            Any value that the attribute is set to when the experiment begins.
        values : [value]
            All possible values for this attribute.
        event : function(old, new) : (name, value)
            Function that is called when the attribute changes.
            The function receives the old and new values and should return an
            event, i.e. a name and value pair.
        """
        self.attributes[name] = initial_value
        self.attribute_values[name] = values
        self.events[name] = event

    def add_action(self, name, values, default, f):
        """
        Adds an action to the possible actions.

        Defining Actions:
        name, [{paramname: [values]}], function

        Parameters
        ----------
        name : string
            name the action will be identified/called by
        values : [values]
            Possible values for this action.
        default : value
            Default value to be used when the action is absent.
            Considered to be equivalent to doing no action.
        f : function
            callback that is called for an entity when the action is performed
        """
        self.actions[name] = (f, values)
        self.default_action[name] = default

    def add_sensor(self, sensor):
        # The sensor writes its interpretations into this entity's
        # observations dict.
        sensor.set_observations(self.observations)
        self.sensors.append(sensor)

    def add_trigger(self, name, trigger):
        """
        A Trigger changes the Entity's internal state if a match for a
        cause was found.
        """
        self.triggers[name] = trigger

    def set_physics(self, physics):
        self.physics = physics

    def set_agent(self, agent):
        self.agent = agent

    def set_emission(self, emission):
        self.emission = emission

    def execute_actions(self):
        """
        Calls all queued actions and clears the queue.
        """
        while len(self.motor_signal_queue) > 0:
            name, value = self.motor_signal_queue.pop(0)
            self.log.do_log("action", {"entity": self.name, "name": name, "value": value})
            # Action callbacks receive the entity itself plus the value.
            parameters = {"self": self, "value": value}
            self.actions[name][0](**parameters)

    def emit_signals(self):
        # Ask the emission function which signals to send this frame and
        # queue them all.
        emitting = self.emission(self)
        for signal in emitting:
            self.log.do_log("emission", {"entity": self.name, "name": signal.sig_type, "value": signal.value})
            self.signal_queue.append(signal)

    def get_queued_signals(self):
        """
        Pass all the queued signals so far and clear the queue.
        """
        signals = copy(self.signal_queue)
        self.signal_queue = []
        return signals

    def call_trigger(self, name, params):
        # Unknown trigger names are silently ignored.
        if name in self.triggers:
            self.log.do_log("trigger", {"name": name})
            params["self"] = self
            self.triggers[name](**params)

    def is_active(self):
        """
        If the entity performs any actions, i.e. has an associated mechanisms.
        """
        return self.agent is not None

    def measure(self):
        """
        Log all attribute values as a single measurement entry.
        """
        measurement = copy(self.attributes)
        measurement["entity"] = self.name
        self.log.do_log("measurement", measurement)

    def visualize(self):
        """
        Creates a Visualization from the attributes.

        :return: the visual's result, or None if no visual is set.
        """
        if self.visual is not None:
            return self.visual.visualize(self)
        else:
            return None

    def visualize_agent(self):
        # Returns None implicitly when there is no agent.
        if self.agent is not None:
            return self.agent.visualize()
| {
"repo_name": "Luciden/easl",
"path": "easl/entity.py",
"copies": "1",
"size": "9968",
"license": "mit",
"hash": 3993041888301522400,
"line_mean": 31.6756756757,
"line_max": 119,
"alpha_frac": 0.5733346709,
"autogenerated": false,
"ratio": 4.6820103334899015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5755345004389901,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dennis'
from copy import deepcopy
import itertools
class Table(object):
    """
    Given a set of variables and respective domains, this data structure provides
    read/write access to a value assigned to each full combination of all variables.

    For N variables, each with K values, this means a total of K^N values.
    """
    def __init__(self, column_names):
        """
        Parameters
        ----------
        column_names : {str: [str]}
            Names of the columns that make up the table and a list of all
            possible values for each.
        """
        self._column_names = column_names

    def get_value(self, row):
        """Return the value stored for the given full row.

        Parameters
        ----------
        row : {str: str}
            Pairs of column name/value; every column must be specified.
        """
        raise NotImplementedError()

    def set_value(self, row, value):
        """Store `value` for the given full row.

        Every column's value must be specified in `row`.
        """
        raise NotImplementedError()
class FullTable(Table):
    """Dense table: a cell for every combination of column values.

    Implemented as nested dicts keyed by column value, one level per column
    in sorted name order, with the (alphabetically) last column's values at
    the leaves. All cells start at 0.

    Attributes
    ----------
    table : {name: {name: ... {name: value} ... }}
    order : [name]
        Sorted traversal order of all columns except the last.
    last : name
        The final column, whose values key the leaf dicts.
    """
    def __init__(self, column_names):
        super(FullTable, self).__init__(column_names)
        # Sort the column names so the nesting order is predictable.
        self.order = sorted(self._column_names)
        self.last = self.order.pop()
        self.table = self.__build(self.order)

    def __build(self, remaining):
        # Recursively construct the nested-dict tree; leaves hold zeroes.
        if not remaining:
            return dict.fromkeys(self._column_names[self.last], 0)
        subtree = {}
        for value in self._column_names[remaining[0]]:
            subtree[value] = self.__build(remaining[1:])
        return subtree

    def set_value(self, row, value):
        """Store `value` in the cell addressed by the full row."""
        node = self.table
        for name in self.order:
            if name not in row:
                raise IndexError("There is no variable {0} in this Table".format(name))
            node = node[row[name]]
        node[row[self.last]] = value

    def get_value(self, row):
        """Return the cell addressed by the full row.

        Parameters
        ----------
        row : {name: value}
        """
        node = self.table
        for name in self.order:
            node = node[row[name]]
        return node[row[self.last]]

    def increment_value(self, vals):
        """Add 1 to the cell addressed by the full row.

        Parameters
        ----------
        vals : {name: value}
        """
        # Walk down to the leaf dict, then bump the addressed cell.
        node = self.table
        for name in self.order:
            node = node[vals[name]]
        node[vals[self.last]] += 1

    def map_function_over_all_values(self, f):
        """
        Perform function f(x) on every element.

        Parameters
        ----------
        f : function x: f(x)
        """
        self.__map_all(f, self.table, self.order)

    def __map_all(self, f, node, remaining):
        # At the leaf level, rewrite every stored value through f;
        # otherwise descend into each branch.
        if not remaining:
            for key in node:
                node[key] = f(node[key])
        else:
            for key in node:
                self.__map_all(f, node[key], remaining[1:])
class SparseTable(Table):
    # Sparse variant of Table: nested-dict paths are only materialised when a
    # row is first written, and reading an absent row yields 0.
    def __init__(self, column_names):
        super(SparseTable, self).__init__(column_names)
        self.table = {}
        self._column_names = column_names
        # Fixed, sorted traversal order of the columns; the nested dicts are
        # keyed per column in this order, with `last` keying the leaf dicts.
        self.order = list(self._column_names.keys())
        self.order.sort()
        self.last = self.order.pop()

    def get_variables(self):
        """Return a copy of the column name/domain mapping."""
        return self._column_names.copy()

    def __make_entry(self, entry):
        # Create the nested-dict path for `entry`, initialising its cell to 0.
        current = self.table
        for name in self.order:
            if entry[name] in current:
                current = current[entry[name]]
                continue
            else:
                current[entry[name]] = {}
                current = current[entry[name]]
                continue
        current[entry[self.last]] = 0

    def set_value(self, row, value):
        """Store `value` for `row`, creating the path if necessary."""
        current = self.table
        for name in self.order:
            if row[name] not in current:
                self.__make_entry(row)
                # try again, but now the entry exists
                self.set_value(row, value)
                return
            current = current[row[name]]
        if row[self.last] not in current:
            self.__make_entry(row)
            self.set_value(row, value)
            return
        else:
            current[row[self.last]] = value

    def inc_value(self, vals):
        """Add 1 to the cell for `vals`, creating it (at 1) if absent.

        Parameters
        ----------
        vals : {name: value}
        """
        # Go down the path taking the turn appropriate for the value in the
        # entry.
        # Then increment.
        current = self.table
        for name in self.order:
            if vals[name] not in current:
                self.__make_entry(vals)
                # A freshly created cell starts at 0, so set it to 1 directly.
                self.set_value(vals, 1)
                return
            current = current[vals[name]]
        if vals[self.last] not in current:
            self.__make_entry(vals)
            self.set_value(vals, 1)
            return
        else:
            current[vals[self.last]] += 1

    def get_value(self, row):
        """Return the value stored for `row`, or 0 if it was never set.

        Parameters
        ----------
        row : {name: value}
        """
        current = self.table
        for name in self.order:
            if row[name] not in current:
                return 0
            current = current[row[name]]
        if row[self.last] not in current:
            return 0
        else:
            v = current[row[self.last]]
            return v

    def get_nonzero_entries(self):
        # NOTE(review): this walks only materialised paths, but at the leaf
        # level the helper returns after the FIRST value it sees -- presumably
        # all leaf values were meant to be collected. Confirm against callers.
        return self.__get_nonzero_entries_rec(self.table, self.order, {})

    def __get_nonzero_entries_rec(self, current, order, entry):
        if len(order) == 0:
            for value in current:
                new_entry = deepcopy(entry)
                new_entry[self.last] = value
                return [new_entry]
        else:
            entries = []
            for value in current:
                new_entry = deepcopy(entry)
                new_entry[order[0]] = value
                entries += self.__get_nonzero_entries_rec(current[value], order[1:], new_entry)
            return entries

    def do_operation(self, f):
        """
        Perform function f(x) on every element.

        Parameters
        ----------
        f : function x: f(x)
        """
        self.__do_operation_rec(f, self.table, self.order)

    def __do_operation_rec(self, f, current, order):
        # Rewrite leaf values through f; recurse into branches otherwise.
        if len(order) == 0:
            for value in current:
                current[value] = f(current[value])
        else:
            for value in current:
                self.__do_operation_rec(f, current[value], order[1:])
class SparseConditionalTable(SparseTable):
    """Sparse table that also remembers which conditional rows were ever set."""

    def __init__(self, column_names, conditional):
        super(SparseConditionalTable, self).__init__(column_names)
        self.conditional = conditional
        # Tracks, per combination of the conditional variables, whether any
        # value has ever been stored for it.
        self.conditional_table = SparseTable(conditional)

    def set_value(self, row, value):
        super(SparseConditionalTable, self).set_value(row, value)
        # Mark this conditional combination as having data.
        condition_row = {}
        for k, v in row.iteritems():
            if k in self.conditional.keys():
                condition_row[k] = v
        self.conditional_table.set_value(condition_row, True)

    def has_data(self, conditional):
        """Return truthy if any value was ever stored under this condition."""
        return self.conditional_table.get_value(conditional)
class Distribution(SparseTable):
    """A sparse probability distribution over a set of variables.

    Attributes
    ----------
    _column_names : {name: [value]}
        Variables and for each a list of their possible values.
    """
    def __init__(self, column_names, freq=None):
        """
        Parameters
        ----------
        column_names : {name: [value]}
        freq : SparseTable, optional
            When given, the distribution is initialised as a deep copy of
            this table's contents instead of starting empty.

        Raises
        ------
        RuntimeError
            If freq is given but is not a SparseTable.
        """
        if freq is None:
            super(Distribution, self).__init__(column_names)
        elif isinstance(freq, SparseTable):
            # Deep-copy so this distribution is independent of the source table.
            self._column_names = deepcopy(freq._column_names)
            self.order = deepcopy(freq.order)
            self.last = deepcopy(freq.last)
            self.table = deepcopy(freq.table)
        else:
            raise RuntimeError("Not a Table.")

    def __eq__(self, other):
        """Structural equality: same variables, same domains, same values."""
        if not isinstance(other, Distribution):
            return False
        # Check if variables and values are the same
        for variable in self._column_names:
            if variable not in other._column_names:
                return False
            # FIX: compare this distribution's domain against the OTHER
            # distribution's domain. Previously both sides read from `self`,
            # so differing domains for a shared variable were never detected.
            if set(self._column_names[variable]) != set(other._column_names[variable]):
                return False
        # Check if all values are the same, over every full combination.
        values = []
        for variable in self.order:
            values.append(self._column_names[variable])
        values.append(self._column_names[self.last])
        for combination in list(itertools.product(*values)):
            vals = {}
            for i in range(len(self.order)):
                vals[self.order[i]] = combination[i]
            vals[self.last] = combination[-1]
            if self.get_value(vals) != other.get_value(vals):
                return False
        return True

    def set_prob(self, vals, p):
        """
        Parameters
        ----------
        vals : dict
            pairs of variable name/value. should contain all variables in the
            distribution.
        p : number
            probability of the situation
        """
        self.set_value(vals, p)

    def prob(self, vals):
        """Return the probability of a full variable assignment.

        Parameters
        ----------
        vals : {name: value}
        """
        return self.get_value(vals)

    def single_prob(self, variable, value):
        """
        Get the probability for a partial specification with only one variable
        (i.e. marginalize over all other variables).

        Parameters
        ----------
        variable : string
            Name of variable to get probability for.
        value : string
        """
        return self.partial_prob({variable: value})

    def partial_prob(self, vals):
        """Return the marginal probability of a partial assignment.

        Raises
        ------
        IndexError
            If any named variable is not part of this distribution.
        """
        # Check that every specified variable belongs to this distribution.
        for variable in vals:
            if variable not in self.order and variable != self.last:
                raise IndexError("Variable {0} is not in this distribution.".format(variable))
        return self.__partial_prob_rec(vals, self.table, self.order)

    def __partial_prob_rec(self, vals, current, order):
        """
        Calculates the probability of a set of variable=value pairs.

        By summing all branches of other variables and then only taking the
        probabilities of the branches that go through the variable=value branch.
        """
        p = 0
        if len(order) > 0:
            # If we found the variable
            if order[0] in vals:
                # Take only the branch with the specified value for that variable
                if vals[order[0]] in current:
                    p += self.__partial_prob_rec(vals, current[vals[order[0]]], order[1:])
                else:
                    # The branch was never materialised: zero mass.
                    return 0.0
            # If the variable is one we do not have a particular value for
            else:
                # Sum the probabilities of all possible values for that variable
                for branch in current:
                    p += self.__partial_prob_rec(vals, current[branch], order[1:])
        # When we're at the end (with the values)
        else:
            # If this is the variable
            if self.last in vals:
                # Only take the number for the value that the variable was set to
                if vals[self.last] in current:
                    p += current[vals[self.last]]
            # If its a different variable
            else:
                # Sum over all the values
                for value in current:
                    p += current[value]
        return p
| {
"repo_name": "Luciden/easl",
"path": "easl/utils/probability.py",
"copies": "1",
"size": "13224",
"license": "mit",
"hash": -101876953644176930,
"line_mean": 28.0545454545,
"line_max": 114,
"alpha_frac": 0.5106624319,
"autogenerated": false,
"ratio": 4.653061224489796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004076305846691072,
"num_lines": 440
} |
__author__ = 'Dennis'
from log import Log
from visualize import *
class Sensor(object):
    """
    Base class for sensors attached to an Entity.

    Attributes
    ----------
    observations
        Reference to the observations store of the Entity owning this
        Sensor; None until set_observations is called.
    signals : {name: [value]}
        The signals (and their possible values) this Sensor can sense.
    default_signals : {name: [value]}
        Default signal definitions.
    """

    def __init__(self):
        self.observations = None
        self.signals = {}
        self.default_signals = {}
        # Let the concrete sensor declare its signals last, so it can
        # fill the dictionaries created above.
        self.init()

    def init(self):
        """
        Declare the signals and signal values this Sensor can sense.

        Concrete subclasses must override this.
        """
        raise NotImplementedError()

    def set_observations(self, observations):
        """
        Attach the dictionary the Sensor writes interpreted observations to.

        Args:
            observations: a dictionary that the Sensor can use to put
                interpreted observations in.
        """
        self.observations = observations

    def detects_modality(self, modality):
        """Return True if this sensor picks up signals of the given modality."""
        return False
class Signal(object):
    """
    A single observable signal emitted by an entity.

    Attributes
    ----------
    modality : string
        Describes the modality that this signal is in.
    sig_type : string
        An abstract description of what this signal represents.
    value : value
        The value associated with the type.
    values : []
        All possible values this signal can have.
    """

    def __init__(self, modality, sig_type, value, values):
        self.values = values
        self.value = value
        self.sig_type = sig_type
        self.modality = modality
class World(object):
    """
    Handles and arranges Entities and handles interactions between any
    observable event and its observer(s).

    Describing a World consists of describing the Entities in it and the
    relations between those Entities.

    Part is based on the RegionalSenseManager from "Artificial Intelligence for
    Games" while ignoring some parts as the representation used in this simulation
    is a kind of 'distanceless' representation.
    In other words, only the essentials.

    Differences with RegionalSenseManager:
     * no distances.
     * no notification queue, since all notifications are handled immediately.
     * signals are added in the beginning phase of a frame and sent at the end
       phase, which means all signals can be sent when all entities have been
       processed.

    Attributes
    ----------
    entities : {name: Entity}
        all entities in the world identified by name
    triggers : [(string, string, string, string)]
        The connections between entities that link actions and triggers.
        Causing entity name, attribute name, event name, affected entity name.
    log : Log
        Created fresh at the start of every run().
    time : int
        Current iteration number of the running simulation.
    queued_signals : [(string, Signal)]
        All queued signals with the names of the entities that will receive
        them. (The original docstring called this attribute ``signals``.)
    visualizer : Visualizer or None
        Optional visualizer that is updated once per iteration.
    """
    def __init__(self, visualizer=None):
        self.entities = {}
        self.triggers = []
        self.log = None
        self.time = 0
        self.queued_signals = []
        self.visualizer = visualizer
        if self.visualizer is not None:
            # Give the visualizer a back-reference so key bindings can
            # add/remove triggers on this world.
            self.visualizer.set_world(self)

    def run(self, iterations=10, remove_triggers=None, add_triggers=None):
        """
        Runs the simulation once with the currently specified Entities
        and relations between them.

        Parameters
        ----------
        iterations : int
            Number of simulation steps to perform.
        remove_triggers : {int: [(string, string, string, string)]}
            For every defined time step, the triggers to be removed.
        add_triggers : {int: [(string, string, string, string)]}
            For every defined time step, the triggers to be added.
        """
        if remove_triggers is None:
            remove_triggers = {}
        if add_triggers is None:
            add_triggers = {}
        self.log = Log()
        self.log.set_verbose()
        # Initialize initial states of all entities, including agents
        for e in self.entities:
            self.entities[e].set_log(self.log)
            self.entities[e].start()
        for i in range(iterations):
            self.time = i
            self.log.time_tick(i)
            # Fixed phase order: physics, events, perception, action,
            # measurement. Later phases rely on the earlier ones.
            self.__do_physics()
            self.__trigger_events()
            self.__queue_signals()
            self.__send_signals()
            self.__queue_motor_signals()
            self.__execute_actions()
            self.__measure_entities()
            # Apply any scheduled trigger changes for this time step.
            if i in remove_triggers:
                for (a, b, c, d) in remove_triggers[i]:
                    self.remove_trigger(a, b, c, d)
            if i in add_triggers:
                for (a, b, c, d) in add_triggers[i]:
                    self.add_trigger(a, b, c, d)
            if self.visualizer is not None:
                self.visualizer.reset_visualization()
                self.visualizer.update_visualization(Number("time", self.time))
                self.visualizer.update_visualization(List("triggers", self.triggers))
                entity_group = Group("entities")
                agent_group = Group("agents")
                for entity in self.entities:
                    # Get visualizations from current state of entities
                    entity_group.add_element(self.entities[entity].visualize())
                    # Get visualizations from current state of agents
                    agent_group.add_element(self.entities[entity].visualize_agent())
                # Update the actual screen with all visualizations
                self.visualizer.update_visualization(entity_group)
                self.visualizer.update_visualization(agent_group)
                self.visualizer.update(i)

    def add_entity(self, entity):
        # Entities are keyed by their own name attribute.
        self.entities[entity.name] = entity

    def has_trigger(self, causing, attribute, event, affected):
        """Return the index of the matching trigger, or None if absent."""
        for i in range(len(self.triggers)):
            c, att, e, aff = self.triggers[i]
            if c == causing and att == attribute and e == event and aff == affected:
                return i
        return None

    def add_trigger(self, causing, attribute, event, affected):
        """
        Add a trigger, unless an identical one already exists.

        Parameters
        ----------
        causing : string
            Name of the Entity that caused the event.
        attribute : string
            Name of the attribute of the Entity that caused the event.
        event : string
            Name of the type of event that occurred.
        affected : string
            Name of the Entity that is affected by the event.
        """
        if self.has_trigger(causing, attribute, event, affected) is None:
            self.triggers.append((causing, attribute, event, affected))

    def remove_trigger(self, causing, attribute, event, affected):
        """Remove the matching trigger if it exists; no-op otherwise."""
        i = self.has_trigger(causing, attribute, event, affected)
        if i is not None:
            del self.triggers[i]

    def __do_physics(self):
        """
        Calls all Entities' physics method.
        """
        for entity in self.entities:
            # NOTE(review): each entity's physics callback is passed the
            # entity itself — presumably physics is a free function stored
            # on the entity rather than a bound method; confirm.
            self.entities[entity].physics(self.entities[entity])

    def __queue_signals(self):
        """
        Takes all signals that were queued to be emitted and sends queues them
        to be sent to the appropriate receivers.
        """
        for sender in self.entities:
            # First see if it still emits more signals.
            self.entities[sender].emit_signals()
            for signal in self.entities[sender].get_queued_signals():
                # Fan out to every receiver with a sensor for this modality.
                for receiver in self.entities:
                    for sensor in self.entities[receiver].sensors:
                        if sensor.detects_modality(signal.modality):
                            self.queued_signals.append((receiver, signal))

    def __send_signals(self):
        """
        Add the queued signals as observations to the appropriate entities.
        """
        # Drain the queue completely; it is shared across iterations.
        while len(self.queued_signals) > 0:
            receiver, signal = self.queued_signals.pop(0)
            self.entities[receiver].add_observation({signal.sig_type: signal.value})

    def __queue_motor_signals(self):
        """
        Makes all Entities prepare their motor signals.

        The querying and execution phase of the actions should be separated,
        because actions have effects on the Entities' attributes and all
        actions should be selected at the same point in time.
        """
        for entity in self.entities:
            self.entities[entity].queue_motor_signals()

    def __execute_actions(self):
        """
        Executes all actions
        """
        for entity in self.entities:
            self.entities[entity].execute_actions()

    def __trigger_events(self):
        """Dispatch every queued event to the entities its triggers affect."""
        for cause in self.entities:
            # Drain each entity's event queue.
            while len(self.entities[cause].event_queue) > 0:
                attribute, event, params = self.entities[cause].event_queue.pop(0)
                # Find all entities that are triggered by this event
                for (causer, causer_attribute, caused_event, affected) in self.triggers:
                    if causer == cause and causer_attribute == attribute and caused_event == event:
                        self.entities[affected].call_trigger(event, params)

    def __measure_entities(self):
        """
        Logs all entities' attributes to be used for analysis.
        """
        for entity in self.entities:
            self.entities[entity].measure()
| {
"repo_name": "Luciden/easl",
"path": "easl/world.py",
"copies": "1",
"size": "9362",
"license": "mit",
"hash": 2998502860909007000,
"line_mean": 33.1954887218,
"line_max": 99,
"alpha_frac": 0.5702841273,
"autogenerated": false,
"ratio": 4.818322182192486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5888606309492487,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dennis'
from visualizer import *
import easl
import sys
import pygame
import math
class PyGameVisualizer(Visualizer):
    """
    Visualizer implementation that renders the simulation state with pygame.

    Each visualization element (Slider, Number, Grid, Tree, Rows, Group,
    List, Dict, Circle, Graph) is drawn onto its own Surface, which the
    container drawers compose and finally blit to the screen in update().
    """
    BG_COLOR = (0, 0, 0)       # background (black)
    FG_COLOR = (255, 255, 255) # foreground/text (white)
    OBJ_COLOR = (196, 0, 0)    # highlighted objects, e.g. graph nodes (red)

    def __init__(self):
        super(PyGameVisualizer, self).__init__()
        pygame.init()
        self.world = None
        self.size = 1600, 900
        self.screen = pygame.display.set_mode(self.size)
        self.font = pygame.font.SysFont("monospace", 11)
        # step: render one frame then pause; paused: stay in the pause loop.
        self.step = False
        self.paused = False
        # dt: delay between frames in milliseconds; adjustable at runtime.
        self.parameters = {"dt": 100}
        self.keys = ["space: pause/unpause the simulation",
                     "s: step once and pause"]
        self.limbs = ["left-hand", "right-hand", "left-foot", "right-foot"]
        self.selected_limb = "right-foot"
        # Last Graph visualization drawn and its rendered surface
        # (kept so it can be saved to disk each frame).
        self.graph = None
        self.graph_surface = None

    def set_world(self, world):
        """Attach the World so key bindings can add/remove its triggers."""
        self.world = world

    def reset_visualization(self):
        """Clear collected visualizations and re-add the static UI elements."""
        super(PyGameVisualizer, self).reset_visualization()
        # Add a keymap
        self.visualizations.add_element(List("Key Bindings", self.keys))
        # Add the currently set parameters
        self.visualizations.add_element(Dict("Parameters", self.parameters))
        self.visualizations.add_element(Number("Selected limb:", self.selected_limb))

    def update(self, iteration):
        """Draws all the current visualizations to the screen.

        Also saves the current graph surface to figure_<iteration>.png
        and handles pause/step state for this frame.
        """
        self.screen.fill(PyGameVisualizer.BG_COLOR)
        self.screen.blit(self.__draw_visualization(self.visualizations), (0, 0))
        pygame.display.flip()
        pygame.time.delay(self.parameters["dt"])
        if self.graph is not None:
            #if iteration == 24 or iteration == 40 or iteration == 60 or iteration == 135 or iteration == 230:
            #    pygame.image.save(self.graph_surface, "figure_{0}.png".format(iteration))
            # NOTE(review): saves an image every frame; the commented-out
            # guard above suggests this was meant for selected frames only.
            pygame.image.save(self.graph_surface, "figure_{0}.png".format(iteration))
        if self.step:
            # A single step was requested: render once, then pause again.
            self.step = False
            self.__pause()
        else:
            self.__handle_keys()
            if self.paused:
                self.__pause()

    def __handle_keys(self):
        """Process pending pygame events and apply the key bindings."""
        for event in pygame.event.get():
            # Quit on window close or Escape.
            if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN:
                # Space toggles pause.
                if event.key == pygame.K_SPACE:
                    if self.paused:
                        self.paused = False
                        self.step = False
                    else:
                        self.paused = True
                        self.step = False
                # 's' advances a single frame and pauses.
                if event.key == pygame.K_s:
                    self.step = True
                    self.paused = True
                # Up/down adjust the inter-frame delay.
                if event.key == pygame.K_UP:
                    self.parameters["dt"] += 100
                if event.key == pygame.K_DOWN:
                    self.parameters["dt"] -= 100
                # '=' / '-' connect / disconnect the selected limb
                # to/from the mobile.
                if event.key == pygame.K_EQUALS:
                    self.world.add_trigger("infant", "{0}-position".format(self.selected_limb), "movement", "mobile")
                if event.key == pygame.K_MINUS:
                    self.world.remove_trigger("infant", "{0}-position".format(self.selected_limb), "movement", "mobile")
                # '0' makes the selected limb the only one connected.
                if event.key == pygame.K_0:
                    for limb in self.limbs:
                        if limb == self.selected_limb:
                            self.world.add_trigger("infant", "{0}-position".format(self.selected_limb), "movement", "mobile")
                        else:
                            self.world.remove_trigger("infant", "{0}-position".format(limb), "movement", "mobile")
                # 1-4 select which limb subsequent bindings act on.
                if event.key == pygame.K_1:
                    self.selected_limb = "left-hand"
                if event.key == pygame.K_2:
                    self.selected_limb = "right-hand"
                if event.key == pygame.K_3:
                    self.selected_limb = "left-foot"
                if event.key == pygame.K_4:
                    self.selected_limb = "right-foot"
                # 'v' opens the graph's own visualization, if any.
                if event.key == pygame.K_v:
                    if self.graph is not None:
                        self.graph.visualize()

    def __pause(self):
        """Block, polling for key events, until unpaused or stepped."""
        while self.paused and not self.step:
            self.__handle_keys()

    def visualize_log(self, log):
        """
        Replay a recorded log frame by frame as slider visualizations.

        Parameters
        ----------
        log : Log
            The data to visualize.
        """
        # Get the information from entities
        length = log.get_length()
        running = True
        time = 0
        while running and time < length:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
            now = log.get_at_time(time)
            self.screen.fill((0, 0, 0))
            # Get all measurements
            grid = Grid("infant", 2, 2)
            for m in now:
                if m["_type"] == "measurement":
                    if m["entity"] == "infant":
                        # Map limb positions to slider notches.
                        positions = {"up": 0, "middle": 1, "down": 2}
                        grid.add_element(Slider("left-hand-position", 3, positions[m["left-hand-position"]]), 0, 0)
                        grid.add_element(Slider("right-hand-position", 3, positions[m["right-hand-position"]]), 0, 1)
                        grid.add_element(Slider("left-foot-position", 3, positions[m["left-foot-position"]]), 1, 0)
                        grid.add_element(Slider("right-foot-position", 3, positions[m["right-foot-position"]]), 1, 1)
            self.screen.blit(self.__draw_grid(grid), (0, 0))
            time += 1
            pygame.display.flip()
            pygame.time.delay(self.parameters["dt"])

    def __draw_visualization(self, v):
        """Dispatch to the drawer for the concrete visualization type.

        Returns a pygame.Surface; raises RuntimeError on unknown types.
        """
        if isinstance(v, Slider):
            return self.__draw_slider(v)
        elif isinstance(v, Number):
            return self.__draw_number(v)
        elif isinstance(v, Grid):
            return self.__draw_grid(v)
        elif isinstance(v, Tree):
            return self.__draw_tree(v)
        elif isinstance(v, Rows):
            return self.__draw_rows(v)
        elif isinstance(v, Group):
            return self.__draw_group(v)
        elif isinstance(v, List):
            return self.__draw_list(v)
        elif isinstance(v, Dict):
            return self.__draw_dict(v)
        elif isinstance(v, Circle):
            return self.__draw_circle(v)
        elif isinstance(v, Graph):
            return self.__draw_graph(v)
        else:
            raise RuntimeError("Unknown type")

    def __draw_slider(self, slider):
        """Draw a labeled vertical slider with the block at slider.position."""
        slider_width = 4
        block_width = 32
        length = slider.number * block_width
        block_y = (length / float(slider.number)) * slider.position
        # See how long the name is and adjust width accordingly
        name = self.font.render(slider.name, 1, PyGameVisualizer.FG_COLOR)
        width, name_height = name.get_size()
        if width < block_width:
            width = block_width
        surface = pygame.Surface((width, name_height + length))
        surface.fill(PyGameVisualizer.BG_COLOR)
        surface.blit(name, (0, 0))
        # Draw the slider base
        pygame.draw.rect(surface, PyGameVisualizer.FG_COLOR,
                         [width / 2 - slider_width / 2, name_height, slider_width, length])
        # Draw the slider block
        pygame.draw.rect(surface, PyGameVisualizer.FG_COLOR,
                         [width / 2 - block_width / 2, name_height + block_y, block_width, block_width])
        return surface

    def __draw_number(self, number):
        """Render a 'name: value' line of text."""
        return self.font.render("{0}: {1}".format(number.name, number.number), 1, PyGameVisualizer.FG_COLOR)

    def __draw_grid(self, grid):
        """Draw grid cells in uniform cells sized to the largest element."""
        # Make new grid of surfaces
        surface_grid = [[None for _ in range(grid.w)] for _ in range(grid.h)]
        # Find out which part is the biggest
        max_width = 0
        max_height = 0
        # NOTE(review): surface_grid is built as h rows of w columns but is
        # indexed [x][y] with x in range(grid.w) — this only works when the
        # grid is square (as in visualize_log's 2x2 use); confirm.
        for x in range(grid.w):
            for y in range(grid.h):
                surface_grid[x][y] = self.__draw_visualization(grid.element_at(x, y))
                width, height = surface_grid[x][y].get_size()
                max_width = max(max_width, width)
                max_height = max(max_height, height)
        # Blit accordingly
        surface = pygame.Surface((grid.w * max_width, grid.h * max_height))
        surface.fill(PyGameVisualizer.BG_COLOR)
        for x in range(grid.w):
            for y in range(grid.h):
                surface.blit(surface_grid[x][y], (x * max_width, y * max_height))
        return surface

    def __draw_tree(self, tree, indent=0):
        """Recursively render a Tree/dict as indented lines of text."""
        if isinstance(tree, Tree):
            # Unwrap the Tree container and draw its root dict.
            return self.__draw_tree(tree.tree)
        # Main case: for all branches, make the indented rest
        elif isinstance(tree, dict):
            # Make all branches
            branches = []
            for name in tree:
                branches.append(self.font.render(indent * "  " + name, 1, PyGameVisualizer.FG_COLOR))
                branches.append(self.__draw_tree(tree[name], indent + 1))
            # Find largest one and prepare surface
            max_width = 0
            total_height = 0
            for branch in branches:
                width, height = branch.get_size()
                max_width = max(max_width, width)
                total_height += height
            # Draw all branches to the surface and return
            surface = pygame.Surface((max_width, total_height))
            surface.fill(PyGameVisualizer.BG_COLOR)
            y = 0
            for branch in branches:
                _, height = branch.get_size()
                surface.blit(branch, (0, y))
                y += height
            return surface
        # Base case: just print the value
        else:
            return self.font.render(indent * "  " + str(tree), 1, PyGameVisualizer.FG_COLOR)

    def __draw_group(self, group):
        """Lay out the group's elements horizontally with a small margin."""
        # Draw every element, take its size and draw the next after it
        elements = []
        x = 0
        max_y = 0
        margin = pygame.Surface((8, 1))
        # Find the dimensions of the surface
        for element in group.get_elements():
            elements.append(margin)
            x += margin.get_width()
            e = self.__draw_visualization(element)
            elements.append(e)
            w, h = e.get_size()
            x += w
            max_y = max(max_y, h)
        # Blit to surface
        surface = pygame.Surface((x, max_y))
        surface.fill(PyGameVisualizer.BG_COLOR)
        x = 0
        for e in elements:
            w, h = e.get_size()
            surface.blit(e, (x, 0))
            x += w
        return surface

    def __draw_list(self, lst):
        """Render the list's elements stacked vertically, one per line."""
        max_width = 0
        total_height = 0
        elements = []
        for element in lst.elements:
            e = self.font.render(str(element), 1, self.FG_COLOR)
            max_width = max(max_width, e.get_width())
            total_height += e.get_height()
            elements.append(e)
        surface = pygame.Surface((max_width, total_height))
        surface.fill(PyGameVisualizer.BG_COLOR)
        y = 0
        for e in elements:
            surface.blit(e, (0, y))
            y += e.get_height()
        return surface

    def __draw_dict(self, dct):
        """Render the dict as stacked 'key: value' lines."""
        max_width = 0
        total_height = 0
        elements = []
        for element in dct.elements:
            e = self.font.render("%s: %s" % (str(element), str(dct.elements[element])), 1, self.FG_COLOR)
            max_width = max(max_width, e.get_width())
            total_height += e.get_height()
            elements.append(e)
        surface = pygame.Surface((max_width, total_height))
        surface.fill(PyGameVisualizer.BG_COLOR)
        y = 0
        for e in elements:
            surface.blit(e, (0, y))
            y += e.get_height()
        return surface

    def __draw_circle(self, circle):
        """Draw a filled circle whose radius scales with circle.v."""
        delta_v = 8
        d_max = 2 * circle.v_max * delta_v
        # NOTE(review): d_max / 2 is a float under Python 3; pygame expects
        # integer center coordinates — confirm the targeted Python version.
        center_x = center_y = d_max / 2
        radius = delta_v * circle.v
        surface = pygame.Surface((d_max, d_max))
        surface.fill(self.BG_COLOR)
        pygame.draw.circle(surface, self.FG_COLOR, (center_x, center_y), radius)
        return surface

    def __draw_graph(self, graph):
        """Draw a directed graph: nodes as circles, edges with arrow dots.

        With no groups a fixed column layout for the infant-limb nodes is
        used; otherwise nodes are placed per-group with arc layouts.
        """
        self.graph = graph.graph
        node_radius = 16
        spacing = 6 * node_radius  # Distance between adjacent nodes' centers
        vertical_spacing = 3 * node_radius
        coordinates = {}
        # If no groups were given, do a column layout
        if graph.groups is None:
            max_columns = 6
            positions = dict()
            """ Old, 'intuitive' layout
            positions["left-hand-position_previous"] = (0, 0)
            positions["right-hand-position_previous"] = (6, 0)
            positions["left-foot-position_previous"] = (0, 6)
            positions["right-foot-position_previous"] = (6, 6)
            positions["left-hand-position_current"] = (1, 1)
            positions["right-hand-position_current"] = (5, 1)
            positions["left-foot-position_current"] = (1, 5)
            positions["right-foot-position_current"] = (5, 5)
            positions["left-hand_previous"] = (0, 1)
            positions["right-hand_previous"] = (5, 0)
            positions["left-foot_previous"] = (6, 3)
            positions["right-foot_previous"] = (3, 6)
            positions["left-hand_current"] = (1, 0)
            positions["right-hand_current"] = (6, 1)
            positions["left-foot_current"] = (6, 3)
            positions["right-foot_current"] = (3, 6)
            positions["movement_previous"] = (4, 2)
            positions["movement_current"] = (2, 4)
            """
            # Hard-coded grid positions (column, row) for the known nodes.
            positions["left-hand_previous"] = (0, 5)
            positions["right-hand_previous"] = (0, 6)
            positions["left-foot_previous"] = (0, 7)
            positions["right-foot_previous"] = (0, 8)
            positions["left-hand-position_previous"] = (1, 1)
            positions["right-hand-position_previous"] = (1, 2)
            positions["left-foot-position_previous"] = (1, 3)
            positions["right-foot-position_previous"] = (1, 4)
            positions["left-hand_current"] = (3, 5)
            positions["right-hand_current"] = (3, 6)
            positions["left-foot_current"] = (3, 7)
            positions["right-foot_current"] = (3, 8)
            positions["left-hand-position_current"] = (4, 1)
            positions["right-hand-position_current"] = (4, 2)
            positions["left-foot-position_current"] = (4, 3)
            positions["right-foot-position_current"] = (4, 4)
            positions["movement_previous"] = (5, 5)
            positions["movement_current"] = (5, 7)
            # Distribute node positions and store coordinates
            height = 0
            width = 0
            for node in positions:
                gx, gy = positions[node]
                x = node_radius + gx * spacing
                y = node_radius + gy * vertical_spacing
                coordinates[node] = (x, y)
                if y > height:
                    height = y
                if x > width:
                    width = x
            height += node_radius
            width += spacing
        # If groups were given, do arc layouts
        else:
            left_width = 0
            top_height = 0
            right_width = 0
            bottom_height = 0
            layout = None
            n_group = 0
            # NOTE(review): n_group is never incremented in this loop, so
            # every group takes the n_group == 0 branch — looks like a bug;
            # confirm whether `n_group += 1` is missing at the loop's end.
            for group in graph.groups:
                n = len(group)
                if n_group == 0:
                    layout = easl.utils.Graph.arc_layout(n)
                    left_width = top_height = n * 2 * node_radius + (n - 1) * spacing
                elif n_group == 1:
                    layout = easl.utils.Graph.flipped_layout_both(layout, n)
                    right_width = bottom_height = n * 2 * node_radius + (n - 1) * spacing
                elif n_group == 2:
                    layout = easl.utils.Graph.flipped_layout_vertical(layout, n)
                    right_width = max(right_width, n * 2 * node_radius + (n - 1) * spacing)
                    top_height = max(top_height, n * 2 * node_radius + (n - 1) * spacing)
                elif n_group == 3:
                    layout = easl.utils.Graph.flipped_layout_both(layout, n)
                    left_width = max(left_width, n * 2 * node_radius + (n - 1) * spacing)
                    bottom_height = max(bottom_height, n * 2 * node_radius + (n - 1) * spacing)
                for node in group:
                    grid_x, grid_y = layout.pop()
                    coordinates[node] = (node_radius + grid_x * spacing, node_radius + grid_y * spacing)
            width = left_width + right_width
            height = top_height + bottom_height
        surface = pygame.Surface((width, height))
        surface.fill(self.BG_COLOR)
        # Draw edges from stored coordinates
        for a, b in graph.edges:
            ax, ay = coordinates[a]
            bx, by = coordinates[b]
            pygame.draw.line(surface, self.FG_COLOR, coordinates[a], coordinates[b])
            # Draw arrow heads: a small dot where the edge meets the target
            # node's rim (special-cased for purely vertical/horizontal edges).
            if ax == bx:
                cx = bx
                cy = by - node_radius if by > ay else by + node_radius
            elif ay == by:
                cx = bx - node_radius if bx > ax else bx + node_radius
                cy = by
            else:
                dx = ax - bx
                dy = ay - by
                angle = math.atan2(-dy, dx)
                delta_x = node_radius * math.cos(angle)
                delta_y = node_radius * math.sin(angle)
                cx = int(bx + delta_x)
                cy = int(by - delta_y)
            pygame.draw.circle(surface, self.FG_COLOR, (cx, cy), 4)
        # Draw the nodes (over the edges) with their labels.
        for node in coordinates:
            x, y = coordinates[node]
            pygame.draw.circle(surface, self.OBJ_COLOR, (x, y), node_radius)
            name = self.font.render(node, 1, self.FG_COLOR)
            surface.blit(name, (x, y))
        # Keep the rendered surface so update() can save it to disk.
        self.graph_surface = surface
        return surface

    def __draw_rows(self, rows):
        """Lay out the rows' elements vertically with a small margin."""
        # Draw every element, take its size and draw the next after it
        elements = []
        h = 0
        max_w = 0
        margin = pygame.Surface((8, 1))
        # Find the dimensions of the surface
        for element in rows.get_elements():
            elements.append(margin)
            h += margin.get_height()
            e = self.__draw_visualization(element)
            elements.append(e)
            max_w = max(max_w, e.get_width())
            h += e.get_height()
        # Blit to surface
        surface = pygame.Surface((max_w, h))
        surface.fill(PyGameVisualizer.BG_COLOR)
        y = 0
        for e in elements:
            surface.blit(e, (0, y))
            y += e.get_height()
        return surface
| {
"repo_name": "Luciden/easl",
"path": "easl/visualize/pygame_visualizer.py",
"copies": "1",
"size": "19586",
"license": "mit",
"hash": 4669480660601376000,
"line_mean": 33.9376146789,
"line_max": 125,
"alpha_frac": 0.5014295926,
"autogenerated": false,
"ratio": 4.055912197142265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000611899033969321,
"num_lines": 545
} |
__author__ = 'Dennis'
import random
from copy import copy, deepcopy
from easl.controller import Controller
from easl.utils import stat
from easl.visualize import *
class WorkingMemory(object):
    """
    "The working memory module holds a collection of time-labeled
    predicates describing the rat's current perceptions and actions
    and those of the recent past."

    Attributes
    ----------
    memory : {number: ([predicates], [predicates])}
        Age, sensory predicates, and action predicates at the current,
        and recent, instant(s). Age 0 is the current instant.
    actions : [name]
        List of names of predicates that are actions.
    oldest : int
        Age of the oldest entry still held in memory.
    """
    # Temporal tags used in conjunctions.
    NOW = 0
    PREV = -1
    FUT = 1

    def __init__(self, actions=None):
        if actions is None:
            actions = []
        # Entries older than this are forgotten when aging.
        self._MAX_AGE = 8
        # Tuple indices into a memory entry.
        self._SENSORY = 0
        self._ACTION = 1
        self.oldest = 0
        self.actions = actions
        self.memory = {}
        self.__init_now()

    def __init_now(self):
        # Fresh (sensory, action) pair for the current instant, t=0.
        self.memory[0] = ([], [])

    def add_sensory(self, predicate):
        """
        Adds a sensory predicate at the current instant.
        """
        self.memory[0][self._SENSORY].append(predicate)

    def add_action(self, action):
        """
        Adds an action predicate at the current instant.
        """
        self.memory[0][self._ACTION].append(action)

    def age(self):
        """
        Updates all memory units' age by one time step.

        Entries that would exceed the maximum age are forgotten.
        """
        # Shift every entry up one step, going from the oldest (maximum i)
        # to the newest (m[i+1] = m[i]); the maximum-age entry is dropped.
        for i in range(len(self.memory) - 1, -1, -1):
            if i == self._MAX_AGE:
                # Delete the entry from memory
                del self.memory[i]
            else:
                self.memory[i + 1] = self.memory[i]
        # BUG FIX: `self.oldest = i + 1` previously ran inside the loop, so
        # it always ended at 1 (the final iteration has i == 0) and
        # get_of_age() reported older-but-present entries as empty.
        # Track the true maximum age instead, capped at _MAX_AGE.
        self.oldest = min(self.oldest + 1, self._MAX_AGE)
        self.__init_now()

    def get_oldest(self):
        """Return the age of the oldest entry still in memory."""
        return self.oldest

    def get_of_age(self, age):
        """
        Get all predicates of the specified age.

        Returns
        -------
        [Predicate]
            Action predicates followed by sensory predicates; empty when
            nothing that old is remembered.
        """
        if age > self.oldest:
            return []
        predicates = []
        predicates.extend(self.memory[age][self._ACTION])
        predicates.extend(self.memory[age][self._SENSORY])
        return predicates

    def has_predicate(self, age, predicate):
        """Return True if the predicate (sensory or action) is stored at `age`."""
        if age not in self.memory:
            return False
        for p in self.memory[age][self._SENSORY]:
            if p == predicate:
                return True
        for p in self.memory[age][self._ACTION]:
            if p == predicate:
                return True
        return False

    def matches_now(self, temporal, now=0):
        """
        Parameters
        ----------
        temporal : [(Predicate, tag)]
            Temporally tagged predicates to match against memory.
        now : int
            The memory age that is treated as t=0.

        Returns
        -------
        bool
            True if the temporal predicates match now, t=0, False otherwise
        """
        prev = now + 1
        # Map temporal tags onto concrete memory ages.
        time = {WorkingMemory.NOW: now,
                WorkingMemory.PREV: prev}
        # Try to match the temporal predicates
        for (p, t) in temporal:
            # Always match actions if they can be satisfied now
            if p.name in self.actions and now == 0 and t == WorkingMemory.NOW:
                continue
            if t not in time:
                return False
            if not self.has_predicate(time[t], p):
                return False
        return True

    @staticmethod
    def __is_tag_possible(match, age, tag):
        """
        Check that a new (age, tag) pairing keeps time offsets consistent
        with the predicates matched so far.

        Parameters
        ----------
        match
            matched predicates so far, as (predicate, age, tag) triples
        age
            age for the new predicate
        tag
            which tag the new predicate is going to be tagged with
        """
        for (_, a, t) in match:
            # The age difference must equal the tag difference for the
            # predicates to refer to consistent instants.
            if age - a != tag - t:
                return False
        return True
class Predicate(object):
    """
    A named variable/value pair.

    Attributes
    ----------
    name : string
        Name of the predicate.
    value : value
        The value the predicate's variable takes.
    """
    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __eq__(self, other):
        # BUG FIX: previously fell off the end and returned None for
        # non-Predicate operands; NotImplemented lets Python fall back to
        # the reflected comparison (ultimately yielding False).
        if isinstance(other, Predicate):
            return self.name == other.name and self.value == other.value
        return NotImplemented

    def __ne__(self, other):
        # Explicit inverse for Python 2 compatibility (Python 3 derives it).
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Keep Predicate hashable (dict key / set member) now that
        # __eq__ is defined; hash on the same fields equality uses.
        return hash((self.name, self.value))

    def __str__(self):
        return "%s(%s)" % (self.name, str(self.value))
class Conjunction(object):
    """
    Temporally tagged conjunction of predicates.

    Attributes
    ----------
    predicates : [(Predicate, tag)]
        The temporal predicates that make up the conjunction; kept free
        of duplicates by add_predicate.
    """
    def __init__(self, temporal=None):
        """
        Parameters
        ----------
        temporal : (Predicate, tag), optional
            A single temporal predicate to start the conjunction with.
        """
        self.predicates = [] if temporal is None else [temporal]

    def __eq__(self, other):
        # Equal iff both are Conjunctions of the same size containing the
        # same temporal predicates (order-insensitive set comparison).
        if not isinstance(other, Conjunction):
            return False
        if len(self.predicates) != len(other.predicates):
            return False
        return all(tp in other.predicates for tp in self.predicates)

    def __str__(self):
        # Each element is rendered as the tuple (str(predicate), tag).
        parts = ["%s" % ((str(p), t),) for (p, t) in self.predicates]
        return ", ".join(parts)

    def add_predicate(self, predicate, tag):
        """Append the temporal predicate unless it is already present."""
        if not self.has_predicate(predicate, tag):
            self.predicates.append((predicate, tag))

    def has_predicate(self, predicate, tag):
        """Return True if (predicate, tag) is part of this conjunction."""
        return any(p == predicate and t == tag for (p, t) in self.predicates)

    def get_temporal(self):
        """Return the raw [(Predicate, tag)] list."""
        return self.predicates

    def get_predicates(self):
        """Return the predicates without their temporal tags."""
        return [p for (p, _) in self.predicates]

    def has_predicate_with_name_from(self, predicates, tag):
        """True if any predicate named in `predicates` carries `tag` here."""
        return any(p.name in predicates and t == tag
                   for (p, t) in self.predicates)

    def get_predicates_with_name_from(self, predicates):
        """Return the temporal predicates whose names appear in `predicates`."""
        return [(p, t) for (p, t) in self.predicates if p.name in predicates]

    def is_strict_subset_of(self, other):
        """True if every predicate here is in `other` and `other` is larger."""
        if not isinstance(other, Conjunction):
            raise RuntimeError("Can only compare two Conjunctions.")
        if len(self.predicates) >= len(other.predicates):
            return False
        return all(other.has_predicate(p, t) for (p, t) in self.predicates)

    @staticmethod
    def combine(a, b):
        """Return a new Conjunction with the union of a's and b's predicates."""
        merged = Conjunction()
        for (p, t) in a.predicates + b.predicates:
            merged.add_predicate(p, t)
        return merged
class Reinforcer(object):
    """
    Attributes
    ----------
    predicate : Predicate
        Predicate that states which variable/value pair is considered to be
        rewarding.
    conjunctions : [(Conjunction, int, int)]
        Keeps for every conjunction how many times it was satisfied and how
        many times it was followed by this reinforcer respectively.
    predictors : [Conjunction]
        Temporal predicates that predict the reinforcer with their associated
        probability.
    """
    def __init__(self, predicate):
        self.predicate = predicate
        # Start with the null conjunction
        self.conjunctions = []
        self.add_conjunction(Conjunction())
        self.predictors = []

    def add_conjunction(self, conjunction, satisfied=False, followed=False):
        """
        Start tracking a conjunction, unless it is already tracked.

        Parameters
        ----------
        conjunction : Conjunction
        satisfied : bool
            Whether the conjunction starts with one satisfied occurrence.
        followed : bool
            Whether that occurrence was followed by the reinforcer.
        """
        s = 1 if satisfied else 0
        f = 1 if followed else 0
        # Only add if the conjunction is not already in the table
        for (c, sc, fc) in self.conjunctions:
            if c == conjunction:
                return
        self.conjunctions.append((conjunction, s, f))

    def increment_conjunctions(self, memory):
        """
        Updates the satisfied/followed counts of all watched conjunctions.

        Parameters
        ----------
        memory : WorkingMemory
            The working memory the conjunctions are matched against.
        """
        for i in range(len(self.conjunctions)):
            conjunction, s, f = self.conjunctions[i]
            # If the conjunction is matched by taking the most future tag as t=0,
            # increment satisfied.
            if memory.matches_now(conjunction.get_temporal(), 1):
                # If the conjunction is matched by taking the most future tag as t=-1
                # increment followed if predicate is true at t=0
                if memory.has_predicate(0, self.predicate):
                    self.conjunctions[i] = (conjunction, s + 1, f + 1)
                else:
                    self.conjunctions[i] = (conjunction, s + 1, f)

    def count(self, conjunction):
        """
        Parameters
        ----------
        conjunction : Conjunction

        Returns
        -------
        (number, number) or None
            The number of times the conjunction was satisfied since
            reinforcer acquisition and the number of times it was followed
            by the reinforcer; None if the conjunction is not tracked.
        """
        for (c, s, f) in self.conjunctions:
            if c == conjunction:
                return s, f
        return None

    def find_best_conjunctions(self):
        """
        Select conjunctions whose reward rate or reward count is more than
        one (population) standard deviation above the mean.

        Returns
        -------
        best_conjunctions : [Conjunction]
        """
        # If less than two conjunctions, include all
        if len(self.conjunctions) < 2:
            return [c for (c, s, f) in self.conjunctions]
        # Collect reward rate and reward count for every conjunction.
        rr = []
        rc = []
        for (_, s, f) in self.conjunctions:
            if s == 0:
                rr.append(0)
            else:
                # BUG FIX: was `f / s`, which floor-divides two ints under
                # Python 2; use true division to match the
                # float(f) / float(s) ratio used in the filter below.
                rr.append(float(f) / s)
            rc.append(f)
        # Calculate mean reward rate and standard deviation
        m_rr = stat.mean(rr)
        std_rr = stat.pstdev(rr)
        # Calculate mean reward count and standard deviation
        m_rc = stat.mean(rc)
        std_rc = stat.pstdev(rc)
        # Filter generated conjunctions on those with rates or counts of one
        # standard deviation above mean
        best = [c
                for (c, s, f)
                in self.conjunctions
                if s != 0 and float(f) / float(s) > m_rr + std_rr
                or f > m_rc + std_rc]
        return best

    def get_predictors(self):
        """Return a deep copy of the current predictors."""
        return deepcopy(self.predictors)

    def create_predictor(self):
        """Promote the current best-scoring conjunctions to predictors."""
        # Add new predictors
        for predictor in self.__create_predictor():
            self.__add_predictor(predictor)

    def __add_predictor(self, predictor):
        # Skip duplicates.
        for p in self.predictors:
            if p == predictor:
                return
        self.predictors.append(predictor)

    def remove_predictor(self, predictor):
        """Remove every predictor equal to the given one."""
        self.predictors[:] = [p for p in self.predictors if not p == predictor]

    def __create_predictor(self):
        """
        Returns
        -------
        predictors : [Conjunction]
        """
        # "New predictors are created from the best-scoring conjunctions currently
        # maintained for that reinforcer.
        # "When creating new predictors, candidate conjunctions are sorted by merit
        # rather than raw reward rate to give greater weight to conjunctions that
        # have been sampled more heavily."
        # "If there are still several candidates, two are chosen at random to become
        # new predictors." (Enforces exploration.)
        conjunctions = sorted([(c, n, r) for (c, n, r) in self.conjunctions],
                              key=lambda x: Reinforcer.merit(x[2], x[1]),
                              reverse=True)
        if len(conjunctions) == 0:
            return []
        if len(conjunctions) <= 2:
            return [c for (c, n, r) in conjunctions]
        # "If several conjunctions are tied for top score, the ones with the fewest
        # number of terms are selected."
        top = []
        top_conjunction, top_n, top_r = conjunctions[0]
        top_score = Reinforcer.merit(top_r, top_n)
        for c, n, r in conjunctions:
            if abs(Reinforcer.merit(r, n) - top_score) <= 1e-6:
                top.append(c)
        if len(top) > 2:
            # NOTE(review): the quote above says two candidates are chosen at
            # random, but this keeps only the single shortest one ([:1]) and
            # deterministically — confirm whether [:2] with random choice
            # was intended.
            return sorted(top, key=lambda x: len(x.predicates))[:1]
        else:
            return top

    @staticmethod
    def merit(r, n):
        r"""
        .. math:: M(r, n) = \frac{r}{n} \cdot \max(0.2, 1 - \frac{1.175}{n})

        Returns 0 when n == 0.

        NOTE(review): the original docstring stated M(r, 0) = 1, while the
        code returns 0 — confirm which is intended (demerit below had the
        mirror-image mismatch).
        """
        if n == 0:
            return 0
        else:
            return (r / float(n)) * max(0.2, 1 - (1.175 / float(n)))

    @staticmethod
    def demerit(r, n):
        r"""
        .. math:: D(r, n) = \min(1, \frac{r}{n} + \frac{n - r}{0.7n^2})

        Returns 1 when n == 0.

        NOTE(review): the original docstring stated D(r, 0) = 0, while the
        code returns 1 — confirm which is intended.
        """
        if n == 0:
            return 1
        else:
            return min(1, (r / float(n)) + (n - r) / float(0.7 * n ** 2))
class OperantConditioningVisual(Visual):
    """Builds the visualization tree for an OperantConditioningController."""
    @staticmethod
    def visualize(self):
        # NOTE(review): a @staticmethod with a `self` parameter is unusual;
        # callers presumably pass the controller instance explicitly
        # (e.g. OperantConditioningVisual.visualize(controller)) — confirm.
        group = Group("operant")
        # Currently selected actions.
        group.add_element(List("actions", [str(a) for a in self.selected_actions]))
        # One sub-group per reinforcer: its predicate, predictors and
        # (conjunction, satisfied, followed) counting table.
        for reinforcer in self.reinforcers:
            r = Group("reinforcer")
            r.add_element(Number("reinforcer", str(reinforcer.predicate)))
            r.add_element(List("predictors", [str(c) for c in reinforcer.predictors]))
            r.add_element(List("conjunctions", [(str(c), s, f) for (c, s, f) in reinforcer.conjunctions]))
            group.add_element(r)
        return group
class OperantConditioningController(Controller):
    """Uses operant conditioning based learning.

    "Conditioned reinforcers are stimuli that become associated with food or
    water (or some other innate reward (even exercise), and serve as a signal
    that the reward is coming, thereby eliminating the gap between the desired
    action and the reinforcement signal."

    "In order to extract this information from its experience of the
    world, the program maintains two tables for each reinforcer.
    One counts the number of times each conjunction has been satisfied
    since that reinforcer was acquired; the other table counts the
    number of times a conjunction's occurrence has been followed on
    the next time step by the reinforcer."

    Attributes
    ----------
    reinforcers : [Reinforcer]
        All primary and acquired reinforcers; each keeps its own conjunction
        statistics and predictors.
    observations : [(name, value)]
        Observations buffered by sense() until the next act() call.
    memory : WorkingMemory
        Time-tagged store of recent sensory and action predicates.
    selected_actions : [(name, value)]
        For debugging purposes.

    References
    ----------
    .. [1] "Operant Conditioning in Skinnerbots,"
           David S. Touretzky & Lisa M. Saksida.
    """

    def __init__(self):
        super(OperantConditioningController, self).__init__(visual=OperantConditioningVisual())

        self.observations = []
        self.memory = None
        self.reinforcers = []
        self.selected_actions = []

        # Thresholds used by __delete_predictors.
        self._DEMERIT_THRESHOLD = 0.5
        self._SUFFICIENT_TRIALS = 10
        self._SIMILAR_MERIT_THRESHOLD = 0.2

    def init_internal(self, entity):
        super(OperantConditioningController, self).init_internal(entity)

        self.memory = WorkingMemory(actions=list(self.actions.keys()))

        # Seed every reinforcer with all one-action conjunctions so actions
        # can immediately be correlated with reward.
        for reinforcer in self.reinforcers:
            for action in self.actions:
                for value in self.actions[action]:
                    reinforcer.add_conjunction(
                        Conjunction((Predicate(action, value), WorkingMemory.NOW)))

    def sense(self, observation):
        """Buffer an observation until the next act() call.

        Parameters
        ----------
        observation : (name, value)
        """
        self.observations.append(observation)

    def act(self):
        """Run one operant-conditioning step and return the chosen actions.

        Returns
        -------
        [(name, value)]
        """
        self.memory.age()
        self.__store_observations()
        self.__acquire_reinforcers()
        self.__generate_conjunctions()
        self.__update_reinforcer_counts()

        # "New predictors for a given reinforcer are created only when that
        # reinforcer has just been received and the reward counts updated.
        # At that point, the program can check candidate predictors against its
        # working memory, so that it only constructs predictors that would have
        # predicted the reward it just got."
        self.__create_predictors()
        self.__delete_predictors()

        actions = self.__select_actions()
        if len(actions) == 0:
            # No predictor is actionable; explore with a random action.
            actions = [self.__select_random_action()]
        self.selected_actions = copy(actions)

        # Record the chosen actions as action predicates in working memory.
        for action in actions:
            self.log.do_log("observation", {"controller": "operant", "name": action[0], "value": action[1]})
            self.memory.add_action(Predicate(action[0], action[1]))

        return actions

    def set_primary_reinforcer(self, name, value):
        """Register the innate (primary) reward predicate as a reinforcer."""
        primary = Reinforcer(Predicate(name, value))
        self.reinforcers.append(primary)

    def __store_observations(self):
        """Move buffered observations into working memory as sensory
        predicates and clear the buffer for the next iteration.
        """
        for (name, value) in self.observations:
            self.memory.add_sensory(Predicate(name, value))
        self.observations = []

    def __update_reinforcer_counts(self):
        # Increment all reinforcer/conjunction occurrence counts for the
        # new observations.
        for reinforcer in self.reinforcers:
            reinforcer.increment_conjunctions(self.memory)

    def __generate_conjunctions(self):
        """
        Conjunctions are constructed incrementally by combining a pool of
        currently "best" conjunctions (starting with the null conjunction)
        with a pool of "best" predicates.

        A "best" conjunction is one whose reward rate is at least one standard
        deviation above the mean rate, or whose reward count is at least one
        standard deviation above the mean count. Both tests are necessary.
        """
        # Calculate for every reinforcer separately.
        for reinforcer in self.reinforcers:
            best_conjunctions = reinforcer.find_best_conjunctions()

            # Best predicates are the best conjunctions of one predicate.
            best_predicates = [c for c in best_conjunctions
                               if len(c.predicates) == 1]
            if len(best_predicates) == 0:
                best_predicates = self.__derive_temporal_predicates()

            for c in best_conjunctions:
                for p in best_predicates:
                    reinforcer.add_conjunction(Conjunction.combine(c, p))

    def __derive_temporal_predicates(self):
        """
        "The algorithm for inferring reinforcement contingencies operates on a
        collection of slightly more abstract items called temporal predicates.
        These are derived from working memory elements by replacing numeric
        time tags with symbolic labels."

        Returns
        -------
        [Conjunction]
            Single-predicate conjunctions tagged NOW (age 1) or PREV (age 2).
        """
        # Take the previous time step as the point of reference.
        predicates = []
        for p in self.memory.get_of_age(1):
            predicates.append(Conjunction((p, WorkingMemory.NOW)))
        for p in self.memory.get_of_age(2):
            predicates.append(Conjunction((p, WorkingMemory.PREV)))
        return predicates

    def __create_temporal_action_predicates(self):
        # All (action predicate, NOW) pairs for every available action value.
        predicates = []
        for action in self.actions:
            for value in self.actions[action]:
                predicates.append((Predicate(action, value), WorkingMemory.NOW))
        return predicates

    def __create_predictors(self):
        r"""
        Predictor: conjunction -> reinforcer w/ probability

        "During learning, conjunctions that are sufficiently well correlated with
        rewards generate "predictors," i.e., rules for predicting reward.
        These may displace earlier predictors that have not performed as well.
        To allow for the effects of noise, predictors are not replaced until they
        have a reasonably high application count (so their success rate can be
        accurately estimated) and their replacement has a significantly higher
        success rate."

        "Two numerical measures are used to assign scores to conjunctions and
        predictors: merit and demerit. They estimate the lower and upper bounds,
        respectively, on the true reward rate based on the number of examples
        seen so far."

        "Let :math:`n` be the number of times a conjunction has been observed to
        be true, and :math:`r` the number of times the reinforcer was received
        on the subsequent time step. Merit and demerit are defined as:"

        .. math:: M(r, n) = \frac{r}{n} \cdot \max(0.2, 1 - \frac{1.175}{n})
        .. math:: D(r, n) = \min(1, \frac{r}{n} + \frac{n - r}{0.7n^2})

        (The code in Reinforcer defines M(r, 0) = 0 and D(r, 0) = 1.)

        "As :math:`n` approaches :math:`\infty`, merit and demerit both converge
        to :math:`\frac{r}{n}`, the true reward rate."
        """
        # "In order for new predictors to be created the reward must either
        # have been unexpected, meaning the current set of predictors is
        # incomplete, or there must have been at least one false prediction
        # since the last reward was encountered, meaning there is an erroneous
        # predictor, one that is not specific enough to accurately express the
        # reward contingencies."
        reinforced = []
        renew = []  # reinforcers that should get new predictors

        # Unexpected rewards: a reinforcer occurred but nothing predicted it.
        for predicate in self.memory.get_of_age(0):
            if self.__has_acquired_reinforcer(predicate):
                reinforcer = self.__get_reinforcer(predicate)
                reinforced.append(reinforcer)
                if len(self.__was_predicted(reinforcer)) == 0:
                    renew.append(reinforcer)

        # False predictions: a predictor fired but its reward did not occur.
        for reinforcer in self.reinforcers:
            if reinforcer not in reinforced:
                if len(self.__was_predicted(reinforcer)) > 0:
                    renew.append(reinforcer)

        for reinforcer in renew:
            reinforcer.create_predictor()

    def __was_predicted(self, reinforcer):
        """Return the predictors that predicted the reinforcer for now.

        Parameters
        ----------
        reinforcer : Reinforcer

        Returns
        -------
        [Conjunction]
            All predictors that predicted the reinforcer to occur at the
            current time.
        """
        predictors = []
        for predictor in reinforcer.predictors:
            # A prediction means the predictor matched one time step before.
            if self.memory.matches_now(predictor.get_temporal(), 1):
                predictors.append(predictor)
        return predictors

    def __delete_predictors(self):
        """
        "Predictors are deleted in three circumstances.
        First, if the predictor has just given a false alarm, it may be deleted
        if its demerit is below a certain minimum value. (Remove bad predictors.)
        Second, if a reinforcer has just been correctly predicted, the predictor
        may be deleted if its demerit is less than the highest merit of any other
        successful predictor for that reinforcer. (Substitution for better one.)
        Finally, a predictor will be deleted if there is another predictor whose
        antecedent uses a strict subset of the terms in this predictor's
        conjunction, whose merit is nearly as good, and whose number of trials is
        sufficiently high that there is reasonable confidence that the two
        predictors are equivalent."
        """
        # Reinforcers that actually occurred at the current time step.
        reinforced = []
        for predicate in self.memory.get_of_age(0):
            if self.__has_acquired_reinforcer(predicate):
                reinforced.append(self.__get_reinforcer(predicate))

        # Rule 1: false alarm (reward predicted, but no reward) --
        # delete the predictor if its demerit is below the threshold.
        for reinforcer in self.reinforcers:
            if reinforcer not in reinforced:
                for predictor in self.__was_predicted(reinforcer):
                    n, r = reinforcer.count(predictor)
                    if Reinforcer.demerit(r, n) < self._DEMERIT_THRESHOLD:
                        reinforcer.remove_predictor(predictor)

        # Rule 2: reward correctly predicted -- delete a successful predictor
        # whose demerit is below the highest merit of another successful one.
        for reinforcer in self.reinforcers:
            if reinforcer in reinforced:
                for conjunction in self.__was_predicted(reinforcer):
                    n, r = reinforcer.count(conjunction)
                    own_demerit = Reinforcer.demerit(r, n)

                    # Find the highest merit among the other predictors.
                    highest_predictor, highest_merit = None, 0
                    for predictor in reinforcer.predictors:
                        if predictor == conjunction:
                            continue
                        p_n, p_r = reinforcer.count(predictor)
                        merit = Reinforcer.merit(p_r, p_n)
                        if merit > highest_merit:
                            highest_predictor = predictor
                            highest_merit = merit

                    # BUG FIX: the original compared against `merit`, i.e. the
                    # value from the *last* loop iteration (unbound when there
                    # are no other predictors), instead of the highest merit.
                    if highest_predictor is not None and own_demerit < highest_merit:
                        reinforcer.remove_predictor(conjunction)

        # Rule 3: a more general predictor (strict subset of terms) with
        # nearly the same merit and enough trials makes this one redundant.
        for reinforcer in self.reinforcers:
            # Iterate over a snapshot so removal is safe during iteration.
            for predictor in list(reinforcer.predictors):
                n, r = reinforcer.count(predictor)
                merit_predictor = Reinforcer.merit(r, n)
                # Enough trials to have confidence of equivalence?
                if n < self._SUFFICIENT_TRIALS:
                    continue
                for other in reinforcer.predictors:
                    if other == predictor:
                        continue
                    o_n, o_r = reinforcer.count(other)
                    if o_n < self._SUFFICIENT_TRIALS:
                        continue
                    merit_other = Reinforcer.merit(o_r, o_n)
                    # Antecedent a strict subset and merit nearly as good?
                    if other.is_strict_subset_of(predictor) and \
                            abs(merit_predictor - merit_other) < self._SIMILAR_MERIT_THRESHOLD:
                        reinforcer.remove_predictor(predictor)
                        # Stop scanning: `predictor` is already removed.
                        break

    def __acquire_reinforcers(self):
        """
        Acquiring Conditioned Reinforcers

        "If the skinnerbot could find a way to make HEAR(pump) be true, then
        predictor #3 suggests it could get water whenever it wanted.
        So HEAR(pump) becomes a secondary reinforcer, and the skinnerbot begins
        trying out theories of what causes the pump to run."

        I.e. if a predictor has a sensory predicate, that sensory predicate
        becomes a reinforcer itself.
        """
        # Iterate over a snapshot: newly acquired reinforcers are appended to
        # self.reinforcers during the loop, but they have no predictors yet,
        # so skipping them this pass changes nothing.
        for reinforcer in list(self.reinforcers):
            for predictor in reinforcer.get_predictors():
                # If a sensory predicate is not already a reinforcer, add it.
                for (sensory, tag) in predictor.get_predicates_with_name_from(self.sensory.keys()):
                    if not self.__has_acquired_reinforcer(sensory):
                        self.reinforcers.append(Reinforcer(sensory))
                        self.log.do_log("reinforcer", {"controller": "operant", "predicate": sensory.name, "value": sensory.value})

    def __select_actions(self):
        """Selects actions by matching predictors with the working memory.

        "To generate behavior, we look for predictors that can be satisfied by
        the rat's taking some action currently available to it."

        "If it finds a predictor where all but one of the predicates is
        currently true (i.e., matches an item in working memory), and the last
        one can be made true by taking some action that is presently available,
        then it will select that action with high probability.
        There is also some randomness in the action selection mechanism, to
        facilitate exploration."

        Returns
        -------
        [(name, value)]
            List of action names and values.
        """
        # Find predictors that can be satisfied now.
        matches = []
        for reinforcer in self.reinforcers:
            for predictor in reinforcer.predictors:
                temporal = predictor.get_temporal()
                if self.memory.matches_now(temporal):
                    matches.append(temporal)

        # Collect the action predicates in the matches that can be taken now.
        actions = []
        for match in matches:
            for (p, tag) in match:
                if p.name in self.actions.keys() and tag == WorkingMemory.NOW:
                    actions.append(p)

        if len(actions) == 0:
            return []
        # Randomness in the selection facilitates exploration.
        action = random.choice(actions)
        return [(action.name, action.value)]

    def __select_random_action(self):
        """Pick a uniformly random available (action, value) pair."""
        # Leftover debug print removed; list() makes random.choice work on
        # Python 3 dict views as well.
        action = random.choice(list(self.actions.keys()))
        value = random.choice(self.actions[action])
        return action, value

    def __has_acquired_reinforcer(self, predicate):
        """Return True iff the predicate is already one of our reinforcers.

        Parameters
        ----------
        predicate : Predicate
        """
        for reinforcer in self.reinforcers:
            if reinforcer.predicate == predicate:
                return True
        return False

    def __get_reinforcer(self, predicate):
        """Return the Reinforcer whose predicate equals the given predicate.

        Parameters
        ----------
        predicate : Predicate

        Raises
        ------
        RuntimeError
            If no reinforcer matches.
        """
        for reinforcer in self.reinforcers:
            if reinforcer.predicate == predicate:
                return reinforcer
        raise RuntimeError("Reinforcer not found.")
| {
"repo_name": "Luciden/easl",
"path": "easl/controller/operant_controller.py",
"copies": "1",
"size": "33427",
"license": "mit",
"hash": 894906099631784100,
"line_mean": 34.732967033,
"line_max": 131,
"alpha_frac": 0.5625392647,
"autogenerated": false,
"ratio": 4.319291898178059,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5381831162878059,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dennis'
class Visual(object):
    """Abstract helper that turns a model object into a Visualization."""

    @staticmethod
    def visualize(self):
        """Build a visualization of the given object.

        Parameters
        ----------
        self : object
            Any object that will be visualized.

        Returns
        -------
        visualization : Visualization

        Raises
        ------
        NotImplementedError
            Always; concrete Visual subclasses must override this.
        """
        raise NotImplementedError("Base Class")
class Visualization(object):
    """Base class for all visualization elements."""

    def __init__(self, name, show_name=False):
        # name: identifier of this element; show_name: whether the renderer
        # should display the name alongside the element.
        self.name = name
        self.show_name = show_name
class Group(Visualization):
    """A named, ordered collection of visualization elements."""

    def __init__(self, name):
        super(Group, self).__init__(name)
        self.elements = []

    def add_element(self, element):
        """Append an element; None is silently ignored."""
        if element is None:
            return
        self.elements.append(element)

    def get_elements(self):
        """Return the contained elements in insertion order."""
        return self.elements
class Rows(Group):
    """Group of elements arranged as rows (layout decided by the renderer)."""

    def __init__(self, name):
        super(Rows, self).__init__(name)
class Columns(Group):
    """Group of elements arranged as columns (layout decided by the renderer)."""

    def __init__(self, name):
        super(Columns, self).__init__(name)
class Slider(Visualization):
    """
    A slider with a fixed number of positions.

    A horizontal
    --|-----
    or vertical
    |
    +
    |
    slider.

    Attributes
    ----------
    name : string
    number : int
        Total number of discrete slider positions.
    position : int
        Current position, 0 <= position < number.
    """
    def __init__(self, name, number, position):
        super(Slider, self).__init__(name)
        # Validate before storing state. (The original also re-assigned
        # self.name, which the base class already set -- removed.)
        if not (0 <= position < number):
            raise RuntimeError("position not in slide")
        self.number = number
        self.position = position
class Table(Visualization):
    """
    A table.

       A  B
    C  1  0
    D  2  4

    Attributes
    ----------
    name : string

    NOTE(review): no cell storage is defined yet; only the name is kept,
    so this class currently acts as a placeholder.
    """
    def __init__(self, name):
        super(Table, self).__init__(name)
class Tree(Visualization):
    """A named tree given as nested dictionaries."""

    def __init__(self, name, tree):
        """
        Parameters
        ----------
        tree : {name: {name: ...{name: value}}}
            Nested dictionary representation of the tree.
        """
        super(Tree, self).__init__(name)
        self.tree = tree
class Number(Visualization):
    """A single value displayed under a name.

    NOTE(review): callers in this project also pass strings (see
    OperantConditioningVisual), not just numbers.
    """

    def __init__(self, name, number):
        super(Number, self).__init__(name)
        self.number = number
class Grid(Visualization):
    """A fixed-size two-dimensional grid of visualization elements.

    Storage is row-major: ``self.grid[row][col]`` with ``h`` rows and
    ``w`` columns.
    """

    def __init__(self, name, w, h):
        super(Grid, self).__init__(name)
        # h rows of w columns, all initially empty.
        self.grid = [[None for _ in range(w)] for _ in range(h)]
        self.w = w
        self.h = h

    def add_element(self, element, y, x):
        """Place element at row y, column x; out-of-range calls are ignored.

        BUG FIX: the original validated ``x < w`` / ``y < h`` but then
        indexed ``grid[x][y]``, which raises IndexError on non-square
        grids; index row-major as ``grid[y][x]`` instead.
        """
        if 0 <= x < self.w and 0 <= y < self.h:
            self.grid[y][x] = element

    def element_at(self, x, y):
        """Return the element at column x, row y (None if empty).

        Indexing changed to ``grid[y][x]`` to match add_element, so the
        add/element_at round-trip behaves exactly as before.
        """
        return self.grid[y][x]
class List(Visualization):
    """A named sequence of values rendered as a list."""

    def __init__(self, name, elements):
        super(List, self).__init__(name)
        self.elements = elements
class Dict(Visualization):
    """A named mapping of values rendered as key/value pairs."""

    def __init__(self, name, elements):
        super(Dict, self).__init__(name)
        self.elements = elements
class Circle(Visualization):
    """A value v together with its allowed range [v_min, v_max].

    How the value is drawn is up to the rendering backend.
    """

    def __init__(self, name, v_min, v_max, v):
        super(Circle, self).__init__(name)
        self.v_min = v_min
        self.v_max = v_max
        self.v = v
class Graph(Visualization):
    """A graph given as node and edge collections, with optional grouping."""

    def __init__(self, name, graph, nodes, edges, groups=None):
        super(Graph, self).__init__(name)
        self.graph = graph
        self.nodes = nodes
        self.edges = edges
        # Optional partition of nodes into named groups; None if unused.
        self.groups = groups
class Visualizer(object):
    """Collects per-iteration visualizations; subclasses render them.

    Attributes
    ----------
    visualizations : Rows or None
        The current set of visualizations; None until
        reset_visualization() is called.
    """

    def __init__(self):
        self.visualizations = None

    def reset_visualization(self):
        """Start a fresh, empty set of visualizations for this iteration."""
        self.visualizations = Rows("main")

    def update_visualization(self, v):
        """Add one visualization; None is silently ignored."""
        if v is not None:
            self.visualizations.add_element(v)

    def update(self, iteration):
        """Draws all the current visualizations to the screen.

        Raises
        ------
        NotImplementedError
            Always; rendering backends must override this.
            (Original raised with the placeholder message "Blah".)
        """
        raise NotImplementedError("update() must be implemented by a subclass")
| {
"repo_name": "Luciden/easl",
"path": "easl/visualize/visualizer.py",
"copies": "1",
"size": "3856",
"license": "mit",
"hash": 2551656781319968300,
"line_mean": 19.6629213483,
"line_max": 64,
"alpha_frac": 0.5114107884,
"autogenerated": false,
"ratio": 4.124064171122995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 178
} |
from aiorchestra.core import context
from aiorchestra.tests import base
class TestDeployments(base.BaseAIOrchestraTestCase):
    """Deployment lifecycle tests for OrchestraContext."""

    def setUp(self):
        super(TestDeployments, self).setUp()

    def tearDown(self):
        super(TestDeployments, self).tearDown()

    def _make_context(self, template_path):
        # Every test drives the same template; build the context in one place.
        return context.OrchestraContext(
            'simple_node_template',
            path=template_path,
            logger=base.LOG,
            event_loop=self.event_loop)

    @base.with_template('simple_node_template.yaml')
    def test_deploy_status_before_deploy(self, template_path):
        c = self._make_context(template_path)
        self.assertEqual(c.status, context.OrchestraContext.PENDING)

    @base.with_template('simple_node_template.yaml')
    def test_deploy_status_after_deploy(self, template_path):
        c = self._make_context(template_path)
        c.run_deploy()
        self.assertEqual(c.status, context.OrchestraContext.COMPLETED)

    @base.with_template('simple_node_template.yaml')
    def test_undeploy_error(self, template_path):
        c = self._make_context(template_path)
        self.assertRaises(Exception, c.run_undeploy)

    @base.with_template('simple_node_template.yaml')
    def test_deploy_error(self, template_path):
        c = self._make_context(template_path)
        c.status = context.OrchestraContext.FAILED
        self.assertRaises(Exception, c.run_deploy)

    @base.with_template('simple_node_template.yaml')
    def test_undeploy_twice(self, template_path):
        c = self._make_context(template_path)
        c.run_deploy()
        c.run_undeploy()
        self.assertRaises(Exception, c.run_undeploy)

    @base.with_deployed('invalid_node_template.yaml', do_deploy=False)
    def test_unable_to_import_lifecycle_implementation(self, c):
        ex = self.assertRaises(Exception, c.run_deploy)
        self.assertIn("No module named 'module'", str(ex))

    @base.with_deployed('invalid_node_template-2.yaml', do_deploy=False)
    def test_invalid_node_event_implementation_reference(self, c):
        ex = self.assertRaises(Exception, c.run_deploy)
        self.assertIn('Invalid event implementation reference', str(ex))
| {
"repo_name": "aiorchestra/aiorchestra",
"path": "aiorchestra/tests/test_deployments.py",
"copies": "1",
"size": "3229",
"license": "apache-2.0",
"hash": -534115506029501250,
"line_mean": 36.9882352941,
"line_max": 78,
"alpha_frac": 0.6611954165,
"autogenerated": false,
"ratio": 3.776608187134503,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4937803603634503,
"avg_score": null,
"num_lines": null
} |
from aiorchestra.core import utils
# Nova server status values (OpenStack Compute API).
COMPUTE_ACTIVE = 'ACTIVE'
COMPUTE_BUILD = 'BUILD'
COMPUTE_SHUTOFF = 'SHUTOFF'
# Value of the server's 'OS-EXT-STS:task_state' while it is powering on.
SERVER_TASK_STATE_POWERING_ON = 'powering-on'
async def create(context, novaclient, glanceclient, name_or_id, flavor,
                 image, ssh_keyname=None, nics=None, use_existing=False,
                 files=None, config_drive=False, userdata=None):
    """
    Creates a compute instance, or looks up an existing one.

    :param context: OrchestraContext
    :param novaclient: Authorized Nova client
    :param glanceclient: Authorized Glance client
    :param name_or_id: Instance name or ID
    :param flavor: Instance flavor
    :param image: Instance image
    :param ssh_keyname: name of SSH keypair to be injected
    :param nics: Neutron port definitions for an instance
    :param use_existing: whether to use an existing instance or create new
    :param files: dict of file injections
    :param config_drive: use config drive or not
    :param userdata: user data passed to the new instance
    :return: instance
    """
    if use_existing:
        # External resource: never create, just fetch it.
        return novaclient.servers.get(name_or_id)

    # Fail fast if the referenced image or flavor does not exist.
    glanceclient.images.get(image)
    context.logger.debug('Image "{0}" exists.'.format(image))
    novaclient.flavors.get(flavor)
    context.logger.debug('Flavor "{0}" exists.'.format(flavor))

    instance = novaclient.servers.create(
        name_or_id, image, flavor,
        key_name=ssh_keyname,
        nics=nics, files=files,
        config_drive=config_drive,
        userdata=userdata,
    )
    context.logger.info('Compute instance "{0}" created.'.format(name_or_id))
    return instance
async def start(context, novaclient, name_or_id,
                use_existing=False,
                task_retry_interval=None,
                task_retries=None):
    """
    Starts a compute instance and polls until it becomes ACTIVE.

    :param context: OrchestraContext
    :param novaclient: Authorized Nova client
    :param name_or_id: Instance name or ID
    :param use_existing: whether the instance is an external resource
    :param task_retry_interval: interval between polls (passed to utils.retry)
    :param task_retries: maximum number of polls (passed to utils.retry)
    :return: None
    """
    if use_existing:
        context.logger.info('Using existing instance in its original state.')
        return

    async def wait_until_active():
        instance = novaclient.servers.get(name_or_id)
        server_task_state = getattr(instance, 'OS-EXT-STS:task_state')
        if instance.status == COMPUTE_ACTIVE:
            return True
        # Still building: keep polling.
        if instance.status == COMPUTE_BUILD:
            return False
        # BUG FIX: the original re-tested COMPUTE_BUILD here, which was
        # unreachable after the early return above, so a SHUTOFF instance
        # was never actually started (and the COMPUTE_SHUTOFF constant was
        # unused). Issue the start request when powered off and no power-on
        # task is already in flight.
        if (instance.status == COMPUTE_SHUTOFF and
                server_task_state != SERVER_TASK_STATE_POWERING_ON):
            instance.start()
        return False

    await utils.retry(wait_until_active, exceptions=(Exception,),
                      task_retries=task_retries,
                      task_retry_interval=task_retry_interval)
    # BUG FIX: original logged 'Compute instance started.'.format(name_or_id)
    # -- a format call on a string with no placeholder.
    context.logger.info('Compute instance "{0}" started.'.format(name_or_id))
async def delete(context, novaclient, name_or_id,
                 use_existing=False,
                 task_retry_interval=None,
                 task_retries=None):
    """
    Deletes a compute instance and waits until Nova no longer knows it.

    :param context: OrchestraContext
    :param novaclient: Authorized Nova client
    :param name_or_id: Instance name or ID
    :param use_existing: whether the instance is an external resource
    :param task_retry_interval: interval between polls (passed to utils.retry)
    :param task_retries: maximum number of polls (passed to utils.retry)
    :return: None
    """
    if use_existing:
        context.logger.info('Compute instance "{0}" remains as is, '
                            'because it is external resource.'
                            .format(name_or_id))
        return

    async def is_gone():
        # Nova raises once the server has been fully purged.
        try:
            novaclient.servers.get(name_or_id)
        except Exception as ex:
            context.logger.debug(str(ex))
            return True
        return False

    instance = novaclient.servers.get(name_or_id)
    try:
        instance.delete()
    except Exception as ex:
        # Best effort: we don't really care if the delete request failed
        # here, polling below decides whether the instance is gone.
        context.logger.debug(str(ex))

    await utils.retry(is_gone, exceptions=(Exception,),
                      task_retries=task_retries,
                      task_retry_interval=task_retry_interval)
    context.logger.info('Compute instance "{0}" deleted.'
                        .format(name_or_id))
async def stop(context, novaclient, name_or_id,
               use_existing=False,
               task_retry_interval=None,
               task_retries=None):
    """
    Stops a compute instance and waits for its task state to clear.

    :param context: OrchestraContext
    :param novaclient: Authorized Nova client
    :param name_or_id: Instance name or ID
    :param use_existing: whether the instance is an external resource
    :param task_retry_interval: interval between polls (passed to utils.retry)
    :param task_retries: maximum number of polls (passed to utils.retry)
    :return: None
    """
    if use_existing:
        context.logger.info('Leaving compute instance "{0}" as is because it '
                            'is external resource.'.format(name_or_id))
        return

    async def task_state_cleared():
        # 'OS-EXT-STS:task_state' goes back to None once the request settles.
        server = novaclient.servers.get(name_or_id)
        return getattr(server, 'OS-EXT-STS:task_state') is None

    context.logger.info('Attempting to stop compute '
                        'instance "{0}".'.format(name_or_id))
    try:
        novaclient.servers.get(name_or_id).stop()
    except Exception as ex:
        # Best effort: we don't really care if the instance was stopped or
        # not, the next operation will delete it.
        context.logger.debug(str(ex))

    await utils.retry(task_state_cleared, exceptions=(Exception, ),
                      task_retry_interval=task_retry_interval,
                      task_retries=task_retries)
    context.logger.info('Compute instance "{0}" stopped.'
                        .format(name_or_id))
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/compute/instances.py",
"copies": "1",
"size": "6397",
"license": "apache-2.0",
"hash": 7243501706962119000,
"line_mean": 32.6684210526,
"line_max": 78,
"alpha_frac": 0.6038768173,
"autogenerated": false,
"ratio": 4.082322910019145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 190
} |
from aiorchestra.core import utils
from openstack_plugin.common import clients
from openstack_plugin.networking import floating_ip
@utils.operation
async def floatingip_create(node, inputs):
    """Create a floating IP (or adopt an existing one) and store its data."""
    node.context.logger.info(
        '[{0}] - Attempting to create floating IP.'.format(node.name))
    existing_id = node.properties.get('floating_ip_id')
    fip = await floating_ip.create(
        node.context,
        clients.openstack.neutron(node),
        node.runtime_properties.get('floating_network_id'),
        node.runtime_properties.get('port_id'),
        use_existing=bool(existing_id),
        existing_floating_ip_id=existing_id)
    # Keep a stable 'floating_ip_id' key, then mirror the full API payload.
    node.batch_update_runtime_properties(**{'floating_ip_id': fip['id']})
    node.batch_update_runtime_properties(**fip)
    node.context.logger.info(
        '[{0}] - Floating IP created.'.format(node.name))
@utils.operation
async def floatingip_delete(node, inputs):
    """Delete the node's floating IP unless it is an external resource."""
    node.context.logger.info(
        '[{0}] - Attempting to delete floating IP.'.format(node.name))
    external = bool(node.properties.get('floating_ip_id'))
    await floating_ip.delete(node.context,
                             clients.openstack.neutron(node),
                             node.get_attribute('floating_ip_id'),
                             use_existing=external)
@utils.operation
async def link_floatingip_to_network(source, target, inputs):
    """Remember the target network as the pool for the source's floating IP."""
    net_id = target.get_attribute('network_id')
    source.update_runtime_properties(
        'floating_network_id', net_id)
    source.context.logger.info(
        '[{0} -----> {1}] - Connecting floating IP to '
        'network "{2}".'.format(target.name,
                                source.name,
                                net_id))
@utils.operation
async def unlink_floatingip_from_network(source, target, inputs):
    """Forget the network recorded by link_floatingip_to_network."""
    if 'floating_network_id' not in source.runtime_properties:
        return
    source.context.logger.info(
        '[{0} --X--> {1}] - Disconnecting floating IP from '
        'network "{2}".'.format(
            target.name,
            source.name,
            source.runtime_properties['floating_network_id']))
    del source.runtime_properties['floating_network_id']
@utils.operation
async def link_floatingip_to_port(source, target, inputs):
    """Remember the target port so the floating IP is associated with it."""
    port_id = target.runtime_properties.get('port_id')
    source.update_runtime_properties('port_id', port_id)
    source.context.logger.info(
        '[{0} -----> {1}] - Connecting floating IP to '
        'port "{2}".'.format(target.name,
                             source.name,
                             port_id))
@utils.operation
async def unlink_floatingip_from_port(source, target, inputs):
    """Forget the port recorded by link_floatingip_to_port."""
    if 'port_id' not in source.runtime_properties:
        return
    source.context.logger.info(
        '[{0} --X--> {1}] - Disconnecting floating IP from '
        'port "{2}".'.format(target.name,
                             source.name,
                             target.runtime_properties.get('port_id')))
    del source.runtime_properties['port_id']
@utils.operation
async def inject_floating_ip_attributes(source, target, inputs):
    """Expose the floating IP address to the source node as 'access_ip'."""
    source.context.logger.info('[{0} -----> {1}] - Supplying '
                               'floating IP details.'
                               .format(target.name, source.name))
    source.batch_update_runtime_properties(**{
        'access_ip': target.runtime_properties['floating_ip_address'],
    })
@utils.operation
async def eject_floating_ip_attributes(source, target, inputs):
    """Remove floating-IP-derived attributes from the source node."""
    source.context.logger.info('[{0} --X--> {1}] - Revoking '
                               'floating IP details.'
                               .format(target.name, source.name))
    # Only 'access_ip' is injected by the matching inject task.
    if 'access_ip' in source.runtime_properties:
        del source.runtime_properties['access_ip']
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/tasks/floating_ip.py",
"copies": "1",
"size": "4686",
"license": "apache-2.0",
"hash": -1976084973717016600,
"line_mean": 35.8976377953,
"line_max": 78,
"alpha_frac": 0.6188647034,
"autogenerated": false,
"ratio": 3.9510961214165263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5069960824816526,
"avg_score": null,
"num_lines": null
} |
from aiorchestra.core import utils
from openstack_plugin.common import clients
from openstack_plugin.networking import network
@utils.operation
async def network_create(node, inputs):
    """Create a Neutron network, or adopt an existing one by id."""
    node.context.logger.info(
        '[{0}] - Attempting to create network.'.format(node.name))
    net_id = node.properties.get('network_id')
    # An explicit id means we adopt an external network instead of creating.
    identifier = net_id if net_id else node.properties.get('network_name')
    net = await network.create(
        node.context,
        identifier,
        clients.openstack.neutron(node),
        is_external=node.properties['is_external'],
        admin_state_up=True,
        use_existing=bool(net_id),
    )
    node.batch_update_runtime_properties(**{
        'network_id': net['id'],
        'network_name': net['name'],
        'subnets': net['subnets']
    })
    node.context.logger.info(
        '[{0}] - Network "{1}" created.'.format(
            node.name, identifier))
@utils.operation
async def network_start(node, inputs):
    """Re-sync runtime properties from the live Neutron network.

    NOTE(review): no Neutron call actually "starts" a network; this task
    only re-reads id/name/subnets so later tasks see current values.
    """
    node.context.logger.info(
        '[{0}] - Attempting to start network.'.format(node.name))
    neutron = clients.openstack.neutron(node)
    net_id = node.get_attribute('network_id')
    net = neutron.show_network(net_id)['network']
    node.batch_update_runtime_properties(**{
        'network_id': net['id'],
        'network_name': net['name'],
        'subnets': net['subnets']
    })
@utils.operation
async def network_delete(node, inputs):
    """Delete the node's network (kept if it is an external resource)."""
    node.context.logger.info(
        '[{0}] - Attempting to delete network.'.format(node.name))
    net_id = node.get_attribute('network_id')
    await network.delete(
        node.context,
        net_id,
        clients.openstack.neutron(node),
        use_existing=bool(node.properties.get('network_id')),
        task_retries=inputs.get('task_retries', 10),
        task_retry_interval=inputs.get('task_retry_interval', 10)
    )
    node.context.logger.info(
        '[{0}] - Network "{1}" deleted.'.format(
            node.name, net_id))
@utils.operation
async def link(source, target, inputs):
    """Attach the source node to the target network (or subnet)."""
    net_id = target.get_attribute('network_id')
    source.context.logger.info(
        '[{0} -----> {1}] - Network "{2}" attached.'
        .format(target.name,
                source.name,
                net_id))
    if 'link_id' not in target.runtime_properties:
        # Plain network: remember its id as the link target.
        source.update_runtime_properties('link_id', net_id)
        return
    # Target carries a link_id of its own, i.e. it is a subnet of another
    # network: propagate the parent network id and subnet details.
    parent_id = target.runtime_properties['link_id']
    source.context.logger.info(
        '[{0} -----> {1}] - It appears that target network '
        'is a subnet for network "{2}".'
        .format(target.name, source.name, parent_id))
    source.batch_update_runtime_properties(**{
        'link_id': parent_id,
        'subnet_id': net_id,
        'ip_version': target.runtime_properties['ip_version'],
    })
@utils.operation
async def unlink(source, target, inputs):
    """Drop the network/subnet linkage recorded on *source*."""
    source.context.logger.info(
        '[{0} --X--> {1}] - Network "{2}" detached.'
        .format(target.name,
                source.name,
                target.get_attribute('network_id')))
    props = source.runtime_properties
    for key in ('link_id', 'subnet_id'):
        if key in props:
            del props[key]
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/tasks/net.py",
"copies": "1",
"size": "4238",
"license": "apache-2.0",
"hash": -1598847362826648800,
"line_mean": 33.4552845528,
"line_max": 78,
"alpha_frac": 0.6222274658,
"autogenerated": false,
"ratio": 3.8145814581458146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49368089239458146,
"avg_score": null,
"num_lines": null
} |
from aiorchestra.core import utils
from openstack_plugin.common import clients
from openstack_plugin.networking import port
@utils.operation
async def port_create(node, inputs):
    """Create (or adopt) a Neutron port and record its details.

    Requires a prior relationship that stored 'link_id' (the network id)
    in runtime properties; raises otherwise.  A 'port_id' in node
    properties means the port pre-exists and is only looked up.
    """
    if 'link_id' not in node.runtime_properties:
        raise Exception('Unable to create port for node "{0}". '
                        'It is necessary to use relationship '
                        'to link port to a network'
                        .format(node.name))
    node.context.logger.info(
        '[{0}] - Attempting to create port.'.format(node.name))
    neutron = clients.openstack.neutron(node)
    port_name = node.properties.get('port_name')
    port_id = node.properties.get('port_id')
    link_id = node.runtime_properties.get('link_id')
    ip_address = node.properties.get('ip_address')
    order = node.properties.get('order')
    is_default = node.properties.get('is_default')
    ip_range_start = node.properties.get('ip_range_start')
    ip_range_end = node.properties.get('ip_range_end')
    admin_state_up = True  # fixed: local was misspelled 'admit_state_up'
    subnet_id = node.runtime_properties.get('subnet_id')
    security_groups = node.runtime_properties.get('security_groups')
    use_existing = True if port_id else False
    identifier = port_name if not port_id else port_id
    _port = await port.create(node.context,
                              identifier, neutron,
                              link_id,
                              subnet_id=subnet_id,
                              ip_addresses=ip_address,
                              admin_state_up=admin_state_up,
                              security_groups=security_groups,
                              use_existing=use_existing)
    node.batch_update_runtime_properties(**{
        'link_id': link_id,
        'port_name': _port['name'],
        'port_id': _port['id'],
        'ip_addresses': ip_address,
        'order': order,
        'is_default': is_default,
        'ip_range_start': ip_range_start,
        'ip_range_end': ip_range_end,
        'mac_address': _port['mac_address'],
        'allowed_address_pairs': _port['allowed_address_pairs'],
    })
    # Promote the first fixed-ip entry (subnet_id / ip_address keys)
    # directly into runtime properties.
    if _port['fixed_ips']:
        fixed_ips = _port['fixed_ips'].pop()
        node.batch_update_runtime_properties(**fixed_ips)
    node.context.logger.info(
        '[{0}] - Port created.'.format(node.name))
@utils.operation
async def port_start(node, inputs):
    """Attach the port to its compute instance, when one is bound."""
    node.context.logger.info(
        '[{0}] - Attempting to start port.'
        .format(node.name))
    server_id = node.runtime_properties.get('compute_id')
    if server_id:
        port_id = node.runtime_properties['port_id']
        node.context.logger.info(
            '[{0}] - Attempting to attach port "{1}" to '
            'compute node "{2}".'
            .format(node.name, port_id, server_id))
        compute_client = clients.openstack.nova(node)
        compute_client.servers.interface_attach(server_id, port_id,
                                                None, None)
        node.context.logger.info(
            '[{0}] - Port "{1}" attached.'
            .format(node.name, port_id))
    node.context.logger.info(
        '[{0}] - Port started.'.format(node.name))
@utils.operation
async def port_stop(node, inputs):
    """Detach the port from its compute instance, when one is bound."""
    node.context.logger.info(
        '[{0}] - Attempting to stop port.'
        .format(node.name))
    server_id = node.runtime_properties.get('compute_id')
    if server_id:
        port_id = node.runtime_properties['port_id']
        node.context.logger.info(
            '[{0}] - Attempting to detach port "{1}" from '
            'compute node "{2}".'
            .format(node.name, port_id, server_id))
        compute_client = clients.openstack.nova(node)
        compute_client.servers.interface_detach(server_id, port_id)
        node.context.logger.info(
            '[{0}] - Port "{1}" detached.'
            .format(node.name, port_id))
    node.context.logger.info(
        '[{0}] - Port stopped.'.format(node.name))
@utils.operation
async def port_delete(node, inputs):
    """Delete the node's port unless it is an externally managed one."""
    retries = inputs.get('task_retries', 10)
    interval = inputs.get('task_retry_interval', 10)
    client = clients.openstack.neutron(node)
    # A port id in *properties* marks a pre-existing resource.
    externally_managed = bool(node.properties.get('port_id'))
    await port.delete(node.context,
                      node.runtime_properties['port_id'],
                      client,
                      use_existing=externally_managed,
                      task_retry_interval=interval,
                      task_retries=retries)
@utils.operation
async def bind_compute_to_port(source, target, inputs):
    """Record the target compute's server id on the port node."""
    source.context.logger.info('[{0} -----> {1}] - Connecting '
                               'compute to port.'
                               .format(target.name, source.name))
    server_id = target.runtime_properties['server']['id']
    source.update_runtime_properties('compute_id', server_id)
@utils.operation
async def unbind_compute_from_port(source, target, inputs):
    """Forget the compute binding recorded by bind_compute_to_port."""
    # Message fixed: previously read "Disconnecting compute to port."
    source.context.logger.info('[{0} --X--> {1}] - Disconnecting '
                               'compute from port.'.format(target.name,
                                                           source.name))
    if 'compute_id' in source.runtime_properties:
        del source.runtime_properties['compute_id']
@utils.operation
async def add_port(source, target, inputs):
    """Append the target port as a NIC definition on the compute node.

    Builds a nova-boot style NIC mapping ('net-id', 'port-id',
    'v<ip_version>-fixed-ip') and accumulates it under the 'nics'
    runtime property of the source compute node.
    """
    source.context.logger.info(
        '[{0} -----> {1}] - Binding port to compute'
        .format(target.name, source.name))
    nics = source.runtime_properties.get('nics', [])
    _port = {}
    port_id = target.runtime_properties.get('port_id')
    link_id = target.runtime_properties.get('link_id')
    ip_version = target.runtime_properties.get('ip_version')
    ip_address = target.runtime_properties.get('ip_address')
    # NOTE(review): the key becomes 'v4-fixed-ip' / 'v6-fixed-ip'.  If
    # ip_version is unset it degenerates to 'vNone-fixed-ip' — presumably
    # the port/subnet tasks always set it; TODO confirm.
    _port.update({
        'net-id': link_id,
        'port-id': port_id,
        'v{0}-fixed-ip'.format(ip_version):
            ip_address
    })
    nics.append(_port)
    source.update_runtime_properties('nics', nics)
@utils.operation
async def remove_port(source, target, inputs):
    """Drop the accumulated NIC list when a port is unbound."""
    detached_port = target.runtime_properties.get('port_id')
    source.context.logger.info(
        '[{0} --X--> {1}] - Unbinding port "{2}" from compute'
        .format(target.name, source.name, detached_port))
    if 'nics' in source.runtime_properties:
        del source.runtime_properties['nics']
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/tasks/port.py",
"copies": "1",
"size": "6908",
"license": "apache-2.0",
"hash": -2659259300388528600,
"line_mean": 36.956043956,
"line_max": 78,
"alpha_frac": 0.5972785177,
"autogenerated": false,
"ratio": 3.7543478260869567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48516263437869567,
"avg_score": null,
"num_lines": null
} |
from aiorchestra.core import utils
from openstack_plugin.common import clients
from openstack_plugin.networking import router
@utils.operation
async def router_create(node, inputs):
    """Create (or adopt) a Neutron router for *node*.

    Passes any 'external_gateway_info' recorded by the gateway link
    relationship into the create call.
    """
    node.context.logger.info('[{0}] - Attempting to create router.'
                             .format(node.name))
    client = clients.openstack.neutron(node)
    existing_id = node.properties.get('router_id')
    reuse = bool(existing_id)
    ident = existing_id if reuse else node.properties.get('router_name')
    gateway = node.runtime_properties.get(
        'external_gateway_info', {})
    _router = await router.create(
        node.context, ident, client,
        external_gateway_info=gateway,
        use_existing=reuse)
    node.update_runtime_properties('router_id', _router['id'])
    node.context.logger.info(
        '[{0}] - Router "{1}" created.'
        .format(node.name, ident))
@utils.operation
async def router_start(node, inputs):
    """Mirror the live router's attributes into runtime properties."""
    client = clients.openstack.neutron(node)
    details = client.show_router(
        node.get_attribute('router_id'))['router']
    node.batch_update_runtime_properties(**details)
    node.context.logger.info('[{0}] - Router started.'
                             .format(node.name))
@utils.operation
async def router_delete(node, inputs):
    """Delete the node's router unless it is externally managed.

    Retry knobs come from *inputs*: 'task_retries' and
    'task_retry_interval' (both default to 10).
    """
    task_retries = inputs.get('task_retries', 10)
    task_retry_interval = inputs.get('task_retry_interval', 10)
    neutron = clients.openstack.neutron(node)
    router_id = node.get_attribute('router_id')
    use_existing = True if node.properties.get('router_id') else False
    node.context.logger.info(
        # Fixed: second placeholder was '{0}', so router_id was never
        # shown in the log message.
        '[{0}] - Attempting to delete router "{1}".'
        .format(node.name, router_id))
    await router.delete(node.context, router_id, neutron,
                        use_existing=use_existing,
                        task_retry_interval=task_retry_interval,
                        task_retries=task_retries)
    node.context.logger.info('[{0}] - Router deleted.'
                             .format(node.name))
@utils.operation
async def link_router_to_external_network(source, target, inputs):
    """Store the external network id as the router's gateway config.

    The value is deliberately double-wrapped: router_create passes it to
    networking.router.create(), which merges it into the request body
    with dict.update(), yielding {'router': {..., 'external_gateway_info':
    {'network_id': ...}}} — the shape Neutron expects.  Do not "flatten"
    this without also changing router.create().
    """
    source.context.logger.info('[{0} -----> {1}] - Connecting router to '
                               'external network.'
                               .format(target.name, source.name))
    source.update_runtime_properties('external_gateway_info', {
        'external_gateway_info': {
            'network_id': target.get_attribute('network_id')
        }
    })
@utils.operation
async def unlink_router_from_external_network(source, target, inputs):
    """Forget the gateway configuration recorded on the router node."""
    source.context.logger.info('[{0} --X--> {1}] - Disconnecting router from '
                               'external network.'
                               .format(target.name, source.name))
    props = source.runtime_properties
    if 'external_gateway_info' in props:
        del props['external_gateway_info']
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/tasks/router.py",
"copies": "1",
"size": "3614",
"license": "apache-2.0",
"hash": -7964261575703325000,
"line_mean": 37.4468085106,
"line_max": 78,
"alpha_frac": 0.6427780852,
"autogenerated": false,
"ratio": 3.9583789704271632,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5101157055627163,
"avg_score": null,
"num_lines": null
} |
from aiorchestra.core import utils
from openstack_plugin.common import clients
from openstack_plugin.networking import security_group_and_rules
@utils.operation
async def security_group_create(node, inputs):
    """Create (or adopt) a security group, then add its queued rules.

    Rules are accumulated by connect_security_groups_rule as a list of
    single-entry dicts ({rule_node_name: rule_properties}).
    """
    neutron = clients.openstack.neutron(node)
    sg_name = node.properties.get('security_group_name')
    sg_id = node.properties.get('security_group_id')
    # An explicit id means the group pre-exists and is only looked up.
    use_existing = True if sg_id else False
    description = node.properties.get(
        'description', 'Security group for node [{0}]'
        .format(node.name))
    rules = node.runtime_properties.get('security_group_rules', [])
    node.context.logger.info(
        '[{0}] - Attempting to create security group.'
        .format(node.name))
    identifier = sg_name if not sg_id else sg_id
    sg = await security_group_and_rules.create(
        node.context, identifier, neutron,
        description=description, use_existing=use_existing)
    node.update_runtime_properties('security_group_id', sg['id'])
    # Unwrap each {name: rule} entry to its single rule-properties value.
    _rs = [list(r.values()).pop() for r in rules]
    await security_group_and_rules.create_rules(
        node.context, sg['id'], _rs, neutron)
    node.context.logger.info(
        '[{0}] - Security group created.'.format(node.name))
@utils.operation
async def security_group_delete(node, inputs):
    """Remove the node's security group, honouring external ones."""
    node.context.logger.info(
        '[{0}] - Attempting to delete security group.'
        .format(node.name))
    client = clients.openstack.neutron(node)
    group_id = node.get_attribute('security_group_id')
    externally_managed = bool(
        node.properties.get('security_group_id'))
    await security_group_and_rules.delete(
        node.context, group_id, client,
        use_existing=externally_managed)
    node.context.logger.info(
        '[{0}] - Security group deleted.'
        .format(node.name))
@utils.operation
async def connect_security_groups_rule(source, target, inputs):
    """Queue the target rule node's properties on the security group."""
    source.context.logger.info('[{0} -----> {1}] - '
                               'Connecting security group '
                               'rule to security group.'
                               .format(target.name, source.name))
    pending = source.runtime_properties.get('security_group_rules', [])
    pending.append({target.name: target.properties})
    source.update_runtime_properties('security_group_rules', pending)
@utils.operation
async def disconnect_security_groups_rule(source, target, inputs):
    """Drop every queued rule definition from the security group node."""
    source.context.logger.info('[{0} --X--> {1}] - '
                               'Disconnecting security group '
                               'rule from security group.'
                               .format(target.name, source.name))
    props = source.runtime_properties
    if 'security_group_rules' in props:
        del props['security_group_rules']
@utils.operation
async def link_security_groups_to_port(source, target, inputs):
    """Append the target's security group id to the port's list."""
    groups = source.runtime_properties.get('security_groups', [])
    groups.append(target.get_attribute('security_group_id'))
    source.update_runtime_properties('security_groups', groups)
@utils.operation
async def unlink_security_groups_from_port(source, target, inputs):
    """Forget the security group ids collected on the port node."""
    props = source.runtime_properties
    if 'security_groups' in props:
        del props['security_groups']
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/tasks/security_group.py",
"copies": "1",
"size": "3853",
"license": "apache-2.0",
"hash": 8881628741872361000,
"line_mean": 37.53,
"line_max": 78,
"alpha_frac": 0.6636387231,
"autogenerated": false,
"ratio": 3.87625754527163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 100
} |
from aiorchestra.core import utils
from openstack_plugin.common import clients
from openstack_plugin.networking import subnet
# https://wiki.openstack.org/wiki/Neutron/APIv2-specification#Create_Subnet
@utils.operation
async def subnet_create(node, inputs):
    """Create (or adopt) a subnet on the linked network.

    Requires a relationship that stored the parent network id under
    'link_id'; raises otherwise.  The created subnet's id and name are
    recorded under the 'network_id'/'network_name' runtime properties
    (this is what the port/net link tasks rely on).
    """
    if 'link_id' not in node.runtime_properties:
        raise Exception('Unable to create subnet for node "{0}". '
                        'It is necessary to use relationship '
                        'to link subnet to network.'.format(node.name))
    neutron = clients.openstack.neutron(node)
    link_id = node.runtime_properties['link_id']
    network_id = node.properties.get('network_id')
    network_name = node.properties.get('network_name')
    ip_version = node.properties.get('ip_version')
    cidr = node.properties.get('cidr')
    pool_start_ip = node.properties.get('start_ip')
    pool_end_ip = node.properties.get('end_ip')
    dhcp_enabled = node.properties.get('dhcp_enabled')
    dns_nameservers = node.properties.get('dns_nameservers')
    router_id = node.runtime_properties.get('router_id')
    use_existing = True if network_id else False
    identifier = network_name if not network_id else network_id
    allocation_pools = [
        {
            'start': pool_start_ip,
            'end': pool_end_ip,
        },
    ]
    node.context.logger.info(
        '[{0}] - Attempting to create subnet "{1}" for network "{2}".'
        .format(node.name, network_name, link_id))
    _subnet = await subnet.create(
        node.context,
        identifier,
        neutron,
        link_id,
        ip_version,
        cidr,
        allocation_pools,
        dns_nameservers,
        dhcp_enabled=dhcp_enabled,
        router_id=router_id,
        use_existing=use_existing
    )
    node.batch_update_runtime_properties(**{
        'link_id': link_id,
        'network_id': _subnet['id'],
        'network_name': _subnet['name'],
        'ip_version': ip_version,
        'cidr': cidr,
        # Fixed typo: the pool start was previously stored only under
        # the misspelled key 'star_ip'.  The old key is kept so any
        # existing consumers keep working.
        'start_ip': pool_start_ip,
        'star_ip': pool_start_ip,
        'end_ip': pool_end_ip,
        'enable_dhcp': dhcp_enabled,
        'dns_nameservers': dns_nameservers,
        'gateway_ip': _subnet['gateway_ip'],
        'host_routes': _subnet['host_routes'],
        'router_id': router_id,
    })
    node.context.logger.info(
        '[{0}] - Subnet "{1}" for network "{2}" was created.'
        .format(node.name, identifier, link_id))
@utils.operation
async def subnet_delete(node, inputs):
    """Delete the node's subnet, detaching it from its router first."""
    retries = inputs.get('task_retries', 10)
    interval = inputs.get('task_retry_interval', 10)
    node.context.logger.info('[{0}] - Attempting to delete subnet "{1}".'
                             .format(node.name,
                                     node.get_attribute('network_id')))
    client = clients.openstack.neutron(node)
    # For subnet nodes 'network_id' holds the subnet id itself.
    await subnet.delete(node.context,
                        node.get_attribute('network_id'),
                        client,
                        router_id=node.runtime_properties.get('router_id'),
                        use_existing=bool(
                            node.properties.get('network_id')),
                        task_retry_interval=interval,
                        task_retries=retries)
@utils.operation
async def link_subnet_to_router(source, target, inputs):
    """Record the target router's id on the subnet node."""
    source.context.logger.info('[{0} -----> {1}] - Connecting network to '
                               'router "{2}".'
                               .format(target.name,
                                       source.name,
                                       target.get_attribute('router_id')))
    source.update_runtime_properties(
        'router_id', target.get_attribute('router_id'))
@utils.operation
async def unlink_subnet_to_router(source, target, inputs):
    """Detach the subnet node from its router reference.

    Drops the 'router_id' runtime property recorded by
    link_subnet_to_router.
    """
    # Arrow fixed from '----->' to '--X-->' for consistency with every
    # other unlink/disconnect handler in these task modules.
    source.context.logger.info('[{0} --X--> {1}] - Disconnecting network from '
                               'router "{2}".'
                               .format(target.name,
                                       source.name,
                                       target.get_attribute('router_id')))
    if 'router_id' in source.runtime_properties:
        del source.runtime_properties['router_id']
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/tasks/subnet.py",
"copies": "1",
"size": "4862",
"license": "apache-2.0",
"hash": 6501643038868763000,
"line_mean": 35.5563909774,
"line_max": 79,
"alpha_frac": 0.5909090909,
"autogenerated": false,
"ratio": 3.9754701553556826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5066379246255682,
"avg_score": null,
"num_lines": null
} |
from aiorchestra.core import utils
from openstack_plugin.common import clients
def collect_member_net_attribute(members, attr):
    """Gather *attr* from every fixed IP of every member interface.

    Each member is a mapping with a 'member_interfaces' entry; each
    interface exposes a 'fixed_ips' attribute holding mappings indexed
    by *attr*.
    """
    return [
        fixed_ip[attr]
        for member in members
        for interface in member.get('member_interfaces')
        for fixed_ip in interface.fixed_ips
    ]
@utils.operation
async def create(node, inputs):
    """Create a Neutron LBaaS pool for this load-balancer node.

    Tries the LBaaS v2 request shape first ('lb_algorithm'); if Neutron
    rejects that field, retries with the v1 shape ('lb_method').
    """
    node.context.logger.info('[{0}] - Attempting to create '
                             'load balancer for required members.'
                             .format(node.name))
    client_capability = node.get_capability('client')
    if not client_capability:
        raise Exception('[{0}] - Unable to resolve load balancer '
                        '"client" capability.'.format(node.name))
    # NOTE(review): the capability key is 'network_id' but it is used as
    # the pool's subnet_id — presumably the client capability stores a
    # subnet id under that name; TODO confirm against the node type.
    subnet_id = client_capability.get('network_id')
    protocol = node.properties.get('protocol')
    algorithm = node.properties.get('algorithm')
    pool_dict = {
        "pool": {
            "admin_state_up": True,
            "description": "pool for load balancer {0}".format(node.name),
            # LBaaS v2 field; replaced below when only v1 is available.
            "lb_algorithm": algorithm,
            "name": node.name,
            "protocol": protocol,
            "subnet_id": subnet_id,
        }
    }
    neutron = clients.openstack.neutron(node)
    try:
        pool = neutron.create_pool(body=pool_dict)['pool']
    except Exception as ex:
        # v1 detection is done by sniffing the error text for the
        # rejected field name — brittle, but there is no cleaner signal
        # from the client here.
        if 'lb_algorithm' in str(ex):
            node.context.logger.warn('[{0}] - Falling back to '
                                     'LBaaS v1.'.format(node.name))
            # LBaaS v1 calls the same knob 'lb_method'.
            del pool_dict['pool']['lb_algorithm']
            pool_dict['pool']['lb_method'] = algorithm
            pool = neutron.create_pool(body=pool_dict)['pool']
        else:
            raise ex
    node.context.logger.info('[{0}] - Creating pool with identifiers: {1}.'
                             .format(node.name, str(pool_dict)))
    node.batch_update_runtime_properties(**pool)
    node.update_runtime_properties('network_id', subnet_id)
    node.context.logger.info('[{0}] - Load balancer created.'
                             .format(node.name))
@utils.operation
async def start(node, inputs):
    """Register all queued pool members once the pool becomes ACTIVE.

    Members are queued by the add_member relationship handler as dicts
    with 'ip_address' and 'weight'; each created Neutron member replaces
    its queued entry in the 'pool_members' runtime property.
    """
    node.context.logger.info('[{0}] - Attempting to start '
                             'load balancer for required members.'
                             .format(node.name))
    # wait until pool is in ACTIVE state
    neutron = clients.openstack.neutron(node)
    pool_id = node.runtime_properties['id']
    async def await_for_active_state():
        # Poll predicate for utils.retry: True once the pool is ACTIVE.
        pool = neutron.show_pool(pool_id)['pool']
        return pool['status'] == 'ACTIVE'
    await utils.retry(await_for_active_state, task_retries=3)
    protocol_port = node.properties.get('protocol_port')
    members = node.runtime_properties.get('pool_members')
    pool_members = []
    for member in members:
        member_dict = {
            'member': {
                'address': member['ip_address'],
                'protocol_port': protocol_port,
                'weight': member['weight'],
                'pool_id': pool_id,
            }
        }
        node.context.logger.info('[{0}] - Adding member to load '
                                 'balancer pool.'.format(node.name))
        # Rebinds the loop variable to the Neutron response on purpose:
        # the stored entries are the created members (with their ids).
        member = neutron.create_member(body=member_dict)['member']
        pool_members.append(member)
    node.update_runtime_properties('pool_members', pool_members)
    node.context.logger.info('[{0}] - Load balancer started.'
                             .format(node.name))
@utils.operation
async def stop(node, inputs):
    """Remove every pool member registered by `start`."""
    node.context.logger.info('[{0}] - Attempting to stop '
                             'load balancer for required members.'
                             .format(node.name))
    client = clients.openstack.neutron(node)
    for registered in node.runtime_properties.get('pool_members', []):
        client.delete_member(registered['id'])
    node.context.logger.info('[{0}] - Load balancer stopped.'
                             .format(node.name))
@utils.operation
async def delete(node, inputs):
    """Drop the Neutron pool backing this load balancer."""
    node.context.logger.info('[{0}] - Attempting to delete '
                             'load balancer for required members.'
                             .format(node.name))
    client = clients.openstack.neutron(node)
    client.delete_pool(node.runtime_properties['id'])
    node.context.logger.info('[{0}] - Load balancer deleted.'
                             .format(node.name))
@utils.operation
async def add_member(source, target, inputs):
    """Queue a pool member entry built from the member capability."""
    source.context.logger.info('[{0} -----> {1}] - Connecting '
                               'application compute to '
                               'load balancer members.'
                               .format(target.name, source.name))
    capability = source.get_requirement_capability(target)
    members = source.runtime_properties.get('pool_members', [])
    members.append({
        'ip_address': capability.get('ip_address'),
        'weight': capability.get('weight'),
    })
    source.update_runtime_properties('pool_members', members)
@utils.operation
async def remove_member(source, target, inputs):
    """Forget all queued pool members on disconnect."""
    source.context.logger.info('[{0} --X--> {1}] - Disconnecting '
                               'application compute from '
                               'load balancer members.'
                               .format(target.name, source.name))
    props = source.runtime_properties
    if 'pool_members' in props:
        del props['pool_members']
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/tasks/lbaas.py",
"copies": "1",
"size": "6374",
"license": "apache-2.0",
"hash": 3449830167012078000,
"line_mean": 39.0880503145,
"line_max": 78,
"alpha_frac": 0.5855036084,
"autogenerated": false,
"ratio": 4.198945981554677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5284449589954677,
"avg_score": null,
"num_lines": null
} |
from aiorchestra.core import utils
async def create(context, name_or_id, neutronclient,
                 external_gateway_info=None,
                 use_existing=False):
    """Create a Neutron router, or fetch an existing one.

    :param context: orchestration context (used for logging)
    :param name_or_id: router name (create) or id (lookup)
    :param neutronclient: authorized Neutron client
    :param external_gateway_info: optional mapping merged into the
        'router' request body via dict.update()
    :param use_existing: when True only look the router up
    :return: the 'router' payload returned by Neutron
    """
    if use_existing:
        context.logger.info('Using existing router "{0}".'
                            .format(name_or_id))
        response = neutronclient.show_router(name_or_id)
        return response['router']
    context.logger.info(
        'Attempting to create new router "{0}".'
        .format(name_or_id))
    body = {
        'router': {
            'name': name_or_id,
        }
    }
    if external_gateway_info:
        body['router'].update(external_gateway_info)
    response = neutronclient.create_router(body)
    return response['router']
async def delete(context, name_or_id, neutronclient,
                 use_existing=False,
                 task_retries=None,
                 task_retry_interval=None):
    """
    Deletes a router and waits until it is actually gone.

    :param context: orchestration context (used for logging)
    :param name_or_id: router name or id to delete
    :param neutronclient: authorized Neutron client
    :param use_existing: when True the router is externally managed and
        is left untouched
    :param task_retries: number of polling attempts
    :param task_retry_interval: seconds between polling attempts
    :return: None
    """
    if use_existing:
        context.logger.info('Leaving router "{0}" as is, '
                            'because of it is external resource.'
                            .format(name_or_id))
        return
    context.logger.info(
        'Attempting to delete router "{0}".'
        .format(name_or_id))
    neutronclient.delete_router(name_or_id)
    async def is_gone():
        # Deletion is asynchronous on the Neutron side: poll until the
        # lookup starts failing.
        try:
            neutronclient.show_router(name_or_id)
            return False
        except Exception as ex:
            context.logger.debug(str(ex))
            return True
    await utils.retry(is_gone, exceptions=(Exception,),
                      task_retry_interval=task_retry_interval,
                      task_retries=task_retries)
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/networking/router.py",
"copies": "1",
"size": "2645",
"license": "apache-2.0",
"hash": 7120134587429198000,
"line_mean": 30.1176470588,
"line_max": 78,
"alpha_frac": 0.5871455577,
"autogenerated": false,
"ratio": 4.08179012345679,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.516893568115679,
"avg_score": null,
"num_lines": null
} |
from aiorchestra.core import utils
async def create(context, name_or_id,
                 neutronclient,
                 network_id,
                 subnet_id=None,
                 ip_addresses=None,
                 admin_state_up=True,
                 security_groups=None,
                 use_existing=False):
    """Create a port on *network_id*, or fetch an existing one.

    :param context: orchestration context (used for logging)
    :param name_or_id: port name (create) or id (lookup)
    :param neutronclient: authorized Neutron client
    :param network_id: network the port belongs to
    :param subnet_id: optional subnet for the fixed-ip entry
    :param ip_addresses: optional fixed ip(s) for that subnet
    :param admin_state_up: admin state to assign
    :param security_groups: optional list of security group ids
    :param use_existing: when True only look the port up
    :return: the 'port' payload returned by Neutron
    """
    if use_existing:
        context.logger.info('Using existing port "{0}".'
                            .format(name_or_id))
        response = neutronclient.show_port(name_or_id)
        return response['port']
    body = {
        'port': {
            'admin_state_up': admin_state_up,
            'name': name_or_id,
            'network_id': network_id,
        }
    }
    # Fixed ips are only attached when a subnet was supplied.
    if subnet_id:
        entry = {'subnet_id': subnet_id}
        if ip_addresses:
            entry.update({'ip_address': ip_addresses})
        body['port']['fixed_ips'] = [entry]
    if security_groups:
        body['port']['security_groups'] = security_groups
    context.logger.info('Creating port with identifiers: {0}'
                        .format(str(body)))
    response = neutronclient.create_port(body=body)
    return response['port']
async def delete(context, name_or_id, neutronclient,
                 use_existing=False,
                 task_retry_interval=None,
                 task_retries=None):
    """
    Deletes a port and waits until it is actually gone.

    :param context: orchestration context (used for logging)
    :param name_or_id: port name or id to delete
    :param neutronclient: authorized Neutron client
    :param use_existing: when True the port is externally managed and
        is left untouched
    :param task_retry_interval: seconds between polling attempts
    :param task_retries: number of polling attempts
    :return: None
    """
    if use_existing:
        context.logger.info(
            'Leaving port "{0}" as is, '
            'because of it is external resource.'
            .format(name_or_id))
        return
    neutronclient.delete_port(name_or_id)
    async def is_gone():
        # Deletion is asynchronous on the Neutron side: poll until the
        # lookup starts failing.
        try:
            neutronclient.show_port(name_or_id)
            return False
        except Exception as ex:
            context.logger.debug(str(ex))
            return True
    await utils.retry(is_gone, exceptions=(Exception,),
                      task_retries=task_retries,
                      task_retry_interval=task_retry_interval)
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/networking/port.py",
"copies": "1",
"size": "3188",
"license": "apache-2.0",
"hash": -7231048548089948000,
"line_mean": 29.9514563107,
"line_max": 78,
"alpha_frac": 0.5677540778,
"autogenerated": false,
"ratio": 4.102960102960103,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5170714180760103,
"avg_score": null,
"num_lines": null
} |
from aiorchestra.core import utils
async def create(context,
                 name_or_id,
                 neutronclient,
                 is_external=False,
                 admin_state_up=True,
                 use_existing=False):
    """
    Provision (or look up) a network via the Neutron API.

    :param context: OrchestraContext instance
    :param name_or_id: network name (create) or id (lookup)
    :param neutronclient: authorized Neutron client
    :param is_external: require the network to be router:external
    :param admin_state_up: admin state to assign on creation
    :param use_existing: when True only look the network up
    :return: the 'network' payload returned by Neutron
    :rtype: dict
    """
    if use_existing:
        context.logger.info('Using existing network.')
        response = neutronclient.show_network(name_or_id)
    else:
        spec = {'name': name_or_id,
                'admin_state_up': admin_state_up}
        context.logger.info(
            'Creating new network with identifiers {0}.'
            .format(str(spec)))
        response = neutronclient.create_network({'network': spec})
    if is_external:
        context.logger.info(
            'Attempting to get external network details.')
        details = response['network']
        if not details['router:external']:
            raise Exception(
                'Network "{0}" is not an external. Details: {1}'
                .format(details['id'], str(details)))
    return response['network']
async def delete(context,
                 name_or_id,
                 neutronclient,
                 use_existing=False,
                 task_retries=None,
                 task_retry_interval=None):
    """
    Deletes a network and waits until it is actually gone.

    :param context: OrchestraContext instance
    :param name_or_id: Network name or ID
    :param neutronclient: Authorized Neutron client
    :param use_existing: when True the network is externally managed
        and is left untouched
    :param task_retries: number of polling attempts
    :param task_retry_interval: seconds between polling attempts
    :return: None
    """
    if use_existing:
        context.logger.info('Network "{0}" remains as is, '
                            'because it is an external resource.'
                            .format(name_or_id))
        return
    # Original re-checked `if not use_existing:` here; after the early
    # return above that condition is always true, so the redundant
    # guard was removed.
    neutronclient.delete_network(name_or_id)
    async def is_gone():
        # Deletion is asynchronous on the Neutron side: poll until the
        # lookup starts failing.
        try:
            neutronclient.show_network(name_or_id)
            return False
        except Exception as ex:
            context.logger.debug(str(ex))
            return True
    await utils.retry(is_gone, exceptions=(Exception, ),
                      task_retries=task_retries,
                      task_retry_interval=task_retry_interval)
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/networking/network.py",
"copies": "1",
"size": "3374",
"license": "apache-2.0",
"hash": 7273226558076974000,
"line_mean": 35.6739130435,
"line_max": 78,
"alpha_frac": 0.5981031417,
"autogenerated": false,
"ratio": 4.44532279314888,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.554342593484888,
"avg_score": null,
"num_lines": null
} |
from aiorchestra.core import utils
async def create(context,
                 name_or_id,
                 neutronclient,
                 network_id,
                 ip_version,
                 cidr,
                 allocation_pools,
                 dns_nameservers,
                 dhcp_enabled=True,
                 router_id=None,
                 use_existing=False,):
    """
    Create (or adopt) a subnet on *network_id*, optionally attaching it
    to a router.

    :param context: orchestration context (used for logging)
    :param name_or_id: subnet name (create) or id (lookup)
    :param neutronclient: authorized Neutron client
    :param network_id: parent network id
    :param ip_version: 4 or 6
    :param cidr: subnet CIDR
    :param allocation_pools: list of {'start', 'end'} pools
    :param dns_nameservers: list of DNS server addresses
    :param dhcp_enabled: enable DHCP on the subnet
    :param router_id: optional router to attach the subnet to
    :param use_existing: when True only look the subnet up
    :return: the 'subnet' payload returned by Neutron
    """
    if use_existing:
        response = neutronclient.show_subnet(name_or_id)
        # An adopted subnet must belong to the expected network.
        if response['subnet']['network_id'] != network_id:
            raise Exception('Subnet network mismatch while '
                            'using existing port.')
    else:
        body = {
            'subnet': {
                'name': name_or_id,
                'network_id': network_id,
                'ip_version': ip_version,
                'cidr': cidr,
                'allocation_pools': allocation_pools,
                'dns_nameservers': dns_nameservers,
                'enable_dhcp': dhcp_enabled,
            }
        }
        response = neutronclient.create_subnet(body=body)
    if router_id:
        context.logger.info(
            'Attaching subnet "{0}" to router "{1}".'
            .format(response['subnet']['id'], router_id))
        neutronclient.add_interface_router(
            router_id, {'subnet_id': response['subnet']['id']})
    return response['subnet']
async def delete(context, name_or_id, neutronclient,
                 router_id=None, use_existing=False,
                 task_retry_interval=None,
                 task_retries=None):
    """
    Deletes a subnet, detaching it from *router_id* first.

    :param context: orchestration context (used for logging)
    :param name_or_id: subnet name or id to delete
    :param neutronclient: authorized Neutron client
    :param router_id: optional router to detach the subnet from
    :param use_existing: when True the subnet is externally managed and
        is only detached, never deleted
    :param task_retry_interval: seconds between polling attempts
    :param task_retries: number of polling attempts
    :return: None
    """
    if router_id:
        # Message fixed: previously read 'Detaching subnet "{0}" to
        # router', which contradicted the action.
        context.logger.info('Detaching subnet "{0}" '
                            'from router "{1}".'
                            .format(name_or_id, router_id))
        neutronclient.remove_interface_router(
            router_id, {'subnet_id': name_or_id})
    if not use_existing:
        neutronclient.delete_subnet(name_or_id)
        async def is_gone():
            # Deletion is asynchronous on the Neutron side: poll until
            # the lookup starts failing.
            try:
                neutronclient.show_subnet(name_or_id)
                return False
            except Exception as ex:
                context.logger.debug(str(ex))
                return True
        await utils.retry(is_gone, exceptions=(Exception,),
                          task_retries=task_retries,
                          task_retry_interval=task_retry_interval)
    else:
        context.logger.info('Subnet "{0}" remains as is, '
                            'because it is external resource.'
                            .format(name_or_id))
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/networking/subnet.py",
"copies": "1",
"size": "3681",
"license": "apache-2.0",
"hash": 3680811571247947000,
"line_mean": 31.2894736842,
"line_max": 78,
"alpha_frac": 0.5479489269,
"autogenerated": false,
"ratio": 4.295215869311552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5343164796211552,
"avg_score": null,
"num_lines": null
} |
from aiorchestra.tests import base as aiorchestra
from openstack_plugin.tests.integration import base
from openstack_plugin.tests.integration import config
class TestComplex(base.BaseAIOrchestraOpenStackTestCase):
    """Integration tests for multi-node OpenStack topologies.

    Each test body is intentionally empty: the with_deployed decorator
    deploys the named TOSCA template with the configured inputs and
    undeploys it afterwards, so the deploy/undeploy cycle itself is the
    assertion.
    """
    def setUp(self):
        super(TestComplex, self).setUp()
    def tearDown(self):
        super(TestComplex, self).tearDown()
    @aiorchestra.with_deployed('orchestra-openstack-compute-'
                               'with-floating-ip.yaml',
                               inputs=config.CONFIG)
    def test_compute_with_floating_ip(self, context):
        pass
    @aiorchestra.with_deployed('orchestra-openstack-network-subnet-'
                               'port-attached-to-compute-with-sgs.yaml',
                               inputs=config.CONFIG)
    def test_compute_with_port_and_security_group(self, context):
        pass
    @aiorchestra.with_deployed('orchestra-openstack-network-'
                               'subnet-port-attached-to-compute.yaml',
                               inputs=config.CONFIG)
    def test_network_subnet_and_port_to_compute(self, context):
        pass
    @aiorchestra.with_deployed(
        'orchestra-openstack-network-subnet-'
        'router-two-ports-attached-to-compute.yaml',
        inputs=config.CONFIG)
    def test_ext_net_router_network_subnet_and_two_ports_to_compute(
            self, context):
        pass
    @aiorchestra.with_deployed(
        'orchestra-openstack-network-subnet-'
        'two-ports-attached-to-compute.yaml',
        inputs=config.CONFIG)
    def test_network_subnet_and_two_ports_to_compute(self, context):
        pass
    @aiorchestra.with_deployed('orchestra-openstack-vrouter-base.yaml',
                               inputs=config.CONFIG)
    def test_vrouter_base(self, context):
        pass
    @aiorchestra.with_deployed('orchestra-openstack-vrouter-'
                               'base-with-two-computes.yaml',
                               inputs=config.CONFIG)
    def test_vrouter_base_with_two_computes(self, context):
        pass
    @aiorchestra.with_deployed('orchestra-openstack-vrouter-'
                               'base-with-external-access.yaml',
                               inputs=config.CONFIG)
    def test_vrouter_base_with_external_access(self, context):
        pass
    @aiorchestra.with_deployed('orchestra-openstack-load-balancer.yaml',
                               inputs=config.CONFIG)
    def test_lbaas(self, context):
        pass
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/tests/integration/test_complex.py",
"copies": "1",
"size": "3102",
"license": "apache-2.0",
"hash": -8617472114487124000,
"line_mean": 36.8292682927,
"line_max": 78,
"alpha_frac": 0.6292714378,
"autogenerated": false,
"ratio": 3.9415501905972046,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 82
} |
from glanceclient.v2 import client as glanceclient
from keystoneauth1 import loading
from keystoneauth1 import session
from keystoneclient import client as keystoneclient
from novaclient import client as novaclient
from neutronclient.v2_0 import client as neutronclient
class OpenStackClients(object):
    """Lazily-constructed, cached OpenStack service clients.

    Each accessor builds its client on first use from the given node's
    properties/runtime properties and caches it for subsequent calls.
    NOTE(review): caching keys on nothing but "first call wins" — a node
    with different credentials passed later still gets the first client;
    confirm this is intended for the orchestration lifecycle.
    """

    __keystone = None
    __nova = None
    __neutron = None
    __glance = None

    def __password_session_setup(self, node):
        """Build a keystoneauth session from the node's auth properties.

        Works on a *copy* of the credentials dict: the original code
        deleted 'region_name' from the node's shared
        ``runtime_properties['auth_properties']`` mapping in place,
        silently mutating node state for every later reader.
        """
        creds = dict(node.runtime_properties['auth_properties'])
        # region_name is not an option accepted by the password plugin.
        creds.pop('region_name', None)
        loader = loading.get_plugin_loader('password')
        auth = loader.load_from_options(**creds)
        return session.Session(auth=auth)

    def keystone(self, node):
        """Return a cached keystone client built from node properties."""
        if self.__keystone is None:
            self.__keystone = keystoneclient.Client(**node.properties)
            self.__keystone.authenticate()
        return self.__keystone

    def nova(self, node):
        """Return a cached nova client for the node's compute API version."""
        if self.__nova is None:
            version = node.properties['compute_api_version']
            use_connection_pool = node.properties['use_connection_pool']
            self.__nova = novaclient.Client(
                version, session=self.__password_session_setup(node),
                connection_pool=use_connection_pool)
        return self.__nova

    def neutron(self, node):
        """Return a cached neutron client using a password session."""
        if self.__neutron is None:
            self.__neutron = neutronclient.Client(
                session=self.__password_session_setup(node))
        return self.__neutron

    def glance(self, node):
        """Return a cached glance client using a password session."""
        if self.__glance is None:
            self.__glance = glanceclient.Client(
                session=self.__password_session_setup(node))
        return self.__glance


# Module-level singleton shared by the plugin's task code.
openstack = OpenStackClients()
| {
"repo_name": "aiorchestra/aiorchestra-openstack-plugin",
"path": "openstack_plugin/common/clients.py",
"copies": "1",
"size": "2375",
"license": "apache-2.0",
"hash": -3476055939082463700,
"line_mean": 33.9264705882,
"line_max": 78,
"alpha_frac": 0.6568421053,
"autogenerated": false,
"ratio": 4.302536231884058,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 68
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.