text stringlengths 0 1.05M | meta dict |
|---|---|
"""A chart parser and some grammars. (Chapter 22)"""
from utils import *
#______________________________________________________________________________
# Grammars and Lexicons
def Rules(**rules):
    """Create a dictionary mapping symbols to alternative sequences.
    >>> Rules(A = "B C | D E")
    {'A': [['B', 'C'], ['D', 'E']]}
    """
    # Each right-hand side is a '|'-separated list of alternatives; each
    # alternative is a whitespace-separated sequence of symbols.
    return {symbol: [alternative.strip().split() for alternative in rhs.split('|')]
            for (symbol, rhs) in rules.items()}
def Lexicon(**rules):
    """Create a dictionary mapping symbols to alternative words.
    >>> Lexicon(Art = "the | a | an")
    {'Art': ['the', 'a', 'an']}
    """
    # Each right-hand side is a '|'-separated list of single words.
    return {category: [entry.strip() for entry in words.split('|')]
            for (category, words) in rules.items()}
class Grammar:
    """A context-free grammar: rewrite rules plus a lexicon.

    `rules` maps a nonterminal to a list of alternative right-hand sides
    (as produced by Rules); `lexicon` maps a lexical category to its word
    list (as produced by Lexicon).  `categories` is the inverse of the
    lexicon: it maps each word to the categories it can belong to.
    """
    def __init__(self, name, rules, lexicon):
        "A grammar has a set of rules and a lexicon."
        # `update` (from utils) assigns each keyword argument onto self.
        update(self, name=name, rules=rules, lexicon=lexicon)
        # Invert the lexicon: word -> list of categories containing it.
        # DefaultDict (from utils) supplies a fresh [] for unseen words.
        self.categories = DefaultDict([])
        for lhs in lexicon:
            for word in lexicon[lhs]:
                self.categories[word].append(lhs)
    def rewrites_for(self, cat):
        "Return a sequence of possible rhs's that cat can be rewritten as."
        # Returns () for unknown categories so callers can iterate safely.
        return self.rules.get(cat, ())
    def isa(self, word, cat):
        "Return True iff word is of category cat"
        return cat in self.categories[word]
    def __repr__(self):
        return '<Grammar %s>' % self.name
E0 = Grammar('E0',
    Rules( # Grammar for E_0 [Fig. 22.4]
        # 'Name' added to the NP alternatives: the lexicon below defines a
        # Name category (John, Mary, ...) which Fig. 22.4 uses, but it was
        # unreachable without an NP -> Name rule.
        S = 'NP VP | S Conjunction S',
        NP = 'Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause',
        VP = 'Verb | VP NP | VP Adjective | VP PP | VP Adverb',
        PP = 'Preposition NP',
        RelClause = 'That VP'),
    Lexicon( # Lexicon for E_0 [Fig. 22.3]
        Noun = "stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east",
        Verb = "is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel",
        Adjective = "right | left | east | south | back | smelly",
        Adverb = "here | there | nearby | ahead | right | left | east | south | back",
        Pronoun = "me | you | I | it",
        Name = "John | Mary | Boston | Aristotle",
        Article = "the | a | an",
        Preposition = "to | in | on | near",
        Conjunction = "and | or | but",
        Digit = "0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9",
        That = "that"
    ))
# A deliberately tiny grammar/lexicon pair used by generate_random and tests.
E_ = Grammar('E_', # Trivial Grammar and lexicon for testing
    Rules(
        S = 'NP VP',
        NP = 'Art N | Pronoun',
        VP = 'V NP'),
    Lexicon(
        Art = 'the | a',
        N = 'man | woman | table | shoelace | saw',
        Pronoun = 'I | you | it',
        V = 'saw | liked | feel'
    ))
def generate_random(grammar=E_, s='S'):
    """Replace each token in s by a random entry in grammar (recursively).
    This is useful for testing a grammar, e.g. generate_random(E_)"""
    import random
    def expand(symbols, words):
        # Depth-first expansion: nonterminals are rewritten via a random
        # rule, lexical categories become a random word, anything else is
        # emitted verbatim.
        for symbol in symbols:
            if symbol in grammar.rules:
                expand(random.choice(grammar.rules[symbol]), words)
            elif symbol in grammar.lexicon:
                words.append(random.choice(grammar.lexicon[symbol]))
            else:
                words.append(symbol)
        return words
    return ' '.join(expand(s.split(), []))
#______________________________________________________________________________
# Chart Parsing
class Chart:
    """Class for parsing sentences using a chart data structure. [Fig 22.7]
    >>> chart = Chart(E0);
    >>> len(chart.parses('the stench is in 2 2'))
    1
    """
    def __init__(self, grammar, trace=False):
        """A datastructure for parsing a string; and methods to do the parse.
        self.chart[i] holds the edges that end just before the i'th word.
        Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
        # Plain attribute assignment instead of utils.update, so the class
        # does not depend on the utils module for construction.
        self.grammar = grammar
        self.trace = trace
    def parses(self, words, S='S'):
        """Return a list of parses; words can be a list or string."""
        if isinstance(words, str):
            words = words.split()
        self.parse(words, S)
        # Return only the parses that span the whole input: they must start
        # at 0 as well as end at len(words).  (Checking the end position
        # alone would also accept partial parses beginning mid-sentence.)
        return [[i, j, S, found, []]
                for (i, j, lhs, found, expects) in self.chart[len(words)]
                if i == 0 and lhs == S and expects == []]
    def parse(self, words, S='S'):
        """Parse a list of words; according to the grammar.
        Leave results in the chart."""
        # Seed with a dummy start edge expecting S at position 0.
        self.chart = [[] for i in range(len(words)+1)]
        self.add_edge([0, 0, 'S_', [], [S]])
        for i in range(len(words)):
            self.scanner(i, words[i])
        return self.chart
    def add_edge(self, edge):
        "Add edge to chart, and see if it extends or predicts another edge."
        start, end, lhs, found, expects = edge
        if edge not in self.chart[end]:  # ignore duplicate edges
            self.chart[end].append(edge)
            if self.trace:
                # Py3-compatible print function (print statement removed);
                # caller(2) comes from utils and names the adding method.
                print('%10s: added %s' % (caller(2), edge))
            if not expects:
                self.extender(edge)   # complete edge: extend waiting edges
            else:
                self.predictor(edge)  # incomplete: predict expected symbol
    def scanner(self, j, word):
        "For each edge expecting a word of this category here, extend the edge."
        for (i, j, A, alpha, Bb) in self.chart[j]:
            if Bb and self.grammar.isa(word, Bb[0]):
                self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])
    def predictor(self, edge):
        "Add to chart any rules for B that could help extend this edge."
        # Unpack inside the body: the Python-2-only tuple-parameter syntax
        # of the original does not compile on Python 3.
        (i, j, A, alpha, Bb) = edge
        B = Bb[0]
        if B in self.grammar.rules:
            for rhs in self.grammar.rewrites_for(B):
                self.add_edge([j, j, B, [], rhs])
    def extender(self, edge):
        "See what edges can be extended by this edge."
        (j, k, B, _, _) = edge
        for (i, j, A, alpha, B1b) in self.chart[j]:
            if B1b and B == B1b[0]:
                self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
#### TODO:
#### 1. Parsing with augmentations -- requires unification, etc.
#### 2. Sequitor
| {
"repo_name": "ken0nek/Software2",
"path": "140714/nlp.py",
"copies": "9",
"size": "6011",
"license": "mit",
"hash": 7787981093581479000,
"line_mean": 34.3588235294,
"line_max": 95,
"alpha_frac": 0.5338545999,
"autogenerated": false,
"ratio": 3.4289788933257275,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8462833493225728,
"avg_score": null,
"num_lines": null
} |
"""A chart parser and some grammars. (Chapter 22)"""
# (Written for the second edition of AIMA; expect some discrepancies
# from the third edition until this gets reviewed.)
from collections import defaultdict
import urllib.request
import re
# ______________________________________________________________________________
# Grammars and Lexicons
def Rules(**rules):
    """Create a dictionary mapping symbols to alternative sequences.
    >>> Rules(A = "B C | D E")
    {'A': [['B', 'C'], ['D', 'E']]}
    """
    # Split each right-hand side on '|' into alternatives, and each
    # alternative into its whitespace-separated symbols.
    return {symbol: [alternative.strip().split() for alternative in rhs.split('|')]
            for (symbol, rhs) in rules.items()}
def Lexicon(**rules):
    """Create a dictionary mapping symbols to alternative words.
    >>> Lexicon(Art = "the | a | an")
    {'Art': ['the', 'a', 'an']}
    """
    # A lexicon right-hand side is a '|'-separated list of single words.
    return {category: [entry.strip() for entry in words.split('|')]
            for (category, words) in rules.items()}
class Grammar:
    """A grammar: rewrite rules for nonterminals plus a lexicon of words.

    `categories` inverts the lexicon, mapping each word to every lexical
    category that lists it.
    """
    def __init__(self, name, rules, lexicon):
        """A grammar has a set of rules and a lexicon."""
        self.name = name
        self.rules = rules
        self.lexicon = lexicon
        # Invert the lexicon: word -> [category, ...]
        self.categories = defaultdict(list)
        for category, words in lexicon.items():
            for word in words:
                self.categories[word].append(category)
    def rewrites_for(self, cat):
        """Return a sequence of possible rhs's that cat can be rewritten as."""
        return self.rules.get(cat, ())
    def isa(self, word, cat):
        """Return True iff word is of category cat"""
        return cat in self.categories[word]
    def __repr__(self):
        return '<Grammar {}>'.format(self.name)
# The small English subset E_0 from AIMA Chapter 22.
E0 = Grammar('E0',
             Rules(  # Grammar for E_0 [Figure 22.4]
                 S='NP VP | S Conjunction S',
                 NP='Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause',  # noqa
                 VP='Verb | VP NP | VP Adjective | VP PP | VP Adverb',
                 PP='Preposition NP',
                 RelClause='That VP'),
             Lexicon(  # Lexicon for E_0 [Figure 22.3]
                 Noun="stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east",  # noqa
                 Verb="is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel",  # noqa
                 Adjective="right | left | east | south | back | smelly",
                 Adverb="here | there | nearby | ahead | right | left | east | south | back",  # noqa
                 Pronoun="me | you | I | it",
                 Name="John | Mary | Boston | Aristotle",
                 Article="the | a | an",
                 Preposition="to | in | on | near",
                 Conjunction="and | or | but",
                 Digit="0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9",
                 That="that"
                 ))
# A deliberately tiny grammar/lexicon used for quick testing.
E_ = Grammar('E_',  # Trivial Grammar and lexicon for testing
             Rules(
                 S='NP VP',
                 NP='Art N | Pronoun',
                 VP='V NP'),
             Lexicon(
                 Art='the | a',
                 N='man | woman | table | shoelace | saw',
                 Pronoun='I | you | it',
                 V='saw | liked | feel'
                 ))
# Minimal right-recursive NP grammar, used in the Chart.parses doctest.
E_NP_ = Grammar('E_NP_',  # another trivial grammar for testing
                Rules(NP='Adj NP | N'),
                Lexicon(Adj='happy | handsome | hairy',
                        N='man'))
def generate_random(grammar=E_, s='S'):
    """Replace each token in s by a random entry in grammar (recursively).
    This is useful for testing a grammar, e.g. generate_random(E_)"""
    import random
    def expand(symbols, words):
        # Nonterminals are rewritten via a random rule, lexical categories
        # become a random word, and unknown tokens are kept verbatim.
        for symbol in symbols:
            if symbol in grammar.rules:
                expand(random.choice(grammar.rules[symbol]), words)
            elif symbol in grammar.lexicon:
                words.append(random.choice(grammar.lexicon[symbol]))
            else:
                words.append(symbol)
        return words
    return ' '.join(expand(s.split(), []))
# ______________________________________________________________________________
# Chart Parsing
class Chart:
    """Class for parsing sentences using a chart data structure. [Figure 22.7]
    >>> chart = Chart(E0);
    >>> len(chart.parses('the stench is in 2 2'))
    1
    """
    def __init__(self, grammar, trace=False):
        """A datastructure for parsing a string; and methods to do the parse.
        self.chart[i] holds the edges that end just before the i'th word.
        Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
        self.grammar = grammar
        self.trace = trace  # when True, print each edge as it is added
    def parses(self, words, S='S'):
        """Return a list of parses; words can be a list or string."""
        if isinstance(words, str):
            words = words.split()
        self.parse(words, S)
        # Return all the parses that span the whole input
        # 'span the whole input' => begin at 0, end at len(words)
        return [[i, j, S, found, []]
                for (i, j, lhs, found, expects) in self.chart[len(words)]
                # assert j == len(words)
                if i == 0 and lhs == S and expects == []]
    def parse(self, words, S='S'):
        """Parse a list of words; according to the grammar.
        Leave results in the chart."""
        # Seed with a dummy start edge S_ expecting S at position 0; the
        # scanner then consumes the words left to right.
        self.chart = [[] for i in range(len(words)+1)]
        self.add_edge([0, 0, 'S_', [], [S]])
        for i in range(len(words)):
            self.scanner(i, words[i])
        return self.chart
    def add_edge(self, edge):
        "Add edge to chart, and see if it extends or predicts another edge."
        start, end, lhs, found, expects = edge
        if edge not in self.chart[end]:  # ignore duplicate edges
            self.chart[end].append(edge)
            if self.trace:
                print('Chart: added {}'.format(edge))
            if not expects:
                # Complete edge: try to advance edges waiting for this lhs.
                self.extender(edge)
            else:
                # Incomplete edge: predict rules for the next expected symbol.
                self.predictor(edge)
    def scanner(self, j, word):
        "For each edge expecting a word of this category here, extend the edge."  # noqa
        for (i, j, A, alpha, Bb) in self.chart[j]:
            if Bb and self.grammar.isa(word, Bb[0]):
                self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])
    def predictor(self, edge):
        "Add to chart any rules for B that could help extend this edge."
        (i, j, A, alpha, Bb) = edge
        B = Bb[0]  # the next symbol this edge expects
        if B in self.grammar.rules:
            for rhs in self.grammar.rewrites_for(B):
                self.add_edge([j, j, B, [], rhs])
    def extender(self, edge):
        "See what edges can be extended by this edge."
        (j, k, B, _, _) = edge
        # Any edge ending where this complete edge starts, and expecting B,
        # can be advanced past it.
        for (i, j, A, alpha, B1b) in self.chart[j]:
            if B1b and B == B1b[0]:
                self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
# ______________________________________________________________________________
# CYK Parsing
def CYK_parse(words, grammar):
    "[Figure 23.5]"
    # NOTE(review): this implementation is incomplete as written -- it relies
    # on grammar.categories yielding (category, probability) pairs and on a
    # grammar.cnf_rules() method, neither of which the Grammar class above
    # provides (see the XXX comments below).
    # We use 0-based indexing instead of the book's 1-based.
    N = len(words)
    # P[X, start, length]: probability of the best derivation in which
    # nonterminal X spans `length` words beginning at `start`; missing
    # entries default to 0.0.
    P = defaultdict(float)
    # Insert lexical rules for each word.
    for (i, word) in enumerate(words):
        for (X, p) in grammar.categories[word]:  # XXX grammar.categories needs changing, above
            P[X, i, 1] = p
    # Combine first and second parts of right-hand sides of rules,
    # from short to long.
    for length in range(2, N+1):
        for start in range(N-length+1):
            for len1 in range(1, length):  # N.B. the book incorrectly has N instead of length
                len2 = length - len1
                for (X, Y, Z, p) in grammar.cnf_rules():  # XXX grammar needs this method
                    P[X, start, length] = max(P[X, start, length],
                                              P[Y, start, len1] * P[Z, start+len1, len2] * p)
    return P
# ______________________________________________________________________________
# Page Ranking
# First entry in list is the base URL, and then following are relative URL pages
# NOTE(review): "Betrand Russell" (misspelled) and "Continental Philosophy"
# contain spaces; as URL path components they would need correcting/encoding
# before being fetched -- confirm against the intended Wikipedia titles.
examplePagesSet = ["https://en.wikipedia.org/wiki/", "Aesthetics", "Analytic_philosophy",
                   "Ancient_Greek", "Aristotle", "Astrology", "Atheism", "Baruch_Spinoza",
                   "Belief", "Betrand Russell", "Confucius", "Consciousness",
                   "Continental Philosophy", "Dialectic", "Eastern_Philosophy",
                   "Epistemology", "Ethics", "Existentialism", "Friedrich_Nietzsche",
                   "Idealism", "Immanuel_Kant", "List_of_political_philosophers", "Logic",
                   "Metaphysics", "Philosophers", "Philosophy", "Philosophy_of_mind", "Physics",
                   "Plato", "Political_philosophy", "Pythagoras", "Rationalism",
                   "Social_philosophy", "Socrates", "Subjectivity", "Theology",
                   "Truth", "Western_philosophy"]
def loadPageHTML(addressList):
    """Download HTML page content for every URL address passed as argument.

    Returns a dict mapping each address to its stripped-down HTML."""
    contentDict = {}
    for address in addressList:
        with urllib.request.urlopen(address) as response:
            raw_html = response.read().decode('utf-8')
        # Keep only the parts we care about (links and text).
        contentDict[address] = stripRawHTML(raw_html)
    return contentDict
def initPages(addressList):
    """Create a dictionary of pages from a list of URL addresses"""
    # One fresh Page object per address, keyed by that address.
    return {address: Page(address) for address in addressList}
def stripRawHTML(raw_html):
    """Remove the <head> section of the HTML, which contains links to
    stylesheets etc. and is irrelevant for link/text analysis."""
    # TODO: Strip more out of the raw html
    head_section = re.compile("<head>.*?</head>", flags=re.DOTALL)
    return head_section.sub("", raw_html)  # remove <head> section
def determineInlinks(page):
    """Given a set of pages that have their outlinks determined, we can fill
    out a page's inlinks by looking through all other page's outlinks"""
    # A page links in to us when our address appears among its outlinks;
    # the page itself is excluded.
    return [addr for addr, other in pagesIndex.items()
            if other.address != page.address
            and page.address in other.outlinks]
def findOutlinks(page, handleURLs=None):
    """Search a page's HTML content for URL links to other pages"""
    # Pull every href attribute value out of the stored HTML for this page.
    urls = re.findall(r'href=[\'"]?([^\'" >]+)', pagesContent[page.address])
    # Optionally post-process the URL list (e.g. onlyWikipediaURLS).
    return handleURLs(urls) if handleURLs else urls
def onlyWikipediaURLS(urls):
    """Some example HTML page data is from wikipedia. This function converts
    relative wikipedia links to full wikipedia URLs"""
    # Keep only relative /wiki/ links and make them absolute.
    return ["https://en.wikipedia.org" + url
            for url in urls if url.startswith('/wiki/')]
# ______________________________________________________________________________
# HITS Helper Functions
def expand_pages(pages):
    """From Textbook: adds in every page that links to or is linked from one of
    the relevant pages."""
    expanded = {}
    for addr, page in pages.items():
        expanded.setdefault(addr, page)
        # Pull in the neighbourhood: every page this one links to or from.
        for neighbour_addr in list(page.inlinks) + list(page.outlinks):
            if neighbour_addr not in expanded:
                expanded[neighbour_addr] = pagesIndex[neighbour_addr]
    return expanded
def relevant_pages(query):
    """Return the pages whose content contains the query in its entirety.

    Scans the module-level pagesIndex/pagesContent; the comparison is
    case-insensitive.  (A leftover debug print of the whole pagesContent
    dict has been removed.)"""
    # Lower-case the needle once instead of on every page.
    needle = query.lower()
    relevant = {}
    for addr, page in pagesIndex.items():
        if needle in pagesContent[addr].lower():
            relevant[addr] = page
    return relevant
def normalize(pages):
    """From the pseudocode: Normalize divides each page's score by the sum of
    the squares of all pages' scores (separately for both the authority and
    hubs scores).

    NOTE(review): Euclidean normalization would divide by the square ROOT of
    this sum; the straight sum of squares is kept because it matches the
    docstring above -- confirm against the textbook pseudocode."""
    hub_norm = sum(page.hub ** 2 for page in pages.values())
    auth_norm = sum(page.authority ** 2 for page in pages.values())
    for page in pages.values():
        page.hub /= hub_norm
        page.authority /= auth_norm
class ConvergenceDetector(object):
    """If the hub and authority values of the pages are no longer changing, we have
    reached a convergence and further iterations will have no effect. This detects convergence
    so that we can stop the HITS algorithm as early as possible."""
    def __init__(self):
        # Histories hold recent snapshots (at most 3 each) of the hub and
        # authority scores of every page in the global pagesIndex.
        self.hub_history = None
        self.auth_history = None
    def __call__(self):
        # Instances are callable so `convergence()` reads like the
        # pseudocode's function call.
        return self.detect()
    def detect(self):
        # Snapshot the current scores.  NOTE(review): this compares values
        # positionally, so it assumes pagesIndex iteration order is stable
        # between calls -- true for a dict that is not mutated meanwhile.
        curr_hubs = [page.hub for addr, page in pagesIndex.items()]
        curr_auths = [page.authority for addr, page in pagesIndex.items()]
        if self.hub_history is None:
            # First call: nothing to compare against yet.
            self.hub_history, self.auth_history = [], []
        else:
            # Mean absolute change since the previous snapshot.
            diffsHub = [abs(x-y) for x, y in zip(curr_hubs, self.hub_history[-1])]
            diffsAuth = [abs(x-y) for x, y in zip(curr_auths, self.auth_history[-1])]
            aveDeltaHub = sum(diffsHub)/float(len(pagesIndex))
            aveDeltaAuth = sum(diffsAuth)/float(len(pagesIndex))
            if aveDeltaHub < 0.01 and aveDeltaAuth < 0.01:  # may need tweaking
                return True
        if len(self.hub_history) > 2:  # prevent list from getting long
            del self.hub_history[0]
            del self.auth_history[0]
        self.hub_history.append([x for x in curr_hubs])
        self.auth_history.append([x for x in curr_auths])
        return False
def getInlinks(page):
    """Return the Page objects in pagesIndex whose address appears among
    page.inlinks, computing (and caching) the inlinks on first use."""
    if not page.inlinks:
        page.inlinks = determineInlinks(page)
    wanted = set(page.inlinks)
    return [p for addr, p in pagesIndex.items() if addr in wanted]
def getOutlinks(page):
    """Return the Page objects in pagesIndex whose address appears among
    page.outlinks, computing (and caching) the outlinks on first use."""
    if not page.outlinks:
        page.outlinks = findOutlinks(page)
    wanted = set(page.outlinks)
    return [p for addr, p in pagesIndex.items() if addr in wanted]
# ______________________________________________________________________________
# HITS Algorithm
class Page(object):
    """A web page for the HITS computation: its address, its hub and
    authority scores, and the addresses it links to / is linked from."""
    def __init__(self, address, hub=0, authority=0, inlinks=None, outlinks=None):
        self.address = address
        # Hub/authority scores start at 0 and are updated by HITS().
        self.hub = hub
        self.authority = authority
        # Link lists default to None and are filled lazily by the helpers.
        self.inlinks = inlinks
        self.outlinks = outlinks
pagesContent = {}  # maps Page relative or absolute URL/location to page's HTML content
pagesIndex = {}  # maps URL/location to its Page object; read by the HITS helpers
convergence = ConvergenceDetector()  # assign function to variable to mimic pseudocode's syntax
def HITS(query):
    """The HITS algorithm for computing hubs and authorities with respect to a query.

    Returns the dict of relevant (expanded) pages; their .hub and
    .authority attributes are updated in place."""
    pages = expand_pages(relevant_pages(query))  # in order to 'map' faithfully to pseudocode we
    # `pages` maps address -> Page: iterate the Page objects, not the dict
    # keys.  (Iterating the dict directly yields address strings, and the
    # attribute assignments below would fail.)
    for p in pages.values():                     # won't pass the list of pages as an argument
        p.authority = 1
        p.hub = 1
    while True:  # repeat until... convergence
        for p in pages.values():
            p.authority = sum(x.hub for x in getInlinks(p))  # p.authority ← ∑i Inlinki(p).Hub
            p.hub = sum(x.authority for x in getOutlinks(p))  # p.hub ← ∑i Outlinki(p).Authority
        normalize(pages)
        if convergence():
            break
    return pages
| {
"repo_name": "sofmonk/aima-python",
"path": "nlp.py",
"copies": "1",
"size": "15220",
"license": "mit",
"hash": 7263117038356376000,
"line_mean": 37.2211055276,
"line_max": 114,
"alpha_frac": 0.5620562714,
"autogenerated": false,
"ratio": 3.7551221920513456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48171784634513454,
"avg_score": null,
"num_lines": null
} |
"""A chart parser and some grammars. (Chapter 22)"""
# (Written for the second edition of AIMA; expect some discrepancies
# from the third edition until this gets reviewed.)
from . utils import *
from collections import defaultdict
#______________________________________________________________________________
# Grammars and Lexicons
def Rules(**rules):
    """Create a dictionary mapping symbols to alternative sequences.
    >>> Rules(A = "B C | D E")
    {'A': [['B', 'C'], ['D', 'E']]}
    """
    # Each right-hand side splits on '|' into alternatives, and each
    # alternative into whitespace-separated symbols.
    return {symbol: [alternative.strip().split() for alternative in rhs.split('|')]
            for (symbol, rhs) in rules.items()}
def Lexicon(**rules):
    """Create a dictionary mapping symbols to alternative words.
    >>> Lexicon(Art = "the | a | an")
    {'Art': ['the', 'a', 'an']}
    """
    # A lexicon right-hand side is a '|'-separated list of single words.
    return {category: [entry.strip() for entry in words.split('|')]
            for (category, words) in rules.items()}
class Grammar:
    """A grammar: rewrite rules for nonterminals plus a lexicon of words.
    `categories` inverts the lexicon, mapping each word to every lexical
    category that lists it."""
    def __init__(self, name, rules, lexicon):
        "A grammar has a set of rules and a lexicon."
        # Direct attribute assignment (equivalent to utils.update here).
        self.name = name
        self.rules = rules
        self.lexicon = lexicon
        self.categories = defaultdict(list)
        for category, words in lexicon.items():
            for word in words:
                self.categories[word].append(category)
    def rewrites_for(self, cat):
        "Return a sequence of possible rhs's that cat can be rewritten as."
        return self.rules.get(cat, ())
    def isa(self, word, cat):
        "Return True iff word is of category cat"
        return cat in self.categories[word]
    def __repr__(self):
        return '<Grammar %s>' % self.name
# The small English subset E_0 from AIMA Chapter 22.
E0 = Grammar('E0',
             Rules(  # Grammar for E_0 [Fig. 22.4]
                 S='NP VP | S Conjunction S',
                 NP='Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause',
                 VP='Verb | VP NP | VP Adjective | VP PP | VP Adverb',
                 PP='Preposition NP',
                 RelClause='That VP'),
             Lexicon(  # Lexicon for E_0 [Fig. 22.3]
                 Noun="stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east",
                 Verb="is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel",
                 Adjective="right | left | east | south | back | smelly",
                 Adverb="here | there | nearby | ahead | right | left | east | south | back",
                 Pronoun="me | you | I | it",
                 Name="John | Mary | Boston | Aristotle",
                 Article="the | a | an",
                 Preposition="to | in | on | near",
                 Conjunction="and | or | but",
                 Digit="0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9",
                 That="that"
                 ))
# A deliberately tiny grammar/lexicon used for quick testing.
E_ = Grammar('E_',  # Trivial Grammar and lexicon for testing
             Rules(
                 S='NP VP',
                 NP='Art N | Pronoun',
                 VP='V NP'),
             Lexicon(
                 Art='the | a',
                 N='man | woman | table | shoelace | saw',
                 Pronoun='I | you | it',
                 V='saw | liked | feel'
                 ))
# Minimal right-recursive NP grammar, used in the Chart.parses doctest.
E_NP_ = Grammar('E_NP_',  # another trivial grammar for testing
                Rules(NP='Adj NP | N'),
                Lexicon(Adj='happy | handsome | hairy',
                        N='man'))
def generate_random(grammar=E_, s='S'):
    """Replace each token in s by a random entry in grammar (recursively).
    This is useful for testing a grammar, e.g. generate_random(E_)"""
    import random
    def expand(symbols, words):
        # Nonterminals are rewritten via a random rule, lexical categories
        # become a random word, and unknown tokens are kept verbatim.
        for symbol in symbols:
            if symbol in grammar.rules:
                expand(random.choice(grammar.rules[symbol]), words)
            elif symbol in grammar.lexicon:
                words.append(random.choice(grammar.lexicon[symbol]))
            else:
                words.append(symbol)
        return words
    return ' '.join(expand(s.split(), []))
#______________________________________________________________________________
# Chart Parsing
class Chart:
    """Class for parsing sentences using a chart data structure. [Fig 22.7]
    >>> chart = Chart(E0);
    >>> len(chart.parses('the stench is in 2 2'))
    1
    """
    def __init__(self, grammar, trace=False):
        """A datastructure for parsing a string; and methods to do the parse.
        self.chart[i] holds the edges that end just before the i'th word.
        Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
        # Plain attribute assignment (equivalent to the utils.update call)
        # keeps the class self-contained.
        self.grammar = grammar
        self.trace = trace
    def parses(self, words, S='S'):
        """Return a list of parses; words can be a list or string.
        >>> chart = Chart(E_NP_)
        >>> chart.parses('happy man', 'NP')
        [[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]]
        """
        if isinstance(words, str):
            words = words.split()
        self.parse(words, S)
        # Return all the parses that span the whole input
        # 'span the whole input' => begin at 0, end at len(words)
        return [[i, j, S, found, []]
                for (i, j, lhs, found, expects) in self.chart[len(words)]
                # assert j == len(words)
                if i == 0 and lhs == S and expects == []]
    def parse(self, words, S='S'):
        """Parse a list of words; according to the grammar.
        Leave results in the chart."""
        # Seed with a dummy start edge expecting S at position 0.
        self.chart = [[] for i in range(len(words)+1)]
        self.add_edge([0, 0, 'S_', [], [S]])
        for i in range(len(words)):
            self.scanner(i, words[i])
        return self.chart
    def add_edge(self, edge):
        "Add edge to chart, and see if it extends or predicts another edge."
        start, end, lhs, found, expects = edge
        if edge not in self.chart[end]:  # ignore duplicate edges
            self.chart[end].append(edge)
            if self.trace:
                # caller(2) (from utils) names the method that added the edge.
                print('%10s: added %s' % (caller(2), edge))
            if not expects:
                self.extender(edge)   # complete edge: extend waiting edges
            else:
                self.predictor(edge)  # incomplete: predict expected symbol
    def scanner(self, j, word):
        "For each edge expecting a word of this category here, extend the edge."
        for (i, j, A, alpha, Bb) in self.chart[j]:
            if Bb and self.grammar.isa(word, Bb[0]):
                self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])
    def predictor(self, edge):
        "Add to chart any rules for B that could help extend this edge."
        # Renamed the 2to3 artifact parameter 'xxx_todo_changeme'; the edge
        # is unpacked inside the body (Python 3 has no tuple parameters).
        (i, j, A, alpha, Bb) = edge
        B = Bb[0]
        if B in self.grammar.rules:
            for rhs in self.grammar.rewrites_for(B):
                self.add_edge([j, j, B, [], rhs])
    def extender(self, edge):
        "See what edges can be extended by this edge."
        (j, k, B, _, _) = edge
        for (i, j, A, alpha, B1b) in self.chart[j]:
            if B1b and B == B1b[0]:
                self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
# TODO:
# 1. Parsing with augmentations -- requires unification, etc.
# 2. Sequitor
__doc__ += """
>>> chart = Chart(E0)
>>> chart.parses('the wumpus that is smelly is near 2 2')
[[0, 9, 'S', [[0, 5, 'NP', [[0, 2, 'NP', [('Article', 'the'), ('Noun', 'wumpus')], []], [2, 5, 'RelClause', [('That', 'that'), [3, 5, 'VP', [[3, 4, 'VP', [('Verb', 'is')], []], ('Adjective', 'smelly')], []]], []]], []], [5, 9, 'VP', [[5, 6, 'VP', [('Verb', 'is')], []], [6, 9, 'PP', [('Preposition', 'near'), [7, 9, 'NP', [('Digit', '2'), ('Digit', '2')], []]], []]], []]], []]]
### There is a built-in trace facility (compare [Fig. 22.9])
>>> Chart(E_, trace=True).parses('I feel it')
parse: added [0, 0, 'S_', [], ['S']]
predictor: added [0, 0, 'S', [], ['NP', 'VP']]
predictor: added [0, 0, 'NP', [], ['Art', 'N']]
predictor: added [0, 0, 'NP', [], ['Pronoun']]
scanner: added [0, 1, 'NP', [('Pronoun', 'I')], []]
extender: added [0, 1, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []]], ['VP']]
predictor: added [1, 1, 'VP', [], ['V', 'NP']]
scanner: added [1, 2, 'VP', [('V', 'feel')], ['NP']]
predictor: added [2, 2, 'NP', [], ['Art', 'N']]
predictor: added [2, 2, 'NP', [], ['Pronoun']]
scanner: added [2, 3, 'NP', [('Pronoun', 'it')], []]
extender: added [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]
extender: added [0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]
extender: added [0, 3, 'S_', [[0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]], []]
[[0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]]
"""
| {
"repo_name": "gokul-uf/aima-python",
"path": "aimaPy/nlp.py",
"copies": "1",
"size": "8703",
"license": "mit",
"hash": 7219337756951665000,
"line_mean": 39.2916666667,
"line_max": 378,
"alpha_frac": 0.4898310927,
"autogenerated": false,
"ratio": 3.242548435171386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42323795278713855,
"avg_score": null,
"num_lines": null
} |
"""A chart parser and some grammars. (Chapter 22)"""
# (Written for the second edition of AIMA; expect some discrepancies
# from the third edition until this gets reviewed.)
from utils import *
from collections import defaultdict
#______________________________________________________________________________
# Grammars and Lexicons
def Rules(**rules):
    """Create a dictionary mapping symbols to alternative sequences.
    >>> Rules(A = "B C | D E")
    {'A': [['B', 'C'], ['D', 'E']]}
    """
    # Split each right-hand side on '|' into alternatives, then each
    # alternative into whitespace-separated symbols.
    return {symbol: [alternative.strip().split() for alternative in rhs.split('|')]
            for (symbol, rhs) in rules.items()}
def Lexicon(**rules):
    """Create a dictionary mapping symbols to alternative words.
    >>> Lexicon(Art = "the | a | an")
    {'Art': ['the', 'a', 'an']}
    """
    # A lexicon right-hand side is a '|'-separated list of single words.
    return {category: [entry.strip() for entry in words.split('|')]
            for (category, words) in rules.items()}
class Grammar:
    """A grammar: rewrite rules for nonterminals plus a lexicon of words.
    `categories` inverts the lexicon, mapping each word to every lexical
    category that lists it."""
    def __init__(self, name, rules, lexicon):
        "A grammar has a set of rules and a lexicon."
        # Direct attribute assignment (equivalent to utils.update here).
        self.name = name
        self.rules = rules
        self.lexicon = lexicon
        self.categories = defaultdict(list)
        for category, words in lexicon.items():
            for word in words:
                self.categories[word].append(category)
    def rewrites_for(self, cat):
        "Return a sequence of possible rhs's that cat can be rewritten as."
        return self.rules.get(cat, ())
    def isa(self, word, cat):
        "Return True iff word is of category cat"
        return cat in self.categories[word]
    def __repr__(self):
        return '<Grammar %s>' % self.name
# The small English subset E_0 from AIMA Chapter 22.
E0 = Grammar('E0',
             Rules(  # Grammar for E_0 [Fig. 22.4]
                 S='NP VP | S Conjunction S',
                 NP='Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause',
                 VP='Verb | VP NP | VP Adjective | VP PP | VP Adverb',
                 PP='Preposition NP',
                 RelClause='That VP'),
             Lexicon(  # Lexicon for E_0 [Fig. 22.3]
                 Noun="stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east",
                 Verb="is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel",
                 Adjective="right | left | east | south | back | smelly",
                 Adverb="here | there | nearby | ahead | right | left | east | south | back",
                 Pronoun="me | you | I | it",
                 Name="John | Mary | Boston | Aristotle",
                 Article="the | a | an",
                 Preposition="to | in | on | near",
                 Conjunction="and | or | but",
                 Digit="0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9",
                 That="that"
                 ))
# A deliberately tiny grammar/lexicon used for quick testing.
E_ = Grammar('E_',  # Trivial Grammar and lexicon for testing
             Rules(
                 S='NP VP',
                 NP='Art N | Pronoun',
                 VP='V NP'),
             Lexicon(
                 Art='the | a',
                 N='man | woman | table | shoelace | saw',
                 Pronoun='I | you | it',
                 V='saw | liked | feel'
                 ))
# Minimal right-recursive NP grammar, used in the Chart.parses doctest.
E_NP_ = Grammar('E_NP_',  # another trivial grammar for testing
                Rules(NP='Adj NP | N'),
                Lexicon(Adj='happy | handsome | hairy',
                        N='man'))
def generate_random(grammar=E_, s='S'):
    """Replace each token in s by a random entry in grammar (recursively).
    This is useful for testing a grammar, e.g. generate_random(E_)"""
    import random
    def expand(symbols, words):
        # Nonterminals are rewritten via a random rule, lexical categories
        # become a random word, and unknown tokens are kept verbatim.
        for symbol in symbols:
            if symbol in grammar.rules:
                expand(random.choice(grammar.rules[symbol]), words)
            elif symbol in grammar.lexicon:
                words.append(random.choice(grammar.lexicon[symbol]))
            else:
                words.append(symbol)
        return words
    return ' '.join(expand(s.split(), []))
#______________________________________________________________________________
# Chart Parsing
class Chart:
"""Class for parsing sentences using a chart data structure. [Fig 22.7]
>>> chart = Chart(E0);
>>> len(chart.parses('the stench is in 2 2'))
1
"""
def __init__(self, grammar, trace=False):
"""A datastructure for parsing a string; and methods to do the parse.
self.chart[i] holds the edges that end just before the i'th word.
Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
update(self, grammar=grammar, trace=trace)
def parses(self, words, S='S'):
"""Return a list of parses; words can be a list or string.
>>> chart = Chart(E_NP_)
>>> chart.parses('happy man', 'NP')
[[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]]
"""
if isinstance(words, str):
words = words.split()
self.parse(words, S)
# Return all the parses that span the whole input
# 'span the whole input' => begin at 0, end at len(words)
return [[i, j, S, found, []]
for (i, j, lhs, found, expects) in self.chart[len(words)]
# assert j == len(words)
if i == 0 and lhs == S and expects == []]
def parse(self, words, S='S'):
"""Parse a list of words; according to the grammar.
Leave results in the chart."""
self.chart = [[] for i in range(len(words)+1)]
self.add_edge([0, 0, 'S_', [], [S]])
for i in range(len(words)):
self.scanner(i, words[i])
return self.chart
def add_edge(self, edge):
"Add edge to chart, and see if it extends or predicts another edge."
start, end, lhs, found, expects = edge
if edge not in self.chart[end]:
self.chart[end].append(edge)
if self.trace:
print('%10s: added %s' % (caller(2), edge))
if not expects:
self.extender(edge)
else:
self.predictor(edge)
def scanner(self, j, word):
"For each edge expecting a word of this category here, extend the edge."
for (i, j, A, alpha, Bb) in self.chart[j]:
if Bb and self.grammar.isa(word, Bb[0]):
self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])
def predictor(self, xxx_todo_changeme):
"Add to chart any rules for B that could help extend this edge."
(i, j, A, alpha, Bb) = xxx_todo_changeme
B = Bb[0]
if B in self.grammar.rules:
for rhs in self.grammar.rewrites_for(B):
self.add_edge([j, j, B, [], rhs])
def extender(self, edge):
    "See what edges can be extended by this edge."
    # edge is complete: category B has been found spanning j..k.
    (j, k, B, _, _) = edge
    for (i, j, A, alpha, B1b) in self.chart[j]:
        if B1b and B == B1b[0]:
            self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
# TODO:
# 1. Parsing with augmentations -- requires unification, etc.
# 2. Sequitor
__doc__ += """
>>> chart = Chart(E0)
>>> chart.parses('the wumpus that is smelly is near 2 2')
[[0, 9, 'S', [[0, 5, 'NP', [[0, 2, 'NP', [('Article', 'the'), ('Noun', 'wumpus')], []], [2, 5, 'RelClause', [('That', 'that'), [3, 5, 'VP', [[3, 4, 'VP', [('Verb', 'is')], []], ('Adjective', 'smelly')], []]], []]], []], [5, 9, 'VP', [[5, 6, 'VP', [('Verb', 'is')], []], [6, 9, 'PP', [('Preposition', 'near'), [7, 9, 'NP', [('Digit', '2'), ('Digit', '2')], []]], []]], []]], []]]
### There is a built-in trace facility (compare [Fig. 22.9])
>>> Chart(E_, trace=True).parses('I feel it')
parse: added [0, 0, 'S_', [], ['S']]
predictor: added [0, 0, 'S', [], ['NP', 'VP']]
predictor: added [0, 0, 'NP', [], ['Art', 'N']]
predictor: added [0, 0, 'NP', [], ['Pronoun']]
scanner: added [0, 1, 'NP', [('Pronoun', 'I')], []]
extender: added [0, 1, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []]], ['VP']]
predictor: added [1, 1, 'VP', [], ['V', 'NP']]
scanner: added [1, 2, 'VP', [('V', 'feel')], ['NP']]
predictor: added [2, 2, 'NP', [], ['Art', 'N']]
predictor: added [2, 2, 'NP', [], ['Pronoun']]
scanner: added [2, 3, 'NP', [('Pronoun', 'it')], []]
extender: added [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]
extender: added [0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]
extender: added [0, 3, 'S_', [[0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]], []]
[[0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]]
"""
| {
"repo_name": "MircoT/aima-python",
"path": "nlp.py",
"copies": "1",
"size": "8702",
"license": "mit",
"hash": 7379244320641577000,
"line_mean": 39.1013824885,
"line_max": 378,
"alpha_frac": 0.4898873822,
"autogenerated": false,
"ratio": 3.2421758569299555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4232063239129955,
"avg_score": null,
"num_lines": null
} |
"""A chart parser and some grammars. (Chapter 22)"""
# (Written for the second edition of AIMA; expect some discrepanciecs
# from the third edition until this gets reviewed.)
from utils import *
#______________________________________________________________________________
# Grammars and Lexicons
def Rules(**rules):
    """Create a dictionary mapping symbols to alternative sequences.
    >>> Rules(A = "B C | D E")
    {'A': [['B', 'C'], ['D', 'E']]}
    """
    # Each right-hand side is a '|'-separated list of alternatives;
    # each alternative is a whitespace-separated symbol sequence.
    return {lhs: [alternative.strip().split()
                  for alternative in rhs.split('|')]
            for (lhs, rhs) in rules.items()}
def Lexicon(**rules):
    """Create a dictionary mapping symbols to alternative words.
    >>> Lexicon(Art = "the | a | an")
    {'Art': ['the', 'a', 'an']}
    """
    # Split each '|'-separated word list and trim surrounding blanks.
    return {category: [entry.strip() for entry in words.split('|')]
            for (category, words) in rules.items()}
class Grammar:
    """Rewrite rules plus a lexicon, with a reverse index from each
    word to the lexical categories it can belong to."""
    def __init__(self, name, rules, lexicon):
        "A grammar has a set of rules and a lexicon."
        update(self, name=name, rules=rules, lexicon=lexicon)
        # Invert the lexicon: word -> list of categories.
        self.categories = DefaultDict([])
        for (category, words) in lexicon.items():
            for word in words:
                self.categories[word].append(category)
    def rewrites_for(self, cat):
        "Return a sequence of possible rhs's that cat can be rewritten as."
        try:
            return self.rules[cat]
        except KeyError:
            return ()
    def isa(self, word, cat):
        "Return True iff word is of category cat"
        return cat in self.categories[word]
    def __repr__(self):
        return '<Grammar %s>' % self.name
# Example grammars: E_0 from AIMA [Figs. 22.3 and 22.4], plus two small
# grammars (E_, E_NP_) used by the doctests below.
E0 = Grammar('E0',
    Rules(  # Grammar for E_0 [Fig. 22.4]
        S = 'NP VP | S Conjunction S',
        NP = 'Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause',
        VP = 'Verb | VP NP | VP Adjective | VP PP | VP Adverb',
        PP = 'Preposition NP',
        RelClause = 'That VP'),
    Lexicon(  # Lexicon for E_0 [Fig. 22.3]
        Noun = "stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east",
        Verb = "is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel",
        Adjective = "right | left | east | south | back | smelly",
        Adverb = "here | there | nearby | ahead | right | left | east | south | back",
        Pronoun = "me | you | I | it",
        Name = "John | Mary | Boston | Aristotle",
        Article = "the | a | an",
        Preposition = "to | in | on | near",
        Conjunction = "and | or | but",
        Digit = "0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9",
        That = "that"
        ))
E_ = Grammar('E_',  # Trivial Grammar and lexicon for testing
    Rules(
        S = 'NP VP',
        NP = 'Art N | Pronoun',
        VP = 'V NP'),
    Lexicon(
        Art = 'the | a',
        N = 'man | woman | table | shoelace | saw',
        Pronoun = 'I | you | it',
        V = 'saw | liked | feel'
        ))
E_NP_ = Grammar('E_NP_',  # another trivial grammar for testing
    Rules(NP = 'Adj NP | N'),
    Lexicon(Adj = 'happy | handsome | hairy',
            N = 'man'))
def generate_random(grammar=E_, s='S'):
    """Replace each token in s by a random entry in grammar (recursively).
    This is useful for testing a grammar, e.g. generate_random(E_)"""
    import random
    def expand(symbol, output):
        "Recursively expand one symbol, appending terminal words to output."
        if symbol in grammar.rules:
            # Pick one alternative rhs and expand each of its parts.
            for part in random.choice(grammar.rules[symbol]):
                expand(part, output)
        elif symbol in grammar.lexicon:
            output.append(random.choice(grammar.lexicon[symbol]))
        else:
            # Unknown tokens pass through unchanged.
            output.append(symbol)
    words = []
    for token in s.split():
        expand(token, words)
    return ' '.join(words)
#______________________________________________________________________________
# Chart Parsing
class Chart:
    """Class for parsing sentences using a chart data structure. [Fig 22.7]
    >>> chart = Chart(E0);
    >>> len(chart.parses('the stench is in 2 2'))
    1
    """
    def __init__(self, grammar, trace=False):
        """A datastructure for parsing a string; and methods to do the parse.
        self.chart[i] holds the edges that end just before the i'th word.
        Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
        # update() (from utils) copies keyword args onto self as attributes.
        update(self, grammar=grammar, trace=trace)
    def parses(self, words, S='S'):
        """Return a list of parses; words can be a list or string.
        >>> chart = Chart(E_NP_)
        >>> chart.parses('happy man', 'NP')
        [[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]]
        """
        if isinstance(words, str):
            words = words.split()
        self.parse(words, S)
        # Return all the parses that span the whole input
        # 'span the whole input' => begin at 0, end at len(words)
        # A complete edge has an empty 'expects' list.
        return [[i, j, S, found, []]
                for (i, j, lhs, found, expects) in self.chart[len(words)]
                # assert j == len(words)
                if i == 0 and lhs == S and expects == []]
    def parse(self, words, S='S'):
        """Parse a list of words; according to the grammar.
        Leave results in the chart."""
        # One edge bucket per word boundary, positions 0 .. len(words).
        self.chart = [[] for i in range(len(words)+1)]
        # Seed with a dummy edge expecting the start symbol S.
        self.add_edge([0, 0, 'S_', [], [S]])
        for i in range(len(words)):
            self.scanner(i, words[i])
        return self.chart
    def add_edge(self, edge):
        "Add edge to chart, and see if it extends or predicts another edge."
        start, end, lhs, found, expects = edge
        if edge not in self.chart[end]:  # skip duplicate edges
            self.chart[end].append(edge)
            if self.trace:
                print '%10s: added %s' % (caller(2), edge)
            if not expects:
                # Complete edge: try to extend edges waiting on this lhs.
                self.extender(edge)
            else:
                # Incomplete edge: predict rules for the next symbol.
                self.predictor(edge)
    def scanner(self, j, word):
        "For each edge expecting a word of this category here, extend the edge."
        # All edges in chart[j] end at j, so the unpacked j is unchanged.
        for (i, j, A, alpha, Bb) in self.chart[j]:
            if Bb and self.grammar.isa(word, Bb[0]):
                self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])
    def predictor(self, (i, j, A, alpha, Bb)):
        # NOTE: Python 2 tuple-parameter syntax; invalid in Python 3.
        "Add to chart any rules for B that could help extend this edge."
        B = Bb[0]
        if B in self.grammar.rules:
            for rhs in self.grammar.rewrites_for(B):
                # Predicted edges start and end at j: nothing found yet.
                self.add_edge([j, j, B, [], rhs])
    def extender(self, edge):
        "See what edges can be extended by this edge."
        # edge is complete: category B has been found spanning j..k.
        (j, k, B, _, _) = edge
        for (i, j, A, alpha, B1b) in self.chart[j]:
            if B1b and B == B1b[0]:
                self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
#### TODO:
#### 1. Parsing with augmentations -- requires unification, etc.
#### 2. Sequitor
__doc__ += """
>>> chart = Chart(E0)
>>> chart.parses('the wumpus that is smelly is near 2 2')
[[0, 9, 'S', [[0, 5, 'NP', [[0, 2, 'NP', [('Article', 'the'), ('Noun', 'wumpus')], []], [2, 5, 'RelClause', [('That', 'that'), [3, 5, 'VP', [[3, 4, 'VP', [('Verb', 'is')], []], ('Adjective', 'smelly')], []]], []]], []], [5, 9, 'VP', [[5, 6, 'VP', [('Verb', 'is')], []], [6, 9, 'PP', [('Preposition', 'near'), [7, 9, 'NP', [('Digit', '2'), ('Digit', '2')], []]], []]], []]], []]]
### There is a built-in trace facility (compare [Fig. 22.9])
>>> Chart(E_, trace=True).parses('I feel it')
parse: added [0, 0, 'S_', [], ['S']]
predictor: added [0, 0, 'S', [], ['NP', 'VP']]
predictor: added [0, 0, 'NP', [], ['Art', 'N']]
predictor: added [0, 0, 'NP', [], ['Pronoun']]
scanner: added [0, 1, 'NP', [('Pronoun', 'I')], []]
extender: added [0, 1, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []]], ['VP']]
predictor: added [1, 1, 'VP', [], ['V', 'NP']]
scanner: added [1, 2, 'VP', [('V', 'feel')], ['NP']]
predictor: added [2, 2, 'NP', [], ['Art', 'N']]
predictor: added [2, 2, 'NP', [], ['Pronoun']]
scanner: added [2, 3, 'NP', [('Pronoun', 'it')], []]
extender: added [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]
extender: added [0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]
extender: added [0, 3, 'S_', [[0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]], []]
[[0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]]
"""
| {
"repo_name": "ttalviste/aima",
"path": "aima/nlp.py",
"copies": "2",
"size": "8297",
"license": "mit",
"hash": -6962023272076194000,
"line_mean": 38.6985645933,
"line_max": 378,
"alpha_frac": 0.5048812824,
"autogenerated": false,
"ratio": 3.0958955223880595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.460077680478806,
"avg_score": null,
"num_lines": null
} |
""" A cheap and cheerful elm327 wifi dongle simulator.
"""
import SocketServer
# Interface the fake dongle listens on.
LISTEN_IP = 'localhost'
# Map of supported AT / OBD-II mode-01 commands to zero-arg response
# generators (responses are hex byte strings, except the ATI banner).
RESPONDERS = {
    'ATI': lambda: 'LM327 v1.5 (fake)',
    '010C': lambda: '01 07', # RPM
    '010D': lambda: '64', # KPH
}
class ELM327Handler(SocketServer.BaseRequestHandler):
    """Serve one request: read a command, look up and format a response."""
    def handle(self):
        command = self.request.recv(1024).strip()
        # Attempt to create the response
        response = None
        try:
            response = RESPONDERS[command]()
        except KeyError:
            # Unknown command
            pass
        # Pad the response unless it is a string response
        # NOTE(review): for an unknown non-ATI command this formats the
        # literal text 'None' into the payload and still sends it --
        # confirm whether that is intended.
        if command not in ('ATI',):
            response = '{} {} {} BA'.format(
                command[:2],
                command[2:4],
                response,
            )
        print '{} => {}'.format(command, response)
        if response is not None:
            self.request.sendall(response)
if __name__ == "__main__":
    # NOTE(review): HOST/PORT are assigned but never used; the server
    # actually binds LISTEN_IP on port 35000 (the ELM327 wifi default).
    HOST, PORT = "localhost", 9999
    server = SocketServer.TCPServer((LISTEN_IP, 35000), ELM327Handler)
    server.serve_forever()
| {
"repo_name": "thisismyrobot/obDash",
"path": "elm327wifisim.py",
"copies": "1",
"size": "1132",
"license": "mit",
"hash": 9205753825789336000,
"line_mean": 23.1555555556,
"line_max": 70,
"alpha_frac": 0.5300353357,
"autogenerated": false,
"ratio": 3.9719298245614034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 45
} |
'''A checker for Draft Designer Smart Grids. Checks several different aspects of the grid and
reports Errors and Warnings.
Created on May 15, 2013
@author: Cam Moore
'''
from apps.managers.challenge_mgr import challenge_mgr
from apps.widgets.smartgrid_design.models import DesignerAction, DesignerEvent, DesignerGrid, \
DesignerLevel, DesignerColumnGrid
from apps.managers.challenge_mgr.models import RoundSetting
from datetime import datetime, time
from apps.managers.smartgrid_mgr import smartgrid_mgr, action_dependency
import re
from apps.widgets.smartgrid_library.models import LibraryAction
import urllib2
from urllib2 import HTTPError, URLError
from apps.managers.smartgrid_mgr.gcc_model import Error, Warn, _ERRORS, _WARNINGS
from apps.managers.predicate_mgr import predicate_mgr
def __is_in_round(date, roundsetting):
    """Returns True if the given date falls within the round's start/end
    (inclusive); False if either argument is missing."""
    if not (date and roundsetting):
        return False
    return roundsetting.start <= date <= roundsetting.end
def __is_in_rounds(date):
    """Returns True if the given date is in any of the roundsettings."""
    # any() short-circuits on the first matching round; the original
    # kept OR-ing over every RoundSetting even after finding a match.
    return any(__is_in_round(date, r) for r in RoundSetting.objects.all())
def __is_in_challenge(date):
    """Returns True if the given date is between the Challenge start and
    end dates (inclusive); False for a missing date."""
    if not date:
        return False
    return (challenge_mgr.get_challenge_start() <= date
            <= challenge_mgr.get_challenge_end())
def __is_after_challenge(date):
    """Returns True if the given date is after the Challenge end date."""
    return challenge_mgr.get_challenge_end() < date
def __is_boolean_logic(token):
    """Returns True if token is a boolean operator ('and'/'or'/'not') or a
    boolean literal ('true'/'false'), compared case-insensitively."""
    if not token:
        return False
    return token.lower() in ('and', 'or', 'not', 'true', 'false')
def __get_urls(text):
    """Returns a list of the urls in the text."""
    ret = []
    # Find http(s) URLs in the lower-cased text.  The character class
    # over-captures trailing punctuation/markup, which is trimmed below.
    urls = re.\
        findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', \
                text.lower())
    for url in urls:
        # The trims are order-sensitive: a URL ends with at most one of
        # these suffixes, so only one branch fires per URL.
        if url.endswith(')') or url.endswith('>'):
            url = url[: -1]
        if url.endswith('),') or url.endswith(').'):
            url = url[: -2]
        if url.endswith(')</center'):
            url = url[: -9]
        ret.append(url)
    return ret
def __get_predicate(token):
    """Returns the predicate if any in the given token: the text before the
    first '(', e.g. 'submitted_action' for 'submitted_action(intro-video)'.
    Returns None when token is empty or contains no '('."""
    if not token:
        return None
    head, paren, _ = token.partition('(')
    return head if paren else None
def __check_predicates(action):
    """Checks the unlock_condition string of the given action ensuring that the
    predicates in the string are valid Makahiki predicates and that it has
    boolean logic only. Returns a list of Errors. Does not evaluate the
    predicates or test that the logic is correct."""
    ret = []
    unlock_condition = action.unlock_condition
    valid_predicates = predicate_mgr.get_defined_predicates()
    if unlock_condition:
        # Matches conditions that contain no '(' at all, i.e. no
        # predicate calls to validate.
        no_pred = re.compile(r'^(\s*[^(]+\s*)$')
        # BUG FIX: the branches were inverted -- the original 'pass'ed when
        # no_pred found nothing (i.e. exactly when the condition DOES
        # contain predicate calls), so validation never ran.
        if len(no_pred.findall(unlock_condition)) == 0:
            # Pull out each 'name(params)' call and vet the name tokens.
            pat = re.compile(r'([^(]+)\s*\(([^)]+)\)\s*')
            for pred, params in pat.findall(unlock_condition):
                _ = params  # params are not validated here
                for token in pred.split():
                    if __is_boolean_logic(token):
                        continue
                    if token not in valid_predicates.keys():
                        message = "%s is not a defined Makahiki predicate" % token
                        ret.append(Error(message=message, action=action))
    return ret
def check_pub_exp_dates(draft):
    """Returns a dictionary of Errors and Warnings for DesignerActions whose
    pub_date or exp_date are not in the challenge."""
    ret = {}
    ret[_ERRORS] = []
    ret[_WARNINGS] = []
    challenge_start = challenge_mgr.get_challenge_start()
    challenge_end = challenge_mgr.get_challenge_end()
    for action in DesignerAction.objects.filter(draft=draft):
        if action.pub_date > challenge_end.date():
            ret[_ERRORS].append(Error(message="Publication Date after end of Challenge", \
                                      action=action))
        # BUG FIX: compare date-to-date. The original wrapped expire_date in
        # datetime.combine() and compared the result against a date, which
        # raises TypeError; check_grid_pub_exp_dates already compares the
        # plain dates.
        if action.expire_date and \
            action.expire_date < challenge_start.date():
            ret[_ERRORS].append(Error(message="Expiration date before beginning of Challenge", \
                                      action=action))
        if not __is_in_rounds(datetime.combine(action.pub_date, time(0, 0))):
            ret[_WARNINGS].append(Warn(message="Publication Date isn't in a round", \
                                       action=action))
        if action.expire_date and not \
            __is_in_rounds(datetime.combine(action.expire_date, time(0, 0))):
            ret[_WARNINGS].append(Warn(message="Expiration Date isn't in a round", \
                                       action=action))
    return ret
def check_grid_pub_exp_dates(draft):
    """Returns a dictionary of Errors and Warnings for DesignerActions in the grid whose pub_date or
    exp_date are not in the challenge."""
    ret = {}
    ret[_ERRORS] = []
    ret[_WARNINGS] = []
    challenge_start = challenge_mgr.get_challenge_start()
    challenge_end = challenge_mgr.get_challenge_end()
    # Only actions actually placed in the grid are checked.
    for loc in DesignerGrid.objects.filter(draft=draft):
        if loc.action.pub_date > challenge_end.date():
            message = "Publication Date %s after end of Challenge %s" % (loc.action.pub_date, \
                                                                         challenge_end.date())
            ret[_ERRORS].append(Error(message=message, \
                                      action=loc.action))
        if loc.action.expire_date and \
            loc.action.expire_date < \
            challenge_start.date():
            message = "Expiration date %s is before beginning of Challenge %s" % \
                (loc.action.expire_date, challenge_start.date())
            ret[_ERRORS].append(Error(message=message, \
                                      action=loc.action))
        # Round-membership warnings are intentionally disabled below.
        # if not __is_in_rounds(datetime.combine(loc.action.pub_date, time(0, 0))):
        #     message = "Publication Date %s isn't in a round" % loc.action.pub_date
        #     ret[_WARNINGS].append(Warn(message=message, \
        #                                action=loc.action))
        # if loc.action.expire_date and not \
        #     __is_in_rounds(datetime.combine(loc.action.expire_date, time(0, 0))):
        #     message = "Expiration Date isn't in a round" % loc.action.expire_date
        #     ret[_WARNINGS].append(Warn(message=message, \
        #                                action=loc.action))
    return ret
def check_event_dates(draft):
    """Returns a list of Errors for DesignerEvents whose event_date isn't in
    the challenge or isn't during a round."""
    errors = []
    # Each check is a (predicate, message) pair applied in order.
    checks = ((__is_in_rounds, "Event date isn't in a round"),
              (__is_in_challenge, "Event date isn't in the challenge"))
    for event in DesignerEvent.objects.filter(draft=draft):
        for passes, message in checks:
            if not passes(event.event_date):
                errors.append(Error(message=message, action=event))
    return errors
def check_grid_event_dates(draft):
    """Returns a list of Errors for DesignerEvents in the grid whose event_date isn't in the
    challenge or isn't during a round."""
    ret = []
    for loc in DesignerGrid.objects.filter(draft=draft):
        if loc.action.type == 'event':
            # Re-fetch by slug to get the event subclass with event_date.
            event = smartgrid_mgr.get_designer_action(draft=draft, slug=loc.action.slug)
            if not __is_in_rounds(event.event_date):
                if event.event_date:
                    message = "Event date %s isn't in a round" % event.event_date.date()
                else:
                    # A missing event_date fails both checks with this message.
                    message = "Event doesn't have an event date."
                ret.append(Error(message=message, action=event))
            if not __is_in_challenge(event.event_date):
                if event.event_date:
                    message = "Event date %s isn't in the challenge %s - %s" % \
                        (event.event_date.date(), challenge_mgr.get_challenge_start().date(), \
                         challenge_mgr.get_challenge_end().date())
                else:
                    message = "Event doesn't have an event date."
                ret.append(Error(message=message, action=event))
    return ret
def check_designer_unlock_dates(draft):
    """Checks all the DesignerAction unlock_conditions looking for unlock_on_date predicates.
    Checks the dates in the predicate to ensure they are in the challenge."""
    ret = {}
    ret[_ERRORS] = []
    ret[_WARNINGS] = []
    for action in DesignerAction.objects.filter(draft=draft):
        if action.unlock_condition:
            # Split on the predicate name; the text after it (up to the
            # closing paren) holds the quoted date argument.
            l = action.unlock_condition.split('unlock_on_date(')
            if len(l) > 1:
                index = l[1].find(')')
                date_string = l[1][:index].strip('"\'')
                # NOTE(review): the format here is '%Y-%m-%d' but
                # check_library_unlock_dates parses '%m/%d/%y' -- confirm
                # which format is canonical for unlock_on_date.
                unlock_date = datetime.strptime(date_string, "%Y-%m-%d")
                if __is_after_challenge(datetime.combine(unlock_date, time(0, 0))):
                    message = "unlock date %s is after challenge end %s" % \
                        (unlock_date.date(), challenge_mgr.get_challenge_end().date())
                    ret[_ERRORS].append(Error(message=message, action=action))
                if not __is_in_rounds(datetime.combine(unlock_date, time(0, 0))):
                    message = "unlock date %s is not in a round" % unlock_date.date()
                    ret[_WARNINGS].append(Warn(message=message, \
                                               action=action))
    return ret
def check_designer_urls(draft):
    """Checks all the DesignerAction descriptions looking for URLs and checks
    that they return a valid HTTP status code. If they don't a warning is
    appended to the list of warnings (returned)."""
    ret = []
    for action in DesignerAction.objects.filter(draft=draft):
        for url in __get_urls(action.description):
            try:
                urllib2.urlopen(urllib2.Request(url))
            # HTTPError is a subclass of URLError, so one handler replaces
            # the original's two textually-identical except blocks.
            except URLError as err:
                msg = "url %s raised error %s" % (url, err)
                ret.append(Warn(message=msg, action=action))
    return ret
def check_designer_predicates(draft):
    """Checks the Designer items' unlock_condition ensuring the predicates are
    defined. This does not evaluate the predicates, just ensures that the
    predicates are defined."""
    issues = []
    for action in DesignerAction.objects.filter(draft=draft):
        issues.extend(__check_predicates(action))
    for level in DesignerLevel.objects.filter(draft=draft):
        issues.extend(__check_predicates(level))
    return issues
def check_designer_action_column_names(draft):
    """Checks for actions in columns that don't have a column name.
    Returns a list of Errors, one per grid cell in an unnamed column."""
    ret = []
    for grid in DesignerGrid.objects.filter(draft=draft):
        # exists() asks the database for emptiness directly instead of
        # materializing the whole queryset just to take len() of it.
        if not DesignerColumnGrid.objects.filter(draft=draft, level=grid.level, \
                                                 column=grid.column).exists():
            message = "in %s column %s row %s needs a column name." % (grid.level, \
                                                                       grid.column, \
                                                                       grid.row)
            ret.append(Error(message=message, action=grid.action))
    return ret
def check_unreachable_designer_actions(draft):
    """Checks for unreachable actions and returns a list of Errors indicating which actions are
    unreachable."""
    # Thin delegate: the dependency analysis lives in action_dependency.
    return action_dependency.check_unreachable_designer_actions(draft)
def check_false_unlock_conditions(draft):
    """Checks for actions that depend on actions with False unlock_conditions."""
    return action_dependency.check_false_unlock_designer_actions(draft)
def check_mismatched_designer_level(draft):
    """Checks for actions that depend on actions in a higher level."""
    # NOTE: the delegate's name spells 'missmatched'; it is defined that
    # way in action_dependency, so it cannot be renamed here.
    return action_dependency.check_missmatched_designer_level(draft)
def run_designer_checks(draft, settings):  # pylint: disable=R0912
    """Runs the checks that the user set in their GccSettings.

    Returns a dict mapping _ERRORS and _WARNINGS to lists of stringified
    issues from each enabled check."""
    ret = {}
    ret[_ERRORS] = []
    ret[_WARNINGS] = []
    # cannot turn off checking the predicates.
    for e in check_designer_predicates(draft):
        ret[_ERRORS].append(str(e))
    for e in check_designer_action_column_names(draft):
        ret[_ERRORS].append(str(e))
    if settings.check_pub_dates:
        d = check_grid_pub_exp_dates(draft)
        for e in d[_ERRORS]:
            ret[_ERRORS].append(str(e))
        for w in d[_WARNINGS]:
            ret[_WARNINGS].append(str(w))
    if settings.check_event_dates:
        # check_grid_event_dates returns a flat list of Errors.
        d = check_grid_event_dates(draft)
        for e in d:
            ret[_ERRORS].append(str(e))
    if settings.check_unlock_dates:
        d = check_designer_unlock_dates(draft)
        for e in d[_ERRORS]:
            ret[_ERRORS].append(str(e))
        for w in d[_WARNINGS]:
            ret[_WARNINGS].append(str(w))
    if settings.check_description_urls:
        # Slow check: fetches every URL found in action descriptions.
        for w in check_designer_urls(draft):
            ret[_WARNINGS].append(str(w))
    if settings.check_unreachable:
        for e in action_dependency.check_unreachable_designer_actions(draft):
            ret[_ERRORS].append(str(e))
    if settings.check_false_unlocks:
        for w in action_dependency.check_false_unlock_designer_actions(draft):
            ret[_WARNINGS].append(str(w))
    if settings.check_mismatched_levels:
        for w in action_dependency.check_missmatched_designer_level(draft):
            ret[_WARNINGS].append(str(w))
    return ret
# pylint: enable=R0912
def full_designer_check(draft):
    """Runs all the designer checks (slow).

    Returns a dict mapping _ERRORS and _WARNINGS to lists of stringified
    issues aggregated from every individual check."""
    ret = {}
    ret[_ERRORS] = []
    ret[_WARNINGS] = []
    d = check_pub_exp_dates(draft)
    for e in d[_ERRORS]:
        ret[_ERRORS].append(str(e))  # was str(str(e)); the double call was redundant
    for w in d[_WARNINGS]:
        ret[_WARNINGS].append(str(w))
    d = check_grid_pub_exp_dates(draft)
    for e in d[_ERRORS]:
        ret[_ERRORS].append(str(e))
    for w in d[_WARNINGS]:
        ret[_WARNINGS].append(str(w))
    # The event-date checks return flat lists of Errors.
    d = check_event_dates(draft)
    for e in d:
        ret[_ERRORS].append(str(e))
    d = check_grid_event_dates(draft)
    for e in d:
        ret[_ERRORS].append(str(e))
    d = check_designer_unlock_dates(draft)
    for e in d[_ERRORS]:
        ret[_ERRORS].append(str(e))
    for w in d[_WARNINGS]:
        ret[_WARNINGS].append(str(w))
    # Slow: fetches every URL in every action description.
    for w in check_designer_urls(draft):
        ret[_WARNINGS].append(str(w))
    for e in action_dependency.check_unreachable_designer_actions(draft):
        ret[_ERRORS].append(str(e))
    for w in action_dependency.check_false_unlock_designer_actions(draft):
        ret[_WARNINGS].append(str(w))
    for w in action_dependency.check_missmatched_designer_level(draft):
        ret[_WARNINGS].append(str(w))
    return ret
def quick_designer_check(draft):
    """Quick test: runs the fast designer checks (skips URL fetching and
    the non-grid date checks)."""
    errors = []
    warnings = []
    dates = check_grid_pub_exp_dates(draft)
    errors.extend(str(e) for e in dates[_ERRORS])
    warnings.extend(str(w) for w in dates[_WARNINGS])
    errors.extend(str(e) for e in check_grid_event_dates(draft))
    unlocks = check_designer_unlock_dates(draft)
    errors.extend(str(e) for e in unlocks[_ERRORS])
    warnings.extend(str(w) for w in unlocks[_WARNINGS])
    errors.extend(
        str(e) for e in action_dependency.check_unreachable_designer_actions(draft))
    warnings.extend(
        str(w) for w in action_dependency.check_false_unlock_designer_actions(draft))
    warnings.extend(
        str(w) for w in action_dependency.check_missmatched_designer_level(draft))
    return {_ERRORS: errors, _WARNINGS: warnings}
def check_library_unlock_dates():
    """Checks all the LibraryAction unlock_conditions looking for unlock_on_date predicates.
    Checks the dates in the predicate to ensure they are in the challenge."""
    ret = {}
    ret[_ERRORS] = []
    ret[_WARNINGS] = []
    for action in LibraryAction.objects.all():
        # Split on the predicate name; the text after it (up to the
        # closing paren) holds the quoted date argument.
        l = action.unlock_condition.split('unlock_on_date(')
        if len(l) > 1:
            index = l[1].find(')')
            date_string = l[1][:index].strip('"\'')
            # NOTE(review): parses '%m/%d/%y' here but the designer version
            # (check_designer_unlock_dates) parses '%Y-%m-%d' -- confirm
            # which format unlock_on_date actually uses.
            unlock_date = datetime.strptime(date_string, "%m/%d/%y")
            if not __is_in_challenge(datetime.combine(unlock_date, time(0, 0))):
                ret[_ERRORS].append(Error(message="unlock date is not in challenge", \
                                          action=action))
            if not __is_in_rounds(datetime.combine(unlock_date, time(0, 0))):
                ret[_WARNINGS].append(Warn(message="unlock date is not in a round", \
                                           action=action))
    return ret
def check_library_urls():
    """Checks all the LibraryAction descriptions looking for URLs and checks that they return a
    valid HTTP status code. If they don't a warning is raised. Returns a list of Warnings."""
    ret = []
    for action in LibraryAction.objects.all():
        for url in __get_urls(action.description):
            try:
                urllib2.urlopen(urllib2.Request(url))
            # HTTPError is a subclass of URLError, so one handler replaces
            # the original's two textually-identical except blocks.
            except URLError as err:
                msg = "url %s raised error %s" % (url, err)
                ret.append(Warn(message=msg, action=action))
    return ret
def check_library_predicates():
    """Validates the unlock_condition predicates of every LibraryAction;
    returns the accumulated list of issues."""
    issues = []
    for action in LibraryAction.objects.all():
        issues.extend(__check_predicates(action))
    return issues
def full_library_check():
    """Runs all the consistency checks on the library returning a dictionary with the Errors and
    Warnings."""
    ret = {}
    ret[_ERRORS] = []
    ret[_WARNINGS] = []
    d = check_library_unlock_dates()
    for e in d[_ERRORS]:
        ret[_ERRORS].append(str(e))
    for w in d[_WARNINGS]:
        ret[_WARNINGS].append(str(w))
    # Slow: fetches every URL in every library action description.
    for w in check_library_urls():
        ret[_WARNINGS].append(str(w))
    for e in action_dependency.check_unreachable_library_actions():
        ret[_ERRORS].append(str(e))
    for w in action_dependency.check_false_unlock_library_actions():
        ret[_WARNINGS].append(str(w))
    return ret
def quick_library_check():
    """Runs the faster library checks (everything except URL fetching)."""
    errors = []
    warnings = []
    unlocks = check_library_unlock_dates()
    errors.extend(str(e) for e in unlocks[_ERRORS])
    warnings.extend(str(w) for w in unlocks[_WARNINGS])
    errors.extend(
        str(e) for e in action_dependency.check_unreachable_library_actions())
    warnings.extend(
        str(w) for w in action_dependency.check_false_unlock_library_actions())
    return {_ERRORS: errors, _WARNINGS: warnings}
def library_errors():
    """Returns just the Errors found in the Library items."""
    return quick_library_check()[_ERRORS]
| {
"repo_name": "yongwen/makahiki",
"path": "makahiki/apps/managers/smartgrid_mgr/gcc.py",
"copies": "3",
"size": "19805",
"license": "mit",
"hash": -844153852135999200,
"line_mean": 39.5840163934,
"line_max": 100,
"alpha_frac": 0.5963645544,
"autogenerated": false,
"ratio": 3.8772513703993736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5973615924799374,
"avg_score": null,
"num_lines": null
} |
"""A CherryPy tool for hosting a foreign WSGI application."""
import sys
import warnings
import cherrypy
# is this sufficient for start_response?
def start_response(status, response_headers, exc_info=None):
    """Minimal WSGI start_response: mirror the status line and headers onto
    the active CherryPy response. exc_info is accepted but ignored."""
    cherrypy.response.status = status
    cherrypy.response.headers.update(dict(response_headers))
def make_environ():
    """grabbed some of below from wsgiserver.py
    for hosting WSGI apps in non-WSGI environments (yikes!)

    Builds a PEP 333 environ dict from the current cherrypy.request."""
    request = cherrypy.request
    # create and populate the wsgi environ
    environ = dict()
    environ["wsgi.version"] = (1,0)
    environ["wsgi.url_scheme"] = request.scheme
    environ["wsgi.input"] = request.rfile
    environ["wsgi.errors"] = sys.stderr
    environ["wsgi.multithread"] = True
    environ["wsgi.multiprocess"] = False
    environ["wsgi.run_once"] = False
    environ["REQUEST_METHOD"] = request.method
    environ["SCRIPT_NAME"] = request.script_name
    environ["PATH_INFO"] = request.path_info
    environ["QUERY_STRING"] = request.query_string
    environ["SERVER_PROTOCOL"] = request.protocol
    environ["SERVER_NAME"] = request.local.name
    environ["SERVER_PORT"] = request.local.port
    environ["REMOTE_HOST"] = request.remote.name
    environ["REMOTE_ADDR"] = request.remote.ip
    environ["REMOTE_PORT"] = request.remote.port
    # then all the http headers
    headers = request.headers
    environ["CONTENT_TYPE"] = headers.get("Content-type", "")
    environ["CONTENT_LENGTH"] = headers.get("Content-length", "")
    # Remaining headers become HTTP_* keys per the CGI convention.
    for (k, v) in headers.iteritems():
        envname = "HTTP_" + k.upper().replace("-","_")
        environ[envname] = v
    return environ
def run(app, env=None):
    """Run the given WSGI app and set response.body to its output.

    env, if given, is a dict of extra environ entries merged in before
    the app is invoked. Always returns True."""
    warnings.warn("This module is deprecated and will be removed in "
                  "Cherrypy 3.2. See http://www.cherrypy.org/ticket/700 "
                  "for more information.")
    try:
        # Prefer the environ CherryPy already built for this request.
        environ = cherrypy.request.wsgi_environ.copy()
        environ['SCRIPT_NAME'] = cherrypy.request.script_name
        environ['PATH_INFO'] = cherrypy.request.path_info
    except AttributeError:
        # Not hosted under a WSGI server; synthesize an environ.
        environ = make_environ()
    if env:
        environ.update(env)
    # run the wsgi app and have it set response.body
    response = app(environ, start_response)
    try:
        # Materialize the iterable now so close() can run immediately after.
        cherrypy.response.body = [x for x in response]
    finally:
        if hasattr(response, "close"):
            response.close()
    return True
| {
"repo_name": "cread/ec2id",
"path": "cherrypy/lib/wsgiapp.py",
"copies": "1",
"size": "2610",
"license": "apache-2.0",
"hash": -3720640023283479000,
"line_mean": 31.8961038961,
"line_max": 73,
"alpha_frac": 0.6302681992,
"autogenerated": false,
"ratio": 4.110236220472441,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.524050441967244,
"avg_score": null,
"num_lines": null
} |
"""achievement_date_unique
Revision ID: 65c7a32b7322
Revises: d4a70083f72e
Create Date: 2017-01-31 23:01:11.744725
"""
# revision identifiers, used by Alembic.
revision = '65c7a32b7322'        # this migration's id
down_revision = 'd4a70083f72e'   # migration this one applies on top of
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Replace the composite primary keys of achievements_users and
    # goal_evaluation_cache with surrogate SERIAL ids, then enforce the
    # old uniqueness via partial unique indexes (one for rows with an
    # achievement_date, one for rows without; PostgreSQL-specific).
    op.execute("ALTER TABLE achievements_users DROP CONSTRAINT pk_achievements_users;")
    op.execute("ALTER TABLE achievements_users ADD COLUMN id SERIAL;")
    op.execute("ALTER TABLE achievements_users ADD CONSTRAINT pk_achievements_users PRIMARY KEY(id);")
    op.execute("ALTER TABLE goal_evaluation_cache DROP CONSTRAINT pk_goal_evaluation_cache;")
    op.execute("ALTER TABLE goal_evaluation_cache ADD COLUMN id SERIAL;")
    op.execute("ALTER TABLE goal_evaluation_cache ADD CONSTRAINT pk_goal_evaluation_cache PRIMARY KEY(id);")
    op.create_index('idx_achievements_users_date_not_null_unique', 'achievements_users', ['user_id', 'achievement_id', 'achievement_date', 'level'], unique=True, postgresql_where=sa.text('achievement_date IS NOT NULL'))
    op.create_index('idx_achievements_users_date_null_unique', 'achievements_users', ['user_id', 'achievement_id', 'level'], unique=True, postgresql_where=sa.text('achievement_date IS NULL'))
    op.create_index(op.f('ix_achievements_users_achievement_id'), 'achievements_users', ['achievement_id'], unique=False)
    op.create_index(op.f('ix_achievements_users_level'), 'achievements_users', ['level'], unique=False)
    op.create_index('idx_goal_evaluation_cache_date_not_null_unique', 'goal_evaluation_cache', ['user_id', 'goal_id', 'achievement_date'], unique=True, postgresql_where=sa.text('achievement_date IS NOT NULL'))
    op.create_index('idx_goal_evaluation_cache_date_null_unique', 'goal_evaluation_cache', ['user_id', 'goal_id'], unique=True, postgresql_where=sa.text('achievement_date IS NULL'))
    op.create_index(op.f('ix_goal_evaluation_cache_goal_id'), 'goal_evaluation_cache', ['goal_id'], unique=False)
    op.create_index(op.f('ix_goal_evaluation_cache_user_id'), 'goal_evaluation_cache', ['user_id'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Intentionally a no-op: the surrogate-key migration cannot be reversed."""
    # not possible !
    pass
| {
"repo_name": "ActiDoo/gamification-engine",
"path": "gengine/app/alembic/versions/65c7a32b7322_achievement_date_unique.py",
"copies": "1",
"size": "2270",
"license": "mit",
"hash": 8783330824597448000,
"line_mean": 51.7906976744,
"line_max": 219,
"alpha_frac": 0.7281938326,
"autogenerated": false,
"ratio": 3.1267217630853996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4354915595685399,
"avg_score": null,
"num_lines": null
} |
"""Achievement management."""
from flask import jsonify
from flask_restplus import Namespace, Resource
import api
from api import PicoException, require_admin
from .schemas import achievement_patch_req, achievement_req
# Flask-RESTPlus namespace under which all achievement routes are registered.
ns = Namespace("achievements", description="Achievement management")
@ns.route("")
class AchievementList(Resource):
"""Get the full list of achievements, or add a new achievement."""
@require_admin
@ns.response(200, "Success")
@ns.response(401, "Not logged in")
@ns.response(403, "Not authorized")
def get(self):
"""Get the full list of achievements."""
return api.achievement.get_all_achievements(), 200
@require_admin
@ns.expect(achievement_req)
@ns.response(201, "Achievement added")
@ns.response(400, "Error parsing request")
@ns.response(401, "Not logged in")
@ns.response(403, "Not authorized")
def post(self):
"""Add a new achievement."""
req = achievement_req.parse_args(strict=True)
aid = api.achievement.insert_achievement(**req)
res = jsonify({"success": True, "aid": aid})
res.status_code = 201
return res
@ns.response(200, "Success")
@ns.response(404, "Achievement not found")
@ns.route("/<string:achievement_id>")
class Achievement(Resource):
"""Get or update a specific achievement."""
@require_admin
@ns.response(401, "Not logged in")
@ns.response(403, "Not authorized")
def get(self, achievement_id):
"""Retrieve a specific achievement."""
res = api.achievement.get_achievement(achievement_id)
if not res:
raise PicoException("Achievement not found", status_code=404)
else:
return res, 200
@require_admin
@ns.expect(achievement_req)
@ns.response(400, "Error parsing request")
@ns.response(401, "Not logged in")
@ns.response(403, "Not authorized")
def put(self, achievement_id):
"""Replace an existing achievement."""
req = achievement_req.parse_args(strict=True)
aid = api.achievement.update_achievement(achievement_id, req)
if aid is None:
raise PicoException("Achievement not found", status_code=404)
return jsonify({"success": True, "aid": aid})
@require_admin
@ns.expect(achievement_patch_req)
@ns.response(400, "Error parsing request")
@ns.response(401, "Not logged in")
@ns.response(403, "Not authorized")
def patch(self, achievement_id):
"""Update an existing achievement."""
req = {
k: v for k, v in achievement_patch_req.parse_args().items() if v is not None
}
aid = api.achievement.update_achievement(achievement_id, req)
if aid is None:
raise PicoException("Achievement not found", status_code=404)
return jsonify({"success": True, "aid": aid})
| {
"repo_name": "royragsdale/picoCTF",
"path": "picoCTF-web/api/apps/v1/achievements.py",
"copies": "2",
"size": "2868",
"license": "mit",
"hash": -5250793928133448000,
"line_mean": 33.5542168675,
"line_max": 88,
"alpha_frac": 0.6474895397,
"autogenerated": false,
"ratio": 3.4721549636803872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5119644503380387,
"avg_score": null,
"num_lines": null
} |
#Achieves around .019 when trained with the 3'rd convolutional module having 64 layers
#when trained on around 175,000 patches in total.
#The 32 layer in module 3) is not that different, achieving around .022
#In both cases after 10 training epochs.
#Sadly not the 205,000 due to Mathematica HDF5 bug (uses 32-bit program I think).
import tensorflow as tf
import numpy as np
import h5py
import argparse
#import the data
def readData( fileName ):
    """Open an HDF5 file and return its (images, labels) datasets.

    The handle is kept open on purpose: the returned h5py datasets read
    lazily from it.
    """
    data_file = h5py.File(fileName, 'r')
    return (data_file['/images'], data_file['/labels'])
# Model definitions
def weight_variable(shape):
    """Create a weight tf.Variable with a uniform variance-scaled init.

    The bound is sqrt(12 / (fan_in + fan_out)); for 4-D conv shapes
    [h, w, in, out] the fans are h*w*in and out, otherwise the two dims
    are used directly.
    """
    if len(shape) == 4:
        fan_in = shape[0] * shape[1] * shape[2]
        fan_out = shape[3]
    else:
        fan_in, fan_out = shape[0], shape[1]
    # 12.0 forces float division: under Python 2, `12 / (int sum)` truncates
    # to 0 for fan sums > 12, which would silently zero the init bound.
    limit = np.sqrt(12.0 / (fan_in + fan_out))
    initial = tf.random_uniform(shape, -limit, limit)
    return tf.Variable(initial)
def bias_variable(shape):
    """Return a tf.Variable of the given shape initialised to all zeros."""
    return tf.Variable(tf.constant(0.0, shape=shape))
def conv2d(x, W):
    """2-D convolution with stride 1 and no padding (VALID)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 (SAME padding)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
def model( l1, l2, l3 ):
    """Build the 3-module conv net and return its mean sigmoid cross-entropy.

    Args:
        l1, l2, l3: number of feature maps in convolutional modules 1-3.

    Reads the module-level placeholders `x` (32x32 patches) and `y_` (labels).
    """
    x_image = tf.reshape(x, [-1,32,32,1])
    # Module 1: 5x5 conv -> tanh -> 2x2 max pool
    W_conv1 = weight_variable([5, 5, 1, l1])
    b_conv1 = bias_variable([l1])
    h_conv1 = tf.nn.tanh(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    # Module 2
    W_conv2 = weight_variable([5, 5, l1, l2])
    b_conv2 = bias_variable([l2])
    h_conv2 = tf.nn.tanh(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    # Module 3
    W_conv3 = weight_variable([5, 5, l2, l3])
    b_conv3 = bias_variable([l3])
    h_conv3 = tf.nn.tanh(conv2d(h_pool2, W_conv3) + b_conv3)
    h_pool3 = max_pool_2x2(h_conv3)
    # Linear readout over the l3 remaining features.
    W_fc1 = tf.Variable( tf.truncated_normal( [ l3,1 ], stddev=0.01 ) )
    b_fc1 = bias_variable([1])
    h_pool3_flat = tf.reshape(h_pool3, [-1, l3])
    y_conv = tf.matmul(h_pool3_flat, W_fc1) + b_fc1
    # loss = tf.reduce_mean( tf.pow( ( y_conv - y_ ), 2 ) )
    # NOTE(review): positional (logits, labels) matches the pre-1.0 TF API.
    cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(y_conv[:,0], y_))
    # Log the first layer's filters as images for TensorBoard.
    tf.image_summary( "W1", tf.transpose( W_conv1, [ 3, 0, 1, 2 ] ) )
    return cross_entropy
#Some helper functions
def partition(l, n):
    """Yield successive chunks of `l` of size `n` (chunk size is at least 1)."""
    size = max(1, n)
    return (l[start:start + size] for start in range(0, len(l), size))
def batch_process( x, y ):
    """Pair up corresponding 100-element chunks of inputs and targets."""
    input_batches = partition(x, 100)
    target_batches = partition(y, 100)
    return zip(input_batches, target_batches)
def batch_run( sess, loss, inputs, targets ):
    """Evaluate `loss` over the data in batches of 100 and return the
    sample-weighted mean (uses the module-level placeholders x and y_)."""
    weighted_sum = 0
    for batch_in, batch_out in batch_process(inputs, targets):
        batch_loss = sess.run(loss, feed_dict={x: batch_in, y_: batch_out})
        weighted_sum += batch_loss * len(batch_in)
    return weighted_sum / len(targets)
#Parsing the command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("-datafile",
                    help="HDF5 file containing training and validation data")
parser.add_argument("-logdir",
                    help="logging directory")
parser.add_argument("-checkpointfile",
                    help="file to store saved model")
args = parser.parse_args()
( images, labels ) = readData( args.datafile)
#TensorFlow code
# Placeholders for a batch of 32x32 grayscale patches and scalar labels.
x = tf.placeholder(tf.float32, [None, 32, 32])
y_ = tf.placeholder( tf.float32, shape=[None] )
# 80/20 split of the dataset into training and validation partitions.
training_size = int( len( images )*.8 )
print( "Training Set size = ", training_size )
training_images = images[:training_size]
training_labels = labels[:training_size]
validation_images = images[training_size:]
validation_labels = labels[training_size:]
def train_model( l1 = 32, l2 = 32, l3 = 64 ):
    """Build and train the model; return the final validation loss.

    Args:
        l1, l2, l3: channel counts of the three convolutional modules.

    Uses the module-level training/validation arrays and placeholders.
    """
    loss = model( l1, l2, l3 )
    train_step = tf.train.MomentumOptimizer( learning_rate=0.01, use_nesterov=True, momentum=0.9 ).minimize( loss )
    # Limit GPU memory so several sweeps can share a card.
    gpu_options = tf.GPUOptions( per_process_gpu_memory_fraction=0.4 )
    # NOTE(review): device_count={'GPU': -1} — TF documents non-negative
    # counts here; presumably meant to restrict GPU use — confirm.
    sess = tf.Session( config=tf.ConfigProto( gpu_options = gpu_options, device_count = {'GPU':-1} ) )
    sess.run( tf.global_variables_initializer() )
    for epoch in range(6):
        for batch in batch_process( training_images, training_labels ):
            sess.run(train_step, feed_dict={ x: batch[0], y_: batch[1] } )
        # Per-epoch monitoring of train/validation loss.
        training_loss = batch_run( sess, loss, training_images, training_labels )
        validation_loss = batch_run( sess, loss, validation_images, validation_labels )
        print( "Level", l2, " Train=", training_loss, "Validation=", validation_loss, "Overfitting=", training_loss/validation_loss )
    return( validation_loss )
# Sweep the width of the second convolutional module and collect the
# validation loss obtained for each setting.
res = []
for l2 in range(1,64):
    res.append( train_model( l1=32, l2=l2, l3=64 ) )
print( res )
| {
"repo_name": "jfrancis71/TensorFlowApps",
"path": "FaceTraining.py",
"copies": "1",
"size": "4559",
"license": "mit",
"hash": 2239702025717803000,
"line_mean": 34.0692307692,
"line_max": 157,
"alpha_frac": 0.6499232288,
"autogenerated": false,
"ratio": 2.842269326683292,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3992192555483292,
"avg_score": null,
"num_lines": null
} |
"""Acid Base descriptor.
References:
* http://cdk.github.io/cdk/1.5/docs/api/org/openscience/cdk/qsar/descriptors/molecular/AcidicGroupCountDescriptor.html
* http://cdk.github.io/cdk/1.5/docs/api/org/openscience/cdk/qsar/descriptors/molecular/BasicGroupCountDescriptor.html
""" # noqa: E501
from abc import abstractproperty
from rdkit import Chem
from ._base import Descriptor
__all__ = ("AcidicGroupCount", "BasicGroupCount")
class SmartsCountBase(Descriptor):
    """Base class for descriptors counting matches of a SMARTS union pattern."""

    __slots__ = ("_mol",)

    @classmethod
    def preset(cls, version):
        yield cls()

    def _create_smarts(self):
        """Compile the SMARTS alternatives into one query molecule; cache it."""
        alternatives = ",".join("$(" + pattern + ")" for pattern in self.SMARTS)
        self._mol = Chem.MolFromSmarts("[" + alternatives + "]")
        return self._mol

    @abstractproperty
    def SMARTS(self):
        pass

    def __str__(self):
        return self._name

    def parameters(self):
        return ()

    def calculate(self):
        """Count substructure matches of the cached query in self.mol."""
        query = getattr(self, "_mol", None) or self._create_smarts()
        return len(self.mol.GetSubstructMatches(query))

    rtype = int

    _extra_docs = ("SMARTS",)
class AcidicGroupCount(SmartsCountBase):
    r"""acidic group count descriptor."""

    since = "1.0.0"
    __slots__ = ()

    def description(self):
        return "acidic group count"

    # Descriptor name reported in output tables.
    _name = "nAcid"

    # SMARTS alternatives whose union defines an "acidic" group; the base
    # class compiles these into a single recursive-SMARTS query.
    SMARTS = (
        "[O;H1]-[C,S,P]=O",
        "[*;-;!$(*~[*;+])]",
        "[NH](S(=O)=O)C(F)(F)F",
        "n1nnnc1",
    )
class BasicGroupCount(SmartsCountBase):
    r"""basic group count descriptor."""

    since = "1.0.0"
    __slots__ = ()

    def description(self):
        return "basic group count"

    # Descriptor name reported in output tables.
    _name = "nBase"

    # SMARTS alternatives whose union defines a "basic" group; the base
    # class compiles these into a single recursive-SMARTS query.
    SMARTS = (
        "[NH2]-[CX4]",
        "[NH](-[CX4])-[CX4]",
        "N(-[CX4])(-[CX4])-[CX4]",
        "[*;+;!$(*~[*;-])]",
        "N=C-N",
        "N-C=N",
    )
| {
"repo_name": "mordred-descriptor/mordred",
"path": "mordred/AcidBase.py",
"copies": "1",
"size": "1810",
"license": "bsd-3-clause",
"hash": 1713949195114105600,
"line_mean": 20.0465116279,
"line_max": 122,
"alpha_frac": 0.5524861878,
"autogenerated": false,
"ratio": 2.9966887417218544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9049174929521855,
"avg_score": 0,
"num_lines": 86
} |
"""aclarknet URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from aclarknet.website import views
from django.conf.urls import url
from django.contrib import admin
# URL routes for the public site; each named route maps to the view of the
# same name in aclarknet.website.views.
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^about$', views.about, name='about'),
    # NOTE(review): unanchored suffix — r'^admin' also matches e.g. 'adminx';
    # the conventional pattern is r'^admin/'.
    url(r'^admin', admin.site.urls),
    url(r'^blog$', views.blog, name='blog'),
    url(r'^book$', views.book, name='book'),
    url(r'^clients$', views.clients, name='clients'),
    url(r'^contact$', views.contact, name='contact'),
    url(r'^community$', views.community, name='community'),
    url(r'^open-source$', views.opensource, name='open-source'),
    url(r'^projects$', views.projects, name='projects'),
    url(r'^services$', views.services, name='services'),
    url(r'^team$', views.team, name='team'),
    url(r'^testimonials$', views.testimonials, name='testimonials'),
    url(r'^location$', views.location, name='location'),
    url(r'^history$', views.history, name='history'),
    url(r'^now$', views.now, name='now'),
]
| {
"repo_name": "ACLARKNET/aclarknet-website",
"path": "aclarknet/urls.py",
"copies": "1",
"size": "1649",
"license": "mit",
"hash": 5174645127124175000,
"line_mean": 42.3947368421,
"line_max": 79,
"alpha_frac": 0.6658580958,
"autogenerated": false,
"ratio": 3.4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.95658580958,
"avg_score": 0,
"num_lines": 38
} |
"""A class and CLI client to interact with a Pyjojo instance"""
from __future__ import print_function
import base64
import json
import requests
class Mojo(object):
    """A class used to interact with a Pyjojo instance."""

    def __init__(self, **kwargs):
        """Construct a Mojo by connecting to a Jojo and caching its scripts.

        Keyword Args:
            endpoint (str): Jojo host name (default "localhost").
            port (int): Jojo port (default 3000).
            use_ssl (bool): connect over HTTPS (default False).
            verify (bool): verify the server's SSL certificate (default True).
            user, password (str): optional HTTP basic-auth credentials.
        """
        # Transform some options into connection data
        url = "http://{}"
        if kwargs.get("use_ssl", False):
            url = "https://{}"
        self.endpoint = url.format(
            "{}:{}".format(
                kwargs.get("endpoint", "localhost"),
                kwargs.get("port", 3000)
            )
        )
        self.verify = kwargs.get("verify", True)
        self.user = kwargs.get("user", None)
        self.password = kwargs.get("password", None)
        # Fix: boolean `and`, not the bitwise `&` operator.
        self.auth = (self.user is not None) and (self.password is not None)
        self.unauthorized = False
        # Get the script lexicon from the Jojo and cache it
        self.scripts = self.get_scripts()
        # For backward compatibility, default the HTTP method for old jojos.
        # Fix: test membership in the script's metadata dict — the previous
        # substring test on the script *name* overwrote server-provided
        # methods with "POST".
        for script in self.scripts:
            if "http_method" not in self.scripts[script]:
                self.scripts[script]["http_method"] = "POST"

    def __call(self, path, method="GET", data=""):
        """Make an HTTP request to the Jojo and return the response.

        Sets self.unauthorized when the server answers 401.
        """
        session = requests.Session()
        headers = {"Content-Type": "application/json"}
        if self.auth:
            credentials = "{}:{}".format(self.user, self.password)
            # Encode to bytes before base64 so this also works on Python 3,
            # where b64encode rejects str input.
            token = base64.b64encode(credentials.encode("utf-8")).decode("ascii")
            headers["Authorization"] = "Basic {}".format(token)
        req = requests.Request(
            method,
            "{}{}".format(self.endpoint, path),
            data=data,
            headers=headers
        ).prepare()
        response = session.send(req, verify=self.verify)
        if response.status_code == 401:
            self.unauthorized = True
        return response

    def get_scripts(self, param=None, tags=None):
        """Get the collection of scripts that live on the Jojo.

        Returns:
            dict: script name -> metadata on 200; {} for other non-error
            statuses; raises (via raise_for_status) for 4xx/5xx.
        """
        route = "/scripts"
        if param is not None and tags is not None:
            route += "?{}={}".format(param, tags)
        resp = self.__call(route, method="GET")
        if resp.status_code == 200:
            return resp.json()["scripts"]
        elif resp.status_code == 401:
            self.unauthorized = True
        resp.raise_for_status()
        return {}

    def get_script_names(self, param=None, tags=None):
        """Get a list of script names; None when the request fails."""
        route = "/script_names"
        if param is not None and tags is not None:
            route += "?{}={}".format(param, tags)
        resp = self.__call(route, method="GET")
        if resp.status_code == 200:
            return resp.json()["script_names"]

    def reload(self):
        """Reload the Jojo's script cache, then refresh the local cache.

        Returns:
            True on success, False when unauthorized, None otherwise.
        """
        response = self.__call("/reload", method="POST")
        if response.status_code == 200:
            self.scripts = self.get_scripts()
            return True
        elif response.status_code == 401:
            return False
        else:
            return None

    def get_script(self, name, use_cache=True):
        """Get data about a script, from the cache or straight from the Jojo.

        Returns None when the script does not exist.
        """
        if use_cache:
            if name in self.scripts:
                return self.scripts[name]
            return None
        resp = self.__call("/scripts/{}".format(name), method="OPTIONS")
        if resp.status_code == 200:
            self.scripts[name] = resp.json()['script']
            return self.scripts[name]
        return None

    def run(self, name, params=None):
        """Run the named script with the given parameters.

        Returns:
            The HTTP response, or None when no such script exists
            (previously this fell through to a KeyError after printing).
        """
        data = None
        if name not in self.scripts:
            script = self.get_script(name, use_cache=False)
            if script is None:
                print("No script named {} exists on the server".format(name))
                return None
        if params is not None:
            data = json.dumps(params)
        return self.__call(
            "/scripts/{}".format(name),
            method=self.scripts[name]['http_method'],
            data=data
        )
| {
"repo_name": "GradysGhost/pymojo",
"path": "pymojo/mojo.py",
"copies": "1",
"size": "4385",
"license": "apache-2.0",
"hash": -1865219172026683000,
"line_mean": 31.4814814815,
"line_max": 79,
"alpha_frac": 0.5388825542,
"autogenerated": false,
"ratio": 4.299019607843137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 135
} |
"""A class and helper functions for dealing with density
histograms.
"""
import glob
import os.path
import numpy as np
class DensityHistogram(object):
    """A collection of data about the z-direction local density.

    Attributes:
        index: A numpy array enumerating the subensembles of the
            simulation.
        zbins: An array of z-coordinate bins for the histogram.
        rho: A 2D array of the relative densities at each distance bin
            for each subensemble.
        freqs: An array with the number of samples for each
            subensemble.
    """

    def __init__(self, index, zbins, rho, freqs):
        self.index = index
        self.zbins = zbins
        self.rho = rho
        self.freqs = freqs

    def write_pzhist(self, path, write_pzcnt=False):
        """Write a density histogram to a pzhist_*.dat file.

        Args:
            path: The name of the file to write.
            write_pzcnt: If True, also write a pzcnt.dat file in the
                same directory with the frequencies.
        """
        parent = os.path.dirname(path)
        if write_pzcnt:
            counts = np.column_stack((self.index, self.freqs))
            np.savetxt(os.path.join(parent, 'pzcnt.dat'), counts)
        columns = (self.index,) + tuple(np.transpose(self.rho))
        np.savetxt(path, np.column_stack(columns))
def read_pzhist(path):
    """Read a density histogram from a pzhist*.dat file.

    Note that the index and frequencies are read from the
    pzcnt.dat file in the same directory as the pzhist file.

    Args:
        path: The location of the pzhist_*.dat file.

    Returns:
        A DensityHistogram object.
    """
    columns = np.transpose(np.loadtxt(path))
    counts_file = os.path.join(os.path.dirname(path), 'pzcnt.dat')
    index, freqs = np.loadtxt(counts_file, unpack=True)
    return DensityHistogram(index=index, zbins=columns[1], rho=columns[2:],
                            freqs=freqs)
def combine_histograms(hists):
    """Create a density histogram by averaging a list of provided
    histograms.

    Args:
        hists: A list of DensityHistograms.

    Returns:
        A new DensityHistogram with the appropriately summed
        frequencies and frequency-weighted averaged densities.
    """
    index, zbins = hists[0].index, hists[0].zbins
    freq_sum = sum(h.freqs for h in hists)
    # Bug fix: the attribute is `freqs` (see DensityHistogram.__init__ and
    # the line above); `h.frequencies` raised AttributeError.
    weighted = sum(h.freqs * np.transpose(h.rho) for h in hists)
    # nan_to_num zeroes out bins whose total frequency is zero.
    rho = np.transpose(np.nan_to_num(weighted / freq_sum))
    return DensityHistogram(index=index, zbins=zbins, rho=rho, freqs=freq_sum)
def combine_pzhist_runs(path, runs, hist_file):
    """Combine the density histograms of multiple production
    runs.

    Args:
        path: The directory containing the runs to combine.
        runs: The list of runs to combine.
        hist_file: The specific histogram file to combine.

    Returns:
        A DensityHistogram object with the summed frequencies
        and averaged densities of all the runs.
    """
    per_run = [read_pzhist(os.path.join(path, run, hist_file))
               for run in runs]
    return combine_histograms(per_run)
def read_all_pzhists(path):
    """Read the density histograms for all atoms of all species.

    Args:
        path: The directory containing the pzhist_*.dat files.

    Returns:
        A dict of DensityHistogram objects, with the names of each
        file as the keys.
    """
    matches = sorted(glob.glob(os.path.join(path, 'pzhist_*.dat')))
    return {os.path.basename(match): read_pzhist(match) for match in matches}
def combine_all_pzhists(path, runs):
    """Combine the density histograms for all atoms of all species
    from a series of production runs.

    Args:
        path: The directory containing the production runs.
        runs: The list of runs to combine.

    Returns:
        A dict of DensityHistogram objects, with the names of each
        file as the keys.
    """
    # The first run's directory determines which histogram files exist.
    pattern = os.path.join(path, runs[0], 'pzhist_*.dat')
    names = [os.path.basename(match) for match in sorted(glob.glob(pattern))]
    return {name: combine_pzhist_runs(path, runs, name) for name in names}
| {
"repo_name": "adamrall/coex",
"path": "coex/density.py",
"copies": "1",
"size": "4142",
"license": "bsd-2-clause",
"hash": 8885114256098753000,
"line_mean": 29.9104477612,
"line_max": 78,
"alpha_frac": 0.6224046354,
"autogenerated": false,
"ratio": 3.796516956920257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49189215923202567,
"avg_score": null,
"num_lines": null
} |
# A class containing reading tools for sentence embeddings.
# 1) reading the word embeddings from a file
# 2) reading the Microsoft Research Paraphrase Corpus from a file (version training or testing)
import datetime
from bllipparser import Tree
import math
import nltk
import sys
from nltk.data import find
# Dimensionality of the word-embedding vectors in the embedding file.
embeddingsSize = 200
def getVectorForSentence(sentence, emb_dict):
    """Return the element-wise sum of the embeddings of the space-separated
    tokens of `sentence`; tokens missing from emb_dict are skipped."""
    sentence_vec = [0] * embeddingsSize
    for token in sentence.split(' '):
        if token not in emb_dict:
            #print('Problem with: ', token)
            continue
        token_emb = emb_dict[token]
        for j in range(len(sentence_vec)):
            sentence_vec[j] += token_emb[j]
    return sentence_vec
#copied this, should maybe try to understand it
def cosine_similarity(v1, v2):
    """Compute the cosine similarity of v1 and v2:
    (v1 . v2) / (||v1|| * ||v2||); 0 when either norm is zero."""
    dot = norm1 = norm2 = 0
    for i in range(len(v1)):
        a = v1[i]
        b = v2[i]
        dot += a * b
        norm1 += a * a
        norm2 += b * b
    if norm1 == 0 or norm2 == 0:
        return 0
    return dot / math.sqrt(norm1 * norm2)
def getRepForTrees(tree, emb_dict):
    """Recursively compute a vector representation for a parse tree.

    Leaves (preterminals) map to their token's embedding (a zero vector for
    unknown tokens); internal nodes average their children's vectors.
    """
    #print 'tree: ', tree
    #print 'tree[0]: ', tree[0]
    if tree.is_preterminal():
        #print 'TOKEN: ', tree.token
        if tree.token in emb_dict:
            return emb_dict[tree.token]
        else:
            # Unknown token: contribute a zero vector.
            newRep = []
            for i in range(embeddingsSize):
                newRep.append(0)
            return newRep
    else:
        newRep = []
        for i in range(embeddingsSize):
            newRep.append(0)
        # Sum the children's representations ...
        for i in range(len(tree)):
            newVec = getRepForTrees(tree[i], emb_dict)
            for j in range(embeddingsSize):
                newRep[j] = newRep[j] + newVec[j]
        # ... then average over the number of children.
        for i in range(embeddingsSize):
            newRep[i] = newRep[i] / len(tree)
        return newRep
def getRepresentationForSentence(sentence, emb_dict, parser):
    """Parse the space-separated sentence and return its tree-averaged
    embedding via getRepForTrees."""
    parse_tree = Tree(parser.simple_parse(sentence.split(' ')))
    return getRepForTrees(parse_tree, emb_dict)
def loadEmbeddingFile(embeddingFile):
print 'LOADEMBEDDINGFILE ', datetime.datetime.now().time()
embeddingDict={}
file = open(embeddingFile)
for line in file:
tokens=line[:len(line)-2].split(' ') # consider lowercases
values=[]
for i in range(1, len(tokens)):
values.append(float(tokens[i]))
embeddingDict[tokens[0]]=values
print ('Embedding loading finished.')
return embeddingDict
# TODO: tokenization! Or ask Wenpeng
def load_msrp(isTraining):
print 'LOAD MSRP ', datetime.datetime.now().time()
if isTraining:
file = open('/mounts/data/proj/kann/msrp/msr_paraphrase_train.txt')
#file = open('/mounts/data/proj/kann/QA_MSR_training.txt')
else:
file = open('/mounts/data/proj/kann/msrp/msr_paraphrase_test.txt')
#file = open('/mounts/data/proj/kann/QA_MSR_test.txt')
msrpDict={}
counter = -1
for line in file:
counter += 1
if counter == 0 or line == '':
continue
tokens=line[:len(line)-1].split('\t') # consider only lower case
msrpDict[prepare_string(tokens[3]), prepare_string(tokens[4])] = (tokens[0] == "1") # 1 is paraphrase
print ('MSRP loading finished. ' + str(len(msrpDict)) + ' pairs of sentences have been loaded.')
return msrpDict
def prepare_string(line):
    """Tokenize `line` with nltk and re-join the tokens with single spaces.

    Uses str.join, which is linear (the old per-token concatenation was
    quadratic) and returns "" for empty input instead of raising IndexError.
    """
    return " ".join(nltk.word_tokenize(line))
"repo_name": "Kelina/SentenceEmbeddings",
"path": "Tools_Reading.py",
"copies": "1",
"size": "3468",
"license": "apache-2.0",
"hash": 149221526007962980,
"line_mean": 26.1015625,
"line_max": 105,
"alpha_frac": 0.6450403691,
"autogenerated": false,
"ratio": 3.226046511627907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9147040239156929,
"avg_score": 0.04480932831419563,
"num_lines": 128
} |
"""A :class:`Dataset` is a simple abstraction around a `data` and a
`target` matrix.
A Dataset's :attr:`~Dataset.data` and :attr:`~Dataset.target`
attributes are available via attributes of the same name:
.. doctest::
>>> data = np.array([[3, 2, 1], [2, 1, 0]] * 4)
>>> target = np.array([3, 2] * 4)
>>> dataset = Dataset(data, target)
>>> dataset.data is data
True
>>> dataset.target is target
True
Attribute :attr:`~Dataset.split_indices` gives us a cross-validation
generator:
.. doctest::
>>> for train_index, test_index in dataset.split_indices:
... X_train, X_test, = data[train_index], data[test_index]
... y_train, y_test, = target[train_index], target[test_index]
An example of where a cross-validation generator like
:attr:`~Dataset.split_indices` returns it is expected is
:class:`sklearn.grid_search.GridSearchCV`.
If all you want is a train/test split of your data, you can simply
call :meth:`Dataset.train_test_split`:
.. doctest::
>>> X_train, X_test, y_train, y_test = dataset.train_test_split()
>>> X_train.shape, X_test.shape, y_train.shape, y_test.shape
((6, 3), (2, 3), (6,), (2,))
"""
import warnings
import numpy as np
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn import preprocessing
# Emit a deprecation warning as soon as this module is imported.
warnings.warn("""
The nolearn.dataset module will be removed in nolearn 0.6. If you want to
continue to use this module, please consider copying the code into
your own project.
""")
class Dataset(object):
    """Wrap a `data`/`target` matrix pair and provide stratified splits."""

    n_iterations = 3
    test_size = 0.25
    random_state = 42

    def __init__(self, data, target):
        # A string argument is treated as the path of a .npy file to load.
        if isinstance(data, basestring):
            data = np.load(data)
        if isinstance(target, basestring):
            target = np.load(target)
        self.data, self.target = data, target

    def scale(self, **kwargs):
        """Standardize `data` in place; return self for chaining."""
        self.data = preprocessing.scale(self.data, **kwargs)
        return self

    @property
    def split_indices(self):
        """A StratifiedShuffleSplit cross-validation generator."""
        return StratifiedShuffleSplit(
            self.target,
            indices=True,
            n_iter=self.n_iterations,
            test_size=self.test_size,
            random_state=self.random_state,
        )

    def train_test_split(self):
        """Return X_train, X_test, y_train, y_test for the first split."""
        train_idx, test_idx = iter(self.split_indices).next()
        return (self.data[train_idx], self.data[test_idx],
                self.target[train_idx], self.target[test_idx])
| {
"repo_name": "rajegannathan/grasp-lift-eeg-cat-dog-solution-updated",
"path": "python-packages/nolearn-0.5/nolearn/dataset.py",
"copies": "2",
"size": "2473",
"license": "bsd-3-clause",
"hash": 8896767815531190000,
"line_mean": 28.0941176471,
"line_max": 76,
"alpha_frac": 0.6457743631,
"autogenerated": false,
"ratio": 3.4490934449093444,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5094867808009345,
"avg_score": null,
"num_lines": null
} |
# A class encapsulating the basic functionality commonly used for an OpenGL view
# System imports
import numpy as np
from math import pi, sin, cos, tan
# OpenGL imports
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
# Use euclid for rotations
import euclid as eu
# Local imports
from Utilities import normalize
class Camera(object):
    """An orbit-style OpenGL camera.

    Tracks a view target, camera direction/up vectors, a zoom distance and a
    pan offset, and converts GLUT mouse/keyboard input into camera motion.
    Produces projection and view matrices as float32 numpy arrays.
    """

    def __init__(self, windowWidth, windowHeight):
        """Initialise default view state for a window of the given pixel size."""
        # Window variables
        self.updateDimensions(windowWidth, windowHeight)
        # View variables
        self.viewTarget = np.array((0.0,0.0,0.0))  # point the camera looks at / orbits
        self.cameraDir = eu.Vector3(0,0,1).normalize()  # unit vector from target toward camera
        self.upDir = eu.Vector3(0,1,0).normalize()
        self.cameraTrans = np.array((0.0,0.0,0.0))  # pan offset applied in view space
        self.fov = 65 # degrees
        self.nearClipRat = 0.01  # clip planes are ratios of the zoom distance
        self.farClipRat = 100
        self.zoomDist = 3.0 # Camera distance from target
        self.minZoom = 0.01 # Cannot zoom closer than this
        # Mouse drag variables
        self.mouseDragging = False
        self.mouseDragState = None  # 'rotate' or 'translate' while a drag is active
        self.lastPos = None
        self.shiftHeld = False

    ### OpenGL model and projection
    def projMat(self):
        """Return a perspective projection matrix as a 4x4 float32 array."""
        # Build a projection matrix
        fVal = 1.0 / tan(self.fov * (pi / 360.0))  # cotangent of half the FOV
        farClip = self.farClipRat * self.zoomDist
        nearClip = self.nearClipRat * self.zoomDist
        projMat = np.eye(4)
        projMat[0,0] = fVal / self.aspectRatio
        projMat[1,1] = fVal
        projMat[2,2] = (farClip + nearClip) / (nearClip - farClip)
        projMat[2,3] = (2.0 * farClip * nearClip) / (nearClip - farClip)
        projMat[3,2] = -1.0
        projMat[3,3] = 0.0
        return projMat.astype(np.float32)

    def viewMat(self):
        """Return the world-to-camera transform as a 4x4 float32 array."""
        # First make sure we know all relevant positions and directions
        # (E/C/U are computed for reference; the matrices below are built
        # directly from the direction vectors).
        E = self.viewTarget + np.array(self.cameraDir) * self.zoomDist
        C = self.viewTarget
        U = np.array(self.upDir)
        # Rotation matrix to put the camera in the right direction
        rotMat = np.zeros((4,4))
        rotMat[0,0:3] = np.cross(self.upDir, self.cameraDir)  # camera-right axis
        rotMat[1,0:3] = self.upDir
        rotMat[2,0:3] = self.cameraDir
        rotMat[3,3] = 1.0
        # Translation matrix, which mostly just pushes it out to the -z Axis
        # where the camera looks
        # If we want to make the camera translate, should probably add it here
        transMat = np.eye(4)
        transMat[0,3] = 0.0 + self.cameraTrans[0]
        transMat[1,3] = 0.0 + self.cameraTrans[1]
        transMat[2,3] = -self.zoomDist + self.cameraTrans[2]
        transMat[3,3] = 1.0
        viewMat = np.dot(transMat, rotMat)
        return viewMat.astype(np.float32)

    def getPos(self):
        """Return the camera's world-space position as a float32 array."""
        return (self.viewTarget + np.array(self.cameraDir) * self.zoomDist).astype(np.float32)

    def getUp(self):
        """Return the camera's up direction as a float32 array."""
        return np.array(self.upDir).astype(np.float32)

    def updateDimensions(self, windowWidth, windowHeight):
        """Record new window dimensions and update the GL viewport."""
        self.aspectRatio = float(windowWidth) / windowHeight
        self.windowWidth = windowWidth
        self.windowHeight = windowHeight
        glViewport(0, 0, windowWidth, windowHeight);

    ### Mouse and keyboard callbacks to reposition
    def processMouse(self, button, state, x, y, shiftHeld):
        """GLUT mouse callback: wheel zooms; left button starts/ends a drag."""
        # print("ProcessMouse button = " + str(button) + " state = " + str(state))
        # Scroll wheel for zoom
        if button == 3 or button == 4:
            if state == GLUT_UP:
                return
            elif button == 3:
                self.zoomIn()
            elif button == 4:
                self.zoomOut()
        # Left click activates dragging
        elif button == GLUT_LEFT_BUTTON:
            if state == GLUT_DOWN:
                self.mouseDragging = True
                self.lastPos = (x,y)
                # Holding shift gives translation instead of rotation
                if(shiftHeld):
                    self.mouseDragState = 'translate'
                else:
                    self.mouseDragState = 'rotate'
            else: # (state == GLUT_UP)
                self.mouseDragging = False
                self.lastPos = None
                self.mouseDragState = None

    def processMotion(self, x, y):
        """GLUT motion callback: apply rotation or translation while dragging."""
        if self.mouseDragging:
            # The vector representing this drag, scaled so the dimensions
            # of the window correspond to 1.0
            # NOTE(review): delY is also scaled by windowWidth (not height),
            # presumably to keep the drag response isotropic — confirm.
            delX = (float(x) - self.lastPos[0]) / self.windowWidth
            delY = (float(y) - self.lastPos[1]) / self.windowWidth
            if(self.mouseDragState == 'rotate'):
                # Scale the rotations relative to the screen size
                delTheta = -2*pi * delX
                delPhi = -pi * delY
                # Rotate by theta around 'up' (rotating up is unneeded since it
                # would do nothing)
                oldCamDir = self.cameraDir.copy();
                self.cameraDir = self.cameraDir.rotate_around(self.upDir, delTheta)
                # # Rotate by phi around 'left'
                leftDir = self.upDir.cross(oldCamDir)
                self.cameraDir = self.cameraDir.rotate_around(leftDir, delPhi)
                self.upDir = self.upDir.rotate_around(leftDir, delPhi)
            elif(self.mouseDragState == 'translate'):
                moveDist = self.zoomDist * 5.0  # pan speed scales with zoom
                self.cameraTrans[0] += delX*moveDist
                self.cameraTrans[1] -= delY*moveDist
            self.lastPos = (x,y)

    def processKey(self, key, x, y):
        """GLUT keyboard callback: 'r'/'f' zoom, 'wasd' pans the view."""
        # print("ProcessKey key = " + str(key))
        moveDist = self.zoomDist * 0.02
        # Use 'r' and 'f' to zoom (OSX doesn't give mouse scroll events)
        if key == 'r':
            self.zoomIn()
        elif key == 'f':
            self.zoomOut()
        # Use 'wasd' to translate view window
        elif key == 'd':
            self.cameraTrans[0] += moveDist
        elif key == 'a':
            self.cameraTrans[0] -= moveDist
        elif key == 'w':
            self.cameraTrans[1] += moveDist
        elif key == 's':
            self.cameraTrans[1] -= moveDist

    def zoomIn(self):
        """Move 10% closer to the target, clamped at minZoom."""
        self.zoomDist = max(self.minZoom, self.zoomDist * 0.9)

    def zoomOut(self):
        """Move 10% farther from the target."""
        self.zoomDist = self.zoomDist * 1.1
| {
"repo_name": "nmwsharp/DDGSpring2016",
"path": "core/Camera.py",
"copies": "1",
"size": "6240",
"license": "mit",
"hash": 4532495744758387700,
"line_mean": 31.5,
"line_max": 94,
"alpha_frac": 0.5708333333,
"autogenerated": false,
"ratio": 3.6533957845433256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47242291178433254,
"avg_score": null,
"num_lines": null
} |
# a class for all the global variables for the CAD system, to replace HeeksCAD
import wx
from SelectMode import SelectMode
from Color import HeeksColor
from Material import Material
import geom
from OpenGL.GL import *
from OpenGL.GLU import *
from Grid import RenderGrid
import sys
from CadFrame import CadFrame
# Enumeration of the viewport background fill styles (see glCommands, which
# selects how many of the four background colors are used for the gradient).
BackgroundModeOneColor = 0
BackgroundModeTwoColors = 1            # top/bottom gradient
BackgroundModeTwoColorsLeftToRight = 2 # left/right gradient
BackgroundModeFourColors = 3           # one color per corner
class HeeksCADApp(wx.App):
    """The application object for the CAD system.

    Replaces HeeksCAD's global state: the current input mode, the list of
    drawable objects, and the OpenGL rendering options (background,
    lighting, grid).
    """

    def __init__(self):
        wx.App.__init__(self)
        self.input_mode = SelectMode(self)
        self.current_viewport = None
        self.mouse_wheel_forward_away = False
        self.background_mode = BackgroundModeTwoColors
        # One colour per window corner; only the first is used in
        # one-colour mode.
        self.background_color = [HeeksColor(255, 175, 96), HeeksColor(198, 217, 119), HeeksColor(247, 198, 243), HeeksColor(193, 235, 236)]
        self.objects = []
        self.light_push_matrix = True
        self.on_glCommands_list = []  # extra draw callbacks, see glCommands
        self.transform_gl_list = []  # display list drawn under drag_matrix
        self.grid_mode = 3
        self.current_coordinate_system = None
        self.draw_to_grid = True
        self.digitizing_grid = 1.0

    def OnInit(self):
        """wx.App hook: create and show the main frame."""
        self.frame = CadFrame()
        self.frame.Show()
        return True

    def Viewport(self):
        """Create a new Viewport bound to this app.

        The import is local to avoid a circular import with Viewport.
        """
        from Viewport import Viewport
        return Viewport(self)

    def OnMouseEvent(self, viewport, event):
        """Forward a mouse event to the viewport it occurred in."""
        viewport.OnMouseEvent(event)

    def GetBox(self, box):
        # to do
        pass

    def GetPixelScale(self):
        """Return the pixel scale of the current viewport's view point."""
        return self.current_viewport.view_point.pixel_scale

    def EnableBlend(self):
        """Turn on standard alpha blending."""
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

    def DisableBlend(self):
        glDisable(GL_BLEND)

    def glCommands(self, viewport):
        """Render the whole scene into the viewport's back buffer.

        Draws the (possibly graduated) background, every visible object,
        registered extra draw callbacks, the input-mode overlay, any drag
        geometry, deferred objects, and finally the grid.
        """
        glDrawBuffer(GL_BACK)
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
        glDisable(GL_BLEND)
        glDisable(GL_LINE_SMOOTH)
        viewport.SetViewport()
        if self.background_mode == BackgroundModeTwoColors or self.background_mode == BackgroundModeTwoColorsLeftToRight or self.background_mode == BackgroundModeFourColors:
            # Draw a graduated background as a full-window quad in a
            # temporary 2D projection.
            glClear(GL_DEPTH_BUFFER_BIT)
            glMatrixMode(GL_PROJECTION)
            glLoadIdentity()
            gluOrtho2D(0.0, 1.0, 0.0, 1.0)
            glMatrixMode(GL_MODELVIEW)
            glLoadIdentity()
            # Set up the four corner colours; two-colour modes duplicate
            # corners so the gradient runs in one direction only.
            c = [self.background_color[i] for i in range(0, 4)]
            if self.background_mode == BackgroundModeTwoColors:
                c[2] = c[0]
                c[3] = c[1]
            elif self.background_mode == BackgroundModeTwoColorsLeftToRight:
                c[1] = c[0]
                c[3] = c[2]
            glShadeModel(GL_SMOOTH)
            glBegin(GL_QUADS)
            c[0].glColor()
            glVertex2f(0.0, 1.0)
            c[1].glColor()
            glVertex2f(0.0, 0.0)
            c[3].glColor()
            glVertex2f(1.0, 0.0)
            c[2].glColor()
            glVertex2f(1.0, 1.0)
            glEnd()
            glShadeModel(GL_FLAT)
        viewport.view_point.SetProjection(True)
        viewport.view_point.SetModelView()
        if self.background_mode == BackgroundModeOneColor:
            # clear the back buffer to the single background colour
            self.background_color[0].glClearColor(1.0)
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        else:
            glClear(GL_DEPTH_BUFFER_BIT)
        # render everything
        self.CreateLights()
        glDisable(GL_LIGHTING)
        Material().glMaterial(1.0)
        glDepthFunc(GL_LEQUAL)
        glEnable(GL_DEPTH_TEST)
        glLineWidth(1)
        glDepthMask(1)
        glEnable(GL_POLYGON_OFFSET_FILL)
        glShadeModel(GL_FLAT)
        viewport.view_point.SetPolygonOffset()
        after_others_objects = []
        for obj in self.objects:
            if obj.OnVisibleLayer() and obj.visible:
                if obj.DrawAfterOthers():
                    # BUGFIX: was 'after_other_object.append(obj)' which
                    # raised NameError for any draw-after-others object.
                    after_others_objects.append(obj)
                else:
                    obj.glCommands()
        glDisable(GL_POLYGON_OFFSET_FILL)
        for callback in self.on_glCommands_list:
            # BUGFIX: was 'callbackfunc()' (undefined name); call the
            # registered callback itself.
            callback()
        glEnable(GL_POLYGON_OFFSET_FILL)
        self.input_mode.OnRender()
        if self.transform_gl_list:
            glPushMatrix()
            # NOTE(review): extract_transposed is not imported in this file
            # and self.drag_matrix is never initialised here — confirm
            # where these are defined before relying on this path.
            m = extract_transposed(self.drag_matrix)
            glMultMatrixd(m)
            glCallList(self.transform_gl_list)
            glPopMatrix()
        # draw any last_objects
        for obj in after_others_objects:
            obj.glCommands()
        # draw the ruler
        #if(self.show_ruler and self.ruler.m_visible):
        # self.ruler.glCommands()
        # draw the grid
        glDepthFunc(GL_LESS)
        RenderGrid(self, viewport.view_point)
        glDepthFunc(GL_LEQUAL)
        # draw the datum
        #RenderDatumOrCurrentCoordSys();
        self.DestroyLights()
        glDisable(GL_DEPTH_TEST)
        glDisable(GL_POLYGON_OFFSET_FILL)
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        #if(m_hidden_for_drag.size() == 0 || !m_show_grippers_on_drag)m_marked_list->GrippersGLCommands(false, false);
        # draw the input mode text on the top
        #if(m_graphics_text_mode != GraphicsTextModeNone)
        #{
        # wxString screen_text1, screen_text2;
        # if(m_sketch_mode)
        # screen_text1.Append(_T("Sketch Mode:\n"));
        # if(input_mode_object && input_mode_object->GetTitle())
        # {
        # screen_text1.Append(input_mode_object->GetTitle());
        # screen_text1.Append(_T("\n"));
        # }
        # if(m_graphics_text_mode == GraphicsTextModeWithHelp && input_mode_object)
        # {
        # const wxChar* help_str = input_mode_object->GetHelpText();
        # if(help_str)
        # {
        # screen_text2.Append(help_str);
        # }
        # }
        # render_screen_text(screen_text1, screen_text2);
        # mark various XOR drawn items as not drawn
        viewport.render_on_front_done = False

    def CreateLights(self):
        """Enable a single fixed directional light (GL_LIGHT0)."""
        amb = [0.8, 0.8, 0.8, 1.0]
        dif = [0.8, 0.8, 0.8, 1.0]
        spec = [0.8, 0.8, 0.8, 1.0]
        pos = [0.5, 0.5, 0.5, 0.0]
        lmodel_amb = [0.2, 0.2, 0.2, 1.0]
        local_viewer = [0.0]
        # Optionally set up the light under an identity modelview so it is
        # fixed relative to the camera rather than the model.
        if self.light_push_matrix:
            glPushMatrix()
            glLoadIdentity()
        glLightfv(GL_LIGHT0, GL_AMBIENT, amb)
        glLightfv(GL_LIGHT0, GL_DIFFUSE, dif)
        glLightfv(GL_LIGHT0, GL_POSITION, pos)
        glLightfv(GL_LIGHT0, GL_SPECULAR, spec)
        glLightModelfv(GL_LIGHT_MODEL_AMBIENT, lmodel_amb)
        glLightModelfv(GL_LIGHT_MODEL_LOCAL_VIEWER, local_viewer)
        glLightModelf(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)
        glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION, pos)
        if self.light_push_matrix:
            glPopMatrix()
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glEnable(GL_AUTO_NORMAL)
        glEnable(GL_NORMALIZE)
        glDisable(GL_LIGHT1)
        glDisable(GL_LIGHT2)
        glDisable(GL_LIGHT3)
        glDisable(GL_LIGHT4)
        glDisable(GL_LIGHT5)
        glDisable(GL_LIGHT6)
        glDisable(GL_LIGHT7)

    def DestroyLights(self):
        """Disable lighting again after rendering."""
        glDisable(GL_LIGHTING)
        glDisable(GL_LIGHT0)
        glDisable(GL_AUTO_NORMAL)
        glDisable(GL_NORMALIZE)

    def GetDrawMatrix(self, get_the_appropriate_orthogonal):
        """Return the matrix new geometry should be drawn in.

        :param get_the_appropriate_orthogonal: when True, choose from the
            three orthogonal possibilities the one whose z-axis is closest
            to the camera direction.
        """
        if get_the_appropriate_orthogonal:
            vx, vy = self.current_viewport.view_point.GetTwoAxes(False, 0)
            o = geom.Point3D(0, 0, 0)
            if self.current_coordinate_system: o.Transform(self.current_coordinate_system.GetMatrix())
            return geom.Matrix(o, vx, vy)
        mat = geom.Matrix()
        if self.current_coordinate_system: mat = self.current_coordinate_system.GetMatrix()
        return mat
if __name__ == "__main__":
    # wx.App.__init__ may redirect stdout/stderr to a wx window; keep the
    # originals so they can be restored once the app object exists.
    original_stdout = sys.stdout
    original_stderr = sys.stderr
    app = HeeksCADApp()
    sys.stdout = original_stdout
    sys.stderr = original_stderr
    app.MainLoop()
| {
"repo_name": "danheeks/HeeksCAM",
"path": "PyCAD/HeeksCAD.py",
"copies": "1",
"size": "8521",
"license": "bsd-2-clause",
"hash": 7320780825895988000,
"line_mean": 31.28515625,
"line_max": 173,
"alpha_frac": 0.5670695928,
"autogenerated": false,
"ratio": 3.526903973509934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4593973566309934,
"avg_score": null,
"num_lines": null
} |
"""A class for a normal form game"""
import numpy as np
from scipy.optimize import linprog
from scipy.spatial import HalfspaceIntersection
def build_halfspaces(M):
    """
    Build a matrix representation for the halfspaces corresponding to:

        Mx <= 1 and x >= 0

    This is of the form:

        [M: -1]
        [-1: 0]

    as specified in
    https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.spatial.HalfspaceIntersection.html

    Parameters
    ----------
    M : array
        A matrix with linear coefficients defining the polytope.

    Returns
    -------
    array
        The half spaces.
    """
    number_of_strategies, dimension = M.shape
    # Stack the payoff rows on top of -I (the non-negativity constraints).
    coefficients = np.vstack((M, -np.eye(dimension)))
    # Offsets: -1 for each "Mx <= 1" row, 0 for each "x >= 0" row.
    offsets = np.concatenate(
        (-np.ones(number_of_strategies), np.zeros(dimension))
    )
    return np.column_stack((coefficients, offsets))
def find_feasible_point(halfspaces):
    """
    Use linear programming to find a point inside the halfspaces (needed to
    define a HalfspaceIntersection). This is the Chebyshev center of the
    polytope: the LP maximises the radius of a ball inscribed in it.

    Code adapted from the scipy documentation:
    https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.spatial.HalfspaceIntersection.html

    Parameters
    ----------
    halfspaces : array
        a matrix representation of halfspaces, rows of [A | b] with
        A x + b <= 0.

    Returns
    -------
    array
        A feasible point inside the halfspace.

    Raises
    ------
    ValueError
        If the linear program fails (e.g. the polytope is empty), instead
        of silently returning garbage.
    """
    norm_vector = np.reshape(
        np.linalg.norm(halfspaces[:, :-1], axis=1), (halfspaces.shape[0], 1)
    )
    # Objective: maximise the extra variable (the inscribed radius).
    c = np.zeros((halfspaces.shape[1],))
    c[-1] = -1
    A = np.hstack((halfspaces[:, :-1], norm_vector))
    b = -halfspaces[:, -1:]
    res = linprog(c, A_ub=A, b_ub=b)
    if not res.success:
        # Previously res.x was used unconditionally, crashing opaquely
        # (res.x can be None) when the LP is infeasible.
        raise ValueError("Could not find a feasible point: " + res.message)
    # Drop the radius variable, keep only the point coordinates.
    return res.x[:-1]
def labels(vertex, halfspaces):
    """
    Return the labels of the facets on which a given vertex lies. A facet i
    contains the vertex exactly when its constraint is tight:
    A_i . vertex == -b_i.

    Parameters
    ----------
    vertex: array
        A given vertex of a polytope.
    halfspaces: array
        A halfspace definition of a polytope.

    Returns
    -------
    set
        The set of labels of the vertex.
    """
    coefficients = halfspaces[:, :-1]
    offsets = halfspaces[:, -1]
    on_facet = np.isclose(coefficients @ vertex, -offsets)
    return set(np.where(on_facet)[0])
def non_trivial_vertices(halfspaces):
    """
    Returns all vertex, label pairs (ignoring the origin).

    Parameters
    ----------
    halfspaces: array
        A halfspace definition of a polytope.

    Returns
    -------
    generator
        A generator of non trivial vertices and their labels.
    """
    interior_point = find_feasible_point(halfspaces)
    intersection = HalfspaceIntersection(halfspaces, interior_point)
    intersection.close()

    def _is_non_trivial(vertex):
        # Drop the origin and any unbounded (infinite) intersection points.
        return not np.all(np.isclose(vertex, 0)) and max(vertex) < np.inf

    return (
        (vertex, labels(vertex, halfspaces))
        for vertex in intersection.intersections
        if _is_non_trivial(vertex)
    )
| {
"repo_name": "drvinceknight/Nashpy",
"path": "src/nashpy/polytope/polytope.py",
"copies": "1",
"size": "2774",
"license": "mit",
"hash": -5039639765285649000,
"line_mean": 24.2181818182,
"line_max": 104,
"alpha_frac": 0.6164383562,
"autogenerated": false,
"ratio": 3.7184986595174263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9834937015717427,
"avg_score": 0,
"num_lines": 110
} |
"""A class for a normal form game"""
import numpy as np
from .algorithms.lemke_howson import lemke_howson
from .algorithms.support_enumeration import support_enumeration
from .algorithms.vertex_enumeration import vertex_enumeration
from .learning.fictitious_play import fictitious_play
from .learning.replicator_dynamics import (
asymmetric_replicator_dynamics,
replicator_dynamics,
)
from .learning.stochastic_fictitious_play import stochastic_fictitious_play
from .utils.is_best_response import is_best_response
class Game:
    """
    A class for a normal form game.

    Parameters
    ----------
    - A, B: 2 dimensional list/arrays representing the payoff matrices for
      non zero sum games.
    - A: 2 dimensional list/array representing the payoff matrix for a
      zero sum game.
    """

    def __init__(self, *args):
        # Previously an unsupported number of arguments (0 or >2) fell
        # through silently and raised a confusing AttributeError later;
        # fail fast with an explicit message instead.
        if len(args) not in (1, 2):
            raise ValueError("A Game is created from one or two payoff matrices")
        if len(args) == 2:
            if (not len(args[0]) == len(args[1])) or (
                not len(args[0][0]) == len(args[1][0])
            ):
                raise ValueError("Unequal dimensions for matrices A and B")
            self.payoff_matrices = tuple([np.asarray(m) for m in args])
        if len(args) == 1:
            # A single matrix defines a zero sum game: B = -A.
            self.payoff_matrices = np.asarray(args[0]), -np.asarray(args[0])
        self.zero_sum = np.array_equal(
            self.payoff_matrices[0], -self.payoff_matrices[1]
        )

    def __repr__(self):
        if self.zero_sum:
            tpe = "Zero sum"
        else:
            tpe = "Bi matrix"
        return """{} game with payoff matrices:

Row player:
{}

Column player:
{}""".format(
            tpe, *self.payoff_matrices
        )

    def __getitem__(self, key):
        """Return the expected payoffs of a (row, column) strategy pair."""
        row_strategy, column_strategy = key
        return np.array(
            [
                np.dot(row_strategy, np.dot(m, column_strategy))
                for m in self.payoff_matrices
            ]
        )

    def vertex_enumeration(self):
        """
        Obtain the Nash equilibria using enumeration of the vertices of the best
        response polytopes.

        Algorithm implemented here is Algorithm 3.5 of [Nisan2007]_.

        1. Build best responses polytopes of both players
        2. For each vertex pair of both polytopes
        3. Check if pair is fully labelled
        4. Return the normalised pair

        Returns
        -------
        generator
            The equilibria.
        """
        return vertex_enumeration(*self.payoff_matrices)

    def support_enumeration(self, non_degenerate=False, tol=10 ** -16):
        """
        Obtain the Nash equilibria using support enumeration.

        Algorithm implemented here is Algorithm 3.4 of [Nisan2007]_.

        1. For each k in 1...min(size of strategy sets)
        2. For each I,J supports of size k
        3. Solve indifference conditions
        4. Check that have Nash Equilibrium.

        Parameters
        ----------
        non_degenerate : bool
            Whether or not to consider supports of equal size. By default
            (False) only considers supports of equal size.
        tol : float
            A tolerance parameter for equality.

        Returns
        -------
        generator
            The equilibria.
        """
        return support_enumeration(
            *self.payoff_matrices, non_degenerate=non_degenerate, tol=tol
        )

    def lemke_howson_enumeration(self):
        """
        Obtain Nash equilibria for all possible starting dropped labels
        using the lemke howson algorithm. See `Game.lemke_howson` for more
        information.

        Note: this is not guaranteed to find all equilibria.

        Yields
        ------
        Tuple
            An equilibria
        """
        for label in range(sum(self.payoff_matrices[0].shape)):
            yield self.lemke_howson(initial_dropped_label=label)

    def lemke_howson(self, initial_dropped_label):
        """
        Obtain the Nash equilibria using the Lemke Howson algorithm implemented
        using integer pivoting.

        Algorithm implemented here is Algorithm 3.6 of [Nisan2007]_.

        1. Start at the artificial equilibrium (which is fully labeled)
        2. Choose an initial label to drop and move in the polytope for which
           the vertex has that label to the edge
           that does not share that label. (This is implemented using integer
           pivoting)
        3. A label will now be duplicated in the other polytope, drop it in a
           similar way.
        4. Repeat steps 2 and 3 until have Nash Equilibrium.

        Parameters
        ----------
        initial_dropped_label: int
            The initial dropped label.

        Returns
        -------
        Tuple
            An equilibria
        """
        return lemke_howson(
            *self.payoff_matrices, initial_dropped_label=initial_dropped_label
        )

    def fictitious_play(self, iterations, play_counts=None):
        """
        Return a given sequence of actions through fictitious play. The
        implementation corresponds to the description of chapter 2 of
        [Fudenberg1998]_.

        1. Players have a belief of the strategy of the other player: a vector
           representing the number of times the player has chosen a given strategy.
        2. Players choose a best response to the belief.
        3. Players update their belief based on the latest choice of the
           opponent.

        Parameters
        ----------
        iterations : int
            The number of iterations of the algorithm.
        play_counts : array
            The play counts.

        Returns
        -------
        Generator
            The play counts
        """
        return fictitious_play(
            *self.payoff_matrices, iterations=iterations, play_counts=play_counts
        )

    def stochastic_fictitious_play(
        self, iterations, play_counts=None, etha=10 ** -1, epsilon_bar=10 ** -2
    ):
        """Return a given sequence of actions and mixed strategies through stochastic fictitious play. The
        implementation corresponds to the description given in [Hofbauer2002]_.

        Parameters
        ----------
        iterations : int
            The number of iterations of the algorithm.
        play_counts : array
            The play counts.
        etha : float
            The noise parameter for the logit choice function.
        epsilon_bar : float
            The maximum stochastic perturbation.

        Returns
        -------
        Generator
            The play counts
        """
        return stochastic_fictitious_play(
            *self.payoff_matrices,
            iterations=iterations,
            play_counts=play_counts,
            etha=etha,
            epsilon_bar=epsilon_bar
        )

    def replicator_dynamics(self, y0=None, timepoints=None):
        """
        Implement replicator dynamics

        Return an array showing probability of each strategy being played over
        time.
        The total population is constant. Strategies can either stay constant
        if equilibria is achieved, replicate or die.

        Parameters
        ----------
        y0 : array
            The initial population distribution.
        timepoints: array
            The iterable of timepoints.

        Returns
        -------
        array
            The population distributions over time.
        """
        A, _ = self.payoff_matrices
        return replicator_dynamics(A=A, y0=y0, timepoints=timepoints)

    def asymmetric_replicator_dynamics(self, x0=None, y0=None, timepoints=None):
        """
        Returns two arrays, corresponding to the two players, showing the
        probability of each strategy being played over time using the asymmetric
        replicator dynamics algorithm.

        Parameters
        ----------
        x0 : array
            The initial population distribution of the row player.
        y0 : array
            The initial population distribution of the column player.
        timepoints: array
            The iterable of timepoints.

        Returns
        -------
        Tuple
            The 2 population distributions over time.
        """
        A, B = self.payoff_matrices
        return asymmetric_replicator_dynamics(
            A=A, B=B, x0=x0, y0=y0, timepoints=timepoints
        )

    def is_best_response(self, sigma_r, sigma_c):
        """
        Checks if sigma_r is a best response to sigma_c and vice versa.

        Parameters
        ----------
        sigma_r : array
            The row player strategy
        sigma_c : array
            The column player strategy

        Returns
        -------
        tuple
            A pair of booleans, the first indicates if sigma_r is a best
            response to sigma_c. The second indicates if sigma_c is a best
            response to sigma_r.
        """
        A, B = self.payoff_matrices
        is_row_strategy_best_response = is_best_response(
            A=A,
            sigma_c=sigma_c,
            sigma_r=sigma_r,
        )
        # The column player's problem is the row player's problem on B^T
        # with the roles of the strategies swapped.
        is_column_strategy_best_response = is_best_response(
            A=B.T,
            sigma_c=sigma_r,
            sigma_r=sigma_c,
        )
        return (is_row_strategy_best_response, is_column_strategy_best_response)
| {
"repo_name": "drvinceknight/Nashpy",
"path": "src/nashpy/game.py",
"copies": "1",
"size": "9268",
"license": "mit",
"hash": 7979568060059578000,
"line_mean": 30.4169491525,
"line_max": 106,
"alpha_frac": 0.5869659042,
"autogenerated": false,
"ratio": 4.314711359404097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5401677263604097,
"avg_score": null,
"num_lines": null
} |
"""A class for a normal form game"""
import warnings
from itertools import chain, combinations
import numpy as np
def powerset(n):
    """
    A power set of range(n), yielded in order of increasing subset size.

    Based on recipe from python itertools documentation:
    https://docs.python.org/2/library/itertools.html#recipes

    Parameters
    ----------
    n : int
        The defining parameter of the powerset.

    Returns
    -------
    generator
        The powerset
    """
    subsets_by_size = (
        combinations(range(n), size) for size in range(n + 1)
    )
    return chain.from_iterable(subsets_by_size)
def solve_indifference(A, rows=None, columns=None):
"""
Solve the indifference for a payoff matrix assuming support for the
strategies given by columns
Finds vector of probabilities that makes player indifferent between
rows. (So finds probability vector for corresponding column player)
Parameters
----------
A : array
The row player utility matrix.
rows : array
Array of integers corresponding to rows to consider.
columns : array
Array of integers corresponding to columns to consider.
Returns
-------
array
The solution to the indifference equations.
"""
# Ensure differences between pairs of pure strategies are the same
M = (A[np.array(rows)] - np.roll(A[np.array(rows)], 1, axis=0))[:-1]
# Columns that must be played with prob 0
zero_columns = set(range(A.shape[1])) - set(columns)
if zero_columns != set():
M = np.append(
M,
[[int(i == j) for i, col in enumerate(M.T)] for j in zero_columns],
axis=0,
)
# Ensure have probability vector
M = np.append(M, np.ones((1, M.shape[1])), axis=0)
b = np.append(np.zeros(len(M) - 1), [1])
try:
prob = np.linalg.solve(M, b)
if all(prob >= 0):
return prob
return False
except np.linalg.linalg.LinAlgError:
return False
def potential_support_pairs(A, B, non_degenerate=False):
    """
    A generator for the potential support pairs

    Parameters
    ----------
    A : array
        The row player utility matrix.
    B : array
        The column player utility matrix
    non_degenerate : bool
        Whether or not to consider supports of equal size. By default
        (False) only considers supports of equal size.

    Yields
    -------
    tuple
        A pair of possible supports.
    """
    p1_num_strategies, p2_num_strategies = A.shape
    for support1 in powerset(p1_num_strategies):
        if not support1:
            continue
        for support2 in powerset(p2_num_strategies):
            if non_degenerate:
                # Only pair supports of matching size.
                if len(support2) != len(support1):
                    continue
            elif not support2:
                # Degenerate mode: any non-empty support is allowed.
                continue
            yield support1, support2
def indifference_strategies(A, B, non_degenerate=False, tol=10 ** -16):
    """
    A generator for the strategies corresponding to the potential supports

    Parameters
    ----------
    A : array
        The row player utility matrix.
    B : array
        The column player utility matrix
    non_degenerate : bool
        Whether or not to consider supports of equal size. By default
        (False) only considers supports of equal size.
    tol : float
        A tolerance parameter for equality.

    Yields
    ------
    tuple
        Strategy pairs that are indifferent on each potential support,
        together with the supports themselves. Pairs whose strategies are
        invalid (not a probability vector OR not fully on the given
        support) are skipped.
    """
    if non_degenerate:
        # Non-degenerate games need an exact (zero-tolerance) support test.
        tol = min(tol, 0)
    pairs = potential_support_pairs(A, B, non_degenerate=non_degenerate)
    for row_support, col_support in pairs:
        # Row player's strategy comes from the column player's
        # indifference conditions (on B^T) and vice versa.
        s1 = solve_indifference(B.T, col_support, row_support)
        s2 = solve_indifference(A, row_support, col_support)
        if obey_support(s1, row_support, tol=tol) and obey_support(
            s2, col_support, tol=tol
        ):
            yield s1, s2, row_support, col_support
def obey_support(strategy, support, tol=10 ** -16):
    """
    Test if a strategy obeys its support

    Parameters
    ----------
    strategy: array
        A given strategy vector
    support: array
        A strategy support
    tol : float
        A tolerance parameter for equality.

    Returns
    -------
    bool
        whether or not that strategy does indeed have the given support
    """
    if strategy is False:
        return False
    for index, probability in enumerate(strategy):
        # A strategy obeys its support when it puts (strictly) positive
        # probability on exactly the supported indices.
        played = probability > tol
        if played != (index in support):
            return False
    return True
def is_ne(strategy_pair, support_pair, payoff_matrices):
    """
    Test if a given strategy pair is a pair of best responses

    Parameters
    ----------
    strategy_pair: tuple
        a 2-tuple of numpy arrays.
    support_pair: tuple
        a 2-tuple of numpy arrays of integers.
    payoff_matrices: tuple
        a 2-tuple of numpy array of payoff matrices.

    Returns
    -------
    bool
        True if a given strategy pair is a pair of best responses.
    """
    A, B = payoff_matrices
    row_strategy, column_strategy = strategy_pair
    row_support, column_support = support_pair
    # Expected payoff of every pure strategy against the opponent's mix.
    row_payoffs = np.dot(A, column_strategy.reshape(-1, 1))
    column_payoffs = np.dot(B.T, row_strategy.reshape(-1, 1))
    # A strategy is a best response when the payoffs achieved on its
    # support match the best payoff available anywhere.
    row_support_payoffs = row_payoffs[np.array(row_support)]
    column_support_payoffs = column_payoffs[np.array(column_support)]
    return (
        row_payoffs.max() == row_support_payoffs.max()
        and column_payoffs.max() == column_support_payoffs.max()
    )
def support_enumeration(A, B, non_degenerate=False, tol=10 ** -16):
    """
    Obtain the Nash equilibria using support enumeration.

    Algorithm implemented here is Algorithm 3.4 of [Nisan2007]_

    1. For each k in 1...min(size of strategy sets)
    2. For each I,J supports of size k
    3. Solve indifference conditions
    4. Check that have Nash Equilibrium.

    Parameters
    ----------
    A : array
        The row player utility matrix.
    B : array
        The column player utility matrix
    non_degenerate : bool
        Whether or not to consider supports of equal size. By default
        (False) only considers supports of equal size.
    tol : float
        A tolerance parameter for equality.

    Yields
    -------
    tuple
        The equilibria.
    """
    number_found = 0
    candidates = indifference_strategies(
        A, B, non_degenerate=non_degenerate, tol=tol
    )
    for s1, s2, support1, support2 in candidates:
        if is_ne((s1, s2), (support1, support2), (A, B)):
            number_found += 1
            yield s1, s2
    # A nondegenerate game has an odd number of equilibria, so an even
    # count (including zero) suggests degeneracy.
    if number_found % 2 == 0:
        warning = """
        An even number of ({}) equilibria was returned. This
        indicates that the game is degenerate. Consider using another algorithm
        to investigate.
        """.format(number_found)
        warnings.warn(warning, RuntimeWarning)
| {
"repo_name": "drvinceknight/Nashpy",
"path": "src/nashpy/algorithms/support_enumeration.py",
"copies": "1",
"size": "6855",
"license": "mit",
"hash": -738179337116536600,
"line_mean": 26.7530364372,
"line_max": 85,
"alpha_frac": 0.6106491612,
"autogenerated": false,
"ratio": 3.8926746166950594,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5003323777895059,
"avg_score": null,
"num_lines": null
} |
"""A class for an overall activity"""
from twothirds import Data, TwoThirdsGame
import seaborn as sns
import matplotlib.pyplot as plt
class Activity:
    """An overall two-thirds-of-the-average activity.

    Reads guess data from a file and wraps each data set in a
    TwoThirdsGame; analyse() must be called before __repr__ or the plots
    that rely on per-game results.
    """

    def __init__(self, filename):
        self.raw_data = Data(filename)
        self.raw_data.read()
        self.data = self.raw_data.out()
        self.games = [TwoThirdsGame(d) for d in self.data]

    def analyse(self):
        """Compute per-game results: 2/3 of the average, winners and the
        winning guess."""
        self.two_thirds = [game.two_thirds_of_the_average() for game in
                           self.games]
        # FIX: find_winner() was previously called twice per game; compute
        # it once and split the result.
        winner_info = [game.find_winner() for game in self.games]
        self.winners = [info[:-1] for info in winner_info]
        self.winning_guesses = [info[-1] for info in winner_info]

    def __repr__(self):
        # NOTE(review): uses attributes created by analyse(); calling
        # repr() before analyse() raises AttributeError.
        string = ''
        for i, game in enumerate(self.games):
            string += """=====================
Game {}
---------------------
2/3rds of the average: {:.2f}
Winning guess: {}
Winner(s): {}
""".format(i, self.two_thirds[i], self.winning_guesses[i], self.winners[i])
        return string

    def pairplot(self):
        """Return a figure with a seaborn pairplot of the raw data."""
        figure = plt.figure()
        sns.pairplot(self.raw_data.df)
        return figure

    def distplot(self):
        """Return a figure overlaying each game's guess distribution with a
        vertical line at 2/3 of its average (requires analyse())."""
        figure = plt.figure()
        clrs = sns.color_palette("hls", len(self.games))
        for i, game in enumerate(self.games):
            # Game data may be a plain list of guesses or a player->guess
            # mapping; use isinstance so subclasses are handled too.
            # NOTE(review): data of any other type leaves 'values' unset
            # and raises NameError, as before.
            if isinstance(game.data, list):
                values = game.data
            if isinstance(game.data, dict):
                values = game.data.values()
            sns.distplot(values, kde=False, norm_hist=False, label='Game {}'.format(i), color=clrs[i])
            two_thirds = self.two_thirds[i]
            plt.axvline(two_thirds, color=clrs[i], label='2/3rds = {:.2f}'.format(two_thirds))
        plt.xlabel('Guess')
        plt.ylabel('Frequency')
        plt.legend()
        return figure
| {
"repo_name": "drvinceknight/TwoThirds",
"path": "twothirds/activity.py",
"copies": "1",
"size": "1744",
"license": "mit",
"hash": 5477309497749872000,
"line_mean": 33.88,
"line_max": 102,
"alpha_frac": 0.5653669725,
"autogenerated": false,
"ratio": 3.3409961685823757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44063631410823756,
"avg_score": null,
"num_lines": null
} |
""" A class for building a PyRTL AES circuit.
Currently this class only supports 128 bit AES encryption/decryption
Example::
import pyrtl
from pyrtl.rtllib.aes import AES
aes = AES()
plaintext = pyrtl.Input(bitwidth=128, name='aes_plaintext')
key = pyrtl.Input(bitwidth=128, name='aes_key')
aes_ciphertext = pyrtl.Output(bitwidth=128, name='aes_ciphertext')
reset = pyrtl.Input(1, name='reset')
ready = pyrtl.Output(1, name='ready')
ready_out, aes_cipher = aes.encrypt_state_m(plaintext, key, reset)
ready <<= ready_out
aes_ciphertext <<= aes_cipher
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
sim.step ({
'aes_plaintext': 0x00112233445566778899aabbccddeeff,
'aes_key': 0x000102030405060708090a0b0c0d0e0f,
'reset': 1
})
for cycle in range(1,10):
sim.step ({
'aes_plaintext': 0x00112233445566778899aabbccddeeff,
'aes_key': 0x000102030405060708090a0b0c0d0e0f,
'reset': 0
})
sim_trace.render_trace(symbol_len=40, segment_size=1)
"""
from __future__ import division, absolute_import
import pyrtl
from pyrtl.rtllib import libutils
# TODO:
# 2) All ROMs should be synchronous. This should be easy once (3) is completed
# 3) Right now decryption generates one GIANT combinatorial block. Instead
# it should generate one of 2 options -- Either an iterative design or a
# pipelined design. Both will add registers between each round of AES
# 5) a single "aes-unit" combining encryption and decryption (without making
# full independent hardware units) would be a plus as well
class AES(object):
def __init__(self):
    # ROM lookup tables (S-boxes, rcon, Galois multiplication) are built
    # lazily on first use; see _build_memories_if_not_exists().
    self.memories_built = False
    # Only AES-128 is supported: key and block are both 128 bits wide.
    self._key_len = 128
def encryption(self, plaintext, key):
    """
    Builds a single cycle AES Encryption circuit

    :param WireVector plaintext: text to encrypt
    :param WireVector key: AES key to use to encrypt
    :return: a WireVector containing the ciphertext
    :raises PyrtlError: if plaintext or key are not 128 bits wide
    """
    if len(plaintext) != self._key_len:
        # FIX: message previously said "Ciphertext" for the plaintext arg.
        raise pyrtl.PyrtlError("Plaintext length is invalid")
    if len(key) != self._key_len:
        raise pyrtl.PyrtlError("key length is invalid")
    key_list = self._key_gen(key)
    t = self._add_round_key(plaintext, key_list[0])
    # 10 rounds for AES-128; the final round skips MixColumns.
    # Loop variable renamed: 'round' shadowed the builtin.
    for round_num in range(1, 11):
        t = self._sub_bytes(t)
        t = self._shift_rows(t)
        if round_num != 10:
            t = self._mix_columns(t)
        t = self._add_round_key(t, key_list[round_num])
    return t
def encrypt_state_m(self, plaintext_in, key_in, reset):
    """
    Builds a multiple cycle AES Encryption state machine circuit

    :param WireVector plaintext_in: text to encrypt
    :param WireVector key_in: AES key to use to encrypt
    :param reset: a one bit signal telling the state machine
      to reset and accept the current plaintext and key
    :return ready, cipher_text: ready is a one bit signal showing
      that the encryption result (cipher_text) has been calculated.
    """
    if len(key_in) != len(plaintext_in):
        raise pyrtl.PyrtlError("AES key and plaintext should be the same length")
    # State registers holding the working block and the current round key.
    plain_text, key = (pyrtl.Register(len(plaintext_in)) for i in range(2))
    key_exp_in, add_round_in = (pyrtl.WireVector(len(plaintext_in)) for i in range(2))
    # 4-bit round counter; 'round' is the combinational next-counter value.
    counter = pyrtl.Register(4, 'counter')
    round = pyrtl.WireVector(4, 'round')
    counter.next <<= round
    # One round of hardware, reused every cycle.
    sub_out = self._sub_bytes(plain_text)
    shift_out = self._shift_rows(sub_out)
    mix_out = self._mix_columns(shift_out)
    key_out = self._key_expansion(key, counter)
    add_round_out = self._add_round_key(add_round_in, key_exp_in)
    with pyrtl.conditional_assignment:
        with reset == 1:
            # Load new inputs and perform round 0 (AddRoundKey) at once.
            round |= 0
            key_exp_in |= key_in  # to lower the number of cycles
            plain_text.next |= add_round_out
            key.next |= key_in
            add_round_in |= plaintext_in
        with counter == 10:  # done: keep everything the same
            round |= counter
            plain_text.next |= plain_text
        with pyrtl.otherwise:  # running through AES
            round |= counter + 1
            key_exp_in |= key_out
            plain_text.next |= add_round_out
            key.next |= key_out
            with counter == 9:
                # Final round skips MixColumns.
                add_round_in |= shift_out
            with pyrtl.otherwise:
                add_round_in |= mix_out
    ready = (counter == 10)
    return ready, plain_text
def decryption(self, ciphertext, key):
    """
    Builds a single cycle AES Decryption circuit

    :param WireVector ciphertext: data to decrypt
    :param WireVector key: AES key to use to encrypt (AES is symmetric)
    :return: a WireVector containing the plaintext
    :raises PyrtlError: if ciphertext or key are not 128 bits wide
    """
    if len(ciphertext) != self._key_len:
        raise pyrtl.PyrtlError("Ciphertext length is invalid")
    if len(key) != self._key_len:
        raise pyrtl.PyrtlError("key length is invalid")
    key_list = self._key_gen(key)
    # Decryption walks the round keys backwards, starting from the last.
    t = self._add_round_key(ciphertext, key_list[10])
    # Loop variable renamed: 'round' shadowed the builtin.
    for round_num in range(1, 11):
        t = self._inv_shift_rows(t)
        t = self._sub_bytes(t, True)
        t = self._add_round_key(t, key_list[10 - round_num])
        if round_num != 10:
            t = self._mix_columns(t, True)
    return t
def decryption_statem(self, ciphertext_in, key_in, reset):
    """
    Builds a multiple cycle AES Decryption state machine circuit

    :param WireVector ciphertext_in: data to decrypt
    :param WireVector key_in: AES key to use (AES is symmetric)
    :param reset: a one bit signal telling the state machine
      to reset and accept the current plaintext and key
    :return ready, plain_text: ready is a one bit signal showing
      that the decryption result (plain_text) has been calculated.
    """
    if len(key_in) != len(ciphertext_in):
        raise pyrtl.PyrtlError("AES key and ciphertext should be the same length")
    # State registers for the working block and the key input.
    cipher_text, key = (pyrtl.Register(len(ciphertext_in)) for i in range(2))
    key_exp_in, add_round_in = (pyrtl.WireVector(len(ciphertext_in)) for i in range(2))
    # this is not part of the state machine as we need the keys in
    # reverse order...
    reversed_key_list = reversed(self._key_gen(key_exp_in))
    counter = pyrtl.Register(4, 'counter')
    round = pyrtl.WireVector(4)
    counter.next <<= round
    # One inverse round of hardware, reused every cycle.
    inv_shift = self._inv_shift_rows(cipher_text)
    inv_sub = self._sub_bytes(inv_shift, True)
    # Select the round key for this round from the reversed schedule.
    key_out = pyrtl.mux(round, *reversed_key_list, default=0)
    add_round_out = self._add_round_key(add_round_in, key_out)
    inv_mix_out = self._mix_columns(add_round_out, True)
    with pyrtl.conditional_assignment:
        with reset == 1:
            # Load new inputs and apply the first AddRoundKey immediately.
            round |= 0
            key.next |= key_in
            key_exp_in |= key_in  # to lower the number of cycles needed
            cipher_text.next |= add_round_out
            add_round_in |= ciphertext_in
        with counter == 10:  # done: keep everything the same
            round |= counter
            cipher_text.next |= cipher_text
        with pyrtl.otherwise:  # running through AES
            round |= counter + 1
            key.next |= key
            key_exp_in |= key
            add_round_in |= inv_sub
            with counter == 9:
                # Final round skips InvMixColumns.
                cipher_text.next |= add_round_out
            with pyrtl.otherwise:
                cipher_text.next |= inv_mix_out
    ready = (counter == 10)
    return ready, cipher_text
def _key_gen(self, key):
    """Expand the initial key into the list of 11 round keys
    (the original key followed by one expansion per round)."""
    round_keys = [key]
    current = key
    for expansion_round in range(10):
        current = self._key_expansion(current, expansion_round)
        round_keys.append(current)
    return round_keys
def _key_expansion(self, old_key, key_expand_round):
    # Derive the next 128-bit round key from the previous one
    # (AES-128 key schedule).
    self._build_memories_if_not_exists()
    # Split the old key into four 32-bit words.
    w = libutils.partition_wire(old_key, 32)
    # x is built last-word-first: start with w[3] ^ g(w[0]), then each
    # insert(0, ...) XORs the most recently computed word with the next
    # word of the old key.
    x = [w[3] ^ self._g(w[0], key_expand_round)]
    x.insert(0, x[0] ^ w[2])
    x.insert(0, x[0] ^ w[1])
    x.insert(0, x[0] ^ w[0])
    return pyrtl.concat_list(x)
def _g(self, word, key_expand_round):
    """
    One-byte left circular rotation, substitution of each byte
    """
    import numbers
    self._build_memories_if_not_exists()
    a = libutils.partition_wire(word, 8)
    # Rotate by reading the bytes in order (3, 0, 1, 2), substituting
    # each through the S-box ROM.
    sub = [self.sbox[a[index]] for index in (3, 0, 1, 2)]
    if isinstance(key_expand_round, numbers.Number):
        # Round number known at build time: use the raw constant.
        rcon_data = self._rcon_data[key_expand_round + 1]  # int value
    else:
        # Round number is a WireVector: read the constant from the ROM.
        rcon_data = self.rcon[key_expand_round + 1]
    # Fold the round constant into one byte.
    sub[3] = sub[3] ^ rcon_data
    return pyrtl.concat_list(sub)
def _sub_bytes(self, in_vector, inverse=False):
    """SubBytes step: substitute every byte of the state through the
    (inverse) S-box ROM."""
    self._build_memories_if_not_exists()
    table = self.inv_sbox if inverse else self.sbox
    substituted = [
        table[byte] for byte in libutils.partition_wire(in_vector, 8)
    ]
    return pyrtl.concat_list(substituted)
@staticmethod
def _inv_shift_rows(in_vector):
    """Inverse ShiftRows step: fixed reordering of the 16 state bytes."""
    state = libutils.partition_wire(in_vector, 8)
    order = (12, 9, 6, 3, 0, 13, 10, 7,
             4, 1, 14, 11, 8, 5, 2, 15)
    return pyrtl.concat_list([state[i] for i in order])
@staticmethod
def _shift_rows(in_vector):
    """ShiftRows step: fixed reordering of the 16 state bytes."""
    state = libutils.partition_wire(in_vector, 8)
    order = (4, 9, 14, 3, 8, 13, 2, 7,
             12, 1, 6, 11, 0, 5, 10, 15)
    return pyrtl.concat_list([state[i] for i in order])
def _galois_mult(self, c, mult_table):
    """Multiply byte c via the ROM for the given multiplier; multiplying
    by 1 is the identity so no lookup is needed."""
    if mult_table == 1:
        return c
    return self._galois_mults[mult_table][c]
def _mix_columns(self, in_vector, inverse=False):
    """Apply the AES (Inv)MixColumns step to each 32-bit column."""
    self._build_memories_if_not_exists()
    multipliers = [14, 9, 13, 11] if inverse else [2, 1, 1, 3]
    columns = libutils.partition_wire(in_vector, 32)
    mixed = [self._mix_col_subgroup(column, multipliers) for column in columns]
    return pyrtl.concat_list(mixed)
def _mix_col_subgroup(self, in_vector, gm_multipliers):
    """Mix one 32-bit column: each output byte is the XOR of the
    column's bytes scaled by the rotated multiplier pattern."""
    col_bytes = libutils.partition_wire(in_vector, 8)

    def mixed_byte(index):
        acc = None
        for offset, table in enumerate(gm_multipliers):
            term = self._galois_mult(col_bytes[(index + offset) % 4], table)
            acc = term if acc is None else acc ^ term
        return acc

    return pyrtl.concat_list([mixed_byte(i) for i in range(len(col_bytes))])
@staticmethod
def _add_round_key(t, key):
    """XOR the state *t* with the round *key* (AES AddRoundKey step)."""
    return t ^ key
def _build_memories_if_not_exists(self):
    """Lazily construct the ROM lookup tables on first use."""
    if self.memories_built:
        return
    self._build_memories()
def _build_memories(self):
    """Instantiate the asynchronous ROMs (S-boxes, rcon, Galois-mult
    tables) and flag them as built so this only happens once."""
    def _rom(table):
        return pyrtl.RomBlock(bitwidth=8, addrwidth=8, romdata=table,
                              build_new_roms=True, asynchronous=True)
    self.sbox = _rom(self._sbox_data)
    self.inv_sbox = _rom(self._inv_sbox_data)
    self.rcon = _rom(self._rcon_data)
    self.GM2 = _rom(self._GM2_data)
    self.GM3 = _rom(self._GM3_data)
    self.GM9 = _rom(self._GM9_data)
    self.GM11 = _rom(self._GM11_data)
    self.GM13 = _rom(self._GM13_data)
    self.GM14 = _rom(self._GM14_data)
    # Map multiplier constants to their lookup ROMs for _galois_mult.
    self._galois_mults = {2: self.GM2, 3: self.GM3, 9: self.GM9,
                          11: self.GM11, 13: self.GM13, 14: self.GM14}
    self.memories_built = True
# AES forward S-box (SubBytes lookup), 256 byte entries.
_sbox_data = libutils.str_to_int_array('''
63 7c 77 7b f2 6b 6f c5 30 01 67 2b fe d7 ab 76 ca 82 c9 7d fa 59 47 f0
ad d4 a2 af 9c a4 72 c0 b7 fd 93 26 36 3f f7 cc 34 a5 e5 f1 71 d8 31 15
04 c7 23 c3 18 96 05 9a 07 12 80 e2 eb 27 b2 75 09 83 2c 1a 1b 6e 5a a0
52 3b d6 b3 29 e3 2f 84 53 d1 00 ed 20 fc b1 5b 6a cb be 39 4a 4c 58 cf
d0 ef aa fb 43 4d 33 85 45 f9 02 7f 50 3c 9f a8 51 a3 40 8f 92 9d 38 f5
bc b6 da 21 10 ff f3 d2 cd 0c 13 ec 5f 97 44 17 c4 a7 7e 3d 64 5d 19 73
60 81 4f dc 22 2a 90 88 46 ee b8 14 de 5e 0b db e0 32 3a 0a 49 06 24 5c
c2 d3 ac 62 91 95 e4 79 e7 c8 37 6d 8d d5 4e a9 6c 56 f4 ea 65 7a ae 08
ba 78 25 2e 1c a6 b4 c6 e8 dd 74 1f 4b bd 8b 8a 70 3e b5 66 48 03 f6 0e
61 35 57 b9 86 c1 1d 9e e1 f8 98 11 69 d9 8e 94 9b 1e 87 e9 ce 55 28 df
8c a1 89 0d bf e6 42 68 41 99 2d 0f b0 54 bb 16
''')
# Inverse S-box (InvSubBytes lookup), 256 byte entries.
_inv_sbox_data = libutils.str_to_int_array('''
52 09 6a d5 30 36 a5 38 bf 40 a3 9e 81 f3 d7 fb 7c e3 39 82 9b 2f ff 87
34 8e 43 44 c4 de e9 cb 54 7b 94 32 a6 c2 23 3d ee 4c 95 0b 42 fa c3 4e
08 2e a1 66 28 d9 24 b2 76 5b a2 49 6d 8b d1 25 72 f8 f6 64 86 68 98 16
d4 a4 5c cc 5d 65 b6 92 6c 70 48 50 fd ed b9 da 5e 15 46 57 a7 8d 9d 84
90 d8 ab 00 8c bc d3 0a f7 e4 58 05 b8 b3 45 06 d0 2c 1e 8f ca 3f 0f 02
c1 af bd 03 01 13 8a 6b 3a 91 11 41 4f 67 dc ea 97 f2 cf ce f0 b4 e6 73
96 ac 74 22 e7 ad 35 85 e2 f9 37 e8 1c 75 df 6e 47 f1 1a 71 1d 29 c5 89
6f b7 62 0e aa 18 be 1b fc 56 3e 4b c6 d2 79 20 9a db c0 fe 78 cd 5a f4
1f dd a8 33 88 07 c7 31 b1 12 10 59 27 80 ec 5f 60 51 7f a9 19 b5 4a 0d
2d e5 7a 9f 93 c9 9c ef a0 e0 3b 4d ae 2a f5 b0 c8 eb bb 3c 83 53 99 61
17 2b 04 7e ba 77 d6 26 e1 69 14 63 55 21 0c 7d
''')
# Key-schedule round constants; the `8d 01 02 04 ...` sequence repeats
# to fill the 8-bit ROM address space.
_rcon_data = libutils.str_to_int_array('''
8d 01 02 04 08 10 20 40 80 1b 36 6c d8 ab 4d 9a 2f 5e bc 63 c6 97 35 6a
d4 b3 7d fa ef c5 91 39 72 e4 d3 bd 61 c2 9f 25 4a 94 33 66 cc 83 1d 3a
74 e8 cb 8d 01 02 04 08 10 20 40 80 1b 36 6c d8 ab 4d 9a 2f 5e bc 63 c6
97 35 6a d4 b3 7d fa ef c5 91 39 72 e4 d3 bd 61 c2 9f 25 4a 94 33 66 cc
83 1d 3a 74 e8 cb 8d 01 02 04 08 10 20 40 80 1b 36 6c d8 ab 4d 9a 2f 5e
bc 63 c6 97 35 6a d4 b3 7d fa ef c5 91 39 72 e4 d3 bd 61 c2 9f 25 4a 94
33 66 cc 83 1d 3a 74 e8 cb 8d 01 02 04 08 10 20 40 80 1b 36 6c d8 ab 4d
9a 2f 5e bc 63 c6 97 35 6a d4 b3 7d fa ef c5 91 39 72 e4 d3 bd 61 c2 9f
25 4a 94 33 66 cc 83 1d 3a 74 e8 cb 8d 01 02 04 08 10 20 40 80 1b 36 6c
d8 ab 4d 9a 2f 5e bc 63 c6 97 35 6a d4 b3 7d fa ef c5 91 39 72 e4 d3 bd
61 c2 9f 25 4a 94 33 66 cc 83 1d 3a 74 e8 cb 8d
''')
# Galois Multiplication tables for 2, 3, 9, 11, 13, and 14.
# Each table maps a byte to its GF(2^8) product with the fixed constant.
_GM2_data = libutils.str_to_int_array('''
00 02 04 06 08 0a 0c 0e 10 12 14 16 18 1a 1c 1e 20 22 24 26 28 2a 2c 2e
30 32 34 36 38 3a 3c 3e 40 42 44 46 48 4a 4c 4e 50 52 54 56 58 5a 5c 5e
60 62 64 66 68 6a 6c 6e 70 72 74 76 78 7a 7c 7e 80 82 84 86 88 8a 8c 8e
90 92 94 96 98 9a 9c 9e a0 a2 a4 a6 a8 aa ac ae b0 b2 b4 b6 b8 ba bc be
c0 c2 c4 c6 c8 ca cc ce d0 d2 d4 d6 d8 da dc de e0 e2 e4 e6 e8 ea ec ee
f0 f2 f4 f6 f8 fa fc fe 1b 19 1f 1d 13 11 17 15 0b 09 0f 0d 03 01 07 05
3b 39 3f 3d 33 31 37 35 2b 29 2f 2d 23 21 27 25 5b 59 5f 5d 53 51 57 55
4b 49 4f 4d 43 41 47 45 7b 79 7f 7d 73 71 77 75 6b 69 6f 6d 63 61 67 65
9b 99 9f 9d 93 91 97 95 8b 89 8f 8d 83 81 87 85 bb b9 bf bd b3 b1 b7 b5
ab a9 af ad a3 a1 a7 a5 db d9 df dd d3 d1 d7 d5 cb c9 cf cd c3 c1 c7 c5
fb f9 ff fd f3 f1 f7 f5 eb e9 ef ed e3 e1 e7 e5
''')
_GM3_data = libutils.str_to_int_array('''
00 03 06 05 0c 0f 0a 09 18 1b 1e 1d 14 17 12 11 30 33 36 35 3c 3f 3a 39
28 2b 2e 2d 24 27 22 21 60 63 66 65 6c 6f 6a 69 78 7b 7e 7d 74 77 72 71
50 53 56 55 5c 5f 5a 59 48 4b 4e 4d 44 47 42 41 c0 c3 c6 c5 cc cf ca c9
d8 db de dd d4 d7 d2 d1 f0 f3 f6 f5 fc ff fa f9 e8 eb ee ed e4 e7 e2 e1
a0 a3 a6 a5 ac af aa a9 b8 bb be bd b4 b7 b2 b1 90 93 96 95 9c 9f 9a 99
88 8b 8e 8d 84 87 82 81 9b 98 9d 9e 97 94 91 92 83 80 85 86 8f 8c 89 8a
ab a8 ad ae a7 a4 a1 a2 b3 b0 b5 b6 bf bc b9 ba fb f8 fd fe f7 f4 f1 f2
e3 e0 e5 e6 ef ec e9 ea cb c8 cd ce c7 c4 c1 c2 d3 d0 d5 d6 df dc d9 da
5b 58 5d 5e 57 54 51 52 43 40 45 46 4f 4c 49 4a 6b 68 6d 6e 67 64 61 62
73 70 75 76 7f 7c 79 7a 3b 38 3d 3e 37 34 31 32 23 20 25 26 2f 2c 29 2a
0b 08 0d 0e 07 04 01 02 13 10 15 16 1f 1c 19 1a
''')
_GM9_data = libutils.str_to_int_array('''
00 09 12 1b 24 2d 36 3f 48 41 5a 53 6c 65 7e 77 90 99 82 8b b4 bd a6 af
d8 d1 ca c3 fc f5 ee e7 3b 32 29 20 1f 16 0d 04 73 7a 61 68 57 5e 45 4c
ab a2 b9 b0 8f 86 9d 94 e3 ea f1 f8 c7 ce d5 dc 76 7f 64 6d 52 5b 40 49
3e 37 2c 25 1a 13 08 01 e6 ef f4 fd c2 cb d0 d9 ae a7 bc b5 8a 83 98 91
4d 44 5f 56 69 60 7b 72 05 0c 17 1e 21 28 33 3a dd d4 cf c6 f9 f0 eb e2
95 9c 87 8e b1 b8 a3 aa ec e5 fe f7 c8 c1 da d3 a4 ad b6 bf 80 89 92 9b
7c 75 6e 67 58 51 4a 43 34 3d 26 2f 10 19 02 0b d7 de c5 cc f3 fa e1 e8
9f 96 8d 84 bb b2 a9 a0 47 4e 55 5c 63 6a 71 78 0f 06 1d 14 2b 22 39 30
9a 93 88 81 be b7 ac a5 d2 db c0 c9 f6 ff e4 ed 0a 03 18 11 2e 27 3c 35
42 4b 50 59 66 6f 74 7d a1 a8 b3 ba 85 8c 97 9e e9 e0 fb f2 cd c4 df d6
31 38 23 2a 15 1c 07 0e 79 70 6b 62 5d 54 4f 46
''')
_GM11_data = libutils.str_to_int_array('''
00 0b 16 1d 2c 27 3a 31 58 53 4e 45 74 7f 62 69 b0 bb a6 ad 9c 97 8a 81
e8 e3 fe f5 c4 cf d2 d9 7b 70 6d 66 57 5c 41 4a 23 28 35 3e 0f 04 19 12
cb c0 dd d6 e7 ec f1 fa 93 98 85 8e bf b4 a9 a2 f6 fd e0 eb da d1 cc c7
ae a5 b8 b3 82 89 94 9f 46 4d 50 5b 6a 61 7c 77 1e 15 08 03 32 39 24 2f
8d 86 9b 90 a1 aa b7 bc d5 de c3 c8 f9 f2 ef e4 3d 36 2b 20 11 1a 07 0c
65 6e 73 78 49 42 5f 54 f7 fc e1 ea db d0 cd c6 af a4 b9 b2 83 88 95 9e
47 4c 51 5a 6b 60 7d 76 1f 14 09 02 33 38 25 2e 8c 87 9a 91 a0 ab b6 bd
d4 df c2 c9 f8 f3 ee e5 3c 37 2a 21 10 1b 06 0d 64 6f 72 79 48 43 5e 55
01 0a 17 1c 2d 26 3b 30 59 52 4f 44 75 7e 63 68 b1 ba a7 ac 9d 96 8b 80
e9 e2 ff f4 c5 ce d3 d8 7a 71 6c 67 56 5d 40 4b 22 29 34 3f 0e 05 18 13
ca c1 dc d7 e6 ed f0 fb 92 99 84 8f be b5 a8 a3
''')
_GM13_data = libutils.str_to_int_array('''
00 0d 1a 17 34 39 2e 23 68 65 72 7f 5c 51 46 4b d0 dd ca c7 e4 e9 fe f3
b8 b5 a2 af 8c 81 96 9b bb b6 a1 ac 8f 82 95 98 d3 de c9 c4 e7 ea fd f0
6b 66 71 7c 5f 52 45 48 03 0e 19 14 37 3a 2d 20 6d 60 77 7a 59 54 43 4e
05 08 1f 12 31 3c 2b 26 bd b0 a7 aa 89 84 93 9e d5 d8 cf c2 e1 ec fb f6
d6 db cc c1 e2 ef f8 f5 be b3 a4 a9 8a 87 90 9d 06 0b 1c 11 32 3f 28 25
6e 63 74 79 5a 57 40 4d da d7 c0 cd ee e3 f4 f9 b2 bf a8 a5 86 8b 9c 91
0a 07 10 1d 3e 33 24 29 62 6f 78 75 56 5b 4c 41 61 6c 7b 76 55 58 4f 42
09 04 13 1e 3d 30 27 2a b1 bc ab a6 85 88 9f 92 d9 d4 c3 ce ed e0 f7 fa
b7 ba ad a0 83 8e 99 94 df d2 c5 c8 eb e6 f1 fc 67 6a 7d 70 53 5e 49 44
0f 02 15 18 3b 36 21 2c 0c 01 16 1b 38 35 22 2f 64 69 7e 73 50 5d 4a 47
dc d1 c6 cb e8 e5 f2 ff b4 b9 ae a3 80 8d 9a 97
''')
_GM14_data = libutils.str_to_int_array('''
00 0e 1c 12 38 36 24 2a 70 7e 6c 62 48 46 54 5a e0 ee fc f2 d8 d6 c4 ca
90 9e 8c 82 a8 a6 b4 ba db d5 c7 c9 e3 ed ff f1 ab a5 b7 b9 93 9d 8f 81
3b 35 27 29 03 0d 1f 11 4b 45 57 59 73 7d 6f 61 ad a3 b1 bf 95 9b 89 87
dd d3 c1 cf e5 eb f9 f7 4d 43 51 5f 75 7b 69 67 3d 33 21 2f 05 0b 19 17
76 78 6a 64 4e 40 52 5c 06 08 1a 14 3e 30 22 2c 96 98 8a 84 ae a0 b2 bc
e6 e8 fa f4 de d0 c2 cc 41 4f 5d 53 79 77 65 6b 31 3f 2d 23 09 07 15 1b
a1 af bd b3 99 97 85 8b d1 df cd c3 e9 e7 f5 fb 9a 94 86 88 a2 ac be b0
ea e4 f6 f8 d2 dc ce c0 7a 74 66 68 42 4c 5e 50 0a 04 16 18 32 3c 2e 20
ec e2 f0 fe d4 da c8 c6 9c 92 80 8e a4 aa b8 b6 0c 02 10 1e 34 3a 28 26
7c 72 60 6e 44 4a 58 56 37 39 2b 25 0f 01 13 1d 47 49 5b 55 7f 71 63 6d
d7 d9 cb c5 ef e1 f3 fd a7 a9 bb b5 9f 91 83 8d
''')
| {
"repo_name": "UCSBarchlab/PyRTL",
"path": "pyrtl/rtllib/aes.py",
"copies": "1",
"size": "20040",
"license": "bsd-3-clause",
"hash": -3803449266277873700,
"line_mean": 45.0689655172,
"line_max": 93,
"alpha_frac": 0.5779441118,
"autogenerated": false,
"ratio": 2.8542942600769123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.89309177050192,
"avg_score": 0.00026413337154238426,
"num_lines": 435
} |
""" A class for building histograms incrementally. """
import numpy as np
from collections import defaultdict
class RHist():
    """
    A class for calculating histograms where the bin size
    and location is set by rounding the input (i.e. use <decimals>) but
    where the number and range of bins is determined by the data.

    As a result, you need only know in advance the approximate scale
    your data will take, i.e. the precision you're interested in.

    There are a few methods that return useful statistics.

    <name> is a unique identifier for this histogram.
    <decimals> is an integer specifying the number of decimal places.
    Negative numbers behave as expected.
    """

    def __init__(self, name, decimals=1):
        self.decimals = decimals
        self.name = name
        # Bin value -> count; missing bins implicitly count 0.
        self.h = defaultdict(int)
        # Lazily computed probability mass function (see norm()).
        self.h_norm = None

    def add(self, x):
        """ Add <x>, a data point, to the histogram """
        self.h[np.round(x, self.decimals)] += 1

    def norm(self):
        """
        Calculate the normalized histogram (i.e. a probability
        mass function).
        """
        from copy import deepcopy

        # Borrowed from the implementation discussed in
        # Think Stats Probability and Statistics for Programmers
        # By Allen B. Downey, p 16.
        # http://shop.oreilly.com/product/0636920020745.do
        self.h_norm = deepcopy(self.h)
        weight = 1. / self.n()
        for k in self.h_norm.keys():
            self.h_norm[k] *= weight

    def mean(self):
        """ Estimate and return the mean. """
        # Borrowed from the implementation discussed in
        # Think Stats (see norm()).
        if self.h_norm is None:
            self.norm()
        mean = 0.0
        for x, p in self.h_norm.items():
            # mean = sum_i(p_i*x_i)
            mean += p * x
        return mean

    def median(self):
        """ Estimate and return the median. """
        values = sorted(self.h.keys())
        nvalues = len(values)
        if (nvalues % 2) == 0:
            # Even count: average the two middle values.  (The original
            # indexed one position too high on both sides, and its `/`
            # indices broke under Python 3 true division.)
            median = (values[nvalues // 2 - 1] + values[nvalues // 2]) / 2.0
        else:
            # Odd count: the single middle value.
            median = values[nvalues // 2]
        return median

    def var(self):
        """ Estimate and return the variance. """
        # Borrowed from the implementation discussed in
        # Think Stats (see norm()).
        if self.h_norm is None:
            self.norm()
        # var = sum_i(p_i * (x_i - mean)**2)
        mean = self.mean()
        var = 0.0
        for x, p in self.h_norm.items():
            var += p * (x - mean) ** 2
        return var

    def n(self):
        """ Count and return the total number of samples. """
        # Use the builtin sum: np.sum over a dict view does not reduce
        # reliably on Python 3.
        return sum(self.h.values())

    def stdev(self):
        """ Estimate and return the standard deviation (sqrt(var/(n-1))). """
        var = self.var()
        n = self.n()
        return np.sqrt(var / (n - 1))

    def se(self):
        """ Estimate and return the standard error. """
        sd = self.stdev()
        n = self.n()
        return sd / np.sqrt(n)

    def above(self, criterion):
        """ Estimate and return the percent area of the histogram at
        or above the <criterion>. """
        # Sum the counts of every bin at or above criterion, then norm.
        values = [value for key, value in self.h.items() if key >= criterion]
        return np.sum(values) / float(self.n())

    def overlap(self, Rhist):
        """ Calculates the percent overlap between this histogram and
        <Rhist>, another histogram instance.

        Note: percent overlap is calculated by finding the difference
        in absolute counts for all overlapping bins, summing these,
        then normalizing by the total counts for both distributions
        (all bins). """
        n1 = self.n()  # Get total counts
        n2 = Rhist.n()

        # Tabulate the diffs for each overlapping bin.  Use .get() so we
        # do not insert empty bins into the other histogram (indexing a
        # defaultdict would mutate it); a missing bin contributes 0.
        diffs = []
        for key, val1 in self.h.items():
            val2 = float(Rhist.h.get(key, 0))
            val1 = float(val1)
            diffs.append(max(val1, val2) - np.abs(val1 - val2))

        # Sum, then normalize by total count.
        return np.sum(diffs) / (n1 + n2)

    def fitPDF(self, family):
        """ Fit a probability density function (of type <family>) """
        # TODO...
        raise NotImplementedError()

    def plot(self, fig=None, color='black', norm=False):
        """
        Plot the histogram.

        If provided current data is added to <fig>, a matplotlib plot
        identifier.

        <norm> indicates whether the raw counts or normalized values
        should be plotted.
        """
        import matplotlib.pyplot as plt
        plt.ion()
        # Interactive plots -- go.
        xs = []
        ys = []
        if norm is True:
            if self.h_norm is None:
                self.norm()
            xs, ys = zip(*sorted(self.h_norm.items()))
        else:
            xs, ys = zip(*sorted(self.h.items()))
        ax = None
        if fig is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        else:
            ax = fig.axes[0]

        # Find the min width for bars. And plot!
        width = min(
            [xs[ii + 1] - xs[ii] for ii in range(len(xs) - 1)])
        ax.bar(xs, ys,
               width=width,
               alpha=0.4,
               color=color, edgecolor=color,
               align='center',
               label=self.name)
        plt.show()
        return fig
| {
"repo_name": "parenthetical-e/bigstats",
"path": "hist.py",
"copies": "1",
"size": "6254",
"license": "bsd-2-clause",
"hash": -1240893760392850400,
"line_mean": 26.3100436681,
"line_max": 77,
"alpha_frac": 0.5295810681,
"autogenerated": false,
"ratio": 4.111768573307035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.026063907930984065,
"num_lines": 229
} |
"""A class for controling graph."""
import numpy as np
import matplotlib.pyplot as plt
from math import log10, ceil, sqrt
from more_itertools import chunked
import cPickle
from .type import is_number, float_list
class MGraph:
    """Base helper for drawing, saving and pickling matplotlib graphs."""

    def __init__(self):
        # Directory into which show_and_save writes figures and pickles.
        self.dir_to_save = "../data/"

    def comparison_bar(self, data, labels, legend="", metric_label="", comparison_label="",
                       lim=None, horizontal=False, title="", filename="", show_flag=True):
        '''Draw a bar graph for comparing items.

        <data> is either a flat sequence of numbers (one bar per entry)
        or a sequence of series (one bar group per label).  <labels>
        names the ticks; one extra label means the labels are bin edges.
        <lim> is an optional [min, max] for the metric axis.
        (lim previously defaulted to a mutable []; None is equivalent
        because it is only truth-tested.)
        '''
        original_data = locals().copy()
        fig, ax = plt.subplots()
        # Bind orientation-dependent axis helpers once so the drawing
        # code below is orientation-agnostic.
        if horizontal:
            [set_lim1, set_lim2] = [ax.set_ylim, ax.set_xlim]
            set_label1 = ax.set_xlabel
            set_label2 = ax.set_ylabel
            set_ticks = plt.yticks
        else:
            [set_lim1, set_lim2] = [ax.set_xlim, ax.set_ylim]
            set_label1 = ax.set_ylabel
            set_label2 = ax.set_xlabel
            set_ticks = plt.xticks

        if isinstance(data[0], (int, float)):
            # Single flat series: one bar per value.
            Y = range(len(data))
            bar_height = 0.5
            if horizontal:
                ax.barh(Y, data, height=bar_height)
            else:
                ax.bar(Y, data, width=bar_height)
            if len(labels) == len(data):
                set_ticks([item + 0.25 for item in Y], labels)
            elif len(labels) == len(data) + 1:
                # Labels are bin edges: shift ticks to the bar boundaries.
                pos = [p - 0.25 for p in range(len(labels) + 1)]
                set_ticks(pos, labels)
        else:
            # Grouped series: a cluster of len(data) bars per label.
            Y = range(len(labels))
            bar_height = 1. / (len(data) + 1)
            cmap = plt.cm.rainbow
            # Integer division (was `/`, identical under Python 2):
            # cmap() must be indexed with an int here.
            cmap_v = cmap.N // (len(data) - 1)
            for idx, d in enumerate(data):
                if horizontal:
                    ax.barh([y + bar_height * (len(data) - idx - 1) for y in Y], d,
                            height=bar_height, color=cmap(idx * cmap_v))
                else:
                    rects = ax.bar([y + bar_height * idx for y in Y], d,
                                   width=bar_height, color=cmap(idx * cmap_v))
                    self.__autolabel(rects, ax)
            set_ticks([item + bar_height / 2 * len(data) for item in Y], labels)
            set_lim1([-bar_height, len(labels)])

        if lim:
            set_lim2(lim)
        if metric_label:
            set_label1(metric_label)
        if comparison_label:
            set_label2(comparison_label)
        if legend:
            plt.legend(legend, loc='upper right')
        self.set_title(ax, title)
        self.show_and_save(fig, filename, show_flag, original_data)

    def __autolabel(self, rects, ax):
        """Annotate each bar with its height."""
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x(), 1.05 * height, '%0.3f' % height)

    def line_scatter(self, x_data, y_data, hl_span=None, legend="", x_label="", y_label="",
                     xlim=None, ylim=None, title="", filename="", show_flag=True):
        """Draw a scatter graph connected by lines.

        <hl_span> is an optional (start, end) x-range highlighted in red.
        """
        original_data = locals().copy()
        fig, ax = self.figure_with_side_space(0.7)
        for x, y in zip(x_data, y_data):
            if is_number(y):
                y = [y]
            ax.plot(x, float_list(y), '.-')
        if hl_span:
            ax.axvspan(hl_span[0], hl_span[1], alpha=0.2, color='red')
        if xlim:
            ax.set_xlim(xlim)
        if ylim:
            ax.set_ylim(ylim)
        self.set_legend(ax, legend)
        self.set_label(ax, x_label, y_label)
        self.set_title(ax, title)
        self.show_and_save(fig, filename, show_flag, original_data)

    def figure_with_side_space(self, space_width):
        """Create a figure leaving horizontal space (e.g. for a legend)."""
        aspect = 1. / (1. + space_width)
        fig = plt.figure(figsize=plt.figaspect(aspect))
        ax = fig.add_axes([.05, .1, aspect, .8])
        return fig, ax

    def set_legend(self, ax, legend):
        """Place <legend> just outside the axes, if given."""
        if legend:
            ax.legend(legend, bbox_to_anchor=(1.02, 1.), loc='upper left',
                      borderaxespad=0, fontsize=8)

    def line_series(self, data, y_points, legend="", x_label="", y_label="", ylim=None,
                    markersize=10, title="", filename="", show_flag=True):
        """Draw a line graph of the series."""
        original_data = locals().copy()
        fig, ax = plt.subplots()
        for item in data:
            # Was `markersize is 0`: identity comparison against an int
            # literal; use equality.
            if markersize == 0:
                ax.plot(y_points, item, '-')
            else:
                ax.plot(y_points, item, 'o--', markersize=markersize)
        if legend:
            ax.legend(legend)
        ax.set_xlim(self.__calc_lim(y_points, 0.05))
        if ylim:
            ax.set_ylim(ylim)
        self.set_label(ax, x_label, y_label)
        self.set_title(ax, title)
        self.show_and_save(fig, filename, show_flag, original_data)

    def labeled_line_series(self, data, label, y_points,
                            x_label="", y_label="", ylim=None,
                            title="", filename="", show_flag=True):
        """Draw a line graph of the series, colored by binary label
        (0: blue, 1: red; label-1 series are drawn on top)."""
        original_data = locals().copy()
        fig, ax = plt.subplots()
        for idx, item in enumerate(data):
            if label[idx] == 0:
                ax.plot(y_points, item, 'b-')
        for idx, item in enumerate(data):
            if label[idx] == 1:
                ax.plot(y_points, item, 'r-')
        ax.set_xlim(self.__calc_lim(y_points, 0.05))
        if ylim:
            ax.set_ylim(ylim)
        self.set_label(ax, x_label, y_label)
        self.set_title(ax, title)
        self.show_and_save(fig, filename, show_flag, original_data)

    def show_and_save(self, fig, filename, show_flag, data=None):
        """Save <fig> (and pickle <data> beside it) when <filename> is
        non-empty, then show the figure when <show_flag> is True."""
        if len(filename) > 0:
            path = self.dir_to_save + filename
            fig.savefig(path)
            if data is not None:
                # `with` guarantees the file is closed even if dump
                # raises (the original leaked the handle on error).
                with open(path + '.pkl', 'w') as f:
                    cPickle.dump(data, f)
        if show_flag:
            fig.show()

    def set_title(self, ax, title):
        """Set the axes title, if given."""
        if title:
            ax.set_title(title)

    def set_label(self, ax, x_label, y_label):
        """Set axis labels, if given."""
        if x_label:
            ax.set_xlabel(x_label)
        if y_label:
            ax.set_ylabel(y_label)

    def __calc_lim(self, values, margin_ratio):
        """Return [min, max] of <values> padded by <margin_ratio> of the range."""
        margin = (max(values) - min(values)) * margin_ratio
        return [min(values) - margin, max(values) + margin]
class Graph(MGraph):
    """Control all the graphs and visualizations."""

    def __init__(self):
        """Initializer of Graph class."""
        MGraph.__init__(self)
        # Maximum number of time series drawn per figure; longer lists
        # are chunked into several figures (see __draw_series_with_legend).
        self.limit_timeseries = 25

    def visualize_image(self, data,
                        h_len=28, n_cols=0, filename="", show_flag=True):
        """Visualizer of image data.

        <data> is a 1-D vector (single image) or a 2-D array (one image
        per row); <h_len> is the horizontal pixel count used to infer
        the vertical size.  <n_cols> of 0 lets the grid shape be derived.
        """
        if data.ndim == 1:
            # NOTE(review): relies on Python 2 integer division; under
            # Python 3 this would need // — confirm target interpreter.
            v_len = data.shape[0] / h_len
            if n_cols == 0:
                n_cols = 1
            n_rows = 1
        elif data.ndim == 2:
            v_len = data.shape[1] / h_len
            if n_cols == 0:
                n_cols = int(ceil(sqrt(data.shape[0])))
            n_rows = int(ceil(float(data.shape[0]) / n_cols))
        else:
            raise ValueError
        plt.gray()
        fig, axes = plt.subplots(n_rows, n_cols)
        X, Y = np.meshgrid(range(h_len), range(v_len))
        for i_v in range(n_rows):
            for i_h in range(n_cols):
                index = i_h + i_v * n_cols
                if index < data.shape[0]:
                    # plt.subplots returns a 2-D array, a 1-D array or a
                    # bare axis depending on grid shape; handle all three.
                    if n_rows > 1:
                        ax = axes[i_v, i_h]
                        Z = data[index].reshape(v_len, h_len)
                    elif n_cols > 1:
                        ax = axes[i_h]
                        Z = data[index].reshape(v_len, h_len)
                    else:
                        ax = axes
                        Z = data.reshape(v_len, h_len)
                    # Flip vertically so row 0 of the image is at the top.
                    Z = Z[::-1, :]
                    ax.set_xlim(0, h_len - 1)
                    ax.set_ylim(0, v_len - 1)
                    ax.pcolor(X, Y, Z)
                    ax.tick_params(labelbottom='off')
                    ax.tick_params(labelleft='off')
        MGraph.show_and_save(self, fig, filename, show_flag)

    def draw_lab_adm(self, admission, title, filename="", show_flag=True):
        """Draw lab tests data of admissions."""
        base_time = admission.admit_dt
        data = admission.labs
        plot_list = self.__get_plot_list(base_time, data)
        # ICU in/out times in hours relative to admission, drawn as
        # highlighted spans.
        icu_ios = [self.__time_diff_in_hour(
            [icustay.intime, icustay.outtime], base_time)
            for icustay in admission.icustays]
        self.__draw_series_with_legend(plot_list, icu_ios,
                                       title, filename, show_flag)

    def draw_med_icu(self, icustay, base_time, title, filename="", show_flag=True):
        """Draw medication data of one ICU stay as point markers."""
        data = icustay.medications
        plot_list = self.__get_plot_list(base_time, data)
        icu_io = self.__time_diff_in_hour([icustay.intime, icustay.outtime], base_time)
        self.__draw_series_with_legend(plot_list, [icu_io], title, filename, show_flag, 'o')

    def draw_chart_icu(self, icustay, base_time, title, filename="", show_flag=True):
        """Draw chart data of one ICU stay."""
        data = icustay.charts
        plot_list = self.__get_plot_list(base_time, data)
        icu_io = self.__time_diff_in_hour([icustay.intime, icustay.outtime], base_time)
        self.__draw_series_with_legend(plot_list, [icu_io], title, filename, show_flag)

    def draw_selected_chart_icu(self, icustay, itemid_list, base_time, title,
                                filename="", show_flag=True):
        """Draw only the chart items whose itemid is in <itemid_list>."""
        selected_ids = itemid_list
        data = [item for item in icustay.charts if item.itemid in selected_ids]
        plot_list = self.__get_plot_list(base_time, data)
        icu_io = self.__time_diff_in_hour([icustay.intime, icustay.outtime], base_time)
        self.__draw_series_with_legend(plot_list, [icu_io], title, filename, show_flag)

    def draw_io_icu(self, icustay, base_time, title, filename="", show_flag=True):
        """Draw input/output data of one ICU stay as point markers."""
        data = icustay.ios
        plot_list = self.__get_plot_list(base_time, data)
        icu_io = self.__time_diff_in_hour([icustay.intime, icustay.outtime], base_time)
        self.__draw_series_with_legend(plot_list, [icu_io], title, filename, show_flag, 'o')

    def draw_lab_adm_itemid(self, admission, itemids, title, filename="", show_flag=True):
        """Draw two lab items of an admission on twin y-axes (blue/red)."""
        base_time = admission.admit_dt
        fig, ax1 = plt.subplots()
        ax2 = ax1.twinx()
        colors = ['b', 'r']
        axis = [ax1, ax2]
        # NOTE: `id` shadows the builtin; kept as-is (documentation-only edit).
        for idx, id in enumerate(itemids):
            data = admission.get_lab_itemid(id)
            time_diff = self.__time_diff_in_hour(data.timestamps, base_time)
            values = data.values
            # e.g. "bs--": colored squares joined by dashed lines.
            axis[idx].plot(time_diff, values, "%ss--" % colors[idx])
            axis[idx].set_ylabel("%s [%s]" % (data.description, data.unit), color=colors[idx])
        ax1.set_title(title)
        ax1.set_xlabel("Hours since Admission")
        base_time = admission.admit_dt
        # Highlight each ICU stay interval.
        icu_ios = [self.__time_diff_in_hour([icustay.intime, icustay.outtime], base_time)
                   for icustay in admission.icustays]
        for span in icu_ios:
            ax1.axvspan(span[0], span[1], alpha=0.2, color='red')
        MGraph.show_and_save(self, fig, filename, show_flag)

    def draw_lab_distribution(self, expire_values, recover_values, title,
                              filename="", show_flag=True):
        """Scatter expired (red) vs recovered (blue) lab value pairs."""
        fig, ax = plt.subplots()
        for value in expire_values:
            ax.plot(value[0], value[1], "ro")
        for value in recover_values:
            ax.plot(value[0], value[1], "bo")
        ax.set_xlabel("Creatinine [mg/dL]")
        ax.set_ylabel("Urea Nitrogen[mg/dL]")
        MGraph.show_and_save(self, fig, filename, show_flag)

    def plot_classification(self, positive, negative, line, title,
                            filename="", show_flag=True, x_label="", y_label=""):
        """Scatter two labeled 2-D point sets with a separating line.

        <line> is (x0, x1, y0, y1) — the line segment's endpoints.
        """
        fig, ax = plt.subplots()
        ax.plot(positive[:, 0], positive[:, 1], 'ro')
        ax.plot(negative[:, 0], negative[:, 1], 'bo')
        ax.plot([line[0], line[1]], [line[2], line[3]])
        # Pad the axes by 5% of the data range on each side.
        margin_rate = 0.05
        x_max = max(max(positive[:, 0]), max(negative[:, 0]))
        x_min = min(min(positive[:, 0]), min(negative[:, 0]))
        x_margin = (x_max - x_min) * margin_rate
        ax.set_xlim([x_min - x_margin, x_max + x_margin])
        y_max = max(max(positive[:, 1]), max(negative[:, 1]))
        y_min = min(min(positive[:, 1]), min(negative[:, 1]))
        y_margin = (y_max - y_min) * margin_rate
        ax.set_ylim([y_min - y_margin, y_max + y_margin])
        if len(x_label) > 0:
            ax.set_xlabel(x_label)
        if len(y_label) > 0:
            ax.set_ylabel(y_label)
        MGraph.show_and_save(self, fig, filename, show_flag)

    def plot_classification_with_contour(self, x, y, xx, yy, z, x_label, y_label,
                                         filename="", show_flag=True):
        """Scatter labeled points over a filled decision-region contour."""
        fig, ax = plt.subplots()
        ax.contourf(xx, yy, z, cmap=plt.cm.rainbow, alpha=0.2)
        ax.scatter(x[:, 0], x[:, 1], c=y, cmap=plt.cm.rainbow)
        ax.set_xlabel(x_label)
        ax.set_ylabel(y_label)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        MGraph.show_and_save(self, fig, filename, show_flag)

    def bar_feature_importance(self, entropy_reduction, labels, filename="", show_flag=True):
        """Horizontal bar chart of feature importances (entropy reduction)."""
        fig, ax = plt.subplots()
        # NOTE(review): assumes Python 2, where range() returns a list
        # supporting reverse(); under Python 3 this would need list(range(...)).
        Y = range(len(entropy_reduction))
        Y.reverse()
        ax.barh(Y, entropy_reduction, height=0.4)
        plt.yticks(Y, labels)
        ax.set_xlabel("Entropy Reduction")
        plt.tick_params(axis='both', which='both', labelsize=8)
        plt.tight_layout()
        MGraph.show_and_save(self, fig, filename, show_flag)

    def bar_classification(self, l_classification_result, labels, comparison_label="",
                           title="", filename="", show_flag=True):
        """Grouped bars of recall/precision/f-measure per result."""
        l_rec = [item.recall for item in l_classification_result]
        l_prec = [item.prec for item in l_classification_result]
        l_f = [item.f for item in l_classification_result]
        legend = ['recall', 'precision', 'f_measure']
        MGraph.comparison_bar(self, [l_rec, l_prec, l_f], labels, legend, lim=[0, 1],
                              comparison_label=comparison_label, title=title,
                              filename=filename, show_flag=show_flag)

    def bar_histogram(self, hist, bin_edges, hist_label, bin_label, only_left_edge=False,
                      title="", filename="", show_flag=True):
        """Bar chart of a precomputed histogram (counts + bin edges)."""
        label = list(bin_edges)
        if only_left_edge:
            # Drop the rightmost edge so labels align one-to-one with bars.
            label.pop()
        MGraph.comparison_bar(self, hist, label, metric_label=hist_label,
                              comparison_label=bin_label,
                              title=title, filename=filename, show_flag=show_flag)

    def series_classification(self, l_classification_result, timestamp, x_label,
                              title="", filename="", show_flag=True):
        """Line series of recall/precision/f-measure over <timestamp>."""
        # NOTE(review): reads .rec here but .recall in bar_classification —
        # confirm the classification-result attribute name.
        l_rec = [item.rec for item in l_classification_result]
        l_prec = [item.prec for item in l_classification_result]
        l_f = [item.f for item in l_classification_result]
        legend = ['recall', 'precision', 'f_measure']
        MGraph.line_series(self, [l_rec, l_prec, l_f], timestamp, legend, ylim=[0, 1],
                           x_label=x_label, title=title, filename=filename, show_flag=show_flag)

    def draw_series_data_class(self, series, n_draw_sample=0):
        """Visualize the data of SeriesData class."""
        fig, ax = plt.subplots()
        y_points = range(series.n_step())
        n_sample = series.n_sample()
        # Optionally thin the samples down to roughly n_draw_sample traces.
        if 0 < n_draw_sample < n_sample:
            idx_selected_sample = range(n_draw_sample - 1,
                                        n_sample,
                                        int(n_sample / n_draw_sample))
            series = series.slice_by_sample(idx_selected_sample)
        # One labeled figure per feature.
        for idx_f in range(series.n_feature()):
            f_series = series.slice_by_feature(idx_f)
            MGraph.labeled_line_series(self, f_series.series.transpose(), f_series.label, y_points)

    def waitforbuttonpress(self):
        """Block until a key or mouse button is pressed in the figure."""
        plt.waitforbuttonpress()

    def close_all(self):
        """Close every open matplotlib figure."""
        plt.close('all')

    def normalize(self, value):
        """Scale <value> by its power-of-ten order; return (scaled, order)."""
        max_val = max(abs(value))
        order = 10.0 ** int(log10(float(max_val)))
        n_value = value / order
        return n_value, order

    def __figure_with_legend(self):
        """Create a half-width axes figure leaving room for a legend."""
        fig = plt.figure(figsize=plt.figaspect(0.5))
        ax = fig.add_axes([.05, .1, .5, .8])
        return fig, ax

    def __show_legend(self, ax):
        """Place the legend just outside the axes."""
        ax.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0, prop={'size': 8})

    def __time_diff_in_hour(self, time_seq, base_time):
        """Convert datetimes in <time_seq> to hours since <base_time>."""
        return [(item - base_time).total_seconds() / 3600 for item in time_seq]

    def __get_plot_list(self, base_time, time_series):
        """Build [time_diffs, normalized_values, tag] triples for plotting;
        series whose values cannot be parsed as floats are skipped."""
        plot_list = []
        for item in time_series:
            try:
                time_diff = self.__time_diff_in_hour(item.timestamps, base_time)
                value = np.array([float(num) for num in item.values])
                plot_val, order = self.normalize(value)
                # Record the scale factor in the legend tag.
                tag = "%s [%0.1f %s]" % (item.description, order, item.unit)
                plot_list.append([time_diff, plot_val, tag])
            except ValueError:
                print "Can't plot %s" % item.description
        return plot_list

    def __draw_series_with_legend(self, plot_list, icu_ios, title, filename, show_flag, style='-'):
        """Plot the series in chunks of limit_timeseries per figure, with
        ICU stay intervals highlighted in red."""
        plot_all = list(chunked(plot_list, self.limit_timeseries))
        for plot_list in plot_all:
            fig, ax = self.__figure_with_legend()
            for item in plot_list:
                ax.plot(item[0], item[1], style, label=item[2])
            for span in icu_ios:
                ax.axvspan(span[0], span[1], alpha=0.2, color='red')
            ax.set_title(title)
            ax.set_xlabel("Hours since Admission")
            self.__show_legend(ax)
            MGraph.show_and_save(self, fig, filename, show_flag)
| {
"repo_name": "belemizz/mimic2_tools",
"path": "clinical_db/mutil/graph.py",
"copies": "1",
"size": "18161",
"license": "mit",
"hash": -5158962004349609000,
"line_mean": 38.6528384279,
"line_max": 99,
"alpha_frac": 0.5323495402,
"autogenerated": false,
"ratio": 3.4546319193456343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4486981459545634,
"avg_score": null,
"num_lines": null
} |
'A class for creating focus elements for use with gridgen'
# Copyright R. Hetland on 2007-11-26.
# All rights reserved.
# Version 1.0.0 on 2007-11-26
# Initial version of Focus and FocusPoint classes.
from numpy import *
from scipy.special import erf
class FocusPoint(object):
    """
    Return a transformed, uniform grid, focused around point xo, yo, with a focusing
    factor of focus, and x and y extent given by Rx and Ry. The region of focusing
    will be approximately Gausian, and the resolution will be increased by approximately
    the value of factor. To achive focusing on a line in the x- or y-direction, use a
    large value for R in the desired direction; typically a value of 10.0 is good enough
    to have focusing only in one direction.
    """

    def __init__(self, xo, yo, factor=2.0, Rx=0.1, Ry=None):
        # Ry defaults to Rx (isotropic focusing region).
        if Ry is None:
            Ry = Rx
        self.xo = xo
        self.yo = yo
        self.factor = factor
        self.Rx = Rx
        self.Ry = Ry

    def __call__(self, x, y):
        """Map normalized coordinates x, y (each within [0, 1]) to the
        focused grid; returns the transformed (x, y) pair."""
        x = asarray(x)
        y = asarray(y)
        # BUG FIX: the original chained the four range checks with `or`
        # (so the assertion held whenever ANY single check passed and
        # could effectively never fire) and tested `x < 0.0` twice
        # instead of `y < 0.0`.  All four bounds must hold.
        assert not (any(x > 1.0) or any(x < 0.0) or any(y > 1.0) or any(y < 0.0)), \
            'x and y must both be within the range [0, 1].'
        alpha = 1.0 - 1.0 / self.factor

        def xf(x, y):
            return x - 0.5 * sqrt(pi) * self.Rx * alpha * \
                exp(-(y - self.yo)**2 / self.Ry**2) * erf((x - self.xo) / self.Rx)

        def yf(x, y):
            return y - 0.5 * sqrt(pi) * self.Ry * alpha * \
                exp(-(x - self.xo)**2 / self.Rx**2) * erf((y - self.yo) / self.Ry)

        # Renormalize so the transformed domain maps [0, 1] -> [0, 1].
        xf0 = xf(0.0, y); xf1 = xf(1.0, y)
        yf0 = yf(x, 0.0); yf1 = yf(x, 1.0)
        return (xf(x, y) - xf0) / (xf1 - xf0), (yf(x, y) - yf0) / (yf1 - yf0)
class Focus(object):
    """Compose one or more FocusPoint transforms into a single mapping.

    foc = Focus(xo, yo, factor=2.0, Rx=0.1, Ry=Rx)

    builds a transformed, uniform grid focused around the point (xo, yo)
    with focusing factor `factor` and x/y extents Rx and Ry; the focused
    region is approximately Gaussian and the resolution increase is
    approximately `factor`.  Use a large R (e.g. 10.0) in one direction
    to focus along a line.

    Calls to the object return transformed coordinates:
        xf, yf = foc(x, y)
    where x and y must be within [0, 1], and are typically a uniform,
    normalized grid.

    Additional focus points may be added with
        foc.add_focus_point(xo, yo, factor=2.0, Rx=0.1, Ry=Rx)
    and subsequent calls apply every focus point in the order added.

    EXAMPLE:
    foc = Focus(0.7, 0.0, factor=2.0, Rx=0.2, Ry=10)
    foc.add_focus_point(0.2, 0.7, factor=3.0, Rx=0.2)
    y, x = mgrid[0:1:50j, 0:1:70j]
    xf, yf = foc(x, y)
    """

    def __init__(self, xo, yo, factor=2.0, Rx=0.1, Ry=None):
        # Ordered list of FocusPoint transforms, seeded with the first one.
        self._focuspoints = []
        self.add_focus_point(xo, yo, factor, Rx, Ry)

    def add_focus_point(self, xo, yo, factor=2.0, Rx=0.1, Ry=None):
        """Append another focus point; it is applied after the existing ones."""
        self._focuspoints.append(FocusPoint(xo, yo, factor, Rx, Ry))

    def __call__(self, x, y):
        """Apply every stored focus transform in sequence to (x, y)."""
        for transform in self._focuspoints:
            x, y = transform(x, y)
        return x, y
if __name__ == '__main__':
    # Demo / self-test: build a grid with three focus points and report
    # the realized focusing factors, which should be close to `factor`.
    # (Python 2 print statements, matching the rest of the module.)
    import pylab as pl
    y, x = mgrid[0:1:50j, 0:1:70j]
    factor = 2.0
    foc = Focus(0.7, 0.0, factor=factor, Rx=0.2, Ry=10)
    foc.add_focus_point(0.2, 0.7, factor=factor, Rx=0.2)
    # foc.add_focus_point(0.7, 0.7, factor=factor, Rx=0.2)
    foc.add_focus_point(0.2, 0.2, factor=factor, Rx=0.2)
    xf, yf = foc(x, y)
    # The max/min grid-spacing ratio approximates the focusing factor.
    dx = diff(xf, axis=-1)
    print 'Focusing factor in x:', dx.max()/dx.min()
    dy = diff(yf, axis=0)
    print 'Focusing factor in y:', dy.max()/dy.min()
    print 'should both be approximately: ', factor
    pl.plot(xf, yf, '-k')
    pl.plot(xf.T, yf.T, '-k')
pl.show() | {
"repo_name": "simion1232006/pyroms",
"path": "pyroms/focus.py",
"copies": "1",
"size": "4087",
"license": "bsd-3-clause",
"hash": -6630461991126780000,
"line_mean": 33.9401709402,
"line_max": 106,
"alpha_frac": 0.5869831172,
"autogenerated": false,
"ratio": 2.846100278551532,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3933083395751532,
"avg_score": null,
"num_lines": null
} |
"""A class for downloading event data from Open Catalogs."""
import codecs
import json
import os
import re
import shutil
import webbrowser
from collections import OrderedDict
from difflib import get_close_matches
from astrocats.catalog.utils import is_number
from six import string_types
from mosfit.printer import Printer
from mosfit.utils import get_url_file_handle, listify, open_atomic
class Fetcher(object):
    """Downloads data from the Open Catalogs.

    Knows three catalogs (OSC supernovae, OTC tidal disruptions, OKC
    kilonovae), resolves user-supplied event names against each catalog's
    alias list (with fuzzy matching as a fallback), and downloads/caches
    the per-event JSON files locally.
    """

    def __init__(self,
                 test=False,
                 open_in_browser=False,
                 printer=None,
                 **kwargs):
        """Initialize class.

        Parameters
        ----------
        test : bool
            If True, prompts are suppressed and the first fuzzy match is
            accepted automatically.
        open_in_browser : bool
            If True, open each fetched event's catalog page in a browser.
        printer : Printer or None
            Output helper; a fresh `Printer` is created when None.
        """
        self._test = test
        self._printer = Printer() if printer is None else printer
        self._open_in_browser = open_in_browser
        self._names_downloaded = False
        self._names = OrderedDict()
        self._excluded_catalogs = []
        # Catalog registry: `json` is the base URL for JSON payloads,
        # `web` the prefix of the human-readable event pages.
        self._catalogs = OrderedDict((('OSC', {
            'json': ('https://sne.space/astrocats/astrocats/'
                     'supernovae/output'),
            'web':
            'https://sne.space/sne/'
        }), ('OTC', {
            'json': ('https://tde.space/astrocats/astrocats/'
                     'tidaldisruptions/output'),
            'web':
            'https://tde.space/tde/'
        }), ('OKC', {
            'json': ('https://kilonova.space/astrocats/astrocats/'
                     'kilonovae/output'),
            'web':
            'https://kilonova.space/kne/'
        })))

    def add_excluded_catalogs(self, catalogs):
        """Add catalog name(s) to list of catalogs that will be excluded."""
        if not isinstance(catalogs, list) or isinstance(
                catalogs, string_types):
            catalogs = listify(catalogs)
        self._excluded_catalogs.extend([x.upper() for x in catalogs])

    def fetch(self, event_list, offline=False, prefer_cache=False,
              cache_path=''):
        """Fetch a list of events from the open catalogs.

        Parameters
        ----------
        event_list : str or list
            Event name(s), or path(s) to local `.json` files.
        offline : bool
            If True, never touch the network; rely on cached files only.
        prefer_cache : bool
            If True, use cached files when present instead of re-downloading.
        cache_path : str
            Alternate directory for cached name lists and event files.

        Returns
        -------
        list of OrderedDict
            One entry per requested event, with `name`, `catalog`, and
            `path` keys filled in where resolution succeeded.

        Raises
        ------
        RuntimeError
            If no data file could be obtained for a resolved event.
        """
        dir_path = os.path.dirname(os.path.realpath(__file__))
        prt = self._printer

        self._cache_path = cache_path

        levent_list = listify(event_list)
        events = [None for x in levent_list]

        # Honor the exclusion list set via `add_excluded_catalogs`.
        catalogs = OrderedDict([(x, self._catalogs[x]) for x in self._catalogs
                                if x not in self._excluded_catalogs])

        for ei, event in enumerate(levent_list):
            if not event:
                continue
            events[ei] = OrderedDict()
            path = ''
            # If the event name ends in .json, assume event is a path.
            if event.endswith('.json'):
                path = event
                events[ei]['name'] = event.replace('.json', '').split('/')[-1]
            # If not (or the file doesn't exist), download from an open
            # catalog.
            name_dir_path = dir_path
            if self._cache_path:
                name_dir_path = self._cache_path
            if not path or not os.path.exists(path):
                names_paths = [
                    os.path.join(name_dir_path, 'cache', x + '.names.min.json')
                    for x in catalogs
                ]
                input_name = event.replace('.json', '')

                if offline:
                    prt.message('event_interp', [input_name])
                else:
                    # Refresh each catalog's alias list unless it was already
                    # downloaded this session or a cached copy is preferred.
                    for ci, catalog in enumerate(catalogs):
                        if self._names_downloaded or (prefer_cache
                                                      and os.path.exists(
                                                          names_paths[ci])):
                            continue
                        if ci == 0:
                            prt.message('dling_aliases', [input_name])
                        try:
                            response = get_url_file_handle(
                                catalogs[catalog]['json'] + '/names.min.json',
                                timeout=10)
                        except Exception:
                            prt.message(
                                'cant_dl_names', [catalog], warning=True)
                        else:
                            with open_atomic(names_paths[ci], 'wb') as f:
                                shutil.copyfileobj(response, f)
                    self._names_downloaded = True

                for ci, catalog in enumerate(catalogs):
                    if os.path.exists(names_paths[ci]):
                        if catalog not in self._names:
                            with open(names_paths[ci], 'r') as f:
                                self._names[catalog] = json.load(
                                    f, object_pairs_hook=OrderedDict)
                    else:
                        prt.message('cant_read_names', [catalog], warning=True)
                        if offline:
                            prt.message('omit_offline')
                        continue
                    # BUG FIX: use `.get` -- a catalog whose names file failed
                    # to download/read has no entry in self._names, and direct
                    # indexing raised KeyError here.
                    if input_name in self._names.get(catalog, {}):
                        events[ei]['name'] = input_name
                        events[ei]['catalog'] = catalog
                    else:
                        for name in self._names.get(catalog, {}):
                            if (input_name in self._names[catalog][name]
                                    or 'SN' +
                                    input_name in self._names[catalog][name]):
                                events[ei]['name'] = name
                                events[ei]['catalog'] = catalog
                                break

                if not events[ei].get('name', None):
                    # No exact/alias match: fall back to fuzzy matching
                    # against every known alias of every catalog.
                    for ci, catalog in enumerate(catalogs):
                        namekeys = []
                        for name in self._names.get(catalog, {}):
                            namekeys.extend(self._names[catalog][name])
                        namekeys = list(sorted(set(namekeys)))
                        matches = get_close_matches(
                            event, namekeys, n=5, cutoff=0.8)
                        # matches = []
                        if len(matches) < 5 and is_number(event[0]):
                            # Retry with common supernova name prefixes
                            # (e.g. '1987A' -> 'SN1987A').
                            prt.message('pef_ext_search')
                            snprefixes = set(('SN19', 'SN20'))
                            for name in self._names.get(catalog, {}):
                                ind = re.search(r"\d", name)
                                if ind and ind.start() > 0:
                                    snprefixes.add(name[:ind.start()])
                            snprefixes = list(sorted(snprefixes))
                            for prefix in snprefixes:
                                testname = prefix + event
                                new_matches = get_close_matches(
                                    testname, namekeys, cutoff=0.95, n=1)
                                if (len(new_matches)
                                        and new_matches[0] not in matches):
                                    matches.append(new_matches[0])
                                if len(matches) == 5:
                                    break
                        if len(matches):
                            if self._test:
                                response = matches[0]
                            else:
                                response = prt.prompt(
                                    'no_exact_match',
                                    kind='select',
                                    options=matches,
                                    none_string=('None of the above, ' +
                                                 ('skip this event.' if
                                                  ci == len(catalogs) - 1 else
                                                  'try the next catalog.')))
                            if response:
                                for name in self._names.get(catalog, {}):
                                    if response in self._names[catalog][name]:
                                        events[ei]['name'] = name
                                        events[ei]['catalog'] = catalog
                                        break
                                # BUG FIX: `.get` (consistent with the checks
                                # below) -- direct indexing raised KeyError
                                # when no alias matched the selection.
                                if events[ei].get('name', None):
                                    break

                if not events[ei].get('name', None):
                    prt.message('no_event_by_name')
                    events[ei]['name'] = input_name
                    continue

                urlname = events[ei]['name'] + '.json'
                name_path = os.path.join(name_dir_path, 'cache', urlname)

                if offline or (prefer_cache and os.path.exists(name_path)):
                    prt.message('cached_event',
                                [events[ei]['name'], events[ei]['catalog']])
                else:
                    prt.message('dling_event',
                                [events[ei]['name'], events[ei]['catalog']])
                    try:
                        response = get_url_file_handle(
                            catalogs[events[ei]['catalog']]['json'] + '/json/'
                            + urlname,
                            timeout=10)
                    except Exception:
                        prt.message(
                            'cant_dl_event', [events[ei]['name']],
                            warning=True)
                    else:
                        with open_atomic(name_path, 'wb') as f:
                            shutil.copyfileobj(response, f)
                path = name_path

            if os.path.exists(path):
                events[ei]['path'] = path
                if self._open_in_browser:
                    # NOTE(review): 'catalog' is unset when the event was
                    # given as a local path -- confirm callers never combine
                    # local paths with open_in_browser.
                    webbrowser.open(catalogs[events[ei]['catalog']]['web'] +
                                    events[ei]['name'])
                prt.message('event_file', [path], wrapped=True)
            else:
                prt.message('no_data',
                            [events[ei]['name'], '/'.join(catalogs.keys())])
                if offline:
                    prt.message('omit_offline')
                raise RuntimeError

        return events

    def load_data(self, event):
        """Return data from specified path."""
        if not os.path.exists(event['path']):
            return None

        with codecs.open(event['path'], 'r', encoding='utf-8') as f:
            return json.load(f, object_pairs_hook=OrderedDict)
| {
"repo_name": "guillochon/FriendlyFit",
"path": "mosfit/fetcher.py",
"copies": "5",
"size": "10425",
"license": "mit",
"hash": 1692135649380628200,
"line_mean": 43.1737288136,
"line_max": 79,
"alpha_frac": 0.4174580336,
"autogenerated": false,
"ratio": 5.055771096023278,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7973229129623278,
"avg_score": null,
"num_lines": null
} |
""" A class for handling large tractography datasets.
It is built using the h5py which in turn implement
key features of the HDF5 (hierachical data format) API [1]_.
References
----------
.. [1] http://www.hdfgroup.org/HDF5/doc/H5.intro.html
"""
import numpy as np
import h5py
from nibabel.streamlines import ArraySequence as Streamlines
# Make sure not to carry across setup module from * import
__all__ = ['Dpy']
class Dpy(object):
    """HDF5-backed ('.dpy') storage for tractography streamlines (h5py)."""

    def __init__(self, fname, mode='r', compression=0):
        """ Advanced storage system for tractography based on HDF5

        Parameters
        ------------
        fname : str, full filename
        mode : 'r' read
         'w' write
         'r+' read and write only if file already exists
        compression : 0 no compression to 9 maximum compression

        Examples
        ----------
        >>> import os
        >>> from tempfile import mkstemp #temp file
        >>> from dipy.io.dpy import Dpy
        >>> def dpy_example():
        ...     fd,fname = mkstemp()
        ...     fname += '.dpy'#add correct extension
        ...     dpw = Dpy(fname,'w')
        ...     A=np.ones((5,3))
        ...     B=2*A.copy()
        ...     C=3*A.copy()
        ...     dpw.write_track(A)
        ...     dpw.write_track(B)
        ...     dpw.write_track(C)
        ...     dpw.close()
        ...     dpr = Dpy(fname,'r')
        ...     A=dpr.read_track()
        ...     B=dpr.read_track()
        ...     T=dpr.read_tracksi([0,1,2,0,0,2])
        ...     dpr.close()
        ...     os.remove(fname) #delete file from disk
        >>> dpy_example()
        """
        self.mode = mode
        self.f = h5py.File(fname, mode=self.mode)
        self.compression = compression
        if self.mode == 'w':
            self.f.attrs['version'] = u'0.0.1'
            self.streamlines = self.f.create_group('streamlines')
            # maxshape=None makes the datasets resizable so tracks can be
            # appended later by growing them along axis 0.
            self.tracks = self.streamlines.create_dataset(
                'tracks',
                shape=(0, 3),
                dtype='f4',
                maxshape=(None, 3), chunks=True)
            self.offsets = self.streamlines.create_dataset(
                'offsets',
                shape=(1,),
                dtype='i8',
                maxshape=(None,), chunks=True)
            self.curr_pos = 0
            # Seed offsets with a leading 0 so track i spans
            # offsets[i]:offsets[i+1].
            self.offsets[:] = np.array([self.curr_pos]).astype(np.int64)
        if self.mode == 'r':
            self.tracks = self.f['streamlines']['tracks']
            self.offsets = self.f['streamlines']['offsets']
            # One leading 0 entry in offsets -> track count is len - 1.
            self.track_no = len(self.offsets) - 1
            self.offs_pos = 0

    def version(self):
        # File-format version written as an HDF5 attribute at creation.
        return self.f.attrs['version']

    def write_track(self, track):
        """ write on track each time
        """
        # Grow the tracks dataset and append the new points at the end.
        self.tracks.resize(self.tracks.shape[0] + track.shape[0], axis=0)
        self.tracks[-track.shape[0]:] = track.astype(np.float32)
        self.curr_pos += track.shape[0]
        # Record the end offset of this track.
        self.offsets.resize(self.offsets.shape[0] + 1, axis=0)
        self.offsets[-1] = self.curr_pos

    def write_tracks(self, tracks):
        """ write many tracks together
        """
        # `tracks` is a nibabel ArraySequence: `_data` holds all points
        # concatenated, `_offsets`/`_lengths` delimit each streamline.
        self.tracks.resize(self.tracks.shape[0] + tracks._data.shape[0],
                           axis=0)
        self.tracks[-tracks._data.shape[0]:] = tracks._data

        self.offsets.resize(self.offsets.shape[0] + tracks._offsets.shape[0],
                            axis=0)
        # Shift the new end offsets by the previous last offset so they
        # continue the existing numbering.
        self.offsets[-tracks._offsets.shape[0]:] = \
            self.offsets[-tracks._offsets.shape[0] - 1] + \
            tracks._offsets + tracks._lengths

    def read_track(self):
        """ read one track each time
        """
        # Consecutive offsets delimit the next track; advance the cursor.
        off0, off1 = self.offsets[self.offs_pos:self.offs_pos + 2]
        self.offs_pos += 1
        return self.tracks[off0:off1]

    def read_tracksi(self, indices):
        """ read tracks with specific indices
        """
        tracks = Streamlines()
        for i in indices:
            off0, off1 = self.offsets[i:i + 2]
            tracks.append(self.tracks[off0:off1])
        return tracks

    def read_tracks(self):
        """ read the entire tractography
        """
        # Load offsets and point data once, then slice in memory.
        I = self.offsets[:]
        TR = self.tracks[:]
        tracks = Streamlines()
        for i in range(len(I) - 1):
            off0, off1 = I[i:i + 2]
            tracks.append(TR[off0:off1])
        return tracks

    def close(self):
        # Close the underlying HDF5 file handle.
        self.f.close()
if __name__ == '__main__':
    # No command-line behavior; this module is import-only.
    pass
| {
"repo_name": "nilgoyyou/dipy",
"path": "dipy/io/dpy.py",
"copies": "2",
"size": "4474",
"license": "bsd-3-clause",
"hash": 3260410179514379000,
"line_mean": 29.2297297297,
"line_max": 77,
"alpha_frac": 0.5127402772,
"autogenerated": false,
"ratio": 3.694467382328654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5207207659528654,
"avg_score": null,
"num_lines": null
} |
''' A class for handling large tractography datasets.
It is built using the pytables tools which in turn implement
key features of the HDF5 (hierachical data format) API [1]_.
References
----------
.. [1] http://www.hdfgroup.org/HDF5/doc/H5.intro.html
'''
import numpy as np
# Conditional import machinery for pytables
from dipy.utils.optpkg import optional_package
# Allow import, but disable doctests, if we don't have pytables
tables, have_tables, setup_module = optional_package('tables')
# Make sure not to carry across setup module from * import
__all__ = ['Dpy']
class Dpy(object):
    """PyTables/HDF5-backed ('.dpy') storage for tractography streamlines."""

    def __init__(self, fname, mode='r', compression=0):
        ''' Advanced storage system for tractography based on HDF5

        Parameters
        ------------
        fname : str, full filename
        mode : 'r' read
         'w' write
         'r+' read and write only if file already exists
         'a' read and write even if file doesn't exist (not used yet)
        compression : 0 no compression to 9 maximum compression

        Examples
        ----------
        >>> import os
        >>> from tempfile import mkstemp #temp file
        >>> from dipy.io.dpy import Dpy
        >>> fd,fname = mkstemp()
        >>> fname = fname + '.dpy' #add correct extension
        >>> dpw = Dpy(fname,'w')
        >>> A=np.ones((5,3))
        >>> B=2*A.copy()
        >>> C=3*A.copy()
        >>> dpw.write_track(A)
        >>> dpw.write_track(B)
        >>> dpw.write_track(C)
        >>> dpw.close()
        >>> dpr = Dpy(fname,'r')
        >>> A=dpr.read_track()
        >>> B=dpr.read_track()
        >>> T=dpr.read_tracksi([0,1,2,0,0,2])
        >>> dpr.close()
        >>> os.remove(fname) #delete file from disk
        '''
        self.mode = mode
        self.f = tables.openFile(fname, mode=self.mode)
        # Expected upper bound on row count (a hint for PyTables chunking).
        self.N = 5 * 10**9
        self.compression = compression
        if self.mode == 'w':
            self.streamlines = self.f.createGroup(self.f.root, 'streamlines')
            # create a version number
            # NOTE(review): this instance attribute shadows the version()
            # method below for files opened in 'w' mode -- confirm intent.
            self.version = self.f.createArray(self.f.root, 'version',
                                              [b"0.0.1"], 'Dpy Version Number')

            self.tracks = self.f.createEArray(self.f.root.streamlines,
                                              'tracks',
                                              tables.Float32Atom(),
                                              (0, 3),
                                              "scalar Float32 earray",
                                              tables.Filters(self.compression),
                                              expectedrows=self.N)
            self.offsets = self.f.createEArray(self.f.root.streamlines,
                                               'offsets',
                                               tables.Int64Atom(), (0,),
                                               "scalar Int64 earray",
                                               tables.Filters(
                                                   self.compression),
                                               expectedrows=self.N + 1)
            self.curr_pos = 0
            # Seed offsets with a leading 0 so track i spans
            # offsets[i]:offsets[i+1].
            self.offsets.append(np.array([self.curr_pos]).astype(np.int64))
        if self.mode == 'r':
            self.tracks = self.f.root.streamlines.tracks
            self.offsets = self.f.root.streamlines.offsets
            # One leading 0 entry in offsets -> track count is len - 1.
            self.track_no = len(self.offsets) - 1
            self.offs_pos = 0

    def version(self):
        # Read the stored version marker (bytes) and decode it to str.
        ver = self.f.root.version[:]
        return ver[0].decode()

    def write_track(self, track):
        ''' write on track each time
        '''
        self.tracks.append(track.astype(np.float32))
        self.curr_pos += track.shape[0]
        # Record the end offset of this track.
        self.offsets.append(np.array([self.curr_pos]).astype(np.int64))

    def write_tracks(self, T):
        ''' write many tracks together
        '''
        for track in T:
            self.tracks.append(track.astype(np.float32))
            self.curr_pos += track.shape[0]
            self.offsets.append(np.array([self.curr_pos]).astype(np.int64))

    def read_track(self):
        ''' read one track each time
        '''
        # Consecutive offsets delimit the next track; advance the cursor.
        off0, off1 = self.offsets[self.offs_pos:self.offs_pos + 2]
        self.offs_pos += 1
        return self.tracks[off0:off1]

    def read_tracksi(self, indices):
        ''' read tracks with specific indices
        '''
        T = []
        for i in indices:
            # print(self.offsets[i:i+2])
            off0, off1 = self.offsets[i:i + 2]
            T.append(self.tracks[off0:off1])
        return T

    def read_tracks(self):
        ''' read the entire tractography
        '''
        # Load offsets and point data once, then slice in memory.
        I = self.offsets[:]
        TR = self.tracks[:]
        T = []
        for i in range(len(I) - 1):
            off0, off1 = I[i:i + 2]
            T.append(TR[off0:off1])
        return T

    def close(self):
        # Close the underlying HDF5 file handle.
        self.f.close()
if __name__ == '__main__':
    # No command-line behavior; this module is import-only.
    pass
| {
"repo_name": "matthieudumont/dipy",
"path": "dipy/io/dpy.py",
"copies": "2",
"size": "4952",
"license": "bsd-3-clause",
"hash": -5905453544975111000,
"line_mean": 32.6870748299,
"line_max": 79,
"alpha_frac": 0.4919224556,
"autogenerated": false,
"ratio": 3.9775100401606425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00024295432458697764,
"num_lines": 147
} |
''' A class for handling large tractography datasets.
It is built using the pytables tools which in turn implement
key features of the HDF5 (hierachical data format) API [1]_.
References
----------
.. [1] http://www.hdfgroup.org/HDF5/doc/H5.intro.html
'''
import numpy as np
# Conditional import machinery for pytables
from ..utils.optpkg import optional_package
# Allow import, but disable doctests, if we don't have pytables
tables, have_tables, setup_module = optional_package('tables')
# Make sure not to carry across setup module from * import
__all__ = ['Dpy']
class Dpy(object):
    """PyTables/HDF5-backed ('.dpy') storage for tractography streamlines."""

    def __init__(self, fname, mode='r', compression=0):
        '''Open (or create) a '.dpy' tractography file.

        Parameters
        ------------
        fname : str
            Full filename.
        mode : str
            'r' read, 'w' write, 'r+' read/write an existing file,
            'a' read/write regardless of existence (not used yet).
        compression : int
            0 (none) up to 9 (maximum compression).

        Tracks are stored as one (N, 3) float32 array plus an int64 offsets
        array; track i occupies rows offsets[i]:offsets[i+1].
        '''
        self.mode = mode
        self.f = tables.openFile(fname, mode=self.mode)
        # Expected upper bound on row count (a PyTables chunking hint).
        self.N = 5 * 10 ** 9
        self.compression = compression
        if self.mode == 'w':
            self.streamlines = self.f.createGroup(self.f.root, 'streamlines')
            # Store a format version marker alongside the data.
            self.version = self.f.createArray(
                self.f.root, 'version', ['0.0.1'], 'Dpy Version Number')
            self.tracks = self.f.createEArray(
                self.f.root.streamlines, 'tracks', tables.Float32Atom(),
                (0, 3), "scalar Float32 earray",
                tables.Filters(self.compression), expectedrows=self.N)
            self.offsets = self.f.createEArray(
                self.f.root.streamlines, 'offsets', tables.Int64Atom(),
                (0,), "scalar Int64 earray",
                tables.Filters(self.compression), expectedrows=self.N + 1)
            self.curr_pos = 0
            # Seed offsets with a leading 0 entry.
            self.offsets.append(np.array([self.curr_pos]).astype(np.int64))
        if self.mode == 'r':
            self.tracks = self.f.root.streamlines.tracks
            self.offsets = self.f.root.streamlines.offsets
            # Leading 0 entry means the track count is len(offsets) - 1.
            self.track_no = len(self.offsets) - 1
            self.offs_pos = 0

    def version(self):
        '''Return the stored format version marker.'''
        return self.f.root.version[:][0]

    def write_track(self, track):
        '''Append a single track and record its end offset.'''
        self.tracks.append(track.astype(np.float32))
        self.curr_pos += track.shape[0]
        self.offsets.append(np.array([self.curr_pos]).astype(np.int64))

    def write_tracks(self, T):
        '''Append a sequence of tracks, one after another.'''
        for track in T:
            self.tracks.append(track.astype(np.float32))
            self.curr_pos += track.shape[0]
            self.offsets.append(np.array([self.curr_pos]).astype(np.int64))

    def read_track(self):
        '''Read the next track, advancing the internal cursor.'''
        off0, off1 = self.offsets[self.offs_pos:self.offs_pos + 2]
        self.offs_pos += 1
        return self.tracks[off0:off1]

    def read_tracksi(self, indices):
        '''Read the tracks at the given indices (repeats allowed).'''
        selected = []
        for idx in indices:
            start, stop = self.offsets[idx:idx + 2]
            selected.append(self.tracks[start:stop])
        return selected

    def read_tracks(self):
        '''Read every track in the file.'''
        bounds = self.offsets[:]
        data = self.tracks[:]
        whole = []
        for idx in range(len(bounds) - 1):
            start, stop = bounds[idx:idx + 2]
            whole.append(data[start:stop])
        return whole

    def close(self):
        '''Close the underlying HDF5 file.'''
        self.f.close()
if __name__ == '__main__':
    # No command-line behavior; this module is import-only.
    pass
| {
"repo_name": "rfdougherty/dipy",
"path": "dipy/io/dpy.py",
"copies": "11",
"size": "4636",
"license": "bsd-3-clause",
"hash": 4313028932745195500,
"line_mean": 32.8394160584,
"line_max": 135,
"alpha_frac": 0.5230802416,
"autogenerated": false,
"ratio": 3.762987012987013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.023002186850424704,
"num_lines": 137
} |
''' A class for handling large tractography datasets.
It is built using the pytables tools which in turn implement
key features of the HDF5 (hierachical data format) API [1]_.
References
----------
.. [1] http://www.hdfgroup.org/HDF5/doc/H5.intro.html
'''
import numpy as np
from distutils.version import LooseVersion
# Conditional testing machinery for pytables
from dipy.testing import doctest_skip_parser
# Conditional import machinery for pytables
from dipy.utils.optpkg import optional_package
# Allow import, but disable doctests, if we don't have pytables
tables, have_tables, _ = optional_package('tables')
# Useful variable for backward compatibility.
if have_tables:
    # PyTables renamed its camelCase API (openFile, createEArray, ...) to
    # snake_case in 3.0; this flag selects the right spelling at runtime.
    TABLES_LESS_3_0 = LooseVersion(tables.__version__) < "3.0"
# Make sure not to carry across setup module from * import
__all__ = ['Dpy']
class Dpy(object):
    """PyTables-backed '.dpy' streamline storage, compatible with both
    PyTables 2.x (camelCase API) and 3.x (snake_case API)."""

    @doctest_skip_parser
    def __init__(self, fname, mode='r', compression=0):
        ''' Advanced storage system for tractography based on HDF5

        Parameters
        ------------
        fname : str, full filename
        mode : 'r' read
         'w' write
         'r+' read and write only if file already exists
         'a' read and write even if file doesn't exist (not used yet)
        compression : 0 no compression to 9 maximum compression

        Examples
        ----------
        >>> import os
        >>> from tempfile import mkstemp #temp file
        >>> from dipy.io.dpy import Dpy
        >>> def dpy_example():
        ...     fd,fname = mkstemp()
        ...     fname = fname + '.dpy' #add correct extension
        ...     dpw = Dpy(fname,'w')
        ...     A=np.ones((5,3))
        ...     B=2*A.copy()
        ...     C=3*A.copy()
        ...     dpw.write_track(A)
        ...     dpw.write_track(B)
        ...     dpw.write_track(C)
        ...     dpw.close()
        ...     dpr = Dpy(fname,'r')
        ...     A=dpr.read_track()
        ...     B=dpr.read_track()
        ...     T=dpr.read_tracksi([0,1,2,0,0,2])
        ...     dpr.close()
        ...     os.remove(fname) #delete file from disk
        >>> dpy_example() # skip if not have_tables
        '''
        self.mode = mode
        # Pick the version-appropriate open function (see TABLES_LESS_3_0).
        self.f = tables.openFile(fname, mode=self.mode) if TABLES_LESS_3_0 else tables.open_file(fname, mode=self.mode)
        # Expected upper bound on row count (a PyTables chunking hint).
        self.N = 5 * 10**9
        self.compression = compression
        if self.mode == 'w':
            # Resolve the version-appropriate creation functions once.
            if TABLES_LESS_3_0:
                func_create_group = self.f.createGroup
                func_create_array = self.f.createArray
                func_create_earray = self.f.createEArray
            else:
                func_create_group = self.f.create_group
                func_create_array = self.f.create_array
                func_create_earray = self.f.create_earray

            self.streamlines = func_create_group(self.f.root, 'streamlines')
            # create a version number
            # NOTE(review): this instance attribute shadows the version()
            # method below for files opened in 'w' mode -- confirm intent.
            self.version = func_create_array(self.f.root, 'version',
                                             [b"0.0.1"], 'Dpy Version Number')

            self.tracks = func_create_earray(self.f.root.streamlines,
                                             'tracks',
                                             tables.Float32Atom(),
                                             (0, 3),
                                             "scalar Float32 earray",
                                             tables.Filters(self.compression),
                                             expectedrows=self.N)
            self.offsets = func_create_earray(self.f.root.streamlines,
                                              'offsets',
                                              tables.Int64Atom(), (0,),
                                              "scalar Int64 earray",
                                              tables.Filters(self.compression),
                                              expectedrows=self.N + 1)
            self.curr_pos = 0
            # Seed offsets with a leading 0 so track i spans
            # offsets[i]:offsets[i+1].
            self.offsets.append(np.array([self.curr_pos]).astype(np.int64))
        if self.mode == 'r':
            self.tracks = self.f.root.streamlines.tracks
            self.offsets = self.f.root.streamlines.offsets
            # One leading 0 entry in offsets -> track count is len - 1.
            self.track_no = len(self.offsets) - 1
            self.offs_pos = 0

    def version(self):
        # Read the stored version marker (bytes) and decode it to str.
        ver = self.f.root.version[:]
        return ver[0].decode()

    def write_track(self, track):
        ''' write on track each time
        '''
        self.tracks.append(track.astype(np.float32))
        self.curr_pos += track.shape[0]
        # Record the end offset of this track.
        self.offsets.append(np.array([self.curr_pos]).astype(np.int64))

    def write_tracks(self, T):
        ''' write many tracks together
        '''
        for track in T:
            self.tracks.append(track.astype(np.float32))
            self.curr_pos += track.shape[0]
            self.offsets.append(np.array([self.curr_pos]).astype(np.int64))

    def read_track(self):
        ''' read one track each time
        '''
        # Consecutive offsets delimit the next track; advance the cursor.
        off0, off1 = self.offsets[self.offs_pos:self.offs_pos + 2]
        self.offs_pos += 1
        return self.tracks[off0:off1]

    def read_tracksi(self, indices):
        ''' read tracks with specific indices
        '''
        T = []
        for i in indices:
            # print(self.offsets[i:i+2])
            off0, off1 = self.offsets[i:i + 2]
            T.append(self.tracks[off0:off1])
        return T

    def read_tracks(self):
        ''' read the entire tractography
        '''
        # Load offsets and point data once, then slice in memory.
        I = self.offsets[:]
        TR = self.tracks[:]
        T = []
        for i in range(len(I) - 1):
            off0, off1 = I[i:i + 2]
            T.append(TR[off0:off1])
        return T

    def close(self):
        # Close the underlying HDF5 file handle.
        self.f.close()
if __name__ == '__main__':
    # No command-line behavior; this module is import-only.
    pass
| {
"repo_name": "omarocegueda/dipy",
"path": "dipy/io/dpy.py",
"copies": "3",
"size": "5761",
"license": "bsd-3-clause",
"hash": 6197182644142380000,
"line_mean": 33.7048192771,
"line_max": 119,
"alpha_frac": 0.510328068,
"autogenerated": false,
"ratio": 3.9110658520027157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5921393920002715,
"avg_score": null,
"num_lines": null
} |
'''A class for interacting with a nsqd instance over http'''
from . import BaseClient, json_wrap, ok_check, ClientException
from ..util import pack
class Client(BaseClient):
    '''HTTP client for a single nsqd daemon.'''

    @ok_check
    def ping(self):
        '''Check that the daemon is alive.'''
        return self.get('ping')

    @json_wrap
    def info(self):
        '''Fetch identity/version information for this daemon.'''
        return self.get('info')

    @ok_check
    def pub(self, topic, message):
        '''Publish a single message to a topic.'''
        return self.post('pub', params=dict(topic=topic), data=message)

    @ok_check
    def mpub(self, topic, messages, binary=True):
        '''Send multiple messages to a topic. Optionally pack the messages'''
        if binary:
            # Binary framing: pack() prepends a 4-byte size we must strip.
            payload = pack(messages)[4:]
            return self.post(
                'mpub', data=payload, params={'topic': topic, 'binary': True})
        if any(b'\n' in m for m in messages):
            # Newline-delimited framing cannot carry embedded newlines.
            raise ClientException(
                'Use `binary` flag in mpub for messages with newlines')
        return self.post(
            '/mpub', params=dict(topic=topic), data=b'\n'.join(messages))

    @json_wrap
    def create_topic(self, topic):
        '''Create the provided topic'''
        return self.get('create_topic', params=dict(topic=topic))

    @json_wrap
    def empty_topic(self, topic):
        '''Empty the provided topic'''
        return self.get('empty_topic', params=dict(topic=topic))

    @json_wrap
    def delete_topic(self, topic):
        '''Delete the provided topic'''
        return self.get('delete_topic', params=dict(topic=topic))

    @json_wrap
    def pause_topic(self, topic):
        '''Pause the provided topic'''
        return self.get('pause_topic', params=dict(topic=topic))

    @json_wrap
    def unpause_topic(self, topic):
        '''Unpause the provided topic'''
        return self.get('unpause_topic', params=dict(topic=topic))

    @json_wrap
    def create_channel(self, topic, channel):
        '''Create the channel in the provided topic'''
        return self.get(
            '/create_channel', params=dict(topic=topic, channel=channel))

    @json_wrap
    def empty_channel(self, topic, channel):
        '''Empty the channel in the provided topic'''
        return self.get(
            '/empty_channel', params=dict(topic=topic, channel=channel))

    @json_wrap
    def delete_channel(self, topic, channel):
        '''Delete the channel in the provided topic'''
        return self.get(
            '/delete_channel', params=dict(topic=topic, channel=channel))

    @json_wrap
    def pause_channel(self, topic, channel):
        '''Pause the channel in the provided topic'''
        return self.get(
            '/pause_channel', params=dict(topic=topic, channel=channel))

    @json_wrap
    def unpause_channel(self, topic, channel):
        '''Unpause the channel in the provided topic'''
        return self.get(
            '/unpause_channel', params=dict(topic=topic, channel=channel))

    @json_wrap
    def stats(self):
        '''Get stats about the server'''
        return self.get('stats', params=dict(format='json'))

    def clean_stats(self):
        '''Stats with topics and channels keyed on topic and channel names'''
        stats = self.stats()
        if 'topics' in stats:  # pragma: no branch
            # Re-key topic entries by their names.
            keyed = dict((t.pop('topic_name'), t) for t in stats['topics'])
            for data in keyed.values():
                if 'channels' in data:  # pragma: no branch
                    # Likewise, re-key each topic's channel entries.
                    data['channels'] = dict(
                        (c.pop('channel_name'), c) for c in data['channels'])
            stats['topics'] = keyed
        return stats
| {
"repo_name": "dlecocq/nsq-py",
"path": "nsq/http/nsqd.py",
"copies": "1",
"size": "3917",
"license": "mit",
"hash": 3484775974341873700,
"line_mean": 33.9732142857,
"line_max": 80,
"alpha_frac": 0.5739086035,
"autogenerated": false,
"ratio": 4.252985884907709,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002668708809059686,
"num_lines": 112
} |
'''A class for interacting with a nsqlookupd instance over http'''
from . import BaseClient, json_wrap, ok_check
class Client(BaseClient):
    '''A client for talking to nsqlookupd over http'''

    @ok_check
    def ping(self):
        '''Check that the lookup daemon is responsive.'''
        return self.get('ping')

    @json_wrap
    def info(self):
        '''Retrieve version information for this instance.'''
        return self.get('info')

    @json_wrap
    def lookup(self, topic):
        '''List the hosts serving the given topic.'''
        return self.get('lookup', params=dict(topic=topic))

    @json_wrap
    def topics(self):
        '''List all registered topics.'''
        return self.get('topics')

    @json_wrap
    def channels(self, topic):
        '''List the channels registered under the given topic.'''
        return self.get('channels', params=dict(topic=topic))

    @json_wrap
    def nodes(self):
        '''List information about all known nodes.'''
        return self.get('nodes')

    @json_wrap
    def delete_topic(self, topic):
        '''Remove the given topic from the registry.'''
        return self.get('delete_topic', params=dict(topic=topic))

    @json_wrap
    def delete_channel(self, topic, channel):
        '''Remove a channel registered under the given topic.'''
        return self.get(
            'delete_channel', params=dict(topic=topic, channel=channel))

    @json_wrap
    def tombstone_topic_producer(self, topic, node):
        '''It's not clear what this endpoint does'''
        return self.get(
            'tombstone_topic_producer', params=dict(topic=topic, node=node))

    @json_wrap
    def create_topic(self, topic):
        '''Register a new topic.'''
        return self.get('create_topic', params=dict(topic=topic))

    @json_wrap
    def create_channel(self, topic, channel):
        '''Register a new channel under the given topic.'''
        return self.get(
            'create_channel', params=dict(topic=topic, channel=channel))

    @json_wrap
    def debug(self):
        '''Retrieve debugging information.'''
        return self.get('debug')
| {
"repo_name": "dlecocq/nsq-py",
"path": "nsq/http/nsqlookupd.py",
"copies": "1",
"size": "2002",
"license": "mit",
"hash": -9212283524350920000,
"line_mean": 28.0144927536,
"line_max": 66,
"alpha_frac": 0.5914085914,
"autogenerated": false,
"ratio": 4.069105691056911,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5160514282456911,
"avg_score": null,
"num_lines": null
} |
"""A class for managing collision tests between all objects in a WorldModel.
"""
from __future__ import generators
from robotsim import *
import se3
class WorldCollider:
"""
Attributes:
- geomList: a list of (object,geom) pairs for all objects in the world
- mask: a list of sets, indicating which items are activated for
collision detection for each object in the world.
- terrains: contains the geomList indices of each terrain in the world.
- rigidObjects: contains the geomList indices of each object in
the world
- robots: contains the geomList indices of each robot in the world.
Methods:
- getGeom(obj): finds the geometry corresponding to an object
- collisionTests(filter1,filter2): returns an iterator over potential
colliding pairs
- collisions(filter1,filter2): yields an iterator over collision pairs
- robotSelfCollisions(r): yields an iterator over robot self collisions
- robotObjectCollisions(r,o): yields an iterator over robot-object
collision pairs
- robotTerrainCollisions(r,t): yields an iterator over robot-terrain
collision pairs
- objectTerrainCollide(o,t): returns whether an object and terrain
collide
- objectObjectCollide(o1,o2): returns whether two objects collide
- rayCast(ray_source,ray_direction,obj_indices): finds the first
object intersected by a ray
- rayCastRobot(robot_index,ray_source_ray_direction): finds the
first robot link intersected by a ray
"""
def __init__(self,world):
"""Initializes the collision detection structure given a WorldModel
as input."""
self.world = world
#a list of (object,geom) pairs
self.geomList = []
#self collision mask (2-D array)
self.mask = []
#indexing lists
self.terrains = []
self.rigidObjects = []
self.robots = []
for i in xrange(world.numTerrains()):
t = world.terrain(i)
g = t.geometry()
if g != None:
self.terrains.append(len(self.geomList))
self.geomList.append((t,g))
else:
self.terrains.append(-1)
for i in xrange(world.numRigidObjects()):
o = world.rigidObject(i)
g = o.geometry()
if g != None:
self.rigidObjects.append(len(self.geomList))
self.geomList.append((o,g))
else:
self.rigidObjects.append(-1)
for i in xrange(world.numRobots()):
r = world.robot(i)
self.robots.append([])
for j in xrange(r.numLinks()):
l = r.getLink(j)
g = l.geometry()
if g != None:
self.robots[-1].append(len(self.geomList))
self.geomList.append((l,g))
else:
self.robots[-1].append(-1)
#construct the collision mask
for i in xrange(len(self.geomList)):
self.mask.append(set())
for t in self.terrains:
if t < 0: continue
for o in self.rigidObjects:
if o < 0: continue
self.mask[t].add(o)
self.mask[o].add(t)
for r in self.robots:
for l in r:
if l < 0: continue
#test for fixed links
if self.geomList[l][0].getParent() >= 0:
self.mask[l].add(t)
self.mask[t].add(l)
else:
#print "Ignoring fixed link..."
pass
for o in self.rigidObjects:
if o < 0: continue
for o2 in self.rigidObjects[:o]:
if o2 < 0: continue
self.mask[o].add(o2)
self.mask[o2].add(o)
for r in self.robots:
for l in r:
if l < 0: continue
self.mask[l].add(o)
self.mask[o].add(l)
for i,r in enumerate(self.robots):
#robot - robot collision
for r2 in self.robots[0:i]:
for l1 in r:
for l2 in r2:
if l < 0 or l2 < 0: continue
self.mask[l1].add(l2)
self.mask[l2].add(l1)
#robot self-collision
rob = self.geomList[r[0]][0].getRobot()
nl = rob.numLinks()
for i in xrange(nl):
for j in xrange(i):
if rob.selfCollisionEnabled(i,j):
self.mask[r[i]].add(r[j])
self.mask[r[j]].add(r[i])
def getGeom(self,object):
for (o,g) in self.geomList:
if o==object:
return g
return None
def collisionTests(self,filter1=None,filter2=None):
"""Iterates over ((object,geom),(object,geom)) pairs indicating
which objects should be tested for collision. The geom objects
will be instances of Geometry3D.
E.g., to test collisions, you will call
for i,j in worldCollider.collisionTests():
if i[1].collides(j[1]):
print "Object",i[0].getName(),"collides with",j[0].getName()
(Note that for this purpose is easier to just call collisions();
however you may want to use collisionTests to perform other queries
like proximity detection.)
See collisions for a description of the filter1 and
filter2 arguments.
"""
res = []
if filter1 == None:
for (i,(g,objs)) in enumerate(zip(self.geomList,self.mask)):
for objIndex in objs:
#already checked
if objIndex < i: continue
yield (g,self.geomList[objIndex])
elif filter2 == None:
for (i,(g,objs)) in enumerate(zip(self.geomList,self.mask)):
if not filter1(g[0]): continue
for objIndex in objs:
#already checked
if objIndex < i: continue
if not filter1(self.geomList[objIndex][0]): continue
yield (g,self.geomList[objIndex])
else:
for (i,(g,objs)) in enumerate(zip(self.geomList,self.mask)):
f1 = filter1(g[0])
f2 = filter2(g[0])
for objIndex in objs:
#already checked
if self.geomList[objIndex][0]==g[0]:
continue
if f1 and filter2(self.geomList[objIndex][0]):
yield (g,self.geomList[objIndex])
elif f2 and filter1(self.geomList[objIndex][0]):
yield (self.geomList[objIndex],g)
def collisions(self,filter1=None,filter2=None):
"""Returns an iterator over the colliding pairs of
objects, optionally that satisfies the filter(s).
Arguments filter1 and filter2 optionally indicate subsets of
objects to collide. If neither filter1 nor filter2 are provided,
then all pairs are returned.
If filter1 is provided but filter2 is not, then objects in the set
filter1 will be collided against each other.
If filter1 and filter2 are provided, then objects that
satisfy filter1 will be collided against objects that satisfy
filter2. (Note: in this case there is no checking of duplicates)."""
for (g0,g1) in self.collisionTests(filter1,filter2):
if g0[1].collides(g1[1]):
yield (g0[0],g1[0])
def robotSelfCollisions(self,robot=None):
"""Given robot, tests all self collisions. If robot is None, all
robots are tested. If robots is an index or a RobotModel object
only collisions for that robot are tested"""
if isinstance(robot,RobotModel):
try:
robot = [r for r in xrange(self.world.numRobots()) if self.world.robot(r).getID()==robot.getID()][0]
except IndexError:
raise RuntimeError("Robot "+robot.getName()+" is not found in the world!")
if robot == None:
#test all robots
for r in xrange(len(self.robots)):
for c in self.robotSelfCollisions(r):
yield c
return
rindices = self.robots[robot]
for i in rindices:
if i < 0: continue
for j in rindices:
if i < j: break
if j not in self.mask[i]: continue
if self.geomList[i][1].collides(self.geomList[j][1]):
yield (self.geomList[i][0],self.geomList[j][0])
def robotObjectCollisions(self,robot,object=None):
"""Given robot and object indices, tests all collisions between robot
links and the object. If object is not provided, all objects
are tested"""
if isinstance(robot,RobotModel):
try:
robot = [r for r in xrange(self.world.numRobots()) if self.world.robot(r).getID()==robot.getID()][0]
except IndexError:
raise RuntimeError("Robot "+robot.getName()+" is not found in the world!")
if isinstance(object,RigidObjectModel):
try:
object = [o for o in xrange(self.world.numRigidObjects()) if self.world.rigidObject(o).getID()==object.getID()][0]
except IndexError:
raise RuntimeError("RigidObject "+object.getName()+" is not found in the world!")
if object == None:
#test all objects
for o in xrange(len(self.rigidObjects)):
for c in self.robotObjectCollisions(robot,o):
yield c
return
rindices = self.robots[robot]
oindex = self.rigidObjects[object]
if oindex < 0: return
for i in rindices:
if i < 0: continue
if oindex not in self.mask[i]: continue
if self.geomList[oindex][1].collides(self.geomList[i][1]):
yield (self.geomList[i][0],self.geomList[oindex][0])
def robotTerrainCollisions(self,robot,terrain=None):
"""Given robot and terrain indices, tests all collisions between robot
links and the terrain"""
if isinstance(robot,RobotModel):
try:
robot = [r for r in xrange(self.world.numRobots()) if self.world.robot(r).getID()==robot.getID()][0]
except IndexError:
raise RuntimeError("Robot "+robot.getName()+" is not found in the world!")
if isinstance(terrain,TerrainModel):
try:
terrain = [t for t in xrange(self.world.numTerrains()) if self.world.terrain(t).getID()==terrain.getID()][0]
except IndexError:
raise RuntimeError("Terrain "+robot.getName()+" is not found in the world!")
if terrain == None:
#test all terrains
for t in xrange(len(self.terrains)):
for c in self.robotTerrainCollisions(robot,t):
yield c
return
rindices = self.robots[robot]
tindex = self.terrains[terrain]
if tindex < 0: return
for i in rindices:
if i < 0: continue
if tindex not in self.mask[i]: continue
if self.geomList[tindex][1].collides(self.geomList[i][1]):
yield (self.geomList[i][0],self.geomList[tindex][0])
def objectTerrainCollisions(self,object,terrain=None):
if isinstance(object,RigidObjectModel):
try:
object = [o for o in xrange(self.world.numRigidObjects()) if self.world.rigidObject(o).getID()==object.getID()][0]
except IndexError:
raise RuntimeError("RigidObject "+object.getName()+" is not found in the world!")
if isinstance(terrain,TerrainModel):
try:
terrain = [t for t in xrange(self.world.numTerrains()) if self.world.terrain(t).getID()==terrain.getID()][0]
except IndexError:
raise RuntimeError("Terrain "+robot.getName()+" is not found in the world!")
if terrain == None:
#test all terrains
for t in xrange(len(self.terrains)):
for c in self.objectTerrainCollisions(object,t):
yield c
return
oindex = self.rigidObjects[object]
tindex = self.terrains[terrain]
if oindex < 0: return
if tindex < 0: return
if tindex not in self.mask[oindex]: return
if self.geomList[oindex][1].collides(self.geomList[tindex][1]):
yield (self.geomList[oindex][0],self.geomList[tindex][0])
return
def objectObjectCollisions(self,object,object2):
if isinstance(object,RigidObjectModel):
try:
object = [o for o in xrange(self.world.numRigidObjects()) if self.world.rigidObject(o).getID()==object.getID()][0]
except IndexError:
raise RuntimeError("RigidObject "+object.getName()+" is not found in the world!")
if isinstance(object2,RigidObjectModel):
try:
object2 = [o for o in xrange(self.world.numRigidObjects()) if self.world.rigidObject(o).getID()==object2.getID()][0]
except IndexError:
raise RuntimeError("RigidObject "+object2.getName()+" is not found in the world!")
if object2 == None:
#test all terrains
for o in xrange(len(self.rigidObjects)):
for c in self.objectObjectCollisions(objectot):
yield c
return
oindex = self.rigidObjects[object]
oindex2 = self.rigidObjects[object2]
if oindex < 0: return
if oindex2 < 0: return
if oindex not in self.mask[oindex2]: return
if self.geomList[oindex][1].collides(self.geomList[oindex2][1]):
yield (self.geomList[oindex][0],self.geomList[oindex2][0])
return
def rayCast(self,s,d,indices=None):
"""Finds the first collision with the ray at source s and direction
d. Returns the (object,point) pair or None if no collision is found.
"""
res = None
dmin = 1e300
geoms = (self.geomList if indices==None else [self.geomList[i] for i in indices])
for g in geoms:
(coll,pt) = g[1].rayCast(s,d)
if coll:
dist = vectorops.dot(d,vectorops,sub(pt,s))
if dist < dmin:
dmin,res = dist,(g[0],pt)
return res
def rayCastRobot(self,robot,s,d):
"""Given robot index, do ray casting with the given ray"""
if isinstance(robot,RobotModel):
try:
robot = [r for r in xrange(self.world.numRobots()) if self.world.robot(r).getID()==robot.getID()][0]
except IndexError:
raise RuntimeError("Robot "+robot.getName()+" is not found in the world!")
rindices = self.robots[robot]
return self.rayCast(s,d,rindices)
| {
"repo_name": "stevekuznetsov/Klampt",
"path": "Python/klampt/robotcollide.py",
"copies": "1",
"size": "15364",
"license": "bsd-3-clause",
"hash": 2114435262365613800,
"line_mean": 42.0364145658,
"line_max": 132,
"alpha_frac": 0.5537620411,
"autogenerated": false,
"ratio": 4.03784494086728,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011634786960395816,
"num_lines": 357
} |
"""A class for managing OpenBSD's Packet Filter.
This class communicates with the kernel through the ioctl(2) interface provided
by the pf(4) pseudo-device; this allows Python to natively send commands to the
kernel, thanks to the fcntl and ctypes modules.
"""
import os
import stat
from fcntl import ioctl
from errno import *
from ctypes import *
from socket import *
from pf.exceptions import PFError
from pf.constants import *
from pf._struct import *
from pf._base import PFObject
from pf.queue import *
from pf.state import PFState
from pf.status import PFStatus, PFIface
from pf.table import PFTableAddr, PFTable, PFTStats
from pf.rule import PFRule, PFRuleset, pf_timeouts
from pf._utils import *
__all__ = ['PacketFilter']
# ioctl() operations
IOCPARM_MASK = 0x1fff
IOC_VOID = 0x20000000L
IOC_OUT = 0x40000000L
IOC_IN = 0x80000000L
IOC_INOUT = IOC_IN | IOC_OUT
def _IOC(inout, group, num, len):
return (inout | ((len & IOCPARM_MASK) << 16) | ((group) << 8) | (num))
def _IO(group, num):
return _IOC(IOC_VOID, ord(group), num, 0)
def _IOWR(group, num, type):
return _IOC(IOC_INOUT, ord(group), num, sizeof(type))
# pf(4) ioctl request numbers.  Commented-out entries exist in the
# kernel interface but are not (yet) used by this module.
DIOCSTART = _IO ('D', 1)
DIOCSTOP = _IO ('D', 2)
DIOCADDRULE = _IOWR('D', 4, pfioc_rule)
DIOCGETRULES = _IOWR('D', 6, pfioc_rule)
DIOCGETRULE = _IOWR('D', 7, pfioc_rule)
DIOCCLRSTATES = _IOWR('D', 18, pfioc_state_kill)
#DIOCGETSTATE = _IOWR('D', 19, pfioc_state)
DIOCSETSTATUSIF = _IOWR('D', 20, pfioc_iface)
DIOCGETSTATUS = _IOWR('D', 21, pf_status)
DIOCCLRSTATUS = _IOWR('D', 22, pfioc_iface)
#DIOCNATLOOK = _IOWR('D', 23, pfioc_natlook)
DIOCSETDEBUG = _IOWR('D', 24, c_uint32)
DIOCGETSTATES = _IOWR('D', 25, pfioc_states)
#DIOCCHANGERULE = _IOWR('D', 26, pfioc_rule)
DIOCSETTIMEOUT = _IOWR('D', 29, pfioc_tm)
DIOCGETTIMEOUT = _IOWR('D', 30, pfioc_tm)
#DIOCADDSTATE = _IOWR('D', 37, pfioc_state)
#DIOCCLRRULECTRS = _IO ('D', 38)
DIOCGETLIMIT = _IOWR('D', 39, pfioc_limit)
DIOCSETLIMIT = _IOWR('D', 40, pfioc_limit)
DIOCKILLSTATES = _IOWR('D', 41, pfioc_state_kill)
#DIOCGETRULESETS = _IOWR('D', 58, pfioc_ruleset)
#DIOCGETRULESET = _IOWR('D', 59, pfioc_ruleset)
DIOCRCLRTABLES = _IOWR('D', 60, pfioc_table)
DIOCRADDTABLES = _IOWR('D', 61, pfioc_table)
DIOCRDELTABLES = _IOWR('D', 62, pfioc_table)
DIOCRGETTABLES = _IOWR('D', 63, pfioc_table)
DIOCRGETTSTATS = _IOWR('D', 64, pfioc_table)
DIOCRCLRTSTATS = _IOWR('D', 65, pfioc_table)
DIOCRCLRADDRS = _IOWR('D', 66, pfioc_table)
DIOCRADDADDRS = _IOWR('D', 67, pfioc_table)
DIOCRDELADDRS = _IOWR('D', 68, pfioc_table)
DIOCRSETADDRS = _IOWR('D', 69, pfioc_table)
DIOCRGETADDRS = _IOWR('D', 70, pfioc_table)
#DIOCRGETASTATS = _IOWR('D', 71, pfioc_table)
#DIOCRCLRASTATS = _IOWR('D', 72, pfioc_table)
DIOCRTSTADDRS = _IOWR('D', 73, pfioc_table)
#DIOCRSETTFLAGS = _IOWR('D', 74, pfioc_table)
DIOCRINADEFINE = _IOWR('D', 77, pfioc_table)
#DIOCOSFPFLUSH = _IO ('D', 78)
#DIOCOSFPADD = _IOWR('D', 79, pf_osfp_ioctl)
#DIOCOSFPGET = _IOWR('D', 80, pf_osfp_ioctl)
DIOCXBEGIN = _IOWR('D', 81, pfioc_trans)
DIOCXCOMMIT = _IOWR('D', 82, pfioc_trans)
DIOCXROLLBACK = _IOWR('D', 83, pfioc_trans)
#DIOCGETSRCNODES = _IOWR('D', 84, pfioc_src_nodes)
#DIOCCLRSRCNODES = _IO ('D', 85)
DIOCSETHOSTID = _IOWR('D', 86, c_uint32)
DIOCIGETIFACES = _IOWR('D', 87, pfioc_iface)
DIOCSETIFFLAG = _IOWR('D', 89, pfioc_iface)
DIOCCLRIFFLAG = _IOWR('D', 90, pfioc_iface)
#DIOCKILLSRCNODES = _IOWR('D', 91, pfioc_src_node_kill)
DIOCSETREASS = _IOWR('D', 92, c_uint32)
DIOCADDQUEUE = _IOWR('D', 93, pfioc_queue)
DIOCGETQUEUES = _IOWR('D', 94, pfioc_queue)
DIOCGETQUEUE = _IOWR('D', 95, pfioc_queue)
DIOCGETQSTATS = _IOWR('D', 96, pfioc_qstats)
DIOCSETSYNFLWATS = _IOWR('D', 97, pfioc_synflwats)
DIOCSETSYNCOOKIES = _IOWR('D', 98, c_uint8)
DIOCGETSYNFLWATS = _IOWR('D', 99, pfioc_synflwats)
class _PFTrans(object):
"""Class for managing transactions with the Packet Filter subsystem."""
def __init__(self, dev, path="", *trans_type):
"""Initialize the required structures."""
self.dev = dev
self.size = len(trans_type)
self.array = (pfioc_trans_e * self.size)()
for a, t in zip(self.array, trans_type):
a.type = t
a.anchor = path
self._pt = pfioc_trans(size=self.size, esize=sizeof(pfioc_trans_e),
array=addressof(self.array))
def __enter__(self):
"""Start the transaction."""
ioctl(self.dev, DIOCXBEGIN, self._pt)
return self
def __exit__(self, type, value, traceback):
"""Commit changes if no exceptions occurred; otherwise, rollback."""
if type is None:
ioctl(self.dev, DIOCXCOMMIT, self._pt)
else:
ioctl(self.dev, DIOCXROLLBACK, self._pt)
class PacketFilter(object):
"""Class representing the kernel's packet filtering subsystem.
It provides a set of methods that allow you to send commands to the kernel
through the ioctl(2) interface provided by the pf(4) pseudo-device.
Basically, all methods in this class are just wrappers to ioctl(2) calls,
and may consequently raise IOError if the ioctl() request fails.
"""
def __init__(self, dev="/dev/pf"):
"""Set the pf device."""
self.dev = dev
def enable(self):
"""Enable Packet Filtering."""
with open(self.dev, 'w') as d:
try:
ioctl(d, DIOCSTART)
except IOError, (e, s):
if e != EEXIST: # EEXIST means PF is already enabled
raise
def disable(self):
"""Disable Packet Filtering."""
with open(self.dev, 'w') as d:
try:
ioctl(d, DIOCSTOP)
except IOError, (e, s):
if e != ENOENT: # ENOENT means PF is already disabled
raise
def set_debug(self, level):
"""Set the debug level.
The debug level can be either one of the LOG_* constants or a string.
"""
if level in dbg_levels:
level = dbg_levels[level]
with open(self.dev, 'w') as d:
with _PFTrans(d):
ioctl(d, DIOCSETDEBUG, c_uint32(level))
def set_hostid(self, id):
"""Set the host ID.
The host ID is used by pfsync to identify the host that created a state
table entry. 'id' must be a 32-bit unsigned integer.
"""
with open(self.dev, 'w') as d:
with _PFTrans(d):
ioctl(d, DIOCSETHOSTID, c_uint32(htonl(id)))
def set_reassembly(self, reassembly):
"""Enable reassembly of network traffic.
The 'reassembly' argument specifies the flags for the reassembly
operation; available flags are PF_REASS_ENABLED and PF_REASS_NODF.
"""
with open(self.dev, 'w') as d:
with _PFTrans(d):
ioctl(d, DIOCSETREASS, c_uint32(reassembly))
def get_limit(self, limit=None):
"""Return the hard limits on the memory pools used by Packet Filter.
'limit' can be either one of the PF_LIMIT_* constants or a string;
return the value of the requested limit (UINT_MAX means unlimited) or,
if called with no arguments, a dictionary containing all the available
limits.
"""
if limit is None:
return dict([(l, self.get_limit(l)) for l in pf_limits])
elif limit in pf_limits:
limit = pf_limits[limit]
pl = pfioc_limit(index=limit)
with open(self.dev, 'r') as d:
ioctl(d, DIOCGETLIMIT, pl)
return pl.limit
def set_limit(self, limit, value):
"""Set hard limits on the memory pools used by Packet Filter.
'limit' can be either one of the PF_LIMIT_* constants or a string; a
'value' of UINT_MAX means unlimited. Raise PFError if the current pool
size exceeds the requested hard limit.
"""
if limit in pf_limits:
limit = pf_limits[limit]
pl = pfioc_limit(index=limit, limit=value)
with open(self.dev, 'w') as d:
with _PFTrans(d):
try:
ioctl(d, DIOCSETLIMIT, pl)
except IOError, (e, s):
if e == EBUSY:
raise PFError("Current pool size > {0:d}".format(value))
raise
def get_timeout(self, timeout=None):
"""Return the configured timeout values for PF states.
'timeout' can be either one of the PFTM_* constants or a string; return
the value of the requested timeout or, if called with no arguments, a
dictionary containing all the available timeouts.
"""
if timeout is None:
return dict([(t, self.get_timeout(t)) for t in pf_timeouts])
elif timeout in pf_timeouts:
timeout = pf_timeouts[timeout]
tm = pfioc_tm(timeout=timeout)
with open(self.dev, 'r') as d:
ioctl(d, DIOCGETTIMEOUT, tm)
return tm.seconds
def set_timeout(self, timeout, value):
"""Set the timeout 'value' for a specific PF state.
'timeout' can be either one of the PFTM_* constants or a string; return
the old value of the specified timeout.
"""
if timeout in pf_timeouts:
timeout = pf_timeouts[timeout]
tm = pfioc_tm(timeout=timeout, seconds=value)
with open(self.dev, 'w') as d:
with _PFTrans(d):
ioctl(d, DIOCSETTIMEOUT, tm)
return tm.seconds
def set_optimization(self, opt="normal"):
"""Set the optimization profile for state handling like pfctl."""
for name, val in pf_hints[opt].iteritems():
self.set_timeout(name, val)
def get_optimization(self):
""" """
tm = self.get_timeout()
for name, val in pf_hints.iteritems():
if val["tcp.first"] == tm["tcp.first"]:
return name
def get_ifaces(self, ifname=""):
"""Get the list of interfaces and interface drivers known to pf.
Return a tuple of PFIface objects or a single PFIface object if a
specific 'ifname' is specified.
"""
pi = pfioc_iface(pfiio_name=ifname, pfiio_esize=sizeof(pfi_kif))
with open(self.dev, 'w') as d:
ioctl(d, DIOCIGETIFACES, pi)
buf = (pfi_kif * pi.pfiio_size)()
pi.pfiio_buffer = addressof(buf)
ioctl(d, DIOCIGETIFACES, pi)
if ifname and len(buf) == 1:
return PFIface(buf[0])
else:
return tuple(map(PFIface, buf))
def set_ifflags(self, ifname, flags):
"""Set the user setable 'flags' on the interface 'ifname'."""
pi = pfioc_iface(pfiio_name=ifname, pfiio_flags=flags)
with open(self.dev, 'w') as d:
with _PFTrans(d):
ioctl(d, DIOCSETIFFLAG, pi)
def clear_ifflags(self, ifname, flags=None):
"""Clear the specified user setable 'flags' on the interface 'ifname'.
If no flags are specified, clear all flags.
"""
if flags is None:
flags = PFI_IFLAG_SKIP
pi = pfioc_iface(pfiio_name=ifname, pfiio_flags=flags)
with open(self.dev, 'w') as d:
with _PFTrans(d):
ioctl(d, DIOCCLRIFFLAG, pi)
def set_status_if(self, ifname=""):
"""Specify the interface for which statistics are accumulated.
If no 'ifname' is provided, turn off the collection of per-interface
statistics. Raise PFError if 'ifname' is not a valid interface name.
"""
pi = pfioc_iface(pfiio_name=ifname)
with open(self.dev, 'w') as d:
with _PFTrans(d):
try:
ioctl(d, DIOCSETSTATUSIF, pi)
except IOError, (e, s):
if e == EINVAL:
raise PFError("Invalid ifname: '{0}'".format(ifname))
raise
def get_status(self):
"""Return a PFStatus object containing the internal PF statistics."""
s = pf_status()
with open(self.dev, 'w') as d:
ioctl(d, DIOCGETSTATUS, s)
return PFStatus(s)
def clear_status(self, ifname=""):
"""Clear the internal packet filter statistics.
An optional 'ifname' can be specified in order to clear statistics only
for a specific interface.
"""
pi = pfioc_iface(pfiio_name=ifname)
with open(self.dev, 'w') as d:
ioctl(d, DIOCCLRSTATUS, pi)
def get_states(self):
"""Retrieve Packet Filter's state table entries.
Return a tuple of PFState objects representing the states currently
tracked by PF.
"""
ps = pfioc_states()
l = 0
with open(self.dev, 'w') as d:
while True:
if l:
ps_states = (pfsync_state * (l / sizeof(pfsync_state)))()
ps.ps_buf = addressof(ps_states)
ps.ps_len = l
ioctl(d, DIOCGETSTATES, ps)
if ps.ps_len == 0:
return ()
if ps.ps_len <= l:
break
l = (ps.ps_len * 2)
ps_num = (ps.ps_len / sizeof(pfsync_state))
return tuple([PFState(s) for s in ps_states[:ps_num]])
def clear_states(self, ifname=""):
"""Clear all states.
If an interface name is provided, only states for that interface will
be cleared. Return the number of cleared states.
"""
psk = pfioc_state_kill(psk_ifname=ifname)
with open(self.dev, 'w') as d:
ioctl(d, DIOCCLRSTATES, psk)
return psk.psk_killed
def kill_states(self, af=AF_UNSPEC, proto=0, src=None, dst=None, ifname="",
label="", rdomain=0):
"""Clear states matching the specified arguments.
States can be specified by address family, layer-4 protocol, source and
destination addresses, interface name, label and routing domain. Return
the number of killed states.
"""
psk = pfioc_state_kill(psk_af=af, psk_proto=proto, psk_ifname=ifname,
psk_label=label, psk_rdomain=rdomain)
if src:
psk.psk_src = src._to_struct()
if dst:
psk.psk_dst = dst._to_struct()
with open(self.dev, 'w') as d:
ioctl(d, DIOCKILLSTATES, psk)
return psk.psk_killed
def clear_rules(self, path=""):
"""Clear all rules contained in the anchor 'path'."""
self.load_ruleset(PFRuleset(), path, PF_TRANS_RULESET)
def load_queues(self, *queues):
"""Load a set of queues on an interface.
'queues' must be PFQueue objects.
"""
with open(self.dev, 'w') as d:
with _PFTrans(d, "", PF_TRANS_RULESET) as t:
for queue in queues:
q = pfioc_queue(ticket=t.array[0].ticket,
queue=queue._to_struct())
ioctl(d, DIOCADDQUEUE, q)
def get_queues(self):
"""Retrieve the currently loaded queues.
Return a tuple of PFQueue objects.
"""
queues = []
pq = pfioc_queue()
with open(self.dev, 'r') as d:
ioctl(d, DIOCGETQUEUES, pq)
qstats = queue_stats()
for nr in range(pq.nr):
pqs = pfioc_qstats(nr=nr, ticket=pq.ticket,
buf=addressof(qstats.data),
nbytes=sizeof(hfsc_class_stats))
ioctl(d, DIOCGETQSTATS, pqs)
queue = PFQueue(pqs.queue)
queue.stats = PFQueueStats(qstats.data)
queues.append(queue)
return queues
def _get_rules(self, path, dev, clear):
"""Recursively retrieve rules from the specified ruleset."""
if path.endswith("/*"):
path = path[:-2]
pr = pfioc_rule(anchor=path)
if clear:
pr.action = PF_GET_CLR_CNTR
pr.rule.action = PF_PASS
ioctl(dev, DIOCGETRULES, pr)
tables = list(self.get_tables(PFTable(anchor=path)))
rules = []
for nr in range(pr.nr):
pr.nr = nr
ioctl(dev, DIOCGETRULE, pr)
if pr.anchor_call:
path = os.path.join(pr.anchor, pr.anchor_call)
rs = PFRuleset(pr.anchor_call, pr.rule)
rs.append(*self._get_rules(path, dev, clear))
rules.append(rs)
else:
rules.append(PFRule(pr.rule))
return tables + rules
def get_ruleset(self, path="", clear=False, **kw):
"""Return a PFRuleset object containing the active ruleset.
'path' is the path of the anchor to retrieve rules from. If 'clear' is
True, per-rule statistics will be cleared. Keyword arguments can be
passed for returning only matching rules.
"""
rs = PFRuleset(os.path.basename(path))
with open(self.dev, 'r') as d:
for rule in self._get_rules(path, d, clear):
if isinstance(rule, PFRule):
if not all((getattr(rule, attr) == value)
for (attr, value) in kw.iteritems()):
continue
rs.append(rule)
return rs
def _inadefine(self, table, dev, path, ticket):
"""Define a table in the inactive ruleset."""
table.anchor = path
io = pfioc_table(pfrio_table=table._to_struct(), pfrio_ticket=ticket,
pfrio_esize=sizeof(pfr_addr))
if table.addrs:
io.pfrio_flags |= PFR_FLAG_ADDRSTOO
addrs = table.addrs
buf = (pfr_addr * len(addrs))(*[a._to_struct() for a in addrs])
io.pfrio_buffer = addressof(buf)
io.pfrio_size = len(addrs)
ioctl(dev, DIOCRINADEFINE, io)
def load_ruleset(self, ruleset, path="", *tr_type):
"""Load the given ruleset.
'ruleset' must be a PFRuleset object; 'path' is the name of the anchor
where to load rules; 'tr_type' is one or more PF_TRANS_* constants: if
omitted, all ruleset types will be loaded.
"""
if not tr_type:
tr_type = (PF_TRANS_TABLE, PF_TRANS_RULESET)
with open(self.dev, 'w') as d:
with _PFTrans(d, path, *tr_type) as t:
for a in t.array:
if a.type == PF_TRANS_TABLE:
for t in ruleset.tables:
self._inadefine(t, d, path, a.ticket)
elif a.type == PF_TRANS_RULESET:
for r in ruleset.rules:
pr = pfioc_rule(ticket=a.ticket, anchor=path,
rule=r._to_struct())
if isinstance(r, PFRuleset):
pr.anchor_call = r.name
ioctl(d, DIOCADDRULE, pr)
if isinstance(r, PFRuleset):
self.load_ruleset(r, os.path.join(path, r.name),
*tr_type)
def add_tables(self, *tables):
"""Create one or more tables.
'tables' must be PFTable objects; return the number of tables created.
"""
io = pfioc_table(pfrio_esize=sizeof(pfr_table), pfrio_size=len(tables))
buffer = (pfr_table * len(tables))(*[t._to_struct() for t in tables])
io.pfrio_buffer = addressof(buffer)
with open(self.dev, 'w') as d:
ioctl(d, DIOCRADDTABLES, io)
for t in filter(lambda t: t.addrs, tables):
self.add_addrs(t, *t.addrs)
return io.pfrio_nadd
def clear_tables(self, filter=None):
"""Clear all tables.
'filter' is a PFTable object that allows you to specify the anchor of
the tables to delete. Return the number of tables deleted.
"""
io = pfioc_table()
if filter is not None:
io.pfrio_table = pfr_table(pfrt_anchor=filter.anchor)
with open(self.dev, 'w') as d:
ioctl(d, DIOCRCLRTABLES, io)
return io.pfrio_ndel
def del_tables(self, *tables):
"""Delete one or more tables.
'tables' must be PFTable objects. Return the number of tables deleted.
"""
io = pfioc_table(pfrio_esize=sizeof(pfr_table), pfrio_size=len(tables))
buffer = (pfr_table * len(tables))()
for (t, b) in zip(tables, buffer):
b.pfrt_name = t.name
b.pfrt_anchor = t.anchor
io.pfrio_buffer = addressof(buffer)
with open(self.dev, 'w') as d:
ioctl(d, DIOCRDELTABLES, io)
return io.pfrio_ndel
def get_tables(self, filter=None, buf_size=10):
"""Get the list of all tables.
'filter' is a PFTable object that allows you to specify the anchor of
the tables to retrieve. Return a tuple of PFTable objects containing
the currently-loaded tables.
"""
io = pfioc_table(pfrio_esize=sizeof(pfr_table))
if filter is not None:
io.pfrio_table = pfr_table(pfrt_anchor=filter.anchor)
with open(self.dev, 'w') as d:
while True:
buffer = (pfr_table * buf_size)()
io.pfrio_buffer = addressof(buffer)
io.pfrio_size = buf_size
ioctl(d, DIOCRGETTABLES, io)
if io.pfrio_size <= buf_size:
break
buf_size = io.pfrio_size
tables = []
for t in buffer[:io.pfrio_size]:
try:
addrs = self.get_addrs(PFTable(t))
except IOError, (e, s):
pass # Ignore tables of which you can't get the addresses
else:
tables.append(PFTable(t, *addrs))
return tuple(tables)
def test_addrs(self, table, *addrs):
"""Test if one or more addresses match a table.
'table' can be either a PFTable instance or a string containing the
table name; 'addrs' can be either PFTableAddr instances or strings.
Return the addresses that match.
"""
if isinstance(table, basestring):
table = pfr_table(pfrt_name=table)
else:
table = pfr_table(pfrt_name=table.name, pfrt_anchor=table.anchor)
_addrs = []
for addr in addrs:
if isinstance(addr, PFTableAddr):
_addrs.append(addr)
else:
_addrs.append(PFTableAddr(addr))
io = pfioc_table(pfrio_table=table, pfrio_esize=sizeof(pfr_addr),
pfrio_size=len(addrs))
buffer = (pfr_addr * len(addrs))(*[a._to_struct() for a in _addrs])
io.pfrio_buffer = addressof(buffer)
with open(self.dev, 'w') as d:
ioctl(d, DIOCRTSTADDRS, io)
return tuple([PFTableAddr(a) for a in buffer[:io.pfrio_size] if a.pfra_fback])
def add_addrs(self, table, *addrs):
"""Add one or more addresses to a table.
'table' can be either a PFTable instance or a string containing the
table name; 'addrs' can be either PFTableAddr instances or strings.
Return the number of addresses effectively added.
"""
if isinstance(table, basestring):
table = pfr_table(pfrt_name=table)
else:
table = pfr_table(pfrt_name=table.name, pfrt_anchor=table.anchor)
_addrs = []
for addr in addrs:
if isinstance(addr, PFTableAddr):
_addrs.append(addr)
else:
_addrs.append(PFTableAddr(addr))
io = pfioc_table(pfrio_table=table, pfrio_esize=sizeof(pfr_addr),
pfrio_size=len(addrs))
buffer = (pfr_addr * len(addrs))(*[a._to_struct() for a in _addrs])
io.pfrio_buffer = addressof(buffer)
with open(self.dev, 'w') as d:
ioctl(d, DIOCRADDADDRS, io)
return io.pfrio_nadd
def clear_addrs(self, table):
"""Clear all addresses in the specified table.
Return the number of addresses removed.
"""
if isinstance(table, basestring):
table = pfr_table(pfrt_name=table)
else:
table = pfr_table(pfrt_name=table.name, pfrt_anchor=table.anchor)
io = pfioc_table(pfrio_table=table)
with open(self.dev, 'w') as d:
ioctl(d, DIOCRCLRADDRS, io)
return io.pfrio_ndel
def del_addrs(self, table, *addrs):
"""Delete one or more addresses from the specified table.
'table' can be either a PFTable instance or a string containing the
table name; 'addrs' can be either PFTableAddr instances or strings.
Return the number of addresses deleted.
"""
if isinstance(table, basestring):
table = pfr_table(pfrt_name=table)
else:
table = pfr_table(pfrt_name=table.name, pfrt_anchor=table.anchor)
_addrs = []
for addr in addrs:
if isinstance(addr, PFTableAddr):
_addrs.append(addr)
else:
_addrs.append(PFTableAddr(addr))
io = pfioc_table(pfrio_table=table, pfrio_esize=sizeof(pfr_addr),
pfrio_size=len(addrs))
buffer = (pfr_addr * len(addrs))(*[a._to_struct() for a in _addrs])
io.pfrio_buffer = addressof(buffer)
with open(self.dev, 'w') as d:
ioctl(d, DIOCRDELADDRS, io)
return io.pfrio_ndel
def set_addrs(self, table, *addrs):
"""Replace the content of a table.
'table' can be either a PFTable instance or a string containing the
table name; 'addrs' can be either PFTableAddr instances or strings.
Return a tuple containing the number of addresses deleted, added and
changed.
"""
if isinstance(table, basestring):
table = pfr_table(pfrt_name=table)
else:
table = pfr_table(pfrt_name=table.name, pfrt_anchor=table.anchor)
_addrs = []
for addr in addrs:
if isinstance(addr, PFTableAddr):
_addrs.append(addr)
else:
_addrs.append(PFTableAddr(addr))
io = pfioc_table(pfrio_table=table, pfrio_esize=sizeof(pfr_addr),
pfrio_size=len(addrs))
buffer = (pfr_addr * len(addrs))(*[a._to_struct() for a in _addrs])
io.pfrio_buffer = addressof(buffer)
with open(self.dev, 'w') as d:
ioctl(d, DIOCRSETADDRS, io)
return (io.pfrio_ndel, io.pfrio_nadd, io.pfrio_nchange)
def get_addrs(self, table, buf_size=10):
"""Get the addresses in the specified table.
'table' can be either a PFTable instance or a string containing the
table name. Return a list of PFTableAddr objects.
"""
if isinstance(table, basestring):
table = pfr_table(pfrt_name=table)
else:
table = pfr_table(pfrt_name=table.name, pfrt_anchor=table.anchor)
io = pfioc_table(pfrio_table=table, pfrio_esize=sizeof(pfr_addr))
with open(self.dev, 'w') as d:
while True:
buffer = (pfr_addr * buf_size)()
io.pfrio_buffer = addressof(buffer)
io.pfrio_size = buf_size
ioctl(d, DIOCRGETADDRS, io)
if io.pfrio_size <= buf_size:
break
buf_size = io.pfrio_size
return tuple([PFTableAddr(a) for a in buffer[:io.pfrio_size]])
def get_tstats(self, filter=None, buf_size=10):
"""Get statistics information for one or more tables.
'filter' is a PFTable object that allows you to specify the anchor of
the tables to retrieve statistics for. Return a tuple of PFTStats
objects.
"""
io = pfioc_table(pfrio_esize=sizeof(pfr_tstats))
if filter is not None:
io.pfrio_table = pfr_table(pfrt_anchor=filter.anchor)
with open(self.dev, 'w') as d:
while True:
buffer = (pfr_tstats * buf_size)()
io.pfrio_buffer = addressof(buffer)
io.pfrio_size = buf_size
ioctl(d, DIOCRGETTSTATS, io)
if io.pfrio_size <= buf_size:
break
buf_size = io.pfrio_size
stats = []
for t in buffer[:io.pfrio_size]:
if t.pfrts_tzero:
stats.append(PFTStats(t))
return tuple(stats)
def clear_tstats(self, *tables):
    """Clear the statistics of one or more tables.

    'tables' must be PFTable objects. Return the number of tables cleared.
    """
    io = pfioc_table(pfrio_esize=sizeof(pfr_table), pfrio_size=len(tables))
    # Marshal the (name, anchor) of each table into a ctypes array.
    buffer = (pfr_table * len(tables))()
    for (t, b) in zip(tables, buffer):
        b.pfrt_name = t.name
        b.pfrt_anchor = t.anchor
    io.pfrio_buffer = addressof(buffer)
    with open(self.dev, 'w') as d:
        ioctl(d, DIOCRCLRTSTATS, io)
    # NOTE(review): in OpenBSD's pfvar.h, pfrio_nzero is #defined as an
    # alias for pfrio_nadd, so this is the number of tables zeroed -- confirm.
    return io.pfrio_nadd
def get_synflood_watermarks(self):
    """Return the start and end values for adaptive syncookies watermarks"""
    # Ask the kernel to fill in the current high/low watermark values.
    wats = pfioc_synflwats()
    dev = open(self.dev, 'w')
    try:
        ioctl(dev, DIOCGETSYNFLWATS, wats)
    finally:
        dev.close()
    return (wats.hiwat, wats.lowat)
def set_synflood_watermarks(self, start=2500, end=1500):
    """Set the start and end values for adaptive syncookies watermarks"""
    wats = pfioc_synflwats(hiwat=start, lowat=end)
    dev = open(self.dev, 'w')
    try:
        ioctl(dev, DIOCSETSYNFLWATS, wats)
    finally:
        dev.close()
def set_syncookies(self, mode):
    """Set the syncookies mode (never, always or adaptive)"""
    # Translate a symbolic mode name into its numeric constant; numeric
    # modes pass through unchanged.
    mode = pf_syncookies_modes.get(mode, mode)
    with open(self.dev, 'w') as d:
        ioctl(d, DIOCSETSYNCOOKIES, c_uint8(mode))
| {
"repo_name": "dotpy/py-pf",
"path": "pf/filter.py",
"copies": "1",
"size": "30374",
"license": "bsd-3-clause",
"hash": 3747517932098310700,
"line_mean": 33.9528193326,
"line_max": 86,
"alpha_frac": 0.5631790347,
"autogenerated": false,
"ratio": 3.4363615793641813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4499540614064181,
"avg_score": null,
"num_lines": null
} |
"""A class for managing OpRegularizers."""
from __future__ import absolute_import
from __future__ import division
# [internal] enable type annotations
from __future__ import print_function
import collections
from morph_net.framework import concat_and_slice_regularizers
from morph_net.framework import constant_op_regularizer
from morph_net.framework import grouping_regularizers
from morph_net.framework import op_handler_util
import tensorflow.compat.v1 as tf
from typing import List
# Hardcoded limit on grouping iterations; if OpRegularizerManager has not
# finished analyzing the network after this many steps it is assumed to be
# stuck in a loop and a RuntimeError is raised.
ITERATION_LIMIT = 1000000
# OpSlice represents a slice of a tf.Operation.
# op: A tf.Operation.
# slice: A Slice tuple containing the index and size of the slice. If None, or
#   part of the tuple is None, then the OpSlice represents the entire op.
class OpSlice(collections.namedtuple('OpSlice', ['op', 'slice'])):
  """Immutable (op, slice) pair identifying a channel slice of an op."""

  def __str__(self):
    return '{} {}'.format(self.op.name, self.slice)

  __repr__ = __str__
# Slice represents the index and size of a slice.
# start_index: Integer specifying start index of the slice.
# size: Integer specifying number of elements in the slice.
class Slice(collections.namedtuple('Slice', ['start_index', 'size'])):
  """Immutable (start_index, size) pair describing a contiguous slice."""

  def __str__(self):
    return '({}, {})'.format(self.start_index, self.size)

  __repr__ = __str__
class OpRegularizerManager(object):
  """A class for managing OpRegularizers."""

  def __init__(
      self,
      output_boundary: List[tf.Operation],
      op_handler_dict=None,
      create_grouping_regularizer=grouping_regularizers.MaxGroupingRegularizer,
      force_group=None,
      regularizer_blacklist=None,
      input_boundary: List[tf.Operation] = None,
      iteration_limit=ITERATION_LIMIT):
    """Creates an instance of OpRegularizerManager.

    Several internal data structures are initialized which are used to track ops
    and their grouping. A DFS is performed starting from `output_boundary` and
    following data dependencies that do not involve ops in `input_boundary`. The
    source ops found in this DFS are placed into a queue for processing. The
    OpRegularizerManager then loops over ops in the queue, using the associated
    OpHandler to determine the grouping of the op. Once all ops have been
    grouped, regularizers for the groups can be created.

    If a group has multiple sources of regularization, the
    create_grouping_regularizer function is used to create an OpRegularizer that
    combines the multiple sources.

    If force_group is specified, ops that would not normally be grouped are
    force-grouped. Ops matching the regex will be grouped together, along with
    all ops that were grouped with the matching ops. Basically, the groups
    would be merged. Each regex specifies a separate force-grouping.

    If regularizer_blacklist is specified, then ops matching any of the regex
    (and ops in the same group) do not get regularized. The
    OpRegularizerManager will instead create a None regularizer for the group.

    Args:
      output_boundary: A list of ops to start regularization from.
      op_handler_dict: Dictionary mapping tf.Operation type to OpHandler.
      create_grouping_regularizer: Function that creates an OpRegularizer given
        a list of OpRegularizer.
      force_group: List of regex for ops that should be force-grouped. Each
        regex corresponds to a separate group. Use '|' operator to specify
        multiple patterns in a single regex.
      regularizer_blacklist: List of regex for ops that should not be
        regularized.
      input_boundary: A list of ops that should be excluded from regularization.
      iteration_limit: Integer iteration limit for OpRegularizerManager to
        finish analyzing the network. If the limit is reached, it is assumed
        that OpRegularizerManager got stuck in a loop.

    Raises:
      RuntimeError: If OpRegularizerManager cannot analyze the entire network
        within ITERATION_LIMIT.
      TypeError: If force_group argument is not a list.
      TypeError: If regularizer_blacklist argument is not a list.
    """
    # Dictionary mapping op to list of OpSlice. The op is the concatenation of
    # its OpSlice list.
    self._op_slice_dict = {}
    # Dictionary mapping OpSlice to OpGroup.
    self._op_group_dict = {}
    # Dictionary mapping op type to OpHandler class.
    self._op_handler_dict = op_handler_dict or {}
    # Dictionary mapping OpSlice to OpRegularizer.
    self._op_regularizer_dict = {}
    # Queue of ops to process.
    self._op_deque = collections.deque()
    # Set of all ops to regularize.
    self._all_ops = set()
    # Start DFS from outputs to find all source ops.
    tf.logging.info('OpRegularizerManager starting analysis from: %s.',
                    output_boundary)
    self._dfs_for_source_ops(output_boundary, input_boundary)
    tf.logging.info('OpRegularizerManager found %d ops and %d sources.',
                    len(self._all_ops), len(self._op_deque))
    # Process grouping for all ops.  Handlers may push more ops onto the
    # queue, so iterate until quiescence or the iteration limit.
    iteration_count = 0
    while self._op_deque and iteration_count < iteration_limit:
      op = self._op_deque.pop()
      self._op_handler_dict[op.type].assign_grouping(op, self)
      iteration_count += 1
    if iteration_count >= iteration_limit:
      # OpRegularizerManager got stuck in a loop. Report the ops still in the
      # processing queue.
      raise RuntimeError('OpRegularizerManager could not handle ops: %s' %
                         ['%s (%s)' % (o.name, o.type) for o in self._op_deque])
    # Force-group ops.
    force_group = force_group or []
    if not isinstance(force_group, list):
      raise TypeError('force_group must be a list of regex.')
    self._force_group_ops(force_group)
    # Create blacklist regex.
    blacklist_regex = ''
    if regularizer_blacklist:
      if not isinstance(regularizer_blacklist, list):
        raise TypeError('regularizer_blacklist must be a list of regex.')
      blacklist_regex = '|'.join(regularizer_blacklist)
    # Instantiate regularizers for all groups that have sources.
    groups = set(self._op_group_dict.values())
    blacklist_used = False
    for group in groups:
      # Collect regularizer for every source OpSlice in the OpGroup.
      source_op_slices = []
      regularizers = []
      # If group is blacklisted, then no regularizers are created and all
      # OpSlice will be assigned a None regularizer.
      if op_handler_util.group_match(blacklist_regex, group.op_slices):
        tf.logging.info('OpGroup not regularized due to blacklist: %s.',
                        group.op_slices)
        blacklist_used = True
      else:
        for source_op_slice in group.source_op_slices:
          handler = self._op_handler_dict[source_op_slice.op.type]
          source_op_slices.append(source_op_slice)
          regularizers.append(handler.create_regularizer(source_op_slice))
      # Create a group regularizer and assign to all OpSlice in the OpGroup. If
      # there are no regularizers, assign None.
      if regularizers:
        if len(regularizers) > 1:
          group_regularizer = create_grouping_regularizer(regularizers)
        else:
          group_regularizer = regularizers[0]
      else:
        group_regularizer = None
      for op_slice in group.op_slices:
        self._op_regularizer_dict[op_slice] = group_regularizer
      tf.logging.info('Source OpSlice %s for OpGroup: %s.', source_op_slices,
                      group.op_slices)
    # A blacklist entry that never matched is almost certainly a typo.
    if blacklist_regex and not blacklist_used:
      raise ValueError('Blacklist regex never used: \'%s\'.' % blacklist_regex)
    tf.logging.info('OpRegularizerManager regularizing %d groups.',
                    len(set(self._op_group_dict.values())))
    # Set scope of all ops to be ops that were analyzed.
    self._all_ops = set(self._op_slice_dict.keys())
@property
def ops(self):
  """Returns all ops discovered by OpRegularizerManager."""
  # Populated during construction; after __init__ this is the set of ops
  # that received an OpSlice mapping.
  return self._all_ops
def get_regularizer(self, op):
  """Returns an OpRegularizer for the specified op.

  If no OpRegularizer exists for any slices in the op, returns None.
  Otherwise, create a ConstantOpRegularizer for any slices that are missing a
  regularizer.

  Args:
    op: A tf.Operation.

  Returns:
    An OpRegularizer for op, or None if no OpRegularizer exists.
  """
  op_slices = self.get_op_slices(op)
  regularizers = [
      self._op_regularizer_dict.get(op_slice) for op_slice in op_slices
  ]
  # If all OpSlice have None regularizer, return None.
  if not any(regularizers):
    return None
  # Backfill a ConstantOpRegularizer for any OpSlice missing one, reusing
  # the list fetched above (avoids a redundant second pass of dict lookups).
  for i, op_slice in enumerate(op_slices):
    if regularizers[i] is None:
      regularizer = constant_op_regularizer.ConstantOpRegularizer(
          op_slice.slice.size)
      self._op_regularizer_dict[op_slice] = regularizer
      regularizers[i] = regularizer
  # If op only has 1 OpSlice, return the regularizer for that OpSlice.
  # Otherwise, return the concatenation of regularizers for the constituent
  # OpSlice.
  if len(regularizers) == 1:
    return regularizers[0]
  else:
    return concat_and_slice_regularizers.ConcatRegularizer(regularizers)
def create_op_group_for_op_slice(self, op_slice, is_source=True):
  """Creates an OpGroup for an OpSlice.

  Args:
    op_slice: OpSlice to create an OpGroup for.
    is_source: Boolean indicating if the OpSlice is a source.

  Returns:
    OpGroup for the OpSlice.
  """
  # A non-source OpSlice is excluded from the group's list of sources.
  if is_source:
    omitted = []
  else:
    omitted = [op_slice]
  # Register a fresh OpGroup for this OpSlice.
  self._op_group_dict[op_slice] = OpGroup(
      op_slice, omit_source_op_slices=omitted)
  return self.get_op_group(op_slice)
def group_op_slices(self, op_slices, omit_source_op_slices=None):
  """Group op slices.

  Each OpSlice in op_slices gets mapped to the same group. Additionally, the
  new group is also mapped to the list of OpSlice. Note that this is
  transitive, meaning that if group_op_slices([A, B]) is called when B is
  grouped with C, then all 3 OpSlice [A, B, C] will be grouped together.

  Args:
    op_slices: List of OpSlice to group.
    omit_source_op_slices: List of OpSlice to not track as sources in the new
      OpGroup.
  """
  # Find groups that op slices are already a part of.
  existing_op_groups = []
  for op_slice in op_slices:
    op_group = self.get_op_group(op_slice)
    if op_group and op_group not in existing_op_groups:
      existing_op_groups.append(op_group)
  # Find OpSlice that will change group.
  # pylint: disable=g-complex-comprehension
  op_slices_to_update = [
      os for og in existing_op_groups for os in og.op_slices
  ]
  for op_slice in op_slices:
    if op_slice not in op_slices_to_update:
      # This OpSlice does not have an OpGroup, so create a temporary one.
      temp_op_group = self.create_op_group_for_op_slice(
          op_slice, is_source=self.is_source_op(op_slice.op))
      existing_op_groups.append(temp_op_group)
      op_slices_to_update.append(op_slice)
  # Create new OpGroup.  Merging all existing groups here is what makes the
  # grouping transitive.
  new_op_group = OpGroup(
      op_groups=existing_op_groups,
      omit_source_op_slices=omit_source_op_slices)
  # Update mapping.
  for op_slice in op_slices_to_update:
    self._op_group_dict[op_slice] = new_op_group
def slice_op(self, op, sizes):
  """Slice an op into specified sizes.

  Creates OpSlice objects to represent slices of op. The op is mapped to its
  constituent OpSlice and reformed by concatenating the OpSlice. For example,
  if op has 10 channels and sizes is [3, 7], then this method returns
  [OpSlice(op, (0, 3)), OpSlice(op, (3, 7))].

  Note that sizes must be able to be aligned with the original op slice sizes.
  An original slice can be partitioned into smaller slices, but the original
  slice boundaries cannot be changed. For example, if the original sizes are
  [3, 7], the op cannot be sliced into sizes [2, 8]. However, slicing into
  sizes [1, 2, 3, 4] is okay because the original slices are being sliced
  (3 -> [1, 2] and 7 -> [3, 4]).

  Also note that ops that are grouped with op will also be sliced accordingly,
  with respective slices grouped. For example, if OpA is grouped with OpB and
  OpC, and OpA is sliced into OpA1 and OpA2, then the result will be groups
  (OpA1, OpB1, OpC1) and (OpA2, OpB2, OpC2).

  Args:
    op: A tf.Operation to slice for the purpose of grouping.
    sizes: List of Integer sizes to slice op into. Sizes must sum up to the
      number of output channels for op.

  Raises:
    ValueError: If sizes cannot be aligned with original op slice sizes.
  """
  old_op_slices = self.get_op_slices(op)
  old_op_slice_sizes = op_handler_util.get_op_slice_sizes([old_op_slices])[0]
  # If sizes already match, then nothing happens.
  if old_op_slice_sizes == sizes:
    return
  # If sizes cannot be aligned with original sizes, raise exception.
  try:
    aligned_op_slice_sizes = op_handler_util.get_aligned_sizes(
        [old_op_slice_sizes, sizes])
  except ValueError as e:
    raise ValueError('Error with op: %s: %s' % (op.name, e.args[0]))
  # The requested sizes must be at least as fine as the existing slicing;
  # otherwise the alignment would be strictly finer than `sizes`.
  if sizes != aligned_op_slice_sizes:
    raise ValueError('Cannot slice op %s from sizes %s to %s' %
                     (op.name, old_op_slice_sizes, sizes))
  # Iterate through slices to find old slices that need to be resliced.
  old_slice_index = 0
  new_slice_index = 0
  new_slice_count = 1
  while (new_slice_index + new_slice_count <= len(aligned_op_slice_sizes) and
         old_slice_index < len(old_op_slice_sizes)):
    old_size = old_op_slice_sizes[old_slice_index]
    # Total size of the new_slice_count new slices being considered.
    new_size = op_handler_util.get_total_slice_size(sizes, new_slice_index,
                                                    new_slice_count)
    if old_size == new_size:
      if new_slice_count > 1:
        # If sizes match then this old slice is sliced into new_slice_count
        # smaller slices. Find the group of the old slice because all OpSlice
        # in the group will need to be sliced similarly.
        op_group = self.get_op_group(old_op_slices[old_slice_index])
        if op_group:
          group_op_slices = op_group.op_slices
        else:
          # If OpSlice has no group, just use the OpSlice itself.
          group_op_slices = [old_op_slices[old_slice_index]]
        new_op_slice_group = [list() for _ in range(new_slice_count)]
        for group_op_slice in group_op_slices:
          self._slice_op_slice(group_op_slice, sizes, new_slice_index,
                               new_slice_count, new_op_slice_group)
        if op_group:
          # Group all new OpSlice along each index.
          for i in range(new_slice_count):
            self.group_op_slices(new_op_slice_group[i])
      # Update indices for the next slice.
      old_slice_index += 1
      new_slice_index += new_slice_count
      new_slice_count = 1
    else:
      # If sizes do not match, then more new slices are needed to match the
      # old slice.
      new_slice_count += 1
def process_ops(self, ops):
  """Add ops to processing queue.

  Args:
    ops: List of tf.Operation to put into the processing queue.
  """
  # Filter against the queue as it was before this call, then extend once,
  # so duplicate entries within `ops` are handled exactly as before.
  pending = []
  for op in ops:
    if op in self._all_ops and op not in self._op_deque:
      pending.append(op)
  self._op_deque.extend(pending)
def process_ops_last(self, ops):
  """Add ops to the end of the processing queue.

  Used to avoid infinite looping if an OpHandler decides to defer processing
  of itself.

  Args:
    ops: List of tf.Operation to put at the end of the processing queue.
  """
  # Unlike process_ops, membership in self._all_ops is not required here.
  deferred = [candidate for candidate in ops
              if candidate not in self._op_deque]
  self._op_deque.extendleft(deferred)
def is_source_op(self, op):
  """Returns True if op is a source op.

  Args:
    op: tf.Operation to check whether it is a source op.

  Returns:
    Boolean indicating if op is a source op.
  """
  # Delegate to the handler registered for this op type.
  return self._op_handler_dict[op.type].is_source_op
def is_passthrough(self, op):
  """Returns True if op is passthrough.

  Args:
    op: tf.Operation to check whether it is passthrough.

  Returns:
    Boolean indicating if op is passthrough.
  """
  # Delegate to the handler registered for this op type.
  return self._op_handler_dict[op.type].is_passthrough
def get_op_slices(self, op):
  """Returns OpSlice objects that are mapped to op.

  If no mapping exists, a new OpSlice object will be created and mapped to op.

  Args:
    op: A tf.Operation to get OpSlice for.

  Returns:
    List of OpSlice that constitute op.
  """
  try:
    return self._op_slice_dict[op]
  except KeyError:
    # First time this op is seen; build its OpSlice list below.
    pass
  channel_count = op_handler_util.get_op_size(op)
  if channel_count > 0:
    slices = [OpSlice(op, Slice(0, channel_count))]
  else:
    # Ops with no output channels map to an empty slice list.
    slices = []
  self._op_slice_dict[op] = slices
  return slices
def get_op_group(self, op_slice):
  """Returns the OpGroup that contains op_slice.

  Returns None if no mapping exists.

  Args:
    op_slice: An OpSlice to find OpGroup for.

  Returns:
    OpGroup that contains op_slice, or None if no mapping exists.
  """
  try:
    return self._op_group_dict[op_slice]
  except KeyError:
    return None
def _slice_op_slice(self, op_slice, sizes, size_index, size_count,
                    new_op_slice_group):
  """Slices an OpSlice according to new sizes.

  During reslicing, any OpSlice of an op could be resliced. Given the new
  sizes, this method finds the index where the old OpSlice matches, and
  reslices the OpSlice according to the new sizes. The new OpSlice are added
  to new_op_slice_group by index, so that matching OpSlice can be grouped
  together later.

  Args:
    op_slice: OpSlice that should be sliced.
    sizes: List of integers specifying the new slice sizes.
    size_index: Integer specifying which index in sizes corresponds to
      op_slice.
    size_count: Integer specifying how many slices op_slice will be sliced
      into.
    new_op_slice_group: List of list of new OpSlice that should be grouped
      together.
  """
  op = op_slice.op
  op_slices = self.get_op_slices(op)
  # Get slice sizes for op.
  op_slice_sizes = op_handler_util.get_op_slice_sizes([op_slices])[0]
  # Find the slice index that needs to be resliced.
  op_slice_index = op_slices.index(op_slice)
  # Clear old OpSlice to OpGroup mapping.
  if op_slice in self._op_group_dict:
    del self._op_group_dict[op_slice]
  # Calculate the new op slice sizes for the op: replace the old entry with
  # the size_count new entries taken from `sizes`.
  op_slice_sizes.pop(op_slice_index)
  # Keep track of which OpSlice were resliced.
  is_resliced = [False] * len(op_slice_sizes)
  for i in range(size_count):
    op_slice_sizes.insert(op_slice_index + i, sizes[size_index + i])
    is_resliced.insert(op_slice_index + i, True)
  # Find source slices and slice the op.
  is_source = self._get_source_slices(op_slice_sizes, op_slices)
  slices = self._slice_op_with_sizes(op, op_slice_sizes, is_source,
                                     is_resliced)
  # Accumulate new OpSlice at the corresponding index.
  for i in range(size_count):
    new_op_slice_group[i].append(slices[op_slice_index + i])
def _slice_op_with_sizes(self, op, sizes, is_source, is_resliced):
  """Slices the op according to sizes.

  Args:
    op: tf.Operation to slice.
    sizes: List of integers of slice sizes.
    is_source: List of booleans indicating which new slices are sources.
    is_resliced: List of booleans indicating which slices are new.

  Returns:
    List of OpSlice for the newly sliced op.
  """
  old_slices = self.get_op_slices(op)
  slices = []
  for i, size in enumerate(sizes):
    if is_resliced[i]:
      # Sum up previous slice sizes to find start index of next slice.
      index = sum(sizes[:i])
      # Create new OpSlice for new slices.
      new_slice = OpSlice(op, Slice(index, size))
      # Create new OpGroup for OpSlice that should be sources.
      if is_source[i]:
        self.create_op_group_for_op_slice(new_slice)
    else:
      # If OpSlice is not new, reuse existing OpSlice. Calculate the index of
      # the old OpSlice by subtracting the count of new slices.
      offset = max(is_resliced[:i].count(True) - 1, 0)
      new_slice = old_slices[i - offset]
    slices.append(new_slice)
  # Update OpSlice for the op.
  self._op_slice_dict[op] = slices
  return slices
def _get_source_slices(self, sizes, op_slices):
  """Returns list of booleans indicating which slices are sources.

  If an OpSlice is a source, then its slices are also sources. For example,
  if an op consists of slices size [3, 7] where only the first slice is a
  source, but is resliced into sizes [1, 2, 3, 4], then only the first 2
  slices are sources. Then this method would return
  [True, True, False, False].

  Args:
    sizes: List of integers indicating new slice sizes.
    op_slices: List of OpSlice before slicing.

  Returns:
    List of booleans indicating which slices are sources.
  """
  size_index = 0
  slice_index = 0
  is_source = []
  while size_index < len(sizes):
    # Get the OpGroup for the OpSlice to see if it is a source.
    op_slice = op_slices[slice_index]
    op_group = self.get_op_group(op_slice)
    if op_group and op_slice in op_group.source_op_slices:
      is_source.append(True)
    else:
      is_source.append(False)
    # Check end indices of op_slices and sizes. If they match, then current
    # slice is done and slice index should be incremented.
    end_index = sum(sizes[:size_index + 1])
    slice_end_index = op_slice.slice.start_index + op_slice.slice.size
    if end_index == slice_end_index:
      slice_index += 1
    size_index += 1
  return is_source
def _dfs_for_source_ops(self, output_boundary, input_boundary=None):
  """Performs DFS from ops and finds source ops to process.

  Args:
    output_boundary: An OpRegularizer will be created for all these
      operations, and recursively for all ops they depend on via data
      dependency that does not involve ops from input_boundary.
    input_boundary: A list of ops where traversal should terminate.
  """
  if input_boundary:
    input_boundary = set(input_boundary)
  else:
    input_boundary = set()
  to_visit = list(output_boundary)
  visited = set()
  while to_visit:
    # Get next op and mark as visited.
    op = to_visit.pop()
    # An op can be pushed onto the stack multiple times before its first
    # visit; skip repeats so each op is expanded exactly once (avoids
    # redundant re-traversal of its inputs).
    if op in visited:
      continue
    visited.add(op)
    if op in input_boundary:
      continue
    self._all_ops.add(op)
    # Check if op is a source by querying OpHandler.
    if self._op_handler_dict[op.type].is_source_op:
      self.process_ops([op])
    # Add op inputs to to_visit.
    for tensor in op.inputs:
      next_op = tensor.op
      if next_op not in visited:
        to_visit.append(next_op)
def _force_group_ops(self, force_group):
  """Force-groups ops that match a regex.

  Args:
    force_group: List of regex. For each regex, all matching ops will have
      their groups merged.

  Raises:
    ValueError: If a regex matches no ops, if a matched op has more than one
      OpSlice, or if matched ops have different sizes.
  """
  for regex in force_group:
    # Collect every op whose OpSlice list matches the regex.
    force_group_ops = []
    for op, op_slices in self._op_slice_dict.items():
      if op_handler_util.group_match(regex, op_slices):
        force_group_ops.append(op)
    # A regex that matches nothing is almost certainly a configuration
    # error, so fail loudly.  (Fix: the original raise was missing the
    # '% regex' argument, so the message literally contained '%s'.)
    if not force_group_ops:
      raise ValueError('Regex \'%s\' did not match any ops.' % regex)
    # Assert all ops to force-group have only 1 OpSlice.
    if ([len(self._op_slice_dict[op]) for op in force_group_ops] !=
        [1] * len(force_group_ops)):
      multiple_slice_ops = []
      for op in force_group_ops:
        if len(self._op_slice_dict[op]) != 1:
          multiple_slice_ops.append(op.name)
      raise ValueError('Cannot force-group ops with more than 1 OpSlice: %s' %
                       multiple_slice_ops)
    # Assert all ops to force-group have the same size.
    target_op_size = self._op_slice_dict[force_group_ops[0]][0].slice.size
    if ([self._op_slice_dict[op][0].slice.size for op in force_group_ops] !=
        [target_op_size] * len(force_group_ops)):
      op_names = [op.name for op in force_group_ops]
      raise ValueError(
          'Cannot force-group ops with different sizes: %s' % op_names)
    # Group the ops.
    self.group_op_slices(
        [self._op_slice_dict[op][0] for op in force_group_ops])
class OpGroup(object):
  """Helper class to keep track of OpSlice grouping."""

  # Class-wide counter used to give every group a unique index.
  _static_index = 0

  def __init__(self, op_slice=None, op_groups=None, omit_source_op_slices=None):
    """Create OpGroup with self-incrementing index.

    An OpGroup holds the list of OpSlice that belong to the group plus a
    separate list of source OpSlice.  A directly supplied op_slice is
    treated as a source unless listed in omit_source_op_slices.  All groups
    in op_groups are merged (members and sources) into this new group.

    Args:
      op_slice: OpSlice to include in the group and track as a source.
      op_groups: List of OpGroup to merge together into a new OpGroup.
      omit_source_op_slices: List of OpSlice to not track as sources in the
        new OpGroup.
    """
    omitted = omit_source_op_slices or []
    self._op_slices = []
    self._source_op_slices = []
    # Seed the group with the directly supplied OpSlice, if any.
    if op_slice:
      self._op_slices.append(op_slice)
    if op_slice is not None and op_slice not in omitted:
      self._source_op_slices.append(op_slice)
    # Fold every given group into this one, deduplicating as we go.
    for group in op_groups or []:
      for candidate in group.op_slices:
        if candidate not in self._op_slices:
          self._op_slices.append(candidate)
      for source in group.source_op_slices:
        if source not in omitted and source not in self._source_op_slices:
          self._source_op_slices.append(source)
    # Assign this group the next unique index.
    self._index = OpGroup._static_index
    OpGroup._static_index += 1

  @property
  def op_slices(self):
    """Return a list of OpSlice belonging to the OpGroup."""
    return self._op_slices

  @property
  def source_op_slices(self):
    """Return a list of OpSlice that are regularizer sources."""
    return self._source_op_slices
| {
"repo_name": "google-research/morph-net",
"path": "morph_net/framework/op_regularizer_manager.py",
"copies": "1",
"size": "27056",
"license": "apache-2.0",
"hash": 6339691322377077000,
"line_mean": 36.7350069735,
"line_max": 80,
"alpha_frac": 0.6579686576,
"autogenerated": false,
"ratio": 3.7515252357182476,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49094938933182475,
"avg_score": null,
"num_lines": null
} |
"""A class for manipulating time series based on measurements at
unevenly-spaced times, see:
http://en.wikipedia.org/wiki/Unevenly_spaced_time_series
"""
import csv
import datetime
import itertools
import pprint
from queue import PriorityQueue
import sortedcontainers
from dateutil.parser import parse as date_parse
from infinity import inf
from . import histogram, operations, utils, plot
class TimeSeries(object):
    """A class to help manipulate and analyze time series that are the
    result of taking measurements at irregular points in time. For
    example, here would be a simple time series that starts at 8am and
    goes to 9:59am:

    >>> ts = TimeSeries()
    >>> ts['8:00am'] = 0
    >>> ts['8:47am'] = 1
    >>> ts['8:51am'] = 0
    >>> ts['9:15am'] = 1
    >>> ts['9:59am'] = 0

    The value of the time series is the last recorded measurement: for
    example, at 8:05am the value is 0 and at 8:48am the value is 1. So:

    >>> ts['8:05am']
    0
    >>> ts['8:48am']
    1

    There are also a bunch of things for operating on another time
    series: sums, difference, logical operators and such.
    """

    def __init__(self, data=None, default=None):
        # Measurements keyed by time; a SortedDict keeps keys ordered so
        # bisection-based lookups (bisect_right/peekitem) work.
        self._d = sortedcontainers.SortedDict(data)
        # Value reported for times before the first measurement.
        self.default = default
        # Dispatch table for the `interpolate` argument of get().
        self.getter_functions = {
            "previous": self._get_previous,
            "linear": self._get_linear_interpolate,
        }
def __getstate__(self):
return {
"data": self.items(),
"default": self.default,
}
def __setstate__(self, state):
    # Rebuild through __init__ so the SortedDict and the getter dispatch
    # table are recreated from the pickled (data, default) pair.
    self.__init__(**state)
def __iter__(self):
    """Iterate over sorted (time, value) pairs."""
    # SortedDict yields items in ascending key (time) order.
    return iter(self._d.items())
def __bool__(self):
return bool(self._d)
def is_empty(self):
return len(self) == 0
@property
def default(self):
    """Return the default value of the time series."""
    # Reported for times before the first measurement (see _get_previous).
    return self._default
@default.setter
def default(self, value):
    """Set the default value of the time series."""
    self._default = value
def _get_linear_interpolate(self, time):
    """Linearly interpolate between the measurements bracketing `time`."""
    # Locate the measurement just after `time` and the one at/before it.
    right_index = self._d.bisect_right(time)
    left_index = right_index - 1
    if left_index < 0:
        # Before the first measurement: fall back to the default value.
        return self.default
    elif right_index == len(self._d):
        # right of last measurement
        return self.last_item()[1]
    else:
        left_time, left_value = self._d.peekitem(left_index)
        right_time, right_value = self._d.peekitem(right_index)
        dt_interval = right_time - left_time
        dt_start = time - left_time
        if isinstance(dt_interval, datetime.timedelta):
            # Datetime axes: interpolate over seconds, since timedeltas
            # cannot divide values directly.
            dt_interval = dt_interval.total_seconds()
            dt_start = dt_start.total_seconds()
        slope = float(right_value - left_value) / dt_interval
        value = slope * dt_start + left_value
        return value
def _get_previous(self, time):
    """Return the last measured value at or before `time` (step function)."""
    right_index = self._d.bisect_right(time)
    left_index = right_index - 1
    if right_index > 0:
        # There is a measurement at or before `time`; report its value.
        _, left_value = self._d.peekitem(left_index)
        return left_value
    elif right_index == 0:
        # `time` precedes every measurement.
        return self.default
    else:
        msg = (
            "self._d.bisect_right({}) returned a negative value. "
            """This "can't" happen: please file an issue at """
            "https://github.com/datascopeanalytics/traces/issues"
        ).format(time)
        raise ValueError(msg)
def get(self, time, interpolate="previous"):
"""Get the value of the time series, even in-between measured values.
"""
try:
getter = self.getter_functions[interpolate]
except KeyError:
msg = (
"unknown value '{}' for interpolate, "
"valid values are in [{}]"
).format(interpolate, ", ".join(self.getter_functions))
raise ValueError(msg)
else:
return getter(time)
def get_item_by_index(self, index):
    """Get the (t, value) pair of the time series by index."""
    # SortedDict.peekitem supports negative indices like a list.
    return self._d.peekitem(index)
def last_item(self):
"""Returns the last (time, value) pair of the time series."""
return self.get_item_by_index(-1)
def last_key(self):
"""Returns the last time recorded in the time series"""
return self.last_item()[0]
def last_value(self):
"""Returns the last recorded value in the time series"""
return self.last_item()[1]
def first_item(self):
"""Returns the first (time, value) pair of the time series."""
return self.get_item_by_index(0)
def first_key(self):
"""Returns the first time recorded in the time series"""
return self.first_item()[0]
def first_value(self):
"""Returns the first recorded value in the time series"""
return self.first_item()[1]
def set(self, time, value, compact=False):
"""Set the value for the time series. If compact is True, only set the
value if it's different from what it would be anyway.
"""
if (
(len(self) == 0)
or (not compact)
or (compact and self.get(time) != value)
):
self._d[time] = value
def set_interval(self, start, end, value, compact=False):
    """Set the value for the time series on an interval. If compact is
    True, only set the value if it's different from what it would
    be anyway.
    """
    # for each interval to render
    for i, (s, e, v) in enumerate(self.iterperiods(start, end)):
        # look at all intervals included in the current interval
        # (always at least 1)
        if i == 0:
            # if the first, set initial value to new value of range
            self.set(s, value, compact)
        else:
            # otherwise, remove intermediate key
            del self[s]
    # finish by setting the end of the interval to the previous value.
    # NOTE(review): `v` is the loop variable; if iterperiods() ever yielded
    # nothing this would raise NameError -- confirm it always yields at
    # least one period.
    self.set(end, v, compact)
def compact(self):
    """Convert this instance to a compact version: the value will be the
    same at all times, but repeated measurements are discarded.
    """
    # Sentinel object can never compare equal to a stored value, so the
    # first measurement is always kept.
    previous_value = object()
    redundant = []
    for time, value in self:
        if value == previous_value:
            redundant.append(time)
        previous_value = value
    # Delete after the scan: mutating the dict while iterating it would
    # break the loop.  (Relies on __delitem__ defined elsewhere in this
    # class.)
    for time in redundant:
        del self[time]
def items(self):
    """ts.items() -> view of the (key, value) pairs in ts, as 2-tuples.

    NOTE(review): despite the historical wording ("list"), SortedDict.items()
    returns a view object under Python 3, not a list.
    """
    return self._d.items()
def exists(self):
    """returns False when the timeseries has a None value, True
    otherwise
    """
    # Map every measurement (and the default) onto a boolean series:
    # True wherever a real (non-None) value exists.
    default_exists = False if self.default is None else True
    result = TimeSeries(default=default_exists)
    for t, v in self:
        result[t] = False if v is None else True
    return result
def remove(self, time):
    """Allow removal of measurements from the time series. This throws an
    error if the given time is not actually a measurement point.
    """
    try:
        del self._d[time]
    except KeyError:
        # Re-raise with a friendlier message naming the missing time.
        raise KeyError("no measurement at {}".format(time))
def remove_points_from_interval(self, start, end):
    """Allow removal of all points from the time series within a interval
    [start:end].
    """
    # iterperiods yields (period start, period end, value); only the period
    # start can be a measurement key, so that is what gets deleted.
    for s, e, v in self.iterperiods(start, end):
        try:
            del self._d[s]
        except KeyError:
            # The first period starts at `start`, which need not be an
            # actual measurement time; ignore it.
            pass
def n_measurements(self):
    """Return the number of measurements in the time series."""
    return len(self._d)
def __len__(self):
    """Number of points in the TimeSeries."""
    return self.n_measurements()
def __repr__(self):
    # Pretty-print the underlying measurement dict between XML-ish tags.
    return "<TimeSeries>\n%s\n</TimeSeries>" % pprint.pformat(self._d)
def iterintervals(self, n=2):
    """Iterate over groups of `n` consecutive measurement points in the
    time series.

    Yields n-tuples of (time, value) pairs, advancing one point at a
    time (a sliding window of width n).
    """
    # tee the original iterator into n identical iterators
    streams = itertools.tee(iter(self), n)
    # advance the "cursor" on each iterator by an increasing
    # offset, e.g. if n=3:
    #
    # [a, b, c, d, e, f, ..., w, x, y, z]
    # first cursor --> *
    # second cursor --> *
    # third cursor --> *
    for stream_index, stream in enumerate(streams):
        for _ in range(stream_index):
            next(stream)
    # now, zip the offset streams back together to yield tuples,
    # in the n=3 example it would yield:
    # (a, b, c), (b, c, d), ..., (w, x, y), (x, y, z)
    for intervals in zip(*streams):
        yield intervals
@staticmethod
def _value_function(value):
# if value is None, don't filter
if value is None:
def value_function(t0_, t1_, value_):
return True
# if value is a function, use the function to filter
elif callable(value):
value_function = value
# if value is a constant other than None, then filter to
# return only the intervals where the value equals the
# constant
else:
def value_function(t0_, t1_, value_):
return value_ == value
return value_function
def iterperiods(self, start=None, end=None, value=None):
    """This iterates over the periods (optionally, within a given time
    span) and yields (interval start, interval end, value) tuples.

    `value` may be None (yield everything), a constant (yield only
    intervals with that value), or a predicate f(t0, t1, v).
    TODO: add mask argument here.
    """
    start, end, mask = self._check_boundaries(
        start, end, allow_infinite=False
    )
    value_function = self._value_function(value)
    # get start index and value
    start_index = self._d.bisect_right(start)
    if start_index:
        # value in effect at `start` comes from the last point <= start
        _, start_value = self._d.peekitem(start_index - 1)
    else:
        # no point at or before `start`: the default applies
        start_value = self.default
    # get last index before end of time span
    end_index = self._d.bisect_right(end)
    interval_t0, interval_value = start, start_value
    for interval_t1 in self._d.islice(start_index, end_index):
        if value_function(interval_t0, interval_t1, interval_value):
            yield interval_t0, interval_t1, interval_value
        # set start point to the end of this interval for next
        # iteration
        interval_t0 = interval_t1
        interval_value = self[interval_t0]
    # yield the time, duration, and value of the final period
    if interval_t0 < end:
        if value_function(interval_t0, end, interval_value):
            yield interval_t0, end, interval_value
def slice(self, start, end):
    """Return an equivalent TimeSeries that only has points between
    `start` and `end` (always starting at `start`)

    Note: shadows the builtin name `slice` as a method name.
    """
    start, end, mask = self._check_boundaries(
        start, end, allow_infinite=True
    )
    result = TimeSeries(default=self.default)
    for t0, t1, value in self.iterperiods(start, end):
        result[t0] = value
        # pin the value at the interval end too, so the last period's
        # closing value is preserved in the sliced series
        result[t1] = self[t1]
    return result
def _check_regularization(self, start, end, sampling_period=None):
    """Validate `sampling_period` against the [start, end] span and
    return it converted to the type matching `start` (timedelta for
    date/datetime boundaries, number of seconds otherwise).

    Raises:
        ValueError: if the period is <= 0 or longer than end - start.
    """
    # only do these checks if sampling period is given
    if sampling_period is not None:
        # cast to both seconds and timedelta for error checking
        if isinstance(sampling_period, datetime.timedelta):
            sampling_period_seconds = sampling_period.total_seconds()
            sampling_period_timedelta = sampling_period
        else:
            sampling_period_seconds = sampling_period
            sampling_period_timedelta = datetime.timedelta(
                seconds=sampling_period
            )
        if sampling_period_seconds <= 0:
            msg = "sampling_period must be > 0"
            raise ValueError(msg)
        if sampling_period_seconds > utils.duration_to_number(end - start):
            msg = (
                "sampling_period "
                "is greater than the duration between "
                "start and end."
            )
            raise ValueError(msg)
        # pick the representation that can be added to `start`
        if isinstance(start, datetime.date):
            sampling_period = sampling_period_timedelta
        else:
            sampling_period = sampling_period_seconds
    return sampling_period
def sample(
    self, sampling_period, start=None, end=None, interpolate="previous"
):
    """Sample the series at evenly spaced times.

    Returns a list of (time, value) pairs, one every `sampling_period`
    from `start` up to and including `end` when `end` falls exactly on
    a sample point.
    """
    start, end, _mask = self._check_boundaries(start, end)
    period = self._check_regularization(start, end, sampling_period)
    samples = []
    t = start
    while t <= end:
        samples.append((t, self.get(t, interpolate=interpolate)))
        t += period
    return samples
def moving_average(
    self,
    sampling_period,
    window_size=None,
    start=None,
    end=None,
    placement="center",
    pandas=False,
):
    """Averaging over regular intervals

    Samples every `sampling_period` and, at each sample time, averages
    the series over a window of `window_size` (defaults to
    `sampling_period`) placed "center", "left", or "right" of the
    sample time.  Returns a list of (time, mean) pairs, or a pandas
    Series when pandas=True.
    """
    start, end, mask = self._check_boundaries(start, end)
    # default to sampling_period if not given
    if window_size is None:
        window_size = sampling_period
    sampling_period = self._check_regularization(
        start, end, sampling_period
    )
    # convert to datetime if the times are datetimes
    full_window = window_size * 1.
    half_window = full_window / 2
    if isinstance(start, datetime.date) and not isinstance(
        full_window, datetime.timedelta
    ):
        half_window = datetime.timedelta(seconds=half_window)
        full_window = datetime.timedelta(seconds=full_window)
    result = []
    current_time = start
    while current_time <= end:
        if placement == "center":
            window_start = current_time - half_window
            window_end = current_time + half_window
        elif placement == "left":
            window_start = current_time
            window_end = current_time + full_window
        elif placement == "right":
            window_start = current_time - full_window
            window_end = current_time
        else:
            msg = 'unknown placement "{}"'.format(placement)
            raise ValueError(msg)
        # calculate mean over window and add (t, v) tuple to list
        try:
            mean = self.mean(window_start, window_end)
        except TypeError as e:
            # NOTE(review): best-effort detection of "mean of None
            # values" by inspecting the message -- fragile; confirm
            # whether a more specific exception is available
            if "NoneType" in str(e):
                mean = None
            else:
                raise e
        result.append((current_time, mean))
        current_time += sampling_period
    # convert to pandas Series if pandas=True
    if pandas:
        try:
            import pandas as pd
        except ImportError:
            msg = "can't have pandas=True if pandas is not installed"
            raise ImportError(msg)
        result = pd.Series(
            [v for t, v in result], index=[t for t, v in result],
        )
    return result
@staticmethod
def rebin(binned, key_function):
    """Re-aggregate `binned` (a mapping of bin start -> value) into
    coarser bins, where key_function(bin_start) gives the new bin
    start; values landing in the same new bin are summed with +=.
    """
    result = sortedcontainers.SortedDict()
    for old_start, value in binned.items():
        new_start = key_function(old_start)
        if new_start in result:
            result[new_start] += value
        else:
            result[new_start] = value
    return result
def bin(
    self,
    unit,
    n_units=1,
    start=None,
    end=None,
    mask=None,
    smaller=None,
    transform="distribution",
):
    """Aggregate the series into calendar bins of n_units x unit.

    Returns a SortedDict mapping bin start -> the (non-normalized)
    result of the `transform` method (e.g. "distribution") over that
    bin.  If `smaller` (a previously computed finer binning) is
    given, it is re-aggregated with rebin() instead of recomputing.
    Note: shadows the builtin name `bin` as a method name.
    """
    # return an empty sorted dictionary if there is no time span
    if mask is not None and mask.is_empty():
        return sortedcontainers.SortedDict()
    elif start is not None and start == end:
        return sortedcontainers.SortedDict()
    # use smaller if available
    if smaller:
        return self.rebin(
            smaller, lambda x: utils.datetime_floor(x, unit, n_units),
        )
    start, end, mask = self._check_boundaries(start, end, mask=mask)
    start = utils.datetime_floor(start, unit=unit, n_units=n_units)
    # look up the aggregation method by name, e.g. self.distribution
    function = getattr(self, transform)
    result = sortedcontainers.SortedDict()
    dt_range = utils.datetime_range(start, end, unit, n_units=n_units)
    for bin_start, bin_end in utils.pairwise(dt_range):
        result[bin_start] = function(
            bin_start, bin_end, mask=mask, normalized=False
        )
    return result
def mean(self, start=None, end=None, mask=None, interpolate="previous"):
    """Average value of the series over [start, end], restricted to
    times where `mask` is truthy.  Delegates to distribution().
    """
    dist = self.distribution(
        start=start, end=end, mask=mask, interpolate=interpolate
    )
    return dist.mean()
def distribution(
    self,
    start=None,
    end=None,
    normalized=True,
    mask=None,
    interpolate="previous",
):
    """Calculate the distribution of values over the given time range from
    `start` to `end`.
    Args:
        start (orderable, optional): The lower time bound of
        when to calculate the distribution. By default, the
        first time point will be used.
        end (orderable, optional): The upper time bound of
        when to calculate the distribution. By default, the
        last time point will be used.
        normalized (bool): If True, distribution will sum to
        one. If False and the time values of the TimeSeries
        are datetimes, the units will be seconds.
        mask (:obj:`TimeSeries`, optional): A domain on which to
        calculate the distribution.
        interpolate (str, optional): Method for interpolating
        between measurement points: either "previous"
        (default) or "linear". Note: if "previous" is used,
        then the resulting histogram is exact. If "linear" is
        given, then the values used for the histogram are the
        average value for each segment -- the mean of this
        histogram will be exact, but higher moments (variance)
        will be approximate.
    Returns:
        :obj:`Histogram` with the results.
    """
    start, end, mask = self._check_boundaries(start, end, mask=mask)
    counter = histogram.Histogram()
    # accumulate duration-weighted values over the masked periods
    for i_start, i_end, _ in mask.iterperiods(value=True):
        for t0, t1, _ in self.iterperiods(i_start, i_end):
            duration = utils.duration_to_number(t1 - t0, units="seconds",)
            # midpoint value equals the segment mean for "linear",
            # and the segment value for "previous"
            midpoint = utils.time_midpoint(t0, t1)
            value = self.get(midpoint, interpolate=interpolate)
            try:
                counter[value] += duration
            except histogram.UnorderableElements:
                # fall back to hashing keys when values can't be
                # ordered, then retry the increment
                counter = histogram.Histogram.from_dict(
                    dict(counter), key=hash
                )
                counter[value] += duration
    # divide by total duration if result needs to be normalized
    if normalized:
        return counter.normalized()
    else:
        return counter
def n_points(
    self,
    start=-inf,
    end=+inf,
    mask=None,
    include_start=True,
    include_end=False,
    normalized=False,
):
    """Calculate the number of points over the given time range from
    `start` to `end`.
    Args:
        start (orderable, optional): The lower time bound of when
        to calculate the distribution. By default, start is
        -infinity.
        end (orderable, optional): The upper time bound of when to
        calculate the distribution. By default, the end is
        +infinity.
        mask (:obj:`TimeSeries`, optional): A
        domain on which to calculate the distribution.
        include_start/include_end (bool): whether boundary points
        count.  normalized (bool): divide by the total number of
        measurements.
    Returns:
        `int` with the result (float when normalized=True)
    """
    # just go ahead and return 0 if we already know it regardless
    # of boundaries
    if not self.n_measurements():
        return 0
    start, end, mask = self._check_boundaries(start, end, mask=mask)
    count = 0
    for i_start, i_end, _ in mask.iterperiods(value=True):
        # bisect_left/bisect_right chosen so that boundary points are
        # counted in or out according to include_start/include_end
        if include_end:
            end_count = self._d.bisect_right(i_end)
        else:
            end_count = self._d.bisect_left(i_end)
        if include_start:
            start_count = self._d.bisect_left(i_start)
        else:
            start_count = self._d.bisect_right(i_start)
        count += end_count - start_count
    if normalized:
        count /= float(self.n_measurements())
    return count
def _check_time_series(self, other):
    """Raise TypeError with an informative message when `other` is not
    a TimeSeries (used by binary operators such as __radd__).
    """
    if not isinstance(other, TimeSeries):
        # bug fix: message typo "types(s)" -> "type(s)"
        msg = "unsupported operand type(s) for +: %s and %s" % (
            type(self),
            type(other),
        )
        raise TypeError(msg)
@staticmethod
def _iter_merge(timeseries_list):
    """This function uses a priority queue to efficiently yield the (time,
    value_list) tuples that occur from merging together many time
    series.

    Each yielded state list has one entry per input series, holding
    that series' value as of the yielded time (starting from its
    default).  Tied times produce one yield per tied measurement; the
    caller (iter_merge) collapses those.
    """
    # cast to list since this is getting iterated over several
    # times (causes problem if timeseries_list is a generator)
    timeseries_list = list(timeseries_list)
    # Create iterators for each timeseries and then add the first
    # item from each iterator onto a priority queue. The first
    # item to be popped will be the one with the lowest time
    queue = PriorityQueue()
    for index, timeseries in enumerate(timeseries_list):
        iterator = iter(timeseries)
        try:
            t, value = next(iterator)
        except StopIteration:
            # empty series contributes only its default value
            pass
        else:
            queue.put((t, index, value, iterator))
    # `state` keeps track of the value of the merged
    # TimeSeries. It starts with the default. It starts as a list
    # of the default value for each individual TimeSeries.
    state = [ts.default for ts in timeseries_list]
    while not queue.empty():
        # get the next time with a measurement from queue
        t, index, next_value, iterator = queue.get()
        # make a copy of previous state, and modify only the value
        # at the index of the TimeSeries that this item came from
        state = list(state)
        state[index] = next_value
        yield t, state
        # add the next measurement from the time series to the
        # queue (if there is one)
        try:
            t, value = next(iterator)
        except StopIteration:
            pass
        else:
            queue.put((t, index, value, iterator))
@classmethod
def iter_merge(cls, timeseries_list):
    """Iterate through several time series in order, yielding (time, list)
    tuples where list is the values of each individual TimeSeries
    in the list at time t.
    """
    # using return without an argument is the way to say "the
    # iterator is empty" when there is nothing to iterate over
    # (the more you know...)
    if not timeseries_list:
        return
    # for ts in timeseries_list:
    #     if ts.is_floating():
    #         msg = "can't merge empty TimeSeries with no default value"
    #         raise KeyError(msg)
    # This function mostly wraps _iter_merge, the main point of
    # this is to deal with the case of tied times, where we only
    # want to yield the last list of values that occurs for any
    # group of tied times.
    index, previous_t, previous_state = -1, object(), object()
    for index, (t, state) in enumerate(cls._iter_merge(timeseries_list)):
        # a new time means the previous (fully merged) state is final
        if index > 0 and t != previous_t:
            yield previous_t, previous_state
        previous_t, previous_state = t, state
    # only yield final thing if there was at least one element
    # yielded by _iter_merge
    if index > -1:
        yield previous_t, previous_state
@classmethod
def merge(cls, ts_list, compact=True, operation=None):
    """Merge several TimeSeries into one.

    At each measurement time the merged value is the list of each
    individual series' values (in ts_list order), or
    operation(list_of_values) when `operation` is given.  The merged
    default follows the same rule.
    """
    # the merged default mirrors the merged values: a list of the
    # individual defaults, optionally reduced by `operation`
    defaults = [ts.default for ts in ts_list]
    merged_default = operation(defaults) if operation else defaults
    result = cls(default=merged_default)
    for t, values in cls.iter_merge(ts_list):
        merged_value = values if operation is None else operation(values)
        result.set(t, merged_value, compact=compact)
    return result
@staticmethod
def csv_time_transform(raw):
    """Default parser for the CSV time column: parse the raw string
    into a datetime (via date_parse).
    """
    return date_parse(raw)
@staticmethod
def csv_value_transform(raw):
    """Default parser for the CSV value column: keep it as a string."""
    return str(raw)
@classmethod
def from_csv(
    cls,
    filename,
    time_column=0,
    value_column=1,
    time_transform=None,
    value_transform=None,
    skip_header=True,
    default=None,
):
    """Build a TimeSeries from a CSV file.

    `time_column`/`value_column` are 0-based column indices;
    `time_transform`/`value_transform` convert the raw cell strings
    (defaulting to the class-level csv_*_transform helpers);
    `skip_header` skips the first row.
    """
    # use default on class if not given
    if time_transform is None:
        time_transform = cls.csv_time_transform
    if value_transform is None:
        value_transform = cls.csv_value_transform
    result = cls(default=default)
    with open(filename) as infile:
        reader = csv.reader(infile)
        if skip_header:
            next(reader)
        for row in reader:
            time = time_transform(row[time_column])
            value = value_transform(row[value_column])
            result[time] = value
    return result
def operation(self, other, function, **kwargs):
    """Calculate "elementwise" operation either between this TimeSeries
    and another one, i.e.
    operation(t) = function(self(t), other(t))
    or between this timeseries and a constant:
    operation(t) = function(self(t), other)
    If it's another time series, the measurement times in the
    resulting TimeSeries will be the union of the sets of
    measurement times of the input time series. If it's a
    constant, the measurement times will not change.

    Extra **kwargs are forwarded to the TimeSeries constructor of
    the result (e.g. default=...).
    """
    result = TimeSeries(**kwargs)
    if isinstance(other, TimeSeries):
        # evaluate at every time that appears in either series
        for time, value in self:
            result[time] = function(value, other[time])
        for time, value in other:
            result[time] = function(self[time], value)
    else:
        for time, value in self:
            result[time] = function(value, other)
    return result
def to_bool(self, invert=False):
    """Return a TimeSeries of the truth value of each measurement
    (negated when invert=True).
    """
    def function(x, _):
        truth = bool(x)
        return (not truth) if invert else truth
    return self.operation(None, function)
def threshold(self, value, inclusive=False):
    """Return a boolean TimeSeries that is True where this series is
    > the threshold value (or >= the threshold value if
    inclusive=True).
    """
    if inclusive:
        def function(x, y):
            return x >= y
    else:
        def function(x, y):
            return x > y
    # `operation` calls function(series_value, value) at every point
    return self.operation(value, function)
def sum(self, other):
    """sum(x, y) = x(t) + y(t).

    Uses operations.ignorant_sum (presumably a None-tolerant sum --
    TODO confirm in traces.operations).  Note: shadows the builtin
    name `sum` as a method name.
    """
    return TimeSeries.merge(
        [self, other], operation=operations.ignorant_sum
    )
def difference(self, other):
    """difference(x, y) = x(t) - y(t); `other` may also be a constant."""
    return self.operation(other, lambda x, y: x - y)
def multiply(self, other):
    """mul(t) = self(t) * other(t); `other` may also be a constant."""
    return self.operation(other, lambda x, y: x * y)
def logical_and(self, other):
    """logical_and(t) = self(t) and other(t), coerced to int (0 or the
    truthy operand's int value)."""
    return self.operation(other, lambda x, y: int(x and y))
def logical_or(self, other):
    """logical_or(t) = self(t) or other(t), coerced to int."""
    return self.operation(other, lambda x, y: int(x or y))
def logical_xor(self, other):
    """logical_xor(t) = self(t) ^ other(t) on truth values, as 0/1."""
    return self.operation(other, lambda x, y: int(bool(x) ^ bool(y)))
def __setitem__(self, time, value):
    """Allow a[time] = value syntax or a a[start:end]=value.

    Slice assignment delegates to set_interval (the slice step is
    ignored).
    """
    if isinstance(time, slice):
        return self.set_interval(time.start, time.stop, value)
    else:
        return self.set(time, value)
def __getitem__(self, time):
    """Allow a[time] syntax; slice reads are not supported."""
    if isinstance(time, slice):
        raise ValueError("Syntax a[start:end] not allowed")
    return self.get(time)
def __delitem__(self, time):
    """Allow del[time] syntax; a slice deletes all points in the span."""
    if isinstance(time, slice):
        return self.remove_points_from_interval(time.start, time.stop)
    else:
        return self.remove(time)
def __add__(self, other):
    """Allow a + b syntax (elementwise sum via self.sum)."""
    return self.sum(other)
def __radd__(self, other):
    """Allow the operation 0 + TimeSeries() so builtin sum() works on
    an iterable of TimeSeries.
    """
    # sum() starts from the integer 0; any other left operand must be
    # a TimeSeries or we raise a helpful TypeError
    if other != 0:
        self._check_time_series(other)
    # 0 + self is simply self
    return self
def __sub__(self, other):
    """Allow a - b syntax (elementwise difference)."""
    return self.difference(other)
def __mul__(self, other):
    """Allow a * b syntax (elementwise product)."""
    return self.multiply(other)
def __and__(self, other):
    """Allow a & b syntax (elementwise logical and)."""
    return self.logical_and(other)
def __or__(self, other):
    """Allow a | b syntax (elementwise logical or)."""
    return self.logical_or(other)
def __xor__(self, other):
    """Allow a ^ b syntax (elementwise logical xor)."""
    return self.logical_xor(other)
def __eq__(self, other):
    """Two TimeSeries are equal iff they hold identical (time, value)
    measurements.

    NOTE(review): the `default` attributes are not compared, so two
    series with the same points but different defaults compare
    equal -- confirm this is intended.
    """
    if not isinstance(other, TimeSeries):
        # bug fix: comparing against a non-TimeSeries used to raise
        # AttributeError; returning NotImplemented lets Python fall
        # back to the other operand / default behavior
        return NotImplemented
    return self.items() == other.items()
def __ne__(self, other):
    """Inverse of __eq__ (required explicitly under Python 2)."""
    return not (self == other)
def _check_boundary(self, value, allow_infinite, lower_or_upper):
    """Resolve one boundary of a time span.

    Returns `value` unchanged when given; otherwise falls back to
    +/-infinity (when allow_infinite) or the first/last measurement
    time.  Raises KeyError for a missing boundary on an empty series
    and ValueError for a bad `lower_or_upper` argument.
    """
    if lower_or_upper == "lower":
        infinity_value = -inf
        method_name = "first_key"
    elif lower_or_upper == "upper":
        infinity_value = inf
        method_name = "last_key"
    else:
        msg = '`lower_or_upper` must be "lower" or "upper", got {}'.format(
            lower_or_upper,
        )
        raise ValueError(msg)
    if value is None:
        if allow_infinite:
            return infinity_value
        else:
            try:
                # first_key/last_key of the underlying SortedDict
                return getattr(self, method_name)()
            except IndexError:
                msg = (
                    "can't use '{}' for default {} boundary "
                    "of empty TimeSeries"
                ).format(method_name, lower_or_upper)
                raise KeyError(msg)
    else:
        return value
def _check_boundaries(self, start, end, mask=None, allow_infinite=False):
    """Resolve (start, end, mask) for a time-span operation.

    Fills in missing boundaries via _check_boundary, validates
    start < end, and intersects any given mask with the
    [start, end) span.  Returns the resolved (start, end, mask).
    """
    if mask is not None and mask.is_empty():
        raise ValueError("mask can not be empty")
    # if only a mask is passed in, return mask boundaries and mask
    if start is None and end is None and mask is not None:
        return mask.first_key(), mask.last_key(), mask
    # replace with defaults if not given
    start = self._check_boundary(start, allow_infinite, "lower")
    end = self._check_boundary(end, allow_infinite, "upper")
    if start >= end:
        msg = "start can't be >= end ({} >= {})".format(start, end)
        raise ValueError(msg)
    # boolean series that is True exactly on [start, end)
    start_end_mask = TimeSeries(default=False)
    start_end_mask[start] = True
    start_end_mask[end] = False
    if mask is None:
        mask = start_end_mask
    else:
        # intersect the caller's mask with the span (uses __and__)
        mask = mask & start_end_mask
    return start, end, mask
def distribution_by_hour_of_day(
    self, first=0, last=23, start=None, end=None
):
    """Distribution of values for each hour of the day in
    [first, last].  Returns a list of (hour, Histogram) pairs.
    """
    start, end, mask = self._check_boundaries(start, end)
    result = []
    for hour in range(first, last + 1):
        # build a mask selecting only this hour of each day
        mask = hour_of_day(start, end, hour)
        result.append((hour, self.distribution(mask=mask)))
    return result
def distribution_by_day_of_week(
    self, first=0, last=6, start=None, end=None
):
    """Distribution of values for each day of the week in
    [first, last] (numbering per utils.weekday_number -- TODO
    confirm its convention).  Returns a list of
    (weekday_number, Histogram) pairs.
    """
    start, end, _mask = self._check_boundaries(start, end)
    distributions = []
    for weekday in range(first, last + 1):
        # mask selecting only this weekday within [start, end]
        weekday_mask = day_of_week(start, end, weekday)
        distributions.append(
            (weekday, self.distribution(mask=weekday_mask))
        )
    return distributions
def plot(
    self,
    interpolate="previous",
    figure_width=12,
    linewidth=1,
    marker="o",
    markersize=3,
    color="#222222",
):
    """Plot the series; thin wrapper delegating all styling options
    to plot.plot (see the traces.plot module for details).
    """
    return plot.plot(
        self,
        interpolate=interpolate,
        figure_width=figure_width,
        linewidth=linewidth,
        marker=marker,
        markersize=markersize,
        color=color,
    )
def hour_of_day(start, end, hour):
    """Return a boolean TimeSeries that is True during the given hour
    of each day between `start` and `end`, False otherwise.
    """
    # start should be date, or if datetime, will use date of datetime
    floored = utils.datetime_floor(start)
    domain = TimeSeries(default=False)
    for day_start in utils.datetime_range(
        floored, end, "days", inclusive_end=True
    ):
        # switch on at hour:00 and off one hour later
        interval_start = day_start + datetime.timedelta(hours=hour)
        interval_end = interval_start + datetime.timedelta(hours=1)
        domain[interval_start] = True
        domain[interval_end] = False
    # clip to [start, end] and force the mask off at `end`
    result = domain.slice(start, end)
    result[end] = False
    return result
def day_of_week(start, end, weekday):
    """Return a boolean TimeSeries that is True on the given weekday
    (name or number, per utils.weekday_number) between `start` and
    `end`, False otherwise.
    """
    # allow weekday name or number
    number = utils.weekday_number(weekday)
    # start should be date, or if datetime, will use date of datetime
    floored = utils.datetime_floor(start)
    # find the first matching weekday on or after `floored`; the
    # 7-day range guarantees the loop always finds one
    next_week = floored + datetime.timedelta(days=7)
    for day in utils.datetime_range(floored, next_week, "days"):
        if day.weekday() == number:
            first_day = day
            break
    domain = TimeSeries(default=False)
    for week_start in utils.datetime_range(
        first_day, end, "weeks", inclusive_end=True
    ):
        # switch on for 24 hours once per week
        interval_start = week_start
        interval_end = interval_start + datetime.timedelta(days=1)
        domain[interval_start] = True
        domain[interval_end] = False
    # clip to [start, end] and force the mask off at `end`
    result = domain.slice(start, end)
    result[end] = False
    return result
| {
"repo_name": "datascopeanalytics/traces",
"path": "traces/timeseries.py",
"copies": "1",
"size": "36019",
"license": "mit",
"hash": 3663469954914108400,
"line_mean": 31.1598214286,
"line_max": 79,
"alpha_frac": 0.5597323635,
"autogenerated": false,
"ratio": 4.360653753026634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 1120
} |
""" A class for observatories to use with the Virtual Radio Interferometer
Adapted from the vriObservatory.java class from the legacy code."""
class Observatory(object):
    """An observatory description for the Virtual Radio Interferometer.

    Adapted from the vriObservatory.java class from the legacy code.
    This is a plain data holder; all attributes are set verbatim from
    the constructor arguments.
    """

    # bug fix: the original `def __init__(...)` was missing the
    # trailing colon, which is a SyntaxError.
    def __init__(self, menu_name, full_name,
                 latitude, longitude, num_antennas, num_stations,
                 ant_diameter, ant_el_limit,
                 antennas, stations, configs):
        self.menu_name = menu_name        # Name of observatory in menu
        self.full_name = full_name        # Actual name of the observatory
        self.latitude = latitude          # Observatory latitude in radians
        self.longitude = longitude        # Observatory longitude in radians
        self.num_antennas = num_antennas  # Number of antennas
        self.num_stations = num_stations  # Number of stations
        self.ant_diameter = ant_diameter  # Diameter of antennas (meters)
        self.ant_el_limit = ant_el_limit  # Antenna lower elevation limit (degrees)
        self.antennas = antennas          # List of antennas (i.e instances of vriAntenna class)
        self.stations = stations          # List of stations (instances of vriStation)
        self.configs = configs            # List of antenna configurations (list of stations)
| {
"repo_name": "NuriaLorente/VRIpy",
"path": "Observatory.py",
"copies": "1",
"size": "1070",
"license": "mit",
"hash": 988909154599918000,
"line_mean": 47.6363636364,
"line_max": 81,
"alpha_frac": 0.7485981308,
"autogenerated": false,
"ratio": 3.440514469453376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9339100698877816,
"avg_score": 0.07000238027511219,
"num_lines": 22
} |
# A class for performing hidden markov models
import copy
import numpy as np
class HMM():
    """A discrete hidden Markov model trained with Baum-Welch (EM).

    NOTE(review): several internals (get_gamma, calculate_psi,
    forward_final/backward_final) hard-code exactly two hidden
    states even though `n` is derived from the emission matrix --
    confirm before using with n != 2.
    """
    def __init__(self, transmission_prob, emission_prob, obs=None):
        '''
        Note that this implementation assumes that n, m, and T are small
        enough not to require underflow mitigation.
        Required Inputs:
        - transmission_prob: an (n+2) x (n+2) numpy array, initial, where n is
        the number of hidden states
        - emission_prob: an (m x n) 2-D numpy array, where m is the number of
        possible observations
        Optional Input:
        - obs: a list of observation labels, in the same order as their
        occurence within the emission probability matrix; otherwise, will assume
        that the emission probabilities are in alpha-numerical order.
        '''
        self.transmission_prob = transmission_prob
        self.emission_prob = emission_prob
        # n hidden states, m observation symbols (from emission shape)
        self.n = self.emission_prob.shape[1]
        self.m = self.emission_prob.shape[0]
        self.observations = None
        self.forward = []
        self.backward = []
        self.psi = []
        self.obs = obs
        # maps observation label -> row index in emission_prob
        self.emiss_ref = {}
        # NOTE(review): two-element initialization assumes n == 2
        self.forward_final = [0 , 0]
        self.backward_final = [0 , 0]
        self.state_probs = []
        # NOTE(review): dead branch -- self.observations was set to
        # None just above, so this condition can never be true here
        if obs is None and self.observations is not None:
            self.obs = self.assume_obs()
    def assume_obs(self):
        '''
        If observation labels are not given, will assume that the emission
        probabilities are in alpha-numerical order.
        '''
        obs = list(set(list(self.observations)))
        obs.sort()
        for i in range(len(obs)):
            self.emiss_ref[obs[i]] = i
        return obs
    def train(self, observations, iterations = 10, verbose=True):
        '''
        Trains the model parameters according to the observation sequence.
        Input:
        - observations: 1-D string array of T observations
        '''
        self.observations = observations
        self.obs = self.assume_obs()
        self.psi = [[[0.0] * (len(self.observations)-1) for i in range(self.n)] for i in range(self.n)]
        self.gamma = [[0.0] * (len(self.observations)) for i in range(self.n)]
        for i in range(iterations):
            # NOTE(review): these copies are never used -- presumably
            # intended for a convergence check that was never added
            old_transmission = self.transmission_prob.copy()
            old_emission = self.emission_prob.copy()
            if verbose:
                print("Iteration: {}".format(i + 1))
            self.expectation()
            self.maximization()
    def expectation(self):
        '''
        Executes expectation step.
        '''
        self.forward = self.forward_recurse(len(self.observations))
        self.backward = self.backward_recurse(0)
        self.get_gamma()
        self.get_psi()
    def get_gamma(self):
        '''
        Calculates the gamma matrix.

        NOTE(review): hard-codes two states (indices 0 and 1).
        '''
        self.gamma = [[0, 0] for i in range(len(self.observations))]
        for i in range(len(self.observations)):
            self.gamma[i][0] = (float(self.forward[0][i] * self.backward[0][i]) /
                                float(self.forward[0][i] * self.backward[0][i] +
                                      self.forward[1][i] * self.backward[1][i]))
            self.gamma[i][1] = (float(self.forward[1][i] * self.backward[1][i]) /
                                float(self.forward[0][i] * self.backward[0][i] +
                                      self.forward[1][i] * self.backward[1][i]))
    def get_psi(self):
        '''
        Runs the psi calculation.
        '''
        for t in range(1, len(self.observations)):
            for j in range(self.n):
                for i in range(self.n):
                    self.psi[i][j][t-1] = self.calculate_psi(t, i, j)
    def calculate_psi(self, t, i, j):
        '''
        Calculates the psi for a transition from i->j for t > 0.
        '''
        alpha_tminus1_i = self.forward[i][t-1]
        a_i_j = self.transmission_prob[j+1][i+1]
        beta_t_j = self.backward[j][t]
        observation = self.observations[t]
        b_j = self.emission_prob[self.emiss_ref[observation]][j]
        # NOTE(review): denominator hard-codes two states
        denom = float(self.forward[0][i] * self.backward[0][i] + self.forward[1][i] * self.backward[1][i])
        return (alpha_tminus1_i * a_i_j * beta_t_j * b_j) / denom
    def maximization(self):
        '''
        Executes maximization step.
        '''
        self.get_state_probs()
        for i in range(self.n):
            # re-estimate initial and final transition probabilities
            self.transmission_prob[i+1][0] = self.gamma[0][i]
            self.transmission_prob[-1][i+1] = self.gamma[-1][i] / self.state_probs[i]
            for j in range(self.n):
                self.transmission_prob[j+1][i+1] = self.estimate_transmission(i, j)
            for obs in range(self.m):
                self.emission_prob[obs][i] = self.estimate_emission(i, obs)
    def get_state_probs(self):
        '''
        Calculates total probability of a given state.
        '''
        self.state_probs = [0] * self.n
        for state in range(self.n):
            summ = 0
            for row in self.gamma:
                summ += row[state]
            self.state_probs[state] = summ
    def estimate_transmission(self, i, j):
        '''
        Estimates transmission probabilities from i to j.
        '''
        return sum(self.psi[i][j]) / self.state_probs[i]
    def estimate_emission(self, j, observation):
        '''
        Estimate emission probability for an observation from state j.
        '''
        observation = self.obs[observation]
        # times at which this observation symbol occurred
        ts = [i for i in range(len(self.observations)) if self.observations[i] == observation]
        for i in range(len(ts)):
            ts[i] = self.gamma[ts[i]][j]
        return sum(ts) / self.state_probs[j]
    def backward_recurse(self, index):
        '''
        Runs the backward recursion.
        '''
        # Initialization at T
        if index == (len(self.observations) - 1):
            backward = [[0.0] * (len(self.observations)) for i in range(self.n)]
            for state in range(self.n):
                backward[state][index] = self.backward_initial(state)
            return backward
        # Recursion for T --> 0
        else:
            backward = self.backward_recurse(index+1)
            for state in range(self.n):
                if index >= 0:
                    backward[state][index] = self.backward_probability(index, backward, state)
                if index == 0:
                    self.backward_final[state] = self.backward_probability(index, backward, 0, final=True)
            return backward
    def backward_initial(self, state):
        '''
        Initialization of backward probabilities.
        '''
        return self.transmission_prob[self.n + 1][state + 1]
    def backward_probability(self, index, backward, state, final=False):
        '''
        Calculates the backward probability at index = t.
        '''
        p = [0] * self.n
        for j in range(self.n):
            observation = self.observations[index + 1]
            if not final:
                a = self.transmission_prob[j + 1][state + 1]
            else:
                # termination uses the initial-state column
                a = self.transmission_prob[j + 1][0]
            b = self.emission_prob[self.emiss_ref[observation]][j]
            beta = backward[j][index + 1]
            p[j] = a * b * beta
        return sum(p)
    def forward_recurse(self, index):
        '''
        Executes forward recursion.
        '''
        # Initialization
        if index == 0:
            forward = [[0.0] * (len(self.observations)) for i in range(self.n)]
            for state in range(self.n):
                forward[state][index] = self.forward_initial(self.observations[index], state)
            return forward
        # Recursion
        else:
            forward = self.forward_recurse(index-1)
            for state in range(self.n):
                if index != len(self.observations):
                    forward[state][index] = self.forward_probability(index, forward, state)
                else:
                    # Termination
                    self.forward_final[state] = self.forward_probability(index, forward, state, final=True)
            return forward
    def forward_initial(self, observation, state):
        '''
        Calculates initial forward probabilities.
        '''
        # NOTE(review): the next two bare expressions are no-ops
        # (dead code); only the return statement has effect
        self.transmission_prob[state + 1][0]
        self.emission_prob[self.emiss_ref[observation]][state]
        return self.transmission_prob[state + 1][0] * self.emission_prob[self.emiss_ref[observation]][state]
    def forward_probability(self, index, forward, state, final=False):
        '''
        Calculates the alpha for index = t.
        '''
        p = [0] * self.n
        for prev_state in range(self.n):
            if not final:
                # Recursion
                obs_index = self.emiss_ref[self.observations[index]]
                p[prev_state] = forward[prev_state][index-1] * self.transmission_prob[state + 1][prev_state + 1] * self.emission_prob[obs_index][state]
            else:
                # Termination
                p[prev_state] = forward[prev_state][index-1] * self.transmission_prob[self.n][prev_state + 1]
        return sum(p)
    def likelihood(self, new_observations):
        '''
        Returns the probability of a observation sequence based on current model
        parameters.
        '''
        new_hmm = HMM(self.transmission_prob, self.emission_prob)
        new_hmm.observations = new_observations
        new_hmm.obs = new_hmm.assume_obs()
        # running the forward pass fills in new_hmm.forward_final
        forward = new_hmm.forward_recurse(len(new_observations))
        return sum(new_hmm.forward_final)
if __name__ == '__main__':
    # Example inputs from Jason Eisner's Ice Cream and Baltimore Summer example
    # http://www.cs.jhu.edu/~jason/papers/#eisner-2002-tnlp
    # emission rows: observation symbols '1','2','3'; columns: 2 states
    emission = np.array([[0.7, 0], [0.2, 0.3], [0.1, 0.7]])
    # (n+2) x (n+2) transition matrix including initial/final states
    transmission = np.array([ [0, 0, 0, 0], [0.5, 0.8, 0.2, 0], [0.5, 0.1, 0.7, 0], [0, 0.1, 0.1, 0]])
    observations = ['2','3','3','2','3','2','3','2','2','3','1','3','3','1','1',
                    '1','2','1','1','1','3','1','2','1','1','1','2','3','3','2',
                    '3','2','2']
    model = HMM(transmission, emission)
    model.train(observations)
    print("Model transmission probabilities:\n{}".format(model.transmission_prob))
    print("Model emission probabilities:\n{}".format(model.emission_prob))
    # Probability of a new sequence
    new_seq = ['1', '2', '3']
    print("Finding likelihood for {}".format(new_seq))
    likelihood = model.likelihood(new_seq)
    print("Likelihood: {}".format(likelihood))
| {
"repo_name": "aldengolab/hidden-markov-model",
"path": "hmm.py",
"copies": "1",
"size": "10528",
"license": "mit",
"hash": 8558655080084198000,
"line_mean": 38.5789473684,
"line_max": 151,
"alpha_frac": 0.5539513678,
"autogenerated": false,
"ratio": 3.6927393896878287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9730249587115658,
"avg_score": 0.0032882340744342293,
"num_lines": 266
} |
"""A class for reading and writing ELF format binaries (esp. Amiga m68k ones)"""
import struct
import os
from ELF import *
from ELFFile import *
class ELFReader:
def _load_section_headers(self, f, ef):
    """Parse the section-header table of `ef` from file object `f`,
    appending one ELFSectionHeader per entry to ef.section_hdrs.
    """
    f.seek(ef.header.shoff, os.SEEK_SET)
    entry_size = ef.header.shentsize
    for _ in xrange(ef.header.shnum):
        header = ELFSectionHeader()
        header.parse(f.read(entry_size))
        ef.section_hdrs.append(header)
def _load_sections(self, f, ef):
sect_hdrs = ef.section_hdrs
idx = 0
for sect_hdr in sect_hdrs:
idx += 1
sect = self._load_section(f, sect_hdr, idx)
ef.sections.append(sect)
def _load_section(self, f, sect_hdr, idx):
t = sect_hdr.type_
size = sect_hdr.size
if t == SHT_NOBITS or size == 0:
sect = ELFSection(sect_hdr, idx)
else:
# read data
offset = sect_hdr.offset
f.seek(offset, os.SEEK_SET)
data = f.read(size)
# decode?
if t == SHT_STRTAB:
sect = ELFSectionStringTable(sect_hdr, idx, data)
sect.decode()
elif t == SHT_SYMTAB:
sect = ELFSectionSymbolTable(sect_hdr, idx, data)
sect.decode()
elif t == SHT_RELA:
sect = ELFSectionRelocationsWithAddend(sect_hdr, idx, data)
sect.decode()
else:
sect = ELFSectionWithData(sect_hdr, idx, data)
return sect
def _name_section(self, section, strtab):
off = section.header.name
section.name_str = strtab.get_string(off)
def _resolve_symtab_names(self, sect, sections):
# get linked string table
strtab_seg_num = sect.header.link
if strtab_seg_num < 1 or strtab_seg_num >= len(sections):
raise ELFParseError("Invalid strtab for symtab: "+strtab_seg_num)
strtab = sections[strtab_seg_num]
if strtab.__class__ != ELFSectionStringTable:
raise ELFParserError("Invalid strtab segment for symtab")
# resolve all symbol names
for sym in sect.symtab:
sym.name_str = strtab.get_string(sym.name)
def _resolve_symtab_indices(self, sect, sections):
for sym in sect.symtab:
if sym.shndx_str == None:
# refers a valid section
idx = sym.shndx
sym.section = sections[idx]
def _assign_symbols_to_sections(self, sect):
src_file_sym = None
all_symbols = []
for sym in sect.symtab:
sym_type = sym.type_str
if sym_type == 'FILE':
# store file symbol for following symbols
src_file_sym = sym
elif sym_type in ('OBJECT','FUNC','NOTYPE'):
# add containing file symbol and its name
if src_file_sym != None:
sym.file_sym = src_file_sym
# add symbol to segment
sym_sect = sym.section
if sym_sect is not None:
sym_sect.symbols.append(sym)
# list of all symbols assigned
all_symbols.append(sym_sect.symbols)
# now sort all symbol lists
for symbols in all_symbols:
symbols.sort(key=lambda x : x.value)
def _resolve_rela_links(self, sect, sections):
link = sect.header.link
info = sect.header.info
num_sects = len(sections)
if link == 0 or link >= num_sects:
raise ELFParseError("Invalid rela link!")
if info == 0 or info >= num_sects:
raise ELFParseError("Invalid rela info!")
# info_seg -> src segment we will apply rela on
src_sect = sections[info]
sect.reloc_section = src_sect
# link_seg -> symbol table
sect.symtab = sections[link]
# store link in segment for this relocation
src_sect.relocations = sect
# a map for rela by tgt segment
by_sect = {}
src_sect.reloc_by_sect = by_sect
# now process all rela entries
symtab = sect.symtab
for entry in sect.rela:
# look up symbol of rela entry
sym_idx = entry.sym
sym = symtab.get_symbol(sym_idx)
entry.symbol = sym
# copy section we relocate from
entry.section = sym.section
# calc addend in segment
entry.section_addend = entry.addend + sym.value
# clear symbol if its empty
if sym.name_str == "":
entry.symbol = None
# add entry to section list
tgt_sect = entry.section
if by_sect.has_key(tgt_sect):
by_sect_list = by_sect[tgt_sect]
else:
by_sect_list = []
by_sect[tgt_sect] = by_sect_list
by_sect_list.append(entry)
# sort all by_seg entries
for sect in by_sect:
by_sect_list = by_sect[sect]
by_sect_list.sort(key=lambda x : x.offset)
def load(self, f):
"""load an ELF file from the given file object f
and return an ELFFile instance or None if loading failed"""
ef = ELFFile()
# read identifier
ident = ELFIdentifier()
ident_data = f.read(16)
ident.parse(ident_data)
ef.identifier = ident
# read header
hdr = ELFHeader()
hdr_data = f.read(36)
hdr.parse(hdr_data)
ef.header = hdr
# expect a non-empty section header
if hdr.shnum == 0:
raise ELFParseError("No segment header defined!")
# load all section headers
self._load_section_headers(f, ef)
# load and decode sections
self._load_sections(f, ef)
# get string table with segment names
strtab_idx = ef.header.shstrndx
strtab = ef.sections[strtab_idx]
if strtab.__class__ != ELFSectionStringTable:
raise ELFParseError("No strtab for segment header found! ")
# process sections
for sect in ef.sections:
# name all sections by using the string table
self._name_section(sect, strtab)
# resolve symbol table names
if sect.header.type_ == SHT_SYMTAB:
# store in file symtabs
ef.symtabs.append(sect)
# get names in symtab
self._resolve_symtab_names(sect, ef.sections)
# link sections to symbols
self._resolve_symtab_indices(sect, ef.sections)
# assign symbols to sections
self._assign_symbols_to_sections(sect)
# resolve rela links and symbols
for sect in ef.sections:
if sect.header.type_ == SHT_RELA:
self._resolve_rela_links(sect, ef.sections)
ef.relas.append(sect)
return ef
# mini test
if __name__ == '__main__':
    import sys
    reader = ELFReader()
    for fname in sys.argv[1:]:
        # bug fix: the original never closed the file handle; 'with' closes
        # it even if load() raises
        with open(fname, "rb") as fobj:
            ef = reader.load(fobj)
| {
"repo_name": "alpine9000/amiga_examples",
"path": "tools/external/amitools/amitools/binfmt/elf/ELFReader.py",
"copies": "1",
"size": "6319",
"license": "bsd-2-clause",
"hash": -7209164815298673000,
"line_mean": 27.9862385321,
"line_max": 80,
"alpha_frac": 0.6217755974,
"autogenerated": false,
"ratio": 3.4286489419424853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4550424539342485,
"avg_score": null,
"num_lines": null
} |
"""A class for SQLGitHub sessions.
Sample Usage:
g = Github(token)
s = SgSession(g, ["name", "description"], "abseil.repos")
print(s.Execute())
"""
import table as tb
import table_fetcher
from expression import SgExpression
from grouping import SgGrouping
from ordering import SgOrdering
from ordering import SgTableOrdering
# TODO(lnishan): Change it to SgSessionSimple and add SgSession to handle unions and joins.
class SgSession:
    """A class for SQLGitHub sessions.

    One session represents one SELECT-style query: field expressions plus
    optional source, WHERE condition, GROUP BY, HAVING, ORDER BY and LIMIT
    clauses. Execute() runs the query and returns an SgTable.
    """

    def __init__(self, github, field_exprs, source=None, condition=None, groups=None, having=None, orders=None, limit=None):
        """Store the clause pieces and build a fetcher for the referenced keys.

        Args:
            github: authenticated GitHub client handed to the table fetcher.
            field_exprs: list of SELECT expressions (unicode strings).
            source: a table label (e.g. "abseil.repos") or a nested SgSession.
            condition: WHERE expression, or None.
            groups: list of GROUP BY expressions, or None.
            having: HAVING expression, or None.
            orders: pair (list of ORDER BY expressions, ordering spec), or None.
            limit: max number of result rows, or None.
        """
        self._field_exprs = field_exprs
        self._source = source
        self._condition = condition
        self._groups = groups
        self._having = having
        self._orders = orders
        self._limit = limit
        # Collect every token referenced anywhere in the query so the fetcher
        # only retrieves the columns that are actually needed.
        rel_keys = SgExpression.ExtractTokensFromExpressions(self._field_exprs)
        if self._condition:
            rel_keys += SgExpression.ExtractTokensFromExpressions([self._condition])
        if self._groups:
            rel_keys += SgExpression.ExtractTokensFromExpressions(self._groups)
        if self._having:
            rel_keys += SgExpression.ExtractTokensFromExpressions([self._having])
        if self._orders:
            rel_keys += SgExpression.ExtractTokensFromExpressions(self._orders[0])
        rel_keys = list(set(rel_keys))
        # A wildcard anywhere means "fetch everything".
        if u"*" in rel_keys:
            rel_keys = [u"*"]
        self._fetcher = table_fetcher.SgTableFetcher(github, rel_keys)

    def _GetEmptyTable(self):
        """Return an empty SgTable carrying only the SELECT field names."""
        table = tb.SgTable()
        table.SetFields(self._field_exprs)
        return table

    def Execute(self):
        """Run the query and return the merged result table.

        Pipeline: fetch source -> WHERE -> evaluate expressions -> GROUP BY
        -> HAVING -> ORDER BY -> SELECT projection -> aggregate collapse
        -> merge groups -> LIMIT.
        """
        # source is either a label (eg. "google.issues") or a SgSession
        if self._source:
            source_table = self._source.Execute() if isinstance(self._source, SgSession) else self._fetcher.Fetch(self._source)
            if not source_table[:]:
                return self._GetEmptyTable()
            else:
                # Expand "*" to the concrete field list of the source.
                if u"*" in self._field_exprs:
                    self._field_exprs = source_table.GetFields()
        else:
            # No FROM clause: evaluate expressions against a one-row dummy.
            source_table = tb.SgTable()
            source_table.SetFields([u"Dummy Field"])
            source_table.Append([u"Dummy Value"])
        # evaluate where
        if self._condition:
            filtered_table = tb.SgTable()
            filtered_table.SetFields(source_table.GetFields())
            # meets[i] is the truth value of the condition for row i
            meets = SgExpression.EvaluateExpression(source_table, self._condition)
            for i, row in enumerate(source_table):
                if meets[i]:
                    filtered_table.Append(row)
        else:
            filtered_table = source_table
        if not filtered_table[:]:
            return self._GetEmptyTable()
        # evaluate all necessary expressions
        # in reversed order because we process from the rightmost item first
        select_tokens = SgExpression.ExtractTokensFromExpressions(self._field_exprs[:])
        # NOTE(review): eval_exprs aliases select_tokens, so the += below also
        # extends select_tokens; harmless since select_tokens is not reused.
        eval_exprs = select_tokens
        if self._orders:
            order_tokens = SgExpression.ExtractTokensFromExpressions(self._orders[0])
            eval_exprs += order_tokens
        if self._having:
            having_tokens = SgExpression.ExtractTokensFromExpressions([self._having])
            eval_exprs += having_tokens
        if self._groups:
            eval_exprs += self._groups
        res_table = SgExpression.EvaluateExpressions(filtered_table, eval_exprs)
        # group by
        if self._groups:
            res_tables = SgGrouping.GenerateGroups(res_table, self._groups)
        else:
            res_tables = [res_table]
        # having
        if self._having:
            filtered_tables = []
            for table in res_tables:
                # keep the group only if the HAVING predicate holds for it,
                # then slice off the helper columns added for HAVING
                if all(SgExpression.EvaluateExpression(table, self._having)):
                    filtered_tables.append(table.SliceCol(0, len(table.GetFields()) - len(having_tokens)))
            res_tables = filtered_tables
        # order by
        if self._orders:
            for table in res_tables:
                # re-append freshly evaluated order columns, sort, keep them
                table.Copy(table.SliceCol(0, len(table.GetFields()) - len(order_tokens)).Chain(SgExpression.EvaluateExpressions(table, self._orders[0])))
                ordering = SgOrdering(table, self._orders[1])
                table.Copy(ordering.Sort(keep_order_fields=True))
            # order the groups themselves as well
            ordering = SgTableOrdering(res_tables, self._orders[1])
            res_tables = ordering.Sort()
        # TODO(lnishan): Support having here
        # process select
        for table in res_tables:
            table.Copy(SgExpression.EvaluateExpressions(table, self._field_exprs))
        # check if all tokens in expressions are contained in aggregate functions
        check_exprs = [expr for expr in self._field_exprs if expr not in self._groups] if self._groups else self._field_exprs
        if SgExpression.IsAllTokensInAggregate(check_exprs):
            # pure-aggregate query: each group collapses to its first row
            for table in res_tables:
                table = table.SetTable([table[0]])
        merged_table = tb.SgTable()
        merged_table.SetFields(res_tables[0].GetFields())
        for table in res_tables:
            for row in table:
                merged_table.Append(row)
        # process limit
        if self._limit:
            merged_table.SetTable(merged_table[:self._limit])
        return merged_table
| {
"repo_name": "lnishan/SQLGitHub",
"path": "components/session.py",
"copies": "1",
"size": "5335",
"license": "mit",
"hash": -5547283115970506000,
"line_mean": 38.5185185185,
"line_max": 153,
"alpha_frac": 0.6131208997,
"autogenerated": false,
"ratio": 4.122874806800618,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002162532713535398,
"num_lines": 135
} |
#a class for the Kaplan-Meier estimator
from statsmodels.compat.python import range
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
class KAPLAN_MEIER(object):
    """Kaplan-Meier estimator of the survival function.

    NOTE(review): deprecated — __init__ raises RuntimeError immediately and
    points to the newer implementation in survival2.py, so instances of this
    class can never actually be created.

    data is indexed as data[row, col] (numpy-style); timesIn, groupIn and
    censoringIn are the column indices of the event times, the grouping
    variable and the censoring indicator (1 = event, otherwise censored).
    """

    def __init__(self, data, timesIn, groupIn, censoringIn):
        """Store the data array and column indices (unreachable in practice:
        the raise below always fires first)."""
        raise RuntimeError('Newer version of Kaplan-Meier class available in survival2.py')
        # NOTE(review): everything below the raise is dead code.
        #store the inputs
        self.data = data
        self.timesIn = timesIn
        self.groupIn = groupIn
        self.censoringIn = censoringIn

    def fit(self):
        """Compute the Kaplan-Meier estimate separately for each group.

        Populates:
        - self.results: one array per group with columns (time, number at
          risk, number of events, survival estimate, Greenwood std. error)
        - self.points: [x, y] step-function coordinates per group for plot()
        - self.censoredPoints: [time, #censored, survival, new-group flag]
          entries used by plot() to draw censoring ticks
        """
        #split the data into groups based on the predicting variable
        #get a set of all the groups
        groups = list(set(self.data[:,self.groupIn]))
        #create an empty list to store the data for different groups
        groupList = []
        #create an empty list for each group and add it to groups
        for i in range(len(groups)):
            groupList.append([])
        #iterate through all the groups in groups
        for i in range(len(groups)):
            #iterate though the rows of dataArray
            for j in range(len(self.data)):
                #test if this row has the correct group
                if self.data[j,self.groupIn] == groups[i]:
                    #add the row to groupList
                    groupList[i].append(self.data[j])
        #create an empty list to store the times for each group
        timeList = []
        #iterate through all the groups
        for i in range(len(groupList)):
            #create an empty list
            times = []
            #iterate through all the rows of the group
            for j in range(len(groupList[i])):
                #get a list of all the times in the group
                times.append(groupList[i][j][self.timesIn])
            #get a sorted set of the times and store it in timeList
            times = list(sorted(set(times)))
            timeList.append(times)
        #get a list of the number at risk and events at each time
        #create an empty list to store the results in
        timeCounts = []
        #create an empty list to hold points for plotting
        points = []
        #create a list for points where censoring occurs
        censoredPoints = []
        #iterate trough each group
        for i in range(len(groupList)):
            #initialize a variable to estimate the survival function
            survival = 1
            #initialize a variable to estimate the variance of
            #the survival function
            varSum = 0
            #initialize a counter for the number at risk
            riskCounter = len(groupList[i])
            #create a list for the counts for this group
            counts = []
            ##create a list for points to plot
            x = []
            y = []
            #iterate through the list of times
            #NOTE(review): counters (censoringNum, eventCounter, riskChange)
            #are set at the bottom of iteration j and consumed at the top of
            #iteration j+1 — the "if j != 0" guard makes this safe.
            for j in range(len(timeList[i])):
                if j != 0:
                    if j == 1:
                        #add an indicator to tell if the time
                        #starts a new group
                        groupInd = 1
                        #add (0,1) to the list of points
                        x.append(0)
                        y.append(1)
                        #add the point time to the right of that
                        x.append(timeList[i][j-1])
                        y.append(1)
                        #add the point below that at survival
                        x.append(timeList[i][j-1])
                        y.append(survival)
                        #add the survival to y
                        y.append(survival)
                    else:
                        groupInd = 0
                        #add survival twice to y
                        y.append(survival)
                        y.append(survival)
                        #add the time twice to x
                        x.append(timeList[i][j-1])
                        x.append(timeList[i][j-1])
                    #add each censored time, number of censorings and
                    #its survival to censoredPoints
                    censoredPoints.append([timeList[i][j-1],
                                           censoringNum,survival,groupInd])
                    #add the count to the list
                    counts.append([timeList[i][j-1],riskCounter,
                                   eventCounter,survival,
                                   sqrt(((survival)**2)*varSum)])
                    #increment the number at risk
                    riskCounter += -1*(riskChange)
                #initialize a counter for the change in the number at risk
                riskChange = 0
                #initialize a counter to zero
                eventCounter = 0
                #intialize a counter to tell when censoring occurs
                censoringCounter = 0
                censoringNum = 0
                #iterate through the observations in each group
                for k in range(len(groupList[i])):
                    #check of the observation has the given time
                    if (groupList[i][k][self.timesIn]) == (timeList[i][j]):
                        #increment the number at risk counter
                        riskChange += 1
                        #check if this is an event or censoring
                        if groupList[i][k][self.censoringIn] == 1:
                            #add 1 to the counter
                            eventCounter += 1
                        else:
                            censoringNum += 1
                #check if there are any events at this time
                if eventCounter != censoringCounter:
                    censoringCounter = eventCounter
                    #calculate the estimate of the survival function
                    survival *= ((float(riskCounter) -
                                  eventCounter)/(riskCounter))
                    try:
                        #calculate the estimate of the variance
                        varSum += (eventCounter)/((riskCounter)
                                                  *(float(riskCounter)-
                                                    eventCounter))
                    except ZeroDivisionError:
                        varSum = 0
            #append the last row to counts
            counts.append([timeList[i][len(timeList[i])-1],
                           riskCounter,eventCounter,survival,
                           sqrt(((survival)**2)*varSum)])
            #add the last time once to x
            x.append(timeList[i][len(timeList[i])-1])
            x.append(timeList[i][len(timeList[i])-1])
            #add the last survival twice to y
            y.append(survival)
            #y.append(survival)
            censoredPoints.append([timeList[i][len(timeList[i])-1],
                                   censoringNum,survival,1])
            #add the list for the group to al ist for all the groups
            timeCounts.append(np.array(counts))
            points.append([x,y])
        #returns a list of arrays, where each array has as it columns: the time,
        #the number at risk, the number of events, the estimated value of the
        #survival function at that time, and the estimated standard error at
        #that time, in that order
        self.results = timeCounts
        self.points = points
        self.censoredPoints = censoredPoints

    def plot(self):
        """Plot the fitted step functions with small vertical ticks at
        censoring times. Must be called after fit()."""
        x = []
        #iterate through the groups
        for i in range(len(self.points)):
            #plot x and y
            plt.plot(np.array(self.points[i][0]),np.array(self.points[i][1]))
            #create lists of all the x and y values
            x += self.points[i][0]
        for j in range(len(self.censoredPoints)):
            #check if censoring is occuring
            if (self.censoredPoints[j][1] != 0):
                #if this is the first censored point
                if (self.censoredPoints[j][3] == 1) and (j == 0):
                    #calculate a distance beyond 1 to place it
                    #so all the points will fit
                    dx = ((1./((self.censoredPoints[j][1])+1.))
                          *(float(self.censoredPoints[j][0])))
                    #iterate through all the censored points at this time
                    for k in range(self.censoredPoints[j][1]):
                        #plot a vertical line for censoring
                        plt.vlines((1+((k+1)*dx)),
                                   self.censoredPoints[j][2]-0.03,
                                   self.censoredPoints[j][2]+0.03)
                #if this censored point starts a new group
                elif ((self.censoredPoints[j][3] == 1) and
                      (self.censoredPoints[j-1][3] == 1)):
                    #calculate a distance beyond 1 to place it
                    #so all the points will fit
                    dx = ((1./((self.censoredPoints[j][1])+1.))
                          *(float(self.censoredPoints[j][0])))
                    #iterate through all the censored points at this time
                    for k in range(self.censoredPoints[j][1]):
                        #plot a vertical line for censoring
                        plt.vlines((1+((k+1)*dx)),
                                   self.censoredPoints[j][2]-0.03,
                                   self.censoredPoints[j][2]+0.03)
                #if this is the last censored point
                elif j == (len(self.censoredPoints) - 1):
                    #calculate a distance beyond the previous time
                    #so that all the points will fit
                    dx = ((1./((self.censoredPoints[j][1])+1.))
                          *(float(self.censoredPoints[j][0])))
                    #iterate through all the points at this time
                    for k in range(self.censoredPoints[j][1]):
                        #plot a vertical line for censoring
                        plt.vlines((self.censoredPoints[j-1][0]+((k+1)*dx)),
                                   self.censoredPoints[j][2]-0.03,
                                   self.censoredPoints[j][2]+0.03)
                #if this is a point in the middle of the group
                else:
                    #calcuate a distance beyond the current time
                    #to place the point, so they all fit
                    dx = ((1./((self.censoredPoints[j][1])+1.))
                          *(float(self.censoredPoints[j+1][0])
                            - self.censoredPoints[j][0]))
                    #iterate through all the points at this time
                    for k in range(self.censoredPoints[j][1]):
                        #plot a vetical line for censoring
                        plt.vlines((self.censoredPoints[j][0]+((k+1)*dx)),
                                   self.censoredPoints[j][2]-0.03,
                                   self.censoredPoints[j][2]+0.03)
        #set the size of the plot so it extends to the max x and above 1 for y
        plt.xlim((0,np.max(x)))
        plt.ylim((0,1.05))
        #label the axes
        plt.xlabel('time')
        plt.ylabel('survival')
        plt.show()

    def show_results(self):
        """Print a per-group text table of the fit() results
        (time, at risk, events, survival, std. error)."""
        #start a string that will be a table of the results
        resultsString = ''
        #iterate through all the groups
        for i in range(len(self.results)):
            #label the group and header
            resultsString += ('Group {0}\n\n'.format(i) +
                              'Time At Risk Events Survival Std. Err\n')
            for j in self.results[i]:
                #add the results to the string
                resultsString += (
                    '{0:<9d}{1:<12d}{2:<11d}{3:<13.4f}{4:<6.4f}\n'.format(
                        int(j[0]),int(j[1]),int(j[2]),j[3],j[4]))
        print(resultsString)
| {
"repo_name": "wzbozon/statsmodels",
"path": "statsmodels/sandbox/km_class.py",
"copies": "31",
"size": "11748",
"license": "bsd-3-clause",
"hash": -5926520682033970000,
"line_mean": 47.95,
"line_max": 91,
"alpha_frac": 0.4906367041,
"autogenerated": false,
"ratio": 4.438231960710238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00980300851805711,
"num_lines": 240
} |
#a class for the Kaplan-Meier estimator
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
class KAPLAN_MEIER(object):
    """Kaplan-Meier estimator of the survival function.

    NOTE(review): deprecated — __init__ raises RuntimeError immediately and
    points to the newer implementation in survival2.py, so instances of this
    class can never actually be created.

    data is indexed as data[row, col] (numpy-style); timesIn, groupIn and
    censoringIn are the column indices of the event times, the grouping
    variable and the censoring indicator (1 = event, otherwise censored).
    """

    def __init__(self, data, timesIn, groupIn, censoringIn):
        """Store the data array and column indices (unreachable in practice:
        the raise below always fires first)."""
        raise RuntimeError('Newer version of Kaplan-Meier class available in survival2.py')
        # NOTE(review): everything below the raise is dead code.
        #store the inputs
        self.data = data
        self.timesIn = timesIn
        self.groupIn = groupIn
        self.censoringIn = censoringIn

    def fit(self):
        """Compute the Kaplan-Meier estimate separately for each group.

        Populates:
        - self.results: one array per group with columns (time, number at
          risk, number of events, survival estimate, Greenwood std. error)
        - self.points: [x, y] step-function coordinates per group for plot()
        - self.censoredPoints: [time, #censored, survival, new-group flag]
          entries used by plot() to draw censoring ticks
        """
        #split the data into groups based on the predicting variable
        #get a set of all the groups
        groups = list(set(self.data[:,self.groupIn]))
        #create an empty list to store the data for different groups
        groupList = []
        #create an empty list for each group and add it to groups
        for i in range(len(groups)):
            groupList.append([])
        #iterate through all the groups in groups
        for i in range(len(groups)):
            #iterate though the rows of dataArray
            for j in range(len(self.data)):
                #test if this row has the correct group
                if self.data[j,self.groupIn] == groups[i]:
                    #add the row to groupList
                    groupList[i].append(self.data[j])
        #create an empty list to store the times for each group
        timeList = []
        #iterate through all the groups
        for i in range(len(groupList)):
            #create an empty list
            times = []
            #iterate through all the rows of the group
            for j in range(len(groupList[i])):
                #get a list of all the times in the group
                times.append(groupList[i][j][self.timesIn])
            #get a sorted set of the times and store it in timeList
            times = list(sorted(set(times)))
            timeList.append(times)
        #get a list of the number at risk and events at each time
        #create an empty list to store the results in
        timeCounts = []
        #create an empty list to hold points for plotting
        points = []
        #create a list for points where censoring occurs
        censoredPoints = []
        #iterate trough each group
        for i in range(len(groupList)):
            #initialize a variable to estimate the survival function
            survival = 1
            #initialize a variable to estimate the variance of
            #the survival function
            varSum = 0
            #initialize a counter for the number at risk
            riskCounter = len(groupList[i])
            #create a list for the counts for this group
            counts = []
            ##create a list for points to plot
            x = []
            y = []
            #iterate through the list of times
            #NOTE(review): counters (censoringNum, eventCounter, riskChange)
            #are set at the bottom of iteration j and consumed at the top of
            #iteration j+1 — the "if j != 0" guard makes this safe.
            for j in range(len(timeList[i])):
                if j != 0:
                    if j == 1:
                        #add an indicator to tell if the time
                        #starts a new group
                        groupInd = 1
                        #add (0,1) to the list of points
                        x.append(0)
                        y.append(1)
                        #add the point time to the right of that
                        x.append(timeList[i][j-1])
                        y.append(1)
                        #add the point below that at survival
                        x.append(timeList[i][j-1])
                        y.append(survival)
                        #add the survival to y
                        y.append(survival)
                    else:
                        groupInd = 0
                        #add survival twice to y
                        y.append(survival)
                        y.append(survival)
                        #add the time twice to x
                        x.append(timeList[i][j-1])
                        x.append(timeList[i][j-1])
                    #add each censored time, number of censorings and
                    #its survival to censoredPoints
                    censoredPoints.append([timeList[i][j-1],
                                           censoringNum,survival,groupInd])
                    #add the count to the list
                    counts.append([timeList[i][j-1],riskCounter,
                                   eventCounter,survival,
                                   sqrt(((survival)**2)*varSum)])
                    #increment the number at risk
                    riskCounter += -1*(riskChange)
                #initialize a counter for the change in the number at risk
                riskChange = 0
                #initialize a counter to zero
                eventCounter = 0
                #intialize a counter to tell when censoring occurs
                censoringCounter = 0
                censoringNum = 0
                #iterate through the observations in each group
                for k in range(len(groupList[i])):
                    #check of the observation has the given time
                    if (groupList[i][k][self.timesIn]) == (timeList[i][j]):
                        #increment the number at risk counter
                        riskChange += 1
                        #check if this is an event or censoring
                        if groupList[i][k][self.censoringIn] == 1:
                            #add 1 to the counter
                            eventCounter += 1
                        else:
                            censoringNum += 1
                #check if there are any events at this time
                if eventCounter != censoringCounter:
                    censoringCounter = eventCounter
                    #calculate the estimate of the survival function
                    survival *= ((float(riskCounter) -
                                  eventCounter)/(riskCounter))
                    try:
                        #calculate the estimate of the variance
                        varSum += (eventCounter)/((riskCounter)
                                                  *(float(riskCounter)-
                                                    eventCounter))
                    except ZeroDivisionError:
                        varSum = 0
            #append the last row to counts
            counts.append([timeList[i][len(timeList[i])-1],
                           riskCounter,eventCounter,survival,
                           sqrt(((survival)**2)*varSum)])
            #add the last time once to x
            x.append(timeList[i][len(timeList[i])-1])
            x.append(timeList[i][len(timeList[i])-1])
            #add the last survival twice to y
            y.append(survival)
            #y.append(survival)
            censoredPoints.append([timeList[i][len(timeList[i])-1],
                                   censoringNum,survival,1])
            #add the list for the group to al ist for all the groups
            timeCounts.append(np.array(counts))
            points.append([x,y])
        #returns a list of arrays, where each array has as it columns: the time,
        #the number at risk, the number of events, the estimated value of the
        #survival function at that time, and the estimated standard error at
        #that time, in that order
        self.results = timeCounts
        self.points = points
        self.censoredPoints = censoredPoints

    def plot(self):
        """Plot the fitted step functions with small vertical ticks at
        censoring times. Must be called after fit()."""
        x = []
        #iterate through the groups
        for i in range(len(self.points)):
            #plot x and y
            plt.plot(np.array(self.points[i][0]),np.array(self.points[i][1]))
            #create lists of all the x and y values
            x += self.points[i][0]
        for j in range(len(self.censoredPoints)):
            #check if censoring is occuring
            if (self.censoredPoints[j][1] != 0):
                #if this is the first censored point
                if (self.censoredPoints[j][3] == 1) and (j == 0):
                    #calculate a distance beyond 1 to place it
                    #so all the points will fit
                    dx = ((1./((self.censoredPoints[j][1])+1.))
                          *(float(self.censoredPoints[j][0])))
                    #iterate through all the censored points at this time
                    for k in range(self.censoredPoints[j][1]):
                        #plot a vertical line for censoring
                        plt.vlines((1+((k+1)*dx)),
                                   self.censoredPoints[j][2]-0.03,
                                   self.censoredPoints[j][2]+0.03)
                #if this censored point starts a new group
                elif ((self.censoredPoints[j][3] == 1) and
                      (self.censoredPoints[j-1][3] == 1)):
                    #calculate a distance beyond 1 to place it
                    #so all the points will fit
                    dx = ((1./((self.censoredPoints[j][1])+1.))
                          *(float(self.censoredPoints[j][0])))
                    #iterate through all the censored points at this time
                    for k in range(self.censoredPoints[j][1]):
                        #plot a vertical line for censoring
                        plt.vlines((1+((k+1)*dx)),
                                   self.censoredPoints[j][2]-0.03,
                                   self.censoredPoints[j][2]+0.03)
                #if this is the last censored point
                elif j == (len(self.censoredPoints) - 1):
                    #calculate a distance beyond the previous time
                    #so that all the points will fit
                    dx = ((1./((self.censoredPoints[j][1])+1.))
                          *(float(self.censoredPoints[j][0])))
                    #iterate through all the points at this time
                    for k in range(self.censoredPoints[j][1]):
                        #plot a vertical line for censoring
                        plt.vlines((self.censoredPoints[j-1][0]+((k+1)*dx)),
                                   self.censoredPoints[j][2]-0.03,
                                   self.censoredPoints[j][2]+0.03)
                #if this is a point in the middle of the group
                else:
                    #calcuate a distance beyond the current time
                    #to place the point, so they all fit
                    dx = ((1./((self.censoredPoints[j][1])+1.))
                          *(float(self.censoredPoints[j+1][0])
                            - self.censoredPoints[j][0]))
                    #iterate through all the points at this time
                    for k in range(self.censoredPoints[j][1]):
                        #plot a vetical line for censoring
                        plt.vlines((self.censoredPoints[j][0]+((k+1)*dx)),
                                   self.censoredPoints[j][2]-0.03,
                                   self.censoredPoints[j][2]+0.03)
        #set the size of the plot so it extends to the max x and above 1 for y
        plt.xlim((0,np.max(x)))
        plt.ylim((0,1.05))
        #label the axes
        plt.xlabel('time')
        plt.ylabel('survival')
        plt.show()

    def show_results(self):
        """Print a per-group text table of the fit() results
        (time, at risk, events, survival, std. error)."""
        #start a string that will be a table of the results
        resultsString = ''
        #iterate through all the groups
        for i in range(len(self.results)):
            #label the group and header
            resultsString += ('Group {0}\n\n'.format(i) +
                              'Time At Risk Events Survival Std. Err\n')
            for j in self.results[i]:
                #add the results to the string
                resultsString += (
                    '{0:<9d}{1:<12d}{2:<11d}{3:<13.4f}{4:<6.4f}\n'.format(
                        int(j[0]),int(j[1]),int(j[2]),j[3],j[4]))
        print(resultsString)
| {
"repo_name": "wesm/statsmodels",
"path": "scikits/statsmodels/sandbox/km_class.py",
"copies": "5",
"size": "11704",
"license": "bsd-3-clause",
"hash": 5593517864010706000,
"line_mean": 47.9707112971,
"line_max": 91,
"alpha_frac": 0.4892344498,
"autogenerated": false,
"ratio": 4.438376943496397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7427611393296397,
"avg_score": null,
"num_lines": null
} |
"""A class for the Lemke Howson algorithm"""
import warnings
from itertools import cycle
import numpy as np
from nashpy.integer_pivoting import (
make_tableau,
non_basic_variables,
pivot_tableau,
)
def shift_tableau(tableau, shape):
    """
    Relabel a tableau so the label sets of a pair of tableaux coincide.

    The non-slack columns (all but the last) are rotated right by ``shape[0]``
    positions and a fresh column of ones is appended on the right.

    Parameters
    ----------
    tableau : array
        a tableau corresponding to a vertex of a polytope.
    shape : tuple
        the required shape of the tableau

    Returns
    -------
    array
        The shifted tableau
    """
    rotated_body = np.roll(tableau[:, :-1], shape[0], axis=1)
    ones_column = np.ones((shape[0], 1))
    return np.append(rotated_body, ones_column, axis=1)
def tableau_to_strategy(tableau, basic_labels, strategy_labels):
    """
    Read a normalised strategy vector off a tableau.

    For every strategy label that is basic, each nonzero entry of its column
    contributes the ratio (last column / entry); non-basic labels contribute
    a zero weight. The resulting vector is normalised to sum to one.

    Parameters
    ----------
    tableau : array
        a tableau corresponding to a vertex of a polytope.
    basic_labels : set
        the set of basic labels.
    strategy_labels : set
        the set of labels that correspond to strategies.

    Returns
    -------
    array
        A strategy.
    """
    weights = []
    for label in strategy_labels:
        if label not in basic_labels:
            weights.append(0)
            continue
        for row_index, coefficient in enumerate(tableau[:, label]):
            if coefficient != 0:
                weights.append(tableau[row_index, -1] / coefficient)
    unnormalised = np.array(weights)
    return unnormalised / sum(unnormalised)
def lemke_howson(A, B, initial_dropped_label=0):
    """
    Obtain the Nash equilibria using the Lemke Howson algorithm implemented
    using integer pivoting.

    Algorithm implemented here is Algorithm 3.6 of [Nisan2007]_.

    1. Start at the artificial equilibrium (which is fully labeled)
    2. Choose an initial label to drop and move in the polytope for which
       the vertex has that label to the edge
       that does not share that label. (This is implemented using integer
       pivoting)
    3. A label will now be duplicated in the other polytope, drop it in a
       similar way.
    4. Repeat steps 2 and 3 until have Nash Equilibrium.

    Parameters
    ----------
    A : array
        The row player payoff matrix
    B : array
        The column player payoff matrix
    initial_dropped_label: int
        The initial dropped label.

    Returns
    -------
    Tuple
        An equilibria
    """
    # Shift both payoff matrices to be strictly positive; this leaves the
    # set of equilibria unchanged.
    if np.min(A) <= 0:
        A = A + abs(np.min(A)) + 1
    if np.min(B) <= 0:
        B = B + abs(np.min(B)) + 1

    # build tableaux
    col_tableau = make_tableau(A)
    col_tableau = shift_tableau(col_tableau, A.shape)
    row_tableau = make_tableau(B.transpose())
    full_labels = set(range(sum(A.shape)))

    # Alternate between the two tableaux, starting with the one that owns
    # the initial dropped label. The loop below relies on pivot_tableau
    # mutating the tableau arrays in place: cycle() keeps yielding the same
    # two objects while the while-condition re-reads their label sets.
    if initial_dropped_label in non_basic_variables(row_tableau):
        tableux = cycle((row_tableau, col_tableau))
    else:
        tableux = cycle((col_tableau, row_tableau))

    # First pivot (to drop a label)
    entering_label = pivot_tableau(next(tableux), initial_dropped_label)
    # Keep dropping the duplicated label in the other tableau until the
    # pair of vertices is fully labeled.
    while (
        non_basic_variables(row_tableau).union(non_basic_variables(col_tableau))
        != full_labels
    ):
        entering_label = pivot_tableau(next(tableux), next(iter(entering_label)))

    # Read the mixed strategies off the final tableaux: row strategy from
    # the first A.shape[0] labels, column strategy from the remaining ones.
    row_strategy = tableau_to_strategy(
        row_tableau, non_basic_variables(col_tableau), range(A.shape[0])
    )
    col_strategy = tableau_to_strategy(
        col_tableau,
        non_basic_variables(row_tableau),
        range(A.shape[0], sum(A.shape)),
    )
    # A wrong-shaped probability vector signals failure (degenerate game);
    # warn but still return whatever was computed.
    if row_strategy.shape != (A.shape[0],) and col_strategy.shape != (A.shape[0],):
        msg = """The Lemke Howson algorithm has returned probability vectors of
incorrect shapes. This indicates an error. Your game could be degenerate."""
        warnings.warn(msg, RuntimeWarning)
    return row_strategy, col_strategy
| {
"repo_name": "drvinceknight/Nashpy",
"path": "src/nashpy/algorithms/lemke_howson.py",
"copies": "1",
"size": "3796",
"license": "mit",
"hash": -3560653609190244000,
"line_mean": 26.9117647059,
"line_max": 83,
"alpha_frac": 0.6198630137,
"autogenerated": false,
"ratio": 3.7142857142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4834148727985714,
"avg_score": null,
"num_lines": null
} |
"""A class for the Lemke Howson algorithm with lexicographical ordering"""
from itertools import cycle
import numpy as np
from nashpy.integer_pivoting import make_tableau, pivot_tableau_lex
from .lemke_howson import shift_tableau, tableau_to_strategy
def lemke_howson_lex(A, B, initial_dropped_label=0):
    """
    Obtain the Nash equilibria using the Lemke Howson algorithm implemented
    using lexicographical integer pivoting. (Able to solve degenerate games)
    1. Start at the artificial equilibrium (which is fully labeled)
    2. Choose an initial label to drop and move in the polytope for which
       the vertex has that label to the edge that does not share that label.
       (This is implemented using integer pivoting and the choice of label
       to drop is implemented using lexicographical ordering)
    3. A label will now be duplicated in the other polytope, drop it in a
       similar way.
    4. Repeat steps 2 and 3 until have Nash Equilibrium.
    Parameters
    ----------
    A : array
        The row player payoff matrix
    B : array
        The column player payoff matrix
    initial_dropped_label: int
        The initial dropped label.
    Returns
    -------
    Tuple
        An equilibria
    """
    # Shift both payoff matrices to be strictly positive; this does not
    # change the equilibria of the game but is required by the tableau
    # construction.
    if np.min(A) <= 0:
        A = A + abs(np.min(A)) + 1
    if np.min(B) <= 0:
        B = B + abs(np.min(B))  + 1
    # build tableaux
    col_tableau = make_tableau(A)
    col_tableau = shift_tableau(col_tableau, A.shape)
    row_tableau = make_tableau(B.transpose())
    # Labels 0..m-1 are row strategies, m..m+n-1 are column strategies.
    full_labels = set(range(sum(A.shape)))
    # slack variables
    row_slack_variables = range(B.shape[0], sum(B.shape))
    col_slack_variables = range(A.shape[0])
    # non-basic variables
    row_non_basic_variables = full_labels - set(row_slack_variables)
    col_non_basic_variables = full_labels - set(col_slack_variables)
    # Alternate between the two tableaux, starting with the one that
    # currently holds the initial dropped label among its non-basic
    # variables.  Each cycle element bundles a tableau with its slack and
    # non-basic variable bookkeeping.
    if initial_dropped_label in row_non_basic_variables:
        tableaux = cycle(
            (
                (row_tableau, row_slack_variables, row_non_basic_variables),
                (col_tableau, col_slack_variables, col_non_basic_variables),
            )
        )
    else:
        tableaux = cycle(
            (
                (col_tableau, col_slack_variables, col_non_basic_variables),
                (row_tableau, row_slack_variables, row_non_basic_variables),
            )
        )
    # First pivot (to drop a label)
    next_tableau, next_slack_variables, next_non_basic_variables = next(tableaux)
    entering_label = pivot_tableau_lex(
        next_tableau,
        initial_dropped_label,
        next_slack_variables,
        next_non_basic_variables,
    )
    # keeps track of each tableau's non-basic variables
    next_non_basic_variables.add(entering_label)
    next_non_basic_variables.remove(initial_dropped_label)
    # Keep pivoting until the pair of tableaux is fully labeled, i.e. the
    # union of both non-basic variable sets covers every label.
    while col_non_basic_variables.union(row_non_basic_variables) != full_labels:
        next_tableau, next_slack_variables, next_non_basic_variables = next(tableaux)
        # the first label is 'entering' in the sense that it will enter the next
        # tableau's set of basic variables
        just_entered_label = entering_label
        entering_label = pivot_tableau_lex(
            next_tableau,
            entering_label,
            next_slack_variables,
            next_non_basic_variables,
        )
        next_non_basic_variables.add(entering_label)
        next_non_basic_variables.remove(just_entered_label)
    # Read the mixed strategies off each tableau; the basic variables of a
    # tableau are the complement of its non-basic variables.
    row_strategy = tableau_to_strategy(
        row_tableau,
        full_labels - row_non_basic_variables,
        range(A.shape[0]),
    )
    col_strategy = tableau_to_strategy(
        col_tableau,
        full_labels - col_non_basic_variables,
        range(A.shape[0], sum(A.shape)),
    )
    return row_strategy, col_strategy
| {
"repo_name": "drvinceknight/Nashpy",
"path": "src/nashpy/algorithms/lemke_howson_lex.py",
"copies": "1",
"size": "3808",
"license": "mit",
"hash": 902916041433906600,
"line_mean": 31.547008547,
"line_max": 85,
"alpha_frac": 0.6402310924,
"autogenerated": false,
"ratio": 3.6232159847764036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4763447077176403,
"avg_score": null,
"num_lines": null
} |
"""A class for the vertex enumeration algorithm"""
import numpy as np
from nashpy.polytope import build_halfspaces, non_trivial_vertices
def vertex_enumeration(A, B):
    """
    Obtain the Nash equilibria using enumeration of the vertices of the best
    response polytopes.
    Algorithm implemented here is Algorithm 3.5 of [Nisan2007]_
    1. Build best responses polytopes of both players
    2. For each vertex pair of both polytopes
    3. Check if pair is fully labelled
    4. Return the normalised pair
    Parameters
    ----------
    A : array
        The row player utility matrix.
    B : array
        The column player utility matrix
    Yields
    -------
    tuple
        The equilibria.
    """
    # Shift the utilities to be non-negative; this does not change the
    # equilibria but is required by the polytope construction.
    if np.min(A) < 0:
        A = A + abs(np.min(A))
    if np.min(B) < 0:
        B = B + abs(np.min(B))
    number_of_row_strategies, row_dimension = A.shape
    max_label = number_of_row_strategies + row_dimension
    full_labels = set(range(max_label))
    row_halfspaces = build_halfspaces(B.transpose())
    col_halfspaces = build_halfspaces(A)
    # Materialise the column polytope's vertices once.  `non_trivial_vertices`
    # is re-invoked lazily, so the original code re-enumerated every column
    # vertex for each row vertex (an accidental repeated computation).
    col_vertices = list(non_trivial_vertices(col_halfspaces))
    for row_v, row_l in non_trivial_vertices(row_halfspaces):
        # Translate row labels into the shared 0..max_label-1 label space.
        adjusted_row_l = set(
            (label + number_of_row_strategies) % (max_label) for label in row_l
        )
        for col_v, col_l in col_vertices:
            # A fully labelled pair of vertices is a Nash equilibrium;
            # normalise each vertex so it is a probability vector.
            if adjusted_row_l.union(col_l) == full_labels:
                yield row_v / sum(row_v), col_v / sum(col_v)
| {
"repo_name": "drvinceknight/Nashpy",
"path": "src/nashpy/algorithms/vertex_enumeration.py",
"copies": "1",
"size": "1448",
"license": "mit",
"hash": 2097955600661822500,
"line_mean": 26.8461538462,
"line_max": 79,
"alpha_frac": 0.6256906077,
"autogenerated": false,
"ratio": 3.497584541062802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4623275148762802,
"avg_score": null,
"num_lines": null
} |
## A class for working with trajectories (basically animation paths)
## J Eisenmann
## ACCAD
## 2012-13
from IterativeDynamicTimeWarping import *
from Vector import *
from Plane import *
from Cylinder import *
import maya.cmds as mc
class Trajectory:
""" A class to hold spatio-temporal path information """
def __init__( self, name="" ):
self.name = name
self.points = {}
self.searchList = {} # the dict of trajectories to check for a match (indexed by joint name)
self.dtws = []
self.closest = -1
self.closestJoint = None # the closest motion path in the searchList (measured by DTW distance)
self.normal = None # the selected timespan
self.planePt = None
def Clear( self ):
""" removes all points from the trajectory """
self.points.clear()
del(self.dtws[:])
self.closest = -1
self.closestJoint = None
self.timespan = None
self.normal = None
self.planePt = None
def AddPoint( self, p, t=None ):
if not type(p) == Vector:
try:
p = Vector(p)
except:
print "Error: input point p must be a Vector"
return
if t:
self.points[t] = p
else:
self.points[ len(self.points.keys()) ] = p # if no time is provided, just use and index
self.UpdateDTWs()
def SetSearchList( self, trajectories ):
""" Sets the dict of trajectories to check for a match """
self.searchList = trajectories
def SetUpDTWs( self ):
""" Initializes the DTWs """
del self.dtws[:]
do_subsequence = True
selfData = [ [self.points[t].x, self.points[t].y, self.points[t].z] for t in sorted(self.points.keys()) ]
for joint in sorted(self.searchList.keys()):
jointMotionPath = self.searchList[joint]
currentCam = mc.lookThru(q=True)
camPos = Vector(mc.xform(currentCam,q=True,t=True))
otherData = [ Plane(self.normal,self.planePt).intersectWithRay( camPos, jointMotionPath.points[t] ).asList() for t in sorted(jointMotionPath.points.keys()) ]
self.dtws.append( DTW( selfData, otherData, do_subsequence ) )
def UpdateDTWs( self ):
""" Augments the cost matrices of the DTWs and re-solves for the optimal paths """
minCost = None
minP = None
minC = float("inf")
for i, (joint, dtw) in enumerate( zip( sorted(self.searchList.keys()), self.dtws ) ):
if dtw.P and dtw.minCost and dtw.D: # if not first time, get an updated optimal cost and path
selfData = [ [self.points[t].x, self.points[t].y, self.points[t].z] for t in sorted(self.points.keys()) ]
P,C,M = dtw.UpdateX( selfData )
else: # if first time, fresh start
P,C,M = dtw.DTW()
if C < minC:
minCost = M
minP = P
minC = C
self.closestJoint = joint
self.closest = i
start = 0
for s,step in enumerate(minP):
if step[0] > 0:
start = sorted(self.searchList[joint].points.keys())[s-1]
break
stop = sorted(self.searchList[joint].points.keys())[minP[-1][1]]
if stop-start > 1:
self.timespan = [ start, stop ]
def Center( self ):
""" Returns the center of this trajectory """
psum = Vector()
for p in self.points.values():
psum = psum + p
return psum/len(self.points.values())
def ClosestTimeTo( self, point, plane=None ):
""" Returns the closest point in this trajectory to the given point in 3d space (or 2d if plane is defined) """
if not type(point) == Vector:
point = Vector(point)
if plane:
point = point.projectToPlane(plane.normal,planePt=plane.point)
minDist = float("inf")
ft = None
camPos = Vector( mc.xform(mc.lookThru(q=True), q=True, t=True) )
for i,t in enumerate(sorted(self.points.keys())):
p = self.points[t]
if plane:
p = p.projectToPlane(plane.normal,planePt=plane.point)
dist = (point-p).mag()
else:
ray = (p-camPos).norm() # build a ray from the camera to the path point in question
dist = ray.cross(point-p).mag()
if minDist > dist:
minDist = dist
ft = t
return ft # return the key of the closest point in the points dictionary
def ClosestPointTo( self, point, plane=None ):
""" Returns the key of the closest point in this trajectory to the given point in 3d space (or 2d if plane is defined) """
return self.points[self.ClosestTimeTo(point,plane=plane)]
def DistanceTo( self, point, plane=None ):
""" Returns the distance between the closest point in this trajectory and a point in 3d space (or 2d if plane is defined) """
if plane:
point = point.projectToPlane(plane.normal,planePt=plane.point)
return ( point - self.ClosestPointTo(point, plane=plane) ).mag()
def __repr__( self ):
""" So we can print this object """
string = ""
for t in sorted( self.points.keys() ):
string += "%.2f:\t%s\n"%(t, self.points[t])
return string
| {
"repo_name": "jeisenma/traceSelectionInMaya",
"path": "scripts/Trajectory.py",
"copies": "1",
"size": "5614",
"license": "mit",
"hash": 5848402642529200000,
"line_mean": 40.5851851852,
"line_max": 169,
"alpha_frac": 0.5545065907,
"autogenerated": false,
"ratio": 3.879751209398756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4934257800098756,
"avg_score": null,
"num_lines": null
} |
"""A class for working with vector representations."""
import json
import logging
import os
from io import open
import numpy as np
from tqdm import tqdm
logger = logging.getLogger(__name__)
class Reach(object):
    """
    Work with vector representations of items.
    Supports functions for calculating fast batched similarity
    between items or composite representations of items.
    Parameters
    ----------
    vectors : numpy array
        The vector space.
    items : list
        A list of items. Length must be equal to the number of vectors, and
        aligned with the vectors.
    name : string, optional, default ''
        A string giving the name of the current reach. Only useful if you
        have multiple spaces and want to keep track of them.
    unk_index : int or None, optional, default None
        The index of the UNK item. If this is None, any attempts at vectorizing
        OOV items will throw an error.
    Attributes
    ----------
    items : dict
        A mapping from items to ids.
    indices : dict
        A mapping from ids to items.
    vectors : numpy array
        The array representing the vector space.
    unk_index : int
        The integer index of your unknown glyph. This glyph will be inserted
        into your BoW space whenever an unknown item is encountered.
    norm_vectors : numpy array
        A normalized version of the vector space.
    size : int
        The dimensionality of the vector space.
    name : string
        The name of the Reach instance.
    """
    def __init__(self, vectors, items, name="", unk_index=None):
        """Initialize a Reach instance with an array and list of items."""
        if len(items) != len(vectors):
            raise ValueError(
                "Your vector space and list of items are not "
                "the same length: "
                f"{len(vectors)} != {len(items)}"
            )
        if isinstance(items, (dict, set)):
            raise ValueError(
                "Your item list is a set or dict, and might not "
                "retain order in the conversion to internal look"
                "-ups. Please convert it to list and check the "
                "order."
            )
        self.items = {w: idx for idx, w in enumerate(items)}
        self.indices = {v: k for k, v in self.items.items()}
        self.vectors = np.asarray(vectors)
        self.unk_index = unk_index
        self.name = name
    @property
    def size(self):
        """The dimensionality of the vector space."""
        return self.vectors.shape[1]
    @property
    def vectors(self):
        return self._vectors
    @vectors.setter
    def vectors(self, x):
        x = np.array(x)
        # The space must be 2d and aligned with the item list.
        assert np.ndim(x) == 2 and x.shape[0] == len(self.items)
        self._vectors = x
        # Make sure norm vectors is updated.
        if hasattr(self, "_norm_vectors"):
            self._norm_vectors = self.normalize(x)
    @property
    def norm_vectors(self):
        # Lazily computed and cached; invalidated by the vectors setter.
        if not hasattr(self, "_norm_vectors"):
            self._norm_vectors = self.normalize(self.vectors)
        return self._norm_vectors
    @staticmethod
    def load(
        pathtovector,
        wordlist=(),
        num_to_load=None,
        truncate_embeddings=None,
        unk_word=None,
        sep=" ",
        recover_from_errors=False,
        desired_dtype="float32",
        **kwargs,
    ):
        r"""
        Read a file in word2vec .txt format.
        The load function will raise a ValueError when trying to load items
        which do not conform to line lengths.
        A word2vec-style header line (NUMBER OF ITEMS, SIZE OF VECTOR) is
        detected automatically.
        Parameters
        ----------
        pathtovector : string
            The path to the vector file.
        wordlist : iterable, optional, default ()
            A list of words you want loaded from the vector file. If this is
            None (default), all words will be loaded.
        num_to_load : int, optional, default None
            The number of items to load from the file. Because loading can take
            some time, it is sometimes useful to only load the first n items
            from a vector file for quick inspection.
        truncate_embeddings : int, optional, default None
            If this value is not None, the vectors in the vector space will
            be truncated to the number of dimensions indicated by this value.
        unk_word : object
            The object to treat as UNK in your vector space. If this is not
            in your items dictionary after loading, we add it with a zero
            vector.
        sep : string, optional, default " "
            The field separator used in the vector file.
        recover_from_errors : bool
            If this flag is True, the model will continue after encountering
            duplicates or other errors.
        desired_dtype : string, optional, default "float32"
            The dtype to cast the loaded vectors to.
        Returns
        -------
        r : Reach
            An initialized Reach instance.
        """
        vectors, items = Reach._load(
            pathtovector,
            wordlist,
            num_to_load,
            truncate_embeddings,
            sep,
            recover_from_errors,
        )
        if unk_word is not None:
            if unk_word not in set(items):
                # Prepend a zero vector for the unknown word.
                unk_vec = np.zeros((1, vectors.shape[1]))
                vectors = np.concatenate([unk_vec, vectors], 0)
                items = [unk_word] + items
                unk_index = 0
            else:
                unk_index = items.index(unk_word)
        else:
            unk_index = None
        return Reach(
            vectors.astype(desired_dtype),
            items,
            name=os.path.split(pathtovector)[-1],
            unk_index=unk_index,
        )
    @staticmethod
    def _load(
        pathtovector,
        wordlist,
        num_to_load,
        truncate_embeddings,
        sep,
        recover_from_errors,
    ):
        """Load a matrix and wordlist from a .vec file."""
        vectors = []
        addedwords = set()
        words = []
        try:
            wordlist = set(wordlist)
        except (ValueError, TypeError):
            # set() raises TypeError for non-iterable input; the original
            # only caught ValueError, which set() does not raise here.
            wordlist = set()
        logger.info(f"Loading {pathtovector}")
        # Read the first line with a context manager so the handle is closed
        # (it was previously leaked), and with the same utf-8 encoding used
        # for the main read below.
        with open(pathtovector, encoding="utf-8") as f:
            firstline = f.readline().strip()
        try:
            num, size = firstline.split(sep)
            num, size = int(num), int(size)
            logger.info(f"Vector space: {num} by {size}")
            header = True
        except ValueError:
            size = len(firstline.split(sep)) - 1
            logger.info(f"Vector space: {size} dim, # items unknown")
            word, rest = firstline.split(sep, 1)
            # If the first line is correctly parseable, set header to False.
            header = False
        if truncate_embeddings is None or truncate_embeddings == 0:
            truncate_embeddings = size
        for idx, line in enumerate(open(pathtovector, encoding="utf-8")):
            if header and idx == 0:
                continue
            word, rest = line.rstrip(" \n").split(sep, 1)
            if wordlist and word not in wordlist:
                continue
            if word in addedwords:
                e = f"Duplicate: {word} on line {idx+1} was in the vector space twice"
                if recover_from_errors:
                    print(e)
                    continue
                raise ValueError(e)
            if len(rest.split(sep)) != size:
                e = (
                    f"Incorrect input at index {idx+1}, size"
                    f"is {len(rest.split())}, expected {size}."
                )
                if recover_from_errors:
                    print(e)
                    continue
                raise ValueError(e)
            words.append(word)
            addedwords.add(word)
            # NOTE: np.fromstring's text mode is deprecated in newer NumPy
            # versions; kept for behavioural compatibility.
            vectors.append(np.fromstring(rest, sep=sep)[:truncate_embeddings])
            if num_to_load is not None and len(addedwords) >= num_to_load:
                break
        vectors = np.array(vectors).astype(np.float32)
        logger.info("Loading finished")
        if wordlist:
            diff = wordlist - addedwords
            if diff:
                logger.info(
                    "Not all items from your wordlist were in your "
                    f"vector space: {diff}."
                )
        return vectors, words
    def __getitem__(self, item):
        """Get the vector for a single item."""
        return self.vectors[self.items[item]]
    def vectorize(self, tokens, remove_oov=False, norm=False):
        """
        Vectorize a sentence by replacing all items with their vectors.
        Parameters
        ----------
        tokens : object or list of objects
            The tokens to vectorize.
        remove_oov : bool, optional, default False
            Whether to remove OOV items. If False, OOV items are replaced by
            the UNK glyph. If this is True, the returned sequence might
            have a different length than the original sequence.
        norm : bool, optional, default False
            Whether to return the unit vectors, or the regular vectors.
        Returns
        -------
        s : numpy array
            An M * N matrix, where every item has been replaced by
            its vector. OOV items are either removed, or replaced
            by the value of the UNK glyph.
        """
        if not tokens:
            raise ValueError("You supplied an empty list.")
        index = self.bow(tokens, remove_oov=remove_oov)
        if not index:
            raise ValueError(
                f"You supplied a list with only OOV tokens: {tokens}, "
                "which then got removed. Set remove_oov to False,"
                " or filter your sentences to remove any in which"
                " all items are OOV."
            )
        index = np.asarray(index)
        if norm:
            return self.norm_vectors[index]
        else:
            return self.vectors[index]
    def mean_vector(self, tokens, remove_oov=False, norm=False):
        """
        Get the mean vector of a sentence.
        Parameters
        ----------
        tokens : object or list of objects
            The tokens to take the mean of.
        remove_oov : bool, optional, default False
            Whether to remove OOV items. If False, OOV items are replaced by
            the UNK glyph.
        norm : bool, optional, default False
            Whether to take the mean of the unit vectors, or the regular vectors.
        Returns
        -------
        mean : numpy array
            A vector with M dimensions, where M is the size of the vector space.
        """
        return self.vectorize(tokens, remove_oov, norm).mean(0)
    def bow(self, tokens, remove_oov=False):
        """
        Create a bow representation of a list of tokens.
        Parameters
        ----------
        tokens : list.
            The list of items to change into a bag of words representation.
        remove_oov : bool.
            Whether to remove OOV items from the input.
            If this is True, the length of the returned BOW representation
            might not be the length of the original representation.
        Returns
        -------
        bow : list
            A BOW representation of the list of items.
        """
        out = []
        for t in tokens:
            try:
                out.append(self.items[t])
            except KeyError:
                if remove_oov:
                    continue
                if self.unk_index is None:
                    raise ValueError(
                        "You supplied OOV items but didn't "
                        "provide the index of the replacement "
                        "glyph. Either set remove_oov to True, "
                        "or set unk_index to the index of the "
                        "item which replaces any OOV items."
                    )
                out.append(self.unk_index)
        return out
    def transform(self, corpus, remove_oov=False, norm=False):
        """
        Transform a corpus by repeated calls to vectorize, defined above.
        Parameters
        ----------
        corpus : A list of list of strings.
            Represents a corpus as a list of sentences, where a sentence
            is a list of tokens.
        remove_oov : bool, optional, default False
            If True, removes OOV items from the input before vectorization.
        norm : bool, optional, default False
            If True, this will return normalized vectors.
        Returns
        -------
        c : list
            A list of numpy arrays, where each array represents the transformed
            sentence in the original list. The list is guaranteed to be the
            same length as the input list, but the arrays in the list may be
            of different lengths, depending on whether remove_oov is True.
        """
        return [self.vectorize(s, remove_oov=remove_oov, norm=norm) for s in corpus]
    def most_similar(
        self, items, num=10, batch_size=100, show_progressbar=False, return_names=True
    ):
        """
        Return the num most similar items to a given list of items.
        Parameters
        ----------
        items : list of objects or a single object.
            The items to get the most similar items to.
        num : int, optional, default 10
            The number of most similar items to retrieve.
        batch_size : int, optional, default 100.
            The batch size to use. 100 is a good default option. Increasing
            the batch size may increase the speed.
        show_progressbar : bool, optional, default False
            Whether to show a progressbar.
        return_names : bool, optional, default True
            Whether to return the item names, or just the distances.
        Returns
        -------
        sim : array
            For each items in the input the num most similar items are returned
            in the form of (NAME, DISTANCE) tuples. If return_names is false,
            the returned list just contains distances.
        """
        # This line allows users to input single items.
        # We used to rely on string identities, but we now also allow
        # anything hashable as keys.
        # Might fail if a list of passed items is also in the vocabulary.
        # but I can't think of cases when this would happen, and what
        # user expectations are.
        try:
            if items in self.items:
                items = [items]
        except TypeError:
            pass
        x = np.stack([self.norm_vectors[self.items[x]] for x in items])
        # Ask for one extra neighbour because each query item is its own
        # nearest neighbour; strip it below.
        result = self._batch(x, batch_size, num + 1, show_progressbar, return_names)
        # list call consumes the generator.
        return [x[1:] for x in result]
    def threshold(
        self,
        items,
        threshold=0.5,
        batch_size=100,
        show_progressbar=False,
        return_names=True,
    ):
        """
        Return all items whose similarity is higher than threshold.
        Parameters
        ----------
        items : list of objects or a single object.
            The items to get the most similar items to.
        threshold : float, optional, default .5
            The radius within which to retrieve items.
        batch_size : int, optional, default 100.
            The batch size to use. 100 is a good default option. Increasing
            the batch size may increase the speed.
        show_progressbar : bool, optional, default False
            Whether to show a progressbar.
        return_names : bool, optional, default True
            Whether to return the item names, or just the distances.
        Returns
        -------
        sim : array
            For each items in the input the num most similar items are returned
            in the form of (NAME, DISTANCE) tuples. If return_names is false,
            the returned list just contains distances.
        """
        # This line allows users to input single items.
        # We used to rely on string identities, but we now also allow
        # anything hashable as keys.
        # Might fail if a list of passed items is also in the vocabulary.
        # but I can't think of cases when this would happen, and what
        # user expectations are.
        try:
            if items in self.items:
                items = [items]
        except TypeError:
            pass
        x = np.stack([self.norm_vectors[self.items[x]] for x in items])
        result = self._threshold_batch(
            x, batch_size, threshold, show_progressbar, return_names
        )
        # list call consumes the generator.
        return [x[1:] for x in result]
    def nearest_neighbor(
        self, vectors, num=10, batch_size=100, show_progressbar=False, return_names=True
    ):
        """
        Find the nearest neighbors to some arbitrary vector.
        This function is meant to be used in composition operations. The
        most_similar function can only handle items that are in vocab, and
        looks up their vector through a dictionary. Compositions, e.g.
        "King - man + woman" are necessarily not in the vocabulary.
        Parameters
        ----------
        vectors : list of arrays or numpy array
            The vectors to find the nearest neighbors to.
        num : int, optional, default 10
            The number of most similar items to retrieve.
        batch_size : int, optional, default 100.
            The batch size to use. 100 is a good default option. Increasing
            the batch size may increase speed.
        show_progressbar : bool, optional, default False
            Whether to show a progressbar.
        return_names : bool, optional, default True
            Whether to return the item names, or just the distances.
        Returns
        -------
        sim : list of tuples.
            For each item in the input the num most similar items are returned
            in the form of (NAME, DISTANCE) tuples. If return_names is set to
            false, only the distances are returned.
        """
        vectors = np.array(vectors)
        if np.ndim(vectors) == 1:
            vectors = vectors[None, :]
        return list(
            self._batch(vectors, batch_size, num, show_progressbar, return_names)
        )
    def nearest_neighbor_threshold(
        self,
        vectors,
        threshold=0.5,
        batch_size=100,
        show_progressbar=False,
        return_names=True,
    ):
        """
        Find the nearest neighbors to some arbitrary vector.
        This function is meant to be used in composition operations. The
        most_similar function can only handle items that are in vocab, and
        looks up their vector through a dictionary. Compositions, e.g.
        "King - man + woman" are necessarily not in the vocabulary.
        Parameters
        ----------
        vectors : list of arrays or numpy array
            The vectors to find the nearest neighbors to.
        threshold : float, optional, default .5
            The threshold within to retrieve items.
        batch_size : int, optional, default 100.
            The batch size to use. 100 is a good default option. Increasing
            the batch size may increase speed.
        show_progressbar : bool, optional, default False
            Whether to show a progressbar.
        return_names : bool, optional, default True
            Whether to return the item names, or just the distances.
        Returns
        -------
        sim : list of tuples.
            For each item in the input the num most similar items are returned
            in the form of (NAME, DISTANCE) tuples. If return_names is set to
            false, only the distances are returned.
        """
        vectors = np.array(vectors)
        if np.ndim(vectors) == 1:
            vectors = vectors[None, :]
        return list(
            self._threshold_batch(
                vectors, batch_size, threshold, show_progressbar, return_names
            )
        )
    def _threshold_batch(
        self, vectors, batch_size, threshold, show_progressbar, return_names
    ):
        """Batched cosine distance, yielding all items above threshold."""
        for i in tqdm(range(0, len(vectors), batch_size), disable=not show_progressbar):
            batch = vectors[i : i + batch_size]
            similarities = self._sim(batch, self.norm_vectors)
            for lidx, sims in enumerate(similarities):
                indices = np.flatnonzero(sims >= threshold)
                sorted_indices = indices[np.argsort(-sims[indices])]
                if return_names:
                    yield [(self.indices[d], sims[d]) for d in sorted_indices]
                else:
                    yield list(sims[sorted_indices])
    def _batch(self, vectors, batch_size, num, show_progressbar, return_names):
        """Batched cosine distance, yielding the top num items per vector."""
        if num < 1:
            # Was a plain string missing the f-prefix, so the message showed
            # the literal text "{num}".
            raise ValueError(f"num should be >= 1, is now {num}")
        for i in tqdm(range(0, len(vectors), batch_size), disable=not show_progressbar):
            batch = vectors[i : i + batch_size]
            similarities = self._sim(batch, self.norm_vectors)
            if num == 1:
                sorted_indices = np.argmax(similarities, 1)[:, None]
            else:
                # argpartition is O(n) per row; only the top num are then
                # fully sorted below.
                sorted_indices = np.argpartition(-similarities, kth=num, axis=1)
                sorted_indices = sorted_indices[:, :num]
            for lidx, indices in enumerate(sorted_indices):
                sims = similarities[lidx, indices]
                if return_names:
                    dindex = np.argsort(-sims)
                    yield [(self.indices[indices[d]], sims[d]) for d in dindex]
                else:
                    yield list(-1 * np.sort(-sims))
    @staticmethod
    def normalize(vectors):
        """
        Normalize a matrix of row vectors to unit length.
        Contains a shortcut if there are no zero vectors in the matrix.
        If there are zero vectors, we do some indexing tricks to avoid
        dividing by 0.
        Parameters
        ----------
        vectors : np.array
            The vectors to normalize.
        Returns
        -------
        vectors : np.array
            The input vectors, normalized to unit length.
        """
        vectors = np.copy(vectors)
        if np.ndim(vectors) == 1:
            norm = np.linalg.norm(vectors)
            if norm == 0:
                return np.zeros_like(vectors)
            return vectors / norm
        norm = np.linalg.norm(vectors, axis=1)
        if np.any(norm == 0):
            nonzero = norm > 0
            result = np.zeros_like(vectors)
            n = norm[nonzero]
            p = vectors[nonzero]
            result[nonzero] = p / n[:, None]
            return result
        else:
            return vectors / norm[:, None]
    def vector_similarity(self, vector, items):
        """Compute the similarity between a vector and a set of items."""
        items_vec = np.stack([self.norm_vectors[self.items[x]] for x in items])
        return self._sim(vector, items_vec)
    def _sim(self, x, y):
        """Cosine similarity, clipped into [0, 1]."""
        sim = self.normalize(x).dot(y.T)
        return np.clip(sim, a_min=0.0, a_max=1.0)
    def similarity(self, i1, i2):
        """
        Compute the similarity between two sets of items.
        Parameters
        ----------
        i1 : object
            The first set of items.
        i2 : object
            The second set of item.
        Returns
        -------
        sim : array of floats
            An array of similarity scores between 1 and 0.
        """
        try:
            if i1 in self.items:
                i1 = [i1]
        except TypeError:
            pass
        try:
            if i2 in self.items:
                i2 = [i2]
        except TypeError:
            pass
        i1_vec = np.stack([self.norm_vectors[self.items[x]] for x in i1])
        i2_vec = np.stack([self.norm_vectors[self.items[x]] for x in i2])
        return self._sim(i1_vec, i2_vec)
    def intersect(self, wordlist):
        """
        Intersect a reach instance with a wordlist.
        Parameters
        ----------
        wordlist : list of str
            A list of words to keep. Note that this wordlist need not include
            all words in the Reach instance. Any words which are in the
            wordlist, but not in the reach instance are ignored.
        """
        # Remove duplicates and oov words.
        wordlist = set(self.items) & set(wordlist)
        # Get indices of intersection.
        indices = np.sort([self.items[x] for x in wordlist])
        # Set unk_index to None if it is None or if it is not in indices
        unk_index = self.unk_index if self.unk_index in indices else None
        # Index vectors
        vectors = self.vectors[indices]
        # Index words
        wordlist = [self.indices[x] for x in indices]
        return Reach(vectors, wordlist, unk_index=unk_index)
    def union(self, other, check=True):
        """
        Union a reach with another reach.
        If items are in both reach instances, the current instance gets precedence.
        Parameters
        ----------
        other : Reach
            Another Reach instance.
        check : bool
            Whether to check if duplicates are the same vector.
        """
        if self.size != other.size:
            raise ValueError(
                f"The size of the embedding spaces was not the same: {self.size} and {other.size}"
            )
        union = sorted(set(self.items) | set(other.items))
        if check:
            intersection = set(self.items) & set(other.items)
            for x in intersection:
                if not np.allclose(self[x], other[x]):
                    raise ValueError(f"Term {x} was not the same in both instances")
        vectors = []
        for x in union:
            try:
                vectors.append(self[x])
            except KeyError:
                vectors.append(other[x])
        return Reach(np.stack(vectors), union)
    def save(self, path, write_header=True):
        """
        Save the current vector space in word2vec format.
        Parameters
        ----------
        path : str
            The path to save the vector file to.
        write_header : bool, optional, default True
            Whether to write a word2vec-style header as the first line of the
            file
        """
        # Explicit utf-8 so round-trips with load() (which reads utf-8) work
        # regardless of the platform default encoding.
        with open(path, "w", encoding="utf-8") as f:
            if write_header:
                f.write(f"{self.vectors.shape[0]} {self.vectors.shape[1]}\n")
            for i in range(len(self.items)):
                w = self.indices[i]
                vec = self.vectors[i]
                vec_string = " ".join([str(x) for x in vec])
                f.write(f"{w} {vec_string}\n")
    def save_fast_format(self, filename):
        """
        Save a reach instance in a fast format.
        The reach fast format stores the words and vectors of a Reach instance
        separately in a JSON and numpy format, respectively.
        Parameters
        ----------
        filename : str
            The prefix to add to the saved filename. Note that this is not the
            real filename under which these items are stored.
            The items and unk_index are stored under "{filename}_items.json",
            and the numpy matrix is saved under "{filename}_vectors.npy".
        """
        items, _ = zip(*sorted(self.items.items(), key=lambda x: x[1]))
        items = {"items": items, "unk_index": self.unk_index, "name": self.name}
        # The filename prefix was previously ignored (the paths were
        # hard-coded) and the file handles were never closed.
        with open(f"{filename}_items.json", "w") as f:
            json.dump(items, f)
        with open(f"{filename}_vectors.npy", "wb") as f:
            np.save(f, self.vectors)
    @staticmethod
    def load_fast_format(filename, desired_dtype="float32"):
        """
        Load a reach instance in fast format.
        As described above, the fast format stores the words and vectors of the
        Reach instance separately, and is drastically faster than loading from
        .txt files.
        Parameters
        ----------
        filename : str
            The filename prefix from which to load. Note that this is not a
            real filepath as such, but a shared prefix for both files.
            In order for this to work, both {filename}_items.json and
            {filename}_vectors.npy should be present.
        desired_dtype : string, optional, default "float32"
            The dtype to cast the loaded vectors to.
        """
        words, unk_index, name, vectors = Reach._load_fast(filename)
        vectors = vectors.astype(desired_dtype)
        return Reach(vectors, words, unk_index=unk_index, name=name)
    @staticmethod
    def _load_fast(filename):
        """Sub for fast loader."""
        # The filename prefix was previously ignored (the paths were
        # hard-coded) and the file handles were never closed.
        with open(f"{filename}_items.json") as f:
            it = json.load(f)
        words, unk_index, name = it["items"], it["unk_index"], it["name"]
        with open(f"{filename}_vectors.npy", "rb") as f:
            vectors = np.load(f)
        return words, unk_index, name, vectors
| {
"repo_name": "stephantul/reach",
"path": "reach/reach.py",
"copies": "1",
"size": "28867",
"license": "mit",
"hash": -3763876699457379300,
"line_mean": 33.7795180723,
"line_max": 98,
"alpha_frac": 0.5619565594,
"autogenerated": false,
"ratio": 4.51540747692789,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.557736403632789,
"avg_score": null,
"num_lines": null
} |
"""A class for wrapping a WebSocket."""
import asyncio
import json
import websockets
class WebSocketWrapper(object):
    """A class that wraps a client WebSocket.
    Attributes:
        ws (websockets.client.WebSocketClientProtocol): The websocket object
            representing the WebSocket wrapped by this class. This attribute
            will be set to None when a WebSocket connection is not open.
    """
    # NOTE(review): @asyncio.coroutine / `yield from` is deprecated since
    # Python 3.8 and removed in 3.11; migrating to async/await is advisable.
    def __init__(self, ws=None):
        self.ws = ws
    @asyncio.coroutine
    def open_ws(self, address):
        """Coroutine to open a WebSocket to the specified address.
        This method sets the ``ws`` attribute of this class to an open
        WebSocketClientProtocol object. If this class already has an open
        WebSocket, this coroutine returns without doing anything.
        Args:
            address (str): The address to connect to. The ``ws://`` scheme
                is prepended automatically, so pass only host[:port][/path].
        """
        if self.ws is not None:
            return
        else:
            self.ws = yield from websockets.connect('ws://' + address)
    @asyncio.coroutine
    def close_ws(self):
        """Coroutine to close the current WebSocket connection.
        Does nothing when no connection is open; otherwise closes the
        socket and resets ``ws`` to None.
        """
        if self.ws is None:
            return
        else:
            yield from self.ws.close()
            self.ws = None
    @asyncio.coroutine
    def send_json(self, json_dict):
        """Send JSON over this class's WebSocket.
        Args:
            json_dict (dict): A JSON-like dict.
        Note: raises AttributeError if no connection is open (``ws`` is None).
        """
        json_payload = json.dumps(json_dict)
        yield from self.ws.send(json_payload)
    @asyncio.coroutine
    def recv_json(self):
        """Receive JSON over this class's WebSocket.
        Returns:
            dict: A JSON-like dict parsed from the received text frame.
        Note: raises AttributeError if no connection is open (``ws`` is None).
        """
        resp = yield from self.ws.recv()
        json_resp = json.loads(resp)
        return json_resp
| {
"repo_name": "welchbj/spotnet",
"path": "backend/utils/ws_wrapper.py",
"copies": "1",
"size": "1747",
"license": "mit",
"hash": 3144005357220292600,
"line_mean": 24.6911764706,
"line_max": 76,
"alpha_frac": 0.5935890097,
"autogenerated": false,
"ratio": 4.44529262086514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.553888163056514,
"avg_score": null,
"num_lines": null
} |
""" A class for writing out everything associated with a training run.
E.g. Command line arguments, hyperparameters, input and output sizes, model description, results
"""
import os
import sys
import argparse
import datetime
import pickle
import subprocess
class Experiment(object):
    """ Writes out everything associated with a training run to a directory.

    E.g. command line arguments, hyperparameters, input and output sizes,
    model description and results. Creates ``exp_dir/exp_name`` and refuses
    to overwrite an existing experiment directory.
    """

    __version__ = "1.0.0"

    def __init__(self, exp_name, args, exp_dir='experiments', num_samples=None, input_dim=None, num_actions=None, hparams=None, modules=None):
        """ Create the experiment directory and write the pre-run report.

        exp_name: Name of the experiment; also used for directory/file names.
        args: Either an argparse.Namespace or a dict (e.g. from docopt).
        exp_dir: Parent directory under which the experiment dir is created.
        num_samples/input_dim/num_actions: Optional metadata, written if set.
        hparams: Optional dict of hyperparameters.
        modules: iterable with python modules that have __name__ and
            __version__ attributes. Python and Experiment class versions
            will always be output.

        Raises IOError if the experiment directory already exists.
        """
        # Use None sentinels instead of mutable default arguments ({} / [])
        # so instances never share state through the defaults.
        self.hparams = {} if hparams is None else hparams
        self.modules = [] if modules is None else modules
        if not os.path.exists(exp_dir):
            os.makedirs(exp_dir)
        self.dir_name = os.path.join( exp_dir, exp_name )
        if os.path.exists(self.dir_name):
            raise IOError(-1,"Experiment directory already exists, won't overwrite ",self.dir_name)
        os.makedirs(self.dir_name)
        if isinstance(args, argparse.Namespace):
            # argparse: convert the Namespace to a plain dict
            self.args = vars(args)
        else:
            # docopt (or any mapping of option name to value)
            self.args = args
        self.filename = os.path.join( self.dir_name, exp_name+".txt" )
        self.name = exp_name
        self.num_samples = num_samples
        self.input_dim = input_dim
        self.num_actions = num_actions
        self.commit = Experiment.gitCommit()
        self.start = datetime.datetime.now()
        self.writeBefore()

    def _writeOne(self, fileobj, key, value, indent=''):
        """ Write a single "key: value" line, skipping None values. """
        if value is not None:
            fileobj.write( "{}{}: {}\n".format( indent, key, value ) )

    def writeBefore(self):
        """ Write out everything we know before running the experiment,
        in case of crash, etc. Will overwrite any existing file. """
        with open(self.filename,'w') as f:
            f.write( "Name: {}\n".format( self.name ) )
            f.write( "Start: {}\n".format( self.start ) )
            self._writeOne( f, "Num samples", self.num_samples )
            self._writeOne( f, "Input dims", self.input_dim )
            self._writeOne( f, "Num actions", self.num_actions )
            if self.commit is None:
                # Surface "no git info" on stdout; _writeOne skips None anyway.
                print( "Commit: None" )
            self._writeOne( f, "Git Commit", self.commit )
            f.write( "Argv: {}\n".format( sys.argv ) )
            f.write( "Current dir: {}\n".format( os.getcwd() ) )
            f.write( "Command line arguments:\n" )
            for key,value in self.args.items():
                self._writeOne( f, key, value, indent=" ")
            f.write( "Hyperparameters:\n" )
            for key,value in self.hparams.items():
                self._writeOne( f, key, value, indent=" ")
            self._writeVersions(f)

    def writeAfter(self, model=None, histories=None, results=None, saveModel=False):
        """ Write closing data to the experiment file.
        model: Needs to be a Keras model (with a summary method that accepts a print_fn argument)
            It also needs to support to_json() and save_weights() methods if saveModel is True.
        histories: Optional object, pickled to histories.pickle in the experiment directory.
        results: A dictionary of any relevant results
        saveModel: If True (and model given), write architecture json and weights h5.
        """
        # None sentinel instead of a mutable default argument.
        if results is None:
            results = {}
        # Write out everything new we know after running the experiment
        # Will append to the existing file
        with open(self.filename,'a') as f:
            finish = datetime.datetime.now()
            f.write( "Finish: {}\n".format( finish ) )
            f.write( "Elapsed: {}\n".format( finish-self.start ) )
            if model is not None:
                # Collect the Keras summary lines instead of printing them.
                summ_list = []
                model.summary(print_fn=lambda x: summ_list.append(x))
                f.write( "Model:\n" )
                for summ in summ_list:
                    f.write( ' {}\n'.format(summ) )
            f.write( "Results:\n" )
            for key,value in results.items():
                f.write( " {}: {}\n".format( key, value ) )
        if model is not None and saveModel:
            fname = os.path.join( self.dir_name, self.name+"_model.json" )
            with open(fname,'w') as f:
                f.write(model.to_json())
            fname = os.path.join( self.dir_name, self.name+"_weights.h5" )
            model.save_weights(fname)
        if histories is not None:
            try:
                his_fname = os.path.join(self.dir_name, "histories.pickle")
                with open(his_fname, 'wb') as f:
                    pickle.dump( histories, f, pickle.HIGHEST_PROTOCOL)
            except Exception as ex:
                # Best effort: a failed history dump must not kill the run.
                print( "Failed to write history ({}) to {}\n {}".format( type(histories), his_fname, ex ) )

    def pythonVersionString(self):
        """Current system python version as string major.minor.micro [(alpha|beta|etc)]"""
        vstring = "{0}.{1}.{2}".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro)
        if sys.version_info.releaselevel != "final":
            vstring += " ({})".format( sys.version_info.releaselevel )
        if sys.version_info.serial != 0:
            vstring += " (serial: {})".format( sys.version_info.serial )
        return vstring

    def _writeVersions(self, fileobj):
        """ Write Python, Experiment and user-module versions, then clear
        the module list so versions are only emitted once. """
        fileobj.write( "Module Versions:\n" )
        self._writeOne( fileobj, "Python", self.pythonVersionString(), indent=" " )
        self._writeOne( fileobj, "Experiment", Experiment.__version__, indent=" " )
        for mod in self.modules:
            self._writeOne( fileobj, mod.__name__, mod.__version__, indent=" " )
        self.modules = []

    @staticmethod
    def gitCommit():
        """ Return the current git HEAD commit hash, or None if unavailable. """
        try:
            out = subprocess.run(["git", "rev-parse", "HEAD"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
            if 0 == out.returncode:
                return out.stdout.decode('UTF-8').strip()
        except Exception:
            # git missing or not runnable; the commit is simply unknown.
            return None
        return None
| {
"repo_name": "Bleyddyn/malpi",
"path": "malpi/train/experiment.py",
"copies": "1",
"size": "5843",
"license": "mit",
"hash": 1762477492135918000,
"line_mean": 42.9323308271,
"line_max": 138,
"alpha_frac": 0.5719664556,
"autogenerated": false,
"ratio": 3.908361204013378,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9878766075930887,
"avg_score": 0.02031231673649813,
"num_lines": 133
} |
""" A class holding game state """
import sumoku.game
import sys
def output_tile(tile, highlight):
    """ Print a tile in color.

    tile is a (number, color) pair; the color index selects the ANSI
    foreground, and highlight adds a white background.
    """
    number, color = tile[0], tile[1]
    background = '47;' if highlight else ''
    sys.stdout.write(
        '\x1b[{};{}1m{}\x1b[0m'.format(31 + color, background, number))
class GameState(object):
    """ A class holding sumoku game state.

    Tracks the tile bag, each player's hand and score, the tiles already
    on the board (played_tiles) and the tiles of the current, not yet
    submitted move (pending_tiles).

    NOTE(review): this module is Python 2 code (print statements, xrange).
    """
    def __init__(self, players, hand_size, key_number):
        """ Construct the game state from initial parameters """
        self.players = players
        self.hand_size = hand_size
        self.key_number = key_number
        self.tiles = sumoku.game.generate_tiles()
        self.hands = [[] for _ in xrange(players)]
        self.draw_tiles()
        self.scores = [0 for _ in xrange(players)]
        self.played_tiles = []
        self.pending_tiles = []
        # Index of the player whose turn it is.
        self.player = 0
    def draw_tiles(self):
        """ Draw tiles to fill up hands (up to hand_size, while the bag lasts) """
        for hand in self.hands:
            while len(hand) < self.hand_size and len(self.tiles) > 0:
                hand.append(sumoku.game.draw_tile(self.tiles))
            hand.sort()
    def cur_hand(self):
        """ Return the hand of the current player """
        return self.hands[self.player]
    def flip_tile(self, tile):
        """ Flip a tile between 6 and 9 (tiles are (number, color) pairs) """
        hand = self.cur_hand()
        if hand[tile][0] == 6:
            hand[tile] = (9, hand[tile][1])
        elif hand[tile][0] == 9:
            hand[tile] = (6, hand[tile][1])
    def place_tile(self, tile, col, row):
        """ Place a tile on the board.

        Moves hand index `tile` into pending_tiles as (number, color, col, row).
        """
        hand = self.cur_hand()
        self.pending_tiles.append((hand[tile][0], hand[tile][1], col, row))
        del hand[tile]
    def remove_tile(self, col, row):
        """ Remove a pending tile from the board, returning it to the hand """
        tile = sumoku.game.find_tile(col, row, self.pending_tiles)
        self.pending_tiles.remove(tile)
        self.cur_hand().append((tile[0], tile[1]))
    def submit_play(self):
        """ Submit a play: score the pending tiles, commit them to the board
        and advance the turn (unless a complete line lets the player go again). """
        score, complete_line = sumoku.game.score_play(self.pending_tiles,
                                                      self.played_tiles,
                                                      self.key_number)
        self.scores[self.player] = self.scores[self.player] + score
        self.played_tiles.extend(self.pending_tiles)
        self.pending_tiles = []
        if not complete_line or len(self.cur_hand()) == 0:
            self.player = (self.player + 1) % self.players
        self.draw_tiles()
    def game_complete(self):
        """ Return True if the game is complete (every hand is empty) """
        for hand in self.hands:
            if len(hand) != 0:
                return False
        return True
    def print_game(self):
        """ Print the game state: the board grid with a scoreboard to the right """
        print ' abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
        for row in xrange(sumoku.game.MIN_Y, sumoku.game.MAX_Y + 1):
            sys.stdout.write('{:2} '.format(row + 1))
            for col in xrange(sumoku.game.MIN_X, sumoku.game.MAX_X + 1):
                # Played tiles render plain; pending tiles are highlighted;
                # empty cells are dashes. find_tile raises when nothing is there.
                try:
                    tile = sumoku.game.find_tile(col, row, self.played_tiles)
                    output_tile(tile, False)
                except sumoku.game.InvalidPlayException:
                    try:
                        tile = sumoku.game.find_tile(col, row,
                                                     self.pending_tiles)
                        output_tile(tile, True)
                    except sumoku.game.InvalidPlayException:
                        sys.stdout.write('-')
            # Right-hand sidebar: key number, bag size, then the scoreboard.
            if row == 0:
                print ' Key number: {}'.format(self.key_number)
            elif row == 1:
                print ' Tiles remaining: {}'.format(len(self.tiles))
            elif row == 3:
                print ' Player Score Hand'
            elif row == 4:
                print ' 12345678'
            elif row - 5 >= 0 and row - 5 < self.players:
                player = row - 5
                sys.stdout.write(' ')
                # Highlight the current player's scoreboard row.
                if player == self.player:
                    sys.stdout.write('\x1b[47m')
                sys.stdout.write('{:6} {:05} '
                                 .format(player + 1, self.scores[player]))
                for tile in self.hands[player]:
                    output_tile(tile, player == self.player)
                print
            else:
                print
| {
"repo_name": "sheepweevil/modulus-master",
"path": "sumoku/gamestate.py",
"copies": "1",
"size": "4409",
"license": "mit",
"hash": -170293391087994560,
"line_mean": 37.3391304348,
"line_max": 77,
"alpha_frac": 0.4992061692,
"autogenerated": false,
"ratio": 3.972072072072072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9971278241272072,
"avg_score": 0,
"num_lines": 115
} |
"""A class implementation of code found in
https://github.com/hackappcom/iloot"""
from xml.parsers.expat import ExpatError
from urllib.parse import urlparse
import plistlib
import http.client as http
class PlistRequester(object):
    """A class to make requesting plist data from a webserver easier.

    Builds an HTTP(S) request with iOS-like headers and parses the
    response body as a property list. The parsed result is cached on
    the instance, so the request is made at most once.
    """

    def __init__(self, url, method="GET"):
        """
        url: full URL (scheme, host, optional port and path) to request.
        method: HTTP method to use, "GET" by default.
        """
        self.url = url
        self.method = method
        self.body = None
        self.headers = {}
        self.data = None
        self.headers['User-Agent'] = "iPhone OS 8.1.2 (12B440)"
        self.headers['X-MMe-Client-Info'] = "<iPhone7,2> <iPhone OS;8.1.2;12B440> <com.apple.SyncedDefaults/207.2>"

    def add_header(self, key, value):
        """Allows adding new values to the headers"""
        self.headers[key] = value

    def set_authorization(self, auth_value):
        """Set the HTTP Authorization header to the given value."""
        self.add_header("Authorization", auth_value)

    def plist_as_dict(self):
        """Makes the network request and returns the parsed plist data.

        Returns the cached result when available. Returns None when the
        body is not a valid plist or the status code is not 200.
        """
        if self.data:
            return self.data
        purl = urlparse(self.url)
        if purl.scheme == "https":
            conn = http.HTTPSConnection(purl.hostname, purl.port)
        else:
            conn = http.HTTPConnection(purl.hostname, purl.port)
        try:
            conn.request(self.method, purl.path, self.body, self.headers)
            response = conn.getresponse()
            data = response.read()
        finally:
            # The original leaked the connection; always release it.
            conn.close()
        try:
            self.data = plistlib.loads(data)
        except (ExpatError, plistlib.InvalidFileException):
            # Malformed XML (ExpatError) or not a recognizable plist at all
            # (InvalidFileException) — the original only caught the former.
            self.data = None
        if response.status != 200:
            print("Request %s returned code %d" % (self.url, response.status))
            return None
        return self.data
| {
"repo_name": "mattandersen/icloud-recents-deleter",
"path": "libs/plistrequester.py",
"copies": "1",
"size": "1656",
"license": "mit",
"hash": 5336805120631063000,
"line_mean": 30.2452830189,
"line_max": 115,
"alpha_frac": 0.6123188406,
"autogenerated": false,
"ratio": 3.7722095671981775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9883715135189199,
"avg_score": 0.0001626545217957059,
"num_lines": 53
} |
"""A class implementing an exponentially moving average."""
import progress.decorators
from progress.eta.base import BaseETA
@progress.decorators.inherit_docstrings
class EMAETA(BaseETA):
    """Exponentially weighted moving-average ETA estimator.

    Recent progress samples carry more weight than older ones, so the
    estimate adapts quickly to speed changes while smoothing out jitter.
    """

    def __init__(self, decay=0.1):
        """Decay controls how fast past values' effect decreases."""
        self.decay = decay
        self.reset()

    def update(self, time, value, max_value):
        # Deltas relative to the previously recorded (time, value) sample;
        # on the very first update, measure from the origin instead.
        if not self._history:
            dt, dv = float(time), float(value)
        else:
            last_time, last_value = self._history
            dt = float(abs(last_time - time))
            dv = float(abs(last_value - value))
        if dt > 0. and dv > 0.:
            rate = dv / dt
            self._ema = self.decay * rate + (1. - self.decay) * self._ema
        else:
            self._ema = 0.
        if self._ema > 0.:
            self._eta = (max_value - value) / self._ema
        self._history = [time, value]

    def get(self):
        if self.eta:
            return self.format_eta(self.eta)
        return None

    def reset(self):
        self._eta = None
        self._ema = 0.
        self._history = []

    @property
    def eta(self):
        return self._eta

    @property
    def decay(self):
        return self._decay

    @decay.setter
    def decay(self, decay):
        if decay < 0.0 or decay > 1.0:
            raise ValueError("Decay must be in range [0.0, 1.0]")
        self._decay = decay
| {
"repo_name": "MisanthropicBit/progress",
"path": "progress/eta/ema.py",
"copies": "1",
"size": "1607",
"license": "mit",
"hash": -6126272084552591000,
"line_mean": 25.3442622951,
"line_max": 78,
"alpha_frac": 0.5718730554,
"autogenerated": false,
"ratio": 3.835322195704057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49071952511040573,
"avg_score": null,
"num_lines": null
} |
"""A class needed to reload modules."""
import sys
import imp
import logging
from .tools import PKG_NAME
log = logging.getLogger("ECC")
class ModuleReloader:
    """Reloader for all dependencies."""

    MAX_RELOAD_TRIES = 10

    @staticmethod
    def reload_all(ignore_string='singleton'):
        """Reload all loaded modules."""
        prefix = PKG_NAME + '.plugin.'
        # reload all twice to make sure all dependencies are satisfied
        log.debug(
            "Reloading modules that start with '%s' and don't contain '%s'",
            prefix, ignore_string)
        log.debug("Reload all modules first time")
        ModuleReloader.reload_once(prefix, ignore_string)
        log.debug("Reload all modules second time")
        ModuleReloader.reload_once(prefix, ignore_string)
        log.debug("All modules reloaded")

    @staticmethod
    def reload_once(prefix, ignore_string, try_counter=0):
        """Reload all matching modules once.

        prefix: only modules whose name starts with this are reloaded.
        ignore_string: modules whose name contains this are skipped.
        try_counter: internal retry count, threaded through the recursion;
            external callers should omit it.
        """
        try:
            # Snapshot the module table: reloading can import new modules,
            # which would otherwise mutate sys.modules during iteration.
            for name, module in list(sys.modules.items()):
                if name.startswith(prefix) and ignore_string not in name:
                    log.debug("Reloading module: '%s'", name)
                    imp.reload(module)
        except OSError as e:
            # Bug fix: the original re-initialized the counter to 0 inside
            # every recursive call, so MAX_RELOAD_TRIES never triggered.
            if try_counter >= ModuleReloader.MAX_RELOAD_TRIES:
                log.fatal("Too many tries to reload and no success. Fail.")
                return
            try_counter += 1
            log.error("Received an error: %s on try %s. Try again.",
                      e, try_counter)
            ModuleReloader.reload_once(prefix, ignore_string, try_counter)
| {
"repo_name": "niosus/EasyClangComplete",
"path": "plugin/utils/module_reloader.py",
"copies": "1",
"size": "1595",
"license": "mit",
"hash": 9104347241393245000,
"line_mean": 33.6739130435,
"line_max": 76,
"alpha_frac": 0.5968652038,
"autogenerated": false,
"ratio": 4.346049046321526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5442914250121527,
"avg_score": null,
"num_lines": null
} |
""" A class representation of all possible input types """
import logging
from os import linesep
from xml.sax.saxutils import quoteattr
# Initialize logger for this module
logger = logging.getLogger(__name__)
class Base:
    """ Shared behavior for every input element type.

    Tracks the element's name, parent, WSDL type reference (``ref``),
    nesting depth and empty state. Subclasses override the ``setable`` /
    ``repeatable`` properties to describe their own interface.
    """
    def __init__(self, name, parent, wsdl_type, update_parent=True):
        # Name-mangled attributes (_Base__*) back the read-only properties
        # below and cannot be clobbered by subclass attributes.
        self.__parent = parent
        self.__name = name
        self.__depth = None
        logger.info("Initializing new {} element with name '{}'".format(self.__class__.__name__, self.name))
        self.__ref = wsdl_type
        # Run the update method here, so we see any type hints that were provided, or any other needed updates
        # See wsdl.types.Base.update() for more information
        self.ref.update()
        self.__setable = True
        self.__repeatable = False
        self.__empty = True
        # Register with the parent Container. Container is defined later in
        # this module; the reference is resolved at call time, so it is safe.
        if isinstance(self.parent, Container) and update_parent:
            self.parent.append_child(self)
    def __str__(self):
        # Render only the WSDL docstring as an XML comment; subclasses
        # prepend this to their own pseudo-XML renderings.
        doc = self.ref.bs_element.get("docstring", "")
        if doc:
            return "{}<!--- {} -->{}".format(self._str_indent, doc, linesep)
        return ""
    @property
    def _str_indent(self) -> str:
        # Visual indentation used by the __str__ renderings; one " | " per level.
        if self.depth == 0:
            return ""
        return " | " * self.depth
    @property
    def parent(self):
        return self.__parent
    @property
    def is_empty(self):
        """ Tracks the state of whether or not the element (and all child elements) is empty. """
        return self.__empty
    @is_empty.setter
    def is_empty(self, state):
        # Set the parents to not empty
        # (propagates the state change up the whole ancestor chain first).
        if self.parent is not None:
            self.parent.is_empty = state
        self.__empty = state
    @property
    def inner_xml(self) -> str:
        """ Represents the xml of the element, including all children, values, etc. If set, then the value of the
        input will be ignored, as well as any child objects defined in the WSDL. Instead, the value of
        inner_xml will be used verbatim, in place. """
        try:
            return self.__inner_xml
        except AttributeError:
            # Never set: report None instead of raising.
            return None
    @inner_xml.setter
    def inner_xml(self, xml: str):
        self.__inner_xml = xml
    @property
    def name(self) -> str:
        return self.__name
    @property
    def depth(self) -> int:
        # Lazily computed and cached: counts ancestors via _map_parents.
        if self.__depth is not None:
            return self.__depth
        else:
            # Determine the depth
            self.__depth = 0
            def inc(_):
                # Invoked once per ancestor; the ancestor itself is unused.
                self.__depth += 1
                return self.__depth
            self._map_parents(inc)
            return self.__depth
    @property
    def setable(self) -> bool:
        """ Indicates whether the object can contain a value, or just contains other elements. If the object can have
        a value, then you can set it with:
        InputElement.value = "foo"
        """
        return self.__setable
    @property
    def repeatable(self):
        """ Indicates whether the element should only appear (at most) once in the rendered document (single value)
        or whether it can accept an iterable value, and will thus be (possibly) repeated in the rendered document """
        return self.__repeatable
    @property
    def ref(self):
        # The underlying soapy.wsdl.types object this input element wraps.
        return self.__ref
    def _map_parents(self, func):
        """ Map a function to take action on every parent, recursively, until None is found
        Provided function will be passed the current parent """
        working_parent = self.parent
        result = None
        while working_parent is not None:
            result = func(working_parent)
            working_parent = working_parent.parent
        return result
class RenderOptionsMixin:
    """ Mixin providing render-behavior tweaks shared by input element types. """
    def render_empty(self):
        """ Force this element (and, recursively, its ancestors) to be included
        in the rendered envelope even when it holds no value. """
        logger.info("Setting Element {} to be rendered even when empty".format(self.name))
        self.ref.min_occurs = "1"
        enclosing = self.parent
        if isinstance(enclosing, RenderOptionsMixin):
            enclosing.render_empty()
class AttributableMixin:
    """ Mixin exposing the XML attributes declared for an input element. """
    def __init__(self):
        # Materialize one Attribute wrapper per WSDL-declared attribute.
        self.__attrs = tuple(Attribute(wsdl_attr.name, wsdl_attr.default)
                             for wsdl_attr in self.ref.attributes)
    @property
    def attributes(self) -> tuple:
        return self.__attrs
    @property
    def all_attributes_empty(self) -> bool:
        # True only when no attribute carries a value.
        return all(attr.value is None for attr in self.attributes)
    def keys(self):
        # Names of every attribute, as a tuple.
        return tuple(attr.name for attr in self.attributes)
    def __getitem__(self, item):
        """ retrieves the attribute on the element in bs4-style """
        for attr in self.attributes:
            if attr.name == item:
                return attr
        raise AttributeError("{} object has no element attribute {}".format(self.__class__.__name__, item))
class Element(Base, AttributableMixin, RenderOptionsMixin):
    """ A leaf input Element: it can be assigned a single value ('setable') and never repeats. """
    def __init__(self, name, parent, wsdl_type, update_parent=True):
        super().__init__(name, parent, wsdl_type, update_parent)
        self.__value = None
        AttributableMixin.__init__(self)
    def __str__(self):
        # Enum hints (if any) are rendered as a leading XML comment.
        if self.ref.enums:
            prefix = "{}<!--- Enum hints: {} -->{}".format(self._str_indent, self.ref.enums, linesep)
        else:
            prefix = ""
        rendered_attrs = " ".join(str(attr) for attr in self.attributes)
        return "{5}{4}{0}<{1} {2}>{3}</{1}>".format(self._str_indent,
                                                    self.name,
                                                    rendered_attrs,
                                                    self.value,
                                                    prefix,
                                                    super().__str__())
    @classmethod
    def from_sibling(cls, sib):
        # Build a fresh Element sharing the sibling's name, parent and ref,
        # without re-registering it on the parent.
        logger.info("Creating new {} Element from sibling, {}".format(sib.__class__.__name__, sib.name))
        return cls(sib.name, sib.parent, sib.ref, False)
    @property
    def value(self) -> str:
        """ The current value (defaults to None-type) of the input Element, and the value that will be used in the
        request envelope """
        return self.__value
    @value.setter
    def value(self, value):
        # None is ignored: the previously stored value is kept.
        if value is None:
            return
        # Translate python booleans into their XML literal form.
        if isinstance(value, bool):
            self.__value = "true" if value else "false"
        else:
            self.__value = value
class Container(Base, AttributableMixin, RenderOptionsMixin):
    """ Container elements only contain other elements, and possibly attributes. They can not be set themselves, or
    repeated more than once. They contain attributes that map to other input Elements. """
    def __init__(self, name, parent, wsdl_type, update_parent=True):
        super().__init__(name, parent, wsdl_type, update_parent)
        AttributableMixin.__init__(self)
        # Mangles to _Container__setable, read back by the overriding
        # `setable` property below (distinct from Base's _Base__setable).
        self.__setable = False
        self.__children = list()
    def __setattr__(self, key, value):
        """ implementation allows settings child Element values without having to reference the .value attribute
        on the Element, but can set the Element inside the parent Container and the .value attribute will be set
        """
        # Note: this hook also runs for every assignment made in the
        # __init__ chain; those names are not yet in __dict__, so they
        # fall through to the plain-assignment branch.
        if key in self.__dict__:
            if isinstance(self.__dict__[key], Element):
                self.__dict__[key].value = value
            else:
                self.__dict__[key] = value
        else:
            self.__dict__[key] = value
    def __str__(self):
        # Pseudo-XML rendering: opening tag, children one per line, closing tag.
        return "{5}{4}<{0} {1}>{3}{2}{3}{4}</{0}>".format(self.name, " ".join(str(attr) for attr in self.attributes),
                                                          "{}".format(linesep).join(
                                                              str(child) for child in self.children),
                                                          linesep,
                                                          self._str_indent,
                                                          super().__str__())
    @classmethod
    def from_sibling(cls, sib):
        """ Build a fresh Container (and copies of its whole child subtree)
        sharing the sibling's name, parent and WSDL reference. """
        logger.info("Creating new {} Element from sibling, {}".format(sib.__class__.__name__, sib.name))
        new = cls(sib.name, sib.parent, sib.ref, False)
        # Duplicate this process for each child element
        for child in sib.children:
            logger.debug("Appending child {} to new {}".format(child.name, sib.__class__.__name__))
            new.append_child(child.from_sibling(child))
        return new
    @property
    def setable(self):
        # Containers hold other elements; they never carry a value themselves.
        return self.__setable
    @property
    def children(self):
        return self.__children
    def append_child(self, child: Element):
        """ Register a child element: stored in the children list and exposed
        as an attribute named after the child (prefixed with "_" on clashes). """
        logger.debug("Appending child with name {} to {}".format(child.name, self.name))
        name = child.name
        if child.name in dir(self):
            name = "_"+child.name
        setattr(self, name, child)
        self.children.append(child)
class Repeatable(Base):
    """ Repeatable Elements are like normal elements, except their values are left as iterables, not scalars """
    def __init__(self, name, parent, wsdl_type, update_parent=True):
        super().__init__(name, parent, wsdl_type, update_parent)
        # Mangles to _Repeatable__repeatable; read back by the overriding
        # `repeatable` property below.
        self.__repeatable = True
        # We need to initialize the zeroth element in our array to be an Element with the same name
        self.__elements = list()
        self.append()
    def __getitem__(self, item: int) -> Element:
        return self.__elements[item]
    def __setitem__(self, key, value):
        # Integer subscripts assign the value of the underlying Element.
        if isinstance(key, int):
            self.__elements[key].value = value
        else:
            raise ValueError("Subscript values for {} object must be integers. Invalid: {}"
                             .format(self.__class__.__name__, value))
    def __str__(self):
        return "{4}{1}<!--- Repeatable: {0} --->{3}{2}".format(self.name,
                                                               self._str_indent,
                                                               "{0}".format(linesep).join(
                                                                   str(el) for el in self.elements),
                                                               linesep,
                                                               super().__str__())
    def __len__(self):
        return len(self.elements)
    @property
    def elements(self) -> list:
        return self.__elements
    @property
    def repeatable(self):
        return self.__repeatable
    @classmethod
    def from_sibling(cls, sib):
        # NOTE(review): the trailing `, 4` is passed to logger.info as a
        # %-format argument, but the message has no placeholder — it looks
        # like a leftover from an earlier logging API and produces a
        # logging formatting error when emitted.
        logger.info("Creating new {} Element from sibling, {}".format(sib.__class__.__name__, sib.name), 4)
        new = cls(sib.name, sib.parent, sib.ref, False)
        return new
    def append(self, value=None) -> None:
        """ Append a new child to the list, providing an optional value. If value is not provided, then an empty new
        element will be created (which could be set using .value later) """
        element = Element.from_sibling(self)
        logger.debug("Appending new Element to Repeatable {}".format(self.name))
        element.value = value
        logger.debug("Set new Element {} value to '{}'".format(self.name, value))
        self.__elements.append(element)
    def extend(self, *args) -> None:
        """ Extend the list of elements with new elements based on an iterable of values """
        logger.info("Extending new set of values to {}".format(self.name))
        for value in args:
            logger.debug("Creating new Element with value '{}' in '{}'".format(value, self.name))
            element = Element.from_sibling(self)
            element.value = value
            self.__elements.append(element)
class Collection(Repeatable, Container):
    """ Collections hold a list of repeatable Containers
    The Collection interface is defined by being repeatable but not setable."""
    def __init__(self, name, parent, wsdl_type, update_parent=True):
        super().__init__(name, parent, wsdl_type, update_parent)
        self.__repeatable = True
        self.__collection = {}
    def append(self, value=None):
        """ Append a new child Container to the list of elements for this Collection. Values may be provided as a
        dictionary, with keys matching the child element names. If not provided, then an empty container will be
        created.
        NOTE(review): ``value`` is currently accepted but never applied to
        the new Container; callers must set child element values themselves.
        """
        # Bug fix: the original default was the mutable `value=dict()`,
        # evaluated once and shared across every call; use a None sentinel.
        if value is None:
            value = {}
        logger.info("Appending new child Container to '{}'".format(self.name))
        container = Container.from_sibling(self)
        self.elements.append(container)
    def append_child(self, child: Element):
        # Register the child on this Collection and mirror it onto every
        # existing Container element.
        super().append_child(child)
        logger.debug("Appending new child {1} to elements in Collection {0}".format(self.name, child.name))
        for element in self.elements:
            if isinstance(element, Container):
                element.append_child(child)
    def __getitem__(self, item):
        """ Return the indicated index of child elements """
        return self.elements[item]
    def __str__(self):
        return "{1}<!--- Repeatable: {0} --->{2}".format(self.name, self._str_indent, linesep) \
               + "{0}".format(linesep).join(str(el) for el in self.elements)
class Attribute:
    """ A single XML attribute of an input Element — a thin abstraction over
    the Attribute object in soapy.wsdl.types. Values are stored pre-quoted
    (via quoteattr) so they can be rendered directly into a tag. """
    def __init__(self, name, value):
        self.__name = name
        # A None default means "no value"; anything else is quoted up front.
        self.__value = None if value is None else quoteattr(value)
    @property
    def value(self):
        return self.__value
    @value.setter
    def value(self, value):
        self.__value = quoteattr(str(value))
    @property
    def name(self):
        return self.__name
    def __str__(self):
        return self.name + "=" + str(self.value)
class Factory:
    """ Factory creates an input object class structure from the WSDL Type elements that represents the possible
    inputs to the provided Message. The Factory.root_element object represents the top-level message, and child
    input elements may be retrieved through normal attribute notation, using the . (dot) operator. You can also
    print() a Factory class to see a pseudo XML representation of the possible inputs, and their current values.
    """
    def __init__(self, root_element):
        logger.info("Initializing new Factory instance for root element '{}'".format(root_element))
        # `elements` holds (wsdl_element, wsdl_parent) pairs in traversal
        # order; `inputs` accumulates the constructed input objects.
        elements = list()
        inputs = list()
        logger.info("Building list of all elements for this Part")
        Factory._recursive_extract_elements(elements, root_element)
        for element in elements:
            name = element[0].name
            logger.debug("Processing WSDL element {}".format(name))
            # Find the parent InputElement to pass to the child if there is a parent
            if element[1] is not None:
                # NOTE(review): `input` shadows the builtin, and `inputs` is
                # appended to while being iterated; correctness relies on the
                # parent appearing in `inputs` before its children are reached.
                for input in inputs:
                    if element[1] is input.ref:
                        logger.debug("Setting parent element for {} to '{}'".format(name, input.name))
                        inputs.append(self._select_class(element)(name, input, element[0]))
            else:
                # The element with no WSDL parent becomes the root.
                self.root_element = self._select_class(element)(name, None, element[0])
                inputs.append(self.root_element)
        self.items = inputs
    def __getattr__(self, item):
        """ Enable attribute references on Factory object itself to return attributes on root_element instead """
        return getattr(self.root_element, item)
    def __str__(self):
        return str(self.root_element)
    def _select_class(self, element):
        """ Return the appropriate input class based on criteria
        Repeatable = setable and repeatable
        Element = setable
        Container = neither setable nor repeatable
        Collection = repeatable
        """
        setable = False
        repeatable = False
        # max_occurs of "unbounded" or > 1 means the element may repeat.
        if element[0].max_occurs == "unbounded" or int(element[0].max_occurs) > 1:
            repeatable = True
        # Leaf elements (no children) can carry a value directly.
        if len(element[0].element_children) == 0:
            setable = True
        switch = {
            (True, False): Element,
            (True, True): Repeatable,
            (False, False): Container,
            (False, True): Collection
        }
        logger.info("Creating {} type for input message element {}".format(
            switch[setable, repeatable].__name__,
            element[0].name))
        return switch[setable, repeatable]
    @staticmethod
    def _recursive_extract_elements(l: list, element, parent=None):
        """ Recursively iterates over soapy.wsdl.types Objects and extracts the
        TypeElement objects, as they are what actually represent input options """
        if element is None:
            return
        l.append((element, parent))
        for child in element.element_children:
            # Guard against self-referential children to avoid infinite recursion.
            if child.bs_element is element.bs_element:
                continue
            Factory._recursive_extract_elements(l, child, element)
| {
"repo_name": "qmuloadmin/soapy",
"path": "soapy/inputs.py",
"copies": "1",
"size": "17856",
"license": "bsd-3-clause",
"hash": 3534931422227848700,
"line_mean": 37.1578947368,
"line_max": 117,
"alpha_frac": 0.5624439964,
"autogenerated": false,
"ratio": 4.592592592592593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013139291659988582,
"num_lines": 456
} |
"""A class representing a microbial or tissue community."""
import re
import six
import six.moves.cPickle as pickle
import cobra
import pandas as pd
from sympy.core.singleton import S
from tqdm import tqdm
from micom.util import load_model, join_models, add_var_from_expression
from micom.logger import logger
from micom.media import default_excludes
from micom.problems import optcom, solve
_taxonomy_cols = ["id", "file"]
class Community(cobra.Model):
    """A community of models.

    This class represents a community of individual models. It was designed for
    microbial communities but may also be used for multi-tissue or tissue-cell
    mixture models as long as all individuals exist within a single enclosing
    compartment.
    """

    def __init__(self, taxonomy, id=None, name=None, rel_threshold=1e-6,
                 solver=None, progress=True):
        """Create a new community object.

        `micom` builds a community from a taxonomy which may simply be a list
        of model files in its simplest form. Usually, the taxonomy will contain
        additional information such as annotations for the individuals (for
        instance phylum, organisms or species) and abundances.

        Notes
        -----
        `micom` will automatically add exchange fluxes and a community
        objective maximizing the overall growth rate of the community.

        Parameters
        ----------
        taxonomy : pandas.DataFrame
            The taxonomy used for building the model. Must have at least the
            two columns "id" and "file" which specify an ID and the filepath
            for each model. Valid file extensions are ".pickle", ".xml",
            ".xml.gz" and ".json". If the taxonomy includes a column named
            "abundance" it will be used to quantify each individual in the
            community. If absent `micom` will assume all individuals are
            present in the same amount.
        id : str, optional
            The ID for the community. Should only contain letters and numbers,
            otherwise it will be formatted as such.
        name : str, optional
            The name for the community.
        rel_threshold : float < 1, optional
            The relative abundance threshold that will be used. Describes the
            smallest relative amount of an individual that will be considered
            non-zero. All individuals with a smaller relative amount will be
            omitted.
        solver : str, optional
            Which solver to use. Will default to cplex if available which is
            better suited for large problems.
        progress : bool, optional
            Show a progress bar.

        Attributes
        ----------
        objectives : dict
            A dict of {id: sympy_expression} denoting the individual growth
            objectives for each model in the community.
        """
        super(Community, self).__init__(id, name)
        logger.info("building new micom model {}.".format(id))
        # Prefer cplex for large problems when it is installed.
        if not solver:
            self.solver = ("cplex" if "cplex" in cobra.util.solver.solvers
                           else "glpk")
        else:
            self.solver = solver
        if not (isinstance(taxonomy, pd.DataFrame) and
                all(col in taxonomy.columns for col in _taxonomy_cols)):
            # NOTE(review): the adjacent string literals concatenate to
            # "...with atleast columns..." — a space is missing.
            raise ValueError("`taxonomy` must be a pandas DataFrame with at"
                             "least columns id and file :(")
        self._rtol = rel_threshold
        self._modification = None
        # Work on a copy so the caller's DataFrame is never mutated.
        taxonomy = taxonomy.copy()
        if "abundance" not in taxonomy.columns:
            taxonomy["abundance"] = 1
        # Normalize abundances and drop individuals below the threshold.
        taxonomy.abundance /= taxonomy.abundance.sum()
        logger.info("{} individuals with abundances below threshold".format(
            (taxonomy.abundance <= self._rtol).sum()))
        taxonomy = taxonomy[taxonomy.abundance > self._rtol]
        # Sanitize IDs: strip prohibited characters, turn whitespace into "_".
        if taxonomy.id.str.contains(r"[^A-Za-z0-9_]", regex=True).any():
            logger.warning("taxonomy IDs contain prohibited characters and"
                           " will be reformatted")
            taxonomy.id = taxonomy.id.replace(
                [r"[^A-Za-z0-9_\s]", r"\s+"], ["", "_"], regex=True)
        self.__taxonomy = taxonomy
        self.__taxonomy.index = self.__taxonomy.id
        obj = S.Zero
        self.objectives = {}
        index = self.__taxonomy.index
        index = tqdm(index, unit="models") if progress else index
        for idx in index:
            row = self.__taxonomy.loc[idx]
            if isinstance(row.file, list):
                model = join_models(row.file)
                if len(row.file) > 1:
                    logger.info("joined {} models".format(len(row.file)))
            else:
                model = load_model(row.file)
            # Namespace every reaction/metabolite with "__<id>" so several
            # models can coexist in a single cobra.Model.
            suffix = "__" + idx.replace(" ", "_").strip()
            logger.info("converting IDs for {}".format(idx))
            for r in model.reactions:
                r.global_id = r.id
                r.id += suffix
                r.community_id = idx
            for m in model.metabolites:
                m.global_id = m.id
                m.id += suffix
                m.compartment += suffix
                m.community_id = idx
            logger.info("adding reactions for {} to community".format(idx))
            self.add_reactions(model.reactions)
            o = self.solver.interface.Objective.clone(model.objective,
                                                      model=self.solver)
            # Community objective = sum of abundance-weighted individual
            # objectives; the raw expressions are kept in self.objectives.
            obj += o.expression * row.abundance
            self.objectives[idx] = o.expression
            species_obj = self.problem.Constraint(
                o.expression, name="objective_" + idx, lb=0.0)
            self.add_cons_vars([species_obj])
            self.__add_exchanges(model.reactions, row)
            self.solver.update()  # to avoid dangling refs due to lazy add
        com_obj = add_var_from_expression(self, "community_objective",
                                          obj, lb=0)
        self.objective = self.problem.Objective(com_obj, direction="max")

    def __add_exchanges(self, reactions, info, exclude=default_excludes,
                        external_compartment="e"):
        """Add exchange reactions for a new model."""
        for r in reactions:
            # Some sanity checks for whether the reaction is an exchange
            ex = external_compartment + "__" + r.community_id
            if (not r.boundary or any(bad in r.id for bad in exclude) or
                    ex not in r.compartments):
                continue
            if not r.id.lower().startswith("ex"):
                logger.warning(
                    "Reaction %s seems to be an exchange " % r.id +
                    "reaction but its ID does not start with 'EX_'...")
            # Exports have exactly one reactant; otherwise flip the bounds so
            # they are expressed in the export direction.
            export = len(r.reactants) == 1
            lb, ub = r.bounds if export else (-r.upper_bound, -r.lower_bound)
            met = (r.reactants + r.products)[0]
            # Derive the shared medium metabolite ID from the species one.
            medium_id = re.sub("_{}$".format(met.compartment), "", met.id)
            if medium_id in exclude:
                continue
            medium_id += "_m"
            if medium_id == met.id:
                medium_id += "_medium"
            if medium_id not in self.metabolites:
                # If metabolite does not exist in medium add it to the model
                # and also add an exchange reaction for the medium
                logger.info("adding metabolite %s to external medium" %
                            medium_id)
                medium_met = met.copy()
                medium_met.id = medium_id
                medium_met.compartment = "m"
                medium_met.global_id = medium_id
                medium_met.community_id = "medium"
                ex_medium = cobra.Reaction(
                    id="EX_" + medium_met.id,
                    name=medium_met.id + " medium exchange",
                    lower_bound=lb,
                    upper_bound=ub)
                ex_medium.add_metabolites({medium_met: -1})
                ex_medium.global_id = ex_medium.id
                ex_medium.community_id = "medium"
                self.add_reactions([ex_medium])
            else:
                logger.info("updating import rate for external metabolite %s" %
                            medium_id)
                medium_met = self.metabolites.get_by_id(medium_id)
                ex_medium = self.reactions.get_by_id("EX_" + medium_met.id)
                # Widen bounds so the medium exchange covers all members.
                ex_medium.lower_bound = min(lb, ex_medium.lower_bound)
                ex_medium.upper_bound = max(ub, ex_medium.upper_bound)
            # Couple the species exchange to the medium metabolite, scaled by
            # the species abundance.
            coef = info.abundance
            r.add_metabolites({medium_met: coef if export else -coef})

    def __update_exchanges(self):
        """Update exchanges."""
        logger.info("updating exchange reactions for %s" % self.id)
        for met in self.metabolites.query(lambda x: x.compartment == "m"):
            for r in met.reactions:
                if r.boundary:
                    continue
                # Rescale stoichiometry to the (possibly new) abundance.
                coef = self.__taxonomy.loc[r.community_id, "abundance"]
                if met in r.products:
                    r.add_metabolites({met: coef}, combine=False)
                else:
                    r.add_metabolites({met: -coef}, combine=False)

    def __update_community_objective(self):
        """Update the community objective."""
        logger.info("updating the community objective for %s" % self.id)
        v = self.variables.community_objective
        const = self.constraints.community_objective_equality
        self.remove_cons_vars([const])
        # Rebuild sum_i abundance_i * objective_i and tie it back to the
        # community objective variable via an equality constraint.
        com_obj = S.Zero
        for sp, expr in self.objectives.items():
            ab = self.__taxonomy.loc[sp, "abundance"]
            com_obj += ab * expr
        const = self.problem.Constraint(v - com_obj, lb=0, ub=0,
                                        name="community_objective_equality")
        self.add_cons_vars([const])

    def optimize_single(self, id):
        """Optimize growth rate for one individual.

        `optimize_single` will calculate the maximal growth rate for one
        individual member of the community.

        Notes
        -----
        This might well mean that growth rates for all other individuals are
        low since the individual may use up all available resources.

        Parameters
        ----------
        id : str or int
            The ID of the individual to be optimized, or its positive
            positional index in the taxonomy.

        Returns
        -------
        float
            The maximal growth rate for the given species.
        """
        if isinstance(id, six.string_types):
            if id not in self.__taxonomy.index:
                raise ValueError(id + " not in taxonomy!")
            info = self.__taxonomy.loc[id]
        elif isinstance(id, int) and id >= 0 and id < len(self.__taxonomy):
            info = self.__taxonomy.iloc[id]
        else:
            raise ValueError("`id` must be an id or positive index!")
        logger.info("optimizing for {}".format(info.name))
        obj = self.objectives[info.name]
        # Swap the objective inside a context so it is reverted afterwards.
        with self as m:
            m.objective = obj
            m.solver.optimize()
            return m.objective.value

    def optimize_all(self, fluxes=False, progress=False):
        """Return solutions for individually optimizing each model.

        Notes
        -----
        This might well mean that growth rates for all other individuals are
        low since the individual may use up all available resources. As a
        consequence the reported growth rates may usually never be obtained
        all at once.

        Parameters
        ----------
        fluxes : boolean, optional
            Whether to return all fluxes. Defaults to just returning the
            maximal growth rate.
        progress : boolean, optional
            Whether to show a progress bar.

        Returns
        -------
        pandas.Series
            The maximal growth rate for each species.
        """
        index = self.__taxonomy.index
        if progress:
            index = tqdm(self.__taxonomy.index, unit="optimizations")
        individual = (self.optimize_single(id) for id in index)
        return pd.Series(individual, self.__taxonomy.index)

    def optimize(self, slim=True):
        """Optimize the model using flux balance analysis.

        Parameters
        ----------
        slim : boolean
            Whether to return a slim solution which does not contain fluxes,
            just growth rates.

        Returns
        -------
        micom.CommunitySolution
            The solution after optimization or None if there is no optimum.
        """
        # NOTE(review): the model is optimized here and presumably again
        # inside `solve` — looks like a solver-state priming step; confirm.
        self.solver.optimize()
        with self:
            solution = solve(self, fluxes=not slim)
        return solution

    @property
    def abundances(self):
        """pandas.Series: The normalized abundances.

        Setting this attribute will also trigger the appropriate updates in
        the exchange fluxes and the community objective.
        """
        return self.__taxonomy.abundance

    @abundances.setter
    def abundances(self, value):
        try:
            self.__taxonomy.abundance = value
        except Exception:
            raise ValueError("value must be an iterable with an entry for "
                             "each species/tissue")
        logger.info("setting new abundances for %s" % self.id)
        ab = self.__taxonomy.abundance
        self.__taxonomy.abundance /= ab.sum()
        # NOTE(review): `ab` aliases the abundance column, so after the
        # in-place normalization above `small` is computed from normalized
        # values. Entries below the threshold are clamped to the threshold
        # without a subsequent renormalization — confirm this is intended.
        small = ab < self._rtol
        logger.info("adjusting abundances for %s to %g" %
                    (str(self.__taxonomy.index[small]), self._rtol))
        self.__taxonomy.loc[small, "abundance"] = self._rtol
        self.__update_exchanges()
        self.__update_community_objective()

    @property
    def taxonomy(self):
        """pandas.DataFrame: The taxonomy used within the model.

        This attribute only returns a copy.
        """
        return self.__taxonomy.copy()

    @property
    def modification(self):
        """str: Denotes modifications to the model currently applied.

        Will be None if the community is unmodified.
        """
        return self._modification

    @modification.setter
    @cobra.util.context.resettable
    def modification(self, mod):
        # `resettable` reverts the change when leaving a `with model:` block.
        self._modification = mod

    @property
    def exchanges(self):
        """list: Returns all exchange reactions in the model.

        Uses several heuristics based on the reaction name and compartments
        to exclude reactions that are *not* exchange reactions.
        """
        return self.reactions.query(
            lambda x: x.boundary and not
            any(ex in x.id for ex in default_excludes) and
            "m" in x.compartments)

    def optcom(self, strategy="lagrangian", min_growth=0.1, tradeoff=0.5,
               fluxes=False, pfba=True):
        """Run OptCom for the community.

        OptCom methods are a group of optimization procedures to find community
        solutions that provide a tradeoff between the cooperative community
        growth and the egoistic growth of each individual [#c1]_. `micom`
        provides several strategies that can be used to find optimal solutions:

        - "linear": Applies a lower bound for the individual growth rates and
          finds the optimal community growth rate. This is the fastest methods
          but also ignores that individuals might strive to optimize their
          individual growth instead of community growth.
        - "lagrangian": Optimizes a joint objective containing the community
          objective (maximized) as well as a cooperativity cost which
          represents the distance to the individuals "egoistic" maximum growth
          rate (minimized). Requires the `tradeoff` parameter. This method is
          still relatively fast and does require only few additional variables.
        - "linear lagrangian": The same as "lagrangian" only with a linear
          representation of the cooperativity cost (absolute value).
        - "moma": Minimization of metabolic adjustment. Simultaneously
          optimizes the community objective (maximize) and the cooperativity
          cost (minimize). This method finds an exact maximum but doubles the
          number of required variables, thus being slow.
        - "lmoma": The same as "moma" only with a linear
          representation of the cooperativity cost (absolute value).
        - "original": Solves the multi-objective problem described in [#c1]_.
          Here, the community growth rate is maximized simultaneously with all
          individual growth rates. Note that there are usually many
          Pareto-optimal solutions to this problem and the method will only
          give one solution. This is also the slowest method.

        Parameters
        ----------
        community : micom.Community
            The community to optimize.
        strategy : str
            The strategy used to solve the OptCom formulation. Defaults to
            "lagrangian" which gives a decent tradeoff between speed and
            correctness.
        min_growth : float or array-like
            The minimal growth rate required for each individual. May be a
            single value or an array-like object with the same length as there
            are individuals.
        tradeoff : float in [0, 1]
            Only used for lagrangian strategies. Must be between 0 and 1 and
            describes the strength of the cooperativity cost / egoism. 1 means
            optimization will only minimize the cooperativity cost and zero
            means optimization will only maximize the community objective.
        fluxes : boolean
            Whether to return the fluxes as well.
        pfba : boolean
            Whether to obtain fluxes by parsimonious FBA rather than
            "classical" FBA.

        Returns
        -------
        micom.CommunitySolution
            The solution of the optimization. If fluxes==False will only
            contain the objective value, community growth rate and individual
            growth rates.

        References
        ----------
        .. [#c1] OptCom: a multi-level optimization framework for the metabolic
           modeling and analysis of microbial communities.
           Zomorrodi AR, Maranas CD. PLoS Comput Biol. 2012 Feb;8(2):e1002363.
           doi: 10.1371/journal.pcbi.1002363, PMID: 22319433
        """
        return optcom(self, strategy, min_growth, tradeoff, fluxes, pfba)

    def to_pickle(self, filename):
        """Save a community in serialized form.

        Parameters
        ----------
        filename : str
            Where to save the pickled community.

        Returns
        -------
        Nothing
        """
        with open(filename, mode="wb") as out:
            pickle.dump(self, out)
def load_pickle(filename):
    """Restore a pickled community model from disk.

    Parameters
    ----------
    filename : str
        Path of the file the community was pickled to.

    Returns
    -------
    micom.Community
        The community model that was stored in the file.
    """
    # Counterpart of Community.to_pickle; reads the binary pickle stream.
    with open(filename, mode="rb") as fh:
        community = pickle.load(fh)
    return community
| {
"repo_name": "cdiener/micom",
"path": "micom/community.py",
"copies": "1",
"size": "19333",
"license": "apache-2.0",
"hash": -1029667247200642300,
"line_mean": 38.944214876,
"line_max": 79,
"alpha_frac": 0.5881135882,
"autogenerated": false,
"ratio": 4.43519155769672,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.552330514589672,
"avg_score": null,
"num_lines": null
} |
"""A class representing a processing job: for one sample and one sample-to-detector distance"""
import time
import traceback
from datetime import datetime
from multiprocessing.queues import Queue
from multiprocessing.synchronize import Lock, Event
from typing import List, Optional, Any
import h5py
import numpy as np
from sastool import ErrorValue
from sastool.classes2 import Curve
from sastool.io.credo_cct import Header, Exposure
from . import outliers
from .backgroundprocedure import BackgroundProcedure, Results, ProcessingError, UserStopException
from .correlmatrix import correlmatrix_cython
from .loader import Loader
from .matrixaverager import MatrixAverager
class ProcessingJobResults(Results):
    # Per-stage timing statistics in seconds (time.monotonic() deltas),
    # filled in by the corresponding ProcessingJob._* methods.
    time_loadheaders: float = 0
    time_loadexposures: float = 0
    time_outlierdetection: float = 0
    time_averaging: float = 0
    time_averaging_header: float = 0
    time_averaging_exposures: float = 0
    time_averaging_curves: float = 0
    time_output: float = 0
    time_output_waitforlock: float = 0
    time_output_write: float = 0
    # Newly detected bad FSNs (excluding the initially supplied ones);
    # None until the job finished successfully.
    badfsns: Optional[List[int]] = None
class ProcessingJob(BackgroundProcedure):
    """A separate process for processing (summarizing, azimuthally averaging, finding outliers) of a range
    of exposures belonging to the same sample, measured at the same sample-to-detector distance.
    """
    # Allowed error-propagation names; their *indices* in this list are what
    # sastool's radial_average expects.
    _errorpropagationtypes = ['Weighted', 'Average', 'Squared (Gaussian)', 'Conservative']
    headers: List[Header] = None
    curves: List[Curve] = None
    # Curves/FSNs that enter the correlation-matrix outlier analysis
    # (i.e. those not already marked bad on input).
    curvesforcmap: List[Curve] = None
    fsnsforcmap: List[int] = None
    exposures: List[Exposure] = None
    _loader: Loader = None
    # When True, keep all 2D exposures in memory instead of re-loading them
    # during averaging.
    bigmemorymode: bool = False
    badfsns: List[int] = None
    initialBadfsns: List[int] = None

    def __init__(self, jobid: Any, h5writerLock: Lock, killswitch: Event,
                 resultsqueue: Queue, h5file: str, rootdir: str,
                 fsnlist: List[int], badfsns: List[int],
                 ierrorprop: str, qerrorprop: str, outliermethod: str, outliermultiplier: float, logcmat: bool,
                 qrange: Optional[np.ndarray], bigmemorymode: bool = False):
        """Validate the configuration and store it on the job instance.

        Raises ValueError for unknown error-propagation or outlier-method
        names.
        """
        super().__init__(jobid, h5writerLock, killswitch, resultsqueue, h5file)
        self.fsnlist = fsnlist
        self._loader = Loader(rootdir)
        self.initialBadfsns = badfsns
        self.badfsns = list(self.initialBadfsns)  # make a copy: we will modify this.
        if ierrorprop in self._errorpropagationtypes:
            self.ierrorprop = ierrorprop
        else:
            raise ValueError('Invalid value for ierrorprop')
        if qerrorprop in self._errorpropagationtypes:
            self.qerrorprop = qerrorprop
        else:
            raise ValueError('Invalid value for qerrorprop')
        if outliermethod in ['Z-score', 'Modified Z-score', 'Interquartile Range']:
            self.outliermethod = outliermethod
        else:
            raise ValueError('Invalid value for outlier method')
        self.outliermultiplier = outliermultiplier
        self.logcmat = logcmat
        self.qrange = qrange
        self.bigmemorymode = bigmemorymode
        self.result = ProcessingJobResults()

    def _loadheaders(self):
        """Load header metadata for every requested FSN.

        Missing header files are skipped silently; raises ProcessingError if
        the surviving headers span more than one (sample, distance) pair.
        """
        # first load all header files
        t0 = time.monotonic()
        self.headers = []
        self.sendProgress('Loading headers {}/{}'.format(0, len(self.fsnlist)),
                          total=len(self.fsnlist), current=0)
        for i, fsn in enumerate(self.fsnlist, start=1):
            try:
                h = self._loader.loadHeader(fsn)
                if h.fsn != fsn:
                    raise ValueError('FSN in header ({}) is different than in the filename ({}).'.format(h.fsn, fsn))
                self.headers.append(h)
                self.sendProgress('Loading headers {}/{}'.format(i, len(self.fsnlist)),
                                  total=len(self.fsnlist), current=i)
            except FileNotFoundError:
                continue
        # check if all headers correspond to the same sample and distance
        if len({(h.title, float(h.distance)) for h in self.headers}) > 1:
            raise ProcessingError('There are more samples/distances!')
        self.result.time_loadheaders = time.monotonic() - t0

    def _loadexposures(self):
        """Load all exposures, i.e. 2D images. Do radial averaging as well."""
        if not self.headers:
            return
        t0 = time.monotonic()
        # now load all exposures
        self.exposures = []
        self.curvesforcmap = []
        self.fsnsforcmap = []
        self.curves = []
        self.sendProgress('Loading exposures {}/{}'.format(0, len(self.headers)),
                          total=len(self.headers), current=0)
        qrange = self.qrange
        for i, h in enumerate(self.headers, start=1):
            try:
                ex = self._loader.loadExposure(h.fsn)
                radavg = ex.radial_average(
                    qrange=qrange,
                    errorpropagation=self._errorpropagationtypes.index(self.ierrorprop),
                    abscissa_errorpropagation=self._errorpropagationtypes.index(self.qerrorprop),
                )
                # If no q-range was given, reuse the first curve's abscissa
                # for all subsequent curves so they stay comparable.
                if qrange is None:
                    qrange = radavg.q
                if h.fsn not in self.badfsns:
                    # do not use this curve for correlation matrix analysis
                    self.curvesforcmap.append(radavg)
                    self.fsnsforcmap.append(h.fsn)
                self.curves.append(radavg)
                if self.bigmemorymode:
                    self.exposures.append(ex)
                self.sendProgress('Loading exposures {}/{}'.format(i, len(self.headers)),
                                  total=len(self.headers), current=i)
            except FileNotFoundError as fnfe:
                raise ProcessingError('Cannot find file: {}'.format(fnfe.args[0]))
        self.result.time_loadexposures = time.monotonic() - t0

    def _checkforoutliers(self):
        """Build the correlation matrix of the good curves and extend
        self.badfsns with the FSNs flagged by the configured outlier test."""
        t0 = time.monotonic()
        self.sendProgress('Testing for outliers...', total=0, current=0)
        intensities = np.vstack([c.Intensity for c in self.curvesforcmap]).T
        errors = np.vstack([c.Error for c in self.curvesforcmap]).T
        cmat = correlmatrix_cython(intensities, errors, self.logcmat)
        # The diagonal holds the per-curve discrepancy scores.
        discrp = np.diagonal(cmat)
        # find outliers
        if self.outliermethod in ['Interquartile Range', 'Tukey_IQR', 'Tukey', 'IQR']:
            bad = outliers.outliers_Tukey_iqr(discrp, self.outliermultiplier)
        elif self.outliermethod in ['Z-score']:
            bad = outliers.outliers_zscore(discrp, self.outliermultiplier)
        elif self.outliermethod in ['Modified Z-score', 'Iglewicz-Hoaglin']:
            bad = outliers.outliers_zscore_mod(discrp, self.outliermultiplier)
        else:
            assert False
        self.badfsns.extend([self.fsnsforcmap[i] for i in bad])
        self.correlmatrix = cmat
        self.result.time_outlierdetection = time.monotonic() - t0

    def _summarize(self):
        """Calculate average scattering pattern and curve"""
        # first average the headers, however fool this sounds...
        t0 = time.monotonic()
        self.sendProgress('Collecting header data for averaged image...')
        t1 = time.monotonic()
        self.averagedHeader = {}
        for field in ['title', 'distance', 'distancedecrease', 'pixelsizex', 'pixelsizey', 'wavelength',
                      'sample_category']:
            # ensure that these fields are unique
            avg = {getattr(h, field) if not isinstance(getattr(h, field), ErrorValue) else getattr(h, field).val for h
                   in self.headers if h.fsn not in self.badfsns}
            if len(avg) != 1:
                raise ValueError(
                    'Field {} must be unique. Found the following different values: {}'.format(field, ', '.join(avg)))
            self.averagedHeader[field] = avg.pop()
        for field in ['date', 'startdate']:
            # take the earliest of these fields
            self.averagedHeader[field] = min([getattr(h, field) for h in self.headers if h.fsn not in self.badfsns])
        for field in ['enddate']:
            # take the maximum of these fields
            self.averagedHeader[field] = max([getattr(h, field) for h in self.headers if h.fsn not in self.badfsns])
        for field in ['exposuretime', ]:
            # take the sum of these fields
            self.averagedHeader[field] = sum([getattr(h, field) for h in self.headers if h.fsn not in self.badfsns])
        for field in ['absintfactor', 'beamcenterx', 'beamcentery', 'flux', 'samplex', 'sampley', 'temperature',
                      'thickness', 'transmission', 'vacuum']:
            # take the weighted average of these fields
            try:
                values = [getattr(h, field) for h in self.headers if h.fsn not in self.badfsns]
            except KeyError:
                # can happen for absintfactor.
                continue
            values = [v for v in values if isinstance(v, float) or isinstance(v, ErrorValue)]
            val = np.array([v.val if isinstance(v, ErrorValue) else v for v in values])
            err = np.array([v.err if isinstance(v, ErrorValue) else np.nan for v in values])
            # Fall back to unit weights when no usable uncertainties exist;
            # otherwise replace non-positive/NaN errors with the smallest
            # positive one so the 1/err**2 weights stay finite.
            if np.isfinite(err).sum() == 0:
                err = np.ones_like(val)
            elif (err > 0).sum() == 0:
                err = np.ones_like(val)
            else:
                minposerr = np.nanmin(err[err > 0])
                err[err <= 0] = minposerr
                err[~np.isfinite(err)] = minposerr
            self.averagedHeader[field] = ErrorValue(
                (val / err ** 2).sum() / (1 / err ** 2).sum(),
                1 / (1 / err ** 2).sum() ** 0.5
            )
        for field in ['fsn', 'fsn_absintref', 'fsn_emptybeam', 'maskname', 'project', 'username']:
            # take the first value
            try:
                self.averagedHeader[field] = [getattr(h, field) for h in self.headers if h.fsn not in self.badfsns][0]
            except KeyError:
                # can happen e.g. for fsn_emptybeam and fsn_absintref
                continue
        # make a true Header instance
        avgheader = Header()
        for field in self.averagedHeader:
            setattr(avgheader, field, self.averagedHeader[field])
        self.result.time_averaging_header = time.monotonic() - t1
        # summarize 2D and 1D datasets
        t1 = time.monotonic()
        self.sendProgress('Averaging exposures...', current=0, total=0)
        maskavg = None
        avg = MatrixAverager(self.ierrorprop)
        for i, header in enumerate(self.headers):
            self.sendProgress('Averaging exposures {}/{}...'.format(i, len(self.headers)),
                              current=i, total=len(self.headers))
            if header.fsn in self.badfsns:
                continue
            # Reuse the in-memory exposure when bigmemorymode kept it,
            # otherwise re-load it from disk.
            if self.exposures:
                ex = self.exposures[i]
            else:
                ex = self._loader.loadExposure(header.fsn)
            # A pixel stays valid only if it is valid in every mask.
            if maskavg is None:
                maskavg = ex.mask.copy()
            else:
                maskavg = np.logical_and(ex.mask != 0, maskavg != 0)
            avg.add(ex.intensity, ex.error)
        avgintensity, avgerr = avg.get()
        self.averaged2D = Exposure(avgintensity, avgerr, avgheader, maskavg)
        self.result.time_averaging_exposures = time.monotonic() - t1
        t1 = time.monotonic()
        self.sendProgress('Averaging curves...', total=0, current=0)
        avgq = MatrixAverager(self.qerrorprop)
        avgi = MatrixAverager(self.ierrorprop)
        for h, c, i in zip(self.headers, self.curves, range(len(self.headers))):
            self.sendProgress('Averaging curves {}/{}...'.format(i, len(self.headers)), total=len(self.headers),
                              current=i)
            if h.fsn in self.badfsns:
                continue
            avgq.add(c.q, c.qError)
            avgi.add(c.Intensity, c.Error)
        qavg, qErravg = avgq.get()
        Iavg, Erravg = avgi.get()
        self.averaged1D = Curve(qavg, Iavg, Erravg, qErravg)
        # Also radially average the averaged 2D pattern for comparison.
        self.reintegrated1D = self.averaged2D.radial_average(
            self.qrange, errorpropagation=self._errorpropagationtypes.index(self.ierrorprop),
            abscissa_errorpropagation=self._errorpropagationtypes.index(self.qerrorprop)
        )
        self.result.time_averaging_curves = time.monotonic() - t1
        self.result.time_averaging = time.monotonic() - t0

    def _output(self):
        """Write results in the .h5 file."""
        t0 = time.monotonic()
        self.sendProgress('Waiting for HDF5 writer lock...')
        with self.h5WriterLock:
            t1 = time.monotonic()
            self.sendProgress('Writing HDF5 file...')
            with h5py.File(self.h5file, mode='a') as h5:  # mode=='a': read/write if exists, create otherwise
                # h5.swmr_mode = True
                # save all masks
                masks = h5.require_group('masks')
                for name, mask in self._loader.masks.items():
                    ds = masks.require_dataset(name, mask.shape, mask.dtype, exact=True)
                    # NOTE(review): rebinding the local `ds` does NOT write
                    # any data into the HDF5 dataset — this looks like it
                    # should be `ds[...] = mask`. Confirm and fix.
                    ds = mask
                # create Samples/<samplename>/<dist> group hierarchy if not exists
                # NOTE(review): `samples` is an unused alias of h5['Samples'].
                samples = h5.require_group('Samples')
                samplegroup = h5['Samples'].require_group(self.headers[0].title)
                try:
                    del samplegroup['{:.2f}'.format(float(self.headers[0].distance))]
                except KeyError:
                    pass
                distgroup = samplegroup.create_group('{:.2f}'.format(float(self.headers[0].distance)))
                # Set attributes of the <dist> group from the averaged header.
                for key, value in self.averagedHeader.items():
                    if isinstance(value, ErrorValue):
                        distgroup.attrs[key] = value.val
                        distgroup.attrs[key + '.err'] = value.err
                    elif isinstance(value, datetime):
                        distgroup.attrs[key] = str(value)
                    else:
                        distgroup.attrs[key] = value
                # save datasets
                distgroup.create_dataset('image', data=self.averaged2D.intensity, compression=self.h5compression)
                distgroup.create_dataset('image_uncertainty', data=self.averaged2D.error,
                                         compression=self.h5compression)
                distgroup.create_dataset('correlmatrix', data=self.correlmatrix, compression=self.h5compression)
                distgroup.create_dataset('mask', data=self.averaged2D.mask, compression=self.h5compression)
                distgroup['badfsns'] = np.array(self.badfsns)
                distgroup.create_dataset('curve_averaged', data=np.vstack(
                    (self.averaged1D.q, self.averaged1D.Intensity, self.averaged1D.Error, self.averaged1D.qError)).T,
                    compression=self.h5compression)
                distgroup.create_dataset('curve_reintegrated',
                                         data=np.vstack((self.reintegrated1D.q, self.reintegrated1D.Intensity,
                                                         self.reintegrated1D.Error, self.reintegrated1D.qError)).T,
                                         compression=self.h5compression)
                distgroup['curve'] = h5py.SoftLink('curve_averaged')
                # save all curves
                try:
                    del distgroup['curves']
                except KeyError:
                    pass
                curvesgroup = distgroup.create_group('curves')
                # we will write outlier results per-curve. Do the common calculations beforehand.
                diag = np.diagonal(self.correlmatrix)
                zscore = outliers.zscore(diag)
                zscore_mod = outliers.zscore_mod(diag)
                bad_zscore = outliers.outliers_zscore(diag, self.outliermultiplier)
                bad_zscore_mod = outliers.outliers_zscore_mod(diag, self.outliermultiplier)
                bad_iqr = outliers.outliers_Tukey_iqr(diag, self.outliermultiplier)
                for h, c in zip(self.headers, self.curves):
                    # NOTE(review): this duplicate check is invariant over the
                    # loop; it could be hoisted above the loop (it is O(n^2)
                    # per call).
                    self.checkDuplicateFSNs()
                    ds = curvesgroup.create_dataset(str(h.fsn), data=np.vstack((c.q, c.Intensity, c.Error, c.qError)).T,
                                                    compression=self.h5compression)
                    for field in ['absintfactor', 'beamcenterx', 'beamcentery', 'date', 'distance', 'distancedecrease',
                                  'enddate', 'exposuretime', 'flux', 'fsn', 'fsn_absintref', 'fsn_emptybeam',
                                  'maskname',
                                  'pixelsizex', 'pixelsizey', 'project', 'samplex', 'samplex_motor',
                                  'sampley', 'sampley_motor', 'startdate', 'temperature', 'thickness', 'title',
                                  'transmission',
                                  'username', 'vacuum', 'wavelength']:
                        try:
                            value = getattr(h, field)
                        except KeyError:
                            continue
                        if isinstance(value, datetime):
                            ds.attrs[field] = str(value)
                        elif isinstance(value, ErrorValue):
                            ds.attrs[field] = value.val
                            ds.attrs[field + '.err'] = value.err
                        elif value is None:
                            ds.attrs[field] = 'None'
                        else:
                            ds.attrs[field] = value
                    if h.fsn in self.initialBadfsns:
                        # Initially bad curves never entered the correlation
                        # matrix, so no per-curve statistics exist for them.
                        ds.attrs['correlmat_bad'] = 1
                        ds.attrs['correlmat_discrp'] = np.nan
                        ds.attrs['correlmat_zscore'] = np.nan
                        ds.attrs['correlmat_zscore_mod'] = np.nan
                        ds.attrs['correlmat_bad_zscore'] = 1
                        ds.attrs['correlmat_bad_zscore_mod'] = 1
                        ds.attrs['correlmat_bad_iqr'] = 1
                    else:
                        idx = self.fsnsforcmap.index(h.fsn)
                        ds.attrs['correlmat_bad'] = int(h.fsn in self.badfsns)
                        ds.attrs['correlmat_discrp'] = diag[idx]
                        ds.attrs['correlmat_zscore'] = zscore[idx]
                        ds.attrs['correlmat_zscore_mod'] = zscore_mod[idx]
                        ds.attrs['correlmat_bad_zscore'] = int(idx in bad_zscore)
                        ds.attrs['correlmat_bad_zscore_mod'] = int(idx in bad_zscore_mod)
                        ds.attrs['correlmat_bad_iqr'] = int(idx in bad_iqr)
            self.result.time_output_write = time.monotonic() - t1
        self.result.time_output_waitforlock = t1 - t0
        self.result.time_output = time.monotonic() - t0

    def checkDuplicateFSNs(self):
        # check if we have duplicate FSNs:
        fsns = sorted([h.fsn for h in self.headers])
        duplicate = [f for f in fsns if fsns.count(f) > 1]
        if duplicate:
            raise ValueError('Duplicate FSN(s) {}'.format(', '.join([str(f) for f in duplicate])))

    def _execute(self):
        """Run the whole pipeline: load, outlier-test, average, write.

        Populates self.result; on failure sets result.success=False and
        reports the error through sendError.
        """
        try:
            t0 = time.monotonic()
            self._loadheaders()
            self._loadexposures()
            self._checkforoutliers()
            self._summarize()
            self._output()
            # Only the *newly* found bad FSNs are reported back.
            self.result.badfsns = [b for b in self.badfsns if b not in self.initialBadfsns]
            self.result.success = True
            # NOTE: time_total is only set on the success path.
            self.result.time_total = time.monotonic() - t0
        except UserStopException:
            self.result.success = False
            self.result.status = 'User break'
            self.sendError('User stop requested')
        except Exception as exc:
            self.result.success = False
            self.result.status = 'Error'
            self.sendError(str(exc), traceback=traceback.format_exc())
| {
"repo_name": "awacha/cct",
"path": "cct/core/processing/processingjob.py",
"copies": "1",
"size": "20066",
"license": "bsd-3-clause",
"hash": 6661031751666146000,
"line_mean": 49.9289340102,
"line_max": 120,
"alpha_frac": 0.5656832453,
"autogenerated": false,
"ratio": 3.9593528018942385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5025036047194239,
"avg_score": null,
"num_lines": null
} |
"A class representing individuals"
from pydigree.recombination import recombine
from pydigree.paths import kinship
from pydigree.common import flatten
from pydigree.genotypes import LabelledAlleles
from pydigree.exceptions import IterationError
from pydigree.phenotypes import Phenotypes
# TODO: Move this somewhere more useful
missing_genotype = (0, 0)


def is_missing_genotype(g):
    """Return True when *g* is the missing-genotype sentinel ``(0, 0)``."""
    return missing_genotype == g
class Individual(object):

    '''
    An object for working with the phenotypes and genotypes of an individual
    in a genetic study or simulation
    '''

    def __init__(self, population, label, father=None, mother=None, sex=None):
        """
        Create an individual.

        :param population: the population the individual is observed in
        :param label: identifier for the individual within the population
        :param father: father Individual (None for founders)
        :param mother: mother Individual (None for founders)
        :param sex: 0 for male, 1 for female
        """
        # Every individual is observed within a population with certain
        # genotypes available. This makes recombination book-keeping easier.
        self.population = population
        # BUG FIX: the original guarded this with ``if id:``, which tested the
        # ``id`` *builtin* (always truthy) rather than the label, so the label
        # was unconditionally assigned anyway. Assign it directly.
        self.label = label
        self.father = father
        self.mother = mother
        self.sex = sex  # 0:M 1:F
        self.pedigree = None
        self.genotypes = None
        self.observed_genos = False
        self.phenotypes = Phenotypes()
        self.attrib = {}
        self.children = []
        # Let any linked parents know about this new child
        if isinstance(self.father, Individual):
            self.father.register_child(self)
        if isinstance(self.mother, Individual):
            self.mother.register_child(self)

    def __str__(self):
        try:
            pop, lab = self.full_label
            if self.is_founder():
                return 'Individual {}:{} (FOUNDER)'.format(pop, lab)
            else:
                return 'Individual {}:{} (F:{},M:{})'.format(pop, lab,
                                                             self.father.label,
                                                             self.mother.label)
        except AttributeError:
            # Not attached to a population/pedigree yet
            return 'Individual %s (Unlinked)' % self.label

    def __repr__(self):
        return self.__str__()

    def register_child(self, child):
        '''
        Add a child for this individual

        :param child: child object
        :type child: Individual
        '''
        self.children.append(child)

    def register_with_parents(self):
        ''' Inform the parent Individuals that this is their child '''
        if self.is_founder():
            # Founders have no parents to notify
            return
        self.father.register_child(self)
        self.mother.register_child(self)

    @property
    def full_label(self):
        '''
        :return: the population label and individual label
        :rtype: 2-tuple
        '''
        return (self.population.label, self.label)

    # Functions about genotypes
    #
    @property
    def chromosomes(self):
        '''
        Returns a list of the individual's ChromosomeTemplate objects.

        The pedigree's chromosomes take precedence over the population's
        when the individual belongs to a pedigree.
        '''
        if self.pedigree is not None:
            return self.pedigree.chromosomes
        else:
            return self.population.chromosomes

    def _init_genotypes(self, blankchroms=True, dtype=None, sparse=True):
        """
        Initializes genotypes so that all genotypes are missing if blankchroms
        is true, otherwise, just sets what would be the chromosome to None
        """
        gt = []
        if blankchroms:
            # One (paternal, maternal) pair of empty chromatids per chromosome
            gt = [(chrom.empty_chromosome(dtype=dtype, sparse=sparse),
                   chrom.empty_chromosome(dtype=dtype, sparse=sparse))
                  for chrom in self.chromosomes]
        else:
            gt = [[None, None] for chrom in self.chromosomes]
        self.genotypes = gt

    def has_genotypes(self):
        """
        Tests if individual has genotypes

        :returns: genotype available
        :rtype: bool
        """
        return self.genotypes is not None

    def __fail_on_observed_genos(self):
        # Guard: refuse to overwrite real (observed) genotype data
        if self.observed_genos:
            raise ValueError('Individual has observed genotypes')

    def __fail_on_non_genotyped(self):
        # Guard: operations below need genotypes to already exist
        if not self.has_genotypes():
            raise ValueError('Individual has no genotypes')

    def get_genotypes(self, linkeq=False):
        """
        Retrieve genotypes from a chromosome pool if present, or else a
        chromosome generated under linkage equilibrium

        :param linkeq: should the retrieved genotypes be generated
            under linkage equilibrium?
        :type linkeq: bool

        :returns: Nothing
        """
        self.__fail_on_observed_genos()
        # Without a chromosome pool, linkage equilibrium is the only option
        if not self.population.pool:
            linkeq = True
        if self.has_genotypes():
            # Already populated; nothing to do
            return
        if self.is_founder() and self.population is None:
            raise ValueError('Founder ind %s has no population!'
                             % (self.label))
        if self.is_founder():
            pop = self.population
            if linkeq:
                self.genotypes = pop.get_linkage_equilibrium_genotypes()
            else:
                self.genotypes = pop.get_founder_genotypes()
        else:
            # Non-founders inherit one gamete from each parent
            self.genotypes = Individual.fertilize(self.father.gamete(),
                                                  self.mother.gamete())

    def get_genotype(self, loc, checkhasgeno=True):
        """
        Returns alleles at a position.

        :param loc: a (chromosome_index, position_index) pair
        :param checkhasgeno: raise if the individual has no genotypes

        :returns: Genotype tuple
        :rtype: tuple
        """
        if checkhasgeno and not self.has_genotypes():
            raise ValueError('Individual has no genotypes!')

        return (self.genotypes[loc[0]][0][loc[1]],
                self.genotypes[loc[0]][1][loc[1]])

    def get_constrained_genotypes(self, constraints, linkeq=True):
        '''
        Gets genotypes from parents (or population if the individual is a
        founder) subject to constraints. Used by the simulation objects
        in pydigree.simulation
        '''
        self.get_genotypes(linkeq=linkeq)
        # Force the constrained alleles onto the freshly drawn genotypes
        for constraint in constraints:
            locus, chromatid, allele, _ = constraint
            gt = list(self.get_genotype(locus))
            gt[chromatid] = allele
            self.set_genotype(locus, tuple(gt))

    def set_genotype(self, location, genotype):
        """ Manually set a genotype """
        self.__fail_on_non_genotyped()
        self.genotypes[location[0]][0][location[1]] = genotype[0]
        self.genotypes[location[0]][1][location[1]] = genotype[1]

    def _set_genotypes(self, gts):
        # Low-level setter: replaces the genotype container wholesale
        self.genotypes = gts

    def update(self, other):
        '''
        Takes another individual object, merges/updates phenotypes with the
        other individual object and replace self's genotypes with other's

        .. warning::
            This does not merge genotypes, it replaces them! Additionally,
            phenotypes in the other data overwrite the existing ones

        :param other: the data to update with
        :type other: Individual
        '''
        self.phenotypes.update(other.phenotypes)
        self.genotypes = other.genotypes

    def has_allele(self, location, allele):
        """
        Returns True if individual has the specified allele at location.

        Returns None (not False) when the genotype at the location is
        missing, so callers can distinguish "absent" from "unknown".
        """
        g = self.get_genotype(location)
        if is_missing_genotype(g):
            return None
        else:
            return allele in g

    def label_genotypes(self):
        """
        Gives the individual label genotypes.

        When label genotypes are transmitted on to the next generation, you
        can see where each allele in the next generation came from. This is
        useful for gene dropping simulations and reducing the memory footprint
        of forward-time simulation.
        """
        self.__fail_on_observed_genos()

        def labelled_chromatids(i, c):
            # Chromatid 0 is paternal, chromatid 1 is maternal
            a = LabelledAlleles.founder_chromosome(self, i, 0, chromobj=c)
            b = LabelledAlleles.founder_chromosome(self, i, 1, chromobj=c)
            return (a, b)

        g = [labelled_chromatids(i, c) for i, c in enumerate(self.chromosomes)]
        self.genotypes = g

    def delabel_genotypes(self):
        '''
        When an individual has label genotypes, replaces the labels with
        the ancestral allele corresponding to the label
        '''
        # BUG FIX: label_genotypes stores each chromosome pair as a *tuple*,
        # and the original assigned back into it item-by-item, which raises
        # TypeError. Rebuild each pair as a (mutable) list instead, matching
        # the list-pair layout produced by fertilize().
        for chromoidx, chromosome in enumerate(self.genotypes):
            self.genotypes[chromoidx] = [chromatid.delabel()
                                         for chromatid in chromosome]

    def clear_genotypes(self):
        """ Removes genotypes """
        self.observed_genos = False
        self.genotypes = None

    # Functions about ancestry and family
    #
    def is_founder(self):
        """ Returns true if individual is a founder """
        return self.father is None and self.mother is None

    def is_marryin_founder(self):
        """
        Returns true if an individual is a marry-in founder

        i.e.: the individual is a founder (depth: 0) and has a child with
        depth > 1
        """
        if not self.is_founder():
            return False
        return any(x.depth > 1 for x in self.children)

    def parents(self):
        ''' Returns the individual's father and mother in a 2-tuple '''
        return self.father, self.mother

    def ancestors(self):
        """
        Recursively searches for ancestors.

        :returns: A collection of all the ancestors of the individual
        :rtype: set of Individuals
        """
        if self.is_founder():
            return set()
        return set([self.father, self.mother]) | \
            self.father.ancestors() | \
            self.mother.ancestors()

    def descendants(self):
        """
        Recursively searches for descendants.

        :returns: A collection of all the descendants of the individual
        :rtype: set of Individuals
        """
        return set(self.children + list(flatten([x.descendants()
                                                 for x in self.children])))

    def siblings(self, include_halfsibs=False):
        """
        Returns this individual's siblings.

        Note: the returned set contains the individual itself, since the
        individual is among both parents' children.

        :param include_halfsibs: Include half-siblings
        :type include_halfsibs: bool

        :returns: A collection of the individual's siblings
        :rtype: set of Individuals
        """
        if include_halfsibs:
            return set(self.father.children) | set(self.mother.children)
        else:
            return set(self.father.children) & set(self.mother.children)

    def matriline(self):
        """
        Returns a label by recursively searching for the individual's mother's
        mother's mother's etc. until it reaches a founder mother, in which
        case it returns that ancestor's id.

        Useful reference:
        Courtenay et al. 'Mitochondrial haplogroup X is associated with
        successful aging in the Amish.' Human Genetics (2012). 131(2):201-8.
        doi: 10.1007/s00439-011-1060-3. Epub 2011 Jul 13.

        :returns: Label of the matriline founder
        """
        if self.is_founder():
            return self.label
        else:
            return self.mother.matriline()

    def patriline(self):
        """
        Returns a label by recursively searching for the individual's father's
        father's father's etc. until it reaches a founder father, in which
        case it returns that ancestor's id.

        Analogous to individual.matriline.

        :returns: Label of patriline founder
        """
        if self.is_founder():
            return self.label
        else:
            return self.father.patriline()

    @property
    def depth(self):
        """
        Returns the depth of an individual in a genealogy, a rough measure of
        what generation in the pedigree the individual is. Defined as:

        depth = 0 if individual is a founder, else the maximum of the
        depth of each parent

        :returns: Individual depth
        :rtype: integer
        """
        if self.is_founder():
            return 0
        elif 'depth' in self.attrib:
            # Memoized from an earlier call
            return self.attrib['depth']
        else:
            d = 1 + max(self.father.depth, self.mother.depth)
            self.attrib['depth'] = d
            return d

    def remove_ancestry(self):
        """
        Removes ancestry: makes a person a founder. Cannot be used on an
        individual in a pedigree, because the pedigree structure is
        already set.
        """
        if self.pedigree:
            raise ValueError('Individual in a pedigree!')
        self.father = None
        self.mother = None

    def inbreeding(self):
        """
        Returns the inbreeding coefficient (F) for the individual.

        F equals the kinship coefficient of the parents; the result is
        memoized in self.attrib.
        """
        # Two edge cases where inbreeding must be 0
        if self.is_founder():
            return 0.0
        if self.father.is_founder() or self.mother.is_founder():
            return 0.0

        if 'inbreed' in self.attrib:
            return self.attrib['inbreed']
        else:
            self.attrib['inbreed'] = kinship(self.father, self.mother)
            return self.attrib['inbreed']

    # Functions for breeding
    #
    def gamete(self):
        """
        Provides a set of half-genotypes to use with method fertilize

        :returns: a collection of AlleleContainers
        :rtype: list
        """
        if not self.genotypes:
            self.get_genotypes()

        # One recombined chromatid per chromosome
        g = [recombine(chrom[0],
                       chrom[1],
                       self.chromosomes[i].genetic_map)
             for i, chrom in enumerate(self.genotypes)]
        return g

    def constrained_gamete(self, constraints, attempts=1000):
        '''
        Repeatedly draw gametes until one satisfies every constraint.

        Constraints here is a list of ((chromosome, position), allele) tuples
        for alleles that the gamete has to have.

        :raises IterationError: when no satisfying gamete is drawn within
            `attempts` tries
        '''
        for _ in range(attempts):
            g = self.gamete()
            success = True
            for loc, allele in constraints:
                chrom, pos = loc
                if g[chrom][pos] != allele:
                    success = False
                    # BUG FIX: was `continue`; once one constraint fails the
                    # gamete is rejected, so stop checking the rest.
                    break
            if success:
                return g
        raise IterationError('Ran out of constrained gamete attempts')

    @staticmethod
    def fertilize(father, mother):
        """
        Combines a set of half-genotypes (from method gamete) to a full
        set of genotypes
        """
        return [[x, y] for x, y in zip(father, mother)]

    def predict_phenotype(self, trait):
        """ Predicts phenotypes from a given trait architecture and sets it """
        # NOTE(review): relies on self.predicted_phenotype, which is defined
        # elsewhere (not visible in this chunk) -- confirm it exists.
        self.phenotypes[trait.name] = self.predicted_phenotype(trait)

    def delete_phenotype(self, trait):
        "Removes a phenotype from the phenotype dictionary"
        self.phenotypes.delete_phenotype(trait)

    def genotype_as_phenotype(self, locus, minor_allele, label):
        """
        Creates a phenotype record representing the additive effect of a minor
        allele at the specified locus. That is, the phenotype created is the
        number of copies of the minor allele at the locus.

        :param locus: the site to count minor alleles
        :param minor_allele: the allele to count
        :param label: The name of the phenotype to be added

        :returns: void
        """
        if not self.has_genotypes():
            # No genotypes: record the phenotype as unknown
            self.phenotypes[label] = None
            return

        gt = self.get_genotype(locus)
        if is_missing_genotype(gt):
            val = None
        else:
            val = gt.count(minor_allele)
        self.phenotypes[label] = val
| {
"repo_name": "jameshicks/pydigree",
"path": "pydigree/individual.py",
"copies": "1",
"size": "15424",
"license": "apache-2.0",
"hash": 4499481781320710000,
"line_mean": 32.1698924731,
"line_max": 79,
"alpha_frac": 0.5844139004,
"autogenerated": false,
"ratio": 4.155172413793103,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005914643440381258,
"num_lines": 465
} |
# A class representing the contents of /etc/network/interfaces
from debinterface.interfacesWriter import InterfacesWriter
from debinterface.interfacesReader import InterfacesReader
from debinterface.adapter import NetworkAdapter
import debinterface.toolutils as toolutils
class Interfaces:
    """Represents the contents of /etc/network/interfaces as a collection
    of NetworkAdapter objects, and knows how to read/write that file."""

    _interfaces_path = '/etc/network/interfaces'

    def __init__(self, update_adapters=True,
                 interfaces_path=None, backup_path=None):
        """ By default read interface file on init

        :param update_adapters: parse the interfaces file immediately
        :param interfaces_path: override the default interfaces file path
        :param backup_path: override the default backup path (<path>.bak)
        """
        self._set_paths(interfaces_path, backup_path)
        if update_adapters is True:
            self.updateAdapters()
        else:
            self._adapters = []

    @property
    def adapters(self):
        return self._adapters

    @property
    def interfaces_path(self):
        return self._interfaces_path

    @property
    def backup_path(self):
        return self._backup_path

    def updateAdapters(self):
        """ (re)read interfaces file and save adapters """
        reader = InterfacesReader(self._interfaces_path)
        self._adapters = reader.parse_interfaces()
        if not self._adapters:
            self._adapters = []

    def writeInterfaces(self):
        """ write adapters to interfaces file """
        return InterfacesWriter(
            self._adapters,
            self._interfaces_path,
            self._backup_path
        ).write_interfaces()

    def getAdapter(self, name):
        """ Find adapter by interface name; returns None when absent """
        return next(
            (
                x for x in self._adapters
                if x._ifAttributes['name'] == name
            ),
            None)

    def addAdapter(self, options, index=None):
        """Insert a NetworkAdapter before the given index
        or at the end of the list.

        options should be a string (name) or a dict

        Args:
            options (string or dict): options to build a network adaptator
            index (integer, optional): index to insert the NetworkAdapter
        """
        adapter = NetworkAdapter(options)
        adapter.validateAll()
        # BUG FIX: the branches were swapped -- list.insert(None, adapter)
        # raises TypeError, so the default index=None always crashed.
        # Append when no index is given; insert at the index otherwise.
        if index is None:
            self._adapters.append(adapter)
        else:
            self._adapters.insert(index, adapter)
        return adapter

    def removeAdapter(self, index):
        """ Remove the adapter at the given index. """
        self._adapters.pop(index)

    def removeAdapterByName(self, name):
        """ Remove the adapter with the given name. """
        self._adapters = [
            x for x in self._adapters
            if x._ifAttributes['name'] != name
        ]

    def upAdapter(self, if_name):
        """ return True/False, command output. Use ifup. """
        return toolutils.safe_subprocess(["/sbin/ifup", if_name])

    def downAdapter(self, if_name):
        """ return True/False, command output. Use ifdown. """
        return toolutils.safe_subprocess(["/sbin/ifdown", if_name])

    def _set_paths(self, interfaces_path, backup_path):
        """ either use user input or defaults """
        if interfaces_path is not None:
            self._interfaces_path = interfaces_path

        if backup_path:
            self._backup_path = backup_path
        else:
            # Default backup sits next to the interfaces file
            self._backup_path = self._interfaces_path + ".bak"
| {
"repo_name": "michaelboulton/debinterface",
"path": "debinterface/interfaces.py",
"copies": "1",
"size": "3276",
"license": "bsd-3-clause",
"hash": -1405889030419315000,
"line_mean": 29.9056603774,
"line_max": 78,
"alpha_frac": 0.5992063492,
"autogenerated": false,
"ratio": 4.55,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 106
} |
# A class representing the contents of /etc/network/interfaces
from interfacesWriter import InterfacesWriter
from interfacesReader import InterfacesReader
from adapter import NetworkAdapter
import toolutils
#import defaults
class Interfaces:
    """Represents the contents of /etc/network/interfaces as a collection
    of NetworkAdapter objects, readable/writable as a file or a string."""

    _interfaces_path = '/etc/network/interfaces'

    def __init__(self, update_adapters=True, interfaces_path=None, backup_path=None):
        ''' By default read interface file on init '''
        self._set_paths(interfaces_path, backup_path)
        if update_adapters is True:
            self.updateAdapters()
        else:
            self._adapters = []

    @property
    def adapters(self):
        return self._adapters

    @property
    def interfaces_path(self):
        return self._interfaces_path

    @property
    def backup_path(self):
        return self._backup_path

    def updateAdapters(self):
        ''' (re)read interfaces file and save adapters '''
        self._adapters = InterfacesReader(self._interfaces_path).parse_interfaces()
        if not self._adapters:
            self._adapters = []

    def updateAdaptersWithString(self, data):
        ''' Parse the given string as interfaces-file content '''
        self._adapters = InterfacesReader(None).parse_interfaces_from_string(data)
        if not self._adapters:
            self._adapters = []

    def writeInterfaces(self):
        ''' write adapters to interfaces file '''
        self._writer_factory().write_interfaces()

    def writeInterfacesAsString(self):
        ''' Render the adapters as interfaces-file text '''
        return self._writer_factory().write_interfaces_as_string()

    def _writer_factory(self):
        ''' Create a writer object '''
        return InterfacesWriter(
            self._adapters,
            self._interfaces_path,
            self._backup_path
        )

    def getAdapter(self, name):
        ''' Find adapter by interface name; returns None when absent '''
        return next((x for x in self._adapters if x._ifAttributes['name'] == name), None)

    def addAdapter(self, options, index=None):
        '''
        Insert a networkAdapter before the given index or at the end of the list.
        options should be a string (name) or a dict
        '''
        adapter = NetworkAdapter(options)
        adapter.validateAll()
        # BUG FIX: the branches were swapped -- list.insert(None, adapter)
        # raises TypeError, so the default index=None always crashed.
        # Append when no index is given; insert at the index otherwise.
        if index is None:
            self._adapters.append(adapter)
        else:
            self._adapters.insert(index, adapter)
        return adapter

    def removeAdapter(self, index):
        ''' Remove the adapter at the given index. '''
        self._adapters.pop(index)

    def removeAdapterByName(self, name):
        ''' Remove the adapter with the given name. '''
        self._adapters = [x for x in self._adapters if x._ifAttributes['name'] != name]

    def upAdapter(self, if_name):
        ''' return True/False, command output. Use ifconfig. '''
        return toolutils.safe_subprocess(["ifconfig", if_name, 'up'])

    def downAdapter(self, if_name):
        ''' return True/False, command output. Use ifdown. '''
        return toolutils.safe_subprocess(["ifconfig", if_name, 'down'])

    def _set_paths(self, interfaces_path, backup_path):
        ''' either use user input or defaults '''
        if interfaces_path is not None:
            self._interfaces_path = interfaces_path

        if backup_path:
            self._backup_path = backup_path
        else:
            # Default backup sits next to the interfaces file
            self._backup_path = self._interfaces_path + ".bak"
| {
"repo_name": "le9i0nx/debinterface",
"path": "interfaces.py",
"copies": "1",
"size": "3325",
"license": "bsd-3-clause",
"hash": -5387718861210518000,
"line_mean": 30.6666666667,
"line_max": 89,
"alpha_frac": 0.6216541353,
"autogenerated": false,
"ratio": 4.386543535620053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012238766239604456,
"num_lines": 105
} |
# A class representing the contents of /etc/network/interfaces
from adapter import NetworkAdapter
import StringIO
class InterfacesReader:
    ''' Short lived class to read interfaces file '''

    def __init__(self, interfaces_path):
        # Path to the interfaces file; may be None when only
        # parse_interfaces_from_string is used.
        self._interfaces_path = interfaces_path
        self._reset()

    @property
    def adapters(self):
        # Adapters accumulated by the most recent parse
        return self._adapters

    def parse_interfaces(self):
        ''' Read /etc/network/interfaces.

        :returns: list of NetworkAdapter objects parsed from the file
        '''
        self._reset()
        # Open up the interfaces file. Read only.
        with open(self._interfaces_path, "r") as interfaces:
            self._read_lines_from_file(interfaces)
        return self._parse_interfaces_impl()

    def parse_interfaces_from_string(self, data):
        ''' Parse interfaces-file content given as a string.

        :param data: text in /etc/network/interfaces syntax
        :returns: list of NetworkAdapter objects
        '''
        self._reset()
        # Can't be used in 'with..as'
        # NOTE: StringIO module import implies Python 2 here.
        string_file = StringIO.StringIO(data)
        self._read_lines_from_file(string_file)
        string_file.close()
        return self._parse_interfaces_impl()

    def _parse_interfaces_impl(self):
        ''' Save adapters
            Return an array of networkAdapter instances.
        '''
        # Apply the collected auto/allow-hotplug flags to the matching
        # adapters now that all lines have been read.
        for entry in self._auto_list:
            for adapter in self._adapters:
                if adapter._ifAttributes['name'] == entry:
                    adapter.setAuto(True)

        for entry in self._hotplug_list:
            for adapter in self._adapters:
                if adapter._ifAttributes['name'] == entry:
                    adapter.setHotplug(True)
        return self._adapters

    def _read_lines_from_file(self, fileObj):
        # Loop through the interfaces file.
        # Each line is fed through all the per-line parsers; each parser
        # decides for itself whether the line is relevant.
        for line in fileObj:
            # Identify the clauses by analyzing the first word of each line.
            # Go to the next line if the current line is a comment.
            # NOTE(review): `pass` here does not skip the rest of this
            # iteration -- a comment line still reaches the second
            # if-block below. Confirm whether that is intended.
            if line.strip().startswith("#") is True:
                pass
            else:
                self._parse_iface(line)
                # Ignore blank lines.
                if line.isspace() is True:
                    pass
                else:
                    self._parse_details(line)
                self._read_auto(line)
                self._read_hotplug(line)

    def _parse_iface(self, line):
        # An 'iface <name> <addrfam> <source>' line starts a new adapter.
        if line.startswith('iface'):
            sline = line.split()
            # Update the self._context when an iface clause is encountered.
            self._context += 1
            self._adapters.append(NetworkAdapter(sline[1]))
            self._adapters[self._context].setAddressSource(sline[-1])
            self._adapters[self._context].setAddrFam(sline[2])

    def _parse_details(self, line):
        # Indented lines are attribute settings for the current adapter.
        # NOTE(review): assumes an iface clause was already seen
        # (self._context >= 0); a leading indented line would index -1.
        if line[0].isspace() is True:
            sline = line.split()
            if sline[0] == 'address':
                self._adapters[self._context].setAddress(sline[1])
            elif sline[0] == 'netmask':
                self._adapters[self._context].setNetmask(sline[1])
            elif sline[0] == 'gateway':
                self._adapters[self._context].setGateway(sline[1])
            elif sline[0] == 'broadcast':
                self._adapters[self._context].setBroadcast(sline[1])
            elif sline[0] == 'network':
                self._adapters[self._context].setNetwork(sline[1])
            elif sline[0].startswith('bridge') is True:
                # 'bridge_<opt> <values...>' -> replaceBropt(<opt>, <values>)
                opt = sline[0].split('_')
                sline.pop(0)
                ifs = " ".join(sline)
                self._adapters[self._context].replaceBropt(opt[1], ifs)
            elif sline[0] == 'up' or sline[0] == 'down' or sline[0] == 'pre-up' or sline[0] == 'post-down':
                # Lifecycle hook commands; keep the rest of the line verbatim
                ud = sline.pop(0)
                cmd = ' '.join(sline)
                if ud == 'up':
                    self._adapters[self._context].appendUp(cmd)
                elif ud == 'down':
                    self._adapters[self._context].appendDown(cmd)
                elif ud == 'pre-up':
                    self._adapters[self._context].appendPreUp(cmd)
                elif ud == 'post-down':
                    self._adapters[self._context].appendPostDown(cmd)
            else:
                # store as-is so as not to lose unrecognized attributes
                self._adapters[self._context].setUnknown(sline[0], sline[1])

    def _read_auto(self, line):
        ''' Identify which adapters are flagged auto. '''
        if line.startswith('auto'):
            sline = line.split()
            for word in sline:
                if word == 'auto':
                    pass
                else:
                    self._auto_list.append(word)

    def _read_hotplug(self, line):
        ''' Identify which adapters are flagged allow-hotplug. '''
        if line.startswith('allow-hotplug'):
            sline = line.split()
            for word in sline:
                if word == 'allow-hotplug':
                    pass
                else:
                    self._hotplug_list.append(word)

    def _reset(self):
        # Initialize a place to store created networkAdapter objects.
        self._adapters = []
        # Keep a list of adapters that have the auto or allow-hotplug flags set.
        self._auto_list = []
        self._hotplug_list = []
        # Store the interface context.
        # This is the index of the adapters collection.
        self._context = -1
| {
"repo_name": "dggreenbaum/debinterface",
"path": "interfacesReader.py",
"copies": "4",
"size": "5241",
"license": "bsd-3-clause",
"hash": 3795905094307924000,
"line_mean": 35.6503496503,
"line_max": 107,
"alpha_frac": 0.532150353,
"autogenerated": false,
"ratio": 4.206260032102729,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6738410385102729,
"avg_score": null,
"num_lines": null
} |
"""A class that can be used to represent a car."""
"""A set of classes used to represent gas and electric cars."""
class Car():
    """A simple attempt to represent a car."""

    def __init__(self, make, model, year):
        """Store the car's identity and start the odometer at zero."""
        self.make = make
        self.model = model
        self.year = year
        self.odometer_reading = 0

    def get_descriptive_name(self):
        """Return a neatly formatted descriptive name."""
        return "{0} {1} {2}".format(self.year, self.make, self.model).title()

    def read_odometer(self):
        """Print a statement showing the car's mileage."""
        print("This car has %s miles on it." % self.odometer_reading)

    def update_odometer(self, mileage):
        """
        Set the odometer reading to the given value.

        Reject the change if it attempts to roll the odometer back.
        """
        if mileage < self.odometer_reading:
            print("You can't roll back an odometer!")
        else:
            self.odometer_reading = mileage

    def increment_odometer(self, miles):
        """Add the given amount to the odometer reading."""
        self.odometer_reading += miles
class Battery():
    """A simple attempt to model a battery for an electric car."""

    # Known battery sizes (kWh) mapped to their approximate range in miles.
    RANGES = {70: 240, 85: 270}

    def __init__(self, battery_size=70):
        """Initialize the battery's attributes.

        :param battery_size: capacity in kWh (default 70)
        """
        self.battery_size = battery_size

    def describe_battery(self):
        """Print a statement describing the battery size."""
        print("This car has a " + str(self.battery_size) + "-kWh battery.")

    def get_range(self):
        """Print a statement about the range this battery provides."""
        # BUG FIX: the original bound a local named `range` only for sizes
        # 70 and 85; any other size fell through and str(range) printed the
        # repr of the `range` builtin. Look the size up instead and report
        # unknown sizes explicitly. (Also fixes the "chage" typo.)
        driving_range = self.RANGES.get(self.battery_size)
        if driving_range is None:
            print("Range unknown for a " + str(self.battery_size)
                  + "-kWh battery.")
            return
        message = "This car can go approximately " + str(driving_range)
        message += " miles on a full charge."
        print(message)
class ElectricCar(Car):
    """Represents aspects of a car that are specific to electric vehicles."""

    def __init__(self, make, model, year):
        """Initialize the parent class, then attach a default battery."""
        super().__init__(make, model, year)
        self.battery = Battery()

    def fill_gas_tank(self):
        """Electric cars don't have gas tanks."""
        print("This car doesn't need a gas tank!")
# my_new_car = Car('audi', 'a4', 2016)
# print(my_new_car.get_descriptive_name())
# my_new_car.read_odometer()
# my_new_car.odometer_reading = 23
# my_new_car.read_odometer()
# my_new_car.update_odometer(23)
# my_new_car.read_odometer()
"""
my_used_car = Car('subaru', 'outback', 2013)
print(my_used_car.get_descriptive_name())
my_used_car.update_odometer(23500)
my_used_car.read_odometer()
my_used_car.increment_odometer(100)
my_used_car.read_odometer()
"""
| {
"repo_name": "mccarrion/python-practice",
"path": "crash_course/chapter09/car.py",
"copies": "1",
"size": "2929",
"license": "mit",
"hash": -7059992858041922000,
"line_mean": 30.4946236559,
"line_max": 77,
"alpha_frac": 0.6111300785,
"autogenerated": false,
"ratio": 3.478622327790974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9566773198065086,
"avg_score": 0.004595841645177509,
"num_lines": 93
} |
"""A class that can be used to represent a car."""
class Car():
    """A simple attempt to represent a car."""

    def __init__(self, manufacturer, model, year):
        """Store the car's identity and start the odometer at zero."""
        self.manufacturer = manufacturer
        self.model = model
        self.year = year
        self.odometer_reading = 0

    def get_descriptive_name(self):
        """Return a neatly formatted descriptive name."""
        parts = (str(self.year), self.manufacturer, self.model)
        return " ".join(parts).title()

    def read_odometer(self):
        """Print a statement showing the car's mileage."""
        print("This car has %s miles on it." % self.odometer_reading)

    def update_odometer(self, mileage):
        """
        Set the odometer reading to the given value.

        Reject the change if it attempts to roll the odometer back.
        """
        if mileage < self.odometer_reading:
            print("You can't roll back an odometer!")
        else:
            self.odometer_reading = mileage

    def increment_odometer(self, miles):
        """Add the given amount to the odometer reading."""
        self.odometer_reading += miles
| {
"repo_name": "lluxury/pcc_exercise",
"path": "09/car.py",
"copies": "1",
"size": "1263",
"license": "mit",
"hash": 5306594851964954000,
"line_mean": 35.1470588235,
"line_max": 79,
"alpha_frac": 0.5756136184,
"autogenerated": false,
"ratio": 4.087378640776699,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51629922591767,
"avg_score": null,
"num_lines": null
} |
""" A class that can provide a date/time in any timeformat.format() format and both
local and UTC timezones within a ContextVariable.
Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
"""
import re, time, math, string
import timeformat
from simpletal import simpleTALES
# Splits a context path into a timezone selector ('local' or 'utc') and an
# optional timeformat format string, e.g. 'utc/%d-%m-%Y'.
PATHREGEX = re.compile ('^((?:local)|(?:utc))/?(.*)$')

class Date (simpleTALES.ContextVariable):
	""" Wraps a DateTime and provides context paths local and utc.
		These paths in turn can take TimeFormat formats, for example:
			utc/%d-%m-%Y
	"""
	def __init__ (self, value = None, defaultFormat = '%a[SHORT], %d %b[SHORT] %Y %H:%M:%S %Z'):
		""" The value should be in the LOCAL timezone.

			value          -- a time tuple (as from time.localtime());
			                  None means "current time at access"
			defaultFormat  -- timeformat format string used when the
			                  context path supplies none
		"""
		self.ourValue = value
		self.defaultFormat = defaultFormat
		
	def value (self, currentPath=None):
		# Default to local timezone and RFC822 format
		utcTime = 0
		strFrmt = self.defaultFormat
		if (currentPath is not None):
			# currentPath is (index, path-segments); rebuild the remaining path
			index, paths = currentPath
			currentPath = '/'.join (paths[index:])
			match = PATHREGEX.match (currentPath)
			if (match is not None):
				# First group selects the timezone, second the format string
				type = match.group(1)
				if (type == 'local'):
					utcTime = 0
				else:
					utcTime = 1
				strFrmt = match.group(2)
				if (strFrmt == ""):
					strFrmt = self.defaultFormat
		if (self.ourValue is None):
			# Default to the current time!
			timeValue = time.localtime()
		else:
			timeValue = self.ourValue
		if (utcTime):
			# Convert to UTC (GMT)
			timeValue = time.gmtime (time.mktime (timeValue))
		value = timeformat.format (strFrmt, timeValue, utctime=utcTime)
		# NOTE(review): raising the result appears to be the simpleTAL
		# convention for returning a dynamic value from a ContextVariable --
		# confirm against the simpleTALES API before changing.
		raise simpleTALES.ContextVariable (value)
| {
"repo_name": "owlfish/pubtal",
"path": "lib/pubtal/DateContext.py",
"copies": "1",
"size": "3035",
"license": "bsd-3-clause",
"hash": 4333515682096000500,
"line_mean": 37.9102564103,
"line_max": 93,
"alpha_frac": 0.7291598023,
"autogenerated": false,
"ratio": 3.78428927680798,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.501344907910798,
"avg_score": null,
"num_lines": null
} |
'''A class that checks connections'''
import time
import threading
from . import logger
class StoppableThread(threading.Thread):
    '''A thread whose stop condition can be signalled from outside'''

    def __init__(self):
        threading.Thread.__init__(self)
        # Set once stop() is called; wait() returns True from then on
        self._event = threading.Event()

    def wait(self, timeout):
        '''Block up to `timeout` seconds; True if the stop flag is set'''
        logger.debug('Waiting for %fs', timeout)
        return self._event.wait(timeout)

    def stop(self):
        '''Signal the thread to stop'''
        self._event.set()
class PeriodicThread(StoppableThread):
    '''A thread that periodically invokes a callback every interval seconds'''

    def __init__(self, interval, callback, *args, **kwargs):
        StoppableThread.__init__(self)
        self._interval = interval
        self._callback = callback
        self._args = args
        self._kwargs = kwargs
        # Timestamp of the most recent callback invocation, or None
        self._last_checked = None

    def delay(self):
        '''How long to wait before the next check'''
        if not self._last_checked:
            return self._interval
        elapsed = time.time() - self._last_checked
        return self._interval - elapsed

    def callback(self):
        '''Run the callback and record when it ran'''
        self._callback(*self._args, **self._kwargs)
        self._last_checked = time.time()

    def run(self):
        '''Run the callback periodically until stopped'''
        while not self.wait(self.delay()):
            try:
                logger.info('Invoking callback %s', self.callback)
                self.callback()
            except Exception:
                logger.exception('Callback failed')
class ConnectionChecker(PeriodicThread):
    '''A thread that checks the connections on an object'''
    def __init__(self, client, interval=60):
        # Invoke the client's check_connections method every `interval`
        # seconds (default: once a minute).
        PeriodicThread.__init__(self, interval, client.check_connections)
| {
"repo_name": "dlecocq/nsq-py",
"path": "nsq/checker.py",
"copies": "1",
"size": "1791",
"license": "mit",
"hash": -7491734606844129000,
"line_mean": 29.8793103448,
"line_max": 78,
"alpha_frac": 0.6058068118,
"autogenerated": false,
"ratio": 4.378973105134475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5484779916934475,
"avg_score": null,
"num_lines": null
} |
'''A class that listens to pubsub channels and can unlisten'''
import logging
import threading
import contextlib
# Our logger
logger = logging.getLogger('qless')
class Listener(object):
    '''A class that listens to pubsub channels and can unlisten'''
    def __init__(self, redis, channels):
        # redis is duck-typed: anything with a pubsub() method returning a
        # subscribe/listen/unsubscribe object works
        self._pubsub = redis.pubsub()
        self._channels = channels

    def listen(self):
        '''Listen for events as they come in.

        Generator: yields only messages whose type is 'message'.
        '''
        try:
            self._pubsub.subscribe(self._channels)
            for message in self._pubsub.listen():
                if message['type'] == 'message':
                    yield message
        finally:
            # NOTE(review): clearing the channel list here means a later
            # unlisten() unsubscribes from nothing -- confirm intended.
            self._channels = []

    def unlisten(self):
        '''Stop listening for events'''
        self._pubsub.unsubscribe(self._channels)

    @contextlib.contextmanager
    def thread(self):
        '''Run in a thread'''
        # NOTE(review): listen is a generator function, so a Thread with
        # target=self.listen only creates the generator without consuming
        # it -- verify this does what the caller expects.
        thread = threading.Thread(target=self.listen)
        thread.start()
        try:
            yield self
        finally:
            self.unlisten()
            thread.join()
class Events(Listener):
    '''A class for handling qless events'''

    # All qless channels share this prefix
    namespace = 'ql:'
    events = (
        'canceled', 'completed', 'failed', 'popped',
        'stalled', 'put', 'track', 'untrack'
    )

    def __init__(self, redis):
        channels = [self.namespace + event for event in self.events]
        Listener.__init__(self, redis, channels)
        # One optional handler slot per known event
        self._callbacks = {key: None for key in self.events}

    def listen(self):
        '''Listen for events, dispatching any registered callbacks'''
        prefix_len = len(self.namespace)
        for message in Listener.listen(self):
            logger.debug('Message: %s', message)
            # Strip off the 'namespace' from the channel
            handler = self._callbacks.get(message['channel'][prefix_len:])
            if handler:
                handler(message['data'])

    def on(self, evt, func):
        '''Set a callback handler for a pubsub event'''
        if evt not in self._callbacks:
            raise NotImplementedError('callback "%s"' % evt)
        self._callbacks[evt] = func

    def off(self, evt):
        '''Deactivate the callback for a pubsub event'''
        return self._callbacks.pop(evt, None)
| {
"repo_name": "seomoz/qless-py",
"path": "qless/listener.py",
"copies": "1",
"size": "2225",
"license": "mit",
"hash": -6098407002178100000,
"line_mean": 28.6666666667,
"line_max": 75,
"alpha_frac": 0.566741573,
"autogenerated": false,
"ratio": 4.303675048355899,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5370416621355899,
"avg_score": null,
"num_lines": null
} |
"""A class that performs HTTP-01 challenges for Apache"""
import logging
import os
from acme.magic_typing import List, Set # pylint: disable=unused-import, no-name-in-module
from certbot import errors
from certbot.plugins import common
from certbot_apache.obj import VirtualHost # pylint: disable=unused-import
from certbot_apache.parser import get_aug_path
logger = logging.getLogger(__name__)
class ApacheHttp01(common.TLSSNI01):
    """Class that performs HTTP-01 challenges within the Apache configurator."""

    # Apache < 2.4: rewrite ACME challenge requests into the challenge
    # directory ([L] ends this rewrite round) and open access with the
    # legacy Order/Allow access-control directives.
    CONFIG_TEMPLATE22_PRE = """\
        RewriteEngine on
        RewriteRule ^/\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [L]
    """
    CONFIG_TEMPLATE22_POST = """\
        <Directory {0}>
            Order Allow,Deny
            Allow from all
        </Directory>
        <Location /.well-known/acme-challenge>
            Order Allow,Deny
            Allow from all
        </Location>
    """

    # Apache >= 2.4: same rewrite, but [END] stops all rewrite processing and
    # access is granted with the modern Require syntax.
    CONFIG_TEMPLATE24_PRE = """\
        RewriteEngine on
        RewriteRule ^/\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [END]
    """
    CONFIG_TEMPLATE24_POST = """\
        <Directory {0}>
            Require all granted
        </Directory>
        <Location /.well-known/acme-challenge>
            Require all granted
        </Location>
    """

    def __init__(self, *args, **kwargs):
        super(ApacheHttp01, self).__init__(*args, **kwargs)
        # Config file Include'd at the *beginning* of each selected vhost
        # (the rewrite rules) ...
        self.challenge_conf_pre = os.path.join(
            self.configurator.conf("challenge-location"),
            "le_http_01_challenge_pre.conf")
        # ... and the one Include'd at the *end* (access-control grants).
        self.challenge_conf_post = os.path.join(
            self.configurator.conf("challenge-location"),
            "le_http_01_challenge_post.conf")
        # Directory that will hold the challenge validation files.
        self.challenge_dir = os.path.join(
            self.configurator.config.work_dir,
            "http_challenges")
        # Vhosts that already had the challenge Includes added to them.
        self.moded_vhosts = set()  # type: Set[VirtualHost]

    def perform(self):
        """Perform all HTTP-01 challenges.

        :returns: list of challenge responses (empty if there are no achalls)
        """
        if not self.achalls:
            return []
        # Save any changes to the configuration as a precaution
        # About to make temporary changes to the config
        self.configurator.save("Changes before challenge setup", True)
        self.configurator.ensure_listen(str(
            self.configurator.config.http01_port))
        self.prepare_http01_modules()
        responses = self._set_up_challenges()
        self._mod_config()
        # Save reversible changes
        self.configurator.save("HTTP Challenge", True)
        return responses

    def prepare_http01_modules(self):
        """Make sure that we have the needed modules available for http01"""
        if self.configurator.conf("handle-modules"):
            needed_modules = ["rewrite"]
            # The access-control module differs between Apache < 2.4 and >= 2.4.
            if self.configurator.version < (2, 4):
                needed_modules.append("authz_host")
            else:
                needed_modules.append("authz_core")
            for mod in needed_modules:
                if mod + "_module" not in self.configurator.parser.modules:
                    # temp=True so the module is disabled again on rollback.
                    self.configurator.enable_mod(mod, temp=True)

    def _mod_config(self):
        """Writes the pre/post challenge config files and Includes them from
        every vhost that may serve the challenge requests."""
        selected_vhosts = []  # type: List[VirtualHost]
        http_port = str(self.configurator.config.http01_port)
        for chall in self.achalls:
            # Search for matching VirtualHosts
            for vh in self._matching_vhosts(chall.domain):
                selected_vhosts.append(vh)
        # Ensure that we have one or more VirtualHosts that we can continue
        # with. (one that listens to port configured with --http-01-port)
        found = False
        for vhost in selected_vhosts:
            if any(a.is_wildcard() or a.get_port() == http_port for a in vhost.addrs):
                found = True
        if not found:
            # Fall back to all non-SSL vhosts listening on the challenge port.
            for vh in self._relevant_vhosts():
                selected_vhosts.append(vh)
        # Add the challenge configuration
        for vh in selected_vhosts:
            self._set_up_include_directives(vh)
        # Register the generated files so they are removed on rollback.
        self.configurator.reverter.register_file_creation(
            True, self.challenge_conf_pre)
        self.configurator.reverter.register_file_creation(
            True, self.challenge_conf_post)
        if self.configurator.version < (2, 4):
            config_template_pre = self.CONFIG_TEMPLATE22_PRE
            config_template_post = self.CONFIG_TEMPLATE22_POST
        else:
            config_template_pre = self.CONFIG_TEMPLATE24_PRE
            config_template_post = self.CONFIG_TEMPLATE24_POST
        config_text_pre = config_template_pre.format(self.challenge_dir)
        config_text_post = config_template_post.format(self.challenge_dir)
        logger.debug("writing a pre config file with text:\n %s", config_text_pre)
        with open(self.challenge_conf_pre, "w") as new_conf:
            new_conf.write(config_text_pre)
        logger.debug("writing a post config file with text:\n %s", config_text_post)
        with open(self.challenge_conf_post, "w") as new_conf:
            new_conf.write(config_text_post)

    def _matching_vhosts(self, domain):
        """Return all VirtualHost objects that have the requested domain name or
        a wildcard name that would match the domain in ServerName or ServerAlias
        directive.
        """
        matching_vhosts = []
        for vhost in self.configurator.vhosts:
            if self.configurator.domain_in_names(vhost.get_names(), domain):
                # domain_in_names also matches the exact names, so no need
                # to check "domain in vhost.get_names()" explicitly here
                matching_vhosts.append(vhost)
        return matching_vhosts

    def _relevant_vhosts(self):
        """Returns all non-SSL vhosts listening on the HTTP-01 port.

        :raises errors.PluginError: if no such vhost exists
        """
        http01_port = str(self.configurator.config.http01_port)
        relevant_vhosts = []
        for vhost in self.configurator.vhosts:
            if any(a.is_wildcard() or a.get_port() == http01_port for a in vhost.addrs):
                if not vhost.ssl:
                    relevant_vhosts.append(vhost)
        if not relevant_vhosts:
            raise errors.PluginError(
                "Unable to find a virtual host listening on port {0} which is"
                " currently needed for Certbot to prove to the CA that you"
                " control your domain. Please add a virtual host for port"
                " {0}.".format(http01_port))
        return relevant_vhosts

    def _set_up_challenges(self):
        """Creates the challenge directory plus one validation file per
        challenge, and returns the corresponding responses."""
        if not os.path.isdir(self.challenge_dir):
            os.makedirs(self.challenge_dir)
            # World-readable so the Apache worker can serve the files.
            os.chmod(self.challenge_dir, 0o755)
        responses = []
        for achall in self.achalls:
            responses.append(self._set_up_challenge(achall))
        return responses

    def _set_up_challenge(self, achall):
        """Writes a single validation file (registered for rollback) and
        returns the challenge response."""
        response, validation = achall.response_and_validation()
        name = os.path.join(self.challenge_dir, achall.chall.encode("token"))
        self.configurator.reverter.register_file_creation(True, name)
        with open(name, 'wb') as f:
            f.write(validation.encode())
        os.chmod(name, 0o644)
        return response

    def _set_up_include_directives(self, vhost):
        """Includes override configuration to the beginning and to the end of
        VirtualHost. Note that this include isn't added to Augeas search tree"""
        if vhost not in self.moded_vhosts:
            logger.debug(
                "Adding a temporary challenge validation Include for name: %s " +
                "in: %s", vhost.name, vhost.filep)
            self.configurator.parser.add_dir_beginning(
                vhost.path, "Include", self.challenge_conf_pre)
            self.configurator.parser.add_dir(
                vhost.path, "Include", self.challenge_conf_post)
            if not vhost.enabled:
                # A disabled vhost would never be read by Apache; pull its
                # file in from the default config so the challenge is served.
                self.configurator.parser.add_dir(
                    get_aug_path(self.configurator.parser.loc["default"]),
                    "Include", vhost.filep)
            self.moded_vhosts.add(vhost)
| {
"repo_name": "letsencrypt/letsencrypt",
"path": "certbot-apache/certbot_apache/http_01.py",
"copies": "1",
"size": "7968",
"license": "apache-2.0",
"hash": -7423766223547613000,
"line_mean": 37.4927536232,
"line_max": 91,
"alpha_frac": 0.6066767068,
"autogenerated": false,
"ratio": 3.9309324124321656,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5037609119232165,
"avg_score": null,
"num_lines": null
} |
"""A class that performs TLS-SNI-01 challenges for Apache"""
import os
import logging
from certbot.plugins import common
from certbot.errors import PluginError, MissingCommandlineFlag
from certbot_apache import obj
from certbot_apache import parser
logger = logging.getLogger(__name__)
class ApacheTlsSni01(common.TLSSNI01):
    """Class that performs TLS-SNI-01 challenges within the Apache configurator

    :ivar configurator: ApacheConfigurator object

    :type configurator: :class:`~apache.configurator.ApacheConfigurator`

    :ivar list achalls: Annotated TLS-SNI-01
        (`.KeyAuthorizationAnnotatedChallenge`) challenges.

    :param list indices: Meant to hold indices of challenges in a
        larger array. ApacheTlsSni01 is capable of solving many challenges
        at once which causes an indexing issue within ApacheConfigurator
        who must return all responses in order. Imagine ApacheConfigurator
        maintaining state about where all of the http-01 Challenges,
        TLS-SNI-01 Challenges belong in the response array. This is an
        optional utility.

    :param str challenge_conf: location of the challenge config file

    """

    # One challenge vhost is generated from this template per challenge and
    # written into the challenge config file.
    VHOST_TEMPLATE = """\
<VirtualHost {vhost}>
    ServerName {server_name}
    UseCanonicalName on
    SSLStrictSNIVHostCheck on

    LimitRequestBody 1048576

    Include {ssl_options_conf_path}
    SSLCertificateFile {cert_path}
    SSLCertificateKeyFile {key_path}

    DocumentRoot {document_root}
</VirtualHost>

"""

    def __init__(self, *args, **kwargs):
        super(ApacheTlsSni01, self).__init__(*args, **kwargs)
        # File that will hold all generated challenge vhosts.
        self.challenge_conf = os.path.join(
            self.configurator.conf("challenge-location"),
            "le_tls_sni_01_cert_challenge.conf")

    def perform(self):
        """Perform a TLS-SNI-01 challenge.

        :returns: list of challenge responses (empty if there are no achalls)
        """
        if not self.achalls:
            return []
        # Save any changes to the configuration as a precaution
        # About to make temporary changes to the config
        self.configurator.save("Changes before challenge setup", True)
        # Prepare the server for HTTPS
        self.configurator.prepare_server_https(
            str(self.configurator.config.tls_sni_01_port), True)
        responses = []
        # Create all of the challenge certs
        for achall in self.achalls:
            responses.append(self._setup_challenge_cert(achall))
        # Setup the configuration
        addrs = self._mod_config()
        self.configurator.save("Don't lose mod_config changes", True)
        self.configurator.make_addrs_sni_ready(addrs)
        # Save reversible changes
        self.configurator.save("SNI Challenge", True)
        return responses

    def _mod_config(self):
        """Modifies Apache config files to include challenge vhosts.

        Result: Apache config includes virtual servers for issued challs

        :returns: All TLS-SNI-01 addresses used
        :rtype: set

        """
        addrs = set()
        # Guard every generated vhost behind mod_ssl being loaded.
        config_text = "<IfModule mod_ssl.c>\n"
        for achall in self.achalls:
            achall_addrs = self._get_addrs(achall)
            addrs.update(achall_addrs)
            config_text += self._get_config_text(achall, achall_addrs)
        config_text += "</IfModule>\n"
        self._conf_include_check(self.configurator.parser.loc["default"])
        # Register the file for rollback before creating it.
        self.configurator.reverter.register_file_creation(
            True, self.challenge_conf)
        logger.debug("writing a config file with text:\n %s", config_text)
        with open(self.challenge_conf, "w") as new_conf:
            new_conf.write(config_text)
        return addrs

    def _get_addrs(self, achall):
        """Return the Apache addresses needed for TLS-SNI-01."""
        # TODO: Checkout _default_ rules.
        addrs = set()
        default_addr = obj.Addr(("*", str(
            self.configurator.config.tls_sni_01_port)))
        try:
            vhost = self.configurator.choose_vhost(achall.domain, temp=True)
        except (PluginError, MissingCommandlineFlag):
            # We couldn't find the virtualhost for this domain, possibly
            # because it's a new vhost that's not configured yet (GH #677),
            # or perhaps because there were multiple <VirtualHost> sections
            # in the config file (GH #1042). See also GH #2600.
            addrs.add(default_addr)
            return addrs
        for addr in vhost.addrs:
            if "_default_" == addr.get_addr():
                addrs.add(default_addr)
            else:
                # Reuse the vhost address, but on the challenge port.
                addrs.add(
                    addr.get_sni_addr(
                        self.configurator.config.tls_sni_01_port))
        return addrs

    def _conf_include_check(self, main_config):
        """Add TLS-SNI-01 challenge conf file into configuration.

        Adds TLS-SNI-01 challenge include file if it does not already exist
        within mainConfig

        :param str main_config: file path to main user apache config file

        """
        if len(self.configurator.parser.find_dir(
                parser.case_i("Include"), self.challenge_conf)) == 0:
            # print "Including challenge virtual host(s)"
            logger.debug("Adding Include %s to %s",
                         self.challenge_conf, parser.get_aug_path(main_config))
            self.configurator.parser.add_dir(
                parser.get_aug_path(main_config),
                "Include", self.challenge_conf)

    def _get_config_text(self, achall, ip_addrs):
        """Chocolate virtual server configuration text

        :param .KeyAuthorizationAnnotatedChallenge achall: Annotated
            TLS-SNI-01 challenge.

        :param list ip_addrs: addresses of challenged domain
            :class:`list` of type `~.obj.Addr`

        :returns: virtual host configuration text
        :rtype: str

        """
        ips = " ".join(str(i) for i in ip_addrs)
        document_root = os.path.join(
            self.configurator.config.work_dir, "tls_sni_01_page/")
        # TODO: Python docs is not clear how mutliline string literal
        # newlines are parsed on different platforms. At least on
        # Linux (Debian sid), when source file uses CRLF, Python still
        # parses it as "\n"... c.f.:
        # https://docs.python.org/2.7/reference/lexical_analysis.html
        return self.VHOST_TEMPLATE.format(
            vhost=ips,
            server_name=achall.response(achall.account_key).z_domain,
            ssl_options_conf_path=self.configurator.mod_ssl_conf,
            cert_path=self.get_cert_path(achall),
            key_path=self.get_key_path(achall),
            document_root=document_root).replace("\n", os.linesep)
| {
"repo_name": "dietsche/letsencrypt",
"path": "certbot-apache/certbot_apache/tls_sni_01.py",
"copies": "1",
"size": "6683",
"license": "apache-2.0",
"hash": 3285832167967193000,
"line_mean": 34.1736842105,
"line_max": 79,
"alpha_frac": 0.6319018405,
"autogenerated": false,
"ratio": 3.9970095693779903,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 190
} |
"""A class that performs TLS-SNI-01 challenges for Apache"""
import os
import logging
from letsencrypt.plugins import common
from letsencrypt_apache import obj
from letsencrypt_apache import parser
logger = logging.getLogger(__name__)
class ApacheTlsSni01(common.TLSSNI01):
    """Class that performs TLS-SNI-01 challenges within the Apache configurator

    :ivar configurator: ApacheConfigurator object

    :type configurator: :class:`~apache.configurator.ApacheConfigurator`

    :ivar list achalls: Annotated TLS-SNI-01
        (`.KeyAuthorizationAnnotatedChallenge`) challenges.

    :param list indices: Meant to hold indices of challenges in a
        larger array. ApacheTlsSni01 is capable of solving many challenges
        at once which causes an indexing issue within ApacheConfigurator
        who must return all responses in order. Imagine ApacheConfigurator
        maintaining state about where all of the http-01 Challenges,
        TLS-SNI-01 Challenges belong in the response array. This is an
        optional utility.

    :param str challenge_conf: location of the challenge config file

    """

    # One challenge vhost is generated from this template per challenge and
    # written into the challenge config file.
    VHOST_TEMPLATE = """\
<VirtualHost {vhost}>
    ServerName {server_name}
    UseCanonicalName on
    SSLStrictSNIVHostCheck on

    LimitRequestBody 1048576

    Include {ssl_options_conf_path}
    SSLCertificateFile {cert_path}
    SSLCertificateKeyFile {key_path}

    DocumentRoot {document_root}
</VirtualHost>

"""

    def __init__(self, *args, **kwargs):
        super(ApacheTlsSni01, self).__init__(*args, **kwargs)
        # File that will hold all generated challenge vhosts.
        self.challenge_conf = os.path.join(
            self.configurator.conf("challenge-location"),
            "le_tls_sni_01_cert_challenge.conf")

    def perform(self):
        """Perform a TLS-SNI-01 challenge.

        :returns: list of challenge responses (empty if there are no achalls)
        """
        if not self.achalls:
            return []
        # Save any changes to the configuration as a precaution
        # About to make temporary changes to the config
        self.configurator.save("Changes before challenge setup", True)
        # Prepare the server for HTTPS
        self.configurator.prepare_server_https(
            str(self.configurator.config.tls_sni_01_port), True)
        responses = []
        # Create all of the challenge certs
        for achall in self.achalls:
            responses.append(self._setup_challenge_cert(achall))
        # Setup the configuration
        addrs = self._mod_config()
        self.configurator.save("Don't lose mod_config changes", True)
        self.configurator.make_addrs_sni_ready(addrs)
        # Save reversible changes
        self.configurator.save("SNI Challenge", True)
        return responses

    def _mod_config(self):
        """Modifies Apache config files to include challenge vhosts.

        Result: Apache config includes virtual servers for issued challs

        :returns: All TLS-SNI-01 addresses used
        :rtype: set

        """
        addrs = set()
        # Guard every generated vhost behind mod_ssl being loaded.
        config_text = "<IfModule mod_ssl.c>\n"
        for achall in self.achalls:
            achall_addrs = self._get_addrs(achall)
            addrs.update(achall_addrs)
            config_text += self._get_config_text(achall, achall_addrs)
        config_text += "</IfModule>\n"
        self._conf_include_check(self.configurator.parser.loc["default"])
        # Register the file for rollback before creating it.
        self.configurator.reverter.register_file_creation(
            True, self.challenge_conf)
        logger.debug("writing a config file with text:\n %s", config_text)
        with open(self.challenge_conf, "w") as new_conf:
            new_conf.write(config_text)
        return addrs

    def _get_addrs(self, achall):
        """Return the Apache addresses needed for TLS-SNI-01."""
        # NOTE(review): choose_vhost may raise if no matching vhost can be
        # found (e.g. an unconfigured new vhost). Later versions of this
        # module catch PluginError/MissingCommandlineFlag here and fall back
        # to the default address -- confirm whether that should be backported.
        vhost = self.configurator.choose_vhost(achall.domain, temp=True)
        # TODO: Checkout _default_ rules.
        addrs = set()
        default_addr = obj.Addr(("*", str(
            self.configurator.config.tls_sni_01_port)))
        for addr in vhost.addrs:
            if "_default_" == addr.get_addr():
                addrs.add(default_addr)
            else:
                # Reuse the vhost address, but on the challenge port.
                addrs.add(
                    addr.get_sni_addr(
                        self.configurator.config.tls_sni_01_port))
        return addrs

    def _conf_include_check(self, main_config):
        """Add TLS-SNI-01 challenge conf file into configuration.

        Adds TLS-SNI-01 challenge include file if it does not already exist
        within mainConfig

        :param str main_config: file path to main user apache config file

        """
        if len(self.configurator.parser.find_dir(
                parser.case_i("Include"), self.challenge_conf)) == 0:
            # print "Including challenge virtual host(s)"
            logger.debug("Adding Include %s to %s",
                         self.challenge_conf, parser.get_aug_path(main_config))
            self.configurator.parser.add_dir(
                parser.get_aug_path(main_config),
                "Include", self.challenge_conf)

    def _get_config_text(self, achall, ip_addrs):
        """Chocolate virtual server configuration text

        :param .KeyAuthorizationAnnotatedChallenge achall: Annotated
            TLS-SNI-01 challenge.

        :param list ip_addrs: addresses of challenged domain
            :class:`list` of type `~.obj.Addr`

        :returns: virtual host configuration text
        :rtype: str

        """
        ips = " ".join(str(i) for i in ip_addrs)
        document_root = os.path.join(
            self.configurator.config.work_dir, "tls_sni_01_page/")
        # TODO: Python docs is not clear how mutliline string literal
        # newlines are parsed on different platforms. At least on
        # Linux (Debian sid), when source file uses CRLF, Python still
        # parses it as "\n"... c.f.:
        # https://docs.python.org/2.7/reference/lexical_analysis.html
        return self.VHOST_TEMPLATE.format(
            vhost=ips,
            server_name=achall.response(achall.account_key).z_domain,
            ssl_options_conf_path=self.configurator.mod_ssl_conf,
            cert_path=self.get_cert_path(achall),
            key_path=self.get_key_path(achall),
            document_root=document_root).replace("\n", os.linesep)
| {
"repo_name": "thanatos/lets-encrypt-preview",
"path": "letsencrypt-apache/letsencrypt_apache/tls_sni_01.py",
"copies": "1",
"size": "6210",
"license": "apache-2.0",
"hash": -4839916551796540000,
"line_mean": 33.5,
"line_max": 80,
"alpha_frac": 0.6325281804,
"autogenerated": false,
"ratio": 3.9655172413793105,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.509804542177931,
"avg_score": null,
"num_lines": null
} |
# A class that takes a single image, applies affine transformations, and renders it
# (and possibly a pixel-mask to tell which pixels are coming from the image)
# The class will only load the image when the render function is called (lazy evaluation)
import cv2
import numpy as np
import math
class SingleTileAffineRenderer:
    """Lazily renders a single image under an accumulated affine transformation.

    The image is only read from disk on the first call to render()/crop()
    (lazy evaluation). Consecutive affine transformations are condensed into
    a single 2x3 matrix. Optionally computes a binary pixel mask and/or
    per-pixel L1 distances from the image border (useful for blending).
    """

    def __init__(self, img_path, width, height, compute_mask=False, compute_distances=True):
        self.img_path = img_path
        self.width = width
        self.height = height
        # Condensed 2x3 affine transformation (starts as the identity).
        self.transform_matrix = np.eye(3)[:2]
        self.compute_mask = compute_mask
        self.mask = None
        self.compute_distances = compute_distances
        self.weights = None
        self.update_img_transformed_corners_and_bbox()
        # Rendering cache; self.img is only meaningful when already_rendered
        # is True. (Fix: self.img used to be unset until the first render.)
        self.already_rendered = False
        self.img = None

    def add_transformation(self, transform_matrix):
        """Composes the given 2x3 affine transformation on top of the current
        one and invalidates any cached rendering."""
        assert(transform_matrix.shape == (2, 3))
        self.transform_matrix = np.dot(
            np.vstack((transform_matrix, [0., 0., 1.])),
            np.vstack((self.transform_matrix, [0., 0., 1.])))[:2]
        self.update_img_transformed_corners_and_bbox()
        # Remove any rendering
        self.already_rendered = False
        self.img = None

    def get_start_point(self):
        """Returns the (min_x, min_y) corner of the transformed bounding box."""
        return (self.bbox[0], self.bbox[2])

    def get_bbox(self):
        """Returns the bounding box as [min_x, max_x, min_y, max_y]."""
        return self.bbox

    def get_min_distance(self, points):
        """Returns a list of minimal distances between each of the given points and any of the image boundaries (bounding polygon).
        Assumes that the given points are inside the bounding polygon."""
        # Get the normals of each line, and compute the distance between the point and the normal
        # Based on method 2 (but for 2D) from: http://www.qc.edu.hk/math/Advanced%20Level/Point_to_line.htm
        denominators = [np.linalg.norm(self.corners[i] - self.corners[(i + 1) % len(self.corners)])
                        for i in range(len(self.corners))]
        self_normals = get_normals(self.corners)
        if points.shape == (2,):  # A single point
            return np.min([np.linalg.norm(np.dot(n, points - c)) / denom
                           for c, n, denom in zip(self.corners, self_normals, denominators)])
        else:  # multiple points
            return [np.min([np.linalg.norm(np.dot(n, p - c)) / denom
                            for c, n, denom in zip(self.corners, self_normals, denominators)])
                    for p in points]

    def is_overlapping(self, other_tile):
        """Uses Separating Axes Theorem (http://www.dyn4j.org/2010/01/sat/) in order to decide
        whether the the current transformed tile and the other transformed tile are overlapping"""
        # Fetch the normals of each tile
        self_normals = get_normals(self.corners)
        other_normals = get_normals(other_tile.corners)
        # If the other tile's corners do not all lie on one side of some edge
        # of this tile (and vice versa), no separating axis exists.
        if not check_normals_side(self.corners, self_normals, other_tile.corners):
            return True
        if not check_normals_side(other_tile.corners, other_normals, self.corners):
            return True
        return False

    def render(self):
        """Returns the rendered image (after transformation), and the start point of the image in global coordinates"""
        if self.already_rendered:
            # Fix: this cached path used to return (bbox[0], bbox[1]) --
            # i.e. (min_x, max_x) as an np.array -- instead of the
            # (min_x, min_y) tuple returned by the non-cached path below.
            return self.img, (self.bbox[0], self.bbox[2])
        img = cv2.imread(self.img_path, 0)
        # Shift the transformation so the warped output starts at (0, 0).
        adjusted_transform = self.transform_matrix[:2].copy()
        adjusted_transform[0][2] -= self.bbox[0]
        adjusted_transform[1][2] -= self.bbox[2]
        self.img = cv2.warpAffine(img, adjusted_transform, self.shape, flags=cv2.INTER_AREA)
        self.already_rendered = True
        if self.compute_mask:
            # Warp an all-ones image to mark output pixels that came from the
            # source image.
            mask_img = np.ones(img.shape)
            self.mask = cv2.warpAffine(mask_img, adjusted_transform, self.shape, flags=cv2.INTER_AREA)
            self.mask[self.mask > 0] = 1
        if self.compute_distances:
            # The initial weights for each pixel is the minimum L1 distance
            # from the image boundary
            grid = np.mgrid[0:self.height, 0:self.width]
            weights_img = np.minimum(
                np.minimum(grid[0], self.height - 1 - grid[0]),
                np.minimum(grid[1], self.width - 1 - grid[1])
            ).astype(np.float32)
            self.weights = cv2.warpAffine(weights_img, adjusted_transform, self.shape, flags=cv2.INTER_AREA)
        # Returns the transformed image and the start point
        return self.img, (self.bbox[0], self.bbox[2])

    def fetch_mask(self):
        """Returns the pixel mask and the start point (renders if needed)."""
        assert(self.compute_mask)
        if not self.already_rendered:
            self.render()
        return self.mask, (self.bbox[0], self.bbox[2])

    def crop(self, from_x, from_y, to_x, to_y):
        """Returns the cropped image, its starting point, and the cropped mask (if the mask was computed).
        The given coordinates are specified using world coordinates."""
        # find the overlapping area of the given coordinates and the transformed tile
        overlapping_area = [max(from_x, self.bbox[0]), min(to_x, self.bbox[1]),
                            max(from_y, self.bbox[2]), min(to_y, self.bbox[3])]
        overlapping_width = overlapping_area[1] - overlapping_area[0] + 1
        overlapping_height = overlapping_area[3] - overlapping_area[2] + 1
        if overlapping_width <= 0 or overlapping_height <= 0:
            # No overlap between the area and the tile
            return None, None, None
        cropped_mask = None
        # Make sure the image was rendered
        self.render()
        cropped_img = self.img[overlapping_area[2] - self.bbox[2]:overlapping_area[3] - self.bbox[2] + 1,
                               overlapping_area[0] - self.bbox[0]:overlapping_area[1] - self.bbox[0] + 1]
        if self.compute_mask:
            cropped_mask = self.mask[overlapping_area[2] - self.bbox[2]:overlapping_area[3] - self.bbox[2] + 1,
                                     overlapping_area[0] - self.bbox[0]:overlapping_area[1] - self.bbox[0] + 1]
        # Take only the parts that are overlapping
        return cropped_img, (overlapping_area[0], overlapping_area[2]), cropped_mask

    def crop_with_distances(self, from_x, from_y, to_x, to_y):
        """Returns the cropped image, its starting point, and the cropped image L1 distances of each pixel inside the image from the edge
        of the rendered image (if the distances were computed).
        The given coordinates are specified using world coordinates."""
        # find the overlapping area of the given coordinates and the transformed tile
        overlapping_area = [max(from_x, self.bbox[0]), min(to_x, self.bbox[1]),
                            max(from_y, self.bbox[2]), min(to_y, self.bbox[3])]
        overlapping_width = overlapping_area[1] - overlapping_area[0] + 1
        overlapping_height = overlapping_area[3] - overlapping_area[2] + 1
        if overlapping_width <= 0 or overlapping_height <= 0:
            # No overlap between the area and the tile
            return None, None, None
        cropped_distances = None
        # Make sure the image was rendered
        self.render()
        cropped_img = self.img[overlapping_area[2] - self.bbox[2]:overlapping_area[3] - self.bbox[2] + 1,
                               overlapping_area[0] - self.bbox[0]:overlapping_area[1] - self.bbox[0] + 1]
        if self.compute_distances:
            cropped_distances = self.weights[overlapping_area[2] - self.bbox[2]:overlapping_area[3] - self.bbox[2] + 1,
                                             overlapping_area[0] - self.bbox[0]:overlapping_area[1] - self.bbox[0] + 1]
        # Take only the parts that are overlapping
        return cropped_img, (overlapping_area[0], overlapping_area[2]), cropped_distances

    # Helper methods (shouldn't be used from the outside)
    def update_img_transformed_corners_and_bbox(self):
        """Recomputes the transformed corner locations, the integer bounding
        box [min_x, max_x, min_y, max_y], and the output shape (w, h)."""
        pts = np.array([[0., 0.], [self.width - 1, 0.], [self.width - 1, self.height - 1], [0., self.height - 1]])
        self.corners = np.dot(self.transform_matrix[:2, :2], pts.T).T + \
            np.asarray(self.transform_matrix.T[2][:2]).reshape((1, 2))
        min_XY = np.min(self.corners, axis=0)
        max_XY = np.max(self.corners, axis=0)
        # Rounding to avoid float precision errors due to representation
        self.bbox = [int(math.floor(round(min_XY[0], 5))), int(math.ceil(round(max_XY[0], 5))),
                     int(math.floor(round(min_XY[1], 5))), int(math.ceil(round(max_XY[1], 5)))]
        self.shape = (self.bbox[1] - self.bbox[0] + 1, self.bbox[3] - self.bbox[2] + 1)
def get_normals(corners):
    """Given a polygon corners list, returns a list of non-normalized normals for each edge"""
    # Edge i runs from corner i to corner i+1 (wrapping around at the end);
    # rotating each edge vector by 90 degrees yields its normal.
    next_corners = np.roll(corners, -1, axis=0)
    return [(-edge[1], edge[0]) for edge in corners - next_corners]
def check_normals_side(corners1, normals1, corners2):
    """Checks if all corners2 appear on one side of polygon1"""
    assert(len(corners1) == len(normals1))
    for corner, normal in zip(corners1, normals1):
        # Sign of the signed projection of every corner of polygon2 onto this
        # edge's normal; values numerically at zero are ignored.
        signs = [np.sign(np.dot(normal, pt - corner)) for pt in corners2]
        signs = [s for s in signs if abs(s - 0.) > 0.0001]
        reference = signs[0]
        if any(s != reference for s in signs):
            # Mixed signs: polygon2 straddles the line through this edge.
            return False
    return True
def find_per_row_first_last_one(arr):
    """Given a 2D array (of only 1's in a quadrangle shape, and 0's), for each row find the first and the last occurrance of 1.
    Returns a 2D array with the same number of rows as arr, and 2 columns with the column-indices
    of the first and last one. If a row has only 0's, -1 will be returned on both indices"""
    res = np.full((arr.shape[0], 2), -1, dtype=np.int16)
    # take the first and last column of arr, and find all 1's
    arr_T = arr.T
    first_col_non_zero = np.nonzero(arr_T[0])
    last_col_non_zero = np.nonzero(arr_T[-1])
    # Rows whose run of 1's touches the left/right border: those endpoints
    # are known immediately.
    for r in first_col_non_zero[0]:
        res[r, 0] = 0
    for r in last_col_non_zero[0]:
        res[r, 1] = arr.shape[1] - 1
    # Now find the positions where the value changes in the middle of the matrix using np.diff
    nonzero = np.nonzero(np.diff(arr))
    # nonzero contents for each row, r:
    # if nonzero doesn't have a row with r, the row has the same value (either 0 or 1)
    # if nonzero has a single row with r, the row changes the value once (either from 0 to 1 or from 1 to 0)
    # if nonzero has row r twice, the row changes both from 0 to 1 and then from 1 to 0
    for r, c in zip(*nonzero):
        # np.diff flags index c when arr[r, c] != arr[r, c + 1]: a 0->1 step
        # means the first 1 sits at c + 1, a 1->0 step means the last 1 sat
        # at c. Changes arrive left-to-right, so if the left endpoint is
        # already known (set here or by the border pass above) this change
        # must be the falling edge; otherwise it is the rising edge.
        if res[r, 0] > -1:
            # already updated the left value, or there is a single change from 1 to 0
            res[r, 1] = c
        else:
            res[r, 0] = c + 1
    return res
| {
"repo_name": "Rhoana/rh_aligner",
"path": "old/renderer/single_tile_affine_renderer.py",
"copies": "1",
"size": "12335",
"license": "mit",
"hash": -9041177022771855000,
"line_mean": 53.5796460177,
"line_max": 213,
"alpha_frac": 0.6112687475,
"autogenerated": false,
"ratio": 3.55988455988456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46711533073845596,
"avg_score": null,
"num_lines": null
} |
# A class that takes a single image, applies transformations (both affine and non-affine), and renders it
# (and possibly a pixel-mask to tell which pixels are coming from the image).
# Assumption: there is only one non-affine transformation. TODO - get rid of this assumption
# The class will only load the image when the render function is called (lazy evaluation).
# Consecutive affine transformations will be condensed into a single transformation
import cv2
import numpy as np
import math
import scipy.interpolate as spint
import scipy.spatial.qhull as qhull
from scipy.spatial import ConvexHull
class SingleTileRenderer:
    """Lazily renders a single image tile after a chain of transformations.

    Affine transformations are condensed into two matrices: ``pre_non_affine``
    (applied before the single optional non-affine transformation) and
    ``post_non_affine`` (applied after it).  The image is only read from disk
    when render() is called, and the result is cached until a new
    transformation is added.

    NOTE(review): the class assumes at most one non-affine transformation
    (see add_transformation).
    """
    def __init__(self, img_path, width, height, compute_mask=False, compute_distances=True):
        """Set up an un-rendered tile.

        :param img_path: path of the image on disk (loaded lazily by render())
        :param width: source image width, in pixels
        :param height: source image height, in pixels
        :param compute_mask: if True, render() also produces a 0/1 mask of
            valid (source-covered) pixels
        :param compute_distances: if True, render() also produces per-pixel
            weights (distance from the nearest source-image border)
        """
        self.img_path = img_path
        self.width = width
        self.height = height
        # Starting with a single identity affine transformation
        self.pre_non_affine_transform = np.eye(3)[:2]
        self.non_affine_transform = None
        self.post_non_affine_transform = np.eye(3)[:2]
        self.compute_mask = compute_mask
        self.mask = None
        self.compute_distances = compute_distances
        self.weights = None
        # Bounding box layout is [from_x, to_x, from_y, to_y], inclusive.
        self.bbox = [0, width - 1, 0, height - 1]
        # NOTE(review): shape is (width, height) — matches cv2.warpAffine's
        # dsize argument order, not numpy's (rows, cols).
        self.shape = (width, height)
        # Store the pixel locations (x,y) of the surrounding polygon of the image
        self.surrounding_polygon = np.array([[0., 0.], [width - 1., 0.], [width - 1., height - 1.], [0., height - 1.]])
        self.start_point = (0, 0) # If only affine is used then this is always (bbox[0], bbox[2]), with affine it can be different
        # Save for caching
        self.already_rendered = False
    def add_transformation(self, model):
        """Append a transformation to the tile and invalidate the cached render.

        Affine models are folded into the pre- or post-non-affine matrix
        (depending on whether a non-affine transformation was already added);
        a non-affine model is stored as-is and used to re-map the surrounding
        polygon.

        :param model: transformation object exposing is_affine(), and either
            get_matrix()/apply_special() (affine) or get_point_map()
            (non-affine).  Defined elsewhere in the project — TODO confirm API.
        """
        if model.is_affine():
            new_model_matrix = model.get_matrix()
            # Need to add the transformation either to the pre_non_affine or the post_non_affine
            if self.non_affine_transform is None:
                cur_transformation = self.pre_non_affine_transform
            else:
                cur_transformation = self.post_non_affine_transform
            # Compute the new transformation (multiply from the left)
            new_transformation = np.dot(new_model_matrix, np.vstack((cur_transformation, [0., 0., 1.])))[:2]
            if self.non_affine_transform is None:
                self.pre_non_affine_transform = new_transformation
            else:
                self.post_non_affine_transform = new_transformation
            # Apply the model to each of the surrounding polygon locations
            self.surrounding_polygon = model.apply_special(self.surrounding_polygon)
        else:
            # Non-affine transformation
            self.non_affine_transform = model
            # TODO - need to see if this returns a sufficient bounding box for the reverse transformation
            # Find the new surrounding polygon locations
            # using a forward transformation from the boundaries of the source image to the destination
            boundary1 = np.array([[float(p), 0.] for p in np.arange(self.width)])
            boundary2 = np.array([[float(p), float(self.height - 1)] for p in np.arange(self.width)])
            boundary3 = np.array([[0., float(p)] for p in np.arange(self.height)])
            boundary4 = np.array([[float(self.width - 1), float(p)] for p in np.arange(self.height)])
            boundaries = np.concatenate((boundary1, boundary2, boundary3, boundary4))
            # Map the boundary samples through the accumulated pre-affine first.
            boundaries = np.dot(self.pre_non_affine_transform[:2, :2], boundaries.T).T + self.pre_non_affine_transform[:, 2].reshape((1, 2))
            src_points, dest_points = model.get_point_map()
            cubic_interpolator = spint.CloughTocher2DInterpolator(src_points, dest_points)
            self.surrounding_polygon = cubic_interpolator(boundaries)
            # Find the new surrounding polygon locations (using the destination points of the non affine transformation)
            #dest_points = model.get_point_map()[1]
            #hull = ConvexHull(dest_points)
            #self.surrounding_polygon = dest_points[hull.vertices]
        # Update bbox and shape according to the new borders
        self.bbox, self.shape = compute_bbox_and_shape(self.surrounding_polygon)
        # Remove any rendering
        self.already_rendered = False
        self.img = None
    def get_bbox(self):
        """Return the tile's bounding box [from_x, to_x, from_y, to_y] in world coordinates."""
        return self.bbox
#    def contains_point(self, p):
#        """Return True if the point is inside the image boundaries (bounding polygon)."""
#    def get_min_distance(self, points):
#        """Returns a list of minimal distances between each of the given points and any of the image boundaries (bounding polygon).
#           Assumes that the given points are inside the bounding polygon."""
#        #assert(p.shape == (2,))
#        # Get the normals of each line, and compute the distance between the point and the normal
#        # Based on method 2 (but for 2D) from: http://www.qc.edu.hk/math/Advanced%20Level/Point_to_line.htm
#        denominators = [np.linalg.norm(self.corners[i] - self.corners[(i + 1) % len(self.corners)]) for i in range(len(self.corners))]
#        self_normals = get_normals(self.corners)
#        if points.shape == (2,): # A single point
#            dist = np.min([np.linalg.norm(np.dot(n, points - c)) / denom
#                           for c, n, denom in zip(self.corners, self_normals, denominators)])
#            return dist
#        else: # multiple points
#            dists = [np.min([np.linalg.norm(np.dot(n, p - c)) / denom
#                             for c, n, denom in zip(self.corners, self_normals, denominators)])
#                     for p in points]
#            return dists
#    def is_overlapping(self, other_tile):
#        """Uses Separating Axes Theorem (http://www.dyn4j.org/2010/01/sat/) in order to decide
#           whether the the current transformed tile and the other transformed tile are overlapping"""
#        # Fetch the normals of each tile
#        self_normals = get_normals(self.corners)
#        other_normals = get_normals(other_tile.corners)
#        # Check all edges of self against the normals of the other tile
#        if not check_normals_side(self.corners, self_normals, other_tile.corners):
#            return True
#        # Check all edges of the other tile against the normals of self
#        if not check_normals_side(other_tile.corners, other_normals, self.corners):
#            return True
#        return False
    def render(self):
        """Returns the rendered image (after transformation), and the start point of the image in global coordinates"""
        if self.already_rendered:
            # Cached result from a previous call; invalidated by add_transformation().
            return self.img, self.start_point
        # Load the image lazily, as grayscale (flag 0).
        img = cv2.imread(self.img_path, 0)
        self.start_point = np.array([self.bbox[0], self.bbox[2]]) # may be different for non-affine result
        if self.non_affine_transform is None:
            # If there wasn't a non-affine transformation, we only need to apply an affine transformation
            adjusted_transform = self.pre_non_affine_transform[:2].copy()
            # Shift the translation so the output image starts at (0, 0).
            adjusted_transform[0][2] -= self.bbox[0]
            adjusted_transform[1][2] -= self.bbox[2]
            self.img = cv2.warpAffine(img, adjusted_transform, self.shape, flags=cv2.INTER_AREA)
            if self.compute_mask:
                mask_img = np.ones(img.shape)
                self.mask = cv2.warpAffine(mask_img, adjusted_transform, self.shape, flags=cv2.INTER_AREA)
                # Re-binarize: interpolation produces fractional values at edges.
                self.mask[self.mask > 0] = 1
            if self.compute_distances:
                # The initial weights for each pixel is the minimum from the image boundary
                grid = np.mgrid[0:self.height, 0:self.width]
                weights_img = np.minimum(
                    np.minimum(grid[0], self.height - 1 - grid[0]),
                    np.minimum(grid[1], self.width - 1 - grid[1])
                ).astype(np.float32)
                self.weights = cv2.warpAffine(weights_img, adjusted_transform, self.shape, flags=cv2.INTER_AREA)
        else:
            # Apply a reverse pre affine transformation on the source points of the non-affine transformation,
            # and a post affine transformation on the destination points
            src_points, dest_points = self.non_affine_transform.get_point_map()
            inverted_pre = np.linalg.inv(np.vstack([self.pre_non_affine_transform, [0., 0., 1.]]))[:2]
            src_points = np.dot(inverted_pre[:2, :2], src_points.T).T + inverted_pre[:, 2].reshape((1, 2))
            dest_points = np.dot(self.post_non_affine_transform[:2, :2], dest_points.T).T + self.post_non_affine_transform[:, 2].reshape((1, 2))
            # Move the destination points to start at (0, 0) --> less rendering
            dest_points = dest_points - np.array([self.bbox[0], self.bbox[2]])
            # Set the target grid using the shape
            out_grid_x, out_grid_y = np.mgrid[0:self.shape[0], 0:self.shape[1]]
            # TODO - is there a way to further restrict the target grid size, and speed up the interpolation?
            # Use griddata to interpolate all the destination points
            #out_grid_z = spint.griddata(dest_points, src_points, (out_grid_x, out_grid_y), method='linear', fill_value=-1.)
            out_grid_z = spint.griddata(dest_points, src_points, (out_grid_x, out_grid_y), method='cubic', fill_value=-1.)
            # Split the interpolated (x, y) source coordinates into the two maps cv2.remap expects.
            map_x = np.append([], [ar[:,0] for ar in out_grid_z]).reshape(self.shape[0], self.shape[1]).astype('float32')
            map_y = np.append([], [ar[:,1] for ar in out_grid_z]).reshape(self.shape[0], self.shape[1]).astype('float32')
            # find all rows and columns that are mapped before or after the boundaries of the source image, and remove them
            map_valid_cells = np.where((map_x >= 0.) & (map_x < float(self.width)) & (map_y >= 0.) & (map_y < float(self.height)))
            min_col_row = np.min(map_valid_cells, axis=1)
            max_col_row = np.max(map_valid_cells, axis=1)
            # Crop the maps to the valid region only.
            map_x = map_x[min_col_row[0]:max_col_row[0], min_col_row[1]:max_col_row[1]]
            map_y = map_y[min_col_row[0]:max_col_row[0], min_col_row[1]:max_col_row[1]]
            # remap the source points to the destination points
            self.img = cv2.remap(img, map_x, map_y, cv2.INTER_CUBIC).T
            # NOTE(review): min_col_row comes from np.where and is in (axis0, axis1)
            # order of the cropped maps — verify it matches start_point's (x, y) order.
            self.start_point = self.start_point + min_col_row
            # Add mask and weights computation
            if self.compute_mask:
                mask_img = np.ones(img.shape)
                self.mask = cv2.remap(mask_img, map_x, map_y, cv2.INTER_CUBIC).T
                self.mask[self.mask > 0] = 1
            if self.compute_distances:
                # The initial weights for each pixel is the minimum from the image boundary
                grid = np.mgrid[0:self.height, 0:self.width]
                weights_img = np.minimum(
                    np.minimum(grid[0], self.height - 1 - grid[0]),
                    np.minimum(grid[1], self.width - 1 - grid[1])
                ).astype(np.float32)
                self.weights = cv2.remap(weights_img, map_x, map_y, cv2.INTER_CUBIC).T
                # Cubic interpolation can overshoot below zero; clamp.
                self.weights[self.weights < 0] = 0
        self.already_rendered = True
        return self.img, self.start_point
    def fetch_mask(self):
        """Return (mask, (start_x, start_y)); renders the tile first if needed.

        Only valid when the renderer was created with compute_mask=True.
        """
        assert(self.compute_mask)
        if not self.already_rendered:
            self.render()
        return self.mask, (self.bbox[0], self.bbox[2])
    def crop(self, from_x, from_y, to_x, to_y):
        """Returns the cropped image, its starting point, and the cropped mask (if the mask was computed).
           The given coordinates are specified using world coordinates."""
        # find the overlapping area of the given coordinates and the transformed tile
        overlapping_area = [max(from_x, self.bbox[0]), min(to_x, self.bbox[1]), max(from_y, self.bbox[2]), min(to_y, self.bbox[3])]
        overlapping_width = overlapping_area[1] - overlapping_area[0] + 1
        overlapping_height = overlapping_area[3] - overlapping_area[2] + 1
        if overlapping_width <= 0 or overlapping_height <= 0:
            # No overlap between the area and the tile
            return None, None, None
        cropped_mask = None
        # Make sure the image was rendered
        self.render()
        # Check with the actual image bounding box (may be different because of the non-affine transformation)
        actual_bbox = [self.start_point[0], self.start_point[0] + self.img.shape[1], self.start_point[1], self.start_point[1] + self.img.shape[0]]
        overlapping_area = [max(from_x, actual_bbox[0]), min(to_x, actual_bbox[1]), max(from_y, actual_bbox[2]), min(to_y, actual_bbox[3])]
        overlapping_width = overlapping_area[1] - overlapping_area[0] + 1
        overlapping_height = overlapping_area[3] - overlapping_area[2] + 1
        if overlapping_width <= 0 or overlapping_height <= 0:
            # No overlap between the area and the tile
            return None, None, None
        # Slice in (row, col) order: rows from y, columns from x.
        cropped_img = self.img[overlapping_area[2] - actual_bbox[2]:overlapping_area[3] - actual_bbox[2] + 1,
                               overlapping_area[0] - actual_bbox[0]:overlapping_area[1] - actual_bbox[0] + 1]
        if self.compute_mask:
            cropped_mask = self.mask[overlapping_area[2] - actual_bbox[2]:overlapping_area[3] - actual_bbox[2] + 1,
                                     overlapping_area[0] - actual_bbox[0]:overlapping_area[1] - actual_bbox[0] + 1]
        # Take only the parts that are overlapping
        return cropped_img, (overlapping_area[0], overlapping_area[2]), cropped_mask
    def crop_with_distances(self, from_x, from_y, to_x, to_y):
        """Returns the cropped image, its starting point, and the cropped image L1 distances of each pixel inside the image from the edge
           of the rendered image (if the mask was computed).
           The given coordinates are specified using world coordinates."""
        # find the overlapping area of the given coordinates and the transformed tile
        overlapping_area = [max(from_x, self.bbox[0]), min(to_x, self.bbox[1]), max(from_y, self.bbox[2]), min(to_y, self.bbox[3])]
        overlapping_width = overlapping_area[1] - overlapping_area[0] + 1
        overlapping_height = overlapping_area[3] - overlapping_area[2] + 1
        if overlapping_width <= 0 or overlapping_height <= 0:
            # No overlap between the area and the tile
            return None, None, None
        cropped_distances = None
        # Make sure the image was rendered
        self.render()
        # Check with the actual image bounding box (may be different because of the non-affine transformation)
        actual_bbox = [self.start_point[0], self.start_point[0] + self.img.shape[1], self.start_point[1], self.start_point[1] + self.img.shape[0]]
        overlapping_area = [max(from_x, actual_bbox[0]), min(to_x, actual_bbox[1]), max(from_y, actual_bbox[2]), min(to_y, actual_bbox[3])]
        overlapping_width = overlapping_area[1] - overlapping_area[0] + 1
        overlapping_height = overlapping_area[3] - overlapping_area[2] + 1
        if overlapping_width <= 0 or overlapping_height <= 0:
            # No overlap between the area and the tile
            return None, None, None
        cropped_img = self.img[overlapping_area[2] - actual_bbox[2]:overlapping_area[3] - actual_bbox[2] + 1,
                               overlapping_area[0] - actual_bbox[0]:overlapping_area[1] - actual_bbox[0] + 1]
        if self.compute_distances:
            cropped_distances = self.weights[overlapping_area[2] - actual_bbox[2]:overlapping_area[3] - actual_bbox[2] + 1,
                                             overlapping_area[0] - actual_bbox[0]:overlapping_area[1] - actual_bbox[0] + 1]
        # Take only the parts that are overlapping
        return cropped_img, (overlapping_area[0], overlapping_area[2]), cropped_distances
# Helper methods (shouldn't be used from the outside)
def compute_bbox_and_shape(polygon):
    """Return the integer bounding box and shape of a set of 2D points.

    The coordinates are rounded to 5 decimal places first (to absorb float
    representation noise) before flooring the minima and ceiling the maxima.

    :param polygon: array of (x, y) points, shape (N, 2)
    :return: ([min_x, max_x, min_y, max_y], (width, height)), inclusive bounds
    """
    lo = np.min(polygon, axis=0)
    hi = np.max(polygon, axis=0)
    # Rounding to avoid float precision errors due to representation
    x_min = int(math.floor(round(lo[0], 5)))
    x_max = int(math.ceil(round(hi[0], 5)))
    y_min = int(math.floor(round(lo[1], 5)))
    y_max = int(math.ceil(round(hi[1], 5)))
    bbox = [x_min, x_max, y_min, y_max]
    shape = (x_max - x_min + 1, y_max - y_min + 1)
    return bbox, shape
#def get_normals(corners):
# """Given a polygon corners list, returns a list of non-normalized normals for each edge"""
# edges = [(corners[i] - corners[(i + 1) % len(corners)]) for i in range(len(corners))]
# normals = [(-e[1], e[0]) for e in edges]
# return normals
#def check_normals_side(corners1, normals1, corners2):
# """Checks if all corners2 appear on one side of polygon1"""
# assert(len(corners1) == len(normals1))
# for c, n in zip(corners1, normals1):
# signs2 = [np.sign(np.dot(n, p - c)) for p in corners2]
# signs2 = [s for s in signs2 if abs(s - 0.) > 0.0001] # remove all +-0.
# if np.any(signs2 != signs2[0]):
# return False
# return True
#def find_per_row_first_last_one(arr):
# """Given a 2D array (of only 1's in a quadrangle shape, and 0's), for each row find the first and the last occurrance of 1.
# Returns a 2D array with the same number of rows as arr, and 2 columns with the column-indices
# of the first and last one. If a row has only 0's, -1 will be returned on both indices"""
# res = np.full((arr.shape[0], 2), -1, dtype=np.int16)
# # take the first and last column of arr, and find all 1's
# arr_T = arr.T
# first_col_non_zero = np.nonzero(arr_T[0])
# last_col_non_zero = np.nonzero(arr_T[-1])
# for r in first_col_non_zero[0]:
# res[r, 0] = 0
# for r in last_col_non_zero[0]:
# res[r, 1] = arr.shape[1] - 1
# # Now find the positions where the value changes in the middle of the matrix using np.diff
# nonzero = np.nonzero(np.diff(arr))
# # nonzero contents for each row, r:
# # if nonzero doesn't have a row with r, the row has the same value (either 0 or 1)
# # if nonzero has a single row with r, the row changes the value once (either from 0 to 1 or from 1 to 0)
# # if nonzero has row r twice, the row changes both from 0 to 1 and then from 1 to 0
# for r, c in zip(*nonzero):
# if res[r, 0] > -1:
# # already updated the left value, or there is a single change from 1 to 0
# res[r, 1] = c
# else:
# res[r, 0] = c + 1
# return res
# An implementation of scipy's interpolate.griddata broken to 2 parts
# (taken from: http://stackoverflow.com/questions/20915502/speedup-scipy-griddata-for-multiple-interpolations-between-two-irregular-grids)
def interp_weights(xyz, uvw):
    """First half of a linear scipy.interpolate.griddata, split out for reuse.

    Builds a Delaunay triangulation of the source points `xyz`, locates each
    query point of `uvw` in a simplex, and computes its barycentric
    coordinates, so that many interpolations over the same grids can reuse
    the (expensive) triangulation.

    :param xyz: source points, shape (N, D)
    :param uvw: query points, shape (M, D)
    :return: (vertices, weights) — for each query point, the indices of its
        enclosing simplex's vertices, shape (M, D+1), and the matching
        barycentric weights, shape (M, D+1).  Points outside the convex hull
        get a negative weight (same convention as griddata).
    """
    # BUG FIX: `d` (the point dimensionality) was used without being defined,
    # raising NameError on every call.  Derive it from the input points.
    d = xyz.shape[1]
    tri = qhull.Delaunay(xyz)
    simplex = tri.find_simplex(uvw)
    vertices = np.take(tri.simplices, simplex, axis=0)
    # tri.transform holds, per simplex, the affine map to barycentric
    # coordinates: rows [:d] are the matrix, row [d] is the offset vertex.
    temp = np.take(tri.transform, simplex, axis=0)
    delta = uvw - temp[:, d]
    bary = np.einsum('njk,nk->nj', temp[:, :d, :], delta)
    # The last barycentric coordinate is determined by the others summing to 1.
    return vertices, np.hstack((bary, 1 - bary.sum(axis=1, keepdims=True)))
def interpolate(values, vtx, wts, fill_value=np.nan):
    """Second half of griddata: combine precomputed vertices and weights.

    :param values: per-source-point values to interpolate
    :param vtx: simplex vertex indices from interp_weights, shape (M, D+1)
    :param wts: barycentric weights from interp_weights, shape (M, D+1)
    :param fill_value: value assigned to points outside the convex hull
        (detected by a negative barycentric weight)
    :return: interpolated values, shape (M,)
    """
    result = (np.take(values, vtx) * wts).sum(axis=1)
    outside_hull = np.any(wts < 0, axis=1)
    result[outside_hull] = fill_value
    return result
| {
"repo_name": "Rhoana/rh_aligner",
"path": "old/renderer/single_tile_renderer.py",
"copies": "1",
"size": "20035",
"license": "mit",
"hash": 347611344933902900,
"line_mean": 55.1204481793,
"line_max": 208,
"alpha_frac": 0.6209134015,
"autogenerated": false,
"ratio": 3.535380271748721,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4656293673248721,
"avg_score": null,
"num_lines": null
} |
"""A class to aid in generating random numbers and sequences
It doesn't seem necessary to create an options class since this class will probably not be extended
"""
import random, sys
nts = ['A', 'C', 'G', 'T']  # DNA nucleotide alphabet
hexchars = '0123456789abcdef'  # hex digits used to assemble UUID strings
uuid4special = '89ab'  # legal "variant" characters at position 20 of a UUID4

class RandomSource:
    """A seedable source of random numbers, sequences and identifiers.

    :param seed: seed for the pseudorandom number generator (optional)
    :type seed: int
    """
    def __init__(self, seed=None):
        self._random = random.Random()
        # BUG FIX: compare against None so that a seed of 0 is honored
        # (the old truthiness test silently ignored it).
        if seed is not None:
            self._random.seed(seed)

    def choice(self, arr):
        """Uniform random selection of a member of a list.

        :param arr: list you want to select an element from
        :type arr: list
        :return: one element from the list
        """
        ind = self.randint(0, len(arr) - 1)
        return arr[ind]

    def random(self):
        """Generate a random number.

        :return: uniform random float between 0 and 1
        :rtype: float
        """
        return self._random.random()

    def gauss(self, mu, sigma):
        """Generate a random number from a Gaussian distribution.

        :param mu: mean of the distribution
        :param sigma: standard deviation of the distribution
        :type mu: float
        :type sigma: float
        :return: a random draw
        :rtype: float
        """
        return self._random.gauss(mu, sigma)

    def randint(self, a, b):
        """Generate a uniform random integer between a and b (both inclusive),
        like randint of the usual random class.

        :return: random int between a and b
        :rtype: int
        """
        return self._random.randint(a, b)

    def different_random_nt(self, nt):
        """Generate a random nucleotide change (uniform); never returns the input itself.

        Note: the docstring now precedes any statement (previously a stray
        `global nts` made it a plain string, not a docstring; reading a
        module-level name needs no `global` declaration anyway).

        :param nt: current nucleotide (case-insensitive)
        :type nt: char
        :return: new nucleotide
        :rtype: char
        """
        return self._random.choice([x for x in nts if x != nt.upper()])

    def random_nt(self):
        """Produce a random nucleotide (uniform random).

        :return: nucleotide
        :rtype: char
        """
        return self._random.choice(nts)

    def get_weighted_random_index(self, weights):
        """Return an index of an array chosen with probability proportional to its weight.

        A uniform draw in [0, sum(weights)) is compared against the running
        cumulative sum; the first bucket it falls below wins.

        :param weights: list of non-negative weights [w1, w2, ... wN]
        :type weights: list
        :return: chosen index (None only if float rounding leaves the draw
            past the last cumulative bound, in which case a warning is printed)
        :rtype: int
        """
        tot = float(sum([float(x) for x in weights]))
        # Running cumulative sum of the (unnormalized) weights.
        fracarray = [weights[0]]
        for w in weights[1:]:
            fracarray.append(w + fracarray[-1])
        rnum = self._random.random() * tot
        for i in range(len(weights)):
            if rnum < fracarray[i]:
                return i
        sys.stderr.write("Warning unexpected no random\n")

    def uuid4(self):
        """Make an id in the format of UUID4.

        Keep in mind this is pseudorandom: with a fixed seed the same ids are
        regenerated, and the result is not cryptographically random.
        """
        return ''.join([hexchars[self.randint(0, 15)] for x in range(0, 8)]) + '-' +\
               ''.join([hexchars[self.randint(0, 15)] for x in range(0, 4)]) + '-' +\
               '4' + ''.join([hexchars[self.randint(0, 15)] for x in range(0, 3)]) + '-' +\
               uuid4special[self.randint(0, 3)] + ''.join([hexchars[self.randint(0, 15)] for x in range(0, 3)]) + '-' +\
               ''.join([hexchars[self.randint(0, 15)] for x in range(0, 12)])
| {
"repo_name": "jason-weirather/py-seq-tools",
"path": "seqtools/simulation/randomsource.py",
"copies": "1",
"size": "3309",
"license": "apache-2.0",
"hash": 818378733695486200,
"line_mean": 29.6388888889,
"line_max": 181,
"alpha_frac": 0.6424901783,
"autogenerated": false,
"ratio": 3.5314834578441836,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4673973636144183,
"avg_score": null,
"num_lines": null
} |
"""A class to build directory diff tools on."""
import os
import dircache
import cmpcache
import statcache
from stat import *
class dircmp:
    """Directory comparison class.

    Legacy Python 2 module: relies on the long-removed `dircache`,
    `cmpcache` and `statcache` modules, `print` statements and the `<>`
    operator.  Usage pattern: dircmp().new(a, b), then run(), then report()
    (or the phase4/closure variants for recursion).
    """
    def new(self, a, b):
        """Initialize.

        Returns self so calls can be chained: dircmp().new(a, b).
        """
        self.a = a
        self.b = b
        # Properties that caller may change before calling self.run():
        self.hide = [os.curdir, os.pardir] # Names never to be shown
        self.ignore = ['RCS', 'tags'] # Names ignored in comparison
        return self
    def run(self):
        """Compare everything except common subdirectories."""
        # `filter` here is this module's filter(list, skip), not the builtin.
        self.a_list = filter(dircache.listdir(self.a), self.hide)
        self.b_list = filter(dircache.listdir(self.b), self.hide)
        self.a_list.sort()
        self.b_list.sort()
        self.phase1()
        self.phase2()
        self.phase3()
    def phase1(self):
        """Compute common names.

        Fills self.a_only, self.b_only and self.common.
        """
        self.a_only = []
        self.common = []
        for x in self.a_list:
            if x in self.b_list:
                self.common.append(x)
            else:
                self.a_only.append(x)
        self.b_only = []
        for x in self.b_list:
            if x not in self.common:
                self.b_only.append(x)
    def phase2(self):
        """Distinguish files, directories, funnies.

        Partitions self.common into common_dirs / common_files / common_funny
        (the latter collects type mismatches and anything that can't be stat'ed).
        """
        self.common_dirs = []
        self.common_files = []
        self.common_funny = []
        for x in self.common:
            a_path = os.path.join(self.a, x)
            b_path = os.path.join(self.b, x)
            ok = 1
            try:
                a_stat = statcache.stat(a_path)
            except os.error, why:
                # print 'Can\'t stat', a_path, ':', why[1]
                ok = 0
            try:
                b_stat = statcache.stat(b_path)
            except os.error, why:
                # print 'Can\'t stat', b_path, ':', why[1]
                ok = 0
            if ok:
                a_type = S_IFMT(a_stat[ST_MODE])
                b_type = S_IFMT(b_stat[ST_MODE])
                if a_type <> b_type:
                    self.common_funny.append(x)
                elif S_ISDIR(a_type):
                    self.common_dirs.append(x)
                elif S_ISREG(a_type):
                    self.common_files.append(x)
                else:
                    self.common_funny.append(x)
            else:
                self.common_funny.append(x)
    def phase3(self):
        """Find out differences between common files.

        Fills self.same_files, self.diff_files and self.funny_files.
        """
        xx = cmpfiles(self.a, self.b, self.common_files)
        self.same_files, self.diff_files, self.funny_files = xx
    def phase4(self):
        """Find out differences between common subdirectories.
        A new dircmp object is created for each common subdirectory,
        these are stored in a dictionary indexed by filename.
        The hide and ignore properties are inherited from the parent."""
        self.subdirs = {}
        for x in self.common_dirs:
            a_x = os.path.join(self.a, x)
            b_x = os.path.join(self.b, x)
            self.subdirs[x] = newdd = dircmp().new(a_x, b_x)
            newdd.hide = self.hide
            newdd.ignore = self.ignore
            newdd.run()
    def phase4_closure(self):
        """Recursively call phase4() on subdirectories."""
        self.phase4()
        for x in self.subdirs.keys():
            self.subdirs[x].phase4_closure()
    def report(self):
        """Print a report on the differences between a and b."""
        # Assume that phases 1 to 3 have been executed
        # Output format is purposely lousy
        print 'diff', self.a, self.b
        if self.a_only:
            print 'Only in', self.a, ':', self.a_only
        if self.b_only:
            print 'Only in', self.b, ':', self.b_only
        if self.same_files:
            print 'Identical files :', self.same_files
        if self.diff_files:
            print 'Differing files :', self.diff_files
        if self.funny_files:
            print 'Trouble with common files :', self.funny_files
        if self.common_dirs:
            print 'Common subdirectories :', self.common_dirs
        if self.common_funny:
            print 'Common funny cases :', self.common_funny
    def report_closure(self):
        """Print reports on self and on subdirs.
        If phase 4 hasn't been done, no subdir reports are printed."""
        self.report()
        try:
            x = self.subdirs
        except AttributeError:
            return # No subdirectories computed
        for x in self.subdirs.keys():
            print
            self.subdirs[x].report_closure()
    def report_phase4_closure(self):
        """Report and do phase 4 recursively."""
        self.report()
        self.phase4()
        for x in self.subdirs.keys():
            print
            self.subdirs[x].report_phase4_closure()
def cmpfiles(a, b, common):
    """Compare common files in two directories.

    Return a 3-tuple of lists:
      - files that compare equal
      - files that compare different
      - funny cases (can't stat etc.)
    """
    same, different, funny = [], [], []
    buckets = (same, different, funny)
    for name in common:
        # This module's cmp() returns 0 / 1 / 2, used here as a bucket index.
        outcome = cmp(os.path.join(a, name), os.path.join(b, name))
        buckets[outcome].append(name)
    return buckets
def cmp(a, b):
    """Compare two files.

    Return:
        0 for equal
        1 for different
        2 for funny cases (can't stat, etc.)
    """
    try:
        # cmpcache.cmp is truthy when the file contents match.
        return 0 if cmpcache.cmp(a, b) else 1
    except os.error:
        return 2
def filter(list, skip):
    """Return a copy of `list` with every item that occurs in `skip` removed.

    NOTE: deliberately shadows the builtin `filter` in this legacy module;
    the parameter names are kept for backward compatibility.
    """
    return [item for item in list if item not in skip]
def demo():
    """Demonstration and testing.

    Usage: dircmp.py [-r] dir1 dir2
    With -r, recursively report on every common subdirectory as well.
    (Python 2 only: uses the `<>` operator and the old raise syntax.)
    """
    import sys
    import getopt
    options, args = getopt.getopt(sys.argv[1:], 'r')
    if len(args) <> 2: raise getopt.error, 'need exactly two args'
    dd = dircmp().new(args[0], args[1])
    dd.run()
    if ('-r', '') in options:
        dd.report_phase4_closure()
    else:
        dd.report()
# Run the demonstration when executed as a script rather than imported.
if __name__ == "__main__":
    demo()
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.0/Lib/lib-old/dircmp.py",
"copies": "4",
"size": "6029",
"license": "mit",
"hash": 1277190448387764000,
"line_mean": 28.9950248756,
"line_max": 72,
"alpha_frac": 0.5345828496,
"autogenerated": false,
"ratio": 3.789440603394092,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6324023452994092,
"avg_score": null,
"num_lines": null
} |
""" A class to convert a corpus of PREPROCESSED documents into the Vector Space representation """
from collections import defaultdict
import TermParamsClass
import math
class VectorSpace:
    """Convert a corpus of PREPROCESSED documents into a vector-space
    representation (an inverted file of term -> {document: positions} with
    inverse-document-frequency weights).

    Python 2 module: relies on map/filter returning lists.
    """
    N = 10  # Total number of documents in the corpus

    @staticmethod
    def computeInverseDocFreq(numDocsWithTerm, numDocsTotal):
        """Return the IDF weight for a term, or 0 when the term appears nowhere."""
        if numDocsWithTerm == 0:
            return 0
        else:
            return -math.log(float(numDocsWithTerm) / numDocsTotal)

    @staticmethod
    def getAllIndices(list, term):
        """Return every index at which `term` occurs in `list`."""
        return [i for i, x in enumerate(list) if x == term]

    def __init__(self, docs, origQuery):
        """Build the inverted file from the documents' titles and descriptions.

        :param docs: list of dicts with "Title", "Description", "DisplayUrl"
            and "Relevant" keys ("Relevant" is 'y' for relevant documents)
        :param origQuery: the original query string (whitespace separated)
        """
        self.numRel = 0
        self.numNonRel = 0
        self.origQuery = origQuery.split()
        self.vocab = set([])  # Vocabulary built from title and text of all documents
        for d in docs:
            d["Title"] = d["Title"].split()
            d["Description"] = d["Description"].split()
            # Remove watermark-like URL keywords in titles and descriptions,
            # as they are noise (e.g. the "example" of "www.example.com").
            urlFilter = d['DisplayUrl'].partition('.')[2].partition('.')[0]
            d["Title"] = map(lambda s: s.strip('.,!()[]&" |').lower(), d['Title'])
            d["Title"] = filter(lambda e: e != urlFilter, d["Title"])
            d["Description"] = map(lambda s: s.strip('.,!()[]&" |').lower(), d["Description"])
            d["Description"] = filter(lambda e: e != urlFilter, d["Description"])
            self.vocab.update(d["Title"])
            self.vocab.update(d["Description"])
        # Remove stop words (one word per line); `with` closes the file,
        # and the local no longer shadows the builtin `file`.
        with open("resources/english", 'r') as stopword_file:
            stopWords = [line.rstrip() for line in stopword_file]
        self.vocab = self.vocab - set(stopWords)
        # Insert {document: term-position-list} entries into the inverted file
        self.invFile = defaultdict(TermParamsClass.TermParams)
        self.relevanceList = {}
        for v in self.vocab:
            temp = {}
            for d in docs:
                self.relevanceList[d["DisplayUrl"]] = d["Relevant"]
                pos = self.getAllIndices(d["Title"] + d["Description"], v)
                if len(pos) != 0:
                    temp[d["DisplayUrl"]] = pos
            idf = self.computeInverseDocFreq(len(temp.keys()), self.N)
            self.invFile[v] = TermParamsClass.TermParams(idf, temp)

    def createDocVectors(self):
        """Create weight vectors for the relevant documents, the non-relevant
        documents, and the current query.

        :return: dict with "rel", "nonRel" and "queryWeights" lists, ordered
            by sorted vocabulary term
        """
        relWeights = [0 for _ in range(len(self.invFile.keys()))]
        nonRelWeights = [0 for _ in range(len(self.invFile.keys()))]
        queryWeights = [0 for _ in range(len(self.invFile.keys()))]
        i = 0
        for key in sorted(self.invFile.keys()):
            currIdf = self.invFile[key].getIdf()
            # BUG FIX: count occurrences of the term itself (`key`) in the
            # query — the old code counted the TermParams object, which can
            # never appear in a list of words, so every query weight was 0.
            queryWeights[i] = self.origQuery.count(key) * currIdf
            docs = self.invFile[key].getDocs()
            for d in docs.keys():
                if self.relevanceList[d] == 'y':
                    self.numRel = self.numRel + 1
                    relWeights[i] += len(docs[d]) * currIdf
                else:
                    self.numNonRel = self.numNonRel + 1
                    nonRelWeights[i] += len(docs[d]) * currIdf
            i = i + 1
        return {"rel": relWeights, "nonRel": nonRelWeights, "queryWeights": queryWeights}

    def getNumRel(self):
        """Number of (term, relevant-document) hits accumulated by createDocVectors()."""
        return self.numRel

    def getNumNonRel(self):
        """Number of (term, non-relevant-document) hits accumulated by createDocVectors()."""
        return self.numNonRel

    def getInvFileKeys(self):
        """Return the vocabulary terms indexed in the inverted file."""
        return self.invFile.keys()
| {
"repo_name": "aiyyoi/ADB-Project-1",
"path": "VectorSpaceClass.py",
"copies": "1",
"size": "3212",
"license": "mit",
"hash": -3504086218558458000,
"line_mean": 32.8105263158,
"line_max": 105,
"alpha_frac": 0.6706102117,
"autogenerated": false,
"ratio": 3.121477162293489,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4292087373993489,
"avg_score": null,
"num_lines": null
} |
"""A class to crawl webpages."""
from __future__ import absolute_import, print_function
import sys
import lxml.html as lh
from .orderedset import OrderedSet
from . import utils
class Crawler(object):
    """Follows and saves webpages to PART.html files."""
    def __init__(self, args, seed_url=None):
        """Set seed URL and program arguments.

        :param args: dict of program options; this class reads the keys
            "nonstrict", "crawl", "max_crawls", "cache_size" and "quiet"
        :param seed_url: optional starting URL (may also be passed to
            crawl_links later)
        """
        self.seed_url = seed_url
        self.args = args
        # Hashes of page text already seen, to avoid re-saving duplicates.
        self.page_cache = []
    def get_new_links(self, url, resp):
        """Get new links from a URL and filter them.

        :param url: the page's URL (used to resolve relative links)
        :param resp: the page parsed as an lxml HTML tree
        :return: list of cleaned, filtered absolute URLs
        """
        links_on_page = resp.xpath("//a/@href")
        links = [utils.clean_url(u, url) for u in links_on_page]
        # Remove non-links through filtering by protocol
        links = [x for x in links if utils.check_protocol(x)]
        # Restrict new URLs by the domain of the input URL
        if not self.args["nonstrict"]:
            domain = utils.get_domain(url)
            links = [x for x in links if utils.get_domain(x) == domain]
        # Filter URLs by regex keywords, if any
        if self.args["crawl"]:
            links = utils.re_filter(links, self.args["crawl"])
        return links
    def limit_reached(self, num_crawls):
        """Check if number of pages crawled have reached a limit.

        A falsy "max_crawls" option means no limit.
        """
        return self.args["max_crawls"] and num_crawls >= self.args["max_crawls"]
    def page_crawled(self, page_resp):
        """Check if page has been crawled by hashing its text content.

        Add new pages to the page cache.
        Return whether page was found in cache.
        """
        page_text = utils.parse_text(page_resp)
        page_hash = utils.hash_text("".join(page_text))
        if page_hash not in self.page_cache:
            # utils.cache_page evicts old entries past "cache_size" — TODO confirm.
            utils.cache_page(self.page_cache, page_hash, self.args["cache_size"])
            return False
        return True
    def crawl_links(self, seed_url=None):
        """Find new links given a seed URL and follow them breadth-first.

        Save page responses as PART.html files.
        Return the PART.html filenames created during crawling.
        Interruptible: Ctrl-C/EOF stops crawling and returns what was saved.
        """
        if seed_url is not None:
            self.seed_url = seed_url
        if self.seed_url is None:
            sys.stderr.write("Crawling requires a seed URL.\n")
            return []
        prev_part_num = utils.get_num_part_files()
        crawled_links = set()
        # OrderedSet + pop(last=False) gives FIFO order -> breadth-first crawl.
        uncrawled_links = OrderedSet()
        uncrawled_links.add(self.seed_url)
        try:
            while uncrawled_links:
                # Check limit on number of links and pages to crawl
                if self.limit_reached(len(crawled_links)):
                    break
                url = uncrawled_links.pop(last=False)
                # Remove protocol, fragments, etc. to get unique URLs
                unique_url = utils.remove_protocol(utils.clean_url(url))
                if unique_url not in crawled_links:
                    raw_resp = utils.get_raw_resp(url)
                    if raw_resp is None:
                        if not self.args["quiet"]:
                            sys.stderr.write("Failed to parse {0}.\n".format(url))
                        continue
                    resp = lh.fromstring(raw_resp)
                    # Skip pages whose text content was already seen elsewhere.
                    if self.page_crawled(resp):
                        continue
                    crawled_links.add(unique_url)
                    new_links = self.get_new_links(url, resp)
                    uncrawled_links.update(new_links)
                    if not self.args["quiet"]:
                        print("Crawled {0} (#{1}).".format(url, len(crawled_links)))
                    # Write page response to PART.html file
                    utils.write_part_file(
                        self.args, url, raw_resp, resp, len(crawled_links)
                    )
        except (KeyboardInterrupt, EOFError):
            pass
        curr_part_num = utils.get_num_part_files()
        return utils.get_part_filenames(curr_part_num, prev_part_num)
| {
"repo_name": "huntrar/scrape",
"path": "scrape/crawler.py",
"copies": "1",
"size": "3963",
"license": "mit",
"hash": -6758113123482355000,
"line_mean": 35.6944444444,
"line_max": 84,
"alpha_frac": 0.5591723442,
"autogenerated": false,
"ratio": 3.9511465603190428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00044771944089817096,
"num_lines": 108
} |
# A class to describe a Star shape.
class Star(object):
    """A star PShape that drifts rightward and wraps around.

    Processing Python mode sketch class: `random`, `width`, `height`,
    `createShape`, `CLOSE`, `pushMatrix`, `translate` and `shape` are
    Processing built-ins supplied by the sketch runtime.
    """
    def __init__(self):
        # Random start position (kept 100px away from the canvas edges) and speed.
        self.x = random(100, width - 100)
        self.y = random(100, height - 100)
        self.speed = random(0.5, 3)
        # First create the shape.
        self.s = createShape()
        self.s.beginShape()
        # You can set fill and stroke.
        self.s.fill(255, 204)
        self.s.noStroke()
        # Here, we are hardcoding a series of vertices.
        # Ten vertices alternating between outer and inner radius form the star.
        self.s.vertex(0, -50)
        self.s.vertex(14, -20)
        self.s.vertex(47, -15)
        self.s.vertex(23, 7)
        self.s.vertex(29, 40)
        self.s.vertex(0, 25)
        self.s.vertex(-29, 40)
        self.s.vertex(-23, 7)
        self.s.vertex(-47, -15)
        self.s.vertex(-14, -20)
        # The shape is complete.
        self.s.endShape(CLOSE)

    def move(self):
        # Demonstrating some simple motion.
        self.x += self.speed
        # Wrap around: re-enter from the left once fully off the right edge.
        if self.x > width + 100:
            self.x = -100

    def display(self):
        # Locating and drawing the shape.
        with pushMatrix():
            translate(self.x, self.y)
            shape(self.s)
| {
"repo_name": "kantel/processingpy",
"path": "sketches/modes/PythonMode/examples/Topics/Create Shapes/PolygonPShapeOOP/star.py",
"copies": "6",
"size": "1126",
"license": "mit",
"hash": 6250003606796715000,
"line_mean": 27.8717948718,
"line_max": 55,
"alpha_frac": 0.526642984,
"autogenerated": false,
"ratio": 3.321533923303835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6848176907303835,
"avg_score": null,
"num_lines": null
} |
"""A class to download NAIP imagery from the s3://aws-naip RequesterPays bucket."""
import boto3
import os
import subprocess
import sys
import time
from random import shuffle
from src.config import cache_paths, create_cache_directories, NAIP_DATA_DIR, LABELS_DATA_DIR
class NAIPDownloader:
    """Downloads NAIP images from S3, by state/year."""

    def __init__(self, number_of_naips, should_randomize, state, year):
        """Download some arbitrary NAIP images from the aws-naip S3 bucket.

        Args:
            number_of_naips: how many images to download, or -1 for all.
            should_randomize: whether to shuffle the image list before downloading.
            state: state directory name in the bucket, e.g. "md".
            year: imagery year directory name, e.g. "2013".
        """
        self.number_of_naips = number_of_naips
        self.should_randomize = should_randomize
        self.state = state
        self.year = year
        self.resolution = '1m'
        self.spectrum = 'rgbir'
        self.bucket_url = 's3://aws-naip/'
        self.url_base = '{}{}/{}/{}/{}/'.format(self.bucket_url, self.state, self.year,
                                                self.resolution, self.spectrum)
        self.make_directory(NAIP_DATA_DIR, full_path=True)

    def make_directory(self, new_dir, full_path=False):
        """Make a new directory tree if it doesn't already exist.

        Args:
            new_dir: path of the directory to create.
            full_path: when True, create every intermediate directory of the
                path one component at a time (like ``mkdir -p``).

        Returns:
            The created (or pre-existing) path; with a trailing slash when
            full_path is True.
        """
        if full_path:
            path = ''
            for token in new_dir.split('/'):
                path += token + '/'
                try:
                    os.mkdir(path)
                except OSError:
                    # Directory already exists (or component not creatable);
                    # keep walking down the path. Narrowed from a bare
                    # except so KeyboardInterrupt/SystemExit still propagate.
                    pass
            return path
        try:
            os.mkdir(new_dir)
        except OSError:
            # Directory already exists.
            pass
        return new_dir

    def download_naips(self):
        """Download self.number_of_naips of the naips for a given state.

        Returns:
            List of local file paths of the downloaded NAIPs.
        """
        create_cache_directories()
        self.configure_s3cmd()
        naip_filenames = self.list_naips()
        if self.should_randomize:
            shuffle(naip_filenames)
        naip_local_paths = self.download_from_s3(naip_filenames)
        cache_paths(naip_local_paths)
        return naip_local_paths

    def configure_s3cmd(self):
        """Configure s3cmd with AWS credentials from the environment.

        Rewrites the AWS_ACCESS_KEY / AWS_SECRET_KEY placeholders in ~/.s3cfg
        with the values of AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY.
        """
        file_path = os.environ.get("HOME") + '/.s3cfg'
        # Context managers guarantee the handles close even if read/write fails.
        with open(file_path, 'r') as f:
            filedata = f.read()
        access = os.environ.get("AWS_ACCESS_KEY_ID")
        secret = os.environ.get("AWS_SECRET_ACCESS_KEY")
        newdata = filedata.replace("AWS_ACCESS_KEY", access)
        newdata = newdata.replace("AWS_SECRET_KEY", secret)
        with open(file_path, 'w') as f:
            f.write(newdata)

    def list_naips(self):
        """Make a list of NAIPs based on the init parameters for the class.

        Shells out to ``s3cmd ls`` on the bucket directory and parses the
        listing; also creates any per-subdirectory cache folders it finds.

        Returns:
            List of NAIP filenames (paths relative to self.url_base).
        """
        # list the contents of the bucket directory
        bash_command = "s3cmd ls --recursive --skip-existing {} --requester-pays".format(
            self.url_base)
        process = subprocess.Popen(bash_command.split(" "), stdout=subprocess.PIPE)
        output = process.communicate()[0]
        naip_filenames = []
        print(output)
        for line in output.split('\n'):
            parts = line.split(self.url_base)
            print(parts)
            # there may be subdirectories for each state, where directories need to be made
            if len(parts) == 2:
                naip_path = parts[1]
                naip_filenames.append(naip_path)
                naip_subpath = os.path.join(NAIP_DATA_DIR, naip_path.split('/')[0])
                if not os.path.exists(naip_subpath):
                    os.mkdir(naip_subpath)
                labels_subpath = os.path.join(LABELS_DATA_DIR, naip_path.split('/')[0])
                if not os.path.exists(labels_subpath):
                    os.mkdir(labels_subpath)
            else:
                # skip non filename lines from response
                pass
        return naip_filenames

    def download_from_s3(self, naip_filenames):
        """Download the NAIPs and return a list of the file paths.

        Already-downloaded files are skipped. Downloads go through boto3 with
        RequestPayer=requester, since aws-naip is a requester-pays bucket.
        """
        s3_client = boto3.client('s3')
        naip_local_paths = []
        max_range = self.number_of_naips
        if max_range == -1:
            # -1 is the sentinel for "download everything listed"
            max_range = len(naip_filenames)
        t0 = time.time()
        has_printed = False
        for filename in naip_filenames[0:max_range]:
            full_path = os.path.join(NAIP_DATA_DIR, filename)
            if os.path.exists(full_path):
                print("NAIP {} already downloaded".format(full_path))
            else:
                if not has_printed:
                    print("DOWNLOADING {} NAIPs...".format(max_range))
                    has_printed = True
                url_without_prefix = self.url_base.split(self.bucket_url)[1]
                s3_url = '{}{}'.format(url_without_prefix, filename)
                s3_client.download_file('aws-naip', s3_url, full_path, {'RequestPayer': 'requester'
                                                                        })
            naip_local_paths.append(full_path)
        if time.time() - t0 > 0.01:
            print("downloads took {0:.1f}s".format(time.time() - t0))
        return naip_local_paths
if __name__ == '__main__':
    # Minimal CLI: "python naip_images.py download" kicks off a download run.
    parameters_message = "parameters are: download"
    if len(sys.argv) == 1:
        print(parameters_message)
    elif sys.argv[1] == 'download':
        # NOTE(review): NAIPDownloader.__init__ requires
        # (number_of_naips, should_randomize, state, year); this no-argument
        # call raises TypeError at runtime — confirm intended arguments.
        naiper = NAIPDownloader()
        naiper.download_naips()
    else:
        print(parameters_message)
| {
"repo_name": "trailbehind/DeepOSM",
"path": "src/naip_images.py",
"copies": "2",
"size": "5282",
"license": "mit",
"hash": 3308869800225673700,
"line_mean": 37.2753623188,
"line_max": 99,
"alpha_frac": 0.5556607346,
"autogenerated": false,
"ratio": 3.7647897362794014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5320450470879401,
"avg_score": null,
"num_lines": null
} |
"""A class to generate plots for the results of applied loss functions and/or
accuracy of models trained with machine learning methods.
Example:
plotter = LossAccPlotter()
for epoch in range(100):
loss_train, acc_train = your_model.train()
loss_val, acc_val = your_model.validate()
plotter.add_values(epoch,
loss_train=loss_train, acc_train=acc_train,
loss_val=loss_val, acc_val=acc_val)
plotter.block()
Example, no accuracy chart:
plotter = LossAccPlotter(show_acc_plot=False)
for epoch in range(100):
loss_train = your_model.train()
loss_val = your_model.validate()
plotter.add_values(epoch, loss_train=loss_train, loss_val=loss_val)
plotter.block()
Example, update the validation line only every 10th epoch:
plotter = LossAccPlotter(show_acc_plot=False)
for epoch in range(100):
loss_train = your_model.train()
if epoch % 10 == 0:
loss_val = your_model.validate()
else:
loss_val = None
plotter.add_values(epoch, loss_train=loss_train, loss_val=loss_val)
plotter.block()
"""
from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import warnings
import math
from collections import OrderedDict
def ignore_nan_and_inf(value, label, x_index):
    """Sanitize a value: convert NaN/INF to None and warn about them.

    Args:
        value: The value to check (may already be None).
        label: Which line the value belongs to (usually "loss train",
            "loss val", ...). Used only in the warning message.
        x_index: x-position of the value (e.g. 1 as in Epoch 1). Used only
            in the warning message.

    Returns:
        The value unchanged, or None if it was None, NaN or INF.
    """
    if value is not None and (math.isnan(value) or math.isinf(value)):
        kind = "NaN" if math.isnan(value) else "INF"
        warnings.warn("Got %s for value '%s' at x-index %d" % (kind, label, x_index))
        return None
    return value
class LossAccPlotter(object):
    """Class to plot loss and accuracy charts (for training and validation data)."""

    def __init__(self,
                 title=None,
                 save_to_filepath=None,
                 show_regressions=True,
                 show_averages=True,
                 show_loss_plot=True,
                 show_acc_plot=True,
                 show_plot_window=True,
                 x_label="Epoch"):
        """Constructs the plotter.

        Args:
            title: An optional title which will be shown at the top of the
                plot. E.g. the name of the experiment or some info about it.
                If set to None, no title will be shown. (Default is None.)
            save_to_filepath: The path to a file in which the plot will be saved,
                e.g. "/tmp/last_plot.png". If set to None, the chart will not be
                saved to a file. (Default is None.)
            show_regressions: Whether or not to show a regression, indicating
                where each line might end up in the future.
            show_averages: Whether to plot moving averages in the charts for
                each line (so for loss train, loss val, ...). This value may
                only be True or False. To change the interval (default is 20
                epochs), change the instance variable "averages_period" to the new
                integer value. (Default is True.)
            show_loss_plot: Whether to show the chart for the loss values. If
                set to False, only the accuracy chart will be shown. (Default
                is True.)
            show_acc_plot: Whether to show the chart for the accuracy value. If
                set to False, only the loss chart will be shown. (Default is True.)
            show_plot_window: Whether to show the plot in a window (True)
                or hide it (False). Hiding it makes only sense if you
                set save_to_filepath. (Default is True.)
            x_label: Label on the x-axes of the charts. Reasonable choices
                would be: "Epoch", "Batch" or "Example". (Default is "Epoch".)
        """
        # At least one chart must be shown, and a hidden window only makes
        # sense when the plot is being saved to disk.
        assert show_loss_plot or show_acc_plot
        assert save_to_filepath is not None or show_plot_window

        self.title = title
        self.title_fontsize = 14
        self.show_regressions = show_regressions
        self.show_averages = show_averages
        self.show_loss_plot = show_loss_plot
        self.show_acc_plot = show_acc_plot
        self.show_plot_window = show_plot_window
        self.save_to_filepath = save_to_filepath
        self.x_label = x_label

        # alpha values
        # 0.8 = quite visible line
        # 0.5 = moderately visible line
        # thick is used for averages and regression (also for the main values,
        # if there are no averages),
        # thin is used for the main values
        self.alpha_thick = 0.8
        self.alpha_thin = 0.5

        # the interval for the moving averages, e.g. 20 = average over 20 epochs
        self.averages_period = 20

        # these values deal with the regression
        self.poly_forward_perc = 0.1
        self.poly_backward_perc = 0.2
        self.poly_n_forward_min = 5
        self.poly_n_backward_min = 10
        self.poly_n_forward_max = 100
        self.poly_n_backward_max = 100
        self.poly_degree = 1

        # whether to show grids in both charts
        self.grid = True

        # the styling of the lines
        # sma = simple moving average
        self.linestyles = {
            "loss_train": "r-",
            "loss_train_sma": "r-",
            "loss_train_regression": "r:",
            "loss_val": "b-",
            "loss_val_sma": "b-",
            "loss_val_regression": "b:",
            "acc_train": "r-",
            "acc_train_sma": "r-",
            "acc_train_regression": "r:",
            "acc_val": "b-",
            "acc_val_sma": "b-",
            "acc_val_regression": "b:"
        }
        # different linestyles for the first epoch (if only one value is available),
        # because no line can then be drawn (needs 2+ points) and only symbols will
        # be shown.
        # No regression here, because regression always has at least
        # two xy-points (last real value and one (or more) predicted values).
        # No averages here, because the average over one value would be identical
        # to the value anyways.
        self.linestyles_one_value = {
            "loss_train": "rs-",
            "loss_val": "b^-",
            "acc_train": "rs-",
            "acc_val": "b^-"
        }

        # these values will be set in _initialize_plot() upon the first call
        # of redraw()
        # fig: the figure of the whole plot
        # ax_loss: loss chart (left)
        # ax_acc: accuracy chart (right)
        self.fig = None
        self.ax_loss = None
        self.ax_acc = None

        # dictionaries mapping x-index -> y-value for each line
        self.values_loss_train = OrderedDict()
        self.values_loss_val = OrderedDict()
        self.values_acc_train = OrderedDict()
        self.values_acc_val = OrderedDict()

    def add_values(self, x_index, loss_train=None, loss_val=None, acc_train=None,
                   acc_val=None, redraw=True):
        """Function to add new values for each line for a specific x-value (e.g.
        a specific epoch).

        Meaning of the values / lines:
         - loss_train: y-value of the loss function applied to the training set.
         - loss_val: y-value of the loss function applied to the validation set.
         - acc_train: y-value of the accuracy (e.g. 0.0 to 1.0) when measured on
             the training set.
         - acc_val: y-value of the accuracy (e.g. 0.0 to 1.0) when measured on
             the validation set.

        Values that are None will be ignored.
        Values that are INF or NaN will be ignored, but create a warning.

        It is currently assumed that added values follow logically after
        each other (progressive order), so the first x_index might be 1 (first entry),
        then 2 (second entry), then 3 (third entry), ...
        Not allowed would be e.g.: 10, 11, 5, 7, ...
        If that is not the case, you will get a broken line graph.

        Args:
            x_index: The x-coordinate, e.g. x_index=5 might represent Epoch 5.
            loss_train: The y-value of the loss train line at the given x_index.
                If None, no value for the loss train line will be added at
                the given x_index. (Default is None.)
            loss_val: Same as loss_train for the loss validation line.
                (Default is None.)
            acc_train: Same as loss_train for the accuracy train line.
                (Default is None.)
            acc_val: Same as loss_train for the accuracy validation line.
                (Default is None.)
            redraw: Whether to redraw the plot immediately after receiving the
                new values. This is reasonable if you add values once at the end
                of every epoch. If you add many values in a row, set this to
                False and call redraw() at the end (significantly faster).
                (Default is True.)
        """
        # NOTE(review): `long` exists only in Python 2; on Python 3 this
        # assert raises NameError. Confirm the targeted Python version.
        assert isinstance(x_index, (int, long))

        # Silently drop None; drop NaN/INF with a warning.
        loss_train = ignore_nan_and_inf(loss_train, "loss train", x_index)
        loss_val = ignore_nan_and_inf(loss_val, "loss val", x_index)
        acc_train = ignore_nan_and_inf(acc_train, "acc train", x_index)
        acc_val = ignore_nan_and_inf(acc_val, "acc val", x_index)

        if loss_train is not None:
            self.values_loss_train[x_index] = loss_train
        if loss_val is not None:
            self.values_loss_val[x_index] = loss_val
        if acc_train is not None:
            self.values_acc_train[x_index] = acc_train
        if acc_val is not None:
            self.values_acc_val[x_index] = acc_val

        if redraw:
            self.redraw()

    def block(self):
        """Function to show the plot in a blocking way.

        This should be called at the end of your program. Otherwise the
        chart will be closed automatically (at the end).
        By default, the plot is shown in a non-blocking way, so that the
        program continues execution, which causes it to close automatically
        when the program finishes.

        This function will silently do nothing if show_plot_window was set
        to False in the constructor.
        """
        if self.show_plot_window:
            plt.figure(self.fig.number)
            plt.show()

    def save_plot(self, filepath):
        """Saves the current plot to a file.

        Args:
            filepath: The path to the file, e.g. "/tmp/last_plot.png".
        """
        self.fig.savefig(filepath, bbox_inches="tight")

    def _initialize_plot(self):
        """Creates empty figure and axes of the plot and shows it in a new window.
        """
        if self.show_loss_plot and self.show_acc_plot:
            # Two charts side by side: loss on the left, accuracy on the right.
            fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(24, 8))
            self.fig = fig
            self.ax_loss = ax1
            self.ax_acc = ax2
        else:
            # Only one chart was requested; the unused axis stays None.
            fig, ax = plt.subplots(ncols=1, figsize=(12, 8))
            self.fig = fig
            self.ax_loss = ax if self.show_loss_plot else None
            self.ax_acc = ax if self.show_acc_plot else None

        # set_position is necessary here in order to make space at the bottom
        # for the legend
        for ax in [self.ax_loss, self.ax_acc]:
            if ax is not None:
                box = ax.get_position()
                ax.set_position([box.x0, box.y0 + box.height * 0.1,
                                 box.width, box.height * 0.9])

        # draw the title
        # it seems to be necessary to set the title here instead of in redraw(),
        # otherwise the title is apparently added again and again with every
        # epoch, making it ugly and bold
        if self.title is not None:
            self.fig.suptitle(self.title, fontsize=self.title_fontsize)

        if self.show_plot_window:
            plt.show(block=False)

    def redraw(self):
        """Redraws the plot with the current values.

        This is a full redraw and includes recalculating averages and regressions.
        It should not be called many times per second as that would be slow.
        Calling it every couple seconds should create no noticeable slowdown though.
        """
        # initialize the plot if it's the first redraw
        if self.fig is None:
            self._initialize_plot()

        # activate the plot, in case another plot was opened since the last call
        plt.figure(self.fig.number)

        # shorter local variables
        ax1 = self.ax_loss
        ax2 = self.ax_acc

        # set chart titles, x-/y-labels and grid
        for ax, label in zip([ax1, ax2], ["Loss", "Accuracy"]):
            if ax:
                ax.clear()
                ax.set_title(label)
                ax.set_ylabel(label)
                ax.set_xlabel(self.x_label)
                ax.grid(self.grid)

        # Plot main lines, their averages and the regressions (predictions)
        self._redraw_main_lines()
        self._redraw_averages()
        self._redraw_regressions()

        # Add legends (below both charts). "$CHART" is a placeholder that is
        # replaced per-chart ("loss" / "acc.") just before building the legend.
        ncol = 1
        labels = ["$CHART train", "$CHART val."]
        if self.show_averages:
            labels.extend(["$CHART train (avg %d)" % (self.averages_period,),
                           "$CHART val. (avg %d)" % (self.averages_period,)])
            ncol += 1
        if self.show_regressions:
            labels.extend(["$CHART train (regression)",
                           "$CHART val. (regression)"])
            ncol += 1
        if ax1:
            ax1.legend([label.replace("$CHART", "loss") for label in labels],
                       loc="upper center",
                       bbox_to_anchor=(0.5, -0.08),
                       ncol=ncol)
        if ax2:
            ax2.legend([label.replace("$CHART", "acc.") for label in labels],
                       loc="upper center",
                       bbox_to_anchor=(0.5, -0.08),
                       ncol=ncol)

        plt.draw()

        # save the redrawn plot to a file upon every redraw.
        if self.save_to_filepath is not None:
            self.save_plot(self.save_to_filepath)

    def _redraw_main_lines(self):
        """Draw the main lines of values (i.e. loss train, loss val, acc train, acc val).

        Returns:
            List of handles (one per line).
        """
        handles = []
        ax1 = self.ax_loss
        ax2 = self.ax_acc

        # Set the styles of the lines used in the charts
        # Different line style for epochs after the first one, because
        # the very first epoch has only one data point and therefore no line
        # and would be invisible without the changed style.
        ls_loss_train = self.linestyles["loss_train"]
        ls_loss_val = self.linestyles["loss_val"]
        ls_acc_train = self.linestyles["acc_train"]
        ls_acc_val = self.linestyles["acc_val"]
        if len(self.values_loss_train) == 1:
            ls_loss_train = self.linestyles_one_value["loss_train"]
        if len(self.values_loss_val) == 1:
            ls_loss_val = self.linestyles_one_value["loss_val"]
        if len(self.values_acc_train) == 1:
            ls_acc_train = self.linestyles_one_value["acc_train"]
        if len(self.values_acc_val) == 1:
            ls_acc_val = self.linestyles_one_value["acc_val"]

        # Plot the lines
        # NOTE(review): dict .keys()/.values() are passed directly to plot();
        # this relies on Python 2 returning lists — wrap in list() for Python 3.
        alpha_main = self.alpha_thin if self.show_averages else self.alpha_thick
        if ax1:
            h_lt, = ax1.plot(self.values_loss_train.keys(), self.values_loss_train.values(),
                             ls_loss_train, label="loss train", alpha=alpha_main)
            h_lv, = ax1.plot(self.values_loss_val.keys(), self.values_loss_val.values(),
                             ls_loss_val, label="loss val.", alpha=alpha_main)
            handles.extend([h_lt, h_lv])
        if ax2:
            h_at, = ax2.plot(self.values_acc_train.keys(), self.values_acc_train.values(),
                             ls_acc_train, label="acc. train", alpha=alpha_main)
            h_av, = ax2.plot(self.values_acc_val.keys(), self.values_acc_val.values(),
                             ls_acc_val, label="acc. val.", alpha=alpha_main)
            handles.extend([h_at, h_av])

        return handles

    def _redraw_averages(self):
        """Draw the moving averages of each line.

        If moving averages have been deactivated in the constructor, this function
        will do nothing.

        Returns:
            List of handles (one per line).
        """
        # abort if moving averages have been deactivated
        if not self.show_averages:
            return []

        handles = []
        ax1 = self.ax_loss
        ax2 = self.ax_acc

        # calculate the xy-values
        if ax1:
            # for loss chart
            (lt_sma_x, lt_sma_y) = self._calc_sma(self.values_loss_train.keys(),
                                                  self.values_loss_train.values())
            (lv_sma_x, lv_sma_y) = self._calc_sma(self.values_loss_val.keys(),
                                                  self.values_loss_val.values())
        if ax2:
            # for accuracy chart
            (at_sma_x, at_sma_y) = self._calc_sma(self.values_acc_train.keys(),
                                                  self.values_acc_train.values())
            (av_sma_x, av_sma_y) = self._calc_sma(self.values_acc_val.keys(),
                                                  self.values_acc_val.values())

        # plot the xy-values
        alpha_sma = self.alpha_thick
        if ax1:
            # for loss chart
            h_lt, = ax1.plot(lt_sma_x, lt_sma_y, self.linestyles["loss_train_sma"],
                             label="train loss (avg %d)" % (self.averages_period,),
                             alpha=alpha_sma)
            h_lv, = ax1.plot(lv_sma_x, lv_sma_y, self.linestyles["loss_val_sma"],
                             label="val loss (avg %d)" % (self.averages_period,),
                             alpha=alpha_sma)
            handles.extend([h_lt, h_lv])
        if ax2:
            # for accuracy chart
            h_at, = ax2.plot(at_sma_x, at_sma_y, self.linestyles["acc_train_sma"],
                             label="train acc (avg %d)" % (self.averages_period,),
                             alpha=alpha_sma)
            h_av, = ax2.plot(av_sma_x, av_sma_y, self.linestyles["acc_val_sma"],
                             label="acc. val. (avg %d)" % (self.averages_period,),
                             alpha=alpha_sma)
            handles.extend([h_at, h_av])

        return handles

    def _redraw_regressions(self):
        """Draw the moving regressions of each line, i.e. the predictions of
        future values.

        If regressions have been deactivated in the constructor, this function
        will do nothing.

        Returns:
            List of handles (one per line).
        """
        if not self.show_regressions:
            return []

        handles = []
        ax1 = self.ax_loss
        ax2 = self.ax_acc

        # calculate future values for loss train (lt), loss val (lv),
        # acc train (at) and acc val (av)
        if ax1:
            # for loss chart
            lt_regression = self._calc_regression(self.values_loss_train.keys(),
                                                  self.values_loss_train.values())
            lv_regression = self._calc_regression(self.values_loss_val.keys(),
                                                  self.values_loss_val.values())
        # predicting accuracy values isn't necessary if there's no acc chart
        if ax2:
            # for accuracy chart
            at_regression = self._calc_regression(self.values_acc_train.keys(),
                                                  self.values_acc_train.values())
            av_regression = self._calc_regression(self.values_acc_val.keys(),
                                                  self.values_acc_val.values())

        # plot the predicted values
        alpha_regression = self.alpha_thick
        if ax1:
            # for loss chart
            h_lt, = ax1.plot(lt_regression[0], lt_regression[1],
                             self.linestyles["loss_train_regression"],
                             label="loss train regression",
                             alpha=alpha_regression)
            h_lv, = ax1.plot(lv_regression[0], lv_regression[1],
                             self.linestyles["loss_val_regression"],
                             label="loss val. regression",
                             alpha=alpha_regression)
            handles.extend([h_lt, h_lv])
        if ax2:
            # for accuracy chart
            h_at, = ax2.plot(at_regression[0], at_regression[1],
                             self.linestyles["acc_train_regression"],
                             label="acc train regression",
                             alpha=alpha_regression)
            h_av, = ax2.plot(av_regression[0], av_regression[1],
                             self.linestyles["acc_val_regression"],
                             label="acc val. regression",
                             alpha=alpha_regression)
            handles.extend([h_at, h_av])

        return handles

    def _calc_sma(self, x_values, y_values):
        """Calculate the moving average for one line (given as two lists, one
        for its x-values and one for its y-values).

        Args:
            x_values: x-coordinate of each value.
            y_values: y-coordinate of each value.

        Returns:
            Tuple (x_values, y_values), where x_values are the x-values of
            the line and y_values are the y-values of the line.
        """
        result_y, last_ys = [], []
        running_sum = 0
        period = self.averages_period
        # use a running sum here instead of avg(), should be slightly faster
        for y_val in y_values:
            last_ys.append(y_val)
            running_sum += y_val
            if len(last_ys) > period:
                # drop the oldest value once the window is full
                poped_y = last_ys.pop(0)
                running_sum -= poped_y
            result_y.append(float(running_sum) / float(len(last_ys)))
        return (x_values, result_y)

    def _calc_regression(self, x_values, y_values):
        """Calculate the regression for one line (given as two lists, one
        for its x-values and one for its y-values).

        Args:
            x_values: x-coordinate of each value.
            y_values: y-coordinate of each value.

        Returns:
            Tuple (x_values, y_values), where x_values are the predicted x-values
            of the line and y_values are the predicted y-values of the line.
        """
        if not x_values or len(x_values) < 2:
            return ([], [])

        # This currently assumes that the last added x-value for the line
        # was indeed that highest x-value.
        # This could be avoided by tracking the max value for each line.
        last_x = x_values[-1]
        nb_values = len(x_values)

        # Compute regression lines based on n_backwards epochs
        # in the past, e.g. based on the last 10 values.
        # n_backwards is calculated relative to the current epoch
        # (e.g. at epoch 100 compute based on the last 10 values,
        # at 200 based on the last 20 values...). It has a minimum (e.g. never
        # use less than 5 epochs (unless there are only less than 5 epochs))
        # and a maximum (e.g. never use more than 1000 epochs).
        # The minimum prevents bad predictions.
        # The maximum
        #   a) is better for performance
        #   b) lets the regression react faster in case you change something
        #      in the hyperparameters after a long time of training.
        n_backward = int(nb_values * self.poly_backward_perc)
        n_backward = max(n_backward, self.poly_n_backward_min)
        n_backward = min(n_backward, self.poly_n_backward_max)

        # Compute the regression lines for the n_forward future epochs.
        # n_forward also has a relative factor, as well as minimum and maximum
        # values (see n_backward).
        n_forward = int(nb_values * self.poly_forward_perc)
        n_forward = max(n_forward, self.poly_n_forward_min)
        n_forward = min(n_forward, self.poly_n_forward_max)

        # return nothing if the values turn out too low
        if n_backward <= 1 or n_forward <= 0:
            return ([], [])

        # create/train the regression model
        fit = np.polyfit(x_values[-n_backward:], y_values[-n_backward:],
                         self.poly_degree)
        poly = np.poly1d(fit)

        # calculate future x- and y-values
        # we use last_x to last_x+n_forward here instead of
        # last_x+1 to last_x+1+n_forward
        # so that the regression line is better connected to the current line
        # (no visible gap)
        future_x = [i for i in range(last_x, last_x + n_forward)]
        future_y = [poly(x_idx) for x_idx in future_x]

        return (future_x, future_y)
| {
"repo_name": "XinDongol/warehouse",
"path": "draw/LossAccPlotter/laplotter.py",
"copies": "3",
"size": "26025",
"license": "mit",
"hash": 4886162902124814000,
"line_mean": 41.3859934853,
"line_max": 92,
"alpha_frac": 0.5576176753,
"autogenerated": false,
"ratio": 3.950964020039472,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011080474476545706,
"num_lines": 614
} |
# a class to hold a formula and all the parameter settings which go with it
import copy
import math
import random
import re
import StringIO
import weakref
import fracttypes
import gradient
import image
# matches a complex number serialized as "(re,im)"
cmplx_re = re.compile(r'\((.*?),(.*?)\)')
# matches a hypercomplex number serialized as "(a,b,c,d)"
hyper_re = re.compile(r'\((.*?),(.*?),(.*?),(.*?)\)')
class T:
def __init__(self,compiler,parent=None,prefix=None):
self.compiler = compiler
self.formula = None
self.funcName = None
self.funcFile = None
self.params = []
self.paramtypes = []
self.dirty = False
self.set_prefix(prefix)
if parent:
self.parent = weakref.ref(parent)
else:
self.parent = None
def set_prefix(self,prefix):
self.prefix = prefix
if prefix == None:
self.sectname = "function"
elif prefix == "cf0":
self.sectname = "outer"
elif prefix == "cf1":
self.sectname = "inner"
elif prefix[0] == 't':
self.sectname = "transform"
else:
raise ValueError("Unexpected prefix '%s' " % prefix)
def set_initparams_from_formula(self,g):
self.params = self.formula.symbols.default_params()
self.paramtypes = self.formula.symbols.type_of_params()
for i in xrange(len(self.paramtypes)):
if self.paramtypes[i] == fracttypes.Gradient:
self.params[i] = copy.copy(g)
elif self.paramtypes[i] == fracttypes.Image:
im = image.T(1,1)
#b = im.image_buffer()
#b[0] = chr(216)
#b[3] = chr(88)
#b[4] = chr(192)
#b[11] = chr(255)
self.params[i] = im
def reset_params(self):
self.params = self.formula.symbols.default_params()
self.paramtypes = self.formula.symbols.type_of_params()
def copy_from(self,other):
# copy the function overrides
for name in self.func_names():
self.set_named_func(name,
other.get_func_value(name))
# copy the params
self.params = [copy.copy(x) for x in other.params]
def initvalue(self,name,warp_param=None):
ord = self.order_of_name(name)
type = self.formula.symbols[name].type
if type == fracttypes.Complex:
if warp_param == name:
return "warp"
else:
return "(%.17f,%.17f)"%(self.params[ord],self.params[ord+1])
elif type == fracttypes.Hyper or type == fracttypes.Color:
return "(%.17f,%.17f,%.17f,%.17f)"% \
(self.params[ord],self.params[ord+1],
self.params[ord+2],self.params[ord+3])
elif type == fracttypes.Float:
return "%.17f" % self.params[ord]
elif type == fracttypes.Int:
return "%d" % self.params[ord]
elif type == fracttypes.Bool:
return "%s" % int(self.params[ord])
elif type == fracttypes.Gradient:
return "[\n" + self.params[ord].serialize() + "]"
elif type == fracttypes.Image:
return "[\n" + self.params[ord].serialize() + "]"
else:
raise ValueError("Unknown type %s for param %s" % (type,name))
def save_formula_params(self,file,warp_param=None,sectnum=None):
if sectnum == None:
print >>file, "[%s]" % self.sectname
else:
print >>file, "[%s]=%d" % (self.sectname, sectnum)
print >>file, "formulafile=%s" % self.funcFile
print >>file, "function=%s" % self.funcName
if(self.compiler.is_inline(self.funcFile, self.funcName)):
contents = self.compiler.get_formula_text(
self.funcFile, self.funcName)
print >>file, "formula=[\n%s\n]" % contents
names = self.func_names()
names.sort()
for name in names:
print >>file, "%s=%s" % (name, self.get_func_value(name))
names = self.param_names()
names.sort()
for name in names:
print >>file, "%s=%s" % (name, self.initvalue(name,warp_param))
print >>file, "[endsection]"
def func_names(self):
return self.formula.symbols.func_names()
def param_names(self):
return self.formula.symbols.param_names()
def params_of_type(self,type,readable=False):
params = []
op = self.formula.symbols.order_of_params()
for name in op.keys():
if name != '__SIZE__':
if self.formula.symbols[name].type == type:
if readable:
params.append(self.formula.symbols.demangle(name))
else:
params.append(name)
return params
def get_func_value(self,func_to_get):
fname = self.formula.symbols.demangle(func_to_get)
func = self.formula.symbols[fname]
return func[0].cname
def get_named_param_value(self,name):
op = self.formula.symbols.order_of_params()
ord = op.get(self.formula.symbols.mangled_name(name))
return self.params[ord]
def order_of_name(self,name):
symbol_table = self.formula.symbols
op = symbol_table.order_of_params()
rn = symbol_table.mangled_name(name)
ord = op.get(rn)
if ord == None:
#print "can't find %s (%s) in %s" % (name,rn,op)
pass
return ord
def set_gradient(self,g):
ord = self.order_of_name("@_gradient")
self.params[ord] = g
def try_set_named_item(self,name,val):
# set the item if it exists, don't worry if it doesn't
try:
self.set_named_item(name,val)
except KeyError:
pass
def text(self):
"Return the text of this formula"
return self.compiler.get_formula_text(
self.funcFile, self.funcName)
def set_named_item(self,name,val):
sym = self.formula.symbols[name].first()
if isinstance(sym, fracttypes.Func):
self.set_named_func(name,val)
else:
self.set_named_param(name,val)
def set_named_param(self,name,val):
ord = self.order_of_name(name)
if ord == None:
#print "Ignoring unknown param %s" % name
return
t = self.formula.symbols[name].type
if t == fracttypes.Complex:
m = cmplx_re.match(val)
if m != None:
re = float(m.group(1)); im = float(m.group(2))
if self.params[ord] != re:
self.params[ord] = re
self.changed()
if self.params[ord+1] != im:
self.params[ord+1] = im
self.changed()
elif val == "warp":
self.parent().set_warp_param(name)
elif t == fracttypes.Hyper or t == fracttypes.Color:
m = hyper_re.match(val)
if m!= None:
for i in xrange(4):
val = float(m.group(i+1))
if self.params[ord+i] != val:
self.params[ord+i] = val
self.changed()
elif t == fracttypes.Float:
newval = float(val)
if self.params[ord] != newval:
self.params[ord] = newval
self.changed()
elif t == fracttypes.Int:
newval = int(val)
if self.params[ord] != newval:
self.params[ord] = newval
self.changed()
elif t == fracttypes.Bool:
# don't use bool(val) - that makes "0" = True
try:
i = int(val)
i = (i != 0)
except ValueError:
# an old release included a 'True' or 'False' string
if val == "True": i = 1
else: i = 0
if self.params[ord] != i:
self.params[ord] = i
self.changed()
elif t == fracttypes.Gradient:
grad = gradient.Gradient()
grad.load(StringIO.StringIO(val))
self.params[ord] = grad
self.changed()
elif t == fracttypes.Image:
im = image.T(2,2)
self.params[ord] = im
self.changed()
else:
raise ValueError("Unknown param type %s for %s" % (t,name))
def set_named_func(self,func_to_set,val):
fname = self.formula.symbols.demangle(func_to_set)
func = self.formula.symbols.get(fname)
return self.set_func(func[0],val)
def zw_random(self,weirdness,size):
factor = math.fabs(1.0 - math.log(size)) + 1.0
return weirdness * (random.random() - 0.5 ) * 1.0 / factor
def mutate(self, weirdness, size):
    """Randomly perturb our parameters; weirdness controls how much."""
    for i in range(len(self.params)):
        ptype = self.paramtypes[i]
        if ptype == fracttypes.Float:
            self.params[i] += self.zw_random(weirdness, size)
        elif ptype == fracttypes.Int:
            # FIXME: need to be able to look up enum to find min/max
            pass
        elif ptype == fracttypes.Bool:
            # occasionally flip booleans, more often when weirder
            if random.random() < weirdness * 0.2:
                self.params[i] = not self.params[i]
def nudge_param(self,n,x,y):
    """Shift param slot n by x clicks and slot n+1 by y clicks of 0.025.

    Returns True if anything moved, False for a (0,0) nudge.
    """
    if x == 0 and y == 0:
        return False
    self.params[n] += x * 0.025
    self.params[n+1] += y * 0.025
    self.changed()
    return True
def set_param(self,n,val):
    """Set the n'th param to val, after converting from a string.

    Returns True if the stored value actually changed (and fires
    self.changed()), False otherwise.  Raises ValueError for
    unconvertible values or unknown parameter types.
    """
    t = self.paramtypes[n]
    if t == fracttypes.Float:
        val = float(val)
    elif t == fracttypes.Int:
        val = int(val)
    elif t == fracttypes.Bool:
        # bug fix: don't use bool(val) - that makes "0" = True.
        # Parse the same way set_named_param does.
        try:
            val = (int(val) != 0)
        except ValueError:
            # an old release wrote 'True' or 'False' strings
            val = (val == "True")
    else:
        raise ValueError("Unknown parameter type %s" % t)
    if self.params[n] != val:
        self.params[n] = val
        self.changed()
        return True
    return False
def set_func(self,func,fname):
    """Point function symbol *func* at the stdlib function named *fname*.

    Returns True if this was an actual change, False if already set.
    """
    if func.cname == fname:
        return False
    self.formula.symbols.set_std_func(func, fname)
    self.dirty = True
    self.changed()
    return True
def changed(self):
    """Mark this object dirty and propagate the notification to the parent."""
    self.dirty = True
    if not self.parent:
        return
    # parent is a callable (e.g. a weakref) returning the owner object
    self.parent().changed()
def is_direct(self):
    "Delegate to the compiled formula's is_direct()."
    return self.formula.is_direct()
def set_formula(self,file,func,gradient):
    """Compile and install formula *func* from *file*.

    Raises ValueError if the formula doesn't exist or fails to compile;
    on success updates funcName/funcFile and re-derives the params.
    """
    new_formula = self.compiler.get_formula(file, func, self.prefix)
    if new_formula is None:
        raise ValueError("no such formula: %s:%s" % (file, func))
    if new_formula.errors != []:
        raise ValueError("invalid formula '%s':\n%s" % \
                         (func, "\n".join(new_formula.errors)))
    self.formula = new_formula
    self.funcName = func
    self.funcFile = file
    self.set_initparams_from_formula(gradient)
def load_param_bag(self,bag):
    """Apply every setting in *bag* except formula file/function selection."""
    skipped = ("formulafile", "function")
    for (name, val) in bag.dict.items():
        if name in skipped:
            continue
        self.try_set_named_item(name, val)
def blend(self, other, ratio):
    """Mix our parameter values in-place with other's, weighted by *ratio*.

    Both objects must refer to the same formula; raises ValueError otherwise.
    """
    if self.funcName != other.funcName or self.funcFile != other.funcFile:
        raise ValueError("Cannot blend parameters between different formulas")
    for i in range(len(self.params)):
        mine, theirs = self.params[i], other.params[i]
        ptype = self.paramtypes[i]
        if ptype == fracttypes.Float:
            self.params[i] = mine * (1.0 - ratio) + theirs * ratio
        elif ptype == fracttypes.Int:
            self.params[i] = int(mine * (1.0 - ratio) + theirs * ratio)
        elif ptype == fracttypes.Bool:
            # booleans can't be interpolated: switch over at the midpoint
            if ratio >= 0.5:
                self.params[i] = theirs
| {
"repo_name": "ericchill/gnofract4d",
"path": "fract4d/formsettings.py",
"copies": "1",
"size": "12216",
"license": "bsd-3-clause",
"hash": 8672380856605546000,
"line_mean": 33.4112676056,
"line_max": 82,
"alpha_frac": 0.5180091683,
"autogenerated": false,
"ratio": 3.777365491651206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4795374659951206,
"avg_score": null,
"num_lines": null
} |
"""A class to hold entity values."""
from collections import OrderedDict
import fnmatch
import re
from typing import Any, Dict, Optional, Pattern
from homeassistant.core import split_entity_id
# mypy: disallow-any-generics
class EntityValues:
    """Lookup of per-entity configuration by exact id, domain, or glob."""

    def __init__(
        self,
        exact: Optional[Dict[str, Dict[str, str]]] = None,
        domain: Optional[Dict[str, Dict[str, str]]] = None,
        glob: Optional[Dict[str, Dict[str, str]]] = None,
    ) -> None:
        """Initialize an EntityConfigDict."""
        self._cache: Dict[str, Dict[str, str]] = {}
        self._exact = exact
        self._domain = domain
        if glob is None:
            self._glob: Optional[Dict[Pattern[str], Any]] = None
        else:
            # pre-compile the glob patterns, preserving declaration order
            self._glob = OrderedDict(
                (re.compile(fnmatch.translate(pat)), conf)
                for pat, conf in glob.items()
            )

    def get(self, entity_id: str) -> Dict[str, str]:
        """Return the merged config for entity_id, least to most specific."""
        cached = self._cache.get(entity_id)
        if cached is not None:
            return cached
        domain = split_entity_id(entity_id)[0]
        merged: Dict[str, str] = {}
        # cache the dict now; it is filled in place below
        self._cache[entity_id] = merged
        if self._domain is not None and domain in self._domain:
            merged.update(self._domain[domain])
        if self._glob is not None:
            for matcher, values in self._glob.items():
                if matcher.match(entity_id):
                    merged.update(values)
        if self._exact is not None and entity_id in self._exact:
            merged.update(self._exact[entity_id])
        return merged
| {
"repo_name": "partofthething/home-assistant",
"path": "homeassistant/helpers/entity_values.py",
"copies": "3",
"size": "1660",
"license": "mit",
"hash": 8207015178582773000,
"line_mean": 29.7407407407,
"line_max": 68,
"alpha_frac": 0.5831325301,
"autogenerated": false,
"ratio": 4.058679706601467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 54
} |
"""A class to hold entity values."""
from collections import OrderedDict
import fnmatch
import re
from typing import Any, Dict, Optional, Pattern
from homeassistant.core import split_entity_id
class EntityValues:
    """Store entity-id based values, merged from exact/domain/glob sources."""

    def __init__(
        self,
        exact: Optional[Dict] = None,
        domain: Optional[Dict] = None,
        glob: Optional[Dict] = None,
    ) -> None:
        """Initialize an EntityConfigDict."""
        self._cache: Dict[str, Dict] = {}
        self._exact = exact
        self._domain = domain
        compiled: Optional[Dict[Pattern[str], Any]] = None
        if glob is not None:
            compiled = OrderedDict()
            for pattern_text, config in glob.items():
                # translate the shell-style glob into a compiled regex
                compiled[re.compile(fnmatch.translate(pattern_text))] = config
        self._glob = compiled

    def get(self, entity_id: str) -> Dict:
        """Return the merged config for entity_id (cached after first call)."""
        try:
            return self._cache[entity_id]
        except KeyError:
            pass
        domain = split_entity_id(entity_id)[0]
        merged = self._cache[entity_id] = {}
        # apply sources least-specific first so later ones win
        if self._domain is not None and domain in self._domain:
            merged.update(self._domain[domain])
        if self._glob is not None:
            for matcher, values in self._glob.items():
                if matcher.match(entity_id):
                    merged.update(values)
        if self._exact is not None and entity_id in self._exact:
            merged.update(self._exact[entity_id])
        return merged
| {
"repo_name": "qedi-r/home-assistant",
"path": "homeassistant/helpers/entity_values.py",
"copies": "19",
"size": "1546",
"license": "apache-2.0",
"hash": 664552293114965900,
"line_mean": 28.7307692308,
"line_max": 68,
"alpha_frac": 0.5782664942,
"autogenerated": false,
"ratio": 4.247252747252747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 52
} |
"""A class to hold entity values."""
from collections import OrderedDict
import fnmatch
import re
from typing import Any, Dict, Optional, Pattern # noqa: F401
from homeassistant.core import split_entity_id
class EntityValues:
    """Per-entity configuration values, resolved by id, domain, or glob."""

    def __init__(
        self,
        exact: Optional[Dict] = None,
        domain: Optional[Dict] = None,
        glob: Optional[Dict] = None,
    ) -> None:
        """Initialize an EntityConfigDict."""
        self._cache = {}  # type: Dict[str, Dict]
        self._exact = exact
        self._domain = domain
        if glob is None:
            self._glob = None  # type: Optional[Dict[Pattern[str], Any]]
        else:
            # keep pattern order stable: config order determines precedence
            self._glob = OrderedDict(
                (re.compile(fnmatch.translate(pat)), conf)
                for pat, conf in glob.items()
            )

    def get(self, entity_id: str) -> Dict:
        """Return merged config for an entity id, memoized in self._cache."""
        cached = self._cache.get(entity_id)
        if cached is not None:
            return cached
        domain = split_entity_id(entity_id)[0]
        merged = self._cache[entity_id] = {}
        if self._domain is not None and domain in self._domain:
            merged.update(self._domain[domain])
        if self._glob is not None:
            for matcher, values in self._glob.items():
                if matcher.match(entity_id):
                    merged.update(values)
        if self._exact is not None and entity_id in self._exact:
            merged.update(self._exact[entity_id])
        return merged
| {
"repo_name": "fbradyirl/home-assistant",
"path": "homeassistant/helpers/entity_values.py",
"copies": "1",
"size": "1576",
"license": "apache-2.0",
"hash": 5771701921764271000,
"line_mean": 29.3076923077,
"line_max": 70,
"alpha_frac": 0.5774111675,
"autogenerated": false,
"ratio": 4.191489361702128,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5268900529202127,
"avg_score": null,
"num_lines": null
} |
"""A class to hold entity values."""
from collections import OrderedDict
import fnmatch
import re
from typing import Dict
from homeassistant.core import split_entity_id
class EntityValues:
    """Store entity id based values layered from exact/domain/glob maps."""

    def __init__(self, exact: Dict = None, domain: Dict = None,
                 glob: Dict = None) -> None:
        """Initialize an EntityConfigDict."""
        self._cache = {}
        self._exact = exact
        self._domain = domain
        if glob is None:
            self._glob = None
        else:
            # compile each glob up front; order controls precedence
            self._glob = OrderedDict(
                (re.compile(fnmatch.translate(pat)), conf)
                for pat, conf in glob.items()
            )

    def get(self, entity_id):
        """Return the merged config for an entity id, caching the result."""
        cached = self._cache.get(entity_id)
        if cached is not None:
            return cached
        domain = split_entity_id(entity_id)[0]
        merged = self._cache[entity_id] = {}
        if self._domain is not None and domain in self._domain:
            merged.update(self._domain[domain])
        if self._glob is not None:
            for matcher, values in self._glob.items():
                if matcher.match(entity_id):
                    merged.update(values)
        if self._exact is not None and entity_id in self._exact:
            merged.update(self._exact[entity_id])
        return merged
| {
"repo_name": "Danielhiversen/home-assistant",
"path": "homeassistant/helpers/entity_values.py",
"copies": "3",
"size": "1405",
"license": "mit",
"hash": 8192409149731231000,
"line_mean": 28.2708333333,
"line_max": 68,
"alpha_frac": 0.5758007117,
"autogenerated": false,
"ratio": 4.376947040498442,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6452747752198442,
"avg_score": null,
"num_lines": null
} |
"""A class to hold entity values."""
from collections import OrderedDict
import fnmatch
import re
from homeassistant.core import split_entity_id
class EntityValues(object):
    """Store entity id based values from exact, domain and glob sources."""

    def __init__(self, exact=None, domain=None, glob=None):
        """Initialize an EntityConfigDict."""
        self._cache = {}
        self._exact = exact
        self._domain = domain
        compiled = None
        if glob is not None:
            compiled = OrderedDict()
            for pattern_text, config in glob.items():
                compiled[re.compile(fnmatch.translate(pattern_text))] = config
        self._glob = compiled

    def get(self, entity_id):
        """Return the merged config for an entity id, memoized per id."""
        try:
            return self._cache[entity_id]
        except KeyError:
            pass
        domain = split_entity_id(entity_id)[0]
        merged = self._cache[entity_id] = {}
        # least-specific to most-specific so later updates win
        if self._domain is not None and domain in self._domain:
            merged.update(self._domain[domain])
        if self._glob is not None:
            for matcher, values in self._glob.items():
                if matcher.match(entity_id):
                    merged.update(values)
        if self._exact is not None and entity_id in self._exact:
            merged.update(self._exact[entity_id])
        return merged
| {
"repo_name": "MungoRae/home-assistant",
"path": "homeassistant/helpers/entity_values.py",
"copies": "28",
"size": "1340",
"license": "apache-2.0",
"hash": 7295884759394458000,
"line_mean": 28.1304347826,
"line_max": 68,
"alpha_frac": 0.5813432836,
"autogenerated": false,
"ratio": 4.35064935064935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A class to hold entity values."""
from __future__ import annotations
from collections import OrderedDict
import fnmatch
import re
from typing import Any
from homeassistant.core import split_entity_id
# mypy: disallow-any-generics
class EntityValues:
    """Entity-id keyed configuration merged from exact/domain/glob maps."""

    def __init__(
        self,
        exact: dict[str, dict[str, str]] | None = None,
        domain: dict[str, dict[str, str]] | None = None,
        glob: dict[str, dict[str, str]] | None = None,
    ) -> None:
        """Initialize an EntityConfigDict."""
        self._cache: dict[str, dict[str, str]] = {}
        self._exact = exact
        self._domain = domain
        if glob is None:
            self._glob: dict[re.Pattern[str], Any] | None = None
        else:
            # compile once; insertion order fixes match precedence
            self._glob = OrderedDict(
                (re.compile(fnmatch.translate(pat)), conf)
                for pat, conf in glob.items()
            )

    def get(self, entity_id: str) -> dict[str, str]:
        """Return the merged config for entity_id, caching the result."""
        cached = self._cache.get(entity_id)
        if cached is not None:
            return cached
        domain = split_entity_id(entity_id)[0]
        merged: dict[str, str] = {}
        self._cache[entity_id] = merged
        if self._domain is not None and domain in self._domain:
            merged.update(self._domain[domain])
        if self._glob is not None:
            for matcher, values in self._glob.items():
                if matcher.match(entity_id):
                    merged.update(values)
        if self._exact is not None and entity_id in self._exact:
            merged.update(self._exact[entity_id])
        return merged
| {
"repo_name": "kennedyshead/home-assistant",
"path": "homeassistant/helpers/entity_values.py",
"copies": "2",
"size": "1662",
"license": "apache-2.0",
"hash": 7268175306500996000,
"line_mean": 28.6785714286,
"line_max": 68,
"alpha_frac": 0.578820698,
"autogenerated": false,
"ratio": 4.0242130750605325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5603033773060533,
"avg_score": null,
"num_lines": null
} |
"""A class to hold entity values."""
from __future__ import annotations
from collections import OrderedDict
import fnmatch
import re
from typing import Any, Pattern
from homeassistant.core import split_entity_id
# mypy: disallow-any-generics
class EntityValues:
    """Resolve per-entity config values by exact id, domain, or glob match."""

    def __init__(
        self,
        exact: dict[str, dict[str, str]] | None = None,
        domain: dict[str, dict[str, str]] | None = None,
        glob: dict[str, dict[str, str]] | None = None,
    ) -> None:
        """Initialize an EntityConfigDict."""
        self._cache: dict[str, dict[str, str]] = {}
        self._exact = exact
        self._domain = domain
        compiled: dict[Pattern[str], Any] | None = None
        if glob is not None:
            compiled = OrderedDict()
            for pattern_text, config in glob.items():
                compiled[re.compile(fnmatch.translate(pattern_text))] = config
        self._glob = compiled

    def get(self, entity_id: str) -> dict[str, str]:
        """Return merged config for an entity id (cached after first lookup)."""
        try:
            return self._cache[entity_id]
        except KeyError:
            pass
        domain = split_entity_id(entity_id)[0]
        merged = self._cache[entity_id] = {}
        # merge least- to most-specific sources
        if self._domain is not None and domain in self._domain:
            merged.update(self._domain[domain])
        if self._glob is not None:
            for matcher, values in self._glob.items():
                if matcher.match(entity_id):
                    merged.update(values)
        if self._exact is not None and entity_id in self._exact:
            merged.update(self._exact[entity_id])
        return merged
| {
"repo_name": "sander76/home-assistant",
"path": "homeassistant/helpers/entity_values.py",
"copies": "3",
"size": "1668",
"license": "apache-2.0",
"hash": 1140993966264533000,
"line_mean": 28.7857142857,
"line_max": 68,
"alpha_frac": 0.579736211,
"autogenerated": false,
"ratio": 4.038740920096853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6118477131096853,
"avg_score": null,
"num_lines": null
} |
"""A class to hold the data about scoring taxids and NC ids."""
import sys
import taxon
class scoring:
    """
    We use this class to score a pair of IDs. We can handle taxonomy IDs
    and NC_\d+ IDs (i.e. from genbank).
    """
    def __init__(self):
        # parsing the NCBI taxonomy dump files is slow; do it once up front
        sys.stderr.write("Parsing the taxonomy files\n")
        self.taxa = taxon.readNodes()
        self.names, self.blastname, self.genbankname, self.synonym = taxon.extendedNames()
        self.divs = taxon.readDivisions()
        sys.stderr.write("Done parsing the taxonomy\n")
        # taxonomic ranks at which a prediction is scored
        self.wanted = ['species', 'genus', 'family', 'order', 'class',
                       'phylum', 'superkingdom']
        self.taxonomy = {}  # cache: taxid -> {rank: ancestor taxid at that rank}
        self.nc2tax = {}    # NC accession -> (host) taxonomy id

    def read_host_file(self, taxfile='/home3/redwards/phage/host_analysis/all_host_taxid.txt'):
        """
        Read a host taxonomy file. This file has tuples of NC id and
        taxonomy ID. The important thing here is that the phage taxonomy
        ID is the taxonomy of the host and not the phage!
        """
        with open(taxfile, 'r') as tin:
            for l in tin:
                p = l.strip().split("\t")
                self.nc2tax[p[0]] = p[1]

    def score_taxid(self, phage, host):
        """
        Given a pair of taxonomy IDs from the phage and its predicted
        host we return a hash of taxonomic levels with True if the host
        is correct for the phage at that level and False if it is not
        correct at that level.
        """
        results = {w: False for w in self.wanted}
        if phage not in self.taxa:
            sys.stderr.write("SKIPPED Phage: " + str(phage) + " not in the taxonomy\n")
            return results
        if host not in self.taxa:
            sys.stderr.write("SKIPPED host: " + str(host) + " not in the taxonomy\n")
            return results
        # walk each lineage up towards the root, caching the taxid seen
        # at every wanted rank (taxids and parents are strings; '1' is root)
        if phage not in self.taxonomy:
            self.taxonomy[phage] = {}
            i = phage
            while self.taxa[i].parent != '1' and i != '1':
                rank = self.taxa[i].rank
                if rank in self.wanted:
                    self.taxonomy[phage][rank] = i
                i = self.taxa[i].parent
        if host not in self.taxonomy:
            self.taxonomy[host] = {}
            i = host
            while self.taxa[i].parent != '1' and i != '1':
                rank = self.taxa[i].rank
                if rank in self.wanted:
                    self.taxonomy[host][rank] = i
                i = self.taxa[i].parent
        # a level scores True only when both lineages define it and agree
        for w in self.wanted:
            if w in self.taxonomy[phage] and w in self.taxonomy[host]:
                if self.taxonomy[phage][w] == self.taxonomy[host][w]:
                    results[w] = True
        return results

    def score_NC(self, phage, host):
        '''
        Given a pair of NC ids (from GenBank) we convert them to
        taxonomy IDs and then score those
        '''
        # lazily load the NC -> taxid mapping on first use
        if self.nc2tax == {}:
            self.read_host_file()
        if phage not in self.nc2tax:
            sys.stderr.write("SKIPPED: phage ID: " + phage + " not in 2tax file\n")
            return self.score_taxid(None, None)
        if host not in self.nc2tax:
            sys.stderr.write("SKIPPED: host ID: " + host + " not in 2tax file\n")
            return self.score_taxid(None, None)
        return self.score_taxid(self.nc2tax[phage], self.nc2tax[host])
if __name__ == '__main__':
    # smoke test: exercise both the taxid and the NC-id scoring paths
    sys.stderr.write("Initializing ... ")
    s = scoring()

    def show(results):
        # one "level<TAB>True/False" line per taxonomic level
        print("\n".join([w + "\t" + str(results[w]) for w in results]))

    print("Checking tax id's. These are the same:")
    show(s.score_taxid('1280', '1280'))
    print("Checking tax id's. These are different")
    show(s.score_taxid('1280', '28901'))
    print("Checking NC ids. These are the same:")
    show(s.score_NC('NC_021774', 'NC_021775'))
    print("Checking NC id's. These are different")
    show(s.score_NC('NC_019513', 'NC_021774'))
| {
"repo_name": "linsalrob/PhageHosts",
"path": "code/scoring.py",
"copies": "1",
"size": "4070",
"license": "mit",
"hash": -2891458303786684000,
"line_mean": 35.017699115,
"line_max": 95,
"alpha_frac": 0.5461916462,
"autogenerated": false,
"ratio": 3.4726962457337884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4518887891933788,
"avg_score": null,
"num_lines": null
} |
# a class to manage SRTM surfaces
import json
import numpy as np
import os
from pylab import *
import random
import scipy.interpolate
import struct
import urllib.request
import zipfile
import navpy
from .logger import log
# return the lower left corner of the 1x1 degree tile containing
# the specified lla coordinate
def lla_ll_corner(lat_deg, lon_deg):
    """Return the (lat, lon) lower-left corner of the 1x1 degree tile
    containing the given coordinate."""
    # floor division by 1 is floor() for both ints and floats
    return int(lat_deg // 1), int(lon_deg // 1)
# return the tile base name for the specified coordinate
def make_tile_name(lat, lon):
    """Return the SRTM tile base name (e.g. "N45W094") for (lat, lon)."""
    ll_lat, ll_lon = lla_ll_corner(lat, lon)
    # N/E for non-negative corners, S/W with magnitudes otherwise
    ns = ("N%2d" % ll_lat) if ll_lat >= 0 else ("S%2d" % -ll_lat)
    ew = ("E%03d" % ll_lon) if ll_lon >= 0 else ("W%03d" % -ll_lon)
    return ns + ew
class SRTM():
    """One 1x1 degree SRTM tile: download, parse, and interpolate elevations."""

    def __init__(self, lat, lon, dict_path):
        # snap to the containing tile's lower-left corner
        self.lat, self.lon = lla_ll_corner(lat, lon)
        self.srtm_dict = {}
        self.srtm_cache_dir = '/var/tmp' # unless set otherwise
        self.srtm_z = None  # raw 1201x1201 elevation samples (set by parse())
        self.i = None
        self.load_srtm_dict(dict_path)

    # load the directory download dictionary (mapping a desired file
    # to a download path.)
    def load_srtm_dict(self, dict_path):
        dict_file = dict_path + '/srtm.json'
        try:
            f = open(dict_file, 'r')
            self.srtm_dict = json.load(f)
            f.close()
        except:
            # NOTE(review): bare except swallows all errors; downloads
            # will simply fail later with an empty catalog
            log("SRTM: unable to read =", dict_file)

    # if we'd like a persistant place to cache srtm files so they don't
    # need to be re-download every time we run anything
    def set_srtm_cache_dir(self, srtm_cache_dir):
        if not os.path.exists(srtm_cache_dir):
            log("SRTM: cache path doesn't exist =", srtm_cache_dir)
        else:
            self.srtm_cache_dir = srtm_cache_dir

    # download and extract srtm file into cache directory
    def download_srtm(self, fileroot):
        if fileroot in self.srtm_dict:
            url = self.srtm_dict[fileroot]
            download_file = self.srtm_cache_dir + '/' + fileroot + '.hgt.zip'
            log("SRTM: downloading:", url)
            # NOTE(review): urllib.request.URLopener is deprecated --
            # consider urllib.request.urlretrieve
            file = urllib.request.URLopener()
            log(file.retrieve(url, download_file))
            return True
        else:
            log("SRTM: requested srtm file not in catalog:", fileroot)
            return False

    def parse(self):
        # Ensure the tile's .hgt.zip is cached (downloading if needed),
        # then unpack the raw samples.  Returns False if unavailable.
        tilename = make_tile_name(self.lat, self.lon)
        cache_file = self.srtm_cache_dir + '/' + tilename + '.hgt.zip'
        if not os.path.exists(cache_file):
            if not self.download_srtm(tilename):
                return False
        log("SRTM: parsing .hgt file:", cache_file)
        zip = zipfile.ZipFile(cache_file)
        f = zip.open(tilename + '.hgt', 'r')
        contents = f.read()
        f.close()
        # read 1,442,401 (1201x1201) high-endian
        # signed 16-bit words into self.z
        # NOTE(review): format ">H" is *unsigned*; SRTM voids (-1 as a
        # signed sample) therefore appear as 65535, which the consumers
        # below filter out -- confirm this is intentional
        self.srtm_z = struct.unpack(">1442401H", contents)
        return True

    def make_lla_interpolator(self):
        # Build a regular-grid interpolator over this tile, keyed by
        # (lon, lat); out-of-range samples are treated as sea level.
        log("SRTM: constructing LLA interpolator")
        srtm_pts = np.zeros((1201, 1201))
        for r in range(0,1201):
            for c in range(0,1201):
                idx = (1201*r)+c
                va = self.srtm_z[idx]
                if va == 65535 or va < 0 or va > 10000:
                    # void / bogus sample: use elevation 0
                    va = 0.0
                z = va
                # file rows run north->south; flip so y increases northward
                srtm_pts[c,1200-r] = z
        x = np.linspace(self.lon, self.lon+1, 1201)
        y = np.linspace(self.lat, self.lat+1, 1201)
        self.lla_interp = scipy.interpolate.RegularGridInterpolator((x, y), srtm_pts, bounds_error=False, fill_value=-32768)

    def lla_interpolate(self, point_list):
        # point_list entries are [lon, lat]; points outside the tile
        # return the fill value -32768
        return self.lla_interp(point_list)

    def plot_raw(self):
        # Debug helper: render the raw elevation samples as an image.
        zzz = np.zeros((1201,1201))
        for r in range(0,1201):
            for c in range(0,1201):
                va=self.srtm_z[(1201*r)+c]
                if (va==65535 or va<0 or va>10000):
                    va=0.0
                zzz[r][c]=float(va)
        imshow(zzz, interpolation='bilinear',cmap=cm.gray,alpha=1.0)
        grid(False)
        show()
# Build a gridded elevation interpolation table centered at lla_ref
# with width and height. This is a little bit of quick feet dancing,
# but allows areas to span corners or edges of srtm tiles and attempts
# to stay on a fast path of regular grids, even though a regularly lla
# grid != a regular ned grid.
tile_dict = {}      # tile base name -> SRTM instance (filled by load_tiles)
ned_interp = None   # area interpolator (built by make_interpolator)

def initialize(lla_ref, width_m, height_m, step_m):
    """(Re)load the needed SRTM tiles and rebuild the NED interpolator."""
    global tile_dict
    if tile_dict:
        log("Reinitializing SRTM interpolator which is probably not necessary")
        tile_dict = {}
    else:
        log("Initializing the SRTM interpolator")
    load_tiles(lla_ref, width_m, height_m)
    make_interpolator(lla_ref, width_m, height_m, step_m)
def load_tiles(lla_ref, width_m, height_m):
    """Load and index every SRTM tile touched by the width x height
    NED box centered at lla_ref."""
    log("SRTM: loading DEM tiles")
    # box corners in NED, converted to lat/lon to find the tile range
    sw_ned = np.array([[-height_m*0.5, -width_m*0.5, 0.0]])
    ne_ned = np.array([[ height_m*0.5,  width_m*0.5, 0.0]])
    sw_lla = navpy.ned2lla(sw_ned, lla_ref[0], lla_ref[1], lla_ref[2])
    ne_lla = navpy.ned2lla(ne_ned, lla_ref[0], lla_ref[1], lla_ref[2])
    lat_min, lon_min = lla_ll_corner(sw_lla[0], sw_lla[1])
    lat_max, lon_max = lla_ll_corner(ne_lla[0], ne_lla[1])
    for lat in range(lat_min, lat_max + 1):
        for lon in range(lon_min, lon_max + 1):
            tile = SRTM(lat, lon, '../srtm')
            if not tile.parse():
                continue
            tile.make_lla_interpolator()
            tile_dict[make_tile_name(lat, lon)] = tile
def make_interpolator(lla_ref, width_m, height_m, step_m):
    """Build the module-level NED grid interpolator (ned_interp).

    Samples elevations out of the loaded SRTM tiles on a regular
    north/east grid of step_m spacing covering width_m x height_m
    centered at lla_ref.  Points no tile can answer fall back to 0.
    """
    log("SRTM: constructing NED area interpolator")
    rows = int(height_m / step_m) + 1
    cols = int(width_m / step_m) + 1
    # build regularly gridded n,e coordinate lists
    n_list = np.linspace(-height_m*0.5, height_m*0.5, rows)
    e_list = np.linspace(-width_m*0.5, width_m*0.5, cols)
    ned_pts = []
    for e in e_list:
        for n in n_list:
            ned_pts.append( [n, e, 0] )
    # convert ned_pts list to lla coordinates (so it's not necessarily
    # an exact grid anymore, but we can now interpolate elevations out
    # of the lla interpolators for each tile.)
    navpy_pts = navpy.ned2lla(ned_pts, lla_ref[0], lla_ref[1], lla_ref[2])
    # build list of (lon, lat) points for the actual lla elevation lookup
    ll_pts = []
    for i in range( len(navpy_pts[0]) ):
        lat = navpy_pts[0][i]
        lon = navpy_pts[1][i]
        ll_pts.append( [ lon, lat ] )
    # set all the elevations in the ned_ds array to the extreme minimum
    # value.  (rows,cols) might seem funny, but (ne)d is reversed from
    # (xy)z ... don't think about it too much or you'll get a headache. :-)
    ned_ds = np.zeros((rows,cols))
    ned_ds[:][:] = -32768
    # for each tile loaded, interpolate as many elevation values as we
    # can, then copy the good values into ned_ds.  When we finish all
    # the loaded tiles, we should have elevations for the entire range
    # of points.
    for tile in tile_dict:
        zs = tile_dict[tile].lla_interpolate(np.array(ll_pts))
        # copy the good altitudes back to the corresponding ned points
        for r in range(0,rows):
            for c in range(0,cols):
                idx = (rows*c)+r
                if zs[idx] > -10000:
                    ned_ds[r,c] = zs[idx]
    # quick sanity check: any point no tile answered falls back to 0
    for r in range(0,rows):
        for c in range(0,cols):
            idx = (rows*c)+r
            if ned_ds[r,c] < -10000:
                log("Problem interpolating elevation for:", ll_pts[idx])
                ned_ds[r,c] = 0.0
    # now finally build the actual grid interpolator with evenly spaced
    # ned n, e values and elevations interpolated out of the srtm lla
    # interpolator.
    global ned_interp
    ned_interp = scipy.interpolate.RegularGridInterpolator((n_list, e_list), ned_ds, bounds_error=False, fill_value=-32768)

    do_plot = False
    if do_plot:
        imshow(ned_ds, interpolation='bilinear', origin='lower', cmap=cm.gray, alpha=1.0)
        grid(False)
        show()

    do_test = False
    if do_test:
        for i in range(40):
            ned = [(random.random()-0.5)*height_m,
                   (random.random()-0.5)*width_m,
                   0.0]
            lla = navpy.ned2lla(ned, lla_ref[0], lla_ref[1], lla_ref[2])
            nedz = ned_interp([ned[0], ned[1]])
            tile = make_tile_name(lla[0], lla[1])
            llaz = tile_dict[tile].lla_interpolate(np.array([lla[1], lla[0]]))
            # bug fix: was qlog(), which is undefined (NameError)
            log("nedz=%.2f llaz=%.2f" % (nedz, llaz))
# while error > eps: find altitude at current point, new pt = proj
# vector to current alt.
def interpolate_vector(ned, v):
    """Project the ray (origin *ned*, direction *v*) down onto the terrain.

    Iterates: look up the ground elevation at the current point, then
    re-project the vector to that altitude, until the altitude error is
    below eps (or 25 iterations).  Returns the NED intersection point.
    Assumes the camera is above ground; rays pointing up (v[2] <= 0)
    return the start point unchanged.
    """
    p = ned[:] # copy hopefully
    # sanity check (always assume camera pose is above ground!)
    if v[2] <= 0.0:
        return p
    eps = 0.01
    count = 0
    tmp = ned_interp([p[0], p[1]])
    if not np.isnan(tmp[0]) and tmp[0] > -32768:
        ground = tmp[0]
    else:
        # no elevation data here: assume sea level
        ground = 0.0
    # NED 'down' is positive, so altitude error is |d + ground|
    error = abs(p[2] + ground)
    while error > eps and count < 25:
        # scale the direction vector to reach the current ground estimate
        d_proj = -(ned[2] + ground)
        factor = d_proj / v[2]
        n_proj = v[0] * factor
        e_proj = v[1] * factor
        p = [ ned[0] + n_proj, ned[1] + e_proj, ned[2] + d_proj ]
        tmp = ned_interp([p[0], p[1]])
        if not np.isnan(tmp[0]) and tmp[0] > -32768:
            ground = tmp[0]
        error = abs(p[2] + ground)
        count += 1
    if np.any(np.isnan(p)):
        print('SRTM interpolation made a nan:' ,p)
    return p
# return a list of (3d) ground intersection points for the give
# vector list and camera pose. Vectors are already transformed
# into ned orientation.
def interpolate_vectors(ned, v_list):
    """Return the 3d ground intersection point for each vector in v_list.

    *ned* is the common camera position (NED frame); each vector is
    already transformed into ned orientation.
    """
    pt_list = []
    for v in v_list:
        # bug fix: was 'dinterpolate_vector', an undefined name (NameError)
        p = interpolate_vector(ned, v.flatten())
        pt_list.append(p)
    return pt_list
| {
"repo_name": "UASLab/ImageAnalysis",
"path": "scripts/lib/srtm.py",
"copies": "1",
"size": "11147",
"license": "mit",
"hash": 5376634110165213000,
"line_mean": 34.2753164557,
"line_max": 124,
"alpha_frac": 0.5624831793,
"autogenerated": false,
"ratio": 3.053972602739726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41164557820397263,
"avg_score": null,
"num_lines": null
} |
"""A class to provide the flot plotting utility in an ipython notebook
This class provides utilities to plot data in an ipython notebook using
the flot http://code.google.com/p/flot/ javascript plotting library. It
has the class plot which must be instantiated as an object. Once this is
instantiated the plot_figure method can be called to plot data. This inserts
a div tag and then uses the flot library to render that data.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import string
import json
# Third-party imports
# Our own imports
import IPython.core.display
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class FlotPlot(object):
'''
This class contains methods for using the javascript plotting backend flot
to plot in an ipython notebook. the number of pixels can be set using the
pixelsx and pixelsy atttributes and the legend location can be set using
the legendloc attribute.
possible legendloc values : 'ne', 'nw', 'se', 'sw'
'''
# number of plots created so far; used to generate unique placeholder div ids
nplots = 0
# default plot size in pixels
pixelsx = 600
pixelsy = 300
# legend location: one of 'ne', 'nw', 'se', 'sw'
legendloc = "ne"
# set False to suppress the legend entirely
haslegend = True
def _read_data(self, data, data1, label):
    """Encode the python data as JSON and build the flot series spec.

    Returns (d, labelstring): *d* holds javascript variable
    declarations (d0, d1, ... or d1 for a single series) with the JSON
    data, *labelstring* the flot series descriptor(s).
    NOTE(review): relies on Python 2 behavior (list-returning zip is
    JSON-encodable, string.rstrip) -- would need changes for Python 3.
    """
    #This function takes the python data and encodes it into JSON data
    d = ""
    labelstring = ""
    encoder = json.JSONEncoder()
    if data is not None:
        if type(data[0]) == list or ('numpy' in str(type(data[0])) and data[0].shape != () ):
            # multiple series: one JS variable (d0, d1, ...) per series
            for index,item in enumerate(data):
                if data1 is not None:
                    d += "var d"+str(index)+" ="+ encoder.encode(zip(item,data1[index])) +";\n"
                    if label is not None and type(label) == list:
                        labelstring += "{ label:\"" +label[index] + "\", data:d" + str(index) + " },"
                    else:
                        labelstring += "{ data:d" + str(index) + " },"
                else:
                    # no second axis given: pair values with integer indices
                    d += "var d"+str(index)+" ="+ encoder.encode(zip(item,range(len(item)))) +";\n"
                    if label is not None and type(label) == list:
                        labelstring += "{ label:\"" +label[index] + "\", data:d" + str(index) + " },"
                    else:
                        labelstring += "{ data:d" + str(index) + " },"
            # drop the trailing comma from the series list
            labelstring = string.rstrip(labelstring,",")
        else:
            # single series
            datastring = "var d1 = "
            if data1 is not None:
                datastring += encoder.encode(zip(data,data1)) +";"
            else:
                datastring += encoder.encode(zip(data,range(len(data)))) +";"
            if label is not None and type(label) == str:
                labelstring = "{ label : \"" + label + "\"," + "data:d1}"
            else:
                labelstring = "{data:d1}"
            d = datastring
    return d, labelstring
def plot_figure( self, data = None, data1 = None, label = None):
    '''
    This method plots the inputs data and data1 based on the following
    rules. If only data exists each array in that input field will be
    plotted with the x-axis having integer values. If data exists
    in both data and data1 it will be assumed to be of the format:
    [x0,x1,x2...]
    [y0,y1,y2...]
    where xn and yn are either numerical values of arrays of values.
    the label is assumed to be a string if there is only one input set
    or an array of strings equal in length to the number of arrays in
    data.
    '''
    if data is not None and len(data) > 0:
        # encode the python data as javascript variable declarations
        d, label = self._read_data(data,data1,label)
        nplotstxt = str(self.nplots)
        legendstr = 'legend: { position:"' + self.legendloc + '"},' if self.haslegend else ""
        # javascript that renders the plot into this plot's placeholder
        # div and wires up the zoom-to-selection and home buttons
        src = d + """
var options = {
    selection: { mode: "xy" },
    """+legendstr+"""
};
var plot""" + nplotstxt + """ = $.plot($("#placeholder""" + nplotstxt + """"), [ """ + label + """],options);
var minx""" + nplotstxt + """ = plot""" + nplotstxt + """.getAxes().xaxis.min;
var maxx""" + nplotstxt + """ = plot""" + nplotstxt + """.getAxes().xaxis.max;
var miny""" + nplotstxt + """ = plot""" + nplotstxt + """.getAxes().yaxis.min;
var maxy""" + nplotstxt + """ = plot""" + nplotstxt + """.getAxes().yaxis.max;
var iminx""" + nplotstxt + """ = plot""" + nplotstxt + """.getAxes().xaxis.min;
var imaxx""" + nplotstxt + """ = plot""" + nplotstxt + """.getAxes().xaxis.max;
var iminy""" + nplotstxt + """ = plot""" + nplotstxt + """.getAxes().yaxis.min;
var imaxy""" + nplotstxt + """ = plot""" + nplotstxt + """.getAxes().yaxis.max;
$("#placeholder""" + nplotstxt + """").bind("plotselected", function (event, ranges) {
    minx""" + nplotstxt + """ = ranges.xaxis.from;
    maxx""" + nplotstxt + """ = ranges.xaxis.to;
    miny""" + nplotstxt + """ = ranges.yaxis.from;
    maxy""" + nplotstxt + """ = ranges.yaxis.to;
});
$("#zoom""" + nplotstxt + """").click(function() {
    $.plot($("#placeholder""" + nplotstxt + """"), plot""" + nplotstxt + """.getData(),
        $.extend(true, {}, options, {
            xaxis: { min: minx""" + nplotstxt + """ , max: maxx""" + nplotstxt + """ },
            yaxis: { min: miny""" + nplotstxt + """ , max: maxy""" + nplotstxt + """ }
        }));
});
$("#home""" + nplotstxt + """").click(function() {
    $.plot($("#placeholder""" + nplotstxt + """"), plot""" + nplotstxt + """.getData(),
        $.extend(true, {}, options, {
            xaxis: { min: iminx""" + nplotstxt + """ , max: imaxx""" + nplotstxt + """ },
            yaxis: { min: iminy""" + nplotstxt + """ , max: imaxy""" + nplotstxt + """ }
        }));
});
"""
    else:
        print("No data given to plot")
        return
    # insert the target div, bump the plot counter, then run the JS
    self._insert_placeholder()
    self.nplots = self.nplots + 1
    IPython.core.display.display_javascript(IPython.core.display.Javascript(data=src,
        lib=["http://crbates.github.com/flot/jquery.flot.min.js","http://crbates.github.com/flot/jquery.flot.navigate.min.js","http://crbates.github.com/flot/jquery.flot.selection.min.js"]))
def _insert_placeholder(self):
    """Insert the HTML placeholder <div> and home/zoom buttons for the plot
    numbered self.nplots into the notebook output area.

    The div/button ids carry the plot number suffix so the JavaScript
    emitted by plot_figure() can target them.
    """
    nplotstxt = str(self.nplots)
    # BUGFIX: the original quoting emitted id="placeholderN"" (a stray
    # doubled quote closing the id attribute); the quoting below produces
    # exactly one closing quote so the attribute is well formed.
    src = """
<div id="placeholder""" + nplotstxt + """" style="width:
""" + str(self.pixelsx) + """px;height:""" + str(self.pixelsy) + """px;"></div>
<input id="home""" + nplotstxt + """" type="button" value="home"> <input id="zoom""" + nplotstxt + """" type="button" value="zoom to selection">
"""
    IPython.core.display.display_html(IPython.core.display.HTML(data=src))
| {
"repo_name": "mlhenderson/narrative",
"path": "src/MG-RAST_ipy-mkmq/ipyMKMQ/flotplot.py",
"copies": "7",
"size": "7906",
"license": "mit",
"hash": -4500349992348054000,
"line_mean": 47.5030674847,
"line_max": 190,
"alpha_frac": 0.4798886921,
"autogenerated": false,
"ratio": 4.035732516590097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014457370095569003,
"num_lines": 163
} |
# A class to represent a list of ipfw rules and tables
import os.path
from subprocess import Popen, PIPE
import builtins
import glob
from includes.output import *
from includes.util import *
from includes.defines import *
class IPFW:
    """Represents a set of ipfw firewall rules and tables.

    Rules live in self.rules (dict keyed by rule number) and tables in
    self.tables (dict keyed by table number, values are lists of CIDR
    strings). When self.uuid is set, commands are executed inside the
    container jail "trd-<uuid>"; otherwise they apply to the host.
    """

    # Constructor
    def __init__(self, directory, uuid = None):
        # directory the ipfw rule/table scripts are written to
        self.directory = directory.rstrip('/')
        self.uuid = uuid
        self.ipfwFile = "ipfw.rules"
        self.rules = {}  # a dict of rules, keyed by rule number
        self.tables = {}  # a dict of tables, keyed by table number

    # Action: adds a rule to allow a port(s)
    #
    # Pre: this object exists
    # Post: a new rule has been added to this object
    #
    # Params: direction - IN/OUT (or "any" for no direction keyword)
    #         protocol - the protocol to allow
    #         interface - the interface to apply the rule to
    #         sourceIP - the source IP address
    #         destIP - the destination IP address
    #         ports - the ports to allow
    #         ruleNum - the rule number to add this at (None = append)
    #
    # Return: True if succeeded, False otherwise
    def openPort(self, direction, protocol, interface, sourceIP, destIP, ports, ruleNum = None):
        # BUGFIX: only compute a rule number when the caller did not supply
        # one; the original `else` branch also clobbered an explicitly
        # passed ruleNum with 1.
        if ruleNum is None:
            if len(self.rules) > 0:
                # append after the current highest rule number
                maxRule = max(self.rules.keys(), key=int)
                ruleNum = int(maxRule) + 1
            else:
                ruleNum = 1
        # nothing to do when no ports were requested
        if (ports is None) or (len(ports) == 0):
            return True
        portsCSV = ','.join(str(port) for port in ports)
        # a direction of "any" means no direction keyword in the rule
        if direction == "any":
            direction = ''
        # protocol specific state-tracking options
        options = ''
        if protocol == 'tcp':
            options = "setup keep-state"
        elif protocol == 'udp':
            options = "keep-state"
        logging = ''
        if builtins.tredlyCommonConfig.firewallEnableLogging == "yes":
            logging = "log logamount 5"
        # register the rule
        self.rules[ruleNum] = "allow " + logging + " " + protocol + " from " + sourceIP + " to " + destIP + " " + portsCSV + " " + direction + " via " + interface + " " + options
        return True

    # Action: apply firewall rules
    #
    # Pre: this object exists
    # Post: ipfw rules have been applied to container or host
    #
    # Params:
    #
    # Return: True if succeeded, False otherwise
    def apply(self):
        # write the tables out first
        for tableNum, tableList in self.tables.items():
            filePath = self.directory + "/ipfw.table." + str(tableNum)
            # write a small shell script that loads the table
            with open(filePath, "w") as ipfw_table:
                print('#!/usr/bin/env sh', file=ipfw_table)
                for value in tableList:
                    print("ipfw table " + str(tableNum) + " add " + value, file=ipfw_table)
            # the script must be executable; keep it private to root
            os.chmod(filePath, 0o700)
            # list the live table (inside the jail for a container)
            listCmd = ['ipfw', 'table', str(tableNum), 'list']
            if self.uuid is not None:
                listCmd = ['jexec', 'trd-' + self.uuid] + listCmd
            process = Popen(listCmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
            stdOut, stdErr = process.communicate()
            if process.returncode != 0:
                e_error("Failed to list table " + str(tableNum))
            stdOutString = stdOut.decode(encoding='UTF-8')
            # remove live entries that are no longer part of this object
            for line in stdOutString.splitlines():
                value = line.split()[0]
                if value not in self.tables[tableNum]:
                    if self.uuid is None:
                        delCmd = ['ipfw', 'table', str(tableNum), 'delete', value]
                    else:
                        delCmd = ['jexec', 'trd-' + self.uuid, 'sh', '-c', 'ipfw table ' + str(tableNum) + ' delete ' + value]
                    process = Popen(delCmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
                    stdOut, stdErr = process.communicate()
                    if process.returncode != 0:
                        print(delCmd)  # debug aid: show the failing command
                        e_error("Failed to delete value " + value)
            # apply the table by running the script; NOTE(review): the
            # script was written to self.directory but is executed from
            # /usr/local/etc - presumably the directory is mounted there
            # inside the jail; confirm against callers.
            tableScript = '/usr/local/etc/ipfw.table.' + str(tableNum)
            if self.uuid is not None:  # its a container
                cmd = ['jexec', 'trd-' + self.uuid, tableScript]
            else:  # its the host so just run the script
                cmd = [tableScript]
            process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
            stdOut, stdErr = process.communicate()
            # return code of 0 == success
            # return code of 71 == firewall rule already exists
            if (process.returncode != 0) and (process.returncode != 71):
                e_error("Failed to apply firewall rules from " + tableScript)
                e_error("Return code " + str(process.returncode))
                return False
        # now write the main rules
        filePath = self.directory + "/ipfw.rules"
        # dont overwrite the hosts ipfw rules
        if self.uuid is not None:
            with open(filePath, "w") as ipfw_rules:
                # add the shebang
                print('#!/usr/bin/env sh', file=ipfw_rules)
                # source the table scripts first so the tables exist
                for tableNum, tableList in self.tables.items():
                    print('source /usr/local/etc/ipfw.table.' + str(tableNum), file=ipfw_rules)
                # then the rules themselves
                for ruleNum, rule in self.rules.items():
                    print("ipfw add " + str(ruleNum) + " " + rule, file=ipfw_rules)
            # Set the permissions on this file to 700
            os.chmod(self.directory + '/' + self.ipfwFile, 0o700)
        # run the ipfw rules within the container only - host rules never change
        if self.uuid is not None:  # its a container
            cmd = ['jexec', 'trd-' + self.uuid, '/usr/local/etc/ipfw.rules']
            process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
            stdOut, stdErr = process.communicate()
            if process.returncode != 0:
                e_error("Failed to apply firewall rules from " + self.directory + '/' + self.ipfwFile)
                return False
        # everything succeeded, return true
        return True

    # Action: read firewall rules
    #
    # Pre: this object exists
    # Post: ipfw rules and tables have been read from self.directory into this object
    #
    # Params:
    #
    # Return: True if succeeded, False otherwise
    def readRules(self):
        # get a list of table scripts to read
        tables = glob.glob(self.directory.rstrip('/') + "/ipfw.table.*")
        for table in tables:
            with open(table) as ipfwTableFile:
                for line in ipfwTableFile:
                    line = line.strip()
                    # ignore comments and blank lines
                    if (not line.startswith('#')) and (len(line) > 0):
                        # line format: "ipfw table <num> add <value>"
                        tableNum = int(line.split()[2])
                        value = line.split()[4]
                        self.appendTable(tableNum, value)
        # ignore main ipfw file for host
        if self.uuid is not None:
            try:
                with open(self.directory + '/ipfw.rules') as ipfwRulesFile:
                    for line in ipfwRulesFile:
                        line = line.strip()
                        # we're only concerned with ipfw rules, nothing else
                        if line.startswith('ipfw'):
                            # line format: "ipfw add <num> <rule...>";
                            # split at most 3 times so the rule body stays intact
                            lineParts = line.split(' ', 3)
                            # store the key as int so it matches the keys
                            # created by openPort() (the original mixed
                            # str and int keys)
                            ruleNum = int(lineParts[2])
                            self.rules[ruleNum] = lineParts[3]
            except FileNotFoundError:
                e_warning("IPFW file " + self.directory + '/ipfw.rules not found.')
                return False
        return True

    # Action: appends a value to a given table
    #
    # Pre: this object exists
    # Post: tableNum has been updated with value
    #
    # Params: tableNum - the table number to update
    #         value - the value to update it with
    #
    # Return: True if succeeded, False otherwise
    def appendTable(self, tableNum, value):
        # if the table isnt set then set it as a list
        if tableNum not in self.tables:
            self.tables[tableNum] = []
        # a bare ip address (no netmask) is assumed to be a single host
        if (isValidIp4(value)) and ('/' not in value):
            value = value + '/32'
        # only append when not already present
        if value not in self.tables[tableNum]:
            self.tables[tableNum].append(value)
        return True

    # Action: remove a value from a given table
    #
    # Pre: this object exists
    # Post: value has been removed from table tableNum
    #
    # Params: tableNum - the table number to remove from
    #         value - the value to remove
    #
    # Return: True if succeeded, False otherwise
    def removeFromTable(self, tableNum, value):
        # if the table doesnt exist return false
        if tableNum not in self.tables:
            return False
        # a bare ip address (no netmask) is assumed to be a single host
        if (isValidIp4(value)) and ('/' not in value):
            value = value + '/32'
        # remove the value if present
        if value in self.tables[tableNum]:
            self.tables[tableNum].remove(value)
        return True
"repo_name": "vuid-com/tredly",
"path": "components/tredly-libs/python-common/objects/firewall/ipfw.py",
"copies": "2",
"size": "11251",
"license": "mit",
"hash": 8125513311521402000,
"line_mean": 38.7597173145,
"line_max": 178,
"alpha_frac": 0.5172873522,
"autogenerated": false,
"ratio": 4.544022617124394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6061309969324394,
"avg_score": null,
"num_lines": null
} |
# A class to represent a network interface
import random
class NetInterface:
    """Represents a network interface: a name, a MAC address and lists of
    IPv4/IPv6 addresses assigned to it."""

    # Constructor
    def __init__(self, name = None, mac = None):
        self.name = name
        self.mac = mac
        self.ip4Addrs = []  # IPv4 addresses assigned to this interface
        self.ip6Addrs = []  # IPv6 addresses assigned to this interface

    # Action: Generate a mac address for this interface
    #
    # Pre: this object exists
    # Post: self.mac has been set with a random mac address
    #
    # Params: octet1-3 - the first three octets in the mac address
    #
    # Return: True if succeeded, False otherwise
    def generateMac(self, octet1 = "02", octet2 = "33", octet3 = "11"):
        # generate the last three octets as zero-padded lowercase hex.
        # format(..., '02x') replaces the original hex(x).lstrip('0x'),
        # which strips *any* leading '0'/'x' characters rather than just
        # the '0x' prefix (it only worked here because hex() never emits
        # a leading zero digit after the prefix).
        octet4 = format(random.randint(0x00, 0x7f), '02x')
        octet5 = format(random.randint(0x00, 0xff), '02x')
        octet6 = format(random.randint(0x00, 0xff), '02x')
        # assemble the full colon-separated mac address
        self.mac = ':'.join((octet1, octet2, octet3, octet4, octet5, octet6))
        return True
"repo_name": "vuid-com/tredly",
"path": "components/tredly-libs/python-common/objects/ip4/netinterface.py",
"copies": "2",
"size": "1063",
"license": "mit",
"hash": -5257381930515646000,
"line_mean": 33.3225806452,
"line_max": 156,
"alpha_frac": 0.57855127,
"autogenerated": false,
"ratio": 3.5082508250825084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5086802095082508,
"avg_score": null,
"num_lines": null
} |
# A class to represent an layer 4 proxy file
import os.path
import re
from subprocess import Popen, PIPE
class Layer4ProxyFile:
    """Represents a layer 4 proxy config file (a shell script of
    redirect_port rules). Parsed rules are kept in self.lines as dicts;
    any non-rule lines are preserved verbatim in self.preamble."""

    # Constructor
    def __init__(self, filePath):
        self.filePath = filePath
        self.preamble = ''  # verbatim lines preceding/between the rules
        self.lines = []     # parsed rules - list of dicts

    # Action: reads layer 4 proxy file, and stores parsed lines to self
    #
    # Pre:
    # Post: self.lines / self.preamble populated from self.filePath
    #
    # Params: clear - clear the elements before reading
    #
    # Return: True if success, False otherwise
    def read(self, clear = True):
        # only read in the data if the file actually exists
        if not self.fileExists():
            return False
        # if clear is set then clear the list first
        if clear:
            self.lines = []
        # matches e.g.:  redirect_port tcp 10.0.0.1:80 8080 `# uuid` \
        # groups: 1=protocol 2=destHost 3=destPort 4=sourcePort 5=uuid
        # (raw string: the original non-raw pattern relied on Python
        # passing unrecognised escapes through, which warns on 3.6+;
        # compiled once instead of per line)
        rulePattern = re.compile(r'^\w+\s+(tcp|udp)\s+([^:]+):(\d+)\s(\d+)\s+`#\s+(\w+)`\s*[\\]*$')
        with open(self.filePath) as l4proxyFile:
            i = 0
            for line in l4proxyFile:
                line = line.strip()
                # ignore empty lines
                if len(line) > 0:
                    m = rulePattern.match(line)
                    if m is not None:
                        self.append(m.group(5), m.group(1), m.group(4), m.group(2), m.group(3))
                    else:
                        # non-rule lines are kept verbatim as the preamble;
                        # join with newlines after the first preamble line
                        if i > 0:
                            self.preamble += "\n"
                        self.preamble += line
                        i += 1
        return True

    # Action: append a value to our proxy rules
    #
    # Pre:
    # Post: given values have been added as a proxy rule
    #
    # Params: uuid - the container uuid. If set to None, then applies to host
    #         protocol - the protocol to use
    #         sourcePort - the source port to forward from
    #         destHost - the destination host to send this traffic to
    #         destPort - the destination port to send this traffic to
    #
    # Return: True if added, False if either port is already in use
    def append(self, uuid, protocol, sourcePort, destHost, destPort):
        # only append if the source and dest ports dont exist to prevent errors
        if (not self.srcPortExists(sourcePort)) and (not self.destPortExists(destPort)):
            self.lines.append({
                'uuid': uuid,
                'protocol': protocol,
                'destHost': destHost,
                'destPort': destPort,
                'sourcePort': sourcePort
            })
            return True
        return False

    # Action: checks if the given port exists as a source port already
    #
    # Pre:
    # Post:
    #
    # Params: srcPort - the port to check for
    #
    # Return: True if exists, False otherwise
    def srcPortExists(self, srcPort):
        # NOTE: comparison is exact (no str/int coercion), matching the
        # original behaviour
        return any(line['sourcePort'] == srcPort for line in self.lines)

    # Action: checks if the given port exists as a destination port already
    #
    # Pre:
    # Post:
    #
    # Params: srcPort - the port to check for (kept as-is for backward
    #         compatibility; it is really a *destination* port)
    #
    # Return: True if exists, False otherwise
    def destPortExists(self, srcPort):
        return any(line['destPort'] == srcPort for line in self.lines)

    # Action: removes all elements relating to a given uuid
    #
    # Pre:
    # Post: all elements with given uuid have been removed from self.lines
    #
    # Params: uuid - the uuid to remove
    #
    # Return: True if success, False otherwise
    def removeElementsByUUID(self, uuid):
        # dont do anything if the list is empty
        if len(self.lines) == 0:
            return True
        # keep every element that does not belong to the given uuid
        self.lines = [element for element in self.lines if element['uuid'] != uuid]
        return True

    # Action: checks whether the path to the layer4proxy file exists or not
    #
    # Pre:
    # Post:
    #
    # Params:
    #
    # Return: True if exists, False otherwise
    def fileExists(self):
        return os.path.isfile(self.filePath)

    # Action: writes a layer 4 proxy file.
    #
    # Pre:
    # Post: the self.lines object has been written to the layer 4 proxy file in the layer 4 proxy format
    #
    # Params:
    #
    # Return: True if successful, False otherwise
    def write(self):
        try:
            with open(self.filePath, 'w') as l4proxyFile:
                print(self.preamble, file=l4proxyFile)
                for i, line in enumerate(self.lines):
                    # form the rule line, tagging it with the uuid comment
                    text = 'redirect_port ' + line['protocol'] + " " + line['destHost'] + ':' + str(line['destPort']) + " " + str(line['sourcePort']) + ' `# ' + line['uuid'] + '`'
                    # add the shell line continuation if this isnt the last line
                    if i < (len(self.lines) - 1):
                        text += " \\"
                    print(text, file=l4proxyFile)
        except IOError:
            return False
        return True

    # Action: reload the layer 4 proxy data from self.filePath
    #
    # Pre: this object exists
    # Post: the script at self.filePath has been executed
    #
    # Params:
    #
    # Return: True if succeeded, False otherwise
    def reload(self):
        # run the script that this object represents
        process = Popen(['sh', self.filePath], stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdOut, stdErr = process.communicate()
        return (process.returncode == 0)
"repo_name": "vuid-com/tredly",
"path": "components/tredly-libs/python-common/objects/layer4proxy/layer4proxyfile.py",
"copies": "2",
"size": "6613",
"license": "mit",
"hash": 7777041940517652000,
"line_mean": 32.404040404,
"line_max": 179,
"alpha_frac": 0.5121729926,
"autogenerated": false,
"ratio": 4.529452054794521,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6041625047394521,
"avg_score": null,
"num_lines": null
} |
# A class to represent an Unbound File
import os.path
import re
from subprocess import Popen, PIPE
class UnboundFile:
    """Represents an unbound DNS local-data file. Parsed records are kept
    in self.lines as dicts keyed type/domainName/in/recordType/ipAddress/uuid."""

    # Constructor
    def __init__(self, filePath):
        self.filePath = filePath
        self.lines = []  # parsed records - list of dicts

    # Action: reads unbound file, and stores parsed lines to self
    #
    # Pre:
    # Post: self.lines has been updated with data parsed from self.filePath
    #
    # Params: clear - whether or not to clear self.lines before re-reading the file
    #
    # Return: True if success, False otherwise
    def read(self, clear = True):
        # only read in the data if the file actually exists
        if not self.fileExists():
            return False
        # if clear is set then clear the list first
        if clear:
            self.lines = []
        # matches e.g.: local-data: "cc.example.com IN A 10.0.0.1" # uuid
        # (raw string; pattern kept identical to the original, including
        # the [A-z] ranges, which also match some punctuation)
        recordPattern = re.compile(r"^([\w\-]+)\:\s*\"([A-z\-0-9\.]+)\s+(\w+)\s+(\w+)\s+((?:[0-9]{1,3}\.){3}[0-9]{1,3})\"\s+\#\s+([A-z0-9]+)")
        try:
            with open(self.filePath) as unboundFile:
                for line in unboundFile:
                    line = line.strip()
                    # ignore empty lines
                    if len(line) > 0:
                        m = recordPattern.match(line)
                        # BUGFIX: the original called m.group() without
                        # checking the match, raising AttributeError on any
                        # non-matching line (e.g. a comment); skip instead
                        if m is not None:
                            self.append(m.group(1), m.group(2), m.group(3), m.group(4), m.group(5), m.group(6))
            return True
        except IOError:
            return False

    # Action: append a record to this object
    #
    # Pre:
    # Post:
    #
    # Example unbound line: local-data: "tredly-cc.example.com IN A 10.99.255.254" # oIrUtDAu
    #
    # Params: type - above "local-data"
    #         domainName - domain name for this record. above "tredly-cc.example.com"
    #         inValue - above "IN"
    #         recordType - DNS record type. Above "A"
    #         ipAddress - the ip address to resolve domainName to. above "10.99.255.254"
    #         uuid - the uuid of the container this relates to. above "oIrUtDAu"
    #
    # Return: True if successful, False otherwise
    def append(self, type, domainName, inValue, recordType, ipAddress, uuid):
        self.lines.append({
            'type': type,
            'domainName': domainName,
            'in': inValue,
            'recordType': recordType,
            'ipAddress': ipAddress,
            'uuid': uuid
        })
        return True

    # Action: removes all elements relating to a given uuid
    #
    # Pre:
    # Post: all elements with given uuid have been removed from self.lines
    #
    # Params: uuid - the uuid to remove
    #
    # Return: True if success, False otherwise
    def removeElementsByUUID(self, uuid):
        # dont do anything if the list is empty
        if len(self.lines) == 0:
            return True
        # keep every element that does not belong to the given uuid
        self.lines = [element for element in self.lines if element['uuid'] != uuid]
        return True

    # Action: checks whether the path to the unbound file exists or not
    #
    # Pre:
    # Post:
    #
    # Params:
    #
    # Return: True if exists, False otherwise
    def fileExists(self):
        return os.path.isfile(self.filePath)

    # Action: writes an unbound file.
    #
    # Pre:
    # Post: self.filePath has been updated with the data from self.lines
    #
    # Params: deleteEmpty - if this object has no lines, then delete the file
    #
    # Return: True if successful, False otherwise
    def write(self, deleteEmpty = True):
        # if self.lines is empty then delete the file instead of writing
        # an empty one
        if (len(self.lines) == 0) and deleteEmpty:
            try:
                os.remove(self.filePath)
            except FileNotFoundError:
                # already gone - treat as success
                pass
            # BUGFIX: the original fell through and returned None here,
            # despite the documented True-on-success contract
            return True
        try:
            with open(self.filePath, 'w') as unbound_config:
                for line in self.lines:
                    # reassemble the record in unbound local-data format
                    text = ''
                    text += line['type'] + ': "'
                    text += line['domainName'] + " "
                    text += line['in'] + " "
                    text += line['recordType'] + " "
                    text += line['ipAddress'] + '" # '
                    text += line['uuid']
                    print(text, file=unbound_config)
        except IOError:
            return False
        return True
| {
"repo_name": "vuid-com/tredly",
"path": "components/tredly-libs/python-common/objects/tredly/unboundfile.py",
"copies": "2",
"size": "4957",
"license": "mit",
"hash": 352944071771337150,
"line_mean": 32.2751677852,
"line_max": 153,
"alpha_frac": 0.4960661691,
"autogenerated": false,
"ratio": 4.3906111603188664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5886677329418867,
"avg_score": null,
"num_lines": null
} |
# A class to represent an Unbound File
import os.path
import re
class ResolvConfFile:
    """Represents a resolv.conf file: a list of DNS search domains and a
    list of nameserver addresses."""

    # Constructor
    #
    # Params: filePath - path to the resolv.conf file
    #         search - initial list of search domains (default: empty)
    #         servers - initial list of nameservers (default: empty)
    def __init__(self, filePath = '/etc/resolv.conf', search = None, servers = None):
        self.filePath = filePath
        # BUGFIX: the original used mutable default arguments ([]), so all
        # instances created with the defaults shared the same lists
        self.search = [] if search is None else search
        self.nameservers = [] if servers is None else servers

    # Action: reads resolv.conf file, and stores parsed lines to self
    #
    # Pre: this object exists
    # Post: data has been read if file exists
    #
    # Params: clear - whether or not to clear this object when re-reading
    #
    # Return: True if success, False otherwise
    def read(self, clear = True):
        # only read in the data if the file actually exists
        if not self.fileExists():
            return False
        # if clear is set then clear the lists first
        if clear:
            self.search = []
            self.nameservers = []
        with open(self.filePath) as resolvConf:
            for line in resolvConf:
                line = line.strip()
                # ignore empty lines
                if len(line) > 0:
                    lineList = line.split()
                    if lineList[0] == "search":
                        # everything after the keyword is a search domain
                        self.search = lineList[1:]
                    elif lineList[0] == "nameserver":
                        self.nameservers.append(lineList[1])
        return True

    # Action: checks whether the path to the resolv.conf file exists or not
    #
    # Pre:
    # Post:
    #
    # Params:
    #
    # Return: True if exists, False otherwise
    def fileExists(self):
        return os.path.isfile(self.filePath)

    # Action: writes out a resolv.conf file
    #
    # Pre: this object exists
    # Post: this object has been written to self.filePath in the resolv.conf format
    #
    # Params:
    #
    # Return: True if successful, False otherwise
    def write(self):
        try:
            with open(self.filePath, 'w') as resolvConf:
                # single "search" line listing every search domain
                print(' '.join(['search'] + list(self.search)), file=resolvConf)
                # one "nameserver" line per server
                for nameserver in self.nameservers:
                    print('nameserver ' + nameserver, file=resolvConf)
            return True
        except IOError:
            return False
| {
"repo_name": "vuid-com/tredly",
"path": "components/tredly-libs/python-common/objects/tredly/resolvconffile.py",
"copies": "2",
"size": "3121",
"license": "mit",
"hash": -1755437721273979000,
"line_mean": 33.3076923077,
"line_max": 83,
"alpha_frac": 0.5081704582,
"autogenerated": false,
"ratio": 5.017684887459807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.013724713447609659,
"num_lines": 91
} |
# A class to represent a sweep of frames collected under the same conditions.
# This pertains to the dataset object in the early phases of processing.
import os
from xia2.Experts.FindImages import find_matching_images
from xia2.Handlers.Phil import PhilIndex
def SweepFactory(template, directory, beam=None):
    """A factory which will return a list of sweep objects which match
    the input template and directory."""
    from xia2.Schema import load_imagesets

    reverse_phi = PhilIndex.params.xia2.settings.input.reverse_phi
    matched = load_imagesets(template, directory, reversephi=reverse_phi)

    collected = []
    for iset in matched:
        scan = iset.get_scan()
        if scan is None:
            # an imageset without a scan cannot form a sweep
            continue
        first_image = scan.get_image_range()[0]
        collected.append(
            Sweep(
                template,
                directory,
                imageset=iset,
                id_image=first_image,
                beam=beam,
            )
        )
    return collected
class Sweep:
    """A class to represent a single sweep of frames."""

    def __init__(self, template, directory, imageset=None, id_image=None, beam=None):
        """Initialise the sweep by inspecting the images. id_image
        defines the first image in this sweep, and hence the identity of
        the sweep if more than one are found which match."""
        # attributes which together identify a sweep
        self._identity_attributes = [
            "_collect_start",
            "_collect_end",
            "_template",
            "_id_image",
        ]

        # -1 marks "first image unknown"
        if id_image is not None:
            self._id_image = id_image
        else:
            self._id_image = -1

        # populate the attributes of this object
        self._template = template
        self._directory = directory

        # populate the rest of the structure
        self._images = []

        if imageset is not None:
            self._imageset = imageset
            image_range = imageset.get_scan().get_image_range()
            self._images = list(range(image_range[0], image_range[1] + 1))

        # if the beam centre has been specified, then this will
        # override the headers
        self._beam_centre = beam

        self.update()

    def get_template(self):
        return self._template

    def get_directory(self):
        return self._directory

    def get_imageset(self):
        return self._imageset

    def get_images(self):
        # check if any more images have appeared before reporting
        self.update()
        image_range = self._imageset.get_scan().get_image_range()
        return list(range(image_range[0], image_range[1] + 1))

    def get_distance(self):
        return self._imageset.get_detector()[0].get_directed_distance()

    def get_wavelength(self):
        return self._imageset.get_beam().get_wavelength()

    def set_wavelength(self, wavelength):
        return self._imageset.get_beam().set_wavelength(wavelength)

    def get_beam_centre(self):
        from xia2.Schema.Interfaces.FrameProcessor import get_beam_centre

        detector = self._imageset.get_detector()
        beam = self._imageset.get_beam()
        return get_beam_centre(detector, beam)

    def update(self):
        """Check to see if any more frames have appeared - if they
        have update myself and reset."""
        from xia2.Applications.xia2setup import is_hdf5_name

        # HDF5 containers are not frame-per-file; nothing to rescan
        if is_hdf5_name(os.path.join(self._directory, self._template)):
            return

        images = find_matching_images(self._template, self._directory)

        if len(images) > len(self._images):
            self._images = images

            from xia2.Schema import load_imagesets

            imagesets = load_imagesets(
                self._template,
                self._directory,
                id_image=self._id_image,
                use_cache=False,
                reversephi=PhilIndex.params.xia2.settings.input.reverse_phi,
            )

            # pick the imageset with the most images.
            # BUGFIX: the original never updated max_images inside the
            # loop, so it effectively selected the *last* non-empty
            # imageset rather than the largest one.
            max_images = 0
            best_sweep = None
            for imageset in imagesets:
                scan = imageset.get_scan()
                if scan is None:
                    continue
                num_images = scan.get_num_images()
                if num_images > max_images:
                    max_images = num_images
                    best_sweep = imageset

            self._imageset = best_sweep
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Schema/Sweep.py",
"copies": "1",
"size": "4340",
"license": "bsd-3-clause",
"hash": -4535391322729451000,
"line_mean": 28.5238095238,
"line_max": 88,
"alpha_frac": 0.583640553,
"autogenerated": false,
"ratio": 4.165067178502879,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00015553647641444873,
"num_lines": 147
} |
# A class to represent a tredly partition
import os
import shutil
from objects.tidycmd.tidycmd import *
from includes.util import *
from includes.defines import *
from includes.output import *
from objects.tredly.container import *
class Partition:
# Constructor
def __init__(self, name, maxHdd = None, maxCpu = None, maxRam = None):
self.name = name;
self.maxHdd = maxHdd;
self.maxCpu = maxCpu;
self.maxRam = maxRam;
# lists
self.publicIPs = []; # list of public ips assigned to this partition
self.ip4Whitelist = []; # list of ip addresses whitelisted for this partition
# Action: check to see if this partition exists in ZFS
#
# Pre: this object exists
# Post:
#
# Params:
#
# Return: True if exists, False otherwise
def existsInZfs(self):
partitionLocation = ZFS_TREDLY_PARTITIONS_DATASET + '/' + self.name
# check that the zfs dataset for this partition actually exists
zfsList = TidyCmd(["zfs", "list", partitionLocation])
zfsList.run()
return (zfsList.returnCode == 0)
# Action: destroy this partition on the host
#
# Pre: this object exists
# Post: the partition on the file system has been deleted
#
# Params:
#
# Return: True if succeeded, False otherwise
def destroy(self):
# make sure our name isnt none (so we dont destroy the entire partitions dataset)
# and has a length
if ((self.name != None) and (len(self.name) > 0)):
# destroy containers within this partition
zfsDestroy = TidyCmd(["zfs", "destroy", "-rf", ZFS_TREDLY_PARTITIONS_DATASET + '/' + self.name])
zfsDestroy.run()
# clean up the directory if its left behind and destroy succeeded
if (zfsDestroy.returnCode == 0):
mountPoint = TREDLY_PARTITIONS_MOUNT + '/' + self.name
# ensure the mountpoint is a directory
if (os.path.isdir(mountPoint)):
try:
shutil.rmtree(mountPoint)
except:
return False
return True
else:
return False
# catch all errors
return False
# Action: get a list of all containers within this partition
#
# Pre: this object exists
# Post: any containers within this partition have been returned
#
# Params: alphaOrder - boolean, if true sort in alphabetical order
#
# Return: list of Container objects
def getContainers(self, alphaOrder = True):
# get a list of containers within this partition
zfsCmd = TidyCmd(["zfs", "list", "-d3", "-rH", "-o", "name", ZFS_TREDLY_PARTITIONS_DATASET + '/' + self.name + '/' + TREDLY_CONTAINER_DIR_NAME])
zfsCmd.appendPipe(["grep", "-Ev", TREDLY_CONTAINER_DIR_NAME + '$|*./root'])
if (alphaOrder):
zfsCmd.appendPipe(["sort", "-t", "^", "-k", "2"])
containerStdOut = zfsCmd.run()
# list to return
containers = []
for container in containerStdOut.splitlines():
# extract uuid from dataset
uuid = container.split('/')[-1]
# create new container object
container = Container()
# set up the dataset name that contains this containers data
containerDataset = ZFS_TREDLY_PARTITIONS_DATASET + "/" + self.name + "/" + TREDLY_CONTAINER_DIR_NAME + "/" + uuid
# load the containers properties from ZFS
container.loadFromZFS(containerDataset)
# append to our list
containers.append(container)
return containers
# Action: get the amount of disk space used by this partition
#
# Pre: this object exists
# Post: the disk usage of this partition has been returned
#
# Params:
#
# Return: string of disk size
def getDiskUsage(self):
zfs = ZFSDataset(ZFS_TREDLY_PARTITIONS_DATASET + "/" + self.name)
return zfs.getProperty('used')
# Action: get the uuids of the containers in this partition
# this is a much quicker method than getContainers()
#
# Pre: this object exists
# Post: the uuids of containers in this partition have been returned
#
# Params:
#
# Return: list of strings (container uuids)
def getContainerUUIDs(self):
    """Return the uuid of every container dataset within this partition."""
    containerDirDataset = ZFS_TREDLY_PARTITIONS_DATASET + '/' + self.name + '/' + TREDLY_CONTAINER_DIR_NAME
    listCmd = TidyCmd(["zfs", "list", "-d3", "-rH", "-o", "name", containerDirDataset])
    # filter out the container dir dataset itself and the */root datasets
    listCmd.appendPipe(["grep", "-Ev", TREDLY_CONTAINER_DIR_NAME + '$|*./root'])
    # the uuid is the final component of each dataset name
    return [line.split('/')[-1] for line in listCmd.run().splitlines()]
# Action: get the number of files/directories in this partition
#
# Pre: this object exists
# Post: the number of files/directories has been returned
#
# Params: exclude - list of directory names to prune at the top level only
#
# Return: int
def getNumFilesDirs(self, exclude = ['cntr']):
    """Count all files and directories below this partition's mountpoint.

    Directories named in `exclude` are pruned only at the top level of the
    partition (e.g. '/cntr'); identically-named directories deeper in the
    tree are counted as normal.
    """
    count = 0
    # depth 0 == the root of the partition's directory tree
    depth = 0
    # loop over the directory structure and count the dirs/files, excluding '/cntr' from the root dir
    for root, dirs, files in os.walk(TREDLY_PARTITIONS_MOUNT + '/' + self.name, topdown=True):
        if (depth == 0):
            # prune excluded names in-place so os.walk never descends into them
            dirs[:] = [d for d in dirs if d not in exclude]
        # count this level's directories and files
        count += len(dirs) + len(files)
        # bug fix: the counter was previously never incremented, so the
        # root-only exclusion was silently applied at every depth
        depth += 1
    return count
| {
"repo_name": "tredly/tredly",
"path": "components/tredly-libs/python-common/objects/tredly/partition.py",
"copies": "2",
"size": "5577",
"license": "mit",
"hash": 13385306043224484,
"line_mean": 31.6140350877,
"line_max": 152,
"alpha_frac": 0.5892056661,
"autogenerated": false,
"ratio": 4.020908435472243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5610114101572242,
"avg_score": null,
"num_lines": null
} |
# A class to retrieve data from a tredly host
from subprocess import Popen, PIPE
import re
import builtins
from includes.util import *
from includes.defines import *
from includes.output import *
from objects.nginx.nginxblock import *
class Layer7Proxy:
# Constructor
#def __init__(self):
# Action: reload nginx on the host
#
# Pre:
# Post: nginx configuration files have been reloaded
#
# Params:
#
# Return: True if succeeded, False otherwise
def reload(self):
    """Ask the host's nginx service to reload its configuration files."""
    reloadProc = Popen(['service', 'nginx', 'reload'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # wait for the service command to finish; output is not used
    reloadProc.communicate()
    if (reloadProc.returncode == 0):
        return True
    e_error("Failed to reload layer 7 proxy")
    return False
# Action: add a url to the layer 7 proxy
#
# Pre:
# Post: the given values have been added as a proxy URL definition within nginx
#
# Params: url - the url to add (domain plus optional directory, no protocol prefix)
#         ip4 - the ip4 address of the container serving this url
#         maxFileSize - the maximum file size that this URL can receive, or None
#         websocket - boolean, if True proxy websocket (ws/wss) traffic for this URL
#         servernameFilename - the filename of the server_name file to apply this to
#         upstreamFilename - the filename of the upstream file to apply this to
#         errorResponse - whether or not to have the layer 7 proxy respond with its own error page, or to allow the container to serve its own
#         sslCert - the path to the cert eg ssl/stage/star.tld.com/server.crt
#         sslKey - the path to the key, eg ssl/stage/star.tld.com/server.key
#         includes - any extra files to include within this URL
#
# Return: True if succeeded, False otherwise
def registerUrl(self, url, ip4, maxFileSize, websocket, servernameFilename, upstreamFilename, errorResponse, sslCert = None, sslKey = None, includes = None):
    # split the url into its domain and directory parts
    if ('/' in url.rstrip('/')):
        urlDomain = url.split('/', 1)[0]
        # normalise the directory part to exactly one trailing slash
        urlDirectory = '/' + url.split('/', 1)[1].rstrip('/') + '/'
    else:
        # no directory part was given, so serve from the root
        urlDomain = url.rstrip('/')
        urlDirectory = '/'
    # work out which protocol we are serving on: a cert means HTTPS
    if (sslCert is None):
        port = "80"
        protocol = "http"
    else:
        port = "443"
        protocol = "https"
    # create nginxblocks from each of these files
    servername = NginxBlock(None, None, '/usr/local/etc/nginx/server_name/' + nginxFormatFilename(servernameFilename))
    servername.loadFile()
    upstream = NginxBlock(None, None, '/usr/local/etc/nginx/upstream/' + nginxFormatFilename(upstreamFilename))
    upstream.loadFile()
    #####################################
    # SET UP THE UPSTREAM FILE
    # check if the upstream block exists (indexing raises if missing)
    try:
        upstream.upstream[upstreamFilename]
    except (KeyError, TypeError):
        # not defined, so define it
        upstream.addBlock('upstream', upstreamFilename)
    # add the ip address of this container to the upstream block
    upstream.upstream[upstreamFilename].addAttr('server', ip4 + ':' + port)
    # save the upstream file
    if (not upstream.saveFile()):
        return False
    #####################################
    # SET UP THE SERVER_NAME FILE
    # check if the server block exists (indexing raises if missing)
    try:
        servername.server[0]
    except (KeyError, TypeError):
        # not defined, so define it
        servername.addBlock('server')
    # add ssl specific items
    if (sslCert is not None):
        servername.server[0].attrs['ssl'][0] = "on"
        servername.server[0].attrs['ssl_certificate'][0] = sslCert
        servername.server[0].attrs['ssl_certificate_key'][0] = sslKey
    else:
        # remove any stale ssl entries left over from a previous registration
        if ('ssl' in servername.server[0].attrs.keys()):
            del servername.server[0].attrs['ssl']
        if ('ssl_certificate' in servername.server[0].attrs.keys()):
            del servername.server[0].attrs['ssl_certificate']
        if ('ssl_certificate_key' in servername.server[0].attrs.keys()):
            del servername.server[0].attrs['ssl_certificate_key']
    # add standard lines
    servername.server[0].attrs['server_name'][0] = urlDomain
    servername.server[0].attrs['listen'][0] = builtins.tredlyCommonConfig.httpProxyIP + ":" + port
    # add the location block for this directory if it does not already exist
    try:
        servername.server[0].location[urlDirectory]
    except (KeyError, TypeError):
        # not defined, so define it
        servername.server[0].addBlock('location', urlDirectory)
    # add any includes that we received
    if (includes is not None):
        for include in includes:
            servername.server[0].location[urlDirectory].addAttr('include', include)
    else:
        # remove any previously registered includes
        if ('include' in servername.server[0].location[urlDirectory].attrs.keys()):
            del servername.server[0].location[urlDirectory].attrs['include']
    # include websockets if requested, otherwise include http/https include file
    if (websocket):
        servername.server[0].location[urlDirectory].addAttr('include', 'proxy_pass/ws_wss')
    else:
        servername.server[0].location[urlDirectory].addAttr('include', 'proxy_pass/http_https')
    # add maxfilesize if requested
    if (maxFileSize is not None):
        servername.server[0].location[urlDirectory].attrs['client_max_body_size'][0] = maxFileSize
    else:
        # check if its already been applied and remove it
        if ('client_max_body_size' in servername.server[0].location[urlDirectory].attrs.keys()):
            del servername.server[0].location[urlDirectory].attrs['client_max_body_size']
    # if errorresponse is true then set up tredlys error pages, otherwise the containers page will be used
    if (errorResponse):
        # include 404 page for this URL
        servername.server[0].location[urlDirectory].attrs['error_page'][0] = '404 /tredly_error_docs/404.html'
    else:
        # check if its already been applied and remove it
        if ('error_page' in servername.server[0].location[urlDirectory].attrs.keys()):
            del servername.server[0].location[urlDirectory].attrs['error_page']
    # add the proxy pass attr pointing at the upstream block created above
    servername.server[0].location[urlDirectory].attrs['proxy_pass'][0] = protocol + "://" + upstreamFilename
    ######################
    # Set up error docs location block
    try:
        servername.server[0].location['/tredly_error_docs']
    except (KeyError, TypeError):
        # not defined, so define it
        servername.server[0].addBlock('location', '/tredly_error_docs')
    # set/overwrite the values
    servername.server[0].location['/tredly_error_docs'].attrs['alias'][0] = '/usr/local/etc/nginx/tredly_error_docs'
    servername.server[0].location['/tredly_error_docs'].attrs['log_not_found'][0] = 'off'
    servername.server[0].location['/tredly_error_docs'].attrs['access_log'][0] = 'off'
    # 'internal' is a bare nginx directive, so it carries no value
    servername.server[0].location['/tredly_error_docs'].attrs['internal'][0] = None
    # save the file
    if (not servername.saveFile()):
        return False
    return True
# Action: add an access file to the layer 7 proxy
#
# Pre:
# Post: an access file has been created
#
# Params: file - the path to the file to save this as
#         whitelist - a list of ip addresses to whitelist in this access file
#         deny - whether or not to add a deny all rule to the end of this file
#
# Return: True if succeeded, False otherwise
def registerAccessFile(self, file, whitelist, deny = False):
    """Write an nginx access file allowing the given addresses."""
    accessBlock = NginxBlock(None, None, file)
    accessBlock.loadFile()
    if (len(whitelist) > 0):
        # allow each whitelisted address individually
        for address in whitelist:
            accessBlock.addAttr('allow', address)
    else:
        # no whitelist was given, so allow everything
        accessBlock.addAttr('allow', 'all')
    if (deny):
        # finish with a catch-all deny rule
        accessBlock.addAttr('deny', 'all')
    return accessBlock.saveFile()
# Action: add a redirect URL to the layer 7 proxy
#
# Pre:
# Post: the given url has been added to the layer 7 proxy
#
# Params: redirectFrom - the URL to redirect from, including protocol. eg https://www.test.com/olddir
#             NOTE(review): the code below splits redirectFrom as though it has
#             NO protocol prefix (the protocol is derived from
#             redirectFromSslCert) -- confirm which form callers actually pass
#         redirectTo - the URL to redirect to, including protocol. eg https://www.test.com/newdir
#         redirectFromSslCert - the SSL Cert to apply to the redirectFrom URL
#         redirectFromSslKey - the SSL key to apply to the redirectFrom URL
#
# Return: True if succeeded, False otherwise
def registerUrlRedirect(self, redirectFrom, redirectTo, redirectFromSslCert = None, redirectFromSslKey = None):
    # split the url into its domain and directory parts
    if ('/' in redirectFrom.rstrip('/')):
        urlDomain = redirectFrom.split('/', 1)[0]
        # normalise the directory part to exactly one trailing slash
        urlDirectory = '/' + redirectFrom.split('/', 1)[1].rstrip('/') + '/'
    else:
        urlDomain = redirectFrom.rstrip('/')
        urlDirectory = '/'
    # work out which protocol we are redirecting FROM: a cert means HTTPS
    if (redirectFromSslCert is None):
        redirectFromProtocol = 'http'
        redirectFromPort = "80"
    else:
        redirectFromProtocol = 'https'
        redirectFromPort = "443"
    # split out the redirect to parts (redirectTo does include a protocol)
    redirectToProtocol = redirectTo.split('://')[0]
    redirectToDomain = redirectTo.split('://')[1].rstrip('/').split('/',1)[0]
    # form the file path - remove trailing slash, and replace dashes with dots
    filePath = "/usr/local/etc/nginx/server_name/" + redirectFromProtocol + '-' + nginxFormatFilename(urlDomain.rstrip('/'))
    # create nginxblock object
    servernameRedirect = NginxBlock(None, None, filePath)
    servernameRedirect.loadFile()
    # check if the server block exists, and add it if it doesnt
    try:
        servernameRedirect.server[0]
    except (KeyError, TypeError):
        # not defined, so define it
        servernameRedirect.addBlock('server')
    # add attrs
    servernameRedirect.server[0].attrs['server_name'][0] = urlDomain
    servernameRedirect.server[0].attrs['listen'][0] = builtins.tredlyCommonConfig.httpProxyIP + ":" + redirectFromPort
    # enable ssl if a cert was presented
    if (redirectFromSslCert is not None):
        servernameRedirect.server[0].attrs['ssl'][0] = "on"
        servernameRedirect.server[0].attrs['ssl_certificate'][0] = redirectFromSslCert
        servernameRedirect.server[0].attrs['ssl_certificate_key'][0] = redirectFromSslKey
    else:
        # remove any stale ssl entries
        if ('ssl' in servernameRedirect.server[0].attrs.keys()):
            del servernameRedirect.server[0].attrs['ssl']
        if ('ssl_certificate' in servernameRedirect.server[0].attrs.keys()):
            del servernameRedirect.server[0].attrs['ssl_certificate']
        if ('ssl_certificate_key' in servernameRedirect.server[0].attrs.keys()):
            del servernameRedirect.server[0].attrs['ssl_certificate_key']
    # add the location block if it doesnt exist
    try:
        servernameRedirect.server[0].location[urlDirectory]
    except (KeyError, TypeError):
        # not defined, so define it
        servernameRedirect.server[0].addBlock('location', urlDirectory)
    # issue a permanent (301) redirect, preserving the request uri
    servernameRedirect.server[0].location[urlDirectory].attrs['return'][0] = "301 " + redirectToProtocol + '://' + redirectToDomain + '$request_uri'
    # save the file and return whether it succeeded or not
    return servernameRedirect.saveFile()
"repo_name": "tredly/tredly",
"path": "components/tredly-libs/python-common/objects/nginx/layer7proxy.py",
"copies": "2",
"size": "12548",
"license": "mit",
"hash": -8614164896421857000,
"line_mean": 42.5729166667,
"line_max": 161,
"alpha_frac": 0.5998565508,
"autogenerated": false,
"ratio": 4.309065934065934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016250104405445366,
"num_lines": 288
} |
# A class to retrieve data from a tredly host
from subprocess import Popen, PIPE
import re
from objects.tidycmd.tidycmd import *
from includes.util import *
from includes.defines import *
from includes.output import *
class TredlyHost:
# Action: return a list of partition names on this host
#
# Pre:
# Post: the names of all partitions have been returned
#
# Params:
#
# Return: list of strings
def getPartitionNames(self):
    """Return the name of every partition dataset on this host."""
    # create a list to pass back
    partitionNames = []
    # list the direct children of the partitions dataset
    # fix: '-o' and 'name' were previously fused into a single '-oname' argv
    # item by adjacent-string concatenation; pass them separately for
    # consistency with the rest of this file
    cmd = ['zfs', 'list', '-H', '-o', 'name', '-r', '-d', '1', ZFS_TREDLY_PARTITIONS_DATASET]
    process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdOut, stdErr = process.communicate()
    if (process.returncode != 0):
        e_error("Failed to get list of partition names")
    # convert stdout to string
    stdOut = stdOut.decode(encoding='UTF-8').rstrip()
    for line in stdOut.splitlines():
        # strip off the dataset part
        line = line.replace(ZFS_TREDLY_PARTITIONS_DATASET, '').strip()
        # the parent dataset itself becomes an empty line; skip it
        if (len(line) > 0):
            # and now strip the slash which should be at the beginning
            partitionNames.append(line.lstrip('/'))
    return partitionNames
# Action: get a list of ip addresses of all containers in the given group/partition
#
# Pre:
# Post:
#
# Params: containerGroup - the containergroup to search for
#         partitionName - the partition name to search for
#
# Return: list of strings (ip addresses)
def getContainerGroupContainerIps(self, containerGroup, partitionName):
    """Return the ip4 address of every container in the given group."""
    # form the base dataset to search in
    dataset = ZFS_TREDLY_PARTITIONS_DATASET + "/" + partitionName + '/cntr'
    # and the property that holds the container's ip address
    datasetProperty = ZFS_PROP_ROOT + ':ip4_addr'
    # get a list of uuids in this group
    groupMemberUUIDs = self.getContainerGroupContainerUUIDs(containerGroup, partitionName)
    # create a list to pass back
    ipList = []
    # loop over the members and extract their ip addresses
    for uuid in groupMemberUUIDs:
        # fix: '-o' and 'name,value' were previously fused into '-oname,value'
        # by adjacent-string concatenation; pass them separately
        cmd = ['zfs', 'get', '-H', '-r', '-o', 'name,value', datasetProperty, dataset + '/' + uuid]
        process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdOut, stdErr = process.communicate()
        if (process.returncode != 0):
            e_error("Failed to get list of containergroup ips")
        # convert stdout to string
        stdOut = stdOut.decode(encoding='UTF-8').rstrip()
        # extract the data if it exists
        if (re.match("^" + dataset + '/' + uuid, stdOut)):
            # extract the value part (second whitespace-separated column)
            ip4Part = stdOut.split()[1]
            # the value looks like 'iface|a.b.c.d/prefix'; capture the address
            regex = r'^(\w+)\|([\w.]+)\/(\d+)$'
            m = re.match(regex, ip4Part)
            if (m is not None):
                ipList.append(m.group(2))
    return ipList
# Action: get a list of container uuids within a container group and partition
#
# Pre:
# Post:
#
# Params: containerGroup - the container group to search for
#         partitionName - the partition name to search for
#
# Return: list of strings (uuids)
def getContainerGroupContainerUUIDs(self, containerGroup, partitionName):
    """Return the uuid of every container belonging to the given group."""
    # form the base dataset to search in
    dataset = ZFS_TREDLY_PARTITIONS_DATASET + "/" + partitionName
    # and the property that holds the containergroup name
    datasetProperty = ZFS_PROP_ROOT + ':containergroupname'
    # fix: '-o' and 'name,value' were previously fused into '-oname,value'
    # by adjacent-string concatenation; pass them separately
    cmd = ['zfs', 'get', '-H', '-r', '-o', 'name,value', datasetProperty, dataset]
    process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdOut, stdErr = process.communicate()
    if (process.returncode != 0):
        e_error("Failed to get list of containergroup members")
    # convert stdout to string
    stdOut = stdOut.decode(encoding='UTF-8').rstrip()
    # create a list to pass back
    containerList = []
    # loop over the results looking for our value
    for line in stdOut.splitlines():
        # check if the value column matches our containergroup
        if (re.match(r"^.*\s" + containerGroup + "$", line)):
            # extract the dataset part
            datasetPart = line.split()[0]
            # the uuid is the final component of the dataset name
            containerList.append(datasetPart.split('/')[-1])
    return containerList
# Action: get a list of containers within a partition
#
# Pre:
# Post:
#
# Params: partitionName - the partition to search in
#
# Return: list of strings (uuids)
def getPartitionContainerUUIDs(self, partitionName):
    """Return the uuid of every container dataset in the given partition."""
    # form the base dataset to search in
    dataset = ZFS_TREDLY_PARTITIONS_DATASET + "/" + partitionName + "/" + TREDLY_CONTAINER_DIR_NAME
    # fix: '-o' and 'name' were previously fused into '-oname' by
    # adjacent-string concatenation; pass them separately
    cmd = ['zfs', 'list', '-H', '-r', '-o', 'name', dataset]
    process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdOut, stdErr = process.communicate()
    if (process.returncode != 0):
        e_error("Failed to get list of partition members")
    # convert stdout to string
    stdOut = stdOut.decode(encoding='UTF-8').rstrip()
    # create a list to pass back
    containerList = []
    # loop over the results keeping only datasets below the container dir
    for line in stdOut.splitlines():
        if (re.match("^" + dataset + "/.*$", line)):
            # the uuid is the final component of the dataset name
            containerList.append(line.split('/')[-1])
    return containerList
# Action: check if a container exists in a given partition
#
# Pre:
# Post:
#
# Params: partitionName - the partition to search in
#
# Return: True if exists, False otherwise
def containerExists(self, uuid, partitionName = None):
if (uuid is None):
return False
if (len(uuid) == 0):
return False
# find the partition name if partition name was empty
if (partitionName is None):
# get the partition name
partitionName = self.getContainerPartition(uuid)
# if None was returned then the container doesnt exist
if (partitionName is None):
return False
# get a list of containers in this partition
containerUUIDs = self.getPartitionContainerUUIDs(partitionName)
# check if it exists in the array
if (uuid in containerUUIDs):
return True
return False
# Action: finds the partition that the given uuid resides on
#
# Pre:
# Post:
#
# Params: uuid - the uuid to search for
#
# Return: string (partition name), or None when not found
def getContainerPartition(self, uuid):
    """Return the name of the partition holding the container with uuid."""
    listCmd = ['zfs', 'list', '-d6', '-rH', '-o', 'name', ZFS_TREDLY_PARTITIONS_DATASET]
    listProc = Popen(listCmd, stdout=PIPE)
    stdOut, stdErr = listProc.communicate()
    # a container dataset ends in '<containerdir>/<uuid>'
    datasetSuffix = TREDLY_CONTAINER_DIR_NAME + "/" + uuid
    for line in stdOut.decode("utf-8").strip().splitlines():
        if (line.endswith(datasetSuffix)):
            # strip the partitions dataset prefix and the container suffix,
            # leaving just the partition name
            partitionName = line.replace(ZFS_TREDLY_PARTITIONS_DATASET + '/', '')
            return partitionName.replace('/' + datasetSuffix, '')
    # nothing matched
    return None
# Action: search for all containers that have a given zfs array
#
# Pre:
# Post:
#
# Params: datasetProperty - the property to search in
#         arrayName - the array name to search for
#
# Return: set of strings (uuids). a set is used here to enforce uniqueness
def getContainersWithArray(self, datasetProperty, arrayName):
    """Return the uuids of containers whose ZFS array property contains arrayName."""
    # form the base dataset to search in
    dataset = ZFS_TREDLY_PARTITIONS_DATASET
    # dump every property of every dataset
    # fix: '-o' and 'name,value,property' were previously fused into one
    # argv item by adjacent-string concatenation; pass them separately
    cmd = ['zfs', 'get', '-H', '-r', '-o', 'name,value,property', 'all', dataset]
    result = Popen(cmd, stdout=PIPE)
    stdOut, stdErr = result.communicate()
    # convert stdout to string
    stdOutString = stdOut.decode("utf-8").strip()
    uuids = []
    # loop over the results looking for array element properties, which are
    # named '<property>:<index>'
    # (a no-op 'line.strip()' whose result was discarded has been removed)
    for line in stdOutString.splitlines():
        if (re.search(datasetProperty + r':\d+$', line)):
            # split it up into elements: [dataset, value, property]
            lineElements = line.split()
            # match 2nd element to the arrayName
            # and the 3rd element to the dataset property name
            if (lineElements[1] == arrayName) and (re.match(datasetProperty + r':\d+$', lineElements[2])):
                # found it so extract the uuid from the first element and append to our array
                uuids.append(lineElements[0].split('/')[-1])
    # return a set as we are only after unique values
    return set(uuids)
# Action: find the uuid of a container with containerName
#
# Pre:
# Post:
#
# Params: partitionName - the partition to search in
#         containerName - the name of the container to search for
#
# Return: string (uuid), or None (implicitly) when no container matches
def getUUIDFromContainerName(self, partitionName, containerName):
    # form the base dataset to search in
    dataset = ZFS_TREDLY_PARTITIONS_DATASET + "/" + partitionName + "/" + TREDLY_CONTAINER_DIR_NAME
    # dump all properties of all datasets, then filter to the containername
    # property by piping through grep
    zfsCmd = ['zfs', 'get', '-H', '-r', '-o', 'name,property,value', 'all', dataset]
    grepCmd = ['grep', '-F', ZFS_PROP_ROOT + ':containername']
    # wire zfs stdout into grep stdin to form the pipeline
    zfsProcess = Popen(zfsCmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    grepProcess = Popen(grepCmd, stdin=zfsProcess.stdout, stdout=PIPE, stderr=PIPE)
    stdOut, stdErr = grepProcess.communicate()
    # grep exits non-zero when nothing matched (or on error)
    if (grepProcess.returncode != 0):
        return None
    # convert stdout to string
    stdOut = stdOut.decode(encoding='UTF-8').rstrip()
    # loop over the results looking for our value
    for line in iter(stdOut.splitlines()):
        # split up the line into [dataset, property, value]
        splitLine = line.split()
        # check if this dataset's containername value matches
        if (splitLine[2] == containerName):
            # the uuid is the final component of the dataset name
            return splitLine[0].split('/')[-1]
    # NOTE: falls through and implicitly returns None when no name matched
# Action: find the containername of a container with uuid
#
# Pre: container exists
# Post:
#
# Params: uuid - the uuid of the container to search for
#         partitionName - the partition to search in, or None to find it
#
# Return: string (container name)
def getContainerNameFromUUID(self, uuid, partitionName = None):
    """Return the containername ZFS property of the container with uuid."""
    # look up the partition if the caller did not supply one
    if (partitionName is None):
        partitionName = self.getContainerPartition(uuid)
    # read the containername property straight off the container's dataset
    containerDataset = ZFS_TREDLY_PARTITIONS_DATASET + "/" + partitionName + "/" + TREDLY_CONTAINER_DIR_NAME + '/' + uuid
    return ZFSDataset(containerDataset).getProperty(ZFS_PROP_ROOT + ':containername')
# Action: get a list of all container UUIDs on this host
#
# Pre:
# Post: list has been returned
#
# Params:
#
# Return: list (uuids)
def getAllContainerUUIDs(self):
# a list to return
uuids = []
# get a list of partitions
partitionNames = self.getPartitionNames()
for partition in partitionNames:
# add the containers in this partition to our list
uuids = uuids + self.getPartitionContainerUUIDs(partition)
return uuids
# Action: check if a container with uuid is running
#
# Pre:
# Post:
#
# Params: uuid - the uuid to check
#
# Return: boolean - True if running, False otherwise
def containerIsRunning(self, uuid):
    """Return True when the jail for this container is currently running."""
    jlsProc = Popen(['jls', '-j', 'trd-' + uuid], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    jlsProc.communicate()
    # jls exits with 0 only when the named jail exists (i.e. is running)
    return (jlsProc.returncode == 0)
# Action: initialise the zfs datasets ready for use by tredly
#
# Pre:
# Post: default datasets exist
#
# Params:
#
# Return: boolean - True if success, False otherwise
def zfsInit(self):
    # NOTE: not yet implemented in python; always reports failure. The
    # triple-quoted block below is unreachable reference material kept from
    # the bash port for a future implementation.
    return False
    '''
    # initialise the zfs datasets ready for use by tredly
    # TODO: this should probably return something meaningful, at the moment it mimics the bash version's behaviour
    zfsTredlyDataset = ZFSDataset(ZFS_TREDLY_DATASET, TREDLY_MOUNT)
    # create it if it doesnt already exist
    if (not zfsTredlyDataset.exists()):
        zfsTredlyDataset.create()
    zfsDownloadsDataset = ZFSDataset(ZFS_TREDLY_DOWNLOADS_DATASET, TREDLY_DOWNLOADS_MOUNT)
    # create it if it doesnt already exist
    if (not zfsDownloadsDataset.exists()):
        zfsDownloadsDataset.create()
    zfsReleasesDataset = ZFSDataset(ZFS_TREDLY_RELEASES_DATASET, TREDLY_RELEASES_MOUNT)
    if (not zfsReleasesDataset.exists()):
        zfsReleasesDataset.create()
    zfsLogDataset = ZFSDataset(ZFS_TREDLY_LOG_DATASET, TREDLY_LOG_MOUNT)
    if (not zfsLogDataset.exists()):
        zfsLogDataset.create()
    zfsPartitionsDataset = ZFSDataset(ZFS_TREDLY_PARTITIONS_DATASET, TREDLY_PARTITIONS_MOUNT)
    if (not zfsLogDataset.exists()):
        zfsLogDataset.create()
    # TODO: create a default partition
    # create a default partition under the partitions dataset
    #if [[ $( zfs list "${ZFS_TREDLY_PARTITIONS_DATASET}/${TREDLY_DEFAULT_PARTITION}" 2> /dev/null | wc -l ) -eq 0 ]]; then
    #partition_create "${TREDLY_DEFAULT_PARTITION}" "" "" "" "" "true"
    #fi
    '''
| {
"repo_name": "tredly/tredly",
"path": "components/tredly-libs/python-common/objects/tredly/tredlyhost.py",
"copies": "2",
"size": "14413",
"license": "mit",
"hash": -4128565464779281000,
"line_mean": 34.239608802,
"line_max": 127,
"alpha_frac": 0.5979324221,
"autogenerated": false,
"ratio": 4.11095265259555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5708885074695551,
"avg_score": null,
"num_lines": null
} |
'''A class to store estimators once retrieved from the database.
'''
import numpy as np
import os
from itertools import izip
from dmrg_helpers.extract.estimator_site import EstimatorSite
from dmrg_helpers.view.xy_data import XYDataDict
class EstimatorData(object):
    """Numerical payload of an Estimator.

    Holds two parallel lists: ``sites_list[i]`` gives the sites at which the
    single-site operators of the correlator act, and ``values_list[i]`` the
    value of the correlator there.
    """
    def __init__(self):
        self.sites_list = []
        self.values_list = []

    def add(self, sites, value):
        """Append a single (sites, value) measurement."""
        self.sites_list.append(sites)
        self.values_list.append(value)

    def sites(self):
        """Return the site tuples of every measurement as a list."""
        return [entry.sites for entry in self.sites_list]

    def x(self):
        """Return the first site of each measurement as a chain index."""
        return map(EstimatorSite.x, self.sites_list)

    def x_as_np(self):
        """Same as ``x`` but as a numpy integer array."""
        return np.array(map(EstimatorSite.x, self.sites_list), dtype=int)

    def y(self):
        """Return the measured values as a list."""
        return self.values_list

    def y_as_np(self):
        """Return the measured values as a numpy float array."""
        return np.array(self.values_list, dtype=float)
class Estimator(object):
    """A class for storing data for estimators once retrieved from a database.

    You use this class to store the result of calling ``get_estimator`` on
    the Database class.

    Parameters
    ----------
    name: a tuple of strings.
        The names of the single-site operators that make up the estimator.
    meta_keys: a string.
        The metadata keys (e.g. Hamiltonian parameter names) joined with the
        ':' delimiter, alphabetically ordered.
    keys: a tuple of strings.
        The individual keys, obtained by splitting ``meta_keys``.
    data: a dict mapping a meta-value string to an EstimatorData.
        Holds the numerical part of the estimator, one entry per combination
        of labelling parameters (Hamiltonian parameters, system length, ...).
    """
    def __init__(self, name, meta_keys):
        self.name = name
        self.meta_keys = meta_keys
        self.keys = self.meta_keys.split(':')
        self.data = {}

    def __len__(self):
        """Return how many labelled data sets this estimator holds."""
        return len(self.data)

    def get_metadata_as_dict(self, meta_val):
        """Return a dict pairing each metadata key with its value.

        Parameters
        ----------
        meta_val: one of the meta-value strings used as keys of ``data``.
        """
        return dict(izip(self.keys, meta_val.split(':')))

    def add_fetched_data(self, fetched_data):
        """Add rows fetched from the database to this Estimator.

        Parameters
        ----------
        fetched_data : list of tuples as returned by the database; each
            tuple holds an EstimatorName, an EstimatorSite, the correlator
            value, and the meta-value string.
        """
        for row in fetched_data:
            meta_vals = row[3]
            # lazily create the bucket for this parameter combination
            if meta_vals not in self.data:
                self.data[meta_vals] = EstimatorData()
            self.data[meta_vals].add(row[1], row[2])

    def save(self, filename, output_dir=os.getcwd()):
        """Save the correlator data to a file.

        If the Estimator holds more than one data set (e.g. several system
        sizes), each set goes to its own file whose name is `filename` with
        the metadata names and values appended.
        """
        XYDataDict.from_estimator(self).save(filename, output_dir)

    def save_as_txt(self, filename, output_dir=os.getcwd()):
        """Save the correlator data to a plain-text file.

        Same file-splitting behaviour as ``save``. Inside each file the data
        is organized in two columns: a site of the chain and the value of
        the correlator there.
        """
        XYDataDict.from_estimator(self).save_as_txt(filename, output_dir)

    def plot(self):
        """Plot the correlator data, all parameter sets on one figure."""
        XYDataDict.from_estimator(self).plot()
| {
"repo_name": "iglpdc/dmrg_helpers",
"path": "dmrg_helpers/extract/estimator.py",
"copies": "1",
"size": "5864",
"license": "mit",
"hash": 1314680287810832000,
"line_mean": 36.5897435897,
"line_max": 80,
"alpha_frac": 0.6348908595,
"autogenerated": false,
"ratio": 4.197566213314245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006361912940823831,
"num_lines": 156
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.