code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
import re
from typing import List
from collections import Counter
from data import Article
class Process:
    """Abstract base for one word-list transformation step in the
    preprocessing pipeline (stopword removal, stemming, ...)."""

    def process(self, words: List[str]) -> List[str]:
        """Transform the given token list and return the result.

        Subclasses override this; the base implementation does nothing
        (and therefore returns None).
        """
        pass
class Strategy:
    """A single Porter-stemmer rewrite rule.

    A rule is a suffix, its replacement, and the conditions (word measure,
    last stem letter, vowel presence, double consonant, Porter's *o
    pattern) under which the rule may fire.
    """

    # Instance attribute types; all values are assigned in __init__.
    _lastLetters: List[str]
    _measure: List[int]
    _containsVowel: bool
    _doubleConsonant: bool
    _endsWithPattern: bool
    _suffixLen: int
    _replacement: str
    _suffix: str
    _invertPattern: bool

    # Template for Porter's *o condition: the stem ends
    # consonant-vowel-consonant where the final consonant is not w, x or y
    # (hence the literal "tvz" in the last character class — that is NOT a
    # missing hyphen).  {suffixlen} is filled in per instance in __init__.
    _pattern = ".*[b-df-hj-np-tv-z][aiueo][b-df-hj-np-tvz].{{{suffixlen}}}$"

    def __init__(self,
                 suffix: str,
                 replacement: str,
                 lastLetters: List[str],
                 measure: List[int],
                 containsVowel: bool,
                 doubleConsonant: bool,
                 endsWithPattern: bool,
                 invertPattern=False):
        """Build the rule and pre-compile its suffix and pattern regexes.

        :param suffix: suffix the word must end with ("" matches any word)
        :param replacement: what the suffix is rewritten to by apply()
        :param lastLetters: if non-empty, the letter just before the suffix
            must be one of these
        :param measure: if non-empty, the word's Porter measure must be in it
        :param containsVowel: require a vowel in the stem
        :param doubleConsonant: require a double consonant before the suffix
        :param endsWithPattern: require the *o pattern to match
        :param invertPattern: invert the *o pattern test
        """
        self._suffixLen = len(suffix)
        self._pattern = re.compile(
            self._pattern.format(suffixlen=self._suffixLen), re.IGNORECASE)
        self._measure = measure
        self._lastLetters = lastLetters
        self._containsVowel = containsVowel
        self._doubleConsonant = doubleConsonant
        self._endsWithPattern = endsWithPattern
        self._applyRegex = re.compile(suffix + "$", re.IGNORECASE)
        self._replacement = replacement
        self._suffix = suffix
        self._invertPattern = invertPattern

    def apply(self, word: str) -> str:
        """Apply this rule to *word* regardless of whether it is applicable."""
        return re.sub(self._applyRegex, self._replacement, word, 0)

    def isApplicable(self, word: str, wordMeasure: int) -> bool:
        """Check whether this rule may be applied to *word*.

        :param word: candidate word (including its suffix)
        :param wordMeasure: Porter measure of the word, precomputed by caller
        :return: True only when every configured condition holds
        """
        if len(word) <= self._suffixLen:
            return False
        # The word must end with this rule's suffix.
        if not word.endswith(self._suffix):
            return False
        # If a measure constraint is configured, the measure must match.
        if self._measure and wordMeasure not in self._measure:
            return False
        # If configured, the letter before the suffix must be one of these.
        if self._lastLetters and not any(
                self.lastLetterEquals(word, letter)
                for letter in self._lastLetters):
            return False
        # The stem must contain a vowel.
        if self._containsVowel and not self.containsVowel(word):
            return False
        # The stem must end in a double consonant.
        if self._doubleConsonant and not self.doubleConsonant(word):
            return False
        # The stem must match (or, when inverted, not match) the *o pattern.
        if self._endsWithPattern and not self.endsWithPattern(word):
            return False
        # All conditions met.
        return True

    def lastLetterEquals(self, text: str, letter: str) -> bool:
        """Check if the last stem letter (before the suffix) is *letter*."""
        return text[-1 * self._suffixLen - 1].lower() == letter

    def containsVowel(self, text: str) -> bool:
        """Check for vowels in the stem (the word minus the suffix)."""
        # Guard the suffixLen == 0 case: text[:-0] would be the empty
        # string, which made the old code always report "no vowel".
        stem = text[:-self._suffixLen] if self._suffixLen else text
        return any(char in ["a", "i", "u", "e", "o", "A", "I", "U", "E", "O"]
                   for char in stem)

    def doubleConsonant(self, text: str) -> bool:
        """Check if the letter before the suffix is a doubled consonant."""
        if (self._suffixLen + 1) >= len(text):
            return False
        last = text[-1 * self._suffixLen - 1].lower()
        # Only consonants can form a Porter double-consonant ending.
        if last not in ["a", "i", "u", "e", "o"]:
            return last == text[-1 * self._suffixLen - 2].lower()
        return False

    def endsWithPattern(self, text: str) -> bool:
        """Check Porter's *o condition (optionally inverted) on *text*."""
        match = re.search(self._pattern, text)
        return not match if self._invertPattern else bool(match)
class Tokenizer:
    """Splits raw text into tokens, optionally stripping punctuation and
    lower-casing the result."""

    # Punctuation characters that become token separators; DEL (\x7f) is
    # removed outright.  str.translate does all of this in one C-level
    # pass instead of a per-character dict lookup and join.
    _PUNCTUATION = ',.;:/(){}+-<>"\'*!?^'
    _TABLE = str.maketrans(_PUNCTUATION, " " * len(_PUNCTUATION), "\u007f")

    def __init__(self, keepPunctuation: bool, keepCaps: bool):
        """
        :param keepPunctuation: when False, punctuation is blanked out
        :param keepCaps: when False, text is lower-cased
        """
        self._keepPunctuation = keepPunctuation
        self._keepCaps = keepCaps

    def erasePunctuation(self, text: str) -> str:
        """Replace punctuation with spaces and drop DEL characters."""
        return text.translate(self._TABLE)

    def tokenize(self, text: str) -> List[str]:
        """Split *text* on whitespace after the configured normalization."""
        if not self._keepPunctuation:
            text = self.erasePunctuation(text)
        if not self._keepCaps:
            text = text.lower()
        return text.split()
class SingleLetterStrategy(Strategy):
    """Porter rule that collapses a trailing double consonant to a single
    letter (e.g. "hopp" -> "hop"), except after l, s or z."""

    def apply(self, word: str) -> str:
        """Drop the final (duplicated) letter."""
        return word[:-1]

    def isApplicable(self, word: str, wordMeasure: int) -> bool:
        """Applicable only for a double consonant whose letter is none of
        the Porter exceptions l, s, z."""
        if not self.doubleConsonant(word):
            return False
        return not any(self.lastLetterEquals(word, exception)
                       for exception in ("l", "s", "z"))
class Stemmer(Process):
    """Porter stemmer: reduces English words to their stems by running
    Porter's five ordered steps of suffix-rewrite rules (Strategy lists)."""

    # Regex template for Porter's measure m (the number of vowel-consonant
    # sequences in the word); the inner {} is filled with a candidate m.
    _measureRegex = "^[b-df-hj-np-tv-z]*([aiueo]+[b-df-hj-np-tv-z]+){{{}}}[aiueo]*$"
    # -----------------------------------------------------------------------
    # a whole lot of Porter's stemming rules...
    # Step 1a: plural endings.
    _step1a = [
        Strategy("sses", "ss", [], [], False, False, False),
        Strategy("ies", "i", [], [], False, False, False),
        Strategy("ss", "ss", [], [], False, False, False),
        Strategy("s", "", [], [], False, False, False)
    ]
    # Step 1b: past tense / gerund endings.
    _step1b = [
        Strategy("eed", "ee", [], range(1, 100), False, False, False),
        Strategy("ed", "", [], [], True, False, False),
        Strategy("ing", "", [], [], True, False, False)
    ]
    # Clean-up rules run only when the "ed" or "ing" rule of 1b fired.
    _step1bnext = [
        Strategy("at", "ate", [], [], False, False, False),
        Strategy("bl", "ble", [], [], False, False, False),
        Strategy("iz", "ize", [], [], False, False, False),
        SingleLetterStrategy("", "", [
            "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "m", "n",
            "o", "p", "q", "r", "t", "u", "v", "w", "x", "y"
        ], [], False, True, False),
        Strategy("", "e", [], [1], False, False, True)
    ]
    # Step 1c: terminal y -> i when the stem has a vowel.
    _step1c = [Strategy("y", "i", [], [], True, False, False)]
    # Step 2: double suffixes reduced to single suffixes (m > 0).
    _step2 = [
        Strategy("ational", "ate", [], range(1, 100), False, False, False),
        Strategy("tional", "tion", [], range(1, 100), False, False, False),
        Strategy("enci", "ence", [], range(1, 100), False, False, False),
        Strategy("anci", "ance", [], range(1, 100), False, False, False),
        Strategy("izer", "ize", [], range(1, 100), False, False, False),
        Strategy("abli", "able", [], range(1, 100), False, False, False),
        Strategy("alli", "al", [], range(1, 100), False, False, False),
        Strategy("entli", "ent", [], range(1, 100), False, False, False),
        Strategy("eli", "e", [], range(1, 100), False, False, False),
        Strategy("ousli", "ous", [], range(1, 100), False, False, False),
        Strategy("ization", "ize", [], range(1, 100), False, False, False),
        Strategy("ation", "ate", [], range(1, 100), False, False, False),
        Strategy("ator", "ate", [], range(1, 100), False, False, False),
        Strategy("alism", "al", [], range(1, 100), False, False, False),
        Strategy("iveness", "ive", [], range(1, 100), False, False, False),
        Strategy("fulness", "ful", [], range(1, 100), False, False, False),
        Strategy("ousness", "ous", [], range(1, 100), False, False, False),
        Strategy("aliti", "al", [], range(1, 100), False, False, False),
        Strategy("iviti", "ive", [], range(1, 100), False, False, False),
        Strategy("biliti", "ble", [], range(1, 100), False, False, False)
    ]
    # Step 3: -icate/-ative/-ful/-ness style endings (m > 0).
    _step3 = [
        Strategy("icate", "ic", [], range(1, 100), False, False, False),
        Strategy("ative", "", [], range(1, 100), False, False, False),
        Strategy("alize", "al", [], range(1, 100), False, False, False),
        Strategy("icite", "ic", [], range(1, 100), False, False, False),
        Strategy("ical", "ic", [], range(1, 100), False, False, False),
        Strategy("ful", "", [], range(1, 100), False, False, False),
        Strategy("ness", "", [], range(1, 100), False, False, False)
    ]
    # Step 4: strip residual suffixes for long stems (m > 1).
    _step4 = [
        Strategy("al", "", [], range(2, 100), False, False, False),
        Strategy("ance", "", [], range(2, 100), False, False, False),
        Strategy("ence", "", [], range(2, 100), False, False, False),
        Strategy("er", "", [], range(2, 100), False, False, False),
        Strategy("ic", "", [], range(2, 100), False, False, False),
        Strategy("able", "", [], range(2, 100), False, False, False),
        Strategy("ible", "", [], range(2, 100), False, False, False),
        Strategy("ant", "", [], range(2, 100), False, False, False),
        Strategy("ement", "", [], range(2, 100), False, False, False),
        Strategy("ment", "", [], range(2, 100), False, False, False),
        Strategy("ent", "", [], range(2, 100), False, False, False),
        Strategy("ion", "", ["s", "t"], range(2, 100), False, False, False),
        Strategy("ou", "", [], range(2, 100), False, False, False),
        Strategy("ism", "", [], range(2, 100), False, False, False),
        Strategy("ate", "", [], range(2, 100), False, False, False),
        Strategy("iti", "", [], range(2, 100), False, False, False),
        Strategy("ous", "", [], range(2, 100), False, False, False),
        Strategy("ive", "", [], range(2, 100), False, False, False),
        Strategy("ize", "", [], range(2, 100), False, False, False)
    ]
    # Step 5a/5b: tidy a final -e and a final double -ll.
    _step5a = [
        Strategy("e", "", [], range(2, 100), False, False, False),
        Strategy("e", "", [], [1], False, False, False, True)
    ]
    _step5b = [
        SingleLetterStrategy("e", "", ["l"], range(2, 100), False, True, False)
    ]
    # -----------------------------------------------------------------------

    def process(self, words: List[str]) -> List[str]:
        """Stem every word in place (Porter steps 1-5) and return the list."""
        indices = range(len(words))
        for index in indices:
            wordMeasure = self.getMeasure(words[index])
            # Step 1 ----------------------------------------------------------
            words[index] = self.applyList(self._step1a, words[index],
                                          wordMeasure)[0]
            step1b = self.applyList(self._step1b, words[index], wordMeasure)
            words[index] = step1b[0]
            # Only the "ed" (rule 2) and "ing" (rule 3) rules trigger the
            # 1b clean-up rules; applyList returns 0 when nothing fired.
            if step1b[1] == 2 or step1b[1] == 3:
                words[index] = self.applyList(self._step1bnext, words[index],
                                              wordMeasure)[0]
            words[index] = self.applyList(self._step1c, words[index],
                                          wordMeasure)[0]
            # Step 2 ----------------------------------------------------------
            words[index] = self.applyList(self._step2, words[index],
                                          wordMeasure)[0]
            # Step 3 ----------------------------------------------------------
            words[index] = self.applyList(self._step3, words[index],
                                          wordMeasure)[0]
            # Step 4 ----------------------------------------------------------
            words[index] = self.applyList(self._step4, words[index],
                                          wordMeasure)[0]
            # Step 5 ----------------------------------------------------------
            words[index] = self.applyList(self._step5a, words[index],
                                          wordMeasure)[0]
            words[index] = self.applyList(self._step5b, words[index],
                                          wordMeasure)[0]
        return words

    def applyList(self, strategies, word: str, wordMeasure: int) -> list:
        """Apply the first applicable strategy of *strategies* to *word*.

        :return: ``[new_word, k]`` where k is the 1-based index of the rule
            that fired, or ``[word, 0]`` when no rule was applicable.
            (The previous code returned len(strategies) in the no-match
            case, which made "last rule fired" indistinguishable from
            "nothing fired" and wrongly ran step 1b's clean-up rules on
            words that never had an "ed"/"ing" suffix.)
        """
        for counter, strat in enumerate(strategies, start=1):
            if strat.isApplicable(word, wordMeasure):
                return [strat.apply(word), counter]
        return [word, 0]

    def getMeasure(self, word: str) -> int:
        """Return Porter's word measure m, or -1 if none of 0..99 matches."""
        for index in range(100):
            if re.search(self._measureRegex.format(index), word,
                         re.IGNORECASE):
                return index
        return -1
class StopwordEraser(Process):
    """Removes English stopwords (plus corpus-specific tokens such as
    "mln", "dlr" and "reuters") from the token list."""

    # frozenset gives O(1) membership tests.  Three entries in the original
    # list carried stray trailing apostrophes ("herself'", "if'", "their'"),
    # so the intended words were never filtered; they are corrected here.
    stopwords = frozenset([
        "mln", "dlr", "reuters", "\x03", 'a', 'about', 'above', 'after',
        'again', 'against', 'all', 'am', 'an', 'and', 'any', 'are', "aren't",
        'as', 'at', 'be', 'because', 'been', 'before', 'being', 'below',
        'between', 'both', 'but', 'by', "can't", 'cannot', 'could',
        "couldn't", 'did', "didn't", 'do', 'does', "doesn't", 'doing',
        "don't", 'down', 'during', 'each', 'few', 'for', 'from', 'further',
        'had', "hadn't", 'has', "hasn't", 'have', "haven't", 'having', 'he',
        "he'd", "he'll", "he's", 'her', 'here', "here's", 'hers', 'herself',
        'him', 'himself', 'his', 'how', "how's", 'i', "i'd", "i'll", "i'm",
        "i've", 'if', 'in', 'into', 'is', "isn't", 'it', "it's", 'its',
        'itself', "let's", 'me', 'more', 'most', "mustn't", 'my', 'myself',
        'no', 'nor', 'not', 'of', 'off', 'on', 'once', 'only', 'or', 'other',
        'ought', 'our', 'ours', 'ourselves', 'out', 'over', 'own', 'same',
        "shan't", 'she', "she'd", "she'll", "she's", 'should', "shouldn't",
        'so', 'some', 'such', 'than', 'that', "that's", 'the', 'their',
        'theirs', 'them', 'themselves', 'then', 'there', "there's", 'these',
        'they', "they'd", "they'll", "they're", "they've", 'this', 'those',
        'through', 'to', 'too', 'under', 'until', 'up', 'very', 'was',
        "wasn't", 'we', "we'd", "we'll", "we're", "we've", 'were', "weren't",
        'what', "what's", 'when', "when's", 'where', "where's", 'which',
        'while', 'who', "who's", 'whom', 'why', "why's", 'with', "won't",
        'would', "wouldn't", 'you', "you'd", "you'll", "you're", "you've",
        'your', 'yours', 'yourself', 'yourselves'
    ])

    def process(self, words: List[str]) -> List[str]:
        """Return only the words that are not stopwords, preserving order."""
        return [word for word in words if word not in self.stopwords]
class NumberEraser(Process):
    """Collapses every token containing a digit into the placeholder
    "/number/"."""

    # Compiled once at class level instead of per call.  A single digit
    # anywhere in the token suffices; re.search(r"\d") is equivalent to
    # the old re.match(r".*\d") test.
    _digit = re.compile(r"\d")

    def process(self, words: List[str]) -> List[str]:
        """Replace digit-bearing tokens with "/number/"; others pass through."""
        return ["/number/" if self._digit.search(word) else word
                for word in words]
class GarbageEraser(Process):
    """Drops junk tokens: "bla..." runs, single characters, and words made
    entirely of lower-case consonants."""

    _blaRegex = r"^b+l+a+$"
    _noVocalRegex = r"^[b-df-hj-np-tv-z]*$"

    def process(self, words: List[str]) -> List[str]:
        """Return the tokens that survive all three junk filters."""
        # there are seriously a whole lot of blas in this articles .... -.-
        bla = re.compile(self._blaRegex)
        no_vowel = re.compile(self._noVocalRegex)

        def keep(word):
            # Longer than one character, not a "bla" run, and containing
            # at least one character outside the lower-case consonant set.
            return (not re.match(bla, word)) and len(word) > 1 and (
                not re.match(no_vowel, word))

        return [word for word in words if keep(word)]
class IllicitWordEraser(Process):
    """Keeps only tokens that appear in a fixed whitelist."""

    def __init__(self, allowedWords: List[str]):
        """
        :param allowedWords: the vocabulary of permitted words
        """
        # frozenset turns the per-word membership test from O(n) into O(1).
        self._allowedWords = frozenset(allowedWords)

    def process(self, words: List[str]) -> List[str]:
        """Return only the whitelisted words, preserving order."""
        return [word for word in words if word in self._allowedWords]
class Preprocessor:
    """Runs an article through tokenization plus a configurable chain of
    Process steps and attaches a word-count Counter to the article."""

    def __init__(self, allowedWords):
        """
        :param allowedWords: vocabulary used to seed the word counter keys
        """
        self._processors = []
        self._tokenizer = Tokenizer(False, False)
        # Counter(allowedWords) seeds every allowed word as a key — but at
        # count 1.  Reset to zero immediately; previously the reset only
        # happened after the first article, whose counts were therefore
        # all inflated by one.
        self._counter = Counter(allowedWords)
        self.resetCounter()

    def addProcessor(self, process: Process) -> None:
        """Append a processing step; steps run in insertion order."""
        self._processors.append(process)

    def process(self, article: Article) -> Article:
        """Tokenize the article text, run all processors, attach counts."""
        words = self._tokenizer.tokenize(article.text)
        for proc in self._processors:
            words = proc.process(words)
        self._counter.update(words)
        # A copy is necessary so each article gets its own counts instead
        # of sharing (and accumulating into) one Counter.
        article.preprocessed = self._counter.copy()
        self.resetCounter()
        return article

    def resetCounter(self) -> None:
        """Zero every count while keeping all vocabulary keys present."""
        for index in self._counter:
            self._counter[index] = 0
class PreprocessorFactory:
    """Builds Preprocessor instances; FACTORY caches a shared singleton."""

    # Lazily-created shared instance used by FACTORY.
    instance = None

    @staticmethod
    def FACTORY(allowedWords: List[str]) -> Preprocessor:
        """Return the shared, fully configured Preprocessor, creating it on
        first use.

        :param allowedWords: vocabulary for the whitelist and the counter
        """
        if PreprocessorFactory.instance is None:
            preprocessor = Preprocessor(allowedWords)
            preprocessor.addProcessor(StopwordEraser())
            preprocessor.addProcessor(NumberEraser())
            preprocessor.addProcessor(GarbageEraser())
            preprocessor.addProcessor(Stemmer())
            preprocessor.addProcessor(IllicitWordEraser(allowedWords))
            PreprocessorFactory.instance = preprocessor
        return PreprocessorFactory.instance

    @staticmethod
    def CACHE_FACTORY() -> Preprocessor:
        """Create a fresh (non-cached) preprocessor without the whitelist
        step, used to build the vocabulary cache itself."""
        preprocessor = Preprocessor([])
        preprocessor.addProcessor(StopwordEraser())
        preprocessor.addProcessor(NumberEraser())
        preprocessor.addProcessor(GarbageEraser())
        preprocessor.addProcessor(Stemmer())
        return preprocessor
|
[
"re.match",
"re.search",
"collections.Counter",
"re.sub",
"re.compile"
] |
[((664, 716), 're.sub', 're.sub', (['self._applyRegex', 'self._replacement', 'word', '(0)'], {}), '(self._applyRegex, self._replacement, word, 0)\n', (670, 716), False, 'import re\n'), ((3718, 3748), 're.search', 're.search', (['self._pattern', 'text'], {}), '(self._pattern, text)\n', (3727, 3748), False, 'import re\n'), ((4464, 4503), 're.compile', 're.compile', (["(suffix + '$')", 're.IGNORECASE'], {}), "(suffix + '$', re.IGNORECASE)\n", (4474, 4503), False, 'import re\n'), ((15305, 15324), 're.compile', 're.compile', (['""".*\\\\d"""'], {}), "('.*\\\\d')\n", (15315, 15324), False, 'import re\n'), ((15653, 15679), 're.compile', 're.compile', (['self._blaRegex'], {}), '(self._blaRegex)\n', (15663, 15679), False, 'import re\n'), ((15703, 15733), 're.compile', 're.compile', (['self._noVocalRegex'], {}), '(self._noVocalRegex)\n', (15713, 15733), False, 'import re\n'), ((17378, 17399), 'collections.Counter', 'Counter', (['allowedWords'], {}), '(allowedWords)\n', (17385, 17399), False, 'from collections import Counter\n'), ((15380, 15409), 're.match', 're.match', (['regex', 'words[index]'], {}), '(regex, words[index])\n', (15388, 15409), False, 'import re\n'), ((3672, 3702), 're.search', 're.search', (['self._pattern', 'text'], {}), '(self._pattern, text)\n', (3681, 3702), False, 'import re\n'), ((15881, 15905), 're.match', 're.match', (['blaRegex', 'word'], {}), '(blaRegex, word)\n', (15889, 15905), False, 'import re\n'), ((15951, 15979), 're.match', 're.match', (['noVocalRegex', 'word'], {}), '(noVocalRegex, word)\n', (15959, 15979), False, 'import re\n')]
|
#!/usr/bin/env python3
import os
import urllib.parse
import urllib.request
from collections import OrderedDict
import xml.etree.ElementTree
class NameSilo(object):
    """Thin client for the NameSilo DNS HTTP API (XML responses)."""

    def __init__(self, apikey):
        self._apikey = apikey

    def _make_url(self, op, **args):
        """Build the API URL for operation *op* with extra query args."""
        params = OrderedDict(
            version=1,
            type='xml',
            key=self._apikey
        )
        params.update(args)
        components = (
            'https',
            'www.namesilo.com',
            '/api/{0}'.format(op),
            '',
            urllib.parse.urlencode(params),
            ''
        )
        return urllib.parse.urlunparse(components)

    def _get_url(self, op, **args):
        """Perform the request and return the parsed ``<reply>`` element.

        Raises Exception when the API code is anything but 300 (success).
        """
        request = urllib.request.Request(
            url=self._make_url(op, **args),
            headers={'User-Agent': 'Mozilla/5.0'}
        )
        body = urllib.request.urlopen(request).read().decode('utf-8')
        root = xml.etree.ElementTree.fromstring(body)
        code = int(root.find('./reply/code').text)
        if code != 300:
            raise Exception('{0}: {1}'.format(op, root.find('./reply/detail').text))
        return root.find('./reply')

    def add_record(self, domain, host, value, ttl):
        """Create a TXT record and return the new record id."""
        reply = self._get_url('dnsAddRecord', domain=domain, rrtype='TXT',
                              rrhost=host, rrvalue=value, rrttl=ttl)
        return reply.find('./record_id').text

    def del_record(self, domain, id):
        """Delete the record with the given id."""
        self._get_url('dnsDeleteRecord', domain=domain, rrid=id)

    def list_records(self, domain):
        """Return every resource record as a dict of tag -> text."""
        reply = self._get_url('dnsListRecords', domain=domain)
        return [{e.tag: e.text for e in r}
                for r in reply.findall('./resource_record')]
def amce_build_host(domain):
    """Derive the ACME challenge record names for *domain*.

    Returns a tuple ``(host, cdomain, fullhost)``: the challenge host
    relative to the registrable domain, the registrable domain (its last
    two labels), and the fully qualified challenge name.
    """
    labels = domain.split('.')
    if len(labels) <= 2:
        host = '_acme-challenge'
        cdomain = domain
    else:
        host = '_acme-challenge.{0}'.format('.'.join(labels[:-2]))
        cdomain = '.'.join(labels[-2:])
    fullhost = '_acme-challenge.{0}'.format(domain)
    return (host, cdomain, fullhost)
|
[
"collections.OrderedDict"
] |
[((260, 312), 'collections.OrderedDict', 'OrderedDict', ([], {'version': '(1)', 'type': '"""xml"""', 'key': 'self._apikey'}), "(version=1, type='xml', key=self._apikey)\n", (271, 312), False, 'from collections import OrderedDict\n')]
|
import unittest
from mock import mock
from capiq.capiq_client import CapIQClient
def mocked_gdsp_data_requests_post(*args, **kwargs):
    """Stand-in for requests.post that returns a canned successful GDSP
    close-price payload (or None when the URL argument is None)."""
    class MockResponse:
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code

        def json(self):
            return self.json_data

    payload = {"GDSSDKResponse": [{
        "Headers": ["IQ_CLOSEPRICE"],
        "Rows": [{"Row": ["46.80"]}],
        "NumCols": 1,
        "Seniority": "",
        "Mnemonic": "IQ_CLOSEPRICE",
        "Function": "GDSP",
        "ErrMsg": None,
        "Properties": {},
        "StartDate": "",
        "NumRows": 1,
        "CacheExpiryTime": "0",
        "SnapType": "",
        "Frequency": "",
        "Identifier": "TRIP:",
        "Limit": ""
    }]}
    if args[0] is not None:
        return MockResponse(payload, 200)
def mocked_gdsp_no_data_requests_post(*args, **kwargs):
    """Stand-in for requests.post that returns a GDSP payload carrying an
    error message (the "no data" case), or None when the URL is None."""
    class MockResponse:
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code

        def json(self):
            return self.json_data

    payload = {
        "GDSSDKResponse": [
            {
                "Headers": ["IQ_CLOSEPRICE"],
                "Rows": [{"Row": ["46.80"]}],
                "NumCols": 1,
                "Seniority": "",
                "Mnemonic": "IQ_CLOSEPRICE",
                "Function": "GDSP",
                "ErrMsg": "SOME ERROR",
                "Properties": {},
                "StartDate": "",
                "NumRows": 1,
                "CacheExpiryTime": "0",
                "SnapType": "",
                "Frequency": "",
                "Identifier": "TRIP:",
                "Limit": ""
            }
        ]
    }
    if args[0] is not None:
        return MockResponse(payload, 200)
class TestCapiqClientGdsp(unittest.TestCase):
    """Unit tests for CapIQClient.gdsp against mocked HTTP responses."""

    @mock.patch('capiq.capiq_client.requests.post', side_effect=mocked_gdsp_data_requests_post)
    def test_gdsp_data(self, mocked_post):
        """A successful response maps the identifier to its close price."""
        client = CapIQClient("username", "password")
        result = client.gdsp(["TRIP"], ["IQ_CLOSEPRICE"], ["close_price"], [{}])
        self.assertEqual(result, {'TRIP:': {'close_price': '46.80'}})

    @mock.patch('capiq.capiq_client.requests.post', side_effect=mocked_gdsp_no_data_requests_post)
    def test_gdsp_no_data(self, mocked_post):
        """An error response yields None for the requested field."""
        client = CapIQClient("username", "password")
        result = client.gdsp(["TRIP"], ["IQ_CLOSEPRICE"], ["close_price"], [{}])
        self.assertEqual(result, {'TRIP:': {'close_price': None}})
|
[
"mock.mock.patch",
"capiq.capiq_client.CapIQClient"
] |
[((2385, 2480), 'mock.mock.patch', 'mock.patch', (['"""capiq.capiq_client.requests.post"""'], {'side_effect': 'mocked_gdsp_data_requests_post'}), "('capiq.capiq_client.requests.post', side_effect=\n mocked_gdsp_data_requests_post)\n", (2395, 2480), False, 'from mock import mock\n'), ((2749, 2847), 'mock.mock.patch', 'mock.patch', (['"""capiq.capiq_client.requests.post"""'], {'side_effect': 'mocked_gdsp_no_data_requests_post'}), "('capiq.capiq_client.requests.post', side_effect=\n mocked_gdsp_no_data_requests_post)\n", (2759, 2847), False, 'from mock import mock\n'), ((2540, 2575), 'capiq.capiq_client.CapIQClient', 'CapIQClient', (['"""username"""', '"""password"""'], {}), "('username', 'password')\n", (2551, 2575), False, 'from capiq.capiq_client import CapIQClient\n'), ((2910, 2945), 'capiq.capiq_client.CapIQClient', 'CapIQClient', (['"""username"""', '"""password"""'], {}), "('username', 'password')\n", (2921, 2945), False, 'from capiq.capiq_client import CapIQClient\n')]
|
from shutil import copyfile
import glob
import os
# Bootstrap a local "config" directory from the checked-in examples.
if not os.path.exists("config"):
    os.mkdir("config")
# Copy every example JSON into config/, stripping everything up to the
# last "example-" in the path.
# NOTE(review): for a file such as example-config/settings.json this yields
# config/config/settings.json (only "example-" is stripped, not the
# directory); it only lands directly in config/ when the file itself is
# named example-*.json -- confirm the intended naming scheme.
for file in glob.glob("example-config/*.json"):
    copyfile(file, os.path.join("config", file.split("example-")[-1]))
|
[
"os.mkdir",
"os.path.exists",
"glob.glob"
] |
[((120, 154), 'glob.glob', 'glob.glob', (['"""example-config/*.json"""'], {}), "('example-config/*.json')\n", (129, 154), False, 'import glob\n'), ((58, 82), 'os.path.exists', 'os.path.exists', (['"""config"""'], {}), "('config')\n", (72, 82), False, 'import os\n'), ((88, 106), 'os.mkdir', 'os.mkdir', (['"""config"""'], {}), "('config')\n", (96, 106), False, 'import os\n')]
|
from enum import Enum
from numerous.engine.model import Model
from numerous.engine.simulation import Simulation
from numerous.engine.system import Subsystem, Item
from tests.test_equations import TestEq_ground, Test_Eq, TestEq_input
from enum import Enum
from numerous.engine.model import Model
from numerous.engine.simulation import Simulation
from numerous.engine.system import Subsystem, Item
from tests.test_equations import TestEq_ground, Test_Eq, TestEq_input
class SolverType(Enum):
    """Available solver back-ends for the simulation."""
    SOLVER_IVP = 0
    NUMEROUS = 1


# Solver back-ends this script is meant to exercise.
solver_types = [SolverType.NUMEROUS, SolverType.SOLVER_IVP]
def ms2():
    """Build a fixed 5-item chain system: input -> three T items -> ground.

    Each item exposes a 't1' namespace whose T/R variables are chained via
    add_mapping.  Returns the assembled S2 subsystem.
    """
    class I(Item):
        # Source item driven by TestEq_input.
        def __init__(self, tag, P, T, R):
            super().__init__(tag)
            t1 = self.create_namespace('t1')
            t1.add_equations([TestEq_input(P=P, T=T, R=R)])

    class T(Item):
        # Intermediate chain item driven by Test_Eq.
        def __init__(self, tag, T, R):
            super().__init__(tag)
            t1 = self.create_namespace('t1')
            t1.add_equations([Test_Eq(T=T, R=R)])

    class G(Item):
        # Terminal (ground) item driven by TestEq_ground.
        def __init__(self, tag, TG, RG):
            super().__init__(tag)
            t1 = self.create_namespace('t1')
            t1.add_equations([TestEq_ground(TG=TG, RG=RG)])

    class S2(Subsystem):
        def __init__(self, tag):
            super().__init__(tag)
            input = I('1', P=100, T=0, R=10)
            item1 = T('2', T=0, R=5)
            item2 = T('3', T=0, R=3)
            item3 = T('4', T=0, R=2)
            ## RG is redundant we use item3.R as a last value of R in a chain
            ground = G('5', TG=10, RG=2)
            # Wire the chain: each item's T_o/T_i/R_i maps to its neighbour.
            input.t1.T_o.add_mapping(item1.t1.T)
            # item1.bind(input=input, output=item2)
            item1.t1.R_i.add_mapping(input.t1.R)
            item1.t1.T_i.add_mapping(input.t1.T)
            item1.t1.T_o.add_mapping(item2.t1.T)
            # t_0 = item1.t1.T_o
            # item1.t1.T_o = item2.t1.T
            item2.t1.R_i.add_mapping(item1.t1.R)
            item2.t1.T_i.add_mapping(item1.t1.T)
            item2.t1.T_o.add_mapping(item3.t1.T)
            item3.t1.R_i.add_mapping(item2.t1.R)
            item3.t1.T_i.add_mapping(item2.t1.T)
            item3.t1.T_o.add_mapping(ground.t1.T)
            self.register_items([input, item1, item2, item3, ground])

    return S2('S2')
def run_model(ms, solver, use_llvm):
    """Compile *ms* into a Model, simulate t=0..1000 in 10 steps with the
    given solver, and return the solved Simulation."""
    model = Model(system=ms, use_llvm=use_llvm)
    simulation = Simulation(model, t_start=0, t_stop=1000, num=10,
                            solver_type=solver)
    simulation.solve()
    return simulation
def ms2N(n):
    """Build a chain system with one input, *n* intermediate T items and a
    ground, wired together via t1-namespace mappings.

    :param n: number of intermediate items
    :return: the assembled S2N subsystem
    """
    class I(Item):
        # Source item driven by TestEq_input.
        def __init__(self, tag, P, T, R):
            super().__init__(tag)
            t1 = self.create_namespace('t1')
            t1.add_equations([TestEq_input(P=P, T=T, R=R)])

    class T(Item):
        # Intermediate chain item driven by Test_Eq.
        def __init__(self, tag, T, R):
            super().__init__(tag)
            t1 = self.create_namespace('t1')
            t1.add_equations([Test_Eq(T=T, R=R)])

    class G(Item):
        # Terminal (ground) item driven by TestEq_ground.
        def __init__(self, tag, TG, RG):
            super().__init__(tag)
            t1 = self.create_namespace('t1')
            t1.add_equations([TestEq_ground(TG=TG, RG=RG)])

    class S2N(Subsystem):
        def __init__(self, tag):
            super().__init__(tag)
            items = []
            input = I('1', P=100, T=0, R=10)
            # Item tags run from '2' to str(n + 1); ground is str(n + 2).
            for i in range(n):
                items.append(T(str(i+2), T=1, R=5))
            #print(items)
            #print(f'GNDID{n + 2}')
            ground = G(str(n + 2), TG=10, RG=2)
            input.t1.T_o.add_mapping(items[0].t1.T)
            # item1.bind(input=input, output=item2)
            # NOTE(review): for n == 1 the first branch indexes
            # items[item + 1] and raises IndexError, and the first item's
            # T_i mapping is commented out -- this builder appears to
            # assume n >= 2; confirm before calling with smaller n.
            for item in range(n):
                if item == 0:
                    items[item].t1.R_i.add_mapping(input.t1.R)
                    #items[item].t1.T_i.add_mapping(input.t1.T)
                    items[item].t1.T_o.add_mapping(items[item + 1].t1.T)
                elif item == n-1:
                    items[item].t1.R_i.add_mapping(items[item - 1].t1.R)
                    items[item].t1.T_i.add_mapping(items[item - 1].t1.T)
                    items[item].t1.T_o.add_mapping(ground.t1.T)
                else:
                    items[item].t1.R_i.add_mapping(items[item - 1].t1.R)
                    items[item].t1.T_i.add_mapping(items[item - 1].t1.T)
                    items[item].t1.T_o.add_mapping(items[item + 1].t1.T)
            # Register input, all chain items, then ground.
            r_items = [input]
            for i in items:
                r_items.append(i)
            r_items.append(ground)
            print(r_items)
            #print(type(items[0]))
            self.register_items(r_items)

    return S2N('S2')
#s1=run_model(ms2(), solver=SolverType.NUMEROUS, use_llvm=[True, False])
#s1_result=s1.model.historian_df
# Build an 8-item chain and solve it, keeping the resulting history frame.
# NOTE(review): use_llvm is passed the list [True, False]; Model presumably
# expects a single bool here -- confirm against the numerous.engine API.
s1n=run_model(ms2N(8), solver=SolverType.NUMEROUS, use_llvm=[True, False])
s1n_result=s1n.model.historian_df
|
[
"tests.test_equations.TestEq_input",
"tests.test_equations.Test_Eq",
"numerous.engine.simulation.Simulation",
"tests.test_equations.TestEq_ground",
"numerous.engine.model.Model"
] |
[((2351, 2386), 'numerous.engine.model.Model', 'Model', ([], {'system': 'ms', 'use_llvm': 'use_llvm'}), '(system=ms, use_llvm=use_llvm)\n', (2356, 2386), False, 'from numerous.engine.model import Model\n'), ((2396, 2462), 'numerous.engine.simulation.Simulation', 'Simulation', (['m1'], {'t_start': '(0)', 't_stop': '(1000)', 'num': '(10)', 'solver_type': 'solver'}), '(m1, t_start=0, t_stop=1000, num=10, solver_type=solver)\n', (2406, 2462), False, 'from numerous.engine.simulation import Simulation\n'), ((777, 804), 'tests.test_equations.TestEq_input', 'TestEq_input', ([], {'P': 'P', 'T': 'T', 'R': 'R'}), '(P=P, T=T, R=R)\n', (789, 804), False, 'from tests.test_equations import TestEq_ground, Test_Eq, TestEq_input\n'), ((976, 993), 'tests.test_equations.Test_Eq', 'Test_Eq', ([], {'T': 'T', 'R': 'R'}), '(T=T, R=R)\n', (983, 993), False, 'from tests.test_equations import TestEq_ground, Test_Eq, TestEq_input\n'), ((1167, 1194), 'tests.test_equations.TestEq_ground', 'TestEq_ground', ([], {'TG': 'TG', 'RG': 'RG'}), '(TG=TG, RG=RG)\n', (1180, 1194), False, 'from tests.test_equations import TestEq_ground, Test_Eq, TestEq_input\n'), ((2678, 2705), 'tests.test_equations.TestEq_input', 'TestEq_input', ([], {'P': 'P', 'T': 'T', 'R': 'R'}), '(P=P, T=T, R=R)\n', (2690, 2705), False, 'from tests.test_equations import TestEq_ground, Test_Eq, TestEq_input\n'), ((2877, 2894), 'tests.test_equations.Test_Eq', 'Test_Eq', ([], {'T': 'T', 'R': 'R'}), '(T=T, R=R)\n', (2884, 2894), False, 'from tests.test_equations import TestEq_ground, Test_Eq, TestEq_input\n'), ((3067, 3094), 'tests.test_equations.TestEq_ground', 'TestEq_ground', ([], {'TG': 'TG', 'RG': 'RG'}), '(TG=TG, RG=RG)\n', (3080, 3094), False, 'from tests.test_equations import TestEq_ground, Test_Eq, TestEq_input\n')]
|
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from pyvirtualdisplay import Display
from tests.testing_utilities import populate_test_db
class FunctionalTest(StaticLiveServerTestCase):
    """Browser-level tests for the home page, run inside a virtual
    display (pyvirtualdisplay) with Selenium/Firefox."""

    def setUp(self):
        # Keep a reference to the display so tearDown can stop it;
        # previously the local variable was dropped and every test leaked
        # a running virtual-display process.
        self.display = Display(visible=0, size=(800, 600))
        self.display.start()
        self.selenium = webdriver.Firefox()
        self.selenium.implicitly_wait(3)
        populate_test_db()

    def tearDown(self):
        self.selenium.quit()
        self.display.stop()

    # Auxiliary function to add view subdir to URL
    def _get_full_url(self, namespace):
        """Prefix *namespace* with the live server's base URL."""
        return self.live_server_url + namespace

    def _is_text_present(self, text):
        """Return True if *text* occurs anywhere in the page body."""
        try:
            body = self.selenium.find_element_by_tag_name('body')
        except NoSuchElementException:
            return False
        return text in body.text  # check if the text is in body's text

    def test_home_title(self):
        """
        Tests that Home is loading properly
        """
        self.selenium.get(self._get_full_url('/'))
        self.assertIn('Metadata Explorer Tool', self.selenium.title)

    def test_home_sections(self):
        """
        Tests that Home is showing the right sections
        """
        self.selenium.get(self._get_full_url('/'))
        self.assertTrue(self._is_text_present('Entities summary'))
[
"pyvirtualdisplay.Display",
"tests.testing_utilities.populate_test_db",
"selenium.webdriver.Firefox"
] |
[((346, 381), 'pyvirtualdisplay.Display', 'Display', ([], {'visible': '(0)', 'size': '(800, 600)'}), '(visible=0, size=(800, 600))\n', (353, 381), False, 'from pyvirtualdisplay import Display\n'), ((431, 450), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (448, 450), False, 'from selenium import webdriver\n'), ((500, 518), 'tests.testing_utilities.populate_test_db', 'populate_test_db', ([], {}), '()\n', (516, 518), False, 'from tests.testing_utilities import populate_test_db\n')]
|
from abc import ABC, abstractmethod
from typing import TypeVar, Generic, List
from geniusweb.connection.ConnectionEnd import ConnectionEnd
# Message type parameters for Connectable: INTYPE for inbound messages,
# OUTTYPE for outbound messages.
INTYPE = TypeVar('INTYPE')
OUTTYPE = TypeVar('OUTTYPE')


class Connectable(ABC, Generic[INTYPE,OUTTYPE]):
    '''
    A Connectable is an object that can connect on request with a provided
    {@link ConnectionEnd} and then respond to incoming and outgoing signals.

    @param <INTYPE> the type of incoming messages
    @param <OUTTYPE> the type of outgoing messages
    '''
    @abstractmethod
    def connect(self, connection: ConnectionEnd[INTYPE, OUTTYPE]):
        '''
        Creates the connection. Only called if not yet connected.

        @param connection the new connection
        '''
    @abstractmethod
    def disconnect(self):
        '''
        Removes the connection from a connectable; the previously given
        connection can not be used anymore after this call. Only called if
        previously connected.
        '''
|
[
"typing.TypeVar"
] |
[((150, 167), 'typing.TypeVar', 'TypeVar', (['"""INTYPE"""'], {}), "('INTYPE')\n", (157, 167), False, 'from typing import TypeVar, Generic, List\n'), ((178, 196), 'typing.TypeVar', 'TypeVar', (['"""OUTTYPE"""'], {}), "('OUTTYPE')\n", (185, 196), False, 'from typing import TypeVar, Generic, List\n')]
|
import os
import pandas as pd
class MergeExcel(object):
    def __init__(self, excel_filepath=None, folder_path=None, sheetname_lst=None):
        """
        :param excel_filepath: path of a single workbook whose sheets are merged
        :param folder_path: directory whose workbooks are merged
        :param sheetname_lst: sheet names to merge; None merges all sheets
        """
        self.excel_filepath = excel_filepath
        self.folder_path = folder_path
        self.sheetname_lst = sheetname_lst
        # Derive the output file path next to the input workbook / folder.
        if self.excel_filepath is not None:
            # "<name>_处理完成.xlsx" means "<name>_processed.xlsx".
            new_filename = "%s_处理完成.xlsx" % os.path.basename(excel_filepath)
            abs_filepath = os.path.abspath(excel_filepath)
            self.new_filepath = os.path.join(os.path.dirname(abs_filepath), new_filename)
        else:
            # "<folder>_合并结果.xlsx" means "<folder>_merge-result.xlsx".
            self.new_filepath = os.path.join(os.path.dirname(self.folder_path),
                                             "%s_合并结果.xlsx" % os.path.basename(self.folder_path))
    def read_excel(self, excel_filepath, sheet_name=None):
        """
        Read an Excel workbook into pandas structures.

        :param excel_filepath: path of the workbook to read
        :param sheet_name: which sheets to read; None reads every sheet
        :return: dict of sheet name -> pandas.DataFrame (pandas returns a
            single DataFrame when one sheet name is given); None on error
        """
        try:
            df_dict = pd.read_excel(excel_filepath, sheet_name=sheet_name)
            return df_dict
        except Exception as err:
            # Best effort: report the problem; the implicit None return
            # signals failure to the caller.
            print(err)
@classmethod
def df_concat(self, df_sheet_dict):
# try:
df = pd.concat(df_sheet_dict)
print(df.index.values)
df.index = [x[0] for x in df.index]
print(df.index)
df.columns = [x for x in df.columns]
df.index.name = '工作表名称'
return df
def merge_worksheet(self):
if type(self.sheetname_lst) in [str, list] or self.sheetname_lst is None:
df_dict = self.read_excel(excel_filepath=self.excel_filepath,
sheet_name=self.sheetname_lst)
print("【注意】当前共有%s个工作表需要合并!" % len(df_dict))
for sheet_name, df in df_dict.items():
print("工作表名称【%s】: 共%s行" % (sheet_name, df.shape[0]))
df_merge = pd.concat(df_dict)
df_merge.index = [x[0] for x in df_merge.index]
df_merge.index.name = '工作表名称'
else:
print("当前指定的参数有误!,请检查后重新输入!")
df_merge = None
if df_merge is not None:
return df_merge
else:
return None
def merge_workbooks(self):
'''
:param folder_path: 文件夹路径
:param end_str_lst: 指定读取的文件扩展名列表
:return: pd.DataFrame
'''
folder_path = self.folder_path
end_str_lst = ['.xlsx', '.xls']
end_str_tuble = tuple(end_str_lst)
# 判断是否为绝对路径,如果不是,则转换为绝对路径
if not os.path.isabs(folder_path):
folder_path = os.path.abspath(folder_path)
df_all_lst = []
for root, dirs, files in os.walk(folder_path, topdown=True):
'''
root:当前正在遍历的这个文件夹
dirs :list ,root目录中所有的文件夹的名字(不包括文件夹中的子目录)
files:list , root目录中所有的文件名称(不包括子目录)
'''
excel_files = [file for file in files if file.endswith(end_str_tuble) and not file.startswith(("~$"))]
print(root)
print(dirs)
print(files)
print(excel_files)
# 如果excel_files列表不为空
if excel_files:
for excel_file in excel_files:
df_dict = pd.read_excel(os.path.join(root, excel_file), sheet_name=None)
if self.sheetname_lst is not None:
sheetname_lst = list(df_dict.keys())
keep_key = sheetname_lst[0]
df_dict = {keep_key: df_dict[keep_key]}
df_merge = pd.concat(df_dict)
df_merge.index = [x[0] for x in df_merge.index]
df_merge.index.name = '工作表名称'
col_name = list(df_merge.columns)
df_merge['excel文件名称'] = excel_file
df_merge['工作表名称'] = df_merge.index
df_merge = pd.DataFrame(df_merge, columns=['excel文件名称', '工作表名称'] + col_name)
df_all_lst.append(df_merge)
df_all = pd.concat(df_all_lst)
return df_all
def to_excel(self, df, sheet_name="sheet1"):
writer = pd.ExcelWriter('%s' % self.new_filepath, engine='xlsxwriter')
df.to_excel(writer, sheet_name=sheet_name,
startrow=0,
index=False)
workbook = writer.book
# 统计部分内容部分样式
txt_cell_format = workbook.add_format({'bold': False, 'italic': False, 'font_size': 10, 'border': 1})
txt_cell_format.set_align('left')
worksheet = writer.sheets[sheet_name]
# worksheet1.set_column(起始列,结束列,列宽,格式)
worksheet.set_column(0, df.shape[1], 15, txt_cell_format)
writer.save()
if __name__ == "__main__":
    # Merge every workbook found under this folder into one Excel file.
    target_folder = r"C:\Users\soari\Desktop\excel"
    merger = MergeExcel(folder_path=target_folder, sheetname_lst=None)
    merged = merger.merge_workbooks()
    print(merged)
    merger.to_excel(merged)
|
[
"pandas.DataFrame",
"os.path.abspath",
"os.path.isabs",
"os.path.join",
"os.path.basename",
"os.path.dirname",
"os.walk",
"pandas.read_excel",
"pandas.ExcelWriter",
"pandas.concat"
] |
[((1401, 1425), 'pandas.concat', 'pd.concat', (['df_sheet_dict'], {}), '(df_sheet_dict)\n', (1410, 1425), True, 'import pandas as pd\n'), ((2855, 2889), 'os.walk', 'os.walk', (['folder_path'], {'topdown': '(True)'}), '(folder_path, topdown=True)\n', (2862, 2889), False, 'import os\n'), ((4322, 4383), 'pandas.ExcelWriter', 'pd.ExcelWriter', (["('%s' % self.new_filepath)"], {'engine': '"""xlsxwriter"""'}), "('%s' % self.new_filepath, engine='xlsxwriter')\n", (4336, 4383), True, 'import pandas as pd\n'), ((538, 569), 'os.path.abspath', 'os.path.abspath', (['excel_filepath'], {}), '(excel_filepath)\n', (553, 569), False, 'import os\n'), ((1179, 1231), 'pandas.read_excel', 'pd.read_excel', (['excel_filepath'], {'sheet_name': 'sheet_name'}), '(excel_filepath, sheet_name=sheet_name)\n', (1192, 1231), True, 'import pandas as pd\n'), ((2077, 2095), 'pandas.concat', 'pd.concat', (['df_dict'], {}), '(df_dict)\n', (2086, 2095), True, 'import pandas as pd\n'), ((2715, 2741), 'os.path.isabs', 'os.path.isabs', (['folder_path'], {}), '(folder_path)\n', (2728, 2741), False, 'import os\n'), ((2769, 2797), 'os.path.abspath', 'os.path.abspath', (['folder_path'], {}), '(folder_path)\n', (2784, 2797), False, 'import os\n'), ((4206, 4227), 'pandas.concat', 'pd.concat', (['df_all_lst'], {}), '(df_all_lst)\n', (4215, 4227), True, 'import pandas as pd\n'), ((486, 518), 'os.path.basename', 'os.path.basename', (['excel_filepath'], {}), '(excel_filepath)\n', (502, 518), False, 'import os\n'), ((615, 644), 'os.path.dirname', 'os.path.dirname', (['abs_filepath'], {}), '(abs_filepath)\n', (630, 644), False, 'import os\n'), ((719, 752), 'os.path.dirname', 'os.path.dirname', (['self.folder_path'], {}), '(self.folder_path)\n', (734, 752), False, 'import os\n'), ((824, 858), 'os.path.basename', 'os.path.basename', (['self.folder_path'], {}), '(self.folder_path)\n', (840, 858), False, 'import os\n'), ((3739, 3757), 'pandas.concat', 'pd.concat', (['df_dict'], {}), '(df_dict)\n', (3748, 3757), True, 
'import pandas as pd\n'), ((4071, 4136), 'pandas.DataFrame', 'pd.DataFrame', (['df_merge'], {'columns': "(['excel文件名称', '工作表名称'] + col_name)"}), "(df_merge, columns=['excel文件名称', '工作表名称'] + col_name)\n", (4083, 4136), True, 'import pandas as pd\n'), ((3426, 3456), 'os.path.join', 'os.path.join', (['root', 'excel_file'], {}), '(root, excel_file)\n', (3438, 3456), False, 'import os\n')]
|
"""Experiments and corresponding analysis.
format adapted from https://github.com/gyyang/olfaction_evolution
Each experiment is described by a function that returns a list of configurations
function name is the experiment name
combinatorial mode:
config_ranges should not have repetitive values
sequential mode:
config_ranges values should have equal length,
otherwise this will only loop through the shortest one
control mode:
base_config must contain keys in config_ranges
"""
import os
import copy
from collections import OrderedDict
import logging
import numpy as np
from configs.config_global import ROOT_DIR, LOG_LEVEL
from configs.configs import BaseConfig
from utils.config_utils import vary_config
from analysis.train_analysis import plot_train_log
import evaluate
from analysis import plots
def init_analysis(configs_):
    """Set up logging and plot the training log for a batch of configs.

    ``configs_`` is a non-empty list of configs sharing the same
    ``experiment_name``; only the first entry is inspected.
    """
    logging.basicConfig(level=LOG_LEVEL)
    name = configs_[0].experiment_name
    print('Analyzing ' + name)
    # Training logs live under <ROOT_DIR>/experiments/<experiment_name>/
    path = os.path.join(ROOT_DIR, 'experiments', name) + os.sep
    plot_train_log([path], exp_name=name)
# -----------------------------------------------------
# experiments
# -----------------------------------------------------
def timescale():
    """Configs: sweep RNN architectures on the base timescale task."""
    cfg = BaseConfig()
    cfg.experiment_name = 'timescale'
    cfg.rnn_type = 'plainRNN'
    cfg.t_scale = 1.0
    cfg.augment = None
    cfg.use_velocity = False
    cfg.context = None
    cfg.context_w = 10
    cfg.hidden_size = 64
    cfg.num_ep = 40
    sweep = OrderedDict(
        rnn_type=['plainRNN', 'CTRNN', 'LSTM', 'GRU', 'RNNSTSP'])
    return vary_config(cfg, sweep, mode='combinatorial')
def timescale_aug():
    """Configs: RNN architecture sweep with timescale augmentation."""
    cfg = BaseConfig()
    cfg.experiment_name = 'timescale_aug'
    cfg.rnn_type = 'plainRNN'
    cfg.t_scale = 1.0
    cfg.augment = (0.5, 1.5)
    cfg.use_velocity = False
    cfg.context = None
    cfg.context_w = 10
    cfg.hidden_size = 64
    cfg.num_ep = 40
    sweep = OrderedDict()
    sweep['rnn_type'] = ['plainRNN', 'CTRNN', 'LSTM', 'GRU', 'RNNSTSP']
    return vary_config(cfg, sweep, mode='combinatorial')
def timecode():
    """Configs: sweep temporal-context codes on a plain RNN."""
    cfg = BaseConfig()
    cfg.experiment_name = 'timecode'
    cfg.rnn_type = 'plainRNN'
    cfg.t_scale = 1.0
    cfg.augment = None
    cfg.use_velocity = False
    cfg.context = 'zero'
    cfg.context_w = 10
    cfg.hidden_size = 64
    cfg.num_ep = 40
    sweep = OrderedDict(context=[
        'zero', 'noise', 'scalar', 'ramping',
        'clock', 'stairs_end', 'stairs_start'])
    return vary_config(cfg, sweep, mode='combinatorial')
def timecode_aug():
    """Configs: temporal-context sweep with timescale augmentation."""
    cfg = BaseConfig()
    cfg.experiment_name = 'timecode_aug'
    cfg.rnn_type = 'plainRNN'
    cfg.t_scale = 1.0
    cfg.augment = (0.5, 1.5)
    cfg.use_velocity = False
    cfg.context = 'zero'
    cfg.context_w = 10
    cfg.hidden_size = 64
    cfg.num_ep = 40
    sweep = OrderedDict(context=[
        'zero', 'noise', 'scalar', 'ramping',
        'clock', 'stairs_end', 'stairs_start'])
    return vary_config(cfg, sweep, mode='combinatorial')
# -----------------------------------------------------
# analysis
# -----------------------------------------------------
def timescale_analysis():
    """Measure timescale generalization for every timescale() config."""
    configs = timescale()
    init_analysis(configs)
    scales = np.arange(0.1, 2, 0.1)
    accs = np.zeros_like(scales)
    for cfg in configs:
        # Probe the trained model at each scale on a copy, so the
        # original config object stays untouched.
        for pos, scale in enumerate(scales):
            probe = copy.deepcopy(cfg)
            probe.t_scale = scale
            accs[pos] = evaluate.eval_total_acc(probe)
        np.save(os.path.join(cfg.save_path, 'tscalelist.npy'), scales)
        np.save(os.path.join(cfg.save_path, 'acclist.npy'), accs)
        plots.plot_gen(scales, accs, cfg.rnn_type)
    plots.plot_group_gen(configs, configs[0].experiment_name, mode='rnn_type')
def timescale_aug_analysis():
    """Measure timescale generalization for the augmented RNN sweep."""
    configs = timescale_aug()
    init_analysis(configs)
    scales = np.arange(0.1, 2, 0.1)
    accs = np.zeros_like(scales)
    for cfg in configs:
        # Evaluate each time scale on a deep copy of the config.
        for pos, scale in enumerate(scales):
            probe = copy.deepcopy(cfg)
            probe.t_scale = scale
            accs[pos] = evaluate.eval_total_acc(probe)
        np.save(os.path.join(cfg.save_path, 'tscalelist.npy'), scales)
        np.save(os.path.join(cfg.save_path, 'acclist.npy'), accs)
        plots.plot_gen(scales, accs, cfg.rnn_type)
    plots.plot_group_gen(configs, configs[0].experiment_name, mode='rnn_type')
def timecode_analysis():
    """Measure timescale generalization for every timecode() config."""
    configs = timecode()
    init_analysis(configs)
    scales = np.arange(0.1, 2, 0.1)
    accs = np.zeros_like(scales)
    for cfg in configs:
        # Evaluate each time scale on a deep copy of the config.
        for pos, scale in enumerate(scales):
            probe = copy.deepcopy(cfg)
            probe.t_scale = scale
            accs[pos] = evaluate.eval_total_acc(probe)
        np.save(os.path.join(cfg.save_path, 'tscalelist.npy'), scales)
        np.save(os.path.join(cfg.save_path, 'acclist.npy'), accs)
        plots.plot_gen(scales, accs, cfg.context)
    plots.plot_group_gen(configs, configs[0].experiment_name, mode='context')
def timecode_aug_analysis():
    """Measure timescale generalization for the augmented context sweep."""
    configs = timecode_aug()
    init_analysis(configs)
    scales = np.arange(0.1, 2, 0.1)
    accs = np.zeros_like(scales)
    for cfg in configs:
        # Evaluate each time scale on a deep copy of the config.
        for pos, scale in enumerate(scales):
            probe = copy.deepcopy(cfg)
            probe.t_scale = scale
            accs[pos] = evaluate.eval_total_acc(probe)
        np.save(os.path.join(cfg.save_path, 'tscalelist.npy'), scales)
        np.save(os.path.join(cfg.save_path, 'acclist.npy'), accs)
        plots.plot_gen(scales, accs, cfg.context)
    plots.plot_group_gen(configs, configs[0].experiment_name, mode='context')
|
[
"utils.config_utils.vary_config",
"copy.deepcopy",
"numpy.zeros_like",
"analysis.plots.plot_gen",
"os.path.join",
"logging.basicConfig",
"evaluate.eval_total_acc",
"numpy.arange",
"analysis.train_analysis.plot_train_log",
"collections.OrderedDict",
"configs.configs.BaseConfig",
"analysis.plots.plot_group_gen"
] |
[((857, 893), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'LOG_LEVEL'}), '(level=LOG_LEVEL)\n', (876, 893), False, 'import logging\n'), ((1048, 1093), 'analysis.train_analysis.plot_train_log', 'plot_train_log', (['[exp_path]'], {'exp_name': 'exp_name'}), '([exp_path], exp_name=exp_name)\n', (1062, 1093), False, 'from analysis.train_analysis import plot_train_log\n'), ((1254, 1266), 'configs.configs.BaseConfig', 'BaseConfig', ([], {}), '()\n', (1264, 1266), False, 'from configs.configs import BaseConfig\n'), ((1549, 1562), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1560, 1562), False, 'from collections import OrderedDict\n'), ((1789, 1845), 'utils.config_utils.vary_config', 'vary_config', (['config', 'config_ranges'], {'mode': '"""combinatorial"""'}), "(config, config_ranges, mode='combinatorial')\n", (1800, 1845), False, 'from utils.config_utils import vary_config\n'), ((1901, 1913), 'configs.configs.BaseConfig', 'BaseConfig', ([], {}), '()\n', (1911, 1913), False, 'from configs.configs import BaseConfig\n'), ((2206, 2219), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2217, 2219), False, 'from collections import OrderedDict\n'), ((2446, 2502), 'utils.config_utils.vary_config', 'vary_config', (['config', 'config_ranges'], {'mode': '"""combinatorial"""'}), "(config, config_ranges, mode='combinatorial')\n", (2457, 2502), False, 'from utils.config_utils import vary_config\n'), ((2553, 2565), 'configs.configs.BaseConfig', 'BaseConfig', ([], {}), '()\n', (2563, 2565), False, 'from configs.configs import BaseConfig\n'), ((2849, 2862), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2860, 2862), False, 'from collections import OrderedDict\n'), ((3018, 3074), 'utils.config_utils.vary_config', 'vary_config', (['config', 'config_ranges'], {'mode': '"""combinatorial"""'}), "(config, config_ranges, mode='combinatorial')\n", (3029, 3074), False, 'from utils.config_utils import vary_config\n'), ((3129, 3141), 
'configs.configs.BaseConfig', 'BaseConfig', ([], {}), '()\n', (3139, 3141), False, 'from configs.configs import BaseConfig\n'), ((3435, 3448), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3446, 3448), False, 'from collections import OrderedDict\n'), ((3604, 3660), 'utils.config_utils.vary_config', 'vary_config', (['config', 'config_ranges'], {'mode': '"""combinatorial"""'}), "(config, config_ranges, mode='combinatorial')\n", (3615, 3660), False, 'from utils.config_utils import vary_config\n'), ((3904, 3926), 'numpy.arange', 'np.arange', (['(0.1)', '(2)', '(0.1)'], {}), '(0.1, 2, 0.1)\n', (3913, 3926), True, 'import numpy as np\n'), ((3942, 3969), 'numpy.zeros_like', 'np.zeros_like', (['t_scale_list'], {}), '(t_scale_list)\n', (3955, 3969), True, 'import numpy as np\n'), ((4400, 4474), 'analysis.plots.plot_group_gen', 'plots.plot_group_gen', (['configs', 'configs[0].experiment_name'], {'mode': '"""rnn_type"""'}), "(configs, configs[0].experiment_name, mode='rnn_type')\n", (4420, 4474), False, 'from analysis import plots\n'), ((4583, 4605), 'numpy.arange', 'np.arange', (['(0.1)', '(2)', '(0.1)'], {}), '(0.1, 2, 0.1)\n', (4592, 4605), True, 'import numpy as np\n'), ((4621, 4648), 'numpy.zeros_like', 'np.zeros_like', (['t_scale_list'], {}), '(t_scale_list)\n', (4634, 4648), True, 'import numpy as np\n'), ((5079, 5153), 'analysis.plots.plot_group_gen', 'plots.plot_group_gen', (['configs', 'configs[0].experiment_name'], {'mode': '"""rnn_type"""'}), "(configs, configs[0].experiment_name, mode='rnn_type')\n", (5099, 5153), False, 'from analysis import plots\n'), ((5252, 5274), 'numpy.arange', 'np.arange', (['(0.1)', '(2)', '(0.1)'], {}), '(0.1, 2, 0.1)\n', (5261, 5274), True, 'import numpy as np\n'), ((5290, 5317), 'numpy.zeros_like', 'np.zeros_like', (['t_scale_list'], {}), '(t_scale_list)\n', (5303, 5317), True, 'import numpy as np\n'), ((5747, 5820), 'analysis.plots.plot_group_gen', 'plots.plot_group_gen', (['configs', 'configs[0].experiment_name'], 
{'mode': '"""context"""'}), "(configs, configs[0].experiment_name, mode='context')\n", (5767, 5820), False, 'from analysis import plots\n'), ((5927, 5949), 'numpy.arange', 'np.arange', (['(0.1)', '(2)', '(0.1)'], {}), '(0.1, 2, 0.1)\n', (5936, 5949), True, 'import numpy as np\n'), ((5965, 5992), 'numpy.zeros_like', 'np.zeros_like', (['t_scale_list'], {}), '(t_scale_list)\n', (5978, 5992), True, 'import numpy as np\n'), ((6422, 6495), 'analysis.plots.plot_group_gen', 'plots.plot_group_gen', (['configs', 'configs[0].experiment_name'], {'mode': '"""context"""'}), "(configs, configs[0].experiment_name, mode='context')\n", (6442, 6495), False, 'from analysis import plots\n'), ((987, 1034), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""experiments"""', 'exp_name'], {}), "(ROOT_DIR, 'experiments', exp_name)\n", (999, 1034), False, 'import os\n'), ((4343, 4395), 'analysis.plots.plot_gen', 'plots.plot_gen', (['t_scale_list', 'acc_list', 'cfg.rnn_type'], {}), '(t_scale_list, acc_list, cfg.rnn_type)\n', (4357, 4395), False, 'from analysis import plots\n'), ((5022, 5074), 'analysis.plots.plot_gen', 'plots.plot_gen', (['t_scale_list', 'acc_list', 'cfg.rnn_type'], {}), '(t_scale_list, acc_list, cfg.rnn_type)\n', (5036, 5074), False, 'from analysis import plots\n'), ((5691, 5742), 'analysis.plots.plot_gen', 'plots.plot_gen', (['t_scale_list', 'acc_list', 'cfg.context'], {}), '(t_scale_list, acc_list, cfg.context)\n', (5705, 5742), False, 'from analysis import plots\n'), ((6366, 6417), 'analysis.plots.plot_gen', 'plots.plot_gen', (['t_scale_list', 'acc_list', 'cfg.context'], {}), '(t_scale_list, acc_list, cfg.context)\n', (6380, 6417), False, 'from analysis import plots\n'), ((4069, 4087), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (4082, 4087), False, 'import copy\n'), ((4154, 4186), 'evaluate.eval_total_acc', 'evaluate.eval_total_acc', (['new_cfg'], {}), '(new_cfg)\n', (4177, 4186), False, 'import evaluate\n'), ((4204, 4249), 'os.path.join', 'os.path.join', 
(['cfg.save_path', '"""tscalelist.npy"""'], {}), "(cfg.save_path, 'tscalelist.npy')\n", (4216, 4249), False, 'import os\n'), ((4281, 4323), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""acclist.npy"""'], {}), "(cfg.save_path, 'acclist.npy')\n", (4293, 4323), False, 'import os\n'), ((4748, 4766), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (4761, 4766), False, 'import copy\n'), ((4833, 4865), 'evaluate.eval_total_acc', 'evaluate.eval_total_acc', (['new_cfg'], {}), '(new_cfg)\n', (4856, 4865), False, 'import evaluate\n'), ((4883, 4928), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""tscalelist.npy"""'], {}), "(cfg.save_path, 'tscalelist.npy')\n", (4895, 4928), False, 'import os\n'), ((4960, 5002), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""acclist.npy"""'], {}), "(cfg.save_path, 'acclist.npy')\n", (4972, 5002), False, 'import os\n'), ((5417, 5435), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (5430, 5435), False, 'import copy\n'), ((5502, 5534), 'evaluate.eval_total_acc', 'evaluate.eval_total_acc', (['new_cfg'], {}), '(new_cfg)\n', (5525, 5534), False, 'import evaluate\n'), ((5552, 5597), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""tscalelist.npy"""'], {}), "(cfg.save_path, 'tscalelist.npy')\n", (5564, 5597), False, 'import os\n'), ((5629, 5671), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""acclist.npy"""'], {}), "(cfg.save_path, 'acclist.npy')\n", (5641, 5671), False, 'import os\n'), ((6092, 6110), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (6105, 6110), False, 'import copy\n'), ((6177, 6209), 'evaluate.eval_total_acc', 'evaluate.eval_total_acc', (['new_cfg'], {}), '(new_cfg)\n', (6200, 6209), False, 'import evaluate\n'), ((6227, 6272), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""tscalelist.npy"""'], {}), "(cfg.save_path, 'tscalelist.npy')\n", (6239, 6272), False, 'import os\n'), ((6304, 6346), 'os.path.join', 'os.path.join', (['cfg.save_path', 
'"""acclist.npy"""'], {}), "(cfg.save_path, 'acclist.npy')\n", (6316, 6346), False, 'import os\n')]
|
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import partial_support
from onnx_tf.handlers.handler import ps_description
from .conv_mixin import ConvMixin
@onnx_op("ConvTranspose")
@partial_support(True)
@ps_description("ConvTranspose with dilations != 1, or " +
                "transposed convolution for 4D or higher " +
                "are not supported in Tensorflow.")
class ConvTranspose(ConvMixin, BackendHandler):
  """Backend handler mapping the ONNX ConvTranspose op onto the shared
  convolution implementation in transpose mode."""

  @classmethod
  def version_1(cls, node, **kwargs):
    # Delegate to the ConvMixin implementation with transpose enabled.
    return cls.conv(node, kwargs["tensor_dict"], transpose=True)

  @classmethod
  def version_11(cls, node, **kwargs):
    # Opset 11 behaves identically for the supported cases.
    return cls.version_1(node, **kwargs)
|
[
"onnx_tf.handlers.handler.onnx_op",
"onnx_tf.handlers.handler.partial_support",
"onnx_tf.handlers.handler.ps_description"
] |
[((247, 271), 'onnx_tf.handlers.handler.onnx_op', 'onnx_op', (['"""ConvTranspose"""'], {}), "('ConvTranspose')\n", (254, 271), False, 'from onnx_tf.handlers.handler import onnx_op\n'), ((273, 294), 'onnx_tf.handlers.handler.partial_support', 'partial_support', (['(True)'], {}), '(True)\n', (288, 294), False, 'from onnx_tf.handlers.handler import partial_support\n'), ((296, 442), 'onnx_tf.handlers.handler.ps_description', 'ps_description', (["('ConvTranspose with dilations != 1, or ' +\n 'transposed convolution for 4D or higher ' +\n 'are not supported in Tensorflow.')"], {}), "('ConvTranspose with dilations != 1, or ' +\n 'transposed convolution for 4D or higher ' +\n 'are not supported in Tensorflow.')\n", (310, 442), False, 'from onnx_tf.handlers.handler import ps_description\n')]
|
# 12.10
import pandas as pd
import seaborn as sns
from sklearn.datasets import load_iris
iris_dataset = load_iris()
# Build a DataFrame of the flower measurements,
# labelling the columns with the strings from iris_dataset.feature_names.
iris_dataframe = pd.DataFrame(iris_dataset['data'], columns = iris_dataset.feature_names)
# Append the target attribute to the DataFrame.
iris_dataframe['target'] = iris_dataset['target']
# Map the numeric class codes to species names in a single pass.
# (Replaces three chained `Series.replace(..., inplace=True)` calls on a
# column view -- a chained-assignment pattern pandas has deprecated.)
iris_dataframe['target'] = iris_dataframe['target'].replace(
    {0: 'Setosa', 1: 'Versicolor', 2: 'Virginica'})
# Pairwise scatter plots coloured by species.
sns.pairplot(iris_dataframe, hue="target")
|
[
"pandas.DataFrame",
"sklearn.datasets.load_iris",
"seaborn.pairplot"
] |
[((106, 117), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (115, 117), False, 'from sklearn.datasets import load_iris\n'), ((258, 328), 'pandas.DataFrame', 'pd.DataFrame', (["iris_dataset['data']"], {'columns': 'iris_dataset.feature_names'}), "(iris_dataset['data'], columns=iris_dataset.feature_names)\n", (270, 328), True, 'import pandas as pd\n'), ((648, 690), 'seaborn.pairplot', 'sns.pairplot', (['iris_dataframe'], {'hue': '"""target"""'}), "(iris_dataframe, hue='target')\n", (660, 690), True, 'import seaborn as sns\n')]
|
from __future__ import annotations
from pyramid.config import Configurator
from wsgiref.simple_server import make_server
from zope.interface import Interface, implementer
from pyramid_services_viewmapper import ServiceInjector as SI, ServiceViewMapper
class IExampleService(Interface):
    """Interface of the example service injected into the demo views."""
    def example(self):
        """Return an example string."""
        pass
@implementer(IExampleService)
class ExampleService:
    """Concrete IExampleService that returns a fixed sample string."""

    def example(self):
        value = "example"
        return value
def function_view(request, example_service: IExampleService):
    """Function-based JSON view; the service is injected by interface."""
    payload = {"function": example_service.example()}
    return payload
class ClassView:
    """Class-based JSON view; the service is injected by interface."""

    def __init__(self, request, example_service: IExampleService):
        self.request = request
        self.example_service = example_service

    def __call__(self):
        payload = {"class": self.example_service.example()}
        return payload
class NamedService:
    """Service registered under an explicit name, not an interface."""

    def named(self):
        result = "named"
        return result
def named_view(request, named_service: SI(name="named_service")):
    """JSON view; the service is injected by its registration name."""
    payload = {"function": named_service.named()}
    return payload
def implicit_named_view(request, named_service: SI):
    """JSON view; the service name is taken from the parameter name."""
    payload = {"function": named_service.named()}
    return payload
if __name__ == '__main__':
    # Demo WSGI app showing service injection into pyramid views.
    config = Configurator()
    # NOTE(review): pyramid_services is included before any services are
    # registered -- presumably required ordering; confirm before moving.
    config.include("pyramid_services")
    # Registered by interface: resolved via IExampleService annotations.
    config.register_service(ExampleService(), IExampleService)
    # Registered by name: resolved via SI(name=...) or the parameter name.
    config.register_service(NamedService(), name="named_service")
    config.include("pyramid_services_viewmapper")
    # One route/view pair per injection style demonstrated above.
    config.add_route("function", "/function")
    config.add_view(function_view, route_name="function", renderer="json")
    config.add_route("class", "/class")
    config.add_view(ClassView, route_name="class", renderer="json")
    config.add_route("named", "/named")
    config.add_view(named_view, route_name="named", renderer="json")
    config.add_route("implicit_named", "/implicit_named")
    config.add_view(implicit_named_view, route_name="implicit_named", renderer="json")
    # Serve on all interfaces, port 8080, until interrupted.
    server = make_server('0.0.0.0', 8080, config.make_wsgi_app())
    server.serve_forever()
|
[
"zope.interface.implementer",
"pyramid.config.Configurator",
"pyramid_services_viewmapper.ServiceInjector"
] |
[((329, 357), 'zope.interface.implementer', 'implementer', (['IExampleService'], {}), '(IExampleService)\n', (340, 357), False, 'from zope.interface import Interface, implementer\n'), ((1114, 1128), 'pyramid.config.Configurator', 'Configurator', ([], {}), '()\n', (1126, 1128), False, 'from pyramid.config import Configurator\n'), ((896, 920), 'pyramid_services_viewmapper.ServiceInjector', 'SI', ([], {'name': '"""named_service"""'}), "(name='named_service')\n", (898, 920), True, 'from pyramid_services_viewmapper import ServiceInjector as SI, ServiceViewMapper\n')]
|
'''
Created on 22.09.2014
@author: markusfasel
'''
from PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import SinglePanelPlot, GraphicsObject, Style, Frame
from PWGJE.EMCALJetTasks.Tracks.analysis.correction.TriggeredSpectrumScaler import TriggeredSpectrumScaler
from PWGJE.EMCALJetTasks.Tracks.analysis.correction.SpectrumCombiner import SpectrumCombiner
from ROOT import kRed, kBlack, kBlue
class PlotScaledTriggeredToMinBias(SinglePanelPlot):
    '''
    Single-panel comparison of a min-bias p_t spectrum, a triggered
    spectrum scaled onto it, and their combination.
    '''
    def __init__(self, minbiasspectrum, triggeredSpectrum):
        '''
        Constructor.

        :param minbiasspectrum: min-bias spectrum used as the reference
        :param triggeredSpectrum: triggered spectrum to be scaled onto
            the min-bias one
        '''
        SinglePanelPlot.__init__(self)
        # Min-bias reference (red, marker style 25).
        self.__minbiasSpectrum = GraphicsObject(minbiasspectrum, Style(kRed,25))
        triggeredSpectrumMaker = TriggeredSpectrumScaler(minbiasspectrum, triggeredSpectrum)
        # Triggered spectrum scaled to the min-bias level (blue, style 24).
        self.__triggeredSpectrum = GraphicsObject(triggeredSpectrumMaker.GetScaledTriggeredSpectrum(), Style(kBlue, 24))
        combinedSpectrumMaker = SpectrumCombiner(minbiasspectrum, self.__triggeredSpectrum.GetData())
        # Combined spectrum (black, style 20); 50. is presumably the p_t
        # where the two sources are joined -- confirm in SpectrumCombiner.
        self.__combinedSpectrum = GraphicsObject(combinedSpectrumMaker.MakeCombinedSpectrum(50.), Style(kBlack, 20))
        self.__labeltext = None
    def SetLabel(self, label):
        # Optional label text drawn by Create() when set.
        self.__labeltext = label
    def Create(self):
        '''
        Build the canvas: log-y frame, the three spectra, a legend, and
        the optional label.
        '''
        self._OpenCanvas("triggerSpectrumScalerPlot", "Compare scaled trigger to minbias")
        pad = self._GetFramedPad()
        #pad.GetPad().SetLogx()
        pad.GetPad().SetLogy()
        frame = Frame("framecomp", 0.1, 100, 1e-10, 2)
        frame.SetXtitle("p_{t} (GeV/c)")
        frame.SetYtitle("1/N_{ev} dN/dp_{t} ((GeV/c)^{-1})")
        pad.DrawFrame(frame)
        pad.DrawGraphicsObject(self.__combinedSpectrum, True, "Combined")
        pad.DrawGraphicsObject(self.__minbiasSpectrum, True, "MinBias")
        pad.DrawGraphicsObject(self.__triggeredSpectrum, True, "Triggered")
        pad.CreateLegend(0.55, 0.75, 0.89, 0.89)
        if self.__labeltext:
            pad.CreateLabel(0.15, 0.15, 0.45, 0.2, self.__labeltext)
|
[
"PWGJE.EMCALJetTasks.Tracks.analysis.correction.TriggeredSpectrumScaler.TriggeredSpectrumScaler",
"PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics.Frame",
"PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics.Style",
"PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics.SinglePanelPlot.__init__"
] |
[((596, 626), 'PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics.SinglePanelPlot.__init__', 'SinglePanelPlot.__init__', (['self'], {}), '(self)\n', (620, 626), False, 'from PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import SinglePanelPlot, GraphicsObject, Style, Frame\n'), ((741, 800), 'PWGJE.EMCALJetTasks.Tracks.analysis.correction.TriggeredSpectrumScaler.TriggeredSpectrumScaler', 'TriggeredSpectrumScaler', (['minbiasspectrum', 'triggeredSpectrum'], {}), '(minbiasspectrum, triggeredSpectrum)\n', (764, 800), False, 'from PWGJE.EMCALJetTasks.Tracks.analysis.correction.TriggeredSpectrumScaler import TriggeredSpectrumScaler\n'), ((1482, 1520), 'PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics.Frame', 'Frame', (['"""framecomp"""', '(0.1)', '(100)', '(1e-10)', '(2)'], {}), "('framecomp', 0.1, 100, 1e-10, 2)\n", (1487, 1520), False, 'from PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import SinglePanelPlot, GraphicsObject, Style, Frame\n'), ((692, 707), 'PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics.Style', 'Style', (['kRed', '(25)'], {}), '(kRed, 25)\n', (697, 707), False, 'from PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import SinglePanelPlot, GraphicsObject, Style, Frame\n'), ((904, 920), 'PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics.Style', 'Style', (['kBlue', '(24)'], {}), '(kBlue, 24)\n', (909, 920), False, 'from PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import SinglePanelPlot, GraphicsObject, Style, Frame\n'), ((1122, 1139), 'PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics.Style', 'Style', (['kBlack', '(20)'], {}), '(kBlack, 20)\n', (1127, 1139), False, 'from PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import SinglePanelPlot, GraphicsObject, Style, Frame\n')]
|
"""
Module Functions:
Plot King County House Rate data,
in the form of a sale-listing cluster map and a density heat map.
"""
import os
import webbrowser
import pandas as pd
import folium
from folium import plugins
# Set global settings and macros.
MAX_SHOW = 1000  # cap on individually plotted listings
HOUSE_URL = 'houses.html'
HOUSE_HEAT_URL = "househeatmap.html"
# NOTE(review): CRIME_HEAT_URL is unused in this script -- presumably
# consumed by a companion crime-map script; confirm before removing.
CRIME_HEAT_URL = "crimeheatmap.html"
pd.set_option('display.max_columns', None) # To display all columns
# Read in king county house data.
data = pd.read_csv("../../Data/kc_house_data.csv", parse_dates=['date'])
data['zipcode'] = data['zipcode'].astype(str)
## Create one map showing each listing of the house dataset; open in browser.
# Use folium Map function to plot the underlying basic map.
house_map = folium.Map(location=[data['lat'].mean(), data['long'].mean()], zoom_start=10)
# Define clusters of house markers added on top of house_map.
# NOTE(review): folium.MarkerCluster moved to folium.plugins.MarkerCluster
# in newer folium releases -- confirm the pinned folium version.
marker_cluster = folium.MarkerCluster().add_to(house_map)
# Iteratively add interactive markers to the cluster.
# On click, each marker's popup shows the listing information:
# sqft, price.
for iters, row in data[0:MAX_SHOW].iterrows():
    folium.Marker([row["lat"], row["long"]],
                  popup="{0} sqft: Sold for $ {1}"\
                  .format(row["sqft_living"], row["price"])).add_to(marker_cluster)
# Save the house cluster map to an html file and open it in a browser.
house_map.save(HOUSE_URL)
webbrowser.open('file://' + os.path.realpath(HOUSE_URL), new=2)
## Create one map showing the frequency of house sales; open in browser.
# Use folium Map function to plot the underlying basic map.
houses_heatmap = folium.Map(location=[data['lat'].mean(), data['long'].mean()], zoom_start=10)
# Add a price-weighted heatmap layer on top of the basic map.
# NOTE(review): Map.add_children is the legacy alias of add_child --
# confirm it still exists in the pinned folium version.
houses_heatmap.add_children(
    plugins.HeatMap([[row["lat"], row["long"], row["price"]]
                     for iters, row in data.iterrows()],
                    min_opacity=0.5, max_zoom=18, radius=8)) #[0:MAX_SHOW]
# Save the sale-frequency heat map to an html file and open it in a browser.
houses_heatmap.save(HOUSE_HEAT_URL)
webbrowser.open('file://' + os.path.realpath(HOUSE_HEAT_URL), new=2)
|
[
"pandas.read_csv",
"os.path.realpath",
"pandas.set_option",
"folium.MarkerCluster"
] |
[((373, 415), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (386, 415), True, 'import pandas as pd\n'), ((484, 549), 'pandas.read_csv', 'pd.read_csv', (['"""../../Data/kc_house_data.csv"""'], {'parse_dates': "['date']"}), "('../../Data/kc_house_data.csv', parse_dates=['date'])\n", (495, 549), True, 'import pandas as pd\n'), ((919, 941), 'folium.MarkerCluster', 'folium.MarkerCluster', ([], {}), '()\n', (939, 941), False, 'import folium\n'), ((1442, 1469), 'os.path.realpath', 'os.path.realpath', (['HOUSE_URL'], {}), '(HOUSE_URL)\n', (1458, 1469), False, 'import os\n'), ((2107, 2139), 'os.path.realpath', 'os.path.realpath', (['HOUSE_HEAT_URL'], {}), '(HOUSE_HEAT_URL)\n', (2123, 2139), False, 'import os\n')]
|
from pathlib import Path
import numpy as np
from PIL import Image
def load_light_distribution(name="lamp_spectrum.csv", step=20, n_bands=44,
                            min_wavelength=400.0):
    """Load a light-source spectral distribution from a text/CSV file.

    The file must have one header row followed by ``wavelength value``
    rows, whitespace separated (``np.loadtxt`` defaults).

    :param name: path of the spectrum file.
    :param step: subsampling stride over the kept rows (default 20, the
        previously hard-coded value matching the HSI band spacing).
    :param n_bands: number of spectral bands kept after subsampling
        (default 44, matching the HSI cube).
    :param min_wavelength: rows below this wavelength are discarded
        (default 400, the lower edge of the HSI range).
    :return: column vector of shape ``(n_bands, 1)`` (fewer rows if the
        file is short) with the spectral values.
    """
    sd_light_source = np.loadtxt(name, skiprows=1, dtype="float")
    # Keep only wavelengths covered by the hyperspectral data.
    sd_light_source = sd_light_source[np.where(sd_light_source[:, 0] >= min_wavelength)]
    # Drop the wavelength column; keep the values as an (N, 1) array.
    sd_light_source = sd_light_source[:, 1:2]
    # Subsample to the HSI band spacing and clip to the band count.
    return sd_light_source[::step][:n_bands]
def load_illuminantA(name="A.csv"):
sd_light_source = np.loadtxt(name, skiprows=1, dtype="float")
sd_light_source = sd_light_source[np.where(sd_light_source[:, 0] >= 400)]
sd_light_source = sd_light_source[:, 1:2]
# print("sum",np.sum(sd_light_source))
# sd_light_source = sd_light_source / np.max(sd_light_source)
sd_light_source = sd_light_source[::2]
sd_light_source = sd_light_source[:44]
# print(sd_light_source)
return sd_light_source
def hsi_to_ci31931_rgb(himg, dist_name):
pass
def hsi_to_rgb(himg, dist_name):
"""
input: ハイパースペクトル画像 HSI(numpy型)
return: RGB画像(Image objedct)
"""
# 計測時にノイズとして負の値になった値を0にする
np.where(himg < 0, 0, himg)
cmf = np.loadtxt("./csvs/CIE1931-2deg-XYZ.csv", delimiter=",")
# HSIが400nm以上のため等色関数も400nm以上のみを利用
cmf = cmf[np.where(cmf[:, 0] >= 400)]
# 光源の分光分布の5nm刻みをHSIと同じ10nm刻みに変更
cmf = cmf[::2]
cmf = cmf[:44, :]
stem = Path(dist_name).stem
if stem in ["A"]:
# 標準光源Aは780nmまでを可視光としているため,HSIと等色関数も780nmm以下に制限
nhimg = himg[:, :, :39]
cmf = cmf[:39, :]
sd_light_source = load_illuminantA(name=dist_name)
elif stem in ["D65"]:
nhimg = himg[:, :, :44]
cmf = cmf[:44, :]
sd_light_source = load_illuminantA(name=dist_name)
else:
nhimg = himg[:, :, :44]
sd_light_source = load_light_distribution(name=dist_name)
flag_const_100 = False
ncmf = cmf[:, 1:]
nmf_multi_ld = ncmf * sd_light_source
x = nmf_multi_ld[:, 0]
y = nmf_multi_ld[:, 1]
z = nmf_multi_ld[:, 2]
if flag_const_100:
k = 100 / np.sum(y)
# print(np.sum(y))
else:
k = 1 / np.sum(y)
# print(np.sum(y))
X = np.sum(x * nhimg, axis=2)
Y = np.sum(y * nhimg, axis=2)
Z = np.sum(z * nhimg, axis=2)
XYZ = np.stack([X, Y, Z], 2)
# print(np.max(XYZ), np.min(XYZ))
# print(np.max(Y*k), np.min(Y*k))
XYZ = XYZ * k
XYZ.shape
xyz_to_r = np.array([3.2406255, -1.537208, -0.4986286])
r = np.dot(XYZ, xyz_to_r)
xyz_to_g = np.array([-0.9689307, 1.8757561, 0.0415175])
g = np.dot(XYZ, xyz_to_g)
xyz_to_b = np.array([0.0557101, -0.2040211, 1.0569959])
b = np.dot(XYZ, xyz_to_b)
rgb_img2 = np.stack([r, g, b], axis=2)
rgb_img2 = np.where(rgb_img2 < 0, 0, rgb_img2)
if flag_const_100:
# HSI画像配布元と同じガンマ補正(ガンマ=0.6)をする
# print(np.max(rgb_img2))
rgb_img2 = np.power(rgb_img2/255, 0.6)
else:
# XYZからsRGBへのレンダリングするためのガンマ補正
# print(np.max(255*rgb_img2))
rgb_img2 = np.where(rgb_img2 <= 0.0031308, 12.92 * rgb_img2, 1.055 * np.power(rgb_img2, 1/2.4) - 0.055)
rgb_img2 = np.clip(rgb_img2, 0, 1)
if flag_const_100:
img = Image.fromarray(np.uint8(255*rgb_img2))
else:
img = Image.fromarray(np.uint8(255*rgb_img2))
return img
|
[
"numpy.stack",
"numpy.uint8",
"numpy.sum",
"numpy.power",
"numpy.clip",
"pathlib.Path",
"numpy.where",
"numpy.array",
"numpy.loadtxt",
"numpy.dot"
] |
[((146, 189), 'numpy.loadtxt', 'np.loadtxt', (['name'], {'skiprows': '(1)', 'dtype': '"""float"""'}), "(name, skiprows=1, dtype='float')\n", (156, 189), True, 'import numpy as np\n'), ((663, 706), 'numpy.loadtxt', 'np.loadtxt', (['name'], {'skiprows': '(1)', 'dtype': '"""float"""'}), "(name, skiprows=1, dtype='float')\n", (673, 706), True, 'import numpy as np\n'), ((1287, 1314), 'numpy.where', 'np.where', (['(himg < 0)', '(0)', 'himg'], {}), '(himg < 0, 0, himg)\n', (1295, 1314), True, 'import numpy as np\n'), ((1326, 1382), 'numpy.loadtxt', 'np.loadtxt', (['"""./csvs/CIE1931-2deg-XYZ.csv"""'], {'delimiter': '""","""'}), "('./csvs/CIE1931-2deg-XYZ.csv', delimiter=',')\n", (1336, 1382), True, 'import numpy as np\n'), ((2341, 2366), 'numpy.sum', 'np.sum', (['(x * nhimg)'], {'axis': '(2)'}), '(x * nhimg, axis=2)\n', (2347, 2366), True, 'import numpy as np\n'), ((2375, 2400), 'numpy.sum', 'np.sum', (['(y * nhimg)'], {'axis': '(2)'}), '(y * nhimg, axis=2)\n', (2381, 2400), True, 'import numpy as np\n'), ((2409, 2434), 'numpy.sum', 'np.sum', (['(z * nhimg)'], {'axis': '(2)'}), '(z * nhimg, axis=2)\n', (2415, 2434), True, 'import numpy as np\n'), ((2445, 2467), 'numpy.stack', 'np.stack', (['[X, Y, Z]', '(2)'], {}), '([X, Y, Z], 2)\n', (2453, 2467), True, 'import numpy as np\n'), ((2591, 2635), 'numpy.array', 'np.array', (['[3.2406255, -1.537208, -0.4986286]'], {}), '([3.2406255, -1.537208, -0.4986286])\n', (2599, 2635), True, 'import numpy as np\n'), ((2644, 2665), 'numpy.dot', 'np.dot', (['XYZ', 'xyz_to_r'], {}), '(XYZ, xyz_to_r)\n', (2650, 2665), True, 'import numpy as np\n'), ((2681, 2725), 'numpy.array', 'np.array', (['[-0.9689307, 1.8757561, 0.0415175]'], {}), '([-0.9689307, 1.8757561, 0.0415175])\n', (2689, 2725), True, 'import numpy as np\n'), ((2734, 2755), 'numpy.dot', 'np.dot', (['XYZ', 'xyz_to_g'], {}), '(XYZ, xyz_to_g)\n', (2740, 2755), True, 'import numpy as np\n'), ((2771, 2815), 'numpy.array', 'np.array', (['[0.0557101, -0.2040211, 1.0569959]'], {}), 
'([0.0557101, -0.2040211, 1.0569959])\n', (2779, 2815), True, 'import numpy as np\n'), ((2824, 2845), 'numpy.dot', 'np.dot', (['XYZ', 'xyz_to_b'], {}), '(XYZ, xyz_to_b)\n', (2830, 2845), True, 'import numpy as np\n'), ((2861, 2888), 'numpy.stack', 'np.stack', (['[r, g, b]'], {'axis': '(2)'}), '([r, g, b], axis=2)\n', (2869, 2888), True, 'import numpy as np\n'), ((2905, 2940), 'numpy.where', 'np.where', (['(rgb_img2 < 0)', '(0)', 'rgb_img2'], {}), '(rgb_img2 < 0, 0, rgb_img2)\n', (2913, 2940), True, 'import numpy as np\n'), ((3297, 3320), 'numpy.clip', 'np.clip', (['rgb_img2', '(0)', '(1)'], {}), '(rgb_img2, 0, 1)\n', (3304, 3320), True, 'import numpy as np\n'), ((228, 266), 'numpy.where', 'np.where', (['(sd_light_source[:, 0] >= 400)'], {}), '(sd_light_source[:, 0] >= 400)\n', (236, 266), True, 'import numpy as np\n'), ((745, 783), 'numpy.where', 'np.where', (['(sd_light_source[:, 0] >= 400)'], {}), '(sd_light_source[:, 0] >= 400)\n', (753, 783), True, 'import numpy as np\n'), ((1435, 1461), 'numpy.where', 'np.where', (['(cmf[:, 0] >= 400)'], {}), '(cmf[:, 0] >= 400)\n', (1443, 1461), True, 'import numpy as np\n'), ((1552, 1567), 'pathlib.Path', 'Path', (['dist_name'], {}), '(dist_name)\n', (1556, 1567), False, 'from pathlib import Path\n'), ((3056, 3085), 'numpy.power', 'np.power', (['(rgb_img2 / 255)', '(0.6)'], {}), '(rgb_img2 / 255, 0.6)\n', (3064, 3085), True, 'import numpy as np\n'), ((2233, 2242), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (2239, 2242), True, 'import numpy as np\n'), ((2296, 2305), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (2302, 2305), True, 'import numpy as np\n'), ((3374, 3398), 'numpy.uint8', 'np.uint8', (['(255 * rgb_img2)'], {}), '(255 * rgb_img2)\n', (3382, 3398), True, 'import numpy as np\n'), ((3438, 3462), 'numpy.uint8', 'np.uint8', (['(255 * rgb_img2)'], {}), '(255 * rgb_img2)\n', (3446, 3462), True, 'import numpy as np\n'), ((3247, 3274), 'numpy.power', 'np.power', (['rgb_img2', '(1 / 2.4)'], {}), '(rgb_img2, 1 / 2.4)\n', 
(3255, 3274), True, 'import numpy as np\n')]
|
import pyaudio
import numpy as np
import sys
import time
import asyncio
from aiohttp import web, WSMsgType
import json
import os
import struct
import websocket
HOST = os.getenv('HOST', '0.0.0.0')
PORT = int(os.getenv('PORT', 8080))
SAMPLE_RATE = 44100
CHUNK_SIZE = 4096
AUDIO_FORMAT = pyaudio.paInt16
FORMAT = np.int16
def calculate_levels(data, chunk,sample_rate):
# Apply FFT - real data so rfft used
fourier=np.fft.rfft(data)
# Remove last element in array to make it the same size as chunk
fourier=np.delete(fourier,len(fourier)-1)
#fourier = fourier[0:256]
# Find amplitude
power = np.log10(np.abs(fourier))**2
# Arrange array into 256 rows for the Unicorn HAT HD
power = np.reshape(power,(256,8))
matrix= np.average(power,axis=1)
return list(matrix.astype(int).astype(float))
def calculate_spect(data, chunk):
data_int = struct.unpack(str(2 * chunk) + 'B', data)
yf = np.fft.rfft(data_int)
spect = np.abs(yf[256:512]) / (128 * chunk)
max_v = np.max(spect)
# hist = np.histogram(spect, 256)
return list(spect.astype(float)), max_v.astype(float)
# return list(hist[0].astype(float)), max_v.astype(float)
def audio_analyse(stream):
signal = np.frombuffer(stream.read(CHUNK_SIZE, exception_on_overflow = False), FORMAT)
# levels = calculate_levels(signal, CHUNK_SIZE, SAMPLE_RATE)
levels, max_v = calculate_spect(signal, CHUNK_SIZE)
return json.dumps({'data':levels,'max':max_v})
async def connection_test(request):
return web.Response(text='Connection test')
async def websocket_handler(request):
print('Websocket connection starting')
ws = web.WebSocketResponse()
await ws.prepare(request)
print('Websocket connection ready')
# rgb = audio_analyse(stream)
async for msg in ws:
levels = audio_analyse(stream)
if msg.type == WSMsgType.TEXT:
if msg.data == 'close':
await ws.close()
else:
await ws.send_str(levels)
print('Websocket connection closed')
return ws
def main():
loop = asyncio.get_event_loop()
app = web.Application(loop=loop)
app.router.add_route('GET', '/', connection_test)
app.router.add_route('GET', '/ws', websocket_handler)
web.run_app(app, host=HOST, port=PORT)
if __name__ == '__main__':
p = pyaudio.PyAudio()
stream = p.open(format=AUDIO_FORMAT, channels=1, rate=SAMPLE_RATE, input=True, frames_per_buffer=CHUNK_SIZE)
main()
|
[
"numpy.fft.rfft",
"numpy.average",
"aiohttp.web.Response",
"aiohttp.web.WebSocketResponse",
"asyncio.get_event_loop",
"numpy.abs",
"aiohttp.web.Application",
"json.dumps",
"numpy.max",
"numpy.reshape",
"aiohttp.web.run_app",
"pyaudio.PyAudio",
"os.getenv"
] |
[((169, 197), 'os.getenv', 'os.getenv', (['"""HOST"""', '"""0.0.0.0"""'], {}), "('HOST', '0.0.0.0')\n", (178, 197), False, 'import os\n'), ((209, 232), 'os.getenv', 'os.getenv', (['"""PORT"""', '(8080)'], {}), "('PORT', 8080)\n", (218, 232), False, 'import os\n'), ((424, 441), 'numpy.fft.rfft', 'np.fft.rfft', (['data'], {}), '(data)\n', (435, 441), True, 'import numpy as np\n'), ((718, 745), 'numpy.reshape', 'np.reshape', (['power', '(256, 8)'], {}), '(power, (256, 8))\n', (728, 745), True, 'import numpy as np\n'), ((756, 781), 'numpy.average', 'np.average', (['power'], {'axis': '(1)'}), '(power, axis=1)\n', (766, 781), True, 'import numpy as np\n'), ((932, 953), 'numpy.fft.rfft', 'np.fft.rfft', (['data_int'], {}), '(data_int)\n', (943, 953), True, 'import numpy as np\n'), ((1014, 1027), 'numpy.max', 'np.max', (['spect'], {}), '(spect)\n', (1020, 1027), True, 'import numpy as np\n'), ((1438, 1480), 'json.dumps', 'json.dumps', (["{'data': levels, 'max': max_v}"], {}), "({'data': levels, 'max': max_v})\n", (1448, 1480), False, 'import json\n'), ((1526, 1562), 'aiohttp.web.Response', 'web.Response', ([], {'text': '"""Connection test"""'}), "(text='Connection test')\n", (1538, 1562), False, 'from aiohttp import web, WSMsgType\n'), ((1654, 1677), 'aiohttp.web.WebSocketResponse', 'web.WebSocketResponse', ([], {}), '()\n', (1675, 1677), False, 'from aiohttp import web, WSMsgType\n'), ((2097, 2121), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2119, 2121), False, 'import asyncio\n'), ((2132, 2158), 'aiohttp.web.Application', 'web.Application', ([], {'loop': 'loop'}), '(loop=loop)\n', (2147, 2158), False, 'from aiohttp import web, WSMsgType\n'), ((2275, 2313), 'aiohttp.web.run_app', 'web.run_app', (['app'], {'host': 'HOST', 'port': 'PORT'}), '(app, host=HOST, port=PORT)\n', (2286, 2313), False, 'from aiohttp import web, WSMsgType\n'), ((2351, 2368), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (2366, 2368), False, 'import pyaudio\n'), 
((966, 985), 'numpy.abs', 'np.abs', (['yf[256:512]'], {}), '(yf[256:512])\n', (972, 985), True, 'import numpy as np\n'), ((629, 644), 'numpy.abs', 'np.abs', (['fourier'], {}), '(fourier)\n', (635, 644), True, 'import numpy as np\n')]
|
from django.contrib import admin
# Register your models here.
from .models import Movie
class MovieAdmin(admin.ModelAdmin):
list_display = ['id', 'movie_id', 'title', 'slug']
prepopulated_fields = {
"slug": ("title",)
}
admin.site.register(Movie, MovieAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((244, 282), 'django.contrib.admin.site.register', 'admin.site.register', (['Movie', 'MovieAdmin'], {}), '(Movie, MovieAdmin)\n', (263, 282), False, 'from django.contrib import admin\n')]
|
import os
import boto3
TABLE_NAME = os.environ.get("TABLE_NAME", "filter-demo-data")
TABLE = boto3.resource("dynamodb").Table(TABLE_NAME)
CLIENT = boto3.client("dynamodb")
def lambda_handler(event, context):
# We can work on the assumption that we only get items
# in NewImage with a type of "VIEW", that means we can
# rely on userId, videoId, and duration being present.
# We can also assume we get a single record.
item = event["Records"][0]["dynamodb"]["NewImage"]
event_name = event["Records"][0]["eventName"] # INSERT or REMOVE
user_id = item["userId"]["S"]
video_id = item["videoId"]["S"]
duration = item["duration"]["N"]
print(f"Type: {event_name} User: {user_id} Video: {video_id} Duration: {duration}")
# We use a transaction so either both writes succeed, or both fail.
CLIENT.transact_write_items(
TransactItems=[
{
"Update": {
"TableName": TABLE_NAME,
"Key": {
"PK": {"S": f"USER#{user_id}"},
"SK": {"S": "SUMMARY"}
},
"UpdateExpression": "ADD #views :view_increment, "\
"#duration :duration_increment "\
"SET #type = :type, "\
"#userId = :userId",
"ExpressionAttributeNames": {
"#views": "views",
"#duration": "duration",
"#type": "type",
"#userId": "userId",
},
"ExpressionAttributeValues": {
":view_increment": {"N": str(1)},
":duration_increment": {"N": str(duration)},
":type": {"S": "USER_SUMMARY"},
":userId": {"S": str(user_id)},
},
},
},
{
"Update": {
"TableName": TABLE_NAME,
"Key": {
"PK": {"S": f"VIDEO#{video_id}"},
"SK": {"S": "SUMMARY"}
},
"UpdateExpression": "ADD #views :view_increment, "\
"#duration :duration_increment "\
"SET #type = :type, "\
"#videoId = :videoId, "\
"#gsi1pk = :gsi1pk, "\
"#gsi1sk = :gsi1sk",
"ExpressionAttributeNames": {
"#views": "views",
"#duration": "duration",
"#type": "type",
"#videoId": "videoId",
"#gsi1pk": "GSI1PK",
"#gsi1sk": "GSI1SK",
},
"ExpressionAttributeValues": {
":view_increment": {"N": str(1)},
":duration_increment": {"N": str(duration)},
":type": {"S": "VIDEO_SUMMARY"},
":videoId": {"S": str(video_id)},
":gsi1pk": {"S": f"VIDEO#{video_id}"},
":gsi1sk": {"S": "SUMMARY"},
},
}
},
]
)
|
[
"os.environ.get",
"boto3.resource",
"boto3.client"
] |
[((38, 86), 'os.environ.get', 'os.environ.get', (['"""TABLE_NAME"""', '"""filter-demo-data"""'], {}), "('TABLE_NAME', 'filter-demo-data')\n", (52, 86), False, 'import os\n'), ((150, 174), 'boto3.client', 'boto3.client', (['"""dynamodb"""'], {}), "('dynamodb')\n", (162, 174), False, 'import boto3\n'), ((96, 122), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (110, 122), False, 'import boto3\n')]
|
# Copyright (c) 2013, 2021, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import mysql.connector
import tests
class WL6351Tests(tests.MySQLConnectorTests):
"""Test to check for Return error codes."""
def test_host(self):
"""Try to open a database connection with wrong ip should throw
an error.
"""
config = self.get_clean_mysql_config()
config["host"] = "1.3.5.1"
config["connect_timeout"] = 1
for cls in self.all_cnx_classes:
self.assertRaises(
(
mysql.connector.errors.InterfaceError,
mysql.connector.errors.DatabaseError,
),
cls,
**config
)
@tests.foreach_cnx()
def test_db(self):
"""Try to open a database connection and use non existing database."""
with self.cnx.cursor() as cur:
with self.assertRaises(
mysql.connector.errors.ProgrammingError
) as context:
cur.execute("use unknowndb")
self.assertEqual(context.exception.errno, 1049)
@tests.foreach_cnx()
def test_table(self):
"""Execute the SQL query using execute() method."""
with self.cnx.cursor() as cur:
with self.assertRaises(
mysql.connector.errors.ProgrammingError
) as context:
cur.execute("SELECT * FROM unknowntable")
self.assertEqual(context.exception.errno, 1146)
|
[
"tests.foreach_cnx"
] |
[((2072, 2091), 'tests.foreach_cnx', 'tests.foreach_cnx', ([], {}), '()\n', (2089, 2091), False, 'import tests\n'), ((2462, 2481), 'tests.foreach_cnx', 'tests.foreach_cnx', ([], {}), '()\n', (2479, 2481), False, 'import tests\n')]
|
import logging
import yaml
import pprint
from settings.HeadConfiguration import HeadConfiguration
from settings.ConfigurationBaseClass import ConfigurationBaseClass
class ConfigurationHandler(ConfigurationBaseClass):
"""
This class handels the configuration file. After loading the content this class
provides getter methods to the content.
"""
__mServerSectionKey = 'servers'
__mHeadsSectionKey = 'heads'
def __init__(self, configurationFilePath: str):
self.__mFilePath = configurationFilePath
with open(configurationFilePath, 'r') as fileHandle:
configurationContent = yaml.safe_load(fileHandle)
self.__checkConfigurationFileContent(configurationContent)
serverList = configurationContent[self.__mServerSectionKey]
self.__checkServerListContent(serverList)
self.__mServers = serverList
self.__mHeads = configurationContent[self.__mHeadsSectionKey]
listOfHeads = self.__convertHeadsConfig2ListOfHeads()
self.__checkListOfHeads(listOfHeads)
self.__mListOfHeads = listOfHeads
def getListOfHeadConfigurationObjects(self) -> list():
return self.__mListOfHeads
def __convertHeadsConfig2ListOfHeads(self):
listOfHeadConfigurationsObjects = []
for headConfiguration in self.__mHeads:
listOfHeadConfigurationsObjects.append(HeadConfiguration(headConfiguration, self.__mServers))
return listOfHeadConfigurationsObjects
def __checkConfigurationFileContent(self, configurationContent):
keysToCheck = [
self.__mServerSectionKey,
self.__mHeadsSectionKey
]
try:
self._checkConfigurationStructure(configurationContent, keysToCheck)
except Exception as error:
logging.error('Wrong structure in file: ' + self.__mFilePath)
raise error
logging.debug('All section are found in file: ' + self.__mFilePath)
def __checkServerListContent(self, serverList):
for serverKey, server in serverList.items():
self._checkServerContent(server)
def __checkListOfHeads(self, listOfHeads):
occurrence = {}
for head in listOfHeads:
self.__countKey(occurrence, head.getName())
self.__countKey(occurrence, head.getBleMac())
for key, value in occurrence.items():
if value > 1:
raise Exception('Multiple occurrence of heads found: ' + pprint.pformat(occurrence))
def __countKey(self, occurrenceDict, key):
if key in occurrenceDict:
occurrenceDict[key] += 1
else:
occurrenceDict[key] = 1
|
[
"settings.HeadConfiguration.HeadConfiguration",
"pprint.pformat",
"logging.debug",
"logging.error",
"yaml.safe_load"
] |
[((1911, 1978), 'logging.debug', 'logging.debug', (["('All section are found in file: ' + self.__mFilePath)"], {}), "('All section are found in file: ' + self.__mFilePath)\n", (1924, 1978), False, 'import logging\n'), ((633, 659), 'yaml.safe_load', 'yaml.safe_load', (['fileHandle'], {}), '(fileHandle)\n', (647, 659), False, 'import yaml\n'), ((1393, 1446), 'settings.HeadConfiguration.HeadConfiguration', 'HeadConfiguration', (['headConfiguration', 'self.__mServers'], {}), '(headConfiguration, self.__mServers)\n', (1410, 1446), False, 'from settings.HeadConfiguration import HeadConfiguration\n'), ((1816, 1877), 'logging.error', 'logging.error', (["('Wrong structure in file: ' + self.__mFilePath)"], {}), "('Wrong structure in file: ' + self.__mFilePath)\n", (1829, 1877), False, 'import logging\n'), ((2496, 2522), 'pprint.pformat', 'pprint.pformat', (['occurrence'], {}), '(occurrence)\n', (2510, 2522), False, 'import pprint\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torchvision
from omegaconf import OmegaConf
import hydra
import phyre
from phyre_simulator import PhyreSimulator # pylint: disable=unused-import
from losses import * # pylint: disable=wildcard-import,unused-wildcard-import
from preproc import * # pylint: disable=wildcard-import,unused-wildcard-import
USE_CUDA = torch.cuda.is_available()
DEVICE = torch.device('cuda:0' if USE_CUDA else 'cpu')
np.random.seed(42)
class ActionNetwork(nn.Module):
def __init__(self, action_size, output_size, hidden_size=256,
num_layers=1):
super().__init__()
self.layers = nn.ModuleList([nn.Linear(action_size, hidden_size)])
for _ in range(1, num_layers):
self.layers.append(nn.Linear(hidden_size, hidden_size))
self.output = nn.Linear(hidden_size, output_size)
def forward(self, tensor):
for layer in self.layers:
tensor = nn.functional.relu(layer(tensor), inplace=True)
return self.output(tensor)
class FilmActionNetwork(nn.Module):
def __init__(self, action_size, output_size, **kwargs):
super().__init__()
self.net = ActionNetwork(action_size, output_size * 2, **kwargs)
def forward(self, actions, image):
beta, gamma = torch.chunk(
self.net(actions).unsqueeze(-1).unsqueeze(-1), chunks=2, dim=1)
return image * beta + gamma
class SimpleNetWithAction(nn.Module):
def __init__(self, action_size, action_network_kwargs=None):
super().__init__()
action_network_kwargs = action_network_kwargs or {}
self.stem = nn.Sequential(
nn.Conv2d(phyre.NUM_COLORS, 3, kernel_size=1, bias=False),
nn.BatchNorm2d(3),
nn.ReLU(inplace=True),
nn.Conv2d(3, 64, kernel_size=7, stride=4, padding=3, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=5, stride=2, padding=2,
bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=5, stride=2, padding=2,
bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
)
self.action_net = ActionNetwork(action_size, 128,
**action_network_kwargs)
@property
def device(self):
if hasattr(self, 'parameters') and next(self.parameters()).is_cuda:
return 'cuda'
else:
return 'cpu'
def preprocess(self, observations):
device = self.device
image = _image_colors_to_onehot(
observations.to(dtype=torch.long, device=device))
return dict(features=self.stem(image).squeeze(-1).squeeze(-1))
def forward(self, observations, actions, preprocessed=None):
if preprocessed is None:
preprocessed = self.preprocess(observations)
return self._forward(actions, **preprocessed)
def _forward(self, actions, features):
actions = self.action_net(actions.to(features.device))
return (actions * features).sum(-1) / (actions.shape[-1]**0.5)
def ce_loss(self, decisions, targets):
targets = torch.ByteTensor(targets).float().to(decisions.device)
return nn.functional.binary_cross_entropy_with_logits(
decisions, targets)
def _get_fusution_points(fusion_place_spec, max_points):
if fusion_place_spec == 'all':
return tuple(range(max_points))
elif fusion_place_spec == 'none':
return tuple()
else:
return tuple(int(fusion_place_spec), )
class ResNet18FilmAction(nn.Module):
def __init__(self,
action_size,
action_layers=1,
action_hidden_size=256,
fusion_place='last'):
super().__init__()
net = torchvision.models.resnet18(pretrained=False)
conv1 = nn.Conv2d(phyre.NUM_COLORS,
64,
kernel_size=7,
stride=2,
padding=3,
bias=False)
self.register_buffer('embed_weights', torch.eye(phyre.NUM_COLORS))
self.stem = nn.Sequential(conv1, net.bn1, net.relu, net.maxpool)
self.stages = nn.ModuleList(
[net.layer1, net.layer2, net.layer3, net.layer4])
def build_film(output_size):
return FilmActionNetwork(action_size,
output_size,
hidden_size=action_hidden_size,
num_layers=action_layers)
assert fusion_place in ('first', 'last', 'all', 'none', 'last_single')
self.last_network = None
if fusion_place == 'all':
self.action_networks = nn.ModuleList(
[build_film(size) for size in (64, 64, 128, 256)])
elif fusion_place == 'last':
# Save module as attribute.
self._action_network = build_film(256)
self.action_networks = [None, None, None, self._action_network]
elif fusion_place == 'first':
# Save module as attribute.
self._action_network = build_film(64)
self.action_networks = [self._action_network, None, None, None]
elif fusion_place == 'last_single':
# Save module as attribute.
self.last_network = build_film(512)
self.action_networks = [None, None, None, None]
elif fusion_place == 'none':
self.action_networks = [None, None, None, None]
else:
raise Exception('Unknown fusion place: %s' % fusion_place)
self.reason = nn.Linear(512, 1)
@property
def device(self):
if hasattr(self, 'parameters') and next(self.parameters()).is_cuda:
return 'cuda'
else:
return 'cpu'
def preprocess(self, observations):
image = self._image_colors_to_onehot(observations)
features = self.stem(image)
for stage, act_layer in zip(self.stages, self.action_networks):
if act_layer is not None:
break
features = stage(features)
else:
features = nn.functional.adaptive_max_pool2d(features, 1)
return dict(features=features)
def forward(self, observations, actions, preprocessed=None):
if preprocessed is None:
preprocessed = self.preprocess(observations)
return self._forward(actions, **preprocessed)
def _forward(self, actions, features):
actions = actions.to(features.device)
skip_compute = True
for stage, film_layer in zip(self.stages, self.action_networks):
if film_layer is not None:
skip_compute = False
features = film_layer(actions, features)
if skip_compute:
continue
features = stage(features)
if not skip_compute:
features = nn.functional.adaptive_max_pool2d(features, 1)
if self.last_network is not None:
features = self.last_network(actions, features)
features = features.flatten(1)
if features.shape[0] == 1 and actions.shape[0] != 1:
# Haven't had a chance to use actions. So will match batch size as
# in actions manually.
features = features.expand(actions.shape[0], -1)
return self.reason(features).squeeze(-1)
def ce_loss(self, decisions, targets):
targets = targets.to(dtype=torch.float, device=decisions.device)
return nn.functional.binary_cross_entropy_with_logits(
decisions, targets)
def _image_colors_to_onehot(self, indices):
onehot = torch.nn.functional.embedding(
indices.to(dtype=torch.long, device=self.embed_weights.device),
self.embed_weights)
onehot = onehot.permute(0, 3, 1, 2).contiguous()
return onehot
def _image_colors_to_onehot(indices):
onehot = torch.nn.functional.embedding(
indices, torch.eye(phyre.NUM_COLORS, device=indices.device))
onehot = onehot.permute(0, 3, 1, 2).contiguous()
return onehot
def gen_dyn_conv(dim_in, dim_out):
# Switched to 1x1 kernels since I might be running it on 1x1 features too.
# Using vector features when using object representation
conv = nn.Conv2d(dim_in,
dim_out,
kernel_size=1,
stride=1,
padding=0,
bias=False)
return conv
class DynConcat(nn.Module):
"""Simple dynamics model, that concats the features and 2 layer MLP."""
def __init__(self, encoder, dim, n, nobj):
super().__init__()
del encoder # This one doesn't need it
self.dyn = nn.Sequential(gen_dyn_conv(dim * n * nobj, dim * nobj),
nn.ReLU(inplace=True),
gen_dyn_conv(dim * nobj, dim * nobj),
nn.ReLU(inplace=True),
gen_dyn_conv(dim * nobj, dim * nobj))
def forward(self, features, pixels):
"""
This dyn model does not use pixels, so will just return the last history
frame
Args:
features: (B, T, Nobj, D, H', W')
pixels: (B, T, Nobj, C, H, W)
Returns:
pred: (B, Nobj, D, H', W')
pixels: (B, Nobj, C, H, W)
addl_losses: {}
"""
cat_feats = torch.reshape(features, (features.shape[0], -1) +
features.shape[-2:])
future_feat = torch.reshape(self.dyn(cat_feats),
features.shape[:1] + features.shape[2:])
# Skip connection, add the last frames features, so it stops
# deleting things
pred = features[:, -1, ...] + future_feat
return pred, pixels[:, -1, ...], {}
class MultiSTN(nn.Module):
"""Multi spatial transformer network: predicts multiple transformations
and applies to parts of the input feature, split on the channel dim."""
def __init__(self,
input_dim,
num_tx,
dof='affine',
inp_type='pix',
affine_tx_mode='bilinear',
kernel_size=3,
stochastic=False):
"""
Args:
input_dim (int): Dimension of the features used to predict the STN
parameters
num_tx (int): Number of transformations to predict, will apply to
the tensor, split along some dimension
dof (str): Controls how generic of a affine matrix to predict.
If 'affine', will predict a generic 3x2 matrix
If 'rot-trans-only', it will only predict theta, x, y,
and use those to construct the affine matrix. So it will force
the matrix to not do any shear, scale etc.
Similarly for 'rot-only' and 'trans-only'
inp_type (str): Defines the type of the input. 'pix' is the default,
to directly transform the grid and move the pixels. 'pt' is the
PointNet style format, where the first 2 dimensions of each
split of the channels must correspond to the X, Y location, and
the transforms will just modify those dimensions, and not
touch the pixel values at all.
affine_tx_mode (str): The mode to use for grid_sample
kernel_size (int)
stochastic (bool): If true, predict a distribution over the affine
matrix, instead of deterministically.
"""
super().__init__()
self.num_tx = num_tx
self.dof = dof
self.inp_type = inp_type
self.affine_tx_mode = affine_tx_mode
# Spatial transformer localization-network
self.localization = nn.Sequential(
nn.Conv2d(input_dim,
8 * num_tx,
kernel_size=kernel_size,
padding=kernel_size // 2), nn.ReLU(True),
nn.Conv2d(8 * num_tx,
10 * num_tx,
kernel_size=kernel_size,
padding=kernel_size // 2), nn.ReLU(True))
# Regressor for the affine matrices
# Predicting 3x2 parameters that should be enough for any generic
# affine transformation, though will subselect in case only few
# parameters are needed
self.stochastic = stochastic
if self.stochastic:
self.fc_loc_mean = nn.Linear(10 * num_tx, 10 * num_tx)
self.fc_loc_logvar = nn.Linear(10 * num_tx, 10 * num_tx)
self.fc_loc = nn.Sequential(nn.Linear(10 * num_tx, 32 * num_tx),
nn.ReLU(True),
nn.Linear(32 * num_tx, num_tx * 3 * 2))
# Initialize the weights/bias with identity transformation
self.fc_loc[2].weight.data.zero_()
if self.dof != 'affine': # The paramters would be used for rot/trans
self.fc_loc[2].bias.data.zero_() # 0 rot/translation by default
else:
self.fc_loc[2].bias.data.copy_(
torch.from_numpy(
np.array([1, 0, 0, 0, 1, 0] * num_tx, dtype=np.float)))
def transform_pix(self, feat, theta, mode='bilinear'):
"""Transform the features using theta."""
grid = nn.functional.affine_grid(theta,
feat.size(),
align_corners=True)
return nn.functional.grid_sample(feat,
grid,
mode=mode,
align_corners=True)
def transform_pt(self, feat, theta):
"""Transform pt-net style feature using theta.
Here, it assumes the first 2 dimensions of the feature are loc.
Args:
feat (B, C, H, W), C >= 2
Returns:
tx feat (B, C, H, W)
"""
assert feat.shape[1] >= 2
feat_pos = feat[:, :2, ...]
feat_pos_ones = torch.ones_like(feat[:, :1, ...])
feat_pos_aug = torch.cat([feat_pos, feat_pos_ones], dim=1)
feat_pos_aug = feat_pos_aug.view(feat.shape[:1] + (3, -1))
feat_pos_aug_end = feat_pos_aug.transpose(1, 2).unsqueeze(-1)
txed = torch.matmul(theta.unsqueeze(1), feat_pos_aug_end)
tx_feat_pos = txed.squeeze(-1).transpose(1, 2).view(feat_pos.shape)
# Attach the features to it
tx_feat = torch.cat([tx_feat_pos, feat[:, 2:, ...]], dim=1)
return tx_feat
def _compute_loc_stochastic(self, feat_hist):
# from https://github.com/pytorch/examples/blob/master/vae/main.py#L53
mean = self.fc_loc_mean(feat_hist)
logvar = self.fc_loc_logvar(feat_hist)
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
latent_var_z = mean + eps * std
kl_loss = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())
return self.fc_loc(latent_var_z), kl_loss
    def forward(self, feat_for_tx, feat_to_tx, split_dim=1):
        """Predict affine transform(s) from feat_for_tx; apply to feat_to_tx.

        Args:
            feat_for_tx (B, D, H, W): The features to use to compute the
                transformation
            feat_to_tx (B, D', H, W): Features to apply the tx onto
            split_dim (int): Dimension to split on
        Returns:
            (transformed feat_to_tx, addl_losses dict -- contains the 'kl'
            loss when self.stochastic is set, otherwise empty)
        """
        feat_hist_embed = self.localization(feat_for_tx)
        # Average out the spatial dimension
        feat_hist_embed = torch.mean(feat_hist_embed, dim=[-2, -1])
        addl_losses = {}
        if self.stochastic:
            # Sampled localization params; incurs an extra KL penalty.
            pred, kl_loss = self._compute_loc_stochastic(feat_hist_embed)
            addl_losses['kl'] = kl_loss
        else:
            pred = self.fc_loc(feat_hist_embed)
        if self.dof != 'affine':
            # Constrained DOF modes: build theta from (angle, x, y), zeroing
            # whichever components the mode disallows.
            pred = pred.view(-1, self.num_tx, 3 * 2)
            # Say the first number is actual angle, and next 2 are x, y
            angle = pred[..., :1]
            pos_x = pred[..., 1:2]
            pos_y = pred[..., 2:3]
            if self.dof == 'rot-only':
                pos_x = torch.zeros_like(pos_x)
                pos_y = torch.zeros_like(pos_y)
            elif self.dof == 'trans-only':
                angle = torch.zeros_like(angle)
            else:
                assert self.dof == 'rot-trans-only', 'The only other option'
            cos_angle = torch.cos(angle)
            sin_angle = torch.sin(angle)
            # create the 2x3 matrix out of this
            theta = torch.cat(
                [cos_angle, sin_angle, pos_x, -sin_angle, cos_angle, pos_y],
                dim=-1)
            theta = theta.view(theta.shape[:-1] + (2, 3))
        elif self.dof == 'affine':
            theta = pred.view(-1, self.num_tx, 2, 3)
        else:
            raise NotImplementedError('Unknown {}'.format(self.dof))
        # Split the channels of feat_to_tx into num_tx groups, and apply the
        # transformations to each of those groups
        assert feat_to_tx.shape[split_dim] % self.num_tx == 0, (
            'Must be divisible to ensure equal sized chunks')
        # Chunk it
        feat_to_tx_parts = torch.chunk(feat_to_tx, self.num_tx, split_dim)
        # Apply the corresponding transformation to each part
        if self.inp_type == 'pix':
            tx_fn = partial(self.transform_pix, mode=self.affine_tx_mode)
        elif self.inp_type == 'pt':
            tx_fn = self.transform_pt
        else:
            raise NotImplementedError('Unknown type {}'.format(self.inp_type))
        feat_to_tx_parts_txed = [
            tx_fn(el, theta[:, i, ...])
            for i, el in enumerate(feat_to_tx_parts)
        ]
        return torch.cat(feat_to_tx_parts_txed, dim=split_dim), addl_losses
class DynSTN(nn.Module):
    """Spatial Transformer based dynamics model."""
    def __init__(self, encoder, dim, n, nobj, num_tx, base_stn):
        """
        Args:
            encoder: Unused here (kept for interface compatibility).
            dim, n, nobj: Sizing of the STN input (dim * n * nobj channels).
            num_tx (int): Number of transforms the STN predicts.
            base_stn: Hydra config instantiated into the STN module.
        """
        super().__init__()
        del encoder  # This one doesn't need it
        assert nobj == 1 or nobj == num_tx, (
            'Either split the 1 object features and tx, or tx each obj sep')
        self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj, num_tx)

    def forward(self, features, pixels):
        """
        This dyn model does not use pixels, so will just return the last
        history frame.
        Args:
            features: (B, T, Nobj, D, H', W')
            pixels: (B, T, Nobj, C, H, W)
        Returns:
            pred: (B, Nobj, D, H', W')
            pix
            addl_losses
        """
        batch = features.shape[0]
        # Stack time/object/channel dims so the STN sees one channel axis.
        stacked = torch.reshape(features, (batch, -1) + features.shape[-2:])
        # Merge Nobj and D; the STN splits them back to apply per-group txs.
        flat_objs = torch.flatten(features, 2, 3)
        transformed, addl_losses = self.dyn(stacked, flat_objs[:, -1, ...])
        out_shape = features.shape[:1] + features.shape[2:]
        future_feat = torch.reshape(transformed, out_shape)
        return future_feat, pixels[:, -1, ...], addl_losses
class DynSTNPixels_DEPRECATED(nn.Module):
    """Spatial Transformer based dynamics model, applied on pixels.
    Use DynSTNPixelChannelsDetBg"""
    def __init__(self, encoder, dim, n, nobj, num_tx, base_stn):
        # encoder: re-encodes the composited future frame into features.
        # base_stn: hydra config instantiated into the STN module.
        super().__init__()
        self.enc = encoder
        self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj, num_tx)
        self.num_tx = num_tx
        # A network to predict num_tx attention maps
        # NOTE(review): `[gen_deconv(...)] * 2` repeats the SAME module
        # instance twice, so the two upsampling stages share weights --
        # possibly unintended; verify before reviving this class.
        self.attention = nn.Sequential(
            gen_deconv(dim * n * nobj, num_tx),
            *([gen_deconv(num_tx, num_tx, upsample_factor=4)] * 2),
            nn.Conv2d(num_tx, num_tx, kernel_size=1, padding=0, bias=False),
            nn.Softmax(dim=1))
    def forward(self, features, pixels):
        """
        Args:
            features: (B, T, Nobj, D, H', W')
            pixels: (B, T, C, H, W)
        Returns:
            pred: (B, Nobj, D, H', W')
        """
        # Deprecated: raises immediately. Everything below the raise is
        # intentionally-kept reference code and is unreachable.
        raise NotImplementedError('Deal with objectified pixel input. '
                                  'Also deal with addl losses. ')
        cat_feats = torch.reshape(features, (features.shape[0], -1) +
                                  features.shape[-2:])
        assert features.shape[2] == 1, 'Not implemented yet for >1 objs'
        # Repmat the image channels num_tx times, so STN can predict those many
        # transformations
        pixels_tiled = pixels.repeat(1, 1, self.num_tx, 1, 1)
        future_pixels_tiled = self.dyn(cat_feats, pixels_tiled[:, -1, ...])
        # Compute attention maps for compositing
        attention_maps = self.attention(cat_feats)
        # Do a weighted sum of the channels using the attention maps
        attention_maps_split = torch.chunk(attention_maps, self.num_tx, 1)
        future_pixels_split = torch.chunk(future_pixels_tiled, self.num_tx, 1)
        weighted = [
            att * pix
            for att, pix in zip(attention_maps_split, future_pixels_split)
        ]
        future_pixels = torch.mean(torch.stack(weighted), dim=0)
        # Since this is a new image being generated, need to pass through the
        # encoder to get the features for this image
        future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...]
        return future_feat, future_pixels
class DynSTNPixelChannels_DEPRECATED(nn.Module):
    """Spatial Transformer based dynamics model, applied on channels of img.
    Use DynSTNPixelChannelsDetBg"""
    def __init__(self, encoder, dim, n, nobj, base_stn):
        super().__init__()
        self.enc = encoder
        self.num_tx = phyre.NUM_COLORS  # One tx per color
        self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj,
                                           self.num_tx)
    def forward(self, features, pixels):
        """
        Args:
            features: (B, T, Nobj, D, H', W')
            pixels: (B, T, C, H, W)
        Returns:
            pred: (B, Nobj, D, H', W')
        """
        # Deprecated: raises immediately. Everything below the raise is
        # intentionally-kept reference code and is unreachable.
        raise NotImplementedError('Deal with objectified pixel input. '
                                  'Also deal with addl losses. ')
        # 'pix' mode moves the one-hot channels directly; 'pt' mode carries
        # 3 values (x, y + value) per color channel.
        assert (pixels.shape[2] == self.num_tx or
                pixels.shape[2] == self.num_tx * 3), 'In pix or pt mode so far'
        cat_feats = torch.reshape(features, (features.shape[0], -1) +
                                  features.shape[-2:])
        assert features.shape[2] == 1, 'Not implemented yet for >1 objs'
        future_pixels = self.dyn(cat_feats, pixels[:, -1, ...])
        # Since this is a new image being generated, need to pass through the
        # encoder to get the features for this image
        future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...]
        return future_feat, future_pixels
class DynSTNPixelChannelsGenBg_DEPRECATED(nn.Module):
    """Spatial Transformer based dynamics model, applied on channels of img.
    Generates the background.
    Use DynSTNPixelChannelsDetBg
    """
    def __init__(self, encoder, dim, n, nobj, base_stn):
        super().__init__()
        self.enc = encoder
        # One tx per color, except background that is generated since it's not
        # an object that can be moved like others. Just a 1x1 convolution on
        # the predicted image to gen the last channel
        self.num_tx = phyre.NUM_COLORS - 1
        self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj,
                                           self.num_tx)
        # Just a couple layer should suffice, over the last frame, and new frame
        # feature
        self.bg_dec = nn.Sequential(
            nn.Conv2d(2 * phyre.NUM_COLORS - 1,
                      8,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=False), nn.ReLU(inplace=True),
            nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0, bias=False))
    def forward(self, features, pixels):
        """
        Args:
            features: (B, T, Nobj, D, H', W')
            pixels: (B, T, C, H, W)
        Returns:
            pred: (B, Nobj, D, H', W')
        """
        # Deprecated: raises immediately. Everything below the raise is
        # intentionally-kept reference code and is unreachable.
        raise NotImplementedError('Deal with objectified pixel input. '
                                  'Also deal with addl losses. ')
        assert (pixels.shape[2] - 1) == self.num_tx
        cat_feats = torch.reshape(features, (features.shape[0], -1) +
                                  features.shape[-2:])
        assert features.shape[2] == 1, 'Not implemented yet for >1 objs'
        # Transform only the non-background channels (index 1 onwards).
        future_pixels_obj = self.dyn(cat_feats, pixels[:, -1, 1:, ...])
        # Predict the background from last frame + transformed channels.
        future_pixels_bg = self.bg_dec(
            torch.cat([pixels[:, -1, ...], future_pixels_obj], dim=1))
        future_pixels = torch.cat([future_pixels_bg, future_pixels_obj], dim=1)
        # Since this is a new image being generated, need to pass through the
        # encoder to get the features for this image
        future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...]
        return future_feat, future_pixels
class DynSTNPixelChannelsDetBg(nn.Module):
    """Spatial Transformer based dynamics model, applied on channels of img.
    Generates the background deterministically, using the change.
    """
    def __init__(self,
                 encoder,
                 dim,
                 n,
                 nobj,
                 base_stn,
                 movable_ch,
                 movable_only=False):
        """
        Args:
            encoder: Module used to re-encode the predicted future frame.
            dim, n, nobj: Sizing for the STN input (dim * n * nobj channels).
            base_stn: Hydra config instantiated into the STN module.
            movable_ch: Channel indices considered movable (used when
                movable_only is set).
            movable_only (bool): If true, only transform movable_ch channels;
                else transform every non-background channel.
        """
        super().__init__()
        self.enc = encoder
        self.movable_only = movable_only
        # One tx per color (or movable colors, if that is set),
        # except background that is generated since it's not
        # an object that can be moved like others.
        if self.movable_only:
            self.movable_channels = torch.LongTensor(movable_ch)
        else:
            self.movable_channels = torch.arange(1, phyre.NUM_COLORS)
        self.num_tx = len(self.movable_channels)
        self.nobj = nobj
        self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj,
                                           self.num_tx * nobj)
    def forward(self, features, pixels):
        """
        Args:
            features: (B, T, Nobj, D, H', W')
            pixels: (B, T, Nobj, C, H, W)
        Returns:
            pred: (B, Nobj, D, H', W')
            pix
            addl_losses
        """
        assert pixels.shape[3] >= self.num_tx
        cat_feats = torch.reshape(features, (features.shape[0], -1) +
                                  features.shape[-2:])
        pixels_movable = pixels[:, -1, :, self.movable_channels, ...]
        # combine all channels of objects and transform
        pixels_movable_flat = torch.flatten(pixels_movable, 1, 2)
        future_pixels_flat_movable, addl_losses = self.dyn(
            cat_feats, pixels_movable_flat)
        future_pixels_movable = future_pixels_flat_movable.view(
            pixels_movable.shape)
        # Copy most of the channels. BUGFIX: clone() here -- `pixels[:, -1]`
        # is a view, so the in-place channel assignments below used to
        # silently mutate the caller's input tensor.
        future_pixels = pixels[:, -1, ...].clone()
        future_pixels[:, :, self.movable_channels, ...] = future_pixels_movable
        # Compute the background deterministically, where all other channels
        # are 0s, it has to be 1. So make channels sum to 1.
        future_pixels_bg = 1.0 - torch.sum(
            future_pixels[:, :, 1:, ...], dim=2, keepdims=True)
        future_pixels[:, :, :1, ...] = future_pixels_bg
        # Since this is a new image being generated, need to pass through the
        # encoder to get the features for this image
        future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...]
        return future_feat, future_pixels, addl_losses
def gen_deconv(in_dim,
               out_dim,
               stride=1,
               kernel_size=3,
               padding=1,
               upsample_factor=2,
               inst_norm=False,
               activation=None):
    """Build a deconv block: ConvTranspose2d -> [InstanceNorm] -> act -> Upsample.

    Args:
        in_dim, out_dim (int): Input/output channel counts.
        stride, kernel_size, padding: ConvTranspose2d parameters.
        upsample_factor (int): Bilinear upsampling factor.
        inst_norm (bool): If true, add an affine InstanceNorm2d.
        activation (nn.Module or None): Activation module; defaults to a
            fresh ReLU(inplace=True) per call.
    Returns:
        nn.Sequential implementing the block.
    """
    if activation is None:
        # BUGFIX: the old default `activation=nn.ReLU(inplace=True)` was
        # evaluated once at def time, so every network built with the default
        # registered the very same module instance. Create a new one per call.
        activation = nn.ReLU(inplace=True)
    return nn.Sequential(
        nn.ConvTranspose2d(in_dim,
                           out_dim,
                           kernel_size=kernel_size,
                           stride=stride,
                           padding=padding,
                           bias=False),
        # nn.Sequential() simulates identity, if no instance norm to be added
        nn.InstanceNorm2d(out_dim, affine=True)
        if inst_norm else nn.Sequential(),
        activation,
        nn.Upsample(scale_factor=upsample_factor,
                    mode='bilinear',
                    align_corners=True))
class BasicDecoder(nn.Module):
    """Simple decoder, goes from features to frame representation."""
    def __init__(self, in_dim, out_dim, nlayers, kernel_size, padding,
                 upsample_factor, decode_from, backprop_feat_ext, inst_norm,
                 activation):
        """
        Args:
            in_dim (int): Channels of the decoded input (overridden to
                phyre.NUM_COLORS when decoding from pixels).
            out_dim (int): Channels of the produced frames.
            nlayers (int): Number of intermediate deconv blocks.
            decode_from (str): 'pixels' or 'features'.
            backprop_feat_ext (bool): If false, detach before decoding.
            activation: Hydra config for the activation module.
        """
        super().__init__()
        decoder_dim = 256
        self.backprop_feat_ext = backprop_feat_ext
        self.decode_from = decode_from
        assert self.decode_from in ['pixels', 'features']
        if self.decode_from == 'pixels':
            in_dim = phyre.NUM_COLORS
            decoder_dim = 16
        activation = hydra.utils.instantiate(activation)
        logging.warning('Using %s activation for decoders', activation)
        hidden_blocks = [
            gen_deconv(decoder_dim,
                       decoder_dim,
                       1,
                       kernel_size,
                       padding,
                       upsample_factor,
                       inst_norm,
                       activation=activation) for _ in range(nlayers)
        ]
        first_block = gen_deconv(in_dim,
                                 decoder_dim,
                                 1,
                                 kernel_size,
                                 padding,
                                 upsample_factor,
                                 activation=activation)
        # No activation on the last block.
        last_block = gen_deconv(decoder_dim,
                                out_dim,
                                1,
                                kernel_size,
                                padding,
                                upsample_factor,
                                activation=nn.Sequential())
        self.deconv_net = nn.Sequential(first_block, *hidden_blocks,
                                        last_block)

    def forward(self, features, pixels):
        """
        Args:
            features (BxNobjxDxH'xW'): Features to be decoded
            pixels (BxNobjxCxHxW): Pixels generated by the dynamics model
        Returns:
            imgs (BxNobjxD_outxHxW): Output frames (per obj, aggregation is
                done later in the Fwd class)
        """
        source = pixels if self.decode_from == 'pixels' else features
        if not self.backprop_feat_ext:
            # Train the decoder separately from the rest of the network:
            # cut gradients to the feature extractor.
            source = source.detach()
        # Sum features over all objects and decode once; per-object decodes
        # are far too slow. Keep a singleton object dim for the output shape.
        source = torch.sum(source, dim=1, keepdims=True)
        flat = torch.flatten(source, 0, 1)
        decoded = self.deconv_net(flat)
        # Reshape back into object level
        return torch.reshape(decoded, source.shape[:2] + decoded.shape[1:])
class TrivialDecoder(nn.Module):
    """Trivial decoder, simply outputs the frames from the dynamics model."""
    def __init__(self, in_dim, out_dim):
        """Dims are accepted only for interface compatibility."""
        super().__init__()
        del in_dim, out_dim

    def forward(self, features, pixels):
        """Identity on pixels.

        Args:
            features (BxNobjxDxH'xW'): Ignored; the dynamics model is assumed
                to have done all decoding already.
            pixels (BxNobjxCxHxW): Pixels generated by the dynamics model
        Returns:
            imgs (BxNobjxCxHxW): Output frames
        """
        del features
        return pixels
def average_losses(all_losses):
    """Average the losses into one dict of losses.

    Args:
        all_losses: List of dictionary of losses.
    Returns:
        combined: A dictionary with same keys as individual dicts, with
            all losses combined.
    """
    if not all_losses:
        return {}
    combined = {}
    for key, first_val in all_losses[0].items():
        if isinstance(first_val, torch.Tensor):
            stacked = torch.stack([entry[key] for entry in all_losses])
            # Undefined losses are marked -1 (e.g. when not enough GT is
            # available); zero those out before averaging. Note the mean
            # still divides by the full count, including zeroed entries.
            combined[key] = torch.mean(stacked * (stacked >= 0), dim=0)
        else:
            # Non-tensor (e.g. None when a loss was inactive): pass through.
            combined[key] = first_val
    return combined
class BasicObjEncoder(nn.Module):
    """Takes objectified representation, and puts it through more layers."""
    def __init__(self,
                 in_dim,
                 out_dim,
                 nlayers,
                 kernel_size=3,
                 stride=1,
                 padding=1,
                 spatial_mean=True):
        """
        Args:
            in_dim (int): Input channels per object.
            out_dim (int): Output channels (ignored when nlayers == 0).
            nlayers (int): Number of conv blocks; 0 disables the encoder.
            spatial_mean (bool): If true, average-pool features to 1x1.
        """
        super().__init__()
        if nlayers > 0:
            self.out_dim = out_dim
        else:
            logging.warning('Ignoring the out_dim (%d) for ObjEncoder',
                            out_dim)
            self.out_dim = in_dim
        blocks = []
        for i in range(nlayers):
            blocks.append(
                nn.Conv2d(in_dim if i == 0 else out_dim,
                          out_dim,
                          kernel_size=kernel_size,
                          stride=stride,
                          padding=padding,
                          bias=False))
            blocks.append(nn.ReLU(inplace=True))
        if blocks:
            # Drop the trailing ReLU so the last conv output is unsquashed.
            self.encoder = nn.Sequential(*blocks[:-1])
        else:
            self.encoder = None
        self.spatial_mean = spatial_mean

    def forward(self, feat):
        """
        Args:
            feat: (B, T, Nobj, D, H', W')
        Returns:
            Encoded features, optionally spatially mean-pooled to 1x1.
        """
        if self.encoder:
            flat = torch.flatten(feat, 0, 2)
            encoded_flat = self.encoder(flat)
            out = torch.reshape(encoded_flat,
                                feat.shape[:3] + encoded_flat.shape[1:])
        else:
            out = feat
        if self.spatial_mean:
            out = torch.mean(out, dim=[-1, -2], keepdims=True)
        return out
class ContextGatingObjectifier(nn.Module):
    """Takes intermediate representation and converts into object-level rep."""
    def __init__(self, dim, obj_encoder, nobj=1):
        # obj_mapper: 1x1 convs producing nobj per-object attention maps.
        super().__init__()
        self.obj_mapper = nn.Sequential(
            nn.Conv2d(dim, dim, kernel_size=1, stride=1, padding=0,
                      bias=False), nn.ReLU(inplace=True),
            nn.Conv2d(dim,
                      nobj,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=False))
        self.obj_encoder = hydra.utils.instantiate(obj_encoder, dim)
        self.out_dim = self.obj_encoder.out_dim
    def forward(self, vid_feat):
        """
        Decompose the video features into object level representation.
        Args:
            vid_feat: (BxTxDxH'xW')
            nobj (int): Max number of objects in the scene. The hope is that the
                extra channels will just have some degenerate information
        Returns:
            BxTxNobjxDxH''xW''
        """
        # Unfinished: raises immediately. Everything below the raise is
        # intentionally-kept reference code and is unreachable.
        raise NotImplementedError('The inp is now objfied, TODO deal with it')
        batch_size = vid_feat.shape[0]
        # Use context gating: generate a heatmap for each object at each time
        # step, and weight using that heatmap to get an object representation
        flatten_feat = torch.flatten(vid_feat, 0, 1)
        # Unsqueeze to add a channel dimension to the attention maps
        obj_map = self.obj_mapper(flatten_feat).unsqueeze(2)
        # Add a 1-D object dimension
        flatten_feat = flatten_feat.unsqueeze(1)
        # Weight the feats with the attention maps to get the object-features
        mapped_feat = flatten_feat * obj_map
        # Reshape to add the time dimension back
        mapped_feat = torch.reshape(mapped_feat,
                                    (batch_size, -1) + mapped_feat.shape[1:])
        final_feat = self.obj_encoder(mapped_feat)
        return final_feat
class ChannelSplitObjectifier(nn.Module):
    """Splits the channel of image representation to get obj rep."""
    def __init__(self, dim, obj_encoder, nobj=1):
        super().__init__()
        self.nobj = nobj
        # Each object gets dim // nobj of the input channels.
        self.obj_encoder = hydra.utils.instantiate(obj_encoder, dim // nobj)
        self.out_dim = self.obj_encoder.out_dim

    def forward(self, vid_feat):
        """
        Decompose the video features into object level representation.
        Args:
            vid_feat: (BxTxNobjxDxH'xW') with Nobj == 1
        Returns:
            BxTxNobjx(D/Nobj)xH'xW'
        """
        assert vid_feat.shape[2] == 1, (
            'Channel split can not deal with pre objectified {} input'.format(
                vid_feat.shape[2]))
        assert vid_feat.shape[3] % self.nobj == 0, 'Must be divisible'
        # Fold the (singleton obj, channel) dims into (nobj, channel/nobj).
        split_shape = (vid_feat.shape[:2] + (self.nobj, -1) +
                       vid_feat.shape[-2:])
        per_obj = vid_feat.view(split_shape)
        assert per_obj.shape[2] == self.nobj
        assert per_obj.shape[3] == vid_feat.shape[3] / self.nobj
        # Apply a little network to get a flat feature
        return self.obj_encoder(per_obj)
class TrivialObjectifier(nn.Module):
    """Simply returns the feature.

    Earlier version would unsqueeze, but since the component splitting the
    input at least has 1 obj, so no need to unsqueeze it further.
    """
    def __init__(self, dim, obj_encoder, nobj=1):
        super().__init__()
        del obj_encoder  # unused; kept for interface compatibility
        self.nobj = nobj
        self.out_dim = dim

    def forward(self, vid_feat):
        """Identity; just validates the object dimension."""
        actual_nobj = vid_feat.shape[2]
        assert actual_nobj == self.nobj, ('{} != {}'.format(
            vid_feat.shape[2], self.nobj))
        return vid_feat
class SimpleBaseEncoder(nn.Module):
    """Simple network, simplified from Anton's version."""
    def __init__(self, in_dim, width_scale_factor):
        """Build the conv stem.

        For a 256x256 input, it'll give a 4x4 output (five stride-2 convs).
        Args:
            in_dim (int): Input channels.
            width_scale_factor (float): Multiplier on channel widths.
        """
        super().__init__()
        self.width_scale_factor = width_scale_factor
        _s = self._scale_int
        layers = [
            nn.Conv2d(in_dim, 3, kernel_size=1, bias=False),
            nn.BatchNorm2d(3),
            nn.ReLU(inplace=True),
            nn.Conv2d(3,
                      _s(64),
                      kernel_size=7,
                      stride=2,
                      padding=3,
                      bias=False),
            nn.BatchNorm2d(_s(64)),
            nn.ReLU(inplace=True),
        ]
        # Three identical stride-2 5x5 conv blocks at the same width.
        for _ in range(3):
            layers.extend([
                nn.Conv2d(_s(64),
                          _s(64),
                          kernel_size=5,
                          stride=2,
                          padding=2,
                          bias=False),
                nn.BatchNorm2d(_s(64)),
                nn.ReLU(inplace=True),
            ])
        # Final stride-2 block widens to 128 channels.
        layers.extend([
            nn.Conv2d(_s(64),
                      _s(128),
                      kernel_size=5,
                      stride=2,
                      padding=2,
                      bias=False),
            nn.BatchNorm2d(_s(128)),
            nn.ReLU(inplace=True),
        ])
        self.stem = nn.Sequential(*layers)
        self.out_dim = _s(128)

    def _scale_int(self, n):
        """Scale the number by a factor. To control width of this network."""
        return int(self.width_scale_factor * n)

    def forward(self, image):
        return self.stem(image)
class ResNetBaseEncoder(nn.Module):
    """ResNet based feature extractor."""
    def __init__(self, in_dim, base_model, nlayers):
        """
        Args:
            in_dim (int): Input channels (replaces the stock conv1).
            base_model: Hydra config for a torchvision-style ResNet.
            nlayers (int): Number of residual stages (layer1..layerN) kept.
        """
        super().__init__()
        net = hydra.utils.instantiate(base_model)
        conv1 = nn.Conv2d(in_dim,
                          64,
                          kernel_size=7,
                          stride=2,
                          padding=3,
                          bias=False)
        self.stem = nn.Sequential(conv1, net.bn1, net.relu, net.maxpool)
        self.stages = nn.ModuleList(
            [getattr(net, 'layer%d' % (i + 1)) for i in range(nlayers)])
        final_block = self.stages[-1][-1]
        # Bottleneck blocks end in bn3, BasicBlocks in bn2; read the output
        # width off whichever is present.
        for bn_name in ('bn3', 'bn2'):
            if hasattr(final_block, bn_name):
                self.out_dim = getattr(final_block, bn_name).num_features
                break
        else:
            raise ValueError('This should not happen')

    def forward(self, image):
        out = self.stem(image)
        for stage in self.stages:
            out = stage(out)
        return out
class BasicEncoder(nn.Module):
    """Encode pixels to features."""
    def __init__(self, in_dim, nobj, feat_ext, objectifier, obj_encoder,
                 spatial_mean, feat_ext_eval_mode, process_objs_together):
        """
        Args:
            obj_before_enc: If true, do the objectify in the input (pixel) space
                before running the encode (so each object is encoded separately)
            spatial_mean: Avg pool the features to 1x1
            feat_ext_eval_mode: Set the feature extractor to eval mode for BN,
                dropout etc
            process_objs_together: If true, it will concatenate all objs on the
                channel dimension, extract features, and split the features
                in channel dimensions to get features for each obj
        """
        super().__init__()
        self.nobj = nobj
        self.process_objs_together = process_objs_together
        # The image embedding model
        self.feat_ext = hydra.utils.instantiate(
            feat_ext, in_dim * nobj if self.process_objs_together else in_dim)
        initial_dim = self.feat_ext.out_dim
        # The objects model
        self.objectifier = hydra.utils.instantiate(objectifier, initial_dim,
                                                   obj_encoder)
        self.out_dim = self.objectifier.out_dim
        if self.process_objs_together:
            # Features were computed on concatenated channels; each object
            # gets an equal share of the output channels.
            assert self.out_dim % nobj == 0
            self.out_dim //= nobj
        self.spatial_mean = spatial_mean
        self.feat_ext_eval_mode = feat_ext_eval_mode
    def _forward_vid(self, batch_vid_obs, l2_norm_feats=False):
        """
        Convert a video into images to run the forward model.
        Args:
            batch_vid_obs: BxTxCxHxW or BxTxNobjxCxHxW
            l2_norm_feats (bool): If true, L2-normalize the features.
        Returns:
            features: BxTxDxH'xW' or BxTxNobjxDxH'xW'
        """
        # Add an object dimension, so the rest of the code doesn't have to
        # deal with edge cases
        added_obj_dim = False
        if len(batch_vid_obs.shape) == 4:
            added_obj_dim = True
            batch_vid_obs = batch_vid_obs.unsqueeze(2)  # BxTxNobjxCxHxW
        # Flatten videos into frames to extract out the features
        # resulting shape B'xC'xHxW
        if self.process_objs_together:
            # resulting shape B' = B * T, C' = Nobj * C
            flat_obs = batch_vid_obs.reshape((-1, ) + batch_vid_obs.shape[-4:])
            flat_obs = torch.flatten(flat_obs, 1, 2)
        else:
            # resulting shape B' = B * T * Nobj, C' = C
            flat_obs = batch_vid_obs.reshape((-1, ) + batch_vid_obs.shape[-3:])
        # Extract features
        if self.feat_ext_eval_mode:
            # Freeze BN/dropout statistics during feature extraction.
            self.feat_ext.eval()
        features = self.feat_ext(flat_obs)
        if self.spatial_mean:
            # Mean over spatial dimensions
            features = torch.mean(features, dim=[-2, -1], keepdims=True)
        if l2_norm_feats:
            # L2 normalize the features -- MemoryBank, MoCo and PIRL do that
            features = nn.functional.normalize(features, p=2, dim=-1)
        # Reshape back to original batch dimension
        if self.process_objs_together:
            # Split the channel dim back into (Nobj, per-obj channels).
            features_batched = features.reshape(batch_vid_obs.shape[:2] +
                                                (self.nobj, -1) +
                                                features.shape[-2:])
        else:
            features_batched = features.reshape(batch_vid_obs.shape[:-3] +
                                                features.shape[1:])
        if added_obj_dim:
            features_batched = features_batched.squeeze(2)
        assert features_batched.shape[-3] == self.out_dim
        return features_batched
    def forward(self, vid):
        """
        Args:
            vid (B, T, Nobj, C, H, W): Input video, in preprocessed form; i.e.
                one-hot
        Returns:
            obj_feat (B, T, Nobj', D, H', W'): Features with objects, if needed
        """
        vid_feat = self._forward_vid(vid)
        vid_feat = self.objectifier(vid_feat)
        return vid_feat
def combine_obj_pixels(obj_pix, obj_dim):
    """Combine obj-split pixels into a single image.

    Args:
        obj_pix: B, ..., Nobj, ..., C, H, W (or None)
        obj_dim: The dimension to reduce over -- which corresponds to objs
    Returns
        B, ..., ..., C, H, W (or None when obj_pix is None)
    """
    if obj_pix is None:
        return None
    # Element-wise max across objects composites the per-object renderings.
    combined, _ = torch.max(obj_pix, dim=obj_dim)
    return combined
class MLPClassifier(nn.Module):
    """Simple classifier on top of the intermediate features."""
    def __init__(self, in_dim, nlayers, match_inp_sz_layer=False):
        """
        Args:
            in_dim (int): Feature dimension the MLP expects.
            nlayers (int): Total linear layers; 0 makes forward a no-op.
            match_inp_sz_layer (bool): Lazily create a projection from the
                actual input size to in_dim (currently not implemented).
        """
        super().__init__()
        self.nlayers = nlayers
        if nlayers == 0:
            return
        # First linear layer, to project to the in_dim dimension, if not
        self.match_inp_sz_layer = match_inp_sz_layer
        if self.match_inp_sz_layer:
            raise NotImplementedError('Doesnt work with multi-gpu yet..')
            # NOTE(review): unreachable (after the raise) in the original too.
            self.register_parameter('init_linear_wt', None)
        self.in_dim = in_dim
        hidden_pairs = [[nn.Linear(in_dim, in_dim),
                         nn.ReLU(inplace=True)] for _ in range(nlayers - 1)]
        hidden = [mod for pair in hidden_pairs for mod in pair]
        # Drop the trailing ReLU, then append the final scoring layer.
        self.cls = nn.Sequential(*(hidden[:-1] + [nn.Linear(in_dim, 1)]))

    def reset_parameters(self, inp, in_dim, out_dim):
        """Lazily create the input-matching projection weight."""
        self.init_linear_wt = nn.Parameter(
            inp.new(in_dim, out_dim).normal_(0, 1))

    def forward(self, preds, pixs, process_all_frames=False):
        """
        Run the classifier on the predictions.
        Args:
            preds: (BxTx1xDxH'xW')
            pixs: (BxTx1xDxHxW)
        Retuns:
            solved: (BxT)
            process_all_frames: Set true when used by other classifiers for
                intermediate feature extraction, so to get features for each
                frame.
        """
        del pixs  # This does not use it
        if self.nlayers == 0:
            return preds
        if not process_all_frames:
            # The final _cls only looks at the last frame, so only process
            # that one unless a caller wants per-frame features.
            preds = preds[:, -1:, ...]
        mean_feat = torch.mean(preds, axis=[2, -1, -2])
        if self.match_inp_sz_layer:
            if self.init_linear_wt is None:
                logging.warning(
                    'Creating a linear layer to map the input '
                    'dims (%d) to MLP input dim (%d)', mean_feat.shape[-1],
                    self.in_dim)
                self.reset_parameters(preds, self.in_dim,
                                      preds.shape[1] * preds.shape[3])
            mean_feat = nn.functional.linear(mean_feat, self.init_linear_wt)
            mean_feat = nn.ReLU(inplace=True)(mean_feat)
        return self.cls(mean_feat).squeeze(-1)
class ConvNetClassifier(nn.Module):
    """ConvNet classifier on top of the intermediate features."""
    def __init__(self, feat_in_dim, num_conv_blocks, num_fc_layers):
        # feat_in_dim is unused: this classifier works on raw pixels
        # (phyre.NUM_COLORS channels), not on dynamics features.
        super().__init__()
        del feat_in_dim
        nobj = 1
        # Pixel encoder: a truncated ResNet-18 (num_conv_blocks stages) with
        # trivial objectification (inputs are single-object).
        self.enc = BasicEncoder(
            phyre.NUM_COLORS,
            nobj,
            OmegaConf.create({
                'class': 'nets.ResNetBaseEncoder',
                'params': {
                    'base_model': {
                        'class': 'torchvision.models.resnet18',
                        'params': {
                            'pretrained': False,
                        }
                    },
                    'nlayers': num_conv_blocks,
                }
            }),
            OmegaConf.create({
                'class': 'nets.TrivialObjectifier',
                'params': {
                    'nobj': nobj,  # will sum into 1 obj
                }
            }),
            OmegaConf.create({
                'class': 'nets.BasicObjEncoder',
                'params': {
                    'out_dim': 16,
                    'nlayers': 0,
                    'spatial_mean': True,
                }
            }),
            spatial_mean=False,
            feat_ext_eval_mode=False,
            process_objs_together=False,  # Doesn't matter, 1 obj
        )
        self.cls = MLPClassifier(self.enc.out_dim, num_fc_layers)
    def forward(self, preds, pixs, process_all_frames=False):
        """
        Run the classifier on the predictions.
        Args:
            preds: (BxTx1xDxH'xW')
            pixs: (BxTx1xDxHxW)
            process_all_frames: Set true when used by other classifiers for
                intermediate feature extraction, so to get features for each
                frame.
        Retuns:
            solved: (BxT)
        """
        # Not enforcing the assert here if pred is None, since this module
        # is usually used by other modules as a way to extract features,
        # and it might pass in None for preds. But rest assured, this check
        # would have been done on the caller side.
        assert preds is None or preds.shape[1] == pixs.shape[1], (
            'Must pass in run_decode=True if using a pixel-based classifier!!')
        del preds  # This does not use it
        # Since this classifier doesn't take into account context and the final
        # _cls is going to look at the last frame, so might as well only process
        # that last frame
        if not process_all_frames:
            pixs = pixs[:, -1:, ...]
        obj_feats = self.enc(pixs)
        return self.cls(obj_feats, None, process_all_frames=process_all_frames)
class TxClassifier(nn.Module):
    """Transformer on top of the intermediate features over time."""
    def __init__(self, in_dim, nheads, nlayers):
        super().__init__()
        self.tx_enc = TxEncoder(in_dim, nheads, nlayers)
        self.cls = nn.Linear(self.tx_enc.out_dim, 1)

    def forward(self, preds, pixs):
        """
        Run the classifier on the predictions.
        Args:
            preds: (BxTx1xDxH'xW')
            pixs: (BxTx1xDxHxW)
        Retuns:
            solved: (Bx1)
        """
        del pixs  # This does not use it
        # Spatially pool the features, then fold the object dim into time.
        pooled = torch.mean(preds, axis=[-1, -2])
        sequence = torch.flatten(pooled, 1, 2)
        per_step_score = self.cls(self.tx_enc(sequence))
        # Max pool over time for the final prediction. keepdims because the
        # output format expects a time dimension (max-pooled downstream).
        cls_pred = torch.max(per_step_score, dim=1,
                             keepdims=True)[0].squeeze(-1)
        return cls_pred
class ConvTxClassifier(nn.Module):
    """Transformer on top of the Conv features learned over time."""
    def __init__(self, in_dim, nconvblocks, nheads, nlayers):
        super().__init__()
        # Conv feature extractor (0 fc layers => returns features, not scores).
        self.conv_feat = ConvNetClassifier(in_dim, nconvblocks, 0)
        self.tx_cls = TxClassifier(self.conv_feat.enc.out_dim, nheads, nlayers)

    def forward(self, preds, pixs):
        """
        Run the classifier on the predictions.
        Args:
            preds: (BxTx1xDxH'xW')
            pixs: (BxTx1xDxHxW)
        Retuns:
            solved: (Bx1)
        """
        assert preds.shape[1] == pixs.shape[1], (
            'Must pass in run_decode=True if using a pixel-based classifier!!')
        del preds
        frame_feats = self.conv_feat(None, pixs, process_all_frames=True)
        return self.tx_cls(frame_feats, None)
class Conv3dClassifier(nn.Module):
    """3D conv over features learned over time."""
    def __init__(self, in_dim, num_3d_layers):
        """
        Args:
            in_dim (int): Channel count of the per-frame features.
            num_3d_layers (int): Number of stride-2 3D conv blocks + 1.
        """
        super().__init__()
        blocks = []
        for _ in range(num_3d_layers - 1):
            blocks.append(
                nn.Conv3d(in_dim, in_dim, 3, stride=2, padding=1, bias=False))
            blocks.append(nn.ReLU(inplace=True))
        # Drop the trailing ReLU so the classifier sees raw conv output.
        self.enc = nn.Sequential(*blocks[:-1])
        self.cls = nn.Linear(in_dim, 1)

    def forward(self, preds, pixs):
        """
        Run the classifier on the predictions.
        Args:
            preds: (BxTx1xDxH'xW')
            pixs: (BxTx1xDxHxW)
        Retuns:
            solved: (Bx1)
        """
        del pixs
        # (B,T,1,D,H,W) -> (B,D,T,H,W): Conv3d wants channels before time.
        volume = preds.squeeze(2).transpose(1, 2)
        encoded = self.enc(volume)
        scores = self.cls(torch.mean(encoded, [-1, -2, -3]))
        # The fc layer leaves a trailing singleton dim; keep it, since the
        # output format expects a time dimension that is max-pooled later.
        return scores
class ConvConv3dClassifier(nn.Module):
    """Conv3D on top of the Conv features learned over time."""
    def __init__(self, in_dim, nconvblocks, n3dlayers):
        super().__init__()
        # Conv feature extractor (0 fc layers => returns features, not scores).
        self.conv_feat = ConvNetClassifier(in_dim, nconvblocks, 0)
        self.td_cls = Conv3dClassifier(self.conv_feat.enc.out_dim, n3dlayers)

    def forward(self, preds, pixs):
        """
        Run the classifier on the predictions.
        Args:
            preds: (BxTx1xDxH'xW')
            pixs: (BxTx1xDxHxW)
        Retuns:
            solved: (Bx1)
        """
        assert preds.shape[1] == pixs.shape[1], (
            'Must pass in run_decode=True if using a pixel-based classifier!!')
        del preds
        frame_feats = self.conv_feat(None, pixs, process_all_frames=True)
        return self.td_cls(frame_feats, None)
class ConcatClassifier(nn.Module):
    """Concat the features and classify."""
    def __init__(self, in_dim, nlayers):
        super().__init__()
        self.cls = MLPClassifier(in_dim, nlayers, match_inp_sz_layer=True)

    def forward(self, preds, pixs):
        """
        Run the classifier on the predictions.
        Args:
            preds: (BxTx1xDxH'xW')
            pixs: (BxTx1xDxHxW)
        Retuns:
            solved: (Bx1)
        """
        del pixs
        batch = preds.shape[0]
        # Fold time, objects and channels into one long feature dimension.
        flat = preds.view(batch, 1, 1, -1, preds.shape[-2], preds.shape[-1])
        return self.cls(flat, None, process_all_frames=True)
class ConvConcatClassifier(nn.Module):
    """Concat the Conv features and classify."""
    def __init__(self, in_dim, nconvblocks, nclslayers):
        super().__init__()
        # Conv feature extractor (0 fc layers => returns features, not scores).
        self.conv_feat = ConvNetClassifier(in_dim, nconvblocks, 0)
        self.concat_cls = ConcatClassifier(self.conv_feat.enc.out_dim,
                                           nclslayers)

    def forward(self, preds, pixs):
        """
        Run the classifier on the predictions.
        Args:
            preds: (BxTx1xDxH'xW')
            pixs: (BxTx1xDxHxW)
        Retuns:
            solved: (Bx1)
        """
        assert preds.shape[1] == pixs.shape[1], (
            'Must pass in run_decode=True if using a pixel-based classifier!!')
        del preds
        frame_feats = self.conv_feat(None, pixs, process_all_frames=True)
        return self.concat_cls(frame_feats, None)
class TrivialInteractor(nn.Module):
    """Model interactions btw objects: do nothing."""
    def __init__(self, in_dim):
        super().__init__()
        del in_dim  # accepted only for interface compatibility
    @classmethod
    def forward(cls, feat):
        """
        Identity interaction.
        Args:
            feat: (B, T, Nobj, C, H', W')
        Returns:
            feat as is
        """
        return feat
class TxEncoder(nn.Module):
    """Transformer based encoder, generates a feature combining the context."""
    def __init__(self, in_dim, nheads, nlayers, maintain_dim=False):
        """
        Args:
            in_dim (int): Input feature dimensionality.
            nheads (int): Number of attention heads.
            nlayers (int): Number of transformer encoder layers.
            maintain_dim (bool): If true, it maps the final output to the same
                dimensionality as the input
        """
        super().__init__()
        # Very basic position encoding: scalar time index -> 8-D embedding.
        self.loc_embed = nn.Sequential(nn.Linear(1, 4), nn.ReLU(inplace=True),
                                       nn.Linear(4, 8))
        self.nheads = nheads
        self.nlayers = nlayers
        # One 8-D position embedding copy per head is appended to the input.
        in_dim_loc = in_dim + 8 * nheads
        self.loc_mixer = nn.Linear(in_dim_loc, in_dim_loc)
        enc_layer = nn.TransformerEncoderLayer(in_dim_loc, nheads)
        self.encoder = nn.TransformerEncoder(enc_layer, nlayers)
        if maintain_dim:
            self.back_to_orig_dim = nn.Linear(in_dim_loc, in_dim)
            self.out_dim = in_dim
        else:
            self.back_to_orig_dim = lambda x: x  # Identity
            self.out_dim = in_dim_loc
    def forward(self, feat):
        """
        Args:
            feat: (B, T, C)
        Returns:
            (B, T, out_dim) context-mixed features
        """
        # Embed the time index, since flattening over time loses ordering.
        time_idx = torch.arange(feat.shape[1],
                                device=feat.device).unsqueeze(-1).float()
        loc_embedding = self.loc_embed(time_idx)
        # Tile to (B, T, 8 * nheads) so every head carries the position info.
        loc_embedding = loc_embedding.unsqueeze(0).repeat(
            feat.shape[0], 1, self.nheads)
        feat = torch.cat([feat, loc_embedding], dim=-1)
        # Mix the location information through all channels so each head
        # sees it after the per-head split.
        mixed_feat = self.loc_mixer(feat)
        # nn.TransformerEncoder expects time-first (T, B, C); permute in and
        # back out again.
        encoded = self.encoder(mixed_feat.permute(1, 0, 2)).permute(1, 0, 2)
        return self.back_to_orig_dim(encoded)
class TxInteractor(nn.Module):
    """Model interactions btw objects: using Transformer."""
    def __init__(self, in_dim, nheads, nlayers):
        super().__init__()
        self.in_dim = in_dim
        self.tx_enc = TxEncoder(in_dim, nheads, nlayers, maintain_dim=True)
    def forward(self, feat):
        """
        Args:
            feat: (B, T, Nobj, C, H', W')
        Returns:
            Same shape as input
        """
        # Spatially mean-pool before the transformer, then add the result
        # back onto the full-resolution features as a residual.
        pooled = torch.mean(feat, dim=[-1, -2])
        ctx = self.tx_enc(pooled.flatten(1, 2))
        ctx = ctx.view(pooled.shape).unsqueeze(-1).unsqueeze(-1)
        return feat + ctx
class TrivialSpatialAttention(nn.Module):
    """No-op spatial attention: passes features through unchanged."""
    def __init__(self, in_dim):
        super().__init__()
        del in_dim  # accepted only for interface compatibility
    def forward(self, feat):
        return feat
class TxSpatialAttention(nn.Module):
    """Transformer attention over the spatial grid of each feature map."""
    def __init__(self, in_dim, nheads, nlayers):
        super().__init__()
        self.tx_enc = TxEncoder(in_dim, nheads, nlayers, maintain_dim=True)
    def forward(self, feat):
        """
        Args:
            feats (B, T, Nobj, D, H', W')
        """
        # Treat each spatial cell as a token: collapse (B, T, Nobj) into the
        # batch and (H', W') into the token axis, attend, and restore shape.
        tokens = torch.flatten(torch.flatten(feat, 0, 2), -2, -1)
        attended = self.tx_enc(tokens.transpose(1, 2)).transpose(1, 2)
        return attended.view(feat.shape)
class Fwd(nn.Module):
    """The master class with Forward model.

    Pipeline: preprocess video -> encode per-object features -> model object
    interactions -> roll the dynamics model forward -> optionally decode back
    to pixels -> classify solved/not-solved, incurring losses along the way.
    """
    def __init__(self, agent_cfg):
        """
        Args:
            agent_cfg: Hydra config node with encoder/interactor/dyn/cls/
                decoder/loss_fn sub-configs (instantiated below).
            dyn_type: The type of dynamics model to use.
            dyn_n: Number of previous features used for prediction.
        """
        super().__init__()
        # The image embedding model
        self.preproc = VideoPreprocessor(agent_cfg)
        self.enc = hydra.utils.instantiate(agent_cfg.encoder,
                                           self.preproc.out_dim,
                                           agent_cfg.nobj)
        dim = self.enc.out_dim
        self.interactor = hydra.utils.instantiate(agent_cfg.interactor, dim)
        # The dynamics model
        self.dyn = hydra.utils.instantiate(agent_cfg.dyn, self.enc, dim)
        # Classifier model
        self.nframes_to_cls = agent_cfg.nframes_to_cls
        # A attention of the latent features before passing them through the
        # classifier.
        self.spat_att = hydra.utils.instantiate(agent_cfg.spat_att, dim)
        self.cls = hydra.utils.instantiate(agent_cfg.cls, dim)
        # Decoder model
        self.dec = hydra.utils.instantiate(agent_cfg.decoder, dim,
                                         phyre.NUM_COLORS)
        # Other loss functions
        self.pix_loss = hydra.utils.instantiate(agent_cfg.loss_fn.pix)
        self.nce_loss = hydra.utils.instantiate(agent_cfg.loss_fn.nce, dim)
    @property
    def device(self):
        """Report 'cuda' or 'cpu' based on where the first parameter lives.

        NOTE(review): only inspects the first parameter; modules spread
        across devices are not handled.
        """
        if hasattr(self, 'parameters') and next(self.parameters()).is_cuda:
            return 'cuda'
        else:
            return 'cpu'
    def _forward_dyn(self, feats, vids, n_fwd_times, need_intermediate=False):
        """
        Args:
            feats: (BxT_histxNobjxDxH'xW')
            vids: (BxT_histxCxHxW) The video corresponding to the feats, some
                dyn models might use them.
            n_fwd_times: Number of times to run the fwd model on the last frames
            need_intermediate: If true, give all the intermediate features
        Returns:
            all_preds: The predictions at each time step, in n_fwd_times
            all_pixs: The predictions in pixels. Note all dynamics models don't
                use pixels, so it might just give the last frame as output
            all_solved: The classification at each time step, for n_fwd_times
        """
        all_preds = []
        all_pixs = []
        all_addl_losses = []
        if n_fwd_times == 0:
            return [all_preds, all_pixs, all_addl_losses]
        def run_fwd_append(feats, pixs):
            # One dynamics step; accumulates prediction, pixel output, and
            # any model-internal losses into the closures above.
            pred, pred_pix, addl_losses = self.dyn(feats, pixs)
            all_preds.append(pred)
            all_pixs.append(pred_pix)
            all_addl_losses.append(addl_losses)
        run_fwd_append(feats, vids)
        n_fwd_times_copy = n_fwd_times
        while n_fwd_times - 1 > 0:
            # Slide the history window: drop the oldest frame, append the
            # newest prediction (autoregressive rollout), in both feature
            # and pixel space.
            feats = torch.cat(
                [feats[:, 1:, ...],
                 torch.unsqueeze(all_preds[-1], axis=1)],
                dim=1)
            vids = torch.cat(
                [vids[:, 1:, ...],
                 torch.unsqueeze(all_pixs[-1], axis=1)],
                dim=1)
            run_fwd_append(feats, vids)
            n_fwd_times -= 1
        assert len(all_preds) == n_fwd_times_copy, (
            '%d %d' % (len(all_preds), n_fwd_times_copy))
        if not need_intermediate:
            # Keep only the final rollout step.
            all_preds = [all_preds[-1]]
            all_pixs = [all_pixs[-1]]
            all_addl_losses = [all_addl_losses[-1]]
        # Will compute solved or not later, after decode, in case the classifier
        # needs that information
        return all_preds, all_pixs, all_addl_losses
    def _slice_for_dyn(self, features_batched, n_hist_frames, nslices=-1):
        """
        Args:
            features_batched: BxTx.... can deal with any following
                dimensions, typically it is (BxTxNobjxDxH'xW')
            n_hist_frames (int): Number of frames to use as history
            nslices (int): If -1, make as many slices of the training data
                as possible. If 1, keep only the first one. (1 used when
                training classifier on top, which should always see videos
                from the start)
        Returns:
            B'x n_hist_frames x ... (B'x n_hist_frames x Nobj x D x H' x W')
        """
        clip_hist = []
        assert features_batched.shape[1] >= n_hist_frames
        for i in range((features_batched.shape[1] - n_hist_frames + 1)):
            if nslices > 0 and i >= nslices:
                break
            # Sliding window of n_hist_frames; slices stack on the batch dim.
            clip_hist.append(features_batched[:, i:i + n_hist_frames, ...])
        clip_hist = torch.cat(clip_hist, dim=0)
        return clip_hist
    def _forward_dec(self, feats, pixels):
        """
        Args:
            feats: List of features (BxD) from the dynamics prediction stage,
                one for each time step predicted.
            pixels: List of corresponding pixels from the dynamics model. The
                dyn model may or may not actually generate new pixels.
        Returns:
            List of decoded pixel predictions, one per time step.
        """
        return [self.dec(feat, pix) for feat, pix in zip(feats, pixels)]
    # Loss functions ###########################################################
    def cswm_loss(self, pred, gt, hinge=1.0):
        """
        The energy based contrastive loss.
        Args:
            pred (BxNobjxDxH'xW')
            gt (BxNobjxDxH'xW')
            hinge (float): Margin for the negative-pair energy.
        From https://github.com/tkipf/c-swm/blob/master/modules.py#L94
        """
        pred = pred.view(pred.shape[:2] + (-1, ))
        gt = gt.view(gt.shape[:2] + (-1, ))
        batch_size = gt.size(0)
        # Negatives: the ground truth of a random other element in the batch.
        perm = np.random.permutation(batch_size)
        neg = gt[perm]
        def energy(pred, gt, sigma=0.5):
            """Energy function based on normalized squared L2 norm.
            Args:
                pred (B, Nobj, D')
                gt (B, Nobj, D')
            """
            norm = 0.5 / (sigma**2)
            diff = pred - gt
            return norm * diff.pow(2).sum(2).mean(1)
        pos_loss = energy(pred, gt)
        zeros = torch.zeros_like(pos_loss)
        pos_loss = pos_loss.mean()
        # Hinge: only penalize negatives whose energy is below the margin.
        neg_loss = torch.max(zeros, hinge - energy(pred, neg)).mean()
        return pos_loss + neg_loss
    def ce_loss(self, decisions, targets):
        """Binary cross-entropy (with logits) between decisions and labels."""
        targets = targets.to(dtype=torch.float, device=decisions.device)
        return torch.nn.functional.binary_cross_entropy_with_logits(
            decisions, targets)
    def autoencoder_loss(self, pix, latent, autoenc_loss_ratio):
        """
        Runs a random portion of the actual frames through decoder to incur a
        loss to encourage the intermediate representation to learn a good
        autoencoder as well. Random fraction only for compute reasons.
        Ideally would run every frame (ratio = 1)
        Args:
            pix (B, T, H, W): Actual pixels of the input frames
            latent (B, T, Nobj, D, H', W'): Latent representation of the input
                frames
            autoenc_loss_ratio (float): What percentage of the input frames to
                run it on. Only for compute reasons, ideally run it on all.
        Returns:
            loss {'autoenc': (1,) <float>} for the loss
        """
        # Flatten the Batch and time dimension to get all the frames
        pix_flat = torch.flatten(pix, 0, 1)
        latent_flat = torch.flatten(latent, 0, 1)
        # Select a subset of the frames to run the loss on
        assert pix_flat.shape[0] == latent_flat.shape[0]
        idx = np.arange(pix_flat.shape[0])
        np.random.shuffle(idx)
        sel_cnt = int(autoenc_loss_ratio * len(idx))
        # Sorted for deterministic gather order of the sampled subset.
        idx_sel = np.sort(idx[:sel_cnt])
        pix_flat_sel = pix_flat[idx_sel, ...]
        latent_flat_sel = latent_flat[idx_sel, ...]
        # Generate the pixels for the latent, and incur loss
        pred_flat_sel = combine_obj_pixels(self.dec(latent_flat_sel, None), 1)
        loss = self.pix_loss(pred_flat_sel, pix_flat_sel).unsqueeze(0)
        return {'autoenc_pix': loss}
    def solved_or_not_loss(self, clip_preds_solved, vid_is_solved):
        """
        Repeat the is_solved to as many times the batch was repeated to get
        the class label at each forward prediction
        Args:
            clip_preds_solved (B',)
            vid_is_solved (B,)
            B and B' might be different but B' must be a multiple of B, since
            it happens when num_slices > 1
        Returns:
            loss {'ce': (1,) <float>} for the loss
        """
        assert clip_preds_solved.shape[0] % vid_is_solved.shape[0] == 0
        return {
            'ce':
            self.ce_loss(
                clip_preds_solved,
                vid_is_solved.repeat((clip_preds_solved.shape[0] //
                                      vid_is_solved.shape[0], ))).unsqueeze(0)
        }
    ############################################################################
    def _compute_losses(self, clip_pred, clip_pred_pix, vid_feat, vid,
                        n_hist_frames, n_fwd_times):
        """
        Compute all losses possible.
        """
        # Placeholder returned when a loss has no applicable GT this step.
        dummy_loss = torch.Tensor([-1]).to(clip_pred.device)
        losses = {}
        # NCE and pixel loss
        # find the GT for each clip, note that all predictions may not have a GT
        # since the last n_hist_frames for a video will make a prediction that
        # goes out of the list of frames that were extracted for that video.
        feat_preds = []
        feat_gt = []
        pix_preds = []
        pix_gt = []
        batch_size = vid_feat.shape[0]
        gt_max_time = vid_feat.shape[1]
        # Max slices that could have been made of the data, to use all of the
        # training clip
        max_slices_with_gt = gt_max_time - n_hist_frames - n_fwd_times + 1
        num_slices = clip_pred.shape[0] // batch_size
        for i in range(min(max_slices_with_gt, num_slices)):
            # Slice i occupies rows [i*B, (i+1)*B) of the stacked batch.
            corr_pred = clip_pred[i * batch_size:(i + 1) * batch_size, ...]
            # Get the corresponding GT predictions for this pred
            corr_gt = vid_feat[:, i + n_hist_frames + n_fwd_times - 1]
            assert corr_gt.shape == corr_pred.shape
            feat_preds.append(corr_pred)
            feat_gt.append(corr_gt)
            # Same thing for pix
            if clip_pred_pix is not None:
                corr_pix_pred = clip_pred_pix[i * vid_feat.shape[0]:(i + 1) *
                                              vid_feat.shape[0], ...]
                corr_pix_gt = vid[:, i + n_hist_frames + n_fwd_times - 1]
                pix_preds.append(corr_pix_pred)
                pix_gt.append(corr_pix_gt)
        if len(feat_gt) > 0:
            # Keep a batch dimension to the loss, since it will be run over
            # multiple GPUs
            feat_preds = torch.cat(feat_preds)
            feat_gt = torch.cat(feat_gt)
            losses['nce'] = self.nce_loss(feat_preds, feat_gt).unsqueeze(0)
            losses['cswm'] = self.cswm_loss(feat_preds, feat_gt).unsqueeze(0)
        else:
            losses['nce'] = dummy_loss
            losses['cswm'] = dummy_loss
        # Reconstruction loss
        if len(pix_gt) > 0:
            losses['pix'] = self.pix_loss(torch.cat(pix_preds),
                                          torch.cat(pix_gt)).unsqueeze(0)
        else:
            losses['pix'] = dummy_loss
        return losses
    def _cls(self, feat_hist, pix_hist, feat_preds, pix_preds):
        """
        Wrapper around the classifier, collates all the input frames/features
        and predicted future frames/features.
        The images, features are already summed over the objects
        Args:
            feat_hist: (B, T, C, H', W')
            pix_hist: (B, T, 7, H, W)
            feat_preds [list of (B, C, H', W')] -- len = num predictions
            pix_preds [list of (B, 7, H, W)] -- len = num predictions
                The elements could be None, since not all models predict pixels
        Returns:
            (B,) predicted scores for the clips
        """
        feats_combined = feat_hist
        if feat_preds is not None and len(feat_preds) > 0:
            feats_combined = torch.cat([feat_hist] +
                                       [el.unsqueeze(1) for el in feat_preds],
                                       dim=1)
        pix_combined = pix_hist
        if (pix_preds is not None and len(pix_preds) > 0
                and pix_preds[0] is not None):
            pix_combined = torch.cat([pix_combined] +
                                      [el.unsqueeze(1) for el in pix_preds],
                                      dim=1)
        # Sum over objs -- we want the classifier model to see everything
        # at the same time
        # They are summed now, but need the dimension still
        pix_combined = pix_combined.unsqueeze(2)
        feats_combined = feats_combined.unsqueeze(2)
        # If need to keep only a subset of the frames
        if self.nframes_to_cls > 0:
            pix_combined = pix_combined[:, :self.nframes_to_cls, ...]
            feats_combined = feats_combined[:, :self.nframes_to_cls, ...]
        feats_combined = self.spat_att(feats_combined)
        # Keep the last prediction, as that should ideally be the best
        # prediction of whether it was solved or not
        # torch.max was hard to optimize through
        return self.cls(feats_combined, pix_combined)[:, -1]
    def forward(self,
                vid,
                vid_is_solved,
                n_hist_frames=3,
                n_fwd_times=1,
                n_fwd_times_incur_loss=999999,
                run_decode=False,
                compute_losses=False,
                need_intermediate=False,
                autoenc_loss_ratio=0.0,
                nslices=-1):
        """
        Args:
            vid: (BxTxNobjxHxW) The input video
            vid_is_solved: (Bx1) Whether the video is solved in the end of not.
                Could be None at test time.
            n_hist_frames: (int) Number of frames to use as history for
                prediction
            n_fwd_times: (int) How many times to run the forward dynamics model
            n_fwd_times_incur_loss (int): Upto how many of these forwards to
                incur loss on.
            run_decode: (bool) Decode the features into pixel output
            compute_losses: Should be set at train time. Will compute losses,
                whatever it can given the data (eg, if vid_is_solved is not
                passed to the function, it will not compute the CE loss).
            need_intermediate (bool): Set true if you want to run the dynamics
                model and need all the intermediate results. Else, will return
                a list with only 1 element, the final output.
            autoenc_loss_ratio (float btw 0-1): Set to 1 to run auto-encoder
                style loss on all frames when run_decode is set.
            num_slices (int): See in the _slice_for_dyn fn
        Returns:
            clip_feat: BxTxD
        """
        vid_preproc = self.preproc.preprocess_vid(vid)
        obj_feat = self.enc(vid_preproc)
        clip_hist = self._slice_for_dyn(obj_feat,
                                        n_hist_frames,
                                        nslices=nslices)
        vid_hist = self._slice_for_dyn(vid_preproc,
                                       n_hist_frames,
                                       nslices=nslices)
        assert clip_hist.shape[1] == n_hist_frames
        clip_hist = self.interactor(clip_hist)
        clip_preds, clip_preds_pix, clip_preds_addl_losses = self._forward_dyn(
            clip_hist, vid_hist, n_fwd_times, need_intermediate)
        if run_decode:
            clip_preds_pix = self._forward_dec(clip_preds, clip_preds_pix)
        else:
            clip_preds_pix = [None] * len(clip_preds)
        # Compute the solved or not, will only do for the ones asked for
        clip_preds_solved = self._cls(
            combine_obj_pixels(clip_hist, 2), combine_obj_pixels(vid_hist, 2),
            [combine_obj_pixels(el, 1) for el in clip_preds],
            [combine_obj_pixels(el, 1) for el in clip_preds_pix])
        all_losses = []
        clip_preds_pix_unpreproc_for_loss = [
            self.preproc.unpreprocess_frame_for_loss(el)
            for el in clip_preds_pix
        ]
        if compute_losses:
            for i in range(min(len(clip_preds), n_fwd_times_incur_loss)):
                # Compute losses at each prediction step, if need_intermediate
                # is set. Else, it will only return a single output
                # (at the last prediction), and then we can only incur loss at
                # that point.
                if not need_intermediate:
                    assert len(clip_preds) == 1
                    pred_id = -1
                    # Only loss on predicting the final rolled out obs
                    this_fwd_times = n_fwd_times
                else:
                    assert len(clip_preds) == n_fwd_times
                    pred_id = i
                    this_fwd_times = i + 1
                all_losses.append(
                    self._compute_losses(
                        # For the loss, using only the last prediction (for now)
                        clip_preds[pred_id],
                        combine_obj_pixels(
                            clip_preds_pix_unpreproc_for_loss[pred_id], 1),
                        obj_feat,
                        combine_obj_pixels(vid, 2),
                        n_hist_frames,
                        this_fwd_times))
        all_losses = average_losses(all_losses)
        all_losses.update(average_losses(clip_preds_addl_losses))
        all_losses.update(
            self.solved_or_not_loss(clip_preds_solved, vid_is_solved))
        # Add losses on the provided frames if requested
        if run_decode and autoenc_loss_ratio > 0:
            all_losses.update(
                self.autoencoder_loss(combine_obj_pixels(vid, 2), obj_feat,
                                      autoenc_loss_ratio))
        clip_preds_pix_unpreproc = [
            combine_obj_pixels(self.preproc.unpreprocess_frame_after_loss(el),
                               1) for el in clip_preds_pix_unpreproc_for_loss
        ]
        all_preds = {
            'feats': clip_preds,
            'is_solved': clip_preds_solved,
            'pixels': clip_preds_pix_unpreproc,
        }
        return all_preds, all_losses
|
[
"numpy.random.seed",
"torch.eye",
"torch.ByteTensor",
"torch.cat",
"torch.nn.InstanceNorm2d",
"torch.cos",
"numpy.arange",
"torch.nn.Softmax",
"torch.arange",
"torch.device",
"torch.nn.functional.normalize",
"torch.flatten",
"torch.nn.functional.grid_sample",
"logging.warning",
"torch.nn.Conv3d",
"torch.nn.TransformerEncoderLayer",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.exp",
"torch.nn.Upsample",
"torch.Tensor",
"torch.nn.functional.adaptive_max_pool2d",
"torch.nn.Linear",
"numpy.random.shuffle",
"torchvision.models.resnet18",
"torch.mean",
"torch.nn.TransformerEncoder",
"functools.partial",
"torch.randn_like",
"torch.nn.ModuleList",
"torch.zeros_like",
"torch.nn.Conv2d",
"numpy.sort",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"torch.max",
"numpy.random.permutation",
"torch.unsqueeze",
"torch.reshape",
"torch.sum",
"torch.ones_like",
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d",
"torch.stack",
"torch.nn.Sequential",
"hydra.utils.instantiate",
"torch.LongTensor",
"torch.nn.functional.linear",
"omegaconf.OmegaConf.create",
"numpy.array",
"torch.chunk",
"torch.sin"
] |
[((1022, 1047), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1045, 1047), False, 'import torch\n'), ((1057, 1102), 'torch.device', 'torch.device', (["('cuda:0' if USE_CUDA else 'cpu')"], {}), "('cuda:0' if USE_CUDA else 'cpu')\n", (1069, 1102), False, 'import torch\n'), ((1103, 1121), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1117, 1121), True, 'import numpy as np\n'), ((9710, 9784), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_out'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(dim_in, dim_out, kernel_size=1, stride=1, padding=0, bias=False)\n', (9719, 9784), True, 'import torch.nn as nn\n'), ((29393, 29414), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (29400, 29414), True, 'import torch.nn as nn\n'), ((1484, 1519), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1493, 1519), True, 'import torch.nn as nn\n'), ((4587, 4653), 'torch.nn.functional.binary_cross_entropy_with_logits', 'nn.functional.binary_cross_entropy_with_logits', (['decisions', 'targets'], {}), '(decisions, targets)\n', (4633, 4653), True, 'import torch.nn as nn\n'), ((5166, 5211), 'torchvision.models.resnet18', 'torchvision.models.resnet18', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (5193, 5211), False, 'import torchvision\n'), ((5228, 5307), 'torch.nn.Conv2d', 'nn.Conv2d', (['phyre.NUM_COLORS', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(phyre.NUM_COLORS, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (5237, 5307), True, 'import torch.nn as nn\n'), ((5533, 5585), 'torch.nn.Sequential', 'nn.Sequential', (['conv1', 'net.bn1', 'net.relu', 'net.maxpool'], {}), '(conv1, net.bn1, net.relu, net.maxpool)\n', (5546, 5585), True, 'import torch.nn as nn\n'), ((5608, 5671), 'torch.nn.ModuleList', 'nn.ModuleList', (['[net.layer1, net.layer2, net.layer3, 
net.layer4]'], {}), '([net.layer1, net.layer2, net.layer3, net.layer4])\n', (5621, 5671), True, 'import torch.nn as nn\n'), ((7024, 7041), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(1)'], {}), '(512, 1)\n', (7033, 7041), True, 'import torch.nn as nn\n'), ((8934, 9000), 'torch.nn.functional.binary_cross_entropy_with_logits', 'nn.functional.binary_cross_entropy_with_logits', (['decisions', 'targets'], {}), '(decisions, targets)\n', (8980, 9000), True, 'import torch.nn as nn\n'), ((9399, 9449), 'torch.eye', 'torch.eye', (['phyre.NUM_COLORS'], {'device': 'indices.device'}), '(phyre.NUM_COLORS, device=indices.device)\n', (9408, 9449), False, 'import torch\n'), ((10873, 10943), 'torch.reshape', 'torch.reshape', (['features', '((features.shape[0], -1) + features.shape[-2:])'], {}), '(features, (features.shape[0], -1) + features.shape[-2:])\n', (10886, 10943), False, 'import torch\n'), ((15038, 15106), 'torch.nn.functional.grid_sample', 'nn.functional.grid_sample', (['feat', 'grid'], {'mode': 'mode', 'align_corners': '(True)'}), '(feat, grid, mode=mode, align_corners=True)\n', (15063, 15106), True, 'import torch.nn as nn\n'), ((15607, 15640), 'torch.ones_like', 'torch.ones_like', (['feat[:, :1, ...]'], {}), '(feat[:, :1, ...])\n', (15622, 15640), False, 'import torch\n'), ((15664, 15707), 'torch.cat', 'torch.cat', (['[feat_pos, feat_pos_ones]'], {'dim': '(1)'}), '([feat_pos, feat_pos_ones], dim=1)\n', (15673, 15707), False, 'import torch\n'), ((16041, 16090), 'torch.cat', 'torch.cat', (['[tx_feat_pos, feat[:, 2:, ...]]'], {'dim': '(1)'}), '([tx_feat_pos, feat[:, 2:, ...]], dim=1)\n', (16050, 16090), False, 'import torch\n'), ((16348, 16371), 'torch.exp', 'torch.exp', (['(0.5 * logvar)'], {}), '(0.5 * logvar)\n', (16357, 16371), False, 'import torch\n'), ((16386, 16407), 'torch.randn_like', 'torch.randn_like', (['std'], {}), '(std)\n', (16402, 16407), False, 'import torch\n'), ((17024, 17065), 'torch.mean', 'torch.mean', (['feat_hist_embed'], {'dim': '[-2, -1]'}), 
'(feat_hist_embed, dim=[-2, -1])\n', (17034, 17065), False, 'import torch\n'), ((18670, 18717), 'torch.chunk', 'torch.chunk', (['feat_to_tx', 'self.num_tx', 'split_dim'], {}), '(feat_to_tx, self.num_tx, split_dim)\n', (18681, 18717), False, 'import torch\n'), ((19632, 19689), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['base_stn', '(dim * n * nobj)', 'num_tx'], {}), '(base_stn, dim * n * nobj, num_tx)\n', (19655, 19689), False, 'import hydra\n'), ((20073, 20143), 'torch.reshape', 'torch.reshape', (['features', '((features.shape[0], -1) + features.shape[-2:])'], {}), '(features, (features.shape[0], -1) + features.shape[-2:])\n', (20086, 20143), False, 'import torch\n'), ((20333, 20362), 'torch.flatten', 'torch.flatten', (['features', '(2)', '(3)'], {}), '(features, 2, 3)\n', (20346, 20362), False, 'import torch\n'), ((20463, 20527), 'torch.reshape', 'torch.reshape', (['new_feat', '(features.shape[:1] + features.shape[2:])'], {}), '(new_feat, features.shape[:1] + features.shape[2:])\n', (20476, 20527), False, 'import torch\n'), ((20909, 20966), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['base_stn', '(dim * n * nobj)', 'num_tx'], {}), '(base_stn, dim * n * nobj, num_tx)\n', (20932, 20966), False, 'import hydra\n'), ((21689, 21759), 'torch.reshape', 'torch.reshape', (['features', '((features.shape[0], -1) + features.shape[-2:])'], {}), '(features, (features.shape[0], -1) + features.shape[-2:])\n', (21702, 21759), False, 'import torch\n'), ((22311, 22354), 'torch.chunk', 'torch.chunk', (['attention_maps', 'self.num_tx', '(1)'], {}), '(attention_maps, self.num_tx, 1)\n', (22322, 22354), False, 'import torch\n'), ((22385, 22433), 'torch.chunk', 'torch.chunk', (['future_pixels_tiled', 'self.num_tx', '(1)'], {}), '(future_pixels_tiled, self.num_tx, 1)\n', (22396, 22433), False, 'import torch\n'), ((23223, 23285), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['base_stn', '(dim * n * nobj)', 'self.num_tx'], {}), '(base_stn, dim * n * nobj, 
self.num_tx)\n', (23246, 23285), False, 'import hydra\n'), ((23835, 23905), 'torch.reshape', 'torch.reshape', (['features', '((features.shape[0], -1) + features.shape[-2:])'], {}), '(features, (features.shape[0], -1) + features.shape[-2:])\n', (23848, 23905), False, 'import torch\n'), ((24907, 24969), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['base_stn', '(dim * n * nobj)', 'self.num_tx'], {}), '(base_stn, dim * n * nobj, self.num_tx)\n', (24930, 24969), False, 'import hydra\n'), ((25887, 25957), 'torch.reshape', 'torch.reshape', (['features', '((features.shape[0], -1) + features.shape[-2:])'], {}), '(features, (features.shape[0], -1) + features.shape[-2:])\n', (25900, 25957), False, 'import torch\n'), ((26272, 26327), 'torch.cat', 'torch.cat', (['[future_pixels_bg, future_pixels_obj]'], {'dim': '(1)'}), '([future_pixels_bg, future_pixels_obj], dim=1)\n', (26281, 26327), False, 'import torch\n'), ((27518, 27587), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['base_stn', '(dim * n * nobj)', '(self.num_tx * nobj)'], {}), '(base_stn, dim * n * nobj, self.num_tx * nobj)\n', (27541, 27587), False, 'import hydra\n'), ((27961, 28031), 'torch.reshape', 'torch.reshape', (['features', '((features.shape[0], -1) + features.shape[-2:])'], {}), '(features, (features.shape[0], -1) + features.shape[-2:])\n', (27974, 28031), False, 'import torch\n'), ((28222, 28257), 'torch.flatten', 'torch.flatten', (['pixels_movable', '(1)', '(2)'], {}), '(pixels_movable, 1, 2)\n', (28235, 28257), False, 'import torch\n'), ((29451, 29559), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['in_dim', 'out_dim'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'bias': '(False)'}), '(in_dim, out_dim, kernel_size=kernel_size, stride=stride,\n padding=padding, bias=False)\n', (29469, 29559), True, 'import torch.nn as nn\n'), ((29889, 29967), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'upsample_factor', 'mode': '"""bilinear"""', 
'align_corners': '(True)'}), "(scale_factor=upsample_factor, mode='bilinear', align_corners=True)\n", (29900, 29967), True, 'import torch.nn as nn\n'), ((30620, 30655), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['activation'], {}), '(activation)\n', (30643, 30655), False, 'import hydra\n'), ((30664, 30727), 'logging.warning', 'logging.warning', (['"""Using %s activation for decoders"""', 'activation'], {}), "('Using %s activation for decoders', activation)\n", (30679, 30727), False, 'import logging\n'), ((32532, 32579), 'torch.sum', 'torch.sum', (['decode_feature'], {'dim': '(1)', 'keepdims': '(True)'}), '(decode_feature, dim=1, keepdims=True)\n', (32541, 32579), False, 'import torch\n'), ((32611, 32646), 'torch.flatten', 'torch.flatten', (['decode_feature', '(0)', '(1)'], {}), '(decode_feature, 0, 1)\n', (32624, 32646), False, 'import torch\n'), ((32757, 32823), 'torch.reshape', 'torch.reshape', (['images', '(decode_feature.shape[:2] + images.shape[1:])'], {}), '(images, decode_feature.shape[:2] + images.shape[1:])\n', (32770, 32823), False, 'import torch\n'), ((36681, 36722), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['obj_encoder', 'dim'], {}), '(obj_encoder, dim)\n', (36704, 36722), False, 'import hydra\n'), ((37450, 37479), 'torch.flatten', 'torch.flatten', (['vid_feat', '(0)', '(1)'], {}), '(vid_feat, 0, 1)\n', (37463, 37479), False, 'import torch\n'), ((37890, 37958), 'torch.reshape', 'torch.reshape', (['mapped_feat', '((batch_size, -1) + mapped_feat.shape[1:])'], {}), '(mapped_feat, (batch_size, -1) + mapped_feat.shape[1:])\n', (37903, 37958), False, 'import torch\n'), ((38314, 38363), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['obj_encoder', '(dim // nobj)'], {}), '(obj_encoder, dim // nobj)\n', (38337, 38363), False, 'import hydra\n'), ((42136, 42171), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['base_model'], {}), '(base_model)\n', (42159, 42171), False, 'import hydra\n'), ((42188, 42257), 
'torch.nn.Conv2d', 'nn.Conv2d', (['in_dim', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(in_dim, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (42197, 42257), True, 'import torch.nn as nn\n'), ((42408, 42460), 'torch.nn.Sequential', 'nn.Sequential', (['conv1', 'net.bn1', 'net.relu', 'net.maxpool'], {}), '(conv1, net.bn1, net.relu, net.maxpool)\n', (42421, 42460), True, 'import torch.nn as nn\n'), ((44009, 44104), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['feat_ext', '(in_dim * nobj if self.process_objs_together else in_dim)'], {}), '(feat_ext, in_dim * nobj if self.\n process_objs_together else in_dim)\n', (44032, 44104), False, 'import hydra\n'), ((44212, 44274), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['objectifier', 'initial_dim', 'obj_encoder'], {}), '(objectifier, initial_dim, obj_encoder)\n', (44235, 44274), False, 'import hydra\n'), ((47437, 47468), 'torch.max', 'torch.max', (['obj_pix'], {'dim': 'obj_dim'}), '(obj_pix, dim=obj_dim)\n', (47446, 47468), False, 'import torch\n'), ((49293, 49328), 'torch.mean', 'torch.mean', (['preds'], {'axis': '[2, -1, -2]'}), '(preds, axis=[2, -1, -2])\n', (49303, 49328), False, 'import torch\n'), ((52865, 52898), 'torch.nn.Linear', 'nn.Linear', (['self.tx_enc.out_dim', '(1)'], {}), '(self.tx_enc.out_dim, 1)\n', (52874, 52898), True, 'import torch.nn as nn\n'), ((54981, 55013), 'torch.nn.Sequential', 'nn.Sequential', (['*layers_flat[:-1]'], {}), '(*layers_flat[:-1])\n', (54994, 55013), True, 'import torch.nn as nn\n'), ((55035, 55055), 'torch.nn.Linear', 'nn.Linear', (['in_dim', '(1)'], {}), '(in_dim, 1)\n', (55044, 55055), True, 'import torch.nn as nn\n'), ((59109, 59142), 'torch.nn.Linear', 'nn.Linear', (['in_dim_loc', 'in_dim_loc'], {}), '(in_dim_loc, in_dim_loc)\n', (59118, 59142), True, 'import torch.nn as nn\n'), ((59159, 59205), 'torch.nn.TransformerEncoderLayer', 'nn.TransformerEncoderLayer', (['in_dim_loc', 'nheads'], {}), 
'(in_dim_loc, nheads)\n', (59185, 59205), True, 'import torch.nn as nn\n'), ((59229, 59266), 'torch.nn.TransformerEncoder', 'nn.TransformerEncoder', (['layer', 'nlayers'], {}), '(layer, nlayers)\n', (59250, 59266), True, 'import torch.nn as nn\n'), ((60036, 60076), 'torch.cat', 'torch.cat', (['[feat, loc_embedding]'], {'dim': '(-1)'}), '([feat, loc_embedding], dim=-1)\n', (60045, 60076), False, 'import torch\n'), ((61030, 61060), 'torch.mean', 'torch.mean', (['feat'], {'dim': '[-1, -2]'}), '(feat, dim=[-1, -2])\n', (61040, 61060), False, 'import torch\n'), ((62335, 62420), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.encoder', 'self.preproc.out_dim', 'agent_cfg.nobj'], {}), '(agent_cfg.encoder, self.preproc.out_dim, agent_cfg.nobj\n )\n', (62358, 62420), False, 'import hydra\n'), ((62559, 62609), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.interactor', 'dim'], {}), '(agent_cfg.interactor, dim)\n', (62582, 62609), False, 'import hydra\n'), ((62658, 62711), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.dyn', 'self.enc', 'dim'], {}), '(agent_cfg.dyn, self.enc, dim)\n', (62681, 62711), False, 'import hydra\n'), ((62917, 62965), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.spat_att', 'dim'], {}), '(agent_cfg.spat_att, dim)\n', (62940, 62965), False, 'import hydra\n'), ((62985, 63028), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.cls', 'dim'], {}), '(agent_cfg.cls, dim)\n', (63008, 63028), False, 'import hydra\n'), ((63072, 63137), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.decoder', 'dim', 'phyre.NUM_COLORS'], {}), '(agent_cfg.decoder, dim, phyre.NUM_COLORS)\n', (63095, 63137), False, 'import hydra\n'), ((63236, 63282), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.loss_fn.pix'], {}), '(agent_cfg.loss_fn.pix)\n', (63259, 63282), False, 'import hydra\n'), ((63307, 63358), 'hydra.utils.instantiate', 
'hydra.utils.instantiate', (['agent_cfg.loss_fn.nce', 'dim'], {}), '(agent_cfg.loss_fn.nce, dim)\n', (63330, 63358), False, 'import hydra\n'), ((66579, 66606), 'torch.cat', 'torch.cat', (['clip_hist'], {'dim': '(0)'}), '(clip_hist, dim=0)\n', (66588, 66606), False, 'import torch\n'), ((67555, 67588), 'numpy.random.permutation', 'np.random.permutation', (['batch_size'], {}), '(batch_size)\n', (67576, 67588), True, 'import numpy as np\n'), ((67995, 68021), 'torch.zeros_like', 'torch.zeros_like', (['pos_loss'], {}), '(pos_loss)\n', (68011, 68021), False, 'import torch\n'), ((68294, 68366), 'torch.nn.functional.binary_cross_entropy_with_logits', 'torch.nn.functional.binary_cross_entropy_with_logits', (['decisions', 'targets'], {}), '(decisions, targets)\n', (68346, 68366), False, 'import torch\n'), ((69239, 69263), 'torch.flatten', 'torch.flatten', (['pix', '(0)', '(1)'], {}), '(pix, 0, 1)\n', (69252, 69263), False, 'import torch\n'), ((69286, 69313), 'torch.flatten', 'torch.flatten', (['latent', '(0)', '(1)'], {}), '(latent, 0, 1)\n', (69299, 69313), False, 'import torch\n'), ((69444, 69472), 'numpy.arange', 'np.arange', (['pix_flat.shape[0]'], {}), '(pix_flat.shape[0])\n', (69453, 69472), True, 'import numpy as np\n'), ((69481, 69503), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (69498, 69503), True, 'import numpy as np\n'), ((69575, 69597), 'numpy.sort', 'np.sort', (['idx[:sel_cnt]'], {}), '(idx[:sel_cnt])\n', (69582, 69597), True, 'import numpy as np\n'), ((2314, 2371), 'torch.nn.Conv2d', 'nn.Conv2d', (['phyre.NUM_COLORS', '(3)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(phyre.NUM_COLORS, 3, kernel_size=1, bias=False)\n', (2323, 2371), True, 'import torch.nn as nn\n'), ((2385, 2402), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(3)'], {}), '(3)\n', (2399, 2402), True, 'import torch.nn as nn\n'), ((2416, 2437), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2423, 2437), True, 'import torch.nn as nn\n'), 
((2451, 2515), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(7)', 'stride': '(4)', 'padding': '(3)', 'bias': '(False)'}), '(3, 64, kernel_size=7, stride=4, padding=3, bias=False)\n', (2460, 2515), True, 'import torch.nn as nn\n'), ((2529, 2547), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2543, 2547), True, 'import torch.nn as nn\n'), ((2561, 2582), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2568, 2582), True, 'import torch.nn as nn\n'), ((2596, 2661), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(False)'}), '(64, 64, kernel_size=5, stride=2, padding=2, bias=False)\n', (2605, 2661), True, 'import torch.nn as nn\n'), ((2675, 2693), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2689, 2693), True, 'import torch.nn as nn\n'), ((2707, 2728), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2714, 2728), True, 'import torch.nn as nn\n'), ((2742, 2807), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(False)'}), '(64, 64, kernel_size=5, stride=2, padding=2, bias=False)\n', (2751, 2807), True, 'import torch.nn as nn\n'), ((2821, 2839), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2835, 2839), True, 'import torch.nn as nn\n'), ((2853, 2874), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2860, 2874), True, 'import torch.nn as nn\n'), ((2888, 2953), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(False)'}), '(64, 64, kernel_size=5, stride=2, padding=2, bias=False)\n', (2897, 2953), True, 'import torch.nn as nn\n'), ((2967, 2985), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2981, 2985), True, 'import torch.nn as nn\n'), ((2999, 3020), 'torch.nn.ReLU', 
'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3006, 3020), True, 'import torch.nn as nn\n'), ((3034, 3100), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(False)'}), '(64, 128, kernel_size=5, stride=2, padding=2, bias=False)\n', (3043, 3100), True, 'import torch.nn as nn\n'), ((3114, 3133), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3128, 3133), True, 'import torch.nn as nn\n'), ((3147, 3168), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3154, 3168), True, 'import torch.nn as nn\n'), ((3182, 3249), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(False)'}), '(128, 128, kernel_size=5, stride=2, padding=2, bias=False)\n', (3191, 3249), True, 'import torch.nn as nn\n'), ((3285, 3304), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3299, 3304), True, 'import torch.nn as nn\n'), ((3318, 3339), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3325, 3339), True, 'import torch.nn as nn\n'), ((3353, 3420), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(False)'}), '(128, 128, kernel_size=5, stride=2, padding=2, bias=False)\n', (3362, 3420), True, 'import torch.nn as nn\n'), ((3456, 3475), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3470, 3475), True, 'import torch.nn as nn\n'), ((3489, 3510), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3496, 3510), True, 'import torch.nn as nn\n'), ((5484, 5511), 'torch.eye', 'torch.eye', (['phyre.NUM_COLORS'], {}), '(phyre.NUM_COLORS)\n', (5493, 5511), False, 'import torch\n'), ((7564, 7610), 'torch.nn.functional.adaptive_max_pool2d', 'nn.functional.adaptive_max_pool2d', (['features', '(1)'], {}), '(features, 1)\n', (7597, 7610), True, 
'import torch.nn as nn\n'), ((8329, 8375), 'torch.nn.functional.adaptive_max_pool2d', 'nn.functional.adaptive_max_pool2d', (['features', '(1)'], {}), '(features, 1)\n', (8362, 8375), True, 'import torch.nn as nn\n'), ((10242, 10263), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (10249, 10263), True, 'import torch.nn as nn\n'), ((10369, 10390), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (10376, 10390), True, 'import torch.nn as nn\n'), ((13346, 13434), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_dim', '(8 * num_tx)'], {'kernel_size': 'kernel_size', 'padding': '(kernel_size // 2)'}), '(input_dim, 8 * num_tx, kernel_size=kernel_size, padding=\n kernel_size // 2)\n', (13355, 13434), True, 'import torch.nn as nn\n'), ((13497, 13510), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (13504, 13510), True, 'import torch.nn as nn\n'), ((13524, 13614), 'torch.nn.Conv2d', 'nn.Conv2d', (['(8 * num_tx)', '(10 * num_tx)'], {'kernel_size': 'kernel_size', 'padding': '(kernel_size // 2)'}), '(8 * num_tx, 10 * num_tx, kernel_size=kernel_size, padding=\n kernel_size // 2)\n', (13533, 13614), True, 'import torch.nn as nn\n'), ((13677, 13690), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (13684, 13690), True, 'import torch.nn as nn\n'), ((14011, 14046), 'torch.nn.Linear', 'nn.Linear', (['(10 * num_tx)', '(10 * num_tx)'], {}), '(10 * num_tx, 10 * num_tx)\n', (14020, 14046), True, 'import torch.nn as nn\n'), ((14080, 14115), 'torch.nn.Linear', 'nn.Linear', (['(10 * num_tx)', '(10 * num_tx)'], {}), '(10 * num_tx, 10 * num_tx)\n', (14089, 14115), True, 'import torch.nn as nn\n'), ((14152, 14187), 'torch.nn.Linear', 'nn.Linear', (['(10 * num_tx)', '(32 * num_tx)'], {}), '(10 * num_tx, 32 * num_tx)\n', (14161, 14187), True, 'import torch.nn as nn\n'), ((14225, 14238), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (14232, 14238), True, 'import torch.nn as nn\n'), ((14276, 14314), 
'torch.nn.Linear', 'nn.Linear', (['(32 * num_tx)', '(num_tx * 3 * 2)'], {}), '(32 * num_tx, num_tx * 3 * 2)\n', (14285, 14314), True, 'import torch.nn as nn\n'), ((17902, 17918), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (17911, 17918), False, 'import torch\n'), ((17943, 17959), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (17952, 17959), False, 'import torch\n'), ((18028, 18106), 'torch.cat', 'torch.cat', (['[cos_angle, sin_angle, pos_x, -sin_angle, cos_angle, pos_y]'], {'dim': '(-1)'}), '([cos_angle, sin_angle, pos_x, -sin_angle, cos_angle, pos_y], dim=-1)\n', (18037, 18106), False, 'import torch\n'), ((18836, 18889), 'functools.partial', 'partial', (['self.transform_pix'], {'mode': 'self.affine_tx_mode'}), '(self.transform_pix, mode=self.affine_tx_mode)\n', (18843, 18889), False, 'from functools import partial\n'), ((19210, 19257), 'torch.cat', 'torch.cat', (['feat_to_tx_parts_txed'], {'dim': 'split_dim'}), '(feat_to_tx_parts_txed, dim=split_dim)\n', (19219, 19257), False, 'import torch\n'), ((21217, 21280), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_tx', 'num_tx'], {'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(num_tx, num_tx, kernel_size=1, padding=0, bias=False)\n', (21226, 21280), True, 'import torch.nn as nn\n'), ((21294, 21311), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (21304, 21311), True, 'import torch.nn as nn\n'), ((22597, 22618), 'torch.stack', 'torch.stack', (['weighted'], {}), '(weighted)\n', (22608, 22618), False, 'import torch\n'), ((25161, 25251), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2 * phyre.NUM_COLORS - 1)', '(8)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(2 * phyre.NUM_COLORS - 1, 8, kernel_size=1, stride=1, padding=0,\n bias=False)\n', (25170, 25251), True, 'import torch.nn as nn\n'), ((25359, 25380), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (25366, 25380), True, 'import torch.nn as nn\n'), ((25394, 
25457), 'torch.nn.Conv2d', 'nn.Conv2d', (['(8)', '(1)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(8, 1, kernel_size=1, stride=1, padding=0, bias=False)\n', (25403, 25457), True, 'import torch.nn as nn\n'), ((26189, 26246), 'torch.cat', 'torch.cat', (['[pixels[:, -1, ...], future_pixels_obj]'], {'dim': '(1)'}), '([pixels[:, -1, ...], future_pixels_obj], dim=1)\n', (26198, 26246), False, 'import torch\n'), ((27312, 27340), 'torch.LongTensor', 'torch.LongTensor', (['movable_ch'], {}), '(movable_ch)\n', (27328, 27340), False, 'import torch\n'), ((27391, 27424), 'torch.arange', 'torch.arange', (['(1)', 'phyre.NUM_COLORS'], {}), '(1, phyre.NUM_COLORS)\n', (27403, 27424), False, 'import torch\n'), ((28784, 28845), 'torch.sum', 'torch.sum', (['future_pixels[:, :, 1:, ...]'], {'dim': '(2)', 'keepdims': '(True)'}), '(future_pixels[:, :, 1:, ...], dim=2, keepdims=True)\n', (28793, 28845), False, 'import torch\n'), ((29778, 29817), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['out_dim'], {'affine': '(True)'}), '(out_dim, affine=True)\n', (29795, 29817), True, 'import torch.nn as nn\n'), ((29844, 29859), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (29857, 29859), True, 'import torch.nn as nn\n'), ((34045, 34088), 'torch.stack', 'torch.stack', (['[el[key] for el in all_losses]'], {}), '([el[key] for el in all_losses])\n', (34056, 34088), False, 'import torch\n'), ((34258, 34295), 'torch.mean', 'torch.mean', (['(stkd * (stkd >= 0))'], {'dim': '(0)'}), '(stkd * (stkd >= 0), dim=0)\n', (34268, 34295), False, 'import torch\n'), ((34765, 34833), 'logging.warning', 'logging.warning', (['"""Ignoring the out_dim (%d) for ObjEncoder"""', 'out_dim'], {}), "('Ignoring the out_dim (%d) for ObjEncoder', out_dim)\n", (34780, 34833), False, 'import logging\n'), ((35449, 35480), 'torch.nn.Sequential', 'nn.Sequential', (['*layers_lst_flat'], {}), '(*layers_lst_flat)\n', (35462, 35480), True, 'import torch.nn as nn\n'), ((35727, 35752), 
'torch.flatten', 'torch.flatten', (['feat', '(0)', '(2)'], {}), '(feat, 0, 2)\n', (35740, 35752), False, 'import torch\n'), ((35830, 35902), 'torch.reshape', 'torch.reshape', (['obj_embed_flat', '(feat.shape[:3] + obj_embed_flat.shape[1:])'], {}), '(obj_embed_flat, feat.shape[:3] + obj_embed_flat.shape[1:])\n', (35843, 35902), False, 'import torch\n'), ((36017, 36067), 'torch.mean', 'torch.mean', (['obj_embed'], {'dim': '[-1, -2]', 'keepdims': '(True)'}), '(obj_embed, dim=[-1, -2], keepdims=True)\n', (36027, 36067), False, 'import torch\n'), ((36348, 36415), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(dim, dim, kernel_size=1, stride=1, padding=0, bias=False)\n', (36357, 36415), True, 'import torch.nn as nn\n'), ((36439, 36460), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (36446, 36460), True, 'import torch.nn as nn\n'), ((36474, 36542), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'nobj'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(dim, nobj, kernel_size=1, stride=1, padding=0, bias=False)\n', (36483, 36542), True, 'import torch.nn as nn\n'), ((40250, 40297), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_dim', '(3)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(in_dim, 3, kernel_size=1, bias=False)\n', (40259, 40297), True, 'import torch.nn as nn\n'), ((40311, 40328), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(3)'], {}), '(3)\n', (40325, 40328), True, 'import torch.nn as nn\n'), ((40342, 40363), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (40349, 40363), True, 'import torch.nn as nn\n'), ((40605, 40626), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (40612, 40626), True, 'import torch.nn as nn\n'), ((40873, 40894), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (40880, 40894), True, 'import torch.nn as nn\n'), ((41141, 
41162), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (41148, 41162), True, 'import torch.nn as nn\n'), ((41409, 41430), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (41416, 41430), True, 'import torch.nn as nn\n'), ((41679, 41700), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (41686, 41700), True, 'import torch.nn as nn\n'), ((45459, 45488), 'torch.flatten', 'torch.flatten', (['flat_obs', '(1)', '(2)'], {}), '(flat_obs, 1, 2)\n', (45472, 45488), False, 'import torch\n'), ((45874, 45923), 'torch.mean', 'torch.mean', (['features'], {'dim': '[-2, -1]', 'keepdims': '(True)'}), '(features, dim=[-2, -1], keepdims=True)\n', (45884, 45923), False, 'import torch\n'), ((46050, 46096), 'torch.nn.functional.normalize', 'nn.functional.normalize', (['features'], {'p': '(2)', 'dim': '(-1)'}), '(features, p=2, dim=-1)\n', (46073, 46096), True, 'import torch.nn as nn\n'), ((49768, 49820), 'torch.nn.functional.linear', 'nn.functional.linear', (['mean_feat', 'self.init_linear_wt'], {}), '(mean_feat, self.init_linear_wt)\n', (49788, 49820), True, 'import torch.nn as nn\n'), ((50259, 50451), 'omegaconf.OmegaConf.create', 'OmegaConf.create', (["{'class': 'nets.ResNetBaseEncoder', 'params': {'base_model': {'class':\n 'torchvision.models.resnet18', 'params': {'pretrained': False}},\n 'nlayers': num_conv_blocks}}"], {}), "({'class': 'nets.ResNetBaseEncoder', 'params': {\n 'base_model': {'class': 'torchvision.models.resnet18', 'params': {\n 'pretrained': False}}, 'nlayers': num_conv_blocks}})\n", (50275, 50451), False, 'from omegaconf import OmegaConf\n'), ((50685, 50770), 'omegaconf.OmegaConf.create', 'OmegaConf.create', (["{'class': 'nets.TrivialObjectifier', 'params': {'nobj': nobj}}"], {}), "({'class': 'nets.TrivialObjectifier', 'params': {'nobj': nobj}}\n )\n", (50701, 50770), False, 'from omegaconf import OmegaConf\n'), ((50887, 51005), 'omegaconf.OmegaConf.create', 'OmegaConf.create', 
(["{'class': 'nets.BasicObjEncoder', 'params': {'out_dim': 16, 'nlayers': 0,\n 'spatial_mean': True}}"], {}), "({'class': 'nets.BasicObjEncoder', 'params': {'out_dim': 16,\n 'nlayers': 0, 'spatial_mean': True}})\n", (50903, 51005), False, 'from omegaconf import OmegaConf\n'), ((53249, 53281), 'torch.mean', 'torch.mean', (['preds'], {'axis': '[-1, -2]'}), '(preds, axis=[-1, -2])\n', (53259, 53281), False, 'import torch\n'), ((55396, 55431), 'torch.mean', 'torch.mean', (['enc_preds', '[-1, -2, -3]'], {}), '(enc_preds, [-1, -2, -3])\n', (55406, 55431), False, 'import torch\n'), ((58887, 58902), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(4)'], {}), '(1, 4)\n', (58896, 58902), True, 'import torch.nn as nn\n'), ((58904, 58925), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (58911, 58925), True, 'import torch.nn as nn\n'), ((58966, 58981), 'torch.nn.Linear', 'nn.Linear', (['(4)', '(8)'], {}), '(4, 8)\n', (58975, 58981), True, 'import torch.nn as nn\n'), ((59328, 59357), 'torch.nn.Linear', 'nn.Linear', (['in_dim_loc', 'in_dim'], {}), '(in_dim_loc, in_dim)\n', (59337, 59357), True, 'import torch.nn as nn\n'), ((61782, 61807), 'torch.flatten', 'torch.flatten', (['feat', '(0)', '(2)'], {}), '(feat, 0, 2)\n', (61795, 61807), False, 'import torch\n'), ((72724, 72745), 'torch.cat', 'torch.cat', (['feat_preds'], {}), '(feat_preds)\n', (72733, 72745), False, 'import torch\n'), ((72768, 72786), 'torch.cat', 'torch.cat', (['feat_gt'], {}), '(feat_gt)\n', (72777, 72786), False, 'import torch\n'), ((1317, 1352), 'torch.nn.Linear', 'nn.Linear', (['action_size', 'hidden_size'], {}), '(action_size, hidden_size)\n', (1326, 1352), True, 'import torch.nn as nn\n'), ((1425, 1460), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (1434, 1460), True, 'import torch.nn as nn\n'), ((17620, 17643), 'torch.zeros_like', 'torch.zeros_like', (['pos_x'], {}), '(pos_x)\n', (17636, 17643), False, 'import torch\n'), 
((17668, 17691), 'torch.zeros_like', 'torch.zeros_like', (['pos_y'], {}), '(pos_y)\n', (17684, 17691), False, 'import torch\n'), ((34932, 35054), 'torch.nn.Conv2d', 'nn.Conv2d', (['(in_dim if i == 0 else out_dim)', 'out_dim'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'bias': '(False)'}), '(in_dim if i == 0 else out_dim, out_dim, kernel_size=kernel_size,\n stride=stride, padding=padding, bias=False)\n', (34941, 35054), True, 'import torch.nn as nn\n'), ((35174, 35195), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (35181, 35195), True, 'import torch.nn as nn\n'), ((48084, 48109), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'in_dim'], {}), '(in_dim, in_dim)\n', (48093, 48109), True, 'import torch.nn as nn\n'), ((48130, 48151), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (48137, 48151), True, 'import torch.nn as nn\n'), ((49425, 49559), 'logging.warning', 'logging.warning', (['"""Creating a linear layer to map the input dims (%d) to MLP input dim (%d)"""', 'mean_feat.shape[-1]', 'self.in_dim'], {}), "(\n 'Creating a linear layer to map the input dims (%d) to MLP input dim (%d)',\n mean_feat.shape[-1], self.in_dim)\n", (49440, 49559), False, 'import logging\n'), ((49845, 49866), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (49852, 49866), True, 'import torch.nn as nn\n'), ((54749, 54810), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_dim', 'in_dim', '(3)'], {'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(in_dim, in_dim, 3, stride=2, padding=1, bias=False)\n', (54758, 54810), True, 'import torch.nn as nn\n'), ((54824, 54845), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (54831, 54845), True, 'import torch.nn as nn\n'), ((71052, 71070), 'torch.Tensor', 'torch.Tensor', (['[-1]'], {}), '([-1])\n', (71064, 71070), False, 'import torch\n'), ((14694, 14747), 'numpy.array', 'np.array', (['([1, 0, 0, 0, 1, 0] * 
num_tx)'], {'dtype': 'np.float'}), '([1, 0, 0, 0, 1, 0] * num_tx, dtype=np.float)\n', (14702, 14747), True, 'import numpy as np\n'), ((17759, 17782), 'torch.zeros_like', 'torch.zeros_like', (['angle'], {}), '(angle)\n', (17775, 17782), False, 'import torch\n'), ((31588, 31603), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (31601, 31603), True, 'import torch.nn as nn\n'), ((53589, 53635), 'torch.max', 'torch.max', (['feat_enc_time'], {'dim': '(1)', 'keepdims': '(True)'}), '(feat_enc_time, dim=1, keepdims=True)\n', (53598, 53635), False, 'import torch\n'), ((64881, 64919), 'torch.unsqueeze', 'torch.unsqueeze', (['all_preds[-1]'], {'axis': '(1)'}), '(all_preds[-1], axis=1)\n', (64896, 64919), False, 'import torch\n'), ((65027, 65064), 'torch.unsqueeze', 'torch.unsqueeze', (['all_pixs[-1]'], {'axis': '(1)'}), '(all_pixs[-1], axis=1)\n', (65042, 65064), False, 'import torch\n'), ((4517, 4542), 'torch.ByteTensor', 'torch.ByteTensor', (['targets'], {}), '(targets)\n', (4533, 4542), False, 'import torch\n'), ((48308, 48328), 'torch.nn.Linear', 'nn.Linear', (['in_dim', '(1)'], {}), '(in_dim, 1)\n', (48317, 48328), True, 'import torch.nn as nn\n'), ((73135, 73155), 'torch.cat', 'torch.cat', (['pix_preds'], {}), '(pix_preds)\n', (73144, 73155), False, 'import torch\n'), ((73199, 73216), 'torch.cat', 'torch.cat', (['pix_gt'], {}), '(pix_gt)\n', (73208, 73216), False, 'import torch\n'), ((59778, 59825), 'torch.arange', 'torch.arange', (['feat.shape[1]'], {'device': 'feat.device'}), '(feat.shape[1], device=feat.device)\n', (59790, 59825), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
import multiscale.bulk_img_processing as blk
from pathlib import Path
import unittest
class get_core_file_name_TestSuite(unittest.TestCase):
"""Basic test cases."""
def test_multiple_underscores(self):
testStr = 'This_name_has_multiple_underscores.extension'
self.assertEqual(blk.get_core_file_name(testStr),'This')
def test_no_underscores(self):
testStr = 'TestStr.extension'
self.assertEqual(blk.get_core_file_name(testStr),'TestStr')
def test_single_underscore(self):
testStr = 'Test_str.extension'
self.assertEqual(blk.get_core_file_name(testStr),'Test')
#class dataframe_generator_excel_TestSuite(unittest.TestCast):
# return
#
class file_name_parts_TestSuite(unittest.TestCase):
def test_multiple_underscores(self):
testStr = 'This_name_has_multiple_underscores.extension'
parts = ['This', 'name', 'has', 'multiple', 'underscores']
self.assertEqual(blk.file_name_parts(testStr),parts)
def test_no_underscores(self):
testStr = 'TestStr.extension'
self.assertEqual(blk.file_name_parts(testStr),['TestStr'])
def test_single_underscore(self):
testStr = 'Test_str.extension'
parts = ['Test', 'str']
self.assertEqual(blk.file_name_parts(testStr),parts)
class create_new_image_path_TestSuite(unittest.TestCase):
def test_suffix_proivded(self):
path = Path('C:/Test/Folder/Test.tif')
output_dir = Path('C:/Output')
suffix = 'Suffix'
expected = Path('C:/Output/Test_Suffix.tif')
new_path = blk.create_new_image_path(path, output_dir,
output_suffix = suffix)
self.assertEqual(new_path, expected)
def test_empty_suffix(self):
path = Path('C:/Test/Folder/Test.tif')
output_dir = Path('C:/Output')
expected = Path('C:/Output/Test.tif')
new_path = blk.create_new_image_path(path, output_dir)
#Modify to either Accept None Suffix, or to throw error for no bad suffix
self.assertEqual(new_path, expected)
def test_new_extension(self):
path = Path('C:/Test/Folder/Test.tif')
output_dir = Path('C:/Output')
extension = '.test'
expected = Path('C:/Output/Test.test')
new_path = blk.create_new_image_path(path, output_dir,
extension = extension)
self.assertEqual(new_path, expected)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"unittest.main",
"multiscale.bulk_img_processing.create_new_image_path",
"pathlib.Path",
"multiscale.bulk_img_processing.get_core_file_name",
"multiscale.bulk_img_processing.file_name_parts"
] |
[((2633, 2659), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (2646, 2659), False, 'import unittest\n'), ((1484, 1515), 'pathlib.Path', 'Path', (['"""C:/Test/Folder/Test.tif"""'], {}), "('C:/Test/Folder/Test.tif')\n", (1488, 1515), False, 'from pathlib import Path\n'), ((1537, 1554), 'pathlib.Path', 'Path', (['"""C:/Output"""'], {}), "('C:/Output')\n", (1541, 1554), False, 'from pathlib import Path\n'), ((1609, 1642), 'pathlib.Path', 'Path', (['"""C:/Output/Test_Suffix.tif"""'], {}), "('C:/Output/Test_Suffix.tif')\n", (1613, 1642), False, 'from pathlib import Path\n'), ((1671, 1736), 'multiscale.bulk_img_processing.create_new_image_path', 'blk.create_new_image_path', (['path', 'output_dir'], {'output_suffix': 'suffix'}), '(path, output_dir, output_suffix=suffix)\n', (1696, 1736), True, 'import multiscale.bulk_img_processing as blk\n'), ((1882, 1913), 'pathlib.Path', 'Path', (['"""C:/Test/Folder/Test.tif"""'], {}), "('C:/Test/Folder/Test.tif')\n", (1886, 1913), False, 'from pathlib import Path\n'), ((1935, 1952), 'pathlib.Path', 'Path', (['"""C:/Output"""'], {}), "('C:/Output')\n", (1939, 1952), False, 'from pathlib import Path\n'), ((1972, 1998), 'pathlib.Path', 'Path', (['"""C:/Output/Test.tif"""'], {}), "('C:/Output/Test.tif')\n", (1976, 1998), False, 'from pathlib import Path\n'), ((2027, 2070), 'multiscale.bulk_img_processing.create_new_image_path', 'blk.create_new_image_path', (['path', 'output_dir'], {}), '(path, output_dir)\n', (2052, 2070), True, 'import multiscale.bulk_img_processing as blk\n'), ((2265, 2296), 'pathlib.Path', 'Path', (['"""C:/Test/Folder/Test.tif"""'], {}), "('C:/Test/Folder/Test.tif')\n", (2269, 2296), False, 'from pathlib import Path\n'), ((2318, 2335), 'pathlib.Path', 'Path', (['"""C:/Output"""'], {}), "('C:/Output')\n", (2322, 2335), False, 'from pathlib import Path\n'), ((2383, 2410), 'pathlib.Path', 'Path', (['"""C:/Output/Test.test"""'], {}), "('C:/Output/Test.test')\n", (2387, 2410), False, 'from 
pathlib import Path\n'), ((2439, 2503), 'multiscale.bulk_img_processing.create_new_image_path', 'blk.create_new_image_path', (['path', 'output_dir'], {'extension': 'extension'}), '(path, output_dir, extension=extension)\n', (2464, 2503), True, 'import multiscale.bulk_img_processing as blk\n'), ((328, 359), 'multiscale.bulk_img_processing.get_core_file_name', 'blk.get_core_file_name', (['testStr'], {}), '(testStr)\n', (350, 359), True, 'import multiscale.bulk_img_processing as blk\n'), ((467, 498), 'multiscale.bulk_img_processing.get_core_file_name', 'blk.get_core_file_name', (['testStr'], {}), '(testStr)\n', (489, 498), True, 'import multiscale.bulk_img_processing as blk\n'), ((621, 652), 'multiscale.bulk_img_processing.get_core_file_name', 'blk.get_core_file_name', (['testStr'], {}), '(testStr)\n', (643, 652), True, 'import multiscale.bulk_img_processing as blk\n'), ((1004, 1032), 'multiscale.bulk_img_processing.file_name_parts', 'blk.file_name_parts', (['testStr'], {}), '(testStr)\n', (1023, 1032), True, 'import multiscale.bulk_img_processing as blk\n'), ((1139, 1167), 'multiscale.bulk_img_processing.file_name_parts', 'blk.file_name_parts', (['testStr'], {}), '(testStr)\n', (1158, 1167), True, 'import multiscale.bulk_img_processing as blk\n'), ((1324, 1352), 'multiscale.bulk_img_processing.file_name_parts', 'blk.file_name_parts', (['testStr'], {}), '(testStr)\n', (1343, 1352), True, 'import multiscale.bulk_img_processing as blk\n')]
|
#!/usr/bin/env python3
import rospy
import os
from sensor_msgs.msg import CompressedImage
import numpy as np
import cv2
def streaming(msg, rate, stream, image_pub):
while not rospy.is_shutdown():
_, frame = stream.read()
msg.header.stamp = rospy.Time.now()
msg.format = 'jpeg'
msg.data = np.array(cv2.imencode('.jpeg', frame)[1]).tostring()
image_pub.publish(msg)
rate.sleep()
if __name__ == '__main__':
src = 0 # 'http://[Pi IP]:8081/'
stream = cv2.VideoCapture(src)
hn = os.environ.get('HOSTNAME')
hn = 'pi' if hn is None else hn
image_pub = rospy.Publisher(f'/nautilus/{hn}/usbcam', CompressedImage, queue_size=1)
rospy.init_node('compress_stream')
rate = rospy.Rate(24)
msg = CompressedImage()
streaming(msg, rate, stream, image_pub)
|
[
"rospy.Time.now",
"rospy.Publisher",
"rospy.Rate",
"cv2.VideoCapture",
"os.environ.get",
"rospy.is_shutdown",
"rospy.init_node",
"sensor_msgs.msg.CompressedImage",
"cv2.imencode"
] |
[((526, 547), 'cv2.VideoCapture', 'cv2.VideoCapture', (['src'], {}), '(src)\n', (542, 547), False, 'import cv2\n'), ((560, 586), 'os.environ.get', 'os.environ.get', (['"""HOSTNAME"""'], {}), "('HOSTNAME')\n", (574, 586), False, 'import os\n'), ((643, 715), 'rospy.Publisher', 'rospy.Publisher', (['f"""/nautilus/{hn}/usbcam"""', 'CompressedImage'], {'queue_size': '(1)'}), "(f'/nautilus/{hn}/usbcam', CompressedImage, queue_size=1)\n", (658, 715), False, 'import rospy\n'), ((721, 755), 'rospy.init_node', 'rospy.init_node', (['"""compress_stream"""'], {}), "('compress_stream')\n", (736, 755), False, 'import rospy\n'), ((768, 782), 'rospy.Rate', 'rospy.Rate', (['(24)'], {}), '(24)\n', (778, 782), False, 'import rospy\n'), ((796, 813), 'sensor_msgs.msg.CompressedImage', 'CompressedImage', ([], {}), '()\n', (811, 813), False, 'from sensor_msgs.msg import CompressedImage\n'), ((188, 207), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (205, 207), False, 'import rospy\n'), ((271, 287), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (285, 287), False, 'import rospy\n'), ((346, 374), 'cv2.imencode', 'cv2.imencode', (['""".jpeg"""', 'frame'], {}), "('.jpeg', frame)\n", (358, 374), False, 'import cv2\n')]
|
# -*- coding: utf-8 -*-
'''
PROJECT_NAME:login
FILE:serializers
USERNAME: 李宏伟
DATE:2020/1/15
TIME:上午10:13
PRODUCT_NAME:PyCharm
'''
import re
from rest_framework import serializers
from .models import User
class CreateUserSerializer(serializers.ModelSerializer):
"""
创建用户序列化器
"""
password2 = serializers.CharField(label="确认密码", write_only=True)
allow = serializers.CharField(label="同意协议", write_only=True)
class Meta:
model = User
fields = ('id', 'username', 'password', 'password2', 'mobile', 'allow',)
extra_kwargs = {
"username": {
"min_length": 5,
"max_length": 20,
"error_messages": {
"min_length": "仅允许5-20个字符的用户名",
"max_length": "仅允许5-20个字符的用户名",
}
},
"password": {
"write_only": True,
"min_length": 8,
"max_length": 20,
"error_messages": {
"min_length": "仅允许8-20个字符的密码",
"max_length": "仅允许8-20个字符的密码",
}
}
}
def validate_mobile(self, value):
"""验证手机号码"""
if not re.match(r"1[1-9]\d{9}", value):
raise serializers.ValidationError("手机号码格式错误")
return value
def validate_allow(self, value):
if value != 'true':
raise serializers.ValidationError('请同意用户协议')
return value
def validate(self, attrs):
# 判断两次密码
if attrs['password'] != attrs['password2']:
raise serializers.ValidationError('两次密码不一致')
return attrs
def create(self, validate_data):
"""创建用户"""
# 移除数据库模型中不存在的属性
del validate_data['password2']
del validate_data['allow']
user = super(CreateUserSerializer, self).create(validate_data)
# 调用Django的认证系统加密密码
user.set_password(validate_data["password"])
user.save()
# 生成token
from rest_framework_jwt.settings import api_settings
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
payload = jwt_payload_handler(user)
token = jwt_encode_handler(payload)
user.token = token
return user
|
[
"rest_framework.serializers.CharField",
"re.match",
"rest_framework.serializers.ValidationError"
] |
[((304, 356), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'label': '"""确认密码"""', 'write_only': '(True)'}), "(label='确认密码', write_only=True)\n", (325, 356), False, 'from rest_framework import serializers\n'), ((366, 418), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'label': '"""同意协议"""', 'write_only': '(True)'}), "(label='同意协议', write_only=True)\n", (387, 418), False, 'from rest_framework import serializers\n'), ((969, 1000), 're.match', 're.match', (['"""1[1-9]\\\\d{9}"""', 'value'], {}), "('1[1-9]\\\\d{9}', value)\n", (977, 1000), False, 'import re\n'), ((1011, 1050), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""手机号码格式错误"""'], {}), "('手机号码格式错误')\n", (1038, 1050), False, 'from rest_framework import serializers\n'), ((1133, 1171), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""请同意用户协议"""'], {}), "('请同意用户协议')\n", (1160, 1171), False, 'from rest_framework import serializers\n'), ((1283, 1321), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""两次密码不一致"""'], {}), "('两次密码不一致')\n", (1310, 1321), False, 'from rest_framework import serializers\n')]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
import torch
import mobile_cv.arch.fbnet_v2.basic_blocks as bb
import mobile_cv.arch.fbnet_v2.fbnet_builder as fbnet_builder
import mobile_cv.arch.utils.fuse_utils as fuse_utils
def run_and_compare(model_before, model_after, input_size):
    """Feed one all-zero tensor of `input_size` through both models and
    assert their outputs agree within an absolute tolerance of 1e-4."""
    zero_input = torch.zeros(input_size, requires_grad=False)
    result_before = model_before(zero_input).detach()
    result_after = model_after(zero_input).detach()
    np.testing.assert_allclose(result_before, result_after, rtol=0, atol=1e-4)
def _build_model(arch_def, dim_in):
    """Build an eval-mode FBNet block model from `arch_def`.

    The RNG seed is pinned so repeated builds produce identical weights.
    """
    unified = fbnet_builder.unify_arch_def(arch_def, ["blocks"])
    torch.manual_seed(0)
    net = fbnet_builder.FBNetBuilder(1.0).build_blocks(
        unified["blocks"], dim_in=dim_in
    )
    net.eval()
    return net
def _find_modules(model, module_to_check):
for x in model.modules():
if isinstance(x, module_to_check):
return True
return False
class TestUtilsFuseUtils(unittest.TestCase):
    """Tests for mobile_cv fuse utilities (ConvBNRelu and whole-model fusing).

    The fixture-building code that used to be copy-pasted into every test is
    factored into `_make_cbr` / `_make_arch_def`.
    """

    def _make_cbr(self):
        # One ConvBNRelu block in eval mode, shared by the convbnrelu tests.
        return bb.ConvBNRelu(
            3, 6, kernel_size=3, padding=1, bn_args="bn", relu_args="relu"
        ).eval()

    def _make_arch_def(self, stage0_extra=()):
        # Two-stage arch used by the model-level tests; `stage0_extra` lets a
        # test append extra block kwargs (e.g. a swish activation) to stage 0.
        e6 = {"expansion": 6}
        dw_skip_bnrelu = {"dw_skip_bnrelu": True}
        bn_args = {"bn_args": {"name": "bn", "momentum": 0.003}}
        return {
            "blocks": [
                # [c, s, n, ...]
                # stage 0
                [("conv_k3", 4, 2, 1, bn_args) + tuple(stage0_extra)],
                # stage 1
                [
                    ("ir_k3", 8, 2, 2, e6, dw_skip_bnrelu, bn_args),
                    ("ir_k5_sehsig", 8, 1, 1, e6, bn_args),
                ],
            ]
        }

    def test_fuse_convbnrelu(self):
        cbr = self._make_cbr()
        fused = fuse_utils.fuse_convbnrelu(cbr, inplace=False)
        # Non-inplace fusing must leave the original model untouched.
        self.assertTrue(_find_modules(cbr, torch.nn.BatchNorm2d))
        self.assertFalse(_find_modules(fused, torch.nn.BatchNorm2d))
        input_size = [2, 3, 7, 7]
        run_and_compare(cbr, fused, input_size)

    def test_fuse_convbnrelu_inplace(self):
        cbr = self._make_cbr()
        fused = fuse_utils.fuse_convbnrelu(cbr, inplace=True)
        # Inplace fusing removes BN from the original model as well.
        self.assertFalse(_find_modules(cbr, torch.nn.BatchNorm2d))
        self.assertFalse(_find_modules(fused, torch.nn.BatchNorm2d))
        input_size = [2, 3, 7, 7]
        run_and_compare(cbr, fused, input_size)

    def test_fuse_model(self):
        model = _build_model(self._make_arch_def(), dim_in=3)
        fused_model = fuse_utils.fuse_model(model, inplace=False)
        print(model)
        print(fused_model)
        self.assertTrue(_find_modules(model, torch.nn.BatchNorm2d))
        self.assertFalse(_find_modules(fused_model, torch.nn.BatchNorm2d))
        input_size = [2, 3, 8, 8]
        run_and_compare(model, fused_model, input_size)

    def test_fuse_model_inplace(self):
        model = _build_model(self._make_arch_def(), dim_in=3)
        fused_model = fuse_utils.fuse_model(model, inplace=True)
        print(model)
        print(fused_model)
        self.assertFalse(_find_modules(model, torch.nn.BatchNorm2d))
        self.assertFalse(_find_modules(fused_model, torch.nn.BatchNorm2d))
        input_size = [2, 3, 8, 8]
        run_and_compare(model, fused_model, input_size)

    def test_fuse_model_swish(self):
        model = _build_model(
            self._make_arch_def(stage0_extra=({"relu_args": "swish"},)), dim_in=3
        )
        fused_model = fuse_utils.fuse_model(model, inplace=False)
        print(model)
        print(fused_model)
        self.assertTrue(_find_modules(model, torch.nn.BatchNorm2d))
        self.assertFalse(_find_modules(fused_model, torch.nn.BatchNorm2d))
        # Fusing must preserve the swish activation module.
        self.assertTrue(_find_modules(fused_model, bb.Swish))
        input_size = [2, 3, 8, 8]
        run_and_compare(model, fused_model, input_size)
|
[
"mobile_cv.arch.fbnet_v2.fbnet_builder.unify_arch_def",
"mobile_cv.arch.fbnet_v2.fbnet_builder.FBNetBuilder",
"torch.manual_seed",
"mobile_cv.arch.utils.fuse_utils.fuse_model",
"mobile_cv.arch.utils.fuse_utils.fuse_convbnrelu",
"mobile_cv.arch.fbnet_v2.basic_blocks.ConvBNRelu",
"torch.zeros"
] |
[((385, 429), 'torch.zeros', 'torch.zeros', (['input_size'], {'requires_grad': '(False)'}), '(input_size, requires_grad=False)\n', (396, 429), False, 'import torch\n'), ((675, 725), 'mobile_cv.arch.fbnet_v2.fbnet_builder.unify_arch_def', 'fbnet_builder.unify_arch_def', (['arch_def', "['blocks']"], {}), "(arch_def, ['blocks'])\n", (703, 725), True, 'import mobile_cv.arch.fbnet_v2.fbnet_builder as fbnet_builder\n'), ((730, 750), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (747, 750), False, 'import torch\n'), ((765, 796), 'mobile_cv.arch.fbnet_v2.fbnet_builder.FBNetBuilder', 'fbnet_builder.FBNetBuilder', (['(1.0)'], {}), '(1.0)\n', (791, 796), True, 'import mobile_cv.arch.fbnet_v2.fbnet_builder as fbnet_builder\n'), ((1278, 1324), 'mobile_cv.arch.utils.fuse_utils.fuse_convbnrelu', 'fuse_utils.fuse_convbnrelu', (['cbr'], {'inplace': '(False)'}), '(cbr, inplace=False)\n', (1304, 1324), True, 'import mobile_cv.arch.utils.fuse_utils as fuse_utils\n'), ((1726, 1771), 'mobile_cv.arch.utils.fuse_utils.fuse_convbnrelu', 'fuse_utils.fuse_convbnrelu', (['cbr'], {'inplace': '(True)'}), '(cbr, inplace=True)\n', (1752, 1771), True, 'import mobile_cv.arch.utils.fuse_utils as fuse_utils\n'), ((2610, 2653), 'mobile_cv.arch.utils.fuse_utils.fuse_model', 'fuse_utils.fuse_model', (['model'], {'inplace': '(False)'}), '(model, inplace=False)\n', (2631, 2653), True, 'import mobile_cv.arch.utils.fuse_utils as fuse_utils\n'), ((3563, 3605), 'mobile_cv.arch.utils.fuse_utils.fuse_model', 'fuse_utils.fuse_model', (['model'], {'inplace': '(True)'}), '(model, inplace=True)\n', (3584, 3605), True, 'import mobile_cv.arch.utils.fuse_utils as fuse_utils\n'), ((4538, 4581), 'mobile_cv.arch.utils.fuse_utils.fuse_model', 'fuse_utils.fuse_model', (['model'], {'inplace': '(False)'}), '(model, inplace=False)\n', (4559, 4581), True, 'import mobile_cv.arch.utils.fuse_utils as fuse_utils\n'), ((1155, 1232), 'mobile_cv.arch.fbnet_v2.basic_blocks.ConvBNRelu', 'bb.ConvBNRelu', (['(3)', 
'(6)'], {'kernel_size': '(3)', 'padding': '(1)', 'bn_args': '"""bn"""', 'relu_args': '"""relu"""'}), "(3, 6, kernel_size=3, padding=1, bn_args='bn', relu_args='relu')\n", (1168, 1232), True, 'import mobile_cv.arch.fbnet_v2.basic_blocks as bb\n'), ((1603, 1680), 'mobile_cv.arch.fbnet_v2.basic_blocks.ConvBNRelu', 'bb.ConvBNRelu', (['(3)', '(6)'], {'kernel_size': '(3)', 'padding': '(1)', 'bn_args': '"""bn"""', 'relu_args': '"""relu"""'}), "(3, 6, kernel_size=3, padding=1, bn_args='bn', relu_args='relu')\n", (1616, 1680), True, 'import mobile_cv.arch.fbnet_v2.basic_blocks as bb\n')]
|
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import redirect_stdout
from io import StringIO
from os import path
from conftest import here
from nuclio import magic
def test_print_handler_code():
    """print_handler_code should emit the notebook's handler source on stdout."""
    notebook = path.join(here, "handler.ipynb")
    captured = StringIO()
    with redirect_stdout(captured):
        magic.print_handler_code(notebook)
    assert 'def handler' in captured.getvalue()
def test_export():
    """magic.build on the sample notebook should yield a config with a spec."""
    nb_path = path.join(here, "handler.ipynb").replace("\\", "/")  # handle windows
    config, code = magic.build(nb_path, None, return_dir=True)
    assert config.get('spec'), 'export failed, config={}'.format(config)
|
[
"nuclio.magic.print_handler_code",
"io.StringIO",
"nuclio.magic.build",
"contextlib.redirect_stdout",
"os.path.join"
] |
[((752, 784), 'os.path.join', 'path.join', (['here', '"""handler.ipynb"""'], {}), "(here, 'handler.ipynb')\n", (761, 784), False, 'from os import path\n'), ((794, 804), 'io.StringIO', 'StringIO', ([], {}), '()\n', (802, 804), False, 'from io import StringIO\n'), ((950, 982), 'os.path.join', 'path.join', (['here', '"""handler.ipynb"""'], {}), "(here, 'handler.ipynb')\n", (959, 982), False, 'from os import path\n'), ((1055, 1095), 'nuclio.magic.build', 'magic.build', (['line', 'None'], {'return_dir': '(True)'}), '(line, None, return_dir=True)\n', (1066, 1095), False, 'from nuclio import magic\n'), ((814, 833), 'contextlib.redirect_stdout', 'redirect_stdout', (['io'], {}), '(io)\n', (829, 833), False, 'from contextlib import redirect_stdout\n'), ((843, 874), 'nuclio.magic.print_handler_code', 'magic.print_handler_code', (['fname'], {}), '(fname)\n', (867, 874), False, 'from nuclio import magic\n')]
|
from setuptools import find_packages, setup

# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Package metadata for the rac_es Elasticsearch helpers.
setup(
    name='rac_es',
    version='0.17.4',
    description="Helpers for Rockefeller Archive Center's Elasticsearch implementation.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='http://github.com/RockefellerArchiveCenter/rac_es',
    author='Rockefeller Archive Center',
    author_email='<EMAIL>',
    license='MIT',
    # Pick up every package in the repository automatically.
    packages=find_packages(),
    install_requires=[
        'elasticsearch',
        'elasticsearch_dsl',
    ],
    test_suite='nose.collector',
    tests_require=['nose', 'coverage'],
    zip_safe=False)
|
[
"setuptools.find_packages"
] |
[((504, 519), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (517, 519), False, 'from setuptools import find_packages, setup\n')]
|
# The MIT License (MIT)
# Copyright (c) 2022 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Optional, Tuple, Type, TypeVar, Union
from xcube.util.assertions import assert_true
# Generic element type used by the scalar-or-pair helpers below.
T = TypeVar('T')
# A type, or tuple of types, suitable for isinstance() checks.
ItemType = Union[Type[T], Tuple[Type[T], ...]]
# A homogeneous 2-tuple.
Pair = Tuple[T, T]
# Either a single value or a pair of values.
ScalarOrPair = Union[T, Pair]
def normalize_scalar_or_pair(
        value: ScalarOrPair[T],
        *,
        item_type: Optional[ItemType[T]] = None,
        name: Optional[str] = None
) -> Pair:
    """Normalize *value* into a homogeneous pair.

    A scalar (anything without ``len()``) is duplicated into
    ``(value, value)``; a sized value must unpack into exactly two items.
    When *item_type* is given, both items must be instances of it.
    """
    try:
        # Sized values must be pairs; scalars raise TypeError in len().
        assert_true(len(value) <= 2,
                    message=f"{name or 'Value'} must be a scalar or pair of "
                            f"{item_type or 'scalars'}, was '{value}'")
        first, second = value
    except TypeError:
        first, second = value, value
    if item_type is not None:
        both_typed = all(isinstance(item, item_type)
                         for item in (first, second))
        assert_true(both_typed,
                    message=f"{name or 'Value'} must be a scalar or pair of "
                            f"{item_type}, was '{value}'")
    return first, second
|
[
"typing.TypeVar"
] |
[((1259, 1271), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (1266, 1271), False, 'from typing import Optional, Tuple, Type, TypeVar, Union\n')]
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import unicode_literals
import logging
import platform
from cronman.monitor import Cronitor, Sentry, Slack
from cronman.utils import bool_param, config, ensure_dir, format_exception
# Module-level logger shared as the default by the cron classes below.
logger = logging.getLogger("cronman.command")
class BaseCronObject(object):
    """Common base class for CronRemoteManager, CronScheduler, CronSpawner,
    CronWorker.
    """

    def __init__(self, **kwargs):
        # Keyword arguments override the environment-driven config defaults.
        # Note: the config() defaults are evaluated eagerly by kwargs.get,
        # even when the corresponding kwarg is supplied.
        self.data_dir = kwargs.get("data_dir", config("CRONMAN_DATA_DIR"))
        self.debug = kwargs.get("debug", bool_param(config("CRONMAN_DEBUG")))
        # Monitoring/alerting clients.
        self.cronitor = Cronitor()
        self.sentry = Sentry()
        self.slack = Slack()
        # Make sure the working directory exists before any subclass uses it.
        ensure_dir(self.data_dir)
        self.logger = kwargs.get("logger", logger)

    def warning(self, exception, silent=False):
        """Handles exception as warning"""
        # Format the exception once; log + post to Slack unless silenced.
        message = format_exception(exception)
        if not silent:
            self.logger.warning(message)
            system_name = platform.node()
            self.slack.post(
                "[{host}] {message}".format(host=system_name, message=message)
            )
        return message + "\n"  # to be printed on stdout
|
[
"platform.node",
"logging.getLogger",
"cronman.monitor.Slack",
"cronman.monitor.Sentry",
"cronman.utils.format_exception",
"cronman.monitor.Cronitor",
"cronman.utils.ensure_dir",
"cronman.utils.config"
] |
[((262, 298), 'logging.getLogger', 'logging.getLogger', (['"""cronman.command"""'], {}), "('cronman.command')\n", (279, 298), False, 'import logging\n'), ((643, 653), 'cronman.monitor.Cronitor', 'Cronitor', ([], {}), '()\n', (651, 653), False, 'from cronman.monitor import Cronitor, Sentry, Slack\n'), ((676, 684), 'cronman.monitor.Sentry', 'Sentry', ([], {}), '()\n', (682, 684), False, 'from cronman.monitor import Cronitor, Sentry, Slack\n'), ((706, 713), 'cronman.monitor.Slack', 'Slack', ([], {}), '()\n', (711, 713), False, 'from cronman.monitor import Cronitor, Sentry, Slack\n'), ((722, 747), 'cronman.utils.ensure_dir', 'ensure_dir', (['self.data_dir'], {}), '(self.data_dir)\n', (732, 747), False, 'from cronman.utils import bool_param, config, ensure_dir, format_exception\n'), ((909, 936), 'cronman.utils.format_exception', 'format_exception', (['exception'], {}), '(exception)\n', (925, 936), False, 'from cronman.utils import bool_param, config, ensure_dir, format_exception\n'), ((513, 539), 'cronman.utils.config', 'config', (['"""CRONMAN_DATA_DIR"""'], {}), "('CRONMAN_DATA_DIR')\n", (519, 539), False, 'from cronman.utils import bool_param, config, ensure_dir, format_exception\n'), ((1027, 1042), 'platform.node', 'platform.node', ([], {}), '()\n', (1040, 1042), False, 'import platform\n'), ((593, 616), 'cronman.utils.config', 'config', (['"""CRONMAN_DEBUG"""'], {}), "('CRONMAN_DEBUG')\n", (599, 616), False, 'from cronman.utils import bool_param, config, ensure_dir, format_exception\n')]
|
# Csnake project configuration
import csnCilab
from csnAll import three

# "Four" is a third-party Cilab module project that depends on project "three".
four = csnCilab.CilabModuleProject("Four", "third party")
# Point the paths manager at the CMake use/config files generated in the
# project's build folder.
four.pathsManager.useFilePath = "%s/Four/UseFour.cmake" % four.GetBuildFolder()
four.pathsManager.configFilePath = "%s/Four/FourConfig.cmake" % four.GetBuildFolder()
four.AddProjects([three])
|
[
"csnCilab.CilabModuleProject"
] |
[((80, 130), 'csnCilab.CilabModuleProject', 'csnCilab.CilabModuleProject', (['"""Four"""', '"""third party"""'], {}), "('Four', 'third party')\n", (107, 130), False, 'import csnCilab\n')]
|
import numpy as np
import pandas
from sklearn.preprocessing import MinMaxScaler, StandardScaler, Binarizer

# --- Handling missing values ------------------------------------------------
# np.nan is the canonical spelling; the np.NAN alias was removed in NumPy 2.0.
data = pandas.DataFrame([
    [4., 45., 984.],
    [np.nan, np.nan, 5.],
    [94., 23., 55.],
])
# Print original data.
print(data)
# Fill the missing values with the constant 0.1.
print(data.fillna(0.1))
# Fill the missing values with the per-column mean.
print(data.fillna(data.mean()))

# --- Data normalization -----------------------------------------------------
data1 = pandas.DataFrame([[58., 1., 43.],
                          [10., 200., 65.],
                          [20., 75., 7.]])
# Scaling with min-max scaler into the [0, 1] range.
scaled_values = MinMaxScaler(feature_range=(0, 1))
results = scaled_values.fit(data1).transform(data1)
print(results)
# Scaling to zero mean / unit variance with the standard scaler.
stand_scalar = StandardScaler().fit(data1)
results = stand_scalar.transform(data1)
print(results)
# Binarization: values above the threshold become 1, others 0.
# scikit-learn estimator parameters must be passed by keyword
# (positional Binarizer(50.0) raises TypeError in recent releases).
results = Binarizer(threshold=50.0).fit(data1).transform(data1)
print(results)
|
[
"pandas.DataFrame",
"sklearn.preprocessing.Binarizer",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.preprocessing.StandardScaler"
] |
[((145, 231), 'pandas.DataFrame', 'pandas.DataFrame', (['[[4.0, 45.0, 984.0], [np.NAN, np.NAN, 5.0], [94.0, 23.0, 55.0]]'], {}), '([[4.0, 45.0, 984.0], [np.NAN, np.NAN, 5.0], [94.0, 23.0, \n 55.0]])\n', (161, 231), False, 'import pandas\n'), ((466, 543), 'pandas.DataFrame', 'pandas.DataFrame', (['[[58.0, 1.0, 43.0], [10.0, 200.0, 65.0], [20.0, 75.0, 7.0]]'], {}), '([[58.0, 1.0, 43.0], [10.0, 200.0, 65.0], [20.0, 75.0, 7.0]])\n', (482, 543), False, 'import pandas\n'), ((601, 635), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (613, 635), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler, Binarizer\n'), ((757, 773), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (771, 773), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler, Binarizer\n'), ((887, 902), 'sklearn.preprocessing.Binarizer', 'Binarizer', (['(50.0)'], {}), '(50.0)\n', (896, 902), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler, Binarizer\n')]
|
from snakemake.io import load_configfile
from pathlib import Path

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Create an empty output file for failed integration runs')
    parser.add_argument('-c', '--config', help='Snakemake config file', required=True)
    parser.add_argument('-t', '--task', required=True)
    parser.add_argument('-m', '--method', required=True)
    parser.add_argument("-v", '--hvgs', help='pre-processed by HVG filtering', action='store_true')
    parser.add_argument('-s', '--scale', action='store_true', help='pre-processed by scaling')
    args = parser.parse_args()

    method = args.method
    task = args.task

    # The snakemake config drives validation and the output path layout.
    params = load_configfile(args.config)

    if method not in params['METHODS']:
        raise ValueError(f'{method} is not a valid method.\n'
                         f'Please choose one of: {list(params["METHODS"].keys())}')
    if task not in params['DATA_SCENARIOS']:
        raise ValueError(f'{task} is not a valid integration task.\n'
                         f'Please choose one of: {list(params["DATA_SCENARIOS"].keys())}')

    # Assemble <ROOT>/<task>/integration/<scaling>/<features>/[R/]<method>.h5ad
    uses_r = 'R' in params['METHODS'][method]
    scaling = 'scaled' if args.scale else 'unscaled'
    features = 'hvg' if args.hvgs else 'full_feature'
    r_segment = 'R/' if uses_r else ''
    folder_path = '/'.join(
        [params['ROOT'], task, 'integration', scaling, features]
    ) + '/' + r_segment
    full_path = folder_path + method + '.h5ad'

    # R-based methods additionally produce an RDS placeholder.
    if uses_r:
        rds_path = folder_path + method + '.RDS'
        Path(rds_path).touch()
        Path(rds_path + ".benchmark").touch()

    Path(full_path).touch()
    Path(full_path + ".benchmark").touch()
|
[
"pathlib.Path",
"argparse.ArgumentParser",
"snakemake.io.load_configfile"
] |
[((127, 226), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create an empty output file for failed integration runs"""'}), "(description=\n 'Create an empty output file for failed integration runs')\n", (150, 226), False, 'import argparse\n'), ((801, 824), 'snakemake.io.load_configfile', 'load_configfile', (['config'], {}), '(config)\n', (816, 824), False, 'from snakemake.io import load_configfile\n'), ((1866, 1881), 'pathlib.Path', 'Path', (['full_path'], {}), '(full_path)\n', (1870, 1881), False, 'from pathlib import Path\n'), ((1894, 1924), 'pathlib.Path', 'Path', (["(full_path + '.benchmark')"], {}), "(full_path + '.benchmark')\n", (1898, 1924), False, 'from pathlib import Path\n'), ((1766, 1783), 'pathlib.Path', 'Path', (['full_path_r'], {}), '(full_path_r)\n', (1770, 1783), False, 'from pathlib import Path\n'), ((1800, 1832), 'pathlib.Path', 'Path', (["(full_path_r + '.benchmark')"], {}), "(full_path_r + '.benchmark')\n", (1804, 1832), False, 'from pathlib import Path\n')]
|
from uuid import UUID, uuid4
from datetime import datetime
from typing import Optional
from pydantic import BaseModel, Field
class BaseCollection(BaseModel):
    """Base document for Mongo-style collections.

    Fields use ``default_factory`` so each instance gets its own UUID and
    timestamp: a plain ``default=uuid4()`` is evaluated once at class
    definition time and would be shared by every instance.
    """
    id: Optional[UUID] = Field(alias='_id', default_factory=uuid4)
    created_at: Optional[datetime] = Field(default_factory=datetime.now)
    collection: Optional[str]

    def __init__(self, collection: str = None, **kwargs):
        super().__init__(**kwargs)
        # default_factory supplies fresh id/created_at when absent, and a
        # caller-provided '_id'/'created_at' (e.g. a document loaded from
        # storage) is preserved instead of being overwritten.
        if collection:
            self.collection = collection
class Likes(BaseCollection):
    """A user's rating of a piece of content."""
    user_id: UUID
    content_id: UUID
    value: int  # rating value
    collection = "likes"
class Reviews(BaseCollection):
    """A user's text review of a movie."""
    user_id: UUID
    movie_id: UUID
    text: str
    collection = "reviews"
class Bookmarks(BaseCollection):
    """A movie bookmarked by a user."""
    movie_id: UUID
    user_id: UUID
    collection = "bookmarks"
|
[
"uuid.uuid4",
"datetime.datetime.now"
] |
[((259, 273), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (271, 273), False, 'from datetime import datetime\n'), ((416, 423), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (421, 423), False, 'from uuid import UUID, uuid4\n'), ((450, 464), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (462, 464), False, 'from datetime import datetime\n'), ((213, 220), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (218, 220), False, 'from uuid import UUID, uuid4\n')]
|
"""
Your chance to explore Loops and Turtles!
Authors: <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, their colleagues, and <NAME>.
"""
########################################################################
# DONE: 1.
# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.
########################################################################
########################################################################
# DONE: 2.
# You should have RUN the m5e_loopy_turtles module and READ its code.
# (Do so now if you have not already done so.)
#
# Below this comment, add ANY CODE THAT YOU WANT, as long as:
# 1. You construct at least 2 rg.SimpleTurtle objects.
# 2. Each rg.SimpleTurtle object draws something
# (by moving, using its rg.Pen). ANYTHING is fine!
# 3. Each rg.SimpleTurtle moves inside a LOOP.
#
# Be creative! Strive for way-cool pictures! Abstract pictures rule!
#
# If you make syntax (notational) errors, no worries -- get help
# fixing them at either this session OR at the NEXT session.
#
# Don't forget to COMMIT-and-PUSH when you are done with this module.
#
########################################################################
import rosegraphics as rg

window = rg.TurtleWindow()
window.tracer(100)

# Blue spiral: turns left slightly more than a right angle each step,
# with the step length growing every iteration.
blue_spiral = rg.SimpleTurtle('square')
blue_spiral.pen = rg.Pen('blue', 3)
blue_spiral.speed = 200
for step in range(500):
    blue_spiral.left(92)
    blue_spiral.forward(step)

# Red spiral: mirror image of the blue one (turns right instead of left).
red_spiral = rg.SimpleTurtle('square')
red_spiral.pen = rg.Pen('red', 3)
red_spiral.speed = 200
for step in range(500):
    red_spiral.right(92)
    red_spiral.forward(step)

# Green figure: backs up first, then weaves back and forth while turning
# right a little each pass.
green_weaver = rg.SimpleTurtle('square')
green_weaver.pen = rg.Pen('green', 3)
green_weaver.speed = 200
green_weaver.backward(10)
for step in range(500):
    green_weaver.backward(step)
    green_weaver.right(2)
    green_weaver.forward(step)

window.close_on_mouse_click()
|
[
"rosegraphics.TurtleWindow",
"rosegraphics.SimpleTurtle",
"rosegraphics.Pen"
] |
[((1255, 1272), 'rosegraphics.TurtleWindow', 'rg.TurtleWindow', ([], {}), '()\n', (1270, 1272), True, 'import rosegraphics as rg\n'), ((1302, 1327), 'rosegraphics.SimpleTurtle', 'rg.SimpleTurtle', (['"""square"""'], {}), "('square')\n", (1317, 1327), True, 'import rosegraphics as rg\n'), ((1342, 1359), 'rosegraphics.Pen', 'rg.Pen', (['"""blue"""', '(3)'], {}), "('blue', 3)\n", (1348, 1359), True, 'import rosegraphics as rg\n'), ((1454, 1479), 'rosegraphics.SimpleTurtle', 'rg.SimpleTurtle', (['"""square"""'], {}), "('square')\n", (1469, 1479), True, 'import rosegraphics as rg\n'), ((1493, 1509), 'rosegraphics.Pen', 'rg.Pen', (['"""red"""', '(3)'], {}), "('red', 3)\n", (1499, 1509), True, 'import rosegraphics as rg\n'), ((1604, 1629), 'rosegraphics.SimpleTurtle', 'rg.SimpleTurtle', (['"""square"""'], {}), "('square')\n", (1619, 1629), True, 'import rosegraphics as rg\n'), ((1645, 1663), 'rosegraphics.Pen', 'rg.Pen', (['"""green"""', '(3)'], {}), "('green', 3)\n", (1651, 1663), True, 'import rosegraphics as rg\n')]
|
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
from behaviors.models import Timestampable, Versionable
from parlalize.settings import API_OUT_DATE_FORMAT
from datetime import datetime
class PopoloDateTimeField(models.DateTimeField):
    """DateTimeField whose values serialize to Popolo's date format."""

    def get_popolo_value(self, value):
        """Return *value* formatted as a 'YYYY-MM-DD' string.

        strftime already returns str, so the former str() wrapper was
        redundant; calling it as a bound method also works for date
        objects, not just datetime.
        """
        return value.strftime('%Y-%m-%d')
class Session(Timestampable, models.Model):
    """Model of all sessions that happened in parliament, copied from parladata."""
    name = models.CharField(_('name'),
                            blank=True, null=True,
                            max_length=128,
                            help_text=_('Session name'))
    date = PopoloDateTimeField(_('date of session'),
                               blank=True, null=True,
                               help_text=_('date of session'))
    id_parladata = models.IntegerField(_('parladata id'),
                                        blank=True, null=True,
                                        help_text=_('id parladata'))
    mandate = models.CharField(_('mandate name'),
                               blank=True, null=True,
                               max_length=128,
                               help_text=_('Mandate name'))
    start_time = PopoloDateTimeField(_('start time of session'),
                                     blank=True, null=True,
                                     help_text='Start time')
    end_time = PopoloDateTimeField(_('end time of session'),
                                   blank=True, null=True,
                                   help_text='End time')
    organization = models.ForeignKey('parlaskupine.Organization',
                                     blank=True, null=True,
                                     related_name='session',
                                     help_text='The organization in session')
    organizations = models.ManyToManyField('parlaskupine.Organization',
                                           related_name='sessions',
                                           help_text='The organizations in session')
    # BUGFIX: `classification` was declared twice in this class body; only the
    # second assignment's kwargs ever took effect.  Collapsed into this single
    # definition, which keeps the surviving behaviour.
    classification = models.CharField(_('classification'),
                                      max_length=128,
                                      blank=True, null=True,
                                      help_text=_('An organization category, e.g. committee'))
    actived = models.CharField(_('actived'),
                               null=True, blank=True,
                               max_length=128,
                               help_text=_('Yes if PG is actived or no if it is not'))
    gov_id = models.TextField(blank=True, null=True,
                             help_text='Gov website ID.')
    in_review = models.BooleanField(default=False,
                                    help_text='Is session in review?')

    def __str__(self):
        return self.name

    def getSessionDataMultipleOrgs(self):
        """Serialize this session with every linked organization (no primary org)."""
        orgs_data = [org.getOrganizationData()
                     for org
                     in self.organizations.all()]
        return {'name': self.name,
                'date': self.start_time.strftime(API_OUT_DATE_FORMAT),
                'date_ts': self.start_time,
                'id': self.id_parladata,
                'orgs': orgs_data,
                'in_review': self.in_review}

    def getSessionData(self):
        """Serialize this session including the primary organization."""
        orgs_data = [org.getOrganizationData()
                     for org
                     in self.organizations.all()]
        return {'name': self.name,
                'date': self.start_time.strftime(API_OUT_DATE_FORMAT),
                'date_ts': self.start_time,
                'id': self.id_parladata,
                'org': self.organization.getOrganizationData(),
                'orgs': orgs_data,
                'in_review': self.in_review}
class Activity(Timestampable, models.Model):
    """All activities of MP."""
    id_parladata = models.IntegerField(_('parladata id'),
                                        blank=True, null=True,
                                        help_text=_('id parladata'))
    session = models.ForeignKey('Session',
                                blank=True, null=True,
                                related_name="%(app_label)s_%(class)s_related",
                                help_text=_('Session '))
    person = models.ForeignKey('parlaposlanci.Person',
                              blank=True, null=True,
                              help_text=_('MP'))
    start_time = PopoloDateTimeField(blank=True, null=True,
                                     help_text='Start time')
    end_time = PopoloDateTimeField(blank=True, null=True,
                                   help_text='End time')

    def get_child(self):
        # Resolve the concrete subclass row (Speech/Ballot/Question) sharing
        # this activity's primary key.  Falls through to Question last, so a
        # DoesNotExist from Question.objects.get signals "no child at all".
        if Speech.objects.filter(activity_ptr=self.id):
            return Speech.objects.get(activity_ptr=self.id)
        elif Ballot.objects.filter(activity_ptr=self.id):
            return Ballot.objects.get(activity_ptr=self.id)
        else:
            return Question.objects.get(activity_ptr=self.id)
class Speech(Versionable, Activity):
    """Model of all speeches in parlament."""
    content = models.TextField(blank=True, null=True,
                               help_text='Words spoken')
    order = models.IntegerField(blank=True, null=True,
                                help_text='Order of speech')
    organization = models.ForeignKey('parlaskupine.Organization',
                                     blank=True, null=True,
                                     help_text='Organization')

    def __init__(self, *args, **kwargs):
        # NOTE(review): super(Activity, self) starts the MRO lookup *after*
        # Activity, skipping any __init__ defined by Versionable or Activity.
        # Confirm this is intentional and not a typo for super(Speech, self).
        super(Activity, self).__init__(*args, **kwargs)

    @staticmethod
    def getValidSpeeches(date_):
        # Speeches whose validity interval strictly contains date_.
        return Speech.objects.filter(valid_from__lt=date_, valid_to__gt=date_)
class Question(Activity):
    """Model of MP questions to the government."""
    content_link = models.URLField(help_text='Words spoken',
                                    max_length=350,
                                    blank=True, null=True)
    title = models.TextField(blank=True, null=True,
                             help_text='Words spoken')
    # null=True has no effect on ManyToManyField (Django ignores it), so it
    # is omitted on the two recipient relations below.
    recipient_persons = models.ManyToManyField('parlaposlanci.Person',
                                               blank=True,
                                               help_text='Recipient persons (if it\'s a person).',
                                               related_name='questions')
    recipient_organizations = models.ManyToManyField('parlaskupine.Organization',
                                                     blank=True,
                                                     help_text='Recipient organizations (if it\'s an organization).',
                                                     related_name='questions_org')
    recipient_text = models.TextField(blank=True,
                                      null=True,
                                      help_text='Recipient name as written on dz-rs.si')

    def getQuestionData(self):
        """Serialize this question with recipient ministry and organization data."""
        # Imported here to avoid a circular import with parlalize.utils.
        from parlalize.utils import getMinistryData
        persons = []
        orgs = []
        for person in self.recipient_persons.all():
            # BUGFIX: this used the undefined name API_DATE_FORMAT (a
            # NameError at runtime); the module imports API_OUT_DATE_FORMAT,
            # which is also what Session's serializers use.
            persons.append(getMinistryData(person.id_parladata,
                                           self.start_time.strftime(API_OUT_DATE_FORMAT)))
        for org in self.recipient_organizations.all():
            orgs.append(org.getOrganizationData())
        return {'title': self.title,
                'recipient_text': self.recipient_text,
                'recipient_persons': persons,
                'recipient_orgs': orgs,
                'url': self.content_link,
                'id': self.id_parladata}
class Ballot(Activity):
    """Model of all ballots"""
    vote = models.ForeignKey('Vote',
                             blank=True, null=True,
                             related_name='vote',
                             help_text=_('Vote'))
    option = models.CharField(max_length=128,
                              blank=True, null=True,
                              help_text='Yes, no, abstain')
    org_voter = models.ForeignKey('parlaskupine.Organization',
                                  blank=True, null=True,
                                  related_name='OrganizationVoter',
                                  help_text=_('Organization voter'))

    def __init__(self, *args, **kwargs):
        # NOTE(review): super(Activity, self) skips Activity in the MRO;
        # confirm this mirrors Speech.__init__ intentionally.
        super(Activity, self).__init__(*args, **kwargs)
class Vote(Timestampable, models.Model):
    """Model of all votes that happend on specific sessions,
    with number of votes for, against, abstain and not present.
    """
    created_for = models.DateField(_('date of vote'),
                                   blank=True, null=True,
                                   help_text=_('date of vote'))
    session = models.ForeignKey('Session',
                                blank=True, null=True,
                                related_name='in_session',
                                help_text=_('Session '))
    motion = models.TextField(blank=True, null=True,
                             help_text='The motion for which the vote took place')
    tags = JSONField(blank=True, null=True)
    # Tallies of the four possible ballot outcomes.
    votes_for = models.IntegerField(blank=True, null=True,
                                    help_text='Number of votes for')
    # NOTE(review): help_text typo 'againt' left as-is; fixing it would
    # generate a migration.
    against = models.IntegerField(blank=True, null=True,
                                  help_text='Number votes againt')
    abstain = models.IntegerField(blank=True, null=True,
                                  help_text='Number votes abstain')
    not_present = models.IntegerField(blank=True, null=True,
                                        help_text='Number of MPs that warent on the session')
    result = models.NullBooleanField(blank=True, null=True,
                                    default=False,
                                    help_text='The result of the vote')
    id_parladata = models.IntegerField(_('parladata id'),
                                       blank=True, null=True,
                                       help_text=_('id parladata'))
    document_url = JSONField(blank=True,
                             null=True)
    start_time = PopoloDateTimeField(blank=True,
                                     null=True,
                                     help_text='Start time')
    # Outlier analysis flags.
    is_outlier = models.NullBooleanField(default=False,
                                         help_text='is outlier')
    has_outlier_voters = models.NullBooleanField(default=False,
                                                 help_text='has outlier voters')
    intra_disunion = models.FloatField(default=0.0,
                                      help_text='intra disunion for all members')
class VoteDetailed(Timestampable, models.Model):
    """Model of votes with data, how each MP and PG voted."""
    motion = models.TextField(blank=True, null=True,
                             help_text='The motion for which the vote took place')
    session = models.ForeignKey('Session',
                                blank=True, null=True,
                                related_name='in_session_for_VG',
                                help_text=_('Session '))
    vote = models.ForeignKey('Vote',
                             blank=True, null=True,
                             related_name='vote_of_graph',
                             help_text=_('Vote'))
    created_for = models.DateField(_('date of vote'),
                                   blank=True, null=True,
                                   help_text=_('date of vote'))
    # Tallies of the four possible ballot outcomes.
    votes_for = models.IntegerField(blank=True, null=True,
                                    help_text='Number of votes for')
    against = models.IntegerField(blank=True, null=True,
                                  help_text='Number votes againt')
    abstain = models.IntegerField(blank=True, null=True,
                                  help_text='Number votes abstain')
    not_present = models.IntegerField(blank=True, null=True,
                                        help_text='Number of MPs that warent on the session')
    result = models.NullBooleanField(blank=True, null=True,
                                    default=False,
                                    help_text='The result of the vote')
    # Per-group (pgs_*) and per-member (mp_*) breakdowns of the vote,
    # stored as JSON blobs.
    pgs_yes = JSONField(blank=True, null=True)
    pgs_no = JSONField(blank=True, null=True)
    pgs_np = JSONField(blank=True, null=True)
    pgs_kvor = JSONField(blank=True, null=True)
    mp_yes = JSONField(blank=True, null=True)
    mp_no = JSONField(blank=True, null=True)
    mp_np = JSONField(blank=True, null=True)
    mp_kvor = JSONField(blank=True, null=True)
class Vote_analysis(Timestampable, models.Model):
    """Analysis results for a single vote.

    Holds the tallies plus JSON breakdowns per parliamentary group and
    per MP, and coalition/opposition option data.
    """
    # Session in which the analysed vote took place.
    session = models.ForeignKey('Session',
                                blank=True, null=True,
                                related_name='in_session_for_VA',
                                help_text=_('Session '))
    # The Vote this analysis belongs to.
    vote = models.ForeignKey('Vote',
                             blank=True, null=True,
                             related_name='analysis',
                             help_text=_('Vote'))
    # Date the vote was held.
    created_for = models.DateField(_('date of vote'),
                                   blank=True,
                                   null=True,
                                   help_text=_('date of vote'))
    votes_for = models.IntegerField(blank=True, null=True,
                                    help_text='Number of votes for')
    # NOTE(review): help_text typos ('againt', 'warent') kept — runtime strings.
    against = models.IntegerField(blank=True, null=True,
                                  help_text='Number votes againt')
    abstain = models.IntegerField(blank=True, null=True,
                                  help_text='Number votes abstain')
    not_present = models.IntegerField(blank=True, null=True,
                                      help_text='Number of MPs that warent on the session')
    # Per-parliamentary-group data (JSON).
    pgs_data = JSONField(blank=True, null=True)
    # Per-MP option breakdowns (JSON).
    mp_yes = JSONField(blank=True, null=True)
    mp_no = JSONField(blank=True, null=True)
    mp_np = JSONField(blank=True, null=True)
    mp_kvor = JSONField(blank=True, null=True)
    # Coalition / opposition option data (JSON).
    coal_opts = JSONField(blank=True, null=True)
    oppo_opts = JSONField(blank=True, null=True)
class AbsentMPs(Timestampable, models.Model):
    """Model for analysis absent MPs on session."""
    # Session the absence analysis was computed for.
    session = models.ForeignKey('Session',
                                blank=True, null=True,
                                related_name='session_absent',
                                help_text=_('Session '))
    # JSON payload listing the absent MPs.
    absentMPs = JSONField(blank=True, null=True)
    # Date the analysis refers to.
    created_for = models.DateField(_('date of vote'),
                                   blank=True, null=True,
                                   help_text=_('date of vote'))
class Quote(Timestampable, models.Model):
    """Model for quoted text from speeches.

    first_char/last_char locate the quote inside the speech text.
    """
    # The quoted fragment itself.
    quoted_text = models.TextField(_('quoted text'),
                                    blank=True, null=True,
                                    help_text=_('text quoted in a speech'))
    # Speech the quote was taken from.
    # NOTE(review): FK declared without on_delete — pre-2.0 Django style.
    speech = models.ForeignKey('Speech',
                                help_text=_('the speech that is being quoted'))
    # Character offsets of the quote within the speech text.
    first_char = models.IntegerField(blank=True, null=True,
                                      help_text=_('index of first character of quote string'))
    last_char = models.IntegerField(blank=True, null=True,
                                     help_text=_('index of last character of quote string'))
class PresenceOfPG(Timestampable, models.Model):
    """Model for analysis presence of PG on session."""
    # Session the presence analysis was computed for.
    session = models.ForeignKey('Session',
                                blank=True, null=True,
                                related_name='session_presence',
                                help_text=_('Session '))
    # JSON payload with per-PG presence data.
    presence = JSONField(blank=True, null=True)
    # Date the analysis refers to.
    # NOTE(review): help_text typo 'analize' kept — runtime string.
    created_for = models.DateField(_('date of activity'),
                                   blank=True, null=True,
                                   help_text=_('date of analize'))
class Tfidf(Timestampable, models.Model):
    """Model for analysis TFIDF."""
    # Session the TF-IDF analysis belongs to.
    session = models.ForeignKey('Session',
                                blank=True, null=True,
                                related_name='tfidf',
                                help_text=_('Session '))
    # Date the analysis refers to.
    created_for = models.DateField(_('date of activity'),
                                   blank=True,
                                   null=True,
                                   help_text=_('date of analize'))
    # Whether this analysis should be shown.
    is_visible = models.BooleanField(_('is visible'),
                                     default=True)
    # TF-IDF result payload (JSON).
    data = JSONField(blank=True, null=True)

    def __str__(self):
        # NOTE(review): relies on the Python 2 unicode() builtin; this
        # method as written only works under py2.
        return unicode(self.session.name) + " --> " + unicode(self.session.organization.name)
class Tag(models.Model):
    """All tags of votes."""
    # ID of the matching record in the parladata service.
    id_parladata = models.IntegerField(_('parladata id'),
                                        blank=True,
                                        null=True,
                                        help_text=_('id parladata'))
    # Human-readable tag name.
    name = models.TextField(blank=True,
                            null=True,
                            help_text=_('tag name'))
|
[
"datetime.datetime.strftime",
"django.db.models.TextField",
"django.db.models.URLField",
"django.db.models.ManyToManyField",
"django.db.models.NullBooleanField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"jsonfield.JSONField",
"django.db.models.FloatField",
"django.db.models.BooleanField",
"django.db.models.IntegerField",
"django.utils.translation.ugettext_lazy"
] |
[((1746, 1884), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""parlaskupine.Organization"""'], {'blank': '(True)', 'null': '(True)', 'related_name': '"""session"""', 'help_text': '"""The organization in session"""'}), "('parlaskupine.Organization', blank=True, null=True,\n related_name='session', help_text='The organization in session')\n", (1763, 1884), False, 'from django.db import models\n'), ((2013, 2135), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""parlaskupine.Organization"""'], {'related_name': '"""sessions"""', 'help_text': '"""The organizations in session"""'}), "('parlaskupine.Organization', related_name='sessions',\n help_text='The organizations in session')\n", (2035, 2135), False, 'from django.db import models\n'), ((2985, 3053), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Gov website ID."""'}), "(blank=True, null=True, help_text='Gov website ID.')\n", (3001, 3053), False, 'from django.db import models\n'), ((3101, 3170), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Is session in review?"""'}), "(default=False, help_text='Is session in review?')\n", (3120, 3170), False, 'from django.db import models\n'), ((5526, 5591), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Words spoken"""'}), "(blank=True, null=True, help_text='Words spoken')\n", (5542, 5591), False, 'from django.db import models\n'), ((5636, 5707), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Order of speech"""'}), "(blank=True, null=True, help_text='Order of speech')\n", (5655, 5707), False, 'from django.db import models\n'), ((5760, 5859), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""parlaskupine.Organization"""'], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Organization"""'}), 
"('parlaskupine.Organization', blank=True, null=True,\n help_text='Organization')\n", (5777, 5859), False, 'from django.db import models\n'), ((6258, 6343), 'django.db.models.URLField', 'models.URLField', ([], {'help_text': '"""Words spoken"""', 'max_length': '(350)', 'blank': '(True)', 'null': '(True)'}), "(help_text='Words spoken', max_length=350, blank=True, null=True\n )\n", (6273, 6343), False, 'from django.db import models\n'), ((6422, 6487), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Words spoken"""'}), "(blank=True, null=True, help_text='Words spoken')\n", (6438, 6487), False, 'from django.db import models\n'), ((6542, 6697), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""parlaposlanci.Person"""'], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Recipient persons (if it\'s a person)."""', 'related_name': '"""questions"""'}), '(\'parlaposlanci.Person\', blank=True, null=True,\n help_text="Recipient persons (if it\'s a person).", related_name=\'questions\'\n )\n', (6564, 6697), False, 'from django.db import models\n'), ((6908, 7084), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""parlaskupine.Organization"""'], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Recipient organizations (if it\'s an organization)."""', 'related_name': '"""questions_org"""'}), '(\'parlaskupine.Organization\', blank=True, null=True,\n help_text="Recipient organizations (if it\'s an organization).",\n related_name=\'questions_org\')\n', (6930, 7084), False, 'from django.db import models\n'), ((7311, 7406), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Recipient name as written on dz-rs.si"""'}), "(blank=True, null=True, help_text=\n 'Recipient name as written on dz-rs.si')\n", (7327, 7406), False, 'from django.db import models\n'), ((8416, 8506), 'django.db.models.CharField', 'models.CharField', ([], 
{'max_length': '(128)', 'blank': '(True)', 'null': '(True)', 'help_text': '"""Yes, no, abstain"""'}), "(max_length=128, blank=True, null=True, help_text=\n 'Yes, no, abstain')\n", (8432, 8506), False, 'from django.db import models\n'), ((9503, 9601), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""The motion for which the vote took place"""'}), "(blank=True, null=True, help_text=\n 'The motion for which the vote took place')\n", (9519, 9601), False, 'from django.db import models\n'), ((9640, 9672), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (9649, 9672), False, 'from jsonfield import JSONField\n'), ((9690, 9765), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Number of votes for"""'}), "(blank=True, null=True, help_text='Number of votes for')\n", (9709, 9765), False, 'from django.db import models\n'), ((9817, 9892), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Number votes againt"""'}), "(blank=True, null=True, help_text='Number votes againt')\n", (9836, 9892), False, 'from django.db import models\n'), ((9942, 10018), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Number votes abstain"""'}), "(blank=True, null=True, help_text='Number votes abstain')\n", (9961, 10018), False, 'from django.db import models\n'), ((10072, 10173), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Number of MPs that warent on the session"""'}), "(blank=True, null=True, help_text=\n 'Number of MPs that warent on the session')\n", (10091, 10173), False, 'from django.db import models\n'), ((10221, 10323), 'django.db.models.NullBooleanField', 'models.NullBooleanField', ([], {'blank': '(True)', 
'null': '(True)', 'default': '(False)', 'help_text': '"""The result of the vote"""'}), "(blank=True, null=True, default=False, help_text=\n 'The result of the vote')\n", (10244, 10323), False, 'from django.db import models\n'), ((10602, 10634), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (10611, 10634), False, 'from jsonfield import JSONField\n'), ((10841, 10903), 'django.db.models.NullBooleanField', 'models.NullBooleanField', ([], {'default': '(False)', 'help_text': '"""is outlier"""'}), "(default=False, help_text='is outlier')\n", (10864, 10903), False, 'from django.db import models\n'), ((10971, 11041), 'django.db.models.NullBooleanField', 'models.NullBooleanField', ([], {'default': '(False)', 'help_text': '"""has outlier voters"""'}), "(default=False, help_text='has outlier voters')\n", (10994, 11041), False, 'from django.db import models\n'), ((11113, 11187), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)', 'help_text': '"""intra disunion for all members"""'}), "(default=0.0, help_text='intra disunion for all members')\n", (11130, 11187), False, 'from django.db import models\n'), ((11354, 11452), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""The motion for which the vote took place"""'}), "(blank=True, null=True, help_text=\n 'The motion for which the vote took place')\n", (11370, 11452), False, 'from django.db import models\n'), ((12093, 12168), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Number of votes for"""'}), "(blank=True, null=True, help_text='Number of votes for')\n", (12112, 12168), False, 'from django.db import models\n'), ((12220, 12295), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Number votes againt"""'}), "(blank=True, null=True, help_text='Number 
votes againt')\n", (12239, 12295), False, 'from django.db import models\n'), ((12345, 12421), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Number votes abstain"""'}), "(blank=True, null=True, help_text='Number votes abstain')\n", (12364, 12421), False, 'from django.db import models\n'), ((12475, 12576), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Number of MPs that warent on the session"""'}), "(blank=True, null=True, help_text=\n 'Number of MPs that warent on the session')\n", (12494, 12576), False, 'from django.db import models\n'), ((12624, 12726), 'django.db.models.NullBooleanField', 'models.NullBooleanField', ([], {'blank': '(True)', 'null': '(True)', 'default': '(False)', 'help_text': '"""The result of the vote"""'}), "(blank=True, null=True, default=False, help_text=\n 'The result of the vote')\n", (12647, 12726), False, 'from django.db import models\n'), ((12811, 12843), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12820, 12843), False, 'from jsonfield import JSONField\n'), ((12857, 12889), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12866, 12889), False, 'from jsonfield import JSONField\n'), ((12903, 12935), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12912, 12935), False, 'from jsonfield import JSONField\n'), ((12951, 12983), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12960, 12983), False, 'from jsonfield import JSONField\n'), ((12998, 13030), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (13007, 13030), False, 'from jsonfield import JSONField\n'), ((13043, 13075), 'jsonfield.JSONField', 
'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (13052, 13075), False, 'from jsonfield import JSONField\n'), ((13088, 13120), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (13097, 13120), False, 'from jsonfield import JSONField\n'), ((13135, 13167), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (13144, 13167), False, 'from jsonfield import JSONField\n'), ((13871, 13946), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Number of votes for"""'}), "(blank=True, null=True, help_text='Number of votes for')\n", (13890, 13946), False, 'from django.db import models\n'), ((13997, 14072), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Number votes againt"""'}), "(blank=True, null=True, help_text='Number votes againt')\n", (14016, 14072), False, 'from django.db import models\n'), ((14123, 14199), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Number votes abstain"""'}), "(blank=True, null=True, help_text='Number votes abstain')\n", (14142, 14199), False, 'from django.db import models\n'), ((14254, 14355), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Number of MPs that warent on the session"""'}), "(blank=True, null=True, help_text=\n 'Number of MPs that warent on the session')\n", (14273, 14355), False, 'from django.db import models\n'), ((14401, 14433), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (14410, 14433), False, 'from jsonfield import JSONField\n'), ((14448, 14480), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, 
null=True)\n', (14457, 14480), False, 'from jsonfield import JSONField\n'), ((14493, 14525), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (14502, 14525), False, 'from jsonfield import JSONField\n'), ((14538, 14570), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (14547, 14570), False, 'from jsonfield import JSONField\n'), ((14585, 14617), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (14594, 14617), False, 'from jsonfield import JSONField\n'), ((14635, 14667), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (14644, 14667), False, 'from jsonfield import JSONField\n'), ((14685, 14717), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (14694, 14717), False, 'from jsonfield import JSONField\n'), ((15054, 15086), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (15063, 15086), False, 'from jsonfield import JSONField\n'), ((16314, 16346), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (16323, 16346), False, 'from jsonfield import JSONField\n'), ((17158, 17190), 'jsonfield.JSONField', 'JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (17167, 17190), False, 'from jsonfield import JSONField\n'), ((627, 636), 'django.utils.translation.ugettext_lazy', '_', (['"""name"""'], {}), "('name')\n", (628, 636), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((822, 842), 'django.utils.translation.ugettext_lazy', '_', (['"""date of session"""'], {}), "('date of session')\n", (823, 842), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1001, 1018), 
'django.utils.translation.ugettext_lazy', '_', (['"""parladata id"""'], {}), "('parladata id')\n", (1002, 1018), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1182, 1199), 'django.utils.translation.ugettext_lazy', '_', (['"""mandate name"""'], {}), "('mandate name')\n", (1183, 1199), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1400, 1426), 'django.utils.translation.ugettext_lazy', '_', (['"""start time of session"""'], {}), "('start time of session')\n", (1401, 1426), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1585, 1609), 'django.utils.translation.ugettext_lazy', '_', (['"""end time of session"""'], {}), "('end time of session')\n", (1586, 1609), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2257, 2276), 'django.utils.translation.ugettext_lazy', '_', (['"""classification"""'], {}), "('classification')\n", (2258, 2276), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2499, 2511), 'django.utils.translation.ugettext_lazy', '_', (['"""actived"""'], {}), "('actived')\n", (2500, 2511), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2740, 2759), 'django.utils.translation.ugettext_lazy', '_', (['"""classification"""'], {}), "('classification')\n", (2741, 2759), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4307, 4324), 'django.utils.translation.ugettext_lazy', '_', (['"""parladata id"""'], {}), "('parladata id')\n", (4308, 4324), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9133, 9150), 'django.utils.translation.ugettext_lazy', '_', (['"""date of vote"""'], {}), "('date of vote')\n", (9134, 9150), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((10433, 10450), 'django.utils.translation.ugettext_lazy', '_', (['"""parladata id"""'], {}), "('parladata id')\n", (10434, 10450), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((11935, 11952), 
'django.utils.translation.ugettext_lazy', '_', (['"""date of vote"""'], {}), "('date of vote')\n", (11936, 11952), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((13675, 13692), 'django.utils.translation.ugettext_lazy', '_', (['"""date of vote"""'], {}), "('date of vote')\n", (13676, 13692), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((15123, 15140), 'django.utils.translation.ugettext_lazy', '_', (['"""date of vote"""'], {}), "('date of vote')\n", (15124, 15140), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((15391, 15407), 'django.utils.translation.ugettext_lazy', '_', (['"""quoted text"""'], {}), "('quoted text')\n", (15392, 15407), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16383, 16404), 'django.utils.translation.ugettext_lazy', '_', (['"""date of activity"""'], {}), "('date of activity')\n", (16384, 16404), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16857, 16878), 'django.utils.translation.ugettext_lazy', '_', (['"""date of activity"""'], {}), "('date of activity')\n", (16858, 16878), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17078, 17093), 'django.utils.translation.ugettext_lazy', '_', (['"""is visible"""'], {}), "('is visible')\n", (17079, 17093), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17405, 17422), 'django.utils.translation.ugettext_lazy', '_', (['"""parladata id"""'], {}), "('parladata id')\n", (17406, 17422), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((430, 466), 'datetime.datetime.strftime', 'datetime.strftime', (['value', '"""%Y-%m-%d"""'], {}), "(value, '%Y-%m-%d')\n", (447, 466), False, 'from datetime import datetime\n'), ((771, 788), 'django.utils.translation.ugettext_lazy', '_', (['"""Session name"""'], {}), "('Session name')\n", (772, 788), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((939, 959), 
'django.utils.translation.ugettext_lazy', '_', (['"""date of session"""'], {}), "('date of session')\n", (940, 959), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1131, 1148), 'django.utils.translation.ugettext_lazy', '_', (['"""id parladata"""'], {}), "('id parladata')\n", (1132, 1148), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1343, 1360), 'django.utils.translation.ugettext_lazy', '_', (['"""Mandate name"""'], {}), "('Mandate name')\n", (1344, 1360), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2655, 2699), 'django.utils.translation.ugettext_lazy', '_', (['"""Yes if PG is actived or no if it is not"""'], {}), "('Yes if PG is actived or no if it is not')\n", (2656, 2699), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2924, 2969), 'django.utils.translation.ugettext_lazy', '_', (['"""An organization category, e.g. committee"""'], {}), "('An organization category, e.g. committee')\n", (2925, 2969), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4437, 4454), 'django.utils.translation.ugettext_lazy', '_', (['"""id parladata"""'], {}), "('id parladata')\n", (4438, 4454), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4677, 4690), 'django.utils.translation.ugettext_lazy', '_', (['"""Session """'], {}), "('Session ')\n", (4678, 4690), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4843, 4850), 'django.utils.translation.ugettext_lazy', '_', (['"""MP"""'], {}), "('MP')\n", (4844, 4850), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8391, 8400), 'django.utils.translation.ugettext_lazy', '_', (['"""Vote"""'], {}), "('Vote')\n", (8392, 8400), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8795, 8818), 'django.utils.translation.ugettext_lazy', '_', (['"""Organization voter"""'], {}), "('Organization voter')\n", (8796, 8818), True, 'from django.utils.translation 
import ugettext_lazy as _\n'), ((9255, 9272), 'django.utils.translation.ugettext_lazy', '_', (['"""date of vote"""'], {}), "('date of vote')\n", (9256, 9272), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9474, 9487), 'django.utils.translation.ugettext_lazy', '_', (['"""Session """'], {}), "('Session ')\n", (9475, 9487), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((10563, 10580), 'django.utils.translation.ugettext_lazy', '_', (['"""id parladata"""'], {}), "('id parladata')\n", (10564, 10580), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((11685, 11698), 'django.utils.translation.ugettext_lazy', '_', (['"""Session """'], {}), "('Session ')\n", (11686, 11698), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((11888, 11897), 'django.utils.translation.ugettext_lazy', '_', (['"""Vote"""'], {}), "('Vote')\n", (11889, 11897), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12057, 12074), 'django.utils.translation.ugettext_lazy', '_', (['"""date of vote"""'], {}), "('date of vote')\n", (12058, 12074), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((13424, 13437), 'django.utils.translation.ugettext_lazy', '_', (['"""Session """'], {}), "('Session ')\n", (13425, 13437), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((13628, 13637), 'django.utils.translation.ugettext_lazy', '_', (['"""Vote"""'], {}), "('Vote')\n", (13629, 13637), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((13835, 13852), 'django.utils.translation.ugettext_lazy', '_', (['"""date of vote"""'], {}), "('date of vote')\n", (13836, 13852), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((15022, 15035), 'django.utils.translation.ugettext_lazy', '_', (['"""Session """'], {}), "('Session ')\n", (15023, 15035), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((15245, 15262), 
'django.utils.translation.ugettext_lazy', '_', (['"""date of vote"""'], {}), "('date of vote')\n", (15246, 15262), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((15512, 15540), 'django.utils.translation.ugettext_lazy', '_', (['"""text quoted in a speech"""'], {}), "('text quoted in a speech')\n", (15513, 15540), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((15625, 15661), 'django.utils.translation.ugettext_lazy', '_', (['"""the speech that is being quoted"""'], {}), "('the speech that is being quoted')\n", (15626, 15661), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((15771, 15816), 'django.utils.translation.ugettext_lazy', '_', (['"""index of first character of quote string"""'], {}), "('index of first character of quote string')\n", (15772, 15816), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((15924, 15968), 'django.utils.translation.ugettext_lazy', '_', (['"""index of last character of quote string"""'], {}), "('index of last character of quote string')\n", (15925, 15968), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16283, 16296), 'django.utils.translation.ugettext_lazy', '_', (['"""Session """'], {}), "('Session ')\n", (16284, 16296), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16509, 16529), 'django.utils.translation.ugettext_lazy', '_', (['"""date of analize"""'], {}), "('date of analize')\n", (16510, 16529), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16806, 16819), 'django.utils.translation.ugettext_lazy', '_', (['"""Session """'], {}), "('Session ')\n", (16807, 16819), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17018, 17038), 'django.utils.translation.ugettext_lazy', '_', (['"""date of analize"""'], {}), "('date of analize')\n", (17019, 17038), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17574, 17591), 
'django.utils.translation.ugettext_lazy', '_', (['"""id parladata"""'], {}), "('id parladata')\n", (17575, 17591), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17711, 17724), 'django.utils.translation.ugettext_lazy', '_', (['"""tag name"""'], {}), "('tag name')\n", (17712, 17724), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
## ____ _ ____
## / ___|__ _ ___| |_ _ _ ___ / ___|__ _ _ __ _ _ ___ _ __
## | | / _` |/ __| __| | | / __| | | / _` | '_ \| | | |/ _ \| '_ \
## | |__| (_| | (__| |_| |_| \__ \ | |__| (_| | | | | |_| | (_) | | | |
## \____\__,_|\___|\__|\__,_|___/ \____\__,_|_| |_|\__, |\___/|_| |_|
## |___/
## ___ ___ _ _ _____ ___ _ _ _ _ ___ ___
## / __/ _ \| \| |_ _|_ _| \| | | | | __| \
## | (_| (_) | .` | | | | || .` | |_| | _|| |) |
## \___\___/|_|\_| |_| |___|_|\_|\___/|___|___/
##
## A P-ROC Project by <NAME>, Copyright 2012-2013
## Built on the PyProcGame Framework from <NAME> and <NAME>
## Original Cactus Canyon software by <NAME>
##
##
## The Medieval Madness Tribute
##
from procgame import dmd,game
import ep
import random
class MM_Tribute(ep.EP_Mode):
"""This is Just a Tribute """
    def __init__(self,game,priority):
        """Set up the trolls mode: state flags, the pause overlay, and taunt quote pools."""
        super(MM_Tribute, self).__init__(game,priority)
        self.myID = "MM Tribute"
        # True while play is paused (ball captive in a popper / bumpers).
        self.halted = False
        # True while the mode is active.
        self.running = False
        # Hits needed on EACH troll to win.
        self.hitsToWin = 3
        self.won = False
        # Seconds between taunt quotes.
        self.tauntTimer = 8
        script = []
        # set up the pause text layer
        textString = "< TROLLS PAUSED >"
        textLayer = ep.EP_TextLayer(128/2, 24, self.game.assets.font_6px_az_inverse, "center", opaque=False).set_text(textString,color=ep.GREEN)
        script.append({'seconds':0.3,'layer':textLayer})
        # set up the alternating blank layer
        blank = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_blank.frames[0])
        blank.composite_op = "blacksrc"
        script.append({'seconds':0.3,'layer':blank})
        # make a script layer with the two - this blinks "< TROLLS PAUSED >"
        self.pauseView = dmd.ScriptedLayer(128,32,script)
        self.pauseView.composite_op = "blacksrc"
        # Quote pools: taunts for each troll, plus "solo" variants
        # (presumably used when only one troll remains - TODO confirm).
        self.leftTaunts = [self.game.assets.quote_mmLT1,
                           self.game.assets.quote_mmLT2,
                           self.game.assets.quote_mmLT3,
                           self.game.assets.quote_mmLT4,
                           self.game.assets.quote_mmLT5,
                           self.game.assets.quote_mmLT6]
        self.rightTaunts = [self.game.assets.quote_mmRT1,
                            self.game.assets.quote_mmRT2,
                            self.game.assets.quote_mmRT3,
                            self.game.assets.quote_mmRT4,
                            self.game.assets.quote_mmRT5,
                            self.game.assets.quote_mmRT6]
        self.leftSoloTaunts = [self.game.assets.quote_mmLTS1,
                               self.game.assets.quote_mmLTS2,
                               self.game.assets.quote_mmLTS3,
                               self.game.assets.quote_mmLTS4,
                               self.game.assets.quote_mmLTS5,
                               self.game.assets.quote_mmLTS6]
        self.rightSoloTaunts = [self.game.assets.quote_mmRTS1,
                                self.game.assets.quote_mmRTS2,
                                self.game.assets.quote_mmRTS3,
                                self.game.assets.quote_mmRTS4,
                                self.game.assets.quote_mmRTS5,
                                self.game.assets.quote_mmRTS6]
    def mode_started(self):
        """Reset per-run state, block switches, and kick off the intro."""
        # Indices into the taunt quote pools still available.
        self.tauntChoices = [0,1,2,3,4,5]
        self.tauntTimer = 8
        # overall mode timer
        self.modeTimer = 30
        self.timeLayer = ep.EP_TextLayer(64,22,self.game.assets.font_9px_az,"center",opaque=True).set_text(str(self.modeTimer),color=ep.GREEN)
        # fire up the switch block if it's not already loaded
        self.game.switch_blocker('add',self.myID)
        # unload the launcher
        self.game.tribute_launcher.unload()
        # first hit is 250, but it adds the bump first in the routine
        self.value = 175000
        self.running = True
        self.halted = False
        self.won = False
        # total left troll hits
        self.leftHitsSoFar = 0
        # total right troll hits
        self.rightHitsSoFar = 0
        # score for the mode
        self.totalPoints = 0
        # set up the text layers
        self.titleLine = ep.EP_TextLayer(64,2,self.game.assets.font_5px_AZ,"center",opaque=False)
        self.update_titleLine()
        self.scoreLayer = ep.EP_TextLayer(64,10,self.game.assets.font_5px_AZ,"center",opaque=False)
        self.intro()
def ball_drained(self):
# if we get to zero balls while running, finish
if self.game.trough.num_balls_in_play == 0 and self.running:
self.finish_trolls()
def halt_test(self):
if not self.halted:
self.halt_trolls()
# if the mode is already halted, cancel any pending resume delay
else:
self.cancel_delayed("Resume")
    # halt switches
    # bonus lanes pause save polly
    def sw_leftBonusLane_active(self,sw):
        # Left bonus lane pauses the mode (via halt_test).
        self.halt_test()
    def sw_rightBonusLane_active(self,sw):
        # Right bonus lane pauses the mode (via halt_test).
        self.halt_test()
    # bumpers pause quickdraw
    def sw_leftJetBumper_active(self,sw):
        # Jet bumper hits pause the mode.
        self.halt_test()
    def sw_rightJetBumper_active(self,sw):
        # Jet bumper hits pause the mode.
        self.halt_test()
    def sw_bottomJetBumper_active(self,sw):
        # Jet bumper hits pause the mode.
        self.halt_test()
    # so does the mine and both pass the 'advanced' flag to avoid moo sounds
    def sw_minePopper_active_for_350ms(self,sw):
        # Ball settled in the mine popper pauses the mode.
        #print "Trolls It Mine Popper Register"
        self.halt_test()
    def sw_saloonPopper_active_for_250ms(self,sw):
        # Ball settled in the saloon popper pauses the mode.
        #print "Trolls It Saloon Popper Register"
        self.halt_test()
def sw_saloonPopper_inactive(self,sw):
if self.running and self.halted:
self.halted = False
self.delay("Resume",delay=1,handler=self.resume_trolls)
# resume when exit
def sw_jetBumpersExit_active(self,sw):
if self.running and self.halted:
# kill the halt flag
self.halted = False
self.delay("Resume",delay=1,handler=self.resume_trolls)
    def intro(self,step=1):
        """Run the three-step mode intro: title card, push transition, troll-rise animation.

        Each step schedules the next via self.delay; step 3 ends by handing
        off to get_going once the animation has played out.
        """
        if step == 1:
            # silence the current track, announce the mode, and queue the troll music
            self.stop_music()
            self.delay(delay=0.5,handler=self.game.base.play_quote,param=self.game.assets.quote_mmTrolls)
            introWait = self.game.sound.play(self.game.assets.sfx_mmIntro)
            self.delay(delay=introWait,handler=self.game.music_on,param=self.game.assets.music_trolls)
            # build the bordered title card with the hit-count instructions
            border = dmd.FrameLayer(opaque = True, frame=self.game.assets.dmd_singlePixelBorder.frames[0])
            titleLine = ep.EP_TextLayer(64,2,self.game.assets.font_9px_az,"center",False).set_text("TROLLS!",color=ep.GREEN)
            infoLine1 = ep.EP_TextLayer(64,14,self.game.assets.font_5px_AZ,"center",False).set_text("SHOOT EACH TROLL " + str(self.hitsToWin) + " TIMES")
            infoLine2 = ep.EP_TextLayer(64,20,self.game.assets.font_5px_AZ,"center",False).set_text("TO FINISH")
            combined = dmd.GroupedLayer(128,32,[border,titleLine,infoLine1,infoLine2])
            self.layer = combined
            self.delay(delay=2,handler=self.intro,param=2)
        if step == 2:
            # push-transition from the title card to the first frame of the intro animation
            startFrame = dmd.FrameLayer(opaque = True, frame=self.game.assets.dmd_mmTrollsIntro.frames[0])
            transition = ep.EP_Transition(self,self.layer,startFrame,ep.EP_Transition.TYPE_PUSH,ep.EP_Transition.PARAM_NORTH)
            self.delay(delay=1.5,handler=self.intro,param=3)
        if step == 3:
            # play the troll-rise animation; frame listeners sync sound and hardware
            anim = self.game.assets.dmd_mmTrollsIntro
            # animation runs at 10 fps, so duration is frames / 10
            myWait = len(anim.frames) / 10.0
            animLayer = ep.EP_AnimatedLayer(anim)
            animLayer.hold = True
            animLayer.frame_time = 6
            animLayer.repeat = False
            animLayer.opaque = True
            # sounds ?
            animLayer.add_frame_listener(1,self.game.sound.play,param=self.game.assets.sfx_lightning1)
            animLayer.add_frame_listener(13,self.game.sound.play,param=self.game.assets.sfx_lightning1)
            # trolls raising - pop the physical drop targets mid-animation
            animLayer.add_frame_listener(10,self.game.bad_guys.target_up,param=1)
            animLayer.add_frame_listener(21,self.game.bad_guys.target_up,param=2)
            # first taunt
            animLayer.add_frame_listener(25,self.taunt)
            self.layer = animLayer
            self.delay(delay = myWait,handler=self.get_going)
def get_going(self):
self.game.ball_search.enable()
# release the ball
if self.game.tribute_launcher.shot == 3:
self.game.mountain.eject()
else:
self.game.coils.leftGunFightPost.disable()
# start the timer
self.modeTimer += 1
self.time_trolls()
# start the score updater
self.score_update()
# start the display
self.display_trolls(mode="idle",troll="both")
    def display_trolls(self,troll="both",mode="idle"):
        """Rebuild the troll display layers for the given side and mode.

        mode is one of "idle", "hit" or "dead"; troll is "left", "right" or
        "both". After a "hit" the affected side is scheduled to drop back to
        idle once its animation finishes; after a "dead" with the mode won,
        the finish sequence is scheduled instead.

        NOTE(review): myWait is only assigned inside the matching branches -
        calling with mode "hit" or "dead" and troll "both" would leave it
        unbound. The visible callers never do that, but confirm before
        adding new call sites.
        """
        self.cancel_delayed("Display")
        # kill any pending per-side reset for the side being redrawn
        if troll == "left":
            self.cancel_delayed("Left Display")
        elif troll == "right":
            self.cancel_delayed("Right Display")
        if mode == "idle":
            # looping idle animations; no follow-up delay needed
            if troll == "left" or troll == "both":
                anim = self.game.assets.dmd_mmTrollIdleLeft
                self.leftTrollLayer = dmd.AnimatedLayer(frames=anim.frames,hold=False,opaque=False,repeat=True,frame_time=6)
                myWait = 0
            if troll == "right" or troll == "both":
                anim = self.game.assets.dmd_mmTrollIdleRight
                self.rightTrollLayer = dmd.AnimatedLayer(frames=anim.frames,hold=False,opaque=False,repeat=True,frame_time=6)
                myWait = 0
        elif mode == "hit":
            # one-shot hit animation; myWait is its duration at 10 fps
            if troll == "left":
                anim = self.game.assets.dmd_mmTrollHitLeft
                myWait = len(anim.frames) / 10.0
                self.leftTrollLayer = dmd.AnimatedLayer(frames=anim.frames,hold=True,opaque=False,repeat=False,frame_time=6)
            if troll == "right":
                anim = self.game.assets.dmd_mmTrollHitRight
                myWait = len(anim.frames) / 10.0
                self.rightTrollLayer = dmd.AnimatedLayer(frames=anim.frames,hold=True,opaque=False,repeat=False,frame_time=6)
        elif mode == "dead":
            # one-shot death animation, held on its last frame
            if troll == "left":
                anim = self.game.assets.dmd_mmTrollDeadLeft
                myWait = len(anim.frames) / 10.0
                self.leftTrollLayer = dmd.AnimatedLayer(frames=anim.frames,hold=True,opaque=False,repeat=False,frame_time=6)
            if troll == "right":
                anim = self.game.assets.dmd_mmTrollDeadRight
                myWait = len(anim.frames) / 10.0
                self.rightTrollLayer = dmd.AnimatedLayer(frames=anim.frames,hold=True,opaque=False,repeat=False,frame_time=6)
        else:
            # if we didn't get a cue to change trolls, don't mess with them
            myWait = 0
        # build the layer - trolls composite over the timer/title/score text
        self.leftTrollLayer.composite_op = "blacksrc"
        self.rightTrollLayer.composite_op = "blacksrc"
        combined = dmd.GroupedLayer(128,32,[self.timeLayer,self.titleLine,self.scoreLayer,self.leftTrollLayer,self.rightTrollLayer])
        self.layer = combined
        # set the delay for fixing it after a hit or a miss
        if mode == "hit":
            #print "It's a hit - setting loop back to idle"
            # if a troll got hit loop back to that one to set it to idle after the animation finishes
            if troll == "left":
                self.delay("Left Display",delay=myWait,handler=self.reset_troll,param="left")
            if troll == "right":
                self.delay("Right Display",delay=myWait,handler=self.reset_troll,param="right")
        if mode == "dead":
            if self.won:
                # if both trolls are dead, go to the finish
                self.delay(delay=myWait,handler=self.finish_trolls)
def reset_troll(self,side):
# set the display back
self.display_trolls(side)
# put the target back up
if side == "left":
target = 1
else:
target = 2
#print "Resetting Troll on target " + str(target)
self.game.bad_guys.target_up(target)
    def hit_troll(self,target):
        """Handle one hit on a troll drop target (1 = left, 2 = right).

        Scores the current hit value, tracks per-side hit counts, plays the
        matching hit/death animation and quote, and declares the win when
        both sides reach hitsToWin.
        """
        # score the points
        self.game.score(self.value)
        # add to the total
        self.totalPoints += self.value
        # play the smack sound
        self.game.sound.play(self.game.assets.sfx_mmTrollSmack)
        # delay the taunt timer so the taunt doesn't step on the hit quote
        if self.tauntTimer <= 2:
            self.tauntTimer += 3
        if target == 1:
            # register the hit
            self.leftHitsSoFar += 1
            if self.leftHitsSoFar >= self.hitsToWin:
                # troll is dead
                if self.rightHitsSoFar >= self.hitsToWin:
                    # both dead? Winner!
                    self.win()
                # then display the troll dying
                self.display_trolls(mode="dead",troll="left")
                # play the death sound
                self.game.sound.play(self.game.assets.quote_mmLeftDeath)
                # if the other troll isn't dead yet, he comments
                if not self.won:
                    self.delay(delay=0.5,handler=self.game.base.play_quote,param=self.game.assets.quote_mmRightAlone)
            # if troll is not dead, just hit it
            else:
                self.display_trolls(mode="hit",troll="left")
                # and play the pain sound
                self.game.sound.play(self.game.assets.quote_mmLeftPain)
        # other target is 2 - mirror of the left-side handling above
        else:
            self.rightHitsSoFar += 1
            if self.rightHitsSoFar >= self.hitsToWin:
                # troll is dead
                if self.leftHitsSoFar >= self.hitsToWin:
                    # both dead? Winner!
                    self.win()
                # then display the troll dying
                self.display_trolls(mode="dead",troll="right")
                # play the death sound
                self.game.sound.play(self.game.assets.quote_mmRightDeath)
                # if the other troll isn't dead yet, he comments
                if not self.won:
                    self.delay(delay=0.5,handler=self.game.base.play_quote,param=self.game.assets.quote_mmLeftAlone)
            else:
                self.display_trolls(mode="hit",troll="right")
                # and play the pain sound
                self.game.sound.play(self.game.assets.quote_mmRightPain)
        # update the title line with the new remaining-hit counts
        self.update_titleLine()
def win(self):
self.won = True
self.cancel_delayed("Mode Timer")
self.cancel_delayed("Taunt Timer")
def score_update(self):
# update the score line total every half second
p = self.game.current_player()
scoreString = ep.format_score(p.score)
self.scoreLayer.set_text(scoreString,color=ep.GREEN)
self.delay("Score Update",delay=0.5,handler=self.score_update)
def update_titleLine(self):
left = self.hitsToWin - self.leftHitsSoFar
right = self.hitsToWin - self.rightHitsSoFar
self.titleLine.set_text(str(left) + " - TROLLS! - " + str(right),color=ep.BROWN)
def time_trolls(self):
self.modeTimer -= 1
# if we get to zero, end the mode
if self.modeTimer < 0:
self.finish_trolls()
# otherwise update the timer layers and loop back
else:
if self.modeTimer > 9:
color = ep.GREEN
elif self.modeTimer > 4:
color = ep.YELLOW
else:
color = ep.RED
self.timeLayer.set_text(str(self.modeTimer),color=color)
self.delay("Mode Timer",delay=1,handler=self.time_trolls)
    def halt_trolls(self):
        """Pause the mode while the ball is in the bumpers/mine/saloon.

        Cancels the countdown and any pending resume, swaps in the pause
        display, and raises the halted flag so resume_trolls can restart.
        """
        # nothing to pause once the clock has run out
        if self.modeTimer <= 0:
            return
        #print "HALTING TROLLS IN BUMPERS/MINE"
        self.cancel_delayed("Resume")
        # cancel delays
        self.cancel_delayed("Mode Timer")
        # do the halted delay
        self.layer = self.pauseView
        # set the flag
        self.halted = True
def resume_trolls(self):
# turn the timer back on
self.time_trolls()
# turn the display back on
if self.leftHitsSoFar == self.hitsToWin:
self.display_trolls(mode="idle",troll="right")
elif self.rightHitsSoFar == self.hitsToWin:
self.display_trolls(mode="idle",troll="left")
else:
self.display_trolls(mode="idle",troll="both")
    def taunt(self):
        """Play a troll taunt quote and restart the taunt countdown.

        Draws an index from tauntChoices without replacement (refilling the
        pool when empty) so taunts don't repeat until all have played. Both
        trolls alive -> a left/right call-and-response pair; one dead -> a
        single solo taunt from the survivor.
        """
        # cancel any existing timer and taunt calls
        self.cancel_delayed("Taunt Timer")
        self.cancel_delayed("Taunt Call")
        # pick a taunt
        index = random.choice(self.tauntChoices)
        # remove that from the list
        self.tauntChoices.remove(index)
        # make sure it's not now empty
        if len(self.tauntChoices) == 0:
            self.tauntChoices = [0,1,2,3,4,5]
        # if they're both alive - double taunt
        if self.leftHitsSoFar < self.hitsToWin and self.rightHitsSoFar < self.hitsToWin:
            # play the left taunt
            myWait = self.game.base.priority_quote(self.leftTaunts[index])
            # then delay the second
            self.delay("Taunt Call",delay=myWait+0.5,handler=self.game.base.play_quote,param=self.rightTaunts[index])
        # if one of them is already dead - play a single taunt
        else:
            if self.leftHitsSoFar < self.hitsToWin:
                # play a left taunt
                self.game.base.play_quote(self.leftSoloTaunts[index])
            else:
                self.game.base.play_quote(self.rightSoloTaunts[index])
        # set the timer for the next one
        self.tauntTimer = 8
        # then start the timer
        self.taunt_timer()
def taunt_timer(self):
# loop for calling the troll taunting
self.tauntTimer -= 1
if self.tauntTimer <= 0:
self.taunt()
else:
self.delay("Taunt Timer", delay = 1, handler=self.taunt_timer)
    def finish_trolls(self):
        """End the mode: drop targets, show the result screen, and hand off to done.

        Awards a 450000 bonus on a win, then shows either the destroyed or
        escaped banner plus the mode's point total, with a closing quote
        chosen by the outcome.
        """
        # kill the delays
        self.wipe_delays()
        # drop the targets
        self.game.bad_guys.drop_targets()
        border = dmd.FrameLayer(opaque=True, frame=self.game.assets.dmd_mmTrollFinalFrame.frames[0])
        textLayer1 = ep.EP_TextLayer(64,8,self.game.assets.font_5px_AZ,"center",opaque=False)
        if self.won:
            textLayer1.set_text("TROLLS DESTROYED",color=ep.DARK_GREEN)
            # add some extra points if won - to make it a cool 1.5 million
            self.game.score(450000)
            self.totalPoints += 450000
        else:
            textLayer1.set_text("TROLLS ESCAPED",color=ep.DARK_GREEN)
        textLayer2 = ep.EP_TextLayer(64,14,self.game.assets.font_9px_az,"center",opaque=False).set_text(str(ep.format_score(self.totalPoints)),color=ep.GREEN)
        combined = dmd.GroupedLayer(128,32,[border,textLayer1,textLayer2])
        self.layer = combined
        # play a final quote ? - fatality on a win, mockery on a shutout, cheers otherwise
        if self.won:
            self.delay(delay=1,handler=self.game.base.priority_quote,param=self.game.assets.quote_mmFatality)
        elif self.leftHitsSoFar == 0 and self.rightHitsSoFar == 0:
            self.game.base.priority_quote(self.game.assets.quote_mmYouSuck)
        else:
            self.game.sound.play(self.game.assets.sfx_cheers)
        myWait = 2
        self.delay(delay=myWait,handler=self.done)
    def done(self):
        """Tear the mode down: clear flags, restore music, and unload."""
        self.running = False
        # turn the level 5 stack flag back off
        self.game.stack_level(5,False)
        # set the music back to the main loop
        self.music_on(self.game.assets.music_mainTheme,mySlice=5)
        # remove the switch blocker
        self.game.switch_blocker('remove',self.myID)
        # then unload
        self.unload()
|
[
"procgame.dmd.GroupedLayer",
"ep.EP_Transition",
"procgame.dmd.AnimatedLayer",
"ep.EP_AnimatedLayer",
"random.choice",
"ep.EP_TextLayer",
"ep.format_score",
"procgame.dmd.FrameLayer",
"procgame.dmd.ScriptedLayer"
] |
[((1560, 1632), 'procgame.dmd.FrameLayer', 'dmd.FrameLayer', ([], {'opaque': '(False)', 'frame': 'self.game.assets.dmd_blank.frames[0]'}), '(opaque=False, frame=self.game.assets.dmd_blank.frames[0])\n', (1574, 1632), False, 'from procgame import dmd, game\n'), ((1794, 1828), 'procgame.dmd.ScriptedLayer', 'dmd.ScriptedLayer', (['(128)', '(32)', 'script'], {}), '(128, 32, script)\n', (1811, 1828), False, 'from procgame import dmd, game\n'), ((4224, 4300), 'ep.EP_TextLayer', 'ep.EP_TextLayer', (['(64)', '(2)', 'self.game.assets.font_5px_AZ', '"""center"""'], {'opaque': '(False)'}), "(64, 2, self.game.assets.font_5px_AZ, 'center', opaque=False)\n", (4239, 4300), False, 'import ep\n'), ((4355, 4432), 'ep.EP_TextLayer', 'ep.EP_TextLayer', (['(64)', '(10)', 'self.game.assets.font_5px_AZ', '"""center"""'], {'opaque': '(False)'}), "(64, 10, self.game.assets.font_5px_AZ, 'center', opaque=False)\n", (4370, 4432), False, 'import ep\n'), ((10985, 11108), 'procgame.dmd.GroupedLayer', 'dmd.GroupedLayer', (['(128)', '(32)', '[self.timeLayer, self.titleLine, self.scoreLayer, self.leftTrollLayer, self\n .rightTrollLayer]'], {}), '(128, 32, [self.timeLayer, self.titleLine, self.scoreLayer,\n self.leftTrollLayer, self.rightTrollLayer])\n', (11001, 11108), False, 'from procgame import dmd, game\n'), ((14739, 14763), 'ep.format_score', 'ep.format_score', (['p.score'], {}), '(p.score)\n', (14754, 14763), False, 'import ep\n'), ((16643, 16675), 'random.choice', 'random.choice', (['self.tauntChoices'], {}), '(self.tauntChoices)\n', (16656, 16675), False, 'import random\n'), ((18147, 18235), 'procgame.dmd.FrameLayer', 'dmd.FrameLayer', ([], {'opaque': '(True)', 'frame': 'self.game.assets.dmd_mmTrollFinalFrame.frames[0]'}), '(opaque=True, frame=self.game.assets.dmd_mmTrollFinalFrame.\n frames[0])\n', (18161, 18235), False, 'from procgame import dmd, game\n'), ((18252, 18328), 'ep.EP_TextLayer', 'ep.EP_TextLayer', (['(64)', '(8)', 'self.game.assets.font_5px_AZ', '"""center"""'], {'opaque': 
'(False)'}), "(64, 8, self.game.assets.font_5px_AZ, 'center', opaque=False)\n", (18267, 18328), False, 'import ep\n'), ((18830, 18889), 'procgame.dmd.GroupedLayer', 'dmd.GroupedLayer', (['(128)', '(32)', '[border, textLayer1, textLayer2]'], {}), '(128, 32, [border, textLayer1, textLayer2])\n', (18846, 18889), False, 'from procgame import dmd, game\n'), ((6420, 6508), 'procgame.dmd.FrameLayer', 'dmd.FrameLayer', ([], {'opaque': '(True)', 'frame': 'self.game.assets.dmd_singlePixelBorder.frames[0]'}), '(opaque=True, frame=self.game.assets.dmd_singlePixelBorder.\n frames[0])\n', (6434, 6508), False, 'from procgame import dmd, game\n'), ((6921, 6989), 'procgame.dmd.GroupedLayer', 'dmd.GroupedLayer', (['(128)', '(32)', '[border, titleLine, infoLine1, infoLine2]'], {}), '(128, 32, [border, titleLine, infoLine1, infoLine2])\n', (6937, 6989), False, 'from procgame import dmd, game\n'), ((7125, 7204), 'procgame.dmd.FrameLayer', 'dmd.FrameLayer', ([], {'opaque': '(True)', 'frame': 'self.game.assets.dmd_mmTrollsIntro.frames[0]'}), '(opaque=True, frame=self.game.assets.dmd_mmTrollsIntro.frames[0])\n', (7139, 7204), False, 'from procgame import dmd, game\n'), ((7232, 7340), 'ep.EP_Transition', 'ep.EP_Transition', (['self', 'self.layer', 'startFrame', 'ep.EP_Transition.TYPE_PUSH', 'ep.EP_Transition.PARAM_NORTH'], {}), '(self, self.layer, startFrame, ep.EP_Transition.TYPE_PUSH,\n ep.EP_Transition.PARAM_NORTH)\n', (7248, 7340), False, 'import ep\n'), ((7539, 7564), 'ep.EP_AnimatedLayer', 'ep.EP_AnimatedLayer', (['anim'], {}), '(anim)\n', (7558, 7564), False, 'import ep\n'), ((1317, 1411), 'ep.EP_TextLayer', 'ep.EP_TextLayer', (['(128 / 2)', '(24)', 'self.game.assets.font_6px_az_inverse', '"""center"""'], {'opaque': '(False)'}), "(128 / 2, 24, self.game.assets.font_6px_az_inverse, 'center',\n opaque=False)\n", (1332, 1411), False, 'import ep\n'), ((3497, 3573), 'ep.EP_TextLayer', 'ep.EP_TextLayer', (['(64)', '(22)', 'self.game.assets.font_9px_az', '"""center"""'], {'opaque': 
'(True)'}), "(64, 22, self.game.assets.font_9px_az, 'center', opaque=True)\n", (3512, 3573), False, 'import ep\n'), ((9213, 9307), 'procgame.dmd.AnimatedLayer', 'dmd.AnimatedLayer', ([], {'frames': 'anim.frames', 'hold': '(False)', 'opaque': '(False)', 'repeat': '(True)', 'frame_time': '(6)'}), '(frames=anim.frames, hold=False, opaque=False, repeat=True,\n frame_time=6)\n', (9230, 9307), False, 'from procgame import dmd, game\n'), ((9479, 9573), 'procgame.dmd.AnimatedLayer', 'dmd.AnimatedLayer', ([], {'frames': 'anim.frames', 'hold': '(False)', 'opaque': '(False)', 'repeat': '(True)', 'frame_time': '(6)'}), '(frames=anim.frames, hold=False, opaque=False, repeat=True,\n frame_time=6)\n', (9496, 9573), False, 'from procgame import dmd, game\n'), ((18673, 18750), 'ep.EP_TextLayer', 'ep.EP_TextLayer', (['(64)', '(14)', 'self.game.assets.font_9px_az', '"""center"""'], {'opaque': '(False)'}), "(64, 14, self.game.assets.font_9px_az, 'center', opaque=False)\n", (18688, 18750), False, 'import ep\n'), ((18760, 18793), 'ep.format_score', 'ep.format_score', (['self.totalPoints'], {}), '(self.totalPoints)\n', (18775, 18793), False, 'import ep\n'), ((6530, 6599), 'ep.EP_TextLayer', 'ep.EP_TextLayer', (['(64)', '(2)', 'self.game.assets.font_9px_az', '"""center"""', '(False)'], {}), "(64, 2, self.game.assets.font_9px_az, 'center', False)\n", (6545, 6599), False, 'import ep\n'), ((6655, 6725), 'ep.EP_TextLayer', 'ep.EP_TextLayer', (['(64)', '(14)', 'self.game.assets.font_5px_AZ', '"""center"""', '(False)'], {}), "(64, 14, self.game.assets.font_5px_AZ, 'center', False)\n", (6670, 6725), False, 'import ep\n'), ((6809, 6879), 'ep.EP_TextLayer', 'ep.EP_TextLayer', (['(64)', '(20)', 'self.game.assets.font_5px_AZ', '"""center"""', '(False)'], {}), "(64, 20, self.game.assets.font_5px_AZ, 'center', False)\n", (6824, 6879), False, 'import ep\n'), ((9799, 9893), 'procgame.dmd.AnimatedLayer', 'dmd.AnimatedLayer', ([], {'frames': 'anim.frames', 'hold': '(True)', 'opaque': '(False)', 'repeat': 
'(False)', 'frame_time': '(6)'}), '(frames=anim.frames, hold=True, opaque=False, repeat=False,\n frame_time=6)\n', (9816, 9893), False, 'from procgame import dmd, game\n'), ((10067, 10161), 'procgame.dmd.AnimatedLayer', 'dmd.AnimatedLayer', ([], {'frames': 'anim.frames', 'hold': '(True)', 'opaque': '(False)', 'repeat': '(False)', 'frame_time': '(6)'}), '(frames=anim.frames, hold=True, opaque=False, repeat=False,\n frame_time=6)\n', (10084, 10161), False, 'from procgame import dmd, game\n'), ((10362, 10456), 'procgame.dmd.AnimatedLayer', 'dmd.AnimatedLayer', ([], {'frames': 'anim.frames', 'hold': '(True)', 'opaque': '(False)', 'repeat': '(False)', 'frame_time': '(6)'}), '(frames=anim.frames, hold=True, opaque=False, repeat=False,\n frame_time=6)\n', (10379, 10456), False, 'from procgame import dmd, game\n'), ((10631, 10725), 'procgame.dmd.AnimatedLayer', 'dmd.AnimatedLayer', ([], {'frames': 'anim.frames', 'hold': '(True)', 'opaque': '(False)', 'repeat': '(False)', 'frame_time': '(6)'}), '(frames=anim.frames, hold=True, opaque=False, repeat=False,\n frame_time=6)\n', (10648, 10725), False, 'from procgame import dmd, game\n')]
|
import subprocess
import numpy as np
import os
import pathlib
import multiprocessing as mp
import sys
def test_mp_serial():
    """Run vspace + multiplanet serially and verify each run produced output.

    Integration test: shells out to the ``vspace`` and ``multiplanet``
    commands (only when their output markers are absent), then asserts that
    every generated simulation folder contains an ``earth.earth.forward``
    file. File checks use explicit paths instead of the original
    os.chdir() dance, so the process working directory is never mutated,
    and the redundant ``== True`` / range(len(...)) idioms are gone.
    """
    # resolve the directory containing this test file
    path = pathlib.Path(__file__).parents[0].absolute()
    sys.path.insert(1, str(path.parents[0]))

    # generate the parameter sweep only if it hasn't been built yet
    if not (path / "MP_Serial").exists():
        subprocess.check_output(["vspace", "vspace.in"], cwd=path)

    # Run multi-planet (single core) only if its marker file is absent
    if not (path / ".MP_Serial").exists():
        subprocess.check_output(["multiplanet", "vspace.in", "-c", "1"], cwd=path)

    folders = sorted(f.path for f in os.scandir(path / "MP_Serial") if f.is_dir())
    for folder in folders:
        # every simulation folder must contain its forward-evolution output
        assert os.path.isfile(os.path.join(folder, "earth.earth.forward"))
if __name__ == "__main__":
test_mp_serial()
|
[
"subprocess.check_output",
"os.path.isfile",
"pathlib.Path",
"os.chdir",
"os.scandir"
] |
[((300, 358), 'subprocess.check_output', 'subprocess.check_output', (["['vspace', 'vspace.in']"], {'cwd': 'path'}), "(['vspace', 'vspace.in'], cwd=path)\n", (323, 358), False, 'import subprocess\n'), ((434, 508), 'subprocess.check_output', 'subprocess.check_output', (["['multiplanet', 'vspace.in', '-c', '1']"], {'cwd': 'path'}), "(['multiplanet', 'vspace.in', '-c', '1'], cwd=path)\n", (457, 508), False, 'import subprocess\n'), ((638, 658), 'os.chdir', 'os.chdir', (['folders[i]'], {}), '(folders[i])\n', (646, 658), False, 'import os\n'), ((728, 743), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (736, 743), False, 'import os\n'), ((674, 711), 'os.path.isfile', 'os.path.isfile', (['"""earth.earth.forward"""'], {}), "('earth.earth.forward')\n", (688, 711), False, 'import os\n'), ((548, 578), 'os.scandir', 'os.scandir', (["(path / 'MP_Serial')"], {}), "(path / 'MP_Serial')\n", (558, 578), False, 'import os\n'), ((159, 181), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (171, 181), False, 'import pathlib\n')]
|
import platform
import os
import sys
def install(package):
    """Offer to install colorama via pip, then exit so the game restarts.

    ``package`` is accepted for interface compatibility, but the install
    command is hard-wired to colorama, matching the original behaviour.
    Always exits the interpreter at the end.
    """
    print("""Colorama er ikke installert. Du trenger Colorama for å spille Pokus med farger.
For å spille må du installere Colorama og starte spillet på nytt.""")
    answer = input("Vil du installere colorama nå?\n> ")
    if answer.lower() in {"ja", "j"}:
        current_os = platform.system()
        if current_os in ("Linux", "Darwin"):
            # POSIX systems: pip3 plus a terminal clear
            os.system("pip3 install --user colorama")
            os.system("clear")
        else:
            # Windows: plain pip plus cls
            os.system("pip install --user colorama")
            os.system("cls")
    sys.exit("Du må starte spillet på nytt nå.")
|
[
"platform.system",
"os.system",
"sys.exit"
] |
[((564, 608), 'sys.exit', 'sys.exit', (['"""Du må starte spillet på nytt nå."""'], {}), "('Du må starte spillet på nytt nå.')\n", (572, 608), False, 'import sys\n'), ((313, 330), 'platform.system', 'platform.system', ([], {}), '()\n', (328, 330), False, 'import platform\n'), ((391, 432), 'os.system', 'os.system', (['"""pip3 install --user colorama"""'], {}), "('pip3 install --user colorama')\n", (400, 432), False, 'import os\n'), ((445, 463), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (454, 463), False, 'import os\n'), ((490, 530), 'os.system', 'os.system', (['"""pip install --user colorama"""'], {}), "('pip install --user colorama')\n", (499, 530), False, 'import os\n'), ((543, 559), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (552, 559), False, 'import os\n')]
|
import tensorflow as tf
def attach_GNAP(x : tf.Tensor):
    """Append a Global Norm-Aware Pooling (GNAP) head to the feature map ``x``.

    Normalizes each spatial position's feature vector to unit L2 norm,
    rescales by the mean norm, then global-average-pools and batch-norms.
    """
    # per-position L2 norm over the channel axis, clamped away from zero
    feature_norm = tf.norm(x, ord=2, axis=3, keepdims=True)
    feature_norm = tf.math.maximum(feature_norm, 1e-12)
    mean_norm = tf.math.reduce_mean(feature_norm)
    out = tf.keras.layers.BatchNormalization(scale=False)(x)
    # unit-normalize, then restore the average magnitude
    out = tf.math.divide(out, feature_norm)
    out = tf.multiply(out, mean_norm)
    out = tf.keras.layers.GlobalAveragePooling2D()(out)
    out = tf.keras.layers.BatchNormalization(scale=False)(out)
    return out
def attach_l2_norm_features(x : tf.Tensor, scale=30):
    """L2-normalize ``x`` along axis 1 and rescale the result by ``scale``."""
    normalized = tf.math.l2_normalize(x, axis=1)
    return tf.multiply(normalized, scale)
def attach_embedding_projection(x : tf.Tensor, embedding_dim : int):
    """Project ``x`` to an ``embedding_dim``-wide, batch-normalized embedding."""
    # bias-free dense projection with L2 weight decay
    dense = tf.keras.layers.Dense(embedding_dim,
                          use_bias=False,
                          kernel_regularizer=tf.keras.regularizers.l2(5e-4))
    projected = dense(x)
    return tf.keras.layers.BatchNormalization(name='embeddings')(projected)
|
[
"tensorflow.math.reduce_mean",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.multiply",
"tensorflow.math.maximum",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.norm",
"tensorflow.math.l2_normalize",
"tensorflow.math.divide",
"tensorflow.keras.regularizers.l2"
] |
[((81, 123), 'tensorflow.norm', 'tf.norm', (['out'], {'ord': '(2)', 'axis': '(3)', 'keepdims': '(True)'}), '(out, ord=2, axis=3, keepdims=True)\n', (88, 123), True, 'import tensorflow as tf\n'), ((135, 163), 'tensorflow.math.maximum', 'tf.math.maximum', (['norm', '(1e-12)'], {}), '(norm, 1e-12)\n', (150, 163), True, 'import tensorflow as tf\n'), ((175, 200), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['norm'], {}), '(norm)\n', (194, 200), True, 'import tensorflow as tf\n'), ((274, 299), 'tensorflow.math.divide', 'tf.math.divide', (['out', 'norm'], {}), '(out, norm)\n', (288, 299), True, 'import tensorflow as tf\n'), ((310, 332), 'tensorflow.multiply', 'tf.multiply', (['out', 'mean'], {}), '(out, mean)\n', (321, 332), True, 'import tensorflow as tf\n'), ((531, 562), 'tensorflow.math.l2_normalize', 'tf.math.l2_normalize', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (551, 562), True, 'import tensorflow as tf\n'), ((571, 592), 'tensorflow.multiply', 'tf.multiply', (['x', 'scale'], {}), '(x, scale)\n', (582, 592), True, 'import tensorflow as tf\n'), ((211, 258), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'scale': '(False)'}), '(scale=False)\n', (245, 258), True, 'import tensorflow as tf\n'), ((343, 383), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', ([], {}), '()\n', (381, 383), True, 'import tensorflow as tf\n'), ((399, 446), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'scale': '(False)'}), '(scale=False)\n', (433, 446), True, 'import tensorflow as tf\n'), ((834, 887), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""embeddings"""'}), "(name='embeddings')\n", (868, 887), True, 'import tensorflow as tf\n'), ((787, 819), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (811, 819), True, 'import tensorflow as tf\n')]
|
from riotwrapper.lor import LoRWrapper
from riotwrapper.const.lor_const import REGION_URL
import pytest
import re
@pytest.fixture
def environment():
    """Collect the Riot API key and account id from the process environment."""
    import os
    return {
        'api_key': os.environ.get('API_KEY'),
        'account_id': os.environ.get('ACCOUNT_ID'),
    }
@pytest.fixture
def wrapper(environment):
    # a LoRWrapper pointed at the AMERICAS shard, authenticated from the env
    wrapper = LoRWrapper(environment['api_key'], region="AMERICAS")
    return wrapper
class TestLoRWrapper:
    """Integration tests for LoRWrapper against the live Riot LoR API.

    All tests except test_wrong_region make real HTTP calls and therefore
    require valid API_KEY / ACCOUNT_ID environment variables.
    """

    def test_wrong_region(self):
        """Tests the exception raised after try to initialize
        the wrapper with a not available region"""
        region = "WRONG"
        with pytest.raises(Exception) as region_info:
            _ = LoRWrapper("key", region=region)
        # the error message should name the bad region and list the valid ones
        assert f"{region} is not available" in str(region_info.value)
        assert ', '.join(list(REGION_URL.keys())) in str(region_info.value)

    def test_platform_data(self, wrapper):
        """Tests an API call to get platform data."""
        response = wrapper.platform_data()
        assert isinstance(response, dict)
        assert "id" in response.keys()
        assert "maintenances" in response.keys()
        assert "incidents" in response.keys()

    def test_leaderboard(self, wrapper):
        """Tests an API call to get the leaderboard."""
        response = wrapper.leaderboard()
        assert isinstance(response, dict)
        assert "players" in response.keys()
        assert isinstance(response["players"], list)
        assert len(response["players"]) > 0
        assert "name" in response["players"][0].keys()

    def test_match_ids(self, wrapper, environment):
        """Tests an API call to get the match id list by user."""
        account_id = environment['account_id']
        response = wrapper.match_ids(account_id)
        # match ids look like five word-groups separated by dashes
        pattern = re.compile(r'^(\w+\-\w+\-\w+\-\w+\-\w+)$')
        assert isinstance(response, list)
        assert pattern.match(response[0])

    def test_match_by_id(self, wrapper, environment):
        """Tests and API call to get a match by id."""
        account_id = environment['account_id']
        match_list = wrapper.match_ids(account_id)
        # use the most recent match id for the lookup
        match_id = match_list[0]
        response = wrapper.match_by_id(match_id)
        assert isinstance(response, dict)
        assert "metadata" in response.keys()
        assert "info" in response.keys()
|
[
"riotwrapper.const.lor_const.REGION_URL.keys",
"os.environ.get",
"riotwrapper.lor.LoRWrapper",
"pytest.raises",
"re.compile"
] |
[((180, 205), 'os.environ.get', 'os.environ.get', (['"""API_KEY"""'], {}), "('API_KEY')\n", (194, 205), False, 'import os\n'), ((223, 251), 'os.environ.get', 'os.environ.get', (['"""ACCOUNT_ID"""'], {}), "('ACCOUNT_ID')\n", (237, 251), False, 'import os\n'), ((406, 459), 'riotwrapper.lor.LoRWrapper', 'LoRWrapper', (["environment['api_key']"], {'region': '"""AMERICAS"""'}), "(environment['api_key'], region='AMERICAS')\n", (416, 459), False, 'from riotwrapper.lor import LoRWrapper\n'), ((1862, 1912), 're.compile', 're.compile', (['"""^(\\\\w+\\\\-\\\\w+\\\\-\\\\w+\\\\-\\\\w+\\\\-\\\\w+)$"""'], {}), "('^(\\\\w+\\\\-\\\\w+\\\\-\\\\w+\\\\-\\\\w+\\\\-\\\\w+)$')\n", (1872, 1912), False, 'import re\n'), ((691, 715), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (704, 715), False, 'import pytest\n'), ((748, 780), 'riotwrapper.lor.LoRWrapper', 'LoRWrapper', (['"""key"""'], {'region': 'region'}), "('key', region=region)\n", (758, 780), False, 'from riotwrapper.lor import LoRWrapper\n'), ((882, 899), 'riotwrapper.const.lor_const.REGION_URL.keys', 'REGION_URL.keys', ([], {}), '()\n', (897, 899), False, 'from riotwrapper.const.lor_const import REGION_URL\n')]
|
"""
Constants and defaults values
"""
import datetime
import django
# Message level constants, ordered by increasing severity.
SECONDARY = 10
PRIMARY = 20
INFO = 30
SUCCESS = 40
WARNING = 50
ERROR = 60
# Maps each level constant to its Bootstrap-style CSS tag name.
DEFAULT_TAGS = {
    SECONDARY: 'secondary',
    PRIMARY: 'primary',
    INFO: 'info',
    SUCCESS: 'success',
    WARNING: 'warning',
    ERROR: 'danger',
}
# Maps level names (as strings) back to the numeric level constants.
DEFAULT_LEVELS = {
    'SECONDARY': SECONDARY,
    'PRIMARY': PRIMARY,
    'INFO': INFO,
    'SUCCESS': SUCCESS,
    'WARNING': WARNING,
    'ERROR': ERROR,
}
# Upload directory (relative to MEDIA_ROOT) for message attachments.
MESSAGE_FILES_UPLOAD_TO = "django_magnificent_messages/message_files"
MESSAGE_DB_MODEL = "django_magnificent_messages.Message"
# Dotted paths of the default storage backends, overridable in settings.
DEFAULT_NOTIFICATION_STORAGE = "django_magnificent_messages.storage.notification_storage.session.SessionStorage"
DEFAULT_MESSAGE_STORAGE = "django_magnificent_messages.storage.message_storage.db.DatabaseStorage"
# Sentinel "earliest" timestamp, made timezone-aware.
# NOTE(review): this relies on "import django" having made django.utils.timezone
# importable - usually true inside a configured Django project, but
# "from django.utils import timezone" would be safer; confirm.
MIN_DATETIME = django.utils.timezone.make_aware(datetime.datetime(1900, 1, 1))
|
[
"datetime.datetime"
] |
[((854, 883), 'datetime.datetime', 'datetime.datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (871, 883), False, 'import datetime\n')]
|
#!/usr/bin/python3
# 2019-05-29
import re
import requests
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
class Bot:
    """A GroupMe chat bot backed by a ChatterBot conversational model.

    Fix over the original: the bot's name is interpolated into regex
    patterns in checkForMention/removeMention; it is now passed through
    re.escape so names containing regex metacharacters (e.g. '.', '+')
    are matched literally instead of being interpreted as a pattern.
    """

    def __init__(self, name, bot_id, group_id, api_token):
        self.name = name
        self.bot_id = bot_id
        self.group_id = group_id
        self.api_token = api_token
        self.api_base_url = 'https://api.groupme.com/v3'
        # reuse one HTTP session for all API calls
        self.api_session = requests.session()
        # Create a instnce of a chatterbot and tell the bot where to find data
        self.chatbot = ChatBot(self.name)
        trainer = ChatterBotCorpusTrainer(self.chatbot)
        trainer.train(
            'chatterbot.corpus.english'
        )

    def sendMessage(self, msg):
        '''Send a message from the bot to its assigned group.

        Args:
            msg (str): message to be sent to group
        Returns:
            request response
        '''
        # set parameters for post request
        params = {
            'bot_id': self.bot_id,
            'text': msg
        }
        # send the request to the api and get the results in the response var
        response = self.api_session.post(
            f'{self.api_base_url}/bots/post',
            params=params
        )
        return response

    def getMessages(self):
        '''Get all messages for the bot's group chat.

        Args:
            none
        Returns:
            request response
        '''
        # authenticate the request with the api token
        params = {
            'token': self.api_token
        }
        # get the messages for the bot's group
        response = self.api_session.post(
            f'{self.api_base_url}/groups/{self.group_id}/messages',
            params=params
        )
        return response

    def checkForMention(self, msg):
        '''Checks a message for a mention of the bot by name.

        Args:
            msg (str): message sent in group chat
        Returns:
            re.Match or None: truthy match object if '@<bot_name>' appears
            (on the first line of) msg, otherwise None
        '''
        # escape the name so metacharacters in it are matched literally
        return re.match(r'.*@' + re.escape(self.name) + r'.*', msg)

    def removeMention(self, msg):
        '''Strips the bot mention out of a message.

        Args:
            msg (str): message sent in group chat
        Returns:
            msg (str): the message with every '@<bot_name>' removed
        '''
        return re.sub('@' + re.escape(self.name), '', msg)

    def getResponse(self, msg):
        '''Given a message the appropriate response is returned.

        Args:
            msg (str): a message to respond to
        Returns:
            response (str): the bot's response to the message
        '''
        # makes a call to the chatterbot package for a response
        response = self.chatbot.get_response(msg)
        return response
|
[
"requests.session",
"chatterbot.ChatBot",
"chatterbot.trainers.ChatterBotCorpusTrainer",
"re.match",
"re.sub"
] |
[((423, 441), 'requests.session', 'requests.session', ([], {}), '()\n', (439, 441), False, 'import requests\n'), ((545, 563), 'chatterbot.ChatBot', 'ChatBot', (['self.name'], {}), '(self.name)\n', (552, 563), False, 'from chatterbot import ChatBot\n'), ((582, 619), 'chatterbot.trainers.ChatterBotCorpusTrainer', 'ChatterBotCorpusTrainer', (['self.chatbot'], {}), '(self.chatbot)\n', (605, 619), False, 'from chatterbot.trainers import ChatterBotCorpusTrainer\n'), ((2129, 2168), 're.match', 're.match', (["('.*@' + self.name + '.*')", 'msg'], {}), "('.*@' + self.name + '.*', msg)\n", (2137, 2168), False, 'import re\n'), ((2473, 2505), 're.sub', 're.sub', (['f"""@{self.name}"""', '""""""', 'msg'], {}), "(f'@{self.name}', '', msg)\n", (2479, 2505), False, 'import re\n')]
|
# Copyright (c) 2012 <NAME> http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
try:
import simplejson as json
except ImportError:
import json
import boto.exception
import requests
import boto
class SearchServiceException(Exception):
    """Raised for errors reported by the Cloudsearch search service."""
    pass
class CommitMismatchError(Exception):
    """Raised when a commit response's add/delete count does not match the submitted batch."""
    pass
class DocumentServiceConnection(object):
    """Accumulates Cloudsearch SDF document operations and posts them as a batch."""

    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        # Fall back to the domain's document-service endpoint when no
        # explicit endpoint is supplied.
        self.endpoint = endpoint or domain.doc_service_endpoint
        self.documents_batch = []
        self._sdf = None

    def add(self, _id, version, fields, lang='en'):
        """Queue an 'add' operation for document ``_id``."""
        op = {'type': 'add', 'id': _id, 'version': version, 'lang': lang,
              'fields': fields}
        self.documents_batch.append(op)

    def delete(self, _id, version):
        """Queue a 'delete' operation for document ``_id``."""
        op = {'type': 'delete', 'id': _id, 'version': version}
        self.documents_batch.append(op)

    def get_sdf(self):
        """Return the SDF payload: the raw SDF if one was set, else the queued batch as JSON."""
        if self._sdf:
            return self._sdf
        return json.dumps(self.documents_batch)

    def clear_sdf(self):
        """Discard any raw SDF and empty the queued batch."""
        self._sdf = None
        self.documents_batch = []

    def add_sdf_from_s3(self, key_obj):
        """@todo (lucas) would be nice if this could just take an s3://uri..."""
        self._sdf = key_obj.get_contents_as_string()

    def commit(self):
        """Post the current SDF to the documents/batch endpoint and wrap the reply."""
        sdf = self.get_sdf()
        # A ': null' in the payload almost always means a missing field
        # value and will be rejected server-side; log context around it.
        if ': null' in sdf:
            boto.log.error('null value in sdf detected. This will probably raise '
                           '500 error.')
            index = sdf.index(': null')
            boto.log.error(sdf[index - 100:index + 100])
        url = "http://%s/2011-02-01/documents/batch" % (self.endpoint)
        request_config = {
            'pool_connections': 20,
            'keep_alive': True,
            'max_retries': 5,
            'pool_maxsize': 50
        }
        r = requests.post(url, data=sdf, config=request_config,
                          headers={'Content-Type': 'application/json'})
        return CommitResponse(r, self, sdf)
class CommitResponse(object):
    """Wrapper for the response to a Cloudsearch document batch commit.

    :type response: :class:`requests.models.Response`
    :param response: Response from the Cloudsearch /documents/batch API
    :type doc_service: :class:`exfm.cloudsearch.DocumentServiceConnection`
    :param doc_service: Object containing the documents posted and methods to
        retry
    :raises: :class:`boto.exception.BotoServerError`
    :raises: :class:`exfm.cloudsearch.SearchServiceException`
    """

    def __init__(self, response, doc_service, sdf):
        self.response = response
        self.doc_service = doc_service
        self.sdf = sdf
        try:
            self.content = json.loads(response.content)
        except:
            # Unparseable body: log it with the SDF for debugging, then
            # surface the failure as a server error.
            boto.log.error('Error indexing documents.\nResponse Content:\n{}\n\n'
                           'SDF:\n{}'.format(response.content, self.sdf))
            raise boto.exception.BotoServerError(self.response.status_code, '',
                                                 body=response.content)
        self.status = self.content['status']
        # Collect per-document error messages only when the batch failed.
        self.errors = (
            [e.get('message') for e in self.content.get('errors', [])]
            if self.status == 'error' else []
        )
        self.adds = self.content['adds']
        self.deletes = self.content['deletes']
        self._check_num_ops('add', self.adds)
        self._check_num_ops('delete', self.deletes)

    def _check_num_ops(self, type_, response_num):
        """Raise CommitMismatchError if the response count differs from the batch.

        :type type_: str
        :param type_: Type of commit operation: 'add' or 'delete'
        :type response_num: int
        :param response_num: Number of adds or deletes in the response.
        :raises: :class:`exfm.cloudsearch.SearchServiceException`
        """
        commit_num = sum(1 for d in self.doc_service.documents_batch
                         if d['type'] == type_)
        if response_num != commit_num:
            raise CommitMismatchError(
                'Incorrect number of {}s returned. Commit: {} Respose: {}'
                .format(type_, commit_num, response_num))
|
[
"json.loads",
"json.dumps",
"boto.exception.BotoServerError",
"boto.log.error",
"requests.post"
] |
[((2934, 3035), 'requests.post', 'requests.post', (['url'], {'data': 'sdf', 'config': 'request_config', 'headers': "{'Content-Type': 'application/json'}"}), "(url, data=sdf, config=request_config, headers={'Content-Type':\n 'application/json'})\n", (2947, 3035), False, 'import requests\n'), ((2097, 2129), 'json.dumps', 'json.dumps', (['self.documents_batch'], {}), '(self.documents_batch)\n', (2107, 2129), False, 'import json\n'), ((2483, 2570), 'boto.log.error', 'boto.log.error', (['"""null value in sdf detected. This will probably raise 500 error."""'], {}), "(\n 'null value in sdf detected. This will probably raise 500 error.')\n", (2497, 2570), False, 'import boto\n'), ((2637, 2681), 'boto.log.error', 'boto.log.error', (['sdf[index - 100:index + 100]'], {}), '(sdf[index - 100:index + 100])\n', (2651, 2681), False, 'import boto\n'), ((3790, 3818), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (3800, 3818), False, 'import json\n'), ((3998, 4087), 'boto.exception.BotoServerError', 'boto.exception.BotoServerError', (['self.response.status_code', '""""""'], {'body': 'response.content'}), "(self.response.status_code, '', body=response\n .content)\n", (4028, 4087), False, 'import boto\n')]
|
from django.contrib import admin
from django.urls import path, re_path, include, reverse_lazy
from rest_framework.routers import DefaultRouter
from users.views import LoginViewCustom
from accounts.views import CardSerializerViewSet
# DRF router: auto-generates list/detail routes for the card API viewset.
router = DefaultRouter()
router.register(r'card', CardSerializerViewSet)
# Root URL table: API router at '', Django admin, and rest_auth
# authentication endpoints (login is overridden by LoginViewCustom).
urlpatterns = [
    path("", include(router.urls)),
    path("admin/", admin.site.urls),
    path("api/auth/", include("rest_auth.urls")),
    path("api/auth/registration/", include("rest_auth.registration.urls")),
    path("api/auth/login/", LoginViewCustom.as_view(), name="rest_login"),
]
|
[
"django.urls.path",
"users.views.LoginViewCustom.as_view",
"rest_framework.routers.DefaultRouter",
"django.urls.include"
] |
[((245, 260), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (258, 260), False, 'from rest_framework.routers import DefaultRouter\n'), ((366, 397), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (370, 397), False, 'from django.urls import path, re_path, include, reverse_lazy\n'), ((339, 359), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (346, 359), False, 'from django.urls import path, re_path, include, reverse_lazy\n'), ((421, 446), 'django.urls.include', 'include', (['"""rest_auth.urls"""'], {}), "('rest_auth.urls')\n", (428, 446), False, 'from django.urls import path, re_path, include, reverse_lazy\n'), ((484, 522), 'django.urls.include', 'include', (['"""rest_auth.registration.urls"""'], {}), "('rest_auth.registration.urls')\n", (491, 522), False, 'from django.urls import path, re_path, include, reverse_lazy\n'), ((553, 578), 'users.views.LoginViewCustom.as_view', 'LoginViewCustom.as_view', ([], {}), '()\n', (576, 578), False, 'from users.views import LoginViewCustom\n')]
|
#!python3
# -*- coding:utf-8 -*-
# 『Pythonで始めるOpenCV4プログラミング』
# 北山尚洋
import cv2
import sys, traceback
import numpy as np
def add(imgName1, imgName2):
    """Load two images, resize both to 500x500 and display their cv2.add blend.

    Exits the process when either file cannot be read; any other error is
    printed (type, value, traceback frames) and swallowed.
    """
    try:
        img1 = cv2.imread(imgName1)
        img2 = cv2.imread(imgName2)
        # cv2.imread returns None on failure instead of raising.
        if img1 is None or img2 is None:
            print("no file reading...")
            sys.exit(1)
        #caution!
        #src file size have to same size each img1 and img2. and same type.
        img1 = cv2.resize(img1, (500,500))
        img2 = cv2.resize(img2, (500,500))
        cv2.imshow('image1', img1)
        cv2.imshow('image2', img2)
        # Saturated per-pixel addition of the two frames.
        dst = cv2.add(img1, img2)
        cv2.imshow('synthesize', dst)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    except Exception as ex:
        # Print exception type, value, then the formatted traceback frames.
        print("Error:", sys.exc_info()[0])
        print(sys.exc_info()[1])
        print(traceback.format_tb(sys.exc_info()[2]))
    finally:
        pass
def addScolor(imgName1):
    """Show an image and a version brightened by a constant blue offset.

    Adds BGR (128, 0, 0) to every pixel using saturating cv2.add and
    displays both images until a key is pressed.
    """
    try:
        img1 = cv2.imread(imgName1)
        # cv2.imread returns None on failure instead of raising.
        if img1 is None:
            print("no file reading...")
            sys.exit(1)
        cv2.imshow("img1",img1)
        height = img1.shape[0]
        width = img1.shape[1]
        # Constant-color frame: every pixel is BGR (128, 0, 0) = blue offset.
        blue = np.zeros((height, width, 3), np.uint8)
        blue[:,:] = [128, 0, 0]
        dst = cv2.add(img1, blue)
        cv2.imshow("after", dst)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    except Exception as ex:
        # Print exception type, value, then the formatted traceback frames.
        print("Error:", sys.exc_info()[0])
        print(sys.exc_info()[1])
        print(traceback.format_tb(sys.exc_info()[2]))
    finally:
        pass
def addMask(imgName1, imgName2):
    """Blend two images with cv2.add restricted to a central rectangular mask.

    Loads both images, resizes them to 500x500, builds a single-channel
    mask covering the central half of the frame, adds the images only
    where the mask is non-zero, and displays the result until a key is
    pressed.  Exits the process when either file cannot be read.
    """
    try:
        img1 = cv2.imread(imgName1)
        img2 = cv2.imread(imgName2)
        # cv2.imread returns None on failure instead of raising.
        if img1 is None or img2 is None:
            print("no file reading...")
            sys.exit(1)
        #caution!
        #src file size have to same size each img1 and img2. and same type.
        img1 = cv2.resize(img1, (500,500))
        img2 = cv2.resize(img2, (500,500))
        cv2.imshow('image1', img1)
        cv2.imshow('image2', img2)
        # Single-channel mask: 255 inside the central half, 0 elsewhere.
        height = img1.shape[0]
        width = img1.shape[1]
        img_mask = np.zeros((height, width), np.uint8)
        img_mask[ height//4:height*3//4, width//4:width*3//4 ] = [255]
        # Add the two images where the mask is set.  (The original code
        # performed this add and imshow twice in a row; the redundant
        # duplicate has been removed.)
        dst = cv2.add(img1, img2, mask = img_mask)
        cv2.imshow('dst1', dst)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    except Exception:
        # Print exception type, value, then the formatted traceback frames.
        print("Error:", sys.exc_info()[0])
        print(sys.exc_info()[1])
        print(traceback.format_tb(sys.exc_info()[2]))
#add(sys.argv[1], sys.argv[2])
#addScolor(sys.argv[1])
# Script entry point: blend the two images given on the command line.
addMask(sys.argv[1], sys.argv[2])
|
[
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"cv2.imread",
"sys.exc_info",
"sys.exit",
"cv2.imshow",
"cv2.add",
"cv2.resize"
] |
[((177, 197), 'cv2.imread', 'cv2.imread', (['imgName1'], {}), '(imgName1)\n', (187, 197), False, 'import cv2\n'), ((213, 233), 'cv2.imread', 'cv2.imread', (['imgName2'], {}), '(imgName2)\n', (223, 233), False, 'import cv2\n'), ((466, 494), 'cv2.resize', 'cv2.resize', (['img1', '(500, 500)'], {}), '(img1, (500, 500))\n', (476, 494), False, 'import cv2\n'), ((509, 537), 'cv2.resize', 'cv2.resize', (['img2', '(500, 500)'], {}), '(img2, (500, 500))\n', (519, 537), False, 'import cv2\n'), ((554, 580), 'cv2.imshow', 'cv2.imshow', (['"""image1"""', 'img1'], {}), "('image1', img1)\n", (564, 580), False, 'import cv2\n'), ((589, 615), 'cv2.imshow', 'cv2.imshow', (['"""image2"""', 'img2'], {}), "('image2', img2)\n", (599, 615), False, 'import cv2\n'), ((639, 658), 'cv2.add', 'cv2.add', (['img1', 'img2'], {}), '(img1, img2)\n', (646, 658), False, 'import cv2\n'), ((667, 696), 'cv2.imshow', 'cv2.imshow', (['"""synthesize"""', 'dst'], {}), "('synthesize', dst)\n", (677, 696), False, 'import cv2\n'), ((714, 728), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (725, 728), False, 'import cv2\n'), ((737, 760), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (758, 760), False, 'import cv2\n'), ((1009, 1029), 'cv2.imread', 'cv2.imread', (['imgName1'], {}), '(imgName1)\n', (1019, 1029), False, 'import cv2\n'), ((1149, 1173), 'cv2.imshow', 'cv2.imshow', (['"""img1"""', 'img1'], {}), "('img1', img1)\n", (1159, 1173), False, 'import cv2\n'), ((1258, 1296), 'numpy.zeros', 'np.zeros', (['(height, width, 3)', 'np.uint8'], {}), '((height, width, 3), np.uint8)\n', (1266, 1296), True, 'import numpy as np\n'), ((1352, 1371), 'cv2.add', 'cv2.add', (['img1', 'blue'], {}), '(img1, blue)\n', (1359, 1371), False, 'import cv2\n'), ((1380, 1404), 'cv2.imshow', 'cv2.imshow', (['"""after"""', 'dst'], {}), "('after', dst)\n", (1390, 1404), False, 'import cv2\n'), ((1422, 1436), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1433, 1436), False, 'import cv2\n'), ((1445, 
1468), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1466, 1468), False, 'import cv2\n'), ((1720, 1740), 'cv2.imread', 'cv2.imread', (['imgName1'], {}), '(imgName1)\n', (1730, 1740), False, 'import cv2\n'), ((1756, 1776), 'cv2.imread', 'cv2.imread', (['imgName2'], {}), '(imgName2)\n', (1766, 1776), False, 'import cv2\n'), ((2009, 2037), 'cv2.resize', 'cv2.resize', (['img1', '(500, 500)'], {}), '(img1, (500, 500))\n', (2019, 2037), False, 'import cv2\n'), ((2052, 2080), 'cv2.resize', 'cv2.resize', (['img2', '(500, 500)'], {}), '(img2, (500, 500))\n', (2062, 2080), False, 'import cv2\n'), ((2097, 2123), 'cv2.imshow', 'cv2.imshow', (['"""image1"""', 'img1'], {}), "('image1', img1)\n", (2107, 2123), False, 'import cv2\n'), ((2132, 2158), 'cv2.imshow', 'cv2.imshow', (['"""image2"""', 'img2'], {}), "('image2', img2)\n", (2142, 2158), False, 'import cv2\n'), ((2269, 2304), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.uint8'], {}), '((height, width), np.uint8)\n', (2277, 2304), True, 'import numpy as np\n'), ((2433, 2467), 'cv2.add', 'cv2.add', (['img1', 'img2'], {'mask': 'img_mask'}), '(img1, img2, mask=img_mask)\n', (2440, 2467), False, 'import cv2\n'), ((2478, 2501), 'cv2.imshow', 'cv2.imshow', (['"""dst1"""', 'dst'], {}), "('dst1', dst)\n", (2488, 2501), False, 'import cv2\n'), ((2559, 2593), 'cv2.add', 'cv2.add', (['img1', 'img2'], {'mask': 'img_mask'}), '(img1, img2, mask=img_mask)\n', (2566, 2593), False, 'import cv2\n'), ((2604, 2627), 'cv2.imshow', 'cv2.imshow', (['"""dst1"""', 'dst'], {}), "('dst1', dst)\n", (2614, 2627), False, 'import cv2\n'), ((2653, 2667), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2664, 2667), False, 'import cv2\n'), ((2676, 2699), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2697, 2699), False, 'import cv2\n'), ((336, 347), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (344, 347), False, 'import sys, traceback\n'), ((1116, 1127), 'sys.exit', 'sys.exit', (['(1)'], {}), 
'(1)\n', (1124, 1127), False, 'import sys, traceback\n'), ((1879, 1890), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1887, 1890), False, 'import sys, traceback\n'), ((822, 836), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (834, 836), False, 'import sys, traceback\n'), ((855, 869), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (867, 869), False, 'import sys, traceback\n'), ((1530, 1544), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1542, 1544), False, 'import sys, traceback\n'), ((1563, 1577), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1575, 1577), False, 'import sys, traceback\n'), ((2761, 2775), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2773, 2775), False, 'import sys, traceback\n'), ((2794, 2808), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2806, 2808), False, 'import sys, traceback\n'), ((908, 922), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (920, 922), False, 'import sys, traceback\n'), ((1616, 1630), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1628, 1630), False, 'import sys, traceback\n'), ((2847, 2861), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2859, 2861), False, 'import sys, traceback\n')]
|
"""
pgxnclient -- tar file utilities
"""
# Copyright (C) 2011-2020 <NAME>
# This file is part of the PGXN client
import os
import tarfile
from pgxnclient.i18n import _
from pgxnclient.errors import PgxnClientException
from pgxnclient.archive import Archive
import logging
logger = logging.getLogger('pgxnclient.tar')
class TarArchive(Archive):
    """Handle .tar archives"""
    _file = None
    def can_open(self):
        """Return True if the target file looks like a tar archive."""
        return tarfile.is_tarfile(self.filename)
    def open(self):
        """Open the archive for reading; raise PgxnClientException on failure."""
        assert not self._file, "archive already open"
        try:
            self._file = tarfile.open(self.filename, 'r')
        except Exception as e:
            raise PgxnClientException(
                _("cannot open archive '%s': %s") % (self.filename, e)
            )
    def close(self):
        """Close the archive if open; safe to call twice."""
        if self._file is not None:
            self._file.close()
            self._file = None
    def list_files(self):
        """Return the member names of the open archive."""
        assert self._file, "archive not open"
        return self._file.getnames()
    def read(self, fn):
        """Return the bytes of member *fn* from the open archive."""
        assert self._file, "archive not open"
        return self._file.extractfile(fn).read()
    def unpack(self, destdir):
        """Extract the archive into *destdir*, refusing members that escape it."""
        tarname = self.filename
        logger.info(_("unpacking: %s"), tarname)
        destdir = os.path.abspath(destdir)
        # Separator-terminated prefix: a bare startswith(destdir) check
        # would also accept sibling paths such as '/dest-evil' when
        # destdir is '/dest', allowing a crafted archive to escape.
        prefix = os.path.join(destdir, '')
        self.open()
        try:
            for fn in self.list_files():
                fname = os.path.abspath(os.path.join(destdir, fn))
                if fname != destdir and not fname.startswith(prefix):
                    raise PgxnClientException(
                        _("archive file '%s' trying to escape!") % fname
                    )
            self._file.extractall(path=destdir)
        finally:
            self.close()
        return self._find_work_directory(destdir)
def unpack(filename, destdir):
    """Module-level convenience wrapper: extract *filename* into *destdir*."""
    archive = TarArchive(filename)
    return archive.unpack(destdir)
|
[
"pgxnclient.i18n._",
"os.path.abspath",
"tarfile.is_tarfile",
"tarfile.open",
"os.path.join",
"logging.getLogger"
] |
[((287, 322), 'logging.getLogger', 'logging.getLogger', (['"""pgxnclient.tar"""'], {}), "('pgxnclient.tar')\n", (304, 322), False, 'import logging\n'), ((441, 474), 'tarfile.is_tarfile', 'tarfile.is_tarfile', (['self.filename'], {}), '(self.filename)\n', (459, 474), False, 'import tarfile\n'), ((1255, 1279), 'os.path.abspath', 'os.path.abspath', (['destdir'], {}), '(destdir)\n', (1270, 1279), False, 'import os\n'), ((588, 620), 'tarfile.open', 'tarfile.open', (['self.filename', '"""r"""'], {}), "(self.filename, 'r')\n", (600, 620), False, 'import tarfile\n'), ((1208, 1226), 'pgxnclient.i18n._', '_', (['"""unpacking: %s"""'], {}), "('unpacking: %s')\n", (1209, 1226), False, 'from pgxnclient.i18n import _\n'), ((1394, 1419), 'os.path.join', 'os.path.join', (['destdir', 'fn'], {}), '(destdir, fn)\n', (1406, 1419), False, 'import os\n'), ((707, 740), 'pgxnclient.i18n._', '_', (['"""cannot open archive \'%s\': %s"""'], {}), '("cannot open archive \'%s\': %s")\n', (708, 740), False, 'from pgxnclient.i18n import _\n'), ((1542, 1582), 'pgxnclient.i18n._', '_', (['"""archive file \'%s\' trying to escape!"""'], {}), '("archive file \'%s\' trying to escape!")\n', (1543, 1582), False, 'from pgxnclient.i18n import _\n')]
|
import sys
from src.krpsim.utils import split_need_result_delay, build_process_dic
class Parser:
    """
    Parsing Class, heart of the parsing is here.
    -> stocks is a dict mapping stock name to Stock instance
    -> content is a dict mapping process name to Process instance
    -> optimize is a list of Optimize class instances
    -> delay corresponds to the maximal delay given as a parameter
    """
    def __init__(self, options):
        # options comes from the CLI layer; it exposes input_path, delay
        # and verbose -- assumed from the attribute accesses below.
        self.path, self.delay = options.input_path, options.delay
        self.stocks = {}
        self.content = {}
        self.optimize = []
        self.verbose = options.verbose
        self.fd = open(self.path, 'r+')
    def main_parsing(self):
        """
        Main parsing loop, the goal here is to iterate over
        the fd content, and to parse every line we encounter to
        determine its type
        """
        curr_line = None
        for line in self.fd:
            # '#' starts a comment line: skip it entirely.
            if line[0] == '#':
                print("Found a comment") if self.verbose == 1 or self.verbose == 3 else 0
                continue
            elif len(line) == 1 and line[0] == '\n':
                print("Skipping empty line") if self.verbose == 1 or self.verbose == 3 else 0
                continue
            else:
                # Anything else is a stock/process/optimize declaration.
                curr_line = self.parse_line(line)
                self.fill_parser_lists(curr_line)
                print(curr_line) if self.verbose == 1 or self.verbose == 3 else 0
        # file.close() returns None, so this also resets self.fd.
        self.fd = self.fd.close()
    def fill_parser_lists(self, line):
        """
        Comparing the line type after parse_line,
        we compare class instances with the base classes
        """
        # NOTE(review): a parse_line 'Error' result (malformed stock line)
        # matches none of these branches and is dropped silently.
        if type(line) is Process:
            self.content[line.name] = line
        elif type(line) is Optimize:
            self.optimize.append(line)
        elif type(line) is Stock:
            self.stocks[line.name] = line
    def verify_parsing_content(self):
        """
        Afterward check method for the parsing content.
        Exits the program when optimize targets, initial stocks
        or processes are missing.
        """
        if not self.optimize:
            sys.exit("Missing optimize content.")
        elif not self.stocks:
            sys.exit("Missing initial stocks.")
        elif not self.content:
            sys.exit("No process detected inside {}, please provide at least one".format(self.path))
        #Check if what need to be optimized is indeed inside at least one process and is accesible
        #like if the process never gets called because of stocks that can never be filled, then
        #the optimize values are not valid.
    def parse_line(self, line):
        """
        Method used to parse a line and extract the corresponding elem
        tmp -> Used for splitting the line and removing some junk from the list
        res -> Class instance, either Stock, Process or Optimize
        every instance is filled with the corresponding params
        Returns the string 'Error' for a malformed stock line.
        """
        tmp = None
        res = None
        line = line.replace('\n', '')
        tmp = [i for i in line.split(':')]
        # Drop a single empty split fragment if present.
        tmp.pop(tmp.index('')) if '' in tmp else tmp
        # Parsing for stock elem: 'name:qty' with no parenthesis.
        if '(' not in line:
            if tmp[0].isalpha() and tmp[1].isdecimal() or\
                tmp[0].replace('_', '').isalpha() and tmp[1].isdecimal():
                res = Stock(tmp[0], int(tmp[1]))
            else:
                res = 'Error'
        # Parsing for optimize elem: 'optimize:(a;b;...)' with no delay.
        elif 'optimize:' in line:
            if tmp[-1].isdigit():
                sys.exit("You can't specify a delay for an optimize element, error with \033[4m{}\033[0m"
                         .format(line))
            tmp = str(tmp[1]).replace('(', '').replace(')', '')
            res = Optimize(tmp.split(';'))
        # Parsing for process elem: 'name:(need):(result):delay'.
        elif tmp[-1].isdigit():
            tmp = [i.replace(')', '') for i in line.split('(')]
            name, need, result, delay = split_need_result_delay(tmp, line)
            res = Process(name, build_process_dic(need), build_process_dic(result), delay)
        # Invalid elem: parenthesized line with a non-numeric tail.
        elif not tmp[-1].isdigit():
            sys.exit("Error with \033[4m{}\033[0m, invalid element.".format(line))
        return res
class Stock:
    """A named resource with an available quantity.

    -> name is the stock identifier
    -> qty is the number of units currently available
    """
    def __init__(self, name, qty):
        self.name = name
        self.qty = qty

    def __str__(self):
        return '\033[1mStock\033[0m -> \033[38;5;155m{}\033[0m : {}'.format(self.name, self.qty)

    def __eq__(self, other):
        return (self.name, self.qty) == (other.name, other.qty)
class Process:
    """A production step.

    -> name is the process identifier
    -> need maps stock names to quantities consumed per run
    -> result maps stock names to quantities produced per run
    -> delay is the time one run takes
    """
    def __init__(self, name, need, result, delay):
        self.name = name
        self.need = need
        self.result = result
        self.delay = delay

    def __str__(self):
        template = ('\033[38;5;74m{}\033[0m - \033[1mneeds\033[0m : {} -> '
                    '\033[1mresult\033[0m : {} - \033[1mdelay\033[0m : {}')
        return template.format(self.name, self.need, self.result, self.delay)

    def __eq__(self, other):
        return (self.name, self.delay, self.need, self.result) == \
               (other.name, other.delay, other.need, other.result)
class Optimize:
    """Optimization targets parsed from an 'optimize:' line.

    -> opti_elems is the list of names to optimize (e.g. a stock, 'time')
    """
    def __init__(self, elems):
        self.opti_elems = list(elems)

    def __str__(self):
        inner = str(self.opti_elems).replace('[', '').replace(']', '')
        return '\033[1mOptimize\033[0m -> \033[38;5;218m{}\033[0m'.format(inner)

    def __eq__(self, other):
        return self.opti_elems == other.opti_elems
|
[
"src.krpsim.utils.split_need_result_delay",
"src.krpsim.utils.build_process_dic",
"sys.exit"
] |
[((2013, 2050), 'sys.exit', 'sys.exit', (['"""Missing optimize content."""'], {}), "('Missing optimize content.')\n", (2021, 2050), False, 'import sys\n'), ((2093, 2128), 'sys.exit', 'sys.exit', (['"""Missing initial stocks."""'], {}), "('Missing initial stocks.')\n", (2101, 2128), False, 'import sys\n'), ((3834, 3868), 'src.krpsim.utils.split_need_result_delay', 'split_need_result_delay', (['tmp', 'line'], {}), '(tmp, line)\n', (3857, 3868), False, 'from src.krpsim.utils import split_need_result_delay, build_process_dic\n'), ((3901, 3924), 'src.krpsim.utils.build_process_dic', 'build_process_dic', (['need'], {}), '(need)\n', (3918, 3924), False, 'from src.krpsim.utils import split_need_result_delay, build_process_dic\n'), ((3926, 3951), 'src.krpsim.utils.build_process_dic', 'build_process_dic', (['result'], {}), '(result)\n', (3943, 3951), False, 'from src.krpsim.utils import split_need_result_delay, build_process_dic\n')]
|
import datetime
import pickle
from enum import Enum
from decorator import decorator
def time_this(fn):
    """
    Decorator that prints how long each call of :func:`fn` takes.
    """
    def timed_fn(fn, *args, **kwargs):
        # Measure wall-clock time around the wrapped call.
        start = datetime.datetime.now()
        result = fn(*args, **kwargs)
        elapsed = datetime.datetime.now() - start
        print('%s execution time: %s.' % (fn.__name__, str(elapsed)))
        return result
    # decorator() preserves fn's signature on the returned wrapper.
    return decorator(timed_fn, fn)
class RaspEnum(Enum):
    """Enum base whose member values are (index, label) tuples.

    str() yields the label, get_index() the numeric index, and the
    ordering operators compare members by index.
    """
    def __str__(self):
        return self.value[1]

    def get_index(self):
        return self.value[0]

    @classmethod
    def get_enum(cls, index):
        """Return the member whose numeric index equals *index*."""
        matches = [member.value for member in cls if member.value[0] == index]
        return cls.__new__(cls, matches[0])

    @classmethod
    def enum_from_string(cls, index):
        """Return the member whose label equals *index*."""
        matches = [member.value for member in cls if member.value[1] == index]
        return cls.__new__(cls, matches[0])

    @classmethod
    def enum_from_name_string(cls, name):
        """Return the member whose attribute name equals *name*."""
        matches = [member.value for member in cls if member.name == name]
        return cls.__new__(cls, matches[0])

    def __ge__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value[0] >= other.value[0]

    def __gt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value[0] > other.value[0]

    def __le__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value[0] <= other.value[0]

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value[0] < other.value[0]
def write_to_pickle(file_name, data, file_path=r'c:\temp'):
    """Pickle *data* (protocol 2) to file_path + '\\' + file_name."""
    target = file_path + '\\' + file_name
    with open(target, 'wb') as out:
        pickle.dump(data, out, protocol=2)
def read_data(file_name, file_path=r'c:\temp'):
    """Unpickle and return the object stored at file_path + '\\' + file_name."""
    source = file_path + '\\' + file_name
    with open(source, 'rb') as src:
        data = pickle.load(src)
    return data
|
[
"decorator.decorator",
"pickle.dump",
"pickle.load",
"datetime.datetime.now"
] |
[((449, 472), 'decorator.decorator', 'decorator', (['timed_fn', 'fn'], {}), '(timed_fn, fn)\n', (458, 472), False, 'from decorator import decorator\n'), ((231, 254), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (252, 254), False, 'import datetime\n'), ((1803, 1835), 'pickle.dump', 'pickle.dump', (['data', 'f'], {'protocol': '(2)'}), '(data, f, protocol=2)\n', (1814, 1835), False, 'import pickle\n'), ((1957, 1971), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1968, 1971), False, 'import pickle\n'), ((312, 335), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (333, 335), False, 'import datetime\n')]
|
from .abstract_machine import AutoNormalizedAutoregressiveMachine
from ..deepar.layers import ToFloat32, DownShiftLayer, ExpandInputDim, WeightNormalization
from ..layers import VectorToComplexNumber
from tensorflow.keras.layers import Activation, Conv1D, ZeroPadding1D, Activation, Add
def causal_conv_1d(x, filters, kernel_size, weights_normalization, dilation_rate=1, activation=None, skip_connection=None):
    """Apply a left-padded (causal) 1-D convolution to *x*.

    Pads only on the left so position t never sees inputs past t,
    optionally wraps the convolution in weight normalization, then adds
    the residual *skip_connection* and applies *activation* last.
    """
    left_pad = kernel_size + (kernel_size - 1) * (dilation_rate - 1) - 1
    out = x
    if left_pad > 0:
        out = ZeroPadding1D(padding=(left_pad, 0))(out)
    conv = Conv1D(filters=filters, kernel_size=kernel_size, strides=1,
                  dilation_rate=dilation_rate)
    if weights_normalization:
        conv = WeightNormalization(conv)
    out = conv(out)
    if skip_connection is not None:
        out = Add()([out, skip_connection])
    if activation is not None:
        out = Activation(activation)(out)
    return out
class SimpleConvNetAutoregressive1D(AutoNormalizedAutoregressiveMachine):
    """Autoregressive machine whose conditional log-wave-function is built
    from a stack of causal 1-D convolutions; the final 4-channel output
    is converted to one complex number per site."""
    def __init__(self, keras_input_layer, depth, num_of_channels, kernel_size=3,
                 use_dilation=True, add_skip_connections=False, max_dilation_rate=None, activation='relu',
                 weights_normalization=True, should_expand_input_dim=True, **kwargs):
        # Network hyper-parameters; each is consumed by
        # _build_unnormalized_conditional_log_wave_function below.
        self.depth = depth
        self.num_of_channels = num_of_channels
        self.kernel_size = kernel_size
        self.use_dilation = use_dilation
        self.add_skip_connections = add_skip_connections
        self.max_dilation_rate = max_dilation_rate
        self.activation = activation
        self.weights_normalization = weights_normalization
        self.should_expand_input_dim = should_expand_input_dim
        # Build the graph before delegating to the base class, which is
        # presumably what consumes unnormalized_conditional_log_wave_function
        # -- TODO confirm against AutoNormalizedAutoregressiveMachine.
        self._build_unnormalized_conditional_log_wave_function(keras_input_layer)
        super(SimpleConvNetAutoregressive1D, self).__init__(keras_input_layer, **kwargs)
    @property
    def unnormalized_conditional_log_wave_function(self):
        # Tensor produced in _build_unnormalized_conditional_log_wave_function.
        return self._unnormalized_conditional_log_wave_function
    def _build_unnormalized_conditional_log_wave_function(self, keras_input_layer):
        """Wire depth-2 causal conv layers, a DownShiftLayer and a final
        1x1 conv whose 4 channels become a complex number per site."""
        dilation_rate = 1
        x = keras_input_layer
        if self.should_expand_input_dim:
            x = ExpandInputDim()(x)
        x = ToFloat32()(x)
        for i in range(self.depth - 2):
            # Skip connections only start from the second hidden layer.
            skip_connection = x if self.add_skip_connections and i > 0 else None
            x = causal_conv_1d(x, filters=self.num_of_channels,
                               kernel_size=self.kernel_size,
                               activation=self.activation,
                               weights_normalization=self.weights_normalization,
                               dilation_rate=dilation_rate, skip_connection=skip_connection)
            if self.use_dilation:
                # NOTE(review): when max_dilation_rate is None the rate never
                # doubles even with use_dilation=True -- confirm this is intended.
                if self.max_dilation_rate is not None and dilation_rate < self.max_dilation_rate:
                    dilation_rate *= 2
        x = DownShiftLayer()(x)
        x = causal_conv_1d(x, filters=4, kernel_size=1,
                           weights_normalization=self.weights_normalization)
        self._unnormalized_conditional_log_wave_function = VectorToComplexNumber()(x)
|
[
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.ZeroPadding1D",
"tensorflow.keras.layers.Activation"
] |
[((574, 667), 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': 'filters', 'kernel_size': 'kernel_size', 'strides': '(1)', 'dilation_rate': 'dilation_rate'}), '(filters=filters, kernel_size=kernel_size, strides=1, dilation_rate=\n dilation_rate)\n', (580, 667), False, 'from tensorflow.keras.layers import Activation, Conv1D, ZeroPadding1D, Activation, Add\n'), ((518, 553), 'tensorflow.keras.layers.ZeroPadding1D', 'ZeroPadding1D', ([], {'padding': '(padding, 0)'}), '(padding=(padding, 0))\n', (531, 553), False, 'from tensorflow.keras.layers import Activation, Conv1D, ZeroPadding1D, Activation, Add\n'), ((816, 821), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (819, 821), False, 'from tensorflow.keras.layers import Activation, Conv1D, ZeroPadding1D, Activation, Add\n'), ((887, 909), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (897, 909), False, 'from tensorflow.keras.layers import Activation, Conv1D, ZeroPadding1D, Activation, Add\n')]
|
"""
Command to:
- load all fixtures named 'demo-*.*'
- create copy the demo media files
This should only be called in a clean database, such as after
`resetdb` is run. This should generally only be used in
development environments.
"""
import os
import shutil
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from physionet.utility import get_project_apps
from lightwave.views import DBCAL_FILE, ORIGINAL_DBCAL_FILE
class Command(BaseCommand):
    """Load all demo fixtures and copy the demo media/static content.

    Intended for development environments, typically right after
    ``resetdb`` on a clean database.
    """
    def handle(self, *args, **options):
        # The abort path below calls sys.exit, but this module never
        # imported sys -- that raised NameError instead of exiting.
        import sys
        # If not in development, prompt warning messages three times
        if 'development' not in os.environ['DJANGO_SETTINGS_MODULE']:
            warning_messages = ['You are NOT in the development environment. Are you sure you want to insert demo data? [y/n]',
                                'The demo data will be mixed with existing data. Are you sure? [y/n]',
                                'Final warning. Are you ABSOLUTELY SURE? [y/n]']
            for i in range(3):
                choice = input(warning_messages[i]).lower()
                if choice != 'y':
                    sys.exit('Exiting from load. No actions applied.')
            print('Continuing loading demo data')
        # Load licences and software languages
        site_data_fixtures = os.path.join(settings.BASE_DIR, 'project',
                                          'fixtures', 'site-data.json')
        call_command('loaddata', site_data_fixtures, verbosity=1)
        # Load fixtures for default project types
        project_types_fixtures = os.path.join(settings.BASE_DIR, 'project',
                                              'fixtures', 'project-types.json')
        call_command('loaddata', project_types_fixtures, verbosity=1)
        # Load fixtures for default sites
        site_fixtures = os.path.join(settings.BASE_DIR, 'physionet',
                                     'fixtures', 'sites.json')
        call_command('loaddata', site_fixtures, verbosity=1)
        # Load other app fixtures
        project_apps = get_project_apps()
        demo_fixtures = find_demo_fixtures(project_apps)
        call_command('loaddata', *demo_fixtures, verbosity=1)
        # Copy the demo media and static content
        copy_demo_media()
        copy_demo_static()
        print('Copied demo media and static files.')
        # Make symlink of wfdbcal for lightwave
        if os.path.exists(ORIGINAL_DBCAL_FILE):
            os.symlink(ORIGINAL_DBCAL_FILE, DBCAL_FILE)
def find_demo_fixtures(project_apps):
"""
Find non-empty demo fixtures
"""
demo_fixtures = []
for app in project_apps:
fixture = 'demo-{}'.format(app)
file_name = os.path.join(settings.BASE_DIR, app,
'fixtures', '{}.json'.format(fixture))
if os.path.exists(file_name) and open(file_name).read(4) != '[\n]\n':
demo_fixtures.append(fixture)
return demo_fixtures
def copy_demo_media():
"""
Copy the demo media files into the media root.
Copy all items from within the immediate subfolders of the demo
media root.
"""
demo_media_root = os.path.join(settings.DEMO_FILE_ROOT, 'media')
for subdir in os.listdir(demo_media_root):
demo_subdir = os.path.join(demo_media_root, subdir)
target_subdir = os.path.join(settings.MEDIA_ROOT, subdir)
for item in [i for i in os.listdir(demo_subdir) if i != '.gitkeep']:
shutil.copytree(os.path.join(demo_subdir, item),
os.path.join(target_subdir, item))
# Published project files should have been made read-only at
# the time of publication
ppdir = os.path.join(settings.MEDIA_ROOT, 'published-projects')
for dirpath, subdirs, files in os.walk(ppdir):
if dirpath != ppdir:
for f in files:
os.chmod(os.path.join(dirpath, f), 0o444)
for d in subdirs:
os.chmod(os.path.join(dirpath, d), 0o555)
def copy_demo_static():
"""
Copy the demo static files into the effective static root.
"""
demo_static_root = os.path.join(settings.DEMO_FILE_ROOT, 'static')
# Either the actual static root if defined, or the staticfiles_dirs
effective_static_root = settings.STATIC_ROOT if settings.STATIC_ROOT else settings.STATICFILES_DIRS[0]
for subdir in os.listdir(demo_static_root):
demo_subdir = os.path.join(demo_static_root, subdir)
target_subdir = os.path.join(effective_static_root, subdir)
for item in [i for i in os.listdir(demo_subdir) if i != '.gitkeep']:
shutil.copytree(os.path.join(demo_subdir, item),
os.path.join(target_subdir, item))
# Published project files should have been made read-only at
# the time of publication
ppdir = os.path.join(effective_static_root, 'published-projects')
for dirpath, subdirs, files in os.walk(ppdir):
if dirpath != ppdir:
for f in files:
os.chmod(os.path.join(dirpath, f), 0o444)
for d in subdirs:
os.chmod(os.path.join(dirpath, d), 0o555)
|
[
"os.walk",
"os.path.exists",
"django.core.management.call_command",
"physionet.utility.get_project_apps",
"os.symlink",
"os.path.join",
"os.listdir"
] |
[((3198, 3244), 'os.path.join', 'os.path.join', (['settings.DEMO_FILE_ROOT', '"""media"""'], {}), "(settings.DEMO_FILE_ROOT, 'media')\n", (3210, 3244), False, 'import os\n'), ((3263, 3290), 'os.listdir', 'os.listdir', (['demo_media_root'], {}), '(demo_media_root)\n', (3273, 3290), False, 'import os\n'), ((3729, 3784), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', '"""published-projects"""'], {}), "(settings.MEDIA_ROOT, 'published-projects')\n", (3741, 3784), False, 'import os\n'), ((3820, 3834), 'os.walk', 'os.walk', (['ppdir'], {}), '(ppdir)\n', (3827, 3834), False, 'import os\n'), ((4168, 4215), 'os.path.join', 'os.path.join', (['settings.DEMO_FILE_ROOT', '"""static"""'], {}), "(settings.DEMO_FILE_ROOT, 'static')\n", (4180, 4215), False, 'import os\n'), ((4415, 4443), 'os.listdir', 'os.listdir', (['demo_static_root'], {}), '(demo_static_root)\n', (4425, 4443), False, 'import os\n'), ((4884, 4941), 'os.path.join', 'os.path.join', (['effective_static_root', '"""published-projects"""'], {}), "(effective_static_root, 'published-projects')\n", (4896, 4941), False, 'import os\n'), ((4977, 4991), 'os.walk', 'os.walk', (['ppdir'], {}), '(ppdir)\n', (4984, 4991), False, 'import os\n'), ((1343, 1415), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""project"""', '"""fixtures"""', '"""site-data.json"""'], {}), "(settings.BASE_DIR, 'project', 'fixtures', 'site-data.json')\n", (1355, 1415), False, 'import os\n'), ((1466, 1523), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', 'site_data_fixtures'], {'verbosity': '(1)'}), "('loaddata', site_data_fixtures, verbosity=1)\n", (1478, 1523), False, 'from django.core.management import call_command\n'), ((1608, 1684), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""project"""', '"""fixtures"""', '"""project-types.json"""'], {}), "(settings.BASE_DIR, 'project', 'fixtures', 'project-types.json')\n", (1620, 1684), False, 'import os\n'), ((1735, 1796), 
'django.core.management.call_command', 'call_command', (['"""loaddata"""', 'project_types_fixtures'], {'verbosity': '(1)'}), "('loaddata', project_types_fixtures, verbosity=1)\n", (1747, 1796), False, 'from django.core.management import call_command\n'), ((1864, 1934), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""physionet"""', '"""fixtures"""', '"""sites.json"""'], {}), "(settings.BASE_DIR, 'physionet', 'fixtures', 'sites.json')\n", (1876, 1934), False, 'import os\n'), ((1985, 2037), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', 'site_fixtures'], {'verbosity': '(1)'}), "('loaddata', site_fixtures, verbosity=1)\n", (1997, 2037), False, 'from django.core.management import call_command\n'), ((2097, 2115), 'physionet.utility.get_project_apps', 'get_project_apps', ([], {}), '()\n', (2113, 2115), False, 'from physionet.utility import get_project_apps\n'), ((2181, 2234), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', '*demo_fixtures'], {'verbosity': '(1)'}), "('loaddata', *demo_fixtures, verbosity=1)\n", (2193, 2234), False, 'from django.core.management import call_command\n'), ((2450, 2485), 'os.path.exists', 'os.path.exists', (['ORIGINAL_DBCAL_FILE'], {}), '(ORIGINAL_DBCAL_FILE)\n', (2464, 2485), False, 'import os\n'), ((3314, 3351), 'os.path.join', 'os.path.join', (['demo_media_root', 'subdir'], {}), '(demo_media_root, subdir)\n', (3326, 3351), False, 'import os\n'), ((3376, 3417), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', 'subdir'], {}), '(settings.MEDIA_ROOT, subdir)\n', (3388, 3417), False, 'import os\n'), ((4467, 4505), 'os.path.join', 'os.path.join', (['demo_static_root', 'subdir'], {}), '(demo_static_root, subdir)\n', (4479, 4505), False, 'import os\n'), ((4530, 4573), 'os.path.join', 'os.path.join', (['effective_static_root', 'subdir'], {}), '(effective_static_root, subdir)\n', (4542, 4573), False, 'import os\n'), ((2499, 2542), 'os.symlink', 'os.symlink', 
(['ORIGINAL_DBCAL_FILE', 'DBCAL_FILE'], {}), '(ORIGINAL_DBCAL_FILE, DBCAL_FILE)\n', (2509, 2542), False, 'import os\n'), ((2863, 2888), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (2877, 2888), False, 'import os\n'), ((3451, 3474), 'os.listdir', 'os.listdir', (['demo_subdir'], {}), '(demo_subdir)\n', (3461, 3474), False, 'import os\n'), ((3525, 3556), 'os.path.join', 'os.path.join', (['demo_subdir', 'item'], {}), '(demo_subdir, item)\n', (3537, 3556), False, 'import os\n'), ((3586, 3619), 'os.path.join', 'os.path.join', (['target_subdir', 'item'], {}), '(target_subdir, item)\n', (3598, 3619), False, 'import os\n'), ((4607, 4630), 'os.listdir', 'os.listdir', (['demo_subdir'], {}), '(demo_subdir)\n', (4617, 4630), False, 'import os\n'), ((4680, 4711), 'os.path.join', 'os.path.join', (['demo_subdir', 'item'], {}), '(demo_subdir, item)\n', (4692, 4711), False, 'import os\n'), ((4741, 4774), 'os.path.join', 'os.path.join', (['target_subdir', 'item'], {}), '(target_subdir, item)\n', (4753, 4774), False, 'import os\n'), ((3918, 3942), 'os.path.join', 'os.path.join', (['dirpath', 'f'], {}), '(dirpath, f)\n', (3930, 3942), False, 'import os\n'), ((4006, 4030), 'os.path.join', 'os.path.join', (['dirpath', 'd'], {}), '(dirpath, d)\n', (4018, 4030), False, 'import os\n'), ((5075, 5099), 'os.path.join', 'os.path.join', (['dirpath', 'f'], {}), '(dirpath, f)\n', (5087, 5099), False, 'import os\n'), ((5163, 5187), 'os.path.join', 'os.path.join', (['dirpath', 'd'], {}), '(dirpath, d)\n', (5175, 5187), False, 'import os\n')]
|
#!/usr/bin/env python3
import argparse
import re
def main(args):
with open(args.source) as file:
content = file.read()
for pattern, filename in re.findall(r'(\{!(.*)!\})', content):
with open(filename) as file:
match_content = file.read()
dest_content = re.sub(pattern, match_content, content)
with open(args.dest, mode='w') as file:
file.write(dest_content)
with open(args.dest) as file:
content = file.read()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Replace placeholders with it's file contents"
)
parser.add_argument(
'source',
metavar='SOURCE',
type=str,
help='File to parse'
)
parser.add_argument(
'dest',
metavar='DEST',
type=str,
help='destination file'
)
args = parser.parse_args()
main(args)
|
[
"re.findall",
"re.sub",
"argparse.ArgumentParser"
] |
[((164, 201), 're.findall', 're.findall', (['"""(\\\\{!(.*)!\\\\})"""', 'content'], {}), "('(\\\\{!(.*)!\\\\})', content)\n", (174, 201), False, 'import re\n'), ((545, 633), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Replace placeholders with it\'s file contents"""'}), '(description=\n "Replace placeholders with it\'s file contents")\n', (568, 633), False, 'import argparse\n'), ((303, 342), 're.sub', 're.sub', (['pattern', 'match_content', 'content'], {}), '(pattern, match_content, content)\n', (309, 342), False, 'import re\n')]
|
from django.urls import path
from . import views
urlpatterns = [
path('member_view', views.api_member_view, name='test_api_member_view'),
path('manager_view', views.api_manager_view, name='test_api_manager_view'),
path('owner_view', views.api_owner_view, name='test_api_owner_view'),
path('base_org_view', views.base_org_view, name='test_base_org_permission_view'),
path('org_field_view', views.org_field_view, name='test_organization_field_view'),
path('error_field_view', views.error_field_view, name='test_error_field_view'),
]
|
[
"django.urls.path"
] |
[((71, 142), 'django.urls.path', 'path', (['"""member_view"""', 'views.api_member_view'], {'name': '"""test_api_member_view"""'}), "('member_view', views.api_member_view, name='test_api_member_view')\n", (75, 142), False, 'from django.urls import path\n'), ((148, 222), 'django.urls.path', 'path', (['"""manager_view"""', 'views.api_manager_view'], {'name': '"""test_api_manager_view"""'}), "('manager_view', views.api_manager_view, name='test_api_manager_view')\n", (152, 222), False, 'from django.urls import path\n'), ((228, 296), 'django.urls.path', 'path', (['"""owner_view"""', 'views.api_owner_view'], {'name': '"""test_api_owner_view"""'}), "('owner_view', views.api_owner_view, name='test_api_owner_view')\n", (232, 296), False, 'from django.urls import path\n'), ((302, 387), 'django.urls.path', 'path', (['"""base_org_view"""', 'views.base_org_view'], {'name': '"""test_base_org_permission_view"""'}), "('base_org_view', views.base_org_view, name='test_base_org_permission_view'\n )\n", (306, 387), False, 'from django.urls import path\n'), ((388, 474), 'django.urls.path', 'path', (['"""org_field_view"""', 'views.org_field_view'], {'name': '"""test_organization_field_view"""'}), "('org_field_view', views.org_field_view, name=\n 'test_organization_field_view')\n", (392, 474), False, 'from django.urls import path\n'), ((475, 553), 'django.urls.path', 'path', (['"""error_field_view"""', 'views.error_field_view'], {'name': '"""test_error_field_view"""'}), "('error_field_view', views.error_field_view, name='test_error_field_view')\n", (479, 553), False, 'from django.urls import path\n')]
|
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms, datasets, models
from collections import OrderedDict
from PIL import Image
import matplotlib.pyplot as plt
import json
import argparse
def load_checkpoint(checkpoint_path, model):
checkpoint = torch.load(checkpoint_path)
if(model == "vgg"):
nhu = checkpoint['nhu']
model = models.vgg11(pretrained=True)
for param in model.parameters():
param.requires_grad = False
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(25088, nhu)),
('relu', nn.ReLU()),
('fc2', nn.Linear(nhu, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
model.load_state_dict(checkpoint['state_dict'])
model.class_to_idx = checkpoint['class_to_idx']
elif(model == "densenet"):
nhu = checkpoint['nhu']
model = models.densenet121(pretrained=True)
for param in model.parameters():
param.requires_grad = False
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(1024, nhu)),
('relu', nn.ReLU()),
('fc2', nn.Linear(nhu, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
model.load_state_dict(checkpoint['state_dict'])
model.class_to_idx = checkpoint['class_to_idx']
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
pil_image = Image.open(image)
pil_image = pil_image.resize((256, 256))
width, height = pil_image.size # Get dimensions
left = (width - 224)/2
top = (height - 224)/2
right = (width + 224)/2
bottom = (height + 224)/2
pil_image = pil_image.crop((left, top, right, bottom))
pil_image = pil_image.convert('RGB')
np_image = np.array(pil_image)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = (np_image-mean)/std
np_image = np_image.transpose((2, 0, 1))
return torch.from_numpy(np_image)
def predict(image_path, model, device="cpu", topk=5):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
print(device)
output_image = process_image(image_path).to(device)
image = torch.zeros([64, 3, 224, 224], dtype=torch.float64).to(device)
image += output_image.to(device)
model.to(device)
model.eval()
torch.no_grad()
logps = model.forward(image.float())
ps = torch.exp(logps)
probability, index = torch.topk(ps, topk, dim=1)
return probability.to(device), index.to(device)
def get_input_args():
"""
Retrieves and parses the 3 command line arguments provided by the user when
they run the program from a terminal window. This function uses Python's
argparse module to created and defined these 3 command line arguments. If
the user fails to provide some or all of the 3 arguments, then the default
values are used for the missing arguments.
Command Line Arguments:
1. Image Folder as --dir with default value 'flowers'
2. CNN Model Architecture as --arch with default value 'vgg'
3. GPU as --GPU with default value 'cpu'
This function returns these arguments as an ArgumentParser object.
Parameters:
None - simply using argparse module to create & store command line arguments
Returns:
parse_args() -data structure that stores the command line arguments object
"""
# Replace None with parser.parse_args() parsed argument collection that
# you created with this function
# Creates Argument Parser object named parser
parser = argparse.ArgumentParser()
# Argument 1: that's a path to a folder
parser.add_argument('input', type=str,
help='path to the image')
parser.add_argument('ckpdir', type=str,
help='path to the folder of check point')
parser.add_argument('--arch', type=str, default='vgg',
help='The Network architecture')
parser.add_argument('--gpu', type=bool, default=False,
help='gpu enable')
parser.add_argument('--topk', type=float, default=5,
help='topk')
parser.add_argument('--category_names', type=str, default='cat_to_name.json',
help='directory of jason file')
# Assigns variable in_args to parse_args()
in_args = parser.parse_args()
return in_args
|
[
"torch.topk",
"argparse.ArgumentParser",
"torchvision.models.vgg11",
"torch.nn.ReLU",
"torch.nn.LogSoftmax",
"torch.load",
"torchvision.models.densenet121",
"PIL.Image.open",
"torch.exp",
"numpy.array",
"torch.nn.Linear",
"torch.zeros",
"torch.no_grad",
"torch.from_numpy"
] |
[((329, 356), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (339, 356), False, 'import torch\n'), ((1656, 1673), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (1666, 1673), False, 'from PIL import Image\n'), ((2036, 2067), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (2044, 2067), True, 'import numpy as np\n'), ((2078, 2109), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (2086, 2109), True, 'import numpy as np\n'), ((2201, 2227), 'torch.from_numpy', 'torch.from_numpy', (['np_image'], {}), '(np_image)\n', (2217, 2227), False, 'import torch\n'), ((2608, 2623), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2621, 2623), False, 'import torch\n'), ((2674, 2690), 'torch.exp', 'torch.exp', (['logps'], {}), '(logps)\n', (2683, 2690), False, 'import torch\n'), ((2716, 2743), 'torch.topk', 'torch.topk', (['ps', 'topk'], {'dim': '(1)'}), '(ps, topk, dim=1)\n', (2726, 2743), False, 'import torch\n'), ((3843, 3868), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3866, 3868), False, 'import argparse\n'), ((429, 458), 'torchvision.models.vgg11', 'models.vgg11', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (441, 458), False, 'from torchvision import transforms, datasets, models\n'), ((2001, 2020), 'numpy.array', 'np.array', (['pil_image'], {}), '(pil_image)\n', (2009, 2020), True, 'import numpy as np\n'), ((994, 1029), 'torchvision.models.densenet121', 'models.densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1012, 1029), False, 'from torchvision import transforms, datasets, models\n'), ((2466, 2517), 'torch.zeros', 'torch.zeros', (['[64, 3, 224, 224]'], {'dtype': 'torch.float64'}), '([64, 3, 224, 224], dtype=torch.float64)\n', (2477, 2517), False, 'import torch\n'), ((609, 630), 'torch.nn.Linear', 'nn.Linear', (['(25088)', 'nhu'], {}), '(25088, nhu)\n', (618, 630), True, 'import 
torch.nn as nn\n'), ((654, 663), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (661, 663), True, 'import torch.nn as nn\n'), ((686, 705), 'torch.nn.Linear', 'nn.Linear', (['nhu', '(102)'], {}), '(nhu, 102)\n', (695, 705), True, 'import torch.nn as nn\n'), ((731, 751), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (744, 751), True, 'import torch.nn as nn\n'), ((1180, 1200), 'torch.nn.Linear', 'nn.Linear', (['(1024)', 'nhu'], {}), '(1024, nhu)\n', (1189, 1200), True, 'import torch.nn as nn\n'), ((1224, 1233), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1231, 1233), True, 'import torch.nn as nn\n'), ((1256, 1275), 'torch.nn.Linear', 'nn.Linear', (['nhu', '(102)'], {}), '(nhu, 102)\n', (1265, 1275), True, 'import torch.nn as nn\n'), ((1301, 1321), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (1314, 1321), True, 'import torch.nn as nn\n')]
|
# Ad Soyad: <NAME> No: 180401041
#import numpy.matlib #Matrislerde kullanılabilir
from matrix_operations import matrix_transpose # gerekli matris işlemleri için fonksiyonların dahil edilmesi.
# D=A^T --> D= np.transpose(A) --> transpose kullanımı
from matrix_operations import matrix_multiplication # C=AxB(AB=C) --> C=np.dot(A,B) --> matrislerde çarpma işlemi
from matrix_operations import matrix_inverse # D=A^-1 --> D= np.linalg.inv --> matrisin tersinin alınması için..
from copy import copy #ilk veriyi kaybetmeden kopyası üzerinde işlem yapmak için kullanılır..
from __init__ import __data_file__ #veri girişi için kullandım , init=initialization(başlatma) ile daha kolay bir görünüm elde ettik..
from __init__ import __output_file__ #veri çıkışı için kullandım
import numpy as np # numpy artık np değişkeniyle ifade ediliyor..
import sys #dir(sys) sys.argv komutu, programın ismi ile birlikte, bu programa parametre olarak verilen değerleri de bir liste halinde saklıyor.
import os #gerek yok silinebilir
# import sys sayesinde %d,%s gibi C diline ait göstergelerle sayısal ifadelerimiz daha da kolaylaştı
'''
sys kullanımı ....--> en aşağıda arguments=sys.argv..
def çık():
print('Çıkılıyor...')
sys.exit() # programı kapanmaya zorlamak için kullanılabilir.
if len(sys.argv) < 2: #eğer parametre veya verilerde istenilenden fazlası veya azı varsa kullanılabilir
print('Gerekli parametreleri girmediniz!')
çık()
elif len(sys.argv) > 2: #sys.argv kullandığım parametreleri liste halinde tutar
print('Çok fazla parametre girdiniz!')
çık()
elif sys.argv[1] in ['-v', '-V']:
print('Program sürümü: 0.8')
else:
mesaj = 'Girdiğiniz parametre ({}) anlaşılamadı!'
print(mesaj.format(sys.argv[1]))
çık()
---> BU ŞEKİLDE YETERLİ DEĞİL DETAYLANMASI LAZIM..
'''
'''
class BColors:
ENDC = '\033[0m'-->kullanılır
BOLD = '\033[1m'-->??
UNDERLINE = '\033[4m'-->??
INACTIVE = '\033[90m'
FAIL = '\033[91m'-->kullanılır
OKGREEN = '\033[92m'
WARNING = '\033[93m'-->kullanılır
OKBLUE = '\033[94m'
HEADER = '\033[95m'
COMMENT = '\033[96m'
BLOCK = '\033[97m'
CODE = '\033[98m'
'''
class term_renkleri:
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
#BOLD = '\033[1m'
#UNDERLINE = '\033[4m'
#COMMENT = '\033[96m'
class process:#süreç
def __init__(self, veri_içerik: list): # init yapıcı(constructor) fonksiyondur , self parametresi zorunludur.
"""
Bu sınıf tüm ana işlemleri yürütür
: param veri_içerik: Bir veri listesi
"""
self.veri_ = veri_içerik
self.first_var = 0 #ilk varyasyon = 0 , başlangıc
self.last_var = len(self.veri_) #son varyasyon =len(self.veri)
self.grade_regression_result = None # regresyon iki ya da daha fazla değişken arasındaki değişimleri ölçmeye yarar.(Grafikler ile görselleşmesi sonucunda da anlaşılırlığı yükselir.)
self.results = list() #sonuçları listeye atacağız aşağıdaki işlemlerle
self.error_results = list() #hata sonuçları için yine bir boş liste kullanımı yapıldı.(for error results..)
def set_data_range(self, first=0, last=0, all_=False):
"""
Regresyon için veri aralığını ayarlama
: param first: Verilerin ilk dizini
: param last: Son dizin veri verileri
: param all_: Tüm verileri seç
: dönüş: Yok(None)
"""
if all_:#if dene
self.first_var = 0
self.last_var = len(self.veri_)
else:
self.first_var = first #ilk varyasyon ilk son v.. son
self.last_var = last
print(
term_renkleri.WARNING + "Veri aralığının %d dan %d a kadar ayarlanması!" % (self.first_var, self.last_var) + term_renkleri.ENDC)
# veri aralığının 0 dan 59 a ayarlaması gibi
def regression_for_grade(self, derece=0, no_print=False):#sınıf için regresyon
# çözüme(solution) ulaşmam için :
# X * çözümler = Y
# çözümler = (X_t * X)^-1 * X_t * Y
solution_matrix = np.zeros(shape=(derece + 1, 1), dtype=float)#çözelti matrix
x_matrix = np.zeros(shape=((self.last_var - self.first_var), derece + 1), dtype=float)
y_matrix = np.zeros(shape=((self.last_var - self.first_var), 1), dtype=float)
# Prepair matrixs matris hazırlanışı
y_index = 0
for i in range(0, (self.last_var - self.first_var)):
for j in range(0, x_matrix.shape[1]):
x_matrix[i][j] = pow(float(self.veri_[i + self.first_var]), j)
y_matrix[i][0] = float(y_index)
y_index += 1
x_trans_matrix = matrix_transpose(x_matrix) #transpozunun alınması
multi_matrix = matrix_multiplication(x_trans_matrix, x_matrix)#matris ile transpozunun alınması tersini alma işlemi
inversed_matrix = matrix_inverse(multi_matrix)#matrisin tersinin alınması
multi_two_matrix = matrix_multiplication(x_trans_matrix, y_matrix)
multi_three_matrix = matrix_multiplication(inversed_matrix, multi_two_matrix)
solution_matrix = multi_three_matrix
self.grade_regression_result = copy(solution_matrix)
self.results.append(self.grade_regression_result) #regresyon sonuçları listemize atıldı.. 1
to_printed = ""
to_printed += str(derece) + ". derece regresyon sonuçlarım : \n"
to_printed += str(self.grade_regression_result[0])
for i in range(1, derece + 1):
to_printed += " + " + str(self.grade_regression_result[i]) + "X"
to_printed += "^^" + str(i)
to_printed += " = Y"
if not no_print:
print(to_printed)
def calculate_most_usefull(self): #en yararlı olanı hesapla
for i in range(len(self.results)):
avarage = 0.0
y_index = 0
for x_data in self.veri_:
X = float(x_data)
Y = y_index
y_index += 1
total = 0.0
for j in self.results[i]:
total += float(j) * pow(X, j)
E = total - Y
avarage += E
avarage /= len(self.veri_)
self.error_results.append(avarage)
for i in range(len(self.error_results)):
if self.error_results[i] < 0:
self.error_results[i] *= -1
the_lowest_error = self.error_results[0]
the_lowest_error_index = 0
for i in range(len(self.error_results)):
if self.error_results[i] < the_lowest_error:
the_lowest_error = self.error_results[i]
the_lowest_error_index = i
print("Polinom bölgesindeki en düşük hata (aralıklar karşılaştırıldı): %d .derece regresyon ile E=%s"
% ((the_lowest_error_index + 1), the_lowest_error))
def veri_uzunluk(self): #verileri almak için fonksiyon
return len(self.veri_)
def kill_vars(self): #ölüm varyasyonu
self.grade_regression_result = None
self.results = list()
self.error_results = list()
def write_to_file(self, the_dir): #dosyaya yazma işlemi
with open(the_dir + "/%s" % __output_file__, "w") as fh:
to_printed = ""
for i in range(len(self.results)):
to_printed += str(i + 1) + " Reggression\t"
for j in range(len(self.results[i])):
to_printed += str(self.results[i][j]) + "X^^" + str(j) + "\t"
to_printed += "\n"
fh.write(to_printed)
print(term_renkleri.WARNING + "%s file generated!" % __output_file__ + term_renkleri.ENDC)
def main():
if arguman: # argüman(args)=sys.argv ataması ile ana listede parametre tutmayı tercih ettim.
print(term_renkleri.WARNING + "Argüman işleyici yok!" + term_renkleri.ENDC)
# Aslında buna gerek yok ,çünkü dosyam zaten var
yeni_veri = None
working_directory = os.getcwd()
try: #try exception yapısı bak, hata mesajı vermek için
with open(working_directory + "/%s" % __data_file__, "r") as fh:
string_format = fh.read()
a = string_format.splitlines()
# If last line of file is not /n son dosya satırı değilse
for i in range(len(a)):
if a[i] == "":
a.pop(len(a)-1)
yeni_veri = copy(a) #copy ile ana liste elemanları korundu..
except FileNotFoundError: #dosya bulunamadı hatası buna da gerek yok aslında
raise Exception("Dosya bulunamadı! %s file" % __data_file__)
if not yeni_veri: #dosya var ama veri yoksa verilecek mesaj
raise Exception("Dosya bulundu ancak okuma başarısız oldu. ")
print(term_renkleri.WARNING + "Dosya açıldı ve başarıyla okundu" + term_renkleri.ENDC)
#başarılı dosyaaçılması ve veriokunması durumu
print(term_renkleri.WARNING + "İlk soru başlangıç:" + term_renkleri.ENDC)
new_process = process(yeni_veri)
new_process.set_data_range(all_=True)
new_process.regression_for_grade(derece=1)
new_process.regression_for_grade(derece=2)
new_process.regression_for_grade(derece=3)
new_process.regression_for_grade(derece=4)
new_process.regression_for_grade(derece=5)
new_process.regression_for_grade(derece=6)
#new_process.regressionn_for_grade(grade=7)
new_process.write_to_file(working_directory)
print(term_renkleri.WARNING + "İLK SORUNUN BAŞARIYLA SONLANMASI. \t" + term_renkleri.ENDC)#ilk soru bitti ikinci soru işleniyor
print(term_renkleri.WARNING + "Burası ikincinin başlangıc noktası:" + term_renkleri.ENDC)
new_process.calculate_most_usefull()
print(term_renkleri.WARNING + "İKİNCİ SORUNUN BAŞARILA SONLANMASI. \t" + term_renkleri.ENDC)#ikinci soru bitti üçüncü soru işleniyor
print(term_renkleri.WARNING + "Burası üçüncünün başlangıç noktası :" + term_renkleri.ENDC)
print(term_renkleri.FAIL + "Taşma durumuna dikkat edilmeli !!" + term_renkleri.ENDC)# taşmaya dikkat et
for i in range(int(new_process.veri_uzunluk() / 10) + 1):
first = i * 10
last = i * 10 + 10 #last > first
if i >= int(new_process.veri_uzunluk() / 10):
last = new_process.veri_uzunluk()
new_process.kill_vars()
new_process.set_data_range(first, last)# ilk ,son karşılaştırma için olabilir??
new_process.regression_for_grade(derece=1, no_print=True)
new_process.regression_for_grade(derece=2, no_print=True)
new_process.regression_for_grade(derece=3, no_print=True)
new_process.regression_for_grade(derece=4, no_print=True)
new_process.regression_for_grade(derece=5, no_print=True)
new_process.regression_for_grade(derece=6, no_print=True)
new_process.calculate_most_usefull()
if __name__ == '__main__':
arguman = sys.argv[1:]
main()
|
[
"matrix_operations.matrix_transpose",
"os.getcwd",
"numpy.zeros",
"copy.copy",
"matrix_operations.matrix_inverse",
"matrix_operations.matrix_multiplication"
] |
[((8209, 8220), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8218, 8220), False, 'import os\n'), ((4313, 4357), 'numpy.zeros', 'np.zeros', ([], {'shape': '(derece + 1, 1)', 'dtype': 'float'}), '(shape=(derece + 1, 1), dtype=float)\n', (4321, 4357), True, 'import numpy as np\n'), ((4392, 4465), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.last_var - self.first_var, derece + 1)', 'dtype': 'float'}), '(shape=(self.last_var - self.first_var, derece + 1), dtype=float)\n', (4400, 4465), True, 'import numpy as np\n'), ((4487, 4551), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.last_var - self.first_var, 1)', 'dtype': 'float'}), '(shape=(self.last_var - self.first_var, 1), dtype=float)\n', (4495, 4551), True, 'import numpy as np\n'), ((4906, 4932), 'matrix_operations.matrix_transpose', 'matrix_transpose', (['x_matrix'], {}), '(x_matrix)\n', (4922, 4932), False, 'from matrix_operations import matrix_transpose\n'), ((4979, 5026), 'matrix_operations.matrix_multiplication', 'matrix_multiplication', (['x_trans_matrix', 'x_matrix'], {}), '(x_trans_matrix, x_matrix)\n', (5000, 5026), False, 'from matrix_operations import matrix_multiplication\n'), ((5106, 5134), 'matrix_operations.matrix_inverse', 'matrix_inverse', (['multi_matrix'], {}), '(multi_matrix)\n', (5120, 5134), False, 'from matrix_operations import matrix_inverse\n'), ((5198, 5245), 'matrix_operations.matrix_multiplication', 'matrix_multiplication', (['x_trans_matrix', 'y_matrix'], {}), '(x_trans_matrix, y_matrix)\n', (5219, 5245), False, 'from matrix_operations import matrix_multiplication\n'), ((5284, 5340), 'matrix_operations.matrix_multiplication', 'matrix_multiplication', (['inversed_matrix', 'multi_two_matrix'], {}), '(inversed_matrix, multi_two_matrix)\n', (5305, 5340), False, 'from matrix_operations import matrix_multiplication\n'), ((5425, 5446), 'copy.copy', 'copy', (['solution_matrix'], {}), '(solution_matrix)\n', (5429, 5446), False, 'from copy import copy\n'), ((8633, 8640), 'copy.copy', 'copy', 
(['a'], {}), '(a)\n', (8637, 8640), False, 'from copy import copy\n')]
|
"""yieldfx plot"""
import calendar
from collections import OrderedDict
import datetime
import pandas as pd
from pyiem.meteorology import gdd
from pyiem.plot.use_agg import plt
from pyiem.datatypes import temperature, distance
from pyiem.util import get_autoplot_context
# Mapping of station identifier -> human readable label (region + site);
# drives the 'location' select box in get_description().
STATIONS = OrderedDict([
    ('ames', 'Central (Ames)'),
    ('cobs', 'Central (COBS)'),
    ('crawfordsville', 'Southeast (Crawfordsville)'),
    ('kanawha', 'Northern (Kanawha)'),
    ('lewis', 'Southwest (Lewis)'),
    ('mcnay', 'Southern (Chariton/McNay)'),
    ('muscatine', 'Southeast (Muscatine)'),
    ('nashua', 'Northeast (Nashua)'),
    ('sutherland', 'Northwest (Sutherland)')])
# Mapping of plot-type identifier -> axis/title label; drives the
# 'ptype' select box and the y-axis label in plotter().
PLOTS = OrderedDict([
    ('gdd', 'Growing Degree Days [F]'),
    ('rain', 'Precipitation [in]'),
    ('maxt', 'Daily Maximum Temperature [F]'),
    ('mint', 'Daily Minimum Temperature [F]'),
    ])
def get_description():
    """Describe this autoplot app: flags plus the form arguments it takes."""
    location_arg = dict(type='select', name='location', default='ames',
                        label='Select Location:', options=STATIONS)
    ptype_arg = dict(type='select', name='ptype', default='gdd',
                     label='Select Plot Type:', options=PLOTS)
    sdate_arg = dict(type='text', name='sdate', default='mar15',
                     label='Start Date:')
    return {
        'data': True,
        'description': """ """,
        'arguments': [location_arg, ptype_arg, sdate_arg],
    }
def load(dirname, location, sdate):
    """Read a yieldfx ``<location>.met`` file into a DataFrame.

    Args:
        dirname (str): directory holding the ``.met`` files.
        location (str): station identifier (file basename without ``.met``).
        sdate (date/datetime): first day of interest; rows with an earlier
            day-of-year are discarded.

    Returns:
        pandas.DataFrame indexed by date, with per-day weather columns plus
        computed ``gddcum`` and ``raincum`` (inches) accumulations per year.
    """
    data = []
    idx = []
    mindoy = int(sdate.strftime("%j"))
    # Use a context manager so the file handle is always closed; the
    # original left the handle open for the garbage collector to reap.
    with open("%s/%s.met" % (dirname, location)) as fh:
        for line in fh:
            line = line.strip()
            # data rows begin with a 19xx/20xx year; anything else is header
            if not line.startswith('19') and not line.startswith('20'):
                continue
            tokens = line.split()
            if int(tokens[1]) < mindoy:
                continue
            data.append(tokens)
            # columns 0/1 are year and day-of-year; build a real date index
            ts = (datetime.date(int(tokens[0]), 1, 1) +
                  datetime.timedelta(days=int(tokens[1])-1))
            idx.append(ts)
    # short rows lack the gdd and soil temperature/moisture columns
    if len(data[0]) < 10:
        cols = ['year', 'doy', 'radn', 'maxt', 'mint', 'rain']
    else:
        cols = ['year', 'doy', 'radn', 'maxt', 'mint',
                'rain', 'gdd', 'st4', 'st12', 'st24',
                'st50', 'sm12', 'sm24', 'sm50']
    df = pd.DataFrame(data, index=idx, columns=cols)
    for col in cols:
        df[col] = pd.to_numeric(df[col], errors='coerce')
    if len(data[0]) < 10:
        # compute growing degree days when the file did not provide them
        df['gdd'] = gdd(temperature(df['maxt'].values, 'C'),
                        temperature(df['mint'].values, 'C'))
    df['gddcum'] = df.groupby(['year'])['gdd'].apply(lambda x: x.cumsum())
    df['raincum'] = distance(
        df.groupby(['year'])['rain'].apply(lambda x: x.cumsum()),
        'MM').value('IN')
    return df
def plotter(fdict):
    """Build the yieldfx comparison plot for one station and plot type.

    Loads the observation+forecast file and the climatology baseline,
    plots the current year against the climatology and historical range,
    and returns the figure plus a tidy DataFrame for CSV/Excel export.
    """
    ctx = get_autoplot_context(fdict, get_description())
    location = ctx['location']
    ptype = ctx['ptype']
    # sdate arrives as e.g. 'mar15'; year defaults to 1900, only %j is used
    sdate = datetime.datetime.strptime(ctx['sdate'], '%b%d')
    df = load("/mesonet/share/pickup/yieldfx", location, sdate)
    cdf = load("/opt/iem/scripts/yieldfx/baseline", location, sdate)
    today = datetime.date.today()
    thisyear = df[df['year'] == today.year].copy()
    thisyear.reset_index(inplace=True)
    thisyear.set_index('doy', inplace=True)
    # Drop extra day from cdf during non-leap year
    if today.year % 4 != 0:
        cdf = cdf[cdf['doy'] < 366]
        df = df[df['doy'] < 366]
    # Create a specialized result dataframe for CSV, Excel output options
    resdf = pd.DataFrame(index=thisyear.index)
    resdf.index.name = 'date'
    resdf['doy'] = thisyear.index.values
    resdf.reset_index(inplace=True)
    resdf.set_index('doy', inplace=True)
    # write current year data back to resdf
    for _v, _u in zip(['gddcum', 'raincum'],
                      ['F', 'in']):
        resdf["%s[%s]" % (_v, _u)] = thisyear[_v]
    # temperatures are stored in Celsius; export in Fahrenheit
    for _v in ['mint', 'maxt']:
        resdf["%s[F]" % (_v)] = temperature(thisyear[_v].values,
                                            'C').value('F')
    resdf['rain[in]'] = distance(thisyear['rain'], 'MM').value('IN')
    # climatology mean plus historical min/max envelope per day-of-year
    for _ptype, unit in zip(['gdd', 'rain'], ['F', 'in']):
        resdf[_ptype+'cum_climo[%s]' % (unit, )
              ] = cdf.groupby('doy')[_ptype+'cum'].mean()
        resdf[_ptype+'cum_min[%s]' % (unit, )
              ] = df.groupby('doy')[_ptype+'cum'].min()
        resdf[_ptype+'cum_max[%s]' % (unit, )
              ] = df.groupby('doy')[_ptype+'cum'].max()
    for _ptype in ['maxt', 'mint']:
        resdf[_ptype+'_climo[F]'] = temperature(
            cdf.groupby('doy')[_ptype].mean().values, 'C').value('F')
        resdf[_ptype+'_min[F]'] = temperature(
            df.groupby('doy')[_ptype].min().values, 'C').value('F')
        resdf[_ptype+'_max[F]'] = temperature(
            df.groupby('doy')[_ptype].max().values, 'C').value('F')
    (fig, ax) = plt.subplots(1, 1, figsize=(8, 6))
    # accumulated plots use the *cum columns; temperatures plot daily values
    if ptype in ['gdd', 'rain']:
        ax.plot(thisyear.index.values, thisyear[ptype+'cum'], zorder=4,
                color='b',
                lw=2, label='%s Obs + CFS Forecast' % (today.year,))
        climo = cdf.groupby('doy')[ptype+'cum'].mean()
        ax.plot(climo.index.values, climo.values, lw=2, color='k',
                label="Climatology", zorder=3)
        xrng = df.groupby('doy')[ptype+'cum'].max()
        nrng = df.groupby('doy')[ptype+'cum'].min()
        ax.fill_between(xrng.index.values, nrng.values, xrng.values,
                        color='tan', label="Range", zorder=2)
    else:
        ax.plot(thisyear.index.values,
                temperature(thisyear[ptype], 'C').value('F'),
                zorder=4, color='b',
                lw=2, label='%s Obs + CFS Forecast' % (today.year,))
        climo = cdf.groupby('doy')[ptype].mean()
        ax.plot(climo.index.values,
                temperature(climo.values, 'C').value('F'), lw=2, color='k',
                label='Climatology', zorder=3)
        xrng = df.groupby('doy')[ptype].max()
        nrng = df.groupby('doy')[ptype].min()
        ax.fill_between(xrng.index.values,
                        temperature(nrng.values, 'C').value('F'),
                        temperature(xrng.values, 'C').value('F'),
                        color='tan', label="Range", zorder=2)
    ax.set_title("%s %s" % (STATIONS[location], PLOTS[ptype]))
    ax.set_ylabel(PLOTS[ptype])
    ax.legend(loc=(0.03, -0.16), ncol=3, fontsize=12)
    # ticks at the first day-of-year of each month
    ax.set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335,
                   365))
    ax.set_xticklabels(calendar.month_abbr[1:])
    ax.grid(True)
    ax.set_xlim(int(sdate.strftime("%j")),
                int(datetime.date(today.year, 12, 1).strftime("%j")))
    # nudge the axes up to make room for the legend below the plot
    pos = ax.get_position()
    ax.set_position([pos.x0, pos.y0 + 0.05, pos.width, pos.height * 0.95])
    return fig, resdf
if __name__ == '__main__':
    # ad-hoc local invocation with an empty form dict (defaults apply)
    plotter({})
|
[
"pandas.DataFrame",
"pyiem.datatypes.temperature",
"pyiem.datatypes.distance",
"datetime.date",
"datetime.date.today",
"datetime.datetime.strptime",
"collections.OrderedDict",
"pyiem.plot.use_agg.plt.subplots",
"pandas.to_numeric"
] |
[((283, 643), 'collections.OrderedDict', 'OrderedDict', (["[('ames', 'Central (Ames)'), ('cobs', 'Central (COBS)'), ('crawfordsville',\n 'Southeast (Crawfordsville)'), ('kanawha', 'Northern (Kanawha)'), (\n 'lewis', 'Southwest (Lewis)'), ('mcnay', 'Southern (Chariton/McNay)'),\n ('muscatine', 'Southeast (Muscatine)'), ('nashua', 'Northeast (Nashua)'\n ), ('sutherland', 'Northwest (Sutherland)')]"], {}), "([('ames', 'Central (Ames)'), ('cobs', 'Central (COBS)'), (\n 'crawfordsville', 'Southeast (Crawfordsville)'), ('kanawha',\n 'Northern (Kanawha)'), ('lewis', 'Southwest (Lewis)'), ('mcnay',\n 'Southern (Chariton/McNay)'), ('muscatine', 'Southeast (Muscatine)'), (\n 'nashua', 'Northeast (Nashua)'), ('sutherland', 'Northwest (Sutherland)')])\n", (294, 643), False, 'from collections import OrderedDict\n'), ((708, 884), 'collections.OrderedDict', 'OrderedDict', (["[('gdd', 'Growing Degree Days [F]'), ('rain', 'Precipitation [in]'), (\n 'maxt', 'Daily Maximum Temperature [F]'), ('mint',\n 'Daily Minimum Temperature [F]')]"], {}), "([('gdd', 'Growing Degree Days [F]'), ('rain',\n 'Precipitation [in]'), ('maxt', 'Daily Maximum Temperature [F]'), (\n 'mint', 'Daily Minimum Temperature [F]')])\n", (719, 884), False, 'from collections import OrderedDict\n'), ((2287, 2330), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'idx', 'columns': 'cols'}), '(data, index=idx, columns=cols)\n', (2299, 2330), True, 'import pandas as pd\n'), ((2954, 3002), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["ctx['sdate']", '"""%b%d"""'], {}), "(ctx['sdate'], '%b%d')\n", (2980, 3002), False, 'import datetime\n'), ((3149, 3170), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3168, 3170), False, 'import datetime\n'), ((3541, 3575), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'thisyear.index'}), '(index=thisyear.index)\n', (3553, 3575), True, 'import pandas as pd\n'), ((4897, 4931), 'pyiem.plot.use_agg.plt.subplots', 'plt.subplots', (['(1)', 
'(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (4909, 4931), False, 'from pyiem.plot.use_agg import plt\n'), ((2392, 2431), 'pandas.to_numeric', 'pd.to_numeric', (['df[col]'], {'errors': '"""coerce"""'}), "(df[col], errors='coerce')\n", (2405, 2431), True, 'import pandas as pd\n'), ((2482, 2517), 'pyiem.datatypes.temperature', 'temperature', (["df['maxt'].values", '"""C"""'], {}), "(df['maxt'].values, 'C')\n", (2493, 2517), False, 'from pyiem.datatypes import temperature, distance\n'), ((2543, 2578), 'pyiem.datatypes.temperature', 'temperature', (["df['mint'].values", '"""C"""'], {}), "(df['mint'].values, 'C')\n", (2554, 2578), False, 'from pyiem.datatypes import temperature, distance\n'), ((4081, 4113), 'pyiem.datatypes.distance', 'distance', (["thisyear['rain']", '"""MM"""'], {}), "(thisyear['rain'], 'MM')\n", (4089, 4113), False, 'from pyiem.datatypes import temperature, distance\n'), ((3964, 4001), 'pyiem.datatypes.temperature', 'temperature', (['thisyear[_v].values', '"""C"""'], {}), "(thisyear[_v].values, 'C')\n", (3975, 4001), False, 'from pyiem.datatypes import temperature, distance\n'), ((5602, 5635), 'pyiem.datatypes.temperature', 'temperature', (['thisyear[ptype]', '"""C"""'], {}), "(thisyear[ptype], 'C')\n", (5613, 5635), False, 'from pyiem.datatypes import temperature, distance\n'), ((5855, 5885), 'pyiem.datatypes.temperature', 'temperature', (['climo.values', '"""C"""'], {}), "(climo.values, 'C')\n", (5866, 5885), False, 'from pyiem.datatypes import temperature, distance\n'), ((6121, 6150), 'pyiem.datatypes.temperature', 'temperature', (['nrng.values', '"""C"""'], {}), "(nrng.values, 'C')\n", (6132, 6150), False, 'from pyiem.datatypes import temperature, distance\n'), ((6187, 6216), 'pyiem.datatypes.temperature', 'temperature', (['xrng.values', '"""C"""'], {}), "(xrng.values, 'C')\n", (6198, 6216), False, 'from pyiem.datatypes import temperature, distance\n'), ((6669, 6701), 'datetime.date', 'datetime.date', (['today.year', '(12)', 
'(1)'], {}), '(today.year, 12, 1)\n', (6682, 6701), False, 'import datetime\n')]
|
from quart import jsonify
from providers.BlueprintProvider import BlueprintProvider
class Friends(BlueprintProvider):
    """Blueprint exposing Xbox Live friends/followers summary endpoints.

    Routes are registered via routes(), which the BlueprintProvider
    machinery is expected to invoke — TODO confirm against the base class.
    """
    def __craftSummary(self, data):
        # Reduce a friends-summary response object to the two counts the
        # API exposes as JSON.
        return jsonify(
            {
                "following": data.target_following_count,
                "followers": data.target_follower_count,
            }
        )
    def routes(self):
        # Both routes go through the cachedRoute decorator, so responses
        # are presumably cached by the decorator — verify its semantics.
        @self.xbl_decorator.cachedRoute("/summary/xuid/<int:xuid>")
        async def xuid(xuid):
            # Summary lookup by numeric XUID.
            return self.__craftSummary(
                await self.xbl_client.people.get_friends_summary_by_xuid(xuid)
            )
        @self.xbl_decorator.cachedRoute("/summary/gamertag/<gamertag>")
        async def gamertag(gamertag):
            # Summary lookup by gamertag string.
            return self.__craftSummary(
                await self.xbl_client.people.get_friends_summary_by_gamertag(gamertag)
            )
|
[
"quart.jsonify"
] |
[((172, 269), 'quart.jsonify', 'jsonify', (["{'following': data.target_following_count, 'followers': data.\n target_follower_count}"], {}), "({'following': data.target_following_count, 'followers': data.\n target_follower_count})\n", (179, 269), False, 'from quart import jsonify\n')]
|
from task import Task
from tasks import Tasks
from status import Status
import redis
import datetime
import json
from json_extension import check_update_list
from converter import datetime_converter
class TaskManager:
    """Redis-backed registry of tasks and their status-change history.

    All task state is serialized as JSON under a single redis key; the
    class is used purely through its class-level helpers (no instances).
    """
    _redis = None                # lazily-created redis.Redis client
    _task_management_key = None  # redis key holding the serialized tasks

    @staticmethod
    def __redis():
        """Connect to the local redis instance and remember the storage key."""
        server = "localhost"
        port = 6379
        db = 0
        TaskManager._redis = redis.Redis(server, port, db)
        TaskManager._task_management_key = "object-serial"

    @staticmethod
    def __find_task_object(json_object, name):
        """Return the decoded dict whose task_id equals *name*.

        *json_object* is an iterable of JSON strings; returns None
        implicitly when nothing matches.
        """
        # loop variable renamed from 'dict', which shadowed the builtin
        for entry in json_object:
            decoded = json.loads(entry)
            if decoded['task_id'] == name:
                return decoded

    @staticmethod
    def __find_task(json_object, name):
        """Return the first dict in *json_object* matching *name*, else None.

        Bug fix: the original required ``len(task) > 1`` before returning,
        so a *unique* match was dropped; any non-empty match list now
        returns its first element.
        """
        matches = [obj for obj in json_object if obj['task_id'] == name]
        if matches:
            return matches[0]
        return None

    @staticmethod
    def clear_task_tasks_obj_as_dict():
        """Delete the stored task collection from redis."""
        # check and then create redis server object
        if TaskManager._redis is None:
            TaskManager.__redis()
        TaskManager._redis.delete(TaskManager._task_management_key)

    @staticmethod
    def get_task_management():
        """Fetch and decode the stored task collection, or None if absent."""
        # check and then create redis server object
        if TaskManager._redis is None:
            TaskManager.__redis()
        tasks_data_as_bytes = TaskManager._redis.get(TaskManager._task_management_key)
        if tasks_data_as_bytes is not None:
            tasks_data_as_str = tasks_data_as_bytes.decode("utf-8")
            tasks_obj_as_dict = json.loads(tasks_data_as_str)
            return tasks_obj_as_dict
        return None

    @staticmethod
    def __update_json_object(tasks_obj_as_dict, replace_obj):
        """Intended to swap the stored entry matching replace_obj's task_id.

        NOTE(review): rebinding the loop variable does not modify the
        underlying collection, so this currently returns the input
        unchanged; behavior preserved pending a decision on intent.
        """
        for task in tasks_obj_as_dict:
            if json.loads(task)['task_id'] == replace_obj['task_id']:
                task = json.dumps(replace_obj)
                break
        return tasks_obj_as_dict

    @staticmethod
    def update_task_management_ext(event, name, status, id):
        """Append a new Status to each stored entry whose task_id matches
        *id* and persist the updated collection back to redis."""
        tasks_obj_as_dict = TaskManager.get_task_management()
        if tasks_obj_as_dict is not None:
            for element in tasks_obj_as_dict:
                for elt in tasks_obj_as_dict[element]:
                    if elt['task_id'] == id:
                        new_status = Status(id, name, str(datetime.datetime.now()), status)
                        elt['conditions'].append(new_status)
                        print("***->", elt)
            tasks = Tasks(tasks_obj_as_dict['conditions'])
            TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))

    @staticmethod
    def update_task_management(event, name, status, id):
        """Append a new Status to the matching task in a decoded copy of
        the stored collection.

        NOTE(review): the modified copy is never written back to redis
        (matches the original behavior).
        """
        tasks_obj_as_dict = TaskManager.get_task_management()
        if tasks_obj_as_dict is not None:
            # round-trip through JSON to obtain an independent copy
            cache_data = json.loads(json.dumps(tasks_obj_as_dict))
            if cache_data is not None:
                current_task = TaskManager.__find_task_object(cache_data['conditions'], id)
                if current_task is not None:
                    # get task status list
                    current_task_conditions = current_task['conditions']
                    # add new status in the task_conditions list
                    new_status = Status(id, name, datetime.datetime.now(), status)
                    current_task_conditions.append(new_status)
                    # update object
                    update_json_obj = TaskManager.__update_json_object(cache_data['conditions'], current_task)

    @staticmethod
    def create_new_task(message_type, task):
        """Create a Task for *task* and store it: as the first entry when
        nothing is stored yet, otherwise appended to the collection."""
        # check and then create redis server object
        if TaskManager._redis is None:
            TaskManager.__redis()
        _conditions = []
        _tasks = []
        tasks_obj_as_dict = TaskManager.get_task_management()
        if tasks_obj_as_dict is None:
            # first time creating task in the redis
            if task is not None:
                new_task = Task(message_type, task['id'], "init", _conditions)
                _tasks.append(new_task)
                tasks = Tasks(_tasks)
                TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))
        else:
            new_task = Task(message_type, task['id'], "init", _conditions)
            # round-trip to copy, then append the new task to the collection
            cache_data = json.loads(json.dumps(tasks_obj_as_dict))
            cache_data['conditions'].append(new_task)
            # unpack the stored dict's values into the Tasks constructor
            tasks = Tasks(*cache_data.values())
            TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))

    @staticmethod
    def testing_create_new_task(task):
        """Store *task* directly when nothing is stored yet; otherwise
        return the existing collection."""
        # check and then create redis server object
        if TaskManager._redis is None:
            TaskManager.__redis()
        tasks_obj_as_dict = TaskManager.get_task_management()
        if tasks_obj_as_dict is None:
            TaskManager._redis.set(TaskManager._task_management_key, json.dumps(task))
        else:
            return tasks_obj_as_dict
# from task_store.task import Task
# from task_store.tasks import Tasks
# from task_store.status import Status
# import redis
# import datetime
# from config.setting import Config
# import json
# from utilities.json_extension import check_update_list
# from task_store.converter import datetime_converter
# class TaskManager:
# _redis = None
# _task_management_key = None
# def __redis():
# server = Config.get_complete_property('redis','server')
# port = Config.get_complete_property('redis','port')
# db = Config.get_complete_property('redis','db')
# TaskManager._redis = redis.Redis(server, port, db)
# TaskManager._task_management_key = Config.get_complete_property('redis','task_management_key')
# @staticmethod
# def __find_task_object(json_object, name):
# for dict in json_object:
# x = json.loads(dict)
# if x['task_id'] == name:
# return x
# @staticmethod
# def __find_task(json_object, name):
# task = [obj for obj in json_object if obj['task_id']==name]
# if len(task) > 1 and task is not None:
# return task[0]
# return None
# @staticmethod
# def clear_task_tasks_obj_as_dict():
# # check and then create redis server object
# if TaskManager._redis is None:
# TaskManager.__redis()
# TaskManager._redis.delete(TaskManager._task_management_key)
# def get_task_management():
# # check and then create redis server object
# if TaskManager._redis is None:
# TaskManager.__redis()
# tasks_data_as_bytes = TaskManager._redis.get(TaskManager._task_management_key)
# if tasks_data_as_bytes is not None:
# tasks_data_as_str = tasks_data_as_bytes.decode("utf-8")
# tasks_obj_as_dict = json.loads(tasks_data_as_str)
# return tasks_obj_as_dict
# else:
# return None
# @staticmethod
# def __update_json_object(tasks_obj_as_dict, replace_obj):
# for task in tasks_obj_as_dict:
# if json.loads(task)['task_id'] == replace_obj['task_id']:
# task = json.dumps(replace_obj)
# break
# return tasks_obj_as_dict
# @staticmethod
# def update_task_management_ext(event, name, status, id):
# tasks_obj_as_dict = TaskManager.get_task_management()
# if tasks_obj_as_dict is not None:
# #Iterating all the fields of the JSON
# for element in tasks_obj_as_dict:
# #If Json Field value is a list
# if (isinstance(tasks_obj_as_dict[element], list)):
# # add new status in the task_conditions list
# new_status = Status(id, name, datetime.datetime.now(), status)
# check_update_list(tasks_obj_as_dict[element], element, new_status)
# tasks = Tasks(tasks_obj_as_dict['conditions'])
# TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))
# @staticmethod
# def update_task_management(event, name, status, id):
# tasks_obj_as_dict = TaskManager.get_task_management()
# if tasks_obj_as_dict is not None:
# #convert dict into json object called cache_object and add new item in the existing collection
# cache_data = json.loads(json.dumps(tasks_obj_as_dict))
# if cache_data is not None:
# current_task = TaskManager.__find_task_object(cache_data['conditions'], id)
# if current_task is not None:
# # get task status list
# current_task_conditions = current_task['conditions']
# # add new status in the task_conditions list
# new_status = Status(id, name, datetime.datetime.now(), status)
# current_task_conditions.append(new_status)
# # update object
# update_json_obj = TaskManager.__update_json_object(cache_data['conditions'], current_task)
# @staticmethod
# def create_new_task(message_type, task):
# # check and then create redis server object
# if TaskManager._redis is None:
# TaskManager.__redis()
# _conditions = []
# _tasks = []
# tasks_obj_as_dict = TaskManager.get_task_management()
# if tasks_obj_as_dict is None:
# #first time creating task in the redis
# if task is not None:
# new_task = Task(message_type, task.id, "init", _conditions)
# _tasks.append(new_task)
# tasks = Tasks(_tasks)
# TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))
# else:
# new_task = Task(message_type, task.id, "init", _conditions)
# #convert dict into json object called cache_object and add new item in the existing collection
# cache_data = json.loads(json.dumps(tasks_obj_as_dict))
# # print(cache_data)
# cache_data['conditions'].append(new_task)
# # prefixed by an asterisk operator to unpack the values in order to create a typename tuple subclass
# tasks = Tasks(*cache_data.values())
# TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))
# return tasks_obj_as_dict
# @staticmethod
# def testing_create_new_task(task):
# # check and then create redis server object
# if TaskManager._redis is None:
# TaskManager.__redis()
# tasks_obj_as_dict = TaskManager.get_task_management()
# if tasks_obj_as_dict is None:
# TaskManager._redis.set(TaskManager._task_management_key, json.dumps(task))
# else:
# return tasks_obj_as_dict
|
[
"redis.Redis",
"json.loads",
"json.dumps",
"tasks.Tasks",
"datetime.datetime.now",
"task.Task"
] |
[((382, 411), 'redis.Redis', 'redis.Redis', (['server', 'port', 'db'], {}), '(server, port, db)\n', (393, 411), False, 'import redis\n'), ((2690, 2728), 'tasks.Tasks', 'Tasks', (["tasks_obj_as_dict['conditions']"], {}), "(tasks_obj_as_dict['conditions'])\n", (2695, 2728), False, 'from tasks import Tasks\n'), ((586, 602), 'json.loads', 'json.loads', (['dict'], {}), '(dict)\n', (596, 602), False, 'import json\n'), ((1541, 1570), 'json.loads', 'json.loads', (['tasks_data_as_str'], {}), '(tasks_data_as_str)\n', (1551, 1570), False, 'import json\n'), ((5241, 5292), 'task.Task', 'Task', (['message_type', "task['id']", '"""init"""', '_conditions'], {}), "(message_type, task['id'], 'init', _conditions)\n", (5245, 5292), False, 'from task import Task\n'), ((1883, 1906), 'json.dumps', 'json.dumps', (['replace_obj'], {}), '(replace_obj)\n', (1893, 1906), False, 'import json\n'), ((3835, 3864), 'json.dumps', 'json.dumps', (['tasks_obj_as_dict'], {}), '(tasks_obj_as_dict)\n', (3845, 3864), False, 'import json\n'), ((4972, 5023), 'task.Task', 'Task', (['message_type', "task['id']", '"""init"""', '_conditions'], {}), "(message_type, task['id'], 'init', _conditions)\n", (4976, 5023), False, 'from task import Task\n'), ((5088, 5101), 'tasks.Tasks', 'Tasks', (['_tasks'], {}), '(_tasks)\n', (5093, 5101), False, 'from tasks import Tasks\n'), ((5450, 5479), 'json.dumps', 'json.dumps', (['tasks_obj_as_dict'], {}), '(tasks_obj_as_dict)\n', (5460, 5479), False, 'import json\n'), ((7017, 7033), 'json.dumps', 'json.dumps', (['task'], {}), '(task)\n', (7027, 7033), False, 'import json\n'), ((1805, 1821), 'json.loads', 'json.loads', (['task'], {}), '(task)\n', (1815, 1821), False, 'import json\n'), ((4273, 4296), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4294, 4296), False, 'import datetime\n'), ((2475, 2498), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2496, 2498), False, 'import datetime\n')]
|
from utils import *
import pandas as pd
import sys
import csv
# Per-player accumulators for scraped NBA career statistics; one value is
# appended to every list per player ("?" for players still in college,
# "0" when no stat page/table was found), keeping them index-aligned.
GP = []  # games played
MPG = []  # minutes per game
WS = []  # win shares
WSP48 = []  # win shares per 48 minutes
BPM = []  # box plus/minus
VORP = []  # value over replacement player
PLUSMINUS = []  # net plus/minus
# Aggregate view used to append one row of values across all seven lists
NBA_STATS = [GP, MPG, WS, WSP48, BPM, VORP, PLUSMINUS]
def main():
    """Entry point: load the master CSV, scrape NBA stats, merge and export."""
    # local renamed from 'all', which shadowed the builtin all()
    master = get_csv_file("add NBA stats? ")
    master = populate_NBA_all_statistics(master)
    addNBAStatsToMasterList(pd.read_csv('temp_master.csv'), master)
def populate_NBA_all_statistics(all):
    """Scrape basketball-reference for each player's NBA career stats.

    NOTE(review): the parameter name 'all' shadows the builtin all().
    Iterates the player DataFrame, resolves each player's page by trying
    successive page indices (…01.html, …02.html, …), fills the global stat
    lists, writes 'all_nba_stats.csv', and returns the stats DataFrame.
    """
    couldNotFindList = []
    nba_stats = all[['Season','Name']].copy()
    base_url = "https://www.basketball-reference.com/players"
    for index, row in all.iterrows():
        # players from the current season have no NBA stats yet
        if(row['Season'] == get_season_from_year(get_current_year())):
            print(row['Name'] + " is still in college.")
            appendValuesToNBALists(["?", "?", "?", "?", "?", "?", "?"])
            continue
        bkrefCorrectPage = True
        bkrefIdentifier, bkrefIndex, bkrefName = getBKRefIdentifierAndIndex(row['Name'])
        while True:
            url = base_url + bkrefIdentifier + "0" + str(bkrefIndex) + ".html"
            print(url)
            soup = find_site(url)
            # NOTE(review): when find_site keeps returning None (or the
            # info div is missing) this loop neither breaks nor advances
            # bkrefIndex — potential infinite loop; confirm find_site.
            if (soup):
                if (soup.find('div', {'class': 'index reading'}) or not soup.find('table')):
                    print("Reached 404 page - assuming there are no stats for " + row['Name'])
                    appendValuesToNBALists(["0", "0", "0", "0", "0", "0", "0"])
                    break
                quickBKRefPlayerInfoDiv = get_basketball_reference_player_info(soup)
                if (quickBKRefPlayerInfoDiv):
                    if (isOnCorrectPlayerPage(bkrefName, row['School'], quickBKRefPlayerInfoDiv)):
                        try:
                            tableID = soup.find('table')['id']
                        except KeyError:
                            tableID = None
                        # a college-only page means no NBA stats; try the
                        # next page index for this identifier
                        if (not tableID or tableID == 'all_college_stats'):
                            print("Could not find an NBA Basketball Reference page for " + row['Name'])
                            bkrefIndex = bkrefIndex + 1
                            bkrefCorrectPage = False
                            continue
                        else:
                            print("Found NBA player page for " + row['Name'])
                            populateNBAPlayerStatistics(soup)
                            break
                    else:
                        # wrong player page: retry the next index once
                        # before giving up on this player
                        if (bkrefCorrectPage == True):
                            bkrefIndex = bkrefIndex + 1
                            bkrefCorrectPage = False
                            continue
                        else:
                            print("Could not find a correct NBA player page for " + row['Name'])
                            couldNotFindList.append(index)
                            appendValuesToNBALists(["0", "0", "0", "0", "0", "0", "0"])
                            bkrefIndex = bkrefIndex + 1
                            break
                else:
                    print("Could not find player info div for " + url)
            else:
                print("Could not find page for url: " + url)
    nba_stats['NBA GP'] = GP
    nba_stats['NBA MPG'] = MPG
    nba_stats['NBA WS'] = WS
    nba_stats['NBA WSP48'] = WSP48
    nba_stats['NBA BPM'] = BPM
    nba_stats['NBA VORP'] = VORP
    nba_stats['NBA PLUSMINUS'] = PLUSMINUS
    nba_stats.to_csv('all_nba_stats.csv', index=False)
    return nba_stats
def getBKRefIdentifierAndIndex(name):
    """Build the basketball-reference URL identifier, starting page index,
    and formatted name for a player."""
    formatted = get_basketball_reference_formatted_name(name, NBA_PLAYER_NAME_EXCEPTIONS)
    first, last = formatted.replace("-", "").split(' ', 1)
    # id pattern: /<last initial>/<first 5 of last><first 2 of first>, lowercased
    identifier = "/{}/{}{}".format(last[0], last[:5], first[:2]).lower()
    index = check_value_in_dictionary_of_exceptions(formatted, NBA_INDEX_EXCEPTIONS, 1)
    return identifier, index, formatted
def isOnCorrectPlayerPage(name, school, playerInfo):
    """True when the scraped player-info text mentions both the player's
    name (apostrophes ignored, case-insensitive) and their school."""
    normalizedSchool = get_basketball_reference_formatted_school(school, NBA_SCHOOL_NAME_EXCEPTIONS, school)
    infoForNameMatch = playerInfo.replace("'", "").lower()
    return name.lower() in infoForNameMatch and normalizedSchool in playerInfo
def populateNBAPlayerStatistics(soup):
    """Collect career per-game, advanced, and plus/minus stats from a
    player's page and append them to the global stat lists."""
    careerValues = (
        findGivenStatOnPlayerPage(soup, 'all_per_game', ['g', 'mp_per_g'])
        + findGivenStatOnPlayerPage(soup, 'all_advanced', ['ws', 'ws_per_48', 'bpm', 'vorp'])
        + findGivenStatOnPlayerPage(soup, 'all_pbp', ['plus_minus_net'])
    )
    appendValuesToNBALists(careerValues)
# Check the designated table for the designated datastat
def findGivenStatOnPlayerPage(soup, table_ID, datastat_IDs):
    """Extract career-row values for the given data-stat ids from one
    basketball-reference stats table.

    Args:
        soup: parsed player page (BeautifulSoup-like object).
        table_ID: id of the wrapping div (e.g. 'all_per_game').
        datastat_IDs: list of data-stat cell ids to read.

    Returns:
        List of stat strings; "0" per id when the table is absent.
    """
    table = soup.find('div', {'id': table_ID})
    values = []  # renamed from 'list', which shadowed the builtin
    if table:
        career_stats = table('tfoot')[0]  # Guarantees first row in the footer (career)
        for datastat_ID in datastat_IDs:
            stat = (career_stats.find("td", {"data-stat": datastat_ID})).getText()
            values.append(stat)
    else:
        for datastat_ID in datastat_IDs:
            print("Did not find a stat for " + datastat_ID + " - adding zero.")
            values.append("0")
    return values
# For each player, add an entry to the NBA stats lists
def appendValuesToNBALists(l):
    """Append the i-th value of *l* to the i-th global stats list."""
    for position in range(7):
        NBA_STATS[position].append(l[position])
# Final step, add NBA stats to master list and export to CSV
def addNBAStatsToMasterList(all, nba):
    """Copy the scraped NBA columns onto the master frame, reorder its
    columns, and write the result to new_master.csv."""
    for column in ('NBA GP', 'NBA MPG', 'NBA WS', 'NBA WSP48',
                   'NBA BPM', 'NBA VORP', 'NBA PLUSMINUS'):
        all[column] = nba[column]
    all = reorder_columns(all)
    all.to_csv("new_master.csv", index=False)
# Run the scraper when executed directly
if __name__ == "__main__":
    main()
|
[
"pandas.read_csv"
] |
[((314, 344), 'pandas.read_csv', 'pd.read_csv', (['"""temp_master.csv"""'], {}), "('temp_master.csv')\n", (325, 344), True, 'import pandas as pd\n')]
|
from train_utils import train_model
from sample_models import custom_rnn_model
from keras.layers import SimpleRNN, GRU, LSTM
import argparse
import sys
import os
from os.path import join
if __name__ == '__main__':
    # Cap TensorFlow GPU memory so the trainer can share the card.
    from keras.backend.tensorflow_backend import set_session
    import tensorflow as tf
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.5
    set_session(tf.Session(config=config))
    print ("###############################################")
    print ("# ASR Trainer #")
    print ("###############################################")
    # Command-line interface: convolutional front-end options (-c*),
    # recurrent stack options (-r*), dense-layer dropout, and epochs.
    parser = argparse.ArgumentParser(description='ASR Driver')
    parser.add_argument('-o', dest='output', required=True, type=str, help='Path to folder containing model data input/output (.hd5 and .pickle files).')
    parser.add_argument('-i', dest='id', required=True, type=int, help='Id or name of the model')
    parser.add_argument('-cf', dest='conv_filters', type=int, help='# of convolution filters')
    parser.add_argument('-ck', dest='kernel_size', type=int, help='Size of convolution kernel')
    parser.add_argument('-cs', dest='conv_stride', type=int, help='Convolutional stride')
    parser.add_argument('-cp', dest='conv_padding', type=str, help="Convolutional padding mode ('same', or 'valid')")
    parser.add_argument('-cd', dest='conv_dropout', type=float, help='Dropout for convolutional output (between 0.0 and 1.0)')
    parser.add_argument('-rl', dest='recur_layers', type=int, help='Number of recurrent layers')
    parser.add_argument('-ru', dest='recur_units', nargs='*', type=int, help="List of 'rl' recurrent unit sizes")
    parser.add_argument('-rc', dest='recur_cells', nargs='*', type=int, help="List of 'rl' recurrent cell types (0: SimpleRNN, 1: GRU, or 2: LSTM)")
    parser.add_argument('-rb', dest='recur_bidis', nargs='*', type=int, help="List of 'rl' flags indicating whether the layer is bidirectional ('True', 'False')")
    parser.add_argument('-rd', dest='recur_dropouts', nargs='*', type=float, help="List of 'rl' dropouts (between 0.0 and 1.0)")
    parser.add_argument('-dd', dest='dense_dropout', type=float, help="Dropout for fully connected output layer")
    parser.add_argument('-e', dest='num_epochs', required=False, default=20, type=int, help="Number of epochs to train")
    args = parser.parse_args()
    # Decode the numeric cell/bidirectional codes into concrete values.
    # Fixed: the original compared with 'is' (identity) against int
    # literals — correct only by CPython small-int caching — and stored
    # one-shot map iterators; use '==' and materialize real lists so the
    # values can be indexed and iterated more than once downstream.
    args.recur_cells = [SimpleRNN if x == 0 else GRU if x == 1 else LSTM
                        for x in args.recur_cells]
    args.recur_bidis = [x != 0 for x in args.recur_bidis]
    print (args)
    model_weights_path = "model_{}.h5".format(args.id) #join(os.getcwd(), args.output, "model_{}.h5".format(args.id))
    model_hist_path = "model_{}.pickle".format(args.id) #join(os.getcwd(), args.output, "model_{}.pickle".format(args.id))
    print("\tModel weights path: {}".format(model_weights_path))
    print("\tModel train hist path: {}".format(model_hist_path))
    # --------
    # Build the network from the parsed hyper-parameters (13 MFCC features).
    model_5 = custom_rnn_model(input_dim=13, # change to 13 if you would like to use MFCC features
        conv_filters=args.conv_filters, conv_kernel_size=args.kernel_size, conv_stride=args.conv_stride, conv_border_mode=args.conv_padding, conv_batch_mode=True, conv_dropout=args.conv_dropout, \
        recur_layers=args.recur_layers, recur_units=args.recur_units, recur_cells=args.recur_cells, recur_bidis=args.recur_bidis, recur_batchnorms=[True]*args.recur_layers, recur_dropouts=args.recur_dropouts, \
        output_dropout=args.dense_dropout, output_dim=29)
    train_model(input_to_softmax=model_5,
                epochs=args.num_epochs,
                pickle_path=model_hist_path,
                save_model_path=model_weights_path,
                spectrogram=False) # change to False if you would like to use MFCC features
    print ("Training complete!")
    print ("\tModel weights stored in: {}".format(model_weights_path))
    print ("\tModel hist stored in: {}".format(model_hist_path))
    print ("# Thank you! #")
|
[
"train_utils.train_model",
"argparse.ArgumentParser",
"tensorflow.Session",
"tensorflow.ConfigProto",
"sample_models.custom_rnn_model"
] |
[((318, 334), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (332, 334), True, 'import tensorflow as tf\n'), ((644, 693), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ASR Driver"""'}), "(description='ASR Driver')\n", (667, 693), False, 'import argparse\n'), ((3021, 3518), 'sample_models.custom_rnn_model', 'custom_rnn_model', ([], {'input_dim': '(13)', 'conv_filters': 'args.conv_filters', 'conv_kernel_size': 'args.kernel_size', 'conv_stride': 'args.conv_stride', 'conv_border_mode': 'args.conv_padding', 'conv_batch_mode': '(True)', 'conv_dropout': 'args.conv_dropout', 'recur_layers': 'args.recur_layers', 'recur_units': 'args.recur_units', 'recur_cells': 'args.recur_cells', 'recur_bidis': 'args.recur_bidis', 'recur_batchnorms': '([True] * args.recur_layers)', 'recur_dropouts': 'args.recur_dropouts', 'output_dropout': 'args.dense_dropout', 'output_dim': '(29)'}), '(input_dim=13, conv_filters=args.conv_filters,\n conv_kernel_size=args.kernel_size, conv_stride=args.conv_stride,\n conv_border_mode=args.conv_padding, conv_batch_mode=True, conv_dropout=\n args.conv_dropout, recur_layers=args.recur_layers, recur_units=args.\n recur_units, recur_cells=args.recur_cells, recur_bidis=args.recur_bidis,\n recur_batchnorms=[True] * args.recur_layers, recur_dropouts=args.\n recur_dropouts, output_dropout=args.dense_dropout, output_dim=29)\n', (3037, 3518), False, 'from sample_models import custom_rnn_model\n'), ((3650, 3800), 'train_utils.train_model', 'train_model', ([], {'input_to_softmax': 'model_5', 'epochs': 'args.num_epochs', 'pickle_path': 'model_hist_path', 'save_model_path': 'model_weights_path', 'spectrogram': '(False)'}), '(input_to_softmax=model_5, epochs=args.num_epochs, pickle_path=\n model_hist_path, save_model_path=model_weights_path, spectrogram=False)\n', (3661, 3800), False, 'from train_utils import train_model\n'), ((412, 437), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', 
(422, 437), True, 'import tensorflow as tf\n')]
|
'''Create entry point to init everything and start Dolphin.'''
import argparse
# import sys
from pathlib import Path
import melee
from .interact import LiveInputsThread
_NO_COMMANDS = object()  # sentinel default: distinct from an explicit None (which disables the live thread)


def start_game(ports, cmds=_NO_COMMANDS, log=True):
    '''Main method to fully start game.
    Command-line first needs dolphin path and iso path, then game starts.
    Iso path is optional if you have a default iso set to run on Dolphin startup.
    ```
    # main.py
    ...
    start_game(...)
    ```
    `python main.py path/to/dolphin path/to/iso`
    Args:
        ports: tuple containing 4 bot instances or Nones.
            eg. `(None, my_bot, None, None)`
        cmds: optional.
            - `dict`: of custom commands `'cmd': (func, 'descrip')` or `'cmd': func`.
            - default: empty dict, no custom commands
            - `None`: no live thread desired (probably for performance)
        log: `bool`, write game logs to file with `melee.Logger` if True (default)'''
    # The sentinel replaces the original mutable default `cmds={}` (a dict
    # shared across every call) while keeping `cmds=None` meaningful.
    if cmds is _NO_COMMANDS:
        cmds = {}
    args = _start_command_line()
    dolphin_folder = str(Path(args.dolphin_path).parent)
    console = melee.Console(path=dolphin_folder)  # libmelee wants the folder
    # controllers must be connected before console run/connect...
    bots = _assign_controllers(ports, console)
    console.run(iso_path=args.iso_path)  # if None, relies on default Dolphin iso on startup
    console.connect()
    # ... and then controllers are connected afterward
    _connect_controllers(bots)
    logger = melee.Logger() if log else None
    live_interface = None
    if cmds is not None:
        live_interface = LiveInputsThread(commands=cmds)
        live_interface.onshutdown = _shutdown(console, logger)
        live_interface.start()
    # main frame loop: exits when the console stops producing gamestates
    while True:
        gamestate = console.step()
        if not gamestate:
            break
        for bot in bots:
            bot.act(gamestate)
        if live_interface:
            live_interface.update(gamestate)
        if logger:
            logger.logframe(gamestate)
            logger.log('Frame Process Time', console.processingtime)  # ms
            logger.writeframe()
def _start_command_line():
    """Parse positional command-line paths: dolphin folder first, optional iso second."""
    cli = argparse.ArgumentParser()
    cli.add_argument('paths', nargs='+', help='dolphin/path [iso/path]')
    parsed = cli.parse_args()
    parsed.dolphin_path = parsed.paths[0]
    if len(parsed.paths) > 1:
        parsed.iso_path = parsed.paths[1]
    else:
        parsed.iso_path = None
    return parsed
def _assign_controllers(ports, console):
    """Create a melee.Controller for each bot in the 4-slot ports tuple.

    Returns the list of bots that received a controller (slot index + 1
    becomes the controller port).
    """
    bots = []
    for slot, bot in enumerate(ports, start=1):
        if not bot:
            continue
        bot.controller = melee.Controller(console=console, port=slot)
        # controller = melee.Controller(console=console, port=slot, type=melee.ControllerType.STANDARD)
        bots.append(bot)
    return bots
def _connect_controllers(bots):
    """Plug in each bot's controller (done after console.connect())."""
    for fighter in bots:
        fighter.controller.connect()
def _shutdown(console, logger):
    """Build a zero-argument callable that stops Dolphin and flushes the log."""
    def teardown():
        console.stop()
        if logger:
            print()
            logger.writelog()
            print('Log file created: ' + logger.filename)
        print('Shutting down')
    return teardown
|
[
"argparse.ArgumentParser",
"pathlib.Path",
"melee.Controller",
"melee.Console",
"melee.Logger"
] |
[((1063, 1097), 'melee.Console', 'melee.Console', ([], {'path': 'dolphin_folder'}), '(path=dolphin_folder)\n', (1076, 1097), False, 'import melee\n'), ((2188, 2213), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2211, 2213), False, 'import argparse\n'), ((1456, 1470), 'melee.Logger', 'melee.Logger', ([], {}), '()\n', (1468, 1470), False, 'import melee\n'), ((1016, 1039), 'pathlib.Path', 'Path', (['args.dolphin_path'], {}), '(args.dolphin_path)\n', (1020, 1039), False, 'from pathlib import Path\n'), ((2648, 2696), 'melee.Controller', 'melee.Controller', ([], {'console': 'console', 'port': '(port + 1)'}), '(console=console, port=port + 1)\n', (2664, 2696), False, 'import melee\n')]
|
# -*- coding: utf-8 -*-
"""
Class for storing supply curves and calculating marginal costs
Created on Thu Feb 7 15:34:33 2019
@author: elisn
"""
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
class SupplyCurve():
    """Supply curve assembled from bids and/or generators; computes marginal cost.

    Generators are given as a pandas DataFrame with columns ['c2','c1','pmax'].
    The marginal cost of a generator is MC = 2*c2*q + c1 for q in [0, pmax],
    so a generator with c2 = 0 has constant marginal cost (the coefficients
    are those of the Total Cost function).

    Bids may also be added directly as a DataFrame with the columns
    [cap, mc_min, mc_max]; internally everything is stored as bids.

    Class methods:
        price2quantity(price)    - quantity offered for a given price
        quantity2price(quantity) - minimum price required for a given quantity
                                   (non-trivial: constant bids create discrete
                                   jumps in the offered quantity)
        plot()                   - plots supply curve
    """

    def __init__(self, bids=None, gens=None):
        """Create a curve from optional bid and generator DataFrames.

        Defaults are created per call; the original DataFrame default
        arguments were evaluated once and shared between instances.
        """
        if bids is None:
            bids = pd.DataFrame(columns=['cap', 'mc_min', 'mc_max'])
        if gens is None:
            gens = pd.DataFrame(columns=['c2', 'c1', 'pmax'])
        # pd.concat replaces DataFrame.append (deprecated in pandas 1.4,
        # removed in pandas 2.0) with identical semantics here.
        self.bids = pd.concat([bids, get_generator_bids(gens)],
                              ignore_index=True).sort_values(by=['mc_min', 'mc_max'])
        self._calculate_inflection_points_()

    def add_bids(self, bids):
        """ Add bids to supply curve, in the form of a data frame """
        self.bids = pd.concat([self.bids, bids],
                              ignore_index=True).sort_values(by=['mc_min', 'mc_max'])
        self._calculate_inflection_points_()

    def add_gens(self, gens):
        """ Add generators with c1, c2, pmax coefficients to supply curve """
        self.bids = pd.concat([self.bids, get_generator_bids(gens)],
                              ignore_index=True).sort_values(by=['mc_min', 'mc_max'])
        self._calculate_inflection_points_()

    def price2quantity(self, price):
        """ Calculate the offered quantity for a given price """
        # loop over bids, accumulate the quantity offered by each
        quantity = 0
        for i in self.bids.index:
            if price >= self.bids.loc[i, 'mc_min']:
                if self.bids.loc[i, 'mc_min'] != self.bids.loc[i, 'mc_max']:  # variable MC
                    # linear bid: offered share grows linearly from mc_min to mc_max
                    q = (price - self.bids.loc[i, 'mc_min']) / (self.bids.loc[i, 'mc_max'] - self.bids.loc[i, 'mc_min']) * self.bids.loc[i, 'cap']
                    if q > self.bids.loc[i, 'cap']:
                        q = self.bids.loc[i, 'cap']
                    quantity += q
                else:  # fixed MC: whole capacity is offered once price >= mc_min
                    quantity += self.bids.loc[i, 'cap']
            else:
                # mc_min exceeds price; bids are sorted by increasing mc_min, so stop
                return quantity
        return quantity

    def _calculate_inflection_points_(self):
        """ Find all inflection points in the supply curve """
        ppoints = []
        for i in self.bids.index:
            if self.bids.loc[i, 'mc_min'] not in ppoints:
                ppoints.append(self.bids.loc[i, 'mc_min'])
            if self.bids.loc[i, 'mc_max'] not in ppoints:
                ppoints.append(self.bids.loc[i, 'mc_max'])
        ppoints.sort()
        # find corresponding quantities
        qpoints = []
        for point in ppoints:
            qpoints.append(self.price2quantity(point))
        self.xprice = ppoints
        self.xquant = qpoints

    def quantity2price(self, quantity, plot=False, verbose=False):
        """ Calculate minimum price needed for given quantity.

        Returns np.nan when the requested quantity exceeds total capacity.
        """
        idx = 0
        while True:
            if idx == self.xprice.__len__():
                # quantity > qmax, not enough capacity
                if verbose:
                    print("Insufficient capacity: {0} MW available, but quantity = {1:.3}".format(self.xquant[-1], quantity))
                p = np.nan
                break
            elif self.xquant[idx] < quantity:
                idx += 1  # go to next price level
            else:
                if idx == 0:
                    # quantity <= 0 - return lowest marginal cost
                    p = self.xprice[0]
                    break
                elif self.xquant[idx] == quantity:
                    # price corresponds exactly to quantity
                    p = self.xprice[idx]
                    break
                else:
                    # check if offer curve is linear by evaluating quantity between prices
                    if self.price2quantity(self.xprice[idx - 1] + (self.xprice[idx] - self.xprice[idx - 1]) / 2) > self.xquant[idx - 1]:
                        # if offer curve is linear, interpolate to find correct price.
                        # Note: cannot interpolate linearly to the next intersection
                        # point, as the curve may contain a horizontal section before
                        # it; instead use the inverse slope obtained by summing the
                        # inverse slopes of the linear bids active at xprice[idx]
                        p = self.xprice[idx - 1] + (quantity - self.xquant[idx - 1]) / self._find_slope_(self.xprice[idx])
                        if p > self.xprice[idx]:  # cap price increase up to xprice[idx]
                            p = self.xprice[idx]
                        break
                    else:
                        # else return this price
                        p = self.xprice[idx]
                        break
        if plot:
            # plot supply curve with determined point
            self.plot(qpoints=[quantity], ppoints=[p])
        return p

    def _find_slope_(self, price):
        """ Find the slope of the supply curve, in MW/EUR (quantity/price) for given price """
        # only linear bids whose MC range brackets this price contribute
        slope = 0  # slope in MW/EUR
        for index in self.bids.index:
            if self.bids.loc[index, 'mc_min'] != self.bids.loc[index, 'mc_max'] and \
                    price > self.bids.loc[index, 'mc_min'] and price <= self.bids.loc[index, 'mc_max']:
                slope += self.bids.loc[index, 'cap'] / (self.bids.loc[index, 'mc_max'] - self.bids.loc[index, 'mc_min'])
        return slope

    def plot(self, qpoints=None, ppoints=None):
        """ Plot supply curve, optionally marking the given (quantity, price) points """
        # per-call defaults instead of the original mutable list defaults
        if qpoints is None:
            qpoints = []
        if ppoints is None:
            ppoints = []
        x_quantity = np.linspace(0, self.xquant[-1])
        y_price = np.array([self.quantity2price(x) for x in x_quantity])
        y2_price = np.linspace(self.xprice[0], self.xprice[-1])
        x2_quantity = np.array([self.price2quantity(p) for p in y2_price])
        plt.plot()
        plt.plot(x_quantity, y_price, '*')
        plt.plot(x2_quantity, y2_price, '*')
        # add given points to plot
        if qpoints.__len__() > 0:
            plt.plot(np.array(qpoints), np.array(ppoints), 'r*')
        plt.grid()
        plt.xlabel('MW')
        plt.ylabel('EUR/MWh')
        plt.title('Supply curve')
        plt.legend(['quantity2price', 'price2quantity'])
        plt.show()

    def get_curve(self):
        """ Return x and y vector with points to plot the offer curve """
        x_quantity = np.linspace(0, self.xquant[-1])
        y_price = np.array([self.quantity2price(x) for x in x_quantity])
        y2_price = np.linspace(self.xprice[0], self.xprice[-1])
        x2_quantity = np.array([self.price2quantity(p) for p in y2_price])
        # merge both samplings into a single sorted point set
        x = np.array([x for x, _ in sorted(zip(list(x_quantity) + list(x2_quantity), list(y_price) + list(y2_price)))])
        y = np.array([y for _, y in sorted(zip(list(x_quantity) + list(x2_quantity), list(y_price) + list(y2_price)))])
        return x, y
def get_generator_bids(gens):
    """Convert generator cost data into bid rows.

    For each generator: cap = pmax, mc_min = c1 (marginal cost at q = 0) and
    mc_max = 2*c2*pmax + c1 (marginal cost at q = pmax). The result uses a
    fresh 0..n-1 integer index.
    """
    bids = pd.DataFrame({
        'cap': gens['pmax'],
        'mc_min': gens['c1'],
        'mc_max': 2 * gens['c2'] * gens['pmax'] + gens['c1'],
    })
    bids.index = list(range(len(bids)))
    return bids
if __name__ == "__main__":
    # Demo: load pickled generators, build a curve, then add two constant bids.
    with open('Data/generators.pkl', 'rb') as f:
        gens = pickle.load(f)
    curve = SupplyCurve(gens=gens)
    curve.plot()
    extra_bids = pd.DataFrame(np.array([[10000, 10, 10], [10000, 80, 80]]),
                              columns=['cap', 'mc_min', 'mc_max'])
    curve.add_bids(extra_bids)
    curve.plot()
    x, y = curve.get_curve()
    plt.plot(x, y)
|
[
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"pickle.load",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((9079, 9128), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['cap', 'mc_min', 'mc_max']"}), "(columns=['cap', 'mc_min', 'mc_max'])\n", (9091, 9128), True, 'import pandas as pd\n'), ((9860, 9874), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (9868, 9874), True, 'import matplotlib.pyplot as plt\n'), ((1279, 1328), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['cap', 'mc_min', 'mc_max']"}), "(columns=['cap', 'mc_min', 'mc_max'])\n", (1291, 1328), True, 'import pandas as pd\n'), ((1334, 1376), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['c2', 'c1', 'pmax']"}), "(columns=['c2', 'c1', 'pmax'])\n", (1346, 1376), True, 'import pandas as pd\n'), ((7080, 7111), 'numpy.linspace', 'np.linspace', (['(0)', 'self.xquant[-1]'], {}), '(0, self.xquant[-1])\n', (7091, 7111), True, 'import numpy as np\n'), ((7212, 7256), 'numpy.linspace', 'np.linspace', (['self.xprice[0]', 'self.xprice[-1]'], {}), '(self.xprice[0], self.xprice[-1])\n', (7223, 7256), True, 'import numpy as np\n'), ((7635, 7645), 'matplotlib.pyplot.plot', 'plt.plot', ([], {}), '()\n', (7643, 7645), True, 'import matplotlib.pyplot as plt\n'), ((7654, 7688), 'matplotlib.pyplot.plot', 'plt.plot', (['x_quantity', 'y_price', '"""*"""'], {}), "(x_quantity, y_price, '*')\n", (7662, 7688), True, 'import matplotlib.pyplot as plt\n'), ((7695, 7731), 'matplotlib.pyplot.plot', 'plt.plot', (['x2_quantity', 'y2_price', '"""*"""'], {}), "(x2_quantity, y2_price, '*')\n", (7703, 7731), True, 'import matplotlib.pyplot as plt\n'), ((7893, 7903), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7901, 7903), True, 'import matplotlib.pyplot as plt\n'), ((7912, 7928), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""MW"""'], {}), "('MW')\n", (7922, 7928), True, 'import matplotlib.pyplot as plt\n'), ((7937, 7958), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""EUR/MWh"""'], {}), "('EUR/MWh')\n", (7947, 7958), True, 'import matplotlib.pyplot as plt\n'), ((7967, 7992), 
'matplotlib.pyplot.title', 'plt.title', (['"""Supply curve"""'], {}), "('Supply curve')\n", (7976, 7992), True, 'import matplotlib.pyplot as plt\n'), ((8001, 8049), 'matplotlib.pyplot.legend', 'plt.legend', (["['quantity2price', 'price2quantity']"], {}), "(['quantity2price', 'price2quantity'])\n", (8011, 8049), True, 'import matplotlib.pyplot as plt\n'), ((8057, 8067), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8065, 8067), True, 'import matplotlib.pyplot as plt\n'), ((8206, 8237), 'numpy.linspace', 'np.linspace', (['(0)', 'self.xquant[-1]'], {}), '(0, self.xquant[-1])\n', (8217, 8237), True, 'import numpy as np\n'), ((8338, 8382), 'numpy.linspace', 'np.linspace', (['self.xprice[0]', 'self.xprice[-1]'], {}), '(self.xprice[0], self.xprice[-1])\n', (8349, 8382), True, 'import numpy as np\n'), ((9459, 9473), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9470, 9473), False, 'import pickle\n'), ((9737, 9781), 'numpy.array', 'np.array', (['[[10000, 10, 10], [10000, 80, 80]]'], {}), '([[10000, 10, 10], [10000, 80, 80]])\n', (9745, 9781), True, 'import numpy as np\n'), ((7843, 7860), 'numpy.array', 'np.array', (['qpoints'], {}), '(qpoints)\n', (7851, 7860), True, 'import numpy as np\n'), ((7861, 7878), 'numpy.array', 'np.array', (['ppoints'], {}), '(ppoints)\n', (7869, 7878), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import codecs
from setuptools import setup
# Package metadata for the pytest-packagetree plugin.
setup(
    name='pytest-packagetree',
    version='0.0.1',
    author='<NAME>',
    author_email='<EMAIL>',
    maintainer='<NAME>',
    maintainer_email='<EMAIL>',
    license='MIT',
    url='https://github.com/jsfehler/pytest-packagetree',
    # Fixed typo: "plugin or package-tree" -> "plugin for package-tree".
    description='Pytest plugin for package-tree',
    packages=['pytest_packagetree'],
    install_requires=['pytest>=3.5.0', 'package-tree'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Pytest',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Testing',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: MIT License',
    ],
    entry_points={
        'pytest11': [
            'packagetree = pytest_packagetree.plugin',
        ],
    },
)
|
[
"setuptools.setup"
] |
[((102, 934), 'setuptools.setup', 'setup', ([], {'name': '"""pytest-packagetree"""', 'version': '"""0.0.1"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'maintainer': '"""<NAME>"""', 'maintainer_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'url': '"""https://github.com/jsfehler/pytest-packagetree"""', 'description': '"""Pytest plugin or package-tree"""', 'packages': "['pytest_packagetree']", 'install_requires': "['pytest>=3.5.0', 'package-tree']", 'classifiers': "['Development Status :: 4 - Beta', 'Framework :: Pytest',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Testing',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: MIT License']", 'entry_points': "{'pytest11': ['packagetree = pytest_packagetree.plugin']}"}), "(name='pytest-packagetree', version='0.0.1', author='<NAME>',\n author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>',\n license='MIT', url='https://github.com/jsfehler/pytest-packagetree',\n description='Pytest plugin or package-tree', packages=[\n 'pytest_packagetree'], install_requires=['pytest>=3.5.0',\n 'package-tree'], classifiers=['Development Status :: 4 - Beta',\n 'Framework :: Pytest', 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Testing',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: MIT License'], entry_points={'pytest11': [\n 'packagetree = pytest_packagetree.plugin']})\n", (107, 934), False, 'from setuptools import setup\n')]
|
# -*- coding: utf-8 -*-
from sklearn.metrics import classification_report
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
import matplotlib.pyplot as plt
import numpy as np
import os
from tnmlearn.callbacks import TrainingMonitor
# %%
class BaseLearningModel:
    """Shared scaffolding for Keras training scripts.

    Collects callbacks, trains `self.model` on the train/test splits the
    subclass provides, plots the loss/accuracy history and prints an
    evaluation report.
    """

    def __init__(self):
        # Callbacks accumulated by the build*CB_ helpers, handed to fit_().
        self.callbacks = []

    def buildTrainMonCB_(self, outputpath):
        """Register a TrainingMonitor writing <pid>.png / <pid>.json under outputpath."""
        pid = os.getpid()
        figPath = os.path.sep.join([outputpath, "{}.png".format(pid)])
        jsonPath = os.path.sep.join([outputpath, "{}.json".format(pid)])
        self.callbacks.append(TrainingMonitor(figPath, jsonPath=jsonPath))

    def buildModelChkPointCB_(self, weightpath):
        """Register a checkpoint that keeps only the best model by validation loss."""
        fname = os.path.sep.join(
            [weightpath, "weights-{epoch:03d}-{val_loss:.4f}.hdf5"])
        self.callbacks.append(ModelCheckpoint(
            fname, monitor="val_loss", mode="min",
            save_best_only=True, verbose=1))

    def fit_(self, epochs=100, batch_size=32):
        """Train self.model on the stored splits; stores and returns the History."""
        print("[INFO] training network...")
        self.H = self.model.fit(
            self.trainX, self.trainY,
            callbacks=self.callbacks,
            validation_data=(self.testX, self.testY),
            epochs=epochs, batch_size=batch_size)
        return self.H

    def plotModel_(self, outputpath):
        """Write a diagram of the network architecture to outputpath."""
        plot_model(self.model, to_file=outputpath, show_shapes=True)

    def plot(self):
        """Plot training/validation loss and accuracy over all epochs."""
        history = self.H.history
        xs = np.arange(0, len(history['loss']))
        plt.style.use("ggplot")
        plt.figure()
        plt.plot(xs, history["loss"], label="train_loss")
        plt.plot(xs, history["val_loss"], label="val_loss")
        plt.plot(xs, history["acc"], label="train_acc")
        plt.plot(xs, history["val_acc"], label="val_acc")
        plt.title("Training Loss and Accuracy")
        plt.xlabel("Epoch #")
        plt.ylabel("Loss/Accuracy")
        plt.legend()
        plt.show()

    def evaluate_(self, batch_size):
        """Print a per-class classification report on the test split."""
        print("[INFO] evaluating network...")
        predictions = self.model.predict(self.testX, batch_size=batch_size)
        print(classification_report(self.testY.argmax(axis=1),
            predictions.argmax(axis=1), target_names=self.classNames))
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"os.getpid",
"keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"keras.utils.plot_model",
"matplotlib.pyplot.style.use",
"numpy.arange",
"tnmlearn.callbacks.TrainingMonitor",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.sep.join"
] |
[((829, 902), 'os.path.sep.join', 'os.path.sep.join', (["[weightpath, 'weights-{epoch:03d}-{val_loss:.4f}.hdf5']"], {}), "([weightpath, 'weights-{epoch:03d}-{val_loss:.4f}.hdf5'])\n", (845, 902), False, 'import os\n'), ((927, 1017), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['fname'], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'save_best_only': '(True)', 'verbose': '(1)'}), "(fname, monitor='val_loss', mode='min', save_best_only=True,\n verbose=1)\n", (942, 1017), False, 'from keras.callbacks import ModelCheckpoint\n'), ((1499, 1559), 'keras.utils.plot_model', 'plot_model', (['self.model'], {'to_file': 'outputpath', 'show_shapes': '(True)'}), '(self.model, to_file=outputpath, show_shapes=True)\n', (1509, 1559), False, 'from keras.utils import plot_model\n'), ((1681, 1704), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1694, 1704), True, 'import matplotlib.pyplot as plt\n'), ((1709, 1721), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1719, 1721), True, 'import matplotlib.pyplot as plt\n'), ((2022, 2061), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss and Accuracy"""'], {}), "('Training Loss and Accuracy')\n", (2031, 2061), True, 'import matplotlib.pyplot as plt\n'), ((2066, 2087), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch #"""'], {}), "('Epoch #')\n", (2076, 2087), True, 'import matplotlib.pyplot as plt\n'), ((2092, 2119), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss/Accuracy"""'], {}), "('Loss/Accuracy')\n", (2102, 2119), True, 'import matplotlib.pyplot as plt\n'), ((2124, 2136), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2134, 2136), True, 'import matplotlib.pyplot as plt\n'), ((2141, 2151), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2149, 2151), True, 'import matplotlib.pyplot as plt\n'), ((621, 664), 'tnmlearn.callbacks.TrainingMonitor', 'TrainingMonitor', (['figPath'], {'jsonPath': 'jsonPath'}), '(figPath, 
jsonPath=jsonPath)\n', (636, 664), False, 'from tnmlearn.callbacks import TrainingMonitor\n'), ((1735, 1755), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (1744, 1755), True, 'import numpy as np\n'), ((1809, 1829), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (1818, 1829), True, 'import numpy as np\n'), ((1885, 1905), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (1894, 1905), True, 'import numpy as np\n'), ((1957, 1977), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (1966, 1977), True, 'import numpy as np\n'), ((494, 505), 'os.getpid', 'os.getpid', ([], {}), '()\n', (503, 505), False, 'import os\n'), ((580, 591), 'os.getpid', 'os.getpid', ([], {}), '()\n', (589, 591), False, 'import os\n')]
|
# ABC073b
import sys

input = sys.stdin.readline
sys.setrecursionlimit(10**6)

n = int(input())
segments = [list(map(int, input().split())) for _ in range(n)]
# Each row [l, r] covers r - l + 1 integer points.
print(sum(seg[1] - seg[0] + 1 for seg in segments))
|
[
"sys.setrecursionlimit"
] |
[((48, 78), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 6)'], {}), '(10 ** 6)\n', (69, 78), False, 'import sys\n')]
|
# Generated by Django 1.11.12 on 2018-04-25 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Sailthru send-template fields for ID-verification pass/fail emails."""
    dependencies = [
        ('email_marketing', '0009_remove_emailmarketingconfiguration_sailthru_activation_template'),
    ]
    operations = [
        # Template name sent to Sailthru when ID verification fails.
        migrations.AddField(
            model_name='emailmarketingconfiguration',
            name='sailthru_verification_failed_template',
            field=models.CharField(blank=True, help_text='Sailthru send template to use on failed ID verification.', max_length=20),
        ),
        # Template name sent to Sailthru when ID verification passes.
        migrations.AddField(
            model_name='emailmarketingconfiguration',
            name='sailthru_verification_passed_template',
            field=models.CharField(blank=True, help_text='Sailthru send template to use on passed ID verification.', max_length=20),
        ),
    ]
|
[
"django.db.models.CharField"
] |
[((442, 560), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Sailthru send template to use on failed ID verification."""', 'max_length': '(20)'}), "(blank=True, help_text=\n 'Sailthru send template to use on failed ID verification.', max_length=20)\n", (458, 560), False, 'from django.db import migrations, models\n'), ((727, 845), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Sailthru send template to use on passed ID verification."""', 'max_length': '(20)'}), "(blank=True, help_text=\n 'Sailthru send template to use on passed ID verification.', max_length=20)\n", (743, 845), False, 'from django.db import migrations, models\n')]
|
import numpy as np
import tensorflow as tf
import csv
def classify_state(X, n_state):
    """Map a value X onto one of n_state bins of width 2.5, counted down from 80.

    Bin i (for i in 0..n_state-2) covers (80 - (i + 1) * 2.5, 80 - i * 2.5],
    while values in [0, 2.5] fall into the last bin, n_state - 1.

    Returns:
        (bin_index, bin_upper_bound)

    Raises:
        ValueError: if X falls into no bin. (The original silently returned
        None here, which surfaced later as an opaque unpacking TypeError at
        the call sites.)
    """
    up = 80
    if 0 <= X <= 2.5:
        return n_state - 1, 2.5
    for i in range(n_state - 1):
        if up - (i + 1) * 2.5 < X <= up - i * 2.5:
            return i, up - i * 2.5
    raise ValueError(
        "X=%r does not fall into any of the %d state bins" % (X, n_state))
def GA(max_prob_index, n_actions):
    """Translate the chosen action index into its value (index * 2.5)."""
    action_values = np.arange(n_actions) * 2.5
    return action_values[max_prob_index]
def GR(X, x, n_actions, round_size, n_state):
    """Build the per-action reward vector for round x.

    Each candidate action value (multiples of 2.5) is scored by where it
    lands relative to `down`, the 2.5-wide bin edge that classify_state
    reports for the NEXT observed sample X[x + 1][round_size - 1].
    """
    action_values = np.arange(n_actions) * 2.5
    reward = np.zeros(n_actions)
    flag = 0  # always 0; kept so the (reward, flag, values) interface is unchanged
    _, down = classify_state(X[x + 1][round_size - 1], n_state)
    for i, value in enumerate(action_values):
        # branch order matters: value == down + 2.5 matches the first test
        if down < value <= down + 2.5:
            reward[i] = 1
        elif down + 2.5 <= value <= down + 5:
            reward[i] = 2
        elif down + 5 < value <= down + 7.5:
            reward[i] = 3
        else:
            reward[i] = -1
    return reward, flag, action_values
def classify_losspackge(diff, one_hot_state, n_state):
    """Map the difference `diff` onto a one-hot state row.

    Positive differences (in 2.5-wide steps) select rows 0 .. n_state/2 - 1;
    negative differences select rows from n_state/2 upward. Later matching
    branches deliberately overwrite earlier assignments, so statement order
    matters here.
    NOTE(review): presumably `diff` is the loss-package value (chosen G minus
    the observed sample) -- confirm against the caller before relying on this.
    """
    # diff == 0 -> row 0
    if (diff == 0):
        class_one_hot = one_hot_state[0]
    # positive diff in (2.5*i, 2.5*(i+1)] -> row i + 1
    for i in range(int((n_state / 2) - 1)):
        if (2.5 * i < diff <= 2.5 * (i + 1)):
            class_one_hot = one_hot_state[i + 1]
    # positive overflow beyond the last positive bin -> last positive row
    if (2.5 * (int(n_state / 2) - 1) < diff):
        class_one_hot = one_hot_state[int(n_state / 2) - 1]
    # negative diff in [-2.5*(i+1), -2.5*i) -> row n_state/2 + i
    for i in range(int(n_state / 2) - 2):
        if (-2.5 * (i + 1) <= diff < -2.5 * (i)):
            class_one_hot = one_hot_state[int(n_state / 2) - 1 + i + 1]
    # negative overflow below the last negative bin -> final row
    if (-2.5 * (int(n_state / 2) - 2) > diff):
        class_one_hot = one_hot_state[int(n_state / 2) - 1 + int(n_state / 2) - 2 + 1]
    return class_one_hot
def lstm_test(cell_number, n_actions, n_state, epoch, one_hot_state, X,
              model_i, round_size):
    """Replay a saved LSTM policy (TF1 graph) over the trace X.

    Rebuilds the LSTM + dense graph, restores the checkpoint for `epoch`
    from ./model_<model_i>/, steps through X choosing one action value per
    round, and appends the resulting RAB/DLR averages to
    ./model_<model_i>/lost_package.csv.

    Returns:
        (x_count, g_count, G_list): number of rounds in X, number of
        recorded decisions, and the list of chosen values G over time.
    """
    tf.reset_default_graph()
    # cap GPU memory so several processes can share one device
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    # NOTE(review): this session is shadowed by the `with tf.Session()` below
    # and is never used or closed
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    # NOTE(review): `input` shadows the builtin; kept as-is
    input = tf.placeholder(tf.float32, [None, round_size , n_state], name="input_x") # 1*30
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=cell_number, state_is_tuple=True)
    _, final_state = tf.nn.dynamic_rnn(cell=lstm_cell, inputs=input, dtype=tf.float32)
    # single dense layer on the final LSTM hidden state -> per-action scores
    W3 = tf.get_variable("W3", shape=[cell_number, n_actions],
                         initializer=tf.contrib.layers.xavier_initializer())
    B3 = tf.get_variable("B3", shape=[1, n_actions],
                         initializer=tf.constant_initializer())
    score = tf.matmul(final_state[1], W3) + B3
    probability = tf.nn.softmax(score)
    restore_path = './model_' + str(model_i) + '/' + str(epoch) + '.ckpt'
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, restore_path)
        down_count = len(X)
        # RAB/DLR hold [accumulated magnitude, occurrence count] for
        # non-negative / negative loss_package respectively
        RAB = np.zeros(2)
        DLR = np.zeros(2)
        G_list = []
        X_list = []
        G =80 #15.5
        G_list.append(G)
        # batch_action / all_action_for_loss are never appended to here
        batch_reward, batch_state, all_reward_for_loss, batch_action, all_action_for_loss = [], [], [], [], []
        g_count=0
        for x in range(len(X)-1):
            if (x != 0):
                # record the previous round's decision, capped at 80
                if (G > 80):
                    G_list.append(80)
                else:
                    G_list.append(action_values)
                g_count=g_count+1
            # one-hot encode the current round's samples as network input
            R_state = []
            for i in range(round_size):
                #print(len(X[x][i]))
                state_arg, D = classify_state(X[x][i], n_state)
                state_ = one_hot_state[state_arg]
                R_state.append(state_)
            batch_state.append(R_state)
            state = np.reshape(R_state, [1, round_size , n_state])
            tfprob = sess.run(probability, feed_dict={input: state})
            max_prob_index = np.argmax(tfprob[0])
            # compare the previous decision G against the round's last sample
            loss_package = G - X[x][round_size - 1]
            if (loss_package >= 0):
                RAB[0] = RAB[0] + loss_package
                RAB[1] = RAB[1] + 1
            else:
                DLR[0] = DLR[0] + (-1) * loss_package
                DLR[1] = DLR[1] + 1
            action_values = GA(max_prob_index, n_actions)
            reward, flag, values = GR(X, x, n_actions, round_size, n_state)
            X_list.append(X[x][round_size - 1])
            G = action_values
            batch_reward.append(reward)
            all_reward_for_loss.append(reward)
        x_count=down_count
        # averages, guarding against division by zero when a bucket never fired
        if (RAB[1] != 0):
            RAB_ = RAB[0] / RAB[1]
        else:
            RAB_ = 0
        if (DLR[1] != 0):
            DLR_ = DLR[0] / DLR[1]
        else:
            DLR_ = 0
        with open('./model_' + str(model_i) + '/lost_package.csv', 'a',
                  newline='') as p:
            writer = csv.writer(p)
            writer.writerow(['RAB', 'DLR'])
            writer.writerow([RAB_, DLR_])
    return x_count,g_count, G_list
|
[
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.nn.softmax",
"csv.writer",
"tensorflow.train.Saver",
"numpy.argmax",
"tensorflow.nn.dynamic_rnn",
"tensorflow.reset_default_graph",
"tensorflow.constant_initializer",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.nn.rnn_cell.BasicLSTMCell",
"tensorflow.matmul",
"tensorflow.ConfigProto",
"numpy.reshape",
"tensorflow.GPUOptions"
] |
[((325, 344), 'numpy.zeros', 'np.zeros', (['n_actions'], {}), '(n_actions)\n', (333, 344), True, 'import numpy as np\n'), ((531, 550), 'numpy.zeros', 'np.zeros', (['n_actions'], {}), '(n_actions)\n', (539, 550), True, 'import numpy as np\n'), ((655, 674), 'numpy.zeros', 'np.zeros', (['n_actions'], {}), '(n_actions)\n', (663, 674), True, 'import numpy as np\n'), ((1883, 1907), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1905, 1907), True, 'import tensorflow as tf\n'), ((1926, 1978), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.333)'}), '(per_process_gpu_memory_fraction=0.333)\n', (1939, 1978), True, 'import tensorflow as tf\n'), ((2061, 2132), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, round_size, n_state]'], {'name': '"""input_x"""'}), "(tf.float32, [None, round_size, n_state], name='input_x')\n", (2075, 2132), True, 'import tensorflow as tf\n'), ((2158, 2230), 'tensorflow.nn.rnn_cell.BasicLSTMCell', 'tf.nn.rnn_cell.BasicLSTMCell', ([], {'num_units': 'cell_number', 'state_is_tuple': '(True)'}), '(num_units=cell_number, state_is_tuple=True)\n', (2186, 2230), True, 'import tensorflow as tf\n'), ((2252, 2317), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'lstm_cell', 'inputs': 'input', 'dtype': 'tf.float32'}), '(cell=lstm_cell, inputs=input, dtype=tf.float32)\n', (2269, 2317), True, 'import tensorflow as tf\n'), ((2640, 2660), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['score'], {}), '(score)\n', (2653, 2660), True, 'import tensorflow as tf\n'), ((2748, 2764), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2762, 2764), True, 'import tensorflow as tf\n'), ((2587, 2616), 'tensorflow.matmul', 'tf.matmul', (['final_state[1]', 'W3'], {}), '(final_state[1], W3)\n', (2596, 2616), True, 'import tensorflow as tf\n'), ((2774, 2786), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2784, 2786), True, 'import tensorflow as tf\n'), ((2882, 
2893), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2890, 2893), True, 'import numpy as np\n'), ((2908, 2919), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2916, 2919), True, 'import numpy as np\n'), ((4729, 4742), 'csv.writer', 'csv.writer', (['p'], {}), '(p)\n', (4739, 4742), False, 'import csv\n'), ((2008, 2047), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (2022, 2047), True, 'import tensorflow as tf\n'), ((2418, 2456), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (2454, 2456), True, 'import tensorflow as tf\n'), ((2548, 2573), 'tensorflow.constant_initializer', 'tf.constant_initializer', ([], {}), '()\n', (2571, 2573), True, 'import tensorflow as tf\n'), ((3679, 3724), 'numpy.reshape', 'np.reshape', (['R_state', '[1, round_size, n_state]'], {}), '(R_state, [1, round_size, n_state])\n', (3689, 3724), True, 'import numpy as np\n'), ((3824, 3844), 'numpy.argmax', 'np.argmax', (['tfprob[0]'], {}), '(tfprob[0])\n', (3833, 3844), True, 'import numpy as np\n')]
|
# (c) 2005 <NAME>
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# This code was written with funding by http://prometheusresearch.com
"""
Authentication via Multiple Methods
In some environments, the choice of authentication method to be used
depends upon the environment and is not "fixed". This middleware allows
N authentication methods to be registered along with a goodness function
which determines which method should be used. The following example
demonstrates how to use both form and digest authentication in a server
stack; by default it uses form-based authentication unless
``*authmeth=digest`` is specified as a query argument.
>>> from paste.auth import form, cookie, digest, multi
>>> from paste.wsgilib import dump_environ
>>> from paste.httpserver import serve
>>>
>>> multi = multi.MultiHandler(dump_environ)
>>> def authfunc(environ, realm, user):
... return digest.digest_password(realm, user, user)
>>> multi.add_method('digest', digest.middleware, "Test Realm", authfunc)
>>> multi.set_query_argument('digest')
>>>
>>> def authfunc(environ, username, password):
... return username == password
>>> multi.add_method('form', form.middleware, authfunc)
>>> multi.set_default('form')
>>> serve(cookie.middleware(multi))
serving on...
"""
class MultiHandler(object):
    """Dispatch between several authentication middlewares.

    Two orthogonal facilities are provided:

    - register any number of authentication middlewares by name
    - register predicates that choose one of the registered
      middlewares based on the incoming request

    When no predicate matches, the request goes straight to the
    configured default (initially the wrapped application itself).
    """
    def __init__(self, application):
        self.application = application
        self.default = application
        self.binding = {}   # name -> instantiated middleware
        self.predicate = [] # ordered (checker, middleware) pairs
    def add_method(self, name, factory, *args, **kwargs):
        """Register *factory* under *name*, wrapping the application."""
        self.binding[name] = factory(self.application, *args, **kwargs)
    def add_predicate(self, name, checker):
        """Use the middleware registered as *name* whenever checker(environ) is true."""
        self.predicate.append((checker, self.binding[name]))
    def set_default(self, name):
        """ set default authentication method """
        self.default = self.binding[name]
    def set_query_argument(self, name, key = '*authmeth', value = None):
        """ choose authentication method based on a query argument """
        token = "%s=%s" % (key, value or name)
        self.add_predicate(
            name, lambda environ: token in environ.get('QUERY_STRING', ''))
    def __call__(self, environ, start_response):
        # First matching predicate wins; registration order matters.
        for (test, handler) in self.predicate:
            if test(environ):
                return handler(environ, start_response)
        return self.default(environ, start_response)
# Conventional lowercase alias used when wiring middleware stacks.
middleware = MultiHandler

__all__ = ['MultiHandler']

if "__main__" == __name__:
    # Run the module docstring's doctest session; ELLIPSIS lets the
    # "serving on..." expected output match the real server banner.
    import doctest
    doctest.testmod(optionflags=doctest.ELLIPSIS)
|
[
"doctest.testmod"
] |
[((2987, 3032), 'doctest.testmod', 'doctest.testmod', ([], {'optionflags': 'doctest.ELLIPSIS'}), '(optionflags=doctest.ELLIPSIS)\n', (3002, 3032), False, 'import doctest\n')]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: 临时文件和目录
Desc :
"""
from tempfile import TemporaryFile
from tempfile import TemporaryDirectory
from tempfile import NamedTemporaryFile
import tempfile
def temp_file():
    """Demonstrate the tempfile helpers: anonymous and named files, directories, and the low-level APIs."""
    # Anonymous temporary file: removed automatically when closed.
    with TemporaryFile('w+t') as tmp:
        tmp.write('Hello World\n')
        tmp.write('Testing\n')
        # Rewind before reading the data back.
        tmp.seek(0)
        contents = tmp.read()
        print(contents)
    # Named variant exposes a real filesystem path via .name.
    with NamedTemporaryFile('w+t') as tmp:
        print('filename is:', tmp.name)
    # A whole temporary directory, deleted on exit from the with-block.
    with TemporaryDirectory() as tmpdir:
        print('dirname is:', tmpdir)
    # Lower-level primitives: the caller is responsible for cleanup.
    print(tempfile.mkstemp())
    print(tempfile.mkdtemp())
    print(tempfile.gettempdir())


if __name__ == '__main__':
    temp_file()
|
[
"tempfile.NamedTemporaryFile",
"tempfile.TemporaryDirectory",
"tempfile.mkstemp",
"tempfile.gettempdir",
"tempfile.TemporaryFile",
"tempfile.mkdtemp"
] |
[((238, 258), 'tempfile.TemporaryFile', 'TemporaryFile', (['"""w+t"""'], {}), "('w+t')\n", (251, 258), False, 'from tempfile import TemporaryFile\n'), ((484, 509), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', (['"""w+t"""'], {}), "('w+t')\n", (502, 509), False, 'from tempfile import NamedTemporaryFile\n'), ((564, 584), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (582, 584), False, 'from tempfile import TemporaryDirectory\n'), ((646, 664), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (662, 664), False, 'import tempfile\n'), ((676, 694), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (692, 694), False, 'import tempfile\n'), ((706, 727), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (725, 727), False, 'import tempfile\n')]
|
#===============================================================================
# Command-line interface
#===============================================================================
import argparse
from .Titles import Titles
class Parser():
    """
    Methods for parsing CLI arguments.
    """
    ### Initialize objects that will be used on class methods.
    def __init__(self):
        self._usage = "$ AAG.py [-h] [-l] [-e] [-m FONT_NUM TEXT] [-r TEXT]"
        self._description = r"""
        Ascii Art Generator - Generate art from 425 fonts
        Author: <NAME>
        """
        self._epilog = r"""
        EXAMPLES
        List all available fonts:
        $ ./AAG.py -l
        Generate examples for all fonts:
        $ ./AAG.py -e
        Create ASCII art using the "larry3d" font:
        $ ./AAG.py -m 211 something
        Wrap your text in quotes if it is more than one word or contains special characters:
        $ ./AAG.py -m 211 "ASCII Art Generator"
        $ ./AAG.py -m 330 "H&M"
        Generate ASCII art from a random font:
        $ ./AAG.py -r "A random font"
        """
    ### Add parser flags.
    def _add_flags(self, parser):
        """Attach the generator's CLI options to *parser* as one argument group."""
        group = parser.add_argument_group("generator options")
        group.add_argument(
            "-l", "--list", action="store_true",
            help="list all available fonts and their corresponding number")
        group.add_argument(
            "-e", "--examples", action="store_true",
            help="generate examples for each font")
        group.add_argument(
            "-m", "--make", action="append", metavar="", nargs=2,
            help="generate ASCII art")
        group.add_argument(
            "-r", "--randomize", action="append", metavar="",
            help="generate ASCII art from a random font")
    ### Get args.
    def parse_args(self):
        """Build the parser, parse sys.argv, and return (namespace, parser)."""
        parser = argparse.ArgumentParser(
            description=self._description,
            epilog=self._epilog,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=self._usage)
        self._add_flags(parser)
        return parser.parse_args(), parser
class CheckArgs():
    """
    Method for checking the `-m`/`--make` flag.
    """
    @staticmethod
    def check_make(args, parser):
        """Validate each ``-m``/``--make`` pair: the first item must be a font number.

        On the first pair whose font field is not all digits, print the
        error title and exit via the parser.
        """
        for pair in args.make:
            if not pair[0].isdigit():
                Titles.error_title()
                parser.exit()
|
[
"argparse.ArgumentParser"
] |
[((1898, 2050), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'self._description', 'epilog': 'self._epilog', 'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'usage': 'self._usage'}), '(description=self._description, epilog=self._epilog,\n formatter_class=argparse.RawDescriptionHelpFormatter, usage=self._usage)\n', (1921, 2050), False, 'import argparse\n')]
|
"""
Uses class Config to define all type conversion functions for main config.yaml file
"""
from utils.config import Config
import discord
# Shared config instance; the get-handlers below convert raw YAML values
# into discord.py objects whenever a config key is read.
config = Config("./config.yaml")

# Maps the YAML status strings onto discord.Status members.
status_converter_dict = {"online": discord.Status.online,
                         "offline": discord.Status.offline,
                         "idle": discord.Status.idle,
                         "dnd": discord.Status.dnd,
                         "invisible": discord.Status.invisible}

# Maps the YAML activity type strings onto discord activity classes.
activity_converter_dict = {"game": discord.Game,
                           "streaming": discord.Streaming,
                           "custom": discord.CustomActivity}
@config.register_get_handler("discord_presence.ready.status")
@config.register_get_handler("discord_presence.startup.status")
def status_converter(value):
    """Translate a status string from the config into a discord.Status.

    Unrecognised values fall back to the "online" status.
    """
    return status_converter_dict.get(value, status_converter_dict["online"])
@config.register_get_handler("discord_presence.ready.activity")
@config.register_get_handler("discord_presence.startup.activity")
def presence_converter(value):
    """Build a discord activity object from a config mapping.

    *value* is expected to carry "type" and "text" keys; an
    unrecognised type yields ``None``.
    """
    activity_cls = activity_converter_dict.get(value["type"])
    if activity_cls is None:
        return None
    return activity_cls(name=value["text"])
|
[
"utils.config.Config"
] |
[((149, 172), 'utils.config.Config', 'Config', (['"""./config.yaml"""'], {}), "('./config.yaml')\n", (155, 172), False, 'from utils.config import Config\n')]
|
import os
import pdb
import numpy as np
from fastestimator.summary.logs import parse_log_file
from scipy.stats import ttest_ind
from tabulate import tabulate
def get_best_step(objective, eval_steps, result, mode, train_history):
    """Locate the training step where the run first reached *objective*.

    The first eval result meeting the objective (>= for mode "max",
    <= for mode "min") bounds an interval of training steps; within
    that interval the step with the lowest training loss is returned.
    Returns None when no training step falls inside the interval.
    """
    target_step = 0
    for idx, value in enumerate(result):
        reached = (value >= objective) if mode == "max" else (
            mode == "min" and value <= objective)
        if reached:
            target_step = eval_steps[idx]
            break
    upper = target_step
    # NOTE(review): when the objective is met at idx == 0 this wraps to
    # eval_steps[-1], leaving an empty interval — confirm intended.
    lower = eval_steps[idx - 1]
    best_step = None
    best_loss = None
    for step, loss in train_history.items():
        if lower < step <= upper:
            # Strict "<" keeps the earliest step on ties, as before.
            if best_loss is None or loss < best_loss:
                best_loss = loss
                best_step = step
    return best_step
def get_column_mean_std(all_data,
                        best_mode,
                        lrname,
                        lr_schedules,
                        arc_name="lr-controller-weighted-acc"):
    """Print a github-style table of per-scheduler mean/std statistics.

    For each scheduler, aggregates the best metric per run and the step at
    which each run reached the convergence target (the worst of all runs'
    best results).  "*" marks the best column value; "#" marks a
    two-sample t-test p-value < 0.05 versus the *arc_name* baseline.
    """
    if best_mode == "max":
        get_best = np.max
        get_worst = np.min
    elif best_mode == "min":
        get_best = np.min
        get_worst = np.max
    else:
        raise ValueError("best_mode needs to be one of ['max', 'min']")
    column_data = all_data[lrname]
    # Convergence target: the worst per-run best, i.e. a level that every
    # run managed to reach.
    best_numbers = []
    for lr_schedule in lr_schedules:
        lr_schedule_data = column_data[lr_schedule]
        for step, result, _ in lr_schedule_data:
            best_numbers.append(get_best(result))
    convergence_target = get_worst(best_numbers)
    # br_dict: best result per run; bs_dict: step reaching the target.
    br_dict, bs_dict = {}, {}
    for lr_schedule in lr_schedules:
        best_step, best_result = [], []
        lr_schedule_data = column_data[lr_schedule]
        for step, result, train_history in lr_schedule_data:
            best_result.append(get_best(result))
            best_step.append(
                get_best_step(convergence_target, step, result, best_mode,
                              train_history))
        br_dict[lr_schedule] = best_result
        bs_dict[lr_schedule] = best_step
    table = []
    for lr_schedule in lr_schedules:
        best_result = br_dict[lr_schedule]
        best_step = bs_dict[lr_schedule]
        br_display = f"{np.mean(best_result):.4f}"
        bs_display = f"{np.mean(best_step):.0f}"
        # "*" marks the best mean metric / smallest mean step.
        if np.mean(best_result) == get_best(
                [np.mean(x) for x in br_dict.values()]):
            br_display += "*"
        if np.mean(best_step) == min([np.mean(x) for x in bs_dict.values()]):
            bs_display += "*"
        # "#" marks a significant difference from the baseline scheduler.
        if ttest_ind(br_dict[arc_name], br_dict[lr_schedule]).pvalue < 0.05:
            br_display += "#"
        if ttest_ind(bs_dict[arc_name], bs_dict[lr_schedule]).pvalue < 0.05:
            bs_display += "#"
        table.append([
            lr_schedule, br_display, f"{np.std(best_result):.4f}", bs_display,
            f"{np.std(best_step):.0f}"
        ])
    print(
        tabulate(table,
                 headers=[
                     "scheduler", "metric mean", "metric std", "step mean",
                     "step std"
                 ],
                 tablefmt="github"))
def get_column_median(all_data,
                      best_mode,
                      lrname,
                      lr_schedules,
                      arc_name="lr-controller-weighted-acc"):
    """Print a github-style table of per-scheduler median statistics.

    Same aggregation as get_column_mean_std, but reports medians only.
    "*" marks the best column value; "#" marks a two-sample t-test
    p-value < 0.05 versus the *arc_name* baseline.
    """
    if best_mode == "max":
        get_best = np.max
        get_worst = np.min
    elif best_mode == "min":
        get_best = np.min
        get_worst = np.max
    else:
        raise ValueError("best_mode needs to be one of ['max', 'min']")
    column_data = all_data[lrname]
    # Convergence target: the worst of all runs' best results.
    best_numbers = []
    for lr_schedule in lr_schedules:
        lr_schedule_data = column_data[lr_schedule]
        for step, result, _ in lr_schedule_data:
            best_numbers.append(get_best(result))
    convergence_target = get_worst(best_numbers)
    br_dict, bs_dict = {}, {}
    for lr_schedule in lr_schedules:
        best_step, best_result = [], []
        lr_schedule_data = column_data[lr_schedule]
        for step, result, train_history in lr_schedule_data:
            best_result.append(get_best(result))
            best_step.append(
                get_best_step(convergence_target, step, result, best_mode,
                              train_history))
        br_dict[lr_schedule] = best_result
        bs_dict[lr_schedule] = best_step
    table = []
    for lr_schedule in lr_schedules:
        best_result = br_dict[lr_schedule]
        best_step = bs_dict[lr_schedule]
        br_display = f"{np.median(best_result):.4f}"
        bs_display = f"{np.median(best_step):.0f}"
        # "*" marks the best median metric / smallest median step.
        if np.median(best_result) == get_best(
                [np.median(x) for x in br_dict.values()]):
            br_display += "*"
        if np.median(best_step) == min(
                [np.median(x) for x in bs_dict.values()]):
            bs_display += "*"
        # "#" marks a significant difference from the baseline scheduler.
        if ttest_ind(br_dict[arc_name], br_dict[lr_schedule]).pvalue < 0.05:
            br_display += "#"
        if ttest_ind(bs_dict[arc_name], bs_dict[lr_schedule]).pvalue < 0.05:
            bs_display += "#"
        table.append([
            lr_schedule,
            br_display,
            bs_display,
        ])
    print(
        tabulate(table,
                 headers=[
                     "scheduler",
                     "metric median",
                     "step median",
                 ],
                 tablefmt="github"))
def check_file_complete(folder_path):
    """Verify the log folder holds every (lr, scheduler, run) combination.

    Filenames are expected to look like ``<lr>_<schedule>_<run>.txt``.
    Raises AssertionError naming the first missing combination.
    """
    filenames = [
        fname for fname in os.listdir(folder_path) if fname.endswith(".txt")
    ]
    lr_set = set()
    schedule_set = set()
    id_set = set()
    # Collect the observed values of each of the three name components.
    for filename in filenames:
        configs = os.path.splitext(filename)[0].split("_")
        lr_name, lr_schedule_name, run_id = configs
        lr_set.add(lr_name)
        schedule_set.add(lr_schedule_name)
        id_set.add(run_id)
    # Every cross-product combination must exist on disk.
    for lr in lr_set:
        for schedule in schedule_set:
            for run in id_set:
                filename = f"{lr}_{schedule}_{run}.txt"
                # BUGFIX: the failure message previously printed the literal
                # placeholder "(unknown)" instead of the missing filename.
                assert os.path.exists(os.path.join(
                    folder_path, filename)), f"{filename} is missing"
def print_table(folder_path, best_mode, metric_name, loss_name, mode):
    """Parse all log files in *folder_path* and print per-lr comparison tables.

    mode selects the aggregation style: "mean_std" or "median".
    Each file is named ``<lr>_<schedule>_<run>.txt``.
    """
    if mode == "mean_std":
        print_func = get_column_mean_std
    elif mode == "median":
        print_func = get_column_median
    else:
        raise ValueError("mode needs to be one of ['mean_std', 'median']")
    # Fail fast when any (lr, schedule, run) combination is missing.
    check_file_complete(folder_path)
    all_data = {}
    filenames = [
        fname for fname in os.listdir(folder_path) if fname.endswith(".txt")
    ]
    for filename in filenames:
        filepath = os.path.join(folder_path, filename)
        configs = os.path.splitext(filename)[0].split("_")
        lr_name, lr_schedule_name, run_id = configs
        summary = parse_log_file(filepath, ".txt")
        # Evaluation metric values and the steps they were logged at.
        result = np.array(
            [acc for acc in summary.history["eval"][metric_name].values()])
        steps = np.array(
            [acc for acc in summary.history["eval"][metric_name].keys()])
        train_history = summary.history["train"][loss_name]
        if lr_name not in all_data:
            all_data[lr_name] = {}
        if lr_schedule_name not in all_data[lr_name]:
            all_data[lr_name][lr_schedule_name] = []
        all_data[lr_name][lr_schedule_name].append(
            (steps, result, train_history))
    # One table per learning-rate setting, highest name first.
    for lrname in sorted(list(all_data.keys()), reverse=True):
        print(
            f"========================================== lrname={lrname} ==========================================="
        )
        print_func(all_data,
                   best_mode,
                   lrname=lrname,
                   lr_schedules=[
                       "base-lr", "cosine-decay", "cyclic-cosine-decay",
                       "exponential-decay", "lr-controller-weighted-acc"
                   ])


if __name__ == "__main__":
    print_table(
        mode="median",  # "median" or "mean_std"
        folder_path=
        "/mnt/c/Users/212770359/Downloads/ARC-master/iccv/logs/normal_comparison/language_modeling",  # path of the log dir
        best_mode="min",  # "max" or "min"
        metric_name="perplexity",  # evaluation metric
        loss_name="ce")  # loss key
|
[
"fastestimator.summary.logs.parse_log_file",
"numpy.median",
"numpy.std",
"scipy.stats.ttest_ind",
"numpy.mean",
"tabulate.tabulate",
"os.path.splitext",
"os.path.join",
"os.listdir"
] |
[((3104, 3219), 'tabulate.tabulate', 'tabulate', (['table'], {'headers': "['scheduler', 'metric mean', 'metric std', 'step mean', 'step std']", 'tablefmt': '"""github"""'}), "(table, headers=['scheduler', 'metric mean', 'metric std',\n 'step mean', 'step std'], tablefmt='github')\n", (3112, 3219), False, 'from tabulate import tabulate\n'), ((5397, 5490), 'tabulate.tabulate', 'tabulate', (['table'], {'headers': "['scheduler', 'metric median', 'step median']", 'tablefmt': '"""github"""'}), "(table, headers=['scheduler', 'metric median', 'step median'],\n tablefmt='github')\n", (5405, 5490), False, 'from tabulate import tabulate\n'), ((6894, 6929), 'os.path.join', 'os.path.join', (['folder_path', 'filename'], {}), '(folder_path, filename)\n', (6906, 6929), False, 'import os\n'), ((7059, 7091), 'fastestimator.summary.logs.parse_log_file', 'parse_log_file', (['filepath', '""".txt"""'], {}), "(filepath, '.txt')\n", (7073, 7091), False, 'from fastestimator.summary.logs import parse_log_file\n'), ((2489, 2509), 'numpy.mean', 'np.mean', (['best_result'], {}), '(best_result)\n', (2496, 2509), True, 'import numpy as np\n'), ((2618, 2636), 'numpy.mean', 'np.mean', (['best_step'], {}), '(best_step)\n', (2625, 2636), True, 'import numpy as np\n'), ((4806, 4828), 'numpy.median', 'np.median', (['best_result'], {}), '(best_result)\n', (4815, 4828), True, 'import numpy as np\n'), ((4939, 4959), 'numpy.median', 'np.median', (['best_step'], {}), '(best_step)\n', (4948, 4959), True, 'import numpy as np\n'), ((5690, 5713), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (5700, 5713), False, 'import os\n'), ((6787, 6810), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (6797, 6810), False, 'import os\n'), ((2401, 2421), 'numpy.mean', 'np.mean', (['best_result'], {}), '(best_result)\n', (2408, 2421), True, 'import numpy as np\n'), ((2452, 2470), 'numpy.mean', 'np.mean', (['best_step'], {}), '(best_step)\n', (2459, 2470), True, 'import numpy as 
np\n'), ((2727, 2777), 'scipy.stats.ttest_ind', 'ttest_ind', (['br_dict[arc_name]', 'br_dict[lr_schedule]'], {}), '(br_dict[arc_name], br_dict[lr_schedule])\n', (2736, 2777), False, 'from scipy.stats import ttest_ind\n'), ((2835, 2885), 'scipy.stats.ttest_ind', 'ttest_ind', (['bs_dict[arc_name]', 'bs_dict[lr_schedule]'], {}), '(bs_dict[arc_name], bs_dict[lr_schedule])\n', (2844, 2885), False, 'from scipy.stats import ttest_ind\n'), ((4714, 4736), 'numpy.median', 'np.median', (['best_result'], {}), '(best_result)\n', (4723, 4736), True, 'import numpy as np\n'), ((4767, 4787), 'numpy.median', 'np.median', (['best_step'], {}), '(best_step)\n', (4776, 4787), True, 'import numpy as np\n'), ((5065, 5115), 'scipy.stats.ttest_ind', 'ttest_ind', (['br_dict[arc_name]', 'br_dict[lr_schedule]'], {}), '(br_dict[arc_name], br_dict[lr_schedule])\n', (5074, 5115), False, 'from scipy.stats import ttest_ind\n'), ((5173, 5223), 'scipy.stats.ttest_ind', 'ttest_ind', (['bs_dict[arc_name]', 'bs_dict[lr_schedule]'], {}), '(bs_dict[arc_name], bs_dict[lr_schedule])\n', (5182, 5223), False, 'from scipy.stats import ttest_ind\n'), ((2536, 2546), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2543, 2546), True, 'import numpy as np\n'), ((2645, 2655), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2652, 2655), True, 'import numpy as np\n'), ((4855, 4867), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (4864, 4867), True, 'import numpy as np\n'), ((4981, 4993), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (4990, 4993), True, 'import numpy as np\n'), ((5898, 5924), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (5914, 5924), False, 'import os\n'), ((6310, 6345), 'os.path.join', 'os.path.join', (['folder_path', 'filename'], {}), '(folder_path, filename)\n', (6322, 6345), False, 'import os\n'), ((6948, 6974), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (6964, 6974), False, 'import os\n'), ((2995, 3014), 'numpy.std', 'np.std', 
(['best_result'], {}), '(best_result)\n', (3001, 3014), True, 'import numpy as np\n'), ((3049, 3066), 'numpy.std', 'np.std', (['best_step'], {}), '(best_step)\n', (3055, 3066), True, 'import numpy as np\n')]
|
import FWCore.ParameterSet.Config as cms
# First register all the hit matching algorithms, then specify preferred ones at end.
# The stub windows used has been optimized for for PU200 events
# We use by default the tight tuning
#
# Definition is presented here:
#
# https://indico.cern.ch/event/681577/#4-update-of-the-track-trigger
#
# This script is adapted to the very last Tilted Tracker geometry to date (tracker T5)
# This version was tested on CMSSW 10_0_0_pre1
#
# Official hit-matching (stub window) algorithm for the Phase-2 tracker,
# using the PU200 tight tuning (optimized for muons).
TTStubAlgorithm_official_Phase2TrackerDigi_ = cms.ESProducer("TTStubAlgorithm_official_Phase2TrackerDigi_",
   zMatchingPS = cms.bool(True),
   zMatching2S = cms.bool(True),
   #Number of tilted rings per side in barrel layers (for tilted geom only)
   NTiltedRings = cms.vdouble( 0., 12., 12., 12., 0., 0., 0.),
   # PU200 tight tuning, optimized for muons
   BarrelCut = cms.vdouble( 0, 2, 2.5, 3.5, 4.5, 5.5, 7),
   # One PSet per barrel layer; each vdouble lists per-ring cuts.
   TiltedBarrelCutSet = cms.VPSet(
       cms.PSet( TiltedCut = cms.vdouble( 0 ) ),
       cms.PSet( TiltedCut = cms.vdouble( 0, 3, 3, 2.5, 3, 3, 2.5, 2.5, 2, 1.5, 1.5, 1, 1) ),
       cms.PSet( TiltedCut = cms.vdouble( 0, 3.5, 3, 3, 3, 3, 2.5, 2.5, 3, 3, 2.5, 2.5, 2.5) ),
       cms.PSet( TiltedCut = cms.vdouble( 0, 4, 4, 4, 3.5, 3.5, 3.5, 3.5, 3, 3, 3, 3, 3) ),
       ),
   # One PSet per endcap disk; each vdouble lists per-ring cuts.
   EndcapCutSet = cms.VPSet(
       cms.PSet( EndcapCut = cms.vdouble( 0 ) ),
       cms.PSet( EndcapCut = cms.vdouble( 0, 1, 2.5, 2.5, 3, 2.5, 3, 3.5, 4, 4, 4.5, 3.5, 4, 4.5, 5, 5.5) ),
       cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 2.5, 2.5, 3, 2.5, 3, 3, 3.5, 3.5, 4, 3.5, 3.5, 4, 4.5, 5) ),
       cms.PSet( EndcapCut = cms.vdouble( 0, 1, 3, 3, 2.5, 3.5, 3.5, 3.5, 4, 3.5, 3.5, 4, 4.5) ),
       cms.PSet( EndcapCut = cms.vdouble( 0, 1, 2.5, 3, 2.5, 3.5, 3, 3, 3.5, 3.5, 3.5, 4, 4) ),
       cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 1.5, 3, 2.5, 3.5, 3, 3, 3.5, 4, 3.5, 4, 3.5) ),
       )
   # PU200 loose tuning, optimized for robustness (uncomment if you want to use it)
   #BarrelCut = cms.vdouble( 0, 2.0, 3, 4.5, 6, 6.5, 7.0),
   #TiltedBarrelCutSet = cms.VPSet(
   #     cms.PSet( TiltedCut = cms.vdouble( 0 ) ),
   #     cms.PSet( TiltedCut = cms.vdouble( 0, 3, 3., 2.5, 3., 3., 2.5, 2.5, 2., 1.5, 1.5, 1, 1) ),
   #     cms.PSet( TiltedCut = cms.vdouble( 0, 4., 4, 4, 4, 4., 4., 4.5, 5, 4., 3.5, 3.5, 3) ),
   #     cms.PSet( TiltedCut = cms.vdouble( 0, 5, 5, 5, 5, 5, 5, 5.5, 5, 5, 5.5, 5.5, 5.5) ),
   #     ),
   #EndcapCutSet = cms.VPSet(
   #     cms.PSet( EndcapCut = cms.vdouble( 0 ) ),
   #     cms.PSet( EndcapCut = cms.vdouble( 0, 1., 2.5, 2.5, 3.5, 5.5, 5.5, 6, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 7, 7) ),
   #     cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 2.5, 2.5, 3, 5, 6, 6, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 7, 7) ),
   #     cms.PSet( EndcapCut = cms.vdouble( 0, 1, 3., 4.5, 6., 6.5, 6.5, 6.5, 7, 7, 7, 7, 7) ),
   #     cms.PSet( EndcapCut = cms.vdouble( 0, 1., 2.5, 3.5, 6., 6.5, 6.5, 6.5, 6.5, 7, 7, 7, 7) ),
   #     cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 1.5, 3., 4.5, 6.5, 6.5, 7, 7, 7, 7, 7, 7) ),
   #     )
)

# CBC3 hit matching algorithm
TTStubAlgorithm_cbc3_Phase2TrackerDigi_ = cms.ESProducer("TTStubAlgorithm_cbc3_Phase2TrackerDigi_",
   zMatchingPS = cms.bool(True),
   zMatching2S = cms.bool(True),
)

# Set the preferred hit matching algorithms.
# We prefer the global geometry algorithm for now in order not to break
# anything. Override with process.TTStubAlgorithm_PSimHit_ = ...,
# etc. in your configuration.
TTStubAlgorithm_Phase2TrackerDigi_ = cms.ESPrefer("TTStubAlgorithm_official_Phase2TrackerDigi_")
|
[
"FWCore.ParameterSet.Config.ESPrefer",
"FWCore.ParameterSet.Config.bool",
"FWCore.ParameterSet.Config.vdouble"
] |
[((3478, 3537), 'FWCore.ParameterSet.Config.ESPrefer', 'cms.ESPrefer', (['"""TTStubAlgorithm_official_Phase2TrackerDigi_"""'], {}), "('TTStubAlgorithm_official_Phase2TrackerDigi_')\n", (3490, 3537), True, 'import FWCore.ParameterSet.Config as cms\n'), ((601, 615), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (609, 615), True, 'import FWCore.ParameterSet.Config as cms\n'), ((635, 649), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (643, 649), True, 'import FWCore.ParameterSet.Config as cms\n'), ((745, 794), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0.0)', '(12.0)', '(12.0)', '(12.0)', '(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 12.0, 12.0, 12.0, 0.0, 0.0, 0.0)\n', (756, 794), True, 'import FWCore.ParameterSet.Config as cms\n'), ((854, 894), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0)', '(2)', '(2.5)', '(3.5)', '(4.5)', '(5.5)', '(7)'], {}), '(0, 2, 2.5, 3.5, 4.5, 5.5, 7)\n', (865, 894), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3175, 3189), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (3183, 3189), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3208, 3222), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (3216, 3222), True, 'import FWCore.ParameterSet.Config as cms\n'), ((962, 976), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0)'], {}), '(0)\n', (973, 976), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1012, 1072), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0)', '(3)', '(3)', '(2.5)', '(3)', '(3)', '(2.5)', '(2.5)', '(2)', '(1.5)', '(1.5)', '(1)', '(1)'], {}), '(0, 3, 3, 2.5, 3, 3, 2.5, 2.5, 2, 1.5, 1.5, 1, 1)\n', (1023, 1072), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1107, 1169), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0)', '(3.5)', '(3)', '(3)', '(3)', '(3)', '(2.5)', '(2.5)', '(3)', '(3)', '(2.5)', '(2.5)', '(2.5)'], {}), '(0, 
3.5, 3, 3, 3, 3, 2.5, 2.5, 3, 3, 2.5, 2.5, 2.5)\n', (1118, 1169), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1204, 1262), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0)', '(4)', '(4)', '(4)', '(3.5)', '(3.5)', '(3.5)', '(3.5)', '(3)', '(3)', '(3)', '(3)', '(3)'], {}), '(0, 4, 4, 4, 3.5, 3.5, 3.5, 3.5, 3, 3, 3, 3, 3)\n', (1215, 1262), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1330, 1344), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0)'], {}), '(0)\n', (1341, 1344), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1380, 1455), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0)', '(1)', '(2.5)', '(2.5)', '(3)', '(2.5)', '(3)', '(3.5)', '(4)', '(4)', '(4.5)', '(3.5)', '(4)', '(4.5)', '(5)', '(5.5)'], {}), '(0, 1, 2.5, 2.5, 3, 2.5, 3, 3.5, 4, 4, 4.5, 3.5, 4, 4.5, 5, 5.5)\n', (1391, 1455), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1490, 1567), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0)', '(0.5)', '(2.5)', '(2.5)', '(3)', '(2.5)', '(3)', '(3)', '(3.5)', '(3.5)', '(4)', '(3.5)', '(3.5)', '(4)', '(4.5)', '(5)'], {}), '(0, 0.5, 2.5, 2.5, 3, 2.5, 3, 3, 3.5, 3.5, 4, 3.5, 3.5, 4, 4.5, 5)\n', (1501, 1567), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1602, 1666), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0)', '(1)', '(3)', '(3)', '(2.5)', '(3.5)', '(3.5)', '(3.5)', '(4)', '(3.5)', '(3.5)', '(4)', '(4.5)'], {}), '(0, 1, 3, 3, 2.5, 3.5, 3.5, 3.5, 4, 3.5, 3.5, 4, 4.5)\n', (1613, 1666), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1701, 1763), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0)', '(1)', '(2.5)', '(3)', '(2.5)', '(3.5)', '(3)', '(3)', '(3.5)', '(3.5)', '(3.5)', '(4)', '(4)'], {}), '(0, 1, 2.5, 3, 2.5, 3.5, 3, 3, 3.5, 3.5, 3.5, 4, 4)\n', (1712, 1763), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1798, 1862), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(0)', '(0.5)', '(1.5)', '(3)', '(2.5)', '(3.5)', '(3)', 
'(3)', '(3.5)', '(4)', '(3.5)', '(4)', '(3.5)'], {}), '(0, 0.5, 1.5, 3, 2.5, 3.5, 3, 3, 3.5, 4, 3.5, 4, 3.5)\n', (1809, 1862), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.core import blocks
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.images.blocks import ImageChooserBlock
class HomePage(Page):
    """Site landing page: a stream of raw-HTML jumbotron blocks."""

    body = StreamField([
        ('jumbotron', blocks.RawHTMLBlock()),
    ])

    content_panels = Page.content_panels + [
        StreamFieldPanel('body'),
    ]

    # Page types that may be created beneath this page in the tree.
    subpage_types = [
        'home.HomePage',
        'home.BasicPage',
    ]

    # BUGFIX: Wagtail reads ``parent_page_types`` (plural); the singular
    # ``parent_page_type`` attribute was silently ignored, leaving page
    # creation unrestricted.
    parent_page_types = [
        'wagtailcore.Page'
    ]
class BasicPage(Page):
    """Generic content page: headings, rich text, and images."""

    body = StreamField([
        ('heading', blocks.CharBlock(classname="full title")),
        ('paragraph', blocks.RichTextBlock()),
        ('image', ImageChooserBlock()),
    ])

    content_panels = Page.content_panels + [
        StreamFieldPanel('body'),
    ]

    # Page types that may be created beneath this page in the tree.
    subpage_types = [
        'home.BasicPage',
    ]

    # BUGFIX: Wagtail reads ``parent_page_types`` (plural); the singular
    # ``parent_page_type`` attribute was silently ignored, leaving page
    # creation unrestricted.
    parent_page_types = [
        'wagtailcore.Page'
    ]
|
[
"wagtail.images.blocks.ImageChooserBlock",
"wagtail.core.blocks.RichTextBlock",
"wagtail.core.blocks.RawHTMLBlock",
"wagtail.admin.edit_handlers.StreamFieldPanel",
"wagtail.core.blocks.CharBlock"
] |
[((426, 450), 'wagtail.admin.edit_handlers.StreamFieldPanel', 'StreamFieldPanel', (['"""body"""'], {}), "('body')\n", (442, 450), False, 'from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel\n'), ((860, 884), 'wagtail.admin.edit_handlers.StreamFieldPanel', 'StreamFieldPanel', (['"""body"""'], {}), "('body')\n", (876, 884), False, 'from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel\n'), ((341, 362), 'wagtail.core.blocks.RawHTMLBlock', 'blocks.RawHTMLBlock', ([], {}), '()\n', (360, 362), False, 'from wagtail.core import blocks\n'), ((669, 709), 'wagtail.core.blocks.CharBlock', 'blocks.CharBlock', ([], {'classname': '"""full title"""'}), "(classname='full title')\n", (685, 709), False, 'from wagtail.core import blocks\n'), ((734, 756), 'wagtail.core.blocks.RichTextBlock', 'blocks.RichTextBlock', ([], {}), '()\n', (754, 756), False, 'from wagtail.core import blocks\n'), ((777, 796), 'wagtail.images.blocks.ImageChooserBlock', 'ImageChooserBlock', ([], {}), '()\n', (794, 796), False, 'from wagtail.images.blocks import ImageChooserBlock\n')]
|
import json
import os
import random
import iso8601
import shutil
# Total number of NFT metadata files to generate.
numberOfFiles = 1000
# Address credited as the sole creator (100% share) in every metadata file.
creatorAddress = "BjLKxBKRUjFX3WyfyTcTtotC5TfRaPJgVjEeMn1MuzPd"

# Build Blockchain JSON
for x in range(numberOfFiles):
    nftNumber = x + 1  # asset numbering in names/URIs is 1-based
    niftyRecordNFTData = {
        "name" : "NiftyRecord #" + str(nftNumber),
        "symbol": "NFRC",
        "uri" : "https://assets.niftyrecordsnft.com/niftyrecords/" + str(nftNumber) + "/NiftyRecord-" + str(nftNumber) + ".json",
        "image": "https://assets.niftyrecordsnft.com/niftyrecords/" + str(nftNumber) + "/NiftyRecord-" + str(nftNumber) + ".png",
        "properties": {
            "files": [
                {
                    "uri": "https://assets.niftyrecordsnft.com/niftyrecords/" + str(nftNumber) + "/NiftyRecord-" + str(nftNumber) + ".png",
                    "type": "image/png"
                }
            ],
            "category": "image",
            "creators": [
                {
                    "address": creatorAddress,
                    "share": 100
                }
            ]
        },
        "seller_fee_basis_points": 750
    }
    # When generating blockchain JSON
    # NOTE(review): the files are written 0-based ('<x>.json') while the
    # metadata inside is 1-based ('NiftyRecord #<x+1>') — confirm this
    # offset is intended.
    with open('blockchain/' + str(x) + '.json', 'w') as f:
        json.dump(niftyRecordNFTData, f)
    # Paste in temporary white placeholder image
    shutil.copyfile('sleeves/white.png', 'blockchain/' + str(x) + '.png')
# Sleeve rarity tiers.  rangeStart/rangeEnd describe each tier's slice of a
# 1-100 percent roll (White 40%, Black 30%, Silver 20%, Gold 10%); rarer
# sleeves carry an earlier revealDate.
unrevealedSleeves = [
    {
        "name" : "White",
        "rangeStart": 1,
        "rangeEnd": 40,
        "imagePath":"sleeves/white.png",
        "revealDate": "2022-03-28T21:00:00-04:00"
    },
    {
        "name" : "Black",
        "rangeStart": 41,
        "rangeEnd": 70,
        "imagePath":"sleeves/black.png",
        "revealDate": "2022-03-28T20:00:00-04:00"
    },
    {
        "name" : "Silver",
        "rangeStart": 71,
        "rangeEnd": 90,
        "imagePath":"sleeves/silver.png",
        "revealDate": "2022-03-28T19:00:00-04:00"
    },
    {
        "name" : "Gold",
        "rangeStart": 91,
        "rangeEnd": 100,
        "imagePath":"sleeves/gold.png",
        "revealDate": "2022-03-28T18:00:00-04:00"
    }
]
# Per-colour generation counters and caps: 40% white, 30% black,
# 20% silver, 10% gold of all files.
generatedWhiteSleeves = 0
maxWhiteSleeves = numberOfFiles * 0.4
print("maxWhiteSleeves")
print(maxWhiteSleeves)
generatedBlackSleeves = 0
maxBlackSleeves = numberOfFiles * 0.3
print("maxBlackSleeves")
print(maxBlackSleeves)
generatedSilverSleeves = 0
maxSilverSleeves = numberOfFiles * 0.2
print("maxSilverSleeves")
print(maxSilverSleeves)
generatedGoldSleeves = 0
maxGoldSleeves = numberOfFiles * 0.1
print("maxGoldSleeves")
print(maxGoldSleeves)
# Build server unrevealed metadata JSON
for x in range(numberOfFiles):
    # Draw a sleeve colour for this record, retrying until a tier with
    # remaining capacity is hit.
    thisSleeve = unrevealedSleeves[0]  # default to the white sleeve
    while True:
        # Uniform roll over 1..100 (randint is inclusive on both ends).
        randomNumber = random.randint(1, 100)
        # Find the tier whose percent range contains the roll.
        for sleeve in unrevealedSleeves:
            # BUGFIX: the tier bounds are inclusive (1-40, 41-70, 71-90,
            # 91-100); the previous strict comparisons (> and <) made the
            # boundary rolls (1, 40, 41, 70, ...) match no tier at all,
            # silently skewing the draw toward the default white sleeve.
            if sleeve["rangeStart"] <= randomNumber <= sleeve["rangeEnd"]:
                thisSleeve = sleeve
        if thisSleeve["name"] == "White":
            if generatedWhiteSleeves < maxWhiteSleeves:
                generatedWhiteSleeves += 1
                break
            else:
                # Hit max number of sleeve, try once again
                print("Hit max white sleeves, trying again")
                continue
        elif thisSleeve["name"] == "Black":
            if generatedBlackSleeves < maxBlackSleeves:
                generatedBlackSleeves += 1
                break
            else:
                # Hit max number of sleeve, try once again
                print("Hit max black sleeves, trying again")
                continue
        elif thisSleeve["name"] == "Silver":
            if generatedSilverSleeves < maxSilverSleeves:
                generatedSilverSleeves += 1
                break
            else:
                # Hit max number of sleeve, try once again
                print("Hit max silver sleeves, trying again")
                continue
        elif thisSleeve["name"] == "Gold":
            if generatedGoldSleeves < maxGoldSleeves:
                generatedGoldSleeves += 1
                break
            else:
                # Hit max number of sleeve, try once again
                print("Hit max gold sleeves, trying again")
                continue
    print("thisSleeve")
    print(thisSleeve)
    nftNumber = x + 1
    niftyRecordNFTData = {
        "id" : nftNumber,
        "name" : "NiftyRecord #" + str(nftNumber),
        "symbol": "NFRC",
        "image": "https://assets.niftyrecordsnft.com/niftyrecords/" + str(nftNumber) + "/NiftyRecord-" + str(nftNumber) + ".png",
        "description": "This is NiftyRecords #" + str(nftNumber) + "!",
        "attributes": [
            {
                "trait_type": "Opened",
                "value": "No"
            },
            {
                "trait_type": "Record Sleeve",
                "value": thisSleeve["name"]
            }
        ],
        "collection": {
            "name": "NiftyRecords",
            "family": "NiftyRecords"
        },
        "revealed": False,
        # Unix timestamp derived from the sleeve's ISO-8601 reveal date.
        "revealAfter": iso8601.parse_date(thisSleeve["revealDate"]).timestamp()
    }
    fileName = 'server/' + str(nftNumber) + "/NiftyRecord-" + str(nftNumber) + '.json'
    os.makedirs(os.path.dirname(fileName), exist_ok=True)
    # When generating server-JSON
    with open(fileName, 'w') as f:
        json.dump(niftyRecordNFTData, f)
    #Copy in the respective sleeve image that corresponds to the choice
    shutil.copyfile(thisSleeve["imagePath"], 'server/' + str(nftNumber) + '/NiftyRecord-' + str(nftNumber) + '.png')
|
[
"iso8601.parse_date",
"json.dump",
"os.path.dirname",
"random.randint"
] |
[((1223, 1255), 'json.dump', 'json.dump', (['niftyRecordNFTData', 'f'], {}), '(niftyRecordNFTData, f)\n', (1232, 1255), False, 'import json\n'), ((2966, 2988), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (2980, 2988), False, 'import random\n'), ((5509, 5534), 'os.path.dirname', 'os.path.dirname', (['fileName'], {}), '(fileName)\n', (5524, 5534), False, 'import os\n'), ((5629, 5661), 'json.dump', 'json.dump', (['niftyRecordNFTData', 'f'], {}), '(niftyRecordNFTData, f)\n', (5638, 5661), False, 'import json\n'), ((5342, 5386), 'iso8601.parse_date', 'iso8601.parse_date', (["thisSleeve['revealDate']"], {}), "(thisSleeve['revealDate'])\n", (5360, 5386), False, 'import iso8601\n')]
|
import numpy as np
def filter_ids(array, clinical_ids):
# list of array indices that need to be deleted
del_indices = []
i = 0
for img in array:
id = img[-1]
if id not in clinical_ids:
del_indices.append(i)
i = i + 1
array = np.delete(array, del_indices, axis=0)
return array
|
[
"numpy.delete"
] |
[((286, 323), 'numpy.delete', 'np.delete', (['array', 'del_indices'], {'axis': '(0)'}), '(array, del_indices, axis=0)\n', (295, 323), True, 'import numpy as np\n')]
|
import os
from typing import Dict, List, Tuple
from urllib.parse import urlparse, urlunparse
from astro.constants import FileLocation
from astro.files.locations.base import BaseFileLocation
from astro.utils.dependencies import s3
class S3Location(BaseFileLocation):
"""Handler S3 object store operations"""
location_type = FileLocation.S3
@staticmethod
def _parse_s3_env_var() -> Tuple[str, str]:
"""Return S3 ID/KEY pair from environment vars"""
return os.environ["AWS_ACCESS_KEY_ID"], os.environ["AWS_SECRET_ACCESS_KEY"]
@property
def transport_params(self) -> Dict:
"""Structure s3fs credentials from Airflow connection.
s3fs enables pandas to write to s3
"""
hook = s3.S3Hook(aws_conn_id=self.conn_id) if self.conn_id else s3.S3Hook()
session = hook.get_session()
return {"client": session.client("s3")}
@property
def paths(self) -> List[str]:
"""Resolve S3 file paths with prefix"""
url = urlparse(self.path)
bucket_name = url.netloc
prefix = url.path[1:]
hook = s3.S3Hook(aws_conn_id=self.conn_id) if self.conn_id else s3.S3Hook()
prefixes = hook.list_keys(bucket_name=bucket_name, prefix=prefix)
paths = [
urlunparse((url.scheme, url.netloc, keys, "", "", "")) for keys in prefixes
]
return paths
@property
def size(self) -> int:
return -1
|
[
"astro.utils.dependencies.s3.S3Hook",
"urllib.parse.urlparse",
"urllib.parse.urlunparse"
] |
[((1013, 1032), 'urllib.parse.urlparse', 'urlparse', (['self.path'], {}), '(self.path)\n', (1021, 1032), False, 'from urllib.parse import urlparse, urlunparse\n'), ((748, 783), 'astro.utils.dependencies.s3.S3Hook', 's3.S3Hook', ([], {'aws_conn_id': 'self.conn_id'}), '(aws_conn_id=self.conn_id)\n', (757, 783), False, 'from astro.utils.dependencies import s3\n'), ((805, 816), 'astro.utils.dependencies.s3.S3Hook', 's3.S3Hook', ([], {}), '()\n', (814, 816), False, 'from astro.utils.dependencies import s3\n'), ((1111, 1146), 'astro.utils.dependencies.s3.S3Hook', 's3.S3Hook', ([], {'aws_conn_id': 'self.conn_id'}), '(aws_conn_id=self.conn_id)\n', (1120, 1146), False, 'from astro.utils.dependencies import s3\n'), ((1168, 1179), 'astro.utils.dependencies.s3.S3Hook', 's3.S3Hook', ([], {}), '()\n', (1177, 1179), False, 'from astro.utils.dependencies import s3\n'), ((1284, 1338), 'urllib.parse.urlunparse', 'urlunparse', (["(url.scheme, url.netloc, keys, '', '', '')"], {}), "((url.scheme, url.netloc, keys, '', '', ''))\n", (1294, 1338), False, 'from urllib.parse import urlparse, urlunparse\n')]
|
import uuid
from django.db import models
from django.forms import model_to_dict
# Create your models here.
class DemurageSize(models.Model):
SIZES = (('Dry 20 ft', 'Dry 20 ft'),
('Reefer 20 ft', 'Reefer 20 ft'),
('Special 20 ft', 'Special 20 ft'),
('Dry 40 ft', 'Dry 40 ft'),
('Reefer 40 ft', 'Reefer 40 ft'),
("Special 40 ft", "Special 40 ft"),
("Dry 45 ft", "Dry 45 ft"),
("Reefer 45 ft", "Reefer 45 ft"))
id = models.UUIDField(primary_key=True, unique=True, editable=False, default=uuid.uuid4)
size = models.CharField(max_length=255, unique=True, choices=SIZES)
free_days = models.IntegerField(default=0)
is_active = models.BooleanField(default=True)
date_created = models.DateTimeField(auto_now_add=True)
def delete(self):
self.is_active=False
self.ranges.update(is_active=False)
self.save()
def __str__(self):
return self.size
class Demurage(models.Model):
id = models.UUIDField(primary_key=True, unique=True, editable=False, default=uuid.uuid4)
shipping_company = models.ForeignKey("main.ShippingCompany", on_delete=models.CASCADE, related_name="demurages")
start_day = models.IntegerField()
end_day = models.IntegerField()
price_per_day = models.FloatField()
size = models.ForeignKey("demurage.DemurageSize", on_delete=models.CASCADE, related_name="ranges", null=True)
demurage_type = models.CharField(max_length=250, blank=True, null=True, choices=(("import", "Import"),
("export","Export")))
is_active = models.BooleanField(default=True)
date_created = models.DateTimeField(auto_now_add=True)
def delete(self):
self.is_active=False
self.save()
@property
def shipping_company_detail(self):
return model_to_dict(self.shipping_company, exclude=["date_added","is_active"])
@property
def size_detail(self):
return model_to_dict(self.size, exclude=["date_added","is_active"])
|
[
"django.db.models.CharField",
"django.db.models.DateTimeField",
"django.db.models.ForeignKey",
"django.db.models.FloatField",
"django.db.models.BooleanField",
"django.db.models.IntegerField",
"django.db.models.UUIDField",
"django.forms.model_to_dict"
] |
[((521, 609), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'primary_key': '(True)', 'unique': '(True)', 'editable': '(False)', 'default': 'uuid.uuid4'}), '(primary_key=True, unique=True, editable=False, default=\n uuid.uuid4)\n', (537, 609), False, 'from django.db import models\n'), ((616, 676), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'unique': '(True)', 'choices': 'SIZES'}), '(max_length=255, unique=True, choices=SIZES)\n', (632, 676), False, 'from django.db import models\n'), ((693, 723), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (712, 723), False, 'from django.db import models\n'), ((740, 773), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (759, 773), False, 'from django.db import models\n'), ((793, 832), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (813, 832), False, 'from django.db import models\n'), ((1073, 1161), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'primary_key': '(True)', 'unique': '(True)', 'editable': '(False)', 'default': 'uuid.uuid4'}), '(primary_key=True, unique=True, editable=False, default=\n uuid.uuid4)\n', (1089, 1161), False, 'from django.db import models\n'), ((1180, 1277), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""main.ShippingCompany"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""demurages"""'}), "('main.ShippingCompany', on_delete=models.CASCADE,\n related_name='demurages')\n", (1197, 1277), False, 'from django.db import models\n'), ((1290, 1311), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1309, 1311), False, 'from django.db import models\n'), ((1326, 1347), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1345, 1347), False, 'from django.db import models\n'), ((1368, 1387), 'django.db.models.FloatField', 
'models.FloatField', ([], {}), '()\n', (1385, 1387), False, 'from django.db import models\n'), ((1399, 1505), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""demurage.DemurageSize"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""ranges"""', 'null': '(True)'}), "('demurage.DemurageSize', on_delete=models.CASCADE,\n related_name='ranges', null=True)\n", (1416, 1505), False, 'from django.db import models\n'), ((1522, 1635), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'blank': '(True)', 'null': '(True)', 'choices': "(('import', 'Import'), ('export', 'Export'))"}), "(max_length=250, blank=True, null=True, choices=(('import',\n 'Import'), ('export', 'Export')))\n", (1538, 1635), False, 'from django.db import models\n'), ((1659, 1692), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1678, 1692), False, 'from django.db import models\n'), ((1712, 1751), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1732, 1751), False, 'from django.db import models\n'), ((1915, 1988), 'django.forms.model_to_dict', 'model_to_dict', (['self.shipping_company'], {'exclude': "['date_added', 'is_active']"}), "(self.shipping_company, exclude=['date_added', 'is_active'])\n", (1928, 1988), False, 'from django.forms import model_to_dict\n'), ((2054, 2115), 'django.forms.model_to_dict', 'model_to_dict', (['self.size'], {'exclude': "['date_added', 'is_active']"}), "(self.size, exclude=['date_added', 'is_active'])\n", (2067, 2115), False, 'from django.forms import model_to_dict\n')]
|
import cdutil
import cdat_info
import cdms2
import cdms2,cdutil,sys,MV2,numpy,os,cdat_info
import unittest
import numpy
import tempfile
class CDUTIL(unittest.TestCase):
def testRegions(self):
regionNA = cdutil.region.domain(latitude=(-50.,50.,'ccb'))
f=cdms2.open(cdat_info.get_sampledata_path()+'/clt.nc')
d=f('u', regionNA)
# --------------------------------------------------------
# makesure the warning has been displayed for the 3rd args
# --------------------------------------------------------
bounds = d.getLatitude().getBounds()
self.assertTrue(numpy.allclose(bounds[0], numpy.array([-50., -49.19124603])))
self.assertTrue(numpy.allclose(bounds[-1], numpy.array([49.19124603, 50.])))
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"cdutil.region.domain",
"numpy.array",
"cdat_info.get_sampledata_path"
] |
[((809, 824), 'unittest.main', 'unittest.main', ([], {}), '()\n', (822, 824), False, 'import unittest\n'), ((218, 269), 'cdutil.region.domain', 'cdutil.region.domain', ([], {'latitude': "(-50.0, 50.0, 'ccb')"}), "(latitude=(-50.0, 50.0, 'ccb'))\n", (238, 269), False, 'import cdms2, cdutil, sys, MV2, numpy, os, cdat_info\n'), ((287, 318), 'cdat_info.get_sampledata_path', 'cdat_info.get_sampledata_path', ([], {}), '()\n', (316, 318), False, 'import cdms2, cdutil, sys, MV2, numpy, os, cdat_info\n'), ((655, 689), 'numpy.array', 'numpy.array', (['[-50.0, -49.19124603]'], {}), '([-50.0, -49.19124603])\n', (666, 689), False, 'import numpy\n'), ((742, 774), 'numpy.array', 'numpy.array', (['[49.19124603, 50.0]'], {}), '([49.19124603, 50.0])\n', (753, 774), False, 'import numpy\n')]
|
from assemblyline import odm
from assemblyline.common import forge
Classification = forge.get_classification()
@odm.model(index=True, store=False)
class SubmissionTree(odm.Model):
classification = odm.Classification(default=Classification.UNRESTRICTED) # Classification of the cache
filtered = odm.Boolean(default=False) # Has this cache entry been filtered
expiry_ts = odm.Date() # Expiry date
tree = odm.Text(index=False) # Tree cache
|
[
"assemblyline.odm.Date",
"assemblyline.odm.Text",
"assemblyline.common.forge.get_classification",
"assemblyline.odm.Classification",
"assemblyline.odm.model",
"assemblyline.odm.Boolean"
] |
[((84, 110), 'assemblyline.common.forge.get_classification', 'forge.get_classification', ([], {}), '()\n', (108, 110), False, 'from assemblyline.common import forge\n'), ((114, 148), 'assemblyline.odm.model', 'odm.model', ([], {'index': '(True)', 'store': '(False)'}), '(index=True, store=False)\n', (123, 148), False, 'from assemblyline import odm\n'), ((203, 258), 'assemblyline.odm.Classification', 'odm.Classification', ([], {'default': 'Classification.UNRESTRICTED'}), '(default=Classification.UNRESTRICTED)\n', (221, 258), False, 'from assemblyline import odm\n'), ((305, 331), 'assemblyline.odm.Boolean', 'odm.Boolean', ([], {'default': '(False)'}), '(default=False)\n', (316, 331), False, 'from assemblyline import odm\n'), ((421, 431), 'assemblyline.odm.Date', 'odm.Date', ([], {}), '()\n', (429, 431), False, 'from assemblyline import odm\n'), ((464, 485), 'assemblyline.odm.Text', 'odm.Text', ([], {'index': '(False)'}), '(index=False)\n', (472, 485), False, 'from assemblyline import odm\n')]
|
import time
import tensorflow as tf
from datasets.scripts.fra_eng import datasets_fra_eng
from transformer.text.tokenizer import TokenizerBert
from transformer.architecture.transfo import TransformerNLP
from transformer.train.metrics import MaskedAccuracy
from transformer.train.metrics import MaskedSparseCategoricalCrossentropy
from transformer.train.optimizer import ScheduleLR
if __name__ == '__main__':
# get dataset for french to english traduction
_, _, full_dataset = datasets_fra_eng()
full_dataset = full_dataset.shuffle(buffer_size=len(full_dataset))
len_ds = len(full_dataset)
# build tokenizer
fr_dataset = full_dataset.map(lambda fr, _: fr)
en_dataset = full_dataset.map(lambda _, en: en)
fr_tokenizer = TokenizerBert(lower_case=True)
en_tokenizer = TokenizerBert(lower_case=True)
fr_tokenizer.build_tokenizer(fr_dataset)
en_tokenizer.build_tokenizer(en_dataset)
# prepare dataset
full_dataset = full_dataset.cache()
full_dataset = full_dataset.batch(32)
full_dataset = full_dataset.prefetch(2)
# create transformer
in_vocab_size = len(fr_tokenizer.vocab)
out_vocab_size = len(en_tokenizer.vocab)
transfo = TransformerNLP(n_layers=12, d_model=768, n_heads=12, d_ff=1072,
dropout=0.1, in_vocab_size=in_vocab_size,
out_vocab_size=out_vocab_size,
max_seq_len=40)
# training set-up
schedule_lr = ScheduleLR(d_model=transfo.d_model)
opt = tf.keras.optimizers.Adam(schedule_lr, beta_1=0.9, beta_2=0.98,
epsilon=1e-9)
loss_function = MaskedSparseCategoricalCrossentropy()
acc_function = MaskedAccuracy()
# training function
input_signature = [tf.TensorSpec(shape=(None, None), dtype=tf.int64)] * 3
@tf.function(input_signature=input_signature)
def train(fr_tokens, en_tokens, labels):
with tf.GradientTape() as tape:
proba, _ = transfo(fr_tokens, en_tokens, training=True)
# enventually cut to maximum_length to match proba shape
labels = labels[..., :tf.shape(proba)[-2]]
loss = loss_function(labels, proba)
grads = tape.gradient(loss, transfo.trainable_variables)
opt.apply_gradients(zip(grads, transfo.trainable_variables))
acc = acc_function(labels, proba)
return loss, acc
# training loop
mean_loss = tf.keras.metrics.Mean()
mean_acc = tf.keras.metrics.Mean()
for i, (fr_txt, en_txt) in enumerate(full_dataset):
fr_tokens, en_tokens = fr_tokenizer(fr_txt), en_tokenizer(en_txt)
labels = en_tokens[:, 1:]
en_tokens = en_tokens[:, :-1]
loss, acc = train(fr_tokens, en_tokens, labels)
loss, acc = mean_loss(loss), mean_acc(acc)
if i == 0: start = time.time()
if i % 100 == 0 and i > 0:
current_time_epoch = time.time() - start
time_epoch = current_time_epoch * len_ds / (i+1)
remaining_time = time_epoch - current_time_epoch
print('batch', i, '/', len_ds)
print(f'loss = {loss.numpy():.3f}, acc = {acc.numpy():.3f}')
print(f'estimated remaining time: {int(remaining_time // 60)}min '
f'{remaining_time % 60:.1f}sec')
mean_loss.reset_state(), mean_acc.reset_state()
|
[
"transformer.text.tokenizer.TokenizerBert",
"transformer.architecture.transfo.TransformerNLP",
"tensorflow.keras.metrics.Mean",
"tensorflow.TensorSpec",
"time.time",
"transformer.train.optimizer.ScheduleLR",
"tensorflow.shape",
"transformer.train.metrics.MaskedAccuracy",
"tensorflow.keras.optimizers.Adam",
"tensorflow.function",
"datasets.scripts.fra_eng.datasets_fra_eng",
"tensorflow.GradientTape",
"transformer.train.metrics.MaskedSparseCategoricalCrossentropy"
] |
[((487, 505), 'datasets.scripts.fra_eng.datasets_fra_eng', 'datasets_fra_eng', ([], {}), '()\n', (503, 505), False, 'from datasets.scripts.fra_eng import datasets_fra_eng\n'), ((754, 784), 'transformer.text.tokenizer.TokenizerBert', 'TokenizerBert', ([], {'lower_case': '(True)'}), '(lower_case=True)\n', (767, 784), False, 'from transformer.text.tokenizer import TokenizerBert\n'), ((804, 834), 'transformer.text.tokenizer.TokenizerBert', 'TokenizerBert', ([], {'lower_case': '(True)'}), '(lower_case=True)\n', (817, 834), False, 'from transformer.text.tokenizer import TokenizerBert\n'), ((1203, 1359), 'transformer.architecture.transfo.TransformerNLP', 'TransformerNLP', ([], {'n_layers': '(12)', 'd_model': '(768)', 'n_heads': '(12)', 'd_ff': '(1072)', 'dropout': '(0.1)', 'in_vocab_size': 'in_vocab_size', 'out_vocab_size': 'out_vocab_size', 'max_seq_len': '(40)'}), '(n_layers=12, d_model=768, n_heads=12, d_ff=1072, dropout=0.1,\n in_vocab_size=in_vocab_size, out_vocab_size=out_vocab_size, max_seq_len=40)\n', (1217, 1359), False, 'from transformer.architecture.transfo import TransformerNLP\n'), ((1484, 1519), 'transformer.train.optimizer.ScheduleLR', 'ScheduleLR', ([], {'d_model': 'transfo.d_model'}), '(d_model=transfo.d_model)\n', (1494, 1519), False, 'from transformer.train.optimizer import ScheduleLR\n'), ((1530, 1607), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['schedule_lr'], {'beta_1': '(0.9)', 'beta_2': '(0.98)', 'epsilon': '(1e-09)'}), '(schedule_lr, beta_1=0.9, beta_2=0.98, epsilon=1e-09)\n', (1554, 1607), True, 'import tensorflow as tf\n'), ((1662, 1699), 'transformer.train.metrics.MaskedSparseCategoricalCrossentropy', 'MaskedSparseCategoricalCrossentropy', ([], {}), '()\n', (1697, 1699), False, 'from transformer.train.metrics import MaskedSparseCategoricalCrossentropy\n'), ((1719, 1735), 'transformer.train.metrics.MaskedAccuracy', 'MaskedAccuracy', ([], {}), '()\n', (1733, 1735), False, 'from transformer.train.metrics import 
MaskedAccuracy\n'), ((1844, 1888), 'tensorflow.function', 'tf.function', ([], {'input_signature': 'input_signature'}), '(input_signature=input_signature)\n', (1855, 1888), True, 'import tensorflow as tf\n'), ((2452, 2475), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (2473, 2475), True, 'import tensorflow as tf\n'), ((2491, 2514), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (2512, 2514), True, 'import tensorflow as tf\n'), ((1784, 1833), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None)', 'dtype': 'tf.int64'}), '(shape=(None, None), dtype=tf.int64)\n', (1797, 1833), True, 'import tensorflow as tf\n'), ((1947, 1964), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1962, 1964), True, 'import tensorflow as tf\n'), ((2853, 2864), 'time.time', 'time.time', ([], {}), '()\n', (2862, 2864), False, 'import time\n'), ((2933, 2944), 'time.time', 'time.time', ([], {}), '()\n', (2942, 2944), False, 'import time\n'), ((2145, 2160), 'tensorflow.shape', 'tf.shape', (['proba'], {}), '(proba)\n', (2153, 2160), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import shutil
import subprocess
import sys
def gather_all_proto(proto_dir, suffix="*.proto"):
directory = os.path.join(proto_dir, suffix)
files = glob.glob(directory)
return files
def create_path(path):
"""Utility function to create a path."""
if os.path.isdir(path):
return
os.makedirs(path, exist_ok=True)
def cpp_out(relative_dir, output_dir):
files = gather_all_proto(relative_dir)
for proto_file in files:
subprocess.check_call(
[
shutil.which("protoc"),
"-I%s" % ".",
"--cpp_out=%s" % output_dir,
proto_file,
],
stderr=subprocess.STDOUT,
)
def python_out(relative_dir, output_dir):
files = gather_all_proto(relative_dir)
for proto_file in files:
subprocess.check_call(
[
sys.executable,
"-m",
"grpc_tools.protoc",
"-I%s" % ".",
"--python_out=%s" % os.path.join(output_dir),
proto_file,
],
stderr=subprocess.STDOUT,
)
def cpp_service_out(relative_dir, output_dir):
plugin_path = str(
subprocess.check_output([shutil.which("which"), "grpc_cpp_plugin"]), "utf-8"
).strip()
suffix = "*_service.proto"
files = gather_all_proto(relative_dir, suffix)
for proto_file in files:
subprocess.check_call(
[
shutil.which("protoc"),
"-I%s" % ".",
"--grpc_out=%s" % output_dir,
"--plugin=protoc-gen-grpc=%s" % plugin_path,
proto_file,
],
stderr=subprocess.STDOUT,
)
def python_service_out(relative_dir, output_dir):
suffix = "*_service.proto"
files = gather_all_proto(relative_dir, suffix)
for proto_file in files:
subprocess.check_call(
[
sys.executable,
"-m",
"grpc_tools.protoc",
"-I%s" % '.',
"--python_out=%s" % output_dir,
"--grpc_python_out=%s" % output_dir,
proto_file,
],
stderr=subprocess.STDOUT,
)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: python proto_generator.py <OUTPUT_PATH> [--cpp] [--python]")
sys.exit(1)
# path to 'GraphScope/proto/python/proto'
current_dir = os.path.dirname(os.path.abspath(__file__))
print(current_dir)
# path to 'GraphScope/proto'
base_dir = os.path.join(current_dir, "../", "../")
os.chdir(base_dir)
print(base_dir)
# must use relative path
relative_dir = os.path.join(".", "graphscope", "proto")
output_dir = sys.argv[1]
output_dir = os.path.realpath(os.path.realpath(output_dir))
create_path(output_dir)
print("Generating cpp proto to:" + output_dir)
if len(sys.argv) <= 2 or len(sys.argv) > 2 and sys.argv[2] == "--cpp":
cpp_out(relative_dir, output_dir)
cpp_service_out(relative_dir, output_dir)
if len(sys.argv) <= 2 or len(sys.argv) > 2 and sys.argv[2] == "--python":
python_out(relative_dir, output_dir)
python_service_out(relative_dir, output_dir)
|
[
"os.path.abspath",
"os.makedirs",
"os.path.isdir",
"os.path.realpath",
"shutil.which",
"glob.glob",
"sys.exit",
"os.path.join",
"os.chdir",
"subprocess.check_call"
] |
[((800, 831), 'os.path.join', 'os.path.join', (['proto_dir', 'suffix'], {}), '(proto_dir, suffix)\n', (812, 831), False, 'import os\n'), ((844, 864), 'glob.glob', 'glob.glob', (['directory'], {}), '(directory)\n', (853, 864), False, 'import glob\n'), ((959, 978), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (972, 978), False, 'import os\n'), ((999, 1031), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (1010, 1031), False, 'import os\n'), ((3286, 3325), 'os.path.join', 'os.path.join', (['current_dir', '"""../"""', '"""../"""'], {}), "(current_dir, '../', '../')\n", (3298, 3325), False, 'import os\n'), ((3330, 3348), 'os.chdir', 'os.chdir', (['base_dir'], {}), '(base_dir)\n', (3338, 3348), False, 'import os\n'), ((3417, 3457), 'os.path.join', 'os.path.join', (['"""."""', '"""graphscope"""', '"""proto"""'], {}), "('.', 'graphscope', 'proto')\n", (3429, 3457), False, 'import os\n'), ((2598, 2793), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'grpc_tools.protoc', '-I%s' % '.', '--python_out=%s' %\n output_dir, '--grpc_python_out=%s' % output_dir, proto_file]"], {'stderr': 'subprocess.STDOUT'}), "([sys.executable, '-m', 'grpc_tools.protoc', '-I%s' %\n '.', '--python_out=%s' % output_dir, '--grpc_python_out=%s' %\n output_dir, proto_file], stderr=subprocess.STDOUT)\n", (2619, 2793), False, 'import subprocess\n'), ((3094, 3105), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3102, 3105), False, 'import sys\n'), ((3187, 3212), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3202, 3212), False, 'import os\n'), ((3522, 3550), 'os.path.realpath', 'os.path.realpath', (['output_dir'], {}), '(output_dir)\n', (3538, 3550), False, 'import os\n'), ((1206, 1228), 'shutil.which', 'shutil.which', (['"""protoc"""'], {}), "('protoc')\n", (1218, 1228), False, 'import shutil\n'), ((2174, 2196), 'shutil.which', 'shutil.which', (['"""protoc"""'], {}), "('protoc')\n", 
(2186, 2196), False, 'import shutil\n'), ((1714, 1738), 'os.path.join', 'os.path.join', (['output_dir'], {}), '(output_dir)\n', (1726, 1738), False, 'import os\n'), ((1936, 1957), 'shutil.which', 'shutil.which', (['"""which"""'], {}), "('which')\n", (1948, 1957), False, 'import shutil\n')]
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: <NAME>
import os
import genmsg
import roslaunch
from roslaunch import RLException
import rospkg
import rospy
import rostopic
class RqtRoscommUtil(object):
@staticmethod
def load_parameters(config, caller_id):
"""
Load parameters onto the parameter server.
Copied from ROSLaunchRunner.
@type config: roslaunch.config.ROSLaunchConfig
@raise RLException:
"""
# XMLRPC proxy for communicating with master, 'xmlrpclib.ServerProxy'
param_server = config.master.get()
param = None
try:
# multi-call style xmlrpc
# According to API doc, get_multi() returns
# multicall XMLRPC proxy for communicating with master,
# "xmlrpclib.MultiCall"
param_server_multi = config.master.get_multi()
# clear specified parameter namespaces
# 2468 unify clear params to prevent error
for param in roslaunch.launch._unify_clear_params(config.clear_params):
if param_server.hasParam(caller_id, param)[2]:
param_server_multi.deleteParam(caller_id, param)
r = param_server_multi()
for code, msg, _ in r:
if code != 1:
raise RLException("Failed to clear parameter {}: ".format(msg))
except RLException:
raise
except Exception as e:
rospy.logerr("load_parameters: unable to set params " +
"(last param was [{}]): {}".format(param, e))
raise # re-raise as this is fatal
try:
# multi-call objects are not reusable
param_server_multi = config.master.get_multi()
for param in config.params.values():
# suppressing this as it causes too much spam
# printlog("setting parameter [%s]"%param.key)
param_server_multi.setParam(caller_id, param.key, param.value)
r = param_server_multi()
for code, msg, _ in r:
if code != 1:
raise RLException("Failed to set parameter: %s" % (msg))
except RLException:
raise
except Exception as e:
print("load_parameters: unable to set params (last param was " +
"[%s]): %s" % (param, e))
raise # re-raise as this is fatal
rospy.logdebug("... load_parameters complete")
@staticmethod
def iterate_packages(subdir):
"""
Iterator for packages that contain the given subdir.
This method is generalizing rosmsg.iterate_packages.
@param subdir: eg. 'launch', 'msg', 'srv', 'action'
@type subdir: str
@raise ValueError:
"""
if subdir == None or subdir == '':
raise ValueError('Invalid package subdir = {}'.format(subdir))
rospack = rospkg.RosPack()
pkgs = rospack.list()
rospy.logdebug('pkgs={}'.format(pkgs))
for p in pkgs:
d = os.path.join(rospack.get_path(p), subdir)
rospy.logdebug('rospack dir={}'.format(d))
if os.path.isdir(d):
yield p, d
@staticmethod
def list_files(package, subdir, file_extension='.launch'):
"""
#TODO: Come up with better name of the method.
Taken from rosmsg.
Lists files contained in the specified package
@param package: package name, ``str``
@param file_extension: Defaults to '.launch', ``str``
:returns: list of msgs/srv in package, ``[str]``
"""
if subdir == None or subdir == '':
raise ValueError('Invalid package subdir = {}'.format(subdir))
rospack = rospkg.RosPack()
path = os.path.join(rospack.get_path(package), subdir)
return [genmsg.resource_name(package, t) for t in RqtRoscommUtil._list_types(path, file_extension)]
@staticmethod
def _list_types(path, ext):
"""
Taken from rosmsg
List all messages in the specified package
:param package str: name of package to search
:param include_depends bool: if True, will also list messages in
package dependencies.
:returns [str]: message type names
"""
types = RqtRoscommUtil._list_resources(path,
RqtRoscommUtil._msg_filter(ext))
result = [x for x in types]
# result = [x[:-len(ext)] for x in types] # Remove extension
result.sort()
return result
@staticmethod
def _list_resources(path, rfilter=os.path.isfile):
"""
Taken from rosmsg._list_resources
List resources in a package directory within a particular
subdirectory. This is useful for listing messages, services, etc...
:param rfilter: resource filter function that returns true if filename
is the desired resource type, ``fn(filename)->bool``
"""
resources = []
if os.path.isdir(path):
resources = [f for f
in os.listdir(path) if rfilter(os.path.join(path, f))]
else:
resources = []
return resources
@staticmethod
def _msg_filter(ext):
"""
Taken from rosmsg._msg_filter
"""
def mfilter(f):
"""
Predicate for filtering directory list. matches message files
:param f: filename, ``str``
"""
return os.path.isfile(f) and f.endswith(ext)
return mfilter
@staticmethod
def is_roscore_running():
"""
@rtype: bool
"""
try:
# Checkif rosmaster is running or not.
rostopic.get_topic_class('/rosout')
return True
except rostopic.ROSTopicIOException as e:
return False
|
[
"rostopic.get_topic_class",
"roslaunch.RLException",
"genmsg.resource_name",
"os.path.isdir",
"rospkg.RosPack",
"os.path.isfile",
"rospy.logdebug",
"os.path.join",
"os.listdir",
"roslaunch.launch._unify_clear_params"
] |
[((4015, 4061), 'rospy.logdebug', 'rospy.logdebug', (['"""... load_parameters complete"""'], {}), "('... load_parameters complete')\n", (4029, 4061), False, 'import rospy\n'), ((4513, 4529), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (4527, 4529), False, 'import rospkg\n'), ((5351, 5367), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (5365, 5367), False, 'import rospkg\n'), ((6681, 6700), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (6694, 6700), False, 'import os\n'), ((2567, 2624), 'roslaunch.launch._unify_clear_params', 'roslaunch.launch._unify_clear_params', (['config.clear_params'], {}), '(config.clear_params)\n', (2603, 2624), False, 'import roslaunch\n'), ((4759, 4775), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (4772, 4775), False, 'import os\n'), ((5449, 5481), 'genmsg.resource_name', 'genmsg.resource_name', (['package', 't'], {}), '(package, t)\n', (5469, 5481), False, 'import genmsg\n'), ((7408, 7443), 'rostopic.get_topic_class', 'rostopic.get_topic_class', (['"""/rosout"""'], {}), "('/rosout')\n", (7432, 7443), False, 'import rostopic\n'), ((7177, 7194), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (7191, 7194), False, 'import os\n'), ((3711, 3759), 'roslaunch.RLException', 'RLException', (["('Failed to set parameter: %s' % msg)"], {}), "('Failed to set parameter: %s' % msg)\n", (3722, 3759), False, 'from roslaunch import RLException\n'), ((6763, 6779), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (6773, 6779), False, 'import os\n'), ((6791, 6812), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (6803, 6812), False, 'import os\n')]
|
from leapp.cli import main
import leapp.utils.i18n # noqa: F401; pylint: disable=unused-import
main()
|
[
"leapp.cli.main"
] |
[((98, 104), 'leapp.cli.main', 'main', ([], {}), '()\n', (102, 104), False, 'from leapp.cli import main\n')]
|
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from .assignee import GroupAssignee, UserProfileAssignee
class PublicInfo(models.Model):
email = models.EmailField(
verbose_name=_('Additional email'),
null=True,
blank=True,
)
phone_number = models.CharField(
max_length=15,
verbose_name=_("Phone number"),
null=True,
blank=True,
)
additional_info = models.TextField(
max_length=200,
verbose_name=_('Additional info'),
null=True,
blank=True,
)
class Profile(models.Model):
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
related_name='profile',
on_delete=models.CASCADE,
verbose_name=_('Related User instance'),
)
first_name = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name=_('First name'),
)
last_name = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name=_('Last name'),
)
icon = models.URLField(
verbose_name=_('Link to user avatar image'),
null=True,
blank=True,
)
public_info = models.OneToOneField(
PublicInfo,
related_name='profile',
verbose_name=_('Public info'),
on_delete=models.CASCADE,
null=True,
)
@property
def email(self):
return self.user.email
@property
def full_name(self):
if self.first_name and self.last_name:
return '{} {}'.format(self.first_name, self.last_name)
return self.user.username
@property
def username(self):
return self.user.username
def __str__(self):
return self.full_name
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_profile_for_new_user(sender, created, instance, **kwargs):
if created:
public_info = PublicInfo()
public_info.save()
profile = Profile(user=instance, public_info=public_info)
profile.save()
class UserRole(models.Model):
class Role:
REGULAR_USER = 'user'
OWNER = 'owner'
ADMIN = 'admin'
code = models.CharField(
max_length=20,
unique=True,
null=True
)
description = models.TextField(
max_length=200,
null=True,
blank=True,
)
@classmethod
def get_default_role(cls):
# TODO: add default role proper management
return cls.objects.get_or_create(code=cls.Role.REGULAR_USER)[0].id
def is_creator(self):
return self.code == self.Role.OWNER
def is_admin_or_creator(self):
return self.code == self.Role.OWNER or self.code == self.Role.ADMIN
def __str__(self):
return "{}".format(self.code)
class Team(models.Model):
members = models.ManyToManyField(
Profile,
through='MembershipInfo',
related_name='teams'
)
def get_assignees(self):
res = list(map(GroupAssignee, self.groups.all()))
res.extend(map(UserProfileAssignee, self.members.all()))
return res
def add_member(self, profile, role=UserRole.Role.REGULAR_USER):
r = UserRole.objects.get(code=role)
member_info = MembershipInfo(
profile=profile,
team=self,
role=r
)
member_info.save()
def __str__(self):
return str(self.board)
class Group(models.Model):
name = models.CharField(
max_length=50,
verbose_name=_('Group name')
)
description = models.TextField(
max_length=200,
verbose_name=_('Description'),
blank=True,
null=True
)
icon = models.URLField(
verbose_name=_('Link to group avatar image'),
null=True,
blank=True
)
members = models.ManyToManyField(
Profile,
verbose_name=_('Group members'),
related_name='groups'
)
team = models.ForeignKey(
Team,
related_name='groups',
verbose_name=_('Team'),
on_delete=models.CASCADE,
)
@property
def board(self):
return self.team.board
class MembershipInfo(models.Model):
profile = models.ForeignKey(
Profile,
on_delete=models.DO_NOTHING,
related_name='member_info',
)
team = models.ForeignKey(
Team,
on_delete=models.CASCADE,
related_name='members_info',
)
role = models.ForeignKey(
UserRole,
on_delete=models.SET_DEFAULT,
default=UserRole.get_default_role,
)
created_at = models.DateTimeField(auto_now_add=True)
@property
def board(self):
return self.team.board
|
[
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.dispatch.receiver",
"django.db.models.DateTimeField",
"django.utils.translation.ugettext_lazy"
] |
[((1918, 1970), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'settings.AUTH_USER_MODEL'}), '(post_save, sender=settings.AUTH_USER_MODEL)\n', (1926, 1970), False, 'from django.dispatch import receiver\n'), ((2346, 2401), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'unique': '(True)', 'null': '(True)'}), '(max_length=20, unique=True, null=True)\n', (2362, 2401), False, 'from django.db import models\n'), ((2450, 2505), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(200)', 'null': '(True)', 'blank': '(True)'}), '(max_length=200, null=True, blank=True)\n', (2466, 2505), False, 'from django.db import models\n'), ((2999, 3078), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Profile'], {'through': '"""MembershipInfo"""', 'related_name': '"""teams"""'}), "(Profile, through='MembershipInfo', related_name='teams')\n", (3021, 3078), False, 'from django.db import models\n'), ((4390, 4478), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Profile'], {'on_delete': 'models.DO_NOTHING', 'related_name': '"""member_info"""'}), "(Profile, on_delete=models.DO_NOTHING, related_name=\n 'member_info')\n", (4407, 4478), False, 'from django.db import models\n'), ((4517, 4595), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Team'], {'on_delete': 'models.CASCADE', 'related_name': '"""members_info"""'}), "(Team, on_delete=models.CASCADE, related_name='members_info')\n", (4534, 4595), False, 'from django.db import models\n'), ((4639, 4736), 'django.db.models.ForeignKey', 'models.ForeignKey', (['UserRole'], {'on_delete': 'models.SET_DEFAULT', 'default': 'UserRole.get_default_role'}), '(UserRole, on_delete=models.SET_DEFAULT, default=UserRole.\n get_default_role)\n', (4656, 4736), False, 'from django.db import models\n'), ((4781, 4820), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4801, 4820), False, 'from 
django.db import models\n'), ((346, 367), 'django.utils.translation.ugettext_lazy', '_', (['"""Additional email"""'], {}), "('Additional email')\n", (347, 367), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((496, 513), 'django.utils.translation.ugettext_lazy', '_', (['"""Phone number"""'], {}), "('Phone number')\n", (497, 513), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((646, 666), 'django.utils.translation.ugettext_lazy', '_', (['"""Additional info"""'], {}), "('Additional info')\n", (647, 666), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((898, 924), 'django.utils.translation.ugettext_lazy', '_', (['"""Related User instance"""'], {}), "('Related User instance')\n", (899, 924), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1052, 1067), 'django.utils.translation.ugettext_lazy', '_', (['"""First name"""'], {}), "('First name')\n", (1053, 1067), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1193, 1207), 'django.utils.translation.ugettext_lazy', '_', (['"""Last name"""'], {}), "('Last name')\n", (1194, 1207), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1265, 1295), 'django.utils.translation.ugettext_lazy', '_', (['"""Link to user avatar image"""'], {}), "('Link to user avatar image')\n", (1266, 1295), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1456, 1472), 'django.utils.translation.ugettext_lazy', '_', (['"""Public info"""'], {}), "('Public info')\n", (1457, 1472), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3697, 3712), 'django.utils.translation.ugettext_lazy', '_', (['"""Group name"""'], {}), "('Group name')\n", (3698, 3712), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3801, 3817), 'django.utils.translation.ugettext_lazy', '_', (['"""Description"""'], {}), "('Description')\n", (3802, 3817), True, 'from django.utils.translation import 
ugettext_lazy as _\n'), ((3913, 3944), 'django.utils.translation.ugettext_lazy', '_', (['"""Link to group avatar image"""'], {}), "('Link to group avatar image')\n", (3914, 3944), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4067, 4085), 'django.utils.translation.ugettext_lazy', '_', (['"""Group members"""'], {}), "('Group members')\n", (4068, 4085), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4220, 4229), 'django.utils.translation.ugettext_lazy', '_', (['"""Team"""'], {}), "('Team')\n", (4221, 4229), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
from twitter.pants.base import ParseContext
__author__ = '<NAME>'
from collections import defaultdict
from twitter.pants.targets import InternalTarget, TargetWithSources
class MockTarget(InternalTarget, TargetWithSources):
def __init__(self, name, dependencies=None, num_sources=0, exclusives=None):
with ParseContext.temp():
InternalTarget.__init__(self, name, dependencies, exclusives=exclusives)
TargetWithSources.__init__(self, name, exclusives=exclusives)
self.num_sources = num_sources
self.declared_exclusives = defaultdict(set)
if exclusives is not None:
for k in exclusives:
self.declared_exclusives[k] = set([exclusives[k]])
self.exclusives = None
def resolve(self):
yield self
def walk(self, work, predicate=None):
work(self)
for dep in self.dependencies:
dep.walk(work)
|
[
"collections.defaultdict",
"twitter.pants.targets.TargetWithSources.__init__",
"twitter.pants.base.ParseContext.temp",
"twitter.pants.targets.InternalTarget.__init__"
] |
[((549, 565), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (560, 565), False, 'from collections import defaultdict\n'), ((315, 334), 'twitter.pants.base.ParseContext.temp', 'ParseContext.temp', ([], {}), '()\n', (332, 334), False, 'from twitter.pants.base import ParseContext\n'), ((342, 414), 'twitter.pants.targets.InternalTarget.__init__', 'InternalTarget.__init__', (['self', 'name', 'dependencies'], {'exclusives': 'exclusives'}), '(self, name, dependencies, exclusives=exclusives)\n', (365, 414), False, 'from twitter.pants.targets import InternalTarget, TargetWithSources\n'), ((421, 482), 'twitter.pants.targets.TargetWithSources.__init__', 'TargetWithSources.__init__', (['self', 'name'], {'exclusives': 'exclusives'}), '(self, name, exclusives=exclusives)\n', (447, 482), False, 'from twitter.pants.targets import InternalTarget, TargetWithSources\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
COS method
==========
The method comes from [1]_
The original code is found at
http://www.wilmott.com/messageview.cfm?catid=34&threadid=78554
References
----------
.. [1] <NAME>., & <NAME>. (2009).
A Novel Pricing Method for European Options
Based on Fourier-Cosine Series Expansions.
*SIAM Journal on Scientific Computing*, 31(2), 826. doi:10.1137/080718061
<http://ta.twi.tudelft.nl/mf/users/oosterle/oosterlee/COS.pdf>
"""
from __future__ import division, print_function
import numpy as np
import numexpr as ne
__all__ = ['cosmethod']
def cosmethod(model, moneyness=0., call=True, npoints=2**10):
"""COS method.
Parameters
----------
model : instance of specific model class
The method depends on availability of two methods:
- charfun
- cos_restriction
moneyness : array_like
Moneyness of the option, np.log(strike/price) - riskfree * maturity
call : bool array_like
Call/Put flag
npoints : int
Number of points on the grid. The more the better, but slower.
Returns
-------
array_like
Option premium normalized by asset price
Notes
-----
`charfun` method (risk-neutral conditional chracteristic function)
of `model` instance should depend on
one argument only (array_like) and should return
array_like of the same dimension.
`cos_restriction` method of `model` instance takes `maturity`
and `riskfree` as array arguments,
and returns two corresponding arrays (a, b).
"""
if not hasattr(model, 'charfun'):
raise Exception('Characteristic function is not available!')
if not hasattr(model, 'cos_restriction'):
raise Exception('COS restriction is not available!')
# (nobs, ) arrays
alim, blim = model.cos_restriction()
# (npoints, nobs) array
kvec = np.arange(npoints)[:, np.newaxis] * np.pi / (blim - alim)
# (npoints, ) array
unit = np.append(.5, np.ones(npoints-1))
# Arguments
argc = (kvec, alim, blim, 0, blim)
argp = (kvec, alim, blim, alim, 0)
# (nobs, ) array
put = np.logical_not(call)
# (npoints, nobs) array
umat = 2 / (blim - alim) * (call * xfun(*argc) - put * xfun(*argp))
# (npoints, nobs) array
pmat = model.charfun(kvec)
# (npoints, nobs) array
xmat = np.exp(-1j * kvec * (moneyness + alim))
# (nobs, ) array
return np.exp(moneyness) * np.dot(unit, pmat * umat * xmat).real
def xfun(k, a, b, c, d):
"""Xi-Psi function.
Parameters
----------
k : (n, 1) array
a : float or (m, ) array
b : float or (m, ) array
c : float or (m, ) array
d : float or (m, ) array
Returns
-------
(n, m) array
"""
# out0 = (np.cos(k * (d-a)) * np.exp(d) - np.cos(k * (c-a)) * np.exp(c)
# + k * (np.sin(k * (d-a)) * np.exp(d) - np.sin(k * (c-a)) * np.exp(c)))\
# / (1 + k**2)
# out1 = (np.sin(k[1:] * (d-a)) - np.sin(k[1:] * (c-a))) / k[1:]
out0 = ne.evaluate(("(cos(k * (d-a)) * exp(d) - cos(k * (c-a)) * exp(c)"
"+ k * (sin(k * (d-a)) * exp(d) - sin(k * (c-a)) * exp(c)))"
"/ (1 + k**2)"))
k1 = k[1:]
out1 = ne.evaluate("(sin(k1 * (d-a)) - sin(k1 * (c-a))) / k1")
out1 = np.vstack([(d - c) * np.ones_like(a), out1])
return out0 - out1
if __name__ == '__main__':
pass
|
[
"numpy.ones_like",
"numpy.logical_not",
"numpy.ones",
"numexpr.evaluate",
"numpy.arange",
"numpy.exp",
"numpy.dot"
] |
[((2168, 2188), 'numpy.logical_not', 'np.logical_not', (['call'], {}), '(call)\n', (2182, 2188), True, 'import numpy as np\n'), ((2387, 2428), 'numpy.exp', 'np.exp', (['(-1.0j * kvec * (moneyness + alim))'], {}), '(-1.0j * kvec * (moneyness + alim))\n', (2393, 2428), True, 'import numpy as np\n'), ((3045, 3190), 'numexpr.evaluate', 'ne.evaluate', (['"""(cos(k * (d-a)) * exp(d) - cos(k * (c-a)) * exp(c)+ k * (sin(k * (d-a)) * exp(d) - sin(k * (c-a)) * exp(c)))/ (1 + k**2)"""'], {}), "(\n '(cos(k * (d-a)) * exp(d) - cos(k * (c-a)) * exp(c)+ k * (sin(k * (d-a)) * exp(d) - sin(k * (c-a)) * exp(c)))/ (1 + k**2)'\n )\n", (3056, 3190), True, 'import numexpr as ne\n'), ((3231, 3286), 'numexpr.evaluate', 'ne.evaluate', (['"""(sin(k1 * (d-a)) - sin(k1 * (c-a))) / k1"""'], {}), "('(sin(k1 * (d-a)) - sin(k1 * (c-a))) / k1')\n", (3242, 3286), True, 'import numexpr as ne\n'), ((2023, 2043), 'numpy.ones', 'np.ones', (['(npoints - 1)'], {}), '(npoints - 1)\n', (2030, 2043), True, 'import numpy as np\n'), ((2459, 2476), 'numpy.exp', 'np.exp', (['moneyness'], {}), '(moneyness)\n', (2465, 2476), True, 'import numpy as np\n'), ((2479, 2511), 'numpy.dot', 'np.dot', (['unit', '(pmat * umat * xmat)'], {}), '(unit, pmat * umat * xmat)\n', (2485, 2511), True, 'import numpy as np\n'), ((1916, 1934), 'numpy.arange', 'np.arange', (['npoints'], {}), '(npoints)\n', (1925, 1934), True, 'import numpy as np\n'), ((3320, 3335), 'numpy.ones_like', 'np.ones_like', (['a'], {}), '(a)\n', (3332, 3335), True, 'import numpy as np\n')]
|
# WEB-SERVER RESPONSE STATUS LOGGER
# MIT License
# Copyright (c) 2021 jaggiJ
# Checks and logs server response status
import requests, sys
import time, datetime
########################################################################
# PRINTS HELP IF REQUESTED by --help argument and such
helpRequest = ['--help', '-h', 'help', 'h', '-help']
if len(sys.argv) > 1 and sys.argv[1] in helpRequest:
print('''
# USE: 'python[3] res-logger.py <server-address> <request time seconds>'
# for testing:
# 'python3 res-logger.py https://web-address.com 6 &'
# 'tail -F status.log'
# example for permanent run in background:
# 'python3 res-logger.py https://your-web-server.com 300 &' ''')
sys.exit()
#######################################################################
# USER EDITABLE
server = 'https://server.address.here.com'
requestFrequency = 300 #how often server is checked
lessFrequent = 6 #requestFrequency x lessFrequent=log entry interval(sec)
########################################################################
#FUNCTIONS
def append_status():
'''Appends server response status to log.'''
timeNow = datetime.datetime.now()
formatTimeNow = timeNow.strftime("%d/%m/%Y, %H:%M:%S")
res_msg = f'{formatTimeNow} status code: {code}\n'
with open('status.log', 'a') as file:
file.write(res_msg)
def estimate_status():
'''Figures out server response code e.g. 200.'''
try:
req = requests.get(server)
except:
req = 'no connection to server'
if isinstance(req, str):
code = 'no connection to server'
else:
code = req.status_code
return req, code
def help_text():
'''Prints out help to user.'''
print(
'Add one or two arguments:\n1. server address starting with http\
or https,\n2. time in seconds (default is 300), e.g\n"python3 \
res-logger.py https://google.com 6"')
sys.exit()
##########################################################################
#COMMAND LINE ARGUMENTS
commandLineArgs = sys.argv[1::]
if commandLineArgs:
#Checks if user provided proper command line arguments; if not
#prints out help.
if commandLineArgs[0].startswith('http') and commandLineArgs[1]:
server = commandLineArgs[0]
requestFrequency = int(commandLineArgs[1])
elif commandLineArgs[0].startswith('http') and commandLineArgs == 1:
server = commandLineArgs[0]
else:
help_text()
elif not commandLineArgs and server == 'https://server.address.here.com':
help_text()
###############################################################################
#OTHER VARIABLES
req, code = estimate_status()
###############################################################
append_status()
print(f'Initial status code {code}, request: {req}')
#MAIN LOOP
counter = 0
while True:
#Sends requests and logs responses to file.
counter += 1
previous_status = code
req, code = estimate_status()
if code != previous_status:
append_status()
else:
#Decrease frequency of logging same status by calm_factor
if counter % lessFrequent == 0:
append_status()
time.sleep(requestFrequency)
|
[
"requests.get",
"datetime.datetime.now",
"sys.exit",
"time.sleep"
] |
[((715, 725), 'sys.exit', 'sys.exit', ([], {}), '()\n', (723, 725), False, 'import requests, sys\n'), ((1153, 1176), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1174, 1176), False, 'import time, datetime\n'), ((1904, 1914), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1912, 1914), False, 'import requests, sys\n'), ((3194, 3222), 'time.sleep', 'time.sleep', (['requestFrequency'], {}), '(requestFrequency)\n', (3204, 3222), False, 'import time, datetime\n'), ((1461, 1481), 'requests.get', 'requests.get', (['server'], {}), '(server)\n', (1473, 1481), False, 'import requests, sys\n')]
|
# This is a troll indeed ffs *facepalm*
import asyncio
from telethon import events
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import ChannelParticipantsAdmins
from userbot.utils import admin_cmd
@borg.on(admin_cmd("gbun"))
async def gbun(event):
if event.fwd_from:
return
gbunVar = event.text
gbunVar = gbunVar[6:]
mentions = "`Alert! User Is Banned In The` @Xiaomeme `Federation\n`"
no_reason = "**Reason:** `Likely A Spammer!`"
await event.edit("**Starting A Federation Ban!**")
asyncio.sleep(4)
chat = await event.get_input_chat()
async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
mentions += f""
reply_message = None
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
replied_user = await event.client(GetFullUserRequest(reply_message.from_id))
firstname = replied_user.user.first_name
usname = replied_user.user.username
idd = reply_message.from_id
# make meself invulnerable cuz why not xD
if idd == 1089637689:
await reply_message.reply("`You Cannot Ban A Federation Admin`")
else:
jnl=("`Alert!! `"
"[{}](tg://user?id={})"
"`Is Banned In The` @Xiaomeme `Federation.`\n\n`"
"**First Name: ** __{}__\n"
"**ID : ** `{}`\n"
).format(firstname, idd, firstname, idd)
if usname == None:
jnl += "**Username: ** `Doesn't own a username!`\n"
elif usname != "None":
jnl += "**Username** : @{}\n".format(usname)
if len(gbunVar) > 0:
gbunm = "`{}`".format(gbunVar)
gbunr = "**Reason: **"+gbunm
jnl += gbunr
else:
jnl += no_reason
await reply_message.reply(jnl)
else:
mention = "`Alert! User Is Banned In The` @Xiaomeme `Federation`\n**Reason:** `Likely A Spammer!`"
await event.reply(mention)
await event.delete()
|
[
"userbot.utils.admin_cmd",
"asyncio.sleep",
"telethon.tl.functions.users.GetFullUserRequest"
] |
[((575, 591), 'asyncio.sleep', 'asyncio.sleep', (['(4)'], {}), '(4)\n', (588, 591), False, 'import asyncio\n'), ((253, 270), 'userbot.utils.admin_cmd', 'admin_cmd', (['"""gbun"""'], {}), "('gbun')\n", (262, 270), False, 'from userbot.utils import admin_cmd\n'), ((899, 940), 'telethon.tl.functions.users.GetFullUserRequest', 'GetFullUserRequest', (['reply_message.from_id'], {}), '(reply_message.from_id)\n', (917, 940), False, 'from telethon.tl.functions.users import GetFullUserRequest\n')]
|
"""时间序列计算层、神经网络模型定义.
复现华泰金工 alpha net V2、V3 版本.
V2:
```
input: (batch_size, history time steps, features)
stride = 5
input -> expand features -> BN -> LSTM -> BN -> Dense(linear)
```
V3:
```
input: (batch_size, history time steps, features)
stride = 5
+-> expand features -> BN -> GRU -> BN -+
input --| stride = 10 |- concat -> Dense(linear)
+-> expand features -> BN -> GRU -> BN -+
```
(BN: batch normalization)
version: 0.0.7
author: <NAME>
date: 2021-07-29
该module定义了计算不同时间序列特征的层,工程上使用tensorflow
进行高度向量化的计算,训练时较高效。
"""
import tensorflow as _tf
import tensorflow.keras.layers as _tfl
from tensorflow.keras.layers import Layer as _Layer
from tensorflow.keras.initializers import Initializer as _Initializer
from tensorflow.keras import Model as _Model
from .metrics import UpDownAccuracy as _UpDownAccuracy
from abc import ABC as _ABC
from abc import abstractmethod as _abstractmethod
if not "2.3.0" <= _tf.__version__:
print(f"requires tensorflow version >= 2.3.0, "
f"current version {_tf.__version__}")
exit(1)
__all__ = ["Std",
"Return",
"Correlation",
"LinearDecay",
"Covariance",
"ZScore",
"FeatureExpansion",
"AlphaNetV2",
"AlphaNetV3",
"AlphaNetV4",
"load_model"]
class _StrideLayer(_Layer, _ABC):
"""计算每个stride的统计值的基类."""
def __init__(self, stride=10, **kwargs):
"""计算每个stride的统计值的基类.
Args:
stride (int): time steps需要是stride的整数倍
"""
if stride <= 1:
raise ValueError("Illegal Argument: stride should be "
"greater than 1")
super(_StrideLayer, self).__init__(**kwargs)
self.stride = stride
self.out_shape = None
self.intermediate_shape = None
def build(self, input_shape):
"""构建该层,计算维度信息."""
(features,
output_length) = __get_dimensions__(input_shape, self.stride)
self.out_shape = [-1, output_length, features]
self.intermediate_shape = [-1, self.stride, features]
def get_config(self):
"""获取参数,保存模型需要的函数."""
config = super().get_config().copy()
config.update({'stride': self.stride})
return config
class Std(_StrideLayer):
"""计算每个序列各stride的标准差.
Notes:
计算每个feature各个stride的standard deviation
"""
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride, features)
"""
strides = _tf.reshape(inputs, self.intermediate_shape)
# compute standard deviations for each stride
std = _tf.math.reduce_std(strides, axis=-2)
return _tf.reshape(std, self.out_shape)
class ZScore(_StrideLayer):
"""计算每个序列各stride的均值除以其标准差.
Notes:
并非严格意义上的z-score,
计算公式为每个feature各个stride的mean除以各自的standard deviation
"""
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride, features)
"""
strides = _tf.reshape(inputs, self.intermediate_shape)
# compute standard deviations for each stride
std = _tf.math.reduce_std(strides, axis=-2)
# compute means for each stride
means = _tf.math.reduce_mean(strides, axis=-2)
# divide means by standard deviations for each stride
z_score = _tf.math.divide_no_nan(means, std)
return _tf.reshape(z_score, self.out_shape)
class LinearDecay(_StrideLayer):
"""计算每个序列各stride的线性衰减加权平均.
Notes:
以线性衰减为权重,计算每个feature各个stride的均值:
如stride为10,则某feature该stride的权重为(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
"""
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride, features)
"""
# get linear decay kernel
single_kernel = _tf.linspace(1.0, self.stride, num=self.stride)
kernel = _tf.repeat(single_kernel, self.intermediate_shape[2])
kernel = kernel / _tf.reduce_sum(single_kernel)
# reshape tensors into:
# (bash_size * (time_steps / stride), stride, features)
kernel = _tf.reshape(kernel, self.intermediate_shape[1:])
inputs = _tf.reshape(inputs, self.intermediate_shape)
# broadcasting kernel to inputs batch dimension
linear_decay = _tf.reduce_sum(kernel * inputs, axis=1)
linear_decay = _tf.reshape(linear_decay, self.out_shape)
return linear_decay
class Return(_Layer):
"""计算每个序列各stride的回报率.
Notes:
计算公式为每个stride最后一个数除以第一个数再减去一
"""
def __init__(self, stride=10, **kwargs):
"""回报率.
Args:
stride (int): time steps需要是stride的整数倍
"""
if stride <= 1:
raise ValueError("Illegal Argument: stride should be "
"greater than 1")
super(Return, self).__init__(**kwargs)
self.stride = stride
def build(self, input_shape):
"""构建该层,计算维度信息."""
time_steps = input_shape[1]
if time_steps % self.stride != 0:
raise ValueError("Error, time_steps 应该是 stride的整数倍")
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride, features)
"""
# get the endings of each strides as numerators
numerators = inputs[:, (self.stride - 1)::self.stride, :]
# get the beginnings of each strides as denominators
denominators = inputs[:, 0::self.stride, :]
return _tf.math.divide_no_nan(numerators, denominators) - 1.0
def get_config(self):
"""获取参数,保存模型需要的函数."""
config = super().get_config().copy()
config.update({'stride': self.stride})
return config
class _OuterProductLayer(_Layer, _ABC):
def __init__(self, stride=10, **kwargs):
"""外乘类的扩张层.
Args:
stride (int): time steps需要是stride的整数倍
"""
if stride <= 1:
raise ValueError("Illegal Argument: stride should be "
"greater than 1")
super(_OuterProductLayer, self).__init__(**kwargs)
self.stride = stride
self.intermediate_shape = None
self.out_shape = None
self.lower_mask = None
def build(self, input_shape):
"""构建该层,计算维度信息."""
(features,
output_length) = __get_dimensions__(input_shape, self.stride)
self.intermediate_shape = (-1, self.stride, features)
output_features = int(features * (features - 1) / 2)
self.out_shape = (-1, output_length, output_features)
self.lower_mask = _LowerNoDiagonalMask()((features, features))
def get_config(self):
"""获取参数,保存模型需要的函数."""
config = super().get_config().copy()
config.update({'stride': self.stride})
return config
@_abstractmethod
def call(self, inputs, *args, **kwargs):
"""逻辑实现部分."""
...
class Covariance(_OuterProductLayer):
"""计算每个stride各时间序列片段的covariance.
Notes:
计算每个stride每两个feature之间的covariance大小,
输出feature数量为features * (features - 1) / 2
"""
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride,
features * (features - 1) / 2)
"""
# compute means for each stride
means = _tf.nn.avg_pool(inputs,
ksize=self.stride,
strides=self.stride,
padding="VALID")
# subtract means for each stride
means_broadcast = _tf.repeat(means, self.stride, axis=1)
means_subtracted = _tf.subtract(inputs, means_broadcast)
means_subtracted = _tf.reshape(means_subtracted,
self.intermediate_shape)
# compute covariance matrix
covariance_matrix = _tf.einsum("ijk,ijm->ikm",
means_subtracted,
means_subtracted)
covariance_matrix = covariance_matrix / (self.stride - 1)
# get the lower part of the covariance matrix
# without the diagonal elements
covariances = _tf.boolean_mask(covariance_matrix,
self.lower_mask,
axis=1)
covariances = _tf.reshape(covariances, self.out_shape)
return covariances
class Correlation(_OuterProductLayer):
"""计算每个stride各时间序列的相关系数.
Notes:
计算每个stride每两个feature之间的correlation coefficient,
输出feature数量为features * (features - 1) / 2
"""
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride,
features * (features - 1) / 2)
"""
# compute means for each stride
means = _tf.nn.avg_pool(inputs,
ksize=self.stride,
strides=self.stride,
padding="VALID")
# subtract means for each stride
means_broadcast = _tf.repeat(means, self.stride, axis=1)
means_subtracted = _tf.subtract(inputs, means_broadcast)
means_subtracted = _tf.reshape(means_subtracted,
self.intermediate_shape)
# compute standard deviations for each strides
squared_diff = _tf.square(means_subtracted)
mean_squared_error = _tf.reduce_mean(squared_diff, axis=1)
std = _tf.sqrt(mean_squared_error)
# get denominator of correlation matrix
denominator_matrix = _tf.einsum("ik,im->ikm", std, std)
# compute covariance matrix
covariance_matrix = _tf.einsum("ijk,ijm->ikm",
means_subtracted,
means_subtracted)
covariance_matrix = covariance_matrix / self.stride
# take the lower triangle of each matrix without diagonal
covariances = _tf.boolean_mask(covariance_matrix,
self.lower_mask,
axis=1)
denominators = _tf.boolean_mask(denominator_matrix,
self.lower_mask,
axis=1)
correlations = _tf.math.divide_no_nan(covariances, denominators)
correlations = _tf.reshape(correlations, self.out_shape)
return correlations
class FeatureExpansion(_Layer):
"""计算时间序列特征扩张层,汇总6个计算层.
Notes:
该层扩张时间序列的feature数量,并通过stride缩短时间序列长度,
其包括一下一些feature:
- standard deviation
- mean / standard deviation
- linear decay average
- return of each stride
- covariance of each two features for each stride
- correlation coefficient of each two features for each stride
"""
def __init__(self, stride=10, **kwargs):
"""时间序列特征扩张.
Args:
stride (int): time steps需要是stride的整数倍
"""
if type(stride) is not int or stride <= 1:
raise ValueError("Illegal Argument: stride should be an integer "
"greater than 1")
super(FeatureExpansion, self).__init__(**kwargs)
self.stride = stride
self.std = _tf.function(Std(stride=self.stride))
self.z_score = _tf.function(ZScore(stride=self.stride))
self.linear_decay = _tf.function(LinearDecay(stride=self.stride))
self.return_ = _tf.function(Return(stride=self.stride))
self.covariance = _tf.function(Covariance(stride=self.stride))
self.correlation = _tf.function(Correlation(stride=self.stride))
def call(self, inputs, *args, **kwargs):
"""函数主逻辑实现部分.
Args:
inputs (tensor): 输入dimension为(batch_size, time_steps, features)
Returns:
dimension 为(batch_size, time_steps / stride,
features * (features + 3))
"""
std_output = self.std(inputs)
z_score_output = self.z_score(inputs)
decay_linear_output = self.linear_decay(inputs)
return_output = self.return_(inputs)
covariance_output = self.covariance(inputs)
correlation_output = self.correlation(inputs)
return _tf.concat([std_output,
z_score_output,
decay_linear_output,
return_output,
covariance_output,
correlation_output], axis=2)
def get_config(self):
"""获取参数,保存模型需要的函数."""
config = super().get_config().copy()
config.update({'stride': self.stride})
return config
class AlphaNetV2(_Model):
    """AlphaNet v2 model, subclassing ``keras.Model``.

    Notes:
        Re-implementation of the Huatai quant "alpha net V2" network.
        ``input: (batch_size, history time steps, features)``
    """
    def __init__(self,
                 dropout=0.0,
                 l2=0.001,
                 stride=10,
                 classification=False,
                 categories=0,
                 *args,
                 **kwargs):
        """Alpha net v2.

        Notes:
            Full tensorflow implementation of alpha net v2; see the code
            for the exact structure.

        Args:
            dropout: dropout rate applied after feature expansion and
                Batch Normalization; defaults to no dropout.
            l2: l2-regularization strength of the output layer.
        """
        super(AlphaNetV2, self).__init__(*args, **kwargs)
        self.l2 = l2
        # BUG FIX: the original rebound ``self.dropout`` to the Dropout
        # layer, so get_config() serialized a layer object instead of the
        # float rate.  Keep the rate here and the layer in
        # ``dropout_layer`` (consistent with AlphaNetV3/V4).
        self.dropout = dropout
        self.stride = stride
        self.expanded = FeatureExpansion(stride=self.stride)
        self.normalized = _tfl.BatchNormalization()
        self.dropout_layer = _tfl.Dropout(self.dropout)
        self.lstm = _tfl.LSTM(units=30)
        self.normalized_2 = _tfl.BatchNormalization()
        self.regularizer = _tf.keras.regularizers.l2(self.l2)
        if classification:
            if categories < 1:
                raise ValueError("categories should be at least 1")
            elif categories == 1:
                self.outputs = _tfl.Dense(1, activation="sigmoid",
                                          kernel_initializer="truncated_normal",
                                          kernel_regularizer=self.regularizer)
            else:
                self.outputs = _tfl.Dense(categories, activation="softmax",
                                          kernel_initializer="truncated_normal",
                                          kernel_regularizer=self.regularizer)
        else:
            self.outputs = _tfl.Dense(1, activation="linear",
                                      kernel_initializer="truncated_normal",
                                      kernel_regularizer=self.regularizer)
    @_tf.function
    def call(self, inputs, training=None, mask=None):
        """Forward pass.

        Args:
            inputs (tensor): shape ``(batch_size, time_steps, features)``.
        """
        expanded = self.expanded(inputs)
        normalized = self.normalized(expanded, training=training)
        lstm = self.lstm(normalized)
        normalized2 = self.normalized_2(lstm, training=training)
        dropout = self.dropout_layer(normalized2, training=training)
        output = self.outputs(dropout)
        return output
    def compile(self,
                optimizer=_tf.keras.optimizers.Adam(0.0001),
                loss="MSE",
                metrics=None,
                loss_weights=None,
                weighted_metrics=None,
                run_eagerly=None,
                **kwargs):
        """Configure optimizer, loss, and metrics."""
        super().compile(optimizer=optimizer,
                        loss=loss,
                        metrics=metrics,
                        loss_weights=loss_weights,
                        weighted_metrics=weighted_metrics,
                        run_eagerly=run_eagerly)
    def get_config(self):
        """Return the config dict used when saving the model.

        ``self.dropout`` is now the float rate (not the Dropout layer),
        so the saved config can rebuild the model.
        """
        config = super().get_config().copy()
        config.update({'dropout': self.dropout,
                       'l2': self.l2,
                       'stride': self.stride})
        return config
class AlphaNetV3(_Model):
    """AlphaNet v3 model, subclassing ``keras.Model``.

    Notes:
        Re-implementation of the Huatai quant "alpha net V3" network.
        ``input: (batch_size, history time steps, features)``
    """
    def __init__(self,
                 dropout=0.0,
                 l2=0.001,
                 classification=False,
                 categories=0,
                 recurrent_unit="GRU",
                 hidden_units=30,
                 *args,
                 **kwargs):
        """Alpha net v3.

        Notes:
            Full tensorflow implementation of alpha net v3; see the code
            for the exact structure.

        Args:
            dropout: dropout rate applied after feature expansion and
                Batch Normalization; defaults to no dropout.
            l2: l2-regularization strength of the output layer.
            classification: whether this is a classification problem.
            categories: number of classes for classification.
            recurrent_unit (str): either "GRU" or "LSTM".
        """
        super(AlphaNetV3, self).__init__(*args, **kwargs)
        self.l2 = l2
        self.dropout = dropout
        # Two expansion branches with different window lengths.
        self.expanded10 = FeatureExpansion(stride=10)
        self.expanded5 = FeatureExpansion(stride=5)
        self.normalized10 = _tfl.BatchNormalization()
        self.normalized5 = _tfl.BatchNormalization()
        self.dropout_layer = _tfl.Dropout(self.dropout)
        if recurrent_unit == "GRU":
            self.recurrent10 = _tfl.GRU(units=hidden_units)
            self.recurrent5 = _tfl.GRU(units=hidden_units)
        elif recurrent_unit == "LSTM":
            self.recurrent10 = _tfl.LSTM(units=hidden_units)
            self.recurrent5 = _tfl.LSTM(units=hidden_units)
        else:
            raise ValueError("Unknown recurrent_unit")
        self.normalized10_2 = _tfl.BatchNormalization()
        self.normalized5_2 = _tfl.BatchNormalization()
        self.concat = _tfl.Concatenate(axis=-1)
        self.regularizer = _tf.keras.regularizers.l2(self.l2)
        if classification:
            if categories < 1:
                raise ValueError("categories should be at least 1")
            elif categories == 1:
                # Binary classification: single sigmoid unit.
                self.outputs = _tfl.Dense(1, activation="sigmoid",
                                          kernel_initializer="truncated_normal",
                                          kernel_regularizer=self.regularizer)
            else:
                self.outputs = _tfl.Dense(categories, activation="softmax",
                                          kernel_initializer="truncated_normal",
                                          kernel_regularizer=self.regularizer)
        else:
            # Regression head.
            self.outputs = _tfl.Dense(1, activation="linear",
                                      kernel_initializer="truncated_normal",
                                      kernel_regularizer=self.regularizer)
    @_tf.function
    def call(self, inputs, training=None, mask=None):
        """Forward pass: two expansion branches -> RNNs -> concat -> head."""
        expanded10 = self.expanded10(inputs)
        expanded5 = self.expanded5(inputs)
        normalized10 = self.normalized10(expanded10, training=training)
        normalized5 = self.normalized5(expanded5, training=training)
        recurrent10 = self.recurrent10(normalized10)
        recurrent5 = self.recurrent5(normalized5)
        normalized10_2 = self.normalized10_2(recurrent10, training=training)
        normalized5_2 = self.normalized5_2(recurrent5, training=training)
        concat = self.concat([normalized10_2, normalized5_2])
        dropout = self.dropout_layer(concat, training=training)
        output = self.outputs(dropout)
        return output
    def compile(self,
                optimizer=_tf.keras.optimizers.Adam(0.0001),
                loss="MSE",
                metrics=None,
                loss_weights=None,
                weighted_metrics=None,
                run_eagerly=None,
                **kwargs):
        """Configure optimizer, loss, and metrics."""
        super().compile(optimizer=optimizer,
                        loss=loss,
                        metrics=metrics,
                        loss_weights=loss_weights,
                        weighted_metrics=weighted_metrics,
                        run_eagerly=run_eagerly)
    def get_config(self):
        """Return the config dict used when saving the model."""
        config = super().get_config().copy()
        config.update({'dropout': self.dropout,
                       'l2': self.l2})
        return config
class AlphaNetV4(_Model):
    """AlphaNet v4 model, subclassing ``keras.Model``.

    Notes:
        ``input: (batch_size, history time steps, features)``
    """
    def __init__(self,
                 dropout=0.0,
                 l2=0.001,
                 classification=False,
                 categories=0,
                 recurrent_unit="GRU",
                 hidden_units=30,
                 *args,
                 **kwargs):
        """Alpha net v4.

        Notes:
            Variant without batch normalization; training should use the
            normalization from the data module (or a custom one).

        Args:
            dropout: dropout rate applied after feature expansion;
                defaults to no dropout.
            l2: l2-regularization strength of the output layer.
            classification: whether this is a classification problem.
            categories: number of classes for classification.
            recurrent_unit (str): either "GRU" or "LSTM".
        """
        super(AlphaNetV4, self).__init__(*args, **kwargs)
        self.l2 = l2
        self.dropout = dropout
        self.expanded10 = FeatureExpansion(stride=10)
        self.expanded5 = FeatureExpansion(stride=5)
        self.dropout_layer = _tfl.Dropout(self.dropout)
        # Select the recurrent cell type once, then build both branches.
        if recurrent_unit == "GRU":
            recurrent_class = _tfl.GRU
        elif recurrent_unit == "LSTM":
            recurrent_class = _tfl.LSTM
        else:
            raise ValueError("Unknown recurrent_unit")
        self.recurrent10 = recurrent_class(units=hidden_units)
        self.recurrent5 = recurrent_class(units=hidden_units)
        self.concat = _tfl.Concatenate(axis=-1)
        self.regularizer = _tf.keras.regularizers.l2(self.l2)
        # Resolve head width/activation, then build a single Dense layer.
        if not classification:
            units, activation = 1, "linear"
        elif categories < 1:
            raise ValueError("categories should be at least 1")
        elif categories == 1:
            units, activation = 1, "sigmoid"
        else:
            units, activation = categories, "softmax"
        self.outputs = _tfl.Dense(units, activation=activation,
                                  kernel_initializer="truncated_normal",
                                  kernel_regularizer=self.regularizer)
    @_tf.function
    def call(self, inputs, training=None, mask=None):
        """Forward pass: two expansion branches -> RNNs -> concat -> head."""
        branch10 = self.recurrent10(self.expanded10(inputs))
        branch5 = self.recurrent5(self.expanded5(inputs))
        merged = self.concat([branch10, branch5])
        dropped = self.dropout_layer(merged, training=training)
        return self.outputs(dropped)
    def compile(self,
                optimizer=_tf.keras.optimizers.Adam(0.0001),
                loss="MSE",
                metrics=None,
                loss_weights=None,
                weighted_metrics=None,
                run_eagerly=None,
                **kwargs):
        """Configure optimizer, loss, and metrics."""
        super().compile(optimizer=optimizer,
                        loss=loss,
                        metrics=metrics,
                        loss_weights=loss_weights,
                        weighted_metrics=weighted_metrics,
                        run_eagerly=run_eagerly)
    def get_config(self):
        """Return the config dict used when saving the model."""
        config = super().get_config().copy()
        config.update({'dropout': self.dropout,
                       'l2': self.l2})
        return config
def load_model(filepath,
               custom_objects: dict = None,
               compile: bool = True,
               options=None):
    """Load a saved model, recognising the custom metric UpDownAccuracy.

    Notes:
        Thin wrapper around ``tf.keras``'s ``load_model`` that injects
        ``UpDownAccuracy`` into the custom-object registry.

    Args:
        filepath: the file path:
            - String or `pathlib.Path` object, path to the saved model
            - `h5py.File` object from which to load the model
        custom_objects: optional mapping from class/function names to the
            classes/functions themselves.
        compile: Boolean, whether to compile the model.
        options: other `tf.saved_model.LoadOptions`.

    Returns:
        Keras model instance.

    Raises:
        ImportError: if loading from an hdf5 file and h5py is not available.
        IOError: In case of an invalid savefile
    """
    merged_objects = {"UpDownAccuracy": _UpDownAccuracy}
    merged_objects.update(custom_objects or {})
    return _tf.keras.models.load_model(filepath,
                                       custom_objects=merged_objects,
                                       compile=compile,
                                       options=options)
class _LowerNoDiagonalMask(_Initializer):
    """Initializer producing a strictly-lower-triangular boolean mask.

    Notes:
        Provides a mask selecting the lower triangle of a matrix with the
        diagonal elements excluded.
    """
    def __init__(self):
        super(_LowerNoDiagonalMask, self).__init__()
    def __call__(self, shape, **kwargs):
        """Build the mask for the requested shape."""
        ones = _tf.ones(shape)
        # band_part(-1, 0) keeps the lower triangle incl. the diagonal;
        # subtracting band_part(0, 0) removes the diagonal itself.
        strict_lower = (_tf.linalg.band_part(ones, -1, 0)
                        - _tf.linalg.band_part(ones, 0, 0))
        return _tf.cast(strict_lower, dtype=_tf.bool)
def __get_dimensions__(input_shape, stride):
    """Compute (features, output_length) for a strided layer.

    Notes:
        output_length = original time length / stride

    Args:
        input_shape: pass the inputs of layer to the function
        stride (int): the stride of the custom layer

    Returns:
        (features, output_length)

    Raises:
        ValueError: if the history length is not a multiple of stride
    """
    if type(stride) is not int or stride <= 1:
        raise ValueError("Illegal Argument: stride should be an integer "
                         "greater than 1")
    time_steps, features = input_shape[1], input_shape[2]
    if time_steps % stride != 0:
        raise ValueError("Error, time_steps 应该是 stride的整数倍")
    return features, time_steps // stride
|
[
"tensorflow.einsum",
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Dense",
"tensorflow.math.reduce_std",
"tensorflow.reshape",
"tensorflow.sqrt",
"tensorflow.repeat",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.subtract",
"tensorflow.keras.layers.Concatenate",
"tensorflow.concat",
"tensorflow.cast",
"tensorflow.keras.optimizers.Adam",
"tensorflow.ones",
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Dropout",
"tensorflow.linspace",
"tensorflow.keras.layers.GRU",
"tensorflow.reduce_mean",
"tensorflow.math.divide_no_nan",
"tensorflow.math.reduce_mean",
"tensorflow.nn.avg_pool",
"tensorflow.keras.layers.LSTM",
"tensorflow.square",
"tensorflow.boolean_mask",
"tensorflow.linalg.band_part"
] |
[((25733, 25837), 'tensorflow.keras.models.load_model', '_tf.keras.models.load_model', (['filepath'], {'custom_objects': 'object_dict', 'compile': 'compile', 'options': 'options'}), '(filepath, custom_objects=object_dict, compile=\n compile, options=options)\n', (25760, 25837), True, 'import tensorflow as _tf\n'), ((2736, 2780), 'tensorflow.reshape', '_tf.reshape', (['inputs', 'self.intermediate_shape'], {}), '(inputs, self.intermediate_shape)\n', (2747, 2780), True, 'import tensorflow as _tf\n'), ((2850, 2887), 'tensorflow.math.reduce_std', '_tf.math.reduce_std', (['strides'], {'axis': '(-2)'}), '(strides, axis=-2)\n', (2869, 2887), True, 'import tensorflow as _tf\n'), ((2903, 2935), 'tensorflow.reshape', '_tf.reshape', (['std', 'self.out_shape'], {}), '(std, self.out_shape)\n', (2914, 2935), True, 'import tensorflow as _tf\n'), ((3377, 3421), 'tensorflow.reshape', '_tf.reshape', (['inputs', 'self.intermediate_shape'], {}), '(inputs, self.intermediate_shape)\n', (3388, 3421), True, 'import tensorflow as _tf\n'), ((3491, 3528), 'tensorflow.math.reduce_std', '_tf.math.reduce_std', (['strides'], {'axis': '(-2)'}), '(strides, axis=-2)\n', (3510, 3528), True, 'import tensorflow as _tf\n'), ((3586, 3624), 'tensorflow.math.reduce_mean', '_tf.math.reduce_mean', (['strides'], {'axis': '(-2)'}), '(strides, axis=-2)\n', (3606, 3624), True, 'import tensorflow as _tf\n'), ((3706, 3740), 'tensorflow.math.divide_no_nan', '_tf.math.divide_no_nan', (['means', 'std'], {}), '(means, std)\n', (3728, 3740), True, 'import tensorflow as _tf\n'), ((3756, 3792), 'tensorflow.reshape', '_tf.reshape', (['z_score', 'self.out_shape'], {}), '(z_score, self.out_shape)\n', (3767, 3792), True, 'import tensorflow as _tf\n'), ((4307, 4354), 'tensorflow.linspace', '_tf.linspace', (['(1.0)', 'self.stride'], {'num': 'self.stride'}), '(1.0, self.stride, num=self.stride)\n', (4319, 4354), True, 'import tensorflow as _tf\n'), ((4372, 4425), 'tensorflow.repeat', '_tf.repeat', (['single_kernel', 
'self.intermediate_shape[2]'], {}), '(single_kernel, self.intermediate_shape[2])\n', (4382, 4425), True, 'import tensorflow as _tf\n'), ((4596, 4644), 'tensorflow.reshape', '_tf.reshape', (['kernel', 'self.intermediate_shape[1:]'], {}), '(kernel, self.intermediate_shape[1:])\n', (4607, 4644), True, 'import tensorflow as _tf\n'), ((4662, 4706), 'tensorflow.reshape', '_tf.reshape', (['inputs', 'self.intermediate_shape'], {}), '(inputs, self.intermediate_shape)\n', (4673, 4706), True, 'import tensorflow as _tf\n'), ((4787, 4826), 'tensorflow.reduce_sum', '_tf.reduce_sum', (['(kernel * inputs)'], {'axis': '(1)'}), '(kernel * inputs, axis=1)\n', (4801, 4826), True, 'import tensorflow as _tf\n'), ((4850, 4891), 'tensorflow.reshape', '_tf.reshape', (['linear_decay', 'self.out_shape'], {}), '(linear_decay, self.out_shape)\n', (4861, 4891), True, 'import tensorflow as _tf\n'), ((8053, 8138), 'tensorflow.nn.avg_pool', '_tf.nn.avg_pool', (['inputs'], {'ksize': 'self.stride', 'strides': 'self.stride', 'padding': '"""VALID"""'}), "(inputs, ksize=self.stride, strides=self.stride, padding='VALID'\n )\n", (8068, 8138), True, 'import tensorflow as _tf\n'), ((8298, 8336), 'tensorflow.repeat', '_tf.repeat', (['means', 'self.stride'], {'axis': '(1)'}), '(means, self.stride, axis=1)\n', (8308, 8336), True, 'import tensorflow as _tf\n'), ((8364, 8401), 'tensorflow.subtract', '_tf.subtract', (['inputs', 'means_broadcast'], {}), '(inputs, means_broadcast)\n', (8376, 8401), True, 'import tensorflow as _tf\n'), ((8429, 8483), 'tensorflow.reshape', '_tf.reshape', (['means_subtracted', 'self.intermediate_shape'], {}), '(means_subtracted, self.intermediate_shape)\n', (8440, 8483), True, 'import tensorflow as _tf\n'), ((8588, 8650), 'tensorflow.einsum', '_tf.einsum', (['"""ijk,ijm->ikm"""', 'means_subtracted', 'means_subtracted'], {}), "('ijk,ijm->ikm', means_subtracted, means_subtracted)\n", (8598, 8650), True, 'import tensorflow as _tf\n'), ((8912, 8972), 'tensorflow.boolean_mask', 
'_tf.boolean_mask', (['covariance_matrix', 'self.lower_mask'], {'axis': '(1)'}), '(covariance_matrix, self.lower_mask, axis=1)\n', (8928, 8972), True, 'import tensorflow as _tf\n'), ((9073, 9113), 'tensorflow.reshape', '_tf.reshape', (['covariances', 'self.out_shape'], {}), '(covariances, self.out_shape)\n', (9084, 9113), True, 'import tensorflow as _tf\n'), ((9684, 9769), 'tensorflow.nn.avg_pool', '_tf.nn.avg_pool', (['inputs'], {'ksize': 'self.stride', 'strides': 'self.stride', 'padding': '"""VALID"""'}), "(inputs, ksize=self.stride, strides=self.stride, padding='VALID'\n )\n", (9699, 9769), True, 'import tensorflow as _tf\n'), ((9929, 9967), 'tensorflow.repeat', '_tf.repeat', (['means', 'self.stride'], {'axis': '(1)'}), '(means, self.stride, axis=1)\n', (9939, 9967), True, 'import tensorflow as _tf\n'), ((9995, 10032), 'tensorflow.subtract', '_tf.subtract', (['inputs', 'means_broadcast'], {}), '(inputs, means_broadcast)\n', (10007, 10032), True, 'import tensorflow as _tf\n'), ((10060, 10114), 'tensorflow.reshape', '_tf.reshape', (['means_subtracted', 'self.intermediate_shape'], {}), '(means_subtracted, self.intermediate_shape)\n', (10071, 10114), True, 'import tensorflow as _tf\n'), ((10233, 10261), 'tensorflow.square', '_tf.square', (['means_subtracted'], {}), '(means_subtracted)\n', (10243, 10261), True, 'import tensorflow as _tf\n'), ((10291, 10328), 'tensorflow.reduce_mean', '_tf.reduce_mean', (['squared_diff'], {'axis': '(1)'}), '(squared_diff, axis=1)\n', (10306, 10328), True, 'import tensorflow as _tf\n'), ((10343, 10371), 'tensorflow.sqrt', '_tf.sqrt', (['mean_squared_error'], {}), '(mean_squared_error)\n', (10351, 10371), True, 'import tensorflow as _tf\n'), ((10450, 10484), 'tensorflow.einsum', '_tf.einsum', (['"""ik,im->ikm"""', 'std', 'std'], {}), "('ik,im->ikm', std, std)\n", (10460, 10484), True, 'import tensorflow as _tf\n'), ((10550, 10612), 'tensorflow.einsum', '_tf.einsum', (['"""ijk,ijm->ikm"""', 'means_subtracted', 'means_subtracted'], {}), 
"('ijk,ijm->ikm', means_subtracted, means_subtracted)\n", (10560, 10612), True, 'import tensorflow as _tf\n'), ((10840, 10900), 'tensorflow.boolean_mask', '_tf.boolean_mask', (['covariance_matrix', 'self.lower_mask'], {'axis': '(1)'}), '(covariance_matrix, self.lower_mask, axis=1)\n', (10856, 10900), True, 'import tensorflow as _tf\n'), ((11002, 11063), 'tensorflow.boolean_mask', '_tf.boolean_mask', (['denominator_matrix', 'self.lower_mask'], {'axis': '(1)'}), '(denominator_matrix, self.lower_mask, axis=1)\n', (11018, 11063), True, 'import tensorflow as _tf\n'), ((11167, 11216), 'tensorflow.math.divide_no_nan', '_tf.math.divide_no_nan', (['covariances', 'denominators'], {}), '(covariances, denominators)\n', (11189, 11216), True, 'import tensorflow as _tf\n'), ((11240, 11281), 'tensorflow.reshape', '_tf.reshape', (['correlations', 'self.out_shape'], {}), '(correlations, self.out_shape)\n', (11251, 11281), True, 'import tensorflow as _tf\n'), ((13152, 13279), 'tensorflow.concat', '_tf.concat', (['[std_output, z_score_output, decay_linear_output, return_output,\n covariance_output, correlation_output]'], {'axis': '(2)'}), '([std_output, z_score_output, decay_linear_output, return_output,\n covariance_output, correlation_output], axis=2)\n', (13162, 13279), True, 'import tensorflow as _tf\n'), ((14469, 14494), 'tensorflow.keras.layers.BatchNormalization', '_tfl.BatchNormalization', ([], {}), '()\n', (14492, 14494), True, 'import tensorflow.keras.layers as _tfl\n'), ((14518, 14544), 'tensorflow.keras.layers.Dropout', '_tfl.Dropout', (['self.dropout'], {}), '(self.dropout)\n', (14530, 14544), True, 'import tensorflow.keras.layers as _tfl\n'), ((14565, 14584), 'tensorflow.keras.layers.LSTM', '_tfl.LSTM', ([], {'units': '(30)'}), '(units=30)\n', (14574, 14584), True, 'import tensorflow.keras.layers as _tfl\n'), ((14613, 14638), 'tensorflow.keras.layers.BatchNormalization', '_tfl.BatchNormalization', ([], {}), '()\n', (14636, 14638), True, 'import tensorflow.keras.layers as 
_tfl\n'), ((14666, 14700), 'tensorflow.keras.regularizers.l2', '_tf.keras.regularizers.l2', (['self.l2'], {}), '(self.l2)\n', (14691, 14700), True, 'import tensorflow as _tf\n'), ((16047, 16080), 'tensorflow.keras.optimizers.Adam', '_tf.keras.optimizers.Adam', (['(0.0001)'], {}), '(0.0001)\n', (16072, 16080), True, 'import tensorflow as _tf\n'), ((17919, 17944), 'tensorflow.keras.layers.BatchNormalization', '_tfl.BatchNormalization', ([], {}), '()\n', (17942, 17944), True, 'import tensorflow.keras.layers as _tfl\n'), ((17972, 17997), 'tensorflow.keras.layers.BatchNormalization', '_tfl.BatchNormalization', ([], {}), '()\n', (17995, 17997), True, 'import tensorflow.keras.layers as _tfl\n'), ((18027, 18053), 'tensorflow.keras.layers.Dropout', '_tfl.Dropout', (['self.dropout'], {}), '(self.dropout)\n', (18039, 18053), True, 'import tensorflow.keras.layers as _tfl\n'), ((18468, 18493), 'tensorflow.keras.layers.BatchNormalization', '_tfl.BatchNormalization', ([], {}), '()\n', (18491, 18493), True, 'import tensorflow.keras.layers as _tfl\n'), ((18523, 18548), 'tensorflow.keras.layers.BatchNormalization', '_tfl.BatchNormalization', ([], {}), '()\n', (18546, 18548), True, 'import tensorflow.keras.layers as _tfl\n'), ((18571, 18596), 'tensorflow.keras.layers.Concatenate', '_tfl.Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (18587, 18596), True, 'import tensorflow.keras.layers as _tfl\n'), ((18624, 18658), 'tensorflow.keras.regularizers.l2', '_tf.keras.regularizers.l2', (['self.l2'], {}), '(self.l2)\n', (18649, 18658), True, 'import tensorflow as _tf\n'), ((20342, 20375), 'tensorflow.keras.optimizers.Adam', '_tf.keras.optimizers.Adam', (['(0.0001)'], {}), '(0.0001)\n', (20367, 20375), True, 'import tensorflow as _tf\n'), ((22174, 22200), 'tensorflow.keras.layers.Dropout', '_tfl.Dropout', (['self.dropout'], {}), '(self.dropout)\n', (22186, 22200), True, 'import tensorflow.keras.layers as _tfl\n'), ((22607, 22632), 'tensorflow.keras.layers.Concatenate', 
'_tfl.Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (22623, 22632), True, 'import tensorflow.keras.layers as _tfl\n'), ((22660, 22694), 'tensorflow.keras.regularizers.l2', '_tf.keras.regularizers.l2', (['self.l2'], {}), '(self.l2)\n', (22685, 22694), True, 'import tensorflow as _tf\n'), ((24076, 24109), 'tensorflow.keras.optimizers.Adam', '_tf.keras.optimizers.Adam', (['(0.0001)'], {}), '(0.0001)\n', (24101, 24109), True, 'import tensorflow as _tf\n'), ((26295, 26310), 'tensorflow.ones', '_tf.ones', (['shape'], {}), '(shape)\n', (26303, 26310), True, 'import tensorflow as _tf\n'), ((26332, 26365), 'tensorflow.linalg.band_part', '_tf.linalg.band_part', (['ones', '(-1)', '(0)'], {}), '(ones, -1, 0)\n', (26352, 26365), True, 'import tensorflow as _tf\n'), ((26386, 26418), 'tensorflow.linalg.band_part', '_tf.linalg.band_part', (['ones', '(0)', '(0)'], {}), '(ones, 0, 0)\n', (26406, 26418), True, 'import tensorflow as _tf\n'), ((26490, 26538), 'tensorflow.cast', '_tf.cast', (['(mask_lower - mask_diag)'], {'dtype': '_tf.bool'}), '(mask_lower - mask_diag, dtype=_tf.bool)\n', (26498, 26538), True, 'import tensorflow as _tf\n'), ((4452, 4481), 'tensorflow.reduce_sum', '_tf.reduce_sum', (['single_kernel'], {}), '(single_kernel)\n', (4466, 4481), True, 'import tensorflow as _tf\n'), ((6096, 6144), 'tensorflow.math.divide_no_nan', '_tf.math.divide_no_nan', (['numerators', 'denominators'], {}), '(numerators, denominators)\n', (6118, 6144), True, 'import tensorflow as _tf\n'), ((15383, 15497), 'tensorflow.keras.layers.Dense', '_tfl.Dense', (['(1)'], {'activation': '"""linear"""', 'kernel_initializer': '"""truncated_normal"""', 'kernel_regularizer': 'self.regularizer'}), "(1, activation='linear', kernel_initializer='truncated_normal',\n kernel_regularizer=self.regularizer)\n", (15393, 15497), True, 'import tensorflow.keras.layers as _tfl\n'), ((18121, 18149), 'tensorflow.keras.layers.GRU', '_tfl.GRU', ([], {'units': 'hidden_units'}), '(units=hidden_units)\n', (18129, 
18149), True, 'import tensorflow.keras.layers as _tfl\n'), ((18180, 18208), 'tensorflow.keras.layers.GRU', '_tfl.GRU', ([], {'units': 'hidden_units'}), '(units=hidden_units)\n', (18188, 18208), True, 'import tensorflow.keras.layers as _tfl\n'), ((19341, 19455), 'tensorflow.keras.layers.Dense', '_tfl.Dense', (['(1)'], {'activation': '"""linear"""', 'kernel_initializer': '"""truncated_normal"""', 'kernel_regularizer': 'self.regularizer'}), "(1, activation='linear', kernel_initializer='truncated_normal',\n kernel_regularizer=self.regularizer)\n", (19351, 19455), True, 'import tensorflow.keras.layers as _tfl\n'), ((22268, 22296), 'tensorflow.keras.layers.GRU', '_tfl.GRU', ([], {'units': 'hidden_units'}), '(units=hidden_units)\n', (22276, 22296), True, 'import tensorflow.keras.layers as _tfl\n'), ((22327, 22355), 'tensorflow.keras.layers.GRU', '_tfl.GRU', ([], {'units': 'hidden_units'}), '(units=hidden_units)\n', (22335, 22355), True, 'import tensorflow.keras.layers as _tfl\n'), ((23377, 23491), 'tensorflow.keras.layers.Dense', '_tfl.Dense', (['(1)'], {'activation': '"""linear"""', 'kernel_initializer': '"""truncated_normal"""', 'kernel_regularizer': 'self.regularizer'}), "(1, activation='linear', kernel_initializer='truncated_normal',\n kernel_regularizer=self.regularizer)\n", (23387, 23491), True, 'import tensorflow.keras.layers as _tfl\n'), ((18279, 18308), 'tensorflow.keras.layers.LSTM', '_tfl.LSTM', ([], {'units': 'hidden_units'}), '(units=hidden_units)\n', (18288, 18308), True, 'import tensorflow.keras.layers as _tfl\n'), ((18339, 18368), 'tensorflow.keras.layers.LSTM', '_tfl.LSTM', ([], {'units': 'hidden_units'}), '(units=hidden_units)\n', (18348, 18368), True, 'import tensorflow.keras.layers as _tfl\n'), ((22426, 22455), 'tensorflow.keras.layers.LSTM', '_tfl.LSTM', ([], {'units': 'hidden_units'}), '(units=hidden_units)\n', (22435, 22455), True, 'import tensorflow.keras.layers as _tfl\n'), ((22486, 22515), 'tensorflow.keras.layers.LSTM', '_tfl.LSTM', ([], 
{'units': 'hidden_units'}), '(units=hidden_units)\n', (22495, 22515), True, 'import tensorflow.keras.layers as _tfl\n'), ((14892, 15007), 'tensorflow.keras.layers.Dense', '_tfl.Dense', (['(1)'], {'activation': '"""sigmoid"""', 'kernel_initializer': '"""truncated_normal"""', 'kernel_regularizer': 'self.regularizer'}), "(1, activation='sigmoid', kernel_initializer='truncated_normal',\n kernel_regularizer=self.regularizer)\n", (14902, 15007), True, 'import tensorflow.keras.layers as _tfl\n'), ((15137, 15262), 'tensorflow.keras.layers.Dense', '_tfl.Dense', (['categories'], {'activation': '"""softmax"""', 'kernel_initializer': '"""truncated_normal"""', 'kernel_regularizer': 'self.regularizer'}), "(categories, activation='softmax', kernel_initializer=\n 'truncated_normal', kernel_regularizer=self.regularizer)\n", (15147, 15262), True, 'import tensorflow.keras.layers as _tfl\n'), ((18850, 18965), 'tensorflow.keras.layers.Dense', '_tfl.Dense', (['(1)'], {'activation': '"""sigmoid"""', 'kernel_initializer': '"""truncated_normal"""', 'kernel_regularizer': 'self.regularizer'}), "(1, activation='sigmoid', kernel_initializer='truncated_normal',\n kernel_regularizer=self.regularizer)\n", (18860, 18965), True, 'import tensorflow.keras.layers as _tfl\n'), ((19095, 19220), 'tensorflow.keras.layers.Dense', '_tfl.Dense', (['categories'], {'activation': '"""softmax"""', 'kernel_initializer': '"""truncated_normal"""', 'kernel_regularizer': 'self.regularizer'}), "(categories, activation='softmax', kernel_initializer=\n 'truncated_normal', kernel_regularizer=self.regularizer)\n", (19105, 19220), True, 'import tensorflow.keras.layers as _tfl\n'), ((22886, 23001), 'tensorflow.keras.layers.Dense', '_tfl.Dense', (['(1)'], {'activation': '"""sigmoid"""', 'kernel_initializer': '"""truncated_normal"""', 'kernel_regularizer': 'self.regularizer'}), "(1, activation='sigmoid', kernel_initializer='truncated_normal',\n kernel_regularizer=self.regularizer)\n", (22896, 23001), True, 'import 
tensorflow.keras.layers as _tfl\n'), ((23131, 23256), 'tensorflow.keras.layers.Dense', '_tfl.Dense', (['categories'], {'activation': '"""softmax"""', 'kernel_initializer': '"""truncated_normal"""', 'kernel_regularizer': 'self.regularizer'}), "(categories, activation='softmax', kernel_initializer=\n 'truncated_normal', kernel_regularizer=self.regularizer)\n", (23141, 23256), True, 'import tensorflow.keras.layers as _tfl\n')]
|
import os
from django.db import models
def upload_path(user, filename):
    """Build the avatar storage name for *user*, keeping the upload's extension."""
    _, extension = os.path.splitext(filename)
    return f"avatar_{user.pk}{extension}"
class UserProfile(models.Model):
    # One profile row per Django auth user; cascades on user deletion.
    user = models.OneToOneField('auth.User', on_delete=models.CASCADE)
    # Avatar image stored under the per-user name built by upload_path().
    avatar = models.ImageField(upload_to=upload_path, blank=True)
    # Short free-form biography (optional).
    bio = models.TextField(max_length=255, blank=True)
    # SECURITY NOTE(review): storing a password in cleartext is unsafe --
    # passwords should go through Django's auth hashers. Left unchanged
    # here because removing the field alters the database schema.
    cleartext_password = models.TextField(max_length=255)
|
[
"django.db.models.ImageField",
"django.db.models.OneToOneField",
"django.db.models.TextField",
"os.path.splitext"
] |
[((215, 274), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""auth.User"""'], {'on_delete': 'models.CASCADE'}), "('auth.User', on_delete=models.CASCADE)\n", (235, 274), False, 'from django.db import models\n'), ((288, 340), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'upload_path', 'blank': '(True)'}), '(upload_to=upload_path, blank=True)\n', (305, 340), False, 'from django.db import models\n'), ((351, 395), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (367, 395), False, 'from django.db import models\n'), ((421, 453), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (437, 453), False, 'from django.db import models\n'), ((91, 117), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (107, 117), False, 'import os\n')]
|
import re
#import logbook
from inspect import isroutine, getmro
from itertools import chain
import unittest
_CAMEL_RE = re.compile(r'(?<=[a-z])([A-Z])')
def _normalize(name):
return _CAMEL_RE.sub(lambda x: '_' + x.group(1).lower(), name).lower()
def _defined_in(obj, name, value):
if hasattr(obj, '__bases__'):
mro = getmro(obj)
if len(mro) > 1:
return getattr(mro[1], name, None) != value
return True
def pep8(*args, **kwargs):
def objects():
for obj in chain(args, kwargs.values()):
if hasattr(obj, '__bases__'):
try:
for parent in reversed(getmro(obj)):
yield parent
except:
import pdb;pdb.set_trace()
else:
if hasattr(obj, '__class__'):
yield obj.__class__
for obj in objects():
try:
for name in dir(obj):
if not name.startswith('_'):
value = getattr(obj, name)
if isroutine(value):
norm_name = _normalize(name)
if norm_name != name:
try:
norm_value = getattr(obj, norm_name, None)
if norm_value is None or not _defined_in(obj, norm_name, norm_value):
# no method with normalized name
#logbook.Logger('pep8').info(
# 'writing from %s(%s) to %s(%s) for %r' % (name, hash(value), norm_name, hash(norm_value), obj)
#)
setattr(obj, norm_name, value)
else:
# set new value back because, probably it is
# overridden method
if norm_value != value:
#logbook.Logger('pep8').info(
# 'writing back from %s(%s) to %s(%s) for %r' % (
# norm_name, hash(norm_value),
# name, hash(value),
# obj
# )
#)
setattr(obj, name, norm_value)
except TypeError:
pass
except:
import pdb;pdb.set_trace()
raise
#return cls
class TestCase(unittest.TestCase):
    """Self-tests for the camelCase -> snake_case aliasing helpers."""
    def test_normalization(self):
        # _normalize lowercases and inserts underscores at case boundaries.
        self.assertEqual('ugly_method', _normalize('uglyMethod'))
        self.assertEqual('another_ugly_method', _normalize('AnotherUglyMethod'))
        self.assertEqual('listen_tcp', _normalize('listenTCP'))
    def test_inheritance1(self):
        # An alias created on the base class is reachable from the subclass.
        class A:
            def badMethod(self):
                return 'A'
        class B(A): pass
        pep8(B)
        self.assertEqual('A', B().bad_method())
    def test_inheritance2(self):
        # Both spellings resolve to each class's own implementation.
        class A(object):
            def badMethod(self):
                return 'A'
        class B(A):
            def badMethod(self):
                return 'B'
        pep8(B)
        self.assertEqual('A', A().badMethod())
        self.assertEqual('A', A().bad_method())
        self.assertEqual('B', B().badMethod())
        self.assertEqual('B', B().bad_method())
    def test_inheritance3(self):
        # A snake_case override is written back to the camelCase name.
        class A(object):
            def badMethod(self):
                return 'A'
        class B(A):
            def bad_method(self):
                return 'B'
        pep8(B)
        self.assertEqual('A', A().badMethod())
        self.assertEqual('A', A().bad_method())
        self.assertEqual('B', B().badMethod())
        self.assertEqual('B', B().bad_method())
    def test_inheritance4(self):
        # Passing an instance patches its class as well.
        class A(object):
            def badMethod(self):
                return 'A'
        class B(A):
            def badMethod(self):
                return 'B'
        b = B()
        pep8(A, b)
        self.assertEqual('B', b.badMethod())
        self.assertEqual('B', b.bad_method())
    def test_on_object(self):
        # Patching the class makes the alias visible on instances.
        class A(object):
            def badMethod(self):
                return 'A'
        a = A()
        pep8(A)
        self.assertEqual('A', a.badMethod())
        self.assertEqual('A', a.bad_method())
    def test_class_and_function(self):
        # Attributes that normalize to different names stay distinct.
        class FakeModule:
            class Random:
                pass
            def random():
                pass
        pep8(FakeModule)
        self.assert_(FakeModule.Random != FakeModule.random)
    def test_defined_in(self):
        # _defined_in distinguishes own attributes from inherited ones.
        class A:
            def foo(self): return 'A.foo'
            def bar(self): return 'A.bar'
        class B(A):
            def foo(self): return 'B.foo'
            def blah(self): return 'B.blah'
        self.assertEqual(True, _defined_in(A, 'foo', A.foo))
        self.assertEqual(True, _defined_in(A, 'bar', A.bar))
        self.assertEqual(True, _defined_in(B, 'foo', B.foo))
        self.assertEqual(False, _defined_in(B, 'bar', B.bar))
        self.assertEqual(True, _defined_in(B, 'blah', B.blah))
if __name__ == '__main__':
    # Run this module's self-tests when executed directly.
    unittest.main()
|
[
"unittest.main",
"inspect.isroutine",
"inspect.getmro",
"pdb.set_trace",
"re.compile"
] |
[((122, 153), 're.compile', 're.compile', (['"""(?<=[a-z])([A-Z])"""'], {}), "('(?<=[a-z])([A-Z])')\n", (132, 153), False, 'import re\n'), ((5396, 5411), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5409, 5411), False, 'import unittest\n'), ((338, 349), 'inspect.getmro', 'getmro', (['obj'], {}), '(obj)\n', (344, 349), False, 'from inspect import isroutine, getmro\n'), ((2651, 2666), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (2664, 2666), False, 'import pdb\n'), ((1065, 1081), 'inspect.isroutine', 'isroutine', (['value'], {}), '(value)\n', (1074, 1081), False, 'from inspect import isroutine, getmro\n'), ((650, 661), 'inspect.getmro', 'getmro', (['obj'], {}), '(obj)\n', (656, 661), False, 'from inspect import isroutine, getmro\n'), ((756, 771), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (769, 771), False, 'import pdb\n')]
|
import json
import networkx as nx
from pyquil.quil import Program
from pyquil.api import get_qc, LocalQVMCompiler
from pyquil.device import NxDevice
from pyquil.gates import CNOT, H
from qiskit import transpiler
from qiskit.wrapper import load_qasm_file
from qiskit.dagcircuit import DAGCircuit
def quil_compile(input, device):
name = device["name"]
g = nx.Graph()
g.add_edges_from(device["topology"])
qc = NxDevice(g)
p = Program(open(f"input/{input}.quil", "r").read())
compiler = LocalQVMCompiler("http://localhost:6000", qc)
np = compiler.quil_to_native_quil(p)
volume, depth = np.native_quil_metadata["gate_volume"], np.native_quil_metadata["gate_depth"]
with open(f"output/{input}_{name}.quil", "w") as f:
f.write(str(np))
with open(f"output/{input}_{name}.json", "w") as f:
f.write(json.dumps({'volume': volume, 'depth': depth}))
def qasm_compile(input, device):
name = device["name"]
qc = load_qasm_file(f"input/{input}.qasm")
dag = DAGCircuit.fromQuantumCircuit(qc)
try:
r = transpiler.transpile(dag, coupling_map=device["topology"])
qasm = r.qasm()
volume, depth = r.property_summary()["size"], r.property_summary()["depth"]
except:
qasm = ""
volume, depth = 0, 0
with open(f"output/{input}_{name}.qasm", "w") as f:
f.write(qasm)
with open(f"output/{input}_{name}.json", "w") as f:
f.write(json.dumps({'volume': volume, 'depth': depth}))
|
[
"pyquil.device.NxDevice",
"qiskit.wrapper.load_qasm_file",
"json.dumps",
"qiskit.transpiler.transpile",
"pyquil.api.LocalQVMCompiler",
"qiskit.dagcircuit.DAGCircuit.fromQuantumCircuit",
"networkx.Graph"
] |
[((363, 373), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (371, 373), True, 'import networkx as nx\n'), ((420, 431), 'pyquil.device.NxDevice', 'NxDevice', (['g'], {}), '(g)\n', (428, 431), False, 'from pyquil.device import NxDevice\n'), ((501, 546), 'pyquil.api.LocalQVMCompiler', 'LocalQVMCompiler', (['"""http://localhost:6000"""', 'qc'], {}), "('http://localhost:6000', qc)\n", (517, 546), False, 'from pyquil.api import get_qc, LocalQVMCompiler\n'), ((939, 976), 'qiskit.wrapper.load_qasm_file', 'load_qasm_file', (['f"""input/{input}.qasm"""'], {}), "(f'input/{input}.qasm')\n", (953, 976), False, 'from qiskit.wrapper import load_qasm_file\n'), ((985, 1018), 'qiskit.dagcircuit.DAGCircuit.fromQuantumCircuit', 'DAGCircuit.fromQuantumCircuit', (['qc'], {}), '(qc)\n', (1014, 1018), False, 'from qiskit.dagcircuit import DAGCircuit\n'), ((1035, 1093), 'qiskit.transpiler.transpile', 'transpiler.transpile', (['dag'], {'coupling_map': "device['topology']"}), "(dag, coupling_map=device['topology'])\n", (1055, 1093), False, 'from qiskit import transpiler\n'), ((825, 871), 'json.dumps', 'json.dumps', (["{'volume': volume, 'depth': depth}"], {}), "({'volume': volume, 'depth': depth})\n", (835, 871), False, 'import json\n'), ((1382, 1428), 'json.dumps', 'json.dumps', (["{'volume': volume, 'depth': depth}"], {}), "({'volume': volume, 'depth': depth})\n", (1392, 1428), False, 'import json\n')]
|
import numpy as np
from glob import glob
from scipy import ndimage
from keras import callbacks
from keras.optimizers import Adamax, SGD, RMSprop
import resnet50
def convert_to_one_hot(Y, C):
'''Converts array with labels to one-hot encoding
Keyword Arguments:
Y -- 1-dimensional numpy array containing labeled values
C -- total number of labels in Y
'''
Y = np.eye(C)[Y.reshape(-1)].T
return Y
def load_dataset(datapath, composers):
'''Loads dataset into memory
Keyword Arguments:
datapath -- absolute or relative path to dataset location
composers -- list of composer names included in the dataset
'''
folders = glob('%s/*' %datapath)
X_train = []
Y_train = []
for folder in folders:
files = glob('%s\\*.jpg' %folder)
print('working on composer: %s' %(folder.split('\\')[-1]))
for f in files:
im = ndimage.imread(f, mode='L')
im = im/255
im = im.reshape(im.shape[0], im.shape[1], 1)
X_train.append(im)
Y_train.append(composers.index(folder.split('\\')[-1]))
return np.asarray(X_train), np.asarray(Y_train)
if __name__ == '__main__':
print('setting model')
model = ResNet50.ResNet50(input_shape = (70, 400, 1), classes = 7)
epochs = 100
learning_rate = 0.001
lr_decay = 0.001/100
print('compiling model...')
#optimizer_instance = Adam(lr=learning_rate, decay=lr_decay)#lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=0.001)
#optimizer_instance = Adamax(lr=learning_rate, decay=lr_decay)
optimizer_instance = SGD(lr=learning_rate, decay=lr_decay)
#optimizer_instance = RMSprop(lr=learning_rate, decay=lr_decay)
model.compile(optimizer=optimizer_instance, loss='categorical_crossentropy', metrics=['acc'])
print('loading dataset......')
composers = ['Bach', 'Beethoven', 'Brahms', 'Chopin', 'Grieg', 'Liszt', 'Mozart']
datapath = 'Dataset_Train_Medium/'
X_train, Y_train = load_dataset(datapath, composers)
datapath_val = 'Dataset_Dev_Medium/'
X_test, Y_test = load_dataset(datapath_val, composers)
print('applying one-hot-encoding')
Y_train = convert_to_one_hot(Y_train, 7).T
Y_test = convert_to_one_hot(Y_test, 7).T
print('setting up callbacks...')
nancheck = callbacks.TerminateOnNaN()
filepath = 'Models/weights-improvement-{epoch:02d}-{acc:.2f}.hdf5'
saver = callbacks.ModelCheckpoint(filepath, monitor='acc', verbose=1, save_best_only=False, mode='max', period=1)
logger = callbacks.CSVLogger('model-weights/trainingresults.log')
callbacklist = [nancheck, saver, logger]
print('starting model fitting')
model.fit(X_train, Y_train, validation_data = (X_test, Y_test), epochs=epochs, batch_size=72, callbacks=callbacklist)
print('Saving model.........')
model.save('second_run.h5')
|
[
"keras.optimizers.SGD",
"keras.callbacks.TerminateOnNaN",
"numpy.eye",
"keras.callbacks.ModelCheckpoint",
"numpy.asarray",
"glob.glob",
"keras.callbacks.CSVLogger",
"scipy.ndimage.imread"
] |
[((676, 699), 'glob.glob', 'glob', (["('%s/*' % datapath)"], {}), "('%s/*' % datapath)\n", (680, 699), False, 'from glob import glob\n'), ((1609, 1646), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'learning_rate', 'decay': 'lr_decay'}), '(lr=learning_rate, decay=lr_decay)\n', (1612, 1646), False, 'from keras.optimizers import Adamax, SGD, RMSprop\n'), ((2318, 2344), 'keras.callbacks.TerminateOnNaN', 'callbacks.TerminateOnNaN', ([], {}), '()\n', (2342, 2344), False, 'from keras import callbacks\n'), ((2428, 2537), 'keras.callbacks.ModelCheckpoint', 'callbacks.ModelCheckpoint', (['filepath'], {'monitor': '"""acc"""', 'verbose': '(1)', 'save_best_only': '(False)', 'mode': '"""max"""', 'period': '(1)'}), "(filepath, monitor='acc', verbose=1,\n save_best_only=False, mode='max', period=1)\n", (2453, 2537), False, 'from keras import callbacks\n'), ((2547, 2603), 'keras.callbacks.CSVLogger', 'callbacks.CSVLogger', (['"""model-weights/trainingresults.log"""'], {}), "('model-weights/trainingresults.log')\n", (2566, 2603), False, 'from keras import callbacks\n'), ((777, 803), 'glob.glob', 'glob', (["('%s\\\\*.jpg' % folder)"], {}), "('%s\\\\*.jpg' % folder)\n", (781, 803), False, 'from glob import glob\n'), ((1131, 1150), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (1141, 1150), True, 'import numpy as np\n'), ((1152, 1171), 'numpy.asarray', 'np.asarray', (['Y_train'], {}), '(Y_train)\n', (1162, 1171), True, 'import numpy as np\n'), ((390, 399), 'numpy.eye', 'np.eye', (['C'], {}), '(C)\n', (396, 399), True, 'import numpy as np\n'), ((911, 938), 'scipy.ndimage.imread', 'ndimage.imread', (['f'], {'mode': '"""L"""'}), "(f, mode='L')\n", (925, 938), False, 'from scipy import ndimage\n')]
|
"""Settings for tests."""
import os
from pathlib import Path
import pkg_resources as pkg
###################################################################################################
###################################################################################################
# Set paths for test files
TEST_FILE_PATH = Path(pkg.resource_filename(__name__, 'test_files'))
|
[
"pkg_resources.resource_filename"
] |
[((341, 386), 'pkg_resources.resource_filename', 'pkg.resource_filename', (['__name__', '"""test_files"""'], {}), "(__name__, 'test_files')\n", (362, 386), True, 'import pkg_resources as pkg\n')]
|
#!/usr/bin/env python2.7
"""
app_config.py will be storing all the module configs.
Here the db uses mysql.
"""
import os
_basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
ADMINS = frozenset(['<EMAIL>'])
SECRET_KEY = ''
SQLALCHEMY_DATABASE_URI = 'DATABASE://USERNAME:PASSWORD@localhost/YOUR_DB_NAME'
DATABASE_CONNECT_OPTIONS = {}
CSRF_ENABLED = True
CSRF_SESSION_KEY = ""
# Customize and add the blow if you'd like to use recaptcha. SSL is enabled
# by default and this is recaptcha v2: tap "I'm not a robot" checkbox instead
# of answering a riddle.
# Please see: https://www.google.com/recaptcha
RECAPTCHA_DATA_ATTRS = {'theme': 'light'}
RECAPTCHA_PUBLIC_KEY = 'YOUR KEY HERE'
RECAPTCHA_PRIVATE_KEY = 'YOUR PRIVATE KEY HERE'
BRAND = "reddit"
DOMAIN = "YOUR_DOMAIN_HERE"
ROOT_URL = "http://YOUR_URL_HERE"
STATIC_ROOT = "/path/to/your/static/root/"
STATIC_URL = ROOT_URL + "/static/"
|
[
"os.path.dirname"
] |
[((149, 174), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (164, 174), False, 'import os\n')]
|
from setuptools import find_packages, setup
setup(name='sentiment_analysis',
packages=['sentiment_analysis'],
version='0.2.0',
description="sentiment analysis library",
author='<NAME>',
package_data={'sentiment_analysis': ['data/*'],},
include_package_data=True,
install_requires=[
'pandas==1.1.1',
'transformers==4.12.5',
'sentencepiece',
'protobuf',
'torch@https://download.pytorch.org/whl/cpu/torch-1.5.0%2Bcpu-cp37-cp37m-linux_x86_64.whl',
]
)
|
[
"setuptools.setup"
] |
[((45, 470), 'setuptools.setup', 'setup', ([], {'name': '"""sentiment_analysis"""', 'packages': "['sentiment_analysis']", 'version': '"""0.2.0"""', 'description': '"""sentiment analysis library"""', 'author': '"""<NAME>"""', 'package_data': "{'sentiment_analysis': ['data/*']}", 'include_package_data': '(True)', 'install_requires': "['pandas==1.1.1', 'transformers==4.12.5', 'sentencepiece', 'protobuf',\n 'torch@https://download.pytorch.org/whl/cpu/torch-1.5.0%2Bcpu-cp37-cp37m-linux_x86_64.whl'\n ]"}), "(name='sentiment_analysis', packages=['sentiment_analysis'], version=\n '0.2.0', description='sentiment analysis library', author='<NAME>',\n package_data={'sentiment_analysis': ['data/*']}, include_package_data=\n True, install_requires=['pandas==1.1.1', 'transformers==4.12.5',\n 'sentencepiece', 'protobuf',\n 'torch@https://download.pytorch.org/whl/cpu/torch-1.5.0%2Bcpu-cp37-cp37m-linux_x86_64.whl'\n ])\n", (50, 470), False, 'from setuptools import find_packages, setup\n')]
|
# -*- coding: utf-8 -*-
'''
Python CLI wrapper for Amazon Workspaces API
Usage:
workspaces.py get
workspaces.py getallwsids
workspaces.py reboot <WorkspaceId>
workspaces.py test
workspaces.py nuke
Arguments:
WorkspaceId use 'get' to identify a workspace
Options:
-h --help Show this screen.
--version Show version.
Commands:
get Query workspaces
getallwsids Get all workspace IDs
reboot <WorkspaceId> Reboot a specific workspace
test Test run rebooting all workspaces
nuke Reboot all workspaces
'''
from docopt import docopt
from rich.console import Console
from rich.panel import Panel
import boto3
from botocore.config import Config
import configparser
# owned
__author__ = '<NAME>'
__copyright__ = 'Copyright 2021, Python CLI wrapper for Amazon Workspaces API'
__credits__ = ['<NAME>']
__license__ = 'MIT'
__version__ = '0.1.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Dev'
def main():
"""Things start here."""
arguments = docopt(
__doc__,
version='Python CLI wrapper for Amazon Workspaces API - v0.1.0')
if arguments['get']:
get()
elif arguments['getallwsids']:
getallwsids()
elif arguments['reboot']:
reboot(arguments['<WorkspaceId>'])
elif arguments['test']:
test()
elif arguments['nuke']:
nuke()
else:
exit("{0} is not a command. \
See 'workspaces.py --help'.".format(arguments['<command>']))
def aws():
"""Read and parse config file and create AWS API connection."""
config = configparser.ConfigParser()
config.read('config.ini')
config.sections()
my_config = Config(region_name=config['aws']['region'],
signature_version='v4',
retries={
'max_attempts': 10,
'mode': 'standard'
})
client = boto3.client(
'workspaces',
config=my_config,
aws_access_key_id=config['aws']['access_key_id'],
aws_secret_access_key=config['aws']['secret_access_key'])
return (client)
def get():
"""Get AWS Workspace instances."""
client = aws()
console.log(Panel('Getting Workspcaes', title='INFO', style=info_fmt))
workspaces = client.describe_workspaces()['Workspaces']
# workspaceIds = [workspace['WorkspaceId'] for workspace in workspaces]
console.log(workspaces)
# console.log(workspaceIds)
def getallwsids():
"""Get all AWS Workspace instance IDs."""
client = aws()
paginator = client.get_paginator("describe_workspaces")
workspaceIds = []
for result in paginator.paginate():
if "Workspaces" not in result:
continue
for workspace in result["Workspaces"]:
# yield workspace['WorkspaceId']
workspaceIds.append(workspace['WorkspaceId'])
# console.log(workspaceIds)
return (workspaceIds)
def reboot(WorkspaceId):
"""Reboot a specific AWS Workspace instance."""
client = aws()
console.log(
Panel('Attemptng reboot of workspaceId: ' + WorkspaceId,
title='INFO',
style=info_fmt))
response = client.reboot_workspaces(RebootWorkspaceRequests=[
{
'WorkspaceId': WorkspaceId
},
])
console.log(response)
def test():
"""Test run reboot all AWS Workspace instances."""
console.log(Panel('Test run rebooting All Workspcaes', title='INFO',
style=info_fmt))
workspaceIds = getallwsids()
for x in range(len(workspaceIds)):
WorkspaceId = (workspaceIds[x])
console.log('Test run reboot of WorkspaceId: ' +
WorkspaceId, style=info_fmt)
def nuke():
"""Reboot all AWS Workspace instances."""
client = aws()
console.log(Panel('Rebooting All Workspcaes', title='INFO',
style=info_fmt))
workspaceIds = getallwsids()
for x in range(len(workspaceIds)):
WorkspaceId = (workspaceIds[x])
console.log('Rebooting WorkspaceId: ' + WorkspaceId, style=info_fmt)
response = client.reboot_workspaces(RebootWorkspaceRequests=[
{
'WorkspaceId': WorkspaceId
},
])
console.log(response)
if __name__ == '__main__':
console = Console()
info_fmt = 'yellow'
main()
|
[
"rich.panel.Panel",
"boto3.client",
"docopt.docopt",
"botocore.config.Config",
"rich.console.Console",
"configparser.ConfigParser"
] |
[((1134, 1219), 'docopt.docopt', 'docopt', (['__doc__'], {'version': '"""Python CLI wrapper for Amazon Workspaces API - v0.1.0"""'}), "(__doc__, version='Python CLI wrapper for Amazon Workspaces API - v0.1.0'\n )\n", (1140, 1219), False, 'from docopt import docopt\n'), ((1702, 1729), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1727, 1729), False, 'import configparser\n'), ((1799, 1921), 'botocore.config.Config', 'Config', ([], {'region_name': "config['aws']['region']", 'signature_version': '"""v4"""', 'retries': "{'max_attempts': 10, 'mode': 'standard'}"}), "(region_name=config['aws']['region'], signature_version='v4', retries\n ={'max_attempts': 10, 'mode': 'standard'})\n", (1805, 1921), False, 'from botocore.config import Config\n'), ((2037, 2199), 'boto3.client', 'boto3.client', (['"""workspaces"""'], {'config': 'my_config', 'aws_access_key_id': "config['aws']['access_key_id']", 'aws_secret_access_key': "config['aws']['secret_access_key']"}), "('workspaces', config=my_config, aws_access_key_id=config['aws'\n ]['access_key_id'], aws_secret_access_key=config['aws'][\n 'secret_access_key'])\n", (2049, 2199), False, 'import boto3\n'), ((4457, 4466), 'rich.console.Console', 'Console', ([], {}), '()\n', (4464, 4466), False, 'from rich.console import Console\n'), ((2331, 2388), 'rich.panel.Panel', 'Panel', (['"""Getting Workspcaes"""'], {'title': '"""INFO"""', 'style': 'info_fmt'}), "('Getting Workspcaes', title='INFO', style=info_fmt)\n", (2336, 2388), False, 'from rich.panel import Panel\n'), ((3186, 3276), 'rich.panel.Panel', 'Panel', (["('Attemptng reboot of workspaceId: ' + WorkspaceId)"], {'title': '"""INFO"""', 'style': 'info_fmt'}), "('Attemptng reboot of workspaceId: ' + WorkspaceId, title='INFO',\n style=info_fmt)\n", (3191, 3276), False, 'from rich.panel import Panel\n'), ((3546, 3618), 'rich.panel.Panel', 'Panel', (['"""Test run rebooting All Workspcaes"""'], {'title': '"""INFO"""', 'style': 'info_fmt'}), "('Test run 
rebooting All Workspcaes', title='INFO', style=info_fmt)\n", (3551, 3618), False, 'from rich.panel import Panel\n'), ((3955, 4018), 'rich.panel.Panel', 'Panel', (['"""Rebooting All Workspcaes"""'], {'title': '"""INFO"""', 'style': 'info_fmt'}), "('Rebooting All Workspcaes', title='INFO', style=info_fmt)\n", (3960, 4018), False, 'from rich.panel import Panel\n')]
|
#!/usr/bin/env python
from copy import deepcopy
import torch.nn as nn
from yacs.config import CfgNode as CN
from ..layers import ConvBNA, MBConv, FusedMBConv
class EfficientNetV2(nn.Module):
def __init__(self, cfg: CN, in_channels: int = 3):
super(EfficientNetV2, self).__init__()
# input_shape = cfg.get('INPUTS').get('SHAPE')
backbone = cfg['BACKBONE']
# assert len(input_shape) == 3
# in_channels = input_shape[0]
layers, in_channels = self.build(backbone, in_channels)
self.backbone = nn.Sequential(*layers)
try:
head = cfg['HEAD']
layers, in_channels = self.build(head, in_channels)
self.head = nn.Sequential(*layers)
except KeyError:
self.head = None
self.out_channels = in_channels
def build(self, nodes, in_channels):
layers = []
for index, (stage, node) in enumerate(nodes.items()):
for i in range(node.pop('LAYERS', 1)):
stride = node.get('STRIDE', 1) if i == 0 else 1
assert stride
layers.append(self.create_layer(node, in_channels, stride))
in_channels = node.get('CHANNELS')
return layers, in_channels
def create_layer(self, node: CN, in_channels: int, stride: int):
node = deepcopy(node)
ops = node.pop('OPS')
out_channels = node.pop('CHANNELS', None)
kernel_size = node.get('KERNEL')
expansion = node.get('EXPANSION')
se = node.get('SE', 0)
padding = node.get('PADDING', 0)
if ops == 'conv':
layer = ConvBNA(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
)
elif ops == 'mbconv':
layer = MBConv(
in_channels=in_channels,
expansion=expansion,
out_channels=out_channels,
knxn=kernel_size,
stride=stride,
reduction=se,
)
elif ops == 'fused_mbconv':
layer = FusedMBConv(
in_channels=in_channels,
expansion=expansion,
out_channels=out_channels,
knxn=kernel_size,
stride=stride,
reduction=se,
)
else:
layer = getattr(nn, ops)
if not issubclass(layer, nn.Module):
raise ValueError(f'Unknown layer type {ops}')
layer = layer(**node)
return layer
def forward(self, x):
x = self.backbone(x)
if self.head is not None:
x = self.head(x)
return x
def stage_forward(self, x):
s0 = self.backbone[0](x)
s1 = self.backbone[1:3](s0)
s2 = self.backbone[3:7](s1)
s3 = self.backbone[7:11](s2)
s4 = self.backbone[11:17](s3)
s5 = self.backbone[17:26](s4)
s6 = self.backbone[26:](s5)
return {
# 's0': s0,
# 's1': s1,
's2': s2,
's3': s3,
's4': s4,
's5': s5,
's6': s6,
}
@property
def stage_indices(self):
return {
's0': 1,
's1': 3,
's2': 7,
's3': 11,
's4': 17,
's5': 26,
's6': 41,
}
|
[
"copy.deepcopy",
"torch.nn.Sequential"
] |
[((576, 598), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (589, 598), True, 'import torch.nn as nn\n'), ((1390, 1404), 'copy.deepcopy', 'deepcopy', (['node'], {}), '(node)\n', (1398, 1404), False, 'from copy import deepcopy\n'), ((737, 759), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (750, 759), True, 'import torch.nn as nn\n')]
|
import colorsys
import pkgutil
from dataclasses import dataclass
import pandas as pd
from pylighter import config
def text_parser(file_name, **kwargs):
"""Parse text and replace <% variable %> by the value of variable.
Parameters
----------
file_name : str
Path to the file
Returns
-------
file_content : str
The content of the file
"""
file_content = str(pkgutil.get_data("pylighter", file_name), "utf-8")
file_content = file_content.replace("\n", "")
for key in kwargs:
file_content = file_content.replace(f"<% {key} %>", kwargs[key])
return file_content
def js_add_el_to_div(class_name_source, class_name_target):
"""
Js function to append source element to the target one.
"""
js = f"const el = document.getElementsByClassName('{class_name_target}')[0];"
js += "if (el) "
js += f"el.appendChild(document.getElementsByClassName('{class_name_source}')[0])"
return js
def js_remove_el(el_id_class_name):
js = f"const el = document.getElementsByClassName('{el_id_class_name}')[0];"
js += "if (el) el.remove()"
return js
def chunk_html_display(text):
if text and text[-1] == " ":
text = text[:-1] + "␣"
return f"{text}"
def annotation_to_csv(corpus, labels, additional_outputs_values, file_path):
df = pd.DataFrame(data={"document": corpus, "labels": labels})
if additional_outputs_values is not None:
df = pd.concat([df, additional_outputs_values], axis=1)
df.to_csv(file_path, sep=";", index=False)
def assert_IOB2_format(labels_list):
for labels in labels_list:
previous_label = None
for label in labels:
assert label == "O" or label[:2] == "B-" or label[:2] == "I-"
if label == "O":
previous_label = None
continue
if label[:2] == "B-":
previous_label = label[2:]
else:
assert previous_label
assert previous_label == label[2:]
def assert_input_consistency(corpus, labels, start_index):
if labels:
assert len(corpus) == len(labels)
assert_IOB2_format(labels)
assert start_index >= 0
assert start_index < len(corpus)
def compute_selected_label_color(str_color_hex):
rgb = tuple(
int(str_color_hex.lstrip("#")[i : i + 2], 16) / 255 for i in (0, 2, 4) # noqa
)
hue, lightness, saturation = colorsys.rgb_to_hls(*rgb)
lightness *= 0.8
return f"hsl({int(hue*360)}, {int(saturation * 100)}%, {int(lightness * 100)}%)"
def wait_for_threads(threads):
for thread in threads:
thread.join()
threads = []
@dataclass
class LabelColor:
name: str
text_color: str
background_color: str
class AdditionalOutputElement:
def __init__(self, name, display_type, description, default_value):
self.name = name
self.display_type = display_type
self.description = description
self.default_value = default_value
if display_type not in config.DISPLAY_ELEMENTS.keys():
raise ValueError(
f"display_type must one of those {config.DISPLAY_ELEMENTS.keys()}"
)
class PreloadedDisplays:
def __init__(
self,
):
self.previous = {}
self.current = {}
self.next = {}
def update(self, direction):
if direction == 1:
self.previous = self.current
self.current = self.next
self.next = {}
elif direction == -1:
self.next = self.current
self.current = self.previous
self.previous = {}
else:
self.next = {}
self.current = {}
self.previous = {}
|
[
"pandas.DataFrame",
"pkgutil.get_data",
"colorsys.rgb_to_hls",
"pylighter.config.DISPLAY_ELEMENTS.keys",
"pandas.concat"
] |
[((1350, 1407), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'document': corpus, 'labels': labels}"}), "(data={'document': corpus, 'labels': labels})\n", (1362, 1407), True, 'import pandas as pd\n'), ((2460, 2485), 'colorsys.rgb_to_hls', 'colorsys.rgb_to_hls', (['*rgb'], {}), '(*rgb)\n', (2479, 2485), False, 'import colorsys\n'), ((415, 455), 'pkgutil.get_data', 'pkgutil.get_data', (['"""pylighter"""', 'file_name'], {}), "('pylighter', file_name)\n", (431, 455), False, 'import pkgutil\n'), ((1467, 1517), 'pandas.concat', 'pd.concat', (['[df, additional_outputs_values]'], {'axis': '(1)'}), '([df, additional_outputs_values], axis=1)\n', (1476, 1517), True, 'import pandas as pd\n'), ((3067, 3097), 'pylighter.config.DISPLAY_ELEMENTS.keys', 'config.DISPLAY_ELEMENTS.keys', ([], {}), '()\n', (3095, 3097), False, 'from pylighter import config\n'), ((3179, 3209), 'pylighter.config.DISPLAY_ELEMENTS.keys', 'config.DISPLAY_ELEMENTS.keys', ([], {}), '()\n', (3207, 3209), False, 'from pylighter import config\n')]
|
#!/usr/bin/env python3
"""Class interacts with devices supporting IfMIB. (32 Bit Counters)."""
from collections import defaultdict
from switchmap.snmp.base_query import Query
from switchmap.utils import general
def get_query():
"""Return this module's Query class."""
return IfQuery
def init_query(snmp_object):
"""Return initialize and return this module's Query class."""
return IfQuery(snmp_object)
class IfQuery(Query):
"""Class interacts with devices supporting IfMIB.
Args:
None
Returns:
None
Key Methods:
supported: Queries the device to determine whether the MIB is
supported using a known OID defined in the MIB. Returns True
if the device returns a response to the OID, False if not.
layer1: Returns all needed layer 1 MIB information from the device.
Keyed by OID's MIB name (primary key), ifIndex (secondary key)
"""
def __init__(self, snmp_object):
"""Function for intializing the class.
Args:
snmp_object: SNMP Interact class object from snmp_manager.py
Returns:
None
"""
# Define query object
self.snmp_object = snmp_object
# Get one OID entry in MIB (ifDescr)
test_oid = '.1.3.6.1.2.1.2.2.1.1'
super().__init__(snmp_object, test_oid, tags=['system', 'layer1'])
def system(self):
"""Get system data from device.
Args:
None
Returns:
final: Final results
"""
# Initialize key variables
final = defaultdict(lambda: defaultdict(dict))
# Return
final['IF-MIB']['ifStackStatus'] = self.ifstackstatus()
return final
def layer1(self):
"""Get layer 1 data from device using Layer 1 OIDs.
Args:
None
Returns:
final: Final results
"""
# Initialize key variables
final = defaultdict(lambda: defaultdict(dict))
# Get interface ifDescr data
_get_data('ifDescr', self.ifdescr, final)
# Get interface ifAlias data
_get_data('ifAlias', self.ifalias, final)
# Get interface ifSpeed data
_get_data('ifSpeed', self.ifspeed, final)
# Get interface ifOperStatus data
_get_data('ifOperStatus', self.ifoperstatus, final)
# Get interface ifAdminStatus data
_get_data('ifAdminStatus', self.ifadminstatus, final)
# Get interface ifType data
_get_data('ifType', self.iftype, final)
# Get interface ifName data
_get_data('ifName', self.ifname, final)
# Get interface ifIndex data
_get_data('ifIndex', self.ifindex, final)
# Get interface ifPhysAddress data
_get_data('ifPhysAddress', self.ifphysaddress, final)
# Get interface ifInOctets data
_get_data('ifInOctets', self.ifinoctets, final)
# Get interface ifOutOctets data
_get_data('ifOutOctets', self.ifoutoctets, final)
# Get interface ifInBroadcastPkts data
_get_data('ifInBroadcastPkts', self.ifinbroadcastpkts, final)
# Get interface ifOutBroadcastPkts data
_get_data('ifOutBroadcastPkts', self.ifoutbroadcastpkts, final)
# Get interface ifInMulticastPkts data
_get_data('ifInMulticastPkts', self.ifinmulticastpkts, final)
# Get interface ifOutMulticastPkts data
_get_data('ifOutMulticastPkts', self.ifoutmulticastpkts, final)
# Get interface ifLastChange data
_get_data('ifLastChange', self.iflastchange, final)
# Return
return final
def iflastchange(self, oidonly=False):
"""Return dict of IFMIB ifLastChange for each ifIndex for device.
Args:
oidonly: Return OID's value, not results, if True
Returns:
data_dict: Dict of ifLastChange using the oid's last node as key
"""
# Initialize key variables
data_dict = defaultdict(dict)
# Process OID
oid = '.1.3.6.1.2.1.2.2.1.9'
# Return OID value. Used for unittests
if oidonly is True:
return oid
# Process results
results = self.snmp_object.walk(oid, normalized=True)
for key, value in results.items():
# Process OID
data_dict[int(key)] = value
# Return the interface descriptions
return data_dict
def ifinoctets(self, safe=False, oidonly=False):
"""Return dict of IFMIB ifInOctets for each ifIndex for device.
Args:
safe: Do a failsafe walk if True
oidonly: Return OID's value, not results, if True
Returns:
data_dict: Dict of ifInOctets using the oid's last node as key
"""
# Initialize key variables
data_dict = defaultdict(dict)
# Process OID
oid = '.1.3.6.1.2.1.2.2.1.10'
# Return OID value. Used for unittests
if oidonly is True:
return oid
# Process results
if safe is False:
results = self.snmp_object.walk(oid, normalized=True)
else:
results = self.snmp_object.swalk(oid, normalized=True)
for key, value in results.items():
# Process OID
data_dict[int(key)] = value
# Return the interface descriptions
return data_dict
def ifoutoctets(self, safe=False, oidonly=False):
"""Return dict of IFMIB ifOutOctets for each ifIndex for device.
Args:
safe: Do a failsafe walk if True
oidonly: Return OID's value, not results, if True
Returns:
data_dict: Dict of ifOutOctets using the oid's last node as key
"""
# Initialize key variables
data_dict = defaultdict(dict)
# Process OID
oid = '.1.3.6.1.2.1.2.2.1.16'
# Return OID value. Used for unittests
if oidonly is True:
return oid
# Process results
if safe is False:
results = self.snmp_object.walk(oid, normalized=True)
else:
results = self.snmp_object.swalk(oid, normalized=True)
for key, value in results.items():
# Process OID
data_dict[int(key)] = value
# Return the interface descriptions
return data_dict
def ifdescr(self, safe=False, oidonly=False):
"""Return dict of IFMIB ifDescr for each ifIndex for device.
Args:
safe: Do a failsafe walk if True
oidonly: Return OID's value, not results, if True
Returns:
data_dict: Dict of ifDescr using the oid's last node as key
"""
# Initialize key variables
data_dict = defaultdict(dict)
# Process OID
oid = '.1.3.6.1.2.1.2.2.1.2'
# Return OID value. Used for unittests
if oidonly is True:
return oid
# Process results
if safe is False:
results = self.snmp_object.walk(oid, normalized=True)
else:
results = self.snmp_object.swalk(oid, normalized=True)
for key, value in results.items():
# Process OID
data_dict[int(key)] = str(bytes(value), encoding='utf-8')
# Return the interface descriptions
return data_dict
def iftype(self, oidonly=False):
"""Return dict of IFMIB ifType for each ifIndex for device.
Args:
oidonly: Return OID's value, not results, if True
Returns:
data_dict: Dict of ifType using the oid's last node as key
"""
# Initialize key variables
data_dict = defaultdict(dict)
# Process OID
oid = '.1.3.6.1.2.1.2.2.1.3'
# Return OID value. Used for unittests
if oidonly is True:
return oid
# Process results
results = self.snmp_object.walk(oid, normalized=True)
for key, value in results.items():
# Process OID
data_dict[int(key)] = value
# Return the interface descriptions
return data_dict
def ifspeed(self, oidonly=False):
"""Return dict of IFMIB ifSpeed for each ifIndex for device.
Args:
oidonly: Return OID's value, not results, if True
Returns:
data_dict: Dict of ifSpeed using the oid's last node as key
"""
# Initialize key variables
data_dict = defaultdict(dict)
# Process OID
oid = '.1.3.6.1.2.1.2.2.1.5'
# Return OID value. Used for unittests
if oidonly is True:
return oid
# Process results
results = self.snmp_object.walk(oid, normalized=True)
for key, value in results.items():
# Process OID
data_dict[int(key)] = value
# Return the interface descriptions
return data_dict
def ifadminstatus(self, oidonly=False):
"""Return dict of IFMIB ifAdminStatus for each ifIndex for device.
Args:
oidonly: Return OID's value, not results, if True
Returns:
data_dict: Dict of ifAdminStatus using the oid's last node as key
"""
# Initialize key variables
data_dict = defaultdict(dict)
# Process OID
oid = '.1.3.6.1.2.1.2.2.1.7'
# Return OID value. Used for unittests
if oidonly is True:
return oid
# Process results
results = self.snmp_object.walk(oid, normalized=True)
for key, value in results.items():
# Process OID
data_dict[int(key)] = value
# Return the interface descriptions
return data_dict
def ifoperstatus(self, oidonly=False):
"""Return dict of IFMIB ifOperStatus for each ifIndex for device.
Args:
oidonly: Return OID's value, not results, if True
Returns:
data_dict: Dict of ifOperStatus using the oid's last node as key
"""
# Initialize key variables
data_dict = defaultdict(dict)
# Process OID
oid = '.1.3.6.1.2.1.2.2.1.8'
# Return OID value. Used for unittests
if oidonly is True:
return oid
# Process results
results = self.snmp_object.walk(oid, normalized=True)
for key, value in results.items():
# Process OID
data_dict[int(key)] = value
# Return the interface descriptions
return data_dict
def ifalias(self, oidonly=False):
    """Return dict of IFMIB ifAlias for each ifIndex for device.

    Args:
        oidonly: Return OID's value, not results, if True

    Returns:
        data_dict: Dict of ifAlias keyed by the OID's last node
    """
    # ifAlias OID defined by the IF-MIB
    oid = '.1.3.6.1.2.1.31.1.1.1.18'

    # Return OID value. Used for unittests
    if oidonly is True:
        return oid

    # Walk the tree; values arrive as octet strings, so decode each one
    # to UTF-8 text and key it by its integer ifIndex
    data_dict = defaultdict(dict)
    for node, alias in self.snmp_object.walk(oid, normalized=True).items():
        data_dict[int(node)] = bytes(alias).decode('utf-8')

    # Return the interface aliases
    return data_dict
def ifname(self, oidonly=False):
    """Return dict of IFMIB ifName for each ifIndex for device.

    Args:
        oidonly: Return OID's value, not results, if True

    Returns:
        data_dict: Dict of ifName keyed by the OID's last node
    """
    # ifName OID defined by the IF-MIB
    oid = '.1.3.6.1.2.1.31.1.1.1.1'

    # Return OID value. Used for unittests
    if oidonly is True:
        return oid

    # Walk the tree; values arrive as octet strings, so decode each one
    # to UTF-8 text and key it by its integer ifIndex
    data_dict = defaultdict(dict)
    for node, name in self.snmp_object.walk(oid, normalized=True).items():
        data_dict[int(node)] = bytes(name).decode('utf-8')

    # Return the interface names
    return data_dict
def ifindex(self, oidonly=False):
    """Return dict of IFMIB ifindex for each ifIndex for device.

    Args:
        oidonly: Return OID's value, not results, if True

    Returns:
        data_dict: Dict of ifindex keyed by the OID's last node
    """
    # ifIndex OID defined by the IF-MIB
    oid = '.1.3.6.1.2.1.2.2.1.1'

    # Return OID value. Used for unittests
    if oidonly is True:
        return oid

    # Walk the tree and re-key the results by integer ifIndex
    walked = self.snmp_object.walk(oid, normalized=True)
    data_dict = defaultdict(dict)
    data_dict.update({int(node): value for node, value in walked.items()})

    # Return the interface indexes
    return data_dict
def ifphysaddress(self, oidonly=False):
    """Return dict of IFMIB ifPhysAddress for each ifIndex for device.

    Args:
        oidonly: Return OID's value, not results, if True

    Returns:
        data_dict: Dict of ifPhysAddress keyed by the OID's last node
    """
    # ifPhysAddress OID defined by the IF-MIB
    oid = '.1.3.6.1.2.1.2.2.1.6'

    # Return OID value. Used for unittests
    if oidonly is True:
        return oid

    # Walk the tree, converting each octet-string value to a MAC address
    # string and keying it by its integer ifIndex
    data_dict = defaultdict(dict)
    for node, octets in self.snmp_object.walk(oid, normalized=True).items():
        data_dict[int(node)] = general.octetstr_2_string(octets)

    # Return the interface MAC addresses
    return data_dict
def ifinmulticastpkts(self, oidonly=False):
    """Return dict of IFMIB ifInMulticastPkts for each ifIndex for device.

    Args:
        oidonly: Return OID's value, not results, if True

    Returns:
        data_dict: Dict of ifInMulticastPkts. Key = OID's last node.
    """
    # ifInMulticastPkts OID defined by the IF-MIB
    oid = '.1.3.6.1.2.1.31.1.1.1.2'

    # Return OID value. Used for unittests
    if oidonly is True:
        return oid

    # Walk the tree, keying each counter by its integer ifIndex
    data_dict = defaultdict(dict)
    for node, counter in self.snmp_object.walk(oid, normalized=True).items():
        data_dict[int(node)] = counter

    # Return the inbound multicast counters
    return data_dict
def ifoutmulticastpkts(self, oidonly=False):
    """Return dict of IFMIB ifOutMulticastPkts for each ifIndex for device.

    Args:
        oidonly: Return OID's value, not results, if True

    Returns:
        data_dict: Dict of ifOutMulticastPkts. Key = OID's last node.
    """
    # ifOutMulticastPkts OID defined by the IF-MIB
    oid = '.1.3.6.1.2.1.31.1.1.1.4'

    # Return OID value. Used for unittests
    if oidonly is True:
        return oid

    # Walk the tree and re-key the counters by integer ifIndex
    walked = self.snmp_object.walk(oid, normalized=True)
    data_dict = defaultdict(dict)
    data_dict.update({int(node): counter for node, counter in walked.items()})

    # Return the outbound multicast counters
    return data_dict
def ifinbroadcastpkts(self, oidonly=False):
    """Return dict of IFMIB ifInBroadcastPkts for each ifIndex for device.

    Args:
        oidonly: Return OID's value, not results, if True

    Returns:
        data_dict: Dict of ifInBroadcastPkts. Key = OID's last node.
    """
    # ifInBroadcastPkts OID defined by the IF-MIB
    oid = '.1.3.6.1.2.1.31.1.1.1.3'

    # Return OID value. Used for unittests
    if oidonly is True:
        return oid

    # Walk the tree, keying each counter by its integer ifIndex
    data_dict = defaultdict(dict)
    for node, counter in self.snmp_object.walk(oid, normalized=True).items():
        data_dict[int(node)] = counter

    # Return the inbound broadcast counters
    return data_dict
def ifoutbroadcastpkts(self, oidonly=False):
    """Return dict of IFMIB ifOutBroadcastPkts for each ifIndex for device.

    Args:
        oidonly: Return OID's value, not results, if True

    Returns:
        data_dict: Dict of ifOutBroadcastPkts. Key = OID's last node.
    """
    # ifOutBroadcastPkts OID defined by the IF-MIB
    oid = '.1.3.6.1.2.1.31.1.1.1.5'

    # Return OID value. Used for unittests
    if oidonly is True:
        return oid

    # Walk the tree and re-key the counters by integer ifIndex
    walked = self.snmp_object.walk(oid, normalized=True)
    data_dict = defaultdict(dict)
    data_dict.update({int(node): counter for node, counter in walked.items()})

    # Return the outbound broadcast counters
    return data_dict
def ifstackstatus(self, oidonly=False):
    """Return dict of IFMIB ifStackStatus for each ifIndex for device.

    Args:
        oidonly: Return OID's value, not results, if True

    Returns:
        final: Dict keyed by the ifIndex of the ifStackLowerLayer, where
            each value is a list of ifStackHigherLayer ifIndex values.

    Summary:
        According to the official IF-MIB file, ifStackStatus is a
        "table containing information on the relationships
        between the multiple sub-layers of network interfaces. In
        particular, it contains information on which sub-layers run
        'on top of' which other sub-layers, where each sub-layer
        corresponds to a conceptual row in the ifTable. For
        example, when the sub-layer with ifIndex value x runs over
        the sub-layer with ifIndex value y, then this table
        contains:

            ifStackStatus.x.y=active

        For each ifIndex value, I, which identifies an active
        interface, there are always at least two instantiated rows
        in this table associated with I. For one of these rows, I
        is the value of ifStackHigherLayer; for the other, I is the
        value of ifStackLowerLayer. (If I is not involved in
        multiplexing, then these are the only two rows associated
        with I.)

        For example, two rows exist even for an interface which has
        no others stacked on top or below it:

            ifStackStatus.0.x=active
            ifStackStatus.x.0=active"

        In the case of Juniper equipment, VLAN information is only
        visible on subinterfaces of the main interface. For example
        interface ge-0/0/0 won't have VLAN information assigned to it
        directly.

        When a VLAN is assigned to this interface, a subinterface
        ge-0/0/0.0 is automatically created with a non-Ethernet ifType.
        VLAN related OIDs are only maintained for this new subinterface
        only. This makes determining an interface's VLAN based on
        Ethernet ifType more difficult. ifStackStatus maps the ifIndex of
        the primary interface (ge-0/0/0) to the ifIndex of the secondary
        interface (ge-0/0/0.0) which manages higher level protocols and
        data structures such as VLANs and LLDP.

        The primary interface is referred to as the
        ifStackLowerLayer and the secondary subinterface is referred to
        as the ifStackHigherLayer.
    """
    # Initialize key variables. Values are lists of higher-layer
    # ifIndexes, so use a list factory. (The previous dict factory was
    # dead code and would have broken the .append() below if a missing
    # key had ever been read.)
    final = defaultdict(list)

    # Process OID
    oid = '.1.3.6.1.2.1.31.1.2.1.3'

    # Return OID value. Used for unittests
    if oidonly is True:
        return oid

    # Process results. The walk is not normalized because both layer
    # ifIndexes are encoded in the OID suffix.
    results = self.snmp_object.walk(oid, normalized=False)
    for key in results.keys():
        # Get higher and lower layer index values from the last two
        # nodes of the OID
        nodes = key.split('.')
        ifstackhigherlayer = int(nodes[-2])
        ifstacklowerlayer = int(nodes[-1])

        # Skip entries where there is no lower layer interface
        if ifstacklowerlayer == 0:
            continue

        # Make primary key the lower layer interface ifIndex and the
        # value a list of higher level interface ifIndexes.
        final[ifstacklowerlayer].append(ifstackhigherlayer)

    # Return the stacking relationships
    return final
def _get_data(title, func, dest):
"""Populate dest with data from the given function.
Args:
title: The name of the data
func: The function which will return the data
dest: a dict which will store the data
Returns:
dest: The modified destination dict
"""
# Get interface data
values = func()
for key, value in values.items():
dest[key][title] = value
return dest
|
[
"collections.defaultdict",
"switchmap.utils.general.octetstr_2_string"
] |
[((4035, 4052), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (4046, 4052), False, 'from collections import defaultdict\n'), ((4889, 4906), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (4900, 4906), False, 'from collections import defaultdict\n'), ((5858, 5875), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (5869, 5875), False, 'from collections import defaultdict\n'), ((6815, 6832), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (6826, 6832), False, 'from collections import defaultdict\n'), ((7741, 7758), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (7752, 7758), False, 'from collections import defaultdict\n'), ((8529, 8546), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (8540, 8546), False, 'from collections import defaultdict\n'), ((9335, 9352), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (9346, 9352), False, 'from collections import defaultdict\n'), ((10138, 10155), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (10149, 10155), False, 'from collections import defaultdict\n'), ((10926, 10943), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (10937, 10943), False, 'from collections import defaultdict\n'), ((11745, 11762), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (11756, 11762), False, 'from collections import defaultdict\n'), ((12566, 12583), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (12577, 12583), False, 'from collections import defaultdict\n'), ((13372, 13389), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (13383, 13389), False, 'from collections import defaultdict\n'), ((14227, 14244), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (14238, 14244), False, 'from collections import defaultdict\n'), ((15042, 15059), 'collections.defaultdict', 
'defaultdict', (['dict'], {}), '(dict)\n', (15053, 15059), False, 'from collections import defaultdict\n'), ((15854, 15871), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (15865, 15871), False, 'from collections import defaultdict\n'), ((16669, 16686), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (16680, 16686), False, 'from collections import defaultdict\n'), ((13760, 13792), 'switchmap.utils.general.octetstr_2_string', 'general.octetstr_2_string', (['value'], {}), '(value)\n', (13785, 13792), False, 'from switchmap.utils import general\n'), ((1632, 1649), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1643, 1649), False, 'from collections import defaultdict\n'), ((2004, 2021), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (2015, 2021), False, 'from collections import defaultdict\n'), ((19838, 19855), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (19849, 19855), False, 'from collections import defaultdict\n')]
|