from torch.utils.data import Sampler
import numpy as np
def get_chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
def flatten(l):
return [item for sublist in l for item in sublist]
class LengthSortSampler(Sampler):
def __init__(self, data_source, bs):
super().__init__(data_source)
self.data_source = data_source
self.bs = bs
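        # data_source may hold precomputed lengths (ints) or sized items
        # (e.g. token lists); the try/except below handles both cases.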
try:
int(self.data_source[0])
lengths = self.data_source
except TypeError:
lengths = [len(x) for x in self.data_source]
inds = np.argsort(lengths)[::-1]
chunks = list(get_chunks(inds, bs))
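        # Shuffle the order of the full-size chunks, but keep the last
        # (possibly shorter) chunk at the end.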
chunk_inds = list(range(len(chunks) - 1))
np.random.shuffle(chunk_inds)
chunk_inds = list(chunk_inds) + [len(chunk_inds)]
self.inds = flatten([chunks[i] for i in chunk_inds])
def __len__(self):
return len(self.data_source)
def __iter__(self):
return iter(self.inds)
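if __name__ == "__main__":
    # Minimal usage sketch (the toy data, batch size and DataLoader wiring
    # below are illustrative, not part of the original module): sequences of
    # similar length end up in the same batch.
    from torch.utils.data import DataLoader
    data = [list(range(n)) for n in (5, 2, 9, 3, 7, 1)]
    sampler = LengthSortSampler(data, bs=2)
    # batch_size must equal bs so batches line up with the sampler's chunks
    loader = DataLoader(data, batch_size=2, sampler=sampler, collate_fn=lambda b: b)
    for batch in loader:
        print([len(x) for x in batch])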
import math
import numpy as np
class Strategy:
"""Options strategy class.
Takes in a number of `StrategyLeg`'s (option contracts), and filters that determine
entry and exit conditions.
"""
def __init__(self, schema):
self.schema = schema
self.legs = []
self.conditions = []
self.exit_thresholds = (math.inf, math.inf)
def add_leg(self, leg):
"""Adds leg to the strategy"""
assert self.schema == leg.schema
leg.name = "leg_{}".format(len(self.legs) + 1)
self.legs.append(leg)
return self
def add_legs(self, legs):
"""Adds legs to the strategy"""
for leg in legs:
self.add_leg(leg)
return self
def remove_leg(self, leg_number):
"""Removes leg from the strategy"""
self.legs.pop(leg_number)
return self
def clear_legs(self):
"""Removes *all* legs from the strategy"""
self.legs = []
return self
def add_exit_thresholds(self, profit_pct=math.inf, loss_pct=math.inf):
"""Adds maximum profit/loss thresholds. Both **must** be >= 0.0
Args:
profit_pct (float, optional): Max profit level. Defaults to math.inf
loss_pct (float, optional): Max loss level. Defaults to math.inf
"""
assert profit_pct >= 0
assert loss_pct >= 0
self.exit_thresholds = (profit_pct, loss_pct)
def filter_thresholds(self, entry_cost, current_cost):
"""Returns a `pd.Series` of booleans indicating where profit (loss) levels
exceed the given thresholds.
Args:
entry_cost (pd.Series): Total _entry_ cost of inventory row.
current_cost (pd.Series): Present cost of inventory row.
Returns:
pd.Series: Indicator series with `True` for every row that
exceeds the specified profit/loss thresholds.
"""
profit_pct, loss_pct = self.exit_thresholds
excess_return = (current_cost / entry_cost + 1) * -np.sign(entry_cost)
return (excess_return >= profit_pct) | (excess_return <= -loss_pct)
def __repr__(self):
return "Strategy(legs={}, exit_thresholds={})".format(self.legs, self.exit_thresholds)
"""
TODO: the code is taken from the Apache-2 licensed NLTK: make sure we do this properly!
Copied over from nltk.translate.bleu_score. This code has two major changes:
- allows turning off the length/brevity penalty --- it makes no sense for self-BLEU,
- allows using the arithmetic instead of the geometric mean
"""
import math
import sys
from fractions import Fraction
import warnings
from collections import Counter
from nltk.translate.bleu_score import modified_precision, closest_ref_length, brevity_penalty, SmoothingFunction
def corpus_bleu(
list_of_references,
hypotheses,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweigh=False,
averaging_mode="geometric",
no_length_penalty=False
):
"""
Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
the hypotheses and their respective references.
    Instead of averaging the sentence level BLEU scores (i.e. macro-average
precision), the original BLEU metric (Papineni et al. 2002) accounts for
the micro-average precision (i.e. summing the numerators and denominators
for each hypothesis-reference(s) pairs before the division).
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
0.5920...
    The example below shows that corpus_bleu() is different from averaging
sentence_bleu() for hypotheses
>>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
>>> score2 = sentence_bleu([ref2a], hyp2)
>>> (score1 + score2) / 2 # doctest: +ELLIPSIS
0.6223...
:param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
:type list_of_references: list(list(list(str)))
:param hypotheses: a list of hypothesis sentences
:type hypotheses: list(list(str))
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweigh: Option to re-normalize the weights uniformly.
:type auto_reweigh: bool
:return: The corpus-level BLEU score.
:rtype: float
"""
# Before proceeding to compute BLEU, perform sanity checks.
p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.
p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.
hyp_lengths, ref_lengths = 0, 0
assert len(list_of_references) == len(hypotheses), (
"The number of hypotheses and their reference(s) should be the " "same "
)
# Iterate through each hypothesis and their corresponding references.
for references, hypothesis in zip(list_of_references, hypotheses):
# For each order of ngram, calculate the numerator and
# denominator for the corpus-level modified precision.
for i, _ in enumerate(weights, start=1):
p_i = modified_precision(references, hypothesis, i)
p_numerators[i] += p_i.numerator
p_denominators[i] += p_i.denominator
# Calculate the hypothesis length and the closest reference length.
# Adds them to the corpus-level hypothesis and reference counts.
hyp_len = len(hypothesis)
hyp_lengths += hyp_len
ref_lengths += closest_ref_length(references, hyp_len)
# Calculate corpus-level brevity penalty.
if no_length_penalty and averaging_mode == 'geometric':
bp = 1.0
elif no_length_penalty and averaging_mode == 'arithmetic':
bp = 0.0
else:
assert not no_length_penalty
        assert averaging_mode != 'arithmetic', 'Not sure how to apply the length penalty in arithmetic mode'
bp = brevity_penalty(ref_lengths, hyp_lengths)
# Uniformly re-weighting based on maximum hypothesis lengths if largest
# order of n-grams < 4 and weights is set at default.
if auto_reweigh:
if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25):
weights = (1 / hyp_lengths,) * hyp_lengths
# Collects the various precision values for the different ngram orders.
p_n = [
Fraction(p_numerators[i], p_denominators[i], _normalize=False)
for i, _ in enumerate(weights, start=1)
]
# Returns 0 if there's no matching n-grams
# We only need to check for p_numerators[1] == 0, since if there's
# no unigrams, there won't be any higher order ngrams.
if p_numerators[1] == 0:
return 0
    # If there's no smoothing, use method0 from the SmoothingFunction class.
    if not smoothing_function:
        smoothing_function = SmoothingFunction().method0
    # Smooth the modified precision.
# Note: smoothing_function() may convert values into floats;
# it tries to retain the Fraction object as much as the
# smoothing method allows.
p_n = smoothing_function(
p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths
)
if averaging_mode == "geometric":
s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, p_n))
s = bp * math.exp(math.fsum(s))
elif averaging_mode == "arithmetic":
s = (w_i * p_i for w_i, p_i in zip(weights, p_n))
s = math.fsum(s)
return s
def sentence_bleu(
references,
hypothesis,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweigh=False,
averaging_mode="geometric",
no_length_penalty=False
):
return corpus_bleu(
[references], [hypothesis], weights, smoothing_function, auto_reweigh, averaging_mode, no_length_penalty
    )
"nltk.translate.bleu_score.SmoothingFunction",
"fractions.Fraction",
"math.log",
"collections.Counter",
"nltk.translate.bleu_score.brevity_penalty",
"nltk.translate.bleu_score.closest_ref_length",
"math.fsum",
"nltk.translate.bleu_score.modified_precision"
] | [((3419, 3428), 'collections.Counter', 'Counter', ([], {}), '()\n', (3426, 3428), False, 'from collections import Counter\n'), ((3507, 3516), 'collections.Counter', 'Counter', ([], {}), '()\n', (3514, 3516), False, 'from collections import Counter\n'), ((4490, 4529), 'nltk.translate.bleu_score.closest_ref_length', 'closest_ref_length', (['references', 'hyp_len'], {}), '(references, hyp_len)\n', (4508, 4529), False, 'from nltk.translate.bleu_score import modified_precision, closest_ref_length, brevity_penalty, SmoothingFunction\n'), ((5340, 5402), 'fractions.Fraction', 'Fraction', (['p_numerators[i]', 'p_denominators[i]'], {'_normalize': '(False)'}), '(p_numerators[i], p_denominators[i], _normalize=False)\n', (5348, 5402), False, 'from fractions import Fraction\n'), ((4104, 4149), 'nltk.translate.bleu_score.modified_precision', 'modified_precision', (['references', 'hypothesis', 'i'], {}), '(references, hypothesis, i)\n', (4122, 4149), False, 'from nltk.translate.bleu_score import modified_precision, closest_ref_length, brevity_penalty, SmoothingFunction\n'), ((4912, 4953), 'nltk.translate.bleu_score.brevity_penalty', 'brevity_penalty', (['ref_lengths', 'hyp_lengths'], {}), '(ref_lengths, hyp_lengths)\n', (4927, 4953), False, 'from nltk.translate.bleu_score import modified_precision, closest_ref_length, brevity_penalty, SmoothingFunction\n'), ((5830, 5849), 'nltk.translate.bleu_score.SmoothingFunction', 'SmoothingFunction', ([], {}), '()\n', (5847, 5849), False, 'from nltk.translate.bleu_score import modified_precision, closest_ref_length, brevity_penalty, SmoothingFunction\n'), ((6452, 6464), 'math.fsum', 'math.fsum', (['s'], {}), '(s)\n', (6461, 6464), False, 'import math\n'), ((6248, 6261), 'math.log', 'math.log', (['p_i'], {}), '(p_i)\n', (6256, 6261), False, 'import math\n'), ((6324, 6336), 'math.fsum', 'math.fsum', (['s'], {}), '(s)\n', (6333, 6336), False, 'import math\n')] |
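if __name__ == "__main__":
    # Minimal sketch of the two options added in this module (the sentences
    # are made up): for self-BLEU one would disable the brevity penalty and
    # average arithmetically instead of geometrically.
    hyp = "the quick brown fox jumps over the dog".split()
    refs = ["the quick brown fox jumps over the lazy dog".split()]
    print(corpus_bleu([refs], [hyp],
                      averaging_mode="arithmetic",
                      no_length_penalty=True))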
from models.todo_ajax import TodoAjax
from web_framework import (
current_user,
html_response,
json_response,
)
from utils import log
def index(request):
u = current_user(request)
return html_response('todo_ajax_index.html')
def all(request):
todos = TodoAjax.all()
todos = [t.__dict__ for t in todos]
return json_response(todos)
def add(request):
u = current_user(request)
form = request.json()
log('ajax todo add', form, u)
t = TodoAjax.add(form, u.id)
    message = dict(message='{} added successfully'.format(t.title))
return json_response(message)
def route_dict():
d = {
'/todo/ajax/add': add,
'/todo/ajax/index': index,
'/todo/ajax/all': all,
}
    return d
"utils.log",
"web_framework.current_user",
"models.todo_ajax.TodoAjax.all",
"web_framework.json_response",
"web_framework.html_response",
"models.todo_ajax.TodoAjax.add"
] | [((177, 198), 'web_framework.current_user', 'current_user', (['request'], {}), '(request)\n', (189, 198), False, 'from web_framework import current_user, html_response, json_response\n'), ((210, 247), 'web_framework.html_response', 'html_response', (['"""todo_ajax_index.html"""'], {}), "('todo_ajax_index.html')\n", (223, 247), False, 'from web_framework import current_user, html_response, json_response\n'), ((280, 294), 'models.todo_ajax.TodoAjax.all', 'TodoAjax.all', ([], {}), '()\n', (292, 294), False, 'from models.todo_ajax import TodoAjax\n'), ((346, 366), 'web_framework.json_response', 'json_response', (['todos'], {}), '(todos)\n', (359, 366), False, 'from web_framework import current_user, html_response, json_response\n'), ((395, 416), 'web_framework.current_user', 'current_user', (['request'], {}), '(request)\n', (407, 416), False, 'from web_framework import current_user, html_response, json_response\n'), ((447, 476), 'utils.log', 'log', (['"""ajax todo add"""', 'form', 'u'], {}), "('ajax todo add', form, u)\n", (450, 476), False, 'from utils import log\n'), ((485, 509), 'models.todo_ajax.TodoAjax.add', 'TodoAjax.add', (['form', 'u.id'], {}), '(form, u.id)\n', (497, 509), False, 'from models.todo_ajax import TodoAjax\n'), ((584, 606), 'web_framework.json_response', 'json_response', (['message'], {}), '(message)\n', (597, 606), False, 'from web_framework import current_user, html_response, json_response\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import subprocess
def crunchyroll_name(anime_name, episode_number, resolution):
anime_name = str(anime_name).replace("039T", "'")
# rawName = str(animeName).title().strip().replace("Season ", "S") + " - " + \
# str(episode_number).strip() + " [" + str(resolution) + "]"
file_name = str(re.sub(r'[^A-Za-z0-9\ \-\' \\]+', '', str(anime_name))).title().strip().replace("Season ", "S") \
+ " - " + str(episode_number.zfill(2)).strip() + " [" + str(resolution) + "].mp4"
try:
max_path = int(subprocess.check_output(['getconf', 'PATH_MAX', '/']))
except Exception:
max_path = 4096
if len(file_name) > max_path:
file_name = file_name[:max_path]
return file_name
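if __name__ == '__main__':
    # Usage sketch (made-up title): note that episode_number must be a
    # string, since zfill() is called on it before the str() conversion.
    print(crunchyroll_name("demon slayer Season 2", "5", "720p"))
    # -> "Demon Slayer S2 - 05 [720p].mp4"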
#!/usr/bin/env python3
#-*- coding=utf-8 -*-
## export phylogenetic data from UraLex basic vocabulary dataset
import sys
def checkPythonVersion():
if (sys.version_info[0] < 3):
print("Python 3 is needed to run this program.")
sys.exit(1)
checkPythonVersion()
import os
import io
import argparse
import urllib.request
import reader
import versions
import exporter
#implied constants
PARSER_DESC = "Export phylogenetic formats from BEDLAN spreadsheet data."
DEFAULT_NEXUS_DIALECT = "beast"
DEFAULT_CHARSETS = True
DEFAULT_MEANING_LIST = "all"
parser = argparse.ArgumentParser(description=PARSER_DESC)
parser.add_argument("-o","--output",
dest="outfile",
help="output to file OUTFILE. If not set, will output to STDOUT",
metavar="OUTFILE")
parser.add_argument("-x","--exclude-taxa",
dest="exclude_taxa",
help="comma-separated list of taxa to exclude",
default="",
type=str)
parser.add_argument("-l","--meaning-list",
dest="meaning_list",
help="meaning list to use. Defaults to \"" + DEFAULT_MEANING_LIST + "\"",
default=DEFAULT_MEANING_LIST,
type=str)
parser.add_argument("-f","--format",
dest="format",
help="Export format. Currently only one valid option: nexus.",
default="nexus",
type=str)
parser.add_argument("-d","--dialect",
dest="dialect",
help="NEXUS dialect: mrbayes, beast, splitstree. Defaults to \"" + DEFAULT_NEXUS_DIALECT + "\"",
default=DEFAULT_NEXUS_DIALECT)
parser.add_argument("-1","--no-charsets",
dest="charsets",
help="Export without separate characters sets for each meaning",
default=DEFAULT_CHARSETS,
action='store_false')
parser.add_argument("-c","--correlate",
dest="correlate",
action='store_true',
help="Export correlate characters instead of cognate (root-meaning form) characters.")
parser.add_argument("-r","--raw_folder",
dest="raw_folder",
action='store_true',
help="Look for data in an uncompressed 'raw' folder rather than a released zip file.")
if __name__ == '__main__':
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
args = parser.parse_args()
excluded_languages = []
if args.exclude_taxa != "":
excluded_languages = args.exclude_taxa.split(",")
dialect = args.dialect
    if args.raw_folder:
dataset = reader.UraLexReader("raw", args.correlate)
else:
dataset = reader.UraLexReader(versions.getLatestVersion(), args.correlate)
    ural_exporter = exporter.UralexExporter(dataset)  # avoid shadowing the exporter module
    ural_exporter.setMeaningList(args.meaning_list)
    ural_exporter.setLanguageExcludeList(excluded_languages)
    ural_exporter.setFormat(args.format, args.dialect)
    ural_exporter.setCharsets(args.charsets)
    #print("Export")
    outlines = ural_exporter.export()
if args.outfile != None:
if os.path.isfile(args.outfile):
while True:
prompt = input("File " + args.outfile + " already exists. Overwrite? (y/n)")
if (prompt == "y" or prompt == "n"):
break
if (prompt == "n"):
print("File not written.")
sys.exit()
f = open(args.outfile,"w")
for line in outlines:
f.write(line + "\n")
f.close()
else:
for line in outlines:
print(line)
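# Example invocation (the script filename is a placeholder; the flags are
# those defined by the parser above):
#   python3 export.py --format nexus --dialect beast --meaning-list all -o uralex.nex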
import clr
clr.AddReference("PythonNetTest")  # load the assembly before importing from it (assumes the assembly shares the namespace's name)
from PythonNetTest import Multiplier
def multiplyThese(a, b):
    m = Multiplier()
    return m.Multiply(a, b)
# print("3 * 5: " + str(multiplyThese(3.0, 5.0)))
import os
EXAMPLE_SVG_PATH = os.path.join(os.path.dirname(__file__), 'example.svg')
IDS_IN_EXAMPLE_SVG = {'red', 'yellow', 'blue', 'green'}
IDS_IN_EXAMPLE2_SVG = {'punainen', 'keltainen', 'sininen', 'vihrea'}
with open(EXAMPLE_SVG_PATH, 'rb') as infp:
EXAMPLE_SVG_DATA = infp.read()
EXAMPLE2_SVG_DATA = (
EXAMPLE_SVG_DATA
.replace(b'"red"', b'"punainen"')
.replace(b'"green"', b'"vihrea"')
.replace(b'"blue"', b'"sininen"')
.replace(b'"yellow"', b'"keltainen"')
)
import deck_of_cards
from deck_of_cards import CountingSystemHiLo, DeckOfCards
import random
from time import sleep
from dataclasses import dataclass
import rich
from rich.console import Console
from rich.panel import Panel
from rich.markdown import Markdown
from rich.text import Text
from rich.layout import Layout
from rich.live import Live
from rich.padding import Padding
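# Number of cards dealt and counted in each round.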
n = 25
console = Console(color_system="truecolor")
layout = Layout()
layout.split_column(
Layout(name="status"),
Layout(name="cards"),
)
layout["status"].size = 3
# layout["status"].ratio = 2
layout["status"].visible = False
# layout["cards"].size = None
# layout["cards"].ratio = 6
@dataclass
class Save():
count: int
answer: int
def output(self):
result = Text(justify="center")
        if self.count is None or self.answer is None: return
layout["status"].visible = True
if self.answer == self.count:
result.append("Correct", style='bold green')
else:
result.append(f"Incorrect. The correct count was {self.count}.", style='bold red')
layout["status"].update(
Panel(result)
)
last_answer = Save(None, None)
def render_cards(cards):
result = Text(justify="center")
for card in cards:
color_fg = ''
if card.suit == deck_of_cards.Suit.SPADES: color_fg = "black"
if card.suit == deck_of_cards.Suit.CLUBS: color_fg = "black"
if card.suit == deck_of_cards.Suit.HEARTS: color_fg = "red"
if card.suit == deck_of_cards.Suit.DIAMONDS: color_fg = "red"
result.append(card.value.name + " of " + card.suit.name + "\n", style='bold ' + color_fg)
return result
while True:
# with console.screen():
with Live(layout, screen=True):
last_answer.output()
deck = DeckOfCards()
deck.shuffle()
cards = random.sample(deck.cards, n)
count = CountingSystemHiLo.count_cards(cards)
layout["cards"].update(
Panel(render_cards(cards))
)
# console.print("What is the count?", justify="left", style='bold')
answer = int(input(">>> "))
last_answer = Save(count, answer)
from django.conf import settings
import requests
from argos.libs.discovery import Discovery
__author__ = 'mphilpot'
class OntologyClient(object):
def __init__(self, token, url=None):
if url is None:
discovery = Discovery()
self.url = discovery.get_url("ontology")
else:
self.url = url
self.token = token
self.cert = getattr(settings, 'REQUESTS_CLIENT_CERT', None)
self.verify = getattr(settings, 'REQUESTS_CLIENT_VERIFY', None)
def get_ontology_description(self, domain, uuids=None):
headers = {
'accept': 'application/json',
'authorization': self.token,
}
params = []
if uuids:
for uuid in uuids:
params.append(('uuid', uuid))
r = requests.get('%s/ontologies/%s' % (self.url, domain), headers=headers, params=params, cert=self.cert, verify=self.verify)
r.raise_for_status()
return r.json()
def get_labels(self, domain, uuids=None):
headers = {
'accept': 'application/json',
'authorization': self.token,
}
params = []
if uuids:
for uuid in uuids:
params.append(('uuid', uuid))
r = requests.get('%s/ontologies/%s/labels' % (self.url, domain), headers=headers, params=params, cert=self.cert, verify=self.verify)
r.raise_for_status()
return r.json()
class OntologyAdminClient(object):
def __init__(self, token, url=None):
if url is None:
discovery = Discovery()
self.url = discovery.get_url("ontology")
else:
self.url = url
self.token = token
self.cert = getattr(settings, 'REQUESTS_CLIENT_CERT', None)
self.verify = getattr(settings, 'REQUESTS_CLIENT_VERIFY', None)
def get_ontology_meta(self, domain):
headers = {
'accept': 'application/json',
'authorization': self.token,
}
r = requests.get('%s/admin/ontologies/meta/%s' % (self.url, domain), headers=headers, cert=self.cert, verify=self.verify)
r.raise_for_status()
return r.json()
def upload_ontology_rdf(self, domain, rdf):
headers = {
'accept': 'application/json',
'content-type': 'text/xml',
'authorization': self.token,
}
r = requests.post('%s/admin/ontology/%s' % (self.url, domain), data=rdf, headers=headers, cert=self.cert, verify=self.verify)
r.raise_for_status()
        return r.json()
"requests.post",
"argos.libs.discovery.Discovery",
"requests.get"
] | [((818, 943), 'requests.get', 'requests.get', (["('%s/ontologies/%s' % (self.url, domain))"], {'headers': 'headers', 'params': 'params', 'cert': 'self.cert', 'verify': 'self.verify'}), "('%s/ontologies/%s' % (self.url, domain), headers=headers,\n params=params, cert=self.cert, verify=self.verify)\n", (830, 943), False, 'import requests\n'), ((1285, 1418), 'requests.get', 'requests.get', (["('%s/ontologies/%s/labels' % (self.url, domain))"], {'headers': 'headers', 'params': 'params', 'cert': 'self.cert', 'verify': 'self.verify'}), "('%s/ontologies/%s/labels' % (self.url, domain), headers=\n headers, params=params, cert=self.cert, verify=self.verify)\n", (1297, 1418), False, 'import requests\n'), ((2039, 2161), 'requests.get', 'requests.get', (["('%s/admin/ontologies/meta/%s' % (self.url, domain))"], {'headers': 'headers', 'cert': 'self.cert', 'verify': 'self.verify'}), "('%s/admin/ontologies/meta/%s' % (self.url, domain), headers=\n headers, cert=self.cert, verify=self.verify)\n", (2051, 2161), False, 'import requests\n'), ((2427, 2552), 'requests.post', 'requests.post', (["('%s/admin/ontology/%s' % (self.url, domain))"], {'data': 'rdf', 'headers': 'headers', 'cert': 'self.cert', 'verify': 'self.verify'}), "('%s/admin/ontology/%s' % (self.url, domain), data=rdf,\n headers=headers, cert=self.cert, verify=self.verify)\n", (2440, 2552), False, 'import requests\n'), ((239, 250), 'argos.libs.discovery.Discovery', 'Discovery', ([], {}), '()\n', (248, 250), False, 'from argos.libs.discovery import Discovery\n'), ((1596, 1607), 'argos.libs.discovery.Discovery', 'Discovery', ([], {}), '()\n', (1605, 1607), False, 'from argos.libs.discovery import Discovery\n')] |
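# Usage sketch (illustrative values; assumes Django settings are configured
# and a valid token is at hand):
#   client = OntologyClient(token='Bearer <token>')
#   labels = client.get_labels('anatomy')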
#Imports
import pandas as pd
import pickle
from sklearn import datasets
def dataset():
iris_data = datasets.load_iris() # Loads the Iris Dataset
X = iris_data.data
y = iris_data.target
# Creating Dataframes
df_data = pd.DataFrame(X, columns=['sepal_length', 'sepal_width','petal_length','petal_width'])
df_target = pd.DataFrame(y, columns=['species'])
# Final DataFrame
df = pd.concat([df_data,df_target],axis=1)
return df
def save_pickle(object_,path,pickle_file_name):
    # Saves the given object (e.g. a DataFrame) to disk in pickle format.
with open(path + pickle_file_name,'wb') as f:
pickle.dump(object_,f)
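def load_pickle(path, pickle_file_name):
    # Counterpart to save_pickle (an added sketch, not in the original
    # script): reads the pickled DataFrame back from disk.
    with open(path + pickle_file_name, 'rb') as f:
        return pickle.load(f)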
if __name__ == '__main__':
path = 'Dataset/'
pickle_file_name = 'data.pkl'
df = dataset()
save_pickle(df, path, pickle_file_name)
from Configurables import DaVinci
from GaudiConf import IOHelper
DaVinci().InputType = 'DST'
DaVinci().TupleFile = 'DVntuple.root'
DaVinci().PrintFreq = 1000
DaVinci().DataType = '2012'
DaVinci().Simulation = True
# Only ask for luminosity information when not using simulated data
DaVinci().Lumi = not DaVinci().Simulation
DaVinci().EvtMax = 1000
# Use the local input data
IOHelper().inputFiles([('root://eoslhcb.cern.ch/'
'/eos/lhcb/grid/prod/lhcb/'
'MC/2012/ALLSTREAMS.DST/00035742/0000/'
'00035742_00000001_1.allstreams.dst')],
clear=True)
import os, math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#from matplotlib.collections import PatchCollection
from sklearn import linear_model
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from importlib import reload
# Constants
#files = ['time_series_19-covid-Confirmed', 'time_series_19-covid-Deaths', 'time_series_19-covid-Recovered']
#labels = ['Confirmed', 'Deaths', 'Recovered']# until 23 March 2020
# Since 24 March 2020
#files = ['time_series_covid19_confirmed_global', 'time_series_covid19_deaths_global']
#labels = ['confirmed', 'deaths']
# Since 28 March 2020
files = ['time_series_covid19_confirmed_global', 'time_series_covid19_deaths_global', 'time_series_covid19_recovered_global']
labels = ['confirmed', 'deaths', 'recovered']
def open_csvs():
'''
Finding and opening your most recent data download if timestamp == None.
Alternatively, specify a substring of requested timestamp to select which files to open.
'''
timestamp = None
#timestamp = '20200330_15-26'
df=dict()
lists = list([list(), list(), list()])
with os.scandir() as it:
for entry in it:
for i in range(3):
if (timestamp==None or timestamp in entry.name) and files[i] in entry.name\
and entry.is_file():
lists[i].append(entry.name)
for i in range(3):
lists[i].sort()
df[labels[i]] = pd.read_csv(lists[i][-1])
return df
def data_preparation(df, country, output):
'''
This is used for the JHU CSSE dataset.
output can be 'confirmed', 'deaths', 'recovered', 'active' or 'all'
'active' returns dft['confirmed']-dft['deaths']-dft['recovered']
'all' returns all three as columns in a DataFrame as used in death_over_cases.py
'''
sets = dict({'EU': ['Austria', 'Belgium', 'Bulgaria', 'Croatia', 'Cyprus', 'Czechia', 'Denmark', 'Estonia', 'Finland', 'France', 'Germany', 'Greece', 'Hungary', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Netherlands', 'Poland', 'Portugal', 'Romania', 'Slovakia', 'Slovenia', 'Spain', 'Sweden']})#,
#'China': [['Anhui', 'China'], ['Beijing', 'China'], ['Chongqing', 'China'], ['Fujian', 'China'], ['Gansu', 'China'], ['Guangdong', 'China'], ['Guangxi', 'China'], ['Guizhou', 'China'], ['Hainan', 'China'], ['Hebei', 'China'], ['Heilongjiang', 'China'], ['Henan', 'China'], ['Hong Kong', 'China'], ['Hubei', 'China'], ['Hunan', 'China'], ['Inner Mongolia', 'China'], ['Jiangsu', 'China'], ['Jiangxi', 'China'], ['Jilin', 'China'], ['Liaoning', 'China'], ['Macau', 'China'], ['Ningxia', 'China'], ['Qinghai', 'China'], ['Shaanxi', 'China'], ['Shandong', 'China'], ['Shanghai', 'China'], ['Shanxi', 'China'], ['Sichuan', 'China'], ['Tianjin', 'China'], ['Tibet', 'China'], ['Xinjiang', 'China'], ['Yunnan', 'China'], ['Zhejiang', 'China']]})
#sets = dict({'EU': ['Croatia', 'Hungary']}) # test only
l = list()
if country == 'EU' or country == 'China' or country == 'Australia':
''' First, recursive implementation
l_members = list()
for member in sets[country]:
l_members.append(data_preparation(df, member, only_cases))
dft_members = pd.concat(l_members, axis=1)
return dft_members.sum(axis=1)
'''
M = dict() # these matrices are the booleans of selections for each Province/State, we take their multiple
for i in range(3):
k = labels[i]
M[k] = list()
if country == 'China' or country == 'Australia':
M[k].append((df[k]['Province/State'].notna()) & (df[k]['Country/Region']==country))
l.append(df[k][M[k][0]].iloc[:,4:].sum(axis=0))
else: # country == 'EU'
for member in sets[country]:
#print(member)
if isinstance(member, str):
M[k].append((df[k]['Province/State'].isna()) & (df[k]['Country/Region']==member))
elif len(member)==2: # if it's a pair of [Province/State, Country/Region]
M[k].append((df[k]['Province/State']==member[0])
& (df[k]['Country/Region']==member[1]))
l.append(df[k][np.sum(np.array(M[k]), axis=0)>=1].iloc[:,4:].sum(axis=0))
dft = pd.concat(l, ignore_index=True, axis=1)
#dft.rename(columns={i: labels[i] for i in range(3)}, inplace=True)
else:
for i in range(3):
k = labels[i]
if isinstance(country, str):
l.append(df[k][np.logical_and(df[k]['Province/State'].isna(),
df[k]['Country/Region']==country)].iloc[:,4:])
elif len(country)==2: # if it's a pair of [Province/State, Country/Region]
l.append(df[k][np.logical_and(df[k]['Province/State']==country[0],
df[k]['Country/Region']==country[1])].iloc[:,4:])
dft = pd.concat(l, ignore_index=True, axis=0).transpose()
#print(dft)
dft.rename(columns={i: labels[i] for i in range(3)}, inplace=True)
#print(dft)
if output=='all':
df_ts = dft
elif output=='active':
print('Number of recovered in the past eight days:')
print(dft['recovered'][-8:])
df_ts = dft['confirmed']-dft['deaths']-dft['recovered'] # On 24 March 2020, recovered is not available; on 28 March 2020 it is there again.
else:
df_ts = dft[output]
#print(df_ts)
#df_ts.rename(index={df_ts.index[i]: pd.to_datetime(df_ts.index)[i] for i in range(len(df_ts.index))}, inplace=True)
df_ts.rename(index=pd.Series(df_ts.index, index=df_ts.index).apply(lambda x: pd.to_datetime(x)), inplace=True)
#print(df_ts)
return df_ts
def rm_early_zeros(ts):
'''
    Removes early zeros and NaNs from a pandas time series. It finds the last (most recent) zero
    or NaN in the series and omits all elements up to and including it. The returned remainder
    is free of zeros and NaNs.
pd.Series([0,0,0,0,1,2,0,0,3,6]) -> pd.Series([3,6])
'''
zeroindices = ts[(ts==0) | ts.isna()].index
if len(zeroindices)==0:
return ts
else:
successor = np.nonzero((ts.index==zeroindices.max()))[0][0] + 1
return ts[successor:]
def rm_consecutive_early_zeros(ts, keep=1):
'''
Removes first consecutive subsequence of early zeros from a pandas time series
    except for the last `keep` zeros, if there are that many.
rm_consecutive_early_zeros(pd.Series([0,0,0,0,1,2,3,6]), 2) -> pd.Series([0,0,1,2,3,6])
'''
zeroindices = ts[ts==0].index
if len(zeroindices)==0:
return ts
else:
first_pos_index = np.nonzero((ts.index==ts[ts>0].index[0]))[0][0]
if first_pos_index <= keep:
return ts
else:
return ts[first_pos_index-keep:]
def separated(s, lang='en', k=3):
'''
Input must be a string. Puts a comma between blocks of k=3 digits:
'1000000' -> '1,000,000'
'''
if lang == 'de':
chr = '.'
else:
chr = ','
if len(s)>=5:
l=list()
for i in range(len(s)//k):
l.insert(0, s[len(s)-(i+1)*k:len(s)-i*k])
if len(s) % k !=0:
l.insert(0, s[:len(s)-(i+1)*k])
return chr.join(l)
else:
return s
def x2str(x, width):
'''
    Formats a number: two decimals if |x|<1, one decimal if |x|<10, otherwise
    rounded to an integer (given enough width). If width is greater than the
    result's length, it is left-padded with spaces; if width<0, no padding is done.
'''
#if x<0.1 and x>-0.1 and width>=6:
# s = '{:.3f}'.format(x) #str(round(x*1000)/1000)
if x<1 and x>-1 and width>=5:
s = '{:.2f}'.format(x) #str(round(x*100)/100)
elif x<10 and x>-10 and width>=4:
s = '{:.1f}'.format(x) #str(round(x*10)/10)
else:
s = '{:.0f}'.format(x) #str(int(round(x)))
if width > len(s):
return s.rjust(width)
else:
return s
def n2str(n, width):
'''
Takes integers. If width is greater than its length, then it pads it with space.
If width<0, then it does no padding.
'''
s = str(n)
if width > len(s):
return s.rjust(width)
else:
return s
def interpolate(df_ts, window_length):
'''
    Returns the entry of the cumulative time series at (last date minus
    (window_length-1) days), linearly interpolating between the neighbouring
    dates if that exact date is missing.
'''
# date of interest:
doi = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
if doi in df_ts.index:
return df_ts.loc[doi]
else:
prv = df_ts[df_ts.index<doi]
nxt = df_ts[df_ts.index>doi]
if len(prv)>0 and len(nxt)>0:
i_prv = prv.index[-1]
i_nxt = nxt.index[0]
c_prv = (i_nxt-doi).days/(i_nxt-i_prv).days
c_nxt = (doi-i_prv).days/(i_nxt-i_prv).days
return c_prv*df_ts.loc[i_prv] + c_nxt*df_ts.loc[i_nxt]
elif len(nxt)>0:
return nxt.iloc[0]
elif len(prv)>0: # It can never come this far, df_ts.iloc[-1] exists so nxt is not empty.
return prv.iloc[-1]
'''
def truncate_before(df_ts, window_length):
#This returns (or interpolates, if not found) from the time series the entry at last entry minus
# (window_length-1) days.
# date of interest:
doi = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
if doi in df_ts.index:
return df_ts.loc[doi:]
else:
prv = df_ts[df_ts.index<doi]
nxt = df_ts[df_ts.index>doi]
if len(prv)>0 and len(nxt)>0:
i_prv = prv.index[-1]
i_nxt = nxt.index[0]
c_prv = (i_nxt-doi).days/(i_nxt-i_prv).days
c_nxt = (doi-i_prv).days/(i_nxt-i_prv).days
df_ts.loc[doi] = c_prv*df_ts.loc[i_prv] + c_nxt*df_ts.loc[i_nxt]
df_ts = df_ts.sort_index(inplace=False)
return df_ts.loc[doi:]
elif len(nxt)>0:
df_ts.loc[doi] = nxt.iloc[0]
df_ts = df_ts.sort_index(inplace=False)
return df_ts.loc[doi:]
elif len(prv)>0: # It can never come this far, df_ts.iloc[-1] exists so nxt is not empty.
df_ts.loc[doi] = prv.iloc[-1]
df_ts = df_ts.sort_index(inplace=False)
return df_ts.loc[doi:]
'''
def truncate_before(df_ts, window_length, fill_all_missing):
'''
    Returns the entries of the cumulative time series from (last date minus
    (window_length-1) days) up to the last date, interpolating missing dates.
When some days are missing from the cumulative time series df_ts, then I could assign them zero increments and
assign all increments to the first day after the gap. Instead, I spread out the growth uniformly across the
missing days. The first solution (0, 0, all increment) would give the fitting a tendency to see quickly
growing cumulatives.
'''
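    # For example, cumulative totals [10, _, _, 40] with two missing days are
    # filled in as [10, 20, 30, 40] rather than [10, 10, 10, 40].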
df_ts_new = df_ts.copy()
r = range(window_length-1, 0, -1) if fill_all_missing else [window_length-1]
for i in r:
# date of interest:
#doi = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
doi = df_ts.index[-1]-pd.Timedelta(f'{i} days')
if doi not in df_ts.index:
prv = df_ts[df_ts.index<doi]
nxt = df_ts[df_ts.index>doi]
if len(prv)>0 and len(nxt)>0:
i_prv = prv.index[-1]
i_nxt = nxt.index[0]
c_prv = (i_nxt-doi).days/(i_nxt-i_prv).days
c_nxt = (doi-i_prv).days/(i_nxt-i_prv).days
df_ts_new.loc[doi] = c_prv*df_ts.loc[i_prv] + c_nxt*df_ts.loc[i_nxt]
elif len(nxt)>0:
df_ts_new.loc[doi] = nxt.iloc[0]
elif len(prv)>0: # It can never come this far, df_ts.iloc[-1] exists so nxt is not empty.
df_ts_new.loc[doi] = prv.iloc[-1]
df_ts_new = df_ts_new.sort_index(inplace=False)
return df_ts_new.loc[df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days'):]
def analysis(df_ts, window_length, exp_or_lin, extent='full'):
'''
    df_ts: pd.Series, a time series of totals or of cases per e.g. 100,000 people
    window_length: int
    exp_or_lin in ['exp', 'lin', 'mean']
        For 'exp', the increments are fitted in log space, so nonpositive
        increments are clamped to 1 before taking the logarithm.
        For 'lin', the increments are fitted directly; 'mean' uses their average.
extent in ['full', 'minimal']
'minimal' doesn't compute predictions.
output: results = [
daily increment in natural units (units of df_ts): float,
daily growth rate in percentage: float,
doubling time in days: float or 0 for 'minimal',
current cases (df_ts.iloc[-1]),
projection_lower: type(df_ts.dtype) or 0 for 'minimal',
projection_upper: type(df_ts.dtype) or 0 for 'minimal',
model_score=R^2: float,
difference of model fit on last date and last data point in log space: float
]
model: sklearn.linear_model
#failure: 0 or 1; 1 if it failed due to nonpositive number in exponential fit or too short time series
'''
i_ts = (df_ts - df_ts.shift(1))[1:] # i for increments
#if len(i_ts)<window_length:# or (exp_or_lin=='exp' and (i_ts.iloc[-window_length:]<=0).sum()>=5):
if len(i_ts)==0 or (i_ts.index[-1]-i_ts.index[0]).days<window_length-1:
results = 8 * [0]
results[-1] = 100
return results, None
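    # Horizons (in days) for the lower/upper projections stored in results[4:6].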
intl_lo_days = 4
intl_hi_days = 6
results = [None] * 8
results[3] = df_ts.iloc[-1]
model = linear_model.LinearRegression(fit_intercept=True)
if exp_or_lin=='exp':
df_ts_orig = df_ts.copy()
df_ts_0 = truncate_before(df_ts_orig, window_length+1, fill_all_missing=False) # For the fit to increments.
df_ts = truncate_before(df_ts, window_length+1, fill_all_missing=True)
i_ts = (df_ts - df_ts.shift(1))[1:] # i for increments
i_ts[i_ts<=0] = 1
y = i_ts.values
ylog = np.log(y)
model.fit((i_ts.index-i_ts.index[-1]).days.values.reshape(-1, 1), ylog)
results[0] = math.exp(model.intercept_)
# For doubling, the area of the increments is equal to df_ts[-1]
# cf. https://www.wolframalpha.com/input/?i=integrate+%28exp%28a+t+%2Bb%29+dt%29+from+t%3D0+to+x
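        # Derivation: with daily increments i(t) = exp(a*t + b), t = 0 at the
        # latest day, the cumulative gain over the next x days is
        #   integral_0^x exp(a*t + b) dt = (exp(a*x) - 1) * exp(b) / a.
        # Setting this equal to the current total df_ts.iloc[-1] and solving
        # for x yields the doubling time computed below
        # (a = model.coef_[0], b = model.intercept_).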
if model.coef_[0]!=0:
temp2 = math.exp(model.intercept_)/model.coef_[0]
temp = model.coef_[0]*df_ts.iloc[-1]/math.exp(model.intercept_) + 1
if temp>0:
results[2] = math.log(temp)/model.coef_[0]
else:
results[2] = np.inf
else:
results[2] = df_ts.iloc[-1]/math.exp(model.intercept_)
if extent == 'full':
if model.coef_[0]!=0:
results[4] = (math.exp(model.coef_[0]*intl_lo_days)-1)*temp2 + df_ts.iloc[-1]
results[5] = (math.exp(model.coef_[0]*intl_hi_days)-1)*temp2 + df_ts.iloc[-1]
else:
results[4] = math.exp(model.intercept_)*intl_lo_days + df_ts.iloc[-1]
results[5] = math.exp(model.intercept_)*intl_hi_days + df_ts.iloc[-1]
#if (i_ts_orig.iloc[-window_length:]>0).all():
#if (truncate_before(i_ts_orig, window_length, fill_all_missing=False)>0).all():
i_ts_0 = (df_ts_0 - df_ts_0.shift(1))[1:]
if (i_ts_0>0).all():
#results[6] = model.score(np.arange(-window_length+1, 1).reshape(-1, 1), ylog)
results[6] = model.score((i_ts_0.index-i_ts_0.index[-1]).days.values.reshape(-1, 1), ylog)
else:
results[6] = 0
#if df_ts.iloc[-1]==df_ts.iloc[-window_length]:
#if df_ts.iloc[-1]==interpolate(df_ts, window_length): # If there is no growth, then exp is not good approx.
first_day = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
if df_ts.iloc[-1]==df_ts.loc[first_day]: # If there is no growth, then exp is not good approx.
results[7] = 100 # Exp overestimates growth by a factor of infinity.
else:
if model.coef_[0]!=0:
#results[7] = temp2*(1-math.exp(model.coef_[0]*(-window_length+1)))/(df_ts.iloc[-1]-df_ts.iloc[-window_length])-1
#results[7] = temp2*(1-math.exp(model.coef_[0]*(-window_length+1)))/(df_ts.iloc[-1]-interpolate(df_ts, window_length))-1
results[7] = temp2*(1-math.exp(model.coef_[0]*(-window_length+1)))/(df_ts.iloc[-1]-df_ts.loc[first_day])-1
else:
#results[7] = math.exp(model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-df_ts.iloc[-window_length])-1
#results[7] = math.exp(model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-interpolate(df_ts, window_length))-1
results[7] = math.exp(model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-df_ts.loc[first_day])-1
elif exp_or_lin=='lin':
df_ts_orig = df_ts.copy()
df_ts_0 = truncate_before(df_ts_orig, window_length+1, fill_all_missing=False) # For the fit to increments.
df_ts = truncate_before(df_ts, window_length+1, fill_all_missing=True)
i_ts = (df_ts - df_ts.shift(1))[1:] # i for increments
y = i_ts.values
model.fit((i_ts.index-i_ts.index[-1]).days.values.reshape(-1, 1), y)
results[0] = model.intercept_
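        # Derivation: with daily increments i(t) = a*t + c, the cumulative gain
        # over the next x days is a*x**2/2 + c*x; setting this equal to the
        # current total C and solving the quadratic gives
        #   x = (-c + sqrt(c**2 + 2*a*C)) / a,
        # the doubling time computed below.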
if model.coef_[0]!=0:
if 2*model.coef_[0]*df_ts.iloc[-1] >= - model.intercept_*model.intercept_:
results[2] = (-model.intercept_ + math.sqrt(model.intercept_*model.intercept_ + 2*model.coef_[0]*df_ts.iloc[-1]))/model.coef_[0]
else:
results[2] = np.inf
else:
if model.intercept_!=0:
results[2] = df_ts.iloc[-1]/model.intercept_
else:
if df_ts.iloc[-1]!=0:
results[2] = np.inf
else:
results[2] = 0 # model.coef_[0]==model.intercept_==0
if extent == 'full':
if model.coef_[0]*model.intercept_<0 and\
((model.coef_[0]>0 and -model.intercept_<intl_lo_days*model.coef_)\
or (model.coef_[0]<0 and -model.intercept_>intl_lo_days*model.coef_)):
# there is a zero-crossing until intl_lo_days
results[4] = -model.intercept_*model.intercept_/(2*model.coef_[0]) + df_ts.iloc[-1]
results[5] = results[4]
elif model.coef_[0]*model.intercept_<0 and\
((model.coef_[0]>0 and -model.intercept_<intl_hi_days*model.coef_)\
or (model.coef_[0]<0 and -model.intercept_>intl_hi_days*model.coef_)):
# there is a zero-crossing after intl_lo_days, before intl_hi_days
results[5] = -model.intercept_*model.intercept_/(2*model.coef_[0]) + df_ts.iloc[-1]
if results[4] is None:
results[4] = (model.coef_[0]*intl_lo_days/2+model.intercept_)*intl_lo_days + df_ts.iloc[-1]
if results[5] is None:
results[5] = (model.coef_[0]*intl_hi_days/2+model.intercept_)*intl_hi_days + df_ts.iloc[-1]
#results[6] = model.score(np.arange(-window_length+1, 1).reshape(-1, 1), y)
i_ts_0 = (df_ts_0 - df_ts_0.shift(1))[1:]
results[6] = model.score((i_ts_0.index-i_ts_0.index[-1]).days.values.reshape(-1, 1), y)
#if df_ts.iloc[-1]==df_ts.iloc[-window_length]:
first_day = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
if df_ts.iloc[-1]==df_ts.loc[first_day]: # If there is no growth, then
if model.coef_[0]==0 and model.intercept_==0:
results[7] = 0
else:
results[7] = 100 # a nonzero linear function overestimates growth by a factor of infinity.
else:
#print(model.coef_[0], model.intercept_, '\n', df_ts.iloc[-window_length:])
#print(-(model.coef_[0]*(-window_length+1)/2+model.intercept_)*(-window_length+1))
#print(df_ts.iloc[-1]-df_ts.iloc[-window_length])
#results[7] = -(model.coef_[0]*(-window_length+1)/2+model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-df_ts.iloc[-window_length])-1 # From the integral
#results[7] = -(model.coef_[0]*(-window_length+1)/2+model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-interpolate(df_ts, window_length))-1 # From the integral
results[7] = -(model.coef_[0]*(-window_length+1)/2+model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-df_ts.loc[first_day])-1 # From the integral
#print(df_ts.iloc[-1], df_ts.loc[first_day], results[7])
#print(window_length*(2*model.intercept_+model.coef_[0]*(-window_length+1))/(2*(df_ts.iloc[-1]-df_ts.iloc[-window_length]))-1) # From summing
#print((-model.coef_[0]*(-window_length+1)*(-window_length+1)/2+(model.coef_[0]/2-model.intercept_)*(-window_length+1)+model.intercept_)/(df_ts.iloc[-1]-df_ts.iloc[-window_length])-1) # From summing
#results[7] = np.sum(model.coef_[0]*np.arange(-window_length+1, 1)+model.intercept_)/(df_ts.iloc[-1]-df_ts.iloc[-window_length])-1 # From summing
#print(results[7])
elif exp_or_lin=='mean':
df_ts_orig = df_ts.copy()
df_ts_0 = truncate_before(df_ts_orig, window_length+1, fill_all_missing=False) # For the fit to increments.
df_ts = truncate_before(df_ts, window_length+1, fill_all_missing=True)
i_ts = (df_ts - df_ts.shift(1))[1:] # i for increments
#y = i_ts.values
i_mean = i_ts.mean()
results[0] = i_mean
if results[0]!=0:
results[2] = df_ts.iloc[-1]/i_mean
else:
if df_ts.iloc[-1]!=0:
results[2] = np.inf
else:
results[2] = 0 # df_ts.iloc[-1]==i_ts.mean()==0
if extent == 'full':
results[4] = i_mean*intl_lo_days + df_ts.iloc[-1]
results[5] = i_mean*intl_hi_days + df_ts.iloc[-1]
results[6] = 0 # coefficient of determination R^2
first_day = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
if df_ts.iloc[-1]==df_ts.loc[first_day]: # If there is no growth, then
if i_mean==0:
results[7] = 0
else:
results[7] = 100 # a nonzero linear function overestimates growth by a factor of infinity.
else:
results[7] = -i_mean*(-window_length+1)/(df_ts.iloc[-1]-df_ts.loc[first_day])-1 # From the integral
class SkeletonModel():
def __init__(self, intercept):
self.coef_ = [0]
self.intercept_ = intercept
model = SkeletonModel(results[0])
if results[2]!=np.inf and results[2]!=0 and results[0]>0:
#print(window_length, df_ts.iloc[-window_length-1:], y, model.coef_[0], model.intercept_, results, 1/results[2])
results[1] = (math.pow(2, 1/results[2])-1)*100
else:
#y_last = (df_ts.iloc[-1]+df_ts.iloc[-2]+df_ts.iloc[-3])/3 # smoothening to lessen the impact of penultimate data point
y_last = (df_ts.iloc[-1]+df_ts.iloc[-2])/2 # smoothening to lessen the impact of penultimate data point
if y_last!=0:
results[1] = results[0]*100 / y_last
elif model.coef_[0]==0 and model.intercept_==0:
results[1] = 0
else:
results[1] = np.inf
if extent == 'minimal':
#results[2] = 0
results[4] = 0
results[5] = 0
#print(model.coef_[0], model.intercept_, results[6], results[7])
#print(window_length, results)
return results, model
def select_window_length(R, round_output):
'''
This selects the window length that is good on two aspects: R^2 and matching last value
round_output: boolean. If True, then it returns current and two projected case numbers as int.
'''
nr_col = R.shape[1]
if nr_col==8: # If we're calling this from pick_exp_vs_lin() with window_selection, then we already have this so no need to compute it again and add it as new column.
# We take l_2 norm of 10*(1-R^2) and distance column:
R.insert(nr_col, nr_col, R[6].apply(lambda x: (10*(1-x))**2)
+ R[7].apply(lambda x: x**2))
# Sort and return the row (corresponding to a window_length) with lowest l_2 norm:
#return R.sort_values(7, axis=0, ascending=True).iloc[0:1,:]
if R.shape[0]>1:
R = R.sort_values(8, axis=0, ascending=True)
#print(R)
output = list()
if round_output==True:
for i in range(R.shape[1]):
output.append(int(round(R.iloc[0,i])) if i in [3, 4, 5] else R.iloc[0,i])
else:
output = [R.iloc[0,i] for i in range(R.shape[1])]
return output, R.index[0]
#return [R.iloc[0,i] for i in range(nr_col+1)], R.index[0] # This maintains integer elements as integers, R.iloc[0,:] would cast them as float bc it creates a pd.Series with a shared type.
def pick_exp_vs_lin(r_exp, m_exp, r_lin, m_lin):
r_exp = pd.DataFrame(r_exp).T
r_exp, _ = select_window_length(r_exp, round_output=False)
r_lin = pd.DataFrame(r_lin).T
r_lin, _ = select_window_length(r_lin, round_output=False)
if r_exp[-1] < r_lin[-1]:
return r_exp[:-1], m_exp, 'exp'
else:
return r_lin[:-1], m_lin, 'lin'
#TODO: add a switch to compute in densities when population size is available
def process_geounit(df_ts, window_length, exp_or_lin='both', running_extent='full'):
'''
This processes one geographical unit.
df_ts is the time series.
'''
#df_ts = rm_early_zeros(df_ts)
if exp_or_lin=='mean' and not window_length > 0:
window_length = 7
if window_length > 0:
selected_window_length = window_length
if exp_or_lin=='both':
results_e, model_e = analysis(df_ts, window_length, 'exp', running_extent)
results_l, model_l = analysis(df_ts, window_length, 'lin', running_extent)
results, model, exp_or_lin = pick_exp_vs_lin(results_e, model_e, results_l, model_l)
#print(results_e)
#print(results_l)
elif exp_or_lin=='exp':
results, model = analysis(df_ts, window_length, 'exp', running_extent)
elif exp_or_lin=='lin':
results, model = analysis(df_ts, window_length, 'lin', running_extent)
elif exp_or_lin=='mean':
results, model = analysis(df_ts, window_length, 'mean', running_extent)
else: # do a search over window_lengths for best possible fit
# minimum and maximum allowed window lengths; we test all in this closed interval
wl_lo = 7
wl_hi = 15 # this end point is not included
# Rule out zeros because we take logarithm; rule out windows longer than the time series df_ts.
#wl_hi = min(wl_hi, 1+len(df_ts[df_ts[df_ts>0].idxmin():]), 1+len(df_ts))
wl_hi = min(wl_hi, 1+len(df_ts))
if wl_hi <= wl_lo: # then abort
results, model = analysis(pd.Series([]), 1, 'exp', running_extent)
#return results, model, window_length, exp_or_lin
return pd.DataFrame([results+[window_length, exp_or_lin]]), model
'''
R = pd.DataFrame(np.zeros((wl_hi-wl_lo, 7)), index=range(wl_lo, wl_hi))
models = dict()
for wl in range(wl_lo, wl_hi): # last wl_hi-1 points must be available and positive <==
result_wl, model = analysis_exp(df_ts, wl) # last wl points must be available and positive
R.iloc[wl-wl_lo, :] = result_wl
models[wl] = model
R = R.astype({2: int, 3: int, 4: int})
results, selected_window_length = select_window_length(R)
model = models[selected_window_length]
'''
if exp_or_lin in ['exp', 'both']:
R_e = pd.DataFrame(np.zeros((wl_hi-wl_lo, 8)), index=range(wl_lo, wl_hi))
models_e = dict()
if exp_or_lin in ['lin', 'both']:
R_l = pd.DataFrame(np.zeros((wl_hi-wl_lo, 8)), index=range(wl_lo, wl_hi))
models_l = dict()
for wl in range(wl_lo, wl_hi): # last wl_hi-1 points must be available and positive <==
if exp_or_lin in ['exp', 'both']:
result_wl, model = analysis(df_ts, wl, 'exp', running_extent) # last wl points must be available and positive
R_e.iloc[wl-wl_lo, :] = result_wl
models_e[wl] = model
if exp_or_lin in ['lin', 'both']:
result_wl, model = analysis(df_ts, wl, 'lin', running_extent)
R_l.iloc[wl-wl_lo, :] = result_wl
models_l[wl] = model
if exp_or_lin in ['exp', 'both']:
results_e, selected_window_length_e = select_window_length(R_e, round_output=False)
model_e = models_e[selected_window_length_e]
if exp_or_lin in ['lin', 'both']:
results_l, selected_window_length_l = select_window_length(R_l, round_output=False)
model_l = models_l[selected_window_length_l]
if exp_or_lin == 'exp':
results, model, selected_window_length = results_e[:-1], model_e, selected_window_length_e
if exp_or_lin == 'lin':
results, model, selected_window_length = results_l[:-1], model_l, selected_window_length_l
if exp_or_lin == 'both':
results, model, exp_or_lin = pick_exp_vs_lin(results_e, model_e, results_l, model_l)
selected_window_length = selected_window_length_e if exp_or_lin=='exp'\
else selected_window_length_l
return pd.DataFrame([results+[selected_window_length, exp_or_lin]]), model
def print_header(normalise_by, population_csv=None):
print('The number of cases increases daily by /')
if population_csv is not None:
print('The number of cases per {} people increases daily by /'.format(separated(str(int(normalise_by)))))
print('The number of cases increases daily by (%)/')
print('Time it takes for the number of cases to double /')
print('Latest reported number of cases /')
if population_csv is not None:
print('Latest reported number of cases per {} people /'.format(separated(str(int(normalise_by)))))
print('My estimation for number of cases per {} people at present /'.format(separated(str(int(normalise_by)))))
else:
print('My estimation for number of cases at present /')
print('R^2 /')
print('Tail difference /')
print('Window length /')
print('Exponential (e) or linear (l) approximation\n')
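# Sketch (not part of the original script; the helper name is illustrative): the
# doubling time T reported in this header relates to the daily growth factor g
# by T = log(2)/log(g), equivalently g = 2**(1/T).
def doubling_time_from_growth_factor(g):
    """Days until the case count doubles, for a constant daily growth factor g > 1."""
    return math.log(2) / math.log(g)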
def print_results(country, results, normalise_by, population_csv, wl, exp_or_lin, frmt='normal', lang='en'):
'''
    frmt (format) can be 'deaths' or other. For 'deaths', one more decimal digit is displayed for
    cases per 100,000 and the estimate interval is not displayed.
'''
country_width = 23 if frmt!='deaths' else 24
interval_width = 14
#if country in ['Baden-Württemberg', 'Bayern', 'Berlin', 'Brandenburg', 'Bremen',
#'Hamburg', 'Hessen', 'Mecklenburg-Vorpommern', 'Niedersachsen',
#'Nordrhein-Westfalen', 'Rheinland-Pfalz', 'Saarland', 'Sachsen',
#'Sachsen-Anhalt', 'Schleswig-Holstein', 'Thüringen',
#'Deutschland']:
if population_csv=='DEU':
pop = load_population_DEU()
elif population_csv=='world':
pop = load_population_world()
elif population_csv=='BW':
pop = load_population_BW()
else:
pop = normalise_by # We don't normalise.
if not isinstance(country, str): # If it's a province or state of a country or region.
country = country[0]
if frmt!='active': # If input is a cumulative number, then don't display negative estimates.
if results[0]<0:
results[0]=0
if results[1]<0:
results[1]=0
if population_csv is not None:
incr_per_ppl = x2str(normalise_by*results[0]/pop[country], 4 if frmt!='deaths' else 6)
else:
incr_per_ppl = ' ' * 4 if frmt!='deaths' else ' ' * 6
#if ((results[6]>=0.95 and results[7]<=0.5) or (results[7]>=-0.2 and results[7]<=0.1)) and\
# results[0]>0 and frmt!='deaths':
if ((results[6]>=0.75 and results[7]<=0.5) or (results[7]>=-0.3 and results[7]<=0.3)) and\
results[0]>0 and frmt!='deaths':
if population_csv is not None:
nr_cases_per_ppl = x2str(normalise_by*results[3]/pop[country], int(math.log10(normalise_by))+1)
est_lo_per_ppl = normalise_by*results[4]/pop[country]
est_hi_per_ppl = normalise_by*results[5]/pop[country]
else:
nr_cases_per_ppl = ' ' * int(math.log10(normalise_by))
est_lo_per_ppl = results[4]
est_hi_per_ppl = results[5]
est_per_ppl_min = min(est_lo_per_ppl, est_hi_per_ppl)
est_per_ppl_max = max(est_lo_per_ppl, est_hi_per_ppl)
interval = ('[' + x2str(est_per_ppl_min, -1) +', '\
+ x2str(est_per_ppl_max, -1) + ']').rjust(interval_width)
else:
if population_csv is not None:
nr_cases_per_ppl = x2str(normalise_by*results[3]/pop[country], int(math.log10(normalise_by))+1)
else:
nr_cases_per_ppl = ' ' * int(math.log10(normalise_by))
if frmt!='deaths':
interval = ' ' * interval_width
else:
interval = ' '
if exp_or_lin=='exp':
letter = 'e'
elif exp_or_lin=='lin':
letter = 'l'
elif exp_or_lin=='mean':
letter = 'm'
print('{0} {1} {2} {3:5.1f}% {4:7.1f} {5} {6} {7} {8} {9:4.2f} {10:5.2f} {11} {12}'.format(
country[:country_width].ljust(country_width),
x2str(results[0], 6),
incr_per_ppl,
results[1],
        results[2] if results[0]>=0 else np.nan, # if results[1]>=0 else np.nan,
'Tage' if lang=='de' else 'days',
n2str(int(results[3]), 7),
nr_cases_per_ppl,
interval,
results[6],
results[7] if results[7]<100 else np.nan,
str(wl).rjust(2),
letter).replace('.', ',' if lang=='de' else '.'))
def plotting(df_ts, model, save_not_show, country, window_length, exp_or_lin, lang='en', panels=2):
if not isinstance(country, str): # If it's a province or state of a country or region.
country = country[0]
if panels==2:
fig, (ax0, ax1) = plt.subplots(1,2, figsize=(14.4, 4.8))
elif panels==3:
fig, (ax0, ax1, ax2) = plt.subplots(1,3, figsize=(14.4, 4.8))
if lang=='de':
line0 = 'Beobachtungen'
#line1 = 'Exponentielle Annäherung' if exp_or_lin=='exp' else 'Lineare Annäherung'
if exp_or_lin=='exp':
line1 = 'Exponentielle Annäherung'
elif exp_or_lin=='lin':
line1 = 'Lineare Annäherung'
elif exp_or_lin=='mean':
line1 = 'Annäherung mit Durchschnitt'
fig.suptitle(country + ', Stand ' + df_ts.index[-1].strftime('%d.%m.%Y'))
#plt.gcf().text(0.905, 0.86, "© <NAME>, 2020. http://COVID19de.Melykuti.Be", fontsize=8, color='lightgray', rotation=90)
plt.gcf().text(0.905, 0.242, "© <NAME>, 2021. http://COVID19de.Melykuti.Be", fontsize=8, color='lightgray', rotation=90)
else:
line0 = 'Observations'
#line1 = 'Exponential approximation' if exp_or_lin=='exp' else 'Linear approximation'
if exp_or_lin=='exp':
line1 = 'Exponential approximation'
elif exp_or_lin=='lin':
line1 = 'Linear approximation'
elif exp_or_lin=='mean':
line1 = 'Approximation with mean'
fig.suptitle(country + ', ' + df_ts.index[-1].strftime('%d %B %Y').lstrip('0'))
#plt.gcf().text(0.905, 0.862, "© <NAME>, 2020. http://COVID19.Melykuti.Be", fontsize=8, color='lightgray', rotation=90)
plt.gcf().text(0.905, 0.27, "© <NAME>, 2021. http://COVID19.Melykuti.Be", fontsize=8, color='lightgray', rotation=90)
#fig.tight_layout()
fig.subplots_adjust(bottom=0.2)
#ax1.plot(df_ts[df_ts>0], label=line0)
#ax1.plot(df_ts[df_ts>0].iloc[-window_length:].index, np.power(2, np.arange(0, window_length)*model.coef_ + model.intercept_), label=line1)
#plot_x = df_ts.iloc[-window_length:].index
plot_x = pd.date_range(df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days'), df_ts.index[-1])
i_ts = (df_ts - df_ts.shift(1))[1:] # i for increments
ax0.bar(df_ts[1:].index, i_ts[-len(df_ts)+1:], color='tab:blue')
#df_ts_no0 = rm_consecutive_early_zeros(df_ts)
#df_ts_no0 = df_ts
if exp_or_lin=='exp':
if model is not None:
ax0.plot(plot_x, np.exp(model.coef_[0]*np.arange(-window_length+1, 1) + model.intercept_), color='tab:orange', linewidth=3)
if model.coef_[0]!=0:
temp2 = math.exp(model.intercept_)/model.coef_[0]
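                # Integrating exp(a*t + b) gives exp(b)/a * exp(a*t), so temp2
                # scales the fitted exponential into a cumulative curve (the
                # additive constant is fixed by interpolate(...) below).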
#plot_y = (np.exp(model.coef_[0]*np.arange(-window_length+1, 1)) - math.exp(model.coef_[0] * (-window_length+1)))*temp2 + df_ts.iloc[-window_length]
plot_y = (np.exp(model.coef_[0]*np.arange(-window_length+1, 1)) - math.exp(model.coef_[0] * (-window_length+1)))*temp2 + interpolate(df_ts, window_length)
else:
#plot_y = math.exp(model.intercept_)*(np.arange(-window_length+1, 1) - (-window_length+1)) + df_ts.iloc[-window_length]
plot_y = math.exp(model.intercept_)*(np.arange(-window_length+1, 1) - (-window_length+1)) + interpolate(df_ts, window_length)
ax1.plot(plot_x, plot_y, label=line1, color='tab:orange', linewidth=3)
if panels==3:
ax2.plot(plot_x, plot_y, label=line1, color='tab:orange', linewidth=3)
elif exp_or_lin=='lin' or exp_or_lin=='mean':
ax0.plot(plot_x, model.coef_[0]*np.arange(-window_length+1, 1) + model.intercept_, color='tab:pink', linewidth=3)
#plot_y = np.arange(0, window_length)*model.coef_ + model.intercept_
#plot_y = (model.coef_[0]*np.arange(-window_length+1, 1)/2+model.intercept_)*np.arange(-window_length+1, 1) + df_ts.iloc[-1]
# plot_y = (model.coef_[0]*np.arange(0, window_length)/2+model.intercept_)*np.arange(0, window_length) + df_ts.iloc[-window_length]
#plot_y = (model.coef_[0]*np.arange(-window_length+1, 1)/2+model.intercept_)*np.arange(-window_length+1, 1) - (model.coef_[0]*(-window_length+1)/2+model.intercept_)*(-window_length+1) + df_ts.iloc[-window_length]
plot_y = (model.coef_[0]*np.arange(-window_length+1, 1)/2+model.intercept_)*np.arange(-window_length+1, 1) - (model.coef_[0]*(-window_length+1)/2+model.intercept_)*(-window_length+1) + interpolate(df_ts, window_length)
ax1.plot(plot_x, plot_y, label=line1, color='tab:pink', linewidth=3)
if panels==3:
ax2.plot(plot_x, plot_y, label=line1, color='tab:pink', linewidth=3)
ax1.plot(df_ts, label=line0, color='tab:blue')
if panels==3:
ax2.plot(df_ts, label=line0, color='tab:blue')
ax2.set_yscale("log")
for tick in ax0.get_xticklabels():
tick.set_rotation(80)
for tick in ax1.get_xticklabels():
tick.set_rotation(80)
if panels==3:
for tick in ax2.get_xticklabels():
tick.set_rotation(80)
handles, labs = ax1.get_legend_handles_labels()
if model is not None:
ax1.legend((handles[1], handles[0]), (labs[1], labs[0]))
else:
ax1.legend([handles[0]], [labs[0]])
if save_not_show==0:
plt.show()
elif save_not_show==1:
imgfile = country.replace(',', '_').replace(' ', '_').replace('(', '_').replace(')', '_')\
+ '_' + df_ts.index[-1].strftime('%Y-%m-%d') + '.png'
plt.savefig(imgfile)
plt.close(fig)
def load_population_world():
pop = pd.read_csv('population_world.csv', sep='\t')
pop_ser=pd.Series(pop.Population.apply(lambda x: int(x.replace(',', ''))).values, index=pop.Country)
countries = dict()
for country in pop_ser.index:
country_new = country.strip()
countries[country_new] = pop_ser.loc[country]
return countries
def load_population_DEU():
pop = pd.read_csv('population_DEU.csv', sep='\t')
pop_ser=pd.Series(pop.insgesamt.values, index=pop.Bundesland)
countries = dict()
for country in pop_ser.index:
country_new = country.strip()
countries[country_new] = pop_ser.loc[country]
return countries
def load_population_BW(incl_density=False):
pop = pd.read_csv('population_BW.csv', sep=',')
pop_ser=pd.Series(pop['Bevölkerung insgesamt'].values, index=pop.Regionalname)
countries = dict()
for country in pop_ser.index:
countries[country] = pop_ser.loc[country]
if incl_density:
pop.rename(index=pop.Regionalname, inplace=True)
return pop.drop('Regionalname', axis=1, inplace=False)
else:
return countries
| [
"pandas.read_csv",
"numpy.log",
"math.sqrt",
"math.log",
"numpy.array",
"pandas.plotting.register_matplotlib_converters",
"math.exp",
"math.log10",
"pandas.to_datetime",
"numpy.arange",
"matplotlib.pyplot.close",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"... | [((231, 263), 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), '()\n', (261, 263), False, 'from pandas.plotting import register_matplotlib_converters\n'), ((13594, 13643), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {'fit_intercept': '(True)'}), '(fit_intercept=True)\n', (13623, 13643), False, 'from sklearn import linear_model\n'), ((39619, 39664), 'pandas.read_csv', 'pd.read_csv', (['"""population_world.csv"""'], {'sep': '"""\t"""'}), "('population_world.csv', sep='\\t')\n", (39630, 39664), True, 'import pandas as pd\n'), ((39978, 40021), 'pandas.read_csv', 'pd.read_csv', (['"""population_DEU.csv"""'], {'sep': '"""\t"""'}), "('population_DEU.csv', sep='\\t')\n", (39989, 40021), True, 'import pandas as pd\n'), ((40034, 40087), 'pandas.Series', 'pd.Series', (['pop.insgesamt.values'], {'index': 'pop.Bundesland'}), '(pop.insgesamt.values, index=pop.Bundesland)\n', (40043, 40087), True, 'import pandas as pd\n'), ((40313, 40354), 'pandas.read_csv', 'pd.read_csv', (['"""population_BW.csv"""'], {'sep': '""","""'}), "('population_BW.csv', sep=',')\n", (40324, 40354), True, 'import pandas as pd\n'), ((40367, 40437), 'pandas.Series', 'pd.Series', (["pop['Bevölkerung insgesamt'].values"], {'index': 'pop.Regionalname'}), "(pop['Bevölkerung insgesamt'].values, index=pop.Regionalname)\n", (40376, 40437), True, 'import pandas as pd\n'), ((1146, 1158), 'os.scandir', 'os.scandir', ([], {}), '()\n', (1156, 1158), False, 'import os, math\n'), ((1474, 1499), 'pandas.read_csv', 'pd.read_csv', (['lists[i][-1]'], {}), '(lists[i][-1])\n', (1485, 1499), True, 'import pandas as pd\n'), ((4375, 4414), 'pandas.concat', 'pd.concat', (['l'], {'ignore_index': '(True)', 'axis': '(1)'}), '(l, ignore_index=True, axis=1)\n', (4384, 4414), True, 'import pandas as pd\n'), ((8537, 8578), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{window_length - 1} days"""'], {}), "(f'{window_length - 1} days')\n", (8549, 8578), True, 'import pandas as pd\n'), ((14027, 14036), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (14033, 14036), True, 'import numpy as np\n'), ((14138, 14164), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (14146, 14164), False, 'import os, math\n'), ((24964, 24983), 'pandas.DataFrame', 'pd.DataFrame', (['r_exp'], {}), '(r_exp)\n', (24976, 24983), True, 'import pandas as pd\n'), ((25061, 25080), 'pandas.DataFrame', 'pd.DataFrame', (['r_lin'], {}), '(r_lin)\n', (25073, 25080), True, 'import pandas as pd\n'), ((29546, 29608), 'pandas.DataFrame', 'pd.DataFrame', (['[results + [selected_window_length, exp_or_lin]]'], {}), '([results + [selected_window_length, exp_or_lin]])\n', (29558, 29608), True, 'import pandas as pd\n'), ((34280, 34319), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(14.4, 4.8)'}), '(1, 2, figsize=(14.4, 4.8))\n', (34292, 34319), True, 'import matplotlib.pyplot as plt\n'), ((39318, 39328), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (39326, 39328), True, 'import matplotlib.pyplot as plt\n'), ((11243, 11268), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{i} days"""'], {}), "(f'{i} days')\n", (11255, 11268), True, 'import pandas as pd\n'), ((15841, 15882), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{window_length - 1} days"""'], {}), "(f'{window_length - 1} days')\n", (15853, 15882), True, 'import pandas as pd\n'), ((34370, 34409), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(14.4, 4.8)'}), '(1, 3, figsize=(14.4, 
4.8))\n', (34382, 34409), True, 'import matplotlib.pyplot as plt\n'), ((36171, 36212), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{window_length - 1} days"""'], {}), "(f'{window_length - 1} days')\n", (36183, 36212), True, 'import pandas as pd\n'), ((39535, 39555), 'matplotlib.pyplot.savefig', 'plt.savefig', (['imgfile'], {}), '(imgfile)\n', (39546, 39555), True, 'import matplotlib.pyplot as plt\n'), ((39564, 39578), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (39573, 39578), True, 'import matplotlib.pyplot as plt\n'), ((5046, 5085), 'pandas.concat', 'pd.concat', (['l'], {'ignore_index': '(True)', 'axis': '(0)'}), '(l, ignore_index=True, axis=0)\n', (5055, 5085), True, 'import pandas as pd\n'), ((6808, 6851), 'numpy.nonzero', 'np.nonzero', (['(ts.index == ts[ts > 0].index[0])'], {}), '(ts.index == ts[ts > 0].index[0])\n', (6818, 6851), True, 'import numpy as np\n'), ((12031, 12072), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{window_length - 1} days"""'], {}), "(f'{window_length - 1} days')\n", (12043, 12072), True, 'import pandas as pd\n'), ((14393, 14419), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (14401, 14419), False, 'import os, math\n'), ((14705, 14731), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (14713, 14731), False, 'import os, math\n'), ((19414, 19455), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{window_length - 1} days"""'], {}), "(f'{window_length - 1} days')\n", (19426, 19455), True, 'import pandas as pd\n'), ((22846, 22873), 'math.pow', 'math.pow', (['(2)', '(1 / results[2])'], {}), '(2, 1 / results[2])\n', (22854, 22873), False, 'import os, math\n'), ((26969, 26982), 'pandas.Series', 'pd.Series', (['[]'], {}), '([])\n', (26978, 26982), True, 'import pandas as pd\n'), ((27091, 27144), 'pandas.DataFrame', 'pd.DataFrame', (['[results + [window_length, exp_or_lin]]'], {}), '([results + [window_length, exp_or_lin]])\n', (27103, 27144), True, 'import pandas as pd\n'), ((27785, 27813), 'numpy.zeros', 'np.zeros', (['(wl_hi - wl_lo, 8)'], {}), '((wl_hi - wl_lo, 8))\n', (27793, 27813), True, 'import numpy as np\n'), ((27943, 27971), 'numpy.zeros', 'np.zeros', (['(wl_hi - wl_lo, 8)'], {}), '((wl_hi - wl_lo, 8))\n', (27951, 27971), True, 'import numpy as np\n'), ((35003, 35012), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (35010, 35012), True, 'import matplotlib.pyplot as plt\n'), ((35715, 35724), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (35722, 35724), True, 'import matplotlib.pyplot as plt\n'), ((5716, 5757), 'pandas.Series', 'pd.Series', (['df_ts.index'], {'index': 'df_ts.index'}), '(df_ts.index, index=df_ts.index)\n', (5725, 5757), True, 'import pandas as pd\n'), ((5774, 5791), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {}), '(x)\n', (5788, 5791), True, 'import pandas as pd\n'), ((14484, 14510), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (14492, 14510), False, 'import os, math\n'), ((14567, 14581), 'math.log', 'math.log', (['temp'], {}), '(temp)\n', (14575, 14581), False, 'import os, math\n'), ((22019, 22060), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{window_length - 1} days"""'], {}), "(f'{window_length - 1} days')\n", (22031, 22060), True, 'import pandas as pd\n'), ((32572, 32596), 'math.log10', 'math.log10', (['normalise_by'], {}), '(normalise_by)\n', (32582, 32596), False, 'import os, math\n'), ((33149, 33173), 'math.log10', 'math.log10', (['normalise_by'], {}), '(normalise_by)\n', (33159, 33173), False, 'import os, math\n'), 
((36681, 36707), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (36689, 36707), False, 'import os, math\n'), ((15030, 15056), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (15038, 15056), False, 'import os, math\n'), ((15116, 15142), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (15124, 15142), False, 'import os, math\n'), ((32356, 32380), 'math.log10', 'math.log10', (['normalise_by'], {}), '(normalise_by)\n', (32366, 32380), False, 'import os, math\n'), ((33065, 33089), 'math.log10', 'math.log10', (['normalise_by'], {}), '(normalise_by)\n', (33075, 33089), False, 'import os, math\n'), ((37238, 37264), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (37246, 37264), False, 'import os, math\n'), ((37641, 37673), 'numpy.arange', 'np.arange', (['(-window_length + 1)', '(1)'], {}), '(-window_length + 1, 1)\n', (37650, 37673), True, 'import numpy as np\n'), ((38377, 38409), 'numpy.arange', 'np.arange', (['(-window_length + 1)', '(1)'], {}), '(-window_length + 1, 1)\n', (38386, 38409), True, 'import numpy as np\n'), ((14825, 14864), 'math.exp', 'math.exp', (['(model.coef_[0] * intl_lo_days)'], {}), '(model.coef_[0] * intl_lo_days)\n', (14833, 14864), False, 'import os, math\n'), ((14919, 14958), 'math.exp', 'math.exp', (['(model.coef_[0] * intl_hi_days)'], {}), '(model.coef_[0] * intl_hi_days)\n', (14927, 14958), False, 'import os, math\n'), ((16801, 16827), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (16809, 16827), False, 'import os, math\n'), ((17513, 17602), 'math.sqrt', 'math.sqrt', (['(model.intercept_ * model.intercept_ + 2 * model.coef_[0] * df_ts.iloc[-1])'], {}), '(model.intercept_ * model.intercept_ + 2 * model.coef_[0] * df_ts.\n iloc[-1])\n', (17522, 17602), False, 'import os, math\n'), ((36538, 36570), 'numpy.arange', 'np.arange', (['(-window_length + 1)', '(1)'], {}), '(-window_length + 1, 1)\n', (36547, 36570), True, 'import numpy as np\n'), ((36970, 37017), 'math.exp', 'math.exp', (['(model.coef_[0] * (-window_length + 1))'], {}), '(model.coef_[0] * (-window_length + 1))\n', (36978, 37017), False, 'import os, math\n'), ((37266, 37298), 'numpy.arange', 'np.arange', (['(-window_length + 1)', '(1)'], {}), '(-window_length + 1, 1)\n', (37275, 37298), True, 'import numpy as np\n'), ((16418, 16465), 'math.exp', 'math.exp', (['(model.coef_[0] * (-window_length + 1))'], {}), '(model.coef_[0] * (-window_length + 1))\n', (16426, 16465), False, 'import os, math\n'), ((4884, 4981), 'numpy.logical_and', 'np.logical_and', (["(df[k]['Province/State'] == country[0])", "(df[k]['Country/Region'] == country[1])"], {}), "(df[k]['Province/State'] == country[0], df[k][\n 'Country/Region'] == country[1])\n", (4898, 4981), True, 'import numpy as np\n'), ((36936, 36968), 'numpy.arange', 'np.arange', (['(-window_length + 1)', '(1)'], {}), '(-window_length + 1, 1)\n', (36945, 36968), True, 'import numpy as np\n'), ((38326, 38358), 'numpy.arange', 'np.arange', (['(-window_length + 1)', '(1)'], {}), '(-window_length + 1, 1)\n', (38335, 38358), True, 'import numpy as np\n'), ((4309, 4323), 'numpy.array', 'np.array', (['M[k]'], {}), '(M[k])\n', (4317, 4323), True, 'import numpy as np\n')] |
"""
Tests for the collectd/nginx monitor
"""
from contextlib import contextmanager
from functools import partial as p
import pytest
from tests.helpers.agent import Agent
from tests.helpers.assertions import has_datapoint_with_dim, tcp_socket_open
from tests.helpers.metadata import Metadata
from tests.helpers.util import container_ip, run_service, wait_for
from tests.helpers.verify import verify
pytestmark = [pytest.mark.collectd, pytest.mark.nginx, pytest.mark.monitor_with_endpoints]
METADATA = Metadata.from_package("collectd/nginx")
@contextmanager
def run_nginx():
with run_service("nginx") as nginx_container:
host = container_ip(nginx_container)
assert wait_for(p(tcp_socket_open, host, 80), 60), "service didn't start"
yield host
def test_nginx_default():
with run_nginx() as host, Agent.run(
f"""
monitors:
- type: collectd/nginx
host: {host}
port: 80
"""
) as agent:
verify(agent, METADATA.default_metrics)
assert has_datapoint_with_dim(agent.fake_services, "plugin", "nginx"), "Didn't get nginx datapoints"
def test_nginx_all():
with run_nginx() as host, Agent.run(
f"""
monitors:
- type: collectd/nginx
host: {host}
port: 80
extraMetrics: ["*"]
"""
) as agent:
verify(agent, METADATA.all_metrics)
assert has_datapoint_with_dim(agent.fake_services, "plugin", "nginx"), "Didn't get nginx datapoints"
| [
"tests.helpers.metadata.Metadata.from_package",
"tests.helpers.agent.Agent.run",
"tests.helpers.util.container_ip",
"tests.helpers.verify.verify",
"tests.helpers.assertions.has_datapoint_with_dim",
"functools.partial",
"tests.helpers.util.run_service"
] | [((505, 544), 'tests.helpers.metadata.Metadata.from_package', 'Metadata.from_package', (['"""collectd/nginx"""'], {}), "('collectd/nginx')\n", (526, 544), False, 'from tests.helpers.metadata import Metadata\n'), ((589, 609), 'tests.helpers.util.run_service', 'run_service', (['"""nginx"""'], {}), "('nginx')\n", (600, 609), False, 'from tests.helpers.util import container_ip, run_service, wait_for\n'), ((645, 674), 'tests.helpers.util.container_ip', 'container_ip', (['nginx_container'], {}), '(nginx_container)\n', (657, 674), False, 'from tests.helpers.util import container_ip, run_service, wait_for\n'), ((834, 962), 'tests.helpers.agent.Agent.run', 'Agent.run', (['f"""\n monitors:\n - type: collectd/nginx\n host: {host}\n port: 80\n """'], {}), '(\n f"""\n monitors:\n - type: collectd/nginx\n host: {host}\n port: 80\n """\n )\n', (843, 962), False, 'from tests.helpers.agent import Agent\n'), ((985, 1024), 'tests.helpers.verify.verify', 'verify', (['agent', 'METADATA.default_metrics'], {}), '(agent, METADATA.default_metrics)\n', (991, 1024), False, 'from tests.helpers.verify import verify\n'), ((1040, 1102), 'tests.helpers.assertions.has_datapoint_with_dim', 'has_datapoint_with_dim', (['agent.fake_services', '"""plugin"""', '"""nginx"""'], {}), "(agent.fake_services, 'plugin', 'nginx')\n", (1062, 1102), False, 'from tests.helpers.assertions import has_datapoint_with_dim, tcp_socket_open\n'), ((1188, 1346), 'tests.helpers.agent.Agent.run', 'Agent.run', (['f"""\n monitors:\n - type: collectd/nginx\n host: {host}\n port: 80\n extraMetrics: ["*"]\n """'], {}), '(\n f"""\n monitors:\n - type: collectd/nginx\n host: {host}\n port: 80\n extraMetrics: ["*"]\n """\n )\n', (1197, 1346), False, 'from tests.helpers.agent import Agent\n'), ((1369, 1404), 'tests.helpers.verify.verify', 'verify', (['agent', 'METADATA.all_metrics'], {}), '(agent, METADATA.all_metrics)\n', (1375, 1404), False, 'from tests.helpers.verify import verify\n'), ((1420, 1482), 'tests.helpers.assertions.has_datapoint_with_dim', 'has_datapoint_with_dim', (['agent.fake_services', '"""plugin"""', '"""nginx"""'], {}), "(agent.fake_services, 'plugin', 'nginx')\n", (1442, 1482), False, 'from tests.helpers.assertions import has_datapoint_with_dim, tcp_socket_open\n'), ((699, 727), 'functools.partial', 'p', (['tcp_socket_open', 'host', '(80)'], {}), '(tcp_socket_open, host, 80)\n', (700, 727), True, 'from functools import partial as p\n')] |
import re
from django.urls import path
from render_static.tests.views import TestView
class Unrecognized:
regex = re.compile('Im not normal')
class NotAPattern:
pass
urlpatterns = [
path('test/simple/', TestView.as_view(), name='bad'),
NotAPattern()
]
urlpatterns[0].pattern = Unrecognized()
| [
"render_static.tests.views.TestView.as_view",
"re.compile"
] | [((121, 148), 're.compile', 're.compile', (['"""Im not normal"""'], {}), "('Im not normal')\n", (131, 148), False, 'import re\n'), ((222, 240), 'render_static.tests.views.TestView.as_view', 'TestView.as_view', ([], {}), '()\n', (238, 240), False, 'from render_static.tests.views import TestView\n')] |
from math import hypot
n, w, h = (int(x) for x in input().split())
def fits_in_box(nums: list[int]) -> None:
    # The longest segment that fits in a w x h box is its diagonal.
    longest = hypot(w, h)
    for num in nums:
        print('DA' if num <= longest else 'NE')
fits_in_box([int(input()) for _ in range(n)])
| [
"math.hypot"
] | [((126, 137), 'math.hypot', 'hypot', (['w', 'h'], {}), '(w, h)\n', (131, 137), False, 'from math import hypot\n')] |
"""Hook specifications and containers."""
import collections
import enum
from typing import Optional, Mapping, Any
import pluggy # type: ignore
from repobee_plug import log
hookspec = pluggy.HookspecMarker(__package__)
hookimpl = pluggy.HookimplMarker(__package__)
class Status(enum.Enum):
"""Status codes enums for Results.
Attributes:
SUCCESS: Signifies a plugin execution without any complications.
WARNING: Signifies a plugin execution with non-critical failures.
ERROR: Signifies a critical error during execution.
"""
SUCCESS = "success"
WARNING = "warning"
ERROR = "error"
class Result(
collections.namedtuple("Result", ("name", "status", "msg", "data"))
):
"""Container for storing results from hooks."""
def __new__(
cls,
name: str,
status: Status,
msg: str,
data: Optional[Mapping[Any, Any]] = None,
):
return super().__new__(cls, name, status, msg, data)
def __init__(
self,
name: str,
status: Status,
msg: str,
data: Optional[Mapping[Any, Any]] = None,
):
"""
Args:
name: Name to associate with this result. This is typically the
name of the plugin that returns this result.
status: Status of the plugin execution.
msg: A free-form result message.
data: Semi-structured data in the form of a dictionary. All of the
contents of the dictionary should be serializable as this is
primarily used for JSON storage.
"""
super().__init__()
@property
def hook(self) -> str:
log.warning(
"the Result.hook attribute is deprecated, use Result.name instead"
)
return self.name
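# Illustrative usage (not part of the library; the field values are made up):
#   result = Result(name="my-plugin", status=Status.SUCCESS, msg="all good",
#                   data={"repos_processed": 3})
#   result.data should remain JSON-serializable, since it is intended for JSON storage.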
| [
"collections.namedtuple",
"pluggy.HookimplMarker",
"repobee_plug.log.warning",
"pluggy.HookspecMarker"
] | [((189, 223), 'pluggy.HookspecMarker', 'pluggy.HookspecMarker', (['__package__'], {}), '(__package__)\n', (210, 223), False, 'import pluggy\n'), ((235, 269), 'pluggy.HookimplMarker', 'pluggy.HookimplMarker', (['__package__'], {}), '(__package__)\n', (256, 269), False, 'import pluggy\n'), ((657, 724), 'collections.namedtuple', 'collections.namedtuple', (['"""Result"""', "('name', 'status', 'msg', 'data')"], {}), "('Result', ('name', 'status', 'msg', 'data'))\n", (679, 724), False, 'import collections\n'), ((1695, 1774), 'repobee_plug.log.warning', 'log.warning', (['"""the Result.hook attribute is deprecated, use Result.name instead"""'], {}), "('the Result.hook attribute is deprecated, use Result.name instead')\n", (1706, 1774), False, 'from repobee_plug import log\n')] |
import datetime
import logging
import fcntl
import importlib
import os
import pkgutil
import sys
import multiprocessing as mp
import queue
import signal
import scitag
import scitag.settings
import scitag.plugins
import scitag.backends
import scitag.stun.services
from scitag.config import config
log = logging.getLogger('scitag')
def unlock_file(f):
if f.writable():
fcntl.lockf(f, fcntl.LOCK_UN)
class FlowService(object):
def __init__(self, args, pid_file):
self.pid_file = pid_file
self.backend = config.get('BACKEND')
self.backend_mod = None
self.backend_proc = None
self.plugin = config.get('PLUGIN')
self.plugin_mod = None
self.plugin_proc = None
if args.debug or args.fg:
self.debug = True
else:
self.debug = False
self.flow_id_queue = mp.Queue()
self.term_event = mp.Event()
header = list()
header.append("flowd v.{}: {}".format(scitag.__version__, datetime.datetime.now()))
header.append("config: {}".format(scitag.settings.CONFIG_PATH))
l_max = len(max(header, key=lambda x: len(x)))
log.info('*' * (l_max + 4))
for line in header:
log.info('* {0:<{1}s} *'.format(line, l_max))
log.info('*' * (l_max + 4))
if 'IP_DISCOVERY_ENABLED' in config.keys() and config['IP_DISCOVERY_ENABLED']:
try:
eip, iip = scitag.stun.services.get_ext_ip()
log.info('network info: {}/{}'.format(iip, eip))
except Exception as e:
log.exception(e)
sys.exit(1)
def init_plugins(self):
log.debug(" Loading plugin {}".format(self.plugin))
try:
default_pkg = os.path.dirname(scitag.plugins.__file__)
if self.plugin in [name for _, name, _ in pkgutil.iter_modules([default_pkg])]:
self.plugin_mod = importlib.import_module("scitag.plugins.{}".format(self.plugin))
else:
log.error("Configured plugin not found")
return False
except ImportError as e:
log.error("Exception caught {} while loading plugin {}".format(e, self.plugin))
sys.exit(1)
try:
log.debug(" Calling plugin init: {}".format(self.plugin))
self.plugin_mod.init()
except Exception as e:
log.error("Exception was thrown while initialing plugin {} ({})".format(self.plugin, e))
sys.exit(1)
backend = config.get('BACKEND', scitag.settings.DEFAULT_BACKEND)
log.debug(" Loading backend {}".format(backend))
try:
default_pkg = os.path.dirname(scitag.backends.__file__)
if self.backend in [name for _, name, _ in pkgutil.iter_modules([default_pkg])]:
self.backend_mod = importlib.import_module("scitag.backends.{}".format(self.backend))
else:
log.error("Configured backend not found")
return False
except ImportError as e:
log.error("Exception caught {} while loading backend {}".format(e, self.backend))
sys.exit(1)
def cleanup(self, sig, frame):
log.debug('caught signal {}'.format(sig))
self.term_event.set()
while True:
try:
self.flow_id_queue.get(block=False)
except queue.Empty:
break
except ValueError:
break
self.flow_id_queue.close()
self.flow_id_queue.join_thread()
if self.plugin_proc and self.plugin_proc.is_alive():
self.plugin_proc.join(5)
if self.backend_proc and self.backend_proc.is_alive():
self.backend_proc.join(5)
# wait -> if self.plugin_proc.is_alive()
# self.plugin_proc.terminate()
self.plugin_proc.close()
self.backend_proc.close()
unlock_file(self.pid_file)
log.debug('cleanup done ... ')
@staticmethod
def reload_config():
importlib.reload(scitag.config)
def main(self):
# 1. create queue and process pool for backend
# 2. create process or pool for plugin
# 3. watch plugin and backend pools until they finish
self.backend_proc = mp.Process(target=self.backend_mod.run,
args=(self.flow_id_queue, self.term_event),
daemon=True)
self.plugin_proc = mp.Process(target=self.plugin_mod.run,
args=(self.flow_id_queue, self.term_event),
daemon=True)
try:
self.backend_proc.start()
self.plugin_proc.start()
if self.debug:
signal.signal(signal.SIGINT, self.cleanup)
signal.signal(signal.SIGTERM, self.cleanup)
self.plugin_proc.join()
except Exception as e:
log.exception('Exception caught in main')
log.debug('flowd terminated')
| [
"logging.getLogger",
"signal.signal",
"sys.exit",
"multiprocessing.Event",
"scitag.config.config.keys",
"multiprocessing.Process",
"scitag.stun.services.get_ext_ip",
"os.path.dirname",
"datetime.datetime.now",
"scitag.config.config.get",
"fcntl.lockf",
"importlib.reload",
"multiprocessing.Qu... | [((305, 332), 'logging.getLogger', 'logging.getLogger', (['"""scitag"""'], {}), "('scitag')\n", (322, 332), False, 'import logging\n'), ((384, 413), 'fcntl.lockf', 'fcntl.lockf', (['f', 'fcntl.LOCK_UN'], {}), '(f, fcntl.LOCK_UN)\n', (395, 413), False, 'import fcntl\n'), ((539, 560), 'scitag.config.config.get', 'config.get', (['"""BACKEND"""'], {}), "('BACKEND')\n", (549, 560), False, 'from scitag.config import config\n'), ((648, 668), 'scitag.config.config.get', 'config.get', (['"""PLUGIN"""'], {}), "('PLUGIN')\n", (658, 668), False, 'from scitag.config import config\n'), ((870, 880), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (878, 880), True, 'import multiprocessing as mp\n'), ((907, 917), 'multiprocessing.Event', 'mp.Event', ([], {}), '()\n', (915, 917), True, 'import multiprocessing as mp\n'), ((2560, 2614), 'scitag.config.config.get', 'config.get', (['"""BACKEND"""', 'scitag.settings.DEFAULT_BACKEND'], {}), "('BACKEND', scitag.settings.DEFAULT_BACKEND)\n", (2570, 2614), False, 'from scitag.config import config\n'), ((4079, 4110), 'importlib.reload', 'importlib.reload', (['scitag.config'], {}), '(scitag.config)\n', (4095, 4110), False, 'import importlib\n'), ((4324, 4425), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'self.backend_mod.run', 'args': '(self.flow_id_queue, self.term_event)', 'daemon': '(True)'}), '(target=self.backend_mod.run, args=(self.flow_id_queue, self.\n term_event), daemon=True)\n', (4334, 4425), True, 'import multiprocessing as mp\n'), ((4526, 4626), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'self.plugin_mod.run', 'args': '(self.flow_id_queue, self.term_event)', 'daemon': '(True)'}), '(target=self.plugin_mod.run, args=(self.flow_id_queue, self.\n term_event), daemon=True)\n', (4536, 4626), True, 'import multiprocessing as mp\n'), ((1778, 1818), 'os.path.dirname', 'os.path.dirname', (['scitag.plugins.__file__'], {}), '(scitag.plugins.__file__)\n', (1793, 1818), False, 'import os\n'), ((2714, 2755), 'os.path.dirname', 'os.path.dirname', (['scitag.backends.__file__'], {}), '(scitag.backends.__file__)\n', (2729, 2755), False, 'import os\n'), ((1009, 1032), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1030, 1032), False, 'import datetime\n'), ((1358, 1371), 'scitag.config.config.keys', 'config.keys', ([], {}), '()\n', (1369, 1371), False, 'from scitag.config import config\n'), ((1452, 1485), 'scitag.stun.services.get_ext_ip', 'scitag.stun.services.get_ext_ip', ([], {}), '()\n', (1483, 1485), False, 'import scitag\n'), ((2251, 2262), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2259, 2262), False, 'import sys\n'), ((2529, 2540), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2537, 2540), False, 'import sys\n'), ((3195, 3206), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3203, 3206), False, 'import sys\n'), ((4830, 4872), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'self.cleanup'], {}), '(signal.SIGINT, self.cleanup)\n', (4843, 4872), False, 'import signal\n'), ((4889, 4932), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'self.cleanup'], {}), '(signal.SIGTERM, self.cleanup)\n', (4902, 4932), False, 'import signal\n'), ((1635, 1646), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1643, 1646), False, 'import sys\n'), ((1873, 1908), 'pkgutil.iter_modules', 'pkgutil.iter_modules', (['[default_pkg]'], {}), '([default_pkg])\n', (1893, 1908), False, 'import pkgutil\n'), ((2811, 2846), 'pkgutil.iter_modules', 'pkgutil.iter_modules', (['[default_pkg]'], {}), 
'([default_pkg])\n', (2831, 2846), False, 'import pkgutil\n')] |
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name="py-json-serialize",
version="0.10.0",
description = "json serialize library for Python 2 and 3",
long_description = long_description,
long_description_content_type="text/markdown",
url="https://github.com/randydu/py-json-serialize.git",
author="<NAME>",
author_email="<EMAIL>",
packages=["py_json_serialize"],
keywords=["serialize", "json"],
license="MIT",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
) | [
"setuptools.setup"
] | [((99, 867), 'setuptools.setup', 'setup', ([], {'name': '"""py-json-serialize"""', 'version': '"""0.10.0"""', 'description': '"""json serialize library for Python 2 and 3"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/randydu/py-json-serialize.git"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'packages': "['py_json_serialize']", 'keywords': "['serialize', 'json']", 'license': '"""MIT"""', 'classifiers': "['Development Status :: 4 - Beta', 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3']"}), "(name='py-json-serialize', version='0.10.0', description=\n 'json serialize library for Python 2 and 3', long_description=\n long_description, long_description_content_type='text/markdown', url=\n 'https://github.com/randydu/py-json-serialize.git', author='<NAME>',\n author_email='<EMAIL>', packages=['py_json_serialize'], keywords=[\n 'serialize', 'json'], license='MIT', classifiers=[\n 'Development Status :: 4 - Beta', 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3'])\n", (104, 867), False, 'from setuptools import setup\n')] |
""" Backtracking ref impls
see https://leetcode.com/problems/permutations/discuss/18284/Backtrack-Summary%3A-General-Solution-for-10-Questions!!!!!!!!-Python-(Combination-Sum-Subsets-Permutation-Palindrome)
"""
import random
def subsets(arr):
def backtrack(tmp, start, end):
ret.append(tmp[:])
for i in range(start, end):
tmp.append(arr[i])
backtrack(tmp, i + 1, end)
tmp.pop()
ret = []
backtrack([], 0, len(arr))
return ret
def permutations(arr, n=None):
if n is None:
n = len(arr)
def backtrack(start, end):
if start == n:
ret.append(arr[:n])
return
for i in range(start, end):
arr[i], arr[start] = arr[start], arr[i]
backtrack(start + 1, end)
arr[i], arr[start] = arr[start], arr[i]
ret = []
backtrack(0, len(arr))
return ret
def combinations(arr, n):
def backtrack(tmp, start, end):
if len(tmp) == n:
ret.append(tmp[:])
return
for i in range(start, end):
tmp.append(arr[i])
backtrack(tmp, i + 1, end)
tmp.pop()
ret = []
backtrack([], 0, len(arr))
return ret
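# Expected outputs for small inputs (test() below exercises these more thoroughly):
#   subsets([1, 2])            -> [[], [1], [1, 2], [2]]
#   permutations([1, 2])       -> [[1, 2], [2, 1]]
#   combinations([1, 2, 3], 2) -> [[1, 2], [1, 3], [2, 3]]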
def test():
import itertools
for l in [0, 1, 2, 5]:
arr = [random.randint(-5, 5) for i in range(l)]
for n in [0, 1, len(arr)]:
if n > len(arr):
continue
            ret = itertools.combinations(arr, n)
            ret = tuple(sorted([tuple(sorted(r)) for r in ret]))
            ret2 = combinations(arr, n)
            ret2 = tuple(sorted([tuple(sorted(r)) for r in ret2]))
# print(len(ret))
# print(len(ret2))
# print(ret)
# print(ret2)
assert ret == ret2
print('ok')
ret = itertools.permutations(arr, n)
ret = tuple(sorted([tuple(sorted(r)) for r in ret]))
ret2 = permutations(arr, n)
ret2 = tuple(sorted([tuple(sorted(r)) for r in ret2]))
# print(len(ret))
# print(len(ret2))
# print(ret)
# print(ret2)
assert ret == ret2
print('ok')
ret = []
for i in range(len(arr) + 1):
ret += itertools.combinations(arr, i)
ret = tuple(sorted([tuple(sorted(r)) for r in ret]))
ret2 = subsets(arr)
ret2 = tuple(sorted([tuple(sorted(r)) for r in ret2]))
# print(len(ret))
# print(len(ret2))
# print(ret)
# print(ret2)
assert ret == ret2
print('ok')
test()
| [
"itertools.combinations",
"itertools.permutations",
"random.randint"
] | [((1336, 1357), 'random.randint', 'random.randint', (['(-5)', '(5)'], {}), '(-5, 5)\n', (1350, 1357), False, 'import random\n'), ((1484, 1514), 'itertools.combinations', 'itertools.combinations', (['arr', '(2)'], {}), '(arr, 2)\n', (1506, 1514), False, 'import itertools\n'), ((1897, 1927), 'itertools.permutations', 'itertools.permutations', (['arr', 'n'], {}), '(arr, n)\n', (1919, 1927), False, 'import itertools\n'), ((2346, 2376), 'itertools.combinations', 'itertools.combinations', (['arr', 'i'], {}), '(arr, i)\n', (2368, 2376), False, 'import itertools\n')] |
"""Add triples to the graph database
The UWKGM project
:copyright: (c) 2020 Ichise Laboratory at NII & AIST
:author: <NAME>
"""
from typing import Tuple
from dorest.managers.struct import generic
from dorest.managers.struct.decorators import endpoint
from database.database.graph import default_graph_uri
@endpoint(['GET'])
def single(triple: Tuple[str, str, str], graph: str = default_graph_uri) -> str:
"""Adds a triple to the graph database
:param triple: A URI triple (subject, predicate, object)
:param graph: Graph URI
:return: Adding status
"""
return generic.resolve(single)(triple, graph)
| [
"dorest.managers.struct.generic.resolve",
"dorest.managers.struct.decorators.endpoint"
] | [((312, 329), 'dorest.managers.struct.decorators.endpoint', 'endpoint', (["['GET']"], {}), "(['GET'])\n", (320, 329), False, 'from dorest.managers.struct.decorators import endpoint\n'), ((590, 613), 'dorest.managers.struct.generic.resolve', 'generic.resolve', (['single'], {}), '(single)\n', (605, 613), False, 'from dorest.managers.struct import generic\n')] |
# -*- coding: utf-8 -*-
import swarmmaster
from nose.tools import *
import unittest
sc = swarmmaster.SwarmClient(1)
class TestSwarmClient(unittest.TestCase):
"""Basic test cases."""
def test_sc_id (self):
assert sc.id == 1
def test_sc_writebuffer (self):
sc.tx_buffer.clear()
sc.tx_buffer += b'Hello'
assert sc.tx_buffer[:5] == bytearray(b'Hello')
sc.tx_buffer.clear()
assert sc.tx_buffer == bytearray(b'')
def test_sc_readbuffer (self):
sc.rx_buffer.clear()
sc.rx_buffer += b'Hallo'
assert sc.rx_buffer[:5] == bytearray(b'Hallo')
sc.rx_buffer.clear()
assert sc.rx_buffer == bytearray(b'')
def test_sc_failcounter (self):
assert sc.fail_counter == 0
sc.fail_counter +=1
assert sc.fail_counter == 1
    def test_sc_prio(self):
sc.prio = 100
assert sc.prio == 100
sc.prio += 1
assert sc.prio == 101
def test_sc_add_data (self):
sc.rx_buffer =bytearray(b'Hello')
msg =bytearray(b'')
msg.append(1)
msg+=b'World'
sc.add_data_to_rx_buffer(msg)
assert sc.last_msg_id ==1
assert sc.rx_buffer ==bytearray(b'HelloWorld')
msg.clear()
msg.append(2)
msg +=b'!'
sc.add_data_to_rx_buffer(msg)
assert sc.last_msg_id ==2
assert sc.rx_buffer== bytearray(b'HelloWorld!')
msg.clear()
msg.append(10)
msg +=b'new start'
sc.add_data_to_rx_buffer(msg)
assert sc.last_msg_id == 10
assert sc.rx_buffer == bytearray(b'new start')
def test_sc_buffer_overflow(self):
sc.max_rx_buf = 5
sc.max_tx_buf = 6
sc.tx_buffer.clear()
sc.rx_buffer.clear()
sc.add_data_to_rx_buffer(b'0123456789')
assert sc.rx_buffer == bytearray(b'56789')
sc.add_data_to_tx_buffer(b'0123456789')
assert sc.tx_buffer == bytearray(b'456789')
sc.max_rx_buf = 2**20 #2**20 means 1MB
sc.max_tx_buf = 2**20
def test_get_packet(self):
sc.tx_buffer.clear()
d1=b'0123456789abcdef0123456789abcde'
d2=b'fedcba9876543210fedcba9876543'
sc.add_data_to_tx_buffer(d1)
sc.add_data_to_tx_buffer(d2)
assert sc.get_tx_buffer_size() == 60
p1 = sc.get_packet()
assert len(p1) == 32
assert p1[1:11] == b'0123456789'
p2 = sc.get_packet()
assert p2[1:32] == b'fedcba9876543210fedcba9876543\xf0\xf0'
p3 = sc.get_packet()
assert p3[0:1]==b'\xc2'
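        # The XOR below treats p3 as a parity packet: combining it with p1
        # recovers p2 (a simple forward-error-correction scheme). Byte 0 is a
        # header byte, so it is restored manually after the XOR.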
p2_restore = int.from_bytes(p3,'little') ^ int.from_bytes(p1,'little')
p2_restore = bytearray(p2_restore.to_bytes(32,'little'))
p2_restore[0]=0xb1
assert p2_restore == p2
p4 = sc.get_packet()
p4_expected =bytearray(32)
p4_expected[0]=0xc0
assert p4 == p4_expected
if __name__ == '__main__':
unittest.main() | [
"unittest.main",
"swarmmaster.SwarmClient"
] | [((93, 119), 'swarmmaster.SwarmClient', 'swarmmaster.SwarmClient', (['(1)'], {}), '(1)\n', (116, 119), False, 'import swarmmaster\n'), ((2996, 3011), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3009, 3011), False, 'import unittest\n')] |
import sys
sys.path.append("..")
from FeatureBuilder import FeatureBuilder
from Utils.Libraries.wvlib_light.lwvlib import WV
import Utils.Settings as Settings
class WordVectorFeatureBuilder(FeatureBuilder):
def __init__(self, featureSet, style=None):
FeatureBuilder.__init__(self, featureSet, style)
        self.model = WV.load(Settings.W2VFILE, 100000, 10000000)  # alternative limits: 10000, 500000
def buildFeatures(self, token):
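        # w_to_normv is expected to return a length-normalised word vector,
        # or None when the token is out of vocabulary (handled below).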
weights = self.model.w_to_normv(token.get("text").lower())
if weights is not None:
for i in range(len(weights)):
self.setFeature("W2V_" + str(i), weights[i])
else:
self.setFeature("W2V_None", 1) | [
"sys.path.append",
"Utils.Libraries.wvlib_light.lwvlib.WV.load",
"FeatureBuilder.FeatureBuilder.__init__"
] | [((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((264, 312), 'FeatureBuilder.FeatureBuilder.__init__', 'FeatureBuilder.__init__', (['self', 'featureSet', 'style'], {}), '(self, featureSet, style)\n', (287, 312), False, 'from FeatureBuilder import FeatureBuilder\n'), ((334, 377), 'Utils.Libraries.wvlib_light.lwvlib.WV.load', 'WV.load', (['Settings.W2VFILE', '(100000)', '(10000000)'], {}), '(Settings.W2VFILE, 100000, 10000000)\n', (341, 377), False, 'from Utils.Libraries.wvlib_light.lwvlib import WV\n')] |
from django.urls import include, path
from example.core import views as core_views
urlpatterns = [
path("", core_views.index_django),
path("jinja/", core_views.index_jinja),
path("__reload__/", include("django_browser_reload.urls")),
]
| [
"django.urls.path",
"django.urls.include"
] | [((105, 138), 'django.urls.path', 'path', (['""""""', 'core_views.index_django'], {}), "('', core_views.index_django)\n", (109, 138), False, 'from django.urls import include, path\n'), ((144, 182), 'django.urls.path', 'path', (['"""jinja/"""', 'core_views.index_jinja'], {}), "('jinja/', core_views.index_jinja)\n", (148, 182), False, 'from django.urls import include, path\n'), ((208, 245), 'django.urls.include', 'include', (['"""django_browser_reload.urls"""'], {}), "('django_browser_reload.urls')\n", (215, 245), False, 'from django.urls import include, path\n')] |
from random import randint
import pygame
pygame.init()
pygame.time.set_timer(pygame.USEREVENT, 3000)
W = 400
H = 400
WHITE = (255, 255, 255)
CARS = ('sp_humans/Walk0000.png', 'sp_humans/Walk0001.png', 'sp_humans/Walk0005.png')
CARS_SURF = []  # stores the prepared car surfaces
# the video mode must be set before calling image.load()
sc = pygame.display.set_mode((W, H))
for i in range(len(CARS)):
CARS_SURF.append(pygame.image.load(CARS[i]).convert_alpha())
class Car(pygame.sprite.Sprite):
def __init__(self, x, surf, group):
pygame.sprite.Sprite.__init__(self)
self.image = surf
self.rect = self.image.get_rect(center=(x, 0))
        self.add(group)  # add the sprite to the group
        self.speed = randint(1, 3)  # cars get different speeds
def update(self):
if self.rect.y < H:
self.rect.y += self.speed
else:
            # instead of wrapping back to the top,
            # remove the sprite from all groups
self.kill()
cars = pygame.sprite.Group()
# add the first car, which appears immediately
Car(randint(1, W), CARS_SURF[randint(0, 2)], cars)
while 1:
for i in pygame.event.get():
if i.type == pygame.QUIT:
exit()
elif i.type == pygame.USEREVENT:
Car(randint(1, W), CARS_SURF[randint(0, 2)], cars)
sc.fill(WHITE)
cars.draw(sc)
pygame.display.update()
pygame.time.delay(20)
cars.update()
| [
"pygame.init",
"pygame.time.delay",
"pygame.event.get",
"pygame.sprite.Group",
"pygame.display.set_mode",
"pygame.sprite.Sprite.__init__",
"pygame.image.load",
"pygame.time.set_timer",
"pygame.display.update",
"random.randint"
] | [((41, 54), 'pygame.init', 'pygame.init', ([], {}), '()\n', (52, 54), False, 'import pygame\n'), ((55, 100), 'pygame.time.set_timer', 'pygame.time.set_timer', (['pygame.USEREVENT', '(3000)'], {}), '(pygame.USEREVENT, 3000)\n', (76, 100), False, 'import pygame\n'), ((347, 378), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(W, H)'], {}), '((W, H))\n', (370, 378), False, 'import pygame\n'), ((1009, 1030), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (1028, 1030), False, 'import pygame\n'), ((1089, 1102), 'random.randint', 'randint', (['(1)', 'W'], {}), '(1, W)\n', (1096, 1102), False, 'from random import randint\n'), ((1160, 1178), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1176, 1178), False, 'import pygame\n'), ((1380, 1403), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1401, 1403), False, 'import pygame\n'), ((1408, 1429), 'pygame.time.delay', 'pygame.time.delay', (['(20)'], {}), '(20)\n', (1425, 1429), False, 'import pygame\n'), ((558, 593), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (587, 593), False, 'import pygame\n'), ((742, 755), 'random.randint', 'randint', (['(1)', '(3)'], {}), '(1, 3)\n', (749, 755), False, 'from random import randint\n'), ((1114, 1127), 'random.randint', 'randint', (['(0)', '(2)'], {}), '(0, 2)\n', (1121, 1127), False, 'from random import randint\n'), ((429, 455), 'pygame.image.load', 'pygame.image.load', (['CARS[i]'], {}), '(CARS[i])\n', (446, 455), False, 'import pygame\n'), ((1290, 1303), 'random.randint', 'randint', (['(1)', 'W'], {}), '(1, W)\n', (1297, 1303), False, 'from random import randint\n'), ((1315, 1328), 'random.randint', 'randint', (['(0)', '(2)'], {}), '(0, 2)\n', (1322, 1328), False, 'from random import randint\n')] |
import click
from graffan.library.analysis.targets import analyze_targets
from graffan.library.models.analysis import AnalysedIteration
from graffan.utilities.forcebalance import (
extract_target_parameters,
load_fb_force_field,
)
@click.command("analyse", help="Analyzes the output of a ForceBalance iteration.")
@click.option(
"--iteration",
default=0,
type=int,
help="The iteration to analyze.",
show_default=True,
)
def analyse_cli(iteration):
# Load in the definitions of the refit parameters.
fb_force_field = load_fb_force_field("")
parameters = extract_target_parameters(fb_force_field)
# Perform the analysis
output = AnalysedIteration(
iteration=iteration,
targets=analyze_targets("", iteration),
refit_parameters=parameters,
)
with open(f"iteration_{str(iteration).zfill(4)}.json", "w") as file:
file.write(output.json(sort_keys=True, indent=2, separators=(",", ": ")))
| [
"click.option",
"graffan.utilities.forcebalance.load_fb_force_field",
"graffan.utilities.forcebalance.extract_target_parameters",
"graffan.library.analysis.targets.analyze_targets",
"click.command"
] | [((243, 329), 'click.command', 'click.command', (['"""analyse"""'], {'help': '"""Analyzes the output of a ForceBalance iteration."""'}), "('analyse', help=\n 'Analyzes the output of a ForceBalance iteration.')\n", (256, 329), False, 'import click\n'), ((326, 432), 'click.option', 'click.option', (['"""--iteration"""'], {'default': '(0)', 'type': 'int', 'help': '"""The iteration to analyze."""', 'show_default': '(True)'}), "('--iteration', default=0, type=int, help=\n 'The iteration to analyze.', show_default=True)\n", (338, 432), False, 'import click\n'), ((556, 579), 'graffan.utilities.forcebalance.load_fb_force_field', 'load_fb_force_field', (['""""""'], {}), "('')\n", (575, 579), False, 'from graffan.utilities.forcebalance import extract_target_parameters, load_fb_force_field\n'), ((597, 638), 'graffan.utilities.forcebalance.extract_target_parameters', 'extract_target_parameters', (['fb_force_field'], {}), '(fb_force_field)\n', (622, 638), False, 'from graffan.utilities.forcebalance import extract_target_parameters, load_fb_force_field\n'), ((744, 774), 'graffan.library.analysis.targets.analyze_targets', 'analyze_targets', (['""""""', 'iteration'], {}), "('', iteration)\n", (759, 774), False, 'from graffan.library.analysis.targets import analyze_targets\n')] |
from test_poetry import wikipedia
def test_random_page_use_given_language(mock_requests_get):
wikipedia.random_page(language="de")
| [
"test_poetry.wikipedia.random_page"
] | [((100, 136), 'test_poetry.wikipedia.random_page', 'wikipedia.random_page', ([], {'language': '"""de"""'}), "(language='de')\n", (121, 136), False, 'from test_poetry import wikipedia\n')] |
import bleach
import bleach_whitelist
from django.conf import settings
from rest_framework.pagination import PageNumberPagination
def sanitize(string):
# bleach doesn't handle None so let's not pass it
if string and getattr(settings, "RESPONSE_SANITIZE_USER_INPUT", True):
return bleach.clean(
string,
tags=bleach_whitelist.markdown_tags,
attributes=bleach_whitelist.markdown_attrs,
styles=bleach_whitelist.all_styles,
)
return string
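# Illustrative call (assuming RESPONSE_SANITIZE_USER_INPUT is unset or True):
#   sanitize("<script>alert(1)</script><em>hi</em>")
# Disallowed tags such as <script> are escaped, while markdown-whitelisted
# tags such as <em> pass through unchanged.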
class LargeResultsSetPagination(PageNumberPagination):
page_size = 500
max_page_size = 1000
page_size_query_param = "page_size"
| [
"bleach.clean"
] | [((298, 440), 'bleach.clean', 'bleach.clean', (['string'], {'tags': 'bleach_whitelist.markdown_tags', 'attributes': 'bleach_whitelist.markdown_attrs', 'styles': 'bleach_whitelist.all_styles'}), '(string, tags=bleach_whitelist.markdown_tags, attributes=\n bleach_whitelist.markdown_attrs, styles=bleach_whitelist.all_styles)\n', (310, 440), False, 'import bleach\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.lost_letter import lost_letter
def test_lost_letter():
"""Test module lost_letter.py by downloading
lost_letter.csv and testing shape of
extracted data has 140 rows and 8 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = lost_letter(test_path)
try:
assert x_train.shape == (140, 8)
  except AssertionError:
    shutil.rmtree(test_path)
    raise
| [
"shutil.rmtree",
"tempfile.mkdtemp",
"observations.r.lost_letter.lost_letter"
] | [((381, 399), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (397, 399), False, 'import tempfile\n'), ((422, 444), 'observations.r.lost_letter.lost_letter', 'lost_letter', (['test_path'], {}), '(test_path)\n', (433, 444), False, 'from observations.r.lost_letter import lost_letter\n'), ((503, 527), 'shutil.rmtree', 'shutil.rmtree', (['test_path'], {}), '(test_path)\n', (516, 527), False, 'import shutil\n')] |
# importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import os
import pickle
import glob
import sys
class ParameterFinder:
def __init__(self, image, _h_channel_low=0, _h_channel_high=255, _l_channel_low=0, _l_channel_high=255,
sobelx_filter=1, sobelx_low=0, sobelx_high=0,
sobely_filter=1, sobely_low=0, sobely_high=0,
magn_filter=1, magn_low=0, magn_high=0,
direction_filter=1, direction_low=0, direction_high=0,
direction_avg_filter=3, direction_thresh=0, load_params_path= "do_not_load"):
self.image = image
self._h_channel_low = _h_channel_low
self._h_channel_high = _h_channel_high
self._l_channel_low = _l_channel_low
self._l_channel_high = _l_channel_high
self._sobelx_filter = sobelx_filter
self._sobelx_low = sobelx_low
self._sobelx_high = sobelx_high
self._sobely_filter = sobely_filter
self._sobely_low = sobely_low
self._sobely_high = sobely_high
self._magn_filter = magn_filter
self._magn_low = magn_low
self._magn_high = magn_high
self._direction_filter = direction_filter
self._direction_low = direction_low
self._direction_high = direction_high
self._direction_avg_filter = direction_avg_filter
self._direction_thresh = direction_thresh
self._post_avg_filter = 1
self._post_thresh = 1
if load_params_path != "do_not_load":
            [self._sobelx_filter, self._sobelx_low, self._sobelx_high,
             self._sobely_filter, self._sobely_low, self._sobely_high,
             self._magn_filter, self._magn_low, self._magn_high,
             self._direction_filter, self._direction_low, self._direction_high,
             self._direction_avg_filter, self._direction_thresh] = self.load_params(
                load_params_path,
                [self._sobelx_filter, self._sobelx_low, self._sobelx_high,
                 self._sobely_filter, self._sobely_low, self._sobely_high,
                 self._magn_filter, self._magn_low, self._magn_high,
                 self._direction_filter, self._direction_low, self._direction_high,
                 self._direction_avg_filter, self._direction_thresh])
print("self._sobelx_filter: ", self._sobelx_filter)
def onchange_h_channel_low(pos):
self._h_channel_low = pos
self._render()
def onchange_h_channel_high(pos):
self._h_channel_high = pos
self._render()
def onchange_l_channel_low(pos):
self._l_channel_low = pos
self._render()
def onchange_l_channel_high(pos):
self._l_channel_high = pos
self._render()
def onchange_sobelx_low(pos):
self._sobelx_low = pos
self._render()
def onchange_sobelx_high(pos):
self._sobelx_high = pos
self._render()
def onchange_sobelx_filter(pos):
self._sobelx_filter = pos
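            # Sobel kernel sizes must be odd; bump even slider values up by one.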
self._sobelx_filter += (self._sobelx_filter + 1) % 2
self._render()
def onchange_sobely_low(pos):
self._sobely_low = pos
self._render()
def onchange_sobely_high(pos):
self._sobely_high = pos
self._render()
def onchange_sobely_filter(pos):
self._sobely_filter = pos
self._sobely_filter += (self._sobely_filter + 1) % 2
self._render()
def onchange_magn_low(pos):
self._magn_low = pos
self._render()
def onchange_magn_high(pos):
self._magn_high = pos
self._render()
def onchange_magn_filter(pos):
self._magn_filter = pos
self._magn_filter += (self._magn_filter + 1) % 2
self._render()
def onchange_direction_low(pos):
self._direction_low = (pos/100)-(np.pi/2)
self._render()
def onchange_direction_high(pos):
self._direction_high = (pos/100)-(np.pi/2)
self._render()
def onchange_direction_filter(pos):
self._direction_filter = pos
self._direction_filter += (self._direction_filter + 1) % 2
self._render()
def onchange_direction_avg_filter(pos):
self._direction_avg_filter = pos
self._direction_avg_filter += (self._direction_avg_filter + 1) % 2
self._render()
def onchange_direction_thresh(pos):
self._direction_thresh = pos
self._render()
def onchange_post_avg_filter(pos):
self._post_avg_filter = pos
self._post_avg_filter += (self._post_avg_filter + 1) % 2
self._render()
def onchange_post_thresh(pos):
self._post_thresh = pos
self._render()
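        # Note (added for clarity): each *_filter handler above applies
        # `v += (v + 1) % 2`, bumping even trackbar values to the next odd
        # number, since cv2.Sobel and the averaging filters need odd kernel sizes.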
cv2.namedWindow('output')
cv2.createTrackbar('h_channel_low', 'output', self._h_channel_low, 255, onchange_h_channel_low)
cv2.createTrackbar('h_channel_high', 'output', self._h_channel_high, 255, onchange_h_channel_high)
cv2.createTrackbar('l_channel_low', 'output', self._l_channel_low, 255, onchange_l_channel_low)
cv2.createTrackbar('l_channel_high', 'output', self._l_channel_high, 255, onchange_l_channel_high)
cv2.createTrackbar('sobelx_low', 'output', self._sobelx_low, 255, onchange_sobelx_low)
cv2.createTrackbar('sobelx_high', 'output', self._sobelx_high, 255, onchange_sobelx_high)
cv2.createTrackbar('sobelx_filter', 'output', self._sobelx_filter, 21, onchange_sobelx_filter)
cv2.createTrackbar('sobely_low', 'output', self._sobely_low, 255, onchange_sobely_low)
cv2.createTrackbar('sobely_high', 'output', self._sobely_high, 255, onchange_sobely_high)
cv2.createTrackbar('sobely_filter', 'output', self._sobely_filter, 21, onchange_sobely_filter)
cv2.createTrackbar('magn_low', 'output', self._magn_low, 255, onchange_magn_low)
cv2.createTrackbar('magn_high', 'output', self._magn_high, 255, onchange_magn_high)
cv2.createTrackbar('magn_filter', 'output', self._magn_filter, 21, onchange_magn_filter)
cv2.createTrackbar('direction_low(rad)', 'output', self._direction_low, 314, onchange_direction_low)
cv2.createTrackbar('direction_high(rad)', 'output', self._direction_high, 314, onchange_direction_high)
cv2.createTrackbar('direction_filter', 'output', self._direction_filter, 21, onchange_direction_filter)
cv2.createTrackbar('direction_avg_filter', 'output', self._direction_avg_filter, 21, onchange_direction_avg_filter)
cv2.createTrackbar('direction_thresh', 'output', self._direction_thresh, 255, onchange_direction_thresh)
cv2.createTrackbar('post_avg_filter', 'output', self._post_avg_filter, 21, onchange_post_avg_filter)
cv2.createTrackbar('post_thresh', 'output', self._post_thresh, 255, onchange_post_thresh)
self._render()
print("Adjust the parameters as desired. Hit any key to close.")
cv2.waitKey(0)
cv2.destroyWindow('output')
        self.save_params([self._sobelx_filter, self._sobelx_low, self._sobelx_high,
                          self._sobely_filter, self._sobely_low, self._sobely_high,
                          self._magn_filter, self._magn_low, self._magn_high,
                          self._direction_filter, self._direction_low,
                          self._direction_high, self._direction_avg_filter,
                          self._direction_thresh])
def sobelx_low(self):
return self._sobelx_low
def sobelx_high(self):
return self._sobelx_high
def sobelx_filter(self):
return self._sobelx_filter
def sobely_low(self):
return self._sobely_low
def sobely_high(self):
return self._sobely_high
def sobely_filter(self):
return self._sobely_filter
def magn_low(self):
return self._magn_low
def magn_high(self):
return self._magn_high
def magn_filter(self):
return self._magn_filter
def direction_low(self):
return self._direction_low
def direction_high(self):
return self._direction_high
def direction_filter(self):
return self._direction_filter
def direction_avg_filter(self):
return self._direction_avg_filter
def direction_thresh(self):
return self._direction_thresh
def sobelxImage(self):
return self._sobelx_binary
def sobelyImage(self):
return self._sobely_binary
def magImage(self):
return self._mag_binary
def dirImage(self):
return self._dir_binary
def averageImg(self):
return self._avg_img
def thresholdImg(self):
return self._thres_img
def postAverageImg(self):
return self._post_avg_img
def postThresholdImg(self):
return self._post_thres_img
def setImage(self, img):
self.image = img
def extract_single_color(self, img, channel='gray'):
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
if(channel == 'r'):
return img[:,:,0]
elif(channel == 'g'):
return img[:,:,1]
elif(channel == 'b'):
return img[:,:,2]
elif(channel == 'gray'):
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
elif(channel == 'h'):
return hls[:,:,0]
elif(channel == 'l'):
return hls[:,:,1]
elif(channel == 's'):
return hls[:,:,2]
def abs_sobel_thresh(self, image_binary, orient='x', sobel_kernel=3, thresh=(0, 255)):
self.image_binary = image_binary
self.orient = orient
self.sobel_kernel = sobel_kernel
self.thresh = thresh
# Calculate directional gradient
if orient == 'x':
sobel_orient = cv2.Sobel(image_binary, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
elif orient == 'y':
sobel_orient = cv2.Sobel(image_binary, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
abs_sobel = np.absolute(sobel_orient)
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# Apply threshold
grad_binary = np.zeros_like(scaled_sobel)
        grad_binary[(scaled_sobel > thresh[0]) & (scaled_sobel < thresh[1])] = 255  # use 255 (not 1) so the mask renders correctly with cv2.imshow
return grad_binary
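    # Illustrative usage sketch (instance name and threshold values are
    # hypothetical, not tuned):
    #   gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    #   gx = finder.abs_sobel_thresh(gray, 'x', sobel_kernel=3, thresh=(20, 100))
    # The returned mask is 255 wherever the rescaled |dI/dx| lies strictly
    # between the two thresholds.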
def abs_magn_thresh(self, image_binary, magn_sobel_kernel=3, thresh_2=(0, 255)):
# Calculate gradient magnitude
self.image_binary = image_binary
self.magn_sobel_kernel = magn_sobel_kernel
self.thresh_2 = thresh_2
        sobel_x = cv2.Sobel(image_binary, cv2.CV_64F, 1, 0, ksize=magn_sobel_kernel)  # pass the int kernel size, not a callable
        sobel_y = cv2.Sobel(image_binary, cv2.CV_64F, 0, 1, ksize=magn_sobel_kernel)
# magn = np.sqrt(sobel_x * sobel_x + sobel_y * sobel_y)
magn = np.sqrt(np.power(sobel_x,2) + np.power(sobel_y,2))
scaled_magn = np.uint8(255*magn/np.max(magn))
# Apply threshold
magn_binary = np.zeros_like(scaled_magn)
magn_binary[(scaled_magn > (thresh_2[0])) & (scaled_magn < thresh_2[1])] = 255
return magn_binary
def abs_dir_threshold(self, image_binary, dir_sobel_kernel=3, dir_thresh=(-np.pi/2, np.pi/2)):
self.image_binary = image_binary
self.dir_sobel_kernel = dir_sobel_kernel
self.dir_thresh = dir_thresh
# Calculate gradient direction
sobel_x = cv2.Sobel(image_binary, cv2.CV_64F, 1, 0, ksize=dir_sobel_kernel)
sobel_y = cv2.Sobel(image_binary, cv2.CV_64F, 0, 1, ksize=dir_sobel_kernel)
abs_grad_x = np.absolute(sobel_x)
abs_grad_y = np.absolute(sobel_y)
direction_grad = np.arctan2(abs_grad_y, abs_grad_x)
# Apply threshold
dir_binary = np.zeros_like(direction_grad)
dir_binary[(direction_grad > dir_thresh[0]) & (direction_grad < dir_thresh[1])] = 1
return dir_binary
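    # Note (added): np.arctan2(|gy|, |gx|) always falls in [0, pi/2], so the
    # effective direction window is the intersection of (dir_thresh[0],
    # dir_thresh[1]) with that range; a negative lower bound behaves like 0.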
def abs_average(self, binary_image, filter_size=3):
# non_binary= np.zeros_like(binary_image)
# non_binary[(binary_image > 0)] = 255
# binary_image.convertTo(binary_image,CV_8U, 1/255)
# non_binary = binary_image.view('float32')
# non_binary[:] = binary_image
np.set_printoptions(threshold=sys.maxsize)
# print("binary_image: ", binary_image)
non_binary = np.zeros_like(binary_image)
non_binary[binary_image > 0] = 255
non_binary[binary_image == 0] = 1
# print("non_binary: ", non_binary)
# non_binary = zeros
output_image = cv2.blur(non_binary, (filter_size, filter_size))
# output_image = cv2.medianBlur(binary_image, filter_size)
return output_image
def abs_threshold(self, image, thres_low, thres_high=255):
binary_image = np.zeros_like(image)
binary_image[(image > thres_low) & (image < thres_high)] = 255
return binary_image
def _render(self, save_name="no_file_name"):
single_channel_h = self.extract_single_color(self.image, 'h')
single_channel_l = self.extract_single_color(self.image, 'l')
binary_channel_h = self.abs_threshold(single_channel_h, self._h_channel_low, self._h_channel_high)
binary_channel_l = self.abs_threshold(single_channel_l, self._l_channel_low, self._l_channel_high)
channels_binary = np.zeros_like(binary_channel_h)
channels_binary[(binary_channel_h > 0) | (binary_channel_l > 0)] = 255
self._sobelx_binary = self.abs_sobel_thresh(self.image, 'x', self._sobelx_filter, (self._sobelx_low, self._sobelx_high))
self._sobely_binary = self.abs_sobel_thresh(self.image, 'y', self._sobely_filter, (self._sobely_low, self._sobely_high))
        self._mag_binary = self.abs_magn_thresh(self.image, self._magn_filter, (self._magn_low, self._magn_high))  # pass the stored int, not the accessor method
self._dir_binary = self.abs_dir_threshold(self.image, self._direction_filter, (self._direction_low, self._direction_high))
self._avg_img = self.abs_average(self._dir_binary, self._direction_avg_filter)
self._thres_img = self.abs_threshold(self._avg_img, self._direction_thresh)
self.combined = np.zeros_like(self._sobelx_binary)
# self.combined[((self._sobelx_binary == 255) & (self._sobely_binary == 255)) | ((self._mag_binary == 255) & (self._thres_img == 255))] = 255
# self.combined[((self._sobelx_binary == 255) & (self._sobely_binary == 255)) | ((self._thres_img == 255))] = 255
self.combined[((self._sobelx_binary == 255) & (self._sobely_binary == 255)) | (self._mag_binary == 255) | (self._thres_img == 255)] = 255
# self.combined[((self._sobelx_binary == 255) & (self._sobely_binary == 255)) | (self._mag_binary == 255)] = 255
self._post_avg_img = self.abs_average(channels_binary, self._post_avg_filter)
self._post_thres_img = self.abs_threshold(self._post_avg_img, self._post_thresh)
if save_name == "no_file_name":
cv2.imshow('sobelx_binary', self._sobelx_binary)
cv2.imshow('sobely_binary', self._sobely_binary)
cv2.imshow('mag_binary', self._mag_binary)
cv2.imshow('direction_binary', self._dir_binary)
cv2.imshow('direction_&_avg', self._avg_img)
cv2.imshow('direction_&_avg_thresh', self._thres_img)
cv2.imshow('channels_binary', channels_binary)
self.color_binary = np.dstack(( np.zeros_like(self._sobelx_binary),((self._sobelx_binary == 255) & (self._sobely_binary == 255)), ((self._mag_binary == 255) & (self._thres_img == 255)))) * 255
if save_name == "no_file_name":
cv2.imshow('output', channels_binary)
else:
            cv2.imwrite(f"test_output/output_{save_name}", channels_binary)  # prefix instead of suffix so the file keeps its image extension for cv2.imwrite
# cv2.imshow('output', self.color_binary)
def save_params(self, var_list):
with open("store_params/params_new",'wb') as f:
pickle.dump(var_list, f)
def load_params(self, param_file, var_list):
with open(param_file, 'rb') as f:
var_list = pickle.load(f)
return var_list
if __name__ == "__main__":
# parser = argparse.ArgumentParser(description='Visualizes the line for hough transform.')
# parser.add_argument('FILENAME')
# args = parser.parse_args()
WORKING_DIR = "/home/nbenndo/Documents/Programming/Udacity/SelfDrivingCarND/CarND-Advanced-Lane-Lines/"
os.chdir(WORKING_DIR)
FILENAME = 'test_images/test4.jpg'
IMG = cv2.imread(FILENAME)#, cv2.IMREAD_GRAYSCALE)
# crop_y_border = IMG.shape[0]//2 + 120
# img_crop_top = IMG[0:crop_y_border-1, 0:IMG.shape[1]]
# img_crop_bottom = IMG[crop_y_border:IMG.shape[0], 0:IMG.shape[1]]
# IMG = np.concatenate((img_crop_top, img_crop_bottom), axis=0)
cv2.imshow('input', IMG)
# cv2.waitKey(0)
param_finder = ParameterFinder(IMG, _h_channel_low=96, _h_channel_high=102, _l_channel_low=220, _l_channel_high=255,
sobelx_filter=3, sobelx_low=16, sobelx_high=255,
sobely_filter=3, sobely_low=36, sobely_high=255,
magn_filter=3, magn_low=15, magn_high=255,
direction_filter=15, direction_low=229, direction_high=287,
direction_avg_filter=11, direction_thresh=143)#, load_params_path="store_params/params_new")
# calculate all images with last parameter
os.chdir(f"{WORKING_DIR}/test_images")
images_test = glob.glob('*.jpg', recursive=False)
os.chdir(WORKING_DIR)
for image_path in images_test:
image = cv2.imread(f"test_images/{image_path}")
param_finder.setImage(image)
param_finder._render(image_path)
# print("Edge parameters:")
# print("GaussianBlur Filter Size: %f" % param_finder.filterSize())
# print("Threshold1: %f" % param_finder.threshold1())
# print("Threshold2: %f" % param_finder.threshold2())
# (head, tail) = os.path.split(args.FILENAME)
# (root, ext) = os.path.splitext(tail)
# smoothed_filename = os.path.join("output_images", root + "-smoothed" + ext)
# edge_filename = os.path.join("output_images", root + "-edges" + ext)
# cv2.imwrite(smoothed_filename, param_finder.smoothedImage())
# cv2.imwrite(edge_filename, param_finder.edgeImage())
cv2.destroyAllWindows() | [
"cv2.imshow",
"cv2.destroyAllWindows",
"numpy.arctan2",
"numpy.max",
"cv2.waitKey",
"cv2.blur",
"glob.glob",
"pickle.load",
"cv2.cvtColor",
"cv2.createTrackbar",
"cv2.imread",
"cv2.namedWindow",
"numpy.set_printoptions",
"cv2.imwrite",
"pickle.dump",
"numpy.power",
"cv2.destroyWindow... | [((16370, 16391), 'os.chdir', 'os.chdir', (['WORKING_DIR'], {}), '(WORKING_DIR)\n', (16378, 16391), False, 'import os\n'), ((16442, 16462), 'cv2.imread', 'cv2.imread', (['FILENAME'], {}), '(FILENAME)\n', (16452, 16462), False, 'import cv2\n'), ((16743, 16767), 'cv2.imshow', 'cv2.imshow', (['"""input"""', 'IMG'], {}), "('input', IMG)\n", (16753, 16767), False, 'import cv2\n'), ((17461, 17499), 'os.chdir', 'os.chdir', (['f"""{WORKING_DIR}/test_images"""'], {}), "(f'{WORKING_DIR}/test_images')\n", (17469, 17499), False, 'import os\n'), ((17518, 17553), 'glob.glob', 'glob.glob', (['"""*.jpg"""'], {'recursive': '(False)'}), "('*.jpg', recursive=False)\n", (17527, 17553), False, 'import glob\n'), ((17558, 17579), 'os.chdir', 'os.chdir', (['WORKING_DIR'], {}), '(WORKING_DIR)\n', (17566, 17579), False, 'import os\n'), ((18356, 18379), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (18377, 18379), False, 'import cv2\n'), ((4871, 4896), 'cv2.namedWindow', 'cv2.namedWindow', (['"""output"""'], {}), "('output')\n", (4886, 4896), False, 'import cv2\n'), ((4906, 5005), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""h_channel_low"""', '"""output"""', 'self._h_channel_low', '(255)', 'onchange_h_channel_low'], {}), "('h_channel_low', 'output', self._h_channel_low, 255,\n onchange_h_channel_low)\n", (4924, 5005), False, 'import cv2\n'), ((5010, 5112), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""h_channel_high"""', '"""output"""', 'self._h_channel_high', '(255)', 'onchange_h_channel_high'], {}), "('h_channel_high', 'output', self._h_channel_high, 255,\n onchange_h_channel_high)\n", (5028, 5112), False, 'import cv2\n'), ((5126, 5225), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""l_channel_low"""', '"""output"""', 'self._l_channel_low', '(255)', 'onchange_l_channel_low'], {}), "('l_channel_low', 'output', self._l_channel_low, 255,\n onchange_l_channel_low)\n", (5144, 5225), False, 'import cv2\n'), ((5230, 5332), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""l_channel_high"""', '"""output"""', 'self._l_channel_high', '(255)', 'onchange_l_channel_high'], {}), "('l_channel_high', 'output', self._l_channel_high, 255,\n onchange_l_channel_high)\n", (5248, 5332), False, 'import cv2\n'), ((5339, 5429), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""sobelx_low"""', '"""output"""', 'self._sobelx_low', '(255)', 'onchange_sobelx_low'], {}), "('sobelx_low', 'output', self._sobelx_low, 255,\n onchange_sobelx_low)\n", (5357, 5429), False, 'import cv2\n'), ((5434, 5527), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""sobelx_high"""', '"""output"""', 'self._sobelx_high', '(255)', 'onchange_sobelx_high'], {}), "('sobelx_high', 'output', self._sobelx_high, 255,\n onchange_sobelx_high)\n", (5452, 5527), False, 'import cv2\n'), ((5532, 5630), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""sobelx_filter"""', '"""output"""', 'self._sobelx_filter', '(21)', 'onchange_sobelx_filter'], {}), "('sobelx_filter', 'output', self._sobelx_filter, 21,\n onchange_sobelx_filter)\n", (5550, 5630), False, 'import cv2\n'), ((5636, 5726), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""sobely_low"""', '"""output"""', 'self._sobely_low', '(255)', 'onchange_sobely_low'], {}), "('sobely_low', 'output', self._sobely_low, 255,\n onchange_sobely_low)\n", (5654, 5726), False, 'import cv2\n'), ((5731, 5824), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""sobely_high"""', '"""output"""', 'self._sobely_high', '(255)', 'onchange_sobely_high'], {}), "('sobely_high', 'output', 
self._sobely_high, 255,\n onchange_sobely_high)\n", (5749, 5824), False, 'import cv2\n'), ((5829, 5927), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""sobely_filter"""', '"""output"""', 'self._sobely_filter', '(21)', 'onchange_sobely_filter'], {}), "('sobely_filter', 'output', self._sobely_filter, 21,\n onchange_sobely_filter)\n", (5847, 5927), False, 'import cv2\n'), ((5933, 6018), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""magn_low"""', '"""output"""', 'self._magn_low', '(255)', 'onchange_magn_low'], {}), "('magn_low', 'output', self._magn_low, 255, onchange_magn_low\n )\n", (5951, 6018), False, 'import cv2\n'), ((6022, 6109), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""magn_high"""', '"""output"""', 'self._magn_high', '(255)', 'onchange_magn_high'], {}), "('magn_high', 'output', self._magn_high, 255,\n onchange_magn_high)\n", (6040, 6109), False, 'import cv2\n'), ((6114, 6206), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""magn_filter"""', '"""output"""', 'self._magn_filter', '(21)', 'onchange_magn_filter'], {}), "('magn_filter', 'output', self._magn_filter, 21,\n onchange_magn_filter)\n", (6132, 6206), False, 'import cv2\n'), ((6220, 6324), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""direction_low(rad)"""', '"""output"""', 'self._direction_low', '(314)', 'onchange_direction_low'], {}), "('direction_low(rad)', 'output', self._direction_low, 314,\n onchange_direction_low)\n", (6238, 6324), False, 'import cv2\n'), ((6329, 6437), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""direction_high(rad)"""', '"""output"""', 'self._direction_high', '(314)', 'onchange_direction_high'], {}), "('direction_high(rad)', 'output', self._direction_high, \n 314, onchange_direction_high)\n", (6347, 6437), False, 'import cv2\n'), ((6441, 6548), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""direction_filter"""', '"""output"""', 'self._direction_filter', '(21)', 'onchange_direction_filter'], {}), "('direction_filter', 'output', self._direction_filter, 21,\n onchange_direction_filter)\n", (6459, 6548), False, 'import cv2\n'), ((6553, 6673), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""direction_avg_filter"""', '"""output"""', 'self._direction_avg_filter', '(21)', 'onchange_direction_avg_filter'], {}), "('direction_avg_filter', 'output', self.\n _direction_avg_filter, 21, onchange_direction_avg_filter)\n", (6571, 6673), False, 'import cv2\n'), ((6677, 6786), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""direction_thresh"""', '"""output"""', 'self._direction_thresh', '(255)', 'onchange_direction_thresh'], {}), "('direction_thresh', 'output', self._direction_thresh, \n 255, onchange_direction_thresh)\n", (6695, 6786), False, 'import cv2\n'), ((6792, 6896), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""post_avg_filter"""', '"""output"""', 'self._post_avg_filter', '(21)', 'onchange_post_avg_filter'], {}), "('post_avg_filter', 'output', self._post_avg_filter, 21,\n onchange_post_avg_filter)\n", (6810, 6896), False, 'import cv2\n'), ((6901, 6994), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""post_thresh"""', '"""output"""', 'self._post_thresh', '(255)', 'onchange_post_thresh'], {}), "('post_thresh', 'output', self._post_thresh, 255,\n onchange_post_thresh)\n", (6919, 6994), False, 'import cv2\n'), ((7107, 7121), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (7118, 7121), False, 'import cv2\n'), ((7131, 7158), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""output"""'], {}), "('output')\n", (7148, 7158), False, 'import cv2\n'), ((8986, 9022), 'cv2.cvtColor', 
'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (8998, 9022), False, 'import cv2\n'), ((10003, 10028), 'numpy.absolute', 'np.absolute', (['sobel_orient'], {}), '(sobel_orient)\n', (10014, 10028), True, 'import numpy as np\n'), ((10142, 10169), 'numpy.zeros_like', 'np.zeros_like', (['scaled_sobel'], {}), '(scaled_sobel)\n', (10155, 10169), True, 'import numpy as np\n'), ((10967, 10993), 'numpy.zeros_like', 'np.zeros_like', (['scaled_magn'], {}), '(scaled_magn)\n', (10980, 10993), True, 'import numpy as np\n'), ((11403, 11468), 'cv2.Sobel', 'cv2.Sobel', (['image_binary', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': 'dir_sobel_kernel'}), '(image_binary, cv2.CV_64F, 1, 0, ksize=dir_sobel_kernel)\n', (11412, 11468), False, 'import cv2\n'), ((11487, 11552), 'cv2.Sobel', 'cv2.Sobel', (['image_binary', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': 'dir_sobel_kernel'}), '(image_binary, cv2.CV_64F, 0, 1, ksize=dir_sobel_kernel)\n', (11496, 11552), False, 'import cv2\n'), ((11574, 11594), 'numpy.absolute', 'np.absolute', (['sobel_x'], {}), '(sobel_x)\n', (11585, 11594), True, 'import numpy as np\n'), ((11616, 11636), 'numpy.absolute', 'np.absolute', (['sobel_y'], {}), '(sobel_y)\n', (11627, 11636), True, 'import numpy as np\n'), ((11662, 11696), 'numpy.arctan2', 'np.arctan2', (['abs_grad_y', 'abs_grad_x'], {}), '(abs_grad_y, abs_grad_x)\n', (11672, 11696), True, 'import numpy as np\n'), ((11744, 11773), 'numpy.zeros_like', 'np.zeros_like', (['direction_grad'], {}), '(direction_grad)\n', (11757, 11773), True, 'import numpy as np\n'), ((12211, 12253), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (12230, 12253), True, 'import numpy as np\n'), ((12323, 12350), 'numpy.zeros_like', 'np.zeros_like', (['binary_image'], {}), '(binary_image)\n', (12336, 12350), True, 'import numpy as np\n'), ((12533, 12581), 'cv2.blur', 'cv2.blur', (['non_binary', '(filter_size, filter_size)'], {}), '(non_binary, (filter_size, filter_size))\n', (12541, 12581), False, 'import cv2\n'), ((12775, 12795), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (12788, 12795), True, 'import numpy as np\n'), ((13326, 13357), 'numpy.zeros_like', 'np.zeros_like', (['binary_channel_h'], {}), '(binary_channel_h)\n', (13339, 13357), True, 'import numpy as np\n'), ((14135, 14169), 'numpy.zeros_like', 'np.zeros_like', (['self._sobelx_binary'], {}), '(self._sobelx_binary)\n', (14148, 14169), True, 'import numpy as np\n'), ((17631, 17670), 'cv2.imread', 'cv2.imread', (['f"""test_images/{image_path}"""'], {}), "(f'test_images/{image_path}')\n", (17641, 17670), False, 'import cv2\n'), ((9804, 9865), 'cv2.Sobel', 'cv2.Sobel', (['image_binary', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': 'sobel_kernel'}), '(image_binary, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n', (9813, 9865), False, 'import cv2\n'), ((14938, 14986), 'cv2.imshow', 'cv2.imshow', (['"""sobelx_binary"""', 'self._sobelx_binary'], {}), "('sobelx_binary', self._sobelx_binary)\n", (14948, 14986), False, 'import cv2\n'), ((14999, 15047), 'cv2.imshow', 'cv2.imshow', (['"""sobely_binary"""', 'self._sobely_binary'], {}), "('sobely_binary', self._sobely_binary)\n", (15009, 15047), False, 'import cv2\n'), ((15060, 15102), 'cv2.imshow', 'cv2.imshow', (['"""mag_binary"""', 'self._mag_binary'], {}), "('mag_binary', self._mag_binary)\n", (15070, 15102), False, 'import cv2\n'), ((15115, 15163), 'cv2.imshow', 'cv2.imshow', (['"""direction_binary"""', 'self._dir_binary'], {}), "('direction_binary', 
self._dir_binary)\n", (15125, 15163), False, 'import cv2\n'), ((15176, 15220), 'cv2.imshow', 'cv2.imshow', (['"""direction_&_avg"""', 'self._avg_img'], {}), "('direction_&_avg', self._avg_img)\n", (15186, 15220), False, 'import cv2\n'), ((15233, 15286), 'cv2.imshow', 'cv2.imshow', (['"""direction_&_avg_thresh"""', 'self._thres_img'], {}), "('direction_&_avg_thresh', self._thres_img)\n", (15243, 15286), False, 'import cv2\n'), ((15299, 15345), 'cv2.imshow', 'cv2.imshow', (['"""channels_binary"""', 'channels_binary'], {}), "('channels_binary', channels_binary)\n", (15309, 15345), False, 'import cv2\n'), ((15599, 15636), 'cv2.imshow', 'cv2.imshow', (['"""output"""', 'channels_binary'], {}), "('output', channels_binary)\n", (15609, 15636), False, 'import cv2\n'), ((15664, 15727), 'cv2.imwrite', 'cv2.imwrite', (['f"""test_output/{save_name}_output"""', 'channels_binary'], {}), "(f'test_output/{save_name}_output', channels_binary)\n", (15675, 15727), False, 'import cv2\n'), ((15883, 15907), 'pickle.dump', 'pickle.dump', (['var_list', 'f'], {}), '(var_list, f)\n', (15894, 15907), False, 'import pickle\n'), ((16023, 16037), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (16034, 16037), False, 'import pickle\n'), ((9921, 9982), 'cv2.Sobel', 'cv2.Sobel', (['image_binary', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': 'sobel_kernel'}), '(image_binary, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n', (9930, 9982), False, 'import cv2\n'), ((10075, 10092), 'numpy.max', 'np.max', (['abs_sobel'], {}), '(abs_sobel)\n', (10081, 10092), True, 'import numpy as np\n'), ((10822, 10842), 'numpy.power', 'np.power', (['sobel_x', '(2)'], {}), '(sobel_x, 2)\n', (10830, 10842), True, 'import numpy as np\n'), ((10844, 10864), 'numpy.power', 'np.power', (['sobel_y', '(2)'], {}), '(sobel_y, 2)\n', (10852, 10864), True, 'import numpy as np\n'), ((10905, 10917), 'numpy.max', 'np.max', (['magn'], {}), '(magn)\n', (10911, 10917), True, 'import numpy as np\n'), ((15386, 15420), 'numpy.zeros_like', 'np.zeros_like', (['self._sobelx_binary'], {}), '(self._sobelx_binary)\n', (15399, 15420), True, 'import numpy as np\n'), ((9255, 9292), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (9267, 9292), False, 'import cv2\n')] |
#
# Unicode escape format setting dialog for the following plugins:
# Unicode escape
# Unicode unescape
#
# Copyright (c) 2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import tkinter
import tkinter.ttk
# Print setting to stdout
def print_setting(r, cf, ce):
escape_format = {"\\uXXXX (Java, JavaScript)": "\\u",
"\\uXXXX and \\UXXXXXXXX (C, Python)": "\\U",
"\\u{XXXX} (JavaScript ES6+, PHP 7+)": "\\u{",
"`u{XXXX} (PowerShell 6+)": "`u",
"%uXXXX (Legacy JavaScript)": "%u",
"U+XXXX (Unicode code point)": "U+"}
print("%s\t%s" % (escape_format[cf.get()], ce.get()))
root.quit()
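# With the defaults selected below ("\uXXXX (Java, JavaScript)" and "UTF-8"),
# the dialog prints one tab-separated line to stdout, e.g.:
#   \u<TAB>UTF-8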
# Create input dialog
root = tkinter.Tk()
root.title("Unicode escape/unescape format setting")
root.protocol("WM_DELETE_WINDOW", (lambda r=root: r.quit()))
label_format = tkinter.Label(root, text="Unicode escape format:")
label_format.grid(row=0, column=0, padx=5, pady=5, sticky="w")
combo_format = tkinter.ttk.Combobox(root, width=40, state="readonly")
combo_format["values"] = ("\\uXXXX (Java, JavaScript)",
"\\uXXXX and \\UXXXXXXXX (C, Python)",
"\\u{XXXX} (JavaScript ES6+, PHP 7+)",
"`u{XXXX} (PowerShell 6+)",
"%uXXXX (Legacy JavaScript)",
"U+XXXX (Unicode code point)")
combo_format.current(0)
combo_format.grid(row=0, column=1, padx=5, pady=5, sticky="w")
if len(sys.argv) > 1 and sys.argv[1] == "-e":
label_encoding = tkinter.Label(root, text="Input encoding:")
elif len(sys.argv) > 1 and sys.argv[1] == "-u":
label_encoding = tkinter.Label(root, text="Output encoding:")
else:
label_encoding = tkinter.Label(root, text="Encoding:")
label_encoding.grid(row=1, column=0, padx=5, pady=5, sticky="w")
combo_encoding = tkinter.ttk.Combobox(root, width=10, state="readonly")
combo_encoding["values"] = ("UTF-8", "UTF-16LE", "UTF-16BE")
combo_encoding.current(0)
combo_encoding.grid(row=1, column=1, padx=5, pady=5, sticky="w")
button = tkinter.Button(root, text='OK', command=(lambda r=root, cf=combo_format, ce=combo_encoding: print_setting(r, cf, ce)))
button.grid(row=2, column=0, padx=5, pady=5, columnspan=3)
button.focus() # Focus to this widget
# Set callback functions
for x in (combo_format, combo_encoding, button):
x.bind("<Return>", lambda event, r=root, cf=combo_format, ce=combo_encoding: print_setting(r, cf, ce))
# Adjust window position
sw = root.winfo_screenwidth()
sh = root.winfo_screenheight()
root.update_idletasks() # Necessary to get width and height of the window
ww = root.winfo_width()
wh = root.winfo_height()
root.geometry('+%d+%d' % ((sw/2) - (ww/2), (sh/2) - (wh/2)))
root.mainloop()
| [
"tkinter.Tk",
"tkinter.Label",
"tkinter.ttk.Combobox"
] | [((2079, 2091), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (2089, 2091), False, 'import tkinter\n'), ((2226, 2276), 'tkinter.Label', 'tkinter.Label', (['root'], {'text': '"""Unicode escape format:"""'}), "(root, text='Unicode escape format:')\n", (2239, 2276), False, 'import tkinter\n'), ((2359, 2413), 'tkinter.ttk.Combobox', 'tkinter.ttk.Combobox', (['root'], {'width': '(40)', 'state': '"""readonly"""'}), "(root, width=40, state='readonly')\n", (2379, 2413), False, 'import tkinter\n'), ((3266, 3320), 'tkinter.ttk.Combobox', 'tkinter.ttk.Combobox', (['root'], {'width': '(10)', 'state': '"""readonly"""'}), "(root, width=10, state='readonly')\n", (3286, 3320), False, 'import tkinter\n'), ((2951, 2994), 'tkinter.Label', 'tkinter.Label', (['root'], {'text': '"""Input encoding:"""'}), "(root, text='Input encoding:')\n", (2964, 2994), False, 'import tkinter\n'), ((3066, 3110), 'tkinter.Label', 'tkinter.Label', (['root'], {'text': '"""Output encoding:"""'}), "(root, text='Output encoding:')\n", (3079, 3110), False, 'import tkinter\n'), ((3140, 3177), 'tkinter.Label', 'tkinter.Label', (['root'], {'text': '"""Encoding:"""'}), "(root, text='Encoding:')\n", (3153, 3177), False, 'import tkinter\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-06 02:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('item', '0018_auto_20160906_0234'),
('player', '0023_auto_20160612_1804'),
]
operations = [
migrations.CreateModel(
name='Collection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
('reward_xp', models.IntegerField(default=0)),
('date_created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True)),
('date_updated', django_extensions.db.fields.ModificationDateTimeField(auto_now=True)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='PlayerCollection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True)),
('date_completed', models.DateTimeField(blank=True, null=True)),
('collection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='player.Collection')),
('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='player.Player')),
],
),
migrations.AlterModelOptions(
name='achievement',
options={'ordering': ['name']},
),
migrations.AlterModelOptions(
name='skill',
options={'ordering': ['name']},
),
migrations.AlterField(
model_name='achievement',
name='name',
field=models.CharField(max_length=30, unique=True),
),
migrations.AlterField(
model_name='skill',
name='name',
field=models.CharField(max_length=25, unique=True),
),
migrations.AlterField(
model_name='skill',
name='skill_type',
field=models.CharField(choices=[(b'ATTK', b'Attack'), (b'SPCL', b'Special'), (b'HEAL', b'Heal'), (b'PASS', b'Passive')], max_length=4),
),
migrations.AddField(
model_name='collection',
name='achievement',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='collection', to='player.Achievement'),
),
migrations.AddField(
model_name='collection',
name='items',
field=models.ManyToManyField(blank=True, related_name='collections', to='item.ItemType'),
),
migrations.AddField(
model_name='collection',
name='reward_item',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='item.ItemType'),
),
migrations.AlterUniqueTogether(
name='playercollection',
unique_together=set([('player', 'collection')]),
),
]
| [
"django.db.models.OneToOneField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.migrations.AlterModelOptions",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((1701, 1786), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""achievement"""', 'options': "{'ordering': ['name']}"}), "(name='achievement', options={'ordering': ['name']}\n )\n", (1729, 1786), False, 'from django.db import migrations, models\n'), ((1826, 1900), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""skill"""', 'options': "{'ordering': ['name']}"}), "(name='skill', options={'ordering': ['name']})\n", (1854, 1900), False, 'from django.db import migrations, models\n'), ((2049, 2093), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'unique': '(True)'}), '(max_length=30, unique=True)\n', (2065, 2093), False, 'from django.db import migrations, models\n'), ((2212, 2256), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(25)', 'unique': '(True)'}), '(max_length=25, unique=True)\n', (2228, 2256), False, 'from django.db import migrations, models\n'), ((2381, 2514), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(b'ATTK', b'Attack'), (b'SPCL', b'Special'), (b'HEAL', b'Heal'), (b'PASS',\n b'Passive')]", 'max_length': '(4)'}), "(choices=[(b'ATTK', b'Attack'), (b'SPCL', b'Special'), (\n b'HEAL', b'Heal'), (b'PASS', b'Passive')], max_length=4)\n", (2397, 2514), False, 'from django.db import migrations, models\n'), ((2638, 2783), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""collection"""', 'to': '"""player.Achievement"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='collection', to='player.Achievement')\n", (2658, 2783), False, 'from django.db import migrations, models\n'), ((2901, 2988), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""collections"""', 'to': '"""item.ItemType"""'}), "(blank=True, related_name='collections', to=\n 'item.ItemType')\n", (2923, 2988), False, 'from django.db import migrations, models\n'), ((3112, 3240), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""+"""', 'to': '"""item.ItemType"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='+', to='item.ItemType')\n", (3129, 3240), False, 'from django.db import migrations, models\n'), ((508, 601), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (524, 601), False, 'from django.db import migrations, models\n'), ((625, 670), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (641, 670), False, 'from django.db import migrations, models\n'), ((703, 733), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (722, 733), False, 'from django.db import migrations, models\n'), ((1158, 1251), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1174, 1251), False, 'from django.db import 
migrations, models\n'), ((1389, 1432), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1409, 1432), False, 'from django.db import migrations, models\n'), ((1466, 1557), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""player.Collection"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'player.Collection')\n", (1483, 1557), False, 'from django.db import migrations, models\n'), ((1582, 1669), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""player.Player"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'player.Player')\n", (1599, 1669), False, 'from django.db import migrations, models\n')] |
import os
from aztk.models.plugins.plugin_configuration import PluginConfiguration, PluginPort, PluginTargetRole
from aztk.models.plugins.plugin_file import PluginFile
dir_path = os.path.dirname(os.path.realpath(__file__))
class SparkUIProxyPlugin(PluginConfiguration):
def __init__(self):
super().__init__(
name="spark_ui_proxy",
ports=[PluginPort(internal=9999, public=True)],
target_role=PluginTargetRole.Master,
execute="spark_ui_proxy.sh",
args=["localhost:8080", "9999"],
files=[
PluginFile("spark_ui_proxy.sh", os.path.join(dir_path, "spark_ui_proxy.sh")),
PluginFile("spark_ui_proxy.py", os.path.join(dir_path, "spark_ui_proxy.py")),
],
)
| [
"os.path.realpath",
"aztk.models.plugins.plugin_configuration.PluginPort",
"os.path.join"
] | [((196, 222), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (212, 222), False, 'import os\n'), ((377, 415), 'aztk.models.plugins.plugin_configuration.PluginPort', 'PluginPort', ([], {'internal': '(9999)', 'public': '(True)'}), '(internal=9999, public=True)\n', (387, 415), False, 'from aztk.models.plugins.plugin_configuration import PluginConfiguration, PluginPort, PluginTargetRole\n'), ((621, 664), 'os.path.join', 'os.path.join', (['dir_path', '"""spark_ui_proxy.sh"""'], {}), "(dir_path, 'spark_ui_proxy.sh')\n", (633, 664), False, 'import os\n'), ((715, 758), 'os.path.join', 'os.path.join', (['dir_path', '"""spark_ui_proxy.py"""'], {}), "(dir_path, 'spark_ui_proxy.py')\n", (727, 758), False, 'import os\n')] |
#!/usr/bin/env python
#=========================================================================
# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public
# License (GPL) version 3, as described at www.opensource.org.
# Copyright (C)2021 <NAME> <<EMAIL>>
#=========================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals, generators, nested_scopes, with_statement)
from builtins import (bytes, dict, int, list, object, range, str, ascii,
chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
# The above imports should allow this program to run in both Python 2 and
# Python 3. You might need to update your version of module "future".
import sys
import ProgramName
from GffTranscriptReader import GffTranscriptReader
from Gene import Gene
from BedReader import BedReader
from Bed6Record import Bed6Record
def processChrom(chrom,geneHash,dhsHash,maxDist):
genes=geneHash.get(chrom)
if(genes is None): return
dhss=dhsHash.get(chrom)
if(dhss is None): return
genes.sort(key=lambda x: x.getBegin())
dhss.sort(key=lambda x: x.getBegin())
proximity(genes,dhss,maxDist)
def distance(gene,dhs):
geneMid=(gene.getBegin()+gene.getEnd())/2
dhsMid=(dhs.getBegin()+dhs.getEnd())/2
gene.mid=geneMid; dhs.mid=dhsMid
d=geneMid-dhsMid
if(d<0): d=-d
return d
def proximity(genes,dhss,maxDist):
i=0; j=0
N_GENES=len(genes); N_DHS=len(dhss)
while(i<N_GENES and j<N_DHS):
gene=genes[i]; dhs=dhss[j]
d=distance(gene,dhs)
if(d<=maxDist):
print(dhs.name,gene.getID(),sep="\t")
if(gene.mid<dhs.mid): i+=1
else: j+=1
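# Illustrative trace of the two-pointer sweep above (hypothetical coordinates):
# with gene midpoints [100, 500], DHS midpoints [120, 900] and maxDist=50, the
# pair (100, 120) is printed (d=20) and the gene pointer advances; neither
# remaining pair is within maxDist, so nothing else is printed.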
#=========================================================================
# main()
#=========================================================================
if(len(sys.argv)!=4):
exit(ProgramName.get()+" <dhs.bed> <genes.gff> <max-distance>\n")
(dhsFile,genesFile,maxDist)=sys.argv[1:]
maxDist=int(maxDist)
gffReader=GffTranscriptReader()
geneHash=gffReader.hashGenesBySubstrate(genesFile)
dhsHash=BedReader.hashBySubstrate(dhsFile)
keys=geneHash.keys()
for chrom in keys:
processChrom(chrom,geneHash,dhsHash,maxDist)
| [
"builtins.int",
"BedReader.BedReader.hashBySubstrate",
"GffTranscriptReader.GffTranscriptReader",
"ProgramName.get"
] | [((2050, 2062), 'builtins.int', 'int', (['maxDist'], {}), '(maxDist)\n', (2053, 2062), False, 'from builtins import bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip\n'), ((2074, 2095), 'GffTranscriptReader.GffTranscriptReader', 'GffTranscriptReader', ([], {}), '()\n', (2093, 2095), False, 'from GffTranscriptReader import GffTranscriptReader\n'), ((2155, 2189), 'BedReader.BedReader.hashBySubstrate', 'BedReader.hashBySubstrate', (['dhsFile'], {}), '(dhsFile)\n', (2180, 2189), False, 'from BedReader import BedReader\n'), ((1940, 1957), 'ProgramName.get', 'ProgramName.get', ([], {}), '()\n', (1955, 1957), False, 'import ProgramName\n')] |
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import numpy as np
#setting up picamera
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 24
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)
template = cv2.imread("assets/stop_sign.jpg",0) #read template image (stop sign)
template = cv2.resize(template, (0,0), fx=0.7, fy=0.7) #change size of template to match size of sign in source image
w, h = template.shape[::-1] #get width and height of sign
stop = False
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
time.sleep(0.0)
image = frame.array
source_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
result = cv2.matchTemplate(source_gray,template,cv2.TM_CCOEFF_NORMED) #find match in source image
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result) #get location of match in source image (max_loc), get correlation (max_val)
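    # cv2.TM_CCOEFF_NORMED scores lie in [-1, 1]; max_val close to 1 indicates a strong match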
threshold = 0.5 #set threshold for correlation of match to template image
if max_val >= threshold:
stop = True
cv2.rectangle(image, max_loc, (max_loc[0] + w, max_loc[1] + h), (0,255,255), 2) #draw rectangle based on max_loc
else:
stop = False
if stop == True:
print("STOP!")
# Display the resulting frame
cv2.imshow('frame',image)
rawCapture.truncate(0)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"picamera.PiCamera",
"time.sleep",
"cv2.imshow",
"cv2.minMaxLoc",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"picamera.array.PiRGBArray",
"cv2.resize",
"cv2.matchTemplate",
"cv2.imread"
] | [((143, 153), 'picamera.PiCamera', 'PiCamera', ([], {}), '()\n', (151, 153), False, 'from picamera import PiCamera\n'), ((221, 256), 'picamera.array.PiRGBArray', 'PiRGBArray', (['camera'], {'size': '(640, 480)'}), '(camera, size=(640, 480))\n', (231, 256), False, 'from picamera.array import PiRGBArray\n'), ((257, 272), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (267, 272), False, 'import time\n'), ((286, 323), 'cv2.imread', 'cv2.imread', (['"""assets/stop_sign.jpg"""', '(0)'], {}), "('assets/stop_sign.jpg', 0)\n", (296, 323), False, 'import cv2\n'), ((367, 411), 'cv2.resize', 'cv2.resize', (['template', '(0, 0)'], {'fx': '(0.7)', 'fy': '(0.7)'}), '(template, (0, 0), fx=0.7, fy=0.7)\n', (377, 411), False, 'import cv2\n'), ((1471, 1494), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1492, 1494), False, 'import cv2\n'), ((639, 654), 'time.sleep', 'time.sleep', (['(0.0)'], {}), '(0.0)\n', (649, 654), False, 'import time\n'), ((699, 738), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (711, 738), False, 'import cv2\n'), ((758, 820), 'cv2.matchTemplate', 'cv2.matchTemplate', (['source_gray', 'template', 'cv2.TM_CCOEFF_NORMED'], {}), '(source_gray, template, cv2.TM_CCOEFF_NORMED)\n', (775, 820), False, 'import cv2\n'), ((889, 910), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['result'], {}), '(result)\n', (902, 910), False, 'import cv2\n'), ((1355, 1381), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'image'], {}), "('frame', image)\n", (1365, 1381), False, 'import cv2\n'), ((1124, 1210), 'cv2.rectangle', 'cv2.rectangle', (['image', 'max_loc', '(max_loc[0] + w, max_loc[1] + h)', '(0, 255, 255)', '(2)'], {}), '(image, max_loc, (max_loc[0] + w, max_loc[1] + h), (0, 255, \n 255), 2)\n', (1137, 1210), False, 'import cv2\n'), ((1420, 1434), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1431, 1434), False, 'import cv2\n')] |
import torch
import random
import torch.nn as nn
from abc import abstractmethod
from abc import ABCMeta
from torch import Tensor
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
from typing import Optional
from .losses import GANLoss
from .losses import GANTarget
from .discriminators import DiscriminatorBase
from ..protocol import GaussianGeneratorMixin
from ....data import CVLoader
from ....types import tensor_dict_type
from ....protocol import StepOutputs
from ....protocol import TrainerState
from ....protocol import MetricsOutputs
from ....protocol import ModelWithCustomSteps
from ....constants import LOSS_KEY
from ....constants import INPUT_KEY
from ....constants import LABEL_KEY
from ....constants import PREDICTIONS_KEY
from ....misc.toolkit import to_device
from ....misc.toolkit import mode_context
from ....misc.toolkit import toggle_optimizer
class GANMixin(ModelWithCustomSteps, GaussianGeneratorMixin, metaclass=ABCMeta):
def __init__(
self,
*,
num_classes: Optional[int] = None,
gan_mode: str = "vanilla",
gan_loss_config: Optional[Dict[str, Any]] = None,
):
super().__init__()
self.num_classes = num_classes
self.gan_mode = gan_mode
self.gan_loss = GANLoss(gan_mode)
if gan_loss_config is None:
gan_loss_config = {}
self.lambda_gp = gan_loss_config.get("lambda_gp", 10.0)
@property
@abstractmethod
def g_parameters(self) -> List[nn.Parameter]:
pass
@property
@abstractmethod
def d_parameters(self) -> List[nn.Parameter]:
pass
@abstractmethod
def _g_losses(
self,
batch: tensor_dict_type,
forward_kwargs: Dict[str, Any],
) -> Tuple[tensor_dict_type, tensor_dict_type, Optional[Tensor]]:
# g_losses, sampled, labels
pass
@abstractmethod
def _d_losses(
self,
batch: tensor_dict_type,
sampled: tensor_dict_type,
labels: Optional[Tensor],
) -> tensor_dict_type:
# d_losses
pass
# utilities
@property
def can_reconstruct(self) -> bool:
return False
def forward(
self,
batch_idx: int,
batch: tensor_dict_type,
state: Optional[TrainerState] = None,
**kwargs: Any,
) -> tensor_dict_type:
z = torch.randn(len(batch[INPUT_KEY]), self.latent_dim, device=self.device)
return {PREDICTIONS_KEY: self.decode(z, labels=batch[LABEL_KEY], **kwargs)}
def summary_forward(self, batch_idx: int, batch: tensor_dict_type) -> None:
self._g_losses(batch, {})
class OneStageGANMixin(GANMixin, metaclass=ABCMeta):
def train_step(
self,
batch_idx: int,
batch: tensor_dict_type,
trainer: Any,
forward_kwargs: Dict[str, Any],
loss_kwargs: Dict[str, Any],
) -> StepOutputs:
opt_g = trainer.optimizers["g_parameters"]
opt_d = trainer.optimizers["d_parameters"]
# generator step
toggle_optimizer(self, opt_g)
with torch.cuda.amp.autocast(enabled=trainer.use_amp):
g_losses, sampled, labels = self._g_losses(batch, forward_kwargs)
g_loss = g_losses.pop(LOSS_KEY)
trainer.grad_scaler.scale(g_loss).backward()
if trainer.clip_norm > 0.0:
trainer._clip_norm_step()
trainer.grad_scaler.step(opt_g)
trainer.grad_scaler.update()
opt_g.zero_grad()
# discriminator step
toggle_optimizer(self, opt_d)
with torch.no_grad():
sampled = {k: v.detach().clone() for k, v in sampled.items()}
with torch.cuda.amp.autocast(enabled=trainer.use_amp):
d_losses = self._d_losses(batch, sampled, labels)
d_loss = d_losses.pop(LOSS_KEY)
trainer.grad_scaler.scale(d_loss).backward()
if trainer.clip_norm > 0.0:
trainer._clip_norm_step()
trainer.grad_scaler.step(opt_d)
trainer.grad_scaler.update()
opt_d.zero_grad()
# finalize
trainer._scheduler_step()
forward_results = {PREDICTIONS_KEY: sampled}
loss_dict = {"g": g_loss.item(), "d": d_loss.item()}
loss_dict.update({k: v.item() for k, v in g_losses.items()})
loss_dict.update({k: v.item() for k, v in d_losses.items()})
return StepOutputs(forward_results, loss_dict)
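    # Note (assumption): `toggle_optimizer` from ..misc.toolkit presumably
    # enables gradients only for the parameters owned by the given optimizer,
    # which is why the discriminator stays frozen during the generator step
    # above (and vice versa); see the toolkit for its actual semantics.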
def evaluate_step( # type: ignore
self,
loader: CVLoader,
portion: float,
trainer: Any,
) -> MetricsOutputs:
loss_items: Dict[str, List[float]] = {}
for i, batch in enumerate(loader):
if i / len(loader) >= portion:
break
batch = to_device(batch, self.device)
g_losses, sampled, labels = self._g_losses(batch, {})
d_losses = self._d_losses(batch, sampled, labels)
g_loss = g_losses.pop(LOSS_KEY)
d_loss = d_losses.pop(LOSS_KEY)
loss_dict = {"g": g_loss.item(), "d": d_loss.item()}
loss_dict.update({k: v.item() for k, v in g_losses.items()})
loss_dict.update({k: v.item() for k, v in d_losses.items()})
for k, v in loss_dict.items():
loss_items.setdefault(k, []).append(v)
# gather
mean_loss_items = {k: sum(v) / len(v) for k, v in loss_items.items()}
mean_loss_items[LOSS_KEY] = sum(mean_loss_items.values())
score = trainer._weighted_loss_score(mean_loss_items)
return MetricsOutputs(score, mean_loss_items)
class VanillaGANMixin(OneStageGANMixin, metaclass=ABCMeta):
def __init__(
self,
in_channels: int,
*,
discriminator: str = "basic",
discriminator_config: Optional[Dict[str, Any]] = None,
num_classes: Optional[int] = None,
gan_mode: str = "vanilla",
gan_loss_config: Optional[Dict[str, Any]] = None,
):
super().__init__(
num_classes=num_classes,
gan_mode=gan_mode,
gan_loss_config=gan_loss_config,
)
if discriminator_config is None:
discriminator_config = {}
discriminator_config["in_channels"] = in_channels
discriminator_config["num_classes"] = num_classes
self.discriminator = DiscriminatorBase.make(
discriminator,
config=discriminator_config,
)
@property
def d_parameters(self) -> List[nn.Parameter]:
return list(self.discriminator.parameters())
def _g_losses(
self,
batch: tensor_dict_type,
forward_kwargs: Dict[str, Any],
) -> Tuple[tensor_dict_type, tensor_dict_type, Optional[Tensor]]:
labels = batch.get(LABEL_KEY)
if labels is not None:
labels = labels.view(-1)
sampled = self.sample(len(batch[INPUT_KEY]), labels=labels, **forward_kwargs)
pred_fake = self.discriminator(sampled)
loss_g = self.gan_loss(pred_fake, GANTarget(True, labels))
return {LOSS_KEY: loss_g}, {"sampled": sampled}, labels
def _d_losses(
self,
batch: tensor_dict_type,
sampled: tensor_dict_type,
labels: Optional[Tensor],
) -> tensor_dict_type:
net = batch[INPUT_KEY]
sampled_tensor = sampled["sampled"]
pred_real = self.discriminator(net)
loss_d_real = self.gan_loss(pred_real, GANTarget(True, labels))
pred_fake = self.discriminator(sampled_tensor)
loss_d_fake = self.gan_loss(pred_fake, GANTarget(False, labels))
d_loss = 0.5 * (loss_d_fake + loss_d_real)
losses = {"d_fake": loss_d_fake, "d_real": loss_d_real}
if self.gan_mode == "wgangp":
eps = random.random()
merged = eps * net + (1.0 - eps) * sampled_tensor
with mode_context(self.discriminator, to_train=None, use_grad=True):
pred_merged = self.discriminator(merged.requires_grad_(True)).output # type: ignore
loss_gp = self.gan_loss.loss(merged, pred_merged)
d_loss = d_loss + self.lambda_gp * loss_gp
losses["d_gp"] = loss_gp
losses[LOSS_KEY] = d_loss
return losses
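# Note: in the "wgangp" branch above, `self.gan_loss.loss(merged, pred_merged)`
# is expected to return the WGAN-GP gradient penalty
# E[(||grad_xhat D(xhat)||_2 - 1)^2], evaluated on the interpolates
# xhat = eps * x_real + (1 - eps) * x_fake built above.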
__all__ = [
"GANMixin",
"OneStageGANMixin",
"VanillaGANMixin",
]
| [
"torch.no_grad",
"random.random",
"torch.cuda.amp.autocast"
] | [((3117, 3165), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'trainer.use_amp'}), '(enabled=trainer.use_amp)\n', (3140, 3165), False, 'import torch\n'), ((3595, 3610), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3608, 3610), False, 'import torch\n'), ((3699, 3747), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'trainer.use_amp'}), '(enabled=trainer.use_amp)\n', (3722, 3747), False, 'import torch\n'), ((7769, 7784), 'random.random', 'random.random', ([], {}), '()\n', (7782, 7784), False, 'import random\n')] |
import os
from dynaconf import Dynaconf # type: ignore
current_directory = os.path.dirname(os.path.realpath(__file__))
settings = Dynaconf(
envvar_prefix="PROMED",
settings_files=[
f"{current_directory}/settings.toml",
],
)
settings["DEBUG"] = True if settings.LOG_LEVEL == "DEBUG" else False
s = settings
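# Usage sketch: values resolve from settings.toml and can be overridden via
# environment variables carrying the PROMED_ prefix, e.g.
#   PROMED_LOG_LEVEL=DEBUG  ->  settings.LOG_LEVEL == "DEBUG"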
| [
"os.path.realpath",
"dynaconf.Dynaconf"
] | [((135, 227), 'dynaconf.Dynaconf', 'Dynaconf', ([], {'envvar_prefix': '"""PROMED"""', 'settings_files': "[f'{current_directory}/settings.toml']"}), "(envvar_prefix='PROMED', settings_files=[\n f'{current_directory}/settings.toml'])\n", (143, 227), False, 'from dynaconf import Dynaconf\n'), ((95, 121), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (111, 121), False, 'import os\n')] |
import os
import sys
import errno
import importlib
import contextlib
# print("prepare Avalon Max Pipeline")
from pyblish import api as pyblish
from . import lib, workio
from ..lib import logger
from .. import api, io, schema, Session
from ..vendor import six
from ..vendor.Qt import QtCore, QtWidgets
from ..pipeline import AVALON_CONTAINER_ID
from ..pipeline import create
# from ..pipeline import load
# from ..pipeline import update
# from ..pipeline import remove
from ..tools import workfiles
import MaxPlus as MP
from MaxPlus import NotificationCodes as NC
from MaxPlus import NotificationManager as NM
from MaxPlus import PathManager as PM
from MaxPlus import ActionFactory as AF
import pymxs
Ev_Mxs = MP.Core.EvalMAXScript
Ex_Mxs = MP.Core.ExecuteMAXScript
rt = pymxs.runtime
self = sys.modules[__name__]
self._menu_name = "avalonmax" # Unique name of menu
self._events = dict() # Registered callbacks
self._ignore_lock = False
try:
    self._parent = MP.GetQMaxMainWindow()  # the main window, i.e. 3ds Max itself
except:
self._parent = MP.GetQMaxWindow()
AVALON_CONTAINERS = "AVALON_CONTAINERS"
AVALON_CONTAINERS_NODE = None
logger.info("prepare Avalon Max Pipeline")
def install():
'''config: module, get from AVALON_CONFIG'''
self._menu_name = api.Session["AVALON_LABEL"]
_register_callbacks()
_register_events()
_set_project()
_install_menu()
# register host
pyblish.register_host("max_")
def _register_callbacks():
'''
# reg dcc change call_back make avalon can reactorto those signal
# maya max
# _on_max_initialized -> SystemStartup
# _on_scene_open -> FilePostOpen
# _on_scene_new -> SystemPostNew
# _before_scene_save -> FilePreSave
# _on_scene_save -> FilePostSave
'''
# remove pre-install call_back
for _, handle in self._events.items():
try:
NM.Unregister(handle)
except Exception:
pass
self._events.clear()
# install callback
self._events[_system_starup] = NM.Register(NC.SystemStartup, _system_starup)
self._events[_file_post_open] = NM.Register(NC.FilePostOpen, _file_post_open)
self._events[_system_post_new] = NM.Register(NC.SystemPostNew, _system_post_new)
self._events[_file_pre_save] = NM.Register(NC.FilePreSave, _file_pre_save)
self._events[_file_post_save] = NM.Register(NC.FilePostSave, _file_post_save)
logger.info("Installed event handler SystemPostNew..")
logger.info("Installed event handler FilePreSave..")
logger.info("Installed event handler SystemPostNew..")
logger.info("Installed event handler SystemStartup..")
logger.info("Installed event handler FilePostOpen..")
def _register_events():
api.on("taskChanged", _on_task_changed)
logger.info("Installed event callback for 'taskChanged'..")
def _set_project():
'''setting current work folder to max project'''
pass
def _system_starup(*args):
api.emit('init', args)
# run in command mode?
# reference to 3dsMAX itself
try:
self._parent = MP.GetQMaxWindow() # max2016 sp2
    except Exception:
self._parent = MP.GetQMaxMainWindow() # max2018.4
_uninstall_menu()
def _file_post_open(*args):
api.emit('open', args)
def _system_post_new(*args):
api.emit('new', args)
def _file_pre_save(*args):
api.emit('before_save', args)
def _file_post_save(*args):
api.emit('save', args)
def _install_menu():
from ..tools import (
projectmanager,
creator,
loader,
publish,
sceneinventory,
contextmanager
)
_uninstall_menu()
def deferred():
print("building menu")
ava_menu_name = self._menu_name
category_name = api.Session["AVALON_LABEL"]
context_menu_name = "{}, {}".format(api.Session["AVALON_ASSET"], api.Session["AVALON_TASK"])
ava_mb = MP.MenuBuilder(ava_menu_name)
context_mb = MP.MenuBuilder(context_menu_name)
act_projectmanager = AF.Create(category_name, 'Project Manager', lambda *args: projectmanager.show(parent=self._parent))
act_set_Context = AF.Create(category_name, 'Set Context', lambda *args: contextmanager.show(parent=self._parent))
act_create = AF.Create(category_name, 'Create...', lambda *args: creator.show(parent=self._parent))
act_load = AF.Create(category_name, 'Load...', lambda *args: loader.show(parent=self._parent))
act_publish = AF.Create(category_name, 'Publish...', lambda *args: publish.show())
act_manage = AF.Create(category_name, 'Manage...', lambda *args: sceneinventory.show(parent=self._parent))
act_workfiles = AF.Create(category_name, 'Work file...', lambda *args: launch_workfiles_app(self._parent))
context_mb.AddItem(act_set_Context)
ava_mb.AddItem(act_projectmanager)
ava_mb.AddItem(act_create)
ava_mb.AddItem(act_load)
ava_mb.AddItem(act_publish)
ava_mb.AddItem(act_manage)
ava_mb.AddItem(act_workfiles)
ava_menu = ava_mb.Create(MP.MenuManager.GetMainMenu())
context_menu = context_mb.Create(ava_menu)
# run script plugin
avalon_core_folder = os.path.dirname(
os.path.dirname(
os.path.dirname(__file__)))
        base_ms = avalon_core_folder + '\\setup\\max\\avalon_base.ms'
rt.executeScriptFile(base_ms)
QtCore.QTimer.singleShot(100, deferred)
def _uninstall_menu():
if MP.MenuManager.MenuExists(self._menu_name):
MP.MenuManager.UnregisterMenu(self._menu_name)
context_menu_name = "{}, {}".format(api.Session["AVALON_ASSET"], api.Session["AVALON_TASK"])
if MP.MenuManager.MenuExists(context_menu_name):
MP.MenuManager.UnregisterMenu(context_menu_name)
def launch_workfiles_app(*args):
workfiles.show(workio.work_root(), parent=args[0])
def find_host_config(config):
try:
config = importlib.import_module(config.__name__ + ".max_")
except ImportError as exc:
if str(exc) != "No module name {}".format(config.__name__ + ".max_"):
raise
config = None
return config
def _on_task_changed(*args):
# update menu task label
for arg in args:
print('{}:{}'.format(type(arg), arg))
_update_menu_task_label()
def _update_menu_task_label():
"""Update the task label in Avalon menu to current session"""
object_name = "{}|currentContext".format(self._menu_name)
print('self._menu_name:{}'.format(self._menu_name))
    # to be continued
def uninstall(config):
print("uninstall start")
config = find_host_config(config)
if hasattr(config, "uninstall"):
config.uninstall()
_uninstall_menu()
pyblish.deregister_host("max_")
print("uninstall end")
def load(Loader,
representation,
name=None,
namespace=None,
data=None):
"""Load asset via database
Arguments:
Loader (api.Loader): The loader to process in host Max.
representation (dict, io.ObjectId or str): Address to representation
name (str, optional): Use pre-defined name
namespace (str, optional): Use pre-defined namespace
data (dict, optional): Additional settings dictionary
"""
print("load")
assert representation is not None, "This is a bug"
if isinstance(representation, (six.string_types, io.ObjectId)):
representation = io.find_one({"_id": io.ObjectId(str(representation))})
version, subset, asset, project = io.parenthood(representation)
assert all([representation, version, subset, asset, project]), (
"This is a bug"
)
context = {
"project": project,
"asset": asset,
"subset": subset,
"version": version,
"representation": representation,
}
# Ensure data is a dictionary when no explicit data provided
if data is None:
data = dict()
assert isinstance(data, dict), "Data must be a dictionary"
name = name or subset["name"]
namespace = namespace or lib.unique_namespace(
asset["name"] + "_",
prefix="_" if asset["name"][0].isdigit() else "",
suffix="_",
)
# TODO(roy): add compatibility check, see `tools.cbloader.lib`
Loader.log.info(
"Running '%s' on '%s'" % (Loader.__name__, asset["name"])
)
def containerise(name,
namespace,
nodes,
context,
loader=None,
suffix="CON"):
"""Bundle `nodes` into an assembly and imprint it with metadata
Containerisation enables a tracking of version, author and origin
for loaded assets.
Arguments:
name (str): Name of resulting assembly
namespace (str): Namespace under which to host container
nodes (list): Long names of nodes to containerise
context (dict): Asset information
loader (str, optional): Name of loader used to produce this container.
suffix (str, optional): Suffix of container, defaults to `_CON`.
Returns:
container (TrackViewNode): container assembly
"""
ava_con = rt.newTrackViewNode(AVALON_CONTAINERS_NODE, name)
rt.CustAttributes.Add(ava_con, rt.avalon_cust_def)
ava_con.name_ = name
ava_con.id = AVALON_CONTAINER_ID
ava_con.loader = str(loader)
ava_con.representation = context["representation"]["_id"]
ava_con.nodes = nodes
return ava_con
def update(container, version=-1):
"""Update `container` to `version`
This function relies on a container being referenced. At the time of this
writing, all assets - models, rigs, animations, shaders - are referenced
    and should pose no problem. But should there be an asset that isn't
    referenced, this function will need to be updated.
Arguments:
container (avalon-core:container-1.0): Container to update,
from `host.ls()`.
version (int, optional): Update the container to this version.
If no version is passed, the latest is assumed.
"""
print("update")
def remove(container):
"""Remove an existing `container` from Maya scene
Arguments:
container (avalon-core:container-1.0): Which container
to remove from scene.
"""
print("remove")
print(container)
print(dir(container))
def publish():
"""Shorthand to publish from within host"""
import pyblish.util
return pyblish.util.publish()
def ls():
"""List containers from active Max scene
This is the host-equivalent of api.ls(), but instead of listing
assets on disk, it lists assets already loaded in Max; once loaded
they are called 'containers'
"""
pass
class Creator(api.Creator):
def _create_ava_set_object(self):
org_lay = rt.LayerManager.current
ava_lay = rt.LayerManager.getLayerFromName('avalon')
if not ava_lay:
ava_lay = rt.LayerManager.newLayerFromName('avalon')
ava_lay.current = True
# create layer for avalon
inst = rt.AvalonSet()
attrs = ['cross', 'box', 'centermarker', 'axistripod']
for attr in attrs:
rt.execute('$' + inst.name + '[4][1].' + attr + '=off')
org_lay.current = True
return inst
def process(self):
inst = self._create_ava_set_object()
if (self.options or {}).get("useSelection"):
inst_node = MP.INode.GetINodeByName(inst.name)
avalon_nodes = inst_node.BaseObject.ParameterBlock.GetParamByName('avalon_nodes')
sel_objs = [obj for obj in rt.selection]
for obj in sel_objs:
class_ = rt.classOf(obj)
if class_ == rt.AvalonSet or class_ == rt.AvalonContainer:
rt.deselect(obj)
# Exclude AvalonSet and AvalonContainer object
new_list = MP.INodeList()
for node in MP.SelectionManager.Nodes:
new_list.Append(node)
avalon_nodes.Value = new_list
# inst = rt.newTrackViewNode(AVALON_CONTAINERS_NODE, self.name)
inst.ava_id = self.data["id"] = "pyblish.avalon.instance"
inst.family = self.data["family"]
inst.asset = self.data["asset"]
inst.subset = self.data["subset"]
inst.active = self.data["active"]
inst.name = rt.uniqueName(inst.subset)
return inst
class Loader(api.Loader):
hosts = ["max_"]
def __init__(self, context):
super(Loader, self).__init__(context)
self.fname = self.fname.replace(
api.registered_root(), "$AVALON_PROJECTS"
)
print("install Avalon Max Done")
| [
"MaxPlus.NotificationManager.Register",
"MaxPlus.MenuManager.MenuExists",
"importlib.import_module",
"MaxPlus.INode.GetINodeByName",
"MaxPlus.MenuBuilder",
"pyblish.api.util.publish",
"pyblish.api.deregister_host",
"MaxPlus.GetQMaxMainWindow",
"MaxPlus.INodeList",
"os.path.dirname",
"pyblish.api... | [((966, 988), 'MaxPlus.GetQMaxMainWindow', 'MP.GetQMaxMainWindow', ([], {}), '()\n', (986, 988), True, 'import MaxPlus as MP\n'), ((1419, 1448), 'pyblish.api.register_host', 'pyblish.register_host', (['"""max_"""'], {}), "('max_')\n", (1440, 1448), True, 'from pyblish import api as pyblish\n'), ((2085, 2130), 'MaxPlus.NotificationManager.Register', 'NM.Register', (['NC.SystemStartup', '_system_starup'], {}), '(NC.SystemStartup, _system_starup)\n', (2096, 2130), True, 'from MaxPlus import NotificationManager as NM\n'), ((2167, 2212), 'MaxPlus.NotificationManager.Register', 'NM.Register', (['NC.FilePostOpen', '_file_post_open'], {}), '(NC.FilePostOpen, _file_post_open)\n', (2178, 2212), True, 'from MaxPlus import NotificationManager as NM\n'), ((2250, 2297), 'MaxPlus.NotificationManager.Register', 'NM.Register', (['NC.SystemPostNew', '_system_post_new'], {}), '(NC.SystemPostNew, _system_post_new)\n', (2261, 2297), True, 'from MaxPlus import NotificationManager as NM\n'), ((2333, 2376), 'MaxPlus.NotificationManager.Register', 'NM.Register', (['NC.FilePreSave', '_file_pre_save'], {}), '(NC.FilePreSave, _file_pre_save)\n', (2344, 2376), True, 'from MaxPlus import NotificationManager as NM\n'), ((2413, 2458), 'MaxPlus.NotificationManager.Register', 'NM.Register', (['NC.FilePostSave', '_file_post_save'], {}), '(NC.FilePostSave, _file_post_save)\n', (2424, 2458), True, 'from MaxPlus import NotificationManager as NM\n'), ((5543, 5585), 'MaxPlus.MenuManager.MenuExists', 'MP.MenuManager.MenuExists', (['self._menu_name'], {}), '(self._menu_name)\n', (5568, 5585), True, 'import MaxPlus as MP\n'), ((5747, 5791), 'MaxPlus.MenuManager.MenuExists', 'MP.MenuManager.MenuExists', (['context_menu_name'], {}), '(context_menu_name)\n', (5772, 5791), True, 'import MaxPlus as MP\n'), ((6805, 6836), 'pyblish.api.deregister_host', 'pyblish.deregister_host', (['"""max_"""'], {}), "('max_')\n", (6828, 6836), True, 'from pyblish import api as pyblish\n'), ((10556, 10578), 'pyblish.api.util.publish', 'pyblish.util.publish', ([], {}), '()\n', (10576, 10578), True, 'from pyblish import api as pyblish\n'), ((1055, 1073), 'MaxPlus.GetQMaxWindow', 'MP.GetQMaxWindow', ([], {}), '()\n', (1071, 1073), True, 'import MaxPlus as MP\n'), ((3120, 3138), 'MaxPlus.GetQMaxWindow', 'MP.GetQMaxWindow', ([], {}), '()\n', (3136, 3138), True, 'import MaxPlus as MP\n'), ((3946, 3975), 'MaxPlus.MenuBuilder', 'MP.MenuBuilder', (['ava_menu_name'], {}), '(ava_menu_name)\n', (3960, 3975), True, 'import MaxPlus as MP\n'), ((3997, 4030), 'MaxPlus.MenuBuilder', 'MP.MenuBuilder', (['context_menu_name'], {}), '(context_menu_name)\n', (4011, 4030), True, 'import MaxPlus as MP\n'), ((5595, 5641), 'MaxPlus.MenuManager.UnregisterMenu', 'MP.MenuManager.UnregisterMenu', (['self._menu_name'], {}), '(self._menu_name)\n', (5624, 5641), True, 'import MaxPlus as MP\n'), ((5805, 5853), 'MaxPlus.MenuManager.UnregisterMenu', 'MP.MenuManager.UnregisterMenu', (['context_menu_name'], {}), '(context_menu_name)\n', (5834, 5853), True, 'import MaxPlus as MP\n'), ((6006, 6056), 'importlib.import_module', 'importlib.import_module', (["(config.__name__ + '.max_')"], {}), "(config.__name__ + '.max_')\n", (6029, 6056), False, 'import importlib\n'), ((1936, 1957), 'MaxPlus.NotificationManager.Unregister', 'NM.Unregister', (['handle'], {}), '(handle)\n', (1949, 1957), True, 'from MaxPlus import NotificationManager as NM\n'), ((3189, 3211), 'MaxPlus.GetQMaxMainWindow', 'MP.GetQMaxMainWindow', ([], {}), '()\n', (3209, 3211), True, 'import MaxPlus as MP\n'), 
((5130, 5158), 'MaxPlus.MenuManager.GetMainMenu', 'MP.MenuManager.GetMainMenu', ([], {}), '()\n', (5156, 5158), True, 'import MaxPlus as MP\n'), ((11551, 11585), 'MaxPlus.INode.GetINodeByName', 'MP.INode.GetINodeByName', (['inst.name'], {}), '(inst.name)\n', (11574, 11585), True, 'import MaxPlus as MP\n'), ((12017, 12031), 'MaxPlus.INodeList', 'MP.INodeList', ([], {}), '()\n', (12029, 12031), True, 'import MaxPlus as MP\n'), ((5331, 5356), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5346, 5356), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import logging
import sys
import requests
import backoff
import quandl
from ingestionDao import IngestionDaoItf
class IngestorItf( object ):
"""Abstract base class for ingestors,
the class provides a prototypical get data, and no fringes."""
def get_data( self ):
raise NotImplementedError("Class %s doesn't implement aMethod()" % (self.__class__.__name__))
class IngestorQuandl( IngestorItf ):
"""Implementation of an ingestor for quandl timeseries ,
the class provides a prototypical get data, and no fringes."""
def __init__( self, quandl_key, ingestionDao ):
self.logger = logging.getLogger(__name__)
quandl.ApiConfig.api_key = quandl_key
if not isinstance(ingestionDao, IngestionDaoItf):
raise ValueError('argument must be a IngestionDao')
self.ingestionDao = ingestionDao
@backoff.on_exception(backoff.expo,
(requests.exceptions.Timeout,
requests.exceptions.ConnectionError),
max_tries=3)
def get_data( self, name ):
self.logger.info("get_data(%s)", name)
data = quandl.get(name)
data = self.__validate(data)
self.ingestionDao.persist_dataframe( name, data )
return data
def __validate( self, data ):
# here we should validate data according with policies,
# fix where it is possible to fix data,
# or raise an exception if the data cannot be recovered.
# The policies require to observe a few cases to figure out
# what we need to check for and how to fix/discard data
# Optionally we can have the validator injected as we inject
# the DAO
return data
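# Minimal usage sketch (SqlIngestionDao is a hypothetical IngestionDaoItf
# implementation; the API key and dataset code are placeholders):
#   dao = SqlIngestionDao(connection_string)
#   ingestor = IngestorQuandl("your-quandl-api-key", dao)
#   df = ingestor.get_data("WIKI/AAPL")  # fetches, validates, and persists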
| [
"logging.getLogger",
"quandl.get",
"backoff.on_exception"
] | [((867, 987), 'backoff.on_exception', 'backoff.on_exception', (['backoff.expo', '(requests.exceptions.Timeout, requests.exceptions.ConnectionError)'], {'max_tries': '(3)'}), '(backoff.expo, (requests.exceptions.Timeout, requests.\n exceptions.ConnectionError), max_tries=3)\n', (887, 987), False, 'import backoff\n'), ((643, 670), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (660, 670), False, 'import logging\n'), ((1134, 1150), 'quandl.get', 'quandl.get', (['name'], {}), '(name)\n', (1144, 1150), False, 'import quandl\n')] |
#!/usr/bin/env python3
# Copyright (c) 2021 <NAME>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
"""This module does comparison of two images"""
import argparse
import os
from io import BytesIO
from typing import Union, List, Tuple
from pathlib import Path
import numpy as np
from PIL import Image
from cv2 import cv2
import face_recognition
from emrtd_face_access.print_to_sg import SetInterval
print = SetInterval().print
def opencv_dnn_detector() -> cv2.dnn_Net:
"""Create face detection network"""
if "net" in opencv_dnn_detector.__dict__:
return opencv_dnn_detector.net
print("[+] Creating face detector network...")
# downloaded from
# https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel
model_file = "face_detection/res10_300x300_ssd_iter_140000_fp16.caffemodel"
# downloaded from
# https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt
config_file = "face_detection/deploy.prototxt"
opencv_dnn_detector.net = cv2.dnn.readNetFromCaffe(config_file, model_file)
return opencv_dnn_detector.net
def get_bounding_boxes(
image: np.ndarray, conf_threshold: float = 0.5, scale_size: Tuple[int, int] = (-1, -1)
) -> List[Tuple[int, ...]]:
"""Image is expected in opencv format (BGR)
takes image and returns face bounding boxes
scale_size: Tuple[int, int] (height, width)"""
# https://learnopencv.com/face-detection-opencv-dlib-and-deep-learning-c-python/
net = opencv_dnn_detector()
face_locations: List[Tuple[int, ...]] = []
blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), [104, 117, 123], False, False)
net.setInput(blob)
detections = net.forward()
for i in range(detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > conf_threshold:
x1 = detections[0, 0, i, 3]
y1 = detections[0, 0, i, 4]
x2 = detections[0, 0, i, 5]
y2 = detections[0, 0, i, 6]
if scale_size == (-1, -1):
x1 = int(x1 * image.shape[1])
y1 = int(y1 * image.shape[0])
x2 = int(x2 * image.shape[1])
y2 = int(y2 * image.shape[0])
else:
x1 = int(x1 * scale_size[1])
y1 = int(y1 * scale_size[0])
x2 = int(x2 * scale_size[1])
y2 = int(y2 * scale_size[0])
face_locations.append((y1, x2, y2, x1))
return face_locations
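# Example call (hypothetical file path): the detector expects a BGR frame,
# which is what cv2.imread returns, and yields (top, right, bottom, left)
# boxes in the face_recognition convention:
#   frame = cv2.imread("frame.jpg")
#   boxes = get_bounding_boxes(frame, conf_threshold=0.5)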
def compare_faces(
id_image: bytes,
cam_image: np.ndarray,
face_location: List[Tuple[int, ...]],
save_dest: Union[Path, None] = None,
) -> bool:
"""
Compare two images. First one should be jpeg, the second one should be opencv image (numpy)
face_location is the location of the face in the second image
:returns: True if they are the same person, False otherwise.
"""
im1 = bytes_to_np(id_image)
    im1 = im1[:, :, ::-1]  # RGB -> BGR for the OpenCV detector
    id_face_loc = get_bounding_boxes(im1)
    im1 = im1[:, :, ::-1]  # back to RGB for face_recognition
face_encodings = face_recognition.face_encodings(im1, id_face_loc, 10, "large")[0]
im2 = cam_image[:, :, ::-1]
face_encodings2 = face_recognition.face_encodings(im2, face_location, 10, "large")[0]
if save_dest:
Image.fromarray(im1).save(os.path.join(save_dest, "face_one.jpeg"))
Image.fromarray(im2).save(os.path.join(save_dest, "face_two.jpeg"))
dist = face_recognition.face_distance([face_encodings], face_encodings2)[0]
print("[i] Decision threshold is 0.5.")
if dist <= 0.5:
print(
f"[+] Distance between the images is {dist}"
"\n[+] These images are of the same people!"
)
return True
else:
print(
f"[-] Distance between the images is {dist}\n"
"[-] These images are of two different people!"
)
return False
def bytes_to_np(img: bytes) -> np.ndarray:
"""
Converts bytes image (PIL) to numpy image (opencv)
"""
im = Image.open(BytesIO(img))
im = im.convert("RGB")
return np.array(im)
def jpeg_to_png(img: bytes) -> bytes:
"""
Converts a JPEG to a PNG
"""
im = Image.open(BytesIO(img))
width = 240
height = int(im.size[1] * (240 / im.size[0]))
im = im.convert("RGB").resize((width, height))
stream = BytesIO()
im.save(stream, format="PNG")
return stream.getvalue()
def main(im1_filename: Path, im2_filename: Path) -> None:
"""
Compare two persons images.
"""
im1 = np.array(Image.open(im1_filename).convert("RGB"))
im2 = np.array(Image.open(im2_filename).convert("RGB"))
im1 = im1[:, :, ::-1]
id_face_loc = get_bounding_boxes(im1)
im1 = im1[:, :, ::-1]
face_encodings = face_recognition.face_encodings(im1, id_face_loc, 10, "large")[0]
im2 = im2[:, :, ::-1]
cam_face_loc = get_bounding_boxes(im2)
im2 = im2[:, :, ::-1]
face_encodings2 = face_recognition.face_encodings(im2, cam_face_loc, 10, "large")[0]
dist = face_recognition.face_distance([face_encodings], face_encodings2)[0]
    if dist <= 0.5:
print(f"[+] These images belong to the same person! ({dist})")
else:
print(f"[-] These images do not belong to the same person! ({dist})")
if __name__ == "__main__":
def raise_(ex):
"""https://stackoverflow.com/a/8294654/6077951"""
raise ex
parser = argparse.ArgumentParser(description="Find if two images are of the same people.")
parser.add_argument(
"image_one",
type=lambda x: x if os.path.isfile(x) else raise_(FileNotFoundError(x)),
help="Path to image one",
)
parser.add_argument(
"image_two",
type=lambda x: x if os.path.isfile(x) else raise_(FileNotFoundError(x)),
help="Path to image two",
)
args = parser.parse_args()
main(Path(args.image_one), Path(args.image_two))
| [
"PIL.Image.fromarray",
"PIL.Image.open",
"emrtd_face_access.print_to_sg.SetInterval",
"argparse.ArgumentParser",
"pathlib.Path",
"io.BytesIO",
"os.path.join",
"os.path.isfile",
"numpy.array",
"face_recognition.face_distance",
"face_recognition.face_encodings",
"cv2.cv2.dnn.blobFromImage",
"c... | [((455, 468), 'emrtd_face_access.print_to_sg.SetInterval', 'SetInterval', ([], {}), '()\n', (466, 468), False, 'from emrtd_face_access.print_to_sg import SetInterval\n'), ((1153, 1202), 'cv2.cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['config_file', 'model_file'], {}), '(config_file, model_file)\n', (1177, 1202), False, 'from cv2 import cv2\n'), ((1707, 1783), 'cv2.cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image', '(1.0)', '(300, 300)', '[104, 117, 123]', '(False)', '(False)'], {}), '(image, 1.0, (300, 300), [104, 117, 123], False, False)\n', (1728, 1783), False, 'from cv2 import cv2\n'), ((4203, 4215), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (4211, 4215), True, 'import numpy as np\n'), ((4465, 4474), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (4472, 4474), False, 'from io import BytesIO\n'), ((5531, 5617), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Find if two images are of the same people."""'}), "(description=\n 'Find if two images are of the same people.')\n", (5554, 5617), False, 'import argparse\n'), ((3176, 3238), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['im1', 'id_face_loc', '(10)', '"""large"""'], {}), "(im1, id_face_loc, 10, 'large')\n", (3207, 3238), False, 'import face_recognition\n'), ((3297, 3361), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['im2', 'face_location', '(10)', '"""large"""'], {}), "(im2, face_location, 10, 'large')\n", (3328, 3361), False, 'import face_recognition\n'), ((3548, 3613), 'face_recognition.face_distance', 'face_recognition.face_distance', (['[face_encodings]', 'face_encodings2'], {}), '([face_encodings], face_encodings2)\n', (3578, 3613), False, 'import face_recognition\n'), ((4151, 4163), 'io.BytesIO', 'BytesIO', (['img'], {}), '(img)\n', (4158, 4163), False, 'from io import BytesIO\n'), ((4321, 4333), 'io.BytesIO', 'BytesIO', (['img'], {}), '(img)\n', (4328, 4333), False, 'from io import BytesIO\n'), ((4882, 4944), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['im1', 'id_face_loc', '(10)', '"""large"""'], {}), "(im1, id_face_loc, 10, 'large')\n", (4913, 4944), False, 'import face_recognition\n'), ((5066, 5129), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['im2', 'cam_face_loc', '(10)', '"""large"""'], {}), "(im2, cam_face_loc, 10, 'large')\n", (5097, 5129), False, 'import face_recognition\n'), ((5145, 5210), 'face_recognition.face_distance', 'face_recognition.face_distance', (['[face_encodings]', 'face_encodings2'], {}), '([face_encodings], face_encodings2)\n', (5175, 5210), False, 'import face_recognition\n'), ((5988, 6008), 'pathlib.Path', 'Path', (['args.image_one'], {}), '(args.image_one)\n', (5992, 6008), False, 'from pathlib import Path\n'), ((6010, 6030), 'pathlib.Path', 'Path', (['args.image_two'], {}), '(args.image_two)\n', (6014, 6030), False, 'from pathlib import Path\n'), ((3418, 3458), 'os.path.join', 'os.path.join', (['save_dest', '"""face_one.jpeg"""'], {}), "(save_dest, 'face_one.jpeg')\n", (3430, 3458), False, 'import os\n'), ((3494, 3534), 'os.path.join', 'os.path.join', (['save_dest', '"""face_two.jpeg"""'], {}), "(save_dest, 'face_two.jpeg')\n", (3506, 3534), False, 'import os\n'), ((3392, 3412), 'PIL.Image.fromarray', 'Image.fromarray', (['im1'], {}), '(im1)\n', (3407, 3412), False, 'from PIL import Image\n'), ((3468, 3488), 'PIL.Image.fromarray', 'Image.fromarray', (['im2'], {}), '(im2)\n', (3483, 3488), False, 'from PIL import Image\n'), 
((4665, 4689), 'PIL.Image.open', 'Image.open', (['im1_filename'], {}), '(im1_filename)\n', (4675, 4689), False, 'from PIL import Image\n'), ((4725, 4749), 'PIL.Image.open', 'Image.open', (['im2_filename'], {}), '(im2_filename)\n', (4735, 4749), False, 'from PIL import Image\n'), ((5687, 5704), 'os.path.isfile', 'os.path.isfile', (['x'], {}), '(x)\n', (5701, 5704), False, 'import os\n'), ((5854, 5871), 'os.path.isfile', 'os.path.isfile', (['x'], {}), '(x)\n', (5868, 5871), False, 'import os\n')] |
#!/usr/bin/env python
# Author: <NAME>
# Date: March 15, 2020
from matplotlib import pyplot
from pydoc import pager
from time import sleep
import argparse
import datetime as dt
import json
import matplotlib
import pandas as pd
import requests
import seaborn
import sys
import yaml
def main():
    ''' Simple web scraping script to get COVID-19 data using the free
        https://thevirustracker.com API.
    '''
if len(sys.argv) == 2:
if sys.argv[1] == '-h' or sys.argv[1] == '--help':
parser = argparse.ArgumentParser(
description='''COVID Scrapper v0.0.2''',
epilog='''Thanks for using our service.''')
parser.add_argument('-w', help='Print Worldwide COVID-19 data')
parser.add_argument('-g', help='Plot COVID-19 data')
parser.add_argument('-list',
help='Print a list of available countries and codes')
parser.add_argument('-s', metavar='[country]',
help='Print specific country COVID-19 data')
args = parser.parse_args()
if sys.argv[1] == '-w':
get_worldwide_stats(WORLDWIDE_URL)
sys.exit(0)
elif sys.argv[1] == '-list':
print_list_to_user()
sys.exit(0)
elif sys.argv[1] == '-g':
prep_data()
sys.exit(0)
elif len(sys.argv) > 2:
# Account for countries with spaces (i.e United States)
string = ""
index = 2
while index < len(sys.argv):
if sys.argv[index] != " ":
string += sys.argv[index] + " "
index += 1
string = string.strip()
# This acts as if the user chose option #3 in the menu.
country = 'https://thevirustracker.com/free-api?countryTotal={}'\
.format(get_country_code(string))
get_country_stats(country)
else:
# No cli-arguments given.
menu_driver()
def menu_driver():
"""Program main driver.
The user can choose between 1-4 menu options.
1. Wordwide stats
2. List of countries
3. Specific country stats (Full country or two-letter code)
4. Exit the program
"""
done = False
while not done:
print_menu()
user_input = input("Please, enter option: ")
print('------------------------------------------------')
option_info = check_validity(user_input)
        if option_info != -1:
if option_info == 5:
done = True
print("\n")
print("Thank you for using COVID-19 Scrapper. Stay safe!")
else:
evaluate_option(option_info)
else:
print("Please, enter a valid number option from 1 to 4....")
sleep(2)
print('------------------------------------------------')
def print_menu():
"""Prints the menu to the user."""
# TODO: think about plotting option in menu.
print()
print("COVID-19 Stats Scrapper. Please, select a number." + "\n")
print("1. To see worldwide stats.")
print("2. To see a list of the available countries and their"\
+ " respective abbreviations.")
print("3. To type a country or abrreviation and see their stats.")
print("4. To visualize Total Cases in the most infected countries.")
print("5. Exit")
def check_validity(option):
"""Check if the input received is a valid digit 1 to 4 inclusive."""
if option.isdigit():
numeric_option = int(option)
        if 1 <= numeric_option <= 5:
return numeric_option
else:
return -1
else:
return -1
def evaluate_option(user_option):
"""Evaluate the valid input from the user."""
if user_option == 1:
get_worldwide_stats(WORLDWIDE_URL)
elif user_option == 2:
print_list_to_user()
elif user_option == 3:
# Check if there are command line arguments
country_input = input("Please enter a country name or two-letter"\
+ " code of country to see COVID-19 stats.\n")
print("\n")
country = 'https://thevirustracker.com/free-api?countryTotal={}'\
.format(get_country_code(country_input))
get_country_stats(country)
elif user_option == 4:
prep_data()
else:
pass
def print_list_to_user():
with open('countries-json/country-by-abbreviation.json') as json_file:
number = 0
string = ""
for line in yaml.safe_load(json_file):
string += "{}. {}:{}".format(number, line['COUNTRY'],\
line['ABBREVIATION'] + '\n')
number += 1
number = 0
pager(string)
def check_country_is_valid(country):
"""Given the country full name or two-letter code; check if it's a valid
country by searching the countries.txt file for a match.
@param Country full name or country two-letter code.
@return True if country is valid False otherwise.
"""
l = []
fhandler = open('countries.txt', 'r')
for line in fhandler:
temp = line.strip('\n').split(":")
for e in temp:
l.append(e)
fhandler.close()
if country.upper() in l:
return True
else:
return False
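# check_country_is_valid assumes countries.txt holds one "COUNTRY:CODE" pair
# per line (format inferred from the split(":") above), e.g.:
#   UNITED STATES:US
#   GERMANY:DE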
def get_worldwide_stats(url):
"""Pull the world wide data from:
https://thevirustracker.com/free-api?global=stats
@param url of the worldwide stats
"""
response = requests.get(url, headers={"User-Agent": "XY"})
content = json.loads(response.content.decode())
#TODO: format to f strings for cleaner look
print()
print("Total cases: {val:,}".format(val=content['results'][0]['total_cases']))
print("Total New cases: {val:,}".format(val=content['results'][0]['total_new_cases_today']))
print("Total Recovered cases: {val:,}".format(val=content['results'][0]['total_recovered']))
print("Total Unresolved cases: {val:,}".format(val=content['results'][0]['total_unresolved']))
print("Total Deaths: {val:,}".format(val=content['results'][0]['total_deaths']))
print("Total Active Cases: {val:,}".format(val=content['results'][0]['total_active_cases']))
print("Total Serious Cases: {val:,}".format(val=content['results'][0]['total_serious_cases']))
death_rate = ((int(content['results'][0]['total_deaths'])) /\
(int(content['results'][0]['total_cases']))) * 100
print("Death Rate: {0:.2f}%".format(death_rate), '\n')
if len(sys.argv) == 1:
ask_user_if_continue()
# We are on script mode. Exit.
else:
sys.exit()
def get_country_stats(data):
"""Pull the world wide data from:
https://thevirustracker.com/free-api?global=stats
https://thevirustracker.com/free-api?countryTotal={@param}
@param url of the specific country stats
"""
response = requests.get(data, headers={"User-Agent": "XY"})
content = json.loads(response.content.decode())
#TODO: format to f strings for cleaner look
print('Country:', content['countrydata'][0]['info']['title'])
print("Total Cases: {val:,}".format(val=content['countrydata'][0]['total_cases']))
print('Total Active Cases: {val:,}'.format(val=content['countrydata'][0]['total_active_cases']))
print('Total Cases Recovered: {val:,}'.format(val=content['countrydata'][0]['total_recovered']))
print('Total Unresolved Cases: {val:,}'.format(val=content['countrydata'][0]['total_unresolved']))
print('Total Deaths Reported: {val:,}'.format(val=content['countrydata'][0]['total_deaths']), '\n')
death_rate = ((int(content['countrydata'][0]['total_deaths'])) /\
(int(content['countrydata'][0]['total_cases']))) * 100
print("Death Rate: {0:.2f}%".format(death_rate), '\n')
if len(sys.argv) == 1:
ask_user_if_continue()
# We are on script mode. Exit.
else:
sys.exit(0)
def prep_data():
'''Format the data for better visualization.
Format: Date Location New Cases New Deaths Total Cases Total Deaths
'''
amount_of_countries = int(input('How many countries would you like to'\
+ ' compare data? (15 countries max.) '))
if amount_of_countries <= 1 or amount_of_countries > 15:
# Default will be 10 if the number given as parameter is too high or
# too low
amount_of_countries = 10
data = pd.read_csv('https://covid.ourworldindata.org/data/ecdc/full_data.csv')
# Format the dates
data['date'] = [dt.datetime.strptime(x, '%Y-%m-%d') for x in data['date']]
# Format colum titles
data.columns = ['Date', 'Country', 'New Cases', 'New Deaths', 'Total Cases',\
'Total Deaths']
# Exclude countries from the data
countries_to_exclude = ['World']
data = data.loc[~(data['Country'].isin(countries_to_exclude))]
    # Group the data by Country and Date, keeping only Total Cases and Total Deaths
    data = pd.DataFrame(data.groupby(['Country', 'Date'])[['Total Cases',
        'Total Deaths']].sum()).reset_index()
data = data.sort_values(by=['Country', 'Date'], ascending=False)
filtered_data = data.drop_duplicates(subset=['Country'], keep='first')
plot_data('Country', 'Total Cases', 'Total cases in the World',\
filtered_data, size=amount_of_countries)
def plot_data(parameter, value, title, data, size):
'''Plot cases and deaths as bar plot for X countries.
Function to plot bar plots using Seaborn.
'''
pyplot.style.use('dark_background')
f, ax = pyplot.subplots(1,1, figsize=(size*2, 5))
data = data.sort_values([value], ascending=False).reset_index(drop=True)
g = seaborn.barplot(data[parameter][0:size], data[value][0:size], palette='Set3')
g.set_title('Number of {} - highest {} values'.format(title, size))
pyplot.show()
if len(sys.argv) <= 1:
print('\n')
ask_user_if_continue()
else:
sys.exit()
def ask_user_if_continue():
decision = input("Would you like to continue using COVID-19 Scrapper? (y/n)")
if decision == 'y':
print_menu()
elif decision == 'n':
print("Thank you for using COVID-19 Scrapper. Stay safe!")
exit()
def get_country_code(country):
"""Retrieve the two-letter code from the .json file
and return the code.
"""
country_code = ""
if check_country_is_valid(country):
pass
else:
print("Please enter a valid country name or two-letter code.")
print("Consult the available country list with -list")
print('----------------------------------------------------------------------')
sys.exit(1)
with open('countries-json/country-by-abbreviation.json') as json_file:
country = country.upper()
if len(country) > 2:
for line in yaml.safe_load(json_file):
if line['COUNTRY'] == country:
country_code = line['ABBREVIATION']
return country_code
else:
return country
if __name__ == "__main__":
WORLDWIDE_URL = 'https://thevirustracker.com/free-api?global=stats'
main()
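# Example invocations (argument handling defined in main() above; the script
# file name is a placeholder):
#   python covid_scrapper.py -w                 # worldwide stats
#   python covid_scrapper.py -list              # available countries
#   python covid_scrapper.py -s United States   # country stats; everything
#                                               # after argv[1] is joined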
| [
"pandas.read_csv",
"argparse.ArgumentParser",
"datetime.datetime.strptime",
"matplotlib.pyplot.style.use",
"requests.get",
"pydoc.pager",
"time.sleep",
"yaml.safe_load",
"sys.exit",
"seaborn.barplot",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((4822, 4835), 'pydoc.pager', 'pager', (['string'], {}), '(string)\n', (4827, 4835), False, 'from pydoc import pager\n'), ((5589, 5636), 'requests.get', 'requests.get', (['url'], {'headers': "{'User-Agent': 'XY'}"}), "(url, headers={'User-Agent': 'XY'})\n", (5601, 5636), False, 'import requests\n'), ((6972, 7020), 'requests.get', 'requests.get', (['data'], {'headers': "{'User-Agent': 'XY'}"}), "(data, headers={'User-Agent': 'XY'})\n", (6984, 7020), False, 'import requests\n'), ((8513, 8584), 'pandas.read_csv', 'pd.read_csv', (['"""https://covid.ourworldindata.org/data/ecdc/full_data.csv"""'], {}), "('https://covid.ourworldindata.org/data/ecdc/full_data.csv')\n", (8524, 8584), True, 'import pandas as pd\n'), ((9639, 9674), 'matplotlib.pyplot.style.use', 'pyplot.style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (9655, 9674), False, 'from matplotlib import pyplot\n'), ((9687, 9731), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(1)'], {'figsize': '(size * 2, 5)'}), '(1, 1, figsize=(size * 2, 5))\n', (9702, 9731), False, 'from matplotlib import pyplot\n'), ((9814, 9891), 'seaborn.barplot', 'seaborn.barplot', (['data[parameter][0:size]', 'data[value][0:size]'], {'palette': '"""Set3"""'}), "(data[parameter][0:size], data[value][0:size], palette='Set3')\n", (9829, 9891), False, 'import seaborn\n'), ((9968, 9981), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (9979, 9981), False, 'from matplotlib import pyplot\n'), ((4610, 4635), 'yaml.safe_load', 'yaml.safe_load', (['json_file'], {}), '(json_file)\n', (4624, 4635), False, 'import yaml\n'), ((6706, 6716), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6714, 6716), False, 'import sys\n'), ((7991, 8002), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7999, 8002), False, 'import sys\n'), ((8628, 8663), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['x', '"""%Y-%m-%d"""'], {}), "(x, '%Y-%m-%d')\n", (8648, 8663), True, 'import datetime as dt\n'), ((10079, 10089), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10087, 10089), False, 'import sys\n'), ((10795, 10806), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10803, 10806), False, 'import sys\n'), ((541, 646), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""COVID Scrapper v0.0.2"""', 'epilog': '"""Thanks for using our service."""'}), "(description='COVID Scrapper v0.0.2', epilog=\n 'Thanks for using our service.')\n", (564, 646), False, 'import argparse\n'), ((1203, 1214), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1211, 1214), False, 'import sys\n'), ((2838, 2846), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (2843, 2846), False, 'from time import sleep\n'), ((10971, 10996), 'yaml.safe_load', 'yaml.safe_load', (['json_file'], {}), '(json_file)\n', (10985, 10996), False, 'import yaml\n'), ((1298, 1309), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1306, 1309), False, 'import sys\n'), ((1381, 1392), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1389, 1392), False, 'import sys\n')] |
import sys
class Debugger(object):
"""Display debugging messages if toggled"""
def __init__(self, boolean):
self.report = boolean
self.indent = 0
def say(self, msg, indent=0, end='\n'):
self.indent = indent or self.indent
if self.report:
indent = self.indent * '\t'
fmtmsg = f'{indent}{msg}{end}'
sys.stderr.write(fmtmsg) | [
"sys.stderr.write"
] | [((378, 402), 'sys.stderr.write', 'sys.stderr.write', (['fmtmsg'], {}), '(fmtmsg)\n', (394, 402), False, 'import sys\n')] |
import os
import pandas as pd
if __name__ == "__main__":
print("In what directory shall we parse?")
to_dir = input(">>> ")
for root, dirs, files in os.walk(os.getcwd() + "/" + to_dir, topdown=False):
operating_dir = root
if "fort.58" in str(root):
if "strip_hefesto_outfile.csv" in os.listdir(operating_dir):
os.remove(operating_dir + "/strip_hefesto_outfile.csv")
outfile = open(operating_dir + "/strip_hefesto_outfile.csv", 'a')
for i in os.listdir(operating_dir):
f = operating_dir + "/" + i
if os.path.getsize(f) > 5:
if "HeFESTo" in i:
star_name = i.replace("fort.58.control.", "").replace("_fort.58", "").replace("_bsp.txt_bsp",
"").replace(
"fort.58_", "").replace("_fort58", "").replace("_BSP_HeFESTo_Output_File", "").replace(
"_MORB_HeFESTo_Output_File", "")
df = pd.read_fwf(f, colspecs='infer')
if 'rho' in df.keys():
print("Working on {}".format(i))
rho = df['rho'].tolist()
# depth = df['depth'].tolist()
rho_str = star_name + "," + ",".join(str(z) for z in rho)
outfile.write(rho_str + "\n")
print("Finished with {}".format(i))
outfile.close() | [
"os.path.getsize",
"os.listdir",
"os.getcwd",
"pandas.read_fwf",
"os.remove"
] | [((541, 566), 'os.listdir', 'os.listdir', (['operating_dir'], {}), '(operating_dir)\n', (551, 566), False, 'import os\n'), ((183, 194), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (192, 194), False, 'import os\n'), ((340, 365), 'os.listdir', 'os.listdir', (['operating_dir'], {}), '(operating_dir)\n', (350, 365), False, 'import os\n'), ((384, 439), 'os.remove', 'os.remove', (["(operating_dir + '/strip_hefesto_outfile.csv')"], {}), "(operating_dir + '/strip_hefesto_outfile.csv')\n", (393, 439), False, 'import os\n'), ((633, 651), 'os.path.getsize', 'os.path.getsize', (['f'], {}), '(f)\n', (648, 651), False, 'import os\n'), ((1141, 1173), 'pandas.read_fwf', 'pd.read_fwf', (['f'], {'colspecs': '"""infer"""'}), "(f, colspecs='infer')\n", (1152, 1173), True, 'import pandas as pd\n')] |
import sys
from dvc.progress import progress
try:
# NOTE: in Python3 raw_input() was renamed to input()
input = raw_input
except NameError:
pass
class Prompt(object):
def __init__(self):
self.default = None
def prompt(self, msg, default=False): # pragma: no cover
if self.default is not None:
return self.default
if not sys.stdout.isatty():
return default
answer = input(msg + u' (y/n)\n').lower()
while answer not in ['yes', 'no', 'y', 'n']:
answer = input('Enter \'yes\' or \'no\'.\n').lower()
return answer[0] == "y"
def prompt_password(self, msg): # pragma: no cover
import getpass
if not sys.stdout.isatty():
return None
msg = 'Enter password for {}:\n'.format(msg)
if not progress.is_finished:
msg = u'\n' + msg
return getpass.getpass(msg)
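# Minimal usage sketch:
#   if Prompt().prompt("Remove the cache?"):  # returns True on "y"/"yes"
#       ...
#   password = Prompt().prompt_password("remote storage")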
| [
"sys.stdout.isatty",
"getpass.getpass"
] | [((912, 932), 'getpass.getpass', 'getpass.getpass', (['msg'], {}), '(msg)\n', (927, 932), False, 'import getpass\n'), ((383, 402), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (400, 402), False, 'import sys\n'), ((729, 748), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (746, 748), False, 'import sys\n')] |
from flask import Flask, render_template, request
from chatterbot import ChatBot
import pypyodbc as odbc
import re
app = Flask(__name__)
CRMFBot = ChatBot("Chatterbot", storage_adapter="chatterbot.storage.SQLStorageAdapter")
@app.route("/")
def home():
return render_template("index.html")
@app.route("/get")
def get_bot_response():
userText = request.args.get('msg')
db_host = 'python1'
db_name = 'projectdetails'
db_user = 'sa'
db_password = '$'
mudid='12345'
    weekno = re.findall(r'\d+', userText)
    req = userText.upper().find('TARGET')
def get_target_details(required,weekno):
connection_string = 'DSN=' + db_host + ';Database=' + db_name + ';UID=' + db_user + ';PWD=' + db_password + ';'
db = odbc.connect(connection_string)
cur=db.cursor()
SQL= "select " + required + " from targetdetails where MUDID = '" + mudid +"' and weekno = '" + weekno[0] +"';"
cur.execute(SQL)
output=cur.fetchone()
print(output[0])
print(type(output))
print(type(output[0]))
if output[0] == '':
            res = "Most likely, you don't have " + required + ' for week no ' + str(weekno[0])
print(res)
return res
else:
res = 'Your ' + required + ' for week ' + str(weekno[0]) + ' is ' + str(output[0])
print(res)
return res
if req != -1 and weekno:
required='target'
return get_target_details(required,weekno)
elif req == -1 and weekno:
required='achieved'
return get_target_details(required,weekno)
else:
result = str(CRMFBot.get_response(userText))
return result
if __name__ == "__main__":
app.run()
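# Example request against the /get route above (message text is a placeholder):
#   GET /get?msg=what%20is%20my%20target%20for%20week%203
# A message containing "target" plus a week number returns the target column;
# a message with only a week number returns the achieved value instead.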
| [
"flask.render_template",
"flask.request.args.get",
"pypyodbc.connect",
"flask.Flask",
"chatterbot.ChatBot",
"re.findall"
] | [((122, 137), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (127, 137), False, 'from flask import Flask, render_template, request\n'), ((148, 225), 'chatterbot.ChatBot', 'ChatBot', (['"""Chatterbot"""'], {'storage_adapter': '"""chatterbot.storage.SQLStorageAdapter"""'}), "('Chatterbot', storage_adapter='chatterbot.storage.SQLStorageAdapter')\n", (155, 225), False, 'from chatterbot import ChatBot\n'), ((265, 294), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (280, 294), False, 'from flask import Flask, render_template, request\n'), ((354, 377), 'flask.request.args.get', 'request.args.get', (['"""msg"""'], {}), "('msg')\n", (370, 377), False, 'from flask import Flask, render_template, request\n'), ((505, 533), 're.findall', 're.findall', (['"""\\\\d+"""', 'userText'], {}), "('\\\\d+', userText)\n", (515, 533), False, 'import re\n'), ((751, 782), 'pypyodbc.connect', 'odbc.connect', (['connection_string'], {}), '(connection_string)\n', (763, 782), True, 'import pypyodbc as odbc\n')] |
import logging
from os import path
from sys import exit
from typing import Any, Dict, List, Optional, Tuple, Union
import coloredlogs
import httpx
import praw
from util import Utility
log: logging.Logger = logging.getLogger(__name__)
coloredlogs.install(level="INFO", fmt="[%(asctime)s] %(message)s", datefmt="%I:%M:%S")
class Snoopy:
"""Redditor watching service which tracks specific user replies in a thread and notifies via Discord."""
def init(self: Any) -> None:
print("Snoopy - Redditor watching service")
print("https://github.com/EthanC/Snoopy\n")
self.configuration: dict = Utility.ReadFile(self, "configuration", "json")
configured: Optional[bool] = Snoopy.LoadConfiguration(self)
if path.isfile("database.json") is False:
log.warning("Could not find database, creating it")
Utility.WriteFile(self, "database", "json", {})
self.database: dict = Utility.ReadFile(self, "database", "json")
if (configured is True) and (self.database is not None):
log.info("Loaded configuration and database")
try:
self.reddit: praw.reddit.Reddit = praw.Reddit(
username=self.username,
password=self.password,
client_id=self.clientId,
client_secret=self.clientSecret,
user_agent="Snoopy by /u/LackingAGoodName (https://github.com/EthanC/Snoopy)",
)
if self.reddit.read_only is True:
raise Exception("read-only mode is active")
except Exception as e:
log.critical(f"Failed to authenticate with Reddit, {e}")
return
log.info(f"Authenticated with Reddit as /u/{self.reddit.user.me().name}")
for configuration in self.configurations:
Snoopy.CheckComments(self, configuration)
def LoadConfiguration(self: Any) -> Optional[bool]:
"""
Set the configuration values specified in configuration.json
Return True if configuration sucessfully loaded.
"""
try:
self.username: str = self.configuration["reddit"]["username"]
self.password: str = self.configuration["reddit"]["password"]
self.clientId: str = self.configuration["reddit"]["clientId"]
self.clientSecret: str = self.configuration["reddit"]["clientSecret"]
self.configurations: List[
Dict[str, Union[bool, str, dict]]
] = self.configuration["configurations"]
self.watermark: str = "[](#SnoopyReply)"
self.template: str = "[Comment](https://reddit.com{}?context=1000) by [\\/u\\/{}](https://reddit.com/user/{}) ({}):\n\n{}\n\n"
return True
except Exception as e:
log.fatal(f"Failed to load configuration, {e}")
def CheckComments(self: Any, configuration: dict) -> None:
"""
Check the latest comments in a Subreddit. Act upon comments which
fit the configured requirements.
"""
if configuration.get("enabled") is not True:
return
if (s := configuration.get("subreddit")) is None:
return
subreddit: praw.reddit.Subreddit = self.reddit.subreddit(s)
flairs: List[Dict[str, str]] = configuration.get("userFlairs", [])
record: Optional[int] = self.database.get(subreddit.display_name.lower())
if record is not None:
count: int = 0
latest: Tuple[bool, int] = (False, 0)
try:
comment: praw.reddit.Comment
for comment in subreddit.comments(limit=None):
created: int = int(comment.created_utc)
flairId: Optional[str] = comment.author_flair_template_id
# Comments are returned newest to oldest, so we want
# to record the first comment which is checked.
if latest[0] is False:
latest: Tuple[bool, int] = (True, created)
if comment.removed is True:
continue
if created <= record:
break
count += 1
if flairId is not None:
for flair in flairs:
if flair["id"] == flairId:
Snoopy.ProcessComment(
self, comment, flair["name"], configuration
)
except Exception as e:
log.error(
f"Failed to get comments from /r/{subreddit.display_name}, {e}"
)
return
if count == 0:
log.info(f"No new comments found in /r/{subreddit.display_name}")
return
log.info(f"Checked {count} new comments in /r/{subreddit.display_name}")
Snoopy.UpdateDatabase(self, subreddit.display_name, latest[1])
else:
try:
comment: praw.reddit.Comment
for comment in subreddit.comments(limit=1):
Snoopy.UpdateDatabase(
self, subreddit.display_name, int(comment.created_utc)
)
break
log.info(f"No record found for /r/{subreddit.display_name}, created it")
except Exception as e:
log.error(
f"Failed to get the latest comment in /r/{subreddit.display_name}, {e}"
)
def ProcessComment(
self: Any, comment: praw.reddit.Comment, flair: str, configuration: dict
) -> None:
"""
Add the specified comment to the thread's comment compilation,
perform miscellaneous tasks based on configuration.
"""
submission: praw.reddit.Submission = comment.submission
subreddit: str = comment.subreddit.display_name
existing: Tuple[bool, Optional[praw.reddit.Comment]] = (False, None)
log.info(
f"Found comment by /u/{comment.author.name} ({flair}) in /r/{subreddit}"
)
try:
topLevel: praw.reddit.Comment
for topLevel in submission.comments:
try:
if topLevel.removed is True:
continue
except Exception as e:
log.error(
f"Unable to determine if comment is removed in /r/{subreddit}"
)
if topLevel.author == self.reddit.user.me():
if topLevel.body.endswith(self.watermark):
existing = (True, topLevel)
break
if topLevel.stickied is True:
topLevel.report(
"Stickied comment replaced, please ensure this was intended"
)
except Exception as e:
log.error(f"Failed to check comments of post in /r/{subreddit}, {e}")
reply: Optional[praw.reddit.Comment] = None
if existing[0] is True:
reply: Optional[praw.reddit.Comment] = Snoopy.UpdateReply(
self, existing[1], comment, flair
)
elif existing[0] is False:
reply: Optional[praw.reddit.Comment] = Snoopy.CreateReply(
self, comment, submission, flair
)
webhook: Dict[str, Union[bool, str]] = configuration.get("webhook", {})
if webhook.get("enabled") is True:
Snoopy.Webhook(self, comment, flair, webhook)
if reply is None:
return
try:
reply.disable_inbox_replies()
except Exception as e:
log.error(
f"Failed to disable inbox replies for comment in /r/{subreddit}, {e}"
)
try:
reply.mod.distinguish(how="yes", sticky=True)
except Exception as e:
log.error(f"Failed to distinguish comment in /r/{subreddit}, {e}")
if configuration.get("lockComment") is True:
try:
reply.mod.lock()
except Exception as e:
log.error(f"Failed to lock comment in /r/{subreddit}, {e}")
if configuration.get("changeFlair") is True:
linkFlairText: Optional[str] = submission.link_flair_text
# This will prevent literal "None" from being added to the
# link flair text when no flair was previously present.
if linkFlairText is None:
linkFlairText: Optional[str] = ""
if linkFlairText.endswith(" Replied)"):
pass
else:
try:
submission.mod.flair(text=f"{linkFlairText} ({flair} Replied)")
except Exception as e:
log.error(f"Failed to modify post flair in /r/{subreddit}, {e}")
def CreateReply(
self: Any,
reply: Optional[praw.reddit.Comment],
parent: praw.reddit.Submission,
flair: str,
) -> Optional[praw.reddit.Comment]:
"""Create a new comment in the specified thread."""
try:
compilation: Optional[praw.reddit.Comment] = parent.reply(
self.template.format(
reply.permalink,
reply.author.name,
reply.author.name,
flair,
Utility.Quote(self, reply.body),
)
+ self.watermark
)
return compilation
except Exception as e:
log.error(
f"Failed to reply to post in /r/{parent.subreddit.display_name}, {e}"
)
def UpdateReply(
self: Any,
compilation: Optional[praw.reddit.Comment],
reply: Optional[praw.reddit.Comment],
flair: str,
) -> Optional[praw.reddit.Comment]:
"""Update the existing comment in the specified thread."""
try:
compilation.edit(
compilation.body.split(self.watermark)[0]
+ self.template.format(
reply.permalink,
reply.author.name,
reply.author.name,
flair,
Utility.Quote(self, reply.body),
)
+ self.watermark
)
return compilation
except Exception as e:
log.error(
f"Failed to edit comment in /r/{compilation.subreddit.display_name}, {e}"
)
def UpdateDatabase(self: Any, subreddit: str, commentTime: int) -> None:
"""Add the latest seen comment's timestamp to database.json"""
self.database.update({subreddit.lower(): commentTime})
Utility.WriteFile(self, "database", "json", self.database)
def Webhook(
self: Any, comment: praw.reddit.Comment, flair: str, configuration: dict
) -> None:
"""Send the specified comment to Discord via Webhook."""
embed: dict = {
"username": configuration["name"],
"avatar_url": configuration["avatarUrl"],
"embeds": [
{
"color": int("FF5700", base=16),
"author": {
"name": f"/u/{comment.author.name} ({flair})",
"url": f"https://reddit.com/user/{comment.author.name}",
"icon_url": comment.author.icon_img,
},
"title": f"Comment in /r/{comment.subreddit.display_name}",
"url": f"https://reddit.com{comment.permalink}?context=1000",
"description": Utility.Truncate(
self, Utility.Quote(self, comment.body), 2045
),
"footer": {
"icon_url": "https://i.imgur.com/zbrkjFR.png",
"text": "Snoopy",
},
"timestamp": Utility.NowISO(self),
}
],
}
res: httpx.Response = httpx.post(configuration["url"], json=embed)
# HTTP 204 (Success: No Content)
if (code := res.status_code) != 204:
log.error(f"Failed to POST to Discord Webhook (HTTP {code}), {res.text}")
if __name__ == "__main__":
try:
Snoopy.init(Snoopy)
except KeyboardInterrupt:
exit()
| [
"logging.getLogger",
"util.Utility.ReadFile",
"coloredlogs.install",
"util.Utility.Quote",
"util.Utility.NowISO",
"os.path.isfile",
"praw.Reddit",
"util.Utility.WriteFile",
"sys.exit",
"httpx.post"
] | [((209, 236), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (226, 236), False, 'import logging\n'), ((237, 328), 'coloredlogs.install', 'coloredlogs.install', ([], {'level': '"""INFO"""', 'fmt': '"""[%(asctime)s] %(message)s"""', 'datefmt': '"""%I:%M:%S"""'}), "(level='INFO', fmt='[%(asctime)s] %(message)s', datefmt=\n '%I:%M:%S')\n", (256, 328), False, 'import coloredlogs\n'), ((623, 670), 'util.Utility.ReadFile', 'Utility.ReadFile', (['self', '"""configuration"""', '"""json"""'], {}), "(self, 'configuration', 'json')\n", (639, 670), False, 'from util import Utility\n'), ((946, 988), 'util.Utility.ReadFile', 'Utility.ReadFile', (['self', '"""database"""', '"""json"""'], {}), "(self, 'database', 'json')\n", (962, 988), False, 'from util import Utility\n'), ((10967, 11025), 'util.Utility.WriteFile', 'Utility.WriteFile', (['self', '"""database"""', '"""json"""', 'self.database'], {}), "(self, 'database', 'json', self.database)\n", (10984, 11025), False, 'from util import Utility\n'), ((12299, 12343), 'httpx.post', 'httpx.post', (["configuration['url']"], {'json': 'embed'}), "(configuration['url'], json=embed)\n", (12309, 12343), False, 'import httpx\n'), ((751, 779), 'os.path.isfile', 'path.isfile', (['"""database.json"""'], {}), "('database.json')\n", (762, 779), False, 'from os import path\n'), ((867, 914), 'util.Utility.WriteFile', 'Utility.WriteFile', (['self', '"""database"""', '"""json"""', '{}'], {}), "(self, 'database', 'json', {})\n", (884, 914), False, 'from util import Utility\n'), ((12621, 12627), 'sys.exit', 'exit', ([], {}), '()\n', (12625, 12627), False, 'from sys import exit\n'), ((1181, 1387), 'praw.Reddit', 'praw.Reddit', ([], {'username': 'self.username', 'password': 'self.password', 'client_id': 'self.clientId', 'client_secret': 'self.clientSecret', 'user_agent': '"""Snoopy by /u/LackingAGoodName (https://github.com/EthanC/Snoopy)"""'}), "(username=self.username, password=self.password, client_id=self.\n clientId, client_secret=self.clientSecret, user_agent=\n 'Snoopy by /u/LackingAGoodName (https://github.com/EthanC/Snoopy)')\n", (1192, 1387), False, 'import praw\n'), ((12203, 12223), 'util.Utility.NowISO', 'Utility.NowISO', (['self'], {}), '(self)\n', (12217, 12223), False, 'from util import Utility\n'), ((9603, 9634), 'util.Utility.Quote', 'Utility.Quote', (['self', 'reply.body'], {}), '(self, reply.body)\n', (9616, 9634), False, 'from util import Utility\n'), ((11939, 11972), 'util.Utility.Quote', 'Utility.Quote', (['self', 'comment.body'], {}), '(self, comment.body)\n', (11952, 11972), False, 'from util import Utility\n'), ((10457, 10488), 'util.Utility.Quote', 'Utility.Quote', (['self', 'reply.body'], {}), '(self, reply.body)\n', (10470, 10488), False, 'from util import Utility\n')] |
# pylint: disable=C0103
# pylint: disable=unnecessary-lambda
"""
This module illustrates the humble object principle
whereby the business logic is
separated from the external interfaces.
class CalendarClockDevice
This class is an implementation of a Tango Device.
No business logic exists in this class.
class CalendarClockModel
This class encapsulates all the business logic for
the CalendarClock device.
Tests in tests/test_calendar_clock.py
class TestCalendarClockModel
This class tests the business logic without having
to instantiate the Tango Device
class TestCalendarClockDevice
This class uses `DeviceTestContext` to test the
Tango device by instantiating the
device class and proxies to device.
"""
from enum import IntEnum
from ska_tango_base import SKABaseDevice
from tango import AttrWriteType, DevState
from tango.server import attribute, command, device_property, run
DEFAULT_YEAR = 1
DEFAULT_MONTH = 2
DEFAULT_DAY = 3
DEFAULT_HOUR = 4
DEFAULT_MINUTE = 5
DEFAULT_SECOND = 6
class DateStyle(IntEnum):
"""Style of the date"""
BRITISH = 0 # DevEnum's must start at 0
AMERICAN = 1 # and increment by 1
class CalendarClockModel: # pylint: disable=R0902
"""This model illustrates the humble object concept whereby
    the business logic is separated from external component interfaces.
"""
months = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
date_style = DateStyle.BRITISH
@staticmethod
def leapyear(year):
"""
The method leapyear returns True if the parameter year
is a leap year, False otherwise
"""
if not year % 4 == 0:
return False
if not year % 100 == 0:
return True
if not year % 400 == 0:
return False
return True
@property
def calendar_date(self):
"""Formatted date"""
date_format = "{0:02d}/{1:02d}/{2:04d}"
if self.date_style == DateStyle.BRITISH:
return date_format.format(self.day, self.month, self.year)
return date_format.format(self.month, self.day, self.year)
@property
def clock_time(self):
"""Formatted time"""
time_format = "{0:02d}:{1:02d}:{2:02d}"
return time_format.format(self.hour, self.minute, self.second)
def __init__(
self, day, month, year, hour, minute, second
): # pylint: disable=R0913
"""Init the model"""
self.year = None
self.month = None
self.day = None
self.hour = None
self.minute = None
self.second = None
self.set_calendar(day, month, year)
self.set_clock(hour, minute, second)
def reset(self):
"""Resets the model"""
self.day = DEFAULT_DAY
self.month = DEFAULT_MONTH
self.year = DEFAULT_YEAR
self.hour = DEFAULT_HOUR
self.minute = DEFAULT_MINUTE
self.second = DEFAULT_SECOND
self.date_style = DateStyle.BRITISH
def set_calendar(self, day, month, year):
"""
day, month, year have to be integer values and year has to be
a four digit year number
"""
if (
isinstance(day, int)
and isinstance(month, int)
and isinstance(year, int)
):
self.day = day
self.month = month
self.year = year
else:
raise TypeError("day, month, year have to be integers!")
def set_clock(self, hour, minute, second):
"""
The parameters hour, minute and second have to be
integers and must satisfy the following equations:
0 <= h < 24
0 <= m < 60
0 <= s < 60
"""
if isinstance(hour, int) and 0 <= hour < 24:
self.hour = hour
else:
            raise TypeError("Hour has to be an integer between 0 and 23!")
if isinstance(minute, int) and 0 <= minute < 60:
self.minute = minute
else:
            raise TypeError("Minute has to be an integer between 0 and 59!")
if isinstance(second, int) and 0 <= second < 60:
self.second = second
else:
            raise TypeError("Second has to be an integer between 0 and 59!")
def tick(self):
"""
This method lets the clock "tick", this means that the
internal time will be advanced by one second.
Examples:
>>> x = Clock(12,59,59)
>>> print(x)
12:59:59
>>> x.tick()
>>> print(x)
13:00:00
>>> x.tick()
>>> print(x)
13:00:01
"""
if self.second == 59:
self.second = 0
if self.minute == 59:
self.minute = 0
if self.hour == 23:
self.hour = 0
self.advance()
else:
self.hour += 1
else:
self.minute += 1
else:
self.second += 1
def advance(self):
"""
This method advances to the next date.
"""
max_days = self.months[self.month - 1]
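        # February gains a day in leap years; day, month, and year roll over below.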
if self.month == 2 and self.leapyear(self.year):
max_days += 1
if self.day == max_days:
self.day = 1
if self.month == 12:
self.month = 1
self.year += 1
else:
self.month += 1
else:
self.day += 1
def switch_on(self):
"""Some sample code of how behaviour is driven by device state"""
current_state = self.get_device_state()
if current_state == DevState.ON:
return
if current_state not in [DevState.INIT, DevState.STANDBY]:
self.set_device_state(DevState.ON)
if current_state == DevState.STANDBY:
self.logger.info("Switching on from STANDBY state")
self.set_device_state(DevState.ON)
if current_state == DevState.INIT:
raise Exception(
"'SwitchOn' command failed. CalendarClock is in 'INIT' state."
)
def switch_off(self):
"""Switch the device off"""
if self.get_device_state() != DevState.OFF:
self.logger.info("Swithed off CalendarClockModel")
self.set_device_state(DevState.OFF)
def __str__(self):
"""String representation of the model"""
datetime_style = "{0:02d}/{1:02d}/{2:04d} {3:02d}:{4:02d}:{5:02d}"
if self.date_style == DateStyle.BRITISH:
return datetime_style.format(
self.day,
self.month,
self.year,
self.hour,
self.minute,
self.second,
)
return datetime_style.format(
self.month,
self.day,
self.year,
self.hour,
self.minute,
self.second,
)
class CalendarClockDevice(SKABaseDevice):
"""The Tango device CalendarClockDevice"""
TimeZone = device_property(dtype="str", default_value="UTC")
def __init__(self, *args, **kwargs):
self.model = CalendarClockModel(
DEFAULT_DAY,
DEFAULT_MONTH,
DEFAULT_YEAR,
DEFAULT_HOUR,
DEFAULT_MINUTE,
DEFAULT_SECOND,
)
super().__init__(*args, **kwargs)
def init_device(self):
super().init_device()
self.model.get_device_state = self.get_state # pylint: disable=W0201
self.model.set_device_state = self.set_state # pylint: disable=W0201
self.model.logger = self.logger # pylint: disable=W0201
self.model.timezone = self.TimeZone # pylint: disable=W0201
self.model.reset()
self.set_state(DevState.UNKNOWN)
@attribute(dtype=DateStyle, access=AttrWriteType.READ_WRITE)
def date_style(self):
"""date_style attribute"""
return self.model.date_style
def write_date_style(self, value):
"""Set the date_style"""
self.model.date_style = value
@attribute
def day(self):
"""The day of the month"""
return self.model.day
@attribute
def month(self):
"""The month in the year"""
return self.model.month
@attribute
def year(self):
"""The year"""
return self.model.year
@attribute(
dtype=str,
doc="Date string in the format 'dd/mm/yyyy'.",
access=AttrWriteType.READ_WRITE,
)
def calendar_date(self):
"""Show formatted date"""
return self.model.calendar_date
def write_calendar_date(self, value):
"""Set the date"""
day, month, year = list(map(lambda x: int(x), value.split("/")))
self.model.set_calendar(day, month, year)
@attribute(
dtype=str,
doc="Time string in the format 'hh:mm:ss'.",
access=AttrWriteType.READ_WRITE,
)
def clock_time(self):
"""Show the formatted time"""
return self.model.clock_time
def write_clock_time(self, value):
"""Set the time"""
hour, minute, second = list(map(lambda x: int(x), value.split(":")))
self.model.set_clock(hour, minute, second)
@attribute
def hour(self):
"""The hour in the day"""
return self.model.hour
@attribute
def minute(self):
"""The minute in the hour"""
return self.model.minute
@attribute
def second(self):
"""The second in the minute"""
return self.model.second
@command
    def Advance(self): # pylint: disable=C0103
        """Advance the clock 1 day"""
self.model.advance()
@command
    def Tick(self): # pylint: disable=C0103
        """Advance the clock 1 second"""
self.model.tick()
@command
    def SwitchOn(self): # pylint: disable=C0103
        """Switch the device on"""
self.model.switch_on()
@command
    def SwitchOff(self): # pylint: disable=C0103
        """Switch the device off"""
self.model.switch_off()
@command(dtype_out=str)
def GetFormattedTime(self): # pylint: disable=C0103
"""Get the formatted string of the datetime"""
return str(self.model)
def main(args=None, **kwargs):
"""Run CalendarClockDevice"""
return run((CalendarClockDevice,), args=args, **kwargs)
if __name__ == "__main__":
main()
| [
"tango.server.device_property",
"tango.server.run",
"tango.server.command",
"tango.server.attribute"
] | [((7098, 7147), 'tango.server.device_property', 'device_property', ([], {'dtype': '"""str"""', 'default_value': '"""UTC"""'}), "(dtype='str', default_value='UTC')\n", (7113, 7147), False, 'from tango.server import attribute, command, device_property, run\n'), ((7865, 7924), 'tango.server.attribute', 'attribute', ([], {'dtype': 'DateStyle', 'access': 'AttrWriteType.READ_WRITE'}), '(dtype=DateStyle, access=AttrWriteType.READ_WRITE)\n', (7874, 7924), False, 'from tango.server import attribute, command, device_property, run\n'), ((8435, 8540), 'tango.server.attribute', 'attribute', ([], {'dtype': 'str', 'doc': '"""Date string in the format \'dd/mm/yyyy\'."""', 'access': 'AttrWriteType.READ_WRITE'}), '(dtype=str, doc="Date string in the format \'dd/mm/yyyy\'.", access=\n AttrWriteType.READ_WRITE)\n', (8444, 8540), False, 'from tango.server import attribute, command, device_property, run\n'), ((8869, 8972), 'tango.server.attribute', 'attribute', ([], {'dtype': 'str', 'doc': '"""Time string in the format \'hh:mm:ss\'."""', 'access': 'AttrWriteType.READ_WRITE'}), '(dtype=str, doc="Time string in the format \'hh:mm:ss\'.", access=\n AttrWriteType.READ_WRITE)\n', (8878, 8972), False, 'from tango.server import attribute, command, device_property, run\n'), ((10134, 10156), 'tango.server.command', 'command', ([], {'dtype_out': 'str'}), '(dtype_out=str)\n', (10141, 10156), False, 'from tango.server import attribute, command, device_property, run\n'), ((10378, 10426), 'tango.server.run', 'run', (['(CalendarClockDevice,)'], {'args': 'args'}), '((CalendarClockDevice,), args=args, **kwargs)\n', (10381, 10426), False, 'from tango.server import attribute, command, device_property, run\n')] |
"""運動学関係
2次のマクローリン展開
"""
import sympy as sy
from sympy import sqrt
import sumi_maclaurin_2.P_0 as P_0
import sumi_maclaurin_2.P_1 as P_1
import sumi_maclaurin_2.P_2 as P_2
import sumi_maclaurin_2.R_0_0 as R_0_0
import sumi_maclaurin_2.R_0_1 as R_0_1
import sumi_maclaurin_2.R_0_2 as R_0_2
import sumi_maclaurin_2.R_0_0 as R_1_0
import sumi_maclaurin_2.R_0_1 as R_1_1
import sumi_maclaurin_2.R_0_2 as R_1_2
import sumi_maclaurin_2.R_0_0 as R_2_0
import sumi_maclaurin_2.R_0_1 as R_2_1
import sumi_maclaurin_2.R_0_2 as R_2_2
class Local:
    """Local coordinate frame utilities
    Assumes identical parameters across all sections
"""
    def P(self, q, xi):
        """Linearized map from actuator space to task space
        (forward kinematics)
"""
return sy.Matrix([[
P_0.f(q, xi),
P_1.f(q, xi),
P_2.f(q, xi)
]]).T
    def R(self, q, xi):
        """Linearized rotation matrix"""
return sy.Matrix([
[R_0_0.f(q, xi), R_0_1.f(q, xi), R_0_2.f(q, xi)],
[R_1_0.f(q, xi), R_1_1.f(q, xi), R_1_2.f(q, xi)],
[R_2_0.f(q, xi), R_2_1.f(q, xi), R_2_2.f(q, xi)],
])
    def MHTM(self, q, xi):
        """Modal homogeneous transformation matrix
        Linearized Homogeneous Transformation Matrix
"""
return sy.Matrix([
[R_0_0.f(q, xi), R_0_1.f(q, xi), R_0_2.f(q, xi), P_0.f(q, xi)],
[R_1_0.f(q, xi), R_1_1.f(q, xi), R_1_2.f(q, xi), P_1.f(q, xi)],
[R_2_0.f(q, xi), R_2_1.f(q, xi), R_2_2.f(q, xi), P_2.f(q, xi)],
[0, 0, 0, 1]
])
class Global(Local):
    """Global representation of positions, rotation matrices, Jacobians, Hessians, etc.
    Assumes identical parameters across all sections.
    Runs quickly.
"""
def __init__(self, N):
"""
Parameters
---
N : int
            Number of sections
"""
print("computing kinematics...")
self.N = N
# self.q_large = q_large
# self.xi_large = xi_large
        self.q_large = sy.Matrix(sy.MatrixSymbol('q_large', 3*N, 1)) # full joint-angle vector
        self.q_dot_large = sy.Matrix(sy.MatrixSymbol('q_dot_large', 3*N, 1)) # full joint angular-velocity vector
        self.xi_large = sy.Matrix(sy.MatrixSymbol('xi_large', N, 1)) # full scalar ξ vector
self.set_local()
self.set_global()
self.set_J_OMEGA()
self.set_J_v()
self.set_H_OMEGA()
self.set_H_v()
print("done computing kinematics!")
def set_local(self,):
self.P_s = []
self.R_s = []
for i in range(self.N):
q = self.q_large[i:i+3, :]
xi = self.xi_large[i, 0]
self.P_s.append(self.P(q, xi))
self.R_s.append(self.R(q, xi))
def set_global(self,):
self.Theta_s = []
self.Phi_s = []
for i in range(self.N):
if i == 0:
self.Theta_s.append(self.R_s[0])
self.Phi_s.append(self.P_s[0])
else:
Theta = self.Theta_s[i-1] * self.R_s[i]
self.Theta_s.append(Theta.subs(self.xi_large[i-1, 0], 1))
Phi = self.Phi_s[i-1] + self.Theta_s[i-1] * self.P_s[i]
self.Phi_s.append(Phi.subs(self.xi_large[i-1, 0], 1))
    def set_J_OMEGA(self,):
        """Compute the hat form of the angular-velocity Jacobian"""
def J_OMEGA_ij(i, j):
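            # Blocks for earlier joints (j < 3i) are conjugated into frame i;
            # the diagonal blocks differentiate R_i w.r.t. its own joint angles.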
if j <= 3*i-1:
return self.R_s[i].T * J_OMEGA_s[i-1][:, 3*j:3*j+3] * self.R_s[i]
elif 3*i <= j <= 3*i+2:
return self.R_s[i].T * sy.diff(self.R_s[i], self.q_large[j, 0])
else:
return sy.zeros(3, 3)
J_OMEGA_s = []
for i in range(self.N):
#print("i = ", i)
J_OMEGAs_i = []
for j in range(3*self.N):
#print("j = ", j)
J_OMEGAs_i.append(J_OMEGA_ij(i, j))
J_OMEGA_s.append(sy.Matrix([J_OMEGAs_i]))
self.J_OMEGA_s = J_OMEGA_s
    def set_J_v(self,):
        """Set the linear-velocity Jacobian"""
def J_v_ij(i, j):
if j < 3*i:
return self.R_s[i].T * \
(J_v_s[i-1][:, j:j+1] + (self.J_OMEGA_s[i][:, 3*j:3*j+3] * self.P_s[i]))
elif 3*i <= j <= 3*i+2:
return self.R_s[i].T * sy.diff(self.P_s[i], self.q_large[j, 0])
else:
return sy.zeros(3, 1)
J_v_s = []
for i in range(self.N):
#print("i = ", i)
J_v_s_i = []
for j in range(3*self.N):
#print("j = ", j)
J_v_s_i.append(J_v_ij(i, j))
J_v_s.append(sy.Matrix([J_v_s_i]))
self.J_v_s = J_v_s
    def set_H_OMEGA(self,):
        """Set the angular-velocity Hessian
        (strictly speaking, a tensor?)
"""
def H_OMEGA_ijk(i, j, k):
if j < 3*i and k < 3*i:
                H_OMEGA_prev = H_OMEGA_s[i-1][3*j:3*j+3, 3*k:3*k+3] # peel one slice off the tensor
#print(H_OMEGA_prev.shape)
return self.R_s[i].T * H_OMEGA_prev * self.R_s[i]
elif j < 3*i and 3*i <= k <= 3*i+2:
R_i_diff_k = sy.diff(self.R_s[i], self.q_large[k, 0])
return R_i_diff_k.T * self.J_OMEGA_s[i-1][:, 3*j:3*j+3] * self.R_s[i] +\
self.R_s[i].T * self.J_OMEGA_s[i-1][:, 3*j:3*j+3] * R_i_diff_k
elif 3*i <= j <= 3*i+2 and k < 3*i:
return sy.zeros(3, 3)
elif 3*i <= j <= 3*i+2 and 3*i <= k <= 3*i+2:
R_i_diff_k = sy.diff(self.R_s[i], self.q_large[k, 0])
R_i_diff_j = sy.diff(self.R_s[i], self.q_large[j, 0])
R_i_diff_j_diff_k = sy.diff(R_i_diff_j, self.q_large[k, 0])
return R_i_diff_k.T * R_i_diff_j + self.R_s[i].T * R_i_diff_j_diff_k
else:
return sy.zeros(3, 3)
H_OMEGA_s = []
for i in range(self.N):
#print("i = ", i)
H_OMEGA_s_i = []
for j in range(3*self.N):
#print("j = ", j)
H_OMEGA_s_ij = []
for k in range(3*self.N):
#print("k = ", k)
H_OMEGA_s_ij.append(H_OMEGA_ijk(i, j, k))
H_OMEGA_s_i.append([sy.Matrix([H_OMEGA_s_ij])])
H_OMEGA_s.append(sy.Matrix(H_OMEGA_s_i))
#print(H_OMEGA_s[-1].shape)
self.H_OMEGA_s = H_OMEGA_s
    def set_H_v(self,):
        """Set the linear-velocity Hessian
        (strictly speaking, a tensor?)
"""
def H_v_ijk(i, j, k):
if j < 3*i and k < 3*i:
return self.R_s[i].T * \
(H_v_s[i-1][3*j:3*j+3, k:k+1] + self.H_OMEGA_s[i-1][3*j:3*j+3, 3*k:3*k+3] * self.P_s[i])
elif j < 3*i and 3*i <= k <= 3*i+2:
R_i_diff_k = sy.diff(self.R_s[i], self.q_large[k, 0])
P_i_diff_k = sy.diff(self.P_s[i], self.q_large[k, 0])
return R_i_diff_k.T *\
(self.J_v_s[i-1][:, j:j+1] + self.J_OMEGA_s[i-1][:, 3*j:3*j+3] * self.P_s[i]) +\
self.R_s[i].T * self.J_OMEGA_s[i][:, 3*j:3*j+3] * P_i_diff_k
elif 3*i <= j <= 3*i+2 and k < 3*i:
return sy.zeros(3, 1)
elif 3*i <= j <= 3*i+2 and 3*i <= k <= 3*i+2:
R_i_diff_k = sy.diff(self.R_s[i], self.q_large[k, 0])
P_i_diff_j = sy.diff(self.P_s[i], self.q_large[j, 0])
P_i_diff_j_diff_k = sy.diff(P_i_diff_j, self.q_large[k, 0])
return R_i_diff_k.T * P_i_diff_j + self.R_s[i].T * P_i_diff_j_diff_k
else:
return sy.zeros(3, 1)
H_v_s = []
for i in range(self.N):
#print("i = ", i)
H_v_s_i = []
for j in range(3*self.N):
#print("j = ", j)
H_v_s_ij = []
for k in range(3*self.N):
#print("k = ", k)
H_v_s_ij.append(H_v_ijk(i, j, k))
H_v_s_i.append([sy.Matrix([H_v_s_ij])])
H_v_s.append(sy.Matrix(H_v_s_i))
#print(H_OMEGA_s[-1].shape)
self.H_v_s = H_v_s
if __name__ == "__main__":
N = 3
hoge = Global(
# sy.Matrix(q_large),
# sy.Matrix(xi_large),
N
)
#print(hoge.J_v_s)
| [
"sumi_maclaurin_2.R_0_0.f",
"sumi_maclaurin_2.R_0_1.f",
"sympy.Matrix",
"sympy.MatrixSymbol",
"sumi_maclaurin_2.R_0_2.f",
"sumi_maclaurin_2.P_1.f",
"sumi_maclaurin_2.P_0.f",
"sumi_maclaurin_2.P_2.f",
"sympy.diff",
"sympy.zeros"
] | [((1980, 2016), 'sympy.MatrixSymbol', 'sy.MatrixSymbol', (['"""q_large"""', '(3 * N)', '(1)'], {}), "('q_large', 3 * N, 1)\n", (1995, 2016), True, 'import sympy as sy\n'), ((2068, 2108), 'sympy.MatrixSymbol', 'sy.MatrixSymbol', (['"""q_dot_large"""', '(3 * N)', '(1)'], {}), "('q_dot_large', 3 * N, 1)\n", (2083, 2108), True, 'import sympy as sy\n'), ((2158, 2191), 'sympy.MatrixSymbol', 'sy.MatrixSymbol', (['"""xi_large"""', 'N', '(1)'], {}), "('xi_large', N, 1)\n", (2173, 2191), True, 'import sympy as sy\n'), ((3916, 3939), 'sympy.Matrix', 'sy.Matrix', (['[J_OMEGAs_i]'], {}), '([J_OMEGAs_i])\n', (3925, 3939), True, 'import sympy as sy\n'), ((4660, 4680), 'sympy.Matrix', 'sy.Matrix', (['[J_v_s_i]'], {}), '([J_v_s_i])\n', (4669, 4680), True, 'import sympy as sy\n'), ((6454, 6476), 'sympy.Matrix', 'sy.Matrix', (['H_OMEGA_s_i'], {}), '(H_OMEGA_s_i)\n', (6463, 6476), True, 'import sympy as sy\n'), ((8345, 8363), 'sympy.Matrix', 'sy.Matrix', (['H_v_s_i'], {}), '(H_v_s_i)\n', (8354, 8363), True, 'import sympy as sy\n'), ((930, 944), 'sumi_maclaurin_2.R_0_0.f', 'R_0_0.f', (['q', 'xi'], {}), '(q, xi)\n', (937, 944), True, 'import sumi_maclaurin_2.R_0_0 as R_0_0\n'), ((946, 960), 'sumi_maclaurin_2.R_0_1.f', 'R_0_1.f', (['q', 'xi'], {}), '(q, xi)\n', (953, 960), True, 'import sumi_maclaurin_2.R_0_1 as R_0_1\n'), ((962, 976), 'sumi_maclaurin_2.R_0_2.f', 'R_0_2.f', (['q', 'xi'], {}), '(q, xi)\n', (969, 976), True, 'import sumi_maclaurin_2.R_0_2 as R_0_2\n'), ((992, 1006), 'sumi_maclaurin_2.R_0_0.f', 'R_1_0.f', (['q', 'xi'], {}), '(q, xi)\n', (999, 1006), True, 'import sumi_maclaurin_2.R_0_0 as R_1_0\n'), ((1008, 1022), 'sumi_maclaurin_2.R_0_1.f', 'R_1_1.f', (['q', 'xi'], {}), '(q, xi)\n', (1015, 1022), True, 'import sumi_maclaurin_2.R_0_1 as R_1_1\n'), ((1024, 1038), 'sumi_maclaurin_2.R_0_2.f', 'R_1_2.f', (['q', 'xi'], {}), '(q, xi)\n', (1031, 1038), True, 'import sumi_maclaurin_2.R_0_2 as R_1_2\n'), ((1054, 1068), 'sumi_maclaurin_2.R_0_0.f', 'R_2_0.f', (['q', 'xi'], {}), '(q, xi)\n', (1061, 1068), True, 'import sumi_maclaurin_2.R_0_0 as R_2_0\n'), ((1070, 1084), 'sumi_maclaurin_2.R_0_1.f', 'R_2_1.f', (['q', 'xi'], {}), '(q, xi)\n', (1077, 1084), True, 'import sumi_maclaurin_2.R_0_1 as R_2_1\n'), ((1086, 1100), 'sumi_maclaurin_2.R_0_2.f', 'R_2_2.f', (['q', 'xi'], {}), '(q, xi)\n', (1093, 1100), True, 'import sumi_maclaurin_2.R_0_2 as R_2_2\n'), ((1283, 1297), 'sumi_maclaurin_2.R_0_0.f', 'R_0_0.f', (['q', 'xi'], {}), '(q, xi)\n', (1290, 1297), True, 'import sumi_maclaurin_2.R_0_0 as R_0_0\n'), ((1299, 1313), 'sumi_maclaurin_2.R_0_1.f', 'R_0_1.f', (['q', 'xi'], {}), '(q, xi)\n', (1306, 1313), True, 'import sumi_maclaurin_2.R_0_1 as R_0_1\n'), ((1315, 1329), 'sumi_maclaurin_2.R_0_2.f', 'R_0_2.f', (['q', 'xi'], {}), '(q, xi)\n', (1322, 1329), True, 'import sumi_maclaurin_2.R_0_2 as R_0_2\n'), ((1331, 1343), 'sumi_maclaurin_2.P_0.f', 'P_0.f', (['q', 'xi'], {}), '(q, xi)\n', (1336, 1343), True, 'import sumi_maclaurin_2.P_0 as P_0\n'), ((1359, 1373), 'sumi_maclaurin_2.R_0_0.f', 'R_1_0.f', (['q', 'xi'], {}), '(q, xi)\n', (1366, 1373), True, 'import sumi_maclaurin_2.R_0_0 as R_1_0\n'), ((1375, 1389), 'sumi_maclaurin_2.R_0_1.f', 'R_1_1.f', (['q', 'xi'], {}), '(q, xi)\n', (1382, 1389), True, 'import sumi_maclaurin_2.R_0_1 as R_1_1\n'), ((1391, 1405), 'sumi_maclaurin_2.R_0_2.f', 'R_1_2.f', (['q', 'xi'], {}), '(q, xi)\n', (1398, 1405), True, 'import sumi_maclaurin_2.R_0_2 as R_1_2\n'), ((1407, 1419), 'sumi_maclaurin_2.P_1.f', 'P_1.f', (['q', 'xi'], {}), '(q, xi)\n', (1412, 1419), True, 'import 
sumi_maclaurin_2.P_1 as P_1\n'), ((1435, 1449), 'sumi_maclaurin_2.R_0_0.f', 'R_2_0.f', (['q', 'xi'], {}), '(q, xi)\n', (1442, 1449), True, 'import sumi_maclaurin_2.R_0_0 as R_2_0\n'), ((1451, 1465), 'sumi_maclaurin_2.R_0_1.f', 'R_2_1.f', (['q', 'xi'], {}), '(q, xi)\n', (1458, 1465), True, 'import sumi_maclaurin_2.R_0_1 as R_2_1\n'), ((1467, 1481), 'sumi_maclaurin_2.R_0_2.f', 'R_2_2.f', (['q', 'xi'], {}), '(q, xi)\n', (1474, 1481), True, 'import sumi_maclaurin_2.R_0_2 as R_2_2\n'), ((1483, 1495), 'sumi_maclaurin_2.P_2.f', 'P_2.f', (['q', 'xi'], {}), '(q, xi)\n', (1488, 1495), True, 'import sumi_maclaurin_2.P_2 as P_2\n'), ((3626, 3640), 'sympy.zeros', 'sy.zeros', (['(3)', '(3)'], {}), '(3, 3)\n', (3634, 3640), True, 'import sympy as sy\n'), ((4388, 4402), 'sympy.zeros', 'sy.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (4396, 4402), True, 'import sympy as sy\n'), ((5185, 5225), 'sympy.diff', 'sy.diff', (['self.R_s[i]', 'self.q_large[k, 0]'], {}), '(self.R_s[i], self.q_large[k, 0])\n', (5192, 5225), True, 'import sympy as sy\n'), ((6985, 7025), 'sympy.diff', 'sy.diff', (['self.R_s[i]', 'self.q_large[k, 0]'], {}), '(self.R_s[i], self.q_large[k, 0])\n', (6992, 7025), True, 'import sympy as sy\n'), ((7055, 7095), 'sympy.diff', 'sy.diff', (['self.P_s[i]', 'self.q_large[k, 0]'], {}), '(self.P_s[i], self.q_large[k, 0])\n', (7062, 7095), True, 'import sympy as sy\n'), ((746, 758), 'sumi_maclaurin_2.P_0.f', 'P_0.f', (['q', 'xi'], {}), '(q, xi)\n', (751, 758), True, 'import sumi_maclaurin_2.P_0 as P_0\n'), ((772, 784), 'sumi_maclaurin_2.P_1.f', 'P_1.f', (['q', 'xi'], {}), '(q, xi)\n', (777, 784), True, 'import sumi_maclaurin_2.P_1 as P_1\n'), ((798, 810), 'sumi_maclaurin_2.P_2.f', 'P_2.f', (['q', 'xi'], {}), '(q, xi)\n', (803, 810), True, 'import sumi_maclaurin_2.P_2 as P_2\n'), ((3544, 3584), 'sympy.diff', 'sy.diff', (['self.R_s[i]', 'self.q_large[j, 0]'], {}), '(self.R_s[i], self.q_large[j, 0])\n', (3551, 3584), True, 'import sympy as sy\n'), ((4306, 4346), 'sympy.diff', 'sy.diff', (['self.P_s[i]', 'self.q_large[j, 0]'], {}), '(self.P_s[i], self.q_large[j, 0])\n', (4313, 4346), True, 'import sympy as sy\n'), ((5482, 5496), 'sympy.zeros', 'sy.zeros', (['(3)', '(3)'], {}), '(3, 3)\n', (5490, 5496), True, 'import sympy as sy\n'), ((6384, 6409), 'sympy.Matrix', 'sy.Matrix', (['[H_OMEGA_s_ij]'], {}), '([H_OMEGA_s_ij])\n', (6393, 6409), True, 'import sympy as sy\n'), ((7405, 7419), 'sympy.zeros', 'sy.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (7413, 7419), True, 'import sympy as sy\n'), ((8283, 8304), 'sympy.Matrix', 'sy.Matrix', (['[H_v_s_ij]'], {}), '([H_v_s_ij])\n', (8292, 8304), True, 'import sympy as sy\n'), ((5597, 5637), 'sympy.diff', 'sy.diff', (['self.R_s[i]', 'self.q_large[k, 0]'], {}), '(self.R_s[i], self.q_large[k, 0])\n', (5604, 5637), True, 'import sympy as sy\n'), ((5667, 5707), 'sympy.diff', 'sy.diff', (['self.R_s[i]', 'self.q_large[j, 0]'], {}), '(self.R_s[i], self.q_large[j, 0])\n', (5674, 5707), True, 'import sympy as sy\n'), ((5744, 5783), 'sympy.diff', 'sy.diff', (['R_i_diff_j', 'self.q_large[k, 0]'], {}), '(R_i_diff_j, self.q_large[k, 0])\n', (5751, 5783), True, 'import sympy as sy\n'), ((5923, 5937), 'sympy.zeros', 'sy.zeros', (['(3)', '(3)'], {}), '(3, 3)\n', (5931, 5937), True, 'import sympy as sy\n'), ((7520, 7560), 'sympy.diff', 'sy.diff', (['self.R_s[i]', 'self.q_large[k, 0]'], {}), '(self.R_s[i], self.q_large[k, 0])\n', (7527, 7560), True, 'import sympy as sy\n'), ((7590, 7630), 'sympy.diff', 'sy.diff', (['self.P_s[i]', 'self.q_large[j, 0]'], {}), '(self.P_s[i], self.q_large[j, 
0])\n', (7597, 7630), True, 'import sympy as sy\n'), ((7667, 7706), 'sympy.diff', 'sy.diff', (['P_i_diff_j', 'self.q_large[k, 0]'], {}), '(P_i_diff_j, self.q_large[k, 0])\n', (7674, 7706), True, 'import sympy as sy\n'), ((7846, 7860), 'sympy.zeros', 'sy.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (7854, 7860), True, 'import sympy as sy\n')] |
from typing import NoReturn
from ...base import BaseEstimator
import numpy as np
from numpy.linalg import det, inv
from scipy.stats import multivariate_normal
class LDA(BaseEstimator):
"""
Linear Discriminant Analysis (LDA) classifier
Attributes
----------
self.classes_ : np.ndarray of shape (n_classes,)
The different labels classes. To be set in `LDA.fit`
self.mu_ : np.ndarray of shape (n_classes,n_features)
The estimated features means for each class. To be set in `LDA.fit`
self.cov_ : np.ndarray of shape (n_features,n_features)
The estimated features' covariance. To be set in `LDA.fit`
self._cov_inv : np.ndarray of shape (n_features,n_features)
The inverse of the estimated features covariance. To be set in `LDA.fit`
self.pi_: np.ndarray of shape (n_classes)
The estimated class probabilities. To be set in `GaussianNaiveBayes.fit`
"""
def __init__(self):
"""
Instantiate an LDA classifier
"""
super().__init__()
self.classes_, self.mu_, self.cov_, self._cov_inv, self.pi_ = None, None, None, None, None
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
fits an LDA model.
Estimates gaussian for each label class - Different mean vector, same covariance
matrix with dependent features.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
"""
self.classes_, counts = np.unique(y, return_counts=True)
self.pi_ = counts / sum(counts)
self.mu_ = np.zeros((len(self.classes_), X.shape[1]))
label_index_dict = {}
# map label to its index:
for i, k in enumerate(self.classes_):
label_index_dict[k] = i
# sum label's samples:
for index, label in enumerate(y):
self.mu_[label_index_dict[label]] += X[index]
# divide by number of samples of each class:
self.mu_ /= counts.reshape(-1, 1)
# calculating self.cov:
self.cov_ = np.zeros((X.shape[1], X.shape[1]))
for index, label in enumerate(y):
error = np.array(X[index] - self.mu_[label_index_dict[label]])
self.cov_ += np.outer(error, error)
self.cov_ /= (X.shape[0] - len(self.classes_))
self._cov_inv = inv(self.cov_)
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
ak_matrix = self._cov_inv @ self.mu_.transpose() # num_features X num_classes
        bk_vec = np.log(self.pi_) - 0.5 * np.diag(
            self.mu_ @ self._cov_inv @ self.mu_.transpose())  # num_classes
        classes_indexes = ((X @ ak_matrix) + bk_vec).argmax(1)
        prediction = np.zeros((X.shape[0],))
        for index, row in enumerate(classes_indexes):
            prediction[index] = self.classes_[row]
return prediction
def likelihood(self, X: np.ndarray) -> np.ndarray:
"""
Calculate the likelihood of a given data over the estimated model
Parameters
----------
X : np.ndarray of shape (n_samples, n_features)
Input data to calculate its likelihood over the different classes.
Returns
-------
likelihoods : np.ndarray of shape (n_samples, n_classes)
The likelihood for each sample under each of the classes
"""
if not self.fitted_:
raise ValueError("Estimator must first be fitted before calling `likelihood` function")
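        # Per-class joint density: N(x; mu_k, cov) * pi_k.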
likelihood = np.zeros((X.shape[0], len(self.classes_)))
for index, row in enumerate(self.mu_):
likelihood[:, index] = multivariate_normal.pdf(X, mean=row, cov=self.cov_)*self.pi_[index]
return likelihood
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under misclassification loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
            Performance under misclassification loss function
"""
from ...metrics import misclassification_error
return misclassification_error(y, self._predict(X))
| [
"numpy.unique",
"scipy.stats.multivariate_normal.pdf",
"numpy.log",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.outer"
] | [((1651, 1683), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (1660, 1683), True, 'import numpy as np\n'), ((2212, 2246), 'numpy.zeros', 'np.zeros', (['(X.shape[1], X.shape[1])'], {}), '((X.shape[1], X.shape[1]))\n', (2220, 2246), True, 'import numpy as np\n'), ((2491, 2505), 'numpy.linalg.inv', 'inv', (['self.cov_'], {}), '(self.cov_)\n', (2494, 2505), False, 'from numpy.linalg import det, inv\n'), ((3322, 3345), 'numpy.zeros', 'np.zeros', (['(X.shape[0],)'], {}), '((X.shape[0],))\n', (3330, 3345), True, 'import numpy as np\n'), ((2309, 2363), 'numpy.array', 'np.array', (['(X[index] - self.mu_[label_index_dict[label]])'], {}), '(X[index] - self.mu_[label_index_dict[label]])\n', (2317, 2363), True, 'import numpy as np\n'), ((2389, 2411), 'numpy.outer', 'np.outer', (['error', 'error'], {}), '(error, error)\n', (2397, 2411), True, 'import numpy as np\n'), ((3028, 3044), 'numpy.log', 'np.log', (['self.pi_'], {}), '(self.pi_)\n', (3034, 3044), True, 'import numpy as np\n'), ((4249, 4300), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['X'], {'mean': 'row', 'cov': 'self.cov_'}), '(X, mean=row, cov=self.cov_)\n', (4272, 4300), False, 'from scipy.stats import multivariate_normal\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 1 08:04:50 2019
@author: alexandradarmon
"""
import random
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from punctuation.config import options
from punctuation.visualisation.heatmap_functions import heatmap, annotate_heatmap
from webcolors import hex_to_rgb
color_vector = ['#2764A2','#EC7428','#438823', '#B9312B', '#785BAD','#72473D',
'#CA6FB6', '#6C6C6C','#B1AC27', '#44ADC1']
markers = {'o': 'circle', 'D': 'diamond', 'p': 'pentagon',
           'v': 'triangle_down', '^': 'triangle_up',
           '<': 'triangle_left', '>': 'triangle_right',
           's': 'square', '*': 'star', 'x': 'x',
           '_': 'hline',
           'h': 'hexagon1', 'H': 'hexagon2',
           'd': 'thin_diamond', '|': 'vline', '+': 'plus',
           'P': 'plus_filled', 'X': 'x_filled', 0: 'tickleft', 1: 'tickright',
           2: 'tickup', 3: 'tickdown', 4: 'caretleft', 5: 'caretright',
           6: 'caretup', 7: 'caretdown', 8: 'caretleftbase',
           9: 'caretrightbase', 10: 'caretupbase', 11: 'caretdownbase',
           'None': 'nothing', None: 'nothing', ' ': 'nothing', '': 'nothing'}
marker_vector = list(markers.keys())
rgb_color_vector = [hex_to_rgb(i) for i in color_vector]
def get_overall_kdeplot(df,subfile,
punctuation_vector=options.punctuation_vector,
freq_pun_col=options.freq_pun_col,
with_pairs=False):
for col1, pun1 in zip(freq_pun_col, punctuation_vector):
sns.kdeplot(df[col1], label='{}'.format(pun1), color='black')
plt.legend(loc=0)
plt.savefig('results/stats_corpus/{}/kdeplot_{}.png'.format(subfile,col1))
plt.show()
if with_pairs:
for col2, pun2 in zip(freq_pun_col[freq_pun_col.index(col1)+1:],
punctuation_vector[punctuation_vector.index(pun1)+1:]):
sns.kdeplot(df[col1], df[col2], label='{},{}'.format(pun1,pun2))
plt.legend(loc=0)
plt.savefig('results/stats_corpus/{}/kdeplot_{}_{}.png'.format(subfile,
col1,
col2))
plt.show()
def get_overall_hist(df,subfile,
punctuation_vector=options.punctuation_vector,
freq_pun_col=options.freq_pun_col):
bins = np.arange(0,1,0.01)
for col1, pun1 in zip(freq_pun_col, punctuation_vector):
ax = plt.subplot(111)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(options.font_size)
plt.hist(df[col1], bins=bins, label=pun1, color='blue')
plt.legend(loc=0, fontsize=options.font_size)
plt.xlabel('punctuation frequency')
plt.ylabel('number of documents')
plt.savefig('results/stats_corpus/{}/hist_{}.png'.format(subfile,col1))
plt.show()
def show_weapon_hist(kl_within_author_samples, kl_between_author_samples,
type_compute_baseline,path_res,feature_name,
baseline_between=None,
baseline_within=None,
bins=100, to_show=True):
bin_size = 0.1
bins = np.arange(0,2, bin_size)
x_bins = np.arange(0,2+bin_size, 4*bin_size)
ax = plt.subplot(111)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(options.font_size)
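    # Normalise both histograms to relative frequencies so the within- and
    # between-author KL distributions are directly comparable.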
y1, bin_edges1=np.histogram(kl_within_author_samples,bins=bins)
y1 = list(map(lambda x: x/sum(y1), y1))
bincenters1 = 0.5*(bin_edges1[1:]+bin_edges1[:-1])
y2, bin_edges2=np.histogram(kl_between_author_samples,bins=bins)
y2 = list(map(lambda x: x/sum(y2), y2))
bincenters2 = 0.5*(bin_edges2[1:]+bin_edges2[:-1])
# plt.hist(kl_within_author_samples, bins=bins, color='black',
# alpha=0.4,)
plt.bar(bincenters1, y1, width=bin_size,
color='black',alpha=0.3,)
plt.plot(bincenters1,y1,'-', color='black')
    plt.bar(bincenters2, y2, width=bin_size,
            color='blue', alpha=0.3,)
plt.plot(bincenters2, y2, '-', color='blue')
if type_compute_baseline:
plt.axvline(baseline_between, color='blue', linestyle=':')
plt.axvline(baseline_within, color='black', linestyle=':')
plt.xlim(min(min(kl_within_author_samples),
min(kl_between_author_samples)),2)
plt.ylim(0,1)
plt.yticks([0,0.5,1])
plt.xticks(x_bins)
plt.xlabel('KL divergence')
plt.ylabel('frequency')
plt.legend('')
plt.savefig('{}/kl_hist_comparison_{}.png'.format(path_res,feature_name))
if to_show: plt.show()
## CUMSUM REPRESENTATION
#y1_cum_sum = pd.Series(y1).cumsum()
#y1_cum_sum = y1_cum_sum.tolist()
#
#y2_cum_sum = pd.Series(y2).cumsum()
#y2_cum_sum = y2_cum_sum.tolist()
#
#
#plt.plot(bincenters1, y1_cum_sum, color='black', label='within')
#plt.plot(bincenters1, y2_cum_sum, color='blue', label='between')
#plt.legend()
def plot_list_class(df, class_name='author'):
res = df.groupby([class_name],as_index=False)\
['book_id'].count().rename(columns={'book_id':'nb_books'}).sort_values('nb_books',ascending=False)
# list_author = list(res[class_name])
# list_nb_books = list(res['nb_books'])
#
# plt.bar(list(range(0,len(list_author))), list_nb_books)
# plt.xticks([10,50,100,150,200],fontsize=options.font_size)
# plt.yticks([0,20,40,60],fontsize=options.font_size)
# plt.xlim([10,230])
# plt.xlabel('Number of documents',fontsize=options.font_size)
# plt.ylabel('Number of {}s'.format(class_name),fontsize=options.font_size)
# plt.bar(list_nb_books, list_nb_authors,width=3, color='blue')
#
res = res[[class_name,'nb_books']].\
groupby('nb_books',as_index=False)[class_name].count()
list_nb_authors = list(res[class_name])
list_nb_books = list(res['nb_books'])
ax = plt.subplot(111, )
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(options.font_size)
ax.set_xlim([10,230])
plt.xticks([10,50,100,150,200],fontsize=options.font_size)
plt.yticks([0,20,40,60],fontsize=options.font_size)
plt.xlim([10,230])
plt.xlabel('number of documents',fontsize=options.font_size)
plt.ylabel('number of {}s'.format(class_name),fontsize=options.font_size)
plt.bar(list_nb_books, list_nb_authors,width=3, color='blue')
plt.show()
def plot_hist_punc(freq, punctuation_vector=options.punctuation_vector):
y = freq
x = list(range(0,10))
ax = plt.subplot(111)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(options.font_size)
ax.bar(x, y, align='center', color='b') #< added align keyword
ax.xaxis_date()
ax.set_ylim(bottom=0, top=0.7)
plt.xticks(list(range(0,10)), punctuation_vector[:-1]+['...'])
plt.show()
def plot_hist_words(freq):
ax = plt.subplot(111, )
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(options.font_size)
plt.rcParams.update({'font.size': options.font_size})
plt.bar(list(range(0,len(freq))), freq, color='magenta', align='center')
ax.set_ylim(bottom=0, top=0.4)
#plt.xticks(list(range(0,len(freq))), punctuation_vector)
plt.show()
def func(x, pos):
return "{:.2f}".format(x).replace("0.", ".").replace("1.00", "")
def plot_trans_mat(mat_nb_words,
punctuation_vector=options.punctuation_vector):
vegetables = punctuation_vector[:-1]+['...']
farmers = punctuation_vector[:-1]+['...']
harvest = np.array(mat_nb_words)
fig, ax = plt.subplots()
im, _ = heatmap(harvest, vegetables, farmers, ax=ax,
)
annotate_heatmap(im, valfmt="{x:.1f}", size=7)
plt.tight_layout()
plt.show()
def plot_scatter_freqs(df, title1=None, title2=None,
freq1=None, freq2=None,
font_size=options.font_size,
):
if title1 is None:
title1 = random.choice(df['title'].tolist())
if title2 is None:
title2 = random.choice(df['title'].tolist())
if freq1 is None:
freq1 = df[df['title']==title1]['freq_pun'].iloc[0]
if freq2 is None:
freq2 = df[df['title']==title2]['freq_pun'].iloc[0]
ax = plt.subplot(111, )
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(options.font_size)
    plt.xlabel(r'$\it{' + title1.replace(' ', r'\ ') + '}$', fontsize=options.font_size)
    plt.ylabel(r'$\it{' + title2.replace(' ', r'\ ') + '}$', fontsize=options.font_size)
plt.gca().set_aspect('equal', adjustable='box')
vect = np.linspace(-0, 0.5, 10)
plt.xticks([-0.,0.25,0.5], ['0', '0.25', '0.5'], fontsize=options.font_size)
plt.yticks([-0,0.25,0.5],['0', '0.25', '0.5'], fontsize=options.font_size)
for i in range(0,len(color_vector)):
plt.plot(freq1[i], freq2[i], color=color_vector[i], marker="o")
plt.plot(vect, vect, color = 'black', alpha=0.2)
plt.show()
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.pyplot.axvline",
"numpy.arange",
"numpy.histogram",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"webcolors.hex_to_rgb",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"matplotl... | [((1343, 1356), 'webcolors.hex_to_rgb', 'hex_to_rgb', (['i'], {}), '(i)\n', (1353, 1356), False, 'from webcolors import hex_to_rgb\n'), ((2515, 2536), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (2524, 2536), True, 'import numpy as np\n'), ((3412, 3437), 'numpy.arange', 'np.arange', (['(0)', '(2)', 'bin_size'], {}), '(0, 2, bin_size)\n', (3421, 3437), True, 'import numpy as np\n'), ((3450, 3490), 'numpy.arange', 'np.arange', (['(0)', '(2 + bin_size)', '(4 * bin_size)'], {}), '(0, 2 + bin_size, 4 * bin_size)\n', (3459, 3490), True, 'import numpy as np\n'), ((3500, 3516), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (3511, 3516), True, 'import matplotlib.pyplot as plt\n'), ((3715, 3764), 'numpy.histogram', 'np.histogram', (['kl_within_author_samples'], {'bins': 'bins'}), '(kl_within_author_samples, bins=bins)\n', (3727, 3764), True, 'import numpy as np\n'), ((3887, 3937), 'numpy.histogram', 'np.histogram', (['kl_between_author_samples'], {'bins': 'bins'}), '(kl_between_author_samples, bins=bins)\n', (3899, 3937), True, 'import numpy as np\n'), ((4144, 4210), 'matplotlib.pyplot.bar', 'plt.bar', (['bincenters1', 'y1'], {'width': 'bin_size', 'color': '"""black"""', 'alpha': '(0.3)'}), "(bincenters1, y1, width=bin_size, color='black', alpha=0.3)\n", (4151, 4210), True, 'import matplotlib.pyplot as plt\n'), ((4228, 4273), 'matplotlib.pyplot.plot', 'plt.plot', (['bincenters1', 'y1', '"""-"""'], {'color': '"""black"""'}), "(bincenters1, y1, '-', color='black')\n", (4236, 4273), True, 'import matplotlib.pyplot as plt\n'), ((4281, 4346), 'matplotlib.pyplot.bar', 'plt.bar', (['bincenters1', 'y2'], {'width': 'bin_size', 'color': '"""blue"""', 'alpha': '(0.3)'}), "(bincenters1, y2, width=bin_size, color='blue', alpha=0.3)\n", (4288, 4346), True, 'import matplotlib.pyplot as plt\n'), ((4364, 4408), 'matplotlib.pyplot.plot', 'plt.plot', (['bincenters2', 'y2', '"""-"""'], {'color': '"""blue"""'}), "(bincenters2, y2, '-', color='blue')\n", (4372, 4408), True, 'import matplotlib.pyplot as plt\n'), ((4679, 4693), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (4687, 4693), True, 'import matplotlib.pyplot as plt\n'), ((4697, 4720), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 0.5, 1]'], {}), '([0, 0.5, 1])\n', (4707, 4720), True, 'import matplotlib.pyplot as plt\n'), ((4723, 4741), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x_bins'], {}), '(x_bins)\n', (4733, 4741), True, 'import matplotlib.pyplot as plt\n'), ((4751, 4778), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""KL divergence"""'], {}), "('KL divergence')\n", (4761, 4778), True, 'import matplotlib.pyplot as plt\n'), ((4783, 4806), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {}), "('frequency')\n", (4793, 4806), True, 'import matplotlib.pyplot as plt\n'), ((4816, 4830), 'matplotlib.pyplot.legend', 'plt.legend', (['""""""'], {}), "('')\n", (4826, 4830), True, 'import matplotlib.pyplot as plt\n'), ((6202, 6218), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (6213, 6218), True, 'import matplotlib.pyplot as plt\n'), ((6426, 6489), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[10, 50, 100, 150, 200]'], {'fontsize': 'options.font_size'}), '([10, 50, 100, 150, 200], fontsize=options.font_size)\n', (6436, 6489), True, 'import matplotlib.pyplot as plt\n'), ((6489, 6544), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 20, 40, 60]'], {'fontsize': 'options.font_size'}), '([0, 20, 40, 60], 
fontsize=options.font_size)\n', (6499, 6544), True, 'import matplotlib.pyplot as plt\n'), ((6545, 6564), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[10, 230]'], {}), '([10, 230])\n', (6553, 6564), True, 'import matplotlib.pyplot as plt\n'), ((6568, 6629), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of documents"""'], {'fontsize': 'options.font_size'}), "('number of documents', fontsize=options.font_size)\n", (6578, 6629), True, 'import matplotlib.pyplot as plt\n'), ((6711, 6773), 'matplotlib.pyplot.bar', 'plt.bar', (['list_nb_books', 'list_nb_authors'], {'width': '(3)', 'color': '"""blue"""'}), "(list_nb_books, list_nb_authors, width=3, color='blue')\n", (6718, 6773), True, 'import matplotlib.pyplot as plt\n'), ((6777, 6787), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6785, 6787), True, 'import matplotlib.pyplot as plt\n'), ((6916, 6932), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (6927, 6932), True, 'import matplotlib.pyplot as plt\n'), ((7309, 7319), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7317, 7319), True, 'import matplotlib.pyplot as plt\n'), ((7358, 7374), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (7369, 7374), True, 'import matplotlib.pyplot as plt\n'), ((7551, 7604), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': options.font_size}"], {}), "({'font.size': options.font_size})\n", (7570, 7604), True, 'import matplotlib.pyplot as plt\n'), ((7783, 7793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7791, 7793), True, 'import matplotlib.pyplot as plt\n'), ((8098, 8120), 'numpy.array', 'np.array', (['mat_nb_words'], {}), '(mat_nb_words)\n', (8106, 8120), True, 'import numpy as np\n'), ((8145, 8159), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8157, 8159), True, 'import matplotlib.pyplot as plt\n'), ((8172, 8216), 'punctuation.visualisation.heatmap_functions.heatmap', 'heatmap', (['harvest', 'vegetables', 'farmers'], {'ax': 'ax'}), '(harvest, vegetables, farmers, ax=ax)\n', (8179, 8216), False, 'from punctuation.visualisation.heatmap_functions import heatmap, annotate_heatmap\n'), ((8248, 8294), 'punctuation.visualisation.heatmap_functions.annotate_heatmap', 'annotate_heatmap', (['im'], {'valfmt': '"""{x:.1f}"""', 'size': '(7)'}), "(im, valfmt='{x:.1f}', size=7)\n", (8264, 8294), False, 'from punctuation.visualisation.heatmap_functions import heatmap, annotate_heatmap\n'), ((8299, 8317), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8315, 8317), True, 'import matplotlib.pyplot as plt\n'), ((8322, 8332), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8330, 8332), True, 'import matplotlib.pyplot as plt\n'), ((8858, 8874), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (8869, 8874), True, 'import matplotlib.pyplot as plt\n'), ((9288, 9312), 'numpy.linspace', 'np.linspace', (['(-0)', '(0.5)', '(10)'], {}), '(-0, 0.5, 10)\n', (9299, 9312), True, 'import numpy as np\n'), ((9317, 9396), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-0.0, 0.25, 0.5]', "['0', '0.25', '0.5']"], {'fontsize': 'options.font_size'}), "([-0.0, 0.25, 0.5], ['0', '0.25', '0.5'], fontsize=options.font_size)\n", (9327, 9396), True, 'import matplotlib.pyplot as plt\n'), ((9398, 9475), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-0, 0.25, 0.5]', "['0', '0.25', '0.5']"], {'fontsize': 'options.font_size'}), "([-0, 0.25, 0.5], ['0', '0.25', '0.5'], fontsize=options.font_size)\n", (9408, 9475), True, 'import 
matplotlib.pyplot as plt\n'), ((9594, 9640), 'matplotlib.pyplot.plot', 'plt.plot', (['vect', 'vect'], {'color': '"""black"""', 'alpha': '(0.2)'}), "(vect, vect, color='black', alpha=0.2)\n", (9602, 9640), True, 'import matplotlib.pyplot as plt\n'), ((9647, 9657), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9655, 9657), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1752), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (1745, 1752), True, 'import matplotlib.pyplot as plt\n'), ((1844, 1854), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1852, 1854), True, 'import matplotlib.pyplot as plt\n'), ((2609, 2625), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (2620, 2625), True, 'import matplotlib.pyplot as plt\n'), ((2807, 2862), 'matplotlib.pyplot.hist', 'plt.hist', (['df[col1]'], {'bins': 'bins', 'label': 'pun1', 'color': '"""blue"""'}), "(df[col1], bins=bins, label=pun1, color='blue')\n", (2815, 2862), True, 'import matplotlib.pyplot as plt\n'), ((2871, 2916), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)', 'fontsize': 'options.font_size'}), '(loc=0, fontsize=options.font_size)\n', (2881, 2916), True, 'import matplotlib.pyplot as plt\n'), ((2925, 2960), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""punctuation frequency"""'], {}), "('punctuation frequency')\n", (2935, 2960), True, 'import matplotlib.pyplot as plt\n'), ((2969, 3002), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""number of documents"""'], {}), "('number of documents')\n", (2979, 3002), True, 'import matplotlib.pyplot as plt\n'), ((3091, 3101), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3099, 3101), True, 'import matplotlib.pyplot as plt\n'), ((4448, 4506), 'matplotlib.pyplot.axvline', 'plt.axvline', (['baseline_between'], {'color': '"""blue"""', 'linestyle': '""":"""'}), "(baseline_between, color='blue', linestyle=':')\n", (4459, 4506), True, 'import matplotlib.pyplot as plt\n'), ((4515, 4573), 'matplotlib.pyplot.axvline', 'plt.axvline', (['baseline_within'], {'color': '"""black"""', 'linestyle': '""":"""'}), "(baseline_within, color='black', linestyle=':')\n", (4526, 4573), True, 'import matplotlib.pyplot as plt\n'), ((4925, 4935), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4933, 4935), True, 'import matplotlib.pyplot as plt\n'), ((9525, 9588), 'matplotlib.pyplot.plot', 'plt.plot', (['freq1[i]', 'freq2[i]'], {'color': 'color_vector[i]', 'marker': '"""o"""'}), "(freq1[i], freq2[i], color=color_vector[i], marker='o')\n", (9533, 9588), True, 'import matplotlib.pyplot as plt\n'), ((9224, 9233), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9231, 9233), True, 'import matplotlib.pyplot as plt\n'), ((2136, 2153), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (2146, 2153), True, 'import matplotlib.pyplot as plt\n'), ((2327, 2337), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2335, 2337), True, 'import matplotlib.pyplot as plt\n')] |
'''# *************************************************************
# 2018
# *************************************************************
#
# CLIENT
#
# *************************************************************'''
# Import function from socket module
# from socket import *
import socket
import sys
# FUNCTION: Receive from the socket and return the decoded message
def commRX(sock):
    message = sock.recv(bufsize)
    coded = message.decode(code)
    return coded
# FUNCTION: Transmission of any message
def commTX(message, sock):
    coded = message.encode(code)
    sock.send(coded)
code = "utf-8"
host = socket.gethostname()
port = 6666
address = (host, port)
bufsize = 1024
try:
    clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    clientSocket.connect(address)
except OSError:
    print("Error opening socket")
    sys.exit(1)
else:
while True:
print("\n\n", "_"*70, "\n\n")
message = commRX(clientSocket)
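        # The server sends "6" as the agreed disconnect sentinel.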
if(message == "6"):
print("Disconnecting from server ... ")
print("\n\n", "_"*70, "\n")
break
else:
print(message)
message = input("")
if(message == ""):
message = "<EnterKey>"
commTX(message, clientSocket)
clientSocket.close()
| [
"socket",
"socket.send",
"socket.recv",
"sys.exit",
"socket.gethostname"
] | [((709, 729), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (727, 729), False, 'import socket\n'), ((476, 496), 'socket.recv', 'socket.recv', (['bufsize'], {}), '(bufsize)\n', (487, 496), False, 'import socket\n'), ((662, 680), 'socket.send', 'socket.send', (['coded'], {}), '(coded)\n', (673, 680), False, 'import socket\n'), ((809, 837), 'socket', 'socket', (['AF_INET', 'SOCK_STREAM'], {}), '(AF_INET, SOCK_STREAM)\n', (815, 837), False, 'import socket\n'), ((947, 958), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (955, 958), False, 'import sys\n')] |
# Copyright (c) 2021, M20Zero and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
class Membership(Document):
def on_submit(self):
self.make_invoice()
    def make_invoice(self):
        self.invoice_entry(self.name, self.member, self.member_name, self.membership_type, self.amount, self.income_account)
    def invoice_entry(self, docname, member, member_name, membership_type, amount, income_account):
        # docname (self.name) and membership_type are currently unused here
        gl = frappe.get_doc(dict(doctype="Invoice", date=frappe.utils.nowdate(), party_type="member", party=member, party_name=member_name, items=income_account, total=amount, total_qty="1"))
        gl.save(ignore_permissions=True)
        gl.submit()
| [
"frappe.utils.nowdate"
] | [((533, 555), 'frappe.utils.nowdate', 'frappe.utils.nowdate', ([], {}), '()\n', (553, 555), False, 'import frappe\n')] |
import argparse
import os
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from torchvision import transforms
from torchvision import datasets
from utils.lib import *
from utils.pgd_attack import *
from models.resnet import ResNet
def test(model, dataloader):
model.eval()
n_correct, n_total = 0, 0
for img, label in iter(dataloader):
batch_size = len(label)
img, label = img.cuda(), label.cuda()
with torch.no_grad():
class_output = model(img)
pred = class_output.data.max(1, keepdim=True)[1]
n_correct += pred.eq(label.data.view_as(pred)).cpu().sum()
n_total += batch_size
acc = n_correct.double() / n_total
return acc
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate augmented training dataset and extract features')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--model-type', default='nat_model',
choices=['nat_model', 'adv_model'], type=str, help='model type')
parser.add_argument('--save-dir', default='./generate_data/', type=str, help='dir to save data')
parser.add_argument('--model-dir', default='./checkpoints/', type=str, help='dir to saved model')
# args parse
args = parser.parse_args()
# Set random seed
set_seed(args.seed)
model_type = args.model_type
save_dir = os.path.join(args.save_dir, model_type)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
model_path = os.path.join(args.model_dir, model_type, "checkpoint.pth")
batch_size = 128
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
mean = [x/255.0 for x in [125.3, 123.0, 113.9]]
std = [x/255.0 for x in [63.0, 62.1, 66.7]]
train_dataset = datasets.CIFAR10('./datasets/cifar10', train=True, download=True, transform=transform_train)
train_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
test_dataset = datasets.CIFAR10('./datasets/cifar10', train=False, transform=transform_test)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
# Model Setup
model = torch.load(model_path).cuda()
model.eval()
attacker = LinfPGDAttack(model, eps=8/255.0, nb_iter=40,
eps_iter=1/255.0, rand_init=True, clip_min=0., clip_max=1.,
targeted=False, num_classes=10, elementwise_best=True)
augment_data = []
augment_label = []
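    # Keep one clean copy of the training set, then append four rounds of
    # PGD adversarial examples generated against the loaded model.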
for batch_x, batch_y in train_dataloader:
augment_data.extend(batch_x.numpy())
augment_label.extend(batch_y.numpy())
correct = 0.0
count = 0.0
for j in range(4):
for batch_x, batch_y in train_dataloader:
batch_x = batch_x.cuda()
batch_y = batch_y.cuda()
adv_batch_x = attacker.perturb(batch_x, batch_y)
augment_data.extend(adv_batch_x.cpu().numpy())
augment_label.extend(batch_y.cpu().numpy())
with torch.no_grad():
outputs = model(adv_batch_x)
preds = torch.argmax(outputs, axis=1)
correct += torch.sum(preds==batch_y)
count += batch_x.shape[0]
print("Adv acc: {:.2f}%".format((correct/count)*100))
augment_data = np.array(augment_data)
augment_label = np.array(augment_label)
np.save(os.path.join(save_dir, "augment_data.npy"), augment_data)
np.save(os.path.join(save_dir, "augment_label.npy"), augment_label)
augment_data = torch.Tensor(augment_data)
augment_label = torch.Tensor(augment_label).long()
augment_dataset = TensorDataset(augment_data, augment_label)
augment_dataloader = DataLoader(augment_dataset, batch_size=batch_size, shuffle=False)
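    # Second pass over the augmented set: cache the features returned by the
    # model's get_feature() for every clean and adversarial example.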
augment_features = []
for batch_x, batch_y in augment_dataloader:
batch_x = batch_x.cuda()
with torch.no_grad():
feature = model.get_feature(batch_x)
augment_features.extend(feature.cpu().numpy())
augment_features = np.array(augment_features)
np.save(os.path.join(save_dir, "augment_feature.npy"), augment_features)
| [
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"torch.load",
"os.path.join",
"torch.Tensor",
"torch.utils.data.TensorDataset",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomCrop",
"numpy.array",
"torchvision.datasets.CIFAR10",
"torch.sum",
"torch.u... | [((813, 913), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate augmented training dataset and extract features"""'}), "(description=\n 'Generate augmented training dataset and extract features')\n", (836, 913), False, 'import argparse\n'), ((1467, 1506), 'os.path.join', 'os.path.join', (['args.save_dir', 'model_type'], {}), '(args.save_dir, model_type)\n', (1479, 1506), False, 'import os\n'), ((1592, 1650), 'os.path.join', 'os.path.join', (['args.model_dir', 'model_type', '"""checkpoint.pth"""'], {}), "(args.model_dir, model_type, 'checkpoint.pth')\n", (1604, 1650), False, 'import os\n'), ((2053, 2150), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""./datasets/cifar10"""'], {'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "('./datasets/cifar10', train=True, download=True, transform\n =transform_train)\n", (2069, 2150), False, 'from torchvision import datasets\n'), ((2169, 2259), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(dataset=train_dataset, batch_size=batch_size, shuffle=False,\n num_workers=2)\n', (2179, 2259), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((2275, 2352), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""./datasets/cifar10"""'], {'train': '(False)', 'transform': 'transform_test'}), "('./datasets/cifar10', train=False, transform=transform_test)\n", (2291, 2352), False, 'from torchvision import datasets\n'), ((2375, 2464), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(dataset=test_dataset, batch_size=batch_size, shuffle=False,\n num_workers=2)\n', (2385, 2464), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((3597, 3619), 'numpy.array', 'np.array', (['augment_data'], {}), '(augment_data)\n', (3605, 3619), True, 'import numpy as np\n'), ((3640, 3663), 'numpy.array', 'np.array', (['augment_label'], {}), '(augment_label)\n', (3648, 3663), True, 'import numpy as np\n'), ((3827, 3853), 'torch.Tensor', 'torch.Tensor', (['augment_data'], {}), '(augment_data)\n', (3839, 3853), False, 'import torch\n'), ((3932, 3974), 'torch.utils.data.TensorDataset', 'TensorDataset', (['augment_data', 'augment_label'], {}), '(augment_data, augment_label)\n', (3945, 3974), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((4000, 4065), 'torch.utils.data.DataLoader', 'DataLoader', (['augment_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(augment_dataset, batch_size=batch_size, shuffle=False)\n', (4010, 4065), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((4333, 4359), 'numpy.array', 'np.array', (['augment_features'], {}), '(augment_features)\n', (4341, 4359), True, 'import numpy as np\n'), ((1518, 1542), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1532, 1542), False, 'import os\n'), ((1552, 1573), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (1563, 1573), False, 'import os\n'), ((3677, 3719), 'os.path.join', 'os.path.join', (['save_dir', '"""augment_data.npy"""'], {}), "(save_dir, 'augment_data.npy')\n", (3689, 3719), False, 'import os\n'), ((3747, 3790), 'os.path.join', 'os.path.join', (['save_dir', '"""augment_label.npy"""'], {}), "(save_dir, 'augment_label.npy')\n", (3759, 3790), False, 'import os\n'), ((4372, 4417), 
'os.path.join', 'os.path.join', (['save_dir', '"""augment_feature.npy"""'], {}), "(save_dir, 'augment_feature.npy')\n", (4384, 4417), False, 'import os\n'), ((507, 522), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (520, 522), False, 'import torch\n'), ((1724, 1760), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (1745, 1760), False, 'from torchvision import transforms\n'), ((1770, 1803), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1801, 1803), False, 'from torchvision import transforms\n'), ((1813, 1834), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1832, 1834), False, 'from torchvision import transforms\n'), ((1897, 1918), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1916, 1918), False, 'from torchvision import transforms\n'), ((2496, 2518), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (2506, 2518), False, 'import torch\n'), ((3401, 3430), 'torch.argmax', 'torch.argmax', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (3413, 3430), False, 'import torch\n'), ((3455, 3482), 'torch.sum', 'torch.sum', (['(preds == batch_y)'], {}), '(preds == batch_y)\n', (3464, 3482), False, 'import torch\n'), ((3875, 3902), 'torch.Tensor', 'torch.Tensor', (['augment_label'], {}), '(augment_label)\n', (3887, 3902), False, 'import torch\n'), ((4188, 4203), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4201, 4203), False, 'import torch\n'), ((3306, 3321), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3319, 3321), False, 'import torch\n')] |
"""
Functions for explaining classifiers that use tabular data (matrices).
"""
import collections
import json
import copy
import numpy as np
import sklearn
import sklearn.preprocessing
from . import lime_base
from . import explanation
class TableDomainMapper(explanation.DomainMapper):
"""Maps feature ids to names, generates table views, etc"""
def __init__(self, feature_names, feature_values, scaled_row, categorical_features, discretized_feature_names=None):
"""Init.
Args:
feature_names: list of feature names, in order
feature_values: list of strings with the values of the original row
scaled_row: scaled row
            categorical_features: list of categorical feature ids (ints)
            discretized_feature_names: list of names for the discretized
                features, shown in place of feature_names when provided
"""
self.exp_feature_names = feature_names
if discretized_feature_names is not None:
self.exp_feature_names = discretized_feature_names
self.feature_names = feature_names
self.feature_values = feature_values
self.scaled_row = scaled_row
self.all_categorical = len(categorical_features) == len(scaled_row)
self.categorical_features = categorical_features
def map_exp_ids(self, exp):
"""Maps ids to feature names.
Args:
exp: list of tuples [(id, weight), (id,weight)]
Returns:
list of tuples (feature_name, weight)
"""
return [(self.exp_feature_names[x[0]], x[1]) for x in exp]
def visualize_instance_html(self,
exp,
label,
random_id,
show_table=True,
show_contributions=None,
show_scaled=None,
show_all=False):
"""Shows the current example in a table format.
Args:
exp: list of tuples [(id, weight), (id,weight)]
label: label id (integer)
random_id: random_id being used, appended to div ids and etc in
html.
show_table: if False, don't show table visualization.
            show_contributions: if True, add an additional bar plot with weights
multiplied by example. By default, this is true if there are any
continuous features.
show_scaled: if True, display scaled values in table.
show_all: if True, show zero-weighted features in the table.
"""
if show_contributions is None:
show_contributions = not self.all_categorical
if show_scaled is None:
show_scaled = not self.all_categorical
show_scaled = json.dumps(show_scaled)
weights = [0] * len(self.feature_names)
scaled_exp = []
for i, value in exp:
weights[i] = value * self.scaled_row[i]
scaled_exp.append((i, value * self.scaled_row[i]))
scaled_exp = json.dumps(self.map_exp_ids(scaled_exp))
row = ['%.2f' % a if i not in self.categorical_features else 'N/A'
for i, a in enumerate(self.scaled_row)]
out_list = list(zip(self.feature_names, self.feature_values,
row, weights))
if not show_all:
out_list = [out_list[x[0]] for x in exp]
out = u''
if show_contributions:
out += u'''<script>
var cur_width = parseInt(d3.select('#model%s').select('svg').style('width'));
console.log(cur_width);
var svg_contrib = d3.select('#model%s').append('svg');
exp.ExplainFeatures(svg_contrib, %d, %s, '%s', true);
cur_width = Math.max(cur_width, parseInt(svg_contrib.style('width'))) + 'px';
d3.select('#model%s').style('width', cur_width);
</script>
''' % (random_id, random_id, label, scaled_exp,
'Feature contributions', random_id)
if show_table:
out += u'<div id="mytable%s"></div>' % random_id
out += u'''<script>
var tab = d3.select('#mytable%s');
exp.ShowTable(tab, %s, %d, %s);
</script>
''' % (random_id, json.dumps(out_list), label, show_scaled)
return out
class LimeTabularExplainer(object):
"""Explains predictions on tabular (i.e. matrix) data.
For numerical features, perturb them by sampling from a Normal(0,1) and
doing the inverse operation of mean-centering and scaling, according to the
means and stds in the training data. For categorical features, perturb by
sampling according to the training distribution, and making a binary feature
that is 1 when the value is the same as the instance being explained."""
def __init__(self, training_data, feature_names=None, categorical_features=None,
categorical_names=None, kernel_width=3, verbose=False,
class_names=None, feature_selection='auto',
discretize_continuous=True):
"""Init function.
Args:
training_data: numpy 2d array
feature_names: list of names (strings) corresponding to the columns
in the training data.
categorical_features: list of indices (ints) corresponding to the
categorical columns. Everything else will be considered
continuous.
categorical_names: map from int to list of names, where
categorical_names[x][y] represents the name of the yth value of
column x.
kernel_width: kernel width for the exponential kernel
verbose: if true, print local prediction values from linear model
class_names: list of class names, ordered according to whatever the
classifier is using. If not present, class names will be '0',
'1', ...
feature_selection: feature selection method. can be
'forward_selection', 'lasso_path', 'none' or 'auto'.
See function 'explain_instance_with_data' in lime_base.py for
details on what each of the options does.
discretize_continuous: if True, all non-categorical features will be
discretized into quartiles.
"""
self.categorical_names = categorical_names
self.categorical_features = categorical_features
if self.categorical_names is None:
self.categorical_names = {}
if self.categorical_features is None:
self.categorical_features = []
self.discretizer = None
if discretize_continuous:
self.discretizer = QuartileDiscretizer(training_data, categorical_features, feature_names)
categorical_features = range(training_data.shape[1])
discretized_training_data = self.discretizer.discretize(training_data)
kernel = lambda d: np.sqrt(np.exp(-(d**2) / kernel_width ** 2))
self.feature_selection = feature_selection
self.base = lime_base.LimeBase(kernel, verbose)
self.scaler = None
self.class_names = class_names
self.feature_names = feature_names
self.scaler = sklearn.preprocessing.StandardScaler(with_mean=False)
self.scaler.fit(training_data)
self.feature_values = {}
self.feature_frequencies = {}
for feature in categorical_features:
feature_count = collections.defaultdict(lambda: 0.0)
column = training_data[:, feature]
if self.discretizer is not None:
column = discretized_training_data[:, feature]
feature_count[0] = 0.
feature_count[1] = 0.
feature_count[2] = 0.
feature_count[3] = 0.
for value in column:
feature_count[value] += 1
values, frequencies = map(list, zip(*(feature_count.items())))
#print feature, values, frequencies
self.feature_values[feature] = values
self.feature_frequencies[feature] = (np.array(frequencies) /
sum(frequencies))
self.scaler.mean_[feature] = 0
self.scaler.scale_[feature] = 1
#print self.feature_frequencies
def explain_instance(self, data_row, classifier_fn, labels=(1,),
top_labels=None, num_features=10, num_samples=5000):
"""Generates explanations for a prediction.
First, we generate neighborhood data by randomly perturbing features from
the instance (see __data_inverse). We then learn locally weighted linear
models on this neighborhood data to explain each of the classes in an
interpretable way (see lime_base.py).
Args:
data_row: 1d numpy array, corresponding to a row
classifier_fn: classifier prediction probability function, which
takes a string and outputs prediction probabilities. For
ScikitClassifiers , this is classifier.predict_proba.
labels: iterable with labels to be explained.
top_labels: if not None, ignore labels and produce explanations for
the K labels with highest prediction probabilities, where K is
this parameter.
num_features: maximum number of features present in explanation
num_samples: size of the neighborhood to learn the linear model
Returns:
An Explanation object (see explanation.py) with the corresponding
explanations.
"""
data, inverse = self.__data_inverse(data_row, num_samples)
scaled_data = (data - self.scaler.mean_) / self.scaler.scale_
distances = np.sqrt(np.sum((scaled_data - scaled_data[0]) ** 2, axis=1))
yss = classifier_fn(inverse)
if self.class_names is None:
self.class_names = [str(x) for x in range(yss[0].shape[0])]
else:
self.class_names = list(self.class_names)
feature_names = copy.deepcopy(self.feature_names)
if feature_names is None:
feature_names = [str(x) for x in range(data_row.shape[0])]
round_stuff = lambda x: ['%.2f' % a for a in x]
values = round_stuff(data_row)
for i in self.categorical_features:
name = int(data_row[i])
if i in self.categorical_names:
name = self.categorical_names[i][name]
feature_names[i] = '%s=%s' % (feature_names[i], name)
values[i] = 'True'
categorical_features = self.categorical_features
        discretized_feature_names = None
if self.discretizer is not None:
categorical_features = range(data.shape[1])
discretized_instance = self.discretizer.discretize(data_row)
discretized_feature_names = copy.deepcopy(feature_names)
for f in self.discretizer.names:
discretized_feature_names[f] = self.discretizer.names[f][int(discretized_instance[f])]
#values[f] = 'True'
domain_mapper = TableDomainMapper(
feature_names, values, scaled_data[0],
categorical_features=categorical_features,
discretized_feature_names=discretized_feature_names)
ret_exp = explanation.Explanation(domain_mapper=domain_mapper,
class_names=self.class_names)
ret_exp.predict_proba = yss[0]
if top_labels:
labels = np.argsort(yss[0])[-top_labels:]
ret_exp.top_labels = list(labels)
ret_exp.top_labels.reverse()
for label in labels:
ret_exp.intercept[label], ret_exp.local_exp[label] = self.base.explain_instance_with_data(
scaled_data, yss, distances, label, num_features,
feature_selection=self.feature_selection)
return ret_exp
def __data_inverse(self,
data_row,
num_samples):
"""Generates a neighborhood around a prediction.
For numerical features, perturb them by sampling from a Normal(0,1) and
doing the inverse operation of mean-centering and scaling, according to
the means and stds in the training data. For categorical features,
perturb by sampling according to the training distribution, and making a
binary feature that is 1 when the value is the same as the instance
being explained.
Args:
data_row: 1d numpy array, corresponding to a row
num_samples: size of the neighborhood to learn the linear model
Returns:
A tuple (data, inverse), where:
data: dense num_samples * K matrix, where categorical features
are encoded with either 0 (not equal to the corresponding value
in data_row) or 1. The first row is the original instance.
inverse: same as data, except the categorical features are not
binary, but categorical (as the original data)
"""
data = np.zeros((num_samples, data_row.shape[0]))
categorical_features = range(data_row.shape[0])
if self.discretizer is None:
data = np.random.normal(0, 1, num_samples * data_row.shape[0]).reshape(
num_samples, data_row.shape[0])
data = data * self.scaler.scale_ + self.scaler.mean_
categorical_features = self.categorical_features
first_row = data_row
else:
first_row = self.discretizer.discretize(data_row)
data[0] = data_row.copy()
inverse = data.copy()
for column in categorical_features:
values = self.feature_values[column]
freqs = self.feature_frequencies[column]
#print self.feature_frequencies[column], column
inverse_column = np.random.choice(values, size=num_samples, replace=True, p=freqs)
binary_column = np.array([1 if x == first_row[column] else 0 for x in inverse_column])
binary_column[0] = 1
inverse_column[0] = data[0, column]
data[:, column] = binary_column
inverse[:, column] = inverse_column
# if column not in self.categorical_features:
# print values, column,
# print inverse[1:, column]
if self.discretizer is not None:
inverse[1:] = self.discretizer.undiscretize(inverse[1:])
#print zip(inverse[:,10], data[:,10])
return data, inverse
class QuartileDiscretizer:
"""Discretizes data into quartiles."""
def __init__(self, data, categorical_features, feature_names):
"""Initializer
Args:
data: numpy 2d array
categorical_features: list of indices (ints) corresponding to the
categorical columns. These features will not be discretized.
Everything else will be considered continuous, and will be
discretized.
categorical_names: map from int to list of names, where
categorical_names[x][y] represents the name of the yth value of
column x.
feature_names: list of names (strings) corresponding to the columns
in the training data.
"""
to_discretize = [x for x in range(data.shape[1]) if x not in categorical_features]
self.names = {}
self.lambdas = {}
self.ranges = {}
self.means = {}
self.stds = {}
self.mins = {}
self.maxs = {}
for feature in to_discretize:
qts = np.percentile(data[:,feature], [25, 50, 75])
boundaries = np.min(data[:, feature]), np.max(data[:, feature])
name = feature_names[feature]
            self.names[feature] = ['%s <= %.2f' % (name, qts[0]),
                                   '%.2f < %s <= %.2f' % (qts[0], name, qts[1]),
                                   '%.2f < %s <= %.2f' % (qts[1], name, qts[2]),
                                   '%s > %.2f' % (name, qts[2])]
self.lambdas[feature] = lambda x, qts=qts: np.searchsorted(qts, x)
discretized = self.lambdas[feature](data[:, feature])
self.means[feature] = [np.mean(data[discretized == x, feature]) for x in range(4)]
            # small epsilon keeps zero-variance bins usable for sampling
            self.stds[feature] = [np.std(data[discretized == x, feature]) + 1e-12 for x in range(4)]
self.mins[feature] = [boundaries[0], qts[0], qts[1], qts[2]]
self.maxs[feature] = [qts[0], qts[1],qts[2], boundaries[1]]
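    # Example (hypothetical numbers): with quartile cut points qts = [2.0, 3.5, 5.0],
    # np.searchsorted(qts, 4.2) returns 2, i.e. the value lands in the third bin
    # (3.5 < x <= 5.0); undiscretize() later resamples bin 2 from
    # Normal(means[2], stds[2]) clamped to [mins[2], maxs[2]].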
def discretize(self, data):
"""Discretizes the data.
Args:
data: numpy 2d or 1d array
Returns:
numpy array of same dimension, discretized.
"""
ret = data.copy()
for feature in self.lambdas:
if len(data.shape) == 1:
ret[feature] = int(self.lambdas[feature](ret[feature]))
else:
ret[:,feature] = self.lambdas[feature](ret[:,feature]).astype(int)
return ret
def undiscretize(self, data):
ret = data.copy()
for feature in self.means:
mins = self.mins[feature]
maxs = self.maxs[feature]
means = self.means[feature]
stds = self.stds[feature]
get_inverse = lambda q: max(mins[q], min(np.random.normal(means[q], stds[q]), maxs[q]))
if len(data.shape) == 1:
q = int(ret[feature])
ret[feature] = get_inverse(q)
else:
ret[:,feature] = [get_inverse(int(x)) for x in ret[:, feature]]
return ret
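# A minimal usage sketch (hypothetical data/model names; assumes scikit-learn is
# installed and that explanation.Explanation exposes the local_exp dict filled
# in above). Any classifier exposing predict_proba works as classifier_fn.
if __name__ == '__main__':
    import sklearn.datasets
    import sklearn.ensemble

    iris = sklearn.datasets.load_iris()
    clf = sklearn.ensemble.RandomForestClassifier(n_estimators=50)
    clf.fit(iris.data, iris.target)
    explainer = LimeTabularExplainer(iris.data,
                                     feature_names=iris.feature_names,
                                     class_names=iris.target_names)
    exp = explainer.explain_instance(iris.data[0], clf.predict_proba,
                                     num_features=2)
    print(exp.local_exp)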
| [
"numpy.random.normal",
"numpy.mean",
"numpy.random.choice",
"numpy.searchsorted",
"numpy.std",
"json.dumps",
"numpy.min",
"numpy.max",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"collections.defaultdict",
"numpy.exp",
"numpy.argsort",
"copy.deepc... | [((2727, 2750), 'json.dumps', 'json.dumps', (['show_scaled'], {}), '(show_scaled)\n', (2737, 2750), False, 'import json\n'), ((7312, 7365), 'sklearn.preprocessing.StandardScaler', 'sklearn.preprocessing.StandardScaler', ([], {'with_mean': '(False)'}), '(with_mean=False)\n', (7348, 7365), False, 'import sklearn\n'), ((10183, 10216), 'copy.deepcopy', 'copy.deepcopy', (['self.feature_names'], {}), '(self.feature_names)\n', (10196, 10216), False, 'import copy\n'), ((13244, 13286), 'numpy.zeros', 'np.zeros', (['(num_samples, data_row.shape[0])'], {}), '((num_samples, data_row.shape[0]))\n', (13252, 13286), True, 'import numpy as np\n'), ((7549, 7586), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : 0.0)'], {}), '(lambda : 0.0)\n', (7572, 7586), False, 'import collections\n'), ((9892, 9943), 'numpy.sum', 'np.sum', (['((scaled_data - scaled_data[0]) ** 2)'], {'axis': '(1)'}), '((scaled_data - scaled_data[0]) ** 2, axis=1)\n', (9898, 9943), True, 'import numpy as np\n'), ((10999, 11027), 'copy.deepcopy', 'copy.deepcopy', (['feature_names'], {}), '(feature_names)\n', (11012, 11027), False, 'import copy\n'), ((14046, 14111), 'numpy.random.choice', 'np.random.choice', (['values'], {'size': 'num_samples', 'replace': '(True)', 'p': 'freqs'}), '(values, size=num_samples, replace=True, p=freqs)\n', (14062, 14111), True, 'import numpy as np\n'), ((14140, 14212), 'numpy.array', 'np.array', (['[(1 if x == first_row[column] else 0) for x in inverse_column]'], {}), '([(1 if x == first_row[column] else 0) for x in inverse_column])\n', (14148, 14212), True, 'import numpy as np\n'), ((15798, 15843), 'numpy.percentile', 'np.percentile', (['data[:, feature]', '[25, 50, 75]'], {}), '(data[:, feature], [25, 50, 75])\n', (15811, 15843), True, 'import numpy as np\n'), ((7037, 7072), 'numpy.exp', 'np.exp', (['(-d ** 2 / kernel_width ** 2)'], {}), '(-d ** 2 / kernel_width ** 2)\n', (7043, 7072), True, 'import numpy as np\n'), ((8190, 8211), 'numpy.array', 'np.array', (['frequencies'], {}), '(frequencies)\n', (8198, 8211), True, 'import numpy as np\n'), ((11653, 11671), 'numpy.argsort', 'np.argsort', (['yss[0]'], {}), '(yss[0])\n', (11663, 11671), True, 'import numpy as np\n'), ((15868, 15892), 'numpy.min', 'np.min', (['data[:, feature]'], {}), '(data[:, feature])\n', (15874, 15892), True, 'import numpy as np\n'), ((15894, 15918), 'numpy.max', 'np.max', (['data[:, feature]'], {}), '(data[:, feature])\n', (15900, 15918), True, 'import numpy as np\n'), ((16204, 16227), 'numpy.searchsorted', 'np.searchsorted', (['qts', 'x'], {}), '(qts, x)\n', (16219, 16227), True, 'import numpy as np\n'), ((16329, 16369), 'numpy.mean', 'np.mean', (['data[discretized == x, feature]'], {}), '(data[discretized == x, feature])\n', (16336, 16369), True, 'import numpy as np\n'), ((4304, 4324), 'json.dumps', 'json.dumps', (['out_list'], {}), '(out_list)\n', (4314, 4324), False, 'import json\n'), ((13399, 13454), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(num_samples * data_row.shape[0])'], {}), '(0, 1, num_samples * data_row.shape[0])\n', (13415, 13454), True, 'import numpy as np\n'), ((16423, 16462), 'numpy.std', 'np.std', (['data[discretized == x, feature]'], {}), '(data[discretized == x, feature])\n', (16429, 16462), True, 'import numpy as np\n'), ((17443, 17478), 'numpy.random.normal', 'np.random.normal', (['means[q]', 'stds[q]'], {}), '(means[q], stds[q])\n', (17459, 17478), True, 'import numpy as np\n')] |
#!/usr/bin/python
import math
import time
import sys
from AtlasI2C import AtlasI2C
TIMEOUT = 60
def get_devices():
device = AtlasI2C()
device_address_list = device.list_i2c_devices()
device_list = []
for i in device_address_list:
device.set_i2c_address(i)
try:
response = device.query("I")
moduletype = response.split(",")[1]
response = device.query("name,?").split(",")[1]
device_list.append(AtlasI2C(address = i, moduletype = moduletype, name = response))
        except Exception:
continue
return device_list
def print_devices(device_list, device):
for i in device_list:
        if i == device:
print("--> " + i.get_device_info())
else:
print(" - " + i.get_device_info())
def get_device(device_list, name):
for device in device_list:
        if device.moduletype.lower() == name.lower():
return device
return None
def calibrate(device, target):
current = -1.0
loop = 0
# waiting for the reading to stabilize
while True:
if loop > TIMEOUT:
raise TimeoutError()
sensor = read(device)
time.sleep(1)
print("Current value, sensor value: {:.2f} {:.2f}".format(current, sensor))
if math.isclose(current, sensor, abs_tol=0.02):
break
current = sensor
loop = loop + 1
# clear previous calibration
cmd = "cal,clear"
print("Clearing previous calibration data... {:s}".format(cmd))
response = device.query(cmd)
print(response)
# make sure calibration clear is done
loop = 0
while True:
if loop > TIMEOUT:
raise TimeoutError()
cmd = "cal,?"
response = device.query(cmd)
if response.startswith("Success"):
response_array = response.split(",")
if len(response_array) > 1:
is_calibrated = int(response_array[1])
if is_calibrated == 0:
break
loop = loop + 1
time.sleep(1)
# calibrate
if device.moduletype.lower() == "ph":
# pH sensor require a special 3-point calibration
# calibrate mid point
cmd = "cal,mid,{:.2f}".format(target)
else:
cmd = "cal,{:.2f}".format(target)
print("Calibrating: {:.3f} to target {:.3f}: {:s}".format(current, target, cmd))
response = device.query(cmd)
print(response)
# make sure calibration clear is done
loop = 0
while True:
if loop > TIMEOUT:
raise TimeoutError()
cmd = "cal,?"
response = device.query(cmd)
if response.startswith("Success"):
response_array = response.split(",")
if len(response_array) > 1:
is_calibrated = int(response_array[1])
if is_calibrated == 1:
break
loop = loop + 1
time.sleep(1)
    # waiting for the read value to match the calibration target
    loop = 0
    while True:
        if loop > TIMEOUT:
            raise TimeoutError()
        sensor = read(device)
        time.sleep(1)
        if math.isclose(target, sensor, abs_tol=0.02):
            break
        print("Current value, target value: {:.3f}, {:.3f}".format(sensor, target))
        loop = loop + 1
def read(device):
response = device.query("R")
print("Sensor response: %s" % response)
if response.startswith("Success"):
try:
floatVal = float(response.split(":")[1])
print("OK [" + str(floatVal) + "]")
return floatVal
        except Exception:
return 0.0
else:
return 0.0
def main():
if len(sys.argv) != 3:
print("Usage: calibrate.py <sensor_name> <target_value>")
exit()
target = float(sys.argv[2])
device_list = get_devices()
print_devices(device_list, device_list[0])
# choose device based on name
device = get_device(device_list, sys.argv[1])
    if device is None:
print("Sensor named {:s} not found!".format(sys.argv[1]))
exit()
print("Calibrating {:s} sensor to target value {:.3f}".format(device.moduletype, target))
# print_devices(device_list, device)
calibrate(device, target)
if __name__ == '__main__':
main()
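# Example invocation (hypothetical target values; the sensor name must match the
# EZO module type reported by the "I" query, e.g. "ph"):
#   python calibrate.py ph 7.00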
| [
"math.isclose",
"AtlasI2C.AtlasI2C",
"time.sleep"
] | [((152, 162), 'AtlasI2C.AtlasI2C', 'AtlasI2C', ([], {}), '()\n', (160, 162), False, 'from AtlasI2C import AtlasI2C\n'), ((1216, 1229), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1226, 1229), False, 'import time\n'), ((1325, 1368), 'math.isclose', 'math.isclose', (['current', 'sensor'], {'abs_tol': '(0.02)'}), '(current, sensor, abs_tol=0.02)\n', (1337, 1368), False, 'import math\n'), ((2089, 2102), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2099, 2102), False, 'import time\n'), ((2961, 2974), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2971, 2974), False, 'import time\n'), ((3155, 3168), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3165, 3168), False, 'import time\n'), ((3180, 3222), 'math.isclose', 'math.isclose', (['target', 'sensor'], {'abs_tol': '(0.02)'}), '(target, sensor, abs_tol=0.02)\n', (3192, 3222), False, 'import math\n'), ((502, 559), 'AtlasI2C.AtlasI2C', 'AtlasI2C', ([], {'address': 'i', 'moduletype': 'moduletype', 'name': 'response'}), '(address=i, moduletype=moduletype, name=response)\n', (510, 559), False, 'from AtlasI2C import AtlasI2C\n')] |
import copy
import logging
import threading
import entities as e
import networking as n
import physics as p
logger = logging.getLogger(__name__)
lock = threading.RLock()
class World:
ADD = 0x1
DELETE = 0x2
DIFF = 0x3
@staticmethod
def dummy():
return World()
@staticmethod
def diff(from_world, to_world):
msg = bytearray()
# update boundaries
diff = e.Entity.diff(from_world.boundaries, to_world.boundaries)
n.w_byte(msg, len(diff))
n.w_blob(msg, diff)
lock.acquire()
# update entities
for entity_id in from_world.entities:
n.w_byte(msg, entity_id)
if entity_id in to_world.entities:
n.w_byte(msg, World.DIFF)
diff = e.Entity.diff(from_world.entities[entity_id], to_world.entities[entity_id])
n.w_byte(msg, len(diff))
n.w_blob(msg, diff)
else:
n.w_byte(msg, World.DELETE)
# add new entities
for entity_id in to_world.entities:
if entity_id not in from_world.entities:
entity = to_world.entities[entity_id]
n.w_byte(msg, entity_id)
n.w_byte(msg, World.ADD)
if isinstance(entity, e.Cube):
n.w_byte(msg, e.EntityType.CUBE.value)
new = e.Entity.diff(e.Cube.dummy(), entity)
elif isinstance(entity, e.Sphere):
n.w_byte(msg, e.EntityType.SPHERE.value)
new = e.Entity.diff(e.Sphere.dummy(), entity)
else:
raise NotImplementedError
n.w_byte(msg, len(new))
n.w_blob(msg, new)
lock.release()
return msg
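    # Wire format sketch (inferred from diff()/update(); n.w_byte/n.r_byte move
    # single bytes, n.w_blob/n.r_blob move length-prefixed payloads):
    #   [len][boundary diff]                      -- always first
    #   [entity_id][DIFF][len][entity diff]       -- entity changed
    #   [entity_id][DELETE]                       -- entity removed
    #   [entity_id][ADD][entity_type][len][new]   -- entity created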
def __init__(self):
self.boundaries = e.Cube(p.Vector(0, 0, 0), 3)
self.ids = 0
self.entities = dict()
self.paused = False
def __eq__(self, other):
return vars(self) == vars(other)
def _handle_boundaries_collision(self, entity):
if entity.center.x + entity.size / 2 > self.boundaries.center.x + self.boundaries.size / 2:
entity.direction.x = -entity.direction.x
if entity.center.x - entity.size / 2 < self.boundaries.center.x - self.boundaries.size / 2:
entity.direction.x = -entity.direction.x
if entity.center.y + entity.size / 2 > self.boundaries.center.y + self.boundaries.size / 2:
entity.direction.y = -entity.direction.y
if entity.center.y - entity.size / 2 < self.boundaries.center.y - self.boundaries.size / 2:
entity.direction.y = -entity.direction.y
if entity.center.z + entity.size / 2 > self.boundaries.center.z + self.boundaries.size / 2:
entity.direction.z = -entity.direction.z
if entity.center.z - entity.size / 2 < self.boundaries.center.z - self.boundaries.size / 2:
entity.direction.z = -entity.direction.z
def add_entity(self, entity):
self.entities[self.ids] = entity
self.ids += 1
def draw(self):
lock.acquire()
co = copy.deepcopy(self.entities)
lock.release()
for entity in co.values():
entity.draw()
self.boundaries.draw()
def toggle_pause(self):
self.paused = not self.paused
def tick(self, dt):
if not self.paused:
for entity in self.entities.values():
entity.tick(dt)
for entity in self.entities.values():
self._handle_boundaries_collision(entity)
def update(self, msg):
logger.info(f'update len(msg)={len(msg)}')
msg = bytearray(msg)
# update boundaries
diff = n.r_blob(msg, n.r_byte(msg))
self.boundaries.update(diff)
# update entities
while msg:
entity_id = n.r_byte(msg)
update_type = n.r_byte(msg)
if update_type == World.ADD:
entity_type = e.EntityType(n.r_byte(msg))
new = n.r_blob(msg, n.r_byte(msg))
self.add_entity(e.Entity.new(entity_type, new))
elif update_type == World.DELETE:
del self.entities[entity_id]
elif update_type == World.DIFF:
diff = n.r_blob(msg, n.r_byte(msg))
self.entities[entity_id].update(diff)
else:
raise NotImplementedError
return self
if __name__ == '__main__':
p.random.seed(1337)
from_world = World()
to_world = World()
to_world.boundaries.color = p.Vector.random()
colors = [p.Vector(0x00 / 0xFF, 0x99 / 0xFF, 0xCC / 0xFF), p.Vector(0xCC / 0xFF, 0xFF / 0xFF, 0xCC / 0xFF)]
for i in range(2):
cube = e.Cube(p.Vector(0, 0, 0), 1)
cube.speed = p.random.uniform(-3, 3)
cube.direction = p.Vector.random(-0.5, 0.5).normalize()
cube.color = colors[i]
to_world.add_entity(cube)
colors = [p.Vector(0x66 / 0xFF, 0xCC / 0xFF, 0xFF / 0xFF), p.Vector(0x00 / 0xFF, 0x33 / 0xFF, 0x99 / 0xFF)]
for i in range(2):
sphere = e.Sphere(p.Vector(0, 0, 0), p.random.uniform(0.4, 0.8))
sphere.speed = p.random.uniform(-3, 3)
sphere.direction = p.Vector.random(-0.5, 0.5).normalize()
sphere.color = colors[i]
to_world.add_entity(sphere)
from_world.update(World.diff(from_world, to_world))
assert from_world == to_world
import yaml
with open('world.yml', 'w') as f:
yaml.dump(to_world, f)
with open('world.yml') as f:
loaded_world = yaml.load(f)
assert from_world == loaded_world
| [
"logging.getLogger",
"entities.Entity.diff",
"copy.deepcopy",
"physics.Vector.random",
"physics.Vector",
"yaml.dump",
"entities.Cube.dummy",
"threading.RLock",
"yaml.load",
"entities.Entity.new",
"networking.w_byte",
"entities.Sphere.dummy",
"physics.random.uniform",
"physics.random.seed",... | [((119, 146), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (136, 146), False, 'import logging\n'), ((155, 172), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (170, 172), False, 'import threading\n'), ((4512, 4531), 'physics.random.seed', 'p.random.seed', (['(1337)'], {}), '(1337)\n', (4525, 4531), True, 'import physics as p\n'), ((4613, 4630), 'physics.Vector.random', 'p.Vector.random', ([], {}), '()\n', (4628, 4630), True, 'import physics as p\n'), ((417, 474), 'entities.Entity.diff', 'e.Entity.diff', (['from_world.boundaries', 'to_world.boundaries'], {}), '(from_world.boundaries, to_world.boundaries)\n', (430, 474), True, 'import entities as e\n'), ((516, 535), 'networking.w_blob', 'n.w_blob', (['msg', 'diff'], {}), '(msg, diff)\n', (524, 535), True, 'import networking as n\n'), ((3149, 3177), 'copy.deepcopy', 'copy.deepcopy', (['self.entities'], {}), '(self.entities)\n', (3162, 3177), False, 'import copy\n'), ((4645, 4684), 'physics.Vector', 'p.Vector', (['(0 / 255)', '(153 / 255)', '(204 / 255)'], {}), '(0 / 255, 153 / 255, 204 / 255)\n', (4653, 4684), True, 'import physics as p\n'), ((4694, 4735), 'physics.Vector', 'p.Vector', (['(204 / 255)', '(255 / 255)', '(204 / 255)'], {}), '(204 / 255, 255 / 255, 204 / 255)\n', (4702, 4735), True, 'import physics as p\n'), ((4831, 4854), 'physics.random.uniform', 'p.random.uniform', (['(-3)', '(3)'], {}), '(-3, 3)\n', (4847, 4854), True, 'import physics as p\n'), ((4998, 5039), 'physics.Vector', 'p.Vector', (['(102 / 255)', '(204 / 255)', '(255 / 255)'], {}), '(102 / 255, 204 / 255, 255 / 255)\n', (5006, 5039), True, 'import physics as p\n'), ((5047, 5085), 'physics.Vector', 'p.Vector', (['(0 / 255)', '(51 / 255)', '(153 / 255)'], {}), '(0 / 255, 51 / 255, 153 / 255)\n', (5055, 5085), True, 'import physics as p\n'), ((5215, 5238), 'physics.random.uniform', 'p.random.uniform', (['(-3)', '(3)'], {}), '(-3, 3)\n', (5231, 5238), True, 'import physics as p\n'), ((5527, 5549), 'yaml.dump', 'yaml.dump', (['to_world', 'f'], {}), '(to_world, f)\n', (5536, 5549), False, 'import yaml\n'), ((5606, 5618), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (5615, 5618), False, 'import yaml\n'), ((644, 668), 'networking.w_byte', 'n.w_byte', (['msg', 'entity_id'], {}), '(msg, entity_id)\n', (652, 668), True, 'import networking as n\n'), ((1847, 1864), 'physics.Vector', 'p.Vector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1855, 1864), True, 'import physics as p\n'), ((3769, 3782), 'networking.r_byte', 'n.r_byte', (['msg'], {}), '(msg)\n', (3777, 3782), True, 'import networking as n\n'), ((3890, 3903), 'networking.r_byte', 'n.r_byte', (['msg'], {}), '(msg)\n', (3898, 3903), True, 'import networking as n\n'), ((3930, 3943), 'networking.r_byte', 'n.r_byte', (['msg'], {}), '(msg)\n', (3938, 3943), True, 'import networking as n\n'), ((4788, 4805), 'physics.Vector', 'p.Vector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (4796, 4805), True, 'import physics as p\n'), ((5145, 5162), 'physics.Vector', 'p.Vector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (5153, 5162), True, 'import physics as p\n'), ((5164, 5190), 'physics.random.uniform', 'p.random.uniform', (['(0.4)', '(0.8)'], {}), '(0.4, 0.8)\n', (5180, 5190), True, 'import physics as p\n'), ((732, 757), 'networking.w_byte', 'n.w_byte', (['msg', 'World.DIFF'], {}), '(msg, World.DIFF)\n', (740, 757), True, 'import networking as n\n'), ((781, 856), 'entities.Entity.diff', 'e.Entity.diff', (['from_world.entities[entity_id]', 
'to_world.entities[entity_id]'], {}), '(from_world.entities[entity_id], to_world.entities[entity_id])\n', (794, 856), True, 'import entities as e\n'), ((914, 933), 'networking.w_blob', 'n.w_blob', (['msg', 'diff'], {}), '(msg, diff)\n', (922, 933), True, 'import networking as n\n'), ((968, 995), 'networking.w_byte', 'n.w_byte', (['msg', 'World.DELETE'], {}), '(msg, World.DELETE)\n', (976, 995), True, 'import networking as n\n'), ((1190, 1214), 'networking.w_byte', 'n.w_byte', (['msg', 'entity_id'], {}), '(msg, entity_id)\n', (1198, 1214), True, 'import networking as n\n'), ((1231, 1255), 'networking.w_byte', 'n.w_byte', (['msg', 'World.ADD'], {}), '(msg, World.ADD)\n', (1239, 1255), True, 'import networking as n\n'), ((1728, 1746), 'networking.w_blob', 'n.w_blob', (['msg', 'new'], {}), '(msg, new)\n', (1736, 1746), True, 'import networking as n\n'), ((4880, 4906), 'physics.Vector.random', 'p.Vector.random', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (4895, 4906), True, 'import physics as p\n'), ((5266, 5292), 'physics.Vector.random', 'p.Vector.random', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (5281, 5292), True, 'import physics as p\n'), ((1323, 1361), 'networking.w_byte', 'n.w_byte', (['msg', 'e.EntityType.CUBE.value'], {}), '(msg, e.EntityType.CUBE.value)\n', (1331, 1361), True, 'import networking as n\n'), ((4028, 4041), 'networking.r_byte', 'n.r_byte', (['msg'], {}), '(msg)\n', (4036, 4041), True, 'import networking as n\n'), ((4079, 4092), 'networking.r_byte', 'n.r_byte', (['msg'], {}), '(msg)\n', (4087, 4092), True, 'import networking as n\n'), ((4126, 4156), 'entities.Entity.new', 'e.Entity.new', (['entity_type', 'new'], {}), '(entity_type, new)\n', (4138, 4156), True, 'import entities as e\n'), ((1402, 1416), 'entities.Cube.dummy', 'e.Cube.dummy', ([], {}), '()\n', (1414, 1416), True, 'import entities as e\n'), ((1497, 1537), 'networking.w_byte', 'n.w_byte', (['msg', 'e.EntityType.SPHERE.value'], {}), '(msg, e.EntityType.SPHERE.value)\n', (1505, 1537), True, 'import networking as n\n'), ((1578, 1594), 'entities.Sphere.dummy', 'e.Sphere.dummy', ([], {}), '()\n', (1592, 1594), True, 'import entities as e\n'), ((4330, 4343), 'networking.r_byte', 'n.r_byte', (['msg'], {}), '(msg)\n', (4338, 4343), True, 'import networking as n\n')] |
from decimal import Decimal
from PyQt5 import QtCore
from PyQt5.QtWidgets import QTreeWidget, QTreeWidgetItem, QHeaderView
class PlayerCharacterInventoryTreeWidget(QTreeWidget):
def __init__(self, CharacterWindow):
super().__init__()
# Store Parameters
self.CharacterWindow = CharacterWindow
# Header Setup
self.setRootIsDecorated(False)
self.header().setSectionResizeMode(QHeaderView.ResizeToContents)
self.setColumnCount(7)
self.setHeaderLabels(["Name", "Count", "Unit Weight", "Unit Value", "Total Weight", "Total Value", "Tag"])
def FillFromInventory(self):
self.clear()
for ItemIndex in range(len(self.CharacterWindow.PlayerCharacter.Stats["Inventory"])):
self.invisibleRootItem().addChild(PlayerCharacterInventoryWidgetItem(self.CharacterWindow, ItemIndex, self.CharacterWindow.PlayerCharacter.Stats["Inventory"][ItemIndex]))
def SelectIndex(self, Index):
DestinationIndex = self.model().index(Index, 0)
self.setCurrentIndex(DestinationIndex)
self.scrollToItem(self.currentItem(), self.PositionAtCenter)
self.horizontalScrollBar().setValue(0)
class PlayerCharacterInventoryWidgetItem(QTreeWidgetItem):
def __init__(self, CharacterWindow, Index, Item):
super().__init__()
# Store Parameters
self.CharacterWindow = CharacterWindow
self.Index = Index
self.Item = Item
# Variables
self.NameText = Item["Item Name"]
self.CountText = str(Item["Item Count"])
self.UnitWeightText = str(Item["Item Unit Weight"]) + " lbs."
self.UnitValueText = str(Item["Item Unit Value"]) + " " + Item["Item Unit Value Denomination"]
        TotalWeightAndValue = self.CharacterWindow.PlayerCharacter.CalculateItemTotalWeightAndValue(self.Index)
        self.TotalWeightText = str(TotalWeightAndValue["Item Total Weight"].quantize(Decimal("0.01"))) + " lbs."
        self.TotalValueText = str(TotalWeightAndValue["Item Total Value"].quantize(Decimal("0.01"))) + " GP"
self.TagText = Item["Item Tag"]
self.ColumnTextList = [self.NameText, self.CountText, self.UnitWeightText, self.UnitValueText, self.TotalWeightText, self.TotalValueText, self.TagText]
# Set Text
for Column in range(len(self.ColumnTextList)):
self.setText(Column, self.ColumnTextList[Column])
self.setToolTip(Column, self.ColumnTextList[Column])
# Set Alignment
for Column in range(len(self.ColumnTextList) - 1):
self.setTextAlignment(Column, QtCore.Qt.AlignCenter)
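# Decimal("0.01") quantization pins the derived weight/value columns to two
# decimal places, e.g. Decimal("12.346").quantize(Decimal("0.01")) == Decimal("12.35").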
| [
"decimal.Decimal"
] | [((1893, 1908), 'decimal.Decimal', 'Decimal', (['"""0.01"""'], {}), "('0.01')\n", (1900, 1908), False, 'from decimal import Decimal\n'), ((2066, 2081), 'decimal.Decimal', 'Decimal', (['"""0.01"""'], {}), "('0.01')\n", (2073, 2081), False, 'from decimal import Decimal\n')] |
#! /usr/bin/env python3
# coding: utf-8
from datetime import datetime
from django.contrib.auth.models import User
from ..models import Training, Exercise, MovementsPerExercise, Movement, MovementSettings, Equipment, MovementSettingsPerMovementsPerExercise
class TestDatabase:
@staticmethod
def create():
# We create a users
admin_user = User.objects.create_superuser(username='admin_user', password='<PASSWORD>', email="<EMAIL>")
ordinary_user = User.objects.create_user(username='ordinary_user', password='<PASSWORD>', is_staff=False)
new_user = User.objects.create_user(username='new_user', password='<PASSWORD>')
# We create some settings
rep = MovementSettings.objects.create(name=MovementSettings.REPETITIONS, founder=admin_user)
weight = MovementSettings.objects.create(name=MovementSettings.WEIGHT, founder=admin_user)
dist = MovementSettings.objects.create(name=MovementSettings.DISTANCE, founder=admin_user)
cal = MovementSettings.objects.create(name=MovementSettings.CALORIES, founder=admin_user)
# We create some equipments
kb = Equipment.objects.create(name="kettlebell", founder=admin_user)
anyone = Equipment.objects.create(name="aucun", founder=admin_user)
ball = Equipment.objects.create(name="balle", founder=admin_user)
drawbar = Equipment.objects.create(name="barre de traction", founder=admin_user)
# We create some movements
squat = Movement.objects.create(name="squat", founder=admin_user, equipment=kb)
squat.settings.add(rep, weight)
push_up = Movement.objects.create(name="pushup", founder=admin_user, equipment=anyone)
push_up.settings.add(rep)
wallball = Movement.objects.create(name="wallball", founder=admin_user, equipment=ball)
wallball.settings.add(rep, weight)
pullup = Movement.objects.create(name="pullup", founder=admin_user, equipment=drawbar)
        pullup.settings.add(rep)
# We create some workouts
# 1. Chelsea Workout created by ordinary_user
o_chelsea = Exercise.objects.create(name="chelsea",
exercise_type=Exercise.EMOM,
description="test chelsea",
goal_type=Exercise.TIME,
goal_value=30,
founder=ordinary_user)
o_chelsea_pullup = MovementsPerExercise.objects.create(exercise=o_chelsea,
movement=pullup,
movement_number=1)
o_chelsea_pullup_rep = MovementSettingsPerMovementsPerExercise.objects.create(exercise_movement=o_chelsea_pullup,
setting=rep,
setting_value=10)
o_chelsea_pushup = MovementsPerExercise.objects.create(exercise=o_chelsea,
movement=push_up,
movement_number=2)
o_chelsea_pushup_rep = MovementSettingsPerMovementsPerExercise.objects.create(exercise_movement=o_chelsea_pushup,
setting=rep,
setting_value=20)
o_chelsea_squat = MovementsPerExercise.objects.create(exercise=o_chelsea,
movement=squat,
movement_number=3)
o_chelsea_squat_rep = MovementSettingsPerMovementsPerExercise.objects.create(exercise_movement=o_chelsea_squat,
setting=rep,
setting_value=30)
o_chelsea_squat_weight = MovementSettingsPerMovementsPerExercise.objects.create(exercise_movement=o_chelsea_squat,
setting=weight,
setting_value=10)
# 2. Chelsea Workout created by admin_user
a_chelsea = Exercise.objects.create(name="chelsea",
exercise_type=Exercise.EMOM,
description="test chelsea",
goal_type=Exercise.TIME,
goal_value=30,
is_default=True,
founder=admin_user)
a_chelsea_pullup = MovementsPerExercise.objects.create(exercise=a_chelsea,
movement=pullup,
movement_number=1)
a_chelsea_pullup_rep = MovementSettingsPerMovementsPerExercise.objects.create(exercise_movement=a_chelsea_pullup,
setting=rep,
setting_value=5)
a_chelsea_pushup = MovementsPerExercise.objects.create(exercise=a_chelsea,
movement=push_up,
movement_number=2)
a_chelsea_pushup_rep = MovementSettingsPerMovementsPerExercise.objects.create(exercise_movement=a_chelsea_pushup,
setting=rep,
setting_value=10)
a_chelsea_squat = MovementsPerExercise.objects.create(exercise=a_chelsea,
movement=squat,
movement_number=3)
a_chelsea_squat_rep = MovementSettingsPerMovementsPerExercise.objects.create(exercise_movement=a_chelsea_squat,
setting=rep,
setting_value=15)
a_chelsea_squat_weight = MovementSettingsPerMovementsPerExercise.objects.create(exercise_movement=a_chelsea_squat,
setting=weight,
setting_value=0)
# 3. Connie Workout created by new_user
connie = Exercise.objects.create(name="connie",
exercise_type=Exercise.FORTIME,
description="test connie",
goal_type=Exercise.ROUND,
goal_value=5,
founder=new_user)
connie_pullup = MovementsPerExercise.objects.create(exercise=connie,
movement=pullup,
movement_number=1)
connie_pullup_rep = MovementSettingsPerMovementsPerExercise.objects.create(exercise_movement=connie_pullup,
setting=rep,
setting_value=25)
connie_wallball = MovementsPerExercise.objects.create(exercise=connie,
movement=wallball,
movement_number=2)
connie_wallball_rep = MovementSettingsPerMovementsPerExercise.objects.create(exercise_movement=connie_wallball,
setting=rep,
setting_value=50)
connie_wallball_weight = MovementSettingsPerMovementsPerExercise.objects.create(exercise_movement=connie_wallball,
setting=weight,
setting_value=20)
# We create some trainings
date = datetime(2018, 1, 25)
o_chelsea_training_o_user = Training.objects.create(exercise=o_chelsea,
founder=ordinary_user,
date=date,
performance_type=Training.ROUND,
performance_value=25,
done=True)
date = datetime(2018, 3, 8)
a_chelsea_training_new_user = Training.objects.create(exercise=a_chelsea,
founder=new_user,
date=date,
performance_type=Training.ROUND,
performance_value=15,
done=True)
date = datetime(2018, 4, 5)
connie_training_new_user_1 = Training.objects.create(exercise=connie,
founder=new_user,
date=date,
performance_type=Training.TIME,
performance_value=230,
done=True)
date = datetime(2018, 5, 2)
connie_training_new_user_2 = Training.objects.create(exercise=connie,
founder=new_user,
date=date,
performance_type=Training.TIME,
performance_value=330,
done=True)
date = datetime(2018, 8, 8)
a_chelsea_training_ordinary_user = Training.objects.create(exercise=a_chelsea,
founder=ordinary_user,
date=date,
performance_type=Training.ROUND) | [
"datetime.datetime",
"django.contrib.auth.models.User.objects.create_superuser",
"django.contrib.auth.models.User.objects.create_user"
] | [((367, 463), 'django.contrib.auth.models.User.objects.create_superuser', 'User.objects.create_superuser', ([], {'username': '"""admin_user"""', 'password': '"""<PASSWORD>"""', 'email': '"""<EMAIL>"""'}), "(username='admin_user', password='<PASSWORD>',\n email='<EMAIL>')\n", (396, 463), False, 'from django.contrib.auth.models import User\n'), ((484, 577), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""ordinary_user"""', 'password': '"""<PASSWORD>"""', 'is_staff': '(False)'}), "(username='ordinary_user', password='<PASSWORD>',\n is_staff=False)\n", (508, 577), False, 'from django.contrib.auth.models import User\n'), ((593, 661), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""new_user"""', 'password': '"""<PASSWORD>"""'}), "(username='new_user', password='<PASSWORD>')\n", (617, 661), False, 'from django.contrib.auth.models import User\n'), ((8944, 8965), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(25)'], {}), '(2018, 1, 25)\n', (8952, 8965), False, 'from datetime import datetime\n'), ((9422, 9442), 'datetime.datetime', 'datetime', (['(2018)', '(3)', '(8)'], {}), '(2018, 3, 8)\n', (9430, 9442), False, 'from datetime import datetime\n'), ((9856, 9876), 'datetime.datetime', 'datetime', (['(2018)', '(4)', '(5)'], {}), '(2018, 4, 5)\n', (9864, 9876), False, 'from datetime import datetime\n'), ((10286, 10306), 'datetime.datetime', 'datetime', (['(2018)', '(5)', '(2)'], {}), '(2018, 5, 2)\n', (10294, 10306), False, 'from datetime import datetime\n'), ((10716, 10736), 'datetime.datetime', 'datetime', (['(2018)', '(8)', '(8)'], {}), '(2018, 8, 8)\n', (10724, 10736), False, 'from datetime import datetime\n')] |
#Used to log errors to a text file
import sys
sys.stderr = open("errlog.txt", "w")
try:
#Imports several custom functions in different files
from authorization import get_service
from matches import clear_duplicate_events
from matches import get_GosuGamer_matches
from matches import add_events
from customwidget import Table
except ImportError:
#Installs required modules
import os
    os.system(sys.executable + " install_dependencies.py")
#Imports several custom functions in different files
from authorization import get_service
from matches import clear_duplicate_events
from matches import get_GosuGamer_matches
from matches import add_events
from customwidget import Table
#Imports some GUI libraries
import tkinter as tk
from tkinter import ttk
#Imports a threading library
import threading
#Creates an application class which can be called to run D2MT
class Application:
#Creates an init function which generates all the widgets
def __init__(self, master):
#Creates a frame inside of the window
frame = tk.Frame(master)
frame.grid()
#Gets a sevice that adds and gets events from google calendar
self.service = get_service()
#Creates a combobox of the different websites to get matches from
self.selectedWebsite = tk.StringVar()
        self.websites = ttk.Combobox(frame, textvariable = self.selectedWebsite, values = ("GosuGamers",), state = "readonly")
self.websites.current(0)
self.websites.grid(column = 0, row = 1, padx = 5)
#Creates a button to add the matches to your google calendar
self.addMatches = tk.Button(frame, text = "Add Matches", fg = "yellow", bg = "black", command = self.create_thread)
self.addMatches.grid(column = 0, row = 0, pady = 3, padx = 10)
#Creates a quit button to quit the app
self.quit = tk.Button(frame, text = "Quit", fg = "white", bg = "black", command = root.destroy)
self.quit.grid(column = 2, row = 0, pady = 2, padx = 10)
#Creates a progress bar for creating events
self.progressBar = ttk.Progressbar(frame, orient = "horizontal", length = 200, mode = "determinate", maximum = 100)
self.progressBar.grid(column = 1, row = 0, pady = 2, padx = 4)
#Creates a status label to tell the user what the app is doing
self.status = tk.Label(frame, text = "", fg = "black")
self.status.grid(column = 1, row = 1, pady = 2, padx = 2)
#Gets a list of matches from the GosuGamers website
self.matches = get_GosuGamer_matches()
#Gets all the data needed for the table from the matches
self.team1s = []
self.team2s = []
self.leagues = []
self.datetimes = []
for self.match in self.matches:
self.team1s.append(self.match[0].split(" Vs. ")[0])
self.team2s.append(self.match[0].split(" Vs. ")[1])
self.leagues.append(self.match[1])
self.datetimes.append(self.match[2].split("T")[0] + " @ " + self.match[2].split("T")[1])
#Creates the customwidget Table to list the matches
self.matchbox = Table(frame,
["Team 1", "Team 2", "League/Tournament", "Date (YYYY/MM/DD) @ Time (PST)"],
[self.team1s, self.team2s, self.leagues, self.datetimes],
betweenRowHeaders = ["vs.", "in", "on"],
searchable = True)
self.matchbox.grid(row = 2, column = 0)
#Creates a thread to add events alongside the render loop
def create_thread(self):
threading.Thread(target = self.add_events).start()
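    # Note: the worker thread touches ttk widgets (progress bar, status label);
    # Tkinter is not generally thread-safe, so a stricter sketch would marshal
    # UI updates back onto the main loop instead, e.g.:
    #   root.after(0, lambda: self.status.config(text="Done"))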
#Adds the matches to the user's google calendar
def add_events(self):
#Gets the highlighted matches from the table
self.selectedMatches = []
for i in range(0, len(self.matches)):
if i in self.matchbox.curselection():
self.selectedMatches.append(self.matches[i])
#Deletes duplicate matches already in google calendar
clear_duplicate_events(self.service, self.progressBar, self.status, self.selectedMatches)
#Adds the upcoming matches taken off the GosuGamer website
add_events(self.service, self.selectedMatches, self.progressBar, self.status)
#Creates the root tkinter window
root = tk.Tk()
#Extra window sizing stuff (non-functional)
#windowWidth = 500
#windowHeight = 500
#screen_width = root.winfo_screenwidth()
#screen_height = root.winfo_screenheight()
#root.geometry(str(windowWidth) + "x" + str(windowWidth) + "+" + str(int(screen_width/2 - windowWidth/2)) + "+" + str(int(screen_height/2 - windowHeight/2)))
#Creates an instance of the D2MT app
app = Application(root)
#Starts the root tkinter loop
root.mainloop()
| [
"matches.get_GosuGamer_matches",
"tkinter.ttk.Progressbar",
"matches.clear_duplicate_events",
"authorization.get_service",
"matches.add_events",
"tkinter.Button",
"tkinter.StringVar",
"tkinter.Tk",
"tkinter.Label",
"tkinter.ttk.Combobox",
"os.system",
"tkinter.Frame",
"customwidget.Table",
... | [((4403, 4410), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (4408, 4410), True, 'import tkinter as tk\n'), ((420, 456), 'os.system', 'os.system', (['"""install_dependencies.py"""'], {}), "('install_dependencies.py')\n", (429, 456), False, 'import os\n'), ((1080, 1096), 'tkinter.Frame', 'tk.Frame', (['master'], {}), '(master)\n', (1088, 1096), True, 'import tkinter as tk\n'), ((1212, 1225), 'authorization.get_service', 'get_service', ([], {}), '()\n', (1223, 1225), False, 'from authorization import get_service\n'), ((1332, 1346), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (1344, 1346), True, 'import tkinter as tk\n'), ((1371, 1468), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['frame'], {'textvariable': 'self.selectedWebsite', 'values': '"""GosuGamers"""', 'state': '"""readonly"""'}), "(frame, textvariable=self.selectedWebsite, values='GosuGamers',\n state='readonly')\n", (1383, 1468), False, 'from tkinter import ttk\n'), ((1660, 1754), 'tkinter.Button', 'tk.Button', (['frame'], {'text': '"""Add Matches"""', 'fg': '"""yellow"""', 'bg': '"""black"""', 'command': 'self.create_thread'}), "(frame, text='Add Matches', fg='yellow', bg='black', command=self.\n create_thread)\n", (1669, 1754), True, 'import tkinter as tk\n'), ((1897, 1972), 'tkinter.Button', 'tk.Button', (['frame'], {'text': '"""Quit"""', 'fg': '"""white"""', 'bg': '"""black"""', 'command': 'root.destroy'}), "(frame, text='Quit', fg='white', bg='black', command=root.destroy)\n", (1906, 1972), True, 'import tkinter as tk\n'), ((2126, 2218), 'tkinter.ttk.Progressbar', 'ttk.Progressbar', (['frame'], {'orient': '"""horizontal"""', 'length': '(200)', 'mode': '"""determinate"""', 'maximum': '(100)'}), "(frame, orient='horizontal', length=200, mode='determinate',\n maximum=100)\n", (2141, 2218), False, 'from tkinter import ttk\n'), ((2388, 2424), 'tkinter.Label', 'tk.Label', (['frame'], {'text': '""""""', 'fg': '"""black"""'}), "(frame, text='', fg='black')\n", (2396, 2424), True, 'import tkinter as tk\n'), ((2579, 2602), 'matches.get_GosuGamer_matches', 'get_GosuGamer_matches', ([], {}), '()\n', (2600, 2602), False, 'from matches import get_GosuGamer_matches\n'), ((3174, 3390), 'customwidget.Table', 'Table', (['frame', "['Team 1', 'Team 2', 'League/Tournament', 'Date (YYYY/MM/DD) @ Time (PST)']", '[self.team1s, self.team2s, self.leagues, self.datetimes]'], {'betweenRowHeaders': "['vs.', 'in', 'on']", 'searchable': '(True)'}), "(frame, ['Team 1', 'Team 2', 'League/Tournament',\n 'Date (YYYY/MM/DD) @ Time (PST)'], [self.team1s, self.team2s, self.\n leagues, self.datetimes], betweenRowHeaders=['vs.', 'in', 'on'],\n searchable=True)\n", (3179, 3390), False, 'from customwidget import Table\n'), ((4112, 4206), 'matches.clear_duplicate_events', 'clear_duplicate_events', (['self.service', 'self.progressBar', 'self.status', 'self.selectedMatches'], {}), '(self.service, self.progressBar, self.status, self.\n selectedMatches)\n', (4134, 4206), False, 'from matches import clear_duplicate_events\n'), ((4286, 4363), 'matches.add_events', 'add_events', (['self.service', 'self.selectedMatches', 'self.progressBar', 'self.status'], {}), '(self.service, self.selectedMatches, self.progressBar, self.status)\n', (4296, 4363), False, 'from matches import add_events\n'), ((3658, 3698), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.add_events'}), '(target=self.add_events)\n', (3674, 3698), False, 'import threading\n')] |
import os
from shutil import copyfile

import markdown
from blogger_cli.converter.extractor import (
extract_and_write_static,
extract_main_and_meta_from_md,
get_summary_limit,
extract_topic,
replace_ext,
)
def convert_and_copy_to_blog(ctx, md_file):
md_file_path = os.path.abspath(os.path.expanduser(md_file))
html_body, meta = convert(ctx, md_file_path)
html_filename_meta = write_html_and_md(ctx, html_body, md_file_path, meta)
return html_filename_meta
def convert(ctx, md_file_path):
with open(md_file_path, "r", encoding="utf8") as rf:
md_data = rf.read()
ctx.vlog(":: Extracting meta info")
main_md, metadata = extract_main_and_meta_from_md(ctx, md_data)
extensions = ["extra", "smarty"]
html = markdown.markdown(main_md, extensions=extensions, output_format="html5")
char_limit = get_summary_limit(ctx, file_type="md")
metadata["_summary_"] = main_md[:char_limit]
ctx.vlog(":: Extracted summary")
return html, metadata
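# Quick sanity check of the markdown call above (python-markdown's real API; the
# exact entities may vary slightly across versions):
#   >>> markdown.markdown('He said "hi"', extensions=["extra", "smarty"],
#   ...                   output_format="html5")
#   '<p>He said &ldquo;hi&rdquo;</p>'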
def write_html_and_md(ctx, html_body, md_file_path, meta):
md_filename = os.path.basename(md_file_path)
destination_dir = ctx.conversion["destination_dir"]
topic = extract_topic(ctx, meta)
md_filename = os.path.join(topic, md_filename)
html_filename = replace_ext(md_filename, ".html")
html_file_path = os.path.join(destination_dir, html_filename)
new_md_file_path = os.path.join(destination_dir, md_filename)
new_blog_post_dir = os.path.dirname(html_file_path)
ctx.vlog(":: New blog_posts_dir finalized", new_blog_post_dir)
if not os.path.exists(new_blog_post_dir):
os.mkdir(new_blog_post_dir)
extract_static = ctx.conversion["extract_static"]
if extract_static:
html_body = extract_and_write_static(
ctx, html_body, new_blog_post_dir, md_filename
)
with open(html_file_path, "w", encoding="utf8") as wf:
wf.write(html_body)
ctx.log(":: Converted basic html to", html_file_path)
# skip copying md file if converting to and from same folder.
if md_file_path != new_md_file_path:
try:
copyfile(md_file_path, new_md_file_path)
ctx.log(":: Copied md file to", new_md_file_path)
except Exception as E:
os.remove(new_md_file_path)
copyfile(md_file_path, new_md_file_path)
ctx.log(":: ERROR", E, "Overwriting md file", new_md_file_path)
return (html_filename, meta)
| [
"markdown.markdown",
"os.path.exists",
"blogger_cli.converter.extractor.extract_topic",
"blogger_cli.converter.extractor.replace_ext",
"os.path.join",
"blogger_cli.converter.extractor.extract_and_write_static",
"blogger_cli.converter.extractor.extract_main_and_meta_from_md",
"os.path.dirname",
"shut... | [((775, 818), 'blogger_cli.converter.extractor.extract_main_and_meta_from_md', 'extract_main_and_meta_from_md', (['ctx', 'md_data'], {}), '(ctx, md_data)\n', (804, 818), False, 'from blogger_cli.converter.extractor import extract_and_write_static, extract_main_and_meta_from_md, get_summary_limit, extract_topic, replace_ext\n'), ((867, 939), 'markdown.markdown', 'markdown.markdown', (['main_md'], {'extensions': 'extensions', 'output_format': '"""html5"""'}), "(main_md, extensions=extensions, output_format='html5')\n", (884, 939), False, 'import markdown\n'), ((958, 996), 'blogger_cli.converter.extractor.get_summary_limit', 'get_summary_limit', (['ctx'], {'file_type': '"""md"""'}), "(ctx, file_type='md')\n", (975, 996), False, 'from blogger_cli.converter.extractor import extract_and_write_static, extract_main_and_meta_from_md, get_summary_limit, extract_topic, replace_ext\n'), ((1189, 1219), 'os.path.basename', 'os.path.basename', (['md_file_path'], {}), '(md_file_path)\n', (1205, 1219), False, 'import os\n'), ((1288, 1312), 'blogger_cli.converter.extractor.extract_topic', 'extract_topic', (['ctx', 'meta'], {}), '(ctx, meta)\n', (1301, 1312), False, 'from blogger_cli.converter.extractor import extract_and_write_static, extract_main_and_meta_from_md, get_summary_limit, extract_topic, replace_ext\n'), ((1332, 1364), 'os.path.join', 'os.path.join', (['topic', 'md_filename'], {}), '(topic, md_filename)\n', (1344, 1364), False, 'import os\n'), ((1385, 1418), 'blogger_cli.converter.extractor.replace_ext', 'replace_ext', (['md_filename', '""".html"""'], {}), "(md_filename, '.html')\n", (1396, 1418), False, 'from blogger_cli.converter.extractor import extract_and_write_static, extract_main_and_meta_from_md, get_summary_limit, extract_topic, replace_ext\n'), ((1440, 1484), 'os.path.join', 'os.path.join', (['destination_dir', 'html_filename'], {}), '(destination_dir, html_filename)\n', (1452, 1484), False, 'import os\n'), ((1508, 1550), 'os.path.join', 'os.path.join', (['destination_dir', 'md_filename'], {}), '(destination_dir, md_filename)\n', (1520, 1550), False, 'import os\n'), ((1575, 1606), 'os.path.dirname', 'os.path.dirname', (['html_file_path'], {}), '(html_file_path)\n', (1590, 1606), False, 'import os\n'), ((404, 431), 'os.path.expanduser', 'os.path.expanduser', (['md_file'], {}), '(md_file)\n', (422, 431), False, 'import os\n'), ((1686, 1719), 'os.path.exists', 'os.path.exists', (['new_blog_post_dir'], {}), '(new_blog_post_dir)\n', (1700, 1719), False, 'import os\n'), ((1729, 1756), 'os.mkdir', 'os.mkdir', (['new_blog_post_dir'], {}), '(new_blog_post_dir)\n', (1737, 1756), False, 'import os\n'), ((1855, 1927), 'blogger_cli.converter.extractor.extract_and_write_static', 'extract_and_write_static', (['ctx', 'html_body', 'new_blog_post_dir', 'md_filename'], {}), '(ctx, html_body, new_blog_post_dir, md_filename)\n', (1879, 1927), False, 'from blogger_cli.converter.extractor import extract_and_write_static, extract_main_and_meta_from_md, get_summary_limit, extract_topic, replace_ext\n'), ((2233, 2273), 'shutil.copyfile', 'copyfile', (['md_file_path', 'new_md_file_path'], {}), '(md_file_path, new_md_file_path)\n', (2241, 2273), False, 'from shutil import SameFileError, copyfile\n'), ((2379, 2406), 'os.remove', 'os.remove', (['new_md_file_path'], {}), '(new_md_file_path)\n', (2388, 2406), False, 'import os\n'), ((2419, 2459), 'shutil.copyfile', 'copyfile', (['md_file_path', 'new_md_file_path'], {}), '(md_file_path, new_md_file_path)\n', (2427, 2459), False, 'from shutil import 
SameFileError, copyfile\n')] |
import datetime
import time
def main(j, args, params, tags, tasklet):
doc = args.doc
id = args.getTag('id')
width = args.getTag('width')
height = args.getTag('height')
result = "{{jgauge width:%(width)s id:%(id)s height:%(height)s val:%(last24h)s start:0 end:%(total)s}}"
now = datetime.datetime.now()
firsteco = j.apps.system.gridmanager.getErrorconditions(from_='-7d')
total = len(firsteco)
current = len(j.apps.system.gridmanager.getErrorconditions(from_='-1d'))
average = total
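    # Approximate a per-day rate over the window covered by the oldest error
    # condition; the factor of 2 adds headroom, and the gauge's end is never
    # allowed to fall below the last-24h count.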
if firsteco:
date = datetime.datetime.fromtimestamp(firsteco[0]['lasttime'])
delta = now - date
if delta.days != 0:
average = int(total / delta.days) * 2
if average < current:
average = current
result = result % {'height': height,
'width': width,
'id': id,
'last24h': current,
'total': average}
params.result = (result, doc)
return params
def match(j, args, params, tags, tasklet):
return True
| [
"datetime.datetime.now",
"datetime.datetime.fromtimestamp"
] | [((303, 326), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (324, 326), False, 'import datetime\n'), ((558, 614), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["firsteco[0]['lasttime']"], {}), "(firsteco[0]['lasttime'])\n", (589, 614), False, 'import datetime\n')] |
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import resolve, reverse
from ..forms import SignUpForm
from ..views import SignUpView
class SignUpFormTest(TestCase):
def test_form_has_fields(self):
"""Tests if the form contains all the necessary fields"""
form = SignUpForm()
        expected = ['username', 'email', 'password1', 'password2',]
actual = list(form.fields)
self.assertSequenceEqual(expected, actual)
class SignUpTests(TestCase):
def setUp(self):
signup_url = reverse('signup')
self.signup_response = self.client.get(signup_url)
def test_signup_status_code(self):
"""Tests if we get the signup page"""
self.assertEqual(self.signup_response.status_code, 200)
def test_signup_url_resolves_signup_view(self):
"""Tests if the login link maps SignUpView"""
view = resolve('/accounts/signup/')
self.assertEqual(view.func.view_class, SignUpView)
def test_csrf(self):
"""Tests if we get a csrf token"""
self.assertContains(self.signup_response, 'csrfmiddlewaretoken')
def test_contains_form(self):
"""Tests if the page contains the login form"""
form = self.signup_response.context.get('form')
self.assertIsInstance(form, SignUpForm)
def test_form_inputs(self):
"""Tests if the form looks like it should"""
        # csrf, username, email, password1, password2 -> 5 inputs
self.assertContains(self.signup_response, '<input', 5)
self.assertContains(self.signup_response, 'type="text"', 1)
self.assertContains(self.signup_response, 'type="email"', 1)
self.assertContains(self.signup_response, 'type="password"', 2)
self.assertContains(self.signup_response, 'type="submit"', 1)
class SuccessfulSignUpTests(TestCase):
def setUp(self):
signup_url = reverse('signup')
data = {
'username': 'Vasyan',
'email': '<EMAIL>',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>'
}
self.signup_response = self.client.post(signup_url, data)
self.page_url = reverse('page', kwargs={'page': 1})
def test_redirection_without_parameter_next(self):
"""Tests the redirect after signing up to /page/1/
"""
self.assertRedirects(self.signup_response, self.page_url)
def test_redirection_with_parameter_next(self):
"""Tests the redirect after signing up to a page from
'next' parameter
"""
next_param = '?next={}'.format(reverse('about'))
signup_url = reverse('signup') + next_param
data = {
'username': 'Vasyan1',
'email': '<EMAIL>',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>'
}
signup_response = self.client.post(signup_url, data)
page_url = reverse('about')
self.assertRedirects(signup_response, page_url)
def test_user_creation(self):
"""Tests if the user has been created"""
self.assertTrue(User.objects.exists())
def test_user_authentication(self):
"""Tests if a new user is authenticated after signing up"""
response = self.client.get(self.page_url)
user = response.context.get('user')
self.assertTrue(user.is_authenticated)
class InvalidSignUpTests(TestCase):
def setUp(self):
signup_url = reverse('signup')
self.signup_response = self.client.post(signup_url, {})
def test_signup_status_code(self):
"""Tests if an invalid form submission goes to the same page"""
self.assertEqual(self.signup_response.status_code, 200)
def test_form_errors(self):
"""Tests if we get errors after submissioning an invalid form"""
form = self.signup_response.context.get('form')
self.assertTrue(form.errors)
def test_dont_create_user(self):
"""Tests if a new user is not created after submissioning
an invalid form
"""
self.assertFalse(User.objects.exists())
| [
"django.urls.resolve",
"django.contrib.auth.models.User.objects.exists",
"django.urls.reverse"
] | [((574, 591), 'django.urls.reverse', 'reverse', (['"""signup"""'], {}), "('signup')\n", (581, 591), False, 'from django.urls import resolve, reverse\n'), ((923, 951), 'django.urls.resolve', 'resolve', (['"""/accounts/signup/"""'], {}), "('/accounts/signup/')\n", (930, 951), False, 'from django.urls import resolve, reverse\n'), ((1926, 1943), 'django.urls.reverse', 'reverse', (['"""signup"""'], {}), "('signup')\n", (1933, 1943), False, 'from django.urls import resolve, reverse\n'), ((2204, 2239), 'django.urls.reverse', 'reverse', (['"""page"""'], {'kwargs': "{'page': 1}"}), "('page', kwargs={'page': 1})\n", (2211, 2239), False, 'from django.urls import resolve, reverse\n'), ((2945, 2961), 'django.urls.reverse', 'reverse', (['"""about"""'], {}), "('about')\n", (2952, 2961), False, 'from django.urls import resolve, reverse\n'), ((3478, 3495), 'django.urls.reverse', 'reverse', (['"""signup"""'], {}), "('signup')\n", (3485, 3495), False, 'from django.urls import resolve, reverse\n'), ((2624, 2640), 'django.urls.reverse', 'reverse', (['"""about"""'], {}), "('about')\n", (2631, 2640), False, 'from django.urls import resolve, reverse\n'), ((2663, 2680), 'django.urls.reverse', 'reverse', (['"""signup"""'], {}), "('signup')\n", (2670, 2680), False, 'from django.urls import resolve, reverse\n'), ((3126, 3147), 'django.contrib.auth.models.User.objects.exists', 'User.objects.exists', ([], {}), '()\n', (3145, 3147), False, 'from django.contrib.auth.models import User\n'), ((4100, 4121), 'django.contrib.auth.models.User.objects.exists', 'User.objects.exists', ([], {}), '()\n', (4119, 4121), False, 'from django.contrib.auth.models import User\n')] |
import threading
import fb
import json
import time
import requests
from bs4 import BeautifulSoup as bs
webhook = "https://discordapp.com/api/webhooks/<KEY>"
# bradburne (website)
# alba (fb)
# inchdairnie (email subscription)
# delmor (fb)
# premier let (email sub)
# braemore
with open('config.json') as data:
data = json.load(data)
webhook = data['webhook']
delay = data['delay']
urlArray = data['urls']
def logger(module, message):
    # Print "HH:MM:SS.mmm - module: message" with millisecond precision
    millis = str(time.time() % 1)[2:5]
    print("{}.{} - {}: {}".format(time.strftime("%H:%M:%S", time.gmtime(time.time())), millis, module, message))
def getPageSource(url):
pageSource = bs(requests.get(url).text, "html.parser")
return pageSource
def getRollosProperties():
pageSource = getPageSource("https://www.rolloslettings.co.uk/letting-agents/lettings/")
scriptTag = pageSource.find(id="propertyInitialState").text
trimmedEnds = scriptTag.replace("window.params = [];", "").rstrip().rstrip(";").replace("window.initial_property_state = ", "").lstrip()
propertiesJson = json.loads(trimmedEnds)
propertyList = []
for property in propertiesJson['properties']:
propertyList.append(property['property_post']['guid'])
return propertyList
def postToDiscord(name, link):
requests.post(webhook, json={
"username": "New Property",
"embeds": [
{
"color": "65535",
"fields": [
{
"name": name,
"value": link
}
]
}
]
})
def getThortonsProperties():
pageSource = getPageSource("https://thorntons-lettings.co.uk/student-list/")
table = pageSource.find("table", {"class": "data"})
propertyList = []
for row in table.findAll("a"):
propertyList.append(row['href'])
return propertyList
def getLawsonProperties():
pageSource = getPageSource("https://www.lawsonthompson.co.uk/student-lettings/")
allAvailableProperties = pageSource.find("div", {"class": "properties clear"})
rawPropertyList = allAvailableProperties.findAll("div", {"class": "actions"})
propertyList = []
for property in rawPropertyList:
propertyList.append(property.a['href'])
return propertyList
def monitor(site):
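    # Poll the chosen agency's listings forever; whenever the list changes,
    # post each previously unseen property to the Discord webhook.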
if site == "lawson":
propertyList = getLawsonProperties()
elif site == "rollos":
propertyList = getRollosProperties()
elif site == "thorntons":
propertyList = getThortonsProperties()
while True:
if site == "lawson":
newList = getLawsonProperties()
elif site == "rollos":
newList = getRollosProperties()
elif site == "thorntons":
newList = getThortonsProperties()
if newList != propertyList:
for property in newList:
if property not in propertyList:
print("{} - NEW PROPERTY FOUND".format(site))
postToDiscord("New Property", property)
propertyList = newList
else:
logger(site, "No new properties")
time.sleep(15)
# def fb():
# pageSource = getPageSource("https://www.facebook.com/AlbaResidentialStAndrews/")
# with open('readme.txt', 'w') as f:
# f.write(str(pageSource))
if __name__ == "__main__":
for url in urlArray:
thread = threading.Thread(target=fb.run, args=[url])
thread.start()
time.sleep(delay/(len(urlArray)*1000))
rollosThread = threading.Thread(target=monitor, args=["rollos"])
thorntonsThread = threading.Thread(target=monitor, args=["thorntons"])
thorntonsThread.start()
rollosThread.start()
monitor("lawson")
| [
"json.loads",
"requests.post",
"time.sleep",
"requests.get",
"json.load",
"threading.Thread",
"time.time"
] | [((325, 340), 'json.load', 'json.load', (['data'], {}), '(data)\n', (334, 340), False, 'import json\n'), ((1065, 1088), 'json.loads', 'json.loads', (['trimmedEnds'], {}), '(trimmedEnds)\n', (1075, 1088), False, 'import json\n'), ((1295, 1432), 'requests.post', 'requests.post', (['webhook'], {'json': "{'username': 'New Property', 'embeds': [{'color': '65535', 'fields': [{\n 'name': name, 'value': link}]}]}"}), "(webhook, json={'username': 'New Property', 'embeds': [{\n 'color': '65535', 'fields': [{'name': name, 'value': link}]}]})\n", (1308, 1432), False, 'import requests\n'), ((3693, 3742), 'threading.Thread', 'threading.Thread', ([], {'target': 'monitor', 'args': "['rollos']"}), "(target=monitor, args=['rollos'])\n", (3709, 3742), False, 'import threading\n'), ((3765, 3817), 'threading.Thread', 'threading.Thread', ([], {'target': 'monitor', 'args': "['thorntons']"}), "(target=monitor, args=['thorntons'])\n", (3781, 3817), False, 'import threading\n'), ((3283, 3297), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (3293, 3297), False, 'import time\n'), ((3559, 3602), 'threading.Thread', 'threading.Thread', ([], {'target': 'fb.run', 'args': '[url]'}), '(target=fb.run, args=[url])\n', (3575, 3602), False, 'import threading\n'), ((640, 657), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (652, 657), False, 'import requests\n'), ((557, 568), 'time.time', 'time.time', ([], {}), '()\n', (566, 568), False, 'import time\n'), ((518, 529), 'time.time', 'time.time', ([], {}), '()\n', (527, 529), False, 'import time\n')] |
#!/usr/bin/env python
"""
Remove overlap (+ extra if desired) from tandem duplications
around scaffold gaps.
"""
import sys
from argparse import ArgumentParser
from collections import defaultdict, namedtuple
from sonLib.bioio import fastaRead, fastaWrite
FalseDup = namedtuple('FalseDup', ['gapStart', 'gapEnd', 'dupSize'])
def parse_dups_file(path):
"""
Parse a .dups file produced by findScaffoldGapDups.
"""
dups = defaultdict(list)
with open(path) as f:
header = f.readline().strip().split()
if header != ['sequence', 'gapStart', 'gapEnd', 'dupSize', 'dupPctID']:
raise RuntimeError("Unexpected .dups file header: %s" % header)
for line in f:
fields = line.strip().split()
dups[fields[0]].append(FalseDup(int(fields[1]), int(fields[2]), int(fields[3])))
return dups
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('fasta', help='Sequence file')
parser.add_argument('dups', help='File describing the scaffold gap dups')
parser.add_argument('--additional', type=int, default=0,
help='Additional amount to trim past the dup')
opts = parser.parse_args()
# Ingest dup locations
dups = parse_dups_file(opts.dups)
for header, sequence in fastaRead(opts.fasta):
sequence_dups = dups[header]
offset = 0
sequence = list(sequence)
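        # Each pair of deletions shifts all later coordinates left, so keep a
        # running offset to translate the original dup coordinates into the
        # trimmed sequence; the gap-end side is removed first so the gap-start
        # indices stay valid within one iteration.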
for dup in sequence_dups:
del sequence[dup.gapEnd - offset:dup.gapEnd + dup.dupSize + opts.additional - offset]
del sequence[dup.gapStart - dup.dupSize - opts.additional - offset:dup.gapStart - offset]
offset += 2 * (dup.dupSize + opts.additional)
fastaWrite(sys.stdout, header, ''.join(sequence))
if __name__ == '__main__':
main()
| [
"sonLib.bioio.fastaRead",
"collections.namedtuple",
"collections.defaultdict",
"argparse.ArgumentParser"
] | [((268, 325), 'collections.namedtuple', 'namedtuple', (['"""FalseDup"""', "['gapStart', 'gapEnd', 'dupSize']"], {}), "('FalseDup', ['gapStart', 'gapEnd', 'dupSize'])\n", (278, 325), False, 'from collections import defaultdict, namedtuple\n'), ((437, 454), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (448, 454), False, 'from collections import defaultdict, namedtuple\n'), ((883, 918), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (897, 918), False, 'from argparse import ArgumentParser\n'), ((1308, 1329), 'sonLib.bioio.fastaRead', 'fastaRead', (['opts.fasta'], {}), '(opts.fasta)\n', (1317, 1329), False, 'from sonLib.bioio import fastaRead, fastaWrite\n')] |
#!/usr/bin/env python
#
# check-tms
#
# DESCRIPTION:
# Plugin to test the old, but still used, TMS map tile API for basic
# functionality. Loads several tiles around the given centerpoint
# and verifies that they loaded OK.
#
# OUTPUT:
# plain text describing the abnormal condition encountered
#
# PLATFORMS:
# Only tested on Linux
#
# DEPENDENCIES:
# pip: sensu_plugin
#
# USAGE:
# Start with --help to see applicable parameters
# NOTES:
# Everything is suspect
#
# LICENSE:
# <NAME> <EMAIL>
# Released under the same terms as Sensu (the MIT license); see LICENSE
# for details.
import requests
from lxml import etree
from sensu_plugin import SensuPluginCheck
class TileMapService(object):
def __init__(self, root_url):
resp = requests.get(root_url)
root = etree.fromstring(resp.content)
maplist = root.xpath('/TileMapService/TileMaps')[0].getchildren()
self.maps = [dict(x.items()) for x in maplist]
for map in self.maps:
resp = requests.get(map['href'])
root = etree.fromstring(resp.content)
# Flatten the thing
map['bbox'] = [dict(x.items()) for x in root.xpath('/TileMap/BoundingBox')][0]
map['origin'] = [dict(x.items()) for x in root.xpath('/TileMap/Origin')][0]
map['tileformat'] = [dict(x.items()) for x in root.xpath('/TileMap/TileFormat')][0]
map['tilesets'] = [dict(x.items()) for x in root.xpath('/TileMap/TileSets')[0].getchildren()]
def get_maplist(self):
return [map['title'] for map in self.maps]
def get_tilexy_from_coords(self, map_title, order, x, y, forced_origin=None):
map = [map for map in self.maps if map['title'] == map_title][0]
bbox = map['bbox']
if forced_origin is None:
origin = map['origin']
else:
origin = {'x': forced_origin[0], 'y': forced_origin[1]}
        height, width = int(map['tileformat']['height']), int(map['tileformat']['width'])
ratios_for_orders = {int(ts['order']): float(ts['units-per-pixel']) for ts in map['tilesets']}
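        # Tile index = (map coordinate - origin) / (map units per pixel at this
        # zoom order) / (tile size in pixels), floored.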
tx = int((x - float(origin['x'])) / ratios_for_orders[order] / width)
        ty = int((y - float(origin['y'])) / ratios_for_orders[order] / height)
return (tx,ty)
def get_tile(self, map_title, order, x, y):
map = [map for map in self.maps if map['title'] == map_title][0]
tileset = [tileset for tileset in map['tilesets'] if tileset['order'] == str(order)][0]
tileset_url = tileset['href']
tile_url = tileset_url + "/" + str(x) + "/" + str(y) + "." + map['tileformat']['extension']
resp = requests.get(tile_url)
tile = {'data': resp.content,
'url': tile_url,
'content_type': resp.headers['content-type'],
'status_code': resp.status_code,
}
return tile
class TMSCheck(SensuPluginCheck):
def setup(self):
# self.parser comes from SensuPluginCheck
self.parser.add_argument('-r', '--root', required=True, type=str,
help='TMS root to use for findings maps')
self.parser.add_argument('-l', '--list-maps', required=False, action='store_true',
help='List maps available at root')
self.parser.add_argument('-t', '--service', required=False, type=str,
help='Service type to check (currently only tms is supported)')
self.parser.add_argument('-p', '--point', required=False, type=float,
nargs=2, help="Center point (x,y) for tests in native coordinates for the map")
self.parser.add_argument('-z', '--zoom', required=False, type=int,
help="Center zoom level for tests")
self.parser.add_argument('-s', '--side-length', required=False, type=int, default=1,
help="Side length in tiles for the box to load around the center point")
self.parser.add_argument('-o', '--origo', required=False, type=float, default=None,
nargs=2, help="Force origo to this point (for misconfigured servers)")
self.parser.add_argument('-m', '--map', required=False, type=str,
help="Map to run the checks against")
self.parser.add_argument('-k', '--keep-files', required=False, action='store_true',
help="Keep the downloaded files (for debugging)")
self.parser.add_argument('--verbose', '-v', action='count')
def point_to_box(self, x, y, side_length):
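        # Return the (x, y) tile indices of a side_length x side_length square
        # around the centre tile (biased toward lower indices for even sides).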
offset = int(side_length / 2)
odd = side_length % 2
x_range = range(x-offset, x+offset+odd)
y_range = range(y-offset, y+offset+odd)
return [(x,y) for x in x_range for y in y_range]
def run(self):
self.check_name('OWS test')
tms = TileMapService(self.options.root)
map = self.options.map
x, y = self.options.point
zoom = self.options.zoom
origo = self.options.origo
if self.options.list_maps:
print("Available Maps:")
for map in tms.get_maplist():
print(map)
centerx, centery = tms.get_tilexy_from_coords(map, zoom, x, y, origo)
tiles = self.point_to_box(centerx, centery, self.options.side_length)
for tile in tiles:
image = tms.get_tile(map, zoom, tile[0], tile[1])
if self.options.keep_files:
with open('tile{}{}.jpg'.format(tile[0],tile[1]), 'wb') as file:
file.write(image['data'])
if self.options.verbose:
print(image['url'])
if image['status_code'] != 200:
self.critical('Tile at URL: {} failed to load. Status code {}'.format(image['url'], image['status_code']))
self.ok('Tiles within test area loaded successfully')
if __name__ == "__main__":
f = TMSCheck()
| [
"lxml.etree.fromstring",
"requests.get"
] | [((800, 822), 'requests.get', 'requests.get', (['root_url'], {}), '(root_url)\n', (812, 822), False, 'import requests\n'), ((838, 868), 'lxml.etree.fromstring', 'etree.fromstring', (['resp.content'], {}), '(resp.content)\n', (854, 868), False, 'from lxml import etree\n'), ((2723, 2745), 'requests.get', 'requests.get', (['tile_url'], {}), '(tile_url)\n', (2735, 2745), False, 'import requests\n'), ((1057, 1082), 'requests.get', 'requests.get', (["map['href']"], {}), "(map['href'])\n", (1069, 1082), False, 'import requests\n'), ((1102, 1132), 'lxml.etree.fromstring', 'etree.fromstring', (['resp.content'], {}), '(resp.content)\n', (1118, 1132), False, 'from lxml import etree\n')] |
from __future__ import annotations
import aiohttp
import datetime
import logging
from typing import Optional
from quart import current_app as app
logger = logging.getLogger("tsundoku")
class KitsuManager:
API_URL = "https://kitsu.io/api/edge/anime"
HEADERS = {
"Accept": "application/vnd.api+json",
"Content-Type": "application/vnd.api+json"
}
def __init__(self):
self.SHOW_BASE = "https://kitsu.io/anime/{}"
self.MEDIA_BASE = "https://media.kitsu.io/anime/poster_images/{}/{}.jpg"
self.show_id = None
self.kitsu_id = None
self.slug = None
@classmethod
async def fetch(cls, show_id: int, show_name: str) -> Optional[KitsuManager]:
"""
Attempts to retrieve Kitsu information
for a specified show name from the Kitsu API.
Parameters
----------
show_id: int
The show's ID.
show_name: str
The name of the show.
Returns
-------
Optional[KitsuManager]
A KitsuManager for a show.
"""
logger.info(f"Fetching Kitsu ID for Show {show_name}")
async with aiohttp.ClientSession(headers=cls.HEADERS) as sess:
payload = {
"filter[text]": show_name
}
async with sess.get(cls.API_URL, params=payload) as resp:
data = await resp.json()
try:
result = data["data"][0]
except (IndexError, KeyError):
return
if not result or not result.get("id"):
return
instance = cls()
instance.kitsu_id = int(result["id"])
instance.slug = result.get("slug")
async with app.db_pool.acquire() as con:
await con.execute("""
DELETE FROM
kitsu_info
WHERE
show_id=$1;
""", show_id)
await con.execute("""
INSERT INTO
kitsu_info
(show_id, kitsu_id, slug)
VALUES
($1, $2, $3);
""", show_id, instance.kitsu_id, instance.slug)
return instance
@classmethod
async def fetch_by_kitsu(cls, show_id: int, kitsu_id: int) -> Optional[KitsuManager]:
"""
Attempts to retrieve Kitsu information
for a specified show ID from the Kitsu API.
Parameters
----------
show_id: int
The show's ID.
kitsu_id: int
The name of the show.
Returns
-------
Optional[KitsuManager]
A KitsuManager for a show.
"""
logger.info(f"Fetching Kitsu ID for Show #{show_id}")
async with aiohttp.ClientSession(headers=cls.HEADERS) as sess:
payload = {
"filter[id]": kitsu_id
}
async with sess.get(cls.API_URL, params=payload) as resp:
data = await resp.json()
try:
result = data["data"][0]
except IndexError:
return
if not result or not result.get("id"):
return
instance = cls()
instance.kitsu_id = int(result["id"])
instance.slug = result.get("slug")
async with app.db_pool.acquire() as con:
await con.execute("""
DELETE FROM
kitsu_info
WHERE
show_id=$1;
""", show_id)
await con.execute("""
INSERT INTO
kitsu_info
(show_id, kitsu_id, slug)
VALUES
($1, $2, $3);
""", show_id, instance.kitsu_id, instance.slug)
return instance
@classmethod
async def from_show_id(cls, show_id: int) -> Optional[KitsuManager]:
"""
Retrieves Kitsu information from the database based
on a show's ID.
Parameters
----------
show_id: int
The show's ID in the database.
Returns
-------
Optional[KitsuManager]
A KitsuManager for the show.
"""
logger.info(f"Retrieving existing Kitsu info for Show ID #{show_id}")
async with app.db_pool.acquire() as con:
row = await con.fetchrow("""
SELECT
kitsu_id,
slug,
last_updated
FROM
kitsu_info
WHERE show_id=$1;
""", show_id)
if not row:
return
instance = cls()
instance.kitsu_id = row["kitsu_id"]
instance.slug = row["slug"]
return instance
@property
def link(self) -> str:
"""
Returns the link to the show on Kitsu
from the show's ID.
Returns
-------
str
The show's link.
"""
return self.SHOW_BASE.format(self.kitsu_id)
async def clear_cache(self) -> None:
"""
Clears the cached data for a show.
"""
async with app.db_pool.acquire() as con:
await con.execute("""
UPDATE
kitsu_info
SET
show_status = NULL,
cached_poster_url = NULL
WHERE
show_id=$1;
""", self.show_id)
async def get_poster_image(self) -> Optional[str]:
"""
Returns the link to the show's poster.
Returns
-------
Optional[str]
The desired poster.
"""
if self.kitsu_id is None:
return
async with app.db_pool.acquire() as con:
url = await con.fetchval("""
SELECT
cached_poster_url
FROM
kitsu_info
WHERE kitsu_id=$1;
""", self.kitsu_id)
if url:
return url
logger.info(f"Retrieving new poster URL for Kitsu ID {self.kitsu_id} from Kitsu")
to_cache = None
async with aiohttp.ClientSession() as sess:
for size in ["large", "medium", "small", "tiny", "original"]:
url = self.MEDIA_BASE.format(self.kitsu_id, size)
async with sess.head(url) as resp:
if resp.status == 404:
continue
logger.info(f"New poster found for Kitsu ID {self.kitsu_id} at [{size}] quality")
to_cache = url
break
if to_cache is None:
return
async with app.db_pool.acquire() as con:
await con.execute("""
UPDATE
kitsu_info
SET
cached_poster_url=$1
WHERE kitsu_id=$2;
""", to_cache, self.kitsu_id)
return to_cache
async def get_status(self) -> Optional[str]:
"""
Returns the status of the show.
Returns
-------
Optional[str]
The show's airing status.
"""
if self.kitsu_id is None:
return
async with app.db_pool.acquire() as con:
row = await con.fetchrow("""
SELECT
show_status,
last_updated
FROM
kitsu_info
WHERE kitsu_id=$1;
""", self.kitsu_id)
now = datetime.datetime.utcnow()
delta = now - row["last_updated"]
if delta.total_seconds() < 86400 and row["show_status"]:
return row["show_status"]
logger.info(f"Retrieving new show status for Kitsu ID {self.kitsu_id} from Kitsu")
to_cache = None
async with aiohttp.ClientSession() as sess:
payload = {
"filter[id]": self.kitsu_id
}
async with sess.get(self.API_URL, params=payload) as resp:
data = await resp.json()
try:
to_cache = data["data"][0]
except IndexError:
return
if to_cache is None:
return
status = to_cache.get("attributes", {}).get("status")
async with app.db_pool.acquire() as con:
await con.execute("""
UPDATE
kitsu_info
SET
show_status=$1,
last_updated=$2
WHERE kitsu_id=$3;
""", status, now, self.kitsu_id)
return status
| [
"logging.getLogger",
"aiohttp.ClientSession",
"quart.current_app.db_pool.acquire",
"datetime.datetime.utcnow"
] | [((158, 187), 'logging.getLogger', 'logging.getLogger', (['"""tsundoku"""'], {}), "('tsundoku')\n", (175, 187), False, 'import logging\n'), ((7644, 7670), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (7668, 7670), False, 'import datetime\n'), ((1174, 1216), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {'headers': 'cls.HEADERS'}), '(headers=cls.HEADERS)\n', (1195, 1216), False, 'import aiohttp\n'), ((1759, 1780), 'quart.current_app.db_pool.acquire', 'app.db_pool.acquire', ([], {}), '()\n', (1778, 1780), True, 'from quart import current_app as app\n'), ((2802, 2844), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {'headers': 'cls.HEADERS'}), '(headers=cls.HEADERS)\n', (2823, 2844), False, 'import aiohttp\n'), ((3372, 3393), 'quart.current_app.db_pool.acquire', 'app.db_pool.acquire', ([], {}), '()\n', (3391, 3393), True, 'from quart import current_app as app\n'), ((4361, 4382), 'quart.current_app.db_pool.acquire', 'app.db_pool.acquire', ([], {}), '()\n', (4380, 4382), True, 'from quart import current_app as app\n'), ((5228, 5249), 'quart.current_app.db_pool.acquire', 'app.db_pool.acquire', ([], {}), '()\n', (5247, 5249), True, 'from quart import current_app as app\n'), ((5823, 5844), 'quart.current_app.db_pool.acquire', 'app.db_pool.acquire', ([], {}), '()\n', (5842, 5844), True, 'from quart import current_app as app\n'), ((6256, 6279), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (6277, 6279), False, 'import aiohttp\n'), ((6789, 6810), 'quart.current_app.db_pool.acquire', 'app.db_pool.acquire', ([], {}), '()\n', (6808, 6810), True, 'from quart import current_app as app\n'), ((7350, 7371), 'quart.current_app.db_pool.acquire', 'app.db_pool.acquire', ([], {}), '()\n', (7369, 7371), True, 'from quart import current_app as app\n'), ((7953, 7976), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (7974, 7976), False, 'import aiohttp\n'), ((8442, 8463), 'quart.current_app.db_pool.acquire', 'app.db_pool.acquire', ([], {}), '()\n', (8461, 8463), True, 'from quart import current_app as app\n')] |
# Copyright (c) 2018, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the name of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.utils.code_analyzer import Lexer
import re
# Node
class shellblock_node(nodes.Element):
pass
def visit_shellblock_html(self, node):
self.body.append('<pre class="shell-block">')
def depart_shellblock_html(self, node):
self.body.append('</pre>')
class shell:
def __init__(self, conf, line):
self.lines = []
self.ps1 = ""
self.ps2 = ""
self.conf = conf
self.parse_conf()
self.add(line)
self.flines = []
def add(self, line):
self.lines += [line]
def parse_conf(self):
for i in ['ps1', 'ps2', 'lang']:
check = self.add_conf(i)
if not check:
self.lang = 'text'
def add_conf(self, val):
if val in self.conf:
setattr(self, val, self.conf[val])
return True
else:
setattr(self, val, "")
return False
def parse_content(self):
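        # Strip the prompt marker from every captured line and emit a
        # (prompt text, bold command) node pair; the first line gets ps1,
        # continuation lines get ps2.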
for i in range(len(self.lines)):
base = self.lines[i]
            l = re.sub(r"^[\S]*?> ", "", base)
if i == 0:
prompt = nodes.Text('\n' + self.ps1)
else:
prompt = nodes.Text(self.ps2)
text = nodes.Text(l + '\n')
node = nodes.strong()
node.append(text)
self.flines.append([prompt, node])
return self.flines
class ShellBlockDirective(Directive):
has_content = True
option_spec = {
'caption': directives.unchanged_required,
'class': directives.class_option,
'name': directives.unchanged
}
def run(self):
document = self.state.document
env = document.settings.env
new = []
# Find Prompt Text
for i in self.content:
if re.match("^-> .*?$", i):
try:
new[-1].add(i)
except:
self.join_prompt(new, i)
elif re.match("[\S]*?> .*?$", i):
key = re.sub("> .*?$", "", i)
if key in env.config.rstextras_prompt:
conf = env.config.rstextras_prompt[key]
s = shell(conf, i)
new.append(s)
else:
self.join_prompt(new, i)
# Build Document
node = shellblock_node()
for i in new:
if isinstance(i, str):
node.append(nodes.Text(i))
else:
prompt = i.parse_content()
for [p, code] in prompt:
node.append(p)
node.append(code)
return [node]
def join_prompt(self, block, add):
try:
block[-1] = '\n'.join([block[-1], add])
except:
block.append(add)
| [
"re.sub",
"docutils.nodes.Text",
"docutils.nodes.strong",
"re.match"
] | [((2653, 2683), 're.sub', 're.sub', (['"""^[\\\\S]*?> """', '""""""', 'base'], {}), "('^[\\\\S]*?> ', '', base)\n", (2659, 2683), False, 'import re\n'), ((2844, 2864), 'docutils.nodes.Text', 'nodes.Text', (["(l + '\\n')"], {}), "(l + '\\n')\n", (2854, 2864), False, 'from docutils import nodes\n'), ((2885, 2899), 'docutils.nodes.strong', 'nodes.strong', ([], {}), '()\n', (2897, 2899), False, 'from docutils import nodes\n'), ((3426, 3449), 're.match', 're.match', (['"""^-> .*?$"""', 'i'], {}), "('^-> .*?$', i)\n", (3434, 3449), False, 'import re\n'), ((2732, 2759), 'docutils.nodes.Text', 'nodes.Text', (["('\\n' + self.ps1)"], {}), "('\\n' + self.ps1)\n", (2742, 2759), False, 'from docutils import nodes\n'), ((2803, 2823), 'docutils.nodes.Text', 'nodes.Text', (['self.ps2'], {}), '(self.ps2)\n', (2813, 2823), False, 'from docutils import nodes\n'), ((3593, 3621), 're.match', 're.match', (['"""[\\\\S]*?> .*?$"""', 'i'], {}), "('[\\\\S]*?> .*?$', i)\n", (3601, 3621), False, 'import re\n'), ((3644, 3667), 're.sub', 're.sub', (['"""> .*?$"""', '""""""', 'i'], {}), "('> .*?$', '', i)\n", (3650, 3667), False, 'import re\n'), ((4061, 4074), 'docutils.nodes.Text', 'nodes.Text', (['i'], {}), '(i)\n', (4071, 4074), False, 'from docutils import nodes\n')] |
from flask import Flask
from faker import Faker
from faker.providers import company, job, person, geo
app = Flask(__name__)
@app.route('/')
def story():
fake = Faker()
mystory = "<html><body><p>In a(n) " + fake.company()
mystory = mystory + " a young "
mystory = mystory + fake.language_name()
mystory = mystory + " stumbles across a(n) "
mystory = mystory + fake.domain_word()
mystory = mystory + " which spurs him into conflict with "
mystory = mystory + fake.name()
mystory = mystory + " an " + fake.catch_phrase()
mystory = mystory + " with the help of a(n) "
mystory = mystory + fake.job()
mystory = mystory + " and her "
mystory = mystory + fake.file_name()
mystory = mystory + " culminating in a struggle in "
mystory = mystory + fake.company()
mystory = mystory + " where someone shouts "
mystory = mystory + fake.bs()
mystory = mystory + " </p></body></html>"
return mystory
| [
"faker.Faker",
"flask.Flask"
] | [((110, 125), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (115, 125), False, 'from flask import Flask\n'), ((169, 176), 'faker.Faker', 'Faker', ([], {}), '()\n', (174, 176), False, 'from faker import Faker\n')] |
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='ts-microsoftgraph-python',
version='0.2.2',
description='API wrapper for Microsoft Graph written in Python',
long_description=read('README.md'),
url='https://github.com/ThinkingStudio/microsoftgraph-python',
long_description_content_type="text/markdown",
author='<NAME>, <NAME>, <NAME>, <NAME>, <NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['ts_microsoftgraph'],
install_requires=[
'requests'
],
zip_safe=False)
| [
"os.path.dirname"
] | [((87, 112), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (102, 112), False, 'import os\n')] |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from datetime import timedelta
import os
from pathlib import Path
from django.conf.global_settings import MEDIA_ROOT, MEDIA_URL, STATIC_ROOT
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'core.apps.CoreConfig',
'user.apps.UserConfig',
'eprofile.apps.EprofileConfig',
'vote.apps.VoteConfig',
'candidate.apps.CandidateConfig',
'party.apps.PartyConfig',
'state.apps.StateConfig',
'stats.apps.StatsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR, os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS')
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kathmandu'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/vol/web/static'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
AUTH_USER_MODEL = 'core.User'
PASSWORD_RESET_TIMEOUT_DAYS = 1
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_BACKEND = os.environ.get('EMAIL_BACKEND')
CELERY_BROKER_URL = 'redis://redis:6379'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework_simplejwt.authentication.JWTAuthentication',
]
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=5),
'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
}
EMAIL_USE_TLS = True
EMAIL_HOST = os.environ.get('EMAIL_HOST')
EMAIL_PORT = os.environ.get('EMAIL_PORT')
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
CORS_ORIGIN_ALLOW_ALL = True
CKEDITOR_UPLOAD_PATH = "media/uploads/ckeditor"
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'Custom',
'toolbar_Custom': [
['Styles', 'Format', 'Font', 'FontSize'],
['Format'],
['Undo', 'Redo'],
['Bold', 'Italic', 'Underline'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'],
['Image', 'Table', 'HorizontalRule'],
['TextColor', 'BGColor'],
['SpecialChar',],
['Link', 'Unlink'],
['RemoveFormat', 'Source'],
['Maximize',]
]
}
} | [
"os.path.join",
"datetime.timedelta",
"os.environ.get",
"pathlib.Path"
] | [((789, 817), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (803, 817), False, 'import os\n'), ((3970, 4001), 'os.environ.get', 'os.environ.get', (['"""EMAIL_BACKEND"""'], {}), "('EMAIL_BACKEND')\n", (3984, 4001), False, 'import os\n'), ((4335, 4363), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST"""'], {}), "('EMAIL_HOST')\n", (4349, 4363), False, 'import os\n'), ((4377, 4405), 'os.environ.get', 'os.environ.get', (['"""EMAIL_PORT"""'], {}), "('EMAIL_PORT')\n", (4391, 4405), False, 'import os\n'), ((4424, 4457), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST_USER"""'], {}), "('EMAIL_HOST_USER')\n", (4438, 4457), False, 'import os\n'), ((4480, 4517), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST_PASSWORD"""'], {}), "('EMAIL_HOST_PASSWORD')\n", (4494, 4517), False, 'import os\n'), ((4227, 4247), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (4236, 4247), False, 'from datetime import timedelta\n'), ((4279, 4296), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4288, 4296), False, 'from datetime import timedelta\n'), ((2732, 2757), 'os.environ.get', 'os.environ.get', (['"""DB_HOST"""'], {}), "('DB_HOST')\n", (2746, 2757), False, 'import os\n'), ((2775, 2800), 'os.environ.get', 'os.environ.get', (['"""DB_NAME"""'], {}), "('DB_NAME')\n", (2789, 2800), False, 'import os\n'), ((2818, 2843), 'os.environ.get', 'os.environ.get', (['"""DB_USER"""'], {}), "('DB_USER')\n", (2832, 2843), False, 'import os\n'), ((2865, 2890), 'os.environ.get', 'os.environ.get', (['"""DB_PASS"""'], {}), "('DB_PASS')\n", (2879, 2890), False, 'import os\n'), ((2099, 2134), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""templates"""'], {}), "(BASE_DIR, 'templates')\n", (2111, 2134), False, 'import os\n'), ((521, 535), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (525, 535), False, 'from pathlib import Path\n')] |
# _______________________________________________________________________________________
# ___________________________________Welcome_____________________________________________
# _______________________________________________________________________________________
import pygame # Importing modules
import random
import math
from pygame import mixer # Helps handling music
pygame.init() # Initialisation of pygame
# _______________________________________________________________________________________
# Screen Orientation and Manipulation
# Creating screen and giving access to display
screenx = 1200 # Defined screen var for screen size and other use
screeny = 800
screen = pygame.display.set_mode( ( screenx, screeny ) )
# Title, Icon, BackGround, BGMusic
pygame.display.set_caption("Space War") # For changing name on title bar
icon = pygame.image.load('GameIcon.png') # It stores game-icon
pygame.display.set_icon(icon) # It shows game-icon
bg = pygame.image.load('SpaceBG.jpg') # It stores back-ground image
mixer.music.load('SpaceM.mp3') # It loads music
mixer.music.play(-1) # This starts playing music; -1 for never ending music until pygame window stays opened
# _______________________________________________________________________________________
# Defining Attributes of Characters
# Player attributes setting
playerimg = pygame.image.load('Spaceship.png')
playerx = screenx//2 # Initial x coord of player
playery = screeny*(5/6) # Initial y coord of player
deltaXplayer = 0 # Initilly, No change in x coord of player
deltaYplayer = 0 # Initilly, No change in y coord of player
def player(x,y): # Player attributes,functions
screen.blit(playerimg, (x,y)) # Draws player on screen
# Enemies attributes setting
numOfEnemies = 10 # It defines number of enemies to be shown on screen
enemyimg = []
enemyx = [] # Initialising lists for five enemies
enemyy = []
deltaXenemy = [] # Change in coords is same and constant for all enemies
deltaYenemy = []
speedx = (2 + (random.randint(0,5))/10)/5
speedy = (1.5 + (random.randint(0,5))/10)/5
def enemy(x,y):
screen.blit(enemyimg[i], (x,y)) # Draws i'th enemy on screen
for i in range(numOfEnemies): # Iterating for loop for each enemy
enemyimg.append(pygame.image.load('UFO.png')) # Shows enemy image
enemyx.append(random.randint(65,screenx-65)) # Assigns a random x position to an enemy
enemyy.append(random.randint(65,screeny-251)) # Assigns a random y position to an enemy
deltaXenemy.append(speedx) # We want change in x coord of each enemy equal
deltaYenemy.append(speedy) # We want change in y coord of each enemy equal
# Attack attributes setting
attackimg = pygame.image.load('Fireball.png')
attackx = 0 # Initial x coord of attack
attacky = playery - 5 # Initial y coord of attack
deltaYattack = 1 # Change in y coord of attack
inventory = 'Full' # For only one fireball at a time in our inventory
def attack(x,y): # Attack attributes,functions
global inventory
inventory = 'Empty' # When attack is used, our inventory becomes empty
screen.blit(attackimg, (x,y)) # Draws attack on screen
# _______________________________________________________________________________________
# Hit Checker
def hit( x1 , y1 , x2 , y2 ):                                  # When our attack hits enemy
d = math.sqrt( (x1 - x2)**2 + (y1 - y2)**2 )
if d < 40:
return True
else:
return False
def GOcheck( x1 , y1 , x2 , y2 ): # When the enemy hits our player
d = math.sqrt( (x1 - x2)**2 + (y1 - y2)**2 )
if d < 50:
return True
else:
return False
# _______________________________________________________________________________________
# Score
Currentscore = 0
scoreX = 30 # x coord for showing score
scoreY = 15 # y coord for showing score
stdfont = pygame.font.Font('freesansbold.ttf', 30) # Font name and size loading
scorefont = pygame.font.Font('freesansbold.ttf', 50)
def score(x,y): # This shows the score on screenfront
Score = stdfont.render('SCORE : '+ str(Currentscore), True, (0,255,0)) # We need to render in order to show text on screenfront
screen.blit(Score, (x,y))
# _______________________________________________________________________________________
# Game Over = GO
gofont = pygame.font.Font('freesansbold.ttf', 50)
def GOdisp():
overbg = pygame.image.load('BG.png')
screen.blit( overbg, (0,0) )
GoFont = gofont.render('GAME OVER', True, (0,255,0) )
Inter = scorefont.render("Your final score is..", True, (0,255,0) )
Scored = scorefont.render(str(Currentscore), True, (0,255,0) )
screen.blit(GoFont, (450,300))
screen.blit(Inter , (380,350))
screen.blit(Scored, (585,400))
# _______________________________________________________________________________________
# _______________________________________________________________________________________
# _______________________________________________________________________________________
# Gameplay - Game Screen Handling
gamestate = True # It is used for keeping pygame window on
got = False
while gamestate: # Our main screen comes inside while loop
# _______________________________________________________________________________________
screen.fill( (0,0,0) ) # Setting default BG-colour
screen.blit( bg, (0,0) ) # Draws Bg on screen
player(playerx, playery) # Calling player function
score(scoreX,scoreY) # Calling score function
# _______________________________________________________________________________________
# This for loop is checking for happening of events and controlling the player
# An event is everything that can/will happen in your game, basically a key pressed on keyboard
# Pygame module provides very simple functions and methods, so we have used it
# _______________________________________________________________________________________
for event in pygame.event.get():
if event.type == pygame.QUIT: # For closing pygame window when quit button is pressed...
gamestate = False # ...by changing gamestate variable
if event.type == pygame.KEYDOWN: # Keydown is for checking whether any key is pressed or not
if event.key == pygame.K_RIGHT: # If right arrow key pressed..
deltaXplayer = +20 # .. player moves left by 20 pixels
if event.key == pygame.K_LEFT: # If left arrow key pressed..
deltaXplayer = -20 # .. player moves left by 20 pixels
if event.key == pygame.K_SPACE: # If spacebar is pressed..
                if inventory == 'Full':               # For firing attack, our inventory must be full
attackx = playerx # For const x coord of fired attack
attack( attackx, attacky) # Calling attack function
Firing = mixer.Sound('FireSound.mp3') # We used mixer.Sound because this sound should be heard with BGM
Firing.play()
if event.type == pygame.KEYUP: # Keyup is for checking whether any pressed key is released or not
if (event.key == pygame.K_LEFT): # When we release left key,
deltaXplayer = 0 # change in x coord of player must be zero
if (event.key == pygame.K_RIGHT): # When we release right key,
deltaXplayer = 0 # change in x coord of player must be zero
# _______________________________________________________________________________________
playerx += deltaXplayer # To keep player updated with change in x coord
# Checking for boundaries for player so it don't go out of the screen
if playerx <= 64:
playerx = 64
elif playerx >= screenx-64:
playerx = screenx-64
# _______________________________________________________________________________________
# We have to call enemy only once so we've written it in running while loop
# We have used for loop for iterating over each enemy
for i in range(numOfEnemies):
# _______________________________________________________________________________________
# To Over the Game
if GOcheck( enemyx[i] , enemyy[i] , playerx , playery ): # If An enemy hits player...
for i in range(numOfEnemies): # Disappears all enemies from screen
enemyx[i] = 1400
enemyy[i] = 1200
speedx = 0
speedy = 0
got = True
break
enemy( enemyx[i] , enemyy[i] ) # Calls enemy function for An enemy
enemyx[i] += deltaXenemy[i] # An Enemy will continuously move in x direction
enemyy[i] += deltaYenemy[i] # An Enemy will continuously move in y direction
# _______________________________________________________________________________________
# Checking for boundaries for An enemy so it don't go out of the screen
if enemyx[i] <= 0:
deltaXenemy[i] = speedx
elif enemyx[i] >= screenx-65:
deltaXenemy[i] = 0 - speedx
if enemyy[i] <= 0:
deltaYenemy[i] = speedy
elif enemyy[i] >= screeny-65:
deltaYenemy[i] = 0 - speedy
# _______________________________________________________________________________________
# If Attack Hits an enemy
        if hit( enemyx[i] , enemyy[i] , attackx , attacky ) and (inventory == 'Empty'): # If we've already fired and hit happens
Currentscore += 1
attacky = playery
inventory = 'Full'
enemyx[i] = (random.randint(65,screenx-65))
enemyy[i] = (random.randint(65,screeny-251))
Boom = mixer.Sound('BoomSound.mp3')
Boom.play()
# _______________________________________________________________________________________
# To display text after Game Over
    if got:                                  # If the game has just ended
GOdisp()
    # Attack movement and its attributes
if attacky <= 0 :
attacky = playery
inventory = 'Full'
    if inventory == 'Empty':
attack( attackx , attacky)
attacky -= deltaYattack
    # Keep the screen updated with everything that may have changed this frame
pygame.display.update()
# _______________________________________________________________________________________
| [
"pygame.mixer.music.play",
"pygame.init",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.update",
"pygame.display.set_icon",
"math.sqrt",
"pygame.mixer.Sound",
"pygame.display.set_caption",
"pygame.image.load",
"pygame.font.Font",
"pygame.mixer.music.load",
"random.randint"
] | [((404, 417), 'pygame.init', 'pygame.init', ([], {}), '()\n', (415, 417), False, 'import pygame\n'), ((742, 785), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(screenx, screeny)'], {}), '((screenx, screeny))\n', (765, 785), False, 'import pygame\n'), ((831, 870), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Space War"""'], {}), "('Space War')\n", (857, 870), False, 'import pygame\n'), ((920, 953), 'pygame.image.load', 'pygame.image.load', (['"""GameIcon.png"""'], {}), "('GameIcon.png')\n", (937, 953), False, 'import pygame\n'), ((984, 1013), 'pygame.display.set_icon', 'pygame.display.set_icon', (['icon'], {}), '(icon)\n', (1007, 1013), False, 'import pygame\n'), ((1059, 1091), 'pygame.image.load', 'pygame.image.load', (['"""SpaceBG.jpg"""'], {}), "('SpaceBG.jpg')\n", (1076, 1091), False, 'import pygame\n'), ((1133, 1163), 'pygame.mixer.music.load', 'mixer.music.load', (['"""SpaceM.mp3"""'], {}), "('SpaceM.mp3')\n", (1149, 1163), False, 'from pygame import mixer\n'), ((1199, 1219), 'pygame.mixer.music.play', 'mixer.music.play', (['(-1)'], {}), '(-1)\n', (1215, 1219), False, 'from pygame import mixer\n'), ((1490, 1524), 'pygame.image.load', 'pygame.image.load', (['"""Spaceship.png"""'], {}), "('Spaceship.png')\n", (1507, 1524), False, 'import pygame\n'), ((2969, 3002), 'pygame.image.load', 'pygame.image.load', (['"""Fireball.png"""'], {}), "('Fireball.png')\n", (2986, 3002), False, 'import pygame\n'), ((4213, 4253), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(30)'], {}), "('freesansbold.ttf', 30)\n", (4229, 4253), False, 'import pygame\n'), ((4300, 4340), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(50)'], {}), "('freesansbold.ttf', 50)\n", (4316, 4340), False, 'import pygame\n'), ((4693, 4733), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(50)'], {}), "('freesansbold.ttf', 50)\n", (4709, 4733), False, 'import pygame\n'), ((3674, 3716), 'math.sqrt', 'math.sqrt', (['((x1 - x2) ** 2 + (y1 - y2) ** 2)'], {}), '((x1 - x2) ** 2 + (y1 - y2) ** 2)\n', (3683, 3716), False, 'import math\n'), ((3871, 3913), 'math.sqrt', 'math.sqrt', (['((x1 - x2) ** 2 + (y1 - y2) ** 2)'], {}), '((x1 - x2) ** 2 + (y1 - y2) ** 2)\n', (3880, 3913), False, 'import math\n'), ((4766, 4793), 'pygame.image.load', 'pygame.image.load', (['"""BG.png"""'], {}), "('BG.png')\n", (4783, 4793), False, 'import pygame\n'), ((6414, 6432), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (6430, 6432), False, 'import pygame\n'), ((11033, 11056), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (11054, 11056), False, 'import pygame\n'), ((2512, 2540), 'pygame.image.load', 'pygame.image.load', (['"""UFO.png"""'], {}), "('UFO.png')\n", (2529, 2540), False, 'import pygame\n'), ((2583, 2615), 'random.randint', 'random.randint', (['(65)', '(screenx - 65)'], {}), '(65, screenx - 65)\n', (2597, 2615), False, 'import random\n'), ((2678, 2711), 'random.randint', 'random.randint', (['(65)', '(screeny - 251)'], {}), '(65, screeny - 251)\n', (2692, 2711), False, 'import random\n'), ((2227, 2247), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (2241, 2247), False, 'import random\n'), ((2273, 2293), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (2287, 2293), False, 'import random\n'), ((10334, 10366), 'random.randint', 'random.randint', (['(65)', '(screenx - 65)'], {}), '(65, screenx - 65)\n', (10348, 10366), False, 'import random\n'), ((10395, 10428), 'random.randint', 'random.randint', (['(65)', '(screeny - 251)'], {}), '(65, screeny - 251)\n', (10409, 10428), False, 'import random\n'), ((10451, 10479), 'pygame.mixer.Sound', 'mixer.Sound', (['"""BoomSound.mp3"""'], {}), "('BoomSound.mp3')\n", (10462, 10479), False, 'from pygame import mixer\n'), ((7418, 7446), 'pygame.mixer.Sound', 'mixer.Sound', (['"""FireSound.mp3"""'], {}), "('FireSound.mp3')\n", (7429, 7446), False, 'from pygame import mixer\n')]
import os
from dotenv import load_dotenv
from telegram import Bot, Update, InlineKeyboardMarkup, InlineKeyboardButton, ParseMode, ReplyKeyboardMarkup
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler
from telegram.utils.request import Request
from telegram.utils.helpers import escape_markdown
from db import init_db, get_lists, add_list, remove_list, add_task, get_tasks, edit_task
import lists
import tasks
from common import logger, conversations
from utils import auth, split_list
@auth
def start(update: Update, context: CallbackContext):
"""Send a message when the command /start is issued."""
update.message.reply_text("Hi. Please create or choose task list from existing.")
def handle_error(update: Update, context: CallbackContext):
logger.warning("Update \"%s\" caused error \"%s\"", update, context.error)
commands = {
"start": "start",
"lists": "lists",
"add_list": "addlist",
"remove_list": "removelist",
"tasks": "tasks",
"add_tasks": "addtasks",
"done_task": "done",
"undone_task": "undone",
"cancel": "cancel",
}
def parse_callback_data(data: str):
field, value = data.split("=")
return {"field": field, "value": value}
def callback_handler(update: Update, context: CallbackContext):
callback_data = update.callback_query.data
data = parse_callback_data(callback_data)
if data.get("field") == "list_name":
return lists.choose_list(data.get("value"), update, context)
def cancel(update: Update, context: CallbackContext):
user = update.message.from_user
logger.info("User %s canceled conversation", user.first_name)
update.message.reply_text("Canceled.")
return ConversationHandler.END
def main():
    logger.info("Start tasks bot")
    load_dotenv()  # load environment variables from a local .env file so os.getenv can find the token
    bot_token = os.getenv("bot_token")
req = Request(connect_timeout=0.5, read_timeout=1, con_pool_size=8)
    bot = Bot(token=bot_token, request=req)
updater = Updater(bot=bot, use_context=True)
dp = updater.dispatcher
dp.add_handler(CommandHandler(commands["start"], start))
dp.add_handler(CommandHandler(commands["lists"], lists.show_lists))
dp.add_handler(CallbackQueryHandler(callback_handler))
dp.add_handler(ConversationHandler(
entry_points=[CommandHandler(commands["add_list"], lists.add_list)],
states={
conversations["add_list"]["name"]: [MessageHandler(Filters.text, lists.handle_list_name)],
},
fallbacks=[CommandHandler(commands["cancel"], cancel)],
))
dp.add_handler(ConversationHandler(
entry_points=[CommandHandler(commands["remove_list"], lists.remove_list)],
states={
conversations["remove_list"]["choose_list"]: [MessageHandler(Filters.text, lists.handle_removing_list)],
},
fallbacks=[CommandHandler(commands["cancel"], cancel)],
))
dp.add_handler(ConversationHandler(
entry_points=[CommandHandler(commands["add_tasks"], tasks.add_tasks)],
states={
conversations["add_tasks"]["choose_list"]: [MessageHandler(Filters.text, tasks.choose_list_to_add)],
conversations["add_tasks"]["handle_tasks"]: [MessageHandler(Filters.text, tasks.handle_adding_task)],
},
fallbacks=[CommandHandler(commands["cancel"], cancel)],
))
dp.add_handler(ConversationHandler(
entry_points=[CommandHandler(commands["tasks"], tasks.show_tasks)],
states={
conversations["tasks"]["choose_list"]: [MessageHandler(Filters.text, tasks.choose_list_to_show)],
},
fallbacks=[CommandHandler(commands["cancel"], cancel)],
))
dp.add_handler(ConversationHandler(
entry_points=[CommandHandler(commands["done_task"], tasks.done_task)],
states={
conversations["done_task"]["choose_list"]: [MessageHandler(Filters.text, tasks.choose_list_to_done)],
conversations["done_task"]["handle_task"]: [MessageHandler(Filters.text, tasks.handle_done_task)],
},
fallbacks=[CommandHandler(commands["cancel"], cancel)],
))
dp.add_handler(ConversationHandler(
entry_points=[CommandHandler(commands["undone_task"], tasks.undone_task)],
states={
conversations["undone_task"]["choose_list"]: [MessageHandler(Filters.text, tasks.choose_list_to_undone)],
conversations["undone_task"]["handle_task"]: [MessageHandler(Filters.text, tasks.handle_undone_task)],
},
fallbacks=[CommandHandler(commands["cancel"], cancel)],
))
dp.add_error_handler(handle_error)
updater.start_polling()
updater.idle()
logger.info("Stop tasks bot")
if __name__ == "__main__":
main()
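# --- Illustrative setup sketch (file name and token below are hypothetical) ---
# The token is read from the environment; with python-dotenv, a local .env
# file next to the script is picked up by load_dotenv(), e.g.:
#
#   bot_token=123456:ABC-hypothetical-token
#
# then start the bot with:  python <this_script>.py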
| [
"telegram.utils.request.Request",
"os.getenv",
"common.logger.warning",
"telegram.Bot",
"common.logger.info",
"telegram.ext.MessageHandler",
"telegram.ext.CallbackQueryHandler",
"telegram.ext.CommandHandler",
"telegram.ext.Updater"
] | [((844, 914), 'common.logger.warning', 'logger.warning', (['"""Update "%s" caused error "%s\\""""', 'update', 'context.error'], {}), '(\'Update "%s" caused error "%s"\', update, context.error)\n', (858, 914), False, 'from common import logger, conversations\n'), ((1652, 1713), 'common.logger.info', 'logger.info', (['"""User %s canceled conversation"""', 'user.first_name'], {}), "('User %s canceled conversation', user.first_name)\n", (1663, 1713), False, 'from common import logger, conversations\n'), ((1810, 1840), 'common.logger.info', 'logger.info', (['"""Start tasks bot"""'], {}), "('Start tasks bot')\n", (1821, 1840), False, 'from common import logger, conversations\n'), ((1857, 1879), 'os.getenv', 'os.getenv', (['"""bot_token"""'], {}), "('bot_token')\n", (1866, 1879), False, 'import os\n'), ((1890, 1951), 'telegram.utils.request.Request', 'Request', ([], {'connect_timeout': '(0.5)', 'read_timeout': '(1)', 'con_pool_size': '(8)'}), '(connect_timeout=0.5, read_timeout=1, con_pool_size=8)\n', (1897, 1951), False, 'from telegram.utils.request import Request\n'), ((1962, 1995), 'telegram.Bot', 'Bot', ([], {'token': 'bot_token', 'request': 'req'}), '(token=bot_token, request=req)\n', (1965, 1995), False, 'from telegram import Bot, Update, InlineKeyboardMarkup, InlineKeyboardButton, ParseMode, ReplyKeyboardMarkup\n'), ((2012, 2046), 'telegram.ext.Updater', 'Updater', ([], {'bot': 'bot', 'use_context': '(True)'}), '(bot=bot, use_context=True)\n', (2019, 2046), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((4693, 4722), 'common.logger.info', 'logger.info', (['"""Stop tasks bot"""'], {}), "('Stop tasks bot')\n", (4704, 4722), False, 'from common import logger, conversations\n'), ((2095, 2135), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['start']", 'start'], {}), "(commands['start'], start)\n", (2109, 2135), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((2156, 2207), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['lists']", 'lists.show_lists'], {}), "(commands['lists'], lists.show_lists)\n", (2170, 2207), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((2228, 2266), 'telegram.ext.CallbackQueryHandler', 'CallbackQueryHandler', (['callback_handler'], {}), '(callback_handler)\n', (2248, 2266), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((2331, 2383), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['add_list']", 'lists.add_list'], {}), "(commands['add_list'], lists.add_list)\n", (2345, 2383), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((2536, 2578), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['cancel']", 'cancel'], {}), "(commands['cancel'], cancel)\n", (2550, 2578), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((2651, 2709), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['remove_list']", 'lists.remove_list'], {}), "(commands['remove_list'], lists.remove_list)\n", (2665, 2709), False, 'from telegram.ext import Updater, 
CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((2876, 2918), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['cancel']", 'cancel'], {}), "(commands['cancel'], cancel)\n", (2890, 2918), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((2991, 3045), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['add_tasks']", 'tasks.add_tasks'], {}), "(commands['add_tasks'], tasks.add_tasks)\n", (3005, 3045), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((3322, 3364), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['cancel']", 'cancel'], {}), "(commands['cancel'], cancel)\n", (3336, 3364), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((3437, 3488), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['tasks']", 'tasks.show_tasks'], {}), "(commands['tasks'], tasks.show_tasks)\n", (3451, 3488), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((3648, 3690), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['cancel']", 'cancel'], {}), "(commands['cancel'], cancel)\n", (3662, 3690), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((3763, 3817), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['done_task']", 'tasks.done_task'], {}), "(commands['done_task'], tasks.done_task)\n", (3777, 3817), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((4092, 4134), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['cancel']", 'cancel'], {}), "(commands['cancel'], cancel)\n", (4106, 4134), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((4207, 4265), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['undone_task']", 'tasks.undone_task'], {}), "(commands['undone_task'], tasks.undone_task)\n", (4221, 4265), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((4548, 4590), 'telegram.ext.CommandHandler', 'CommandHandler', (["commands['cancel']", 'cancel'], {}), "(commands['cancel'], cancel)\n", (4562, 4590), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((2451, 2503), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'lists.handle_list_name'], {}), '(Filters.text, lists.handle_list_name)\n', (2465, 2503), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((2787, 2843), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'lists.handle_removing_list'], {}), '(Filters.text, lists.handle_removing_list)\n', (2801, 2843), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((3121, 3175), 
'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'tasks.choose_list_to_add'], {}), '(Filters.text, tasks.choose_list_to_add)\n', (3135, 3175), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((3235, 3289), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'tasks.handle_adding_task'], {}), '(Filters.text, tasks.handle_adding_task)\n', (3249, 3289), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((3560, 3615), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'tasks.choose_list_to_show'], {}), '(Filters.text, tasks.choose_list_to_show)\n', (3574, 3615), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((3893, 3948), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'tasks.choose_list_to_done'], {}), '(Filters.text, tasks.choose_list_to_done)\n', (3907, 3948), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((4007, 4059), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'tasks.handle_done_task'], {}), '(Filters.text, tasks.handle_done_task)\n', (4021, 4059), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((4343, 4400), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'tasks.choose_list_to_undone'], {}), '(Filters.text, tasks.choose_list_to_undone)\n', (4357, 4400), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n'), ((4461, 4515), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'tasks.handle_undone_task'], {}), '(Filters.text, tasks.handle_undone_task)\n', (4475, 4515), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler, ConversationHandler\n')] |
import torch
import random
from .utils import get_conf, encode_conf
class Mutations():
def __init__(self, search_space, prob_mutation=0.8, prob_resize=0.05, prob_swap=0.04, exploration_vs_exploitation=0.5):
n = len(search_space)
# general vars
self.exploration_vs_exploitation = exploration_vs_exploitation
self._cache = {}
# mutations over edges
self.sspace_used = torch.ones(n)*2
self.sspace_success = torch.ones(n)
# mutation edge
self.prob_mutation = prob_mutation
# mutation swap
self.prob_swap = prob_swap
# mutation reduce
self.avg_len = 10
self.prob_resize = prob_resize
def __call__(self, genotype):
"""
takes a genotype and returns a mutated one
"""
self.exploration_vs_exploitation *= 0.996
architecture, use_shared, dataset = get_conf(genotype)
mutations = []
        # zero out the smallest positive entry at level 0 when over half the columns are active
if torch.rand(1)<self.prob_mutation:
k, min_, cc= 0, 9999, 0
for i, val in enumerate(architecture[0]):
if val>0: cc+=1
if val<min_ and val>0: k, min_= i, val
if cc> len(architecture[0])/2:
architecture[0][k] = 0
# update architecture edges
if (torch.rand(1)<self.prob_mutation):
architecture, j = self.mutate_one_edge(architecture)
mutations.append(j)
architecture = self.mutate_swap(architecture)
# update hyperparams
r = torch.rand(2)
if r[0]<0.04: use_shared = (use_shared+1)%2
if r[1]<0.24: dataset = int((dataset+(r[1]-0.01)*50)%10)
# resize architecture
if (torch.rand(1)<self.prob_mutation):
architecture, tmp = self.mutate_resize(architecture)
mutations += tmp
hash_arch = '.'.join([','.join([str(x) for x in ar]) for ar in architecture])
self._cache[hash_arch] = mutations
return encode_conf(architecture, use_shared, dataset)
def mutate_one_edge(self, architecture):
"""basic mutation:change one edge of the cell
how the edge is changed depends on:
- exploitation: successfull_mutations / total_mutations of a specific operation
- explration: to give more attention to low used operation
Moreover, exploration_vs_exploitation makes exploitation a bit more important with time passing
"""
eve = self.exploration_vs_exploitation
rand = torch.rand(3)
# exploitation exploration
weights = [(1-eve)*(self.sspace_success[j] / self.sspace_used[j]) + eve*(1- self.sspace_used[j] / self.sspace_used.max())+.2 for j in range(len(self.sspace_used))]
j = random.choices(list(range(len(architecture[0]))) , weights=weights, k=1)[0]
if rand[0]<0.5:
i = random.randint(0, len(architecture)-1) # random node
else:
i = random.randint(0, int((len(architecture)*2)**0.5))
i = (i*(i+1)//2)-1 # backbone path
for k, block in enumerate(architecture):
if block[j]>0 and torch.rand(1)>.3:
i = k
architecture[i][j] = max(architecture[i][j], 2**int(rand[2]*6))
return architecture, j
def mutate_resize(self, architecture):
"""mutation that adds/removes layers"""
architecture = torch.tensor(architecture, dtype=torch.int)
n_params = float(architecture.sum())
mutations = []
        prob_reduce = n_params / (n_params+self.avg_len) * (self.prob_resize*3/4) # the 3/4 factor makes growing slightly more likely than shrinking
prob_increase = self.avg_len / (n_params*(3/4)+self.avg_len) * self.prob_resize # increase prob
prob_reduce -= 4**len(architecture) / 4**5
depth = int((len(architecture)*2)**0.5) # network depth
if len(architecture)>1 and torch.rand(1)<prob_reduce:
# reduce the cell by one layer, sum the removed layers to the previous ones
if depth > 1:
end = int(depth*(depth-1)/2)
for i in range(len(architecture)-end):
if (torch.rand(1)<.3):
architecture[i] += ((architecture[i]+architecture[i+end])/2).int()
mutations.append(architecture[i+end].max(dim=0)[1].item())
architecture = architecture[:end]
elif torch.rand(1)<prob_increase:
# add a new layer and apply some mutations to it
            # possible problem: a small new layer may act as a bottleneck and lower the score
n = len(architecture[0])
new_arch = [[0]*n for _ in range(depth+1)]
j = int(torch.rand(1)*n)
new_arch[-1][j] += 16
mutations.append(j)
architecture = torch.cat((architecture, torch.tensor(new_arch, dtype=torch.int)), dim=0)
n_params = float(architecture.sum())
self.avg_len = .9*self.avg_len + .1*n_params
return architecture.tolist(), mutations
def mutate_swap(self, architecture):
r = torch.rand(2)
if r[0]<self.prob_swap and len(architecture)>1:
tmp = architecture[0]
i = 1+int(r[1]*(len(architecture)-1))
architecture[0] = architecture[i]
architecture[i] = tmp
return architecture
def update_genoname(self, old, new):
if old==new: return
if isinstance(old, str):
old, _, _ = get_conf(old)
old = '.'.join([','.join([str(x) for x in ar]) for ar in old])
if old not in self._cache: return
if isinstance(new, str):
new, _, _ = get_conf(new)
new = '.'.join([','.join([str(x) for x in ar]) for ar in new])
self._cache[new] = self._cache[old]
del self._cache[old]
def update_strat_good(self, architecture):
"""updates ratio successful/all_mutations for a given mutation type"""
if isinstance(architecture, str):
architecture, _, _ = get_conf(architecture)
architecture = '.'.join([','.join([str(x) for x in ar]) for ar in architecture])
if architecture not in self._cache: return
for j in self._cache[architecture]:
self.sspace_used[j] += 1
self.sspace_success[j] += 1
del self._cache[architecture]
def update_strat_bad(self):
for _,v in self._cache.items():
self.sspace_used[v] += 1
self._cache = {}
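# --- Illustrative usage sketch (names and genotype format are hypothetical;
# the real encoding is defined by get_conf/encode_conf in .utils) ---
#
#   mut = Mutations(search_space=list(range(8)))   # 8 candidate operations
#   child = mut(parent_genotype)                   # propose a mutated genotype
#   mut.update_strat_good(child)   # reward the mutation types if the child scored well
#   mut.update_strat_bad()         # otherwise penalise all cached mutations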
| [
"torch.tensor",
"torch.rand",
"torch.ones"
] | [((465, 478), 'torch.ones', 'torch.ones', (['n'], {}), '(n)\n', (475, 478), False, 'import torch\n'), ((1580, 1593), 'torch.rand', 'torch.rand', (['(2)'], {}), '(2)\n', (1590, 1593), False, 'import torch\n'), ((2556, 2569), 'torch.rand', 'torch.rand', (['(3)'], {}), '(3)\n', (2566, 2569), False, 'import torch\n'), ((3536, 3579), 'torch.tensor', 'torch.tensor', (['architecture'], {'dtype': 'torch.int'}), '(architecture, dtype=torch.int)\n', (3548, 3579), False, 'import torch\n'), ((5274, 5287), 'torch.rand', 'torch.rand', (['(2)'], {}), '(2)\n', (5284, 5287), False, 'import torch\n'), ((419, 432), 'torch.ones', 'torch.ones', (['n'], {}), '(n)\n', (429, 432), False, 'import torch\n'), ((1002, 1015), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (1012, 1015), False, 'import torch\n'), ((1344, 1357), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (1354, 1357), False, 'import torch\n'), ((1754, 1767), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (1764, 1767), False, 'import torch\n'), ((4089, 4102), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (4099, 4102), False, 'import torch\n'), ((4610, 4623), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (4620, 4623), False, 'import torch\n'), ((3268, 3281), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (3278, 3281), False, 'import torch\n'), ((4354, 4367), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (4364, 4367), False, 'import torch\n'), ((4889, 4902), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (4899, 4902), False, 'import torch\n'), ((5024, 5063), 'torch.tensor', 'torch.tensor', (['new_arch'], {'dtype': 'torch.int'}), '(new_arch, dtype=torch.int)\n', (5036, 5063), False, 'import torch\n')] |
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='genlab',
version='0.3',
description='Create a lab report',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/IceArrow256/genlab',
author='IceArrow256',
author_email='<EMAIL>',
packages=['genlab'],
install_requires=['ia256utilities', 'docxtpl', 'appdirs', 'pyqt5'],
entry_points={
'console_scripts': [
'genlab=genlab.main:main',
],
},
package_data={'genlab': ['lab.docx']}
)
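# Illustrative usage: running `pip install .` in the project root installs the
# package and exposes the `genlab` console script defined in entry_points.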
| [
"os.path.join",
"os.path.dirname",
"setuptools.setup"
] | [((207, 667), 'setuptools.setup', 'setup', ([], {'name': '"""genlab"""', 'version': '"""0.3"""', 'description': '"""Create a lab report"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/IceArrow256/genlab"""', 'author': '"""IceArrow256"""', 'author_email': '"""<EMAIL>"""', 'packages': "['genlab']", 'install_requires': "['ia256utilities', 'docxtpl', 'appdirs', 'pyqt5']", 'entry_points': "{'console_scripts': ['genlab=genlab.main:main']}", 'package_data': "{'genlab': ['lab.docx']}"}), "(name='genlab', version='0.3', description='Create a lab report',\n long_description=long_description, long_description_content_type=\n 'text/markdown', url='https://github.com/IceArrow256/genlab', author=\n 'IceArrow256', author_email='<EMAIL>', packages=['genlab'],\n install_requires=['ia256utilities', 'docxtpl', 'appdirs', 'pyqt5'],\n entry_points={'console_scripts': ['genlab=genlab.main:main']},\n package_data={'genlab': ['lab.docx']})\n", (212, 667), False, 'from setuptools import setup, find_packages\n'), ((85, 107), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (97, 107), False, 'from os import path\n'), ((120, 148), 'os.path.join', 'path.join', (['here', '"""README.md"""'], {}), "(here, 'README.md')\n", (129, 148), False, 'from os import path\n')] |
import re
import string

from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
linkPattern = (
"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
)
def removeLinks(text):
return re.sub(linkPattern, "", text)
def stripEmojis(text):
return text.encode("ascii", "ignore").decode("ascii")
def stripPunctuations(text):
return text.translate(str.maketrans("", "", string.punctuation))
def stripExtraWhiteSpaces(text):
return text.strip()
def removeSpecialChar(text):
return re.sub(r"\W+ ", "", text)
def sentiment_scores(sentence):
# Create a SentimentIntensityAnalyzer object.
sid_obj = SentimentIntensityAnalyzer()
# polarity_scores method of SentimentIntensityAnalyzer
# object gives a sentiment dictionary.
# which contains pos, neg, neu, and compound scores.
sentiment_dict = sid_obj.polarity_scores(sentence)
return sentiment_dict
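if __name__ == "__main__":
    # Illustrative demo with a made-up input string:
    raw = "Loving this!!! see https://example.com for more"
    text = stripExtraWhiteSpaces(stripPunctuations(stripEmojis(removeLinks(raw))))
    # VADER returns a dict with 'neg', 'neu', 'pos' and 'compound' scores
    print(sentiment_scores(text))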
| [
"re.sub",
"vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer"
] | [((447, 476), 're.sub', 're.sub', (['linkPattern', '""""""', 'text'], {}), "(linkPattern, '', text)\n", (453, 476), False, 'import re\n'), ((761, 786), 're.sub', 're.sub', (['"""\\\\W+ """', '""""""', 'text'], {}), "('\\\\W+ ', '', text)\n", (767, 786), False, 'import re\n'), ((885, 913), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (911, 913), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n')] |
import os
import numpy as np
import pandas as pd
from databroker.assets.handlers_base import HandlerBase
class APBBinFileHandler(HandlerBase):
"Read electrometer *.bin files"
def __init__(self, fpath):
        # The companion text config file is not stored in the resources yet, so parse it here for now
fpath_txt = f"{os.path.splitext(fpath)[0]}.txt"
with open(fpath_txt, "r") as fp:
content = fp.readlines()
content = [x.strip() for x in content]
_ = int(content[0].split(":")[1])
# Gains = [int(x) for x in content[1].split(":")[1].split(",")]
# Offsets = [int(x) for x in content[2].split(":")[1].split(",")]
# FAdiv = float(content[3].split(":")[1])
# FArate = float(content[4].split(":")[1])
# trigger_timestamp = float(content[5].split(":")[1].strip().replace(",", "."))
raw_data = np.fromfile(fpath, dtype=np.int32)
columns = ["timestamp", "i0", "it", "ir", "iff", "aux1", "aux2", "aux3", "aux4"]
num_columns = len(columns) + 1 # TODO: figure out why 1
raw_data = raw_data.reshape((raw_data.size // num_columns, num_columns))
derived_data = np.zeros((raw_data.shape[0], raw_data.shape[1] - 1))
derived_data[:, 0] = (
raw_data[:, -2] + raw_data[:, -1] * 8.0051232 * 1e-9
) # Unix timestamp with nanoseconds
for i in range(num_columns - 2):
derived_data[:, i + 1] = raw_data[:, i] # ((raw_data[:, i] ) - Offsets[i]) / Gains[i]
self.df = pd.DataFrame(data=derived_data, columns=columns)
self.raw_data = raw_data
def __call__(self):
return self.df
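# --- Illustrative usage sketch (the file path is hypothetical; a matching
# .txt config file must sit next to the .bin file) ---
#
#   handler = APBBinFileHandler("/path/to/scan.bin")
#   df = handler()   # DataFrame with timestamp, i0, it, ir, iff, aux1..aux4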
| [
"pandas.DataFrame",
"numpy.fromfile",
"os.path.splitext",
"numpy.zeros"
] | [((892, 926), 'numpy.fromfile', 'np.fromfile', (['fpath'], {'dtype': 'np.int32'}), '(fpath, dtype=np.int32)\n', (903, 926), True, 'import numpy as np\n'), ((1187, 1239), 'numpy.zeros', 'np.zeros', (['(raw_data.shape[0], raw_data.shape[1] - 1)'], {}), '((raw_data.shape[0], raw_data.shape[1] - 1))\n', (1195, 1239), True, 'import numpy as np\n'), ((1540, 1588), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'derived_data', 'columns': 'columns'}), '(data=derived_data, columns=columns)\n', (1552, 1588), True, 'import pandas as pd\n'), ((331, 354), 'os.path.splitext', 'os.path.splitext', (['fpath'], {}), '(fpath)\n', (347, 354), False, 'import os\n')] |
import numpy as np
import pandas as pd
import sys
import re
# question type definition
S = 0 # [S, col, corr [,rate]]
MS = 1 # [MS, [cols,..], [corr,..] [,rate]]
Num = 2 # [Num, [cols,..], [corr,..] [,rate]]
SS = 3 # [SS, [start,end], [corr,...] [,rate]]
# the list of question types and reference answers
# [type, column(s), answer(s)[, rate]]
QuestionReferences = None
def get_num_squestions(qref):
numq = 0
for q in qref:
if q[0] == MS:
numq += len(q[1])
elif q[0] == SS:
numq += q[1][1]-q[1][0]+1
else: numq += 1
return numq
def ascoringS(answer, q):
if answer[q[1]] == q[2]:
return 1
else:
return 0
def ascoringMS(answer, columns, i, ref):
ans = answer[columns]
if ref[i] in ans:
return 1
else:
return 0
def ascoringSS(answer, i, columns, ref):
ans = answer[columns[0]+i]
if ans == ref[i]:
return 1
else:
return 0
def ascoringNum(answer, columns, ref):
for i,p in enumerate(columns):
if answer[p] != ref[i]:
return 0
return 1
def ascoring(df, q):
if q[0] == S:
return df.apply(ascoringS, axis=1, raw=True, args=(q,))
elif q[0] == MS:
res = None
for i in range(len(q[2])):
rr = df.apply(ascoringMS, axis=1, raw=True, args=(q[1], i,q[2]))
if res is None:
res = rr
else:
res = pd.concat([res, rr], axis=1)
return res
elif q[0] == Num:
return df.apply(ascoringNum, axis=1, raw=True, args=(q[1], q[2]))
elif q[0] == SS:
res = None
for i in range(q[1][1]-q[1][0]+1):
rr = df.apply(ascoringSS, axis=1, raw=True, args=(i, q[1], q[2]))
if res is None:
res = rr
else:
res = pd.concat([res, rr], axis=1)
return res
else:
print(f"ERROR: Undefined question type: {q[0]}")
exit()
def get_maxcolms(qref):
maxcol = 0
for q in qref:
if q[0] == S:
if maxcol < q[1]: maxcol = q[1]
else:
if maxcol < max(q[1]): maxcol = max(q[1])
return maxcol
def get_sq2p(qref):
num_squestions = get_num_squestions(qref)
    sq2p = np.zeros(num_squestions, dtype=int)  # np.int was removed in recent NumPy; use the builtin
numq = 0
numsq = 0
for q in qref:
if q[0] == MS:
for i in q[1]:
sq2p[numsq] = numq
numsq += 1
elif q[0] == SS:
for i in range(q[1][1]-q[1][0]+1):
sq2p[numsq] = numq
numsq += 1
else:
sq2p[numsq] = numq
numsq += 1
numq += 1
return sq2p
def correctRate(scorelist_v):
return sum(scorelist_v) / len(scorelist_v)
def print_crate(marubatu, points_alloc):
print("====================================", file=sys.stderr)
print("Correct rate for each small question", file=sys.stderr)
print(" and allocation of points", file=sys.stderr)
print(" No: rate, points, q_type", file=sys.stderr)
print("------------------------------------", file=sys.stderr)
crate = marubatu.iloc[:,1:].apply(correctRate, raw=True)
sq2p = get_sq2p(QuestionReferences)
for i,rate in enumerate(crate):
q = QuestionReferences[sq2p[i]]
if q[0] == S: kind = f' S[{q[1]}]'
elif q[0] == MS: kind = f' MS{q[1]}'
elif q[0] == SS: kind = f' SS{q[1]}'
else: kind = f'Num{q[1]}'
print(f"{i+1:3d}:{rate*100.0:3.0f}%, {points_alloc[i]:2}, {kind:}", file=sys.stderr)
def totalscore(scorelist, points_alloc):
if len(scorelist) != len(points_alloc)+1:
print("ERROR: in totalscore()", file=sys.stderr)
print(scorelist, file=sys.stderr)
print(points_alloc, file=sys.stderr)
exit()
return sum(scorelist[1:] * points_alloc)
# return sum(scorelist[1:]) * 3
def get_points_alloc(qref, desired_pscore):
num_squestions = get_num_squestions(qref)
    points_alloc = np.zeros(num_squestions, dtype=int)
num = 0
sum_palloc = 0
for q in qref:
weight = 100
if len(q) >= 4:
weight = q[3]
if q[0] == MS:
inum = len(q[1])
elif q[0] == SS:
inum = q[1][1]-q[1][0]+1
else:
inum = 1
for i in range(inum):
points_alloc[num] = weight
sum_palloc += weight
num += 1
basic_unit_float = desired_pscore * 100.0 / sum_palloc
for i in range(num_squestions):
points_float = desired_pscore * points_alloc[i] / sum_palloc
points = round(points_float)
if points <= 0: points = 1
points_alloc[i] = points
return points_alloc, basic_unit_float
def marksheetScoring(filename, crate, desired_pscore):
maxcolms = get_maxcolms(QuestionReferences)
df = pd.read_csv(filename, header=None, dtype=object, skipinitialspace=True, usecols=list(range(maxcolms+1)))
df.fillna('-1', inplace=True)
df.replace('*', -1, inplace=True) # multi-mark col.
df = df.astype('int')
df[0] = df[0]+200000000
df = df.sort_values(by=0, ascending=True)
print(f"Marksheet-answer: #students={df.shape[0]}, #columns={df.shape[1]}(including id-number)", file=sys.stderr)
marubatu = df[[0]]
for q in QuestionReferences:
ascore = ascoring(df, q)
marubatu = pd.concat([marubatu, ascore], axis=1, ignore_index=True)
marubatu.to_csv(filename+'.marubatu', index=False, header=False)
points_alloc, basic_unit_float = get_points_alloc(QuestionReferences, desired_pscore)
perfect_score = sum(points_alloc)
print(f"#Small_questions={len(points_alloc)}", file=sys.stderr)
print(f"Perfect_score={perfect_score} (desired_perfect_score={desired_pscore})", file=sys.stderr)
basic_point_unit = round(basic_unit_float)
basic_point_unit = basic_point_unit if basic_point_unit >= 1 else 1
print(f"Basic_points_unit(weight=100)={basic_point_unit}, (float_unit = {basic_unit_float:5.2f})", file=sys.stderr)
if crate:
print_crate(marubatu, points_alloc)
id_scores = pd.concat([marubatu[0], marubatu.apply(totalscore, axis=1, raw=True, args=(points_alloc,))], axis=1, ignore_index=True)
# scores = marubatu.apply(totalscore, axis=1, raw=True, args=(points_alloc))
# print(scores, file=sys.stderr)
# id_scores = pd.concat([marubatu[0], scores], axis=1, ignore_index=True)
return id_scores
### for Twins upload file
def read_twins_upload_file(twinsfilename):
twins = pd.read_csv(twinsfilename, skiprows=1, header=None, skipinitialspace=True)
twins.columns=['科目番号', '学籍番号', '学期区分', '学期評価', '総合評価']
twins['総合評価'].fillna('0', inplace=True)
    # scores = twins['総合評価'].astype(int, inplace=True)  # inplace does not work with astype
scores = twins['総合評価'].astype(int)
del twins['総合評価']
twins = pd.concat([twins, scores], axis=1, ignore_index=True)
twins.columns=['科目番号', '学籍番号', '学期区分', '学期評価', '総合評価']
id_scores = pd.concat([twins['学籍番号'], twins['総合評価']], axis=1, ignore_index=True)
return id_scores, twins
### ajusting
# adjust
def point_adjust(point, xp, yp, xmax):
gradient1 = yp / xp
gradient2 = (xmax - yp)/(xmax - xp)
if point <= xp:
point = gradient1 * point
elif point <= xmax:
point = gradient2 * point + (xmax * (1.0-gradient2))
return point
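# Illustrative example: with xp=60, yp=70, xmax=100 the piecewise map sends
# 0 -> 0, 60 -> 70 and 100 -> 100, e.g. point_adjust(50, 60, 70, 100) -> 58.33...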
def adjust(id_scores, params):
xp, yp, xmax = params
adjustfunc = lambda p: point_adjust(p, xp, yp, xmax)
id_scores = pd.concat([id_scores[0], id_scores[1].map(adjustfunc).astype(int)], axis=1, ignore_index=True)
return id_scores
# a2djust
def get_points_abcd(params, id_scores):
score_list = np.sort(id_scores[1])[::-1]
num = len(score_list)
points_list = [score_list[0]]
cp = 0
for p in params:
cp += p
points_list.append(score_list[round(num * cp / 100.0)])
return points_list
def point_a2djust(p, p_max, p_ap, p_a, p_b, p_c):
if p >= p_ap:
newpoint = 90 + (10/(p_max-p_ap)) * (p-p_ap)
elif p >= p_a:
newpoint = 80 + (10/(p_ap-p_a)) * (p-p_a)
elif p >= p_b:
newpoint = 70 + (10/(p_a-p_b)) * (p-p_b)
elif p >= p_c:
newpoint = 60 + (10/(p_b-p_c)) * (p-p_c)
else:
newpoint = (60.0/p_c) * p
return round(newpoint)
def a2djust(id_scores, params):
# rate_ap, rate_a, rate_b, rate_c = params
p_max, p_ap, p_a, p_b, p_c = get_points_abcd(params, id_scores)
print(f"A2djust: rate_ap={params[0]}, rate_a={params[1]}, rate_b={params[2]}, rate_c={params[3]}", file=sys.stderr)
print(f"A2djust: p_max={p_max}, p_ap={p_ap}, p_a={p_a}, p_b={p_b}, p_c={p_c}", file=sys.stderr)
a2djustfunc = lambda p: point_a2djust(p, p_max, p_ap, p_a, p_b, p_c)
new_id_scores = pd.concat([id_scores[0], id_scores[1].map(a2djustfunc).astype(int)], axis=1, ignore_index=True)
return new_id_scores
# interval
def finterval(x, minval, maxval):
if x < minval: return minval
elif x > maxval: return maxval
else: return x
def interval(id_scores, minmax):
min, max = minmax
func = lambda x: finterval(x, min, max)
scores = id_scores.iloc[:,1].map(func).astype(int)
id_scores = pd.concat([id_scores[0], scores], axis=1, ignore_index=True)
return id_scores
#### print statistics
Pgakuruimei = re.compile(r'.*学群(.+学類).*')
def ex_gakuruimei(str):
mobj = Pgakuruimei.match(str)
if mobj:
return mobj.group(1)
if str.find('体育専門学群') != -1:
return '体育専門学群'
if str.find('芸術専門学群') != -1:
return '芸術専門学群'
return '不明学類'
def read_meibo(filename):
meibo = pd.read_csv(filename, skiprows=4, header=None, skipinitialspace=True)
if meibo[0][0] != 1:
print("Score Error in reading meibo file.", file=sys.stderr)
exit()
meibo = meibo[[3,1,2,4,5]]
meibo.columns = ['学籍番号', '所属学類', '学年', '氏名', '氏名カナ']
meibo['所属学類'] = meibo['所属学類'].map(ex_gakuruimei)
return meibo
def mk_gakurui_dicset(meibo):
dicset = {}
for i in range(meibo.shape[0]):
# gakuruimei = ex_gakuruimei(meibo['所属学類'][i])
gakuruimei = meibo['所属学類'][i]
if gakuruimei in dicset:
dicset[gakuruimei].add(meibo['学籍番号'][i])
else:
dicset[gakuruimei] = set([meibo['学籍番号'][i]])
return dicset
def gakurui_statistics(id_scores, meibofilename):
meibo = read_meibo(meibofilename)
gdicset = mk_gakurui_dicset(meibo)
res = []
for gname in gdicset:
aset = gdicset[gname]
selectstudents = [no in aset for no in id_scores.iloc[:,0]]
scores = id_scores.iloc[:,1][selectstudents]
res.append([gname, scores.describe()])
return res
def print_stat(scores):
print("==================", file=sys.stderr)
print("Score statistics", file=sys.stderr)
print("------------------", file=sys.stderr)
print(scores.describe(), file=sys.stderr)
def print_stat_gakurui(id_scores, meibofilename):
gakurui_sta_list = gakurui_statistics(id_scores, meibofilename)
print("==================", file=sys.stderr)
print("Gakurui statistics", file=sys.stderr)
print("------------------", file=sys.stderr)
notfirst = False
for gakuruiinfo in gakurui_sta_list:
if notfirst:
print('-------', file=sys.stderr)
else:
notfirst = True
print(gakuruiinfo[0], file=sys.stderr)
print(gakuruiinfo[1], file=sys.stderr)
def print_abcd(scores):
all = len(scores)
aplus = scores[scores>=90]
a = scores[scores<90]
aa = a[a>=80]
b = scores[scores<80]
bb = b[b>=70]
c = scores[scores<70]
cc = c[c>=60]
d = scores[scores<60]
print("=================", file=sys.stderr)
print("ABCD distribution", file=sys.stderr)
print("-----------------", file=sys.stderr)
print(f"a+ = {len(aplus)}, {len(aplus)*100/all:4.1f}%", file=sys.stderr)
print(f"a = {len(aa)}, {len(aa)*100/all:4.1f}%", file=sys.stderr)
print(f"b = {len(bb)}, {len(bb)*100/all:4.1f}%", file=sys.stderr)
print(f"c = {len(cc)}, {len(cc)*100/all:4.1f}%", file=sys.stderr)
print(f"d = {len(d)}, {len(d)*100/all:4.1f}%", file=sys.stderr)
def print_distribution(scores):
maxscores = max(scores)
numinterval = maxscores // 10 + 1
    counts = np.zeros(numinterval, dtype=int)
for c in scores:
cat = c // 10
counts[cat] += 1
print("==================", file=sys.stderr)
print("Score distribution", file=sys.stderr)
print("------------------", file=sys.stderr)
print("L.score: num:", file=sys.stderr)
maxcount = max(counts)
if maxcount > 80:
unit = 80.0/maxcount
else:
unit = 1.0
for i in range(numinterval):
cat = numinterval - i - 1
print(f"{10*cat:5}- :{counts[cat]:4}: ", end="", file=sys.stderr)
for x in range(int(counts[cat]*unit)):
print("*", end="", file=sys.stderr)
print("", file=sys.stderr)
#### join
def print_only_ids(df, idlabel, ncol):
num = 0
for i in df[idlabel]:
if num == 0:
print(" ", end="", file=sys.stderr)
elif num%ncol == 0:
print(", \n ", end="", file=sys.stderr)
else:
print(", ", end="", file=sys.stderr)
print(f"{i}", end="", file=sys.stderr)
num += 1
print("", file=sys.stderr)
def join(id_scores, joinfilename, how):
# id_scores_join = pd.read_csv(joinfilename, header=None, dtype=int, skipinitialspace=True)
id_scores_join = pd.read_csv(joinfilename, header=None, dtype=object, skipinitialspace=True)
id_scores_join.fillna('0', inplace=True)
id_scores_join = id_scores_join.astype('int')
new_id_scores = pd.merge(id_scores, id_scores_join, on=0, how=how)
outer_id_scores = pd.merge(id_scores, id_scores_join, on=0, how='outer', indicator='from')
nrow_left = id_scores.shape[0]
nrow_right = id_scores_join.shape[0]
nrow_new = new_id_scores.shape[0]
nrow_outer = outer_id_scores.shape[0]
print(f"Join({how}): left({nrow_left}) + right({nrow_right}) = {how}-join({nrow_new})", file=sys.stderr)
left_only = outer_id_scores[outer_id_scores['from']=='left_only']
right_only = outer_id_scores[outer_id_scores['from']=='right_only']
print(f" #left_only = {left_only.shape[0]}: keep left scores", file=sys.stderr)
if left_only.shape[0] > 0:
print_only_ids(left_only, 0, 5)
if how == 'left':
print(f" #right_only = {right_only.shape[0]}: ignored by 'left-join'", file=sys.stderr)
else:
print(f" #right_only = {right_only.shape[0]}: keep right scores", file=sys.stderr)
if right_only.shape[0] > 0:
print_only_ids(right_only, 0, 5)
scores_sum = new_id_scores.iloc[:,1:].fillna(0).apply(sum, axis=1, raw=True)
joined_new_id_scores = pd.concat([new_id_scores.iloc[:,0], scores_sum], axis=1, ignore_index=True)
joined_new_id_scores.fillna(0, inplace=True)
    # joined_new_id_scores.astype(int, inplace=True) # the inplace option is ineffective
joined_new_id_scores = joined_new_id_scores.astype(int)
return joined_new_id_scores
def twinsjoin(twins, id_scores, joinfilename):
del twins['総合評価']
id_scores.columns=['学籍番号', '総合評価']
newtwins = pd.merge(twins, id_scores, on='学籍番号', how='left')
# check correctness
twins_outer = pd.merge(twins, id_scores, on='学籍番号', how='outer', indicator='from')
left_only = twins_outer[twins_outer['from']=='left_only']
right_only = twins_outer[twins_outer['from']=='right_only']
if left_only.shape[0] > 0 or right_only.shape[0] > 0:
print("WARNING!!: occur something wrongs in 'twinsjoin'", file=sys.stderr)
print("WARNING!!: occur something wrongs in 'twinsjoin'", file=sys.stderr)
"""
nrow_left = twins.shape[0]
nrow_right = id_scores.shape[0]
nrow_new = newtwins.shape[0]
nrow_outer = twins_outer.shape[0]
print(f"Join(for Twins file): left({nrow_left}) + right({nrow_right}) = LEFT-join({nrow_new})", file=sys.stderr)
print(f" #left_only = {left_only.shape[0]}: keep twins scores (or put a zero score)", file=sys.stderr)
if left_only.shape[0] > 0:
print_only_ids(left_only, '学籍番号', 5)
print(f" #right_only = {right_only.shape[0]}: ignored", file=sys.stderr)
if right_only.shape[0] > 0:
print_only_ids(right_only, '学籍番号', 5)
"""
newtwins['総合評価'].fillna(0, inplace=True)
newscores = newtwins['総合評価'].astype('int')
del newtwins['総合評価']
newtwins = pd.concat([twins, newscores], axis=1, ignore_index=True)
newtwins.columns=['科目番号', '学籍番号', '学期区分', '学期評価', '総合評価']
new_id_scores = newtwins[['学籍番号', '総合評価']]
return newtwins, new_id_scores
#### record
def record(meibofilename, csvfilename2s):
df = read_meibo(meibofilename)
df.rename(columns={'学籍番号':0}, inplace=True)
for csvfilename2 in csvfilename2s:
df2 = pd.read_csv(csvfilename2, header=None, skipinitialspace=True)
df = pd.merge(df, df2, on=0, how='outer')
df.rename(columns={0:'学籍番号'}, inplace=True)
df = df.sort_values(by=['所属学類','学籍番号'], ascending=True)
return df
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='support tools of scoring for performance evaluation', prog='score')
parser.add_argument('csvfile')
parser.add_argument('-marksheet', nargs=2, default=None, metavar=('ref', 'desired_pscore'))
parser.add_argument('-crate', action='store_true', default=False)
parser.add_argument('-join', default=None, metavar='csvfile2')
parser.add_argument('-record', nargs='+', default=None, metavar=('csvfile2'))
parser.add_argument('-twins', action='store_true', default=False)
parser.add_argument('-adjust', nargs=3, type=float, default=None, metavar=('x', 'y', 'xmax'))
parser.add_argument('-a2djust', nargs=4, type=float, default=None, metavar=('A+', 'A', 'B', 'C'))
parser.add_argument('-interval', nargs=2, type=int, default=None, metavar=('min', 'max'))
parser.add_argument('-distribution', action='store_true', default=False)
parser.add_argument('-abcd', action='store_true', default=False)
parser.add_argument('-statistics', action='store_true', default=False)
parser.add_argument('-gakuruistat', default=None, metavar='csv-meibo-utf8')
parser.add_argument('-nostdout', action='store_true', default=False)
parser.add_argument('-output', default=None, metavar='filename')
args = parser.parse_args()
if args.marksheet and args.twins:
print("scoring error: exclusive options: -marksheet and -twins", file=sys.stderr)
exit()
if args.record and args.twins:
print("scoring error: exclusive options: -record and -twins", file=sys.stderr)
exit()
if args.record:
print("NOTICE:", file=sys.stderr)
print("-record option ignores all other options but -output option", file=sys.stderr)
df = record(args.csvfile, args.record)
if args.output:
df.to_excel(args.output, index=False)
else:
df.to_csv(sys.stdout, index=False)
exit()
if args.marksheet:
QuestionReferences = eval(open(args.marksheet[0]).read())
id_scores = marksheetScoring(args.csvfile, args.crate, int(args.marksheet[1]))
else:
if args.twins:
id_scores, twins = read_twins_upload_file(args.csvfile)
else:
# id_scores = pd.read_csv(args.csvfile, header=None, dtype=int, skipinitialspace=True)
id_scores = pd.read_csv(args.csvfile, header=None, dtype=object, skipinitialspace=True)
id_scores.fillna('0', inplace=True)
id_scores = id_scores.astype('int')
if args.join:
if args.twins:
id_scores = join(id_scores, args.join, 'left')
else:
id_scores = join(id_scores, args.join, 'outer')
if args.adjust:
id_scores = adjust(id_scores, args.adjust)
if args.a2djust:
id_scores = a2djust(id_scores, args.a2djust)
if args.interval:
id_scores = interval(id_scores, args.interval)
if args.twins:
twins, id_scores = twinsjoin(twins, id_scores, args.join)
if args.statistics:
print_stat(id_scores.iloc[:,1])
if args.abcd:
print_abcd(id_scores.iloc[:,1])
if args.gakuruistat:
print_stat_gakurui(id_scores, args.gakuruistat)
if args.distribution:
print_distribution(id_scores.iloc[:,1])
if not args.nostdout or args.output:
if args.output:
output = args.output
else:
output = sys.stdout
if args.twins:
twins.to_csv(output, index=False, encoding='cp932')
else:
id_scores.to_csv(output, index=False, header=False)
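# --- Illustrative invocations (the script file name is hypothetical) ---
#   python score.py answers.csv -marksheet ref.py 100 -crate -distribution
#   python score.py twins_upload.csv -twins -join extra_points.csv -output out.csv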
| [
"pandas.read_csv",
"re.compile",
"argparse.ArgumentParser",
"pandas.merge",
"numpy.sort",
"numpy.zeros",
"pandas.concat"
] | [((9321, 9347), 're.compile', 're.compile', (['""".*学群(.+学類).*"""'], {}), "('.*学群(.+学類).*')\n", (9331, 9347), False, 'import re\n'), ((2271, 2309), 'numpy.zeros', 'np.zeros', (['num_squestions'], {'dtype': 'np.int'}), '(num_squestions, dtype=np.int)\n', (2279, 2309), True, 'import numpy as np\n'), ((4017, 4055), 'numpy.zeros', 'np.zeros', (['num_squestions'], {'dtype': 'np.int'}), '(num_squestions, dtype=np.int)\n', (4025, 4055), True, 'import numpy as np\n'), ((6548, 6622), 'pandas.read_csv', 'pd.read_csv', (['twinsfilename'], {'skiprows': '(1)', 'header': 'None', 'skipinitialspace': '(True)'}), '(twinsfilename, skiprows=1, header=None, skipinitialspace=True)\n', (6559, 6622), True, 'import pandas as pd\n'), ((6868, 6921), 'pandas.concat', 'pd.concat', (['[twins, scores]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([twins, scores], axis=1, ignore_index=True)\n', (6877, 6921), True, 'import pandas as pd\n'), ((6997, 7065), 'pandas.concat', 'pd.concat', (["[twins['学籍番号'], twins['総合評価']]"], {'axis': '(1)', 'ignore_index': '(True)'}), "([twins['学籍番号'], twins['総合評価']], axis=1, ignore_index=True)\n", (7006, 7065), True, 'import pandas as pd\n'), ((9200, 9260), 'pandas.concat', 'pd.concat', (['[id_scores[0], scores]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([id_scores[0], scores], axis=1, ignore_index=True)\n', (9209, 9260), True, 'import pandas as pd\n'), ((9620, 9689), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'skiprows': '(4)', 'header': 'None', 'skipinitialspace': '(True)'}), '(filename, skiprows=4, header=None, skipinitialspace=True)\n', (9631, 9689), True, 'import pandas as pd\n'), ((12285, 12320), 'numpy.zeros', 'np.zeros', (['numinterval'], {'dtype': 'np.int'}), '(numinterval, dtype=np.int)\n', (12293, 12320), True, 'import numpy as np\n'), ((13517, 13592), 'pandas.read_csv', 'pd.read_csv', (['joinfilename'], {'header': 'None', 'dtype': 'object', 'skipinitialspace': '(True)'}), '(joinfilename, header=None, dtype=object, skipinitialspace=True)\n', (13528, 13592), True, 'import pandas as pd\n'), ((13708, 13758), 'pandas.merge', 'pd.merge', (['id_scores', 'id_scores_join'], {'on': '(0)', 'how': 'how'}), '(id_scores, id_scores_join, on=0, how=how)\n', (13716, 13758), True, 'import pandas as pd\n'), ((13781, 13853), 'pandas.merge', 'pd.merge', (['id_scores', 'id_scores_join'], {'on': '(0)', 'how': '"""outer"""', 'indicator': '"""from"""'}), "(id_scores, id_scores_join, on=0, how='outer', indicator='from')\n", (13789, 13853), True, 'import pandas as pd\n'), ((14819, 14895), 'pandas.concat', 'pd.concat', (['[new_id_scores.iloc[:, 0], scores_sum]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([new_id_scores.iloc[:, 0], scores_sum], axis=1, ignore_index=True)\n', (14828, 14895), True, 'import pandas as pd\n'), ((15244, 15293), 'pandas.merge', 'pd.merge', (['twins', 'id_scores'], {'on': '"""学籍番号"""', 'how': '"""left"""'}), "(twins, id_scores, on='学籍番号', how='left')\n", (15252, 15293), True, 'import pandas as pd\n'), ((15336, 15404), 'pandas.merge', 'pd.merge', (['twins', 'id_scores'], {'on': '"""学籍番号"""', 'how': '"""outer"""', 'indicator': '"""from"""'}), "(twins, id_scores, on='学籍番号', how='outer', indicator='from')\n", (15344, 15404), True, 'import pandas as pd\n'), ((16498, 16554), 'pandas.concat', 'pd.concat', (['[twins, newscores]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([twins, newscores], axis=1, ignore_index=True)\n', (16507, 16554), True, 'import pandas as pd\n'), ((17186, 17295), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 
'"""support tools of scoring for performance evaluation"""', 'prog': '"""score"""'}), "(description=\n 'support tools of scoring for performance evaluation', prog='score')\n", (17209, 17295), False, 'import argparse\n'), ((5394, 5450), 'pandas.concat', 'pd.concat', (['[marubatu, ascore]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([marubatu, ascore], axis=1, ignore_index=True)\n', (5403, 5450), True, 'import pandas as pd\n'), ((7693, 7714), 'numpy.sort', 'np.sort', (['id_scores[1]'], {}), '(id_scores[1])\n', (7700, 7714), True, 'import numpy as np\n'), ((16890, 16951), 'pandas.read_csv', 'pd.read_csv', (['csvfilename2'], {'header': 'None', 'skipinitialspace': '(True)'}), '(csvfilename2, header=None, skipinitialspace=True)\n', (16901, 16951), True, 'import pandas as pd\n'), ((16965, 17001), 'pandas.merge', 'pd.merge', (['df', 'df2'], {'on': '(0)', 'how': '"""outer"""'}), "(df, df2, on=0, how='outer')\n", (16973, 17001), True, 'import pandas as pd\n'), ((19531, 19606), 'pandas.read_csv', 'pd.read_csv', (['args.csvfile'], {'header': 'None', 'dtype': 'object', 'skipinitialspace': '(True)'}), '(args.csvfile, header=None, dtype=object, skipinitialspace=True)\n', (19542, 19606), True, 'import pandas as pd\n'), ((1454, 1482), 'pandas.concat', 'pd.concat', (['[res, rr]'], {'axis': '(1)'}), '([res, rr], axis=1)\n', (1463, 1482), True, 'import pandas as pd\n'), ((1852, 1880), 'pandas.concat', 'pd.concat', (['[res, rr]'], {'axis': '(1)'}), '([res, rr], axis=1)\n', (1861, 1880), True, 'import pandas as pd\n')] |
"""Algorithmic methods for the selection of common blocks in DiffBlocks
- select_common_blocks
-
- segments_difference
"""
import re
import tempfile
import subprocess
from collections import defaultdict, OrderedDict
import numpy as np
from ..biotools import reverse_complement, sequence_to_record
def format_sequences_as_dicts(sequences):
"""Standardize different formats into a single one.
The ``sequences`` can be either:
- A list [('sequence_id', 'ATGC...'), ('sequence_2', ...)]
- A list of Biopython records (all with different IDs)
- A dict {'sequence_id': "ATGC..."}
- A dict {'sequence_id': biopython_record}
The output is a tuple (sequences_dict, records_dict), where
- sequences_dict is of the form {'sequence_id': 'ATGC...'}
    - records_dict is of the form {'sequence_id': biopython_record}
"""
if isinstance(sequences, (list, tuple)):
if hasattr(sequences[0], "seq"):
# SEQUENCES = LIST OF RECORDS
records_dict = OrderedDict([(record.id, record) for record in sequences])
sequences_dict = OrderedDict(
[(record.id, str(record.seq).upper()) for record in sequences]
)
else:
# SEQUENCES = LIST OF ATGC STRINGS
sequences_dict = OrderedDict(sequences)
if isinstance(list(sequences_dict.values())[0], str):
records_dict = OrderedDict(
[
(name, sequence_to_record(seq, name=name))
for name, seq in sequences_dict.items()
]
)
else:
records_dict = sequences
elif hasattr(list(sequences.values())[0], "seq"):
# SEQUENCES = DICT {SEQ_ID: RECORD}
records_dict = OrderedDict(sorted(sequences.items()))
sequences_dict = OrderedDict(
[
(record_id, str(record.seq).upper())
for record_id, record in sequences.items()
]
)
else:
# SEQUENCES = DICT {SEQ_ID: ATGC}
sequences_dict = OrderedDict(sorted(sequences.items()))
records_dict = OrderedDict(
[
(name, sequence_to_record(seq, name=name))
for name, seq in sequences.items()
]
)
return sequences_dict, records_dict
def segments_difference(segment, subtracted):
"""Return the difference between segment (start, end) and subtracted.
The result is a list containing either zero, one, or two segments of the
form (start, end).
Examples
--------
>>> segment=(10, 100), subtracted=(0, 85) => [(85, 100)]
>>> segment=(10, 100), subtracted=(40, 125) => [(10, 40)]
>>> segment=(10, 100), subtracted=(30, 55) => [(10, 30), (55, 100)]
>>> segment=(10, 100), subtracted=(0, 150) => []
"""
seg_start, seg_end = segment
sub_start, sub_end = subtracted
result = []
if sub_start > seg_start:
result.append((seg_start, min(sub_start, seg_end)))
if sub_end < seg_end:
result.append((max(seg_start, sub_end), seg_end))
return sorted(list(set(result)))
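# Illustrative sanity checks mirroring the docstring examples:
#   segments_difference((10, 100), (30, 55)) == [(10, 30), (55, 100)]
#   segments_difference((10, 100), (0, 150)) == []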
def find_homologies_between_sequences(
sequences, min_size=0, max_size=None, include_self_homologies=True
):
"""Return a dict listing the locations of all homologies between sequences.
The result is a dict of the form below, where the sequence identifiers
are used as keys.
>>> {
>>> 'seq_1': {
>>> (start1, end1): [('seq2_5', _start, _end), ('seq_3', )...]
>>> (start2, end2): ...
>>> }
>>> }
Parameters
----------
sequences
A dict {'sequence_id': 'ATTGTGCAG...'}.
min_size, max_size
Minimum and maximum size outside which homologies will be ignored.
include_self_homologies
If False, self-homologies will be removed from the list.
"""
# BLAST THE SEQUENCES USING NCBI-BLAST
temp_fasta_path = tempfile.mktemp(".fa")
with open(temp_fasta_path, "w+") as f:
f.write(
"\n\n".join(
["> %s\n%s" % (name, seq.upper()) for name, seq in sequences.items()]
)
)
proc = subprocess.Popen(
[
"blastn",
"-query",
temp_fasta_path,
"-subject",
temp_fasta_path,
"-perc_identity",
"100",
"-dust",
"no",
"-evalue",
"1000000000000000",
"-culling_limit",
"10",
"-ungapped",
"-outfmt",
"6 qseqid qstart qend sseqid sstart send",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
result, __blast_err = proc.communicate()
# PARSE THE RESULT FROM BLAST
parsing = [line.split("\t") for line in result.decode("utf-8").splitlines()]
homologies = {name: defaultdict(lambda *a: []) for name, seq in sequences.items()}
# FILTER THE RESULTS (MIN_SIZE, MAX_SIZE, SELF-HOMOLOGIES)
for query, qstart, qend, subject, sstart, send in parsing:
is_self_homology = (query == subject) and (qstart != sstart)
if is_self_homology and (not include_self_homologies):
continue
qstart, qend = int(qstart) - 1, int(qend)
sstart, send = int(sstart) - 1, int(send)
if qend - qstart < min_size:
continue
if (max_size is not None) and (qend - qstart > max_size):
continue
location = (subject, sstart, send)
homologies[query][(qstart, qend)].append(location)
return homologies
def count_homologies(matches, min_size):
"""Return a dict {(start, end): number_of_homologies_count}.
"""
homologies_counts = {}
if len(matches) == 1:
segment = list(matches.keys())[0]
homologies_counts[segment] = 1
matches_list = sorted(matches.keys())
for i, match1 in enumerate(matches_list):
for match2 in matches_list[i + 1 :]:
segment = start, end = (match2[0], min(match1[1], match2[1]))
if end < start:
# The segment is empty, match1 and match2 as disjunct.
break
elif (end - start > min_size) and (segment not in homologies_counts):
homologies_counts[segment] = len(
[
matching
for (match_start, match_end) in matches_list
for matching in matches[(match_start, match_end)]
if match_start <= start <= end <= match_end
]
)
return homologies_counts
def segment_with_most_homologies(homologies_counts, method="most_coverage_first"):
"""Select the "best" segment, that should be selected next as a common
block."""
def segment_score(segment):
if method == "most_coverage_first":
factor = homologies_counts[segment]
else:
factor = 1
start, end = segment
return factor * (end - start)
return max(
[(0, (None, None))]
+ [(segment_score(segment), segment) for segment in homologies_counts]
)
def select_common_blocks(
homologies, sequences, min_size=0, method="most_coverage_first"
):
"""Select a collection of the largest common blocks, iteratively."""
common_blocks = []
homologies_counts = {
seqname: count_homologies(matches=homologies[seqname], min_size=min_size)
for seqname in sequences
}
# ITERATIVELY SELECT A COMMON BLOCK AND REMOVE THAT BLOCK FROM THE
# homologies IN VARIOUS SEQUENCES, UNTIL THERE IS NO HOMOLOGY
while True:
# FIND THE HOMOLOGY WITH THE BEST OVERALL SCORE ACROSS ALL SEQS
(best_score, (start, end)), seqname = max(
[
(
segment_with_most_homologies(
homologies_counts[seqname], method=method
),
seqname,
)
for seqname in sequences
]
)
# IF NO HOMOLOGY WAS FOUND AT ALL, STOP
if best_score == 0:
break
# FIND WHERE THE SELECTED SUBSEQUENCE APPEARS IN OTHER SEQUENCES.
# AT EACH LOCATION, "EXTRUDE" THE SUBSEQUENCE FROM THE CURRENT
# LOCATIONS IN homologies_counts
best_subsequence = sequences[seqname][start:end]
locations = []
for seqname, sequence in sequences.items():
seq_n_intersections = homologies_counts[seqname]
# we look for both the subsequence and its reverse complement:
for strand in [1, -1]:
if strand == 1:
matches = re.finditer(best_subsequence, sequence)
else:
matches = re.finditer(
reverse_complement(best_subsequence), sequence
)
for match in matches:
# add the location to the list for this subsequence...
start, end = match.start(), match.end()
locations.append((seqname, (start, end, strand)))
# ...then subtract the location from the sequence's
# homologies list
match_as_segment = tuple(sorted([start, end]))
for intersection in list(seq_n_intersections.keys()):
score = seq_n_intersections.pop(intersection)
for diff in segments_difference(intersection, match_as_segment):
diff_start, diff_end = diff
if diff_end - diff_start > min_size:
seq_n_intersections[diff] = score
common_blocks.append((best_subsequence, locations))
# REMOVE SELF-HOMOLOGOUS SEQUENCES
common_blocks = [
(seq, locations) for (seq, locations) in common_blocks if len(locations) >= 2
]
# CREATE THE FINAL COMMON_BLOCKS_DICT
common_blocks_dict = OrderedDict()
if len(common_blocks) > 0:
number_size = int(np.log10(len(common_blocks))) + 1
for i, (sequence, locations) in enumerate(common_blocks):
block_name = "block_%s" % (str(i + 1).zfill(number_size))
common_blocks_dict[block_name] = {
"sequence": sequence,
"locations": locations,
}
return common_blocks_dict
| [
"collections.OrderedDict",
"subprocess.Popen",
"tempfile.mktemp",
"collections.defaultdict",
"re.finditer"
] | [((3982, 4004), 'tempfile.mktemp', 'tempfile.mktemp', (['""".fa"""'], {}), "('.fa')\n", (3997, 4004), False, 'import tempfile\n'), ((4212, 4522), 'subprocess.Popen', 'subprocess.Popen', (["['blastn', '-query', temp_fasta_path, '-subject', temp_fasta_path,\n '-perc_identity', '100', '-dust', 'no', '-evalue', '1000000000000000',\n '-culling_limit', '10', '-ungapped', '-outfmt',\n '6 qseqid qstart qend sseqid sstart send']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['blastn', '-query', temp_fasta_path, '-subject',\n temp_fasta_path, '-perc_identity', '100', '-dust', 'no', '-evalue',\n '1000000000000000', '-culling_limit', '10', '-ungapped', '-outfmt',\n '6 qseqid qstart qend sseqid sstart send'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n", (4228, 4522), False, 'import subprocess\n'), ((10097, 10110), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10108, 10110), False, 'from collections import defaultdict, OrderedDict\n'), ((4927, 4953), 'collections.defaultdict', 'defaultdict', (['(lambda *a: [])'], {}), '(lambda *a: [])\n', (4938, 4953), False, 'from collections import defaultdict, OrderedDict\n'), ((1006, 1064), 'collections.OrderedDict', 'OrderedDict', (['[(record.id, record) for record in sequences]'], {}), '([(record.id, record) for record in sequences])\n', (1017, 1064), False, 'from collections import defaultdict, OrderedDict\n'), ((1290, 1312), 'collections.OrderedDict', 'OrderedDict', (['sequences'], {}), '(sequences)\n', (1301, 1312), False, 'from collections import defaultdict, OrderedDict\n'), ((8773, 8812), 're.finditer', 're.finditer', (['best_subsequence', 'sequence'], {}), '(best_subsequence, sequence)\n', (8784, 8812), False, 'import re\n')] |
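A quick sanity check for count_homologies from the row above — a minimal sketch using a hypothetical toy input (the segment values are illustrative, not from the original file):
matches = {
    (0, 50): [("seqA", 10, 60)],
    (20, 90): [("seqB", 5, 75)],
}
# The (20, 50) intersection lies inside both source segments, so it is counted twice.
assert count_homologies(matches, min_size=10) == {(20, 50): 2}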
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import logging
import linecache
import copy
from string import Template
logger = logging.getLogger(__name__)
class ExtraDict(dict):
""" Creates a dict-like structure where we can store the new values of
variables, while keeping the old (initial) ones as well. This is useful
for a local-style context for running code with exec.
"""
def __init__(self, d, extra):
"""
:param d: base dictionary
:param extra: extra dictionary where we keep the modifications
"""
self.d = d
self.extra = extra
self._extrabackup = copy.deepcopy(extra)
def __getitem__(self, key):
if key in self.extra:
return self.extra[key]
if key in self.d:
return self.d[key]
raise KeyError(key)
def __setitem__(self, key, value):
self.extra[key] = value
def reset(self):
""" Reset the local context to the same state as it was when it was
created.
"""
self.extra = copy.deepcopy(self._extrabackup)
class UFTemplate(object):
def __init__(self, initial="", oncycle="", template="",
debug=False, **kwargs):
""" Runs the code from initial once, during initialization, and then,
run the code in oncycle and render the template using the variables
obtainted in that context
:param initial: valid python code which is run only once during
UFTemplate object creation
:param onycyle: valid python code which is run before rendering the
template
:param template: a string used with string.Template to render the
output. The variables are obtained from either initial or oncycle
or submitted as kwargs during object creation
"""
self.debug = debug
self.baseenv = {}
self.initial = initial
self.oncycle = oncycle
self.template = template
self.cycleenv = {}
compiled_initial = compile(self.initial, '<string-1>', 'exec')
linecache.cache['<string-1>'] = len(self.initial), None, self.initial.split("\n"), '<string-1>'
        exec(compiled_initial, self.baseenv)
for k, v in kwargs.items():
if self.debug:
logger.debug("Setting %s as %s" % (k, v))
self.baseenv[k] = v
if self.debug:
logger.debug("After initial eval, we have the following values:")
for k, v in self.baseenv.items():
logger.debug("\tVariable %s = %s" % (k, repr(v)))
self.compiled = compile(self.oncycle, '<string-2>', 'exec')
linecache.cache['<string-2>'] = len(self.oncycle), None, self.oncycle.split("\n"), '<string-2>'
self.localcontext = ExtraDict(self.baseenv, self.cycleenv)
def render(self):
""" Renders one template
"""
        exec(self.compiled, self.localcontext)
if self.debug:
logger.debug("On initial, we have the following values:")
for k, v in self.localcontext.d.items():
logger.debug("\t[Initial] %s = %s" % (k, repr(v)))
logger.debug("On cycle, we have the following values:")
for k, v in self.localcontext.extra.items():
logger.debug("\t[Cycle] %s = %s" % (k, repr(v)))
try:
return Template(self.template).substitute(self.localcontext)
except:
logger.exception("Could not render")
raise
def render_many(self, howmany=1):
""" Renders a number of templates and returns an array of strings
:param howmany: Number of times to run the template
and generate outputs
"""
t0 = time.time()
renders = []
for _ in range(howmany):
_sr = self.render()
if _sr:
renders.append(_sr)
t1 = time.time()
if self.debug:
logger.debug("Rendered many: howmany=%d, took %2.2f seconds, \
speed=%5d/second" % (howmany, (t1-t0), (howmany/(t1-t0))))
return renders
def reset(self):
self.localcontext.reset()
if __name__ == "__main__":
initial = """
import datetime
def f(x):
    return x*x
def getnow():
    return str(datetime.datetime.now())
a = 0
b = 100
i = 0
"""
oncycle = """
a += 3
i += 1
s = f(i)
b += a
now = getnow()
"""
template = """Now = ${now}
Render nr. ${i}
f(${i}) = ${s}
b = ${b}
We live in ${where}"""
tpl = UFTemplate(initial, oncycle, template, where="Indonezia")
for text in tpl.render_many(3):
print(text)
tpl.reset()
for text in tpl.render_many(3):
print(text)
| [
"logging.getLogger",
"string.Template",
"time.time",
"copy.deepcopy"
] | [((137, 164), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (154, 164), False, 'import logging\n'), ((644, 664), 'copy.deepcopy', 'copy.deepcopy', (['extra'], {}), '(extra)\n', (657, 664), False, 'import copy\n'), ((1068, 1100), 'copy.deepcopy', 'copy.deepcopy', (['self._extrabackup'], {}), '(self._extrabackup)\n', (1081, 1100), False, 'import copy\n'), ((3857, 3868), 'time.time', 'time.time', ([], {}), '()\n', (3866, 3868), False, 'import time\n'), ((4024, 4035), 'time.time', 'time.time', ([], {}), '()\n', (4033, 4035), False, 'import time\n'), ((3492, 3515), 'string.Template', 'Template', (['self.template'], {}), '(self.template)\n', (3500, 3515), False, 'from string import Template\n')] |
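The layered lookup in ExtraDict above is easiest to see in isolation. A minimal sketch, assuming the class is imported from the module above:
base = {"a": 1}
ctx = ExtraDict(base, {})
ctx["a"] = 99  # writes land only in the extra layer
assert ctx["a"] == 99 and base["a"] == 1
ctx.reset()  # discard every modification made since creation
assert ctx["a"] == 1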
"""Generalized Gell-Mann matrices."""
from typing import Union
from scipy import sparse
import numpy as np
def gen_gell_mann(
ind_1: int, ind_2: int, dim: int, is_sparse: bool = False
) -> Union[np.ndarray, sparse.lil_matrix]:
r"""
Produce a generalized Gell-Mann operator [WikGM2]_.
Construct a :code:`dim`-by-:code:`dim` Hermitian operator. These matrices
span the entire space of :code:`dim`-by-:code:`dim` matrices as
:code:`ind_1` and :code:`ind_2` range from 0 to :code:`dim-1`, inclusive,
and they generalize the Pauli operators when :code:`dim = 2` and the
Gell-Mann operators when :code:`dim = 3`.
Examples
==========
The generalized Gell-Mann matrix for :code:`ind_1 = 0`, :code:`ind_2 = 1`
and :code:`dim = 2` is given as
.. math::
G_{0, 1, 2} = \begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}.
This can be obtained in :code:`toqito` as follows.
>>> from toqito.matrices import gen_gell_mann
>>> gen_gell_mann(0, 1, 2)
[[0., 1.],
     [1., 0.]]
The generalized Gell-Mann matrix :code:`ind_1 = 2`, :code:`ind_2 = 3`, and
:code:`dim = 4` is given as
.. math::
G_{2, 3, 4} = \begin{pmatrix}
0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 \\
0 & 0 & 1 & 0
\end{pmatrix}.
This can be obtained in :code:`toqito` as follows.
>>> from toqito.matrices import gen_gell_mann
>>> gen_gell_mann(2, 3, 4)
[[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 1.],
     [0., 0., 1., 0.]]
References
==========
.. [WikGM2] Wikipedia: Gell-Mann matrices,
https://en.wikipedia.org/wiki/Gell-Mann_matrices
:param ind_1: A non-negative integer from 0 to :code:`dim-1` (inclusive).
:param ind_2: A non-negative integer from 0 to :code:`dim-1` (inclusive).
:param dim: The dimension of the Gell-Mann operator.
:param is_sparse: If set to :code:`True`, the returned Gell-Mann
operator is a sparse lil_matrix and if set to
:code:`False`, the returned Gell-Mann operator is a
dense :code:`numpy` array.
:return: The generalized Gell-Mann operator.
"""
if ind_1 == ind_2:
if ind_1 == 0:
gm_op = sparse.eye(dim)
else:
scalar = np.sqrt(2 / (ind_1 * (ind_1 + 1)))
diag = np.ones((ind_1, 1))
diag = np.append(diag, -ind_1)
diag = scalar * np.append(diag, np.zeros((dim - ind_1 - 1, 1)))
gm_op = sparse.lil_matrix((dim, dim))
gm_op.setdiag(diag)
else:
e_mat = sparse.lil_matrix((dim, dim))
e_mat[ind_1, ind_2] = 1
if ind_1 < ind_2:
gm_op = e_mat + e_mat.conj().T
else:
gm_op = 1j * e_mat - 1j * e_mat.conj().T
if not is_sparse:
return gm_op.todense()
return gm_op
| [
"scipy.sparse.lil_matrix",
"numpy.sqrt",
"numpy.ones",
"scipy.sparse.eye",
"numpy.append",
"numpy.zeros"
] | [((2791, 2820), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(dim, dim)'], {}), '((dim, dim))\n', (2808, 2820), False, 'from scipy import sparse\n'), ((2437, 2452), 'scipy.sparse.eye', 'sparse.eye', (['dim'], {}), '(dim)\n', (2447, 2452), False, 'from scipy import sparse\n'), ((2488, 2522), 'numpy.sqrt', 'np.sqrt', (['(2 / (ind_1 * (ind_1 + 1)))'], {}), '(2 / (ind_1 * (ind_1 + 1)))\n', (2495, 2522), True, 'import numpy as np\n'), ((2542, 2561), 'numpy.ones', 'np.ones', (['(ind_1, 1)'], {}), '((ind_1, 1))\n', (2549, 2561), True, 'import numpy as np\n'), ((2581, 2604), 'numpy.append', 'np.append', (['diag', '(-ind_1)'], {}), '(diag, -ind_1)\n', (2590, 2604), True, 'import numpy as np\n'), ((2702, 2731), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(dim, dim)'], {}), '((dim, dim))\n', (2719, 2731), False, 'from scipy import sparse\n'), ((2649, 2679), 'numpy.zeros', 'np.zeros', (['(dim - ind_1 - 1, 1)'], {}), '((dim - ind_1 - 1, 1))\n', (2657, 2679), True, 'import numpy as np\n')] |
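As the docstring above notes, dim = 2 generalizes the Pauli operators; a quick check of that claim — a hedged sketch, assuming gen_gell_mann is importable from the module above:
import numpy as np
pauli_x = np.array([[0, 1], [1, 0]])
pauli_y = np.array([[0, -1j], [1j, 0]])
pauli_z = np.array([[1, 0], [0, -1]])
assert np.allclose(gen_gell_mann(0, 1, 2), pauli_x)  # symmetric off-diagonal case
assert np.allclose(gen_gell_mann(1, 0, 2), pauli_y)  # antisymmetric off-diagonal case
assert np.allclose(gen_gell_mann(1, 1, 2), pauli_z)  # diagonal case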
# This Python file uses the following encoding: utf-8
from unittest import TestCase
from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain
import json
import pandoc_numbering
from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool
def getMeta1():
return {
'pandoc-numbering': createMetaList([
createMetaMap({
'category': createMetaInlines(u'exercise'),
'sectioning': createMetaInlines(u'-.+.')
})
])
}
def getMeta2():
return {
'pandoc-numbering': createMetaList([
createMetaMap({
'category': createMetaInlines(u'exercise'),
'first': createMetaString(u'2'),
'last': createMetaString(u'2'),
})
])
}
def getMeta3():
return {
'pandoc-numbering': createMetaList([
createMetaMap({
'category': createMetaInlines(u'exercise'),
'first': createMetaString(u'a'),
'last': createMetaString(u'b'),
})
])
}
def getMeta4():
return {
'pandoc-numbering': createMetaList([
createMetaMap({
'category': createMetaInlines(u'exercise'),
'classes': createMetaList([createMetaInlines(u'my-class')])
})
])
}
def getMeta5():
return {
'pandoc-numbering': createMetaList([
createMetaMap({
'category': createMetaInlines(u'exercise'),
'format': createMetaBool(False)
})
])
}
def test_numbering_none():
init()
src = Para(createListStr(u'Not an exercise'))
dest = src
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
def test_numbering():
init()
src = Para(createListStr(u'Exercise #'))
dest = Para([
Span(
[u'exercise:1', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
def test_numbering_definitionlist():
init()
src = DefinitionList([
[
createListStr(u'Exercise #'),
[Plain([createListStr(u'Content A')])]
],
[
createListStr(u'Exercise #'),
[Plain([createListStr(u'Content B')])]
]
])
dest = DefinitionList([
[
[Span(
[u'exercise:1', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 1'))]
)],
[Plain([createListStr(u'Content A')])]
],
[
[Span(
[u'exercise:2', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 2'))]
)],
[Plain([createListStr(u'Content B')])]
]
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
def test_numbering_prefix_single():
init()
src = Para(createListStr(u'Exercise #ex:'))
dest = Para([
Span(
[u'ex:1', ['pandoc-numbering-text', 'ex'], []],
[Strong(createListStr(u'Exercise 1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
src = Para(createListStr(u'Exercise #'))
dest = Para([
Span(
[u'exercise:1', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
def test_numbering_latex():
init()
src = Para(createListStr(u'Exercise #'))
dest = Para([
RawInline(u'tex', u'\\phantomsection\\addcontentsline{exercise}{exercise}{\\protect\\numberline {1}{\\ignorespaces Exercise}}'),
Span(
[u'exercise:1', ['pandoc-numbering-text', 'exercise'], []],
[RawInline('tex', '\\label{exercise:1}'), Strong(createListStr(u'Exercise 1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], 'latex', {}) == dest
init()
src = Para(createListStr(u'Exercise (The title) #'))
dest = Para([
RawInline(u'tex', u'\\phantomsection\\addcontentsline{exercise}{exercise}{\\protect\\numberline {1}{\\ignorespaces The title}}'),
Span(
[u'exercise:1', ['pandoc-numbering-text', 'exercise'], []],
[
RawInline('tex', '\\label{exercise:1}'),
Strong(createListStr(u'Exercise 1')),
Space(),
Emph(createListStr(u'(') + createListStr(u'The title') + createListStr(u')'))
]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], 'latex', {}) == dest
def test_numbering_double():
init()
src = Para(createListStr(u'Exercise #'))
pandoc_numbering.numbering(src['t'], src['c'], '', {})
src = Para(createListStr(u'Exercise #'))
dest = Para([
Span(
[u'exercise:2', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 2'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
def test_numbering_title():
init()
src = Para(createListStr(u'Exercise (The title) #'))
dest = Para([
Span(
[u'exercise:1', ['pandoc-numbering-text', 'exercise'], []],
[
Strong(createListStr(u'Exercise 1')),
Space(),
Emph(createListStr(u'(') + createListStr(u'The title') + createListStr(u')'))
]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
def test_numbering_level():
init()
src = Para(createListStr(u'Exercise +.+.#'))
dest = Para([
Span(
[u'exercise:0.0.1', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 0.0.1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
src = Header(1, [u'first-chapter', [], []], createListStr(u'First chapter'))
pandoc_numbering.numbering(src['t'], src['c'], '', {})
src = Header(2, [u'first-section', [], []], createListStr(u'First section'))
pandoc_numbering.numbering(src['t'], src['c'], '', {})
src = Para(createListStr(u'Exercise +.+.#'))
dest = Para([
Span(
[u'exercise:1.1.1', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 1.1.1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
src = Para(createListStr(u'Exercise +.+.#'))
dest = Para([
Span(
[u'exercise:1.1.2', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 1.1.2'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
src = Header(2, [u'second-section', [], []], createListStr(u'Second section'))
pandoc_numbering.numbering(src['t'], src['c'], '', {})
src = Para(createListStr(u'Exercise +.+.#'))
dest = Para([
Span(
[u'exercise:1.2.1', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 1.2.1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
def test_numbering_unnumbered():
init()
src = Header(1, [u'unnumbered-chapter', [u'unnumbered'], []], createListStr(u'Unnumbered chapter'))
pandoc_numbering.numbering(src['t'], src['c'], '', {})
src = Para(createListStr(u'Exercise +.#'))
dest = Para([
Span(
[u'exercise:0.1', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 0.1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
def test_numbering_hidden():
init()
src = Header(1, [u'first-chapter', [], []], createListStr(u'First chapter'))
pandoc_numbering.numbering(src['t'], src['c'], '', {})
src = Para(createListStr(u'Exercise -.#exercise:one'))
dest = Para([
Span(
[u'exercise:one', ['pandoc-numbering-text', 'exercise'], []],
[
Strong(createListStr(u'Exercise 1'))
]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
src = Para(createListStr(u'Exercise -.#'))
dest = Para([
Span(
[u'exercise:1.2', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 2'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
src = Header(1, [u'second-chapter', [], []], createListStr(u'Second chapter'))
pandoc_numbering.numbering(src['t'], src['c'], '', {})
src = Para(createListStr(u'Exercise -.#'))
dest = Para([
Span(
[u'exercise:2.1', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
src = Para(createListStr(u'Exercise +.#'))
dest = Para([
Span(
[u'exercise:2.2', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 2.2'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
src = Para([Str(u'Exercise'), Space(), Str(u'#')])
dest = Para([
Span(
[u'exercise:1', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', {}) == dest
def test_numbering_sharp_sharp():
init()
src = Para(createListStr(u'Exercise ##'))
dest = Para(createListStr(u'Exercise #'))
pandoc_numbering.numbering(src['t'], src['c'], '', {})
assert src == dest
def sectioning(meta):
src = Header(1, [u'first-chapter', [], []], createListStr(u'First chapter'))
pandoc_numbering.numbering(src['t'], src['c'], '', meta)
src = Header(1, [u'second-chapter', [], []], createListStr(u'Second chapter'))
pandoc_numbering.numbering(src['t'], src['c'], '', meta)
src = Header(2, [u'first-section', [], []], createListStr(u'First section'))
pandoc_numbering.numbering(src['t'], src['c'], '', meta)
src = Header(2, [u'second-section', [], []], createListStr(u'Second section'))
pandoc_numbering.numbering(src['t'], src['c'], '', meta)
def test_numbering_sectioning_string():
init()
meta = getMeta1()
sectioning(meta)
src = Para(createListStr(u'Exercise #'))
dest = Para([
Span(
[u'exercise:2.2.1', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 2.1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', meta) == dest
def test_numbering_sectioning_map():
init()
meta = getMeta2()
sectioning(meta)
src = Para([Str(u'Exercise'), Space(), Str(u'#')])
dest = Para([
Span(
[u'exercise:2.2.1', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 2.1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', meta) == dest
def test_numbering_sectioning_map_error():
init()
meta = getMeta3()
sectioning(meta)
src = Para(createListStr(u'Exercise #'))
dest = Para([
Span(
[u'exercise:1', ['pandoc-numbering-text', 'exercise'], []],
[Strong(createListStr(u'Exercise 1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', meta) == dest
def test_classes():
init()
meta = getMeta4()
src = Para(createListStr(u'Exercise #'))
dest = Para([
Span(
[u'exercise:1', ['pandoc-numbering-text', 'my-class'], []],
[Strong(createListStr(u'Exercise 1'))]
)
])
assert pandoc_numbering.numbering(src['t'], src['c'], '', meta) == dest
def test_format():
init()
meta = getMeta5()
src = Para(createListStr(u'Exercise #'))
dest = json.loads(json.dumps(Para([
Span(
[u'exercise:1', ['pandoc-numbering-text', 'exercice'], []],
[
Span(['', ['description'], []], createListStr(u'Exercise')),
Span(['', ['number'], []], createListStr(u'1')),
Span(['', ['title'], []], [])
]
)
])))
    assert json.loads(json.dumps(pandoc_numbering.numbering(src['t'], src['c'], '', meta))) == dest
| [
"helper.createListStr",
"pandocfilters.Space",
"pandocfilters.Span",
"pandocfilters.Str",
"helper.init",
"pandocfilters.RawInline",
"helper.createMetaInlines",
"helper.createMetaBool",
"helper.createMetaString",
"pandoc_numbering.numbering"
] | [((1734, 1740), 'helper.init', 'init', ([], {}), '()\n', (1738, 1740), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((1910, 1916), 'helper.init', 'init', ([], {}), '()\n', (1914, 1916), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((2252, 2258), 'helper.init', 'init', ([], {}), '()\n', (2256, 2258), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((3146, 3152), 'helper.init', 'init', ([], {}), '()\n', (3150, 3152), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((3763, 3769), 'helper.init', 'init', ([], {}), '()\n', (3767, 3769), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((4251, 4257), 'helper.init', 'init', ([], {}), '()\n', (4255, 4257), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((4946, 4952), 'helper.init', 'init', ([], {}), '()\n', (4950, 4952), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((5003, 5057), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (5029, 5057), False, 'import pandoc_numbering\n'), ((5384, 5390), 'helper.init', 'init', ([], {}), '()\n', (5388, 5390), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((5879, 5885), 'helper.init', 'init', ([], {}), '()\n', (5883, 5885), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((6277, 6331), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (6303, 6331), False, 'import pandoc_numbering\n'), ((6418, 6472), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (6444, 6472), False, 'import pandoc_numbering\n'), ((7171, 7225), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (7197, 7225), False, 'import pandoc_numbering\n'), ((7569, 7575), 'helper.init', 'init', ([], {}), '()\n', (7573, 7575), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((7685, 7739), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (7711, 7739), False, 'import pandoc_numbering\n'), ((8073, 8079), 'helper.init', 'init', ([], {}), '()\n', (8077, 8079), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((8166, 8220), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (8192, 8220), False, 'import pandoc_numbering\n'), ((8945, 8999), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (8971, 8999), False, 'import pandoc_numbering\n'), ((9938, 9944), 'helper.init', 'init', ([], {}), '()\n', (9942, 9944), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((10042, 10096), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (10068, 10096), False, 'import pandoc_numbering\n'), ((10229, 10285), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', 'meta'], {}), "(src['t'], src['c'], '', meta)\n", (10255, 10285), False, 'import pandoc_numbering\n'), ((10374, 10430), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', 'meta'], {}), "(src['t'], src['c'], '', meta)\n", (10400, 10430), False, 'import pandoc_numbering\n'), ((10517, 10573), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', 'meta'], {}), "(src['t'], src['c'], '', meta)\n", (10543, 10573), False, 'import pandoc_numbering\n'), ((10662, 10718), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', 'meta'], {}), "(src['t'], src['c'], '', meta)\n", (10688, 10718), False, 'import pandoc_numbering\n'), ((10764, 10770), 'helper.init', 'init', ([], {}), '()\n', (10768, 10770), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((11159, 11165), 'helper.init', 'init', ([], {}), '()\n', (11163, 11165), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((11570, 11576), 'helper.init', 'init', ([], {}), '()\n', (11574, 11576), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((11942, 11948), 'helper.init', 'init', ([], {}), '()\n', (11946, 11948), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((12291, 12297), 'helper.init', 'init', ([], {}), '()\n', (12295, 12297), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((1758, 1791), 'helper.createListStr', 'createListStr', (['u"""Not an exercise"""'], {}), "(u'Not an exercise')\n", (1771, 1791), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((1820, 1874), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (1846, 1874), False, 'import pandoc_numbering\n'), ((1933, 1961), 'helper.createListStr', 'createListStr', (['u"""Exercise #"""'], {}), "(u'Exercise #')\n", (1946, 1961), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((2147, 2201), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (2173, 2201), False, 'import pandoc_numbering\n'), ((3042, 3096), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (3068, 3096), False, 'import pandoc_numbering\n'), ((3169, 3200), 'helper.createListStr', 'createListStr', (['u"""Exercise #ex:"""'], {}), "(u'Exercise #ex:')\n", (3182, 3200), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((3374, 3428), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (3400, 3428), False, 'import pandoc_numbering\n'), ((3453, 3481), 'helper.createListStr', 'createListStr', (['u"""Exercise #"""'], {}), "(u'Exercise #')\n", (3466, 3481), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((3667, 3721), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (3693, 3721), False, 'import pandoc_numbering\n'), ((3786, 3814), 'helper.createListStr', 'createListStr', (['u"""Exercise #"""'], {}), "(u'Exercise #')\n", (3799, 3814), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((4178, 4237), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '"""latex"""', '{}'], {}), "(src['t'], src['c'], 'latex', {})\n", (4204, 4237), False, 'import pandoc_numbering\n'), ((4274, 4314), 'helper.createListStr', 'createListStr', (['u"""Exercise (The title) #"""'], {}), "(u'Exercise (The title) #')\n", (4287, 4314), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((4844, 4903), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '"""latex"""', '{}'], {}), "(src['t'], src['c'], 'latex', {})\n", (4870, 4903), False, 'import pandoc_numbering\n'), ((4969, 4997), 'helper.createListStr', 'createListStr', (['u"""Exercise #"""'], {}), "(u'Exercise #')\n", (4982, 4997), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((5074, 5102), 'helper.createListStr', 'createListStr', (['u"""Exercise #"""'], {}), "(u'Exercise #')\n", (5087, 5102), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((5288, 5342), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (5314, 5342), False, 'import pandoc_numbering\n'), ((5407, 5447), 'helper.createListStr', 'createListStr', (['u"""Exercise (The title) #"""'], {}), "(u'Exercise (The title) #')\n", (5420, 5447), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((5783, 5837), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (5809, 5837), False, 'import pandoc_numbering\n'), ((5902, 5934), 'helper.createListStr', 'createListStr', (['u"""Exercise +.+.#"""'], {}), "(u'Exercise +.+.#')\n", (5915, 5934), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((6128, 6182), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (6154, 6182), False, 'import pandoc_numbering\n'), ((6240, 6271), 'helper.createListStr', 'createListStr', (['u"""First chapter"""'], {}), "(u'First chapter')\n", (6253, 6271), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((6381, 6412), 'helper.createListStr', 'createListStr', (['u"""First section"""'], {}), "(u'First section')\n", (6394, 6412), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((6489, 6521), 'helper.createListStr', 'createListStr', (['u"""Exercise +.+.#"""'], {}), "(u'Exercise +.+.#')\n", (6502, 6521), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((6715, 6769), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (6741, 6769), False, 'import pandoc_numbering\n'), ((6794, 6826), 'helper.createListStr', 'createListStr', (['u"""Exercise +.+.#"""'], {}), "(u'Exercise +.+.#')\n", (6807, 6826), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((7020, 7074), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (7046, 7074), False, 'import pandoc_numbering\n'), ((7133, 7165), 'helper.createListStr', 'createListStr', (['u"""Second section"""'], {}), "(u'Second section')\n", (7146, 7165), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((7242, 7274), 'helper.createListStr', 'createListStr', (['u"""Exercise +.+.#"""'], {}), "(u'Exercise +.+.#')\n", (7255, 7274), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((7468, 7522), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (7494, 7522), False, 'import pandoc_numbering\n'), ((7643, 7679), 'helper.createListStr', 'createListStr', (['u"""Unnumbered chapter"""'], {}), "(u'Unnumbered chapter')\n", (7656, 7679), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((7756, 7786), 'helper.createListStr', 'createListStr', (['u"""Exercise +.#"""'], {}), "(u'Exercise +.#')\n", (7769, 7786), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((7976, 8030), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (8002, 8030), False, 'import pandoc_numbering\n'), ((8129, 8160), 'helper.createListStr', 'createListStr', (['u"""First chapter"""'], {}), "(u'First chapter')\n", (8142, 8160), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((8237, 8279), 'helper.createListStr', 'createListStr', (['u"""Exercise -.#exercise:one"""'], {}), "(u'Exercise -.#exercise:one')\n", (8250, 8279), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((8497, 8551), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (8523, 8551), False, 'import pandoc_numbering\n'), ((8576, 8606), 'helper.createListStr', 'createListStr', (['u"""Exercise -.#"""'], {}), "(u'Exercise -.#')\n", (8589, 8606), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((8794, 8848), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (8820, 8848), False, 'import pandoc_numbering\n'), ((8907, 8939), 'helper.createListStr', 'createListStr', (['u"""Second chapter"""'], {}), "(u'Second chapter')\n", (8920, 8939), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((9016, 9046), 'helper.createListStr', 'createListStr', (['u"""Exercise -.#"""'], {}), "(u'Exercise -.#')\n", (9029, 9046), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((9234, 9288), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (9260, 9288), False, 'import pandoc_numbering\n'), ((9313, 9343), 'helper.createListStr', 'createListStr', (['u"""Exercise +.#"""'], {}), "(u'Exercise +.#')\n", (9326, 9343), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((9533, 9587), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (9559, 9587), False, 'import pandoc_numbering\n'), ((9836, 9890), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', '{}'], {}), "(src['t'], src['c'], '', {})\n", (9862, 9890), False, 'import pandoc_numbering\n'), ((9961, 9990), 'helper.createListStr', 'createListStr', (['u"""Exercise ##"""'], {}), "(u'Exercise ##')\n", (9974, 9990), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((10008, 10036), 'helper.createListStr', 'createListStr', (['u"""Exercise #"""'], {}), "(u'Exercise #')\n", (10021, 10036), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((10192, 10223), 'helper.createListStr', 'createListStr', (['u"""First chapter"""'], {}), "(u'First chapter')\n", (10205, 10223), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((10336, 10368), 'helper.createListStr', 'createListStr', (['u"""Second chapter"""'], {}), "(u'Second chapter')\n", (10349, 10368), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((10480, 10511), 'helper.createListStr', 'createListStr', (['u"""First section"""'], {}), "(u'First section')\n", (10493, 10511), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((10624, 10656), 'helper.createListStr', 'createListStr', (['u"""Second section"""'], {}), "(u'Second section')\n", (10637, 10656), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((10832, 10860), 'helper.createListStr', 'createListStr', (['u"""Exercise #"""'], {}), "(u'Exercise #')\n", (10845, 10860), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((11052, 11108), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', 'meta'], {}), "(src['t'], src['c'], '', meta)\n", (11078, 11108), False, 'import pandoc_numbering\n'), ((11457, 11513), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', 'meta'], {}), "(src['t'], src['c'], '', meta)\n", (11483, 11513), False, 'import pandoc_numbering\n'), ((11638, 11666), 'helper.createListStr', 'createListStr', (['u"""Exercise #"""'], {}), "(u'Exercise #')\n", (11651, 11666), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((11852, 11908), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', 'meta'], {}), "(src['t'], src['c'], '', meta)\n", (11878, 11908), False, 'import pandoc_numbering\n'), ((11988, 12016), 'helper.createListStr', 'createListStr', (['u"""Exercise #"""'], {}), "(u'Exercise #')\n", (12001, 12016), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((12202, 12258), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', 'meta'], {}), "(src['t'], src['c'], '', meta)\n", (12228, 12258), False, 'import pandoc_numbering\n'), ((12337, 12365), 'helper.createListStr', 'createListStr', (['u"""Exercise #"""'], {}), "(u'Exercise #')\n", (12350, 12365), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((3842, 3978), 'pandocfilters.RawInline', 'RawInline', (['u"""tex"""', 'u"""\\\\phantomsection\\\\addcontentsline{exercise}{exercise}{\\\\protect\\\\numberline {1}{\\\\ignorespaces Exercise}}"""'], {}), "(u'tex',\n u'\\\\phantomsection\\\\addcontentsline{exercise}{exercise}{\\\\protect\\\\numberline {1}{\\\\ignorespaces Exercise}}'\n )\n", (3851, 3978), False, 'from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain\n'), ((4342, 4479), 'pandocfilters.RawInline', 'RawInline', (['u"""tex"""', 'u"""\\\\phantomsection\\\\addcontentsline{exercise}{exercise}{\\\\protect\\\\numberline {1}{\\\\ignorespaces The title}}"""'], {}), "(u'tex',\n u'\\\\phantomsection\\\\addcontentsline{exercise}{exercise}{\\\\protect\\\\numberline {1}{\\\\ignorespaces The title}}'\n )\n", (4351, 4479), False, 'from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain\n'), ((9613, 9629), 'pandocfilters.Str', 'Str', (['u"""Exercise"""'], {}), "(u'Exercise')\n", (9616, 9629), False, 'from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain\n'), ((9631, 9638), 'pandocfilters.Space', 'Space', ([], {}), '()\n', (9636, 9638), False, 'from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain\n'), ((9640, 9649), 'pandocfilters.Str', 'Str', (['u"""#"""'], {}), "(u'#')\n", (9643, 9649), False, 'from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain\n'), ((11228, 11244), 'pandocfilters.Str', 'Str', (['u"""Exercise"""'], {}), "(u'Exercise')\n", (11231, 11244), False, 'from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain\n'), ((11246, 11253), 'pandocfilters.Space', 'Space', ([], {}), '()\n', (11251, 11253), False, 'from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain\n'), ((11255, 11264), 'pandocfilters.Str', 'Str', (['u"""#"""'], {}), "(u'#')\n", (11258, 11264), False, 'from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain\n'), ((2309, 2337), 'helper.createListStr', 'createListStr', (['u"""Exercise #"""'], {}), "(u'Exercise #')\n", (2322, 2337), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((2423, 2451), 'helper.createListStr', 'createListStr', (['u"""Exercise #"""'], {}), "(u'Exercise #')\n", (2436, 2451), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((12755, 12811), 'pandoc_numbering.numbering', 'pandoc_numbering.numbering', (["src['t']", "src['c']", '""""""', 'meta'], {}), "(src['t'], src['c'], '', meta)\n", (12781, 12811), False, 'import pandoc_numbering\n'), ((4070, 4109), 'pandocfilters.RawInline', 'RawInline', (['"""tex"""', '"""\\\\label{exercise:1}"""'], {}), "('tex', '\\\\label{exercise:1}')\n", (4079, 4109), False, 'from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain\n'), ((4588, 4627), 'pandocfilters.RawInline', 'RawInline', (['"""tex"""', '"""\\\\label{exercise:1}"""'], {}), "('tex', '\\\\label{exercise:1}')\n", (4597, 4627), False, 'from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain\n'), ((4699, 4706), 'pandocfilters.Space', 'Space', ([], {}), '()\n', (4704, 4706), False, 'from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain\n'), ((5637, 5644), 'pandocfilters.Space', 'Space', ([], {}), '()\n', (5642, 5644), False, 'from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain\n'), ((483, 513), 'helper.createMetaInlines', 'createMetaInlines', (['u"""exercise"""'], {}), "(u'exercise')\n", (500, 513), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((545, 571), 'helper.createMetaInlines', 'createMetaInlines', (['u"""-.+."""'], {}), "(u'-.+.')\n", (562, 571), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((735, 765), 'helper.createMetaInlines', 'createMetaInlines', (['u"""exercise"""'], {}), "(u'exercise')\n", (752, 765), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((792, 814), 'helper.createMetaString', 'createMetaString', (['u"""2"""'], {}), "(u'2')\n", (808, 814), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((840, 862), 'helper.createMetaString', 'createMetaString', (['u"""2"""'], {}), "(u'2')\n", (856, 862), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((1027, 1057), 'helper.createMetaInlines', 'createMetaInlines', (['u"""exercise"""'], {}), "(u'exercise')\n", (1044, 1057), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((1084, 1106), 'helper.createMetaString', 'createMetaString', (['u"""a"""'], {}), "(u'a')\n", (1100, 1106), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((1132, 1154), 'helper.createMetaString', 'createMetaString', (['u"""b"""'], {}), "(u'b')\n", (1148, 1154), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((1319, 1349), 'helper.createMetaInlines', 'createMetaInlines', (['u"""exercise"""'], {}), "(u'exercise')\n", (1336, 1349), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((1590, 1620), 'helper.createMetaInlines', 'createMetaInlines', (['u"""exercise"""'], {}), "(u'exercise')\n", (1607, 1620), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((1648, 1669), 'helper.createMetaBool', 'createMetaBool', (['(False)'], {}), '(False)\n', (1662, 1669), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((2087, 2115), 'helper.createListStr', 'createListStr', (['u"""Exercise 1"""'], {}), "(u'Exercise 1')\n", (2100, 2115), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((3314, 3342), 'helper.createListStr', 'createListStr', (['u"""Exercise 1"""'], {}), "(u'Exercise 1')\n", (3327, 3342), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((3607, 3635), 'helper.createListStr', 'createListStr', (['u"""Exercise 1"""'], {}), "(u'Exercise 1')\n", (3620, 3635), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((4118, 4146), 'helper.createListStr', 'createListStr', (['u"""Exercise 1"""'], {}), "(u'Exercise 1')\n", (4131, 4146), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((4652, 4680), 'helper.createListStr', 'createListStr', (['u"""Exercise 1"""'], {}), "(u'Exercise 1')\n", (4665, 4680), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((5228, 5256), 'helper.createListStr', 'createListStr', (['u"""Exercise 2"""'], {}), "(u'Exercise 2')\n", (5241, 5256), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((5590, 5618), 'helper.createListStr', 'createListStr', (['u"""Exercise 1"""'], {}), "(u'Exercise 1')\n", (5603, 5618), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((6064, 6096), 'helper.createListStr', 'createListStr', (['u"""Exercise 0.0.1"""'], {}), "(u'Exercise 0.0.1')\n", (6077, 6096), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((6651, 6683), 'helper.createListStr', 'createListStr', (['u"""Exercise 1.1.1"""'], {}), "(u'Exercise 1.1.1')\n", (6664, 6683), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((6956, 6988), 'helper.createListStr', 'createListStr', (['u"""Exercise 1.1.2"""'], {}), "(u'Exercise 1.1.2')\n", (6969, 6988), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((7404, 7436), 'helper.createListStr', 'createListStr', (['u"""Exercise 1.2.1"""'], {}), "(u'Exercise 1.2.1')\n", (7417, 7436), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((7914, 7944), 'helper.createListStr', 'createListStr', (['u"""Exercise 0.1"""'], {}), "(u'Exercise 0.1')\n", (7927, 7944), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((8424, 8452), 'helper.createListStr', 'createListStr', (['u"""Exercise 1"""'], {}), "(u'Exercise 1')\n", (8437, 8452), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((8734, 8762), 'helper.createListStr', 'createListStr', (['u"""Exercise 2"""'], {}), "(u'Exercise 2')\n", (8747, 8762), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((9174, 9202), 'helper.createListStr', 'createListStr', (['u"""Exercise 1"""'], {}), "(u'Exercise 1')\n", (9187, 9202), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((9471, 9501), 'helper.createListStr', 'createListStr', (['u"""Exercise 2.2"""'], {}), "(u'Exercise 2.2')\n", (9484, 9501), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((9776, 9804), 'helper.createListStr', 'createListStr', (['u"""Exercise 1"""'], {}), "(u'Exercise 1')\n", (9789, 9804), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((10990, 11020), 'helper.createListStr', 'createListStr', (['u"""Exercise 2.1"""'], {}), "(u'Exercise 2.1')\n", (11003, 11020), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((11395, 11425), 'helper.createListStr', 'createListStr', (['u"""Exercise 2.1"""'], {}), "(u'Exercise 2.1')\n", (11408, 11425), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((11792, 11820), 'helper.createListStr', 'createListStr', (['u"""Exercise 1"""'], {}), "(u'Exercise 1')\n", (11805, 11820), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((12142, 12170), 'helper.createListStr', 'createListStr', (['u"""Exercise 1"""'], {}), "(u'Exercise 1')\n", (12155, 12170), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((2359, 2386), 'helper.createListStr', 'createListStr', (['u"""Content A"""'], {}), "(u'Content A')\n", (2372, 2386), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((2473, 2500), 'helper.createListStr', 'createListStr', (['u"""Content B"""'], {}), "(u'Content B')\n", (2486, 2500), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((2745, 2772), 'helper.createListStr', 'createListStr', (['u"""Content A"""'], {}), "(u'Content A')\n", (2758, 2772), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((2983, 3010), 'helper.createListStr', 'createListStr', (['u"""Content B"""'], {}), "(u'Content B')\n", (2996, 3010), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((4781, 4800), 'helper.createListStr', 'createListStr', (['u""")"""'], {}), "(u')')\n", (4794, 4800), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((5719, 5738), 'helper.createListStr', 'createListStr', (['u""")"""'], {}), "(u')')\n", (5732, 5738), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((12665, 12694), 'pandocfilters.Span', 'Span', (["['', ['title'], []]", '[]'], {}), "(['', ['title'], []], [])\n", (12669, 12694), False, 'from pandocfilters import Para, Str, Space, Span, Strong, RawInline, Emph, Header, DefinitionList, Plain\n'), ((1394, 1424), 'helper.createMetaInlines', 'createMetaInlines', (['u"""my-class"""'], {}), "(u'my-class')\n", (1411, 1424), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((2678, 2706), 'helper.createListStr', 'createListStr', (['u"""Exercise 1"""'], {}), "(u'Exercise 1')\n", (2691, 2706), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((2916, 2944), 'helper.createListStr', 'createListStr', (['u"""Exercise 2"""'], {}), "(u'Exercise 2')\n", (2929, 2944), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((4729, 4748), 'helper.createListStr', 'createListStr', (['u"""("""'], {}), "(u'(')\n", (4742, 4748), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((4751, 4778), 'helper.createListStr', 'createListStr', (['u"""The title"""'], {}), "(u'The title')\n", (4764, 4778), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((5667, 5686), 'helper.createListStr', 'createListStr', (['u"""("""'], {}), "(u'(')\n", (5680, 5686), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((5689, 5716), 'helper.createListStr', 'createListStr', (['u"""The title"""'], {}), "(u'The title')\n", (5702, 5716), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((12555, 12581), 'helper.createListStr', 'createListStr', (['u"""Exercise"""'], {}), "(u'Exercise')\n", (12568, 12581), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n'), ((12627, 12646), 'helper.createListStr', 'createListStr', (['u"""1"""'], {}), "(u'1')\n", (12640, 12646), False, 'from helper import init, createMetaList, createMetaMap, createMetaInlines, createListStr, createMetaString, createMetaBool\n')] |
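The tests above call pandoc_numbering.numbering directly; in normal use such an action is wired into pandoc through pandocfilters.toJSONFilter. A minimal, hedged sketch of an entry-point script (the real pandoc-numbering package ships its own):
from pandocfilters import toJSONFilter
import pandoc_numbering
if __name__ == '__main__':
    # pandoc pipes its JSON AST through stdin/stdout, e.g.:
    #   pandoc --filter ./numbering_filter.py input.md
    toJSONFilter(pandoc_numbering.numbering)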
# Copyright (C) 2019-2021, TomTom (http://tomtom.com).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Java template helpers."""
import pytest
from asciidoxy.templates.java.helpers import JavaTemplateHelper
from asciidoxy.generator.filters import InsertionFilter
from .builders import SimpleClassBuilder
@pytest.fixture
def java_class():
builder = SimpleClassBuilder("java")
builder.name("MyClass")
# fill class with typical members
for visibility in ("public", "protected", "private"):
for member_type in ("enum", "class", "trash"):
builder.simple_member(kind=member_type, prot=visibility)
        # add constructor
builder.member_function(prot=visibility, name="MyClass", has_return_value=False)
# add some method
builder.member_function(prot=visibility, name=visibility.capitalize() + "Method")
# add static method
builder.member_function(prot=visibility,
name=visibility.capitalize() + "StaticMethod",
static=True)
# add simple variable
builder.member_variable(prot=visibility)
# add final variable
builder.member_variable(prot=visibility,
name=f"{visibility.capitalize()}Constant",
type_prefix="final ")
return builder.compound
@pytest.fixture
def helper(java_class, empty_generating_api):
return JavaTemplateHelper(empty_generating_api, java_class, InsertionFilter())
def test_public_constants__no_filter(helper):
result = [m.name for m in helper.constants(prot="public")]
assert result == ["PublicConstant"]
def test_public_constants__filter_match(helper):
helper.insert_filter = InsertionFilter(members="Public")
result = [m.name for m in helper.constants(prot="public")]
assert result == ["PublicConstant"]
def test_public_constants__filter_no_match(helper):
helper.insert_filter = InsertionFilter(members="NONE")
result = [m.name for m in helper.constants(prot="public")]
assert len(result) == 0
def test_private_constants__no_filter(helper):
result = [m.name for m in helper.constants(prot="private")]
assert result == ["PrivateConstant"]
| [
"asciidoxy.generator.filters.InsertionFilter"
] | [((2268, 2301), 'asciidoxy.generator.filters.InsertionFilter', 'InsertionFilter', ([], {'members': '"""Public"""'}), "(members='Public')\n", (2283, 2301), False, 'from asciidoxy.generator.filters import InsertionFilter\n'), ((2486, 2517), 'asciidoxy.generator.filters.InsertionFilter', 'InsertionFilter', ([], {'members': '"""NONE"""'}), "(members='NONE')\n", (2501, 2517), False, 'from asciidoxy.generator.filters import InsertionFilter\n'), ((2020, 2037), 'asciidoxy.generator.filters.InsertionFilter', 'InsertionFilter', ([], {}), '()\n', (2035, 2037), False, 'from asciidoxy.generator.filters import InsertionFilter\n')] |
from django.contrib import admin
from .models import Student,Adult
class AdultAdmin(admin.ModelAdmin):
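    # double-underscore lookups (user__first_name, ...) search across the related User model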
search_fields = ['user__first_name', 'user__last_name','ID_Number' ,'id','user__username',]
list_display = ('user','id','ID_Number',)
class StudentAdmin(admin.ModelAdmin):
search_fields = ['user__first_name','user__last_name','id','grade','user__username',]
list_display = ('user','id','grade')
admin.site.register(Adult,AdultAdmin)
admin.site.register(Student,StudentAdmin)
| [
"django.contrib.admin.site.register"
] | [((418, 456), 'django.contrib.admin.site.register', 'admin.site.register', (['Adult', 'AdultAdmin'], {}), '(Adult, AdultAdmin)\n', (437, 456), False, 'from django.contrib import admin\n'), ((456, 498), 'django.contrib.admin.site.register', 'admin.site.register', (['Student', 'StudentAdmin'], {}), '(Student, StudentAdmin)\n', (475, 498), False, 'from django.contrib import admin\n')] |
import gooeypie as gp
from random import choice
app = gp.GooeyPieApp('Label widget')
align_options = ['left', 'center', 'right']
label_text = ['A short label',
'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut ' \
'labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco ' \
'laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in ' \
'voluptate velit esse cillum dolore eu fugiat nulla pariatur.',
'123456789 ' * 40]
def change_label(event):
if contents_rdo.selected == 'Short':
test_lbl.text = label_text[0]
if contents_rdo.selected == 'Long':
test_lbl.text = label_text[1]
def align(event):
test_lbl.align = align_rdo.selected
def justify(event):
test_lbl.justify = justify_rdo.selected
def add_words(event):
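    # note: despite the button label 'Add words', this handler toggles word wrap on the label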
test_lbl.wrap = not test_lbl.wrap
# DEFINE CONTAINERS #
# Main label containers
widget_container = gp.LabelContainer(app, 'Label widget')
testing_container = gp.LabelContainer(app, 'Operations')
log_container = gp.LabelContainer(app, 'Log')
# Operations containers
content_container = gp.Container(testing_container)
length_container = gp.Container(testing_container)
min_length_container = gp.Container(testing_container)
# CREATE WIDGETS
# Test subject
test_lbl = gp.Label(widget_container, label_text[1])
test_lbl.width = 180
test_lbl.wrap = True
# Label contents
contents_rdo = gp.Radiogroup(content_container, ['Short', "Long"], 'horizontal')
contents_rdo.add_event_listener('change', change_label)
other_lbl = gp.Label(content_container, 'Other')
other_inp = gp.Input(content_container)
more_words_btn = gp.Button(content_container, 'Add words', add_words)
# Length
length_lbl = gp.Label(testing_container, 'Wrap length')
length_inp = gp.Input(length_container)
length_set_btn = gp.Button(length_container, 'Set', None)
length_up_btn = gp.Button(length_container, '+', None)
length_down_btn = gp.Button(length_container, '-', None)
# Minimum length
min_length_lbl = gp.Label(testing_container, 'Minimum length')
min_length_inp = gp.Input(min_length_container)
min_length_set_btn = gp.Button(min_length_container, 'Set', None)
min_length_up_btn = gp.Button(min_length_container, '+', None)
min_length_down_btn = gp.Button(min_length_container, '-', None)
# Align
align_lbl = gp.Label(testing_container, 'Align')
align_rdo = gp.Radiogroup(testing_container, align_options, 'horizontal')
align_rdo.add_event_listener('change', align)
# Justify
justify_lbl = gp.Label(testing_container, 'Justify')
justify_rdo = gp.Radiogroup(testing_container, align_options, 'horizontal')
justify_rdo.add_event_listener('change', justify)
# Log
log = gp.Textbox(log_container)
log.height = 10
# ADD ALL WIDGETS #
# Test subject
widget_container.set_grid(1, 1)
widget_container.add(test_lbl, 1, 1, fill=True, stretch=True)
# Content options
content_container.set_grid(1, 4)
content_container.add(contents_rdo, 1, 1)
content_container.add(other_lbl, 1, 2)
content_container.add(other_inp, 1, 3)
content_container.add(more_words_btn, 1, 4)
# Wrap length
length_container.set_grid(1, 4)
length_container.add(length_inp, 1, 1)
length_container.add(length_set_btn, 1, 2)
length_container.add(length_up_btn, 1, 3)
length_container.add(length_down_btn, 1, 4)
# Minimum length
min_length_container.set_grid(1, 4)
min_length_container.add(min_length_inp, 1, 1)
min_length_container.add(min_length_set_btn, 1, 2)
min_length_container.add(min_length_up_btn, 1, 3)
min_length_container.add(min_length_down_btn, 1, 4)
# Testing container
testing_container.set_grid(5, 2)
testing_container.add(content_container, 1, 1, column_span=2)
testing_container.add(length_lbl, 2, 1)
testing_container.add(length_container, 2, 2)
testing_container.add(min_length_lbl, 3, 1)
testing_container.add(min_length_container, 3, 2)
testing_container.add(align_lbl, 4, 1)
testing_container.add(align_rdo, 4, 2)
testing_container.add(justify_lbl, 5, 1)
testing_container.add(justify_rdo, 5, 2)
# Log area
log_container.set_grid(1, 1)
log_container.add(log, 1, 1, fill=True)
# Add everything to main app
app.set_grid(3, 1)
app.set_row_weights(1, 0, 0)
app.add(widget_container, 1, 1, fill=True, stretch=True)
app.add(testing_container, 2, 1, fill=True)
app.add(log_container, 3, 1, fill=True)
app.run()
| [
"gooeypie.Button",
"gooeypie.Label",
"gooeypie.Input",
"gooeypie.Textbox",
"gooeypie.LabelContainer",
"gooeypie.Radiogroup",
"gooeypie.Container",
"gooeypie.GooeyPieApp"
] | [((55, 85), 'gooeypie.GooeyPieApp', 'gp.GooeyPieApp', (['"""Label widget"""'], {}), "('Label widget')\n", (69, 85), True, 'import gooeypie as gp\n'), ((1043, 1081), 'gooeypie.LabelContainer', 'gp.LabelContainer', (['app', '"""Label widget"""'], {}), "(app, 'Label widget')\n", (1060, 1081), True, 'import gooeypie as gp\n'), ((1102, 1138), 'gooeypie.LabelContainer', 'gp.LabelContainer', (['app', '"""Operations"""'], {}), "(app, 'Operations')\n", (1119, 1138), True, 'import gooeypie as gp\n'), ((1155, 1184), 'gooeypie.LabelContainer', 'gp.LabelContainer', (['app', '"""Log"""'], {}), "(app, 'Log')\n", (1172, 1184), True, 'import gooeypie as gp\n'), ((1230, 1261), 'gooeypie.Container', 'gp.Container', (['testing_container'], {}), '(testing_container)\n', (1242, 1261), True, 'import gooeypie as gp\n'), ((1281, 1312), 'gooeypie.Container', 'gp.Container', (['testing_container'], {}), '(testing_container)\n', (1293, 1312), True, 'import gooeypie as gp\n'), ((1336, 1367), 'gooeypie.Container', 'gp.Container', (['testing_container'], {}), '(testing_container)\n', (1348, 1367), True, 'import gooeypie as gp\n'), ((1413, 1454), 'gooeypie.Label', 'gp.Label', (['widget_container', 'label_text[1]'], {}), '(widget_container, label_text[1])\n', (1421, 1454), True, 'import gooeypie as gp\n'), ((1530, 1595), 'gooeypie.Radiogroup', 'gp.Radiogroup', (['content_container', "['Short', 'Long']", '"""horizontal"""'], {}), "(content_container, ['Short', 'Long'], 'horizontal')\n", (1543, 1595), True, 'import gooeypie as gp\n'), ((1664, 1700), 'gooeypie.Label', 'gp.Label', (['content_container', '"""Other"""'], {}), "(content_container, 'Other')\n", (1672, 1700), True, 'import gooeypie as gp\n'), ((1713, 1740), 'gooeypie.Input', 'gp.Input', (['content_container'], {}), '(content_container)\n', (1721, 1740), True, 'import gooeypie as gp\n'), ((1758, 1810), 'gooeypie.Button', 'gp.Button', (['content_container', '"""Add words"""', 'add_words'], {}), "(content_container, 'Add words', add_words)\n", (1767, 1810), True, 'import gooeypie as gp\n'), ((1834, 1876), 'gooeypie.Label', 'gp.Label', (['testing_container', '"""Wrap length"""'], {}), "(testing_container, 'Wrap length')\n", (1842, 1876), True, 'import gooeypie as gp\n'), ((1890, 1916), 'gooeypie.Input', 'gp.Input', (['length_container'], {}), '(length_container)\n', (1898, 1916), True, 'import gooeypie as gp\n'), ((1934, 1974), 'gooeypie.Button', 'gp.Button', (['length_container', '"""Set"""', 'None'], {}), "(length_container, 'Set', None)\n", (1943, 1974), True, 'import gooeypie as gp\n'), ((1991, 2029), 'gooeypie.Button', 'gp.Button', (['length_container', '"""+"""', 'None'], {}), "(length_container, '+', None)\n", (2000, 2029), True, 'import gooeypie as gp\n'), ((2048, 2086), 'gooeypie.Button', 'gp.Button', (['length_container', '"""-"""', 'None'], {}), "(length_container, '-', None)\n", (2057, 2086), True, 'import gooeypie as gp\n'), ((2122, 2167), 'gooeypie.Label', 'gp.Label', (['testing_container', '"""Minimum length"""'], {}), "(testing_container, 'Minimum length')\n", (2130, 2167), True, 'import gooeypie as gp\n'), ((2185, 2215), 'gooeypie.Input', 'gp.Input', (['min_length_container'], {}), '(min_length_container)\n', (2193, 2215), True, 'import gooeypie as gp\n'), ((2237, 2281), 'gooeypie.Button', 'gp.Button', (['min_length_container', '"""Set"""', 'None'], {}), "(min_length_container, 'Set', None)\n", (2246, 2281), True, 'import gooeypie as gp\n'), ((2302, 2344), 'gooeypie.Button', 'gp.Button', (['min_length_container', '"""+"""', 'None'], {}), 
"(min_length_container, '+', None)\n", (2311, 2344), True, 'import gooeypie as gp\n'), ((2367, 2409), 'gooeypie.Button', 'gp.Button', (['min_length_container', '"""-"""', 'None'], {}), "(min_length_container, '-', None)\n", (2376, 2409), True, 'import gooeypie as gp\n'), ((2431, 2467), 'gooeypie.Label', 'gp.Label', (['testing_container', '"""Align"""'], {}), "(testing_container, 'Align')\n", (2439, 2467), True, 'import gooeypie as gp\n'), ((2480, 2541), 'gooeypie.Radiogroup', 'gp.Radiogroup', (['testing_container', 'align_options', '"""horizontal"""'], {}), "(testing_container, align_options, 'horizontal')\n", (2493, 2541), True, 'import gooeypie as gp\n'), ((2613, 2651), 'gooeypie.Label', 'gp.Label', (['testing_container', '"""Justify"""'], {}), "(testing_container, 'Justify')\n", (2621, 2651), True, 'import gooeypie as gp\n'), ((2666, 2727), 'gooeypie.Radiogroup', 'gp.Radiogroup', (['testing_container', 'align_options', '"""horizontal"""'], {}), "(testing_container, align_options, 'horizontal')\n", (2679, 2727), True, 'import gooeypie as gp\n'), ((2791, 2816), 'gooeypie.Textbox', 'gp.Textbox', (['log_container'], {}), '(log_container)\n', (2801, 2816), True, 'import gooeypie as gp\n')] |
# -*- coding: utf-8 -*-
""" this program takes as input the number of a bus station,
access the mabat.mot.gov.il web site, and retrieves the
bus line number and arrival time for each bus reaching the station"""
""" code and advice used in this program:
Station information:
if you send a POST request to this address: http://mabat.mot.gov.il/AdalyaService.svc/StationLinesByIdGet
and put a JSON like this in the request body (replace the number with the station number you are interested in)
{
"stationId": 21451
}
or, if you prefer the command line, you are welcome to send the following command:
curl -sd '{"stationId": 21451}' http://mabat.mot.gov.il/AdalyaService.svc/StationLinesByIdGet -H 'Content-Type: application/json' | python -m json.tool
==========
https://docs.python.org/2/library/commands.html
https://docs.python.org/2/library/subprocess.html
==========
command = "curl -sd '{\"stationId\": 21451}' http://mabat.mot.gov.il/AdalyaService.svc/StationLinesByIdGet -H 'Content-Type: application/json'"
jzon = os.popen(command)
string = jzon.read()
jzon = json.loads(string)
jzon['Payload']['Lines']
"""
#%%
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import time
import sqlite3
conn = sqlite3.connect('linesInfo.sqlite')
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS linestats1
(id INTEGER PRIMARY KEY, station_number INTEGER, time REAL, date TEXT, stationLines TEXT)''')
"""
This code will receive a station ID number (an integer), create the command
to access the mabat.mot.gov.il web site and retrieve the information as a json.
It returns a tuple of the station/bus lines information, epoch time and date/time """
def getInfo(station_number):
""" given the station number, gets the json containing
bus arrival times and station info """
json_string = '{ "stationId": ' + str(station_number) + ', "isSIRI":true, "lang":"1037"}'
command = "curl -sd '" + json_string + "' http://mabat.mot.gov.il/AdalyaService.svc/StationLinesByIdGet -H 'Content-Type: application/json'"
timeAsked = time.time()
timeHuman = time.ctime(timeAsked)
handle = os.popen(command)
responseText = handle.read()
responseJson = json.loads(responseText)
return (station_number, timeAsked, timeHuman, responseJson)
#%%
def saveInfo(responseTuple):
"""takes the tuple from getInfo(station_number) and stores in an sqlite database
"""
#create or connect to linesInfo database and table linestats
cur.execute('INSERT OR IGNORE INTO linestats1 (station_number, time, date, stationLines) VALUES (?, ?, ?, ?)',
(responseTuple[0], responseTuple[1], responseTuple[2], str(responseTuple[3])) #['Payload']))
)
conn.commit()
# cur.execute('SELECT * FROM linestats')
# cur.fetchone()
# THESE LINES GET 60 DATA POINTS (ONE PER MINUTE) FROM A SINGLE STATION AND SAVE THEM IN THE DATABASE
for index in range(60):
data1 = getInfo(25619)
saveInfo(data1)
time.sleep(60)
#%%
| [
"time.ctime",
"json.loads",
"sqlite3.connect",
"time.sleep",
"os.popen",
"time.time"
] | [((1167, 1202), 'sqlite3.connect', 'sqlite3.connect', (['"""linesInfo.sqlite"""'], {}), "('linesInfo.sqlite')\n", (1182, 1202), False, 'import sqlite3\n'), ((2022, 2033), 'time.time', 'time.time', ([], {}), '()\n', (2031, 2033), False, 'import time\n'), ((2050, 2071), 'time.ctime', 'time.ctime', (['timeAsked'], {}), '(timeAsked)\n', (2060, 2071), False, 'import time\n'), ((2085, 2102), 'os.popen', 'os.popen', (['command'], {}), '(command)\n', (2093, 2102), False, 'import os\n'), ((2155, 2179), 'json.loads', 'json.loads', (['responseText'], {}), '(responseText)\n', (2165, 2179), False, 'import json\n'), ((2925, 2939), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (2935, 2939), False, 'import time\n')] |
import usefulFunctions
#importing the other module into this program
#all the functions and variables of that module can be used in this file
print(usefulFunctions.roll_dice())
print(usefulFunctions.friends)
| [
"usefulFunctions.roll_dice"
] | [((141, 168), 'usefulFunctions.roll_dice', 'usefulFunctions.roll_dice', ([], {}), '()\n', (166, 168), False, 'import usefulFunctions\n')] |
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
url(r'^$', views.DashboardView.as_view(), name='home'),
url(r'^sa_dashboard/$', views.study_adviser_view, name='sa_dashboard'),
url(r'^dashboard/$', views.DashboardView.as_view(), name='home'),
    url(r'^settings/$', views.settings, name='settings'),
url(r'^successfully_logged_out/$', views.logged_out, name='logged_out'),
url(r'^not_in_seth/$', views.not_in_seth, name='not_in_seth'),
url(r'^manual/$', views.manual_view, name='manual'),
url(r'^filter_students/$', views.filter_students_by_module_edition, name='filter_students'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"django.conf.urls.static.static",
"django.conf.urls.url"
] | [((713, 776), 'django.conf.urls.static.static', 'static', (['settings.STATIC_URL'], {'document_root': 'settings.STATIC_ROOT'}), '(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n', (719, 776), False, 'from django.conf.urls.static import static\n'), ((212, 281), 'django.conf.urls.url', 'url', (['"""^sa_dashboard/$"""', 'views.study_adviser_view'], {'name': '"""sa_dashboard"""'}), "('^sa_dashboard/$', views.study_adviser_view, name='sa_dashboard')\n", (215, 281), False, 'from django.conf.urls import url\n'), ((358, 408), 'django.conf.urls.url', 'url', (['"""settings/$"""', 'views.settings'], {'name': '"""settings"""'}), "('settings/$', views.settings, name='settings')\n", (361, 408), False, 'from django.conf.urls import url\n'), ((415, 485), 'django.conf.urls.url', 'url', (['"""^successfully_logged_out/$"""', 'views.logged_out'], {'name': '"""logged_out"""'}), "('^successfully_logged_out/$', views.logged_out, name='logged_out')\n", (418, 485), False, 'from django.conf.urls import url\n'), ((492, 552), 'django.conf.urls.url', 'url', (['"""^not_in_seth/$"""', 'views.not_in_seth'], {'name': '"""not_in_seth"""'}), "('^not_in_seth/$', views.not_in_seth, name='not_in_seth')\n", (495, 552), False, 'from django.conf.urls import url\n'), ((559, 609), 'django.conf.urls.url', 'url', (['"""^manual/$"""', 'views.manual_view'], {'name': '"""manual"""'}), "('^manual/$', views.manual_view, name='manual')\n", (562, 609), False, 'from django.conf.urls import url\n'), ((616, 711), 'django.conf.urls.url', 'url', (['"""^filter_students/$"""', 'views.filter_students_by_module_edition'], {'name': '"""filter_students"""'}), "('^filter_students/$', views.filter_students_by_module_edition, name=\n 'filter_students')\n", (619, 711), False, 'from django.conf.urls import url\n')] |
from django.contrib import admin
from .models import Ticket
def set_tickets_open(modeladmin, request, queryset):
rows_updated = queryset.update(status='Open')
if rows_updated == 1:
modeladmin.message_user(request, 'Ticket successfully set to open.')
else:
modeladmin.message_user(
request, '{rows} tickets successfully set to open.'.format(rows=rows_updated))
set_tickets_open.short_description = 'Set selected tickets to open'
def set_tickets_closed(modeladmin, request, queryset):
rows_updated = queryset.update(status='Closed')
if rows_updated == 1:
modeladmin.message_user(request, 'Ticket successfully set to closed.')
else:
modeladmin.message_user(
request, '{rows} tickets successfully set to closed.'.format(rows=rows_updated))
set_tickets_closed.short_description = 'Set selected tickets to closed'
class TicketsAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'email', 'category',
'status', 'created_at', 'updated_at')
search_fields = ('content',)
list_filter = ('category', 'status', 'created_at')
fields = ('id', 'name', 'email', 'category', 'content', 'status', 'ip_address', 'user_agent', 'created_at',
'updated_at')
readonly_fields = ('id', 'ip_address', 'user_agent',
'created_at', 'updated_at')
ordering = ('-created_at',)
actions = [set_tickets_open, set_tickets_closed]
def get_actions(self, request):
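        # hide the bulk delete action from non-superusers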
actions = super(TicketsAdmin, self).get_actions(request)
if not request.user.is_superuser and 'delete_selected' in actions:
actions.pop('delete_selected')
return actions
admin.site.register(Ticket, TicketsAdmin)
| [
"django.contrib.admin.site.register"
] | [((1720, 1761), 'django.contrib.admin.site.register', 'admin.site.register', (['Ticket', 'TicketsAdmin'], {}), '(Ticket, TicketsAdmin)\n', (1739, 1761), False, 'from django.contrib import admin\n')] |
import matplotlib.widgets as mwidgets
class Slider(mwidgets.Slider):
"""Slider widget to select a value from a floating point range.
Parameters
----------
ax : :class:`~matplotlib.axes.Axes` instance
The parent axes for the widget
value_range : (float, float)
(min, max) value allowed for value.
label : str
The slider label.
value : float
Initial value. If None, set to value in middle of value range.
on_slide : function
Callback function for slide event. Function should expect slider value.
value_fmt : str
Format string for formatting the slider text.
slidermin, slidermax : float
        Used to constrain the value of this slider to the values
of other sliders.
dragging : bool
If True, slider is responsive to mouse.
pad : float
Padding (in axes coordinates) between `label`/`value_fmt` and slider.
Attributes
----------
value : float
Current slider value.
"""
def __init__(self, ax, value_range, label='', value=None, on_slide=None,
value_fmt='%1.2f', slidermin=None, slidermax=None,
dragging=True, pad=0.02):
mwidgets.AxesWidget.__init__(self, ax)
self.valmin, self.valmax = value_range
if value is None:
value = 0.5 * (self.valmin + self.valmax)
self.val = value
self.valinit = value
self.valfmt = value_fmt
y0 = 0.5
x_low = [self.valmin, value]
x_high = [value, self.valmax]
self.line_low, = ax.plot(x_low, [y0, y0], color='0.5', lw=2)
self.line_high, = ax.plot(x_high, [y0, y0], color='0.7', lw=2)
self.val_handle, = ax.plot(value, y0, 'o',
mec='0.4', mfc='0.6', markersize=8)
ax.set_xlim(value_range)
ax.set_navigate(False)
ax.set_axis_off()
self.connect_event('button_press_event', self._update)
self.connect_event('button_release_event', self._update)
if dragging:
self.connect_event('motion_notify_event', self._update)
self.label = ax.text(-pad, y0, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.show_value = False if value_fmt is None else True
if self.show_value:
self.valtext = ax.text(1 + pad, y0, value_fmt % value,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.slidermin = slidermin
self.slidermax = slidermax
self.drag_active = False
self.cnt = 0
self.observers = {}
if on_slide is not None:
self.on_changed(on_slide)
# Attributes for matplotlib.widgets.Slider compatibility
self.closedmin = self.closedmax = True
@property
def value(self):
return self.val
@value.setter
def value(self, value):
self.val = value
self.line_low.set_xdata([self.valmin, value])
self.line_high.set_xdata([value, self.valmax])
self.val_handle.set_xdata([value])
if self.show_value:
self.valtext.set_text(self.valfmt % value)
def set_val(self, value):
"""Set value of slider."""
# Override matplotlib.widgets.Slider to update graphics objects.
self.value = value
if self.drawon:
self.ax.figure.canvas.draw()
if not self.eventson:
return
for cid, func in self.observers.items():
func(value)
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
ax = plt.subplot2grid((10, 1), (0, 0), rowspan=8)
ax_slider = plt.subplot2grid((10, 1), (9, 0))
a0 = 5
x = np.arange(0.0, 1.0, 0.001)
y = np.sin(6 * np.pi * x)
line, = ax.plot(x, a0 * y, lw=2, color='red')
ax.axis([x.min(), x.max(), -10, 10])
def update(val):
amp = samp.value
line.set_ydata(amp * y)
samp = Slider(ax_slider, (0.1, 10.0), on_slide=update,
label='Amplitude:', value=a0)
plt.show()
| [
"numpy.sin",
"matplotlib.widgets.AxesWidget.__init__",
"matplotlib.pyplot.subplot2grid",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((3822, 3866), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(10, 1)', '(0, 0)'], {'rowspan': '(8)'}), '((10, 1), (0, 0), rowspan=8)\n', (3838, 3866), True, 'import matplotlib.pyplot as plt\n'), ((3883, 3916), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(10, 1)', '(9, 0)'], {}), '((10, 1), (9, 0))\n', (3899, 3916), True, 'import matplotlib.pyplot as plt\n'), ((3937, 3963), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0)', '(0.001)'], {}), '(0.0, 1.0, 0.001)\n', (3946, 3963), True, 'import numpy as np\n'), ((3972, 3993), 'numpy.sin', 'np.sin', (['(6 * np.pi * x)'], {}), '(6 * np.pi * x)\n', (3978, 3993), True, 'import numpy as np\n'), ((4278, 4288), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4286, 4288), True, 'import matplotlib.pyplot as plt\n'), ((1215, 1253), 'matplotlib.widgets.AxesWidget.__init__', 'mwidgets.AxesWidget.__init__', (['self', 'ax'], {}), '(self, ax)\n', (1243, 1253), True, 'import matplotlib.widgets as mwidgets\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function,division,absolute_import
import logging
log = logging.getLogger(__name__) # __name__ is "foo.bar" here
import numpy as np
import numbers
np.seterr(all='ignore')
def findSlice(array,lims):
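    # slice from the first index where array > lims[0] up to the last index where array < lims[1]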
start = np.ravel(np.argwhere(array>lims[0]))[0]
stop = np.ravel(np.argwhere(array<lims[1]))[-1]
return slice(int(start),int(stop))
def approx(values,approx_values):
""" returns array where every value is replaced by the closest in approx_values
    This function is useful for rebinning; careful, it can be slow with many bins...
Example:
-------
approx( np.arange(0,1,0.1), [0,0.3,0.7] )
array([ 0. , 0. , 0.3, 0.3, 0.3, 0.7, 0.7, 0.7, 0.7, 0.7])
"""
# make sure they are arrays
values = np.asarray(values)
approx_values = np.asarray(approx_values)
# create outter difference
diff = np.abs(values[:,np.newaxis] - approx_values)
args = np.argmin(diff,axis=1)
values = approx_values[args]
#values = np.asarray( [ approx_values[np.argmin(np.abs(v-approx_values))] for v in values] )
return values
def rebin(values,bins):
""" returns array where every value is replaced by the closest in approx_values
This funciton is useful for rebinning
Example:
-------
approx( np.arange(0,1,0.1), [0,0.3,0.7] )
array([ 0. , 0. , 0.3, 0.3, 0.3, 0.7, 0.7, 0.7, 0.7, 0.7])
"""
# make sure they are arrays
bins = np.asarray(bins)
idx = np.digitize(values,bins)
idx[idx > bins.shape[0]-1] = bins.shape[0]-1
return (bins[idx]+bins[idx-1])/2
def reshapeToBroadcast(what,ref):
""" expand the 1d array 'what' to allow broadbasting to match
multidimentional array 'ref'. The two arrays have to same the same
dimensions along the first axis
"""
if what.shape == ref.shape: return what
assert what.shape[0] == ref.shape[0],\
"automatic reshaping requires same first dimention"
shape = [ref.shape[0],] + [1,]*(ref.ndim-1)
return what.reshape(shape)
def removeBackground(x,data,xlims=None,max_iter=100,background_regions=[],**kw):
from dualtree import dualtree
if data.ndim == 1: data = data[np.newaxis,:]
if xlims is not None:
idx = findSlice(x,xlims)
x = x[idx]
data = data[:,idx].copy()
else:
        data = data.copy()  # create local copy
# has to be a list of lists ..
if background_regions != [] and isinstance(background_regions[0],numbers.Real):
background_regions = [background_regions,]
background_regions = [findSlice(x,brange) for brange in background_regions]
for i in range(len(data)):
data[i] = data[i] - dualtree.baseline(data[i],max_iter=max_iter,
background_regions=background_regions,**kw)
return x,np.squeeze(data)
def find_hist_ranges(hist,x=None,max_frac=0.1):
high_idx = np.squeeze( np.argwhere(hist>np.nanmax(hist)*max_frac) )
# remove consecutive indices
edges = high_idx[ np.gradient(high_idx).astype(int) != 1 ]
    edges = np.asarray([high_idx[0],] + list(edges) + [high_idx[-1],])
    if x is not None:
        edges = x[edges]
if len(edges)%2 == 1:
edges = edges[:-1]
n_ranges = int(len(edges)/2)
ranges = edges.reshape( (n_ranges,2) )
return ranges
| [
"logging.getLogger",
"numpy.abs",
"numpy.digitize",
"numpy.asarray",
"numpy.squeeze",
"numpy.argwhere",
"numpy.nanmax",
"numpy.argmin",
"dualtree.dualtree.baseline",
"numpy.gradient",
"numpy.seterr"
] | [((109, 136), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (126, 136), False, 'import logging\n'), ((202, 225), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (211, 225), True, 'import numpy as np\n'), ((815, 833), 'numpy.asarray', 'np.asarray', (['values'], {}), '(values)\n', (825, 833), True, 'import numpy as np\n'), ((854, 879), 'numpy.asarray', 'np.asarray', (['approx_values'], {}), '(approx_values)\n', (864, 879), True, 'import numpy as np\n'), ((922, 967), 'numpy.abs', 'np.abs', (['(values[:, np.newaxis] - approx_values)'], {}), '(values[:, np.newaxis] - approx_values)\n', (928, 967), True, 'import numpy as np\n'), ((978, 1001), 'numpy.argmin', 'np.argmin', (['diff'], {'axis': '(1)'}), '(diff, axis=1)\n', (987, 1001), True, 'import numpy as np\n'), ((1521, 1537), 'numpy.asarray', 'np.asarray', (['bins'], {}), '(bins)\n', (1531, 1537), True, 'import numpy as np\n'), ((1548, 1573), 'numpy.digitize', 'np.digitize', (['values', 'bins'], {}), '(values, bins)\n', (1559, 1573), True, 'import numpy as np\n'), ((2820, 2836), 'numpy.squeeze', 'np.squeeze', (['data'], {}), '(data)\n', (2830, 2836), True, 'import numpy as np\n'), ((273, 301), 'numpy.argwhere', 'np.argwhere', (['(array > lims[0])'], {}), '(array > lims[0])\n', (284, 301), True, 'import numpy as np\n'), ((323, 351), 'numpy.argwhere', 'np.argwhere', (['(array < lims[1])'], {}), '(array < lims[1])\n', (334, 351), True, 'import numpy as np\n'), ((2696, 2791), 'dualtree.dualtree.baseline', 'dualtree.baseline', (['data[i]'], {'max_iter': 'max_iter', 'background_regions': 'background_regions'}), '(data[i], max_iter=max_iter, background_regions=\n background_regions, **kw)\n', (2713, 2791), False, 'from dualtree import dualtree\n'), ((2932, 2947), 'numpy.nanmax', 'np.nanmax', (['hist'], {}), '(hist)\n', (2941, 2947), True, 'import numpy as np\n'), ((3017, 3038), 'numpy.gradient', 'np.gradient', (['high_idx'], {}), '(high_idx)\n', (3028, 3038), True, 'import numpy as np\n')] |
import re
import sublime
import sublime_plugin
from Default.paragraph import *
from Default.paragraph import OldWrapLinesCommand as WrapLinesCommand
from . import jtextwrap as textwrap
class WrapLinesJustifiedCommand(WrapLinesCommand):
''' Same as parent, except using jtextwrap. '''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def run(self, edit, width=0):
if width == 0 and self.view.settings().get("wrap_width"):
try:
width = int(self.view.settings().get("wrap_width"))
except TypeError:
pass
if width == 0 and self.view.settings().get("rulers"):
# try and guess the wrap width from the ruler, if any
try:
width = int(self.view.settings().get("rulers")[0])
except ValueError:
pass
except TypeError:
pass
if width == 0:
width = 78
# Make sure tabs are handled as per the current buffer
tab_width = 8
if self.view.settings().get("tab_size"):
try:
tab_width = int(self.view.settings().get("tab_size"))
except TypeError:
pass
if tab_width == 0:
            tab_width = 8
paragraphs = []
for s in self.view.sel():
paragraphs.extend(all_paragraphs_intersecting_selection(self.view, s))
if len(paragraphs) > 0:
self.view.sel().clear()
for p in paragraphs:
self.view.sel().add(p)
            # This isn't an ideal way to do it, as we lose the position of the
# cursor within the paragraph: hence why the paragraph is selected
# at the end.
for s in self.view.sel():
wrapper = textwrap.TextWrapper()
wrapper.expand_tabs = False
wrapper.width = width
prefix = self.extract_prefix(s)
if prefix:
wrapper.initial_indent = prefix
wrapper.subsequent_indent = prefix
wrapper.width -= self.width_in_spaces(prefix, tab_width)
if wrapper.width < 0:
continue
txt = self.view.substr(s)
if prefix:
txt = txt.replace(prefix, u"")
txt = txt.expandtabs(tab_width)
txt = wrapper.fill(txt) + u"\n"
self.view.replace(edit, s, txt)
# It's unhelpful to have the entire paragraph selected, just leave the
# selection at the end
ends = [s.end() - 1 for s in self.view.sel()]
self.view.sel().clear()
for pt in ends:
self.view.sel().add(sublime.Region(pt))
| [
"sublime.Region"
] | [((2809, 2827), 'sublime.Region', 'sublime.Region', (['pt'], {}), '(pt)\n', (2823, 2827), False, 'import sublime\n')] |
#!/usr/bin/env python2.7
import itertools
import sys
import re
from Bio import SeqIO
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return itertools.izip_longest(*args, fillvalue=fillvalue)
with open(sys.argv[1] + '_R1.fastq', 'wb') as out1:
with open(sys.argv[1] + '_R2.fastq', 'wb') as out2:
for rec1, rec2 in grouper(SeqIO.parse(sys.stdin, 'fastq'), 2):
if re.sub('/1$', '', rec1.name) != re.sub('/2$', '', rec2.name):
raise Exception(rec1.name + ' ' + rec2.name + ' do not match')
SeqIO.write(rec1, out1, 'fastq')
SeqIO.write(rec2, out2, 'fastq')
| [
"re.sub",
"itertools.izip_longest",
"Bio.SeqIO.parse",
"Bio.SeqIO.write"
] | [((277, 327), 'itertools.izip_longest', 'itertools.izip_longest', (['*args'], {'fillvalue': 'fillvalue'}), '(*args, fillvalue=fillvalue)\n', (299, 327), False, 'import itertools\n'), ((471, 502), 'Bio.SeqIO.parse', 'SeqIO.parse', (['sys.stdin', '"""fastq"""'], {}), "(sys.stdin, 'fastq')\n", (482, 502), False, 'from Bio import SeqIO\n'), ((676, 708), 'Bio.SeqIO.write', 'SeqIO.write', (['rec1', 'out1', '"""fastq"""'], {}), "(rec1, out1, 'fastq')\n", (687, 708), False, 'from Bio import SeqIO\n'), ((721, 753), 'Bio.SeqIO.write', 'SeqIO.write', (['rec2', 'out2', '"""fastq"""'], {}), "(rec2, out2, 'fastq')\n", (732, 753), False, 'from Bio import SeqIO\n'), ((523, 551), 're.sub', 're.sub', (['"""/1$"""', '""""""', 'rec1.name'], {}), "('/1$', '', rec1.name)\n", (529, 551), False, 'import re\n'), ((555, 583), 're.sub', 're.sub', (['"""/2$"""', '""""""', 'rec2.name'], {}), "('/2$', '', rec2.name)\n", (561, 583), False, 'import re\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018-2021 Accenture Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mercury.system.dict_util import MultiLevelDict
class TestDict(unittest.TestCase):
def test_multi_level_map(self):
# mixed dict and list
mix_path = 'hello.world[0].headers[0]'
value = 'hello world'
m1 = MultiLevelDict()
m1.set_element(mix_path, value)
m1_flatmap = m1.get_flat_map(m1.get_dict())
self.assertEqual(value, m1_flatmap.get(mix_path))
self.assertEqual(m1_flatmap.get(mix_path), m1.get_element(mix_path))
# nested arrays
data = {'a': {'b': [1, 2, 3, [4]]}}
mm = MultiLevelDict(data)
self.assertEqual(4, mm.get_element('a.b[3][0]'))
# verify set_element method
composite_path = 'a.b[3][4][1]'
mm.set_element(composite_path, value)
self.assertEqual(value, mm.get_element(composite_path))
# test flatten map
flat_map = mm.get_flat_map(mm.get_dict())
m2 = MultiLevelDict()
for k in flat_map:
m2.set_element(k, flat_map.get(k))
# the original and the reconstructed dictionaries must match
self.assertEqual(mm.get_dict(), m2.get_dict())
# the individual values must match using two different retrieval methods
for k in flat_map:
self.assertEqual(flat_map.get(k), m2.get_element(k))
has_error = False
try:
mm.set_element('this.is.invalid[0', value)
except ValueError as e:
has_error = True
self.assertTrue('missing end bracket' in str(e))
self.assertTrue(has_error)
has_error = False
try:
mm.set_element('this.is.invalid[0][', value)
except ValueError as e:
has_error = True
self.assertTrue('missing end bracket' in str(e))
self.assertTrue(has_error)
has_error = False
try:
mm.set_element('this.is.invalid[0][x', value)
except ValueError as e:
has_error = True
self.assertTrue('missing end bracket' in str(e))
self.assertTrue(has_error)
has_error = False
try:
mm.set_element('this.is.invalid[0][1', value)
except ValueError as e:
has_error = True
self.assertTrue('missing end bracket' in str(e))
self.assertTrue(has_error)
has_error = False
try:
mm.set_element('this.is.invalid[0][1][x]', value)
except ValueError as e:
has_error = True
self.assertTrue('indexes must be digits' in str(e))
self.assertTrue(has_error)
has_error = False
try:
mm.set_element('this.is.invalid 0][1][x]', value)
except ValueError as e:
has_error = True
self.assertTrue('missing start bracket' in str(e))
self.assertTrue(has_error)
| [
"mercury.system.dict_util.MultiLevelDict"
] | [((903, 919), 'mercury.system.dict_util.MultiLevelDict', 'MultiLevelDict', ([], {}), '()\n', (917, 919), False, 'from mercury.system.dict_util import MultiLevelDict\n'), ((1228, 1248), 'mercury.system.dict_util.MultiLevelDict', 'MultiLevelDict', (['data'], {}), '(data)\n', (1242, 1248), False, 'from mercury.system.dict_util import MultiLevelDict\n'), ((1582, 1598), 'mercury.system.dict_util.MultiLevelDict', 'MultiLevelDict', ([], {}), '()\n', (1596, 1598), False, 'from mercury.system.dict_util import MultiLevelDict\n')] |
# -*- coding: utf-8 -*-
from copy import deepcopy
# Import all the packages
import torch
from torch.autograd import Variable
import torch.nn as nn
import numpy as np
import torch.optim as optim
import torch.nn.functional as f
import matplotlib.pyplot as plt
import networkx as nx
import timeit
from sklearn import metrics
import scipy.sparse as sparse
import scipy.stats as stats
from scipy.io import loadmat # this is the SciPy module that loads mat-files
import pandas as pd
from torch_sparse import spspmm
class MDS(nn.Module):
def __init__(self,data,relation, input_size,latent_dim,sample_size,device):
super(MDS, self).__init__()
self.input_size=input_size
#self.scaling_factor=nn.Parameter(torch.randn(1,device=device))
self.latent_dim=latent_dim
self.gamma=nn.Parameter(torch.randn(self.input_size,device=device))
#self.alpha=nn.Parameter(torch.randn(self.input_size,device=device))
#create indices to index properly the receiver and senders variable
self.relation = relation
self.pdist = nn.PairwiseDistance(p=2,eps=0)
self.sampling_weights=torch.ones(self.input_size,device=device)
self.sample_size=sample_size
self.latent_z=nn.Parameter(torch.zeros(self.input_size,latent_dim,device=device))
self.latent_z.data=torch.randn(self.input_size,latent_dim,device=device)
def sample_network(self):
# USE torch_sparse lib i.e. : from torch_sparse import spspmm
# sample for undirected network
sample_idx=torch.multinomial(self.sampling_weights, self.sample_size,replacement=False)
# translate sampled indices w.r.t. to the full matrix, it is just a diagonal matrix
indices_translator=torch.cat([sample_idx.unsqueeze(0),sample_idx.unsqueeze(0)],0)
# adjacency matrix in edges format
edges=torch.cat([self.sparse_i_idx.unsqueeze(0),self.sparse_j_idx.unsqueeze(0)],0)
# matrix multiplication B = Adjacency x Indices translator
# see spspmm function, it give a multiplication between two matrices
# indexC is the indices where we have non-zero values and valueC the actual values (in this case ones)
indexC, valueC = spspmm(edges,torch.ones(edges.shape[1]), indices_translator,torch.ones(indices_translator.shape[1]),self.input_size,self.input_size,self.input_size,coalesced=True)
# second matrix multiplication C = Indices translator x B, indexC returns where we have edges inside the sample
indexC, valueC=spspmm(indices_translator,torch.ones(indices_translator.shape[1]),indexC,valueC,self.input_size,self.input_size,self.input_size,coalesced=True)
# edge row position
sparse_i_sample=indexC[0,:]
# edge column position
sparse_j_sample=indexC[1,:]
return sample_idx,sparse_i_sample,sparse_j_sample
def sample(self):
sample_idx=torch.multinomial(self.sampling_weights, self.sample_size,replacement=False)
return sample_idx
#introduce the likelihood function containing the two extra biases gamma_i and alpha_j
def MDS_likelihood(self,epoch):
'''
        sampled MDS stress objective (the earlier Poisson log-likelihood version is kept in the comments below)
'''
self.epoch=epoch
#sample_idx,sparse_sample_i,sparse_sample_j=self.sample_network()
# mat=torch.exp(-((self.latent_z[sample_idx].unsqueeze(1)-self.latent_z[sample_idx]+1e-06)**2).sum(-1)**0.5)
# z_pdist1=0.5*torch.mm(torch.exp(self.gamma[sample_idx].unsqueeze(0)),(torch.mm((mat-torch.diag(torch.diagonal(mat))),torch.exp(self.gamma[sample_idx]).unsqueeze(-1))))
# #take the sampled matrix indices in order to index gamma_i and alpha_j correctly and in agreement with the previous
# z_pdist2=(-((((self.latent_z[sparse_sample_i]-self.latent_z[sparse_sample_j]+1e-06)**2).sum(-1)))**0.5+self.gamma[sparse_sample_i]+self.gamma[sparse_sample_j]).sum()
# log_likelihood_sparse=z_pdist2-z_pdist1
sample_idx = self.sample()
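        # raw stress over the sampled block: squared error between latent pairwise
        # distances and the target distances, weighted by 1/target; the diagonal of
        # the sampled target copy is set to 5 so zero self-distances do not divide by zero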
log_likelihood = ((torch.cdist(self.latent_z[sample_idx],self.latent_z[sample_idx]) - self.relation[sample_idx][:,sample_idx])**2 / self.relation[sample_idx][:,sample_idx].fill_diagonal_(5) ).sum()**0.5
return log_likelihood
def link_prediction(self):
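        # score held-out (removed) node pairs; returns ROC-AUC and precision-recall AUC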
with torch.no_grad():
z_pdist_miss=(((self.latent_z[self.removed_i]-self.latent_z[self.removed_j])**2).sum(-1))**0.5
logit_u_miss=-z_pdist_miss+self.gamma[self.removed_i]+self.gamma[self.removed_j]
rates=torch.exp(logit_u_miss)
self.rates=rates
target=torch.cat((torch.zeros(self.non_sparse_i_idx_removed.shape[0]),torch.ones(self.sparse_i_idx_removed.shape[0])))
#fpr, tpr, thresholds = metrics.roc_curve(target.cpu().data.numpy(), rates.cpu().data.numpy())
precision, tpr, thresholds = metrics.precision_recall_curve(target.cpu().data.numpy(), rates.cpu().data.numpy())
return metrics.roc_auc_score(target.cpu().data.numpy(),rates.cpu().data.numpy()),metrics.auc(tpr,precision)
def get_latent_coord(self):
return self.latent_z.data
| [
"torch.multinomial",
"sklearn.metrics.auc",
"torch.exp",
"torch.cdist",
"torch.no_grad",
"torch.nn.PairwiseDistance",
"torch.zeros",
"torch.randn",
"torch.ones"
] | [((1218, 1249), 'torch.nn.PairwiseDistance', 'nn.PairwiseDistance', ([], {'p': '(2)', 'eps': '(0)'}), '(p=2, eps=0)\n', (1237, 1249), True, 'import torch.nn as nn\n'), ((1282, 1324), 'torch.ones', 'torch.ones', (['self.input_size'], {'device': 'device'}), '(self.input_size, device=device)\n', (1292, 1324), False, 'import torch\n'), ((1487, 1542), 'torch.randn', 'torch.randn', (['self.input_size', 'latent_dim'], {'device': 'device'}), '(self.input_size, latent_dim, device=device)\n', (1498, 1542), False, 'import torch\n'), ((1722, 1799), 'torch.multinomial', 'torch.multinomial', (['self.sampling_weights', 'self.sample_size'], {'replacement': '(False)'}), '(self.sampling_weights, self.sample_size, replacement=False)\n', (1739, 1799), False, 'import torch\n'), ((3109, 3186), 'torch.multinomial', 'torch.multinomial', (['self.sampling_weights', 'self.sample_size'], {'replacement': '(False)'}), '(self.sampling_weights, self.sample_size, replacement=False)\n', (3126, 3186), False, 'import torch\n'), ((951, 994), 'torch.randn', 'torch.randn', (['self.input_size'], {'device': 'device'}), '(self.input_size, device=device)\n', (962, 994), False, 'import torch\n'), ((1402, 1457), 'torch.zeros', 'torch.zeros', (['self.input_size', 'latent_dim'], {'device': 'device'}), '(self.input_size, latent_dim, device=device)\n', (1413, 1457), False, 'import torch\n'), ((2416, 2442), 'torch.ones', 'torch.ones', (['edges.shape[1]'], {}), '(edges.shape[1])\n', (2426, 2442), False, 'import torch\n'), ((2463, 2502), 'torch.ones', 'torch.ones', (['indices_translator.shape[1]'], {}), '(indices_translator.shape[1])\n', (2473, 2502), False, 'import torch\n'), ((2738, 2777), 'torch.ones', 'torch.ones', (['indices_translator.shape[1]'], {}), '(indices_translator.shape[1])\n', (2748, 2777), False, 'import torch\n'), ((4560, 4575), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4573, 4575), False, 'import torch\n'), ((4798, 4821), 'torch.exp', 'torch.exp', (['logit_u_miss'], {}), '(logit_u_miss)\n', (4807, 4821), False, 'import torch\n'), ((5325, 5352), 'sklearn.metrics.auc', 'metrics.auc', (['tpr', 'precision'], {}), '(tpr, precision)\n', (5336, 5352), False, 'from sklearn import metrics\n'), ((4885, 4936), 'torch.zeros', 'torch.zeros', (['self.non_sparse_i_idx_removed.shape[0]'], {}), '(self.non_sparse_i_idx_removed.shape[0])\n', (4896, 4936), False, 'import torch\n'), ((4937, 4983), 'torch.ones', 'torch.ones', (['self.sparse_i_idx_removed.shape[0]'], {}), '(self.sparse_i_idx_removed.shape[0])\n', (4947, 4983), False, 'import torch\n'), ((4273, 4338), 'torch.cdist', 'torch.cdist', (['self.latent_z[sample_idx]', 'self.latent_z[sample_idx]'], {}), '(self.latent_z[sample_idx], self.latent_z[sample_idx])\n', (4284, 4338), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
m_f = np.load('objects/simulation_model_freq.npy')[:50]
m_p = np.load('objects/simulation_model_power.npy')[:50]
eeg_f = np.load('objects/real_eeg_freq.npy0.npy')[:50]
eeg_p = np.load('objects/real_eeg_power_0.npy')[:50]
plt.figure()
plt.semilogy(eeg_f, eeg_p,linewidth=2.0,c = 'b')
plt.xlabel('frequency [Hz]')
plt.ylabel('Linear spectrum [V RMS]')
plt.title('Power spectrum (scipy.signal.welch)')
plt.show()
| [
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.show"
] | [((53, 76), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (66, 76), True, 'import matplotlib.pyplot as plt\n'), ((301, 313), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (311, 313), True, 'import matplotlib.pyplot as plt\n'), ((314, 362), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['eeg_f', 'eeg_p'], {'linewidth': '(2.0)', 'c': '"""b"""'}), "(eeg_f, eeg_p, linewidth=2.0, c='b')\n", (326, 362), True, 'import matplotlib.pyplot as plt\n'), ((363, 391), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""frequency [Hz]"""'], {}), "('frequency [Hz]')\n", (373, 391), True, 'import matplotlib.pyplot as plt\n'), ((392, 429), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Linear spectrum [V RMS]"""'], {}), "('Linear spectrum [V RMS]')\n", (402, 429), True, 'import matplotlib.pyplot as plt\n'), ((430, 478), 'matplotlib.pyplot.title', 'plt.title', (['"""Power spectrum (scipy.signal.welch)"""'], {}), "('Power spectrum (scipy.signal.welch)')\n", (439, 478), True, 'import matplotlib.pyplot as plt\n'), ((479, 489), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (487, 489), True, 'import matplotlib.pyplot as plt\n'), ((84, 128), 'numpy.load', 'np.load', (['"""objects/simulation_model_freq.npy"""'], {}), "('objects/simulation_model_freq.npy')\n", (91, 128), True, 'import numpy as np\n'), ((140, 185), 'numpy.load', 'np.load', (['"""objects/simulation_model_power.npy"""'], {}), "('objects/simulation_model_power.npy')\n", (147, 185), True, 'import numpy as np\n'), ((199, 240), 'numpy.load', 'np.load', (['"""objects/real_eeg_freq.npy0.npy"""'], {}), "('objects/real_eeg_freq.npy0.npy')\n", (206, 240), True, 'import numpy as np\n'), ((254, 293), 'numpy.load', 'np.load', (['"""objects/real_eeg_power_0.npy"""'], {}), "('objects/real_eeg_power_0.npy')\n", (261, 293), True, 'import numpy as np\n')] |
"""Types to match those in the API."""
from typing import Any, Dict, NewType, NamedTuple
LabelName = NewType('LabelName', str)
StateMachine = NewType('StateMachine', str)
State = NewType('State', str)
Metadata = Dict[str, Any]
LabelRef = NamedTuple('LabelRef', [
('name', LabelName),
('state_machine', StateMachine),
])
Label = NamedTuple('Label', [
('ref', LabelRef),
('metadata', Metadata),
('state', State),
])
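# Illustrative usage of the types above (hypothetical values):
#   ref = LabelRef(name=LabelName('user-1'), state_machine=StateMachine('onboarding'))
#   label = Label(ref=ref, metadata={}, state=State('start'))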
| [
"typing.NamedTuple",
"typing.NewType"
] | [((103, 128), 'typing.NewType', 'NewType', (['"""LabelName"""', 'str'], {}), "('LabelName', str)\n", (110, 128), False, 'from typing import Any, Dict, NewType, NamedTuple\n'), ((144, 172), 'typing.NewType', 'NewType', (['"""StateMachine"""', 'str'], {}), "('StateMachine', str)\n", (151, 172), False, 'from typing import Any, Dict, NewType, NamedTuple\n'), ((181, 202), 'typing.NewType', 'NewType', (['"""State"""', 'str'], {}), "('State', str)\n", (188, 202), False, 'from typing import Any, Dict, NewType, NamedTuple\n'), ((242, 320), 'typing.NamedTuple', 'NamedTuple', (['"""LabelRef"""', "[('name', LabelName), ('state_machine', StateMachine)]"], {}), "('LabelRef', [('name', LabelName), ('state_machine', StateMachine)])\n", (252, 320), False, 'from typing import Any, Dict, NewType, NamedTuple\n'), ((341, 427), 'typing.NamedTuple', 'NamedTuple', (['"""Label"""', "[('ref', LabelRef), ('metadata', Metadata), ('state', State)]"], {}), "('Label', [('ref', LabelRef), ('metadata', Metadata), ('state',\n State)])\n", (351, 427), False, 'from typing import Any, Dict, NewType, NamedTuple\n')] |
#pylint: skip-file
"""
NOTE: This is a local copy of the readers module, taken from the
support-tools/timeseries repository.
Provides functions to read FTDC data from either an FTDC file or from
a file containing serverStatus JSON documents, one per line. Each
reader takes a filename argument and returns a generator that yields a
sequence of chunks. Each chunk is a map from tuples keys to lists, where
the tuple key represents a path through a JSON document from root to leaf,
and the list is a list of values for that path.
"""
from __future__ import print_function
import collections
import mmap
import os
import re
import struct
import zlib
import sys
import json
def _msg(*s):
print(' '.join(s), file=sys.stderr)
#
# basic bson parser, to be extended as needed
# has optional special handling for ftdc:
# returns numeric types as int64
# ignores non-metric fields
# returns result as tree of OrderedDict, preserving order
#
_int32 = struct.Struct('<i')
_uint32 = struct.Struct('<I')
_int64 = struct.Struct('<q')
_uint64 = struct.Struct('<Q')
_double = struct.Struct('<d')
BSON = collections.OrderedDict
class BsonReaderException(Exception):
"""
General exception when parsin bson.
"""
pass
def _read_bson_doc(buf, at, ftdc=False):
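    # parses one BSON document starting at offset 'at'; returns an OrderedDict whose
    # bson_len attribute records the document's byte length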
doc = BSON()
doc_len = _int32.unpack_from(buf, at)[0]
doc.bson_len = doc_len
doc_end = at + doc_len
at += 4
while at < doc_end:
bson_type = buf[at]
at += 1
name_end = buf.find(b'\0', at)
n = buf[at : name_end].decode('latin1')
at = name_end + 1
if bson_type==0: # eoo
return doc
elif bson_type==1: # _double
v = _double.unpack_from(buf, at)[0]
if ftdc: v = int(v)
l = 8
elif bson_type==2: # string
l = _uint32.unpack_from(buf, at)[0]
at += 4
v = buf[at : at+l-1] if not ftdc else None
elif bson_type==3: # subdoc
v = _read_bson_doc(buf, at, ftdc)
l = v.bson_len
elif bson_type==4: # array
v = _read_bson_doc(buf, at, ftdc)
l = v.bson_len
if not ftdc: v = v.values() # return as array
elif bson_type==8: # bool
v = buf[at]
l = 1
elif bson_type==5: # bindata
l = _uint32.unpack_from(buf, at)[0]
at += 5 # length plus subtype
v = buf[at : at+l] if not ftdc else None
elif bson_type==7: # objectid
v = None # xxx always ignore for now
l = 12
elif bson_type==9: # datetime
v = _uint64.unpack_from(buf, at)[0]
v = int(v) if ftdc else v / 1000.0
l = 8
elif bson_type==16: # _int32
v = _int32.unpack_from(buf, at)[0]
if ftdc: v = int(v)
l = 4
elif bson_type==17: # timestamp
v = BSON()
v['t'] = int(_uint32.unpack_from(buf, at)[0]) # seconds
v['i'] = int(_uint32.unpack_from(buf, at+4)[0]) # increment
l = 8
elif bson_type==18: # _int64
v = int(_int64.unpack_from(buf, at)[0])
l = 8
elif bson_type==0xff or bson_type==0x7f: # minkey, maxkey
v = None # xxx always ignore for now
l = 0
else:
err_msg = 'unknown type %d(%x) at %d(%x)'
raise BsonReaderException(err_msg % (bson_type, bson_type, at, at))
if v != None:
doc[n] = v
at += l
assert(not 'eoo not found') # should have seen an eoo and returned
def _decode_chunk(chunk_doc, first_only):
# our result is a map from metric keys to list of values for each metric key
# a metric key is a path through the sample document represented as a tuple
metrics = collections.OrderedDict()
# decompress chunk data field
data = chunk_doc['data']
metrics.chunk_len = len(data)
data = data[4:] # skip uncompressed length, we don't need it
data = zlib.decompress(data)
# read reference doc from chunk data, ignoring non-metric fields
ref_doc = _read_bson_doc(data, 0, ftdc=True)
#print_bson_doc(ref_doc)
# traverse the reference document and extract map from metrics keys to values
def extract_keys(doc, n=()):
for k, v in doc.items():
nn = n + (k,)
if type(v)==BSON:
extract_keys(v, nn)
else:
metrics[nn] = [v]
extract_keys(ref_doc)
# get nmetrics, ndeltas
nmetrics = _uint32.unpack_from(data, ref_doc.bson_len)[0]
ndeltas = _uint32.unpack_from(data, ref_doc.bson_len+4)[0]
nsamples = ndeltas + 1
at = ref_doc.bson_len + 8
if nmetrics != len(metrics):
# xxx remove when SERVER-20602 is fixed
_msg('ignoring bad chunk: nmetrics=%d, len(metrics)=%d' % (
nmetrics, len(metrics)))
return None
metrics.nsamples = nsamples
# only want first value in every chunk?
if first_only:
return metrics
# unpacks ftdc packed ints
def unpack(data, at):
res = 0
shift = 0
while True:
b = data[at]
res |= (b&0x7F) << shift
at += 1
if not (b&0x80):
if res > 0x7fffffffffffffff: # negative 64-bit value
res = int(res-0x10000000000000000)
return res, at
shift += 7
# unpack, run-length, delta, transpose the metrics
nzeroes = 0
for metric_values in metrics.values():
value = metric_values[-1]
for _ in range(ndeltas):
if nzeroes:
delta = 0
nzeroes -= 1
else:
delta, at = unpack(data, at)
if delta==0:
nzeroes, at = unpack(data, at)
value += delta
metric_values.append(value)
assert(at==len(data))
# our result
return metrics
def read_ftdc(fn, first_only = False):
"""
Read an ftdc file. fn may be either a single metrics file, or a
directory containing a sequence of metrics files.
"""
# process dir
if os.path.isdir(fn):
for f in sorted(os.listdir(fn)):
for chunk in read_ftdc(os.path.join(fn, f)):
yield chunk
# process file
else:
# open and map file
f = open(fn)
buf = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
at = 0
# traverse the file reading type 1 chunks
while at < len(buf):
try:
chunk_doc = _read_bson_doc(buf, at)
at += chunk_doc.bson_len
if chunk_doc['type']==1:
yield _decode_chunk(chunk_doc, first_only)
except Exception as e:
print('bad bson doc: ')
raise
# bson docs should exactly cover file
assert(at==len(buf))
#
# xxx does not correctly handle schema change from one line to the next
#
def _parse(j, result, key):
for k, v in j.items():
kk = key
if k != 'floatApprox':
kk += (k,)
if type(v)==dict:
_parse(v, result, kk)
else:
result[kk].append(v)
def read_ss(fn):
"""Read a sequence of serverStatus JSON documents, one per line"""
result = collections.defaultdict(list)
for i, line in enumerate(open(fn)):
j = json.loads(line)
_parse(j, result, ('serverStatus',))
if i>0 and i%100==0:
yield result
result.clear()
yield result
#
#
#
def read(fn):
"""Try all readers until one succeeds"""
for f, name in ((read_ss,'ss'), (read_ftdc,'ftdc')):
generator = f(fn)
try:
chunk = next(generator)
if len(chunk) > 0:
for chunk in generator:
yield chunk
break
except Exception as e:
print >>sys.stderr, 'does not appear to be %s: %s' % (name, str(e))
#
# sniff test
#
if __name__ == '__main__':
for chunk in read(sys.argv[1]):
        keys = list(chunk.keys())
        values = list(chunk.values())
        assert(all(len(values[0])==len(v) for v in values))
        print('chunk, %d keys, %d values, key 0: %s, key 0 value 0: %d' % (
            len(keys), len(values[0]), keys[0], chunk[keys[0]][0]
))
| [
"collections.OrderedDict",
"json.loads",
"os.listdir",
"os.path.join",
"os.path.isdir",
"collections.defaultdict",
"struct.Struct",
"zlib.decompress"
] | [((958, 977), 'struct.Struct', 'struct.Struct', (['"""<i"""'], {}), "('<i')\n", (971, 977), False, 'import struct\n'), ((988, 1007), 'struct.Struct', 'struct.Struct', (['"""<I"""'], {}), "('<I')\n", (1001, 1007), False, 'import struct\n'), ((1017, 1036), 'struct.Struct', 'struct.Struct', (['"""<q"""'], {}), "('<q')\n", (1030, 1036), False, 'import struct\n'), ((1047, 1066), 'struct.Struct', 'struct.Struct', (['"""<Q"""'], {}), "('<Q')\n", (1060, 1066), False, 'import struct\n'), ((1077, 1096), 'struct.Struct', 'struct.Struct', (['"""<d"""'], {}), "('<d')\n", (1090, 1096), False, 'import struct\n'), ((3819, 3844), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (3842, 3844), False, 'import collections\n'), ((4019, 4040), 'zlib.decompress', 'zlib.decompress', (['data'], {}), '(data)\n', (4034, 4040), False, 'import zlib\n'), ((6185, 6202), 'os.path.isdir', 'os.path.isdir', (['fn'], {}), '(fn)\n', (6198, 6202), False, 'import os\n'), ((7369, 7398), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (7392, 7398), False, 'import collections\n'), ((7451, 7467), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (7461, 7467), False, 'import json\n'), ((6228, 6242), 'os.listdir', 'os.listdir', (['fn'], {}), '(fn)\n', (6238, 6242), False, 'import os\n'), ((6280, 6299), 'os.path.join', 'os.path.join', (['fn', 'f'], {}), '(fn, f)\n', (6292, 6299), False, 'import os\n')] |
""" test automol.zmatrix
"""
import automol
from automol.zmatrix.newzmat._bimol_ts import hydrogen_abstraction
from automol.zmatrix.newzmat._bimol_ts import addition
from automol.zmatrix.newzmat._bimol_ts import insertion
from automol.zmatrix.newzmat._bimol_ts import substitution
from automol.zmatrix.newzmat._unimol_ts import hydrogen_migration
from automol.zmatrix.newzmat._unimol_ts import beta_scission
from automol.zmatrix.newzmat._unimol_ts import concerted_unimol_elimination
from automol.zmatrix.newzmat._unimol_ts import ring_forming_scission
from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs
# ZMA Bank
C2H6_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('CC')))
C2H4_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('C=C')))
CH4_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('C')))
CH2_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('[CH2]')))
OH_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('[OH]')))
H_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('[H]')))
CH3_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('[CH3]')))
H2O_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('O')))
HO2_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('O[O]')))
CH2O_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('C=O')))
CH3CH2O_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('CC[O]')))
H2O2_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('OO')))
CH2COH_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('[CH2]CO')))
CH3CH2CH2CH2_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('CCC[CH2]')))
CH3CHCH2CH3_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('CC[CH]C')))
C3H8_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('CCC')))
CH3CH2OO_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('CCO[O]')))
CH2CH2OOH_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('[CH2]COO')))
cCH2OCH2_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('C1CO1')))
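# Any entry in the bank can be inspected with e.g.:
#   print(automol.zmatrix.string(C2H6_ZMA))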
# BIMOL TS
def test__ts_hydrogen_abstraction():
""" test zmatrix.ts.hydrogen_abstraction
"""
rct_zmas = [CH4_ZMA, OH_ZMA]
prd_zmas = [CH3_ZMA, H2O_ZMA]
rct_zmas, rct_gras = shifted_standard_zmas_graphs(
rct_zmas, remove_stereo=True)
prd_zmas, prd_gras = shifted_standard_zmas_graphs(
prd_zmas, remove_stereo=True)
tras, _, _, rtyp = automol.graph.reac.classify(rct_gras, prd_gras)
print('\nrtyp', rtyp)
zma_ret = hydrogen_abstraction(rct_zmas, prd_zmas, tras)
print('zma\n', automol.zmatrix.string(zma_ret['ts_zma']))
print('bnd keys\n', zma_ret['bnd_keys'])
print('const keys\n', zma_ret['const_keys'])
def test__ts_addition():
""" test zmatrix.ts.addition
"""
rct_zmas = [C2H4_ZMA, OH_ZMA]
prd_zmas = [CH2COH_ZMA]
rct_zmas, rct_gras = shifted_standard_zmas_graphs(
rct_zmas, remove_stereo=True)
prd_zmas, prd_gras = shifted_standard_zmas_graphs(
prd_zmas, remove_stereo=True)
tras, _, _, rtyp = automol.graph.reac.classify(rct_gras, prd_gras)
print('\nrtyp', rtyp)
zma_ret = addition(rct_zmas, prd_zmas, tras)
print('zma\n', automol.zmatrix.string(zma_ret['ts_zma']))
print('bnd keys\n', zma_ret['bnd_keys'])
print('const keys\n', zma_ret['const_keys'])
def test__ts_insertion():
""" test zmatrix.ts.insertion
"""
rct_zmas = [C2H6_ZMA, CH2_ZMA]
prd_zmas = [C3H8_ZMA]
rct_zmas, rct_gras = shifted_standard_zmas_graphs(
rct_zmas, remove_stereo=True)
prd_zmas, prd_gras = shifted_standard_zmas_graphs(
prd_zmas, remove_stereo=True)
tras, _, _, rtyp = automol.graph.reac.classify(rct_gras, prd_gras)
print('\nrtyp', rtyp)
zma_ret = insertion(rct_zmas, prd_zmas, tras)
print('zma\n', automol.zmatrix.string(zma_ret['ts_zma']))
print('bnd keys\n', zma_ret['bnd_keys'])
print('const keys\n', zma_ret['const_keys'])
def test__ts_substitution():
""" test zmatrix.ts.substitution
"""
rct_zmas = [H2O2_ZMA, H_ZMA]
prd_zmas = [H2O_ZMA, OH_ZMA]
rct_zmas, rct_gras = shifted_standard_zmas_graphs(
rct_zmas, remove_stereo=True)
prd_zmas, prd_gras = shifted_standard_zmas_graphs(
prd_zmas, remove_stereo=True)
tras, _, _, rtyp = automol.graph.reac.classify(rct_gras, prd_gras)
print('\nrtyp', rtyp)
zma_ret = substitution(rct_zmas, prd_zmas, tras)
print('zma\n', automol.zmatrix.string(zma_ret['ts_zma']))
print('bnd keys\n', zma_ret['bnd_keys'])
print('const keys\n', zma_ret['const_keys'])
# UNIMOL TS
def test__ts_hydrogen_migration():
""" test zmatrix.ts.hydrogen_migration
"""
rct_zmas = [CH3CH2CH2CH2_ZMA]
prd_zmas = [CH3CHCH2CH3_ZMA]
rct_zmas, rct_gras = shifted_standard_zmas_graphs(
rct_zmas, remove_stereo=True)
prd_zmas, prd_gras = shifted_standard_zmas_graphs(
prd_zmas, remove_stereo=True)
rct_zmas = [rct_zmas]
prd_zmas = [prd_zmas]
tras, _, _, rtyp = automol.graph.reac.classify(rct_gras, prd_gras)
print('\nrtyp', rtyp)
zma_ret = hydrogen_migration(rct_zmas, prd_zmas, tras)
print('zma\n', automol.zmatrix.string(zma_ret['ts_zma']))
print('bnd keys\n', zma_ret['bnd_keys'])
print('const keys\n', zma_ret['const_keys'])
def test__ts_beta_scission():
""" test zmatrix.ts.beta_scission
"""
rct_zmas = [CH3CH2O_ZMA]
prd_zmas = [CH3_ZMA, CH2O_ZMA]
rct_zmas, rct_gras = shifted_standard_zmas_graphs(
rct_zmas, remove_stereo=True)
prd_zmas, prd_gras = shifted_standard_zmas_graphs(
prd_zmas, remove_stereo=True)
tras, _, _, rtyp = automol.graph.reac.classify(rct_gras, prd_gras)
print('\nrtyp', rtyp)
zma_ret = beta_scission(rct_zmas, prd_zmas, tras)
print('zma\n', automol.zmatrix.string(zma_ret['ts_zma']))
print('bnd keys\n', zma_ret['bnd_keys'])
print('const keys\n', zma_ret['const_keys'])
def test__ts_elimination():
""" test zmatrix.ts.elimination
"""
rct_zmas = [CH3CH2OO_ZMA]
prd_zmas = [C2H4_ZMA, HO2_ZMA]
rct_zmas, rct_gras = shifted_standard_zmas_graphs(
rct_zmas, remove_stereo=True)
prd_zmas, prd_gras = shifted_standard_zmas_graphs(
prd_zmas, remove_stereo=True)
rct_zmas = [rct_zmas]
prd_zmas = [prd_zmas]
tras, _, _, rtyp = automol.graph.reac.classify(rct_gras, prd_gras)
print('\nrtyp', rtyp)
zma_ret = concerted_unimol_elimination(rct_zmas, prd_zmas, tras)
print('zma\n', automol.zmatrix.string(zma_ret['ts_zma']))
print('bnd keys\n', zma_ret['bnd_keys'])
print('const keys\n', zma_ret['const_keys'])
def test__ts_ring_forming_scission():
""" test zmatrix.ts.ring_forming_scission
"""
rct_zmas = [CH2CH2OOH_ZMA]
prd_zmas = [cCH2OCH2_ZMA, OH_ZMA]
rct_zmas, rct_gras = shifted_standard_zmas_graphs(
rct_zmas, remove_stereo=True)
prd_zmas, prd_gras = shifted_standard_zmas_graphs(
prd_zmas, remove_stereo=True)
rct_zmas = [rct_zmas]
prd_zmas = [prd_zmas]
tras, _, _, rtyp = automol.graph.reac.classify(rct_gras, prd_gras)
print('\nrtyp', rtyp)
zma_ret = ring_forming_scission(rct_zmas, prd_zmas, tras)
print('zma\n', automol.zmatrix.string(zma_ret['ts_zma']))
print('bnd keys\n', zma_ret['bnd_keys'])
print('const keys\n', zma_ret['const_keys'])
if __name__ == '__main__':
# BIMOL
test__ts_hydrogen_abstraction()
test__ts_addition()
test__ts_substitution()
# test__ts_insertion()
# UNIMOL
test__ts_hydrogen_migration()
test__ts_beta_scission()
test__ts_elimination()
# test__ts_ring_forming_scission()
| [
"automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs",
"automol.zmatrix.newzmat._unimol_ts.hydrogen_migration",
"automol.zmatrix.newzmat._unimol_ts.ring_forming_scission",
"automol.zmatrix.newzmat._bimol_ts.substitution",
"automol.smiles.inchi",
"automol.zmatrix.newzmat._bimol_ts.hydrogen_abstracti... | [((2592, 2650), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['rct_zmas'], {'remove_stereo': '(True)'}), '(rct_zmas, remove_stereo=True)\n', (2620, 2650), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((2685, 2743), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['prd_zmas'], {'remove_stereo': '(True)'}), '(prd_zmas, remove_stereo=True)\n', (2713, 2743), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((2777, 2824), 'automol.graph.reac.classify', 'automol.graph.reac.classify', (['rct_gras', 'prd_gras'], {}), '(rct_gras, prd_gras)\n', (2804, 2824), False, 'import automol\n'), ((2866, 2912), 'automol.zmatrix.newzmat._bimol_ts.hydrogen_abstraction', 'hydrogen_abstraction', (['rct_zmas', 'prd_zmas', 'tras'], {}), '(rct_zmas, prd_zmas, tras)\n', (2886, 2912), False, 'from automol.zmatrix.newzmat._bimol_ts import hydrogen_abstraction\n'), ((3225, 3283), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['rct_zmas'], {'remove_stereo': '(True)'}), '(rct_zmas, remove_stereo=True)\n', (3253, 3283), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((3318, 3376), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['prd_zmas'], {'remove_stereo': '(True)'}), '(prd_zmas, remove_stereo=True)\n', (3346, 3376), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((3410, 3457), 'automol.graph.reac.classify', 'automol.graph.reac.classify', (['rct_gras', 'prd_gras'], {}), '(rct_gras, prd_gras)\n', (3437, 3457), False, 'import automol\n'), ((3499, 3533), 'automol.zmatrix.newzmat._bimol_ts.addition', 'addition', (['rct_zmas', 'prd_zmas', 'tras'], {}), '(rct_zmas, prd_zmas, tras)\n', (3507, 3533), False, 'from automol.zmatrix.newzmat._bimol_ts import addition\n'), ((3847, 3905), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['rct_zmas'], {'remove_stereo': '(True)'}), '(rct_zmas, remove_stereo=True)\n', (3875, 3905), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((3940, 3998), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['prd_zmas'], {'remove_stereo': '(True)'}), '(prd_zmas, remove_stereo=True)\n', (3968, 3998), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((4032, 4079), 'automol.graph.reac.classify', 'automol.graph.reac.classify', (['rct_gras', 'prd_gras'], {}), '(rct_gras, prd_gras)\n', (4059, 4079), False, 'import automol\n'), ((4121, 4156), 'automol.zmatrix.newzmat._bimol_ts.insertion', 'insertion', (['rct_zmas', 'prd_zmas', 'tras'], {}), '(rct_zmas, prd_zmas, tras)\n', (4130, 4156), False, 'from automol.zmatrix.newzmat._bimol_ts import insertion\n'), ((4481, 4539), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['rct_zmas'], {'remove_stereo': '(True)'}), '(rct_zmas, remove_stereo=True)\n', (4509, 4539), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((4574, 4632), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['prd_zmas'], {'remove_stereo': '(True)'}), '(prd_zmas, remove_stereo=True)\n', (4602, 4632), False, 'from 
automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((4666, 4713), 'automol.graph.reac.classify', 'automol.graph.reac.classify', (['rct_gras', 'prd_gras'], {}), '(rct_gras, prd_gras)\n', (4693, 4713), False, 'import automol\n'), ((4755, 4793), 'automol.zmatrix.newzmat._bimol_ts.substitution', 'substitution', (['rct_zmas', 'prd_zmas', 'tras'], {}), '(rct_zmas, prd_zmas, tras)\n', (4767, 4793), False, 'from automol.zmatrix.newzmat._bimol_ts import substitution\n'), ((5143, 5201), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['rct_zmas'], {'remove_stereo': '(True)'}), '(rct_zmas, remove_stereo=True)\n', (5171, 5201), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((5236, 5294), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['prd_zmas'], {'remove_stereo': '(True)'}), '(prd_zmas, remove_stereo=True)\n', (5264, 5294), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((5380, 5427), 'automol.graph.reac.classify', 'automol.graph.reac.classify', (['rct_gras', 'prd_gras'], {}), '(rct_gras, prd_gras)\n', (5407, 5427), False, 'import automol\n'), ((5469, 5513), 'automol.zmatrix.newzmat._unimol_ts.hydrogen_migration', 'hydrogen_migration', (['rct_zmas', 'prd_zmas', 'tras'], {}), '(rct_zmas, prd_zmas, tras)\n', (5487, 5513), False, 'from automol.zmatrix.newzmat._unimol_ts import hydrogen_migration\n'), ((5838, 5896), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['rct_zmas'], {'remove_stereo': '(True)'}), '(rct_zmas, remove_stereo=True)\n', (5866, 5896), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((5931, 5989), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['prd_zmas'], {'remove_stereo': '(True)'}), '(prd_zmas, remove_stereo=True)\n', (5959, 5989), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((6023, 6070), 'automol.graph.reac.classify', 'automol.graph.reac.classify', (['rct_gras', 'prd_gras'], {}), '(rct_gras, prd_gras)\n', (6050, 6070), False, 'import automol\n'), ((6112, 6151), 'automol.zmatrix.newzmat._unimol_ts.beta_scission', 'beta_scission', (['rct_zmas', 'prd_zmas', 'tras'], {}), '(rct_zmas, prd_zmas, tras)\n', (6125, 6151), False, 'from automol.zmatrix.newzmat._unimol_ts import beta_scission\n'), ((6473, 6531), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['rct_zmas'], {'remove_stereo': '(True)'}), '(rct_zmas, remove_stereo=True)\n', (6501, 6531), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((6566, 6624), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['prd_zmas'], {'remove_stereo': '(True)'}), '(prd_zmas, remove_stereo=True)\n', (6594, 6624), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((6710, 6757), 'automol.graph.reac.classify', 'automol.graph.reac.classify', (['rct_gras', 'prd_gras'], {}), '(rct_gras, prd_gras)\n', (6737, 6757), False, 'import automol\n'), ((6799, 6853), 'automol.zmatrix.newzmat._unimol_ts.concerted_unimol_elimination', 'concerted_unimol_elimination', (['rct_zmas', 'prd_zmas', 'tras'], {}), '(rct_zmas, prd_zmas, tras)\n', (6827, 6853), False, 'from automol.zmatrix.newzmat._unimol_ts import concerted_unimol_elimination\n'), ((7199, 7257), 
'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['rct_zmas'], {'remove_stereo': '(True)'}), '(rct_zmas, remove_stereo=True)\n', (7227, 7257), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((7292, 7350), 'automol.zmatrix.newzmat._util.shifted_standard_zmas_graphs', 'shifted_standard_zmas_graphs', (['prd_zmas'], {'remove_stereo': '(True)'}), '(prd_zmas, remove_stereo=True)\n', (7320, 7350), False, 'from automol.zmatrix.newzmat._util import shifted_standard_zmas_graphs\n'), ((7436, 7483), 'automol.graph.reac.classify', 'automol.graph.reac.classify', (['rct_gras', 'prd_gras'], {}), '(rct_gras, prd_gras)\n', (7463, 7483), False, 'import automol\n'), ((7525, 7572), 'automol.zmatrix.newzmat._unimol_ts.ring_forming_scission', 'ring_forming_scission', (['rct_zmas', 'prd_zmas', 'tras'], {}), '(rct_zmas, prd_zmas, tras)\n', (7546, 7572), False, 'from automol.zmatrix.newzmat._unimol_ts import ring_forming_scission\n'), ((698, 724), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""CC"""'], {}), "('CC')\n", (718, 724), False, 'import automol\n'), ((787, 814), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""C=C"""'], {}), "('C=C')\n", (807, 814), False, 'import automol\n'), ((876, 901), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""C"""'], {}), "('C')\n", (896, 901), False, 'import automol\n'), ((963, 992), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""[CH2]"""'], {}), "('[CH2]')\n", (983, 992), False, 'import automol\n'), ((1053, 1081), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""[OH]"""'], {}), "('[OH]')\n", (1073, 1081), False, 'import automol\n'), ((1141, 1168), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""[H]"""'], {}), "('[H]')\n", (1161, 1168), False, 'import automol\n'), ((1230, 1259), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""[CH3]"""'], {}), "('[CH3]')\n", (1250, 1259), False, 'import automol\n'), ((1321, 1346), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""O"""'], {}), "('O')\n", (1341, 1346), False, 'import automol\n'), ((1408, 1436), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""O[O]"""'], {}), "('O[O]')\n", (1428, 1436), False, 'import automol\n'), ((1499, 1526), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""C=O"""'], {}), "('C=O')\n", (1519, 1526), False, 'import automol\n'), ((1592, 1621), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""CC[O]"""'], {}), "('CC[O]')\n", (1612, 1621), False, 'import automol\n'), ((1684, 1710), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""OO"""'], {}), "('OO')\n", (1704, 1710), False, 'import automol\n'), ((1775, 1806), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""[CH2]CO"""'], {}), "('[CH2]CO')\n", (1795, 1806), False, 'import automol\n'), ((1877, 1909), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""CCC[CH2]"""'], {}), "('CCC[CH2]')\n", (1897, 1909), False, 'import automol\n'), ((1979, 2010), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""CC[CH]C"""'], {}), "('CC[CH]C')\n", (1999, 2010), False, 'import automol\n'), ((2073, 2100), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""CCC"""'], {}), "('CCC')\n", (2093, 2100), False, 'import automol\n'), ((2167, 2197), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""CCO[O]"""'], {}), "('CCO[O]')\n", (2187, 2197), False, 'import automol\n'), ((2265, 2297), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""[CH2]COO"""'], {}), "('[CH2]COO')\n", (2285, 2297), False, 'import automol\n'), ((2364, 2393), 
'automol.smiles.inchi', 'automol.smiles.inchi', (['"""C1CO1"""'], {}), "('C1CO1')\n", (2384, 2393), False, 'import automol\n'), ((2932, 2973), 'automol.zmatrix.string', 'automol.zmatrix.string', (["zma_ret['ts_zma']"], {}), "(zma_ret['ts_zma'])\n", (2954, 2973), False, 'import automol\n'), ((3553, 3594), 'automol.zmatrix.string', 'automol.zmatrix.string', (["zma_ret['ts_zma']"], {}), "(zma_ret['ts_zma'])\n", (3575, 3594), False, 'import automol\n'), ((4176, 4217), 'automol.zmatrix.string', 'automol.zmatrix.string', (["zma_ret['ts_zma']"], {}), "(zma_ret['ts_zma'])\n", (4198, 4217), False, 'import automol\n'), ((4813, 4854), 'automol.zmatrix.string', 'automol.zmatrix.string', (["zma_ret['ts_zma']"], {}), "(zma_ret['ts_zma'])\n", (4835, 4854), False, 'import automol\n'), ((5533, 5574), 'automol.zmatrix.string', 'automol.zmatrix.string', (["zma_ret['ts_zma']"], {}), "(zma_ret['ts_zma'])\n", (5555, 5574), False, 'import automol\n'), ((6171, 6212), 'automol.zmatrix.string', 'automol.zmatrix.string', (["zma_ret['ts_zma']"], {}), "(zma_ret['ts_zma'])\n", (6193, 6212), False, 'import automol\n'), ((6873, 6914), 'automol.zmatrix.string', 'automol.zmatrix.string', (["zma_ret['ts_zma']"], {}), "(zma_ret['ts_zma'])\n", (6895, 6914), False, 'import automol\n'), ((7592, 7633), 'automol.zmatrix.string', 'automol.zmatrix.string', (["zma_ret['ts_zma']"], {}), "(zma_ret['ts_zma'])\n", (7614, 7633), False, 'import automol\n')] |
import torch
import torch.nn as nn
from torch.nn import functional as F
from .cross_entropy_with_uncertainty import CrossEntropyLossWithUncertainty
from .focal_loss import FocalLoss
from dataset import LabelMapper
def get_loss_fn(loss_fn_name,
device,
model_uncertainty,
has_missing_tasks,
mask_uncertain=True,
class_weights=None):
"""Returns the loss function
Args:
loss_fn_name: Name of the loss function. Alternatives: cross_entropy, weighted_loss.
device: Device for loss-related tensors.
model_uncertainty: If true, uncertainty is explicitly modeled in the outputs.
        has_missing_tasks: Set to True if labels for some classes may be missing for some examples.
        mask_uncertain: a bool determining whether or not to mask out the loss for uncertain labels. NOTE: must currently be set to True.
class_weights: a list [negative_weights, positive_weights]
"""
if model_uncertainty:
if loss_fn_name == 'weighted_loss':
return None
loss_fn = CrossEntropyLossWithUncertainty()
else:
apply_masking = has_missing_tasks or mask_uncertain
# Weighted or unweighted
# Only reduce, if we're not gonna mask
if loss_fn_name == 'weighted_loss':
loss_fn = WeightedBCEWithLogitsLoss(class_weights, reduce=not apply_masking)
elif loss_fn_name == 'focal_loss':
loss_fn = FocalLoss()
else:
loss_fn = nn.BCEWithLogitsLoss(reduce=not apply_masking)
# Apply a wrapper that masks missing labels
# and uncertain labels.
if apply_masking:
loss_fn = MaskedLossWrapper(loss_fn, mask_uncertain, has_missing_tasks, device)
return loss_fn
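# Example (hypothetical call): a masked, unweighted BCE loss that skips
# uncertain (-1) and missing (-2) labels:
#   loss_fn = get_loss_fn('cross_entropy', torch.device('cuda'),
#                         model_uncertainty=False, has_missing_tasks=True)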
class WeightedBCEWithLogitsLoss(nn.Module):
def __init__(self, class_weights, reduce):
"""Returns a weighted binary cross entropy loss.
Args:
class_weights: a list of two numpy arrays
"""
super().__init__()
assert class_weights is not None
self.reduce = reduce
self.n_weights = class_weights[0]
self.p_weights = class_weights[1]
def _get_weights(self, targets, n_weights, p_weights):
p_weights = torch.cuda.FloatTensor(p_weights)
n_weights = torch.cuda.FloatTensor(n_weights)
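        # NOTE: the weights are materialized as CUDA tensors, so this loss
        # assumes GPU training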
weights = ((targets == 1).float() * p_weights
+ (targets == 0).float() * n_weights)
return weights
def forward(self, logits, targets):
weights = self._get_weights(targets,
self.n_weights,
self.p_weights)
loss = F.binary_cross_entropy_with_logits(logits,
targets,
weights,
reduce=self.reduce)
return loss
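    # e.g. with n_weights=[0.2] and p_weights=[0.8], targets [[1.], [0.]]
    # yield per-element weights [[0.8], [0.2]] before the BCE reduction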
class MaskedLossWrapper(nn.Module):
def __init__(self,
loss_fn,
mask_uncertain,
has_missing_tasks,
device,
weights=None):
super().__init__()
self.loss_fn = loss_fn
self.has_missing_tasks = has_missing_tasks
self.mask_uncertain = mask_uncertain
self.device = device
def _get_mask(self, targets):
"""Returns a mask to mask uncertain
and missing labels.
Functions tales advantage of the following:
Negative/Positive: 0/1
Uncertain: -1
Missing: -2 """
mask = torch.ones(targets.shape)
if self.mask_uncertain:
mask[targets == LabelMapper.UNCERTAIN] = 0
if self.has_missing_tasks:
mask[targets == LabelMapper.MISSING] = 0
mask = mask.to(self.device)
return mask
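    # e.g. with both flags set, targets [[1., -1., -2., 0.]] give the mask
    # [[1., 0., 0., 1.]], zeroing the uncertain and missing positions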
def forward(self, logits, targets):
# Apply loss function
loss = self.loss_fn(logits, targets)
# Apply mask to skip missing labels
# and handle uncertain labels
        if self.mask_uncertain or self.has_missing_tasks:
            mask = self._get_mask(targets)
            loss = loss * mask
            # Average over the unmasked entries only
            loss = loss.sum() / mask.sum()
        else:
            loss = loss.mean()
        return loss
| [
"torch.nn.BCEWithLogitsLoss",
"torch.ones",
"torch.cuda.FloatTensor",
"torch.nn.functional.binary_cross_entropy_with_logits"
] | [((2480, 2513), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['p_weights'], {}), '(p_weights)\n', (2502, 2513), False, 'import torch\n'), ((2534, 2567), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['n_weights'], {}), '(n_weights)\n', (2556, 2567), False, 'import torch\n'), ((2907, 2992), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['logits', 'targets', 'weights'], {'reduce': 'self.reduce'}), '(logits, targets, weights, reduce=self.reduce\n )\n', (2941, 2992), True, 'from torch.nn import functional as F\n'), ((3823, 3848), 'torch.ones', 'torch.ones', (['targets.shape'], {}), '(targets.shape)\n', (3833, 3848), False, 'import torch\n'), ((1685, 1731), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {'reduce': '(not apply_masking)'}), '(reduce=not apply_masking)\n', (1705, 1731), True, 'import torch.nn as nn\n')] |
# coding=utf-8
'''
accuracy:98%
'''
import tensorflow as tf
# assumed input pipeline (not in the original): the training loop below reads
# batches from `mnist`; reshape=False keeps images as [N, 28, 28, 1]
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True, reshape=False)
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def var_with_weight_loss(shape, stddev, wl):
vv = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
if wl is not None:
weight_loss = tf.multiply(tf.nn.l2_loss(vv), wl, name='weight_loss')
tf.add_to_collection('losses', weight_loss)
return vv
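# Note: weight decay terms registered via var_with_weight_loss go into the
# 'losses' collection but are never added to cross_entropy below; they could be
# folded in with e.g.
#   total_loss = cross_entropy + tf.add_n(tf.get_collection('losses'))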
WIDTH = 28    # MNIST images: 28x28, single channel
HEIGHT = 28
CHANNEL = 1
Y_SIZE = 10   # one-hot digit labels
LR = 1e-4
BATCH_SIZE = 100
STEP_TIMES = 8000
KEEP_PROB = 0.75
x = tf.placeholder(tf.float32, [None, WIDTH, HEIGHT, CHANNEL])
y = tf.placeholder(tf.float32, [None, Y_SIZE])
keep_prob = tf.placeholder(tf.float32)
# First conv stack (CIFAR-style, with LRN); its `logits` head below is built
# but never used by the training step
W1 = var_with_weight_loss(shape=[5, 5, CHANNEL, 64], stddev=5e-2, wl=0.0)
k1 = tf.nn.conv2d(x, W1, [1, 1, 1, 1], padding='SAME')
b1 = tf.Variable(tf.constant(0.0, shape=[64]))
h1 = tf.nn.relu(tf.nn.bias_add(k1, b1))
hp1 = tf.nn.max_pool(h1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
n1 = tf.nn.lrn(hp1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
W2 = var_with_weight_loss(shape=[5, 5, 64, 64], stddev=5e-2, wl=0.0)
k2 = tf.nn.conv2d(n1, W2, [1, 1, 1, 1], padding='SAME')
b2 = tf.Variable(tf.constant(0.1, shape=[64]))
h2 = tf.nn.relu(tf.nn.bias_add(k2, b2))
hp2 = tf.nn.max_pool(h2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
reshape = tf.reshape(hp2, [BATCH_SIZE, -1])
dim = reshape.get_shape()[1].value
W3 = var_with_weight_loss(shape=[dim, 125], stddev=0.04, wl=0.004)
b3 = tf.Variable(tf.constant(0.1, shape=[125]))
local3 = tf.nn.relu(tf.matmul(reshape, W3) + b3)
W4 = var_with_weight_loss(shape=[125, 62], stddev=0.04, wl=0.004)
b4 = tf.Variable(tf.constant(0.1, shape=[62]))
local4 = tf.nn.relu(tf.matmul(local3, W4) + b4)
W5 = var_with_weight_loss(shape=[62, 3], stddev=1/62.0, wl=0.0)
b5 = tf.Variable(tf.constant(0.0, shape=[3]))
logits = tf.nn.relu(tf.matmul(local4, W5) + b5)
# Second network (classic two-layer MNIST conv net). The first conv/pool pair
# defining `hp` is assumed to be the standard 1->32 channel layer, so that the
# 7*7*64 reshape below works out.
W1 = weight_variable([5, 5, CHANNEL, 32])
b1 = bias_variable([32])
h1 = tf.nn.relu(conv2d(x, W1) + b1)
hp = max_pool_2x2(h1)   # 28x28 -> 14x14
W2 = weight_variable([5, 5, 32, 64])
b2 = bias_variable([64])
h2 = tf.nn.relu(conv2d(hp, W2) + b2)
hp2 = max_pool_2x2(h2)  # 14x14 -> 7x7
W3 = weight_variable([7 * 7 * 64, 1024])
b3 = bias_variable([1024])
f3 = tf.reshape(hp2, [-1, 7 * 7 * 64])
fc3 = tf.nn.relu(tf.matmul(f3, W3) + b3)
fc3_drop = tf.nn.dropout(fc3, keep_prob)
W4 = weight_variable([1024, 10])
b4 = bias_variable([10])
pred = tf.nn.softmax(tf.matmul(fc3_drop, W4) + b4)
# Loss function
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=[1]))
# Training step
train_step = tf.train.AdamOptimizer(LR).minimize(cross_entropy)
# Accuracy computation
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(pred, 1)), tf.float32))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(STEP_TIMES):
batch_x, batch_y = mnist.train.next_batch(BATCH_SIZE)
_, loss, rr = sess.run([train_step, cross_entropy, accuracy],
feed_dict={x: batch_x, y: batch_y, keep_prob: KEEP_PROB})
if i % 20 == 0:
print("%d --> %f : %f" % (i, loss, rr))
# print(sess.run(accuracy, feed_dict={x: mnist.train.images, y: mnist.train.labels,keep_prob:1.0}))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0}))
| [
"tensorflow.nn.conv2d",
"tensorflow.nn.max_pool",
"tensorflow.nn.bias_add",
"tensorflow.Variable",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.nn.l2_loss",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"tensorflow.nn.lrn",
"tensorflow.nn.dropout",
"tensorflow.... | [((868, 926), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, WIDTH, HEIGHT, CHANNEL]'], {}), '(tf.float32, [None, WIDTH, HEIGHT, CHANNEL])\n', (882, 926), True, 'import tensorflow as tf\n'), ((931, 973), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, Y_SIZE]'], {}), '(tf.float32, [None, Y_SIZE])\n', (945, 973), True, 'import tensorflow as tf\n'), ((986, 1012), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1000, 1012), True, 'import tensorflow as tf\n'), ((1087, 1136), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W1', '[1, 1, 1, 1]'], {'padding': '"""SAME"""'}), "(x, W1, [1, 1, 1, 1], padding='SAME')\n", (1099, 1136), True, 'import tensorflow as tf\n'), ((1230, 1306), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['h1'], {'ksize': '[1, 3, 3, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(h1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (1244, 1306), True, 'import tensorflow as tf\n'), ((1312, 1369), 'tensorflow.nn.lrn', 'tf.nn.lrn', (['hp1', '(4)'], {'bias': '(1.0)', 'alpha': '(0.001 / 9.0)', 'beta': '(0.75)'}), '(hp1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n', (1321, 1369), True, 'import tensorflow as tf\n'), ((1445, 1495), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['n1', 'W2', '[1, 1, 1, 1]'], {'padding': '"""SAME"""'}), "(n1, W2, [1, 1, 1, 1], padding='SAME')\n", (1457, 1495), True, 'import tensorflow as tf\n'), ((1589, 1665), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['h2'], {'ksize': '[1, 3, 3, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(h2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (1603, 1665), True, 'import tensorflow as tf\n'), ((1675, 1708), 'tensorflow.reshape', 'tf.reshape', (['hp2', '[BATCH_SIZE, -1]'], {}), '(hp2, [BATCH_SIZE, -1])\n', (1685, 1708), True, 'import tensorflow as tf\n'), ((2410, 2443), 'tensorflow.reshape', 'tf.reshape', (['hp2', '[-1, 7 * 7 * 64]'], {}), '(hp2, [-1, 7 * 7 * 64])\n', (2420, 2443), True, 'import tensorflow as tf\n'), ((2496, 2525), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['fc3', 'keep_prob'], {}), '(fc3, keep_prob)\n', (2509, 2525), True, 'import tensorflow as tf\n'), ((105, 143), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (124, 143), True, 'import tensorflow as tf\n'), ((155, 175), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (166, 175), True, 'import tensorflow as tf\n'), ((218, 247), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (229, 247), True, 'import tensorflow as tf\n'), ((259, 279), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (270, 279), True, 'import tensorflow as tf\n'), ((311, 367), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (323, 367), True, 'import tensorflow as tf\n'), ((402, 477), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (416, 477), True, 'import tensorflow as tf\n'), ((1154, 1182), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'shape': '[64]'}), '(0.0, shape=[64])\n', (1165, 1182), True, 'import tensorflow as tf\n'), ((1200, 1222), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['k1', 'b1'], {}), '(k1, b1)\n', 
(1214, 1222), True, 'import tensorflow as tf\n'), ((1513, 1541), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[64]'}), '(0.1, shape=[64])\n', (1524, 1541), True, 'import tensorflow as tf\n'), ((1559, 1581), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['k2', 'b2'], {}), '(k2, b2)\n', (1573, 1581), True, 'import tensorflow as tf\n'), ((1827, 1856), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[125]'}), '(0.1, shape=[125])\n', (1838, 1856), True, 'import tensorflow as tf\n'), ((1986, 2014), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[62]'}), '(0.1, shape=[62])\n', (1997, 2014), True, 'import tensorflow as tf\n'), ((2141, 2168), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'shape': '[3]'}), '(0.0, shape=[3])\n', (2152, 2168), True, 'import tensorflow as tf\n'), ((2913, 2925), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2923, 2925), True, 'import tensorflow as tf\n'), ((546, 587), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': 'stddev'}), '(shape, stddev=stddev)\n', (565, 587), True, 'import tensorflow as tf\n'), ((697, 740), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""losses"""', 'weight_loss'], {}), "('losses', weight_loss)\n", (717, 740), True, 'import tensorflow as tf\n'), ((1876, 1898), 'tensorflow.matmul', 'tf.matmul', (['reshape', 'W3'], {}), '(reshape, W3)\n', (1885, 1898), True, 'import tensorflow as tf\n'), ((2034, 2055), 'tensorflow.matmul', 'tf.matmul', (['local3', 'W4'], {}), '(local3, W4)\n', (2043, 2055), True, 'import tensorflow as tf\n'), ((2188, 2209), 'tensorflow.matmul', 'tf.matmul', (['local4', 'W5'], {}), '(local4, W5)\n', (2197, 2209), True, 'import tensorflow as tf\n'), ((2461, 2478), 'tensorflow.matmul', 'tf.matmul', (['f3', 'W3'], {}), '(f3, W3)\n', (2470, 2478), True, 'import tensorflow as tf\n'), ((2606, 2629), 'tensorflow.matmul', 'tf.matmul', (['fc3_drop', 'W4'], {}), '(fc3_drop, W4)\n', (2615, 2629), True, 'import tensorflow as tf\n'), ((2753, 2779), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['LR'], {}), '(LR)\n', (2775, 2779), True, 'import tensorflow as tf\n'), ((2948, 2981), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2979, 2981), True, 'import tensorflow as tf\n'), ((646, 663), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['vv'], {}), '(vv)\n', (659, 663), True, 'import tensorflow as tf\n'), ((2856, 2871), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (2865, 2871), True, 'import tensorflow as tf\n'), ((2873, 2891), 'tensorflow.argmax', 'tf.argmax', (['pred', '(1)'], {}), '(pred, 1)\n', (2882, 2891), True, 'import tensorflow as tf\n'), ((2694, 2706), 'tensorflow.log', 'tf.log', (['pred'], {}), '(pred)\n', (2700, 2706), True, 'import tensorflow as tf\n')] |