code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
#!/usr/bin/env python3
import unittest
import sys
import os
import re
from io import StringIO
from unittest.mock import patch
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".")))
import scripts.patch_apply.apply as apply
class TestApplied(unittest.TestCase):
    """End-to-end tests for scripts/patch_apply/apply.py.

    Each test runs apply.main() against a fixture under tests/patches and
    verifies the messages it prints to stdout.
    """

    def setUp(self):
        # apply.main() resolves patch paths relative to the tests directory,
        # so pin the working directory (and restore it in tearDown).
        self.oldcwd = os.getcwd()
        os.chdir(os.path.dirname(__file__))

    def tearDown(self):
        os.chdir(self.oldcwd)

    def test_no_file(self):
        """A non-existent patch path is reported up front."""
        with patch('sys.stdout', new=StringIO()) as fakeOutput:
            apply.main(pathToPatch='patches/does-not-exist.patch',
                       dry_run=True)
        self.assertRegex(fakeOutput.getvalue().strip(),
                         '^Invalid path or filename')

    def test_multiple_patches(self):
        """Every patch file in a directory is examined (editor backups skipped)."""
        with patch('sys.stdout', new=StringIO()) as fakeOutput:
            apply.main(pathToPatch='patches/clean',
                       dry_run=True,
                       reverse=False,
                       verbose=0)
        # Count the fixture files, ignoring editor backup files ("...~").
        numPatches = 0
        for filename in os.listdir('patches/clean'):
            if filename.endswith('~'):
                continue
            numPatches += 1
        self.assertEqual(fakeOutput.getvalue().count('Examining patch:'),
                         numPatches)

    def test_applied(self):
        """An already-applied patch is rejected by git apply."""
        with patch('sys.stdout', new=StringIO()) as fakeOutput:
            apply.main(pathToPatch='patches/applied/add-line.patch',
                       dry_run=True,
                       reverse=False,
                       verbose=0)
        self.assertRegex(fakeOutput.getvalue(), 'Patch failed to apply with git apply')

    def test_context_comment(self):
        """A patch whose context comment drifted still applies via fallback."""
        with patch('sys.stdout', new=StringIO()) as fakeOutput:
            apply.main(pathToPatch='patches/context/comment.patch',
                       dry_run=True,
                       reverse=False,
                       verbose=0)
        self.assertRegex(fakeOutput.getvalue(), 'Patch failed to apply with git apply')
        self.assertRegex(fakeOutput.getvalue(), '1 subpatches can be applied successfully:')
        # BUG FIX: raw string — '\(' in a plain string literal is an invalid
        # escape sequence (SyntaxWarning on modern Python).
        self.assertRegex(fakeOutput.getvalue(), r'would have been successfully applied \(dry run\)')

    def test_context_function(self):
        """A patch whose context function drifted still applies via fallback."""
        with patch('sys.stdout', new=StringIO()) as fakeOutput:
            apply.main(pathToPatch='patches/context/function.patch',
                       dry_run=True,
                       reverse=False,
                       verbose=0)
        self.assertRegex(fakeOutput.getvalue(), 'Patch failed to apply with git apply')
        self.assertRegex(fakeOutput.getvalue(), '1 subpatches can be applied successfully:')
        # BUG FIX: raw string (see test_context_comment).
        self.assertRegex(fakeOutput.getvalue(), r'would have been successfully applied \(dry run\)')

    def test_applied_offset(self):
        """A patch already applied at an offset is detected as applied."""
        with patch('sys.stdout', new=StringIO()) as fakeOutput:
            apply.main(pathToPatch='patches/applied/remove-offset.patch',
                       dry_run=True,
                       reverse=False,
                       verbose=0)
        self.assertRegex(fakeOutput.getvalue(), 'Patch failed to apply with git apply')
        self.assertRegex(fakeOutput.getvalue(), 'Subpatches that were already applied:')

    def test_findGitPrefix(self):
        """findGitPrefix resolves every path to the 'tests' git prefix."""
        paths = [
            'patches',
            'test_apply.py',
            'non-existant-file',
            'non-existant-dir/non-existant-file'
        ]
        for path in paths:
            self.assertEqual(apply.findGitPrefix(path), "tests", path)

    def test_bad_index(self):
        """A patch with a bad index line is reported as unplaceable."""
        with patch('sys.stdout', new=StringIO()) as fakeOutput:
            apply.main(pathToPatch='patches/git/bad-index.patch',
                       dry_run=True,
                       reverse=False,
                       verbose=2)
        self.assertNotRegex(fakeOutput.getvalue(), 'Subpatches that were applied by git apply:')
        self.assertRegex(fakeOutput.getvalue(), 'Subpatches that did not apply, and we could not find where the patch should be applied')
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()
|
[
"unittest.main",
"os.listdir",
"io.StringIO",
"os.getcwd",
"os.path.dirname",
"scripts.patch_apply.apply.main",
"scripts.patch_apply.apply.findGitPrefix",
"os.chdir"
] |
[((4355, 4370), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4368, 4370), False, 'import unittest\n'), ((336, 347), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (345, 347), False, 'import os\n'), ((425, 446), 'os.chdir', 'os.chdir', (['self.oldcwd'], {}), '(self.oldcwd)\n', (433, 446), False, 'import os\n'), ((174, 199), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (189, 199), False, 'import os\n'), ((365, 390), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (380, 390), False, 'import os\n'), ((552, 620), 'scripts.patch_apply.apply.main', 'apply.main', ([], {'pathToPatch': '"""patches/does-not-exist.patch"""', 'dry_run': '(True)'}), "(pathToPatch='patches/does-not-exist.patch', dry_run=True)\n", (562, 620), True, 'import scripts.patch_apply.apply as apply\n'), ((892, 971), 'scripts.patch_apply.apply.main', 'apply.main', ([], {'pathToPatch': '"""patches/clean"""', 'dry_run': '(True)', 'reverse': '(False)', 'verbose': '(0)'}), "(pathToPatch='patches/clean', dry_run=True, reverse=False, verbose=0)\n", (902, 971), True, 'import scripts.patch_apply.apply as apply\n'), ((1164, 1191), 'os.listdir', 'os.listdir', (['"""patches/clean"""'], {}), "('patches/clean')\n", (1174, 1191), False, 'import os\n'), ((1525, 1625), 'scripts.patch_apply.apply.main', 'apply.main', ([], {'pathToPatch': '"""patches/applied/add-line.patch"""', 'dry_run': '(True)', 'reverse': '(False)', 'verbose': '(0)'}), "(pathToPatch='patches/applied/add-line.patch', dry_run=True,\n reverse=False, verbose=0)\n", (1535, 1625), True, 'import scripts.patch_apply.apply as apply\n'), ((1917, 2016), 'scripts.patch_apply.apply.main', 'apply.main', ([], {'pathToPatch': '"""patches/context/comment.patch"""', 'dry_run': '(True)', 'reverse': '(False)', 'verbose': '(0)'}), "(pathToPatch='patches/context/comment.patch', dry_run=True,\n reverse=False, verbose=0)\n", (1927, 2016), True, 'import scripts.patch_apply.apply as apply\n'), ((2514, 2614), 
'scripts.patch_apply.apply.main', 'apply.main', ([], {'pathToPatch': '"""patches/context/function.patch"""', 'dry_run': '(True)', 'reverse': '(False)', 'verbose': '(0)'}), "(pathToPatch='patches/context/function.patch', dry_run=True,\n reverse=False, verbose=0)\n", (2524, 2614), True, 'import scripts.patch_apply.apply as apply\n'), ((3110, 3215), 'scripts.patch_apply.apply.main', 'apply.main', ([], {'pathToPatch': '"""patches/applied/remove-offset.patch"""', 'dry_run': '(True)', 'reverse': '(False)', 'verbose': '(0)'}), "(pathToPatch='patches/applied/remove-offset.patch', dry_run=True,\n reverse=False, verbose=0)\n", (3120, 3215), True, 'import scripts.patch_apply.apply as apply\n'), ((3893, 3991), 'scripts.patch_apply.apply.main', 'apply.main', ([], {'pathToPatch': '"""patches/git/bad-index.patch"""', 'dry_run': '(True)', 'reverse': '(False)', 'verbose': '(2)'}), "(pathToPatch='patches/git/bad-index.patch', dry_run=True, reverse\n =False, verbose=2)\n", (3903, 3991), True, 'import scripts.patch_apply.apply as apply\n'), ((3743, 3768), 'scripts.patch_apply.apply.findGitPrefix', 'apply.findGitPrefix', (['path'], {}), '(path)\n', (3762, 3768), True, 'import scripts.patch_apply.apply as apply\n'), ((513, 523), 'io.StringIO', 'StringIO', ([], {}), '()\n', (521, 523), False, 'from io import StringIO\n'), ((853, 863), 'io.StringIO', 'StringIO', ([], {}), '()\n', (861, 863), False, 'from io import StringIO\n'), ((1486, 1496), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1494, 1496), False, 'from io import StringIO\n'), ((1878, 1888), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1886, 1888), False, 'from io import StringIO\n'), ((2475, 2485), 'io.StringIO', 'StringIO', ([], {}), '()\n', (2483, 2485), False, 'from io import StringIO\n'), ((3071, 3081), 'io.StringIO', 'StringIO', ([], {}), '()\n', (3079, 3081), False, 'from io import StringIO\n'), ((3854, 3864), 'io.StringIO', 'StringIO', ([], {}), '()\n', (3862, 3864), False, 'from io import StringIO\n')]
|
from django.contrib import admin
from habitat._common.admin import HabitatAdmin
from habitat.sensors.models import CarbonDioxide
@admin.register(CarbonDioxide)
class CarbonDioxideAdmin(HabitatAdmin):
    # Django admin configuration for CarbonDioxide sensor readings.
    # Columns shown in the changelist view.
    list_display = ['datetime', 'location', 'value']
    # Sidebar filters on the changelist.
    list_filter = ['created', 'location']
    # '^date' is a startswith lookup on a field named 'date'.
    # NOTE(review): list_display references 'datetime', not 'date' — confirm
    # the model actually has a 'date' field, otherwise this search errors.
    search_fields = ['^date', 'value']
|
[
"django.contrib.admin.register"
] |
[((132, 161), 'django.contrib.admin.register', 'admin.register', (['CarbonDioxide'], {}), '(CarbonDioxide)\n', (146, 161), False, 'from django.contrib import admin\n')]
|
from pygame import mixer
class Player():
    """Handles everything concerning the current song"""
    def __init__(self):
        # Initialise the pygame mixer and reset all playback state.
        mixer.init()
        self.currentlength = 0   # song length; seconds, per progressmeter's math
        self.currentadress = ""  # path of the currently loaded song file
        self.name = ""           # display name of the current song
        self.plogo = "||"        # play/pause glyph shown in the UI
        self.endless = True      # loop-the-current-song flag
        self.elogo = "Loop ✓"    # loop-state glyph shown in the UI
        self.percent = 0         # playback progress in percent
        self.playing = False     # True while the song is (believed) playing
    def update_song(self, adress, length, name):
        """Load a new song into the mixer; failures are silently ignored.

        NOTE(review): the attributes are assigned before mixer.music.load(),
        so on a load failure the player's metadata may already point at the
        unloadable file.
        """
        try:
            self.currentadress = adress
            self.currentlength = length
            self.name = name
            mixer.music.load(adress)
        except Exception:
            # Best-effort: an unloadable file is ignored.
            pass
    def play(self):
        """Toggle pause/unpause and update the play/pause glyph."""
        if self.playing is True:
            mixer.music.pause()
            self.plogo = "▶"
            self.playing = False
        elif self.playing is False:
            mixer.music.unpause()
            self.plogo = "||"
            self.playing = True
        # If nothing is actually loaded/playing, keep showing the "||" glyph.
        if mixer.music.get_busy() == 0:
            self.plogo = "||"
    def endlesssong(self):
        """Toggle the loop flag and its UI glyph."""
        if self.endless is True:
            self.endless = False
            self.elogo = "Loop ✗"
        elif self.endless is False:
            self.endless = True
            self.elogo = "Loop ✓"
    def volumecontrol(self, volume):
        """Set the mixer volume; *volume* is expected on a 0-100 scale."""
        volume = float(volume) * 0.01  # map 0-100 to pygame's 0.0-1.0
        mixer.music.set_volume(volume)
    def progressmeter(self):
        """Return playback progress as a percentage of currentlength."""
        current = mixer.music.get_pos()
        current = current / 1000  # get_pos() returns milliseconds
        try:
            self.percent = (current / self.currentlength) * 100
        except Exception:
            # Division by zero when no song length is known yet.
            self.percent = 0
        return self.percent
    def restart(self):
        """Restart the current song from the beginning if it is paused."""
        if self.playing is False:
            self.plogo = "||"
            mixer.music.load(self.currentadress)
            mixer.music.play()
    def get_plogo(self):
        # Accessor for the play/pause glyph.
        return self.plogo
    def get_elogo(self):
        # Accessor for the loop glyph.
        return self.elogo
    def start_song(self):
        """(Re)load the current song and start playing it."""
        mixer.music.load(self.currentadress)
        mixer.music.play()
        self.playing = True
    def restart_loop(self):
        """Restart the song when looping is on and playback is near/past 100%.

        NOTE(review): the three branches overlap (99-101% and >100%); the
        elif-chain makes the first match win, so behavior is deterministic
        but the extra branches look redundant.
        """
        if self.endless is True:
            if self.percent <= 101 and self.percent >= 100:
                mixer.music.load(self.currentadress)
                mixer.music.play()
            elif self.percent <= 100 and self.percent >= 99:
                mixer.music.load(self.currentadress)
                mixer.music.play()
            elif self.percent > 100:
                mixer.music.load(self.currentadress)
                mixer.music.play()
    def get_current_song(self):
        """Return the current song name; "+" is treated as "no song"."""
        if self.name == "+":
            return ""
        else:
            return self.name
    def del_update(self, adress, name):
        """Stop and unload the song if it matches the (adress, name) pair."""
        if adress == self.currentadress and \
           name == self.name:
            mixer.music.stop()
            self.update_song("", 0, "")
|
[
"pygame.mixer.music.get_pos",
"pygame.mixer.music.unpause",
"pygame.mixer.init",
"pygame.mixer.music.play",
"pygame.mixer.music.set_volume",
"pygame.mixer.music.get_busy",
"pygame.mixer.music.pause",
"pygame.mixer.music.load",
"pygame.mixer.music.stop"
] |
[((140, 152), 'pygame.mixer.init', 'mixer.init', ([], {}), '()\n', (150, 152), False, 'from pygame import mixer\n'), ((1334, 1364), 'pygame.mixer.music.set_volume', 'mixer.music.set_volume', (['volume'], {}), '(volume)\n', (1356, 1364), False, 'from pygame import mixer\n'), ((1416, 1437), 'pygame.mixer.music.get_pos', 'mixer.music.get_pos', ([], {}), '()\n', (1435, 1437), False, 'from pygame import mixer\n'), ((1738, 1774), 'pygame.mixer.music.load', 'mixer.music.load', (['self.currentadress'], {}), '(self.currentadress)\n', (1754, 1774), False, 'from pygame import mixer\n'), ((1784, 1802), 'pygame.mixer.music.play', 'mixer.music.play', ([], {}), '()\n', (1800, 1802), False, 'from pygame import mixer\n'), ((1951, 1987), 'pygame.mixer.music.load', 'mixer.music.load', (['self.currentadress'], {}), '(self.currentadress)\n', (1967, 1987), False, 'from pygame import mixer\n'), ((1997, 2015), 'pygame.mixer.music.play', 'mixer.music.play', ([], {}), '()\n', (2013, 2015), False, 'from pygame import mixer\n'), ((576, 600), 'pygame.mixer.music.load', 'mixer.music.load', (['adress'], {}), '(adress)\n', (592, 600), False, 'from pygame import mixer\n'), ((716, 735), 'pygame.mixer.music.pause', 'mixer.music.pause', ([], {}), '()\n', (733, 735), False, 'from pygame import mixer\n'), ((948, 970), 'pygame.mixer.music.get_busy', 'mixer.music.get_busy', ([], {}), '()\n', (968, 970), False, 'from pygame import mixer\n'), ((2813, 2831), 'pygame.mixer.music.stop', 'mixer.music.stop', ([], {}), '()\n', (2829, 2831), False, 'from pygame import mixer\n'), ((850, 871), 'pygame.mixer.music.unpause', 'mixer.music.unpause', ([], {}), '()\n', (869, 871), False, 'from pygame import mixer\n'), ((2188, 2224), 'pygame.mixer.music.load', 'mixer.music.load', (['self.currentadress'], {}), '(self.currentadress)\n', (2204, 2224), False, 'from pygame import mixer\n'), ((2242, 2260), 'pygame.mixer.music.play', 'mixer.music.play', ([], {}), '()\n', (2258, 2260), False, 'from pygame import mixer\n'), 
((2340, 2376), 'pygame.mixer.music.load', 'mixer.music.load', (['self.currentadress'], {}), '(self.currentadress)\n', (2356, 2376), False, 'from pygame import mixer\n'), ((2394, 2412), 'pygame.mixer.music.play', 'mixer.music.play', ([], {}), '()\n', (2410, 2412), False, 'from pygame import mixer\n'), ((2468, 2504), 'pygame.mixer.music.load', 'mixer.music.load', (['self.currentadress'], {}), '(self.currentadress)\n', (2484, 2504), False, 'from pygame import mixer\n'), ((2522, 2540), 'pygame.mixer.music.play', 'mixer.music.play', ([], {}), '()\n', (2538, 2540), False, 'from pygame import mixer\n')]
|
import pytz
import pandas as pd
def get_utc_timestamp(dt):
    """Return *dt* attached to UTC (DST-normalized) as an aware datetime."""
    utc = pytz.timezone("UTC")
    localized = utc.localize(dt, is_dst=False)
    return utc.normalize(localized)
def goals_wide_to_long(df: pd.DataFrame, unit_type: str = "test_unit_type") -> pd.DataFrame:
    """
    Modify the input DataFrame in a way that it can be evaluated using Experiment.evaluate_agg().
    Arguments:
        df: dataframe in wide format - one row per variant and aggregated data in columns
        unit_type: should be the same value as the `unit_type` passed to `Experiment`
    Returns:
        dataframe in long format - one row per variant and goal
    Input dataframe example:
    ```
    experiment_id variant_id views clicks conversions bookings bookings_squared
    my-exp a 473661 48194 413 17152 803105
    my-exp b 471485 47184 360 14503 677178
    my-exp c 477159 48841 406 15892 711661
    my-exp d 474934 49090 289 11995 566700
    ```
    """
    # Do not modify the input `df` via reference
    df = df.copy()
    # Rename first two columns to the canonical long-format names
    df.columns = ["exp_id", "exp_variant_id"] + df.columns.to_list()[2:]
    # `sum_value` part: melt the non-squared aggregate columns to long format
    cols = [col for col in df.columns.to_list()[2:] if "square" not in col]
    df_long = pd.melt(
        df, id_vars=["exp_id", "exp_variant_id"], value_vars=cols, var_name="goal", value_name="sum_value"
    )
    # `sum_sqr_value` part: melt the squared columns to long format
    cols_squared = [col for col in df.columns.to_list()[2:] if "square" in col]
    df_long_sqr = pd.melt(
        df, id_vars=["exp_id", "exp_variant_id"], value_vars=cols_squared, var_name="goal", value_name="sum_sqr_value"
    )
    # Strip the trailing "_squared" suffix so both halves share goal names
    df_long_sqr["goal"] = df_long_sqr["goal"].apply(lambda x: "_".join(x.split("_")[:-1]))
    # Merge together and add other necessary columns for evaluation
    goals = pd.merge(left=df_long, right=df_long_sqr, how="outer", on=["exp_id", "exp_variant_id", "goal"])
    goals.insert(2, "unit_type", unit_type)
    goals.insert(3, "agg_type", "global")
    goals.insert(5, "dimension", "")
    goals.insert(6, "dimension_value", "")
    goals.insert(7, "count", 0)
    goals.insert(8, "sum_sqr_count", 0)
    goals.insert(11, "count_unique", 0)
    # Goals without a squared column end up with NaN `sum_sqr_value` after the
    # outer merge; fall back to `sum_value` there. This vectorized fillna is
    # equivalent to (and much faster than) the original row-wise apply of
    # a helper that tested NaN via `x != x` on positional indices.
    goals["sum_sqr_value"] = goals["sum_sqr_value"].fillna(goals["sum_value"])
    return goals
def _add_value_squared_where_missing(row):
"""Add values `value_squared` where missing."""
value_squared = row[-2]
value = row[-3]
if value_squared != value_squared:
return value
else:
return value_squared
|
[
"pandas.melt",
"pandas.merge",
"pytz.timezone"
] |
[((72, 92), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (85, 92), False, 'import pytz\n'), ((1472, 1584), 'pandas.melt', 'pd.melt', (['df'], {'id_vars': "['exp_id', 'exp_variant_id']", 'value_vars': 'cols', 'var_name': '"""goal"""', 'value_name': '"""sum_value"""'}), "(df, id_vars=['exp_id', 'exp_variant_id'], value_vars=cols, var_name\n ='goal', value_name='sum_value')\n", (1479, 1584), True, 'import pandas as pd\n'), ((1797, 1920), 'pandas.melt', 'pd.melt', (['df'], {'id_vars': "['exp_id', 'exp_variant_id']", 'value_vars': 'cols_squared', 'var_name': '"""goal"""', 'value_name': '"""sum_sqr_value"""'}), "(df, id_vars=['exp_id', 'exp_variant_id'], value_vars=cols_squared,\n var_name='goal', value_name='sum_sqr_value')\n", (1804, 1920), True, 'import pandas as pd\n'), ((2103, 2202), 'pandas.merge', 'pd.merge', ([], {'left': 'df_long', 'right': 'df_long_sqr', 'how': '"""outer"""', 'on': "['exp_id', 'exp_variant_id', 'goal']"}), "(left=df_long, right=df_long_sqr, how='outer', on=['exp_id',\n 'exp_variant_id', 'goal'])\n", (2111, 2202), True, 'import pandas as pd\n')]
|
from multiprocessing import Pool
import pandas as pd
from functools import partial
import numpy as np
from tqdm import tqdm
def inductive_pooling(df, embeddings, G, workers, gamma=1000, dict_node=None, average_embedding=True):
    """Compute embeddings for the transactions in *df* using a process pool.

    *df* is split into `workers` chunks; each chunk is processed by
    inductive_pooling_chunk in its own process.

    Arguments:
        df: transactions to embed.
        embeddings: existing per-transaction embeddings (DataFrame).
        G: graph with cardholder/merchant/transaction nodes.
        workers: number of worker processes (and dataframe chunks).
        gamma: maximum number of neighbour embeddings pooled per transaction.
        dict_node: unused; kept for backward compatibility.
        average_embedding: when True, transactions with no known cardholder
            or merchant fall back to the mean of all embeddings.

    Returns:
        list with one dict (transaction -> embedding) per chunk.
    """
    if average_embedding:
        avg_emb = embeddings.mean().values
    else:
        avg_emb = None
    # BUG FIX: `gamma` was accepted but never forwarded to the chunk worker,
    # so every chunk silently used the worker's default of 1000.
    chunk_fn = partial(inductive_pooling_chunk, embeddings=embeddings, G=G,
                       gamma=gamma, average_embedding=avg_emb)
    with Pool(workers) as p:
        r = p.map(chunk_fn, np.array_split(df, workers))
    return r
def inductive_pooling_chunk(df, embeddings, G, gamma=1000, average_embedding=None):
    """Compute an embedding for every transaction row in this chunk.

    Preference order per transaction:
    1. cardholder and merchant share neighbours in G -> reuse the last
       (after sorting) mutual neighbour's embedding;
    2. only the cardholder is in G -> pool its neighbours' embeddings;
    3. only the merchant is in G -> pool its neighbours' embeddings;
    4. neither is in G -> fall back to `average_embedding`.

    Returns a dict mapping transaction index -> embedding.
    """
    #Create a container for the new embeddings
    new_embeddings = dict()
    for transaction, transaction_row in tqdm(df.iterrows(), total=df.shape[0]):
        cardholder = transaction_row.CARD_PAN_ID
        merchant = transaction_row.TERM_MIDUID
        mutual = False
        # `&` instead of `and` works here because has_node returns bools.
        if G.has_node(cardholder) & G.has_node(merchant):
            mutual_neighbors = list(set(G.neighbors(cardholder)).intersection(set(G.neighbors(merchant))))
            # Sort for a deterministic choice of the neighbour picked below.
            mutual_neighbors.sort()
            if (len(mutual_neighbors) > 0):
                mutual = True
                # Use dataframe with TX_ID on index (to speed up retrieval of transaction rows)
                embeddings_mutual_neighbors = embeddings.loc[mutual_neighbors]
                # most recent transaction
                most_recent_embedding_mutual_neighbor = embeddings_mutual_neighbors.iloc[-1]
                new_embeddings[transaction] = most_recent_embedding_mutual_neighbor
        if G.has_node(cardholder) & (not mutual):
            cardholder_neighbors = list(G.neighbors(cardholder))
            pooled_embedding = get_pooled_embedding(cardholder_neighbors, embeddings, gamma)
            new_embeddings[transaction] = pooled_embedding
        elif G.has_node(merchant) & (not mutual):
            merchant_neighbors = list(G.neighbors(merchant))
            pooled_embedding = get_pooled_embedding(merchant_neighbors, embeddings, gamma)
            new_embeddings[transaction] = pooled_embedding
        elif (not mutual):
            # No graph information at all: use the provided fallback.
            new_embeddings[transaction] = average_embedding
    return new_embeddings
def get_pooled_embedding(neighbors, embeddings, gamma):
    """Average the embeddings of up to the `gamma` last-listed neighbours.

    Arguments:
        neighbors: index labels of the neighbour rows in *embeddings*.
        gamma: cap on how many of the trailing rows are pooled.

    Returns:
        1-D numpy array with the column-wise mean of the selected rows.
    """
    embeddings_to_pool = embeddings.loc[neighbors]
    window = min(gamma, embeddings_to_pool.shape[0])
    most_recent = embeddings_to_pool.iloc[-window:]
    # .mean() already yields one value per column; the original round-trip
    # through pd.DataFrame(...).transpose().values[0] was a no-op.
    return most_recent.mean().values
|
[
"numpy.array_split",
"functools.partial",
"multiprocessing.Pool"
] |
[((384, 397), 'multiprocessing.Pool', 'Pool', (['workers'], {}), '(workers)\n', (388, 397), False, 'from multiprocessing import Pool\n'), ((422, 513), 'functools.partial', 'partial', (['inductive_pooling_chunk'], {'embeddings': 'embeddings', 'G': 'G', 'average_embedding': 'avg_emb'}), '(inductive_pooling_chunk, embeddings=embeddings, G=G,\n average_embedding=avg_emb)\n', (429, 513), False, 'from functools import partial\n'), ((511, 538), 'numpy.array_split', 'np.array_split', (['df', 'workers'], {}), '(df, workers)\n', (525, 538), True, 'import numpy as np\n')]
|
import os
import subprocess
import datetime
def render_today(update):
    """Render today's CVE update list to Markdown.

    Arguments:
        update: list of dicts with keys 'CVE_ID', 'CVE_DESCRIPTION' and
            'PocOrExp' (list of repository URLs).

    Side effect: writes the rendered document to Today.md in the cwd.
    Returns the list of rendered lines.
    """
    string = []
    string.append("# Update %s" % datetime.datetime.now().strftime('%Y-%m-%d'))
    if len(update) == 0:
        string.append('No Update Today!')
    for item in update:
        string.append("## %s" % item['CVE_ID'])
        string.append("%s" % item['CVE_DESCRIPTION'])
        string.append("")
        for URL in item['PocOrExp']:
            link = "- [%s](%s) : " % (URL, URL)
            # BUG FIX: the original built stars/forks badges with
            # `"" % (AUTHOR, PROJECT_NAME)`, which raises
            # "TypeError: not all arguments converted" on the first URL.
            # The badge templates were lost, so keep empty placeholders.
            stars = ""
            forks = ""
            string.append(" ".join([link, stars, forks]))
        string.append('\n')
    with open("Today.md", 'w') as f:
        f.write("\n".join(string))
    return string
def parse_readme(content):
    """Parse PocOrExp.md lines into per-CVE records.

    Returns a tuple ``(records, ids)`` where *records* maps each CVE id to a
    dict with 'CVE_ID', 'CVE_DESCRIPTION' and 'URL' keys, and *ids* lists the
    CVE ids in the order they appeared.
    """
    records = {}
    order = []
    current = ""
    for line in content:
        if line.startswith('## CVE'):
            # New CVE section: derive the id from the heading text.
            current = "CVE" + line.split('CVE')[-1]
            order.append(current)
            records[current] = {
                'CVE_ID': current,
                'CVE_DESCRIPTION': "",
                'URL': [],
            }
        elif line.startswith('- ['):
            # Markdown link bullet: extract the URL between '[' and ']'.
            url = line.split('[')[1].split(']')[0]
            records[current]['URL'].append(url)
        elif line.startswith('##'):
            # Any other heading (e.g. year sections) is ignored.
            continue
        else:
            # Plain text belongs to the current CVE's description.
            records[current]['CVE_DESCRIPTION'] = line
    return records, order
def get_today_update():
    """Diff the two newest tags of the PocOrExp_in_Github repo and render
    today's new CVEs / newly-added PoC links via render_today().

    Exits with -1 when the newest tag does not match today's date (YYYYMMDD).
    Side effects: re-clones the repository into ./PocOrExp_in_Github and
    checks out two tags in sequence.
    """
    # Start from a fresh clone so the tag list is current.
    status,output = subprocess.getstatusoutput('rm -rf PocOrExp_in_Github')
    status,output = subprocess.getstatusoutput('git clone <EMAIL>:ycdxsb/PocOrExp_in_Github.git PocOrExp_in_Github')
    # Tags sorted by commit date: tags[-1] is newest, tags[-2] the previous.
    status,output = subprocess.getstatusoutput('cd PocOrExp_in_Github && git tag --sort=committerdate')
    tags = output.split('\n')
    print(tags)
    # Sanity check: the newest tag must be today's date stamp.
    if(tags[-1]!=datetime.datetime.now().strftime('%Y%m%d')):
        print('date info error')
        exit(-1)
    old_poc_or_exps = []
    new_poc_or_exps = []
    # Parse the README as of the previous tag ...
    status,output = subprocess.getstatusoutput('cd PocOrExp_in_Github && git checkout %s' % tags[-2])
    with open('PocOrExp_in_Github/PocOrExp.md') as f:
        content = f.read().split('\n')
        content = [line for line in content if line!='']
        old_poc_or_exps,old_cve_ids = parse_readme(content)
    # ... and as of the newest tag.
    status,output = subprocess.getstatusoutput('cd PocOrExp_in_Github && git checkout %s' % tags[-1])
    with open('PocOrExp_in_Github/PocOrExp.md') as f:
        content = f.read().split('\n')
        content = [line for line in content if line!='']
        new_poc_or_exps,new_cve_ids = parse_readme(content)
    update = []
    for CVE_ID in new_cve_ids:
        if CVE_ID not in old_cve_ids:
            # Brand-new CVE: include all of its PoC links.
            d = {}
            d['CVE_ID'] = CVE_ID
            d['CVE_DESCRIPTION'] = new_poc_or_exps[CVE_ID]['CVE_DESCRIPTION']
            d['PocOrExp'] = new_poc_or_exps[CVE_ID]['URL']
            update.append(d)
        else:
            # Existing CVE: include only links added since the previous tag,
            # preserving their order in the new README.
            old_urls = old_poc_or_exps[CVE_ID]['URL']
            new_urls = new_poc_or_exps[CVE_ID]['URL']
            diff = list(set(new_urls)-set(old_urls))
            if(len(diff)==0):
                continue
            d = {}
            d['CVE_ID'] = CVE_ID
            d['CVE_DESCRIPTION'] = new_poc_or_exps[CVE_ID]['CVE_DESCRIPTION']
            d['PocOrExp'] = []
            for url in new_urls:
                if url in diff:
                    d['PocOrExp'].append(url)
            update.append(d)
    return render_today(update)
if __name__=="__main__":
    # Entry point: compute and render today's CVE/PoC updates.
    update_today = get_today_update()
|
[
"subprocess.getstatusoutput",
"datetime.datetime.now"
] |
[((1696, 1751), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (['"""rm -rf PocOrExp_in_Github"""'], {}), "('rm -rf PocOrExp_in_Github')\n", (1722, 1751), False, 'import subprocess\n'), ((1772, 1873), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (['"""git clone <EMAIL>:ycdxsb/PocOrExp_in_Github.git PocOrExp_in_Github"""'], {}), "(\n 'git clone <EMAIL>:ycdxsb/PocOrExp_in_Github.git PocOrExp_in_Github')\n", (1798, 1873), False, 'import subprocess\n'), ((1889, 1977), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (['"""cd PocOrExp_in_Github && git tag --sort=committerdate"""'], {}), "(\n 'cd PocOrExp_in_Github && git tag --sort=committerdate')\n", (1915, 1977), False, 'import subprocess\n'), ((2201, 2286), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (["('cd PocOrExp_in_Github && git checkout %s' % tags[-2])"], {}), "('cd PocOrExp_in_Github && git checkout %s' %\n tags[-2])\n", (2227, 2286), False, 'import subprocess\n'), ((2505, 2590), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (["('cd PocOrExp_in_Github && git checkout %s' % tags[-1])"], {}), "('cd PocOrExp_in_Github && git checkout %s' %\n tags[-1])\n", (2531, 2590), False, 'import subprocess\n'), ((2036, 2059), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2057, 2059), False, 'import datetime\n'), ((119, 142), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (140, 142), False, 'import datetime\n')]
|
import argparse
# Command-line interface for a simple person search.
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--name", help="the name of the person you want to find")
parser.add_argument("-a", "--age", help="the age of the person you'd like to find", type=int)
parser.add_argument("-c", "--city", help="the city you'd like to search")
parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
args = parser.parse_args()
# With -v, echo all supplied criteria; otherwise only the name.
if args.verbose:
    print(f"Searching for {args.name} {args.age} years of age in or around {args.city}")
else:
    print(f"Searching for {args.name}")
|
[
"argparse.ArgumentParser"
] |
[((26, 51), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (49, 51), False, 'import argparse\n')]
|
# -*- coding: UTF-8 -*-
import re
import argparse
from urllib.parse import urlparse
import wikipedia
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.stem import SnowballStemmer
# ISO 639-1 language code -> full language name, as expected by the NLTK
# stopwords corpus and SnowballStemmer (see get_stop_words / mk_tokenizer).
LANGUAGES = {
    "en": "english",
    "fr": "french",
    "it": "italian",
    "es": "spanish",
    "pt": "portuguese",
}
def setup_nltk():
    """Fetch the NLTK data packages this script depends on (quietly)."""
    nltk.download("stopwords", quiet=True)
    nltk.download("punkt", quiet=True)
# not very user-friendly when using e.g. --help
# Runs at import time so later stopword/tokenizer calls never miss data.
setup_nltk()
def get_stop_words(lang):
    """Return the NLTK stop-word list for *lang* (ISO code or full name)."""
    language = LANGUAGES.get(lang, lang)
    return stopwords.words(language)
def mk_tokenizer(lang):
    """Build a tokenizer closure that stems each word token for *lang*."""
    stemmer = SnowballStemmer(LANGUAGES.get(lang, lang))

    def tokenize(text):
        return [stemmer.stem(token) for token in word_tokenize(text)]

    return tokenize
class AskipModel:
    """Answer questions about a Wikipedia article.

    The article's sentences are vectorized with TF-IDF and clustered with
    KMeans; ask() prints sentences from the cluster closest to the question.
    """

    def __init__(self, wikipedia_url):
        """Build the model from a full Wikipedia article URL.

        The language comes from the subdomain (e.g. "fr.wikipedia.org") and
        the page name from the last path segment.
        """
        self._vectorizer = None
        loc = urlparse(wikipedia_url)
        lang = loc.netloc.split(".", 1)[0]
        name = loc.path.split("/")[-1]
        self.set_model(name, lang=lang)

    def set_model(self, name, lang="en"):
        """Fetch the page, filter its sentences and fit TF-IDF + KMeans."""
        wikipedia.set_lang(lang)
        # https://github.com/goldsmith/Wikipedia/issues/124
        page = wikipedia.page(name, auto_suggest=False)
        texts = []
        titles = 1
        for sent in sent_tokenize(page.content):
            for p in re.split(r"\n+", sent):
                if p[0] == "=" and p[-1] == "=":
                    titles += 1
                    continue # title
                if len(p) < 30:
                    # too short to be a useful sentence
                    continue
                if "»" in p and not "«" in p:
                    # unbalanced closing quote: likely a fragment
                    continue
                texts.append(p)
        stop_words = "english" if lang == "en" else get_stop_words(lang)
        vectorizer = TfidfVectorizer(
            tokenizer=mk_tokenizer(lang),
            max_df=0.97,
            min_df=0.01,
            strip_accents="unicode",
            stop_words=stop_words)
        X = vectorizer.fit_transform(texts)
        n_clusters = max(titles, 16, len(texts)//10) # arbitrary
        km = KMeans(n_clusters=n_clusters).fit(X.todense())
        self._vectorizer = vectorizer
        self._km = km
        self._texts = texts

    def ask(self, q):
        """Print up to four corpus sentences matching question *q*."""
        q = re.sub(r"[?!]+$", "", q)
        # BUG FIX: re.IGNORECASE was passed positionally, i.e. as re.sub's
        # `count` argument, so the prefix strip was case-SENSITIVE. Pass it
        # as `flags=` so "What is ..." is stripped like "what is ...".
        q = re.sub(r"^(?:what is|what's|quel est|quelle est|que) +", "",
               q, flags=re.IGNORECASE)
        cluster = self._km.predict(self._vectorizer.transform([q]))[0]
        indexes = [i for i, cl in enumerate(self._km.labels_) if cl == cluster]
        # Try to limit the number of results by assuming sentences about a
        # subject are grouped together in the corpus.
        # We should first check if this is necessary by looking at the
        # distribution of the indexes. If they're all in the same place in the
        # corpus that step isn't necessary.
        p05 = indexes[ int(len(indexes) * 0.05) ]
        p95 = indexes[ int(len(indexes) * 0.95) ]
        indexes = [i for i in indexes if p05 <= i <= p95]
        # arbitrary limit
        for i in indexes[:4]:
            print(self._texts[i], end=" ")
        print()
print()
def main():
    """Interactive loop: build a model from a URL, then answer questions."""
    parser = argparse.ArgumentParser()
    parser.add_argument("wikipedia_url")
    args = parser.parse_args()
    model = AskipModel(args.wikipedia_url)
    while True:
        try:
            question = input("--> ")
        except EOFError:
            # Ctrl-D ends the session.
            break
        if not question or question in {"bye", "exit", "quit"}:
            break
        model.ask(question)
if __name__ == "__main__":
    # Script entry point.
    main()
|
[
"wikipedia.page",
"nltk.tokenize.word_tokenize",
"argparse.ArgumentParser",
"re.split",
"sklearn.cluster.KMeans",
"wikipedia.set_lang",
"nltk.tokenize.sent_tokenize",
"nltk.download",
"re.sub",
"urllib.parse.urlparse"
] |
[((3292, 3317), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3315, 3317), False, 'import argparse\n'), ((529, 559), 'nltk.download', 'nltk.download', (['pkg'], {'quiet': '(True)'}), '(pkg, quiet=True)\n', (542, 559), False, 'import nltk\n'), ((826, 845), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (839, 845), False, 'from nltk.tokenize import sent_tokenize, word_tokenize\n'), ((1024, 1047), 'urllib.parse.urlparse', 'urlparse', (['wikipedia_url'], {}), '(wikipedia_url)\n', (1032, 1047), False, 'from urllib.parse import urlparse\n'), ((1221, 1245), 'wikipedia.set_lang', 'wikipedia.set_lang', (['lang'], {}), '(lang)\n', (1239, 1245), False, 'import wikipedia\n'), ((1322, 1362), 'wikipedia.page', 'wikipedia.page', (['name'], {'auto_suggest': '(False)'}), '(name, auto_suggest=False)\n', (1336, 1362), False, 'import wikipedia\n'), ((1422, 1449), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['page.content'], {}), '(page.content)\n', (1435, 1449), False, 'from nltk.tokenize import sent_tokenize, word_tokenize\n'), ((2379, 2402), 're.sub', 're.sub', (['"""[?!]+$"""', '""""""', 'q'], {}), "('[?!]+$', '', q)\n", (2385, 2402), False, 'import re\n'), ((2417, 2494), 're.sub', 're.sub', (['"""^(?:what is|what\'s|quel est|quelle est|que) +"""', '""""""', 'q', 're.IGNORECASE'], {}), '("^(?:what is|what\'s|quel est|quelle est|que) +", \'\', q, re.IGNORECASE)\n', (2423, 2494), False, 'import re\n'), ((1472, 1494), 're.split', 're.split', (['"""\\\\n+"""', 'sent'], {}), "('\\\\n+', sent)\n", (1480, 1494), False, 'import re\n'), ((2208, 2237), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters'}), '(n_clusters=n_clusters)\n', (2214, 2237), False, 'from sklearn.cluster import KMeans\n')]
|
from AirSimClient import *
# connect to the AirSim simulator
import car_client_for_rl
# connect to the AirSim simulator
# Connect to the AirSim simulator through the RL car-client wrapper.
client = car_client_for_rl.CarClientForRL()
# BUG FIX: removed the stray `client.__init__` line — it only referenced the
# bound method without calling it (a no-op expression statement).
#state = client.getStatus()
#print("state: %s" % state)

# Apply one initial manual control command.
throttle = float(input("Please enter throttle: "))
steering = float(input("Please enter steering: "))
client._take_car_action(throttle, steering)

# Simple interactive loop: c = issue another command, r = reset, q = quit.
input_cmd = 'no'
while input_cmd != 'q':
    input_cmd = input("Please enter cmd\n(c: continue \nr: reset \nq: quit): \n")
    if input_cmd == 'q':
        client.reset()
        client.enableApiControl(False)
    elif input_cmd == 'r':
        client._reset_car()
        #print("state: %s" % state)
    elif input_cmd == 'c':
        throttle = float(input("Please enter throttle: "))
        steering = float(input("Please enter steering: "))
        client._take_car_action(throttle, steering)
    else:
        input_cmd = input("Invalid cmd, Please re-enter cmd(c for continue / r for reset / q for quit): ")
|
[
"car_client_for_rl.CarClientForRL"
] |
[((135, 169), 'car_client_for_rl.CarClientForRL', 'car_client_for_rl.CarClientForRL', ([], {}), '()\n', (167, 169), False, 'import car_client_for_rl\n')]
|
from tensorflow.keras.layers import (
MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization,
GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate,
DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract)
import tensorflow.keras.backend as K
import tensorflow as tf
# class GlobalAveragePooling2D(tf.keras.layers.GlobalAveragePooling2D):
# def __init__(self, keep_dims=False, **kwargs):
# super(GlobalAveragePooling2D, self).__init__(**kwargs)
# self.keep_dims = keep_dims
#
# def call(self, inputs):
# if self.keep_dims is False:
# return super(GlobalAveragePooling2D, self).call(inputs)
# else:
# return tf.keras.backend.mean(inputs, axis=[1, 2], keepdims=True)
#
# def compute_output_shape(self, input_shape):
# if self.keep_dims is False:
# return super(GlobalAveragePooling2D, self).compute_output_shape(input_shape)
# else:
# input_shape = tf.TensorShape(input_shape).as_list()
# return tf.TensorShape([input_shape[0], 1, 1, input_shape[3]])
#
# def get_config(self):
# config = super(GlobalAveragePooling2D, self).get_config()
# config['keep_dim'] = self.keep_dims
# return config
# BatchNorm hyper-parameters used across the model.
MOMENTUM = 0.99
EPSILON = 1e-5
# L2 weight decay; the /2 presumably matches a reference implementation's
# "wd/2" convention — TODO confirm.
DECAY = tf.keras.regularizers.L2(l2=0.0001/2)
# DECAY = None
# Batch normalization synchronized across replicas (multi-device training).
BN = tf.keras.layers.experimental.SyncBatchNormalization
CONV_KERNEL_INITIALIZER = tf.keras.initializers.VarianceScaling(scale=1.0, mode="fan_out", distribution="truncated_normal")
# Dilation rates for the ASPP branches in deepLabV3Plus below.
atrous_rates= (6, 12, 18)
def deepLabV3Plus(features, fpn_times=2, activation='swish', fpn_channels=64, mode='fpn'):
    """DeepLabV3+ decoder head: ASPP over the high-level feature, then
    fusion with the projected low-level skip feature.

    Args:
        features: pair ``(skip1, x)`` — low-level skip tensor and
            high-level backbone tensor (comments suggest 48/64 channels).
        fpn_times, fpn_channels, mode: unused here; kept for interface
            compatibility with sibling heads.
        activation: activation name applied after each normalization.

    Returns:
        ``(x, aux_temp_aspp)`` — the decoded feature and the upsampled
        ASPP output kept as an auxiliary-head input.
    """
    skip1, x = features  # c1 48 / c2 64
    shape_before = tf.shape(x)  # kept from original; result unused
    # Image-level (global average pooling) branch.
    b4 = GlobalAveragePooling2D()(x)
    b4_shape = tf.keras.backend.int_shape(b4)
    # from (b_size, channels)->(b_size, 1, 1, channels)
    b4 = Reshape((1, 1, b4_shape[1]))(b4)
    b4 = Conv2D(256, (1, 1), padding='same',
                kernel_regularizer=DECAY,
                use_bias=False, name='image_pooling')(b4)
    # Consistency fix: this was the only normalization in the file still
    # using plain BatchNormalization; every sibling head routes through
    # the shared BN alias (SyncBatchNormalization).
    b4 = BN(name='image_pooling_BN', epsilon=1e-5)(b4)
    b4 = Activation(activation)(b4)
    # Upsample back to the high-level feature's spatial size.
    size_before = tf.keras.backend.int_shape(x)
    b4 = tf.keras.layers.experimental.preprocessing.Resizing(
        *size_before[1:3], interpolation="bilinear"
    )(b4)
    # Simple 1x1 ASPP branch.
    b0 = Conv2D(256, (1, 1), padding='same',
                kernel_regularizer=DECAY,
                use_bias=False, name='aspp0')(x)
    b0 = BN(name='aspp0_BN', epsilon=1e-5)(b0)
    b0 = Activation(activation, name='aspp0_activation')(b0)
    # Dilated separable-conv branches at the three atrous rates.
    b1 = SepConv_BN(x, 256, 'aspp1',
                    rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)
    b2 = SepConv_BN(x, 256, 'aspp2',
                    rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)
    b3 = SepConv_BN(x, 256, 'aspp3',
                    rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)
    # Concatenate ASPP branches & project.
    x = Concatenate()([b4, b0, b1, b2, b3])
    x = Conv2D(256, (1, 1), padding='same',
               kernel_regularizer=DECAY,
               use_bias=False, name='concat_projection')(x)
    x = BN(name='concat_projection_BN', epsilon=1e-5)(x)
    x = Activation(activation)(x)
    x = Dropout(0.1)(x)
    # Upsample to the skip feature's spatial size.
    skip_size = tf.keras.backend.int_shape(skip1)
    x = tf.keras.layers.experimental.preprocessing.Resizing(
        *skip_size[1:3], interpolation="bilinear"
    )(x)
    aux_temp_aspp = x
    # Project the low-level skip feature before fusion.
    dec_skip1 = Conv2D(48, (1, 1), padding='same',
                       kernel_regularizer=DECAY,
                       use_bias=False, name='feature_projection0')(skip1)
    dec_skip1 = BN(
        name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)
    dec_skip1 = Activation(activation)(dec_skip1)
    x = Concatenate()([x, dec_skip1])
    x = SepConv_BN(x, 256, 'decoder_conv0',
                   depth_activation=True, epsilon=1e-5)
    x = SepConv_BN(x, 256, 'decoder_conv1',
                   depth_activation=True, epsilon=1e-5)
    return x, aux_temp_aspp
def proposed(features, fpn_times=2, activation='swish', fpn_channels=64, mode='fpn'):
    """Proposed decoder head: ASPP built from plain dilated 3x3 convs
    (``conv3x3``) instead of separable convs, fused with the projected
    low-level skip feature.

    Args:
        features: pair ``(skip1, x)`` of low-level and high-level tensors.
        fpn_times, fpn_channels, mode: unused; kept for interface parity.
        activation: activation name applied after each normalization.

    Returns:
        ``(x, aspp_aux, skip_aux)`` — decoder output plus the two
        intermediate tensors used for auxiliary supervision.
    """
    low_level, high_level = features  # c1 48 / c2 64
    _ = tf.shape(high_level)  # kept from original; result unused
    # Image-level (global pooling) branch.
    pooled = GlobalAveragePooling2D()(high_level)
    # (b, channels) -> (b, 1, 1, channels)
    pooled = Reshape((1, 1, tf.keras.backend.int_shape(pooled)[1]))(pooled)
    pooled = Conv2D(256, (1, 1), padding='same', kernel_regularizer=DECAY,
                    use_bias=False, name='image_pooling')(pooled)
    pooled = BN(name='image_pooling_BN', epsilon=EPSILON)(pooled)
    pooled = Activation(activation)(pooled)
    high_hw = tf.keras.backend.int_shape(high_level)[1:3]
    pooled = tf.keras.layers.experimental.preprocessing.Resizing(
        *high_hw, interpolation="bilinear")(pooled)
    # Plain 1x1 ASPP branch.
    point = Conv2D(256, (1, 1), padding='same', kernel_regularizer=DECAY,
                   use_bias=False, name='aspp0')(high_level)
    point = BN(name='aspp0_BN', epsilon=EPSILON)(point)
    point = Activation(activation, name='aspp0_activation')(point)
    # Dilated 3x3 branches at the three atrous rates.
    dil_a = conv3x3(high_level, 256, 'aspp1',
                    rate=atrous_rates[0], epsilon=EPSILON, activation=activation)
    dil_b = conv3x3(high_level, 256, 'aspp2',
                    rate=atrous_rates[1], epsilon=EPSILON, activation=activation)
    dil_c = conv3x3(high_level, 256, 'aspp3',
                    rate=atrous_rates[2], epsilon=EPSILON, activation=activation)
    # Merge all ASPP branches and project down to 256 channels.
    fused = Concatenate()([pooled, point, dil_a, dil_b, dil_c])
    fused = Conv2D(256, (1, 1), padding='same', kernel_regularizer=DECAY,
                   use_bias=False, name='concat_projection')(fused)
    fused = BN(name='concat_projection_BN', epsilon=EPSILON)(fused)
    fused = Activation(activation)(fused)
    fused = Dropout(0.1)(fused)
    # Upsample to the low-level feature's spatial size.
    low_hw = tf.keras.backend.int_shape(low_level)[1:3]
    fused = tf.keras.layers.experimental.preprocessing.Resizing(
        *low_hw, interpolation="bilinear")(fused)
    aspp_aux = fused
    # Project the low-level skip feature before fusion.
    skip_proj = Conv2D(48, (1, 1), padding='same', kernel_regularizer=DECAY,
                       use_bias=False, name='feature_projection0')(low_level)
    skip_proj = BN(
        name='feature_projection0_BN', epsilon=EPSILON)(skip_proj)
    skip_proj = Activation(activation)(skip_proj)
    fused = Concatenate()([fused, skip_proj])
    skip_aux = fused
    fused = conv3x3(fused, 256, 'decoder_conv0', epsilon=EPSILON, activation=activation)
    fused = conv3x3(fused, 256, 'decoder_conv1', epsilon=EPSILON, activation=activation)
    return fused, aspp_aux, skip_aux
def proposed_experiments(features, activation='swish'):
    """Experimental decoder head: same ASPP/skip fusion as ``proposed``
    but with an additional edge branch (``edge_creater``) concatenated
    before the decoder convolutions.

    Args:
        features: pair ``(skip1, x)`` of low-level and high-level tensors.
        activation: activation name applied after each normalization.

    Returns:
        ``(x, edge, aspp_aux)`` — decoder output, the edge feature, and
        the upsampled ASPP output kept for auxiliary supervision.
    """
    low_level, high_level = features  # c1 48 / c2 64
    _ = tf.shape(high_level)  # kept from original; result unused
    # Image-level (global pooling) branch.
    pooled = GlobalAveragePooling2D()(high_level)
    # (b, channels) -> (b, 1, 1, channels)
    pooled = Reshape((1, 1, tf.keras.backend.int_shape(pooled)[1]))(pooled)
    pooled = Conv2D(256, (1, 1), padding='same', kernel_regularizer=DECAY,
                    use_bias=False, name='image_pooling')(pooled)
    pooled = BN(name='image_pooling_BN', epsilon=EPSILON)(pooled)
    pooled = Activation(activation)(pooled)
    high_hw = tf.keras.backend.int_shape(high_level)[1:3]
    pooled = tf.keras.layers.experimental.preprocessing.Resizing(
        *high_hw, interpolation="bilinear")(pooled)
    # Plain 1x1 ASPP branch.
    point = Conv2D(256, (1, 1), padding='same', kernel_regularizer=DECAY,
                   use_bias=False, name='aspp0')(high_level)
    point = BN(name='aspp0_BN', epsilon=EPSILON)(point)
    point = Activation(activation, name='aspp0_activation')(point)
    # Dilated 3x3 branches at the three atrous rates.
    dil_a = conv3x3(high_level, 256, 'aspp1',
                    rate=atrous_rates[0], epsilon=EPSILON, activation=activation)
    dil_b = conv3x3(high_level, 256, 'aspp2',
                    rate=atrous_rates[1], epsilon=EPSILON, activation=activation)
    dil_c = conv3x3(high_level, 256, 'aspp3',
                    rate=atrous_rates[2], epsilon=EPSILON, activation=activation)
    # Merge all ASPP branches and project down to 256 channels.
    fused = Concatenate()([pooled, point, dil_a, dil_b, dil_c])
    fused = Conv2D(256, (1, 1), padding='same', kernel_regularizer=DECAY,
                   use_bias=False, name='concat_projection')(fused)
    fused = BN(name='concat_projection_BN', epsilon=EPSILON)(fused)
    fused = Activation(activation)(fused)
    fused = Dropout(0.1)(fused)
    # Upsample to the low-level feature's spatial size.
    low_hw = tf.keras.backend.int_shape(low_level)[1:3]
    fused = tf.keras.layers.experimental.preprocessing.Resizing(
        *low_hw, interpolation="bilinear")(fused)
    aspp_aux = fused
    # Project the low-level skip feature before fusion.
    skip_proj = Conv2D(48, (1, 1), padding='same', kernel_regularizer=DECAY,
                       use_bias=False, name='feature_projection0')(low_level)
    skip_proj = BN(
        name='feature_projection0_BN', epsilon=EPSILON)(skip_proj)
    skip_proj = Activation(activation)(skip_proj)
    # Edge branch built from the raw skip and the upsampled ASPP feature.
    edge = edge_creater(skip_x=low_level, aspp_feature=fused,
                        epsilon=EPSILON, activation=activation)
    fused = Concatenate()([fused, skip_proj, edge])
    fused = conv3x3(fused, 256, 'decoder_conv1', epsilon=EPSILON, activation=activation)
    fused = conv3x3(fused, 256, 'decoder_conv2', epsilon=EPSILON, activation=activation)
    return fused, edge, aspp_aux
def edge_creater(skip_x, aspp_feature, epsilon=1e-3, activation='relu'):
    """Build a 24-channel edge feature as the residual between the fused
    (skip + ASPP) projection and the refined ASPP feature.

    Args:
        skip_x: low-level skip tensor.
        aspp_feature: ASPP decoder tensor at the same spatial size as
            ``skip_x`` — TODO confirm; callers upsample before this call.
        epsilon: batch-norm epsilon used by every BN in this branch.
        activation: activation name applied after each BN.

    Returns:
        Edge tensor with 24 channels.
    """
    # Bug fix: the BN layers below used the module-level EPSILON constant
    # and silently ignored this function's `epsilon` parameter (only the
    # conv3x3 call honoured it).  The in-file caller passes
    # epsilon=EPSILON, so its behaviour is unchanged.
    skip_x = Conv2D(24, kernel_size=1, strides=1, padding='same', use_bias=False)(skip_x)
    skip_x = BN(epsilon=epsilon)(skip_x)
    skip_x = Activation(activation)(skip_x)
    aspp_feature = Conv2D(256, kernel_size=1, strides=1, padding='same', use_bias=False)(aspp_feature)
    aspp_feature = BN(epsilon=epsilon)(aspp_feature)
    aspp_feature = Activation(activation)(aspp_feature)
    aspp_feature = conv3x3(aspp_feature, 256, prefix='aspp_feature_128x', stride=1,
                           kernel_size=3, rate=1, epsilon=epsilon, activation=activation)
    concat_feature = Concatenate()([skip_x, aspp_feature])
    concat_feature = Conv2D(256, kernel_size=1, strides=1, padding='same', use_bias=False)(concat_feature)
    concat_feature = BN(epsilon=epsilon)(concat_feature)
    concat_feature = Activation(activation)(concat_feature)
    # The difference isolates what the skip path adds on top of the ASPP
    # feature, i.e. fine boundary detail.
    edge = Subtract()([concat_feature, aspp_feature])
    edge = Conv2D(24, kernel_size=1, strides=1, padding='same', use_bias=False)(edge)
    edge = BN(epsilon=epsilon)(edge)
    edge = Activation(activation)(edge)
    return edge
def conv3x3(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3, activation='swish', mode='sep'):
    """3x3 convolution block, either separable (default) or standard.

    In separable mode the block mirrors SepConv_BN: optional pre-activation,
    depthwise conv + BN, pointwise conv + BN, with the activation placed
    before or between/after depending on ``depth_activation``.  In 'std'
    mode it is a single dilated Conv2D + BN + activation.

    Args:
        x: input tensor.
        filters: output channels of the pointwise / standard conv.
        prefix: layer-name prefix.
        stride, kernel_size, rate: conv geometry (rate = dilation).
        depth_activation: if True, activate after each BN instead of once
            before the depthwise conv (separable mode only).
        epsilon: BN epsilon.
        activation: activation name.
        mode: 'std' for a plain conv, anything else for separable.
    """
    if mode == 'std':
        # Single standard dilated convolution.
        x = Conv2D(filters=filters, kernel_size=(kernel_size, kernel_size), strides=(stride, stride),
                   padding='same', kernel_regularizer=DECAY,
                   use_bias=False, dilation_rate=(rate, rate), name=prefix + '_stdConv')(x)
        x = BN(name=prefix + '_stdConv_BN', epsilon=epsilon)(x)
        x = Activation(activation)(x)
        return x
    # Separable path: choose padding so strided convs stay aligned.
    if stride == 1:
        pad_mode = 'same'
    else:
        effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
        total_pad = effective_kernel - 1
        left_pad = total_pad // 2
        x = ZeroPadding2D((left_pad, total_pad - left_pad))(x)
        pad_mode = 'valid'
    if not depth_activation:
        x = Activation(activation)(x)
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
                        kernel_regularizer=DECAY,
                        padding=pad_mode, use_bias=False, name=prefix + '_depthwise')(x)
    x = BN(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation(activation)(x)
    x = Conv2D(filters, (1, 1), padding='same',
               kernel_regularizer=DECAY,
               use_bias=False, name=prefix + '_pointwise')(x)
    x = BN(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation(activation)(x)
    return x
def decoding_block(input_feature, channel_ratio=8, name=None):
    """Apply CBAM-style attention (channel then spatial) to a feature map.

    Args:
        input_feature: 4-D channels-last tensor.
        channel_ratio: squeeze ratio for the channel-attention MLP.
        name: unused; kept for interface compatibility (it belonged to a
            SepConv_BN refinement that is now disabled).

    Returns:
        ``(x, temp)`` — both elements are the attended tensor; the alias
        is kept so callers expecting the pre-refinement pair still work.
    """
    # Fix: removed a dead `input_shape = tf.keras.backend.int_shape(...)`
    # assignment left over from the disabled refinement path below.
    x = channel_attention(input_feature=input_feature, ratio=channel_ratio)
    x = spatial_attention(x)
    temp = x
    return x, temp
def channel_attention(input_feature, ratio=8):
    """CBAM channel-attention gate.

    Shared two-layer MLP applied to both global-average and global-max
    pooled descriptors; their sum, passed through a sigmoid, scales the
    input channel-wise.

    Note: assumes channels-last 4-D input — the channel count is read
    from axis 3 regardless of the backend image data format.

    Args:
        input_feature: 4-D channels-last tensor.
        ratio: squeeze ratio of the shared MLP bottleneck.

    Returns:
        ``input_feature`` scaled by the learned per-channel gate.
    """
    # Fix: removed the unused `channel_axis` local (computed via
    # K.image_data_format() but never consumed — channels-last was
    # hard-coded below anyway).
    input_shape = tf.keras.backend.int_shape(input_feature)
    channel = input_shape[3]
    # Shared MLP: squeeze to channel//ratio, then restore to channel.
    shared_layer_one = Dense(channel // ratio,
                             activation='swish',
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')
    shared_layer_two = Dense(channel,
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')
    # Average-pool branch.
    avg_pool = GlobalAveragePooling2D()(input_feature)
    avg_pool = Reshape((1, 1, channel))(avg_pool)
    avg_pool = shared_layer_one(avg_pool)
    avg_pool = shared_layer_two(avg_pool)
    # Max-pool branch (reuses the same two Dense layers).
    max_pool = GlobalMaxPooling2D()(input_feature)
    max_pool = Reshape((1, 1, channel))(max_pool)
    max_pool = shared_layer_one(max_pool)
    max_pool = shared_layer_two(max_pool)
    cbam_feature = Add()([avg_pool, max_pool])
    cbam_feature = Activation('sigmoid')(cbam_feature)
    return multiply([input_feature, cbam_feature])
def spatial_attention(input_feature, kernel_size=7):
    """CBAM spatial-attention gate.

    Mean- and max-pools across the channel axis, stacks the two maps, and
    learns a single-channel sigmoid mask that rescales the input.

    Args:
        input_feature: 4-D channels-last tensor.
        kernel_size: spatial extent of the mask-producing convolution.

    Returns:
        ``input_feature`` scaled by the learned spatial mask.
    """
    pooled_mean = Lambda(lambda t: K.mean(t, axis=3, keepdims=True))(input_feature)
    pooled_max = Lambda(lambda t: K.max(t, axis=3, keepdims=True))(input_feature)
    stacked = Concatenate(axis=3)([pooled_mean, pooled_max])
    attention_map = Conv2D(filters=1,
                           kernel_size=kernel_size,
                           strides=1,
                           padding='same',
                           activation='sigmoid',
                           kernel_initializer='he_normal',
                           use_bias=False)(stacked)
    return multiply([input_feature, attention_map])
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
    """SepConv with BN between depthwise & pointwise. Optionally add activation after BN.

    Implements right "same" padding for even kernel sizes.

    Args:
        x: input tensor
        filters: num of filters in pointwise convolution
        prefix: prefix before name
        stride: stride at depthwise conv
        kernel_size: kernel size for depthwise convolution
        rate: atrous rate for depthwise convolution
        depth_activation: flag to use activation between depthwise & poinwise convs
        epsilon: epsilon to use in BN layer
    """
    # Fix: this docstring previously appeared *after* the assignment
    # below, making it a no-op string expression (the function's __doc__
    # was None).  It is now a real docstring; logic is unchanged.
    activation = 'swish'  # hard-coded; depth_activation only controls placement
    if stride == 1:
        depth_padding = 'same'
    else:
        # Explicit padding so strided atrous convs keep "same" alignment.
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'
    if not depth_activation:
        x = Activation(activation)(x)
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
                        kernel_regularizer=DECAY,
                        padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
    x = BN(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation(activation)(x)
    x = Conv2D(filters, (1, 1), padding='same',
               kernel_regularizer=DECAY,
               use_bias=False, name=prefix + '_pointwise')(x)
    x = BN(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation(activation)(x)
    return x
|
[
"tensorflow.keras.layers.multiply",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.backend.max",
"tensorflow.keras.backend.int_shape",
"tensorflow.keras.regularizers.L2",
"tensorflow.keras.layers.DepthwiseConv2D",
"tensorflow.keras.layers.experimental.preprocessing.Resizing",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.ZeroPadding2D",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.backend.image_data_format",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Subtract",
"tensorflow.keras.backend.mean",
"tensorflow.keras.initializers.VarianceScaling",
"tensorflow.shape",
"tensorflow.keras.layers.Add"
] |
[((1377, 1416), 'tensorflow.keras.regularizers.L2', 'tf.keras.regularizers.L2', ([], {'l2': '(0.0001 / 2)'}), '(l2=0.0001 / 2)\n', (1401, 1416), True, 'import tensorflow as tf\n'), ((1513, 1614), 'tensorflow.keras.initializers.VarianceScaling', 'tf.keras.initializers.VarianceScaling', ([], {'scale': '(1.0)', 'mode': '"""fan_out"""', 'distribution': '"""truncated_normal"""'}), "(scale=1.0, mode='fan_out',\n distribution='truncated_normal')\n", (1550, 1614), True, 'import tensorflow as tf\n'), ((1816, 1827), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (1824, 1827), True, 'import tensorflow as tf\n'), ((1880, 1910), 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['b4'], {}), '(b4)\n', (1906, 1910), True, 'import tensorflow as tf\n'), ((2350, 2379), 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['x'], {}), '(x)\n', (2376, 2379), True, 'import tensorflow as tf\n'), ((3740, 3773), 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['skip1'], {}), '(skip1)\n', (3766, 3773), True, 'import tensorflow as tf\n'), ((4831, 4842), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (4839, 4842), True, 'import tensorflow as tf\n'), ((4895, 4925), 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['b4'], {}), '(b4)\n', (4921, 4925), True, 'import tensorflow as tf\n'), ((5428, 5457), 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['x'], {}), '(x)\n', (5454, 5457), True, 'import tensorflow as tf\n'), ((6853, 6886), 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['skip1'], {}), '(skip1)\n', (6879, 6886), True, 'import tensorflow as tf\n'), ((7842, 7853), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (7850, 7853), True, 'import tensorflow as tf\n'), ((7906, 7936), 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['b4'], {}), '(b4)\n', (7932, 7936), True, 'import tensorflow as tf\n'), ((8439, 8468), 
'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['x'], {}), '(x)\n', (8465, 8468), True, 'import tensorflow as tf\n'), ((9864, 9897), 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['skip1'], {}), '(skip1)\n', (9890, 9897), True, 'import tensorflow as tf\n'), ((13953, 13994), 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['input_feature'], {}), '(input_feature)\n', (13979, 13994), True, 'import tensorflow as tf\n'), ((14784, 14825), 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['input_feature'], {}), '(input_feature)\n', (14810, 14825), True, 'import tensorflow as tf\n'), ((14879, 14999), 'tensorflow.keras.layers.Dense', 'Dense', (['(channel // ratio)'], {'activation': '"""swish"""', 'kernel_initializer': '"""he_normal"""', 'use_bias': '(True)', 'bias_initializer': '"""zeros"""'}), "(channel // ratio, activation='swish', kernel_initializer='he_normal',\n use_bias=True, bias_initializer='zeros')\n", (14884, 14999), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((15135, 15226), 'tensorflow.keras.layers.Dense', 'Dense', (['channel'], {'kernel_initializer': '"""he_normal"""', 'use_bias': '(True)', 'bias_initializer': '"""zeros"""'}), "(channel, kernel_initializer='he_normal', use_bias=True,\n bias_initializer='zeros')\n", (15140, 15226), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((16167, 16206), 'tensorflow.keras.layers.multiply', 'multiply', (['[input_feature, 
cbam_feature]'], {}), '([input_feature, cbam_feature])\n', (16175, 16206), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((17024, 17063), 'tensorflow.keras.layers.multiply', 'multiply', (['[input_feature, cbam_feature]'], {}), '([input_feature, cbam_feature])\n', (17032, 17063), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((1837, 1861), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (1859, 1861), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((1976, 2004), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(1, 1, b4_shape[1])'], {}), '((1, 1, b4_shape[1]))\n', (1983, 2004), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((2018, 2122), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': '"""image_pooling"""'}), "(256, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=\n False, name='image_pooling')\n", 
(2024, 2122), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((2163, 2221), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""image_pooling_BN"""', 'epsilon': '(1e-05)'}), "(name='image_pooling_BN', epsilon=1e-05)\n", (2181, 2221), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((2234, 2256), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (2244, 2256), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((2389, 2489), 'tensorflow.keras.layers.experimental.preprocessing.Resizing', 'tf.keras.layers.experimental.preprocessing.Resizing', (['*size_before[1:3]'], {'interpolation': '"""bilinear"""'}), "(*size_before[1:3],\n interpolation='bilinear')\n", (2440, 2489), True, 'import tensorflow as tf\n'), ((2608, 2704), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': '"""aspp0"""'}), "(256, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=\n False, name='aspp0')\n", (2614, 2704), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, 
Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((2856, 2903), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {'name': '"""aspp0_activation"""'}), "(activation, name='aspp0_activation')\n", (2866, 2903), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((3350, 3363), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (3361, 3363), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((3395, 3503), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': '"""concat_projection"""'}), "(256, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=\n False, name='concat_projection')\n", (3401, 3503), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((3672, 3694), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (3682, 3694), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, 
Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((3707, 3719), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (3714, 3719), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((3782, 3880), 'tensorflow.keras.layers.experimental.preprocessing.Resizing', 'tf.keras.layers.experimental.preprocessing.Resizing', (['*skip_size[1:3]'], {'interpolation': '"""bilinear"""'}), "(*skip_size[1:3],\n interpolation='bilinear')\n", (3833, 3880), True, 'import tensorflow as tf\n'), ((3994, 4102), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(48)', '(1, 1)'], {'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': '"""feature_projection0"""'}), "(48, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=False,\n name='feature_projection0')\n", (4000, 4102), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((4356, 4378), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (4366, 4378), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((4398, 4411), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (4409, 4411), False, 'from tensorflow.keras.layers import MaxPooling2D, 
SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((4852, 4876), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (4874, 4876), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((4991, 5019), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(1, 1, b4_shape[1])'], {}), '((1, 1, b4_shape[1]))\n', (4998, 5019), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((5033, 5137), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': '"""image_pooling"""'}), "(256, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=\n False, name='image_pooling')\n", (5039, 5137), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((5312, 5334), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (5322, 5334), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, 
Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((5467, 5567), 'tensorflow.keras.layers.experimental.preprocessing.Resizing', 'tf.keras.layers.experimental.preprocessing.Resizing', (['*size_before[1:3]'], {'interpolation': '"""bilinear"""'}), "(*size_before[1:3],\n interpolation='bilinear')\n", (5518, 5567), True, 'import tensorflow as tf\n'), ((5686, 5782), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': '"""aspp0"""'}), "(256, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=\n False, name='aspp0')\n", (5692, 5782), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((5940, 5987), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {'name': '"""aspp0_activation"""'}), "(activation, name='aspp0_activation')\n", (5950, 5987), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((6433, 6446), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (6444, 6446), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((6478, 6586), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], 
{'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': '"""concat_projection"""'}), "(256, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=\n False, name='concat_projection')\n", (6484, 6586), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((6761, 6783), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (6771, 6783), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((6796, 6808), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (6803, 6808), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((6895, 6993), 'tensorflow.keras.layers.experimental.preprocessing.Resizing', 'tf.keras.layers.experimental.preprocessing.Resizing', (['*skip_size[1:3]'], {'interpolation': '"""bilinear"""'}), "(*skip_size[1:3],\n interpolation='bilinear')\n", (6946, 6993), True, 'import tensorflow as tf\n'), ((7042, 7150), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(48)', '(1, 1)'], {'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': '"""feature_projection0"""'}), "(48, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=False,\n name='feature_projection0')\n", (7048, 7150), False, 'from 
tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((7410, 7432), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (7420, 7432), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((7452, 7465), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (7463, 7465), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((7863, 7887), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (7885, 7887), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((8002, 8030), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(1, 1, b4_shape[1])'], {}), '((1, 1, b4_shape[1]))\n', (8009, 8030), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((8044, 8148), 
'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': '"""image_pooling"""'}), "(256, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=\n False, name='image_pooling')\n", (8050, 8148), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((8323, 8345), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (8333, 8345), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((8478, 8578), 'tensorflow.keras.layers.experimental.preprocessing.Resizing', 'tf.keras.layers.experimental.preprocessing.Resizing', (['*size_before[1:3]'], {'interpolation': '"""bilinear"""'}), "(*size_before[1:3],\n interpolation='bilinear')\n", (8529, 8578), True, 'import tensorflow as tf\n'), ((8697, 8793), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': '"""aspp0"""'}), "(256, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=\n False, name='aspp0')\n", (8703, 8793), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((8951, 8998), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], 
{'name': '"""aspp0_activation"""'}), "(activation, name='aspp0_activation')\n", (8961, 8998), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((9444, 9457), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (9455, 9457), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((9489, 9597), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': '"""concat_projection"""'}), "(256, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=\n False, name='concat_projection')\n", (9495, 9597), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((9772, 9794), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (9782, 9794), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((9807, 9819), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (9814, 9819), False, 'from tensorflow.keras.layers import 
MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((9906, 10004), 'tensorflow.keras.layers.experimental.preprocessing.Resizing', 'tf.keras.layers.experimental.preprocessing.Resizing', (['*skip_size[1:3]'], {'interpolation': '"""bilinear"""'}), "(*skip_size[1:3],\n interpolation='bilinear')\n", (9957, 10004), True, 'import tensorflow as tf\n'), ((10052, 10160), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(48)', '(1, 1)'], {'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': '"""feature_projection0"""'}), "(48, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=False,\n name='feature_projection0')\n", (10058, 10160), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((10420, 10442), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (10430, 10442), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((10558, 10571), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (10569, 10571), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, 
Subtract\n'), ((10874, 10942), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(24)'], {'kernel_size': '(1)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(24, kernel_size=1, strides=1, padding='same', use_bias=False)\n", (10880, 10942), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((11005, 11027), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (11015, 11027), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((11056, 11125), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)'], {'kernel_size': '(1)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(256, kernel_size=1, strides=1, padding='same', use_bias=False)\n", (11062, 11125), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((11212, 11234), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (11222, 11234), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((11432, 11445), 
'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (11443, 11445), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((11492, 11561), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)'], {'kernel_size': '(1)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(256, kernel_size=1, strides=1, padding='same', use_bias=False)\n", (11498, 11561), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((11656, 11678), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (11666, 11678), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((11707, 11717), 'tensorflow.keras.layers.Subtract', 'Subtract', ([], {}), '()\n', (11715, 11717), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((11762, 11830), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(24)'], {'kernel_size': '(1)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(24, kernel_size=1, strides=1, padding='same', use_bias=False)\n", 
(11768, 11830), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((11885, 11907), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (11895, 11907), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((15326, 15350), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (15348, 15350), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((15381, 15405), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(1, 1, channel)'], {}), '((1, 1, channel))\n', (15388, 15405), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((15699, 15719), 'tensorflow.keras.layers.GlobalMaxPooling2D', 'GlobalMaxPooling2D', ([], {}), '()\n', (15717, 15719), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, 
Permute, Lambda, Subtract\n'), ((15750, 15774), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(1, 1, channel)'], {}), '((1, 1, channel))\n', (15757, 15774), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((16072, 16077), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (16075, 16077), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((16119, 16140), 'tensorflow.keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (16129, 16140), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((16556, 16575), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(3)'}), '(axis=3)\n', (16567, 16575), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((16659, 16802), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(1)', 'kernel_size': 'kernel_size', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""sigmoid"""', 'kernel_initializer': '"""he_normal"""', 'use_bias': '(False)'}), "(filters=1, 
kernel_size=kernel_size, strides=1, padding='same',\n activation='sigmoid', kernel_initializer='he_normal', use_bias=False)\n", (16665, 16802), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((18213, 18412), 'tensorflow.keras.layers.DepthwiseConv2D', 'DepthwiseConv2D', (['(kernel_size, kernel_size)'], {'strides': '(stride, stride)', 'dilation_rate': '(rate, rate)', 'kernel_regularizer': 'DECAY', 'padding': 'depth_padding', 'use_bias': '(False)', 'name': "(prefix + '_depthwise')"}), "((kernel_size, kernel_size), strides=(stride, stride),\n dilation_rate=(rate, rate), kernel_regularizer=DECAY, padding=\n depth_padding, use_bias=False, name=prefix + '_depthwise')\n", (18228, 18412), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((18668, 18782), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters', '(1, 1)'], {'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': "(prefix + '_pointwise')"}), "(filters, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=\n False, name=prefix + '_pointwise')\n", (18674, 18782), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((12554, 12753), 'tensorflow.keras.layers.DepthwiseConv2D', 'DepthwiseConv2D', (['(kernel_size, kernel_size)'], 
{'strides': '(stride, stride)', 'dilation_rate': '(rate, rate)', 'kernel_regularizer': 'DECAY', 'padding': 'depth_padding', 'use_bias': '(False)', 'name': "(prefix + '_depthwise')"}), "((kernel_size, kernel_size), strides=(stride, stride),\n dilation_rate=(rate, rate), kernel_regularizer=DECAY, padding=\n depth_padding, use_bias=False, name=prefix + '_depthwise')\n", (12569, 12753), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((13037, 13151), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters', '(1, 1)'], {'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'name': "(prefix + '_pointwise')"}), "(filters, (1, 1), padding='same', kernel_regularizer=DECAY, use_bias=\n False, name=prefix + '_pointwise')\n", (13043, 13151), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((13432, 13643), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'filters', 'kernel_size': '(kernel_size, kernel_size)', 'strides': '(stride, stride)', 'padding': '"""same"""', 'kernel_regularizer': 'DECAY', 'use_bias': '(False)', 'dilation_rate': '(rate, rate)', 'name': "(prefix + '_stdConv')"}), "(filters=filters, kernel_size=(kernel_size, kernel_size), strides=(\n stride, stride), padding='same', kernel_regularizer=DECAY, use_bias=\n False, dilation_rate=(rate, rate), name=prefix + '_stdConv')\n", (13438, 13643), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, 
Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((13830, 13852), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (13840, 13852), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((14658, 14679), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (14677, 14679), True, 'import tensorflow.keras.backend as K\n'), ((18068, 18101), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(pad_beg, pad_end)'], {}), '((pad_beg, pad_end))\n', (18081, 18101), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((18179, 18201), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (18189, 18201), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((18634, 18656), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (18644, 18656), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, 
Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((18990, 19012), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (19000, 19012), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((12393, 12426), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(pad_beg, pad_end)'], {}), '((pad_beg, pad_end))\n', (12406, 12426), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((12516, 12538), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (12526, 12538), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((12999, 13021), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (13009, 13021), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((13383, 13405), 'tensorflow.keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (13393, 13405), False, 'from tensorflow.keras.layers import MaxPooling2D, SeparableConv2D, 
UpSampling2D, Activation, BatchNormalization, GlobalAveragePooling2D, Conv2D, Dropout, Concatenate, multiply, Add, concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D, Dense, GlobalMaxPooling2D, Permute, Lambda, Subtract\n'), ((16328, 16360), 'tensorflow.keras.backend.mean', 'K.mean', (['x'], {'axis': '(3)', 'keepdims': '(True)'}), '(x, axis=3, keepdims=True)\n', (16334, 16360), True, 'import tensorflow.keras.backend as K\n'), ((16452, 16483), 'tensorflow.keras.backend.max', 'K.max', (['x'], {'axis': '(3)', 'keepdims': '(True)'}), '(x, axis=3, keepdims=True)\n', (16457, 16483), True, 'import tensorflow.keras.backend as K\n')]
|
import flask
import zeeguu
from flask import request
from zeeguu.content_recommender.mixed_recommender import user_article_info
from zeeguu.model import Article, UserArticle
from .utils.route_wrappers import cross_domain, with_session
from .utils.json_result import json_result
from . import api, db_session
# ---------------------------------------------------------------------------
@api.route("/user_article", methods=("GET",))
# ---------------------------------------------------------------------------
@cross_domain
@with_session
def user_article():
    """
    called user_article because it returns info about the article
    but also the user-specific data relative to the article
    takes url as URL argument
    NOTE: the url should be encoded with quote_plus (Pyton) and encodeURIComponent(Javascript)
    this is not perfectly RESTful, but we're not fundamentalist...
    and currently we want to have the url as the URI for the article
    and for some reason if we put the uri as part of the path,
    apache decodes it before we get it in here.
    so for now, we're just not putting it as part of the path
    :return: json as prepared by content_recommender.mixed_recommender.user_article_info
    """
    # A missing or non-numeric article_id used to blow up in int('') with a
    # ValueError (-> HTTP 500) before the abort(400) below could ever run.
    # Treat both cases as a client error instead.
    try:
        article_id = int(request.args.get('article_id', ''))
    except ValueError:
        flask.abort(400)
    if not article_id:
        flask.abort(400)
    article = Article.query.filter_by(id=article_id).one()
    return json_result(user_article_info(flask.g.user, article, with_content=True))
# ---------------------------------------------------------------------------
@api.route("/user_article", methods=("POST",))
# ---------------------------------------------------------------------------
@cross_domain
@with_session
def user_article_update():
    """
    update info about this (user x article) pair
    in the form data you can provide
    - liked=True|1|False|0
    - starred -ibidem-
    :return: json as prepared by content_recommender.mixed_recommender.user_article_info
    """
    # form.get('article_id') returns None when the field is absent, so the
    # original int(...) raised TypeError (-> HTTP 500). A malformed value
    # raised ValueError. Both are client errors: answer 400 instead.
    try:
        article_id = int(request.form.get('article_id'))
    except (TypeError, ValueError):
        flask.abort(400)
    starred = request.form.get('starred')
    liked = request.form.get('liked')
    article = Article.query.filter_by(id=article_id).one()
    user_article = UserArticle.find_or_create(db_session, flask.g.user, article)
    # Flags arrive as strings; only the literal "True"/"1" count as truthy.
    if starred is not None:
        user_article.set_starred(starred in ["True", "1"])
    if liked is not None:
        user_article.set_liked(liked in ["True", "1"])
    db_session.commit()
    return "OK"
# ---------------------------------------------------------------------------
# !!!!!!!!!!!!!!!!!!!!!!!!! DEPRECATED !!!!!!!!!!!!!!!!!!!!!!!!!
@api.route("/get_user_article_info", methods=("POST",))
# !!!!!!!!!!!!!!!!!!!!!!!!! DEPRECATED !!!!!!!!!!!!!!!!!!!!!!!!!
# ---------------------------------------------------------------------------
@cross_domain
@with_session
def get_user_article_info():
    """Deprecated variant of /user_article.

    Expects one POST form parameter: url.
    :return: json dictionary with the user-specific article info
    """
    target_url = str(request.form.get('url', ''))
    matching_article = Article.find_or_create(db_session, target_url)
    info = user_article_info(flask.g.user, matching_article)
    return json_result(info)
[
"zeeguu.model.Article.find_or_create",
"flask.request.args.get",
"flask.request.form.get",
"zeeguu.content_recommender.mixed_recommender.user_article_info",
"flask.abort",
"zeeguu.model.Article.query.filter_by",
"zeeguu.model.UserArticle.find_or_create"
] |
[((2107, 2134), 'flask.request.form.get', 'request.form.get', (['"""starred"""'], {}), "('starred')\n", (2123, 2134), False, 'from flask import request\n'), ((2147, 2172), 'flask.request.form.get', 'request.form.get', (['"""liked"""'], {}), "('liked')\n", (2163, 2172), False, 'from flask import request\n'), ((2253, 2314), 'zeeguu.model.UserArticle.find_or_create', 'UserArticle.find_or_create', (['db_session', 'flask.g.user', 'article'], {}), '(db_session, flask.g.user, article)\n', (2279, 2314), False, 'from zeeguu.model import Article, UserArticle\n'), ((3080, 3119), 'zeeguu.model.Article.find_or_create', 'Article.find_or_create', (['db_session', 'url'], {}), '(db_session, url)\n', (3102, 3119), False, 'from zeeguu.model import Article, UserArticle\n'), ((1290, 1324), 'flask.request.args.get', 'request.args.get', (['"""article_id"""', '""""""'], {}), "('article_id', '')\n", (1306, 1324), False, 'from flask import request\n'), ((1357, 1373), 'flask.abort', 'flask.abort', (['(400)'], {}), '(400)\n', (1368, 1373), False, 'import flask\n'), ((1458, 1517), 'zeeguu.content_recommender.mixed_recommender.user_article_info', 'user_article_info', (['flask.g.user', 'article'], {'with_content': '(True)'}), '(flask.g.user, article, with_content=True)\n', (1475, 1517), False, 'from zeeguu.content_recommender.mixed_recommender import user_article_info\n'), ((2061, 2091), 'flask.request.form.get', 'request.form.get', (['"""article_id"""'], {}), "('article_id')\n", (2077, 2091), False, 'from flask import request\n'), ((3036, 3063), 'flask.request.form.get', 'request.form.get', (['"""url"""', '""""""'], {}), "('url', '')\n", (3052, 3063), False, 'from flask import request\n'), ((3144, 3184), 'zeeguu.content_recommender.mixed_recommender.user_article_info', 'user_article_info', (['flask.g.user', 'article'], {}), '(flask.g.user, article)\n', (3161, 3184), False, 'from zeeguu.content_recommender.mixed_recommender import user_article_info\n'), ((1389, 1427), 
'zeeguu.model.Article.query.filter_by', 'Article.query.filter_by', ([], {'id': 'article_id'}), '(id=article_id)\n', (1412, 1427), False, 'from zeeguu.model import Article, UserArticle\n'), ((2188, 2226), 'zeeguu.model.Article.query.filter_by', 'Article.query.filter_by', ([], {'id': 'article_id'}), '(id=article_id)\n', (2211, 2226), False, 'from zeeguu.model import Article, UserArticle\n')]
|
import smtplib
from email.message import EmailMessage
from datetime import datetime
def notify_error(report_name, error_log, to_list: str, login: str, password: str):
    """Auto-notify for automated scripts crashing.

    Sends an HTML email through the Office 365 SMTP relay.

    :param report_name: Name of automated report.
    :param error_log: Raised exception or other error to report.
    :param to_list: Semicolon separated list of email addresses. (ex - <EMAIL>; <EMAIL>; <EMAIL>;)
    :param login: Office 365 login email. This is also used for the from field.
    :param password: Office 365 password.
    """
    # Build the message before touching the network so a formatting error
    # never leaves a half-open SMTP session behind.
    msg = EmailMessage()
    msg.add_header("Content-Type", "text/html")
    message = f"""
    <HTML>
        <BODY>
        {report_name} failed on execution at {datetime.now().strftime("%m/%d/%Y %H:%M:%S")}
        <br>
        Error Log:
        <br>
        {error_log}
        <br>
        </BODY>
    </HTML>"""
    msg.set_payload(message)
    msg["Subject"] = f"Automated Report Error Notification - {report_name}"
    msg["From"] = login
    msg["To"] = to_list
    # Context manager guarantees the connection is closed (QUIT sent) even if
    # starttls/login/send raises; the original leaked the socket on failure.
    with smtplib.SMTP("smtp.office365.com", 587) as mailserver:
        mailserver.ehlo()
        mailserver.starttls()
        mailserver.login(login, password)
        mailserver.send_message(msg)
|
[
"email.message.EmailMessage",
"datetime.datetime.now",
"smtplib.SMTP"
] |
[((582, 621), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.office365.com"""', '(587)'], {}), "('smtp.office365.com', 587)\n", (594, 621), False, 'import smtplib\n'), ((718, 732), 'email.message.EmailMessage', 'EmailMessage', ([], {}), '()\n', (730, 732), False, 'from email.message import EmailMessage\n'), ((975, 989), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (987, 989), False, 'from datetime import datetime\n')]
|
import argparse
import datetime
import os,sys
import time
# NOTE(review): hard-coded absolute checkout path — this script only runs on
# this exact machine/layout. TODO: derive the project root from __file__.
os.chdir('/home/qiuziming/product/torchdistill')
# Make the project root importable (the repo is not installed as a package).
root=os.getcwd()
sys.path.append(root)
import torch
from torch import distributed as dist
from torch.backends import cudnn
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from torchdistill.common import file_util, yaml_util, module_util
from torchdistill.common.constant import def_logger
from torchdistill.common.main_util import is_main_process, init_distributed_mode, load_ckpt, save_ckpt, set_seed
from torchdistill.core.distillation import get_distillation_box
from torchdistill.core.training import get_training_box
from torchdistill.datasets import util
from torchdistill.eval.classification import compute_accuracy
from torchdistill.misc.log import setup_log_file, SmoothedValue, MetricLogger
from torchdistill.models.official import get_image_classification_model
from torchdistill.models.registry import get_model
# Global capture buffers filled by forward hooks (inps appears unused in
# this excerpt) and the module-level logger.
inps,outs=[],[]
logger = def_logger.getChild(__name__)
def layer_hook(module,inp,out):
    # Forward hook: record the module's output in the global `outs` buffer for
    # later visualization. `module` and `inp` are required by the hook
    # signature but unused here.
    outs.append(out)
def get_argparser():
    """Build the command-line parser for the distillation/visualization run."""
    p = argparse.ArgumentParser(description='Knowledge distillation for image classification models')
    p.add_argument('--config',default='configs/sample/cifar10/kd/resnet18_from_resnet50_visualize.yaml',help='yaml file path')
    # densenet100_from_densenet250-final_run.yaml resnet18_from_resnet50-final_run.yaml
    p.add_argument('--device', default='cuda', help='device')
    p.add_argument('--log', default='log/cifar10/kd/fkd/resnet18_from_resnet50_visualize.txt',help='log file path')
    p.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    p.add_argument('--seed', type=int, help='seed in random number generator')
    # Flags (single-dash spelling kept for backward compatibility).
    p.add_argument('-test_only', action='store_true', help='only test the models')
    p.add_argument('-student_only', action='store_true', help='test the student model only')
    p.add_argument('-log_config', action='store_true', help='log config')
    # distributed training parameters
    p.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    p.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    p.add_argument('-adjust_lr', action='store_true',
                   help='multiply learning rate by number of distributed processes (world_size)')
    return p
def load_model(model_config, device, distributed):
    """Instantiate the configured model, load its checkpoint, move it to device."""
    model = get_image_classification_model(model_config, distributed)
    if model is None:
        # Not an official torchvision-style model: fall back to the registry.
        model = get_model(model_config['name'],
                          model_config.get('repo_or_dir', None),
                          **model_config['params'])
    load_ckpt(model_config['ckpt'], model=model, strict=True)
    return model.to(device)
def train_one_epoch(training_box, device, epoch, log_freq):
    """Run one training epoch through `training_box`, logging loss/lr/throughput.

    Raises ValueError on the main process if the loss becomes NaN or inf.
    """
    metric_logger = MetricLogger(delimiter='  ')
    metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('img/s', SmoothedValue(window_size=10, fmt='{value}'))
    header = 'Epoch: [{}]'.format(epoch)
    for sample_batch, targets, supp_dict in \
            metric_logger.log_every(training_box.train_data_loader, log_freq, header):
        start_time = time.time()
        sample_batch, targets = sample_batch.to(device), targets.to(device)
        # training_box computes the (distillation) loss and update_params steps
        # the optimizer — both are project-defined; see torchdistill.core.
        loss = training_box(sample_batch, targets, supp_dict)
        training_box.update_params(loss)
        batch_size = sample_batch.shape[0]
        metric_logger.update(loss=loss.item(), lr=training_box.optimizer.param_groups[0]['lr'])
        metric_logger.meters['img/s'].update(batch_size / (time.time() - start_time))
        # Abort the run early if training diverged (NaN/inf loss).
        if (torch.isnan(loss) or torch.isinf(loss)) and is_main_process():
            raise ValueError('The training loop was broken due to loss = {}'.format(loss))
@torch.inference_mode()
def evaluate(model, data_loader, device, device_ids, distributed, log_freq=1000, title=None, header='Test:'):
    """Evaluate `model` on `data_loader`, reporting top-1/top-5 accuracy.

    NOTE(review): the visualization block inside the loop calls exit(-1)
    after the first batch, so the accuracy computation below it is currently
    unreachable — this looks like temporary debug/visualization code.
    """
    model.to(device)
    if distributed:
        model = DistributedDataParallel(model, device_ids=device_ids)
    elif device.type.startswith('cuda'):
        model = DataParallel(model, device_ids=device_ids)
    if title is not None:
        logger.info(title)
    model.eval()
    metric_logger = MetricLogger(delimiter='  ')
    for image, target in metric_logger.log_every(data_loader, log_freq, header):
        image = image.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        output = model(image)
        # --- visualization-only block: plots kernels for the hooked outputs
        # collected in the global `outs`, then terminates the process. ---
        from SST.utils.Matrix import confusion_matrix_pyplot,Kernel_VIS
        from SST.utils.Pmatrix import Matrix_VIS
        # confusion_matrix_pyplot(target,output,num_classes=10)
        global outs
        outs.append(output)
        outs=Kernel_VIS()(outs)
        for out in outs:
            Matrix_VIS(out)
        exit(-1)
        acc1, acc5 = compute_accuracy(output, target, topk=(1, 5))
        # FIXME need to take into account that the datasets
        # could have been padded in distributed setup
        batch_size = image.shape[0]
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    top1_accuracy = metric_logger.acc1.global_avg
    top5_accuracy = metric_logger.acc5.global_avg
    logger.info(' * Acc@1 {:.4f}\tAcc@5 {:.4f}\n'.format(top1_accuracy, top5_accuracy))
    return metric_logger.acc1.global_avg
def main(args):
    """Load config and datasets, build the teacher model with visualization
    hooks on layers 1-3, and run evaluate() on the test loader.

    NOTE(review): evaluate() is called on the *teacher* model but the title is
    formatted with the *student* model's name — looks like a copy/paste
    mismatch; confirm which model this script is meant to visualize.
    """
    log_file_path = args.log
    if is_main_process() and log_file_path is not None:
        setup_log_file(os.path.expanduser(log_file_path))
    distributed, device_ids = init_distributed_mode(args.world_size, args.dist_url)
    logger.info(args)
    cudnn.benchmark = True
    set_seed(args.seed)
    config = yaml_util.load_yaml_file(os.path.expanduser(args.config))
    device = torch.device(args.device)
    dataset_dict = util.get_all_datasets(config['datasets'])
    models_config = config['models']
    teacher_model_config = models_config.get('teacher_model', None)
    teacher_model =\
        load_model(teacher_model_config, device, distributed) if teacher_model_config is not None else None
    # Capture intermediate feature maps into the global `outs` buffer.
    teacher_model.layer1.register_forward_hook(layer_hook)
    teacher_model.layer2.register_forward_hook(layer_hook)
    teacher_model.layer3.register_forward_hook(layer_hook)
    student_model_config =\
        models_config['student_model'] if 'student_model' in models_config else models_config['model']
    if args.log_config:
        logger.info(config)
    test_config = config['test']
    test_data_loader_config = test_config['test_data_loader']
    test_data_loader = util.build_data_loader(dataset_dict[test_data_loader_config['dataset_id']],
                                              test_data_loader_config, distributed)
    log_freq = test_config.get('log_freq', 1000)
    evaluate(teacher_model, test_data_loader, device, device_ids, distributed, log_freq=log_freq,
             title='[Student: {}]'.format(student_model_config['name']))
if __name__ == '__main__':
    # Parse CLI arguments and hand them to the evaluation entry point.
    main(get_argparser().parse_args())
|
[
"argparse.ArgumentParser",
"SST.utils.Matrix.Kernel_VIS",
"torchdistill.datasets.util.get_all_datasets",
"torch.device",
"SST.utils.Pmatrix.Matrix_VIS",
"os.chdir",
"torch.isnan",
"torchdistill.common.main_util.is_main_process",
"sys.path.append",
"torchdistill.datasets.util.build_data_loader",
"torchdistill.misc.log.SmoothedValue",
"torch.isinf",
"torch.nn.parallel.DistributedDataParallel",
"torchdistill.common.main_util.set_seed",
"torchdistill.common.main_util.init_distributed_mode",
"torchdistill.models.registry.get_model",
"torchdistill.common.constant.def_logger.getChild",
"torchdistill.models.official.get_image_classification_model",
"torchdistill.eval.classification.compute_accuracy",
"torch.inference_mode",
"os.getcwd",
"torchdistill.common.main_util.load_ckpt",
"time.time",
"torchdistill.misc.log.MetricLogger",
"torch.nn.DataParallel",
"os.path.expanduser"
] |
[((58, 106), 'os.chdir', 'os.chdir', (['"""/home/qiuziming/product/torchdistill"""'], {}), "('/home/qiuziming/product/torchdistill')\n", (66, 106), False, 'import os, sys\n'), ((112, 123), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (121, 123), False, 'import os, sys\n'), ((124, 145), 'sys.path.append', 'sys.path.append', (['root'], {}), '(root)\n', (139, 145), False, 'import os, sys\n'), ((996, 1025), 'torchdistill.common.constant.def_logger.getChild', 'def_logger.getChild', (['__name__'], {}), '(__name__)\n', (1015, 1025), False, 'from torchdistill.common.constant import def_logger\n'), ((3955, 3977), 'torch.inference_mode', 'torch.inference_mode', ([], {}), '()\n', (3975, 3977), False, 'import torch\n'), ((1115, 1213), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Knowledge distillation for image classification models"""'}), "(description=\n 'Knowledge distillation for image classification models')\n", (1138, 1213), False, 'import argparse\n'), ((2549, 2606), 'torchdistill.models.official.get_image_classification_model', 'get_image_classification_model', (['model_config', 'distributed'], {}), '(model_config, distributed)\n', (2579, 2606), False, 'from torchdistill.models.official import get_image_classification_model\n'), ((2822, 2873), 'torchdistill.common.main_util.load_ckpt', 'load_ckpt', (['ckpt_file_path'], {'model': 'model', 'strict': '(True)'}), '(ckpt_file_path, model=model, strict=True)\n', (2831, 2873), False, 'from torchdistill.common.main_util import is_main_process, init_distributed_mode, load_ckpt, save_ckpt, set_seed\n'), ((2984, 3012), 'torchdistill.misc.log.MetricLogger', 'MetricLogger', ([], {'delimiter': '""" """'}), "(delimiter=' ')\n", (2996, 3012), False, 'from torchdistill.misc.log import setup_log_file, SmoothedValue, MetricLogger\n'), ((4391, 4419), 'torchdistill.misc.log.MetricLogger', 'MetricLogger', ([], {'delimiter': '""" """'}), "(delimiter=' ')\n", (4403, 4419), False, 'from torchdistill.misc.log 
import setup_log_file, SmoothedValue, MetricLogger\n'), ((5845, 5898), 'torchdistill.common.main_util.init_distributed_mode', 'init_distributed_mode', (['args.world_size', 'args.dist_url'], {}), '(args.world_size, args.dist_url)\n', (5866, 5898), False, 'from torchdistill.common.main_util import is_main_process, init_distributed_mode, load_ckpt, save_ckpt, set_seed\n'), ((5952, 5971), 'torchdistill.common.main_util.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (5960, 5971), False, 'from torchdistill.common.main_util import is_main_process, init_distributed_mode, load_ckpt, save_ckpt, set_seed\n'), ((6056, 6081), 'torch.device', 'torch.device', (['args.device'], {}), '(args.device)\n', (6068, 6081), False, 'import torch\n'), ((6101, 6142), 'torchdistill.datasets.util.get_all_datasets', 'util.get_all_datasets', (["config['datasets']"], {}), "(config['datasets'])\n", (6122, 6142), False, 'from torchdistill.datasets import util\n'), ((6855, 6972), 'torchdistill.datasets.util.build_data_loader', 'util.build_data_loader', (["dataset_dict[test_data_loader_config['dataset_id']]", 'test_data_loader_config', 'distributed'], {}), "(dataset_dict[test_data_loader_config['dataset_id']],\n test_data_loader_config, distributed)\n", (6877, 6972), False, 'from torchdistill.datasets import util\n'), ((2705, 2775), 'torchdistill.models.registry.get_model', 'get_model', (["model_config['name']", 'repo_or_dir'], {}), "(model_config['name'], repo_or_dir, **model_config['params'])\n", (2714, 2775), False, 'from torchdistill.models.registry import get_model\n'), ((3047, 3090), 'torchdistill.misc.log.SmoothedValue', 'SmoothedValue', ([], {'window_size': '(1)', 'fmt': '"""{value}"""'}), "(window_size=1, fmt='{value}')\n", (3060, 3090), False, 'from torchdistill.misc.log import setup_log_file, SmoothedValue, MetricLogger\n'), ((3129, 3173), 'torchdistill.misc.log.SmoothedValue', 'SmoothedValue', ([], {'window_size': '(10)', 'fmt': '"""{value}"""'}), "(window_size=10, 
fmt='{value}')\n", (3142, 3173), False, 'from torchdistill.misc.log import setup_log_file, SmoothedValue, MetricLogger\n'), ((3370, 3381), 'time.time', 'time.time', ([], {}), '()\n', (3379, 3381), False, 'import time\n'), ((4145, 4198), 'torch.nn.parallel.DistributedDataParallel', 'DistributedDataParallel', (['model'], {'device_ids': 'device_ids'}), '(model, device_ids=device_ids)\n', (4168, 4198), False, 'from torch.nn.parallel import DistributedDataParallel\n'), ((4993, 5038), 'torchdistill.eval.classification.compute_accuracy', 'compute_accuracy', (['output', 'target'], {'topk': '(1, 5)'}), '(output, target, topk=(1, 5))\n', (5009, 5038), False, 'from torchdistill.eval.classification import compute_accuracy\n'), ((5707, 5724), 'torchdistill.common.main_util.is_main_process', 'is_main_process', ([], {}), '()\n', (5722, 5724), False, 'from torchdistill.common.main_util import is_main_process, init_distributed_mode, load_ckpt, save_ckpt, set_seed\n'), ((6010, 6041), 'os.path.expanduser', 'os.path.expanduser', (['args.config'], {}), '(args.config)\n', (6028, 6041), False, 'import os, sys\n'), ((3842, 3859), 'torchdistill.common.main_util.is_main_process', 'is_main_process', ([], {}), '()\n', (3857, 3859), False, 'from torchdistill.common.main_util import is_main_process, init_distributed_mode, load_ckpt, save_ckpt, set_seed\n'), ((4256, 4298), 'torch.nn.DataParallel', 'DataParallel', (['model'], {'device_ids': 'device_ids'}), '(model, device_ids=device_ids)\n', (4268, 4298), False, 'from torch.nn import DataParallel\n'), ((4883, 4895), 'SST.utils.Matrix.Kernel_VIS', 'Kernel_VIS', ([], {}), '()\n', (4893, 4895), False, 'from SST.utils.Matrix import confusion_matrix_pyplot, Kernel_VIS\n'), ((4939, 4954), 'SST.utils.Pmatrix.Matrix_VIS', 'Matrix_VIS', (['out'], {}), '(out)\n', (4949, 4954), False, 'from SST.utils.Pmatrix import Matrix_VIS\n'), ((5779, 5812), 'os.path.expanduser', 'os.path.expanduser', (['log_file_path'], {}), '(log_file_path)\n', (5797, 5812), False, 
'import os, sys\n'), ((3798, 3815), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (3809, 3815), False, 'import torch\n'), ((3819, 3836), 'torch.isinf', 'torch.isinf', (['loss'], {}), '(loss)\n', (3830, 3836), False, 'import torch\n'), ((3759, 3770), 'time.time', 'time.time', ([], {}), '()\n', (3768, 3770), False, 'import time\n')]
|
from sqlalchemy import Column, Integer, String
from base import Base
class Qualifier(Base):
    """ORM mapping for the 'Qualifiers' table."""

    __tablename__ = 'Qualifiers'

    # Surrogate primary key plus the code/description pair.
    id = Column('QualifierID', Integer, primary_key=True)
    code = Column('QualifierCode', String, nullable=False)
    description = Column('QualifierDescription', String, nullable=False)

    def __repr__(self):
        return f"<Qualifier('{self.id}', '{self.code}', '{self.description}')>"
|
[
"sqlalchemy.Column"
] |
[((135, 183), 'sqlalchemy.Column', 'Column', (['"""QualifierID"""', 'Integer'], {'primary_key': '(True)'}), "('QualifierID', Integer, primary_key=True)\n", (141, 183), False, 'from sqlalchemy import Column, Integer, String\n'), ((194, 241), 'sqlalchemy.Column', 'Column', (['"""QualifierCode"""', 'String'], {'nullable': '(False)'}), "('QualifierCode', String, nullable=False)\n", (200, 241), False, 'from sqlalchemy import Column, Integer, String\n'), ((257, 311), 'sqlalchemy.Column', 'Column', (['"""QualifierDescription"""', 'String'], {'nullable': '(False)'}), "('QualifierDescription', String, nullable=False)\n", (263, 311), False, 'from sqlalchemy import Column, Integer, String\n')]
|
from __future__ import absolute_import
import daisy
import logging
import unittest
import networkx as nx
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG)
# Presumably stops daisy's scheduler from spawning its status thread during
# tests — relies on a private daisy attribute; verify against the daisy
# version in use.
daisy.scheduler._NO_SPAWN_STATUS_THREAD = True
class TestFilterMongoGraph(unittest.TestCase):
    """Tests for attribute filtering / limited reads on the MongoDB graph provider.

    Requires a running MongoDB instance reachable by daisy; all tests write to
    the shared 'test_daisy_graph' database.
    """

    def get_mongo_graph_provider(
            self, mode):
        """Return a provider for the shared test DB opened in `mode`."""
        return daisy.persistence.MongoDbGraphProvider(
            'test_daisy_graph',
            directed=True,
            mode=mode)

    def _write_complex_graph(self):
        """Create and persist the shared 4-node / 3-edge attributed graph.

        This graph was previously duplicated verbatim in three tests; it is
        factored out here (DRY). Returns the ROI the graph was written into.
        """
        graph_provider = self.get_mongo_graph_provider('w')
        roi = daisy.Roi((0, 0, 0),
                        (10, 10, 10))
        graph = graph_provider[roi]

        graph.add_node(2, position=(2, 2, 2), selected=True, test='test')
        graph.add_node(42, position=(1, 1, 1), selected=False, test='test2')
        graph.add_node(23, position=(5, 5, 5), selected=True, test='test2')
        graph.add_node(57, position=daisy.Coordinate((7, 7, 7)),
                       selected=True, test='test')
        graph.add_edge(42, 23, selected=False, a=100, b=3)
        graph.add_edge(57, 23, selected=True, a=100, b=2)
        graph.add_edge(2, 42, selected=True, a=101, b=3)

        graph.write_nodes()
        graph.write_edges()
        return roi

    def test_graph_filtering(self):
        graph_provider = self.get_mongo_graph_provider('w')
        roi = daisy.Roi((0, 0, 0),
                        (10, 10, 10))
        graph = graph_provider[roi]

        graph.add_node(2, position=(2, 2, 2), selected=True)
        graph.add_node(42, position=(1, 1, 1), selected=False)
        graph.add_node(23, position=(5, 5, 5), selected=True)
        graph.add_node(57, position=daisy.Coordinate((7, 7, 7)), selected=True)

        graph.add_edge(42, 23, selected=False)
        graph.add_edge(57, 23, selected=True)
        graph.add_edge(2, 42, selected=True)

        graph.write_nodes()
        graph.write_edges()

        graph_provider = self.get_mongo_graph_provider('r')

        # Only nodes/edges matching the attribute filter should come back.
        filtered_nodes = graph_provider.read_nodes(
            roi, attr_filter={'selected': True})
        filtered_node_ids = [node['id'] for node in filtered_nodes]
        expected_node_ids = [2, 23, 57]
        self.assertCountEqual(expected_node_ids, filtered_node_ids)

        filtered_edges = graph_provider.read_edges(
            roi, attr_filter={'selected': True})
        filtered_edge_endpoints = [(edge['u'], edge['v'])
                                   for edge in filtered_edges]
        expected_edge_endpoints = [(57, 23), (2, 42)]
        self.assertCountEqual(expected_edge_endpoints, filtered_edge_endpoints)

        filtered_subgraph = graph_provider.get_graph(
            roi,
            nodes_filter={'selected': True},
            edges_filter={'selected': True})
        # Nodes without a 'position' attribute are dangling edge endpoints.
        nodes_with_position = [node for node, data
                               in filtered_subgraph.nodes(data=True)
                               if 'position' in data]
        self.assertCountEqual(expected_node_ids, nodes_with_position)
        self.assertCountEqual(expected_edge_endpoints,
                              filtered_subgraph.edges())

    def test_graph_filtering_complex(self):
        roi = self._write_complex_graph()

        graph_provider = self.get_mongo_graph_provider('r')

        # Multi-attribute filters must match all given key/value pairs.
        filtered_nodes = graph_provider.read_nodes(
            roi, attr_filter={'selected': True,
                              'test': 'test'})
        filtered_node_ids = [node['id'] for node in filtered_nodes]
        expected_node_ids = [2, 57]
        self.assertCountEqual(expected_node_ids, filtered_node_ids)

        filtered_edges = graph_provider.read_edges(
            roi, attr_filter={'selected': True,
                              'a': 100})
        filtered_edge_endpoints = [(edge['u'], edge['v'])
                                   for edge in filtered_edges]
        expected_edge_endpoints = [(57, 23)]
        self.assertCountEqual(expected_edge_endpoints, filtered_edge_endpoints)

        filtered_subgraph = graph_provider.get_graph(
            roi,
            nodes_filter={'selected': True,
                          'test': 'test'},
            edges_filter={'selected': True,
                          'a': 100})
        nodes_with_position = [node for node, data
                               in filtered_subgraph.nodes(data=True)
                               if 'position' in data]
        self.assertCountEqual(expected_node_ids, nodes_with_position)
        self.assertCountEqual(expected_edge_endpoints,
                              filtered_subgraph.edges())

    def test_graph_read_and_update_specific_attrs(self):
        roi = self._write_complex_graph()

        graph_provider = self.get_mongo_graph_provider('r+')
        # Limited reads must only materialize the requested attributes.
        limited_graph = graph_provider.get_graph(
            roi, node_attrs=['selected'], edge_attrs=['c'])

        for node, data in limited_graph.nodes(data=True):
            self.assertFalse('test' in data)
            self.assertTrue('selected' in data)
            data['selected'] = True

        for u, v, data in limited_graph.edges(data=True):
            self.assertFalse('a' in data)
            self.assertFalse('b' in data)

        nx.set_edge_attributes(limited_graph, 5, 'c')

        limited_graph.update_edge_attrs(attributes=['c'])
        limited_graph.update_node_attrs(attributes=['selected'])

        # Re-read without limits and confirm the updates were persisted.
        updated_graph = graph_provider.get_graph(roi)

        for node, data in updated_graph.nodes(data=True):
            self.assertTrue(data['selected'])

        for u, v, data in updated_graph.edges(data=True):
            self.assertEqual(data['c'], 5)

    def test_graph_read_unbounded_roi(self):
        self._write_complex_graph()
        unbounded_roi = daisy.Roi((None, None, None), (None, None, None))

        graph_provider = self.get_mongo_graph_provider('r+')
        # An unbounded ROI must return every stored node.
        limited_graph = graph_provider.get_graph(
            unbounded_roi, node_attrs=['selected'], edge_attrs=['c'])
        seen = []

        for node, data in limited_graph.nodes(data=True):
            self.assertFalse('test' in data)
            self.assertTrue('selected' in data)
            data['selected'] = True
            seen.append(node)

        self.assertCountEqual(seen, [2, 42, 23, 57])
|
[
"daisy.Coordinate",
"daisy.persistence.MongoDbGraphProvider",
"daisy.Roi",
"networkx.set_edge_attributes",
"logging.getLogger"
] |
[((116, 143), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (133, 143), False, 'import logging\n'), ((358, 446), 'daisy.persistence.MongoDbGraphProvider', 'daisy.persistence.MongoDbGraphProvider', (['"""test_daisy_graph"""'], {'directed': '(True)', 'mode': 'mode'}), "('test_daisy_graph', directed=True,\n mode=mode)\n", (396, 446), False, 'import daisy\n'), ((591, 625), 'daisy.Roi', 'daisy.Roi', (['(0, 0, 0)', '(10, 10, 10)'], {}), '((0, 0, 0), (10, 10, 10))\n', (600, 625), False, 'import daisy\n'), ((2501, 2535), 'daisy.Roi', 'daisy.Roi', (['(0, 0, 0)', '(10, 10, 10)'], {}), '((0, 0, 0), (10, 10, 10))\n', (2510, 2535), False, 'import daisy\n'), ((5165, 5199), 'daisy.Roi', 'daisy.Roi', (['(0, 0, 0)', '(10, 10, 10)'], {}), '((0, 0, 0), (10, 10, 10))\n', (5174, 5199), False, 'import daisy\n'), ((7367, 7401), 'daisy.Roi', 'daisy.Roi', (['(0, 0, 0)', '(10, 10, 10)'], {}), '((0, 0, 0), (10, 10, 10))\n', (7376, 7401), False, 'import daisy\n'), ((7450, 7499), 'daisy.Roi', 'daisy.Roi', (['(None, None, None)', '(None, None, None)'], {}), '((None, None, None), (None, None, None))\n', (7459, 7499), False, 'import daisy\n'), ((6815, 6860), 'networkx.set_edge_attributes', 'nx.set_edge_attributes', (['limited_graph', '(5)', '"""c"""'], {}), "(limited_graph, 5, 'c')\n", (6837, 6860), True, 'import networkx as nx\n'), ((909, 936), 'daisy.Coordinate', 'daisy.Coordinate', (['(7, 7, 7)'], {}), '((7, 7, 7))\n', (925, 936), False, 'import daisy\n'), ((3090, 3117), 'daisy.Coordinate', 'daisy.Coordinate', (['(7, 7, 7)'], {}), '((7, 7, 7))\n', (3106, 3117), False, 'import daisy\n'), ((5754, 5781), 'daisy.Coordinate', 'daisy.Coordinate', (['(7, 7, 7)'], {}), '((7, 7, 7))\n', (5770, 5781), False, 'import daisy\n'), ((8031, 8058), 'daisy.Coordinate', 'daisy.Coordinate', (['(7, 7, 7)'], {}), '((7, 7, 7))\n', (8047, 8058), False, 'import daisy\n')]
|
import asyncio
import json
import logging
import os
import sys
from typing import Optional
from discord_webhook import DiscordWebhook
from fastapi import FastAPI
from pydantic import BaseModel, BaseSettings
from pyngrok import ngrok
from meraki_register_webhook import MerakiWebhook
# Log level is configurable via the LOGLEVEL env var (defaults to INFO).
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
def setup_ngrok():
    """
    Build ngrok tunnel for inbound webhook calls
    """
    logging.info("ngrok enabled. Spinning up tunnels...")
    # Authenticate the tunnel when a token is configured; warn otherwise.
    NGROK_AUTH_TOKEN = os.environ.get("NGROK_TOKEN")
    if NGROK_AUTH_TOKEN:
        logging.info("Adding ngrok authentication token...")
        ngrok.set_auth_token(NGROK_AUTH_TOKEN)
    else:
        logging.error("Missing config item: NGROK_TOKEN. Program will still run, but non-authenticated tunnels will break after some time...")
    # Mirror whatever port uvicorn was started on (default 8000).
    if "--port" in sys.argv:
        port = sys.argv[sys.argv.index("--port") + 1]
    else:
        port = 8000
    # Open a new ngrok tunnel and hand back its public URL.
    return ngrok.connect(port, bind_tls=True).public_url
async def check_ngrok():
    """
    ngrok may re-establish session occasionally, which means new public URL.
    This will check intermittently, and update Meraki's config if needed
    """
    # Poll every 30 seconds, forever (runs as a background asyncio task).
    while True:
        await asyncio.sleep(30)
        logging.info("Checking ngrok session...")
        current_url = ngrok.get_tunnels()[0].public_url
        logging.info(f"Current ngrok URL: {current_url}")
        logging.info(f"Current webhook URL: {settings.WEBHOOK_URL}")
        if current_url != settings.WEBHOOK_URL:
            logging.info(
                "Current ngrok URL does not match Meraki-configured URL. Need to update..."
            )
            # NOTE(review): settings.WEBHOOK_URL is never refreshed here, so
            # after one URL change this branch appears to re-trigger on every
            # poll — confirm whether meraki.update_webhook_url updates it.
            meraki.update_webhook_url(current_url)
class Settings(BaseSettings):
    """Application settings, resolved from environment variables at import time.

    Calls sys.exit(1) when a required variable is missing, and may start an
    ngrok tunnel (via setup_ngrok) to obtain WEBHOOK_URL.
    """
    # Webserver / inbound settings
    # BUG FIX: os.environ.get() returns a string (or None), so the original
    # `USE_NGROK == False` comparison was never true and ngrok could not be
    # disabled from the environment. Normalize the value to a real bool first
    # (unset still defaults to True, as before).
    USE_NGROK = os.environ.get("USE_NGROK")
    if USE_NGROK is None:
        USE_NGROK = True
    else:
        USE_NGROK = USE_NGROK.strip().lower() not in ("0", "false", "no")
    if not USE_NGROK:
        WEBHOOK_URL = os.environ.get("MERAKI_TARGET_WEBHOOK_URL")
        if not WEBHOOK_URL:
            logging.error(
                "Error: ngrok disabled, but no self URL provided. Missing config item MERAKI_TARGET_WEBHOOK_URL"
            )
            sys.exit(1)
    else:
        WEBHOOK_URL = setup_ngrok()
    # Meraki-specific settings
    NETWORK_NAME = os.environ.get("MERAKI_TARGET_NETWORK_NAME")
    if not NETWORK_NAME:
        logging.error("Error: Missing config item MERAKI_TARGET_NETWORK_NAME")
        sys.exit(1)
    WEBHOOK_NAME = os.environ.get("MERAKI_WEBHOOK_NAME")
    MERAKI_API_KEY = os.environ.get("MERAKI_API_KEY")
    if not MERAKI_API_KEY:
        logging.error("Error: Missing config item MERAKI_API_KEY")
        sys.exit(1)
    # Discord settings
    DISCORD_URL = os.environ.get("DISCORD_URL")
    if not DISCORD_URL:
        logging.error("Error: Missing config item DISCORD_URL")
        sys.exit(1)
    # Defaults, for those settings which are not required
    if not WEBHOOK_NAME:
        WEBHOOK_NAME = "api-generated_Discord"
class MerakiAlert(BaseModel):
    """Pydantic schema for the alert payload Meraki POSTs to this webhook."""
    # Meraki API ver / secret
    version: float
    sharedSecret: str
    sentAt: str
    # Org info
    organizationId: int
    organizationName: str
    organizationUrl: str
    # Network Info
    networkId: str
    networkName: str
    networkTags: Optional[list] = None
    deviceSerial: str
    # Device Info
    deviceMac: str
    deviceName: str
    deviceUrl: str
    deviceTags: Optional[list] = None
    deviceModel: str
    # Alert Info
    alertId: str
    alertType: str
    alertTypeId: str
    alertLevel: str
    occurredAt: str
    alertData: Optional[dict] = None
## Main Stuffs:
# Module-level singletons: settings are resolved (and an ngrok tunnel is
# possibly started) at import time; the Meraki webhook receiver is then
# registered immediately, before the FastAPI app is created.
settings = Settings()
meraki = MerakiWebhook(
    settings.MERAKI_API_KEY,
    settings.WEBHOOK_NAME,
    settings.WEBHOOK_URL,
    settings.NETWORK_NAME,
)
logging.info(f"Accepting requests at: {settings.WEBHOOK_URL}")
app = FastAPI()
@app.post("/post-msg-discord")
async def post_from_meraki(item: MerakiAlert):
logging.info("Got POST request")
if item.sharedSecret == meraki.webhook_config["sharedSecret"]:
logging.info("API secret matches")
logging.info(item)
sendDiscordMsg(item)
return {"message": "Message received"}
else:
logging.error(f"Received bad API secret: {item.sharedSecret}")
return {"message": "Bad webhook secret"}
@app.on_event("startup")
async def startup_event():
if settings.USE_NGROK:
asyncio.create_task(check_ngrok())
def sendDiscordMsg(data):
    """
    Send alert via Discord webhooks

    Best-effort: failures are logged (with traceback) and swallowed so a
    Discord outage never breaks the webhook endpoint.
    """
    content = formatMessage(data)
    logging.info("Sending Discord message...")
    try:
        webhook = DiscordWebhook(url=settings.DISCORD_URL, content=str(content))
        response = webhook.execute()
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; catch only real errors.
    except Exception:
        logging.exception("Failed to send message")
        return
    if response.status_code == 200:
        logging.info("Message successfully posted to Discord webhook")
    else:
        logging.error("Failed to send message")
    return
def formatMessage(data):
    """
    Format incoming message before passing to Discord

    Builds a CRLF-terminated Markdown payload from a MerakiAlert-shaped
    object (reads occurredAt, deviceName, alertType, alertData).
    """
    logging.info("Formatting message payload...")
    time = (data.occurredAt).split("T")
    message = [":alarm_clock: __**Meraki Alert**__ :alarm_clock: "]
    message.append(f"**Device:** {data.deviceName}")
    message.append(f"**Message info:** {data.alertType}")
    message.append(f"**Occurred at:** {time[0]} - {time[1][:8]}")
    # BUG FIX: alertData is Optional and may be None; len(None) raised a
    # TypeError. A truthiness check covers both None and an empty dict.
    if data.alertData:
        message.append(f"**Additional data:** ```fix\r\n{data.alertData}\r\n```")
    # join() assembles the payload in one pass instead of quadratic `+=`.
    return "".join(each + "\r\n" for each in message)
|
[
"pyngrok.ngrok.get_tunnels",
"logging.error",
"logging.exception",
"asyncio.sleep",
"meraki_register_webhook.MerakiWebhook",
"os.environ.get",
"logging.info",
"pyngrok.ngrok.connect",
"sys.argv.index",
"pyngrok.ngrok.set_auth_token",
"sys.exit",
"fastapi.FastAPI"
] |
[((3752, 3863), 'meraki_register_webhook.MerakiWebhook', 'MerakiWebhook', (['settings.MERAKI_API_KEY', 'settings.WEBHOOK_NAME', 'settings.WEBHOOK_URL', 'settings.NETWORK_NAME'], {}), '(settings.MERAKI_API_KEY, settings.WEBHOOK_NAME, settings.\n WEBHOOK_URL, settings.NETWORK_NAME)\n', (3765, 3863), False, 'from meraki_register_webhook import MerakiWebhook\n'), ((3878, 3940), 'logging.info', 'logging.info', (['f"""Accepting requests at: {settings.WEBHOOK_URL}"""'], {}), "(f'Accepting requests at: {settings.WEBHOOK_URL}')\n", (3890, 3940), False, 'import logging\n'), ((3947, 3956), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (3954, 3956), False, 'from fastapi import FastAPI\n'), ((438, 491), 'logging.info', 'logging.info', (['"""ngrok enabled. Spinning up tunnels..."""'], {}), "('ngrok enabled. Spinning up tunnels...')\n", (450, 491), False, 'import logging\n'), ((538, 567), 'os.environ.get', 'os.environ.get', (['"""NGROK_TOKEN"""'], {}), "('NGROK_TOKEN')\n", (552, 567), False, 'import os\n'), ((1907, 1934), 'os.environ.get', 'os.environ.get', (['"""USE_NGROK"""'], {}), "('USE_NGROK')\n", (1921, 1934), False, 'import os\n'), ((2331, 2375), 'os.environ.get', 'os.environ.get', (['"""MERAKI_TARGET_NETWORK_NAME"""'], {}), "('MERAKI_TARGET_NETWORK_NAME')\n", (2345, 2375), False, 'import os\n'), ((2520, 2557), 'os.environ.get', 'os.environ.get', (['"""MERAKI_WEBHOOK_NAME"""'], {}), "('MERAKI_WEBHOOK_NAME')\n", (2534, 2557), False, 'import os\n'), ((2579, 2611), 'os.environ.get', 'os.environ.get', (['"""MERAKI_API_KEY"""'], {}), "('MERAKI_API_KEY')\n", (2593, 2611), False, 'import os\n'), ((2768, 2797), 'os.environ.get', 'os.environ.get', (['"""DISCORD_URL"""'], {}), "('DISCORD_URL')\n", (2782, 2797), False, 'import os\n'), ((4041, 4073), 'logging.info', 'logging.info', (['"""Got POST request"""'], {}), "('Got POST request')\n", (4053, 4073), False, 'import logging\n'), ((4659, 4701), 'logging.info', 'logging.info', (['"""Sending Discord message..."""'], {}), "('Sending 
Discord message...')\n", (4671, 4701), False, 'import logging\n'), ((5185, 5230), 'logging.info', 'logging.info', (['"""Formatting message payload..."""'], {}), "('Formatting message payload...')\n", (5197, 5230), False, 'import logging\n'), ((312, 346), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""INFO"""'], {}), "('LOGLEVEL', 'INFO')\n", (326, 346), False, 'import os\n'), ((605, 749), 'logging.error', 'logging.error', (['"""Missing config item: NGROK_TOKEN. Program will still run, but non-authenticated tunnels will break after some time..."""'], {}), "(\n 'Missing config item: NGROK_TOKEN. Program will still run, but non-authenticated tunnels will break after some time...'\n )\n", (618, 749), False, 'import logging\n'), ((773, 825), 'logging.info', 'logging.info', (['"""Adding ngrok authentication token..."""'], {}), "('Adding ngrok authentication token...')\n", (785, 825), False, 'import logging\n'), ((834, 872), 'pyngrok.ngrok.set_auth_token', 'ngrok.set_auth_token', (['NGROK_AUTH_TOKEN'], {}), '(NGROK_AUTH_TOKEN)\n', (854, 872), False, 'from pyngrok import ngrok\n'), ((1052, 1086), 'pyngrok.ngrok.connect', 'ngrok.connect', (['port'], {'bind_tls': '(True)'}), '(port, bind_tls=True)\n', (1065, 1086), False, 'from pyngrok import ngrok\n'), ((1368, 1409), 'logging.info', 'logging.info', (['"""Checking ngrok session..."""'], {}), "('Checking ngrok session...')\n", (1380, 1409), False, 'import logging\n'), ((1474, 1523), 'logging.info', 'logging.info', (['f"""Current ngrok URL: {current_url}"""'], {}), "(f'Current ngrok URL: {current_url}')\n", (1486, 1523), False, 'import logging\n'), ((1532, 1592), 'logging.info', 'logging.info', (['f"""Current webhook URL: {settings.WEBHOOK_URL}"""'], {}), "(f'Current webhook URL: {settings.WEBHOOK_URL}')\n", (1544, 1592), False, 'import logging\n'), ((1984, 2027), 'os.environ.get', 'os.environ.get', (['"""MERAKI_TARGET_WEBHOOK_URL"""'], {}), "('MERAKI_TARGET_WEBHOOK_URL')\n", (1998, 2027), False, 'import os\n'), 
((2409, 2479), 'logging.error', 'logging.error', (['"""Error: Missing config item MERAKI_TARGET_NETWORK_NAME"""'], {}), "('Error: Missing config item MERAKI_TARGET_NETWORK_NAME')\n", (2422, 2479), False, 'import logging\n'), ((2488, 2499), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2496, 2499), False, 'import sys\n'), ((2647, 2705), 'logging.error', 'logging.error', (['"""Error: Missing config item MERAKI_API_KEY"""'], {}), "('Error: Missing config item MERAKI_API_KEY')\n", (2660, 2705), False, 'import logging\n'), ((2714, 2725), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2722, 2725), False, 'import sys\n'), ((2830, 2885), 'logging.error', 'logging.error', (['"""Error: Missing config item DISCORD_URL"""'], {}), "('Error: Missing config item DISCORD_URL')\n", (2843, 2885), False, 'import logging\n'), ((2894, 2905), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2902, 2905), False, 'import sys\n'), ((4149, 4183), 'logging.info', 'logging.info', (['"""API secret matches"""'], {}), "('API secret matches')\n", (4161, 4183), False, 'import logging\n'), ((4192, 4210), 'logging.info', 'logging.info', (['item'], {}), '(item)\n', (4204, 4210), False, 'import logging\n'), ((4305, 4367), 'logging.error', 'logging.error', (['f"""Received bad API secret: {item.sharedSecret}"""'], {}), "(f'Received bad API secret: {item.sharedSecret}')\n", (4318, 4367), False, 'import logging\n'), ((4952, 5014), 'logging.info', 'logging.info', (['"""Message successfully posted to Discord webhook"""'], {}), "('Message successfully posted to Discord webhook')\n", (4964, 5014), False, 'import logging\n'), ((5033, 5072), 'logging.error', 'logging.error', (['"""Failed to send message"""'], {}), "('Failed to send message')\n", (5046, 5072), False, 'import logging\n'), ((1342, 1359), 'asyncio.sleep', 'asyncio.sleep', (['(30)'], {}), '(30)\n', (1355, 1359), False, 'import asyncio\n'), ((1653, 1752), 'logging.info', 'logging.info', (['"""Current ngrok URL does not match Meraki-configured 
URL. Need to update..."""'], {}), "(\n 'Current ngrok URL does not match Meraki-configured URL. Need to update...'\n )\n", (1665, 1752), False, 'import logging\n'), ((2068, 2189), 'logging.error', 'logging.error', (['"""Error: ngrok disabled, but no self URL provided. Missing config item MERAKI_TARGET_WEBHOOK_URL"""'], {}), "(\n 'Error: ngrok disabled, but no self URL provided. Missing config item MERAKI_TARGET_WEBHOOK_URL'\n )\n", (2081, 2189), False, 'import logging\n'), ((2222, 2233), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2230, 2233), False, 'import sys\n'), ((4849, 4892), 'logging.exception', 'logging.exception', (['"""Failed to send message"""'], {}), "('Failed to send message')\n", (4866, 4892), False, 'import logging\n'), ((923, 947), 'sys.argv.index', 'sys.argv.index', (['"""--port"""'], {}), "('--port')\n", (937, 947), False, 'import sys\n'), ((1432, 1451), 'pyngrok.ngrok.get_tunnels', 'ngrok.get_tunnels', ([], {}), '()\n', (1449, 1451), False, 'from pyngrok import ngrok\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from numpy import pi
import pandas as pd
from scripts.volatility_tree import build_volatility_tree
from scripts.profiler import profiler
# NOTE(review): `i` is the imaginary unit, but the code below uses the `1j`
# literal and later loop variables shadow `i` -- this constant looks unused.
i = complex(0, 1)
# option parameters
T = 1 # maturity, in years
H_original = 90 # limit (barrier level)
K_original = 100.0 # strike
r_premia = 10 # annual interest rate, in percent
# Bates model parameters
V0 = 0.01 # initial volatility
kappa = 2 # heston parameter, mean reversion speed
theta = 0.01 # heston parameter, long-run variance
sigma = omega = 0.2 # heston parameter, volatility of variance.
# Omega is used in variance tree, sigma - everywhere else
rho = 0.5 # heston parameter #correlation
# method parameters
N = 10 # number_of_time_steps
M = 2**15 # number of points in price grid (a power of two, for the radix-2 FFT)
dx = 1e-3 # log-price grid step
omega_plus = 1 # damping parameters for the Wiener-Hopf factors
omega_minus = -1
r = np.log(r_premia/100 + 1) # continuously compounded rate
omega = sigma # keep the tree volatility in sync with sigma
# time-space domain construction
x_space = np.linspace(-M * dx / 2, M * dx / 2, num=M, endpoint=False) # log-price grid
u_space = np.linspace(-pi / dx, pi / dx, num=M, endpoint=False) # dual (frequency) grid
du = u_space[1] - u_space[0] # frequency step (defined but not used below)
first_step_of_return = np.array([elem + V0*rho/sigma for elem in x_space])
original_prices_array = H_original * np.exp(first_step_of_return) # spot prices matching x_space
delta_t = T/N
# making volatility tree
markov_chain = build_volatility_tree(T, V0, kappa, theta, omega, N)
V = markov_chain[0] # variance value per (time step, node)
pu_f = markov_chain[1] # up-move probabilities (used in the expectation below)
pd_f = markov_chain[2] # down-move probabilities
f_up = markov_chain[3] # offset to the up-successor node
f_down = markov_chain[4] # offset to the down-successor node
rho_hat = np.sqrt(1 - rho**2)
q = 1.0/delta_t + r # factorization constant 1/dt + r
factor = (q*delta_t)**(-1) # one-step normalization applied after factorization
def G(S, K):
    """Vanilla European put payoff (K - S)^+; the barrier plays no role here."""
    intrinsic = K - S
    return intrinsic if intrinsic > 0 else 0
# Option values on the (log-price x variance-node) grid: next and current step
F_n_plus_1 = np.zeros((len(x_space), len(V[N])), dtype=complex)
F_n = np.zeros((len(x_space), len(V[N])), dtype=complex)
# terminal condition: the payoff at maturity, the same for every variance node
for j in range(len(x_space)):
    for k in range(len(V[N])):
        F_n_plus_1[j, k] = np.array(G(H_original * np.exp(x_space[j]), K_original))
# the global cycle starts here. It iterates over the volatility tree we just constructed, and goes backwards in time
# starting from n-1 position
# print("Main cycle entered")
# when the variance is less than that, it is reasonable to assume it to be zero, which leads to simpler calculations
treshold = 1e-6
# NOTE(review): exp(+r*dt) *grows* a value; discounting usually uses exp(-r*dt) -- confirm the sign
discount_factor = np.exp(r*delta_t)
def psi(xi, gamma=0, sigma=sigma):
    """Characteristic exponent of the drifted diffusion: sigma^2*xi^2/2 - i*gamma*xi."""
    diffusion_term = (sigma ** 2 / 2) * xi ** 2
    drift_term = 1j * gamma * xi
    return diffusion_term - drift_term
def make_rad_fft(f_x):
    """FFT of a function sampled on the zero-centred grid x_space.

    The alternating-sign ("radix") trick shifts the origin to the middle of
    the grid so the transform corresponds to the symmetric domain.
    """
    alt_signs = (-1) ** np.arange(M)
    # fftshift accounts for numpy's storage order of the frequencies
    shifted_signs = np.fft.fftshift(alt_signs)
    spectrum = np.fft.fft(alt_signs * f_x)
    return dx * shifted_signs * spectrum
def make_rad_ifft(f_hat_xi):
    """Inverse of make_rad_fft: recover samples on the zero-centred grid."""
    n_points = len(f_hat_xi)
    alt_signs = (-1) ** np.arange(n_points)
    recovered = np.fft.ifft(alt_signs * f_hat_xi)
    return (1/dx) * alt_signs * recovered
def make_phi_minus(gamma=0, sigma=sigma):
    """Build the "minus" Wiener-Hopf factor sampled on u_space.

    gamma is the local drift and sigma the local (variance-dependent)
    diffusion coefficient; both default to the module-level values.
    """
    def integrand_minus(upsilon_array):
        """
        Takes and returns an array whose length is a power of two,
        as required by how it is used below (radix-2 FFT).
        """
        value = np.log(1 + psi(upsilon_array + 1j * omega_plus, gamma=gamma, sigma=sigma) / q) / (upsilon_array + 1j * omega_plus) ** 2
        return value
    def F_minus_capital():
        # indicator of the non-negative half-axis
        m_indicator = np.where(x_space >= 0, 1, 0)
        trimmed_x_space = m_indicator * x_space # keeps the exponent from blowing up for strongly negative x
        integral = make_rad_ifft(integrand_minus(u_space))
        exponent = np.exp(-trimmed_x_space * omega_plus)
        return m_indicator * exponent * integral
    fm = F_minus_capital()
    F_m_hat = make_rad_fft(fm)
    def make_phi_minus_array(xi_array):
        # exponential form of the factor: exp(-i*xi*fm(0) - xi^2 * F_hat)
        first_term = - 1j * xi_array * (fm[M // 2])
        second_term = - xi_array * xi_array * F_m_hat
        return np.exp(first_term + second_term)
    mex_symbol_minus = make_phi_minus_array(u_space)
    return mex_symbol_minus
def make_phi_plus(gamma=0, sigma=sigma):
    """Build the "plus" Wiener-Hopf factor sampled on u_space (mirror of make_phi_minus)."""
    def integrand_plus(upsilon_array):
        """
        Takes and returns an array whose length is a power of two,
        as required by how it is used below (radix-2 FFT).
        """
        value = np.log(1 + psi(upsilon_array + 1j * omega_minus, gamma=gamma, sigma=sigma) / q) / (upsilon_array + 1j * omega_minus) ** 2
        return value
    def F_plus_capital():
        # indicator of the non-positive half-axis
        p_indicator = np.where(x_space <= 0, 1, 0)
        trimmed_x_space = p_indicator * x_space # keeps the exponent from blowing up for strongly negative x
        integral = make_rad_ifft(integrand_plus(u_space))
        # NOTE(review): uses omega_plus here although this is the "plus" factor
        # built around omega_minus -- confirm against make_phi_minus
        exponent = np.exp(-trimmed_x_space * omega_plus)
        return p_indicator * exponent * integral
    fp = F_plus_capital()
    F_p_hat = make_rad_fft(fp)
    def make_phi_plus_array(xi_array):
        # exponential form of the factor: exp(+i*xi*fp(0) - xi^2 * F_hat)
        first_term = 1j * xi_array * fp[M // 2]
        second_term = - xi_array * xi_array * F_p_hat
        return np.exp(first_term + second_term)
    mex_symbol_plus = make_phi_plus_array(u_space)
    return mex_symbol_plus
# Backward induction over the volatility tree: from the last-but-one time
# layer down to time zero, computing F_n from F_n_plus_1 at every node.
for n in range(len(V[N]) - 2, -1, -1):
    print(str(n) + " of " + str(len(V[N]) - 2))
    with profiler():
        for k in range(n+1):
            # to calculate the binomial expectation one should use Antonino's matrices f_up and f_down
            # the meaning of the containing integers are as follows - after (n,k) you will be in
            # either (n+1, k + f_up) or (n+1, k - f_down). We use k_u and k_d shorthands, respectively
            k_u = k + int(f_up[n][k])
            k_d = k + int(f_down[n][k])
            # initial condition of a step
            f_n_plus_1_k_u = np.array([F_n_plus_1[j][k_u] for j in range(len(x_space))])
            f_n_plus_1_k_d = np.array([F_n_plus_1[j][k_d] for j in range(len(x_space))])
            H_N_k = - (rho / sigma) * V[n, k] # modified barrier
            local_domain = np.array([x_space[j] + H_N_k for j in range(len(x_space))])
            if V[n, k] >= treshold:
                # set up variance-dependent parameters for a given step
                sigma_local = rho_hat * np.sqrt(V[n, k])
                gamma = r - 0.5 * V[n, k] - rho / sigma * kappa * (theta - V[n, k]) # also local
                # NOTE(review): the names look swapped -- phi_plus_array is produced by
                # make_phi_minus and vice versa; confirm against the factorization formula
                phi_plus_array = make_phi_minus(gamma=gamma, sigma=sigma_local)
                phi_minus_array = make_phi_plus(gamma=gamma, sigma=sigma_local)
                # 1 on the points that survive the barrier, 0 below it
                indicator = np.where(local_domain >= H_N_k, 1, 0)
                # factorization calculation
                f_n_k_u = factor * \
                    make_rad_ifft(phi_minus_array *
                                  make_rad_fft(
                                      indicator *
                                      make_rad_ifft(phi_plus_array * make_rad_fft(f_n_plus_1_k_u))))
                f_n_k_d = factor * \
                    make_rad_ifft(phi_minus_array *
                                  make_rad_fft(
                                      indicator *
                                      make_rad_ifft(phi_plus_array * make_rad_fft(f_n_plus_1_k_d))))
            elif V[n, k] < treshold:
                # (near-)zero variance: skip the factorization, just discount
                f_n_plus_1_k_u = [F_n_plus_1[j][k_u] for j in range(len(x_space))]
                f_n_k_u = discount_factor * f_n_plus_1_k_u
                f_n_plus_1_k_d = [F_n_plus_1[j][k_d] for j in range(len(x_space))]
                f_n_k_d = discount_factor * f_n_plus_1_k_d
            # binomial expectation over the two successor variance nodes
            f_n_k = f_n_k_u * pu_f[n, k] + f_n_k_d * pd_f[n, k]
            for j in range(len(f_n_k)):
                # here we try some cutdown magic. The procedure without it returns great bubbles to the right
                # from the strike. And the more L the greater this bubble grows.
                # what we are going to do there is to try to cut off all the values on prices greater than, say,
                # 4 times bigger then the strike
                # we use S>4K and, therefore, y > ln(4K/H) + (pho/sigma)*V inequality to do this
                if local_domain[j] < np.log(3.5*K_original/H_original + (rho/sigma) * V[n][k]):
                    F_n[j][k] = f_n_k[j]
                else:
                    F_n[j][k] = complex(0)
# plt.plot(original_prices_array, f_n_plus_1_k_u)
# plt.show()
# for j in range(len(y)):
# tree_to_csv_file(y[j], "../output/routine/price_slices/Y" + str(original_prices_array[j]) + ".csv")
# for j in range(len(F)):
# tree_to_csv_file(F[j], "../output/routine/answers/F" + str(original_prices_array[j]) + ".csv")
# Dump the time-0 option values next to their spot prices, then plot the
# economically interesting price window.
answers_list = np.array([F_n[j][0] for j in range(len(x_space))])
# "with" guarantees the CSV is flushed and closed; the original opened the
# file and never closed it (resource leak)
with open("../output/routine/answer_cumul.csv", "w") as answer_total:
    for price, value in zip(original_prices_array, answers_list):
        answer_total.write(str(price) + ',')
        answer_total.write(str(value.real) + ',')
        answer_total.write('\n')
# restrict the plot to prices in (75, 200)
in_window = (original_prices_array > 75) & (original_prices_array < 200)
plt.plot(original_prices_array[in_window], answers_list[in_window])
plt.savefig("../output/figure.png")
plt.show()
plt.close()
|
[
"numpy.fft.ifft",
"scripts.profiler.profiler",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.plot",
"scripts.volatility_tree.build_volatility_tree",
"matplotlib.pyplot.close",
"numpy.fft.fft",
"numpy.power",
"numpy.fft.fftshift",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"numpy.where",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((819, 845), 'numpy.log', 'np.log', (['(r_premia / 100 + 1)'], {}), '(r_premia / 100 + 1)\n', (825, 845), True, 'import numpy as np\n'), ((906, 965), 'numpy.linspace', 'np.linspace', (['(-M * dx / 2)', '(M * dx / 2)'], {'num': 'M', 'endpoint': '(False)'}), '(-M * dx / 2, M * dx / 2, num=M, endpoint=False)\n', (917, 965), True, 'import numpy as np\n'), ((977, 1030), 'numpy.linspace', 'np.linspace', (['(-pi / dx)', '(pi / dx)'], {'num': 'M', 'endpoint': '(False)'}), '(-pi / dx, pi / dx, num=M, endpoint=False)\n', (988, 1030), True, 'import numpy as np\n'), ((1087, 1144), 'numpy.array', 'np.array', (['[(elem + V0 * rho / sigma) for elem in x_space]'], {}), '([(elem + V0 * rho / sigma) for elem in x_space])\n', (1095, 1144), True, 'import numpy as np\n'), ((1267, 1319), 'scripts.volatility_tree.build_volatility_tree', 'build_volatility_tree', (['T', 'V0', 'kappa', 'theta', 'omega', 'N'], {}), '(T, V0, kappa, theta, omega, N)\n', (1288, 1319), False, 'from scripts.volatility_tree import build_volatility_tree\n'), ((1452, 1473), 'numpy.sqrt', 'np.sqrt', (['(1 - rho ** 2)'], {}), '(1 - rho ** 2)\n', (1459, 1473), True, 'import numpy as np\n'), ((2246, 2265), 'numpy.exp', 'np.exp', (['(r * delta_t)'], {}), '(r * delta_t)\n', (2252, 2265), True, 'import numpy as np\n'), ((9294, 9473), 'matplotlib.pyplot.plot', 'plt.plot', (['original_prices_array[(original_prices_array > 75) & (original_prices_array <\n 200)]', 'answers_list[(original_prices_array > 75) & (original_prices_array < 200)]'], {}), '(original_prices_array[(original_prices_array > 75) & (\n original_prices_array < 200)], answers_list[(original_prices_array > 75\n ) & (original_prices_array < 200)])\n', (9302, 9473), True, 'import matplotlib.pyplot as plt\n'), ((9467, 9502), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../output/figure.png"""'], {}), "('../output/figure.png')\n", (9478, 9502), True, 'import matplotlib.pyplot as plt\n'), ((9504, 9514), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(9512, 9514), True, 'import matplotlib.pyplot as plt\n'), ((9516, 9527), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9525, 9527), True, 'import matplotlib.pyplot as plt\n'), ((1177, 1205), 'numpy.exp', 'np.exp', (['first_step_of_return'], {}), '(first_step_of_return)\n', (1183, 1205), True, 'import numpy as np\n'), ((2570, 2600), 'numpy.fft.fftshift', 'np.fft.fftshift', (['sign_change_l'], {}), '(sign_change_l)\n', (2585, 2600), True, 'import numpy as np\n'), ((2666, 2679), 'numpy.fft.fft', 'np.fft.fft', (['f'], {}), '(f)\n', (2676, 2679), True, 'import numpy as np\n'), ((2994, 3031), 'numpy.fft.ifft', 'np.fft.ifft', (['(sign_change_l * f_hat_xi)'], {}), '(sign_change_l * f_hat_xi)\n', (3005, 3031), True, 'import numpy as np\n'), ((3481, 3509), 'numpy.where', 'np.where', (['(x_space >= 0)', '(1)', '(0)'], {}), '(x_space >= 0, 1, 0)\n', (3489, 3509), True, 'import numpy as np\n'), ((3697, 3734), 'numpy.exp', 'np.exp', (['(-trimmed_x_space * omega_plus)'], {}), '(-trimmed_x_space * omega_plus)\n', (3703, 3734), True, 'import numpy as np\n'), ((4014, 4046), 'numpy.exp', 'np.exp', (['(first_term + second_term)'], {}), '(first_term + second_term)\n', (4020, 4046), True, 'import numpy as np\n'), ((4566, 4594), 'numpy.where', 'np.where', (['(x_space <= 0)', '(1)', '(0)'], {}), '(x_space <= 0, 1, 0)\n', (4574, 4594), True, 'import numpy as np\n'), ((4781, 4818), 'numpy.exp', 'np.exp', (['(-trimmed_x_space * omega_plus)'], {}), '(-trimmed_x_space * omega_plus)\n', (4787, 4818), True, 'import numpy as np\n'), ((5094, 5126), 'numpy.exp', 'np.exp', (['(first_term + second_term)'], {}), '(first_term + second_term)\n', (5100, 5126), True, 'import numpy as np\n'), ((5310, 5320), 'scripts.profiler.profiler', 'profiler', ([], {}), '()\n', (5318, 5320), False, 'from scripts.profiler import profiler\n'), ((2331, 2346), 'numpy.power', 'np.power', (['xi', '(2)'], {}), '(xi, 2)\n', (2339, 2346), True, 'import numpy as np\n'), ((6586, 6623), 'numpy.where', 'np.where', 
(['(local_domain >= H_N_k)', '(1)', '(0)'], {}), '(local_domain >= H_N_k, 1, 0)\n', (6594, 6623), True, 'import numpy as np\n'), ((1876, 1894), 'numpy.exp', 'np.exp', (['x_space[j]'], {}), '(x_space[j])\n', (1882, 1894), True, 'import numpy as np\n'), ((6273, 6289), 'numpy.sqrt', 'np.sqrt', (['V[n, k]'], {}), '(V[n, k])\n', (6280, 6289), True, 'import numpy as np\n'), ((8270, 8331), 'numpy.log', 'np.log', (['(3.5 * K_original / H_original + rho / sigma * V[n][k])'], {}), '(3.5 * K_original / H_original + rho / sigma * V[n][k])\n', (8276, 8331), True, 'import numpy as np\n')]
|
from django.urls import path
from . import views
app_name = 'TestApp' # URL namespace: reverse as 'TestApp:index' / 'TestApp:hello'
urlpatterns = [
    # site root -> index view
    path('', views.index, name='index'),
    # /hello/<name>/ -> hello view; <name> is passed to the view as a string
    path('hello/<name>/', views.hello, name='hello'),
]
|
[
"django.urls.path"
] |
[((92, 127), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (96, 127), False, 'from django.urls import path\n'), ((133, 181), 'django.urls.path', 'path', (['"""hello/<name>/"""', 'views.hello'], {'name': '"""hello"""'}), "('hello/<name>/', views.hello, name='hello')\n", (137, 181), False, 'from django.urls import path\n')]
|
from PIL import Image, ImageDraw, ImageFilter
from math import *
# spacing of the circle grid, in pixels
step=50
# 1000x1000 opaque black canvas
I=Image.new('RGBA',(1000,1000),(0,0,0,255))
d=ImageDraw.Draw(I)
def draw_circle(d, i, j, fill=(255, 255, 255, 255)):
    """Draw a filled circle at (i, j); its radius swells toward the canvas centre.

    The radius follows sin(i*pi/1000)*sin(j*pi/1000)*15, clamped below at 5.
    """
    radius = sin(i * pi / 1000) * sin(j * pi / 1000) * 15
    if radius < 5:
        radius = 5
    bounding_box = (i - radius, j - radius, i + radius, j + radius)
    d.ellipse(bounding_box, fill=fill)
# Tile the canvas with overlapping circles on a half-step grid.
# BUG FIX: range() requires an int step; step/2 is a float in Python 3 and
# raised TypeError, so use integer division instead.
for j in range(0, 1000 + step, step // 2):
    for i in range(0, 1000 + step, step // 2):
        draw_circle(d, i + step, j + step)
I = I.rotate(25)
I = I.filter(ImageFilter.SMOOTH)
I.show()
|
[
"PIL.ImageDraw.Draw",
"PIL.Image.new"
] |
[((77, 124), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(1000, 1000)', '(0, 0, 0, 255)'], {}), "('RGBA', (1000, 1000), (0, 0, 0, 255))\n", (86, 124), False, 'from PIL import Image, ImageDraw, ImageFilter\n'), ((121, 138), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['I'], {}), '(I)\n', (135, 138), False, 'from PIL import Image, ImageDraw, ImageFilter\n')]
|
#
# Copyright (c) 2019, 2020, 2021, <NAME>
# All rights reserved.
#
from gfs.common.log import GFSLogger
from gfs.common.base import GFSBase
class GremlinFSConfig(GFSBase):
    """Configuration object: defaults merged with caller-supplied overrides."""

    logger = GFSLogger.getLogger("GremlinFSConfig")

    @classmethod
    def defaults(clazz):
        """Return a fresh dict holding every configuration default."""
        return dict(
            kf_topic1="gfs1",
            kf_topic2="gfs2",
            kf_group="ripple-group",
            log_level=GFSLogger.getLogLevel(),
            client_id="0010",
            fs_ns="gfs1",
            fs_root=None,
            fs_root_init=False,
            extends_label="extends",
            implements_label="implements",
            folder_label="group",
            ref_label="ref",
            in_label="in",
            self_label="self",
            template_label="template",
            template_format="mustache",
            view_label="view",
            extends_name="extends0",
            implements_name="implements0",
            in_name="in0",
            self_name="self0",
            vertex_folder=".V",
            edge_folder=".E",
            in_edge_folder="IN",   # 'EI'
            out_edge_folder="OUT",  # 'EO'
            uuid_property="uuid",
            name_property="name",
            data_property="data",
            template_property="template",
            format_property="format",
            default_uid=1001,
            default_gid=1001,
            default_mode=0o777,
            labels=[],
        )

    def __init__(self, **kwargs):
        """Populate the config with the defaults, then apply caller overrides."""
        self.setall(GremlinFSConfig.defaults())
        self.setall(kwargs)
|
[
"gfs.common.log.GFSLogger.getLogLevel",
"gfs.common.log.GFSLogger.getLogger"
] |
[((194, 232), 'gfs.common.log.GFSLogger.getLogger', 'GFSLogger.getLogger', (['"""GremlinFSConfig"""'], {}), "('GremlinFSConfig')\n", (213, 232), False, 'from gfs.common.log import GFSLogger\n'), ((425, 448), 'gfs.common.log.GFSLogger.getLogLevel', 'GFSLogger.getLogLevel', ([], {}), '()\n', (446, 448), False, 'from gfs.common.log import GFSLogger\n')]
|
import json
import os
import random
from pathlib import Path
from typing import Optional, List, Dict
import cherrypy
import numpy as np
import psutil
import yaml
from app.emotions import predict_topk_emotions, EMOTIONS, get_fonts
from app.features import extract_audio_features
from app.keywords import predict_keywords
# URL path segments for the three exposed endpoints
METHOD_NAME_EMOTIONS = 'music-emotions'
METHOD_NAME_FONTS = 'music-fonts'
METHOD_NAME_KEYWORDS = 'music-keywords'
# user-facing error messages
ERROR_MESSAGE_NO_FILE_WITH_FEATURES = 'Invalid audio features path `%s`: no such file'
ERROR_MESSAGE_INVALID_FEATURES = 'Failed to extract features from audio (is audio length less than 60 seconds?)'
ERROR_MESSAGE_UNKNOWN_EMOTION = 'Unknown emotion `%s`. Expected emotions: [%s]'
ERROR_MESSAGE_MODEL_FAIL_EMOTIONS = 'Failed to predict emotions: something wrong with model'
ERROR_MESSAGE_MODEL_FAIL_KEYWORDS = 'Failed to predict keywords: something wrong with model'
TypeAudio = cherrypy._cpreqbody.Part
process = psutil.Process(os.getpid()) # for monitoring and debugging purposes
# close the handle explicitly -- the original `yaml.safe_load(open(...))`
# leaked the open file object
with open('config.yml') as _config_file:
    config = yaml.safe_load(_config_file)
class ApiServerController(object):
    """cherrypy controller exposing the music-analysis HTTP endpoints."""
    @cherrypy.expose(METHOD_NAME_EMOTIONS)
    def music_emotions(self, audio: TypeAudio):
        """
        Predict the top-3 emotions for a song; returns a JSON string.

        :param audio: audio file with music song
        """
        features = get_audio_features(audio)
        if len(features) == 0:
            return result_error(ERROR_MESSAGE_INVALID_FEATURES)
        emotions = predict_topk_emotions(np.array(features), k=3)
        if len(emotions) == 0:
            return result_error(ERROR_MESSAGE_MODEL_FAIL_EMOTIONS)
        return result_emotions(emotions)
    @cherrypy.expose(METHOD_NAME_FONTS)
    def music_fonts(self, audio: Optional[TypeAudio] = None, emotion: Optional[str] = None):
        """
        Suggest fonts per emotion; the emotion is either supplied by the
        user or predicted (top-3) from the audio. Returns a JSON string.

        :param audio: audio file with music song
        :param emotion: emotion, selected by user
        """
        if emotion is not None and emotion not in EMOTIONS:
            return result_error(ERROR_MESSAGE_UNKNOWN_EMOTION % (emotion, ', '.join(EMOTIONS)))
        if emotion is not None:
            emotions = [emotion]
        else:
            features = get_audio_features(audio)
            if len(features) == 0:
                return result_error(ERROR_MESSAGE_INVALID_FEATURES)
            emotions = predict_topk_emotions(np.array(features), k=3)
        emotion_fonts = {}
        for emotion in emotions:
            emotion_fonts[emotion] = get_fonts(emotion)
        return result_emotion_fonts(emotion_fonts)
    @cherrypy.expose(METHOD_NAME_KEYWORDS)
    def music_keywords(self, audio: TypeAudio):
        """
        Predict the top-10 keywords for a song; returns a JSON string.

        :param audio: audio file with music song
        """
        features = get_audio_features(audio)
        if len(features) == 0:
            return result_error(ERROR_MESSAGE_INVALID_FEATURES)
        keywords = predict_keywords(np.array(features), k=10)
        if len(keywords) == 0:
            return result_error(ERROR_MESSAGE_MODEL_FAIL_KEYWORDS)
        return result_keywords(keywords)
def get_audio_features(audio: TypeAudio) -> List[np.ndarray]:
    """
    Persist the uploaded audio to a uniquely named temp file, extract its
    per-minute feature vectors, and always remove the temp file afterwards.

    :param audio: audio file with music song
    :return: list of features for each full minute
    """
    # random prefix avoids collisions between concurrent uploads sharing a filename
    audio_file_name_prefix = random.randrange(1048576)
    tmp_dir = config['app']['tmp_dir']
    audio_file_path = Path(os.path.join(tmp_dir, f'{audio_file_name_prefix}-{audio.filename}'))
    audio_file_path.parent.mkdir(exist_ok=True, parents=True)
    audio_file_path.write_bytes(audio.file.read())
    try:
        # extraction may raise on malformed audio; the temp file must be
        # deleted either way (the original leaked it on failure)
        features = extract_audio_features(audio_file_path)
    finally:
        os.remove(audio_file_path)
    return features
def result_error(error_message: str) -> str:
    """Serialize an error payload: {"result": {"error": <message>}}.

    :param: error_message: error message to return
    """
    payload = {'result': {'error': error_message}}
    return json.dumps(payload)
def result_emotions(emotions: List[str]) -> str:
    """Serialize the predicted emotions: {"result": {"emotions": [...]}}.

    :param: emotions: list of emotions to return, e.g. ['comfortable', 'happy', 'wary']
    """
    payload = {'result': {'emotions': emotions}}
    return json.dumps(payload)
def result_emotion_fonts(emotion_fonts: Dict[str, List[str]]) -> str:
    """Serialize fonts grouped by emotion as {"result": [{"emotion": ..., "fonts": [...]}, ...]}.

    :param: emotions: fonts grouped by emotion, e.g.
        {
            'comfortable': ['LexendExa', 'Suravaram', 'Philosopher'],
            'happy': ['LilitaOne', 'Acme']
        }
    """
    entries = []
    for emotion, fonts in emotion_fonts.items():
        entries.append({'emotion': emotion, 'fonts': fonts})
    return json.dumps({'result': entries})
def result_keywords(keywords: List[str]) -> str:
    """Serialize the predicted keywords: {"result": {"keywords": [...]}}.

    :param: keywords: list of keywords to return, e.g. ['porn', 'guitar', 'obama']
    """
    payload = {'result': {'keywords': keywords}}
    return json.dumps(payload)
if __name__ == '__main__':
    # mount the controller under /demo and serve until interrupted
    cherrypy.tree.mount(ApiServerController(), '/demo')
    cherrypy.config.update({
        'server.socket_port': config['app']['port'],
        'server.socket_host': config['app']['host'],
        'server.thread_pool': config['app']['thread_pool'],
        'log.access_file': 'access1.log',
        'log.error_file': 'error1.log',
        'log.screen': True,
        'tools.response_headers.on': True,
        'tools.encode.encoding': 'utf-8',
        'tools.response_headers.headers': [('Content-Type', 'text/html;encoding=utf-8')],
    })
    try:
        cherrypy.engine.start()
        cherrypy.engine.block()  # blocks the main thread until the engine stops
    except KeyboardInterrupt:
        cherrypy.engine.stop()
|
[
"os.remove",
"cherrypy.expose",
"os.getpid",
"cherrypy.engine.start",
"json.dumps",
"cherrypy.config.update",
"cherrypy.engine.block",
"random.randrange",
"numpy.array",
"app.features.extract_audio_features",
"app.emotions.get_fonts",
"os.path.join",
"cherrypy.engine.stop"
] |
[((968, 979), 'os.getpid', 'os.getpid', ([], {}), '()\n', (977, 979), False, 'import os\n'), ((1110, 1147), 'cherrypy.expose', 'cherrypy.expose', (['METHOD_NAME_EMOTIONS'], {}), '(METHOD_NAME_EMOTIONS)\n', (1125, 1147), False, 'import cherrypy\n'), ((1622, 1656), 'cherrypy.expose', 'cherrypy.expose', (['METHOD_NAME_FONTS'], {}), '(METHOD_NAME_FONTS)\n', (1637, 1656), False, 'import cherrypy\n'), ((2505, 2542), 'cherrypy.expose', 'cherrypy.expose', (['METHOD_NAME_KEYWORDS'], {}), '(METHOD_NAME_KEYWORDS)\n', (2520, 2542), False, 'import cherrypy\n'), ((3212, 3237), 'random.randrange', 'random.randrange', (['(1048576)'], {}), '(1048576)\n', (3228, 3237), False, 'import random\n'), ((3503, 3542), 'app.features.extract_audio_features', 'extract_audio_features', (['audio_file_path'], {}), '(audio_file_path)\n', (3525, 3542), False, 'from app.features import extract_audio_features\n'), ((3547, 3573), 'os.remove', 'os.remove', (['audio_file_path'], {}), '(audio_file_path)\n', (3556, 3573), False, 'import os\n'), ((3719, 3767), 'json.dumps', 'json.dumps', (["{'result': {'error': error_message}}"], {}), "({'result': {'error': error_message}})\n", (3729, 3767), False, 'import json\n'), ((3970, 4016), 'json.dumps', 'json.dumps', (["{'result': {'emotions': emotions}}"], {}), "({'result': {'emotions': emotions}})\n", (3980, 4016), False, 'import json\n'), ((4626, 4672), 'json.dumps', 'json.dumps', (["{'result': {'keywords': keywords}}"], {}), "({'result': {'keywords': keywords}})\n", (4636, 4672), False, 'import json\n'), ((4799, 5226), 'cherrypy.config.update', 'cherrypy.config.update', (["{'server.socket_port': config['app']['port'], 'server.socket_host': config[\n 'app']['host'], 'server.thread_pool': config['app']['thread_pool'],\n 'log.access_file': 'access1.log', 'log.error_file': 'error1.log',\n 'log.screen': True, 'tools.response_headers.on': True,\n 'tools.encode.encoding': 'utf-8', 'tools.response_headers.headers': [(\n 'Content-Type', 'text/html;encoding=utf-8')]}"], 
{}), "({'server.socket_port': config['app']['port'],\n 'server.socket_host': config['app']['host'], 'server.thread_pool':\n config['app']['thread_pool'], 'log.access_file': 'access1.log',\n 'log.error_file': 'error1.log', 'log.screen': True,\n 'tools.response_headers.on': True, 'tools.encode.encoding': 'utf-8',\n 'tools.response_headers.headers': [('Content-Type',\n 'text/html;encoding=utf-8')]})\n", (4821, 5226), False, 'import cherrypy\n'), ((3305, 3372), 'os.path.join', 'os.path.join', (['tmp_dir', 'f"""{audio_file_name_prefix}-{audio.filename}"""'], {}), "(tmp_dir, f'{audio_file_name_prefix}-{audio.filename}')\n", (3317, 3372), False, 'import os\n'), ((5300, 5323), 'cherrypy.engine.start', 'cherrypy.engine.start', ([], {}), '()\n', (5321, 5323), False, 'import cherrypy\n'), ((5332, 5355), 'cherrypy.engine.block', 'cherrypy.engine.block', ([], {}), '()\n', (5353, 5355), False, 'import cherrypy\n'), ((1451, 1469), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1459, 1469), True, 'import numpy as np\n'), ((2429, 2447), 'app.emotions.get_fonts', 'get_fonts', (['emotion'], {}), '(emotion)\n', (2438, 2447), False, 'from app.emotions import predict_topk_emotions, EMOTIONS, get_fonts\n'), ((2841, 2859), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (2849, 2859), True, 'import numpy as np\n'), ((5394, 5416), 'cherrypy.engine.stop', 'cherrypy.engine.stop', ([], {}), '()\n', (5414, 5416), False, 'import cherrypy\n'), ((2306, 2324), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (2314, 2324), True, 'import numpy as np\n')]
|
import os
import subprocess
def unix_tail(filename, lines=20):
    """Return the last *lines* lines of *filename*, newest first.

    Delegates to the external ``tail`` utility, so the file is never read
    fully into memory.

    :param filename: path of a readable text file
    :param lines: number of trailing lines to fetch (default 20)
    :raises Exception: if the file is unreadable or the tail call fails
    """
    if not os.access(filename, os.R_OK):
        raise Exception('Cannot access "%s"' %(filename))
    try:
        args = ['tail', filename, '-n', str(lines)]
        proc = subprocess.Popen(args, stdout=subprocess.PIPE)
        output = proc.communicate()[0]
        # BUG FIX: Popen returns bytes in Python 3 -- decode before splitting,
        # otherwise .split('\n') raises TypeError
        text = output.decode('utf-8', errors='replace')
        result = text.strip().split('\n')
        result.reverse()
        return result
    except Exception as e:
        raise Exception('Error performing tail on "%s": %s'
                        %(filename, str(e)))
|
[
"subprocess.Popen",
"os.access"
] |
[((77, 105), 'os.access', 'os.access', (['filename', 'os.R_OK'], {}), '(filename, os.R_OK)\n', (86, 105), False, 'import os\n'), ((241, 287), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'subprocess.PIPE'}), '(args, stdout=subprocess.PIPE)\n', (257, 287), False, 'import subprocess\n')]
|
import pycurl
import urllib.parse
from collections import defaultdict
from io import BytesIO
import json
def pycurlgetURL(url):
    """GET *url* via pycurl and parse the ISO-8859-1 encoded JSON body."""
    response_body = BytesIO()
    curl = pycurl.Curl()
    curl.setopt(curl.URL, url)
    curl.setopt(curl.WRITEDATA, response_body)
    curl.perform()
    curl.close()
    raw = response_body.getvalue()
    return json.loads(raw.decode('iso-8859-1'))
def pycurlget(url, params):
    """GET *url* with *params* encoded into the query string; parse the JSON reply."""
    query = urllib.parse.urlencode(params)
    response_body = BytesIO()
    curl = pycurl.Curl()
    curl.setopt(curl.URL, url + '?' + query)
    curl.setopt(curl.WRITEDATA, response_body)
    curl.perform()
    curl.close()
    return json.loads(response_body.getvalue().decode('iso-8859-1'))
def pycurlpost(url, params):
    """POST *params* (form-encoded) to *url* and parse the JSON reply."""
    encoded_fields = urllib.parse.urlencode(params)
    response_body = BytesIO()
    curl = pycurl.Curl()
    curl.setopt(curl.URL, url)
    curl.setopt(curl.POSTFIELDS, encoded_fields)
    curl.setopt(curl.WRITEDATA, response_body)
    curl.perform()
    curl.close()
    return json.loads(response_body.getvalue().decode('iso-8859-1'))
|
[
"io.BytesIO",
"pycurl.Curl"
] |
[((142, 151), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (149, 151), False, 'from io import BytesIO\n'), ((160, 173), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (171, 173), False, 'import pycurl\n'), ((383, 392), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (390, 392), False, 'from io import BytesIO\n'), ((401, 414), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (412, 414), False, 'import pycurl\n'), ((679, 688), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (686, 688), False, 'from io import BytesIO\n'), ((697, 710), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (708, 710), False, 'import pycurl\n')]
|
import uuid
import xml.etree.ElementTree as Et
import csv
import lampak
# input/output locations (runtime strings below stay in Hungarian on purpose)
bedir = './xmlz/'
kidir = './csvk/'
bef = 'ntsz_old.xml'
befile = bedir + bef
kifile = kidir + bef + "_conv.csv"
# These are the fields expected by the Capture CSV import
fields = ['Fixture', 'Optics', 'Wattage', 'Unit', 'Circuit', 'Channel',
          'Groups', 'Patch', 'DMX Mode', 'DMX Channles', 'Layer', 'Focus',
          'Filters', 'Gobos', 'Accessories', 'Purpose', 'Note', 'Weight',
          'Location', 'Position X', 'Position Y', 'Position Z', 'Rotation X',
          'Rotation Y', 'Rotation Z', 'Focus', 'Pan', 'Focus Tilt', 'Invert Pan',
          'Pan Start Limit', 'Pan End Limit', 'Invert Tilt', 'Tilt Start Limit',
          'Tilt End Limit', 'Identifier', 'External Identifier']
data = []
mytree = Et.parse(befile)
# namespaces as found in the MA2-exported XML
ns = {'bas': "http://schemas.malighting.de/grandma2/xml/MA",
      'xsi': "http://www.w3.org/2001/XMLSchema-instance",
      'schema': "http://schemas.malighting.de/grandma2/xml/MA http://schemas.malighting.de/grandma2/xml/3.9.60/MA.xsd"}
myroot = mytree.getroot() # XML root element, the <MA> node
print('Showfile dátuma: {}'.format(myroot[0].attrib['datetime']))
print('Showfile neve: {}'.format(myroot[0].attrib['showfile']))
print("MA2 programverzió: {},{},{}".format(
    myroot.attrib['major_vers'], myroot.attrib['minor_vers'], myroot.attrib['stream_vers']))
# Walk every layer of the MA2 patch and convert each fixture to a CSV row.
for Layer in myroot.findall("bas:Layer", ns):
    rn = Layer.attrib['name'] # extract the layer's name and index
    ridx = Layer.attrib['index']  # NOTE(review): ridx is never used below
    uid = uuid.uuid4()  # one shared external identifier per layer
    for Fixture in Layer: # iterate over the fixtures
        egylampa = lampak.Lampa(Fixture[0].attrib['name'])
        egylampa.Unit = Fixture.attrib['name']
        egylampa.Layer = rn
        egylampa.extidentifier = uid
        fi = Fixture.attrib['index'] # fixture index; NOTE(review): unused below
        ''''# Itt csak átalakítjuk a mostani fileban a macet gagyibbra lustaságból!
        if egylampa.Fixture[:8] == 'Mac700PB': # levágjuk a sorszámot a lámpanévből
            egylampa.Fixture = 'Martin MAC 250 Entour' # jól kicseréljük a capture student verzióval
        '''
        egylampa.Patch = Fixture[1][0][0].text # Fixture/subfixture/patch text
        if egylampa.Patch != '0': # remember the last real DMX address for multipatch fixtures
            mdmx = egylampa.Patch
        fpos = Fixture[1][1][0].attrib # Fixture/subfixture/absoluteposition attributes
        egylampa.posx = fpos['x'] + 'm'
        egylampa.posy = fpos['y'] + 'm'
        egylampa.posz = fpos['z'] + 'm'
        frot = Fixture[1][1][1].attrib
        egylampa.rotx = frot['x']+'°'
        egylampa.roty = frot['y']+'°'
        egylampa.rotz = frot['z']+'°'
        if 'fixture_id' in Fixture.attrib: # moving lights carry fixture_id in MA2
            egylampa.Channel = Fixture.attrib['fixture_id']
        if 'channel_id' in Fixture.attrib: # dimmers carry channel_id in MA2
            egylampa.Channel = Fixture.attrib['channel_id']
        if 'is_multipatch' in Fixture.attrib: # multipatched fixture: reuse the last real address
            egylampa.Patch = mdmx
        data.append(egylampa.lamplista())
# Write the collected fixture rows into the Capture-importable CSV.
with open(kifile, 'w', newline='') as csvfile:
    filewriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    filewriter.writerow(fields)  # header row first
    filewriter.writerows(data)
# the "with" block already closed the file -- the old explicit close() was redundant
print("Az importálható cucc a {} -ban található.".format(kifile))
print("Ja amúgy kész vagyok...")
|
[
"xml.etree.ElementTree.parse",
"lampak.Lampa",
"uuid.uuid4",
"csv.writer"
] |
[((773, 789), 'xml.etree.ElementTree.parse', 'Et.parse', (['befile'], {}), '(befile)\n', (781, 789), True, 'import xml.etree.ElementTree as Et\n'), ((1571, 1583), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1581, 1583), False, 'import uuid\n'), ((3233, 3309), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(csvfile, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (3243, 3309), False, 'import csv\n'), ((1658, 1697), 'lampak.Lampa', 'lampak.Lampa', (["Fixture[0].attrib['name']"], {}), "(Fixture[0].attrib['name'])\n", (1670, 1697), False, 'import lampak\n')]
|
import base64
from apistar import http
from apistar.authentication import Authenticated
from apistar.interfaces import Auth
class BasicAuthentication():
    def authenticate(self, authorization: http.Header):
        """
        Determine the user associated with a request, using HTTP Basic Authentication.

        Returns None (anonymous) when the header is absent or uses another
        scheme; otherwise an Authenticated wrapper around the username.

        NOTE(review): the password is parsed but never verified against any
        credential store here -- confirm a downstream component checks it.
        """
        if authorization is None:
            return None
        scheme, token = authorization.split()
        if scheme.lower() != 'basic':
            return None
        # BUG FIX: RFC 7617 allows ':' inside the password, so split only on
        # the first colon -- the old split(':') crashed on such passwords
        username, password = base64.b64decode(token).decode('utf-8').split(':', 1)
        return Authenticated(username)
|
[
"base64.b64decode",
"apistar.authentication.Authenticated"
] |
[((586, 609), 'apistar.authentication.Authenticated', 'Authenticated', (['username'], {}), '(username)\n', (599, 609), False, 'from apistar.authentication import Authenticated\n'), ((520, 543), 'base64.b64decode', 'base64.b64decode', (['token'], {}), '(token)\n', (536, 543), False, 'import base64\n')]
|
import requests
from pathlib import Path
import os
from PIL import Image
from instabot import Bot
from dotenv import load_dotenv
import random
from scripts import download_file
# Raise Pillow's pixel-count ceiling so very large space images can be opened
# (presumably to silence the decompression-bomb guard -- confirm)
Image.MAX_IMAGE_PIXELS = 900000000
IMAGES_DIRECTORY = 'images/'  # raw downloads land here
INSTAGRAM_IMAGES_DIRECTORY = 'images_instagram/'  # images prepared for posting
def main():
    """Load credentials, fetch SpaceX and Hubble images, then prepare and post them."""
    load_dotenv()
    # BUG FIX: the original line ended with a trailing comma, which made
    # `username` a 1-tuple instead of a string
    username = os.getenv('INSTAGRAM_USERNAME')
    password = os.getenv('INSTAGRAM_PASSWORD')
    fetch_spacex_last_launch()
    fetch_hubble()
    create_images_for_instagram()
    post_images_to_instagram(username, password)
def fetch_spacex_last_launch(directory=IMAGES_DIRECTORY):
directory = Path(directory)
directory.mkdir(parents=True, exist_ok=True)
while True:
parameter = 'latest'
method_url = f'https://api.spacexdata.com/v3/launches/{parameter}'
response = requests.get(method_url)
response.raise_for_status()
response_dict = response.json()
try:
images_urls = response_dict['links']['flickr_images']
if not len(images_urls):
raise Exception('Images list is empty')
except Exception or KeyError:
parameter = response_dict['flight_number']
else:
break
for image_number, image_url in enumerate(images_urls):
image_path = directory / f'spacex{image_number}.jpg'
download_file(image_url, image_path)
def fetch_hubble():
host = 'http://hubblesite.org'
method = '/api/v3/images/all'
url = host + method
response = requests.get(url)
for image_data in response.json():
image_id = image_data['id']
download_hubble_img(image_id)
def get_extension_file(url):
return url.split('.')[-1]
def download_hubble_img(id, directory=IMAGES_DIRECTORY):
directory = Path(directory)
directory.mkdir(parents=True, exist_ok=True)
host = 'http://hubblesite.org'
method = '/api/v3/image/'
request_url = host + method + str(id)
response = requests.get(request_url)
response.raise_for_status()
image_url = 'http:' + response.json()['image_files'][-1]['file_url']
extension = get_extension_file(image_url)
path = directory / f'{id}.{extension}'
print(f'Download image: id {id}')
download_file(image_url, path)
def get_proportional_size(width, height, max_size=1080):
if max(width, height) > max_size:
resize_ratio = max_size / max(width, height)
width *= resize_ratio
height *= resize_ratio
return width, height
def create_images_for_instagram():
images_files = os.listdir(IMAGES_DIRECTORY)
directory = Path(INSTAGRAM_IMAGES_DIRECTORY)
directory.mkdir(parents=True, exist_ok=True)
for image_file in images_files:
image_path = IMAGES_DIRECTORY + image_file
try:
image = Image.open(image_path)
except OSError:
continue
if image.mode == 'RGBA':
image = image.convert('RGB')
new_size = get_proportional_size(*image.size)
new_image = image
new_image.thumbnail(new_size)
new_image_file = image_file.split('.')[0] + '.jpg'
new_image_path = INSTAGRAM_IMAGES_DIRECTORY + new_image_file
new_image.save(new_image_path, format='JPEG')
def post_images_to_instagram(username, password):
files_images = os.listdir(INSTAGRAM_IMAGES_DIRECTORY)
with open('space_quotes.txt', 'r', encoding='utf-8') as file:
quotes = file.read().split('\n')
bot = Bot()
bot.login(
is_threaded=False,
username=username,
password=password
)
for file_image in files_images:
path = INSTAGRAM_IMAGES_DIRECTORY + file_image
bot.upload_photo(path, caption=random.choice(quotes))
if __name__ == '__main__':
main()
|
[
"os.listdir",
"random.choice",
"PIL.Image.open",
"dotenv.load_dotenv",
"pathlib.Path",
"requests.get",
"instabot.Bot",
"os.getenv",
"scripts.download_file"
] |
[((324, 337), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (335, 337), False, 'from dotenv import load_dotenv\n'), ((403, 434), 'os.getenv', 'os.getenv', (['"""INSTAGRAM_PASSWORD"""'], {}), "('INSTAGRAM_PASSWORD')\n", (412, 434), False, 'import os\n'), ((654, 669), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (658, 669), False, 'from pathlib import Path\n'), ((1580, 1597), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1592, 1597), False, 'import requests\n'), ((1860, 1875), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (1864, 1875), False, 'from pathlib import Path\n'), ((2056, 2081), 'requests.get', 'requests.get', (['request_url'], {}), '(request_url)\n', (2068, 2081), False, 'import requests\n'), ((2324, 2354), 'scripts.download_file', 'download_file', (['image_url', 'path'], {}), '(image_url, path)\n', (2337, 2354), False, 'from scripts import download_file\n'), ((2661, 2689), 'os.listdir', 'os.listdir', (['IMAGES_DIRECTORY'], {}), '(IMAGES_DIRECTORY)\n', (2671, 2689), False, 'import os\n'), ((2707, 2739), 'pathlib.Path', 'Path', (['INSTAGRAM_IMAGES_DIRECTORY'], {}), '(INSTAGRAM_IMAGES_DIRECTORY)\n', (2711, 2739), False, 'from pathlib import Path\n'), ((3449, 3487), 'os.listdir', 'os.listdir', (['INSTAGRAM_IMAGES_DIRECTORY'], {}), '(INSTAGRAM_IMAGES_DIRECTORY)\n', (3459, 3487), False, 'import os\n'), ((3610, 3615), 'instabot.Bot', 'Bot', ([], {}), '()\n', (3613, 3615), False, 'from instabot import Bot\n'), ((354, 385), 'os.getenv', 'os.getenv', (['"""INSTAGRAM_USERNAME"""'], {}), "('INSTAGRAM_USERNAME')\n", (363, 385), False, 'import os\n'), ((865, 889), 'requests.get', 'requests.get', (['method_url'], {}), '(method_url)\n', (877, 889), False, 'import requests\n'), ((1406, 1442), 'scripts.download_file', 'download_file', (['image_url', 'image_path'], {}), '(image_url, image_path)\n', (1419, 1442), False, 'from scripts import download_file\n'), ((2916, 2938), 'PIL.Image.open', 'Image.open', (['image_path'], 
{}), '(image_path)\n', (2926, 2938), False, 'from PIL import Image\n'), ((3855, 3876), 'random.choice', 'random.choice', (['quotes'], {}), '(quotes)\n', (3868, 3876), False, 'import random\n')]
|
import logging
from logging import handlers as logging_handlers
class Logg():
LOGMAXSIZE = 50000000
LOGBACKUPCOUT = 2
LOGLEVEL = logging.ERROR
def create_logger(name, logfile=None, logmaxsize=LOGMAXSIZE, loglevel=LOGLEVEL,
logbackupcount=LOGBACKUPCOUT):
""" create and returns logger object that will log to file"""
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
format = '%(asctime)s : %(levelname)s : %(name)s : %(message)s'
formatter = logging.Formatter(format)
if logfile:
RotatingFileHandler = logging_handlers.RotatingFileHandler
file_handler = RotatingFileHandler(logfile, maxBytes=logmaxsize,
backupCount=logbackupcount)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.debug('DEBUG created logger: {}'.format(name))
return logger
|
[
"logging.Formatter",
"logging.getLogger"
] |
[((382, 405), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (399, 405), False, 'import logging\n'), ((537, 562), 'logging.Formatter', 'logging.Formatter', (['format'], {}), '(format)\n', (554, 562), False, 'import logging\n')]
|
import math
SNAP = 0.001
class Vector2(object):
def __init__(self, x=0.0, y=0.0):
self.x = x
self.y = y
class Vector3(object):
def __init__(self, x=0, y=0, z=0):
self.x = x
self.y = y
self.z = z
def clone(self):
return Vector3(self.x, self.y, self.z)
def cross(self, v1, v2):
self.x = v1.y * v2.z - v1.z * v2.y
self.y = v1.z * v2.x - v1.x * v2.z
self.z = v1.x * v2.y - v1.y * v2.x
return self
def distance(self, v):
dx = self.x - v.x
dy = self.y - v.y
dz = self.z - v.z
return math.sqrt(dx*dx + dy*dy + dz*dz)
def distanceSq(self, v):
dx = self.x - v.x
dy = self.y - v.y
dz = self.z - v.z
return (dx*dx + dy*dy + dz*dz)
def dot(self, v):
return self.x * v.x + self.y * v.y + self.z * v.z
def length(self):
return math.sqrt(self.x*self.x + self.y*self.y + self.z * self.z)
def lengthSq(self):
return (self.x*self.x + self.y*self.y + self.z * self.z)
def addScalar(self, s):
self.x += s
self.y += s
self.z += s
return self
def divScalar(self, s):
self.x /= s
self.y /= s
self.z /= s
return self
def multScalar(self, s):
self.x *= s
self.y *= s
self.z *= s
return self
def sub(self, a, b):
self.x = a.x - b.x
self.y = a.y - b.y
self.z = a.z - b.z
return self
def subScalar(self, s):
self.x -= s
self.y -= s
self.z -= s
return self
def equals(self, v, e=None):
e = SNAP if e is None else e
if v.x > self.x-e and v.x < self.x+e and \
v.y > self.y-e and v.y < self.y+e and \
v.z > self.z-e and v.z < self.z+e:
return True
else:
return False
def normalize(self):
len = self.length()
if len > 0.0:
self.multScalar(1.0 / len)
return self
def set(self, x, y, z):
self.x = x
self.y = y
self.z = z
def tostring(self):
return "%0.3f %0.3f %0.3f" % (self.x, self.y, self.z)
class Matrix2(object):
"""
Matrix2
"""
def __init__(self, a=1.0, b=0.0, c=0.0, d=1.0, tx=0.0, ty=0.0):
self.a = a
self.b = b
self.c = c
self.d = d
self.tx = tx
self.ty = ty
def append(self, a, b, c, d, tx, ty):
a1 = self.a
b1 = self.b
c1 = self.c
d1 = self.d
self.a = a*a1+b*c1
self.b = a*b1+b*d1
self.c = c*a1+d*c1
self.d = c*b1+d*d1
self.tx = tx*a1+ty*c1+self.tx
self.ty = tx*b1+ty*d1+self.ty
def append_matrix(self, m):
self.append(m.a, m.b, m.c, m.d, m.tx, m.ty)
def multiply_point(self, vec):
return [
self.a*vec[0] + self.c*vec[1] + self.tx,
self.b*vec[0] + self.d*vec[1] + self.ty
]
def prepend(self, a, b, c, d, tx, ty):
tx1 = self.tx
if (a != 1.0 or b != 0.0 or c != 0.0 or d != 1.0):
a1 = self.a
c1 = self.c
self.a = a1*a+self.b*c
self.b = a1*b+self.b*d
self.c = c1*a+self.d*c
self.d = c1*b+self.d*d
self.tx = tx1*a+self.ty*c+tx
self.ty = tx1*b+self.ty*d+ty
def prepend_matrix(self, m):
self.prepend(m.a, m.b, m.c, m.d, m.tx, m.ty)
def rotate(self, angle):
cos = math.cos(angle)
sin = math.sin(angle)
a1 = self.a
c1 = self.c
tx1 = self.tx
self.a = a1*cos-self.b*sin
self.b = a1*sin+self.b*cos
self.c = c1*cos-self.d*sin
self.d = c1*sin+self.d*cos
self.tx = tx1*cos-self.ty*sin
self.ty = tx1*sin+self.ty*cos
def scale(self, x, y):
self.a *= x;
self.d *= y;
self.tx *= x;
self.ty *= y;
def translate(self, x, y):
self.tx += x;
self.ty += y;
class Matrix4(object):
"""
Matrix4
"""
def __init__(self, data=None):
if not data is None and len(data) == 16:
self.n11 = data[0]; self.n12 = data[1]; self.n13 = data[2]; self.n14 = data[3]
self.n21 = data[4]; self.n22 = data[5]; self.n23 = data[6]; self.n24 = data[7]
self.n31 = data[8]; self.n32 = data[9]; self.n33 = data[10]; self.n34 = data[11]
self.n41 = data[12]; self.n42 = data[13]; self.n43 = data[14]; self.n44 = data[15]
else:
self.n11 = 1.0; self.n12 = 0.0; self.n13 = 0.0; self.n14 = 0.0
self.n21 = 0.0; self.n22 = 1.0; self.n23 = 0.0; self.n24 = 0.0
self.n31 = 0.0; self.n32 = 0.0; self.n33 = 1.0; self.n34 = 0.0
self.n41 = 0.0; self.n42 = 0.0; self.n43 = 0.0; self.n44 = 1.0
def clone(self):
return Matrix4(self.flatten())
def flatten(self):
return [self.n11, self.n12, self.n13, self.n14, \
self.n21, self.n22, self.n23, self.n24, \
self.n31, self.n32, self.n33, self.n34, \
self.n41, self.n42, self.n43, self.n44]
def identity(self):
self.n11 = 1.0; self.n12 = 0.0; self.n13 = 0.0; self.n14 = 0.0
self.n21 = 0.0; self.n22 = 1.0; self.n23 = 0.0; self.n24 = 0.0
self.n31 = 0.0; self.n32 = 0.0; self.n33 = 1.0; self.n34 = 0.0
self.n41 = 0.0; self.n42 = 0.0; self.n43 = 0.0; self.n44 = 1.0
return self
def multiply(self, a, b):
a11 = a.n11; a12 = a.n12; a13 = a.n13; a14 = a.n14
a21 = a.n21; a22 = a.n22; a23 = a.n23; a24 = a.n24
a31 = a.n31; a32 = a.n32; a33 = a.n33; a34 = a.n34
a41 = a.n41; a42 = a.n42; a43 = a.n43; a44 = a.n44
b11 = b.n11; b12 = b.n12; b13 = b.n13; b14 = b.n14
b21 = b.n21; b22 = b.n22; b23 = b.n23; b24 = b.n24
b31 = b.n31; b32 = b.n32; b33 = b.n33; b34 = b.n34
b41 = b.n41; b42 = b.n42; b43 = b.n43; b44 = b.n44
self.n11 = a11 * b11 + a12 * b21 + a13 * b31 + a14 * b41
self.n12 = a11 * b12 + a12 * b22 + a13 * b32 + a14 * b42
self.n13 = a11 * b13 + a12 * b23 + a13 * b33 + a14 * b43
self.n14 = a11 * b14 + a12 * b24 + a13 * b34 + a14 * b44
self.n21 = a21 * b11 + a22 * b21 + a23 * b31 + a24 * b41
self.n22 = a21 * b12 + a22 * b22 + a23 * b32 + a24 * b42
self.n23 = a21 * b13 + a22 * b23 + a23 * b33 + a24 * b43
self.n24 = a21 * b14 + a22 * b24 + a23 * b34 + a24 * b44
self.n31 = a31 * b11 + a32 * b21 + a33 * b31 + a34 * b41
self.n32 = a31 * b12 + a32 * b22 + a33 * b32 + a34 * b42
self.n33 = a31 * b13 + a32 * b23 + a33 * b33 + a34 * b43
self.n34 = a31 * b14 + a32 * b24 + a33 * b34 + a34 * b44
self.n41 = a41 * b11 + a42 * b21 + a43 * b31 + a44 * b41
self.n42 = a41 * b12 + a42 * b22 + a43 * b32 + a44 * b42
self.n43 = a41 * b13 + a42 * b23 + a43 * b33 + a44 * b43
self.n44 = a41 * b14 + a42 * b24 + a43 * b34 + a44 * b44
return self
def multiplyVector3(self, vec):
vx = vec[0]
vy = vec[1]
vz = vec[2]
d = 1.0 / (self.n41 * vx + self.n42 * vy + self.n43 * vz + self.n44)
x = (self.n11 * vx + self.n12 * vy + self.n13 * vz + self.n14) * d
y = (self.n21 * vx + self.n22 * vy + self.n23 * vz + self.n24) * d
z = (self.n31 * vx + self.n32 * vy + self.n33 * vz + self.n34) * d
return [x, y, z]
def multiplyVec3(self, vec):
vx = vec.x
vy = vec.y
vz = vec.z
d = 1.0 / (self.n41 * vx + self.n42 * vy + self.n43 * vz + self.n44)
x = (self.n11 * vx + self.n12 * vy + self.n13 * vz + self.n14) * d
y = (self.n21 * vx + self.n22 * vy + self.n23 * vz + self.n24) * d
z = (self.n31 * vx + self.n32 * vy + self.n33 * vz + self.n34) * d
return Vector3(x, y, z)
def multiplyVector4(self, v):
vx = v[0]; vy = v[1]; vz = v[2]; vw = v[3];
x = self.n11 * vx + self.n12 * vy + self.n13 * vz + self.n14 * vw;
y = self.n21 * vx + self.n22 * vy + self.n23 * vz + self.n24 * vw;
z = self.n31 * vx + self.n32 * vy + self.n33 * vz + self.n34 * vw;
w = self.n41 * vx + self.n42 * vy + self.n43 * vz + self.n44 * vw;
return [x, y, z, w];
def det(self):
#( based on http://www.euclideanspace.com/maths/algebra/matrix/functions/inverse/fourD/index.htm )
return (
self.n14 * self.n23 * self.n32 * self.n41-
self.n13 * self.n24 * self.n32 * self.n41-
self.n14 * self.n22 * self.n33 * self.n41+
self.n12 * self.n24 * self.n33 * self.n41+
self.n13 * self.n22 * self.n34 * self.n41-
self.n12 * self.n23 * self.n34 * self.n41-
self.n14 * self.n23 * self.n31 * self.n42+
self.n13 * self.n24 * self.n31 * self.n42+
self.n14 * self.n21 * self.n33 * self.n42-
self.n11 * self.n24 * self.n33 * self.n42-
self.n13 * self.n21 * self.n34 * self.n42+
self.n11 * self.n23 * self.n34 * self.n42+
self.n14 * self.n22 * self.n31 * self.n43-
self.n12 * self.n24 * self.n31 * self.n43-
self.n14 * self.n21 * self.n32 * self.n43+
self.n11 * self.n24 * self.n32 * self.n43+
self.n12 * self.n21 * self.n34 * self.n43-
self.n11 * self.n22 * self.n34 * self.n43-
self.n13 * self.n22 * self.n31 * self.n44+
self.n12 * self.n23 * self.n31 * self.n44+
self.n13 * self.n21 * self.n32 * self.n44-
self.n11 * self.n23 * self.n32 * self.n44-
self.n12 * self.n21 * self.n33 * self.n44+
self.n11 * self.n22 * self.n33 * self.n44)
def lookAt(self, eye, center, up):
x = Vector3(); y = Vector3(); z = Vector3();
z.sub(eye, center).normalize();
x.cross(up, z).normalize();
y.cross(z, x).normalize();
#eye.normalize()
self.n11 = x.x; self.n12 = x.y; self.n13 = x.z; self.n14 = -x.dot(eye);
self.n21 = y.x; self.n22 = y.y; self.n23 = y.z; self.n24 = -y.dot(eye);
self.n31 = z.x; self.n32 = z.y; self.n33 = z.z; self.n34 = -z.dot(eye);
self.n41 = 0.0; self.n42 = 0.0; self.n43 = 0.0; self.n44 = 1.0;
return self;
def multiplyScalar(self, s):
self.n11 *= s; self.n12 *= s; self.n13 *= s; self.n14 *= s;
self.n21 *= s; self.n22 *= s; self.n23 *= s; self.n24 *= s;
self.n31 *= s; self.n32 *= s; self.n33 *= s; self.n34 *= s;
self.n41 *= s; self.n42 *= s; self.n43 *= s; self.n44 *= s;
return self
@classmethod
def inverse(cls, m1):
# TODO: make this more efficient
#( based on http://www.euclideanspace.com/maths/algebra/matrix/functions/inverse/fourD/index.htm )
m2 = Matrix4();
m2.n11 = m1.n23*m1.n34*m1.n42 - m1.n24*m1.n33*m1.n42 + m1.n24*m1.n32*m1.n43 - m1.n22*m1.n34*m1.n43 - m1.n23*m1.n32*m1.n44 + m1.n22*m1.n33*m1.n44;
m2.n12 = m1.n14*m1.n33*m1.n42 - m1.n13*m1.n34*m1.n42 - m1.n14*m1.n32*m1.n43 + m1.n12*m1.n34*m1.n43 + m1.n13*m1.n32*m1.n44 - m1.n12*m1.n33*m1.n44;
m2.n13 = m1.n13*m1.n24*m1.n42 - m1.n14*m1.n23*m1.n42 + m1.n14*m1.n22*m1.n43 - m1.n12*m1.n24*m1.n43 - m1.n13*m1.n22*m1.n44 + m1.n12*m1.n23*m1.n44;
m2.n14 = m1.n14*m1.n23*m1.n32 - m1.n13*m1.n24*m1.n32 - m1.n14*m1.n22*m1.n33 + m1.n12*m1.n24*m1.n33 + m1.n13*m1.n22*m1.n34 - m1.n12*m1.n23*m1.n34;
m2.n21 = m1.n24*m1.n33*m1.n41 - m1.n23*m1.n34*m1.n41 - m1.n24*m1.n31*m1.n43 + m1.n21*m1.n34*m1.n43 + m1.n23*m1.n31*m1.n44 - m1.n21*m1.n33*m1.n44;
m2.n22 = m1.n13*m1.n34*m1.n41 - m1.n14*m1.n33*m1.n41 + m1.n14*m1.n31*m1.n43 - m1.n11*m1.n34*m1.n43 - m1.n13*m1.n31*m1.n44 + m1.n11*m1.n33*m1.n44;
m2.n23 = m1.n14*m1.n23*m1.n41 - m1.n13*m1.n24*m1.n41 - m1.n14*m1.n21*m1.n43 + m1.n11*m1.n24*m1.n43 + m1.n13*m1.n21*m1.n44 - m1.n11*m1.n23*m1.n44;
m2.n24 = m1.n13*m1.n24*m1.n31 - m1.n14*m1.n23*m1.n31 + m1.n14*m1.n21*m1.n33 - m1.n11*m1.n24*m1.n33 - m1.n13*m1.n21*m1.n34 + m1.n11*m1.n23*m1.n34;
m2.n31 = m1.n22*m1.n34*m1.n41 - m1.n24*m1.n32*m1.n41 + m1.n24*m1.n31*m1.n42 - m1.n21*m1.n34*m1.n42 - m1.n22*m1.n31*m1.n44 + m1.n21*m1.n32*m1.n44;
m2.n32 = m1.n14*m1.n32*m1.n41 - m1.n12*m1.n34*m1.n41 - m1.n14*m1.n31*m1.n42 + m1.n11*m1.n34*m1.n42 + m1.n12*m1.n31*m1.n44 - m1.n11*m1.n32*m1.n44;
m2.n33 = m1.n13*m1.n24*m1.n41 - m1.n14*m1.n22*m1.n41 + m1.n14*m1.n21*m1.n42 - m1.n11*m1.n24*m1.n42 - m1.n12*m1.n21*m1.n44 + m1.n11*m1.n22*m1.n44;
m2.n34 = m1.n14*m1.n22*m1.n31 - m1.n12*m1.n24*m1.n31 - m1.n14*m1.n21*m1.n32 + m1.n11*m1.n24*m1.n32 + m1.n12*m1.n21*m1.n34 - m1.n11*m1.n22*m1.n34;
m2.n41 = m1.n23*m1.n32*m1.n41 - m1.n22*m1.n33*m1.n41 - m1.n23*m1.n31*m1.n42 + m1.n21*m1.n33*m1.n42 + m1.n22*m1.n31*m1.n43 - m1.n21*m1.n32*m1.n43;
m2.n42 = m1.n12*m1.n33*m1.n41 - m1.n13*m1.n32*m1.n41 + m1.n13*m1.n31*m1.n42 - m1.n11*m1.n33*m1.n42 - m1.n12*m1.n31*m1.n43 + m1.n11*m1.n32*m1.n43;
m2.n43 = m1.n13*m1.n22*m1.n41 - m1.n12*m1.n23*m1.n41 - m1.n13*m1.n21*m1.n42 + m1.n11*m1.n23*m1.n42 + m1.n12*m1.n21*m1.n43 - m1.n11*m1.n22*m1.n43;
m2.n44 = m1.n12*m1.n23*m1.n31 - m1.n13*m1.n22*m1.n31 + m1.n13*m1.n21*m1.n32 - m1.n11*m1.n23*m1.n32 - m1.n12*m1.n21*m1.n33 + m1.n11*m1.n22*m1.n33;
m2.multiplyScalar(1.0 / m1.det());
return m2;
@classmethod
def rotationMatrix(cls, x, y, z, angle):
rot = Matrix4()
c = math.cos(angle)
s = math.sin(angle)
t = 1 - c
rot.n11 = t * x * x + c
rot.n12 = t * x * y - s * z
rot.n13 = t * x * z + s * y
rot.n21 = t * x * y + s * z
rot.n22 = t * y * y + c
rot.n23 = t * y * z - s * x
rot.n31 = t * x * z - s * y
rot.n32 = t * y * z + s * x
rot.n33 = t * z * z + c
return rot
@classmethod
def scaleMatrix(cls, x, y, z):
m = Matrix4()
m.n11 = x
m.n22 = y
m.n33 = z
return m
@classmethod
def translationMatrix(cls, x, y, z):
m = Matrix4()
m.n14 = x
m.n24 = y
m.n34 = z
return m
|
[
"math.sin",
"math.cos",
"math.sqrt"
] |
[((635, 673), 'math.sqrt', 'math.sqrt', (['(dx * dx + dy * dy + dz * dz)'], {}), '(dx * dx + dy * dy + dz * dz)\n', (644, 673), False, 'import math\n'), ((946, 1008), 'math.sqrt', 'math.sqrt', (['(self.x * self.x + self.y * self.y + self.z * self.z)'], {}), '(self.x * self.x + self.y * self.y + self.z * self.z)\n', (955, 1008), False, 'import math\n'), ((3664, 3679), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (3672, 3679), False, 'import math\n'), ((3694, 3709), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (3702, 3709), False, 'import math\n'), ((13758, 13773), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (13766, 13773), False, 'import math\n'), ((13786, 13801), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (13794, 13801), False, 'import math\n')]
|
from django.test import TestCase
from rest_framework.test import force_authenticate
from rest_framework import status
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from users.api import AccountDetailsView
class TestAccountDetailsView(TestCase):
def setUp(self):
self.email = "<EMAIL>"
self.username = "donald"
self.user = User.objects.create(username=self.username, email=self.email)
self.view = AccountDetailsView.as_view()
self.factory = RequestFactory()
self.request_url = '/api/account-details/'
def test_not_authorized(self):
request = self.factory.get(self.request_url, {}, format='json')
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_get_user_info(self):
request = self.factory.get(self.request_url, {}, format='json')
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertEqual(response.data['owner']['email'], self.email)
self.assertEqual(response.data['owner']['username'], self.username)
|
[
"django.contrib.auth.models.User.objects.create",
"django.test.client.RequestFactory",
"users.api.AccountDetailsView.as_view",
"rest_framework.test.force_authenticate"
] |
[((397, 458), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {'username': 'self.username', 'email': 'self.email'}), '(username=self.username, email=self.email)\n', (416, 458), False, 'from django.contrib.auth.models import User\n'), ((479, 507), 'users.api.AccountDetailsView.as_view', 'AccountDetailsView.as_view', ([], {}), '()\n', (505, 507), False, 'from users.api import AccountDetailsView\n'), ((531, 547), 'django.test.client.RequestFactory', 'RequestFactory', ([], {}), '()\n', (545, 547), False, 'from django.test.client import RequestFactory\n'), ((937, 980), 'rest_framework.test.force_authenticate', 'force_authenticate', (['request'], {'user': 'self.user'}), '(request, user=self.user)\n', (955, 980), False, 'from rest_framework.test import force_authenticate\n')]
|
#!/usr/bin/python3
"""Remove all end of line whitespace and tabs in a given text file(s).
Usage:
trimeol.py [input_files]
If no input_files are supplied, the program reads from stdin.
"""
import os
import stat
import sys
import time
import binarycheck
def process_file(fname):
"""Remove all end of line whitespace and tabs from the text file 'name'.
Returns:
-1 if error.
0 if file changed.
1 if file skipped.
2 if file not changed.
"""
lines = []
original_len = 0
if binarycheck.is_binary_file(fname):
print("Skipping {} - it appears to be a binary file.".format(fname))
return 1
try:
with open(fname, 'r') as fin:
for line in fin:
original_len += len(line)
lines.append(line.rstrip(" \t\n") + "\n")
data = "".join(lines)
if original_len == len(data): # No changes made
return 2
return write_data(data, fname)
except IOError:
print("An error occurred processing {}".format(fname))
return -1
def write_data(data, destination):
"""Write (overwrite) 'data' to 'destination' file semi-atomically.
Returns 0 on success.
"""
tmpfile = destination + ".tmp"
try:
with open(tmpfile, "w") as fout:
fout.write(data)
fout.flush()
os.fsync(fout.fileno())
try:
os.rename(tmpfile, destination)
except OSError:
# Probably a Windows machine, try to remove destination first.
os.remove(destination)
os.rename(tmpfile, destination)
except IOError:
print("An error occured writing {}".format(destination))
return -1
return 0
def usage():
"""Print usage information."""
print("Usage: {} [input_files]\n"
"If no input_files are supplied, the program reads "
"from stdin.".format(os.path.basename(sys.argv[0])))
def main():
time1 = time.clock()
counters = {"processed": 0,
"skipped": 0,
"changed": 0,
"errors": 0}
files = []
if len(sys.argv) == 1:
if stat.S_ISFIFO(os.fstat(0).st_mode): # Check for piped stdin.
files = sys.stdin
else: # No input - print usage.
usage()
sys.exit(0)
else:
files = sys.argv[1:]
for fname in files:
res = process_file(fname.strip(" \r\n\r"))
if res >= 0:
counters["processed"] += 1
if res == 0:
counters["changed"] += 1
elif res == 1:
counters["skipped"] += 1
else:
counters["errors"] += 1
time2 = time.clock()
timediff = round(time2 - time1, 4)
print("Finished processing files after {} seconds.".format(timediff))
col_width = max(len(row) for row in counters.keys()) + 2
print("=== Summary ===")
for key, value in counters.items():
print("{}: {}".format(key.capitalize().ljust(col_width), value))
if __name__ == "__main__":
main()
|
[
"os.remove",
"os.path.basename",
"os.rename",
"time.clock",
"binarycheck.is_binary_file",
"os.fstat",
"sys.exit"
] |
[((537, 570), 'binarycheck.is_binary_file', 'binarycheck.is_binary_file', (['fname'], {}), '(fname)\n', (563, 570), False, 'import binarycheck\n'), ((1999, 2011), 'time.clock', 'time.clock', ([], {}), '()\n', (2009, 2011), False, 'import time\n'), ((2735, 2747), 'time.clock', 'time.clock', ([], {}), '()\n', (2745, 2747), False, 'import time\n'), ((1434, 1465), 'os.rename', 'os.rename', (['tmpfile', 'destination'], {}), '(tmpfile, destination)\n', (1443, 1465), False, 'import os\n'), ((1942, 1971), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (1958, 1971), False, 'import os\n'), ((2350, 2361), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2358, 2361), False, 'import sys\n'), ((1577, 1599), 'os.remove', 'os.remove', (['destination'], {}), '(destination)\n', (1586, 1599), False, 'import os\n'), ((1612, 1643), 'os.rename', 'os.rename', (['tmpfile', 'destination'], {}), '(tmpfile, destination)\n', (1621, 1643), False, 'import os\n'), ((2201, 2212), 'os.fstat', 'os.fstat', (['(0)'], {}), '(0)\n', (2209, 2212), False, 'import os\n')]
|
import logging
logger = logging.getLogger(__name__)
import uuid
class SchedulerLock(object):
def __init__(self, duration, lock_name="scheduler_lock"):
self.id = self.get_instance_id()
self.duration = duration
self.lock_name = lock_name
logger.debug("%s:%s initialized with %s duration.",
self.__class__.__name__, self.id, duration)
def get_instance_id(self):
""" Can be overridden, but a random UUID at launch is probably good
enough.
"""
return uuid.uuid4().hex
def lock_expired(self, expiry, now):
""" Returns True if the lock is expired, False otherwise. """
if not expiry or int(now) > int(expiry):
return True
return False
def acquire(self):
""" Should be overridden and return True or False depending on whether
it got the lock or not.
"""
raise NotImplementedError
class NoOpLock(SchedulerLock):
def __init__(self):
logger.warning("!!! Using NoOpLock")
logger.warning("!!! Do not do this if you are planning to run more ")
logger.warning("!!! than one scheduler.""")
def acquire(self):
return True
|
[
"uuid.uuid4",
"logging.getLogger"
] |
[((25, 52), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (42, 52), False, 'import logging\n'), ((544, 556), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (554, 556), False, 'import uuid\n')]
|
#!/usr/bin/env python
# encoding: utf-8
from pdb_break import f
f(5)
|
[
"pdb_break.f"
] |
[((66, 70), 'pdb_break.f', 'f', (['(5)'], {}), '(5)\n', (67, 70), False, 'from pdb_break import f\n')]
|
import pandas as pd
from wind_power_forecasting.preprocessing.dataframe import sort_df_index_if_needed, \
convert_df_index_to_datetime_if_needed
from wind_power_forecasting.utils.dataframe import copy_or_not_copy
def input_preprocessing(X_df: pd.DataFrame, datetime_label) -> pd.DataFrame:
# Convert dataframe into time series:
# 1. Set datetime column as index
# 2. Convert it into datetime
# 3. Sort it
# 4. Force the frequency (fill with missing values).
X_df = df_to_ts(X_df, datetime_label, freq='H')
return X_df
def df_to_ts(df: pd.DataFrame, datetime_label: str, freq, copy=False) -> pd.DataFrame:
# Copy the dataframe to avoid border effects (if needed).
df = copy_or_not_copy(df, copy)
# Set the datetime column as dataframe index and check they are no duplicated.
df.set_index(datetime_label, inplace=True, verify_integrity=True)
# Convert the index into a datetime.
convert_df_index_to_datetime_if_needed(df, copy=False)
# Sort the index
sort_df_index_if_needed(df, copy=False)
# Set the dataframe frequency.
df = df.asfreq(freq)
return df
def remove_na(df, copy=False, **kwargs):
# Protect against automatic frequency change from pandas !!!
# when the new dataset with dropped rows has another sampling period, pandas automatically changes it.
sampling_freq = df.index.freq
df.dropna(**kwargs)
df.index.freq = sampling_freq
return df
|
[
"wind_power_forecasting.preprocessing.dataframe.sort_df_index_if_needed",
"wind_power_forecasting.utils.dataframe.copy_or_not_copy",
"wind_power_forecasting.preprocessing.dataframe.convert_df_index_to_datetime_if_needed"
] |
[((722, 748), 'wind_power_forecasting.utils.dataframe.copy_or_not_copy', 'copy_or_not_copy', (['df', 'copy'], {}), '(df, copy)\n', (738, 748), False, 'from wind_power_forecasting.utils.dataframe import copy_or_not_copy\n'), ((949, 1003), 'wind_power_forecasting.preprocessing.dataframe.convert_df_index_to_datetime_if_needed', 'convert_df_index_to_datetime_if_needed', (['df'], {'copy': '(False)'}), '(df, copy=False)\n', (987, 1003), False, 'from wind_power_forecasting.preprocessing.dataframe import sort_df_index_if_needed, convert_df_index_to_datetime_if_needed\n'), ((1030, 1069), 'wind_power_forecasting.preprocessing.dataframe.sort_df_index_if_needed', 'sort_df_index_if_needed', (['df'], {'copy': '(False)'}), '(df, copy=False)\n', (1053, 1069), False, 'from wind_power_forecasting.preprocessing.dataframe import sort_df_index_if_needed, convert_df_index_to_datetime_if_needed\n')]
|
'''List all User Shell Folders via ID number.
An alternative to the usual
objShell = win32com.client.Dispatch("WScript.Shell")
allUserProgramsMenu = objShell.SpecialFolders("AllUsersPrograms")
because "These special folders do not work in all language locales, a preferred
method is to query the value from User Shell folders"
Sources:
https://stackoverflow.com/questions/2063508/find-system-folder-locations-in-python
https://ss64.com/vb/special.html
https://ss64.com/nt/shell-folders-vbs.txt
https://docs.microsoft.com/en-gb/windows/win32/api/shldisp/ne-shldisp-shellspecialfolderconstants#constants
'''
import win32com.client
import csv
shapp = win32com.client.Dispatch("Shell.Application")
csvfile = "special-folder-constants.csv"
## ----------------------------------------
from collections import namedtuple
UserFolder = namedtuple('UserFolder', 'id, description')
ndata = map(UserFolder._make, csv.reader(open(csvfile)))
## ----------------------------------------
data = list(csv.DictReader(open(csvfile)))
def get_description(name):
'''Return description as str'''
for row in data:
if name.upper() == row["UserFolder"]:
return row["Description"]
def get_names(data):
'''Return list of user special folder names from data'''
names = []
for row in data:
names.append(row["UserFolder"])
return names
def get_path_by_name(name):
# print(name.upper())
for row in data:
if name.upper() == row["UserFolder"]:
# print(row["ID"])
return shapp.namespace(int(row["ID"])).self.path
return None
def print_data(data):
'''Display user folder Names and Paths'''
print('{:<20} {}'.format('Name', 'Path'))
for row in data:
name = row["UserFolder"]
path = shapp.namespace(int(row["ID"])).self.path
print(f'{name:<20} {path}')
if __name__ == "__main__":
print("-"*40)
name = 'startmenu'
path = get_path_by_name(name)
print("Name:\t{}".format(name))
print("Path:\t{}".format(path))
print("Desc:\t{}".format(get_description(name)))
print("-"*40)
# print("-"*40)
# print(get_names(data))
# print("-"*40)
# print_data(data)
|
[
"collections.namedtuple"
] |
[((869, 912), 'collections.namedtuple', 'namedtuple', (['"""UserFolder"""', '"""id, description"""'], {}), "('UserFolder', 'id, description')\n", (879, 912), False, 'from collections import namedtuple\n')]
|
import torch.nn as nn
from torch.nn.modules.utils import _pair
from ..functions.roi_align import roi_align
class RoIAlign(nn.Module):
def __init__(self,
out_size,
spatial_scale,
sample_num=0,
use_torchvision=False):
super(RoIAlign, self).__init__()
self.out_size = out_size
self.spatial_scale = float(spatial_scale)
self.sample_num = int(sample_num)
self.use_torchvision = use_torchvision
def forward(self, features, rois):
if self.use_torchvision:
from torchvision.ops import roi_align as tv_roi_align
return tv_roi_align(features, rois, _pair(self.out_size),
self.spatial_scale, self.sample_num)
else:
return roi_align(features, rois, self.out_size, self.spatial_scale,
self.sample_num)
|
[
"torch.nn.modules.utils._pair"
] |
[((716, 736), 'torch.nn.modules.utils._pair', '_pair', (['self.out_size'], {}), '(self.out_size)\n', (721, 736), False, 'from torch.nn.modules.utils import _pair\n')]
|
import json
import datetime
import os
from src.utils.youtube.channels import Channels
def test_list_channels_should_return_right_url():
channel_req = Channels("snippet", "channel_id")
assert channel_req.get_url() == "https://www.googleapis.com/youtube/v3/channels"
def test_list_channels_should_return_right_parameters():
channel_req = Channels("snippet", "channel_id")
assert channel_req.get_parameters() == {"part": "snippet", "id": "channel_id"}
def test_list_channel_should_parse_snippet_correctly():
channel_req = Channels("snippet", "channel_id")
with open("tests/samples/aqua_snippet.json", "r", encoding="utf-8") as f:
sample = json.load(f)
res = channel_req.parse_item(sample)
assert res.id == "UC1opHUrw8rvnsadT-iGp7Cg"
assert res.title == "Aqua Ch. 湊あくあ"
assert (
res.description
== "バーチャルメイド⚓️湊あくあ(みなとあくあ)です!ど、ドジとか言わないでください!\n放送で色んな変わったゲームや雑談をしています…!!\n【生放送】#湊あくあ生放送【関連ツイート】#湊あくあ 【ファン】 #あくあクルー【絵文字】⚓️【ファンアート】 #あくあーと ※動画やツイートで使用させて頂くことがあります。担当絵師:がおう先生【@umaiyo_puyoman】"
)
assert res.published_at == datetime.datetime(2018, 8, 1, 6, 38, 45)
assert (
res.thumbnail.default
== "https://yt3.ggpht.com/a/AGF-l79lFypl4LxY5kf60UpCL6gakgSGHtN-t8hq1g=s88-c-k-c0xffffffff-no-rj-mo"
)
assert (
res.thumbnail.medium
== "https://yt3.ggpht.com/a/AGF-l79lFypl4LxY5kf60UpCL6gakgSGHtN-t8hq1g=s240-c-k-c0xffffffff-no-rj-mo"
)
assert (
res.thumbnail.high
== "https://yt3.ggpht.com/a/AGF-l79lFypl4LxY5kf60UpCL6gakgSGHtN-t8hq1g=s800-c-k-c0xffffffff-no-rj-mo"
)
assert res.country == "JP"
|
[
"datetime.datetime",
"json.load",
"src.utils.youtube.channels.Channels"
] |
[((157, 190), 'src.utils.youtube.channels.Channels', 'Channels', (['"""snippet"""', '"""channel_id"""'], {}), "('snippet', 'channel_id')\n", (165, 190), False, 'from src.utils.youtube.channels import Channels\n'), ((354, 387), 'src.utils.youtube.channels.Channels', 'Channels', (['"""snippet"""', '"""channel_id"""'], {}), "('snippet', 'channel_id')\n", (362, 387), False, 'from src.utils.youtube.channels import Channels\n'), ((548, 581), 'src.utils.youtube.channels.Channels', 'Channels', (['"""snippet"""', '"""channel_id"""'], {}), "('snippet', 'channel_id')\n", (556, 581), False, 'from src.utils.youtube.channels import Channels\n'), ((677, 689), 'json.load', 'json.load', (['f'], {}), '(f)\n', (686, 689), False, 'import json\n'), ((1123, 1163), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(8)', '(1)', '(6)', '(38)', '(45)'], {}), '(2018, 8, 1, 6, 38, 45)\n', (1140, 1163), False, 'import datetime\n')]
|
import matplotlib.pyplot as plt
import pylab
import numpy as N
import scipy.io as sio
import math
from pyfmi import load_fmu
fmu_loc = '/home/shashank/Documents/Gap Year Work/TAMU_ROVm/ROVm/Resources/FMU/'
fmu_sm_name = 'SimplifiedBlueROV2.fmu'
fmu_fm_name = 'InputBasedBlueROV2.fmu'
fmu_full_sm_name = fmu_loc + fmu_sm_name
fmu_full_fm_name = fmu_loc + fmu_fm_name
smmodel = load_fmu(fmu_full_sm_name)
fmmodel = load_fmu(fmu_full_fm_name)
# logInfo = {1:[1,4],2:[4,7],3:[5,4],4:[6,10],5:[15,17],6:[16,17]}
logInfo = {1:[16,1],}
for i in range(1,len(logInfo)+1):
for j in range(1,logInfo[i][1]+1):
logNumber = [logInfo[i][0], 10]
exp_log_loc = '/home/shashank/Documents/Gap Year Work/TAMU_ROVm/ROVm/Resources/PARSED_DATA_SPLIT/LOG' + str(logNumber[0]) + '/'
exp_log_handle = 'IN_OUT_LOG' + str(logNumber[0]) + '_PARSED_' + str(logNumber[1]) + '.mat'
exp_log_name = exp_log_loc + exp_log_handle
exp = sio.loadmat(exp_log_name)
parsed_exp = exp['inout_cell_mat_parsed']
t = parsed_exp[:,0]
in_ch1 = parsed_exp[:,1]
in_ch2 = parsed_exp[:,2]
in_ch3 = parsed_exp[:,3]
in_ch4 = parsed_exp[:,4]
in_ch5 = parsed_exp[:,5]
in_ch6 = parsed_exp[:,6]
u_traj = N.transpose(N.vstack((t,in_ch1, in_ch2, in_ch3, in_ch4, in_ch5, in_ch6)))
t_end_index = int(math.floor(parsed_exp.shape[0]-1))
t_data = parsed_exp[0:t_end_index,0]
t_end = math.floor(t_data[t_data.shape[0]-1])
v_x_data = parsed_exp[0:t_end_index,7]
v_y_data = parsed_exp[0:t_end_index,9]
v_z_data = parsed_exp[0:t_end_index,8]
try:
input_object = (['u[1]','u[2]','u[3]','u[4]','u[5]','u[6]'], u_traj)
smmodel.set('rovBody.mu_d',500)
res_sm = smmodel.simulate(final_time = t_end, input=input_object)
v_x_sm = res_sm['absoluteVelocity.v[1]']
v_y_sm = res_sm['absoluteVelocity.v[2]']
v_z_sm = res_sm['absoluteVelocity.v[3]']
t_sm = res_sm['time']
res_fm = fmmodel.simulate(final_time = t_end, input=input_object)
v_x_fm = res_fm['absoluteVelocity.v[1]']
v_y_fm = res_fm['absoluteVelocity.v[2]']
v_z_fm = res_fm['absoluteVelocity.v[3]']
t_fm = res_fm['time']
plt.figure(1)
plt.figure(figsize=(19.2,10.8), dpi=100)
plt.subplot(3,1,1)
plt.plot(t_sm, v_x_sm, t_fm, v_x_fm)
plt.legend(('Simplified', 'Full'))
plt.title("Model Comparison | Testing the Simplified and Full Model on Data Set: " + str(logNumber[0]) + "." + str(logNumber[1]))
plt.ylabel("X-Axis (m/s)")
plt.grid(True)
plt.subplot(3,1,2)
plt.plot(t_sm, v_y_sm, t_fm, v_y_fm)
plt.legend(('Simplified', 'Full'))
plt.ylabel("Y-Axis (m/s)")
plt.grid(True)
plt.subplot(3,1,3)
plt.plot(t_sm, v_z_sm, t_fm, v_z_fm)
plt.legend(('Simplified', 'Full'))
plt.ylabel("Z-Axis (m/s)")
plt.xlabel("Time (s)")
plt.grid(True)
pylab.savefig("Comp_"+str(logNumber[0]) + '_' + str(logNumber[1]) + '.png', bbox_inches = 'tight')
except:
print("Error in simulating Log " + str(logNumber[0]) + "." + str(logNumber[1]))
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"scipy.io.loadmat",
"matplotlib.pyplot.legend",
"math.floor",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"pyfmi.load_fmu",
"matplotlib.pyplot.grid",
"numpy.vstack",
"matplotlib.pyplot.xlabel"
] |
[((389, 415), 'pyfmi.load_fmu', 'load_fmu', (['fmu_full_sm_name'], {}), '(fmu_full_sm_name)\n', (397, 415), False, 'from pyfmi import load_fmu\n'), ((427, 453), 'pyfmi.load_fmu', 'load_fmu', (['fmu_full_fm_name'], {}), '(fmu_full_fm_name)\n', (435, 453), False, 'from pyfmi import load_fmu\n'), ((936, 961), 'scipy.io.loadmat', 'sio.loadmat', (['exp_log_name'], {}), '(exp_log_name)\n', (947, 961), True, 'import scipy.io as sio\n'), ((1395, 1434), 'math.floor', 'math.floor', (['t_data[t_data.shape[0] - 1]'], {}), '(t_data[t_data.shape[0] - 1])\n', (1405, 1434), False, 'import math\n'), ((1224, 1285), 'numpy.vstack', 'N.vstack', (['(t, in_ch1, in_ch2, in_ch3, in_ch4, in_ch5, in_ch6)'], {}), '((t, in_ch1, in_ch2, in_ch3, in_ch4, in_ch5, in_ch6))\n', (1232, 1285), True, 'import numpy as N\n'), ((1309, 1344), 'math.floor', 'math.floor', (['(parsed_exp.shape[0] - 1)'], {}), '(parsed_exp.shape[0] - 1)\n', (1319, 1344), False, 'import math\n'), ((2148, 2161), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2158, 2161), True, 'import matplotlib.pyplot as plt\n'), ((2166, 2207), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(19.2, 10.8)', 'dpi': '(100)'}), '(figsize=(19.2, 10.8), dpi=100)\n', (2176, 2207), True, 'import matplotlib.pyplot as plt\n'), ((2211, 2231), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (2222, 2231), True, 'import matplotlib.pyplot as plt\n'), ((2234, 2270), 'matplotlib.pyplot.plot', 'plt.plot', (['t_sm', 'v_x_sm', 't_fm', 'v_x_fm'], {}), '(t_sm, v_x_sm, t_fm, v_x_fm)\n', (2242, 2270), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2309), 'matplotlib.pyplot.legend', 'plt.legend', (["('Simplified', 'Full')"], {}), "(('Simplified', 'Full'))\n", (2285, 2309), True, 'import matplotlib.pyplot as plt\n'), ((2448, 2474), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X-Axis (m/s)"""'], {}), "('X-Axis (m/s)')\n", (2458, 2474), True, 'import matplotlib.pyplot as plt\n'), ((2479, 
2493), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2487, 2493), True, 'import matplotlib.pyplot as plt\n'), ((2500, 2520), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (2511, 2520), True, 'import matplotlib.pyplot as plt\n'), ((2523, 2559), 'matplotlib.pyplot.plot', 'plt.plot', (['t_sm', 'v_y_sm', 't_fm', 'v_y_fm'], {}), '(t_sm, v_y_sm, t_fm, v_y_fm)\n', (2531, 2559), True, 'import matplotlib.pyplot as plt\n'), ((2564, 2598), 'matplotlib.pyplot.legend', 'plt.legend', (["('Simplified', 'Full')"], {}), "(('Simplified', 'Full'))\n", (2574, 2598), True, 'import matplotlib.pyplot as plt\n'), ((2603, 2629), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y-Axis (m/s)"""'], {}), "('Y-Axis (m/s)')\n", (2613, 2629), True, 'import matplotlib.pyplot as plt\n'), ((2634, 2648), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2642, 2648), True, 'import matplotlib.pyplot as plt\n'), ((2655, 2675), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (2666, 2675), True, 'import matplotlib.pyplot as plt\n'), ((2678, 2714), 'matplotlib.pyplot.plot', 'plt.plot', (['t_sm', 'v_z_sm', 't_fm', 'v_z_fm'], {}), '(t_sm, v_z_sm, t_fm, v_z_fm)\n', (2686, 2714), True, 'import matplotlib.pyplot as plt\n'), ((2719, 2753), 'matplotlib.pyplot.legend', 'plt.legend', (["('Simplified', 'Full')"], {}), "(('Simplified', 'Full'))\n", (2729, 2753), True, 'import matplotlib.pyplot as plt\n'), ((2758, 2784), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z-Axis (m/s)"""'], {}), "('Z-Axis (m/s)')\n", (2768, 2784), True, 'import matplotlib.pyplot as plt\n'), ((2789, 2811), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (2799, 2811), True, 'import matplotlib.pyplot as plt\n'), ((2816, 2830), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2824, 2830), True, 'import matplotlib.pyplot as plt\n')]
|
import sys
import os
import sklearn
from sklearn.decomposition import TruncatedSVD
# give this a different alias so that it does not conflict with SPACY
from sklearn.externals import joblib as sklearn_joblib
import data_io, params, SIF_embedding
from SIF_embedding import get_weighted_average
# helper for word2vec format
from data_io_w2v import load_w2v_word_map
import numpy as np
from past.builtins import xrange
# this is a modified version of getWeight() from data_io.py
# the main difference is that there is an option here to look up word keys as
# different casings since the default
def getWeightAlternate(words, word2weight,
attempt_other_cases = True,
report_oov = True):
weight4ind = {}
word_oov_set = set()
word_remapping_set = set()
for word, ind in words.items():
word_key = word
if attempt_other_cases:
if word_key not in word2weight:
word_lower = word.lower()
word_capital = word.capitalize()
word_upper = word.upper()
# let's try these in order
if word_lower in word2weight:
word_key = word_lower
elif word_capital in word2weight:
word_key = word_capital
elif word_upper in word2weight:
word_key = word_upper
if word_key in word2weight:
weight4ind[ind] = word2weight[word_key]
if word != word_key:
word_remapping_set.add(word)
else:
weight4ind[ind] = 1.0
word_oov_set.add(word)
if report_oov:
print('Total words remapped : {}'.format(len(word_remapping_set)))
print('Total word-weight OOV : {}'.format(len(word_oov_set)))
#word_oov_sorted = sorted(list(word_oov_set))
#print('Out of vocabulary with respect to word weighting:')
#print(word_oov_sorted)
return weight4ind
def get_weighted_average_alternate(We, x, w):
"""
Compute the weighted average vectors
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in sentence i
:param w: w[i, :] are the weights for the words in sentence i
:return: emb[i, :] are the weighted average vector for sentence i
"""
n_samples = x.shape[0]
emb = np.zeros((n_samples, We.shape[1]))
for i in xrange(n_samples):
denom = np.count_nonzero(w[i,:])
if denom <= 0.0:
print('WHOA! Sample [{0}] attempted to compute a denominator of : [{1}]'.format(i, denom))
else:
emb[i,:] = w[i,:].dot(We[x[i,:],:]) / denom
return emb
# This class serves as a means of fitting an SIF model and then being able to transform other sentence vectors later
# This also allows save/loading model components via scikit-learn's joblib implementation
class SIFModel(object):
def __init__(self):
self.trained = False
self.svd = None
self.word_map = None
self.params = params
self.sentence_count = -1
self.lowercase_tokens = False
self.embeddings_filepath = None
self.embeddings_format = None
def transform(self, We, sentences):
x, m = data_io.sentences2idx(sentences, self.word_map) # x is the array of word indices, m is the binary mask indicating whether there is a word in that location
w = data_io.seq2weight(x, m, self.weight4ind) # get word weights
weighted_emb = get_weighted_average(We, x, w)
# now use the model we've already loaded
return self.remove_pc(weighted_emb)
def compute_pc(self, X):
# this is what happens in compute_pc() in src/SIF_embedding.py
self.svd = TruncatedSVD(n_components=self.params.rmpc, n_iter=7, random_state=0)
self.svd.fit(X)
def remove_pc(self, X):
pc = self.svd.components_
if self.params.rmpc == 1:
XX = X - X.dot(pc.transpose()) * pc
else:
XX = X - X.dot(pc.transpose()).dot(pc)
return XX
def fit(self, sentences, We, lowercase_tokens, embeddings_format, embeddings_filepath, params, word_map, weight4ind):
# store these off for pickling or extra transforms
self.word_map = word_map
self.weight4ind = weight4ind
self.params = params
self.lowercase_tokens = lowercase_tokens
self.embeddings_format = embeddings_format
self.embeddings_filepath = embeddings_filepath
self.sentence_count = len(sentences)
x, m = data_io.sentences2idx(sentences, self.word_map) # x is the array of word indices, m is the binary mask indicating whether there is a word in that location
w = data_io.seq2weight(x, m, self.weight4ind) # get word weights
# now let's do some of what happens in src/SIF_embedding.py
# but also keep some pieces along the way
#weighted_emb = get_weighted_average(We, x, w)
weighted_emb = get_weighted_average_alternate(We, x, w)
self.compute_pc(weighted_emb)
self.trained = True
return self.remove_pc(weighted_emb)
@staticmethod
def embedding_loading_helper(embeddings_filepath, embeddings_format):
words = None
We = None
if embeddings_format == 'GLOVE':
print('Loading embeddings as GLOVE')
(words, We) = data_io.load_glove_word_map(embeddings_filepath)
elif embeddings_format == 'WORD2VEC_BIN':
(words, We) = load_w2v_word_map(embeddings_filepath, binary = True)
elif embeddings_format == 'WORD2VEC_TXT':
(words, We) = load_w2v_word_map(embeddings_filepath, binary = False)
else:
print('Unknown embeddings format : {}'.format(embeddings_format))
return words, We
|
[
"past.builtins.xrange",
"SIF_embedding.get_weighted_average",
"numpy.count_nonzero",
"sklearn.decomposition.TruncatedSVD",
"data_io.load_glove_word_map",
"data_io_w2v.load_w2v_word_map",
"data_io.seq2weight",
"numpy.zeros",
"data_io.sentences2idx"
] |
[((2442, 2476), 'numpy.zeros', 'np.zeros', (['(n_samples, We.shape[1])'], {}), '((n_samples, We.shape[1]))\n', (2450, 2476), True, 'import numpy as np\n'), ((2490, 2507), 'past.builtins.xrange', 'xrange', (['n_samples'], {}), '(n_samples)\n', (2496, 2507), False, 'from past.builtins import xrange\n'), ((2525, 2550), 'numpy.count_nonzero', 'np.count_nonzero', (['w[i, :]'], {}), '(w[i, :])\n', (2541, 2550), True, 'import numpy as np\n'), ((3345, 3392), 'data_io.sentences2idx', 'data_io.sentences2idx', (['sentences', 'self.word_map'], {}), '(sentences, self.word_map)\n', (3366, 3392), False, 'import data_io, params, SIF_embedding\n'), ((3512, 3553), 'data_io.seq2weight', 'data_io.seq2weight', (['x', 'm', 'self.weight4ind'], {}), '(x, m, self.weight4ind)\n', (3530, 3553), False, 'import data_io, params, SIF_embedding\n'), ((3596, 3626), 'SIF_embedding.get_weighted_average', 'get_weighted_average', (['We', 'x', 'w'], {}), '(We, x, w)\n', (3616, 3626), False, 'from SIF_embedding import get_weighted_average\n'), ((3848, 3917), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': 'self.params.rmpc', 'n_iter': '(7)', 'random_state': '(0)'}), '(n_components=self.params.rmpc, n_iter=7, random_state=0)\n', (3860, 3917), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((4731, 4778), 'data_io.sentences2idx', 'data_io.sentences2idx', (['sentences', 'self.word_map'], {}), '(sentences, self.word_map)\n', (4752, 4778), False, 'import data_io, params, SIF_embedding\n'), ((4898, 4939), 'data_io.seq2weight', 'data_io.seq2weight', (['x', 'm', 'self.weight4ind'], {}), '(x, m, self.weight4ind)\n', (4916, 4939), False, 'import data_io, params, SIF_embedding\n'), ((5598, 5646), 'data_io.load_glove_word_map', 'data_io.load_glove_word_map', (['embeddings_filepath'], {}), '(embeddings_filepath)\n', (5625, 5646), False, 'import data_io, params, SIF_embedding\n'), ((5723, 5774), 'data_io_w2v.load_w2v_word_map', 'load_w2v_word_map', (['embeddings_filepath'], 
{'binary': '(True)'}), '(embeddings_filepath, binary=True)\n', (5740, 5774), False, 'from data_io_w2v import load_w2v_word_map\n'), ((5853, 5905), 'data_io_w2v.load_w2v_word_map', 'load_w2v_word_map', (['embeddings_filepath'], {'binary': '(False)'}), '(embeddings_filepath, binary=False)\n', (5870, 5905), False, 'from data_io_w2v import load_w2v_word_map\n')]
|
import argparse
import numpy as np
import torch
from models.common import *
if __name__ == '__main__':
weights_path = r'runs\evolution\weights\best.pt'
is_half = True
# Load pytorch model
model = torch.load(weights_path, map_location=torch.device('cpu'))
net = model['model']
if is_half:
net.half() # 把FP32转为FP16
# print(model)
ckpt = {'epoch': -1,
'best_fitness': model['best_fitness'],
'training_results': None,
'model': net,
'optimizer': None}
# Save .pt
torch.save(ckpt, 'runs\evolution\weights/test.pt')
# for name, parameters in model.named_parameters():
# # print(name,':',parameters.size())
# print(parameters.dtype)
|
[
"torch.save",
"torch.device"
] |
[((563, 615), 'torch.save', 'torch.save', (['ckpt', '"""runs\\\\evolution\\\\weights/test.pt"""'], {}), "(ckpt, 'runs\\\\evolution\\\\weights/test.pt')\n", (573, 615), False, 'import torch\n'), ((254, 273), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (266, 273), False, 'import torch\n')]
|
from PIL import Image
import math
def combine_frames(frames: list[Image], output: str, framerate: int = 50) -> None:
"""Combine a list of frame images into a single .gif
Note that Chrome has a fun bug where GIFs are limited to 50FPS
This function will automatically clamp framerates to 50FPS
Args:
frames (list[Image]): List of frame images
output (str): Path to save output GIF, including extension
framerate (int, optional): Framerate of the gif. Max of 50FPS. Defaults to 50.
"""
if framerate == 60:
framerate = 50
durations = [math.floor(1000 / framerate)] * len(frames)
frames[0].save(
output,
format="GIF",
append_images=frames[1:],
save_all=True,
duration=durations,
loop=0,
transparency=0,
)
|
[
"math.floor"
] |
[((595, 623), 'math.floor', 'math.floor', (['(1000 / framerate)'], {}), '(1000 / framerate)\n', (605, 623), False, 'import math\n')]
|
# coding=utf8
# This code is adapted from the https://github.com/tensorflow/models/tree/master/official/r1/resnet.
# ==========================================================================================
# NAVER’s modifications are Copyright 2020 NAVER corp. All rights reserved.
# ==========================================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import tensorflow as tf
from official.utils.export import export
from utils import data_util
from functions import data_config
import numpy as np
from tqdm import tqdm
def export_test(bin_export_path, flags_obj, ir_eval):
ds = tf.data.Dataset.list_files(flags_obj.data_dir + '/' + flags_obj.val_regex)
ds = ds.interleave(tf.data.TFRecordDataset, cycle_length=10)
def parse_tfr(example_proto):
feature_def = {'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value='')}
features = tf.io.parse_single_example(serialized=example_proto, features=feature_def)
return features['image/encoded'], features['image/class/label']
ds = ds.map(parse_tfr)
ds = ds.batch(flags_obj.val_batch_size)
iterator = ds.make_one_shot_iterator()
images, labels = iterator.get_next()
dconf = data_config.get_config(flags_obj.dataset_name)
num_val_images = dconf.num_images['validation']
if flags_obj.zeroshot_eval or ir_eval:
feature_dim = flags_obj.embedding_size if flags_obj.embedding_size > 0 else flags_obj.num_features
np_features = np.zeros((num_val_images, feature_dim), dtype=np.float32)
np_labels = np.zeros(num_val_images, dtype=np.int64)
np_i = 0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tf.saved_model.load(sess=sess, export_dir=bin_export_path, tags={"serve"})
for _ in tqdm(range(int(num_val_images / flags_obj.val_batch_size) + 1)):
try:
np_image, np_label = sess.run([images, labels])
np_predict = sess.run('embedding_tensor:0',
feed_dict={'input_tensor:0': np_image})
np_features[np_i:np_i + np_predict.shape[0], :] = np_predict
np_labels[np_i:np_i + np_label.shape[0]] = np_label
np_i += np_predict.shape[0]
except tf.errors.OutOfRangeError:
break
assert np_i == num_val_images
from sklearn.preprocessing import normalize
x = normalize(np_features)
np_sim = x.dot(x.T)
np.fill_diagonal(np_sim, -10) # removing similarity for query.
num_correct = 0
for i in range(num_val_images):
cur_label = np_labels[i]
rank1_label = np_labels[np.argmax(np_sim[i, :])]
if rank1_label == cur_label:
num_correct += 1
recall_at_1 = num_correct / num_val_images
metric = recall_at_1
else:
np_i = 0
correct_cnt = 0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tf.saved_model.load(sess=sess, export_dir=bin_export_path, tags={"serve"})
for _ in tqdm(range(int(num_val_images / flags_obj.val_batch_size) + 1)):
try:
np_image, np_label = sess.run([images, labels])
np_predict = sess.run('ArgMax:0',
feed_dict={'input_tensor:0': np_image})
np_i += np_predict.shape[0]
correct_cnt += np.sum(np_predict == np_label)
except tf.errors.OutOfRangeError:
break
assert np_i == num_val_images
metric = correct_cnt / np_i
return metric
def image_bytes_serving_input_fn(image_shape, decoder_name, dtype=tf.float32, pptype='imagenet'):
"""Serving input fn for raw jpeg images."""
def _preprocess_image(image_bytes):
"""Preprocess a single raw image."""
# Bounding box around the whole image.
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=dtype, shape=[1, 1, 4])
_, _, num_channels = image_shape
tf.logging.info("!!!!!!!!!! Preprocessing type for exporting pb: {} and decoder type: {}".format(pptype, decoder_name))
image = data_util.preprocess_image(
image_buffer=image_bytes, is_training=False, bbox=bbox,
num_channels=num_channels, dtype=dtype, use_random_crop=False,
decoder_name=decoder_name, dct_method='INTEGER_ACCURATE', preprocessing_type=pptype)
return image
image_bytes_list = tf.placeholder(
shape=[None], dtype=tf.string, name='input_tensor')
images = tf.map_fn(
_preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
return tf.estimator.export.TensorServingInputReceiver(
images, {'image_bytes': image_bytes_list})
def export_pb(flags_core, flags_obj, shape, classifier, ir_eval=False):
export_dtype = flags_core.get_tf_dtype(flags_obj)
if not flags_obj.data_format:
raise ValueError('The `data_format` must be specified: channels_first or channels_last ')
bin_export_path = os.path.join(flags_obj.export_dir, flags_obj.data_format, 'binary_input')
bin_input_receiver_fn = functools.partial(image_bytes_serving_input_fn, shape, flags_obj.export_decoder_type,
dtype=export_dtype, pptype=flags_obj.preprocessing_type)
pp_export_path = os.path.join(flags_obj.export_dir, flags_obj.data_format, 'preprocessed_input')
pp_input_receiver_fn = export.build_tensor_serving_input_receiver_fn(
shape, batch_size=None, dtype=export_dtype)
result_bin_export_path = classifier.export_savedmodel(bin_export_path, bin_input_receiver_fn)
classifier.export_savedmodel(pp_export_path, pp_input_receiver_fn)
if flags_obj.export_decoder_type == 'jpeg':
metric = export_test(result_bin_export_path, flags_obj, ir_eval)
msg = 'IMPOTANT! Evaluation metric of exported saved_model.pb is {}'.format(metric)
tf.logging.info(msg)
with tf.gfile.Open(result_bin_export_path.decode("utf-8") + '/model_performance.txt', 'w') as fp:
fp.write(msg)
|
[
"numpy.sum",
"tensorflow.logging.info",
"numpy.argmax",
"tensorflow.local_variables_initializer",
"os.path.join",
"tensorflow.estimator.export.TensorServingInputReceiver",
"tensorflow.placeholder",
"tensorflow.map_fn",
"functools.partial",
"numpy.fill_diagonal",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.constant",
"utils.data_util.preprocess_image",
"sklearn.preprocessing.normalize",
"tensorflow.saved_model.load",
"tensorflow.io.parse_single_example",
"numpy.zeros",
"tensorflow.data.Dataset.list_files",
"tensorflow.FixedLenFeature",
"functions.data_config.get_config",
"official.utils.export.export.build_tensor_serving_input_receiver_fn"
] |
[((1436, 1510), 'tensorflow.data.Dataset.list_files', 'tf.data.Dataset.list_files', (["(flags_obj.data_dir + '/' + flags_obj.val_regex)"], {}), "(flags_obj.data_dir + '/' + flags_obj.val_regex)\n", (1462, 1510), True, 'import tensorflow as tf\n'), ((2116, 2162), 'functions.data_config.get_config', 'data_config.get_config', (['flags_obj.dataset_name'], {}), '(flags_obj.dataset_name)\n', (2138, 2162), False, 'from functions import data_config\n'), ((5267, 5333), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None]', 'dtype': 'tf.string', 'name': '"""input_tensor"""'}), "(shape=[None], dtype=tf.string, name='input_tensor')\n", (5281, 5333), True, 'import tensorflow as tf\n'), ((5350, 5426), 'tensorflow.map_fn', 'tf.map_fn', (['_preprocess_image', 'image_bytes_list'], {'back_prop': '(False)', 'dtype': 'dtype'}), '(_preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)\n', (5359, 5426), True, 'import tensorflow as tf\n'), ((5441, 5534), 'tensorflow.estimator.export.TensorServingInputReceiver', 'tf.estimator.export.TensorServingInputReceiver', (['images', "{'image_bytes': image_bytes_list}"], {}), "(images, {'image_bytes':\n image_bytes_list})\n", (5487, 5534), True, 'import tensorflow as tf\n'), ((5810, 5883), 'os.path.join', 'os.path.join', (['flags_obj.export_dir', 'flags_obj.data_format', '"""binary_input"""'], {}), "(flags_obj.export_dir, flags_obj.data_format, 'binary_input')\n", (5822, 5883), False, 'import os\n'), ((5910, 6062), 'functools.partial', 'functools.partial', (['image_bytes_serving_input_fn', 'shape', 'flags_obj.export_decoder_type'], {'dtype': 'export_dtype', 'pptype': 'flags_obj.preprocessing_type'}), '(image_bytes_serving_input_fn, shape, flags_obj.\n export_decoder_type, dtype=export_dtype, pptype=flags_obj.\n preprocessing_type)\n', (5927, 6062), False, 'import functools\n'), ((6117, 6196), 'os.path.join', 'os.path.join', (['flags_obj.export_dir', 'flags_obj.data_format', '"""preprocessed_input"""'], {}), 
"(flags_obj.export_dir, flags_obj.data_format, 'preprocessed_input')\n", (6129, 6196), False, 'import os\n'), ((6222, 6316), 'official.utils.export.export.build_tensor_serving_input_receiver_fn', 'export.build_tensor_serving_input_receiver_fn', (['shape'], {'batch_size': 'None', 'dtype': 'export_dtype'}), '(shape, batch_size=None, dtype\n =export_dtype)\n', (6267, 6316), False, 'from official.utils.export import export\n'), ((1815, 1889), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', ([], {'serialized': 'example_proto', 'features': 'feature_def'}), '(serialized=example_proto, features=feature_def)\n', (1841, 1889), True, 'import tensorflow as tf\n'), ((2375, 2432), 'numpy.zeros', 'np.zeros', (['(num_val_images, feature_dim)'], {'dtype': 'np.float32'}), '((num_val_images, feature_dim), dtype=np.float32)\n', (2383, 2432), True, 'import numpy as np\n'), ((2449, 2489), 'numpy.zeros', 'np.zeros', (['num_val_images'], {'dtype': 'np.int64'}), '(num_val_images, dtype=np.int64)\n', (2457, 2489), True, 'import numpy as np\n'), ((3315, 3337), 'sklearn.preprocessing.normalize', 'normalize', (['np_features'], {}), '(np_features)\n', (3324, 3337), False, 'from sklearn.preprocessing import normalize\n'), ((3366, 3395), 'numpy.fill_diagonal', 'np.fill_diagonal', (['np_sim', '(-10)'], {}), '(np_sim, -10)\n', (3382, 3395), True, 'import numpy as np\n'), ((4741, 4804), 'tensorflow.constant', 'tf.constant', (['[0.0, 0.0, 1.0, 1.0]'], {'dtype': 'dtype', 'shape': '[1, 1, 4]'}), '([0.0, 0.0, 1.0, 1.0], dtype=dtype, shape=[1, 1, 4])\n', (4752, 4804), True, 'import tensorflow as tf\n'), ((4978, 5221), 'utils.data_util.preprocess_image', 'data_util.preprocess_image', ([], {'image_buffer': 'image_bytes', 'is_training': '(False)', 'bbox': 'bbox', 'num_channels': 'num_channels', 'dtype': 'dtype', 'use_random_crop': '(False)', 'decoder_name': 'decoder_name', 'dct_method': '"""INTEGER_ACCURATE"""', 'preprocessing_type': 'pptype'}), "(image_buffer=image_bytes, 
is_training=False,\n bbox=bbox, num_channels=num_channels, dtype=dtype, use_random_crop=\n False, decoder_name=decoder_name, dct_method='INTEGER_ACCURATE',\n preprocessing_type=pptype)\n", (5004, 5221), False, 'from utils import data_util\n'), ((6691, 6711), 'tensorflow.logging.info', 'tf.logging.info', (['msg'], {}), '(msg)\n', (6706, 6711), True, 'import tensorflow as tf\n'), ((1647, 1703), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]'], {'dtype': 'tf.int64', 'default_value': '(-1)'}), '([], dtype=tf.int64, default_value=-1)\n', (1665, 1703), True, 'import tensorflow as tf\n'), ((1741, 1798), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]'], {'dtype': 'tf.string', 'default_value': '""""""'}), "([], dtype=tf.string, default_value='')\n", (1759, 1798), True, 'import tensorflow as tf\n'), ((2512, 2524), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2522, 2524), True, 'import tensorflow as tf\n'), ((2639, 2713), 'tensorflow.saved_model.load', 'tf.saved_model.load', ([], {'sess': 'sess', 'export_dir': 'bin_export_path', 'tags': "{'serve'}"}), "(sess=sess, export_dir=bin_export_path, tags={'serve'})\n", (2658, 2713), True, 'import tensorflow as tf\n'), ((3754, 3766), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3764, 3766), True, 'import tensorflow as tf\n'), ((3881, 3955), 'tensorflow.saved_model.load', 'tf.saved_model.load', ([], {'sess': 'sess', 'export_dir': 'bin_export_path', 'tags': "{'serve'}"}), "(sess=sess, export_dir=bin_export_path, tags={'serve'})\n", (3900, 3955), True, 'import tensorflow as tf\n'), ((2549, 2582), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2580, 2582), True, 'import tensorflow as tf\n'), ((2599, 2631), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (2629, 2631), True, 'import tensorflow as tf\n'), ((3547, 3570), 'numpy.argmax', 'np.argmax', (['np_sim[i, :]'], {}), '(np_sim[i, :])\n', (3556, 3570), 
True, 'import numpy as np\n'), ((3791, 3824), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3822, 3824), True, 'import tensorflow as tf\n'), ((3841, 3873), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (3871, 3873), True, 'import tensorflow as tf\n'), ((4286, 4316), 'numpy.sum', 'np.sum', (['(np_predict == np_label)'], {}), '(np_predict == np_label)\n', (4292, 4316), True, 'import numpy as np\n')]
|
# Talon voice commands for Xcode
# <NAME> <EMAIL>
from talon.voice import Key, Context
from ..misc.mouse import control_shift_click
ctx = Context("xcode", bundle="com.apple.dt.Xcode")
ctx.keymap(
{
"build it": Key("cmd-b"),
"stop it": Key("cmd-."),
"run it": Key("cmd-r"),
"go back": Key("cmd-ctrl-left"),
"go (fore | forward)": Key("cmd-ctrl-right"),
"find in (proj | project)": Key("cmd-shift-f"),
"(sell find in (proj | project) | find selection in project)": Key(
"cmd-e cmd-shift-f enter"
),
"(sell find ace in (proj | project) | replace selection in project)": Key(
"cmd-e cmd-shift-alt-f"
),
"next in (proj | project)": Key("cmd-ctrl-g"),
"prev in (proj | project)": Key("shift-cmd-ctrl-g"),
"split window": Key("cmd-alt-enter"),
"show editor": Key("cmd-enter"),
"(show | hide) debug": Key("cmd-shift-y"),
"(show | find) call hierarchy": Key("cmd-ctrl-shift-h"),
"show (recent | recent files)": [Key("ctrl-1"), "recent files\n"],
"show related": Key("ctrl-1"),
"show history": Key("ctrl-2"),
"show files": Key("ctrl-5"),
"show (methods | items)": Key("ctrl-6"),
"show navigator": Key("cmd-0"),
"hide (navigator | project | warnings | breakpoints | reports | build)": Key(
"cmd-0"
),
"show project": Key("cmd-1"),
"show warnings": Key("cmd-5"),
"show breakpoints": Key("cmd-8"),
"show (reports | build)": Key("cmd-9"),
"show diffs": Key("cmd-alt-shift-enter"),
"(next counterpart | show header | switcher)": Key("cmd-ctrl-down"),
"prev counterpart": Key("cmd-ctrl-up"),
"toggle comment": Key("cmd-/"),
"toggle breakpoint": Key("cmd-\\"),
"toggle all breakpoints": Key("cmd-y"),
"move line up": Key("cmd-alt-["),
"move line down": Key("cmd-alt-]"),
"go (deafen | definition)": Key("cmd-ctrl-j"),
"edit scheme": Key("cmd-shift-,"),
"quick open": Key("cmd-shift-o"),
"comm skoosh": "// ",
"(comm | comment) line": [
"//------------------------------------------------------------------------------",
Key("enter"),
],
"step in": Key("f7"),
"step over": Key("f6"),
"step out": Key("f8"),
"step (continue | go)": Key("ctrl-cmd-y"),
"show blame for line": Key("cmd-alt-ctrl-b"),
"(reveal file | show file in finder)": Key("cmd-alt-ctrl-shift-f"),
"(snipline | delete line)": Key("cmd-alt-ctrl-shift-backspace"),
"add cursor down": Key("ctrl-shift-down"),
"add cursor up": Key("ctrl-shift-up"),
"add cursor": control_shift_click,
"dub add cursor": lambda m: control_shift_click(m, 0, 2),
"((select | sell) (partial | sub) [word] left)": Key("shift-ctrl-left"),
"((select | sell) (partial | sub) [word] right)": Key("shift-ctrl-right"),
# the following require custom key bindings in xcode preferences
"((partial | sub) [word] left | wonkrim)": Key("alt-ctrl-left"),
"((partial | sub) [word] right | wonkrish)": Key("alt-ctrl-right"),
}
)
|
[
"talon.voice.Key",
"talon.voice.Context"
] |
[((141, 186), 'talon.voice.Context', 'Context', (['"""xcode"""'], {'bundle': '"""com.apple.dt.Xcode"""'}), "('xcode', bundle='com.apple.dt.Xcode')\n", (148, 186), False, 'from talon.voice import Key, Context\n'), ((226, 238), 'talon.voice.Key', 'Key', (['"""cmd-b"""'], {}), "('cmd-b')\n", (229, 238), False, 'from talon.voice import Key, Context\n'), ((259, 271), 'talon.voice.Key', 'Key', (['"""cmd-."""'], {}), "('cmd-.')\n", (262, 271), False, 'from talon.voice import Key, Context\n'), ((291, 303), 'talon.voice.Key', 'Key', (['"""cmd-r"""'], {}), "('cmd-r')\n", (294, 303), False, 'from talon.voice import Key, Context\n'), ((324, 344), 'talon.voice.Key', 'Key', (['"""cmd-ctrl-left"""'], {}), "('cmd-ctrl-left')\n", (327, 344), False, 'from talon.voice import Key, Context\n'), ((377, 398), 'talon.voice.Key', 'Key', (['"""cmd-ctrl-right"""'], {}), "('cmd-ctrl-right')\n", (380, 398), False, 'from talon.voice import Key, Context\n'), ((436, 454), 'talon.voice.Key', 'Key', (['"""cmd-shift-f"""'], {}), "('cmd-shift-f')\n", (439, 454), False, 'from talon.voice import Key, Context\n'), ((527, 557), 'talon.voice.Key', 'Key', (['"""cmd-e cmd-shift-f enter"""'], {}), "('cmd-e cmd-shift-f enter')\n", (530, 557), False, 'from talon.voice import Key, Context\n'), ((659, 687), 'talon.voice.Key', 'Key', (['"""cmd-e cmd-shift-alt-f"""'], {}), "('cmd-e cmd-shift-alt-f')\n", (662, 687), False, 'from talon.voice import Key, Context\n'), ((747, 764), 'talon.voice.Key', 'Key', (['"""cmd-ctrl-g"""'], {}), "('cmd-ctrl-g')\n", (750, 764), False, 'from talon.voice import Key, Context\n'), ((802, 825), 'talon.voice.Key', 'Key', (['"""shift-cmd-ctrl-g"""'], {}), "('shift-cmd-ctrl-g')\n", (805, 825), False, 'from talon.voice import Key, Context\n'), ((851, 871), 'talon.voice.Key', 'Key', (['"""cmd-alt-enter"""'], {}), "('cmd-alt-enter')\n", (854, 871), False, 'from talon.voice import Key, Context\n'), ((896, 912), 'talon.voice.Key', 'Key', (['"""cmd-enter"""'], {}), "('cmd-enter')\n", (899, 
912), False, 'from talon.voice import Key, Context\n'), ((945, 963), 'talon.voice.Key', 'Key', (['"""cmd-shift-y"""'], {}), "('cmd-shift-y')\n", (948, 963), False, 'from talon.voice import Key, Context\n'), ((1005, 1028), 'talon.voice.Key', 'Key', (['"""cmd-ctrl-shift-h"""'], {}), "('cmd-ctrl-shift-h')\n", (1008, 1028), False, 'from talon.voice import Key, Context\n'), ((1129, 1142), 'talon.voice.Key', 'Key', (['"""ctrl-1"""'], {}), "('ctrl-1')\n", (1132, 1142), False, 'from talon.voice import Key, Context\n'), ((1168, 1181), 'talon.voice.Key', 'Key', (['"""ctrl-2"""'], {}), "('ctrl-2')\n", (1171, 1181), False, 'from talon.voice import Key, Context\n'), ((1205, 1218), 'talon.voice.Key', 'Key', (['"""ctrl-5"""'], {}), "('ctrl-5')\n", (1208, 1218), False, 'from talon.voice import Key, Context\n'), ((1254, 1267), 'talon.voice.Key', 'Key', (['"""ctrl-6"""'], {}), "('ctrl-6')\n", (1257, 1267), False, 'from talon.voice import Key, Context\n'), ((1295, 1307), 'talon.voice.Key', 'Key', (['"""cmd-0"""'], {}), "('cmd-0')\n", (1298, 1307), False, 'from talon.voice import Key, Context\n'), ((1390, 1402), 'talon.voice.Key', 'Key', (['"""cmd-0"""'], {}), "('cmd-0')\n", (1393, 1402), False, 'from talon.voice import Key, Context\n'), ((1450, 1462), 'talon.voice.Key', 'Key', (['"""cmd-1"""'], {}), "('cmd-1')\n", (1453, 1462), False, 'from talon.voice import Key, Context\n'), ((1489, 1501), 'talon.voice.Key', 'Key', (['"""cmd-5"""'], {}), "('cmd-5')\n", (1492, 1501), False, 'from talon.voice import Key, Context\n'), ((1531, 1543), 'talon.voice.Key', 'Key', (['"""cmd-8"""'], {}), "('cmd-8')\n", (1534, 1543), False, 'from talon.voice import Key, Context\n'), ((1579, 1591), 'talon.voice.Key', 'Key', (['"""cmd-9"""'], {}), "('cmd-9')\n", (1582, 1591), False, 'from talon.voice import Key, Context\n'), ((1615, 1641), 'talon.voice.Key', 'Key', (['"""cmd-alt-shift-enter"""'], {}), "('cmd-alt-shift-enter')\n", (1618, 1641), False, 'from talon.voice import Key, Context\n'), ((1698, 1718), 
'talon.voice.Key', 'Key', (['"""cmd-ctrl-down"""'], {}), "('cmd-ctrl-down')\n", (1701, 1718), False, 'from talon.voice import Key, Context\n'), ((1748, 1766), 'talon.voice.Key', 'Key', (['"""cmd-ctrl-up"""'], {}), "('cmd-ctrl-up')\n", (1751, 1766), False, 'from talon.voice import Key, Context\n'), ((1794, 1806), 'talon.voice.Key', 'Key', (['"""cmd-/"""'], {}), "('cmd-/')\n", (1797, 1806), False, 'from talon.voice import Key, Context\n'), ((1837, 1850), 'talon.voice.Key', 'Key', (['"""cmd-\\\\"""'], {}), "('cmd-\\\\')\n", (1840, 1850), False, 'from talon.voice import Key, Context\n'), ((1886, 1898), 'talon.voice.Key', 'Key', (['"""cmd-y"""'], {}), "('cmd-y')\n", (1889, 1898), False, 'from talon.voice import Key, Context\n'), ((1924, 1940), 'talon.voice.Key', 'Key', (['"""cmd-alt-["""'], {}), "('cmd-alt-[')\n", (1927, 1940), False, 'from talon.voice import Key, Context\n'), ((1968, 1984), 'talon.voice.Key', 'Key', (['"""cmd-alt-]"""'], {}), "('cmd-alt-]')\n", (1971, 1984), False, 'from talon.voice import Key, Context\n'), ((2022, 2039), 'talon.voice.Key', 'Key', (['"""cmd-ctrl-j"""'], {}), "('cmd-ctrl-j')\n", (2025, 2039), False, 'from talon.voice import Key, Context\n'), ((2064, 2082), 'talon.voice.Key', 'Key', (['"""cmd-shift-,"""'], {}), "('cmd-shift-,')\n", (2067, 2082), False, 'from talon.voice import Key, Context\n'), ((2106, 2124), 'talon.voice.Key', 'Key', (['"""cmd-shift-o"""'], {}), "('cmd-shift-o')\n", (2109, 2124), False, 'from talon.voice import Key, Context\n'), ((2343, 2352), 'talon.voice.Key', 'Key', (['"""f7"""'], {}), "('f7')\n", (2346, 2352), False, 'from talon.voice import Key, Context\n'), ((2375, 2384), 'talon.voice.Key', 'Key', (['"""f6"""'], {}), "('f6')\n", (2378, 2384), False, 'from talon.voice import Key, Context\n'), ((2406, 2415), 'talon.voice.Key', 'Key', (['"""f8"""'], {}), "('f8')\n", (2409, 2415), False, 'from talon.voice import Key, Context\n'), ((2449, 2466), 'talon.voice.Key', 'Key', (['"""ctrl-cmd-y"""'], {}), "('ctrl-cmd-y')\n", 
(2452, 2466), False, 'from talon.voice import Key, Context\n'), ((2499, 2520), 'talon.voice.Key', 'Key', (['"""cmd-alt-ctrl-b"""'], {}), "('cmd-alt-ctrl-b')\n", (2502, 2520), False, 'from talon.voice import Key, Context\n'), ((2569, 2596), 'talon.voice.Key', 'Key', (['"""cmd-alt-ctrl-shift-f"""'], {}), "('cmd-alt-ctrl-shift-f')\n", (2572, 2596), False, 'from talon.voice import Key, Context\n'), ((2634, 2669), 'talon.voice.Key', 'Key', (['"""cmd-alt-ctrl-shift-backspace"""'], {}), "('cmd-alt-ctrl-shift-backspace')\n", (2637, 2669), False, 'from talon.voice import Key, Context\n'), ((2698, 2720), 'talon.voice.Key', 'Key', (['"""ctrl-shift-down"""'], {}), "('ctrl-shift-down')\n", (2701, 2720), False, 'from talon.voice import Key, Context\n'), ((2747, 2767), 'talon.voice.Key', 'Key', (['"""ctrl-shift-up"""'], {}), "('ctrl-shift-up')\n", (2750, 2767), False, 'from talon.voice import Key, Context\n'), ((2935, 2957), 'talon.voice.Key', 'Key', (['"""shift-ctrl-left"""'], {}), "('shift-ctrl-left')\n", (2938, 2957), False, 'from talon.voice import Key, Context\n'), ((3017, 3040), 'talon.voice.Key', 'Key', (['"""shift-ctrl-right"""'], {}), "('shift-ctrl-right')\n", (3020, 3040), False, 'from talon.voice import Key, Context\n'), ((3166, 3186), 'talon.voice.Key', 'Key', (['"""alt-ctrl-left"""'], {}), "('alt-ctrl-left')\n", (3169, 3186), False, 'from talon.voice import Key, Context\n'), ((3241, 3262), 'talon.voice.Key', 'Key', (['"""alt-ctrl-right"""'], {}), "('alt-ctrl-right')\n", (3244, 3262), False, 'from talon.voice import Key, Context\n'), ((1071, 1084), 'talon.voice.Key', 'Key', (['"""ctrl-1"""'], {}), "('ctrl-1')\n", (1074, 1084), False, 'from talon.voice import Key, Context\n'), ((2299, 2311), 'talon.voice.Key', 'Key', (['"""enter"""'], {}), "('enter')\n", (2302, 2311), False, 'from talon.voice import Key, Context\n')]
|
from django.urls import path
from . import views
from . import AmendViews
app_name = 'polls'
# urlpatterns = [
# path('', views.index, name = 'index'),
#
# path('<int:question_id>/', views.detail, name='detail'),
#
# path('<int:question_id>/results/', views.results, name='results'),
#
# path('<int:question_id>/vote/', views.vote, name='vote'),
# ]
# 使用通用视图
urlpatterns = [
path('', AmendViews.IndexView.as_view(), name = 'index'),
path('<int:pk>/', AmendViews.DetailView.as_view(), name='detail'),
path('<int:pk>/results/', AmendViews.ResultsView.as_view(), name='results'),
path('<int:question_id>/vote/',AmendViews.vote, name='vote')
]
|
[
"django.urls.path"
] |
[((623, 684), 'django.urls.path', 'path', (['"""<int:question_id>/vote/"""', 'AmendViews.vote'], {'name': '"""vote"""'}), "('<int:question_id>/vote/', AmendViews.vote, name='vote')\n", (627, 684), False, 'from django.urls import path\n')]
|
import json
class Person:
def __init__(self, name, age, job, verified, parents):
self.name = name
self.age = age
self.job = job
self.verified = verified
self.parents = parents
def __str__(self):
return ", ".join([f"{k}: {v}" for k, v in self.__dict__.items()])
class MyEncoder(json.JSONEncoder):
def default(self, o):
return o.__dict__
class MyDecoder(json.JSONDecoder):
def decode(self, s):
d = json.JSONDecoder.decode(self, s)
return Person(**d)
if __name__ == '__main__':
bob = Person(name="Bob", age=12, job=None, verified=True,
parents=["Alice", "Carl"])
bob_json = json.dumps(bob, cls=MyEncoder)
print(bob_json)
bob = json.loads(bob_json, cls=MyDecoder)
print(bob)
|
[
"json.JSONDecoder.decode",
"json.loads",
"json.dumps"
] |
[((688, 718), 'json.dumps', 'json.dumps', (['bob'], {'cls': 'MyEncoder'}), '(bob, cls=MyEncoder)\n', (698, 718), False, 'import json\n'), ((749, 784), 'json.loads', 'json.loads', (['bob_json'], {'cls': 'MyDecoder'}), '(bob_json, cls=MyDecoder)\n', (759, 784), False, 'import json\n'), ((483, 515), 'json.JSONDecoder.decode', 'json.JSONDecoder.decode', (['self', 's'], {}), '(self, s)\n', (506, 515), False, 'import json\n')]
|
from django.http import *
from heartbeat.models import Heartbeat
from heartbeat.forms import HeartBeatForm
from django.views.decorators.csrf import csrf_exempt
from heartbeat.PhasNoiseReduce import noiseReduce
import json
from datetime import datetime
import os
import sys
import time
@csrf_exempt
def save_audio_file(request):
try:
if request.method == "POST":
audio_data = HeartBeatForm(request.POST, request.FILES)
if audio_data.is_valid():
file_name_rebase = audio_data.cleaned_data['user_id'] + \
'_' + audio_data.cleaned_data['dog_name'] + \
'_' + datetime.now().strftime("%Y-%m-%d_%H시:%M분") + \
'.aac'
obj = Heartbeat(user_id=audio_data.cleaned_data['user_id'],
dog_name=audio_data.cleaned_data['dog_name'])
obj.audio_file = request.FILES['audio_file']
obj.audio_file.name = file_name_rebase
obj.heartbeat_normal_condition = -1
obj.save()
os.system('ffmpeg -i ./heartbeat_data/' + obj.audio_file.name + ' ' + './heartbeat_data/' + obj.audio_file.name[:-4] + '.wav')
os.system('rm -r ./heartbeat_data/' + obj.audio_file.name)
noiseReduce(obj.audio_file.name[:-4])
return HttpResponse()
else:
return HttpResponseForbidden(request.FILES['audio_file'])
else:
return HttpResponseNotAllowed()
except Exception as e:
return HttpResponseServerError(e)
@csrf_exempt
def search_log(request):
try:
if request.method == "POST":
json_data = json.loads(request.body)
audio_info = Heartbeat.objects.filter(audio_idx__exact=json_data['audio_idx'])[0]
audio_info_dic = {
"audio_idx": audio_info.audio_idx,
"dog_name": audio_info.dog_name,
"user_id": audio_info.user_id,
"create_data": json_default(audio_info.create_date),
"heartbeat_normal_condition": audio_info.heartbeat_normal_condition
}
return HttpResponse(json.dumps(audio_info_dic))
else:
return HttpResponseForbidden()
except Exception as e:
return HttpResponseServerError(e)
def json_default(value):
if isinstance(value, datetime):
return value.strftime('%Y-%m-%d_%H:%M')
|
[
"heartbeat.models.Heartbeat",
"json.loads",
"os.system",
"json.dumps",
"heartbeat.models.Heartbeat.objects.filter",
"heartbeat.forms.HeartBeatForm",
"heartbeat.PhasNoiseReduce.noiseReduce",
"datetime.datetime.now"
] |
[((402, 444), 'heartbeat.forms.HeartBeatForm', 'HeartBeatForm', (['request.POST', 'request.FILES'], {}), '(request.POST, request.FILES)\n', (415, 444), False, 'from heartbeat.forms import HeartBeatForm\n'), ((1762, 1786), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (1772, 1786), False, 'import json\n'), ((793, 897), 'heartbeat.models.Heartbeat', 'Heartbeat', ([], {'user_id': "audio_data.cleaned_data['user_id']", 'dog_name': "audio_data.cleaned_data['dog_name']"}), "(user_id=audio_data.cleaned_data['user_id'], dog_name=audio_data.\n cleaned_data['dog_name'])\n", (802, 897), False, 'from heartbeat.models import Heartbeat\n'), ((1137, 1267), 'os.system', 'os.system', (["('ffmpeg -i ./heartbeat_data/' + obj.audio_file.name + ' ' +\n './heartbeat_data/' + obj.audio_file.name[:-4] + '.wav')"], {}), "('ffmpeg -i ./heartbeat_data/' + obj.audio_file.name + ' ' +\n './heartbeat_data/' + obj.audio_file.name[:-4] + '.wav')\n", (1146, 1267), False, 'import os\n'), ((1280, 1338), 'os.system', 'os.system', (["('rm -r ./heartbeat_data/' + obj.audio_file.name)"], {}), "('rm -r ./heartbeat_data/' + obj.audio_file.name)\n", (1289, 1338), False, 'import os\n'), ((1356, 1393), 'heartbeat.PhasNoiseReduce.noiseReduce', 'noiseReduce', (['obj.audio_file.name[:-4]'], {}), '(obj.audio_file.name[:-4])\n', (1367, 1393), False, 'from heartbeat.PhasNoiseReduce import noiseReduce\n'), ((1812, 1877), 'heartbeat.models.Heartbeat.objects.filter', 'Heartbeat.objects.filter', ([], {'audio_idx__exact': "json_data['audio_idx']"}), "(audio_idx__exact=json_data['audio_idx'])\n", (1836, 1877), False, 'from heartbeat.models import Heartbeat\n'), ((2258, 2284), 'json.dumps', 'json.dumps', (['audio_info_dic'], {}), '(audio_info_dic)\n', (2268, 2284), False, 'import json\n'), ((680, 694), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (692, 694), False, 'from datetime import datetime\n')]
|
import cv2 as cv
import numpy as np
# https://docs.opencv.org/4.2.0/d7/dfc/group__highgui.html
def white_balance(img):
result = cv.cvtColor(img, cv.COLOR_BGR2LAB)
avg_a = np.average(result[:, :, 1])
avg_b = np.average(result[:, :, 2])
result[:, :, 1] = result[:, :, 1] - ((avg_a - 128) * (result[:, :, 0] / 255.0) * 1.1)
result[:, :, 2] = result[:, :, 2] - ((avg_b - 128) * (result[:, :, 0] / 255.0) * 1.1)
result = cv.cvtColor(result, cv.COLOR_LAB2BGR)
return result
cv.namedWindow("webcam")
cv.moveWindow('webcam', 0, 0)
cv.namedWindow("l")
cv.moveWindow("l", 0, 300)
cv.namedWindow("a")
cv.moveWindow("a", 340, 300)
cv.namedWindow("b")
cv.moveWindow("b", 680, 300)
image = cv.imread("sample.png")
image = cv.resize(image, (320, 240))
cv.imshow('webcam', image)
# gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
# b, g, r = cv.split(image)
# cv.imshow('b', b)
# cv.imshow('g', g)
# cv.imshow('r', r)
conv = cv.cvtColor(image, cv.COLOR_BGR2LAB)
l, a, b = cv.split(conv)
# l[:] = 100
a[:] += 10 # green - red
b[:] -= 25 # blue - yellow
# print(b[0][0])
# print(type(b)) # numpy.ndarray
# print(b.shape)
# print(b.size)
# print(b.dtype)
cv.imshow('l', l)
cv.imshow('a', a)
cv.imshow('b', b)
result = cv.merge((l, a, b))
result = cv.cvtColor(result, cv.COLOR_LAB2BGR)
cv.imshow('result', result)
auto_balanced = white_balance(image)
cv.imshow('auto', auto_balanced)
# cv.setWindowProperty('webcam', cv.WND_PROP_AUTOSIZE, cv.WINDOW_FULLSCREEN)
cv.waitKey(0)
# cv.destroyWindow('SnapshotTest')
cv.destroyAllWindows()
vc.release()
|
[
"numpy.average",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.merge",
"cv2.imread",
"cv2.namedWindow",
"cv2.split",
"cv2.moveWindow",
"cv2.imshow",
"cv2.resize"
] |
[((499, 523), 'cv2.namedWindow', 'cv.namedWindow', (['"""webcam"""'], {}), "('webcam')\n", (513, 523), True, 'import cv2 as cv\n'), ((524, 553), 'cv2.moveWindow', 'cv.moveWindow', (['"""webcam"""', '(0)', '(0)'], {}), "('webcam', 0, 0)\n", (537, 553), True, 'import cv2 as cv\n'), ((555, 574), 'cv2.namedWindow', 'cv.namedWindow', (['"""l"""'], {}), "('l')\n", (569, 574), True, 'import cv2 as cv\n'), ((575, 601), 'cv2.moveWindow', 'cv.moveWindow', (['"""l"""', '(0)', '(300)'], {}), "('l', 0, 300)\n", (588, 601), True, 'import cv2 as cv\n'), ((603, 622), 'cv2.namedWindow', 'cv.namedWindow', (['"""a"""'], {}), "('a')\n", (617, 622), True, 'import cv2 as cv\n'), ((623, 651), 'cv2.moveWindow', 'cv.moveWindow', (['"""a"""', '(340)', '(300)'], {}), "('a', 340, 300)\n", (636, 651), True, 'import cv2 as cv\n'), ((653, 672), 'cv2.namedWindow', 'cv.namedWindow', (['"""b"""'], {}), "('b')\n", (667, 672), True, 'import cv2 as cv\n'), ((673, 701), 'cv2.moveWindow', 'cv.moveWindow', (['"""b"""', '(680)', '(300)'], {}), "('b', 680, 300)\n", (686, 701), True, 'import cv2 as cv\n'), ((711, 734), 'cv2.imread', 'cv.imread', (['"""sample.png"""'], {}), "('sample.png')\n", (720, 734), True, 'import cv2 as cv\n'), ((743, 771), 'cv2.resize', 'cv.resize', (['image', '(320, 240)'], {}), '(image, (320, 240))\n', (752, 771), True, 'import cv2 as cv\n'), ((772, 798), 'cv2.imshow', 'cv.imshow', (['"""webcam"""', 'image'], {}), "('webcam', image)\n", (781, 798), True, 'import cv2 as cv\n'), ((944, 980), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2LAB'], {}), '(image, cv.COLOR_BGR2LAB)\n', (955, 980), True, 'import cv2 as cv\n'), ((991, 1005), 'cv2.split', 'cv.split', (['conv'], {}), '(conv)\n', (999, 1005), True, 'import cv2 as cv\n'), ((1175, 1192), 'cv2.imshow', 'cv.imshow', (['"""l"""', 'l'], {}), "('l', l)\n", (1184, 1192), True, 'import cv2 as cv\n'), ((1193, 1210), 'cv2.imshow', 'cv.imshow', (['"""a"""', 'a'], {}), "('a', a)\n", (1202, 1210), True, 'import cv2 as cv\n'), ((1211, 
1228), 'cv2.imshow', 'cv.imshow', (['"""b"""', 'b'], {}), "('b', b)\n", (1220, 1228), True, 'import cv2 as cv\n'), ((1239, 1258), 'cv2.merge', 'cv.merge', (['(l, a, b)'], {}), '((l, a, b))\n', (1247, 1258), True, 'import cv2 as cv\n'), ((1268, 1305), 'cv2.cvtColor', 'cv.cvtColor', (['result', 'cv.COLOR_LAB2BGR'], {}), '(result, cv.COLOR_LAB2BGR)\n', (1279, 1305), True, 'import cv2 as cv\n'), ((1306, 1333), 'cv2.imshow', 'cv.imshow', (['"""result"""', 'result'], {}), "('result', result)\n", (1315, 1333), True, 'import cv2 as cv\n'), ((1372, 1404), 'cv2.imshow', 'cv.imshow', (['"""auto"""', 'auto_balanced'], {}), "('auto', auto_balanced)\n", (1381, 1404), True, 'import cv2 as cv\n'), ((1483, 1496), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (1493, 1496), True, 'import cv2 as cv\n'), ((1541, 1563), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1561, 1563), True, 'import cv2 as cv\n'), ((134, 168), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2LAB'], {}), '(img, cv.COLOR_BGR2LAB)\n', (145, 168), True, 'import cv2 as cv\n'), ((181, 208), 'numpy.average', 'np.average', (['result[:, :, 1]'], {}), '(result[:, :, 1])\n', (191, 208), True, 'import numpy as np\n'), ((221, 248), 'numpy.average', 'np.average', (['result[:, :, 2]'], {}), '(result[:, :, 2])\n', (231, 248), True, 'import numpy as np\n'), ((442, 479), 'cv2.cvtColor', 'cv.cvtColor', (['result', 'cv.COLOR_LAB2BGR'], {}), '(result, cv.COLOR_LAB2BGR)\n', (453, 479), True, 'import cv2 as cv\n')]
|
# Generated by Django 2.2.6 on 2019-10-14 14:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tasks', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='countbeanstask',
name='status',
field=models.CharField(choices=[('PENDING', 'PENDING'), ('RECEIVED', 'RECEIVED'), ('STARTED', 'STARTED'), ('PROGESS', 'PROGESS'), ('SUCCESS', 'SUCCESS'), ('FAILURE', 'FAILURE'), ('REVOKED', 'REVOKED'), ('REJECTED', 'REJECTED'), ('RETRY', 'RETRY'), ('IGNORED', 'IGNORED')], db_index=True, default='PENDING', max_length=128, verbose_name='status'),
),
migrations.AlterField(
model_name='sendemailtask',
name='status',
field=models.CharField(choices=[('PENDING', 'PENDING'), ('RECEIVED', 'RECEIVED'), ('STARTED', 'STARTED'), ('PROGESS', 'PROGESS'), ('SUCCESS', 'SUCCESS'), ('FAILURE', 'FAILURE'), ('REVOKED', 'REVOKED'), ('REJECTED', 'REJECTED'), ('RETRY', 'RETRY'), ('IGNORED', 'IGNORED')], db_index=True, default='PENDING', max_length=128, verbose_name='status'),
),
]
|
[
"django.db.models.CharField"
] |
[((331, 688), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('PENDING', 'PENDING'), ('RECEIVED', 'RECEIVED'), ('STARTED', 'STARTED'),\n ('PROGESS', 'PROGESS'), ('SUCCESS', 'SUCCESS'), ('FAILURE', 'FAILURE'),\n ('REVOKED', 'REVOKED'), ('REJECTED', 'REJECTED'), ('RETRY', 'RETRY'), (\n 'IGNORED', 'IGNORED')]", 'db_index': '(True)', 'default': '"""PENDING"""', 'max_length': '(128)', 'verbose_name': '"""status"""'}), "(choices=[('PENDING', 'PENDING'), ('RECEIVED', 'RECEIVED'),\n ('STARTED', 'STARTED'), ('PROGESS', 'PROGESS'), ('SUCCESS', 'SUCCESS'),\n ('FAILURE', 'FAILURE'), ('REVOKED', 'REVOKED'), ('REJECTED', 'REJECTED'\n ), ('RETRY', 'RETRY'), ('IGNORED', 'IGNORED')], db_index=True, default=\n 'PENDING', max_length=128, verbose_name='status')\n", (347, 688), False, 'from django.db import migrations, models\n'), ((799, 1156), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('PENDING', 'PENDING'), ('RECEIVED', 'RECEIVED'), ('STARTED', 'STARTED'),\n ('PROGESS', 'PROGESS'), ('SUCCESS', 'SUCCESS'), ('FAILURE', 'FAILURE'),\n ('REVOKED', 'REVOKED'), ('REJECTED', 'REJECTED'), ('RETRY', 'RETRY'), (\n 'IGNORED', 'IGNORED')]", 'db_index': '(True)', 'default': '"""PENDING"""', 'max_length': '(128)', 'verbose_name': '"""status"""'}), "(choices=[('PENDING', 'PENDING'), ('RECEIVED', 'RECEIVED'),\n ('STARTED', 'STARTED'), ('PROGESS', 'PROGESS'), ('SUCCESS', 'SUCCESS'),\n ('FAILURE', 'FAILURE'), ('REVOKED', 'REVOKED'), ('REJECTED', 'REJECTED'\n ), ('RETRY', 'RETRY'), ('IGNORED', 'IGNORED')], db_index=True, default=\n 'PENDING', max_length=128, verbose_name='status')\n", (815, 1156), False, 'from django.db import migrations, models\n')]
|
"""
Course Unit API Serializers. Representing course unit catalog data
"""
from rest_framework import serializers
class UnitSerializer(serializers.Serializer):
"""
Serializer for Course Unit objects providing minimal data about the course unit.
"""
id = serializers.CharField(read_only=True)
# course_id = serializers.CharField(read_only=True)
block_id = serializers.CharField(read_only=True)
block_name = serializers.CharField(read_only=True)
block_type = serializers.CharField(read_only=True)
class CourseSerializer(serializers.Serializer):
"""
Serializer for Course
"""
course_id = serializers.CharField(read_only=True)
course_name = serializers.CharField(read_only=True)
subchapter_name = serializers.CharField(read_only=True)
units = UnitSerializer(many=True, read_only=True)
|
[
"rest_framework.serializers.CharField"
] |
[((277, 314), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (298, 314), False, 'from rest_framework import serializers\n'), ((388, 425), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (409, 425), False, 'from rest_framework import serializers\n'), ((445, 482), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (466, 482), False, 'from rest_framework import serializers\n'), ((500, 537), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (521, 537), False, 'from rest_framework import serializers\n'), ((646, 683), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (667, 683), False, 'from rest_framework import serializers\n'), ((702, 739), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (723, 739), False, 'from rest_framework import serializers\n'), ((762, 799), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (783, 799), False, 'from rest_framework import serializers\n')]
|
import os
from dotenv import load_dotenv
from config import PROJECT_NAME
# load env variables
load_dotenv()
pg_user = os.getenv("POSTGRES_USER")
pg_password = os.getenv("POSTGRES_PASSWORD")
pg_db = os.getenv("POSTGRES_DB")
SENTRY_ENV_NAME = f"{PROJECT_NAME}_lottery_bot".casefold()
GUILD_INDEX = 0
TORTOISE_ORM = {
"connections": {"default": f"postgres://{pg_user}:{pg_password}@localhost:5432/{pg_db}"},
"apps": {
"app": {
"models": ["app.models", "aerich.models"],
}
},
}
|
[
"dotenv.load_dotenv",
"os.getenv"
] |
[((96, 109), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (107, 109), False, 'from dotenv import load_dotenv\n'), ((122, 148), 'os.getenv', 'os.getenv', (['"""POSTGRES_USER"""'], {}), "('POSTGRES_USER')\n", (131, 148), False, 'import os\n'), ((163, 193), 'os.getenv', 'os.getenv', (['"""POSTGRES_PASSWORD"""'], {}), "('POSTGRES_PASSWORD')\n", (172, 193), False, 'import os\n'), ((202, 226), 'os.getenv', 'os.getenv', (['"""POSTGRES_DB"""'], {}), "('POSTGRES_DB')\n", (211, 226), False, 'import os\n')]
|
from time import sleep
from playsound import playsound
from frontend import Frontend
from leds import LEDs, Color
frontend = Frontend()
leds = LEDs()
print("Testing audio output")
playsound('sample.mp3')
print("Audio playback ended")
try:
print("Testing EEG Frontend")
data = frontend.read_regs(0x00, 1)
assert data == [0x3E], "Wrong output"
print("EEG Frontend responsive")
print("Testing LEDs")
print("Aquisition LED")
leds.aquisition(True)
sleep(0.5)
leds.aquisition(False)
sleep(0.5)
leds.aquisition(True)
print("USER1 (PWM) LED")
for i in range(200):
red = (i % 10) * 10
blue = ((i % 100) // 10) * 10
leds.led1(red, 0, blue)
sleep(0.02)
print("USER2 (2-color) LED")
for state in [Color.RED, Color.BLUE, Color.PURPLE, Color.CLOSED] * 3:
leds.led2(state)
sleep(0.2)
print("USER3 LED")
for state in [Color.RED, Color.CLOSED] * 3:
leds.led3(state)
sleep(0.2)
print("LEDs testing ended")
finally:
frontend.close()
leds.close()
|
[
"playsound.playsound",
"leds.LEDs",
"frontend.Frontend",
"time.sleep"
] |
[((127, 137), 'frontend.Frontend', 'Frontend', ([], {}), '()\n', (135, 137), False, 'from frontend import Frontend\n'), ((145, 151), 'leds.LEDs', 'LEDs', ([], {}), '()\n', (149, 151), False, 'from leds import LEDs, Color\n'), ((183, 206), 'playsound.playsound', 'playsound', (['"""sample.mp3"""'], {}), "('sample.mp3')\n", (192, 206), False, 'from playsound import playsound\n'), ((480, 490), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (485, 490), False, 'from time import sleep\n'), ((522, 532), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (527, 532), False, 'from time import sleep\n'), ((720, 731), 'time.sleep', 'sleep', (['(0.02)'], {}), '(0.02)\n', (725, 731), False, 'from time import sleep\n'), ((873, 883), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (878, 883), False, 'from time import sleep\n'), ((989, 999), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (994, 999), False, 'from time import sleep\n')]
|
import os
from setuptools import find_packages, setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as _in:
return _in.read()
setup(
name="civis-jupyter-extensions",
version="1.1.0",
author="<NAME>",
author_email="<EMAIL>",
url="https://www.civisanalytics.com",
description=("Tools for using the Civis "
"Platform with Jupyter notebooks."),
packages=find_packages(),
long_description=read('README.rst'),
include_package_data=True,
license="BSD-3",
install_requires=read('requirements.txt').strip().split('\n'))
|
[
"os.path.dirname",
"setuptools.find_packages"
] |
[((440, 455), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (453, 455), False, 'from setuptools import find_packages, setup\n'), ((100, 125), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (115, 125), False, 'import os\n')]
|
from sample import *
import time
import os
import functools
from pprint import pprint
from fft import Fft
from prune import *
import math
import multiprocessing
import sys
def runParallelTest(params):
header = params[0]
data = params[1]
test = params[2]
#Start Time
startTime = time.time()
Config.verbose = False
s = Sample()
s.add(header)
for row in data:
s.add(row)
fft = Fft(s)
#Test sample
t = s.clone()
for row in test:
t.add(row)
treesSort = []
for f in fft.trees:
#Get accuracy for each tree
TP = 0
TN = 0
FP = 0
FN = 0
firstRow = True
for row in t.rows:
if firstRow:
firstRow = False
else:
result = row[t.y[0].at]
for b in f:
if not b.disc or b.disc.matches(row):
#True
if b.typ == result:
if result == 1:
TP+=1
if result == 0:
TN+=1
#False
else:
if result == 1:
FN+=1
if result == 0:
FP+=1
break;
break;
treesSort.append([f, TP, TN, FP, FN])
#Sort by accuracy
#Accuracy = TP+TN / TP+TN+FP+FN
treesSort.sort(key=lambda x: (x[1]+x[2])/(x[1]+x[2]+x[3]+x[4]))
chosenTree = treesSort[-1]
TP = chosenTree[1]
TN = chosenTree[2]
FP = chosenTree[3]
FN = chosenTree[4]
try:
accuracy = (TP+TN) / (TP+TN+FP+FN)
precision = TP / (TP+FP)
falseAlarm = FP / (FP+TN)
recall = TP/(TP+FN)
return [accuracy, precision, falseAlarm, recall]
except BaseException:
accuracy = (TP+TN) / (TP+TN+FP+FN)
calculatedTime = time.time() - startTime
return [accuracy, 0, 0, 0]
#Set the arguments
# CLI: argv[1] = dataset index into Config.dataSets; argv[2] = 6-character
# bitmask toggling DISCLESS, SHORTTREES, BASEBALLTREES, SPILLTREES,
# BINARYCHOPS, PRUNETREES ('0' disables, anything else enables).
if len(sys.argv) > 1:
    try:
        chosenDataset = int(sys.argv[1])
        Config.dataSet = Config.dataSets[chosenDataset]
        print(Config.dataSet)
    except BaseException:
        print("error")
if len(sys.argv) > 2:
    try:
        chosenImprovements = sys.argv[2]
        Config.DISCLESS = False if chosenImprovements[0] == '0' else True
        Config.SHORTTREES = False if chosenImprovements[1] == '0' else True
        Config.BASEBALLTREES = False if chosenImprovements[2] == '0' else True
        Config.SPILLTREES = False if chosenImprovements[3] == '0' else True
        Config.BINARYCHOPS = False if chosenImprovements[4] == '0' else True
        Config.PRUNETREES = False if chosenImprovements[5] == '0' else True
    except BaseException:
        print("error")
startTime = time.time()
# Repository root: two directory levels above this file.
myPath = os.path.dirname(os.path.abspath(__file__))
myPath = myPath[:myPath.rindex("/")]
myPath = myPath[:myPath.rindex("/")]
# Get the data and headers
totalRows = 0
headers = None
data = []
for i, row in enumerate(readCSV(myPath + Config.dataSet)):
    if i == 0:
        headers = row
    else:
        totalRows+=1
        data.append(row)
#Split the data into 5 folds for 5x5 cross-validation
fiveSplitData = []
for i in range(0, 5):
    tempData = []
    for j in range(math.floor(len(data)/5 * i), math.floor(len(data)/5 * (i+1))):
        tempData.append(data[j])
    fiveSplitData.append(tempData)
# Bug fix: the original created a default Pool and immediately rebound the
# name to a second Pool, leaking the first pool's worker processes.
pool = multiprocessing.Pool(processes=25)
inputs = []
for i in range(0, 5):
    #Repeat 5 times
    for j in range(0, 5):
        # Fold j is the test set; the other four folds form the training set.
        seperateData = []
        seperateTest = []
        seperateTest.extend(fiveSplitData[j])
        for k in range(0, 5):
            if not k == j:
                seperateData.extend(fiveSplitData[k])
        params = [headers, seperateData, seperateTest]
        inputs.append(params)
outputs = pool.map(runParallelTest, inputs)
# Report the mean of each metric over the 25 runs.
print("\n--------------Time------------")
print((time.time() - startTime)/25)
print("\n--------------ACCURACY------------")
print(sum(map(lambda x: x[0], outputs))/25)
print("\n--------------PRECISION------------")
print(sum(map(lambda x: x[1], outputs))/25)
print("\n--------------FALSE ALARM------------")
print(sum(map(lambda x: x[2], outputs))/25)
print("\n--------------RECALL------------")
print(sum(map(lambda x: x[3], outputs))/25)
|
[
"multiprocessing.Pool",
"os.path.abspath",
"fft.Fft",
"time.time"
] |
[((2596, 2607), 'time.time', 'time.time', ([], {}), '()\n', (2605, 2607), False, 'import time\n'), ((3188, 3210), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (3208, 3210), False, 'import multiprocessing\n'), ((3219, 3253), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': '(25)'}), '(processes=25)\n', (3239, 3253), False, 'import multiprocessing\n'), ((310, 321), 'time.time', 'time.time', ([], {}), '()\n', (319, 321), False, 'import time\n'), ((428, 434), 'fft.Fft', 'Fft', (['s'], {}), '(s)\n', (431, 434), False, 'from fft import Fft\n'), ((2636, 2661), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2651, 2661), False, 'import os\n'), ((3696, 3707), 'time.time', 'time.time', ([], {}), '()\n', (3705, 3707), False, 'import time\n'), ((1753, 1764), 'time.time', 'time.time', ([], {}), '()\n', (1762, 1764), False, 'import time\n')]
|
# Date: 2020/11/21
# Author: <NAME>
# Description:
# This is a simple program to learn how to use cristal boxes in python
##
#import
import unittest
#is_older(): Verify if the person is older
def is_older(age):
    """Return True when *age* meets the adult threshold of 18, else False."""
    return age >= 18
#Class
class cristal_box_test(unittest.TestCase):
    """White-box tests for is_older()."""
    def test_is_older(self):
        # 20 is at or above the threshold of 18, so is_older must be True.
        age = 20
        result = is_older(age)
        # Bug fix: the original never asserted anything, so the test
        # could not fail.
        self.assertTrue(result)
    def test_is_younger(self):
        # 15 is below the threshold, so is_older must be False.
        age = 15
        # Bug fix: the original assigned the function object (is_older)
        # instead of calling it, so nothing was exercised.
        result = is_older(age)
        self.assertFalse(result)
#run(): This function runs all the other functions in the program
def run():
unittest.main()
#main(): This is the main function of the program
if __name__ == "__main__":
run()
|
[
"unittest.main"
] |
[((594, 609), 'unittest.main', 'unittest.main', ([], {}), '()\n', (607, 609), False, 'import unittest\n')]
|
import os
from dotenv import load_dotenv
# Directory containing this file; the .env file is expected alongside it.
basedir = os.path.abspath(os.path.dirname(__file__))
env = os.path.join(basedir, '.env')
if os.path.exists(env):
    # Load environment variables (e.g. NO_SOCKETIO) from the .env file.
    load_dotenv(env)
else:
    # Best effort: continue with whatever is already in the environment.
    print('Warning: .env file not found')
class Config(object):
    """Base configuration shared by all environments.

    Attributes:
        DEBUG: debug mode, off by default.
        TESTING: testing mode, off by default.
        NO_SOCKETIO: True when the NO_SOCKETIO environment variable is set
            to a non-empty value at import time.
    """
    DEBUG = False
    TESTING = False
    # bool(...) replaces the redundant `True if ... else False` ternary.
    NO_SOCKETIO = bool(os.environ.get('NO_SOCKETIO'))
class DevConfig(Config):
    # Development: same as Config but with debugging enabled.
    DEBUG = True
class TestConfig(Config):
    # Test suite: same as Config but with testing mode enabled.
    TESTING = True
class ProdConfig(Config):
    # Production uses the Config defaults unchanged.
    pass
|
[
"os.path.dirname",
"os.path.exists",
"dotenv.load_dotenv",
"os.environ.get",
"os.path.join"
] |
[((102, 131), 'os.path.join', 'os.path.join', (['basedir', '""".env"""'], {}), "(basedir, '.env')\n", (114, 131), False, 'import os\n'), ((135, 154), 'os.path.exists', 'os.path.exists', (['env'], {}), '(env)\n', (149, 154), False, 'import os\n'), ((69, 94), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (84, 94), False, 'import os\n'), ((160, 176), 'dotenv.load_dotenv', 'load_dotenv', (['env'], {}), '(env)\n', (171, 176), False, 'from dotenv import load_dotenv\n'), ((313, 342), 'os.environ.get', 'os.environ.get', (['"""NO_SOCKETIO"""'], {}), "('NO_SOCKETIO')\n", (327, 342), False, 'import os\n')]
|
#This file for checking vocabulary testing programme for beginner to advance purpose in this programe show hindi meaning word and user give english word for this hindi meaning.
from tkinter import *
from tkinter import messagebox
from insert import *
from tempInsert import *
# Main application window.
root = Tk()
root.title("VocabQuiz")
root.geometry("400x250+10+10")
root.resizable(False, False)
root.configure(background='#8F5902')
# Text field where the user types the English translation.
entry = Entry(font=('Verdana', 18),width=23, bg='white',bd=3, fg='green')
entry.place(x=20, y=150)
#Both are Fetching random data from given database
cursor = conn.execute("SELECT id, words, meaning from VOCABULARY ORDER BY RANDOM() LIMIT 1")
# NOTE(review): this second query lacks "ORDER BY" before RANDOM --
# confirm it executes without error against the tempInsert database.
cursor1 = newConn.execute("SELECT id, words, meaning from VOCABULARY RANDOM LIMIT 1")
def checkReal(row):
    '''Compare the user's entry with the expected English word (main DB).

    row is (id, english_word, hindi_meaning).  On a correct answer the row
    is passed to newData(), deleted from the main VOCABULARY table, and the
    quiz window is closed; otherwise a retry dialog is shown.
    '''
    #Asks English word for given Hindi meaning word
    word = str(entry.get())
    # print(word)
    #This condition checks whether the given word and the English word are the same or not
    if word == row[1].lower():
        # print("Your answer is correct!!")
        # NOTE(review): newData presumably archives the learned word in the
        # temp database -- confirm against insert/tempInsert.
        newData(row[0], row[1], row[2])
        index = row[0]
        delete = '''DELETE from VOCABULARY where ID = ?'''
        conn.execute(delete, (index,))
        conn.commit()
        root.destroy()
    else:
        messagebox.askretrycancel("Incorrect Word", "Try again?")
        # print("Your answer is not correct!!")
def checkTemp(row):
    '''Compare the user's entry with the expected English word (temp DB).

    Mirror image of checkReal: on a correct answer the row is passed to
    insertData(), deleted from the temp VOCABULARY table, and the quiz
    window is closed; otherwise a retry dialog is shown.
    '''
    #Asks English word for given Hindi meaning word
    word = str(entry.get())
    # print(word)
    #This condition checks whether the given word and the English word are the same or not
    if word == row[1].lower():
        # print("Your answer is correct!!")
        # NOTE(review): insertData presumably moves the word back to the
        # main database -- confirm against insert/tempInsert.
        insertData(row[0], row[1], row[2])
        index = row[0]
        delete = '''DELETE from VOCABULARY where ID = ?'''
        newConn.execute(delete, (index,))
        newConn.commit()
        root.destroy()
    else:
        messagebox.askretrycancel("Incorrect Word", "Try again?")
        # print("Your answer is not correct!!")
def realVocabFile():
    """Show one quiz question drawn from the main vocabulary database.

    fetchall() consumes the cursor, so the query is re-executed to fetch a
    fresh random row.  When the table is empty, the state file is switched
    to "tempVocabFile" and control is handed to tempVocabFile().
    """
    global cursor
    #'fetchall' fuction check fetched data is empty or not
    if len(cursor.fetchall()) != 0:
        cursor = conn.execute("SELECT id, words, meaning from VOCABULARY ORDER BY RANDOM() LIMIT 1")
        for row in cursor:
            # Display the Hindi meaning; the user must type the English word.
            hindi = StringVar(root, row[2])
            label = Message(root, textvariable=hindi,relief=RAISED, font=('Verdana', 16, 'bold'), width=250, bg='white', fg='green')
            label.place(x=75, y=65)
            message_label = Label(text=' Put English Meaning ', font=('Verdana', 16, 'bold'),bg='green', fg='white')
            message_label.place(x=35, y=120)
            # Late-bound row is safe here: one row per loop via LIMIT 1.
            chang_button = Button(text='Ok', font=('Verdana', 16), bg='red', bd=3, fg='white', command=lambda: checkReal(row))
            chang_button.place(x=155, y=200)
    else:
        with open('/VocabQuiz/test.txt', mode='w') as file:
            file.write("tempVocabFile")
        tempVocabFile()
def tempVocabFile():
    """Show one quiz question drawn from the temp vocabulary database.

    Mirror image of realVocabFile(): when the temp table is empty, the
    state file is switched back to "realVocabFile" and control is handed
    to realVocabFile().
    """
    global cursor1
    # fetchall() consumes the cursor, so the query is re-executed below.
    if len(cursor1.fetchall()) != 0:
        cursor1 = newConn.execute("SELECT id, words, meaning from VOCABULARY RANDOM LIMIT 1")
        for row in cursor1:
            hindi = StringVar(root, row[2])
            label = Message(root, textvariable=hindi,relief=RAISED, font=('Verdana', 16, 'bold'), width=250, bg='white', fg='green')
            label.place(x=75, y=65)
            message_label = Label(text=' Put English Meaning ', font=('Verdana', 16, 'bold'),bg='green', fg='white')
            message_label.place(x=35, y=120)
            chang_button = Button(text='Ok', font=('Verdana', 16), bg='red', bd=3, fg='white', command=lambda: checkTemp(row))
            chang_button.place(x=155, y=200)
    else:
        with open('/VocabQuiz/test.txt', mode='w') as file:
            file.write("realVocabFile")
        realVocabFile()
if __name__ == '__main__':
    message_label1 = Label(text=' Hindi to English VocabCheck', font=('Verdana', 16, 'bold'), bg='white', fg='green')
    message_label1.place(x=20, y=20)
    # The state file records which database (real or temp) to quiz from next.
    with open('/VocabQuiz/test.txt', mode='r') as file:
        fun = file.read()
    if fun == 'realVocabFile':
        realVocabFile()
    else:
        tempVocabFile()
    mainloop()
|
[
"tkinter.messagebox.askretrycancel"
] |
[((1334, 1391), 'tkinter.messagebox.askretrycancel', 'messagebox.askretrycancel', (['"""Incorrect Word"""', '"""Try again?"""'], {}), "('Incorrect Word', 'Try again?')\n", (1359, 1391), False, 'from tkinter import messagebox\n'), ((2039, 2096), 'tkinter.messagebox.askretrycancel', 'messagebox.askretrycancel', (['"""Incorrect Word"""', '"""Try again?"""'], {}), "('Incorrect Word', 'Try again?')\n", (2064, 2096), False, 'from tkinter import messagebox\n')]
|
from nltk import word_tokenize, WordNetLemmatizer
from plotly.graph_objs import Scatter, Bar
from wordcloud import WordCloud
def generate_plots(df):
    """
    Generate plot objects to be rendered in the dashboard:
        - Bar chart to plot distribution of genre
        - Bar chart to plot distribution of disaster category types
        - Word cloud to plot frequency of word in message content
    INPUT
        df - training set, pd.DataFrame
    OUTPUT
        graphs - list of plotly objects, List
    """
    # Graph 1 - message counts per genre.
    genre_counts = df.groupby('genre').count()['message']
    genre_names = list(genre_counts.index)
    # melt dataframe: one (category, active) row per original category column
    df1 = df.melt(id_vars=['id', 'message', 'original', 'genre'], var_name='category', value_name='active')
    # Graph 2 - Distribution of category types
    category_counts = df1[df1.active == 1].groupby('category').agg({'message': 'count'}) \
                      .reset_index().sort_values(by='message', ascending=True)
    category_names = category_counts['category'].values
    # Graph 3 - Wordcloud of sample of messages (Sample of 100 messages)
    words = df.sample(100)['message'].apply(_tokenize).values
    # Flatten the per-message token lists into one list of words.
    words = [word for word_list in words for word in word_list]
    # create visuals
    graphs = [
        {
            'data': [
                Bar(
                    x=genre_names,
                    y=genre_counts
                )
            ],
            'layout': {
                'title': 'Distribution of Message Genres',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Genre"
                }
            }
        },
        {
            'data': [
                Bar(
                    x=category_counts['message'],
                    y=category_names,
                    orientation='h'
                )
            ],
            'layout': {
                'title': 'Distribution of Disaster category types',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Category"
                },
                'margin': dict(l=150, r=15, pad=10)
            }
        }
    ]
    wc = _plotly_wordcloud(' '.join(words))
    graphs.append(wc)
    return graphs
def _tokenize(text):
    """
    Tokenize words from input sentences
    INPUT
        text - message content, str
    OUTPUT
        cleaned tokens - cleaned tokens after tokenization phase, List
    """
    lemmatizer = WordNetLemmatizer()
    # Lemmatize, lowercase and strip every token in one pass.
    return [lemmatizer.lemmatize(token).lower().strip()
            for token in word_tokenize(text)]
def _plotly_wordcloud(text):
    """
    Word cloud plot. Based on: https://github.com/PrashantSaikia/Wordcloud-in-Plotly
    INPUT
        text - message content, str
    OUTPUT
        chart - word cloud chart, plotly objects
    """
    wc = WordCloud(max_words=200,
                   max_font_size=40,
                   min_font_size=2,
                   min_word_length=3)
    wc.generate(text)
    # Unpack the wordcloud layout into parallel lists.
    word_list = []
    freq_list = []
    fontsize_list = []
    position_list = []
    orientation_list = []
    color_list = []
    for (word, freq), fontsize, position, orientation, color in wc.layout_:
        word_list.append(word)
        freq_list.append(freq)
        fontsize_list.append(fontsize)
        position_list.append(position)
        orientation_list.append(orientation)
        color_list.append(color)
    # get the positions
    x = []
    y = []
    for i in position_list:
        x.append(i[0])
        y.append(i[1])
    # Scale frequencies to usable font sizes.
    new_freq_list = []
    for i in freq_list:
        new_freq_list.append(i * 100)
    # NOTE(review): bare expression below has no effect -- candidate for removal.
    new_freq_list
    wc_plot_data = {
        'data': [
            Scatter(
                x=x,
                y=y,
                textfont=dict(size=new_freq_list,
                              color=color_list),
                hoverinfo='text',
                hovertext=['{0}: {1}'.format(w, f) for w, f in zip(word_list, freq_list)],
                mode='text',
                text=word_list
            )
        ],
        'layout': {
            'title': 'Message: Word cloud',
            'xaxis': {'showgrid': False,
                      'showticklabels': False,
                      'zeroline': False},
            'yaxis': {'showgrid': False,
                      'showticklabels': False,
                      'zeroline': False},
        }
    }
    return wc_plot_data
|
[
"nltk.WordNetLemmatizer",
"wordcloud.WordCloud",
"nltk.word_tokenize",
"plotly.graph_objs.Bar"
] |
[((2570, 2589), 'nltk.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (2583, 2589), False, 'from nltk import word_tokenize, WordNetLemmatizer\n'), ((2607, 2626), 'nltk.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (2624, 2626), False, 'from nltk import word_tokenize, WordNetLemmatizer\n'), ((3046, 3124), 'wordcloud.WordCloud', 'WordCloud', ([], {'max_words': '(200)', 'max_font_size': '(40)', 'min_font_size': '(2)', 'min_word_length': '(3)'}), '(max_words=200, max_font_size=40, min_font_size=2, min_word_length=3)\n', (3055, 3124), False, 'from wordcloud import WordCloud\n'), ((1329, 1363), 'plotly.graph_objs.Bar', 'Bar', ([], {'x': 'genre_names', 'y': 'genre_counts'}), '(x=genre_names, y=genre_counts)\n', (1332, 1363), False, 'from plotly.graph_objs import Scatter, Bar\n'), ((1759, 1827), 'plotly.graph_objs.Bar', 'Bar', ([], {'x': "category_counts['message']", 'y': 'category_names', 'orientation': '"""h"""'}), "(x=category_counts['message'], y=category_names, orientation='h')\n", (1762, 1827), False, 'from plotly.graph_objs import Scatter, Bar\n')]
|
import os
import random
import shutil
import tempfile
import time
from multiprocessing import Pool
from multiprocessing import Process
from pathlib import Path
import more_itertools as mo
from diskcache import Cache
from diskcache import Deque
from six import wraps
from fasteners import test
from fasteners.process_lock import InterProcessReaderWriterLock as ReaderWriterLock
PROCESS_COUNT = 20
def unpack(func):
    """Decorator: adapt *func* to be called with a single argument tuple.

    Pool.map supplies one tuple per call; the wrapper splats that tuple
    into the wrapped function's positional parameters.
    """
    @wraps(func)
    def _splat(packed):
        return func(*packed)
    return _splat
def run_doesnt_hang(disk_cache_dir, lock_file, type_):
    """Worker: take a read ('r') or write ('w') lock and bump its counter."""
    lock = (ReaderWriterLock(lock_file).write_lock if type_ == 'w' else
            ReaderWriterLock(lock_file).read_lock)
    with lock():
        with Cache(disk_cache_dir) as dc_:
            dc_.incr(type_)
@unpack
def run_no_concurrent_writers(disk_cache_dir, lock_file):
    """Worker: repeatedly enter the write-lock critical section (10 times)."""
    with Cache(disk_cache_dir) as dc_:
        for _ in range(10):
            no_concurrent_writers_acquire_check(dc_, lock_file)
def no_concurrent_writers_acquire_check(dc_, lock_file):
    """Under the write lock, record a duplicate if another holder is active."""
    with ReaderWriterLock(lock_file).write_lock():
        if dc_.get('active_count', 0) >= 1:
            # Another process is inside the critical section: mutual
            # exclusion was violated.
            dc_.incr('dups_count')
        dc_.incr('active_count')
        # Hold the lock briefly to widen the race window.
        time.sleep(random.random() / 1000)
        dc_.decr('active_count')
        dc_.incr('visited_count')
@unpack
def run_no_cuncurrent_readers_writers(disk_cache_dir, lock_file):
    """Worker: perform 10 randomly-chosen reader/writer acquisitions."""
    with Cache(disk_cache_dir) as dc_:
        for _ in range(10):
            no_concurrent_readers_writers_acquire_check(dc_, lock_file,
                                                        random.choice([True, False]))
def no_concurrent_readers_writers_acquire_check(dc_, lock_file, reader):
    """Acquire as reader or writer; a writer must see no other active holder."""
    if reader:
        lock_func = ReaderWriterLock(lock_file).read_lock
    else:
        lock_func = ReaderWriterLock(lock_file).write_lock
    with lock_func():
        if not reader:
            # Writers must be exclusive; any active holder is a violation.
            if dc_.get('active_count', 0) >= 1:
                dc_.incr('dups_count')
        dc_.incr('active_count')
        time.sleep(random.random() / 1000)
        dc_.decr('active_count')
        dc_.incr('visited_count')
def run_reader_writer_chaotic(disk_cache_dir, lock_file, type_, blow_up):
    """Like run_doesnt_hang, but may raise inside the locked section
    (blow_up=True) to exercise lock release on crash."""
    lock = (ReaderWriterLock(lock_file).write_lock if type_ == 'w' else
            ReaderWriterLock(lock_file).read_lock)
    with lock():
        with Cache(disk_cache_dir) as dc_:
            dc_.incr(type_)
            if blow_up:
                raise RuntimeError()
def reader_releases_lock_upon_crash_reader_lock(disk_cache_dir, lock_file, i):
    """Take the read lock, record this pid under 'pid<i>', then crash."""
    with ReaderWriterLock(lock_file).read_lock():
        with Cache(disk_cache_dir) as dc_:
            dc_.set('pid{}'.format(i), os.getpid())
        raise RuntimeError('')
def reader_releases_lock_upon_crash_writer_lock(disk_cache_dir, lock_file, i):
    """Acquire the write lock (5s timeout) and record this pid; no crash.

    The timeout fails the test if the crashed reader did not release."""
    ReaderWriterLock(lock_file).acquire_write_lock(timeout=5)
    with Cache(disk_cache_dir) as dc_:
        dc_.set('pid{}'.format(i), os.getpid())
def run_writer_releases_lock_upon_crash(disk_cache_dir, lock_file, i, crash):
    """Acquire the write lock (5s timeout), record this pid, and optionally
    crash without explicitly releasing the lock."""
    ReaderWriterLock(lock_file).acquire_write_lock(timeout=5)
    with Cache(disk_cache_dir) as dc_:
        dc_.set('pid{}'.format(i), os.getpid())
    if crash:
        raise RuntimeError('')
class ProcessReaderWriterLock(test.TestCase):
    """Multi-process stress tests for InterProcessReaderWriterLock.

    Worker processes coordinate through a diskcache Cache/Deque so the
    parent can assert on counters and visit ordering afterwards."""
    def setUp(self):
        super(ProcessReaderWriterLock, self).setUp()
        # Create-and-close to obtain a unique path for the lock file.
        lock_file = tempfile.NamedTemporaryFile()
        lock_file.close()
        self.lock_file = lock_file.name
        self.disk_cache_dir = tempfile.mkdtemp()
    def tearDown(self):
        super(ProcessReaderWriterLock, self).tearDown()
        shutil.rmtree(self.disk_cache_dir, ignore_errors=True)
        try:
            os.remove(self.lock_file)
        except OSError:
            # Best effort: the lock file may already be gone.
            pass
    def test_lock(self):
        # Smoke test: both lock flavours can be entered and exited.
        with ReaderWriterLock(self.lock_file).write_lock():
            pass
        with ReaderWriterLock(self.lock_file).read_lock():
            pass
    def test_no_concurrent_writers(self):
        pool = Pool(PROCESS_COUNT)
        pool.map(run_no_concurrent_writers, [(self.disk_cache_dir, self.lock_file)] * PROCESS_COUNT,
                 chunksize=1)
        with Cache(self.disk_cache_dir) as dc:
            # No holder left, no overlap observed, all iterations ran.
            self.assertEqual(dc.get('active_count'), 0)
            self.assertEqual(dc.get('dups_count'), None)
            self.assertEqual(dc.get('visited_count'), 10 * PROCESS_COUNT)
    def test_no_concurrent_readers_writers(self):
        pool = Pool(PROCESS_COUNT)
        pool.map(run_no_cuncurrent_readers_writers,
                 [(self.disk_cache_dir, self.lock_file)] * PROCESS_COUNT, chunksize=1)
        with Cache(self.disk_cache_dir) as dc:
            self.assertEqual(dc.get('active_count'), 0)
            self.assertEqual(dc.get('dups_count'), None)
            self.assertEqual(dc.get('visited_count'), 10 * PROCESS_COUNT)
    def test_writer_releases_lock_upon_crash(self):
        # p1 crashes while holding the write lock; p2 must still acquire it.
        p1 = Process(target=run_writer_releases_lock_upon_crash,
                     args=(self.disk_cache_dir, self.lock_file, 1, True))
        p2 = Process(target=run_writer_releases_lock_upon_crash,
                     args=(self.disk_cache_dir, self.lock_file, 2, False))
        p1.start()
        p1.join()
        p2.start()
        p2.join()
        with Cache(self.disk_cache_dir) as dc:
            assert dc.get('pid1') != dc.get('pid2')
        self.assertNotEqual(0, p1.exitcode)
        self.assertEqual(0, p2.exitcode)
    def test_reader_releases_lock_upon_crash(self):
        # p1 crashes under the read lock; p2 must still get the write lock.
        p1 = Process(target=reader_releases_lock_upon_crash_reader_lock,
                     args=(self.disk_cache_dir, self.lock_file, 1))
        p2 = Process(target=reader_releases_lock_upon_crash_writer_lock,
                     args=(self.disk_cache_dir, self.lock_file, 2))
        p1.start()
        p1.join()
        p2.start()
        p2.join()
        with Cache(self.disk_cache_dir) as dc:
            assert dc.get('pid1') != dc.get('pid2')
        self.assertNotEqual(0, p1.exitcode)
        self.assertEqual(0, p2.exitcode)
    def test_multi_reader_multi_writer(self):
        # Each worker logs an enter and an exit record, hence the * 2.
        visits = _spawn_variation(Path(self.disk_cache_dir),
                                  Path(self.lock_file), 10, 10)
        self.assertEqual(20 * 2, len(visits))
        self._assert_valid(visits)
    def test_multi_reader_single_writer(self):
        visits = _spawn_variation(Path(self.disk_cache_dir),
                                  Path(self.lock_file), 9, 1)
        self.assertEqual(10 * 2, len(visits))
        self._assert_valid(visits)
    def test_multi_writer(self):
        visits = _spawn_variation(Path(self.disk_cache_dir),
                                  Path(self.lock_file), 0, 10)
        self.assertEqual(10 * 2, len(visits))
        self._assert_valid(visits)
    def _assert_valid(self, visits):
        """Check if writes dont overlap other writes and reads"""
        # check that writes open and close consequently
        write_blocks = mo.split_at(visits, lambda x: x[1] == 'r')
        for write_block in write_blocks:
            for v1, v2 in mo.chunked(write_block, 2):
                # Enter/exit pairs must come from the same pid.
                self.assertEqual(v1[0], v2[0])
        # check that reads open and close in groups between writes
        read_blocks = mo.split_at(visits, lambda x: x[1] == 'w')
        for read_block in read_blocks:
            for v1, v2 in mo.chunked(sorted(read_block), 2):
                self.assertEqual(v1[0], v2[0])
def _spawn_variation(disk_cache_dir, lock_file, readers, writers):
    """Spawn `readers` reader and `writers` writer workers; return the
    shared Deque of (pid, type) enter/exit records they produced."""
    visits = Deque(directory=str(disk_cache_dir / 'w'))
    pool = Pool(readers + writers)
    pool.map(_spawling, [(lock_file, visits, type_) for type_ in ['w'] * writers + ['r'] * readers])
    return visits
@unpack
def _spawling(lock_file, visits, type_):
    """Worker: take the requested lock and log (pid, type) on enter and exit."""
    lock = ReaderWriterLock(lock_file)
    if type_ == 'w':
        lock.acquire_write_lock(timeout=5)
    else:
        lock.acquire_read_lock(timeout=5)
    visits.append((os.getpid(), type_))
    # Hold the lock briefly so overlapping holders would be observable.
    time.sleep(random.random() / 100 + 0.01)
    visits.append((os.getpid(), type_))
    if type_ == 'w':
        lock.release_write_lock()
    else:
        lock.release_read_lock()
|
[
"tempfile.NamedTemporaryFile",
"os.remove",
"os.getpid",
"more_itertools.split_at",
"random.choice",
"random.random",
"pathlib.Path",
"tempfile.mkdtemp",
"fasteners.process_lock.InterProcessReaderWriterLock",
"more_itertools.chunked",
"multiprocessing.Pool",
"shutil.rmtree",
"multiprocessing.Process",
"six.wraps",
"diskcache.Cache"
] |
[((424, 435), 'six.wraps', 'wraps', (['func'], {}), '(func)\n', (429, 435), False, 'from six import wraps\n'), ((7543, 7566), 'multiprocessing.Pool', 'Pool', (['(readers + writers)'], {}), '(readers + writers)\n', (7547, 7566), False, 'from multiprocessing import Pool\n'), ((7748, 7775), 'fasteners.process_lock.InterProcessReaderWriterLock', 'ReaderWriterLock', (['lock_file'], {}), '(lock_file)\n', (7764, 7775), True, 'from fasteners.process_lock import InterProcessReaderWriterLock as ReaderWriterLock\n'), ((861, 882), 'diskcache.Cache', 'Cache', (['disk_cache_dir'], {}), '(disk_cache_dir)\n', (866, 882), False, 'from diskcache import Cache\n'), ((1400, 1421), 'diskcache.Cache', 'Cache', (['disk_cache_dir'], {}), '(disk_cache_dir)\n', (1405, 1421), False, 'from diskcache import Cache\n'), ((2857, 2878), 'diskcache.Cache', 'Cache', (['disk_cache_dir'], {}), '(disk_cache_dir)\n', (2862, 2878), False, 'from diskcache import Cache\n'), ((3086, 3107), 'diskcache.Cache', 'Cache', (['disk_cache_dir'], {}), '(disk_cache_dir)\n', (3091, 3107), False, 'from diskcache import Cache\n'), ((3353, 3382), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (3380, 3382), False, 'import tempfile\n'), ((3479, 3497), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3495, 3497), False, 'import tempfile\n'), ((3588, 3642), 'shutil.rmtree', 'shutil.rmtree', (['self.disk_cache_dir'], {'ignore_errors': '(True)'}), '(self.disk_cache_dir, ignore_errors=True)\n', (3601, 3642), False, 'import shutil\n'), ((3974, 3993), 'multiprocessing.Pool', 'Pool', (['PROCESS_COUNT'], {}), '(PROCESS_COUNT)\n', (3978, 3993), False, 'from multiprocessing import Pool\n'), ((4426, 4445), 'multiprocessing.Pool', 'Pool', (['PROCESS_COUNT'], {}), '(PROCESS_COUNT)\n', (4430, 4445), False, 'from multiprocessing import Pool\n'), ((4886, 4995), 'multiprocessing.Process', 'Process', ([], {'target': 'run_writer_releases_lock_upon_crash', 'args': '(self.disk_cache_dir, self.lock_file, 
1, True)'}), '(target=run_writer_releases_lock_upon_crash, args=(self.\n disk_cache_dir, self.lock_file, 1, True))\n', (4893, 4995), False, 'from multiprocessing import Process\n'), ((5025, 5135), 'multiprocessing.Process', 'Process', ([], {'target': 'run_writer_releases_lock_upon_crash', 'args': '(self.disk_cache_dir, self.lock_file, 2, False)'}), '(target=run_writer_releases_lock_upon_crash, args=(self.\n disk_cache_dir, self.lock_file, 2, False))\n', (5032, 5135), False, 'from multiprocessing import Process\n'), ((5480, 5591), 'multiprocessing.Process', 'Process', ([], {'target': 'reader_releases_lock_upon_crash_reader_lock', 'args': '(self.disk_cache_dir, self.lock_file, 1)'}), '(target=reader_releases_lock_upon_crash_reader_lock, args=(self.\n disk_cache_dir, self.lock_file, 1))\n', (5487, 5591), False, 'from multiprocessing import Process\n'), ((5621, 5732), 'multiprocessing.Process', 'Process', ([], {'target': 'reader_releases_lock_upon_crash_writer_lock', 'args': '(self.disk_cache_dir, self.lock_file, 2)'}), '(target=reader_releases_lock_upon_crash_writer_lock, args=(self.\n disk_cache_dir, self.lock_file, 2))\n', (5628, 5732), False, 'from multiprocessing import Process\n'), ((6942, 6984), 'more_itertools.split_at', 'mo.split_at', (['visits', "(lambda x: x[1] == 'r')"], {}), "(visits, lambda x: x[1] == 'r')\n", (6953, 6984), True, 'import more_itertools as mo\n'), ((7217, 7259), 'more_itertools.split_at', 'mo.split_at', (['visits', "(lambda x: x[1] == 'w')"], {}), "(visits, lambda x: x[1] == 'w')\n", (7228, 7259), True, 'import more_itertools as mo\n'), ((585, 612), 'fasteners.process_lock.InterProcessReaderWriterLock', 'ReaderWriterLock', (['lock_file'], {}), '(lock_file)\n', (601, 612), True, 'from fasteners.process_lock import InterProcessReaderWriterLock as ReaderWriterLock\n'), ((657, 684), 'fasteners.process_lock.InterProcessReaderWriterLock', 'ReaderWriterLock', (['lock_file'], {}), '(lock_file)\n', (673, 684), True, 'from fasteners.process_lock 
import InterProcessReaderWriterLock as ReaderWriterLock\n'), ((726, 747), 'diskcache.Cache', 'Cache', (['disk_cache_dir'], {}), '(disk_cache_dir)\n', (731, 747), False, 'from diskcache import Cache\n'), ((1726, 1753), 'fasteners.process_lock.InterProcessReaderWriterLock', 'ReaderWriterLock', (['lock_file'], {}), '(lock_file)\n', (1742, 1753), True, 'from fasteners.process_lock import InterProcessReaderWriterLock as ReaderWriterLock\n'), ((1794, 1821), 'fasteners.process_lock.InterProcessReaderWriterLock', 'ReaderWriterLock', (['lock_file'], {}), '(lock_file)\n', (1810, 1821), True, 'from fasteners.process_lock import InterProcessReaderWriterLock as ReaderWriterLock\n'), ((2196, 2223), 'fasteners.process_lock.InterProcessReaderWriterLock', 'ReaderWriterLock', (['lock_file'], {}), '(lock_file)\n', (2212, 2223), True, 'from fasteners.process_lock import InterProcessReaderWriterLock as ReaderWriterLock\n'), ((2268, 2295), 'fasteners.process_lock.InterProcessReaderWriterLock', 'ReaderWriterLock', (['lock_file'], {}), '(lock_file)\n', (2284, 2295), True, 'from fasteners.process_lock import InterProcessReaderWriterLock as ReaderWriterLock\n'), ((2337, 2358), 'diskcache.Cache', 'Cache', (['disk_cache_dir'], {}), '(disk_cache_dir)\n', (2342, 2358), False, 'from diskcache import Cache\n'), ((2592, 2613), 'diskcache.Cache', 'Cache', (['disk_cache_dir'], {}), '(disk_cache_dir)\n', (2597, 2613), False, 'from diskcache import Cache\n'), ((2790, 2817), 'fasteners.process_lock.InterProcessReaderWriterLock', 'ReaderWriterLock', (['lock_file'], {}), '(lock_file)\n', (2806, 2817), True, 'from fasteners.process_lock import InterProcessReaderWriterLock as ReaderWriterLock\n'), ((2922, 2933), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2931, 2933), False, 'import os\n'), ((3019, 3046), 'fasteners.process_lock.InterProcessReaderWriterLock', 'ReaderWriterLock', (['lock_file'], {}), '(lock_file)\n', (3035, 3046), True, 'from fasteners.process_lock import InterProcessReaderWriterLock as 
ReaderWriterLock\n'), ((3151, 3162), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3160, 3162), False, 'import os\n'), ((3668, 3693), 'os.remove', 'os.remove', (['self.lock_file'], {}), '(self.lock_file)\n', (3677, 3693), False, 'import os\n'), ((4139, 4165), 'diskcache.Cache', 'Cache', (['self.disk_cache_dir'], {}), '(self.disk_cache_dir)\n', (4144, 4165), False, 'from diskcache import Cache\n'), ((4599, 4625), 'diskcache.Cache', 'Cache', (['self.disk_cache_dir'], {}), '(self.disk_cache_dir)\n', (4604, 4625), False, 'from diskcache import Cache\n'), ((5242, 5268), 'diskcache.Cache', 'Cache', (['self.disk_cache_dir'], {}), '(self.disk_cache_dir)\n', (5247, 5268), False, 'from diskcache import Cache\n'), ((5839, 5865), 'diskcache.Cache', 'Cache', (['self.disk_cache_dir'], {}), '(self.disk_cache_dir)\n', (5844, 5865), False, 'from diskcache import Cache\n'), ((6092, 6117), 'pathlib.Path', 'Path', (['self.disk_cache_dir'], {}), '(self.disk_cache_dir)\n', (6096, 6117), False, 'from pathlib import Path\n'), ((6153, 6173), 'pathlib.Path', 'Path', (['self.lock_file'], {}), '(self.lock_file)\n', (6157, 6173), False, 'from pathlib import Path\n'), ((6347, 6372), 'pathlib.Path', 'Path', (['self.disk_cache_dir'], {}), '(self.disk_cache_dir)\n', (6351, 6372), False, 'from pathlib import Path\n'), ((6408, 6428), 'pathlib.Path', 'Path', (['self.lock_file'], {}), '(self.lock_file)\n', (6412, 6428), False, 'from pathlib import Path\n'), ((6586, 6611), 'pathlib.Path', 'Path', (['self.disk_cache_dir'], {}), '(self.disk_cache_dir)\n', (6590, 6611), False, 'from pathlib import Path\n'), ((6647, 6667), 'pathlib.Path', 'Path', (['self.lock_file'], {}), '(self.lock_file)\n', (6651, 6667), False, 'from pathlib import Path\n'), ((7052, 7078), 'more_itertools.chunked', 'mo.chunked', (['write_block', '(2)'], {}), '(write_block, 2)\n', (7062, 7078), True, 'import more_itertools as mo\n'), ((7913, 7924), 'os.getpid', 'os.getpid', ([], {}), '()\n', (7922, 7924), False, 'import os\n'), ((7998, 
8009), 'os.getpid', 'os.getpid', ([], {}), '()\n', (8007, 8009), False, 'import os\n'), ((1051, 1078), 'fasteners.process_lock.InterProcessReaderWriterLock', 'ReaderWriterLock', (['lock_file'], {}), '(lock_file)\n', (1067, 1078), True, 'from fasteners.process_lock import InterProcessReaderWriterLock as ReaderWriterLock\n'), ((1224, 1239), 'random.random', 'random.random', ([], {}), '()\n', (1237, 1239), False, 'import random\n'), ((1586, 1614), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (1599, 1614), False, 'import random\n'), ((2017, 2032), 'random.random', 'random.random', ([], {}), '()\n', (2030, 2032), False, 'import random\n'), ((2538, 2565), 'fasteners.process_lock.InterProcessReaderWriterLock', 'ReaderWriterLock', (['lock_file'], {}), '(lock_file)\n', (2554, 2565), True, 'from fasteners.process_lock import InterProcessReaderWriterLock as ReaderWriterLock\n'), ((2661, 2672), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2670, 2672), False, 'import os\n'), ((7949, 7964), 'random.random', 'random.random', ([], {}), '()\n', (7962, 7964), False, 'import random\n'), ((3775, 3807), 'fasteners.process_lock.InterProcessReaderWriterLock', 'ReaderWriterLock', (['self.lock_file'], {}), '(self.lock_file)\n', (3791, 3807), True, 'from fasteners.process_lock import InterProcessReaderWriterLock as ReaderWriterLock\n'), ((3853, 3885), 'fasteners.process_lock.InterProcessReaderWriterLock', 'ReaderWriterLock', (['self.lock_file'], {}), '(self.lock_file)\n', (3869, 3885), True, 'from fasteners.process_lock import InterProcessReaderWriterLock as ReaderWriterLock\n')]
|
from todoist_gcal_sync.utils.auth.gcal_OAuth import get_credentials
from todoist_gcal_sync.utils import sql_ops
import httplib2
from apiclient import discovery
# Authorize an HTTP client with the stored Google OAuth credentials.
gcal_creds = get_credentials()
http = gcal_creds.authorize(httplib2.Http())
# 'cache_discovery=False' is used to circumvent the file_cache issue for oauth2client >= 4.0.0
# More info on the issue here: https://github.com/google/google-api-python-client/issues/299
service = discovery.build('calendar', 'v3', http=http, cache_discovery=False)
# All known calendars with their last stored sync token.
cal_ids = sql_ops.select_from_where(
    "calendar_id, calendar_sync_token", "gcal_ids", None, None, fetch_all=True)
def google_code(cal_id, sync_token):
    """Page through the calendar's events (incremental sync), printing
    each event's summary; return the nextSyncToken to persist (or None)."""
    next_sync_token = None
    page_token = None
    while True:
        events = service.events().list(calendarId=cal_id, pageToken=page_token,
                                       syncToken=sync_token).execute()
        for event in events['items']:
            print(event['summary'])
        if 'nextSyncToken' in events:
            next_sync_token = events['nextSyncToken']
        # Keep paging until the API stops returning a nextPageToken.
        page_token = events.get('nextPageToken')
        if not page_token:
            break
    return next_sync_token
# Refresh the stored sync token for every calendar.
for i in range(0, len(cal_ids)):
    sync_token = google_code(cal_ids[i][0], cal_ids[i][1])
    if sql_ops.update_set_where(
            "gcal_ids", "calendar_sync_token = ?", "calendar_id = ?", sync_token, cal_ids[i][0]):
        print("Calendar sync token updated.")
|
[
"httplib2.Http",
"todoist_gcal_sync.utils.sql_ops.select_from_where",
"apiclient.discovery.build",
"todoist_gcal_sync.utils.sql_ops.update_set_where",
"todoist_gcal_sync.utils.auth.gcal_OAuth.get_credentials"
] |
[((174, 191), 'todoist_gcal_sync.utils.auth.gcal_OAuth.get_credentials', 'get_credentials', ([], {}), '()\n', (189, 191), False, 'from todoist_gcal_sync.utils.auth.gcal_OAuth import get_credentials\n'), ((436, 503), 'apiclient.discovery.build', 'discovery.build', (['"""calendar"""', '"""v3"""'], {'http': 'http', 'cache_discovery': '(False)'}), "('calendar', 'v3', http=http, cache_discovery=False)\n", (451, 503), False, 'from apiclient import discovery\n'), ((515, 620), 'todoist_gcal_sync.utils.sql_ops.select_from_where', 'sql_ops.select_from_where', (['"""calendar_id, calendar_sync_token"""', '"""gcal_ids"""', 'None', 'None'], {'fetch_all': '(True)'}), "('calendar_id, calendar_sync_token', 'gcal_ids',\n None, None, fetch_all=True)\n", (540, 620), False, 'from todoist_gcal_sync.utils import sql_ops\n'), ((220, 235), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (233, 235), False, 'import httplib2\n'), ((1267, 1380), 'todoist_gcal_sync.utils.sql_ops.update_set_where', 'sql_ops.update_set_where', (['"""gcal_ids"""', '"""calendar_sync_token = ?"""', '"""calendar_id = ?"""', 'sync_token', 'cal_ids[i][0]'], {}), "('gcal_ids', 'calendar_sync_token = ?',\n 'calendar_id = ?', sync_token, cal_ids[i][0])\n", (1291, 1380), False, 'from todoist_gcal_sync.utils import sql_ops\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import unittest
import numpy as np
import torch
from mmf.common.registry import registry
from mmf.common.sample import Sample, SampleList
from mmf.models.cnn_lstm import CNNLSTM
from mmf.utils.configuration import Configuration
from mmf.utils.general import get_mmf_root
from tests.test_utils import dummy_args
class TestModelCNNLSTM(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
registry.register("clevr_text_vocab_size", 80)
registry.register("clevr_num_final_outputs", 32)
config_path = os.path.join(
get_mmf_root(),
"..",
"projects",
"others",
"cnn_lstm",
"clevr",
"defaults.yaml",
)
config_path = os.path.abspath(config_path)
args = dummy_args(model="cnn_lstm", dataset="clevr")
args.opts.append("config={}".format(config_path))
configuration = Configuration(args)
configuration.config.datasets = "clevr"
configuration.freeze()
self.config = configuration.config
registry.register("config", self.config)
def test_forward(self):
model_config = self.config.model_config.cnn_lstm
cnn_lstm = CNNLSTM(model_config)
cnn_lstm.build()
cnn_lstm.init_losses()
self.assertTrue(isinstance(cnn_lstm, torch.nn.Module))
test_sample = Sample()
test_sample.text = torch.randint(1, 79, (10,), dtype=torch.long)
test_sample.image = torch.randn(3, 320, 480)
test_sample.targets = torch.randn(32)
test_sample_list = SampleList([test_sample])
test_sample_list.dataset_type = "train"
test_sample_list.dataset_name = "clevr"
output = cnn_lstm(test_sample_list)
scores = output["scores"]
loss = output["losses"]["train/clevr/logit_bce"]
np.testing.assert_almost_equal(loss.item(), 19.2635, decimal=4)
self.assertEqual(scores.size(), torch.Size((1, 32)))
|
[
"mmf.common.sample.SampleList",
"os.path.abspath",
"torch.randint",
"torch.manual_seed",
"mmf.models.cnn_lstm.CNNLSTM",
"mmf.common.registry.registry.register",
"mmf.utils.general.get_mmf_root",
"tests.test_utils.dummy_args",
"torch.randn",
"mmf.common.sample.Sample",
"torch.Size",
"mmf.utils.configuration.Configuration"
] |
[((448, 471), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (465, 471), False, 'import torch\n'), ((480, 526), 'mmf.common.registry.registry.register', 'registry.register', (['"""clevr_text_vocab_size"""', '(80)'], {}), "('clevr_text_vocab_size', 80)\n", (497, 526), False, 'from mmf.common.registry import registry\n'), ((535, 583), 'mmf.common.registry.registry.register', 'registry.register', (['"""clevr_num_final_outputs"""', '(32)'], {}), "('clevr_num_final_outputs', 32)\n", (552, 583), False, 'from mmf.common.registry import registry\n'), ((818, 846), 'os.path.abspath', 'os.path.abspath', (['config_path'], {}), '(config_path)\n', (833, 846), False, 'import os\n'), ((862, 907), 'tests.test_utils.dummy_args', 'dummy_args', ([], {'model': '"""cnn_lstm"""', 'dataset': '"""clevr"""'}), "(model='cnn_lstm', dataset='clevr')\n", (872, 907), False, 'from tests.test_utils import dummy_args\n'), ((990, 1009), 'mmf.utils.configuration.Configuration', 'Configuration', (['args'], {}), '(args)\n', (1003, 1009), False, 'from mmf.utils.configuration import Configuration\n'), ((1140, 1180), 'mmf.common.registry.registry.register', 'registry.register', (['"""config"""', 'self.config'], {}), "('config', self.config)\n", (1157, 1180), False, 'from mmf.common.registry import registry\n'), ((1287, 1308), 'mmf.models.cnn_lstm.CNNLSTM', 'CNNLSTM', (['model_config'], {}), '(model_config)\n', (1294, 1308), False, 'from mmf.models.cnn_lstm import CNNLSTM\n'), ((1452, 1460), 'mmf.common.sample.Sample', 'Sample', ([], {}), '()\n', (1458, 1460), False, 'from mmf.common.sample import Sample, SampleList\n'), ((1488, 1533), 'torch.randint', 'torch.randint', (['(1)', '(79)', '(10,)'], {'dtype': 'torch.long'}), '(1, 79, (10,), dtype=torch.long)\n', (1501, 1533), False, 'import torch\n'), ((1562, 1586), 'torch.randn', 'torch.randn', (['(3)', '(320)', '(480)'], {}), '(3, 320, 480)\n', (1573, 1586), False, 'import torch\n'), ((1617, 1632), 'torch.randn', 'torch.randn', 
(['(32)'], {}), '(32)\n', (1628, 1632), False, 'import torch\n'), ((1661, 1686), 'mmf.common.sample.SampleList', 'SampleList', (['[test_sample]'], {}), '([test_sample])\n', (1671, 1686), False, 'from mmf.common.sample import Sample, SampleList\n'), ((632, 646), 'mmf.utils.general.get_mmf_root', 'get_mmf_root', ([], {}), '()\n', (644, 646), False, 'from mmf.utils.general import get_mmf_root\n'), ((2032, 2051), 'torch.Size', 'torch.Size', (['(1, 32)'], {}), '((1, 32))\n', (2042, 2051), False, 'import torch\n')]
|
import numpy as np
import math
from geofractal import *
#-------------------------------------------------------
# Fractal dimension
#-------------------------------------------------------
df = 1.8
#-------------------------------------------------------
# Fractal prefactor
#-------------------------------------------------------
k0 = 0.5*(0.3-np.sqrt(3.0))*(df-1.0)+np.sqrt(3.0)
#-------------------------------------------------------
# Model of correlation function
#-------------------------------------------------------
#cormodel= 'EXPNL'
#cormodel= 'GAUSS'
cormodel= 'FLDIM'
#-------------------------------------------------------
# call geofractal.py
#-------------------------------------------------------
Nmin = 1.e0
Nmax = 1.e10
N = 250
PN = np.exp(np.linspace(math.log(Nmin),math.log(Nmax),N))
G = np.zeros(N)
for i in range(N):
G[i] = geofractal(PN[i],df,k0,cormodel)
#-------------------------------------------------------
# output the results
#-------------------------------------------------------
filename='gratio.out'
with open(filename,'w') as f:
f.write('# df = %13.6e \n'%df)
f.write('# k0 = %13.6e \n'%k0)
f.write('# model = %11s \n'%cormodel)
f.write('# %11s %13s\n'%('PN','G/NpiR0^2'))
for i in range(N):
f.write('%13.6e %13.6e\n'%(PN[i],G[i]))
|
[
"math.log",
"numpy.zeros",
"numpy.sqrt"
] |
[((829, 840), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (837, 840), True, 'import numpy as np\n'), ((375, 387), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (382, 387), True, 'import numpy as np\n'), ((788, 802), 'math.log', 'math.log', (['Nmin'], {}), '(Nmin)\n', (796, 802), False, 'import math\n'), ((803, 817), 'math.log', 'math.log', (['Nmax'], {}), '(Nmax)\n', (811, 817), False, 'import math\n'), ((352, 364), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (359, 364), True, 'import numpy as np\n')]
|
# coding: utf-8
from riemann.config.config_loader import initialize_config
from riemann.data.data_loader import get_training_data
from riemann.config.graph_sampling_config import GraphSamplingConfig
initialize_config()
g = get_training_data()
iter = g.get_neighbor_iterator(GraphSamplingConfig())
|
[
"riemann.config.graph_sampling_config.GraphSamplingConfig",
"riemann.data.data_loader.get_training_data",
"riemann.config.config_loader.initialize_config"
] |
[((201, 220), 'riemann.config.config_loader.initialize_config', 'initialize_config', ([], {}), '()\n', (218, 220), False, 'from riemann.config.config_loader import initialize_config\n'), ((225, 244), 'riemann.data.data_loader.get_training_data', 'get_training_data', ([], {}), '()\n', (242, 244), False, 'from riemann.data.data_loader import get_training_data\n'), ((276, 297), 'riemann.config.graph_sampling_config.GraphSamplingConfig', 'GraphSamplingConfig', ([], {}), '()\n', (295, 297), False, 'from riemann.config.graph_sampling_config import GraphSamplingConfig\n')]
|
"""
A Maximum-Entropy model for backbone torsion angles.
Reference: Rowicka and Otwinowski 2004
"""
import numpy
from csb.statistics.pdf import BaseDensity
class MaxentModel(BaseDensity):
"""
Fourier expansion of a biangular log-probability density
"""
def __init__(self, n, beta=1.):
"""
@param n: order of the fourier expansion
@type n: int
@param beta: inverse temperature
@type beta: float
"""
super(MaxentModel, self).__init__()
self._n = int(n)
self._cc = numpy.zeros((self._n, self._n))
self._ss = numpy.zeros((self._n, self._n))
self._cs = numpy.zeros((self._n, self._n))
self._sc = numpy.zeros((self._n, self._n))
self._beta = float(beta)
@property
def beta(self):
"""
Inverse temperature
@rtype: float
"""
return self._beta
@property
def n(self):
"""
Order of the fourier expansion
@rtype: int
"""
return self._n
def load_old(self, aa, f_name):
"""
Load set of expansion coefficients from isd.
@param aa: Amino acid type
@param f_name: File containing ramachandran definition
"""
import os
params, _energies = eval(open(os.path.expanduser(f_name)).read())
params = params[self._n - 1]
for k, l, x, f, g in params[aa]:
if f == 'cos' and g == 'cos':
self._cc[k, l] = -x
elif f == 'cos' and g == 'sin':
self._cs[k, l] = -x
elif f == 'sin' and g == 'cos':
self._sc[k, l] = -x
elif f == 'sin' and g == 'sin':
self._ss[k, l] = -x
def load(self, aa, f_name):
"""
Load set of expansion coefficients from isd+.
@param aa: Amino acid type
@param f_name: File containing ramachandran definition
"""
import os
from numpy import reshape, array
from csb.io import load
f_name = os.path.expanduser(f_name)
params, _energies = load(f_name)
params = params[self._n]
a, b, c, d = params[aa]
a, b, c, d = reshape(array(a), (self._n, self._n)).astype('d'), \
reshape(array(b), (self._n, self._n)).astype('d'), \
reshape(array(c), (self._n, self._n)).astype('d'), \
reshape(array(d), (self._n, self._n)).astype('d')
# Not a typo, I accidently swichted cos*sin and sin*cos
self._cc, self._cs, self._sc, self._ss = -a, -c, -b, -d
def _periodicities(self):
return numpy.arange(self._n)
def log_prob(self, x, y):
"""
Return the energy at positions (x,y).
@param x: x-coordinates for evaluation
@type x: array-like
@param y: y-coordinates for evaluation
@type y: array-like
"""
return -self.energy(x, y)
def set(self, coef):
"""
Set the fourier expansion coefficients and calculations the
new partation function.
@param coef: expansion coefficents
@type coef: array like, with shape (4,n,n)
"""
self._cc[:, :], self._ss[:, :], self._cs[:, :], self._sc[:, :] = \
numpy.reshape(coef, (4, self._n, self._n))
self.normalize()
def get(self):
"""
Return current expansion coefficients.
"""
return numpy.array([self._cc, self._ss, self._cs, self._sc])
def energy(self, x, y=None):
"""
Return the energy at positions (x,y).
@param x: x-coordinates for evaluation
@type x: array-like
@param y: y-coordinates for evaluation
@type y: array-like
"""
from numpy import sin, cos, dot, multiply
k = self._periodicities()
cx, sx = cos(multiply.outer(k, x)), sin(multiply.outer(k, x))
if y is not None:
cy, sy = cos(multiply.outer(k, y)), sin(multiply.outer(k, y))
else:
cy, sy = cx, sx
return dot(dot(cx.T, self._cc), cy) + \
dot(dot(cx.T, self._cs), sy) + \
dot(dot(sx.T, self._sc), cy) + \
dot(dot(sx.T, self._ss), sy)
def sample_weights(self):
"""
Create a random set of expansion coefficients.
"""
from numpy import add
from numpy.random import standard_normal
k = self._periodicities()
k = add.outer(k ** 2, k ** 2)
self.set([standard_normal(k.shape) for i in range(4)])
self.normalize(True)
def prob(self, x, y):
"""
Return the probability of the configurations x cross y.
"""
from csb.numeric import exp
return exp(-self.beta * self(x, y))
def z(self):
"""
Calculate the partion function .
"""
from scipy.integrate import dblquad
from numpy import pi
return dblquad(self.prob, 0., 2 * pi, lambda x: 0., lambda x: 2 * pi)
def log_z(self, n=500, integration='simpson'):
"""
Calculate the log partion function.
"""
from numpy import pi, linspace, max
from csb.numeric import log, exp
if integration == 'simpson':
from csb.numeric import simpson_2d
x = linspace(0., 2 * pi, 2 * n + 1)
dx = x[1] - x[0]
f = -self.beta * self.energy(x)
f_max = max(f)
f -= f_max
I = simpson_2d(exp(f))
return log(I) + f_max + 2 * log(dx)
elif integration == 'trapezoidal':
from csb.numeric import trapezoidal_2d
x = linspace(0., 2 * pi, n)
dx = x[1] - x[0]
f = -self.beta * self.energy(x)
f_max = max(f)
f -= f_max
I = trapezoidal_2d(exp(f))
return log(I) + f_max + 2 * log(dx)
else:
raise NotImplementedError(
'Choose from trapezoidal and simpson-rule Integration')
def entropy(self, n=500):
"""
Calculate the entropy of the model.
@param n: number of integration points for numerical integration
@type n: integer
"""
from csb.numeric import trapezoidal_2d
from numpy import pi, linspace, max
from csb.numeric import log, exp
x = linspace(0., 2 * pi, n)
dx = x[1] - x[0]
f = -self.beta * self.energy(x)
f_max = max(f)
log_z = log(trapezoidal_2d(exp(f - f_max))) + f_max + 2 * log(dx)
average_energy = trapezoidal_2d(f * exp(f - f_max))\
* exp(f_max + 2 * log(dx) - log_z)
return -average_energy + log_z
def calculate_statistics(self, data):
"""
Calculate the sufficient statistics for the data.
"""
from numpy import cos, sin, dot, multiply
k = self._periodicities()
cx = cos(multiply.outer(k, data[:, 0]))
sx = sin(multiply.outer(k, data[:, 0]))
cy = cos(multiply.outer(k, data[:, 1]))
sy = sin(multiply.outer(k, data[:, 1]))
return dot(cx, cy.T), dot(sx, sy.T), dot(cx, sy.T), dot(sx, cy.T)
def normalize(self, normalize_full=True):
"""
Remove parameter, which do not have any influence on the model
and compute the partition function.
@param normalize_full: compute partition function
@type normalize_full: boolean
"""
self._cc[0, 0] = 0.
self._ss[:, 0] = 0.
self._ss[0, :] = 0.
self._cs[:, 0] = 0.
self._sc[0, :] = 0.
if normalize_full:
self._cc[0, 0] = self.log_z()
class MaxentPosterior(object):
"""
Object to hold and calculate the posterior (log)probability
given an exponential family model and corresponding data.
"""
def __init__(self, model, data):
"""
@param model: MaxentModel
@param data: two dimensonal data
"""
self._model = model
self._data = numpy.array(data)
self._stats = self.model.calculate_statistics(self._data)
self._log_likelihoods = []
@property
def model(self):
return self._model
@model.setter
def model(self, value):
self._model = value
self._stats = self.model.calculate_statistics(self._data)
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = numpy.array(value)
self._stats = self.model.calculate_statistics(value)
@property
def stats(self):
return self._stats
def __call__(self, weights=None, n=100):
"""
Returns the log posterior likelihood
@param weights: optional expansion coefficients of the model,
if none are specified those of the model are used
@param n: number of integration point for calculating the partition function
"""
from numpy import sum
if weights is not None:
self.model.set(weights)
a = sum(self._stats[0] * self.model._cc)
b = sum(self._stats[1] * self.model._ss)
c = sum(self._stats[2] * self.model._cs)
d = sum(self._stats[3] * self.model._sc)
log_z = self.data.shape[0] * self.model.log_z(n=n)
log_likelihood = -self.model.beta * (a + b + c + d) - log_z
self._log_likelihoods.append(log_likelihood)
return log_likelihood
|
[
"numpy.sum",
"csb.numeric.log",
"numpy.zeros",
"numpy.max",
"numpy.multiply.outer",
"numpy.arange",
"numpy.reshape",
"numpy.array",
"scipy.integrate.dblquad",
"numpy.linspace",
"numpy.dot",
"numpy.random.standard_normal",
"numpy.add.outer",
"os.path.expanduser",
"csb.io.load",
"csb.numeric.exp"
] |
[((566, 597), 'numpy.zeros', 'numpy.zeros', (['(self._n, self._n)'], {}), '((self._n, self._n))\n', (577, 597), False, 'import numpy\n'), ((617, 648), 'numpy.zeros', 'numpy.zeros', (['(self._n, self._n)'], {}), '((self._n, self._n))\n', (628, 648), False, 'import numpy\n'), ((668, 699), 'numpy.zeros', 'numpy.zeros', (['(self._n, self._n)'], {}), '((self._n, self._n))\n', (679, 699), False, 'import numpy\n'), ((719, 750), 'numpy.zeros', 'numpy.zeros', (['(self._n, self._n)'], {}), '((self._n, self._n))\n', (730, 750), False, 'import numpy\n'), ((2083, 2109), 'os.path.expanduser', 'os.path.expanduser', (['f_name'], {}), '(f_name)\n', (2101, 2109), False, 'import os\n'), ((2138, 2150), 'csb.io.load', 'load', (['f_name'], {}), '(f_name)\n', (2142, 2150), False, 'from csb.io import load\n'), ((2675, 2696), 'numpy.arange', 'numpy.arange', (['self._n'], {}), '(self._n)\n', (2687, 2696), False, 'import numpy\n'), ((3329, 3371), 'numpy.reshape', 'numpy.reshape', (['coef', '(4, self._n, self._n)'], {}), '(coef, (4, self._n, self._n))\n', (3342, 3371), False, 'import numpy\n'), ((3503, 3556), 'numpy.array', 'numpy.array', (['[self._cc, self._ss, self._cs, self._sc]'], {}), '([self._cc, self._ss, self._cs, self._sc])\n', (3514, 3556), False, 'import numpy\n'), ((4536, 4561), 'numpy.add.outer', 'add.outer', (['(k ** 2)', '(k ** 2)'], {}), '(k ** 2, k ** 2)\n', (4545, 4561), False, 'from numpy import add\n'), ((5024, 5088), 'scipy.integrate.dblquad', 'dblquad', (['self.prob', '(0.0)', '(2 * pi)', '(lambda x: 0.0)', '(lambda x: 2 * pi)'], {}), '(self.prob, 0.0, 2 * pi, lambda x: 0.0, lambda x: 2 * pi)\n', (5031, 5088), False, 'from scipy.integrate import dblquad\n'), ((6452, 6476), 'numpy.linspace', 'linspace', (['(0.0)', '(2 * pi)', 'n'], {}), '(0.0, 2 * pi, n)\n', (6460, 6476), False, 'from numpy import pi, linspace, max\n'), ((6558, 6564), 'numpy.max', 'max', (['f'], {}), '(f)\n', (6561, 6564), False, 'from numpy import pi, linspace, max\n'), ((8141, 8158), 'numpy.array', 
'numpy.array', (['data'], {}), '(data)\n', (8152, 8158), False, 'import numpy\n'), ((8600, 8618), 'numpy.array', 'numpy.array', (['value'], {}), '(value)\n', (8611, 8618), False, 'import numpy\n'), ((9201, 9237), 'numpy.sum', 'sum', (['(self._stats[0] * self.model._cc)'], {}), '(self._stats[0] * self.model._cc)\n', (9204, 9237), False, 'from numpy import sum\n'), ((9250, 9286), 'numpy.sum', 'sum', (['(self._stats[1] * self.model._ss)'], {}), '(self._stats[1] * self.model._ss)\n', (9253, 9286), False, 'from numpy import sum\n'), ((9299, 9335), 'numpy.sum', 'sum', (['(self._stats[2] * self.model._cs)'], {}), '(self._stats[2] * self.model._cs)\n', (9302, 9335), False, 'from numpy import sum\n'), ((9348, 9384), 'numpy.sum', 'sum', (['(self._stats[3] * self.model._sc)'], {}), '(self._stats[3] * self.model._sc)\n', (9351, 9384), False, 'from numpy import sum\n'), ((5393, 5425), 'numpy.linspace', 'linspace', (['(0.0)', '(2 * pi)', '(2 * n + 1)'], {}), '(0.0, 2 * pi, 2 * n + 1)\n', (5401, 5425), False, 'from numpy import pi, linspace, max\n'), ((5519, 5525), 'numpy.max', 'max', (['f'], {}), '(f)\n', (5522, 5525), False, 'from numpy import pi, linspace, max\n'), ((7028, 7057), 'numpy.multiply.outer', 'multiply.outer', (['k', 'data[:, 0]'], {}), '(k, data[:, 0])\n', (7042, 7057), False, 'from numpy import cos, sin, dot, multiply\n'), ((7076, 7105), 'numpy.multiply.outer', 'multiply.outer', (['k', 'data[:, 0]'], {}), '(k, data[:, 0])\n', (7090, 7105), False, 'from numpy import cos, sin, dot, multiply\n'), ((7124, 7153), 'numpy.multiply.outer', 'multiply.outer', (['k', 'data[:, 1]'], {}), '(k, data[:, 1])\n', (7138, 7153), False, 'from numpy import cos, sin, dot, multiply\n'), ((7172, 7201), 'numpy.multiply.outer', 'multiply.outer', (['k', 'data[:, 1]'], {}), '(k, data[:, 1])\n', (7186, 7201), False, 'from numpy import cos, sin, dot, multiply\n'), ((7219, 7232), 'numpy.dot', 'dot', (['cx', 'cy.T'], {}), '(cx, cy.T)\n', (7222, 7232), False, 'from numpy import cos, sin, dot, 
multiply\n'), ((7234, 7247), 'numpy.dot', 'dot', (['sx', 'sy.T'], {}), '(sx, sy.T)\n', (7237, 7247), False, 'from numpy import cos, sin, dot, multiply\n'), ((7249, 7262), 'numpy.dot', 'dot', (['cx', 'sy.T'], {}), '(cx, sy.T)\n', (7252, 7262), False, 'from numpy import cos, sin, dot, multiply\n'), ((7264, 7277), 'numpy.dot', 'dot', (['sx', 'cy.T'], {}), '(sx, cy.T)\n', (7267, 7277), False, 'from numpy import cos, sin, dot, multiply\n'), ((3920, 3940), 'numpy.multiply.outer', 'multiply.outer', (['k', 'x'], {}), '(k, x)\n', (3934, 3940), False, 'from numpy import cos, sin, dot, multiply\n'), ((3947, 3967), 'numpy.multiply.outer', 'multiply.outer', (['k', 'x'], {}), '(k, x)\n', (3961, 3967), False, 'from numpy import cos, sin, dot, multiply\n'), ((4275, 4294), 'numpy.dot', 'dot', (['sx.T', 'self._ss'], {}), '(sx.T, self._ss)\n', (4278, 4294), False, 'from numpy import cos, sin, dot, multiply\n'), ((4580, 4604), 'numpy.random.standard_normal', 'standard_normal', (['k.shape'], {}), '(k.shape)\n', (4595, 4604), False, 'from numpy.random import standard_normal\n'), ((5577, 5583), 'csb.numeric.exp', 'exp', (['f'], {}), '(f)\n', (5580, 5583), False, 'from csb.numeric import log, exp\n'), ((5745, 5769), 'numpy.linspace', 'linspace', (['(0.0)', '(2 * pi)', 'n'], {}), '(0.0, 2 * pi, n)\n', (5753, 5769), False, 'from numpy import pi, linspace, max\n'), ((5863, 5869), 'numpy.max', 'max', (['f'], {}), '(f)\n', (5866, 5869), False, 'from numpy import pi, linspace, max\n'), ((6632, 6639), 'csb.numeric.log', 'log', (['dx'], {}), '(dx)\n', (6635, 6639), False, 'from csb.numeric import log, exp\n'), ((4020, 4040), 'numpy.multiply.outer', 'multiply.outer', (['k', 'y'], {}), '(k, y)\n', (4034, 4040), False, 'from numpy import cos, sin, dot, multiply\n'), ((4047, 4067), 'numpy.multiply.outer', 'multiply.outer', (['k', 'y'], {}), '(k, y)\n', (4061, 4067), False, 'from numpy import cos, sin, dot, multiply\n'), ((4227, 4246), 'numpy.dot', 'dot', (['sx.T', 'self._sc'], {}), '(sx.T, 
self._sc)\n', (4230, 4246), False, 'from numpy import cos, sin, dot, multiply\n'), ((5604, 5610), 'csb.numeric.log', 'log', (['I'], {}), '(I)\n', (5607, 5610), False, 'from csb.numeric import log, exp\n'), ((5625, 5632), 'csb.numeric.log', 'log', (['dx'], {}), '(dx)\n', (5628, 5632), False, 'from csb.numeric import log, exp\n'), ((5924, 5930), 'csb.numeric.exp', 'exp', (['f'], {}), '(f)\n', (5927, 5930), False, 'from csb.numeric import log, exp\n'), ((6684, 6698), 'csb.numeric.exp', 'exp', (['(f - f_max)'], {}), '(f - f_max)\n', (6687, 6698), False, 'from csb.numeric import log, exp\n'), ((1330, 1356), 'os.path.expanduser', 'os.path.expanduser', (['f_name'], {}), '(f_name)\n', (1348, 1356), False, 'import os\n'), ((2246, 2254), 'numpy.array', 'array', (['a'], {}), '(a)\n', (2251, 2254), False, 'from numpy import reshape, array\n'), ((2317, 2325), 'numpy.array', 'array', (['b'], {}), '(b)\n', (2322, 2325), False, 'from numpy import reshape, array\n'), ((2388, 2396), 'numpy.array', 'array', (['c'], {}), '(c)\n', (2393, 2396), False, 'from numpy import reshape, array\n'), ((2459, 2467), 'numpy.array', 'array', (['d'], {}), '(d)\n', (2464, 2467), False, 'from numpy import reshape, array\n'), ((4131, 4150), 'numpy.dot', 'dot', (['cx.T', 'self._cc'], {}), '(cx.T, self._cc)\n', (4134, 4150), False, 'from numpy import cos, sin, dot, multiply\n'), ((4179, 4198), 'numpy.dot', 'dot', (['cx.T', 'self._cs'], {}), '(cx.T, self._cs)\n', (4182, 4198), False, 'from numpy import cos, sin, dot, multiply\n'), ((5951, 5957), 'csb.numeric.log', 'log', (['I'], {}), '(I)\n', (5954, 5957), False, 'from csb.numeric import log, exp\n'), ((5972, 5979), 'csb.numeric.log', 'log', (['dx'], {}), '(dx)\n', (5975, 5979), False, 'from csb.numeric import log, exp\n'), ((6601, 6615), 'csb.numeric.exp', 'exp', (['(f - f_max)'], {}), '(f - f_max)\n', (6604, 6615), False, 'from csb.numeric import log, exp\n'), ((6744, 6751), 'csb.numeric.log', 'log', (['dx'], {}), '(dx)\n', (6747, 6751), False, 'from 
csb.numeric import log, exp\n')]
|
# -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trestle Remove Command."""
import argparse
import logging
import pathlib
from typing import List, Tuple, Type
from ilcli import Command # type: ignore
import trestle.core.const as const
import trestle.core.err as err
from trestle.core import utils
from trestle.core.base_model import OscalBaseModel
from trestle.core.err import TrestleError
from trestle.core.models.actions import CreatePathAction, RemoveAction, WriteFileAction
from trestle.core.models.elements import Element, ElementPath
from trestle.core.models.file_content_type import FileContentType
from trestle.core.models.plans import Plan
from trestle.utils import fs
from trestle.utils import log
logger = logging.getLogger(__name__)
class RemoveCmd(Command):
"""Remove a subcomponent to an existing model."""
name = 'remove'
def _init_arguments(self) -> None:
self.add_argument(
f'-{const.ARG_FILE_SHORT}',
f'--{const.ARG_FILE}',
help=const.ARG_DESC_FILE + ' to remove component/subcomponent to.',
required=True
)
self.add_argument(
f'-{const.ARG_ELEMENT_SHORT}',
f'--{const.ARG_ELEMENT}',
help=const.ARG_DESC_ELEMENT + ' to remove.',
required=True
)
def _run(self, args: argparse.Namespace) -> int:
"""Remove an OSCAL component/subcomponent to the specified component.
This method takes input a filename and a list of comma-seperated element path. Element paths are field aliases.
The method first finds the parent model from the file and loads the file into the model.
Then the method executes 'remove' for each of the element paths specified.
"""
log.set_log_level_from_args(args)
args_dict = args.__dict__
file_path = pathlib.Path(args_dict[const.ARG_FILE])
# Get parent model and then load json into parent model
try:
parent_model, parent_alias = fs.get_contextual_model_type(file_path.absolute())
except Exception as err:
logger.debug(f'fs.get_contextual_model_type() failed: {err}')
logger.error(f'Remove failed (fs.get_contextual_model_type()): {err}')
return 1
try:
parent_object = parent_model.oscal_read(file_path.absolute())
except Exception as err:
logger.debug(f'parent_model.oscal_read() failed: {err}')
logger.error(f'Remove failed (parent_model.oscal_read()): {err}')
return 1
parent_element = Element(parent_object, utils.classname_to_alias(parent_model.__name__, 'json'))
add_plan = Plan()
# Do _remove for each element_path specified in args
element_paths: List[str] = str(args_dict[const.ARG_ELEMENT]).split(',')
for elm_path_str in element_paths:
element_path = ElementPath(elm_path_str)
try:
remove_action, parent_element = self.remove(element_path, parent_model, parent_element)
except TrestleError as err:
logger.debug(f'self.remove() failed: {err}')
logger.error(f'Remove failed (self.remove()): {err}')
return 1
add_plan.add_action(remove_action)
create_action = CreatePathAction(file_path.absolute(), True)
write_action = WriteFileAction(
file_path.absolute(), parent_element, FileContentType.to_content_type(file_path.suffix)
)
add_plan.add_action(remove_action)
add_plan.add_action(create_action)
add_plan.add_action(write_action)
try:
add_plan.simulate()
except TrestleError as err:
logger.debug(f'Remove failed at simulate(): {err}')
logger.error(f'Remove failed (simulate()): {err}')
return 1
try:
add_plan.execute()
except TrestleError as err:
logger.debug(f'Remove failed at execute(): {err}')
logger.error(f'Remove failed (execute()): {err}')
return 1
return 0
@classmethod
def remove(cls, element_path: ElementPath, parent_model: Type[OscalBaseModel],
parent_element: Element) -> Tuple[RemoveAction, Element]:
"""For the element_path, remove a model from the parent_element of a given parent_model.
First we check if there is an existing element at that path
If not, we complain.
Then we set up an action plan to update the model (specified by file_path) in memory,
return the action and return the parent_element.
LIMITATIONS:
1. This does not remove elements of a list or dict. Instead, the entire list or dict is removed.
2. This cannot remove arbitrarily named elements that are not specified in the schema.
For example, "responsible-parties" contains named elements, e.g., "organisation". The tool will not
remove the "organisation" as it is not in the schema, but one can remove its elements, e.g., "party-uuids".
"""
element_path_list = element_path.get_full_path_parts()
if '*' in element_path_list:
raise err.TrestleError('trestle remove does not support Wildcard element path.')
deleting_element = parent_element.get_at(element_path)
if deleting_element is not None:
# The element already exists
if type(deleting_element) is list:
logger.warning(
'Warning: trestle remove does not support removing elements of a list: '
'this removes the entire list'
)
elif type(deleting_element) is dict:
logger.warning(
'Warning: trestle remove does not support removing dict elements: '
'this removes the entire dict element'
)
else:
raise err.TrestleError(f'Bad element path: {str(element_path)}')
remove_action = RemoveAction(parent_element, element_path)
return remove_action, parent_element
|
[
"trestle.core.utils.classname_to_alias",
"trestle.core.err.TrestleError",
"trestle.core.models.elements.ElementPath",
"trestle.utils.log.set_log_level_from_args",
"logging.getLogger",
"pathlib.Path",
"trestle.core.models.file_content_type.FileContentType.to_content_type",
"trestle.core.models.actions.RemoveAction",
"trestle.core.models.plans.Plan"
] |
[((1310, 1337), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1327, 1337), False, 'import logging\n'), ((2353, 2386), 'trestle.utils.log.set_log_level_from_args', 'log.set_log_level_from_args', (['args'], {}), '(args)\n', (2380, 2386), False, 'from trestle.utils import log\n'), ((2442, 2481), 'pathlib.Path', 'pathlib.Path', (['args_dict[const.ARG_FILE]'], {}), '(args_dict[const.ARG_FILE])\n', (2454, 2481), False, 'import pathlib\n'), ((3278, 3284), 'trestle.core.models.plans.Plan', 'Plan', ([], {}), '()\n', (3282, 3284), False, 'from trestle.core.models.plans import Plan\n'), ((6631, 6673), 'trestle.core.models.actions.RemoveAction', 'RemoveAction', (['parent_element', 'element_path'], {}), '(parent_element, element_path)\n', (6643, 6673), False, 'from trestle.core.models.actions import CreatePathAction, RemoveAction, WriteFileAction\n'), ((3201, 3256), 'trestle.core.utils.classname_to_alias', 'utils.classname_to_alias', (['parent_model.__name__', '"""json"""'], {}), "(parent_model.__name__, 'json')\n", (3225, 3256), False, 'from trestle.core import utils\n'), ((3497, 3522), 'trestle.core.models.elements.ElementPath', 'ElementPath', (['elm_path_str'], {}), '(elm_path_str)\n', (3508, 3522), False, 'from trestle.core.models.elements import Element, ElementPath\n'), ((4047, 4096), 'trestle.core.models.file_content_type.FileContentType.to_content_type', 'FileContentType.to_content_type', (['file_path.suffix'], {}), '(file_path.suffix)\n', (4078, 4096), False, 'from trestle.core.models.file_content_type import FileContentType\n'), ((5806, 5880), 'trestle.core.err.TrestleError', 'err.TrestleError', (['"""trestle remove does not support Wildcard element path."""'], {}), "('trestle remove does not support Wildcard element path.')\n", (5822, 5880), True, 'import trestle.core.err as err\n')]
|
import configparser
from dlpipe.data_reader.mongodb import MongoDBConnect
from dlpipe.utils import DLPipeLogger
from bson import ObjectId
import plotly.graph_objs as go
import plotly.offline as offline
def create_plot_data(convert_data: list, batch_size: int, smooth_window: int=1):
"""
Convert metric data into x, y graph data
:param convert_data: list of metric objects with keys [batch, epoch, value]
:param batch_size: maximum number of batches in one epoch
:param smooth_window: values are averaged over the size of smooth_window
:return: (x_values, y_values) => tuple of x,y values for the scatter plot
"""
x_values = []
y_values = []
window_counter = 0
sum_value = 0
for i, data in enumerate(convert_data):
sum_value += float(data["value"])
window_counter += 1
if window_counter == smooth_window or i == len(convert_data) - 1:
decimal = (float(data["batch"]) / batch_size)
x_val = float(data["epoch"]) + decimal
x_values.append(x_val)
y_val = sum_value / window_counter
y_values.append(y_val)
window_counter = 0
sum_value = 0
return x_values, y_values
def plot_acc_loss_graph(exp_id, col):
    """
    Create a scatter plot of loss and accuracy for validation and training data
    and write it to ``loss_acc_<exp_id>.html``.

    :param exp_id: Experiment Id (string form of a Mongo ObjectId)
    :param col: MongoDB collection that holds the experiment documents
    """
    exp_obj = col.find_one({"_id": ObjectId(exp_id)})
    batch_size = int(exp_obj["max_batches_per_epoch"])
    x_train_loss, y_train_loss = create_plot_data(exp_obj["metrics"]["training"]["loss"], batch_size, 50)
    x_val_loss, y_val_loss = create_plot_data(exp_obj["metrics"]["validation"]["loss"], batch_size, 1)
    x_train_acc, y_train_acc = create_plot_data(exp_obj["metrics"]["training"]["acc"], batch_size, 50)
    x_val_acc, y_val_acc = create_plot_data(exp_obj["metrics"]["validation"]["acc"], batch_size, 1)
    trace_train_loss = go.Scatter(x=x_train_loss, y=y_train_loss, mode="lines", name="training loss")
    trace_val_loss = go.Scatter(x=x_val_loss, y=y_val_loss, mode="lines", name="validation loss")
    # Fix: the accuracy traces previously reused the loss x-axes
    # (x_train_loss / x_val_loss) and discarded x_train_acc / x_val_acc,
    # which misaligns the curves whenever the loss and accuracy metric
    # lists have different lengths.
    trace_train_acc = go.Scatter(x=x_train_acc, y=y_train_acc, mode="lines", name="training accuracy")
    trace_val_acc = go.Scatter(x=x_val_acc, y=y_val_acc, mode="lines", name="validation accuracy")
    data = [trace_train_loss, trace_val_loss, trace_train_acc, trace_val_acc]
    layout = dict(title="accuracy + loss")
    fig = dict(data=data, layout=layout)
    offline.plot(fig, filename='loss_acc_' + str(exp_id) + '.html')
if __name__ == "__main__":
    # Stand-alone entry point: plot the loss/accuracy curves of a single
    # experiment document stored in MongoDB.
    DLPipeLogger.remove_file_logger()
    # Connection parameters are read from ./connections.ini (relative to cwd).
    cp = configparser.ConfigParser()
    if len(cp.read('./connections.ini')) == 0:
        raise ValueError("Config File could not be loaded, please check the correct path!")
    MongoDBConnect.add_connections_from_config(cp)
    col_exp = MongoDBConnect.get_collection("localhost_mongo_db", "models", "experiment")
    # Hard-coded id of the experiment document to visualize.
    plot_exp_id = "5ba802c732b9016996d2f0cc"
    plot_acc_loss_graph(plot_exp_id, col_exp)
|
[
"plotly.graph_objs.Scatter",
"dlpipe.data_reader.mongodb.MongoDBConnect.add_connections_from_config",
"dlpipe.utils.DLPipeLogger.remove_file_logger",
"dlpipe.data_reader.mongodb.MongoDBConnect.get_collection",
"configparser.ConfigParser",
"bson.ObjectId"
] |
[((1940, 2018), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'x_train_loss', 'y': 'y_train_loss', 'mode': '"""lines"""', 'name': '"""training loss"""'}), "(x=x_train_loss, y=y_train_loss, mode='lines', name='training loss')\n", (1950, 2018), True, 'import plotly.graph_objs as go\n'), ((2040, 2116), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'x_val_loss', 'y': 'y_val_loss', 'mode': '"""lines"""', 'name': '"""validation loss"""'}), "(x=x_val_loss, y=y_val_loss, mode='lines', name='validation loss')\n", (2050, 2116), True, 'import plotly.graph_objs as go\n'), ((2139, 2225), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'x_train_loss', 'y': 'y_train_acc', 'mode': '"""lines"""', 'name': '"""training accuracy"""'}), "(x=x_train_loss, y=y_train_acc, mode='lines', name=\n 'training accuracy')\n", (2149, 2225), True, 'import plotly.graph_objs as go\n'), ((2241, 2320), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'x_val_loss', 'y': 'y_val_acc', 'mode': '"""lines"""', 'name': '"""validation accuracy"""'}), "(x=x_val_loss, y=y_val_acc, mode='lines', name='validation accuracy')\n", (2251, 2320), True, 'import plotly.graph_objs as go\n'), ((2584, 2617), 'dlpipe.utils.DLPipeLogger.remove_file_logger', 'DLPipeLogger.remove_file_logger', ([], {}), '()\n', (2615, 2617), False, 'from dlpipe.utils import DLPipeLogger\n'), ((2628, 2655), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (2653, 2655), False, 'import configparser\n'), ((2799, 2845), 'dlpipe.data_reader.mongodb.MongoDBConnect.add_connections_from_config', 'MongoDBConnect.add_connections_from_config', (['cp'], {}), '(cp)\n', (2841, 2845), False, 'from dlpipe.data_reader.mongodb import MongoDBConnect\n'), ((2860, 2935), 'dlpipe.data_reader.mongodb.MongoDBConnect.get_collection', 'MongoDBConnect.get_collection', (['"""localhost_mongo_db"""', '"""models"""', '"""experiment"""'], {}), "('localhost_mongo_db', 'models', 'experiment')\n", (2889, 2935), False, 'from 
dlpipe.data_reader.mongodb import MongoDBConnect\n'), ((1429, 1445), 'bson.ObjectId', 'ObjectId', (['exp_id'], {}), '(exp_id)\n', (1437, 1445), False, 'from bson import ObjectId\n')]
|
import ctre
import wpilib
import math
from ctre import WPI_TalonSRX as Talon
from wpilib.drive.differentialdrive import DifferentialDrive
from wpilib.speedcontrollergroup import SpeedControllerGroup
from wpilib.smartdashboard import SmartDashboard as SD
from wpilib.command import Subsystem
#from robotpy_ext.common_drivers.navx import AHRS
from navx import AHRS
class DriveTrain(Subsystem):
    '''
    'Tank Drive' system set up with 2 motors per side, one a "master"
    with a mag encoder attached and the other "slave" controller set
    to follow the "master".
    '''
    def __init__(self, robot):
        """Create the motor controllers, gyro, turn PID and differential drive.

        :param robot: top-level robot object; provides the kDriveTrain port
                      map and (in arcade methods) the driver joystick dStick.
        """
        self.robot = robot
        self.ahrs = AHRS.create_spi()
        self.ahrs.reset()
        # self.angleAdjustment = self.ahrs.getAngle()
        # self.ahrs.setAngleAdjustment(self.angleAdjustment)
        # Initialize all controllers
        self.driveLeftMaster = Talon(self.robot.kDriveTrain['left_master'])
        self.driveLeftSlave = Talon(self.robot.kDriveTrain['left_slave'])
        self.driveRightMaster = Talon(self.robot.kDriveTrain['right_master'])
        self.driveRightSlave = Talon(self.robot.kDriveTrain['right_slave'])
        wpilib.LiveWindow.addActuator("DriveTrain",
                                      "LeftMaster", self.driveLeftMaster)
        wpilib.LiveWindow.addActuator("DriveTrain",
                                      "RightMaster", self.driveRightMaster)
        # Connect the slaves to the masters on each side
        self.driveLeftSlave.follow(self.driveLeftMaster)
        self.driveRightSlave.follow(self.driveRightMaster)
        self.driveLeftMaster.configNominalOutputForward(0, 0)
        self.driveLeftMaster.configNominalOutputReverse(0, 0)
        self.driveRightMaster.configNominalOutputForward(0, 0)
        self.driveRightMaster.configNominalOutputReverse(0, 0)
        # Peak output caps motor duty cycle in both directions; overwritten
        # later by autoInit()/teleInit().
        self.speed = .4
        self.driveLeftMaster.configPeakOutputForward(self.speed, 0)
        self.driveLeftMaster.configPeakOutputReverse(-self.speed, 0)
        self.driveRightMaster.configPeakOutputForward(self.speed, 0)
        self.driveRightMaster.configPeakOutputReverse(-self.speed, 0)
        self.driveLeftMaster.configClosedLoopRamp(.2, 0)
        self.driveRightMaster.configClosedLoopRamp(.2, 0)
        self.driveLeftMaster.setSafetyEnabled(False)
        self.driveRightMaster.setSafetyEnabled(False)
        # Makes sure both sides' controllers show green and use positive
        # values to move the bot forward.
        self.driveLeftSlave.setInverted(False)
        self.driveLeftMaster.setInverted(False)
        self.driveRightSlave.setInverted(True)
        self.driveRightMaster.setInverted(True)
        self.PID()
        """
        Initializes the count for toggling which side of the
        robot will be considered the front when driving.
        """
        self.robotFrontToggleCount = 2
        # Configures each master to use the attached Mag Encoders
        self.driveLeftMaster.configSelectedFeedbackSensor(
            ctre.talonsrx.TalonSRX.FeedbackDevice.CTRE_MagEncoder_Relative, 0, 0)
        self.driveRightMaster.configSelectedFeedbackSensor(
            ctre.talonsrx.TalonSRX.FeedbackDevice.CTRE_MagEncoder_Relative, 0, 0)
        # Reverses the encoder direction so forward movement always
        # results in a positive increase in the encoder ticks.
        self.driveLeftMaster.setSensorPhase(True)
        self.driveRightMaster.setSensorPhase(True)
        self.driveLeftMaster.setSelectedSensorPosition(0, 0, 0)
        self.driveRightMaster.setSelectedSensorPosition(0, 0, 0)
        # these supposedly aren't part of the WPI_TalonSRX class
        # self.driveLeftMaster.setSelectedSensorPostion(0, 0, 10)
        # self.driveRightMaster.setSelectedSensorPosition(0, 0, 10)
        # Throw data on the SmartDashboard so we can work with it.
        # SD.putNumber(
        #     'Left Quad Pos.',
        #     self.driveLeftMaster.getQuadraturePosition())
        # SD.putNumber(
        #     'Right Quad Pos.',
        #     self.driveRightMaster.getQuadraturePosition())
        # Previous-cycle telemetry, used by updateSD() to compute deltas.
        self.leftVel = None
        self.leftPos = None
        self.rightVel = None
        self.rightPos = None
        # self.driveLeftMaster.config_kP(0, .3, 10)
        self.driveControllerLeft = SpeedControllerGroup(self.driveLeftMaster)
        self.driveControllerRight = SpeedControllerGroup(self.driveRightMaster)
        self.driveControllerRight.setInverted(True)
        self.drive = DifferentialDrive(self.driveControllerLeft,
                                       self.driveControllerRight)
        self.drive.setSafetyEnabled(False)
        self.previousError = 0
        super().__init__()
    def autoInit(self):
        """Configure peak outputs and closed-loop P gains for autonomous
        (position/velocity control; feed-forward zeroed)."""
        self.speed = .5
        self.driveLeftMaster.configPeakOutputForward(self.speed, 0)
        self.driveLeftMaster.configPeakOutputReverse(-self.speed, 0)
        self.driveRightMaster.configPeakOutputForward(self.speed, 0)
        self.driveRightMaster.configPeakOutputReverse(-self.speed, 0)
        self.driveLeftMaster.config_kP(0, .115, 0)
        self.driveRightMaster.config_kP(0, .115, 0)
        # self.driveLeftMaster.config_kP(0, .185, 0)
        # self.driveRightMaster.config_kP(0, .185, 0)
        # self.driveLeftMaster.config_kP(0, 20, 0)
        # self.driveRightMaster.config_kP(0, 20, 0)
        self.driveLeftMaster.config_kF(0, 0.0, 0)
        self.driveRightMaster.config_kF(0, 0.0, 0)
    def teleInit(self):
        """Configure peak outputs and feed-forward gains for teleop
        (velocity control; P zeroed)."""
        self.speed = .55
        self.driveLeftMaster.configPeakOutputForward(self.speed, 0)
        self.driveLeftMaster.configPeakOutputReverse(-self.speed, 0)
        self.driveRightMaster.configPeakOutputForward(self.speed, 0)
        self.driveRightMaster.configPeakOutputReverse(-self.speed, 0)
        self.driveLeftMaster.config_kP(0, 0.0, 0)
        self.driveRightMaster.config_kP(0, 0.0, 0)
        self.driveLeftMaster.config_kF(0, 0.313, 0)
        self.driveRightMaster.config_kF(0, 0.313, 0)
    def moveToPosition(self, position):
        """Command both sides to an encoder position via closed-loop control."""
        self.driveLeftMaster.set(ctre.talonsrx.TalonSRX.ControlMode.Position, position)
        self.driveRightMaster.set(ctre.talonsrx.TalonSRX.ControlMode.Position, position)
    def stop(self):
        """Stop the drive motors."""
        self.drive.stopMotor()
    def arcade(self, speed, rotation):
        """Open-loop arcade drive; joystick button 3 flips which end of the
        robot counts as the front (see robotFrontToggleCount)."""
        # self.updateSD()
        if self.robot.dStick.getRawButtonReleased(3):
            self.robotFrontToggleCount += 1
        """
        This if statement acts as a toggle to change which motors are
        inverted, completely changing the "front" of the robot. This is
        useful for when we are about to climb.
        """
        if self.robotFrontToggleCount%2 == 0:
            self.drive.arcadeDrive(speed, rotation, True)
        else:
            self.drive.arcadeDrive(-speed, rotation, True)
    def arcadeWithRPM(self, speed, rotation, maxRPM):
        """Arcade drive that converts stick input to closed-loop velocity
        setpoints (re-implements the DifferentialDrive.arcadeDrive mixing
        math, then scales the -1..1 outputs by maxRPM).

        :param speed: forward/backward input, -1..1
        :param rotation: turn input, -1..1
        :param maxRPM: velocity setpoint corresponding to full output
        """
        # self.updateSD()
        self.driveLeftMaster.setSafetyEnabled(False)
        if self.robot.dStick.getRawButtonReleased(3):
            self.robotFrontToggleCount += 1
        if self.robotFrontToggleCount%2 == 0:
            XSpeed = wpilib.RobotDrive.limit(speed)
        else:
            XSpeed = wpilib.RobotDrive.limit(-speed)
        XSpeed = self.applyDeadband(XSpeed, .02)
        ZRotation = wpilib.RobotDrive.limit(rotation)
        ZRotation = self.applyDeadband(ZRotation, .02)
        # Square the inputs (preserving sign) for finer low-speed control.
        XSpeed = math.copysign(XSpeed * XSpeed, XSpeed)
        ZRotation = math.copysign(ZRotation * ZRotation, ZRotation)
        maxInput = math.copysign(max(abs(XSpeed), abs(ZRotation)), XSpeed)
        if XSpeed >= 0.0:
            if ZRotation >= 0.0:
                leftMotorSpeed = maxInput
                rightMotorSpeed = XSpeed - ZRotation
            else:
                leftMotorSpeed = XSpeed + ZRotation
                rightMotorSpeed = maxInput
        else:
            if ZRotation >= 0.0:
                leftMotorSpeed = XSpeed + ZRotation
                rightMotorSpeed = maxInput
            else:
                leftMotorSpeed = maxInput
                rightMotorSpeed = XSpeed - ZRotation
        leftMotorSpeed = wpilib.RobotDrive.limit(leftMotorSpeed)
        rightMotorSpeed = wpilib.RobotDrive.limit(rightMotorSpeed)
        leftMotorRPM = leftMotorSpeed * maxRPM
        rightMotorRPM = rightMotorSpeed * maxRPM
        self.driveLeftMaster.set(ctre.talonsrx.TalonSRX.ControlMode.Velocity, leftMotorRPM)
        self.driveRightMaster.set(ctre.talonsrx.TalonSRX.ControlMode.Velocity, rightMotorRPM)
    def updateSD(self):
        """Publish encoder velocity/position, per-cycle deltas and gyro angle
        to the SmartDashboard; caches current readings for the next call."""
        leftVel = self.driveLeftMaster.getSelectedSensorVelocity(0)
        leftPos = self.driveLeftMaster.getSelectedSensorPosition(0)
        rightVel = self.driveRightMaster.getSelectedSensorVelocity(0)
        rightPos = self.driveRightMaster.getSelectedSensorPosition(0)
        # calculate side deltas
        if self.leftVel:
            leftVelDelta = leftVel - self.leftVel
        else:
            leftVelDelta = 0
        if self.leftPos:
            leftPosDelta = leftPos - self.leftPos
        else:
            leftPosDelta = 0
        if self.rightVel:
            rightVelDelta = rightVel - self.rightVel
        else:
            rightVelDelta = 0
        if self.rightPos:
            rightPosDelta = rightPos - self.rightPos
        else:
            rightPosDelta = 0
        # calculate delta of delta
        differenceVel = leftVelDelta - rightVelDelta
        differencePos = leftPosDelta - rightPosDelta
        SD.putNumber("LeftSensorVel", leftVel)
        SD.putNumber("LeftSensorPos", leftPos)
        SD.putNumber("RightSensorVel", rightVel)
        SD.putNumber("RightSensorPos", rightPos)
        SD.putNumber('LeftVelDelta', leftVelDelta)
        SD.putNumber('LeftPosDelta', leftPosDelta)
        SD.putNumber('RightVelDelta', rightVelDelta)
        SD.putNumber('RightPosDelta', rightPosDelta)
        SD.putNumber('DifferenceVel', differenceVel)
        SD.putNumber('DifferencePos', differencePos)
        SD.putNumber('Angle', self.ahrs.getAngle())
        SD.putNumber('Angle Adjustment', self.ahrs.getAngleAdjustment())
        self.leftVel = leftVel
        self.leftPos = leftPos
        self.rightVel = rightVel
        self.rightPos = rightPos
        # kP = self.driveLeftMaster.configGetParameter(
        #     self.driveLeftMaster.ParamEnum.eProfileParamSlot_P, 0, 10)
        # SmartDashboard.putNumber('Left Proportional', kP)
        # these may give the derivitive an integral of the PID once
        # they are set. For now, they just show 0
        #SD.putNumber(
        #    'Left Derivative',
        #    self.driveLeftMaster.getErrorDerivative(0))
        #SD.putNumber(
        #    'Left Integral',
        #    self.driveLeftMaster.getIntegralAccumulator(0))
    def applyDeadband(self, value, deadband):
        """Returns 0.0 if the given value is within the specified range around zero. The remaining range
        between the deadband and 1.0 is scaled from 0.0 to 1.0.
        :param value: value to clip
        :param deadband: range around zero
        """
        if abs(value) > deadband:
            if value < 0.0:
                return (value - deadband) / (1.0 - deadband)
            else:
                return (value + deadband) / (1.0 - deadband)
        return 0.0
    def setAngle(self, angle, tolerance):
        """Turn toward ``angle`` (degrees) with the gyro PID controller;
        disable it and stop the motors once yaw is within ``tolerance``.
        Drive output comes from pidWrite() via self.output."""
        #self.tolerance = tolerance
        #self.calculateAdjustedSetpoint(angle)
        self.turnController.setSetpoint(angle)
        if ((self.ahrs.getYaw() <= (angle + tolerance))
                and (self.ahrs.getYaw() >= (angle - tolerance))):
            self.turnController.disable()
            self.driveLeftMaster.set(0)
            self.driveRightMaster.set(0)
        else:
            self.turnController.enable()
            self.drive.arcadeDrive(0, self.output)
        #self.leftTurnController.setSetpoint(angle)
    def isInGyroPosition(self):
        """Return True when the gyro yaw is within ANGLE_TOLERANCE of the
        turn controller setpoint (also published to the SmartDashboard)."""
        SD.putNumber('Is in gyro position', ((self.ahrs.getYaw() <= (self.turnController.getSetpoint() + self.robot.autonomous.ANGLE_TOLERANCE)) and (self.ahrs.getYaw() >= (self.turnController.getSetpoint() - self.robot.autonomous.ANGLE_TOLERANCE)))
            )
        return((self.ahrs.getYaw() <= (self.turnController.getSetpoint() + self.robot.autonomous.ANGLE_TOLERANCE)) and (self.ahrs.getYaw() >= (self.turnController.getSetpoint() - self.robot.autonomous.ANGLE_TOLERANCE)))
    def calculateAdjustedSetpoint(self, angle):
        """Offset ``angle`` by the autonomous starting yaw and wrap the result
        into the [-180, 180] range; stored in self.adjustedSetpoint."""
        self.startingYaw = self.robot.autonomous.startingYaw
        adjustedAngle = angle + self.startingYaw
        if adjustedAngle<-180:
            undershot = adjustedAngle+180
            adjustedAngle = 180+undershot
        elif adjustedAngle>180:
            overshot = adjustedAngle-180
            adjustedAngle = -180+overshot
        self.adjustedSetpoint = adjustedAngle
    def PID(self):
        """Create the gyro turn PIDController (this object is both the PID
        source's output target via pidWrite and holds the gains)."""
        self.kP = .045
        self.kI = 0.00
        self.kD = 0.00
        self.kF = 0.00
        self.turnController = wpilib.PIDController(self.kP, self.kI, self.kD, self.kF, self.ahrs, output=self)
        self.turnController.setInputRange(-180, 180)
        self.turnController.setOutputRange(-0.55, 0.55)
        self.turnController.disable()
    def pidWrite(self, output):
        """PIDController output callback; setAngle() reads self.output."""
        self.output = output
|
[
"wpilib.drive.differentialdrive.DifferentialDrive",
"wpilib.LiveWindow.addActuator",
"ctre.WPI_TalonSRX",
"wpilib.RobotDrive.limit",
"math.copysign",
"wpilib.smartdashboard.SmartDashboard.putNumber",
"wpilib.PIDController",
"navx.AHRS.create_spi",
"wpilib.speedcontrollergroup.SpeedControllerGroup"
] |
[((667, 684), 'navx.AHRS.create_spi', 'AHRS.create_spi', ([], {}), '()\n', (682, 684), False, 'from navx import AHRS\n'), ((905, 949), 'ctre.WPI_TalonSRX', 'Talon', (["self.robot.kDriveTrain['left_master']"], {}), "(self.robot.kDriveTrain['left_master'])\n", (910, 949), True, 'from ctre import WPI_TalonSRX as Talon\n'), ((980, 1023), 'ctre.WPI_TalonSRX', 'Talon', (["self.robot.kDriveTrain['left_slave']"], {}), "(self.robot.kDriveTrain['left_slave'])\n", (985, 1023), True, 'from ctre import WPI_TalonSRX as Talon\n'), ((1056, 1101), 'ctre.WPI_TalonSRX', 'Talon', (["self.robot.kDriveTrain['right_master']"], {}), "(self.robot.kDriveTrain['right_master'])\n", (1061, 1101), True, 'from ctre import WPI_TalonSRX as Talon\n'), ((1133, 1177), 'ctre.WPI_TalonSRX', 'Talon', (["self.robot.kDriveTrain['right_slave']"], {}), "(self.robot.kDriveTrain['right_slave'])\n", (1138, 1177), True, 'from ctre import WPI_TalonSRX as Talon\n'), ((1187, 1266), 'wpilib.LiveWindow.addActuator', 'wpilib.LiveWindow.addActuator', (['"""DriveTrain"""', '"""LeftMaster"""', 'self.driveLeftMaster'], {}), "('DriveTrain', 'LeftMaster', self.driveLeftMaster)\n", (1216, 1266), False, 'import wpilib\n'), ((1313, 1399), 'wpilib.LiveWindow.addActuator', 'wpilib.LiveWindow.addActuator', (['"""DriveTrain"""', '"""RightMaster"""', 'self.driveRightMaster'], {}), "('DriveTrain', 'RightMaster', self.\n driveRightMaster)\n", (1342, 1399), False, 'import wpilib\n'), ((4405, 4447), 'wpilib.speedcontrollergroup.SpeedControllerGroup', 'SpeedControllerGroup', (['self.driveLeftMaster'], {}), '(self.driveLeftMaster)\n', (4425, 4447), False, 'from wpilib.speedcontrollergroup import SpeedControllerGroup\n'), ((4484, 4527), 'wpilib.speedcontrollergroup.SpeedControllerGroup', 'SpeedControllerGroup', (['self.driveRightMaster'], {}), '(self.driveRightMaster)\n', (4504, 4527), False, 'from wpilib.speedcontrollergroup import SpeedControllerGroup\n'), ((4601, 4671), 'wpilib.drive.differentialdrive.DifferentialDrive', 
'DifferentialDrive', (['self.driveControllerLeft', 'self.driveControllerRight'], {}), '(self.driveControllerLeft, self.driveControllerRight)\n', (4618, 4671), False, 'from wpilib.drive.differentialdrive import DifferentialDrive\n'), ((7572, 7605), 'wpilib.RobotDrive.limit', 'wpilib.RobotDrive.limit', (['rotation'], {}), '(rotation)\n', (7595, 7605), False, 'import wpilib\n'), ((7688, 7726), 'math.copysign', 'math.copysign', (['(XSpeed * XSpeed)', 'XSpeed'], {}), '(XSpeed * XSpeed, XSpeed)\n', (7701, 7726), False, 'import math\n'), ((7747, 7794), 'math.copysign', 'math.copysign', (['(ZRotation * ZRotation)', 'ZRotation'], {}), '(ZRotation * ZRotation, ZRotation)\n', (7760, 7794), False, 'import math\n'), ((8420, 8459), 'wpilib.RobotDrive.limit', 'wpilib.RobotDrive.limit', (['leftMotorSpeed'], {}), '(leftMotorSpeed)\n', (8443, 8459), False, 'import wpilib\n'), ((8486, 8526), 'wpilib.RobotDrive.limit', 'wpilib.RobotDrive.limit', (['rightMotorSpeed'], {}), '(rightMotorSpeed)\n', (8509, 8526), False, 'import wpilib\n'), ((9808, 9846), 'wpilib.smartdashboard.SmartDashboard.putNumber', 'SD.putNumber', (['"""LeftSensorVel"""', 'leftVel'], {}), "('LeftSensorVel', leftVel)\n", (9820, 9846), True, 'from wpilib.smartdashboard import SmartDashboard as SD\n'), ((9855, 9893), 'wpilib.smartdashboard.SmartDashboard.putNumber', 'SD.putNumber', (['"""LeftSensorPos"""', 'leftPos'], {}), "('LeftSensorPos', leftPos)\n", (9867, 9893), True, 'from wpilib.smartdashboard import SmartDashboard as SD\n'), ((9903, 9943), 'wpilib.smartdashboard.SmartDashboard.putNumber', 'SD.putNumber', (['"""RightSensorVel"""', 'rightVel'], {}), "('RightSensorVel', rightVel)\n", (9915, 9943), True, 'from wpilib.smartdashboard import SmartDashboard as SD\n'), ((9952, 9992), 'wpilib.smartdashboard.SmartDashboard.putNumber', 'SD.putNumber', (['"""RightSensorPos"""', 'rightPos'], {}), "('RightSensorPos', rightPos)\n", (9964, 9992), True, 'from wpilib.smartdashboard import SmartDashboard as SD\n'), ((10002, 10044), 
'wpilib.smartdashboard.SmartDashboard.putNumber', 'SD.putNumber', (['"""LeftVelDelta"""', 'leftVelDelta'], {}), "('LeftVelDelta', leftVelDelta)\n", (10014, 10044), True, 'from wpilib.smartdashboard import SmartDashboard as SD\n'), ((10053, 10095), 'wpilib.smartdashboard.SmartDashboard.putNumber', 'SD.putNumber', (['"""LeftPosDelta"""', 'leftPosDelta'], {}), "('LeftPosDelta', leftPosDelta)\n", (10065, 10095), True, 'from wpilib.smartdashboard import SmartDashboard as SD\n'), ((10105, 10149), 'wpilib.smartdashboard.SmartDashboard.putNumber', 'SD.putNumber', (['"""RightVelDelta"""', 'rightVelDelta'], {}), "('RightVelDelta', rightVelDelta)\n", (10117, 10149), True, 'from wpilib.smartdashboard import SmartDashboard as SD\n'), ((10158, 10202), 'wpilib.smartdashboard.SmartDashboard.putNumber', 'SD.putNumber', (['"""RightPosDelta"""', 'rightPosDelta'], {}), "('RightPosDelta', rightPosDelta)\n", (10170, 10202), True, 'from wpilib.smartdashboard import SmartDashboard as SD\n'), ((10212, 10256), 'wpilib.smartdashboard.SmartDashboard.putNumber', 'SD.putNumber', (['"""DifferenceVel"""', 'differenceVel'], {}), "('DifferenceVel', differenceVel)\n", (10224, 10256), True, 'from wpilib.smartdashboard import SmartDashboard as SD\n'), ((10265, 10309), 'wpilib.smartdashboard.SmartDashboard.putNumber', 'SD.putNumber', (['"""DifferencePos"""', 'differencePos'], {}), "('DifferencePos', differencePos)\n", (10277, 10309), True, 'from wpilib.smartdashboard import SmartDashboard as SD\n'), ((13512, 13597), 'wpilib.PIDController', 'wpilib.PIDController', (['self.kP', 'self.kI', 'self.kD', 'self.kF', 'self.ahrs'], {'output': 'self'}), '(self.kP, self.kI, self.kD, self.kF, self.ahrs, output=self\n )\n', (13532, 13597), False, 'import wpilib\n'), ((7395, 7425), 'wpilib.RobotDrive.limit', 'wpilib.RobotDrive.limit', (['speed'], {}), '(speed)\n', (7418, 7425), False, 'import wpilib\n'), ((7461, 7492), 'wpilib.RobotDrive.limit', 'wpilib.RobotDrive.limit', (['(-speed)'], {}), '(-speed)\n', (7484, 
7492), False, 'import wpilib\n')]
|
import psycopg2
def open_db(db_config):
    """
    Open a PostgreSQL session from a configuration dict.

    :param db_config: dict with keys "user", "password", "address",
        "port" and "db_name"
    :type db_config: dict
    :return: 2 objects - cursor-object and connect-object
    :rtype: tuple
    """
    connect = psycopg2.connect(
        dbname=db_config["db_name"],
        user=db_config["user"],
        password=db_config["password"],
        host=db_config["address"],
        port=db_config["port"],
    )
    return connect.cursor(), connect
def close_db(cursor, connect):
    """
    Close the database cursor and then the underlying connection.

    :param cursor: cursor-object to close first
    :param connect: connection-object to close second
    :type cursor: object
    :type connect: object
    """
    # Order matters: release the cursor before its connection.
    for resource in (cursor, connect):
        resource.close()
def get_user_by_chat_id(db_config, tg_chat_id):
    """
    Fetch all rows from the USERS table matching a Telegram chat id.

    :param db_config: db config dict (see open_db)
    :param tg_chat_id: Telegram chat id to look up
    :type db_config: dict
    :type tg_chat_id: int
    :return: list of matching rows
    :rtype: list
    """
    cursor, connect = open_db(db_config)
    # Fix: psycopg2 only supports the %s placeholder (and %(name)s) for
    # query parameters regardless of the Python type; "%i" raises
    # ValueError: unsupported format character 'i'.
    cursor.execute("SELECT * FROM \"USERS\" WHERE \"TG_CHAT_ID\" = (%s);", (tg_chat_id,))
    result = cursor.fetchall()
    close_db(cursor, connect)
    print("result {}".format(result))
    # Return the rows so callers can use them (previously only printed).
    return result
|
[
"psycopg2.connect"
] |
[((437, 525), 'psycopg2.connect', 'psycopg2.connect', ([], {'dbname': 'db_name', 'user': 'user', 'password': 'password', 'host': 'host', 'port': 'port'}), '(dbname=db_name, user=user, password=password, host=host,\n port=port)\n', (453, 525), False, 'import psycopg2\n')]
|
# This file is part of the pycalver project
# https://gitlab.com/mbarkhau/pycalver
#
# Copyright (c) 2019 <NAME> (<EMAIL>) - MIT License
# SPDX-License-Identifier: MIT
#
# pycalver/vcs.py (this file) is based on code from the
# bumpversion project: https://github.com/peritus/bumpversion
# Copyright (c) 2013-2014 <NAME> - MIT License
"""Minimal Git and Mercirial API.
If terminology for similar concepts differs between git and
mercurial, then the git terms are used. For example "fetch"
(git) instead of "pull" (hg) .
"""
import os
import logging
import tempfile
import typing as typ
import subprocess as sp
log = logging.getLogger("pycalver.vcs")
# Shell command templates per VCS. Keys use git terminology even for
# mercurial (e.g. 'fetch' maps to "hg pull"); the {path}/{tag} fields are
# filled in by VCS.__call__ via str.format.
VCS_SUBCOMMANDS_BY_NAME = {
    'git': {
        'is_usable'   : "git rev-parse --git-dir",
        'fetch'       : "git fetch",
        'ls_tags'     : "git tag --list",
        'status'      : "git status --porcelain",
        'add_path'    : "git add --update {path}",
        'commit'      : "git commit --file {path}",
        'tag'         : "git tag --annotate {tag} --message {tag}",
        'push_tag'    : "git push origin --follow-tags {tag}",
        'show_remotes': "git config --get remote.origin.url",
    },
    'hg': {
        'is_usable'   : "hg root",
        'fetch'       : "hg pull",
        'ls_tags'     : "hg tags",
        'status'      : "hg status -umard",
        'add_path'    : "hg add {path}",
        'commit'      : "hg commit --logfile {path}",
        'tag'         : "hg tag {tag} --message {tag}",
        'push_tag'    : "hg push {tag}",
        'show_remotes': "hg paths",
    },
}
# Environment mapping passed through to subprocess invocations.
Env = typ.Dict[str, str]
class VCS:
    """VCS abstraction for git and mercurial."""
    def __init__(self, name: str, subcommands: typ.Dict[str, str] = None):
        # Fall back to the builtin command templates for the named VCS.
        self.name = name
        if subcommands is None:
            self.subcommands = VCS_SUBCOMMANDS_BY_NAME[name]
        else:
            self.subcommands = subcommands
    def __call__(self, cmd_name: str, env: Env = None, **kwargs: str) -> str:
        """Invoke subcommand and return output."""
        cmd_tmpl = self.subcommands[cmd_name]
        cmd_str  = cmd_tmpl.format(**kwargs)
        # State-changing commands are logged at info level, queries at debug.
        if cmd_name in ("commit", "tag", "push_tag"):
            log.info(cmd_str)
        else:
            log.debug(cmd_str)
        output_data: bytes = sp.check_output(cmd_str.split(), env=env, stderr=sp.STDOUT)
        # TODO (mb 2018-11-15): Detect encoding of output?
        _encoding = "utf-8"
        return output_data.decode(_encoding)
    @property
    def is_usable(self) -> bool:
        """Detect availability of subcommand."""
        # Requires running from the repository root (looks for ./.git or ./.hg).
        if not os.path.exists(f".{self.name}"):
            return False
        cmd = self.subcommands['is_usable'].split()
        try:
            retcode = sp.call(cmd, stderr=sp.PIPE, stdout=sp.PIPE)
            return retcode == 0
        except OSError as e:
            if e.errno == 2:
                # git/mercurial is not installed.
                return False
            raise
    @property
    def has_remote(self) -> bool:
        # True when 'show_remotes' produces non-empty output; any failure
        # (e.g. no remote configured) is treated as "no remote".
        try:
            output = self('show_remotes')
            if output.strip() == "":
                return False
            return True
        except Exception:
            return False
    def fetch(self) -> None:
        """Fetch updates from remote origin."""
        if self.has_remote:
            self('fetch')
    def status(self, required_files: typ.Set[str]) -> typ.List[str]:
        """Get status lines."""
        status_output = self('status')
        status_items  = [line.split(" ", 1) for line in status_output.splitlines()]
        # Untracked files ("??") are only reported when they are required.
        return [
            filepath.strip()
            for status, filepath in status_items
            if filepath.strip() in required_files or status != "??"
        ]
    def ls_tags(self) -> typ.List[str]:
        """List vcs tags on all branches."""
        ls_tag_lines = self('ls_tags').splitlines()
        log.debug(f"ls_tags output {ls_tag_lines}")
        return [line.strip().split(" ", 1)[0] for line in ls_tag_lines]
    def add(self, path: str) -> None:
        """Add updates to be included in next commit."""
        try:
            self('add_path', path=path)
        except sp.CalledProcessError as ex:
            if "already tracked!" in str(ex):
                # mercurial
                return
            else:
                raise
    def commit(self, message: str) -> None:
        """Commit added files."""
        message_data = message.encode("utf-8")
        # delete=False so the VCS subprocess can reopen the file by name
        # (required on Windows); removed explicitly via os.unlink below.
        tmp_file = tempfile.NamedTemporaryFile("wb", delete=False)
        assert " " not in tmp_file.name
        fh: typ.IO[bytes]
        with tmp_file as fh:
            fh.write(message_data)
        env: Env = os.environ.copy()
        env['HGENCODING'] = "utf-8"
        self('commit', env=env, path=tmp_file.name)
        os.unlink(tmp_file.name)
    def tag(self, tag_name: str) -> None:
        """Create an annotated tag."""
        self('tag', tag=tag_name)
    def push(self, tag_name: str) -> None:
        """Push changes to origin."""
        if self.has_remote:
            self('push_tag', tag=tag_name)
    def __repr__(self) -> str:
        """Generate string representation."""
        return f"VCS(name='{self.name}')"
def get_vcs() -> VCS:
    """Detect the appropriate VCS for a repository.
    raises OSError if the directory doesn't use a supported VCS.
    """
    # Probe each supported VCS in declaration order; first usable one wins.
    for candidate in (VCS(name=vcs_name) for vcs_name in VCS_SUBCOMMANDS_BY_NAME):
        if candidate.is_usable:
            return candidate
    raise OSError("No such directory .git/ or .hg/ ")
|
[
"tempfile.NamedTemporaryFile",
"os.unlink",
"os.environ.copy",
"os.path.exists",
"subprocess.call",
"logging.getLogger"
] |
[((620, 653), 'logging.getLogger', 'logging.getLogger', (['"""pycalver.vcs"""'], {}), "('pycalver.vcs')\n", (637, 653), False, 'import logging\n'), ((4494, 4541), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""wb"""'], {'delete': '(False)'}), "('wb', delete=False)\n", (4521, 4541), False, 'import tempfile\n'), ((4694, 4711), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (4709, 4711), False, 'import os\n'), ((4808, 4832), 'os.unlink', 'os.unlink', (['tmp_file.name'], {}), '(tmp_file.name)\n', (4817, 4832), False, 'import os\n'), ((2601, 2632), 'os.path.exists', 'os.path.exists', (['f""".{self.name}"""'], {}), "(f'.{self.name}')\n", (2615, 2632), False, 'import os\n'), ((2748, 2792), 'subprocess.call', 'sp.call', (['cmd'], {'stderr': 'sp.PIPE', 'stdout': 'sp.PIPE'}), '(cmd, stderr=sp.PIPE, stdout=sp.PIPE)\n', (2755, 2792), True, 'import subprocess as sp\n')]
|
import wx
import images
from .generic_bitmap_button import GenericBitmapButton
from pubsub import pub
from datetime import datetime
class _ToolColor(wx.Panel):
    # Color tool button with a thin indicator line underneath that shows the
    # currently selected foreground color (black on creation).
    def __init__(self, parent):
        super().__init__(parent)
        self._init_ui()
        self.display_color('#000000')
    def _init_ui(self):
        """Stack the bitmap button above a 2px indicator line."""
        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        self.main_sizer.Add(GenericBitmapButton(self, 'tool_color'))
        self.color_indicator = wx.StaticLine(self, size=(-1, 2))
        self.main_sizer.Add(self.color_indicator, flag=wx.EXPAND)
        self.SetSizer(self.main_sizer)
    def display_color(self, color):
        """Show ``color`` (wx color spec, e.g. '#RRGGBB') on the indicator line."""
        self.color_indicator.SetBackgroundColour(color)
class TextEditorToolbar(wx.Panel):
    def __init__(self, parent):
        """Build the toolbar and wire its events.

        :param parent: the editor panel; stored as ``self.editor`` — the
            handlers below access its ``content_format``, ``format_content``
            and ``webview`` attributes.
        """
        super().__init__(parent)
        self.editor = parent
        self._init_ui()
        self._init_event()
    def _init_ui(self):
        """Create all toolbar widgets and lay them out horizontally."""
        self.main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.tool_font_name = wx.Choice(self, choices=['Helvetica', 'Arial', 'sans-serif'], size=(100, -1))
        self.tool_font_size = wx.Choice(self, choices=['12','13','14','16','18','24','36','48','72'], style=wx.CB_SORT, size=(50, -1))
        self.tool_bold = GenericBitmapButton(self, 'tool_bold')
        self.tool_italic = GenericBitmapButton(self, 'tool_italic')
        self.tool_underline = GenericBitmapButton(self, 'tool_underline')
        self.tool_color = _ToolColor(self)
        self.tool_background = GenericBitmapButton(self, 'tool_background')
        self.tool_quote = GenericBitmapButton(self, 'tool_quote')
        self.tool_code_block = GenericBitmapButton(self, 'tool_code_block')
        self.tool_bullet_list = GenericBitmapButton(self, 'tool_bullet_list')
        self.tool_ordered_list = GenericBitmapButton(self, 'tool_ordered_list')
        self.tool_align = wx.Choice(self, choices=[
            _("text_editor_toolbar.align_left"), _("text_editor_toolbar.align_center"),
            _("text_editor_toolbar.align_right"), _("text_editor_toolbar.justify_align")
        ], size=(100, -1))
        self.tool_time = GenericBitmapButton(self, 'tool_time')
        self.tool_info = GenericBitmapButton(self, 'tool_info')
        self.tool_full_screen = GenericBitmapButton(self, 'tool_full_screen')
        self.tool_more_action = GenericBitmapButton(self, 'tool_more_action')
        # Lay out left-to-right; border values tune spacing between groups.
        self.main_sizer.AddSpacer(2)
        self.main_sizer.Add(self.tool_font_name, flag=wx.RIGHT, border=5)
        self.main_sizer.Add(self.tool_font_size, flag=wx.RIGHT, border=5)
        self.main_sizer.Add(self.tool_align, flag=wx.RIGHT, border=5)
        self.main_sizer.Add(self.tool_bold, flag=wx.RIGHT, border=3)
        self.main_sizer.Add(self.tool_italic, flag=wx.RIGHT, border=4)
        self.main_sizer.Add(self.tool_underline, flag=wx.RIGHT, border=8)
        self.main_sizer.Add(self.tool_color, flag=wx.RIGHT, border=8)
        self.main_sizer.Add(self.tool_background, flag=wx.RIGHT, border=8)
        self.main_sizer.Add(self.tool_quote, flag=wx.RIGHT, border=8)
        self.main_sizer.Add(self.tool_code_block, flag=wx.RIGHT, border=8)
        self.main_sizer.Add(self.tool_bullet_list, flag=wx.RIGHT, border=8)
        self.main_sizer.Add(self.tool_ordered_list, flag=wx.RIGHT, border=20)
        self.main_sizer.Add(self.tool_time, flag=wx.RIGHT, border=9)
        self.main_sizer.Add(self.tool_info, flag=wx.RIGHT, border=9)
        self.main_sizer.Add(self.tool_full_screen, flag=wx.RIGHT, border=9)
        self.main_sizer.Add(self.tool_more_action)
        self.SetSizer(self.main_sizer)
def _init_event(self):
self.tool_font_name.Bind(wx.EVT_CHOICE, self._on_font_name_selected)
self.tool_font_size.Bind(wx.EVT_CHOICE, self._on_font_size_selected)
self.tool_bold.Bind(wx.EVT_BUTTON, self._on_bold_clicked)
self.tool_italic.Bind(wx.EVT_BUTTON, self._on_italic_clicked)
self.tool_underline.Bind(wx.EVT_BUTTON, self._on_underline_clicked)
self.tool_color.Bind(wx.EVT_BUTTON, self._on_fg_color_clicked)
self.tool_background.Bind(wx.EVT_BUTTON, self._on_bg_color_clicked)
self.tool_quote.Bind(wx.EVT_BUTTON, self._on_quote_clicked)
self.tool_code_block.Bind(wx.EVT_BUTTON, self._on_code_block_clicked)
self.tool_bullet_list.Bind(wx.EVT_BUTTON, self._on_bullet_list_clicked)
self.tool_ordered_list.Bind(wx.EVT_BUTTON, self._on_ordered_list_clicked)
self.tool_align.Bind(wx.EVT_CHOICE, self._on_align_selected)
self.tool_info.Bind(wx.EVT_BUTTON, self._on_info_clicked)
self.tool_time.Bind(wx.EVT_BUTTON, self._on_time_clicked)
self.tool_full_screen.Bind(wx.EVT_BUTTON, self._on_full_screen_clicked)
def _on_time_clicked(self, e):
current_time = datetime.now()
weekdays = [_("day1"), _("day2"), _("day3"), _("day4"), _("day5"), _("day6"), _("day7")]
ymd = current_time.strftime('%Y-%m-%d')
hms = current_time.strftime('%H:%M:%S')
weekday = weekdays[current_time.weekday()]
self.editor.webview.run_js('quill.insertTime', f"{ymd} {weekday} {hms}")
def _on_italic_clicked(self, e):
format_val = not self.editor.content_format['italic']
self.editor.format_content('italic', format_val)
self._display_italic_format()
def _display_italic_format(self):
bitmap = images.tool_italic_active.Bitmap if self.editor.content_format['italic'] else images.tool_italic.Bitmap
self.tool_italic.SetBitmap(bitmap)
def _on_underline_clicked(self, e):
format_val = not self.editor.content_format['underline']
self.editor.format_content('underline', format_val)
self._display_underline_format()
def _display_underline_format(self):
bitmap = images.tool_underline_active.Bitmap if self.editor.content_format['underline'] else images.tool_underline.Bitmap
self.tool_underline.SetBitmap(bitmap)
def _on_quote_clicked(self, e):
format_val = not self.editor.content_format['blockquote']
self.editor.format_content('blockquote', format_val)
self._display_quote_format()
def _display_quote_format(self):
bitmap = images.tool_quote_active.Bitmap if self.editor.content_format['blockquote'] else images.tool_quote.Bitmap
self.tool_quote.SetBitmap(bitmap)
def _on_bullet_list_clicked(self, e):
format_val = False if self.editor.content_format['list'] == 'bullet' else 'bullet'
self.editor.format_content('list', format_val)
self._display_list_format()
def _display_list_format(self):
format_val = self.editor.content_format['list']
if format_val is False:
self.tool_ordered_list.SetBitmap(images.tool_ordered_list.Bitmap)
self.tool_bullet_list.SetBitmap(images.tool_bullet_list.Bitmap)
elif format_val == 'bullet':
self.tool_ordered_list.SetBitmap(images.tool_ordered_list.Bitmap)
self.tool_bullet_list.SetBitmap(images.tool_bullet_list_active.Bitmap)
elif format_val == 'ordered':
self.tool_ordered_list.SetBitmap(images.tool_ordered_list_active.Bitmap)
self.tool_bullet_list.SetBitmap(images.tool_bullet_list.Bitmap)
def _on_ordered_list_clicked(self, e):
format_val = False if self.editor.content_format['list'] == 'ordered' else 'ordered'
self.editor.format_content('list', format_val)
self._display_list_format()
def _on_align_selected(self, e):
format_val = {
_("text_editor_toolbar.align_left"): False, _("text_editor_toolbar.align_center"): 'center',
_("text_editor_toolbar.align_right"): 'right', _("text_editor_toolbar.justify_align"): 'justify'
}.get(e.String, False)
self.editor.format_content('align', format_val)
def _display_align_format(self):
align_val = self.editor.content_format['align']
if isinstance(align_val, list):
align_val = align_val[0]
format_val = {
False: _("text_editor_toolbar.align_left"), 'center': _("text_editor_toolbar.align_center"),
'right': _("text_editor_toolbar.align_right"), 'justify': _("text_editor_toolbar.justify_align")
}.get(align_val, '左对齐')
self.tool_align.SetSelection(self.tool_align.GetItems().index(format_val))
def _on_info_clicked(self, e):
pass
def _on_full_screen_clicked(self, e):
if self.editor.is_full_screen:
self.editor.is_full_screen = False
self.tool_full_screen.SetBitmap(images.tool_full_screen.Bitmap)
else:
self.editor.is_full_screen = True
self.tool_full_screen.SetBitmap(images.tool_full_screen_active.Bitmap)
pub.sendMessage('note.full_screen',enable=self.editor.is_full_screen)
def _on_font_name_selected(self, e):
self.editor.format_content('font', e.String)
def _on_font_size_selected(self, e):
self.editor.format_content('size', f'{e.String}px')
def _on_bold_clicked(self, e):
format_val = not self.editor.content_format['bold']
self.editor.format_content('bold', format_val)
self._display_bold_format()
def _on_fg_color_clicked(self, e):
color = wx.GetColourFromUser(self, self.editor.content_format['color'] or '#000000').GetAsString(wx.C2S_HTML_SYNTAX)
self.editor.format_content('color', color)
self._display_color_format()
def _on_bg_color_clicked(self, e):
color = wx.GetColourFromUser(self, self.editor.content_format['background'] or '#ffffff').GetAsString(wx.C2S_HTML_SYNTAX)
self.editor.format_content('background', color)
self._display_background_format()
def _on_code_block_clicked(self, e):
format_val = not self.editor.content_format['code-block']
self.editor.format_content('code-block',format_val)
self._display_code_block_format()
def _display_bold_format(self):
bitmap = images.tool_bold_active.Bitmap if self.editor.content_format['bold'] else images.tool_bold.Bitmap
self.tool_bold.SetBitmap(bitmap)
def _display_code_block_format(self):
bitmap = images.tool_code_block_active.Bitmap if self.editor.content_format['code-block'] else images.tool_code_block.Bitmap
self.tool_code_block.SetBitmap(bitmap)
def _display_font_format(self):
format_val = self.editor.content_format['font']
if format_val is False:
index = 0
elif format_val in self.tool_font_name.GetItems():
index = self.tool_font_name.GetItems().index(self.editor.content_format['font'])
else:
index = self.tool_font_name.Append(format_val)
self.tool_font_name.SetSelection(index)
def _display_size_format(self):
format_val = self.editor.content_format['size']
if format_val is False:
index = 0
# todo handle em rem
elif format_val[:-2] in self.tool_font_size.GetItems():
index = self.tool_font_size.GetItems().index(format_val[:-2])
else:
index = self.tool_font_size.Append(format_val[:-2])
self.tool_font_size.SetSelection(index)
def _display_color_format(self):
self.tool_color.display_color(self.editor.content_format['color'] or '#000000')
def _display_background_format(self):
self.tool_background.SetBackgroundColour(self.editor.content_format['background'] or '#ffffff')
self.tool_background.Refresh()
def display_format(self, changed_format):
if 'bold' in changed_format:
self._display_bold_format()
if 'font' in changed_format:
self._display_font_format()
if 'italic' in changed_format:
self._display_italic_format()
if 'underline' in changed_format:
self._display_underline_format()
if 'blockquote' in changed_format:
self._display_quote_format()
if 'list' in changed_format:
self._display_list_format()
if 'size' in changed_format:
self._display_size_format()
if 'color' in changed_format:
self._display_color_format()
if 'background' in changed_format:
self._display_background_format()
if 'code-block' in changed_format:
self._display_code_block_format()
if 'align' in changed_format:
self._display_align_format()
|
[
"wx.StaticLine",
"wx.Choice",
"pubsub.pub.sendMessage",
"wx.BoxSizer",
"wx.GetColourFromUser",
"datetime.datetime.now"
] |
[((340, 364), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (351, 364), False, 'import wx\n'), ((465, 498), 'wx.StaticLine', 'wx.StaticLine', (['self'], {'size': '(-1, 2)'}), '(self, size=(-1, 2))\n', (478, 498), False, 'import wx\n'), ((930, 956), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (941, 956), False, 'import wx\n'), ((988, 1065), 'wx.Choice', 'wx.Choice', (['self'], {'choices': "['Helvetica', 'Arial', 'sans-serif']", 'size': '(100, -1)'}), "(self, choices=['Helvetica', 'Arial', 'sans-serif'], size=(100, -1))\n", (997, 1065), False, 'import wx\n'), ((1096, 1212), 'wx.Choice', 'wx.Choice', (['self'], {'choices': "['12', '13', '14', '16', '18', '24', '36', '48', '72']", 'style': 'wx.CB_SORT', 'size': '(50, -1)'}), "(self, choices=['12', '13', '14', '16', '18', '24', '36', '48',\n '72'], style=wx.CB_SORT, size=(50, -1))\n", (1105, 1212), False, 'import wx\n'), ((4775, 4789), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4787, 4789), False, 'from datetime import datetime\n'), ((8761, 8831), 'pubsub.pub.sendMessage', 'pub.sendMessage', (['"""note.full_screen"""'], {'enable': 'self.editor.is_full_screen'}), "('note.full_screen', enable=self.editor.is_full_screen)\n", (8776, 8831), False, 'from pubsub import pub\n'), ((9271, 9347), 'wx.GetColourFromUser', 'wx.GetColourFromUser', (['self', "(self.editor.content_format['color'] or '#000000')"], {}), "(self, self.editor.content_format['color'] or '#000000')\n", (9291, 9347), False, 'import wx\n'), ((9524, 9609), 'wx.GetColourFromUser', 'wx.GetColourFromUser', (['self', "(self.editor.content_format['background'] or '#ffffff')"], {}), "(self, self.editor.content_format['background'] or\n '#ffffff')\n", (9544, 9609), False, 'import wx\n')]
|
#!/usr/bin/env python3.6
# Is a work in progress
# TODO: split to multiple files
import glob
import os
import sys
import carla
import zmq
import random
import time
import os
ROTATION_PARAMS = ("pitch", "yaw", "roll")
COORDINATES_PARAMS = ("velocity", "acceleration", "angular_velocity", "location")
CONTROL_PARAMS = (
"throttle",
"steer",
"brake",
"hand_brake",
"reverse",
"manual_gear_shift",
"gear",
)
GNSS_PARAMS = ("latitude", "longitude", "altitude")
IMU_PARAMS = ("compass",)
IMU_COORDINATE_PARAMS = ("accelerometer", "gyroscope")
POS_TICK_INTERVAL = str(1 / 50) # seconds
actor_list = []
class Factory:
def __init__(self, world, blueprint_library):
self.world = world
self.blueprint_library = blueprint_library
def get_vehicles(self):
crossing_bp = self.blueprint_library.filter("vehicle.nissan.micra")[0]
crossing_bp.set_attribute("color", "255,255,255") # Vehicle that cross the street
vehicles = {
"ego": self.world.spawn_actor(
self.blueprint_library.filter("vehicle.audi.etron")[0],
carla.Transform(
carla.Location(x=41.5, y=262, z=1), carla.Rotation(yaw=90)
),
),
"parked": self.world.spawn_actor(
self.blueprint_library.filter("vehicle.nissan.patrol")[0],
carla.Transform(
carla.Location(x=46.5, y=271, z=1), carla.Rotation(yaw=90)
),
),
"crossing": self.world.spawn_actor(
crossing_bp,
carla.Transform(carla.Location(x=6, y=302, z=1), carla.Rotation(yaw=0)),
),
"walker": self.world.spawn_actor(
self.blueprint_library.filter("walker.pedestrian.0001")[0],
carla.Transform(
carla.Location(x=37.5, y=295, z=0), carla.Rotation(yaw=90)
),
),
"bicycle": self.world.spawn_actor(
self.blueprint_library.filter("vehicle.bh.crossbike")[0],
carla.Transform(
carla.Location(x=38.5, y=297, z=0), carla.Rotation(yaw=90)
),
),
}
# Remember that location origin is on the center of the vehicle
# Bounding box extent is only half of the real extent
# Config the position of the vehicles and VRUs
end_of_road = 300
ego_length = vehicles["ego"].bounding_box.extent.y * 2
ego_location = vehicles["ego"].get_location()
ego_location.y = end_of_road - 42 + ego_length / 2
vehicles["ego"].set_location(ego_location)
parked_length = vehicles["parked"].bounding_box.extent.y * 2
parked_location = vehicles["parked"].get_location()
parked_location.y = end_of_road - 42 + 20 + parked_length / 2
vehicles["parked"].set_location(parked_location)
actor_list.extend(vehicles.values())
return vehicles
def get_camera(self, vehicle):
veh_location = vehicle.get_location()
# 0 = FL, 1 = FR, 2 = BL, 3 = BR
wheels = [w.position/100 for w in vehicle.get_physics_control().wheels]
middle_rear_axle_x = (wheels[2].x + wheels[3].x) / 2
middle_rear_axle_y = (wheels[2].y + wheels[3].y) / 2
axle_to_center = ((middle_rear_axle_x-veh_location.x)**2 + (middle_rear_axle_y-veh_location.y)**2)**(1/2)
wheel_radius = vehicle.get_physics_control().wheels[2].radius
# Relative to vehicle location
ground = wheels[2].z-wheel_radius/100
location = carla.Location(
x=3.37-axle_to_center, # 3.37 camera position Fabio
z=1.39-veh_location.z+ground
)
rotation = carla.Rotation(roll=-0.22, pitch=-0.73, yaw=0.46)
camera_bp = self.blueprint_library.find("sensor.camera.rgb")
camera_bp.set_attribute("image_size_x", "1280")
camera_bp.set_attribute("image_size_y", "720")
camera_bp.set_attribute("sensor_tick", str(1/30))
camera_transform = carla.Transform(location, rotation) # vehicle coordinates since it is attached to the vehicle, if not attached to an actor use global coordinates and the sensor do not move anymore
camera = self.world.spawn_actor(camera_bp, camera_transform, attach_to=vehicle)
actor_list.append(camera)
return camera
def get_gnss(self, vehicle):
gnss_transform = carla.Transform(carla.Location(x=0.5, z=0.5))
gnss_bp = self.blueprint_library.find("sensor.other.gnss")
gnss_bp.set_attribute("sensor_tick", POS_TICK_INTERVAL)
gnss = self.world.spawn_actor(gnss_bp, gnss_transform, attach_to=vehicle)
actor_list.append(gnss)
return gnss
def get_imu(self, vehicle):
imu_transform = carla.Transform(carla.Location(x=0.5, z=0.5))
imu_bp = self.blueprint_library.find("sensor.other.imu")
imu_bp.set_attribute("sensor_tick", POS_TICK_INTERVAL)
imu = self.world.spawn_actor(imu_bp, imu_transform, attach_to=vehicle)
actor_list.append(imu)
return imu
def main():
n_output = len([d for d in os.listdir() if d.startswith("out")])
out_folder = f"out{n_output:02d}"
os.makedirs(out_folder, exist_ok=True)
pos_file = open(f"{out_folder}/pos.csv", "w")
gnss_file = open(f"{out_folder}/gnss.csv", "w")
imu_file = open(f"{out_folder}/imu.csv", "w")
client = carla.Client("localhost", 2000)
client.set_timeout(5.0)
world = client.get_world()
if world.get_map().name != "Carissma":
client.load_world("Carissma")
world = client.reload_world()
blueprint_library = world.get_blueprint_library()
spectator = world.get_spectator()
spectator.set_transform(
carla.Transform(
carla.Location(x=30.8, y=274.4, z=50), carla.Rotation(pitch=-90),
)
)
factory = Factory(world, blueprint_library)
vehs = factory.get_vehicles()
tracked_veh = vehs["ego"]
def write_pos_labels():
labels = ["timestamp"]
labels.extend(
f"{attr}_{c}" for attr in COORDINATES_PARAMS for c in ("x", "y", "z")
)
labels.extend(f"rotation_{attr}" for attr in ROTATION_PARAMS)
labels.extend(f"control_{attr}" for attr in CONTROL_PARAMS)
pos_file.write(",".join(labels) + "\n")
def write_pos_values(w_snapshot):
coordinates_attrs = (
getattr(tracked_veh, "get_" + p)() for p in COORDINATES_PARAMS
)
# Get timestamp value
values = [w_snapshot.platform_timestamp]
# Get COORDINATES_PARAMS values
values.extend(
getattr(attr, c) for attr in coordinates_attrs for c in ("x", "y", "z")
)
# Get ROTATION_PARAMS values
rotation = tracked_veh.get_transform().rotation
values.extend(getattr(rotation, attr) for attr in ROTATION_PARAMS)
# Get CONTROL_PARAMS values
control = tracked_veh.get_control()
values.extend(getattr(control, attr) for attr in CONTROL_PARAMS)
pos_file.write(",".join(map(str, values)) + "\n")
def write_gnss_labels():
gnss_file.write(",".join(("timestamp",) + GNSS_PARAMS) + "\n")
def write_imu_labels():
coordinates_attrs = tuple(
f"{attr}_{c}" for attr in IMU_COORDINATE_PARAMS for c in ("x", "y", "z")
)
imu_file.write(",".join(("timestamp",) + coordinates_attrs + IMU_PARAMS) + "\n")
def write_gnss_values(data):
snapshot = world.get_snapshot()
write_pos_values(snapshot) # Small gambiarra
values = [snapshot.platform_timestamp]
values.extend(getattr(data, attr) for attr in GNSS_PARAMS)
gnss_file.write(",".join(map(str, values)) + "\n")
def write_imu_values(data):
values = [world.get_snapshot().platform_timestamp]
values.extend(getattr(data, attr) for attr in IMU_PARAMS)
attrs = (getattr(data, attr) for attr in IMU_COORDINATE_PARAMS)
values.extend(getattr(attr, c) for attr in attrs for c in ("x", "y", "z"))
imu_file.write(",".join(map(str, values)) + "\n")
try:
print("Initiating writing of position data...")
write_pos_labels()
print("Initiating writing of gnss data...")
gnss = factory.get_gnss(tracked_veh)
write_gnss_labels()
gnss.listen(write_gnss_values)
print("Initiating writing of imu data...")
imu = factory.get_imu(tracked_veh)
write_imu_labels()
imu.listen(write_imu_values)
print("Initiating camera recording...")
time.sleep(1) # Let car to to the ground
camera = factory.get_camera(tracked_veh)
camera.listen(
lambda image: image.save_to_disk(f"{out_folder}/{image.frame:06d}.png")
)
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")
print("Waiting for data...")
while True:
speed = socket.recv()
tracked_veh.set_velocity(carla.Vector3D(0, float(speed), 0))
socket.send(b'1') # received!
finally:
print("Destroying actors...")
vehicles_list = list()
for actor in actor_list:
if isinstance(actor, carla.libcarla.Vehicle): # Avoid segfault
actor.set_autopilot(False)
vehicles_list.append(actor)
continue # Let vehicles for later (segfault)
actor.destroy()
time.sleep(0.5)
# Destroy vehicles
for v in vehicles_list:
v.destroy()
pos_file.close()
gnss_file.close()
imu_file.close()
print("done.")
if __name__ == "__main__":
main()
|
[
"carla.Transform",
"os.makedirs",
"time.sleep",
"carla.Client",
"carla.Rotation",
"carla.Location",
"os.listdir",
"zmq.Context"
] |
[((5300, 5338), 'os.makedirs', 'os.makedirs', (['out_folder'], {'exist_ok': '(True)'}), '(out_folder, exist_ok=True)\n', (5311, 5338), False, 'import os\n'), ((5506, 5537), 'carla.Client', 'carla.Client', (['"""localhost"""', '(2000)'], {}), "('localhost', 2000)\n", (5518, 5537), False, 'import carla\n'), ((3642, 3715), 'carla.Location', 'carla.Location', ([], {'x': '(3.37 - axle_to_center)', 'z': '(1.39 - veh_location.z + ground)'}), '(x=3.37 - axle_to_center, z=1.39 - veh_location.z + ground)\n', (3656, 3715), False, 'import carla\n'), ((3796, 3845), 'carla.Rotation', 'carla.Rotation', ([], {'roll': '(-0.22)', 'pitch': '(-0.73)', 'yaw': '(0.46)'}), '(roll=-0.22, pitch=-0.73, yaw=0.46)\n', (3810, 3845), False, 'import carla\n'), ((4113, 4148), 'carla.Transform', 'carla.Transform', (['location', 'rotation'], {}), '(location, rotation)\n', (4128, 4148), False, 'import carla\n'), ((8714, 8727), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8724, 8727), False, 'import time\n'), ((8941, 8954), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (8952, 8954), False, 'import zmq\n'), ((9619, 9634), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (9629, 9634), False, 'import time\n'), ((4516, 4544), 'carla.Location', 'carla.Location', ([], {'x': '(0.5)', 'z': '(0.5)'}), '(x=0.5, z=0.5)\n', (4530, 4544), False, 'import carla\n'), ((4886, 4914), 'carla.Location', 'carla.Location', ([], {'x': '(0.5)', 'z': '(0.5)'}), '(x=0.5, z=0.5)\n', (4900, 4914), False, 'import carla\n'), ((5877, 5914), 'carla.Location', 'carla.Location', ([], {'x': '(30.8)', 'y': '(274.4)', 'z': '(50)'}), '(x=30.8, y=274.4, z=50)\n', (5891, 5914), False, 'import carla\n'), ((5916, 5941), 'carla.Rotation', 'carla.Rotation', ([], {'pitch': '(-90)'}), '(pitch=-90)\n', (5930, 5941), False, 'import carla\n'), ((5220, 5232), 'os.listdir', 'os.listdir', ([], {}), '()\n', (5230, 5232), False, 'import os\n'), ((1169, 1203), 'carla.Location', 'carla.Location', ([], {'x': '(41.5)', 'y': '(262)', 'z': 
'(1)'}), '(x=41.5, y=262, z=1)\n', (1183, 1203), False, 'import carla\n'), ((1205, 1227), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(90)'}), '(yaw=90)\n', (1219, 1227), False, 'import carla\n'), ((1436, 1470), 'carla.Location', 'carla.Location', ([], {'x': '(46.5)', 'y': '(271)', 'z': '(1)'}), '(x=46.5, y=271, z=1)\n', (1450, 1470), False, 'import carla\n'), ((1472, 1494), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(90)'}), '(yaw=90)\n', (1486, 1494), False, 'import carla\n'), ((1638, 1669), 'carla.Location', 'carla.Location', ([], {'x': '(6)', 'y': '(302)', 'z': '(1)'}), '(x=6, y=302, z=1)\n', (1652, 1669), False, 'import carla\n'), ((1671, 1692), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(0)'}), '(yaw=0)\n', (1685, 1692), False, 'import carla\n'), ((1885, 1919), 'carla.Location', 'carla.Location', ([], {'x': '(37.5)', 'y': '(295)', 'z': '(0)'}), '(x=37.5, y=295, z=0)\n', (1899, 1919), False, 'import carla\n'), ((1921, 1943), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(90)'}), '(yaw=90)\n', (1935, 1943), False, 'import carla\n'), ((2152, 2186), 'carla.Location', 'carla.Location', ([], {'x': '(38.5)', 'y': '(297)', 'z': '(0)'}), '(x=38.5, y=297, z=0)\n', (2166, 2186), False, 'import carla\n'), ((2188, 2210), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(90)'}), '(yaw=90)\n', (2202, 2210), False, 'import carla\n')]
|
"""
Test the abilities of the limit filter.
This is not about query parsing, but rather
handling once we have the filter.
"""
import py.test
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.filters.limit import limit
from tiddlyweb.filters import parse_for_filters, recursive_filter, FilterError
tiddlers = [Tiddler('1'), Tiddler('c'), Tiddler('a'), Tiddler('b')]
def test_simple_limit():
limited_tiddlers = limit(tiddlers, count=2)
assert ['1', 'c'] == [tiddler.title for tiddler in limited_tiddlers]
def test_ranged_limit():
limited_tiddlers = limit(tiddlers, index=1, count=2)
assert ['c', 'a'] == [tiddler.title for tiddler in limited_tiddlers]
def test_negative_limit():
with py.test.raises(ValueError):
limit(tiddlers, index=-1, count=2)
def test_exception():
filter, _ = parse_for_filters('limit=-1,2')
with py.test.raises(FilterError):
recursive_filter(filter, tiddlers)
|
[
"tiddlyweb.filters.recursive_filter",
"tiddlyweb.model.tiddler.Tiddler",
"tiddlyweb.filters.limit.limit",
"tiddlyweb.filters.parse_for_filters"
] |
[((321, 333), 'tiddlyweb.model.tiddler.Tiddler', 'Tiddler', (['"""1"""'], {}), "('1')\n", (328, 333), False, 'from tiddlyweb.model.tiddler import Tiddler\n'), ((335, 347), 'tiddlyweb.model.tiddler.Tiddler', 'Tiddler', (['"""c"""'], {}), "('c')\n", (342, 347), False, 'from tiddlyweb.model.tiddler import Tiddler\n'), ((349, 361), 'tiddlyweb.model.tiddler.Tiddler', 'Tiddler', (['"""a"""'], {}), "('a')\n", (356, 361), False, 'from tiddlyweb.model.tiddler import Tiddler\n'), ((363, 375), 'tiddlyweb.model.tiddler.Tiddler', 'Tiddler', (['"""b"""'], {}), "('b')\n", (370, 375), False, 'from tiddlyweb.model.tiddler import Tiddler\n'), ((427, 451), 'tiddlyweb.filters.limit.limit', 'limit', (['tiddlers'], {'count': '(2)'}), '(tiddlers, count=2)\n', (432, 451), False, 'from tiddlyweb.filters.limit import limit\n'), ((576, 609), 'tiddlyweb.filters.limit.limit', 'limit', (['tiddlers'], {'index': '(1)', 'count': '(2)'}), '(tiddlers, index=1, count=2)\n', (581, 609), False, 'from tiddlyweb.filters.limit import limit\n'), ((833, 864), 'tiddlyweb.filters.parse_for_filters', 'parse_for_filters', (['"""limit=-1,2"""'], {}), "('limit=-1,2')\n", (850, 864), False, 'from tiddlyweb.filters import parse_for_filters, recursive_filter, FilterError\n'), ((758, 792), 'tiddlyweb.filters.limit.limit', 'limit', (['tiddlers'], {'index': '(-1)', 'count': '(2)'}), '(tiddlers, index=-1, count=2)\n', (763, 792), False, 'from tiddlyweb.filters.limit import limit\n'), ((911, 945), 'tiddlyweb.filters.recursive_filter', 'recursive_filter', (['filter', 'tiddlers'], {}), '(filter, tiddlers)\n', (927, 945), False, 'from tiddlyweb.filters import parse_for_filters, recursive_filter, FilterError\n')]
|
from datetime import datetime
from django.db import models
from apps.users.models import BaseModel
from apps.organizations.models import Teacher
from apps.organizations.models import CourseOrg
from DjangoUeditor.models import UEditorField
# Create your models here.
#订单表
class Order(models.Model):
order_number = models.CharField(max_length=64, verbose_name="订单号")
status_choices = ((0, '未支付'), (1, '已支付'))
order_status = models.IntegerField(choices=status_choices, default=0, verbose_name="支付状态")
course = models.ForeignKey(to='Course', on_delete=models.CASCADE, verbose_name="课程名")
userid = models.CharField(max_length=60, verbose_name="用户编号")
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = "课程订单"
verbose_name_plural = verbose_name
def __str__(self):
return "课程订单"
class Course(BaseModel):
teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE, verbose_name="讲师")
course_org = models.ForeignKey(CourseOrg, null=True, blank=True, on_delete=models.CASCADE, verbose_name="课程机构")
name = models.CharField(verbose_name="课程名", max_length=50)
desc = models.CharField(verbose_name="课程描述",max_length=300)
learn_times = models.IntegerField(default=0, verbose_name="学习时长(分钟数)")
degree = models.CharField(verbose_name="难度", choices=(("cj","初级"), ("zj","中级"), ("gj","高级")), max_length=2)
students = models.IntegerField(default=0, verbose_name="学习人数")
fav_nums = models.IntegerField(default=0, verbose_name="收藏人数")
click_nums = models.IntegerField(default=0, verbose_name="点击数")
notice = models.CharField(verbose_name="课程公告", max_length=300, default="")
category = models.CharField(default="后端开发", max_length=20, verbose_name="课程类别")
detail = UEditorField(verbose_name="课程详情", width=600, height=300, imagePath="courses/ueditor/images/",
filePath="courses/ueditor/files/", default="")
image = models.ImageField(upload_to="courses/%Y/%m", verbose_name="封面图", max_length=100)
needpay = models.BooleanField(default=False, verbose_name="是否付费课程")
price = models.IntegerField(default=0, verbose_name="价格")
class Meta:
verbose_name = "课程信息"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
def lesson_nums(self):
return self.lesson_set.all().count()#统计课程章节数
#管理系统内显示图片而非src路径
def show_image(self):
from django.utils.safestring import mark_safe
return mark_safe("<img src='{}' height=125px width=222px>".format(self.image.url))
show_image.short_description = "图片"
#链接直接跳到课程本身
def go_to(self):
from django.utils.safestring import mark_safe
return mark_safe("<a href='/course/{}'>跳转</a>".format(self.id))
go_to.short_description = "跳转"
class CourseTag(BaseModel):
course = models.ForeignKey(Course, on_delete=models.CASCADE, verbose_name="课程")
tag = models.CharField(max_length=100, verbose_name="标签")
class Meta:
verbose_name = "课程标签"
verbose_name_plural = verbose_name
def __str__(self):
return self.tag
class Lesson(BaseModel):
teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE, verbose_name="讲师", null=True)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
name = models.CharField(max_length=100, verbose_name="章节名")
learn_times = models.IntegerField(default=0, verbose_name="学习时长(分钟数)")
class Meta:
verbose_name = "课程章节"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Video(BaseModel):
teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE, verbose_name="讲师", null=True)
course = models.ForeignKey(Course, on_delete=models.CASCADE, null=True)
lesson = models.ForeignKey(Lesson, verbose_name="章节", on_delete=models.CASCADE)
name = models.CharField(max_length=100, verbose_name="视频名")
learn_times = models.IntegerField(default=0, verbose_name="学习时长(分钟数)")
url = models.CharField(max_length=1000, verbose_name="访问地址")
class Meta:
verbose_name = "视频"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class CourseResource(BaseModel):
teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE, verbose_name="讲师", null=True)
course = models.ForeignKey(Course, on_delete=models.CASCADE, verbose_name="课程")
name = models.CharField(max_length=100, verbose_name="名称")
file = models.FileField(upload_to="course/resource//%Y/%M", verbose_name="下载地址", max_length=200)
class Meta:
verbose_name = "课程资源"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class CourseHomework(BaseModel):
teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE, verbose_name="讲师", null=True)
course = models.ForeignKey(Course, on_delete=models.CASCADE, verbose_name="课程", default='')
name = models.CharField(max_length=100, verbose_name="名称", default='')
class Meta:
verbose_name = "课程作业"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class CourseHomeworkDetail(BaseModel):
teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE, verbose_name="讲师", null=True)
course = models.ForeignKey(Course, on_delete=models.CASCADE, null=True)
name = models.ForeignKey(CourseHomework, on_delete=models.CASCADE, null=False, default='', verbose_name="所属作业")
question = models.CharField(max_length=100, verbose_name="题目")
cone = models.CharField(max_length=100, verbose_name="选项A")
ctwo = models.CharField(max_length=100, verbose_name="选项B")
cthree = models.CharField(max_length=100, verbose_name="选项C")
cfour = models.CharField(max_length=100, verbose_name="选项D")
answer = models.CharField(verbose_name="答案", choices=(("A","A"), ("B","B"), ("C","C"), ("D","D")), max_length=2)
jiexi = models.CharField(max_length=100, verbose_name="解析")
class Meta:
verbose_name = "课程作业题目"
verbose_name_plural = verbose_name
# def __str__(self):
# return "课程作业题目"
|
[
"django.db.models.FileField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"DjangoUeditor.models.UEditorField",
"django.db.models.BooleanField",
"django.db.models.ImageField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((321, 372), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'verbose_name': '"""订单号"""'}), "(max_length=64, verbose_name='订单号')\n", (337, 372), False, 'from django.db import models\n'), ((438, 513), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'status_choices', 'default': '(0)', 'verbose_name': '"""支付状态"""'}), "(choices=status_choices, default=0, verbose_name='支付状态')\n", (457, 513), False, 'from django.db import models\n'), ((527, 603), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""Course"""', 'on_delete': 'models.CASCADE', 'verbose_name': '"""课程名"""'}), "(to='Course', on_delete=models.CASCADE, verbose_name='课程名')\n", (544, 603), False, 'from django.db import models\n'), ((617, 669), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)', 'verbose_name': '"""用户编号"""'}), "(max_length=60, verbose_name='用户编号')\n", (633, 669), False, 'from django.db import models\n'), ((685, 748), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'datetime.now', 'verbose_name': '"""添加时间"""'}), "(default=datetime.now, verbose_name='添加时间')\n", (705, 748), False, 'from django.db import models\n'), ((926, 997), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Teacher'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""讲师"""'}), "(Teacher, on_delete=models.CASCADE, verbose_name='讲师')\n", (943, 997), False, 'from django.db import models\n'), ((1015, 1118), 'django.db.models.ForeignKey', 'models.ForeignKey', (['CourseOrg'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.CASCADE', 'verbose_name': '"""课程机构"""'}), "(CourseOrg, null=True, blank=True, on_delete=models.\n CASCADE, verbose_name='课程机构')\n", (1032, 1118), False, 'from django.db import models\n'), ((1125, 1176), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""课程名"""', 'max_length': '(50)'}), "(verbose_name='课程名', max_length=50)\n", (1141, 1176), False, 'from 
django.db import models\n'), ((1188, 1241), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""课程描述"""', 'max_length': '(300)'}), "(verbose_name='课程描述', max_length=300)\n", (1204, 1241), False, 'from django.db import models\n'), ((1259, 1315), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""学习时长(分钟数)"""'}), "(default=0, verbose_name='学习时长(分钟数)')\n", (1278, 1315), False, 'from django.db import models\n'), ((1329, 1435), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""难度"""', 'choices': "(('cj', '初级'), ('zj', '中级'), ('gj', '高级'))", 'max_length': '(2)'}), "(verbose_name='难度', choices=(('cj', '初级'), ('zj', '中级'), (\n 'gj', '高级')), max_length=2)\n", (1345, 1435), False, 'from django.db import models\n'), ((1443, 1494), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""学习人数"""'}), "(default=0, verbose_name='学习人数')\n", (1462, 1494), False, 'from django.db import models\n'), ((1510, 1561), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""收藏人数"""'}), "(default=0, verbose_name='收藏人数')\n", (1529, 1561), False, 'from django.db import models\n'), ((1579, 1629), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""点击数"""'}), "(default=0, verbose_name='点击数')\n", (1598, 1629), False, 'from django.db import models\n'), ((1643, 1708), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""课程公告"""', 'max_length': '(300)', 'default': '""""""'}), "(verbose_name='课程公告', max_length=300, default='')\n", (1659, 1708), False, 'from django.db import models\n'), ((1724, 1792), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""后端开发"""', 'max_length': '(20)', 'verbose_name': '"""课程类别"""'}), "(default='后端开发', max_length=20, verbose_name='课程类别')\n", (1740, 1792), False, 'from django.db import models\n'), 
((1806, 1951), 'DjangoUeditor.models.UEditorField', 'UEditorField', ([], {'verbose_name': '"""课程详情"""', 'width': '(600)', 'height': '(300)', 'imagePath': '"""courses/ueditor/images/"""', 'filePath': '"""courses/ueditor/files/"""', 'default': '""""""'}), "(verbose_name='课程详情', width=600, height=300, imagePath=\n 'courses/ueditor/images/', filePath='courses/ueditor/files/', default='')\n", (1818, 1951), False, 'from DjangoUeditor.models import UEditorField\n'), ((1985, 2070), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""courses/%Y/%m"""', 'verbose_name': '"""封面图"""', 'max_length': '(100)'}), "(upload_to='courses/%Y/%m', verbose_name='封面图', max_length=100\n )\n", (2002, 2070), False, 'from django.db import models\n'), ((2080, 2137), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""是否付费课程"""'}), "(default=False, verbose_name='是否付费课程')\n", (2099, 2137), False, 'from django.db import models\n'), ((2150, 2199), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""价格"""'}), "(default=0, verbose_name='价格')\n", (2169, 2199), False, 'from django.db import models\n'), ((2897, 2967), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Course'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""课程"""'}), "(Course, on_delete=models.CASCADE, verbose_name='课程')\n", (2914, 2967), False, 'from django.db import models\n'), ((2978, 3029), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""标签"""'}), "(max_length=100, verbose_name='标签')\n", (2994, 3029), False, 'from django.db import models\n'), ((3210, 3296), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Teacher'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""讲师"""', 'null': '(True)'}), "(Teacher, on_delete=models.CASCADE, verbose_name='讲师',\n null=True)\n", (3227, 3296), False, 'from django.db import models\n'), ((3306, 3357), 
'django.db.models.ForeignKey', 'models.ForeignKey', (['Course'], {'on_delete': 'models.CASCADE'}), '(Course, on_delete=models.CASCADE)\n', (3323, 3357), False, 'from django.db import models\n'), ((3369, 3421), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""章节名"""'}), "(max_length=100, verbose_name='章节名')\n", (3385, 3421), False, 'from django.db import models\n'), ((3440, 3496), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""学习时长(分钟数)"""'}), "(default=0, verbose_name='学习时长(分钟数)')\n", (3459, 3496), False, 'from django.db import models\n'), ((3676, 3762), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Teacher'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""讲师"""', 'null': '(True)'}), "(Teacher, on_delete=models.CASCADE, verbose_name='讲师',\n null=True)\n", (3693, 3762), False, 'from django.db import models\n'), ((3772, 3834), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Course'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), '(Course, on_delete=models.CASCADE, null=True)\n', (3789, 3834), False, 'from django.db import models\n'), ((3848, 3918), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Lesson'], {'verbose_name': '"""章节"""', 'on_delete': 'models.CASCADE'}), "(Lesson, verbose_name='章节', on_delete=models.CASCADE)\n", (3865, 3918), False, 'from django.db import models\n'), ((3930, 3982), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""视频名"""'}), "(max_length=100, verbose_name='视频名')\n", (3946, 3982), False, 'from django.db import models\n'), ((4001, 4057), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""学习时长(分钟数)"""'}), "(default=0, verbose_name='学习时长(分钟数)')\n", (4020, 4057), False, 'from django.db import models\n'), ((4068, 4122), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'verbose_name': 
'"""访问地址"""'}), "(max_length=1000, verbose_name='访问地址')\n", (4084, 4122), False, 'from django.db import models\n'), ((4310, 4396), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Teacher'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""讲师"""', 'null': '(True)'}), "(Teacher, on_delete=models.CASCADE, verbose_name='讲师',\n null=True)\n", (4327, 4396), False, 'from django.db import models\n'), ((4406, 4476), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Course'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""课程"""'}), "(Course, on_delete=models.CASCADE, verbose_name='课程')\n", (4423, 4476), False, 'from django.db import models\n'), ((4488, 4539), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""名称"""'}), "(max_length=100, verbose_name='名称')\n", (4504, 4539), False, 'from django.db import models\n'), ((4551, 4644), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""course/resource//%Y/%M"""', 'verbose_name': '"""下载地址"""', 'max_length': '(200)'}), "(upload_to='course/resource//%Y/%M', verbose_name='下载地址',\n max_length=200)\n", (4567, 4644), False, 'from django.db import models\n'), ((4829, 4915), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Teacher'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""讲师"""', 'null': '(True)'}), "(Teacher, on_delete=models.CASCADE, verbose_name='讲师',\n null=True)\n", (4846, 4915), False, 'from django.db import models\n'), ((4925, 5011), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Course'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""课程"""', 'default': '""""""'}), "(Course, on_delete=models.CASCADE, verbose_name='课程',\n default='')\n", (4942, 5011), False, 'from django.db import models\n'), ((5019, 5082), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""名称"""', 'default': '""""""'}), "(max_length=100, verbose_name='名称', default='')\n", (5035, 5082), False, 'from 
django.db import models\n'), ((5277, 5363), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Teacher'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""讲师"""', 'null': '(True)'}), "(Teacher, on_delete=models.CASCADE, verbose_name='讲师',\n null=True)\n", (5294, 5363), False, 'from django.db import models\n'), ((5373, 5435), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Course'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), '(Course, on_delete=models.CASCADE, null=True)\n', (5390, 5435), False, 'from django.db import models\n'), ((5447, 5555), 'django.db.models.ForeignKey', 'models.ForeignKey', (['CourseHomework'], {'on_delete': 'models.CASCADE', 'null': '(False)', 'default': '""""""', 'verbose_name': '"""所属作业"""'}), "(CourseHomework, on_delete=models.CASCADE, null=False,\n default='', verbose_name='所属作业')\n", (5464, 5555), False, 'from django.db import models\n'), ((5567, 5618), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""题目"""'}), "(max_length=100, verbose_name='题目')\n", (5583, 5618), False, 'from django.db import models\n'), ((5630, 5682), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""选项A"""'}), "(max_length=100, verbose_name='选项A')\n", (5646, 5682), False, 'from django.db import models\n'), ((5694, 5746), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""选项B"""'}), "(max_length=100, verbose_name='选项B')\n", (5710, 5746), False, 'from django.db import models\n'), ((5760, 5812), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""选项C"""'}), "(max_length=100, verbose_name='选项C')\n", (5776, 5812), False, 'from django.db import models\n'), ((5825, 5877), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""选项D"""'}), "(max_length=100, verbose_name='选项D')\n", (5841, 5877), False, 'from django.db 
import models\n'), ((5892, 6003), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""答案"""', 'choices': "(('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'))", 'max_length': '(2)'}), "(verbose_name='答案', choices=(('A', 'A'), ('B', 'B'), ('C',\n 'C'), ('D', 'D')), max_length=2)\n", (5908, 6003), False, 'from django.db import models\n'), ((6008, 6059), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""解析"""'}), "(max_length=100, verbose_name='解析')\n", (6024, 6059), False, 'from django.db import models\n')]
|
#!/usr/bin/env python
""" Calculates a lookup table with optimal switching times for an isolated matrix-type DAB three-phase rectifier.
This file calculates a 3D lookup table of relative switching times for an IMDAB3R, which are optimized for minimal
conduction losses. In discontinuous conduction mode (DCM) analytical equations for the optimal
operating conditions are used and numerical optimization is used in continuous conduction mode (CCM).
"""
import sys
import argparse
import time
import numpy as np
from scipy.optimize import fmin_slsqp
import hw_functions as hw
from csv_io import export_csv
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def solver_to_sw_times(x):
    """Map solver coordinates [d1, d2, d_dc, shift] to relative switching times.

    :param x: array-like with duty cycles d1, d2, the dc-side duty cycle d_dc
              and the phase shift, as used by the numeric solver
    :return: list of four relative switching times
    """
    duty_1 = x[0]
    duty_2 = x[1]
    # clamp the secondary-side quantities to their physically meaningful ranges
    phase_shift = np.clip(x[3], -0.25, 0.25)  # -0.25 ... 0.25
    duty_dc = np.clip(x[2], 0, 0.5)           # dc-side transformer voltage duty cycle, 0 ... 0.5
    times = [0.5 - duty_1, 0.5 - duty_2, 0, 0]
    times[2] = 0.5 - (duty_1 / 2 + phase_shift + duty_dc / 2)
    times[3] = -(duty_1 / 2 + phase_shift - duty_dc / 2)
    return times
def solver_to_sw_times_jac(x, u=None):
    """Jacobian of solver_to_sw_times w.r.t. (d1, d2, d_dc, shift).

    The mapping is affine, so the Jacobian is a constant matrix.
    :param x: solver coordinates (unused; kept for a solver-compatible signature)
    :param u: unused; kept for interface compatibility
    :return: 4x4 numpy array; row i holds the gradient of switching time i
    """
    row_t1 = [-1.0, 0.0, 0.0, 0.0]    # t[0] depends only on d1
    row_t2 = [0.0, -1.0, 0.0, 0.0]    # t[1] depends only on d2
    row_t3 = [-0.5, 0.0, -0.5, -1.0]
    row_t4 = [-0.5, 0.0, 0.5, -1.0]
    return np.array([row_t1, row_t2, row_t3, row_t4])
def sw_times_to_solver(s):
    """Inverse of solver_to_sw_times: switching times -> solver coordinates.

    :param s: array-like with four relative switching times
    :return: numpy array [d1, d2, d_dc, shift], clipped to the solver's bounds
    """
    duty_1 = 0.5 - s[0]
    duty_2 = 0.5 - s[1]
    # recover the phase shift first, clip, then derive the dc-side duty cycle
    # from the (possibly clipped) shift so both stay mutually consistent
    shift = np.clip(-0.5 * (s[2] + s[3] - 0.5 + duty_1), -0.25, 0.25)
    duty_dc = np.clip(2 * (s[3] + duty_1 / 2 + shift), 0, 0.5)
    return np.array([duty_1, duty_2, duty_dc, shift])
# helper functions for DCM to create switching time vectors for given duty cycles
def align_fe(d_1, d_2, d_dc):
    """Build switching times with the falling edges of primary and dc-side voltage aligned.

    :param d_1: duty cycle for phase pair 1
    :param d_2: duty cycle for phase pair 2
    :param d_dc: duty cycle of the dc-side transformer voltage
    :return: list of four relative switching times
    """
    t_first = 0.5 - d_1
    return [t_first, 0.5 - d_2, t_first, d_dc - 0.5 + t_first]
def align_re(d_1, d_2, d_dc):
    """Build switching times with the rising edges of primary and dc-side voltage aligned.

    :param d_1: duty cycle for phase pair 1
    :param d_2: duty cycle for phase pair 2
    :param d_dc: duty cycle of the dc-side transformer voltage
    :return: numpy array of four relative switching times (last entry 0 by construction)
    """
    duties = np.array([d_1, d_2, d_dc, 0.5])
    return 0.5 - duties
# check that obtained switching times achieved the required output current
def check_solution(u, t, i_dc_ref):
    """Verify that switching times t realize the requested dc current with zero reactive power.

    :param u: AC and DC voltages [u_ab, u_bc, u_pn]
    :param t: relative switching times to verify
    :param i_dc_ref: requested dc output current
    :return: 0 on success; +100 added for a dc-current error, +10 for a reactive-power error
    """
    _, _, _, i_dc, q = hw.dab_io_currents(u, t)
    i_dc_err = (i_dc - i_dc_ref)
    q_err = q
    # if possible: normalize both errors to the reference current.
    # NOTE(fix): the guard must be on i_dc_ref, not on i_dc_err -- guarding on the
    # error allowed a division by zero (i_dc_ref == 0 with a positive current error)
    # and skipped normalization entirely for negative errors.
    if (i_dc_ref > 1e-6):
        i_dc_err = i_dc_err / i_dc_ref
        q_err = q_err / i_dc_ref
    ret = 0
    if (np.abs(i_dc_err) > 1e-3):
        print('invalid solution, i_dc_err: ', i_dc_err)
        ret = ret + 100
    if (np.abs(q_err) > 1e-3):
        print('invalid solution, q_err: ', q_err)
        ret = ret + 10
    return ret
# in max output current DCM, there is a u_pn for which both d_1 and d_dc become 0.5 for a given ac voltage (aka grid
# voltage angle wt) and achieve no reactive power at the mains input (q=0)
# this is the boundary case where the solution switches from aligned rising edges (u_pn lower than equivalent AC
# voltage) to falling edge aligned
def dcm_max_u_pn_boundary(u):
    """Boundary dc voltage separating rising-edge from falling-edge aligned DCM solutions.

    At this u_pn both d_1 and d_dc reach 0.5 for the given ac voltages while the
    mains reactive power stays zero (see comments above this function).
    :param u: ac voltages with mains in sector 1 (u[0] = u_ab, u[1] = u_bc)
    :return: boundary value of u_pn
    """
    u_ab = u[0]  # in sector 1
    u_bc = u[1]
    numerator = 2 * (u_ab**2 + u_ab*u_bc + u_bc**2)
    return numerator / (2*u_ab + u_bc)
# calculates max dc output current possible with DCM for given voltages u
def dcm_i_dc_max(u):
    """Maximum dc output current achievable in DCM for the given voltages.

    Uses closed-form (Mathematica-derived) expressions. Below the boundary
    voltage from dcm_max_u_pn_boundary the solution is rising-edge aligned,
    above it falling-edge aligned.

    :param u: [u_ab, u_bc, u_pn], mains voltage assumed in sector 1
    :return: [t, i_dc] - switching times at the DCM limit and the resulting dc current
    """
    u_ab = u[0] # in sector 1
    u_bc = u[1]
    u_pn = u[2]
    if u_pn < dcm_max_u_pn_boundary(u):
        if u_bc < 1e-4:
            # wt = 0, calculate d_2 required to draw the same charge from phase a and phase b (to achieve q=0)
            uac = u_ab + u_bc # by definition of the three-phase voltages
            d_dc = 0.5
            d_1 = d_dc * u_pn / uac # this is 1/2 - t1
            # this case is simple: only during 0 < t < d_1 we are connected to the mains and the transformer
            # current rises linearly (due to d_dc = 0.5 the dc voltage must be on all the time)
            # therefore we just have to make d_2 so long that the area of the current triangle is split in half:
            d_2 = np.sqrt(0.5) * d_1
            t = align_re(d_1, d_2, d_dc)
        else:
            # analytic solution found by mathematica, works fine from 0 < wt <= 30deg
            det = (u_bc**2)*(u_ab + 2*u_bc)*(u_ab + u_bc - u_pn)*(u_pn**2)*(2*(u_ab**2 + u_ab*u_bc + u_bc**2) -
                                                                           (2*u_ab + u_bc)*u_pn)
            t1 = (u_ab*(u_ab+u_bc-u_pn)*(2*(u_ab**2 + u_ab*u_bc + u_bc**2) - (2*u_ab + u_bc)*u_pn) +
                  np.sqrt(det)) / (4*u_ab*(u_ab+u_bc)*(u_ab**2+u_ab*u_bc+u_bc**2) -
                                   2*(u_ab-u_bc)*(2*u_ab**2+3*u_ab*u_bc+2*u_bc**2)*u_pn)
            x = (u_pn/(1-2*t1) - u_ab) / u_bc # that's d_2 / d_1, this ensures volt-sec balance
            t2 = 0.5 - x*(0.5-t1)
            t = [t1, t2, 0, 0]
    else:
        # analytic solution found by mathematica
        det =(u_ab**2 - u_bc**2)*(u_ab - u_pn)*u_pn*(2*(u_ab**2 + u_ab*u_bc + u_bc**2) - (2*u_ab + u_bc)*u_pn)
        t2 = (-(u_ab**2)*u_bc+(u_bc**3)-np.sqrt(det))/(2*(u_bc**2)*(-u_ab+u_bc) + 2*(2*(u_ab**2)+u_bc**2)*u_pn -
                                                       2*(2*u_ab+u_bc)*(u_pn**2))
        t4 = -1/2 + (u_ab/2 + u_bc*(1/2-t2)) / u_pn # ensure volt sec balance
        t = [0, t2, 0, t4]
    # evaluate the resulting dc output current for the derived switching times
    _, _, _, i_dc, _ = hw.dab_io_currents(u, t)
    return [t, i_dc]
# check if DCM can be used to achieve an output current of i_dc_ref at operating point u
def check_dcm(u, i_dc_ref, do_print=False):
    """Try to realize i_dc_ref in discontinuous conduction mode (DCM).

    :param u: AC and DC voltages [u_ab, u_bc, u_pn]
    :param i_dc_ref: requested dc output current
    :param do_print: enable debug output
    :return: switching times if DCM can deliver i_dc_ref, otherwise None
    """
    # maximum dc output current DCM can deliver at these voltages
    t_opt, i_dc_dcm_max = dcm_i_dc_max(u)
    if do_print:
        print('i_dc_dcm_max: ', i_dc_dcm_max)
    if i_dc_ref > i_dc_dcm_max:
        # requested current exceeds the DCM limit -> caller must fall back to CCM
        return None
    if do_print:
        print('using DCM solution')
    # DCM is feasible; it yields the lowest rms transformer current, so use it.
    # The three duty cycles scale with the square root of the current ratio.
    scale = np.sqrt(i_dc_ref / i_dc_dcm_max)
    # recover the duty cycles from the max-current switching times, then rescale
    d_1 = scale * (0.5 - t_opt[0])
    d_2 = scale * (0.5 - t_opt[1])
    d_dc = scale * (0.5 - t_opt[2] + t_opt[3])
    # t_opt[3] == 0 marks a rising-edge aligned solution
    if t_opt[3] == 0:
        return align_re(d_1, d_2, d_dc)
    return align_fe(d_1, d_2, d_dc)
# derive optimal switching times in CCM for given voltages u with an output current i_dc_ref
def calc_ccm(u, i_dc_ref, i_dc_nom, t0, do_print=False):
    """Numerically find minimum-rms-current switching times in continuous conduction mode (CCM).

    Minimizes the squared rms transformer current with SLSQP, subject to equality
    constraints (requested dc output current, zero reactive mains power) and
    inequality constraints (positive switched currents, i.e. ZVS).

    :param u: AC and DC voltages [u_ab, u_bc, u_pn]
    :param i_dc_ref: requested dc output current
    :param i_dc_nom: normalization current (improves solver conditioning)
    :param t0: initial switching times for the solver
    :param do_print: set to True for verbose solver output
    :return: [opt_s, i_mode] - optimized switching times and status code (0 = success)
    """
    # objective function: normalized squared rms transformer current
    def obj(x):
        s = solver_to_sw_times(x)
        # note: Imperfections result from the fact that we can only consider a finite amount of harmonics. To avoid
        # problems with the numeric solver we select a rather high n here as the computational burden is low.
        i_rms_sqr, _, _ = hw.rms_current_harm(u, s, n=200)
        f = (i_rms_sqr / (i_dc_nom ** 2))
        return f
    # gradient of the objective (chain rule through the coordinate mapping)
    def gradient(x):
        t = solver_to_sw_times(x)
        J = solver_to_sw_times_jac(x)
        di_dt = hw.rms_current_grad(u, t, n=200)
        res = np.dot(di_dt, J) / (i_dc_nom ** 2)
        return res
    # equality constraint functions: demanded dc output current (ie active power) and reactive power
    def eqcons(x):
        t = solver_to_sw_times(x)
        _, _, _, i_dc, q = hw.dab_io_currents(u, t)
        i_dc_err = (i_dc - i_dc_ref) / i_dc_nom
        q_err = q / i_dc_nom
        return [i_dc_err, q_err]
    # inequality constraints: ensure ZVS
    def ieqcons(x):
        s = solver_to_sw_times(x)
        i, _ = hw.switched_current(u, s)
        return np.array(i) / i_dc_nom # pos values are ZVS, which is what the inequality constraints ensure
    x0 = sw_times_to_solver(t0)
    b = [(0, 0.5), (0, 0.5), (0, 0.5), (-0.24, 0.24)] # bounds
    if do_print:
        iprint = 1
    else:
        iprint = 0
    # call the solver
    opt_x, fx, its, i_mode, s_mode = fmin_slsqp(obj, x0, f_eqcons=eqcons, fprime=gradient, f_ieqcons=ieqcons,
                                               bounds=b, full_output=True, iter=1000,
                                               iprint=iprint)
    opt_s = solver_to_sw_times(opt_x)
    if do_print or i_mode != 0:
        print('opt terminated in {0:} iterations with {1:}: {2:} '.format(its, i_mode, s_mode))
    # double-check the converged point: re-evaluate the constraints explicitly
    eqc = eqcons(opt_x)
    ieqc = ieqcons(opt_x)
    if (np.max(np.abs((eqc))) > 1e-3) or (np.min(ieqc) < -1e-6):
        i_mode = 100
        print('Constraint violation detected: eq cons={0:} ieq cons = {1:}'.format(eqc, ieqc))
    return [opt_s, i_mode] # i_mode is zero on success or positive otherwise
# Maximum output current in Triangular Current Mode (TCM) of a conventional dc/dc DAB according to
# KRISMER AND KOLAR: CLOSED FORM SOLUTION FOR MINIMUM CONDUCTION LOSS MODULATION OF DAB CONVERTERS
# in IEEE Transactions on Power Electronics, vol. 27, no. 1, pp. 174-188, Jan. 2012
# https://doi.org/10.1109/TPEL.2011.2157976
# Note: TCM in a conventional DAB is like DCM in the IMDAB3R
def krismer_i_dc_tcm_max(u):
    """Maximum dc output current in triangular current mode (TCM) of a conventional DAB.

    Closed-form limit from Krismer & Kolar, IEEE Trans. Power Electron.,
    vol. 27, no. 1, 2012 (https://doi.org/10.1109/TPEL.2011.2157976).
    TCM of a conventional DAB corresponds to DCM of the IMDAB3R.

    :param u: [u_ab, u_bc, u_pn]; must represent wt=0 or wt=30deg
    :return: maximum normalized dc output current achievable in TCM
    """
    # a conventional-DAB equivalent exists only at wt=30deg (u_ab == u_bc) or wt=0 (u_bc == 0)
    assert ((np.abs(u[0] - u[1]) < 1e-6) or (np.abs(u[1]) <= 1e-6))
    u_ac = u[0] + u[1]  # ac-side voltage, mains in sector 1
    u_pn = u[2]
    if u_pn < 1e-6:
        # zero output voltage: the secondary-side voltage cannot shape the
        # transformer current, so TCM is not possible at all
        return 0
    # normalized voltages in Krismer's notation (v_ref = u_ac)
    v_min = np.min([u_ac, u_pn]) / u_ac
    v_max = np.max([u_ac, u_pn]) / u_ac
    # maximum power transferable in triangular (ZCS) current mode
    p_tcm_max = np.pi / 2 * v_min ** 2 * (v_max - v_min) / v_max
    # rescale back to the dc output current in this file's notation
    return p_tcm_max * u_ac ** 2 / (2 * np.pi * u_pn)
# calc optimal switching times according to
# KRISMER AND KOLAR: CLOSED FORM SOLUTION FOR MINIMUM CONDUCTION LOSS MODULATION OF DAB CONVERTERS
# in IEEE Transactions on Power Electronics, vol. 27, no. 1, pp. 174-188, Jan. 2012
# https://doi.org/10.1109/TPEL.2011.2157976
def krismer(u, i_dc_ref):
    """Closed-form optimal switching times for conventional-DAB operation (wt = 30deg).

    Implements the minimum conduction loss modulation of Krismer & Kolar,
    IEEE Trans. Power Electron., vol. 27, no. 1, 2012
    (https://doi.org/10.1109/TPEL.2011.2157976), selecting between triangular
    current mode (TCM), optimal transition mode (OTM) and conventional phase
    shift modulation (CPM) depending on the requested power.

    :param u: [u_ab, u_bc, u_pn] with u_ab == u_bc (wt = 30deg)
    :param i_dc_ref: requested dc output current
    :return: list of four relative switching times
    """
    # abbreviation, assuming mains voltage in sector 1
    u_ac = u[0] + u[1]
    u_pn = u[2]
    # check that we are at wt=30deg, otherwise we can't operate like a conventional DAB
    # Note: For wt=0 the transformer current looks the same, however, we need to determine an additional
    # duty cycle d_2 (switching time t_2) for phase b. Even though this does not change the shape of the current (as
    # u_bc is zero) changing d_2 will result in different reactive power and a function solver is required)
    assert (np.abs(u[0] - u[1]) < 1e-6)
    # normalized quantities according to Krismer's notation
    v_ref = u_ac
    v_A = np.min([u_ac, u_pn]) / v_ref
    v_B = np.max([u_ac, u_pn]) / v_ref
    p = u_pn * i_dc_ref * 2 * np.pi / (v_ref**2)
    # calc max power for which we use triangular current mode (discontinuous conduction mode)
    p_tcm_max = np.pi/2 * v_A**2 * (v_B-v_A) / v_B
    if (p <= p_tcm_max) and (v_A != v_B):
        if v_A < 0.001:
            # we have no output voltage hence output power is 0 for any output current so we cannot use krismers eq.
            # however this is trivial, we always operate at max phase shift and create the required transformer current
            # amplitude
            d_a = 0.5 # get as much current to secondary as we can
            phi = 0.25 # i.e. 90deg
            d_b = 0.5 - np.sqrt(0.25 - 2*i_dc_ref)
            assert (d_b <= 0.5) # if this fails we demanded too much current
            # print('I0, d_b: ', d_b)
        else:
            # standard case as considered by krismer
            phi = np.pi * np.sqrt((v_B - v_A) * p/np.pi / (2 * v_A**2 * v_B))
            d_a = phi / np.pi * v_B / (v_B - v_A)
            d_b = phi / np.pi * v_A / (v_B - v_A)
            phi = phi / (2*np.pi) # we use 0..1 for 0..2pi
            # print('TCM, phi: ',phi)
    else:
        # try OTM, equations are copy/paste from the paper mentioned above
        e1 = -(2*v_A**2 + v_B**2)/(v_A**2 + v_B**2)
        e2 = (v_A**3*v_B + p/np.pi * (v_A**2 + v_B**2)) / (v_A**3 * v_B + v_A * v_B**3)
        e3 = ((8 * v_A**7 * v_B**5)
              - (64 * (p/np.pi)**3 * (v_A**2 + v_B**2)**3)
              - (p/np.pi * v_A**4 * v_B**2 * (4*v_A**2 + v_B**2) * (4*v_A**2 + 13*v_B**2))
              + (16 * (p/np.pi)**2 * v_A * (v_A**2 + v_B**2)**2 * (4*v_A**2*v_B + v_B**3)) )
        e4 = ((8 * v_A**9 * v_B**3)
              - ( 8 * (p/np.pi)**3 * (8*v_A**2 - v_B**2) * (v_A**2 + v_B**2)**2 )
              - (12 * p/np.pi * v_A**6 * v_B**2 * (4*v_A**2 + v_B**2) )
              + ( 3 * (p/np.pi)**2 * v_A**3 * v_B * (4*v_A**2 + v_B**2) * (8*v_A**2 + 5*v_B**2) )
              + ((3*p/np.pi)**1.5 * v_A * v_B**2 * np.sqrt(e3)) )
        e5 = ((2 * v_A**6 * v_B**2
               + (2 * p/np.pi * (4*v_A**2 + v_B**2) * (p/np.pi * (v_A**2 + v_B**2) - v_A**3 * v_B) )) /
              (3 * v_A * v_B * (v_A**2 + v_B**2) * e4**(1/3.0)) )
        e6 = ((4 * (v_A**3*v_B**2 + 2*v_A**5) + 4 * p/np.pi * (v_A**2*v_B + v_B**3))
              / (v_A * (v_A**2 + v_B**2)**2))
        e7 = ( (e4**(1/3.0) / (6 * v_A**3 * v_B + 6 * v_A * v_B**3) )
               + (e1**2 / 4) - (2*e2 / 3) + e5)
        e8 = 0.25 * ( (-e1**3 - e6)/np.sqrt(e7) + 3*e1**2 - 8*e2 - 4*e7 )
        d_a = 0.5
        d_b = 0.25 * (2*np.sqrt(e7) - 2*np.sqrt(e8) - e1)
        if (d_b <= 0.5):
            # print('OTM, d_b: ', d_b)
            # unlike krismer's our phi is 0..1 for 0..360deg, he uses 0..2pi
            phi = 0.5 * (0.5 - np.sqrt(d_b*(1-d_b) - p/(np.pi*v_A*v_B) ))
            # print('OTM, phi: ', phi)
        else:
            # OTM did not yield a valid solution, so use phase shift modulation
            d_a = 0.5
            d_b = 0.5
            phi = 0.5 * (0.5 - np.sqrt(0.25 - p/(np.pi*v_A*v_B)))
            # print('CPM, phi: ', phi)
    # now transform the duty cycles and phase shifts back to our switching times
    if u_pn < u_ac:
        t_opt = [0.5 - d_b, 0.5 - d_b, 0, 0] # by def. u1 and u2 switch at the same time
        t_opt[3] = -0.5 * (2 * phi - t_opt[0] - d_a + 0.5)
        t_opt[2] = -d_a + 0.5 + t_opt[3]
    else :
        t_opt = [0.5 - d_a, 0.5 - d_a, 0, 0] # by def. u1 and u2 switch at the same time
        t_opt[3] = -0.5 * (2 * phi - t_opt[0] - d_b + 0.5)
        t_opt[2] = -d_b + 0.5 + t_opt[3]
    return t_opt
#
# i_dc_ref
def calc_t_opt(u, i_dc_ref, i_dc_nom, t0, do_print=True):
    """Calculate optimal (min. rms current) switching times t_opt for given operating conditions
    :param u: AC and DC voltages
    :param i_dc_ref: requested dc output current, f*L = 1 is assumed
    :param i_dc_nom: normalization for i_dc_ref (to improve convergence of numerical solver)
    :param t0: initial conditions for numerical solver
    :param do_print: set to true for debug output
    :return: [t_opt, mode]: t_opt - array with rel switching times, mode: error code (0 = success)
    """
    # guard clause: no output current requested -> produce no transformer voltages at all
    if i_dc_ref < 0.001:
        return [[0.5, 0.5, 0.5, 0], 0]
    # guard clause: (almost) no output voltage -> use max duty cycle (0.5) and
    # phase shift (0.25) on the secondary; identical primary duty cycles d_1 = d_2
    # yield mains input currents of zero amplitude while d_1 sets the required
    # transformer current
    if u[2] <= 0.01:
        d_pri = 0.5 - np.sqrt(0.25 - 2 * i_dc_ref)
        return [solver_to_sw_times([d_pri, d_pri, 0.5, 0.25]), 0]
    # u_ab == u_bc: the analytic solution for a conventional DAB applies
    if np.abs(u[0] - u[1]) < 1e-6:
        return [krismer(u, i_dc_ref), 0]
    # prefer DCM whenever it can deliver the requested current
    t_dcm = check_dcm(u, i_dc_ref, do_print)
    if t_dcm is not None:
        return [t_dcm, 0]
    # i_dc_ref is too high for DCM -> numeric CCM optimizer
    return calc_ccm(u, i_dc_ref, i_dc_nom, t0, do_print)
def calc_table(resolution, i_dc_max, u_pn_max, lut_fn, log_fn=None):
    """ Calculate 3D lookup table (LUT)
    @params:
        resolution  - Required  : Number of sampling points in each dimension (int)
        i_dc_max    - Required  : Highest normalized dc current in final LUT (float)
        u_pn_max    - Required  : Highest normalized output voltage in final LUT (float)
        lut_fn      - Required  : File name where LUT will be stored
        log_fn      - Optional  : Log file name, stderr if this is None
    """
    grid_res = [resolution, resolution, resolution]
    if log_fn is not None:
        log_file = open(log_fn, mode='w')
    else:
        log_file = sys.stderr
    # sampling grids for the three LUT dimensions
    i_dc_range = np.linspace(0, i_dc_max, num=grid_res[0])
    u_pn_range = np.linspace(0, u_pn_max, num=grid_res[1])
    u_bc_range = np.linspace(0, 0.5, num=grid_res[2])
    opt_mode = np.zeros(grid_res)  # optimizer return code (error code, 0 means success)
    grid_res.append(4)  # four relative switching times per grid point
    sw_times = np.zeros(grid_res)
    n_not_solved = 0
    log_file.write('resolution: {}\n'.format(resolution))
    log_file.write('i_dc_max: {}\n'.format(i_dc_max))
    log_file.write('u_pn_max: {}\n'.format(u_pn_max))
    # FIX: time.clock() was deprecated since Python 3.3 and removed in 3.8;
    # use a monotonic performance counter with an explicit start reference.
    t_start = time.perf_counter()
    total_pts = len(i_dc_range) * len(u_pn_range) * len(u_bc_range)
    pts_done = 0
    # sweep the 3D grid, u_bc must be the inner most loop for convergence reasons
    for (k1, i_dc) in enumerate(i_dc_range):
        log_file.write('---------------------\n')
        for (k2, u_pn) in enumerate(u_pn_range):
            log_file.write('--------\n')
            log_file.write('k1={0:} k2={1:}\n'.format(k1,k2))
            last_t_opt = []
            # traverse starting with u_bc=0.5 for which we operate like a conventional DAB where we have a closed
            # analytic solution. This is then used as starting point for the next point
            for (k3, u_bc) in reversed(list(enumerate(u_bc_range))):
                u_ac = 1  # this is our normalization ref voltage
                u_ab = u_ac - u_bc
                u = [u_ab, u_bc, u_pn]
                log_file.write('u={0:} i_dc={1:.7f}\n'.format(u, i_dc))
                t_opt, m = calc_t_opt(u, i_dc, i_dc, last_t_opt, do_print=False)
                if m == 0:
                    # double check the validity of the obtained solution
                    m = check_solution(u, t_opt, i_dc)
                opt_mode[k1, k2, k3] = m
                sw_times[k1, k2, k3, 0:4] = t_opt
                if m != 0:
                    n_not_solved += 1
                    # mark point in table so the user can investigate the problem
                    log_file.write('^ not solved\n')
                else:
                    last_t_opt = t_opt  # keep a copy of our initial conditions
                # show a progress bar in the terminal
                pts_done = pts_done + 1
                suffix = 'elapsed: {}s'.format(int(time.perf_counter() - t_start))
                print_progress(pts_done, total_pts, prefix='Progress', suffix=suffix, decimals=1, bar_length=80)
    log_file.write('\nnumber of points not solved: {}\n'.format(n_not_solved))
    if log_fn is not None:
        log_file.close()
    sys.stderr.write('\nnumber of points not solved: {}\n'.format(n_not_solved))
    # write LUT data to file
    export_csv(lut_fn, grid_res, i_dc_range, u_pn_range, u_bc_range, sw_times)
# Snippet taken from: https://gist.github.com/aubricus/f91fb55dc6ba5557fbab06119420dd6a
# Print iterations progress
def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        bar_length  - Optional  : character length of bar (Int)
    """
    fraction = iteration / float(total)
    # nested format spec renders the percentage with the requested precision
    percents = '{0:.{1}f}'.format(100 * fraction, decimals)
    n_filled = int(round(bar_length * iteration / float(total)))
    bar = '█' * n_filled + '-' * (bar_length - n_filled)
    # write to stderr instead of stdout as stdout can be used as log file
    sys.stderr.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
    if iteration == total:
        # finished: terminate the progress line
        sys.stdout.write('\n')
        sys.stdout.flush()
if __name__ == "__main__":
    # command-line entry point: parse the options, clamp the current range to a
    # feasible value and build the lookup table
    parser = argparse.ArgumentParser(description='LUT calculation for IMDAB3R converters')
    parser.add_argument('-o', '--output', type=str, help='output file name', default='recent.csv')
    parser.add_argument('-l', '--log', type=str, help='log file name, goes to stdout if no file is given')
    parser.add_argument('-n', type=int, help='LUT resolution (no of sampling points per dimension).', default=30)
    parser.add_argument('-i-dc', type=float, help='Max. normalized output current', default=0.07)
    parser.add_argument('-u-pn', type=float, help='Max. normalized output voltage w.r.t. primary', default=1.33)
    cli_args = parser.parse_args()
    n_samples = int(cli_args.n)
    i_dc_limit = cli_args.i_dc
    if i_dc_limit > 0.12:
        # currents beyond 0.12 are not physically achievable by this converter
        print('i_dc values above 0.12 are not feasible, limiting range of LUT')
        i_dc_limit = 0.12
    calc_table(n_samples, i_dc_limit, cli_args.u_pn, cli_args.output, cli_args.log)
|
[
"sys.stdout.write",
"numpy.abs",
"csv_io.export_csv",
"argparse.ArgumentParser",
"hw_functions.rms_current_grad",
"numpy.clip",
"hw_functions.dab_io_currents",
"sys.stdout.flush",
"scipy.optimize.fmin_slsqp",
"time.clock",
"numpy.max",
"numpy.linspace",
"hw_functions.rms_current_harm",
"numpy.min",
"numpy.dot",
"hw_functions.switched_current",
"numpy.zeros",
"numpy.array",
"sys.stderr.write",
"numpy.sqrt"
] |
[((955, 982), 'numpy.clip', 'np.clip', (['shift', '(-0.25)', '(0.25)'], {}), '(shift, -0.25, 0.25)\n', (962, 982), True, 'import numpy as np\n'), ((1012, 1033), 'numpy.clip', 'np.clip', (['d_dc', '(0)', '(0.5)'], {}), '(d_dc, 0, 0.5)\n', (1019, 1033), True, 'import numpy as np\n'), ((1345, 1438), 'numpy.array', 'np.array', (['[[-1, 0, 0, 0], [0, -1, 0, 0], [-1 / 2, 0, -1 / 2, -1], [-1 / 2, 0, 1 / 2, -1]]'], {}), '([[-1, 0, 0, 0], [0, -1, 0, 0], [-1 / 2, 0, -1 / 2, -1], [-1 / 2, 0,\n 1 / 2, -1]])\n', (1353, 1438), True, 'import numpy as np\n'), ((1727, 1754), 'numpy.clip', 'np.clip', (['shift', '(-0.25)', '(0.25)'], {}), '(shift, -0.25, 0.25)\n', (1734, 1754), True, 'import numpy as np\n'), ((1803, 1824), 'numpy.clip', 'np.clip', (['d_dc', '(0)', '(0.5)'], {}), '(d_dc, 0, 0.5)\n', (1810, 1824), True, 'import numpy as np\n'), ((1834, 1845), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1842, 1845), True, 'import numpy as np\n'), ((2161, 2208), 'numpy.array', 'np.array', (['[0.5 - d_1, 0.5 - d_2, 0.5 - d_dc, 0]'], {}), '([0.5 - d_1, 0.5 - d_2, 0.5 - d_dc, 0])\n', (2169, 2208), True, 'import numpy as np\n'), ((2358, 2382), 'hw_functions.dab_io_currents', 'hw.dab_io_currents', (['u', 't'], {}), '(u, t)\n', (2376, 2382), True, 'import hw_functions as hw\n'), ((5429, 5453), 'hw_functions.dab_io_currents', 'hw.dab_io_currents', (['u', 't'], {}), '(u, t)\n', (5447, 5453), True, 'import hw_functions as hw\n'), ((6125, 6157), 'numpy.sqrt', 'np.sqrt', (['(i_dc_ref / i_dc_dcm_max)'], {}), '(i_dc_ref / i_dc_dcm_max)\n', (6132, 6157), True, 'import numpy as np\n'), ((8343, 8473), 'scipy.optimize.fmin_slsqp', 'fmin_slsqp', (['obj', 'x0'], {'f_eqcons': 'eqcons', 'fprime': 'gradient', 'f_ieqcons': 'ieqcons', 'bounds': 'b', 'full_output': '(True)', 'iter': '(1000)', 'iprint': 'iprint'}), '(obj, x0, f_eqcons=eqcons, fprime=gradient, f_ieqcons=ieqcons,\n bounds=b, full_output=True, iter=1000, iprint=iprint)\n', (8353, 8473), False, 'from scipy.optimize import fmin_slsqp\n'), 
((17484, 17525), 'numpy.linspace', 'np.linspace', (['(0)', 'i_dc_max'], {'num': 'grid_res[0]'}), '(0, i_dc_max, num=grid_res[0])\n', (17495, 17525), True, 'import numpy as np\n'), ((17543, 17584), 'numpy.linspace', 'np.linspace', (['(0)', 'u_pn_max'], {'num': 'grid_res[1]'}), '(0, u_pn_max, num=grid_res[1])\n', (17554, 17584), True, 'import numpy as np\n'), ((17602, 17638), 'numpy.linspace', 'np.linspace', (['(0)', '(0.5)'], {'num': 'grid_res[2]'}), '(0, 0.5, num=grid_res[2])\n', (17613, 17638), True, 'import numpy as np\n'), ((17655, 17673), 'numpy.zeros', 'np.zeros', (['grid_res'], {}), '(grid_res)\n', (17663, 17673), True, 'import numpy as np\n'), ((17768, 17786), 'numpy.zeros', 'np.zeros', (['grid_res'], {}), '(grid_res)\n', (17776, 17786), True, 'import numpy as np\n'), ((17980, 17992), 'time.clock', 'time.clock', ([], {}), '()\n', (17990, 17992), False, 'import time\n'), ((20075, 20149), 'csv_io.export_csv', 'export_csv', (['lut_fn', 'grid_res', 'i_dc_range', 'u_pn_range', 'u_bc_range', 'sw_times'], {}), '(lut_fn, grid_res, i_dc_range, u_pn_range, u_bc_range, sw_times)\n', (20085, 20149), False, 'from csv_io import export_csv\n'), ((21397, 21415), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (21413, 21415), False, 'import sys\n'), ((21459, 21536), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""LUT calculation for IMDAB3R converters"""'}), "(description='LUT calculation for IMDAB3R converters')\n", (21482, 21536), False, 'import argparse\n'), ((2577, 2593), 'numpy.abs', 'np.abs', (['i_dc_err'], {}), '(i_dc_err)\n', (2583, 2593), True, 'import numpy as np\n'), ((2691, 2704), 'numpy.abs', 'np.abs', (['q_err'], {}), '(q_err)\n', (2697, 2704), True, 'import numpy as np\n'), ((7221, 7253), 'hw_functions.rms_current_harm', 'hw.rms_current_harm', (['u', 's'], {'n': '(200)'}), '(u, s, n=200)\n', (7240, 7253), True, 'import hw_functions as hw\n'), ((7455, 7487), 'hw_functions.rms_current_grad', 'hw.rms_current_grad', (['u', 
't'], {'n': '(200)'}), '(u, t, n=200)\n', (7474, 7487), True, 'import hw_functions as hw\n'), ((7739, 7763), 'hw_functions.dab_io_currents', 'hw.dab_io_currents', (['u', 't'], {}), '(u, t)\n', (7757, 7763), True, 'import hw_functions as hw\n'), ((7985, 8010), 'hw_functions.switched_current', 'hw.switched_current', (['u', 's'], {}), '(u, s)\n', (8004, 8010), True, 'import hw_functions as hw\n'), ((9988, 10008), 'numpy.min', 'np.min', (['[u_ac, u_pn]'], {}), '([u_ac, u_pn])\n', (9994, 10008), True, 'import numpy as np\n'), ((10027, 10047), 'numpy.max', 'np.max', (['[u_ac, u_pn]'], {}), '([u_ac, u_pn])\n', (10033, 10047), True, 'import numpy as np\n'), ((11151, 11170), 'numpy.abs', 'np.abs', (['(u[0] - u[1])'], {}), '(u[0] - u[1])\n', (11157, 11170), True, 'import numpy as np\n'), ((11266, 11286), 'numpy.min', 'np.min', (['[u_ac, u_pn]'], {}), '([u_ac, u_pn])\n', (11272, 11286), True, 'import numpy as np\n'), ((11305, 11325), 'numpy.max', 'np.max', (['[u_ac, u_pn]'], {}), '([u_ac, u_pn])\n', (11311, 11325), True, 'import numpy as np\n'), ((16324, 16343), 'numpy.abs', 'np.abs', (['(u[0] - u[1])'], {}), '(u[0] - u[1])\n', (16330, 16343), True, 'import numpy as np\n'), ((21256, 21332), 'sys.stderr.write', 'sys.stderr.write', (["('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))"], {}), "('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))\n", (21272, 21332), False, 'import sys\n'), ((21370, 21392), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (21386, 21392), False, 'import sys\n'), ((7502, 7518), 'numpy.dot', 'np.dot', (['di_dt', 'J'], {}), '(di_dt, J)\n', (7508, 7518), True, 'import numpy as np\n'), ((8026, 8037), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (8034, 8037), True, 'import numpy as np\n'), ((8825, 8837), 'numpy.min', 'np.min', (['ieqc'], {}), '(ieqc)\n', (8831, 8837), True, 'import numpy as np\n'), ((9578, 9597), 'numpy.abs', 'np.abs', (['(u[0] - u[1])'], {}), '(u[0] - u[1])\n', (9584, 9597), True, 'import 
numpy as np\n'), ((9610, 9622), 'numpy.abs', 'np.abs', (['u[1]'], {}), '(u[1])\n', (9616, 9622), True, 'import numpy as np\n'), ((15994, 16022), 'numpy.sqrt', 'np.sqrt', (['(0.25 - 2 * i_dc_ref)'], {}), '(0.25 - 2 * i_dc_ref)\n', (16001, 16022), True, 'import numpy as np\n'), ((4128, 4140), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (4135, 4140), True, 'import numpy as np\n'), ((5145, 5157), 'numpy.sqrt', 'np.sqrt', (['det'], {}), '(det)\n', (5152, 5157), True, 'import numpy as np\n'), ((8798, 8809), 'numpy.abs', 'np.abs', (['eqc'], {}), '(eqc)\n', (8804, 8809), True, 'import numpy as np\n'), ((11987, 12015), 'numpy.sqrt', 'np.sqrt', (['(0.25 - 2 * i_dc_ref)'], {}), '(0.25 - 2 * i_dc_ref)\n', (11994, 12015), True, 'import numpy as np\n'), ((12222, 12277), 'numpy.sqrt', 'np.sqrt', (['((v_B - v_A) * p / np.pi / (2 * v_A ** 2 * v_B))'], {}), '((v_B - v_A) * p / np.pi / (2 * v_A ** 2 * v_B))\n', (12229, 12277), True, 'import numpy as np\n'), ((13330, 13341), 'numpy.sqrt', 'np.sqrt', (['e3'], {}), '(e3)\n', (13337, 13341), True, 'import numpy as np\n'), ((4617, 4629), 'numpy.sqrt', 'np.sqrt', (['det'], {}), '(det)\n', (4624, 4629), True, 'import numpy as np\n'), ((14120, 14170), 'numpy.sqrt', 'np.sqrt', (['(d_b * (1 - d_b) - p / (np.pi * v_A * v_B))'], {}), '(d_b * (1 - d_b) - p / (np.pi * v_A * v_B))\n', (14127, 14170), True, 'import numpy as np\n'), ((14371, 14410), 'numpy.sqrt', 'np.sqrt', (['(0.25 - p / (np.pi * v_A * v_B))'], {}), '(0.25 - p / (np.pi * v_A * v_B))\n', (14378, 14410), True, 'import numpy as np\n'), ((13913, 13924), 'numpy.sqrt', 'np.sqrt', (['e7'], {}), '(e7)\n', (13920, 13924), True, 'import numpy as np\n'), ((13929, 13940), 'numpy.sqrt', 'np.sqrt', (['e8'], {}), '(e8)\n', (13936, 13940), True, 'import numpy as np\n'), ((19697, 19709), 'time.clock', 'time.clock', ([], {}), '()\n', (19707, 19709), False, 'import time\n'), ((13832, 13843), 'numpy.sqrt', 'np.sqrt', (['e7'], {}), '(e7)\n', (13839, 13843), True, 'import numpy as np\n')]
|
import unittest
import numpy
from chainer import cuda
from chainer import testing
from chainer.testing import attr
from chainer import utils
class TestWalkerAlias(unittest.TestCase):
def setUp(self):
self.ps = numpy.array([5, 3, 4, 1, 2], dtype=numpy.int32)
self.sampler = utils.WalkerAlias(self.ps)
def check_sample(self):
counts = numpy.zeros(len(self.ps), numpy.float32)
for _ in range(1000):
vs = self.sampler.sample((4, 3))
numpy.add.at(counts, cuda.to_cpu(vs), 1)
counts /= (1000 * 12)
counts *= sum(self.ps)
testing.assert_allclose(self.ps, counts, atol=0.1, rtol=0.1)
def test_sample_cpu(self):
self.check_sample()
@attr.gpu
def test_sample_gpu(self):
self.sampler.to_gpu()
self.assertTrue(self.sampler.use_gpu)
self.check_sample()
@attr.gpu
def test_to_cpu(self):
self.sampler.to_gpu()
self.sampler.to_cpu()
self.assertFalse(self.sampler.use_gpu)
self.check_sample()
testing.run_module(__name__, __file__)
|
[
"chainer.testing.assert_allclose",
"chainer.utils.WalkerAlias",
"chainer.cuda.to_cpu",
"numpy.array",
"chainer.testing.run_module"
] |
[((1059, 1097), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (1077, 1097), False, 'from chainer import testing\n'), ((227, 274), 'numpy.array', 'numpy.array', (['[5, 3, 4, 1, 2]'], {'dtype': 'numpy.int32'}), '([5, 3, 4, 1, 2], dtype=numpy.int32)\n', (238, 274), False, 'import numpy\n'), ((298, 324), 'chainer.utils.WalkerAlias', 'utils.WalkerAlias', (['self.ps'], {}), '(self.ps)\n', (315, 324), False, 'from chainer import utils\n'), ((609, 669), 'chainer.testing.assert_allclose', 'testing.assert_allclose', (['self.ps', 'counts'], {'atol': '(0.1)', 'rtol': '(0.1)'}), '(self.ps, counts, atol=0.1, rtol=0.1)\n', (632, 669), False, 'from chainer import testing\n'), ((520, 535), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['vs'], {}), '(vs)\n', (531, 535), False, 'from chainer import cuda\n')]
|
import math
import re
import torch
def read_bpseq(file):
with open(file) as f:
p = [0]
s = ['']
name = sc = t = None
for l in f:
if l.startswith('#'):
m = re.search(r'^# (.*) \(s=([\d.]+), ([\d.]+)s\)', l)
if m:
name, sc, t = m[1], float(m[2]), float(m[3])
else:
idx, c, pair = l.rstrip('\n').split()
s.append(c)
p.append(int(pair))
seq = ''.join(s)
return (seq, p, name, sc, t)
def read_pdb(file):
p = []
with open(file) as f:
for l in f:
l = l.rstrip('\n').split()
if len(l) == 2 and l[0].isdecimal() and l[1].isdecimal():
p.append([int(l[0]), int(l[1])])
return p
def compare_bpseq(ref, pred):
L = len(ref) - 1
tp = fp = fn = 0
if ((len(ref) > 0 and isinstance(ref[0], list)) or (isinstance(ref, torch.Tensor) and ref.ndim == 2)):
if isinstance(ref, torch.Tensor):
ref = ref.tolist()
ref = {(min(i, j), max(i, j)) for i, j in ref}
pred = {(i, j) for i, j in enumerate(pred) if i < j}
tp = len(ref & pred)
fp = len(pred - ref)
fn = len(ref - pred)
else:
assert (len(ref) == len(pred))
for i, (j1, j2) in enumerate(zip(ref, pred)):
if j1 > 0 and i < j1: # pos
if j1 == j2:
tp += 1
elif j2 > 0 and i < j2:
fp += 1
fn += 1
else:
fn += 1
elif j2 > 0 and i < j2:
fp += 1
tn = L * (L - 1) // 2 - tp - fp - fn
return (tp, tn, fp, fn)
def accuracy(tp, tn, fp, fn):
sen = tp / (tp + fn) if tp + fn > 0. else 0.
ppv = tp / (tp + fp) if tp + fp > 0. else 0.
fval = 2 * sen * ppv / (sen + ppv) if sen + ppv > 0. else 0.
mcc = ((tp * tn) - (fp * fn)) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) if (tp + fp) * (
tp + fn) * (tn + fp) * (tn + fn) > 0. else 0.
return (sen, ppv, fval, mcc)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description='calculate SEN, PPV, F, MCC for the predicted RNA secondary structure',
add_help=True)
parser.add_argument('ref', type=str, help='BPSEQ-formatted file with the refernece structure')
parser.add_argument('pred', type=str, help='BPSEQ-formatted file with the predicted structure')
parser.add_argument('--pdb', action='store_true', help='use pdb labels for ref')
args = parser.parse_args()
if args.pdb:
ref = read_pdb(args.ref)
else:
seq, ref, _, _, _ = read_bpseq(args.ref)
seq, pred, name, sc, t = read_bpseq(args.pred)
x = compare_bpseq(ref, pred)
x = [name, len(seq), t, sc] + list(x) + list(accuracy(*x))
print(', '.join([str(v) for v in x]))
|
[
"re.search",
"argparse.ArgumentParser",
"math.sqrt"
] |
[((2217, 2339), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""calculate SEN, PPV, F, MCC for the predicted RNA secondary structure"""', 'add_help': '(True)'}), "(description=\n 'calculate SEN, PPV, F, MCC for the predicted RNA secondary structure',\n add_help=True)\n", (2231, 2339), False, 'from argparse import ArgumentParser\n'), ((1965, 2021), 'math.sqrt', 'math.sqrt', (['((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))'], {}), '((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))\n', (1974, 2021), False, 'import math\n'), ((222, 275), 're.search', 're.search', (['"""^# (.*) \\\\(s=([\\\\d.]+), ([\\\\d.]+)s\\\\)"""', 'l'], {}), "('^# (.*) \\\\(s=([\\\\d.]+), ([\\\\d.]+)s\\\\)', l)\n", (231, 275), False, 'import re\n')]
|
import httpretty
import requests
from behave import given, when, then
from nose.tools import assert_in
import requestsdefaulter
@given(u'I have the default headers set to')
def set_default_headers(context):
"""
:type context: behave.runner.Context
"""
headers = row_table(context)
def default_headers_function():
return headers
requestsdefaulter.default_headers(default_headers_function)
def row_table(context):
headers = {}
for row in context.table:
headers[row["Header"]] = row["Value"]
return headers
@when(u'I make a request')
def make_request(context):
"""
:type context: behave.runner.Context
"""
requests.get(context.mock_url)
@then(u'the request should contain the headers')
def assert_headers(context):
"""
:type context: behave.runner.Context
"""
expected_headers = [(k, v) for k, v in row_table(context).items()]
request = httpretty.last_request()
actual_headers = request.headers.items()
for expected_header in expected_headers:
assert_in(expected_header, actual_headers)
@when(u'I make a request with the headers')
def make_request_with_headers(context):
"""
:type context: behave.runner.Context
"""
headers = row_table(context)
requests.get(context.mock_url, headers=headers)
|
[
"behave.when",
"behave.then",
"httpretty.last_request",
"nose.tools.assert_in",
"requestsdefaulter.default_headers",
"requests.get",
"behave.given"
] |
[((132, 175), 'behave.given', 'given', (['u"""I have the default headers set to"""'], {}), "(u'I have the default headers set to')\n", (137, 175), False, 'from behave import given, when, then\n'), ((569, 594), 'behave.when', 'when', (['u"""I make a request"""'], {}), "(u'I make a request')\n", (573, 594), False, 'from behave import given, when, then\n'), ((720, 767), 'behave.then', 'then', (['u"""the request should contain the headers"""'], {}), "(u'the request should contain the headers')\n", (724, 767), False, 'from behave import given, when, then\n'), ((1112, 1154), 'behave.when', 'when', (['u"""I make a request with the headers"""'], {}), "(u'I make a request with the headers')\n", (1116, 1154), False, 'from behave import given, when, then\n'), ((368, 427), 'requestsdefaulter.default_headers', 'requestsdefaulter.default_headers', (['default_headers_function'], {}), '(default_headers_function)\n', (401, 427), False, 'import requestsdefaulter\n'), ((686, 716), 'requests.get', 'requests.get', (['context.mock_url'], {}), '(context.mock_url)\n', (698, 716), False, 'import requests\n'), ((942, 966), 'httpretty.last_request', 'httpretty.last_request', ([], {}), '()\n', (964, 966), False, 'import httpretty\n'), ((1292, 1339), 'requests.get', 'requests.get', (['context.mock_url'], {'headers': 'headers'}), '(context.mock_url, headers=headers)\n', (1304, 1339), False, 'import requests\n'), ((1066, 1108), 'nose.tools.assert_in', 'assert_in', (['expected_header', 'actual_headers'], {}), '(expected_header, actual_headers)\n', (1075, 1108), False, 'from nose.tools import assert_in\n')]
|
#!/usr/bin/env python
__author__ = '<NAME>'
import sys
import argparse
from RouToolPa.Routines import AnnotationsRoutines
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--gff", action="store", dest="gff", required=True,
help="Gff file")
parser.add_argument("-o", "--output", action="store", dest="output",
help="Output file with ids. Default: stdout")
args = parser.parse_args()
if args.output is None:
args.output = sys.stdout
AnnotationsRoutines.get_scaffold_ids_from_gff(args.gff, out_file=args.output)
|
[
"argparse.ArgumentParser",
"RouToolPa.Routines.AnnotationsRoutines.get_scaffold_ids_from_gff"
] |
[((133, 158), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (156, 158), False, 'import argparse\n'), ((494, 571), 'RouToolPa.Routines.AnnotationsRoutines.get_scaffold_ids_from_gff', 'AnnotationsRoutines.get_scaffold_ids_from_gff', (['args.gff'], {'out_file': 'args.output'}), '(args.gff, out_file=args.output)\n', (539, 571), False, 'from RouToolPa.Routines import AnnotationsRoutines\n')]
|
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class Book(models.Model):
title = models.CharField(max_length=200)
subject = models.ForeignKey('Subject', on_delete=models.CASCADE, blank=True, null=True)
author = models.ForeignKey('Author', on_delete=models.CASCADE, blank=True, null=True)
available = models.BooleanField(default=True)
favorite = models.ManyToManyField(User, blank=True)
def __str__(self):
return self.title
class Author(models.Model):
name = models.CharField(max_length=100)
image = models.ImageField(upload_to='author/', null=True, blank=True)
about = models.TextField(default='')
books = models.ManyToManyField(Book, related_name='+', blank=True)
def __str__(self):
return self.name
class Subject(models.Model):
title = models.CharField(max_length=100)
authors = models.ManyToManyField(Author, blank=True)
books = models.ManyToManyField(Book, related_name='+', blank=True)
def __str__(self):
return self.title
|
[
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.ImageField"
] |
[((142, 174), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (158, 174), False, 'from django.db import models\n'), ((189, 266), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Subject"""'], {'on_delete': 'models.CASCADE', 'blank': '(True)', 'null': '(True)'}), "('Subject', on_delete=models.CASCADE, blank=True, null=True)\n", (206, 266), False, 'from django.db import models\n'), ((280, 356), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Author"""'], {'on_delete': 'models.CASCADE', 'blank': '(True)', 'null': '(True)'}), "('Author', on_delete=models.CASCADE, blank=True, null=True)\n", (297, 356), False, 'from django.db import models\n'), ((373, 406), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (392, 406), False, 'from django.db import models\n'), ((422, 462), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {'blank': '(True)'}), '(User, blank=True)\n', (444, 462), False, 'from django.db import models\n'), ((554, 586), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (570, 586), False, 'from django.db import models\n'), ((599, 660), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""author/"""', 'null': '(True)', 'blank': '(True)'}), "(upload_to='author/', null=True, blank=True)\n", (616, 660), False, 'from django.db import models\n'), ((673, 701), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (689, 701), False, 'from django.db import models\n'), ((714, 772), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Book'], {'related_name': '"""+"""', 'blank': '(True)'}), "(Book, related_name='+', blank=True)\n", (736, 772), False, 'from django.db import models\n'), ((865, 897), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), 
'(max_length=100)\n', (881, 897), False, 'from django.db import models\n'), ((912, 954), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Author'], {'blank': '(True)'}), '(Author, blank=True)\n', (934, 954), False, 'from django.db import models\n'), ((967, 1025), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Book'], {'related_name': '"""+"""', 'blank': '(True)'}), "(Book, related_name='+', blank=True)\n", (989, 1025), False, 'from django.db import models\n')]
|
# -*- coding: utf-8 -*-
import os, jinja2
import numpy as np
import scipy.optimize
from ..util import functions as f
from ..util import tools, constants
# see README for terminology, terminolology, lol
class Vertex():
""" point with an index that's used in block and face definition
and can output in OpenFOAM format """
def __init__(self, point):
self.point = np.array(point)
self.mesh_index = None # will be changed in Mesh.prepare_data()
def __repr__(self):
s = constants.vector_format(self.point)
if self.mesh_index is not None:
s += " // {}".format(self.mesh_index)
return s
class Edge():
def __init__(self, index_1, index_2, points):
""" an edge is defined by two vertices and points in between;
a single point edge is treated as 'arc', more points are
treated as 'spline'.
passed indexes refer to position in Block.edges[] list; Mesh.prepare_data()
will assign actual Vertex objects.
"""
# indexes in block.edges[] list
self.block_index_1 = index_1
self.block_index_2 = index_2
# these will refer to actual Vertex objects after Mesh.prepare_data()
self.vertex_1 = None
self.vertex_2 = None
self.type, self.points = self.get_type(points)
@staticmethod
def get_type(points):
""" returns edge type and a list of points:
'None' for a straight line,
'arc' for a circular arc,
'spline' for a spline """
if points is None:
return None, None
# if multiple points are given check that they are of correct length
points = np.array(points)
shape = np.shape(points)
if len(shape) == 1:
t = 'arc'
else:
assert len(shape) == 2
for p in points:
assert len(p) == 3
t = 'spline'
return t, points
@property
def point_list(self):
if self.type == 'arc':
return constants.vector_format(self.points)
else:
return "(" + \
" ".join([constants.vector_format(p) for p in self.points]) + \
")"
@property
def is_valid(self):
# 'all' spline edges are 'valid'
if self.type == 'spline':
return True
# wedge geometries produce coincident
# edges and vertices; drop those
if f.norm(self.vertex_1.point - self.vertex_2.point) < constants.tol:
return False
# if case vertex1, vertex2 and point in between
# are collinear, blockMesh will find an arc with
# infinite radius and crash.
# so, check for collinearity; if the three points
# are actually collinear, this edge is redundant and can be
# silently dropped
OA = self.vertex_1.point
OB = self.vertex_2.point
OC = self.points
# if point C is on the same line as A and B:
# OC = OA + k*(OB-OA)
AB = OB - OA
AC = OC - OA
k = f.norm(AC)/f.norm(AB)
d = f.norm((OA+AC) - (OA + k*AB))
return d > constants.tol
def get_length(self):
# TODO: test
def curve_length(points):
l = 0
for i in range(len(points)-1):
l += f.norm(points[i+1] - points[i])
return l
if self.type == 'arc':
edge_points = np.array([
self.vertex_1.point,
self.points,
self.vertex_2.point
])
return curve_length(edge_points)
elif self.type == 'spline':
edge_points = np.concatenate((
[self.vertex_1.point],
self.points,
[self.vertex_2.point]), axis=0)
return curve_length(edge_points)
else:
raise AttributeError(f"Unknown edge type: {self.type}")
def __repr__(self):
return "{} {} {} {}".format(
self.type,
self.vertex_1.mesh_index,
self.vertex_2.mesh_index,
self.point_list
)
|
[
"numpy.shape",
"numpy.array",
"numpy.concatenate"
] |
[((383, 398), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (391, 398), True, 'import numpy as np\n'), ((1708, 1724), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1716, 1724), True, 'import numpy as np\n'), ((1741, 1757), 'numpy.shape', 'np.shape', (['points'], {}), '(points)\n', (1749, 1757), True, 'import numpy as np\n'), ((3522, 3587), 'numpy.array', 'np.array', (['[self.vertex_1.point, self.points, self.vertex_2.point]'], {}), '([self.vertex_1.point, self.points, self.vertex_2.point])\n', (3530, 3587), True, 'import numpy as np\n'), ((3758, 3845), 'numpy.concatenate', 'np.concatenate', (['([self.vertex_1.point], self.points, [self.vertex_2.point])'], {'axis': '(0)'}), '(([self.vertex_1.point], self.points, [self.vertex_2.point]),\n axis=0)\n', (3772, 3845), True, 'import numpy as np\n')]
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.10.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ProcessorStatusSnapshotDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'group_id': 'str',
'name': 'str',
'type': 'str',
'run_status': 'str',
'execution_node': 'str',
'bytes_read': 'int',
'bytes_written': 'int',
'read': 'str',
'written': 'str',
'flow_files_in': 'int',
'bytes_in': 'int',
'input': 'str',
'flow_files_out': 'int',
'bytes_out': 'int',
'output': 'str',
'task_count': 'int',
'tasks_duration_nanos': 'int',
'tasks': 'str',
'tasks_duration': 'str',
'active_thread_count': 'int',
'terminated_thread_count': 'int'
}
attribute_map = {
'id': 'id',
'group_id': 'groupId',
'name': 'name',
'type': 'type',
'run_status': 'runStatus',
'execution_node': 'executionNode',
'bytes_read': 'bytesRead',
'bytes_written': 'bytesWritten',
'read': 'read',
'written': 'written',
'flow_files_in': 'flowFilesIn',
'bytes_in': 'bytesIn',
'input': 'input',
'flow_files_out': 'flowFilesOut',
'bytes_out': 'bytesOut',
'output': 'output',
'task_count': 'taskCount',
'tasks_duration_nanos': 'tasksDurationNanos',
'tasks': 'tasks',
'tasks_duration': 'tasksDuration',
'active_thread_count': 'activeThreadCount',
'terminated_thread_count': 'terminatedThreadCount'
}
def __init__(self, id=None, group_id=None, name=None, type=None, run_status=None, execution_node=None, bytes_read=None, bytes_written=None, read=None, written=None, flow_files_in=None, bytes_in=None, input=None, flow_files_out=None, bytes_out=None, output=None, task_count=None, tasks_duration_nanos=None, tasks=None, tasks_duration=None, active_thread_count=None, terminated_thread_count=None):
"""
ProcessorStatusSnapshotDTO - a model defined in Swagger
"""
self._id = None
self._group_id = None
self._name = None
self._type = None
self._run_status = None
self._execution_node = None
self._bytes_read = None
self._bytes_written = None
self._read = None
self._written = None
self._flow_files_in = None
self._bytes_in = None
self._input = None
self._flow_files_out = None
self._bytes_out = None
self._output = None
self._task_count = None
self._tasks_duration_nanos = None
self._tasks = None
self._tasks_duration = None
self._active_thread_count = None
self._terminated_thread_count = None
if id is not None:
self.id = id
if group_id is not None:
self.group_id = group_id
if name is not None:
self.name = name
if type is not None:
self.type = type
if run_status is not None:
self.run_status = run_status
if execution_node is not None:
self.execution_node = execution_node
if bytes_read is not None:
self.bytes_read = bytes_read
if bytes_written is not None:
self.bytes_written = bytes_written
if read is not None:
self.read = read
if written is not None:
self.written = written
if flow_files_in is not None:
self.flow_files_in = flow_files_in
if bytes_in is not None:
self.bytes_in = bytes_in
if input is not None:
self.input = input
if flow_files_out is not None:
self.flow_files_out = flow_files_out
if bytes_out is not None:
self.bytes_out = bytes_out
if output is not None:
self.output = output
if task_count is not None:
self.task_count = task_count
if tasks_duration_nanos is not None:
self.tasks_duration_nanos = tasks_duration_nanos
if tasks is not None:
self.tasks = tasks
if tasks_duration is not None:
self.tasks_duration = tasks_duration
if active_thread_count is not None:
self.active_thread_count = active_thread_count
if terminated_thread_count is not None:
self.terminated_thread_count = terminated_thread_count
@property
def id(self):
"""
Gets the id of this ProcessorStatusSnapshotDTO.
The id of the processor.
:return: The id of this ProcessorStatusSnapshotDTO.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ProcessorStatusSnapshotDTO.
The id of the processor.
:param id: The id of this ProcessorStatusSnapshotDTO.
:type: str
"""
self._id = id
@property
def group_id(self):
"""
Gets the group_id of this ProcessorStatusSnapshotDTO.
The id of the parent process group to which the processor belongs.
:return: The group_id of this ProcessorStatusSnapshotDTO.
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""
Sets the group_id of this ProcessorStatusSnapshotDTO.
The id of the parent process group to which the processor belongs.
:param group_id: The group_id of this ProcessorStatusSnapshotDTO.
:type: str
"""
self._group_id = group_id
@property
def name(self):
"""
Gets the name of this ProcessorStatusSnapshotDTO.
The name of the prcessor.
:return: The name of this ProcessorStatusSnapshotDTO.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ProcessorStatusSnapshotDTO.
The name of the prcessor.
:param name: The name of this ProcessorStatusSnapshotDTO.
:type: str
"""
self._name = name
@property
def type(self):
"""
Gets the type of this ProcessorStatusSnapshotDTO.
The type of the processor.
:return: The type of this ProcessorStatusSnapshotDTO.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this ProcessorStatusSnapshotDTO.
The type of the processor.
:param type: The type of this ProcessorStatusSnapshotDTO.
:type: str
"""
self._type = type
@property
def run_status(self):
"""
Gets the run_status of this ProcessorStatusSnapshotDTO.
The state of the processor.
:return: The run_status of this ProcessorStatusSnapshotDTO.
:rtype: str
"""
return self._run_status
@run_status.setter
def run_status(self, run_status):
"""
Sets the run_status of this ProcessorStatusSnapshotDTO.
The state of the processor.
:param run_status: The run_status of this ProcessorStatusSnapshotDTO.
:type: str
"""
allowed_values = ["Running", "Stopped", "Validating", "Disabled", "Invalid"]
if run_status not in allowed_values:
raise ValueError(
"Invalid value for `run_status` ({0}), must be one of {1}"
.format(run_status, allowed_values)
)
self._run_status = run_status
@property
def execution_node(self):
"""
Gets the execution_node of this ProcessorStatusSnapshotDTO.
Indicates the node where the process will execute.
:return: The execution_node of this ProcessorStatusSnapshotDTO.
:rtype: str
"""
return self._execution_node
@execution_node.setter
def execution_node(self, execution_node):
"""
Sets the execution_node of this ProcessorStatusSnapshotDTO.
Indicates the node where the process will execute.
:param execution_node: The execution_node of this ProcessorStatusSnapshotDTO.
:type: str
"""
allowed_values = ["ALL", "PRIMARY"]
if execution_node not in allowed_values:
raise ValueError(
"Invalid value for `execution_node` ({0}), must be one of {1}"
.format(execution_node, allowed_values)
)
self._execution_node = execution_node
@property
def bytes_read(self):
"""
Gets the bytes_read of this ProcessorStatusSnapshotDTO.
The number of bytes read by this Processor in the last 5 mintues
:return: The bytes_read of this ProcessorStatusSnapshotDTO.
:rtype: int
"""
return self._bytes_read
@bytes_read.setter
def bytes_read(self, bytes_read):
"""
Sets the bytes_read of this ProcessorStatusSnapshotDTO.
The number of bytes read by this Processor in the last 5 mintues
:param bytes_read: The bytes_read of this ProcessorStatusSnapshotDTO.
:type: int
"""
self._bytes_read = bytes_read
@property
def bytes_written(self):
"""
Gets the bytes_written of this ProcessorStatusSnapshotDTO.
The number of bytes written by this Processor in the last 5 minutes
:return: The bytes_written of this ProcessorStatusSnapshotDTO.
:rtype: int
"""
return self._bytes_written
@bytes_written.setter
def bytes_written(self, bytes_written):
"""
Sets the bytes_written of this ProcessorStatusSnapshotDTO.
The number of bytes written by this Processor in the last 5 minutes
:param bytes_written: The bytes_written of this ProcessorStatusSnapshotDTO.
:type: int
"""
self._bytes_written = bytes_written
@property
def read(self):
"""
Gets the read of this ProcessorStatusSnapshotDTO.
The number of bytes read in the last 5 minutes.
:return: The read of this ProcessorStatusSnapshotDTO.
:rtype: str
"""
return self._read
@read.setter
def read(self, read):
"""
Sets the read of this ProcessorStatusSnapshotDTO.
The number of bytes read in the last 5 minutes.
:param read: The read of this ProcessorStatusSnapshotDTO.
:type: str
"""
self._read = read
@property
def written(self):
"""
Gets the written of this ProcessorStatusSnapshotDTO.
The number of bytes written in the last 5 minutes.
:return: The written of this ProcessorStatusSnapshotDTO.
:rtype: str
"""
return self._written
    @written.setter
    def written(self, written: str) -> None:
        """
        Sets the written of this ProcessorStatusSnapshotDTO.
        The number of bytes written in the last 5 minutes.
        :param written: The written of this ProcessorStatusSnapshotDTO.
        :type: str
        """
        self._written = written
    @property
    def flow_files_in(self) -> int:
        """
        Gets the flow_files_in of this ProcessorStatusSnapshotDTO.
        The number of FlowFiles that have been accepted in the last 5 minutes
        :return: The flow_files_in of this ProcessorStatusSnapshotDTO.
        :rtype: int
        """
        return self._flow_files_in
    @flow_files_in.setter
    def flow_files_in(self, flow_files_in: int) -> None:
        """
        Sets the flow_files_in of this ProcessorStatusSnapshotDTO.
        The number of FlowFiles that have been accepted in the last 5 minutes
        :param flow_files_in: The flow_files_in of this ProcessorStatusSnapshotDTO.
        :type: int
        """
        self._flow_files_in = flow_files_in
    @property
    def bytes_in(self) -> int:
        """
        Gets the bytes_in of this ProcessorStatusSnapshotDTO.
        The size of the FlowFiles that have been accepted in the last 5 minutes
        :return: The bytes_in of this ProcessorStatusSnapshotDTO.
        :rtype: int
        """
        return self._bytes_in
    @bytes_in.setter
    def bytes_in(self, bytes_in: int) -> None:
        """
        Sets the bytes_in of this ProcessorStatusSnapshotDTO.
        The size of the FlowFiles that have been accepted in the last 5 minutes
        :param bytes_in: The bytes_in of this ProcessorStatusSnapshotDTO.
        :type: int
        """
        self._bytes_in = bytes_in
    @property
    def input(self) -> str:
        # NOTE: the name "input" shadows the builtin; kept for API
        # compatibility (this accessor is swagger-generated).
        """
        Gets the input of this ProcessorStatusSnapshotDTO.
        The count/size of flowfiles that have been accepted in the last 5 minutes.
        :return: The input of this ProcessorStatusSnapshotDTO.
        :rtype: str
        """
        return self._input
    @input.setter
    def input(self, input: str) -> None:
        """
        Sets the input of this ProcessorStatusSnapshotDTO.
        The count/size of flowfiles that have been accepted in the last 5 minutes.
        :param input: The input of this ProcessorStatusSnapshotDTO.
        :type: str
        """
        self._input = input
    @property
    def flow_files_out(self) -> int:
        """
        Gets the flow_files_out of this ProcessorStatusSnapshotDTO.
        The number of FlowFiles transferred to a Connection in the last 5 minutes
        :return: The flow_files_out of this ProcessorStatusSnapshotDTO.
        :rtype: int
        """
        return self._flow_files_out
    @flow_files_out.setter
    def flow_files_out(self, flow_files_out: int) -> None:
        """
        Sets the flow_files_out of this ProcessorStatusSnapshotDTO.
        The number of FlowFiles transferred to a Connection in the last 5 minutes
        :param flow_files_out: The flow_files_out of this ProcessorStatusSnapshotDTO.
        :type: int
        """
        self._flow_files_out = flow_files_out
    @property
    def bytes_out(self) -> int:
        """
        Gets the bytes_out of this ProcessorStatusSnapshotDTO.
        The size of the FlowFiles transferred to a Connection in the last 5 minutes
        :return: The bytes_out of this ProcessorStatusSnapshotDTO.
        :rtype: int
        """
        return self._bytes_out
    @bytes_out.setter
    def bytes_out(self, bytes_out: int) -> None:
        """
        Sets the bytes_out of this ProcessorStatusSnapshotDTO.
        The size of the FlowFiles transferred to a Connection in the last 5 minutes
        :param bytes_out: The bytes_out of this ProcessorStatusSnapshotDTO.
        :type: int
        """
        self._bytes_out = bytes_out
    @property
    def output(self) -> str:
        """
        Gets the output of this ProcessorStatusSnapshotDTO.
        The count/size of flowfiles that have been processed in the last 5 minutes.
        :return: The output of this ProcessorStatusSnapshotDTO.
        :rtype: str
        """
        return self._output
    @output.setter
    def output(self, output: str) -> None:
        """
        Sets the output of this ProcessorStatusSnapshotDTO.
        The count/size of flowfiles that have been processed in the last 5 minutes.
        :param output: The output of this ProcessorStatusSnapshotDTO.
        :type: str
        """
        self._output = output
    @property
    def task_count(self) -> int:
        """
        Gets the task_count of this ProcessorStatusSnapshotDTO.
        The number of times this Processor has run in the last 5 minutes
        :return: The task_count of this ProcessorStatusSnapshotDTO.
        :rtype: int
        """
        return self._task_count
    @task_count.setter
    def task_count(self, task_count: int) -> None:
        """
        Sets the task_count of this ProcessorStatusSnapshotDTO.
        The number of times this Processor has run in the last 5 minutes
        :param task_count: The task_count of this ProcessorStatusSnapshotDTO.
        :type: int
        """
        self._task_count = task_count
    @property
    def tasks_duration_nanos(self) -> int:
        """
        Gets the tasks_duration_nanos of this ProcessorStatusSnapshotDTO.
        The number of nanoseconds that this Processor has spent running in the last 5 minutes
        :return: The tasks_duration_nanos of this ProcessorStatusSnapshotDTO.
        :rtype: int
        """
        return self._tasks_duration_nanos
    @tasks_duration_nanos.setter
    def tasks_duration_nanos(self, tasks_duration_nanos: int) -> None:
        """
        Sets the tasks_duration_nanos of this ProcessorStatusSnapshotDTO.
        The number of nanoseconds that this Processor has spent running in the last 5 minutes
        :param tasks_duration_nanos: The tasks_duration_nanos of this ProcessorStatusSnapshotDTO.
        :type: int
        """
        self._tasks_duration_nanos = tasks_duration_nanos
    @property
    def tasks(self) -> str:
        """
        Gets the tasks of this ProcessorStatusSnapshotDTO.
        The total number of task this connectable has completed over the last 5 minutes.
        :return: The tasks of this ProcessorStatusSnapshotDTO.
        :rtype: str
        """
        return self._tasks
    @tasks.setter
    def tasks(self, tasks: str) -> None:
        """
        Sets the tasks of this ProcessorStatusSnapshotDTO.
        The total number of task this connectable has completed over the last 5 minutes.
        :param tasks: The tasks of this ProcessorStatusSnapshotDTO.
        :type: str
        """
        self._tasks = tasks
    @property
    def tasks_duration(self) -> str:
        """
        Gets the tasks_duration of this ProcessorStatusSnapshotDTO.
        The total duration of all tasks for this connectable over the last 5 minutes.
        :return: The tasks_duration of this ProcessorStatusSnapshotDTO.
        :rtype: str
        """
        return self._tasks_duration
    @tasks_duration.setter
    def tasks_duration(self, tasks_duration: str) -> None:
        """
        Sets the tasks_duration of this ProcessorStatusSnapshotDTO.
        The total duration of all tasks for this connectable over the last 5 minutes.
        :param tasks_duration: The tasks_duration of this ProcessorStatusSnapshotDTO.
        :type: str
        """
        self._tasks_duration = tasks_duration
    @property
    def active_thread_count(self) -> int:
        """
        Gets the active_thread_count of this ProcessorStatusSnapshotDTO.
        The number of threads currently executing in the processor.
        :return: The active_thread_count of this ProcessorStatusSnapshotDTO.
        :rtype: int
        """
        return self._active_thread_count
    @active_thread_count.setter
    def active_thread_count(self, active_thread_count: int) -> None:
        """
        Sets the active_thread_count of this ProcessorStatusSnapshotDTO.
        The number of threads currently executing in the processor.
        :param active_thread_count: The active_thread_count of this ProcessorStatusSnapshotDTO.
        :type: int
        """
        self._active_thread_count = active_thread_count
    @property
    def terminated_thread_count(self) -> int:
        """
        Gets the terminated_thread_count of this ProcessorStatusSnapshotDTO.
        The number of threads currently terminated for the processor.
        :return: The terminated_thread_count of this ProcessorStatusSnapshotDTO.
        :rtype: int
        """
        return self._terminated_thread_count
    @terminated_thread_count.setter
    def terminated_thread_count(self, terminated_thread_count: int) -> None:
        """
        Sets the terminated_thread_count of this ProcessorStatusSnapshotDTO.
        The number of threads currently terminated for the processor.
        :param terminated_thread_count: The terminated_thread_count of this ProcessorStatusSnapshotDTO.
        :type: int
        """
        self._terminated_thread_count = terminated_thread_count
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        # pformat (pprint) gives a readable multi-line dump of the dict form.
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ProcessorStatusSnapshotDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"six.iteritems"
] |
[((21026, 21055), 'six.iteritems', 'iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (21035, 21055), False, 'from six import iteritems\n')]
|
from gevent import monkey
# monkey.patch_all(aggressive=False)
# Only the primitives this server relies on are patched (socket, thread,
# time, ssl) rather than the blanket patch_all() commented out above.
monkey.patch_socket()
monkey.patch_thread()
monkey.patch_time()
monkey.patch_ssl()
from JumpScale import j
from gevent.pywsgi import WSGIServer
from JumpScale.servers.serverbase import returnCodes
import time
import gevent
def jsonrpc(func):
    """Decorator turning a dispatch method into a JSON-RPC 2.0 WSGI handler.

    The wrapped callable is invoked as ``func(s, method_name, **params)`` and
    must return a ``(return_code, return_format, data)`` tuple; this wrapper
    decodes the JSON-RPC request from the WSGI input stream and serializes
    the JSON-RPC response.
    """
    def wrapper(s, environ, start_response):
        payload = j.data.serializer.json.loads(environ['wsgi.input'].read())
        try:
            method_name = payload['method']
            method_kwargs = payload.get('params', dict())
            return_code, return_format, data = func(s, method_name, **method_kwargs)
            if return_code == returnCodes.OK:
                result = {'result': data, 'id': payload['id'], 'jsonrpc': '2.0'}
            else:
                result = {'result': None, 'id': payload['id'], 'jsonrpc': '2.0', 'error': {'code': 1, 'data': data}}
        except Exception:
            result = s.invalidRequest()
        statuscode = '200 OK' if not result.get('error', None) else '500 Internal Server Error'
        start_response(
            status=statuscode,
            headers=[('Content-type', 'application/json-rpc')],  # headers must be a mutable list
        )
        # BUG FIX: `json` was never imported in this module, so the original
        # `json.dumps(result)` raised NameError at runtime. Use the same
        # serializer that decoded the request to encode the response.
        return [j.data.serializer.json.dumps(result)]
    return wrapper
class GeventWSServer:
    """Gevent-based WSGI RPC server speaking raw-binary and JSON-RPC 2.0."""
    def __init__(self, addr, port, sslorg=None, ssluser=None, sslkeyvaluestor=None):
        """
        @param handler is passed as a class
        """
        self.port = port
        self.addr = addr
        self.key = "1234"
        self.nr = 0
        # self.jobhandler = JobHandler()
        # Back-end daemon that performs the actual RPC dispatching.
        self.daemon = j.servers.base.getDaemon(sslorg=sslorg, ssluser=ssluser, sslkeyvaluestor=sslkeyvaluestor)
        self.server = WSGIServer(('', self.port), self.rpcRequest)
        self.type = "geventws"
        # name -> greenlet, for the background tasks started via schedule().
        self.greenlets = {}
        # Clock bookkeeping, refreshed by the _timer/_timer2 greenlets.
        self.now = 0
        self.fiveMinuteId = 0
        self.hourId = 0
        self.dayId = 0
    def startClock(self, obj=None):
        # Launch the two clock greenlets; optionally mirror the initial
        # clock fields onto obj so it can track them too.
        self.schedule("timer", self._timer)
        self.schedule("timer2", self._timer2)
        if obj is not None:
            obj.now = self.now
            obj.fiveMinuteId = self.fiveMinuteId
            obj.hourId = self.hourId
            obj.dayId = self.dayId
    def _timer(self):
        """
        will remember time every 1 sec
        """
        # lfmid = 0
        while True:
            self.now = time.time()
            print("timer")
            gevent.sleep(1)
    def _timer2(self):
        """
        will remember time every 1 sec
        """
        # NOTE: despite the docstring this loop refreshes the coarse time
        # ids every 200 seconds, not every second.
        # lfmid = 0
        while True:
            self.fiveMinuteId = j.data.time.get5MinuteId(self.now)
            self.hourId = j.data.time.getHourId(self.now)
            self.dayId = j.data.time.getDayId(self.now)
            print("timer2")
            gevent.sleep(200)
    def schedule(self, name, ffunction, *args, **kwargs):
        # Run ffunction in a named greenlet; replaces any prior entry with
        # the same name in the registry (the old greenlet is not killed).
        self.greenlets[name] = gevent.greenlet.Greenlet(ffunction, *args, **kwargs)
        self.greenlets[name].start()
        return self.greenlets[name]
    def responseRaw(self, data, start_response):
        # 200 response with a raw payload body.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [data]
    def responseNotFound(self, start_response):
        start_response('404 Not Found', [('Content-Type', 'text/html')])
        return ['<h1>Not Found</h1>']
    def rpcRequest(self, environ, start_response):
        # Route by content type: binary protocol, JSON-RPC, or 404.
        if environ["CONTENT_TYPE"] == 'application/raw' and environ["REQUEST_METHOD"] == 'POST':
            data = environ["wsgi.input"].read()
            category, cmd, data2, informat, returnformat, sessionid = j.servers.base._unserializeBinSend(data)
            resultcode, returnformat, result = self.daemon.processRPCUnSerialized(
                cmd, informat, returnformat, data2, sessionid, category=category)
            data3 = j.servers.base._serializeBinReturn(resultcode, returnformat, result)
            return self.responseRaw(data3, start_response)
        elif environ['CONTENT_TYPE'].startswith('application/json') and environ["REQUEST_METHOD"] == 'POST':
            return self.handleJSONRPC(environ, start_response)
        else:
            return self.responseNotFound(start_response)
    def invalidRequest(self):
        # JSON-RPC 2.0 "Invalid Request" error object (code -32600).
        msg = {'error': {'code': -32600, 'message': 'Invalid Request'}, 'id': None, 'jsonrpc': '2.0'}
        return msg
    @jsonrpc
    def handleJSONRPC(self, method, **params):
        # The @jsonrpc wrapper changes the effective signature: callers
        # invoke this as (environ, start_response); the wrapper parses the
        # payload and calls back here with (method, **params).
        category, cmd = method.split('.', 1)
        sessionid = params.pop('sessionid', None)
        session = self.daemon.getSession(sessionid=sessionid, cmd=cmd)
        return self.daemon.processRPC(cmd, params, 'j', session, category=category)
    # def router(self, environ, start_response):
    #     path = environ["PATH_INFO"].lstrip("/")
    #     if path == "" or path.rstrip("/") == "wiki":
    #         path == "wiki/system"
    #     print "path:%s" % path
    #     if path.find("favicon.ico") != -1:
    #         return self.processor_page(environ, start_response, self.filesroot, "favicon.ico", prefix="")
    #     ctx = RequestContext(application="", actor="", method="", env=environ,
    #                          start_response=start_response, path=path, params=None)
    #     ctx.params = self._getParamsFromEnv(environ, ctx)
    def start(self):
        # Blocks serving requests until interrupted.
        print(("started on %s" % self.port))
        try:
            self.server.serve_forever()
        except KeyboardInterrupt:
            print("bye")
    def addCMDsInterface(self, MyCommands, category="", proxy=False):
        # Register a command interface on the underlying daemon.
        self.daemon.addCMDsInterface(MyCommands, category, proxy=proxy)
|
[
"JumpScale.j.servers.base.getDaemon",
"gevent.greenlet.Greenlet",
"JumpScale.j.servers.base._unserializeBinSend",
"JumpScale.j.data.time.getHourId",
"gevent.monkey.patch_ssl",
"JumpScale.j.servers.base._serializeBinReturn",
"time.time",
"gevent.monkey.patch_time",
"gevent.monkey.patch_socket",
"gevent.pywsgi.WSGIServer",
"JumpScale.j.data.time.getDayId",
"gevent.sleep",
"gevent.monkey.patch_thread",
"JumpScale.j.data.time.get5MinuteId"
] |
[((63, 84), 'gevent.monkey.patch_socket', 'monkey.patch_socket', ([], {}), '()\n', (82, 84), False, 'from gevent import monkey\n'), ((85, 106), 'gevent.monkey.patch_thread', 'monkey.patch_thread', ([], {}), '()\n', (104, 106), False, 'from gevent import monkey\n'), ((107, 126), 'gevent.monkey.patch_time', 'monkey.patch_time', ([], {}), '()\n', (124, 126), False, 'from gevent import monkey\n'), ((127, 145), 'gevent.monkey.patch_ssl', 'monkey.patch_ssl', ([], {}), '()\n', (143, 145), False, 'from gevent import monkey\n'), ((1621, 1715), 'JumpScale.j.servers.base.getDaemon', 'j.servers.base.getDaemon', ([], {'sslorg': 'sslorg', 'ssluser': 'ssluser', 'sslkeyvaluestor': 'sslkeyvaluestor'}), '(sslorg=sslorg, ssluser=ssluser, sslkeyvaluestor=\n sslkeyvaluestor)\n', (1645, 1715), False, 'from JumpScale import j\n'), ((1733, 1777), 'gevent.pywsgi.WSGIServer', 'WSGIServer', (["('', self.port)", 'self.rpcRequest'], {}), "(('', self.port), self.rpcRequest)\n", (1743, 1777), False, 'from gevent.pywsgi import WSGIServer\n'), ((2920, 2972), 'gevent.greenlet.Greenlet', 'gevent.greenlet.Greenlet', (['ffunction', '*args'], {}), '(ffunction, *args, **kwargs)\n', (2944, 2972), False, 'import gevent\n'), ((2396, 2407), 'time.time', 'time.time', ([], {}), '()\n', (2405, 2407), False, 'import time\n'), ((2447, 2462), 'gevent.sleep', 'gevent.sleep', (['(1)'], {}), '(1)\n', (2459, 2462), False, 'import gevent\n'), ((2623, 2657), 'JumpScale.j.data.time.get5MinuteId', 'j.data.time.get5MinuteId', (['self.now'], {}), '(self.now)\n', (2647, 2657), False, 'from JumpScale import j\n'), ((2684, 2715), 'JumpScale.j.data.time.getHourId', 'j.data.time.getHourId', (['self.now'], {}), '(self.now)\n', (2705, 2715), False, 'from JumpScale import j\n'), ((2741, 2771), 'JumpScale.j.data.time.getDayId', 'j.data.time.getDayId', (['self.now'], {}), '(self.now)\n', (2761, 2771), False, 'from JumpScale import j\n'), ((2812, 2829), 'gevent.sleep', 'gevent.sleep', (['(200)'], {}), '(200)\n', (2824, 2829), False, 
'import gevent\n'), ((3612, 3652), 'JumpScale.j.servers.base._unserializeBinSend', 'j.servers.base._unserializeBinSend', (['data'], {}), '(data)\n', (3646, 3652), False, 'from JumpScale import j\n'), ((3838, 3906), 'JumpScale.j.servers.base._serializeBinReturn', 'j.servers.base._serializeBinReturn', (['resultcode', 'returnformat', 'result'], {}), '(resultcode, returnformat, result)\n', (3872, 3906), False, 'from JumpScale import j\n')]
|
import numpy
from numpy.testing import assert_array_equal
import pandas as pd
import pytest
import ipdb
import alpha_tech_tracker.technical_analysis as ta
import alpha_tech_tracker.stock_price_data_loader as data_loader
def test_load_from_csv():
    # Smoke test: loading must not raise; the return value is not checked.
    data_loader.load_from_csv()
|
[
"alpha_tech_tracker.stock_price_data_loader.load_from_csv"
] |
[((254, 281), 'alpha_tech_tracker.stock_price_data_loader.load_from_csv', 'data_loader.load_from_csv', ([], {}), '()\n', (279, 281), True, 'import alpha_tech_tracker.stock_price_data_loader as data_loader\n')]
|
import datetime
import re
from decimal import Decimal
from flask import Blueprint, request
from sqlalchemy import text
from cache import cache, make_cache_key
from db import db
from timer import timer
routes = Blueprint('ebrake', __name__, url_prefix='/federal-emergency-brake')
@routes.route('/', methods=['GET'])
@timer
@cache.cached(key_prefix=make_cache_key)
def get_rki_emergency_brake():
    """ Returns the incidences and corresponding emergency brake information based on rki.de/inzidenzen
    The calculation whether a county is in federal-emergency-brake is performed here: https://github.com/dbvis-ukon/coronavis/blob/master/Crawler/crawl_rki_incidences.py#L141
    ---
    parameters:
      - name: from
        type: string
        description: A date in ISO format
        required: false
        default: 2020-01-01
        example: 2021-04-20
      - name: to
        type: string
        description: A date in ISO format
        required: false
        example: 2021-05-20
      - name: ids
        type: string[]
        description: ids (AGS) of the regions, comma separated
        required: false
        example: 08335,08336
    responses:
      200:
        description:
        schema:
          type: object
          properties:
            last_updated:
              type: string
              example: 2021-04-25T08:39:47
            last_checked:
              type: string
              example: 2021-04-26T02:28:39.523499+02:00
            data:
              type: array
              items:
                type: object
                properties:
                  id:
                    type: string
                    example: 08335
                    description: The AGS of the county
                  name:
                    type: string
                    example: <NAME>
                    description: The name of the county
                  timestamp:
                    type: string
                    example: 2021-04-25T00:00:00
                    description: The reference date
                  7_day_incidence:
                    type: number
                    format: float
                    example: 152.2851504514
                    description: The 7 day incidence based on the excel sheet
                  7_day_cases:
                    type: number
                    format: int
                    example: 436
                    description: The 7 day cases based on the excel sheet
                  ebrake100:
                    type: boolean
                    example: true
                    description: true iff the county is currently in the ebrake(100), false otherwise; may be null
                  ebrake150:
                    type: boolean
                    example: true
                    description: true iff the county is currently in the ebrake(150), false otherwise; may be null
                  ebrake165:
                    type: boolean
                    example: true
                    description: true iff the county is currently in the ebrake(165), false otherwise; may be null
                  holiday:
                    type: string
                    example: <NAME>
                    description: The name of the holiday (German) or null iff no holiday
    """
    # Default window: everything up to 10 days in the future, unless the
    # caller narrows it with ?from= / ?to=.
    from_time = '2020-01-01'
    to_time = (datetime.datetime.now() + datetime.timedelta(days=10)).isoformat()
    if request.args.get('from'):
        from_time = request.args.get('from')
    if request.args.get('to'):
        to_time = request.args.get('to')
    sql_ids = ''
    if request.args.get('ids'):
        ids = request.args.get('ids').split(',')
        sanitized_sql = []
        for id in ids:
            # Strip everything but digits so the values are safe to embed
            # in the SQL string below (AGS ids are numeric).
            id = re.sub('[^0-9]+', '', id)
            sanitized_sql.append(f"(id LIKE '{id}%')")
        sql_ids = f"AND ({' OR '.join(sanitized_sql)})"
    sql_stmt = f'''
    SELECT
        e.datenbestand,
        e.updated_at,
        e.id,
        e.timestamp,
        e."7_day_incidence",
        e."7_day_cases",
        e.ebrake100,
        e.ebrake165,
        (le.bez || ' ' || le.name) as le_name,
        e.ebrake150,
        e.holiday
    FROM ebrake_data e
    JOIN landkreise_extended le ON e.id = le.ids
    WHERE e.timestamp >= :fromtime
    AND e.timestamp <= :totime
    {sql_ids}
    '''
    res = db.engine.execute(text(sql_stmt), fromtime=from_time, totime=to_time).fetchall()
    entries = []
    for d in res:
        entries.append({
            'id': d[2],
            'timestamp': d[3].isoformat(),
            'holiday': d[10],
            '7_day_incidence': float(d[4]) if isinstance(d[4], Decimal) else None,
            # BUG FIX: the guard previously tested d[4] (the incidence
            # column) instead of d[5] (the cases column) before int(d[5]).
            '7_day_cases': int(d[5]) if isinstance(d[5], Decimal) else None,
            'ebrake100': d[6],
            'ebrake150': d[9],
            'ebrake165': d[7],
            'name': d[8]
        })
    # NOTE(review): res[0] raises IndexError when the query matches no rows
    # (e.g. an unknown id filter) — confirm whether that can happen upstream.
    return {
        'last_updated': res[0][0].isoformat(),
        'last_checked': res[0][1].isoformat(),
        'data': entries
    }, 200
|
[
"flask.Blueprint",
"cache.cache.cached",
"flask.request.args.get",
"sqlalchemy.text",
"datetime.timedelta",
"datetime.datetime.now",
"re.sub"
] |
[((213, 281), 'flask.Blueprint', 'Blueprint', (['"""ebrake"""', '__name__'], {'url_prefix': '"""/federal-emergency-brake"""'}), "('ebrake', __name__, url_prefix='/federal-emergency-brake')\n", (222, 281), False, 'from flask import Blueprint, request\n'), ((328, 367), 'cache.cache.cached', 'cache.cached', ([], {'key_prefix': 'make_cache_key'}), '(key_prefix=make_cache_key)\n', (340, 367), False, 'from cache import cache, make_cache_key\n'), ((3421, 3445), 'flask.request.args.get', 'request.args.get', (['"""from"""'], {}), "('from')\n", (3437, 3445), False, 'from flask import Blueprint, request\n'), ((3500, 3522), 'flask.request.args.get', 'request.args.get', (['"""to"""'], {}), "('to')\n", (3516, 3522), False, 'from flask import Blueprint, request\n'), ((3590, 3613), 'flask.request.args.get', 'request.args.get', (['"""ids"""'], {}), "('ids')\n", (3606, 3613), False, 'from flask import Blueprint, request\n'), ((3467, 3491), 'flask.request.args.get', 'request.args.get', (['"""from"""'], {}), "('from')\n", (3483, 3491), False, 'from flask import Blueprint, request\n'), ((3542, 3564), 'flask.request.args.get', 'request.args.get', (['"""to"""'], {}), "('to')\n", (3558, 3564), False, 'from flask import Blueprint, request\n'), ((3731, 3756), 're.sub', 're.sub', (['"""[^0-9]+"""', '""""""', 'id'], {}), "('[^0-9]+', '', id)\n", (3737, 3756), False, 'import re\n'), ((3346, 3369), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3367, 3369), False, 'import datetime\n'), ((3372, 3399), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (3390, 3399), False, 'import datetime\n'), ((3629, 3652), 'flask.request.args.get', 'request.args.get', (['"""ids"""'], {}), "('ids')\n", (3645, 3652), False, 'from flask import Blueprint, request\n'), ((4421, 4435), 'sqlalchemy.text', 'text', (['sql_stmt'], {}), '(sql_stmt)\n', (4425, 4435), False, 'from sqlalchemy import text\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test cases for checking that the secondary Storage usage is accounted. This is verified by checking the usage_event table
for a volume in 'Uploaded' state.
This test case does the following:
1.Creates an account and uploads a volume.
2.After the volume is uploaded successfully, connects to the database
3.From the database verifies that an entry is added to cloud.events table for the uploaded volume.
4.Cleans up the resources.
"""
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
from marvin.sshClient import SshClient
from marvin.codes import (BACKED_UP, PASS, FAIL)
import time
def verify_vm(self, vmid, state):
    # Helper taking a test-case instance as `self`: asserts that the VM with
    # `vmid` exists for the test account and is in the expected `state`.
    list_vm = list_virtual_machines(self.userapiclient,
                                    account=self.account.name,
                                    domainid=self.account.domainid,
                                    id=vmid
                                    )
    self.assertEqual(
        validateList(list_vm)[0],
        PASS,
        "Check List vm response for vmid: %s" %
        vmid)
    self.assertGreater(
        len(list_vm),
        0,
        "Check the list vm response for vm id: %s" %
        vmid)
    vm = list_vm[0]
    self.assertEqual(
        vm.id,
        str(vmid),
        "Vm deployed is different from the test")
    self.assertEqual(vm.state, state, "VM is in %s state" %state)
def uploadVolume(self):
    # upload a volume
    # Helper taking a test-case instance as `self`: uploads a volume using
    # the class-configured format/URL, waits for the upload to complete and
    # returns the new volume's id.
    self.debug("Upload volume format is '%s'" %self.uploadVolumeformat)
    self.testdata["configurableData"]["upload_volume"]["format"] = self.uploadVolumeformat
    self.testdata["configurableData"]["upload_volume"]["url"] = self.uploadvolumeUrl
    upload_volume = Volume.upload(
        self.apiclient,
        self.testdata["configurableData"]["upload_volume"],
        account=self.account.name,
        domainid=self.domain.id,
        zoneid=self.zone.id
    )
    upload_volume.wait_for_upload(self.apiclient)
    return upload_volume.id
def restartUsageServer(self):
    #Restart usage server
    # Opens an SSH session to the management server and restarts the
    # cloudstack-usage service.
    sshClient = SshClient(
        self.mgtSvrDetails["mgtSvrIp"],
        22,
        self.mgtSvrDetails["user"],
        self.mgtSvrDetails["passwd"]
    )
    command = "service cloudstack-usage restart"
    sshClient.execute(command)
    return
def checkUsage(self, uuid_upload_volume_id):
    # Helper taking a test-case instance as `self`: resolves the uploaded
    # volume's internal id from its uuid and asserts that the usage_event
    # table recorded a VOLUME.UPLOAD event for it.
    volume_id = self.dbclient.execute("SELECT id from cloud.volumes where uuid='%s';" % uuid_upload_volume_id)
    self.debug("Volume id of uploaded volume is= %s" %volume_id[0]);
    qryresult_after_usageServerExecution = self.dbclient.execute(
        "SELECT type FROM cloud.usage_event where resource_id = '%s';" % (volume_id[0]))
    self.debug("Usage Type is %s " % qryresult_after_usageServerExecution[0][0])
    self.assertEqual(qryresult_after_usageServerExecution[0][0], 'VOLUME.UPLOAD')
class TestSecondaryVolumeUsage(cloudstackTestCase):
    """Verifies that uploading a volume produces a VOLUME.UPLOAD usage event."""
    @classmethod
    def setUpClass(cls):
        # Build api/db clients, a dedicated account, offerings, and pick the
        # upload format/URL matching the zone's hypervisor.
        testClient = super(TestSecondaryVolumeUsage, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.dbclient = testClient.getDbConnection()
        cls.testdata = testClient.getParsedTestDataConfig()
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        cls.storagetype = 'shared'
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
        cls._cleanup = []
        # Create an account
        cls.account = Account.create(
            cls.apiclient,
            cls.testdata["account"],
            domainid=cls.domain.id
        )
        cls._cleanup.append(cls.account)
        # Create user api client of the account
        cls.userapiclient = testClient.getUserApiClient(
            UserName=cls.account.name,
            DomainName=cls.account.domain
        )
        # Create Service offering
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            cls.testdata["service_offering"],
        )
        cls._cleanup.append(cls.service_offering)
        cls.disk_offering = DiskOffering.create(
            cls.apiclient,
            cls.testdata["disk_offering"],
        )
        cls._cleanup.append(cls.disk_offering)
        cls.skip = 0
        hosts = list_hosts(
            cls.apiclient,
            type="Routing"
        )
        # NOTE(review): this effectively only inspects the first host — every
        # branch (including the final else) breaks out of the loop.
        for hypervisorhost in hosts:
            if hypervisorhost.hypervisor.lower() in ["xenserver"]:
                cls.uploadVolumeformat = "VHD"
                cls.uploadvolumeUrl = "http://download.cloudstack.org/releases/2.0.0/systemvm.vhd.bz2"
                break
            elif hypervisorhost.hypervisor.lower() in ["vmware"]:
                cls.uploadVolumeformat = "OVA"
                cls.uploadvolumeUrl = "http://download.cloudstack.org/releases/2.2.0/systemvm-redundant-router.ova"
                break
            elif hypervisorhost.hypervisor == "KVM":
                cls.uploadVolumeformat = "QCOW2"
                cls.uploadvolumeUrl = "http://download.cloudstack.org/releases/2.0.0/UbuntuServer-10-04-64bit.qcow2.bz2"
                break
            elif hypervisorhost.hypervisor == "LXC":
                # BUG FIX: this branch set cls.uploadvolumeformat (lowercase
                # "v"), while uploadVolume() and every other branch use
                # cls.uploadVolumeformat — LXC runs raised AttributeError.
                cls.uploadVolumeformat = "QCOW2"
                cls.uploadvolumeUrl = "http://download.cloudstack.org/releases/2.0.0/UbuntuServer-10-04-64bit.qcow2.bz2"
                break
            else:
                break
        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.testdata["ostype"])
        try:
            cls.vm = VirtualMachine.create(
                cls.userapiclient,
                cls.testdata["small"],
                templateid=cls.template.id,
                accountid=cls.account.name,
                domainid=cls.account.domainid,
                serviceofferingid=cls.service_offering.id,
                zoneid=cls.zone.id
            )
        except Exception as e:
            cls.tearDownClass()
            raise e
        return
    @classmethod
    def tearDownClass(cls):
        # Release every resource registered in _cleanup during setUpClass.
        try:
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
    @attr(tags=["basic", "advanced"], required_hardware="true")
    def test_01_SecondaryUsageUploadedVolume(self):
        # Upload a volume and assert the usage event was recorded.
        try:
            uploaded_volume_id_uuid = uploadVolume(self)
            checkUsage(self, uploaded_volume_id_uuid)
        except Exception as e:
            self.tearDown()
            raise e
        return
|
[
"marvin.sshClient.SshClient",
"nose.plugins.attrib.attr"
] |
[((2972, 3079), 'marvin.sshClient.SshClient', 'SshClient', (["self.mgtSvrDetails['mgtSvrIp']", '(22)', "self.mgtSvrDetails['user']", "self.mgtSvrDetails['passwd']"], {}), "(self.mgtSvrDetails['mgtSvrIp'], 22, self.mgtSvrDetails['user'],\n self.mgtSvrDetails['passwd'])\n", (2981, 3079), False, 'from marvin.sshClient import SshClient\n'), ((7252, 7310), 'nose.plugins.attrib.attr', 'attr', ([], {'tags': "['basic', 'advanced']", 'required_hardware': '"""true"""'}), "(tags=['basic', 'advanced'], required_hardware='true')\n", (7256, 7310), False, 'from nose.plugins.attrib import attr\n')]
|
# Generated by Django 3.0.3 on 2020-03-07 08:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the `owner` model and the
    # `owner_active` join model (generic FK to any active via contenttypes).
    # Do not edit field definitions by hand; generate a new migration instead.
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('munactives', '0003_auto_20200209_1142'),
    ]
    operations = [
        migrations.CreateModel(
            name='owner',
            fields=[
                ('owner_id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=100)),
                ('phone', models.IntegerField()),
                ('address', models.TextField()),
                ('fio', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='owner_active',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active_id', models.PositiveIntegerField()),
                # active_type + active_id form a contenttypes generic reference.
                ('active_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contenttypes.ContentType')),
                ('owner_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='munactives.owner')),
            ],
        ),
    ]
|
[
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.PositiveIntegerField",
"django.db.models.AutoField",
"django.db.models.IntegerField"
] |
[((430, 484), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (449, 484), False, 'from django.db import migrations, models\n'), ((512, 544), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (528, 544), False, 'from django.db import migrations, models\n'), ((573, 594), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (592, 594), False, 'from django.db import migrations, models\n'), ((625, 643), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (641, 643), False, 'from django.db import migrations, models\n'), ((670, 688), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (686, 688), False, 'from django.db import migrations, models\n'), ((826, 919), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (842, 919), False, 'from django.db import migrations, models\n'), ((948, 977), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (975, 977), False, 'from django.db import migrations, models\n'), ((1012, 1110), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""contenttypes.ContentType"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'contenttypes.ContentType')\n", (1029, 1110), False, 'from django.db import migrations, models\n'), ((1137, 1227), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""munactives.owner"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'munactives.owner')\n", (1154, 1227), False, 'from django.db import migrations, models\n')]
|
from annotypes import Anno, Array, Union, Sequence, TYPE_CHECKING
from enum import Enum
import numpy as np
from malcolm.core import Table, Future, Context, PartRegistrar, DEFAULT_TIMEOUT
from malcolm.modules import scanning
if TYPE_CHECKING:
from typing import List, Any
class AttributeDatasetType(Enum):
    """Kinds of attribute dataset; members serialize as their string values."""
    DETECTOR = "detector"
    MONITOR = "monitor"
    POSITION = "position"
class DatasetType(Enum):
    """Kinds of dataset; members serialize as their string values."""
    PRIMARY = "primary"
    SECONDARY = "secondary"
    MONITOR = "monitor"
    POSITION_SET = "position_set"
    POSITION_VALUE = "position_value"
class StatisticsName(Enum):
    """Per-frame statistics names as reported by the detector plugin."""
    MIN = "MIN_VALUE"  # Minimum counts in any element
    MIN_X = "MIN_X"  # X position of minimum counts
    MIN_Y = "MIN_Y"  # Y position of minimum counts
    MAX = "MAX_VALUE"  # Maximum counts in any element
    MAX_X = "MAX_X"  # X position of maximum counts
    MAX_Y = "MAX_Y"  # Y position of maximum counts
    MEAN = "MEAN_VALUE"  # Mean counts of all elements
    SIGMA = "SIGMA_VALUE"  # Sigma of all elements
    SUM = "TOTAL"  # Sum of all elements
    NET = "NET"  # Sum of all elements not in background region
# Each `with Anno(...)` block attaches the given description to the Array
# alias defined inside it (annotypes idiom); the U* aliases below accept
# either the annotated Array or a plain sequence.
with Anno("Dataset names"):
    ANameArray = Array[str]
with Anno("Filenames of HDF files relative to fileDir"):
    AFilenameArray = Array[str]
with Anno("Types of dataset"):
    ATypeArray = Array[DatasetType]
with Anno("Rank (number of dimensions) of the dataset"):
    ARankArray = Array[np.int32]
with Anno("Dataset paths within HDF files"):
    APathArray = Array[str]
with Anno("UniqueID array paths within HDF files"):
    AUniqueIDArray = Array[str]
UNameArray = Union[ANameArray, Sequence[str]]
UFilenameArray = Union[AFilenameArray, Sequence[str]]
UTypeArray = Union[ATypeArray, Sequence[DatasetType]]
URankArray = Union[ARankArray, Sequence[np.int32]]
UPathArray = Union[APathArray, Sequence[str]]
UUniqueIDArray = Union[AUniqueIDArray, Sequence[str]]
class DatasetTable(Table):
    """Table describing datasets written to HDF files (one row per dataset)."""
    # This will be serialized so we need type to be called that
    # noinspection PyShadowingBuiltins
    def __init__(self,
                 name,  # type: UNameArray
                 filename,  # type: UFilenameArray
                 type,  # type: UTypeArray
                 rank,  # type: URankArray
                 path,  # type: UPathArray
                 uniqueid,  # type: UUniqueIDArray
                 ):
        # type: (...) -> None
        # Each column is coerced through its Anno type for validation.
        self.name = ANameArray(name)
        self.filename = AFilenameArray(filename)
        self.type = ATypeArray(type)
        self.rank = ARankArray(rank)
        self.path = APathArray(path)
        self.uniqueid = AUniqueIDArray(uniqueid)
class ADBaseActions(object):
    """Common setup/arm/wait/abort actions performed on a detector child
    block addressed by ``mri``
    """
    def __init__(self, mri):
        # type: (str) -> None
        self.mri = mri
        # The arrayCounter value at which the acquisition is complete
        self.done_when_reaches = 0
        # CompletedSteps = arrayCounter + self.uniqueid_offset
        self.uniqueid_offset = 0
        # Completes when the detector's start call returns
        self.start_future = None  # type: Future

    def setup_detector_async(self, context, completed_steps, steps_to_do,
                             **kwargs):
        # type: (Context, int, int, **Any) -> List[Future]
        """Asynchronously configure the detector for the next batch of
        steps, returning the futures of the attribute puts"""
        context.unsubscribe_all()
        child = context.block_view(self.mri)
        if completed_steps == 0:
            # Initial configure: restart the array counter from zero
            array_counter = 0
            self.done_when_reaches = steps_to_do
        else:
            # Rewinding or setting up for another batch: continue from a
            # uniqueID that has not been produced yet
            array_counter = self.done_when_reaches
            self.done_when_reaches += steps_to_do
        self.uniqueid_offset = completed_steps - array_counter
        defaults = dict(
            arrayCounter=array_counter,
            imageMode="Multiple",
            numImages=steps_to_do,
            arrayCallbacks=True)
        for attr, value in defaults.items():
            # Explicit kwargs win; attributes the child lacks are skipped
            if attr not in kwargs and attr in child:
                kwargs[attr] = value
        return child.put_attribute_values_async(kwargs)

    def setup_detector(self, context, completed_steps, steps_to_do, **kwargs):
        # type: (Context, int, int, **Any) -> None
        """Configure the detector and wait for the puts to complete"""
        futures = self.setup_detector_async(
            context, completed_steps, steps_to_do, **kwargs)
        context.wait_all_futures(futures)

    def arm_detector(self, context):
        # type: (Context) -> None
        """Start the detector and wait until it reports it is acquiring"""
        child = context.block_view(self.mri)
        self.start_future = child.start_async()
        child.when_value_matches("acquiring", True, timeout=DEFAULT_TIMEOUT)

    def wait_for_detector(self, context, registrar):
        # type: (Context, PartRegistrar) -> None
        """Report progress as frames arrive and wait for the acquisition
        to finish"""
        child = context.block_view(self.mri)
        child.arrayCounterReadback.subscribe_value(
            self.update_completed_steps, registrar)
        context.wait_all_futures(self.start_future)
        # Now wait to make sure any update_completed_steps come in, just in
        # case there are any stray frames that haven't made it through yet
        child.when_value_matches(
            "arrayCounterReadback", self.done_when_reaches,
            timeout=DEFAULT_TIMEOUT)

    def abort_detector(self, context):
        # type: (Context) -> None
        """Stop the detector and wait until it is no longer acquiring"""
        child = context.block_view(self.mri)
        child.stop()
        # Stop is a put to a busy record which returns immediately. The
        # detector might take a while to actually stop, so use the acquiring
        # pv (the same asyn parameter that stop() pokes) to check that it
        # has finished
        child.when_value_matches("acquiring", False, timeout=DEFAULT_TIMEOUT)

    def update_completed_steps(self, value, registrar):
        # type: (int, PartRegistrar) -> None
        """Subscription callback: translate arrayCounter into completed
        steps and report progress"""
        completed_steps = value + self.uniqueid_offset
        registrar.report(scanning.infos.RunProgressInfo(completed_steps))
|
[
"malcolm.modules.scanning.infos.RunProgressInfo",
"annotypes.Anno"
] |
[((1130, 1151), 'annotypes.Anno', 'Anno', (['"""Dataset names"""'], {}), "('Dataset names')\n", (1134, 1151), False, 'from annotypes import Anno, Array, Union, Sequence, TYPE_CHECKING\n'), ((1186, 1236), 'annotypes.Anno', 'Anno', (['"""Filenames of HDF files relative to fileDir"""'], {}), "('Filenames of HDF files relative to fileDir')\n", (1190, 1236), False, 'from annotypes import Anno, Array, Union, Sequence, TYPE_CHECKING\n'), ((1275, 1299), 'annotypes.Anno', 'Anno', (['"""Types of dataset"""'], {}), "('Types of dataset')\n", (1279, 1299), False, 'from annotypes import Anno, Array, Union, Sequence, TYPE_CHECKING\n'), ((1342, 1392), 'annotypes.Anno', 'Anno', (['"""Rank (number of dimensions) of the dataset"""'], {}), "('Rank (number of dimensions) of the dataset')\n", (1346, 1392), False, 'from annotypes import Anno, Array, Union, Sequence, TYPE_CHECKING\n'), ((1432, 1470), 'annotypes.Anno', 'Anno', (['"""Dataset paths within HDF files"""'], {}), "('Dataset paths within HDF files')\n", (1436, 1470), False, 'from annotypes import Anno, Array, Union, Sequence, TYPE_CHECKING\n'), ((1505, 1550), 'annotypes.Anno', 'Anno', (['"""UniqueID array paths within HDF files"""'], {}), "('UniqueID array paths within HDF files')\n", (1509, 1550), False, 'from annotypes import Anno, Array, Union, Sequence, TYPE_CHECKING\n'), ((5918, 5965), 'malcolm.modules.scanning.infos.RunProgressInfo', 'scanning.infos.RunProgressInfo', (['completed_steps'], {}), '(completed_steps)\n', (5948, 5965), False, 'from malcolm.modules import scanning\n')]
|
"""
MIT License
Copyright (c) 2021-present Defxult#8269
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import inspect
import random
import re
from typing import List, NoReturn, Optional, Sequence, Union
import discord
from discord.ext.commands import Context
from . import ViewButton
from .abc import _DEFAULT_STYLE, _BaseMenu, _PageController
from .decorators import ensure_not_primed
from .errors import *
class ViewMenu(_BaseMenu):
"""A class to create a discord pagination menu using :class:`discord.ui.View`
Parameters
----------
ctx: :class:`discord.ext.commands.Context`
The Context object. You can get this using a command or if you're in a `discord.on_message` event
menu_type: :class:`int`
The configuration of the menu. Class variables :attr:`ViewMenu.TypeEmbed`, :attr:`ViewMenu.TypeEmbedDynamic`, or :attr:`ViewMenu.TypeText`
Kwargs
------
all_can_click: :class:`bool`
Sets if everyone is allowed to control when pages are 'turned' when buttons are pressed (defaults to `False`)
allowed_mentions: :class:`discord.AllowedMentions`
Controls the mentions being processed in the menu message (defaults to :class:`discord.AllowedMentions(everyone=False, users=True, roles=False, replied_user=True)`).
Not valid for `ViewMenu` with a `menu_type` of `TypeText`
custom_embed: :class:`discord.Embed`
Embed object to use when adding data with :meth:`ViewMenu.add_row()`. Used for styling purposes only (:attr:`ViewMenu.TypeEmbedDynamic` only/defaults to :class:`None`)
delete_interactions: :class:`bool`
Delete the prompt message by the bot and response message by the user when asked what page they would like to go to when using :attr:`ViewButton.ID_GO_TO_PAGE` (defaults to `True`)
delete_on_timeout: :class:`bool`
Delete the menu when it times out (defaults to `False`) If `True`, :attr:`disable_buttons_on_timeout` and :attr:`remove_buttons_on_timeout` will not execute regardless of if they are `True`. This takes priority over those actions
disable_buttons_on_timeout: :class:`bool`
Disable the buttons on the menu when the menu times out (defaults to `True`) If :attr:`delete_on_timeout` is `True`, this will be overridden
name: :class:`str`
A name you can set for the menu (defaults to :class:`None`)
only_roles: List[:class:`discord.Role`]
If set, only members with any of the given roles are allowed to control the menu. The menu owner can always control the menu (defaults to :class:`None`)
remove_buttons_on_timeout: :class:`bool`
Remove the buttons on the menu when the menu times out (defaults to `False`) If :attr:`disable_buttons_on_timeout` is `True`, this will be overridden
rows_requested: :class:`int`
The amount of information per :meth:`ViewMenu.add_row()` you would like applied to each embed page (:attr:`ViewMenu.TypeEmbedDynamic` only/defaults to :class:`None`)
show_page_director: :class:`bool`
        Shown at the bottom of each embed page. "Page 1/20" (defaults to `True`)
style: :class:`str`
A custom page director style you can select. "$" represents the current page, "&" represents the total amount of pages (defaults to "Page $/&") Example: `ViewMenu(ctx, ..., style='On $ out of &')`
timeout: Union[:class:`int`, :class:`float`, :class:`None`]
The timer for when the menu times out. Can be :class:`None` for no timeout (defaults to `60.0`)
wrap_in_codeblock: :class:`str`
The discord codeblock language identifier to wrap your data in (:attr:`ViewMenu.TypeEmbedDynamic` only/defaults to :class:`None`). Example: `ViewMenu(ctx, ..., wrap_in_codeblock='py')`
"""
def __init__(self, ctx: Context, *, menu_type: int, **kwargs):
super().__init__(ctx, menu_type, **kwargs)
# kwargs
self.disable_buttons_on_timeout: bool = kwargs.get('disable_buttons_on_timeout', True)
self.remove_buttons_on_timeout: bool = kwargs.get('remove_buttons_on_timeout', False)
self.__timeout: Union[int, float, None] = kwargs.get('timeout', 60.0) # property get/set
# view
self._view = discord.ui.View(timeout=self.__timeout)
self._view.on_timeout = self._on_dpy_view_timeout
self._view.on_error = self._on_dpy_view_error
def __repr__(self):
cls = self.__class__
return f'<ViewMenu name={self.name!r} owner={str(self._ctx.author)!r} is_running={self._is_running} timeout={self.timeout} menu_type={cls._get_menu_type(self._menu_type)!r}>'
async def _on_dpy_view_timeout(self) -> None:
self._menu_timed_out = True
await self.stop(delete_menu_message=self.delete_on_timeout, remove_buttons=self.remove_buttons_on_timeout, disable_buttons=self.disable_buttons_on_timeout)
    async def _on_dpy_view_error(self, error: Exception, item: discord.ui.Item, inter: discord.Interaction) -> NoReturn:
        """|coro| Assigned to the view's `on_error`: stop the menu, then propagate the error to the caller"""
        try:
            raise error
        finally:
            # the menu is stopped even though the error is re-raised
            await self.stop()
def _get_new_view(self) -> discord.ui.View:
"""Returns a new :class:`discord.ui.View` object with the `timeout` parameter already set along with `on_timeout` and `on_error`"""
new_view = discord.ui.View(timeout=self.timeout)
new_view.on_timeout = self._on_dpy_view_timeout
new_view.on_error = self._on_dpy_view_error
return new_view
    @property
    def timeout(self):
        """Union[:class:`int`, :class:`float`, :class:`None`]: The menu's timeout value (:class:`None` means no timeout)"""
        return self.__timeout

    @timeout.setter
    def timeout(self, value: Union[int, float, None]) -> None:
        """A property getter/setter for kwarg `timeout`"""
        if isinstance(value, (int, float, type(None))):
            # keep the underlying discord View's timeout in sync
            self._view.timeout = value
            self.__timeout = value
        else:
            raise IncorrectType(f'"timeout" expected int, float, or None, got {value.__class__.__name__}')
def _check(self, inter: discord.Interaction) -> None:
"""Base menu button interaction check"""
author_pass = False
if self._ctx.author.id == inter.user.id: author_pass = True
if self.only_roles: self.all_can_click = False
if self.only_roles:
for role in self.only_roles:
if role in inter.user.roles:
author_pass = True
break
if self.all_can_click:
author_pass = True
return author_pass
async def _handle_event(self, button: ViewButton) -> None:
"""|coro| If an event is set, disable/remove the buttons from the menu when the click requirement has been met"""
if button.event:
event_type = button.event.event_type
event_value = button.event.value
if button.total_clicks == event_value:
if event_type == ViewButton.Event._DISABLE:
self.disable_button(button)
elif event_type == ViewButton.Event._REMOVE:
self.remove_button(button)
await self.refresh_menu_buttons()
def _remove_director(self, page: Union[discord.Embed, str]) -> Union[discord.Embed, str]:
"""Removes the page director contents from the page. This is used for :meth:`ViewMenu.update()`"""
style = self.style
if style is None:
style = _DEFAULT_STYLE
escaped_style = re.escape(style)
STYLE_PATTERN = escaped_style.replace(r'\$', r'\d{1,}').replace(r'\&', r'\d{1,}')
STYLE_STR_PATTERN = escaped_style.replace(r'\$', r'\d{1,}').replace(r'\&', r'(\d{1,}.*)')
if self.show_page_director:
if isinstance(page, discord.Embed):
if page.footer.text:
DIRECTOR_PATTERN = STYLE_PATTERN + r':? '
if re.search(DIRECTOR_PATTERN, page.footer.text):
page.set_footer(text=re.sub(DIRECTOR_PATTERN, '', page.footer.text), icon_url=page.footer.icon_url)
elif isinstance(page, str):
if re.search(STYLE_STR_PATTERN, page):
return re.sub(STYLE_STR_PATTERN, '', page).rstrip('\n')
else:
return page
else:
raise TypeError(f'_remove_director parameter "page" expected discord.Embed or str, got {page.__class__.__name__}')
else:
return page
    async def update(self, *, new_pages: Union[List[Union[discord.Embed, str]], None], new_buttons: Union[List[ViewButton], None]) -> None:
        """|coro|
        When the menu is running, update the pages or buttons
        Parameters
        ----------
        new_pages: List[Union[:class:`discord.Embed`, :class:`str`]]
            Pages to *replace* the current pages with. If the menus current `menu_type` is :attr:`ViewMenu.TypeEmbed`, only :class:`discord.Embed` can be used. If :attr:`ViewMenu.TypeText`, only :class:`str` can be used. If you
            don't want to replace any pages, set this to :class:`None`
        new_buttons: List[:class:`ViewButton`]
            Buttons to *replace* the current buttons with. Can be an empty list if you want the updated menu to have no buttons. Can also be set to :class:`None` if you don't want to replace any buttons
        Raises
        ------
        - `ViewMenuException`: The :class:`ViewButton` custom_id was not recognized or a :class:`ViewButton` with that ID has already been added
        - `TooManyButtons`: There are already 25 buttons on the menu
        - `IncorrectType`: The values in :param:`new_pages` did not match the :class:`ViewMenu`'s `menu_type`. An attempt to use this method when the `menu_type` is :attr:`ViewMenu.TypeEmbedDynamic` which is not allowed. Or
            all :param:`new_buttons` values were not of type :class:`ViewButton`
        """
        if self._is_running:
            # ----------------------- CHECKS -----------------------
            # Note: button count > 25 check is done in :meth:`ViewMenu.add_button`
            if new_pages is None and new_buttons is None:
                return
            if self._menu_type not in (ViewMenu.TypeEmbed, ViewMenu.TypeText):
                raise IncorrectType('Updating a menu is only valid for a menu with menu_type ViewMenu.TypeEmbed or ViewMenu.TypeText')
            if self._menu_type == ViewMenu.TypeEmbed and new_pages:
                if not all([isinstance(page, discord.Embed) for page in new_pages]):
                    raise IncorrectType('When updating the menu, all values must be of type discord.Embed because the current menu_type is ViewMenu.TypeEmbed')
            if self._menu_type == ViewMenu.TypeText and new_pages:
                if not all([isinstance(page, str) for page in new_pages]):
                    raise IncorrectType('When updating the menu, all values must be of type str because the current menu_type is ViewMenu.TypeText')
            if isinstance(new_pages, list) and len(new_pages) == 0:
                raise ViewMenuException('new_pages cannot be an empty list. Must be None if no new pages should be added')
            # ----------------------- END CHECKS -----------------------
            if new_pages is not None:
                if self._menu_type == ViewMenu.TypeEmbed:
                    # strip any existing page director text before re-applying it
                    for new_embed_page in new_pages:
                        self._remove_director(new_embed_page)
                    self._pages = new_pages.copy()
                    self._pc = _PageController(new_pages)
                    self._refresh_page_director_info(ViewMenu.TypeEmbed, self._pages)
                else:
                    removed_director_info = []
                    for new_str_page in new_pages.copy():
                        removed_director_info.append(self._remove_director(new_str_page))
                    self._pages = removed_director_info.copy()
                    self._pc = _PageController(self._pages)
                    self._refresh_page_director_info(ViewMenu.TypeText, self._pages)
            else:
                # page controller needs to be reset because even though there are no new pages. the page index is still in the location BEFORE the update
                # EXAMPLE: 5 page menu > click Next button (on page 2) > update menu no new pages > click Next button (on page 3)
                # that makes no sense and resetting the page controller fixes that issue
                self._pc = _PageController(self._pages)
            kwargs_to_pass = {}
            # replace the stopped view with a fresh one so the (re-)added buttons attach to it
            self._view.stop()
            self._view = self._get_new_view()
            # re-using current buttons
            if isinstance(new_buttons, type(None)):
                original_buttons = self._buttons.copy()
                self.remove_all_buttons()
                for current_btns in original_buttons:
                    self._bypass_primed = True
                    self.add_button(current_btns)
            # using new buttons
            elif isinstance(new_buttons, list):
                self.remove_all_buttons()
                if len(new_buttons) >= 1: # empty lists mean all buttons will be removed
                    for new_btn in new_buttons:
                        self._bypass_primed = True
                        self.add_button(new_btn)
            kwargs_to_pass['view'] = self._view
            if self._menu_type == ViewMenu.TypeEmbed:
                kwargs_to_pass['embed'] = self._pages[0]
            else:
                kwargs_to_pass['content'] = self._pages[0]
            await self._msg.edit(**kwargs_to_pass)
def randomize_button_styles(self) -> None:
"""Set all buttons currently registered to the menu to a random :class:`discord.ButtonStyle` excluding link buttons"""
all_styles = (
discord.ButtonStyle.blurple,
discord.ButtonStyle.green,
discord.ButtonStyle.gray,
discord.ButtonStyle.red
)
for btn in [button for button in self._buttons if button.style not in (discord.ButtonStyle.link, discord.ButtonStyle.url)]:
btn.style = random.choice(all_styles)
def set_button_styles(self, style: discord.ButtonStyle) -> None:
"""Set all buttons currently registered to the menu to the specified :class:`discord.ButtonStyle` excluding link buttons
Parameters
----------
style: :class:`discord.ButtonStyle`
The button style to set
"""
for btn in [button for button in self._buttons if button.style not in (discord.ButtonStyle.link, discord.ButtonStyle.url)]:
btn.style = style
async def refresh_menu_buttons(self) -> None:
"""|coro|
When the menu is running, update the message to reflect the buttons that were removed, enabled, or disabled
"""
if self._is_running:
current_buttons = self._buttons.copy()
self.remove_all_buttons()
self._view.stop()
self._view = self._get_new_view()
for btn in current_buttons:
self._bypass_primed = True
self.add_button(btn)
await self._msg.edit(view=self._view)
def remove_button(self, button: ViewButton) -> None:
"""Remove a button from the menu
Parameters
----------
button: :class:`ViewButton`
The button to remove
Raises
------
- `ButtonNotFound`: The provided button was not found in the list of buttons on the menu
"""
if button in self._buttons:
button._menu = None
self._buttons.remove(button)
self._view.remove_item(button)
else:
raise ButtonNotFound('Cannot remove a button that is not registered')
def remove_all_buttons(self) -> None:
"""Remove all buttons from the menu"""
for btn in self._buttons:
btn._menu = None
self._buttons.clear()
self._view.clear_items()
def disable_button(self, button: ViewButton) -> None:
"""Disable a button on the menu
Parameters
----------
button: :class:`ViewButton`
The button to disable
Raises
------
- `ButtonNotFound`: The provided button was not found in the list of buttons on the menu
"""
if button in self._buttons:
idx = self._buttons.index(button)
self._buttons[idx].disabled = True
else:
raise ButtonNotFound('Cannot disable a button that is not registered')
def disable_all_buttons(self) -> None:
"""Disable all buttons on the menu"""
for btn in self._buttons:
btn.disabled = True
def enable_button(self, button: ViewButton) -> None:
"""Enable the specified button
Parameters
----------
button: :class:`ViewButton`
The button to enable
Raises
------
- `ButtonNotFound`: The provided button was not found in the list of buttons on the menu
"""
if button in self._buttons:
idx = self._buttons.index(button)
self._buttons[idx].disabled = False
else:
raise ButtonNotFound('Cannot enable a button that is not registered')
def enable_all_buttons(self) -> None:
"""Enable all buttons on the menu"""
for btn in self._buttons:
btn.disabled = False
    def _button_add_check(self, button: ViewButton) -> None:
        """A set of checks to ensure the proper button is being added

        Raises
        ------
        - `ViewMenuException`: The buttons custom_id was not recognized, a button with that custom_id was already added, or a skip button is missing its "skip" kwarg
        - `MenuSettingsMismatch`: A ViewButton.ID_CUSTOM_EMBED button with a followup embed was used while the menu_type is ViewMenu.TypeText
        - `TooManyButtons`: There are already 25 buttons on the menu
        - `IncorrectType`: Parameter :param:`button` was not of type :class:`ViewButton`
        """
        # ensure they are using only the ViewButton and not ReactionMenus :class:`ReactionButton`
        if isinstance(button, ViewButton):
            # ensure the button custom_id is a valid one, but skip this check if its a link button because they dont have custom_ids
            if button.style == discord.ButtonStyle.link:
                pass
            else:
                # Note: this needs to be an re search because of buttons with an ID of "[ID]_[unique ID]"
                if not re.search(ViewButton._RE_IDs, button.custom_id):
                    raise ViewMenuException(f'ViewButton custom_id {button.custom_id!r} was not recognized')
            # ensure there are no duplicate custom_ids for the base navigation buttons
            # Note: there's no need to have a check for buttons that are not navigation buttons because they have a unique ID and duplicates of those are allowed
            active_button_ids: List[str] = [btn.custom_id for btn in self._buttons]
            if button.custom_id in active_button_ids:
                if not all([button.custom_id is None, button.style == discord.ButtonStyle.link]):
                    name = ViewButton._get_id_name_from_id(button.custom_id)
                    raise ViewMenuException(f'A ViewButton with custom_id {name!r} has already been added')
            # if the menu_type is TypeText, disallow custom embed buttons
            if button.style != discord.ButtonStyle.link and self._menu_type == ViewMenu.TypeText:
                if button.custom_id == ViewButton.ID_CUSTOM_EMBED:
                    if button.followup and button.followup.embed is not None:
                        raise MenuSettingsMismatch('ViewButton with custom_id ViewButton.ID_CUSTOM_EMBED cannot be used when the menu_type is ViewMenu.TypeText')
            # if using a skip button, ensure the skip attribute was set
            if button.custom_id == ViewButton.ID_SKIP and button.skip is None:
                raise ViewMenuException('When attempting to add a button custom_id ViewButton.ID_SKIP, the "skip" kwarg was not set')
            # ensure there are no more than 25 buttons
            if len(self._buttons) >= 25:
                raise TooManyButtons('ViewMenu cannot have more than 25 buttons (discord limitation)')
        else:
            raise IncorrectType(f'When adding a button to the ViewMenu, the button type must be ViewButton, got {button.__class__.__name__}')
def _maybe_unique_id(self, button: ViewButton) -> None:
"""Create a unique ID if the `custom_id` for buttons that are allowed to have duplicates
Note ::
This excludes link buttons because they don't have a `custom_id`
"""
if button.custom_id in (ViewButton.ID_CALLER, ViewButton.ID_SEND_MESSAGE, ViewButton.ID_CUSTOM_EMBED, ViewButton.ID_SKIP):
button.custom_id = f'{button.custom_id}_{id(button)}'
@ensure_not_primed
def add_button(self, button: ViewButton) -> None:
"""Add a button to the menu
Parameters
----------
button: :class:`ViewButton`
The button to add
Raises
------
- `MenuAlreadyRunning`: Attempted to call method after the menu has already started
- `MenuSettingsMismatch`: The buttons custom_id was set as :attr:`ViewButton.ID_CUSTOM_EMBED` but the `menu_type` is :attr:`ViewMenu.TypeText`
- `ViewMenuException`: The custom_id for the button was not recognized or a button with that custom_id has already been added
- `TooManyButtons`: There are already 25 buttons on the menu
- `IncorrectType`: Parameter :param:`button` was not of type :class:`ViewButton`
"""
self._button_add_check(button)
self._maybe_unique_id(button)
button._menu = self
self._view.add_item(button)
self._buttons.append(button)
@ensure_not_primed
def add_buttons(self, buttons: Sequence[ViewButton]) -> None:
"""Add multiple buttons to the menu at once
Parameters
----------
buttons: Sequence[:class:`ViewButton`]
The buttons to add
Raises
------
- `MenuAlreadyRunning`: Attempted to call method after the menu has already started
- `MenuSettingsMismatch`: One of the buttons `custom_id` was set as :attr:`ViewButton.ID_CUSTOM_EMBED` but the `menu_type` is :attr:`ViewMenu.TypeText`
- `ViewMenuException`: The `custom_id` for a button was not recognized or a button with that `custom_id` has already been added
- `TooManyButtons`: There are already 25 buttons on the menu
- `IncorrectType`: One or more values supplied in parameter :param:`buttons` was not of type :class:`ViewButton`
"""
for btn in buttons:
self.add_button(btn)
def get_button(self, identity: str, *, search_by: str='label') -> List[ViewButton]:
"""Get a button that has been registered to the menu by it's label, custom_id, or name
Parameters
----------
identity: :class:`str`
The button label, custom_id, or name
search_by: :class:`str`
How to search for the button. If "label", it's searched by button labels. If "id", it's searched by it's custom_id.
If "name", it's searched by button names
Returns
-------
List[:class:`ViewButton`]: The button(s) matching the given identity
Raises
------
- `ViewMenuException`: Parameter :param:`search_by` was not "label", "id", or "name"
"""
identity = str(identity)
search_by = str(search_by).lower()
if search_by == 'label':
matched_labels: List[ViewButton] = [btn for btn in self._buttons if btn.label and btn.label == identity]
return matched_labels
elif search_by == 'id':
matched_ids: List[ViewButton] = [btn for btn in self._buttons if btn.custom_id and btn.custom_id.startswith(identity)]
return matched_ids
elif search_by == 'name':
matched_names: List[ViewButton] = [btn for btn in self._buttons if btn.name and btn.name == identity]
return matched_names
else:
raise ViewMenuException(f'Parameter "search_by" expected "label", "id", or "name", got {search_by!r}')
    async def _paginate(self, button: ViewButton, inter: discord.Interaction) -> None:
        """|coro| When the button is pressed, handle the pagination process.

        Verifies the presser via :meth:`_check`, records the press on the
        button, handles any attached event, then dispatches on the buttons
        custom_id: base navigation IDs edit the message to another page,
        ID_GO_TO_PAGE prompts the user for a page number, ID_END_SESSION
        stops the menu, and ID_CALLER / ID_SEND_MESSAGE / ID_CUSTOM_EMBED /
        ID_SKIP run their followup behavior. Finally relays the press via
        :meth:`_contact_relay`.
        """
        if not self._check(inter):
            await inter.response.defer()
            return
        button._update_statistics(inter.user)
        await self._handle_event(button)
        if button.custom_id == ViewButton.ID_PREVIOUS_PAGE:
            await inter.response.edit_message(**self._determine_kwargs(self._pc.prev()))
        elif button.custom_id == ViewButton.ID_NEXT_PAGE:
            await inter.response.edit_message(**self._determine_kwargs(self._pc.next()))
        elif button.custom_id == ViewButton.ID_GO_TO_FIRST_PAGE:
            await inter.response.edit_message(**self._determine_kwargs(self._pc.first_page()))
        elif button.custom_id == ViewButton.ID_GO_TO_LAST_PAGE:
            await inter.response.edit_message(**self._determine_kwargs(self._pc.last_page()))
        elif button.custom_id == ViewButton.ID_GO_TO_PAGE:
            await inter.response.defer()
            prompt: discord.Message = await self._msg.channel.send(f'{inter.user.display_name}, what page would you like to go to?')
            try:
                selection_message: discord.Message = await self._ctx.bot.wait_for('message', check=lambda m: all([m.channel.id == self._msg.channel.id, m.author.id == inter.user.id]), timeout=self.timeout)
                page = int(selection_message.content)
            except asyncio.TimeoutError:
                return
            except ValueError:
                return
            else:
                # only navigate when the reply is a valid 1-based page number
                if 1 <= page <= len(self._pages):
                    self._pc.index = page - 1
                    await self._msg.edit(**self._determine_kwargs(self._pc.current_page))
                    if self.delete_interactions:
                        await prompt.delete()
                        await selection_message.delete()
        elif button.custom_id == ViewButton.ID_END_SESSION:
            await self.stop(delete_menu_message=True)
        else:
            # these IDs carry a unique suffix (see :meth:`_maybe_unique_id`), hence startswith
            if button.custom_id.startswith(ViewButton.ID_CALLER):
                if button.followup is None or button.followup.details is None:
                    error_msg = 'ViewButton custom_id was set as ViewButton.ID_CALLER but the "followup" kwarg for that ViewButton was not set ' \
                        'or method ViewButton.Followup.set_caller_details(..) was not called to set the caller information'
                    raise ViewMenuException(error_msg)
                func = button.followup.details.func
                args = button.followup.details.args
                kwargs = button.followup.details.kwargs
                # reply now because we don't know how long the users function will take to execute
                await inter.response.defer()
                try:
                    if asyncio.iscoroutinefunction(func): await func(*args, **kwargs)
                    else: func(*args, **kwargs)
                except Exception as err:
                    call_failed_error_msg = inspect.cleandoc(
                        f"""
                        The button with custom_id ViewButton.ID_CALLER with the label "{button.label}" raised an error during it's execution
                        -> {err.__class__.__name__}: {err}
                        """
                    )
                    raise ViewMenuException(call_failed_error_msg)
                else:
                    if button.followup:
                        # if this executes, the user doesn't want to respond with a message, only with the caller function (already called ^)
                        if all((button.followup.content is None, button.followup.embed is None, button.followup.file is None)):
                            pass
                        else:
                            followup_kwargs = button.followup._to_dict()
                            # inter.followup() has no attribute delete_after/details, so manually delete the key/val pairs to avoid :exc:`TypeError`, got an unexpected kwarg
                            del followup_kwargs['delete_after']
                            del followup_kwargs['details']
                            # if there's no file, remove it to avoid an NoneType error
                            if followup_kwargs['file'] is None:
                                del followup_kwargs['file']
                            followup_message: discord.WebhookMessage = await inter.followup.send(**followup_kwargs)
                            if button.followup.delete_after:
                                await followup_message.delete(delay=button.followup.delete_after)
            elif button.custom_id.startswith(ViewButton.ID_SEND_MESSAGE):
                if button.followup is None:
                    raise ViewMenuException('ViewButton custom_id was set as ViewButton.ID_SEND_MESSAGE but the "followup" kwarg for that ViewButton was not set')
                # there must be at least 1. cannot send an empty message
                if all((button.followup.content is None, button.followup.embed is None, button.followup.file is None)):
                    raise ViewMenuException('When using a ViewButton with a custom_id of ViewButton.ID_SEND_MESSAGE, the followup message cannot be empty')
                followup_kwargs = button.followup._to_dict()
                # inter.followup.send() has no kwarg "details"
                del followup_kwargs['details']
                # files are ignored
                del followup_kwargs['file']
                # inter.followup.send() has no kwarg "delete_after"
                del followup_kwargs['delete_after']
                # defer instead of inter.response.send_message() so `delete_after` and `allowed_mentions` can be used
                # inter.followup.send() is used instead
                await inter.response.defer()
                sent_message: discord.WebhookMessage = await inter.followup.send(**followup_kwargs)
                if button.followup.delete_after:
                    await sent_message.delete(delay=button.followup.delete_after)
            elif button.custom_id.startswith(ViewButton.ID_CUSTOM_EMBED):
                if self._menu_type not in (ViewMenu.TypeEmbed, ViewMenu.TypeEmbedDynamic):
                    raise ViewMenuException('Buttons with custom_id ViewButton.ID_CUSTOM_EMBED can only be used when the menu_type is ViewMenu.TypeEmbed or ViewMenu.TypeEmbedDynamic')
                else:
                    if button.followup is None or button.followup.embed is None:
                        raise ViewMenuException('ViewButton custom_id was set as ViewButton.ID_CUSTOM_EMBED but the "followup" kwargs for that ViewButton was not set or the "embed" kwarg for the followup was not set')
                    await inter.response.edit_message(embed=button.followup.embed)
            elif button.custom_id.startswith(ViewButton.ID_SKIP):
                await inter.response.edit_message(**self._determine_kwargs(self._pc.skip(button.skip)))
            else:
                # this shouldn't execute because of :meth:`_button_add_check`, but just in case i missed something, raise the appropriate error
                raise ViewMenuException(f'ViewButton custom_id {button.custom_id!r} was not recognized')
        await self._contact_relay(inter.user, button)
    async def stop(self, *, delete_menu_message: bool=False, remove_buttons: bool=False, disable_buttons: bool=False) -> None:
        """|coro|
        Stops the process of the menu with the option of deleting the menu's message, removing the buttons, or disabling the buttons upon stop
        Parameters
        ----------
        delete_menu_message: :class:`bool`
            Delete the message the menu is operating from
        remove_buttons: :class:`bool`
            Remove the buttons from the menu
        disable_buttons: :class:`bool`
            Disable the buttons on the menu
        Parameter Hierarchy
        -------------------
        Only one option is available when stopping the menu. If you have multiple parameters as `True`, only one will execute
        - `delete_menu_message` > `disable_buttons`
        - `disable_buttons` > `remove_buttons`
        Raises
        ------
        - `discord.DiscordException`: Any exception that can be raised when deleting or editing a message
        """
        if self._is_running:
            try:
                if delete_menu_message:
                    await self._msg.delete()
                elif disable_buttons:
                    if not self._buttons:
                        return # if there are no buttons (they've all been removed) to disable, skip this step
                    self.disable_all_buttons()
                    await self._msg.edit(view=self._view)
                elif remove_buttons:
                    if not self._buttons:
                        return # if there are no buttons (they've already been removed), skip this step
                    self.remove_all_buttons()
                    await self._msg.edit(view=self._view)
            except discord.DiscordException as dpy_error:
                raise dpy_error
            finally:
                # runs even on the early returns/errors above: the session is always ended
                self._view.stop()
                self._is_running = False
                if self in ViewMenu._active_sessions:
                    ViewMenu._active_sessions.remove(self)
                self._on_close_event.set()
                await self._handle_on_timeout()
    @ensure_not_primed
    async def start(self, *, send_to: Optional[Union[str, int, discord.TextChannel, discord.Thread]]=None, reply: bool=False) -> None:
        """|coro|
        Start the menu
        Parameters
        ----------
        send_to: Optional[Union[:class:`str`, :class:`int`, :class:`discord.TextChannel`, :class:`discord.Thread`]]
            The channel/thread you'd like the menu to start in. Use the channel/threads name, ID, or it's object. Please note that if you intend to use a channel/thread object, using
            method :meth:`discord.Client.get_channel()` (or any other related methods), that channel should be in the same list as if you were to use `ctx.guild.text_channels`
            or `ctx.guild.threads`. This only works on a context guild channel basis. That means a menu instance cannot be created in one guild and the menu itself (:param:`send_to`)
            be sent to another. Whichever guild context the menu was instantiated in, the channels/threads of that guild are the only options for :param:`send_to`
        reply: :class:`bool`
            Enables the menu message to reply to the message that triggered it. Parameter :param:`send_to` must be :class:`None` if this is `True`
        Raises
        ------
        - `MenuAlreadyRunning`: Attempted to call method after the menu has already started
        - `NoPages`: The menu was started when no pages have been added
        - `NoButtons`: Attempted to start the menu when no Buttons have been registered
        - `ViewMenuException`: The :class:`ViewMenu`'s `menu_type` was not recognized. There were more than one base navigation buttons. Or a :attr:`ViewButton.ID_CUSTOM_EMBED` button was not correctly formatted
        - `DescriptionOversized`: When using a `menu_type` of :attr:`ViewMenu.TypeEmbedDynamic`, the embed description was over discords size limit
        - `IncorrectType`: Parameter :param:`send_to` was not :class:`str`, :class:`int`, or :class:`discord.TextChannel`
        - `MenuException`: The channel set in :param:`send_to` was not found
        """
        # If a global session limit is in effect, bail out quietly when another
        # session cannot be started right now
        if ViewMenu._sessions_limited:
            can_proceed = await self._handle_session_limits()
            if not can_proceed:
                return
        # checks
        # Note 1: each at least 1 page check is done in it's own if statement to avoid clashing between pages and custom embeds
        # Note 2: at least 1 page check for add_row is done in "(dynamic menu)"
        # ensure at least 1 button exists before starting the menu
        if not self._buttons: raise NoButtons
        if self._menu_type not in ViewMenu._all_menu_types(): raise ViewMenuException('ViewMenu menu_type not recognized')
        reply_kwargs = self._handle_reply_kwargs(send_to, reply)
        # add page (normal menu)
        if self._menu_type == ViewMenu.TypeEmbed:
            self._refresh_page_director_info(ViewMenu.TypeEmbed, self._pages)
            navigation_btns = [btn for btn in self._buttons if btn.custom_id in ViewButton._base_nav_buttons()]
            # an re search is required here because buttons with ID_CUSTOM_EMBED dont have a normal ID, the ID is "8_[unique ID]"
            custom_embed_btns = [btn for btn in self._buttons if btn.style != discord.ButtonStyle.link and re.search(r'8_\d+', btn.custom_id)]
            if all([not self._pages, not custom_embed_btns]):
                raise NoPages
            # normal pages, no custom embeds
            if self._pages and not custom_embed_btns:
                self._msg = await self._handle_send_to(send_to).send(embed=self._pages[0], view=self._view, **reply_kwargs) # allowed_mentions not needed in embeds
            # only custom embeds
            elif not self._pages and custom_embed_btns:
                # since there are only custom embeds, there is no need for base navigation buttons, so remove them if any
                for nav_btn in navigation_btns:
                    if nav_btn in self._buttons:
                        self._buttons.remove(nav_btn)
                # ensure all custom embed buttons have the proper values set
                for custom_btn in custom_embed_btns:
                    if custom_btn.followup is None or custom_btn.followup.embed is None:
                        raise ViewMenuException('ViewButton custom_id was set as ViewButton.ID_CUSTOM_EMBED but the "followup" kwargs for that ViewButton was not set or the "embed" kwarg for the followup was not set')
                # since there are only custom embeds, self._pages is still set to :class:`None`, so set the embed in `.send()` to the first custom embed in the list
                self._msg = await self._handle_send_to(send_to).send(embed=custom_embed_btns[0].followup.embed, view=self._view, **reply_kwargs)
            # normal pages and custom embeds
            else:
                # since there are custom embeds, ensure there is at least one base navigation button so they can switch between the normal pages and custom embeds
                if not navigation_btns:
                    error_msg = inspect.cleandoc(
                        """
                        Since you've added pages and custom embeds, there needs to be at least one base navigation button. Without one, there's no way to go back to the normal pages in the menu if a custom embed button is pressed.
                        The available base navigation buttons are buttons with the custom_id:
                        - ViewButton.ID_PREVIOUS_PAGE
                        - ViewButton.ID_NEXT_PAGE
                        - ViewButton.ID_GO_TO_FIRST_PAGE
                        - ViewButton.ID_GO_TO_LAST_PAGE
                        - ViewButton.ID_GO_TO_PAGE
                        """
                    )
                    raise ViewMenuException(error_msg)
                else:
                    self._msg = await self._handle_send_to(send_to).send(embed=self._pages[0], view=self._view, **reply_kwargs) # allowed_mentions not needed in embeds
        # add row (dynamic menu)
        elif self._menu_type == ViewMenu.TypeEmbedDynamic:
            await self._build_dynamic_pages(send_to)
        # add page (text menu)
        else:
            if not self._pages:
                raise NoPages
            self._refresh_page_director_info(ViewMenu.TypeText, self._pages)
            self._msg = await self._handle_send_to(send_to).send(content=self._pages[0], view=self._view, allowed_mentions=self.allowed_mentions, **reply_kwargs)
        # Menu is live: hook up the page controller and register the session
        self._pc = _PageController(self._pages)
        self._is_running = True
        ViewMenu._active_sessions.append(self)
|
[
"discord.ui.View",
"random.choice",
"re.escape",
"asyncio.iscoroutinefunction",
"re.search",
"inspect.cleandoc",
"re.sub"
] |
[((5262, 5301), 'discord.ui.View', 'discord.ui.View', ([], {'timeout': 'self.__timeout'}), '(timeout=self.__timeout)\n', (5277, 5301), False, 'import discord\n'), ((6328, 6365), 'discord.ui.View', 'discord.ui.View', ([], {'timeout': 'self.timeout'}), '(timeout=self.timeout)\n', (6343, 6365), False, 'import discord\n'), ((8482, 8498), 're.escape', 're.escape', (['style'], {}), '(style)\n', (8491, 8498), False, 'import re\n'), ((15379, 15404), 'random.choice', 'random.choice', (['all_styles'], {}), '(all_styles)\n', (15392, 15404), False, 'import random\n'), ((8902, 8947), 're.search', 're.search', (['DIRECTOR_PATTERN', 'page.footer.text'], {}), '(DIRECTOR_PATTERN, page.footer.text)\n', (8911, 8947), False, 'import re\n'), ((9133, 9167), 're.search', 're.search', (['STYLE_STR_PATTERN', 'page'], {}), '(STYLE_STR_PATTERN, page)\n', (9142, 9167), False, 'import re\n'), ((19457, 19504), 're.search', 're.search', (['ViewButton._RE_IDs', 'button.custom_id'], {}), '(ViewButton._RE_IDs, button.custom_id)\n', (19466, 19504), False, 'import re\n'), ((38721, 38755), 're.search', 're.search', (['"""8_\\\\d+"""', 'btn.custom_id'], {}), "('8_\\\\d+', btn.custom_id)\n", (38730, 38755), False, 'import re\n'), ((40581, 41233), 'inspect.cleandoc', 'inspect.cleandoc', (['"""\n Since you\'ve added pages and custom embeds, there needs to be at least one base navigation button. Without one, there\'s no way to go back to the normal pages in the menu if a custom embed button is pressed.\n The available base navigation buttons are buttons with the custom_id:\n - ViewButton.ID_PREVIOUS_PAGE\n - ViewButton.ID_NEXT_PAGE\n - ViewButton.ID_GO_TO_FIRST_PAGE\n - ViewButton.ID_GO_TO_LAST_PAGE\n - ViewButton.ID_GO_TO_PAGE\n """'], {}), '(\n """\n Since you\'ve added pages and custom embeds, there needs to be at least one base navigation button. 
Without one, there\'s no way to go back to the normal pages in the menu if a custom embed button is pressed.\n The available base navigation buttons are buttons with the custom_id:\n - ViewButton.ID_PREVIOUS_PAGE\n - ViewButton.ID_NEXT_PAGE\n - ViewButton.ID_GO_TO_FIRST_PAGE\n - ViewButton.ID_GO_TO_LAST_PAGE\n - ViewButton.ID_GO_TO_PAGE\n """\n )\n', (40597, 41233), False, 'import inspect\n'), ((8994, 9040), 're.sub', 're.sub', (['DIRECTOR_PATTERN', '""""""', 'page.footer.text'], {}), "(DIRECTOR_PATTERN, '', page.footer.text)\n", (9000, 9040), False, 'import re\n'), ((9196, 9231), 're.sub', 're.sub', (['STYLE_STR_PATTERN', '""""""', 'page'], {}), "(STYLE_STR_PATTERN, '', page)\n", (9202, 9231), False, 'import re\n'), ((28472, 28505), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['func'], {}), '(func)\n', (28499, 28505), False, 'import asyncio\n'), ((28668, 28928), 'inspect.cleandoc', 'inspect.cleandoc', (['f"""\n The button with custom_id ViewButton.ID_CALLER with the label "{button.label}" raised an error during it\'s execution\n -> {err.__class__.__name__}: {err}\n """'], {}), '(\n f"""\n The button with custom_id ViewButton.ID_CALLER with the label "{button.label}" raised an error during it\'s execution\n -> {err.__class__.__name__}: {err}\n """\n )\n', (28684, 28928), False, 'import inspect\n')]
|
import random
import string
def generate_secure_password(length=32):
    """Return a random alphanumeric password of *length* characters.

    Uses the ``secrets`` module rather than ``random``: ``random`` is a
    non-cryptographic PRNG and must not be used for security-sensitive
    values, which the function name promises.
    """
    import secrets  # local import keeps the module's import block unchanged
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))
def poll_options(options):
    """Interactively prompt the user to pick one of *options*.

    Prints a 1-based numbered menu, reads a selection from stdin, and
    re-prompts until a valid number is entered.

    Returns:
        The chosen element of *options*.
    """
    while True:
        for index, option in enumerate(options):
            print(f"[{index+1}]: {option}")
        val = input("Selection: ")
        try:
            # BUG FIX: the menu is 1-based but the list is 0-based, so the
            # original `options[int(val)+1]` was off by two positions.
            choice = int(val) - 1
        except (TypeError, ValueError):
            continue  # not a number -- show the menu again
        # Reject out-of-range input explicitly instead of letting a negative
        # index silently wrap around to the end of the list.
        if 0 <= choice < len(options):
            return options[choice]
|
[
"random.choices"
] |
[((90, 152), 'random.choices', 'random.choices', (['(string.ascii_letters + string.digits)'], {'k': 'length'}), '(string.ascii_letters + string.digits, k=length)\n', (104, 152), False, 'import random\n')]
|
import argparse
import pandas as pd
from mongoengine import QuerySet
from ..tables import species, dataset
def _load_configuration() -> argparse.Namespace:
    """
    Parse command line arguments.
    Parameters
    ----------
    :return: configuration object
    """
    parser = argparse.ArgumentParser()
    # Declarative table of (flags, options) keeps the registrations uniform.
    arg_specs = [
        (["-p", "--password"],
         dict(help="Password to access the DB service.", required=False)),
        (["--database"],
         dict(help="Database name.", required=False, default="dbgen_test")),
        (["--host"],
         dict(help="Host.", required=False, default="localhost")),
        (["--port"],
         dict(help="Port.", required=False, default=27017)),
        (["-r", "--root-data-dir"],
         dict(help="Root directory for input data.", required=False,
              default="./test/data/")),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
def _options(queryset: QuerySet, species_name: str = None, dataset_name: str = None, pheno_or_tool_name: str = None):
    """
    Filter query according to the provided parameters
    :param queryset: current objects to be filtered
    :param species_name: species name
    :param dataset_name: dataset name
    :param pheno_or_tool_name: phenotype name
    """
    # Every supported combination involves a species and/or a dataset; a
    # phenotype/tool name alone (or no filter at all) yields an empty frame,
    # exactly as the original if/elif ladder did.
    if not species_name and not dataset_name:
        return pd.DataFrame()
    criteria = {}
    if species_name:
        criteria["species"] = species.Species.objects(name=species_name).first()
    if dataset_name:
        criteria["dataset"] = dataset.Dataset.objects(name=dataset_name).first()
    if pheno_or_tool_name:
        criteria["name"] = pheno_or_tool_name
    return queryset.filter(**criteria)
|
[
"pandas.DataFrame",
"argparse.ArgumentParser"
] |
[((289, 314), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (312, 314), False, 'import argparse\n'), ((2548, 2562), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2560, 2562), True, 'import pandas as pd\n')]
|
import numpy as np
from matplotlib import image as mimage
from time import time
class Timer(object):
    """A simple timer context-manager, taken from
    https://blog.usejournal.com/how-to-create-your-own-timing-context-manager-in-python-a0e944b48cf8
    """

    def __init__(self, description):
        # Label printed next to the elapsed time on exit.
        self.description = description

    def __enter__(self):
        self.start = time()
        # FIX: return self so `with Timer(...) as t:` actually binds the timer
        # (the original returned None), letting callers read t.start / t.end.
        return self

    def __exit__(self, type, value, traceback):
        self.end = time()
        # Same output as the original .format() call, written as an f-string.
        print(f"{self.description}: {self.end - self.start}s")
def rgb_to_gray(rgb):
    """Convert color image to grayscale.
    Parameters
    ----------
    rgb : ndarray
        Three dimensional array, last dimension being at least 3 in size.
    Returns
    -------
    gray: ndarray
        Grayscale image.
    """
    # Already single-channel (2-d or less): just drop any singleton axes.
    if len(rgb.shape) < 3:
        return rgb.squeeze()
    # Weighted sum over the last axis with the classic luma coefficients;
    # channels beyond the first three (e.g. alpha) are ignored.
    luma_weights = np.asarray([0.2989, 0.5870, 0.1140])
    return rgb[..., :3] @ luma_weights
def imread(filename, dtype='float64', force_grayscale=False):
    """Read a file from disk.
    Parameters
    ----------
    filename : str
        Filename on disk.
    dtype : str, optional
        Data-type of returned array, by default 'float64'
    force_grayscale : bool, optional
        If true, a grayscale image is returned only works if input is rgb, by default False
    Returns
    -------
    im: ndarray
        Loaded image.
    """
    image = mimage.imread(filename)
    if force_grayscale:
        image = rgb_to_gray(image)
    image = image.astype(dtype)
    # Floating-point images are rescaled by their maximum value.
    if dtype in ('float32', 'float64'):
        image /= np.max(image)
    return image
def read_txt_matrix(txtf, header=False):
    """
    Reads a matrix encoded in ASCII into memory as a numpy matrix.

    Parameters
    ----------
    txtf : str
        Path of the whitespace-separated text file.
    header : bool, optional
        If True, skip the first line of the file.

    Returns
    -------
    ndarray of floats, one row per (non-header) line.
    """
    # BUG FIX: the original wrapped each row in a lazy `map` object; under
    # Python 3 np.asarray then produced an object array of map objects
    # instead of a numeric matrix. Also use a context manager so the file
    # handle is closed deterministically.
    with open(txtf, 'r') as fh:
        rows = [
            [float(token) for token in line.split()]
            for iline, line in enumerate(fh)
            if iline > 0 or not header
        ]
    return np.asarray(rows)
|
[
"numpy.dot",
"matplotlib.image.imread",
"numpy.max",
"time.time"
] |
[((906, 950), 'numpy.dot', 'np.dot', (['rgb[..., :3]', '[0.2989, 0.587, 0.114]'], {}), '(rgb[..., :3], [0.2989, 0.587, 0.114])\n', (912, 950), True, 'import numpy as np\n'), ((1416, 1439), 'matplotlib.image.imread', 'mimage.imread', (['filename'], {}), '(filename)\n', (1429, 1439), True, 'from matplotlib import image as mimage\n'), ((387, 393), 'time.time', 'time', ([], {}), '()\n', (391, 393), False, 'from time import time\n'), ((462, 468), 'time.time', 'time', ([], {}), '()\n', (466, 468), False, 'from time import time\n'), ((1582, 1592), 'numpy.max', 'np.max', (['im'], {}), '(im)\n', (1588, 1592), True, 'import numpy as np\n')]
|
#
# Copyright (C) 2021 <NAME>
#
from typing import Any
import math
class Point:
    """
    Class representing a point on a plane.
    """

    def __init__(self, x: float, y: float):
        """
        Constructs a 2d point.
        Args:
            x: The x-coordinate of the point.
            y: The y-coordinate of the point.
        """
        self.x = x
        self.y = y

    def __repr__(self) -> str:
        """
        Returns a string representation of the point.
        """
        return "(x=%lf, y=%lf)" % (self.x, self.y)

    def __sub__(self, other: Any) -> "Point":
        """
        Subtract another point from this point.
        Args:
            other: The point to be subtracted from this point.
        Returns:
            The resultant point.
        Raises:
            ValueError: If the other operand is not a point.
        """
        if not isinstance(other, Point):
            raise ValueError("Both operands must be points")
        return Point(self.x - other.x, self.y - other.y)

    def __eq__(self, other: Any) -> bool:
        """
        Check if this point equals another point.
        Args:
            other: The point to check equality with.
        Returns:
            Whether the points are equal.
        Raises:
            ValueError: If the other operand is not a point.
        """
        # NOTE(review): raising from __eq__ is unconventional in Python (it
        # breaks e.g. membership tests against mixed lists), but it is the
        # documented contract here, so it is preserved.
        if not isinstance(other, Point):
            raise ValueError("Both operands must be points")
        return (self.x, self.y) == (other.x, other.y)

    def __abs__(self) -> float:
        """
        Calculates the euclidean distance of a point from the origin.
        Returns:
            The euclidean distance of the point from the origin.
        """
        return math.sqrt(self.x * self.x + self.y * self.y)

    def distance(self, other: "Point") -> float:
        """
        Calculates the distance between this point and another point.
        Args:
            other: The other point to calculate the distance from.
        Returns:
            The distance between this point and another point.
        """
        # Defined via the difference vector's magnitude.
        return abs(self - other)
|
[
"math.sqrt"
] |
[((1824, 1860), 'math.sqrt', 'math.sqrt', (['(self.x ** 2 + self.y ** 2)'], {}), '(self.x ** 2 + self.y ** 2)\n', (1833, 1860), False, 'import math\n')]
|
import numpy as np
import pandas as pd
from scipy import interpolate
from .Constants import *
from .AtomicData import *
from .Conversions import *
##########################
# Taken from: https://stackoverflow.com/questions/779495/python-access-data-in-package-subdirectory
# This imports the file 'PREM500.csv' within the DarkCapPy package so the user doesn't have to.
import os
this_dir, this_filename = os.path.split(__file__)
# this_dir, this_filename = os.path.split(__file__)
##########################
# Earth radius and mass
##########################
Planet_Path = os.path.join(this_dir, "PREM500_Mod.csv")
VelocityDist_Path = os.path.join(this_dir, "EarthDMVelDist.csv")
Planet_Radius = 6.371e8 # cm
Planet_Mass = 5.972e27 # grams
Planet_Life = yr2s(4.5e9) # 4.5 Gyr -> sec
##########################
# Sun radius and mass
##########################
# Planet_Path = os.path.join(this_dir, "struct_b16_agss09.csv")
# Vel_Dist_Path = os.path.join(this_dir, "SunDMVelDist.csv")
# Planet_Radius = 69.551e9 # cm
# Planet_Mass = 1.989e33 # g
# Planet_Life = yr2s(4.5e9) # 4.5 Gyr -> sec
# Variables to be used in DarkPhoton.py
# 1). radius_List
# 2). deltaR_List
# 3). escVel2_List
# 4). element_List
########################################################
# Data Input #
########################################################
##########################
# Read in Planet Data
##########################
Planet_File = pd.read_csv(Planet_Path, delim_whitespace=True, header = 8)
Vel_Dist_File = pd.read_csv(Vel_Dist_Path)
radius_List = Planet_File['Radius'] * Planet_Radius
enclosedMass_List = Planet_File['Mass'] * Planet_Mass
element_List = np.asarray(Planet_File.columns[6:-1])
assert len(radius_List) == len(enclosedMass_List), 'Lengths of radius list and enclosed mass list do not match'
##########################
# Shell Thickness
##########################
def deltaR_Func(radiusList):
    """Shell thickness per radial bin.

    deltaR[i] = radiusList[i] - radiusList[i-1], with the innermost bin
    keeping its full radius (deltaR[0] = radiusList[0] - 0), so the output
    has the same length as the input.
    """
    # Radii shifted down by one position, padded with a leading 0, so the
    # elementwise subtraction below yields consecutive differences.
    shifted = [0]
    for radius in radiusList[0:len(radiusList) - 1]:
        shifted.append(radius)
    return radiusList[0:len(radiusList)] - shifted[0:len(shifted)]
##########################
# Shell Mass
##########################
def shellMass_Func(totalMassList):
    """Convert a cumulative (enclosed) mass profile into per-shell masses.

    The innermost entry is defined as 0; every other entry is the difference
    between consecutive enclosed-mass values.
    """
    shellMasses = []
    previousMass = None
    for enclosed in totalMassList:
        if previousMass is None:
            shellMasses.append(0)
        else:
            shellMasses.append(enclosed - previousMass)
        previousMass = enclosed
    return shellMasses
##########################
# Shell Density
##########################
def shellDensity_Func(shellMass, shellRadius, deltaR):
    """Mass density of each spherical shell: m / (4*pi*r^2*dr).

    The innermost value is copied from shell 1 because the shell-volume
    formula is singular at r = 0 (kludge kept from the original).
    """
    densities = []
    for idx in range(0, len(shellMass)):
        shellVolume = 4 * np.pi * shellRadius[idx] ** 2 * deltaR[idx]
        densities.append(shellMass[idx] / shellVolume)
    # Kludge for radius = 0
    densities[0] = densities[1]
    return densities
##########################
# Number Density of each element
##########################
def numDensity_Func(element):
    """Number density profile (per shell) of the given element.

    Reads the element's mass-fraction column from the module-level
    Planet_File and converts each shell's mass density to a number density:
    massFraction * density[GeV] / (atomic mass [GeV]).
    """
    massFractions = Planet_File[str(element)]
    numDensities = []
    for shell in range(0, len(shellDensity_List)):
        numDensities.append(
            massFractions[shell] * g2GeV(shellDensity_List[shell]) / amu2GeV(atomicNumbers[element])
        )
    return numDensities
##########################
# Escape Velocity
##########################
def escVel_Func(index, enclosedMassList, radiusList, deltaRList):
    """Escape-velocity profile term for shell `index`.

    Sums M_enc(r) * dr / r^2 from `index` outward, adds the surface term
    M_total / R_total, and scales by 2G/c^2 (presumably giving v_esc^2 in
    units of c^2 given the prefactor -- confirm against the caller).
    For index == 0 only the surface term is used; the module later
    overwrites entry 0 with entry 1 anyway.
    """
    G_Newton = 6.674e-11 * 100**3 * (1000)**-1 # in cm^3/(g s^2)
    c = 3e10 # in cm/s
    factor = 2.*G_Newton/c**2 # prefactor
    constant = max(enclosedMassList) / max(radiusList)
    assert len(enclosedMassList) == len(radiusList), 'Lengths of Enclosed mass list and radius list do not match'
    assert len(radiusList) == len(deltaRList), 'Lengths of radius list and delta R list do not match'
    tempSum = 0
    if index != 0:
        # Integrate the enclosed-mass profile from this shell outward.
        for shell in range(index, len(radiusList)):
            tempSum += enclosedMassList[shell] * deltaRList[shell] / (radiusList[shell])**2
    return (factor * (tempSum + constant))
##########################
# Generate all lists
##########################
# Derived per-shell radial profiles for the planet model
deltaR_List = deltaR_Func(radius_List)
shellMass_List = shellMass_Func(enclosedMass_List)
shellDensity_List = shellDensity_Func(shellMass_List, radius_List, deltaR_List)
# Every per-shell list must line up with the radius grid
assert len(radius_List) == len(deltaR_List)
assert len(radius_List) == len(shellMass_List)
assert len(radius_List) == len(shellDensity_List)
escVel2_List = [] #| Construct an array of escape velocities
for i in range(0,len(radius_List)): #| (one entry per shell of the radius grid)
    escVel2_List.append(escVel_Func(i, enclosedMass_List, radius_List, deltaR_List)) #|
escVel2_List[0] = escVel2_List[1] # Set the i=0 and i=1 escape velocities equal (escVel_Func skips the sum at index 0)
##########################
# DM Velocity Distrubution
##########################
velocity_Range_List = Vel_Dist_File['Velocity_Range'] # A list of velocities between 0 and V_gal
planet_velocity_List = Vel_Dist_File['VelocityDist_Planet_Frame'] # The DM velocity distrubution in the planet frame
########################
# Interpolate the Velocity Distribution
########################
velRange = velocity_Range_List
fCrossVect = planet_velocity_List
fCrossInterp = interpolate.interp1d(velRange, fCrossVect, kind ='linear')
##########################
# Interpolate
# These are intentionally commented out, we don't actually use them in DarkPhoton.py
# I'm not sure why I made these but they are here if they are usefull
##########################
# Earth_enclosedMassInterp = interpolate.interp1d(radius_List, enclosedMass_List, kind='linear')
# Earth_escVel2Interp = interpolate.interp1d(radius_List, escVel2_List, kind='linear')
# Earth_densityInterp = interpolate.interp1d(radius_List,Earth_density_List,kind='linear')
|
[
"pandas.read_csv",
"numpy.asarray",
"scipy.interpolate.interp1d",
"os.path.split",
"os.path.join"
] |
[((409, 432), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (422, 432), False, 'import os\n'), ((580, 621), 'os.path.join', 'os.path.join', (['this_dir', '"""PREM500_Mod.csv"""'], {}), "(this_dir, 'PREM500_Mod.csv')\n", (592, 621), False, 'import os\n'), ((642, 686), 'os.path.join', 'os.path.join', (['this_dir', '"""EarthDMVelDist.csv"""'], {}), "(this_dir, 'EarthDMVelDist.csv')\n", (654, 686), False, 'import os\n'), ((1504, 1561), 'pandas.read_csv', 'pd.read_csv', (['Planet_Path'], {'delim_whitespace': '(True)', 'header': '(8)'}), '(Planet_Path, delim_whitespace=True, header=8)\n', (1515, 1561), True, 'import pandas as pd\n'), ((1581, 1607), 'pandas.read_csv', 'pd.read_csv', (['Vel_Dist_Path'], {}), '(Vel_Dist_Path)\n', (1592, 1607), True, 'import pandas as pd\n'), ((1731, 1768), 'numpy.asarray', 'np.asarray', (['Planet_File.columns[6:-1]'], {}), '(Planet_File.columns[6:-1])\n', (1741, 1768), True, 'import numpy as np\n'), ((5675, 5732), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['velRange', 'fCrossVect'], {'kind': '"""linear"""'}), "(velRange, fCrossVect, kind='linear')\n", (5695, 5732), False, 'from scipy import interpolate\n')]
|
#!/usr/bin/env python
# Copyright 2019, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Run code with Nuitka compiled and put that through valgrind.
"""
import os
import sys
# Find nuitka package relative to us.
sys.path.insert(
0, os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
)
# isort:start
import shutil
import tempfile
from nuitka.tools.testing.Valgrind import getBinarySizes, runValgrind
# The script to compile and profile is the first command line argument.
input_file = sys.argv[1]
nuitka_binary = os.environ.get(
    "NUITKA_BINARY", os.path.join(os.path.dirname(__file__), "../bin/nuitka")
)
nuitka_binary = os.path.normpath(nuitka_binary)
basename = os.path.basename(input_file)
# Build in /var/tmp when it exists, otherwise the default temp directory.
tempdir = tempfile.mkdtemp(
    prefix=basename + "-", dir=None if not os.path.exists("/var/tmp") else "/var/tmp"
)
output_binary = os.path.join(
    tempdir, (basename[:-3] if input_file.endswith(".py") else basename) + ".bin"
)
# Fixed hash seed for reproducible measurements.
os.environ["PYTHONHASHSEED"] = "0"
# To make that python run well despite the "-S" flag for things that need site
# to expand sys.path.
os.environ["PYTHONPATH"] = os.pathsep.join(sys.path)
os.system(
    "%s %s --python-flag=-S --output-dir=%s %s %s %s"
    % (
        sys.executable,
        nuitka_binary,
        tempdir,
        "--unstripped",
        os.environ.get("NUITKA_EXTRA_OPTIONS", ""),
        input_file,
    )
)
if not os.path.exists(output_binary):
    sys.exit("Seeming failure of Nuitka to compile, no %r." % output_binary)
log_base = basename[:-3] if input_file.endswith(".py") else basename
# The callgrind log is always written: "numbers" mode reads tick counts from
# it, any other mode opens it in kcachegrind below. (FIX: removed a dead
# conditional that briefly set log_file to None -- it was unconditionally
# overwritten by the assignment below, so it had no effect.)
log_file = log_base + ".log"
sys.stdout.flush()
ticks = runValgrind(
    None, "callgrind", [output_binary], include_startup=False, save_logfilename=log_file
)
if "number" in sys.argv or "numbers" in sys.argv:
    sizes = getBinarySizes(output_binary)
    print("SIZE=%d" % (sizes[0] + sizes[1]))
    print("TICKS=%s" % ticks)
    print("BINARY=%s" % nuitka_binary)
    max_mem = runValgrind(None, "massif", [output_binary], include_startup=True)
    print("MEM=%s" % max_mem)
    shutil.rmtree(tempdir)
else:
    os.system("kcachegrind 2>/dev/null 1>/dev/null %s &" % log_file)
|
[
"os.path.abspath",
"os.pathsep.join",
"nuitka.tools.testing.Valgrind.runValgrind",
"os.path.basename",
"os.path.dirname",
"os.path.exists",
"os.system",
"os.environ.get",
"nuitka.tools.testing.Valgrind.getBinarySizes",
"sys.stdout.flush",
"os.path.normpath",
"shutil.rmtree",
"sys.exit"
] |
[((1298, 1329), 'os.path.normpath', 'os.path.normpath', (['nuitka_binary'], {}), '(nuitka_binary)\n', (1314, 1329), False, 'import os\n'), ((1342, 1370), 'os.path.basename', 'os.path.basename', (['input_file'], {}), '(input_file)\n', (1358, 1370), False, 'import os\n'), ((1768, 1793), 'os.pathsep.join', 'os.pathsep.join', (['sys.path'], {}), '(sys.path)\n', (1783, 1793), False, 'import os\n'), ((2363, 2381), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2379, 2381), False, 'import sys\n'), ((2392, 2493), 'nuitka.tools.testing.Valgrind.runValgrind', 'runValgrind', (['None', '"""callgrind"""', '[output_binary]'], {'include_startup': '(False)', 'save_logfilename': 'log_file'}), "(None, 'callgrind', [output_binary], include_startup=False,\n save_logfilename=log_file)\n", (2403, 2493), False, 'from nuitka.tools.testing.Valgrind import getBinarySizes, runValgrind\n'), ((2044, 2073), 'os.path.exists', 'os.path.exists', (['output_binary'], {}), '(output_binary)\n', (2058, 2073), False, 'import os\n'), ((2079, 2151), 'sys.exit', 'sys.exit', (["('Seeming failure of Nuitka to compile, no %r.' % output_binary)"], {}), "('Seeming failure of Nuitka to compile, no %r.' 
% output_binary)\n", (2087, 2151), False, 'import sys\n'), ((2559, 2588), 'nuitka.tools.testing.Valgrind.getBinarySizes', 'getBinarySizes', (['output_binary'], {}), '(output_binary)\n', (2573, 2588), False, 'from nuitka.tools.testing.Valgrind import getBinarySizes, runValgrind\n'), ((2719, 2785), 'nuitka.tools.testing.Valgrind.runValgrind', 'runValgrind', (['None', '"""massif"""', '[output_binary]'], {'include_startup': '(True)'}), "(None, 'massif', [output_binary], include_startup=True)\n", (2730, 2785), False, 'from nuitka.tools.testing.Valgrind import getBinarySizes, runValgrind\n'), ((2822, 2844), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (2835, 2844), False, 'import shutil\n'), ((2855, 2919), 'os.system', 'os.system', (["('kcachegrind 2>/dev/null 1>/dev/null %s &' % log_file)"], {}), "('kcachegrind 2>/dev/null 1>/dev/null %s &' % log_file)\n", (2864, 2919), False, 'import os\n'), ((1236, 1261), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1251, 1261), False, 'import os\n'), ((1964, 2006), 'os.environ.get', 'os.environ.get', (['"""NUITKA_EXTRA_OPTIONS"""', '""""""'], {}), "('NUITKA_EXTRA_OPTIONS', '')\n", (1978, 2006), False, 'import os\n'), ((989, 1014), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1004, 1014), False, 'import os\n'), ((1443, 1469), 'os.path.exists', 'os.path.exists', (['"""/var/tmp"""'], {}), "('/var/tmp')\n", (1457, 1469), False, 'import os\n')]
|
# Import APIView class from the rest_framework.views modules
from rest_framework.views import APIView
# Imports the response object which used to return responses from the APIView
from rest_framework.response import Response
# Create new class based on the APIView class.
class HelloApiView(APIView):
    """Test API View exposing a single GET endpoint."""

    # Handles HTTP GET requests.
    def get(self, request, format=None):
        """Returns a list of APIView features"""
        # BUG FIX: the original list was missing commas after two entries,
        # so adjacent string literals were silently concatenated into a
        # single list item.
        an_apiview = [
            'Uses HTTP methods as function (get, post, patch, put, delete)',
            'Is similar to a traditional Django View',
            'Gives you the most control over your application logic',
            'Is mapped manually to URLs',
        ]
        return Response({'message': 'Hello', 'an_apiview': an_apiview})
|
[
"rest_framework.response.Response"
] |
[((849, 905), 'rest_framework.response.Response', 'Response', (["{'message': 'Hello', 'an_apiview': an_apiview}"], {}), "({'message': 'Hello', 'an_apiview': an_apiview})\n", (857, 905), False, 'from rest_framework.response import Response\n')]
|
#!/usr/bin/env python
# <examples/doc_nistgauss2.py>
import matplotlib.pyplot as plt
import numpy as np
from lmfit.models import ExponentialModel, GaussianModel
dat = np.loadtxt('NIST_Gauss2.dat')
x = dat[:, 1]
y = dat[:, 0]
exp_mod = ExponentialModel(prefix='exp_')
gauss1 = GaussianModel(prefix='g1_')
gauss2 = GaussianModel(prefix='g2_')
def index_of(arrval, value):
    """return index of array *at or below* value """
    if value < min(arrval):
        return 0
    # np.where returns the matching indices in ascending order, so the last
    # one is the largest index with arrval[i] <= value.
    return np.where(arrval <= value)[0][-1]
ix1 = index_of(x, 75)
ix2 = index_of(x, 135)
ix3 = index_of(x, 175)
pars1 = exp_mod.guess(y[:ix1], x=x[:ix1])
pars2 = gauss1.guess(y[ix1:ix2], x=x[ix1:ix2])
pars3 = gauss2.guess(y[ix2:ix3], x=x[ix2:ix3])
pars = pars1 + pars2 + pars3
mod = gauss1 + gauss2 + exp_mod
out = mod.fit(y, pars, x=x)
print(out.fit_report(min_correl=0.5))
plt.plot(x, y, 'b')
plt.plot(x, out.init_fit, 'k--')
plt.plot(x, out.best_fit, 'r-')
# plt.savefig('../doc/_images/models_nistgauss2.png')
plt.show()
# <end examples/doc_nistgauss2.py>
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.where",
"numpy.loadtxt",
"lmfit.models.GaussianModel",
"lmfit.models.ExponentialModel"
] |
[((170, 199), 'numpy.loadtxt', 'np.loadtxt', (['"""NIST_Gauss2.dat"""'], {}), "('NIST_Gauss2.dat')\n", (180, 199), True, 'import numpy as np\n'), ((239, 270), 'lmfit.models.ExponentialModel', 'ExponentialModel', ([], {'prefix': '"""exp_"""'}), "(prefix='exp_')\n", (255, 270), False, 'from lmfit.models import ExponentialModel, GaussianModel\n'), ((280, 307), 'lmfit.models.GaussianModel', 'GaussianModel', ([], {'prefix': '"""g1_"""'}), "(prefix='g1_')\n", (293, 307), False, 'from lmfit.models import ExponentialModel, GaussianModel\n'), ((317, 344), 'lmfit.models.GaussianModel', 'GaussianModel', ([], {'prefix': '"""g2_"""'}), "(prefix='g2_')\n", (330, 344), False, 'from lmfit.models import ExponentialModel, GaussianModel\n'), ((857, 876), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b"""'], {}), "(x, y, 'b')\n", (865, 876), True, 'import matplotlib.pyplot as plt\n'), ((877, 909), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'out.init_fit', '"""k--"""'], {}), "(x, out.init_fit, 'k--')\n", (885, 909), True, 'import matplotlib.pyplot as plt\n'), ((910, 941), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'out.best_fit', '"""r-"""'], {}), "(x, out.best_fit, 'r-')\n", (918, 941), True, 'import matplotlib.pyplot as plt\n'), ((996, 1006), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1004, 1006), True, 'import matplotlib.pyplot as plt\n'), ((489, 514), 'numpy.where', 'np.where', (['(arrval <= value)'], {}), '(arrval <= value)\n', (497, 514), True, 'import numpy as np\n')]
|
import datetime
import twint
import csv
import re
import json
import os
# 現在時刻
dt_now = datetime.datetime.now().strftime("%Y/%m/%d %H:%M")
c = twint.Config()
c.Username = "pref_toyama"
c.Search = "感染者の現況"
c.Since = datetime.datetime.now().strftime("%Y-%m-%d")
c.Store_csv = True
c.Output = "tmp.csv"
twint.run.Search(c)
try:
with open("tmp.csv", 'r', encoding="utf-8") as f:
reader = csv.reader(f)
i = 0
for row in reader:
if i == 0:
pass
elif i == 1:
text = row[10]
new = int(re.search(r"新たに(\d+?)名", text).group(1))
total = int(re.search(r"感染者数:(\d+?)名", text).group(1))
hospitalized = int(re.search(r"入院中又は入院等調整中 (\d+?)人", text).group(1))
lodging = int(re.search(r"宿泊療養施設入所者数 (\d+?)人", text).group(1))
discharged = int(re.search(r"退院者数 (\d+?)人", text).group(1))
death = int(re.search(r"死亡者数 (\d+?)人", text).group(1))
# 検査陽性者の状況
with open('../data/patients_summary.json', 'r', encoding='utf-8') as file:
data = json.load(file)
data["date"] = dt_now
data["value"] = total
data["children"][0]["value"] = hospitalized
data["children"][0]["children"][0]["value"] += new
data["children"][1]["value"] = lodging
data["children"][2]["value"] = death
data["children"][3]["value"] = discharged
with open('../data/patients_summary.json', 'w', encoding='utf-8') as file:
json.dump(data, file, ensure_ascii=False, indent=4)
# 公表日別による新規陽性者数の推移
with open('../data/patients_number.json', 'r', encoding='utf-8') as file:
data = json.load(file)
data["date"] = dt_now
data["data"].append({"日付": datetime.datetime.now().strftime("%Y-%m-%d"), "小計": new})
with open('../data/patients_number.json', 'w', encoding='utf-8') as file:
json.dump(data, file, ensure_ascii=False, indent=4)
else:
break
i += 1
f.close
os.remove("tmp.csv")
except:
# 公表日別による新規陽性者数の推移
with open('../data/patients_number.json', 'r', encoding='utf-8') as file:
data = json.load(file)
data["date"] = dt_now
data["data"].append({"日付": datetime.datetime.now().strftime("%Y-%m-%d"), "小計": 0})
with open('../data/patients_number.json', 'w', encoding='utf-8') as file:
json.dump(data, file, ensure_ascii=False, indent=4)
# 検査陽性者の状況
with open('../data/patients_summary.json', 'r', encoding='utf-8') as file:
data = json.load(file)
data["date"] = dt_now
with open('../data/patients_summary.json', 'w', encoding='utf-8') as file:
json.dump(data, file, ensure_ascii=False, indent=4)
# 最終更新日時
with open('../data/data.json', 'r', encoding='utf-8') as file:
data = json.load(file)
data['lastUpdate'] = dt_now
with open('../data/data.json', 'w', encoding='utf-8') as file:
json.dump(data, file, ensure_ascii=False, indent=4)
|
[
"json.dump",
"os.remove",
"json.load",
"csv.reader",
"twint.run.Search",
"twint.Config",
"re.search",
"datetime.datetime.now"
] |
[((145, 159), 'twint.Config', 'twint.Config', ([], {}), '()\n', (157, 159), False, 'import twint\n'), ((303, 322), 'twint.run.Search', 'twint.run.Search', (['c'], {}), '(c)\n', (319, 322), False, 'import twint\n'), ((2325, 2345), 'os.remove', 'os.remove', (['"""tmp.csv"""'], {}), "('tmp.csv')\n", (2334, 2345), False, 'import os\n'), ((3139, 3154), 'json.load', 'json.load', (['file'], {}), '(file)\n', (3148, 3154), False, 'import json\n'), ((89, 112), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (110, 112), False, 'import datetime\n'), ((217, 240), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (238, 240), False, 'import datetime\n'), ((400, 413), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (410, 413), False, 'import csv\n'), ((3262, 3313), 'json.dump', 'json.dump', (['data', 'file'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(data, file, ensure_ascii=False, indent=4)\n', (3271, 3313), False, 'import json\n'), ((2470, 2485), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2479, 2485), False, 'import json\n'), ((2862, 2877), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2871, 2877), False, 'import json\n'), ((2701, 2752), 'json.dump', 'json.dump', (['data', 'file'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(data, file, ensure_ascii=False, indent=4)\n', (2710, 2752), False, 'import json\n'), ((3003, 3054), 'json.dump', 'json.dump', (['data', 'file'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(data, file, ensure_ascii=False, indent=4)\n', (3012, 3054), False, 'import json\n'), ((1149, 1164), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1158, 1164), False, 'import json\n'), ((1681, 1732), 'json.dump', 'json.dump', (['data', 'file'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(data, file, ensure_ascii=False, indent=4)\n', (1690, 1732), False, 'import json\n'), ((1897, 1912), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1906, 1912), False, 'import json\n'), 
((2555, 2578), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2576, 2578), False, 'import datetime\n'), ((581, 611), 're.search', 're.search', (['"""新たに(\\\\d+?)名"""', 'text'], {}), "('新たに(\\\\d+?)名', text)\n", (590, 611), False, 'import re\n'), ((650, 682), 're.search', 're.search', (['"""感染者数:(\\\\d+?)名"""', 'text'], {}), "('感染者数:(\\\\d+?)名', text)\n", (659, 682), False, 'import re\n'), ((728, 767), 're.search', 're.search', (['"""入院中又は入院等調整中 (\\\\d+?)人"""', 'text'], {}), "('入院中又は入院等調整中 (\\\\d+?)人', text)\n", (737, 767), False, 'import re\n'), ((808, 846), 're.search', 're.search', (['"""宿泊療養施設入所者数 (\\\\d+?)人"""', 'text'], {}), "('宿泊療養施設入所者数 (\\\\d+?)人', text)\n", (817, 846), False, 'import re\n'), ((890, 922), 're.search', 're.search', (['"""退院者数 (\\\\d+?)人"""', 'text'], {}), "('退院者数 (\\\\d+?)人', text)\n", (899, 922), False, 'import re\n'), ((961, 993), 're.search', 're.search', (['"""死亡者数 (\\\\d+?)人"""', 'text'], {}), "('死亡者数 (\\\\d+?)人', text)\n", (970, 993), False, 'import re\n'), ((2194, 2245), 'json.dump', 'json.dump', (['data', 'file'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(data, file, ensure_ascii=False, indent=4)\n', (2203, 2245), False, 'import json\n'), ((2014, 2037), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2035, 2037), False, 'import datetime\n')]
|
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import torch
from tqdm import tqdm
from trainer.base_trainer import BaseTrainer
from util.utils import compute_SDR
plt.switch_backend('agg')
class Trainer(BaseTrainer):
def __init__(self, config, resume: bool, model, loss_function, optimizer, train_dataloader, validation_dataloader):
super(Trainer, self).__init__(config, resume, model, loss_function, optimizer)
self.train_dataloader = train_dataloader
self.validation_dataloader = validation_dataloader
def _train_epoch(self, epoch):
loss_total = 0.0
short_loss_total = 0.0
middle_loss_total = 0.0
long_loss_total = 0.0
for mixture, target, reference, _ in tqdm(self.train_dataloader, desc="Training"):
mixture = mixture.to(self.device).unsqueeze(1)
target = target.to(self.device).unsqueeze(1)
reference = reference.to(self.device).unsqueeze(1)
self.optimizer.zero_grad()
short_scale_enhanced, middle_scale_enhanced, long_scale_enhanced, _ = self.model(mixture, reference)
loss, (short_loss, middle_loss, long_loss) = self.loss_function(target, short_scale_enhanced, middle_scale_enhanced, long_scale_enhanced)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5)
self.optimizer.step()
loss_total += loss.item()
short_loss_total += short_loss.item()
middle_loss_total += middle_loss.item()
long_loss_total += long_loss.item()
# if i == 0:
# self.writer.add_figure(f"Train_Tensor/Mixture", self.image_grad(mixture_mag.cpu()), epoch)
# self.writer.add_figure(f"Train_Tensor/Target", self.image_grad(target_mag.cpu()), epoch)
# self.writer.add_figure(f"Train_Tensor/Enhanced", self.image_grad(enhanced_mag.detach().cpu()), epoch)
# self.writer.add_figure(f"Train_Tensor/Ref", self.image_grad(reference.cpu()), epoch)
self.writer.add_scalar(f"Train/Loss", loss_total / len(self.train_dataloader), epoch)
self.writer.add_scalar(f"Train/Short Loss", short_loss_total / len(self.train_dataloader), epoch)
self.writer.add_scalar(f"Train/Middle Loss", middle_loss_total / len(self.train_dataloader), epoch)
self.writer.add_scalar(f"Train/Long Loss", long_loss_total / len(self.train_dataloader), epoch)
@torch.no_grad()
def _validation_epoch(self, epoch):
visualize_audio_limit = self.validation_custom_config["visualize_audio_limit"]
visualize_waveform_limit = self.validation_custom_config["visualize_waveform_limit"]
visualize_spectrogram_limit = self.validation_custom_config["visualize_spectrogram_limit"]
n_samples = self.validation_custom_config["n_samples"]
weights = self.validation_custom_config["weights"]
sr = self.validation_custom_config["sr"]
get_metrics_ave = lambda metrics: np.sum(metrics) / len(metrics)
sdr_c_m = [] # Clean and mixture
sdr_c_e = [] # Clean and enhanced
for i, (mixture, target, reference, target_filename) in tqdm(enumerate(self.validation_dataloader)):
assert len(target_filename) == 1, "The batch size of validation dataloader must be 1."
name = target_filename[0]
mixture = mixture.to(self.device)
reference = reference.to(self.device)
mixture_chunks = list(torch.split(mixture, n_samples, dim=-1))
last_chunk = mixture_chunks[-1]
if last_chunk.size(-1) != n_samples:
mixture_chunks[-1] = torch.cat((
mixture_chunks[-1],
torch.zeros(1, n_samples - last_chunk.size(-1)).to(self.device)
), dim=1)
enhanced_chunks = []
for mixture_chunk in mixture_chunks:
short_scale, middle_scale, long_scale, _ = self.model(mixture_chunk, reference).detach().cpu()
enhanced_chunks.append(short_scale * weights[0] + middle_scale * weights[1] + long_scale * weights[2])
enhanced = torch.cat(enhanced_chunks, dim=1) # [F, T]
enhanced = enhanced[:, :mixture.shape[1]]
mixture = mixture.reshape(-1).cpu().numpy()
enhanced = enhanced.reshape(-1).cpu().numpy()
target = target.reshape(-1).cpu().numpy()
reference = reference.reshape(-1).cpu().numpy()
# Visualize audio
if i <= visualize_audio_limit:
self.writer.add_audio(f"Speech/{name}_Mixture", mixture, epoch, sample_rate=sr)
self.writer.add_audio(f"Speech/{name}_Enhanced", enhanced, epoch, sample_rate=sr)
self.writer.add_audio(f"Speech/{name}_Target", target, epoch, sample_rate=sr)
self.writer.add_audio(f"Speech/{name}_Reference", reference, epoch, sample_rate=sr)
# Visualize waveform
if i <= visualize_waveform_limit:
fig, ax = plt.subplots(3, 1)
for j, y in enumerate([mixture, enhanced, target]):
ax[j].set_title("mean: {:.3f}, std: {:.3f}, max: {:.3f}, min: {:.3f}".format(
np.mean(y),
np.std(y),
np.max(y),
np.min(y)
))
librosa.display.waveplot(y, sr=sr, ax=ax[j])
plt.tight_layout()
self.writer.add_figure(f"Waveform/{name}", fig, epoch)
# Visualize spectrogram
mixture_mag, _ = librosa.magphase(librosa.stft(mixture, n_fft=320, hop_length=160))
enhanced_mag, _ = librosa.magphase(librosa.stft(enhanced, n_fft=320, hop_length=160))
target_mag, _ = librosa.magphase(librosa.stft(target, n_fft=320, hop_length=160))
if i <= visualize_spectrogram_limit:
fig, axes = plt.subplots(3, 1, figsize=(6, 6))
for k, mag in enumerate([
mixture_mag,
enhanced_mag,
target_mag,
]):
axes[k].set_title(f"mean: {np.mean(mag):.3f}, "
f"std: {np.std(mag):.3f}, "
f"max: {np.max(mag):.3f}, "
f"min: {np.min(mag):.3f}")
librosa.display.specshow(librosa.amplitude_to_db(mag), cmap="magma", y_axis="linear", ax=axes[k],
sr=sr)
plt.tight_layout()
self.writer.add_figure(f"Spectrogram/{name}", fig, epoch)
# Metrics
c_m = compute_SDR(target, mixture)
c_e = compute_SDR(target, enhanced)
sdr_c_m.append(c_m)
sdr_c_e.append(c_e)
print(f"Value: {c_e - c_m} \n"
f"Mean: {get_metrics_ave(sdr_c_e) - get_metrics_ave(sdr_c_m)}")
self.writer.add_scalars(f"Metrics/SDR", {
"target and mixture": get_metrics_ave(sdr_c_m),
"target and enhanced": get_metrics_ave(sdr_c_e)
}, epoch)
score = get_metrics_ave(sdr_c_e)
return score
|
[
"matplotlib.pyplot.switch_backend",
"matplotlib.pyplot.tight_layout",
"tqdm.tqdm",
"numpy.sum",
"librosa.display.waveplot",
"numpy.std",
"torch.split",
"torch.cat",
"numpy.max",
"numpy.mean",
"numpy.min",
"librosa.amplitude_to_db",
"util.utils.compute_SDR",
"torch.no_grad",
"matplotlib.pyplot.subplots",
"librosa.stft"
] |
[((218, 243), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (236, 243), True, 'import matplotlib.pyplot as plt\n'), ((2571, 2586), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2584, 2586), False, 'import torch\n'), ((804, 848), 'tqdm.tqdm', 'tqdm', (['self.train_dataloader'], {'desc': '"""Training"""'}), "(self.train_dataloader, desc='Training')\n", (808, 848), False, 'from tqdm import tqdm\n'), ((4320, 4353), 'torch.cat', 'torch.cat', (['enhanced_chunks'], {'dim': '(1)'}), '(enhanced_chunks, dim=1)\n', (4329, 4353), False, 'import torch\n'), ((6969, 6997), 'util.utils.compute_SDR', 'compute_SDR', (['target', 'mixture'], {}), '(target, mixture)\n', (6980, 6997), False, 'from util.utils import compute_SDR\n'), ((7017, 7046), 'util.utils.compute_SDR', 'compute_SDR', (['target', 'enhanced'], {}), '(target, enhanced)\n', (7028, 7046), False, 'from util.utils import compute_SDR\n'), ((3129, 3144), 'numpy.sum', 'np.sum', (['metrics'], {}), '(metrics)\n', (3135, 3144), True, 'import numpy as np\n'), ((3637, 3676), 'torch.split', 'torch.split', (['mixture', 'n_samples'], {'dim': '(-1)'}), '(mixture, n_samples, dim=-1)\n', (3648, 3676), False, 'import torch\n'), ((5232, 5250), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (5244, 5250), True, 'import matplotlib.pyplot as plt\n'), ((5670, 5688), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5686, 5688), True, 'import matplotlib.pyplot as plt\n'), ((5847, 5895), 'librosa.stft', 'librosa.stft', (['mixture'], {'n_fft': '(320)', 'hop_length': '(160)'}), '(mixture, n_fft=320, hop_length=160)\n', (5859, 5895), False, 'import librosa\n'), ((5945, 5994), 'librosa.stft', 'librosa.stft', (['enhanced'], {'n_fft': '(320)', 'hop_length': '(160)'}), '(enhanced, n_fft=320, hop_length=160)\n', (5957, 5994), False, 'import librosa\n'), ((6042, 6089), 'librosa.stft', 'librosa.stft', (['target'], {'n_fft': '(320)', 'hop_length': 
'(160)'}), '(target, n_fft=320, hop_length=160)\n', (6054, 6089), False, 'import librosa\n'), ((6172, 6206), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(6, 6)'}), '(3, 1, figsize=(6, 6))\n', (6184, 6206), True, 'import matplotlib.pyplot as plt\n'), ((6831, 6849), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6847, 6849), True, 'import matplotlib.pyplot as plt\n'), ((5608, 5652), 'librosa.display.waveplot', 'librosa.display.waveplot', (['y'], {'sr': 'sr', 'ax': 'ax[j]'}), '(y, sr=sr, ax=ax[j])\n', (5632, 5652), False, 'import librosa\n'), ((6688, 6716), 'librosa.amplitude_to_db', 'librosa.amplitude_to_db', (['mag'], {}), '(mag)\n', (6711, 6716), False, 'import librosa\n'), ((5444, 5454), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (5451, 5454), True, 'import numpy as np\n'), ((5481, 5490), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (5487, 5490), True, 'import numpy as np\n'), ((5517, 5526), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (5523, 5526), True, 'import numpy as np\n'), ((5553, 5562), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (5559, 5562), True, 'import numpy as np\n'), ((6421, 6433), 'numpy.mean', 'np.mean', (['mag'], {}), '(mag)\n', (6428, 6433), True, 'import numpy as np\n'), ((6489, 6500), 'numpy.std', 'np.std', (['mag'], {}), '(mag)\n', (6495, 6500), True, 'import numpy as np\n'), ((6556, 6567), 'numpy.max', 'np.max', (['mag'], {}), '(mag)\n', (6562, 6567), True, 'import numpy as np\n'), ((6623, 6634), 'numpy.min', 'np.min', (['mag'], {}), '(mag)\n', (6629, 6634), True, 'import numpy as np\n')]
|
import src.view.senhasView as sv
import src.model.senhasModel as sm
class SenhasController:
def __init__(self):
self.senhas_model = sm.SenhasModel()
def start(self):
sev = sv.SenhasView(self)
sev.start()
def searchAllSenhas(self):
return self.senhas_model.selectAll()
def searchSenha(self, nome):
return self.senhas_model.select(nome)
def saveSenha(self, nome, tipo, login, senha, obs):
return self.senhas_model.save(nome, tipo, login, senha, obs)
def updateSenha(self, codigo, nome, tipo, login, senha, obs):
return self.senhas_model.update(codigo, nome, tipo, login, senha, obs)
def deleteSenha(self, codigo):
return self.senhas_model.delete(codigo)
|
[
"src.model.senhasModel.SenhasModel",
"src.view.senhasView.SenhasView"
] |
[((146, 162), 'src.model.senhasModel.SenhasModel', 'sm.SenhasModel', ([], {}), '()\n', (160, 162), True, 'import src.model.senhasModel as sm\n'), ((199, 218), 'src.view.senhasView.SenhasView', 'sv.SenhasView', (['self'], {}), '(self)\n', (212, 218), True, 'import src.view.senhasView as sv\n')]
|
'''
Copyright 2015 by <NAME>
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: <NAME>
This example implements the Rosenbrock function into SPOT.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import spotpy
class spot_setup(object):
slow = 1000
def __init__(self):
self.params = [spotpy.parameter.List('x',[1,2,3,4,6,7,8,9,0]), #Give possible x values as a List
spotpy.parameter.List('y',[0,1,2,5,7,8,9,0,1])] #Give possible y values as a List
self.database = file('MyOwnDatabase.txt','w')
def parameters(self):
return spotpy.parameter.generate(self.params)
def simulation(self,vector):
x=np.array(vector)
for i in xrange(self.slow):
s = np.sin(i)
simulations= [sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)]
return simulations
def evaluation(self):
observations=[0]
return observations
def objectivefunction(self,simulation,evaluation):
objectivefunction=-spotpy.objectivefunctions.rmse(evaluation,simulation)
return objectivefunction
def save(self, objectivefunctions, parameter, simulations):
line=str(objectivefunctions)+','+str(parameter).strip('[]')+','+str(simulations).strip('[]')+'\n'
self.database.write(line)
spot_setup=spot_setup()
'Leave out dbformat and dbname and spotpy will return results in spot_setup.save function'
sampler=spotpy.algorithms.mc(spot_setup)
sampler.sample(10) #Choose equaly or less repetitions as you have parameters in your List
spot_setup.database.close() # Close the created txt file
|
[
"spotpy.parameter.List",
"spotpy.parameter.generate",
"numpy.sin",
"numpy.array",
"spotpy.algorithms.mc",
"spotpy.objectivefunctions.rmse"
] |
[((1669, 1701), 'spotpy.algorithms.mc', 'spotpy.algorithms.mc', (['spot_setup'], {}), '(spot_setup)\n', (1689, 1701), False, 'import spotpy\n'), ((778, 816), 'spotpy.parameter.generate', 'spotpy.parameter.generate', (['self.params'], {}), '(self.params)\n', (803, 816), False, 'import spotpy\n'), ((869, 885), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (877, 885), True, 'import numpy as np\n'), ((462, 517), 'spotpy.parameter.List', 'spotpy.parameter.List', (['"""x"""', '[1, 2, 3, 4, 6, 7, 8, 9, 0]'], {}), "('x', [1, 2, 3, 4, 6, 7, 8, 9, 0])\n", (483, 517), False, 'import spotpy\n'), ((567, 622), 'spotpy.parameter.List', 'spotpy.parameter.List', (['"""y"""', '[0, 1, 2, 5, 7, 8, 9, 0, 1]'], {}), "('y', [0, 1, 2, 5, 7, 8, 9, 0, 1])\n", (588, 622), False, 'import spotpy\n'), ((938, 947), 'numpy.sin', 'np.sin', (['i'], {}), '(i)\n', (944, 947), True, 'import numpy as np\n'), ((1231, 1285), 'spotpy.objectivefunctions.rmse', 'spotpy.objectivefunctions.rmse', (['evaluation', 'simulation'], {}), '(evaluation, simulation)\n', (1261, 1285), False, 'import spotpy\n')]
|
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.config import Config
class Scr1(Screen):
def __init__(self,name='first'):
super().__init__(name=name)
l1=BoxLayout(orientation="vertical")
label=Label(text='[size=20][b]Hello[/b][/size]')
label2=Label(text="Калькулятор")
btn=Button(text="Начать")
l1.add_widget(label)
l1.add_widget(label2)
l1.add_widget(btn)
self.add_widget(l1)
btn.on_press=self.next
def next(self):
self.manager.current='second'
class Scr2(Screen):
def __init__(self,name='second'):
super().__init__(name=name)
l1=BoxLayout(orientation="vertical")
label=Label(text='[size=20][b]Hello[/b][/size]')
label2=Label(text="Калькулятор")
btn=Button(text="Начать")
l1.add_widget(label)
l1.add_widget(label2)
l1.add_widget(btn)
self.add_widget(l1)
btn.on_press=self.next
def next(self):
self.manager.current='second'
class MyApp(App):
def build(self):
sm=ScreenManager()
sm.add_widget(Scr1(name="first"))
sm.add_widget(Scr2(name="second"))
return sm
MyApp().run()
|
[
"kivy.uix.boxlayout.BoxLayout",
"kivy.uix.label.Label",
"kivy.uix.screenmanager.ScreenManager",
"kivy.uix.button.Button"
] |
[((423, 456), 'kivy.uix.boxlayout.BoxLayout', 'BoxLayout', ([], {'orientation': '"""vertical"""'}), "(orientation='vertical')\n", (432, 456), False, 'from kivy.uix.boxlayout import BoxLayout\n'), ((472, 514), 'kivy.uix.label.Label', 'Label', ([], {'text': '"""[size=20][b]Hello[/b][/size]"""'}), "(text='[size=20][b]Hello[/b][/size]')\n", (477, 514), False, 'from kivy.uix.label import Label\n'), ((531, 556), 'kivy.uix.label.Label', 'Label', ([], {'text': '"""Калькулятор"""'}), "(text='Калькулятор')\n", (536, 556), False, 'from kivy.uix.label import Label\n'), ((570, 591), 'kivy.uix.button.Button', 'Button', ([], {'text': '"""Начать"""'}), "(text='Начать')\n", (576, 591), False, 'from kivy.uix.button import Button\n'), ((915, 948), 'kivy.uix.boxlayout.BoxLayout', 'BoxLayout', ([], {'orientation': '"""vertical"""'}), "(orientation='vertical')\n", (924, 948), False, 'from kivy.uix.boxlayout import BoxLayout\n'), ((964, 1006), 'kivy.uix.label.Label', 'Label', ([], {'text': '"""[size=20][b]Hello[/b][/size]"""'}), "(text='[size=20][b]Hello[/b][/size]')\n", (969, 1006), False, 'from kivy.uix.label import Label\n'), ((1023, 1048), 'kivy.uix.label.Label', 'Label', ([], {'text': '"""Калькулятор"""'}), "(text='Калькулятор')\n", (1028, 1048), False, 'from kivy.uix.label import Label\n'), ((1062, 1083), 'kivy.uix.button.Button', 'Button', ([], {'text': '"""Начать"""'}), "(text='Начать')\n", (1068, 1083), False, 'from kivy.uix.button import Button\n'), ((1353, 1368), 'kivy.uix.screenmanager.ScreenManager', 'ScreenManager', ([], {}), '()\n', (1366, 1368), False, 'from kivy.uix.screenmanager import ScreenManager, Screen\n')]
|
# flake8: noqa
import logging
import os
import warnings
logger = logging.getLogger(__name__)
warnings.simplefilter("default")
try:
import alchemy
from .alchemy import AlchemyRunner, SupervisedAlchemyRunner
warnings.warn(
"AlchemyRunner and SupervisedAlchemyRunner are deprecated; "
"use AlchemyLogger instead (`from catalyst.dl import AlchemyLogger`)",
DeprecationWarning
)
except ImportError as ex:
logger.warning(
"alchemy not available, to install alchemy, "
"run `pip install alchemy-catalyst`."
)
if os.environ.get("USE_ALCHEMY", "0") == "1":
raise ex
try:
import neptune
from .neptune import NeptuneRunner, SupervisedNeptuneRunner
warnings.warn(
"NeptuneRunner and SupervisedNeptuneRunner are deprecated; "
"will be removed in 20.04 release", DeprecationWarning
)
except ImportError as ex:
if os.environ.get("USE_NEPTUNE", "0") == "1":
logger.warning(
"neptune not available, to install neptune, "
"run `pip install neptune-client`."
)
raise ex
try:
import wandb
from .wandb import WandbRunner, SupervisedWandbRunner
warnings.warn(
"WandbRunner and SupervisedWandbRunner are deprecated; "
"will be removed in 20.04 release", DeprecationWarning
)
except ImportError as ex:
if os.environ.get("USE_WANDB", "0") == "1":
logger.warning(
"wandb not available, to install wandb, run `pip install wandb`."
)
raise ex
|
[
"os.environ.get",
"warnings.warn",
"warnings.simplefilter",
"logging.getLogger"
] |
[((66, 93), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (83, 93), False, 'import logging\n'), ((94, 126), 'warnings.simplefilter', 'warnings.simplefilter', (['"""default"""'], {}), "('default')\n", (115, 126), False, 'import warnings\n'), ((221, 393), 'warnings.warn', 'warnings.warn', (['"""AlchemyRunner and SupervisedAlchemyRunner are deprecated; use AlchemyLogger instead (`from catalyst.dl import AlchemyLogger`)"""', 'DeprecationWarning'], {}), "(\n 'AlchemyRunner and SupervisedAlchemyRunner are deprecated; use AlchemyLogger instead (`from catalyst.dl import AlchemyLogger`)'\n , DeprecationWarning)\n", (234, 393), False, 'import warnings\n'), ((730, 867), 'warnings.warn', 'warnings.warn', (['"""NeptuneRunner and SupervisedNeptuneRunner are deprecated; will be removed in 20.04 release"""', 'DeprecationWarning'], {}), "(\n 'NeptuneRunner and SupervisedNeptuneRunner are deprecated; will be removed in 20.04 release'\n , DeprecationWarning)\n", (743, 867), False, 'import warnings\n'), ((1202, 1335), 'warnings.warn', 'warnings.warn', (['"""WandbRunner and SupervisedWandbRunner are deprecated; will be removed in 20.04 release"""', 'DeprecationWarning'], {}), "(\n 'WandbRunner and SupervisedWandbRunner are deprecated; will be removed in 20.04 release'\n , DeprecationWarning)\n", (1215, 1335), False, 'import warnings\n'), ((576, 610), 'os.environ.get', 'os.environ.get', (['"""USE_ALCHEMY"""', '"""0"""'], {}), "('USE_ALCHEMY', '0')\n", (590, 610), False, 'import os\n'), ((916, 950), 'os.environ.get', 'os.environ.get', (['"""USE_NEPTUNE"""', '"""0"""'], {}), "('USE_NEPTUNE', '0')\n", (930, 950), False, 'import os\n'), ((1384, 1416), 'os.environ.get', 'os.environ.get', (['"""USE_WANDB"""', '"""0"""'], {}), "('USE_WANDB', '0')\n", (1398, 1416), False, 'import os\n')]
|