code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 11:02:21 2019
@author: elizabethhutton
"""
import pandas as pd
import numpy as np
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from wordcloud import WordCloud
from yellowbrick.cluster import KElbowVisualizer
from sklearn.neighbors import NearestNeighbors
import spacy
import iterate
class Cluster():
    """K-means clustering wrapper over a vectorized document corpus."""

    def __init__(self, corpus, num_clusters):
        """Perform k-means clustering on corpus.

        Keyword Arguments:
        corpus -- document corpus as Corpus object
        num_clusters -- k clusters to search for
        """
        self.k = num_clusters
        self.top_words = None
        vectors = corpus.vectors
        km = KMeans(n_clusters=num_clusters, init='k-means++')
        cluster_ids = km.fit_predict(vectors)
        self.model = km
        self.centers = km.cluster_centers_
        # attach each word's cluster assignment back onto the corpus
        corpus.clusters = pd.DataFrame(cluster_ids, columns=['clusterid'],
                                       index=vectors.index)
        return

    def get_top_words(self, corpus, knn):
        """Get knn top words for each cluster.

        Keyword Arguments:
        corpus -- pandas df of words and their vectors
        knn -- (int) num words to find per cluster
        """
        vectors = corpus.vectors
        knn_model = NearestNeighbors(n_neighbors=knn, metric='cosine')
        knn_model.fit(vectors)
        # nearest words (cosine distance) to every cluster center
        neighbor_idxs = [
            knn_model.kneighbors(center.reshape(1, -1), n_neighbors=knn,
                                 return_distance=False)
            for center in self.centers
        ]
        top_n_words = pd.DataFrame()
        for i, idxs in enumerate(neighbor_idxs):
            top_n_words['Cluster ' + str(i)] = [
                vectors.iloc[j].name for j in idxs[0]
            ]
        self.top_words = top_n_words
        return top_n_words
def iterate_kmeans(clean_corpus, elbow):
    """Vectorize the corpus, then run every elbow diagnostic over it.

    Diagnostics correspond to the 'var', inertia, Calinski-Harabasz and
    distance criteria exposed by the elbow helper.
    """
    # prep for clustering
    clean_corpus.vectorize()
    # iterate kmeans over num topics with each scoring method
    for diagnostic in (elbow.elbow_kmeans_variance,
                       elbow.elbow_kmeans_inertia,
                       elbow.elbow_kmeans_ch,
                       elbow.elbow_kmeans_dist):
        diagnostic(clean_corpus)
    return
#fix
def plot_tsne(word_vectors):
    """Project word vectors to 2-D with t-SNE and draw a labelled scatter."""
    projector = TSNE(n_components=2, random_state=0, n_iter=5000, perplexity=3)
    np.set_printoptions(suppress=True)
    coords = projector.fit_transform(word_vectors)
    labels = word_vectors.index
    plt.figure(figsize=(12, 6))
    plt.scatter(coords[:, 0], coords[:, 1], c='orange', edgecolors='r')
    # annotate each point with its word, nudged off the marker
    for label, x, y in zip(labels, coords[:, 0], coords[:, 1]):
        plt.annotate(label, xy=(x + 1, y + 1), xytext=(0, 0),
                     textcoords='offset points')
    return
def get_kmeans(clean_corpus, num_topics):
    """Cluster the corpus into num_topics groups; return the model and the
    10 nearest words per cluster."""
    cluster_model = Cluster(clean_corpus, num_topics)
    return cluster_model, cluster_model.get_top_words(clean_corpus, knn=10)
| [
"sklearn.cluster.KMeans",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.figure",
"sklearn.neighbors.NearestNeighbors",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"numpy.set_printoptions"
] | [((829, 878), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'num_clusters', 'init': '"""k-means++"""'}), "(n_clusters=num_clusters, init='k-means++')\n", (835, 878), False, 'from sklearn.cluster import KMeans\n'), ((1131, 1197), 'pandas.DataFrame', 'pd.DataFrame', (['idx'], {'columns': "['clusterid']", 'index': 'word_vectors.index'}), "(idx, columns=['clusterid'], index=word_vectors.index)\n", (1143, 1197), True, 'import pandas as pd\n'), ((1528, 1578), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'knn', 'metric': '"""cosine"""'}), "(n_neighbors=knn, metric='cosine')\n", (1544, 1578), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((1878, 1892), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1890, 1892), True, 'import pandas as pd\n'), ((2721, 2784), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': '(0)', 'n_iter': '(5000)', 'perplexity': '(3)'}), '(n_components=2, random_state=0, n_iter=5000, perplexity=3)\n', (2725, 2784), False, 'from sklearn.manifold import TSNE\n'), ((2793, 2827), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (2812, 2827), True, 'import numpy as np\n'), ((2926, 2953), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (2936, 2953), True, 'from matplotlib import pyplot as plt\n'), ((2962, 3019), 'matplotlib.pyplot.scatter', 'plt.scatter', (['T[:, 0]', 'T[:, 1]'], {'c': '"""orange"""', 'edgecolors': '"""r"""'}), "(T[:, 0], T[:, 1], c='orange', edgecolors='r')\n", (2973, 3019), True, 'from matplotlib import pyplot as plt\n'), ((3090, 3176), 'matplotlib.pyplot.annotate', 'plt.annotate', (['label'], {'xy': '(x + 1, y + 1)', 'xytext': '(0, 0)', 'textcoords': '"""offset points"""'}), "(label, xy=(x + 1, y + 1), xytext=(0, 0), textcoords=\n 'offset points')\n", (3102, 3176), True, 'from matplotlib import pyplot as plt\n')] |
from netmiko import ConnectHandler
from getpass import getpass
device1 = {
    'host': 'cisco4.lasthop.io',
    'username': 'pyclass',
    'password': getpass(),
    'device_type': 'cisco_ios',
    'global_delay_factor': 2,
}

net_connect = ConnectHandler(**device1)
print(net_connect.find_prompt())

# Walk the interactive "ping" dialog: start the command, accept the
# protocol default, supply the target IP, then accept the remaining
# prompts with bare newlines.
replies = ["ping", "\n", "8.8.8.8", "\n", "\n", "\n", "\n", "\n"]
output = ""
for reply in replies:
    output += net_connect.send_command_timing(
        reply, strip_prompt=False, strip_command=False
    )
net_connect.disconnect()

print()
print(output)
print()
| [
"netmiko.ConnectHandler",
"getpass.getpass"
] | [((247, 272), 'netmiko.ConnectHandler', 'ConnectHandler', ([], {}), '(**device1)\n', (261, 272), False, 'from netmiko import ConnectHandler\n'), ((155, 164), 'getpass.getpass', 'getpass', ([], {}), '()\n', (162, 164), False, 'from getpass import getpass\n')] |
# -*- coding: utf-8 -*-
import os
import telebot
import time
import random
import threading
from emoji import emojize
from telebot import types
from pymongo import MongoClient
import traceback
# Bot token comes from the environment so it is never committed to source.
token = os.environ['TELEGRAM_TOKEN']
bot = telebot.TeleBot(token)
# Global game state: active players, seconds between fight rounds,
# and the pool of NPC names used to back-fill the arena.
fighters=[]
btimer=10
names=['Волк', 'Осёл', 'Кроль', '<NAME>', 'Сосааать']
@bot.message_handler(commands=['start'])
def start(m):
    """Register the sender as a fighter (at most once) and confirm entry."""
    already_playing = any(p['id'] == m.from_user.id for p in fighters)
    if not already_playing:
        fighters.append(createplayer(user=m.from_user))
    bot.send_message(m.chat.id, 'Вы успешно зашли в игру! Теперь ждите, пока ваш боец прострелит кому-нибудь яйцо.\nСоветую кинуть бота в мут!')
@bot.message_handler(commands=['add'])
def add(m):
    """Admin-only command: add an NPC fighter by name."""
    if m.from_user.id != 441399484:
        return
    name = m.text.split(' ')[1]
    fighters.append(createplayer(name=name))
    bot.send_message(m.chat.id, 'Добавлен игрок "' + name + '"!')
@bot.message_handler(commands=['settimer'])
def settimer(m):
    """Admin-only command: set the delay (seconds) between fight rounds.

    Expected message format: "/settimer <seconds>".  Malformed input is
    silently ignored (same best-effort behaviour as before), but only the
    expected failures are swallowed: a missing argument (IndexError) or a
    non-numeric value (ValueError).  The previous bare ``except:`` also
    trapped KeyboardInterrupt/SystemExit, which could mask real problems.
    """
    if m.from_user.id == 441399484:
        try:
            global btimer
            btimer = int(m.text.split(' ')[1])
        except (IndexError, ValueError):
            # missing or non-numeric argument -- keep the current timer
            pass
@bot.message_handler(commands=['stats'])
def stats(m):
    """Send the caller his current hit points, times hit and kill count."""
    me = next((p for p in fighters if p['id'] == m.from_user.id), None)
    if me is None:
        return
    report = (
        'ХП: ' + str(me['hp']) + '\n'
        + 'В вас попали: ' + str(me['hitted']) + ' раз(а)\n'
        + 'Вы убили: ' + str(me['killed']) + ' дурачков\n'
    )
    bot.send_message(m.chat.id, report)
def createplayer(user=None, name=None):
    """Build a fresh fighter record.

    With a Telegram ``user`` the fighter takes the user's first name and
    numeric id; otherwise ``name`` is used and the id is the string 'npc'.
    """
    if user is not None:
        name, idd = user.first_name, user.id
    else:
        idd = 'npc'
    return {
        'hp': 1000,
        'damage': 10,
        'killchance': 5,
        'name': name,
        'id': idd,
        'hitted': 0,   # how many times this fighter has been hit
        'killed': 0,   # how many fighters this one has killed
        'killer': ''
    }
def fight():
    """Run one round of the battle, then re-schedule itself.

    Each living fighter attacks one random living opponent; dead fighters
    are notified and removed; if fewer than three fighters remain an NPC
    is added; finally the next round is scheduled via threading.Timer.
    """
    # Attack phase: every fighter (even a dead one still in the list this
    # round) picks a random living opponent other than himself.
    for ids in fighters:
        alive=[]
        for idss in fighters:
            if idss['hp']>0 and idss['id']!=ids['id']:
                alive.append(idss)
        if len(alive)>0:
            text=''
            target=random.choice(alive)
            # damage varies +/-20% around the fighter's base damage
            dmg=ids['damage']+ids['damage']*(random.randint(-20, 20)/100)
            target['hp']-=dmg
            target['hitted']+=1
            text+='Вы попали в '+target['name']+'! Нанесено '+str(dmg)+' урона.\n'
            if target['hp']<=0:
                ids['killed']+=1
                target['killer']=ids['name']
                text+='Вы убили цель!\n'
            else:
                # 'killchance'% chance of an instant kill on a surviving target
                if random.randint(1,100)<=ids['killchance']:
                    target['hp']=0
                    ids['killed']+=1
                    target['killer']=ids['name']
                    text+='Вы прострелили яйцо цели! Та погибает.\n'
            # NPCs have a non-numeric id, so sending to them fails; ignore
            try:
                bot.send_message(ids['id'], text)
            except:
                pass
    # Cleanup phase: collect the dead first, then notify and remove them
    # (removing while iterating `fighters` directly would skip entries).
    dellist=[]
    for ids in fighters:
        if ids['hp']<=0:
            dellist.append(ids)
    for ids in dellist:
        try:
            bot.send_message(ids['id'], 'Вы сдохли. Вас убил '+ids['killer'])
        except:
            pass
        me=ids
        text='Итоговые статы:\n\n'
        text+='ХП: '+str(me['hp'])+'\n'
        text+='В вас попали: '+str(me['hitted'])+' раз(а)\n'
        text+='Вы убили: '+str(me['killed'])+' дурачков\n'
        try:
            bot.send_message(ids['id'], text)
        except:
            pass
        fighters.remove(ids)
    # Keep the arena populated: top up with an NPC when few fighters remain.
    if len(fighters)<=2:
        name=random.choice(names)
        fighters.append(createplayer(name=name))
    # Schedule the next round `btimer` seconds from now.
    global btimer
    t=threading.Timer(btimer, fight)
    t.start()
# Kick off the first round at import time.
fight()
def medit(message_text, chat_id, message_id, reply_markup=None, parse_mode=None):
    """Shorthand for editing an existing bot message in place."""
    return bot.edit_message_text(
        chat_id=chat_id,
        message_id=message_id,
        text=message_text,
        reply_markup=reply_markup,
        parse_mode=parse_mode,
    )
# Startup marker in the console log.
print('7777')
# Long-poll Telegram for updates; the generous timeout keeps the loop alive.
bot.polling(none_stop=True,timeout=600)
| [
"threading.Timer",
"random.choice",
"random.randint",
"telebot.TeleBot"
] | [((237, 259), 'telebot.TeleBot', 'telebot.TeleBot', (['token'], {}), '(token)\n', (252, 259), False, 'import telebot\n'), ((3715, 3745), 'threading.Timer', 'threading.Timer', (['btimer', 'fight'], {}), '(btimer, fight)\n', (3730, 3745), False, 'import threading\n'), ((3621, 3641), 'random.choice', 'random.choice', (['names'], {}), '(names)\n', (3634, 3641), False, 'import random\n'), ((2239, 2259), 'random.choice', 'random.choice', (['alive'], {}), '(alive)\n', (2252, 2259), False, 'import random\n'), ((2667, 2689), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (2681, 2689), False, 'import random\n'), ((2305, 2328), 'random.randint', 'random.randint', (['(-20)', '(20)'], {}), '(-20, 20)\n', (2319, 2328), False, 'import random\n')] |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for django_easy_report.

    Creates the four core tables -- ReportGenerator (how to build a report),
    ReportQuery (one concrete generation run), ReportSender (where/how the
    result is stored or mailed) and ReportRequester (a user asking for a
    query's result) -- plus the generator -> sender foreign key.

    NOTE(review): several help_text strings contain a doubled "for for";
    fixing them would require a follow-up migration, so they are left as-is.
    """

    initial = True

    dependencies = [
        # The requester model points at whatever user model the project uses.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='ReportGenerator',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('name', models.SlugField(max_length=32, unique=True)),
                ('class_name', models.CharField(help_text='Class name for for generate the report. It must be subclass of django_easy_report.reports.ReportBaseGenerator', max_length=64)),
                ('init_params', models.TextField(blank=True, help_text='JSON with init parameters', null=True)),
                ('permissions', models.CharField(blank=True, help_text='Comma separated permission list. Permission formatted as: <content_type.app_label>.<permission.codename>', max_length=1024, null=True)),
                ('always_generate', models.BooleanField(default=False, help_text='Do not search for similar reports previously generated')),
                ('always_download', models.BooleanField(default=False, help_text='Never will redirect to storage URL')),
                ('preserve_report', models.BooleanField(default=False, help_text='If model is deleted, do not remove the file on storage')),
            ],
        ),
        migrations.CreateModel(
            name='ReportQuery',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('status', models.PositiveSmallIntegerField(choices=[(0, 'Created'), (10, 'Working'), (20, 'Done'), (30, 'Error')], default=0)),
                ('filename', models.CharField(max_length=32)),
                ('mimetype', models.CharField(default='application/octet-stream', max_length=32)),
                ('params', models.TextField(blank=True, null=True)),
                # Hash of params used to detect previously-generated equivalents.
                ('params_hash', models.CharField(max_length=128)),
                ('storage_path_location', models.CharField(blank=True, max_length=512, null=True)),
                ('report', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='django_easy_report.reportgenerator')),
            ],
            options={
                'ordering': ('created_at',),
            },
        ),
        migrations.CreateModel(
            name='ReportSender',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(db_index=True, max_length=32, unique=True)),
                ('email_from', models.EmailField(blank=True, help_text='If have content email must be send when report is completed.', max_length=254, null=True)),
                ('size_to_attach', models.PositiveIntegerField(default=0, help_text='If size is bigger, the file will be upload using storage system, otherwise the file will be send as attached on the email.')),
                ('storage_class_name', models.CharField(help_text='Class name for for save the report. It must be subclass of django.core.files.storage.Storage', max_length=64)),
                ('storage_init_params', models.TextField(blank=True, help_text='JSON with init parameters', null=True)),
            ],
        ),
        migrations.CreateModel(
            name='ReportRequester',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('request_at', models.DateTimeField(auto_now_add=True)),
                ('user_params', models.TextField(blank=True, null=True)),
                ('notified', models.BooleanField(default=False)),
                ('query', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_easy_report.reportquery')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Added after both models exist: a generator must reference a sender.
        migrations.AddField(
            model_name='reportgenerator',
            name='sender',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='django_easy_report.reportsender'),
        ),
    ]
| [
"django.db.models.EmailField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.SlugField",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.PositiveIntegerField",
"django.db.models.BigAutoField",
"django.db.models.DateTim... | [((198, 255), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (229, 255), False, 'from django.db import migrations, models\n'), ((4582, 4687), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""django_easy_report.reportsender"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'django_easy_report.reportsender')\n", (4599, 4687), False, 'from django.db import migrations, models\n'), ((395, 491), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (414, 491), False, 'from django.db import migrations, models\n'), ((521, 556), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (541, 556), False, 'from django.db import migrations, models\n'), ((584, 628), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(32)', 'unique': '(True)'}), '(max_length=32, unique=True)\n', (600, 628), False, 'from django.db import migrations, models\n'), ((662, 826), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Class name for for generate the report. It must be subclass of django_easy_report.reports.ReportBaseGenerator"""', 'max_length': '(64)'}), "(help_text=\n 'Class name for for generate the report. 
It must be subclass of django_easy_report.reports.ReportBaseGenerator'\n , max_length=64)\n", (678, 826), False, 'from django.db import migrations, models\n'), ((851, 929), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""JSON with init parameters"""', 'null': '(True)'}), "(blank=True, help_text='JSON with init parameters', null=True)\n", (867, 929), False, 'from django.db import migrations, models\n'), ((964, 1160), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Comma separated permission list. Permission formatted as: <content_type.app_label>.<permission.codename>"""', 'max_length': '(1024)', 'null': '(True)'}), "(blank=True, help_text=\n 'Comma separated permission list. Permission formatted as: <content_type.app_label>.<permission.codename>'\n , max_length=1024, null=True)\n", (980, 1160), False, 'from django.db import migrations, models\n'), ((1189, 1296), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Do not search for similar reports previously generated"""'}), "(default=False, help_text=\n 'Do not search for similar reports previously generated')\n", (1208, 1296), False, 'from django.db import migrations, models\n'), ((1330, 1417), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Never will redirect to storage URL"""'}), "(default=False, help_text=\n 'Never will redirect to storage URL')\n", (1349, 1417), False, 'from django.db import migrations, models\n'), ((1451, 1558), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""If model is deleted, do not remove the file on storage"""'}), "(default=False, help_text=\n 'If model is deleted, do not remove the file on storage')\n", (1470, 1558), False, 'from django.db import migrations, models\n'), ((1690, 1786), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], 
{'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1709, 1786), False, 'from django.db import migrations, models\n'), ((1816, 1855), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1836, 1855), False, 'from django.db import migrations, models\n'), ((1889, 1924), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1909, 1924), False, 'from django.db import migrations, models\n'), ((1954, 2073), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': "[(0, 'Created'), (10, 'Working'), (20, 'Done'), (30, 'Error')]", 'default': '(0)'}), "(choices=[(0, 'Created'), (10, 'Working'),\n (20, 'Done'), (30, 'Error')], default=0)\n", (1986, 2073), False, 'from django.db import migrations, models\n'), ((2101, 2132), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (2117, 2132), False, 'from django.db import migrations, models\n'), ((2164, 2231), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""application/octet-stream"""', 'max_length': '(32)'}), "(default='application/octet-stream', max_length=32)\n", (2180, 2231), False, 'from django.db import migrations, models\n'), ((2261, 2300), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2277, 2300), False, 'from django.db import migrations, models\n'), ((2335, 2367), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (2351, 2367), False, 'from django.db import migrations, models\n'), ((2412, 2467), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(512)', 'null': '(True)'}), '(blank=True, 
max_length=512, null=True)\n', (2428, 2467), False, 'from django.db import migrations, models\n'), ((2497, 2605), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""django_easy_report.reportgenerator"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'django_easy_report.reportgenerator')\n", (2514, 2605), False, 'from django.db import migrations, models\n'), ((2820, 2916), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2839, 2916), False, 'from django.db import migrations, models\n'), ((2946, 2981), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2966, 2981), False, 'from django.db import migrations, models\n'), ((3009, 3068), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'max_length': '(32)', 'unique': '(True)'}), '(db_index=True, max_length=32, unique=True)\n', (3025, 3068), False, 'from django.db import migrations, models\n'), ((3102, 3241), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'help_text': '"""If have content email must be send when report is completed."""', 'max_length': '(254)', 'null': '(True)'}), "(blank=True, help_text=\n 'If have content email must be send when report is completed.',\n max_length=254, null=True)\n", (3119, 3241), False, 'from django.db import migrations, models\n'), ((3270, 3454), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)', 'help_text': '"""If size is bigger, the file will be upload using storage system, otherwise the file will be send as attached on the email."""'}), "(default=0, help_text=\n 'If size is bigger, the file will be upload using storage system, otherwise the file will 
be send as attached on the email.'\n )\n", (3297, 3454), False, 'from django.db import migrations, models\n'), ((3486, 3633), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Class name for for save the report. It must be subclass of django.core.files.storage.Storage"""', 'max_length': '(64)'}), "(help_text=\n 'Class name for for save the report. It must be subclass of django.core.files.storage.Storage'\n , max_length=64)\n", (3502, 3633), False, 'from django.db import migrations, models\n'), ((3666, 3744), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""JSON with init parameters"""', 'null': '(True)'}), "(blank=True, help_text='JSON with init parameters', null=True)\n", (3682, 3744), False, 'from django.db import migrations, models\n'), ((3885, 3981), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3904, 3981), False, 'from django.db import migrations, models\n'), ((4011, 4050), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4031, 4050), False, 'from django.db import migrations, models\n'), ((4085, 4124), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4101, 4124), False, 'from django.db import migrations, models\n'), ((4156, 4190), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (4175, 4190), False, 'from django.db import migrations, models\n'), ((4219, 4323), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""django_easy_report.reportquery"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 
'django_easy_report.reportquery')\n", (4236, 4323), False, 'from django.db import migrations, models\n'), ((4346, 4442), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (4363, 4442), False, 'from django.db import migrations, models\n')] |
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop, ugettext_lazy
from couchdbkit import ResourceNotFound
from memoized import memoized
from corehq.apps.fixtures.dispatcher import FixtureInterfaceDispatcher
from corehq.apps.fixtures.models import FixtureDataType, _id_from_doc
from corehq.apps.fixtures.views import FixtureViewMixIn, fixtures_home
from corehq.apps.reports.filters.base import BaseSingleOptionFilter
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
class FixtureInterface(FixtureViewMixIn, GenericReportView):
    """Shared base for the lookup-table report pages.

    Rendered synchronously, never exportable, and does not require any
    report filters to be selected before display.
    """
    base_template = 'fixtures/fixtures_base.html'
    asynchronous = False
    dispatcher = FixtureInterfaceDispatcher
    exportable = False
    needs_filters = False
class FixtureSelectFilter(BaseSingleOptionFilter):
    """Single-select report filter listing the domain's lookup tables."""
    slug = "table_id"
    label = ""
    placeholder = "place"
    default_text = ugettext_lazy("Select a Table")

    @property
    def selected(self):
        # ko won't display default selected-value as it should,
        # display default_text instead
        return ""

    @property
    @memoized
    def fixtures(self):
        data_types = FixtureDataType.by_domain(self.domain)
        return sorted(data_types, key=lambda data_type: data_type.tag.lower())

    @property
    @memoized
    def options(self):
        pairs = []
        for fixture in self.fixtures:
            pairs.append((_id_from_doc(fixture), fixture.tag))
        return pairs
class FixtureViewInterface(GenericTabularReport, FixtureInterface):
    """Read-only tabular view of a single lookup table.

    The table to show is chosen via the ``table_id`` GET parameter
    (populated by FixtureSelectFilter).
    """
    name = ugettext_noop("View Tables")
    slug = "view_lookup_tables"
    report_template_path = 'fixtures/view_table.html'
    fields = ['corehq.apps.fixtures.interface.FixtureSelectFilter']

    @property
    def view_response(self):
        # With no tables at all, redirect to the fixtures landing page
        # instead of rendering an empty report.
        if not self.has_tables():
            messages.info(self.request, _("You don't have any tables defined yet - create tables to view them."))
            return HttpResponseRedirect(fixtures_home(self.domain))
        else:
            return super(FixtureViewInterface, self).view_response

    @property
    def report_context(self):
        assert self.has_tables()
        # No table chosen yet (or the chosen doc no longer exists):
        # tell the template to render its "select a table" state.
        if not self.request.GET.get("table_id", None):
            return {"table_not_selected": True}
        try:
            context = super(FixtureViewInterface, self).report_context
        except ResourceNotFound:
            return {"table_not_selected": True}

        # Build javascript options for DataTables
        report_table = context['report_table']
        headers = report_table.get('headers')
        data_tables_options = {
            'slug': self.context['report']['slug'],
            'defaultRows': report_table.get('default_rows', 10),
            'startAtRowNum': report_table.get('start_at_row', 0),
            'showAllRowsOption': report_table.get('show_all_rows'),
            'autoWidth': headers.auto_width,
        }
        if headers.render_aoColumns:
            data_tables_options.update({
                'aoColumns': headers.render_aoColumns,
            })
        if headers.custom_sort:
            data_tables_options.update({
                'customSort': headers.custom_sort,
            })

        # Server-side pagination, if the report has it switched on.
        pagination = context['report_table'].get('pagination', {})
        if pagination.get('is_on'):
            data_tables_options.update({
                'ajaxSource': pagination.get('source'),
                'ajaxParams': pagination.get('params'),
            })

        # Optionally pin the leftmost column(s) while scrolling.
        left_col = context['report_table'].get('left_col', {})
        if left_col.get('is_fixed'):
            data_tables_options.update({
                'fixColumns': True,
                'fixColsNumLeft': left_col['fixed'].get('num'),
                'fixColsWidth': left_col['fixed'].get('width'),
            })

        context.update({
            "selected_table": self.table.get("table_id", ""),
            'data_tables_options': data_tables_options,
        })
        if self.lookup_table:
            context.update({
                "table_description": self.lookup_table.description,
            })
        return context

    @memoized
    def has_tables(self):
        # True when the domain has at least one lookup table.
        return True if list(FixtureDataType.by_domain(self.domain)) else False

    @property
    @memoized
    def table(self):
        # Imported here (not at module level) to avoid a circular import
        # with corehq.apps.fixtures.views.
        from corehq.apps.fixtures.views import data_table
        if self.has_tables() and self.request.GET.get("table_id", None):
            return data_table(self.request, self.domain)
        else:
            return {"headers": None, "rows": None}

    @cached_property
    def lookup_table(self):
        # The FixtureDataType document for the selected table, or None.
        if self.has_tables() and self.request.GET.get("table_id", None):
            return FixtureDataType.get(self.request.GET['table_id'])
        return None

    @property
    def headers(self):
        return self.table["headers"]

    @property
    def rows(self):
        return self.table["rows"]
class FixtureEditInterface(FixtureInterface):
    """Management page listing every lookup table in the domain for editing."""
    name = ugettext_noop("Manage Tables")
    slug = "edit_lookup_tables"
    report_template_path = 'fixtures/manage_tables.html'

    @property
    def report_context(self):
        context = super(FixtureEditInterface, self).report_context
        context.update(types=self.data_types)
        return context

    @property
    @memoized
    def data_types(self):
        domain_types = FixtureDataType.by_domain(self.domain)
        return list(domain_types)
| [
"corehq.apps.fixtures.models._id_from_doc",
"django.utils.translation.ugettext_lazy",
"django.utils.translation.ugettext_noop",
"corehq.apps.fixtures.views.data_table",
"corehq.apps.fixtures.views.fixtures_home",
"corehq.apps.fixtures.models.FixtureDataType.get",
"django.utils.translation.ugettext",
"... | [((1048, 1079), 'django.utils.translation.ugettext_lazy', 'ugettext_lazy', (['"""Select a Table"""'], {}), "('Select a Table')\n", (1061, 1079), False, 'from django.utils.translation import ugettext_noop, ugettext_lazy\n'), ((1572, 1600), 'django.utils.translation.ugettext_noop', 'ugettext_noop', (['"""View Tables"""'], {}), "('View Tables')\n", (1585, 1600), False, 'from django.utils.translation import ugettext_noop, ugettext_lazy\n'), ((4964, 4994), 'django.utils.translation.ugettext_noop', 'ugettext_noop', (['"""Manage Tables"""'], {}), "('Manage Tables')\n", (4977, 4994), False, 'from django.utils.translation import ugettext_noop, ugettext_lazy\n'), ((1305, 1343), 'corehq.apps.fixtures.models.FixtureDataType.by_domain', 'FixtureDataType.by_domain', (['self.domain'], {}), '(self.domain)\n', (1330, 1343), False, 'from corehq.apps.fixtures.models import FixtureDataType, _id_from_doc\n'), ((4446, 4483), 'corehq.apps.fixtures.views.data_table', 'data_table', (['self.request', 'self.domain'], {}), '(self.request, self.domain)\n', (4456, 4483), False, 'from corehq.apps.fixtures.views import data_table\n'), ((4691, 4740), 'corehq.apps.fixtures.models.FixtureDataType.get', 'FixtureDataType.get', (["self.request.GET['table_id']"], {}), "(self.request.GET['table_id'])\n", (4710, 4740), False, 'from corehq.apps.fixtures.models import FixtureDataType, _id_from_doc\n'), ((5341, 5379), 'corehq.apps.fixtures.models.FixtureDataType.by_domain', 'FixtureDataType.by_domain', (['self.domain'], {}), '(self.domain)\n', (5366, 5379), False, 'from corehq.apps.fixtures.models import FixtureDataType, _id_from_doc\n'), ((1443, 1458), 'corehq.apps.fixtures.models._id_from_doc', '_id_from_doc', (['f'], {}), '(f)\n', (1455, 1458), False, 'from corehq.apps.fixtures.models import FixtureDataType, _id_from_doc\n'), ((1875, 1947), 'django.utils.translation.ugettext', '_', (['"""You don\'t have any tables defined yet - create tables to view them."""'], {}), '("You don\'t have any tables 
defined yet - create tables to view them.")\n', (1876, 1947), True, 'from django.utils.translation import ugettext as _\n'), ((1989, 2015), 'corehq.apps.fixtures.views.fixtures_home', 'fixtures_home', (['self.domain'], {}), '(self.domain)\n', (2002, 2015), False, 'from corehq.apps.fixtures.views import FixtureViewMixIn, fixtures_home\n'), ((4195, 4233), 'corehq.apps.fixtures.models.FixtureDataType.by_domain', 'FixtureDataType.by_domain', (['self.domain'], {}), '(self.domain)\n', (4220, 4233), False, 'from corehq.apps.fixtures.models import FixtureDataType, _id_from_doc\n')] |
"""
Code for the optimization and gaming component of the Baselining work.
@author: <NAME>, <NAME>
@date Mar 2, 2016
"""
import numpy as np
import pandas as pd
import logging
from gurobipy import GRB, Model, quicksum, LinExpr
from pandas.tseries.holiday import USFederalHolidayCalendar
from datetime import datetime
from .utils import (get_energy_charges, get_demand_charge, dem_charges, dem_charges_yearly,
get_pdp_demand_credit, get_DR_rewards, powerset, E19,
carbon_costs)
# define some string formatters used as dictionary keys throughout:
# psform -- full period timestamp (to the minute), keys for per-period vars
# dsform -- date only, keys for per-day variables (e.g. zday)
psform = '%Y-%m-%d %H:%M'
dsform = '%Y-%m-%d'
class BLModel(object):
"""
Abstract base class for Baselining models.
"""
def __init__(self, name):
"""
Construct an abstract dynamical system object based on the
gurobipy Model object 'model'.
"""
self._name = name
self._model = Model()
def get_model(self):
"""
Returns the underlying gurobiy Model object.
"""
return self._model
def set_dynsys(self, dynsys):
"""
Initialize dynamical system for underlying dynamics.
"""
self._dynsys = dynsys
def set_window(self, index):
"""
Set the window for the optimization. Here index is a pandas
DatetimeIndex.
"""
self._index = index
self._dynsys.set_window(index)
def energy_charges(self, tariff, isRT=False, LMP=None, isPDP=False,
twindow=None, carbon=False):
"""
Return total enery consumption charges (as determined by the
tariff's energy charge) as a gurobipy LinExpr.
"""
locidx = self._index.tz_convert('US/Pacific')
year = locidx[0].year
if isRT and isPDP:
raise Exception('Cannot combine RTP and PDP.')
nrg_charges = get_energy_charges(
self._index, tariff, isRT=isRT, LMP=LMP,
isPDP=isPDP, carbon=carbon, year=year)['EnergyCharge']
cons = self._dynsys.get_consumption()['energy']
if twindow is None:
# echrg_= quicksum([ec * con for ec, con in
# zip(nrg_charges.values, cons.values)])
echrg_ = [ec * con for ec, con in
zip(nrg_charges.values, cons.values)]
echrg = pd.Series(echrg_, index=locidx)
else:
nrg_charges_ = nrg_charges.loc[twindow[0]:twindow[1]]
cons_ = cons.loc[twindow[0]:twindow[1]]
# echrg = quicksum([ec * con for ec, con in
# zip(nrg_charges_.values, cons_.values)])
echrg_ = [ec * con for ec, con in
zip(nrg_charges_.values, cons_.values)]
indx = locidx[locidx.get_loc(twindow[0]):
locidx.get_loc(twindow[1])+1]
echrg = pd.Series(echrg_, index=indx)
return echrg
    def demand_charges(self, tariff, isPDP=False):
        """
        Return the total demand charges under the tariff as a pandas
        Series of gurobipy expressions, one entry per billing month.

        For non-E19 tariffs with demand charges, a single monthly-peak
        variable is created per month. For E19 tariffs, separate peak /
        part-peak variables are created for the summer (May-Oct) and
        winter TOU periods. Tariffs without demand charges yield a
        Series of zero LinExprs.
        """
        # Remove all variables/constraints possibly left over from a
        # previous call, so the model can be re-populated from scratch.
        if hasattr(self, '_maxcon'):
            for maxcon in self._maxcon.values():
                self._model.remove(maxcon)
            del self._maxcon
        if hasattr(self, '_maxconbnd'):
            for maxconbnd in self._maxconbnd.values():
                self._model.remove(maxconbnd)
            del self._maxconbnd
        if hasattr(self, '_maxconppk'):
            for maxconppk in self._maxconppk.values():
                self._model.remove(maxconppk)
            del self._maxconppk
        if hasattr(self, '_maxconppkbnd'):
            for maxconppkbnd in self._maxconppkbnd.values():
                self._model.remove(maxconppkbnd)
            del self._maxconppkbnd
        if hasattr(self, '_maxconpk'):
            for maxconpk in self._maxconpk.values():
                self._model.remove(maxconpk)
            del self._maxconpk
        if hasattr(self, '_maxconpkbnd'):
            for maxconpkbnd in self._maxconpkbnd.values():
                self._model.remove(maxconpkbnd)
            del self._maxconpkbnd
        if hasattr(self, '_maxconpks'):
            for maxconpks in self._maxconpks.values():
                self._model.remove(maxconpks)
            del self._maxconpks
        if hasattr(self, '_maxconppkw'):
            for maxconppkw in self._maxconppkw.values():
                self._model.remove(maxconppkw)
            del self._maxconppkw
        if hasattr(self, '_maxconppkbndw'):
            for maxconppkbndw in self._maxconppkbndw.values():
                self._model.remove(maxconppkbndw)
            del self._maxconppkbndw
        if hasattr(self, '_maxconppks'):
            for maxconppks in self._maxconppks.values():
                self._model.remove(maxconppks)
            del self._maxconppks
        if hasattr(self, '_maxconppkbnds'):
            for maxconppkbnds in self._maxconppkbnds.values():
                self._model.remove(maxconppkbnds)
            del self._maxconppkbnds
        self._model.update()
        locidx = self._index.tz_convert('US/Pacific')
        # ym_dict maps each year in the horizon to the (unique) months that
        # appear in it; one peak variable is tracked per (year, month).
        ym_dict = {year: np.unique(locidx[locidx.year == year].month)
                   for year in np.unique(locidx.year)}
        # indx holds one (month-start) timestamp per billing month and is
        # used as the index of the returned Series.
        indx = []
        for year, months in ym_dict.items():
            for month in months:
                indx.append(pd.Timestamp(datetime(year, month, 1),
                                         tz='US/Pacific'))
        if tariff in dem_charges:
            if not(tariff in E19):
                self._maxcon, self._maxconbnd = {}, {}
                # locidx = self._index.tz_convert('US/Pacific')
                # print locidx
                # the following creates a dictionary with all years in the data
                # as keys, and for each year the value is an array of (unique)
                # months that appear during that year. This is used for keeping
                # track of the peak consumpiton for the demand charge
                # ym_dict = {year: np.unique(locidx[locidx.year == year].month)
                #            for year in np.unique(locidx.year)}
                # indx=[]
                for year, months in ym_dict.items():
                    for month in months:
                        # declare variable for max consumption
                        self._maxcon[year, month] = self._model.addVar(
                            vtype=GRB.CONTINUOUS,
                            name='maxcon[{},{}]'.format(year, month))
                # indx.append(pd.Timestamp(datetime(year,month,1),tz='US/Pacific'))
                self._model.update()
                # now add in the necessary constraints and update objective
                dcharges = []
                cons = self._dynsys.get_consumption()['power']
                for year, months in ym_dict.items():
                    for month in months:
                        # maxcon must dominate every period's power draw
                        relcons = cons[(locidx.year == year) &
                                       (locidx.month == month)].values
                        for i, con in enumerate(relcons):
                            self._maxconbnd[year, month, i] = self._model.addConstr(
                                lhs=self._maxcon[year, month],
                                sense=GRB.GREATER_EQUAL,
                                rhs=con, name='maxconbnd[{},{},{}]'.format(
                                    year, month, i))
                        # dcharges += (get_demand_charge(tariff, month, isPDP)*
                        #              self._maxcon[year, month])
                        dcharges.append(
                            (get_demand_charge(tariff, month, isPDP, year=year) *
                             self._maxcon[year, month]))
                dcharges = pd.Series(dcharges, index=indx)
                self._model.update()
                return dcharges
            else:
                # for E19 tarrifs: TOU-dependent peak / part-peak charges
                idx_ = self._index.tz_convert('US/Pacific')
                # NOTE(review): `> 5` marks only Sunday as weekend
                # (dayofweek: Mon=0..Sun=6); Saturday would need `>= 5`.
                # Confirm whether this is intentional before changing.
                iswknd = idx_.dayofweek > 5
                holidays = USFederalHolidayCalendar().holidays(
                    idx_.min(), idx_.max())
                iswknd = iswknd | pd.DatetimeIndex(idx_.date).isin(holidays)
                issummer = (idx_.month >= 5) & (idx_.month <= 10)
                ToD = idx_.hour + idx_.minute / 60
                # E19 TOU windows: peak 12-18h summer weekdays; part-peak
                # 8:30-12 / 18-21:30 summer, 8:30-21:30 winter weekdays
                ispeak = ~iswknd & issummer & (ToD >= 12) & (ToD < 18)
                ispartial_summer = (~iswknd & issummer & (
                    ((ToD >= 8.5) & (ToD < 12)) |
                    ((ToD >= 18) & (ToD < 21.5))))
                ispartial_winter = ~iswknd & ~issummer & (
                    (ToD >= 8.5) & (ToD < 21.5))
                # create dictionaries for variables
                self._maxcon, self._maxconbnd = {}, {}
                self._maxconppks, self._maxconppkbnds = {}, {}
                self._maxconpks, self._maxconpkbnds = {}, {}
                self._maxconpk, self._maxconpkbnd = {}, {}
                self._maxconppk, self._maxconppkbnd = {}, {}
                self._maxconppkw, self._maxconppkbndw = {}, {}
                # locidx = self._index.tz_convert('US/Pacific')
                # ym_dict = {year: np.unique(locidx[locidx.year == year].month)
                #            for year in np.unique(locidx.year)}
                # indx=[]
                for year, months in ym_dict.items():
                    for month in months:
                        # declare variable for max consumption
                        self._maxcon[year, month] = self._model.addVar(
                            vtype=GRB.CONTINUOUS,
                            name='maxcon[{},{}]'.format(year, month))
                        # declare variable for part peak consumption
                        self._maxconppk[year, month] = self._model.addVar(
                            vtype=GRB.CONTINUOUS,
                            name='maxconppk[{},{}]'.format(year, month))
                        # declare variable for max peak only in summer
                        if (5 <= month) & (month <= 10):
                            # add variable for maximum peak usage in summer
                            self._maxconpk[year, month] = self._model.addVar(
                                vtype=GRB.CONTINUOUS,
                                name='maxconpk[{},{}]'.format(year, month))
                # indx.append(pd.Timestamp(datetime(year,month,1),tz='US/Pacific'))
                self._model.update()  # update model
                # now add in the necessary constraints and update objective
                dcharges = []
                cons = self._dynsys.get_consumption()['power']
                for year, months in ym_dict.items():
                    for month in months:
                        dchrg = 0.0
                        # for peak summer less than max demand
                        if (month >= 5) & (month <= 10):
                            self._maxconpkbnd[year, month] = self._model.addConstr(
                                lhs=self._maxcon[year, month],
                                sense=GRB.GREATER_EQUAL,
                                rhs=self._maxconpk[year, month],
                                name='maxconpkbnd[{},{}]'.format(year, month))
                        # max partial peak summer greater than consumption
                        ppconsum = cons[(ispartial_summer) &
                                        (locidx.year == year) &
                                        (locidx.month == month)].values
                        for i, con in enumerate(ppconsum):
                            self._maxconppkbnds[year, month, i] = self._model.addConstr(
                                lhs=self._maxconppk[year, month],
                                sense=GRB.GREATER_EQUAL,
                                rhs=con,
                                name='maxconppkbnds[{},{},{}]'.format(
                                    year, month, i))
                        # max peak consumption summer
                        pconsum = cons[(ispeak) & (locidx.year == year) &
                                       (locidx.month == month)].values
                        for i, con in enumerate(pconsum):
                            self._maxconpkbnds[year, month, i] = self._model.addConstr(
                                lhs=self._maxconpk[year, month],
                                sense=GRB.GREATER_EQUAL,
                                rhs=con,
                                name='maxconpkbnds[{},{},{}]'.format(
                                    year, month, i))
                        # max partial peak winter
                        ppkconwin = cons[(ispartial_winter) &
                                         (locidx.year == year) &
                                         (locidx.month == month)].values
                        for i, con in enumerate(ppkconwin):
                            self._maxconppkbndw[year, month, i] = self._model.addConstr(
                                lhs=self._maxconppk[year, month],
                                sense=GRB.GREATER_EQUAL,
                                rhs=con,
                                name='maxconppkbndw[{},{},{}]'.format(
                                    year, month, i))
                        # max demand each month
                        relcons = cons[(locidx.year == year) &
                                       (locidx.month == month)].values
                        for i, con in enumerate(relcons):
                            self._maxconbnd[year, month, i] = self._model.addConstr(
                                lhs=self._maxcon[year, month],
                                sense=GRB.GREATER_EQUAL,
                                rhs=con, name='maxconbnd[{},{},{}]'.format(
                                    year, month, i))
                        # max partial peaks (summer & winter) < than max demand
                        # NOTE(review): `i` here is the leftover index from the
                        # relcons loop above (and is undefined if relcons is
                        # empty); it only disambiguates the constraint name,
                        # but this looks accidental -- confirm.
                        self._maxconppkbnd[year, month, i] = self._model.addConstr(
                            lhs=self._maxcon[year, month],
                            sense=GRB.GREATER_EQUAL,
                            rhs=self._maxconppk[year, month],
                            name='maxconppkbnd[{},{},{}]'.format(
                                year, month, i))
                        demchrg = get_demand_charge(tariff, month, year=year)
                        if (month >= 5) & (month <= 10):
                            mpeakchg = demchrg['mpeak']
                            ppeakchg = demchrg['ppeak']
                            maxchg = demchrg['max']
                            if isPDP:
                                # PDP participation earns a credit on the
                                # summer peak demand charge
                                pdpcred = get_pdp_demand_credit(tariff, month, year=year)
                                mpeakchg = mpeakchg - pdpcred['peak']
                            dchrg += mpeakchg * self._maxconpk[year, month]
                            # dcharges.append(mpeakchg * self._maxconpk[year, month])
                        else:
                            ppeakchg = demchrg['ppeak']
                            maxchg = demchrg['max']
                        # add partpeak and maximum demand charge
                        dcharges.append(
                            (maxchg * self._maxcon[year, month] +
                             ppeakchg * self._maxconppk[year, month])+dchrg)
                self._model.update()
                dcharges = pd.Series(dcharges, index=indx)
                return dcharges
        else:
            # tariff has no demand charge: return zero expressions so
            # callers can uniformly quicksum the result
            return pd.Series([LinExpr(0.0) for ij in
                              range(0, np.size(indx, 0))], index=indx)
def DR_compensation(self, LMP, dr_periods, BL='CAISO', **kwargs):
"""
Return compensation for DR, i.e. reductions w.r.t. baseline.
Here LMP is a pandas Series (indexed by a tz-aware pandas
Datetimeindex containing all of the object's indices) and
dr_periods is a pandas DatetimeIndex.
"""
# start by removing all variables (might be inefficient, but o/w it
# is a pain in the ass do deal with the multihour baselines etc.)
self._removeOld()
# no work if no DR events are specified
if (LMP is None) or (dr_periods is None):
return pd.Series([0.0], index=['None'])
# get DR rewards (in case we want LMP-G instead of LMP)
DR_rewards = get_DR_rewards(LMP, isLMPmG=kwargs.get('isLMPmG'),
tariff=kwargs.get('tariff'))
# populate optimization problem for proper BL choices
if BL == 'CAISO':
# print self._DR_comp_CAISO(DR_rewards, dr_periods)
return self._DR_comp_CAISO(DR_rewards, dr_periods)
elif BL == 'expMA':
return self._DR_comp_expMA(DR_rewards, dr_periods, **kwargs)
else:
raise NotImplementedError(
'Baseline type "{}" not known!'.format(BL))
    def _DR_comp_CAISO(self, LMP, dr_periods):
        """
        Return compensation for DR, i.e. reductions w.r.t. CAISO baseline.
        Here LMP is a pandas Series (indexed by a tz-aware pandas
        Datetimeindex containing all of the object's indices) and
        dr_periods is a pandas DatetimeIndex. Note that LMP may also be
        LMP-G, i.e. the LMP minus the generation component of the tariff.

        The CAISO baseline for a period is the average consumption over
        the nmax most recent "similar" days (10 business / 4 non-business
        days, excluding DR-event days); since event participation is
        itself a decision variable (z), one big-M constraint is added per
        subset of candidate event days (via powerset).
        """
        valid_periods = dr_periods[dr_periods.isin(self._index)].tz_convert(
            'US/Pacific')
        locidx = self._index.tz_convert('US/Pacific')
        grouped = valid_periods.groupby(valid_periods.date)
        # define auxiliary variables for each possible dr period if none exist
        self._red, self._z, self._bl = {}, {}, {}
        self._redpos, self._redBL, self._red0, self._blcon = {}, {}, {}, {}
        self._dr_periods = valid_periods
        # add variables if there are days w/ multiple possible DR events
        if np.max([len(grp) for grp in grouped.values()]) > 1:
            self._zday, self._zdaysum, self._zdaymax = {}, {}, {}
        # now create variables for different days and periods within each day
        for day, periods in grouped.items():
            daystr = day.strftime(dsform)
            perstrs = [per.strftime(psform) for per in periods]
            if len(periods) > 1:
                # zday = "any event on this day" indicator
                self._zday[daystr] = self._model.addVar(
                    vtype=GRB.BINARY, name='zday[{}]'.format(daystr))
            for period, perstr in zip(periods, perstrs):
                # red = claimed reduction, z = participation, bl = baseline
                self._red[perstr] = self._model.addVar(
                    vtype=GRB.CONTINUOUS, name='red[{}]'.format(perstr))
                self._z[perstr] = self._model.addVar(
                    vtype=GRB.BINARY, name='z[{}]'.format(perstr))
                self._bl[perstr] = self._model.addVar(
                    vtype=GRB.CONTINUOUS, name='bl[{}]'.format(perstr))
        self._model.update()  # this must be done before defining constraints
        # determine "bigM" value from the bounds on the control variables
        M = np.sum(np.asarray(self._dynsys._opts['nrg_coeffs']) *
                   (self._dynsys._opts['umax'] - self._dynsys._opts['umin']),
                   axis=1).max()
        # if u is not bounded the the above results in an NaN value. We need
        # to deal with this in a better way than the following:
        if np.isnan(M):
            M = 1e9
        # perform some preparations for the constraints
        # drcomp = 0.0
        nrgcons = self._dynsys.get_consumption()['energy']
        lmps = LMP.tz_convert('US/Pacific').loc[locidx] / 1000  # to $/kWh
        holidays = USFederalHolidayCalendar().holidays(
            start=locidx.min(), end=locidx.max())
        isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
        isBusiness = pd.Series(isBusiness, index=locidx)
        # add constraints on variable zday (if multiple periods per day):
        # zday <= sum(z) and zday >= each z, i.e. zday = OR of the z's
        for day, periods in grouped.items():
            daystr = day.strftime(dsform)
            perstrs = [per.strftime(psform) for per in periods]
            if len(periods) > 1:
                self._zdaysum[daystr] = self._model.addConstr(
                    lhs=self._zday[daystr],
                    sense=GRB.LESS_EQUAL,
                    rhs=quicksum([self._z[ps] for ps in perstrs]),
                    name='zdaysum[{}]'.format(daystr))
                for period, perstr in zip(periods, perstrs):
                    self._zdaymax[perstr] = self._model.addConstr(
                        lhs=self._zday[daystr],
                        sense=GRB.GREATER_EQUAL,
                        rhs=self._z[perstr],
                        name='zdaymax[{}]'.format(perstr))
        self._model.update()
        # formulate constraints and add terms to objective
        drcomp_ = []
        for i, day in enumerate(grouped):
            periods = grouped[day]
            # print('Formulating constraints for day {} of {}'.format(
            #     i, len(grouped)))
            perstrs = [per.strftime(psform) for per in periods]
            for period, perstr in zip(periods, perstrs):
                # candidate "similar" periods: same time of day, strictly
                # earlier, same business/non-business type
                per_select = ((locidx < period) &
                              (locidx.hour == period.hour) &
                              (locidx.minute == period.minute))
                if isBusiness.loc[period]:
                    nmax = 10
                    per_select = per_select & isBusiness.values
                else:
                    nmax = 4
                    per_select = per_select & (~isBusiness.values)
                similars = locidx[per_select].sort_values(ascending=False)
                # now go through similar days successively, splitting them
                # into non-DR days and candidate DR days
                sim_nonDR, sim_DR, sim_DR_mult = [], [], []
                for sim in similars:
                    if len(sim_nonDR) == nmax:
                        continue
                    if sim in self._dr_periods:
                        sim_DR += [sim]
                        if len(grouped[pd.Timestamp(sim.date())]) > 1:
                            sim_DR_mult += [sim]
                    else:
                        sim_nonDR += [sim]
                sim_DR = pd.DatetimeIndex(
                    sim_DR).sort_values(ascending=False)
                sim_DR_mult = pd.DatetimeIndex(
                    sim_DR_mult).sort_values(ascending=False)
                sim_nonDR = pd.DatetimeIndex(
                    sim_nonDR).sort_values(ascending=False)
                # get consumption variables
                cons_nonDR = nrgcons.loc[sim_nonDR].values
                # Now add constraints on the baseline variables: one big-M
                # constraint per subset K of candidate DR days (K = days
                # assumed to be events, Kc = its complement); only the
                # constraint matching the realized z's binds.
                for idxset in powerset(range(len(sim_DR))):
                    K = [sim_DR[i] for i in idxset]
                    Kc = [sim_DR[i] for i in range(len(sim_DR))
                          if i not in idxset]
                    qK = nrgcons.loc[K].values.tolist()
                    # Need to make sure to use zday if there are multiple
                    # events possible that day!
                    zK, zKc = [], []
                    for k in K:
                        if k in sim_DR_mult:
                            zK.append(self._zday[k.strftime(dsform)])
                        else:
                            zK.append(self._z[k.strftime(psform)])
                    for kc in Kc:
                        if kc in sim_DR_mult:
                            zKc.append(self._zday[kc.strftime(dsform)])
                        else:
                            zKc.append(self._z[kc.strftime(psform)])
                    # the following uses that the "closest" days appear first
                    qD = cons_nonDR[:nmax-len(idxset)].tolist()
                    n = len(sim_nonDR)
                    if n == 0:
                        # NOTE(review): this only warns; the division by
                        # float(n) below will still raise ZeroDivisionError
                        # in this case -- confirm intended handling.
                        print('No non-DR day available for BL computation -' +
                              ' too many DR events!')
                    bnd = (quicksum(qD + qK) / float(n) +
                           M * quicksum(zK) +
                           M * quicksum([(1-z) for z in zKc]))
                    self._blcon[perstr, idxset] = self._model.addConstr(
                        lhs=self._bl[perstr], sense=GRB.LESS_EQUAL,
                        rhs=bnd, name="blcon[{},{}]".format(perstr, idxset))
                # add constraints on baseline reduction:
                # 0 <= red <= bl - consumption, and red = 0 unless z = 1
                self._redpos[perstr] = self._model.addConstr(
                    lhs=self._red[perstr], sense=GRB.GREATER_EQUAL,
                    rhs=0.0, name='redpos[{}]'.format(perstr))
                self._redBL[perstr] = self._model.addConstr(
                    lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
                    rhs=self._bl[perstr] - nrgcons.loc[period],
                    name='redBL[{}]'.format(perstr))
                self._red0[perstr] = self._model.addConstr(
                    lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
                    rhs=self._z[perstr] * M, name='red0[{}]'.format(perstr))
                # add DR compensation to objective
                # drcomp += lmps.loc[period] * self._red[perstr]
                drcomp_.append(lmps.loc[period] * self._red[perstr])
        drcomp = pd.Series(drcomp_, index=self._dr_periods)
        self._model.update()
        return drcomp
    def _DR_comp_expMA(self, LMP, dr_periods, **kwargs):
        """
        Return compensation for DR, i.e. reductions w.r.t. an exponential
        moving-average (expMA) baseline. Here LMP is a pandas Series
        (indexed by a tz-aware pandas Datetimeindex containing all of the
        object's indices) and dr_periods is a pandas DatetimeIndex. Note
        that LMP may also be LMP-G, i.e. the LMP minus the generation
        component of the tariff.

        The expMA baseline is tracked per hour-of-day, separately for
        business and non-business days, with smoothing parameters
        alpha_b / alpha_nb (overridable via kwargs). On event periods the
        baseline is frozen (big-M switched by z); otherwise it follows
        bl <- alpha*q + (1-alpha)*bl.
        """
        # set default values for alphas if not passed as kwargs
        if 'alpha_b' in kwargs:
            alpha_b = kwargs['alpha_b']
        else:
            alpha_b = 0.175  # business day
        if 'alpha_nb' in kwargs:
            alpha_nb = kwargs['alpha_nb']
        else:
            alpha_nb = 0.25  # non-business day
        valid_periods = dr_periods[dr_periods.isin(self._index)]
        locidx = self._index.tz_convert('US/Pacific')
        grouped = valid_periods.groupby(
            valid_periods.tz_convert('US/Pacific').date)
        # define auxiliary variables for each possible dr period if none exist
        self._red, self._z, self._bl = {}, {}, {}
        self._redpos, self._redBL, self._red0, self._blcon = {}, {}, {}, {}
        self._dr_periods = valid_periods
        # add variables if there are days w/ multiple possible DR events
        if np.max([len(grp) for grp in grouped.values()]) > 1:
            self._zday, self._zdaysum, self._zdaymax = {}, {}, {}
        # now create variables for different days and periods within each day
        for day, periods in grouped.items():
            daystr = day.strftime(dsform)
            perstrs = [per.strftime(psform) for per in periods]
            if len(periods) > 1:
                self._zday[daystr] = self._model.addVar(
                    vtype=GRB.BINARY, name='zday[{}]'.format(daystr))
            for period, perstr in zip(periods, perstrs):
                self._red[perstr] = self._model.addVar(
                    vtype=GRB.CONTINUOUS, name='red[{}]'.format(perstr))
                self._z[perstr] = self._model.addVar(
                    vtype=GRB.BINARY, name='z[{}]'.format(perstr))
        # for the expMA we have to define a variable for the bl value
        # for every period of the simulation range
        for per in self._index:
            perstr = per.strftime(psform)
            self._bl[perstr] = self._model.addVar(
                vtype=GRB.CONTINUOUS, name='bl[{}]'.format(perstr))
        self._model.update()  # this must be done before defining constraints
        # determine "bigM" value from the bounds on the control variables
        M = np.sum(np.asarray(self._dynsys._opts['nrg_coeffs']) *
                   (self._dynsys._opts['umax'] - self._dynsys._opts['umin']),
                   axis=1).max()
        # if u is not bounded the the above results in an NaN value. We need
        # to deal with this in a better way than the following:
        if np.isnan(M):
            M = 1e9
        # perform some preparations for the constraints
        drcomp_ = []
        nrgcons = self._dynsys.get_consumption()['energy']
        lmps = LMP.tz_convert('US/Pacific').loc[locidx] / 1000  # to $/kWh
        holidays = USFederalHolidayCalendar().holidays(
            start=locidx.min(), end=locidx.max())
        isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
        isBusiness = pd.Series(isBusiness, index=locidx)
        # add constraints on variable zday (if multiple periods per day):
        # zday is the OR of the per-period z's of that day
        for day, periods in grouped.items():
            daystr = day.strftime(dsform)
            perstrs = [per.strftime(psform) for per in periods]
            if len(periods) > 1:
                self._zdaysum[daystr] = self._model.addConstr(
                    lhs=self._zday[daystr],
                    sense=GRB.LESS_EQUAL,
                    rhs=quicksum([self._z[ps] for ps in perstrs]),
                    name='zdaysum[{}]'.format(daystr))
                for period, perstr in zip(periods, perstrs):
                    self._zdaymax[perstr] = self._model.addConstr(
                        lhs=self._zday[daystr],
                        sense=GRB.GREATER_EQUAL,
                        rhs=self._z[perstr],
                        name='zdaymax[{}]'.format(perstr))
        self._model.update()
        # now add the constraints that define the baseline as well as a
        # bunch of other stuff
        for cons, alpha in zip([nrgcons[isBusiness], nrgcons[~isBusiness]],
                               [alpha_b, alpha_nb]):
            # localize consumption index
            considxloc = cons.index.tz_convert('US/Pacific')
            # compute BLs for each hour separately
            con_hrly = {hour: cons[considxloc.hour == hour].sort_index()
                        for hour in range(24)}
            for hour, con in con_hrly.items():
                # set the initial value of the BL to zero (this should not have
                # an overly large effect of the course of a year or so...)
                # NOTE: This assumes that the first occurrence of an hour (for
                # both business and non-business days) is NOT a potential event
                perstr_pre = con.index[0].strftime(psform)
                self._blcon[perstr_pre, 'init'] = self._model.addConstr(
                    lhs=self._bl[perstr_pre], sense=GRB.EQUAL,
                    rhs=0.0, name='blcon[{}]'.format(perstr_pre))
                # now loop through the rest
                for period, q in con.iloc[1:].items():
                    perstr = period.strftime(psform)
                    # if the period under consideration is a DR period,
                    # we have to do some work ...
                    if period in valid_periods:
                        # need to use zday if this day has multiple DR events
                        dt = period.tz_convert('US/Pacific').date()
                        if len(grouped[dt]) > 1:
                            z = self._zday[dt.strftime(dsform)]
                        else:
                            z = self._z[perstr]
                        # add big M constraints on the bl: if z=1 the bl is
                        # frozen at its previous value, o/w it follows the
                        # expMA update (only upper bounds are needed)
                        self._blcon[perstr, 'static'] = self._model.addConstr(
                            lhs=self._bl[perstr], sense=GRB.LESS_EQUAL,
                            rhs=self._bl[perstr_pre] + M * (1 - z),
                            name='blcon[{},static]'.format(perstr))
                        self._blcon[perstr, 'change'] = self._model.addConstr(
                            lhs=self._bl[perstr], sense=GRB.LESS_EQUAL,
                            rhs=alpha*q + (1-alpha)*self._bl[perstr_pre] + M*z,
                            name='blcon[{},change]'.format(perstr))
                        # add constraints on baseline reduction
                        self._redpos[perstr] = self._model.addConstr(
                            lhs=self._red[perstr], sense=GRB.GREATER_EQUAL,
                            rhs=0.0, name='redpos[{}]'.format(perstr))
                        self._redBL[perstr] = self._model.addConstr(
                            lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
                            rhs=self._bl[perstr] - q,
                            name='redBL[{}]'.format(perstr))
                        self._red0[perstr] = self._model.addConstr(
                            lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
                            rhs=self._z[perstr] * M,
                            name='red0[{}]'.format(perstr))
                        # add DR compensation to objective
                        # NOTE(review): drcomp_ is appended in (day-type,
                        # hour) iteration order but indexed below by
                        # self._dr_periods -- confirm the orders agree.
                        drcomp_.append(
                            (lmps.loc[period.tz_convert('US/Pacific')] *
                             self._red[perstr]))
                    # ... otherwise this is pretty straightforward
                    else:
                        self._blcon[perstr] = self._model.addConstr(
                            lhs=self._bl[perstr], sense=GRB.EQUAL,
                            rhs=alpha * q + (1 - alpha) * self._bl[perstr_pre],
                            name='blcon[{}]'.format(perstr))
                    # update and keep track of last bl variable
                    perstr_pre = perstr
        drcomp = pd.Series(drcomp_, index=self._dr_periods)
        self._model.update()
        return drcomp
    def DR_comp_blfix(self, LMP, bl_values, **kwargs):
        """
        Return compensation for DR, i.e. reductions w.r.t. baseline.
        Here LMP is a pandas Series (indexed by a tz-aware pandas
        Datetimeindex containing all of the object's indices) and
        bl_values is a pandas Series, whose index is a DatetimeIndex,
        each entry of which represents a possible DR period, and whose
        values are the baseline values for those periods (assumed fixed).
        This is used for solving the baseline-taking equilibrium problem.
        Note that LMP may also be LMP-G, i.e. the LMP minus the generation
        component of the tariff.
        """
        # drop variables/constraints from any earlier DR formulation
        self._removeOld()
        # keep only baseline values that fall inside the model's horizon
        self._blvals = bl_values[
            bl_values.index.isin(self._index)].tz_convert('US/Pacific')
        locidx = self._index.tz_convert('US/Pacific')
        self._grouped = self._blvals.index.groupby(self._blvals.index.date)
        # define dictionaries to store variables in
        self._red, self._z = {}, {}
        self._redpos, self._redBL, self._red0 = {}, {}, {}
        # create variables for different days and periods within each day:
        # red = claimed reduction, z = participation indicator
        for day, periods in self._grouped.items():
            perstrs = [per.strftime(psform) for per in periods]
            for period, perstr in zip(periods, perstrs):
                self._red[perstr] = self._model.addVar(
                    vtype=GRB.CONTINUOUS, name='red[{}]'.format(perstr))
                self._z[perstr] = self._model.addVar(
                    vtype=GRB.BINARY, name='z[{}]'.format(perstr))
        self._model.update()  # must be done before defining constraints
        # determine "bigM" value from the bounds on the control variables
        M = np.sum(np.asarray(self._dynsys._opts['nrg_coeffs']) *
                   (self._dynsys._opts['umax'] -
                    self._dynsys._opts['umin']), axis=1).max()
        # if u is not bounded the the above results in an NaN value. We
        # need to deal with this in a better way than the following:
        if np.isnan(M):
            M = 1e9
        # perform some preparations for the constraints
        self._drcomp = 0.0
        nrgcons = self._dynsys.get_consumption()['energy']
        DR_rewards = get_DR_rewards(LMP, isLMPmG=kwargs.get('isLMPmG'),
                                    tariff=kwargs.get('tariff'))
        # Pick out relevant dates and convert to $/kWh
        DR_rewards = DR_rewards.tz_convert('US/Pacific').loc[locidx] / 1000
        holidays = USFederalHolidayCalendar().holidays(
            start=locidx.min(), end=locidx.max())
        isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
        isBusiness = pd.Series(isBusiness, index=locidx)
        # formulate constraints and add terms to objective
        for i, day in enumerate(self._grouped):
            periods = self._grouped[day]
            perstrs = [per.strftime(psform) for per in periods]
            for period, perstr in zip(periods, perstrs):
                # add constraints on baseline reduction:
                # 0 <= red, red <= blval - consumption + M*(1-z), red <= M*z
                self._redpos[perstr] = self._model.addConstr(
                    lhs=self._red[perstr], sense=GRB.GREATER_EQUAL,
                    rhs=0.0, name='redpos[{}]'.format(perstr))
                self._redBL[perstr] = self._model.addConstr(
                    lhs=(self._red[perstr] + nrgcons.loc[period] -
                         (1-self._z[perstr]) * M),
                    sense=GRB.LESS_EQUAL, rhs=self._blvals.loc[period],
                    name='redBL[{}]'.format(perstr))
                self._red0[perstr] = self._model.addConstr(
                    lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
                    rhs=self._z[perstr] * M, name='red0[{}]'.format(
                        perstr))
                # add DR compensation to objective
                self._drcomp += DR_rewards.loc[period] * self._red[perstr]
        self._model.update()
        return self._drcomp
def compute_baseline(self, bl_periods, red_times=None, BL='CAISO',
**kwargs):
"""
Compute the CAISO baseline for all elements of the pandas
Datetimeindex bl_periods. If red_times is a Datetimeindex,
regard the associated days as "event days" (in addition to
weekend days and holidays).
"""
if BL == 'CAISO':
return self._BL_CAISO(bl_periods, red_times=red_times)
elif BL == 'expMA':
return self._BL_expMA(bl_periods, red_times=red_times,
**kwargs)
else:
raise NotImplementedError(
'Baseline type "{}" not known!'.format(BL))
    def _BL_CAISO(self, bl_periods, red_times=None):
        """
        Compute the CAISO baseline for all elements of the pandas
        Datetimeindex bl_periods. If red_times is a Datetimeindex,
        regard the associated days as "event days" (in addition to
        weekend days and holidays).

        Returns a pandas Series of numeric baseline values (the average
        consumption over the 10 (business) / 4 (non-business) most
        recent similar non-event periods), indexed in GMT.
        """
        locidx = self._index.tz_convert('US/Pacific')
        cons = self._dynsys.get_consumption()['energy'].tz_convert(
            'US/Pacific')
        holidays = USFederalHolidayCalendar().holidays(
            start=locidx.min(), end=locidx.max())
        isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
        isBusiness = pd.Series(isBusiness, index=locidx)
        if red_times is not None:
            # mark whole days containing an event as event days
            isEventDay = locidx.normalize().isin(red_times.tz_convert(
                'US/Pacific').normalize())
        blidx, blvals = bl_periods.tz_convert('US/Pacific'), []
        for period in blidx:
            # similar periods: same time of day, strictly earlier,
            # same business/non-business type
            per_select = ((locidx < period) &
                          (locidx.hour == period.hour) &
                          (locidx.minute == period.minute))
            if isBusiness.loc[period]:
                nmax = 10
                per_select = per_select & isBusiness.values
            else:
                nmax = 4
                per_select = per_select & (~isBusiness.values)
            if red_times is not None:
                per_select = per_select & (~isEventDay)
            similars = locidx[per_select].sort_values(ascending=False)[:nmax]
            # average the optimized consumption over the similar periods
            # (cons holds gurobipy expressions, hence .getValue())
            blvals.append(np.sum([c.getValue() for c in cons.loc[similars]]) /
                          float(len(similars)))
        return pd.Series(blvals, index=blidx.tz_convert('GMT'))
    def _BL_expMA(self, bl_periods, red_times=None, alpha_b=0.14,
                  alpha_nb=0.32):
        """
        Compute the expMA baseline for all elements of the pandas
        Datetimeindex bl_periods using the smoothing parameters alpha_b
        (business days) and alpha_nb (non-business days).
        If red_times is a Datetimeindex, regard the associated days as
        "event days" (in addition to weekend days and holidays); on
        event periods the moving average is frozen at its previous
        value. Returns a pandas Series indexed by bl_periods.
        """
        locidx = self._index.tz_convert('US/Pacific')
        cons = self._dynsys.get_consumption()['energy'].tz_convert(
            'US/Pacific')
        # evaluate the optimized consumption expressions to plain floats
        cons = pd.Series([c.getValue() for c in cons],
                         index=cons.index)
        holidays = USFederalHolidayCalendar().holidays(
            start=locidx.min(), end=locidx.max())
        isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
        isBusiness = pd.Series(isBusiness, index=locidx)
        bls = []
        for con, alpha in zip([cons[isBusiness], cons[~isBusiness]],
                              [alpha_b, alpha_nb]):
            # determine initial values for the BL from non-DR data
            if red_times is not None:
                nDRc = con[~con.index.isin(red_times)]
            else:
                nDRc = con
            cmeans = nDRc.groupby(nDRc.index.hour).mean()
            # compute BL for each hour separately
            con_hrly = {hour: con[con.index.hour == hour]
                        for hour in range(24)}
            bl_hrly = []
            for hour, conhr in con_hrly.items():
                # seed the recursion with the hour's non-DR mean
                blvals = [cmeans[hour]]
                if red_times is not None:
                    for period, c in conhr.items():
                        if period in red_times:
                            # event period: freeze the moving average
                            blvals.append(blvals[-1])
                        else:
                            blvals.append(alpha*c + (1-alpha)*blvals[-1])
                else:
                    for period, c in conhr.items():
                        blvals.append(alpha*c + (1-alpha)*blvals[-1])
                bl_hrly.append(pd.Series(blvals[1:], index=conhr.index))
            bls.append(pd.concat(bl_hrly).tz_convert('GMT'))
        return pd.concat(bls).loc[bl_periods]
def optimize(self, tariff, LMP=None, dr_periods=None, BL='CAISO',
isRT=False, isPDP=False, carbon=False, **kwargs):
"""
Solve the participant's optimization problem. Pass in additional
Lin/Quad Expr of other objective terms with 'add_obj_term' kwarg
"""
if isRT and (dr_periods is not None):
raise Exception('Cannot combine DR with RTP.')
if isPDP and (dr_periods is not None):
raise Exception('Cannot combine DR with PDP.')
# extract additonal objective term if given
if 'add_obj_term' in kwargs:
add_obj_term = kwargs['add_obj_term']
else:
add_obj_term = 0
# energy charges are always included (demand charges
# are set to zero if tariff has none and DR_compensation is
# set to zero if there are no DR events ...)
# if (LMP is None) or (dr_periods is None):
# #print drc
# drc = 0.0
# else:
# #print self.DR_compensation(LMP, dr_periods, BL=BL,
# # tariff=tariff, **kwargs)
# drc=quicksum(self.DR_compensation(LMP, dr_periods, BL=BL,
# tariff=tariff, **kwargs).values.tolist())
self._model.setObjective(
self._dynsys.additional_cost_term(vals=False) +
quicksum(self.energy_charges(
tariff, isRT=isRT, LMP=LMP, isPDP=isPDP,
carbon=carbon).values) +
quicksum(self.demand_charges(tariff, isPDP=False).values) -
quicksum(self.DR_compensation(LMP, dr_periods, BL=BL,
tariff=tariff, **kwargs).values) +
add_obj_term)
self._model.optimize()
    def optimize_blfixed(self, tariff, LMP, bl_values, carbon=False, **kwargs):
        """
        Solve the participant's optimization problem in case the baseline
        values are fixed (baseline-taking equilibrium). The objective is
        energy charges + additional costs (+ demand charges if the tariff
        has any) minus the fixed-baseline DR compensation.
        """
        # No option for RTPs. No biggie, since RTP and DR are alternatives.
        # extract additional objective term if given
        if 'add_obj_term' in kwargs:
            add_obj_term = kwargs['add_obj_term']
        else:
            add_obj_term = 0
        # start from energy charges + dynamical-system costs
        self._model.setObjective(
            quicksum(self.energy_charges(tariff, LMP=LMP,
                                         carbon=carbon).values) +
            self._dynsys.additional_cost_term(vals=False))
        self._model.update()
        # for some tariffs we also have demand charges
        if tariff in dem_charges:
            self._model.setObjective(
                self._model.getObjective() +
                quicksum(self.demand_charges(tariff).values))
        else:
            # no demand charge: drop peak variables/constraints possibly
            # left over from an earlier demand_charges() call
            if hasattr(self, '_maxcon'):
                for maxcon in self._maxcon.values():
                    self._model.remove(maxcon)
                del self._maxcon
            if hasattr(self, '_maxconbnd'):
                for maxconbnd in self._maxconbnd.values():
                    self._model.remove(maxconbnd)
                del self._maxconbnd
            self._model.update()
        # cache the DR-free objective, then subtract the DR compensation
        self._nonDRobj = self._model.getObjective() + add_obj_term
        self._model.setObjective(
            self._nonDRobj - self.DR_comp_blfix(
                LMP, bl_values, tariff=tariff, **kwargs))
        self._model.optimize()
def generation_cost(self, LMP, carbon=False):
"""
Return the generation cost of the partipant's consumption (= price
of consuption according to the LMPs) as a gurobipy LinExpr.
"""
lmps = LMP.loc[self._index] / 1000 # select and convert price to $/kWh
if carbon:
lmps += pd.Series(carbon_costs).loc[self._index.tz_convert(
'US/Pacific').year].values / 1000.0
cons = self._dynsys.get_consumption()['energy']
return quicksum([lmp * con for lmp, con in
zip(lmps.values, cons.values)])
def get_results(self):
"""
Return results of optimziation problem.
"""
columns = {}
xopt, uopt = self._dynsys.get_optvals()
for i in range(xopt.shape[1]):
columns['x{}'.format(i+1)] = xopt[:-1, i]
for i in range(uopt.shape[1]):
columns['u{}'.format(i+1)] = uopt[:, i]
cons = self._dynsys.get_consumption()
columns['nrg_cons'] = np.array([e.getValue() for e in cons['energy']])
columns['pwr_cons'] = np.array([e.getValue() for e in cons['power']])
dfs = [pd.DataFrame(columns, index=self._index)]
if hasattr(self, '_z'):
perstrs, vals = [], []
for perstr, z in self._z.items():
perstrs.append(perstr)
vals.append(bool(z.X))
dtidx = pd.to_datetime(perstrs, format=psform).tz_localize(
'US/Pacific').tz_convert('GMT')
dfs.append(pd.DataFrame({'z': vals}, index=dtidx))
if hasattr(self, '_red'):
perstrs, vals = [], []
for perstr, red in self._red.items():
perstrs.append(perstr)
vals.append(red.X)
dtidx = pd.to_datetime(perstrs, format=psform).tz_localize(
'US/Pacific').tz_convert('GMT')
dfs.append(pd.DataFrame({'red': vals}, index=dtidx))
if hasattr(self, '_bl'):
perstrs, vals = [], []
for perstr, bl in self._bl.items():
perstrs.append(perstr)
vals.append(bl.X)
dtidx = pd.to_datetime(perstrs, format=psform).tz_localize(
'US/Pacific').tz_convert('GMT')
dfs.append(pd.DataFrame({'BL': vals}, index=dtidx))
return pd.concat(dfs, axis=1)
def _removeOld(self):
"""
Helper function removing all DR-related variables from the
underlying gurobipy optimization model.
"""
if hasattr(self, '_zday'):
for zday in self._zday.values():
self._model.remove(zday)
del self._zday
if hasattr(self, '_red'):
for red in self._red.values():
self._model.remove(red)
del self._red
if hasattr(self, '_z'):
for z in self._z.values():
self._model.remove(z)
del self._z
if hasattr(self, '_bl'):
for bl in self._bl.values():
self._model.remove(bl)
del self._bl
if hasattr(self, '_zdaysum'):
for zdaysum in self._zdaysum.values():
self._model.remove(zdaysum)
del self._zdaysum
if hasattr(self, '_zdaymax'):
for zdaymax in self._zdaymax.values():
self._model.remove(zdaymax)
del self._zdaymax
if hasattr(self, '_blcon'):
for blcon in self._blcon.values():
self._model.remove(blcon)
del self._blcon
if hasattr(self, '_redpos'):
for redpos in self._redpos.values():
self._model.remove(redpos)
del self._redpos
if hasattr(self, '_redBL'):
for redBL in self._redBL.values():
self._model.remove(redBL)
del self._redBL
if hasattr(self, '_red0'):
for red0 in self._red0.values():
self._model.remove(red0)
del self._red0
self._model.update()
def compute_BLtaking_eq(blmodel, tariff, LMP, dr_periods, BL='CAISO',
                        blinit='noDR', eps=1.0, maxiter=20, carbon=False,
                        **kwargs):
    """
    Compute a Baseline-taking equilibrium by fixed-point iteration.

    Starting from an initial solution of the participant's problem
    (ignoring DR for blinit='noDR', or gaming the baseline for
    blinit='gamed'), repeatedly re-solve the baseline-fixed problem using
    the baseline implied by the previous solution, until the baseline
    residual drops below `eps` or `maxiter` iterations are reached.

    Parameters:
    blmodel -- participant model exposing optimize / optimize_blfixed /
        get_results / compute_baseline / generation_cost
    tariff -- tariff identifier passed through to the model
    LMP -- locational marginal prices
    dr_periods -- DR event periods
    BL -- baseline methodology (default 'CAISO')
    blinit -- initialization mode, 'noDR' or 'gamed'
    eps -- convergence tolerance on the baseline residual
    maxiter -- maximum number of fixed-point iterations
    carbon -- whether to include carbon costs

    Returns the results DataFrame of the final iteration.

    Raises NotImplementedError for an unknown `blinit`.
    """
    # Fall back to a module-level logger so the logging calls below are
    # always safe: previously `logger` was only bound when passed in via
    # kwargs, and the unconditional uses raised NameError otherwise.
    logger = kwargs.get('logger', logging.getLogger(__name__))
    if 'isLMPmG' in kwargs:
        logstr = BL + ' (LMP-G)'
    else:
        logstr = BL
    logger.log(logging.INFO,
               'Computing BL-taking eq. for {} BL.'.format(logstr))
    dfs, blvals, objs, gencosts, residuals = [], [], [], [], []
    # initialize with either the gamed or the no-DR solution
    if blinit == 'gamed':
        blmodel.optimize(tariff, LMP=LMP, dr_periods=dr_periods,
                         BL=BL, carbon=carbon, **kwargs)
    elif blinit == 'noDR':
        blmodel.optimize(tariff, LMP=LMP, carbon=carbon, **kwargs)
    else:
        errmsg = 'Unknown BL initialization parameter {}.'.format(blinit)
        logger.log(logging.ERROR, errmsg)
        raise NotImplementedError(errmsg)
    # retrieve data from the solution for initialization
    dfs.append(blmodel.get_results())
    if 'red' in dfs[-1]:
        blvals.append(blmodel.compute_baseline(
            dr_periods, BL=BL, red_times=dfs[-1][dfs[-1]['red'] > 0].index))
    else:
        blvals.append(blmodel.compute_baseline(dr_periods, BL=BL))
    objs.append(blmodel._model.getObjective().getValue())
    gencosts.append(blmodel.generation_cost(LMP).getValue())
    # np.NaN was removed in NumPy 2.0; np.nan is the portable spelling
    residuals.append(np.nan)
    # solve the bl-taking problem for the first time using the bl values
    # from the previous solution of the problem
    blmodel.optimize_blfixed(tariff, LMP=LMP, bl_values=blvals[-1],
                             carbon=carbon, **kwargs)
    dfs.append(blmodel.get_results())
    blvals.append(blmodel.compute_baseline(
        dr_periods, BL=BL, red_times=dfs[-1][dfs[-1]['red'] > 0].index))
    # todo: what are the units/magnitude of the residuals? The MIPGap must
    # be small relative to the convergence tolerance or the fixed-point
    # iteration may fail to converge. BL values are in kWh (~200 kWh avg).
    objs.append(blmodel._model.getObjective().getValue())
    gencosts.append(blmodel.generation_cost(LMP).getValue())
    # Absolute (not relative) residual: the relative form
    # 2*max(b1-b0)/mean(b1+b0) divided by zero for all-zero baselines.
    residuals.append(np.max(blvals[1] - blvals[0]))
    n_iter = 0
    while (residuals[-1] > eps) and (n_iter < maxiter):
        logger.log(logging.INFO,
                   'Residual: {:.2f}, '.format(residuals[-1]) +
                   'Continuing fixed point iteration.')
        blmodel.optimize_blfixed(
            tariff, LMP=LMP, bl_values=blvals[-1], carbon=carbon, **kwargs)
        dfs.append(blmodel.get_results())
        blvals.append(blmodel.compute_baseline(
            dr_periods, BL=BL,
            red_times=dfs[-1][dfs[-1]['red'] > 0].index))
        objs.append(blmodel._model.getObjective().getValue())
        gencosts.append(blmodel.generation_cost(LMP).getValue())
        residuals.append(np.linalg.norm(blvals[-2] - blvals[-1]))
        n_iter += 1
    if residuals[-1] <= eps:
        logger.log(logging.INFO,
                   'Fixed-point iteration successful. ' +
                   'BL-taking eq. found.')
    else:
        logger.log(logging.WARNING,
                   'Fixed-point iteration failed. ' +
                   'No BL-taking eq. found. ')
    return dfs[-1]
| [
"pandas.Series",
"datetime.datetime",
"numpy.unique",
"pandas.tseries.holiday.USFederalHolidayCalendar",
"pandas.DatetimeIndex",
"numpy.size",
"numpy.linalg.norm",
"numpy.asarray",
"numpy.max",
"gurobipy.quicksum",
"gurobipy.LinExpr",
"numpy.isnan",
"gurobipy.Model",
"pandas.DataFrame",
... | [((907, 914), 'gurobipy.Model', 'Model', ([], {}), '()\n', (912, 914), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n'), ((19530, 19541), 'numpy.isnan', 'np.isnan', (['M'], {}), '(M)\n', (19538, 19541), True, 'import numpy as np\n'), ((19974, 20009), 'pandas.Series', 'pd.Series', (['isBusiness'], {'index': 'locidx'}), '(isBusiness, index=locidx)\n', (19983, 20009), True, 'import pandas as pd\n'), ((25336, 25378), 'pandas.Series', 'pd.Series', (['drcomp_'], {'index': 'self._dr_periods'}), '(drcomp_, index=self._dr_periods)\n', (25345, 25378), True, 'import pandas as pd\n'), ((28369, 28380), 'numpy.isnan', 'np.isnan', (['M'], {}), '(M)\n', (28377, 28380), True, 'import numpy as np\n'), ((28811, 28846), 'pandas.Series', 'pd.Series', (['isBusiness'], {'index': 'locidx'}), '(isBusiness, index=locidx)\n', (28820, 28846), True, 'import pandas as pd\n'), ((33666, 33708), 'pandas.Series', 'pd.Series', (['drcomp_'], {'index': 'self._dr_periods'}), '(drcomp_, index=self._dr_periods)\n', (33675, 33708), True, 'import pandas as pd\n'), ((35856, 35867), 'numpy.isnan', 'np.isnan', (['M'], {}), '(M)\n', (35864, 35867), True, 'import numpy as np\n'), ((36498, 36533), 'pandas.Series', 'pd.Series', (['isBusiness'], {'index': 'locidx'}), '(isBusiness, index=locidx)\n', (36507, 36533), True, 'import pandas as pd\n'), ((39170, 39205), 'pandas.Series', 'pd.Series', (['isBusiness'], {'index': 'locidx'}), '(isBusiness, index=locidx)\n', (39179, 39205), True, 'import pandas as pd\n'), ((41061, 41096), 'pandas.Series', 'pd.Series', (['isBusiness'], {'index': 'locidx'}), '(isBusiness, index=locidx)\n', (41070, 41096), True, 'import pandas as pd\n'), ((48127, 48149), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (48136, 48149), True, 'import pandas as pd\n'), ((52639, 52668), 'numpy.max', 'np.max', (['(blvals[1] - blvals[0])'], {}), '(blvals[1] - blvals[0])\n', (52645, 52668), True, 'import numpy as np\n'), ((2370, 2401), 'pandas.Series', 
'pd.Series', (['echrg_'], {'index': 'locidx'}), '(echrg_, index=locidx)\n', (2379, 2401), True, 'import pandas as pd\n'), ((2895, 2924), 'pandas.Series', 'pd.Series', (['echrg_'], {'index': 'indx'}), '(echrg_, index=indx)\n', (2904, 2924), True, 'import pandas as pd\n'), ((5309, 5353), 'numpy.unique', 'np.unique', (['locidx[locidx.year == year].month'], {}), '(locidx[locidx.year == year].month)\n', (5318, 5353), True, 'import numpy as np\n'), ((16453, 16485), 'pandas.Series', 'pd.Series', (['[0.0]'], {'index': "['None']"}), "([0.0], index=['None'])\n", (16462, 16485), True, 'import pandas as pd\n'), ((46945, 46985), 'pandas.DataFrame', 'pd.DataFrame', (['columns'], {'index': 'self._index'}), '(columns, index=self._index)\n', (46957, 46985), True, 'import pandas as pd\n'), ((53414, 53453), 'numpy.linalg.norm', 'np.linalg.norm', (['(blvals[-2] - blvals[-1])'], {}), '(blvals[-2] - blvals[-1])\n', (53428, 53453), True, 'import numpy as np\n'), ((5385, 5407), 'numpy.unique', 'np.unique', (['locidx.year'], {}), '(locidx.year)\n', (5394, 5407), True, 'import numpy as np\n'), ((7950, 7981), 'pandas.Series', 'pd.Series', (['dcharges'], {'index': 'indx'}), '(dcharges, index=indx)\n', (7959, 7981), True, 'import pandas as pd\n'), ((15600, 15631), 'pandas.Series', 'pd.Series', (['dcharges'], {'index': 'indx'}), '(dcharges, index=indx)\n', (15609, 15631), True, 'import pandas as pd\n'), ((19795, 19821), 'pandas.tseries.holiday.USFederalHolidayCalendar', 'USFederalHolidayCalendar', ([], {}), '()\n', (19819, 19821), False, 'from pandas.tseries.holiday import USFederalHolidayCalendar\n'), ((28632, 28658), 'pandas.tseries.holiday.USFederalHolidayCalendar', 'USFederalHolidayCalendar', ([], {}), '()\n', (28656, 28658), False, 'from pandas.tseries.holiday import USFederalHolidayCalendar\n'), ((36319, 36345), 'pandas.tseries.holiday.USFederalHolidayCalendar', 'USFederalHolidayCalendar', ([], {}), '()\n', (36343, 36345), False, 'from pandas.tseries.holiday import 
USFederalHolidayCalendar\n'), ((38991, 39017), 'pandas.tseries.holiday.USFederalHolidayCalendar', 'USFederalHolidayCalendar', ([], {}), '()\n', (39015, 39017), False, 'from pandas.tseries.holiday import USFederalHolidayCalendar\n'), ((40882, 40908), 'pandas.tseries.holiday.USFederalHolidayCalendar', 'USFederalHolidayCalendar', ([], {}), '()\n', (40906, 40908), False, 'from pandas.tseries.holiday import USFederalHolidayCalendar\n'), ((42361, 42375), 'pandas.concat', 'pd.concat', (['bls'], {}), '(bls)\n', (42370, 42375), True, 'import pandas as pd\n'), ((47321, 47359), 'pandas.DataFrame', 'pd.DataFrame', (["{'z': vals}"], {'index': 'dtidx'}), "({'z': vals}, index=dtidx)\n", (47333, 47359), True, 'import pandas as pd\n'), ((47697, 47737), 'pandas.DataFrame', 'pd.DataFrame', (["{'red': vals}"], {'index': 'dtidx'}), "({'red': vals}, index=dtidx)\n", (47709, 47737), True, 'import pandas as pd\n'), ((48071, 48110), 'pandas.DataFrame', 'pd.DataFrame', (["{'BL': vals}"], {'index': 'dtidx'}), "({'BL': vals}, index=dtidx)\n", (48083, 48110), True, 'import pandas as pd\n'), ((15708, 15720), 'gurobipy.LinExpr', 'LinExpr', (['(0.0)'], {}), '(0.0)\n', (15715, 15720), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n'), ((42243, 42283), 'pandas.Series', 'pd.Series', (['blvals[1:]'], {'index': 'conhr.index'}), '(blvals[1:], index=conhr.index)\n', (42252, 42283), True, 'import pandas as pd\n'), ((5546, 5570), 'datetime.datetime', 'datetime', (['year', 'month', '(1)'], {}), '(year, month, 1)\n', (5554, 5570), False, 'from datetime import datetime\n'), ((8234, 8260), 'pandas.tseries.holiday.USFederalHolidayCalendar', 'USFederalHolidayCalendar', ([], {}), '()\n', (8258, 8260), False, 'from pandas.tseries.holiday import USFederalHolidayCalendar\n'), ((19220, 19264), 'numpy.asarray', 'np.asarray', (["self._dynsys._opts['nrg_coeffs']"], {}), "(self._dynsys._opts['nrg_coeffs'])\n", (19230, 19264), True, 'import numpy as np\n'), ((20439, 20480), 'gurobipy.quicksum', 'quicksum', 
(['[self._z[ps] for ps in perstrs]'], {}), '([self._z[ps] for ps in perstrs])\n', (20447, 20480), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n'), ((22321, 22345), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['sim_DR'], {}), '(sim_DR)\n', (22337, 22345), True, 'import pandas as pd\n'), ((22426, 22455), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['sim_DR_mult'], {}), '(sim_DR_mult)\n', (22442, 22455), True, 'import pandas as pd\n'), ((22534, 22561), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['sim_nonDR'], {}), '(sim_nonDR)\n', (22550, 22561), True, 'import pandas as pd\n'), ((28059, 28103), 'numpy.asarray', 'np.asarray', (["self._dynsys._opts['nrg_coeffs']"], {}), "(self._dynsys._opts['nrg_coeffs'])\n", (28069, 28103), True, 'import numpy as np\n'), ((29276, 29317), 'gurobipy.quicksum', 'quicksum', (['[self._z[ps] for ps in perstrs]'], {}), '([self._z[ps] for ps in perstrs])\n', (29284, 29317), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n'), ((35545, 35589), 'numpy.asarray', 'np.asarray', (["self._dynsys._opts['nrg_coeffs']"], {}), "(self._dynsys._opts['nrg_coeffs'])\n", (35555, 35589), True, 'import numpy as np\n'), ((42308, 42326), 'pandas.concat', 'pd.concat', (['bl_hrly'], {}), '(bl_hrly)\n', (42317, 42326), True, 'import pandas as pd\n'), ((8349, 8376), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['idx_.date'], {}), '(idx_.date)\n', (8365, 8376), True, 'import pandas as pd\n'), ((15770, 15786), 'numpy.size', 'np.size', (['indx', '(0)'], {}), '(indx, 0)\n', (15777, 15786), True, 'import numpy as np\n'), ((24190, 24222), 'gurobipy.quicksum', 'quicksum', (['[(1 - z) for z in zKc]'], {}), '([(1 - z) for z in zKc])\n', (24198, 24222), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n'), ((46102, 46125), 'pandas.Series', 'pd.Series', (['carbon_costs'], {}), '(carbon_costs)\n', (46111, 46125), True, 'import pandas as pd\n'), ((47198, 47236), 'pandas.to_datetime', 'pd.to_datetime', (['perstrs'], {'format': 'psform'}), 
'(perstrs, format=psform)\n', (47212, 47236), True, 'import pandas as pd\n'), ((47574, 47612), 'pandas.to_datetime', 'pd.to_datetime', (['perstrs'], {'format': 'psform'}), '(perstrs, format=psform)\n', (47588, 47612), True, 'import pandas as pd\n'), ((47948, 47986), 'pandas.to_datetime', 'pd.to_datetime', (['perstrs'], {'format': 'psform'}), '(perstrs, format=psform)\n', (47962, 47986), True, 'import pandas as pd\n'), ((24082, 24099), 'gurobipy.quicksum', 'quicksum', (['(qD + qK)'], {}), '(qD + qK)\n', (24090, 24099), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n'), ((24144, 24156), 'gurobipy.quicksum', 'quicksum', (['zK'], {}), '(zK)\n', (24152, 24156), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n')] |
#
# prp-htcondor-portal/provisioner
#
# BSD license, copyright <NAME> 2021
#
# Main entry point of the provisioner process
#
import sys
import time
from . import provisioner_logging
from . import provisioner_htcondor
from . import provisioner_k8s
from . import event_loop
def main(namespace, max_pods_per_cluster=10, sleep_time=10):
    """Run the provisioner event loop forever.

    Builds the logging, HTCondor (schedd + collector) and Kubernetes
    helper objects for `namespace`, then repeatedly runs one
    provisioning iteration, sleeping `sleep_time` seconds in between.

    Parameters:
    namespace -- namespace the provisioner operates in
    max_pods_per_cluster -- cap on pods submitted per cluster iteration
    sleep_time -- seconds to sleep between iterations
    """
    log_obj = provisioner_logging.ProvisionerStdoutLogging(want_log_debug=True)
    # TBD: Proper security
    schedd_obj = provisioner_htcondor.ProvisionerSchedd(namespace, {'.*': '.*'})
    collector_obj = provisioner_htcondor.ProvisionerCollector(namespace, '.*')
    k8s_obj = provisioner_k8s.ProvisionerK8S(namespace)
    k8s_obj.authenticate()
    el = event_loop.ProvisionerEventLoop(log_obj, schedd_obj, collector_obj,
                                         k8s_obj, max_pods_per_cluster)
    while True:
        log_obj.log_debug("[Main] Iteration started")
        try:
            el.one_iteration()
        except Exception:
            # Keep the loop alive on iteration failures, but do not catch
            # KeyboardInterrupt/SystemExit like the previous bare `except:`
            # did -- that made the daemon impossible to stop with Ctrl+C.
            log_obj.log_debug("[Main] Exception in one_iteration")
        log_obj.sync()
        time.sleep(sleep_time)
if __name__ == "__main__":
    # execute only if run as a script; the first CLI argument is the
    # namespace passed to main()
    main(sys.argv[1])
| [
"time.sleep"
] | [((998, 1020), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (1008, 1020), False, 'import time\n')] |
# Front matter
##############
import os
from os import fdopen, remove
from tempfile import mkstemp
from shutil import move
import glob
import re
import time
import pandas as pd
import numpy as np
from scipy import constants
from scipy.optimize import curve_fit, fsolve
from scipy.interpolate import interp1d
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib import gridspec
from scipy.interpolate import spline
import math
import seaborn as sns
# Plot styling: tick label sizes plus seaborn 'ticks' style with inward ticks.
matplotlib.rc('xtick', labelsize=16)
matplotlib.rc('ytick', labelsize=16)
rc = {'lines.linewidth': 1,
      'axes.labelsize': 20,
      'axes.titlesize': 20,
      'legend.fontsize': 26,
      'xtick.direction': u'in',
      'ytick.direction': u'in'}
sns.set_style('ticks', rc=rc)
start_time = time.time()

# Input scaling parameter results
##########################################
xi_filename = 'Results/scalingparameters.csv'
xi_df = pd.read_csv(xi_filename)
# Rename columns to avoid confusion
xi_df = xi_df.rename(columns={'Vi':'Vj', 'dVi':'dVj', 'V':'Vk','dV':'dVk',
    'V/Vi':'Vk/Vj','xi':'xi(Vk/Vj)','dxi':'dxi(Vk/Vj)'})

# Transform scaling parameters to each reference volume
#######################################################
folder_list = xi_df.drop_duplicates(subset='Ref Folder')['Ref Folder'].values
for ref_folder in folder_list:
# for ref_folder in ['2009Oct_30GPa']:
    print('Rescaling to '+ref_folder)
    # Reference volume to scale everything to
    # (last row of this folder's entries; presumably all rows of a folder
    # share the same Vj -- TODO confirm against the input CSV)
    Vi = xi_df[xi_df['Ref Folder']==ref_folder].iloc[-1]['Vj']
    xi_rescaled_df = xi_df[['Vj','Vk','xi(Vk/Vj)','dxi(Vk/Vj)']].copy()
    xi_rescaled_df['Vi'] = Vi*np.ones(len(xi_rescaled_df))
    # rescaled xi(Vk/Vi) = xi(Vk/Vj) * complementary xi(Vj/Vi)
    # Complementary xi needed to calculate rescaled xi:
    xi_rescaled_df['xi(Vj/Vi)'] = [xi_rescaled_df[(xi_rescaled_df['Vj']==Vi) &
        (xi_rescaled_df['Vk']==Vj)].iloc[-1]['xi(Vk/Vj)'] for Vj in xi_rescaled_df['Vj']]
    xi_rescaled_df['dxi(Vj/Vi)'] = [xi_rescaled_df[(xi_rescaled_df['Vj']==Vi) &
        (xi_rescaled_df['Vk']==Vj)].iloc[-1]['dxi(Vk/Vj)'] for Vj in xi_rescaled_df['Vj']]
    xi_rescaled_df['Vk/Vi'] = xi_rescaled_df['Vk']/xi_rescaled_df['Vi']
    # Calculate rescaled xi
    xi_rescaled_df['xi(Vk/Vi)'] = xi_rescaled_df['xi(Vk/Vj)']*xi_rescaled_df['xi(Vj/Vi)']
    # Calculate uncertainty on rescaled xi
    # If c = a*b, dc = sqrt((b*da)^2 + (a*db)^2)
    xi_rescaled_df['dxi(Vk/Vi)'] = np.sqrt(
        (xi_rescaled_df['xi(Vj/Vi)']*xi_rescaled_df['dxi(Vk/Vj)'])**2 +
        (xi_rescaled_df['xi(Vk/Vj)']*xi_rescaled_df['dxi(Vj/Vi)'])**2)
    # Eliminate data points where Vi = Vk
    xi_rescaled_df = xi_rescaled_df[xi_rescaled_df['Vk'] != Vi]
    xi_rescaled_df = xi_rescaled_df.round(decimals=4)
    xi_rescaled_df.to_csv(ref_folder+'/rescaledparameters.csv',index=False)
    # Plot scaling parameters (xi vs. V/Vi with error bars), saved per folder
    fig, (ax0) = plt.subplots(nrows = 1, ncols=1, figsize=(6,4.5))
    ax0.errorbar(xi_rescaled_df['Vk/Vi'],xi_rescaled_df['xi(Vk/Vi)'],
        yerr=xi_rescaled_df['dxi(Vk/Vi)'],
        marker = 'o', color = 'gray', mfc='lightgray', ms=6, markeredgewidth=1,
        ls='none',elinewidth=1)
    ax0.set_xlabel(r'$V/V_i$',fontsize = 16)
    ax0.set_ylabel(r'$\xi$',fontsize = 16)
    ax0.tick_params(direction='in',right='on',top='on')
    fig.savefig(ref_folder+'/scalingparam.pdf', format='pdf',
        bbox_inches='tight')
    # Close the figure to free memory inside the loop.
    plt.close()
"numpy.sqrt",
"pandas.read_csv",
"seaborn.set_style",
"matplotlib.pyplot.close",
"matplotlib.rc",
"time.time",
"matplotlib.pyplot.subplots"
] | [((509, 545), 'matplotlib.rc', 'matplotlib.rc', (['"""xtick"""'], {'labelsize': '(16)'}), "('xtick', labelsize=16)\n", (522, 545), False, 'import matplotlib\n'), ((547, 583), 'matplotlib.rc', 'matplotlib.rc', (['"""ytick"""'], {'labelsize': '(16)'}), "('ytick', labelsize=16)\n", (560, 583), False, 'import matplotlib\n'), ((765, 794), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {'rc': 'rc'}), "('ticks', rc=rc)\n", (778, 794), True, 'import seaborn as sns\n'), ((809, 820), 'time.time', 'time.time', ([], {}), '()\n', (818, 820), False, 'import time\n'), ((955, 979), 'pandas.read_csv', 'pd.read_csv', (['xi_filename'], {}), '(xi_filename)\n', (966, 979), True, 'import pandas as pd\n'), ((2411, 2557), 'numpy.sqrt', 'np.sqrt', (["((xi_rescaled_df['xi(Vj/Vi)'] * xi_rescaled_df['dxi(Vk/Vj)']) ** 2 + (\n xi_rescaled_df['xi(Vk/Vj)'] * xi_rescaled_df['dxi(Vj/Vi)']) ** 2)"], {}), "((xi_rescaled_df['xi(Vj/Vi)'] * xi_rescaled_df['dxi(Vk/Vj)']) ** 2 +\n (xi_rescaled_df['xi(Vk/Vj)'] * xi_rescaled_df['dxi(Vj/Vi)']) ** 2)\n", (2418, 2557), True, 'import numpy as np\n'), ((2820, 2868), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(6, 4.5)'}), '(nrows=1, ncols=1, figsize=(6, 4.5))\n', (2832, 2868), True, 'import matplotlib.pyplot as plt\n'), ((3294, 3305), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3303, 3305), True, 'import matplotlib.pyplot as plt\n')] |
"""Test cases."""
import unittest
import logging
from speedtest2dynamodb import parse_output
class SpeedTest2DynamoDBTestCase(unittest.TestCase):
    """Tests for parse_output: unit conversion and failure handling."""

    def setUp(self):
        # Root logger handle, used to silence expected parse-error logs.
        self.logger = logging.getLogger()

    def _assert_parsed(self, output, expected):
        """Parse `output` and compare the (ping, download, upload) tuple."""
        self.assertEqual(parse_output(output), expected)

    def test_parse_output_bit(self):
        """Test output that contains only bit/s."""
        self._assert_parsed(
            'Ping: 10.331 ms\nDownload: 40.5 bit/s\nUpload: 5.88 Bit/s',
            (10.331, 40.5, 5.88)
        )

    def test_parse_output_kbit(self):
        """Test output that contains only Kbit/s."""
        self._assert_parsed(
            'Ping: 10.331 ms\nDownload: 40.53 Kbit/s\nUpload: 5.88 kbit/s',
            (10.331, 41502.72, 6021.12)
        )

    def test_parse_output_mbit(self):
        """Test output that contains only Mbit/s."""
        self._assert_parsed(
            'Ping: 10.331 ms\nDownload: 40.53 mbit/s\nUpload: 5.88 Mbit/s',
            (10.331, 42498785.28, 6165626.88)
        )

    def test_parse_output_gbit(self):
        """Test output that contains only Gbit/s."""
        self._assert_parsed(
            'Ping: 10.331 ms\nDownload: 40.53 Gbit/s\nUpload: 5.88 gbit/s',
            (10.331, 43518756126.72, 6313601925.12)
        )

    def test_parse_output_mixed_bit(self):
        """Test output that contains bit/s and Gbit/s."""
        self._assert_parsed(
            'Ping: 10.331 ms\nDownload: 40.53 Gbit/s\nUpload: 5.88 bit/s',
            (10.331, 43518756126.72, 5.88)
        )

    def test_parse_output_swapped_order(self):
        """Test output with changed order."""
        self._assert_parsed(
            'Upload: 5.88 bit/s\nPing: 10.331 ms\nDownload: 40.53 bit/s',
            (10.331, 40.53, 5.88)
        )

    def test_parse_output_not_matching(self):
        """Test whether default values are returned when unable to parse."""
        # Silence logging, as we expect the parser to log an error here and
        # do not want to clutter the test output.
        old_log_level = self.logger.getEffectiveLevel()
        self.logger.setLevel(logging.CRITICAL)
        try:
            self._assert_parsed(
                'Ping: 10.331 s\nDownload: 40.xx bit/s\nUpload: 5.88 m/s',
                (-1, -1, -1)
            )
        finally:
            # Restore the log level even if the assertion fails; the
            # original restored it after the assertion, so a failure left
            # the root logger stuck at CRITICAL.
            self.logger.setLevel(old_log_level)
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| [
"unittest.main",
"speedtest2dynamodb.parse_output",
"logging.getLogger"
] | [((2628, 2643), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2641, 2643), False, 'import unittest\n'), ((223, 242), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (240, 242), False, 'import logging\n'), ((371, 446), 'speedtest2dynamodb.parse_output', 'parse_output', (['"""Ping: 10.331 ms\nDownload: 40.5 bit/s\nUpload: 5.88 Bit/s"""'], {}), '("""Ping: 10.331 ms\nDownload: 40.5 bit/s\nUpload: 5.88 Bit/s""")\n', (383, 446), False, 'from speedtest2dynamodb import parse_output\n'), ((649, 727), 'speedtest2dynamodb.parse_output', 'parse_output', (['"""Ping: 10.331 ms\nDownload: 40.53 Kbit/s\nUpload: 5.88 kbit/s"""'], {}), '("""Ping: 10.331 ms\nDownload: 40.53 Kbit/s\nUpload: 5.88 kbit/s""")\n', (661, 727), False, 'from speedtest2dynamodb import parse_output\n'), ((937, 1015), 'speedtest2dynamodb.parse_output', 'parse_output', (['"""Ping: 10.331 ms\nDownload: 40.53 mbit/s\nUpload: 5.88 Mbit/s"""'], {}), '("""Ping: 10.331 ms\nDownload: 40.53 mbit/s\nUpload: 5.88 Mbit/s""")\n', (949, 1015), False, 'from speedtest2dynamodb import parse_output\n'), ((1231, 1309), 'speedtest2dynamodb.parse_output', 'parse_output', (['"""Ping: 10.331 ms\nDownload: 40.53 Gbit/s\nUpload: 5.88 gbit/s"""'], {}), '("""Ping: 10.331 ms\nDownload: 40.53 Gbit/s\nUpload: 5.88 gbit/s""")\n', (1243, 1309), False, 'from speedtest2dynamodb import parse_output\n'), ((1541, 1618), 'speedtest2dynamodb.parse_output', 'parse_output', (['"""Ping: 10.331 ms\nDownload: 40.53 Gbit/s\nUpload: 5.88 bit/s"""'], {}), '("""Ping: 10.331 ms\nDownload: 40.53 Gbit/s\nUpload: 5.88 bit/s""")\n', (1553, 1618), False, 'from speedtest2dynamodb import parse_output\n'), ((1833, 1909), 'speedtest2dynamodb.parse_output', 'parse_output', (['"""Upload: 5.88 bit/s\nPing: 10.331 ms\nDownload: 40.53 bit/s"""'], {}), '("""Upload: 5.88 bit/s\nPing: 10.331 ms\nDownload: 40.53 bit/s""")\n', (1845, 1909), False, 'from speedtest2dynamodb import parse_output\n'), ((2373, 2446), 'speedtest2dynamodb.parse_output', 
'parse_output', (['"""Ping: 10.331 s\nDownload: 40.xx bit/s\nUpload: 5.88 m/s"""'], {}), '("""Ping: 10.331 s\nDownload: 40.xx bit/s\nUpload: 5.88 m/s""")\n', (2385, 2446), False, 'from speedtest2dynamodb import parse_output\n')] |
import cv2
import numpy as np
import picamera
import time
def identifySq(pt, w, h):
    """Map a template-match location to a 2048 board cell index.

    Parameters:
    pt -- (x, y) pixel coordinates of a template match in the warped image
    w, h -- template width/height (unused; kept for interface compatibility)

    Returns (index, valid): index in 0..15 (row-major on the 4x4 board)
    and a flag that is False when the point lies outside the grid (the
    index is then 0 and must be ignored by the caller).
    """
    tlx = 80   # x of the grid's top-left reference point (px)
    tly = 210  # y of the grid's top-left reference point (px)
    ppx = 94   # horizontal cell pitch (px)
    ppy = 82   # vertical cell pitch (px)
    # Floor division keeps the indices ints on Python 3; the original '/'
    # produced float indices, which break list indexing in the caller.
    # (// matches the old Python 2 integer-division behavior exactly.)
    sqx = (pt[0] - (tlx - ppx // 2)) // ppx
    sqy = (pt[1] - (tly - ppy // 2)) // ppy
    if sqx < 0 or sqx >= 4 or sqy < 0 or sqy >= 4:
        return 0, False
    return sqy * 4 + sqx, True
if __name__ == '__main__' :
    # Acquire source image from the Raspberry Pi camera.
    cam = picamera.PiCamera()
    cam.capture('newimg.jpg')
    # Read source image.
    im_src = cv2.imread('newimg.jpg')
    # Resize image to a fixed 640px width, keeping the aspect ratio.
    newWidth = 640.0
    rat1 = newWidth / im_src.shape[1]
    dim1 = (int(newWidth), int(im_src.shape[0] * rat1))
    im_small = cv2.resize(im_src, dim1, interpolation = cv2.INTER_AREA)
    # Four corners of the board in the (resized) source image.
    pts_src = np.array([[57, 368], [98, 22], [585, 28], [626, 374]], dtype=float)
    # Read destination image.
    im_dst = cv2.imread('destimg2.jpg')
    # Four corners of the board in destination image.
    pts_dst = np.array([[0, 0], [511, 0], [511, 639], [0, 639]], dtype=float)
    # Calculate Homography mapping the camera view onto the flat board.
    h, status = cv2.findHomography(pts_src, pts_dst)
    # Warp source image to destination based on homography.
    im_out = cv2.warpPerspective(im_small, h, (im_dst.shape[1], im_dst.shape[0]))
    im_grey = cv2.cvtColor(im_out, cv2.COLOR_BGR2GRAY)
    cv2.imwrite('img23.png', im_out)
    # Match each tile template (2, 4, ..., 1024) against the warped board.
    tileFiles = ['tile000002.png', 'tile000004.png', 'tile000008.png',
                 'tile000016.png', 'tile000032.png', 'tile000064.png',
                 'tile000128.png', 'tile000256.png', 'tile000512.png',
                 'tile001024.png']
    lineThicknessIdx = 1
    tileVal = 2
    boardCells = [0] * 16
    for tileFile in tileFiles:
        tile = cv2.imread(tileFile, 0)
        w, h = tile.shape[::-1]
        # Apply template Matching (normalized cross-correlation).
        method = cv2.TM_CCOEFF_NORMED
        res = cv2.matchTemplate(im_grey, tile, method)
        threshold = 0.8
        loc = np.where(res >= threshold)
        for pt in zip(*loc[::-1]):
            # Map the match location to a board cell; first match wins.
            sq, sqValid = identifySq(pt, w, h)
            if sqValid:
                if boardCells[sq] == 0:
                    boardCells[sq] = tileVal
                    # NOTE(review): on Python 3, h/3 and the sq index from
                    # identifySq are floats; cv2.putText and list indexing
                    # need ints -- confirm this script targets Python 2.
                    cv2.putText(im_out, str(tileVal), (pt[0],pt[1]+h/3),cv2.FONT_HERSHEY_SCRIPT_COMPLEX, 1, 0, 1)
                    #print(sq, tileVal)
            # print(pt, tileVal, w, h)
            #cv2.rectangle(im_out, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), lineThicknessIdx)
        lineThicknessIdx += 1
        # print("Found", len(zip(*loc[::-1])),"tiles of", tileVal)
        tileVal *= 2
    # Print the recognized board, one cell per line (row-major order).
    for cellIdx in range(len(boardCells)):
        print(cellIdx, boardCells[cellIdx])
    cv2.imshow("Matched One", im_out)
    cv2.waitKey(1000)
    # time.sleep(5)
| [
"cv2.imwrite",
"cv2.findHomography",
"numpy.where",
"picamera.PiCamera",
"cv2.imshow",
"numpy.array",
"cv2.warpPerspective",
"cv2.waitKey",
"cv2.cvtColor",
"cv2.resize",
"cv2.matchTemplate",
"cv2.imread"
] | [((417, 436), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (434, 436), False, 'import picamera\n'), ((506, 530), 'cv2.imread', 'cv2.imread', (['"""newimg.jpg"""'], {}), "('newimg.jpg')\n", (516, 530), False, 'import cv2\n'), ((682, 736), 'cv2.resize', 'cv2.resize', (['im_src', 'dim1'], {'interpolation': 'cv2.INTER_AREA'}), '(im_src, dim1, interpolation=cv2.INTER_AREA)\n', (692, 736), False, 'import cv2\n'), ((805, 872), 'numpy.array', 'np.array', (['[[57, 368], [98, 22], [585, 28], [626, 374]]'], {'dtype': 'float'}), '([[57, 368], [98, 22], [585, 28], [626, 374]], dtype=float)\n', (813, 872), True, 'import numpy as np\n'), ((917, 943), 'cv2.imread', 'cv2.imread', (['"""destimg2.jpg"""'], {}), "('destimg2.jpg')\n", (927, 943), False, 'import cv2\n'), ((1011, 1074), 'numpy.array', 'np.array', (['[[0, 0], [511, 0], [511, 639], [0, 639]]'], {'dtype': 'float'}), '([[0, 0], [511, 0], [511, 639], [0, 639]], dtype=float)\n', (1019, 1074), True, 'import numpy as np\n'), ((1119, 1155), 'cv2.findHomography', 'cv2.findHomography', (['pts_src', 'pts_dst'], {}), '(pts_src, pts_dst)\n', (1137, 1155), False, 'import cv2\n'), ((1229, 1297), 'cv2.warpPerspective', 'cv2.warpPerspective', (['im_small', 'h', '(im_dst.shape[1], im_dst.shape[0])'], {}), '(im_small, h, (im_dst.shape[1], im_dst.shape[0]))\n', (1248, 1297), False, 'import cv2\n'), ((1312, 1352), 'cv2.cvtColor', 'cv2.cvtColor', (['im_out', 'cv2.COLOR_BGR2GRAY'], {}), '(im_out, cv2.COLOR_BGR2GRAY)\n', (1324, 1352), False, 'import cv2\n'), ((1358, 1390), 'cv2.imwrite', 'cv2.imwrite', (['"""img23.png"""', 'im_out'], {}), "('img23.png', im_out)\n", (1369, 1390), False, 'import cv2\n'), ((2721, 2754), 'cv2.imshow', 'cv2.imshow', (['"""Matched One"""', 'im_out'], {}), "('Matched One', im_out)\n", (2731, 2754), False, 'import cv2\n'), ((2760, 2777), 'cv2.waitKey', 'cv2.waitKey', (['(1000)'], {}), '(1000)\n', (2771, 2777), False, 'import cv2\n'), ((1784, 1807), 'cv2.imread', 'cv2.imread', (['tileFile', '(0)'], 
{}), '(tileFile, 0)\n', (1794, 1807), False, 'import cv2\n'), ((1927, 1967), 'cv2.matchTemplate', 'cv2.matchTemplate', (['im_grey', 'tile', 'method'], {}), '(im_grey, tile, method)\n', (1944, 1967), False, 'import cv2\n'), ((2006, 2032), 'numpy.where', 'np.where', (['(res >= threshold)'], {}), '(res >= threshold)\n', (2014, 2032), True, 'import numpy as np\n')] |
import re
import discord
from data.model import Tag
class TagModal(discord.ui.Modal):
    """Discord modal collecting the body and up to two link buttons for a
    new tag. On successful submission the built Tag is stored in
    ``self.tag``; it stays None if validation fails."""

    def __init__(self, bot, tag_name, author: discord.Member) -> None:
        self.bot = bot
        self.tag_name = tag_name
        # Only this member's submission is accepted in on_submit().
        self.author = author
        # Filled in by on_submit() once the input validates.
        self.tag = None

        super().__init__(title=f"Add tag {self.tag_name}")

        # Required long-form text field for the tag body (children[0]).
        self.add_item(
            discord.ui.TextInput(
                label="Body of the tag",
                placeholder="Enter the body of the tag",
                style=discord.TextStyle.long,
            )
        )

        # Two optional (name, link) input pairs for URL buttons.
        for i in range(2):
            self.add_item(
                discord.ui.TextInput(
                    label=f"Button {(i%2)+1} name",
                    placeholder="Enter a name for the button. You can also put an emoji at the start.",
                    style=discord.TextStyle.short,
                    required=False,
                    max_length=80
                )
            )
            self.add_item(
                discord.ui.TextInput(
                    label=f"Button {(i%2)+1} link",
                    placeholder="Enter a link for the button",
                    style=discord.TextStyle.short,
                    required=False
                )
            )

    async def on_submit(self, interaction: discord.Interaction):
        """Validate the modal fields, build the Tag and stop the modal.

        On any validation error, replies ephemerally via send_error() and
        returns without setting ``self.tag``.
        """
        # Ignore submissions from anyone but the invoking member.
        if interaction.user != self.author:
            return

        # children[0] is the body; children[1::2] are button names and
        # children[2::2] their links.
        button_names = [child.value.strip() for child in self.children[1::2] if child.value is not None and len(child.value.strip()) > 0]
        links = [child.value.strip() for child in self.children[2::2] if child.value is not None and len(child.value.strip()) > 0]

        # make sure all links are valid URLs with regex
        if not all(re.match(r'^(https|http)://.*', link) for link in links):
            await self.send_error(interaction, "The links must be valid URLs!")
            return

        if len(button_names) != len(links):
            await self.send_error(interaction, "All buttons must have labels and links!")
            return

        buttons = list(zip(button_names, links))
        description = self.children[0].value

        if not description:
            await self.send_error(interaction, "Description is missing!")
            return

        # A label may start with an emoji, but must contain text after it.
        for label in button_names:
            custom_emojis = re.search(r'<:\d+>|<:.+?:\d+>|<a:.+:\d+>|[\U00010000-\U0010ffff]', label)
            if custom_emojis is not None:
                emoji = custom_emojis.group(0).strip()
                if not label.startswith(emoji):
                    await self.send_error(interaction, "Emojis must be at the start of labels!")
                    return
                label = label.replace(emoji, '')
                label = label.strip()

                if not label:
                    await self.send_error(interaction, "A button cannot just be an emoji!")
                    return

        # prepare tag data for database
        tag = Tag()
        tag.name = self.tag_name.lower()
        tag.content = description
        tag.added_by_id = self.author.id
        tag.added_by_tag = str(self.author)
        tag.button_links = buttons

        self.tag = tag
        self.stop()

        # Best-effort acknowledgement; presumably raises if the interaction
        # was already responded to, hence the swallow -- confirm intent.
        try:
            await interaction.response.send_message()
        except:
            pass

    async def send_error(self, interaction: discord.Interaction, error: str):
        """Reply to the interaction with an ephemeral red error embed."""
        embed = discord.Embed(title=":(\nYour command ran into a problem", description=error, color=discord.Color.red())
        await interaction.response.send_message(embed=embed, ephemeral=True)
class EditTagModal(discord.ui.Modal):
def __init__(self, tag: Tag, author: discord.Member) -> None:
self.tag = tag
self.author = author
self.edited = False
super().__init__(title=f"Edit tag {self.tag.name}")
self.add_item(
discord.ui.TextInput(
label="Body of the tag",
placeholder="Enter the body of the tag",
style=discord.TextStyle.long,
default=tag.content
)
)
for i in range(2):
self.add_item(
discord.ui.TextInput(
label=f"Button {(i%2)+1} name",
placeholder="Enter a name for the button. You can also put an emoji at the start.",
style=discord.TextStyle.short,
required=False,
max_length=80,
default=self.tag.button_links[i][0] if len(self.tag.button_links) > i else None
)
)
self.add_item(
discord.ui.TextInput(
label=f"Button {(i%2)+1} link",
placeholder="Enter a link for the button",
style=discord.TextStyle.short,
required=False,
default=self.tag.button_links[i][1] if len(self.tag.button_links) > i else None
)
)
async def on_submit(self, interaction: discord.Interaction):
if interaction.user != self.author:
return
button_names = [child.value.strip() for child in self.children[1::2] if child.value is not None and len(child.value.strip()) > 0]
links = [child.value.strip() for child in self.children[2::2] if child.value is not None and len(child.value.strip()) > 0]
# make sure all links are valid URLs with regex
if not all(re.match(r'^(https|http)://.*', link) for link in links):
await self.send_error(interaction, "The links must be valid URLs!")
return
if len(button_names) != len(links):
await self.send_error(interaction, "All buttons must have labels and links!")
return
buttons = list(zip(button_names, links))
description = self.children[0].value
if not description:
await self.send_error(interaction, "Description is missing!")
return
for label in button_names:
custom_emojis = re.search(r'<:\d+>|<:.+?:\d+>|<a:.+:\d+>|[\U00010000-\U0010ffff]', label)
if custom_emojis is not None:
emoji = custom_emojis.group(0).strip()
if not label.startswith(emoji):
await self.send_error(interaction, "Emojis must be at the start of labels!")
return
label = label.replace(emoji, '')
label = label.strip()
if not label:
await self.send_error(interaction, "A button cannot just be an emoji!")
return
# prepare tag data for database
self.tag.content = description
self.tag.button_links = buttons
self.edited = True
self.stop()
try:
await interaction.response.send_message()
except:
pass
async def send_error(self, interaction: discord.Interaction, error: str):
embed = discord.Embed(title=":(\nYour command ran into a problem", description=error, color=discord.Color.red())
await interaction.response.send_message(embed=embed, ephemeral=True)
| [
"re.search",
"data.model.Tag",
"re.match",
"discord.ui.TextInput",
"discord.Color.red"
] | [((2955, 2960), 'data.model.Tag', 'Tag', ([], {}), '()\n', (2958, 2960), False, 'from data.model import Tag\n'), ((365, 486), 'discord.ui.TextInput', 'discord.ui.TextInput', ([], {'label': '"""Body of the tag"""', 'placeholder': '"""Enter the body of the tag"""', 'style': 'discord.TextStyle.long'}), "(label='Body of the tag', placeholder=\n 'Enter the body of the tag', style=discord.TextStyle.long)\n", (385, 486), False, 'import discord\n'), ((2321, 2398), 're.search', 're.search', (['"""<:\\\\d+>|<:.+?:\\\\d+>|<a:.+:\\\\d+>|[\\\\U00010000-\\\\U0010ffff]"""', 'label'], {}), "('<:\\\\d+>|<:.+?:\\\\d+>|<a:.+:\\\\d+>|[\\\\U00010000-\\\\U0010ffff]', label)\n", (2330, 2398), False, 'import re\n'), ((3867, 4014), 'discord.ui.TextInput', 'discord.ui.TextInput', ([], {'label': '"""Body of the tag"""', 'placeholder': '"""Enter the body of the tag"""', 'style': 'discord.TextStyle.long', 'default': 'tag.content'}), "(label='Body of the tag', placeholder=\n 'Enter the body of the tag', style=discord.TextStyle.long, default=tag.\n content)\n", (3887, 4014), False, 'import discord\n'), ((6069, 6146), 're.search', 're.search', (['"""<:\\\\d+>|<:.+?:\\\\d+>|<a:.+:\\\\d+>|[\\\\U00010000-\\\\U0010ffff]"""', 'label'], {}), "('<:\\\\d+>|<:.+?:\\\\d+>|<a:.+:\\\\d+>|[\\\\U00010000-\\\\U0010ffff]', label)\n", (6078, 6146), False, 'import re\n'), ((626, 835), 'discord.ui.TextInput', 'discord.ui.TextInput', ([], {'label': 'f"""Button {i % 2 + 1} name"""', 'placeholder': '"""Enter a name for the button. You can also put an emoji at the start."""', 'style': 'discord.TextStyle.short', 'required': '(False)', 'max_length': '(80)'}), "(label=f'Button {i % 2 + 1} name', placeholder=\n 'Enter a name for the button. 
You can also put an emoji at the start.',\n style=discord.TextStyle.short, required=False, max_length=80)\n", (646, 835), False, 'import discord\n'), ((1000, 1154), 'discord.ui.TextInput', 'discord.ui.TextInput', ([], {'label': 'f"""Button {i % 2 + 1} link"""', 'placeholder': '"""Enter a link for the button"""', 'style': 'discord.TextStyle.short', 'required': '(False)'}), "(label=f'Button {i % 2 + 1} link', placeholder=\n 'Enter a link for the button', style=discord.TextStyle.short, required=\n False)\n", (1020, 1154), False, 'import discord\n'), ((3487, 3506), 'discord.Color.red', 'discord.Color.red', ([], {}), '()\n', (3504, 3506), False, 'import discord\n'), ((7095, 7114), 'discord.Color.red', 'discord.Color.red', ([], {}), '()\n', (7112, 7114), False, 'import discord\n'), ((1730, 1766), 're.match', 're.match', (['"""^(https|http)://.*"""', 'link'], {}), "('^(https|http)://.*', link)\n", (1738, 1766), False, 'import re\n'), ((5478, 5514), 're.match', 're.match', (['"""^(https|http)://.*"""', 'link'], {}), "('^(https|http)://.*', link)\n", (5486, 5514), False, 'import re\n')] |
import NLQ_Preprocessor as preProcessor
import NLP_Engine as nlpEngine
import NLQ_Interpreter as interpreter
import nltk
import time
class NLQ_Chunker:
def __init__(self):
self.preprocessor = preProcessor.PreProcessor()
self.nlp_engine = nlpEngine.NLP_Engine()
self.interpreter = interpreter.Interpreter()
def chunk_a_sentence(self, sentence):
sentence = self.preprocessor.replace_special_words(sentence)['sentence']
# this method returns an object {'sentence': xxxx, 'origional_sentence': xxxx}
tokens = self.preprocessor.filter_tokens_result(nltk.word_tokenize(sentence))
tags = self.preprocessor.recify_tagging_result(nltk.pos_tag(tokens))
# get the bigram of the sentence, which tells subjects/objects from other elements
bigram = self.nlp_engine.bigram_chunk_sentence(tags)
final_gram = self.nlp_engine.top_pattern_recognizer(bigram) # the fully processed tree that contains all the info needed.
# final_gram.draw()
return self.interpreter.main_tree_navigator(final_gram)
#
#
#
#
#
# chunker = NLQ_Chunker()
# sentence = input('Ask: ')
# start = time.time()
# chunker.chunk_a_sentence(sentence)
# print('took ' , time.time() - start, 'seconds') | [
"nltk.pos_tag",
"nltk.word_tokenize",
"NLP_Engine.NLP_Engine",
"NLQ_Preprocessor.PreProcessor",
"NLQ_Interpreter.Interpreter"
] | [((208, 235), 'NLQ_Preprocessor.PreProcessor', 'preProcessor.PreProcessor', ([], {}), '()\n', (233, 235), True, 'import NLQ_Preprocessor as preProcessor\n'), ((262, 284), 'NLP_Engine.NLP_Engine', 'nlpEngine.NLP_Engine', ([], {}), '()\n', (282, 284), True, 'import NLP_Engine as nlpEngine\n'), ((312, 337), 'NLQ_Interpreter.Interpreter', 'interpreter.Interpreter', ([], {}), '()\n', (335, 337), True, 'import NLQ_Interpreter as interpreter\n'), ((607, 635), 'nltk.word_tokenize', 'nltk.word_tokenize', (['sentence'], {}), '(sentence)\n', (625, 635), False, 'import nltk\n'), ((692, 712), 'nltk.pos_tag', 'nltk.pos_tag', (['tokens'], {}), '(tokens)\n', (704, 712), False, 'import nltk\n')] |
from src.neural_networks.art_fuzzy import ARTFUZZY
import numpy as np
def test_If_I_isintance_numpy():
A = ARTFUZZY([1.0, 2.0])
assert isinstance(A.I, np.ndarray)
def test_If_W_isintance_numpy():
A = ARTFUZZY([1.0, 2.0])
assert isinstance(A.I, np.ndarray) | [
"src.neural_networks.art_fuzzy.ARTFUZZY"
] | [((113, 133), 'src.neural_networks.art_fuzzy.ARTFUZZY', 'ARTFUZZY', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (121, 133), False, 'from src.neural_networks.art_fuzzy import ARTFUZZY\n'), ((219, 239), 'src.neural_networks.art_fuzzy.ARTFUZZY', 'ARTFUZZY', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (227, 239), False, 'from src.neural_networks.art_fuzzy import ARTFUZZY\n')] |
'''Tests for methods in helpers/no_import_common_class/utilities.py'''
# pylint: disable=missing-function-docstring
# pylint: disable=redefined-outer-name
import pytest
import helpers.no_import_common_class.paragraph_helpers as helpers
import utilities.random_methods as utils
import testing.data.dict_constants as constants
def test_find_dictionary_from_list_by_key_and_value():
cats = constants.LIST_OF_SIMILAR_DICTIONARIES
black_cats = utils.find_dictionary_from_list_by_key_and_value(cats, 'color', 'black')
assert len(black_cats) == 2
assert black_cats[0]['color'] == 'black'
assert black_cats[1]['color'] == 'black'
def test_find_value_from_dictionary_list():
cats = constants.LIST_OF_DIFFERENT_DICTIONARIES
current_cats = utils.find_value_from_dictionary_list(cats, 'alive')
assert len(current_cats) == 2
assert isinstance(current_cats[0], bool)
assert current_cats[0]
assert current_cats[1]
@pytest.mark.parametrize('key, expected', [('alive', True), ('name', False)])
def test_key_not_in_dictionary(key, expected):
result = utils.key_not_in_dictionary({'name': 'Nemo'}, key)
assert result == expected
@pytest.mark.parametrize('key, expected', [('alive', False), ('name', True)])
def test_key_in_dictionary(key, expected):
result = utils.key_in_dictionary({'name': 'Nemo'}, key)
assert result == expected
@pytest.mark.parametrize('key, expected', [('in_', True), ('in', True), ('file', False)])
def test_dictionary_key_begins_with_substring(key, expected):
result = utils.dictionary_key_begins_with_substring({'in_file': 'data/input_file.json'}, key)
assert result == expected
@pytest.mark.parametrize('key, value', [('name', 'Nemo'), ('color', 'black'), ('year', '1994')])
def test_dict_from_split_string(key, value):
result = utils.dict_from_split_string('Nemo~black~1994', '~', ('name', 'color', 'year'))
assert result[key] == value
@pytest.mark.parametrize('key_1, val_1, key_2, val_2_list, association_list', [
('dog_id', 'Inky', 'cat_id', ['Nemo', 'Grayface', 'PD'], None),
('dog_id', 'Camden', 'cat_id', ['Sammy', 'Mac'], [{'dog_id': 'Wrigley', 'cat_id': 'Sammy'},
{'dog_id': 'Wrigley', 'cat_id': 'Mac'}]),
('dog_id', 'Pluto', 'cat_id', ['Ninja', 'Ronin'], None)
])
def test_add_to_associations(key_1, val_1, key_2, val_2_list, association_list):
size = 0 if association_list is None else len(association_list)
resulting_list = helpers.add_to_associations(key_1, val_1, key_2, val_2_list, association_list)
val_2 = val_2_list[-1]
last_association = resulting_list[-1]
assert len(resulting_list) == size + len(val_2_list)
assert last_association[key_1] == val_1
assert last_association[key_2] == val_2
| [
"utilities.random_methods.dict_from_split_string",
"utilities.random_methods.find_dictionary_from_list_by_key_and_value",
"pytest.mark.parametrize",
"utilities.random_methods.key_not_in_dictionary",
"utilities.random_methods.dictionary_key_begins_with_substring",
"utilities.random_methods.key_in_dictionar... | [((951, 1027), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""key, expected"""', "[('alive', True), ('name', False)]"], {}), "('key, expected', [('alive', True), ('name', False)])\n", (974, 1027), False, 'import pytest\n'), ((1172, 1248), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""key, expected"""', "[('alive', False), ('name', True)]"], {}), "('key, expected', [('alive', False), ('name', True)])\n", (1195, 1248), False, 'import pytest\n'), ((1385, 1478), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""key, expected"""', "[('in_', True), ('in', True), ('file', False)]"], {}), "('key, expected', [('in_', True), ('in', True), (\n 'file', False)])\n", (1408, 1478), False, 'import pytest\n'), ((1667, 1766), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""key, value"""', "[('name', 'Nemo'), ('color', 'black'), ('year', '1994')]"], {}), "('key, value', [('name', 'Nemo'), ('color', 'black'),\n ('year', '1994')])\n", (1690, 1766), False, 'import pytest\n'), ((1936, 2286), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""key_1, val_1, key_2, val_2_list, association_list"""', "[('dog_id', 'Inky', 'cat_id', ['Nemo', 'Grayface', 'PD'], None), ('dog_id',\n 'Camden', 'cat_id', ['Sammy', 'Mac'], [{'dog_id': 'Wrigley', 'cat_id':\n 'Sammy'}, {'dog_id': 'Wrigley', 'cat_id': 'Mac'}]), ('dog_id', 'Pluto',\n 'cat_id', ['Ninja', 'Ronin'], None)]"], {}), "('key_1, val_1, key_2, val_2_list, association_list',\n [('dog_id', 'Inky', 'cat_id', ['Nemo', 'Grayface', 'PD'], None), (\n 'dog_id', 'Camden', 'cat_id', ['Sammy', 'Mac'], [{'dog_id': 'Wrigley',\n 'cat_id': 'Sammy'}, {'dog_id': 'Wrigley', 'cat_id': 'Mac'}]), ('dog_id',\n 'Pluto', 'cat_id', ['Ninja', 'Ronin'], None)])\n", (1959, 2286), False, 'import pytest\n'), ((450, 522), 'utilities.random_methods.find_dictionary_from_list_by_key_and_value', 'utils.find_dictionary_from_list_by_key_and_value', (['cats', '"""color"""', 
'"""black"""'], {}), "(cats, 'color', 'black')\n", (498, 522), True, 'import utilities.random_methods as utils\n'), ((762, 814), 'utilities.random_methods.find_value_from_dictionary_list', 'utils.find_value_from_dictionary_list', (['cats', '"""alive"""'], {}), "(cats, 'alive')\n", (799, 814), True, 'import utilities.random_methods as utils\n'), ((1088, 1138), 'utilities.random_methods.key_not_in_dictionary', 'utils.key_not_in_dictionary', (["{'name': 'Nemo'}", 'key'], {}), "({'name': 'Nemo'}, key)\n", (1115, 1138), True, 'import utilities.random_methods as utils\n'), ((1305, 1351), 'utilities.random_methods.key_in_dictionary', 'utils.key_in_dictionary', (["{'name': 'Nemo'}", 'key'], {}), "({'name': 'Nemo'}, key)\n", (1328, 1351), True, 'import utilities.random_methods as utils\n'), ((1549, 1637), 'utilities.random_methods.dictionary_key_begins_with_substring', 'utils.dictionary_key_begins_with_substring', (["{'in_file': 'data/input_file.json'}", 'key'], {}), "({'in_file':\n 'data/input_file.json'}, key)\n", (1591, 1637), True, 'import utilities.random_methods as utils\n'), ((1821, 1900), 'utilities.random_methods.dict_from_split_string', 'utils.dict_from_split_string', (['"""Nemo~black~1994"""', '"""~"""', "('name', 'color', 'year')"], {}), "('Nemo~black~1994', '~', ('name', 'color', 'year'))\n", (1849, 1900), True, 'import utilities.random_methods as utils\n'), ((2508, 2586), 'helpers.no_import_common_class.paragraph_helpers.add_to_associations', 'helpers.add_to_associations', (['key_1', 'val_1', 'key_2', 'val_2_list', 'association_list'], {}), '(key_1, val_1, key_2, val_2_list, association_list)\n', (2535, 2586), True, 'import helpers.no_import_common_class.paragraph_helpers as helpers\n')] |
# coding: utf-8
#
# Project: X-ray image reader
# https://github.com/silx-kit/fabio
#
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
from __future__ import with_statement, print_function
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "12/11/2018"
import re
import logging
from . import tifimage
_logger = logging.getLogger(__name__)
class PilatusTiffFrame(tifimage.TiffFrame):
"""Frame container for TIFF format generated by a Pilatus detector"""
def __init__(self, data, tiff_header, pilatus_header):
super(PilatusTiffFrame, self).__init__(data, tiff_header)
self.pilatus_header = pilatus_header
# Override the header
self._header = pilatus_header
class PilatusImage(tifimage.TifImage):
""" Read in Pilatus format, also
pilatus images, including header info """
DESCRIPTION = "Pilatus file format based on Tiff"
DEFAULT_EXTENSIONS = ["tif", "tiff"]
_keyvalue_spliter = re.compile(r"\s*[,:=\s]\s*")
"""It allow to split the first white space, colon, coma, or equal
character and remove white spaces around"""
def _create_pilatus_header(self, tiff_header):
"""
Parse Pilatus header from a TIFF header.
The Pilatus header is stored in the metadata ImageDescription (tag 270)
as an ASCII text which looks like:
.. block-code:: python
imageDescription = '# Pixel_size 172e-6 m x 172e-6 m\r\n'\
'# Silicon sensor, thickness 0.000320 m\r\n# Exposure_time 90.000000 s\r\n'\
'# Exposure_period 90.000000 s\r\n# Tau = 0 s\r\n'\
'# Count_cutoff 1048574 counts\r\n# Threshold_setting 0 eV\r\n'\
'# Gain_setting not implemented (vrf = 9.900)\r\n'\
'# N_excluded_pixels = 0\r\n# Excluded_pixels: (nil)\r\n'\
'# Flat_field: (nil)\r\n# Trim_directory: (nil)\r\n\x00'
:rtype: OrderedDict
"""
if "imageDescription" not in tiff_header:
# It is not a Pilatus TIFF image
raise IOError("Image is not a Pilatus image")
header = self.check_header()
description = tiff_header["imageDescription"]
for line in description.split("\n"):
index = line.find('# ')
if index == -1:
if line.strip(" \x00") != "":
# If it is not an empty line
_logger.debug("Pilatus header line '%s' misformed. Skipped", line)
continue
line = line[2:].strip()
if line == "":
# empty line
continue
result = self._keyvalue_spliter.split(line, 1)
if len(result) != 2:
_logger.debug("Pilatus header line '%s' misformed. Skipped", line)
continue
key, value = result
header[key] = value
return header
def _create_frame(self, image_data, tiff_header):
"""Create exposed data from TIFF information"""
pilatus_header = self._create_pilatus_header(tiff_header)
frame = PilatusTiffFrame(image_data, tiff_header, pilatus_header)
return frame
pilatusimage = PilatusImage
| [
"logging.getLogger",
"re.compile"
] | [((1470, 1497), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1487, 1497), False, 'import logging\n'), ((2107, 2137), 're.compile', 're.compile', (['"""\\\\s*[,:=\\\\s]\\\\s*"""'], {}), "('\\\\s*[,:=\\\\s]\\\\s*')\n", (2117, 2137), False, 'import re\n')] |
# coding: utf-8
"""
CLOUD API
IONOS Enterprise-grade Infrastructure as a Service (IaaS) solutions can be managed through the Cloud API, in addition or as an alternative to the \"Data Center Designer\" (DCD) browser-based tool. Both methods employ consistent concepts and features, deliver similar power and flexibility, and can be used to perform a multitude of management tasks, including adding servers, volumes, configuring networks, and so on. # noqa: E501
The version of the OpenAPI document: 6.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ionoscloud.configuration import Configuration
class S3KeyProperties(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'secret_key': 'str',
'active': 'bool',
}
attribute_map = {
'secret_key': 'secretKey',
'active': 'active',
}
def __init__(self, secret_key=None, active=None, local_vars_configuration=None): # noqa: E501
"""S3KeyProperties - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._secret_key = None
self._active = None
self.discriminator = None
if secret_key is not None:
self.secret_key = secret_key
if active is not None:
self.active = active
@property
def secret_key(self):
"""Gets the secret_key of this S3KeyProperties. # noqa: E501
Secret of the S3 key. # noqa: E501
:return: The secret_key of this S3KeyProperties. # noqa: E501
:rtype: str
"""
return self._secret_key
@secret_key.setter
def secret_key(self, secret_key):
"""Sets the secret_key of this S3KeyProperties.
Secret of the S3 key. # noqa: E501
:param secret_key: The secret_key of this S3KeyProperties. # noqa: E501
:type secret_key: str
"""
self._secret_key = secret_key
@property
def active(self):
"""Gets the active of this S3KeyProperties. # noqa: E501
Denotes weather the S3 key is active. # noqa: E501
:return: The active of this S3KeyProperties. # noqa: E501
:rtype: bool
"""
return self._active
@active.setter
def active(self, active):
"""Sets the active of this S3KeyProperties.
Denotes weather the S3 key is active. # noqa: E501
:param active: The active of this S3KeyProperties. # noqa: E501
:type active: bool
"""
self._active = active
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, S3KeyProperties):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, S3KeyProperties):
return True
return self.to_dict() != other.to_dict()
| [
"ionoscloud.configuration.Configuration",
"six.iteritems"
] | [((3223, 3256), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (3236, 3256), False, 'import six\n'), ((1567, 1582), 'ionoscloud.configuration.Configuration', 'Configuration', ([], {}), '()\n', (1580, 1582), False, 'from ionoscloud.configuration import Configuration\n')] |
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileRequired, FileAllowed
class CSVUploadForm(FlaskForm):
csvfile = FileField(
"CSV Mark Sheet",
validators=[FileRequired(), FileAllowed(["csv"], "CSV Files only!")],
)
| [
"flask_wtf.file.FileRequired",
"flask_wtf.file.FileAllowed"
] | [((201, 215), 'flask_wtf.file.FileRequired', 'FileRequired', ([], {}), '()\n', (213, 215), False, 'from flask_wtf.file import FileField, FileRequired, FileAllowed\n'), ((217, 256), 'flask_wtf.file.FileAllowed', 'FileAllowed', (["['csv']", '"""CSV Files only!"""'], {}), "(['csv'], 'CSV Files only!')\n", (228, 256), False, 'from flask_wtf.file import FileField, FileRequired, FileAllowed\n')] |
import pandas as pd
from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean
from os import environ
class MaskedEmbeddingsAggregatorLayer(keras.layers.Layer):
def __init__(self, agg_mode='sum', **kwargs):
super(MaskedEmbeddingsAggregatorLayer, self).__init__(**kwargs)
if agg_mode not in ['sum', 'mean']:
raise NotImplementedError('mode {} not implemented!'.format(agg_mode))
self.agg_mode = agg_mode
@function
def call(self, inputs, mask=None):
masked_embeddings = ragged.boolean_mask(inputs, mask)
if self.agg_mode == 'sum':
aggregated = reduce_sum(masked_embeddings, axis=1)
elif self.agg_mode == 'mean':
aggregated = reduce_mean(masked_embeddings, axis=1)
return aggregated
def get_config(self):
# this is used when loading a saved model that uses a custom layer
return {'agg_mode': self.agg_mode}
class L2NormLayer(keras.layers.Layer):
def __init__(self, **kwargs):
super(L2NormLayer, self).__init__(**kwargs)
@function
def call(self, inputs, mask=None):
if mask is not None:
inputs = ragged.boolean_mask(inputs, mask).to_tensor()
return math.l2_normalize(inputs, axis=-1)
def compute_mask(self, inputs, mask):
return mask
def get_data(corpus_path):
corpus = pd.read_json(environ.get("DATA_COLLECTION_HOST") + "/api/details/get-indexed-songs")
""" Save Corpus """
corpus.to_pickle(corpus_path)
print("Saved Corpus at: ", corpus_path)
watch_history = pd.read_json(environ.get("DATA_COLLECTION_HOST") + "/api/history/get-history")
# search_history = pd.read_json("http://localhost:9000/recommendation/history/search")
return corpus, watch_history
def get_model(NUM_CLASSES):
EMBEDDING_DIMS = 16
DENSE_UNITS = 64
DROPOUT_PCT = 0.0
ALPHA = 0.0
LEARNING_RATE = 0.003
""" Handle Search Queries and Watch History - Encoded Indices of Songs """
search_queries = keras.layers.Input(shape=(None,), name='search_query')
watch_history = keras.layers.Input(shape=(None,), name='watch_history')
features_embedding_layer = keras.layers.Embedding(input_dim=NUM_CLASSES, output_dim=EMBEDDING_DIMS, mask_zero=True,
trainable=True, name='searched_embeddings')
labels_embedding_layer = keras.layers.Embedding(input_dim=NUM_CLASSES, output_dim=EMBEDDING_DIMS, mask_zero=True,
trainable=True, name='watched_embeddings')
avg_embeddings = MaskedEmbeddingsAggregatorLayer(agg_mode='mean', name='aggregate_embeddings')
dense_1 = keras.layers.Dense(units=DENSE_UNITS, name='dense_1')
dense_2 = keras.layers.Dense(units=DENSE_UNITS, name='dense_2')
dense_3 = keras.layers.Dense(units=DENSE_UNITS, name='dense_3')
l2_norm_1 = L2NormLayer(name='l2_norm_1')
dense_output = keras.layers.Dense(NUM_CLASSES, activation=nn.softmax, name='dense_output')
""" L2 Normalize Inputs
- Normalize - Average Inputs
- Concat as Single Layer
"""
searched_embeddings = features_embedding_layer(search_queries)
l2_norm_searched = l2_norm_1(searched_embeddings)
avg_searched = avg_embeddings(l2_norm_searched)
labels_watched_embeddings = labels_embedding_layer(watch_history)
l2_norm_watched = l2_norm_1(labels_watched_embeddings)
avg_watched = avg_embeddings(l2_norm_watched)
concat_inputs = keras.layers.Concatenate(axis=1)([avg_searched, avg_watched])
"""### Dense Layers
Contains:
- DenseLayers
- BatchNormalization Layers
- Relu Layers
"""
dense_1_features = dense_1(concat_inputs)
dense_1_relu = keras.layers.ReLU(name='dense_1_relu')(dense_1_features)
dense_1_batch_norm = keras.layers.BatchNormalization(name='dense_1_batch_norm')(dense_1_relu)
dense_2_features = dense_2(dense_1_relu)
dense_2_relu = keras.layers.ReLU(name='dense_2_relu')(dense_2_features)
dense_3_features = dense_3(dense_2_relu)
dense_3_relu = keras.layers.ReLU(name='dense_3_relu')(dense_3_features)
dense_3_batch_norm = keras.layers.BatchNormalization(name='dense_3_batch_norm')(dense_3_relu)
outputs = dense_output(dense_3_batch_norm)
"""### Compiling the Model"""
optimiser = keras.optimizers.Adam(learning_rate=LEARNING_RATE)
loss = 'sparse_categorical_crossentropy'
model = keras.models.Model(
inputs=[search_queries, watch_history],
outputs=[outputs]
)
model.compile(optimizer=optimiser, loss=loss)
return model
def retrain_model(corpus_path, model_snapshot_location):
corpus, watch_history = get_data(corpus_path)
""" Make Indexes for speedier revival """
song_ids = corpus["song_id"].unique().tolist()
song_2_index = {x: i for i, x in enumerate(song_ids)}
# index_2_songid = {i: x for i, x in enumerate(song_ids)}
user_ids = watch_history["user_id"].unique().tolist()
user_2_index = {x: i for i, x in enumerate(user_ids)}
# index_2_userid = {i: x for i, x in enumerate(user_ids)}
""" Encoded Song Ids and user Ids to feed to the network """
watch_history['user_id'] = watch_history['user_id'].map(user_2_index)
watch_history['song_id'] = watch_history['song_id'].map(song_2_index)
""" Group user's watch history """
watches_grouped = watch_history.groupby(['user_id'])['song_id'].apply(list).reset_index()
""" Treat last watched as Past Prediction """
watches_grouped['past_predicted'] = watches_grouped['song_id'].apply(lambda x: (x[-1]))
""" Save model snapshot callback """
checkpoint = keras.callbacks.ModelCheckpoint(model_snapshot_location, monitor='loss', verbose=1,
save_best_only=True, mode='min')
model = get_model(NUM_CLASSES=(len(corpus) + 2))
""" Not Adding Search Queries"""
model.fit([
keras.preprocessing.sequence.pad_sequences(watches_grouped['song_id']),
keras.preprocessing.sequence.pad_sequences(watches_grouped['song_id']),
], watches_grouped['past_predicted'].values, callbacks=[checkpoint], steps_per_epoch=1, epochs=100,
verbose=1)
print("Model Retrained")
| [
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.layers.ReLU",
"tensorflow.reduce_sum",
"tensorflow.ragged.boolean_mask",
"os.environ.get",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.kera... | [((2042, 2096), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(None,)', 'name': '"""search_query"""'}), "(shape=(None,), name='search_query')\n", (2060, 2096), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((2117, 2172), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(None,)', 'name': '"""watch_history"""'}), "(shape=(None,), name='watch_history')\n", (2135, 2172), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((2205, 2341), 'tensorflow.keras.layers.Embedding', 'keras.layers.Embedding', ([], {'input_dim': 'NUM_CLASSES', 'output_dim': 'EMBEDDING_DIMS', 'mask_zero': '(True)', 'trainable': '(True)', 'name': '"""searched_embeddings"""'}), "(input_dim=NUM_CLASSES, output_dim=EMBEDDING_DIMS,\n mask_zero=True, trainable=True, name='searched_embeddings')\n", (2227, 2341), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((2421, 2556), 'tensorflow.keras.layers.Embedding', 'keras.layers.Embedding', ([], {'input_dim': 'NUM_CLASSES', 'output_dim': 'EMBEDDING_DIMS', 'mask_zero': '(True)', 'trainable': '(True)', 'name': '"""watched_embeddings"""'}), "(input_dim=NUM_CLASSES, output_dim=EMBEDDING_DIMS,\n mask_zero=True, trainable=True, name='watched_embeddings')\n", (2443, 2556), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((2720, 2773), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': 'DENSE_UNITS', 'name': '"""dense_1"""'}), "(units=DENSE_UNITS, name='dense_1')\n", (2738, 2773), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((2788, 2841), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': 'DENSE_UNITS', 'name': '"""dense_2"""'}), "(units=DENSE_UNITS, name='dense_2')\n", (2806, 2841), False, 'from tensorflow import keras, 
reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((2856, 2909), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': 'DENSE_UNITS', 'name': '"""dense_3"""'}), "(units=DENSE_UNITS, name='dense_3')\n", (2874, 2909), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((2976, 3051), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['NUM_CLASSES'], {'activation': 'nn.softmax', 'name': '"""dense_output"""'}), "(NUM_CLASSES, activation=nn.softmax, name='dense_output')\n", (2994, 3051), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((4378, 4428), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'LEARNING_RATE'}), '(learning_rate=LEARNING_RATE)\n', (4399, 4428), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((4487, 4564), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': '[search_queries, watch_history]', 'outputs': '[outputs]'}), '(inputs=[search_queries, watch_history], outputs=[outputs])\n', (4505, 4564), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((5712, 5832), 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (['model_snapshot_location'], {'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(model_snapshot_location, monitor='loss',\n verbose=1, save_best_only=True, mode='min')\n", (5743, 5832), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((551, 584), 'tensorflow.ragged.boolean_mask', 'ragged.boolean_mask', (['inputs', 'mask'], {}), '(inputs, mask)\n', (570, 584), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((1249, 1283), 'tensorflow.math.l2_normalize', 'math.l2_normalize', (['inputs'], {'axis': '(-1)'}), '(inputs, 
axis=-1)\n', (1266, 1283), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((3530, 3562), 'tensorflow.keras.layers.Concatenate', 'keras.layers.Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (3554, 3562), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((3781, 3819), 'tensorflow.keras.layers.ReLU', 'keras.layers.ReLU', ([], {'name': '"""dense_1_relu"""'}), "(name='dense_1_relu')\n", (3798, 3819), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((3863, 3921), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {'name': '"""dense_1_batch_norm"""'}), "(name='dense_1_batch_norm')\n", (3894, 3921), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((4001, 4039), 'tensorflow.keras.layers.ReLU', 'keras.layers.ReLU', ([], {'name': '"""dense_2_relu"""'}), "(name='dense_2_relu')\n", (4018, 4039), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((4123, 4161), 'tensorflow.keras.layers.ReLU', 'keras.layers.ReLU', ([], {'name': '"""dense_3_relu"""'}), "(name='dense_3_relu')\n", (4140, 4161), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((4205, 4263), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {'name': '"""dense_3_batch_norm"""'}), "(name='dense_3_batch_norm')\n", (4236, 4263), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((645, 682), 'tensorflow.reduce_sum', 'reduce_sum', (['masked_embeddings'], {'axis': '(1)'}), '(masked_embeddings, axis=1)\n', (655, 682), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((1402, 1437), 'os.environ.get', 'environ.get', (['"""DATA_COLLECTION_HOST"""'], {}), "('DATA_COLLECTION_HOST')\n", (1413, 
1437), False, 'from os import environ\n'), ((1610, 1645), 'os.environ.get', 'environ.get', (['"""DATA_COLLECTION_HOST"""'], {}), "('DATA_COLLECTION_HOST')\n", (1621, 1645), False, 'from os import environ\n'), ((5993, 6063), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'keras.preprocessing.sequence.pad_sequences', (["watches_grouped['song_id']"], {}), "(watches_grouped['song_id'])\n", (6035, 6063), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((6073, 6143), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'keras.preprocessing.sequence.pad_sequences', (["watches_grouped['song_id']"], {}), "(watches_grouped['song_id'])\n", (6115, 6143), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((746, 784), 'tensorflow.reduce_mean', 'reduce_mean', (['masked_embeddings'], {'axis': '(1)'}), '(masked_embeddings, axis=1)\n', (757, 784), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n'), ((1188, 1221), 'tensorflow.ragged.boolean_mask', 'ragged.boolean_mask', (['inputs', 'mask'], {}), '(inputs, mask)\n', (1207, 1221), False, 'from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean\n')] |
import django_tables2 as tables
from nautobot.utilities.tables import (
BaseTable,
ButtonsColumn,
ToggleColumn,
)
from example_plugin.models import AnotherExampleModel, ExampleModel
class ExampleModelTable(BaseTable):
    """Table for list view of `ExampleModel` objects."""
    # Column widgets come from nautobot/django_tables2 helpers; presumably
    # ToggleColumn is the bulk-select checkbox, LinkColumn renders the name
    # as a link to the detail view, and ButtonsColumn adds per-row action
    # buttons -- exact semantics live in those libraries (TODO confirm).
    pk = ToggleColumn()
    name = tables.LinkColumn()
    actions = ButtonsColumn(ExampleModel)
    class Meta(BaseTable.Meta):
        # Bind the table to ExampleModel and restrict the rendered columns.
        model = ExampleModel
        fields = ["pk", "name", "number"]
class AnotherExampleModelTable(BaseTable):
    """Table for list view of `AnotherExampleModel` objects."""
    # Mirrors ExampleModelTable but bound to AnotherExampleModel; column
    # behavior is defined by the nautobot/django_tables2 helper classes
    # (ToggleColumn / LinkColumn / ButtonsColumn), not here.
    pk = ToggleColumn()
    name = tables.LinkColumn()
    actions = ButtonsColumn(AnotherExampleModel)
    class Meta(BaseTable.Meta):
        # Bind the table to AnotherExampleModel and restrict the columns.
        model = AnotherExampleModel
        fields = ["pk", "name", "number"]
| [
"nautobot.utilities.tables.ToggleColumn",
"django_tables2.LinkColumn",
"nautobot.utilities.tables.ButtonsColumn"
] | [((301, 315), 'nautobot.utilities.tables.ToggleColumn', 'ToggleColumn', ([], {}), '()\n', (313, 315), False, 'from nautobot.utilities.tables import BaseTable, ButtonsColumn, ToggleColumn\n'), ((327, 346), 'django_tables2.LinkColumn', 'tables.LinkColumn', ([], {}), '()\n', (344, 346), True, 'import django_tables2 as tables\n'), ((361, 388), 'nautobot.utilities.tables.ButtonsColumn', 'ButtonsColumn', (['ExampleModel'], {}), '(ExampleModel)\n', (374, 388), False, 'from nautobot.utilities.tables import BaseTable, ButtonsColumn, ToggleColumn\n'), ((612, 626), 'nautobot.utilities.tables.ToggleColumn', 'ToggleColumn', ([], {}), '()\n', (624, 626), False, 'from nautobot.utilities.tables import BaseTable, ButtonsColumn, ToggleColumn\n'), ((638, 657), 'django_tables2.LinkColumn', 'tables.LinkColumn', ([], {}), '()\n', (655, 657), True, 'import django_tables2 as tables\n'), ((672, 706), 'nautobot.utilities.tables.ButtonsColumn', 'ButtonsColumn', (['AnotherExampleModel'], {}), '(AnotherExampleModel)\n', (685, 706), False, 'from nautobot.utilities.tables import BaseTable, ButtonsColumn, ToggleColumn\n')] |
from unittest import TestCase
from datetime import datetime
from ctparse.ctparse import ctparse, _match_rule
from ctparse.types import Time
class TestCTParse(TestCase):
    """Exercise the public ctparse entry point and the rule matcher."""

    def test_ctparse(self):
        """Full and day.month-only dates resolve to the same Time."""
        target = Time(year=2020, month=12, day=12)
        result = ctparse('12.12.2020')
        self.assertEqual(result.resolution, target)
        self.assertIsNotNone(str(result))
        self.assertIsNotNone(repr(result))
        # gibberish input must produce no parse at all
        self.assertIsNone(ctparse('gargelbabel'))
        # a day.month string needs the reference timestamp to fix the year
        anchor = datetime(2020, 12, 1)
        partial = ctparse('12.12.', ts=anchor)
        self.assertEqual(partial.resolution, target)
        # debug=True yields a stream of candidates; the first one must match
        stream = ctparse('12.12.', ts=anchor, debug=True)
        self.assertEqual(next(stream).resolution, target)

    def test_ctparse_timeout(self):
        """A tiny timeout must not raise.

        Ideally we would mock the logger and assert that the timeout was
        actually hit, but the logger could not be mocked.
        """
        ctparse('tomorrow 8 yesterday Sep 9 9 12 2023 1923', timeout=0.0001)

    def test_match_rule(self):
        """_match_rule yields nothing when either side is empty."""
        for sequence, rule in (([], ['not empty']), (['not empty'], [])):
            self.assertEqual(list(_match_rule(sequence, rule)), [])
| [
"datetime.datetime",
"ctparse.ctparse.ctparse",
"ctparse.types.Time",
"ctparse.ctparse._match_rule"
] | [((241, 253), 'ctparse.ctparse.ctparse', 'ctparse', (['txt'], {}), '(txt)\n', (248, 253), False, 'from ctparse.ctparse import ctparse, _match_rule\n'), ((1029, 1057), 'ctparse.ctparse.ctparse', 'ctparse', (['txt'], {'timeout': '(0.0001)'}), '(txt, timeout=0.0001)\n', (1036, 1057), False, 'from ctparse.ctparse import ctparse, _match_rule\n'), ((295, 328), 'ctparse.types.Time', 'Time', ([], {'year': '(2020)', 'month': '(12)', 'day': '(12)'}), '(year=2020, month=12, day=12)\n', (299, 328), False, 'from ctparse.types import Time\n'), ((471, 493), 'ctparse.ctparse.ctparse', 'ctparse', (['"""gargelbabel"""'], {}), "('gargelbabel')\n", (478, 493), False, 'from ctparse.ctparse import ctparse, _match_rule\n'), ((612, 645), 'ctparse.types.Time', 'Time', ([], {'year': '(2020)', 'month': '(12)', 'day': '(12)'}), '(year=2020, month=12, day=12)\n', (616, 645), False, 'from ctparse.types import Time\n'), ((759, 792), 'ctparse.types.Time', 'Time', ([], {'year': '(2020)', 'month': '(12)', 'day': '(12)'}), '(year=2020, month=12, day=12)\n', (763, 792), False, 'from ctparse.types import Time\n'), ((548, 569), 'datetime.datetime', 'datetime', (['(2020)', '(12)', '(1)'], {}), '(2020, 12, 1)\n', (556, 569), False, 'from datetime import datetime\n'), ((677, 698), 'datetime.datetime', 'datetime', (['(2020)', '(12)', '(1)'], {}), '(2020, 12, 1)\n', (685, 698), False, 'from datetime import datetime\n'), ((1120, 1150), 'ctparse.ctparse._match_rule', '_match_rule', (['[]', "['not empty']"], {}), "([], ['not empty'])\n", (1131, 1150), False, 'from ctparse.ctparse import ctparse, _match_rule\n'), ((1187, 1217), 'ctparse.ctparse._match_rule', '_match_rule', (["['not empty']", '[]'], {}), "(['not empty'], [])\n", (1198, 1217), False, 'from ctparse.ctparse import ctparse, _match_rule\n')] |
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import thread_mutex_try_lock
expected_verilog = """
module test;
reg CLK;
reg RST;
blinkled
uut
(
.CLK(CLK),
.RST(RST)
);
initial begin
$dumpfile("uut.vcd");
$dumpvars(0, uut);
end
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
#100;
RST = 1;
#100;
RST = 0;
#10000;
$finish;
end
endmodule
module blinkled
(
input CLK,
input RST
);
reg _mymutex_lock_reg;
reg [32-1:0] _mymutex_lock_id;
reg [8-1:0] _th_myfunc_start;
reg [32-1:0] th_blink;
localparam th_blink_init = 0;
reg signed [32-1:0] _th_blink_tid_0;
reg [32-1:0] th_myfunc_0;
localparam th_myfunc_0_init = 0;
reg [32-1:0] th_myfunc_1;
localparam th_myfunc_1_init = 0;
reg [32-1:0] th_myfunc_2;
localparam th_myfunc_2_init = 0;
reg [32-1:0] th_myfunc_3;
localparam th_myfunc_3_init = 0;
reg [32-1:0] th_myfunc_4;
localparam th_myfunc_4_init = 0;
reg [32-1:0] th_myfunc_5;
localparam th_myfunc_5_init = 0;
reg [32-1:0] th_myfunc_6;
localparam th_myfunc_6_init = 0;
reg [32-1:0] th_myfunc_7;
localparam th_myfunc_7_init = 0;
reg _th_myfunc_0_called;
reg signed [32-1:0] _th_myfunc_0_tid_1;
reg signed [32-1:0] _th_myfunc_0_tid_2;
reg _tmp_0;
reg signed [32-1:0] _th_myfunc_0_lock_3;
reg signed [32-1:0] _th_myfunc_0_waitcount_4;
reg _tmp_1;
reg signed [32-1:0] _th_myfunc_0_i_5;
reg _th_myfunc_1_called;
reg signed [32-1:0] _th_myfunc_1_tid_6;
reg signed [32-1:0] _th_myfunc_1_tid_7;
reg _tmp_2;
reg signed [32-1:0] _th_myfunc_1_lock_8;
reg signed [32-1:0] _th_myfunc_1_waitcount_9;
reg _tmp_3;
reg signed [32-1:0] _th_myfunc_1_i_10;
reg _th_myfunc_2_called;
reg signed [32-1:0] _th_myfunc_2_tid_11;
reg signed [32-1:0] _th_myfunc_2_tid_12;
reg _tmp_4;
reg signed [32-1:0] _th_myfunc_2_lock_13;
reg signed [32-1:0] _th_myfunc_2_waitcount_14;
reg _tmp_5;
reg signed [32-1:0] _th_myfunc_2_i_15;
reg _th_myfunc_3_called;
reg signed [32-1:0] _th_myfunc_3_tid_16;
reg signed [32-1:0] _th_myfunc_3_tid_17;
reg _tmp_6;
reg signed [32-1:0] _th_myfunc_3_lock_18;
reg signed [32-1:0] _th_myfunc_3_waitcount_19;
reg _tmp_7;
reg signed [32-1:0] _th_myfunc_3_i_20;
reg _th_myfunc_4_called;
reg signed [32-1:0] _th_myfunc_4_tid_21;
reg signed [32-1:0] _th_myfunc_4_tid_22;
reg _tmp_8;
reg signed [32-1:0] _th_myfunc_4_lock_23;
reg signed [32-1:0] _th_myfunc_4_waitcount_24;
reg _tmp_9;
reg signed [32-1:0] _th_myfunc_4_i_25;
reg _th_myfunc_5_called;
reg signed [32-1:0] _th_myfunc_5_tid_26;
reg signed [32-1:0] _th_myfunc_5_tid_27;
reg _tmp_10;
reg signed [32-1:0] _th_myfunc_5_lock_28;
reg signed [32-1:0] _th_myfunc_5_waitcount_29;
reg _tmp_11;
reg signed [32-1:0] _th_myfunc_5_i_30;
reg _th_myfunc_6_called;
reg signed [32-1:0] _th_myfunc_6_tid_31;
reg signed [32-1:0] _th_myfunc_6_tid_32;
reg _tmp_12;
reg signed [32-1:0] _th_myfunc_6_lock_33;
reg signed [32-1:0] _th_myfunc_6_waitcount_34;
reg _tmp_13;
reg signed [32-1:0] _th_myfunc_6_i_35;
reg _th_myfunc_7_called;
reg signed [32-1:0] _th_myfunc_7_tid_36;
reg signed [32-1:0] _th_myfunc_7_tid_37;
reg _tmp_14;
reg signed [32-1:0] _th_myfunc_7_lock_38;
reg signed [32-1:0] _th_myfunc_7_waitcount_39;
reg _tmp_15;
reg signed [32-1:0] _th_myfunc_7_i_40;
always @(posedge CLK) begin
if(RST) begin
_mymutex_lock_reg <= 0;
_mymutex_lock_id <= 0;
end else begin
if((th_myfunc_0 == 3) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 0;
end
if((th_myfunc_0 == 10) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 0;
end
if((th_myfunc_0 == 19) && (_mymutex_lock_id == 0)) begin
_mymutex_lock_reg <= 0;
end
if((th_myfunc_1 == 3) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 1;
end
if((th_myfunc_1 == 10) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 1;
end
if((th_myfunc_1 == 19) && (_mymutex_lock_id == 1)) begin
_mymutex_lock_reg <= 0;
end
if((th_myfunc_2 == 3) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 2;
end
if((th_myfunc_2 == 10) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 2;
end
if((th_myfunc_2 == 19) && (_mymutex_lock_id == 2)) begin
_mymutex_lock_reg <= 0;
end
if((th_myfunc_3 == 3) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 3;
end
if((th_myfunc_3 == 10) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 3;
end
if((th_myfunc_3 == 19) && (_mymutex_lock_id == 3)) begin
_mymutex_lock_reg <= 0;
end
if((th_myfunc_4 == 3) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 4;
end
if((th_myfunc_4 == 10) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 4;
end
if((th_myfunc_4 == 19) && (_mymutex_lock_id == 4)) begin
_mymutex_lock_reg <= 0;
end
if((th_myfunc_5 == 3) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 5;
end
if((th_myfunc_5 == 10) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 5;
end
if((th_myfunc_5 == 19) && (_mymutex_lock_id == 5)) begin
_mymutex_lock_reg <= 0;
end
if((th_myfunc_6 == 3) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 6;
end
if((th_myfunc_6 == 10) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 6;
end
if((th_myfunc_6 == 19) && (_mymutex_lock_id == 6)) begin
_mymutex_lock_reg <= 0;
end
if((th_myfunc_7 == 3) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 7;
end
if((th_myfunc_7 == 10) && !_mymutex_lock_reg) begin
_mymutex_lock_reg <= 1;
_mymutex_lock_id <= 7;
end
if((th_myfunc_7 == 19) && (_mymutex_lock_id == 7)) begin
_mymutex_lock_reg <= 0;
end
end
end
localparam th_blink_1 = 1;
localparam th_blink_2 = 2;
localparam th_blink_3 = 3;
localparam th_blink_4 = 4;
localparam th_blink_5 = 5;
localparam th_blink_6 = 6;
localparam th_blink_7 = 7;
localparam th_blink_8 = 8;
localparam th_blink_9 = 9;
localparam th_blink_10 = 10;
localparam th_blink_11 = 11;
always @(posedge CLK) begin
if(RST) begin
th_blink <= th_blink_init;
_th_blink_tid_0 <= 0;
_th_myfunc_start[_th_blink_tid_0] <= (0 >> _th_blink_tid_0) & 1'd1;
end else begin
case(th_blink)
th_blink_init: begin
th_blink <= th_blink_1;
end
th_blink_1: begin
_th_blink_tid_0 <= 0;
th_blink <= th_blink_2;
end
th_blink_2: begin
if(_th_blink_tid_0 < 8) begin
th_blink <= th_blink_3;
end else begin
th_blink <= th_blink_7;
end
end
th_blink_3: begin
_th_myfunc_start[_th_blink_tid_0] <= 1;
th_blink <= th_blink_4;
end
th_blink_4: begin
th_blink <= th_blink_5;
th_blink <= th_blink_5;
th_blink <= th_blink_5;
th_blink <= th_blink_5;
th_blink <= th_blink_5;
th_blink <= th_blink_5;
th_blink <= th_blink_5;
th_blink <= th_blink_5;
end
th_blink_5: begin
_th_myfunc_start[_th_blink_tid_0] <= 0;
th_blink <= th_blink_6;
end
th_blink_6: begin
_th_blink_tid_0 <= _th_blink_tid_0 + 1;
th_blink <= th_blink_2;
end
th_blink_7: begin
_th_blink_tid_0 <= 0;
th_blink <= th_blink_8;
end
th_blink_8: begin
if(_th_blink_tid_0 < 8) begin
th_blink <= th_blink_9;
end else begin
th_blink <= th_blink_11;
end
end
th_blink_9: begin
if((_th_blink_tid_0 == 0)? th_myfunc_0 == 21 :
(_th_blink_tid_0 == 1)? th_myfunc_1 == 21 :
(_th_blink_tid_0 == 2)? th_myfunc_2 == 21 :
(_th_blink_tid_0 == 3)? th_myfunc_3 == 21 :
(_th_blink_tid_0 == 4)? th_myfunc_4 == 21 :
(_th_blink_tid_0 == 5)? th_myfunc_5 == 21 :
(_th_blink_tid_0 == 6)? th_myfunc_6 == 21 :
(_th_blink_tid_0 == 7)? th_myfunc_7 == 21 : 0) begin
th_blink <= th_blink_10;
end
end
th_blink_10: begin
_th_blink_tid_0 <= _th_blink_tid_0 + 1;
th_blink <= th_blink_8;
end
endcase
end
end
localparam th_myfunc_0_1 = 1;
localparam th_myfunc_0_2 = 2;
localparam th_myfunc_0_3 = 3;
localparam th_myfunc_0_4 = 4;
localparam th_myfunc_0_5 = 5;
localparam th_myfunc_0_6 = 6;
localparam th_myfunc_0_7 = 7;
localparam th_myfunc_0_8 = 8;
localparam th_myfunc_0_9 = 9;
localparam th_myfunc_0_10 = 10;
localparam th_myfunc_0_11 = 11;
localparam th_myfunc_0_12 = 12;
localparam th_myfunc_0_13 = 13;
localparam th_myfunc_0_14 = 14;
localparam th_myfunc_0_15 = 15;
localparam th_myfunc_0_16 = 16;
localparam th_myfunc_0_17 = 17;
localparam th_myfunc_0_18 = 18;
localparam th_myfunc_0_19 = 19;
localparam th_myfunc_0_20 = 20;
localparam th_myfunc_0_21 = 21;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_0 <= th_myfunc_0_init;
_th_myfunc_0_called <= 0;
_th_myfunc_0_tid_1 <= 0;
_th_myfunc_0_tid_2 <= 0;
_tmp_0 <= 0;
_th_myfunc_0_lock_3 <= 0;
_th_myfunc_0_waitcount_4 <= 0;
_tmp_1 <= 0;
_th_myfunc_0_i_5 <= 0;
end else begin
case(th_myfunc_0)
th_myfunc_0_init: begin
if(_th_myfunc_start[0] && (th_blink == 4)) begin
_th_myfunc_0_called <= 1;
end
if(_th_myfunc_start[0] && (th_blink == 4)) begin
_th_myfunc_0_tid_1 <= _th_blink_tid_0;
end
if((th_blink == 4) && _th_myfunc_start[0]) begin
th_myfunc_0 <= th_myfunc_0_1;
end
end
th_myfunc_0_1: begin
_th_myfunc_0_tid_2 <= _th_myfunc_0_tid_1;
th_myfunc_0 <= th_myfunc_0_2;
end
th_myfunc_0_2: begin
$display("-- Thread %d TryLock", _th_myfunc_0_tid_2);
th_myfunc_0 <= th_myfunc_0_3;
end
th_myfunc_0_3: begin
th_myfunc_0 <= th_myfunc_0_4;
end
th_myfunc_0_4: begin
_tmp_0 <= _mymutex_lock_reg & (_mymutex_lock_id == 0);
th_myfunc_0 <= th_myfunc_0_5;
end
th_myfunc_0_5: begin
_th_myfunc_0_lock_3 <= _tmp_0;
th_myfunc_0 <= th_myfunc_0_6;
end
th_myfunc_0_6: begin
_th_myfunc_0_waitcount_4 <= 0;
th_myfunc_0 <= th_myfunc_0_7;
end
th_myfunc_0_7: begin
if(!_th_myfunc_0_lock_3) begin
th_myfunc_0 <= th_myfunc_0_8;
end else begin
th_myfunc_0 <= th_myfunc_0_14;
end
end
th_myfunc_0_8: begin
$display("-- Thread %d TryLock", _th_myfunc_0_tid_2);
th_myfunc_0 <= th_myfunc_0_9;
end
th_myfunc_0_9: begin
_th_myfunc_0_waitcount_4 <= _th_myfunc_0_waitcount_4 + 1;
th_myfunc_0 <= th_myfunc_0_10;
end
th_myfunc_0_10: begin
th_myfunc_0 <= th_myfunc_0_11;
end
th_myfunc_0_11: begin
_tmp_1 <= _mymutex_lock_reg & (_mymutex_lock_id == 0);
th_myfunc_0 <= th_myfunc_0_12;
end
th_myfunc_0_12: begin
_th_myfunc_0_lock_3 <= _tmp_1;
th_myfunc_0 <= th_myfunc_0_13;
end
th_myfunc_0_13: begin
th_myfunc_0 <= th_myfunc_0_7;
end
th_myfunc_0_14: begin
$display("Thread %d Lock: waitcount=%d", _th_myfunc_0_tid_2, _th_myfunc_0_waitcount_4);
th_myfunc_0 <= th_myfunc_0_15;
end
th_myfunc_0_15: begin
_th_myfunc_0_i_5 <= 0;
th_myfunc_0 <= th_myfunc_0_16;
end
th_myfunc_0_16: begin
if(_th_myfunc_0_i_5 < 20) begin
th_myfunc_0 <= th_myfunc_0_17;
end else begin
th_myfunc_0 <= th_myfunc_0_18;
end
end
th_myfunc_0_17: begin
_th_myfunc_0_i_5 <= _th_myfunc_0_i_5 + 1;
th_myfunc_0 <= th_myfunc_0_16;
end
th_myfunc_0_18: begin
$display("Thread %d Hello", _th_myfunc_0_tid_2);
th_myfunc_0 <= th_myfunc_0_19;
end
th_myfunc_0_19: begin
th_myfunc_0 <= th_myfunc_0_20;
end
th_myfunc_0_20: begin
$display("Thread %d Unlock", _th_myfunc_0_tid_2);
th_myfunc_0 <= th_myfunc_0_21;
end
endcase
end
end
localparam th_myfunc_1_1 = 1;
localparam th_myfunc_1_2 = 2;
localparam th_myfunc_1_3 = 3;
localparam th_myfunc_1_4 = 4;
localparam th_myfunc_1_5 = 5;
localparam th_myfunc_1_6 = 6;
localparam th_myfunc_1_7 = 7;
localparam th_myfunc_1_8 = 8;
localparam th_myfunc_1_9 = 9;
localparam th_myfunc_1_10 = 10;
localparam th_myfunc_1_11 = 11;
localparam th_myfunc_1_12 = 12;
localparam th_myfunc_1_13 = 13;
localparam th_myfunc_1_14 = 14;
localparam th_myfunc_1_15 = 15;
localparam th_myfunc_1_16 = 16;
localparam th_myfunc_1_17 = 17;
localparam th_myfunc_1_18 = 18;
localparam th_myfunc_1_19 = 19;
localparam th_myfunc_1_20 = 20;
localparam th_myfunc_1_21 = 21;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_1 <= th_myfunc_1_init;
_th_myfunc_1_called <= 0;
_th_myfunc_1_tid_6 <= 0;
_th_myfunc_1_tid_7 <= 0;
_tmp_2 <= 0;
_th_myfunc_1_lock_8 <= 0;
_th_myfunc_1_waitcount_9 <= 0;
_tmp_3 <= 0;
_th_myfunc_1_i_10 <= 0;
end else begin
case(th_myfunc_1)
th_myfunc_1_init: begin
if(_th_myfunc_start[1] && (th_blink == 4)) begin
_th_myfunc_1_called <= 1;
end
if(_th_myfunc_start[1] && (th_blink == 4)) begin
_th_myfunc_1_tid_6 <= _th_blink_tid_0;
end
if((th_blink == 4) && _th_myfunc_start[1]) begin
th_myfunc_1 <= th_myfunc_1_1;
end
end
th_myfunc_1_1: begin
_th_myfunc_1_tid_7 <= _th_myfunc_1_tid_6;
th_myfunc_1 <= th_myfunc_1_2;
end
th_myfunc_1_2: begin
$display("-- Thread %d TryLock", _th_myfunc_1_tid_7);
th_myfunc_1 <= th_myfunc_1_3;
end
th_myfunc_1_3: begin
th_myfunc_1 <= th_myfunc_1_4;
end
th_myfunc_1_4: begin
_tmp_2 <= _mymutex_lock_reg & (_mymutex_lock_id == 1);
th_myfunc_1 <= th_myfunc_1_5;
end
th_myfunc_1_5: begin
_th_myfunc_1_lock_8 <= _tmp_2;
th_myfunc_1 <= th_myfunc_1_6;
end
th_myfunc_1_6: begin
_th_myfunc_1_waitcount_9 <= 0;
th_myfunc_1 <= th_myfunc_1_7;
end
th_myfunc_1_7: begin
if(!_th_myfunc_1_lock_8) begin
th_myfunc_1 <= th_myfunc_1_8;
end else begin
th_myfunc_1 <= th_myfunc_1_14;
end
end
th_myfunc_1_8: begin
$display("-- Thread %d TryLock", _th_myfunc_1_tid_7);
th_myfunc_1 <= th_myfunc_1_9;
end
th_myfunc_1_9: begin
_th_myfunc_1_waitcount_9 <= _th_myfunc_1_waitcount_9 + 1;
th_myfunc_1 <= th_myfunc_1_10;
end
th_myfunc_1_10: begin
th_myfunc_1 <= th_myfunc_1_11;
end
th_myfunc_1_11: begin
_tmp_3 <= _mymutex_lock_reg & (_mymutex_lock_id == 1);
th_myfunc_1 <= th_myfunc_1_12;
end
th_myfunc_1_12: begin
_th_myfunc_1_lock_8 <= _tmp_3;
th_myfunc_1 <= th_myfunc_1_13;
end
th_myfunc_1_13: begin
th_myfunc_1 <= th_myfunc_1_7;
end
th_myfunc_1_14: begin
$display("Thread %d Lock: waitcount=%d", _th_myfunc_1_tid_7, _th_myfunc_1_waitcount_9);
th_myfunc_1 <= th_myfunc_1_15;
end
th_myfunc_1_15: begin
_th_myfunc_1_i_10 <= 0;
th_myfunc_1 <= th_myfunc_1_16;
end
th_myfunc_1_16: begin
if(_th_myfunc_1_i_10 < 20) begin
th_myfunc_1 <= th_myfunc_1_17;
end else begin
th_myfunc_1 <= th_myfunc_1_18;
end
end
th_myfunc_1_17: begin
_th_myfunc_1_i_10 <= _th_myfunc_1_i_10 + 1;
th_myfunc_1 <= th_myfunc_1_16;
end
th_myfunc_1_18: begin
$display("Thread %d Hello", _th_myfunc_1_tid_7);
th_myfunc_1 <= th_myfunc_1_19;
end
th_myfunc_1_19: begin
th_myfunc_1 <= th_myfunc_1_20;
end
th_myfunc_1_20: begin
$display("Thread %d Unlock", _th_myfunc_1_tid_7);
th_myfunc_1 <= th_myfunc_1_21;
end
endcase
end
end
localparam th_myfunc_2_1 = 1;
localparam th_myfunc_2_2 = 2;
localparam th_myfunc_2_3 = 3;
localparam th_myfunc_2_4 = 4;
localparam th_myfunc_2_5 = 5;
localparam th_myfunc_2_6 = 6;
localparam th_myfunc_2_7 = 7;
localparam th_myfunc_2_8 = 8;
localparam th_myfunc_2_9 = 9;
localparam th_myfunc_2_10 = 10;
localparam th_myfunc_2_11 = 11;
localparam th_myfunc_2_12 = 12;
localparam th_myfunc_2_13 = 13;
localparam th_myfunc_2_14 = 14;
localparam th_myfunc_2_15 = 15;
localparam th_myfunc_2_16 = 16;
localparam th_myfunc_2_17 = 17;
localparam th_myfunc_2_18 = 18;
localparam th_myfunc_2_19 = 19;
localparam th_myfunc_2_20 = 20;
localparam th_myfunc_2_21 = 21;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_2 <= th_myfunc_2_init;
_th_myfunc_2_called <= 0;
_th_myfunc_2_tid_11 <= 0;
_th_myfunc_2_tid_12 <= 0;
_tmp_4 <= 0;
_th_myfunc_2_lock_13 <= 0;
_th_myfunc_2_waitcount_14 <= 0;
_tmp_5 <= 0;
_th_myfunc_2_i_15 <= 0;
end else begin
case(th_myfunc_2)
th_myfunc_2_init: begin
if(_th_myfunc_start[2] && (th_blink == 4)) begin
_th_myfunc_2_called <= 1;
end
if(_th_myfunc_start[2] && (th_blink == 4)) begin
_th_myfunc_2_tid_11 <= _th_blink_tid_0;
end
if((th_blink == 4) && _th_myfunc_start[2]) begin
th_myfunc_2 <= th_myfunc_2_1;
end
end
th_myfunc_2_1: begin
_th_myfunc_2_tid_12 <= _th_myfunc_2_tid_11;
th_myfunc_2 <= th_myfunc_2_2;
end
th_myfunc_2_2: begin
$display("-- Thread %d TryLock", _th_myfunc_2_tid_12);
th_myfunc_2 <= th_myfunc_2_3;
end
th_myfunc_2_3: begin
th_myfunc_2 <= th_myfunc_2_4;
end
th_myfunc_2_4: begin
_tmp_4 <= _mymutex_lock_reg & (_mymutex_lock_id == 2);
th_myfunc_2 <= th_myfunc_2_5;
end
th_myfunc_2_5: begin
_th_myfunc_2_lock_13 <= _tmp_4;
th_myfunc_2 <= th_myfunc_2_6;
end
th_myfunc_2_6: begin
_th_myfunc_2_waitcount_14 <= 0;
th_myfunc_2 <= th_myfunc_2_7;
end
th_myfunc_2_7: begin
if(!_th_myfunc_2_lock_13) begin
th_myfunc_2 <= th_myfunc_2_8;
end else begin
th_myfunc_2 <= th_myfunc_2_14;
end
end
th_myfunc_2_8: begin
$display("-- Thread %d TryLock", _th_myfunc_2_tid_12);
th_myfunc_2 <= th_myfunc_2_9;
end
th_myfunc_2_9: begin
_th_myfunc_2_waitcount_14 <= _th_myfunc_2_waitcount_14 + 1;
th_myfunc_2 <= th_myfunc_2_10;
end
th_myfunc_2_10: begin
th_myfunc_2 <= th_myfunc_2_11;
end
th_myfunc_2_11: begin
_tmp_5 <= _mymutex_lock_reg & (_mymutex_lock_id == 2);
th_myfunc_2 <= th_myfunc_2_12;
end
th_myfunc_2_12: begin
_th_myfunc_2_lock_13 <= _tmp_5;
th_myfunc_2 <= th_myfunc_2_13;
end
th_myfunc_2_13: begin
th_myfunc_2 <= th_myfunc_2_7;
end
th_myfunc_2_14: begin
$display("Thread %d Lock: waitcount=%d", _th_myfunc_2_tid_12, _th_myfunc_2_waitcount_14);
th_myfunc_2 <= th_myfunc_2_15;
end
th_myfunc_2_15: begin
_th_myfunc_2_i_15 <= 0;
th_myfunc_2 <= th_myfunc_2_16;
end
th_myfunc_2_16: begin
if(_th_myfunc_2_i_15 < 20) begin
th_myfunc_2 <= th_myfunc_2_17;
end else begin
th_myfunc_2 <= th_myfunc_2_18;
end
end
th_myfunc_2_17: begin
_th_myfunc_2_i_15 <= _th_myfunc_2_i_15 + 1;
th_myfunc_2 <= th_myfunc_2_16;
end
th_myfunc_2_18: begin
$display("Thread %d Hello", _th_myfunc_2_tid_12);
th_myfunc_2 <= th_myfunc_2_19;
end
th_myfunc_2_19: begin
th_myfunc_2 <= th_myfunc_2_20;
end
th_myfunc_2_20: begin
$display("Thread %d Unlock", _th_myfunc_2_tid_12);
th_myfunc_2 <= th_myfunc_2_21;
end
endcase
end
end
localparam th_myfunc_3_1 = 1;
localparam th_myfunc_3_2 = 2;
localparam th_myfunc_3_3 = 3;
localparam th_myfunc_3_4 = 4;
localparam th_myfunc_3_5 = 5;
localparam th_myfunc_3_6 = 6;
localparam th_myfunc_3_7 = 7;
localparam th_myfunc_3_8 = 8;
localparam th_myfunc_3_9 = 9;
localparam th_myfunc_3_10 = 10;
localparam th_myfunc_3_11 = 11;
localparam th_myfunc_3_12 = 12;
localparam th_myfunc_3_13 = 13;
localparam th_myfunc_3_14 = 14;
localparam th_myfunc_3_15 = 15;
localparam th_myfunc_3_16 = 16;
localparam th_myfunc_3_17 = 17;
localparam th_myfunc_3_18 = 18;
localparam th_myfunc_3_19 = 19;
localparam th_myfunc_3_20 = 20;
localparam th_myfunc_3_21 = 21;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_3 <= th_myfunc_3_init;
_th_myfunc_3_called <= 0;
_th_myfunc_3_tid_16 <= 0;
_th_myfunc_3_tid_17 <= 0;
_tmp_6 <= 0;
_th_myfunc_3_lock_18 <= 0;
_th_myfunc_3_waitcount_19 <= 0;
_tmp_7 <= 0;
_th_myfunc_3_i_20 <= 0;
end else begin
case(th_myfunc_3)
th_myfunc_3_init: begin
if(_th_myfunc_start[3] && (th_blink == 4)) begin
_th_myfunc_3_called <= 1;
end
if(_th_myfunc_start[3] && (th_blink == 4)) begin
_th_myfunc_3_tid_16 <= _th_blink_tid_0;
end
if((th_blink == 4) && _th_myfunc_start[3]) begin
th_myfunc_3 <= th_myfunc_3_1;
end
end
th_myfunc_3_1: begin
_th_myfunc_3_tid_17 <= _th_myfunc_3_tid_16;
th_myfunc_3 <= th_myfunc_3_2;
end
th_myfunc_3_2: begin
$display("-- Thread %d TryLock", _th_myfunc_3_tid_17);
th_myfunc_3 <= th_myfunc_3_3;
end
th_myfunc_3_3: begin
th_myfunc_3 <= th_myfunc_3_4;
end
th_myfunc_3_4: begin
_tmp_6 <= _mymutex_lock_reg & (_mymutex_lock_id == 3);
th_myfunc_3 <= th_myfunc_3_5;
end
th_myfunc_3_5: begin
_th_myfunc_3_lock_18 <= _tmp_6;
th_myfunc_3 <= th_myfunc_3_6;
end
th_myfunc_3_6: begin
_th_myfunc_3_waitcount_19 <= 0;
th_myfunc_3 <= th_myfunc_3_7;
end
th_myfunc_3_7: begin
if(!_th_myfunc_3_lock_18) begin
th_myfunc_3 <= th_myfunc_3_8;
end else begin
th_myfunc_3 <= th_myfunc_3_14;
end
end
th_myfunc_3_8: begin
$display("-- Thread %d TryLock", _th_myfunc_3_tid_17);
th_myfunc_3 <= th_myfunc_3_9;
end
th_myfunc_3_9: begin
_th_myfunc_3_waitcount_19 <= _th_myfunc_3_waitcount_19 + 1;
th_myfunc_3 <= th_myfunc_3_10;
end
th_myfunc_3_10: begin
th_myfunc_3 <= th_myfunc_3_11;
end
th_myfunc_3_11: begin
_tmp_7 <= _mymutex_lock_reg & (_mymutex_lock_id == 3);
th_myfunc_3 <= th_myfunc_3_12;
end
th_myfunc_3_12: begin
_th_myfunc_3_lock_18 <= _tmp_7;
th_myfunc_3 <= th_myfunc_3_13;
end
th_myfunc_3_13: begin
th_myfunc_3 <= th_myfunc_3_7;
end
th_myfunc_3_14: begin
$display("Thread %d Lock: waitcount=%d", _th_myfunc_3_tid_17, _th_myfunc_3_waitcount_19);
th_myfunc_3 <= th_myfunc_3_15;
end
th_myfunc_3_15: begin
_th_myfunc_3_i_20 <= 0;
th_myfunc_3 <= th_myfunc_3_16;
end
th_myfunc_3_16: begin
if(_th_myfunc_3_i_20 < 20) begin
th_myfunc_3 <= th_myfunc_3_17;
end else begin
th_myfunc_3 <= th_myfunc_3_18;
end
end
th_myfunc_3_17: begin
_th_myfunc_3_i_20 <= _th_myfunc_3_i_20 + 1;
th_myfunc_3 <= th_myfunc_3_16;
end
th_myfunc_3_18: begin
$display("Thread %d Hello", _th_myfunc_3_tid_17);
th_myfunc_3 <= th_myfunc_3_19;
end
th_myfunc_3_19: begin
th_myfunc_3 <= th_myfunc_3_20;
end
th_myfunc_3_20: begin
$display("Thread %d Unlock", _th_myfunc_3_tid_17);
th_myfunc_3 <= th_myfunc_3_21;
end
endcase
end
end
localparam th_myfunc_4_1 = 1;
localparam th_myfunc_4_2 = 2;
localparam th_myfunc_4_3 = 3;
localparam th_myfunc_4_4 = 4;
localparam th_myfunc_4_5 = 5;
localparam th_myfunc_4_6 = 6;
localparam th_myfunc_4_7 = 7;
localparam th_myfunc_4_8 = 8;
localparam th_myfunc_4_9 = 9;
localparam th_myfunc_4_10 = 10;
localparam th_myfunc_4_11 = 11;
localparam th_myfunc_4_12 = 12;
localparam th_myfunc_4_13 = 13;
localparam th_myfunc_4_14 = 14;
localparam th_myfunc_4_15 = 15;
localparam th_myfunc_4_16 = 16;
localparam th_myfunc_4_17 = 17;
localparam th_myfunc_4_18 = 18;
localparam th_myfunc_4_19 = 19;
localparam th_myfunc_4_20 = 20;
localparam th_myfunc_4_21 = 21;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_4 <= th_myfunc_4_init;
_th_myfunc_4_called <= 0;
_th_myfunc_4_tid_21 <= 0;
_th_myfunc_4_tid_22 <= 0;
_tmp_8 <= 0;
_th_myfunc_4_lock_23 <= 0;
_th_myfunc_4_waitcount_24 <= 0;
_tmp_9 <= 0;
_th_myfunc_4_i_25 <= 0;
end else begin
case(th_myfunc_4)
th_myfunc_4_init: begin
if(_th_myfunc_start[4] && (th_blink == 4)) begin
_th_myfunc_4_called <= 1;
end
if(_th_myfunc_start[4] && (th_blink == 4)) begin
_th_myfunc_4_tid_21 <= _th_blink_tid_0;
end
if((th_blink == 4) && _th_myfunc_start[4]) begin
th_myfunc_4 <= th_myfunc_4_1;
end
end
th_myfunc_4_1: begin
_th_myfunc_4_tid_22 <= _th_myfunc_4_tid_21;
th_myfunc_4 <= th_myfunc_4_2;
end
th_myfunc_4_2: begin
$display("-- Thread %d TryLock", _th_myfunc_4_tid_22);
th_myfunc_4 <= th_myfunc_4_3;
end
th_myfunc_4_3: begin
th_myfunc_4 <= th_myfunc_4_4;
end
th_myfunc_4_4: begin
_tmp_8 <= _mymutex_lock_reg & (_mymutex_lock_id == 4);
th_myfunc_4 <= th_myfunc_4_5;
end
th_myfunc_4_5: begin
_th_myfunc_4_lock_23 <= _tmp_8;
th_myfunc_4 <= th_myfunc_4_6;
end
th_myfunc_4_6: begin
_th_myfunc_4_waitcount_24 <= 0;
th_myfunc_4 <= th_myfunc_4_7;
end
th_myfunc_4_7: begin
if(!_th_myfunc_4_lock_23) begin
th_myfunc_4 <= th_myfunc_4_8;
end else begin
th_myfunc_4 <= th_myfunc_4_14;
end
end
th_myfunc_4_8: begin
$display("-- Thread %d TryLock", _th_myfunc_4_tid_22);
th_myfunc_4 <= th_myfunc_4_9;
end
th_myfunc_4_9: begin
_th_myfunc_4_waitcount_24 <= _th_myfunc_4_waitcount_24 + 1;
th_myfunc_4 <= th_myfunc_4_10;
end
th_myfunc_4_10: begin
th_myfunc_4 <= th_myfunc_4_11;
end
th_myfunc_4_11: begin
_tmp_9 <= _mymutex_lock_reg & (_mymutex_lock_id == 4);
th_myfunc_4 <= th_myfunc_4_12;
end
th_myfunc_4_12: begin
_th_myfunc_4_lock_23 <= _tmp_9;
th_myfunc_4 <= th_myfunc_4_13;
end
th_myfunc_4_13: begin
th_myfunc_4 <= th_myfunc_4_7;
end
th_myfunc_4_14: begin
$display("Thread %d Lock: waitcount=%d", _th_myfunc_4_tid_22, _th_myfunc_4_waitcount_24);
th_myfunc_4 <= th_myfunc_4_15;
end
th_myfunc_4_15: begin
_th_myfunc_4_i_25 <= 0;
th_myfunc_4 <= th_myfunc_4_16;
end
th_myfunc_4_16: begin
if(_th_myfunc_4_i_25 < 20) begin
th_myfunc_4 <= th_myfunc_4_17;
end else begin
th_myfunc_4 <= th_myfunc_4_18;
end
end
th_myfunc_4_17: begin
_th_myfunc_4_i_25 <= _th_myfunc_4_i_25 + 1;
th_myfunc_4 <= th_myfunc_4_16;
end
th_myfunc_4_18: begin
$display("Thread %d Hello", _th_myfunc_4_tid_22);
th_myfunc_4 <= th_myfunc_4_19;
end
th_myfunc_4_19: begin
th_myfunc_4 <= th_myfunc_4_20;
end
th_myfunc_4_20: begin
$display("Thread %d Unlock", _th_myfunc_4_tid_22);
th_myfunc_4 <= th_myfunc_4_21;
end
endcase
end
end
localparam th_myfunc_5_1 = 1;
localparam th_myfunc_5_2 = 2;
localparam th_myfunc_5_3 = 3;
localparam th_myfunc_5_4 = 4;
localparam th_myfunc_5_5 = 5;
localparam th_myfunc_5_6 = 6;
localparam th_myfunc_5_7 = 7;
localparam th_myfunc_5_8 = 8;
localparam th_myfunc_5_9 = 9;
localparam th_myfunc_5_10 = 10;
localparam th_myfunc_5_11 = 11;
localparam th_myfunc_5_12 = 12;
localparam th_myfunc_5_13 = 13;
localparam th_myfunc_5_14 = 14;
localparam th_myfunc_5_15 = 15;
localparam th_myfunc_5_16 = 16;
localparam th_myfunc_5_17 = 17;
localparam th_myfunc_5_18 = 18;
localparam th_myfunc_5_19 = 19;
localparam th_myfunc_5_20 = 20;
localparam th_myfunc_5_21 = 21;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_5 <= th_myfunc_5_init;
_th_myfunc_5_called <= 0;
_th_myfunc_5_tid_26 <= 0;
_th_myfunc_5_tid_27 <= 0;
_tmp_10 <= 0;
_th_myfunc_5_lock_28 <= 0;
_th_myfunc_5_waitcount_29 <= 0;
_tmp_11 <= 0;
_th_myfunc_5_i_30 <= 0;
end else begin
case(th_myfunc_5)
th_myfunc_5_init: begin
if(_th_myfunc_start[5] && (th_blink == 4)) begin
_th_myfunc_5_called <= 1;
end
if(_th_myfunc_start[5] && (th_blink == 4)) begin
_th_myfunc_5_tid_26 <= _th_blink_tid_0;
end
if((th_blink == 4) && _th_myfunc_start[5]) begin
th_myfunc_5 <= th_myfunc_5_1;
end
end
th_myfunc_5_1: begin
_th_myfunc_5_tid_27 <= _th_myfunc_5_tid_26;
th_myfunc_5 <= th_myfunc_5_2;
end
th_myfunc_5_2: begin
$display("-- Thread %d TryLock", _th_myfunc_5_tid_27);
th_myfunc_5 <= th_myfunc_5_3;
end
th_myfunc_5_3: begin
th_myfunc_5 <= th_myfunc_5_4;
end
th_myfunc_5_4: begin
_tmp_10 <= _mymutex_lock_reg & (_mymutex_lock_id == 5);
th_myfunc_5 <= th_myfunc_5_5;
end
th_myfunc_5_5: begin
_th_myfunc_5_lock_28 <= _tmp_10;
th_myfunc_5 <= th_myfunc_5_6;
end
th_myfunc_5_6: begin
_th_myfunc_5_waitcount_29 <= 0;
th_myfunc_5 <= th_myfunc_5_7;
end
th_myfunc_5_7: begin
if(!_th_myfunc_5_lock_28) begin
th_myfunc_5 <= th_myfunc_5_8;
end else begin
th_myfunc_5 <= th_myfunc_5_14;
end
end
th_myfunc_5_8: begin
$display("-- Thread %d TryLock", _th_myfunc_5_tid_27);
th_myfunc_5 <= th_myfunc_5_9;
end
th_myfunc_5_9: begin
_th_myfunc_5_waitcount_29 <= _th_myfunc_5_waitcount_29 + 1;
th_myfunc_5 <= th_myfunc_5_10;
end
th_myfunc_5_10: begin
th_myfunc_5 <= th_myfunc_5_11;
end
th_myfunc_5_11: begin
_tmp_11 <= _mymutex_lock_reg & (_mymutex_lock_id == 5);
th_myfunc_5 <= th_myfunc_5_12;
end
th_myfunc_5_12: begin
_th_myfunc_5_lock_28 <= _tmp_11;
th_myfunc_5 <= th_myfunc_5_13;
end
th_myfunc_5_13: begin
th_myfunc_5 <= th_myfunc_5_7;
end
th_myfunc_5_14: begin
$display("Thread %d Lock: waitcount=%d", _th_myfunc_5_tid_27, _th_myfunc_5_waitcount_29);
th_myfunc_5 <= th_myfunc_5_15;
end
th_myfunc_5_15: begin
_th_myfunc_5_i_30 <= 0;
th_myfunc_5 <= th_myfunc_5_16;
end
th_myfunc_5_16: begin
if(_th_myfunc_5_i_30 < 20) begin
th_myfunc_5 <= th_myfunc_5_17;
end else begin
th_myfunc_5 <= th_myfunc_5_18;
end
end
th_myfunc_5_17: begin
_th_myfunc_5_i_30 <= _th_myfunc_5_i_30 + 1;
th_myfunc_5 <= th_myfunc_5_16;
end
th_myfunc_5_18: begin
$display("Thread %d Hello", _th_myfunc_5_tid_27);
th_myfunc_5 <= th_myfunc_5_19;
end
th_myfunc_5_19: begin
th_myfunc_5 <= th_myfunc_5_20;
end
th_myfunc_5_20: begin
$display("Thread %d Unlock", _th_myfunc_5_tid_27);
th_myfunc_5 <= th_myfunc_5_21;
end
endcase
end
end
localparam th_myfunc_6_1 = 1;
localparam th_myfunc_6_2 = 2;
localparam th_myfunc_6_3 = 3;
localparam th_myfunc_6_4 = 4;
localparam th_myfunc_6_5 = 5;
localparam th_myfunc_6_6 = 6;
localparam th_myfunc_6_7 = 7;
localparam th_myfunc_6_8 = 8;
localparam th_myfunc_6_9 = 9;
localparam th_myfunc_6_10 = 10;
localparam th_myfunc_6_11 = 11;
localparam th_myfunc_6_12 = 12;
localparam th_myfunc_6_13 = 13;
localparam th_myfunc_6_14 = 14;
localparam th_myfunc_6_15 = 15;
localparam th_myfunc_6_16 = 16;
localparam th_myfunc_6_17 = 17;
localparam th_myfunc_6_18 = 18;
localparam th_myfunc_6_19 = 19;
localparam th_myfunc_6_20 = 20;
localparam th_myfunc_6_21 = 21;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_6 <= th_myfunc_6_init;
_th_myfunc_6_called <= 0;
_th_myfunc_6_tid_31 <= 0;
_th_myfunc_6_tid_32 <= 0;
_tmp_12 <= 0;
_th_myfunc_6_lock_33 <= 0;
_th_myfunc_6_waitcount_34 <= 0;
_tmp_13 <= 0;
_th_myfunc_6_i_35 <= 0;
end else begin
case(th_myfunc_6)
th_myfunc_6_init: begin
if(_th_myfunc_start[6] && (th_blink == 4)) begin
_th_myfunc_6_called <= 1;
end
if(_th_myfunc_start[6] && (th_blink == 4)) begin
_th_myfunc_6_tid_31 <= _th_blink_tid_0;
end
if((th_blink == 4) && _th_myfunc_start[6]) begin
th_myfunc_6 <= th_myfunc_6_1;
end
end
th_myfunc_6_1: begin
_th_myfunc_6_tid_32 <= _th_myfunc_6_tid_31;
th_myfunc_6 <= th_myfunc_6_2;
end
th_myfunc_6_2: begin
$display("-- Thread %d TryLock", _th_myfunc_6_tid_32);
th_myfunc_6 <= th_myfunc_6_3;
end
th_myfunc_6_3: begin
th_myfunc_6 <= th_myfunc_6_4;
end
th_myfunc_6_4: begin
_tmp_12 <= _mymutex_lock_reg & (_mymutex_lock_id == 6);
th_myfunc_6 <= th_myfunc_6_5;
end
th_myfunc_6_5: begin
_th_myfunc_6_lock_33 <= _tmp_12;
th_myfunc_6 <= th_myfunc_6_6;
end
th_myfunc_6_6: begin
_th_myfunc_6_waitcount_34 <= 0;
th_myfunc_6 <= th_myfunc_6_7;
end
th_myfunc_6_7: begin
if(!_th_myfunc_6_lock_33) begin
th_myfunc_6 <= th_myfunc_6_8;
end else begin
th_myfunc_6 <= th_myfunc_6_14;
end
end
th_myfunc_6_8: begin
$display("-- Thread %d TryLock", _th_myfunc_6_tid_32);
th_myfunc_6 <= th_myfunc_6_9;
end
th_myfunc_6_9: begin
_th_myfunc_6_waitcount_34 <= _th_myfunc_6_waitcount_34 + 1;
th_myfunc_6 <= th_myfunc_6_10;
end
th_myfunc_6_10: begin
th_myfunc_6 <= th_myfunc_6_11;
end
th_myfunc_6_11: begin
_tmp_13 <= _mymutex_lock_reg & (_mymutex_lock_id == 6);
th_myfunc_6 <= th_myfunc_6_12;
end
th_myfunc_6_12: begin
_th_myfunc_6_lock_33 <= _tmp_13;
th_myfunc_6 <= th_myfunc_6_13;
end
th_myfunc_6_13: begin
th_myfunc_6 <= th_myfunc_6_7;
end
th_myfunc_6_14: begin
$display("Thread %d Lock: waitcount=%d", _th_myfunc_6_tid_32, _th_myfunc_6_waitcount_34);
th_myfunc_6 <= th_myfunc_6_15;
end
th_myfunc_6_15: begin
_th_myfunc_6_i_35 <= 0;
th_myfunc_6 <= th_myfunc_6_16;
end
th_myfunc_6_16: begin
if(_th_myfunc_6_i_35 < 20) begin
th_myfunc_6 <= th_myfunc_6_17;
end else begin
th_myfunc_6 <= th_myfunc_6_18;
end
end
th_myfunc_6_17: begin
_th_myfunc_6_i_35 <= _th_myfunc_6_i_35 + 1;
th_myfunc_6 <= th_myfunc_6_16;
end
th_myfunc_6_18: begin
$display("Thread %d Hello", _th_myfunc_6_tid_32);
th_myfunc_6 <= th_myfunc_6_19;
end
th_myfunc_6_19: begin
th_myfunc_6 <= th_myfunc_6_20;
end
th_myfunc_6_20: begin
$display("Thread %d Unlock", _th_myfunc_6_tid_32);
th_myfunc_6 <= th_myfunc_6_21;
end
endcase
end
end
localparam th_myfunc_7_1 = 1;
localparam th_myfunc_7_2 = 2;
localparam th_myfunc_7_3 = 3;
localparam th_myfunc_7_4 = 4;
localparam th_myfunc_7_5 = 5;
localparam th_myfunc_7_6 = 6;
localparam th_myfunc_7_7 = 7;
localparam th_myfunc_7_8 = 8;
localparam th_myfunc_7_9 = 9;
localparam th_myfunc_7_10 = 10;
localparam th_myfunc_7_11 = 11;
localparam th_myfunc_7_12 = 12;
localparam th_myfunc_7_13 = 13;
localparam th_myfunc_7_14 = 14;
localparam th_myfunc_7_15 = 15;
localparam th_myfunc_7_16 = 16;
localparam th_myfunc_7_17 = 17;
localparam th_myfunc_7_18 = 18;
localparam th_myfunc_7_19 = 19;
localparam th_myfunc_7_20 = 20;
localparam th_myfunc_7_21 = 21;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_7 <= th_myfunc_7_init;
_th_myfunc_7_called <= 0;
_th_myfunc_7_tid_36 <= 0;
_th_myfunc_7_tid_37 <= 0;
_tmp_14 <= 0;
_th_myfunc_7_lock_38 <= 0;
_th_myfunc_7_waitcount_39 <= 0;
_tmp_15 <= 0;
_th_myfunc_7_i_40 <= 0;
end else begin
case(th_myfunc_7)
th_myfunc_7_init: begin
if(_th_myfunc_start[7] && (th_blink == 4)) begin
_th_myfunc_7_called <= 1;
end
if(_th_myfunc_start[7] && (th_blink == 4)) begin
_th_myfunc_7_tid_36 <= _th_blink_tid_0;
end
if((th_blink == 4) && _th_myfunc_start[7]) begin
th_myfunc_7 <= th_myfunc_7_1;
end
end
th_myfunc_7_1: begin
_th_myfunc_7_tid_37 <= _th_myfunc_7_tid_36;
th_myfunc_7 <= th_myfunc_7_2;
end
th_myfunc_7_2: begin
$display("-- Thread %d TryLock", _th_myfunc_7_tid_37);
th_myfunc_7 <= th_myfunc_7_3;
end
th_myfunc_7_3: begin
th_myfunc_7 <= th_myfunc_7_4;
end
th_myfunc_7_4: begin
_tmp_14 <= _mymutex_lock_reg & (_mymutex_lock_id == 7);
th_myfunc_7 <= th_myfunc_7_5;
end
th_myfunc_7_5: begin
_th_myfunc_7_lock_38 <= _tmp_14;
th_myfunc_7 <= th_myfunc_7_6;
end
th_myfunc_7_6: begin
_th_myfunc_7_waitcount_39 <= 0;
th_myfunc_7 <= th_myfunc_7_7;
end
th_myfunc_7_7: begin
if(!_th_myfunc_7_lock_38) begin
th_myfunc_7 <= th_myfunc_7_8;
end else begin
th_myfunc_7 <= th_myfunc_7_14;
end
end
th_myfunc_7_8: begin
$display("-- Thread %d TryLock", _th_myfunc_7_tid_37);
th_myfunc_7 <= th_myfunc_7_9;
end
th_myfunc_7_9: begin
_th_myfunc_7_waitcount_39 <= _th_myfunc_7_waitcount_39 + 1;
th_myfunc_7 <= th_myfunc_7_10;
end
th_myfunc_7_10: begin
th_myfunc_7 <= th_myfunc_7_11;
end
th_myfunc_7_11: begin
_tmp_15 <= _mymutex_lock_reg & (_mymutex_lock_id == 7);
th_myfunc_7 <= th_myfunc_7_12;
end
th_myfunc_7_12: begin
_th_myfunc_7_lock_38 <= _tmp_15;
th_myfunc_7 <= th_myfunc_7_13;
end
th_myfunc_7_13: begin
th_myfunc_7 <= th_myfunc_7_7;
end
th_myfunc_7_14: begin
$display("Thread %d Lock: waitcount=%d", _th_myfunc_7_tid_37, _th_myfunc_7_waitcount_39);
th_myfunc_7 <= th_myfunc_7_15;
end
th_myfunc_7_15: begin
_th_myfunc_7_i_40 <= 0;
th_myfunc_7 <= th_myfunc_7_16;
end
th_myfunc_7_16: begin
if(_th_myfunc_7_i_40 < 20) begin
th_myfunc_7 <= th_myfunc_7_17;
end else begin
th_myfunc_7 <= th_myfunc_7_18;
end
end
th_myfunc_7_17: begin
_th_myfunc_7_i_40 <= _th_myfunc_7_i_40 + 1;
th_myfunc_7 <= th_myfunc_7_16;
end
th_myfunc_7_18: begin
$display("Thread %d Hello", _th_myfunc_7_tid_37);
th_myfunc_7 <= th_myfunc_7_19;
end
th_myfunc_7_19: begin
th_myfunc_7 <= th_myfunc_7_20;
end
th_myfunc_7_20: begin
$display("Thread %d Unlock", _th_myfunc_7_tid_37);
th_myfunc_7 <= th_myfunc_7_21;
end
endcase
end
end
endmodule
"""
def test():
    """Regression check: the generated Verilog must match the expected text.

    Builds the test module with veriloggen, then parses the reference
    Verilog (the module-level ``expected_verilog`` string) and re-emits it
    through pyverilog so both sides are normalized before comparison.
    """
    from pyverilog.vparser.parser import VerilogParser
    from pyverilog.ast_code_generator.codegen import ASTCodeGenerator

    # Reset veriloggen's global module registry so repeated runs are clean.
    veriloggen.reset()
    generated = thread_mutex_try_lock.mkTest().to_verilog()

    # Round-trip the reference source through parser + codegen to normalize
    # whitespace/formatting, then compare apples to apples.
    reference_ast = VerilogParser().parse(expected_verilog)
    reference = ASTCodeGenerator().visit(reference_ast)
    assert reference == generated
| [
"pyverilog.vparser.parser.VerilogParser",
"veriloggen.reset",
"pyverilog.ast_code_generator.codegen.ASTCodeGenerator",
"thread_mutex_try_lock.mkTest"
] | [((42758, 42776), 'veriloggen.reset', 'veriloggen.reset', ([], {}), '()\n', (42774, 42776), False, 'import veriloggen\n'), ((42795, 42825), 'thread_mutex_try_lock.mkTest', 'thread_mutex_try_lock.mkTest', ([], {}), '()\n', (42823, 42825), False, 'import thread_mutex_try_lock\n'), ((43001, 43016), 'pyverilog.vparser.parser.VerilogParser', 'VerilogParser', ([], {}), '()\n', (43014, 43016), False, 'from pyverilog.vparser.parser import VerilogParser\n'), ((43081, 43099), 'pyverilog.ast_code_generator.codegen.ASTCodeGenerator', 'ASTCodeGenerator', ([], {}), '()\n', (43097, 43099), False, 'from pyverilog.ast_code_generator.codegen import ASTCodeGenerator\n')] |
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class Idea(models.Model):
    """A user-posted idea with a title, body text, and a single tag label."""

    title = models.CharField(max_length=100)  # short headline for the idea
    content = models.TextField()  # full body text
    date_posted = models.DateTimeField(default=timezone.now)  # defaults to creation time
    author = models.ForeignKey(User, on_delete=models.CASCADE)  # deleting the user deletes their ideas
    tag_name= models.CharField(max_length=20,default='general')  # free-form tag; 'general' when unspecified

    def __str__(self):
        """Human-readable representation: the idea's title."""
        return self.title
class Comments(models.Model):
    """A comment attached to a single Idea."""

    description = models.CharField(max_length=100)  # the comment text (short, max 100 chars)
    pub_date = models.DateTimeField(default=timezone.now)  # defaults to creation time
    author = models.ForeignKey(User, on_delete=models.CASCADE)  # deleting the user deletes their comments
    # related_name='comments' enables reverse lookup: some_idea.comments.all()
    idea=models.ForeignKey(Idea, on_delete=models.CASCADE, related_name='comments')
# class Tags(models.Model):
# tag_name= models.CharField(max_length=20)
# idea=models.ForeignKey(Idea, on_delete=models.CASCADE, related_name='tags')
# Create your models here.
| [
"django.db.models.DateTimeField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((148, 180), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (164, 180), False, 'from django.db import models\n'), ((195, 213), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (211, 213), False, 'from django.db import models\n'), ((232, 274), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (252, 274), False, 'from django.db import models\n'), ((288, 337), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (305, 337), False, 'from django.db import models\n'), ((352, 402), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '"""general"""'}), "(max_length=20, default='general')\n", (368, 402), False, 'from django.db import models\n'), ((501, 533), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (517, 533), False, 'from django.db import models\n'), ((549, 591), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (569, 591), False, 'from django.db import models\n'), ((605, 654), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (622, 654), False, 'from django.db import models\n'), ((664, 738), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Idea'], {'on_delete': 'models.CASCADE', 'related_name': '"""comments"""'}), "(Idea, on_delete=models.CASCADE, related_name='comments')\n", (681, 738), False, 'from django.db import models\n')] |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\business\advertising_manager.py
# Compiled at: 2017-04-27 01:01:18
# Size of source mod 2**32: 6129 bytes
from protocolbuffers import Business_pb2, DistributorOps_pb2
from business.business_enums import BusinessAdvertisingType
from distributor.ops import GenericProtocolBufferOp
from distributor.system import Distributor
import services, sims4
logger = sims4.log.Logger('Business', default_owner='jdimailig')
class HasAdvertisingManagerMixin:
    """Mixin that attaches an AdvertisingManager to a business object and
    exposes thin delegation wrappers around it.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Build the manager from this object's own configuration.
        self._advertising_manager = AdvertisingManager.create_from_business_manager(self)

    def get_advertising_multiplier(self):
        """Delegate: current customer-count multiplier for the chosen ad type."""
        return self._advertising_manager.get_advertising_multiplier()

    def set_advertising_type(self, advertising_type):
        """Delegate: switch the active advertising type."""
        self._advertising_manager.set_advertising_type(advertising_type)

    def get_advertising_type_for_gsi(self):
        """Return the active advertising type rendered as a string
        (presumably for GSI/debug display — name suggests it, not verified here).
        """
        manager = self._advertising_manager
        return str(manager._advertising_type)

    def get_current_advertising_cost(self):
        """Delegate: total advertising cost accrued so far."""
        return self._advertising_manager.get_current_advertising_cost()
class AdvertisingManager:
    """Tracks which advertising type a business is running and accrues its
    running cost over simulated time.
    """

    @classmethod
    def create_from_business_manager(cls, business_manager):
        """Build a manager from the business manager's tuned ad configuration."""
        # NOTE: deliberately constructs AdvertisingManager (not cls), matching
        # the original behavior for any subclass callers.
        return AdvertisingManager(business_manager, business_manager.tuning_data.advertising_configuration)

    def __init__(self, business_manager, advertising_configuration):
        self._business_manager = business_manager
        self._configuration = advertising_configuration
        # Start on the configuration's default ad type with nothing accrued yet.
        self._advertising_type = advertising_configuration.default_advertising_type
        self._advertising_update_time = None
        self._advertising_cost = 0

    def clear_state(self):
        """Drop accrued cost and forget the last accounting timestamp."""
        self._advertising_update_time = None
        self._advertising_cost = 0

    def open_business(self):
        """Re-apply the current ad type when the business opens (restarts the
        cost clock and re-broadcasts the selection)."""
        self.set_advertising_type(self._advertising_type)

    def get_current_advertising_cost(self):
        """Total cost: what's been banked plus what's accrued since the last update."""
        accrued = self._get_advertising_cost_since_last_update()
        return self._advertising_cost + accrued

    def get_advertising_cost_per_hour(self):
        """Hourly rate for the currently selected advertising type."""
        current_type = self._advertising_type
        return self._configuration.get_advertising_cost_per_hour(current_type)

    def set_advertising_type(self, advertising_type):
        """Switch to a new advertising type, banking cost accrued so far.

        An INVALID type is rejected (logged) but an update message is still
        broadcast for the unchanged selection, as before.
        """
        # Bank whatever the old type accrued before restarting the clock.
        self._advertising_cost += self._get_advertising_cost_since_last_update()
        self._advertising_update_time = services.time_service().sim_now
        if advertising_type != BusinessAdvertisingType.INVALID:
            self._advertising_type = advertising_type
        else:
            logger.error('Attempting to set an INVALID advertising type to {}. This will be ignored.', advertising_type)
        self._send_advertisement_update_message()

    def get_advertising_multiplier(self):
        """Customer-count multiplier configured for the current ad type."""
        return self._configuration.get_customer_count_multiplier(self._advertising_type)

    def _get_advertising_cost_since_last_update(self):
        """Cost accrued since the last timestamp; 0 on the first call (which
        also stamps the clock)."""
        now = services.time_service().sim_now
        if self._advertising_update_time is None:
            self._advertising_update_time = now
            return 0
        elapsed_hours = (now - self._advertising_update_time).in_hours()
        return elapsed_hours * self.get_advertising_cost_per_hour()

    def _send_advertisement_update_message(self):
        """Broadcast the current ad selection for this zone to clients."""
        update = Business_pb2.BusinessAdvertisementUpdate()
        update.zone_id = self._business_manager.business_zone_id
        update.advertisement_chosen = self._advertising_type
        op = GenericProtocolBufferOp(DistributorOps_pb2.Operation.BUSINESS_ADVERTISEMENT_DATA_UPDATE, update)
        Distributor.instance().add_op_with_no_owner(op)
"sims4.log.Logger",
"services.time_service",
"protocolbuffers.Business_pb2.BusinessAdvertisementUpdate",
"distributor.ops.GenericProtocolBufferOp",
"distributor.system.Distributor.instance"
] | [((574, 629), 'sims4.log.Logger', 'sims4.log.Logger', (['"""Business"""'], {'default_owner': '"""jdimailig"""'}), "('Business', default_owner='jdimailig')\n", (590, 629), False, 'import services, sims4\n'), ((3493, 3535), 'protocolbuffers.Business_pb2.BusinessAdvertisementUpdate', 'Business_pb2.BusinessAdvertisementUpdate', ([], {}), '()\n', (3533, 3535), False, 'from protocolbuffers import Business_pb2, DistributorOps_pb2\n'), ((3669, 3767), 'distributor.ops.GenericProtocolBufferOp', 'GenericProtocolBufferOp', (['DistributorOps_pb2.Operation.BUSINESS_ADVERTISEMENT_DATA_UPDATE', 'msg'], {}), '(DistributorOps_pb2.Operation.\n BUSINESS_ADVERTISEMENT_DATA_UPDATE, msg)\n', (3692, 3767), False, 'from distributor.ops import GenericProtocolBufferOp\n'), ((2502, 2525), 'services.time_service', 'services.time_service', ([], {}), '()\n', (2523, 2525), False, 'import services, sims4\n'), ((3039, 3062), 'services.time_service', 'services.time_service', ([], {}), '()\n', (3060, 3062), False, 'import services, sims4\n'), ((3771, 3793), 'distributor.system.Distributor.instance', 'Distributor.instance', ([], {}), '()\n', (3791, 3793), False, 'from distributor.system import Distributor\n')] |
import numpy as np
from mpi4py import MPI
from tacs import TACS, elements, constitutive, functions
from static_analysis_base_test import StaticTestCase
'''
Create two separate cantilevered plates connected by an RBE2 element.
Apply a load at the RBE2 center node and test KSFailure, StructuralMass,
and Compliance functions and sensitivities
----------- -----------
| |\ /| |
| | \ / | |
| Plate 1 |__\/__| Plate 2 |
| | /\ | |
| | / \ | |
| |/ \| |
------------ -----------
'''
# Reference values for the tested functions, in the order produced by
# setup_funcs: KSFailure, StructuralMass, Compliance, KSDisplacement.
FUNC_REFS = np.array([1.2600980396870352, 51400.0, 3767896.1409673616, 2.912191091671254])

# Length of plate in x/y direction
Lx = 10.0
Ly = 10.0

# Number of elements in x/y direction for each plate
nx = 4
ny = 4

# applied force at center node (fx, fy, fz, mx, my, mz per shell dof layout)
applied_force = np.array([1e8, 0.0, 1.0e6, 0.0, 0.0, 1e8])

# KS function weight
ksweight = 10.0
class ProblemTest(StaticTestCase.StaticTest):
    """Static-analysis test: two cantilevered plates tied by an RBE2 element.

    Builds the mesh on rank 0, applies a concentrated load at the RBE
    center node, and checks function values/sensitivities against FUNC_REFS.
    """

    N_PROCS = 2  # this is how many MPI processes to use for this TestCase.

    def setup_assembler(self, comm, dtype):
        """
        Setup mesh and tacs assembler for problem we will be testing.
        """

        # Overwrite default check values (complex-step is much tighter).
        if dtype == complex:
            self.rtol = 1e-5
            self.atol = 1e-8
            self.dh = 1e-50
        else:
            self.rtol = 1e-1
            self.atol = 1e-4
            self.dh = 1e-5

        # Create the stiffness object
        props = constitutive.MaterialProperties(rho=2570.0, E=70e9, nu=0.3, ys=350e6)
        stiff = constitutive.IsoShellConstitutive(props, t=0.1, tNum=0)

        # Set up the element transform function
        transform = elements.ShellNaturalTransform()
        shell = elements.Quad4Shell(transform, stiff)

        num_rbe_nodes = 0

        # Allocate the TACSCreator object
        vars_per_node = shell.getVarsPerNode()
        creator = TACS.Creator(comm, vars_per_node)

        if comm.rank == 0:
            num_elems = nx * ny
            num_nodes = (nx + 1) * (ny + 1)

            # Discretize (left) plate.
            # BUG FIX: dtype was previously passed positionally, which fed it
            # into np.linspace's 4th parameter (`endpoint`) instead of `dtype`.
            x = np.linspace(0, Lx, nx + 1, dtype=dtype)
            y = np.linspace(0, Ly, ny + 1, dtype=dtype)
            left_xyz = np.zeros([nx + 1, ny + 1, 3], dtype)
            left_xyz[:, :, 0], left_xyz[:, :, 1] = np.meshgrid(x, y, indexing='ij')
            left_node_ids = np.arange(num_nodes, dtype=np.intc).reshape(nx + 1, ny + 1)

            # Define right plate by copying left plate and shifting 2 * Lx
            right_xyz = left_xyz.copy()
            right_xyz[:, :, 0] += 2.0 * Lx
            right_node_ids = left_node_ids + num_nodes

            # Double the node/element count
            num_nodes *= 2
            num_elems *= 2

            # Set connectivity for each plate element
            conn = []
            for i in range(nx):
                for j in range(ny):
                    conn.extend([left_node_ids[i, j],
                                 left_node_ids[i + 1, j],
                                 left_node_ids[i, j + 1],
                                 left_node_ids[i + 1, j + 1]])
                    conn.extend([right_node_ids[i, j],
                                 right_node_ids[i + 1, j],
                                 right_node_ids[i, j + 1],
                                 right_node_ids[i + 1, j + 1]])

            # Append connectivity for rbe element
            center_node_id = num_nodes
            center_node_xyz = np.array([1.5 * Lx, 0.5 * Ly, 0.0], dtype=dtype)
            num_nodes += 1
            # Add center node as indep rbe node
            rbe_conn = [center_node_id]
            dep_nodes = []
            dummy_nodes = []
            # Append all dependent nodes and a dummy node for each dep node added
            for j in range(ny + 1):
                # Add nodes on right edge of left plate as dep RBE nodes
                dep_nodes.append(left_node_ids[-1, j])
                dummy_node_id = num_nodes
                dummy_nodes.append(dummy_node_id)
                # Add nodes on left edge of right plate as dep RBE nodes
                # (COMMENT FIX: these also go into dep_nodes, they are not indep)
                dep_nodes.append(right_node_ids[0, j])
                dummy_node_id = num_nodes + 1
                dummy_nodes.append(dummy_node_id)
                # Increment node count for new dummy nodes
                num_nodes += 2
            rbe_conn.extend(dep_nodes)
            rbe_conn.extend(dummy_nodes)
            dummy_node_xyz = np.zeros([len(dep_nodes), 3], dtype=dtype)

            # Add rbe to global connectivity
            num_rbe_nodes = len(rbe_conn)
            conn.extend(rbe_conn)
            num_elems += 1

            # Set element info: comp 0 = shells, comp 1 = the RBE
            conn = np.array(conn, dtype=np.intc)
            ptr = np.arange(0, 4 * num_elems + 1, 4, dtype=np.intc)
            comp_ids = np.zeros(num_elems, dtype=np.intc)
            # Correct last entries for RBE (it has num_rbe_nodes, not 4, nodes)
            ptr[-1] = ptr[-2] + num_rbe_nodes
            comp_ids[-1] = 1
            creator.setGlobalConnectivity(num_nodes, ptr, conn, comp_ids)

            # Set up the boundary conditions (fix the two outer edges)
            bcnodes = np.append(left_node_ids[0, :], right_node_ids[-1, :])
            creator.setBoundaryConditions(bcnodes)

            # Set the node locations
            xyz = np.append(left_xyz.flatten(), right_xyz.flatten())
            xyz = np.append(xyz.flatten(), center_node_xyz)
            xyz = np.append(xyz.flatten(), dummy_node_xyz)
            creator.setNodes(xyz.flatten())

        # Set up rbe object (every rank needs the node count from rank 0)
        num_rbe_nodes = comm.bcast(num_rbe_nodes, root=0)
        # Which dependent dofs are connected
        dep_dofs = np.array([1, 1, 1, 1, 1, 1], np.intc)
        # Set the artificial stiffness to be low to pass the sensitivity tests
        # This will affect the accuracy of the element behavior
        rbe = elements.RBE2(num_rbe_nodes, dep_dofs, C1=1e2, C2=1e-1)

        # Set the elements for each (only two) component
        element_list = [shell, rbe]
        creator.setElements(element_list)

        # Create the tacs assembler object
        assembler = creator.createTACS()

        return assembler

    def setup_tacs_vecs(self, assembler, force_vec, dv_pert_vec, ans_pert_vec, xpts_pert_vec):
        """
        Setup user-defined vectors for analysis and fd/cs sensitivity verification
        """
        local_num_nodes = assembler.getNumOwnedNodes()
        vars_per_node = assembler.getVarsPerNode()

        # The nodes have been distributed across processors now.
        # Find which nodes this processor owns.
        xpts0 = assembler.createNodeVec()
        assembler.getNodes(xpts0)
        xpts0_array = xpts0.getArray()
        # Split node vector into numpy arrays for easier parsing of vectors
        local_xyz = xpts0_array.reshape(local_num_nodes, 3)
        local_x, local_y, local_z = local_xyz[:, 0], local_xyz[:, 1], local_xyz[:, 2]

        # Create force vector
        f_array = force_vec.getArray().reshape(local_num_nodes, vars_per_node)
        # Apply the concentrated load at the RBE center node (1.5*Lx, 0.5*Ly)
        # (COMMENT FIX: this is a point load at the RBE center, not a beam tip)
        f_array[np.logical_and(local_x == 1.5 * Lx, local_y == 0.5 * Ly), :] = applied_force

        # Create temporary dv vec for doing fd/cs
        dv_pert_array = dv_pert_vec.getArray()
        dv_pert_array[:] = 1.0

        # Create temporary state variable vec for doing fd/cs
        ans_pert_array = ans_pert_vec.getArray()
        # reshape returns a view, so writes below still hit the TACS vector
        ans_pert_array = ans_pert_array.reshape(local_num_nodes, vars_per_node)
        # Perturbation that uniformly moves the right edge of the left plate upward
        ans_pert_array[local_x == Lx, 1] = 1.0

        # Define perturbation array for node locations
        xpts_pert_array = xpts_pert_vec.getArray()
        xpts_pert_array = xpts_pert_array.reshape(local_num_nodes, 3)
        # Perturbation that uniformly moves the right edge of the left plate to the right
        xpts_pert_array[local_x == Lx, 0] = 1.0

        return

    def setup_funcs(self, assembler):
        """
        Create a list of functions to be tested and their reference values for the problem
        """
        func_list = [functions.KSFailure(assembler, ksWeight=ksweight),
                     functions.StructuralMass(assembler),
                     functions.Compliance(assembler),
                     functions.KSDisplacement(assembler, ksWeight=ksweight, direction=[1.0, 1.0, 1.0])]
        return func_list, FUNC_REFS
| [
"tacs.functions.Compliance",
"tacs.TACS.Creator",
"tacs.constitutive.IsoShellConstitutive",
"tacs.functions.KSFailure",
"numpy.arange",
"numpy.logical_and",
"tacs.constitutive.MaterialProperties",
"tacs.elements.RBE2",
"numpy.append",
"numpy.array",
"numpy.linspace",
"tacs.elements.ShellNatura... | [((612, 690), 'numpy.array', 'np.array', (['[1.2600980396870352, 51400.0, 3767896.1409673616, 2.912191091671254]'], {}), '([1.2600980396870352, 51400.0, 3767896.1409673616, 2.912191091671254])\n', (620, 690), True, 'import numpy as np\n'), ((863, 925), 'numpy.array', 'np.array', (['[100000000.0, 0.0, 1000000.0, 0.0, 0.0, 100000000.0]'], {}), '([100000000.0, 0.0, 1000000.0, 0.0, 0.0, 100000000.0])\n', (871, 925), True, 'import numpy as np\n'), ((1516, 1605), 'tacs.constitutive.MaterialProperties', 'constitutive.MaterialProperties', ([], {'rho': '(2570.0)', 'E': '(70000000000.0)', 'nu': '(0.3)', 'ys': '(350000000.0)'}), '(rho=2570.0, E=70000000000.0, nu=0.3, ys=\n 350000000.0)\n', (1547, 1605), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((1602, 1657), 'tacs.constitutive.IsoShellConstitutive', 'constitutive.IsoShellConstitutive', (['props'], {'t': '(0.1)', 'tNum': '(0)'}), '(props, t=0.1, tNum=0)\n', (1635, 1657), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((1727, 1759), 'tacs.elements.ShellNaturalTransform', 'elements.ShellNaturalTransform', ([], {}), '()\n', (1757, 1759), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((1776, 1813), 'tacs.elements.Quad4Shell', 'elements.Quad4Shell', (['transform', 'stiff'], {}), '(transform, stiff)\n', (1795, 1813), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((1948, 1981), 'tacs.TACS.Creator', 'TACS.Creator', (['comm', 'vars_per_node'], {}), '(comm, vars_per_node)\n', (1960, 1981), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((5711, 5748), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1]', 'np.intc'], {}), '([1, 1, 1, 1, 1, 1], np.intc)\n', (5719, 5748), True, 'import numpy as np\n'), ((5906, 5962), 'tacs.elements.RBE2', 'elements.RBE2', (['num_rbe_nodes', 'dep_dofs'], {'C1': '(100.0)', 'C2': '(0.1)'}), '(num_rbe_nodes, dep_dofs, C1=100.0, C2=0.1)\n', (5919, 5962), False, 
'from tacs import TACS, elements, constitutive, functions\n'), ((2141, 2174), 'numpy.linspace', 'np.linspace', (['(0)', 'Lx', '(nx + 1)', 'dtype'], {}), '(0, Lx, nx + 1, dtype)\n', (2152, 2174), True, 'import numpy as np\n'), ((2191, 2224), 'numpy.linspace', 'np.linspace', (['(0)', 'Ly', '(ny + 1)', 'dtype'], {}), '(0, Ly, ny + 1, dtype)\n', (2202, 2224), True, 'import numpy as np\n'), ((2248, 2284), 'numpy.zeros', 'np.zeros', (['[nx + 1, ny + 1, 3]', 'dtype'], {}), '([nx + 1, ny + 1, 3], dtype)\n', (2256, 2284), True, 'import numpy as np\n'), ((2336, 2368), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (2347, 2368), True, 'import numpy as np\n'), ((3503, 3551), 'numpy.array', 'np.array', (['[1.5 * Lx, 0.5 * Ly, 0.0]'], {'dtype': 'dtype'}), '([1.5 * Lx, 0.5 * Ly, 0.0], dtype=dtype)\n', (3511, 3551), True, 'import numpy as np\n'), ((4741, 4770), 'numpy.array', 'np.array', (['conn'], {'dtype': 'np.intc'}), '(conn, dtype=np.intc)\n', (4749, 4770), True, 'import numpy as np\n'), ((4789, 4838), 'numpy.arange', 'np.arange', (['(0)', '(4 * num_elems + 1)', '(4)'], {'dtype': 'np.intc'}), '(0, 4 * num_elems + 1, 4, dtype=np.intc)\n', (4798, 4838), True, 'import numpy as np\n'), ((4862, 4896), 'numpy.zeros', 'np.zeros', (['num_elems'], {'dtype': 'np.intc'}), '(num_elems, dtype=np.intc)\n', (4870, 4896), True, 'import numpy as np\n'), ((5185, 5238), 'numpy.append', 'np.append', (['left_node_ids[0, :]', 'right_node_ids[-1, :]'], {}), '(left_node_ids[0, :], right_node_ids[-1, :])\n', (5194, 5238), True, 'import numpy as np\n'), ((8295, 8344), 'tacs.functions.KSFailure', 'functions.KSFailure', (['assembler'], {'ksWeight': 'ksweight'}), '(assembler, ksWeight=ksweight)\n', (8314, 8344), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((8367, 8402), 'tacs.functions.StructuralMass', 'functions.StructuralMass', (['assembler'], {}), '(assembler)\n', (8391, 8402), False, 'from tacs import TACS, elements, 
constitutive, functions\n'), ((8425, 8456), 'tacs.functions.Compliance', 'functions.Compliance', (['assembler'], {}), '(assembler)\n', (8445, 8456), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((8479, 8564), 'tacs.functions.KSDisplacement', 'functions.KSDisplacement', (['assembler'], {'ksWeight': 'ksweight', 'direction': '[1.0, 1.0, 1.0]'}), '(assembler, ksWeight=ksweight, direction=[1.0, 1.0,\n 1.0])\n', (8503, 8564), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((7169, 7225), 'numpy.logical_and', 'np.logical_and', (['(local_x == 1.5 * Lx)', '(local_y == 0.5 * Ly)'], {}), '(local_x == 1.5 * Lx, local_y == 0.5 * Ly)\n', (7183, 7225), True, 'import numpy as np\n'), ((2398, 2433), 'numpy.arange', 'np.arange', (['num_nodes'], {'dtype': 'np.intc'}), '(num_nodes, dtype=np.intc)\n', (2407, 2433), True, 'import numpy as np\n')] |
# -*- coding: UTF-8 -*-
#
# Copyright 2008-2011, <NAME>, <EMAIL>
#
# This file is part of Pyrit.
#
# Pyrit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pyrit. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import sys
def default_config():
    """Return the default Pyrit option set as a fresh dictionary."""
    return {
        'default_storage': 'file://',
        'rpc_server': 'false',
        'rpc_announce': 'true',
        'rpc_announce_broadcast': 'false',
        'rpc_knownclients': '',
        'workunit_size': '75000',
        'limit_ncpus': 0,
    }
def read_configfile(filename):
    """Parse *filename* and return a config dict.

    Starts from :func:`default_config`; lines of the form ``option = value``
    override known options.  Comment lines (``#``) and lines without ``=``
    are ignored; unknown options trigger a warning on stderr.

    Fixes vs. original: opened in text mode (``'rb'`` yields ``bytes`` under
    Python 3, breaking the ``str`` operations below) and replaced the
    Python-2-only ``print >> sys.stderr`` statement (a SyntaxError under
    Python 3) with ``sys.stderr.write``, which works on both.
    """
    config = default_config()
    with open(filename, 'r') as f:
        for line in f:
            if line.startswith('#') or '=' not in line:
                continue
            option, value = map(str.strip, line.split('=', 1))
            if option in config:
                config[option] = value
            else:
                sys.stderr.write("WARNING: Unknown option '%s' "
                                 "in configfile '%s'\n" % (option, filename))
    return config
def write_configfile(config, filename):
    """Serialize *config* to *filename* as ``option = value`` lines,
    sorted by option name.

    Fix vs. original: opened in text mode.  The original used ``'wb'`` while
    writing ``str`` objects, which raises ``TypeError`` under Python 3.
    """
    with open(filename, 'w') as f:
        for option, value in sorted(config.items()):
            f.write("%s = %s\n" % (option, value))
# Per-user configuration lives in ~/.pyrit/config.
configpath = os.path.expanduser(os.path.join('~', '.pyrit'))
default_configfile = os.path.join(configpath, 'config')
# Load the user's config if present; otherwise materialize the defaults on
# disk so the user has a file to edit on the next run.
if os.path.exists(default_configfile):
    cfg = read_configfile(default_configfile)
else:
    cfg = default_config()
    if not os.path.exists(configpath):
        os.makedirs(configpath)
    write_configfile(cfg, default_configfile)
| [
"os.path.exists",
"os.path.join",
"os.makedirs"
] | [((1930, 1964), 'os.path.join', 'os.path.join', (['configpath', '"""config"""'], {}), "(configpath, 'config')\n", (1942, 1964), False, 'import os\n'), ((1969, 2003), 'os.path.exists', 'os.path.exists', (['default_configfile'], {}), '(default_configfile)\n', (1983, 2003), False, 'import os\n'), ((1880, 1907), 'os.path.join', 'os.path.join', (['"""~"""', '""".pyrit"""'], {}), "('~', '.pyrit')\n", (1892, 1907), False, 'import os\n'), ((2095, 2121), 'os.path.exists', 'os.path.exists', (['configpath'], {}), '(configpath)\n', (2109, 2121), False, 'import os\n'), ((2131, 2154), 'os.makedirs', 'os.makedirs', (['configpath'], {}), '(configpath)\n', (2142, 2154), False, 'import os\n')] |
import sys
import pylab as plb
import numpy as np
import mountaincar
class DummyAgent():
    """A not so good agent for the mountain-car task.

    Holds a MountainCar environment and can run a random-action trial with
    live visualization.  `learn` is intentionally left unimplemented.
    """

    def __init__(self, mountain_car = None, parameter1 = 3.0):
        # Fall back to a fresh environment when none is supplied.
        if mountain_car is None:
            self.mountain_car = mountaincar.MountainCar()
        else:
            self.mountain_car = mountain_car

        self.parameter1 = parameter1

    def visualize_trial(self, n_steps = 200):
        """Do a trial without learning, with display.

        Parameters
        ----------
        n_steps -- number of steps to simulate for
        """
        # prepare for the visualization
        plb.ion()
        plb.pause(0.0001)
        mv = mountaincar.MountainCarViewer(self.mountain_car)
        mv.create_figure(n_steps, n_steps)
        plb.show()

        # make sure the mountain-car is reset
        self.mountain_car.reset()

        for n in range(n_steps):
            # BUG FIX: the original passed sys.stdout.flush() as a print
            # argument; flush() returns None, so every step printed a
            # spurious "None".  Print first, then flush.
            print('\rt =', self.mountain_car.t)
            sys.stdout.flush()

            # choose a random action
            self.mountain_car.apply_force(np.random.randint(3) - 1)
            # simulate the timestep
            self.mountain_car.simulate_timesteps(100, 0.01)

            # update the visualization
            mv.update_figure()
            plb.show()
            plb.pause(0.0001)

            # check for rewards
            if self.mountain_car.R > 0.0:
                print("\rreward obtained at t = ", self.mountain_car.t)
                break

    def learn(self):
        # This is your job!
        pass
if __name__ == "__main__":
    # Run a single visualized random-policy trial.
    agent = DummyAgent()
    agent.visualize_trial()
    plb.show()
| [
"pylab.ion",
"numpy.random.randint",
"pylab.pause",
"mountaincar.MountainCar",
"sys.stdout.flush",
"mountaincar.MountainCarViewer",
"pylab.show"
] | [((1688, 1698), 'pylab.show', 'plb.show', ([], {}), '()\n', (1696, 1698), True, 'import pylab as plb\n'), ((674, 683), 'pylab.ion', 'plb.ion', ([], {}), '()\n', (681, 683), True, 'import pylab as plb\n'), ((692, 709), 'pylab.pause', 'plb.pause', (['(0.0001)'], {}), '(0.0001)\n', (701, 709), True, 'import pylab as plb\n'), ((723, 771), 'mountaincar.MountainCarViewer', 'mountaincar.MountainCarViewer', (['self.mountain_car'], {}), '(self.mountain_car)\n', (752, 771), False, 'import mountaincar\n'), ((823, 833), 'pylab.show', 'plb.show', ([], {}), '()\n', (831, 833), True, 'import pylab as plb\n'), ((291, 316), 'mountaincar.MountainCar', 'mountaincar.MountainCar', ([], {}), '()\n', (314, 316), False, 'import mountaincar\n'), ((1338, 1348), 'pylab.show', 'plb.show', ([], {}), '()\n', (1346, 1348), True, 'import pylab as plb\n'), ((1361, 1378), 'pylab.pause', 'plb.pause', (['(0.0001)'], {}), '(0.0001)\n', (1370, 1378), True, 'import pylab as plb\n'), ((1021, 1039), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1037, 1039), False, 'import sys\n'), ((1133, 1153), 'numpy.random.randint', 'np.random.randint', (['(3)'], {}), '(3)\n', (1150, 1153), True, 'import numpy as np\n')] |
import numpy as np
from pyquil import Program
from pyquil.api import QuantumComputer, get_qc
from grove.alpha.jordan_gradient.gradient_utils import (binary_float_to_decimal_float,
measurements_to_bf)
from grove.alpha.phaseestimation.phase_estimation import phase_estimation
def gradient_program(f_h: float, precision: int) -> Program:
    """
    Gradient estimation via Jordan's algorithm (10.1103/PhysRevLett.95.050501).

    :param f_h: Oracle output at perturbation h.
    :param precision: Bit precision of gradient.
    :return: Quil program to estimate gradient of f.
    """
    # The oracle value is written into a global phase factor.
    phase = np.exp(2j * np.pi * abs(f_h))
    oracle_unitary = np.array([[phase, 0],
                               [0, phase]])
    # Phase estimation recovers the encoded value to `precision` bits.
    return phase_estimation(oracle_unitary, precision)
def estimate_gradient(f_h: float, precision: int,
                      gradient_max: int = 1,
                      n_measurements: int = 50,
                      qc: QuantumComputer = None) -> float:
    """
    Estimate the gradient using function evaluation at perturbation, h.

    :param f_h: Oracle output at perturbation h.
    :param precision: Bit precision of gradient.
    :param gradient_max: OOM estimate of largest gradient value.
    :param n_measurements: Number of times to measure system.
    :param qc: The QuantumComputer object.
    :return: Decimal estimate of gradient.
    """
    # Normalize the oracle output into the range the encoding can represent.
    f_h = f_h * (1. / gradient_max)
    perturbation_sign = np.sign(f_h)

    p_gradient = gradient_program(f_h, precision)

    # Default to a QVM sized to the program when no device was given.
    if qc is None:
        qc = get_qc(f"{len(p_gradient.get_qubits())}q-qvm")

    p_gradient.wrap_in_numshots_loop(n_measurements)
    executable = qc.compiler.native_quil_to_executable(p_gradient)
    measurements = qc.run(executable)

    # Fold the shot results into a binary fraction, restore the sign,
    # then convert to decimal and undo the normalization.
    bf_estimate = perturbation_sign * measurements_to_bf(measurements)
    deci_estimate = binary_float_to_decimal_float('{0:.16f}'.format(bf_estimate))
    return deci_estimate * gradient_max
| [
"grove.alpha.phaseestimation.phase_estimation.phase_estimation",
"grove.alpha.jordan_gradient.gradient_utils.measurements_to_bf",
"numpy.array",
"grove.alpha.jordan_gradient.gradient_utils.binary_float_to_decimal_float",
"numpy.sign"
] | [((744, 792), 'numpy.array', 'np.array', (['[[phase_factor, 0], [0, phase_factor]]'], {}), '([[phase_factor, 0], [0, phase_factor]])\n', (752, 792), True, 'import numpy as np\n'), ((828, 858), 'grove.alpha.phaseestimation.phase_estimation.phase_estimation', 'phase_estimation', (['U', 'precision'], {}), '(U, precision)\n', (844, 858), False, 'from grove.alpha.phaseestimation.phase_estimation import phase_estimation\n'), ((1630, 1642), 'numpy.sign', 'np.sign', (['f_h'], {}), '(f_h)\n', (1637, 1642), True, 'import numpy as np\n'), ((2129, 2171), 'grove.alpha.jordan_gradient.gradient_utils.binary_float_to_decimal_float', 'binary_float_to_decimal_float', (['bf_explicit'], {}), '(bf_explicit)\n', (2158, 2171), False, 'from grove.alpha.jordan_gradient.gradient_utils import binary_float_to_decimal_float, measurements_to_bf\n'), ((2027, 2059), 'grove.alpha.jordan_gradient.gradient_utils.measurements_to_bf', 'measurements_to_bf', (['measurements'], {}), '(measurements)\n', (2045, 2059), False, 'from grove.alpha.jordan_gradient.gradient_utils import binary_float_to_decimal_float, measurements_to_bf\n')] |
from torch import randn
from torch.nn import Conv2d
from backpack import extend
def data_conv2d(device="cpu"):
    """Build an extended Conv2d module plus matching random tensors for
    autograd-vs-BackPACK Jacobian checks (no padding, stride 1)."""
    batch, in_channels, height, width = 100, 10, 32, 32
    out_channels, kernel_h, kernel_w = 25, 5, 5

    X = randn(batch, in_channels, height, width,
              requires_grad=True, device=device)
    module = extend(Conv2d(in_channels, out_channels,
                           (kernel_h, kernel_w))).to(device=device)
    out = module(X)

    # Valid convolution: output spatial dims shrink by (kernel - 1).
    out_h = height - (kernel_h - 1)
    out_w = width - (kernel_w - 1)

    vin = randn(batch, out_channels, out_h, out_w, device=device)
    vout = randn(batch, in_channels, height, width, device=device)

    return {
        "X": X,
        "module": module,
        "output": out,
        "vout_ag": vout,
        "vout_bp": vout.view(batch, -1, 1),
        "vin_ag": vin,
        "vin_bp": vin.view(batch, -1, 1),
    }
| [
"torch.randn",
"torch.nn.Conv2d"
] | [((200, 258), 'torch.randn', 'randn', (['N', 'Cin', 'Hin', 'Win'], {'requires_grad': '(True)', 'device': 'device'}), '(N, Cin, Hin, Win, requires_grad=True, device=device)\n', (205, 258), False, 'from torch import randn\n'), ((429, 470), 'torch.randn', 'randn', (['N', 'Cout', 'Hout', 'Wout'], {'device': 'device'}), '(N, Cout, Hout, Wout, device=device)\n', (434, 470), False, 'from torch import randn\n'), ((482, 520), 'torch.randn', 'randn', (['N', 'Cin', 'Hin', 'Win'], {'device': 'device'}), '(N, Cin, Hin, Win, device=device)\n', (487, 520), False, 'from torch import randn\n'), ((279, 316), 'torch.nn.Conv2d', 'Conv2d', (['Cin', 'Cout', '(KernelH, KernelW)'], {}), '(Cin, Cout, (KernelH, KernelW))\n', (285, 316), False, 'from torch.nn import Conv2d\n')] |
# -*- coding: utf-8 -*-
"""
Spyder Editor
Code written by <NAME> with modifications by <NAME> and <NAME>
This file produces plots comparing our first order sensitivity with BS vega.
"""
# %%
# To run the stuff, you need the package plotly in your anaconda "conda install plotly"
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.io as pio
# Configure plotly: offline notebook mode with static SVG rendering.
init_notebook_mode()
pio.renderers.default='svg'
import numpy as np
import numpy.random
import pandas as pd
from scipy.stats import norm, multivariate_normal
from scipy.optimize import minimize
import time
_tstart_stack = []
def tic():
_tstart_stack.append(time.time())
def toc(fmt="Elapsed: %s s"):
print(fmt % (time.time() - _tstart_stack.pop()))
# %%
# We first provide the computation of a call option according to BS (we assume Log normal distribution)
# definition of the dplus and minus functions
# and the BS formula.
def dplus(S, K, T, sigma):
    """Black-Scholes d+ term for spot S, strike K, maturity T, vol sigma."""
    vol_sqrt_t = sigma * T ** 0.5
    return np.log(S / K) / vol_sqrt_t + vol_sqrt_t / 2
def dminus(S, K, T, sigma):
    """Black-Scholes d- term for spot S, strike K, maturity T, vol sigma."""
    vol_sqrt_t = sigma * T ** 0.5
    return np.log(S / K) / vol_sqrt_t - vol_sqrt_t / 2
def BS(S, K, T, sigma, Type = 1):
    """Black-Scholes price (zero rate): Type=1 call, Type=-1 put.
    Assumes a lognormal terminal distribution."""
    asset_leg = S * norm.cdf(Type * dplus(S, K, T, sigma))
    strike_leg = K * norm.cdf(Type * dminus(S, K, T, sigma))
    return Type * (asset_leg - strike_leg)
# Now we provide the computation for the exact call according to the computations in BDT
# We take p = 2
def Robust_Call_Exact_fun(S, K, T, sigma, delta):
    """Exact robust call price for uncertainty radius delta (p = 2),
    computed by minimizing the dual objective from BDT."""
    def objective(v):  # v[0] = a, v[1] = lambda
        shifted_strike = max(K - (2 * v[0] + 1) / (2 * v[1]), 0.000001)
        return (BS(S, shifted_strike, T, sigma)
                + v[0] ** 2 / (2 * v[1])
                + 0.5 * v[1] * delta ** 2)

    def strike_constraint(v):
        # v[0] must keep the shifted strike positive.
        return K - (2 * v[0] + 1) / (2 * v[1])

    result = minimize(objective,
                      np.array([0, 1]),
                      constraints={'type': 'ineq', 'fun': strike_constraint},
                      method='SLSQP',
                      bounds=((-np.Inf, np.Inf), (0, np.Inf)))
    return result.fun

# Vectorized wrapper so the exact price can be evaluated on arrays of inputs.
Robust_Call_Exact = np.vectorize(Robust_Call_Exact_fun)
# Now we provide the computation for the first order model uncertainty sensitivity (Upsilon)
# and the resulting BS robust price approximation
# We take p = 2
def Robust_Call_Upsilon(S, K, T, sigma, delta):
    """First-order model-uncertainty sensitivity (Upsilon) of a call, p = 2.
    Note: delta does not enter the first-order coefficient itself."""
    prob_itm = norm.cdf(dminus(S, K, T, sigma))
    return np.sqrt(prob_itm * (1 - prob_itm))
def Robust_Call_Approximation(S, K, T, sigma, delta):
    """First-order robust call approximation: BS price + delta * Upsilon."""
    base_price = BS(S, K, T, sigma)
    return base_price + Robust_Call_Upsilon(S, K, T, sigma, delta) * delta
# %%
# Ploting the robust call and FO appriximation for a given strike and increasing uncertainty radius
# Section 1: exact robust price vs first-order approximation as the
# uncertainty radius delta grows, at a fixed strike.
S = 1
K = 1.2
T = 1
sigma = 0.2
Delta = np.linspace(0, 0.2, 50)
Y0 = BS(S, K, T, sigma)
Y1 = Robust_Call_Approximation(S, K, T, sigma, Delta)
Y2 = Robust_Call_Exact(S, K, T, sigma, Delta)
fig = go.Figure()
fig.add_scatter(x = Delta, y = Y1, name = 'FO')
fig.add_scatter(x = Delta, y = Y2, name = 'RBS')
#fig.layout.title = "Exact Robust Call vs First Order Approx: Strike K="+str(K)+", BS Price="+str(np.round(Y0,4))
fig.layout.xaxis.title = "delta"
fig.layout.yaxis.title = "Price"
iplot(fig)
# %%
# Ploting the robust call and FO appriximation for a given radius of uncertainty and a range of strikes
# Section 2: plain BS vs exact robust vs first-order prices across strikes,
# at a fixed uncertainty radius delta.
S = 1
K = np.linspace(0.6, 1.4, 100)
T = 1
sigma = 0.2
delta = 0.05
Y0 = Robust_Call_Approximation(S, K, T, sigma, delta)
Y1 = Robust_Call_Exact(S, K, T, sigma, delta)
Y2 = BS(S, K, T, sigma)
fig = go.Figure()
fig.add_scatter(x = K, y = Y0, name = 'FO')
fig.add_scatter(x = K, y = Y1, name = 'Exact')
fig.add_scatter(x = K, y = Y2, name = 'BS')
fig.layout.title = "Call Price vs Exact Robust Call and First Order Approx : delta ="+str(delta)
fig.layout.xaxis.title = "Strike"
fig.layout.yaxis.title = "Price"
iplot(fig)
# %%
# Run a plot to comapre BS Vega and BS Upsilon (Uncertainty Sensitivity)
# Plots show the sensitivities
# Section 3: compare the classic BS Vega profile with the uncertainty
# sensitivity Upsilon across a wide strike range.
S = 1
K = np.linspace(0.4 * S, 2 * S, 100)
T = 1
sigma = 0.2
delta = 0.02 #is irrelevant here
Y1 = S * (norm.pdf(dplus(S, K , T, sigma)))
Y0 = S * (Robust_Call_Upsilon(S, K, T, sigma, delta))
fig = go.Figure()
fig.add_scatter(x = K, y = Y0, name = 'BS Upsilon')
fig.add_scatter(x = K, y = Y1, name = 'BS Vega')
#fig.layout.title = "Call Price Sensitivity: Vega vs Upsilon, sigma= "+str(sigma)
fig.layout.xaxis.title = "Strike"
fig.layout.yaxis.title = "Price"
iplot(fig)
# %%
# Run a ploting to comapre BS Vega and BS Upsilon (Uncertainty Sensitivity)
# Plots show the sensitivities
# Section 4: same comparison, but with Vega evaluated at a shifted strike
# and vertically offset, over a narrower strike range.
S = 1
K = np.linspace(0.6 * S, 1.4 * S, 100)
T = 1
sigma = 0.2
delta = 0.02 #is irrelevant here
Y0 = S * (norm.pdf(dplus(S, K * np.exp(T * sigma ** 2), T, sigma)) + 1/2-1/np.sqrt(2 * np.pi))
Y1 = S * (Robust_Call_Upsilon(S, K, T, sigma, delta))
fig = go.Figure()
fig.add_scatter(x = K, y = Y0, name = 'BS Vega (shifted) + const')
fig.add_scatter(x = K, y = Y1, name = 'BS Upsilon')
fig.layout.title = "Call Price Sensitivity: Vega vs Upsilon, sigma= "+str(sigma)
fig.layout.xaxis.title = "Strike"
fig.layout.yaxis.title = "Price"
iplot(fig)
| [
"numpy.sqrt",
"plotly.offline.iplot",
"scipy.optimize.minimize",
"plotly.offline.init_notebook_mode",
"numpy.log",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"time.time",
"plotly.graph_objs.Figure",
"numpy.vectorize"
] | [((418, 438), 'plotly.offline.init_notebook_mode', 'init_notebook_mode', ([], {}), '()\n', (436, 438), False, 'from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n'), ((2182, 2217), 'numpy.vectorize', 'np.vectorize', (['Robust_Call_Exact_fun'], {}), '(Robust_Call_Exact_fun)\n', (2194, 2217), True, 'import numpy as np\n'), ((2864, 2887), 'numpy.linspace', 'np.linspace', (['(0)', '(0.2)', '(50)'], {}), '(0, 0.2, 50)\n', (2875, 2887), True, 'import numpy as np\n'), ((3021, 3032), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (3030, 3032), True, 'import plotly.graph_objs as go\n'), ((3311, 3321), 'plotly.offline.iplot', 'iplot', (['fig'], {}), '(fig)\n', (3316, 3321), False, 'from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n'), ((3443, 3469), 'numpy.linspace', 'np.linspace', (['(0.6)', '(1.4)', '(100)'], {}), '(0.6, 1.4, 100)\n', (3454, 3469), True, 'import numpy as np\n'), ((3634, 3645), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (3643, 3645), True, 'import plotly.graph_objs as go\n'), ((3946, 3956), 'plotly.offline.iplot', 'iplot', (['fig'], {}), '(fig)\n', (3951, 3956), False, 'from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n'), ((4078, 4110), 'numpy.linspace', 'np.linspace', (['(0.4 * S)', '(2 * S)', '(100)'], {}), '(0.4 * S, 2 * S, 100)\n', (4089, 4110), True, 'import numpy as np\n'), ((4268, 4279), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (4277, 4279), True, 'import plotly.graph_objs as go\n'), ((4533, 4543), 'plotly.offline.iplot', 'iplot', (['fig'], {}), '(fig)\n', (4538, 4543), False, 'from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n'), ((4668, 4702), 'numpy.linspace', 'np.linspace', (['(0.6 * S)', '(1.4 * S)', '(100)'], {}), '(0.6 * S, 1.4 * S, 100)\n', (4679, 4702), True, 'import numpy as np\n'), ((4911, 4922), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (4920, 4922), True, 
'import plotly.graph_objs as go\n'), ((5193, 5203), 'plotly.offline.iplot', 'iplot', (['fig'], {}), '(fig)\n', (5198, 5203), False, 'from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n'), ((1909, 1925), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1917, 1925), True, 'import numpy as np\n'), ((1982, 2051), 'scipy.optimize.minimize', 'minimize', (['fun', 'guess'], {'constraints': 'cons', 'method': '"""SLSQP"""', 'bounds': 'bounds'}), "(fun, guess, constraints=cons, method='SLSQP', bounds=bounds)\n", (1990, 2051), False, 'from scipy.optimize import minimize\n'), ((2487, 2511), 'numpy.sqrt', 'np.sqrt', (['(muK * (1 - muK))'], {}), '(muK * (1 - muK))\n', (2494, 2511), True, 'import numpy as np\n'), ((682, 693), 'time.time', 'time.time', ([], {}), '()\n', (691, 693), False, 'import time\n'), ((1028, 1041), 'numpy.log', 'np.log', (['(S / K)'], {}), '(S / K)\n', (1034, 1041), True, 'import numpy as np\n'), ((1128, 1141), 'numpy.log', 'np.log', (['(S / K)'], {}), '(S / K)\n', (1134, 1141), True, 'import numpy as np\n'), ((4830, 4848), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (4837, 4848), True, 'import numpy as np\n'), ((743, 754), 'time.time', 'time.time', ([], {}), '()\n', (752, 754), False, 'import time\n'), ((4787, 4809), 'numpy.exp', 'np.exp', (['(T * sigma ** 2)'], {}), '(T * sigma ** 2)\n', (4793, 4809), True, 'import numpy as np\n')] |
import pytest
import pglet
from pglet import Textbox, Stack
@pytest.fixture
def page():
    """Provide a headless pglet page for the add() tests."""
    test_page = pglet.page('test_add', no_window=True)
    return test_page
def test_add_single_control(page):
    # add() with one control returns that control with its id set.
    added = page.add(Textbox(id="txt1", label="<NAME>:"))
    assert added.id == "txt1", "Test failed"
def test_add_controls_argv(page):
    # add() with multiple positional controls returns them as a list.
    first = Textbox(id="firstName", label="<NAME>:")
    last = Textbox(id="lastName", label="<NAME>:")
    added = page.add(first, last, to="page", at=0)
    assert added == [first, last], "Test failed"
def test_add_controls_list(page):
    # add() also accepts a single list of controls.
    first = Textbox(id="firstName", label="<NAME>:")
    last = Textbox(id="lastName", label="<NAME>:")
    added = page.add([first, last], to="page", at=0)
    assert added == [first, last], "Test failed"
def test_add_controls_to_another_control(page):
    # Controls added inside a container get the container id as a prefix.
    container = Stack(id="stack1", horizontal=True)
    page.add(container)
    textbox = page.add(Textbox(id="firstName", label="<NAME>:"),
                       to=container, at=0)
    assert textbox.id == "stack1:firstName", "Test failed"
"pglet.page",
"pglet.Textbox",
"pglet.Stack"
] | [((100, 138), 'pglet.page', 'pglet.page', (['"""test_add"""'], {'no_window': '(True)'}), "('test_add', no_window=True)\n", (110, 138), False, 'import pglet\n'), ((324, 364), 'pglet.Textbox', 'Textbox', ([], {'id': '"""firstName"""', 'label': '"""<NAME>:"""'}), "(id='firstName', label='<NAME>:')\n", (331, 364), False, 'from pglet import Textbox, Stack\n'), ((374, 413), 'pglet.Textbox', 'Textbox', ([], {'id': '"""lastName"""', 'label': '"""<NAME>:"""'}), "(id='lastName', label='<NAME>:')\n", (381, 413), False, 'from pglet import Textbox, Stack\n'), ((550, 590), 'pglet.Textbox', 'Textbox', ([], {'id': '"""firstName"""', 'label': '"""<NAME>:"""'}), "(id='firstName', label='<NAME>:')\n", (557, 590), False, 'from pglet import Textbox, Stack\n'), ((600, 639), 'pglet.Textbox', 'Textbox', ([], {'id': '"""lastName"""', 'label': '"""<NAME>:"""'}), "(id='lastName', label='<NAME>:')\n", (607, 639), False, 'from pglet import Textbox, Stack\n'), ((799, 834), 'pglet.Stack', 'Stack', ([], {'id': '"""stack1"""', 'horizontal': '(True)'}), "(id='stack1', horizontal=True)\n", (804, 834), False, 'from pglet import Textbox, Stack\n'), ((197, 232), 'pglet.Textbox', 'Textbox', ([], {'id': '"""txt1"""', 'label': '"""<NAME>:"""'}), "(id='txt1', label='<NAME>:')\n", (204, 232), False, 'from pglet import Textbox, Stack\n'), ((874, 914), 'pglet.Textbox', 'Textbox', ([], {'id': '"""firstName"""', 'label': '"""<NAME>:"""'}), "(id='firstName', label='<NAME>:')\n", (881, 914), False, 'from pglet import Textbox, Stack\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gc
import sys
import random
import unittest
from cllist import sllist
from cllist import sllistnode
from cllist import dllist
from cllist import dllistnode
# Surface uncollectable objects and per-collection stats while the suite runs,
# to catch reference leaks in the C extension under test.
gc.set_debug(gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_STATS)
if sys.hexversion >= 0x03000000:
    # python 3 compatibility wrappers
    def py23_xrange(*args):
        """Lazy integer range (py3 range)."""
        return range(*args)

    def py23_range(*args):
        """Materialized integer range (py3 list(range))."""
        return list(range(*args))

    def cmp(a, b):
        """Reimplementation of the removed py2 builtin cmp()."""
        if a == b:
            return 0
        return -1 if a < b else 1
else:
    # python 2 compatibility wrappers
    def py23_xrange(*args):
        return xrange(*args)

    def py23_range(*args):
        return range(*args)
class testsllist(unittest.TestCase):
    def test_init_empty(self):
        # An empty constructor yields a zero-length list.
        ll = sllist()
        self.assertEqual(len(ll), 0)
        self.assertEqual(ll.size, 0)
        self.assertEqual(list(ll), [])
    def test_init_with_sequence(self):
        # Construction from a sequence preserves order and length.
        ref = py23_range(0, 1024, 4)
        ll = sllist(ref)
        self.assertEqual(len(ll), len(ref))
        self.assertEqual(ll.size, len(ref))
        self.assertEqual(list(ll), ref)
    def test_init_with_non_sequence(self):
        # Non-iterable constructor arguments must raise TypeError.
        self.assertRaises(TypeError, sllist, 1)
        self.assertRaises(TypeError, sllist, 1.5)
    def test_str(self):
        a = sllist([])
        self.assertEqual(str(a), 'sllist()')
        b = sllist([None, 1, 'abc'])
        # str() renders elements via str(), hence no quotes around abc.
        self.assertEqual(str(b), 'sllist([None, 1, abc])')
    def test_repr(self):
        a = sllist([])
        self.assertEqual(repr(a), 'sllist()')
        b = sllist([None, 1, 'abc'])
        # repr() renders elements via repr(), hence the quoted 'abc'.
        self.assertEqual(repr(b), 'sllist([None, 1, \'abc\'])')
    def test_node_str(self):
        # Node str() shows the node value via str().
        a = sllist([None, None]).first
        self.assertEqual(str(a), 'sllistnode(None)')
        b = sllist([1, None]).first
        self.assertEqual(str(b), 'sllistnode(1)')
        c = sllist(['abc', None]).first
        self.assertEqual(str(c), 'sllistnode(abc)')
    def test_node_repr(self):
        # Node repr() shows the node value via repr(), in angle brackets.
        a = sllist([None]).first
        self.assertEqual(repr(a), '<sllistnode(None)>')
        b = sllist([1, None]).first
        self.assertEqual(repr(b), '<sllistnode(1)>')
        c = sllist(['abc', None]).first
        self.assertEqual(repr(c), '<sllistnode(\'abc\')>')
    # The comparison tests below all use the same fixtures: a/b differ in
    # length only, c/d differ in the last element, e/f differ in the first.
    def test_cmp(self):
        a = sllist(py23_xrange(0, 1100))
        b = sllist(py23_xrange(0, 1101))
        c = sllist([1, 2, 3, 4])
        d = sllist([1, 2, 3, 5])
        e = sllist([1, 0, 0, 0])
        f = sllist([0, 0, 0, 0])
        self.assertEqual(cmp(a, a), 0)
        self.assertEqual(cmp(a, b), -1)
        self.assertEqual(cmp(b, a), 1)
        self.assertEqual(cmp(c, d), -1)
        self.assertEqual(cmp(d, c), 1)
        self.assertEqual(cmp(e, f), 1)
        self.assertEqual(cmp(f, e), -1)
    def test_cmp_nonlist(self):
        # NOTE(review): b wraps the xrange object in a list rather than
        # materializing it; the assertions only require a nonzero ordering,
        # so the test still holds — confirm this was intended.
        a = sllist(py23_xrange(0, 1100))
        b = [py23_xrange(0, 1100)]
        if sys.hexversion < 0x03000000:
            # actual order is not specified by language
            self.assertNotEqual(cmp(a, b), 0)
            self.assertNotEqual(cmp(b, a), 0)
            self.assertNotEqual(cmp([], a), 0)
            self.assertNotEqual(cmp(a, []), 0)
    def test_eq(self):
        a = sllist(py23_xrange(0, 1100))
        b = sllist(py23_xrange(0, 1101))
        c = sllist([1, 2, 3, 4])
        d = sllist([1, 2, 3, 5])
        e = sllist([1, 0, 0, 0])
        f = sllist([0, 0, 0, 0])
        self.assertTrue(sllist() == sllist())
        self.assertTrue(a == a)
        self.assertFalse(sllist() == a)
        self.assertFalse(a == sllist())
        self.assertFalse(a == b)
        self.assertFalse(b == a)
        self.assertFalse(c == d)
        self.assertFalse(d == c)
        self.assertFalse(e == f)
        self.assertFalse(f == e)
    def test_ne(self):
        a = sllist(py23_xrange(0, 1100))
        b = sllist(py23_xrange(0, 1101))
        c = sllist([1, 2, 3, 4])
        d = sllist([1, 2, 3, 5])
        e = sllist([1, 0, 0, 0])
        f = sllist([0, 0, 0, 0])
        self.assertFalse(sllist() != sllist())
        self.assertFalse(a != a)
        self.assertTrue(sllist() != a)
        self.assertTrue(a != sllist())
        self.assertTrue(a != b)
        self.assertTrue(b != a)
        self.assertTrue(c != d)
        self.assertTrue(d != c)
        self.assertTrue(e != f)
        self.assertTrue(f != e)
    def test_lt(self):
        a = sllist(py23_xrange(0, 1100))
        b = sllist(py23_xrange(0, 1101))
        c = sllist([1, 2, 3, 4])
        d = sllist([1, 2, 3, 5])
        e = sllist([1, 0, 0, 0])
        f = sllist([0, 0, 0, 0])
        self.assertFalse(sllist() < sllist())
        self.assertFalse(a < a)
        self.assertTrue(sllist() < a)
        self.assertFalse(a < sllist())
        self.assertTrue(a < b)
        self.assertFalse(b < a)
        self.assertTrue(c < d)
        self.assertFalse(d < c)
        self.assertFalse(e < f)
        self.assertTrue(f < e)
    def test_gt(self):
        a = sllist(py23_xrange(0, 1100))
        b = sllist(py23_xrange(0, 1101))
        c = sllist([1, 2, 3, 4])
        d = sllist([1, 2, 3, 5])
        e = sllist([1, 0, 0, 0])
        f = sllist([0, 0, 0, 0])
        self.assertFalse(sllist() > sllist())
        self.assertFalse(a > a)
        self.assertFalse(sllist() > a)
        self.assertTrue(a > sllist())
        self.assertFalse(a > b)
        self.assertTrue(b > a)
        self.assertFalse(c > d)
        self.assertTrue(d > c)
        self.assertTrue(e > f)
        self.assertFalse(f > e)
    def test_le(self):
        a = sllist(py23_xrange(0, 1100))
        b = sllist(py23_xrange(0, 1101))
        c = sllist([1, 2, 3, 4])
        d = sllist([1, 2, 3, 5])
        e = sllist([1, 0, 0, 0])
        f = sllist([0, 0, 0, 0])
        self.assertTrue(sllist() <= sllist())
        self.assertTrue(a <= a)
        self.assertTrue(sllist() <= a)
        self.assertFalse(a <= sllist())
        self.assertTrue(a <= b)
        self.assertFalse(b <= a)
        self.assertTrue(c <= d)
        self.assertFalse(d <= c)
        self.assertFalse(e <= f)
        self.assertTrue(f <= e)
    def test_ge(self):
        a = sllist(py23_xrange(0, 1100))
        b = sllist(py23_xrange(0, 1101))
        c = sllist([1, 2, 3, 4])
        d = sllist([1, 2, 3, 5])
        e = sllist([1, 0, 0, 0])
        f = sllist([0, 0, 0, 0])
        self.assertTrue(sllist() >= sllist())
        self.assertTrue(a >= a)
        self.assertFalse(sllist() >= a)
        self.assertTrue(a >= sllist())
        self.assertFalse(a >= b)
        self.assertTrue(b >= a)
        self.assertFalse(c >= d)
        self.assertTrue(d >= c)
        self.assertTrue(e >= f)
        self.assertFalse(f >= e)
    def test_nodeat(self):
        # nodeat() supports positive and negative indices, returns nodes,
        # and rejects non-integer or out-of-range indices.
        ref = py23_range(0, 1024, 4)
        ll = sllist(ref)
        for idx in py23_xrange(len(ll)):
            self.assertTrue(isinstance(ll.nodeat(idx), sllistnode))
            self.assertEqual(ll.nodeat(idx).value, ref[idx])
        for idx in py23_xrange(len(ll)):
            self.assertTrue(isinstance(ll.nodeat(idx), sllistnode))
            self.assertEqual(ll.nodeat(-idx - 1).value, ref[-idx - 1])
        self.assertRaises(TypeError, ll.nodeat, None)
        self.assertRaises(TypeError, ll.nodeat, 'abc')
        self.assertRaises(IndexError, ll.nodeat, len(ref))
        self.assertRaises(IndexError, ll.nodeat, -len(ref) - 1)
    def test_nodeat_empty(self):
        # Any index into an empty list is out of range.
        ll = sllist()
        self.assertRaises(TypeError, ll.nodeat, None)
        self.assertRaises(TypeError, ll.nodeat, 'abc')
        self.assertRaises(IndexError, ll.nodeat, 0)
        self.assertRaises(IndexError, ll.nodeat, -1)
    def test_iter(self):
        # Iteration yields raw values (not nodes) in insertion order.
        ref = py23_range(0, 1024, 4)
        ll = sllist(ref)
        idx = 0
        for val in ll:
            self.assertFalse(isinstance(val, sllistnode))
            self.assertEqual(val, ref[idx])
            idx += 1
        self.assertEqual(idx, len(ref))
    def test_iter_empty(self):
        ll = sllist()
        count = 0
        for val in ll:
            count += 1
        self.assertEqual(count, 0)
    def test_reversed(self):
        # reversed() yields raw values in reverse order.
        ref = py23_range(0, 1024, 4)
        ll = sllist(ref)
        idx = len(ref) - 1
        for val in reversed(ll):
            self.assertFalse(isinstance(val, sllistnode))
            self.assertEqual(val, ref[idx])
            idx -= 1
        self.assertEqual(idx, -1)
    def test_reversed_empty(self):
        ll = sllist()
        count = 0
        for val in reversed(ll):
            count += 1
        self.assertEqual(count, 0)
def test_append_left(self):
ll = sllist([1, 2, 3, 4])
ll.appendleft(5)
self.assertTrue([5, 1, 2, 3, 4], list(ll))
def test_append_right(self):
ll = sllist([1, 2, 3, 4])
ll.appendleft(5)
self.assertTrue([1, 2, 3, 4, 5], list(ll))
    # NOTE(review): despite the "_one_elem" names, these fixtures build
    # 100-element lists — consider renaming or using single-element lists.
    def test_pop_left_from_one_elem(self):
        ll = sllist(py23_xrange(0, 100))
        dd = ll.popleft()
        self.assertEqual(dd, 0)
    def test_pop_right_from_one_elem(self):
        ll = sllist(py23_xrange(0, 100))
        dd = ll.popright()
        self.assertEqual(dd, 99)
    def test_pop_right_from_n_elem(self):
        ll = sllist(py23_xrange(0, 100))
        dd = ll.popright()
        self.assertEqual(dd, 99)
    def test_get_node_at_from_n_elem(self):
        # Indexing with [] returns the stored value directly.
        ll = sllist(py23_xrange(0, 100))
        self.assertEqual(50, ll[50])
def test_remove_from_n_elem(self):
ll = sllist()
nn = sllistnode()
ll.append(nn)
to_del = ll.nodeat(0)
ll.remove(to_del)
self.assertEqual(None, None)
    def test_insert_after(self):
        # insertafter(value, node) places the value right after the node.
        ll = sllist([1, 3, '123'])
        ll.insertafter(100, ll.first)
        self.assertEqual([1, 100, 3, '123'], list(ll))
    def test_insert_before(self):
        # insertbefore(value, node) places the value right before the node.
        ll = sllist([1, 3, '123'])
        ll.insertbefore(100, ll.first)
        self.assertEqual([100, 1, 3, '123'], list(ll))
    def test_insert_value_after(self):
        # Passing a node copies its value into a new node (no aliasing),
        # and links the new node between the reference node and its successor.
        ll = sllist(py23_xrange(4))
        ref = sllist([0, 1, 2, 10, 3])
        prev = ll.nodeat(2)
        next = ll.nodeat(3)
        arg_node = sllistnode(10)
        new_node = ll.insertafter(arg_node, ll.nodeat(2))
        self.assertNotEqual(new_node, arg_node)
        self.assertEqual(new_node.value, 10)
        self.assertEqual(new_node.next, next)
        self.assertEqual(prev.next, new_node)
        self.assertEqual(ll, ref)
    def test_insert_value_after_last(self):
        # Inserting after the tail updates ll.last.
        ll = sllist(py23_xrange(4))
        ref = sllist([0, 1, 2, 3, 10])
        prev = ll.nodeat(3)
        arg_node = sllistnode(10)
        new_node = ll.insertafter(arg_node, ll.nodeat(-1))
        self.assertNotEqual(new_node, arg_node)
        self.assertEqual(new_node.value, 10)
        self.assertEqual(new_node.next, None)
        self.assertEqual(prev.next, new_node)
        self.assertEqual(new_node, ll.last)
        self.assertEqual(ll, ref)
    def test_insert_value_before(self):
        ll = sllist(py23_xrange(4))
        ref = sllist([0, 1, 10, 2, 3])
        prev = ll.nodeat(1)
        next = ll.nodeat(2)
        arg_node = sllistnode(10)
        new_node = ll.insertbefore(arg_node, ll.nodeat(2))
        self.assertNotEqual(new_node, arg_node)
        self.assertEqual(new_node.value, 10)
        self.assertEqual(new_node.next, next)
        self.assertEqual(prev.next, new_node)
        self.assertEqual(ll, ref)
    def test_insert_value_before_first(self):
        # Inserting before the head updates ll.first.
        ll = sllist(py23_xrange(4))
        ref = sllist([10, 0, 1, 2, 3])
        next = ll.nodeat(0)
        arg_node = sllistnode(10)
        new_node = ll.insertbefore(arg_node, ll.nodeat(0))
        self.assertNotEqual(new_node, arg_node)
        self.assertEqual(new_node.value, 10)
        self.assertEqual(new_node.next, next)
        self.assertEqual(new_node, ll.first)
        self.assertEqual(ll, ref)
    def test_insert_invalid_ref(self):
        # Reference argument must be a node belonging to this list:
        # non-nodes raise TypeError, detached nodes raise ValueError.
        ll = sllist([1, 2, 3, 4])
        self.assertRaises(TypeError, ll.insertafter, 10, 1)
        self.assertRaises(TypeError, ll.insertafter, 10, 'abc')
        self.assertRaises(TypeError, ll.insertafter, 10, [])
        self.assertRaises(ValueError, ll.insertafter, 10, sllistnode())
        self.assertRaises(TypeError, ll.insertbefore, 10, 1)
        self.assertRaises(TypeError, ll.insertbefore, 10, 'abc')
        self.assertRaises(TypeError, ll.insertbefore, 10, [])
        self.assertRaises(ValueError, ll.insertbefore, 10, sllistnode())
    def test_append(self):
        # append() of a node copies its value into a fresh tail node.
        ll = sllist(py23_xrange(4))
        ref = sllist([0, 1, 2, 3, 10])
        prev = ll.nodeat(-1)
        arg_node = sllistnode(10)
        new_node = ll.append(arg_node)
        self.assertNotEqual(new_node, arg_node)
        self.assertEqual(new_node.value, 10)
        self.assertEqual(new_node.next, None)
        self.assertEqual(prev.next, new_node)
        self.assertEqual(ll.last, new_node)
        self.assertEqual(ll, ref)
    def test_appendleft(self):
        # appendleft() of a node copies its value into a fresh head node.
        ll = sllist(py23_xrange(4))
        ref = sllist([10, 0, 1, 2, 3])
        next = ll.nodeat(0)
        arg_node = sllistnode(10)
        new_node = ll.appendleft(arg_node)
        self.assertNotEqual(new_node, arg_node)
        self.assertEqual(new_node.value, 10)
        self.assertEqual(new_node.next, next)
        self.assertEqual(ll.first, new_node)
        self.assertEqual(ll, ref)
    def test_appendright(self):
        # appendright() behaves exactly like append().
        ll = sllist(py23_xrange(4))
        ref = sllist([0, 1, 2, 3, 10])
        prev = ll.nodeat(-1)
        arg_node = sllistnode(10)
        new_node = ll.appendright(arg_node)
        self.assertNotEqual(new_node, arg_node)
        self.assertEqual(new_node.value, 10)
        self.assertEqual(new_node.next, None)
        self.assertEqual(prev.next, new_node)
        self.assertEqual(ll.last, new_node)
        self.assertEqual(ll, ref)
    def test_extend(self):
        # extend() appends from another sllist, a plain sequence, or the
        # list itself (self-extension doubles the content).
        a_ref = py23_range(0, 1024, 4)
        b_ref = py23_range(8092, 8092 + 1024, 4)
        b = sllist(b_ref)
        ab_ref = sllist(a_ref + b_ref)
        a = sllist(a_ref)
        a.extend(b)
        self.assertEqual(a, ab_ref)
        self.assertEqual(len(a), len(ab_ref))
        a = sllist(a_ref)
        a.extend(b_ref)
        self.assertEqual(a, ab_ref)
        self.assertEqual(len(a), len(ab_ref))
        a = sllist(a_ref)
        a.extend(a)
        self.assertEqual(a, sllist(a_ref + a_ref))
        self.assertEqual(len(a), len(a_ref) * 2)
    def test_extend_empty(self):
        # extend() with either side empty is a no-op on the empty side.
        filled_ref = py23_range(0, 1024, 4)
        filled = sllist(filled_ref)
        empty = sllist()
        empty.extend(empty)
        self.assertEqual(empty, sllist([] + []))
        self.assertEqual(len(empty), 0)
        empty = sllist()
        empty.extend(filled)
        self.assertEqual(empty, sllist([] + filled_ref))
        self.assertEqual(len(empty), len(filled_ref))
        empty = sllist()
        filled.extend(empty)
        self.assertEqual(filled, sllist(filled_ref + []))
        self.assertEqual(len(filled), len(filled_ref))
def test_extendleft(self):
a_ref = py23_range(0, 1024, 4)
b_ref = py23_range(8092, 8092 + 1024, 4)
b = sllist(b_ref)
ab_ref = sllist(list(reversed(b_ref)) + a_ref)
a = sllist(a_ref)
a.extendleft(b)
self.assertEqual(a, ab_ref)
self.assertEqual(len(a), len(ab_ref))
a = sllist(a_ref)
a.extendleft(b_ref)
self.assertEqual(a, ab_ref)
self.assertEqual(len(a), len(ab_ref))
a = sllist(a_ref)
a.extendleft(a)
self.assertEqual(a, sllist(list(reversed(a_ref)) + a_ref))
self.assertEqual(len(a), len(a_ref) * 2)
def test_extendleft_empty(self):
filled_ref = py23_range(0, 1024, 4)
filled = sllist(filled_ref)
empty = sllist()
empty.extendleft(empty)
self.assertEqual(empty, sllist([] + []))
self.assertEqual(len(empty), 0)
empty = sllist()
empty.extendleft(filled)
self.assertEqual(empty, sllist(list(reversed(filled_ref)) + []))
self.assertEqual(len(empty), len(filled_ref))
empty = sllist()
filled.extendleft(empty)
self.assertEqual(filled, sllist(list(reversed([])) + filled_ref))
self.assertEqual(len(filled), len(filled_ref))
def test_extendright(self):
a_ref = py23_range(0, 1024, 4)
b_ref = py23_range(8092, 8092 + 1024, 4)
b = sllist(b_ref)
ab_ref = sllist(a_ref + b_ref)
a = sllist(a_ref)
a.extendright(b)
self.assertEqual(a, ab_ref)
self.assertEqual(len(a), len(ab_ref))
a = sllist(a_ref)
a.extendright(b_ref)
self.assertEqual(a, ab_ref)
self.assertEqual(len(a), len(ab_ref))
a = sllist(a_ref)
a.extendright(a)
self.assertEqual(a, sllist(a_ref + a_ref))
self.assertEqual(len(a), len(a_ref) * 2)
def test_extendright_empty(self):
filled_ref = py23_range(0, 1024, 4)
filled = sllist(filled_ref)
empty = sllist()
empty.extendright(empty)
self.assertEqual(empty, sllist([] + []))
self.assertEqual(len(empty), 0)
empty = sllist()
empty.extendright(filled)
self.assertEqual(empty, sllist([] + filled_ref))
self.assertEqual(len(empty), len(filled_ref))
empty = sllist()
filled.extendright(empty)
self.assertEqual(filled, sllist(filled_ref + []))
self.assertEqual(len(filled), len(filled_ref))
def test_clear_empty(self):
empty_list = sllist()
empty_list.clear()
self.assertEqual(empty_list.first, None)
self.assertEqual(empty_list.last, None)
self.assertEqual(empty_list.size, 0)
self.assertEqual(list(empty_list), [])
def test_clear(self):
ll = sllist(py23_xrange(0, 1024, 4))
del_node = ll.nodeat(4)
ll.clear()
self.assertEqual(ll.first, None)
self.assertEqual(ll.last, None)
self.assertEqual(ll.size, 0)
self.assertEqual(list(ll), [])
self.assertEqual(del_node.next, None)
def test_pop(self):
ref = py23_range(0, 1024, 4)
ll = sllist(ref)
del_node = ll.nodeat(-1)
result = ll.pop()
self.assertEqual(result, ref[-1])
self.assertEqual(len(ll), len(ref) - 1)
self.assertEqual(ll.size, len(ref) - 1)
self.assertEqual(ll.last.value, ref[-2])
self.assertEqual(list(ll), ref[:-1])
self.assertEqual(del_node.next, None)
ref = py23_range(0, 1024, 4)
ll = sllist(ref)
result = ll.pop(1)
self.assertEqual(result, ref[1])
result = ll.pop(1)
self.assertEqual(result, ref[2])
self.assertEqual(ll.size, len(ref)-2)
ref = py23_range(0, 1024, 4)
ll = sllist(ref)
result = ll.pop(0)
self.assertEqual(result, ref[0])
self.assertEqual(ll.first.value, ref[1])
for i in range(len(ll)):
result = ll.pop(0)
self.assertEqual(result, ref[i+1])
self.assertEqual(ll.first, None)
self.assertEqual(ll.last, None)
ref = py23_range(0, 1024, 4)
ll = sllist(ref)
i = len(ll)-1
while i >= 0:
result = ll.pop(i)
self.assertEqual(result, ref[i])
i -= 1
self.assertEqual(ll.first, None)
self.assertEqual(ll.last, None)
def test_slice(self):
lst = list(range(100))
slst = sllist(lst)
self.assertEqual(lst[0:20], list(slst[0:20]))
self.assertEqual(lst[40:60], list(slst[40:60]))
self.assertEqual(lst[60:40], list(slst[60:40]))
self.assertEqual(lst[:-1], list(slst[:-1]))
self.assertEqual(lst[-20:], list(slst[-20:]))
self.assertEqual(lst[-20:-5], list(slst[-20:-5]))
self.assertEqual(lst[-5:-20], list(slst[-5:-20]))
self.assertEqual(lst[-70:50], list(slst[-70:50]))
self.assertEqual(lst[5:500], list(slst[5:500]))
self.assertEqual(lst[:], list(slst[:]))
smlst = list(range(8))
smslst = sllist(smlst)
self.assertEqual(smlst[2:5], list(smslst[2:5]))
self.assertEqual(smlst[-3:-1], list(smslst[-3:-1]))
for i in range(100):
for j in range(100):
try:
self.assertEqual(lst[i:j], list(slst[i:j]))
except AssertionError as ae:
import pdb; pdb.set_trace()
sys.stderr.write("Failed on [ %d : %d ]\n" %(i, j))
raise ae
# Test if version of python (2.7+ , 3.? + ) supports step in slices
try:
lst[0:10:2]
except:
# If not supported, test is over
return
self.assertEqual(lst[0:20:2], list(slst[0:20:2]))
self.assertEqual(lst[0:21:2], list(slst[0:21:2]))
self.assertEqual(lst[50:80:6], list(slst[50:80:6]))
for i in range(30):
for j in range(30):
for s in range(1, 30, 1):
try:
self.assertEqual(lst[i:j:s], list(slst[i:j:s]))
except AssertionError as ae:
sys.stderr.write("Failed on [ %d : %d : %d ]\n" %(i, j, s))
raise ae
def test_popleft(self):
ref = py23_range(0, 1024, 4)
ll = sllist(ref)
del_node = ll.nodeat(0)
result = ll.popleft()
self.assertEqual(result, ref[0])
self.assertEqual(len(ll), len(ref) - 1)
self.assertEqual(ll.size, len(ref) - 1)
self.assertEqual(ll.first.value, ref[1])
self.assertEqual(list(ll), ref[1:])
self.assertEqual(del_node.next, None)
def test_popright(self):
ref = py23_range(0, 1024, 4)
ll = sllist(ref)
del_node = ll.nodeat(-1)
result = ll.popright()
self.assertEqual(result, ref[-1])
self.assertEqual(len(ll), len(ref) - 1)
self.assertEqual(ll.size, len(ref) - 1)
self.assertEqual(ll.last.value, ref[-2])
self.assertEqual(list(ll), ref[:-1])
self.assertEqual(del_node.next, None)
def test_pop_from_empty_list(self):
ll = sllist()
self.assertRaises(ValueError, ll.pop)
self.assertRaises(ValueError, ll.popleft)
self.assertRaises(ValueError, ll.popright)
def test_remove(self):
ref = py23_range(0, 1024, 4)
ll = sllist(ref)
prev_node = ll.nodeat(3)
del_node = ll.nodeat(4)
next_node = ll.nodeat(5)
result = ll.remove(del_node)
ref_result = ref[4]
del ref[4]
self.assertEqual(list(ll), ref)
self.assertEqual(result, ref_result)
self.assertEqual(len(ll), len(ref))
self.assertEqual(ll.size, len(ref))
self.assertEqual(prev_node.next, next_node)
self.assertEqual(del_node.next, None)
def test_remove_from_empty_list(self):
ll = sllist()
self.assertRaises(ValueError, ll.remove, sllistnode())
def test_remove_invalid_node(self):
ll = sllist([1, 2, 3, 4])
self.assertRaises(ValueError, ll.remove, sllistnode())
def test_remove_already_deleted_node(self):
ll = sllist([1, 2, 3, 4])
node = ll.nodeat(2)
ll.remove(node)
self.assertRaises(ValueError, ll.remove, node)
def test_rotate_left(self):
for n in py23_xrange(128):
ref = py23_range(32)
split = n % len(ref)
ref_result = ref[split:] + ref[:split]
ll = sllist(ref)
new_first = ll.nodeat(split)
new_last = ll.nodeat(split - 1)
ll.rotate(-n)
self.assertEqual(list(ll), ref_result)
self.assertEqual(ll.first, new_first)
self.assertEqual(ll.last, new_last)
self.assertEqual(ll.size, len(ref))
self.assertEqual(ll.last.next, None)
def test_rotate_right(self):
for n in py23_xrange(128):
ref = py23_range(32)
split = n % len(ref)
ref_result = ref[-split:] + ref[:-split]
ll = sllist(ref)
new_first = ll.nodeat(-split)
last_idx = -split - 1
new_last = ll.nodeat(last_idx)
ll.rotate(n)
self.assertEqual(list(ll), ref_result)
self.assertEqual(ll.first, new_first)
self.assertEqual(ll.last, new_last)
self.assertEqual(ll.size, len(ref))
self.assertEqual(ll.last.next, None)
def test_rotate_left_empty(self):
for n in py23_xrange(4):
ll = sllist()
ll.rotate(-n)
self.assertEqual(ll.first, None)
self.assertEqual(ll.last, None)
self.assertEqual(ll.size, 0)
def test_rotate_right_empty(self):
for n in py23_xrange(4):
ll = sllist()
ll.rotate(n)
self.assertEqual(ll.first, None)
self.assertEqual(ll.last, None)
self.assertEqual(ll.size, 0)
def test_getitem(self):
ref = py23_range(0, 1024, 4)
ll = sllist(ref)
for idx in py23_xrange(len(ll)):
self.assertFalse(isinstance(ll[idx], sllistnode))
self.assertEqual(ll[idx], ref[idx])
for idx in py23_xrange(len(ll)):
self.assertFalse(isinstance(ll[idx], sllistnode))
self.assertEqual(ll[-idx - 1], ref[-idx - 1])
self.assertRaises(TypeError, ll.__getitem__, None)
self.assertRaises(TypeError, ll.__getitem__, 'abc')
self.assertRaises(IndexError, ll.__getitem__, len(ref))
self.assertRaises(IndexError, ll.__getitem__, -len(ref) - 1)
def test_getitem_empty(self):
ll = sllist()
self.assertRaises(TypeError, ll.__getitem__, None)
self.assertRaises(TypeError, ll.__getitem__, 'abc')
self.assertRaises(IndexError, ll.__getitem__, 0)
self.assertRaises(IndexError, ll.__getitem__, -1)
def test_del(self):
ref = py23_range(0, 1024, 4)
ll = sllist(ref)
del ll[0]
del ref[0]
self.assertEqual(list(ll), ref)
del ll[len(ll) - 1]
del ref[len(ref) - 1]
self.assertEqual(list(ll), ref)
del ll[(len(ll) - 1) // 2]
del ref[(len(ref) - 1) // 2]
self.assertEqual(list(ll), ref)
def del_item(idx):
del ll[idx]
self.assertRaises(IndexError, del_item, len(ll))
for i in py23_xrange(len(ll)):
del ll[0]
self.assertEqual(len(ll), 0)
def test_concat(self):
a_ref = py23_range(0, 1024, 4)
a = sllist(a_ref)
b_ref = py23_range(8092, 8092 + 1024, 4)
b = sllist(b_ref)
ab_ref = sllist(a_ref + b_ref)
c = a + b
self.assertEqual(c, ab_ref)
self.assertEqual(len(c), len(ab_ref))
c = a + b_ref
self.assertEqual(c, ab_ref)
self.assertEqual(len(c), len(ab_ref))
def test_concat_empty(self):
empty = sllist()
filled_ref = py23_range(0, 1024, 4)
filled = sllist(filled_ref)
res = empty + empty
self.assertEqual(res, sllist([] + []))
self.assertEqual(len(res), 0)
res = empty + filled
self.assertEqual(res, sllist([] + filled_ref))
self.assertEqual(len(res), len(filled_ref))
res = filled + empty
self.assertEqual(res, sllist(filled_ref + []))
self.assertEqual(len(res), len(filled_ref))
def test_concat_inplace(self):
a_ref = py23_range(0, 1024, 4)
b_ref = py23_range(8092, 8092 + 1024, 4)
b = sllist(b_ref)
ab_ref = sllist(a_ref + b_ref)
a = sllist(a_ref)
a += b
self.assertEqual(a, ab_ref)
self.assertEqual(len(a), len(ab_ref))
a = sllist(a_ref)
a += b_ref
self.assertEqual(a, ab_ref)
self.assertEqual(len(a), len(ab_ref))
a = sllist(a_ref)
a += a
self.assertEqual(a, sllist(a_ref + a_ref))
self.assertEqual(len(a), len(ab_ref))
def test_concat_inplace_empty(self):
filled_ref = py23_range(0, 1024, 4)
filled = sllist(filled_ref)
empty = sllist()
empty += empty
self.assertEqual(empty, sllist([] + []))
self.assertEqual(len(empty), 0)
empty = sllist()
empty += filled
self.assertEqual(empty, sllist([] + filled_ref))
self.assertEqual(len(empty), len(filled_ref))
empty = sllist()
filled += empty
self.assertEqual(filled, sllist(filled_ref + []))
self.assertEqual(len(filled), len(filled_ref))
def test_index(self):
lst = [1, 5, 10, 5, 9]
sl = sllist(lst)
self.assertEqual(sl.index(1), 0)
self.assertEqual(sl.index(5), 1)
self.assertEqual(sl.rindex(5), 3)
self.assertEqual(sl.rindex(9), 4)
gotException = False
try:
sl.index(2)
except ValueError:
gotException = True
self.assertEqual(gotException, True)
def test_contains(self):
lst = [1, 5, 7]
sl = sllist(lst)
self.assertEqual(5 in sl, True)
self.assertEqual(1 in sl, True)
self.assertEqual(7 in sl, True)
self.assertEqual(8 in sl, False)
def test_repeat(self):
ref = py23_range(0, 1024, 4)
ll = sllist(ref)
self.assertEqual(ll * 4, sllist(ref * 4))
def test_repeat_empty(self):
ll = sllist()
self.assertEqual(ll * 4, sllist([] * 4))
def test_repeat_inplace(self):
ref = py23_range(0, 1024, 4)
ll = sllist(ref)
ll *= 4
self.assertEqual(ll, sllist(ref * 4))
def test_repeat_inplace_empty(self):
ll = sllist()
ll *= 4
self.assertEqual(ll, sllist([] * 4))
def test_list_readonly_attributes(self):
if sys.hexversion >= 0x03000000:
expected_error = AttributeError
else:
expected_error = TypeError
ll = sllist(py23_range(4))
self.assertRaises(expected_error, setattr, ll, 'first', None)
self.assertRaises(expected_error, setattr, ll, 'last', None)
self.assertRaises(expected_error, setattr, ll, 'size', None)
def test_node_readonly_attributes(self):
if sys.hexversion >= 0x03000000:
expected_error = AttributeError
else:
expected_error = TypeError
ll = sllistnode()
self.assertRaises(expected_error, setattr, ll, 'next', None)
# COMMENTED BECAUSE HASH DOES NOT WORK
# def test_list_hash(self):
# self.assertEqual(hash(sllist()), hash(sllist()))
# self.assertEqual(hash(sllist(py23_range(0, 1024, 4))),
# hash(sllist(py23_range(0, 1024, 4))))
# self.assertEqual(hash(sllist([0, 2])), hash(sllist([0.0, 2.0])))
class testdllist(unittest.TestCase):
def test_init_empty(self):
ll = dllist()
self.assertEqual(len(ll), 0)
self.assertEqual(ll.size, 0)
self.assertEqual(list(ll), [])
def test_init_with_sequence(self):
ref = py23_range(0, 1024, 4)
ll = dllist(ref)
self.assertEqual(len(ll), len(ref))
self.assertEqual(ll.size, len(ref))
self.assertEqual(list(ll), ref)
def test_init_with_non_sequence(self):
self.assertRaises(TypeError, dllist, None);
self.assertRaises(TypeError, dllist, 1);
self.assertRaises(TypeError, dllist, 1.5);
def test_str(self):
a = dllist([])
self.assertEqual(str(a), 'dllist()')
b = dllist([None, 1, 'abc'])
self.assertEqual(str(b), 'dllist([None, 1, abc])')
def test_repr(self):
a = dllist([])
self.assertEqual(repr(a), 'dllist()')
b = dllist([None, 1, 'abc'])
self.assertEqual(repr(b), 'dllist([None, 1, \'abc\'])')
def test_node_str(self):
a = dllist([None, None]).first
self.assertEqual(str(a), 'dllistnode(None)')
b = dllist([1, None]).first
self.assertEqual(str(b), 'dllistnode(1)')
c = dllist(['abc', None]).first
self.assertEqual(str(c), 'dllistnode(abc)')
def test_node_repr(self):
a = dllist([None]).first
self.assertEqual(repr(a), '<dllistnode(None)>')
b = dllist([1, None]).first
self.assertEqual(repr(b), '<dllistnode(1)>')
c = dllist(['abc', None]).first
self.assertEqual(repr(c), '<dllistnode(\'abc\')>')
def test_cmp(self):
a = dllist(py23_xrange(0, 1100))
b = dllist(py23_xrange(0, 1101))
c = dllist([1, 2, 3, 4])
d = dllist([1, 2, 3, 5])
e = dllist([1, 0, 0, 0])
f = dllist([0, 0, 0, 0])
self.assertEqual(cmp(a, a), 0)
self.assertEqual(cmp(a, b), -1)
self.assertEqual(cmp(b, a), 1)
self.assertEqual(cmp(c, d), -1)
self.assertEqual(cmp(d, c), 1)
self.assertEqual(cmp(e, f), 1)
self.assertEqual(cmp(f, e), -1)
def test_cmp_nonlist(self):
a = dllist(py23_xrange(0, 1100))
b = [py23_xrange(0, 1100)]
if sys.hexversion < 0x03000000:
# actual order is not specified by language
self.assertNotEqual(cmp(a, b), 0)
self.assertNotEqual(cmp(b, a), 0)
self.assertNotEqual(cmp([], a), 0)
self.assertNotEqual(cmp(a, []), 0)
def test_eq(self):
a = dllist(py23_xrange(0, 1100))
b = dllist(py23_xrange(0, 1101))
c = dllist([1, 2, 3, 4])
d = dllist([1, 2, 3, 5])
e = dllist([1, 0, 0, 0])
f = dllist([0, 0, 0, 0])
self.assertTrue(dllist() == dllist())
self.assertTrue(a == a)
self.assertFalse(dllist() == a)
self.assertFalse(a == dllist())
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertFalse(c == d)
self.assertFalse(d == c)
self.assertFalse(e == f)
self.assertFalse(f == e)
def test_ne(self):
a = dllist(py23_xrange(0, 1100))
b = dllist(py23_xrange(0, 1101))
c = dllist([1, 2, 3, 4])
d = dllist([1, 2, 3, 5])
e = dllist([1, 0, 0, 0])
f = dllist([0, 0, 0, 0])
self.assertFalse(dllist() != dllist())
self.assertFalse(a != a)
self.assertTrue(dllist() != a)
self.assertTrue(a != dllist())
self.assertTrue(a != b)
self.assertTrue(b != a)
self.assertTrue(c != d)
self.assertTrue(d != c)
self.assertTrue(e != f)
self.assertTrue(f != e)
def test_lt(self):
a = dllist(py23_xrange(0, 1100))
b = dllist(py23_xrange(0, 1101))
c = dllist([1, 2, 3, 4])
d = dllist([1, 2, 3, 5])
e = dllist([1, 0, 0, 0])
f = dllist([0, 0, 0, 0])
self.assertFalse(dllist() < dllist())
self.assertFalse(a < a)
self.assertTrue(dllist() < a)
self.assertFalse(a < dllist())
self.assertTrue(a < b)
self.assertFalse(b < a)
self.assertTrue(c < d)
self.assertFalse(d < c)
self.assertFalse(e < f)
self.assertTrue(f < e)
def test_gt(self):
a = dllist(py23_xrange(0, 1100))
b = dllist(py23_xrange(0, 1101))
c = dllist([1, 2, 3, 4])
d = dllist([1, 2, 3, 5])
e = dllist([1, 0, 0, 0])
f = dllist([0, 0, 0, 0])
self.assertFalse(dllist() > dllist())
self.assertFalse(a > a)
self.assertFalse(dllist() > a)
self.assertTrue(a > dllist())
self.assertFalse(a > b)
self.assertTrue(b > a)
self.assertFalse(c > d)
self.assertTrue(d > c)
self.assertTrue(e > f)
self.assertFalse(f > e)
def test_le(self):
a = dllist(py23_xrange(0, 1100))
b = dllist(py23_xrange(0, 1101))
c = dllist([1, 2, 3, 4])
d = dllist([1, 2, 3, 5])
e = dllist([1, 0, 0, 0])
f = dllist([0, 0, 0, 0])
self.assertTrue(dllist() <= dllist())
self.assertTrue(a <= a)
self.assertTrue(dllist() <= a)
self.assertFalse(a <= dllist())
self.assertTrue(a <= b)
self.assertFalse(b <= a)
self.assertTrue(c <= d)
self.assertFalse(d <= c)
self.assertFalse(e <= f)
self.assertTrue(f <= e)
def test_ge(self):
a = dllist(py23_xrange(0, 1100))
b = dllist(py23_xrange(0, 1101))
c = dllist([1, 2, 3, 4])
d = dllist([1, 2, 3, 5])
e = dllist([1, 0, 0, 0])
f = dllist([0, 0, 0, 0])
self.assertTrue(dllist() >= dllist())
self.assertTrue(a >= a)
self.assertFalse(dllist() >= a)
self.assertTrue(a >= dllist())
self.assertFalse(a >= b)
self.assertTrue(b >= a)
self.assertFalse(c >= d)
self.assertTrue(d >= c)
self.assertTrue(e >= f)
self.assertFalse(f >= e)
def test_nodeat(self):
ref = py23_range(0, 1024, 4)
ll = dllist(ref)
for idx in py23_xrange(len(ll)):
self.assertTrue(isinstance(ll.nodeat(idx), dllistnode))
self.assertEqual(ll.nodeat(idx).value, ref[idx])
for idx in py23_xrange(len(ll)):
self.assertTrue(isinstance(ll.nodeat(idx), dllistnode))
self.assertEqual(ll.nodeat(-idx - 1).value, ref[-idx - 1])
self.assertRaises(TypeError, ll.nodeat, None)
self.assertRaises(TypeError, ll.nodeat, 'abc')
self.assertRaises(IndexError, ll.nodeat, len(ref))
self.assertRaises(IndexError, ll.nodeat, -len(ref) - 1)
def test_nodeat_empty(self):
ll = dllist()
self.assertRaises(TypeError, ll.nodeat, None)
self.assertRaises(TypeError, ll.nodeat, 'abc')
self.assertRaises(IndexError, ll.nodeat, 0)
self.assertRaises(IndexError, ll.nodeat, -1)
def test_iter(self):
ref = py23_range(0, 1024, 4)
ll = dllist(ref)
idx = 0
for val in ll:
self.assertFalse(isinstance(val, dllistnode))
self.assertEqual(val, ref[idx])
idx += 1
self.assertEqual(idx, len(ref))
def test_iter_empty(self):
ll = dllist()
count = 0
for val in ll:
count += 1
self.assertEqual(count, 0)
def test_reversed(self):
ref = py23_range(0, 1024, 4)
ll = dllist(ref)
idx = len(ref) - 1
for val in reversed(ll):
self.assertFalse(isinstance(val, dllistnode))
self.assertEqual(val, ref[idx])
idx -= 1
self.assertEqual(idx, -1)
def test_reversed_empty(self):
ll = dllist()
count = 0
for val in reversed(ll):
count += 1
self.assertEqual(count, 0)
def test_insert_value(self):
ll = dllist(py23_xrange(4))
ref = dllist([0, 1, 2, 3, 10])
prev = ll.nodeat(-1)
arg_node = dllistnode(10)
new_node = ll.insert(arg_node)
self.assertNotEqual(new_node, arg_node)
self.assertEqual(new_node.value, 10)
self.assertEqual(new_node.prev, prev)
self.assertEqual(new_node.next, None)
self.assertEqual(prev.next, new_node)
self.assertEqual(new_node, ll.last)
self.assertEqual(ll, ref)
def test_insert_value_before(self):
ll = dllist(py23_xrange(4))
ref = dllist([0, 1, 10, 2, 3])
prev = ll.nodeat(1)
next = ll.nodeat(2)
arg_node = dllistnode(10)
new_node = ll.insert(arg_node, ll.nodeat(2))
self.assertNotEqual(new_node, arg_node)
self.assertEqual(new_node.value, 10)
self.assertEqual(new_node.prev, prev)
self.assertEqual(new_node.next, next)
self.assertEqual(prev.next, new_node)
self.assertEqual(next.prev, new_node)
self.assertEqual(ll, ref)
def test_insert_value_before_first(self):
ll = dllist(py23_xrange(4))
ref = dllist([10, 0, 1, 2, 3])
next = ll.nodeat(0)
arg_node = dllistnode(10)
new_node = ll.insert(arg_node, ll.nodeat(0))
self.assertNotEqual(new_node, arg_node)
self.assertEqual(new_node.value, 10)
self.assertEqual(new_node.prev, None)
self.assertEqual(new_node.next, next)
self.assertEqual(next.prev, new_node)
self.assertEqual(new_node, ll.first)
self.assertEqual(ll, ref)
def test_insert_invalid_ref(self):
ll = dllist()
self.assertRaises(TypeError, ll.insert, 10, 1)
self.assertRaises(TypeError, ll.insert, 10, 'abc')
self.assertRaises(TypeError, ll.insert, 10, [])
self.assertRaises(ValueError, ll.insert, 10, dllistnode())
def test_append(self):
ll = dllist(py23_xrange(4))
ref = dllist([0, 1, 2, 3, 10])
prev = ll.nodeat(-1)
arg_node = dllistnode(10)
new_node = ll.append(arg_node)
self.assertNotEqual(new_node, arg_node)
self.assertEqual(new_node.value, 10);
self.assertEqual(new_node.prev, prev)
self.assertEqual(new_node.next, None)
self.assertEqual(prev.next, new_node)
self.assertEqual(ll.last, new_node)
self.assertEqual(ll, ref)
def test_appendleft(self):
ll = dllist(py23_xrange(4))
ref = dllist([10, 0, 1, 2, 3])
next = ll.nodeat(0)
arg_node = dllistnode(10)
new_node = ll.appendleft(arg_node)
self.assertNotEqual(new_node, arg_node)
self.assertEqual(new_node.value, 10);
self.assertEqual(new_node.prev, None)
self.assertEqual(new_node.next, next)
self.assertEqual(next.prev, new_node)
self.assertEqual(ll.first, new_node)
self.assertEqual(ll, ref)
def test_appendright(self):
ll = dllist(py23_xrange(4))
ref = dllist([0, 1, 2, 3, 10])
prev = ll.nodeat(-1)
arg_node = dllistnode(10)
new_node = ll.appendright(arg_node)
self.assertNotEqual(new_node, arg_node)
self.assertEqual(new_node.value, 10);
self.assertEqual(new_node.prev, prev)
self.assertEqual(new_node.next, None)
self.assertEqual(prev.next, new_node)
self.assertEqual(ll.last, new_node)
self.assertEqual(ll, ref)
def test_extend(self):
a_ref = py23_range(0, 1024, 4)
b_ref = py23_range(8092, 8092 + 1024, 4)
b = dllist(b_ref)
ab_ref = dllist(a_ref + b_ref)
a = dllist(a_ref)
a.extend(b)
self.assertEqual(a, ab_ref)
self.assertEqual(len(a), len(ab_ref))
a = dllist(a_ref)
a.extend(b_ref)
self.assertEqual(a, ab_ref)
self.assertEqual(len(a), len(ab_ref))
a = dllist(a_ref)
a.extend(a)
self.assertEqual(a, dllist(a_ref + a_ref))
self.assertEqual(len(a), len(a_ref) * 2)
def test_extend_empty(self):
filled_ref = py23_range(0, 1024, 4)
filled = dllist(filled_ref)
empty = dllist()
empty.extend(empty)
self.assertEqual(empty, dllist([] + []))
self.assertEqual(len(empty), 0)
empty = dllist()
empty.extend(filled)
self.assertEqual(empty, dllist([] + filled_ref))
self.assertEqual(len(empty), len(filled_ref))
empty = dllist()
filled.extend(empty)
self.assertEqual(filled, dllist(filled_ref + []))
self.assertEqual(len(filled), len(filled_ref))
def test_extendleft(self):
a_ref = py23_range(0, 1024, 4)
b_ref = py23_range(8092, 8092 + 1024, 4)
b = dllist(b_ref)
ab_ref = dllist(list(reversed(b_ref)) + a_ref)
a = dllist(a_ref)
a.extendleft(b)
self.assertEqual(a, ab_ref)
self.assertEqual(len(a), len(ab_ref))
a = dllist(a_ref)
a.extendleft(b_ref)
self.assertEqual(a, ab_ref)
self.assertEqual(len(a), len(ab_ref))
a = dllist(a_ref)
a.extendleft(a)
self.assertEqual(a, dllist(list(reversed(a_ref)) + a_ref))
self.assertEqual(len(a), len(a_ref) * 2)
def test_extendleft_empty(self):
filled_ref = py23_range(0, 1024, 4)
filled = dllist(filled_ref)
empty = dllist()
empty.extendleft(empty)
self.assertEqual(empty, dllist([] + []))
self.assertEqual(len(empty), 0)
empty = dllist()
empty.extendleft(filled)
self.assertEqual(empty, dllist(list(reversed(filled_ref)) + []))
self.assertEqual(len(empty), len(filled_ref))
empty = dllist()
filled.extendleft(empty)
self.assertEqual(filled, dllist(list(reversed([])) + filled_ref))
self.assertEqual(len(filled), len(filled_ref))
def test_extendright(self):
a_ref = py23_range(0, 1024, 4)
b_ref = py23_range(8092, 8092 + 1024, 4)
b = dllist(b_ref)
ab_ref = dllist(a_ref + b_ref)
a = dllist(a_ref)
a.extendright(b)
self.assertEqual(a, ab_ref)
self.assertEqual(len(a), len(ab_ref))
a = dllist(a_ref)
a.extendright(b_ref)
self.assertEqual(a, ab_ref)
self.assertEqual(len(a), len(ab_ref))
a = dllist(a_ref)
a.extendright(a)
self.assertEqual(a, dllist(a_ref + a_ref))
self.assertEqual(len(a), len(a_ref) * 2)
def test_extendright_empty(self):
filled_ref = py23_range(0, 1024, 4)
filled = dllist(filled_ref)
empty = dllist()
empty.extendright(empty)
self.assertEqual(empty, dllist([] + []))
self.assertEqual(len(empty), 0)
empty = dllist()
empty.extendright(filled)
self.assertEqual(empty, dllist([] + filled_ref))
self.assertEqual(len(empty), len(filled_ref))
empty = dllist()
filled.extendright(empty)
self.assertEqual(filled, dllist(filled_ref + []))
self.assertEqual(len(filled), len(filled_ref))
def test_clear_empty(self):
empty_list = dllist()
empty_list.clear()
self.assertEqual(empty_list.first, None)
self.assertEqual(empty_list.last, None)
self.assertEqual(empty_list.size, 0)
self.assertEqual(list(empty_list), [])
def test_clear(self):
ll = dllist(py23_xrange(0, 1024, 4))
del_node = ll.nodeat(4)
ll.clear()
self.assertEqual(ll.first, None)
self.assertEqual(ll.last, None)
self.assertEqual(ll.size, 0)
self.assertEqual(list(ll), [])
self.assertEqual(del_node.prev, None)
self.assertEqual(del_node.next, None)
def test_pop(self):
ref = py23_range(0, 1024, 4)
ll = dllist(ref)
del_node = ll.nodeat(-1)
result = ll.pop();
self.assertEqual(result, ref[-1])
self.assertEqual(len(ll), len(ref) - 1)
self.assertEqual(ll.size, len(ref) - 1)
self.assertEqual(ll.last.value, ref[-2])
self.assertEqual(list(ll), ref[:-1])
self.assertEqual(del_node.prev, None)
self.assertEqual(del_node.next, None)
ref = py23_range(0, 1024, 4)
ll = dllist(ref)
#import pdb; pdb.set_trace()
result = ll.pop(1)
self.assertEqual(result, ref[1])
result = ll.pop(1)
self.assertEqual(result, ref[2])
self.assertEqual(ll.size, len(ref)-2)
secondNode = ll.nodeat(1)
self.assertEquals(secondNode.prev, ll.first)
self.assertEquals(ll.first.prev, None)
ref = py23_range(0, 1024, 4)
ll = dllist(ref)
result = ll.pop(0)
self.assertEqual(result, ref[0])
self.assertEqual(ll.first.value, ref[1])
for i in range(len(ll)):
result = ll.pop(0)
self.assertEqual(result, ref[i+1])
self.assertEqual(ll.first, None)
self.assertEqual(ll.last, None)
ref = py23_range(0, 1024, 4)
ll = dllist(ref)
i = len(ll) - 1
while i >= 0:
result = ll.pop(i)
self.assertEqual(result, ref[i])
i -= 1
self.assertEqual(ll.first, None)
self.assertEqual(ll.last, None)
ref = py23_range(0, 1024, 4)
lastIdx = list(ref).index(ref[-1])
allIndexes = list(range(lastIdx+1))
random.shuffle(allIndexes)
ll = dllist(ref)
while allIndexes:
# print ( "Popping %d out of %d indexes. Value: %s\n\tFirst=%s\n\tMiddle=%s\n\tLast=%s\n\tSize=%d\n" %(allIndexes[0], len(allIndexes), str(ll[allIndexes[0]]), ll.first, ll.middle, ll.last, ll.size))
nextIndex = allIndexes.pop(0)
listAccessValue = ll[nextIndex]
poppedValue = ll.pop(nextIndex)
self.assertEquals(listAccessValue, poppedValue)
for i in range(len(allIndexes)):
if allIndexes[i] > nextIndex:
allIndexes[i] -= 1
self.assertEqual(ll.first, None)
self.assertEqual(ll.last, None)
def test_popleft(self):
ref = py23_range(0, 1024, 4)
ll = dllist(ref)
del_node = ll.nodeat(0)
result = ll.popleft()
self.assertEqual(result, ref[0])
self.assertEqual(len(ll), len(ref) - 1)
self.assertEqual(ll.size, len(ref) - 1)
self.assertEqual(ll.first.value, ref[1])
self.assertEqual(list(ll), ref[1:])
self.assertEqual(del_node.prev, None)
self.assertEqual(del_node.next, None)
def test_popright(self):
ref = py23_range(0, 1024, 4)
ll = dllist(ref)
del_node = ll.nodeat(-1)
result = ll.popright()
self.assertEqual(result, ref[-1])
self.assertEqual(len(ll), len(ref) - 1)
self.assertEqual(ll.size, len(ref) - 1)
self.assertEqual(ll.last.value, ref[-2])
self.assertEqual(list(ll), ref[:-1])
self.assertEqual(del_node.prev, None)
self.assertEqual(del_node.next, None)
def test_pop_from_empty_list(self):
ll = dllist()
self.assertRaises(ValueError, ll.pop)
self.assertRaises(ValueError, ll.popleft)
self.assertRaises(ValueError, ll.popright)
def test_remove(self):
ref = py23_range(0, 1024, 4)
ll = dllist(ref)
prev_node = ll.nodeat(3)
del_node = ll.nodeat(4)
next_node = ll.nodeat(5)
result = ll.remove(del_node)
ref_result = ref[4]
del ref[4]
self.assertEqual(list(ll), ref)
self.assertEqual(result, ref_result)
self.assertEqual(len(ll), len(ref))
self.assertEqual(ll.size, len(ref))
self.assertEqual(prev_node.next, next_node)
self.assertEqual(next_node.prev, prev_node)
self.assertEqual(del_node.prev, None)
self.assertEqual(del_node.next, None)
def test_remove_from_empty_list(self):
ll = dllist()
self.assertRaises(ValueError, ll.remove, dllistnode())
def test_remove_invalid_node(self):
ll = dllist([1, 2, 3, 4])
self.assertRaises(ValueError, ll.remove, dllistnode())
def test_remove_already_deleted_node(self):
ll = dllist([1, 2, 3, 4])
node = ll.nodeat(2)
ll.remove(node)
self.assertRaises(ValueError, ll.remove, node)
def test_rotate_left(self):
for n in py23_xrange(128):
ref = py23_range(32)
split = n % len(ref)
ref_result = ref[split:] + ref[:split]
ll = dllist(ref)
new_first = ll.nodeat(split)
new_last = ll.nodeat(split - 1)
# touch future middle element to initialize cache
cached_idx = (len(ll) // 2 + n) % len(ll)
ll[cached_idx]
ll.rotate(-n)
self.assertEqual(list(ll), ref_result)
self.assertEqual(ll.first, new_first)
self.assertEqual(ll.last, new_last)
self.assertEqual(ll.size, len(ref))
self.assertEqual(ll.first.prev, None)
self.assertEqual(ll.first.next.prev, ll.first)
self.assertEqual(ll.last.next, None)
self.assertEqual(ll.last.prev.next, ll.last)
# check if cached index is updated correctly
self.assertEqual(ll[len(ll) // 2], ref_result[len(ref_result) // 2])
def test_rotate_right(self):
for n in py23_xrange(128):
ref = py23_range(32)
split = n % len(ref)
ref_result = ref[-split:] + ref[:-split]
ll = dllist(ref)
new_first = ll.nodeat(-split)
last_idx = -split - 1
new_last = ll.nodeat(last_idx)
# touch future middle element to initialize cache
cached_idx = len(ll) - (len(ll) // 2 + n) % len(ll) - 1
ll[cached_idx]
ll.rotate(n)
self.assertEqual(list(ll), ref_result)
self.assertEqual(ll.first, new_first)
self.assertEqual(ll.last, new_last)
self.assertEqual(ll.size, len(ref))
self.assertEqual(ll.first.prev, None)
self.assertEqual(ll.first.next.prev, ll.first)
self.assertEqual(ll.last.next, None)
self.assertEqual(ll.last.prev.next, ll.last)
# check if cached index is updated correctly
self.assertEqual(ll[len(ll) // 2], ref_result[len(ref_result) // 2])
def test_rotate_left_empty(self):
for n in py23_xrange(4):
ll = dllist()
ll.rotate(-n)
self.assertEqual(ll.first, None)
self.assertEqual(ll.last, None)
self.assertEqual(ll.size, 0)
def test_rotate_right_empty(self):
for n in py23_xrange(4):
ll = dllist()
ll.rotate(n)
self.assertEqual(ll.first, None)
self.assertEqual(ll.last, None)
self.assertEqual(ll.size, 0)
def test_getitem(self):
ref = py23_range(0, 1024, 4)
ll = dllist(ref)
for idx in py23_xrange(len(ll)):
self.assertFalse(isinstance(ll[idx], dllistnode))
self.assertEqual(ll[idx], ref[idx])
for idx in py23_xrange(len(ll)):
self.assertFalse(isinstance(ll[idx], dllistnode))
self.assertEqual(ll[-idx - 1], ref[-idx - 1])
self.assertRaises(TypeError, ll.__getitem__, None)
self.assertRaises(TypeError, ll.__getitem__, 'abc')
self.assertRaises(IndexError, ll.__getitem__, len(ref))
self.assertRaises(IndexError, ll.__getitem__, -len(ref) - 1)
def test_getitem_empty(self):
ll = dllist()
self.assertRaises(TypeError, ll.__getitem__, None)
self.assertRaises(TypeError, ll.__getitem__, 'abc')
self.assertRaises(IndexError, ll.__getitem__, 0)
self.assertRaises(IndexError, ll.__getitem__, -1)
def test_del(self):
    """del ll[i] mirrors Python list deletion at front, back and middle,
    raises IndexError past the end, and can drain the list completely."""
    ref = py23_range(0, 1024, 4)
    ll = dllist(ref)
    # delete the first element
    del ll[0]
    del ref[0]
    self.assertEqual(list(ll), ref)
    # delete the last element
    del ll[len(ll) - 1]
    del ref[len(ref) - 1]
    self.assertEqual(list(ll), ref)
    # delete a middle element
    del ll[(len(ll) - 1) // 2]
    del ref[(len(ref) - 1) // 2]
    self.assertEqual(list(ll), ref)
    # deleting past the end must raise
    def remove_at(pos):
        del ll[pos]
    self.assertRaises(IndexError, remove_at, len(ll))
    # drain the remainder from the front
    for _ in py23_xrange(len(ll)):
        del ll[0]
    self.assertEqual(len(ll), 0)
def test_concat(self):
    """dllist + dllist and dllist + plain sequence both yield the
    concatenation of their contents."""
    a_ref = py23_range(0, 1024, 4)
    a = dllist(a_ref)
    b_ref = py23_range(8092, 8092 + 1024, 4)
    b = dllist(b_ref)
    ab_ref = dllist(a_ref + b_ref)
    # dllist + dllist
    c = a + b
    self.assertEqual(c, ab_ref)
    self.assertEqual(len(c), len(ab_ref))
    # dllist + plain sequence on the right-hand side
    c = a + b_ref
    self.assertEqual(c, ab_ref)
    # was `len(a_ref) * 2`, which only held because both reference ranges
    # happen to have equal length; compare against the reference instead
    self.assertEqual(len(c), len(ab_ref))
def test_concat_empty(self):
    """Concatenation with an empty operand matches Python list semantics."""
    empty = dllist()
    filled_ref = py23_range(0, 1024, 4)
    filled = dllist(filled_ref)
    # (left, right, expected contents, expected length)
    cases = (
        (empty, empty, [] + [], 0),
        (empty, filled, [] + filled_ref, len(filled_ref)),
        (filled, empty, filled_ref + [], len(filled_ref)),
    )
    for left, right, expected, expected_len in cases:
        res = left + right
        self.assertEqual(res, dllist(expected))
        self.assertEqual(len(res), expected_len)
def test_concat_inplace(self):
    """+= accepts another dllist, a plain sequence, and the list itself."""
    a_ref = py23_range(0, 1024, 4)
    b_ref = py23_range(8092, 8092 + 1024, 4)
    b = dllist(b_ref)
    ab_ref = dllist(a_ref + b_ref)
    # += another dllist, then += a plain sequence
    for other in (b, b_ref):
        a = dllist(a_ref)
        a += other
        self.assertEqual(a, ab_ref)
        self.assertEqual(len(a), len(ab_ref))
    # += itself (self-extension must double the contents)
    a = dllist(a_ref)
    a += a
    self.assertEqual(a, dllist(a_ref + a_ref))
    self.assertEqual(len(a), len(a_ref) * 2)
def test_concat_inplace_empty(self):
    """+= with an empty operand matches Python list semantics."""
    filled_ref = py23_range(0, 1024, 4)
    filled = dllist(filled_ref)
    # empty += empty stays empty
    blank = dllist()
    blank += blank
    self.assertEqual(blank, dllist([] + []))
    self.assertEqual(len(blank), 0)
    # empty += filled copies the contents
    blank = dllist()
    blank += filled
    self.assertEqual(blank, dllist([] + filled_ref))
    self.assertEqual(len(blank), len(filled_ref))
    # filled += empty leaves the contents unchanged
    blank = dllist()
    filled += blank
    self.assertEqual(filled, dllist(filled_ref + []))
    self.assertEqual(len(filled), len(filled_ref))
def test_index(self):
    """index() finds the first occurrence, rindex() the last; a value
    that is absent raises ValueError."""
    values = [1, 5, 10, 5, 9]
    dl = dllist(values)
    self.assertEqual(dl.index(1), 0)
    self.assertEqual(dl.index(5), 1)   # first of the two 5s
    self.assertEqual(dl.rindex(5), 3)  # last of the two 5s
    self.assertEqual(dl.rindex(9), 4)
    # hand-rolled try/except + flag replaced with the unittest idiom
    self.assertRaises(ValueError, dl.index, 2)
def test_contains(self):
    """Membership tests report True for stored values and False otherwise."""
    sl = dllist([1, 5, 7])
    for value, present in ((5, True), (1, True), (7, True), (8, False)):
        self.assertEqual(value in sl, present)
def test_slice(self):
    """Slicing mirrors Python list slicing: negative bounds, reversed and
    out-of-range bounds, full copies, and (where supported) step values."""
    lst = list(range(100))
    dlst = dllist(lst)
    # simple ranges
    self.assertEqual(lst[0:20], list(dlst[0:20]))
    self.assertEqual(lst[40:60], list(dlst[40:60]))
    self.assertEqual(lst[60:40], list(dlst[60:40]))  # start > stop -> empty
    # negative indices and mixed signs
    self.assertEqual(lst[:-1], list(dlst[:-1]))
    self.assertEqual(lst[-20:], list(dlst[-20:]))
    self.assertEqual(lst[-20:-5], list(dlst[-20:-5]))
    self.assertEqual(lst[-5:-20], list(dlst[-5:-20]))  # empty result
    self.assertEqual(lst[-70:50], list(dlst[-70:50]))
    self.assertEqual(lst[5:500], list(dlst[5:500]))  # stop past end is clamped
    self.assertEqual(lst[:], list(dlst[:]))  # full copy
    # edge cases on a short list
    smlst = list(range(8))
    smdlst = dllist(smlst)
    self.assertEqual(smlst[2:5], list(smdlst[2:5]))
    self.assertEqual(smlst[-3:-1], list(smdlst[-3:-1]))
    # exhaustive start/stop combinations
    for i in range(100):
        for j in range(100):
            self.assertEqual(lst[i:j], list(dlst[i:j]))
    # Test if version of python (2.7+ , 3.? + ) supports step in slices
    try:
        lst[0:10:2]
    except:
        # If not supported, test is over
        return
    self.assertEqual(lst[0:20:2], list(dlst[0:20:2]))
    self.assertEqual(lst[0:21:2], list(dlst[0:21:2]))
    self.assertEqual(lst[50:80:6], list(dlst[50:80:6]))
    # exhaustive start/stop/step combinations; report the failing triple
    # on stderr before re-raising so the exhaustive sweep is debuggable
    for i in range(100):
        for j in range(100):
            for s in range(1, 100, 1):
                try:
                    self.assertEqual(lst[i:j:s], list(dlst[i:j:s]))
                except AssertionError as ae:
                    sys.stderr.write("Failed on [ %d : %d : %d ]\n" %(i, j, s))
                    raise ae
def test_repeat(self):
    """Multiplying a list by n repeats its contents n times."""
    ref = py23_range(0, 1024, 4)
    repeated = dllist(ref) * 4
    self.assertEqual(repeated, dllist(ref * 4))
def test_repeat_empty(self):
    """Multiplying an empty list yields an empty list."""
    self.assertEqual(dllist() * 4, dllist([] * 4))
def test_repeat_inplace(self):
    """*= repeats the list's contents in place."""
    ref = py23_range(0, 1024, 4)
    lst = dllist(ref)
    lst *= 4
    self.assertEqual(lst, dllist(ref * 4))
def test_repeat_inplace_empty(self):
    """*= on an empty list leaves it empty."""
    lst = dllist()
    lst *= 4
    self.assertEqual(lst, dllist([] * 4))
def test_list_readonly_attributes(self):
    """first/last/size of a dllist cannot be assigned from Python code."""
    # Python 3 raises AttributeError for read-only attributes,
    # Python 2 raised TypeError
    if sys.hexversion >= 0x03000000:
        expected_error = AttributeError
    else:
        expected_error = TypeError
    ll = dllist(py23_range(4))
    for attr in ('first', 'last', 'size'):
        self.assertRaises(expected_error, setattr, ll, attr, None)
def test_node_readonly_attributes(self):
    """prev/next of a dllistnode cannot be assigned from Python code."""
    # Python 3 raises AttributeError for read-only attributes,
    # Python 2 raised TypeError
    expected_error = (AttributeError if sys.hexversion >= 0x03000000
                      else TypeError)
    node = dllistnode()
    for attr in ('prev', 'next'):
        self.assertRaises(expected_error, setattr, node, attr, None)
# COMMENTED BECAUSE HASH DOES NOT WORK
# def test_list_hash(self):
# self.assertEqual(hash(dllist()), hash(dllist()))
# self.assertEqual(hash(dllist(py23_range(0, 1024, 4))),
# hash(dllist(py23_range(0, 1024, 4))))
# self.assertEqual(hash(dllist([0, 2])), hash(dllist([0.0, 2.0])))
def suite():
    """Assemble the combined regression suite for both list flavors."""
    combined = unittest.TestSuite()
    for case in (testsllist, testdllist):
        combined.addTest(unittest.makeSuite(case))
    return combined
if __name__ == '__main__':
    # Run the combined sllist/dllist suite with per-test output.
    unittest.TextTestRunner(verbosity=2).run(suite())
| [
"unittest.TestSuite",
"cllist.dllistnode",
"cllist.sllistnode",
"random.shuffle",
"gc.set_debug",
"unittest.makeSuite",
"sys.stderr.write",
"cllist.sllist",
"pdb.set_trace",
"cllist.dllist",
"unittest.TextTestRunner"
] | [((210, 263), 'gc.set_debug', 'gc.set_debug', (['(gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_STATS)'], {}), '(gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_STATS)\n', (222, 263), False, 'import gc\n'), ((61660, 61680), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (61678, 61680), False, 'import unittest\n'), ((837, 845), 'cllist.sllist', 'sllist', ([], {}), '()\n', (843, 845), False, 'from cllist import sllist\n'), ((1049, 1060), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (1055, 1060), False, 'from cllist import sllist\n'), ((1368, 1378), 'cllist.sllist', 'sllist', (['[]'], {}), '([])\n', (1374, 1378), False, 'from cllist import sllist\n'), ((1436, 1460), 'cllist.sllist', 'sllist', (["[None, 1, 'abc']"], {}), "([None, 1, 'abc'])\n", (1442, 1460), False, 'from cllist import sllist\n'), ((1558, 1568), 'cllist.sllist', 'sllist', (['[]'], {}), '([])\n', (1564, 1568), False, 'from cllist import sllist\n'), ((1627, 1651), 'cllist.sllist', 'sllist', (["[None, 1, 'abc']"], {}), "([None, 1, 'abc'])\n", (1633, 1651), False, 'from cllist import sllist\n'), ((2443, 2463), 'cllist.sllist', 'sllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2449, 2463), False, 'from cllist import sllist\n'), ((2476, 2496), 'cllist.sllist', 'sllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (2482, 2496), False, 'from cllist import sllist\n'), ((2509, 2529), 'cllist.sllist', 'sllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (2515, 2529), False, 'from cllist import sllist\n'), ((2542, 2562), 'cllist.sllist', 'sllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (2548, 2562), False, 'from cllist import sllist\n'), ((3348, 3368), 'cllist.sllist', 'sllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (3354, 3368), False, 'from cllist import sllist\n'), ((3381, 3401), 'cllist.sllist', 'sllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (3387, 3401), False, 'from cllist import sllist\n'), ((3414, 3434), 'cllist.sllist', 'sllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (3420, 
3434), False, 'from cllist import sllist\n'), ((3447, 3467), 'cllist.sllist', 'sllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (3453, 3467), False, 'from cllist import sllist\n'), ((3942, 3962), 'cllist.sllist', 'sllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (3948, 3962), False, 'from cllist import sllist\n'), ((3975, 3995), 'cllist.sllist', 'sllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (3981, 3995), False, 'from cllist import sllist\n'), ((4008, 4028), 'cllist.sllist', 'sllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (4014, 4028), False, 'from cllist import sllist\n'), ((4041, 4061), 'cllist.sllist', 'sllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (4047, 4061), False, 'from cllist import sllist\n'), ((4530, 4550), 'cllist.sllist', 'sllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (4536, 4550), False, 'from cllist import sllist\n'), ((4563, 4583), 'cllist.sllist', 'sllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (4569, 4583), False, 'from cllist import sllist\n'), ((4596, 4616), 'cllist.sllist', 'sllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (4602, 4616), False, 'from cllist import sllist\n'), ((4629, 4649), 'cllist.sllist', 'sllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (4635, 4649), False, 'from cllist import sllist\n'), ((5112, 5132), 'cllist.sllist', 'sllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (5118, 5132), False, 'from cllist import sllist\n'), ((5145, 5165), 'cllist.sllist', 'sllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (5151, 5165), False, 'from cllist import sllist\n'), ((5178, 5198), 'cllist.sllist', 'sllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (5184, 5198), False, 'from cllist import sllist\n'), ((5211, 5231), 'cllist.sllist', 'sllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (5217, 5231), False, 'from cllist import sllist\n'), ((5694, 5714), 'cllist.sllist', 'sllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (5700, 5714), False, 'from cllist import sllist\n'), ((5727, 
5747), 'cllist.sllist', 'sllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (5733, 5747), False, 'from cllist import sllist\n'), ((5760, 5780), 'cllist.sllist', 'sllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (5766, 5780), False, 'from cllist import sllist\n'), ((5793, 5813), 'cllist.sllist', 'sllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (5799, 5813), False, 'from cllist import sllist\n'), ((6284, 6304), 'cllist.sllist', 'sllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (6290, 6304), False, 'from cllist import sllist\n'), ((6317, 6337), 'cllist.sllist', 'sllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (6323, 6337), False, 'from cllist import sllist\n'), ((6350, 6370), 'cllist.sllist', 'sllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (6356, 6370), False, 'from cllist import sllist\n'), ((6383, 6403), 'cllist.sllist', 'sllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (6389, 6403), False, 'from cllist import sllist\n'), ((6834, 6845), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (6840, 6845), False, 'from cllist import sllist\n'), ((7475, 7483), 'cllist.sllist', 'sllist', ([], {}), '()\n', (7481, 7483), False, 'from cllist import sllist\n'), ((7774, 7785), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (7780, 7785), False, 'from cllist import sllist\n'), ((8033, 8041), 'cllist.sllist', 'sllist', ([], {}), '()\n', (8039, 8041), False, 'from cllist import sllist\n'), ((8221, 8232), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (8227, 8232), False, 'from cllist import sllist\n'), ((8499, 8507), 'cllist.sllist', 'sllist', ([], {}), '()\n', (8505, 8507), False, 'from cllist import sllist\n'), ((8663, 8683), 'cllist.sllist', 'sllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (8669, 8683), False, 'from cllist import sllist\n'), ((8807, 8827), 'cllist.sllist', 'sllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (8813, 8827), False, 'from cllist import sllist\n'), ((9513, 9521), 'cllist.sllist', 'sllist', ([], {}), 
'()\n', (9519, 9521), False, 'from cllist import sllist\n'), ((9535, 9547), 'cllist.sllistnode', 'sllistnode', ([], {}), '()\n', (9545, 9547), False, 'from cllist import sllistnode\n'), ((9710, 9731), 'cllist.sllist', 'sllist', (["[1, 3, '123']"], {}), "([1, 3, '123'])\n", (9716, 9731), False, 'from cllist import sllist\n'), ((9873, 9894), 'cllist.sllist', 'sllist', (["[1, 3, '123']"], {}), "([1, 3, '123'])\n", (9879, 9894), False, 'from cllist import sllist\n'), ((10079, 10103), 'cllist.sllist', 'sllist', (['[0, 1, 2, 10, 3]'], {}), '([0, 1, 2, 10, 3])\n', (10085, 10103), False, 'from cllist import sllist\n'), ((10179, 10193), 'cllist.sllistnode', 'sllistnode', (['(10)'], {}), '(10)\n', (10189, 10193), False, 'from cllist import sllistnode\n'), ((10566, 10590), 'cllist.sllist', 'sllist', (['[0, 1, 2, 3, 10]'], {}), '([0, 1, 2, 3, 10])\n', (10572, 10590), False, 'from cllist import sllist\n'), ((10638, 10652), 'cllist.sllistnode', 'sllistnode', (['(10)'], {}), '(10)\n', (10648, 10652), False, 'from cllist import sllistnode\n'), ((11066, 11090), 'cllist.sllist', 'sllist', (['[0, 1, 10, 2, 3]'], {}), '([0, 1, 10, 2, 3])\n', (11072, 11090), False, 'from cllist import sllist\n'), ((11166, 11180), 'cllist.sllistnode', 'sllistnode', (['(10)'], {}), '(10)\n', (11176, 11180), False, 'from cllist import sllistnode\n'), ((11556, 11580), 'cllist.sllist', 'sllist', (['[10, 0, 1, 2, 3]'], {}), '([10, 0, 1, 2, 3])\n', (11562, 11580), False, 'from cllist import sllist\n'), ((11628, 11642), 'cllist.sllistnode', 'sllistnode', (['(10)'], {}), '(10)\n', (11638, 11642), False, 'from cllist import sllistnode\n'), ((11973, 11993), 'cllist.sllist', 'sllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (11979, 11993), False, 'from cllist import sllist\n'), ((12590, 12614), 'cllist.sllist', 'sllist', (['[0, 1, 2, 3, 10]'], {}), '([0, 1, 2, 3, 10])\n', (12596, 12614), False, 'from cllist import sllist\n'), ((12663, 12677), 'cllist.sllistnode', 'sllistnode', (['(10)'], {}), '(10)\n', (12673, 
12677), False, 'from cllist import sllistnode\n'), ((13062, 13086), 'cllist.sllist', 'sllist', (['[10, 0, 1, 2, 3]'], {}), '([10, 0, 1, 2, 3])\n', (13068, 13086), False, 'from cllist import sllist\n'), ((13134, 13148), 'cllist.sllistnode', 'sllistnode', (['(10)'], {}), '(10)\n', (13144, 13148), False, 'from cllist import sllistnode\n'), ((13493, 13517), 'cllist.sllist', 'sllist', (['[0, 1, 2, 3, 10]'], {}), '([0, 1, 2, 3, 10])\n', (13499, 13517), False, 'from cllist import sllist\n'), ((13566, 13580), 'cllist.sllistnode', 'sllistnode', (['(10)'], {}), '(10)\n', (13576, 13580), False, 'from cllist import sllistnode\n'), ((14016, 14029), 'cllist.sllist', 'sllist', (['b_ref'], {}), '(b_ref)\n', (14022, 14029), False, 'from cllist import sllist\n'), ((14047, 14068), 'cllist.sllist', 'sllist', (['(a_ref + b_ref)'], {}), '(a_ref + b_ref)\n', (14053, 14068), False, 'from cllist import sllist\n'), ((14081, 14094), 'cllist.sllist', 'sllist', (['a_ref'], {}), '(a_ref)\n', (14087, 14094), False, 'from cllist import sllist\n'), ((14209, 14222), 'cllist.sllist', 'sllist', (['a_ref'], {}), '(a_ref)\n', (14215, 14222), False, 'from cllist import sllist\n'), ((14341, 14354), 'cllist.sllist', 'sllist', (['a_ref'], {}), '(a_ref)\n', (14347, 14354), False, 'from cllist import sllist\n'), ((14570, 14588), 'cllist.sllist', 'sllist', (['filled_ref'], {}), '(filled_ref)\n', (14576, 14588), False, 'from cllist import sllist\n'), ((14605, 14613), 'cllist.sllist', 'sllist', ([], {}), '()\n', (14611, 14613), False, 'from cllist import sllist\n'), ((14747, 14755), 'cllist.sllist', 'sllist', ([], {}), '()\n', (14753, 14755), False, 'from cllist import sllist\n'), ((14912, 14920), 'cllist.sllist', 'sllist', ([], {}), '()\n', (14918, 14920), False, 'from cllist import sllist\n'), ((15195, 15208), 'cllist.sllist', 'sllist', (['b_ref'], {}), '(b_ref)\n', (15201, 15208), False, 'from cllist import sllist\n'), ((15276, 15289), 'cllist.sllist', 'sllist', (['a_ref'], {}), '(a_ref)\n', (15282, 15289), 
False, 'from cllist import sllist\n'), ((15408, 15421), 'cllist.sllist', 'sllist', (['a_ref'], {}), '(a_ref)\n', (15414, 15421), False, 'from cllist import sllist\n'), ((15544, 15557), 'cllist.sllist', 'sllist', (['a_ref'], {}), '(a_ref)\n', (15550, 15557), False, 'from cllist import sllist\n'), ((15797, 15815), 'cllist.sllist', 'sllist', (['filled_ref'], {}), '(filled_ref)\n', (15803, 15815), False, 'from cllist import sllist\n'), ((15832, 15840), 'cllist.sllist', 'sllist', ([], {}), '()\n', (15838, 15840), False, 'from cllist import sllist\n'), ((15978, 15986), 'cllist.sllist', 'sllist', ([], {}), '()\n', (15984, 15986), False, 'from cllist import sllist\n'), ((16163, 16171), 'cllist.sllist', 'sllist', ([], {}), '()\n', (16169, 16171), False, 'from cllist import sllist\n'), ((16467, 16480), 'cllist.sllist', 'sllist', (['b_ref'], {}), '(b_ref)\n', (16473, 16480), False, 'from cllist import sllist\n'), ((16498, 16519), 'cllist.sllist', 'sllist', (['(a_ref + b_ref)'], {}), '(a_ref + b_ref)\n', (16504, 16519), False, 'from cllist import sllist\n'), ((16532, 16545), 'cllist.sllist', 'sllist', (['a_ref'], {}), '(a_ref)\n', (16538, 16545), False, 'from cllist import sllist\n'), ((16665, 16678), 'cllist.sllist', 'sllist', (['a_ref'], {}), '(a_ref)\n', (16671, 16678), False, 'from cllist import sllist\n'), ((16802, 16815), 'cllist.sllist', 'sllist', (['a_ref'], {}), '(a_ref)\n', (16808, 16815), False, 'from cllist import sllist\n'), ((17041, 17059), 'cllist.sllist', 'sllist', (['filled_ref'], {}), '(filled_ref)\n', (17047, 17059), False, 'from cllist import sllist\n'), ((17076, 17084), 'cllist.sllist', 'sllist', ([], {}), '()\n', (17082, 17084), False, 'from cllist import sllist\n'), ((17223, 17231), 'cllist.sllist', 'sllist', ([], {}), '()\n', (17229, 17231), False, 'from cllist import sllist\n'), ((17393, 17401), 'cllist.sllist', 'sllist', ([], {}), '()\n', (17399, 17401), False, 'from cllist import sllist\n'), ((17603, 17611), 'cllist.sllist', 'sllist', ([], {}), 
'()\n', (17609, 17611), False, 'from cllist import sllist\n'), ((18229, 18240), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (18235, 18240), False, 'from cllist import sllist\n'), ((18629, 18640), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (18635, 18640), False, 'from cllist import sllist\n'), ((18883, 18894), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (18889, 18894), False, 'from cllist import sllist\n'), ((19265, 19276), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (19271, 19276), False, 'from cllist import sllist\n'), ((19581, 19592), 'cllist.sllist', 'sllist', (['lst'], {}), '(lst)\n', (19587, 19592), False, 'from cllist import sllist\n'), ((20193, 20206), 'cllist.sllist', 'sllist', (['smlst'], {}), '(smlst)\n', (20199, 20206), False, 'from cllist import sllist\n'), ((21493, 21504), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (21499, 21504), False, 'from cllist import sllist\n'), ((21923, 21934), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (21929, 21934), False, 'from cllist import sllist\n'), ((22331, 22339), 'cllist.sllist', 'sllist', ([], {}), '()\n', (22337, 22339), False, 'from cllist import sllist\n'), ((22565, 22576), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (22571, 22576), False, 'from cllist import sllist\n'), ((23087, 23095), 'cllist.sllist', 'sllist', ([], {}), '()\n', (23093, 23095), False, 'from cllist import sllist\n'), ((23213, 23233), 'cllist.sllist', 'sllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (23219, 23233), False, 'from cllist import sllist\n'), ((23359, 23379), 'cllist.sllist', 'sllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (23365, 23379), False, 'from cllist import sllist\n'), ((25252, 25263), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (25258, 25263), False, 'from cllist import sllist\n'), ((25876, 25884), 'cllist.sllist', 'sllist', ([], {}), '()\n', (25882, 25884), False, 'from cllist import sllist\n'), ((26194, 26205), 'cllist.sllist', 'sllist', 
(['ref'], {}), '(ref)\n', (26200, 26205), False, 'from cllist import sllist\n'), ((26780, 26793), 'cllist.sllist', 'sllist', (['a_ref'], {}), '(a_ref)\n', (26786, 26793), False, 'from cllist import sllist\n'), ((26855, 26868), 'cllist.sllist', 'sllist', (['b_ref'], {}), '(b_ref)\n', (26861, 26868), False, 'from cllist import sllist\n'), ((26886, 26907), 'cllist.sllist', 'sllist', (['(a_ref + b_ref)'], {}), '(a_ref + b_ref)\n', (26892, 26907), False, 'from cllist import sllist\n'), ((27162, 27170), 'cllist.sllist', 'sllist', ([], {}), '()\n', (27168, 27170), False, 'from cllist import sllist\n'), ((27232, 27250), 'cllist.sllist', 'sllist', (['filled_ref'], {}), '(filled_ref)\n', (27238, 27250), False, 'from cllist import sllist\n'), ((27772, 27785), 'cllist.sllist', 'sllist', (['b_ref'], {}), '(b_ref)\n', (27778, 27785), False, 'from cllist import sllist\n'), ((27803, 27824), 'cllist.sllist', 'sllist', (['(a_ref + b_ref)'], {}), '(a_ref + b_ref)\n', (27809, 27824), False, 'from cllist import sllist\n'), ((27837, 27850), 'cllist.sllist', 'sllist', (['a_ref'], {}), '(a_ref)\n', (27843, 27850), False, 'from cllist import sllist\n'), ((27960, 27973), 'cllist.sllist', 'sllist', (['a_ref'], {}), '(a_ref)\n', (27966, 27973), False, 'from cllist import sllist\n'), ((28087, 28100), 'cllist.sllist', 'sllist', (['a_ref'], {}), '(a_ref)\n', (28093, 28100), False, 'from cllist import sllist\n'), ((28316, 28334), 'cllist.sllist', 'sllist', (['filled_ref'], {}), '(filled_ref)\n', (28322, 28334), False, 'from cllist import sllist\n'), ((28351, 28359), 'cllist.sllist', 'sllist', ([], {}), '()\n', (28357, 28359), False, 'from cllist import sllist\n'), ((28488, 28496), 'cllist.sllist', 'sllist', ([], {}), '()\n', (28494, 28496), False, 'from cllist import sllist\n'), ((28648, 28656), 'cllist.sllist', 'sllist', ([], {}), '()\n', (28654, 28656), False, 'from cllist import sllist\n'), ((28866, 28877), 'cllist.sllist', 'sllist', (['lst'], {}), '(lst)\n', (28872, 28877), False, 'from 
cllist import sllist\n'), ((29286, 29297), 'cllist.sllist', 'sllist', (['lst'], {}), '(lst)\n', (29292, 29297), False, 'from cllist import sllist\n'), ((29539, 29550), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (29545, 29550), False, 'from cllist import sllist\n'), ((29648, 29656), 'cllist.sllist', 'sllist', ([], {}), '()\n', (29654, 29656), False, 'from cllist import sllist\n'), ((29792, 29803), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (29798, 29803), False, 'from cllist import sllist\n'), ((29921, 29929), 'cllist.sllist', 'sllist', ([], {}), '()\n', (29927, 29929), False, 'from cllist import sllist\n'), ((30617, 30629), 'cllist.sllistnode', 'sllistnode', ([], {}), '()\n', (30627, 30629), False, 'from cllist import sllistnode\n'), ((31103, 31111), 'cllist.dllist', 'dllist', ([], {}), '()\n', (31109, 31111), False, 'from cllist import dllist\n'), ((31315, 31326), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (31321, 31326), False, 'from cllist import dllist\n'), ((31688, 31698), 'cllist.dllist', 'dllist', (['[]'], {}), '([])\n', (31694, 31698), False, 'from cllist import dllist\n'), ((31756, 31780), 'cllist.dllist', 'dllist', (["[None, 1, 'abc']"], {}), "([None, 1, 'abc'])\n", (31762, 31780), False, 'from cllist import dllist\n'), ((31878, 31888), 'cllist.dllist', 'dllist', (['[]'], {}), '([])\n', (31884, 31888), False, 'from cllist import dllist\n'), ((31947, 31971), 'cllist.dllist', 'dllist', (["[None, 1, 'abc']"], {}), "([None, 1, 'abc'])\n", (31953, 31971), False, 'from cllist import dllist\n'), ((32763, 32783), 'cllist.dllist', 'dllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (32769, 32783), False, 'from cllist import dllist\n'), ((32796, 32816), 'cllist.dllist', 'dllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (32802, 32816), False, 'from cllist import dllist\n'), ((32829, 32849), 'cllist.dllist', 'dllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (32835, 32849), False, 'from cllist import dllist\n'), ((32862, 32882), 
'cllist.dllist', 'dllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (32868, 32882), False, 'from cllist import dllist\n'), ((33668, 33688), 'cllist.dllist', 'dllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (33674, 33688), False, 'from cllist import dllist\n'), ((33701, 33721), 'cllist.dllist', 'dllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (33707, 33721), False, 'from cllist import dllist\n'), ((33734, 33754), 'cllist.dllist', 'dllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (33740, 33754), False, 'from cllist import dllist\n'), ((33767, 33787), 'cllist.dllist', 'dllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (33773, 33787), False, 'from cllist import dllist\n'), ((34262, 34282), 'cllist.dllist', 'dllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (34268, 34282), False, 'from cllist import dllist\n'), ((34295, 34315), 'cllist.dllist', 'dllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (34301, 34315), False, 'from cllist import dllist\n'), ((34328, 34348), 'cllist.dllist', 'dllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (34334, 34348), False, 'from cllist import dllist\n'), ((34361, 34381), 'cllist.dllist', 'dllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (34367, 34381), False, 'from cllist import dllist\n'), ((34850, 34870), 'cllist.dllist', 'dllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (34856, 34870), False, 'from cllist import dllist\n'), ((34883, 34903), 'cllist.dllist', 'dllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (34889, 34903), False, 'from cllist import dllist\n'), ((34916, 34936), 'cllist.dllist', 'dllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (34922, 34936), False, 'from cllist import dllist\n'), ((34949, 34969), 'cllist.dllist', 'dllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (34955, 34969), False, 'from cllist import dllist\n'), ((35432, 35452), 'cllist.dllist', 'dllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (35438, 35452), False, 'from cllist import dllist\n'), ((35465, 
35485), 'cllist.dllist', 'dllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (35471, 35485), False, 'from cllist import dllist\n'), ((35498, 35518), 'cllist.dllist', 'dllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (35504, 35518), False, 'from cllist import dllist\n'), ((35531, 35551), 'cllist.dllist', 'dllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (35537, 35551), False, 'from cllist import dllist\n'), ((36014, 36034), 'cllist.dllist', 'dllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (36020, 36034), False, 'from cllist import dllist\n'), ((36047, 36067), 'cllist.dllist', 'dllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (36053, 36067), False, 'from cllist import dllist\n'), ((36080, 36100), 'cllist.dllist', 'dllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (36086, 36100), False, 'from cllist import dllist\n'), ((36113, 36133), 'cllist.dllist', 'dllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (36119, 36133), False, 'from cllist import dllist\n'), ((36604, 36624), 'cllist.dllist', 'dllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (36610, 36624), False, 'from cllist import dllist\n'), ((36637, 36657), 'cllist.dllist', 'dllist', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (36643, 36657), False, 'from cllist import dllist\n'), ((36670, 36690), 'cllist.dllist', 'dllist', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (36676, 36690), False, 'from cllist import dllist\n'), ((36703, 36723), 'cllist.dllist', 'dllist', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (36709, 36723), False, 'from cllist import dllist\n'), ((37154, 37165), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (37160, 37165), False, 'from cllist import dllist\n'), ((37795, 37803), 'cllist.dllist', 'dllist', ([], {}), '()\n', (37801, 37803), False, 'from cllist import dllist\n'), ((38094, 38105), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (38100, 38105), False, 'from cllist import dllist\n'), ((38353, 38361), 'cllist.dllist', 'dllist', ([], {}), '()\n', 
(38359, 38361), False, 'from cllist import dllist\n'), ((38541, 38552), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (38547, 38552), False, 'from cllist import dllist\n'), ((38819, 38827), 'cllist.dllist', 'dllist', ([], {}), '()\n', (38825, 38827), False, 'from cllist import dllist\n'), ((39021, 39045), 'cllist.dllist', 'dllist', (['[0, 1, 2, 3, 10]'], {}), '([0, 1, 2, 3, 10])\n', (39027, 39045), False, 'from cllist import dllist\n'), ((39094, 39108), 'cllist.dllistnode', 'dllistnode', (['(10)'], {}), '(10)\n', (39104, 39108), False, 'from cllist import dllistnode\n'), ((39548, 39572), 'cllist.dllist', 'dllist', (['[0, 1, 10, 2, 3]'], {}), '([0, 1, 10, 2, 3])\n', (39554, 39572), False, 'from cllist import dllist\n'), ((39648, 39662), 'cllist.dllistnode', 'dllistnode', (['(10)'], {}), '(10)\n', (39658, 39662), False, 'from cllist import dllistnode\n'), ((40124, 40148), 'cllist.dllist', 'dllist', (['[10, 0, 1, 2, 3]'], {}), '([10, 0, 1, 2, 3])\n', (40130, 40148), False, 'from cllist import dllist\n'), ((40196, 40210), 'cllist.dllistnode', 'dllistnode', (['(10)'], {}), '(10)\n', (40206, 40210), False, 'from cllist import dllistnode\n'), ((40627, 40635), 'cllist.dllist', 'dllist', ([], {}), '()\n', (40633, 40635), False, 'from cllist import dllist\n'), ((40951, 40975), 'cllist.dllist', 'dllist', (['[0, 1, 2, 3, 10]'], {}), '([0, 1, 2, 3, 10])\n', (40957, 40975), False, 'from cllist import dllist\n'), ((41024, 41038), 'cllist.dllistnode', 'dllistnode', (['(10)'], {}), '(10)\n', (41034, 41038), False, 'from cllist import dllistnode\n'), ((41470, 41494), 'cllist.dllist', 'dllist', (['[10, 0, 1, 2, 3]'], {}), '([10, 0, 1, 2, 3])\n', (41476, 41494), False, 'from cllist import dllist\n'), ((41542, 41556), 'cllist.dllistnode', 'dllistnode', (['(10)'], {}), '(10)\n', (41552, 41556), False, 'from cllist import dllistnode\n'), ((41994, 42018), 'cllist.dllist', 'dllist', (['[0, 1, 2, 3, 10]'], {}), '([0, 1, 2, 3, 10])\n', (42000, 42018), False, 'from cllist import 
dllist\n'), ((42067, 42081), 'cllist.dllistnode', 'dllistnode', (['(10)'], {}), '(10)\n', (42077, 42081), False, 'from cllist import dllistnode\n'), ((42564, 42577), 'cllist.dllist', 'dllist', (['b_ref'], {}), '(b_ref)\n', (42570, 42577), False, 'from cllist import dllist\n'), ((42595, 42616), 'cllist.dllist', 'dllist', (['(a_ref + b_ref)'], {}), '(a_ref + b_ref)\n', (42601, 42616), False, 'from cllist import dllist\n'), ((42629, 42642), 'cllist.dllist', 'dllist', (['a_ref'], {}), '(a_ref)\n', (42635, 42642), False, 'from cllist import dllist\n'), ((42757, 42770), 'cllist.dllist', 'dllist', (['a_ref'], {}), '(a_ref)\n', (42763, 42770), False, 'from cllist import dllist\n'), ((42889, 42902), 'cllist.dllist', 'dllist', (['a_ref'], {}), '(a_ref)\n', (42895, 42902), False, 'from cllist import dllist\n'), ((43118, 43136), 'cllist.dllist', 'dllist', (['filled_ref'], {}), '(filled_ref)\n', (43124, 43136), False, 'from cllist import dllist\n'), ((43153, 43161), 'cllist.dllist', 'dllist', ([], {}), '()\n', (43159, 43161), False, 'from cllist import dllist\n'), ((43295, 43303), 'cllist.dllist', 'dllist', ([], {}), '()\n', (43301, 43303), False, 'from cllist import dllist\n'), ((43460, 43468), 'cllist.dllist', 'dllist', ([], {}), '()\n', (43466, 43468), False, 'from cllist import dllist\n'), ((43743, 43756), 'cllist.dllist', 'dllist', (['b_ref'], {}), '(b_ref)\n', (43749, 43756), False, 'from cllist import dllist\n'), ((43824, 43837), 'cllist.dllist', 'dllist', (['a_ref'], {}), '(a_ref)\n', (43830, 43837), False, 'from cllist import dllist\n'), ((43956, 43969), 'cllist.dllist', 'dllist', (['a_ref'], {}), '(a_ref)\n', (43962, 43969), False, 'from cllist import dllist\n'), ((44092, 44105), 'cllist.dllist', 'dllist', (['a_ref'], {}), '(a_ref)\n', (44098, 44105), False, 'from cllist import dllist\n'), ((44345, 44363), 'cllist.dllist', 'dllist', (['filled_ref'], {}), '(filled_ref)\n', (44351, 44363), False, 'from cllist import dllist\n'), ((44380, 44388), 'cllist.dllist', 
'dllist', ([], {}), '()\n', (44386, 44388), False, 'from cllist import dllist\n'), ((44526, 44534), 'cllist.dllist', 'dllist', ([], {}), '()\n', (44532, 44534), False, 'from cllist import dllist\n'), ((44711, 44719), 'cllist.dllist', 'dllist', ([], {}), '()\n', (44717, 44719), False, 'from cllist import dllist\n'), ((45015, 45028), 'cllist.dllist', 'dllist', (['b_ref'], {}), '(b_ref)\n', (45021, 45028), False, 'from cllist import dllist\n'), ((45046, 45067), 'cllist.dllist', 'dllist', (['(a_ref + b_ref)'], {}), '(a_ref + b_ref)\n', (45052, 45067), False, 'from cllist import dllist\n'), ((45080, 45093), 'cllist.dllist', 'dllist', (['a_ref'], {}), '(a_ref)\n', (45086, 45093), False, 'from cllist import dllist\n'), ((45213, 45226), 'cllist.dllist', 'dllist', (['a_ref'], {}), '(a_ref)\n', (45219, 45226), False, 'from cllist import dllist\n'), ((45350, 45363), 'cllist.dllist', 'dllist', (['a_ref'], {}), '(a_ref)\n', (45356, 45363), False, 'from cllist import dllist\n'), ((45589, 45607), 'cllist.dllist', 'dllist', (['filled_ref'], {}), '(filled_ref)\n', (45595, 45607), False, 'from cllist import dllist\n'), ((45624, 45632), 'cllist.dllist', 'dllist', ([], {}), '()\n', (45630, 45632), False, 'from cllist import dllist\n'), ((45771, 45779), 'cllist.dllist', 'dllist', ([], {}), '()\n', (45777, 45779), False, 'from cllist import dllist\n'), ((45941, 45949), 'cllist.dllist', 'dllist', ([], {}), '()\n', (45947, 45949), False, 'from cllist import dllist\n'), ((46151, 46159), 'cllist.dllist', 'dllist', ([], {}), '()\n', (46157, 46159), False, 'from cllist import dllist\n'), ((46823, 46834), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (46829, 46834), False, 'from cllist import dllist\n'), ((47270, 47281), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (47276, 47281), False, 'from cllist import dllist\n'), ((47697, 47708), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (47703, 47708), False, 'from cllist import dllist\n'), ((48079, 48090), 'cllist.dllist', 
'dllist', (['ref'], {}), '(ref)\n', (48085, 48090), False, 'from cllist import dllist\n'), ((48451, 48477), 'random.shuffle', 'random.shuffle', (['allIndexes'], {}), '(allIndexes)\n', (48465, 48477), False, 'import random\n'), ((48492, 48503), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (48498, 48503), False, 'from cllist import dllist\n'), ((49253, 49264), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (49259, 49264), False, 'from cllist import dllist\n'), ((49729, 49740), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (49735, 49740), False, 'from cllist import dllist\n'), ((50183, 50191), 'cllist.dllist', 'dllist', ([], {}), '()\n', (50189, 50191), False, 'from cllist import dllist\n'), ((50417, 50428), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (50423, 50428), False, 'from cllist import dllist\n'), ((51037, 51045), 'cllist.dllist', 'dllist', ([], {}), '()\n', (51043, 51045), False, 'from cllist import dllist\n'), ((51163, 51183), 'cllist.dllist', 'dllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (51169, 51183), False, 'from cllist import dllist\n'), ((51309, 51329), 'cllist.dllist', 'dllist', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (51315, 51329), False, 'from cllist import dllist\n'), ((54110, 54121), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (54116, 54121), False, 'from cllist import dllist\n'), ((54734, 54742), 'cllist.dllist', 'dllist', ([], {}), '()\n', (54740, 54742), False, 'from cllist import dllist\n'), ((55052, 55063), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (55058, 55063), False, 'from cllist import dllist\n'), ((55638, 55651), 'cllist.dllist', 'dllist', (['a_ref'], {}), '(a_ref)\n', (55644, 55651), False, 'from cllist import dllist\n'), ((55713, 55726), 'cllist.dllist', 'dllist', (['b_ref'], {}), '(b_ref)\n', (55719, 55726), False, 'from cllist import dllist\n'), ((55744, 55765), 'cllist.dllist', 'dllist', (['(a_ref + b_ref)'], {}), '(a_ref + b_ref)\n', (55750, 55765), False, 'from cllist 
import dllist\n'), ((56023, 56031), 'cllist.dllist', 'dllist', ([], {}), '()\n', (56029, 56031), False, 'from cllist import dllist\n'), ((56093, 56111), 'cllist.dllist', 'dllist', (['filled_ref'], {}), '(filled_ref)\n', (56099, 56111), False, 'from cllist import dllist\n'), ((56633, 56646), 'cllist.dllist', 'dllist', (['b_ref'], {}), '(b_ref)\n', (56639, 56646), False, 'from cllist import dllist\n'), ((56664, 56685), 'cllist.dllist', 'dllist', (['(a_ref + b_ref)'], {}), '(a_ref + b_ref)\n', (56670, 56685), False, 'from cllist import dllist\n'), ((56698, 56711), 'cllist.dllist', 'dllist', (['a_ref'], {}), '(a_ref)\n', (56704, 56711), False, 'from cllist import dllist\n'), ((56821, 56834), 'cllist.dllist', 'dllist', (['a_ref'], {}), '(a_ref)\n', (56827, 56834), False, 'from cllist import dllist\n'), ((56948, 56961), 'cllist.dllist', 'dllist', (['a_ref'], {}), '(a_ref)\n', (56954, 56961), False, 'from cllist import dllist\n'), ((57180, 57198), 'cllist.dllist', 'dllist', (['filled_ref'], {}), '(filled_ref)\n', (57186, 57198), False, 'from cllist import dllist\n'), ((57215, 57223), 'cllist.dllist', 'dllist', ([], {}), '()\n', (57221, 57223), False, 'from cllist import dllist\n'), ((57352, 57360), 'cllist.dllist', 'dllist', ([], {}), '()\n', (57358, 57360), False, 'from cllist import dllist\n'), ((57512, 57520), 'cllist.dllist', 'dllist', ([], {}), '()\n', (57518, 57520), False, 'from cllist import dllist\n'), ((57730, 57741), 'cllist.dllist', 'dllist', (['lst'], {}), '(lst)\n', (57736, 57741), False, 'from cllist import dllist\n'), ((58150, 58161), 'cllist.dllist', 'dllist', (['lst'], {}), '(lst)\n', (58156, 58161), False, 'from cllist import dllist\n'), ((58398, 58409), 'cllist.dllist', 'dllist', (['lst'], {}), '(lst)\n', (58404, 58409), False, 'from cllist import dllist\n'), ((59010, 59023), 'cllist.dllist', 'dllist', (['smlst'], {}), '(smlst)\n', (59016, 59023), False, 'from cllist import dllist\n'), ((60084, 60095), 'cllist.dllist', 'dllist', (['ref'], {}), 
'(ref)\n', (60090, 60095), False, 'from cllist import dllist\n'), ((60193, 60201), 'cllist.dllist', 'dllist', ([], {}), '()\n', (60199, 60201), False, 'from cllist import dllist\n'), ((60337, 60348), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (60343, 60348), False, 'from cllist import dllist\n'), ((60466, 60474), 'cllist.dllist', 'dllist', ([], {}), '()\n', (60472, 60474), False, 'from cllist import dllist\n'), ((61162, 61174), 'cllist.dllistnode', 'dllistnode', ([], {}), '()\n', (61172, 61174), False, 'from cllist import dllistnode\n'), ((61699, 61729), 'unittest.makeSuite', 'unittest.makeSuite', (['testsllist'], {}), '(testsllist)\n', (61717, 61729), False, 'import unittest\n'), ((61749, 61779), 'unittest.makeSuite', 'unittest.makeSuite', (['testdllist'], {}), '(testdllist)\n', (61767, 61779), False, 'import unittest\n'), ((1758, 1778), 'cllist.sllist', 'sllist', (['[None, None]'], {}), '([None, None])\n', (1764, 1778), False, 'from cllist import sllist\n'), ((1850, 1867), 'cllist.sllist', 'sllist', (['[1, None]'], {}), '([1, None])\n', (1856, 1867), False, 'from cllist import sllist\n'), ((1936, 1957), 'cllist.sllist', 'sllist', (["['abc', None]"], {}), "(['abc', None])\n", (1942, 1957), False, 'from cllist import sllist\n'), ((2059, 2073), 'cllist.sllist', 'sllist', (['[None]'], {}), '([None])\n', (2065, 2073), False, 'from cllist import sllist\n'), ((2148, 2165), 'cllist.sllist', 'sllist', (['[1, None]'], {}), '([1, None])\n', (2154, 2165), False, 'from cllist import sllist\n'), ((2237, 2258), 'cllist.sllist', 'sllist', (["['abc', None]"], {}), "(['abc', None])\n", (2243, 2258), False, 'from cllist import sllist\n'), ((12237, 12249), 'cllist.sllistnode', 'sllistnode', ([], {}), '()\n', (12247, 12249), False, 'from cllist import sllistnode\n'), ((12498, 12510), 'cllist.sllistnode', 'sllistnode', ([], {}), '()\n', (12508, 12510), False, 'from cllist import sllistnode\n'), ((14403, 14424), 'cllist.sllist', 'sllist', (['(a_ref + a_ref)'], {}), '(a_ref + 
a_ref)\n', (14409, 14424), False, 'from cllist import sllist\n'), ((14674, 14689), 'cllist.sllist', 'sllist', (['([] + [])'], {}), '([] + [])\n', (14680, 14689), False, 'from cllist import sllist\n'), ((14817, 14840), 'cllist.sllist', 'sllist', (['([] + filled_ref)'], {}), '([] + filled_ref)\n', (14823, 14840), False, 'from cllist import sllist\n'), ((14983, 15006), 'cllist.sllist', 'sllist', (['(filled_ref + [])'], {}), '(filled_ref + [])\n', (14989, 15006), False, 'from cllist import sllist\n'), ((15905, 15920), 'cllist.sllist', 'sllist', (['([] + [])'], {}), '([] + [])\n', (15911, 15920), False, 'from cllist import sllist\n'), ((16869, 16890), 'cllist.sllist', 'sllist', (['(a_ref + a_ref)'], {}), '(a_ref + a_ref)\n', (16875, 16890), False, 'from cllist import sllist\n'), ((17150, 17165), 'cllist.sllist', 'sllist', (['([] + [])'], {}), '([] + [])\n', (17156, 17165), False, 'from cllist import sllist\n'), ((17298, 17321), 'cllist.sllist', 'sllist', (['([] + filled_ref)'], {}), '([] + filled_ref)\n', (17304, 17321), False, 'from cllist import sllist\n'), ((17469, 17492), 'cllist.sllist', 'sllist', (['(filled_ref + [])'], {}), '(filled_ref + [])\n', (17475, 17492), False, 'from cllist import sllist\n'), ((23145, 23157), 'cllist.sllistnode', 'sllistnode', ([], {}), '()\n', (23155, 23157), False, 'from cllist import sllistnode\n'), ((23283, 23295), 'cllist.sllistnode', 'sllistnode', ([], {}), '()\n', (23293, 23295), False, 'from cllist import sllistnode\n'), ((23689, 23700), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (23695, 23700), False, 'from cllist import sllist\n'), ((24263, 24274), 'cllist.sllist', 'sllist', (['ref'], {}), '(ref)\n', (24269, 24274), False, 'from cllist import sllist\n'), ((24754, 24762), 'cllist.sllist', 'sllist', ([], {}), '()\n', (24760, 24762), False, 'from cllist import sllist\n'), ((25009, 25017), 'cllist.sllist', 'sllist', ([], {}), '()\n', (25015, 25017), False, 'from cllist import sllist\n'), ((27309, 27324), 'cllist.sllist', 
'sllist', (['([] + [])'], {}), '([] + [])\n', (27315, 27324), False, 'from cllist import sllist\n'), ((27423, 27446), 'cllist.sllist', 'sllist', (['([] + filled_ref)'], {}), '([] + filled_ref)\n', (27429, 27446), False, 'from cllist import sllist\n'), ((27559, 27582), 'cllist.sllist', 'sllist', (['(filled_ref + [])'], {}), '(filled_ref + [])\n', (27565, 27582), False, 'from cllist import sllist\n'), ((28144, 28165), 'cllist.sllist', 'sllist', (['(a_ref + a_ref)'], {}), '(a_ref + a_ref)\n', (28150, 28165), False, 'from cllist import sllist\n'), ((28415, 28430), 'cllist.sllist', 'sllist', (['([] + [])'], {}), '([] + [])\n', (28421, 28430), False, 'from cllist import sllist\n'), ((28553, 28576), 'cllist.sllist', 'sllist', (['([] + filled_ref)'], {}), '([] + filled_ref)\n', (28559, 28576), False, 'from cllist import sllist\n'), ((28714, 28737), 'cllist.sllist', 'sllist', (['(filled_ref + [])'], {}), '(filled_ref + [])\n', (28720, 28737), False, 'from cllist import sllist\n'), ((29584, 29599), 'cllist.sllist', 'sllist', (['(ref * 4)'], {}), '(ref * 4)\n', (29590, 29599), False, 'from cllist import sllist\n'), ((29690, 29704), 'cllist.sllist', 'sllist', (['([] * 4)'], {}), '([] * 4)\n', (29696, 29704), False, 'from cllist import sllist\n'), ((29849, 29864), 'cllist.sllist', 'sllist', (['(ref * 4)'], {}), '(ref * 4)\n', (29855, 29864), False, 'from cllist import sllist\n'), ((29975, 29989), 'cllist.sllist', 'sllist', (['([] * 4)'], {}), '([] * 4)\n', (29981, 29989), False, 'from cllist import sllist\n'), ((32078, 32098), 'cllist.dllist', 'dllist', (['[None, None]'], {}), '([None, None])\n', (32084, 32098), False, 'from cllist import dllist\n'), ((32170, 32187), 'cllist.dllist', 'dllist', (['[1, None]'], {}), '([1, None])\n', (32176, 32187), False, 'from cllist import dllist\n'), ((32256, 32277), 'cllist.dllist', 'dllist', (["['abc', None]"], {}), "(['abc', None])\n", (32262, 32277), False, 'from cllist import dllist\n'), ((32379, 32393), 'cllist.dllist', 'dllist', 
(['[None]'], {}), '([None])\n', (32385, 32393), False, 'from cllist import dllist\n'), ((32468, 32485), 'cllist.dllist', 'dllist', (['[1, None]'], {}), '([1, None])\n', (32474, 32485), False, 'from cllist import dllist\n'), ((32557, 32578), 'cllist.dllist', 'dllist', (["['abc', None]"], {}), "(['abc', None])\n", (32563, 32578), False, 'from cllist import dllist\n'), ((40859, 40871), 'cllist.dllistnode', 'dllistnode', ([], {}), '()\n', (40869, 40871), False, 'from cllist import dllistnode\n'), ((42951, 42972), 'cllist.dllist', 'dllist', (['(a_ref + a_ref)'], {}), '(a_ref + a_ref)\n', (42957, 42972), False, 'from cllist import dllist\n'), ((43222, 43237), 'cllist.dllist', 'dllist', (['([] + [])'], {}), '([] + [])\n', (43228, 43237), False, 'from cllist import dllist\n'), ((43365, 43388), 'cllist.dllist', 'dllist', (['([] + filled_ref)'], {}), '([] + filled_ref)\n', (43371, 43388), False, 'from cllist import dllist\n'), ((43531, 43554), 'cllist.dllist', 'dllist', (['(filled_ref + [])'], {}), '(filled_ref + [])\n', (43537, 43554), False, 'from cllist import dllist\n'), ((44453, 44468), 'cllist.dllist', 'dllist', (['([] + [])'], {}), '([] + [])\n', (44459, 44468), False, 'from cllist import dllist\n'), ((45417, 45438), 'cllist.dllist', 'dllist', (['(a_ref + a_ref)'], {}), '(a_ref + a_ref)\n', (45423, 45438), False, 'from cllist import dllist\n'), ((45698, 45713), 'cllist.dllist', 'dllist', (['([] + [])'], {}), '([] + [])\n', (45704, 45713), False, 'from cllist import dllist\n'), ((45846, 45869), 'cllist.dllist', 'dllist', (['([] + filled_ref)'], {}), '([] + filled_ref)\n', (45852, 45869), False, 'from cllist import dllist\n'), ((46017, 46040), 'cllist.dllist', 'dllist', (['(filled_ref + [])'], {}), '(filled_ref + [])\n', (46023, 46040), False, 'from cllist import dllist\n'), ((51095, 51107), 'cllist.dllistnode', 'dllistnode', ([], {}), '()\n', (51105, 51107), False, 'from cllist import dllistnode\n'), ((51233, 51245), 'cllist.dllistnode', 'dllistnode', ([], {}), '()\n', 
(51243, 51245), False, 'from cllist import dllistnode\n'), ((51639, 51650), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (51645, 51650), False, 'from cllist import dllist\n'), ((52660, 52671), 'cllist.dllist', 'dllist', (['ref'], {}), '(ref)\n', (52666, 52671), False, 'from cllist import dllist\n'), ((53612, 53620), 'cllist.dllist', 'dllist', ([], {}), '()\n', (53618, 53620), False, 'from cllist import dllist\n'), ((53867, 53875), 'cllist.dllist', 'dllist', ([], {}), '()\n', (53873, 53875), False, 'from cllist import dllist\n'), ((56170, 56185), 'cllist.dllist', 'dllist', (['([] + [])'], {}), '([] + [])\n', (56176, 56185), False, 'from cllist import dllist\n'), ((56284, 56307), 'cllist.dllist', 'dllist', (['([] + filled_ref)'], {}), '([] + filled_ref)\n', (56290, 56307), False, 'from cllist import dllist\n'), ((56420, 56443), 'cllist.dllist', 'dllist', (['(filled_ref + [])'], {}), '(filled_ref + [])\n', (56426, 56443), False, 'from cllist import dllist\n'), ((57005, 57026), 'cllist.dllist', 'dllist', (['(a_ref + a_ref)'], {}), '(a_ref + a_ref)\n', (57011, 57026), False, 'from cllist import dllist\n'), ((57279, 57294), 'cllist.dllist', 'dllist', (['([] + [])'], {}), '([] + [])\n', (57285, 57294), False, 'from cllist import dllist\n'), ((57417, 57440), 'cllist.dllist', 'dllist', (['([] + filled_ref)'], {}), '([] + filled_ref)\n', (57423, 57440), False, 'from cllist import dllist\n'), ((57578, 57601), 'cllist.dllist', 'dllist', (['(filled_ref + [])'], {}), '(filled_ref + [])\n', (57584, 57601), False, 'from cllist import dllist\n'), ((60129, 60144), 'cllist.dllist', 'dllist', (['(ref * 4)'], {}), '(ref * 4)\n', (60135, 60144), False, 'from cllist import dllist\n'), ((60235, 60249), 'cllist.dllist', 'dllist', (['([] * 4)'], {}), '([] * 4)\n', (60241, 60249), False, 'from cllist import dllist\n'), ((60394, 60409), 'cllist.dllist', 'dllist', (['(ref * 4)'], {}), '(ref * 4)\n', (60400, 60409), False, 'from cllist import dllist\n'), ((60520, 60534), 
'cllist.dllist', 'dllist', (['([] * 4)'], {}), '([] * 4)\n', (60526, 60534), False, 'from cllist import dllist\n'), ((61831, 61867), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (61854, 61867), False, 'import unittest\n'), ((3492, 3500), 'cllist.sllist', 'sllist', ([], {}), '()\n', (3498, 3500), False, 'from cllist import sllist\n'), ((3504, 3512), 'cllist.sllist', 'sllist', ([], {}), '()\n', (3510, 3512), False, 'from cllist import sllist\n'), ((3571, 3579), 'cllist.sllist', 'sllist', ([], {}), '()\n', (3577, 3579), False, 'from cllist import sllist\n'), ((3616, 3624), 'cllist.sllist', 'sllist', ([], {}), '()\n', (3622, 3624), False, 'from cllist import sllist\n'), ((4087, 4095), 'cllist.sllist', 'sllist', ([], {}), '()\n', (4093, 4095), False, 'from cllist import sllist\n'), ((4099, 4107), 'cllist.sllist', 'sllist', ([], {}), '()\n', (4105, 4107), False, 'from cllist import sllist\n'), ((4166, 4174), 'cllist.sllist', 'sllist', ([], {}), '()\n', (4172, 4174), False, 'from cllist import sllist\n'), ((4210, 4218), 'cllist.sllist', 'sllist', ([], {}), '()\n', (4216, 4218), False, 'from cllist import sllist\n'), ((4675, 4683), 'cllist.sllist', 'sllist', ([], {}), '()\n', (4681, 4683), False, 'from cllist import sllist\n'), ((4686, 4694), 'cllist.sllist', 'sllist', ([], {}), '()\n', (4692, 4694), False, 'from cllist import sllist\n'), ((4752, 4760), 'cllist.sllist', 'sllist', ([], {}), '()\n', (4758, 4760), False, 'from cllist import sllist\n'), ((4795, 4803), 'cllist.sllist', 'sllist', ([], {}), '()\n', (4801, 4803), False, 'from cllist import sllist\n'), ((5257, 5265), 'cllist.sllist', 'sllist', ([], {}), '()\n', (5263, 5265), False, 'from cllist import sllist\n'), ((5268, 5276), 'cllist.sllist', 'sllist', ([], {}), '()\n', (5274, 5276), False, 'from cllist import sllist\n'), ((5335, 5343), 'cllist.sllist', 'sllist', ([], {}), '()\n', (5341, 5343), False, 'from cllist import sllist\n'), ((5377, 5385), 
'cllist.sllist', 'sllist', ([], {}), '()\n', (5383, 5385), False, 'from cllist import sllist\n'), ((5838, 5846), 'cllist.sllist', 'sllist', ([], {}), '()\n', (5844, 5846), False, 'from cllist import sllist\n'), ((5850, 5858), 'cllist.sllist', 'sllist', ([], {}), '()\n', (5856, 5858), False, 'from cllist import sllist\n'), ((5916, 5924), 'cllist.sllist', 'sllist', ([], {}), '()\n', (5922, 5924), False, 'from cllist import sllist\n'), ((5961, 5969), 'cllist.sllist', 'sllist', ([], {}), '()\n', (5967, 5969), False, 'from cllist import sllist\n'), ((6428, 6436), 'cllist.sllist', 'sllist', ([], {}), '()\n', (6434, 6436), False, 'from cllist import sllist\n'), ((6440, 6448), 'cllist.sllist', 'sllist', ([], {}), '()\n', (6446, 6448), False, 'from cllist import sllist\n'), ((6507, 6515), 'cllist.sllist', 'sllist', ([], {}), '()\n', (6513, 6515), False, 'from cllist import sllist\n'), ((6551, 6559), 'cllist.sllist', 'sllist', ([], {}), '()\n', (6557, 6559), False, 'from cllist import sllist\n'), ((33812, 33820), 'cllist.dllist', 'dllist', ([], {}), '()\n', (33818, 33820), False, 'from cllist import dllist\n'), ((33824, 33832), 'cllist.dllist', 'dllist', ([], {}), '()\n', (33830, 33832), False, 'from cllist import dllist\n'), ((33891, 33899), 'cllist.dllist', 'dllist', ([], {}), '()\n', (33897, 33899), False, 'from cllist import dllist\n'), ((33936, 33944), 'cllist.dllist', 'dllist', ([], {}), '()\n', (33942, 33944), False, 'from cllist import dllist\n'), ((34407, 34415), 'cllist.dllist', 'dllist', ([], {}), '()\n', (34413, 34415), False, 'from cllist import dllist\n'), ((34419, 34427), 'cllist.dllist', 'dllist', ([], {}), '()\n', (34425, 34427), False, 'from cllist import dllist\n'), ((34486, 34494), 'cllist.dllist', 'dllist', ([], {}), '()\n', (34492, 34494), False, 'from cllist import dllist\n'), ((34530, 34538), 'cllist.dllist', 'dllist', ([], {}), '()\n', (34536, 34538), False, 'from cllist import dllist\n'), ((34995, 35003), 'cllist.dllist', 'dllist', ([], {}), '()\n', 
(35001, 35003), False, 'from cllist import dllist\n'), ((35006, 35014), 'cllist.dllist', 'dllist', ([], {}), '()\n', (35012, 35014), False, 'from cllist import dllist\n'), ((35072, 35080), 'cllist.dllist', 'dllist', ([], {}), '()\n', (35078, 35080), False, 'from cllist import dllist\n'), ((35115, 35123), 'cllist.dllist', 'dllist', ([], {}), '()\n', (35121, 35123), False, 'from cllist import dllist\n'), ((35577, 35585), 'cllist.dllist', 'dllist', ([], {}), '()\n', (35583, 35585), False, 'from cllist import dllist\n'), ((35588, 35596), 'cllist.dllist', 'dllist', ([], {}), '()\n', (35594, 35596), False, 'from cllist import dllist\n'), ((35655, 35663), 'cllist.dllist', 'dllist', ([], {}), '()\n', (35661, 35663), False, 'from cllist import dllist\n'), ((35697, 35705), 'cllist.dllist', 'dllist', ([], {}), '()\n', (35703, 35705), False, 'from cllist import dllist\n'), ((36158, 36166), 'cllist.dllist', 'dllist', ([], {}), '()\n', (36164, 36166), False, 'from cllist import dllist\n'), ((36170, 36178), 'cllist.dllist', 'dllist', ([], {}), '()\n', (36176, 36178), False, 'from cllist import dllist\n'), ((36236, 36244), 'cllist.dllist', 'dllist', ([], {}), '()\n', (36242, 36244), False, 'from cllist import dllist\n'), ((36281, 36289), 'cllist.dllist', 'dllist', ([], {}), '()\n', (36287, 36289), False, 'from cllist import dllist\n'), ((36748, 36756), 'cllist.dllist', 'dllist', ([], {}), '()\n', (36754, 36756), False, 'from cllist import dllist\n'), ((36760, 36768), 'cllist.dllist', 'dllist', ([], {}), '()\n', (36766, 36768), False, 'from cllist import dllist\n'), ((36827, 36835), 'cllist.dllist', 'dllist', ([], {}), '()\n', (36833, 36835), False, 'from cllist import dllist\n'), ((36871, 36879), 'cllist.dllist', 'dllist', ([], {}), '()\n', (36877, 36879), False, 'from cllist import dllist\n'), ((20549, 20564), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (20562, 20564), False, 'import pdb\n'), ((20585, 20637), 'sys.stderr.write', 'sys.stderr.write', (["('Failed on [ %d : 
%d ]\\n' % (i, j))"], {}), "('Failed on [ %d : %d ]\\n' % (i, j))\n", (20601, 20637), False, 'import sys\n'), ((21310, 21370), 'sys.stderr.write', 'sys.stderr.write', (["('Failed on [ %d : %d : %d ]\\n' % (i, j, s))"], {}), "('Failed on [ %d : %d : %d ]\\n' % (i, j, s))\n", (21326, 21370), False, 'import sys\n'), ((59911, 59971), 'sys.stderr.write', 'sys.stderr.write', (["('Failed on [ %d : %d : %d ]\\n' % (i, j, s))"], {}), "('Failed on [ %d : %d : %d ]\\n' % (i, j, s))\n", (59927, 59971), False, 'import sys\n')] |
import trio
from trio._highlevel_open_tcp_stream import close_on_error
from trio.socket import socket, SOCK_STREAM
# Feature-detect Unix domain socket support: trio.socket only exposes
# AF_UNIX on platforms that actually provide it (it is absent on e.g.
# Windows), so fall back to a flag instead of failing at import time.
try:
    from trio.socket import AF_UNIX
    has_unix = True
except ImportError:
    has_unix = False

# Public API of this module.
__all__ = ["open_unix_socket"]
async def open_unix_socket(filename,):
    """Connect to a Unix domain socket and wrap it in a stream.

    Unlike the TCP open logic there is no name resolution and no
    happy-eyeballs racing here: a Unix socket has exactly one socket type
    and one address form, so a single create-and-connect suffices.

    Args:
        filename (str or bytes): Path of the socket file to connect to.
            The caller must have read/write permission on it.

    Returns:
        SocketStream: a :class:`~trio.abc.Stream` connected to the given file.

    Raises:
        OSError: If the socket file could not be connected to.
        RuntimeError: If AF_UNIX sockets are not supported.
    """
    if not has_unix:
        raise RuntimeError("Unix sockets are not supported on this platform")
    if filename is None:
        raise ValueError("Filename cannot be None")
    sock = socket(AF_UNIX, SOCK_STREAM)
    # close_on_error guarantees the descriptor is released if connect fails.
    with close_on_error(sock):
        await sock.connect(filename)
    stream = trio.SocketStream(sock)
    return stream
| [
"trio.SocketStream",
"trio._highlevel_open_tcp_stream.close_on_error",
"trio.socket.socket"
] | [((1107, 1135), 'trio.socket.socket', 'socket', (['AF_UNIX', 'SOCK_STREAM'], {}), '(AF_UNIX, SOCK_STREAM)\n', (1113, 1135), False, 'from trio.socket import socket, SOCK_STREAM\n'), ((1216, 1239), 'trio.SocketStream', 'trio.SocketStream', (['sock'], {}), '(sock)\n', (1233, 1239), False, 'import trio\n'), ((1145, 1165), 'trio._highlevel_open_tcp_stream.close_on_error', 'close_on_error', (['sock'], {}), '(sock)\n', (1159, 1165), False, 'from trio._highlevel_open_tcp_stream import close_on_error\n')] |
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
# Standard library modules
import time, sys, subprocess
from threading import Thread
# Third-party modules: exit early with a readable message when missing.
try:
    import psycopg2
except ModuleNotFoundError as err:
    print(err)
    sys.exit(1)
# Project-internal modules
try:
    from mod_common import *
except ModuleNotFoundError as err:
    print(err)
    sys.exit(1)
# Thread class that applies database-driven changes to nftables
class setup_nftables(Thread):
    """Daemon thread that keeps nftables rules in sync with the database.

    Periodically reads the ``users`` table and, per user IP, deletes
    and/or (re)creates NAT, traffic-accounting and speed-limit rules by
    shelling out to the ``nft`` command line tool.

    NOTE(review): helpers such as ``log_write``, ``get_config`` and the
    ``app_work`` shutdown queue appear to come from the ``mod_common``
    star import -- confirm against that module.
    """
    # Initial parameters
    def __init__(self, threads_list, todolist):
        super().__init__()
        self.daemon = True
        self.threads_list = threads_list
        self.todolist = todolist
    # Thread that applies changes to nftables
    def run(self):
        """Main loop: poll the database and reconcile nftables rules."""
        # Write to the log file
        log_write('Thread setup_nftables running')
        try:
            # Connect to the database
            conn_pg = psycopg2.connect(database='nifard', user=get_config('DatabaseUserName'), password=get_config('DatabasePassword'))
        except psycopg2.DatabaseError as error:
            log_write(error)
            sys.exit(1)
        # Polling loop; a non-empty app_work queue signals shutdown
        while not app_work.empty():
            # Read from the database table
            cursor = conn_pg.cursor()
            try:
                cursor.execute("select * from users;")
            except psycopg2.DatabaseError as error:
                log_write(error)
                subprocess.call('nft flush ruleset', shell=True)
                sys.exit(1)
            conn_pg.commit()
            rows = cursor.fetchall()
            # Current nftables rules of the nat table (header/footer stripped)
            rules_nat = subprocess.check_output('nft -a list table nat | head -n -2 | tail +4', shell=True).decode().strip()
            try:
                # Current nftables rules of the traffic table
                rules_traffic = subprocess.check_output('nft -a list table traffic | grep daddr', shell=True).decode().strip()
            except:
                # grep exits non-zero when nothing matches, which makes
                # check_output raise -- treat that as "no rules present"
                rules_traffic = ''
            # Current nftables rules of the speed table (header/footer stripped)
            rules_speed = subprocess.check_output('nft -a list table speed | head -n -2 | tail +4', shell=True).decode().strip()
            #
            for row in rows:
                if app_work.empty(): break # re-check for thread shutdown
                ip_addr = row[0] # IP address
                username = row[1] # user name
                computer = row[2] # computer name
                domain = row[3] # domain name
                speed = row[4] # speed group; value after '_' is parsed below
                # Validate the IP address (dotted quad inside configured mask)
                if ip_addr.count('.') == 3 and ip_addr.find(get_config('ADUserIPMask')) != -1:
                    # Reset the rule-modification variables
                    speed_db = 0
                    speed_nft = 0
                    # Determine the modifications
                    try:
                        # Limited speed (value after '_' is numeric)
                        if int(speed[speed.find('_')+1:])//1024 >= 1:
                            speed_db = int(speed[speed.find('_')+1:])//1024
                        else:
                            speed_db = int(speed[speed.find('_')+1:])
                    except:
                        # Unlimited speed (non-numeric suffix, e.g. 'nolimit')
                        speed_db = speed[speed.find('_')+1:]
                    # Get the currently applied speed from nftables
                    if ' '+ip_addr+' ' in rules_speed:
                        # Extract the limit value from the matching rule line
                        for line in rules_speed.splitlines():
                            if line.split()[2] == ip_addr:
                                speed_nft = int(line.split()[6])
                                break
                    else:
                        if ' '+ip_addr+' ' in rules_traffic:
                            speed_nft = 'nolimit'
                        else:
                            speed_nft = 'disable'
                    #
                    # Delete rules (also done before re-creating them)
                    if speed == 'disable' or speed_db != speed_nft:
                        # Check whether the IP is present in rules_nat
                        if ' '+ip_addr+' ' in rules_nat:
                            rule_nat = ''
                            # Get the rule handle for the nat table
                            for line in rules_nat.splitlines():
                                if line.split()[2] == ip_addr:
                                    rule_nat = line.split()[8]
                                    rule_nat = 'nft delete rule nat postrouting handle '+rule_nat+'\n'
                                    break
                            # Build the counter-delete command for the traffic table
                            rule_traffic = ''
                            rule_counter = 'nft delete counter traffic '+ip_addr+'\n'
                            # Get the rule handle and delete command for the traffic table
                            for line in rules_traffic.splitlines():
                                if line.split()[2] == ip_addr:
                                    rule_traffic = line.split()[10]
                                    rule_traffic = 'nft delete rule traffic prerouting handle '+rule_traffic+'\n'
                                    break
                            rule_speed = ''
                            # Get the rule handle and delete command for the speed table
                            for line in rules_speed.splitlines():
                                if line.split()[2] == ip_addr:
                                    rule_speed = line.split()[11]
                                    rule_speed = 'nft delete rule speed prerouting handle '+rule_speed+'\n'
                                    break
                            # Delete the selected rules from nftables
                            subprocess.call(rule_traffic + rule_nat + rule_speed, shell=True)
                            # Wait before deleting the counter
                            time.sleep(1)
                            subprocess.call(rule_counter, shell=True)
                            # Write to the log file
                            log_write('Delete '+ip_addr+' from nftables')
                    #
                    # Add rules
                    if (speed != 'disable' and speed.find('disable') == -1):
                        # If the IP is not yet in nftables and does not belong to another domain, add it
                        if ' '+ip_addr+' ' not in rules_nat and (domain == get_config('DomainRealm') or domain == 'Domain Unknown'):
                            # Build the nat (masquerade) rule
                            rule_nat = 'nft add rule nat postrouting ip saddr '+ip_addr+' oif '+get_config('InternetInterface')+' masquerade\n'
                            # Build the traffic rules (traffic accounting)
                            rule_traffic = 'nft add counter traffic '+ip_addr+'\n'
                            rule_traffic += 'nft add rule traffic prerouting ip daddr '+ip_addr+' iif '+get_config('InternetInterface')+' counter name '+ip_addr+'\n'
                            # Build the speed rule (traffic limiting)
                            rule_limit = ''
                            if speed.find('nolimit') == -1:
                                rule_limit = 'nft add rule speed prerouting ip daddr '+ip_addr+' limit rate over '+speed[speed.find('_')+1:]+' kbytes/second drop\n'
                            # Add the current rules to nftables
                            subprocess.call(rule_nat + rule_traffic + rule_limit, shell=True)
                            # Write to the log file
                            log_write('Adding '+ip_addr+' in nftables')
            #
            # Close the cursor and delay the next iteration
            cursor.close()
            # Wait up to 5 seconds, checking for shutdown every second
            for tick in range(5):
                if app_work.empty():
                    break
                time.sleep(1)
        conn_pg.close()
        subprocess.call('nft flush ruleset', shell=True)
        # Write to the log file
        log_write('Thread setup_nftables stopped')
        # Remove this thread from the list
        self.threads_list.get()
| [
"subprocess.check_output",
"subprocess.call",
"time.sleep",
"sys.exit"
] | [((219, 230), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (227, 230), False, 'import time, sys, subprocess\n'), ((334, 345), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (342, 345), False, 'import time, sys, subprocess\n'), ((6674, 6722), 'subprocess.call', 'subprocess.call', (['"""nft flush ruleset"""'], {'shell': '(True)'}), "('nft flush ruleset', shell=True)\n", (6689, 6722), False, 'import time, sys, subprocess\n'), ((963, 974), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (971, 974), False, 'import time, sys, subprocess\n'), ((6636, 6649), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6646, 6649), False, 'import time, sys, subprocess\n'), ((1240, 1288), 'subprocess.call', 'subprocess.call', (['"""nft flush ruleset"""'], {'shell': '(True)'}), "('nft flush ruleset', shell=True)\n", (1255, 1288), False, 'import time, sys, subprocess\n'), ((1297, 1308), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1305, 1308), False, 'import time, sys, subprocess\n'), ((1446, 1533), 'subprocess.check_output', 'subprocess.check_output', (['"""nft -a list table nat | head -n -2 | tail +4"""'], {'shell': '(True)'}), "('nft -a list table nat | head -n -2 | tail +4',\n shell=True)\n", (1469, 1533), False, 'import time, sys, subprocess\n'), ((1876, 1965), 'subprocess.check_output', 'subprocess.check_output', (['"""nft -a list table speed | head -n -2 | tail +4"""'], {'shell': '(True)'}), "('nft -a list table speed | head -n -2 | tail +4',\n shell=True)\n", (1899, 1965), False, 'import time, sys, subprocess\n'), ((4845, 4910), 'subprocess.call', 'subprocess.call', (['(rule_traffic + rule_nat + rule_speed)'], {'shell': '(True)'}), '(rule_traffic + rule_nat + rule_speed, shell=True)\n', (4860, 4910), False, 'import time, sys, subprocess\n'), ((4975, 4988), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4985, 4988), False, 'import time, sys, subprocess\n'), ((5003, 5044), 'subprocess.call', 'subprocess.call', (['rule_counter'], {'shell': '(True)'}), 
'(rule_counter, shell=True)\n', (5018, 5044), False, 'import time, sys, subprocess\n'), ((6293, 6358), 'subprocess.call', 'subprocess.call', (['(rule_nat + rule_traffic + rule_limit)'], {'shell': '(True)'}), '(rule_nat + rule_traffic + rule_limit, shell=True)\n', (6308, 6358), False, 'import time, sys, subprocess\n'), ((1653, 1730), 'subprocess.check_output', 'subprocess.check_output', (['"""nft -a list table traffic | grep daddr"""'], {'shell': '(True)'}), "('nft -a list table traffic | grep daddr', shell=True)\n", (1676, 1730), False, 'import time, sys, subprocess\n')] |
from django import forms
from . models import Holdings
class HoldingsForm(forms.ModelForm):
    """ModelForm for creating/updating ``Holdings`` records.

    Exposes title, holding (the uploaded file), authors and category,
    attaching Bootstrap-style CSS classes via the widget overrides.
    """
    class Meta:
        model = Holdings
        fields = ['title', 'holding', 'authors', 'category']
        widgets={
            'title': forms.TextInput(attrs={'class': 'form-control'}),
            'holding': forms.FileInput(attrs={'type': 'file'}),
            'authors': forms.TextInput(attrs={'class': 'form-control'}),
            'category': forms.Select(attrs={'class': 'form-control'}),
        }
class SearchForm(forms.Form):  # create a search form
    """Single-field free-text search form.

    Fix: plain ``forms.Form`` subclasses do not read an inner ``Meta``
    class (that is a ``ModelForm`` mechanism), so the previous Meta-based
    widget declaration was silently ignored.  The widget is now attached
    directly to the field so the ``form-control`` CSS class is actually
    rendered.
    """
    query = forms.CharField(
        max_length=250,
        widget=forms.TextInput(attrs={'class': 'form-control'}),
    )
| [
"django.forms.FileInput",
"django.forms.Select",
"django.forms.CharField",
"django.forms.TextInput"
] | [((580, 611), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (595, 611), False, 'from django import forms\n'), ((234, 282), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (249, 282), False, 'from django import forms\n'), ((307, 346), 'django.forms.FileInput', 'forms.FileInput', ([], {'attrs': "{'type': 'file'}"}), "(attrs={'type': 'file'})\n", (322, 346), False, 'from django import forms\n'), ((371, 419), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (386, 419), False, 'from django import forms\n'), ((445, 490), 'django.forms.Select', 'forms.Select', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (457, 490), False, 'from django import forms\n'), ((667, 715), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (682, 715), False, 'from django import forms\n')] |
import cv2
class VideoReader(object):
    """Iterable wrapper around ``cv2.VideoCapture``.

    Accepts a file path, or an integer-like string (e.g. '0') to select a
    webcam device.  Iterating yields frames as numpy arrays in BGR order;
    ``properties()`` reports the frame count and the (height, width) shape.
    """

    def __init__(self, file_name):
        try:
            # An all-digits name selects a webcam index instead of a file.
            file_name = int(file_name)
        except ValueError:
            pass
        self.file_name = file_name
        # Capture used by properties(); iteration opens its own (see __iter__).
        self.read = cv2.VideoCapture(self.file_name)

    def __iter__(self):
        # Open a fresh capture so each iteration starts at the first frame.
        self.cap = cv2.VideoCapture(self.file_name)
        if not self.cap.isOpened():
            raise IOError('Video {} cannot be opened'.format(self.file_name))
        return self

    def __next__(self):
        ok, frame = self.cap.read()
        if not ok:
            raise StopIteration
        return frame

    def properties(self):
        cap = self.read
        self.w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        self.count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        return int(self.count), (int(self.h), int(self.w))
"cv2.VideoCapture"
] | [((445, 477), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.file_name'], {}), '(self.file_name)\n', (461, 477), False, 'import cv2\n'), ((522, 554), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.file_name'], {}), '(self.file_name)\n', (538, 554), False, 'import cv2\n')] |
import os
import json
from contextlib import suppress
from OrderBook import *
from Signal import Signal
class OrderBookContainer:
    """Loads JSON order-book snapshots (one per line) from a file and
    supports training-dataset generation and simple signal-driven
    backtesting over them."""

    def __init__(self, path_to_file):
        """Parse every line of *path_to_file* into an OrderBook."""
        self.order_books = []
        self.trades = []
        self.cur_directory = os.path.dirname(path_to_file)
        self.f_name = os.path.split(path_to_file)[1]
        with open(path_to_file, 'r') as infile:
            for line in infile:
                ob = json.loads(line)
                self.order_books.append(OrderBook(ob))

    def create_training_dataset(self):
        """Write (volumes, direction) samples to ``Datasets/<name>.ds``.

        A sample is emitted whenever the mid price changes between two
        consecutive snapshots; direction is 1 for up, 0 for down.
        """
        if not self.order_books:
            return
        output_dir = os.path.join(self.cur_directory, 'Datasets')
        with suppress(OSError):  # the directory may already exist
            os.mkdir(output_dir)
        dataset_file_path = os.path.splitext(os.path.join(output_dir, self.f_name))[0] + '.ds'
        best_prices = self.order_books[0].best_prices
        mid_price = (best_prices['buy_price'] + best_prices['sell_price']) / 2
        with open(dataset_file_path, 'w') as json_file:
            for idx, ob in enumerate(self.order_books[0:-1]):
                next_best_prices = self.order_books[idx + 1].best_prices
                next_mid_price = (next_best_prices['buy_price'] + next_best_prices['sell_price']) / 2
                if mid_price != next_mid_price:
                    direction = 0 if mid_price > next_mid_price else 1
                    json.dump({'volumes': ob.volumes, 'direction': direction}, json_file)
                    json_file.write('\n')
                    mid_price = next_mid_price

    def _open_position(self, best_prices, signal):
        """Open a trade in the direction of *signal* at current prices."""
        trade = {'direction': signal, 'open_time': best_prices['time']}
        if signal == Signal.BUY:
            trade['open_price'] = best_prices['buy_price']
        elif signal == Signal.SELL:
            trade['open_price'] = best_prices['sell_price']
        self.trades.append(trade)

    def _close_position(self, best_prices):
        """Close the most recent trade at current prices."""
        trade = self.trades[-1]
        trade['close_time'] = best_prices['time']
        if trade['direction'] == Signal.BUY:
            trade['close_price'] = best_prices['sell_price']
        elif trade['direction'] == Signal.SELL:
            trade['close_price'] = best_prices['buy_price']

    def _reverse_position(self, best_prices, signal):
        """Close the open trade and immediately open one the other way."""
        self._close_position(best_prices)
        self._open_position(best_prices, signal)

    def _simulate(self, make_signal):
        """Shared backtest loop; *make_signal(ob)* yields a Signal per book.

        Bug fixes vs the original implementations:
        - a WAIT signal arriving before any trade was opened indexed into
          the empty trades list (``self.trades[-1]``) and raised IndexError;
        - the final ``_close_position`` ran even when no trade existed
          (IndexError). The open position (if any) is now closed at the
          last order book's prices.
        """
        self.trades = []
        for ob in self.order_books[0:-1]:
            signal = make_signal(ob)
            if signal == Signal.WAIT:
                continue
            best_prices = ob.best_prices
            if not self.trades:
                self._open_position(best_prices, signal)
            elif signal != self.trades[-1]['direction']:
                self._reverse_position(best_prices, signal)
        if self.trades:
            self._close_position(self.order_books[-1].best_prices)
        return self.trades

    def backtest(self, generator, threshold):
        """Backtest with ``generator(volumes, threshold)`` as signal source."""
        return self._simulate(lambda ob: generator(ob.volumes, threshold))

    def backtest_n(self, generator, ffnn, threshold):
        """Backtest with ``generator(ffnn, volumes, threshold)`` as source."""
        return self._simulate(lambda ob: generator(ffnn, ob.volumes, threshold))
return self.trades
| [
"json.loads",
"os.path.join",
"os.path.split",
"os.path.dirname",
"contextlib.suppress",
"os.mkdir",
"json.dump"
] | [((267, 296), 'os.path.dirname', 'os.path.dirname', (['path_to_file'], {}), '(path_to_file)\n', (282, 296), False, 'import os\n'), ((662, 706), 'os.path.join', 'os.path.join', (['self.cur_directory', '"""Datasets"""'], {}), "(self.cur_directory, 'Datasets')\n", (674, 706), False, 'import os\n'), ((320, 347), 'os.path.split', 'os.path.split', (['path_to_file'], {}), '(path_to_file)\n', (333, 347), False, 'import os\n'), ((723, 740), 'contextlib.suppress', 'suppress', (['OSError'], {}), '(OSError)\n', (731, 740), False, 'from contextlib import suppress\n'), ((755, 775), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (763, 775), False, 'import os\n'), ((457, 473), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (467, 473), False, 'import json\n'), ((824, 861), 'os.path.join', 'os.path.join', (['output_dir', 'self.f_name'], {}), '(output_dir, self.f_name)\n', (836, 861), False, 'import os\n'), ((1508, 1577), 'json.dump', 'json.dump', (["{'volumes': ob.volumes, 'direction': direction}", 'json_file'], {}), "({'volumes': ob.volumes, 'direction': direction}, json_file)\n", (1517, 1577), False, 'import json\n')] |
'''Functions and classes to wrap existing classes. Provides a wrapper metaclass
and also a function that returns a wrapped class. The function is more flexible
as a metaclass has multiple inheritence limitations.
A wrapper metaclass for building wrapper objects. It is instantiated by
specifying a class to be to be wrapped as a parent class with WrapperMetaClass
as the metaclass. This creates a wrapped/proxy base class for that type of
object. Note: this subverts the subclass mechanism as it does not actually
create a subclass of the wrapped class. Instead it is simply used to create the
forwarding of the member functions and attributes, while the wrapped class is
replaced with object as the parent.
Note: if a constructor is provided for the wrapper then it should call
init_wrapper manually. It also sets up '_wrapped' and '_wrapped_cls' attributes
so these cannot be attributes of the wrapped class.
This metaclass is to be used for wrapping clingo.Control, clingo.SolveHandle,
and clingo.Model objects.
Note: some ideas and code have been copied from:
https://code.activestate.com/recipes/496741-object-proxying/
'''
import functools
import inspect
# ------------------------------------------------------------------------------
# Make proxy member functions and properties
# ------------------------------------------------------------------------------
def _make_wrapper_function(fn):
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
func=getattr(self._wrapped,fn.__name__)
return func(*args, **kwargs)
return wrapper
def _make_wrapper_property(name, get_only=True):
def getter(self):
return getattr(self._wrapped,name)
def setter(self,x):
return self._wrapped.__setattr__(name,x)
return property(getter,setter)
def _check_wrapper_object(wrapper,strict=False):
ActualType = type(wrapper._wrapped)
WrappedType = wrapper._wrapped_cls
if issubclass(ActualType,WrappedType): return
if strict:
raise TypeError(("Invalid proxied object {} not of expected type "
"{}").format(wrapper._wrapped,WrappedType))
# Default constructor installed on generated wrapper classes.
def init_wrapper(wrapper, *args, **kwargs):
    """Initialise a wrapper: adopt an existing object passed as the
    keyword-only argument ``wrapped_``, or build a fresh instance of the
    proxied class from the remaining arguments.

    Bug fix: the exclusivity check used ``and`` instead of ``or``, so
    combining ``wrapped_`` with positional args (or extra keyword args)
    was silently accepted instead of raising ValueError.
    """
    Wrapped = wrapper._wrapped_cls
    if "wrapped_" in kwargs:
        if len(args) != 0 or len(kwargs) != 1:
            raise ValueError(("Invalid initialisation: the 'wrapped_' argument "
                              "cannot be combined with other arguments"))
        wrapper._wrapped = kwargs["wrapped_"]
        _check_wrapper_object(wrapper, strict=False)
    else:
        wrapper._wrapped = Wrapped(*args, **kwargs)
# ------------------------------------------------------------------------------
# The wrapper metaclass
# ------------------------------------------------------------------------------
class WrapperMetaClass(type):
    """Metaclass that turns ``class X(Wrapped, metaclass=WrapperMetaClass)``
    into a proxy class: X does NOT actually inherit from Wrapped; instead
    each of Wrapped's callables/attributes is mirrored as a forwarder onto
    the instance stored in ``self._wrapped``.
    """
    def __new__(meta, name, bases, dct):
        if len(bases) != 1:
            raise TypeError("ProxyMetaClass requires exactly one parent class")
        Wrapped = bases[0]
        # The generated class derives from object, not from Wrapped.
        bases = (object,)
        ignore = ["__init__", "__new__", "__dict__", "__weakref__", "__setattr__",
                  "__getattr__"]
        if "_wrapped_cls" in dct:
            # Bug fix: the original formatted the undefined name ``PrClass``
            # here, which raised NameError instead of this TypeError.
            raise TypeError(("ProxyMetaClass cannot proxy a class with a "
                             "\"_wrapped_cls\" attribute: {}").format(name))
        dct["_wrapped_cls"] = Wrapped
        # Mirror the attributes of the proxied class
        for key, value in Wrapped.__dict__.items():
            if key in ignore: continue
            if key in dct: continue
            if callable(value):
                dct[key] = _make_wrapper_function(value)
            else:
                dct[key] = _make_wrapper_property(key)
        # Create the init function if none is provided
        if "__init__" not in dct: dct["__init__"] = init_wrapper
        return super(WrapperMetaClass, meta).__new__(meta, name, bases, dct)
#------------------------------------------------------------------------------
# Alternative wrapper implementation that doesn't use a metaclass. The metaclass
# version is a problem when wrapping a class that already has a metaclass. This
# version takes a class to be wrapped and an optional override class. In then
# creates an wrapper class that has all the properties and member functions of
# the override class as well as the wrapped class (with the override class
# overriding any function/property that is common to both).
# ------------------------------------------------------------------------------
def make_class_wrapper(inputclass, override=None):
    """Return a new proxy class for *inputclass* (not a subclass of it).

    Every attribute of *inputclass* is mirrored as a forwarder onto the
    instance stored in ``self._wrapped``; entries in *override* take
    precedence and are copied verbatim. Docstrings are copied from the
    override first, then from the input class, for attributes that lack
    one. More flexible than WrapperMetaClass since no metaclass is
    involved.
    """
    # Fetch the docstring of cls.key when it is a property or callable;
    # any lookup failure is deliberately treated as "no docstring".
    def getattrdoc(cls, key):
        if not cls: return None
        try:
            attr = getattr(cls,key)
            if isinstance(attr,property) or callable(attr):
                return attr.__doc__
            return None
        except:
            return None
    Wrapped = inputclass
    name = inputclass.__name__
    # Dunders that must not be mirrored from the wrapped class...
    w_ignore=set(["__init__", "__new__", "__del__","__weakref__", "__setattr__",
                "__getattr__", "__module__", "__name__", "__dict__",
                "__abstractmethods__", "__orig_bases__", "__parameters__", "_abc_impl"])
    # ...and those that must not be copied from the override class.
    o_ignore=set(["__module__", "__new__", "__dict__", "__weakref__", "__name__"])
    dct = {}
    # Override entries are copied first so the mirroring loop below skips them.
    if override:
        for key,value in override.__dict__.items():
            if key in o_ignore: continue
            if key == "__doc__" and not value: continue
            dct[key] = value
    # "_wrapped_cls" is reserved for the machinery itself.
    if "_wrapped_cls" in dct:
        raise TypeError(("The overrides cannot contain a "
                        "\"_wrapped_cls\" attribute: {}").format(dct))
    dct["_wrapped_cls"] = Wrapped
    # Mirror the attributes of the proxied class
    for key,value in Wrapped.__dict__.items():
        if key in w_ignore: continue
        if key in dct: continue
        if key == "__doc__" and key not in dct:
            dct[key]=value
        elif callable(value):
            dct[key]=_make_wrapper_function(value)
        else:
            dct[key]=_make_wrapper_property(key)
    # Create a basic init function if none is provided
    if "__init__" not in dct: dct["__init__"] = init_wrapper
    WrapperClass = type(name,(object,),dct)
    # Now go through and add docstrings if necessary
    for key in dir(WrapperClass):
        attr = getattr(WrapperClass, key)
        if inspect.isclass(attr): continue
        if callable(attr) and attr.__doc__: continue
        doc1 = getattrdoc(override,key)
        doc2 = getattrdoc(inputclass,key)
        if doc1: attr.__doc__ = doc1
        elif doc2: attr.__doc__ = doc2
    return WrapperClass
#------------------------------------------------------------------------------
# main
#------------------------------------------------------------------------------
# This module provides building blocks only and is not meant to be executed.
if __name__ == "__main__":
    raise RuntimeError('Cannot run modules')
| [
"inspect.isclass",
"functools.wraps"
] | [((1412, 1431), 'functools.wraps', 'functools.wraps', (['fn'], {}), '(fn)\n', (1427, 1431), False, 'import functools\n'), ((6636, 6657), 'inspect.isclass', 'inspect.isclass', (['attr'], {}), '(attr)\n', (6651, 6657), False, 'import inspect\n')] |
import os
from biicode.common.model.brl.block_cell_name import BlockCellName
from biicode.common.model.bii_type import BiiType
def _binary_name(name):
return os.path.splitext(name.replace("/", "_"))[0]
class CPPTarget(object):
    """Base class for CMake targets: tracks source files, dependency
    targets, system headers and include paths."""

    def __init__(self):
        self.files = set()        # source files belonging to this target
        self.dep_targets = set()  # BlockNames this target depends on
        self.system = set()       # system headers (stdio.h, math.h, ...)
        self.include_paths = {}   # {order: path}; later replaced by full paths

    @property
    def dep_names(self):
        """Sorted binary names of all dependency targets."""
        return sorted(_binary_name(dep) for dep in self.dep_targets)
class CPPLibTarget(CPPTarget):
    """CMake variable definitions for a block's library target
    (STATIC by default)."""
    template = """
# LIBRARY {library_name} ##################################
# with interface {library_name}_interface
# Source code files of the library
SET(BII_LIB_SRC  {files})
# STATIC by default if empty, or SHARED
SET(BII_LIB_TYPE {type})
# Dependencies to other libraries (user2_block2, user3_blockX)
SET(BII_LIB_DEPS {library_name}_interface {deps})
# System included headers
SET(BII_LIB_SYSTEM_HEADERS {system})
# Required include paths
SET(BII_LIB_INCLUDE_PATHS {paths})
"""
    def __init__(self, block_name):
        """Create the library target for *block_name*."""
        CPPTarget.__init__(self)
        self.name = _binary_name(block_name)
        self.type = ""  # empty string means STATIC

    def dumps(self):
        """Render the CMake template with this target's collected data."""
        return CPPLibTarget.template.format(
            library_name=self.name,
            files="\n\t\t\t".join(sorted(self.files)),
            type=self.type,
            deps=" ".join(self.dep_names),
            system=" ".join(sorted(self.system)),
            paths="\n\t\t\t\t\t".join(self.include_paths))
class CPPExeTarget(CPPTarget):
    """CMake variable definitions for one executable built from a block."""
    template = """
# EXECUTABLE {exe_name} ##################################
SET(BII_{exe_name}_SRC {files})
SET(BII_{exe_name}_DEPS {block_interface} {deps})
# System included headers
SET(BII_{exe_name}_SYSTEM_HEADERS {system})
# Required include paths
SET(BII_{exe_name}_INCLUDE_PATHS {paths})
"""
    def __init__(self, main):
        """Create an executable target from *main*, the BlockCellName of a
        source file (headers are rejected)."""
        CPPTarget.__init__(self)
        assert isinstance(main, BlockCellName)
        assert not BiiType.isCppHeader(main.extension)
        self.main = main
        self.files.add(main.cell_name)
        self.name = _binary_name(main)
        self.block_interface = _binary_name(main.block_name) + "_interface"
        self.simple_name = _binary_name(main.cell_name)

    def dumps(self):
        """Render the CMake template with this executable's collected data."""
        return CPPExeTarget.template.format(
            block_interface=self.block_interface,
            exe_name=self.simple_name,
            files="\n\t\t\t".join(sorted(self.files)),
            deps=" ".join(self.dep_names),
            system=" ".join(sorted(self.system)),
            paths="\n\t\t\t\t\t".join(self.include_paths))
class CPPBlockTargets(object):
    """All targets defined in one block: a single library plus any number
    of executables and tests. (Every block also gets an interface lib,
    but that one needs no parametrization here.)"""

    def __init__(self, block_name):
        self.block_name = block_name
        self.is_dep = False   # True when the block lives in deps/ instead of blocks/
        self.data = set()
        self.lib = CPPLibTarget(block_name)
        self.exes = []        # CPPExeTarget instances
        self.tests = set()    # CPPExeTarget instances

    @property
    def filename(self):
        return "bii_%s_vars.cmake" % _binary_name(self.block_name)

    def dumps(self):
        """Render the whole per-block cmake vars file."""
        exe_list = """# Executables to be created
SET(BII_BLOCK_EXES {executables})
SET(BII_BLOCK_TESTS {tests})
"""
        pieces = ["# Automatically generated file, do not edit\n"
                  "SET(BII_IS_DEP %s)\n" % self.is_dep]
        pieces.append(self.lib.dumps())
        exe_names = sorted(t.simple_name for t in self.exes)
        test_names = sorted(t.simple_name for t in self.tests)
        pieces.append(exe_list.format(executables="\n\t\t\t".join(exe_names),
                                      tests="\n\t\t\t".join(test_names)))
        pieces.extend(exe.dumps() for exe in self.exes)
        return "\n".join(pieces)
| [
"biicode.common.model.bii_type.BiiType.isCppHeader"
] | [((2378, 2413), 'biicode.common.model.bii_type.BiiType.isCppHeader', 'BiiType.isCppHeader', (['main.extension'], {}), '(main.extension)\n', (2397, 2413), False, 'from biicode.common.model.bii_type import BiiType\n')] |
'''
This program implements a Fast Neural Style Transfer model.
References:
https://www.tensorflow.org/tutorials/generative/style_transfer
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import tensorflow_hub as hub
import os, sys
import time
import numpy as np
from BaseModel import BaseModel
from utils.data_pipeline import *
class FastNeuralStyleTransfer(BaseModel):
    """Fast Neural Style Transfer using the pre-trained magenta
    arbitrary-image-stylization module from TensorFlow Hub."""

    def __init__(self):
        """Initialize the base model."""
        super().__init__()

    def build_model(self):
        """Load the stylization module from TF-Hub."""
        hub_module_path = 'https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/1'
        self.hub_module = hub.load(hub_module_path)

    def fit(self, content_image, style_image, output_path = 'stylized_image_fast.png'):
        """Stylize *content_image* with *style_image*, save and time it."""
        start = time.time()
        outputs = self.hub_module(tf.constant(content_image), tf.constant(style_image))
        self.stylized_image = outputs[0]
        self.save_output(output_path)
        end = time.time()
        print("Total time: {:.1f}".format(end - start))

    def predict(self):
        """Return the most recently generated stylized image."""
        return self.stylized_image

    def save_output(self, img_path):
        """Convert the stylized tensor to an image and write it to disk."""
        tensor_to_image(self.stylized_image).save(img_path)
| [
"tensorflow.constant",
"time.time",
"tensorflow_hub.load"
] | [((794, 819), 'tensorflow_hub.load', 'hub.load', (['hub_module_path'], {}), '(hub_module_path)\n', (802, 819), True, 'import tensorflow_hub as hub\n'), ((963, 974), 'time.time', 'time.time', ([], {}), '()\n', (972, 974), False, 'import time\n'), ((1130, 1141), 'time.time', 'time.time', ([], {}), '()\n', (1139, 1141), False, 'import time\n'), ((1021, 1047), 'tensorflow.constant', 'tf.constant', (['content_image'], {}), '(content_image)\n', (1032, 1047), True, 'import tensorflow as tf\n'), ((1049, 1073), 'tensorflow.constant', 'tf.constant', (['style_image'], {}), '(style_image)\n', (1060, 1073), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
# @Author: Anderson
# @Date: 2019-04-25 00:30:09
# @Last Modified by: ander
# @Last Modified time: 2019-12-07 01:14:16
from django.urls import path
from . import views
# URL routes for the editor app: the editor page itself plus the endpoint
# that receives uploaded code.
urlpatterns = [
	path("", views.editor, name="editor"),
	path("upload_code", views.upload_code, name="upload_code")
]
| [
"django.urls.path"
] | [((218, 255), 'django.urls.path', 'path', (['""""""', 'views.editor'], {'name': '"""editor"""'}), "('', views.editor, name='editor')\n", (222, 255), False, 'from django.urls import path\n'), ((261, 319), 'django.urls.path', 'path', (['"""upload_code"""', 'views.upload_code'], {'name': '"""upload_code"""'}), "('upload_code', views.upload_code, name='upload_code')\n", (265, 319), False, 'from django.urls import path\n')] |
import numpy as np
import torchvision.datasets as datasets
from pathlib import Path
import libs.dirs as dirs
import libs.utils as utils
import libs.dataset_utils as dutils
import models.utils as mutils
import libs.commons as commons
from libs.vis_functions import plot_confusion_matrix
def wrapper_train(epochs, model_path, history_path, dataset_path):
    """Train a network on the dataset at *dataset_path* for *epochs* epochs
    and return (val_loss, val_accuracy, confusion_matrix) of the epoch with
    the lowest validation loss."""
    seed = None          # no fixed RNG seed: each run differs
    device_id = 0
    numImgBatch = 256
    use_weights = True   # weight the loss by class frequency
    # ImageNet statistics
    dataTransforms = mutils.resnet_transforms(commons.IMAGENET_MEAN, commons.IMAGENET_STD)
    # Load Dataset objects for train and val sets from folder
    sets = ['train', 'val']
    imageDataset = {}
    for phase in sets:
        f = dataset_path / phase
        imageDataset[phase] = datasets.ImageFolder(str(f),
                                           transform=dataTransforms[phase],
                                           is_valid_file=utils.check_empty_file)
    # NOTE(review): imageDataset is built but never passed to train_network;
    # presumably train_network reloads the folders itself -- confirm.
    history, _ = mutils.train_network(dataset_path, dataTransforms, epochs=epochs,
                                        batch_size=numImgBatch,
                                        model_path=model_path,
                                        history_path=history_path,
                                        seed=seed,
                                        weighted_loss=use_weights,
                                        device_id=device_id)
    # Get best epoch results
    bestValIndex = np.argmin(history['loss-val'])
    bestValLoss = history['loss-val'][bestValIndex]
    bestValAcc = history['acc-val'][bestValIndex]
    confMat = history['conf-val'][bestValIndex]
    return bestValLoss, bestValAcc, confMat
if __name__ == "__main__":
    # Run several independent trainings on the chosen dataset and log the
    # resulting loss/accuracy statistics plus a confusion matrix plot.
    numEvals = 5
    net_type = dutils.get_input_network_type(commons.network_types)
    val_type = dutils.get_input_network_type(commons.val_types, message="validation set")
    rede = int(input("\nEnter net number.\n"))
    numEpochs = 25
    # Dataset root folder
    datasetPath = Path(dirs.dataset) / "{}_dataset_rede_{}_val_{}".format(net_type, rede, val_type)
    datasetName = datasetPath.stem
    modelFolder = Path(dirs.saved_models) / \
        "{}_{}_epochs".format(datasetName, numEpochs)
    historyFolder = Path(dirs.saved_models) / \
        "history_{}_{}_epochs".format(datasetName, numEpochs)
    filePath = Path(dirs.results) / \
        "log_evaluation_{}_{}_epochs.txt".format(datasetName, numEpochs)
    confMatPath = Path(dirs.results) / \
        "confusion_matrix_{}.pdf".format(datasetName)
    valLoss = []
    valAcc  = []
    print()
    # Run function many times and save best results
    for i in range(numEvals):
        print("\nStarting run number {}/{}.\n".format(i+1, numEvals))
        modelPath   = modelFolder / "model_run_{}.pt".format(i)
        historyPath = historyFolder / "history_run_{}.pickle".format(i)
        roundValLoss, roundValAcc, confMat = wrapper_train(numEpochs, modelPath, historyPath, datasetPath)
        valLoss.append(roundValLoss)
        classAcc = mutils.compute_class_acc(confMat)
        avgAcc = np.mean(classAcc)
        valAcc.append(roundValAcc)
        print("Debug\nAvg acc: {:.3f}".format(avgAcc))
        print("other acc: {:.3f}\n".format(roundValAcc))
        # Save best confusion matrix
        if np.argmin(valLoss) == i:
            bestConfMat = confMat
    printString = ""
    printString += "\nFinished training {} evaluation runs for dataset\n{}\n".format(numEvals, datasetPath)
    printString += "\nResulting statistics:\n\
                    Val Loss:\n\
                        Mean: {:.3f}\n\
                        Std : {:.3f}\n\
                    Val Avg Acc:\n\
                        Mean: {:.5f}\n\
                        Std {:.5f}\n".format(np.mean(valLoss), np.std(valLoss),
                                            np.mean(valAcc), np.std(valAcc))
    print(printString)
    with open(filePath, mode='w') as f:
        f.write(printString)
    title = "Confusion Matrix "+str(datasetName)
    # NOTE(review): bestConfMat is tracked above but the plot below uses
    # confMat from the LAST run -- confirm which matrix was intended.
    plot_confusion_matrix(confMat, title=title, normalize=True, show=False, save_path=confMatPath)
    # print("Conf matrix:")
    # print(confMat)
| [
"numpy.mean",
"libs.dataset_utils.get_input_network_type",
"pathlib.Path",
"numpy.std",
"numpy.argmin",
"models.utils.train_network",
"models.utils.compute_class_acc",
"models.utils.resnet_transforms",
"libs.vis_functions.plot_confusion_matrix"
] | [((556, 625), 'models.utils.resnet_transforms', 'mutils.resnet_transforms', (['commons.IMAGENET_MEAN', 'commons.IMAGENET_STD'], {}), '(commons.IMAGENET_MEAN, commons.IMAGENET_STD)\n', (580, 625), True, 'import models.utils as mutils\n'), ((1045, 1252), 'models.utils.train_network', 'mutils.train_network', (['dataset_path', 'dataTransforms'], {'epochs': 'epochs', 'batch_size': 'numImgBatch', 'model_path': 'model_path', 'history_path': 'history_path', 'seed': 'seed', 'weighted_loss': 'use_weights', 'device_id': 'device_id'}), '(dataset_path, dataTransforms, epochs=epochs,\n batch_size=numImgBatch, model_path=model_path, history_path=\n history_path, seed=seed, weighted_loss=use_weights, device_id=device_id)\n', (1065, 1252), True, 'import models.utils as mutils\n'), ((1533, 1563), 'numpy.argmin', 'np.argmin', (["history['loss-val']"], {}), "(history['loss-val'])\n", (1542, 1563), True, 'import numpy as np\n'), ((1831, 1883), 'libs.dataset_utils.get_input_network_type', 'dutils.get_input_network_type', (['commons.network_types'], {}), '(commons.network_types)\n', (1860, 1883), True, 'import libs.dataset_utils as dutils\n'), ((1899, 1973), 'libs.dataset_utils.get_input_network_type', 'dutils.get_input_network_type', (['commons.val_types'], {'message': '"""validation set"""'}), "(commons.val_types, message='validation set')\n", (1928, 1973), True, 'import libs.dataset_utils as dutils\n'), ((4023, 4121), 'libs.vis_functions.plot_confusion_matrix', 'plot_confusion_matrix', (['confMat'], {'title': 'title', 'normalize': '(True)', 'show': '(False)', 'save_path': 'confMatPath'}), '(confMat, title=title, normalize=True, show=False,\n save_path=confMatPath)\n', (4044, 4121), False, 'from libs.vis_functions import plot_confusion_matrix\n'), ((2087, 2105), 'pathlib.Path', 'Path', (['dirs.dataset'], {}), '(dirs.dataset)\n', (2091, 2105), False, 'from pathlib import Path\n'), ((2223, 2246), 'pathlib.Path', 'Path', (['dirs.saved_models'], {}), '(dirs.saved_models)\n', (2227, 
2246), False, 'from pathlib import Path\n'), ((2329, 2352), 'pathlib.Path', 'Path', (['dirs.saved_models'], {}), '(dirs.saved_models)\n', (2333, 2352), False, 'from pathlib import Path\n'), ((2438, 2456), 'pathlib.Path', 'Path', (['dirs.results'], {}), '(dirs.results)\n', (2442, 2456), False, 'from pathlib import Path\n'), ((2556, 2574), 'pathlib.Path', 'Path', (['dirs.results'], {}), '(dirs.results)\n', (2560, 2574), False, 'from pathlib import Path\n'), ((3136, 3169), 'models.utils.compute_class_acc', 'mutils.compute_class_acc', (['confMat'], {}), '(confMat)\n', (3160, 3169), True, 'import models.utils as mutils\n'), ((3187, 3204), 'numpy.mean', 'np.mean', (['classAcc'], {}), '(classAcc)\n', (3194, 3204), True, 'import numpy as np\n'), ((3777, 3793), 'numpy.mean', 'np.mean', (['valLoss'], {}), '(valLoss)\n', (3784, 3793), True, 'import numpy as np\n'), ((3795, 3810), 'numpy.std', 'np.std', (['valLoss'], {}), '(valLoss)\n', (3801, 3810), True, 'import numpy as np\n'), ((3844, 3859), 'numpy.mean', 'np.mean', (['valAcc'], {}), '(valAcc)\n', (3851, 3859), True, 'import numpy as np\n'), ((3861, 3875), 'numpy.std', 'np.std', (['valAcc'], {}), '(valAcc)\n', (3867, 3875), True, 'import numpy as np\n'), ((3401, 3419), 'numpy.argmin', 'np.argmin', (['valLoss'], {}), '(valLoss)\n', (3410, 3419), True, 'import numpy as np\n')] |
import cv2 as cv
import sys
import numpy as np
import tifffile as ti
import argparse
import itertools
max_lowThreshold = 100
window_name = 'Edge Map'
title_trackbar = 'Min Threshold:'
ratio = 3
kernel_size = 3
def CannyThreshold(val):
    """Trackbar callback: run Canny edge detection with *val* as the low
    threshold and display the original image masked by the edges.

    NOTE(review): relies on module globals ``src`` / ``src_gray`` that are
    only assigned in the commented-out trackbar setup -- confirm before
    reusing this callback.
    """
    low_threshold = val
    #img_blur = cv.blur(src_gray, (3,3))
    detected_edges = cv.Canny(src_gray, low_threshold, low_threshold*ratio, kernel_size)
    # Keep only the pixels where an edge was detected.
    mask = detected_edges != 0
    dst = src * (mask[:,:,None].astype(src.dtype))
    cv.imshow(window_name, dst)
# Sort grey image colors by frequency of appearance
def freq_sort(l):
    """Return (value, count) pairs for every pixel in the 2-D sequence *l*,
    ordered from most to least frequent (ties keep first-seen order).

    Uses collections.Counter in place of the previous hand-rolled
    flatten-and-count loops; the result is identical.
    """
    from collections import Counter
    counts = Counter(pixel for row in l for pixel in row)
    return counts.most_common()
# Remove colors of selection ranked by frequency
def gray_filter(img, p_map, start, end):
    """Blacken (set to 0) every pixel of *img* whose value is not among the
    colors ranked ``p_map[start:end]``, where *p_map* is a frequency-sorted
    list of (color, count) pairs (e.g. the output of freq_sort).

    Mutates *img* in place and also returns it.

    Improvement: membership is tested against a set (O(1) per pixel)
    instead of the previous list scan (O(k) per pixel).
    """
    # Slice the requested color range and keep only the color values.
    selected_colors = {pair[0] for pair in p_map[start:end]}
    # Replace out-of-range colors with black.
    for row in img:
        for j in range(len(row)):
            if row[j] not in selected_colors:
                row[j] = 0
    return img
# Remove disconnected noises
def de_noise(img, kernel_size=1, criteria=4, iterations=4, remove_all=False):
    """Blacken foreground pixels that are mostly surrounded by black.

    For every non-black pixel, the black pixels in its
    (2*kernel_size+1)^2 neighbourhood (centre excluded) are counted; if
    more than *criteria* are black the pixel is removed and, with
    *remove_all*, its whole neighbourhood as well. The sweep repeats
    *iterations* times; *img* is modified in place and returned.

    NOTE(review): near the left/top borders ``i+idx`` / ``j+idx`` go
    negative, which Python indexing wraps to the opposite edge; only
    out-of-range positive indices hit the IndexError handler -- confirm
    the wrap-around is acceptable.
    """
    cur = 0
    r_len = len(img)
    c_len = len(img[0])
    while cur < iterations:
        cur += 1
        for i in range(r_len):
            for j in range(c_len):
                # If the iterated pixel is already black
                if img[i][j] == 0:
                    continue
                try:
                    # X, Y = np.mgrid[j:j+kernel_size, i:i+kernel_size]
                    # print(np.vstack((X.ravel(), Y.ravel())))
                    # exit(1)
                    # Put adjacent pixels with given kernel size into the list
                    p_list = []
                    indices = [p for p in itertools.product(range(kernel_size, -kernel_size-1, -1), repeat=2) if p != (0,0)]
                    for idx in indices:
                        p_list.append(img[i+idx[0]][j+idx[1]])
                    # Remove the pixel if number of adjacent black pixels are greater than the preset value
                    if p_list.count(0) > criteria:
                        img[i][j] = 0
                        if remove_all:
                            for idx in indices:
                                img[i+idx[0]][j+idx[1]] = 0
                except IndexError:
                    # Neighbourhood ran past the right/bottom edge: skip.
                    pass
    return img
if __name__ == '__main__':
    # Load the input image and build HSV / grayscale versions of it.
    src = cv.imread(cv.samples.findFile("input.tif"))
    img = cv.cvtColor(src, cv.COLOR_BGR2HSV)
    img_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    cv.imshow('original', img_gray)
    # Keep only mid-frequency gray levels: drop the 10 most common and the
    # 80 least common colors, blackening everything else.
    freq_dic = freq_sort(img_gray)
    filtered_img = gray_filter(img_gray, freq_dic, 10, -80)
    cv.imshow('filtered', filtered_img)
    ti.imwrite("filtered.tif", np.array([[filtered_img] * 90], np.uint8))
    # de_noise_img = de_noise(filtered_img, 1, 4, 4)
    # de_noise_img = de_noise(de_noise_img, 2, 18, 1)
    de_noise_img = de_noise(filtered_img, 1, 5, 4)
    ti.imwrite("de_noise_img.tif", np.array([[de_noise_img] * 90], np.uint8))
    # NOTE(review): both calls below use cv.dilate, so 'eroded' is not an
    # erosion -- confirm whether cv.erode was intended here.
    eroded = cv.dilate(de_noise_img, np.ones((2, 2), np.uint8), iterations=1)
    dilated = cv.dilate(eroded, np.ones((2, 2), np.uint8), iterations=1)
    med_blur = cv.medianBlur(de_noise_img, 3)
    cv.imshow('dilated', dilated)
    cv.imshow('de-noised-more-aggressive', de_noise_img)
    cv.imshow('med_blur', med_blur)
    cv.waitKey()
# img_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
# print(img_gray)
# if img is None:
# sys.exit("Could not read the image.")
#
#
# rows, cols, channels = img.shape
# dst = img.copy()
# a = 2.5
# b = 380
# for i in range(rows):
# for j in range(cols):
# for c in range(3):
# color = img[i, j][c]*a+b
# if color > 255: # 防止像素值越界(0~255)
# dst[i, j][c] = 255
# elif color < 0: # 防止像素值越界(0~255)
# dst[i, j][c] = 0
#
# blur_img = cv.GaussianBlur(img, ksize=(5, 5), sigmaX=1, sigmaY=1)
# gaussian_gray = cv.GaussianBlur(img_gray, ksize=(5, 5), sigmaX=1, sigmaY=1)
# ti.imwrite("Gaussian_blur.tif", np.array([[gaussian_gray]*90], np.uint8))
#
# med_blur_img = cv.medianBlur(img_gray, 3)
# ti.imwrite("med_blur.tif", np.array([[med_blur_img]*90], np.uint8))
#
# ret, threshold = cv.threshold(blur_img, 85, 255, cv.THRESH_TOZERO_INV)
# ret_gray, threshold_gray = cv.threshold(gaussian_gray, 85, 255, cv.THRESH_TOZERO_INV)
#
# kernel = np.ones((2, 2), np.uint8)
# erosion = cv.erode(threshold, kernel, iterations=2)
# erosion_gray = cv.erode(threshold_gray, kernel, iterations=2)
# ti.imwrite("erosion.tif", np.array([[erosion_gray]*90], np.uint8))
#
# dilation = cv.dilate(erosion, kernel, iterations=2)
# dilation_gray = cv.dilate(threshold_gray, kernel, iterations=2)
# ti.imwrite("dilation.tif", np.array([[dilation_gray]*90], np.uint8))
#
# lower_grey = np.array([0, 0, 11])
# upper_grey = np.array([0, 0, 60])
# mask = cv.inRange(erosion, lower_grey, upper_grey)
# mask = cv.fastNlMeansDenoising(mask, None, 5)
# res = cv.bitwise_and(erosion, erosion, mask=mask)
# res_gray = cv.cvtColor(res, cv.COLOR_BGR2GRAY)
# ti.imwrite("filtered.tif", np.array([[res_gray]*90], np.uint8))
#
# # gray = cv.cvtColor(res, cv.COLOR_BGR2GRAY)
# # grad_x = cv.Sobel(gray, -1, 1, 0, ksize=5)
# # grad_y = cv.Sobel(gray, -1, 0, 1, ksize=5)
# # grad = cv.addWeighted(grad_x, 1, grad_y, 1, 0)
#
# # src = cv.GaussianBlur(src, (3, 3), 0)
# # src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
# # cv.namedWindow(window_name)
# # cv.createTrackbar(title_trackbar, window_name, 0, max_lowThreshold, CannyThreshold)
# # CannyThreshold(0)
# # cv.waitKey()
#
# cv.imshow("src", img)
# cv.imshow("blur", blur_img)
# cv.imshow("threshold", threshold)
#
# cv.imshow("erosion", erosion)
# cv.imshow("dilation", dilation)
#
# cv.imshow('mask', mask)
# cv.imshow('filtered', res)
#
# # cv.imshow("grad", grad)
# cv.imshow("blur", blur_img)
#
# k = cv.waitKey(0)
# if k == ord("s"):
# cv.imwrite("starry_night.png", erosion)
| [
"numpy.ones",
"cv2.samples.findFile",
"cv2.medianBlur",
"cv2.imshow",
"numpy.array",
"cv2.cvtColor",
"cv2.Canny",
"cv2.waitKey"
] | [((339, 408), 'cv2.Canny', 'cv.Canny', (['src_gray', 'low_threshold', '(low_threshold * ratio)', 'kernel_size'], {}), '(src_gray, low_threshold, low_threshold * ratio, kernel_size)\n', (347, 408), True, 'import cv2 as cv\n'), ((496, 523), 'cv2.imshow', 'cv.imshow', (['window_name', 'dst'], {}), '(window_name, dst)\n', (505, 523), True, 'import cv2 as cv\n'), ((2949, 2983), 'cv2.cvtColor', 'cv.cvtColor', (['src', 'cv.COLOR_BGR2HSV'], {}), '(src, cv.COLOR_BGR2HSV)\n', (2960, 2983), True, 'import cv2 as cv\n'), ((3000, 3035), 'cv2.cvtColor', 'cv.cvtColor', (['src', 'cv.COLOR_BGR2GRAY'], {}), '(src, cv.COLOR_BGR2GRAY)\n', (3011, 3035), True, 'import cv2 as cv\n'), ((3041, 3072), 'cv2.imshow', 'cv.imshow', (['"""original"""', 'img_gray'], {}), "('original', img_gray)\n", (3050, 3072), True, 'import cv2 as cv\n'), ((3177, 3212), 'cv2.imshow', 'cv.imshow', (['"""filtered"""', 'filtered_img'], {}), "('filtered', filtered_img)\n", (3186, 3212), True, 'import cv2 as cv\n'), ((3705, 3735), 'cv2.medianBlur', 'cv.medianBlur', (['de_noise_img', '(3)'], {}), '(de_noise_img, 3)\n', (3718, 3735), True, 'import cv2 as cv\n'), ((3741, 3770), 'cv2.imshow', 'cv.imshow', (['"""dilated"""', 'dilated'], {}), "('dilated', dilated)\n", (3750, 3770), True, 'import cv2 as cv\n'), ((3776, 3828), 'cv2.imshow', 'cv.imshow', (['"""de-noised-more-aggressive"""', 'de_noise_img'], {}), "('de-noised-more-aggressive', de_noise_img)\n", (3785, 3828), True, 'import cv2 as cv\n'), ((3834, 3865), 'cv2.imshow', 'cv.imshow', (['"""med_blur"""', 'med_blur'], {}), "('med_blur', med_blur)\n", (3843, 3865), True, 'import cv2 as cv\n'), ((3873, 3885), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (3883, 3885), True, 'import cv2 as cv\n'), ((2904, 2936), 'cv2.samples.findFile', 'cv.samples.findFile', (['"""input.tif"""'], {}), "('input.tif')\n", (2923, 2936), True, 'import cv2 as cv\n'), ((3245, 3286), 'numpy.array', 'np.array', (['[[filtered_img] * 90]', 'np.uint8'], {}), '([[filtered_img] * 90], 
np.uint8)\n', (3253, 3286), True, 'import numpy as np\n'), ((3489, 3530), 'numpy.array', 'np.array', (['[[de_noise_img] * 90]', 'np.uint8'], {}), '([[de_noise_img] * 90], np.uint8)\n', (3497, 3530), True, 'import numpy as np\n'), ((3572, 3597), 'numpy.ones', 'np.ones', (['(2, 2)', 'np.uint8'], {}), '((2, 2), np.uint8)\n', (3579, 3597), True, 'import numpy as np\n'), ((3646, 3671), 'numpy.ones', 'np.ones', (['(2, 2)', 'np.uint8'], {}), '((2, 2), np.uint8)\n', (3653, 3671), True, 'import numpy as np\n')] |
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import re
from addonpayments.utils import GenerationUtils
class TestGenerationUtils:
    def test_generate_hash(self):
        """
        Hash generation success case: the digest must equal a known SHA-1 value.
        """
        to_hash = '20120926112654.thestore.ORD453-11.00.Successful.3737468273643.79347'
        digest = GenerationUtils.generate_hash(to_hash, 'mysecret')
        assert digest == '368df010076481d47a21e777871012b62b976339'
    def test_generate_timestamp(self):
        """
        Timestamps are time-dependent, so only the format can be checked:
        exactly 14 decimal digits.
        """
        timestamp = GenerationUtils().generate_timestamp()
        assert re.match(r'([0-9]{14})', timestamp)
    def test_generate_order_id(self):
        """
        Order ids are random, so only length and character set are checked.
        """
        order_id = GenerationUtils().generate_order_id()
        assert re.match(r'[A-Za-z0-9-_]{32}', order_id)
| [
"addonpayments.utils.GenerationUtils",
"re.match",
"addonpayments.utils.GenerationUtils.generate_hash"
] | [((480, 530), 'addonpayments.utils.GenerationUtils.generate_hash', 'GenerationUtils.generate_hash', (['test_string', 'secret'], {}), '(test_string, secret)\n', (509, 530), False, 'from addonpayments.utils import GenerationUtils\n'), ((820, 851), 're.match', 're.match', (['"""([0-9]{14})"""', 'result'], {}), "('([0-9]{14})', result)\n", (828, 851), False, 'import re\n'), ((1119, 1156), 're.match', 're.match', (['"""[A-Za-z0-9-_]{32}"""', 'result'], {}), "('[A-Za-z0-9-_]{32}', result)\n", (1127, 1156), False, 'import re\n'), ((765, 782), 'addonpayments.utils.GenerationUtils', 'GenerationUtils', ([], {}), '()\n', (780, 782), False, 'from addonpayments.utils import GenerationUtils\n'), ((1065, 1082), 'addonpayments.utils.GenerationUtils', 'GenerationUtils', ([], {}), '()\n', (1080, 1082), False, 'from addonpayments.utils import GenerationUtils\n')] |
import picamera
from time import sleep
from time import time
import os
import numpy as np
import cv2
import imutils
import argparse
import face_recognition
from camera.check_rectangle_overlap import check_rectangle_overlap
# https://picamera.readthedocs.io/en/release-1.0/api.html
def get_number_faces():
    """Take a picture with the Pi camera and estimate how many people are in it.

    Two detectors are combined:
      * OpenCV's HOG people detector (full bodies)
      * face_recognition's face detector (faces)
    A face that overlaps a detected body rectangle is assumed to belong to the
    same person and is not counted twice.

    Returns:
        tuple(int, str): (estimated number of people, saved picture filename;
        empty string when nobody was detected and the picture was discarded).
    """
    time_now = int(time())

    # take picture; close() releases the camera so the next call can open it
    camera = picamera.PiCamera()
    try:
        camera.resolution = (1024, 768)
        camera.start_preview()
        sleep(3)
        camera.capture('./camera/images/{}.jpg'.format(time_now))
        camera.stop_preview()
    finally:
        camera.close()
    print('picture taken')

    # human detector with opencv
    HOGCV = cv2.HOGDescriptor()
    HOGCV.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    # read image and use the model
    image = cv2.imread('./camera/images/{}.jpg'.format(time_now))
    image = imutils.resize(image, width=min(800, image.shape[1]))
    bounding_box_cordinates, weights = HOGCV.detectMultiScale(image, winStride=(4, 4), padding=(8, 8))

    # keep a detection only if its confidence value is higher than 0.70
    people_count = 0
    people_coord = []
    for box, weight in zip(bounding_box_cordinates, weights):
        if weight[0] > 0.70:
            people_coord.append(list(box))
            people_count += 1
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # count number of faces in picture with face_recognition
    face_locations = face_recognition.face_locations(image)
    num_faces = len(face_locations)
    face_coord = [list(item) for item in face_locations]

    # compare opencv and face_recognition results: a face within an opencv
    # body rectangle belongs to the same person, so don't count it twice
    for person in people_coord:
        for face in face_coord:
            if check_rectangle_overlap(person, face):
                num_faces -= 1
    people_from_both_libraries = people_count + num_faces
    print('opencv has recogniced {0} people and face_recognition {1} faces'.format(people_count, num_faces))

    # keep the picture only when someone was detected; draw both detectors'
    # boxes on it for visual comparison
    pic_name = ''
    if people_from_both_libraries:
        pic_name = '{0}_{1}_people.jpg'.format(time_now, people_from_both_libraries)
        # opencv coordinates: (x, y, w, h)
        for person in people_coord:
            cv2.rectangle(image, (person[0], person[1]), (person[0]+person[2], person[1]+person[3]), (0, 255, 0), 2)
        # face_recognition coordinates: (top, right, bottom, left)
        for item in face_coord:
            cv2.rectangle(image, (item[3], item[2]), (item[1], item[0]), (0, 255, 0), 2)
        cv2.imwrite('./camera/images/{}'.format(pic_name), image)
    # the raw capture is always removed (either replaced or discarded)
    os.remove('./camera/images/{}.jpg'.format(time_now))
    return people_from_both_libraries, pic_name
| [
"cv2.rectangle",
"face_recognition.face_locations",
"picamera.PiCamera",
"time.sleep",
"cv2.HOGDescriptor",
"camera.check_rectangle_overlap.check_rectangle_overlap",
"cv2.destroyAllWindows",
"time.time",
"cv2.HOGDescriptor_getDefaultPeopleDetector",
"cv2.waitKey"
] | [((368, 387), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (385, 387), False, 'import picamera\n'), ((455, 463), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (460, 463), False, 'from time import sleep\n'), ((625, 644), 'cv2.HOGDescriptor', 'cv2.HOGDescriptor', ([], {}), '()\n', (642, 644), False, 'import cv2\n'), ((1328, 1342), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1339, 1342), False, 'import cv2\n'), ((1347, 1370), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1368, 1370), False, 'import cv2\n'), ((1454, 1492), 'face_recognition.face_locations', 'face_recognition.face_locations', (['image'], {}), '(image)\n', (1485, 1492), False, 'import face_recognition\n'), ((327, 333), 'time.time', 'time', ([], {}), '()\n', (331, 333), False, 'from time import time\n'), ((670, 714), 'cv2.HOGDescriptor_getDefaultPeopleDetector', 'cv2.HOGDescriptor_getDefaultPeopleDetector', ([], {}), '()\n', (712, 714), False, 'import cv2\n'), ((1828, 1865), 'camera.check_rectangle_overlap.check_rectangle_overlap', 'check_rectangle_overlap', (['person', 'face'], {}), '(person, face)\n', (1851, 1865), False, 'from camera.check_rectangle_overlap import check_rectangle_overlap\n'), ((2370, 2483), 'cv2.rectangle', 'cv2.rectangle', (['image', '(person[0], person[1])', '(person[0] + person[2], person[1] + person[3])', '(0, 255, 0)', '(2)'], {}), '(image, (person[0], person[1]), (person[0] + person[2], person\n [1] + person[3]), (0, 255, 0), 2)\n', (2383, 2483), False, 'import cv2\n'), ((2555, 2631), 'cv2.rectangle', 'cv2.rectangle', (['image', '(item[3], item[2])', '(item[1], item[0])', '(0, 255, 0)', '(2)'], {}), '(image, (item[3], item[2]), (item[1], item[0]), (0, 255, 0), 2)\n', (2568, 2631), False, 'import cv2\n')] |
from ._base import BaseWeight
from ..exceptions import NotFittedError
from ..utils.functions import mean_log_beta
import numpy as np
from scipy.special import loggamma
class PitmanYorProcess(BaseWeight):
    """Stick-breaking weight structure of a Pitman-Yor process.

    `pyd` is the discount parameter (often written d) and `alpha` the
    concentration parameter; the process requires alpha > -pyd.
    Weights are built from Beta draws v_j via the stick-breaking
    construction w_j = v_j * prod_{i<j}(1 - v_i).
    """
    def __init__(self, pyd=0, alpha=1, truncation_length=-1, rng=None):
        """
        Args:
            pyd: discount parameter of the Pitman-Yor process.
            alpha: concentration parameter; must satisfy alpha > -pyd.
            truncation_length: maximum number of sticks (-1 presumably means
                unbounded -- handled by the base class, confirm there).
            rng: optional random generator, forwarded to BaseWeight.
        """
        super().__init__(rng=rng)
        assert -pyd < alpha, "alpha param must be greater than -pyd"
        self.pyd = pyd
        self.alpha = alpha
        # stick-breaking Beta draws; the weights self.w are derived from these
        self.v = np.array([], dtype=np.float64)
        self.truncation_length = truncation_length
    def random(self, size=None):
        """Draw stick-breaking weights.

        With no fitted assignments (self.d empty) this draws `size` sticks
        from the prior v_j ~ Beta(1 - pyd, alpha + j*pyd); otherwise it draws
        from the posterior given the assignment counts in self.d.

        Args:
            size: number of weights to draw; may be None once fitted.

        Returns:
            The weight vector self.w.

        Raises:
            ValueError: if not fitted and `size` is None.
            TypeError: if `size` is given but is not an int.
        """
        if size is None and len(self.d) == 0:
            raise ValueError("Weight structure not fitted and `n` not passed.")
        if size is not None:
            if type(size) is not int:
                raise TypeError("size parameter must be integer or None")
        if len(self.d) == 0:
            # prior draw: the Beta b-parameter grows linearly with the stick index
            pitman_yor_bias = np.arange(size)
            self.v = self.rng.beta(a=1 - self.pyd,
                                   b=self.alpha + pitman_yor_bias * self.pyd,
                                   size=size)
            self.w = self.v * np.cumprod(np.concatenate(([1],
                                                             1 - self.v[:-1])))
        else:
            # posterior draw: a_c[j] = count assigned to cluster j,
            # b_c[j] = count assigned to clusters after j
            a_c = np.bincount(self.d)
            b_c = np.concatenate((np.cumsum(a_c[::-1])[-2::-1], [0]))
            if size is not None and size < len(a_c):
                a_c = a_c[:size]
                b_c = b_c[:size]
            pitman_yor_bias = np.arange(len(a_c))
            self.v = self.rng.beta(
                a=1 - self.pyd + a_c,
                b=self.alpha + pitman_yor_bias * self.pyd + b_c
            )
            self.w = self.v * np.cumprod(np.concatenate(([1],
                                                             1 - self.v[:-1])))
            if size is not None:
                self.complete(size)
        return self.w
    def complete(self, size):
        """Extend the current draws with fresh prior sticks until at least
        `size` weights exist, then rebuild self.w.

        Raises:
            TypeError: if `size` is not an int.
        """
        if type(size) is not int:
            raise TypeError("size parameter must be integer or None")
        if self.get_size() < size:
            pitman_yor_bias = np.arange(self.get_size(), size)
            self.v = np.concatenate(
                (
                    self.v,
                    self.rng.beta(a=1 - self.pyd,
                                  b=self.alpha + pitman_yor_bias * self.pyd)
                )
            )
            self.w = self.v * np.cumprod(np.concatenate(([1],
                                                             1 - self.v[:-1])))
        return self.w
    def fit_variational(self, variational_d):
        """Fit the per-stick variational Beta parameters from the
        responsibility matrix `variational_d` (clusters x observations)."""
        self.variational_d = variational_d
        self.variational_k = len(self.variational_d)
        self.variational_params = np.empty((self.variational_k, 2),
                                          dtype=np.float64)
        # a_c: expected counts per cluster; b_c: expected counts in later clusters
        a_c = np.sum(self.variational_d, 1)
        b_c = np.concatenate((np.cumsum(a_c[::-1])[-2::-1], [0]))
        self.variational_params[:, 0] = 1 - self.pyd + a_c
        self.variational_params[:, 1] = self.alpha + (
                1 + np.arange(self.variational_params.shape[0])
        ) * self.pyd + b_c
    def variational_mean_log_w_j(self, j):
        """E[log w_j] under the fitted variational distribution
        (sum of E[log(1-v_i)] for i<j plus E[log v_j])."""
        if self.variational_d is None:
            raise NotFittedError
        res = 0
        for jj in range(j):
            res += mean_log_beta(self.variational_params[jj][1],
                                 self.variational_params[jj][0])
        res += mean_log_beta(self.variational_params[j, 0],
                             self.variational_params[j, 1]
                             )
        return res
    def variational_mean_log_p_d__w(self, variational_d=None):
        """E[log p(d | w)]: expected assignment log-likelihood, weighting
        E[log w_j] by the expected count in cluster j."""
        if variational_d is None:
            _variational_d = self.variational_d
            if _variational_d is None:
                raise NotFittedError
        else:
            _variational_d = variational_d
        res = 0
        for j, nj in enumerate(np.sum(_variational_d, 1)):
            res += nj * self.variational_mean_log_w_j(j)
        return res
    def variational_mean_log_p_w(self):
        """E[log p(w)]: expected log prior density of the sticks under the
        Pitman-Yor Beta(1-pyd, alpha+(j+1)*pyd) prior."""
        if self.variational_d is None:
            raise NotFittedError
        res = 0
        for j, params in enumerate(self.variational_params):
            res += mean_log_beta(params[0], params[1]) * -self.pyd
            res += mean_log_beta(params[1], params[0]) * (
                    self.alpha + (j + 1) * self.pyd - 1
            )
            # log normalizing constant of the prior Beta
            res += loggamma(self.alpha + j * self.pyd + 1)
            res -= loggamma(self.alpha + (j + 1) * self.pyd + 1)
            res -= loggamma(1 - self.pyd)
        return res
    def variational_mean_log_q_w(self):
        """E[log q(w)]: expected log variational density of the sticks
        (the negative entropy of the fitted Beta factors)."""
        if self.variational_d is None:
            raise NotFittedError
        res = 0
        for params in self.variational_params:
            res += (params[0] - 1) * mean_log_beta(params[0], params[1])
            res += (params[1] - 1) * mean_log_beta(params[1], params[0])
            res += loggamma(params[0] + params[1])
            res -= loggamma(params[0]) + loggamma(params[1])
        return res
    def variational_mean_w(self, j):
        """E[w_j] under the variational distribution; 0 beyond the truncation."""
        if j > self.variational_k:
            return 0
        res = 1
        for jj in range(j):
            # E[1 - v_jj] = b / (a + b) for a Beta(a, b) factor
            res *= (self.variational_params[jj][1] /
                    self.variational_params[jj].sum())
        res *= self.variational_params[j, 0] / self.variational_params[j].sum()
        return res
    def variational_mode_w(self, j):
        """Mode of w_j under the variational distribution; 0 beyond the
        truncation or when any factor's mode sits at an endpoint.

        Raises:
            ValueError: when an underlying Beta factor has both parameters
                <= 1 (multimodal), so no unique mode exists.
        """
        if j > self.variational_k:
            return 0
        res = 1
        for jj in range(j):
            if self.variational_params[jj, 1] <= 1:
                if self.variational_params[jj, 0] <= 1:
                    raise ValueError('multimodal distribution')
                else:
                    return 0
            elif self.variational_params[jj, 0] <= 1:
                continue
            # mode of 1 - v_jj for Beta(a, b): (b - 1) / (a + b - 2)
            res *= ((self.variational_params[jj, 1] - 1) /
                    (self.variational_params[jj].sum() - 2))
        if self.variational_params[j, 0] <= 1:
            if self.variational_params[j, 1] <= 1:
                raise ValueError('multimodal distribution')
            else:
                return 0
        elif self.variational_params[j, 1] <= 1:
            return res
        res *= ((self.variational_params[j, 0] - 1) /
                (self.variational_params[j].sum() - 2))
        return res
| [
"scipy.special.loggamma",
"numpy.array",
"numpy.sum",
"numpy.empty",
"numpy.concatenate",
"numpy.cumsum",
"numpy.bincount",
"numpy.arange"
] | [((449, 479), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (457, 479), True, 'import numpy as np\n'), ((2706, 2757), 'numpy.empty', 'np.empty', (['(self.variational_k, 2)'], {'dtype': 'np.float64'}), '((self.variational_k, 2), dtype=np.float64)\n', (2714, 2757), True, 'import numpy as np\n'), ((2815, 2844), 'numpy.sum', 'np.sum', (['self.variational_d', '(1)'], {}), '(self.variational_d, 1)\n', (2821, 2844), True, 'import numpy as np\n'), ((891, 906), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (900, 906), True, 'import numpy as np\n'), ((1252, 1271), 'numpy.bincount', 'np.bincount', (['self.d'], {}), '(self.d)\n', (1263, 1271), True, 'import numpy as np\n'), ((3901, 3926), 'numpy.sum', 'np.sum', (['_variational_d', '(1)'], {}), '(_variational_d, 1)\n', (3907, 3926), True, 'import numpy as np\n'), ((4410, 4449), 'scipy.special.loggamma', 'loggamma', (['(self.alpha + j * self.pyd + 1)'], {}), '(self.alpha + j * self.pyd + 1)\n', (4418, 4449), False, 'from scipy.special import loggamma\n'), ((4469, 4514), 'scipy.special.loggamma', 'loggamma', (['(self.alpha + (j + 1) * self.pyd + 1)'], {}), '(self.alpha + (j + 1) * self.pyd + 1)\n', (4477, 4514), False, 'from scipy.special import loggamma\n'), ((4534, 4556), 'scipy.special.loggamma', 'loggamma', (['(1 - self.pyd)'], {}), '(1 - self.pyd)\n', (4542, 4556), False, 'from scipy.special import loggamma\n'), ((4917, 4948), 'scipy.special.loggamma', 'loggamma', (['(params[0] + params[1])'], {}), '(params[0] + params[1])\n', (4925, 4948), False, 'from scipy.special import loggamma\n'), ((4968, 4987), 'scipy.special.loggamma', 'loggamma', (['params[0]'], {}), '(params[0])\n', (4976, 4987), False, 'from scipy.special import loggamma\n'), ((4990, 5009), 'scipy.special.loggamma', 'loggamma', (['params[1]'], {}), '(params[1])\n', (4998, 5009), False, 'from scipy.special import loggamma\n'), ((1123, 1161), 'numpy.concatenate', 'np.concatenate', (['([1], 1 - 
self.v[:-1])'], {}), '(([1], 1 - self.v[:-1]))\n', (1137, 1161), True, 'import numpy as np\n'), ((1706, 1744), 'numpy.concatenate', 'np.concatenate', (['([1], 1 - self.v[:-1])'], {}), '(([1], 1 - self.v[:-1]))\n', (1720, 1744), True, 'import numpy as np\n'), ((2410, 2448), 'numpy.concatenate', 'np.concatenate', (['([1], 1 - self.v[:-1])'], {}), '(([1], 1 - self.v[:-1]))\n', (2424, 2448), True, 'import numpy as np\n'), ((2875, 2895), 'numpy.cumsum', 'np.cumsum', (['a_c[::-1]'], {}), '(a_c[::-1])\n', (2884, 2895), True, 'import numpy as np\n'), ((1306, 1326), 'numpy.cumsum', 'np.cumsum', (['a_c[::-1]'], {}), '(a_c[::-1])\n', (1315, 1326), True, 'import numpy as np\n'), ((3045, 3088), 'numpy.arange', 'np.arange', (['self.variational_params.shape[0]'], {}), '(self.variational_params.shape[0])\n', (3054, 3088), True, 'import numpy as np\n')] |
from __future__ import annotations
from typing import Dict, List, Optional, Union
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from nlp.nlp import Trainer
app = FastAPI()
trainer = Trainer()
# The pydantic BaseModel subclasses below act as data validators: FastAPI
# validates the incoming JSON against them and handles the error responses.
class TestingData(BaseModel):
    """Request body for batch prediction: the raw texts to classify."""
    texts: List[str]
class QueryText(BaseModel):
    """Request body for single prediction: one raw text."""
    text: str
class StatusObject(BaseModel):
    """Response model describing the trainer's current state."""
    status: str
    timestamp: str
    classes: List[str]
    evaluation: Dict
class PredictionObject(BaseModel):
    """Response model for one prediction: the input text plus its scores."""
    text: str
    predictions: Dict
class PredictionsObject(BaseModel):
    """Response model wrapping a batch of PredictionObject results."""
    predictions: List[PredictionObject]
@app.get("/status", summary="Get current status of the system")
def get_status():
status = trainer.get_status()
return StatusObject(**status)
@app.get("/trainMachineLearning", summary="Train a new Machine Learning model")
def train():
try:
trainer.trainMachineLearning(trainer.merge().text, trainer.merge().dialect)
status = trainer.get_status()
return StatusObject(**status)
except Exception as e:
raise HTTPException(status_code=503, detail=str(e))
@app.get("/trainDeepLearning", summary="Train a new Deep Learning model")
def train():
try:
trainer.trainDeepLearning(trainer.merge().text, trainer.merge().dialect)
status = trainer.get_status()
return StatusObject(**status)
except Exception as e:
raise HTTPException(status_code=503, detail=str(e))
@app.post("/predict", summary="Predict single input")
def predict(query_text: QueryText):
try:
prediction = trainer.predict([query_text.text])[0]
return PredictionObject(**prediction)
except Exception as e:
raise HTTPException(status_code=503, detail=str(e))
@app.post("/predict-batch", summary="predict a batch of sentences")
def predict_batch(testing_data:TestingData):
try:
predictions = trainer.predict(testing_data.texts)
return PredictionsObject(predictions=predictions)
except Exception as e:
raise HTTPException(status_code=503, detail=str(e))
@app.get("/")
def home():
return({"message": "System is up"})
| [
"fastapi.FastAPI",
"nlp.nlp.Trainer"
] | [((191, 200), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (198, 200), False, 'from fastapi import FastAPI, HTTPException\n'), ((211, 220), 'nlp.nlp.Trainer', 'Trainer', ([], {}), '()\n', (218, 220), False, 'from nlp.nlp import Trainer\n')] |
import pandas as pd
import numpy as np
import glob
import os
import global_config
import yaml
import time
import sys
import collections
def flatten(d, parent_key='', sep='_'):
    """Flatten a nested mapping into a single-level dict.

    Nested keys are joined with `sep`, e.g. {'a': {'b': 1}} -> {'a_b': 1}.

    Args:
        d: the (possibly nested) mapping to flatten.
        parent_key: key prefix carried through recursive calls.
        sep: separator inserted between joined key parts.

    Returns:
        dict: flattened key/value pairs.
    """
    # BUGFIX: collections.MutableMapping was removed in Python 3.10;
    # the ABC lives in collections.abc.
    from collections.abc import MutableMapping
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
def collect_yaml_config(config):
    """Turn one experiment's YAML config into a tidy DataFrame.

    Every non-baseline policy becomes one row containing the flattened policy
    settings, the experiment-wide settings, and the regret improvement over
    the baseline policy (baseline_regret / regret; 0 when regret is 0).

    Args:
        config: parsed YAML dict with a 'policies' list; one policy may carry
            identifier == 'baseline' providing the reference regret.

    Returns:
        pandas.DataFrame with one row per non-baseline policy.
    """
    keys = list(filter(lambda key: key not in ['policies', 'seed'], config.keys()))
    global_setting = {key: config[key] for key in keys}

    baseline_regret = 0
    for pol in config['policies']:
        if pol.get('identifier') == 'baseline':
            baseline_regret = pol['regret']

    rows = []
    for pol in config['policies']:
        if pol.get('identifier') == 'baseline':
            continue
        flat_dict = flatten(pol)
        regret = flat_dict.pop('regret')
        if regret != 0.0:
            flat_dict['improvement'] = baseline_regret / regret
        else:
            flat_dict['improvement'] = 0
        rows.append(flat_dict | global_setting)
    # BUGFIX: DataFrame.append was deprecated and removed in pandas 2.x;
    # build the frame from the collected rows in one go instead.
    return pd.DataFrame(rows)
if __name__ == "__main__":
filestart = sys.argv[1]
os.chdir(global_config.EXPERIMENT_SERIALIZATION_DIR)
experiment_files = glob.glob("*.yml")
df =pd.DataFrame()
i = 0
for ef in experiment_files:
print('%d/%d' % (i, len(experiment_files)))
i += 1
with open(ef, 'r') as ymlfile:
if filestart == ef[:len(filestart)]:
experiment_data = yaml.safe_load(ymlfile)
df = df.append(collect_yaml_config(experiment_data), ignore_index=True)
df.to_csv('collected_%s.csv' % filestart, index=False)
| [
"pandas.DataFrame",
"yaml.safe_load",
"os.chdir",
"glob.glob"
] | [((516, 530), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (528, 530), True, 'import pandas as pd\n'), ((1367, 1419), 'os.chdir', 'os.chdir', (['global_config.EXPERIMENT_SERIALIZATION_DIR'], {}), '(global_config.EXPERIMENT_SERIALIZATION_DIR)\n', (1375, 1419), False, 'import os\n'), ((1443, 1461), 'glob.glob', 'glob.glob', (['"""*.yml"""'], {}), "('*.yml')\n", (1452, 1461), False, 'import glob\n'), ((1471, 1485), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1483, 1485), True, 'import pandas as pd\n'), ((1719, 1742), 'yaml.safe_load', 'yaml.safe_load', (['ymlfile'], {}), '(ymlfile)\n', (1733, 1742), False, 'import yaml\n')] |
###################################################
## ##
## This file is part of the KinBot code v2.0 ##
## ##
## The contents are covered by the terms of the ##
## BSD 3-clause license included in the LICENSE ##
## file, found at the root. ##
## ##
## Copyright 2018 National Technology & ##
## Engineering Solutions of Sandia, LLC (NTESS). ##
## Under the terms of Contract DE-NA0003525 with ##
## NTESS, the U.S. Government retains certain ##
## rights to this software. ##
## ##
## Authors: ##
## <NAME> ##
## <NAME> ##
## ##
###################################################
import os,sys
import time
import logging
import numpy as np
import matplotlib.pyplot as plt
from constants import *
from stationary_pt import *
from zmat import *
from qc import *
import par
def generate_hir_geoms(species, natom, atom, mult, charge, cart, wellorts):
    """
    Generate and submit the geometries for the 1D hindered-rotor scans.

    For every rotatable dihedral of `species`, the dihedral is rotated in
    par.nrotation equal steps over 360 degrees; each rotated geometry is
    rebuilt from the z-matrix and submitted to the QC code via qc_hir.

    species:  stationary_pt object whose hir_* bookkeeping lists are (re)filled
    natom:    number of atoms
    atom:     list of atomic symbols
    mult:     spin multiplicity
    charge:   molecular charge
    cart:     cartesian coordinates of the starting geometry
    wellorts: 0 for a well, 1 for a transition state (affects job naming)
    Returns 0.
    """
    species.hir_status = []
    species.hir_energies = []
    species.hir_geoms = []
    # pre-fill the bookkeeping lists with 'not started' markers (-1 / empty)
    while len(species.hir_status) < len(species.dihed):
        species.hir_status.append([-1 for i in range(par.nrotation)])
        species.hir_energies.append([-1 for i in range(par.nrotation)])
        species.hir_geoms.append([[] for i in range(par.nrotation)])
    for rotor in range(len(species.dihed)):
        cart = np.asarray(cart)
        zmat_atom, zmat_ref, zmat, zmatorder = make_zmat_from_cart(species, rotor, natom, atom, cart, 0)
        #first element has same geometry ( TODO: this shouldn't be recalculated)
        cart_new = make_cart_from_zmat(zmat, zmat_atom, zmat_ref, natom, atom, zmatorder)
        fi = [(zi+1) for zi in zmatorder[:4]]  # 1-based indices of the scanned dihedral's atoms
        qc_hir(species,cart_new,wellorts,natom,atom,mult,charge,rotor,0,[fi])
        for ai in range(1,par.nrotation):
            ang = 360. / float(par.nrotation)
            # rotate the scanned dihedral by one step
            zmat[3][2] += ang
            # co-rotate dihedrals defined relative to the scanned one
            for i in range(4, natom):
                if zmat_ref[i][2] == 4:
                    zmat[i][2] += ang
                if zmat_ref[i][2] == 1:
                    zmat[i][2] += ang
            cart_new = make_cart_from_zmat(zmat, zmat_atom, zmat_ref, natom, atom, zmatorder)
            qc_hir(species,cart_new,wellorts,natom,atom,mult,charge,rotor,ai,[fi])
    return 0
def test_hir(species,natom,atom,mult,charge,wellorts):
    """
    Poll the hindered-rotor QC jobs and harvest the finished ones.

    For every rotor/angle point whose status is still -1 (pending), check the
    corresponding QC job:
      * still running -> leave status at -1
      * failed        -> status 1, energy -1
      * finished      -> accept only if the optimized geometry kept all bond
                         lengths within 15% of the originals (status 0),
                         otherwise mark it as failed (status 1)
    Returns 0.
    """
    for rotor in range(len(species.dihed)):
        for ai in range(par.nrotation):
            if species.hir_status[rotor][ai] == -1:
                # job name encodes well/TS, rotor index and angle index
                if wellorts:
                    job = 'hir/' + species.name + '_hir_' + str(rotor) + '_' + str(ai).zfill(2)
                else:
                    job = 'hir/' + str(species.chemid) + '_hir_' + str(rotor) + '_' + str(ai).zfill(2)
                err, geom = get_qc_geom(job, natom)
                if err == 1: #still running
                    continue
                elif err == -1: #failed
                    species.hir_status[rotor][ai] = 1
                    species.hir_energies[rotor][ai] = -1
                    species.hir_geoms[rotor][ai] = geom
                else:
                    #check if all the bond lenghts are within 15% or the original bond lengths
                    if equal_geom(species.bond,species.geom,geom,0.15):
                        err, energy = get_qc_energy(job)
                        species.hir_status[rotor][ai] = 0
                        species.hir_energies[rotor][ai] = energy
                        species.hir_geoms[rotor][ai] = geom
                    else:
                        species.hir_status[rotor][ai] = 1
                        species.hir_energies[rotor][ai] = -1
                        species.hir_geoms[rotor][ai] = geom
    return 0
def check_hir(species, natom, atom, mult, charge, wellorts, wait = 0):
    """
    Check for hir calculations and optionally wait for them to finish.

    When all points of all rotors are done (status >= 0), write each rotor's
    scan profile to file, append a Fourier fit per rotor to
    species.hir_fourier, and return 1. Otherwise return 0 immediately, or
    keep polling once per second when `wait` is truthy.
    """
    while 1:
        #check if all the calculations are finished
        test_hir(species,natom,atom,mult,charge,wellorts)
        if all([all([test >= 0 for test in status]) for status in species.hir_status]):
            for rotor in range(len(species.dihed)):
                if wellorts:
                    job = species.name + '_hir_' + str(rotor)
                else:
                    job = str(species.chemid) + '_hir_' + str(rotor)
                # scan grid in radians, par.nrotation equidistant points
                angles = [i * 2 * np.pi / float(par.nrotation) for i in range(par.nrotation)]
                #write profile to file
                write_profile(species,rotor,job,atom,natom)
                species.hir_fourier.append(fourier_fit(job,angles,species.hir_energies[rotor],species.hir_status[rotor],plot_fit = 0))
            return 1
        else:
            if wait:
                time.sleep(1)
            else:
                return 0
def write_profile(species,rotor,job,atom,natom):
    """
    Write a molden-readable xyz file with the HIR scan: one geometry block
    plus its energy per rotation point.

    species: stationary_pt object holding hir_energies and hir_geoms
    rotor:   index of the scanned dihedral
    job:     base name of the output file (written to hir/<job>.xyz)
    atom:    list of atomic symbols
    natom:   number of atoms
    """
    # context manager guarantees the file handle is closed even when an
    # exception is raised while formatting (the original leaked it then)
    with open('hir/' + job + '.xyz', 'w') as f:
        for i in range(par.nrotation):
            s = str(natom) + '\n'
            s += 'energy = ' + str(species.hir_energies[rotor][i]) + '\n'
            for j, at in enumerate(atom):
                x, y, z = species.hir_geoms[rotor][i][j]
                s += '{} {:.8f} {:.8f} {:.8f}\n'.format(at, x, y, z)
            f.write(s)
def fourier_fit(job,angles,energies,status,plot_fit = 0):
    """
    Create an alternative Fourier formulation of a hindered rotor profile
    (Vanspeybroeck et al.). The angles are in radians; the energies are in
    hartree and converted to kcal/mol relative to the first point.

    job:      base name used for warnings and the optional plot file
    angles:   scan angles (radians)
    energies: scan energies (hartree); failed points are overwritten in place
              with the fitted value
    status:   per-point status (0 = converged, 1 = failed)
    plot_fit: when truthy, plot profile and fit to hir_profiles/<job>.png
    Returns the fitted coefficient vector A (cosine terms then sine terms).
    """
    n_terms = 6  # the number of sine and cosine terms

    # keep only the successfully converged points for the fit
    ang = [angles[i] for i in range(len(status)) if status[i] == 0]
    ens = [(energies[i] - energies[0])*AUtoKCAL for i in range(len(status)) if status[i] == 0]

    # BUGFIX: was `par.par.nrotation`, which raises AttributeError at runtime
    if len(ens) < par.nrotation - 2:
        #more than two points are off
        logging.warning("Hindered rotor potential has more than 2 failures for " + job)

    X = np.zeros((len(ang), 2 * n_terms))
    for i, ai in enumerate(ang):
        for j in range(n_terms):
            X[i][j] = (1 - np.cos((j+1) * ai))
            X[i][j+n_terms] = np.sin((j+1) * ai)

    # rcond=None selects the future (non-deprecated) lstsq cutoff behavior
    A = np.linalg.lstsq(X, np.array(ens), rcond=None)[0]

    # replace the energies of failed points by their fitted values
    for i, si in enumerate(status):
        if si == 1:
            energies[i] = energies[0] + get_fit_value(A, angles[i])/AUtoKCAL

    if plot_fit:
        #fit the plot to a png file
        plt.plot(ang, ens, 'ro')
        fit_angles = [i * 2. * np.pi / 360 for i in range(360)]
        fit_energies = [get_fit_value(A, ai) for ai in fit_angles]
        plt.plot(fit_angles, fit_energies)
        plt.xlabel('Dihedral angle [radians]')
        plt.ylabel('Energy [kcal/mol]')
        plt.savefig('hir_profiles/{}.png'.format(job))
        plt.clf()
    return A
def get_fit_value(A,ai):
    """
    Evaluate the Fourier fit A at dihedral angle ai (radians).

    A holds the n cosine coefficients followed by the n sine coefficients.
    Returns the fitted energy (kcal/mol).
    """
    e = 0.
    # BUGFIX: len(A) / 2 is a float in Python 3 and breaks range();
    # use integer division instead
    n_terms = len(A) // 2
    for j in range(n_terms):
        e += A[j] * (1 - np.cos((j+1) * ai))
        e += A[j+n_terms] * np.sin((j+1) * ai)
    return e
def main():
    """
    Calculate the 1D hindered rotor profiles
    Create a fourier fit representation of the profile
    """
    # NOTE(review): stub -- the body contains only this docstring; the
    # module's functions appear to be driven from elsewhere. Confirm whether
    # a driver was intended here.
if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"logging.warning",
"numpy.asarray",
"matplotlib.pyplot.clf",
"time.sleep",
"numpy.array",
"numpy.cos",
"numpy.sin"
] | [((1651, 1667), 'numpy.asarray', 'np.asarray', (['cart'], {}), '(cart)\n', (1661, 1667), True, 'import numpy as np\n'), ((6232, 6311), 'logging.warning', 'logging.warning', (["('Hindered rotor potential has more than 2 failures for ' + job)"], {}), "('Hindered rotor potential has more than 2 failures for ' + job)\n", (6247, 6311), False, 'import logging\n'), ((6767, 6791), 'matplotlib.pyplot.plot', 'plt.plot', (['ang', 'ens', '"""ro"""'], {}), "(ang, ens, 'ro')\n", (6775, 6791), True, 'import matplotlib.pyplot as plt\n'), ((6928, 6962), 'matplotlib.pyplot.plot', 'plt.plot', (['fit_angles', 'fit_energies'], {}), '(fit_angles, fit_energies)\n', (6936, 6962), True, 'import matplotlib.pyplot as plt\n'), ((6970, 7008), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dihedral angle [radians]"""'], {}), "('Dihedral angle [radians]')\n", (6980, 7008), True, 'import matplotlib.pyplot as plt\n'), ((7017, 7048), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Energy [kcal/mol]"""'], {}), "('Energy [kcal/mol]')\n", (7027, 7048), True, 'import matplotlib.pyplot as plt\n'), ((7112, 7121), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7119, 7121), True, 'import matplotlib.pyplot as plt\n'), ((6501, 6521), 'numpy.sin', 'np.sin', (['((j + 1) * ai)'], {}), '((j + 1) * ai)\n', (6507, 6521), True, 'import numpy as np\n'), ((6547, 6560), 'numpy.array', 'np.array', (['ens'], {}), '(ens)\n', (6555, 6560), True, 'import numpy as np\n'), ((7344, 7364), 'numpy.sin', 'np.sin', (['((j + 1) * ai)'], {}), '((j + 1) * ai)\n', (7350, 7364), True, 'import numpy as np\n'), ((5048, 5061), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5058, 5061), False, 'import time\n'), ((6451, 6471), 'numpy.cos', 'np.cos', (['((j + 1) * ai)'], {}), '((j + 1) * ai)\n', (6457, 6471), True, 'import numpy as np\n'), ((7296, 7316), 'numpy.cos', 'np.cos', (['((j + 1) * ai)'], {}), '((j + 1) * ai)\n', (7302, 7316), True, 'import numpy as np\n')] |
import re, requests, bs4, unicodedata
from datetime import timedelta, date, datetime
from time import time
# Constants
root = 'https://www.fanfiction.net'
# REGEX MATCHES
# STORY REGEX
_STORYID_REGEX = r"var\s+storyid\s*=\s*(\d+);"
_CHAPTER_REGEX = r"var\s+chapter\s*=\s*(\d+);"
_CHAPTERS_REGEX = r"Chapters:\s*(\d+)\s*"
_WORDS_REGEX = r"Words:\s*([\d,]+)\s*"
_TITLE_REGEX = r"var\s+title\s*=\s*'(.+)';"
_DATEP_REGEX = r"Published:\s*<span.+?='(\d+)'>"
_DATEU_REGEX = r"Updated:\s*<span.+?='(\d+)'>"
# USER REGEX
_USERID_REGEX = r"var\s+userid\s*=\s*(\d+);"
_USERID_URL_EXTRACT = r".*/u/(\d+)"
_USERNAME_REGEX = r"<link rel=\"canonical\" href=\"//www.fanfiction.net/u/\d+/(.+)\">"
_USER_STORY_COUNT_REGEX = r"My Stories\s*<span class=badge>(\d+)<"
_USER_FAVOURITE_COUNT_REGEX = r"Favorite Stories\s*<span class=badge>(\d+)<"
_USER_FAVOURITE_AUTHOR_COUNT_REGEX = r"Favorite Authors\s*<span class=badge>(\d+)<"
# Useful for generating a review URL later on
_STORYTEXTID_REGEX = r"var\s+storytextid\s*=\s*storytextid=(\d+);"
# REGEX used to parse the reviews page
_REVIEW_COMPLETE_INFO_REGEX = r"img class=.*?</div"
_REVIEW_USER_NAME_REGEX = r"> *([^< ][^<]*)<"
_REVIEW_CHAPTER_REGEX = r"<small style=[^>]*>([^<]*)<"
_REVIEW_TIME_REGEX = r"<span data[^>]*>([^<]*)<"
_REVIEW_TEXT_REGEX = r"<div[^>]*>([^<]*)<"
# Used to parse the attributes which aren't directly contained in the
# JavaScript and hence need to be parsed manually
_NON_JAVASCRIPT_REGEX = r'Rated:(.+?)</div>'
_HTML_TAG_REGEX = r'<.*?>'
# Needed to properly decide if a token contains a genre or a character name
_GENRES = [
    'General', 'Romance', 'Humor', 'Drama', 'Poetry', 'Adventure', 'Mystery',
    'Horror', 'Parody', 'Angst', 'Supernatural', 'Suspense', 'Sci-Fi',
    'Fantasy', 'Spiritual', 'Tragedy', 'Western', 'Crime', 'Family', 'Hurt',
    'Comfort', 'Friendship'
]
# TEMPLATES
_STORY_URL_TEMPLATE = 'https://www.fanfiction.net/s/%d'
_CHAPTER_URL_TEMPLATE = 'https://www.fanfiction.net/s/%d/%d'
_USERID_URL_TEMPLATE = 'https://www.fanfiction.net/u/%d'
# Unix epoch: the site's timestamps are seconds since 1970-01-01
_DATE_COMPARISON = date(1970, 1, 1)
_DATE_FORMAT = '%Y%m%d'
def _parse_string(regex, source):
"""Returns first group of matched regular expression as string."""
return re.search(regex, source).group(1)
def _parse_integer(regex, source):
"""Returns first group of matched regular expression as integer."""
match = re.search(regex, source).group(1)
match = match.replace(',', '')
return int(match)
def _parse_date(regex, source):
    """Return the matched value, interpreted as Unix epoch seconds, as a date."""
    epoch_seconds = _parse_integer(regex, source)
    return _DATE_COMPARISON + timedelta(seconds=epoch_seconds)
def _unescape_javascript_string(string_):
"""Removes JavaScript-specific string escaping characters."""
return string_.replace("\\'", "'").replace('\\"', '"').replace('\\\\', '\\')
def _visible_filter(element):
if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
return False
element = unicodedata.normalize('NFKD', element).encode('ascii', 'ignore')
if re.match(r'<!--.*-->', str(element)):
return False
return True
def _get_int_value_from_token(token, prefix):
if not token.startswith(prefix):
raise ValueError("int token doesn't starts with given prefix")
else:
return int(token[len(prefix):].replace(',', ''))
def _get_date_value_from_token(token, prefix):
if not token.startswith(prefix):
raise ValueError("date token doesn't starts with given prefix")
else:
try:
return datetime.strptime(token[len(prefix):], '%m/%d/%Y')
except ValueError:
return datetime.today()
def _get_key_of_first_positive(f, d):
"""
returns key k of first item in l for which f(k) == True
or None
"""
for key, value in d.items():
if f(key) == True:
return key
return None
class Story(object):
    # Attributes included in get_json_dump() output.
    SERIALIZED_ATTRS = [
        'title',
        'id',
        'timestamp',
        'description',
        'fandoms',
        'author_id',
        'chapter_count',
        'word_count',
        'date_published',
        'date_updated',
        'rated',
        'language',
        'genre',
        'characters',
        'reviews',
        'favs',
        'followers',
        'complete'
    ]
    # Attributes that must be run through strftime when serialized.
    DATE_ATTRS = [
        'timestamp',
        'date_published',
        'date_updated'
    ]
    def __init__(self, url=None, id=None):
        """ A story on fanfiction.net
        If both url, and id are provided, url is used.
        :type id: int
        :param url: The url of the story.
        :param id: The story id of the story.
        Attributes:
            id (int): The story id.
            description (str): The text description of the story
            timestamp: The timestamp of moment when data was consistent with site
            fandoms [str]: The fandoms to which the story belongs
            chapter_count (int); The number of chapters.
            word_count (int): The number of words.
            author_id (int): The user id of the author.
            title (str): The title of the story.
            date_published (date): The date the story was published.
            date_updated (date): The date of the most recent update.
            rated (str): The story rating.
            language (str): The story language.
            genre [str]: The genre(s) of the story.
            characters [str]: The character(s) of the story.
            reviews (int): The number of reviews of the story.
            favs (int): The number of user which has this story in favorite list
            followers (int): The number of users who follow the story
            complete (bool): True if the story is complete, else False.
        """
        self.inited = False
        self.id = id
        self.url = url
        if id is None:
            if url is None:
                raise ValueError("There must be a url or an id.")
            else:
                # Bug fix: this branch previously read the undefined name
                # `source` (it is only bound in download_data) and therefore
                # always raised NameError.  Extract the id from the URL
                # instead, mirroring User.__init__.
                # TODO(review): confirm _STORYID_REGEX matches story URLs as
                # well as page sources.
                self.id = _parse_integer(_STORYID_REGEX, url)
        else:
            self.url = _STORY_URL_TEMPLATE % int(self.id)
            self.id = int(self.id)
    def download_data(self, timeout=5):
        """Fetch the story page and populate all metadata attributes."""
        self.timestamp = datetime.now()
        source = requests.get(self.url, timeout=timeout)
        source = source.text
        soup = bs4.BeautifulSoup(source, 'html.parser')
        self.author_id = _parse_integer(_USERID_REGEX, source)
        self.title = _unescape_javascript_string(_parse_string(_TITLE_REGEX, source).replace('+', ' '))
        # Last breadcrumb link holds the fandom(s); crossovers join them with '+'.
        fandom_chunk = soup.find('div', id='pre_story_links').find_all('a')[-1].get_text().replace('Crossover', '')
        self.fandoms = [fandom.strip() for fandom in fandom_chunk.split('+')]
        self.description = soup.find('div', {'style': 'margin-top:2px'}).get_text()
        # Tokens of information that aren't directly contained in the
        # JavaScript, need to manually parse and filter those
        tags = re.search(_NON_JAVASCRIPT_REGEX, source.replace('\n', ' ')).group(0)
        tokens = [token.strip() for token in
                  re.sub(_HTML_TAG_REGEX, '', tags).split('-')]
        self._parse_tags(tokens)
        self.inited = True
    def _parse_tags(self, tokens):
        """
        parse desription of story such as 'Rated: T - English - Humor/Adventure - Chapters: 2 - Words: 131,097 - Reviews: 537 - Favs: 2,515 - Follows: 2,207 - Updated: Jul 27, 2016 - Published: Dec 17, 2009 - <NAME>.'
        splitted into tokens list by '-' character
        This functions fill all field of the self object except: id, author_id, title, fandoms, timestamp
        """
        # skipping tokens 'Crossover' and token which contains fandoms
        # NOTE(review): loops forever / IndexErrors if no 'Rated:' token exists.
        while not tokens[0].startswith('Rated:'):
            tokens = tokens[1:]
        # Both tokens are constant and always available
        self.rated = tokens[0].replace('Rated:', '').replace('Fiction', '').strip()
        self.language = tokens[1]
        tokens = tokens[2:]
        # there can be token with the list of genres
        if tokens[0] in _GENRES or '/' in tokens[0] and all(token in _GENRES for token in tokens[0].split('/')):
            self.genre = tokens[0].split('/')
            tokens = tokens[1:]
        else:
            self.genre = []
        # deleting useless 'id: ...' token
        if tokens[-1].startswith('id:'):
            tokens = tokens[:-1]
        # and if story is complete the last token contain 'Complete'
        if 'Complete' in tokens[-1]:
            self.complete = True
            tokens = tokens[:-1]
        else:
            self.complete = False
        # except those there are 4 possible kind of tokens: tokens with int data, tokens with date data, story id token,
        # and token with characters/pairings
        int_tokens = {'Chapters: ': 'chapter_count', 'Words: ': 'word_count', 'Reviews: ': 'reviews',
                      'Favs: ': 'favs', 'Follows: ': 'followers'}
        date_tokens = {'Updated: ': 'date_updated', 'Published: ': 'date_published'}
        for token in tokens:
            # Dispatch each remaining token by its recognizable prefix.
            int_k = _get_key_of_first_positive(lambda s: token.startswith(s), int_tokens)
            date_k = _get_key_of_first_positive(lambda s: token.startswith(s), date_tokens)
            if int_k is not None:
                setattr(self, int_tokens[int_k], _get_int_value_from_token(token, int_k))
            elif date_k is not None:
                setattr(self, date_tokens[date_k], _get_date_value_from_token(token, date_k))
            else:
                # Unrecognized token: character/pairing list; strip '[' / ']'.
                self.characters = [c.translate(str.maketrans('', '', '[]')).strip() for c in token.split(',')]
        # now we have to fill field which could be left empty
        if not hasattr(self, 'chapter_count'):
            self.chapter_count = 1
        for field in int_tokens.values():
            if not hasattr(self, field):
                setattr(self, field, 0)
        if not hasattr(self, 'date_updated'):
            self.date_updated = self.date_published
        if not hasattr(self, 'characters'):
            self.characters = []
    def _parse_from_storylist_format(self, story_chunk, author_id):
        """
        Parse story from html chunk
        """
        if author_id:
            self.author_id = author_id
        else:
            self.author_id = _parse_integer(_USERID_URL_EXTRACT, str(story_chunk))
        self.timestamp = datetime.now()
        self.fandoms = [s.strip() for s in story_chunk.get('data-category').split('&')]
        self.title = story_chunk.get('data-title')
        self.description = str(story_chunk.find('div', {'class': 'z-indent z-padtop'}))
        # save only parts between div tags
        self.description = self.description[self.description.find('>') + 1:]
        self.description = self.description[:self.description.find('<div', 4)]
        tags = story_chunk.find('div', {'class': 'z-padtop2 xgray'}).get_text()
        self._parse_tags([token.strip() for token in tags.split('-')])
        self.inited = True
    def get_chapters(self):
        """
        A generator for all chapters in the story.
        :return: A generator to fetch chapter objects.
        """
        for number in range(1, self.chapter_count + 1):
            yield Chapter(story_id=self.id, chapter=number)
    def get_user(self):
        """
        :return: The user object of the author of the story.
        """
        return User(id=self.author_id)
    def get_json_dump(self, attrs=None):
        """Return a JSON-friendly dict of attrs (dates formatted via _DATE_FORMAT)."""
        result = {}
        for attr in attrs or self.SERIALIZED_ATTRS:
            if attr in self.DATE_ATTRS:
                result[attr] = getattr(self, attr).strftime(_DATE_FORMAT)
            else:
                result[attr] = getattr(self, attr)
        return result
    def print_info(self, attrs=None):
        """
        Print information held about the story.
        :param attrs: A list of attribute names to print information for.
        :return: void
        """
        assert self.inited
        if not attrs:
            attrs = self.SERIALIZED_ATTRS
        for attr in attrs:
            print("%12s\t%s" % (attr, getattr(self, attr)))
    def get_reviews(self):
        """
        A generator for all reviews in the story.
        :return: A generator to fetch reviews.
        """
        return ReviewsGenerator(self.id)
    # Method alias which allows the user to treat the get_chapters method like
    # a normal property if no manual opener is to be specified.
    chapters = property(get_chapters)
class ReviewsGenerator(object):
    """
    Iterates over a story's reviews in chronological order, downloading
    review pages lazily one at a time.
    Attributes:
        base_url (str): the story's review url without a page number
        page_number (int): number of the current review page
        reviews_cache List(str): raw review chunks parsed from the current page
        skip_reviews_number (int): how many cached reviews were already yielded
    """
    def __init__(self, story_id, chapter=0):
        """
        With the default chapter=0 the generator yields reviews for all chapters.
        """
        self.story_id = story_id
        self.base_url = '{}/r/{}/{}/'.format(root, story_id, chapter)
    def __iter__(self):
        # Reset pagination state so the object can be iterated repeatedly.
        self.page_number = 0
        self.reviews_cache = []
        self.skip_reviews_number = 0
        return self
    def __next__(self):
        self.skip_reviews_number += 1
        # Serve from the cache while it still holds unread reviews.
        if self.skip_reviews_number <= len(self.reviews_cache):
            return Review(self.story_id, self.reviews_cache[self.skip_reviews_number - 1])
        # Cache exhausted: download and parse the next page.
        self.page_number += 1
        page = self._downloadReviewPage(self.page_number)
        self.reviews_cache = re.findall(_REVIEW_COMPLETE_INFO_REGEX, page, re.DOTALL)
        if not self.reviews_cache:
            raise StopIteration
        self.skip_reviews_number = 1
        return Review(self.story_id, self.reviews_cache[0])
    def _downloadReviewPage(self, page_number):
        """Return the raw HTML of the review page with the given number."""
        return requests.get(self.base_url + str(page_number) + '/').text
class Review(object):
    """
    A single review of fanfiction story, on fanfiction.net
    Attributes:
        story_id (int): story ID
        user_id (int): ID of user who submited review (may be None if review is anonymous)
        user_name (str): user name (or pseudonym for anonymous review)
        chapter (str): chapter name
        time_ago (str): how much time passed since review submit (format may be inconsistent with what you see in browser just because fanfiction.net sends different pages depend on do you download page from browser or from console/that library
        text (str): review text
    """
    def __init__(self, story_id, unparsed_info):
        """
        That method should not be invoked outside of Story and Chapter classes
        :param story_id (int): story ID
        :param unparsed_info (str): string that contain the rest info
        """
        self.story_id = story_id
        self.user_name = _parse_string(_REVIEW_USER_NAME_REGEX, unparsed_info)
        self.chapter = _parse_string(_REVIEW_CHAPTER_REGEX, unparsed_info)
        self.text = _parse_string(_REVIEW_TEXT_REGEX, unparsed_info)
        self.time_ago = _parse_string(_REVIEW_TIME_REGEX, unparsed_info)
        # fanfiction.net abbreviates recent times ('8h' / '8m' instead of
        # '8 hours ago'), so append ' ago' for hour/minute values.
        # endswith with a suffix tuple also avoids the IndexError the old
        # `time_ago[-1]` indexing raised on an empty string.
        if self.time_ago.endswith(('h', 'm')):
            self.time_ago += ' ago'
        # Anonymous reviews carry no profile link, hence no user id.
        # (Was `== None`; identity comparison `is None` is the correct idiom.)
        if re.search(_USERID_URL_EXTRACT, unparsed_info) is None:
            self.user_id = None
        else:
            self.user_id = _parse_integer(_USERID_URL_EXTRACT, unparsed_info)
class Chapter(object):
    def __init__(self, url=None, story_id=None, chapter=None):
        """ A single chapter in a fanfiction story, on fanfiction.net
        :param url: The url of the chapter.
        :param story_id: The story id of the story of the chapter.
        :param chapter: The chapter number of the story.
        Attributes:
            story_id (int): Story ID
            number (int): Chapter number
            story_text_id (int): ?
            title (str): Title of the chapter, or title of the story.
            raw_text (str): The raw HTML of the story.
            text_list List(str): List of unicode strings for each paragraph.
            text (str): Visible text of the story.
        """
        # Build the URL from story id + chapter number when no URL is given.
        if url is None:
            if story_id is None:
                raise Exception('A URL or story id must be entered.')
            elif chapter is None:
                # NOTE(review): 'stroy' typo left as-is (runtime string).
                raise Exception('Both a stroy id and chapter number must be provided')
            elif story_id and chapter:
                url = _CHAPTER_URL_TEMPLATE % (story_id, chapter)
        source = requests.get(url)
        source = source.text
        # Pull ids back out of the downloaded page's embedded JavaScript.
        self.story_id = _parse_integer(_STORYID_REGEX, source)
        self.number = _parse_integer(_CHAPTER_REGEX, source)
        self.story_text_id = _parse_integer(_STORYTEXTID_REGEX, source)
        soup = bs4.BeautifulSoup(source, 'html.parser')
        select = soup.find('select', {'name': 'chapter'})
        if select:
            # There are multiple chapters available, use chapter's title
            self.title = select.find('option', selected=True).string.split(None, 1)[1]
        else:
            # No multiple chapters, one-shot or only a single chapter released
            # until now; for the lack of a proper chapter title use the story's
            # NOTE(review): .decode() assumes the unescape helper returns
            # bytes here -- confirm, since the branch above stores a str.
            self.title = _unescape_javascript_string(_parse_string(_TITLE_REGEX, source)).decode()
        soup = soup.find('div', id='storytext')
        # Try to remove AddToAny share buttons
        try:
            soup.find('div', {'class': lambda class_: class_ and 'a2a_kit' in class_}).extract()
        except AttributeError:
            pass
        # Normalize HTML tag attributes
        for hr in soup('hr'):
            del hr['size']
            del hr['noshade']
        self.raw_text = soup.decode()
        # Keep only the human-visible text nodes, one entry per fragment.
        texts = soup.findAll(text=True)
        self.text_list = list(filter(_visible_filter, texts))
        self.text = '\n'.join(self.text_list)
    def get_reviews(self):
        """
        A generator for all reviews for that chapter
        :return: A generator to fetch reviews.
        """
        return ReviewsGenerator(self.story_id, self.number)
class User(object):
    def __init__(self, url=None, id=None):
        """ A user page on fanfiction.net
        :param url: The url of user profile.
        :param id: The url of user profile.
        Attributes:
            id (int): User id
            timestamp (int): Timestamp of last update of downloaded profile
            stories [Story]: The list of stories written by user
            favorite_stories [Story]: The list of user favorite stories
            favorite_authors [User]: The list of user favorite stories
            username (str):
        """
        self.id = id
        self.url = url
        if id is None:
            if url is None:
                raise ValueError("There must be a url or an id.")
            else:
                # Extract the numeric user id from the profile URL.
                self.id = _parse_integer(_USERID_URL_EXTRACT, url)
        else:
            self.url = _USERID_URL_TEMPLATE % int(self.id)
            self.id = int(self.id)
    def download_data(self, timeout=5):
        """Fetch the profile page and populate username, stories and favorites."""
        self.timestamp = datetime.now()
        source = requests.get(self.url, timeout=timeout)
        source = source.text
        soup = bs4.BeautifulSoup(source, 'html.parser')
        self.username = _parse_string(_USERNAME_REGEX, source)
        self.stories = self._get_stories_from_profile(soup, fav_stories=False)
        self.favorite_stories = self._get_stories_from_profile(soup, fav_stories=True)
        self.favorite_authors = self._get_favorite_authors(soup)
    def get_json_dump(self):
        # Serialize to JSON-friendly primitives; related objects become id lists.
        return {
            'id': self.id,
            'timestamp': self.timestamp.strftime(_DATE_FORMAT),
            'username': self.username,
            'stories': [story.id for story in self.stories],
            'favorite_stories': [story.id for story in self.favorite_stories],
            'favorite_authors': [user.id for user in self.favorite_authors]
        }
    def _get_stories_from_profile(self, soup, fav_stories=True):
        """Parse either the user's own stories or their favorites from the profile."""
        if fav_stories:
            target_class = 'favstories'
        else:
            target_class = 'mystories'
        favourite_stories = soup.findAll('div', {'class': target_class})
        result = []
        for story_chunk in favourite_stories:
            story = Story(id=story_chunk.get('data-storyid'))
            # Own stories belong to this user; favorites keep their real author.
            story._parse_from_storylist_format(story_chunk, author_id=None if fav_stories else self.id)
            result.append(story)
        return result
    def _get_favorite_authors(self, soup):
        """Collect a User for every author profile linked from the favorites table."""
        result = []
        for column in soup.findAll('td', {'style': 'line-height:150%'}):
            for author_tag in column.findAll('a', href=re.compile(r".*/u/(\d+)/.*")):
                author_url = author_tag.get('href')
                author_url = root + author_url
                result.append(User(author_url))
        return result
| [
"re.compile",
"requests.get",
"bs4.BeautifulSoup",
"datetime.datetime.now",
"re.findall",
"datetime.date",
"unicodedata.normalize",
"datetime.datetime.today",
"re.sub",
"datetime.timedelta",
"re.search"
] | [((2058, 2074), 'datetime.date', 'date', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (2062, 2074), False, 'from datetime import timedelta, date, datetime\n'), ((2553, 2578), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'xutime'}), '(seconds=xutime)\n', (2562, 2578), False, 'from datetime import timedelta, date, datetime\n'), ((6336, 6350), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6348, 6350), False, 'from datetime import timedelta, date, datetime\n'), ((6368, 6407), 'requests.get', 'requests.get', (['self.url'], {'timeout': 'timeout'}), '(self.url, timeout=timeout)\n', (6380, 6407), False, 'import re, requests, bs4, unicodedata\n'), ((6452, 6492), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['source', '"""html.parser"""'], {}), "(source, 'html.parser')\n", (6469, 6492), False, 'import re, requests, bs4, unicodedata\n'), ((10517, 10531), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10529, 10531), False, 'from datetime import timedelta, date, datetime\n'), ((13851, 13907), 're.findall', 're.findall', (['_REVIEW_COMPLETE_INFO_REGEX', 'page', 're.DOTALL'], {}), '(_REVIEW_COMPLETE_INFO_REGEX, page, re.DOTALL)\n', (13861, 13907), False, 'import re, requests, bs4, unicodedata\n'), ((17068, 17085), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (17080, 17085), False, 'import re, requests, bs4, unicodedata\n'), ((17327, 17367), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['source', '"""html.parser"""'], {}), "(source, 'html.parser')\n", (17344, 17367), False, 'import re, requests, bs4, unicodedata\n'), ((19714, 19728), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19726, 19728), False, 'from datetime import timedelta, date, datetime\n'), ((19746, 19785), 'requests.get', 'requests.get', (['self.url'], {'timeout': 'timeout'}), '(self.url, timeout=timeout)\n', (19758, 19785), False, 'import re, requests, bs4, unicodedata\n'), ((19830, 19870), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['source', 
'"""html.parser"""'], {}), "(source, 'html.parser')\n", (19847, 19870), False, 'import re, requests, bs4, unicodedata\n'), ((2218, 2242), 're.search', 're.search', (['regex', 'source'], {}), '(regex, source)\n', (2227, 2242), False, 'import re, requests, bs4, unicodedata\n'), ((2373, 2397), 're.search', 're.search', (['regex', 'source'], {}), '(regex, source)\n', (2382, 2397), False, 'import re, requests, bs4, unicodedata\n'), ((2955, 2993), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKD"""', 'element'], {}), "('NFKD', element)\n", (2976, 2993), False, 'import re, requests, bs4, unicodedata\n'), ((14197, 14214), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (14209, 14214), False, 'import re, requests, bs4, unicodedata\n'), ((15756, 15801), 're.search', 're.search', (['_USERID_URL_EXTRACT', 'unparsed_info'], {}), '(_USERID_URL_EXTRACT, unparsed_info)\n', (15765, 15801), False, 'import re, requests, bs4, unicodedata\n'), ((3622, 3638), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (3636, 3638), False, 'from datetime import timedelta, date, datetime\n'), ((21303, 21331), 're.compile', 're.compile', (['""".*/u/(\\\\d+)/.*"""'], {}), "('.*/u/(\\\\d+)/.*')\n", (21313, 21331), False, 'import re, requests, bs4, unicodedata\n'), ((7219, 7252), 're.sub', 're.sub', (['_HTML_TAG_REGEX', '""""""', 'tags'], {}), "(_HTML_TAG_REGEX, '', tags)\n", (7225, 7252), False, 'import re, requests, bs4, unicodedata\n')] |
import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
from torch import nn
from tqdm import tqdm
import numpy as np
from datasets.preprocess import DatasetWrapper
from utils import AverageMeter
class IOC_MLP(torch.nn.Module):
def __init__(self, input_features, out_classes):
super().__init__()
self.model = nn.Sequential(
nn.Flatten(),
nn.Linear(input_features, 800),
nn.BatchNorm1d(800),
nn.ELU(),
nn.Linear(800, 800),
nn.BatchNorm1d(800),
nn.ELU(),
nn.Linear(800, 800),
nn.BatchNorm1d(800),
nn.ELU(),
nn.Linear(800, out_classes),
)
def forward(self, x):
output = self.model(x)
return output
def train_epoch(model: nn.Module, optimizer, loss_func, dataset, train_loader,
                epoch,
                n_epochs):
    """Run one training epoch; return (average loss, average error rate).

    After every optimizer step, all parameters named '<prefix>.<i>.weight'
    with Sequential index i >= 2 (Linear and BatchNorm alike) have their
    negative entries remapped to exp(w - 5), i.e. small positive values --
    presumably to keep the network convex in its input (TODO confirm).
    """
    model.train()
    losses = AverageMeter()
    errors = AverageMeter()
    with tqdm(total=len(dataset.train_set),
              desc=f"Epoch {epoch + 1} / {n_epochs}") as pbar:
        for data, targets in train_loader:
            if torch.cuda.is_available():
                data = data.cuda()
                targets = targets.cuda()
            optimizer.zero_grad()
            outputs = model(data)
            loss = loss_func(outputs, targets)
            loss.backward()
            optimizer.step()
            # convex ensuring step: force weights of layers >= 2 positive.
            # split[1] is the module's index inside the nn.Sequential.
            for name, param in model.named_parameters():
                split = name.split('.')
                if int(split[1]) >= 2 and split[2] == 'weight':
                    param_data = param.data.cpu().numpy()
                    # Map negative weights to exp(w - 5): positive but tiny.
                    param_data[param_data < 0] = np.exp(
                        param_data[param_data < 0] - 5)
                    #
                    param.data.copy_(torch.tensor(param_data))
            batch_size = targets.size(0)
            # Top-1 prediction; error = fraction of mismatches in the batch.
            _, pred = outputs.data.cpu().topk(1, dim=1)
            error = torch.ne(pred.squeeze(),
                             targets.cpu()).float().sum().item() / batch_size
            errors.update(error, batch_size)
            losses.update(loss.item())
            pbar.update(data.size(0))
            pbar.set_postfix(**{
                '[Train/Loss]': losses.avg,
                '[Train/Error]': errors.avg
            })
    return losses.avg, errors.avg
#
#
def test_epoch(model: nn.Module, dataset: DatasetWrapper,
               test_loader: torch.utils.data.DataLoader):
    """Evaluate `model` on `test_loader`; return the average top-1 error rate.

    NOTE(review): the progress-bar total uses dataset.test_set even when a
    validation loader is passed in (as fit() does) -- confirm intent.
    """
    model.eval()
    # losses = AverageMeter()
    errors = AverageMeter()
    with tqdm(total=len(dataset.test_set),
              desc=f"Valid") as pbar:
        with torch.no_grad():
            for data, targets in test_loader:
                if torch.cuda.is_available():
                    data = data.cuda()
                    targets = targets.cuda()
                outputs = model(data)
                # loss = loss_func(outputs, targets)
                batch_size = targets.size(0)
                # Top-1 prediction; error = fraction of mismatches.
                _, pred = outputs.data.cpu().topk(1, dim=1)
                error = torch.ne(pred.squeeze(),
                                 targets.cpu()).float().sum().item() / batch_size
                errors.update(error, batch_size)
                # losses.update(loss.item())
                pbar.update(data.shape[0])
                pbar.set_postfix(**{
                    '[Valid/Error]': errors.avg
                })
    return errors.avg
def fit(model: IOC_MLP, dataset: DatasetWrapper, lr=0.0001, batch_size=64,
        n_epochs=10, path=None):
    """Train `model` with Adam, TensorBoard logging and early stopping.

    The checkpoint with the lowest validation error is saved to `path`
    (default trained_models/ioc_mlp.<dataset name>); training stops after
    8 consecutive epochs without improvement.
    """
    if path is None:
        path = f'trained_models/ioc_mlp.{dataset.name}'
    writer = SummaryWriter(f'runs/ioc_mlp.{dataset.name}')
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    train_loader = torch.utils.data.DataLoader(dataset.train_set,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               )
    # NOTE(review): test_loader is created but never used in this function.
    test_loader = torch.utils.data.DataLoader(dataset.test_set,
                                              batch_size=batch_size,
                                              )
    valid_loader = torch.utils.data.DataLoader(dataset.valid_set,
                                               batch_size=batch_size,
                                               )
    loss_func = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    best_error = np.inf
    counter = 0  # epochs since the last validation improvement
    for epoch in range(n_epochs):
        train_loss, train_error = train_epoch(model=model, optimizer=optimizer,
                                              loss_func=loss_func,
                                              dataset=dataset,
                                              train_loader=train_loader,
                                              epoch=epoch,
                                              n_epochs=n_epochs)
        valid_error = test_epoch(model, dataset, valid_loader)
        writer.add_scalars('loss', {'train': train_loss}, epoch)
        writer.add_scalars('accuracy', {'train': (1 - train_error) * 100,
                                        'valid': (1 - valid_error) * 100},
                           epoch)
        print(valid_error)
        # Checkpoint on improvement; otherwise count toward early stopping.
        if valid_error < best_error:
            print('Saving!')
            torch.save(model.state_dict(), path)
            best_error = valid_error
            counter = 0
        else:
            counter += 1
            if counter > 7:
                print("Patience came ending now")
                break
    writer.close()
| [
"torch.nn.ELU",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.CrossEntropyLoss",
"torch.nn.Flatten",
"numpy.exp",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"utils.AverageMeter"
] | [((960, 974), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (972, 974), False, 'from utils import AverageMeter\n'), ((988, 1002), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1000, 1002), False, 'from utils import AverageMeter\n'), ((2598, 2612), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2610, 2612), False, 'from utils import AverageMeter\n'), ((3693, 3738), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['f"""runs/ioc_mlp.{dataset.name}"""'], {}), "(f'runs/ioc_mlp.{dataset.name}')\n", (3706, 3738), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((4440, 4461), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4459, 4461), False, 'from torch import nn\n'), ((378, 390), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (388, 390), False, 'from torch import nn\n'), ((404, 434), 'torch.nn.Linear', 'nn.Linear', (['input_features', '(800)'], {}), '(input_features, 800)\n', (413, 434), False, 'from torch import nn\n'), ((448, 467), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(800)'], {}), '(800)\n', (462, 467), False, 'from torch import nn\n'), ((481, 489), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (487, 489), False, 'from torch import nn\n'), ((503, 522), 'torch.nn.Linear', 'nn.Linear', (['(800)', '(800)'], {}), '(800, 800)\n', (512, 522), False, 'from torch import nn\n'), ((536, 555), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(800)'], {}), '(800)\n', (550, 555), False, 'from torch import nn\n'), ((569, 577), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (575, 577), False, 'from torch import nn\n'), ((591, 610), 'torch.nn.Linear', 'nn.Linear', (['(800)', '(800)'], {}), '(800, 800)\n', (600, 610), False, 'from torch import nn\n'), ((624, 643), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(800)'], {}), '(800)\n', (638, 643), False, 'from torch import nn\n'), ((657, 665), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (663, 665), False, 'from torch import nn\n'), ((679, 706), 
'torch.nn.Linear', 'nn.Linear', (['(800)', 'out_classes'], {}), '(800, out_classes)\n', (688, 706), False, 'from torch import nn\n'), ((1752, 1790), 'numpy.exp', 'np.exp', (['(param_data[param_data < 0] - 5)'], {}), '(param_data[param_data < 0] - 5)\n', (1758, 1790), True, 'import numpy as np\n')] |
import base64
import re
def base64ToString(base64):
    """Decode a base64-encoded ASCII string and return the plain text.

    Bug fix: the original called .b64decode() on the *parameter* (a str)
    with no argument, referenced an undefined `message`, and returned
    nothing.  The parameter keeps its original name `base64` for
    backward compatibility, so the stdlib module is re-imported under an
    alias inside the function.
    """
    import base64 as _b64  # the parameter shadows the module name
    return _b64.b64decode(base64).decode("ascii")
def shiftASCII(string, n):
    """Shift every character of `string` forward by `n` code points, modulo 127."""
    return "".join(chr((ord(ch) + n) % 127) for ch in string)
# Brute-force every possible shift of the ciphertext and print each candidate.
message = "}{[l^KlwOmwZjmOKW9"
for offset in range(127):
    print(shiftASCII(message, offset))
# ecCTF3T_7U_BRU73?!
"base64.b64encode",
"base64.b64decode"
] | [((90, 108), 'base64.b64decode', 'base64.b64decode', ([], {}), '()\n', (106, 108), False, 'import base64\n'), ((172, 203), 'base64.b64encode', 'base64.b64encode', (['message_bytes'], {}), '(message_bytes)\n', (188, 203), False, 'import base64\n')] |
import time
from math import fabs
import putil.timer
from putil.testing import UtilTest
class TestTimer(UtilTest):
    # NOTE(review): the `.next()` iterator calls below are Python 2 style;
    # under Python 3 they raise AttributeError (use next(it) instead) --
    # confirm the intended interpreter before porting.
    def setUp(self):
        # Canned step durations; exhausting these iterators raises the
        # StopIteration that ends the loops in the stats tests below.
        self.op1_times = iter([ .01, .02 ])
        self.a1 = putil.timer.Accumulator()
        self.op2_step1_times = iter([ .005, .015, .005, .005])
        self.op2_step2_times = iter([ .01, .02, .01, .01])
        self.a2 = putil.timer.Accumulator()
    def test_found_caller(self):
        # A Timer should name its logger after the module that created it.
        import importable.create_timer
        t = importable.create_timer.t
        self.assertEquals('timing.putil.test.importable.create_timer', t.logger.name)
    def test_time_event(self):
        t = putil.timer.Timer()
        time.sleep(0.01)
        t.complete_step('pause')
        time.sleep(0.02)
        t.complete_step()
        # Construction plus two completed steps -> three recorded times.
        self.assertEquals(3, len(t.times))
    def one_step_operation(self):
        # Helper: time one single-step operation into accumulator a1.
        t = putil.timer.Timer()
        time.sleep(self.op1_times.next())
        t.complete_step()
        self.a1.add(t)
    def test_stats_one_step(self):
        try:
            while True:
                self.one_step_operation()
        except StopIteration:
            pass
        self.assertEquals(2, self.a1.get_count())
        self.assertAlmostEqual(self.a1.get_average(), 0.015, places=2)
        self.assertTrue( fabs(self.a1.get_average()-0.015) < .002 )
        self.assertAlmostEqual(self.a1.get_standard_deviation(), 0.005, places=2)
    def two_step_operation(self):
        # Helper: time one two-step ('one'/'two') operation into accumulator a2.
        t = putil.timer.Timer()
        time.sleep(self.op2_step1_times.next())
        t.complete_step('one')
        time.sleep(self.op2_step2_times.next())
        t.complete_step('two')
        self.a2.add(t)
    def test_stats_two_steps(self):
        try:
            while True:
                self.two_step_operation()
        except StopIteration:
            pass
        # Per-step and combined statistics over the canned durations.
        self.assertEquals(8, self.a2.get_count())
        self.assertEquals(4, self.a2.get_count("one"))
        self.assertEquals(4, self.a2.get_count("two"))
        self.assertAlmostEqual(self.a2.get_average(), 0.01, places=2)
        self.assertAlmostEqual(self.a2.get_average("one"), 0.008, places=2)
        self.assertAlmostEqual(self.a2.get_average("two"), 0.013, places=2)
        self.assertNotEquals(0, self.a2.get_standard_deviation())
| [
"time.sleep"
] | [((664, 680), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (674, 680), False, 'import time\n'), ((723, 739), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (733, 739), False, 'import time\n')] |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/endpoint.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/api/endpoint.proto",
package="google.api",
syntax="proto3",
serialized_options=b"\n\016com.google.apiB\rEndpointProtoP\001ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\242\002\004GAPI",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x19google/api/endpoint.proto\x12\ngoogle.api"Q\n\x08\x45ndpoint\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x07\x61liases\x18\x02 \x03(\tB\x02\x18\x01\x12\x0e\n\x06target\x18\x65 \x01(\t\x12\x12\n\nallow_cors\x18\x05 \x01(\x08\x42o\n\x0e\x63om.google.apiB\rEndpointProtoP\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\xa2\x02\x04GAPIb\x06proto3',
)
_ENDPOINT = _descriptor.Descriptor(
name="Endpoint",
full_name="google.api.Endpoint",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.api.Endpoint.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="aliases",
full_name="google.api.Endpoint.aliases",
index=1,
number=2,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\030\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="target",
full_name="google.api.Endpoint.target",
index=2,
number=101,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="allow_cors",
full_name="google.api.Endpoint.allow_cors",
index=3,
number=5,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=41,
serialized_end=122,
)
DESCRIPTOR.message_types_by_name["Endpoint"] = _ENDPOINT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Endpoint = _reflection.GeneratedProtocolMessageType(
"Endpoint",
(_message.Message,),
{
"DESCRIPTOR": _ENDPOINT,
"__module__": "google.api.endpoint_pb2"
# @@protoc_insertion_point(class_scope:google.api.Endpoint)
},
)
_sym_db.RegisterMessage(Endpoint)
DESCRIPTOR._options = None
_ENDPOINT.fields_by_name["aliases"]._options = None
# @@protoc_insertion_point(module_scope)
| [
"google.protobuf.reflection.GeneratedProtocolMessageType",
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.descriptor.FileDescriptor"
] | [((1001, 1027), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (1025, 1027), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((1043, 1744), 'google.protobuf.descriptor.FileDescriptor', '_descriptor.FileDescriptor', ([], {'name': '"""google/api/endpoint.proto"""', 'package': '"""google.api"""', 'syntax': '"""proto3"""', 'serialized_options': "b'\\n\\x0ecom.google.apiB\\rEndpointProtoP\\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\\xa2\\x02\\x04GAPI'", 'create_key': '_descriptor._internal_create_key', 'serialized_pb': 'b\'\\n\\x19google/api/endpoint.proto\\x12\\ngoogle.api"Q\\n\\x08Endpoint\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\x13\\n\\x07aliases\\x18\\x02 \\x03(\\tB\\x02\\x18\\x01\\x12\\x0e\\n\\x06target\\x18e \\x01(\\t\\x12\\x12\\n\\nallow_cors\\x18\\x05 \\x01(\\x08Bo\\n\\x0ecom.google.apiB\\rEndpointProtoP\\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\\xa2\\x02\\x04GAPIb\\x06proto3\''}), '(name=\'google/api/endpoint.proto\', package=\n \'google.api\', syntax=\'proto3\', serialized_options=\n b\'\\n\\x0ecom.google.apiB\\rEndpointProtoP\\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\\xa2\\x02\\x04GAPI\'\n , create_key=_descriptor._internal_create_key, serialized_pb=\n b\'\\n\\x19google/api/endpoint.proto\\x12\\ngoogle.api"Q\\n\\x08Endpoint\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\x13\\n\\x07aliases\\x18\\x02 \\x03(\\tB\\x02\\x18\\x01\\x12\\x0e\\n\\x06target\\x18e \\x01(\\t\\x12\\x12\\n\\nallow_cors\\x18\\x05 \\x01(\\x08Bo\\n\\x0ecom.google.apiB\\rEndpointProtoP\\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\\xa2\\x02\\x04GAPIb\\x06proto3\'\n )\n', (1069, 1744), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4719, 4865), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""Endpoint"""', 
'(_message.Message,)', "{'DESCRIPTOR': _ENDPOINT, '__module__': 'google.api.endpoint_pb2'}"], {}), "('Endpoint', (_message.Message,), {\n 'DESCRIPTOR': _ENDPOINT, '__module__': 'google.api.endpoint_pb2'})\n", (4759, 4865), True, 'from google.protobuf import reflection as _reflection\n'), ((2591, 2980), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""aliases"""', 'full_name': '"""google.api.Endpoint.aliases"""', 'index': '(1)', 'number': '(2)', 'type': '(9)', 'cpp_type': '(9)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': "b'\\x18\\x01'", 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='aliases', full_name=\n 'google.api.Endpoint.aliases', index=1, number=2, type=9, cpp_type=9,\n label=3, has_default_value=False, default_value=[], message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=b'\\x18\\x01', file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (2618, 2980), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3787, 4179), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""allow_cors"""', 'full_name': '"""google.api.Endpoint.allow_cors"""', 'index': '(3)', 'number': '(5)', 'type': '(8)', 'cpp_type': '(7)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(False)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='allow_cors', full_name=\n 'google.api.Endpoint.allow_cors', index=3, number=5, type=8, cpp_type=7,\n label=1, has_default_value=False, default_value=False, 
message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (3814, 4179), True, 'from google.protobuf import descriptor as _descriptor\n')] |
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('imdb_required')
class FilterImdbRequired(object):
    """
    Rejects entries that have neither ``imdb_url`` nor ``imdb_id``.
    Performs an imdb lookup / search first when necessary.

    Example::

      imdb_required: yes
    """

    schema = {'type': 'boolean'}

    @plugin.priority(32)
    def on_task_filter(self, task, config):
        """Reject every entry that still lacks imdb information after lookup."""
        if not config:
            # Plugin disabled via ``imdb_required: no``.
            return
        for entry in task.entries:
            try:
                # Attempt to populate the imdb fields for this entry first.
                plugin.get_plugin_by_name('imdb_lookup').instance.lookup(entry)
            except plugin.PluginError:
                entry.reject('imdb required')
            has_imdb = 'imdb_id' in entry or 'imdb_url' in entry
            if not has_imdb:
                entry.reject('imdb required')
@event('plugin.register')
def register_plugin():
    # Register the filter under the task option name ``imdb_required``
    # using plugin API version 2.
    plugin.register(FilterImdbRequired, 'imdb_required', api_ver=2)
| [
"logging.getLogger",
"flexget.plugin.register",
"flexget.event.event",
"flexget.plugin.priority",
"flexget.plugin.get_plugin_by_name"
] | [((149, 183), 'logging.getLogger', 'logging.getLogger', (['"""imdb_required"""'], {}), "('imdb_required')\n", (166, 183), False, 'import logging\n'), ((850, 874), 'flexget.event.event', 'event', (['"""plugin.register"""'], {}), "('plugin.register')\n", (855, 874), False, 'from flexget.event import event\n'), ((411, 430), 'flexget.plugin.priority', 'plugin.priority', (['(32)'], {}), '(32)\n', (426, 430), False, 'from flexget import plugin\n'), ((902, 965), 'flexget.plugin.register', 'plugin.register', (['FilterImdbRequired', '"""imdb_required"""'], {'api_ver': '(2)'}), "(FilterImdbRequired, 'imdb_required', api_ver=2)\n", (917, 965), False, 'from flexget import plugin\n'), ((585, 625), 'flexget.plugin.get_plugin_by_name', 'plugin.get_plugin_by_name', (['"""imdb_lookup"""'], {}), "('imdb_lookup')\n", (610, 625), False, 'from flexget import plugin\n')] |
"""
This code contains tasks for executing EMIT Level 3 PGEs and helper utilities.
Author: <NAME>, <EMAIL>
"""
import datetime
import logging
import os
import luigi
import spectral.io.envi as envi
from emit_main.workflow.output_targets import AcquisitionTarget
from emit_main.workflow.workflow_manager import WorkflowManager
from emit_main.workflow.l1b_tasks import L1BGeolocate
from emit_main.workflow.l2a_tasks import L2AMask, L2AReflectance
from emit_main.workflow.slurm import SlurmJobTask
logger = logging.getLogger("emit-main")
class L3Unmix(SlurmJobTask):
    """
    Creates L3 fractional cover estimates.

    :returns: Fractional cover file and uncertainties
    """

    config_path = luigi.Parameter()
    acquisition_id = luigi.Parameter()
    level = luigi.Parameter()
    partition = luigi.Parameter()

    # Slurm resource requests for the unmixing job
    n_cores = 40
    memory = 180000

    task_namespace = "emit"

    def requires(self):
        """Depend on the L2A reflectance and L2A mask products for this acquisition."""
        logger.debug(self.task_family + " requires")
        return (L2AReflectance(config_path=self.config_path, acquisition_id=self.acquisition_id, level=self.level,
                               partition=self.partition),
                L2AMask(config_path=self.config_path, acquisition_id=self.acquisition_id, level=self.level,
                        partition=self.partition))

    def output(self):
        """Return the acquisition-level target for this task's products."""
        logger.debug(self.task_family + " output")
        wm = WorkflowManager(config_path=self.config_path, acquisition_id=self.acquisition_id)
        return AcquisitionTarget(acquisition=wm.acquisition, task_family=self.task_family)

    def work(self):
        """Run the SpectralUnmixing PGE, copy its products into place, stamp
        provenance into the ENVI headers, and record product metadata in the DB."""
        logger.debug(self.task_family + " run")

        wm = WorkflowManager(config_path=self.config_path, acquisition_id=self.acquisition_id)
        acq = wm.acquisition
        pge = wm.pges["SpectralUnmixing"]

        # Build PGE commands for run_tetracorder_pge.sh
        unmix_exe = os.path.join(pge.repo_dir, "unmix.jl")
        # BUG FIX: ``endmember_path`` was referenced later (in ``input_files``)
        # without ever being defined, raising NameError.  Bind the unmixing
        # library path once and use it both for the command and the provenance.
        endmember_path = wm.config["unmixing_library"]
        endmember_key = "level_1"
        tmp_log_path = os.path.join(self.local_tmp_dir,
                                    os.path.basename(acq.cover_img_path).replace(".img", "_pge.log"))
        output_base = os.path.join(self.local_tmp_dir, "unmixing_output")

        # Set up environment variables for the shared Julia runtime
        env = os.environ.copy()
        env["PATH"] = "/beegfs/store/shared/julia-1.6.5/bin:${PATH}"
        env["JULIA_DEPOT_PATH"] = "/beegfs/store/shared/.julia_165_shared"
        env["JULIA_PROJECT"] = pge.repo_dir

        # Build command
        cmd_unmix = ['julia', '-p', str(self.n_cores), unmix_exe, acq.rfl_img_path, endmember_path,
                     endmember_key, output_base, "--normalization", "brightness", "--mode", "sma-best",
                     "--n_mc", "50", "--reflectance_uncertainty_file", acq.uncert_img_path,
                     "--spectral_starting_column", "8", "--num_endmembers", "20", "--log_file", tmp_log_path]
        pge.run(cmd_unmix, tmp_dir=self.tmp_dir, env=env, use_conda_run=False)

        # Copy products from the local tmp dir into their final locations
        wm.copy(f'{output_base}_fractional_cover', acq.cover_img_path)
        wm.copy(f'{output_base}_fractional_cover.hdr', acq.cover_hdr_path)
        wm.copy(f'{output_base}_fractional_cover_uncertainty', acq.coveruncert_img_path)
        wm.copy(f'{output_base}_fractional_cover_uncertainty.hdr', acq.coveruncert_hdr_path)
        wm.copy(tmp_log_path, acq.cover_img_path.replace(".img", "_pge.log"))

        input_files = {
            "reflectance_file": acq.rfl_img_path,
            "reflectance_uncertainty_file": acq.uncert_img_path,
            "endmember_path": endmember_path,
        }

        # Update hdr files with acquisition and provenance metadata
        for header_to_update in [acq.cover_hdr_path, acq.coveruncert_hdr_path]:
            input_files_arr = ["{}={}".format(key, value) for key, value in input_files.items()]
            doc_version = "EMIT SDS L3 JPL-D 104238, Rev A"  # \todo check
            hdr = envi.read_envi_header(header_to_update)
            hdr["emit acquisition start time"] = acq.start_time_with_tz.strftime("%Y-%m-%dT%H:%M:%S%z")
            hdr["emit acquisition stop time"] = acq.stop_time_with_tz.strftime("%Y-%m-%dT%H:%M:%S%z")
            hdr["emit pge name"] = pge.repo_url
            hdr["emit pge version"] = pge.version_tag
            hdr["emit pge input files"] = input_files_arr
            hdr["emit pge run command"] = " ".join(cmd_unmix)
            hdr["emit software build version"] = wm.config["extended_build_num"]
            hdr["emit documentation version"] = doc_version
            creation_time = datetime.datetime.fromtimestamp(
                os.path.getmtime(acq.cover_img_path), tz=datetime.timezone.utc)
            hdr["emit data product creation time"] = creation_time.strftime("%Y-%m-%dT%H:%M:%S%z")
            hdr["emit data product version"] = wm.config["processing_version"]
            hdr["emit acquisition daynight"] = acq.daynight
            envi.write_envi_header(header_to_update, hdr)

        # PGE writes metadata to db
        dm = wm.database_manager
        product_dict_cover = {
            "img_path": acq.cover_img_path,
            "hdr_path": acq.cover_hdr_path,
            "created": creation_time,
            "dimensions": {
                "lines": hdr["lines"],
                "samples": hdr["samples"],
                "bands": hdr["bands"]
            }
        }
        dm.update_acquisition_metadata(acq.acquisition_id, {"products.l3.cover": product_dict_cover})

        product_dict_cover_uncert = {
            "img_path": acq.coveruncert_img_path,
            "hdr_path": acq.coveruncert_hdr_path,
            "created": creation_time,
            "dimensions": {
                "lines": hdr["lines"],
                "samples": hdr["samples"],
                "bands": hdr["bands"]
            }
        }
        dm.update_acquisition_metadata(acq.acquisition_id, {"products.l3.coveruncert": product_dict_cover_uncert})

        # NOTE(review): the two "*_hdr_path:" keys below carry a trailing colon;
        # kept as-is because downstream consumers may already depend on them.
        log_entry = {
            "task": self.task_family,
            "pge_name": pge.repo_url,
            "pge_version": pge.version_tag,
            "pge_input_files": input_files,
            "pge_run_command": " ".join(cmd_unmix),
            "documentation_version": doc_version,
            "product_creation_time": creation_time,
            "log_timestamp": datetime.datetime.now(tz=datetime.timezone.utc),
            "completion_status": "SUCCESS",
            "output": {
                "l3_cover_img_path": acq.cover_img_path,
                "l3_cover_hdr_path:": acq.cover_hdr_path,
                "l3_coveruncert_img_path": acq.coveruncert_img_path,
                "l3_coveruncert_hdr_path:": acq.coveruncert_hdr_path
            }
        }

        dm.insert_acquisition_log_entry(self.acquisition_id, log_entry)
| [
"logging.getLogger",
"spectral.io.envi.write_envi_header",
"emit_main.workflow.output_targets.AcquisitionTarget",
"emit_main.workflow.l2a_tasks.L2AReflectance",
"os.path.join",
"emit_main.workflow.l2a_tasks.L2AMask",
"os.environ.copy",
"os.path.getmtime",
"datetime.datetime.now",
"spectral.io.envi... | [((508, 538), 'logging.getLogger', 'logging.getLogger', (['"""emit-main"""'], {}), "('emit-main')\n", (525, 538), False, 'import logging\n'), ((701, 718), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (716, 718), False, 'import luigi\n'), ((740, 757), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (755, 757), False, 'import luigi\n'), ((770, 787), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (785, 787), False, 'import luigi\n'), ((804, 821), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (819, 821), False, 'import luigi\n'), ((1388, 1474), 'emit_main.workflow.workflow_manager.WorkflowManager', 'WorkflowManager', ([], {'config_path': 'self.config_path', 'acquisition_id': 'self.acquisition_id'}), '(config_path=self.config_path, acquisition_id=self.\n acquisition_id)\n', (1403, 1474), False, 'from emit_main.workflow.workflow_manager import WorkflowManager\n'), ((1485, 1560), 'emit_main.workflow.output_targets.AcquisitionTarget', 'AcquisitionTarget', ([], {'acquisition': 'wm.acquisition', 'task_family': 'self.task_family'}), '(acquisition=wm.acquisition, task_family=self.task_family)\n', (1502, 1560), False, 'from emit_main.workflow.output_targets import AcquisitionTarget\n'), ((1645, 1731), 'emit_main.workflow.workflow_manager.WorkflowManager', 'WorkflowManager', ([], {'config_path': 'self.config_path', 'acquisition_id': 'self.acquisition_id'}), '(config_path=self.config_path, acquisition_id=self.\n acquisition_id)\n', (1660, 1731), False, 'from emit_main.workflow.workflow_manager import WorkflowManager\n'), ((1875, 1913), 'os.path.join', 'os.path.join', (['pge.repo_dir', '"""unmix.jl"""'], {}), "(pge.repo_dir, 'unmix.jl')\n", (1887, 1913), False, 'import os\n'), ((2128, 2179), 'os.path.join', 'os.path.join', (['self.local_tmp_dir', '"""unmixing_output"""'], {}), "(self.local_tmp_dir, 'unmixing_output')\n", (2140, 2179), False, 'import os\n'), ((2234, 2251), 'os.environ.copy', 'os.environ.copy', ([], {}), 
'()\n', (2249, 2251), False, 'import os\n'), ((984, 1113), 'emit_main.workflow.l2a_tasks.L2AReflectance', 'L2AReflectance', ([], {'config_path': 'self.config_path', 'acquisition_id': 'self.acquisition_id', 'level': 'self.level', 'partition': 'self.partition'}), '(config_path=self.config_path, acquisition_id=self.\n acquisition_id, level=self.level, partition=self.partition)\n', (998, 1113), False, 'from emit_main.workflow.l2a_tasks import L2AMask, L2AReflectance\n'), ((1157, 1278), 'emit_main.workflow.l2a_tasks.L2AMask', 'L2AMask', ([], {'config_path': 'self.config_path', 'acquisition_id': 'self.acquisition_id', 'level': 'self.level', 'partition': 'self.partition'}), '(config_path=self.config_path, acquisition_id=self.acquisition_id,\n level=self.level, partition=self.partition)\n', (1164, 1278), False, 'from emit_main.workflow.l2a_tasks import L2AMask, L2AReflectance\n'), ((3867, 3906), 'spectral.io.envi.read_envi_header', 'envi.read_envi_header', (['header_to_update'], {}), '(header_to_update)\n', (3888, 3906), True, 'import spectral.io.envi as envi\n'), ((4867, 4912), 'spectral.io.envi.write_envi_header', 'envi.write_envi_header', (['header_to_update', 'hdr'], {}), '(header_to_update, hdr)\n', (4889, 4912), True, 'import spectral.io.envi as envi\n'), ((6248, 6295), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (6269, 6295), False, 'import datetime\n'), ((4553, 4589), 'os.path.getmtime', 'os.path.getmtime', (['acq.cover_img_path'], {}), '(acq.cover_img_path)\n', (4569, 4589), False, 'import os\n'), ((2040, 2076), 'os.path.basename', 'os.path.basename', (['acq.cover_img_path'], {}), '(acq.cover_img_path)\n', (2056, 2076), False, 'import os\n')] |
'''
Code to copy Curry expressions.
'''
from __future__ import absolute_import
from .....common import T_SETGRD, T_FWD
from copy import copy, deepcopy
from ..... import inspect
from . import node
__all__ = ['copygraph', 'copynode', 'GraphCopier', 'Skipper']
class GraphCopier(object):
    '''
    Deep-copies an expression.  The skipper can be used to remove nodes.  This
    can be used to remove forward nodes and set guards from values.
    '''
    def __init__(self, skipper=None):
        # ``skipper`` maps a node to its replacement, or returns None to keep
        # the node.  It is optional; None means "never skip anything."
        self.expr = None
        self.skipper = skipper

    def __call__(self, expr, memo=None):
        '''Copy ``expr``.  ``memo`` is the standard ``deepcopy`` memo dict.'''
        self.expr = expr
        return deepcopy(self, memo)

    def __deepcopy__(self, memo):
        if not isinstance(self.expr, node.Node):
            # Built-in values (int, str, float, ...) copy directly.
            return deepcopy(self.expr, memo)
        # BUG FIX: the skipper is optional (default None); previously calling
        # the copier without one raised TypeError on every Node input.
        target = self.skipper(self.expr) if self.skipper is not None else None
        if target is not None:
            # The node is skipped; copy its replacement instead.
            return self(target, memo)
        info = self.expr.info
        partial = info.arity > len(self.expr.successors)
        return node.Node(
            info
          , *(self(succ) for succ in self.expr.successors)
          , partial=partial
          )
class Skipper(object):
    '''
    Indicates which nodes to skip.  If a node should be skipped, the
    __call__ method should return its replacement.
    '''
    def __init__(self, skipfwd=False, skipgrds=None):
        # skipfwd:  when True, FWD nodes are replaced with their targets.
        # skipgrds: set identifiers whose set guards are replaced by values.
        self.skipfwd = skipfwd
        self.skipgrds = set() if skipgrds is None else skipgrds

    def __call__(self, expr):
        '''Return the replacement for ``expr``, or None to keep it as-is.'''
        if expr.info.tag == T_FWD:
            # BUG FIX: ``skipfwd`` is an instance attribute, not a local name;
            # the original ``if skipfwd:`` raised NameError for FWD nodes.
            if self.skipfwd:
                return inspect.fwd_target(expr)
        elif expr.info.tag == T_SETGRD:
            if inspect.get_set_id(expr) in self.skipgrds:
                return inspect.get_setguard_value(expr)
def copygraph(expr, memo=None, **kwds):
    '''
    Deep-copies a Curry expression, optionally removing certain nodes.

    Args:
        expr:
            An instance of ``graph.Node`` or a built-in type such as ``int``,
            ``str``, or ``float``.
        skipfwd:
            Indicates whether to skip FWD nodes.
        skipgrds:
            A container of set identifers indicating which set guards to skip.

    Returns:
        A deep copy of ``expr``.
    '''
    skipper = Skipper(**kwds)
    copier = GraphCopier(skipper=skipper)
    return copier(expr, memo=memo)
def copynode(expr, mapf=None):
    '''
    Makes a shallow copy of a Curry expression.

    Args:
        expr
            The expression to copy.  Can be an instance of ``graph.Node`` or a
            built-in type such as ``int``, ``str``, or ``float``.
        mapf
            An optional map function.  If supplied, it is applied to each
            successor of ``expr``.

    Returns:
        A shallow copy of ``expr``.
    '''
    if not isinstance(expr, node.Node):
        # Built-in values take a plain shallow copy.
        return copy(expr)
    partial = expr.info.arity > len(expr.successors)
    successors = expr.successors if mapf is None else map(mapf, expr.successors)
    return node.Node(expr.info, *successors, partial=partial)
| [
"copy.copy",
"copy.deepcopy"
] | [((599, 619), 'copy.deepcopy', 'deepcopy', (['self', 'memo'], {}), '(self, memo)\n', (607, 619), False, 'from copy import copy, deepcopy\n'), ((2831, 2841), 'copy.copy', 'copy', (['expr'], {}), '(expr)\n', (2835, 2841), False, 'from copy import copy, deepcopy\n'), ((711, 736), 'copy.deepcopy', 'deepcopy', (['self.expr', 'memo'], {}), '(self.expr, memo)\n', (719, 736), False, 'from copy import copy, deepcopy\n')] |
#
# Visualize the audio of an Elixoids game:
#
# pip3 install websocket-client
# python3 clients/listen.py --host example.com
#
import argparse
import sys
import websocket
try:
import thread
except ImportError:
import _thread as thread
import sound_pb2
def on_message(ws, message):
    """Decode a protobuf Sound event and print a one-character marker for it."""
    sound = sound_pb2.Sound()
    sound.ParseFromString(message)
    marker = None
    if sound.noise == sound_pb2.Sound.FIRE:
        marker = "."
    elif sound.noise == sound_pb2.Sound.EXPLOSION:
        marker = "💥"
    if marker is not None:
        sys.stdout.write(marker)
        sys.stdout.flush()
def on_error(ws, error):
    """Report a websocket error on stderr, separated by a blank line."""
    sys.stderr.write(str(error) + "\n\n")
def sound_url(host, game_id):
    """Build the websocket URL for the sound stream of ``game_id`` on ``host``."""
    return f"ws://{host}/{game_id}/sound"
def options():
    """Parse the command-line flags for the Elixoids server host and game id."""
    parser = argparse.ArgumentParser()
    flags = {
        "--host": dict(default="localhost:8065",
                       help="host[:port] of Elixoids server"),
        "--game": dict(default=0, help="Game id"),
    }
    for flag, kwargs in flags.items():
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
if __name__ == "__main__":
args = options()
ws_url = sound_url(args.host, args.game)
ws = websocket.WebSocketApp(ws_url,
header={"Accept": "application/octet-stream"},
on_message=on_message,
on_error=on_error)
ws.run_forever()
| [
"argparse.ArgumentParser",
"sound_pb2.Sound",
"websocket.WebSocketApp",
"sys.stdout.flush",
"sys.stdout.write"
] | [((314, 331), 'sound_pb2.Sound', 'sound_pb2.Sound', ([], {}), '()\n', (329, 331), False, 'import sound_pb2\n'), ((771, 796), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (794, 796), False, 'import argparse\n'), ((1140, 1264), 'websocket.WebSocketApp', 'websocket.WebSocketApp', (['ws_url'], {'header': "{'Accept': 'application/octet-stream'}", 'on_message': 'on_message', 'on_error': 'on_error'}), "(ws_url, header={'Accept': 'application/octet-stream'\n }, on_message=on_message, on_error=on_error)\n", (1162, 1264), False, 'import websocket\n'), ((421, 442), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (437, 442), False, 'import sys\n'), ((451, 469), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (467, 469), False, 'import sys\n'), ((531, 552), 'sys.stdout.write', 'sys.stdout.write', (['"""💥"""'], {}), "('💥')\n", (547, 552), False, 'import sys\n'), ((561, 579), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (577, 579), False, 'import sys\n')] |
import pennylane as qml
import numpy as np
def algo(x, y, z):
    """Two-qubit circuit: rotate wire 0 by RZ(z), RY(y), RX(x), entangle it
    with wire 1 via CNOT, and measure the Pauli-Z expectation on wire 1.

    Gate order matters: the rotations are applied in the listed sequence.
    """
    qml.RZ(z, wires=[0])
    qml.RY(y, wires=[0])
    qml.RX(x, wires=[0])
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(wires=1))
def run_algo(device, args):
    """Execute ``algo`` on ``device`` with rotation angles read from ``args``.

    ``args`` is a mapping with keys 'X', 'Y', 'Z' whose values are coercible
    to float.  Returns the circuit's expectation value.
    """
    print(args)  # echo the raw arguments, matching the original behavior
    circuit = qml.QNode(algo, device)
    x, y, z = (float(args[key]) for key in ("X", "Y", "Z"))
    return circuit(x, y, z)
"pennylane.QNode",
"pennylane.RY",
"pennylane.RZ",
"pennylane.PauliZ",
"pennylane.RX",
"pennylane.CNOT"
] | [((64, 84), 'pennylane.RZ', 'qml.RZ', (['z'], {'wires': '[0]'}), '(z, wires=[0])\n', (70, 84), True, 'import pennylane as qml\n'), ((86, 106), 'pennylane.RY', 'qml.RY', (['y'], {'wires': '[0]'}), '(y, wires=[0])\n', (92, 106), True, 'import pennylane as qml\n'), ((108, 128), 'pennylane.RX', 'qml.RX', (['x'], {'wires': '[0]'}), '(x, wires=[0])\n', (114, 128), True, 'import pennylane as qml\n'), ((130, 152), 'pennylane.CNOT', 'qml.CNOT', ([], {'wires': '[0, 1]'}), '(wires=[0, 1])\n', (138, 152), True, 'import pennylane as qml\n'), ((246, 269), 'pennylane.QNode', 'qml.QNode', (['algo', 'device'], {}), '(algo, device)\n', (255, 269), True, 'import pennylane as qml\n'), ((172, 191), 'pennylane.PauliZ', 'qml.PauliZ', ([], {'wires': '(1)'}), '(wires=1)\n', (182, 191), True, 'import pennylane as qml\n')] |
"""Miscellaneous tools.
"""
import os
import csv
from typing import Union
from subprocess import Popen
from pathlib import Path
from scipy.io import loadmat
import pandas as pd
def open_in_explorer(path : os.PathLike) -> None:
    """Open ``path`` in Windows Explorer: root at a directory, select a file.

    Raises FileNotFoundError if ``path`` is neither a file nor a directory.
    """
    target = Path(path)
    if target.is_dir():
        _ = Popen(f'explorer.exe /root,"{path}"')
    elif target.is_file():
        _ = Popen(f'explorer.exe /select,"{path}"')
    else:
        raise FileNotFoundError()
def num_to_ith(num):
    """Convert an integer to an ordinal string (1st, 2nd, 3rd, ...).

    Negative numbers count from the end: -1 is 'last', -2 is '1st to last',
    and so on.

    Parameters
    ----------
    num : int
        Number

    Returns
    -------
    str
        Ordinal number
    """
    if num == -1:
        return 'last'

    from_end = num < -1
    value = str(num + 1).replace('-', '') if from_end else str(num)

    # 11th, 12th, 13th (and 111th, ...) take 'th' despite their last digit.
    if len(value) > 1 and value[-2] == '1':
        suffix = 'th'
    else:
        suffix = {'1': 'st', '2': 'nd', '3': 'rd'}.get(value[-1], 'th')

    tail = ' to last' if from_end else ''
    return value + suffix + tail
def read_data_string(text, delimiter=',', newline='\n', has_headerline=True):
    """Read a delimited string into a list of dictionaries.

    Works like :meth:`numpy.genfromtxt`, but on an in-memory string.  Rows
    whose field count does not match the header count are silently skipped.

    Parameters
    ----------
    text : str
        Row/column data using `delimiter` and `newline` as separators.
    delimiter : str, optional
        Field delimiter, by default ','
    newline : str, optional
        Row separator, by default '\n'
    has_headerline : bool, optional
        If True, the first row supplies the headers; otherwise generic
        numeric headers ('1', '2', ...) are generated, by default True

    Returns
    -------
    :obj:`list` of :obj:`dict`
        One dictionary per complete row, keyed by header.
    """
    rows = text.split(newline)

    if has_headerline:
        headers = rows.pop(0).split(delimiter)
    else:
        headers = [str(i + 1) for i in range(len(rows[0].split(delimiter)))]

    records = []
    for row in rows:
        fields = row.split(delimiter)
        if len(fields) != len(headers):
            # Incomplete row: skip it.
            continue
        records.append(dict(zip(headers, fields)))
    return records
def convert_path(filepath):
    """Normalize the separators in `filepath` for the current OS.

    Both forward and backward slashes are converted to ``os.sep`` and the
    result is passed through :func:`os.path.normpath`.

    Parameters
    ----------
    filepath : str
        Filepath to be converted

    Returns
    -------
    str
        Converted filepath
    """
    unified = filepath.replace('\\', os.sep).replace('/', os.sep)
    return os.path.normpath(unified)
def mat_to_pd(filename, data_start=None, data_end=None):
    """Creates a pandas dataframe from a .mat file.

    Each MATLAB variable must be a column vector (n x 1) or row vector
    (1 x n); row vectors are transposed into columns.

    Parameters
    ----------
    filename : str
        Filename of a .mat file
    data_start : int, optional
        First row index to keep from each variable, by default 0
    data_end : int, optional
        One past the last row index to keep, by default each variable's length

    Returns
    -------
    dataframe
        Pandas dataframe of data in `filename`
    """
    mat = loadmat(filename)  # load mat-file
    dataframes = []
    meta_keys = ('__header__', '__globals__', '__version__')
    for variable in [var for var in mat if var not in meta_keys]:
        if mat[variable].shape[1] == 1:
            array = mat[variable]
        elif mat[variable].shape[0] == 1:
            # Row vector: transpose into a column.
            array = mat[variable].transpose()
        else:
            raise ValueError(f'{filename} does not contain the expected data!')
        # BUG FIX: previously the first variable's length was written back into
        # ``data_end`` (and 0 into ``data_start``), so later, longer variables
        # were silently truncated.  Compute the slice bounds per variable.
        start = 0 if data_start is None else data_start
        end = len(array) if data_end is None else data_end
        dataframes.append(pd.DataFrame(array[start:end], columns=[variable]))
    dataframe = pd.concat(dataframes, axis=1)
    return dataframe
def dict_to_csv(data, filename):
    """Write `data` to a csv file with keys as headers and values as columns.

    Parameters
    ----------
    data : dict
        Dictionary mapping column names to equal-length sequences of values
    filename : str
        Name of file to write data to (include extension)
    """
    rows = zip(*data.values())
    with open(filename, 'w', newline='') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(data.keys())
        writer.writerows(rows)
| [
"pathlib.Path",
"subprocess.Popen",
"csv.writer",
"scipy.io.loadmat",
"pandas.DataFrame",
"pandas.concat"
] | [((3707, 3724), 'scipy.io.loadmat', 'loadmat', (['filename'], {}), '(filename)\n', (3714, 3724), False, 'from scipy.io import loadmat\n'), ((4463, 4492), 'pandas.concat', 'pd.concat', (['dataframes'], {'axis': '(1)'}), '(dataframes, axis=1)\n', (4472, 4492), True, 'import pandas as pd\n'), ((270, 307), 'subprocess.Popen', 'Popen', (['f"""explorer.exe /root,"{path}\\""""'], {}), '(f\'explorer.exe /root,"{path}"\')\n', (275, 307), False, 'from subprocess import Popen\n'), ((4879, 4898), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (4889, 4898), False, 'import csv\n'), ((237, 247), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (241, 247), False, 'from pathlib import Path\n'), ((351, 390), 'subprocess.Popen', 'Popen', (['f"""explorer.exe /select,"{path}\\""""'], {}), '(f\'explorer.exe /select,"{path}"\')\n', (356, 390), False, 'from subprocess import Popen\n'), ((4384, 4444), 'pandas.DataFrame', 'pd.DataFrame', (['array[data_start:data_end]'], {'columns': '[variable]'}), '(array[data_start:data_end], columns=[variable])\n', (4396, 4444), True, 'import pandas as pd\n'), ((317, 327), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (321, 327), False, 'from pathlib import Path\n')] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from paddle.fluid.framework import program_guard, Program
from paddle.fluid.executor import Executor
from paddle.fluid import framework
from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell
from paddle.fluid.layers import rnn as dynamic_rnn
from paddle.fluid import contrib
from paddle.fluid.contrib.layers import basic_lstm
import paddle.fluid.layers.utils as utils
import numpy as np
class TestLSTMCellError(unittest.TestCase):
    # Negative tests: LSTMCell must raise TypeError when inputs/states are
    # numpy arrays instead of Variables, when dtypes are int32, or when the
    # cell itself is constructed with a non-floating dtype.

    def test_errors(self):
        with program_guard(Program(), Program()):
            batch_size, input_size, hidden_size = 4, 16, 16
            inputs = fluid.data(name='inputs',
                                shape=[None, input_size],
                                dtype='float32')
            pre_hidden = fluid.data(name='pre_hidden',
                                    shape=[None, hidden_size],
                                    dtype='float32')
            pre_cell = fluid.data(name='pre_cell',
                                  shape=[None, hidden_size],
                                  dtype='float32')
            cell = LSTMCell(hidden_size)

            def test_input_Variable():
                # numpy input instead of a Variable must be rejected
                np_input = np.random.random(
                    (batch_size, input_size)).astype("float32")
                cell(np_input, [pre_hidden, pre_cell])

            self.assertRaises(TypeError, test_input_Variable)

            def test_pre_hidden_Variable():
                np_pre_hidden = np.random.random(
                    (batch_size, hidden_size)).astype("float32")
                cell(inputs, [np_pre_hidden, pre_cell])

            self.assertRaises(TypeError, test_pre_hidden_Variable)

            def test_pre_cell_Variable():
                np_pre_cell = np.random.random(
                    (batch_size, input_size)).astype("float32")
                cell(inputs, [pre_hidden, np_pre_cell])

            self.assertRaises(TypeError, test_pre_cell_Variable)

            def test_input_type():
                # int32 Variables are not a supported input dtype
                error_inputs = fluid.data(name='error_inputs',
                                          shape=[None, input_size],
                                          dtype='int32')
                cell(error_inputs, [pre_hidden, pre_cell])

            self.assertRaises(TypeError, test_input_type)

            def test_pre_hidden_type():
                error_pre_hidden = fluid.data(name='error_pre_hidden',
                                              shape=[None, hidden_size],
                                              dtype='int32')
                cell(inputs, [error_pre_hidden, pre_cell])

            self.assertRaises(TypeError, test_pre_hidden_type)

            def test_pre_cell_type():
                error_pre_cell = fluid.data(name='error_pre_cell',
                                            shape=[None, hidden_size],
                                            dtype='int32')
                cell(inputs, [pre_hidden, error_pre_cell])

            self.assertRaises(TypeError, test_pre_cell_type)

            def test_dtype():
                # the input type must be Variable
                LSTMCell(hidden_size, dtype="int32")

            self.assertRaises(TypeError, test_dtype)
class TestLSTMCell(unittest.TestCase):
    # Checks that LSTMCell matches contrib's BasicLSTMUnit numerically when
    # both are given identical parameters.

    def setUp(self):
        self.batch_size = 4
        self.input_size = 16
        self.hidden_size = 16

    def test_run(self):
        inputs = fluid.data(name='inputs',
                           shape=[None, self.input_size],
                           dtype='float32')
        pre_hidden = fluid.data(name='pre_hidden',
                               shape=[None, self.hidden_size],
                               dtype='float32')
        pre_cell = fluid.data(name='pre_cell',
                             shape=[None, self.hidden_size],
                             dtype='float32')

        cell = LSTMCell(self.hidden_size)
        lstm_hidden_new, lstm_states_new = cell(inputs, [pre_hidden, pre_cell])

        # Reference implementation with the same forget bias (1.0).
        lstm_unit = contrib.layers.rnn_impl.BasicLSTMUnit(
            "basicLSTM", self.hidden_size, None, None, None, None, 1.0,
            "float32")
        lstm_hidden, lstm_cell = lstm_unit(inputs, pre_hidden, pre_cell)

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = Executor(place)
        exe.run(framework.default_startup_program())

        inputs_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.input_size)).astype('float32')
        pre_hidden_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
        pre_cell_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')

        # Pairs of (LSTMCell param, BasicLSTMUnit param) that must be equal.
        param_names = [[
            "LSTMCell/BasicLSTMUnit_0.w_0", "basicLSTM/BasicLSTMUnit_0.w_0"
        ], ["LSTMCell/BasicLSTMUnit_0.b_0", "basicLSTM/BasicLSTMUnit_0.b_0"]]

        for names in param_names:
            # Copy the same random values into both implementations' params.
            param = np.array(fluid.global_scope().find_var(
                names[0]).get_tensor())
            param = np.random.uniform(-0.1, 0.1,
                                      size=param.shape).astype('float32')
            fluid.global_scope().find_var(names[0]).get_tensor().set(
                param, place)
            fluid.global_scope().find_var(names[1]).get_tensor().set(
                param, place)

        out = exe.run(feed={
            'inputs': inputs_np,
            'pre_hidden': pre_hidden_np,
            'pre_cell': pre_cell_np
        },
                      fetch_list=[lstm_hidden_new, lstm_hidden])

        self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4, atol=0))
class TestGRUCellError(unittest.TestCase):
    # Negative tests: GRUCell must raise TypeError for numpy (non-Variable)
    # inputs/states, int32 dtypes, and a non-floating cell dtype.

    def test_errors(self):
        with program_guard(Program(), Program()):
            batch_size, input_size, hidden_size = 4, 16, 16
            inputs = fluid.data(name='inputs',
                                shape=[None, input_size],
                                dtype='float32')
            pre_hidden = layers.data(name='pre_hidden',
                                     shape=[None, hidden_size],
                                     append_batch_size=False,
                                     dtype='float32')
            cell = GRUCell(hidden_size)

            def test_input_Variable():
                # numpy input instead of a Variable must be rejected
                np_input = np.random.random(
                    (batch_size, input_size)).astype("float32")
                cell(np_input, pre_hidden)

            self.assertRaises(TypeError, test_input_Variable)

            def test_pre_hidden_Variable():
                np_pre_hidden = np.random.random(
                    (batch_size, hidden_size)).astype("float32")
                cell(inputs, np_pre_hidden)

            self.assertRaises(TypeError, test_pre_hidden_Variable)

            def test_input_type():
                error_inputs = fluid.data(name='error_inputs',
                                          shape=[None, input_size],
                                          dtype='int32')
                cell(error_inputs, pre_hidden)

            self.assertRaises(TypeError, test_input_type)

            def test_pre_hidden_type():
                error_pre_hidden = fluid.data(name='error_pre_hidden',
                                              shape=[None, hidden_size],
                                              dtype='int32')
                cell(inputs, error_pre_hidden)

            self.assertRaises(TypeError, test_pre_hidden_type)

            def test_dtype():
                # the input type must be Variable
                GRUCell(hidden_size, dtype="int32")

            self.assertRaises(TypeError, test_dtype)
class TestGRUCell(unittest.TestCase):
    # Checks that GRUCell matches contrib's BasicGRUUnit numerically when
    # both are given identical parameters.

    def setUp(self):
        self.batch_size = 4
        self.input_size = 16
        self.hidden_size = 16

    def test_run(self):
        inputs = fluid.data(name='inputs',
                           shape=[None, self.input_size],
                           dtype='float32')
        pre_hidden = layers.data(name='pre_hidden',
                                 shape=[None, self.hidden_size],
                                 append_batch_size=False,
                                 dtype='float32')

        cell = GRUCell(self.hidden_size)
        gru_hidden_new, _ = cell(inputs, pre_hidden)

        # Reference implementation.
        gru_unit = contrib.layers.rnn_impl.BasicGRUUnit("basicGRU",
                                                        self.hidden_size, None,
                                                        None, None, None,
                                                        "float32")
        gru_hidden = gru_unit(inputs, pre_hidden)

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = Executor(place)
        exe.run(framework.default_startup_program())

        inputs_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.input_size)).astype('float32')
        pre_hidden_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')

        # Pairs of (GRUCell param, BasicGRUUnit param) that must be equal.
        param_names = [
            ["GRUCell/BasicGRUUnit_0.w_0", "basicGRU/BasicGRUUnit_0.w_0"],
            ["GRUCell/BasicGRUUnit_0.w_1", "basicGRU/BasicGRUUnit_0.w_1"],
            ["GRUCell/BasicGRUUnit_0.b_0", "basicGRU/BasicGRUUnit_0.b_0"],
            ["GRUCell/BasicGRUUnit_0.b_1", "basicGRU/BasicGRUUnit_0.b_1"]
        ]

        for names in param_names:
            # Copy the same random values into both implementations' params.
            param = np.array(fluid.global_scope().find_var(
                names[0]).get_tensor())
            param = np.random.uniform(-0.1, 0.1,
                                      size=param.shape).astype('float32')
            fluid.global_scope().find_var(names[0]).get_tensor().set(
                param, place)
            fluid.global_scope().find_var(names[1]).get_tensor().set(
                param, place)

        out = exe.run(feed={
            'inputs': inputs_np,
            'pre_hidden': pre_hidden_np
        },
                      fetch_list=[gru_hidden_new, gru_hidden])

        self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4, atol=0))
class TestRnnError(unittest.TestCase):
    """Negative tests for ``layers.rnn``: numpy (non-Variable) inputs, list
    inputs, numpy initial states, and numpy sequence lengths must all raise
    TypeError."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            batch_size = 4
            input_size = 16
            hidden_size = 16
            seq_len = 4
            inputs = fluid.data(name='inputs',
                                shape=[None, input_size],
                                dtype='float32')
            pre_hidden = layers.data(name='pre_hidden',
                                     shape=[None, hidden_size],
                                     append_batch_size=False,
                                     dtype='float32')
            inputs_basic_lstm = fluid.data(name='inputs_basic_lstm',
                                           shape=[None, None, input_size],
                                           dtype='float32')
            sequence_length = fluid.data(name="sequence_length",
                                         shape=[None],
                                         dtype='int64')

            inputs_dynamic_rnn = layers.transpose(inputs_basic_lstm,
                                                  perm=[1, 0, 2])
            cell = LSTMCell(hidden_size, name="LSTMCell_for_rnn")
            np_inputs_dynamic_rnn = np.random.random(
                (seq_len, batch_size, input_size)).astype("float32")

            def test_input_Variable():
                # numpy input instead of a Variable must be rejected
                dynamic_rnn(cell=cell,
                            inputs=np_inputs_dynamic_rnn,
                            sequence_length=sequence_length,
                            is_reverse=False)

            self.assertRaises(TypeError, test_input_Variable)

            def test_input_list():
                # a list wrapping a numpy array is equally invalid
                dynamic_rnn(cell=cell,
                            inputs=[np_inputs_dynamic_rnn],
                            sequence_length=sequence_length,
                            is_reverse=False)

            self.assertRaises(TypeError, test_input_list)

            def test_initial_states_type():
                cell = GRUCell(hidden_size, name="GRUCell_for_rnn")
                error_initial_states = np.random.random(
                    (batch_size, hidden_size)).astype("float32")
                dynamic_rnn(cell=cell,
                            inputs=inputs_dynamic_rnn,
                            initial_states=error_initial_states,
                            sequence_length=sequence_length,
                            is_reverse=False)

            self.assertRaises(TypeError, test_initial_states_type)

            def test_initial_states_list():
                error_initial_states = [
                    np.random.random(
                        (batch_size, hidden_size)).astype("float32"),
                    np.random.random(
                        (batch_size, hidden_size)).astype("float32")
                ]
                dynamic_rnn(cell=cell,
                            inputs=inputs_dynamic_rnn,
                            initial_states=error_initial_states,
                            sequence_length=sequence_length,
                            is_reverse=False)

            # BUG FIX: this assertion previously re-ran
            # test_initial_states_type, so the list-of-arrays case defined
            # directly above was never exercised.
            self.assertRaises(TypeError, test_initial_states_list)

            def test_sequence_length_type():
                np_sequence_length = np.random.random(
                    (batch_size)).astype("float32")
                dynamic_rnn(cell=cell,
                            inputs=inputs_dynamic_rnn,
                            sequence_length=np_sequence_length,
                            is_reverse=False)

            self.assertRaises(TypeError, test_sequence_length_type)
class TestRnn(unittest.TestCase):
    """Compare ``dynamic_rnn`` + ``LSTMCell`` against the fused ``basic_lstm``.

    Both graphs are loaded with one shared random weight set, so their
    outputs must agree numerically.
    """

    def setUp(self):
        # Small dimensions keep the numerical comparison fast.
        self.batch_size = 4
        self.input_size = 16
        self.hidden_size = 16
        self.seq_len = 4

    def test_run(self):
        # Unfused path: transpose to time-major and unroll an LSTMCell.
        inputs_basic_lstm = fluid.data(name='inputs_basic_lstm',
                                       shape=[None, None, self.input_size],
                                       dtype='float32')
        sequence_length = fluid.data(name="sequence_length",
                                     shape=[None],
                                     dtype='int64')
        inputs_dynamic_rnn = layers.transpose(inputs_basic_lstm,
                                          perm=[1, 0, 2])
        cell = LSTMCell(self.hidden_size, name="LSTMCell_for_rnn")
        output, final_state = dynamic_rnn(cell=cell,
                                          inputs=inputs_dynamic_rnn,
                                          sequence_length=sequence_length,
                                          is_reverse=False)
        output_new = layers.transpose(output, perm=[1, 0, 2])

        # Fused reference implementation.
        rnn_out, last_hidden, last_cell = basic_lstm(
            inputs_basic_lstm, None, None, self.hidden_size, num_layers=1,
            batch_first=False, bidirectional=False,
            sequence_length=sequence_length, forget_bias=1.0)

        place = core.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else core.CPUPlace()
        exe = Executor(place)
        exe.run(framework.default_startup_program())

        inputs_basic_lstm_np = np.random.uniform(
            -0.1, 0.1,
            (self.seq_len, self.batch_size, self.input_size)).astype('float32')
        sequence_length_np = np.ones(self.batch_size,
                                     dtype='int64') * self.seq_len
        inputs_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.input_size)).astype('float32')
        pre_hidden_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
        pre_cell_np = np.random.uniform(
            -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')

        # Copy one random weight/bias set into both graphs so they compute
        # the same function.
        param_names = [[
            "LSTMCell_for_rnn/BasicLSTMUnit_0.w_0",
            "basic_lstm_layers_0/BasicLSTMUnit_0.w_0"
        ],
                       [
                           "LSTMCell_for_rnn/BasicLSTMUnit_0.b_0",
                           "basic_lstm_layers_0/BasicLSTMUnit_0.b_0"
                       ]]
        for cell_name, fused_name in param_names:
            current = np.array(fluid.global_scope().find_var(
                cell_name).get_tensor())
            shared = np.random.uniform(-0.1, 0.1,
                                       size=current.shape).astype('float32')
            fluid.global_scope().find_var(cell_name).get_tensor().set(
                shared, place)
            fluid.global_scope().find_var(fused_name).get_tensor().set(
                shared, place)

        feed = {
            'inputs_basic_lstm': inputs_basic_lstm_np,
            'sequence_length': sequence_length_np,
            'inputs': inputs_np,
            'pre_hidden': pre_hidden_np,
            'pre_cell': pre_cell_np
        }
        out = exe.run(feed=feed, fetch_list=[output_new, rnn_out])

        self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4))
class TestRnnUtil(unittest.TestCase):
    """
    Test cases for rnn apis' utility methods for coverage.
    """

    def test_case(self):
        inputs = {"key1": 1, "key2": 2}

        def increment(value):
            return value + 1

        outputs = utils.map_structure(increment, inputs)
        utils.assert_same_structure(inputs, outputs)
        # Diverging structures should be rejected; the ValueError is the
        # expected outcome here, so it is swallowed deliberately.
        inputs["key3"] = 3
        try:
            utils.assert_same_structure(inputs, outputs)
        except ValueError:
            pass
class EncoderCell(RNNCell):
    """Stacked-LSTM cell used as the seq2seq encoder."""

    def __init__(
            self,
            num_layers,
            hidden_size,
            dropout_prob=0.,
            init_scale=0.1,
    ):
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.dropout_prob = dropout_prob
        # One LSTMCell per stacked layer.
        self.lstm_cells = [LSTMCell(hidden_size) for _ in range(num_layers)]

    def call(self, step_input, states):
        new_states = []
        for layer_idx, lstm in enumerate(self.lstm_cells):
            out, new_state = lstm(step_input, states[layer_idx])
            # Dropout between stacked layers (skipped when prob is 0).
            if self.dropout_prob:
                step_input = layers.dropout(
                    out,
                    self.dropout_prob,
                )
            else:
                step_input = out
            new_states.append(new_state)
        return step_input, new_states

    @property
    def state_shape(self):
        # The stacked cell's state is the list of per-layer states.
        return [cell.state_shape for cell in self.lstm_cells]
class DecoderCell(RNNCell):
    """Stacked-LSTM cell driving the seq2seq decoder."""

    def __init__(self, num_layers, hidden_size, dropout_prob=0.):
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.dropout_prob = dropout_prob
        # One LSTMCell per stacked layer.
        self.lstm_cells = [LSTMCell(hidden_size) for _ in range(num_layers)]

    def call(self, step_input, states):
        new_lstm_states = []
        for layer_idx, lstm in enumerate(self.lstm_cells):
            out, new_lstm_state = lstm(step_input, states[layer_idx])
            # Dropout between stacked layers (skipped when prob is 0).
            if self.dropout_prob:
                step_input = layers.dropout(
                    out,
                    self.dropout_prob,
                )
            else:
                step_input = out
            new_lstm_states.append(new_lstm_state)
        return step_input, new_lstm_states
def def_seq2seq_model(num_layers, hidden_size, dropout_prob, src_vocab_size,
                      trg_vocab_size):
    """Build a vanilla seq2seq training graph and return its loss.

    Args:
        num_layers (int): stacked LSTM layers in both encoder and decoder.
        hidden_size (int): LSTM hidden width, also used as embedding size.
        dropout_prob (float): dropout applied between stacked layers.
        src_vocab_size (int): source-side vocabulary size.
        trg_vocab_size (int): target-side vocabulary size.

    Returns:
        Variable: the scalar training loss (an Adam minimize op is attached
        to the current program as a side effect).
    """
    # data
    source = fluid.data(name="src", shape=[None, None], dtype="int64")
    source_length = fluid.data(name="src_sequence_length",
                                shape=[None],
                                dtype="int64")
    target = fluid.data(name="trg", shape=[None, None], dtype="int64")
    target_length = fluid.data(name="trg_sequence_length",
                                shape=[None],
                                dtype="int64")
    label = fluid.data(name="label", shape=[None, None, 1], dtype="int64")
    # embedding
    src_emb = fluid.embedding(source, (src_vocab_size, hidden_size))
    # BUG FIX: the target embedding previously used ``src_vocab_size``;
    # decoder input ids are drawn from the *target* vocabulary.
    tar_emb = fluid.embedding(target, (trg_vocab_size, hidden_size))
    # encoder
    enc_cell = EncoderCell(num_layers, hidden_size, dropout_prob)
    enc_output, enc_final_state = dynamic_rnn(cell=enc_cell,
                                              inputs=src_emb,
                                              sequence_length=source_length)
    # decoder, initialized from the encoder's final state
    dec_cell = DecoderCell(num_layers, hidden_size, dropout_prob)
    dec_output, dec_final_state = dynamic_rnn(cell=dec_cell,
                                              inputs=tar_emb,
                                              initial_states=enc_final_state)
    # project decoder outputs onto the target vocabulary
    logits = layers.fc(dec_output,
                       size=trg_vocab_size,
                       num_flatten_dims=len(dec_output.shape) - 1,
                       bias_attr=False)
    # loss: cross entropy, masked so padded target steps contribute nothing
    loss = layers.softmax_with_cross_entropy(logits=logits,
                                             label=label,
                                             soft_label=False)
    loss = layers.unsqueeze(loss, axes=[2])
    max_tar_seq_len = layers.shape(target)[1]
    tar_mask = layers.sequence_mask(target_length,
                                    maxlen=max_tar_seq_len,
                                    dtype="float32")
    loss = loss * tar_mask
    loss = layers.reduce_mean(loss, dim=[0])
    loss = layers.reduce_sum(loss)
    # optimizer
    optimizer = fluid.optimizer.Adam(0.001)
    optimizer.minimize(loss)
    return loss
class TestSeq2SeqModel(unittest.TestCase):
    """
    Test cases to confirm seq2seq api training correctly.
    """

    def setUp(self):
        # Fixed seed so every run draws identical synthetic data.
        np.random.seed(123)
        self.model_hparams = {
            "num_layers": 2,
            "hidden_size": 128,
            "dropout_prob": 0.1,
            "src_vocab_size": 100,
            "trg_vocab_size": 100
        }
        self.iter_num = iter_num = 2
        self.batch_size = batch_size = 4
        src_seq_len = 10
        trg_seq_len = 12
        total = iter_num * batch_size
        vocab = self.model_hparams["src_vocab_size"]
        # NOTE: the draw order below must stay src, src_len, trg, trg_len,
        # label so the seeded RNG reproduces the same arrays.
        self.data = {
            "src":
            np.random.randint(2, vocab, (total, src_seq_len)).astype("int64"),
            "src_sequence_length":
            np.random.randint(1, src_seq_len, (total, )).astype("int64"),
            "trg":
            np.random.randint(2, vocab, (total, trg_seq_len)).astype("int64"),
            "trg_sequence_length":
            np.random.randint(1, trg_seq_len, (total, )).astype("int64"),
            "label":
            np.random.randint(2, vocab,
                              (total, trg_seq_len, 1)).astype("int64"),
        }
        place = core.CUDAPlace(
            0) if core.is_compiled_with_cuda() else core.CPUPlace()
        self.exe = Executor(place)

    def test_seq2seq_model(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            cost = def_seq2seq_model(**self.model_hparams)
            self.exe.run(startup_program)
            for iter_idx in range(self.iter_num):
                # Slice the pre-generated data into this iteration's batch.
                batch = slice(iter_idx * self.batch_size,
                              (iter_idx + 1) * self.batch_size)
                feed = {
                    "src": self.data["src"][batch, :],
                    "src_sequence_length":
                    self.data["src_sequence_length"][batch],
                    "trg": self.data["trg"][batch, :],
                    "trg_sequence_length":
                    self.data["trg_sequence_length"][batch],
                    "label": self.data["label"][batch],
                }
                cost_val = self.exe.run(feed=feed, fetch_list=[cost])[0]
                print("iter_idx: %d, cost: %f" % (iter_idx, cost_val))
# Run every TestCase in this module via the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"paddle.fluid.embedding",
"paddle.fluid.layers.data",
"paddle.fluid.layers.shape",
"paddle.fluid.layers.sequence_mask",
"paddle.fluid.contrib.layers.rnn_impl.BasicLSTMUnit",
"unittest.main",
"paddle.fluid.optimizer.Adam",
"paddle.fluid.layers.transpose",
"paddle.fluid.executor.Executor",
"paddle.f... | [((20147, 20204), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""src"""', 'shape': '[None, None]', 'dtype': '"""int64"""'}), "(name='src', shape=[None, None], dtype='int64')\n", (20157, 20204), True, 'import paddle.fluid as fluid\n'), ((20225, 20292), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""src_sequence_length"""', 'shape': '[None]', 'dtype': '"""int64"""'}), "(name='src_sequence_length', shape=[None], dtype='int64')\n", (20235, 20292), True, 'import paddle.fluid as fluid\n'), ((20368, 20425), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""trg"""', 'shape': '[None, None]', 'dtype': '"""int64"""'}), "(name='trg', shape=[None, None], dtype='int64')\n", (20378, 20425), True, 'import paddle.fluid as fluid\n'), ((20446, 20513), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""trg_sequence_length"""', 'shape': '[None]', 'dtype': '"""int64"""'}), "(name='trg_sequence_length', shape=[None], dtype='int64')\n", (20456, 20513), True, 'import paddle.fluid as fluid\n'), ((20588, 20650), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""label"""', 'shape': '[None, None, 1]', 'dtype': '"""int64"""'}), "(name='label', shape=[None, None, 1], dtype='int64')\n", (20598, 20650), True, 'import paddle.fluid as fluid\n'), ((20682, 20736), 'paddle.fluid.embedding', 'fluid.embedding', (['source', '(src_vocab_size, hidden_size)'], {}), '(source, (src_vocab_size, hidden_size))\n', (20697, 20736), True, 'import paddle.fluid as fluid\n'), ((20751, 20805), 'paddle.fluid.embedding', 'fluid.embedding', (['target', '(src_vocab_size, hidden_size)'], {}), '(target, (src_vocab_size, hidden_size))\n', (20766, 20805), True, 'import paddle.fluid as fluid\n'), ((20921, 20994), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'enc_cell', 'inputs': 'src_emb', 'sequence_length': 'source_length'}), '(cell=enc_cell, inputs=src_emb, sequence_length=source_length)\n', (20932, 20994), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((21202, 
21276), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'dec_cell', 'inputs': 'tar_emb', 'initial_states': 'enc_final_state'}), '(cell=dec_cell, inputs=tar_emb, initial_states=enc_final_state)\n', (21213, 21276), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((21578, 21657), 'paddle.fluid.layers.softmax_with_cross_entropy', 'layers.softmax_with_cross_entropy', ([], {'logits': 'logits', 'label': 'label', 'soft_label': '(False)'}), '(logits=logits, label=label, soft_label=False)\n', (21611, 21657), True, 'import paddle.fluid.layers as layers\n'), ((21759, 21791), 'paddle.fluid.layers.unsqueeze', 'layers.unsqueeze', (['loss'], {'axes': '[2]'}), '(loss, axes=[2])\n', (21775, 21791), True, 'import paddle.fluid.layers as layers\n'), ((21853, 21929), 'paddle.fluid.layers.sequence_mask', 'layers.sequence_mask', (['target_length'], {'maxlen': 'max_tar_seq_len', 'dtype': '"""float32"""'}), "(target_length, maxlen=max_tar_seq_len, dtype='float32')\n", (21873, 21929), True, 'import paddle.fluid.layers as layers\n'), ((22040, 22073), 'paddle.fluid.layers.reduce_mean', 'layers.reduce_mean', (['loss'], {'dim': '[0]'}), '(loss, dim=[0])\n', (22058, 22073), True, 'import paddle.fluid.layers as layers\n'), ((22085, 22108), 'paddle.fluid.layers.reduce_sum', 'layers.reduce_sum', (['loss'], {}), '(loss)\n', (22102, 22108), True, 'import paddle.fluid.layers as layers\n'), ((22142, 22169), 'paddle.fluid.optimizer.Adam', 'fluid.optimizer.Adam', (['(0.001)'], {}), '(0.001)\n', (22162, 22169), True, 'import paddle.fluid as fluid\n'), ((25505, 25520), 'unittest.main', 'unittest.main', ([], {}), '()\n', (25518, 25520), False, 'import unittest\n'), ((4166, 4239), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""inputs"""', 'shape': '[None, self.input_size]', 'dtype': '"""float32"""'}), "(name='inputs', shape=[None, self.input_size], dtype='float32')\n", (4176, 4239), True, 'import paddle.fluid as fluid\n'), ((4317, 4395), 'paddle.fluid.data', 'fluid.data', ([], 
{'name': '"""pre_hidden"""', 'shape': '[None, self.hidden_size]', 'dtype': '"""float32"""'}), "(name='pre_hidden', shape=[None, self.hidden_size], dtype='float32')\n", (4327, 4395), True, 'import paddle.fluid as fluid\n'), ((4479, 4555), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""pre_cell"""', 'shape': '[None, self.hidden_size]', 'dtype': '"""float32"""'}), "(name='pre_cell', shape=[None, self.hidden_size], dtype='float32')\n", (4489, 4555), True, 'import paddle.fluid as fluid\n'), ((4632, 4658), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['self.hidden_size'], {}), '(self.hidden_size)\n', (4640, 4658), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((4760, 4872), 'paddle.fluid.contrib.layers.rnn_impl.BasicLSTMUnit', 'contrib.layers.rnn_impl.BasicLSTMUnit', (['"""basicLSTM"""', 'self.hidden_size', 'None', 'None', 'None', 'None', '(1.0)', '"""float32"""'], {}), "('basicLSTM', self.hidden_size, None,\n None, None, None, 1.0, 'float32')\n", (4797, 4872), False, 'from paddle.fluid import contrib\n'), ((4979, 5007), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (5005, 5007), True, 'import paddle.fluid.core as core\n'), ((5111, 5126), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), '(place)\n', (5119, 5126), False, 'from paddle.fluid.executor import Executor\n'), ((8669, 8742), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""inputs"""', 'shape': '[None, self.input_size]', 'dtype': '"""float32"""'}), "(name='inputs', shape=[None, self.input_size], dtype='float32')\n", (8679, 8742), True, 'import paddle.fluid as fluid\n'), ((8820, 8928), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""pre_hidden"""', 'shape': '[None, self.hidden_size]', 'append_batch_size': '(False)', 'dtype': '"""float32"""'}), "(name='pre_hidden', shape=[None, self.hidden_size],\n append_batch_size=False, dtype='float32')\n", (8831, 8928), True, 'import paddle.fluid.layers as 
layers\n'), ((9040, 9065), 'paddle.fluid.layers.rnn.GRUCell', 'GRUCell', (['self.hidden_size'], {}), '(self.hidden_size)\n', (9047, 9065), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((9139, 9244), 'paddle.fluid.contrib.layers.rnn_impl.BasicGRUUnit', 'contrib.layers.rnn_impl.BasicGRUUnit', (['"""basicGRU"""', 'self.hidden_size', 'None', 'None', 'None', 'None', '"""float32"""'], {}), "('basicGRU', self.hidden_size, None,\n None, None, None, 'float32')\n", (9175, 9244), False, 'from paddle.fluid import contrib\n'), ((9471, 9499), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (9497, 9499), True, 'import paddle.fluid.core as core\n'), ((9603, 9618), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), '(place)\n', (9611, 9618), False, 'from paddle.fluid.executor import Executor\n'), ((14753, 14847), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""inputs_basic_lstm"""', 'shape': '[None, None, self.input_size]', 'dtype': '"""float32"""'}), "(name='inputs_basic_lstm', shape=[None, None, self.input_size],\n dtype='float32')\n", (14763, 14847), True, 'import paddle.fluid as fluid\n'), ((14948, 15011), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""sequence_length"""', 'shape': '[None]', 'dtype': '"""int64"""'}), "(name='sequence_length', shape=[None], dtype='int64')\n", (14958, 15011), True, 'import paddle.fluid as fluid\n'), ((15116, 15167), 'paddle.fluid.layers.transpose', 'layers.transpose', (['inputs_basic_lstm'], {'perm': '[1, 0, 2]'}), '(inputs_basic_lstm, perm=[1, 0, 2])\n', (15132, 15167), True, 'import paddle.fluid.layers as layers\n'), ((15183, 15234), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['self.hidden_size'], {'name': '"""LSTMCell_for_rnn"""'}), "(self.hidden_size, name='LSTMCell_for_rnn')\n", (15191, 15234), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((15265, 15370), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 
'cell', 'inputs': 'inputs_dynamic_rnn', 'sequence_length': 'sequence_length', 'is_reverse': '(False)'}), '(cell=cell, inputs=inputs_dynamic_rnn, sequence_length=\n sequence_length, is_reverse=False)\n', (15276, 15370), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((15513, 15553), 'paddle.fluid.layers.transpose', 'layers.transpose', (['output'], {'perm': '[1, 0, 2]'}), '(output, perm=[1, 0, 2])\n', (15529, 15553), True, 'import paddle.fluid.layers as layers\n'), ((15597, 15768), 'paddle.fluid.contrib.layers.basic_lstm', 'basic_lstm', (['inputs_basic_lstm', 'None', 'None', 'self.hidden_size'], {'num_layers': '(1)', 'batch_first': '(False)', 'bidirectional': '(False)', 'sequence_length': 'sequence_length', 'forget_bias': '(1.0)'}), '(inputs_basic_lstm, None, None, self.hidden_size, num_layers=1,\n batch_first=False, bidirectional=False, sequence_length=sequence_length,\n forget_bias=1.0)\n', (15607, 15768), False, 'from paddle.fluid.contrib.layers import basic_lstm\n'), ((15795, 15823), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (15821, 15823), True, 'import paddle.fluid.core as core\n'), ((15927, 15942), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), '(place)\n', (15935, 15942), False, 'from paddle.fluid.executor import Executor\n'), ((18024, 18057), 'paddle.fluid.layers.utils.map_structure', 'utils.map_structure', (['func', 'inputs'], {}), '(func, inputs)\n', (18043, 18057), True, 'import paddle.fluid.layers.utils as utils\n'), ((18066, 18110), 'paddle.fluid.layers.utils.assert_same_structure', 'utils.assert_same_structure', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (18093, 18110), True, 'import paddle.fluid.layers.utils as utils\n'), ((21814, 21834), 'paddle.fluid.layers.shape', 'layers.shape', (['target'], {}), '(target)\n', (21826, 21834), True, 'import paddle.fluid.layers as layers\n'), ((22364, 22383), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', 
(22378, 22383), True, 'import numpy as np\n'), ((23718, 23733), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), '(place)\n', (23726, 23733), False, 'from paddle.fluid.executor import Executor\n'), ((23792, 23807), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (23805, 23807), True, 'import paddle.fluid as fluid\n'), ((23834, 23849), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (23847, 23849), True, 'import paddle.fluid as fluid\n'), ((1382, 1450), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""inputs"""', 'shape': '[None, input_size]', 'dtype': '"""float32"""'}), "(name='inputs', shape=[None, input_size], dtype='float32')\n", (1392, 1450), True, 'import paddle.fluid as fluid\n'), ((1540, 1613), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""pre_hidden"""', 'shape': '[None, hidden_size]', 'dtype': '"""float32"""'}), "(name='pre_hidden', shape=[None, hidden_size], dtype='float32')\n", (1550, 1613), True, 'import paddle.fluid as fluid\n'), ((1709, 1780), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""pre_cell"""', 'shape': '[None, hidden_size]', 'dtype': '"""float32"""'}), "(name='pre_cell', shape=[None, hidden_size], dtype='float32')\n", (1719, 1780), True, 'import paddle.fluid as fluid\n'), ((1868, 1889), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['hidden_size'], {}), '(hidden_size)\n', (1876, 1889), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((5029, 5046), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (5043, 5046), True, 'import paddle.fluid.core as core\n'), ((5081, 5096), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (5094, 5096), True, 'import paddle.fluid.core as core\n'), ((5143, 5178), 'paddle.fluid.framework.default_startup_program', 'framework.default_startup_program', ([], {}), '()\n', (5176, 5178), False, 'from paddle.fluid import framework\n'), ((6416, 6464), 'numpy.allclose', 'np.allclose', (['out[0]', 
'out[1]'], {'rtol': '(0.0001)', 'atol': '(0)'}), '(out[0], out[1], rtol=0.0001, atol=0)\n', (6427, 6464), True, 'import numpy as np\n'), ((6668, 6736), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""inputs"""', 'shape': '[None, input_size]', 'dtype': '"""float32"""'}), "(name='inputs', shape=[None, input_size], dtype='float32')\n", (6678, 6736), True, 'import paddle.fluid as fluid\n'), ((6826, 6930), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""pre_hidden"""', 'shape': '[None, hidden_size]', 'append_batch_size': '(False)', 'dtype': '"""float32"""'}), "(name='pre_hidden', shape=[None, hidden_size], append_batch_size\n =False, dtype='float32')\n", (6837, 6930), True, 'import paddle.fluid.layers as layers\n'), ((7056, 7076), 'paddle.fluid.layers.rnn.GRUCell', 'GRUCell', (['hidden_size'], {}), '(hidden_size)\n', (7063, 7076), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((9521, 9538), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (9535, 9538), True, 'import paddle.fluid.core as core\n'), ((9573, 9588), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (9586, 9588), True, 'import paddle.fluid.core as core\n'), ((9635, 9670), 'paddle.fluid.framework.default_startup_program', 'framework.default_startup_program', ([], {}), '()\n', (9668, 9670), False, 'from paddle.fluid import framework\n'), ((10904, 10952), 'numpy.allclose', 'np.allclose', (['out[0]', 'out[1]'], {'rtol': '(0.0001)', 'atol': '(0)'}), '(out[0], out[1], rtol=0.0001, atol=0)\n', (10915, 10952), True, 'import numpy as np\n'), ((11200, 11268), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""inputs"""', 'shape': '[None, input_size]', 'dtype': '"""float32"""'}), "(name='inputs', shape=[None, input_size], dtype='float32')\n", (11210, 11268), True, 'import paddle.fluid as fluid\n'), ((11358, 11462), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""pre_hidden"""', 'shape': '[None, hidden_size]', 
'append_batch_size': '(False)', 'dtype': '"""float32"""'}), "(name='pre_hidden', shape=[None, hidden_size], append_batch_size\n =False, dtype='float32')\n", (11369, 11462), True, 'import paddle.fluid.layers as layers\n'), ((11601, 11691), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""inputs_basic_lstm"""', 'shape': '[None, None, input_size]', 'dtype': '"""float32"""'}), "(name='inputs_basic_lstm', shape=[None, None, input_size], dtype=\n 'float32')\n", (11611, 11691), True, 'import paddle.fluid as fluid\n'), ((11803, 11866), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""sequence_length"""', 'shape': '[None]', 'dtype': '"""int64"""'}), "(name='sequence_length', shape=[None], dtype='int64')\n", (11813, 11866), True, 'import paddle.fluid as fluid\n'), ((11983, 12034), 'paddle.fluid.layers.transpose', 'layers.transpose', (['inputs_basic_lstm'], {'perm': '[1, 0, 2]'}), '(inputs_basic_lstm, perm=[1, 0, 2])\n', (11999, 12034), True, 'import paddle.fluid.layers as layers\n'), ((12104, 12150), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['hidden_size'], {'name': '"""LSTMCell_for_rnn"""'}), "(hidden_size, name='LSTMCell_for_rnn')\n", (12112, 12150), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((15845, 15862), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (15859, 15862), True, 'import paddle.fluid.core as core\n'), ((15897, 15912), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (15910, 15912), True, 'import paddle.fluid.core as core\n'), ((15959, 15994), 'paddle.fluid.framework.default_startup_program', 'framework.default_startup_program', ([], {}), '()\n', (15992, 15994), False, 'from paddle.fluid import framework\n'), ((16179, 16218), 'numpy.ones', 'np.ones', (['self.batch_size'], {'dtype': '"""int64"""'}), "(self.batch_size, dtype='int64')\n", (16186, 16218), True, 'import numpy as np\n'), ((17754, 17794), 'numpy.allclose', 'np.allclose', (['out[0]', 'out[1]'], {'rtol': 
'(0.0001)'}), '(out[0], out[1], rtol=0.0001)\n', (17765, 17794), True, 'import numpy as np\n'), ((18167, 18211), 'paddle.fluid.layers.utils.assert_same_structure', 'utils.assert_same_structure', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (18194, 18211), True, 'import paddle.fluid.layers.utils as utils\n'), ((23649, 23677), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (23675, 23677), True, 'import paddle.fluid.core as core\n'), ((23615, 23632), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (23629, 23632), True, 'import paddle.fluid.core as core\n'), ((23683, 23698), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (23696, 23698), True, 'import paddle.fluid.core as core\n'), ((23863, 23913), 'paddle.fluid.program_guard', 'fluid.program_guard', (['main_program', 'startup_program'], {}), '(main_program, startup_program)\n', (23882, 23913), True, 'import paddle.fluid as fluid\n'), ((1278, 1287), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (1285, 1287), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((1289, 1298), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (1296, 1298), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((2785, 2857), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""error_inputs"""', 'shape': '[None, input_size]', 'dtype': '"""int32"""'}), "(name='error_inputs', shape=[None, input_size], dtype='int32')\n", (2795, 2857), True, 'import paddle.fluid as fluid\n'), ((3136, 3213), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""error_pre_hidden"""', 'shape': '[None, hidden_size]', 'dtype': '"""int32"""'}), "(name='error_pre_hidden', shape=[None, hidden_size], dtype='int32')\n", (3146, 3213), True, 'import paddle.fluid as fluid\n'), ((3501, 3576), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""error_pre_cell"""', 'shape': '[None, hidden_size]', 'dtype': 
'"""int32"""'}), "(name='error_pre_cell', shape=[None, hidden_size], dtype='int32')\n", (3511, 3576), True, 'import paddle.fluid as fluid\n'), ((3883, 3919), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['hidden_size'], {'dtype': '"""int32"""'}), "(hidden_size, dtype='int32')\n", (3891, 3919), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((5201, 5265), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.input_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.input_size))\n', (5218, 5265), True, 'import numpy as np\n'), ((5321, 5386), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.hidden_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.hidden_size))\n', (5338, 5386), True, 'import numpy as np\n'), ((5440, 5505), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.hidden_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.hidden_size))\n', (5457, 5505), True, 'import numpy as np\n'), ((6564, 6573), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (6571, 6573), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((6575, 6584), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (6582, 6584), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((7671, 7743), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""error_inputs"""', 'shape': '[None, input_size]', 'dtype': '"""int32"""'}), "(name='error_inputs', shape=[None, input_size], dtype='int32')\n", (7681, 7743), True, 'import paddle.fluid as fluid\n'), ((8010, 8087), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""error_pre_hidden"""', 'shape': '[None, hidden_size]', 'dtype': '"""int32"""'}), "(name='error_pre_hidden', shape=[None, hidden_size], dtype='int32')\n", (8020, 8087), True, 'import paddle.fluid as fluid\n'), ((8388, 8423), 'paddle.fluid.layers.rnn.GRUCell', 'GRUCell', (['hidden_size'], 
{'dtype': '"""int32"""'}), "(hidden_size, dtype='int32')\n", (8395, 8423), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((9693, 9757), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.input_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.input_size))\n', (9710, 9757), True, 'import numpy as np\n'), ((9813, 9878), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.hidden_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.hidden_size))\n', (9830, 9878), True, 'import numpy as np\n'), ((11048, 11057), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (11055, 11057), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((11059, 11068), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (11066, 11068), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((12330, 12438), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'cell', 'inputs': 'np_inputs_dynamic_rnn', 'sequence_length': 'sequence_length', 'is_reverse': '(False)'}), '(cell=cell, inputs=np_inputs_dynamic_rnn, sequence_length=\n sequence_length, is_reverse=False)\n', (12341, 12438), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((12633, 12743), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'cell', 'inputs': '[np_inputs_dynamic_rnn]', 'sequence_length': 'sequence_length', 'is_reverse': '(False)'}), '(cell=cell, inputs=[np_inputs_dynamic_rnn], sequence_length=\n sequence_length, is_reverse=False)\n', (12644, 12743), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((12950, 12994), 'paddle.fluid.layers.rnn.GRUCell', 'GRUCell', (['hidden_size'], {'name': '"""GRUCell_for_rnn"""'}), "(hidden_size, name='GRUCell_for_rnn')\n", (12957, 12994), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((13133, 13275), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'cell', 
'inputs': 'inputs_dynamic_rnn', 'initial_states': 'error_initial_states', 'sequence_length': 'sequence_length', 'is_reverse': '(False)'}), '(cell=cell, inputs=inputs_dynamic_rnn, initial_states=\n error_initial_states, sequence_length=sequence_length, is_reverse=False)\n', (13144, 13275), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((13786, 13928), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'cell', 'inputs': 'inputs_dynamic_rnn', 'initial_states': 'error_initial_states', 'sequence_length': 'sequence_length', 'is_reverse': '(False)'}), '(cell=cell, inputs=inputs_dynamic_rnn, initial_states=\n error_initial_states, sequence_length=sequence_length, is_reverse=False)\n', (13797, 13928), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((14273, 14381), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'cell', 'inputs': 'inputs_dynamic_rnn', 'sequence_length': 'np_sequence_length', 'is_reverse': '(False)'}), '(cell=cell, inputs=inputs_dynamic_rnn, sequence_length=\n np_sequence_length, is_reverse=False)\n', (14284, 14381), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((16028, 16106), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.seq_len, self.batch_size, self.input_size)'], {}), '(-0.1, 0.1, (self.seq_len, self.batch_size, self.input_size))\n', (16045, 16106), True, 'import numpy as np\n'), ((16292, 16356), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.input_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.input_size))\n', (16309, 16356), True, 'import numpy as np\n'), ((16412, 16477), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.hidden_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.hidden_size))\n', (16429, 16477), True, 'import numpy as np\n'), ((16531, 16596), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.hidden_size)'], {}), '(-0.1, 0.1, 
(self.batch_size, self.hidden_size))\n', (16548, 16596), True, 'import numpy as np\n'), ((18671, 18692), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['hidden_size'], {}), '(hidden_size)\n', (18679, 18692), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((18896, 18934), 'paddle.fluid.layers.dropout', 'layers.dropout', (['out', 'self.dropout_prob'], {}), '(out, self.dropout_prob)\n', (18910, 18934), True, 'import paddle.fluid.layers as layers\n'), ((19532, 19553), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['hidden_size'], {}), '(hidden_size)\n', (19540, 19553), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((19767, 19805), 'paddle.fluid.layers.dropout', 'layers.dropout', (['out', 'self.dropout_prob'], {}), '(out, self.dropout_prob)\n', (19781, 19805), True, 'import paddle.fluid.layers as layers\n'), ((5872, 5918), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {'size': 'param.shape'}), '(-0.1, 0.1, size=param.shape)\n', (5889, 5918), True, 'import numpy as np\n'), ((10399, 10445), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {'size': 'param.shape'}), '(-0.1, 0.1, size=param.shape)\n', (10416, 10445), True, 'import numpy as np\n'), ((12187, 12238), 'numpy.random.random', 'np.random.random', (['(seq_len, batch_size, input_size)'], {}), '((seq_len, batch_size, input_size))\n', (12203, 12238), True, 'import numpy as np\n'), ((17113, 17159), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {'size': 'param.shape'}), '(-0.1, 0.1, size=param.shape)\n', (17130, 17159), True, 'import numpy as np\n'), ((22770, 22870), 'numpy.random.randint', 'np.random.randint', (['(2)', "self.model_hparams['src_vocab_size']", '(iter_num * batch_size, src_seq_len)'], {}), "(2, self.model_hparams['src_vocab_size'], (iter_num *\n batch_size, src_seq_len))\n", (22787, 22870), True, 'import numpy as np\n'), ((22964, 23023), 'numpy.random.randint', 'np.random.randint', 
(['(1)', 'src_seq_len', '(iter_num * batch_size,)'], {}), '(1, src_seq_len, (iter_num * batch_size,))\n', (22981, 23023), True, 'import numpy as np\n'), ((23103, 23203), 'numpy.random.randint', 'np.random.randint', (['(2)', "self.model_hparams['src_vocab_size']", '(iter_num * batch_size, trg_seq_len)'], {}), "(2, self.model_hparams['src_vocab_size'], (iter_num *\n batch_size, trg_seq_len))\n", (23120, 23203), True, 'import numpy as np\n'), ((23297, 23356), 'numpy.random.randint', 'np.random.randint', (['(1)', 'trg_seq_len', '(iter_num * batch_size,)'], {}), '(1, trg_seq_len, (iter_num * batch_size,))\n', (23314, 23356), True, 'import numpy as np\n'), ((23438, 23541), 'numpy.random.randint', 'np.random.randint', (['(2)', "self.model_hparams['src_vocab_size']", '(iter_num * batch_size, trg_seq_len, 1)'], {}), "(2, self.model_hparams['src_vocab_size'], (iter_num *\n batch_size, trg_seq_len, 1))\n", (23455, 23541), True, 'import numpy as np\n'), ((1957, 1999), 'numpy.random.random', 'np.random.random', (['(batch_size, input_size)'], {}), '((batch_size, input_size))\n', (1973, 1999), True, 'import numpy as np\n'), ((2234, 2277), 'numpy.random.random', 'np.random.random', (['(batch_size, hidden_size)'], {}), '((batch_size, hidden_size))\n', (2250, 2277), True, 'import numpy as np\n'), ((2514, 2556), 'numpy.random.random', 'np.random.random', (['(batch_size, input_size)'], {}), '((batch_size, input_size))\n', (2530, 2556), True, 'import numpy as np\n'), ((7144, 7186), 'numpy.random.random', 'np.random.random', (['(batch_size, input_size)'], {}), '((batch_size, input_size))\n', (7160, 7186), True, 'import numpy as np\n'), ((7409, 7452), 'numpy.random.random', 'np.random.random', (['(batch_size, hidden_size)'], {}), '((batch_size, hidden_size))\n', (7425, 7452), True, 'import numpy as np\n'), ((13034, 13077), 'numpy.random.random', 'np.random.random', (['(batch_size, hidden_size)'], {}), '((batch_size, hidden_size))\n', (13050, 13077), True, 'import numpy as np\n'), 
((14187, 14215), 'numpy.random.random', 'np.random.random', (['batch_size'], {}), '(batch_size)\n', (14203, 14215), True, 'import numpy as np\n'), ((13557, 13600), 'numpy.random.random', 'np.random.random', (['(batch_size, hidden_size)'], {}), '((batch_size, hidden_size))\n', (13573, 13600), True, 'import numpy as np\n'), ((13665, 13708), 'numpy.random.random', 'np.random.random', (['(batch_size, hidden_size)'], {}), '((batch_size, hidden_size))\n', (13681, 13708), True, 'import numpy as np\n'), ((5781, 5801), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (5799, 5801), True, 'import paddle.fluid as fluid\n'), ((10308, 10328), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (10326, 10328), True, 'import paddle.fluid as fluid\n'), ((17022, 17042), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (17040, 17042), True, 'import paddle.fluid as fluid\n'), ((5987, 6007), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (6005, 6007), True, 'import paddle.fluid as fluid\n'), ((6087, 6107), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (6105, 6107), True, 'import paddle.fluid as fluid\n'), ((10514, 10534), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (10532, 10534), True, 'import paddle.fluid as fluid\n'), ((10614, 10634), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (10632, 10634), True, 'import paddle.fluid as fluid\n'), ((17228, 17248), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (17246, 17248), True, 'import paddle.fluid as fluid\n'), ((17328, 17348), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (17346, 17348), True, 'import paddle.fluid as fluid\n')] |
#!/usr/bin/python3
import sys
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, FileType
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pyfsdb
def parse_args():
    """Define and parse the command-line options for the heat-map tool.

    Returns the parsed namespace; raises ValueError unless exactly two
    column names were supplied with -c/--columns.
    """
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter,
                            description=__doc__,
                            epilog="Exmaple Usage: ")

    # Optional flags, registered from a table to keep the spec compact.
    flag_specs = [
        (("-c", "--columns"),
         dict(type=str, nargs=2, help="Columns to use")),
        (("-v", "--value-column"),
         dict(default="count", type=str,
              help="The value column to plot as the heat map")),
        (("-i", "--invert"),
         dict(action="store_true",
              help="Invert the foreground/background colors")),
        (("-F", "--add-fractions"),
         dict(action="store_true",
              help="Add text fraction labels to the grid")),
        (("-R", "--add-raw"),
         dict(action="store_true",
              help="Add text raw-value labels to the grid")),
        (("-L", "--add-labels"),
         dict(action="store_true", help="Add x/y axis labels")),
        (("-fs", "--font-size"),
         dict(default=None, type=int, help="Set the fontsize for labels")),
    ]
    for names, kwargs in flag_specs:
        parser.add_argument(*names, **kwargs)

    # Positional arguments: input defaults to stdin, output to out.png.
    parser.add_argument("input_file", type=FileType('r'),
                        nargs='?', default=sys.stdin,
                        help="Input fsdb file to read")
    parser.add_argument("output_file", type=str,
                        nargs='?', default="out.png",
                        help="Where to write the png file to")

    args = parser.parse_args()
    if not args.columns or len(args.columns) != 2:
        raise ValueError("exactly 2 columns must be passed to -c")
    return args
def main():
    """Read an FSDB table and render a 2-D grayscale heat map PNG.

    Rows are keyed by the two columns chosen with -c; each cell's
    intensity is that pair's value divided by the global maximum.
    """
    args = parse_args()

    # read in the input data
    f = pyfsdb.Fsdb(file_handle=args.input_file,
                return_type=pyfsdb.RETURN_AS_DICTIONARY)

    max_value = None
    dataset = {} # nested tree: {first_col_value: {second_col_value: value}}
    ycols = {} # stores each unique second value (used as an ordered set)
    for row in f:
        # NOTE(review): `if not max_value` also fires when the running max
        # is 0.0, not just on the first row -- harmless for positive counts,
        # wrong if every value is <= 0. Confirm inputs are positive.
        if not max_value:
            max_value = float(row[args.value_column])
        else:
            max_value = max(max_value, float(row[args.value_column]))

        if row[args.columns[0]] not in dataset:
            dataset[row[args.columns[0]]] = \
                { row[args.columns[1]]: float(row[args.value_column]) }
        else:
            dataset[row[args.columns[0]]][row[args.columns[1]]] = \
                float(row[args.value_column])
        ycols[row[args.columns[1]]] = 1

    # merge the data into a two dimensional array, normalized to [0, 1];
    # missing (x, y) pairs become 0.0
    data = []
    xcols = sorted(dataset.keys())
    ycols = sorted(ycols.keys())
    for first_column in xcols:
        newrow = []
        for second_column in ycols:
            if second_column in dataset[first_column]:
                newrow.append(dataset[first_column][second_column] / max_value)
            else:
                newrow.append(0.0)
        data.append(newrow)

    grapharray = np.array(data)
    if not args.invert:
        # by default large values render dark on a light background
        grapharray = 1 - grapharray

    # generate the graph
    fig, ax = plt.subplots()
    ax.imshow(grapharray, vmin=0.0, vmax=1.0, cmap='gray')
    ax.set_xlabel(args.columns[1])
    ax.set_ylabel(args.columns[0])

    if args.add_labels:
        ax.set_yticks(np.arange(len(dataset)))
        ax.set_yticklabels(xcols)
        ax.set_xticks(np.arange(len(ycols)))
        ax.set_xticklabels(ycols)

        # tilt x labels so long names don't overlap
        plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
                 rotation_mode="anchor")

    if args.add_fractions:
        # overlay the normalized fraction in each cell
        for i in range(len(grapharray)):
            for j in range(len(grapharray[i])):
                text = ax.text(j, i, "{:1.1f}".format(grapharray[i][j]),
                               ha="center", va="center", color="r",
                               fontsize=args.font_size)
    elif args.add_raw:
        # overlay the raw integer value; absent pairs simply get no label
        for i, first_column in enumerate(xcols):
            for j, second_column in enumerate(ycols):
                try:
                    value = dataset[first_column][second_column]
                    ax.text(j, i, "{}".format(int(value)),
                            ha="center", va="center", color="r",
                            fontsize=args.font_size)
                except Exception:
                    pass

    fig.tight_layout()
    plt.savefig(args.output_file,
                bbox_inches="tight", pad_inches=0)

    # import pprint
    # pprint.pprint(dataset)
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"argparse.FileType",
"pyfsdb.Fsdb",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"numpy.array",
"matplotlib.pyplot.subplots"
] | [((226, 339), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'formatter_class': 'ArgumentDefaultsHelpFormatter', 'description': '__doc__', 'epilog': '"""Exmaple Usage: """'}), "(formatter_class=ArgumentDefaultsHelpFormatter, description=\n __doc__, epilog='Exmaple Usage: ')\n", (240, 339), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, FileType\n'), ((1892, 1978), 'pyfsdb.Fsdb', 'pyfsdb.Fsdb', ([], {'file_handle': 'args.input_file', 'return_type': 'pyfsdb.RETURN_AS_DICTIONARY'}), '(file_handle=args.input_file, return_type=pyfsdb.\n RETURN_AS_DICTIONARY)\n', (1903, 1978), False, 'import pyfsdb\n'), ((3079, 3093), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3087, 3093), True, 'import numpy as np\n'), ((3194, 3208), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3206, 3208), True, 'import matplotlib.pyplot as plt\n'), ((4420, 4484), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.output_file'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "(args.output_file, bbox_inches='tight', pad_inches=0)\n", (4431, 4484), True, 'import matplotlib.pyplot as plt\n'), ((1356, 1369), 'argparse.FileType', 'FileType', (['"""r"""'], {}), "('r')\n", (1364, 1369), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, FileType\n')] |
import json
import argparse
def to_output(filepath, outpath):
    """Convert raw model predictions into the accepted submission format.

    Reads a JSON list of prediction records from *filepath* and writes a
    JSON list of ``{"id": ..., "labels": ...}`` objects to *outpath*,
    taking the labels from each record's ``pred_labels`` field.
    """
    with open(filepath, encoding='utf-8') as f:
        data = json.load(f)
    # Keep only the two fields the evaluation format expects.
    output = [{'id': d['id'], 'labels': d['pred_labels']} for d in data]
    # 'w' (not 'w+') is enough for write-only output; use a context manager
    # so the handle is closed/flushed even on error, and match the input's
    # UTF-8 encoding explicitly.
    with open(outpath, 'w', encoding='utf-8') as f:
        json.dump(output, f)
if __name__ == '__main__':
    # Command line: two positional paths, input then output.
    arg_parser = argparse.ArgumentParser(
        description='Convert model prediction into acceptable format.')
    arg_parser.add_argument('in_path', metavar='i', type=str)
    arg_parser.add_argument('out_path', metavar='o', type=str)
    parsed = arg_parser.parse_args()
    to_output(parsed.in_path, parsed.out_path)
"json.load",
"json.dump",
"argparse.ArgumentParser"
] | [((396, 488), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert model prediction into acceptable format."""'}), "(description=\n 'Convert model prediction into acceptable format.')\n", (419, 488), False, 'import argparse\n'), ((126, 138), 'json.load', 'json.load', (['f'], {}), '(f)\n', (135, 138), False, 'import json\n'), ((334, 354), 'json.dump', 'json.dump', (['output', 'f'], {}), '(output, f)\n', (343, 354), False, 'import json\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 13:59:43 2018
@author: ofn77899
"""
import numpy
from ccpi.segmentation.SimpleflexSegmentor import SimpleflexSegmentor
from ccpi.viewer.CILViewer import CILViewer
from ccpi.viewer.CILViewer2D import CILViewer2D, Converter
import vtk
#Text-based input system
# Interactively collect all camera/animation parameters; a single bad
# (non-integer) entry restarts the whole questionnaire from the top.
valid = False
while valid == False:
    try:
        InitialCameraPositionX = int(input('Enter the initital camera position on the x-axis:'))
        InitialCameraPositionY = int(input('Enter the initital camera position on the y-axis:'))
        InitialCameraPositionZ = int(input('Enter the initital camera position on the z-axis:'))
        FrameCount = int(input('Enter number of frames for the animation:'))
        ViewUp1 = int(input('Enter the first viewup value:'))
        ViewUp2 = int(input('Enter the second viewup value:'))
        ViewUp3 = int(input('Enter the third viewup value:'))
        FocalPointX = int(input('Enter the x-coordinate for the camera focal point:'))
        FocalPointY = int(input('Enter the y-coordinate for the camera focal point:'))
        FocalPointZ = int(input('Enter the z-coordinate for the camera focal point:'))
        AngleRangeStart = int(input('Enter the first value for the angle range:'))
        AngleRangeEnd = int(input('Enter the last value for the angle range:'))
        ClippingRangeStart = int(input('Set lowest value for clipping range:'))
        ClippingRangeEnd = int(input('Set highest value for clipping range:'))
        # Pack the individual integers into the tuples the VTK camera uses.
        InitialCameraPosition = (InitialCameraPositionX, InitialCameraPositionY, InitialCameraPositionZ)
        FocalPoint = (FocalPointX, FocalPointY, FocalPointZ)
        AngleRange = (AngleRangeStart, AngleRangeEnd)
        ClippingRange = (ClippingRangeStart, ClippingRangeEnd)
        ViewUp = (ViewUp1, ViewUp2, ViewUp3)
    except ValueError:
        # int() raised on a non-numeric entry: report and loop again.
        print('One or more of your inputs were not valid! Try again')
    else:
        valid = True
def surface2vtkPolyData(coord_list, origin = (0,0,0), spacing=(1,1,1)):
    """Build a vtkPolyData triangle mesh from a list of iso-surfaces.

    Keyword Arguments:
    coord_list -- iterable of surfaces; each surface is a sequence of
                  points (consumed three at a time, one per triangle
                  vertex). Each point must be dot-multipliable by a 4x4
                  matrix, i.e. a homogeneous 4-vector -- TODO confirm.
    origin  -- world-coordinate origin applied via the affine shift
    spacing -- voxel spacing applied via the affine scaling
    """
    ########################################################################
    # 7. Display
    # with the retrieved data we construct polydata actors to be displayed
    # with VTK. Notice that this part is VTK specific. However, it shows how to
    # process the data returned by the algorithm.

    # Create the VTK output
    # Points coordinates structure
    triangle_vertices = vtk.vtkPoints()
    #associate the points to triangles
    triangle = vtk.vtkTriangle()
    trianglePointIds = triangle.GetPointIds()
    # put all triangles in an array
    triangles = vtk.vtkCellArray()
    isTriangle = 0
    nTriangle = 0
    surface = 0
    # associate each coordinate with a point: 3 coordinates are needed for a point
    # in 3D. Additionally we perform a shift from image coordinates (pixel) which
    # is the default of the Contour Tree Algorithm to the World Coordinates.
    # TODO: add this in the algorithm.
    # 4x4 homogeneous scaling matrix from the voxel spacing
    mScaling = numpy.asarray([spacing[0], 0,0,0,
                            0,spacing[1],0,0,
                            0,0,spacing[2],0,
                            0,0,0,1]).reshape((4,4))
    # 4x4 homogeneous translation matrix from the origin
    mShift = numpy.asarray([1,0,0,origin[0],
                          0,1,0,origin[1],
                          0,0,1,origin[2],
                          0,0,0,1]).reshape((4,4))
    # combined image-to-world transform (scale composed with shift)
    mTransform = numpy.dot(mScaling, mShift)
    point_count = 0
    for surf in coord_list:
        print("Image-to-world coordinate trasformation ... %d" % surface)
        for point in surf:
            world_coord = numpy.dot(mTransform, point)
            # note the axis swap: input is (z, y, x) order, VTK wants (x, y, z)
            xCoord = world_coord[2]
            yCoord = world_coord[1]
            zCoord = world_coord[0]
            # i += 3
            triangle_vertices.InsertNextPoint(xCoord, yCoord, zCoord);
            # The id of the vertex of the triangle (0,1,2) is linked to
            # the id of the points in the list, so in facts we just link id-to-id
            trianglePointIds.SetId(isTriangle, point_count)
            isTriangle += 1
            point_count += 1
            if (isTriangle == 3) :
                isTriangle = 0;
                # insert the current triangle in the triangles array
                triangles.InsertNextCell(triangle);
            # NOTE(review): `surface` is incremented once per *point* (it sits
            # inside the inner loop), so the printed index counts points, not
            # surfaces -- confirm intent.
            surface += 1
    # polydata object: attach the shared vertex pool and the triangle cells
    trianglePolyData = vtk.vtkPolyData()
    trianglePolyData.SetPoints( triangle_vertices )
    trianglePolyData.SetPolys( triangles )
    return trianglePolyData
# Load the input volume from disk.
reader = vtk.vtkMetaImageReader()
reader.SetFileName("../../data/fuel_uc_python.mha")
reader.Update()

# Segment the volume with the contour-tree based segmentor.
seg = SimpleflexSegmentor()
seg.setInputData(Converter.vtk2numpy(reader.GetOutput()))
seg.calculateContourTree()
#seg.setIsoValuePercent(24.)
seg.setLocalIsoValuePercent(0.)
seg.resetCollapsePriority(seg.PRIORITY_VOLUME)

# 5. Construct the iso-surfaces
print ("calling resetCollapsePriority")
#seg.updateTreeFromLogTreeSize(size=0.6, isGlobal=False)
print ("calling setlogtreesize")
seg.ct.SetLogTreeSize(1)
print ("calling UpdateTreeFromLogTreeSize")
seg.ct.UpdateTreeFromLogTreeSize()
print ("calling ConstructLocalIsoSurface")
#seg.constructLocalIsoSurfaces()
seg.ct.ConstructLocalIsoSurface()
print ("called ConstructLocalIsoSurface")
#seg.constructIsoSurfaces()

# 6. Retrieve the isosurfaces and display
coord_list = seg.getSurfaces()
del (seg)
#print ("getSurface " , len(coord_list))

# Swap the first and last spacing components: the volume data is in
# (z, y, x) order while the surface builder expects x-major spacing.
spacing = numpy.asarray(reader.GetOutput().GetSpacing())
s1 = spacing[0]
spacing[0] = spacing[2]
spacing[2] = s1

print (len(coord_list))

# Display the volume plus the reconstructed surface mesh.
v = CILViewer()
v.setInput3DData(reader.GetOutput())
v.displayPolyData(surface2vtkPolyData(coord_list, spacing=spacing))
#v.startRenderLoop()
dimX, dimY, dimZ = reader.GetOutput().GetDimensions()

#Setting locked values for camera position
locX = InitialCameraPosition[0]
locY = InitialCameraPosition[1]
locZ = InitialCameraPosition[2]

#Setting camera position
v.getCamera().SetPosition(InitialCameraPosition)
v.getCamera().SetFocalPoint(FocalPoint)

#Setting camera viewup
v.getCamera().SetViewUp(ViewUp)

#Set camera clipping range
v.getCamera().SetClippingRange(ClippingRange)

#Defining distance from camera to focal point (orbit radius in the y/z plane)
r = numpy.sqrt(((InitialCameraPosition[2]-FocalPoint[2])**2)
    +(InitialCameraPosition[1]-FocalPoint[1])**2)
print('Radius: {}'.format(r))

camera = vtk.vtkCamera()
camera.SetPosition(InitialCameraPosition)
camera.SetFocalPoint(FocalPoint)
camera.SetViewUp(ViewUp)
v.getRenderer().SetActiveCamera(camera)

#Animating the camera: two full revolutions (4*pi) over 100 frames
for x in range(100):
    angle = ((numpy.pi)*4/100)*x
    NewLocationX = r*(numpy.sin(angle))+FocalPoint[0]
    NewLocationY = r*(numpy.cos(angle))+FocalPoint[1]
    # NOTE(review): NewLocationZ is computed but never used -- the orbit
    # keeps the constant height locZ below. Confirm intended.
    NewLocationZ = r*(numpy.cos(angle))+FocalPoint[2]
    NewLocation = (NewLocationX, NewLocationY, locZ)
    v.getCamera().SetPosition(NewLocation)

    #Rendering and saving the render
    v.getRenderer().Render()
    v.saveRender('test_{}.png'.format(x))

v.startRenderLoop()
| [
"numpy.sqrt",
"vtk.vtkMetaImageReader",
"vtk.vtkTriangle",
"vtk.vtkCamera",
"vtk.vtkCellArray",
"vtk.vtkPolyData",
"numpy.asarray",
"vtk.vtkPoints",
"numpy.dot",
"numpy.cos",
"ccpi.segmentation.SimpleflexSegmentor.SimpleflexSegmentor",
"numpy.sin",
"ccpi.viewer.CILViewer.CILViewer"
] | [((4710, 4734), 'vtk.vtkMetaImageReader', 'vtk.vtkMetaImageReader', ([], {}), '()\n', (4732, 4734), False, 'import vtk\n'), ((4814, 4835), 'ccpi.segmentation.SimpleflexSegmentor.SimpleflexSegmentor', 'SimpleflexSegmentor', ([], {}), '()\n', (4833, 4835), False, 'from ccpi.segmentation.SimpleflexSegmentor import SimpleflexSegmentor\n'), ((5786, 5797), 'ccpi.viewer.CILViewer.CILViewer', 'CILViewer', ([], {}), '()\n', (5795, 5797), False, 'from ccpi.viewer.CILViewer import CILViewer\n'), ((6439, 6553), 'numpy.sqrt', 'numpy.sqrt', (['((InitialCameraPosition[2] - FocalPoint[2]) ** 2 + (InitialCameraPosition[1\n ] - FocalPoint[1]) ** 2)'], {}), '((InitialCameraPosition[2] - FocalPoint[2]) ** 2 + (\n InitialCameraPosition[1] - FocalPoint[1]) ** 2)\n', (6449, 6553), False, 'import numpy\n'), ((6588, 6603), 'vtk.vtkCamera', 'vtk.vtkCamera', ([], {}), '()\n', (6601, 6603), False, 'import vtk\n'), ((2541, 2556), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (2554, 2556), False, 'import vtk\n'), ((2613, 2630), 'vtk.vtkTriangle', 'vtk.vtkTriangle', ([], {}), '()\n', (2628, 2630), False, 'import vtk\n'), ((2732, 2750), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (2748, 2750), False, 'import vtk\n'), ((3524, 3551), 'numpy.dot', 'numpy.dot', (['mScaling', 'mShift'], {}), '(mScaling, mShift)\n', (3533, 3551), False, 'import numpy\n'), ((4552, 4569), 'vtk.vtkPolyData', 'vtk.vtkPolyData', ([], {}), '()\n', (4567, 4569), False, 'import vtk\n'), ((3120, 3215), 'numpy.asarray', 'numpy.asarray', (['[spacing[0], 0, 0, 0, 0, spacing[1], 0, 0, 0, 0, spacing[2], 0, 0, 0, 0, 1]'], {}), '([spacing[0], 0, 0, 0, 0, spacing[1], 0, 0, 0, 0, spacing[2], \n 0, 0, 0, 0, 1])\n', (3133, 3215), False, 'import numpy\n'), ((3322, 3414), 'numpy.asarray', 'numpy.asarray', (['[1, 0, 0, origin[0], 0, 1, 0, origin[1], 0, 0, 1, origin[2], 0, 0, 0, 1]'], {}), '([1, 0, 0, origin[0], 0, 1, 0, origin[1], 0, 0, 1, origin[2], \n 0, 0, 0, 1])\n', (3335, 3414), False, 'import numpy\n'), ((3742, 
3770), 'numpy.dot', 'numpy.dot', (['mTransform', 'point'], {}), '(mTransform, point)\n', (3751, 3770), False, 'import numpy\n'), ((6852, 6868), 'numpy.sin', 'numpy.sin', (['angle'], {}), '(angle)\n', (6861, 6868), False, 'import numpy\n'), ((6907, 6923), 'numpy.cos', 'numpy.cos', (['angle'], {}), '(angle)\n', (6916, 6923), False, 'import numpy\n'), ((6963, 6979), 'numpy.cos', 'numpy.cos', (['angle'], {}), '(angle)\n', (6972, 6979), False, 'import numpy\n')] |
import json
def read_input():
    """Load and return the parsed puzzle input from Data/day12.json.

    Uses a context manager so the file handle is always closed; the
    original opened the file and never closed it.
    """
    with open("Data/day12.json", "r") as file:
        return json.load(file)
def calculate_sum(accounts):
    """Recursively sum every number in a parsed-JSON structure.

    Strings contribute 0; ints and floats contribute their value; lists
    and dicts are summed recursively (dict keys are ignored).

    Bug fixed: the original only matched ``type(x) == int``, so JSON
    floats (and booleans/None) fell through every branch and returned
    None, crashing the enclosing sum().
    """
    if isinstance(accounts, bool):
        # bool subclasses int, but JSON true/false are not numbers.
        return 0
    if isinstance(accounts, str):
        return 0
    if isinstance(accounts, (int, float)):
        return accounts
    if isinstance(accounts, list):
        return sum(calculate_sum(item) for item in accounts)
    if isinstance(accounts, dict):
        return sum(calculate_sum(item) for item in accounts.values())
    return 0  # None or any other non-JSON value
def calculate_sum_no_red(accounts):
    """Sum every number, but a dict containing the *value* "red" counts as 0.

    The "red" rule applies only to dict values (not keys, not list
    elements), matching the part-two puzzle semantics.

    Bug fixed: like calculate_sum, the original returned None for JSON
    floats, booleans and None, crashing the enclosing sum().
    """
    if isinstance(accounts, bool):
        # bool subclasses int, but JSON true/false are not numbers.
        return 0
    if isinstance(accounts, str):
        return 0
    if isinstance(accounts, (int, float)):
        return accounts
    if isinstance(accounts, list):
        return sum(calculate_sum_no_red(item) for item in accounts)
    if isinstance(accounts, dict):
        if "red" in accounts.values():
            # The whole object (and everything inside it) is ignored.
            return 0
        return sum(calculate_sum_no_red(item) for item in accounts.values())
    return 0  # None or any other non-JSON value
if __name__ == "__main__":
accounts = read_input()
print(f"Part one: {calculate_sum(accounts)}")
print(f"Part two: {calculate_sum_no_red(accounts)}")
| [
"json.load"
] | [((86, 101), 'json.load', 'json.load', (['file'], {}), '(file)\n', (95, 101), False, 'import json\n')] |
from easyvec import Mat2, Vec2
import numpy as np
from pytest import approx
def test_constructor1():
    """Four positional scalars fill the matrix row-major."""
    m = Mat2(1, 2, 3, 4)
    assert m is not None
    for name, want in (('m11', 1), ('m12', 2), ('m21', 3), ('m22', 4)):
        assert getattr(m, name) == approx(want)
def test_constructor2():
    """A flat 4-element list fills the matrix row-major."""
    m = Mat2([1, 2, 3, 4])
    assert m is not None
    for name, want in (('m11', 1), ('m12', 2), ('m21', 3), ('m22', 4)):
        assert getattr(m, name) == approx(want)
def test_constructor3():
    """A nested list of two rows fills the matrix row-major."""
    m = Mat2([[1, 2], [3, 4]])
    assert m is not None
    for name, want in (('m11', 1), ('m12', 2), ('m21', 3), ('m22', 4)):
        assert getattr(m, name) == approx(want)
def test_constructor4():
    """Two separate row lists fill the matrix row-major."""
    m = Mat2([1, 2], [3, 4])
    assert m is not None
    for name, want in (('m11', 1), ('m12', 2), ('m21', 3), ('m22', 4)):
        assert getattr(m, name) == approx(want)
def test_constructor5():
    """Two Vec2 row arguments fill the matrix row-major."""
    m = Mat2(Vec2(1, 2), Vec2(3, 4))
    assert m is not None
    for name, want in (('m11', 1), ('m12', 2), ('m21', 3), ('m22', 4)):
        assert getattr(m, name) == approx(want)
def test_constructor6():
    """A list of two Vec2 rows fills the matrix row-major."""
    m = Mat2([Vec2(1, 2), Vec2(3, 4)])
    assert m is not None
    for name, want in (('m11', 1), ('m12', 2), ('m21', 3), ('m22', 4)):
        assert getattr(m, name) == approx(want)
def test_constructor7():
    """Mat2.eye() yields the 2x2 identity matrix."""
    m = Mat2.eye()
    assert m is not None
    for name, want in (('m11', 1), ('m12', 0), ('m21', 0), ('m22', 1)):
        assert getattr(m, name) == approx(want)
def test_constructor8():
    """from_angle(a) is [[cos a, sin a], [-sin a, cos a]] for random angles."""
    from math import sin, cos, pi
    for angle_deg in np.random.uniform(-720, 720, 1000):
        a = angle_deg * pi / 180
        m = Mat2.from_angle(a)
        assert m is not None
        assert m.m11 == approx(cos(a))
        assert m.m12 == approx(sin(a))
        assert m.m21 == approx(-sin(a))
        assert m.m22 == approx(cos(a))
def test_constructor9():
    """from_xaxis normalizes (1, 1) into the rotation's rows."""
    m = Mat2.from_xaxis((1, 1))
    assert m is not None
    s = 1 / 2 ** 0.5
    for name, want in (('m11', s), ('m12', s), ('m21', -s), ('m22', s)):
        assert getattr(m, name) == approx(want)
def test_xiyj_axis():
    """x/i accessors return the first row, y/j accessors the second."""
    m = Mat2(1, 2, 3, 4)
    for getter in (m.x_axis, m.i_axis):
        assert getter() == (1, 2)
    for getter in (m.y_axis, m.j_axis):
        assert getter() == (3, 4)
def test_cmp():
    """== and != compare element-wise against nested sequences."""
    mat = Mat2(-1, 2, -3, 4)
    assert mat == [[-1, 2], [-3, 4]]
    assert mat != [[-1, -2], [-3, 4]]
def test_T():
    """The transpose swaps the off-diagonal elements."""
    mat = Mat2(-1, 2, -3, 4)
    assert mat.T == [[-1, -3], [2, 4]]
def test_inverse1():
    """A pure rotation's inverse equals its transpose and det == 1."""
    for a in np.random.uniform(-720, 720, 1000):
        rot = Mat2.from_angle(a)
        assert rot._1 == rot.T
        assert rot.det() == approx(1)
def test_inverse2():
    """m * m^-1 is the identity for every non-singular random matrix."""
    for vals in np.random.uniform(-720, 720, (1000, 4)):
        m = Mat2(vals)
        if abs(m.det()) >= 1e-6:  # skip (near-)singular matrices
            assert m * m._1 == Mat2.eye()
def test_mul1():
    """Scalar multiplication is element-wise and commutative."""
    for vals in np.random.uniform(-720, 720, (1000, 5)):
        m, k = Mat2(vals[:-1]), vals[-1]
        expected = (vals[:-1] * k).reshape(2, 2)
        assert m * k == expected
        assert k * m == expected
def test_mul2():
    """A rotation matrix rotates unit vectors by -angle; the inverse undoes it."""
    for a, x, y in np.random.uniform(-180, 180, (1000, 3)):
        rot = Mat2.from_angle(a, 1)
        unit = Vec2(x, y).norm()
        rotated = rot * unit
        assert unit.angle_to(rotated, 1) == approx(-a)
        assert rot._1 * rotated == unit
        back = rot._1 * unit
        assert unit.angle_to(back, 1) == approx(a)
def test_imul():
    """In-place multiplication by the inverse leaves the identity."""
    for vals in np.random.uniform(-720, 720, (1000, 4)):
        m = Mat2(vals)
        if abs(m.det()) < 1e-6:  # skip (near-)singular matrices
            continue
        assert m * m._1 == Mat2.eye()
        m *= m._1
        assert m == Mat2.eye()
def test_add1():
    """Adding a scalar shifts every element; order does not matter."""
    for vals in np.random.uniform(-720, 720, (1000, 5)):
        m, k = Mat2(vals[:-1]), vals[-1]
        shifted = (vals[:-1] + k).reshape(2, 2)
        assert m + k == shifted
        assert k + m == shifted
def test_add2():
    """Matrix addition is commutative and element-wise."""
    for vals in np.random.uniform(-720, 720, (1000, 8)):
        a, b = Mat2(vals[:4]), Mat2(vals[4:])
        assert a + b == b + a
        assert a + b == (vals[:4] + vals[4:]).reshape(2, 2)
def test_iadd():
    """+= produces the same result as the binary + operator."""
    for vals in np.random.uniform(-720, 720, (1000, 8)):
        a, b = Mat2(vals[:4]), Mat2(vals[4:])
        total = a + b
        a += b
        assert a == total
        assert a == (vals[:4] + vals[4:]).reshape(2, 2)
def test_sub1():
    """Scalar subtraction is element-wise; reversing the order negates it."""
    for vals in np.random.uniform(-720, 720, (1000, 5)):
        m, k = Mat2(vals[:-1]), vals[-1]
        diff = (vals[:-1] - k).reshape(2, 2)
        assert m - k == diff
        assert k - m == -diff
def test_sub2():
    """Matrix subtraction anti-commutes and is element-wise."""
    for vals in np.random.uniform(-720, 720, (1000, 8)):
        a, b = Mat2(vals[:4]), Mat2(vals[4:])
        assert a - b == -(b - a)
        assert a - b == (vals[:4] - vals[4:]).reshape(2, 2)
def test_isub():
    """-= produces the same result as the binary - operator."""
    for vals in np.random.uniform(-720, 720, (1000, 8)):
        a, b = Mat2(vals[:4]), Mat2(vals[4:])
        diff = a - b
        a -= b
        assert a == diff
        assert a == (vals[:4] - vals[4:]).reshape(2, 2)
def test_div1():
    """Dividing by a scalar divides every element."""
    for vals in np.random.uniform(-720, 720, (1000, 5)):
        m, k = Mat2(vals[:-1]), vals[-1]
        expected = (vals[:-1] / k).reshape(2, 2)
        assert m / k == expected
def test_div2():
    """Matrix / matrix divides element-wise."""
    for vals in np.random.uniform(-720, 720, (1000, 8)):
        a, b = Mat2(vals[:4]), Mat2(vals[4:])
        assert a / b == (vals[:4] / vals[4:]).reshape(2, 2)
def test_idiv():
    """/= produces the same result as the binary / operator."""
    for vals in np.random.uniform(-720, 720, (1000, 8)):
        a, b = Mat2(vals[:4]), Mat2(vals[4:])
        quotient = a / b
        a /= b
        assert a == quotient
        assert a == (vals[:4] / vals[4:]).reshape(2, 2)
assert m1 == (ms[:4] / ms[4:]).reshape(2,2) | [
"pytest.approx",
"easyvec.Vec2",
"easyvec.Mat2",
"easyvec.Mat2.eye",
"math.cos",
"numpy.random.uniform",
"easyvec.Mat2.from_angle",
"easyvec.Mat2.from_xaxis",
"math.sin"
] | [((110, 126), 'easyvec.Mat2', 'Mat2', (['(1)', '(2)', '(3)', '(4)'], {}), '(1, 2, 3, 4)\n', (114, 126), False, 'from easyvec import Mat2, Vec2\n'), ((304, 322), 'easyvec.Mat2', 'Mat2', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (308, 322), False, 'from easyvec import Mat2, Vec2\n'), ((500, 522), 'easyvec.Mat2', 'Mat2', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (504, 522), False, 'from easyvec import Mat2, Vec2\n'), ((700, 720), 'easyvec.Mat2', 'Mat2', (['[1, 2]', '[3, 4]'], {}), '([1, 2], [3, 4])\n', (704, 720), False, 'from easyvec import Mat2, Vec2\n'), ((1313, 1323), 'easyvec.Mat2.eye', 'Mat2.eye', ([], {}), '()\n', (1321, 1323), False, 'from easyvec import Mat2, Vec2\n'), ((1547, 1581), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000)'], {}), '(-720, 720, 1000)\n', (1564, 1581), True, 'import numpy as np\n'), ((1879, 1902), 'easyvec.Mat2.from_xaxis', 'Mat2.from_xaxis', (['(1, 1)'], {}), '((1, 1))\n', (1894, 1902), False, 'from easyvec import Mat2, Vec2\n'), ((2108, 2124), 'easyvec.Mat2', 'Mat2', (['(1)', '(2)', '(3)', '(4)'], {}), '(1, 2, 3, 4)\n', (2112, 2124), False, 'from easyvec import Mat2, Vec2\n'), ((2271, 2289), 'easyvec.Mat2', 'Mat2', (['(-1)', '(2)', '(-3)', '(4)'], {}), '(-1, 2, -3, 4)\n', (2275, 2289), False, 'from easyvec import Mat2, Vec2\n'), ((2375, 2393), 'easyvec.Mat2', 'Mat2', (['(-1)', '(2)', '(-3)', '(4)'], {}), '(-1, 2, -3, 4)\n', (2379, 2393), False, 'from easyvec import Mat2, Vec2\n'), ((2465, 2499), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000)'], {}), '(-720, 720, 1000)\n', (2482, 2499), True, 'import numpy as np\n'), ((2634, 2673), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 4)'], {}), '(-720, 720, (1000, 4))\n', (2651, 2673), True, 'import numpy as np\n'), ((2817, 2856), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 5)'], {}), '(-720, 720, (1000, 5))\n', (2834, 2856), True, 'import numpy as np\n'), ((3045, 
3084), 'numpy.random.uniform', 'np.random.uniform', (['(-180)', '(180)', '(1000, 3)'], {}), '(-180, 180, (1000, 3))\n', (3062, 3084), True, 'import numpy as np\n'), ((3372, 3411), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 4)'], {}), '(-720, 720, (1000, 4))\n', (3389, 3411), True, 'import numpy as np\n'), ((3604, 3643), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 5)'], {}), '(-720, 720, (1000, 5))\n', (3621, 3643), True, 'import numpy as np\n'), ((3857, 3896), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 8)'], {}), '(-720, 720, (1000, 8))\n', (3874, 3896), True, 'import numpy as np\n'), ((4070, 4109), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 8)'], {}), '(-720, 720, (1000, 8))\n', (4087, 4109), True, 'import numpy as np\n'), ((4308, 4347), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 5)'], {}), '(-720, 720, (1000, 5))\n', (4325, 4347), True, 'import numpy as np\n'), ((4562, 4601), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 8)'], {}), '(-720, 720, (1000, 8))\n', (4579, 4601), True, 'import numpy as np\n'), ((4778, 4817), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 8)'], {}), '(-720, 720, (1000, 8))\n', (4795, 4817), True, 'import numpy as np\n'), ((5016, 5055), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 5)'], {}), '(-720, 720, (1000, 5))\n', (5033, 5055), True, 'import numpy as np\n'), ((5207, 5246), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 8)'], {}), '(-720, 720, (1000, 8))\n', (5224, 5246), True, 'import numpy as np\n'), ((5386, 5425), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 8)'], {}), '(-720, 720, (1000, 8))\n', (5403, 5425), True, 'import numpy as np\n'), ((169, 178), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (175, 178), False, 'from pytest import 
approx\n'), ((199, 208), 'pytest.approx', 'approx', (['(2)'], {}), '(2)\n', (205, 208), False, 'from pytest import approx\n'), ((229, 238), 'pytest.approx', 'approx', (['(3)'], {}), '(3)\n', (235, 238), False, 'from pytest import approx\n'), ((259, 268), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (265, 268), False, 'from pytest import approx\n'), ((365, 374), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (371, 374), False, 'from pytest import approx\n'), ((395, 404), 'pytest.approx', 'approx', (['(2)'], {}), '(2)\n', (401, 404), False, 'from pytest import approx\n'), ((425, 434), 'pytest.approx', 'approx', (['(3)'], {}), '(3)\n', (431, 434), False, 'from pytest import approx\n'), ((455, 464), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (461, 464), False, 'from pytest import approx\n'), ((565, 574), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (571, 574), False, 'from pytest import approx\n'), ((595, 604), 'pytest.approx', 'approx', (['(2)'], {}), '(2)\n', (601, 604), False, 'from pytest import approx\n'), ((625, 634), 'pytest.approx', 'approx', (['(3)'], {}), '(3)\n', (631, 634), False, 'from pytest import approx\n'), ((655, 664), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (661, 664), False, 'from pytest import approx\n'), ((763, 772), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (769, 772), False, 'from pytest import approx\n'), ((793, 802), 'pytest.approx', 'approx', (['(2)'], {}), '(2)\n', (799, 802), False, 'from pytest import approx\n'), ((823, 832), 'pytest.approx', 'approx', (['(3)'], {}), '(3)\n', (829, 832), False, 'from pytest import approx\n'), ((853, 862), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (859, 862), False, 'from pytest import approx\n'), ((903, 913), 'easyvec.Vec2', 'Vec2', (['(1)', '(2)'], {}), '(1, 2)\n', (907, 913), False, 'from easyvec import Mat2, Vec2\n'), ((913, 923), 'easyvec.Vec2', 'Vec2', (['(3)', '(4)'], {}), '(3, 4)\n', (917, 923), False, 'from easyvec import Mat2, Vec2\n'), ((969, 
978), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (975, 978), False, 'from pytest import approx\n'), ((999, 1008), 'pytest.approx', 'approx', (['(2)'], {}), '(2)\n', (1005, 1008), False, 'from pytest import approx\n'), ((1029, 1038), 'pytest.approx', 'approx', (['(3)'], {}), '(3)\n', (1035, 1038), False, 'from pytest import approx\n'), ((1059, 1068), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (1065, 1068), False, 'from pytest import approx\n'), ((1177, 1186), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (1183, 1186), False, 'from pytest import approx\n'), ((1207, 1216), 'pytest.approx', 'approx', (['(2)'], {}), '(2)\n', (1213, 1216), False, 'from pytest import approx\n'), ((1237, 1246), 'pytest.approx', 'approx', (['(3)'], {}), '(3)\n', (1243, 1246), False, 'from pytest import approx\n'), ((1267, 1276), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (1273, 1276), False, 'from pytest import approx\n'), ((1369, 1378), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (1375, 1378), False, 'from pytest import approx\n'), ((1399, 1408), 'pytest.approx', 'approx', (['(0)'], {}), '(0)\n', (1405, 1408), False, 'from pytest import approx\n'), ((1429, 1438), 'pytest.approx', 'approx', (['(0)'], {}), '(0)\n', (1435, 1438), False, 'from pytest import approx\n'), ((1459, 1468), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (1465, 1468), False, 'from pytest import approx\n'), ((1619, 1641), 'easyvec.Mat2.from_angle', 'Mat2.from_angle', (['angle'], {}), '(angle)\n', (1634, 1641), False, 'from easyvec import Mat2, Vec2\n'), ((1947, 1967), 'pytest.approx', 'approx', (['(1 / 2 ** 0.5)'], {}), '(1 / 2 ** 0.5)\n', (1953, 1967), False, 'from pytest import approx\n'), ((1984, 2004), 'pytest.approx', 'approx', (['(1 / 2 ** 0.5)'], {}), '(1 / 2 ** 0.5)\n', (1990, 2004), False, 'from pytest import approx\n'), ((2021, 2042), 'pytest.approx', 'approx', (['(-1 / 2 ** 0.5)'], {}), '(-1 / 2 ** 0.5)\n', (2027, 2042), False, 'from pytest import approx\n'), ((2059, 
2079), 'pytest.approx', 'approx', (['(1 / 2 ** 0.5)'], {}), '(1 / 2 ** 0.5)\n', (2065, 2079), False, 'from pytest import approx\n'), ((2511, 2533), 'easyvec.Mat2.from_angle', 'Mat2.from_angle', (['angle'], {}), '(angle)\n', (2526, 2533), False, 'from easyvec import Mat2, Vec2\n'), ((2684, 2692), 'easyvec.Mat2', 'Mat2', (['ms'], {}), '(ms)\n', (2688, 2692), False, 'from easyvec import Mat2, Vec2\n'), ((2867, 2880), 'easyvec.Mat2', 'Mat2', (['ms[:-1]'], {}), '(ms[:-1])\n', (2871, 2880), False, 'from easyvec import Mat2, Vec2\n'), ((3095, 3120), 'easyvec.Mat2.from_angle', 'Mat2.from_angle', (['angle', '(1)'], {}), '(angle, 1)\n', (3110, 3120), False, 'from easyvec import Mat2, Vec2\n'), ((3422, 3430), 'easyvec.Mat2', 'Mat2', (['ms'], {}), '(ms)\n', (3426, 3430), False, 'from easyvec import Mat2, Vec2\n'), ((3654, 3667), 'easyvec.Mat2', 'Mat2', (['ms[:-1]'], {}), '(ms[:-1])\n', (3658, 3667), False, 'from easyvec import Mat2, Vec2\n'), ((3908, 3920), 'easyvec.Mat2', 'Mat2', (['ms[:4]'], {}), '(ms[:4])\n', (3912, 3920), False, 'from easyvec import Mat2, Vec2\n'), ((3934, 3946), 'easyvec.Mat2', 'Mat2', (['ms[4:]'], {}), '(ms[4:])\n', (3938, 3946), False, 'from easyvec import Mat2, Vec2\n'), ((4121, 4133), 'easyvec.Mat2', 'Mat2', (['ms[:4]'], {}), '(ms[:4])\n', (4125, 4133), False, 'from easyvec import Mat2, Vec2\n'), ((4147, 4159), 'easyvec.Mat2', 'Mat2', (['ms[4:]'], {}), '(ms[4:])\n', (4151, 4159), False, 'from easyvec import Mat2, Vec2\n'), ((4358, 4371), 'easyvec.Mat2', 'Mat2', (['ms[:-1]'], {}), '(ms[:-1])\n', (4362, 4371), False, 'from easyvec import Mat2, Vec2\n'), ((4613, 4625), 'easyvec.Mat2', 'Mat2', (['ms[:4]'], {}), '(ms[:4])\n', (4617, 4625), False, 'from easyvec import Mat2, Vec2\n'), ((4639, 4651), 'easyvec.Mat2', 'Mat2', (['ms[4:]'], {}), '(ms[4:])\n', (4643, 4651), False, 'from easyvec import Mat2, Vec2\n'), ((4829, 4841), 'easyvec.Mat2', 'Mat2', (['ms[:4]'], {}), '(ms[:4])\n', (4833, 4841), False, 'from easyvec import Mat2, Vec2\n'), ((4855, 4867), 
'easyvec.Mat2', 'Mat2', (['ms[4:]'], {}), '(ms[4:])\n', (4859, 4867), False, 'from easyvec import Mat2, Vec2\n'), ((5066, 5079), 'easyvec.Mat2', 'Mat2', (['ms[:-1]'], {}), '(ms[:-1])\n', (5070, 5079), False, 'from easyvec import Mat2, Vec2\n'), ((5258, 5270), 'easyvec.Mat2', 'Mat2', (['ms[:4]'], {}), '(ms[:4])\n', (5262, 5270), False, 'from easyvec import Mat2, Vec2\n'), ((5284, 5296), 'easyvec.Mat2', 'Mat2', (['ms[4:]'], {}), '(ms[4:])\n', (5288, 5296), False, 'from easyvec import Mat2, Vec2\n'), ((5437, 5449), 'easyvec.Mat2', 'Mat2', (['ms[:4]'], {}), '(ms[:4])\n', (5441, 5449), False, 'from easyvec import Mat2, Vec2\n'), ((5463, 5475), 'easyvec.Mat2', 'Mat2', (['ms[4:]'], {}), '(ms[4:])\n', (5467, 5475), False, 'from easyvec import Mat2, Vec2\n'), ((1110, 1120), 'easyvec.Vec2', 'Vec2', (['(1)', '(2)'], {}), '(1, 2)\n', (1114, 1120), False, 'from easyvec import Mat2, Vec2\n'), ((1120, 1130), 'easyvec.Vec2', 'Vec2', (['(3)', '(4)'], {}), '(3, 4)\n', (1124, 1130), False, 'from easyvec import Mat2, Vec2\n'), ((2587, 2596), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (2593, 2596), False, 'from pytest import approx\n'), ((2774, 2784), 'easyvec.Mat2.eye', 'Mat2.eye', ([], {}), '()\n', (2782, 2784), False, 'from easyvec import Mat2, Vec2\n'), ((3206, 3220), 'pytest.approx', 'approx', (['(-angle)'], {}), '(-angle)\n', (3212, 3220), False, 'from pytest import approx\n'), ((3325, 3338), 'pytest.approx', 'approx', (['angle'], {}), '(angle)\n', (3331, 3338), False, 'from pytest import approx\n'), ((3512, 3522), 'easyvec.Mat2.eye', 'Mat2.eye', ([], {}), '()\n', (3520, 3522), False, 'from easyvec import Mat2, Vec2\n'), ((3561, 3571), 'easyvec.Mat2.eye', 'Mat2.eye', ([], {}), '()\n', (3569, 3571), False, 'from easyvec import Mat2, Vec2\n'), ((1702, 1712), 'math.cos', 'cos', (['angle'], {}), '(angle)\n', (1705, 1712), False, 'from math import sin, cos, pi\n'), ((1745, 1755), 'math.sin', 'sin', (['angle'], {}), '(angle)\n', (1748, 1755), False, 'from math import sin, cos, 
pi\n'), ((1832, 1842), 'math.cos', 'cos', (['angle'], {}), '(angle)\n', (1835, 1842), False, 'from math import sin, cos, pi\n'), ((3133, 3143), 'easyvec.Vec2', 'Vec2', (['x', 'y'], {}), '(x, y)\n', (3137, 3143), False, 'from easyvec import Mat2, Vec2\n'), ((1789, 1799), 'math.sin', 'sin', (['angle'], {}), '(angle)\n', (1792, 1799), False, 'from math import sin, cos, pi\n')] |
import numpy as np
from two_d_nav.envs.static_maze import StaticMazeNavigation
def test_goal():
    """Drive the agent along a fixed scripted path that should end at the goal."""
    env = StaticMazeNavigation()
    # (step count, constant action) segments of the scripted trajectory.
    script = [
        (60, (1.0, -0.1)),
        (30, (-1.0, -0.5)),
        (5, (0.0, -1.0)),
        (15, (-1.0, 0.0)),
        (30, (0.0, -1.0)),
        (18, (-1.0, -0.6)),
    ]
    obs = reward = done = None
    for steps, action in script:
        for _step in range(steps):
            obs, reward, done, _ = env.step(np.array(action))
            env.render()
    if done:
        print(f"Reach goal: {obs}")
        print(f"Reward: {reward}")
def test_obstacle():
    """Drive the agent along a fixed scripted path that should hit an obstacle."""
    env = StaticMazeNavigation()
    # (step count, constant action) segments of the scripted trajectory.
    script = [
        (60, (1.0, -0.1)),
        (5, (0.0, -1.0)),
        (30, (-1.0, 0.0)),
    ]
    obs = reward = done = None
    for steps, action in script:
        for _step in range(steps):
            obs, reward, done, _ = env.step(np.array(action))
            env.render()
    if done:
        print(f"Hit obstacle: {obs}")
        print(f"Reward: {reward}")
def test_wall():
    """Push the agent left into a wall for 20 steps and report the final reward."""
    env = StaticMazeNavigation()
    reward = 0.0
    for _step in range(20):
        obs, reward, done, _ = env.step(np.array([-1.0, 0.0]))
        env.render()
    print(f"Hit wall reward {reward}")
if __name__ == '__main__':
    # Run every scripted scenario in sequence.
    for scenario in (test_goal, test_obstacle, test_wall):
        scenario()
| [
"two_d_nav.envs.static_maze.StaticMazeNavigation",
"numpy.array"
] | [((109, 131), 'two_d_nav.envs.static_maze.StaticMazeNavigation', 'StaticMazeNavigation', ([], {}), '()\n', (129, 131), False, 'from two_d_nav.envs.static_maze import StaticMazeNavigation\n'), ((917, 939), 'two_d_nav.envs.static_maze.StaticMazeNavigation', 'StaticMazeNavigation', ([], {}), '()\n', (937, 939), False, 'from two_d_nav.envs.static_maze import StaticMazeNavigation\n'), ((1394, 1416), 'two_d_nav.envs.static_maze.StaticMazeNavigation', 'StaticMazeNavigation', ([], {}), '()\n', (1414, 1416), False, 'from two_d_nav.envs.static_maze import StaticMazeNavigation\n'), ((197, 218), 'numpy.array', 'np.array', (['[1.0, -0.1]'], {}), '([1.0, -0.1])\n', (205, 218), True, 'import numpy as np\n'), ((306, 328), 'numpy.array', 'np.array', (['[-1.0, -0.5]'], {}), '([-1.0, -0.5])\n', (314, 328), True, 'import numpy as np\n'), ((415, 436), 'numpy.array', 'np.array', (['[0.0, -1.0]'], {}), '([0.0, -1.0])\n', (423, 436), True, 'import numpy as np\n'), ((524, 545), 'numpy.array', 'np.array', (['[-1.0, 0.0]'], {}), '([-1.0, 0.0])\n', (532, 545), True, 'import numpy as np\n'), ((633, 654), 'numpy.array', 'np.array', (['[0.0, -1.0]'], {}), '([0.0, -1.0])\n', (641, 654), True, 'import numpy as np\n'), ((742, 764), 'numpy.array', 'np.array', (['[-1.0, -0.6]'], {}), '([-1.0, -0.6])\n', (750, 764), True, 'import numpy as np\n'), ((1005, 1026), 'numpy.array', 'np.array', (['[1.0, -0.1]'], {}), '([1.0, -0.1])\n', (1013, 1026), True, 'import numpy as np\n'), ((1113, 1134), 'numpy.array', 'np.array', (['[0.0, -1.0]'], {}), '([0.0, -1.0])\n', (1121, 1134), True, 'import numpy as np\n'), ((1222, 1243), 'numpy.array', 'np.array', (['[-1.0, 0.0]'], {}), '([-1.0, 0.0])\n', (1230, 1243), True, 'import numpy as np\n'), ((1499, 1520), 'numpy.array', 'np.array', (['[-1.0, 0.0]'], {}), '([-1.0, 0.0])\n', (1507, 1520), True, 'import numpy as np\n')] |
import argparse
import git
import github
import os.path
from mesonwrap import gitutils
from mesonwrap import tempfile
from mesonwrap import webapi
from mesonwrap import wrap
from mesonwrap.tools import environment
from retrying import retry
class Importer:
    """Mirrors wrapdb project releases into the mesonbuild GitHub organization.

    Use as a context manager: repositories are cloned into a temporary
    directory that is removed on exit.
    """

    def __init__(self):
        # Populated by __enter__; both stay None while inactive.
        self._tmp = None
        self._projects = None

    @property
    def _org(self):
        # GitHub organization hosting the wrap repositories.
        return environment.Github().get_organization('mesonbuild')

    def __enter__(self):
        self._tmp = tempfile.TemporaryDirectory()
        # Cache of project name -> git.Repo for repositories cloned so far.
        self._projects = dict()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Remove the temporary clone directory and drop the cache.
        self._tmp.__exit__(exc_type, exc_value, traceback)
        self._tmp = None
        self._projects = None

    def _clone(self, project):
        """Clone the named repository once; return the cached git.Repo."""
        if project not in self._projects:
            repo = self._org.get_repo(project)
            path = os.path.join(self._tmp.name, project)
            self._projects[project] = git.Repo.clone_from(repo.clone_url,
                                                          to_path=path)
        return self._projects[project]

    def import_project(self, project):
        """Import every version of the given wrapdb project."""
        for version in project.versions.values():
            self.import_version(version)

    def import_version(self, version):
        """Import every revision of the given project version."""
        for revision in version.revisions.values():
            self.import_revision(revision)

    @staticmethod
    def _get_commit(repo, branch, revision):
        """Walk back from origin/<branch> to the commit with the given revision.

        Raises ValueError when the walk reaches a commit whose revision is
        lower than the requested one.
        NOTE(review): if the worklist empties without a match this falls off
        the end and returns None — confirm callers can never hit that case.
        """
        cur = repo.refs['origin/' + branch].commit
        todo = [cur]
        while todo:
            cur = todo.pop()
            rev = gitutils.get_revision(repo, cur)
            if rev > revision:
                todo.extend(cur.parents)
            elif rev == revision:
                return cur
            else:
                raise ValueError('Impossible revision')

    @staticmethod
    def _is_github_error(exception):
        # retry predicate: retry only on GitHub API failures.
        return isinstance(exception, github.GithubException)

    @retry(stop_max_attempt_number=3,
           retry_on_exception=_is_github_error)
    def import_wrap(self, wrap: wrap.Wrap):
        """Create (or complete) the GitHub release for one wrap revision.

        Writes the wrap and zip payloads to temp files, ensures a
        '<version>-<revision>' tag/release exists, removes unrecognized
        release assets, and uploads upstream.wrap / patch.zip if missing.
        Retried up to 3 times on GitHub API errors.
        """
        wrappath = os.path.join(self._tmp.name, wrap.name + '.wrap')
        zippath = os.path.join(self._tmp.name, wrap.name + '.zip')
        repo = self._clone(wrap.name)
        with open(wrappath, 'wb') as f:
            f.write(wrap.wrap)
        with open(zippath, 'wb') as f:
            f.write(wrap.zip)
        commit = self._get_commit(repo, wrap.version, wrap.revision)
        ghrepo = self._org.get_repo(wrap.name)
        tagname = '{}-{}'.format(wrap.version, wrap.revision)
        try:
            rel = ghrepo.get_release(tagname)
            print('Release found')
        except github.GithubException:
            # Release does not exist yet: tag the commit and create it.
            tag = ghrepo.create_git_tag(tag=tagname, message=tagname,
                                        type='commit', object=commit.hexsha)
            ghrepo.create_git_ref('refs/tags/{}'.format(tag.tag), tag.sha)
            rel = ghrepo.create_git_release(tag=tagname, name=tagname,
                                            message=tagname)
            print('Release created')
        patch_label = 'patch.zip'
        wrap_label = 'upstream.wrap'
        patch_found = False
        wrap_found = False
        for a in rel.get_assets():
            if a.label == patch_label:
                patch_found = True
            elif a.label == wrap_label:
                wrap_found = True
            else:
                # Drop assets this tool did not upload.
                print('Removing unknown asset {!r} / {!r}'.format(a.label,
                                                                  a.name))
                a.delete_asset()
        if not wrap_found:
            rel.upload_asset(wrappath, label=wrap_label,
                             content_type='text/plain')
        if not patch_found:
            rel.upload_asset(zippath, label=patch_label,
                             content_type='application/zip')

    def import_revision(self, revision):
        """Import one revision: fetch its combined wrap and publish it."""
        wrap = revision.combined_wrap
        print(wrap.name,
              wrap.version,
              wrap.revision)
        self.import_wrap(wrap)
        print('Done')
def main(prog, args):
    """CLI entry point: import all projects, one project, a version, or a revision."""
    parser = argparse.ArgumentParser(prog)
    parser.add_argument('--wrapdb_url', default='http://wrapdb.mesonbuild.com')
    parser.add_argument('--project')
    parser.add_argument('--version', help='Does not work without --project')
    parser.add_argument('--revision', help='Does not work without --version')
    opts = parser.parse_args(args)
    projects = webapi.WebAPI(opts.wrapdb_url).projects()
    with Importer() as importer:
        if not opts.project:
            # Nothing selected: mirror every project.
            for project in projects:
                importer.import_project(project)
            return
        project = projects[opts.project]
        if not opts.version:
            importer.import_project(project)
            return
        version = project.versions[opts.version]
        if not opts.revision:
            importer.import_version(version)
            return
        importer.import_revision(version.revisions[opts.revision])
| [
"mesonwrap.tempfile.TemporaryDirectory",
"argparse.ArgumentParser",
"git.Repo.clone_from",
"mesonwrap.gitutils.get_revision",
"mesonwrap.tools.environment.Github",
"mesonwrap.webapi.WebAPI",
"retrying.retry"
] | [((1937, 2006), 'retrying.retry', 'retry', ([], {'stop_max_attempt_number': '(3)', 'retry_on_exception': '_is_github_error'}), '(stop_max_attempt_number=3, retry_on_exception=_is_github_error)\n', (1942, 2006), False, 'from retrying import retry\n'), ((4126, 4155), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['prog'], {}), '(prog)\n', (4149, 4155), False, 'import argparse\n'), ((4473, 4503), 'mesonwrap.webapi.WebAPI', 'webapi.WebAPI', (['args.wrapdb_url'], {}), '(args.wrapdb_url)\n', (4486, 4503), False, 'from mesonwrap import webapi\n'), ((489, 518), 'mesonwrap.tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (516, 518), False, 'from mesonwrap import tempfile\n'), ((958, 1007), 'git.Repo.clone_from', 'git.Repo.clone_from', (['repo.clone_url'], {'to_path': 'path'}), '(repo.clone_url, to_path=path)\n', (977, 1007), False, 'import git\n'), ((1574, 1606), 'mesonwrap.gitutils.get_revision', 'gitutils.get_revision', (['repo', 'cur'], {}), '(repo, cur)\n', (1595, 1606), False, 'from mesonwrap import gitutils\n'), ((391, 411), 'mesonwrap.tools.environment.Github', 'environment.Github', ([], {}), '()\n', (409, 411), False, 'from mesonwrap.tools import environment\n')] |
# Generated by Django 3.2.6 on 2021-08-24 09:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the product app: Category, Productbase, ImageProduct."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Category tree.
        # NOTE(review): cat_parent is a non-nullable self-FK, so a root
        # category cannot be created without an existing parent — confirm
        # this is intended.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('cat_parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.category')),
            ],
        ),
        # Product catalog entry; category deletion is RESTRICTed while
        # products reference it.
        migrations.CreateModel(
            name='Productbase',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('stock', models.BooleanField(default=False)),
                ('device', models.CharField(choices=[('ps4', 'ps4'), ('ps5', 'ps5'), ('all', 'all'), ('xbox', 'xbox'), ('nintendo', 'nintendo switch')], max_length=20)),
                ('description', models.TextField(blank=True, null=True)),
                ('price', models.FloatField(default=0.0)),
                ('added_time', models.DateTimeField(auto_now_add=True)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='product.category')),
            ],
        ),
        # Product images; deleting a product cascades to its images.
        migrations.CreateModel(
            name='ImageProduct',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('image', models.ImageField(upload_to='images/')),
                ('default', models.BooleanField(default=False)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.productbase')),
            ],
        ),
    ]
| [
"django.db.models.FloatField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.ImageField",
"django.db.models.BigAutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((337, 433), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (356, 433), False, 'from django.db import migrations, models\n'), ((457, 489), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (473, 489), False, 'from django.db import migrations, models\n'), ((523, 613), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""product.category"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'product.category')\n", (540, 613), False, 'from django.db import migrations, models\n'), ((745, 841), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (764, 841), False, 'from django.db import migrations, models\n'), ((865, 897), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (881, 897), False, 'from django.db import migrations, models\n'), ((926, 960), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (945, 960), False, 'from django.db import migrations, models\n'), ((990, 1135), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('ps4', 'ps4'), ('ps5', 'ps5'), ('all', 'all'), ('xbox', 'xbox'), (\n 'nintendo', 'nintendo switch')]", 'max_length': '(20)'}), "(choices=[('ps4', 'ps4'), ('ps5', 'ps5'), ('all', 'all'), (\n 'xbox', 'xbox'), ('nintendo', 'nintendo switch')], max_length=20)\n", (1006, 1135), False, 'from django.db import migrations, models\n'), ((1165, 1204), 'django.db.models.TextField', 'models.TextField', ([], {'blank': 
'(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1181, 1204), False, 'from django.db import migrations, models\n'), ((1233, 1263), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (1250, 1263), False, 'from django.db import migrations, models\n'), ((1297, 1336), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1317, 1336), False, 'from django.db import migrations, models\n'), ((1368, 1459), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.RESTRICT', 'to': '"""product.category"""'}), "(on_delete=django.db.models.deletion.RESTRICT, to=\n 'product.category')\n", (1385, 1459), False, 'from django.db import migrations, models\n'), ((1592, 1688), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1611, 1688), False, 'from django.db import migrations, models\n'), ((1712, 1744), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1728, 1744), False, 'from django.db import migrations, models\n'), ((1773, 1811), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""images/"""'}), "(upload_to='images/')\n", (1790, 1811), False, 'from django.db import migrations, models\n'), ((1842, 1876), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1861, 1876), False, 'from django.db import migrations, models\n'), ((1907, 2000), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""product.productbase"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'product.productbase')\n", (1924, 2000), False, 'from django.db 
import migrations, models\n')] |
# -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from app.home import blueprint
from flask import render_template, redirect, url_for, request, flash, send_file
from flask_login import login_required, current_user
from app import login_manager
from jinja2 import TemplateNotFound
import json
from app.base.forms import AddNewIPphaseone, AddNewIPphasetwo, AddNewInterface
from backend.dbhelper import DBHelper
from backend.snmphelper import SNMPhelper
from backend.selfmonitoringhelper import SelfMonitoring
# Shared helper singletons used by every route in this blueprint.
dbhelp = DBHelper()
snmphelp = SNMPhelper()
smhelp = SelfMonitoring()
# NOTE(review): the values below are computed once at import time, so the
# alarm list, its count, the client list and the sensor count are frozen
# until the application restarts — confirm this staleness is intended.
alarm_notification = dbhelp.show_notification()
counter = len(alarm_notification)
client = dbhelp.show_client_data()
all_sensor = dbhelp.counter_sensor()
@blueprint.route('/index', methods=['GET', 'POST'])
@login_required
def index():
    """Dashboard page; the POST branch starts the add-new-IP wizard."""
    pc_stats = smhelp.get_pc_stats()
    form = AddNewIPphaseone(request.form)
    if 'addnewipphaseone' in request.form:
        ip = request.form['ip']
        if dbhelp.check_ip(ip) == "False":
            # Unknown IP: continue with phase two of the wizard.
            return redirect(url_for('home_blueprint.addnewipphasetwo', ip=ip,
                                    alarm=alarm_notification, counter=counter))
        # Already monitored: re-render the dashboard with a message.
        clients = dbhelp.show_client_data()
        return render_template('index.html', msg='IP is already on the database.',
                               form=form, client=clients,
                               alarm=alarm_notification, counter=counter,
                               all_sensor=all_sensor, selfmon=pc_stats)
    clients = dbhelp.show_client_data()
    return render_template('index.html', form=form, client=clients,
                           alarm=alarm_notification, counter=counter,
                           all_sensor=all_sensor, selfmon=pc_stats)
@blueprint.route('/addnewipnext/<ip>', methods=['GET', 'POST'])
@login_required
def addnewipphasetwo(ip):
    """Phase two of the add-new-IP wizard.

    GET pre-fills the form with values fetched over SNMP from *ip*;
    POST stores the new parent device and redirects to the dashboard.
    """
    phasetwoform = AddNewIPphasetwo(request.form)
    if 'addnewipphasetwo' in request.form:
        ip = request.form['ip']
        # Guard against the IP having been added between the two phases.
        # (Was a `while` loop whose body always returned — i.e. an `if`.)
        if dbhelp.check_ip(ip) == "True":
            return render_template('addnewparentnext.html',
                                   msg='IP is already on the database.',
                                   form=phasetwoform,
                                   alarm=alarm_notification, counter=counter)
        name = request.form['name']
        sysdescr = request.form['sysdescr']
        syslocation = request.form['syslocation']
        snmp_ver = request.form['snmp_ver']
        community_string = request.form['community_string']
        dbhelp.add_parent(name, ip, snmp_ver, community_string, sysdescr, syslocation)
        dbhelp.add_sqf_client(name, ip)
        flash('IP has successfully been added.')
        return redirect(url_for('home_blueprint.index'))

    def snmp_value(fetch):
        # SNMP helpers return a (status, value) pair; use '' on failure.
        result = fetch(ip)
        return "" if result[0] == 'error' else result[1]

    sysname = snmp_value(snmphelp.get_sysname)
    sysdescr = snmp_value(snmphelp.get_sysdescr)
    syslocation = snmp_value(snmphelp.get_syslocation)
    return render_template('addnewparentnext.html', form=phasetwoform, ipv=ip,
                           sysnamev=sysname, sysdescrv=sysdescr,
                           syslocationv=syslocation,
                           alarm=alarm_notification, counter=counter)
@blueprint.route('/device/<device_id>', methods=['GET', 'POST'])
@login_required
def device_template(device_id):
    """Device detail page; the POST branch adds a new monitored interface."""
    selfmon = smhelp.get_pc_stats()
    data_by_id = dbhelp.filter_client_data_by_id(device_id)
    lastrow_log = dbhelp.get_lastrow_logs(device_id)
    uptime_sparkline = dbhelp.show_uptime_sparkline(device_id)
    traffic_sparkline_23 = dbhelp.show_traffic_sparkline(device_id, 23)
    traffic_sparkline_24 = dbhelp.show_traffic_sparkline(device_id, 24)
    ping_sparkline = dbhelp.show_ping_sparkline(device_id)
    sqf_sparkline = dbhelp.show_sqf_sparkline(device_id)
    try:
        # data_by_id / lastrow_log were previously re-fetched here; the
        # values computed above are reused instead.
        interfaceform = AddNewInterface(request.form)
        if 'addnewinterface' in request.form:
            interface = request.form['interface']
            # Reject duplicate interfaces. (Was a `while` acting as an `if`.)
            if dbhelp.check_int(data_by_id[2], interface) == "True":
                return render_template('device.html', msg='Interface is already on the database.', form=interfaceform, logs=lastrow_log, by_id=data_by_id, uptime_sparkline=uptime_sparkline, traffic_sparkline_23=traffic_sparkline_23, traffic_sparkline_24=traffic_sparkline_24, ping_sparkline=ping_sparkline, sqf_sparkline=sqf_sparkline, client=client, alarm=alarm_notification, counter=counter, selfmon=selfmon)
            name = data_by_id[1]
            host = data_by_id[2]
            dbhelp.add_int(name, host, interface)
            flash('Interface has successfully been added.')
            return redirect(url_for('home_blueprint.index'))
        return render_template('device.html', form=interfaceform, by_id=data_by_id, logs=lastrow_log, uptime_sparkline=uptime_sparkline, traffic_sparkline_23=traffic_sparkline_23, traffic_sparkline_24=traffic_sparkline_24, ping_sparkline=ping_sparkline, sqf_sparkline=sqf_sparkline, client=client, alarm=alarm_notification, counter=counter, selfmon=selfmon)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Was a bare `except:`; keep the 500 page but let SystemExit and
        # KeyboardInterrupt propagate.
        return render_template('page-500.html'), 500
@blueprint.route('/device/<device_id>/uptime')
@login_required
def uptime_template(device_id):
    """Uptime history page for one device."""
    selfmon = smhelp.get_pc_stats()
    data_by_id = dbhelp.filter_client_data_by_id(device_id)
    uptime = dbhelp.show_uptime(device_id)
    uptime_graph = dbhelp.show_uptime_graph(device_id)
    host = data_by_id[2]
    ts = dbhelp.get_currenttimestamp()
    try:
        return render_template('uptime.html', uptime=uptime, uptime_graph=uptime_graph, client=client, alarm=alarm_notification, counter=counter, selfmon=selfmon, host=host, ts=ts, by_id=data_by_id)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Was a bare `except:`; keep the 500 page but let SystemExit and
        # KeyboardInterrupt propagate.
        return render_template('page-500.html'), 500
@blueprint.route('/device/<device_id>/traffic/<interface>')
@login_required
def traffic_template(device_id, interface):
    """Traffic history page for one interface of a device."""
    selfmon = smhelp.get_pc_stats()
    data_by_id = dbhelp.filter_client_data_by_id(device_id)
    traffic = dbhelp.show_traffic(device_id, interface)
    traffic_tot = dbhelp.show_traffictot_graph(device_id, interface)
    traffic_in = dbhelp.show_trafficin_graph(device_id, interface)
    traffic_out = dbhelp.show_trafficout_graph(device_id, interface)
    host = data_by_id[2]
    ts = dbhelp.get_currenttimestamp()
    try:
        return render_template('traffic.html', traffic=traffic, traffic_tot=traffic_tot, traffic_in=traffic_in, traffic_out=traffic_out, client=client, alarm=alarm_notification, counter=counter, selfmon=selfmon, host=host, ts=ts, interface=interface, by_id=data_by_id)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Was a bare `except:`; keep the 500 page but let SystemExit and
        # KeyboardInterrupt propagate.
        return render_template('page-500.html'), 500
@blueprint.route('/device/<device_id>/ping')
@login_required
def ping_template(device_id):
    """Ping (latency) history page for one device."""
    selfmon = smhelp.get_pc_stats()
    data_by_id = dbhelp.filter_client_data_by_id(device_id)
    ping = dbhelp.show_ping(device_id)
    pingtime = dbhelp.show_pingtime_graph(device_id)
    pingmax = dbhelp.show_pingmax_graph(device_id)
    pingmin = dbhelp.show_pingmin_graph(device_id)
    host = data_by_id[2]
    ts = dbhelp.get_currenttimestamp()
    try:
        return render_template('ping.html', ping=ping, pingtime=pingtime, pingmax=pingmax, pingmin=pingmin, client=client, alarm=alarm_notification, counter=counter, selfmon=selfmon, host=host, ts=ts, by_id=data_by_id)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Was a bare `except:`; keep the 500 page but let SystemExit and
        # KeyboardInterrupt propagate.
        return render_template('page-500.html'), 500
@blueprint.route('/device/<device_id>/sqf')
@login_required
def sqf_template(device_id):
    """SQF history page for one device."""
    selfmon = smhelp.get_pc_stats()
    data_by_id = dbhelp.filter_client_data_by_id(device_id)
    sqf = dbhelp.show_sqf(device_id)
    sqf_graph = dbhelp.show_sqf_graph(device_id)
    host = data_by_id[2]
    ts = dbhelp.get_currenttimestamp()
    try:
        return render_template('sqf.html', sqf=sqf, sqf_graph=sqf_graph, client=client, alarm=alarm_notification, counter=counter, selfmon=selfmon, host=host, ts=ts, by_id=data_by_id)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Was a bare `except:`; keep the 500 page but let SystemExit and
        # KeyboardInterrupt propagate.
        return render_template('page-500.html'), 500
@blueprint.route('/download/<table>/<host>/<ts>/<interface>')
@login_required
def download_template(table, host, ts, interface):
    """Export one measurement table as CSV and send it to the browser.

    NOTE(review): table/host/ts/interface come straight from the URL and
    are concatenated into a filesystem path — validate or whitelist them
    to prevent path traversal.
    """
    filename = table + "_" + host + "_" + str(ts) + "_" + str(interface)
    path = "/tmp/" + filename + ".csv"
    # Bug fix: the route's interface parameter was ignored (interface=0
    # was hard-coded), so per-interface exports always exported with
    # interface 0 even though the filename embedded the requested one.
    dbhelp.export(table, host, filename, interface=interface)
    return send_file(path, as_attachment=True)
@blueprint.route('/<template>')
@login_required
def route_template(template):
    """Render an arbitrary page template from the blueprint's template folder."""
    selfmon = smhelp.get_pc_stats()
    logs = dbhelp.show_log()
    try:
        return render_template(template + '.html', client=client, logs=logs, counter=counter, alarm=alarm_notification, selfmon=selfmon)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Was a bare `except:`; keep the 500 page but let SystemExit and
        # KeyboardInterrupt propagate.
        return render_template('page-500.html'), 500
| [
"flask.render_template",
"app.base.forms.AddNewIPphasetwo",
"flask.flash",
"backend.dbhelper.DBHelper",
"app.base.forms.AddNewInterface",
"flask.url_for",
"app.base.forms.AddNewIPphaseone",
"app.home.blueprint.route",
"flask.send_file",
"backend.snmphelper.SNMPhelper",
"backend.selfmonitoringhel... | [((557, 567), 'backend.dbhelper.DBHelper', 'DBHelper', ([], {}), '()\n', (565, 567), False, 'from backend.dbhelper import DBHelper\n'), ((579, 591), 'backend.snmphelper.SNMPhelper', 'SNMPhelper', ([], {}), '()\n', (589, 591), False, 'from backend.snmphelper import SNMPhelper\n'), ((601, 617), 'backend.selfmonitoringhelper.SelfMonitoring', 'SelfMonitoring', ([], {}), '()\n', (615, 617), False, 'from backend.selfmonitoringhelper import SelfMonitoring\n'), ((775, 825), 'app.home.blueprint.route', 'blueprint.route', (['"""/index"""'], {'methods': "['GET', 'POST']"}), "('/index', methods=['GET', 'POST'])\n", (790, 825), False, 'from app.home import blueprint\n'), ((1838, 1900), 'app.home.blueprint.route', 'blueprint.route', (['"""/addnewipnext/<ip>"""'], {'methods': "['GET', 'POST']"}), "('/addnewipnext/<ip>', methods=['GET', 'POST'])\n", (1853, 1900), False, 'from app.home import blueprint\n'), ((3358, 3421), 'app.home.blueprint.route', 'blueprint.route', (['"""/device/<device_id>"""'], {'methods': "['GET', 'POST']"}), "('/device/<device_id>', methods=['GET', 'POST'])\n", (3373, 3421), False, 'from app.home import blueprint\n'), ((5634, 5679), 'app.home.blueprint.route', 'blueprint.route', (['"""/device/<device_id>/uptime"""'], {}), "('/device/<device_id>/uptime')\n", (5649, 5679), False, 'from app.home import blueprint\n'), ((6463, 6521), 'app.home.blueprint.route', 'blueprint.route', (['"""/device/<device_id>/traffic/<interface>"""'], {}), "('/device/<device_id>/traffic/<interface>')\n", (6478, 6521), False, 'from app.home import blueprint\n'), ((7572, 7615), 'app.home.blueprint.route', 'blueprint.route', (['"""/device/<device_id>/ping"""'], {}), "('/device/<device_id>/ping')\n", (7587, 7615), False, 'from app.home import blueprint\n'), ((8511, 8553), 'app.home.blueprint.route', 'blueprint.route', (['"""/device/<device_id>/sqf"""'], {}), "('/device/<device_id>/sqf')\n", (8526, 8553), False, 'from app.home import blueprint\n'), ((9304, 
9364), 'app.home.blueprint.route', 'blueprint.route', (['"""/download/<table>/<host>/<ts>/<interface>"""'], {}), "('/download/<table>/<host>/<ts>/<interface>')\n", (9319, 9364), False, 'from app.home import blueprint\n'), ((9754, 9784), 'app.home.blueprint.route', 'blueprint.route', (['"""/<template>"""'], {}), "('/<template>')\n", (9769, 9784), False, 'from app.home import blueprint\n'), ((1007, 1037), 'app.base.forms.AddNewIPphaseone', 'AddNewIPphaseone', (['request.form'], {}), '(request.form)\n', (1023, 1037), False, 'from app.base.forms import AddNewIPphaseone, AddNewIPphasetwo, AddNewInterface\n'), ((1680, 1836), 'flask.render_template', 'render_template', (['"""index.html"""'], {'form': 'phaseoneform', 'client': 'client', 'alarm': 'alarm_notification', 'counter': 'counter', 'all_sensor': 'all_sensor', 'selfmon': 'selfmon'}), "('index.html', form=phaseoneform, client=client, alarm=\n alarm_notification, counter=counter, all_sensor=all_sensor, selfmon=selfmon\n )\n", (1695, 1836), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((1962, 1992), 'app.base.forms.AddNewIPphasetwo', 'AddNewIPphasetwo', (['request.form'], {}), '(request.form)\n', (1978, 1992), False, 'from app.base.forms import AddNewIPphaseone, AddNewIPphasetwo, AddNewInterface\n'), ((3177, 3360), 'flask.render_template', 'render_template', (['"""addnewparentnext.html"""'], {'form': 'phasetwoform', 'ipv': 'ip', 'sysnamev': 'sysname', 'sysdescrv': 'sysdescr', 'syslocationv': 'syslocation', 'alarm': 'alarm_notification', 'counter': 'counter'}), "('addnewparentnext.html', form=phasetwoform, ipv=ip,\n sysnamev=sysname, sysdescrv=sysdescr, syslocationv=syslocation, alarm=\n alarm_notification, counter=counter)\n", (3192, 3360), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((9610, 9645), 'flask.send_file', 'send_file', (['path'], {'as_attachment': '(True)'}), '(path, as_attachment=True)\n', (9619, 9645), False, 
'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((2675, 2715), 'flask.flash', 'flash', (['"""IP has successfully been added."""'], {}), "('IP has successfully been added.')\n", (2680, 2715), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((4206, 4235), 'app.base.forms.AddNewInterface', 'AddNewInterface', (['request.form'], {}), '(request.form)\n', (4221, 4235), False, 'from app.base.forms import AddNewIPphaseone, AddNewIPphasetwo, AddNewInterface\n'), ((5122, 5483), 'flask.render_template', 'render_template', (['"""device.html"""'], {'form': 'interfaceform', 'by_id': 'data_by_id', 'logs': 'lastrow_log', 'uptime_sparkline': 'uptime_sparkline', 'traffic_sparkline_23': 'traffic_sparkline_23', 'traffic_sparkline_24': 'traffic_sparkline_24', 'ping_sparkline': 'ping_sparkline', 'sqf_sparkline': 'sqf_sparkline', 'client': 'client', 'alarm': 'alarm_notification', 'counter': 'counter', 'selfmon': 'selfmon'}), "('device.html', form=interfaceform, by_id=data_by_id, logs=\n lastrow_log, uptime_sparkline=uptime_sparkline, traffic_sparkline_23=\n traffic_sparkline_23, traffic_sparkline_24=traffic_sparkline_24,\n ping_sparkline=ping_sparkline, sqf_sparkline=sqf_sparkline, client=\n client, alarm=alarm_notification, counter=counter, selfmon=selfmon)\n", (5137, 5483), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((6114, 6306), 'flask.render_template', 'render_template', (['"""uptime.html"""'], {'uptime': 'uptime', 'uptime_graph': 'uptime_graph', 'client': 'client', 'alarm': 'alarm_notification', 'counter': 'counter', 'selfmon': 'selfmon', 'host': 'host', 'ts': 'ts', 'by_id': 'data_by_id'}), "('uptime.html', uptime=uptime, uptime_graph=uptime_graph,\n client=client, alarm=alarm_notification, counter=counter, selfmon=\n selfmon, host=host, ts=ts, by_id=data_by_id)\n", (6129, 6306), False, 'from flask import render_template, redirect, url_for, request, 
flash, send_file\n'), ((7155, 7421), 'flask.render_template', 'render_template', (['"""traffic.html"""'], {'traffic': 'traffic', 'traffic_tot': 'traffic_tot', 'traffic_in': 'traffic_in', 'traffic_out': 'traffic_out', 'client': 'client', 'alarm': 'alarm_notification', 'counter': 'counter', 'selfmon': 'selfmon', 'host': 'host', 'ts': 'ts', 'interface': 'interface', 'by_id': 'data_by_id'}), "('traffic.html', traffic=traffic, traffic_tot=traffic_tot,\n traffic_in=traffic_in, traffic_out=traffic_out, client=client, alarm=\n alarm_notification, counter=counter, selfmon=selfmon, host=host, ts=ts,\n interface=interface, by_id=data_by_id)\n", (7170, 7421), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((8144, 8356), 'flask.render_template', 'render_template', (['"""ping.html"""'], {'ping': 'ping', 'pingtime': 'pingtime', 'pingmax': 'pingmax', 'pingmin': 'pingmin', 'client': 'client', 'alarm': 'alarm_notification', 'counter': 'counter', 'selfmon': 'selfmon', 'host': 'host', 'ts': 'ts', 'by_id': 'data_by_id'}), "('ping.html', ping=ping, pingtime=pingtime, pingmax=pingmax,\n pingmin=pingmin, client=client, alarm=alarm_notification, counter=\n counter, selfmon=selfmon, host=host, ts=ts, by_id=data_by_id)\n", (8159, 8356), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((8972, 9148), 'flask.render_template', 'render_template', (['"""sqf.html"""'], {'sqf': 'sqf', 'sqf_graph': 'sqf_graph', 'client': 'client', 'alarm': 'alarm_notification', 'counter': 'counter', 'selfmon': 'selfmon', 'host': 'host', 'ts': 'ts', 'by_id': 'data_by_id'}), "('sqf.html', sqf=sqf, sqf_graph=sqf_graph, client=client,\n alarm=alarm_notification, counter=counter, selfmon=selfmon, host=host,\n ts=ts, by_id=data_by_id)\n", (8987, 9148), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((10021, 10147), 'flask.render_template', 'render_template', (["(template + '.html')"], 
{'client': 'client', 'logs': 'logs', 'counter': 'counter', 'alarm': 'alarm_notification', 'selfmon': 'selfmon'}), "(template + '.html', client=client, logs=logs, counter=\n counter, alarm=alarm_notification, selfmon=selfmon)\n", (10036, 10147), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((1388, 1581), 'flask.render_template', 'render_template', (['"""index.html"""'], {'msg': '"""IP is already on the database."""', 'form': 'phaseoneform', 'client': 'client', 'alarm': 'alarm_notification', 'counter': 'counter', 'all_sensor': 'all_sensor', 'selfmon': 'selfmon'}), "('index.html', msg='IP is already on the database.', form=\n phaseoneform, client=client, alarm=alarm_notification, counter=counter,\n all_sensor=all_sensor, selfmon=selfmon)\n", (1403, 1581), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((2159, 2309), 'flask.render_template', 'render_template', (['"""addnewparentnext.html"""'], {'msg': '"""IP is already on the database."""', 'form': 'phasetwoform', 'alarm': 'alarm_notification', 'counter': 'counter'}), "('addnewparentnext.html', msg=\n 'IP is already on the database.', form=phasetwoform, alarm=\n alarm_notification, counter=counter)\n", (2174, 2309), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((2740, 2771), 'flask.url_for', 'url_for', (['"""home_blueprint.index"""'], {}), "('home_blueprint.index')\n", (2747, 2771), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((4974, 5021), 'flask.flash', 'flash', (['"""Interface has successfully been added."""'], {}), "('Interface has successfully been added.')\n", (4979, 5021), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((1211, 1307), 'flask.url_for', 'url_for', (['"""home_blueprint.addnewipphasetwo"""'], {'ip': 'ip', 'alarm': 'alarm_notification', 'counter': 'counter'}), 
"('home_blueprint.addnewipphasetwo', ip=ip, alarm=alarm_notification,\n counter=counter)\n", (1218, 1307), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((4444, 4853), 'flask.render_template', 'render_template', (['"""device.html"""'], {'msg': '"""Interface is already on the database."""', 'form': 'interfaceform', 'logs': 'lastrow_log', 'by_id': 'data_by_id', 'uptime_sparkline': 'uptime_sparkline', 'traffic_sparkline_23': 'traffic_sparkline_23', 'traffic_sparkline_24': 'traffic_sparkline_24', 'ping_sparkline': 'ping_sparkline', 'sqf_sparkline': 'sqf_sparkline', 'client': 'client', 'alarm': 'alarm_notification', 'counter': 'counter', 'selfmon': 'selfmon'}), "('device.html', msg='Interface is already on the database.',\n form=interfaceform, logs=lastrow_log, by_id=data_by_id,\n uptime_sparkline=uptime_sparkline, traffic_sparkline_23=\n traffic_sparkline_23, traffic_sparkline_24=traffic_sparkline_24,\n ping_sparkline=ping_sparkline, sqf_sparkline=sqf_sparkline, client=\n client, alarm=alarm_notification, counter=counter, selfmon=selfmon)\n", (4459, 4853), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((5050, 5081), 'flask.url_for', 'url_for', (['"""home_blueprint.index"""'], {}), "('home_blueprint.index')\n", (5057, 5081), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((5523, 5555), 'flask.render_template', 'render_template', (['"""page-404.html"""'], {}), "('page-404.html')\n", (5538, 5555), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((5593, 5625), 'flask.render_template', 'render_template', (['"""page-500.html"""'], {}), "('page-500.html')\n", (5608, 5625), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((6352, 6384), 'flask.render_template', 'render_template', (['"""page-404.html"""'], {}), "('page-404.html')\n", (6367, 6384), 
False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((6422, 6454), 'flask.render_template', 'render_template', (['"""page-500.html"""'], {}), "('page-500.html')\n", (6437, 6454), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((7462, 7494), 'flask.render_template', 'render_template', (['"""page-404.html"""'], {}), "('page-404.html')\n", (7477, 7494), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((7532, 7564), 'flask.render_template', 'render_template', (['"""page-500.html"""'], {}), "('page-500.html')\n", (7547, 7564), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((8401, 8433), 'flask.render_template', 'render_template', (['"""page-404.html"""'], {}), "('page-404.html')\n", (8416, 8433), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((8471, 8503), 'flask.render_template', 'render_template', (['"""page-500.html"""'], {}), "('page-500.html')\n", (8486, 8503), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((9194, 9226), 'flask.render_template', 'render_template', (['"""page-404.html"""'], {}), "('page-404.html')\n", (9209, 9226), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((9264, 9296), 'flask.render_template', 'render_template', (['"""page-500.html"""'], {}), "('page-500.html')\n", (9279, 9296), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((10191, 10223), 'flask.render_template', 'render_template', (['"""page-404.html"""'], {}), "('page-404.html')\n", (10206, 10223), False, 'from flask import render_template, redirect, url_for, request, flash, send_file\n'), ((10257, 10289), 'flask.render_template', 'render_template', (['"""page-500.html"""'], {}), "('page-500.html')\n", (10272, 10289), False, 
'from flask import render_template, redirect, url_for, request, flash, send_file\n')] |
import logging
from botocore.exceptions import ClientError
from aws_cloudformation_power_switch.power_switch import PowerSwitch
from aws_cloudformation_power_switch.tag import logical_id
class RDSClusterPowerSwitch(PowerSwitch):
    """Power switch for CloudFormation-managed RDS DB clusters.

    Starts and stops every ``AWS::RDS::DBCluster`` resource that belongs to
    the selected CloudFormation stacks.
    """

    def __init__(self):
        super(RDSClusterPowerSwitch, self).__init__()
        self.resource_type = "AWS::RDS::DBCluster"

    def startup(self, instance: dict):
        """Start the DB cluster described by `instance` (skipped in dry-run mode)."""
        name = logical_id(instance)
        cluster_id = self.instance_id(instance)
        logging.info("startup rds cluster %s (%s)", name, cluster_id)
        if not self.dry_run:
            try:
                self.rds.start_db_cluster(DBClusterIdentifier=cluster_id)
            except ClientError as e:
                # bug fix: this error path previously said "failed to stop"
                logging.error("failed to start %s (%s), %s", name, cluster_id, e)

    def shutdown(self, instance: dict):
        """Stop the DB cluster described by `instance` (skipped in dry-run mode)."""
        name = logical_id(instance)
        cluster_id = self.instance_id(instance)
        logging.info("shutdown rds cluster %s (%s)", name, cluster_id)
        if not self.dry_run:
            try:
                self.rds.stop_db_cluster(DBClusterIdentifier=cluster_id)
            except ClientError as e:
                logging.error("failed to stop %s (%s), %s", name, cluster_id, e)

    def instance_id(self, instance) -> str:
        """Return the cluster's DB cluster identifier."""
        return instance["DBClusterIdentifier"]

    def instance_state(self, instance) -> str:
        """Return the cluster's lifecycle status string."""
        return instance["Status"]

    def instance_needs_shutdown(self, instance) -> bool:
        """A cluster can be stopped only while its status is 'available'."""
        return self.instance_state(instance) == "available"

    def instance_needs_startup(self, instance) -> bool:
        """A cluster can be started only while its status is 'stopped'."""
        return self.instance_state(instance) == "stopped"

    @property
    def rds(self):
        """RDS client bound to the current session."""
        return self.session.client("rds")

    def select_instances(self):
        """Return the DB cluster records for all selected stack resources.

        Each `describe_db_clusters` record is augmented with a synthetic
        ``TagList`` carrying the CloudFormation stack name and logical id so
        downstream code can treat clusters like tagged resources.
        """
        result = []
        if self.rds.describe_db_clusters().get("DBClusters"):
            for r in self.stack_resources:
                instance = self.rds.describe_db_clusters(
                    DBClusterIdentifier=r["PhysicalResourceId"]
                )["DBClusters"][0]
                instance["TagList"] = [
                    {"Key": "aws:cloudformation:stack-name", "Value": r["StackName"]},
                    {
                        "Key": "aws:cloudformation:logical-id",
                        "Value": r["LogicalResourceId"],
                    },
                ]
                result.append(instance)
        # idiom fix: the original used filter(lambda i: self.verbose, result)
        if self.verbose:
            for i in result:
                logging.info(
                    "rds cluster %s (%s) in state %s",
                    logical_id(i),
                    i["DBClusterIdentifier"],
                    i["Status"],
                )
        if not result and self.verbose:
            logging.info("No RDS clusters found")
        return result
| [
"aws_cloudformation_power_switch.tag.logical_id",
"logging.info",
"logging.error"
] | [((417, 437), 'aws_cloudformation_power_switch.tag.logical_id', 'logical_id', (['instance'], {}), '(instance)\n', (427, 437), False, 'from aws_cloudformation_power_switch.tag import logical_id\n'), ((494, 544), 'logging.info', 'logging.info', (['"""startup rds cluster %s"""', 'cluster_id'], {}), "('startup rds cluster %s', cluster_id)\n", (506, 544), False, 'import logging\n'), ((839, 859), 'aws_cloudformation_power_switch.tag.logical_id', 'logical_id', (['instance'], {}), '(instance)\n', (849, 859), False, 'from aws_cloudformation_power_switch.tag import logical_id\n'), ((916, 978), 'logging.info', 'logging.info', (['"""shutdown rds cluster %s (%s)"""', 'name', 'cluster_id'], {}), "('shutdown rds cluster %s (%s)', name, cluster_id)\n", (928, 978), False, 'import logging\n'), ((2669, 2706), 'logging.info', 'logging.info', (['"""No RDS clusters found"""'], {}), "('No RDS clusters found')\n", (2681, 2706), False, 'import logging\n'), ((2516, 2529), 'aws_cloudformation_power_switch.tag.logical_id', 'logical_id', (['i'], {}), '(i)\n', (2526, 2529), False, 'from aws_cloudformation_power_switch.tag import logical_id\n'), ((718, 782), 'logging.error', 'logging.error', (['"""failed to stop %s (%s), %s"""', 'name', 'cluster_id', 'e'], {}), "('failed to stop %s (%s), %s', name, cluster_id, e)\n", (731, 782), False, 'import logging\n'), ((1151, 1215), 'logging.error', 'logging.error', (['"""failed to stop %s (%s), %s"""', 'name', 'cluster_id', 'e'], {}), "('failed to stop %s (%s), %s', name, cluster_id, e)\n", (1164, 1215), False, 'import logging\n')] |
# Generated by Django 2.2.1 on 2019-05-09 21:08
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``matchapp.UserProfile`` model to ``Person``."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('matchapp', '0003_available_doctors'),
    ]

    operations = [
        # Only the model name changes; table data is preserved by Django.
        migrations.RenameModel(old_name='UserProfile', new_name='Person'),
    ]
| [
"django.db.migrations.RenameModel",
"django.db.migrations.swappable_dependency"
] | [((186, 243), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (217, 243), False, 'from django.db import migrations\n'), ((327, 392), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""UserProfile"""', 'new_name': '"""Person"""'}), "(old_name='UserProfile', new_name='Person')\n", (349, 392), False, 'from django.db import migrations\n')] |
#!/usr/bin/env python
import os, sys, argparse, errno, yaml, time, datetime
import rospy, rospkg
import torch, torchvision, cv2
import numpy as np
from rosky_msgs.msg import WheelsCmdStamped, Twist2DStamped
from img_recognition.msg import Inference
from cv_bridge import CvBridge, CvBridgeError
from jetcam_ros.utils import bgr8_to_jpeg
class Inference_To_Reaction(object):
    """Scale incoming car commands based on image-recognition results.

    Subscribes to inference messages and raw car commands; when the most
    confident label is "blocked" all velocity gains drop to 0 (stopping the
    robot), otherwise commands pass through with gain 1.
    """

    def __init__(self):
        self.package = "img_recognition"
        self.node_name = rospy.get_name()
        self.veh_name = self.node_name.split("/")[1]
        rospy.loginfo("{} Initializing inference_model.py......".format(self.node_name))
        # Block until the inference node has published at least one message.
        self.start = rospy.wait_for_message("/" + self.veh_name + "/inference_model/inference", Inference)

        # local parameter
        self.confidence = {}  # label -> confidence from the latest inference
        self.inference_gain = {
            "linear_velocity": [1, 1, 1],   # Vx, Vy, Vz
            "angular_velocity": [1, 1, 1],  # Ax, Ay, Az
        }

        # ros parameter
        self.confidence_threshold = self.setup_parameter("~confidence_threshold", 0.75)

        # setup the subscriber
        self.sub_msg_inference = rospy.Subscriber("~inference", Inference, self.inference_analyze, queue_size=1)
        self.sub_car_cmd = rospy.Subscriber("~sub_car_cmd", Twist2DStamped, self.cb_car_cmd, queue_size=1)

        # setup the publisher
        self.pub_car_cmd = rospy.Publisher("~pub_car_cmd", Twist2DStamped, queue_size=1)

    def inference_analyze(self, data):
        """Pick the most confident label and update the reaction gains."""
        if data is None:  # bug fix: use identity comparison for None
            return
        self.confidence = dict(zip(data.labels, data.confidence))
        recognition = max(self.confidence, key=self.confidence.get)
        if self.confidence[recognition] > self.confidence_threshold:
            self.reaction(recognition)  # return value was never used

    def reaction(self, recognition):
        """Set every gain to 0 for "blocked", otherwise back to 1.

        The original code had three identical nested loops; "free" and the
        catch-all branch both set the gains to 1, so only "blocked" is special.
        """
        gain = 0 if recognition == "blocked" else 1
        for values in self.inference_gain.values():
            for index in range(len(values)):
                values[index] = gain
        # self.setup_parameter("~inference_gain", inference_gain)

    def cb_car_cmd(self, car_cmd_msg):
        """Apply the current gains to a car command and republish it."""
        car_cmd_msg.v = car_cmd_msg.v * self.inference_gain["linear_velocity"][0]
        car_cmd_msg.omega = car_cmd_msg.omega * self.inference_gain["angular_velocity"][2]
        self.pub_msg(car_cmd_msg)

    def pub_msg(self, car_cmd_msg):
        """Publish the (possibly scaled) car command."""
        self.pub_car_cmd.publish(car_cmd_msg)

    def on_shutdown(self):
        rospy.loginfo("{} Close.".format(self.node_name))
        rospy.loginfo("{} shutdown.".format(self.node_name))
        rospy.sleep(1)
        # bug fix: the original assigned ``rospy.is_shutdown = True`` here,
        # which clobbers rospy's is_shutdown() *function*; removed.

    def setup_parameter(self, param_name, value):
        """Write `value` under `param_name` on the parameter server and return it."""
        # Write to parameter server for transparency
        rospy.set_param(param_name, value)
        rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
        return value
if __name__ == "__main__" :
rospy.init_node("inference_to_reaction", anonymous=False)
inference_to_reaction_node = Inference_To_Reaction()
rospy.on_shutdown(inference_to_reaction_node.on_shutdown)
rospy.spin()
| [
"rospy.Publisher",
"rospy.init_node",
"rospy.wait_for_message",
"rospy.set_param",
"rospy.loginfo",
"rospy.spin",
"rospy.get_name",
"rospy.sleep",
"rospy.Subscriber",
"rospy.on_shutdown"
] | [((3433, 3490), 'rospy.init_node', 'rospy.init_node', (['"""inference_to_reaction"""'], {'anonymous': '(False)'}), "('inference_to_reaction', anonymous=False)\n", (3448, 3490), False, 'import rospy, rospkg\n'), ((3552, 3609), 'rospy.on_shutdown', 'rospy.on_shutdown', (['inference_to_reaction_node.on_shutdown'], {}), '(inference_to_reaction_node.on_shutdown)\n', (3569, 3609), False, 'import rospy, rospkg\n'), ((3617, 3629), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3627, 3629), False, 'import rospy, rospkg\n'), ((466, 482), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (480, 482), False, 'import rospy, rospkg\n'), ((647, 736), 'rospy.wait_for_message', 'rospy.wait_for_message', (["('/' + self.veh_name + '/inference_model/inference')", 'Inference'], {}), "('/' + self.veh_name + '/inference_model/inference',\n Inference)\n", (669, 736), False, 'import rospy, rospkg\n'), ((1134, 1213), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~inference"""', 'Inference', 'self.inference_analyze'], {'queue_size': '(1)'}), "('~inference', Inference, self.inference_analyze, queue_size=1)\n", (1150, 1213), False, 'import rospy, rospkg\n'), ((1241, 1320), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~sub_car_cmd"""', 'Twist2DStamped', 'self.cb_car_cmd'], {'queue_size': '(1)'}), "('~sub_car_cmd', Twist2DStamped, self.cb_car_cmd, queue_size=1)\n", (1257, 1320), False, 'import rospy, rospkg\n'), ((1379, 1440), 'rospy.Publisher', 'rospy.Publisher', (['"""~pub_car_cmd"""', 'Twist2DStamped'], {'queue_size': '(1)'}), "('~pub_car_cmd', Twist2DStamped, queue_size=1)\n", (1394, 1440), False, 'import rospy, rospkg\n'), ((3047, 3061), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (3058, 3061), False, 'import rospy, rospkg\n'), ((3266, 3300), 'rospy.set_param', 'rospy.set_param', (['param_name', 'value'], {}), '(param_name, value)\n', (3281, 3300), False, 'import rospy, rospkg\n'), ((3309, 3377), 'rospy.loginfo', 'rospy.loginfo', (["('[%s] %s = %s ' % (self.node_name, 
param_name, value))"], {}), "('[%s] %s = %s ' % (self.node_name, param_name, value))\n", (3322, 3377), False, 'import rospy, rospkg\n')] |
"""Loads CartPole-v1 demonstrations and trains BC, GAIL, and AIRL models on that data.
"""
import os
import pathlib
import pickle
import tempfile
import seals # noqa: F401
import stable_baselines3 as sb3
from imitation.algorithms import adversarial, bc
from imitation.data import rollout
from imitation.util import logger, util
# Directory of this script; demonstration data is located relative to it.
dirname = os.path.dirname(__file__)
# Load pickled test demonstrations.
with open(os.path.join(dirname, "../tests/testdata/expert_models/cartpole_0/rollouts/final.pkl"), "rb") as f:
    # This is a list of `imitation.data.types.Trajectory`, where
    # every instance contains observations and actions for a single expert
    # demonstration.
    trajectories = pickle.load(f)
# Convert List[types.Trajectory] to an instance of `imitation.data.types.Transitions`.
# This is a more general dataclass containing unordered
# (observation, actions, next_observation) transitions.
transitions = rollout.flatten_trajectories(trajectories)
# Vectorized environment (2 parallel envs) shared by all three learners.
venv = util.make_vec_env("seals/CartPole-v0", n_envs=2)
# All TensorBoard and logging output goes into a fresh temporary directory.
tempdir = tempfile.TemporaryDirectory(prefix="quickstart")
tempdir_path = pathlib.Path(tempdir.name)
print(f"All Tensorboards and logging are being written inside {tempdir_path}/.")
# Train BC on expert data.
# BC also accepts as `expert_data` any PyTorch-style DataLoader that iterates over
# dictionaries containing observations and actions.
bc_logger = logger.configure(tempdir_path / "BC/")
bc_trainer = bc.BC(
    venv.observation_space,
    venv.action_space,
    expert_data=transitions,
    custom_logger=bc_logger,
)
bc_trainer.train(n_epochs=1)
# Train GAIL on expert data.
# GAIL, and AIRL also accept as `expert_data` any Pytorch-style DataLoader that
# iterates over dictionaries containing observations, actions, and next_observations.
gail_logger = logger.configure(tempdir_path / "GAIL/")
gail_trainer = adversarial.GAIL(
    venv,
    expert_data=transitions,
    expert_batch_size=32,
    # PPO generator policy; n_steps controls rollout length per update.
    gen_algo=sb3.PPO("MlpPolicy", venv, verbose=1, n_steps=1024),
    custom_logger=gail_logger,
)
gail_trainer.train(total_timesteps=2048)
# Train AIRL on expert data.
airl_logger = logger.configure(tempdir_path / "AIRL/")
airl_trainer = adversarial.AIRL(
    venv,
    expert_data=transitions,
    expert_batch_size=32,
    gen_algo=sb3.PPO("MlpPolicy", venv, verbose=1, n_steps=1024),
    custom_logger=airl_logger,
)
airl_trainer.train(total_timesteps=2048)
| [
"tempfile.TemporaryDirectory",
"imitation.util.logger.configure",
"pathlib.Path",
"imitation.algorithms.bc.BC",
"pickle.load",
"os.path.join",
"imitation.data.rollout.flatten_trajectories",
"os.path.dirname",
"imitation.util.util.make_vec_env",
"stable_baselines3.PPO"
] | [((344, 369), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (359, 369), False, 'import os\n'), ((926, 968), 'imitation.data.rollout.flatten_trajectories', 'rollout.flatten_trajectories', (['trajectories'], {}), '(trajectories)\n', (954, 968), False, 'from imitation.data import rollout\n'), ((977, 1025), 'imitation.util.util.make_vec_env', 'util.make_vec_env', (['"""seals/CartPole-v0"""'], {'n_envs': '(2)'}), "('seals/CartPole-v0', n_envs=2)\n", (994, 1025), False, 'from imitation.util import logger, util\n'), ((1037, 1085), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': '"""quickstart"""'}), "(prefix='quickstart')\n", (1064, 1085), False, 'import tempfile\n'), ((1101, 1127), 'pathlib.Path', 'pathlib.Path', (['tempdir.name'], {}), '(tempdir.name)\n', (1113, 1127), False, 'import pathlib\n'), ((1384, 1422), 'imitation.util.logger.configure', 'logger.configure', (["(tempdir_path / 'BC/')"], {}), "(tempdir_path / 'BC/')\n", (1400, 1422), False, 'from imitation.util import logger, util\n'), ((1436, 1538), 'imitation.algorithms.bc.BC', 'bc.BC', (['venv.observation_space', 'venv.action_space'], {'expert_data': 'transitions', 'custom_logger': 'bc_logger'}), '(venv.observation_space, venv.action_space, expert_data=transitions,\n custom_logger=bc_logger)\n', (1441, 1538), False, 'from imitation.algorithms import adversarial, bc\n'), ((1793, 1833), 'imitation.util.logger.configure', 'logger.configure', (["(tempdir_path / 'GAIL/')"], {}), "(tempdir_path / 'GAIL/')\n", (1809, 1833), False, 'from imitation.util import logger, util\n'), ((2116, 2156), 'imitation.util.logger.configure', 'logger.configure', (["(tempdir_path / 'AIRL/')"], {}), "(tempdir_path / 'AIRL/')\n", (2132, 2156), False, 'from imitation.util import logger, util\n'), ((697, 711), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (708, 711), False, 'import pickle\n'), ((417, 507), 'os.path.join', 'os.path.join', (['dirname', 
'"""../tests/testdata/expert_models/cartpole_0/rollouts/final.pkl"""'], {}), "(dirname,\n '../tests/testdata/expert_models/cartpole_0/rollouts/final.pkl')\n", (429, 507), False, 'import os\n'), ((1945, 1996), 'stable_baselines3.PPO', 'sb3.PPO', (['"""MlpPolicy"""', 'venv'], {'verbose': '(1)', 'n_steps': '(1024)'}), "('MlpPolicy', venv, verbose=1, n_steps=1024)\n", (1952, 1996), True, 'import stable_baselines3 as sb3\n'), ((2268, 2319), 'stable_baselines3.PPO', 'sb3.PPO', (['"""MlpPolicy"""', 'venv'], {'verbose': '(1)', 'n_steps': '(1024)'}), "('MlpPolicy', venv, verbose=1, n_steps=1024)\n", (2275, 2319), True, 'import stable_baselines3 as sb3\n')] |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import uuid
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from taiga.projects import choices as project_choices
from taiga.projects.models import Project
from taiga.projects.epics.serializers import EpicSerializer
from taiga.projects.epics.models import Epic
from taiga.projects.epics.utils import attach_extra_info as attach_epic_extra_info
from taiga.projects.utils import attach_extra_info as attach_project_extra_info
from taiga.permissions.choices import MEMBERS_PERMISSIONS, ANON_PERMISSIONS
from taiga.projects.occ import OCCResourceMixin
from tests import factories as f
from tests.utils import helper_test_http_method, reconnect_signals
from taiga.projects.votes.services import add_vote
from taiga.projects.notifications.services import add_watcher
from unittest import mock
import pytest
pytestmark = pytest.mark.django_db
def setup_function(function):
    # Re-connect signals that other tests may have disconnected, so the
    # factories in the `data` fixture trigger their expected side effects.
    reconnect_signals()
@pytest.fixture
def data():
    """Build the shared test dataset: users, four projects with decreasing
    visibility (public, private with anon perms, fully private, blocked),
    memberships, and one epic (plus a related user story) per project."""
    m = type("Models", (object,), {})

    # Users with different relationships to the projects below.
    m.registered_user = f.UserFactory.create()
    m.project_member_with_perms = f.UserFactory.create()
    m.project_member_without_perms = f.UserFactory.create()
    m.project_owner = f.UserFactory.create()
    m.other_user = f.UserFactory.create()

    # Public project: anonymous users can view and comment on epics.
    m.public_project = f.ProjectFactory(is_private=False,
                                        anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
                                        public_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)) + ["comment_epic"],
                                        owner=m.project_owner,
                                        epics_csv_uuid=uuid.uuid4().hex)
    m.public_project = attach_project_extra_info(Project.objects.all()).get(id=m.public_project.id)

    # Private project that still grants anonymous/public read permissions.
    m.private_project1 = f.ProjectFactory(is_private=True,
                                          anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
                                          public_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
                                          owner=m.project_owner,
                                          epics_csv_uuid=uuid.uuid4().hex)
    m.private_project1 = attach_project_extra_info(Project.objects.all()).get(id=m.private_project1.id)

    # Fully private project: no anonymous or public permissions at all.
    m.private_project2 = f.ProjectFactory(is_private=True,
                                          anon_permissions=[],
                                          public_permissions=[],
                                          owner=m.project_owner,
                                          epics_csv_uuid=uuid.uuid4().hex)
    m.private_project2 = attach_project_extra_info(Project.objects.all()).get(id=m.private_project2.id)

    # Like private_project2, but additionally blocked by staff.
    m.blocked_project = f.ProjectFactory(is_private=True,
                                         anon_permissions=[],
                                         public_permissions=[],
                                         owner=m.project_owner,
                                         epics_csv_uuid=uuid.uuid4().hex,
                                         blocked_code=project_choices.BLOCKED_BY_STAFF)
    m.blocked_project = attach_project_extra_info(Project.objects.all()).get(id=m.blocked_project.id)

    # Memberships: project_member_with_perms gets full member permissions in
    # every project; project_member_without_perms joins with an empty role.
    m.public_membership = f.MembershipFactory(
        project=m.public_project,
        user=m.project_member_with_perms,
        role__project=m.public_project,
        role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))

    m.private_membership1 = f.MembershipFactory(
        project=m.private_project1,
        user=m.project_member_with_perms,
        role__project=m.private_project1,
        role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))

    f.MembershipFactory(
        project=m.private_project1,
        user=m.project_member_without_perms,
        role__project=m.private_project1,
        role__permissions=[])

    m.private_membership2 = f.MembershipFactory(
        project=m.private_project2,
        user=m.project_member_with_perms,
        role__project=m.private_project2,
        role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))

    f.MembershipFactory(
        project=m.private_project2,
        user=m.project_member_without_perms,
        role__project=m.private_project2,
        role__permissions=[])

    m.blocked_membership = f.MembershipFactory(
        project=m.blocked_project,
        user=m.project_member_with_perms,
        role__project=m.blocked_project,
        role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))

    f.MembershipFactory(project=m.blocked_project,
                        user=m.project_member_without_perms,
                        role__project=m.blocked_project,
                        role__permissions=[])

    # The owner is an admin member of every project.
    f.MembershipFactory(project=m.public_project,
                        user=m.project_owner,
                        is_admin=True)

    f.MembershipFactory(project=m.private_project1,
                        user=m.project_owner,
                        is_admin=True)

    f.MembershipFactory(project=m.private_project2,
                        user=m.project_owner,
                        is_admin=True)

    f.MembershipFactory(project=m.blocked_project,
                        user=m.project_owner,
                        is_admin=True)

    # One epic per project, re-fetched with extra info attached.
    m.public_epic = f.EpicFactory(project=m.public_project,
                                  status__project=m.public_project)
    m.public_epic = attach_epic_extra_info(Epic.objects.all()).get(id=m.public_epic.id)

    m.private_epic1 = f.EpicFactory(project=m.private_project1,
                                    status__project=m.private_project1)
    m.private_epic1 = attach_epic_extra_info(Epic.objects.all()).get(id=m.private_epic1.id)

    m.private_epic2 = f.EpicFactory(project=m.private_project2,
                                    status__project=m.private_project2)
    m.private_epic2 = attach_epic_extra_info(Epic.objects.all()).get(id=m.private_epic2.id)

    m.blocked_epic = f.EpicFactory(project=m.blocked_project,
                                   status__project=m.blocked_project)
    m.blocked_epic = attach_epic_extra_info(Epic.objects.all()).get(id=m.blocked_epic.id)

    # A user story per project, each linked to its project's epic.
    m.public_us = f.UserStoryFactory(project=m.public_project)
    m.private_us1 = f.UserStoryFactory(project=m.private_project1)
    m.private_us2 = f.UserStoryFactory(project=m.private_project2)
    m.blocked_us = f.UserStoryFactory(project=m.blocked_project)

    m.public_related_us = f.RelatedUserStory(epic=m.public_epic, user_story=m.public_us)
    m.private_related_us1 = f.RelatedUserStory(epic=m.private_epic1, user_story=m.private_us1)
    m.private_related_us2 = f.RelatedUserStory(epic=m.private_epic2, user_story=m.private_us2)
    m.blocked_related_us = f.RelatedUserStory(epic=m.blocked_epic, user_story=m.blocked_us)

    # Make each epic's status the default epic status of its project.
    m.public_project.default_epic_status = m.public_epic.status
    m.public_project.save()
    m.private_project1.default_epic_status = m.private_epic1.status
    m.private_project1.save()
    m.private_project2.default_epic_status = m.private_epic2.status
    m.private_project2.save()
    m.blocked_project.default_epic_status = m.blocked_epic.status
    m.blocked_project.save()

    return m
def test_epic_list(client, data):
    """List endpoint visibility: anonymous and plain registered users see the
    two publicly readable epics; project members with perms and the owner see
    all four (including private_project2 and the blocked project)."""
    url = reverse('epics-list')

    def _listed_epics():
        # Issue the GET, check it succeeds, and return how many epics came back.
        response = client.get(url)
        assert response.status_code == 200
        return len(json.loads(response.content.decode('utf-8')))

    # Anonymous request.
    assert _listed_epics() == 2
    # Registered user with no membership.
    client.login(data.registered_user)
    assert _listed_epics() == 2
    # Member with permissions on every project.
    client.login(data.project_member_with_perms)
    assert _listed_epics() == 4
    # Project owner.
    client.login(data.project_owner)
    assert _listed_epics() == 4
def test_epic_retrieve(client, data):
    """Detail endpoint: public and private1 epics are readable by everyone;
    private2 and blocked epics require project membership (or ownership)."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (epic, expected status per actor, in the order above)
    cases = [
        (data.public_epic, [200, 200, 200, 200, 200]),
        (data.private_epic1, [200, 200, 200, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 200, 200]),
    ]
    for epic, expected in cases:
        detail_url = reverse('epics-detail', kwargs={"pk": epic.pk})
        assert helper_test_http_method(client, 'get', detail_url, None, actors) == expected
def test_epic_create(client, data):
    """POST to the list endpoint: creation needs membership with perms (201);
    anonymous gets 401, users without perms 403, blocked project 451."""
    url = reverse('epics-list')
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (ref, target project, expected status per actor)
    cases = [
        (1, data.public_project, [401, 403, 403, 201, 201]),
        (2, data.private_project1, [401, 403, 403, 201, 201]),
        (3, data.private_project2, [401, 403, 403, 201, 201]),
        (3, data.blocked_project, [401, 403, 403, 451, 451]),
    ]
    for ref, project, expected in cases:
        payload = json.dumps({
            "subject": "test",
            "ref": ref,
            "project": project.pk,
            "status": project.epic_statuses.all()[0].pk,
        })
        assert helper_test_http_method(client, 'post', url, payload, actors) == expected
def test_epic_put_update(client, data):
    """PUT full update: writable only by members with perms / owner; the
    blocked project answers 451. OCC version checking is mocked out."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 403, 403, 200, 200]),
        (data.private_epic1, [401, 403, 403, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 451, 451]),
    ]
    with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
        for epic, expected in cases:
            detail_url = reverse('epics-detail', kwargs={"pk": epic.pk})
            payload = EpicSerializer(epic).data
            payload["subject"] = "test"
            payload = json.dumps(payload)
            assert helper_test_http_method(client, 'put', detail_url, payload, actors) == expected
def test_epic_put_comment(client, data):
    """PUT that only adds a comment follows the same write-permission matrix
    as a full update. OCC version checking is mocked out."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 403, 403, 200, 200]),
        (data.private_epic1, [401, 403, 403, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 451, 451]),
    ]
    with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
        for epic, expected in cases:
            detail_url = reverse('epics-detail', kwargs={"pk": epic.pk})
            payload = EpicSerializer(epic).data
            payload["comment"] = "test comment"
            payload = json.dumps(payload)
            assert helper_test_http_method(client, 'put', detail_url, payload, actors) == expected
def test_epic_put_update_and_comment(client, data):
    """PUT changing a field and adding a comment at once obeys the same
    write-permission matrix. OCC version checking is mocked out."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 403, 403, 200, 200]),
        (data.private_epic1, [401, 403, 403, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 451, 451]),
    ]
    with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
        for epic, expected in cases:
            detail_url = reverse('epics-detail', kwargs={"pk": epic.pk})
            payload = EpicSerializer(epic).data
            payload["subject"] = "test"
            payload["comment"] = "test comment"
            payload = json.dumps(payload)
            assert helper_test_http_method(client, 'put', detail_url, payload, actors) == expected
def test_epic_put_update_with_project_change(client):
    """Moving an epic to another project via PUT requires membership with
    perms in BOTH the origin and the destination project; anything less
    is a 403."""
    user_both = f.UserFactory.create()          # member of project1 and project2
    user_origin_only = f.UserFactory.create()   # member of project1 only
    user_dest_only = f.UserFactory.create()     # member of project2 only
    user_neither = f.UserFactory.create()       # member of nothing

    project1 = f.ProjectFactory()
    project2 = f.ProjectFactory()
    project1.default_epic_status = f.EpicStatusFactory.create(project=project1)
    project2.default_epic_status = f.EpicStatusFactory.create(project=project2)
    project1.save()
    project2.save()
    project1 = attach_project_extra_info(Project.objects.all()).get(id=project1.id)
    project2 = attach_project_extra_info(Project.objects.all()).get(id=project2.id)

    # Memberships: user_both in both projects, the others in at most one.
    for project, user in ((project1, user_both),
                          (project2, user_both),
                          (project1, user_origin_only),
                          (project2, user_dest_only)):
        f.MembershipFactory(project=project,
                            user=user,
                            role__project=project,
                            role__permissions=[p[0] for p in MEMBERS_PERMISSIONS])

    epic = f.EpicFactory.create(project=project1)
    epic = attach_epic_extra_info(Epic.objects.all()).get(id=epic.id)
    url = reverse('epics-detail', kwargs={"pk": epic.pk})

    expectations = [
        (user_both, 200),         # perms in both projects -> allowed
        (user_origin_only, 403),  # perms only in origin -> denied
        (user_dest_only, 403),    # perms only in destination -> denied
        (user_neither, 403),      # no perms anywhere -> denied
    ]
    for user, expected_status in expectations:
        client.login(user)
        payload = EpicSerializer(epic).data
        payload["project"] = project2.id
        response = client.put(url, data=json.dumps(payload),
                              content_type="application/json")
        assert response.status_code == expected_status
        # Reset the epic back onto project1 for the next actor.
        epic.project = project1
        epic.save()
def test_epic_patch_update(client, data):
    """PATCH of a regular field needs write perms; blocked project -> 451.
    OCC version checking is mocked out."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 403, 403, 200, 200]),
        (data.private_epic1, [401, 403, 403, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 451, 451]),
    ]
    with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
        for epic, expected in cases:
            detail_url = reverse('epics-detail', kwargs={"pk": epic.pk})
            payload = json.dumps({"subject": "test", "version": epic.version})
            assert helper_test_http_method(client, 'patch', detail_url, payload, actors) == expected
def test_epic_patch_comment(client, data):
    """PATCH that only adds a comment: on the public epic any authenticated
    user may comment (note the [401, 200, ...] row), while the private and
    blocked epics keep the stricter matrix. OCC version checking mocked out."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 200, 200, 200, 200]),
        (data.private_epic1, [401, 403, 403, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 451, 451]),
    ]
    with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
        for epic, expected in cases:
            detail_url = reverse('epics-detail', kwargs={"pk": epic.pk})
            payload = json.dumps({"comment": "test comment", "version": epic.version})
            assert helper_test_http_method(client, 'patch', detail_url, payload, actors) == expected
def test_epic_patch_update_and_comment(client, data):
    """PATCH combining a field change with a comment uses the write-permission
    matrix (commenting alone would be looser on the public epic).
    OCC version checking is mocked out."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 403, 403, 200, 200]),
        (data.private_epic1, [401, 403, 403, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 451, 451]),
    ]
    with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
        for epic, expected in cases:
            detail_url = reverse('epics-detail', kwargs={"pk": epic.pk})
            payload = json.dumps({
                "subject": "test",
                "comment": "test comment",
                "version": epic.version
            })
            assert helper_test_http_method(client, 'patch', detail_url, payload, actors) == expected
def test_epic_delete(client, data):
    """DELETE: only the member with perms succeeds (204); the owner is left
    out of the actor list here so each epic is deleted exactly once."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
    ]
    cases = [
        (data.public_epic, [401, 403, 403, 204]),
        (data.private_epic1, [401, 403, 403, 204]),
        (data.private_epic2, [401, 403, 403, 204]),
        (data.blocked_epic, [401, 403, 403, 451]),
    ]
    for epic, expected in cases:
        detail_url = reverse('epics-detail', kwargs={"pk": epic.pk})
        assert helper_test_http_method(client, 'delete', detail_url, None, actors) == expected
def test_epic_action_bulk_create(client, data):
    """Bulk creation of epics by project id follows the write-permission
    matrix; the blocked project answers 451."""
    url = reverse('epics-bulk-create')
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic.project.pk, [401, 403, 403, 200, 200]),
        (data.private_epic1.project.pk, [401, 403, 403, 200, 200]),
        (data.private_epic2.project.pk, [401, 403, 403, 200, 200]),
        (data.blocked_epic.project.pk, [401, 403, 403, 451, 451]),
    ]
    for project_id, expected in cases:
        payload = json.dumps({
            "bulk_epics": "test1\ntest2",
            "project_id": project_id,
        })
        assert helper_test_http_method(client, 'post', url, payload, actors) == expected
def test_epic_action_upvote(client, data):
    """Upvote action: any authenticated user may vote on visible epics;
    hidden epics answer 404 instead of 401/403 (existence is not leaked)."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 200, 200, 200, 200]),
        (data.private_epic1, [401, 200, 200, 200, 200]),
        (data.private_epic2, [404, 404, 404, 200, 200]),
        (data.blocked_epic, [404, 404, 404, 451, 451]),
    ]
    for epic, expected in cases:
        action_url = reverse('epics-upvote', kwargs={"pk": epic.pk})
        assert helper_test_http_method(client, 'post', action_url, "", actors) == expected
def test_epic_action_downvote(client, data):
    """Downvote action mirrors upvote: authenticated users on visible epics,
    404 for epics the actor cannot see, 451 for the blocked project."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 200, 200, 200, 200]),
        (data.private_epic1, [401, 200, 200, 200, 200]),
        (data.private_epic2, [404, 404, 404, 200, 200]),
        (data.blocked_epic, [404, 404, 404, 451, 451]),
    ]
    for epic, expected in cases:
        action_url = reverse('epics-downvote', kwargs={"pk": epic.pk})
        assert helper_test_http_method(client, 'post', action_url, "", actors) == expected
def test_epic_voters_list(client, data):
    """Voters listing is readable wherever the epic itself is readable."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [200, 200, 200, 200, 200]),
        (data.private_epic1, [200, 200, 200, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 200, 200]),
    ]
    for epic, expected in cases:
        voters_url = reverse('epic-voters-list', kwargs={"resource_id": epic.pk})
        assert helper_test_http_method(client, 'get', voters_url, None, actors) == expected
def test_epic_voters_retrieve(client, data):
    """Retrieving a single voter (the owner, after casting a vote) follows
    the epic's read-permission matrix."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [200, 200, 200, 200, 200]),
        (data.private_epic1, [200, 200, 200, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 200, 200]),
    ]
    for epic, expected in cases:
        # Make sure the owner appears in the voters set before fetching it.
        add_vote(epic, data.project_owner)
        voter_url = reverse('epic-voters-detail',
                            kwargs={"resource_id": epic.pk,
                                    "pk": data.project_owner.pk})
        assert helper_test_http_method(client, 'get', voter_url, None, actors) == expected
def test_epic_action_watch(client, data):
    """Watch action: any authenticated user on visible epics; 404 for epics
    the actor cannot see, 451 for the blocked project."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 200, 200, 200, 200]),
        (data.private_epic1, [401, 200, 200, 200, 200]),
        (data.private_epic2, [404, 404, 404, 200, 200]),
        (data.blocked_epic, [404, 404, 404, 451, 451]),
    ]
    for epic, expected in cases:
        action_url = reverse('epics-watch', kwargs={"pk": epic.pk})
        assert helper_test_http_method(client, 'post', action_url, "", actors) == expected
def test_epic_action_unwatch(client, data):
    """Unwatch action mirrors watch: same status matrix per epic."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [401, 200, 200, 200, 200]),
        (data.private_epic1, [401, 200, 200, 200, 200]),
        (data.private_epic2, [404, 404, 404, 200, 200]),
        (data.blocked_epic, [404, 404, 404, 451, 451]),
    ]
    for epic, expected in cases:
        action_url = reverse('epics-unwatch', kwargs={"pk": epic.pk})
        assert helper_test_http_method(client, 'post', action_url, "", actors) == expected
def test_epic_watchers_list(client, data):
    """Watchers listing is readable wherever the epic itself is readable."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [200, 200, 200, 200, 200]),
        (data.private_epic1, [200, 200, 200, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 200, 200]),
    ]
    for epic, expected in cases:
        watchers_url = reverse('epic-watchers-list', kwargs={"resource_id": epic.pk})
        assert helper_test_http_method(client, 'get', watchers_url, None, actors) == expected
def test_epic_watchers_retrieve(client, data):
    """Retrieving a single watcher (the owner, registered first) follows
    the epic's read-permission matrix."""
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    cases = [
        (data.public_epic, [200, 200, 200, 200, 200]),
        (data.private_epic1, [200, 200, 200, 200, 200]),
        (data.private_epic2, [401, 403, 403, 200, 200]),
        (data.blocked_epic, [401, 403, 403, 200, 200]),
    ]
    for epic, expected in cases:
        # Make sure the owner appears in the watchers set before fetching it.
        add_watcher(epic, data.project_owner)
        watcher_url = reverse('epic-watchers-detail',
                              kwargs={"resource_id": epic.pk,
                                      "pk": data.project_owner.pk})
        assert helper_test_http_method(client, 'get', watcher_url, None, actors) == expected
def test_epics_csv(client, data):
    """CSV export is addressed by a per-project secret uuid, so every actor
    (including anonymous) gets 200 for every project, blocked ones included.

    Bug fix: the private_project2 case previously reused
    ``data.private_project1.epics_csv_uuid`` (copy-paste error), so the
    private_project2 export was never actually exercised.
    """
    url = reverse('epics-csv')
    actors = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    csv_uuids = [
        data.public_project.epics_csv_uuid,
        data.private_project1.epics_csv_uuid,
        data.private_project2.epics_csv_uuid,  # was private_project1's uuid
        data.blocked_project.epics_csv_uuid,
    ]
    for csv_uuid in csv_uuids:
        results = helper_test_http_method(
            client, 'get', "{}?uuid={}".format(url, csv_uuid), None, actors)
        assert results == [200, 200, 200, 200, 200]
| [
"tests.factories.UserStoryFactory",
"tests.factories.MembershipFactory",
"django.core.urlresolvers.reverse",
"tests.factories.RelatedUserStory",
"tests.utils.helper_test_http_method",
"taiga.projects.models.Project.objects.all",
"tests.factories.UserFactory.create",
"tests.factories.ProjectFactory",
... | [((1803, 1822), 'tests.utils.reconnect_signals', 'reconnect_signals', ([], {}), '()\n', (1820, 1822), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((1916, 1938), 'tests.factories.UserFactory.create', 'f.UserFactory.create', ([], {}), '()\n', (1936, 1938), True, 'from tests import factories as f\n'), ((1973, 1995), 'tests.factories.UserFactory.create', 'f.UserFactory.create', ([], {}), '()\n', (1993, 1995), True, 'from tests import factories as f\n'), ((2033, 2055), 'tests.factories.UserFactory.create', 'f.UserFactory.create', ([], {}), '()\n', (2053, 2055), True, 'from tests import factories as f\n'), ((2078, 2100), 'tests.factories.UserFactory.create', 'f.UserFactory.create', ([], {}), '()\n', (2098, 2100), True, 'from tests import factories as f\n'), ((2120, 2142), 'tests.factories.UserFactory.create', 'f.UserFactory.create', ([], {}), '()\n', (2140, 2142), True, 'from tests import factories as f\n'), ((4608, 4757), 'tests.factories.MembershipFactory', 'f.MembershipFactory', ([], {'project': 'm.private_project1', 'user': 'm.project_member_without_perms', 'role__project': 'm.private_project1', 'role__permissions': '[]'}), '(project=m.private_project1, user=m.\n project_member_without_perms, role__project=m.private_project1,\n role__permissions=[])\n', (4627, 4757), True, 'from tests import factories as f\n'), ((5029, 5178), 'tests.factories.MembershipFactory', 'f.MembershipFactory', ([], {'project': 'm.private_project2', 'user': 'm.project_member_without_perms', 'role__project': 'm.private_project2', 'role__permissions': '[]'}), '(project=m.private_project2, user=m.\n project_member_without_perms, role__project=m.private_project2,\n role__permissions=[])\n', (5048, 5178), True, 'from tests import factories as f\n'), ((5447, 5594), 'tests.factories.MembershipFactory', 'f.MembershipFactory', ([], {'project': 'm.blocked_project', 'user': 'm.project_member_without_perms', 'role__project': 'm.blocked_project', 
'role__permissions': '[]'}), '(project=m.blocked_project, user=m.\n project_member_without_perms, role__project=m.blocked_project,\n role__permissions=[])\n', (5466, 5594), True, 'from tests import factories as f\n'), ((5663, 5749), 'tests.factories.MembershipFactory', 'f.MembershipFactory', ([], {'project': 'm.public_project', 'user': 'm.project_owner', 'is_admin': '(True)'}), '(project=m.public_project, user=m.project_owner,\n is_admin=True)\n', (5682, 5749), True, 'from tests import factories as f\n'), ((5799, 5887), 'tests.factories.MembershipFactory', 'f.MembershipFactory', ([], {'project': 'm.private_project1', 'user': 'm.project_owner', 'is_admin': '(True)'}), '(project=m.private_project1, user=m.project_owner,\n is_admin=True)\n', (5818, 5887), True, 'from tests import factories as f\n'), ((5937, 6025), 'tests.factories.MembershipFactory', 'f.MembershipFactory', ([], {'project': 'm.private_project2', 'user': 'm.project_owner', 'is_admin': '(True)'}), '(project=m.private_project2, user=m.project_owner,\n is_admin=True)\n', (5956, 6025), True, 'from tests import factories as f\n'), ((6075, 6162), 'tests.factories.MembershipFactory', 'f.MembershipFactory', ([], {'project': 'm.blocked_project', 'user': 'm.project_owner', 'is_admin': '(True)'}), '(project=m.blocked_project, user=m.project_owner,\n is_admin=True)\n', (6094, 6162), True, 'from tests import factories as f\n'), ((6228, 6301), 'tests.factories.EpicFactory', 'f.EpicFactory', ([], {'project': 'm.public_project', 'status__project': 'm.public_project'}), '(project=m.public_project, status__project=m.public_project)\n', (6241, 6301), True, 'from tests import factories as f\n'), ((6447, 6524), 'tests.factories.EpicFactory', 'f.EpicFactory', ([], {'project': 'm.private_project1', 'status__project': 'm.private_project1'}), '(project=m.private_project1, status__project=m.private_project1)\n', (6460, 6524), True, 'from tests import factories as f\n'), ((6676, 6753), 'tests.factories.EpicFactory', 
'f.EpicFactory', ([], {'project': 'm.private_project2', 'status__project': 'm.private_project2'}), '(project=m.private_project2, status__project=m.private_project2)\n', (6689, 6753), True, 'from tests import factories as f\n'), ((6904, 6979), 'tests.factories.EpicFactory', 'f.EpicFactory', ([], {'project': 'm.blocked_project', 'status__project': 'm.blocked_project'}), '(project=m.blocked_project, status__project=m.blocked_project)\n', (6917, 6979), True, 'from tests import factories as f\n'), ((7125, 7169), 'tests.factories.UserStoryFactory', 'f.UserStoryFactory', ([], {'project': 'm.public_project'}), '(project=m.public_project)\n', (7143, 7169), True, 'from tests import factories as f\n'), ((7190, 7236), 'tests.factories.UserStoryFactory', 'f.UserStoryFactory', ([], {'project': 'm.private_project1'}), '(project=m.private_project1)\n', (7208, 7236), True, 'from tests import factories as f\n'), ((7257, 7303), 'tests.factories.UserStoryFactory', 'f.UserStoryFactory', ([], {'project': 'm.private_project2'}), '(project=m.private_project2)\n', (7275, 7303), True, 'from tests import factories as f\n'), ((7323, 7368), 'tests.factories.UserStoryFactory', 'f.UserStoryFactory', ([], {'project': 'm.blocked_project'}), '(project=m.blocked_project)\n', (7341, 7368), True, 'from tests import factories as f\n'), ((7396, 7458), 'tests.factories.RelatedUserStory', 'f.RelatedUserStory', ([], {'epic': 'm.public_epic', 'user_story': 'm.public_us'}), '(epic=m.public_epic, user_story=m.public_us)\n', (7414, 7458), True, 'from tests import factories as f\n'), ((7487, 7553), 'tests.factories.RelatedUserStory', 'f.RelatedUserStory', ([], {'epic': 'm.private_epic1', 'user_story': 'm.private_us1'}), '(epic=m.private_epic1, user_story=m.private_us1)\n', (7505, 7553), True, 'from tests import factories as f\n'), ((7582, 7648), 'tests.factories.RelatedUserStory', 'f.RelatedUserStory', ([], {'epic': 'm.private_epic2', 'user_story': 'm.private_us2'}), '(epic=m.private_epic2, 
user_story=m.private_us2)\n', (7600, 7648), True, 'from tests import factories as f\n'), ((7676, 7740), 'tests.factories.RelatedUserStory', 'f.RelatedUserStory', ([], {'epic': 'm.blocked_epic', 'user_story': 'm.blocked_us'}), '(epic=m.blocked_epic, user_story=m.blocked_us)\n', (7694, 7740), True, 'from tests import factories as f\n'), ((8185, 8206), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-list"""'], {}), "('epics-list')\n", (8192, 8206), False, 'from django.core.urlresolvers import reverse\n'), ((9052, 9111), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.public_epic.pk}"}), "('epics-detail', kwargs={'pk': data.public_epic.pk})\n", (9059, 9111), False, 'from django.core.urlresolvers import reverse\n'), ((9131, 9192), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic1.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic1.pk})\n", (9138, 9192), False, 'from django.core.urlresolvers import reverse\n'), ((9212, 9273), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic2.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic2.pk})\n", (9219, 9273), False, 'from django.core.urlresolvers import reverse\n'), ((9292, 9352), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.blocked_epic.pk}"}), "('epics-detail', kwargs={'pk': data.blocked_epic.pk})\n", (9299, 9352), False, 'from django.core.urlresolvers import reverse\n'), ((9543, 9606), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'public_url', 'None', 'users'], {}), "(client, 'get', public_url, None, users)\n", (9566, 9606), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((9669, 9734), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'private_url1', 'None', 'users'], 
{}), "(client, 'get', private_url1, None, users)\n", (9692, 9734), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((9797, 9862), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'private_url2', 'None', 'users'], {}), "(client, 'get', private_url2, None, users)\n", (9820, 9862), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((9925, 9989), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'blocked_url', 'None', 'users'], {}), "(client, 'get', blocked_url, None, users)\n", (9948, 9989), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((10086, 10107), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-list"""'], {}), "('epics-list')\n", (10093, 10107), False, 'from django.core.urlresolvers import reverse\n'), ((10489, 10553), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'url', 'create_data', 'users'], {}), "(client, 'post', url, create_data, users)\n", (10512, 10553), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((10812, 10876), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'url', 'create_data', 'users'], {}), "(client, 'post', url, create_data, users)\n", (10835, 10876), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((11135, 11199), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'url', 'create_data', 'users'], {}), "(client, 'post', url, create_data, users)\n", (11158, 11199), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((11456, 11520), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'url', 'create_data', 'users'], {}), "(client, 'post', url, create_data, users)\n", (11479, 11520), False, 'from 
tests.utils import helper_test_http_method, reconnect_signals\n'), ((11628, 11687), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.public_epic.pk}"}), "('epics-detail', kwargs={'pk': data.public_epic.pk})\n", (11635, 11687), False, 'from django.core.urlresolvers import reverse\n'), ((11707, 11768), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic1.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic1.pk})\n", (11714, 11768), False, 'from django.core.urlresolvers import reverse\n'), ((11788, 11849), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic2.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic2.pk})\n", (11795, 11849), False, 'from django.core.urlresolvers import reverse\n'), ((11868, 11928), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.blocked_epic.pk}"}), "('epics-detail', kwargs={'pk': data.blocked_epic.pk})\n", (11875, 11928), False, 'from django.core.urlresolvers import reverse\n'), ((13364, 13423), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.public_epic.pk}"}), "('epics-detail', kwargs={'pk': data.public_epic.pk})\n", (13371, 13423), False, 'from django.core.urlresolvers import reverse\n'), ((13443, 13504), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic1.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic1.pk})\n", (13450, 13504), False, 'from django.core.urlresolvers import reverse\n'), ((13524, 13585), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic2.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic2.pk})\n", (13531, 13585), False, 'from django.core.urlresolvers import reverse\n'), ((13604, 13664), 'django.core.urlresolvers.reverse', 
'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.blocked_epic.pk}"}), "('epics-detail', kwargs={'pk': data.blocked_epic.pk})\n", (13611, 13664), False, 'from django.core.urlresolvers import reverse\n'), ((15143, 15202), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.public_epic.pk}"}), "('epics-detail', kwargs={'pk': data.public_epic.pk})\n", (15150, 15202), False, 'from django.core.urlresolvers import reverse\n'), ((15222, 15283), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic1.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic1.pk})\n", (15229, 15283), False, 'from django.core.urlresolvers import reverse\n'), ((15303, 15364), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic2.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic2.pk})\n", (15310, 15364), False, 'from django.core.urlresolvers import reverse\n'), ((15383, 15443), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.blocked_epic.pk}"}), "('epics-detail', kwargs={'pk': data.blocked_epic.pk})\n", (15390, 15443), False, 'from django.core.urlresolvers import reverse\n'), ((17071, 17093), 'tests.factories.UserFactory.create', 'f.UserFactory.create', ([], {}), '()\n', (17091, 17093), True, 'from tests import factories as f\n'), ((17106, 17128), 'tests.factories.UserFactory.create', 'f.UserFactory.create', ([], {}), '()\n', (17126, 17128), True, 'from tests import factories as f\n'), ((17141, 17163), 'tests.factories.UserFactory.create', 'f.UserFactory.create', ([], {}), '()\n', (17161, 17163), True, 'from tests import factories as f\n'), ((17176, 17198), 'tests.factories.UserFactory.create', 'f.UserFactory.create', ([], {}), '()\n', (17196, 17198), True, 'from tests import factories as f\n'), ((17214, 17232), 'tests.factories.ProjectFactory', 'f.ProjectFactory', ([], {}), 
'()\n', (17230, 17232), True, 'from tests import factories as f\n'), ((17248, 17266), 'tests.factories.ProjectFactory', 'f.ProjectFactory', ([], {}), '()\n', (17264, 17266), True, 'from tests import factories as f\n'), ((17287, 17331), 'tests.factories.EpicStatusFactory.create', 'f.EpicStatusFactory.create', ([], {'project': 'project1'}), '(project=project1)\n', (17313, 17331), True, 'from tests import factories as f\n'), ((17351, 17395), 'tests.factories.EpicStatusFactory.create', 'f.EpicStatusFactory.create', ([], {'project': 'project2'}), '(project=project2)\n', (17377, 17395), True, 'from tests import factories as f\n'), ((18580, 18618), 'tests.factories.EpicFactory.create', 'f.EpicFactory.create', ([], {'project': 'project1'}), '(project=project1)\n', (18600, 18618), True, 'from tests import factories as f\n'), ((18700, 18747), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': epic.pk}"}), "('epics-detail', kwargs={'pk': epic.pk})\n", (18707, 18747), False, 'from django.core.urlresolvers import reverse\n'), ((18921, 18942), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (18931, 18942), False, 'from taiga.base.utils import json\n'), ((19288, 19309), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (19298, 19309), False, 'from taiga.base.utils import json\n'), ((19661, 19682), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (19671, 19682), False, 'from taiga.base.utils import json\n'), ((20024, 20045), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (20034, 20045), False, 'from taiga.base.utils import json\n'), ((20273, 20332), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.public_epic.pk}"}), "('epics-detail', kwargs={'pk': data.public_epic.pk})\n", (20280, 20332), False, 'from django.core.urlresolvers import reverse\n'), ((20352, 
20413), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic1.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic1.pk})\n", (20359, 20413), False, 'from django.core.urlresolvers import reverse\n'), ((20433, 20494), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic2.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic2.pk})\n", (20440, 20494), False, 'from django.core.urlresolvers import reverse\n'), ((20513, 20573), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.blocked_epic.pk}"}), "('epics-detail', kwargs={'pk': data.blocked_epic.pk})\n", (20520, 20573), False, 'from django.core.urlresolvers import reverse\n'), ((21831, 21890), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.public_epic.pk}"}), "('epics-detail', kwargs={'pk': data.public_epic.pk})\n", (21838, 21890), False, 'from django.core.urlresolvers import reverse\n'), ((21910, 21971), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic1.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic1.pk})\n", (21917, 21971), False, 'from django.core.urlresolvers import reverse\n'), ((21991, 22052), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic2.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic2.pk})\n", (21998, 22052), False, 'from django.core.urlresolvers import reverse\n'), ((22071, 22131), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.blocked_epic.pk}"}), "('epics-detail', kwargs={'pk': data.blocked_epic.pk})\n", (22078, 22131), False, 'from django.core.urlresolvers import reverse\n'), ((23432, 23491), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.public_epic.pk}"}), 
"('epics-detail', kwargs={'pk': data.public_epic.pk})\n", (23439, 23491), False, 'from django.core.urlresolvers import reverse\n'), ((23511, 23572), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic1.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic1.pk})\n", (23518, 23572), False, 'from django.core.urlresolvers import reverse\n'), ((23592, 23653), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic2.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic2.pk})\n", (23599, 23653), False, 'from django.core.urlresolvers import reverse\n'), ((23672, 23732), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.blocked_epic.pk}"}), "('epics-detail', kwargs={'pk': data.blocked_epic.pk})\n", (23679, 23732), False, 'from django.core.urlresolvers import reverse\n'), ((25275, 25334), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.public_epic.pk}"}), "('epics-detail', kwargs={'pk': data.public_epic.pk})\n", (25282, 25334), False, 'from django.core.urlresolvers import reverse\n'), ((25354, 25415), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic1.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic1.pk})\n", (25361, 25415), False, 'from django.core.urlresolvers import reverse\n'), ((25435, 25496), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.private_epic2.pk}"}), "('epics-detail', kwargs={'pk': data.private_epic2.pk})\n", (25442, 25496), False, 'from django.core.urlresolvers import reverse\n'), ((25515, 25575), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-detail"""'], {'kwargs': "{'pk': data.blocked_epic.pk}"}), "('epics-detail', kwargs={'pk': data.blocked_epic.pk})\n", (25522, 25575), False, 'from django.core.urlresolvers import 
reverse\n'), ((25738, 25804), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""delete"""', 'public_url', 'None', 'users'], {}), "(client, 'delete', public_url, None, users)\n", (25761, 25804), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((25862, 25930), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""delete"""', 'private_url1', 'None', 'users'], {}), "(client, 'delete', private_url1, None, users)\n", (25885, 25930), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((25988, 26056), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""delete"""', 'private_url2', 'None', 'users'], {}), "(client, 'delete', private_url2, None, users)\n", (26011, 26056), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((26114, 26181), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""delete"""', 'blocked_url', 'None', 'users'], {}), "(client, 'delete', blocked_url, None, users)\n", (26137, 26181), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((26285, 26313), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-bulk-create"""'], {}), "('epics-bulk-create')\n", (26292, 26313), False, 'from django.core.urlresolvers import reverse\n'), ((26506, 26596), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'bulk_epics': 'test1\\ntest2', 'project_id': data.public_epic.project.pk}"], {}), "({'bulk_epics': 'test1\\ntest2', 'project_id': data.public_epic.\n project.pk})\n", (26516, 26596), False, 'from taiga.base.utils import json\n'), ((26629, 26691), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'url', 'bulk_data', 'users'], {}), "(client, 'post', url, bulk_data, users)\n", (26652, 26691), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((26757, 26849), 
'taiga.base.utils.json.dumps', 'json.dumps', (["{'bulk_epics': 'test1\\ntest2', 'project_id': data.private_epic1.project.pk}"], {}), "({'bulk_epics': 'test1\\ntest2', 'project_id': data.private_epic1.\n project.pk})\n", (26767, 26849), False, 'from taiga.base.utils import json\n'), ((26882, 26944), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'url', 'bulk_data', 'users'], {}), "(client, 'post', url, bulk_data, users)\n", (26905, 26944), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((27010, 27102), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'bulk_epics': 'test1\\ntest2', 'project_id': data.private_epic2.project.pk}"], {}), "({'bulk_epics': 'test1\\ntest2', 'project_id': data.private_epic2.\n project.pk})\n", (27020, 27102), False, 'from taiga.base.utils import json\n'), ((27135, 27197), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'url', 'bulk_data', 'users'], {}), "(client, 'post', url, bulk_data, users)\n", (27158, 27197), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((27263, 27354), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'bulk_epics': 'test1\\ntest2', 'project_id': data.blocked_epic.project.pk}"], {}), "({'bulk_epics': 'test1\\ntest2', 'project_id': data.blocked_epic.\n project.pk})\n", (27273, 27354), False, 'from taiga.base.utils import json\n'), ((27387, 27449), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'url', 'bulk_data', 'users'], {}), "(client, 'post', url, bulk_data, users)\n", (27410, 27449), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((27560, 27619), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-upvote"""'], {'kwargs': "{'pk': data.public_epic.pk}"}), "('epics-upvote', kwargs={'pk': data.public_epic.pk})\n", (27567, 27619), False, 'from django.core.urlresolvers import 
reverse\n'), ((27639, 27700), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-upvote"""'], {'kwargs': "{'pk': data.private_epic1.pk}"}), "('epics-upvote', kwargs={'pk': data.private_epic1.pk})\n", (27646, 27700), False, 'from django.core.urlresolvers import reverse\n'), ((27720, 27781), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-upvote"""'], {'kwargs': "{'pk': data.private_epic2.pk}"}), "('epics-upvote', kwargs={'pk': data.private_epic2.pk})\n", (27727, 27781), False, 'from django.core.urlresolvers import reverse\n'), ((27800, 27860), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-upvote"""'], {'kwargs': "{'pk': data.blocked_epic.pk}"}), "('epics-upvote', kwargs={'pk': data.blocked_epic.pk})\n", (27807, 27860), False, 'from django.core.urlresolvers import reverse\n'), ((28051, 28113), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'public_url', '""""""', 'users'], {}), "(client, 'post', public_url, '', users)\n", (28074, 28113), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((28176, 28240), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'private_url1', '""""""', 'users'], {}), "(client, 'post', private_url1, '', users)\n", (28199, 28240), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((28303, 28367), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'private_url2', '""""""', 'users'], {}), "(client, 'post', private_url2, '', users)\n", (28326, 28367), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((28430, 28493), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'blocked_url', '""""""', 'users'], {}), "(client, 'post', blocked_url, '', users)\n", (28453, 28493), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((28606, 
28667), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-downvote"""'], {'kwargs': "{'pk': data.public_epic.pk}"}), "('epics-downvote', kwargs={'pk': data.public_epic.pk})\n", (28613, 28667), False, 'from django.core.urlresolvers import reverse\n'), ((28687, 28750), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-downvote"""'], {'kwargs': "{'pk': data.private_epic1.pk}"}), "('epics-downvote', kwargs={'pk': data.private_epic1.pk})\n", (28694, 28750), False, 'from django.core.urlresolvers import reverse\n'), ((28770, 28833), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-downvote"""'], {'kwargs': "{'pk': data.private_epic2.pk}"}), "('epics-downvote', kwargs={'pk': data.private_epic2.pk})\n", (28777, 28833), False, 'from django.core.urlresolvers import reverse\n'), ((28852, 28914), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-downvote"""'], {'kwargs': "{'pk': data.blocked_epic.pk}"}), "('epics-downvote', kwargs={'pk': data.blocked_epic.pk})\n", (28859, 28914), False, 'from django.core.urlresolvers import reverse\n'), ((29105, 29167), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'public_url', '""""""', 'users'], {}), "(client, 'post', public_url, '', users)\n", (29128, 29167), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((29230, 29294), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'private_url1', '""""""', 'users'], {}), "(client, 'post', private_url1, '', users)\n", (29253, 29294), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((29357, 29421), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'private_url2', '""""""', 'users'], {}), "(client, 'post', private_url2, '', users)\n", (29380, 29421), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((29484, 29547), 
'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'blocked_url', '""""""', 'users'], {}), "(client, 'post', blocked_url, '', users)\n", (29507, 29547), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((29656, 29728), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-voters-list"""'], {'kwargs': "{'resource_id': data.public_epic.pk}"}), "('epic-voters-list', kwargs={'resource_id': data.public_epic.pk})\n", (29663, 29728), False, 'from django.core.urlresolvers import reverse\n'), ((29748, 29822), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-voters-list"""'], {'kwargs': "{'resource_id': data.private_epic1.pk}"}), "('epic-voters-list', kwargs={'resource_id': data.private_epic1.pk})\n", (29755, 29822), False, 'from django.core.urlresolvers import reverse\n'), ((29842, 29916), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-voters-list"""'], {'kwargs': "{'resource_id': data.private_epic2.pk}"}), "('epic-voters-list', kwargs={'resource_id': data.private_epic2.pk})\n", (29849, 29916), False, 'from django.core.urlresolvers import reverse\n'), ((29935, 30008), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-voters-list"""'], {'kwargs': "{'resource_id': data.blocked_epic.pk}"}), "('epic-voters-list', kwargs={'resource_id': data.blocked_epic.pk})\n", (29942, 30008), False, 'from django.core.urlresolvers import reverse\n'), ((30199, 30262), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'public_url', 'None', 'users'], {}), "(client, 'get', public_url, None, users)\n", (30222, 30262), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((30325, 30390), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'private_url1', 'None', 'users'], {}), "(client, 'get', private_url1, None, users)\n", (30348, 30390), False, 'from tests.utils import 
helper_test_http_method, reconnect_signals\n'), ((30453, 30518), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'private_url2', 'None', 'users'], {}), "(client, 'get', private_url2, None, users)\n", (30476, 30518), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((30581, 30645), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'blocked_url', 'None', 'users'], {}), "(client, 'get', blocked_url, None, users)\n", (30604, 30645), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((30745, 30791), 'taiga.projects.votes.services.add_vote', 'add_vote', (['data.public_epic', 'data.project_owner'], {}), '(data.public_epic, data.project_owner)\n', (30753, 30791), False, 'from taiga.projects.votes.services import add_vote\n'), ((30809, 30916), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-voters-detail"""'], {'kwargs': "{'resource_id': data.public_epic.pk, 'pk': data.project_owner.pk}"}), "('epic-voters-detail', kwargs={'resource_id': data.public_epic.pk,\n 'pk': data.project_owner.pk})\n", (30816, 30916), False, 'from django.core.urlresolvers import reverse\n'), ((30972, 31020), 'taiga.projects.votes.services.add_vote', 'add_vote', (['data.private_epic1', 'data.project_owner'], {}), '(data.private_epic1, data.project_owner)\n', (30980, 31020), False, 'from taiga.projects.votes.services import add_vote\n'), ((31040, 31149), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-voters-detail"""'], {'kwargs': "{'resource_id': data.private_epic1.pk, 'pk': data.project_owner.pk}"}), "('epic-voters-detail', kwargs={'resource_id': data.private_epic1.pk,\n 'pk': data.project_owner.pk})\n", (31047, 31149), False, 'from django.core.urlresolvers import reverse\n'), ((31207, 31255), 'taiga.projects.votes.services.add_vote', 'add_vote', (['data.private_epic2', 'data.project_owner'], {}), '(data.private_epic2, 
data.project_owner)\n', (31215, 31255), False, 'from taiga.projects.votes.services import add_vote\n'), ((31275, 31384), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-voters-detail"""'], {'kwargs': "{'resource_id': data.private_epic2.pk, 'pk': data.project_owner.pk}"}), "('epic-voters-detail', kwargs={'resource_id': data.private_epic2.pk,\n 'pk': data.project_owner.pk})\n", (31282, 31384), False, 'from django.core.urlresolvers import reverse\n'), ((31443, 31490), 'taiga.projects.votes.services.add_vote', 'add_vote', (['data.blocked_epic', 'data.project_owner'], {}), '(data.blocked_epic, data.project_owner)\n', (31451, 31490), False, 'from taiga.projects.votes.services import add_vote\n'), ((31509, 31617), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-voters-detail"""'], {'kwargs': "{'resource_id': data.blocked_epic.pk, 'pk': data.project_owner.pk}"}), "('epic-voters-detail', kwargs={'resource_id': data.blocked_epic.pk,\n 'pk': data.project_owner.pk})\n", (31516, 31617), False, 'from django.core.urlresolvers import reverse\n'), ((31860, 31923), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'public_url', 'None', 'users'], {}), "(client, 'get', public_url, None, users)\n", (31883, 31923), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((31986, 32051), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'private_url1', 'None', 'users'], {}), "(client, 'get', private_url1, None, users)\n", (32009, 32051), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((32114, 32179), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'private_url2', 'None', 'users'], {}), "(client, 'get', private_url2, None, users)\n", (32137, 32179), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((32242, 32306), 'tests.utils.helper_test_http_method', 
'helper_test_http_method', (['client', '"""get"""', 'blocked_url', 'None', 'users'], {}), "(client, 'get', blocked_url, None, users)\n", (32265, 32306), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((32416, 32474), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-watch"""'], {'kwargs': "{'pk': data.public_epic.pk}"}), "('epics-watch', kwargs={'pk': data.public_epic.pk})\n", (32423, 32474), False, 'from django.core.urlresolvers import reverse\n'), ((32494, 32554), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-watch"""'], {'kwargs': "{'pk': data.private_epic1.pk}"}), "('epics-watch', kwargs={'pk': data.private_epic1.pk})\n", (32501, 32554), False, 'from django.core.urlresolvers import reverse\n'), ((32574, 32634), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-watch"""'], {'kwargs': "{'pk': data.private_epic2.pk}"}), "('epics-watch', kwargs={'pk': data.private_epic2.pk})\n", (32581, 32634), False, 'from django.core.urlresolvers import reverse\n'), ((32653, 32712), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-watch"""'], {'kwargs': "{'pk': data.blocked_epic.pk}"}), "('epics-watch', kwargs={'pk': data.blocked_epic.pk})\n", (32660, 32712), False, 'from django.core.urlresolvers import reverse\n'), ((32903, 32965), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'public_url', '""""""', 'users'], {}), "(client, 'post', public_url, '', users)\n", (32926, 32965), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((33028, 33092), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'private_url1', '""""""', 'users'], {}), "(client, 'post', private_url1, '', users)\n", (33051, 33092), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((33155, 33219), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 
'private_url2', '""""""', 'users'], {}), "(client, 'post', private_url2, '', users)\n", (33178, 33219), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((33282, 33345), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'blocked_url', '""""""', 'users'], {}), "(client, 'post', blocked_url, '', users)\n", (33305, 33345), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((33457, 33517), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-unwatch"""'], {'kwargs': "{'pk': data.public_epic.pk}"}), "('epics-unwatch', kwargs={'pk': data.public_epic.pk})\n", (33464, 33517), False, 'from django.core.urlresolvers import reverse\n'), ((33537, 33599), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-unwatch"""'], {'kwargs': "{'pk': data.private_epic1.pk}"}), "('epics-unwatch', kwargs={'pk': data.private_epic1.pk})\n", (33544, 33599), False, 'from django.core.urlresolvers import reverse\n'), ((33619, 33681), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-unwatch"""'], {'kwargs': "{'pk': data.private_epic2.pk}"}), "('epics-unwatch', kwargs={'pk': data.private_epic2.pk})\n", (33626, 33681), False, 'from django.core.urlresolvers import reverse\n'), ((33700, 33761), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-unwatch"""'], {'kwargs': "{'pk': data.blocked_epic.pk}"}), "('epics-unwatch', kwargs={'pk': data.blocked_epic.pk})\n", (33707, 33761), False, 'from django.core.urlresolvers import reverse\n'), ((33952, 34014), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'public_url', '""""""', 'users'], {}), "(client, 'post', public_url, '', users)\n", (33975, 34014), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((34077, 34141), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'private_url1', '""""""', 'users'], {}), 
"(client, 'post', private_url1, '', users)\n", (34100, 34141), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((34204, 34268), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'private_url2', '""""""', 'users'], {}), "(client, 'post', private_url2, '', users)\n", (34227, 34268), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((34331, 34394), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""post"""', 'blocked_url', '""""""', 'users'], {}), "(client, 'post', blocked_url, '', users)\n", (34354, 34394), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((34505, 34579), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-watchers-list"""'], {'kwargs': "{'resource_id': data.public_epic.pk}"}), "('epic-watchers-list', kwargs={'resource_id': data.public_epic.pk})\n", (34512, 34579), False, 'from django.core.urlresolvers import reverse\n'), ((34599, 34675), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-watchers-list"""'], {'kwargs': "{'resource_id': data.private_epic1.pk}"}), "('epic-watchers-list', kwargs={'resource_id': data.private_epic1.pk})\n", (34606, 34675), False, 'from django.core.urlresolvers import reverse\n'), ((34695, 34771), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-watchers-list"""'], {'kwargs': "{'resource_id': data.private_epic2.pk}"}), "('epic-watchers-list', kwargs={'resource_id': data.private_epic2.pk})\n", (34702, 34771), False, 'from django.core.urlresolvers import reverse\n'), ((34790, 34865), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-watchers-list"""'], {'kwargs': "{'resource_id': data.blocked_epic.pk}"}), "('epic-watchers-list', kwargs={'resource_id': data.blocked_epic.pk})\n", (34797, 34865), False, 'from django.core.urlresolvers import reverse\n'), ((35056, 35119), 'tests.utils.helper_test_http_method', 'helper_test_http_method', 
(['client', '"""get"""', 'public_url', 'None', 'users'], {}), "(client, 'get', public_url, None, users)\n", (35079, 35119), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((35182, 35247), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'private_url1', 'None', 'users'], {}), "(client, 'get', private_url1, None, users)\n", (35205, 35247), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((35310, 35375), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'private_url2', 'None', 'users'], {}), "(client, 'get', private_url2, None, users)\n", (35333, 35375), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((35438, 35502), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'blocked_url', 'None', 'users'], {}), "(client, 'get', blocked_url, None, users)\n", (35461, 35502), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((35604, 35653), 'taiga.projects.notifications.services.add_watcher', 'add_watcher', (['data.public_epic', 'data.project_owner'], {}), '(data.public_epic, data.project_owner)\n', (35615, 35653), False, 'from taiga.projects.notifications.services import add_watcher\n'), ((35671, 35780), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-watchers-detail"""'], {'kwargs': "{'resource_id': data.public_epic.pk, 'pk': data.project_owner.pk}"}), "('epic-watchers-detail', kwargs={'resource_id': data.public_epic.pk,\n 'pk': data.project_owner.pk})\n", (35678, 35780), False, 'from django.core.urlresolvers import reverse\n'), ((35838, 35889), 'taiga.projects.notifications.services.add_watcher', 'add_watcher', (['data.private_epic1', 'data.project_owner'], {}), '(data.private_epic1, data.project_owner)\n', (35849, 35889), False, 'from taiga.projects.notifications.services import add_watcher\n'), ((35909, 36021), 
'django.core.urlresolvers.reverse', 'reverse', (['"""epic-watchers-detail"""'], {'kwargs': "{'resource_id': data.private_epic1.pk, 'pk': data.project_owner.pk}"}), "('epic-watchers-detail', kwargs={'resource_id': data.private_epic1.\n pk, 'pk': data.project_owner.pk})\n", (35916, 36021), False, 'from django.core.urlresolvers import reverse\n'), ((36080, 36131), 'taiga.projects.notifications.services.add_watcher', 'add_watcher', (['data.private_epic2', 'data.project_owner'], {}), '(data.private_epic2, data.project_owner)\n', (36091, 36131), False, 'from taiga.projects.notifications.services import add_watcher\n'), ((36151, 36263), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-watchers-detail"""'], {'kwargs': "{'resource_id': data.private_epic2.pk, 'pk': data.project_owner.pk}"}), "('epic-watchers-detail', kwargs={'resource_id': data.private_epic2.\n pk, 'pk': data.project_owner.pk})\n", (36158, 36263), False, 'from django.core.urlresolvers import reverse\n'), ((36323, 36373), 'taiga.projects.notifications.services.add_watcher', 'add_watcher', (['data.blocked_epic', 'data.project_owner'], {}), '(data.blocked_epic, data.project_owner)\n', (36334, 36373), False, 'from taiga.projects.notifications.services import add_watcher\n'), ((36392, 36502), 'django.core.urlresolvers.reverse', 'reverse', (['"""epic-watchers-detail"""'], {'kwargs': "{'resource_id': data.blocked_epic.pk, 'pk': data.project_owner.pk}"}), "('epic-watchers-detail', kwargs={'resource_id': data.blocked_epic.pk,\n 'pk': data.project_owner.pk})\n", (36399, 36502), False, 'from django.core.urlresolvers import reverse\n'), ((36746, 36809), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'public_url', 'None', 'users'], {}), "(client, 'get', public_url, None, users)\n", (36769, 36809), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((36872, 36937), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', 
'"""get"""', 'private_url1', 'None', 'users'], {}), "(client, 'get', private_url1, None, users)\n", (36895, 36937), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((37000, 37065), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'private_url2', 'None', 'users'], {}), "(client, 'get', private_url2, None, users)\n", (37023, 37065), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((37128, 37192), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""get"""', 'blocked_url', 'None', 'users'], {}), "(client, 'get', blocked_url, None, users)\n", (37151, 37192), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((37287, 37307), 'django.core.urlresolvers.reverse', 'reverse', (['"""epics-csv"""'], {}), "('epics-csv')\n", (37294, 37307), False, 'from django.core.urlresolvers import reverse\n'), ((12114, 12181), 'unittest.mock.patch.object', 'mock.patch.object', (['OCCResourceMixin', '"""_validate_and_update_version"""'], {}), "(OCCResourceMixin, '_validate_and_update_version')\n", (12131, 12181), False, 'from unittest import mock\n'), ((12299, 12320), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (12309, 12320), False, 'from taiga.base.utils import json\n'), ((12339, 12407), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""put"""', 'public_url', 'epic_data', 'users'], {}), "(client, 'put', public_url, epic_data, users)\n", (12362, 12407), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((12579, 12600), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (12589, 12600), False, 'from taiga.base.utils import json\n'), ((12619, 12689), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""put"""', 'private_url1', 'epic_data', 'users'], {}), "(client, 'put', 
private_url1, epic_data, users)\n", (12642, 12689), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((12861, 12882), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (12871, 12882), False, 'from taiga.base.utils import json\n'), ((12901, 12971), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""put"""', 'private_url2', 'epic_data', 'users'], {}), "(client, 'put', private_url2, epic_data, users)\n", (12924, 12971), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((13142, 13163), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (13152, 13163), False, 'from taiga.base.utils import json\n'), ((13182, 13251), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""put"""', 'blocked_url', 'epic_data', 'users'], {}), "(client, 'put', blocked_url, epic_data, users)\n", (13205, 13251), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((13850, 13917), 'unittest.mock.patch.object', 'mock.patch.object', (['OCCResourceMixin', '"""_validate_and_update_version"""'], {}), "(OCCResourceMixin, '_validate_and_update_version')\n", (13867, 13917), False, 'from unittest import mock\n'), ((14043, 14064), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (14053, 14064), False, 'from taiga.base.utils import json\n'), ((14083, 14151), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""put"""', 'public_url', 'epic_data', 'users'], {}), "(client, 'put', public_url, epic_data, users)\n", (14106, 14151), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((14331, 14352), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (14341, 14352), False, 'from taiga.base.utils import json\n'), ((14371, 14441), 'tests.utils.helper_test_http_method', 
'helper_test_http_method', (['client', '"""put"""', 'private_url1', 'epic_data', 'users'], {}), "(client, 'put', private_url1, epic_data, users)\n", (14394, 14441), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((14621, 14642), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (14631, 14642), False, 'from taiga.base.utils import json\n'), ((14661, 14731), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""put"""', 'private_url2', 'epic_data', 'users'], {}), "(client, 'put', private_url2, epic_data, users)\n", (14684, 14731), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((14910, 14931), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (14920, 14931), False, 'from taiga.base.utils import json\n'), ((14950, 15019), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""put"""', 'blocked_url', 'epic_data', 'users'], {}), "(client, 'put', blocked_url, epic_data, users)\n", (14973, 15019), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((15629, 15696), 'unittest.mock.patch.object', 'mock.patch.object', (['OCCResourceMixin', '"""_validate_and_update_version"""'], {}), "(OCCResourceMixin, '_validate_and_update_version')\n", (15646, 15696), False, 'from unittest import mock\n'), ((15860, 15881), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (15870, 15881), False, 'from taiga.base.utils import json\n'), ((15900, 15968), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""put"""', 'public_url', 'epic_data', 'users'], {}), "(client, 'put', public_url, epic_data, users)\n", (15923, 15968), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((16186, 16207), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (16196, 16207), False, 
'from taiga.base.utils import json\n'), ((16226, 16296), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""put"""', 'private_url1', 'epic_data', 'users'], {}), "(client, 'put', private_url1, epic_data, users)\n", (16249, 16296), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((16514, 16535), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (16524, 16535), False, 'from taiga.base.utils import json\n'), ((16554, 16624), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""put"""', 'private_url2', 'epic_data', 'users'], {}), "(client, 'put', private_url2, epic_data, users)\n", (16577, 16624), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((16841, 16862), 'taiga.base.utils.json.dumps', 'json.dumps', (['epic_data'], {}), '(epic_data)\n', (16851, 16862), False, 'from taiga.base.utils import json\n'), ((16881, 16950), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""put"""', 'blocked_url', 'epic_data', 'users'], {}), "(client, 'put', blocked_url, epic_data, users)\n", (16904, 16950), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((18840, 18860), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['epic'], {}), '(epic)\n', (18854, 18860), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((19207, 19227), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['epic'], {}), '(epic)\n', (19221, 19227), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((19580, 19600), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['epic'], {}), '(epic)\n', (19594, 19600), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((19943, 19963), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['epic'], {}), '(epic)\n', (19957, 
19963), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((20759, 20826), 'unittest.mock.patch.object', 'mock.patch.object', (['OCCResourceMixin', '"""_validate_and_update_version"""'], {}), "(OCCResourceMixin, '_validate_and_update_version')\n", (20776, 20826), False, 'from unittest import mock\n'), ((20849, 20917), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'subject': 'test', 'version': data.public_epic.version}"], {}), "({'subject': 'test', 'version': data.public_epic.version})\n", (20859, 20917), False, 'from taiga.base.utils import json\n'), ((20936, 21007), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""patch"""', 'public_url', 'patch_data', 'users'], {}), "(client, 'patch', public_url, patch_data, users)\n", (20959, 21007), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((21082, 21152), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'subject': 'test', 'version': data.private_epic1.version}"], {}), "({'subject': 'test', 'version': data.private_epic1.version})\n", (21092, 21152), False, 'from taiga.base.utils import json\n'), ((21171, 21244), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""patch"""', 'private_url1', 'patch_data', 'users'], {}), "(client, 'patch', private_url1, patch_data, users)\n", (21194, 21244), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((21319, 21389), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'subject': 'test', 'version': data.private_epic2.version}"], {}), "({'subject': 'test', 'version': data.private_epic2.version})\n", (21329, 21389), False, 'from taiga.base.utils import json\n'), ((21408, 21481), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""patch"""', 'private_url2', 'patch_data', 'users'], {}), "(client, 'patch', private_url2, patch_data, users)\n", (21431, 21481), False, 'from tests.utils import helper_test_http_method, 
reconnect_signals\n'), ((21556, 21625), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'subject': 'test', 'version': data.blocked_epic.version}"], {}), "({'subject': 'test', 'version': data.blocked_epic.version})\n", (21566, 21625), False, 'from taiga.base.utils import json\n'), ((21644, 21716), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""patch"""', 'blocked_url', 'patch_data', 'users'], {}), "(client, 'patch', blocked_url, patch_data, users)\n", (21667, 21716), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((22317, 22384), 'unittest.mock.patch.object', 'mock.patch.object', (['OCCResourceMixin', '"""_validate_and_update_version"""'], {}), "(OCCResourceMixin, '_validate_and_update_version')\n", (22334, 22384), False, 'from unittest import mock\n'), ((22407, 22483), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'comment': 'test comment', 'version': data.public_epic.version}"], {}), "({'comment': 'test comment', 'version': data.public_epic.version})\n", (22417, 22483), False, 'from taiga.base.utils import json\n'), ((22502, 22573), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""patch"""', 'public_url', 'patch_data', 'users'], {}), "(client, 'patch', public_url, patch_data, users)\n", (22525, 22573), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((22648, 22726), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'comment': 'test comment', 'version': data.private_epic1.version}"], {}), "({'comment': 'test comment', 'version': data.private_epic1.version})\n", (22658, 22726), False, 'from taiga.base.utils import json\n'), ((22745, 22818), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""patch"""', 'private_url1', 'patch_data', 'users'], {}), "(client, 'patch', private_url1, patch_data, users)\n", (22768, 22818), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), 
((22893, 22971), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'comment': 'test comment', 'version': data.private_epic2.version}"], {}), "({'comment': 'test comment', 'version': data.private_epic2.version})\n", (22903, 22971), False, 'from taiga.base.utils import json\n'), ((22990, 23063), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""patch"""', 'private_url2', 'patch_data', 'users'], {}), "(client, 'patch', private_url2, patch_data, users)\n", (23013, 23063), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((23138, 23215), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'comment': 'test comment', 'version': data.blocked_epic.version}"], {}), "({'comment': 'test comment', 'version': data.blocked_epic.version})\n", (23148, 23215), False, 'from taiga.base.utils import json\n'), ((23234, 23306), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""patch"""', 'blocked_url', 'patch_data', 'users'], {}), "(client, 'patch', blocked_url, patch_data, users)\n", (23257, 23306), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((23918, 23985), 'unittest.mock.patch.object', 'mock.patch.object', (['OCCResourceMixin', '"""_validate_and_update_version"""'], {}), "(OCCResourceMixin, '_validate_and_update_version')\n", (23935, 23985), False, 'from unittest import mock\n'), ((24008, 24108), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'subject': 'test', 'comment': 'test comment', 'version': data.public_epic.\n version}"], {}), "({'subject': 'test', 'comment': 'test comment', 'version': data.\n public_epic.version})\n", (24018, 24108), False, 'from taiga.base.utils import json\n'), ((24168, 24239), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""patch"""', 'public_url', 'patch_data', 'users'], {}), "(client, 'patch', public_url, patch_data, users)\n", (24191, 24239), False, 'from tests.utils import 
helper_test_http_method, reconnect_signals\n'), ((24314, 24416), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'subject': 'test', 'comment': 'test comment', 'version': data.\n private_epic1.version}"], {}), "({'subject': 'test', 'comment': 'test comment', 'version': data.\n private_epic1.version})\n", (24324, 24416), False, 'from taiga.base.utils import json\n'), ((24476, 24549), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""patch"""', 'private_url1', 'patch_data', 'users'], {}), "(client, 'patch', private_url1, patch_data, users)\n", (24499, 24549), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((24624, 24726), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'subject': 'test', 'comment': 'test comment', 'version': data.\n private_epic2.version}"], {}), "({'subject': 'test', 'comment': 'test comment', 'version': data.\n private_epic2.version})\n", (24634, 24726), False, 'from taiga.base.utils import json\n'), ((24786, 24859), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""patch"""', 'private_url2', 'patch_data', 'users'], {}), "(client, 'patch', private_url2, patch_data, users)\n", (24809, 24859), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((24934, 25035), 'taiga.base.utils.json.dumps', 'json.dumps', (["{'subject': 'test', 'comment': 'test comment', 'version': data.blocked_epic\n .version}"], {}), "({'subject': 'test', 'comment': 'test comment', 'version': data.\n blocked_epic.version})\n", (24944, 25035), False, 'from taiga.base.utils import json\n'), ((25095, 25167), 'tests.utils.helper_test_http_method', 'helper_test_http_method', (['client', '"""patch"""', 'blocked_url', 'patch_data', 'users'], {}), "(client, 'patch', blocked_url, patch_data, users)\n", (25118, 25167), False, 'from tests.utils import helper_test_http_method, reconnect_signals\n'), ((12203, 12235), 'taiga.projects.epics.serializers.EpicSerializer', 
'EpicSerializer', (['data.public_epic'], {}), '(data.public_epic)\n', (12217, 12235), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((12481, 12515), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['data.private_epic1'], {}), '(data.private_epic1)\n', (12495, 12515), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((12763, 12797), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['data.private_epic2'], {}), '(data.private_epic2)\n', (12777, 12797), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((13045, 13078), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['data.blocked_epic'], {}), '(data.blocked_epic)\n', (13059, 13078), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((13939, 13971), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['data.public_epic'], {}), '(data.public_epic)\n', (13953, 13971), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((14225, 14259), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['data.private_epic1'], {}), '(data.private_epic1)\n', (14239, 14259), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((14515, 14549), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['data.private_epic2'], {}), '(data.private_epic2)\n', (14529, 14549), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((14805, 14838), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['data.blocked_epic'], {}), '(data.blocked_epic)\n', (14819, 14838), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((15718, 15750), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['data.public_epic'], {}), '(data.public_epic)\n', (15732, 15750), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), 
((16042, 16076), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['data.private_epic1'], {}), '(data.private_epic1)\n', (16056, 16076), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((16370, 16404), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['data.private_epic2'], {}), '(data.private_epic2)\n', (16384, 16404), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((16698, 16731), 'taiga.projects.epics.serializers.EpicSerializer', 'EpicSerializer', (['data.blocked_epic'], {}), '(data.blocked_epic)\n', (16712, 16731), False, 'from taiga.projects.epics.serializers import EpicSerializer\n'), ((2545, 2557), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2555, 2557), False, 'import uuid\n'), ((2612, 2633), 'taiga.projects.models.Project.objects.all', 'Project.objects.all', ([], {}), '()\n', (2631, 2633), False, 'from taiga.projects.models import Project\n'), ((3055, 3067), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3065, 3067), False, 'import uuid\n'), ((3124, 3145), 'taiga.projects.models.Project.objects.all', 'Project.objects.all', ([], {}), '()\n', (3143, 3145), False, 'from taiga.projects.models import Project\n'), ((3487, 3499), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3497, 3499), False, 'import uuid\n'), ((3556, 3577), 'taiga.projects.models.Project.objects.all', 'Project.objects.all', ([], {}), '()\n', (3575, 3577), False, 'from taiga.projects.models import Project\n'), ((3914, 3926), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3924, 3926), False, 'import uuid\n'), ((4070, 4091), 'taiga.projects.models.Project.objects.all', 'Project.objects.all', ([], {}), '()\n', (4089, 4091), False, 'from taiga.projects.models import Project\n'), ((6379, 6397), 'taiga.projects.epics.models.Epic.objects.all', 'Epic.objects.all', ([], {}), '()\n', (6395, 6397), False, 'from taiga.projects.epics.models import Epic\n'), ((6606, 6624), 
'taiga.projects.epics.models.Epic.objects.all', 'Epic.objects.all', ([], {}), '()\n', (6622, 6624), False, 'from taiga.projects.epics.models import Epic\n'), ((6835, 6853), 'taiga.projects.epics.models.Epic.objects.all', 'Epic.objects.all', ([], {}), '()\n', (6851, 6853), False, 'from taiga.projects.epics.models import Epic\n'), ((7059, 7077), 'taiga.projects.epics.models.Epic.objects.all', 'Epic.objects.all', ([], {}), '()\n', (7075, 7077), False, 'from taiga.projects.epics.models import Epic\n'), ((17576, 17597), 'taiga.projects.models.Project.objects.all', 'Project.objects.all', ([], {}), '()\n', (17595, 17597), False, 'from taiga.projects.models import Project\n'), ((17660, 17681), 'taiga.projects.models.Project.objects.all', 'Project.objects.all', ([], {}), '()\n', (17679, 17681), False, 'from taiga.projects.models import Project\n'), ((18653, 18671), 'taiga.projects.epics.models.Epic.objects.all', 'Epic.objects.all', ([], {}), '()\n', (18669, 18671), False, 'from taiga.projects.epics.models import Epic\n')] |
import os
DB_SERVER = os.getenv('WMT_DB_SERVER', 'localhost')
DB_NAME = os.getenv('WMT_DB_NAME', 'wmt_db')
DB_USERNAME = os.getenv('WMT_DB_USERNAME', 'wmt')
DB_PASSWORD = os.getenv('WMT_DB_PASSWORD', '<PASSWORD>')
| [
"os.getenv"
] | [((23, 62), 'os.getenv', 'os.getenv', (['"""WMT_DB_SERVER"""', '"""localhost"""'], {}), "('WMT_DB_SERVER', 'localhost')\n", (32, 62), False, 'import os\n'), ((73, 107), 'os.getenv', 'os.getenv', (['"""WMT_DB_NAME"""', '"""wmt_db"""'], {}), "('WMT_DB_NAME', 'wmt_db')\n", (82, 107), False, 'import os\n'), ((122, 157), 'os.getenv', 'os.getenv', (['"""WMT_DB_USERNAME"""', '"""wmt"""'], {}), "('WMT_DB_USERNAME', 'wmt')\n", (131, 157), False, 'import os\n'), ((172, 214), 'os.getenv', 'os.getenv', (['"""WMT_DB_PASSWORD"""', '"""<PASSWORD>"""'], {}), "('WMT_DB_PASSWORD', '<PASSWORD>')\n", (181, 214), False, 'import os\n')] |
import graphene
import graphql_jwt
import works.schema
import users.schema
import works.schema_relay
import people.schema
class Query(
users.schema.Query,
works.schema.Query,
works.schema_relay.RelayQuery,
people.schema.Query,
graphene.ObjectType,
):
pass
class Mutation(
users.schema.Mutation,
works.schema.Mutation,
works.schema_relay.RelayMutation,
people.schema.Mutation,
graphene.ObjectType,
):
token_auth = graphql_jwt.ObtainJSONWebToken.Field()
verify_token = graphql_jwt.Verify.Field()
refresh_token = graphql_jwt.Refresh.Field()
schema = graphene.Schema(query=Query, mutation=Mutation)
| [
"graphql_jwt.ObtainJSONWebToken.Field",
"graphene.Schema",
"graphql_jwt.Verify.Field",
"graphql_jwt.Refresh.Field"
] | [((609, 656), 'graphene.Schema', 'graphene.Schema', ([], {'query': 'Query', 'mutation': 'Mutation'}), '(query=Query, mutation=Mutation)\n', (624, 656), False, 'import graphene\n'), ((465, 503), 'graphql_jwt.ObtainJSONWebToken.Field', 'graphql_jwt.ObtainJSONWebToken.Field', ([], {}), '()\n', (501, 503), False, 'import graphql_jwt\n'), ((523, 549), 'graphql_jwt.Verify.Field', 'graphql_jwt.Verify.Field', ([], {}), '()\n', (547, 549), False, 'import graphql_jwt\n'), ((570, 597), 'graphql_jwt.Refresh.Field', 'graphql_jwt.Refresh.Field', ([], {}), '()\n', (595, 597), False, 'import graphql_jwt\n')] |
import pathlib
import configparser
class ConfigManager:
DEFAULT_CONFIG_PATH = "~/.config/notify-sync.ini"
SETTING_NOTIFICATION_ICON = "icon"
SETTING_NOTIFICATION_TIMEOUT = "timeout" # in ms
# SETTING_NOTIFICATION_URGENCY = "urgency" # 0,1,2 low, avg, urgent
SETTING_NOTIFICATION_EXEC = "exec_on_click"
def __init__(self, config_path=DEFAULT_CONFIG_PATH):
self.config_path = config_path
self.config = configparser.ConfigParser()
path = pathlib.PosixPath(self.config_path).expanduser()
if path.exists():
# read config file
with open(path, "r") as config_file:
self.config.read_file(config_file)
else:
if not path.parent.exists():
# create config dir
path.parent.mkdir(parents=True)
# set default settings
self.config["GENERAL"] = {
"api_key": "",
"notify_on_error": "no",
"notify_on_connection_changed": "no",
}
self.config["DEFAULT NOTIFICATION"] = {
"icon": "given",
"timeout": "default",
# "urgency": "default",
"exec_on_click": "",
}
# create config file
with open(path, "w") as config_file:
self.config.write(config_file)
def get_notification_setting(self, android_notification, setting):
if (
android_notification.package in self.config
and setting in self.config[android_notification.package]
):
return self.config[android_notification.package][setting]
else:
return self.config["DEFAULT NOTIFICATION"][setting]
def get_api_key(self):
return self.config["GENERAL"]["api_key"]
| [
"pathlib.PosixPath",
"configparser.ConfigParser"
] | [((447, 474), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (472, 474), False, 'import configparser\n'), ((491, 526), 'pathlib.PosixPath', 'pathlib.PosixPath', (['self.config_path'], {}), '(self.config_path)\n', (508, 526), False, 'import pathlib\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -*- mode: Python -*-
#
# (C) <NAME>, 2021
#
import os
import sys
import re
import unicodedata
import unittest
from hypothesis import given, assume, settings, HealthCheck
import hypothesis.strategies as st
# using a try block so that this makes sense if exported to logtools/aux
# and used as a test case in this settings
try:
import logtools.utils
except Exception as err:
# This is related to the development environment -------------------
from pathlib import Path
home = str(Path.home())
path = [home + "/src/logtools"]
if 'VENVPY' in os.environ:
path.extend(os.environ['VENVPY'].split(":"))
path.extend(sys.path)
sys.path = path
# END related to the development environment -------------------
import logtools.utils
# enables to modify some globals
MAX_SAMPLES = None
if __name__ == "__main__":
if "-v" in sys.argv:
MAX_SAMPLES = 50
settings.register_profile("default", suppress_health_check=(HealthCheck.too_slow,))
settings.load_profile(os.getenv(u'HYPOTHESIS_PROFILE', 'default'))
if MAX_SAMPLES is None:
MAX_SAMPLES = 5
#Unicode alphabet
ALPHABET_UCWild = st.characters(
blacklist_characters="/?#", blacklist_categories=("Cs",))
ALPHABET_UCTame = st.characters(
blacklist_characters="/?#", blacklist_categories=("Cs",),
max_codepoint=0xFD, min_codepoint=0x40)
# somewhat restricted Greek
ALPHABET_UCGreek = st.characters(
blacklist_characters="/?#", blacklist_categories=("Cs",),
max_codepoint=0x3BF, min_codepoint=0x390)
# somewhat restricted Hebrew
ALPHABET_UCHebrew = st.characters(
blacklist_characters="/?#", blacklist_categories=("Cs",),
max_codepoint=0x5DF, min_codepoint=0x5BF)
# Combine a set of printables
ALPHABET_UC = st.one_of(ALPHABET_UCHebrew, ALPHABET_UCGreek , ALPHABET_UCTame)
# Recall
#Unicode Greek and Coptic: U+0370–U+03FF
#Unicode Hebrew block extends from U+0590 to U+05FF and from U+FB1D to U+FB4F.
random_uc_string = st.text(alphabet=ALPHABET_UC, min_size=2, max_size=8)
#
# Run under unittest
#
class TestEncoding(unittest.TestCase):
    """Exploratory tests showing how unicodedata normalization and ASCII
    encoding behave on randomly generated Unicode strings."""
    # Toggled by the standalone CLI's -v flag (see __main__ block).
    DO_DEBUG_PRINT = False

    @settings(max_examples=MAX_SAMPLES)
    @given(random_uc_string)
    def test_ustring(self, s):
        """Print each generated string next to its NFKD-normalized form."""
        form = 'NFKD'
        sNorm = unicodedata.normalize(form, s)
        msg = f"test_ustring received:'{s}',\tnormalized ({form}):'{sNorm}'"
        print(msg, file=sys.stderr)

    @settings(max_examples=MAX_SAMPLES)
    @given(random_uc_string)
    def test_nustring(self, s):
        """Print each generated string after NFKD normalization followed by a
        lossy ASCII encode (non-ASCII characters are dropped)."""
        form = 'NFKD'
        sNormEnc = unicodedata.normalize(form, s).encode('ascii','ignore')
        msg = f"test_nustring received:'{s}',\tnormalized({form})/encoded(ascii) :'{sNormEnc}'"
        print(msg, file=sys.stderr)

    @settings(max_examples=MAX_SAMPLES)
    @given(random_uc_string)
    def test_ucodeNorm(self, s):
        """Print the result of the project's own logtools.utils.ucodeNorm."""
        form = 'NFKD'
        sNormEnc = logtools.utils.ucodeNorm(s)
        msg = f"test_nustring received:'{s}',\tucodeNorm returns :'{sNormEnc}'"
        print(msg, file=sys.stderr)
if __name__ == "__main__":
    # Standalone CLI: handle -h and -v ourselves; everything else (including
    # test-case selection) is delegated to unittest.main().
    if "-h" in sys.argv:
        # Fix: usage previously referenced a nonexistent class "TestUnicode"
        # and test "test_match_re"; the class here is TestEncoding.
        description = """\
 Function:
      This is a test allowing to figure out in more detail the functionality
      of the unicode python package.
      This may run either under tox or standalone. When standalone
      flags -h and -v are recognized, other flags are dealt with by unittest.main
      and may select test cases.
 Flags:
      -h print this help and quit
      -v print information messages on stderr; also reduces MAX_SAMPLES to 50
 Autonomous CLI syntax:
      python3 [-h] [-v] [TestEncoding[.<testname>]]
      e.g.    python3 TestEncoding.test_ustring
 """
        print(description)
        sys.exit(0)
    if "-v" in sys.argv:
        # Strip -v so unittest.main() does not interpret it as its own flag.
        sys.argv = [x for x in sys.argv if x != "-v"]
        TestEncoding.DO_DEBUG_PRINT = True
        sys.stderr.write("Set verbose mode\n")
    unittest.main()
| [
"hypothesis.strategies.text",
"sys.exit",
"os.getenv",
"pathlib.Path.home",
"unittest.main",
"hypothesis.settings.register_profile",
"hypothesis.strategies.one_of",
"sys.stderr.write",
"hypothesis.strategies.characters",
"hypothesis.settings",
"unicodedata.normalize",
"hypothesis.given"
] | [((970, 1058), 'hypothesis.settings.register_profile', 'settings.register_profile', (['"""default"""'], {'suppress_health_check': '(HealthCheck.too_slow,)'}), "('default', suppress_health_check=(HealthCheck.\n too_slow,))\n", (995, 1058), False, 'from hypothesis import given, assume, settings, HealthCheck\n'), ((1202, 1273), 'hypothesis.strategies.characters', 'st.characters', ([], {'blacklist_characters': '"""/?#"""', 'blacklist_categories': "('Cs',)"}), "(blacklist_characters='/?#', blacklist_categories=('Cs',))\n", (1215, 1273), True, 'import hypothesis.strategies as st\n'), ((1313, 1425), 'hypothesis.strategies.characters', 'st.characters', ([], {'blacklist_characters': '"""/?#"""', 'blacklist_categories': "('Cs',)", 'max_codepoint': '(253)', 'min_codepoint': '(64)'}), "(blacklist_characters='/?#', blacklist_categories=('Cs',),\n max_codepoint=253, min_codepoint=64)\n", (1326, 1425), True, 'import hypothesis.strategies as st\n'), ((1514, 1627), 'hypothesis.strategies.characters', 'st.characters', ([], {'blacklist_characters': '"""/?#"""', 'blacklist_categories': "('Cs',)", 'max_codepoint': '(959)', 'min_codepoint': '(912)'}), "(blacklist_characters='/?#', blacklist_categories=('Cs',),\n max_codepoint=959, min_codepoint=912)\n", (1527, 1627), True, 'import hypothesis.strategies as st\n'), ((1719, 1834), 'hypothesis.strategies.characters', 'st.characters', ([], {'blacklist_characters': '"""/?#"""', 'blacklist_categories': "('Cs',)", 'max_codepoint': '(1503)', 'min_codepoint': '(1471)'}), "(blacklist_characters='/?#', blacklist_categories=('Cs',),\n max_codepoint=1503, min_codepoint=1471)\n", (1732, 1834), True, 'import hypothesis.strategies as st\n'), ((1920, 1983), 'hypothesis.strategies.one_of', 'st.one_of', (['ALPHABET_UCHebrew', 'ALPHABET_UCGreek', 'ALPHABET_UCTame'], {}), '(ALPHABET_UCHebrew, ALPHABET_UCGreek, ALPHABET_UCTame)\n', (1929, 1983), True, 'import hypothesis.strategies as st\n'), ((2139, 2192), 'hypothesis.strategies.text', 'st.text', ([], 
{'alphabet': 'ALPHABET_UC', 'min_size': '(2)', 'max_size': '(8)'}), '(alphabet=ALPHABET_UC, min_size=2, max_size=8)\n', (2146, 2192), True, 'import hypothesis.strategies as st\n'), ((1076, 1119), 'os.getenv', 'os.getenv', (['u"""HYPOTHESIS_PROFILE"""', '"""default"""'], {}), "(u'HYPOTHESIS_PROFILE', 'default')\n", (1085, 1119), False, 'import os\n'), ((2292, 2326), 'hypothesis.settings', 'settings', ([], {'max_examples': 'MAX_SAMPLES'}), '(max_examples=MAX_SAMPLES)\n', (2300, 2326), False, 'from hypothesis import given, assume, settings, HealthCheck\n'), ((2332, 2355), 'hypothesis.given', 'given', (['random_uc_string'], {}), '(random_uc_string)\n', (2337, 2355), False, 'from hypothesis import given, assume, settings, HealthCheck\n'), ((2631, 2665), 'hypothesis.settings', 'settings', ([], {'max_examples': 'MAX_SAMPLES'}), '(max_examples=MAX_SAMPLES)\n', (2639, 2665), False, 'from hypothesis import given, assume, settings, HealthCheck\n'), ((2671, 2694), 'hypothesis.given', 'given', (['random_uc_string'], {}), '(random_uc_string)\n', (2676, 2694), False, 'from hypothesis import given, assume, settings, HealthCheck\n'), ((3018, 3052), 'hypothesis.settings', 'settings', ([], {'max_examples': 'MAX_SAMPLES'}), '(max_examples=MAX_SAMPLES)\n', (3026, 3052), False, 'from hypothesis import given, assume, settings, HealthCheck\n'), ((3058, 3081), 'hypothesis.given', 'given', (['random_uc_string'], {}), '(random_uc_string)\n', (3063, 3081), False, 'from hypothesis import given, assume, settings, HealthCheck\n'), ((4185, 4200), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4198, 4200), False, 'import unittest\n'), ((2484, 2514), 'unicodedata.normalize', 'unicodedata.normalize', (['form', 's'], {}), '(form, s)\n', (2505, 2514), False, 'import unicodedata\n'), ((3998, 4009), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4006, 4009), False, 'import sys\n'), ((4141, 4179), 'sys.stderr.write', 'sys.stderr.write', (['"""Set verbose mode\n"""'], {}), "('Set verbose 
mode\\n')\n", (4157, 4179), False, 'import sys\n'), ((548, 559), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (557, 559), False, 'from pathlib import Path\n'), ((2827, 2857), 'unicodedata.normalize', 'unicodedata.normalize', (['form', 's'], {}), '(form, s)\n', (2848, 2857), False, 'import unicodedata\n')] |
import os
import numpy as np
import tensorflow as tf
import cPickle
from utils import shared, get_name
from nn import HiddenLayer, EmbeddingLayer, LSTM, forward
class Model(object):
    """
    Network architecture: character-level bi-LSTM features feeding a
    word-level bi-LSTM tagger, with an optional CRF output layer
    (TensorFlow 1.x graph style).
    """
    def __init__(self, parameters=None, models_path=None, model_path=None):
        """
        Initialize the model. We either provide the parameters and a path where
        we store the models, or the location of a trained model.
        """
        if model_path is None:
            assert parameters and models_path
            # Create a name based on the parameters
            self.parameters = parameters
            self.name = get_name(parameters)
            # Model location
            model_path = os.path.join(models_path, self.name)
            self.model_path = model_path
            self.mappings_path = os.path.join(model_path, 'mappings.pkl')
            self.parameters_path = os.path.join(model_path, 'parameters.pkl')
            # Create directory for the model if it does not exist
            if not os.path.exists(self.model_path):
                os.makedirs(self.model_path)
            # Save the parameters to disk
            with open(self.parameters_path, 'wb') as f:
                cPickle.dump(parameters, f)
        else:
            assert parameters is None and models_path is None
            # Model location
            self.model_path = model_path
            self.mappings_path = os.path.join(model_path, 'mappings.pkl')
            self.parameters_path = os.path.join(model_path, 'parameters.pkl')
            # Load the parameters and the mappings from disk
            with open(self.parameters_path, 'rb') as f:
                self.parameters = cPickle.load(f)
            self.reload_mappings()

    def save_mappings(self, id_to_word, id_to_char, id_to_tag):
        """
        We need to save the mappings if we want to use the model later.
        """
        self.id_to_word = id_to_word
        self.id_to_char = id_to_char
        self.id_to_tag = id_to_tag
        with open(self.mappings_path, 'wb') as f:
            mappings = {
                'id_to_word': self.id_to_word,
                'id_to_char': self.id_to_char,
                'id_to_tag': self.id_to_tag,
            }
            cPickle.dump(mappings, f)

    def reload_mappings(self):
        """
        Load mappings from disk.
        """
        with open(self.mappings_path, 'rb') as f:
            mappings = cPickle.load(f)
        self.id_to_word = mappings['id_to_word']
        self.id_to_char = mappings['id_to_char']
        self.id_to_tag = mappings['id_to_tag']

    def build(self,
              dropout,
              char_dim,
              char_lstm_dim,
              char_bidirect,
              word_dim,
              word_lstm_dim,
              word_bidirect,
              lr_method,
              lr_rate,
              clip_norm,
              crf,
              is_train,
              **kwargs
              ):
        """
        Build the network graph.

        Returns (cost, f_score, train_op):
          cost     - scalar training loss (cross-entropy or negative CRF
                     log-likelihood),
          f_score  - per-token softmax scores (no CRF) or Viterbi tag
                     sequences (CRF),
          train_op - optimizer update op.

        Raises ValueError for an unknown lr_method.
        """
        # Training parameters
        n_words = len(self.id_to_word)
        n_chars = len(self.id_to_char)
        n_tags = len(self.id_to_tag)
        # Network variables
        self.word_ids = tf.placeholder(tf.int32, shape=[None, None], name='word_ids')  # shape:[batch_size, max_word_len]
        self.word_pos_ids = tf.placeholder(tf.int32, shape=[None], name='word_pos_ids')  # shape: [batch_size]
        self.char_for_ids = tf.placeholder(tf.int32, shape=[None, None, None], name='char_for_ids')  # shape: [batch_size, word_max_len, char_max_len]
        self.char_rev_ids = tf.placeholder(tf.int32, shape=[None, None, None], name='char_rev_ids')  # shape: [batch_size, word_max_len, char_max_len]
        self.char_pos_ids = tf.placeholder(tf.int32, shape=[None, None], name='char_pos_ids')  # shape: [batch_size*word_max_len, char_max_len]
        self.tag_ids = tf.placeholder(tf.int32, shape=[None, None], name='tag_ids')  # shape: [batch_size,word_max_len]
        self.tag_id_trans = tf.placeholder(tf.int32, shape=[None, None, None], name='tag_id_trans')  # shape: [batch_size,word_max_len+1,2]
        self.tag_id_index = tf.placeholder(tf.int32, shape=[None, None, None], name='tag_id_index')  # shape: [batch_size,word_max_len,2]
        # Final input (all word features)
        input_dim = 0
        inputs = []
        #
        # Word inputs
        #
        if word_dim:
            input_dim += word_dim
            # Embedding lookup is kept on CPU.
            with tf.device("/cpu:0"):
                word_layer = EmbeddingLayer(n_words, word_dim, name='word_layer')
            word_input = word_layer.link(self.word_ids)
            inputs.append(word_input)
        #
        # Chars inputs: run an LSTM over each word's characters (forward and
        # reversed) and use the final hidden state as a word feature.
        #
        if char_dim:
            input_dim += char_lstm_dim
            char_layer = EmbeddingLayer(n_chars, char_dim, name='char_layer')
            char_lstm_for = LSTM(char_dim, char_lstm_dim, with_batch=True,
                                 name='char_lstm_for')
            char_lstm_rev = LSTM(char_dim, char_lstm_dim, with_batch=True,
                                 name='char_lstm_rev')
            with tf.device("/cpu:0"):
                char_for_embedding_batch = char_layer.link(self.char_for_ids)
                char_rev_embedding_batch = char_layer.link(self.char_rev_ids)
            shape_for = tf.shape(char_for_embedding_batch)
            # reshape from [batch_size, word_max_len, char_max_len, char_dim] to [batch_size*word_max_len, char_max_len, char_dim]
            char_for_embedding = tf.reshape(char_for_embedding_batch,
                                            (shape_for[0]*shape_for[1], shape_for[2], shape_for[3]))
            shape_rev = tf.shape(char_rev_embedding_batch)
            char_rev_embedding = tf.reshape(char_rev_embedding_batch,
                                            (shape_rev[0] * shape_rev[1], shape_rev[2], shape_rev[3]))
            char_lstm_for_states = char_lstm_for.link(char_for_embedding)
            char_lstm_rev_states = char_lstm_rev.link(char_rev_embedding)
            char_lstm_for_h_trans = tf.transpose(char_lstm_for_states[1], (1, 0, 2), name='char_lstm_for_h_trans')
            char_lstm_rev_h_trans = tf.transpose(char_lstm_rev_states[1], (1, 0, 2), name='char_lstm_rev_h_trans')
            # Pick the hidden state at each word's last character position.
            char_for_output = tf.gather_nd(char_lstm_for_h_trans, self.char_pos_ids, name='char_for_output')
            char_rev_output = tf.gather_nd(char_lstm_rev_h_trans, self.char_pos_ids, name='char_rev_output')
            char_for_output_batch = tf.reshape(char_for_output, (shape_for[0], shape_for[1], char_lstm_dim))
            char_rev_output_batch = tf.reshape(char_rev_output, (shape_rev[0], shape_rev[1], char_lstm_dim))
            inputs.append(char_for_output_batch)
            if char_bidirect:
                inputs.append(char_rev_output_batch)
                input_dim += char_lstm_dim
        inputs = tf.concat(inputs, axis=-1)
        # Dropout on final input (applied only when building the train graph)
        assert dropout < 1 and 0.0 <= dropout
        if dropout:
            input_train = tf.nn.dropout(inputs, 1 - dropout)
            if is_train:
                inputs = input_train
        # LSTM for words
        word_lstm_for = LSTM(input_dim, word_lstm_dim, with_batch=True,
                             name='word_lstm_for')
        word_lstm_rev = LSTM(input_dim, word_lstm_dim, with_batch=True,
                             name='word_lstm_rev')
        # forward hidden output
        word_states_for = word_lstm_for.link(inputs)
        word_lstm_for_output = tf.transpose(word_states_for[1], (1, 0, 2), name='word_lstm_for_h_trans')
        # reverse hidden output: reverse each sequence up to its true length,
        # run the LSTM, then reverse back so positions align with the forward pass.
        inputs_rev = tf.reverse_sequence(inputs, self.word_pos_ids, seq_dim=1, batch_dim=0)
        word_states_rev = word_lstm_rev.link(inputs_rev)
        word_lstm_rev_h_trans = tf.transpose(word_states_rev[1], (1, 0, 2), name='word_lstm_rev_h_trans')
        word_lstm_rev_output = tf.reverse_sequence(word_lstm_rev_h_trans, self.word_pos_ids, seq_dim=1, batch_dim=0)
        if word_bidirect:
            final_output = tf.concat([word_lstm_for_output, word_lstm_rev_output],axis=-1)
            tanh_layer = HiddenLayer(2 * word_lstm_dim, word_lstm_dim, name='tanh_layer', activation='tanh')
            final_output = tanh_layer.link(final_output)
        else:
            final_output = word_lstm_for_output
        final_layer = HiddenLayer(word_lstm_dim, n_tags, name='final_layer')
        tags_scores = final_layer.link(final_output)
        # No CRF: plain per-token cross-entropy
        if not crf:
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.tag_ids, logits=tags_scores, name='xentropy')
            cost = tf.reduce_mean(cross_entropy, name='xentropy_mean')
        else:
            # CRF: transitions has two extra states for begin/end of sentence.
            transitions = shared((n_tags + 2, n_tags + 2), 'transitions')
            small = -1000
            b_s = np.array([[small] * n_tags + [0, small]]).astype(np.float32)
            e_s = np.array([[small] * n_tags + [small, 0]]).astype(np.float32)
            # for batch observation
            #def recurrence(prev, obs):
            #    s_len = tf.shape(obs)[0]
            #    obvs = tf.concat([obs, small * tf.ones((s_len, 2))], axis=1)
            #    observations = tf.concat([b_s, obvs, e_s], axis=0)
            #    return observations
            #tags_scores_shape = tf.shape(tags_scores)
            #obs_initial = tf.ones((tags_scores_shape[1] + 2, n_tags + 2))
            #obs_batch = tf.scan(fn=recurrence, elems=tags_scores, initializer=obs_initial)
            # Score from tags: sum of emission + transition scores along the
            # gold path, restricted to the sequence's true length.
            def recurrence_real_score(prev,obs):
                tags_score = obs[0]
                tag_id_index_ = obs[1]
                tag_id_trans_= obs[2]
                word_pos_ = obs[3] + 1
                tags_score_slice = tags_score[0:word_pos_,:]
                tag_id_index_slice = tag_id_index_[0:word_pos_,:]
                tag_id_trans_slice = tag_id_trans_[0:(word_pos_+1),:]
                real_path_score = tf.reduce_sum(tf.gather_nd(tags_score_slice, tag_id_index_slice))
                real_path_score += tf.reduce_sum(tf.gather_nd(transitions, tag_id_trans_slice))
                return tf.reshape(real_path_score,[])
            real_path_score_list = tf.scan(fn=recurrence_real_score, elems=[tags_scores, self.tag_id_index, self.tag_id_trans, self.word_pos_ids], initializer=0.0)
            # Log-sum-exp over all paths (forward algorithm), per sequence.
            def recurrence_all_path(prev, obs):
                tags_score = obs[0]
                word_pos_ = obs[1] + 1
                tags_score_slice = tags_score[0:word_pos_,:]
                s_len = tf.shape(tags_score_slice)[0]
                obvs = tf.concat([tags_score_slice, small * tf.ones((s_len, 2))], axis=1)
                observations = tf.concat([b_s, obvs, e_s], axis=0)
                all_paths_scores = forward(observations, transitions)
                return tf.reshape(all_paths_scores,[])
            all_paths_scores_list = tf.scan(fn=recurrence_all_path, elems=[tags_scores, self.word_pos_ids], initializer=0.0)
            cost = - tf.reduce_mean(real_path_score_list - all_paths_scores_list)
        # Prediction output
        if not crf:
            f_score = tf.nn.softmax(tags_scores)
        else:
            # Viterbi decoding per sequence; the result is padded with zeros
            # back to the full (max) sequence length so tf.scan shapes match.
            def recurrence_predict(prev, obs):
                tags_score = obs[0]
                word_pos_ = obs[1] + 1
                tags_score_slice = tags_score[0:word_pos_,:]
                s_len = tf.shape(tags_score_slice)[0]
                obvs = tf.concat([tags_score_slice, small * tf.ones((s_len, 2))], axis=1)
                observations = tf.concat([b_s, obvs, e_s], axis=0)
                all_paths_scores = forward(observations, transitions, viterbi=True, return_alpha=False, return_best_sequence=True)
                all_paths_scores = tf.concat([all_paths_scores, tf.zeros([tf.shape(tags_score)[0]-s_len], tf.int32)], axis=0)
                return all_paths_scores
            f_score = tf.scan(fn=recurrence_predict, elems=[tags_scores, self.word_pos_ids], initializer=tf.zeros([tf.shape(tags_scores)[1]+2], tf.int32))
        # Optimization
        tvars = tf.trainable_variables()
        grads = tf.gradients(cost, tvars)
        if clip_norm > 0:
            grads, _ = tf.clip_by_global_norm(grads, clip_norm)
        if lr_method == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(lr_rate)
        elif lr_method == 'adagrad':
            optimizer = tf.train.AdagradOptimizer(lr_rate)
        elif lr_method == 'adadelta':
            optimizer = tf.train.AdadeltaOptimizer(lr_rate)
        elif lr_method == 'adam':
            optimizer = tf.train.AdamOptimizer(lr_rate)
        elif lr_method == 'rmsprop':
            optimizer = tf.train.RMSPropOptimizer(lr_rate)
        else:
            # Fix: the original `raise("...")` raised a plain string, which is
            # itself a TypeError at runtime; raise a proper exception instead.
            raise ValueError("Not implemented learning method: %s" % lr_method)
        train_op = optimizer.apply_gradients(zip(grads, tvars))
        return cost, f_score, train_op
| [
"tensorflow.shape",
"tensorflow.transpose",
"nn.forward",
"tensorflow.gradients",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.array",
"nn.LSTM",
"tensorflow.nn.dropout",
"tensorflow.reverse_sequence",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.scan",
... | [((3248, 3309), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""word_ids"""'}), "(tf.int32, shape=[None, None], name='word_ids')\n", (3262, 3309), True, 'import tensorflow as tf\n'), ((3373, 3432), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""word_pos_ids"""'}), "(tf.int32, shape=[None], name='word_pos_ids')\n", (3387, 3432), True, 'import tensorflow as tf\n'), ((3483, 3554), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None, None]', 'name': '"""char_for_ids"""'}), "(tf.int32, shape=[None, None, None], name='char_for_ids')\n", (3497, 3554), True, 'import tensorflow as tf\n'), ((3633, 3704), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None, None]', 'name': '"""char_rev_ids"""'}), "(tf.int32, shape=[None, None, None], name='char_rev_ids')\n", (3647, 3704), True, 'import tensorflow as tf\n'), ((3783, 3848), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""char_pos_ids"""'}), "(tf.int32, shape=[None, None], name='char_pos_ids')\n", (3797, 3848), True, 'import tensorflow as tf\n'), ((3921, 3981), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""tag_ids"""'}), "(tf.int32, shape=[None, None], name='tag_ids')\n", (3935, 3981), True, 'import tensorflow as tf\n'), ((4045, 4116), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None, None]', 'name': '"""tag_id_trans"""'}), "(tf.int32, shape=[None, None, None], name='tag_id_trans')\n", (4059, 4116), True, 'import tensorflow as tf\n'), ((4185, 4256), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None, None]', 'name': '"""tag_id_index"""'}), "(tf.int32, shape=[None, None, None], name='tag_id_index')\n", (4199, 4256), True, 'import tensorflow as tf\n'), ((6942, 6968), 'tensorflow.concat', 'tf.concat', (['inputs'], 
{'axis': '(-1)'}), '(inputs, axis=-1)\n', (6951, 6968), True, 'import tensorflow as tf\n'), ((7241, 7310), 'nn.LSTM', 'LSTM', (['input_dim', 'word_lstm_dim'], {'with_batch': '(True)', 'name': '"""word_lstm_for"""'}), "(input_dim, word_lstm_dim, with_batch=True, name='word_lstm_for')\n", (7245, 7310), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((7364, 7433), 'nn.LSTM', 'LSTM', (['input_dim', 'word_lstm_dim'], {'with_batch': '(True)', 'name': '"""word_lstm_rev"""'}), "(input_dim, word_lstm_dim, with_batch=True, name='word_lstm_rev')\n", (7368, 7433), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((7580, 7653), 'tensorflow.transpose', 'tf.transpose', (['word_states_for[1]', '(1, 0, 2)'], {'name': '"""word_lstm_for_h_trans"""'}), "(word_states_for[1], (1, 0, 2), name='word_lstm_for_h_trans')\n", (7592, 7653), True, 'import tensorflow as tf\n'), ((7707, 7777), 'tensorflow.reverse_sequence', 'tf.reverse_sequence', (['inputs', 'self.word_pos_ids'], {'seq_dim': '(1)', 'batch_dim': '(0)'}), '(inputs, self.word_pos_ids, seq_dim=1, batch_dim=0)\n', (7726, 7777), True, 'import tensorflow as tf\n'), ((7867, 7940), 'tensorflow.transpose', 'tf.transpose', (['word_states_rev[1]', '(1, 0, 2)'], {'name': '"""word_lstm_rev_h_trans"""'}), "(word_states_rev[1], (1, 0, 2), name='word_lstm_rev_h_trans')\n", (7879, 7940), True, 'import tensorflow as tf\n'), ((7972, 8061), 'tensorflow.reverse_sequence', 'tf.reverse_sequence', (['word_lstm_rev_h_trans', 'self.word_pos_ids'], {'seq_dim': '(1)', 'batch_dim': '(0)'}), '(word_lstm_rev_h_trans, self.word_pos_ids, seq_dim=1,\n batch_dim=0)\n', (7991, 8061), True, 'import tensorflow as tf\n'), ((8425, 8479), 'nn.HiddenLayer', 'HiddenLayer', (['word_lstm_dim', 'n_tags'], {'name': '"""final_layer"""'}), "(word_lstm_dim, n_tags, name='final_layer')\n", (8436, 8479), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((12164, 12188), 'tensorflow.trainable_variables', 
'tf.trainable_variables', ([], {}), '()\n', (12186, 12188), True, 'import tensorflow as tf\n'), ((12205, 12230), 'tensorflow.gradients', 'tf.gradients', (['cost', 'tvars'], {}), '(cost, tvars)\n', (12217, 12230), True, 'import tensorflow as tf\n'), ((666, 686), 'utils.get_name', 'get_name', (['parameters'], {}), '(parameters)\n', (674, 686), False, 'from utils import shared, get_name\n'), ((741, 777), 'os.path.join', 'os.path.join', (['models_path', 'self.name'], {}), '(models_path, self.name)\n', (753, 777), False, 'import os\n'), ((852, 892), 'os.path.join', 'os.path.join', (['model_path', '"""mappings.pkl"""'], {}), "(model_path, 'mappings.pkl')\n", (864, 892), False, 'import os\n'), ((928, 970), 'os.path.join', 'os.path.join', (['model_path', '"""parameters.pkl"""'], {}), "(model_path, 'parameters.pkl')\n", (940, 970), False, 'import os\n'), ((1455, 1495), 'os.path.join', 'os.path.join', (['model_path', '"""mappings.pkl"""'], {}), "(model_path, 'mappings.pkl')\n", (1467, 1495), False, 'import os\n'), ((1531, 1573), 'os.path.join', 'os.path.join', (['model_path', '"""parameters.pkl"""'], {}), "(model_path, 'parameters.pkl')\n", (1543, 1573), False, 'import os\n'), ((2286, 2311), 'cPickle.dump', 'cPickle.dump', (['mappings', 'f'], {}), '(mappings, f)\n', (2298, 2311), False, 'import cPickle\n'), ((2474, 2489), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (2486, 2489), False, 'import cPickle\n'), ((4834, 4886), 'nn.EmbeddingLayer', 'EmbeddingLayer', (['n_chars', 'char_dim'], {'name': '"""char_layer"""'}), "(n_chars, char_dim, name='char_layer')\n", (4848, 4886), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((4916, 4984), 'nn.LSTM', 'LSTM', (['char_dim', 'char_lstm_dim'], {'with_batch': '(True)', 'name': '"""char_lstm_for"""'}), "(char_dim, char_lstm_dim, with_batch=True, name='char_lstm_for')\n", (4920, 4984), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((5046, 5114), 'nn.LSTM', 'LSTM', (['char_dim', 
'char_lstm_dim'], {'with_batch': '(True)', 'name': '"""char_lstm_rev"""'}), "(char_dim, char_lstm_dim, with_batch=True, name='char_lstm_rev')\n", (5050, 5114), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((5367, 5401), 'tensorflow.shape', 'tf.shape', (['char_for_embedding_batch'], {}), '(char_for_embedding_batch)\n', (5375, 5401), True, 'import tensorflow as tf\n'), ((5566, 5665), 'tensorflow.reshape', 'tf.reshape', (['char_for_embedding_batch', '(shape_for[0] * shape_for[1], shape_for[2], shape_for[3])'], {}), '(char_for_embedding_batch, (shape_for[0] * shape_for[1],\n shape_for[2], shape_for[3]))\n', (5576, 5665), True, 'import tensorflow as tf\n'), ((5728, 5762), 'tensorflow.shape', 'tf.shape', (['char_rev_embedding_batch'], {}), '(char_rev_embedding_batch)\n', (5736, 5762), True, 'import tensorflow as tf\n'), ((5796, 5895), 'tensorflow.reshape', 'tf.reshape', (['char_rev_embedding_batch', '(shape_rev[0] * shape_rev[1], shape_rev[2], shape_rev[3])'], {}), '(char_rev_embedding_batch, (shape_rev[0] * shape_rev[1],\n shape_rev[2], shape_rev[3]))\n', (5806, 5895), True, 'import tensorflow as tf\n'), ((6120, 6198), 'tensorflow.transpose', 'tf.transpose', (['char_lstm_for_states[1]', '(1, 0, 2)'], {'name': '"""char_lstm_for_h_trans"""'}), "(char_lstm_for_states[1], (1, 0, 2), name='char_lstm_for_h_trans')\n", (6132, 6198), True, 'import tensorflow as tf\n'), ((6235, 6313), 'tensorflow.transpose', 'tf.transpose', (['char_lstm_rev_states[1]', '(1, 0, 2)'], {'name': '"""char_lstm_rev_h_trans"""'}), "(char_lstm_rev_states[1], (1, 0, 2), name='char_lstm_rev_h_trans')\n", (6247, 6313), True, 'import tensorflow as tf\n'), ((6344, 6422), 'tensorflow.gather_nd', 'tf.gather_nd', (['char_lstm_for_h_trans', 'self.char_pos_ids'], {'name': '"""char_for_output"""'}), "(char_lstm_for_h_trans, self.char_pos_ids, name='char_for_output')\n", (6356, 6422), True, 'import tensorflow as tf\n'), ((6453, 6531), 'tensorflow.gather_nd', 'tf.gather_nd', 
(['char_lstm_rev_h_trans', 'self.char_pos_ids'], {'name': '"""char_rev_output"""'}), "(char_lstm_rev_h_trans, self.char_pos_ids, name='char_rev_output')\n", (6465, 6531), True, 'import tensorflow as tf\n'), ((6568, 6640), 'tensorflow.reshape', 'tf.reshape', (['char_for_output', '(shape_for[0], shape_for[1], char_lstm_dim)'], {}), '(char_for_output, (shape_for[0], shape_for[1], char_lstm_dim))\n', (6578, 6640), True, 'import tensorflow as tf\n'), ((6677, 6749), 'tensorflow.reshape', 'tf.reshape', (['char_rev_output', '(shape_rev[0], shape_rev[1], char_lstm_dim)'], {}), '(char_rev_output, (shape_rev[0], shape_rev[1], char_lstm_dim))\n', (6687, 6749), True, 'import tensorflow as tf\n'), ((7094, 7128), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inputs', '(1 - dropout)'], {}), '(inputs, 1 - dropout)\n', (7107, 7128), True, 'import tensorflow as tf\n'), ((8111, 8175), 'tensorflow.concat', 'tf.concat', (['[word_lstm_for_output, word_lstm_rev_output]'], {'axis': '(-1)'}), '([word_lstm_for_output, word_lstm_rev_output], axis=-1)\n', (8120, 8175), True, 'import tensorflow as tf\n'), ((8200, 8288), 'nn.HiddenLayer', 'HiddenLayer', (['(2 * word_lstm_dim)', 'word_lstm_dim'], {'name': '"""tanh_layer"""', 'activation': '"""tanh"""'}), "(2 * word_lstm_dim, word_lstm_dim, name='tanh_layer', activation\n ='tanh')\n", (8211, 8288), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((8598, 8707), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'self.tag_ids', 'logits': 'tags_scores', 'name': '"""xentropy"""'}), "(labels=self.tag_ids, logits=\n tags_scores, name='xentropy')\n", (8644, 8707), True, 'import tensorflow as tf\n'), ((8722, 8773), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {'name': '"""xentropy_mean"""'}), "(cross_entropy, name='xentropy_mean')\n", (8736, 8773), True, 'import tensorflow as tf\n'), ((8814, 8861), 'utils.shared', 'shared', (['(n_tags + 2, 
n_tags + 2)', '"""transitions"""'], {}), "((n_tags + 2, n_tags + 2), 'transitions')\n", (8820, 8861), False, 'from utils import shared, get_name\n'), ((10297, 10429), 'tensorflow.scan', 'tf.scan', ([], {'fn': 'recurrence_real_score', 'elems': '[tags_scores, self.tag_id_index, self.tag_id_trans, self.word_pos_ids]', 'initializer': '(0.0)'}), '(fn=recurrence_real_score, elems=[tags_scores, self.tag_id_index,\n self.tag_id_trans, self.word_pos_ids], initializer=0.0)\n', (10304, 10429), True, 'import tensorflow as tf\n'), ((10995, 11087), 'tensorflow.scan', 'tf.scan', ([], {'fn': 'recurrence_all_path', 'elems': '[tags_scores, self.word_pos_ids]', 'initializer': '(0.0)'}), '(fn=recurrence_all_path, elems=[tags_scores, self.word_pos_ids],\n initializer=0.0)\n', (11002, 11087), True, 'import tensorflow as tf\n'), ((11237, 11263), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['tags_scores'], {}), '(tags_scores)\n', (11250, 11263), True, 'import tensorflow as tf\n'), ((12280, 12320), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', 'clip_norm'], {}), '(grads, clip_norm)\n', (12302, 12320), True, 'import tensorflow as tf\n'), ((12385, 12427), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['lr_rate'], {}), '(lr_rate)\n', (12418, 12427), True, 'import tensorflow as tf\n'), ((1056, 1087), 'os.path.exists', 'os.path.exists', (['self.model_path'], {}), '(self.model_path)\n', (1070, 1087), False, 'import os\n'), ((1105, 1133), 'os.makedirs', 'os.makedirs', (['self.model_path'], {}), '(self.model_path)\n', (1116, 1133), False, 'import os\n'), ((1248, 1275), 'cPickle.dump', 'cPickle.dump', (['parameters', 'f'], {}), '(parameters, f)\n', (1260, 1275), False, 'import cPickle\n'), ((1725, 1740), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (1737, 1740), False, 'import cPickle\n'), ((4492, 4511), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (4501, 4511), True, 'import tensorflow as tf\n'), 
((4542, 4594), 'nn.EmbeddingLayer', 'EmbeddingLayer', (['n_words', 'word_dim'], {'name': '"""word_layer"""'}), "(n_words, word_dim, name='word_layer')\n", (4556, 4594), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((5166, 5185), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (5175, 5185), True, 'import tensorflow as tf\n'), ((10231, 10262), 'tensorflow.reshape', 'tf.reshape', (['real_path_score', '[]'], {}), '(real_path_score, [])\n', (10241, 10262), True, 'import tensorflow as tf\n'), ((10798, 10833), 'tensorflow.concat', 'tf.concat', (['[b_s, obvs, e_s]'], {'axis': '(0)'}), '([b_s, obvs, e_s], axis=0)\n', (10807, 10833), True, 'import tensorflow as tf\n'), ((10869, 10903), 'nn.forward', 'forward', (['observations', 'transitions'], {}), '(observations, transitions)\n', (10876, 10903), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((10927, 10959), 'tensorflow.reshape', 'tf.reshape', (['all_paths_scores', '[]'], {}), '(all_paths_scores, [])\n', (10937, 10959), True, 'import tensorflow as tf\n'), ((11105, 11165), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(real_path_score_list - all_paths_scores_list)'], {}), '(real_path_score_list - all_paths_scores_list)\n', (11119, 11165), True, 'import tensorflow as tf\n'), ((11636, 11671), 'tensorflow.concat', 'tf.concat', (['[b_s, obvs, e_s]'], {'axis': '(0)'}), '([b_s, obvs, e_s], axis=0)\n', (11645, 11671), True, 'import tensorflow as tf\n'), ((11707, 11806), 'nn.forward', 'forward', (['observations', 'transitions'], {'viterbi': '(True)', 'return_alpha': '(False)', 'return_best_sequence': '(True)'}), '(observations, transitions, viterbi=True, return_alpha=False,\n return_best_sequence=True)\n', (11714, 11806), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((12489, 12523), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', (['lr_rate'], {}), '(lr_rate)\n', (12514, 12523), True, 'import tensorflow as tf\n'), 
((8906, 8947), 'numpy.array', 'np.array', (['[[small] * n_tags + [0, small]]'], {}), '([[small] * n_tags + [0, small]])\n', (8914, 8947), True, 'import numpy as np\n'), ((8985, 9026), 'numpy.array', 'np.array', (['[[small] * n_tags + [small, 0]]'], {}), '([[small] * n_tags + [small, 0]])\n', (8993, 9026), True, 'import numpy as np\n'), ((10060, 10110), 'tensorflow.gather_nd', 'tf.gather_nd', (['tags_score_slice', 'tag_id_index_slice'], {}), '(tags_score_slice, tag_id_index_slice)\n', (10072, 10110), True, 'import tensorflow as tf\n'), ((10161, 10206), 'tensorflow.gather_nd', 'tf.gather_nd', (['transitions', 'tag_id_trans_slice'], {}), '(transitions, tag_id_trans_slice)\n', (10173, 10206), True, 'import tensorflow as tf\n'), ((10647, 10673), 'tensorflow.shape', 'tf.shape', (['tags_score_slice'], {}), '(tags_score_slice)\n', (10655, 10673), True, 'import tensorflow as tf\n'), ((11485, 11511), 'tensorflow.shape', 'tf.shape', (['tags_score_slice'], {}), '(tags_score_slice)\n', (11493, 11511), True, 'import tensorflow as tf\n'), ((12586, 12621), 'tensorflow.train.AdadeltaOptimizer', 'tf.train.AdadeltaOptimizer', (['lr_rate'], {}), '(lr_rate)\n', (12612, 12621), True, 'import tensorflow as tf\n'), ((12680, 12711), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr_rate'], {}), '(lr_rate)\n', (12702, 12711), True, 'import tensorflow as tf\n'), ((10737, 10756), 'tensorflow.ones', 'tf.ones', (['(s_len, 2)'], {}), '((s_len, 2))\n', (10744, 10756), True, 'import tensorflow as tf\n'), ((11575, 11594), 'tensorflow.ones', 'tf.ones', (['(s_len, 2)'], {}), '((s_len, 2))\n', (11582, 11594), True, 'import tensorflow as tf\n'), ((12773, 12807), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['lr_rate'], {}), '(lr_rate)\n', (12798, 12807), True, 'import tensorflow as tf\n'), ((12084, 12105), 'tensorflow.shape', 'tf.shape', (['tags_scores'], {}), '(tags_scores)\n', (12092, 12105), True, 'import tensorflow as tf\n'), ((11877, 11897), 
'tensorflow.shape', 'tf.shape', (['tags_score'], {}), '(tags_score)\n', (11885, 11897), True, 'import tensorflow as tf\n')] |
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import RandomSampler
from gnnff.data.keys import Keys
from gnnff.data.split import train_test_split
__all__ = ["get_loader"]
def get_loader(dataset, args, split_path, logging=None):
    """Build the train/validation/test DataLoaders for a cell dataset.

    Parameters
    ----------
    dataset : gnnff.data.Celldata
        dataset of cell.
    args : Namespace
        Namespace dict (must provide ``mode``, ``split``, ``batch_size``,
        ``cuda``).
    split_path : str
        path to the split file (created in train mode, read otherwise).
    logging : logging
        optional logger; progress messages are skipped when it is None.

    Returns
    -------
    train_loader, val_loader, test_loader : torch.utils.data.DataLoader
        ``test_loader`` is None when the test split is empty.
    """

    def _log(message):
        # Emit progress info only when a logger was handed in.
        if logging is not None:
            logging.info(message)

    # Create fresh splits in train mode, otherwise reuse the saved split file.
    if args.mode == "train":
        _log("create splits...")
        data_train, data_val, data_test = train_test_split(
            dataset, *args.split, split_file=split_path
        )
    else:
        _log("loading exiting split file ...")
        data_train, data_val, data_test = train_test_split(
            dataset, split_file=split_path
        )
    _log("create data loader ...")
    # Training uses an explicit random sampler; eval loaders just shuffle.
    train_loader = DataLoader(
        dataset=data_train,
        batch_size=args.batch_size,
        sampler=RandomSampler(data_train),
        num_workers=4,
        pin_memory=args.cuda,
        collate_fn=_collate_aseatoms,
    )
    val_loader = DataLoader(
        dataset=data_val,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=2,
        pin_memory=args.cuda,
        collate_fn=_collate_aseatoms,
    )
    test_loader = None
    if len(data_test) != 0:
        test_loader = DataLoader(
            dataset=data_test,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=2,
            pin_memory=args.cuda,
            collate_fn=_collate_aseatoms,
        )
    return train_loader, val_loader, test_loader
def _collate_aseatoms(examples):
    """
    Build a padded mini-batch from a list of per-system property dicts.

    Every property tensor is zero-padded up to the largest size seen in the
    batch; neighbor and atom masks are synthesized when absent.

    Parameters
    ----------
    examples : list
        list of dicts mapping property names to tensors.

    Returns
    -------
    dict : [str->torch.Tensor]
        mini-batch of atomistic systems

    References
    ----------
    .. [1] https://github.com/ken2403/schnetpack/blob/master/src/schnetpack/data/loader.py
    """
    # Pass 1: the elementwise maximum tensor size per property.
    max_size = {}
    for sample in examples:
        for prop, val in sample.items():
            dims = np.array(val.size(), dtype=np.int32)
            if prop in max_size:
                max_size[prop] = np.maximum(max_size[prop], dims)
            else:
                max_size[prop] = dims
    # Allocate zero-filled batch tensors of the padded shape, keeping the
    # dtype of the first example for each property.
    batch = {}
    for prop, dims in max_size.items():
        padded_shape = (len(examples),) + tuple(int(d) for d in dims)
        batch[prop] = torch.zeros(*padded_shape).type(examples[0][prop].type())
    # Masks are only synthesized when the dataset did not provide them.
    need_neighbor_mask = Keys.neighbor_mask not in batch
    need_atom_mask = Keys.atom_mask not in batch
    if need_neighbor_mask:
        batch[Keys.neighbor_mask] = torch.zeros_like(batch[Keys.neighbors]).float()
    if need_atom_mask:
        batch[Keys.atom_mask] = torch.zeros_like(batch[Keys.Z]).float()
    # Pass 2: copy each sample into its padded slot and fill the masks.
    for k, sample in enumerate(examples):
        for prop, val in sample.items():
            region = (k,) + tuple(slice(0, d) for d in val.size())
            batch[prop][region] = val
        if need_neighbor_mask:
            nbh = sample[Keys.neighbors]
            region = (k,) + tuple(slice(0, d) for d in nbh.size())
            # Negative neighbor indices mark padding; zero them out after
            # recording the mask.
            mask = nbh >= 0
            batch[Keys.neighbor_mask][region] = mask
            batch[Keys.neighbors][region] = nbh * mask.long()
        if need_atom_mask:
            z = sample[Keys.Z]
            region = (k,) + tuple(slice(0, d) for d in z.size())
            # Atomic number 0 marks a padding atom.
            batch[Keys.atom_mask][region] = z > 0
    return batch
| [
"torch.zeros_like",
"torch.utils.data.sampler.RandomSampler",
"gnnff.data.split.train_test_split",
"torch.utils.data.DataLoader"
] | [((1454, 1595), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'data_val', 'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(2)', 'pin_memory': 'args.cuda', 'collate_fn': '_collate_aseatoms'}), '(dataset=data_val, batch_size=args.batch_size, shuffle=True,\n num_workers=2, pin_memory=args.cuda, collate_fn=_collate_aseatoms)\n', (1464, 1595), False, 'from torch.utils.data import DataLoader\n'), ((827, 888), 'gnnff.data.split.train_test_split', 'train_test_split', (['dataset', '*args.split'], {'split_file': 'split_path'}), '(dataset, *args.split, split_file=split_path)\n', (843, 888), False, 'from gnnff.data.split import train_test_split\n'), ((1054, 1102), 'gnnff.data.split.train_test_split', 'train_test_split', (['dataset'], {'split_file': 'split_path'}), '(dataset, split_file=split_path)\n', (1070, 1102), False, 'from gnnff.data.split import train_test_split\n'), ((1697, 1839), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'data_test', 'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(2)', 'pin_memory': 'args.cuda', 'collate_fn': '_collate_aseatoms'}), '(dataset=data_test, batch_size=args.batch_size, shuffle=True,\n num_workers=2, pin_memory=args.cuda, collate_fn=_collate_aseatoms)\n', (1707, 1839), False, 'from torch.utils.data import DataLoader\n'), ((1313, 1338), 'torch.utils.data.sampler.RandomSampler', 'RandomSampler', (['data_train'], {}), '(data_train)\n', (1326, 1338), False, 'from torch.utils.data.sampler import RandomSampler\n'), ((3182, 3221), 'torch.zeros_like', 'torch.zeros_like', (['batch[Keys.neighbors]'], {}), '(batch[Keys.neighbors])\n', (3198, 3221), False, 'import torch\n'), ((3288, 3319), 'torch.zeros_like', 'torch.zeros_like', (['batch[Keys.Z]'], {}), '(batch[Keys.Z])\n', (3304, 3319), False, 'import torch\n')] |
from cms.plugin_base import CMSPluginBase
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_pool import plugin_pool
from fduser import models
from django.utils.translation import ugettext as _
from django.contrib.sites.models import Site
class Users(CMSPluginBase):
    """CMS plugin that renders a list of every registered user."""

    # No plugin-specific settings are stored, so the bare CMSPlugin model
    # is enough.
    model = CMSPlugin
    module = _("UserList")
    name = _("User List")
    render_template = "fduser/list.html"  # template to render the plugin with

    def render(self, context, instance, placeholder):
        """Expose the user queryset and the current site to the template."""
        # Note: `.all` is passed uncalled — presumably evaluated lazily by
        # the template engine; confirm the template relies on that.
        all_users = models.User.objects.all
        current_site = Site.objects.get_current()
        context['users'] = all_users
        context['site'] = current_site
        return context
plugin_pool.register_plugin(Users)  # make the Users plugin selectable in the CMS
| [
"django.utils.translation.ugettext",
"cms.plugin_pool.plugin_pool.register_plugin",
"django.contrib.sites.models.Site.objects.get_current"
] | [((688, 722), 'cms.plugin_pool.plugin_pool.register_plugin', 'plugin_pool.register_plugin', (['Users'], {}), '(Users)\n', (715, 722), False, 'from cms.plugin_pool import plugin_pool\n'), ((360, 373), 'django.utils.translation.ugettext', '_', (['"""UserList"""'], {}), "('UserList')\n", (361, 373), True, 'from django.utils.translation import ugettext as _\n'), ((385, 399), 'django.utils.translation.ugettext', '_', (['"""User List"""'], {}), "('User List')\n", (386, 399), True, 'from django.utils.translation import ugettext as _\n'), ((637, 663), 'django.contrib.sites.models.Site.objects.get_current', 'Site.objects.get_current', ([], {}), '()\n', (661, 663), False, 'from django.contrib.sites.models import Site\n')] |
# pyright: reportUnknownMemberType=false
import logging
import zipfile
from pathlib import Path
from typing import Dict
import requests
from us_pls._config import Config
from us_pls._download.interface import IDownloadService
from us_pls._download.models import DatafileType, DownloadType
from us_pls._logger.interface import ILoggerFactory
from us_pls._persistence.interface import IOnDiskCache
from us_pls._scraper.interface import IScrapingService
BASE_URL = "https://www.imls.gov"
class DownloadService(IDownloadService):
    """Downloads the PLS documentation, CSV archive and data-element
    definitions for the configured year and persists them through the
    on-disk cache.
    """

    _config: Config
    _scraper: IScrapingService
    _cache: IOnDiskCache
    _logger: logging.Logger

    # Bound the HTTP request so a stalled server cannot hang the process
    # forever (requests has no default timeout).
    _REQUEST_TIMEOUT_SECONDS: int = 60

    def __init__(
        self,
        config: Config,
        scraper: IScrapingService,
        cache: IOnDiskCache,
        logger_factory: ILoggerFactory,
    ) -> None:
        self._config = config
        self._scraper = scraper
        self._cache = cache
        self._logger = logger_factory.get_logger(__name__)

    def download(self) -> None:
        """Download every resource published for ``self._config.year``.

        No-op (with an info log) when the scraper has no entry for the year.
        """
        scraped_dict = self._scraper.scrape_files()
        scraped_dict_for_year = scraped_dict.get(str(self._config.year))

        if scraped_dict_for_year is None:
            self._logger.info(f"There is no data for {self._config.year}")
            return

        self._try_download_resource(
            scraped_dict_for_year, "Documentation", DownloadType.Documentation
        )
        self._try_download_resource(scraped_dict_for_year, "CSV", DownloadType.CsvZip)
        self._try_download_resource(
            scraped_dict_for_year,
            "Data Element Definitions",
            DownloadType.DataElementDefinitions,
        )

        self._clean_up_readme()

    def _try_download_resource(
        self, scraped_dict: Dict[str, str], resource: str, download_type: DownloadType
    ) -> None:
        """Fetch ``resource`` and store it, skipping when it is missing from
        the scrape results or already present in the cache.
        """
        route = scraped_dict.get(resource)

        self._logger.debug(f"Trying to download {resource}")

        if route is None:
            self._logger.warning(
                f"The resource `{resource}` does not exist for {self._config.year}"
            )
            return

        if self._resource_already_exists(download_type):
            self._logger.debug(
                f"Resources have already been downloaded for {download_type.value}"
            )
            return

        # Routes may or may not carry a leading slash; normalize either way.
        url = f"{BASE_URL}/{route[1:] if route.startswith('/') else route}"

        # BUGFIX: the original call had no timeout and could block forever.
        res = requests.get(url, timeout=self._REQUEST_TIMEOUT_SECONDS)

        if res.status_code != 200:
            msg = f"Received a non-200 status code for {url}: {res.status_code}"
            self._logger.warning(msg)
            return

        self._write_content(
            download_type,
            res.content,
            # Only the CSV archive ships as a .zip and needs extraction.
            should_unzip=str(download_type.value).endswith(".zip"),
        )

    def _resource_already_exists(self, download_type: DownloadType) -> bool:
        """Return True when the cache already holds this download.

        The CSV zip is considered present only when every extracted datafile
        exists (the archive itself is deleted after extraction).
        """
        if download_type in [
            DownloadType.Documentation,
            DownloadType.DataElementDefinitions,
        ]:
            return self._cache.exists(download_type.value)
        elif download_type == DownloadType.CsvZip:
            return all(
                [
                    self._cache.exists(str(datafile_type.value))
                    for datafile_type in DatafileType
                ]
            )
        return False

    def _write_content(
        self, download_type: DownloadType, content: bytes, should_unzip: bool = False
    ) -> None:
        """Persist raw bytes to the cache, extracting and renaming zip
        contents in place when ``should_unzip`` is set.
        """
        self._cache.put(content, download_type.value)

        if should_unzip:
            zip_path = self._cache.cache_path / Path(download_type.value)
            with zipfile.ZipFile(zip_path, "r") as zip_ref:
                zip_ref.extractall(self._cache.cache_path)
                self._move_content()
            # The extracted, renamed files replace the archive.
            self._cache.remove(zip_path)

    def _move_content(self) -> None:
        """Flatten the cache directory: rename files to canonical names and
        hoist any nested directory contents up one level.
        """
        for path in self._cache.cache_path.iterdir():
            if not path.is_dir():
                self._rename(path)
                continue

            for sub_path in path.iterdir():
                self._rename(sub_path)

            self._cache.remove(path)

    def _rename(self, path: Path) -> None:
        """Map a raw extracted filename onto its canonical DatafileType name.

        NOTE(review): these are broad substring checks (e.g. "ld", "out",
        "sum" match anywhere in the name); presumably the archives only ever
        contain the expected files — verify against a real archive.
        """
        lowered = path.name.lower()  # hoisted: used by every branch below
        new_name: str = path.name
        if "_ae_" in lowered or "ld" in lowered:
            new_name = DatafileType.SystemData.value
        elif "_outlet_" in lowered or "out" in lowered:
            new_name = DatafileType.OutletData.value
        elif "_state_" in lowered or "sum" in lowered:
            new_name = DatafileType.SummaryData.value
        elif "readme" in lowered:
            new_name = "README.txt"

        self._cache.rename(path, Path(new_name))

    def _clean_up_readme(self):
        """Replace non-ASCII bytes in the extracted README with apostrophes.

        The file is read with surrogateescape so undecodable bytes survive
        the round-trip and can be substituted here.
        """
        self._logger.debug("Cleaning up readme")

        readme_text = self._cache.get(
            "README.txt",
            "txt",
            encoding="utf-8",
            errors="surrogateescape",
        )

        if readme_text is None:
            self._logger.debug("No readme exists for this year")
            return

        cleaned_readme_text = "".join([c if ord(c) < 128 else "'" for c in readme_text])

        self._cache.put(
            bytes(cleaned_readme_text, "utf-8"),
            "README.txt",
        )
| [
"zipfile.ZipFile",
"requests.get",
"pathlib.Path"
] | [((2394, 2411), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2406, 2411), False, 'import requests\n'), ((4635, 4649), 'pathlib.Path', 'Path', (['new_name'], {}), '(new_name)\n', (4639, 4649), False, 'from pathlib import Path\n'), ((3535, 3560), 'pathlib.Path', 'Path', (['download_type.value'], {}), '(download_type.value)\n', (3539, 3560), False, 'from pathlib import Path\n'), ((3579, 3609), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_path', '"""r"""'], {}), "(zip_path, 'r')\n", (3594, 3609), False, 'import zipfile\n')] |
#!/usr/bin/env python
from dlbot import default_settings
from flask import Flask
# Enable the instance folder so deployment-specific config can live
# outside the package.
app = Flask(__name__, instance_relative_config=True)
# Layered configuration: package defaults first, then an optional
# instance-folder override (a missing dlbot.cfg is ignored via silent=True).
app.config.from_object(default_settings)
app.config.from_pyfile('dlbot.cfg', silent=True)
@app.route('/')
def hello_world():
    """Respond to the site root with a fixed greeting."""
    greeting = 'Hello, World!'
    return greeting
if __name__ == '__main__':
    # Run the Flask development server (not intended for production).
    app.run()
| [
"flask.Flask"
] | [((88, 134), 'flask.Flask', 'Flask', (['__name__'], {'instance_relative_config': '(True)'}), '(__name__, instance_relative_config=True)\n', (93, 134), False, 'from flask import Flask\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
FIXME:
sometimes you have to chown -R user:user ~/.theano or run with sudo the
first time after roboot, otherwise you get errors
CommandLineHelp:
python -m wbia_cnn --tf netrun <networkmodel>
--dataset, --ds = <dstag>:<subtag>
dstag is the main dataset name (eg PZ_MTEST), subtag are parameters to
modify (max_examples=3)
--weights, -w = \|new\|<checkpoint_tag>\|<dstag>:<checkpoint_tag> (default: <checkpoint_tag>)
new will initialize clean weights.
a checkpoint tag will try to to match a saved model state in the history.
can load weights from an external dataset.
<checkpoint_tag> defaults to current
--arch, -a = <archtag>
model architecture tag (eg siaml2_128, siam2stream, viewpoint)
--device = <processor>
sets theano device flag to a processor like gpu0, gpu1, or cpu0
"""
import logging
from wbia_cnn import models
from wbia_cnn import ingest_data
from wbia_cnn import experiments
import utool as ut
import sys
# utool injection: module-aware print / reload / profile helpers.
print, rrr, profile = ut.inject2(__name__)
# NOTE(review): this grabs the *root* logger, not getLogger(__name__) —
# presumably intentional; confirm before changing.
logger = logging.getLogger()
# This is more of a history tag: maps checkpoint shorthand to the value
# used internally (None means "use the current checkpoint").
CHECKPOINT_TAG_ALIAS = {
    'current': None,
    '': None,
}
# second level of alias indirection
# This is more of a dataset tag: short names mapped to full dataset
# specification strings ("<db>;dict(...)" or bare config dicts).
DS_TAG_ALIAS2 = {
    'flankhack': "dict(acfg_name='ctrl:pername=None,excluderef=False,contributor_contains=FlankHack', colorspace='gray', db='PZ_Master1')",
    'pzmtest-bgr': "PZ_MTEST;dict(colorspace='bgr', controlled=True, max_examples=None, num_top=None)",  # NOQA
    'pzmtest': "PZ_MTEST;dict(colorspace='gray', controlled=True, max_examples=None, num_top=None)",  # NOQA
    'gz-gray': "GZ_ALL;dict(colorspace='gray', controlled=False, max_examples=None, num_top=None)",  # NOQA
    'liberty': "liberty;dict(detector='dog', pairs=250000)",
    'combo': 'combo_vdsujffw',
    'timectrl_pzmaster1': "PZ_Master1;dict(acfg_name='timectrl', colorspace='gray', min_featweight=0.8)",  # NOQA
    'pzm2': "PZ_Master1;dict(acfg_name='timectrl:pername=None', colorspace='gray', min_featweight=0.8)",  # NOQA
    'pzm3': "PZ_Master1;dict(acfg_name=None, colorspace='gray', controlled=True, min_featweight=0.8)",
    #'pzm3' : "PZ_Master1;dict(acfg_name='default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok', colorspace='gray', min_featweight=0.8)", # NOQA
    'pzm4': "PZ_Master1;dict(acfg_name='default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok', colorspace='gray', min_featweight=0.8)",
}
def netrun():
    r"""
    Command-line driver: resolves dataset/architecture/weight tags, builds
    the model, then trains, tests and/or publishes depending on the flags
    parsed by :func:`parse_args`.

    CommandLine:
        # --- UTILITY
        python -m wbia_cnn --tf get_juction_dpath --show
        # --- DATASET BUILDING ---
        # Build Dataset Aliases
        python -m wbia_cnn --tf netrun --db PZ_MTEST --acfg ctrl --ensuredata --show
        python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg timectrl --ensuredata
        python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg timectrl:pername=None --ensuredata
        python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg timectrl:pername=None --ensuredata
        python -m wbia_cnn --tf netrun --db mnist --ensuredata --show
        python -m wbia_cnn --tf netrun --db mnist --ensuredata --show --datatype=category
        python -m wbia_cnn --tf netrun --db mnist --ensuredata --show --datatype=siam-patch
        python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg ctrl:pername=None,excluderef=False,contributor_contains=FlankHack --ensuredata --show --datatype=siam-part
        # Parts based datasets
        python -m wbia_cnn --tf netrun --db PZ_MTEST --acfg ctrl --datatype=siam-part --ensuredata --show
        # Patch based dataset (big one)
        python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok --ensuredata --show --vtd
        python -m wbia_cnn --tf netrun --ds pzm4 --weights=new --arch=siaml2_128 --train --monitor
        python -m wbia_cnn --tf netrun --ds pzm4 --arch=siaml2_128 --test
        python -m wbia_cnn --tf netrun --ds pzm4 --arch=siaml2_128 --veryverbose --no-flask
        # --- TRAINING ---
        python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok --weights=new --arch=siaml2_128 --train --monitor
        python -m wbia_cnn --tf netrun --ds timectrl_pzmaster1 --acfg ctrl:pername=None,excluderef=False,contributor_contains=FlankHack --train --weights=new --arch=siaml2_128 --monitor  # NOQA
        python -m wbia_cnn --tf netrun --ds timectrl_pzmaster1 --acfg ctrl:pername=None,excluderef=False --train --weights=new --arch=siaml2_128 --monitor  # NOQA
        python -m wbia_cnn --tf netrun --ds pzmtest --weights=new --arch=siaml2_128 --train --monitor --DEBUG_AUGMENTATION
        python -m wbia_cnn --tf netrun --ds pzmtest --weights=new --arch=siaml2_128 --train --monitor
        python -m wbia_cnn --tf netrun --ds flankhack --weights=new --arch=siaml2_partmatch --train --monitor --learning_rate=.00001
        python -m wbia_cnn --tf netrun --ds flankhack --weights=new --arch=siam_deepfaceish --train --monitor --learning_rate=.00001
        # Different ways to train mnist
        python -m wbia_cnn --tf netrun --db mnist --weights=new --arch=mnist_siaml2 --train --monitor --datatype=siam-patch
        python -m wbia_cnn --tf netrun --db mnist --weights=new --arch=mnist-category --train --monitor --datatype=category
        # --- INITIALIZED-TRAINING ---
        python -m wbia_cnn --tf netrun --ds pzmtest --arch=siaml2_128 --weights=gz-gray:current --train --monitor
        # --- TESTING ---
        python -m wbia_cnn --tf netrun --db liberty --weights=liberty:current --arch=siaml2_128 --test
        python -m wbia_cnn --tf netrun --db PZ_Master0 --weights=combo:current --arch=siaml2_128 --testall
    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia_cnn.netrun import * # NOQA
        >>> netrun()
        >>> ut.show_if_requested()
    """
    ut.colorprint('[netrun] NET RUN', 'red')
    # Unpack the tags resolved from the command line by parse_args().
    requests, hyperparams, tags = parse_args()
    ds_tag = tags['ds_tag']
    datatype = tags['datatype']
    extern_ds_tag = tags['extern_ds_tag']
    arch_tag = tags['arch_tag']
    checkpoint_tag = tags['checkpoint_tag']
    # ----------------------------
    # Choose the main dataset
    ut.colorprint('[netrun] Ensuring Dataset', 'yellow')
    dataset = ingest_data.grab_dataset(ds_tag, datatype)
    if extern_ds_tag is not None:
        # Weights may be loaded from a different dataset's training dir.
        extern_dpath = ingest_data.get_extern_training_dpath(extern_ds_tag)
    else:
        extern_dpath = None
    logger.info('dataset.training_dpath = %r' % (dataset.training_dpath,))
    logger.info('Dataset Alias Key: %r' % (dataset.alias_key,))
    logger.info(
        'Current Dataset Tag: %r'
        % (ut.invert_dict(DS_TAG_ALIAS2).get(dataset.alias_key, None),)
    )
    if requests['ensuredata']:
        # Print alias key that maps to this particular dataset
        if ut.show_was_requested():
            interact_ = dataset.interact()  # NOQA
            return
        logger.info('...exiting')
        # --ensuredata only builds the dataset; stop before model work.
        sys.exit(1)
    # ----------------------------
    # Choose model architecture
    # TODO: data will need to return info about number of labels in viewpoint models
    # Specify model archichitecture
    ut.colorprint('[netrun] Architecture Specification', 'yellow')
    if arch_tag == 'siam2stream':
        model = models.SiameseCenterSurroundModel(
            data_shape=dataset.data_shape,
            training_dpath=dataset.training_dpath,
            **hyperparams
        )
    elif arch_tag.startswith('siam'):
        model = models.SiameseL2(
            data_shape=dataset.data_shape,
            arch_tag=arch_tag,
            training_dpath=dataset.training_dpath,
            **hyperparams
        )
    elif arch_tag == 'mnist-category':
        model = models.MNISTModel(
            data_shape=dataset.data_shape,
            output_dims=dataset.output_dims,
            arch_tag=arch_tag,
            training_dpath=dataset.training_dpath,
            **hyperparams
        )
        pass
    else:
        raise ValueError('Unknown arch_tag=%r' % (arch_tag,))
    ut.colorprint('[netrun] Initialize archchitecture', 'yellow')
    model.init_arch()
    # ----------------------------
    # Choose weight initialization
    ut.colorprint('[netrun] Setting weights', 'yellow')
    if checkpoint_tag == 'new':
        ut.colorprint('[netrun] * Initializing new weights', 'lightgray')
        model.reinit_weights()
    else:
        # NOTE: checkpoint_tag is rewritten to the resolved checkpoint name.
        checkpoint_tag = model.resolve_fuzzy_checkpoint_pattern(
            checkpoint_tag, extern_dpath
        )
        ut.colorprint(
            '[netrun] * Resolving weights checkpoint_tag=%r' % (checkpoint_tag,),
            'lightgray',
        )
        if extern_dpath is not None:
            model.load_extern_weights(dpath=extern_dpath, checkpoint_tag=checkpoint_tag)
        elif model.has_saved_state(checkpoint_tag=checkpoint_tag):
            model.load_model_state(checkpoint_tag=checkpoint_tag)
        else:
            model_state_fpath = model.get_model_state_fpath(checkpoint_tag=checkpoint_tag)
            logger.info('model_state_fpath = %r' % (model_state_fpath,))
            ut.checkpath(model_state_fpath, verbose=True)
            logger.info(
                'Known checkpoints are: ' + ut.repr3(model.list_saved_checkpoints())
            )
            raise ValueError(
                ('Unresolved weight init: ' 'checkpoint_tag=%r, extern_ds_tag=%r')
                % (
                    checkpoint_tag,
                    extern_ds_tag,
                )
            )
    # logger.info('Model State:')
    # logger.info(model.get_state_str())
    # ----------------------------
    if not model.is_train_state_initialized():
        ut.colorprint('[netrun] Need to initialize training state', 'yellow')
        X_train, y_train = dataset.subset('train')
        model.ensure_data_params(X_train, y_train)
    # Run Actions
    if requests['train']:
        ut.colorprint('[netrun] Training Requested', 'yellow')
        # parse training arguments
        config = ut.argparse_dict(
            dict(
                era_size=15,
                max_epochs=1200,
                rate_decay=0.8,
            )
        )
        model.monitor_config.update(**config)
        X_train, y_train = dataset.subset('train')
        X_valid, y_valid = dataset.subset('valid')
        model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid)
    elif requests['test']:
        # assert model.best_results['epoch'] is not None
        ut.colorprint('[netrun] Test Requested', 'yellow')
        if requests['testall']:
            ut.colorprint('[netrun] * Testing on all data', 'lightgray')
            X_test, y_test = dataset.subset('all')
            flat_metadata = dataset.subset_metadata('all')
        else:
            ut.colorprint('[netrun] * Testing on test subset', 'lightgray')
            X_test, y_test = dataset.subset('test')
            flat_metadata = dataset.subset_metadata('test')
        data, labels = X_test, y_test
        dataname = dataset.alias_key
        experiments.test_siamese_performance(model, data, labels, flat_metadata, dataname)
    else:
        if not ut.get_argflag('--cmd'):
            raise ValueError('nothing here. need to train or test')
    if requests['publish']:
        ut.colorprint('[netrun] Publish Requested', 'yellow')
        publish_dpath = ut.truepath('~/Dropbox/IBEIS')
        published_model_state = ut.unixjoin(
            publish_dpath, model.arch_tag + '_model_state.pkl'
        )
        ut.copy(model.get_model_state_fpath(), published_model_state)
        ut.view_directory(publish_dpath)
        logger.info(
            'You need to get the dropbox link and '
            'register it into the appropriate file'
        )
    # pip install dropbox
    # https://www.dropbox.com/developers/core/start/python
    # import dropbox  # need oauth
    # client.share('/myfile.txt', short_url=False)
    # https://wildbookiarepository.azureedge.net/models/siaml2_128_model_state.pkl
    if ut.get_argflag('--cmd'):
        # Drop into an interactive shell for debugging.
        ut.embed()
def parse_args():
    """Parse the ``netrun`` command-line options.

    Returns
    -------
    requests : dict
        Boolean action flags: train, test, testall, publish, ensuredata
        (``test`` is forced True when ``testall`` is set).
    hyperparams : dict
        Training hyperparameters (batch_size, learning_rate, momentum,
        weight_decay) with CLI overrides applied.
    tags : dict
        Resolved tags: ds_tag, extern_ds_tag, checkpoint_tag, arch_tag,
        datatype (aliases expanded via DS_TAG_ALIAS2 / CHECKPOINT_TAG_ALIAS).
    """
    # NOTE: removed a dead `if False:` debug branch that hard-coded the
    # liberty dataset — it could never execute.
    ds_default = None
    arch_default = 'siaml2_128'
    weights_tag_default = None
    # Parse commandline args
    ds_tag = ut.get_argval(('--dataset', '--ds'), type_=str, default=ds_default)
    datatype = ut.get_argval(('--datatype', '--dt'), type_=str, default='siam-patch')
    arch_tag = ut.get_argval(('--arch', '-a'), default=arch_default)
    weights_tag = ut.get_argval(
        ('--weights', '+w'), type_=str, default=weights_tag_default
    )
    hyperparams = ut.argparse_dict(
        {
            #'batch_size': 128,
            'batch_size': 256,
            #'learning_rate': .0005,
            'learning_rate': 0.1,
            'momentum': 0.9,
            #'weight_decay': 0.0005,
            'weight_decay': 0.0001,
        },
        alias_dict={
            'weight_decay': ['decay'],
            'learning_rate': ['learn_rate'],
        },
    )
    requests = ut.argparse_dict(
        {
            'train': False,
            'test': False,
            'testall': False,
            'publish': False,
            'ensuredata': False,
        }
    )
    # --testall implies --test
    requests['test'] = requests['test'] or requests['testall']
    # breakup weights tag into extern_ds and checkpoint
    if weights_tag is not None and ':' in weights_tag:
        extern_ds_tag, checkpoint_tag = weights_tag.split(':')
    else:
        extern_ds_tag = None
        checkpoint_tag = weights_tag
    # resolve aliases (unknown tags pass through unchanged)
    ds_tag = DS_TAG_ALIAS2.get(ds_tag, ds_tag)
    extern_ds_tag = DS_TAG_ALIAS2.get(extern_ds_tag, extern_ds_tag)
    checkpoint_tag = CHECKPOINT_TAG_ALIAS.get(checkpoint_tag, checkpoint_tag)
    tags = {
        'ds_tag': ds_tag,
        'extern_ds_tag': extern_ds_tag,
        'checkpoint_tag': checkpoint_tag,
        'arch_tag': arch_tag,
        'datatype': datatype,
    }
    ut.colorprint('[netrun] * ds_tag=%r' % (ds_tag,), 'lightgray')
    ut.colorprint('[netrun] * arch_tag=%r' % (arch_tag,), 'lightgray')
    ut.colorprint('[netrun] * extern_ds_tag=%r' % (extern_ds_tag,), 'lightgray')
    ut.colorprint('[netrun] * checkpoint_tag=%r' % (checkpoint_tag,), 'lightgray')
    return requests, hyperparams, tags
def merge_ds_tags(ds_alias_list):
    r"""
    Merge several datasets, given by alias or full tag, into one.

    CommandLine:
        python -m wbia_cnn --tf merge_ds_tags --alias-list gz-gray girm pzmtest nnp

    TODO:
        http://stackoverflow.com/questions/18492273/combining-hdf5-files

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia_cnn.netrun import * # NOQA
        >>> ds_alias_list = ut.get_argval('--alias-list', type_=list, default=[])
        >>> result = merge_ds_tags(ds_alias_list)
        >>> print(result)
    """
    dataset_list = []
    for alias in ds_alias_list:
        # Unknown aliases fall through unchanged.
        resolved_tag = DS_TAG_ALIAS2.get(alias, alias)
        dataset_list.append(ingest_data.grab_siam_dataset(resolved_tag))
    merged_dataset = ingest_data.merge_datasets(dataset_list)
    logger.info(merged_dataset.alias_key)
    return merged_dataset
if __name__ == '__main__':
    """
    CommandLine:
        python -m wbia_cnn.netrun
        python -m wbia_cnn.netrun --allexamples
        python -m wbia_cnn.netrun --allexamples --noface --nosrc
    """
    # train_pz()
    import multiprocessing
    multiprocessing.freeze_support()  # required for frozen executables on win32
    import utool as ut  # NOQA
    # import warnings
    # with warnings.catch_warnings():
    #     # Cause all warnings to always be triggered.
    #     warnings.filterwarnings("error", ".*get_all_non_bias_params.*")
    # Run this module's doctests (respects --allexamples etc.).
    ut.doctest_funcs()
| [
"logging.getLogger",
"wbia_cnn.models.MNISTModel",
"utool.embed",
"wbia_cnn.models.SiameseCenterSurroundModel",
"utool.truepath",
"utool.doctest_funcs",
"utool.invert_dict",
"utool.show_was_requested",
"multiprocessing.freeze_support",
"sys.exit",
"utool.argparse_dict",
"utool.colorprint",
"... | [((1060, 1080), 'utool.inject2', 'ut.inject2', (['__name__'], {}), '(__name__)\n', (1070, 1080), True, 'import utool as ut\n'), ((1090, 1109), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1107, 1109), False, 'import logging\n'), ((6013, 6053), 'utool.colorprint', 'ut.colorprint', (['"""[netrun] NET RUN"""', '"""red"""'], {}), "('[netrun] NET RUN', 'red')\n", (6026, 6053), True, 'import utool as ut\n'), ((6350, 6402), 'utool.colorprint', 'ut.colorprint', (['"""[netrun] Ensuring Dataset"""', '"""yellow"""'], {}), "('[netrun] Ensuring Dataset', 'yellow')\n", (6363, 6402), True, 'import utool as ut\n'), ((6417, 6459), 'wbia_cnn.ingest_data.grab_dataset', 'ingest_data.grab_dataset', (['ds_tag', 'datatype'], {}), '(ds_tag, datatype)\n', (6441, 6459), False, 'from wbia_cnn import ingest_data\n'), ((7326, 7388), 'utool.colorprint', 'ut.colorprint', (['"""[netrun] Architecture Specification"""', '"""yellow"""'], {}), "('[netrun] Architecture Specification', 'yellow')\n", (7339, 7388), True, 'import utool as ut\n'), ((8207, 8268), 'utool.colorprint', 'ut.colorprint', (['"""[netrun] Initialize archchitecture"""', '"""yellow"""'], {}), "('[netrun] Initialize archchitecture', 'yellow')\n", (8220, 8268), True, 'import utool as ut\n'), ((8366, 8417), 'utool.colorprint', 'ut.colorprint', (['"""[netrun] Setting weights"""', '"""yellow"""'], {}), "('[netrun] Setting weights', 'yellow')\n", (8379, 8417), True, 'import utool as ut\n'), ((12185, 12208), 'utool.get_argflag', 'ut.get_argflag', (['"""--cmd"""'], {}), "('--cmd')\n", (12199, 12208), True, 'import utool as ut\n'), ((12510, 12577), 'utool.get_argval', 'ut.get_argval', (["('--dataset', '--ds')"], {'type_': 'str', 'default': 'ds_default'}), "(('--dataset', '--ds'), type_=str, default=ds_default)\n", (12523, 12577), True, 'import utool as ut\n'), ((12593, 12663), 'utool.get_argval', 'ut.get_argval', (["('--datatype', '--dt')"], {'type_': 'str', 'default': '"""siam-patch"""'}), "(('--datatype', '--dt'), 
type_=str, default='siam-patch')\n", (12606, 12663), True, 'import utool as ut\n'), ((12679, 12732), 'utool.get_argval', 'ut.get_argval', (["('--arch', '-a')"], {'default': 'arch_default'}), "(('--arch', '-a'), default=arch_default)\n", (12692, 12732), True, 'import utool as ut\n'), ((12751, 12825), 'utool.get_argval', 'ut.get_argval', (["('--weights', '+w')"], {'type_': 'str', 'default': 'weights_tag_default'}), "(('--weights', '+w'), type_=str, default=weights_tag_default)\n", (12764, 12825), True, 'import utool as ut\n'), ((13223, 13404), 'utool.argparse_dict', 'ut.argparse_dict', (["{'batch_size': 256, 'learning_rate': 0.1, 'momentum': 0.9, 'weight_decay': \n 0.0001}"], {'alias_dict': "{'weight_decay': ['decay'], 'learning_rate': ['learn_rate']}"}), "({'batch_size': 256, 'learning_rate': 0.1, 'momentum': 0.9,\n 'weight_decay': 0.0001}, alias_dict={'weight_decay': ['decay'],\n 'learning_rate': ['learn_rate']})\n", (13239, 13404), True, 'import utool as ut\n'), ((13635, 13745), 'utool.argparse_dict', 'ut.argparse_dict', (["{'train': False, 'test': False, 'testall': False, 'publish': False,\n 'ensuredata': False}"], {}), "({'train': False, 'test': False, 'testall': False,\n 'publish': False, 'ensuredata': False})\n", (13651, 13745), True, 'import utool as ut\n'), ((14547, 14609), 'utool.colorprint', 'ut.colorprint', (["('[netrun] * ds_tag=%r' % (ds_tag,))", '"""lightgray"""'], {}), "('[netrun] * ds_tag=%r' % (ds_tag,), 'lightgray')\n", (14560, 14609), True, 'import utool as ut\n'), ((14614, 14680), 'utool.colorprint', 'ut.colorprint', (["('[netrun] * arch_tag=%r' % (arch_tag,))", '"""lightgray"""'], {}), "('[netrun] * arch_tag=%r' % (arch_tag,), 'lightgray')\n", (14627, 14680), True, 'import utool as ut\n'), ((14685, 14761), 'utool.colorprint', 'ut.colorprint', (["('[netrun] * extern_ds_tag=%r' % (extern_ds_tag,))", '"""lightgray"""'], {}), "('[netrun] * extern_ds_tag=%r' % (extern_ds_tag,), 'lightgray')\n", (14698, 14761), True, 'import utool as ut\n'), ((14766, 
14844), 'utool.colorprint', 'ut.colorprint', (["('[netrun] * checkpoint_tag=%r' % (checkpoint_tag,))", '"""lightgray"""'], {}), "('[netrun] * checkpoint_tag=%r' % (checkpoint_tag,), 'lightgray')\n", (14779, 14844), True, 'import utool as ut\n'), ((15562, 15602), 'wbia_cnn.ingest_data.merge_datasets', 'ingest_data.merge_datasets', (['dataset_list'], {}), '(dataset_list)\n', (15588, 15602), False, 'from wbia_cnn import ingest_data\n'), ((15929, 15961), 'multiprocessing.freeze_support', 'multiprocessing.freeze_support', ([], {}), '()\n', (15959, 15961), False, 'import multiprocessing\n'), ((16198, 16216), 'utool.doctest_funcs', 'ut.doctest_funcs', ([], {}), '()\n', (16214, 16216), True, 'import utool as ut\n'), ((6517, 6569), 'wbia_cnn.ingest_data.get_extern_training_dpath', 'ingest_data.get_extern_training_dpath', (['extern_ds_tag'], {}), '(extern_ds_tag)\n', (6554, 6569), False, 'from wbia_cnn import ingest_data\n'), ((6984, 7007), 'utool.show_was_requested', 'ut.show_was_requested', ([], {}), '()\n', (7005, 7007), True, 'import utool as ut\n'), ((7121, 7132), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7129, 7132), False, 'import sys\n'), ((7439, 7561), 'wbia_cnn.models.SiameseCenterSurroundModel', 'models.SiameseCenterSurroundModel', ([], {'data_shape': 'dataset.data_shape', 'training_dpath': 'dataset.training_dpath'}), '(data_shape=dataset.data_shape,\n training_dpath=dataset.training_dpath, **hyperparams)\n', (7472, 7561), False, 'from wbia_cnn import models\n'), ((8458, 8523), 'utool.colorprint', 'ut.colorprint', (['"""[netrun] * Initializing new weights"""', '"""lightgray"""'], {}), "('[netrun] * Initializing new weights', 'lightgray')\n", (8471, 8523), True, 'import utool as ut\n'), ((8689, 8790), 'utool.colorprint', 'ut.colorprint', (["('[netrun] * Resolving weights checkpoint_tag=%r' % (checkpoint_tag,))", '"""lightgray"""'], {}), "('[netrun] * Resolving weights checkpoint_tag=%r' % (\n checkpoint_tag,), 'lightgray')\n", (8702, 8790), True, 'import 
utool as ut\n'), ((9842, 9911), 'utool.colorprint', 'ut.colorprint', (['"""[netrun] Need to initialize training state"""', '"""yellow"""'], {}), "('[netrun] Need to initialize training state', 'yellow')\n", (9855, 9911), True, 'import utool as ut\n'), ((10067, 10121), 'utool.colorprint', 'ut.colorprint', (['"""[netrun] Training Requested"""', '"""yellow"""'], {}), "('[netrun] Training Requested', 'yellow')\n", (10080, 10121), True, 'import utool as ut\n'), ((11430, 11483), 'utool.colorprint', 'ut.colorprint', (['"""[netrun] Publish Requested"""', '"""yellow"""'], {}), "('[netrun] Publish Requested', 'yellow')\n", (11443, 11483), True, 'import utool as ut\n'), ((11508, 11538), 'utool.truepath', 'ut.truepath', (['"""~/Dropbox/IBEIS"""'], {}), "('~/Dropbox/IBEIS')\n", (11519, 11538), True, 'import utool as ut\n'), ((11571, 11634), 'utool.unixjoin', 'ut.unixjoin', (['publish_dpath', "(model.arch_tag + '_model_state.pkl')"], {}), "(publish_dpath, model.arch_tag + '_model_state.pkl')\n", (11582, 11634), True, 'import utool as ut\n'), ((11735, 11767), 'utool.view_directory', 'ut.view_directory', (['publish_dpath'], {}), '(publish_dpath)\n', (11752, 11767), True, 'import utool as ut\n'), ((12218, 12228), 'utool.embed', 'ut.embed', ([], {}), '()\n', (12226, 12228), True, 'import utool as ut\n'), ((12452, 12466), 'utool.inIPython', 'ut.inIPython', ([], {}), '()\n', (12464, 12466), True, 'import utool as ut\n'), ((15476, 15513), 'wbia_cnn.ingest_data.grab_siam_dataset', 'ingest_data.grab_siam_dataset', (['ds_tag'], {}), '(ds_tag)\n', (15505, 15513), False, 'from wbia_cnn import ingest_data\n'), ((7658, 7782), 'wbia_cnn.models.SiameseL2', 'models.SiameseL2', ([], {'data_shape': 'dataset.data_shape', 'arch_tag': 'arch_tag', 'training_dpath': 'dataset.training_dpath'}), '(data_shape=dataset.data_shape, arch_tag=arch_tag,\n training_dpath=dataset.training_dpath, **hyperparams)\n', (7674, 7782), False, 'from wbia_cnn import models\n'), ((10639, 10689), 'utool.colorprint', 
'ut.colorprint', (['"""[netrun] Test Requested"""', '"""yellow"""'], {}), "('[netrun] Test Requested', 'yellow')\n", (10652, 10689), True, 'import utool as ut\n'), ((11192, 11278), 'wbia_cnn.experiments.test_siamese_performance', 'experiments.test_siamese_performance', (['model', 'data', 'labels', 'flat_metadata', 'dataname'], {}), '(model, data, labels, flat_metadata,\n dataname)\n', (11228, 11278), False, 'from wbia_cnn import experiments\n'), ((7892, 8055), 'wbia_cnn.models.MNISTModel', 'models.MNISTModel', ([], {'data_shape': 'dataset.data_shape', 'output_dims': 'dataset.output_dims', 'arch_tag': 'arch_tag', 'training_dpath': 'dataset.training_dpath'}), '(data_shape=dataset.data_shape, output_dims=dataset.\n output_dims, arch_tag=arch_tag, training_dpath=dataset.training_dpath,\n **hyperparams)\n', (7909, 8055), False, 'from wbia_cnn import models\n'), ((9270, 9315), 'utool.checkpath', 'ut.checkpath', (['model_state_fpath'], {'verbose': '(True)'}), '(model_state_fpath, verbose=True)\n', (9282, 9315), True, 'import utool as ut\n'), ((10734, 10795), 'utool.colorprint', 'ut.colorprint', (['"""[netrun] * Testing on all data"""', '"""lightgray"""'], {}), "('[netrun] * Testing on all data', 'lightgray')\n", (10747, 10795), True, 'import utool as ut\n'), ((10932, 10996), 'utool.colorprint', 'ut.colorprint', (['"""[netrun] * Testing on test subset"""', '"""lightgray"""'], {}), "('[netrun] * Testing on test subset', 'lightgray')\n", (10945, 10996), True, 'import utool as ut\n'), ((11300, 11323), 'utool.get_argflag', 'ut.get_argflag', (['"""--cmd"""'], {}), "('--cmd')\n", (11314, 11323), True, 'import utool as ut\n'), ((6811, 6840), 'utool.invert_dict', 'ut.invert_dict', (['DS_TAG_ALIAS2'], {}), '(DS_TAG_ALIAS2)\n', (6825, 6840), True, 'import utool as ut\n')] |
# coding: utf-8
from __future__ import absolute_import
import unittest
import ks_api_client
from ks_api_client.api.super_multiple_order_api import SuperMultipleOrderApi # noqa: E501
from ks_api_client.rest import ApiException
# Auto-generated unit-test stubs for the SuperMultipleOrderApi endpoints;
# each test currently only names the endpoint it should exercise.
class TestSuperMultipleOrderApi(unittest.TestCase):
    """SuperMultipleOrderApi unit test stubs"""
    def setUp(self):
        # A fresh, unconfigured API client is built for every test case.
        self.api = ks_api_client.api.super_multiple_order_api.SuperMultipleOrderApi()  # noqa: E501
    def tearDown(self):
        # Nothing to release: setUp allocates no external resources.
        pass
    def test_cancel_sm_order(self):
        """Test case for cancel_sm_order
        Cancel an Super Multiple order  # noqa: E501
        """
        # TODO: exercise self.api.cancel_sm_order once request fixtures exist.
        pass
    def test_modify_sm_order(self):
        """Test case for modify_sm_order
        Modify an existing super multiple order  # noqa: E501
        """
        # TODO: exercise self.api.modify_sm_order once request fixtures exist.
        pass
    def test_place_new_sm_order(self):
        """Test case for place_new_sm_order
        Place a New Super Multiple order  # noqa: E501
        """
        # TODO: exercise self.api.place_new_sm_order once request fixtures exist.
        pass
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"ks_api_client.api.super_multiple_order_api.SuperMultipleOrderApi"
] | [((1016, 1031), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1029, 1031), False, 'import unittest\n'), ((376, 442), 'ks_api_client.api.super_multiple_order_api.SuperMultipleOrderApi', 'ks_api_client.api.super_multiple_order_api.SuperMultipleOrderApi', ([], {}), '()\n', (440, 442), False, 'import ks_api_client\n')] |
import numpy as np
import os
import os.path as path
from keras.applications import vgg16, inception_v3, resnet50, mobilenet
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
import kmri
# Resolve the image directory relative to this script's location.
base_path = path.dirname(path.realpath(__file__))
img_path = path.join(base_path, 'img')
## Load the VGG model
# model = vgg16.VGG16(weights='imagenet')
# normalize_pixels = True
## Load the MobileNet model
# model = mobilenet.MobileNet(weights='imagenet')
# normalize_pixels = True
## Load the ResNet50 model
model = resnet50.ResNet50(weights='imagenet')
normalize_pixels = False
def get_img(file_name):
    # Load one image from img/, resized to the 224x224 input that the
    # selected ImageNet model expects.
    image = load_img(path.join(img_path, file_name), target_size=(224, 224))
    if normalize_pixels:
        # NOTE(review): divides by 256, not 255 -- presumably intended as a
        # 0..1 rescale; confirm against the chosen model's preprocessing.
        return img_to_array(image) / 256
    else:
        return img_to_array(image)
# Batch every image in img/ and hand the stack to the kmri visualizer.
img_input = np.array([get_img(file_name) for file_name in os.listdir(img_path)])
kmri.visualize_model(model, img_input)
| [
"keras.preprocessing.image.img_to_array",
"os.listdir",
"kmri.visualize_model",
"os.path.join",
"os.path.realpath",
"keras.applications.resnet50.ResNet50"
] | [((297, 324), 'os.path.join', 'path.join', (['base_path', '"""img"""'], {}), "(base_path, 'img')\n", (306, 324), True, 'import os.path as path\n'), ((557, 594), 'keras.applications.resnet50.ResNet50', 'resnet50.ResNet50', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (574, 594), False, 'from keras.applications import vgg16, inception_v3, resnet50, mobilenet\n'), ((917, 955), 'kmri.visualize_model', 'kmri.visualize_model', (['model', 'img_input'], {}), '(model, img_input)\n', (937, 955), False, 'import kmri\n'), ((261, 284), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (274, 284), True, 'import os.path as path\n'), ((667, 697), 'os.path.join', 'path.join', (['img_path', 'file_name'], {}), '(img_path, file_name)\n', (676, 697), True, 'import os.path as path\n'), ((814, 833), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (826, 833), False, 'from keras.preprocessing.image import img_to_array\n'), ((763, 782), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (775, 782), False, 'from keras.preprocessing.image import img_to_array\n'), ((893, 913), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (903, 913), False, 'import os\n')] |
import pytest
from secrethitlergame.phase import Phase
from unittest import mock
from secrethitlergame.voting_phase import VotingPhase
def test_initialization():
    # A new VotingPhase is a Phase subclass with no government selected yet.
    vp = VotingPhase()
    assert isinstance(vp, Phase)
    assert vp.chancelor is None
    assert vp.president is None
def test_get_previous_government():
    player = mock.Mock()
    player.person.return_value = '<NAME>'
    vp_old = VotingPhase()
    vp_old.chancelor = player
    vp_old.president = player
    # A phase with no predecessor reports (None, None) even though its own
    # government fields are populated.
    ret = vp_old.get_previous_government()
    assert all(x is None for x in ret)
    assert len(ret) == 2
    # A chained phase reports the government of its previous phase.
    vp = VotingPhase(previous_phase=vp_old)
    ret = vp.get_previous_government()
    assert all(x is not None for x in ret)
    assert len(ret) == 2
    assert ret[0] == player
def test_add_chancelor():
    player = mock.Mock()
    player2 = mock.Mock()
    vp_old = VotingPhase()
    vp_old.add_chancelor(player)
    assert vp_old.chancelor is not None
    assert vp_old.chancelor == player
    vp = VotingPhase(previous_phase=vp_old)
    # Re-nominating the previous phase's chancelor must be rejected and
    # leave the new phase's chancelor unset.
    with pytest.raises(ValueError):
        vp.add_chancelor(player)
    assert vp.chancelor is None
    # A different player is a valid nomination.
    vp.add_chancelor(player2)
    assert vp.chancelor is not None
    assert vp.chancelor == player2
def test_add_president():
    # add_president simply records the given player.
    player = mock.Mock()
    vp = VotingPhase()
    vp.add_president(player)
    assert vp.president == player
def test_failed():
    # A failed vote yields the phase's successor, which is a distinct object.
    vp = VotingPhase()
    x = vp.failed()
    assert vp.next_phase == x
    assert vp != x
| [
"secrethitlergame.voting_phase.VotingPhase",
"unittest.mock.Mock",
"pytest.raises"
] | [((173, 186), 'secrethitlergame.voting_phase.VotingPhase', 'VotingPhase', ([], {}), '()\n', (184, 186), False, 'from secrethitlergame.voting_phase import VotingPhase\n'), ((335, 346), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (344, 346), False, 'from unittest import mock\n'), ((402, 415), 'secrethitlergame.voting_phase.VotingPhase', 'VotingPhase', ([], {}), '()\n', (413, 415), False, 'from secrethitlergame.voting_phase import VotingPhase\n'), ((592, 626), 'secrethitlergame.voting_phase.VotingPhase', 'VotingPhase', ([], {'previous_phase': 'vp_old'}), '(previous_phase=vp_old)\n', (603, 626), False, 'from secrethitlergame.voting_phase import VotingPhase\n'), ((803, 814), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (812, 814), False, 'from unittest import mock\n'), ((829, 840), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (838, 840), False, 'from unittest import mock\n'), ((855, 868), 'secrethitlergame.voting_phase.VotingPhase', 'VotingPhase', ([], {}), '()\n', (866, 868), False, 'from secrethitlergame.voting_phase import VotingPhase\n'), ((990, 1024), 'secrethitlergame.voting_phase.VotingPhase', 'VotingPhase', ([], {'previous_phase': 'vp_old'}), '(previous_phase=vp_old)\n', (1001, 1024), False, 'from secrethitlergame.voting_phase import VotingPhase\n'), ((1269, 1280), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1278, 1280), False, 'from unittest import mock\n'), ((1291, 1304), 'secrethitlergame.voting_phase.VotingPhase', 'VotingPhase', ([], {}), '()\n', (1302, 1304), False, 'from secrethitlergame.voting_phase import VotingPhase\n'), ((1398, 1411), 'secrethitlergame.voting_phase.VotingPhase', 'VotingPhase', ([], {}), '()\n', (1409, 1411), False, 'from secrethitlergame.voting_phase import VotingPhase\n'), ((1034, 1059), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1047, 1059), False, 'import pytest\n')] |
import brownie
def test_set_minter_admin_only(accounts, token):
    # Non-admin callers must be rejected by the dev-revert guard.
    with brownie.reverts("dev: admin only"):
        token.set_minter(accounts[2], {"from": accounts[1]})
def test_set_admin_admin_only(accounts, token):
    # Only the current admin may transfer the admin role.
    with brownie.reverts("dev: admin only"):
        token.set_admin(accounts[2], {"from": accounts[1]})
def test_set_name_admin_only(accounts, token):
    # Renaming the token is an admin-only operation.
    with brownie.reverts("Only admin is allowed to change name"):
        token.set_name("Foo Token", "FOO", {"from": accounts[1]})
def test_set_minter(accounts, token):
    # The admin (accounts[0]) can appoint a new minter.
    token.set_minter(accounts[1], {"from": accounts[0]})
    assert token.minter() == accounts[1]
def test_set_admin(accounts, token):
    # The admin can hand the admin role to another account.
    token.set_admin(accounts[1], {"from": accounts[0]})
    assert token.admin() == accounts[1]
def test_set_name(accounts, token):
    # The admin can change both name and symbol in one call.
    token.set_name("Foo Token", "FOO", {"from": accounts[0]})
    assert token.name() == "Foo Token"
    assert token.symbol() == "FOO"
| [
"brownie.reverts"
] | [((75, 109), 'brownie.reverts', 'brownie.reverts', (['"""dev: admin only"""'], {}), "('dev: admin only')\n", (90, 109), False, 'import brownie\n'), ((231, 265), 'brownie.reverts', 'brownie.reverts', (['"""dev: admin only"""'], {}), "('dev: admin only')\n", (246, 265), False, 'import brownie\n'), ((385, 440), 'brownie.reverts', 'brownie.reverts', (['"""Only admin is allowed to change name"""'], {}), "('Only admin is allowed to change name')\n", (400, 440), False, 'import brownie\n')] |
import numpy as np
import json
from collections import Counter
import matplotlib.pyplot as plt
# Load the TACRED training split; ``examples`` is a list of dicts with at
# least the keys used below ('relation', 'token').
DATASET_DIR = './dataset/tacred/train_mod.json'
with open(DATASET_DIR) as f:
    examples = json.load(f)
def plot_counts(data):
    """Plot a bar chart of relation frequencies, most frequent first.

    The dominant "no_relation" label is dropped so the remaining classes
    stay visible.

    Keyword Arguments:
    data -- iterable of relation label strings
    """
    counts = Counter(data)
    # ``pop`` with a default avoids a KeyError when "no_relation" is absent.
    counts.pop("no_relation", None)
    labels, values = zip(*counts.items())
    width = 1
    # Indices of the labels ordered by descending count.
    order = list(reversed(np.argsort(values)))
    values_sorted = np.array(values)[order]
    labels_sorted = np.array(labels)[order]
    print(values_sorted)
    positions = np.arange(len(labels_sorted))
    plt.bar(positions, values_sorted, width)
    # BUG FIX: ticks were previously placed at the *permuted original*
    # indexes while the bars sit at 0..n-1, so labels did not line up with
    # their bars. Place ticks at the actual bar positions instead.
    plt.xticks(positions + width * 0.5, labels_sorted, rotation='vertical')
    plt.ylabel("Number of examples")
    plt.tight_layout()
    plt.show()
# relation distribution: report corpus size and label diversity, then plot.
print('NUM EXAMPLES', len(examples))
relations = [e['relation'] for e in examples]
print("NUM_UNIQUE_RELATIONS", len(Counter(relations)))
plot_counts(relations)
def plot_counts_sent(data):
    """Plot a histogram of sentence lengths, clipped to [0, 100] tokens.

    Keyword Arguments:
    data -- iterable of sentence lengths (ints)
    """
    # BUG FIX: the body previously read the module-level ``sents`` and
    # silently ignored the ``data`` parameter.
    plt.hist(data, range=(0, 100), bins=100)
    plt.ylabel("Number of examples")
    plt.xlabel("Sentence Length")
    plt.show()
# sentence length distribution: one entry per example, measured in tokens.
sents = [len(e['token']) for e in examples]
plot_counts_sent(sents)
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"collections.Counter",
"numpy.array",
"numpy.argsort",
"matplotlib.pyplot.tight_layout",
"json.load",
"matplotlib.pyplot.show"
] | [((190, 202), 'json.load', 'json.load', (['f'], {}), '(f)\n', (199, 202), False, 'import json\n'), ((240, 253), 'collections.Counter', 'Counter', (['data'], {}), '(data)\n', (247, 253), False, 'from collections import Counter\n'), ((634, 710), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(indexes_sorted + width * 0.5)', 'labels_sorted'], {'rotation': '"""vertical"""'}), "(indexes_sorted + width * 0.5, labels_sorted, rotation='vertical')\n", (644, 710), True, 'import matplotlib.pyplot as plt\n'), ((715, 747), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of examples"""'], {}), "('Number of examples')\n", (725, 747), True, 'import matplotlib.pyplot as plt\n'), ((752, 770), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (768, 770), True, 'import matplotlib.pyplot as plt\n'), ((775, 785), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (783, 785), True, 'import matplotlib.pyplot as plt\n'), ((1005, 1046), 'matplotlib.pyplot.hist', 'plt.hist', (['sents'], {'range': '(0, 100)', 'bins': '(100)'}), '(sents, range=(0, 100), bins=100)\n', (1013, 1046), True, 'import matplotlib.pyplot as plt\n'), ((1051, 1083), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of examples"""'], {}), "('Number of examples')\n", (1061, 1083), True, 'import matplotlib.pyplot as plt\n'), ((1088, 1117), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sentence Length"""'], {}), "('Sentence Length')\n", (1098, 1117), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1130, 1132), True, 'import matplotlib.pyplot as plt\n'), ((478, 494), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (486, 494), True, 'import numpy as np\n'), ((520, 536), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (528, 536), True, 'import numpy as np\n'), ((928, 946), 'collections.Counter', 'Counter', (['relations'], {}), '(relations)\n', (935, 946), False, 'from collections import 
Counter\n'), ((403, 421), 'numpy.argsort', 'np.argsort', (['values'], {}), '(values)\n', (413, 421), True, 'import numpy as np\n')] |
import sys
from fractions import Fraction as frac
from math import gcd, floor
from isqrt import isqrt
# Wiener's attack setup: raise the recursion limit for the recursive
# continued-fraction helpers, then read the RSA public exponent e and
# modulus n from hex files (base auto-detected via int(..., 0)).
sys.setrecursionlimit(10**4)
# text=int(open("4.3_ciphertext.hex").read())
e=int(open("4.4_public_key.hex").read(),0)
n=int((open("4.5_modulo.hex").read()),0)
p = 0
q = 0
# print(text,"\n",e,"\n",n)
def validate(x):
    """Check whether convergent x = k/d of e/n yields a valid RSA key.

    Uses module globals ``e`` and ``n``. From the candidate private
    exponent d and numerator k, the candidate totient is (e*d - 1)/k;
    p and q are then the roots of z^2 - (n - totient + 1) z + n = 0.
    Returns (valid, p, q, d); p/q are None on an early reject.
    """
    k = x.numerator
    d = x.denominator
    totient = frac(e * d - 1, k)
    # Negative discriminant: the quadratic has no real roots.
    if (n - totient + 1) ** 2 - 4 * n < 0:
        return False, None, None,None
    D = isqrt(((n - totient + 1) ** 2 - 4 * n).numerator)
    # D must be an exact integer square root of the discriminant.
    if D * D != (n - totient + 1) ** 2 - 4 * n:
        return False, None, None,None
    x = ((n - totient + 1) + (D)) / (2)
    y = ((n - totient + 1) - (D)) / (2)
    v = False
    # The roots are only valid primes if they are integral.
    if x == floor(x):
        v = True
    return v, x, y,d
def extendedEuclid(l, s):
    """Extended Euclidean algorithm.

    Returns (x, y, d) such that l*x + s*y == d == gcd(l, s).
    """
    if s == 0:
        return (1, 0, l)
    x, y, d = extendedEuclid(s, l % s)
    # BUG FIX: use integer floor division. ``floor(l / s)`` routes through a
    # float and silently loses precision for the multi-hundred-bit RSA
    # integers this script operates on, corrupting the coefficients.
    return (y, x - (l // s) * y, d)
def value(x):
    """Collapse a continued-fraction term list into a single Fraction.

    value([a0, a1, ..., ak]) == a0 + 1/(a1 + 1/(... + 1/ak)).
    """
    acc = x[-1]
    # Fold from the innermost term outwards.
    for term in x[-2::-1]:
        acc = term + frac(1, acc)
    return acc
def cont(r):
    """Return the continued-fraction expansion of Fraction ``r`` as a list.

    Rewritten iteratively: the original recursed once per term, which is
    the reason the module had to raise ``sys.setrecursionlimit`` and could
    still overflow for long expansions.
    """
    terms = []
    while True:
        i = floor(r)
        terms.append(i)
        f = r - frac(i, 1)
        if f == frac(0, 1):
            return terms
        r = frac(1, f)
def bigmod(x, y, p):
    """Compute (x ** y) % p by iterative square-and-multiply.

    BUG FIX: the recursive original had no base case for y == 0 (it kept
    recursing with y // 2 == 0 forever). The iterative form handles any
    y >= 0 and avoids deep recursion for large exponents.
    """
    result = 1 % p
    base = x % p
    while y > 0:
        if y & 1:
            result = (result * base) % p
        base = (base * base) % p
        y >>= 1
    return result
# Wiener's attack driver: walk the convergents of e/n, test each candidate
# private exponent d via validate(), and stop at the first that factors n.
x=cont(frac(e,n))
for i in range(len(x)):
    c = (value(x[:i + 1]))
    # Skip the zero convergent and even denominators (d must be odd).
    if c != 0 and c.denominator % 2 != 0:
        v, p, q, d = validate(c)
        if v:
            break
totient = (p - 1) * (q - 1)
# extendedEuclid returns (x, y, gcd); here x = e^{-1} mod totient, which
# should equal the recovered d (sanity-checked in the print below).
d2, y, z = extendedEuclid(e, totient)
# print(d==d2)
# m = bigmod(text, d, n)
print("Private Key:",d,d==d2,p*q==n)
# print("Message:",m)
| [
"sys.setrecursionlimit",
"fractions.Fraction",
"isqrt.isqrt",
"math.floor"
] | [((106, 136), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 4)'], {}), '(10 ** 4)\n', (127, 136), False, 'import sys\n'), ((392, 410), 'fractions.Fraction', 'frac', (['(e * d - 1)', 'k'], {}), '(e * d - 1, k)\n', (396, 410), True, 'from fractions import Fraction as frac\n'), ((503, 552), 'isqrt.isqrt', 'isqrt', (['((n - totient + 1) ** 2 - 4 * n).numerator'], {}), '(((n - totient + 1) ** 2 - 4 * n).numerator)\n', (508, 552), False, 'from isqrt import isqrt\n'), ((1118, 1126), 'math.floor', 'floor', (['r'], {}), '(r)\n', (1123, 1126), False, 'from math import gcd, floor\n'), ((1429, 1439), 'fractions.Fraction', 'frac', (['e', 'n'], {}), '(e, n)\n', (1433, 1439), True, 'from fractions import Fraction as frac\n'), ((751, 759), 'math.floor', 'floor', (['x'], {}), '(x)\n', (756, 759), False, 'from math import gcd, floor\n'), ((1140, 1150), 'fractions.Fraction', 'frac', (['i', '(1)'], {}), '(i, 1)\n', (1144, 1150), True, 'from fractions import Fraction as frac\n'), ((1164, 1174), 'fractions.Fraction', 'frac', (['(0)', '(1)'], {}), '(0, 1)\n', (1168, 1174), True, 'from fractions import Fraction as frac\n'), ((1055, 1067), 'fractions.Fraction', 'frac', (['(1)', 'sum'], {}), '(1, sum)\n', (1059, 1067), True, 'from fractions import Fraction as frac\n'), ((1220, 1230), 'fractions.Fraction', 'frac', (['(1)', 'f'], {}), '(1, f)\n', (1224, 1230), True, 'from fractions import Fraction as frac\n'), ((934, 946), 'math.floor', 'floor', (['(l / s)'], {}), '(l / s)\n', (939, 946), False, 'from math import gcd, floor\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 19 06:10:55 2018
@author: <NAME>
Demo of gradient boosting tree
A very nice reference for gradient boosting
http://homes.cs.washington.edu/~tqchen/pdf/BoostedTree.pdf
LightGBM
https://github.com/Microsoft/LightGBM/tree/master/examples/python-guide
Catboost
https://github.com/catboost/tutorials
Comparative study of different gradient boosting tree
https://towardsdatascience.com/catboost-vs-light-gbm-vs-xgboost-5f93620723db
"""
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import numpy as np
import lightgbm as lgb
import catboost as cb
# Load the red-wine quality dataset; last column is the quality label.
df_wine = pd.read_csv('../Data/winequality-red.csv', sep=';')
df_shape = df_wine.shape
X, y = df_wine.iloc[:, 0:df_shape[1]-1], df_wine.iloc[:, df_shape[1]-1]
# Shift labels so classes start at 0 (classifiers expect 0-based classes).
y = y - np.min(y)
X = X.values  # convert to numpy array
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=0)
# Baseline: scikit-learn's gradient boosting classifier.
gbt = GradientBoostingClassifier( n_estimators=100, learning_rate=0.1, random_state=1)
gbt.fit(X_train, y_train)
print( "score: {}".format( gbt.score(X_test, y_test) ) )
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
    'num_leaves': 6,
    'metric': ('l1', 'l2'),
    'verbose': 0
}
print('Starting training...')
# train
evals_result = {}
# NOTE(review): categorical_feature=[11] marks the last input column as
# categorical -- confirm this matches the dataset's column layout.
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=100,
                valid_sets=[lgb_train, lgb_test],
                feature_name=['f' + str(i + 1) for i in range(X_train.shape[-1])],
                categorical_feature=[11],
                evals_result=evals_result,
                verbose_eval=10)
print('Plotting feature importances...')
ax = lgb.plot_importance(gbm, max_num_features=5)
plt.show()
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval
print('The rmse of prediction is:{}'.format( mean_squared_error(y_test, y_pred) ** 0.5) )
| [
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.mean_squared_error",
"lightgbm.Dataset",
"numpy.min",
"sklearn.ensemble.GradientBoostingClassifier",
"lightgbm.plot_importance",
"matplotlib.pyplot.show"
] | [((768, 819), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/winequality-red.csv"""'], {'sep': '""";"""'}), "('../Data/winequality-red.csv', sep=';')\n", (779, 819), True, 'import pandas as pd\n'), ((1008, 1061), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(X, y, test_size=0.2, random_state=0)\n', (1024, 1061), False, 'from sklearn.model_selection import train_test_split\n'), ((1072, 1151), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'n_estimators': '(100)', 'learning_rate': '(0.1)', 'random_state': '(1)'}), '(n_estimators=100, learning_rate=0.1, random_state=1)\n', (1098, 1151), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((1279, 1308), 'lightgbm.Dataset', 'lgb.Dataset', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (1290, 1308), True, 'import lightgbm as lgb\n'), ((1320, 1368), 'lightgbm.Dataset', 'lgb.Dataset', (['X_test', 'y_test'], {'reference': 'lgb_train'}), '(X_test, y_test, reference=lgb_train)\n', (1331, 1368), True, 'import lightgbm as lgb\n'), ((1933, 1977), 'lightgbm.plot_importance', 'lgb.plot_importance', (['gbm'], {'max_num_features': '(5)'}), '(gbm, max_num_features=5)\n', (1952, 1977), True, 'import lightgbm as lgb\n'), ((1978, 1988), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1986, 1988), True, 'import matplotlib.pyplot as plt\n'), ((926, 935), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (932, 935), True, 'import numpy as np\n'), ((2115, 2149), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2133, 2149), False, 'from sklearn.metrics import mean_squared_error\n')] |
import numpy
from scipy.ndimage import gaussian_filter
from skimage.data import binary_blobs
from skimage.util import random_noise
from aydin.it.transforms.fixedpattern import FixedPatternTransform
def add_patterned_noise(image, n):
    """Return a noisy copy of ``image``: fixed-pattern gain/offset plus
    seeded Gaussian and salt-and-pepper noise (n is the pattern size)."""
    # Work on a copy so the caller's clean image is left untouched.
    image = image.copy()
    # Multiplicative fixed-pattern gain in [0.95, 1.05) per pixel.
    image *= 1 + 0.1 * (numpy.random.rand(n, n) - 0.5)
    # Additive fixed-pattern offset in [0, 0.1).
    image += 0.1 * numpy.random.rand(n, n)
    # image += 0.1*numpy.random.rand(n)[]
    image = random_noise(image, mode="gaussian", var=0.00001, seed=0)
    image = random_noise(image, mode="s&p", amount=0.000001, seed=0)
    return image
def test_fixed_pattern_real():
    """FixedPatternTransform round-trip test on a synthetic blob image."""
    n = 128
    # Smooth binary blobs serve as the clean ground-truth image.
    image = binary_blobs(length=n, seed=1, n_dim=3, volume_fraction=0.01).astype(
        numpy.float32
    )
    image = gaussian_filter(image, sigma=4)
    noisy = add_patterned_noise(image, n).astype(numpy.float32)
    bs = FixedPatternTransform(sigma=0)
    preprocessed = bs.preprocess(noisy)
    postprocessed = bs.postprocess(preprocessed)
    # import napari
    # with napari.gui_qt():
    #     viewer = napari.Viewer()
    #     viewer.add_image(image, name='image')
    #     viewer.add_image(noisy, name='noisy')
    #     viewer.add_image(preprocessed, name='preprocessed')
    #     viewer.add_image(postprocessed, name='postprocessed')
    # Shape/dtype must survive the round trip.
    assert image.shape == postprocessed.shape
    assert image.dtype == postprocessed.dtype
    # Preprocessing should strip most of the pattern (close to clean image)
    # and postprocessing should restore the noisy input almost exactly.
    assert numpy.abs(preprocessed - image).mean() < 0.007
    assert preprocessed.dtype == postprocessed.dtype
    assert numpy.abs(postprocessed - noisy).mean() < 1e-8
    # import napari
    # with napari.gui_qt():
    #     viewer = napari.Viewer()
    #     viewer.add_image(image, name='image')
    #     viewer.add_image(noisy, name='noisy')
    #     viewer.add_image(corrected, name='corrected')
| [
"numpy.abs",
"aydin.it.transforms.fixedpattern.FixedPatternTransform",
"numpy.random.rand",
"skimage.data.binary_blobs",
"skimage.util.random_noise",
"scipy.ndimage.gaussian_filter"
] | [((413, 468), 'skimage.util.random_noise', 'random_noise', (['image'], {'mode': '"""gaussian"""', 'var': '(1e-05)', 'seed': '(0)'}), "(image, mode='gaussian', var=1e-05, seed=0)\n", (425, 468), False, 'from skimage.util import random_noise\n'), ((483, 536), 'skimage.util.random_noise', 'random_noise', (['image'], {'mode': '"""s&p"""', 'amount': '(1e-06)', 'seed': '(0)'}), "(image, mode='s&p', amount=1e-06, seed=0)\n", (495, 536), False, 'from skimage.util import random_noise\n'), ((724, 755), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['image'], {'sigma': '(4)'}), '(image, sigma=4)\n', (739, 755), False, 'from scipy.ndimage import gaussian_filter\n'), ((830, 860), 'aydin.it.transforms.fixedpattern.FixedPatternTransform', 'FixedPatternTransform', ([], {'sigma': '(0)'}), '(sigma=0)\n', (851, 860), False, 'from aydin.it.transforms.fixedpattern import FixedPatternTransform\n'), ((335, 358), 'numpy.random.rand', 'numpy.random.rand', (['n', 'n'], {}), '(n, n)\n', (352, 358), False, 'import numpy\n'), ((614, 675), 'skimage.data.binary_blobs', 'binary_blobs', ([], {'length': 'n', 'seed': '(1)', 'n_dim': '(3)', 'volume_fraction': '(0.01)'}), '(length=n, seed=1, n_dim=3, volume_fraction=0.01)\n', (626, 675), False, 'from skimage.data import binary_blobs\n'), ((285, 308), 'numpy.random.rand', 'numpy.random.rand', (['n', 'n'], {}), '(n, n)\n', (302, 308), False, 'import numpy\n'), ((1361, 1392), 'numpy.abs', 'numpy.abs', (['(preprocessed - image)'], {}), '(preprocessed - image)\n', (1370, 1392), False, 'import numpy\n'), ((1473, 1505), 'numpy.abs', 'numpy.abs', (['(postprocessed - noisy)'], {}), '(postprocessed - noisy)\n', (1482, 1505), False, 'import numpy\n')] |
from os import environ
from pathlib import Path
from appdirs import user_cache_dir
from ._version import version as __version__ # noqa: F401
from .bridge import Transform # noqa: F401
from .core import combine # noqa: F401
from .geodesic import BBox, line, panel, wedge # noqa: F401
from .geometry import get_coastlines # noqa: F401
from .geoplotter import GeoBackgroundPlotter, GeoMultiPlotter, GeoPlotter # noqa: F401
from .log import get_logger
# Configure the top-level logger.
logger = get_logger(__name__)
# https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
# Honour XDG_CACHE_HOME if set, otherwise fall back to the platform default.
_cache_dir = Path(environ.get("XDG_CACHE_HOME", user_cache_dir())) / __package__
#: GeoVista configuration dictionary.
config = dict(cache_dir=_cache_dir)
# Best-effort overrides: a bundled siteconfig module, then a user-provided
# geovista_config module; either may be absent, so ImportError is ignored.
try:
    from .siteconfig import update_config as _update_config
    _update_config(config)
    del _update_config
except ImportError:
    pass
try:
    from geovista_config import update_config as _update_config
    _update_config(config)
    del _update_config
except ImportError:
    pass
# Keep the module namespace clean.
del _cache_dir
| [
"appdirs.user_cache_dir",
"geovista_config.update_config"
] | [((828, 850), 'geovista_config.update_config', '_update_config', (['config'], {}), '(config)\n', (842, 850), True, 'from geovista_config import update_config as _update_config\n'), ((978, 1000), 'geovista_config.update_config', '_update_config', (['config'], {}), '(config)\n', (992, 1000), True, 'from geovista_config import update_config as _update_config\n'), ((649, 665), 'appdirs.user_cache_dir', 'user_cache_dir', ([], {}), '()\n', (663, 665), False, 'from appdirs import user_cache_dir\n')] |
from morpion import Morpion
import argparse
if __name__=='__main__':
    # Parse the optional -mode/--mode flag; any human-ish value enables a
    # human opponent, anything else (including the False default) plays
    # against the computer.
    parser = argparse.ArgumentParser(description="Play Tic-Tac-Toe\n")
    parser.add_argument('-mode', '--mode', help='play against Human', default=False)
    args = parser.parse_args()
    mode = False
    if args.mode in ["h", "human", "humain", "manuel"]:
        mode = True
    morpion_game = Morpion(human=mode)
    morpion_game.start_game()
"morpion.Morpion",
"argparse.ArgumentParser"
] | [((84, 141), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Play Tic-Tac-Toe\n"""'}), "(description='Play Tic-Tac-Toe\\n')\n", (107, 141), False, 'import argparse\n'), ((372, 391), 'morpion.Morpion', 'Morpion', ([], {'human': 'mode'}), '(human=mode)\n', (379, 391), False, 'from morpion import Morpion\n')] |
"""
Mount /sys/fs/cgroup Option
"""
from typing import Callable
import click
def cgroup_mount_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    Option for choosing to mount `/sys/fs/cgroup` into the container.
    """
    help_message = (
        'Mounting ``/sys/fs/cgroup`` from the host is required to run '
        'applications which require ``cgroup`` isolation. '
        'Choose to not mount ``/sys/fs/cgroup`` if it is not available on '
        'the host.'
    )
    # Build the click decorator first, then apply it to the command.
    decorator = click.option(
        '--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',
        default=True,
        show_default=True,
        help=help_message,
    )
    function = decorator(command)  # type: Callable[..., None]
    return function
| [
"click.option"
] | [((260, 562), 'click.option', 'click.option', (['"""--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup"""'], {'default': '(True)', 'show_default': '(True)', 'help': '"""Mounting ``/sys/fs/cgroup`` from the host is required to run applications which require ``cgroup`` isolation. Choose to not mount ``/sys/fs/cgroup`` if it is not available on the host."""'}), "('--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup', default=True,\n show_default=True, help=\n 'Mounting ``/sys/fs/cgroup`` from the host is required to run applications which require ``cgroup`` isolation. Choose to not mount ``/sys/fs/cgroup`` if it is not available on the host.'\n )\n", (272, 562), False, 'import click\n')] |
"""functions that generate reports and figures using the .xml output from the performance tests"""
__all__ = ['TestSuite', 'parse_testsuite_xml']
class TestSuite:
    """Container for one parsed test suite: name, platform info and tests."""

    def __init__(self, name, platform, tests):
        self.name = name
        self.platform = platform
        self.tests = tests

    def __repr__(self):
        import pprint
        contents = (self.name, self.platform, self.tests)
        return 'TestSuite' + pprint.pformat(contents)
class Test:
    """A single test: its name plus variable and result mappings."""

    def __init__(self, name, variables, results):
        self.name = name
        self.variables = variables
        self.results = results

    def __repr__(self):
        contents = (self.name, self.variables, self.results)
        return 'Test' + repr(contents)
def scalar_element(element):
    """Return the element's 'value' attribute coerced to int, then float,
    otherwise returned as-is.

    A missing attribute yields None (``element.get`` returns None and both
    conversions raise TypeError).
    """
    value = element.get('value')
    try:
        return int(value)
    except (TypeError, ValueError):
        # Narrowed from a bare ``except``: only conversion failures should
        # fall through; anything else (e.g. KeyboardInterrupt) must propagate.
        try:
            return float(value)
        except (TypeError, ValueError):
            return value
def parse_testsuite_platform(et):
    """Extract the <platform>/<device> description into a nested dict."""
    device_element = et.find('platform').find('device')
    device = {'name': device_element.get('name')}
    # Each <property> contributes one scalar entry to the device dict.
    for prop in device_element.findall('property'):
        device[prop.get('name')] = scalar_element(prop)
    return {'device': device}
def parse_testsuite_tests(et):
testsuite_tests = {}
for test_element in et.findall('test'):
# test name
test_name = test_element.get('name')
# test variables: name -> value
test_variables = {}
for variable_element in test_element.findall('variable'):
test_variables[variable_element.get('name')] = scalar_element(variable_element)
# test results: name -> (value, units)
test_results = {}
for result_element in test_element.findall('result'):
# TODO make this a thing that can be converted to its first element when treated like a number
test_results[result_element.get('name')] = scalar_element(result_element)
testsuite_tests[test_name] = Test(test_name, test_variables, test_results)
return testsuite_tests
def parse_testsuite_xml(filename):
    """Parse a performance-test XML file into a TestSuite object."""
    import xml.etree.ElementTree as ET
    tree = ET.parse(filename)
    return TestSuite(
        tree.getroot().get('name'),
        parse_testsuite_platform(tree),
        parse_testsuite_tests(tree),
    )
| [
"xml.etree.ElementTree.parse",
"pprint.pformat"
] | [((2252, 2270), 'xml.etree.ElementTree.parse', 'ET.parse', (['filename'], {}), '(filename)\n', (2260, 2270), True, 'import xml.etree.ElementTree as ET\n'), ((373, 427), 'pprint.pformat', 'pprint.pformat', (['(self.name, self.platform, self.tests)'], {}), '((self.name, self.platform, self.tests))\n', (387, 427), False, 'import pprint\n')] |
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras layer mirroring tf.contrib.layers.bias_add."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
class BiasLayer(tf.keras.layers.Layer):
  """Keras layer that only adds a bias to the input.

  `BiasLayer` implements the operation:
  `output = input + bias`

  Arguments:
      bias_initializer: Initializer for the bias vector.

  Input shape:
    nD tensor with shape: `(batch_size, ..., input_dim)`. The most common
      situation would be a 2D input with shape `(batch_size, input_dim)`. Note
      a rank of at least 2 is required.

  Output shape:
    nD tensor with shape: `(batch_size, ..., input_dim)`. For instance, for a
      2D input with shape `(batch_size, input_dim)`, the output would have
      shape `(batch_size, input_dim)`.
  """

  def __init__(self, bias_initializer='zeros', **kwargs):
    # Mirror keras.layers.Dense: accept input_dim as shorthand for input_shape.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(BiasLayer, self).__init__(**kwargs)
    self.bias_initializer = tf.keras.initializers.get(bias_initializer)
    self.supports_masking = True

  def build(self, input_shape):
    input_shape = tf.TensorShape(input_shape)
    # Rank-1 inputs get a single shared bias; otherwise one bias per
    # feature along the last dimension.
    if input_shape.rank == 1:
      shape = (1,)
    else:
      shape = (tf.compat.dimension_value(input_shape[-1]),)
    self.bias = self.add_weight(
        'bias',
        shape=shape,
        initializer=self.bias_initializer,
        dtype=self.dtype,
        trainable=True)
    self.built = True

  def call(self, inputs):
    # bias_add requires rank >= 2: temporarily expand rank-1 inputs, add
    # the bias, then squeeze the helper axis back out.
    if inputs.shape.rank == 1:
      expanded_inputs = tf.expand_dims(inputs, -1)
      with_bias = tf.nn.bias_add(expanded_inputs, self.bias)
      return with_bias[..., 0]
    return tf.nn.bias_add(inputs, self.bias)

  def compute_output_shape(self, input_shape):
    # Adding a bias never changes the shape.
    return input_shape

  def get_config(self):
    # Serialize the initializer so the layer can be reconstructed.
    config = {
        'bias_initializer':
            tf.keras.initializers.serialize(self.bias_initializer),
    }
    base_config = super(BiasLayer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| [
"tensorflow.keras.initializers.serialize",
"tensorflow.TensorShape",
"tensorflow.keras.initializers.get",
"tensorflow.expand_dims",
"tensorflow.nn.bias_add",
"tensorflow.compat.dimension_value"
] | [((1768, 1811), 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['bias_initializer'], {}), '(bias_initializer)\n', (1793, 1811), True, 'import tensorflow as tf\n'), ((1897, 1924), 'tensorflow.TensorShape', 'tf.TensorShape', (['input_shape'], {}), '(input_shape)\n', (1911, 1924), True, 'import tensorflow as tf\n'), ((2442, 2475), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['inputs', 'self.bias'], {}), '(inputs, self.bias)\n', (2456, 2475), True, 'import tensorflow as tf\n'), ((2312, 2338), 'tensorflow.expand_dims', 'tf.expand_dims', (['inputs', '(-1)'], {}), '(inputs, -1)\n', (2326, 2338), True, 'import tensorflow as tf\n'), ((2357, 2399), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['expanded_inputs', 'self.bias'], {}), '(expanded_inputs, self.bias)\n', (2371, 2399), True, 'import tensorflow as tf\n'), ((2627, 2681), 'tensorflow.keras.initializers.serialize', 'tf.keras.initializers.serialize', (['self.bias_initializer'], {}), '(self.bias_initializer)\n', (2658, 2681), True, 'import tensorflow as tf\n'), ((1999, 2041), 'tensorflow.compat.dimension_value', 'tf.compat.dimension_value', (['input_shape[-1]'], {}), '(input_shape[-1])\n', (2024, 2041), True, 'import tensorflow as tf\n')] |
# __Date__ : 1/5/2020.
# __Author__ : CodePerfectPlus
# __Package__ : Python 3
# __GitHub__ : https://www.github.com/codeperfectplus
#
from Algorithms import LinearRegression
X = [12, 24, 36]
y = [25, 49, 73]
lr = LinearRegression()
lr.fit(X, y)
y_predict = lr.predict(12)
print(y_predict)
| [
"Algorithms.LinearRegression"
] | [((231, 249), 'Algorithms.LinearRegression', 'LinearRegression', ([], {}), '()\n', (247, 249), False, 'from Algorithms import LinearRegression\n')] |
from django.contrib import admin
from di_scoring import models
# copypastad with love from :
# `http://stackoverflow.com/questions/10543032/how-to-show-all-fields-of-model-in-admin-page`
# subclassed modeladmins' list_displays will contain all model fields except
# for id
class CustomModelAdminMixin(object):
def __init__(self, model, admin_site):
self.list_display = [field.name for field in model._meta.fields
if field.name != "id"]
super(CustomModelAdminMixin, self).__init__(model, admin_site)
@admin.register(
models.Manager,
models.School,
models.TeamChallenge,
models.Location,
models.Team,
models.TC_Field,
models.TC_Appraiser,
models.TC_Event,
models.TC_Score,
models.IC_Appraiser,
models.IC_Event,
models.IC_Score,
models.TC_Appraiser_Permission,
)
class DefaultModelAdmin(CustomModelAdminMixin, admin.ModelAdmin):
pass
| [
"django.contrib.admin.register"
] | [((552, 823), 'django.contrib.admin.register', 'admin.register', (['models.Manager', 'models.School', 'models.TeamChallenge', 'models.Location', 'models.Team', 'models.TC_Field', 'models.TC_Appraiser', 'models.TC_Event', 'models.TC_Score', 'models.IC_Appraiser', 'models.IC_Event', 'models.IC_Score', 'models.TC_Appraiser_Permission'], {}), '(models.Manager, models.School, models.TeamChallenge, models.\n Location, models.Team, models.TC_Field, models.TC_Appraiser, models.\n TC_Event, models.TC_Score, models.IC_Appraiser, models.IC_Event, models\n .IC_Score, models.TC_Appraiser_Permission)\n', (566, 823), False, 'from django.contrib import admin\n')] |
# Originally auto-generated on 2021-02-15-12:14:36 -0500 EST
# By '--verbose --verbose x7.lib.shell_tools'
from unittest import TestCase
from x7.lib.annotations import tests
from x7.testing.support import Capture
from x7.lib import shell_tools
from x7.lib.shell_tools_load import ShellTool
@tests(shell_tools)
class TestModShellTools(TestCase):
"""Tests for stand-alone functions in x7.lib.shell_tools module"""
@tests(shell_tools.Dir)
def test_dir(self):
self.assertIn('__init__', dir(self))
self.assertNotIn('__init__', shell_tools.Dir(self))
self.assertIn('test_dir', shell_tools.Dir(self))
@tests(shell_tools.help)
def test_help(self):
with Capture() as orig:
help(shell_tools.Dir)
with Capture() as modified:
shell_tools.help(shell_tools.Dir)
self.assertEqual(orig.stdout(), modified.stdout())
self.assertIn('Like dir(v), but only non __ names', orig.stdout())
st_dir = ShellTool('Dir', shell_tools.Dir)
with Capture() as as_shell_tool:
shell_tools.help(st_dir)
self.assertEqual(orig.stdout(), as_shell_tool.stdout())
self.assertNotIn('__init__', as_shell_tool.stdout())
with Capture() as orig_as_shell_tool:
help(st_dir)
self.assertIn('__init__', orig_as_shell_tool.stdout())
@tests(shell_tools.help)
def test_help_on_help(self):
with Capture() as orig:
help(help)
with Capture() as modified:
shell_tools.help(ShellTool('help', shell_tools.help))
self.assertEqual(orig.stdout(), modified.stdout())
@tests(shell_tools.tools)
def test_tools(self):
with Capture() as out:
shell_tools.tools()
self.assertIn('Help for tools', out.stdout())
self.assertGreaterEqual(out.stdout().count('\n'), 5)
| [
"x7.lib.shell_tools.tools",
"x7.lib.shell_tools.help",
"x7.lib.shell_tools.Dir",
"x7.testing.support.Capture",
"x7.lib.annotations.tests",
"x7.lib.shell_tools_load.ShellTool"
] | [((294, 312), 'x7.lib.annotations.tests', 'tests', (['shell_tools'], {}), '(shell_tools)\n', (299, 312), False, 'from x7.lib.annotations import tests\n'), ((425, 447), 'x7.lib.annotations.tests', 'tests', (['shell_tools.Dir'], {}), '(shell_tools.Dir)\n', (430, 447), False, 'from x7.lib.annotations import tests\n'), ((640, 663), 'x7.lib.annotations.tests', 'tests', (['shell_tools.help'], {}), '(shell_tools.help)\n', (645, 663), False, 'from x7.lib.annotations import tests\n'), ((1365, 1388), 'x7.lib.annotations.tests', 'tests', (['shell_tools.help'], {}), '(shell_tools.help)\n', (1370, 1388), False, 'from x7.lib.annotations import tests\n'), ((1644, 1668), 'x7.lib.annotations.tests', 'tests', (['shell_tools.tools'], {}), '(shell_tools.tools)\n', (1649, 1668), False, 'from x7.lib.annotations import tests\n'), ((988, 1021), 'x7.lib.shell_tools_load.ShellTool', 'ShellTool', (['"""Dir"""', 'shell_tools.Dir'], {}), "('Dir', shell_tools.Dir)\n", (997, 1021), False, 'from x7.lib.shell_tools_load import ShellTool\n'), ((554, 575), 'x7.lib.shell_tools.Dir', 'shell_tools.Dir', (['self'], {}), '(self)\n', (569, 575), False, 'from x7.lib import shell_tools\n'), ((611, 632), 'x7.lib.shell_tools.Dir', 'shell_tools.Dir', (['self'], {}), '(self)\n', (626, 632), False, 'from x7.lib import shell_tools\n'), ((702, 711), 'x7.testing.support.Capture', 'Capture', ([], {}), '()\n', (709, 711), False, 'from x7.testing.support import Capture\n'), ((768, 777), 'x7.testing.support.Capture', 'Capture', ([], {}), '()\n', (775, 777), False, 'from x7.testing.support import Capture\n'), ((803, 836), 'x7.lib.shell_tools.help', 'shell_tools.help', (['shell_tools.Dir'], {}), '(shell_tools.Dir)\n', (819, 836), False, 'from x7.lib import shell_tools\n'), ((1035, 1044), 'x7.testing.support.Capture', 'Capture', ([], {}), '()\n', (1042, 1044), False, 'from x7.testing.support import Capture\n'), ((1075, 1099), 'x7.lib.shell_tools.help', 'shell_tools.help', (['st_dir'], {}), '(st_dir)\n', (1091, 1099), 
False, 'from x7.lib import shell_tools\n'), ((1238, 1247), 'x7.testing.support.Capture', 'Capture', ([], {}), '()\n', (1245, 1247), False, 'from x7.testing.support import Capture\n'), ((1435, 1444), 'x7.testing.support.Capture', 'Capture', ([], {}), '()\n', (1442, 1444), False, 'from x7.testing.support import Capture\n'), ((1490, 1499), 'x7.testing.support.Capture', 'Capture', ([], {}), '()\n', (1497, 1499), False, 'from x7.testing.support import Capture\n'), ((1708, 1717), 'x7.testing.support.Capture', 'Capture', ([], {}), '()\n', (1715, 1717), False, 'from x7.testing.support import Capture\n'), ((1738, 1757), 'x7.lib.shell_tools.tools', 'shell_tools.tools', ([], {}), '()\n', (1755, 1757), False, 'from x7.lib import shell_tools\n'), ((1542, 1577), 'x7.lib.shell_tools_load.ShellTool', 'ShellTool', (['"""help"""', 'shell_tools.help'], {}), "('help', shell_tools.help)\n", (1551, 1577), False, 'from x7.lib.shell_tools_load import ShellTool\n')] |